From 6ea9f3fbd5e82e0d96a655b5a2f79141d30796b0 Mon Sep 17 00:00:00 2001 From: Josh Burt Date: Mon, 13 May 2024 07:53:56 -0700 Subject: [PATCH] Enhancement - Leverage requests session object for network level retry management (#183) --- .github/workflows/pull-request.yml | 11 +- .github/workflows/python-publish.yml | 5 +- CONTRIBUTING.md | 1 - LICENSE | 2 +- README.md | 4 +- ae5_tools/api.py | 107 ++- ae5_tools/common/config/environment.py | 1 + anaconda-project.yml | 10 - build-package.sh | 14 + docs/source/COMMANDS.md | 3 - docs/source/README.md | 3 +- docs/source/SYSTEM_TESTS.md | 178 ----- docs/source/conf.py | 2 +- docs/source/index.rst | 1 - tests/adsp/common/fixture_manager.py | 291 -------- tests/adsp/common/utils.py | 84 +-- tests/fixtures/system/fixtures.json | 47 -- tests/fixtures/system/fixtures.orig.json | 47 -- tests/fixtures/system/testproj1.tar.gz | Bin 2741 -> 0 bytes tests/fixtures/system/testproj2.tar.gz | Bin 2740 -> 0 bytes tests/fixtures/system/testproj3.tar.gz | Bin 2198 -> 0 bytes tests/load/.env | 11 - tests/load/__init__.py | 0 tests/load/ae5_tools/__init__.py | 0 tests/load/ae5_tools/test_sessions.py | 88 --- tests/load/runner.py | 29 - tests/system/.env | 15 - tests/system/__init__.py | 0 tests/system/ae5_tools/__init__.py | 0 tests/system/ae5_tools/cli/__init__.py | 0 .../system/ae5_tools/cli/commands/__init__.py | 0 .../ae5_tools/cli/commands/test_deploy.py | 176 ----- .../system/ae5_tools/cli/commands/test_job.py | 238 ------ .../cli/commands/test_secret_system.py | 48 -- tests/system/ae5_tools/cli/test_cli.py | 524 -------------- tests/system/ae5_tools/conftest.py | 73 -- tests/system/ae5_tools/test_api.py | 685 ------------------ .../ae5_tools/test_api_secret_system.py | 45 -- .../ae5_tools/test_api_user_list_system.py | 38 - tests/system/ae5_tools/test_options.py | 164 ----- tests/system/runner.py | 147 ---- tests/system/state.py | 8 - tests/unit/ae5_tools/test_api.py | 120 +++ 43 files changed, 250 insertions(+), 2970 deletions(-) create mode 100755 build-package.sh delete mode 100644 docs/source/SYSTEM_TESTS.md delete mode 100644 tests/adsp/common/fixture_manager.py delete mode 100644 tests/fixtures/system/fixtures.json delete mode 100644 tests/fixtures/system/fixtures.orig.json delete mode 100644 tests/fixtures/system/testproj1.tar.gz delete mode 100644 tests/fixtures/system/testproj2.tar.gz delete mode 100644 tests/fixtures/system/testproj3.tar.gz delete mode 100644 tests/load/.env delete mode 100644 tests/load/__init__.py delete mode 100644 tests/load/ae5_tools/__init__.py delete mode 100644 tests/load/ae5_tools/test_sessions.py delete mode 100644 tests/load/runner.py delete mode 100644 tests/system/.env delete mode 100644 tests/system/__init__.py delete mode 100644 tests/system/ae5_tools/__init__.py delete mode 100644 tests/system/ae5_tools/cli/__init__.py delete mode 100644 tests/system/ae5_tools/cli/commands/__init__.py delete mode 100644 tests/system/ae5_tools/cli/commands/test_deploy.py delete mode 100644 tests/system/ae5_tools/cli/commands/test_job.py delete mode 100644 tests/system/ae5_tools/cli/commands/test_secret_system.py delete mode 100644 tests/system/ae5_tools/cli/test_cli.py delete mode 100644 tests/system/ae5_tools/conftest.py delete mode 100644 tests/system/ae5_tools/test_api.py delete mode 100644 tests/system/ae5_tools/test_api_secret_system.py delete mode 100644 tests/system/ae5_tools/test_api_user_list_system.py delete mode 100644 tests/system/ae5_tools/test_options.py delete mode 100644 tests/system/runner.py delete mode 100644 
tests/system/state.py create mode 100644 tests/unit/ae5_tools/test_api.py diff --git a/.github/workflows/pull-request.yml b/.github/workflows/pull-request.yml index cc074427..7da23abb 100644 --- a/.github/workflows/pull-request.yml +++ b/.github/workflows/pull-request.yml @@ -6,9 +6,6 @@ on: jobs: build: runs-on: ubuntu-latest -# runs-on: self-hosted -# runs-on: self-hosted-amd64-small-privileged -# runs-on: self-hosted-amd64-large-privileged-on-demand-storage defaults: run: shell: bash -el {0} @@ -40,16 +37,10 @@ jobs: - name: Build Conda Package run: | mkdir build - conda build conda-recipe --output-folder build + ./build-package.sh - name: Run Integration Tests run: | anaconda-project run test:integration:slipstream -# - name: Run System Tests -# env: -# AE5_HOSTNAME: dev1.ae.anacondaconnect.com -# CI: true -# run: | -# anaconda-project run test:system - name: Upload to anaconda.org (Dev Build) env: ANACONDA_TOKEN: ${{ secrets.ANACONDA_TOKEN }} diff --git a/.github/workflows/python-publish.yml b/.github/workflows/python-publish.yml index 122a0e5d..f1fe21df 100644 --- a/.github/workflows/python-publish.yml +++ b/.github/workflows/python-publish.yml @@ -8,9 +8,6 @@ on: jobs: package: runs-on: ubuntu-latest -# runs-on: self-hosted -# runs-on: self-hosted-amd64-small-privileged -# runs-on: self-hosted-amd64-large-privileged-on-demand-storage defaults: run: shell: bash -el {0} @@ -33,7 +30,7 @@ jobs: - name: Build Conda Package run: | mkdir build - conda build conda-recipe --output-folder build + ./build-package.sh - name: Upload to anaconda.org env: ANACONDA_TOKEN: ${{ secrets.ANACONDA_TOKEN }} diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 7e5189cd..c366a9da 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -21,7 +21,6 @@ These commands are used during development for solution management. | test | Default | Run all test suites | | test:unit | Default | Unit Test Suite | | test:integration | Default | Integration Test Suite | -| test:system | Default | System Test Suite | ## Contributing diff --git a/LICENSE b/LICENSE index 79ca3dae..d96347cb 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -Copyright 2023 Anaconda, Inc. +Copyright 2024 Anaconda, Inc. 
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/README.md b/README.md index dc98197f..05a4a78f 100644 --- a/README.md +++ b/README.md @@ -18,7 +18,6 @@ There is already a fair amount of inline help, so type `ae5 --help` to get start For k8s service integration refer to: * [k8s Service Component Documentation](docs/source/K8S_SERVER.md) * [Project Commands Documentation](docs/source/COMMANDS.md) -* [System and Load Testing](docs/source/SYSTEM_TESTS.md) * [Contributing](CONTRIBUTING.md) ## Installation @@ -66,9 +65,10 @@ The package has the following particular dependencies: - `revision`: `list`, `info`, `download` - `sample`: `info`, `list` - `secret`: `list`, `add`, `delete` + - `role`: `add`, `remove` - `run`: `delete`, `info`, `list`, `log`, `stop` - `session`: `branches`, `changes`, `info`, `list`, `open`, `start`, `stop` - - `user`: `info`, `list` + - `user`: `info`, `list`, `create`, `delete` - Simple commands: `call`, `login`, `logout` - Login options: `--hostname`, `--username`, `--admin-username`, `--admin-hostname`, `--impersonate` - Output format options: `--format`, `--filter`, `--columns`, `--sort`, `--width`, `--wide`, `--no-header` diff --git a/ae5_tools/api.py b/ae5_tools/api.py index 85dbd2fd..fec102c1 100644 --- a/ae5_tools/api.py +++ b/ae5_tools/api.py @@ -15,9 +15,13 @@ import requests from dateutil import parser +from requests import Session +from requests.adapters import HTTPAdapter from requests.packages import urllib3 +from urllib3 import Retry from .archiver import create_tar_archive +from .common.config.environment import demand_env_var, get_env_var from .config import config from .docker import build_image, get_condarc, get_dockerfile from .filter import filter_list_of_dicts, filter_vars, split_filter @@ -222,8 +226,6 @@ def __init__(self, response, method, url, **kwargs): msg.append(f' json: {kwargs["json"]}') super(AEUnexpectedResponseError, self).__init__("\n".join(msg)) - pass - class AESessionBase(object): """Base class for AE5 API interactions.""" @@ -250,15 +252,60 @@ def __init__(self, hostname, username, password, prefix, persist): self.password = password self.persist = persist self.prefix = prefix.lstrip("/") - self.session = requests.Session() - self.session.verify = False - self.session.cookies = LWPCookieJar() + self.session: Session = AESessionBase._build_requests_session() + + # Cloudflare headers need to be present on all requests (even before auth can be start). + self._set_cf_headers() + + # Proceed with auth flow if self.persist: self._load() self.connected = self._connected() if self.connected: self._set_header() + def _set_cf_headers(self): + """ + If cloudflare auth is enabled, then get and set the headers + https://developers.cloudflare.com/cloudflare-one/identity/service-tokens/#connect-your-service-to-access + CF-Access-Client-Id: + CF-Access-Client-Secret: + """ + + if get_env_var(name="CF_ACCESS_CLIENT_ID") and get_env_var(name="CF_ACCESS_CLIENT_SECRET"): + self.session.headers["CF-Access-Client-Id"] = demand_env_var(name="CF_ACCESS_CLIENT_ID") + self.session.headers["CF-Access-Client-Secret"] = demand_env_var(name="CF_ACCESS_CLIENT_SECRET") + + @staticmethod + def _build_requests_session() -> Session: + """ + Responsible for creating the requests session object. + This implementation is global right now, but future work should allow more granular + control of retries on a per-call basis. 
+ """ + + session: Session = Session() + session.cookies = LWPCookieJar() + + # TODO: This should be parameterized + session.verify = False + + # Status Code Defaults + # 403, 501, 502 are seen when ae5 is behind CloudFlare + # 502, 503, 504 can be encountered when ae5 is under heavy load + # TODO: this should be definable on a per command basis, and parameterized. + retries: Retry = Retry( + total=10, + backoff_factor=0.1, + status_forcelist=[403, 502, 503, 504], + allowed_methods={"POST", "PUT", "PATCH", "GET", "DELETE", "OPTIONS", "HEAD"}, + ) + + adapter: HTTPAdapter = HTTPAdapter(max_retries=retries) + session.mount(prefix="https://", adapter=adapter) + + return session + @staticmethod def _auth_message(msg, nl=True): print(msg, file=sys.stderr, end="\n" if nl else "") @@ -291,6 +338,9 @@ def _is_login(self, response): pass def authorize(self): + # Cloudflare headers need to be present on all requests (even before auth can be start). + self._set_cf_headers() + key = f"{self.username}@{self.hostname}" need_password = self.password is None last_valid = True @@ -594,6 +644,9 @@ def _set_header(self): s.headers["x-xsrftoken"] = cookie.value break + # Ensure that Cloudflare headers get added [back] to session when setting the other auth headers. + self._set_cf_headers() + def _load(self): s = self.session if os.path.exists(self._filename): @@ -1787,11 +1840,45 @@ def _set_header(self): self.session.headers["Authorization"] = f'Bearer {self._sdata["access_token"]}' def _connect(self, password): - resp = self.session.post( - self._login_base + "/token", - data={"username": self.username, "password": password, "grant_type": "password", "client_id": "admin-cli"}, - ) - self._sdata = {} if resp.status_code == 401 else resp.json() + try: + # Set the initial security data to an empty dictionary. + self._sdata = {} + + # Get our auth + params: dict = {"username": self.username, "password": password, "grant_type": "password", "client_id": "admin-cli"} + resp: requests.Response = self.session.post( + self._login_base + "/token", + data=params, + ) + + if resp.status_code not in [401]: + try: + self._sdata = resp.json() + except json.decoder.JSONDecodeError as error: + # The response is not json parsable. + # This is most likely some type of error (serialized, or html content, etc). + print(f"Received an unexpected response.\nStatus Code: {resp.status_code}\n{resp.text}") + + except requests.exceptions.RetryError: + message: str = f"Exceeded maximum retry limit on call to {self._login_base}/token" + try: + message += f", response code seen: {resp.status_code}, last response: {resp.text}" + except NameError: + # if `resp` is not defined, then we hit the retry max before it was declared + # during the `session.post` operation. + pass + + print(message) + + except Exception as error: + message: str = f"Unknown error calling {self._login_base}/token" + try: + message += f", response code seen: {resp.status_code}, last response: {resp.text}" + except NameError: + # if `resp` is not defined, just pass. + pass + print(message) + print(str(error)) def _disconnect(self): if self._sdata: diff --git a/ae5_tools/common/config/environment.py b/ae5_tools/common/config/environment.py index c562389b..5797541e 100644 --- a/ae5_tools/common/config/environment.py +++ b/ae5_tools/common/config/environment.py @@ -1,4 +1,5 @@ """ Helper functions for environment variables. 
""" + from __future__ import annotations import os diff --git a/anaconda-project.yml b/anaconda-project.yml index 1398884e..f4fee313 100644 --- a/anaconda-project.yml +++ b/anaconda-project.yml @@ -71,16 +71,6 @@ commands: conda install build/noarch/ae5-tools-*.tar.bz2 py.test --cov=ae5_tools -v tests/integration --cov-append --cov-report=xml -vv - test:load: - env_spec: default - unix: | - python -m tests.load.runner - - test:system: - env_spec: default - unix: | - python -m tests.system.runner - # Documentation Commands #################################################### build:apidocs: diff --git a/build-package.sh b/build-package.sh new file mode 100755 index 00000000..4b647af3 --- /dev/null +++ b/build-package.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +# Perform the build +conda build conda-recipe --no-anaconda-upload --no-test --output-folder build + +# Check the exit status of the cp command +if [ $? -eq 0 ]; then + echo "Build successful" +else + echo "Build failed" +fi + +echo "Moving on ..." +exit 0 diff --git a/docs/source/COMMANDS.md b/docs/source/COMMANDS.md index a6cf7893..71ebb827 100644 --- a/docs/source/COMMANDS.md +++ b/docs/source/COMMANDS.md @@ -15,9 +15,6 @@ These commands are used during project development. | test | Default | Run all test suites | | test:unit | Default | Unit Test Suite | | test:integration | Default | Integration Test Suite | -| test:system | Default | System Test Suite | -| test:load | Default | Load Test Suite | - ### Runtime diff --git a/docs/source/README.md b/docs/source/README.md index 5c33387b..e053a351 100644 --- a/docs/source/README.md +++ b/docs/source/README.md @@ -57,9 +57,10 @@ The package has the following particular dependencies: - `revision`: `list`, `info`, `download` - `sample`: `info`, `list` - `secret`: `list`, `add`, `delete` + - `role`: `add`, `remove` - `run`: `delete`, `info`, `list`, `log`, `stop` - `session`: `branches`, `changes`, `info`, `list`, `open`, `start`, `stop` - - `user`: `info`, `list` + - `user`: `info`, `list`, `create`, `delete` - Simple commands: `call`, `login`, `logout` - Login options: `--hostname`, `--username`, `--admin-username`, `--admin-hostname`, `--impersonate` - Output format options: `--format`, `--filter`, `--columns`, `--sort`, `--width`, `--wide`, `--no-header` diff --git a/docs/source/SYSTEM_TESTS.md b/docs/source/SYSTEM_TESTS.md deleted file mode 100644 index 5fb410be..00000000 --- a/docs/source/SYSTEM_TESTS.md +++ /dev/null @@ -1,178 +0,0 @@ -# System Tests - -## Overview -AE5 system and load testing can be accomplished using the ae5-tools test harness. - -## Setup -The test harness can generate (but will not forcibly recreate by default), and tear-down (if configured to do so) all required fixtures on the target system. To accomplish this, the harness will leverage the realm administrator account for instance. - -The configuration is driven by environmental variables. The defaults managed for the harness are as follows: - -| Variable | Default | Description | -|--------------------|----------------------|----------------------------------| -| AE5_HOSTNAME | anaconda.example.com | AE5 instance F.Q.D.N. | -| AE5_ADMIN_USERNAME | admin | AE5 realm admin username | -| AE5_ADMIN_PASSWORD | admin | AE5 realm admin password | -| AE5_K8S_ENDPOINT | ssh:centos | K8S service endpoint declaration | -| AE5_K8S_PORT | 23456 | K8S service port | -| CI | False | CI environment test skip flag | - - -All harness environmental variables can be over-ridden or extended. 
- -The defaults will be over-ridden if the environmental variables are defined before the harness is executed. (e.g. the harness will not over-ride them if they already exist). This allows for flexibility for scenarios such as build-runners, local development, or remote testing against an instance. -As-is the tests can run out-of-the-box against a local development instance. - -The defaults are defined for the system and load tests within `tests/system/.env` and `tests/load/.env` respectively. - -## Fixture Control - -### System Tests -The fixtures and flags for the creation and removal are defined within the configuration here: `tests/fixtures/system/fixtures.json`. - -The harness currently supports the creation of the following fixture types: -1. User accounts -2. Projects (from upload) - -Additional fixture support can be added by extending the fixture manager. - -Test Suites are subclasses of a fixture manager and include logic for the configuration of the fixtures and their relationships specific to the test suite in execution. - -**Default Fixtures and Control Flags** -```json lines -{ - "force": false, - "teardown": false, - "accounts": [ - { - "id": "1", - "username": "tooltest", - "email": "tooltest@localhost.local", - "firstname": "tooltest", - "lastname": "1", - "password": "tooltest" - }, - { - "id": "2", - "username": "tooltest2", - "email": "tooltest2@localhost.local", - "firstname": "tooltest", - "lastname": "2", - "password": "tooltest2" - }, - { - "id": "3", - "username": "tooltest3", - "email": "tooltest3@localhost.local", - "firstname": "tooltest", - "lastname": "3", - "password": "tooltest3" - } - ], - "projects": [ - { - "name": "testproj1", - "artifact": "tests/fixtures/system/testproj1.tar.gz", - "tag": "0.1.0" - }, - { - "name": "testproj2", - "artifact": "tests/fixtures/system/testproj2.tar.gz", - "tag": "0.1.0" - }, - { - "name": "testproj3", - "artifact": "tests/fixtures/system/testproj3.tar.gz", - "tag": "0.1.0" - } - ] -} -``` - -## Execution -The test suites can be run by executing either of the below commands. - -**System Tests** - -```commandline -anaconda-project run test:system -``` - -**Load Tests** - -**WARNING** - Load testing should **NEVER** be done again a production instance - it **CAN** bring down the instance if too much load is requested. - -```commandline -anaconda-project run test:load -``` - -## Creating Test Suites -Additional test suites can be created and used as context objects within testing frameworks. Below is a simple example of how to subclass the FixtureManager in order to create a suite which handles account creation for a development team. 
- -**tests/system/create-dev-accounts.py** -```python -from __future__ import annotations - -import json -import logging - -from dotenv import load_dotenv - -from tests.adsp.common.fixture_manager import FixtureManager - -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger(__name__) - - -class DevTeamSystemFixtures(FixtureManager): - def _setup(self) -> None: - # Create dev accounts - self.create_fixture_accounts(accounts=self.config["service_accounts"], force=self.config["force"]) - - -if __name__ == "__main__": - # Load env vars, - do NOT override previously defined ones - load_dotenv(override=False) - - fixtures_file: str = "tests/fixtures/system/dev-team.json" - - with open(file=fixtures_file, mode="r", encoding="utf-8") as file: - config: dict = json.load(file) - - with DevTeamSystemFixtures(config=config) as manager: - print(str(manager)) - -``` - -**tests/fixtures/system/dev-team.json** -```json lines -{ - "force": false, - "teardown": false, - "accounts": [ - { - "id": "1", - "username": "developerone", - "email": "developerone@localhost.local", - "firstname": "Developer", - "lastname": "One", - "enabled": true, - "email_verified": true, - "password": "developerone", - "password_temporary": false - }, - { - "id": "2", - "username": "developertwo", - "email": "developertwo@localhost.local", - "firstname": "Developer", - "lastname": "Two", - "enabled": true, - "email_verified": true, - "password": "developertwo", - "password_temporary": false - }, - ], - "projects": [] -} -``` \ No newline at end of file diff --git a/docs/source/conf.py b/docs/source/conf.py index f4efe34b..2589b380 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -7,7 +7,7 @@ # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information project = "AE5 Tools" -copyright = "2023, Anaconda, Inc." +copyright = "2024, Anaconda, Inc." author = "Anaconda, Inc. 
and friends" # -- General configuration --------------------------------------------------- diff --git a/docs/source/index.rst b/docs/source/index.rst index 1e350522..e4d762d5 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -7,7 +7,6 @@ AE5 Tools Documentation README COMMANDS - SYSTEM_TESTS K8S_SERVER diff --git a/tests/adsp/common/fixture_manager.py b/tests/adsp/common/fixture_manager.py deleted file mode 100644 index 2126b025..00000000 --- a/tests/adsp/common/fixture_manager.py +++ /dev/null @@ -1,291 +0,0 @@ -from __future__ import annotations - -import json -import logging -import time -from abc import abstractmethod -from copy import copy, deepcopy - -from ae5_tools import AEAdminSession, AEException, AEUnexpectedResponseError, AEUserSession, demand_env_var, get_env_var - -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger(__name__) - - -class FixtureManager: - def __init__(self, config: dict | None = None, ae_admin_session: AEAdminSession | None = None) -> None: - self.accounts: list[dict] = [] - self.projects: list[dict] = [] - self.sessions: list[dict] = [] # not connections, but user sessions within adsp - self.config: dict = config - self.ae_admin_session: AEAdminSession = ae_admin_session if ae_admin_session else FixtureManager.build_session(admin=True) - - def __del__(self): - if "teardown" in self.config and self.config["teardown"]: - self.destroy_fixture_projects(ignore_error=True) - self.destroy_fixture_accounts() - - @abstractmethod - def _setup(self) -> None: - """ """ - - def __enter__(self) -> FixtureManager: - self._setup() - return self - - def __exit__(self, type, value, traceback): - self.__del__() - - def _get_account(self, id: str) -> dict: - return [account for account in self.accounts if account["id"] == id][0] - - def load(self, state: str) -> None: - with open(file=state, mode="r", encoding="utf-8") as file: - partial_state: dict = json.load(file) - - self.projects = partial_state["projects"] - self.config = partial_state["config"] - self.accounts = partial_state["accounts"] - self.create_fixture_connections() - - @staticmethod - def _resolve_conn_params( - hostname: str | None = None, username: str | None = None, password: str | None = None, admin: bool = False - ) -> tuple[str, str, str]: - hostname = hostname if hostname else demand_env_var(name="AE5_HOSTNAME") - - if not username: - if admin: - username = demand_env_var(name="AE5_ADMIN_USERNAME") - else: - username = demand_env_var(name="AE5_USERNAME") - - if not password: - if admin: - password = demand_env_var(name="AE5_ADMIN_PASSWORD") - else: - password = demand_env_var(name="AE5_PASSWORD") - - return hostname, username, password - - @staticmethod - def build_session( - hostname: str | None = None, username: str | None = None, password: str | None = None, admin: bool = False - ) -> AEUserSession | AEAdminSession: - params: tuple = FixtureManager._resolve_conn_params(hostname=hostname, username=username, password=password, admin=admin) - if admin: - return AEAdminSession(*params) - return AEUserSession(*params) - - def create_fixture_accounts(self, accounts: list, force: bool = False) -> None: - local_accounts: list = deepcopy(accounts) - while len(local_accounts) > 0: - retry: bool = True - account = local_accounts.pop() - while retry: - try: - self.ae_admin_session.user_create( - username=account["username"], - email=account["email"], - firstname=account["firstname"], - lastname=account["lastname"], - password=account["password"], - email_verified=True, - 
enabled=True, - password_temporary=False, - ) - self.accounts.append(account) - logger.info("User account %s created.", account["username"]) - retry = False - except AEUnexpectedResponseError as error: - if "Unexpected response: 409 Conflict" in str(error): - if force: - # remove, and retry. - logger.warning("User account %s already exists, removing..", account["username"]) - self._destroy_account(username=account["username"]) - else: - logger.warning( - "User account %s already exists, will not [re]create (or remove). Password may be incorrect..", account["username"] - ) - self.accounts.append(account) - retry = False - else: - raise error from error - - def create_fixture_connections(self) -> None: - for account in self.accounts: - self._create_fixture_conn(username=account["username"]) - - def _create_fixture_conn(self, username: str) -> None: - account: dict = [user for user in self.accounts if user["username"] == username][0] - if "conn" in account: - logger.warning("User account %s already has an active connection, skipping ...", username) - else: - if "password" in account: - logger.info("Creating connection for user %s", account["username"]) - account["conn"] = FixtureManager.build_session( - hostname=self.ae_admin_session.hostname, - username=account["username"], - password=account["password"], - admin=False, - ) - else: - logger.warning("Unable to create connection for user %s, no password specified!", account["username"]) - - def destroy_fixture_accounts(self) -> None: - while len(self.accounts) > 0: - account: dict = self.accounts.pop() - if account["conn"]: - logger.info("Disconnecting user %s", account["username"]) - try: - account["conn"].disconnect() - except AEException as error: - if "Invalid username or password." in str(error): - # Most likely the account has already been deleted. - logger.warning(str(error)) - logger.warning("Most likely the account has already been deleted.") - pass - else: - logger.error(str(error)) - raise error from error - account["conn"] = None - self._destroy_account(username=account["username"]) - - def destroy_fixture_projects(self, ignore_error: bool = False) -> None: - local_projects: list[dict] = deepcopy(self.projects) - for project in local_projects: - retry: bool = True - while retry: - try: - if "record" in project: - self._destroy_fixture_project(name=project["record"]["name"], owner=project["record"]["owner"], force=self.config["force"]) - retry = False - except AEException as error: - if ignore_error: - logger.warning(str(error)) - if "Invalid username or password." 
in str(error): - retry = False - else: - logger.error(str(error)) - else: - logger.error(str(error)) - time.sleep(5) - - def _destroy_account(self, username: str) -> None: - if username == self.ae_admin_session.username: - raise Exception("Will not delete self") - else: - try: - self.ae_admin_session.user_delete(username=username) - logger.info("User account %s deleted.", username) - except AEException as error: - msg: str = error.args[0] - if msg == f"No records found matching username={username}|id={username}": - logger.warning("No user found matching username=%s|id=%s, skipping removal.", username, username) - else: - raise error from error - - def upload_fixture_project(self, proj_params: dict, owner: str, force: bool = False): - conn: AEUserSession = self.get_account_conn(username=owner) # [user for user in self.accounts if user["username"] == owner][0]["conn"] - - retry: bool = True - while retry: - logger.info("Uploading project %s for account %s ..", proj_params["name"], owner) - try: - response: dict = conn.project_upload( - project_archive=proj_params["artifact"], name=proj_params["name"], tag=proj_params["tag"], wait=True - ) - proj: dict = deepcopy(proj_params) - proj["record"] = response - self.projects.append(proj) - retry = False - except AEUnexpectedResponseError as error: - if "Unexpected response: 400 Project name is not unique" in str(error): - if force: - # delete, and then allow it to loop ... - logger.warning("Project %s for account %s already exists, forcibly deleting ..", proj_params["name"], owner) - time.sleep(2) - self._destroy_fixture_project(name=proj_params["name"], owner=owner, force=force) - else: - logger.warning("Project %s for account %s already exists, pulling project info ..", proj_params["name"], owner) - response: dict = conn.project_info(ident=f"{owner}/{proj_params['name']}") - proj: dict = deepcopy(proj_params) - proj["record"] = response - self.projects.append(proj) - retry = False - else: - raise error from error - - def _lookup_fixture(self, name: str, owner: str) -> dict | None: - for project in self.projects: - if "record" in project: - if owner == project["record"]["owner"] and name == project["record"]["name"]: - return project - - def _unmanage_fixture(self, name: str, owner: str) -> None: - for project in self.projects: - if "record" in project: - if project["record"]["owner"] == owner and project["record"]["name"] == name: - self.projects.remove(project) - - def _destroy_fixture_project(self, name: str, owner: str, force: bool) -> None: - # Ensure fixture is managed - if not force and not self._lookup_fixture(name=name, owner=owner): - logger.warning("Unable to find managed project fixture for project %s for owner %s, skipping removal..", name, owner) - logger.warning(self.projects) - return - - conn: AEUserSession = self.get_account_conn(username=owner) - - retry: bool = True - while retry: - if self._does_project_exist(name=name, owner=owner): - """""" - try: - logger.info("Deleting project %s for account %s ..", name, owner) - conn.project_delete(ident=f"{owner}/{name}") - self._unmanage_fixture(name=name, owner=owner) - time.sleep(10) - except AEException as error: - if f"No projects found matching name={name}" in str(error): - # then we are out of sync .. 
- logger.info("Project state is out of sync, enforced wait before retry") - time.sleep(30) - else: - raise error from error - except Exception as error: - logger.error("unhandled exception") - logger.error(type(error)) - logger.error(str(error)) - raise error from error - else: - retry = False - - def get_account_conn(self, username: str) -> AEUserSession: - return [user for user in self.accounts if user["username"] == username][0]["conn"] - - def _does_project_exist(self, name: str, owner: str) -> bool: - conn: AEUserSession = self.get_account_conn(username=owner) - try: - conn.project_info(ident=f"{owner}/{name}") - return True - except AEException as error: - if "No projects found matching name" in str(error): - return False - else: - raise error from error - except Exception as error: - logger.error("unhandled exception") - logger.error(type(error)) - logger.error(str(error)) - raise error from error - - def __str__(self) -> str: - partial: dict = {"config": self.config, "accounts": [], "projects": self.projects} - for account in self.accounts: - new_account: dict = copy(account) # shallow - if "conn" in new_account: - del new_account["conn"] - partial["accounts"].append(new_account) - - return json.dumps(partial, indent=4) diff --git a/tests/adsp/common/utils.py b/tests/adsp/common/utils.py index 86e1ab6d..ae0bd9ec 100644 --- a/tests/adsp/common/utils.py +++ b/tests/adsp/common/utils.py @@ -1,54 +1,14 @@ from __future__ import annotations -import csv +import json import logging import os -import shlex import subprocess -import tarfile -from io import StringIO +from datetime import datetime logger = logging.getLogger(__name__) -def _process_launch_wait(shell_out_cmd: str, cwd: str = ".") -> None: - args = shlex.split(shell_out_cmd) - - try: - with subprocess.Popen(args, cwd=cwd, stdout=subprocess.PIPE) as process: - for line in iter(process.stdout.readline, b""): - logger.info(line) - - if process.returncode != 0: - raise Exception("subprocess failed") - except Exception as error: - # Catch and handle ALL errors - logger.error("Exception was caught while executing task.") - logger.error(str(error)) - raise error - - -class CMDException(Exception): - def __init__(self, cmd, code, stdoutb, stderrb): - msg = [f"Command returned a non-zero status code {code}"] - msg.append("Command: " + cmd) - if stdoutb: - msg.append("--- STDOUT ---") - msg.extend(x for x in stdoutb.decode().splitlines()) - if stderrb: - msg.append("--- STDERR ---") - msg.extend(x for x in stderrb.decode().splitlines()) - super(CMDException, self).__init__("\n".join(msg)) - - -def _get_vars(*vars): - missing = [v for v in vars if not os.environ.get(v)] - if missing: - raise RuntimeError("The following environment variables must be set: {}".format(" ".join(missing))) - result = tuple(os.environ[v] for v in vars) - return result[0] if len(result) == 1 else result - - def _cmd(*cmd, table=True): if len(cmd) > 1: cmd_str = " ".join(cmd) @@ -59,40 +19,16 @@ def _cmd(*cmd, table=True): print(f"Executing: ae5 {cmd_str}") cmd = ("coverage", "run", "--source=ae5_tools", "-m", "ae5_tools.cli.main") + cmd + ("--yes",) if table: - cmd += "--format", "csv" + cmd += "--format", "json" print(f"Executing: {cmd}") p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=open(os.devnull)) stdoutb, stderrb = p.communicate() if p.returncode != 0: raise CMDException(cmd_str, p.returncode, stdoutb, stderrb) - text = stdoutb.decode() - if not table or not text.strip(): - return text - result = 
list(csv.DictReader(StringIO(text))) - if result and list(result[0].keys()) == ["field", "value"]: - return {rec["field"]: rec["value"] for rec in result} - return result - - -def _compare_tarfiles(fname1, fname2): - content = ({}, {}) - for fn, cdict in zip((fname1, fname2), content): - with tarfile.open(name=fn, mode="r") as tar: - for tinfo in tar: - if tinfo.isfile(): - cdict[tinfo.name.split("/", 1)[1]] = tar.extractfile(tinfo).read() - if content[0] == content[1]: - return - msg = [] - for k in set(content[0]) | set(content[1]): - c1 = content[0].get(k) - c2 = content[1].get(k) - if c1 == c2: - continue - if not msg: - msg.append("Comparing: f1={}, f2={}".format(fname, fname2)) - if c1 is None or c2 is None: - msg.append("File {} only found in {}".format(k, "f1" if c1 else "f2")) - else: - msg.append("File {} differs: f1: {}B, f2: {}zB".format(k, len(c1), len(c2))) - assert False, "\n".join(msg) + text = stdoutb.decode(encoding="utf-8") + try: + return json.loads(text) + except Exception: + # Not json parse-able, so return as-is to caller + pass + return text diff --git a/tests/fixtures/system/fixtures.json b/tests/fixtures/system/fixtures.json deleted file mode 100644 index c3486f15..00000000 --- a/tests/fixtures/system/fixtures.json +++ /dev/null @@ -1,47 +0,0 @@ -{ - "force": false, - "teardown": false, - "accounts": [ - { - "id": "1", - "username": "tooltest", - "email": "tooltest@localhost.local", - "firstname": "tooltest", - "lastname": "1", - "password": "tooltest" - }, - { - "id": "2", - "username": "tooltest2", - "email": "tooltest2@localhost.local", - "firstname": "tooltest", - "lastname": "2", - "password": "tooltest2" - }, - { - "id": "3", - "username": "tooltest3", - "email": "tooltest3@localhost.local", - "firstname": "tooltest", - "lastname": "3", - "password": "tooltest3" - } - ], - "projects": [ - { - "name": "testproj1", - "artifact": "tests/fixtures/system/testproj1.tar.gz", - "tag": "0.1.0" - }, - { - "name": "testproj2", - "artifact": "tests/fixtures/system/testproj2.tar.gz", - "tag": "0.1.0" - }, - { - "name": "testproj3", - "artifact": "tests/fixtures/system/testproj3.tar.gz", - "tag": "0.1.0" - } - ] -} \ No newline at end of file diff --git a/tests/fixtures/system/fixtures.orig.json b/tests/fixtures/system/fixtures.orig.json deleted file mode 100644 index c3486f15..00000000 --- a/tests/fixtures/system/fixtures.orig.json +++ /dev/null @@ -1,47 +0,0 @@ -{ - "force": false, - "teardown": false, - "accounts": [ - { - "id": "1", - "username": "tooltest", - "email": "tooltest@localhost.local", - "firstname": "tooltest", - "lastname": "1", - "password": "tooltest" - }, - { - "id": "2", - "username": "tooltest2", - "email": "tooltest2@localhost.local", - "firstname": "tooltest", - "lastname": "2", - "password": "tooltest2" - }, - { - "id": "3", - "username": "tooltest3", - "email": "tooltest3@localhost.local", - "firstname": "tooltest", - "lastname": "3", - "password": "tooltest3" - } - ], - "projects": [ - { - "name": "testproj1", - "artifact": "tests/fixtures/system/testproj1.tar.gz", - "tag": "0.1.0" - }, - { - "name": "testproj2", - "artifact": "tests/fixtures/system/testproj2.tar.gz", - "tag": "0.1.0" - }, - { - "name": "testproj3", - "artifact": "tests/fixtures/system/testproj3.tar.gz", - "tag": "0.1.0" - } - ] -} \ No newline at end of file diff --git a/tests/fixtures/system/testproj1.tar.gz b/tests/fixtures/system/testproj1.tar.gz deleted file mode 100644 index eaee114bd35dfab519698e98e522c875e52c847f..0000000000000000000000000000000000000000 GIT binary patch 
(binary patch data omitted: this commit deletes the binary fixtures tests/fixtures/system/testproj1.tar.gz, tests/fixtures/system/testproj2.tar.gz, and tests/fixtures/system/testproj3.tar.gz)
diff --git a/tests/load/.env b/tests/load/.env deleted file mode 100644 index 4f351840..00000000 --- a/tests/load/.env +++ /dev/null @@ -1,11 +0,0 @@ -# Development Environments
-AE5_HOSTNAME=anaconda.example.com -# AE5_HOSTNAME=dev1.ae.anacondaconnect.com -# AE5_HOSTNAME=dev2.ae.anacondaconnect.com - -# Realm Admin -AE5_ADMIN_USERNAME=admin -AE5_ADMIN_PASSWORD=admin - -# Meant to be leaved (and set) from within a build runner to allow skipping tests which should not run in CI. -CI=False diff --git a/tests/load/__init__.py b/tests/load/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/load/ae5_tools/__init__.py b/tests/load/ae5_tools/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/load/ae5_tools/test_sessions.py b/tests/load/ae5_tools/test_sessions.py deleted file mode 100644 index c37b32bc..00000000 --- a/tests/load/ae5_tools/test_sessions.py +++ /dev/null @@ -1,88 +0,0 @@ -import logging -import uuid - -from ae5_tools import AEUserSession -from tests.adsp.common.fixture_manager import FixtureManager - -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger(__name__) - - -def pytest_generate_tests(metafunc): - idlist = [] - argvalues = [] - for scenario in metafunc.cls.scenarios: - idlist.append(scenario[0]) - items = scenario[1].items() - argnames = [x[0] for x in items] - argvalues.append([x[1] for x in items]) - metafunc.parametrize(argnames, argvalues, ids=idlist, scope="class") - - -class LoadTestFixtureSuite(FixtureManager): - def _setup(self) -> None: - # Create Fixtures - - self._create_service_accounts() - self._upload_projects() - - def _create_service_accounts(self): - # Create service accounts (and connections) - self.create_fixture_accounts(accounts=self.config["accounts"], force=self.config["force"]) - self.create_fixture_connections() - - def _upload_projects(self): - # 1. Each user gets a project. - for account, proj in zip(self.config["accounts"], self.config["projects"]): - self.upload_fixture_project(proj_params=proj, owner=account["username"], force=self.config["force"]) - - @staticmethod - def gen_config(size: int = 1) -> dict: - # load our fixtures - config: dict = { - "force": True, - "teardown": True, - "accounts": [], - "projects": [], - } - - prefix: str = "ae-load-test" - for i in range(size): - account: dict = {} - account["id"] = str(i + 1) - account["username"] = prefix + "-account-" + account["id"] - account["email"] = account["username"] + "@localhost.local" - account["firstname"] = prefix + "-account" - account["lastname"] = account["id"] - account["password"] = str(uuid.uuid4()) - config["accounts"].append(account) - - project: dict = {"name": prefix + "-" + str(i + 1), "artifact": "tests/fixtures/system/testproj1.tar.gz", "tag": "0.1.0"} - config["projects"].append(project) - - return config - - -# Create scenarios -min_sessions: int = 1 -max_sessions: int = 15 -scenarios: list = [] -for size in range(min_sessions, max_sessions + 1): - scenarios.append(("session load scenario ", {"size": size})) - - -class TestSeries: - scenarios = scenarios - - def test_sessions(self, size): - with LoadTestFixtureSuite(config=LoadTestFixtureSuite.gen_config(size=size)) as manager: - print(f"Testing Session Count {size}") - - sessions: list[dict] = [] - records = [project["record"] for project in manager.projects] - for record in records: - owner: str = record["owner"] - project_id: str = record["id"] - account_conn: AEUserSession = manager.get_account_conn(username=owner) - account_session = account_conn.session_start(ident=project_id, editor="jupyterlab", resource_profile="default", wait=False) - sessions.append(account_session) diff --git a/tests/load/runner.py 
b/tests/load/runner.py deleted file mode 100644 index 2737ef84..00000000 --- a/tests/load/runner.py +++ /dev/null @@ -1,29 +0,0 @@ -from __future__ import annotations - -import logging - -from dotenv import load_dotenv - -from ae5_tools import demand_env_var_as_bool, get_env_var -from tests.adsp.common.utils import _process_launch_wait - -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger(__name__) - - -def run() -> None: - shell_out_cmd: str = "python -m pytest --cov=ae5_tools --show-capture=all -rP tests/load/ae5_tools --cov-append --cov-report=xml -vv" - - if get_env_var(name="CI") and demand_env_var_as_bool(name="CI"): - shell_out_cmd += " --ci-skip" - - logger.info("Test Runner Configuration Complete") - logger.info("Executing: %s", shell_out_cmd) - _process_launch_wait(shell_out_cmd=shell_out_cmd) - - -if __name__ == "__main__": - # Load env vars, - do NOT override previously defined ones - load_dotenv(override=False) - - run() diff --git a/tests/system/.env b/tests/system/.env deleted file mode 100644 index af5dbb40..00000000 --- a/tests/system/.env +++ /dev/null @@ -1,15 +0,0 @@ -# Development Environments -AE5_HOSTNAME=anaconda.example.com -# AE5_HOSTNAME=dev1.ae.anacondaconnect.com -# AE5_HOSTNAME=dev2.ae.anacondaconnect.com - -# Realm Admin -AE5_ADMIN_USERNAME=admin -AE5_ADMIN_PASSWORD=admin - -# k8s service integration configuration -AE5_K8S_ENDPOINT=ssh:centos -AE5_K8S_PORT=23456 - -# Meant to be leaved (and set) from within a build runner to allow skipping tests which should not run in CI. -CI=False diff --git a/tests/system/__init__.py b/tests/system/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/system/ae5_tools/__init__.py b/tests/system/ae5_tools/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/system/ae5_tools/cli/__init__.py b/tests/system/ae5_tools/cli/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/system/ae5_tools/cli/commands/__init__.py b/tests/system/ae5_tools/cli/commands/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/system/ae5_tools/cli/commands/test_deploy.py b/tests/system/ae5_tools/cli/commands/test_deploy.py deleted file mode 100644 index e35d94a2..00000000 --- a/tests/system/ae5_tools/cli/commands/test_deploy.py +++ /dev/null @@ -1,176 +0,0 @@ -import json - -import pytest - -from ae5_tools.api import AEUserSession -from tests.adsp.common.utils import _cmd, _get_vars -from tests.system.state import load_account - - -@pytest.fixture(scope="session") -def user_session(): - hostname: str = _get_vars("AE5_HOSTNAME") - local_account: dict = load_account(id="1") - username: str = local_account["username"] - password: str = local_account["password"] - s = AEUserSession(hostname, username, password) - yield s - s.disconnect() - - -@pytest.fixture(scope="module") -def project_list(user_session): - return _cmd("project", "list", "--collaborators") - - -@pytest.fixture(scope="module") -def cli_project(project_list): - return next(rec for rec in project_list if rec["name"] == "testproj3") - - -############################################################################### -# /: tests -############################################################################### - - -def test_deploy_by_owner_and_name_project_latest_implicit(cli_project): - prec = cli_project - dname = "testdeploy" - ename = "testendpoint" - drec = _cmd( - "project", - "deploy", - f'{prec["owner"]}/{prec["name"]}', - "--name", - dname, - "--endpoint", - 
ename, - "--command", - "default", - "--private", - "--wait", - ) - _cmd("deployment", "stop", drec["id"]) - - revision: str = drec["revision"] - assert revision == "latest" - - -def test_deploy_by_owner_and_name_project_latest_explicit(cli_project): - prec = cli_project - dname = "testdeploy" - ename = "testendpoint" - drec = _cmd( - "project", - "deploy", - f'{prec["owner"]}/{prec["name"]}:latest', - "--name", - dname, - "--endpoint", - ename, - "--command", - "default", - "--private", - "--wait", - ) - _cmd("deployment", "stop", drec["id"]) - - revision: str = drec["revision"] - assert revision == "latest" - - -def test_deploy_by_owner_and_name_project_first_explicit(cli_project): - prec = cli_project - dname = "testdeploy" - ename = "testendpoint" - drec = _cmd( - "project", - "deploy", - f'{prec["owner"]}/{prec["name"]}:0.1.0', - "--name", - dname, - "--endpoint", - ename, - "--command", - "default", - "--private", - "--wait", - ) - _cmd("deployment", "stop", drec["id"]) - - revision: str = drec["revision"] - assert revision == "0.1.0" - - -############################################################################### -# : tests -############################################################################### - - -def test_deploy_by_id_and_revision_project_latest_implicit(cli_project): - prec = cli_project - dname = "testdeploy" - ename = "testendpoint" - drec = _cmd( - "project", - "deploy", - f'{prec["id"]}', - "--name", - dname, - "--endpoint", - ename, - "--command", - "default", - "--private", - "--wait", - ) - _cmd("deployment", "stop", drec["id"]) - - revision: str = drec["revision"] - assert revision == "latest" - - -def test_deploy_by_id_and_revision_project_latest_explicit(cli_project): - prec = cli_project - dname = "testdeploy" - ename = "testendpoint" - drec = _cmd( - "project", - "deploy", - f'{prec["id"]}:latest', - "--name", - dname, - "--endpoint", - ename, - "--command", - "default", - "--private", - "--wait", - ) - _cmd("deployment", "stop", drec["id"]) - - revision: str = drec["revision"] - assert revision == "latest" - - -def test_deploy_by_id_and_revision_project_first_explicit(cli_project): - prec = cli_project - dname = "testdeploy" - ename = "testendpoint" - drec = _cmd( - "project", - "deploy", - f'{prec["id"]}:0.1.0', - "--name", - dname, - "--endpoint", - ename, - "--command", - "default", - "--private", - "--wait", - ) - _cmd("deployment", "stop", drec["id"]) - - revision: str = drec["revision"] - assert revision == "0.1.0" diff --git a/tests/system/ae5_tools/cli/commands/test_job.py b/tests/system/ae5_tools/cli/commands/test_job.py deleted file mode 100644 index ff050b27..00000000 --- a/tests/system/ae5_tools/cli/commands/test_job.py +++ /dev/null @@ -1,238 +0,0 @@ -import json -import time - -import pytest - -from ae5_tools.api import AEUserSession -from tests.adsp.common.utils import _cmd, _get_vars -from tests.system.state import load_account - - -@pytest.fixture(scope="session") -def user_session(): - hostname: str = _get_vars("AE5_HOSTNAME") - local_account: dict = load_account(id="1") - username: str = local_account["username"] - password: str = local_account["password"] - s = AEUserSession(hostname, username, password) - yield s - s.disconnect() - - -@pytest.fixture(scope="module") -def project_list(user_session): - return _cmd("project", "list", "--collaborators") - - -@pytest.fixture(scope="module") -def cli_project(project_list): - return next(rec for rec in project_list if rec["name"] == "testproj3") - - -def test_job_run(cli_project): - # Set 
up the test - - # Create a pre-existing job, (run it and wait for completion) - prec = cli_project - create_job_result: dict = _cmd("job", "create", prec["id"], "--name", "testjob1", "--command", "run", "--run", "--wait") - - # Execute the test (Run a previously created job) - run_job_result: dict = _cmd("job", "run", "testjob1") - - # Review Test Results - assert run_job_result["name"] == "testjob1" - assert run_job_result["project_name"] == "testproj3" - - # Ensure the new triggered run completes. - wait_time: int = 5 - counter: int = 0 - max_loop: int = 100 - wait: bool = True - while wait: - run_once_status: dict = _cmd("run", "info", run_job_result["id"]) - if run_once_status["state"] == "completed": - wait = False - else: - counter += 1 - time.sleep(wait_time) - if counter > max_loop: - wait = False - assert counter < max_loop - - # Cleanup after the test - - # Remove runs - job_runs: list[dict] = _cmd("job", "runs", create_job_result["id"]) - for run in job_runs: - _cmd("run", "delete", run["id"]) - - # Remove job - _cmd("job", "delete", create_job_result["id"]) - - -############################################################################### -# : tests -############################################################################### - - -def test_job_run_implicit_revision_latest(cli_project): - # Set up the test - - # Create a pre-existing job, (run it and wait for completion) - prec = cli_project - create_job_result: dict = _cmd("job", "create", prec["id"], "--name", "testjob1", "--command", "run", "--run", "--wait") - - # Review Test Results - assert create_job_result["name"] == "testjob1" - assert create_job_result["project_name"] == "testproj3" - assert create_job_result["revision"] == "latest" - - # Cleanup after the test - - # Remove runs - job_runs: list[dict] = _cmd("job", "runs", create_job_result["id"]) - for run in job_runs: - _cmd("run", "delete", run["id"]) - - # Remove job - _cmd("job", "delete", create_job_result["id"]) - - -def test_job_run_explicit_revision_latest(cli_project): - # Set up the test - - # Create a pre-existing job, (run it and wait for completion) - prec = cli_project - create_job_result: dict = _cmd("job", "create", f"{prec['id']}:latest", "--name", "testjob1", "--command", "run", "--run", "--wait") - - # Review Test Results - assert create_job_result["name"] == "testjob1" - assert create_job_result["project_name"] == "testproj3" - assert create_job_result["revision"] == "latest" - - # Cleanup after the test - - # Remove runs - job_runs: list[dict] = _cmd("job", "runs", create_job_result["id"]) - for run in job_runs: - _cmd("run", "delete", run["id"]) - - # Remove job - _cmd("job", "delete", create_job_result["id"]) - - -def test_job_run_explicit_revision_first(cli_project): - # Set up the test - - # Create a pre-existing job, (run it and wait for completion) - prec = cli_project - create_job_result: dict = _cmd("job", "create", f"{prec['id']}:0.1.0", "--name", "testjob1", "--command", "run", "--run", "--wait") - - # Review Test Results - assert create_job_result["name"] == "testjob1" - assert create_job_result["project_name"] == "testproj3" - assert create_job_result["revision"] == "0.1.0" - - # Cleanup after the test - - # Remove runs - job_runs: list[dict] = _cmd("job", "runs", create_job_result["id"]) - for run in job_runs: - _cmd("run", "delete", run["id"]) - - # Remove job - _cmd("job", "delete", create_job_result["id"]) - - -############################################################################### -# /: tests 
-############################################################################### - - -def test_job_run_by_owner_and_name_implicit_revision_latest(cli_project): - # Set up the test - - # Create a pre-existing job, (run it and wait for completion) - prec = cli_project - create_job_result: dict = _cmd("job", "create", f"{prec['owner']}/{prec['name']}", "--name", "testjob1", "--command", "run", "--run", "--wait") - - # Review Test Results - assert create_job_result["name"] == "testjob1" - assert create_job_result["project_name"] == "testproj3" - assert create_job_result["revision"] == "latest" - - # Cleanup after the test - - # Remove runs - job_runs: list[dict] = _cmd("job", "runs", create_job_result["id"]) - for run in job_runs: - _cmd("run", "delete", run["id"]) - - # Remove job - _cmd("job", "delete", create_job_result["id"]) - - -def test_job_run_by_owner_and_name_explicit_revision_latest(cli_project): - # Set up the test - - # Create a pre-existing job, (run it and wait for completion) - prec = cli_project - create_job_result: dict = _cmd( - "job", - "create", - f"{prec['owner']}/{prec['name']}:latest", - "--name", - "testjob1", - "--command", - "run", - "--run", - "--wait", - ) - - # Review Test Results - assert create_job_result["name"] == "testjob1" - assert create_job_result["project_name"] == "testproj3" - assert create_job_result["revision"] == "latest" - - # Cleanup after the test - - # Remove runs - job_runs: list[dict] = _cmd("job", "runs", create_job_result["id"]) - for run in job_runs: - _cmd("run", "delete", run["id"]) - - # Remove job - _cmd("job", "delete", create_job_result["id"]) - - -def test_job_run_by_owner_and_name_explicit_revision_first(cli_project): - # Set up the test - - # Create a pre-existing job, (run it and wait for completion) - prec = cli_project - create_job_result: dict = _cmd( - "job", - "create", - f"{prec['owner']}/{prec['name']}:0.1.0", - "--name", - "testjob1", - "--command", - "run", - "--run", - "--wait", - ) - - # Review Test Results - assert create_job_result["name"] == "testjob1" - assert create_job_result["project_name"] == "testproj3" - assert create_job_result["revision"] == "0.1.0" - - # Cleanup after the test - - # Remove runs - job_runs: list[dict] = _cmd("job", "runs", create_job_result["id"]) - for run in job_runs: - _cmd("run", "delete", run["id"]) - - # Remove job - _cmd("job", "delete", create_job_result["id"]) diff --git a/tests/system/ae5_tools/cli/commands/test_secret_system.py b/tests/system/ae5_tools/cli/commands/test_secret_system.py deleted file mode 100644 index 759830b4..00000000 --- a/tests/system/ae5_tools/cli/commands/test_secret_system.py +++ /dev/null @@ -1,48 +0,0 @@ -import json -import uuid - -import pytest - -from ae5_tools.api import AEUserSession -from tests.adsp.common.utils import CMDException, _cmd, _get_vars -from tests.system.state import load_account - - -@pytest.fixture(scope="session") -def user_session(): - hostname: str = _get_vars("AE5_HOSTNAME") - local_account: dict = load_account(id="1") - username: str = local_account["username"] - password: str = local_account["password"] - s = AEUserSession(hostname, username, password) - yield s - s.disconnect() - - -@pytest.fixture(scope="module") -def secret_name(): - return str(uuid.uuid4()).replace("-", "_") - - -##################################################### -# Test Cases For secret_add, secret_list, and secret_delete -##################################################### - - -def test_secret_create_and_list_and_delete(user_session, secret_name): - 
secret_value: str = str(uuid.uuid4()) - secret_add_result: str = _cmd("secret", "add", secret_name, secret_value) - assert secret_add_result == "" - - user_secrets: list[dict] = user_session.secret_list() - assert secret_name in [secret["secret_name"] for secret in user_secrets] - - secret_delete_result: str = _cmd("secret", "delete", secret_name) - assert secret_delete_result == "" - - -def test_secret_delete_gracefully_fails(user_session): - secret_key: str = str(uuid.uuid4()).replace("-", "_") - with pytest.raises(CMDException) as context: - _cmd("secret", "delete", secret_key) - assert f"User secret {secret_key} was not found and cannot be deleted." in str(context.value) diff --git a/tests/system/ae5_tools/cli/test_cli.py b/tests/system/ae5_tools/cli/test_cli.py deleted file mode 100644 index 23ac91a7..00000000 --- a/tests/system/ae5_tools/cli/test_cli.py +++ /dev/null @@ -1,524 +0,0 @@ -import glob -import os -import pprint -import tarfile -import tempfile -import time -from datetime import datetime - -import pytest -import requests - -from ae5_tools.api import AEUnexpectedResponseError -from tests.adsp.common.utils import CMDException, _cmd, _compare_tarfiles -from tests.system.state import load_account - - -@pytest.fixture(scope="module") -def project_list(user_session): - return _cmd("project", "list", "--collaborators") - - -def test_project_info(project_list): - for rec0 in project_list: - id = rec0["id"] - pair = "{}/{}".format(rec0["owner"], rec0["name"]) - rec1 = _cmd("project", "info", id) - rec2 = _cmd("project", "info", pair) - rec3 = _cmd("project", "info", f"{pair}/{id}") - assert all(rec0[k] == v for k, v in rec2.items()), pprint.pformat((rec0, rec2)) - assert all(rec1[k] == v for k, v in rec2.items()), pprint.pformat((rec1, rec2)) - assert rec2 == rec3 - - -def test_project_info_errors(project_list): - with pytest.raises(CMDException) as excinfo: - _cmd("project", "info", "testproj1") - assert "Multiple projects" in str(excinfo.value) - with pytest.raises(CMDException) as excinfo: - _cmd("project", "info", "testproj4") - assert "No projects" in str(excinfo.value) - - -@pytest.fixture(scope="module") -def resource_profiles(user_session): - return _cmd("resource-profile", "list") - - -def test_resource_profiles(resource_profiles): - for rec in resource_profiles: - rec2 = _cmd("resource-profile", "info", rec["name"]) - assert rec == rec2 - # Dropping because the * is getting expanded for some reason in the tests - # with pytest.raises(CMDException) as excinfo: - # _cmd('resource-profile', 'info', '*') - # assert 'Multiple resource profiles found' in str(excinfo.value) - with pytest.raises(CMDException) as excinfo: - _cmd("resource-profile", "info", "abcdefg") - assert "No resource profiles found" in str(excinfo.value) - - -@pytest.fixture(scope="module") -def editors(user_session): - return _cmd("editor", "list") - - -def test_editors(editors): - for rec in editors: - assert rec == _cmd("editor", "info", rec["id"]) - assert sum(rec["is_default"].lower() == "true" for rec in editors) == 1 - assert set(rec["id"] for rec in editors).issuperset({"jupyterlab", "notebook"}) - - -@pytest.mark.xfail -def test_endpoints(): - slist = _cmd("endpoint", "list") - for rec in slist: - rec2 = _cmd("endpoint", "info", rec["id"]) - assert rec == rec2 - - -def test_samples(): - slist = _cmd("sample", "list") - assert sum(rec["is_default"].lower() == "true" for rec in slist) == 1 - assert sum(rec["is_template"].lower() == "true" for rec in slist) > 1 - for rec in slist: - rec2 = 
_cmd("sample", "info", rec["id"]) - rec3 = _cmd("sample", "info", rec["name"]) - assert rec == rec2 and rec == rec3 - - -def test_sample_clone(): - cname = "NLP-API" - pname = "testclone" - rrec1 = _cmd("sample", "clone", cname, "--name", pname) - with pytest.raises(CMDException) as excinfo: - _cmd("sample", "clone", cname, "--name", pname) - rrec2 = _cmd("sample", "clone", cname, "--name", pname, "--make-unique") - rrec3 = _cmd("sample", "clone", cname) - _cmd("project", "delete", rrec1["id"]) - _cmd("project", "delete", rrec2["id"]) - _cmd("project", "delete", rrec3["id"]) - - -@pytest.fixture(scope="module") -def cli_project(project_list): - return next(rec for rec in project_list if rec["name"] == "testproj3") - - -@pytest.fixture(scope="module") -def cli_revisions(cli_project): - prec = cli_project - revs = _cmd("project", "revision", "list", prec["id"]) - return prec, revs - - -@pytest.fixture(scope="module") -def downloaded_project(user_session, cli_revisions): - prec, revs = cli_revisions - with tempfile.TemporaryDirectory() as tempd: - fname = _cmd("project", "download", prec["id"], table=False).strip() - assert fname == prec["name"] + ".tar.gz" - with tarfile.open(fname, "r") as tf: - tf.extractall(path=tempd) - dnames = glob.glob(os.path.join(tempd, "*", "anaconda-project.yml")) - assert len(dnames) == 1 - dname = os.path.dirname(dnames[0]) - yield fname, dname - for r in _cmd("project", "list"): - if r["name"].startswith("test_upload"): - _cmd("project", "delete", r["id"]) - assert not any(r["name"].startswith("test_upload") for r in _cmd("project", "list")) - - -def test_project_download(downloaded_project): - pass - - -def test_project_upload(downloaded_project): - fname, dname = downloaded_project - _cmd("project", "upload", fname, "--name", "test_upload1", "--tag", "1.2.3") - rrec = _cmd("project", "revision", "list", "test_upload1") - assert len(rrec) == 1 - rev = rrec[0]["name"] - fname2 = _cmd("project", "download", f"test_upload1:{rev}", table=False).strip() - assert fname2 == f"test_upload1-{rev}.tar.gz" - assert os.path.exists(fname2) - _compare_tarfiles(fname, fname2) - if rev == "0.0.1": - pytest.xfail("5.4.1 revision issue") - assert rev == "1.2.3" - - -def test_project_upload_as_directory(downloaded_project): - """Behavior changes in 5.6.2""" - fname, dname = downloaded_project - _cmd("project", "upload", dname, "--name", "test_upload2", "--tag", "1.3.4") - rrec = _cmd("project", "revision", "list", "test_upload2") - assert len(rrec) == 1 - rev = rrec[0]["name"] - fname2 = _cmd("project", "download", f"test_upload2:{rev}", table=False).strip() - assert fname2 == f"test_upload2-{rev}.tar.gz" - assert os.path.exists(fname2) - assert rev == "1.3.4" - - -def test_project_revisions(cli_revisions): - prec, revs = cli_revisions - rev0 = _cmd("project", "revision", "info", prec["id"]) - assert revs[0] == rev0 - rev0 = _cmd("project", "revision", "info", prec["id"] + ":latest") - assert revs[0] == rev0 - for rev in revs: - revN = _cmd("project", "revision", "info", prec["id"] + ":" + rev["id"]) - assert rev == revN - - -def test_project_revision_errors(cli_revisions): - prec, revs = cli_revisions - with pytest.raises(CMDException) as excinfo: - _cmd("project", "revision", "info", "testproj1") - assert "Multiple projects" in str(excinfo.value) - with pytest.raises(CMDException) as excinfo: - _cmd("project", "revision", "info", "testproj4") - assert "No projects" in str(excinfo.value) - with pytest.raises(CMDException) as excinfo: - _cmd("project", "revision", "info", 
prec["id"] + ":a.b.c") - assert "No revisions" in str(excinfo.value) - - -def test_project_patch(cli_project, editors, resource_profiles): - prec = cli_project - old, new = {}, {} - for what, wlist in ( - ("resource-profile", (r["name"] for r in resource_profiles)), - ("editor", (e["id"] for e in editors)), - ): - old[what] = prec[what.replace("-", "_")] - new[what] = next(v for v in wlist if v != old) - cmd0 = ["project", "patch", prec["id"]] - prec2 = _cmd(*(cmd0 + [f"--{k}={v}" for k, v in new.items()])) - assert {k: prec2[k.replace("-", "_")] for k in new} == new - prec3 = _cmd(*(cmd0 + [f"--{k}={v}" for k, v in old.items()])) - assert {k: prec3[k.replace("-", "_")] for k in old} == old - - -def test_project_collaborators(cli_project, project_list): - prec = cli_project - uname = next(rec["owner"] for rec in project_list if rec["owner"] != prec["owner"]) - id = prec["id"] - with pytest.raises(CMDException) as excinfo: - _cmd("project", "collaborator", "info", id, uname) - assert f"No collaborators found matching id={uname}" in str(excinfo.value) - clist = _cmd("project", "collaborator", "add", id, uname) - assert len(clist) == 1 - clist = _cmd("project", "collaborator", "add", id, "everyone", "--group", "--read-only") - assert len(clist) == 2 - assert all( - c["id"] == uname - and c["permission"] == "rw" - and c["type"] == "user" - or c["id"] == "everyone" - and c["permission"] == "r" - and c["type"] == "group" - for c in clist - ) - clist = _cmd("project", "collaborator", "add", id, uname, "--read-only") - assert len(clist) == 2 - assert all( - c["id"] == uname - and c["permission"] == "r" - and c["type"] == "user" - or c["id"] == "everyone" - and c["permission"] == "r" - and c["type"] == "group" - for c in clist - ) - clist = _cmd("project", "collaborator", "remove", id, uname, "everyone") - assert len(clist) == 0 - with pytest.raises(CMDException) as excinfo: - clist = _cmd("project", "collaborator", "remove", id, uname) - assert f"Collaborator(s) not found: {uname}" in str(excinfo.value) - - -def test_project_activity(cli_project): - prec = cli_project - activity = _cmd("project", "activity", prec["id"]) - assert 1 <= len(activity) <= 10 - activity2 = _cmd("project", "activity", "--latest", prec["id"]) - assert activity[0] == activity2 - activity3 = _cmd("project", "activity", "--limit", "1", prec["id"]) - assert activity[0] == activity3[0] - with pytest.raises(CMDException) as excinfo: - _cmd("project", "activity", "--latest", "--all", prec["id"]) - with pytest.raises(CMDException) as excinfo: - _cmd("project", "activity", "--limit", "2", "--all", prec["id"]) - with pytest.raises(CMDException) as excinfo: - _cmd("project", "activity", "--latest", "--limit", "2", prec["id"]) - - -@pytest.fixture(scope="module") -def cli_session(cli_project): - prec = cli_project - srec = _cmd("session", "start", f'{prec["owner"]}/{prec["name"]}') - srec2 = _cmd("session", "restart", srec["id"], "--wait") - assert not any(r["id"] == srec["id"] for r in _cmd("session", "list")) - yield prec, srec2 - _cmd("session", "stop", srec2["id"]) - assert not any(r["id"] == srec2["id"] for r in _cmd("session", "list")) - - -# DNS resolution and ingress are currently failing in the new CI environments and is a known issue right now. -# TODO: Re-enable this test when issues have been resolved. 
-@pytest.mark.ci_skip -def test_session(cli_session): - prec, srec = cli_session - assert srec["owner"] == prec["owner"], srec - assert srec["name"] == prec["name"], srec - # Ensure that the session can be retrieved by its project ID as well - srec2 = _cmd("session", "info", f'{srec["owner"]}/*/{prec["id"]}') - assert srec2["id"] == srec["id"] - endpoint = srec["id"].rsplit("-", 1)[-1] - sdata = _cmd("call", "/", f"--endpoint={endpoint}", table=False) - assert "Jupyter Notebook requires JavaScript." in sdata, sdata - - -def test_project_sessions(cli_session): - prec, srec = cli_session - slist = _cmd("project", "sessions", prec["id"]) - assert len(slist) == 1 and slist[0]["id"] == srec["id"] - - -def test_session_branches_5_7_0(cli_session): - """Behavior updated in 5.7.0""" - prec, srec = cli_session - branches = _cmd("session", "branches", prec["id"]) - bdict = {r["branch"]: r["sha1"] for r in branches} - assert set(bdict) == {"master", "parent", "local"}, branches - assert bdict["local"] == bdict["master"], branches - - -def test_session_before_changes(cli_session): - prec, srec = cli_session - changes1 = _cmd("session", "changes", prec["id"]) - changes1 = [c for c in changes1 if c["path"] != ".projectignore"] - assert changes1 == [], changes1 - changes2 = _cmd("session", "changes", "--master", prec["id"]) - changes2 = [c for c in changes1 if c["path"] != ".projectignore"] - assert changes2 == [], changes2 - - -@pytest.fixture(scope="module") -def cli_deployment(cli_project): - prec = cli_project - dname = "testdeploy" - ename = "testendpoint" - drec = _cmd( - "project", - "deploy", - f'{prec["owner"]}/{prec["name"]}', - "--name", - dname, - "--endpoint", - ename, - "--command", - "default", - "--private", - ) - drec2 = _cmd("deployment", "restart", drec["id"], "--wait") - assert not any(r["id"] == drec["id"] for r in _cmd("deployment", "list")) - yield prec, drec2 - _cmd("deployment", "stop", drec2["id"]) - assert not any(r["id"] == drec2["id"] for r in _cmd("deployment", "list")) - - -def test_deploy(cli_deployment): - prec, drec = cli_deployment - assert drec["owner"] == prec["owner"], drec - assert drec["project_name"] == prec["name"], drec - for attempt in range(10): - try: - ldata = _cmd("call", "/", "--endpoint", drec["endpoint"], table=False) - break - except (AEUnexpectedResponseError, CMDException): - time.sleep(attempt * 10) - pass - else: - raise RuntimeError("Could not get the endpoint to respond") - assert ldata.strip() == "Hello Anaconda Enterprise!", ldata - - -def test_project_deployments(cli_deployment): - prec, drec = cli_deployment - dlist = _cmd("project", "deployments", prec["id"]) - assert len(dlist) == 1 and dlist[0]["id"] == drec["id"] - - -def test_deploy_patch(cli_deployment): - prec, drec = cli_deployment - flag = "--private" if drec["public"].lower() == "true" else "--public" - drec2 = _cmd("deployment", "patch", flag, drec["id"]) - assert drec2["public"] != drec["public"] - flag = "--private" if drec2["public"].lower() == "true" else "--public" - drec3 = _cmd("deployment", "patch", flag, drec["id"]) - assert drec3["public"] == drec["public"] - - -def test_deploy_token(user_session, cli_deployment): - prec, drec = cli_deployment - token = _cmd("deployment", "token", drec["id"], table=False).strip() - resp = requests.get( - f'https://{drec["endpoint"]}.' 
+ user_session.hostname, - headers={"Authorization": f"Bearer {token}"}, - verify=False, - ) - assert resp.status_code == 200 - assert resp.text.strip() == "Hello Anaconda Enterprise!", resp.text - - -def test_deploy_logs(cli_deployment): - prec, drec = cli_deployment - id = drec["id"] - app_prefix = "anaconda-app-" + id.rsplit("-", 1)[-1] + "-" - app_logs = _cmd("deployment", "logs", id, table=False) - event_logs = _cmd("deployment", "logs", id, "--events", table=False) - proxy_logs = _cmd("deployment", "logs", id, "--proxy", table=False) - assert "The project is ready to run commands." in app_logs - assert app_prefix in event_logs, event_logs - assert "App Proxy is fully operational!" in proxy_logs, proxy_logs - - -def test_deploy_duplicate(cli_deployment): - prec, drec = cli_deployment - dname = drec["name"] + "-dup" - with pytest.raises(CMDException) as excinfo: - _cmd( - "project", - "deploy", - prec["id"], - "--name", - dname, - "--endpoint", - drec["endpoint"], - "--command", - "default", - "--private", - "--wait", - ) - assert f'endpoint "{drec["endpoint"]}" is already in use' in str(excinfo.value) - assert not any(r["name"] == dname for r in _cmd("deployment", "list")) - - -def test_deploy_collaborators(cli_deployment): - uname: str = load_account(id="2")["username"] - prec, drec = cli_deployment - clist = _cmd("deployment", "collaborator", "list", drec["id"]) - assert len(clist) == 0 - clist = _cmd("deployment", "collaborator", "add", drec["id"], uname) - assert len(clist) == 1 - clist = _cmd("deployment", "collaborator", "add", drec["id"], "everyone", "--group") - assert len(clist) == 2 - clist = _cmd("deployment", "collaborator", "add", drec["id"], uname) - assert len(clist) == 2 - assert all(c["id"] == uname and c["type"] == "user" or c["id"] == "everyone" and c["type"] == "group" for c in clist) - for crec in clist: - crec2 = _cmd("deployment", "collaborator", "info", drec["id"], crec["id"]) - assert crec2["id"] == crec["id"] and crec2["type"] == crec["type"] - clist = _cmd("deployment", "collaborator", "remove", drec["id"], uname, "everyone") - assert len(clist) == 0 - with pytest.raises(CMDException) as excinfo: - clist = _cmd("deployment", "collaborator", "remove", drec["id"], uname) - assert f"Collaborator(s) not found: {uname}" in str(excinfo.value) - - -def test_deploy_broken(cli_deployment): - prec, drec = cli_deployment - dname = drec["name"] + "-broken" - with pytest.raises(CMDException) as excinfo: - _cmd("project", "deploy", prec["id"], "--name", dname, "--command", "broken", "--private", "--stop-on-error") - assert "Error completing deployment start: App failed to run" in str(excinfo.value) - assert not any(r["name"] == dname for r in _cmd("deployment", "list")) - - -@pytest.mark.skip(reason="Failing against CI - k8s gravity issue") -def test_k8s_node(user_session): - user_session.disconnect() - nlist = _cmd("node", "list") - for nrec in nlist: - nrec2 = _cmd("node", "info", nrec["name"]) - assert nrec2["name"] == nrec["name"] - - -@pytest.mark.skip(reason="Failing against CI - k8s gravity issue") -def test_k8s_pod(user_session, cli_session, cli_deployment): - _, srec = cli_session - _, drec = cli_deployment - plist = _cmd("pod", "list") - assert any(prec["id"] == srec["id"] for prec in plist) - assert any(prec["id"] == drec["id"] for prec in plist) - for prec in plist: - prec2 = _cmd("pod", "info", prec["id"]) - assert prec2["id"] == prec["id"] - srec2 = _cmd("session", "info", srec["id"], "--k8s") - assert srec2["id"] == srec["id"] - drec2 = _cmd("deployment", 
"info", drec["id"], "--k8s") - assert drec2["id"] == drec["id"] - - -def test_job_run1(cli_project): - prec = cli_project - _cmd("job", "create", prec["id"], "--name", "testjob1", "--command", "run", "--run", "--wait") - jrecs = _cmd("job", "list") - assert len(jrecs) == 1, jrecs - rrecs = _cmd("run", "list") - assert len(rrecs) == 1, rrecs - ldata1 = _cmd("run", "log", rrecs[0]["id"], table=False) - assert ldata1.strip().endswith("Hello Anaconda Enterprise!"), repr(ldata1) - _cmd("job", "create", prec["id"], "--name", "testjob1", "--make-unique", "--command", "run", "--run", "--wait") - jrecs = _cmd("job", "list") - assert len(jrecs) == 2, jrecs - jrecs2 = _cmd("project", "jobs", prec["id"]) - assert {r["id"]: r for r in jrecs} == {r["id"]: r for r in jrecs2} - rrecs = _cmd("run", "list") - assert len(rrecs) == 2, rrecs - rrecs2 = _cmd("project", "runs", prec["id"]) - assert {r["id"]: r for r in rrecs} == {r["id"]: r for r in rrecs2} - for rrec in rrecs: - _cmd("run", "delete", rrec["id"]) - for jrec in jrecs: - _cmd("job", "delete", jrec["id"]) - assert not _cmd("job", "list") - assert not _cmd("run", "list") - - -def test_job_run2(cli_project): - prec = cli_project - # Test cleanup mode and variables in jobs - variables = {"INTEGRATION_TEST_KEY_1": "value1", "INTEGRATION_TEST_KEY_2": "value2"} - cmd = ["project", "run", prec["id"], "--command", "run_with_env_vars", "--name", "testjob2"] - for k, v in variables.items(): - cmd.extend(("--variable", f"{k}={v}")) - _cmd(*cmd) - # The job record should have already been deleted - assert not _cmd("job", "list") - rrecs = _cmd("run", "list") - assert len(rrecs) == 0, rrecs - - -def test_login_time(admin_session, user_session): - # The current login time should be before the present - now = datetime.now() - user_list = _cmd("user", "list") - urec = next((r for r in user_list if r["username"] == user_session.username), None) - assert urec is not None - ltm1 = datetime.strptime(urec["lastLogin"], "%Y-%m-%d %H:%M:%S.%f") - assert ltm1 < now - # No more testing here, because we want to preserve the existing sessions - - -def test_realm_roles(admin_session): - _cmd("project", "list") - user_list = _cmd("user", "list") - - # Validate realms roles are present on the user - assert "realm_roles" in user_list[0] diff --git a/tests/system/ae5_tools/conftest.py b/tests/system/ae5_tools/conftest.py deleted file mode 100644 index 16071fcb..00000000 --- a/tests/system/ae5_tools/conftest.py +++ /dev/null @@ -1,73 +0,0 @@ -import pytest - -from ae5_tools.api import AEAdminSession, AEUserSession -from tests.adsp.common.utils import _get_vars -from tests.system.state import load_account - - -# Expectations: the user AE5_USERNAME should have at least three projects: -# - project names: testproj1, testproj2, testproj3 -# - all three editors should be represented -# - the projects should have 2, 1, and 0 collaborators, respectively -# Furthermore, there should be a second user satisfying the following: -# - project names: testproj1, testproj2 -# - AE5_USERNAME is a collaborator on both -@pytest.fixture(scope="session") -def user_session(): - hostname: str = _get_vars("AE5_HOSTNAME") - local_account: dict = load_account(id="1") - username: str = local_account["username"] - password: str = local_account["password"] - - s = AEUserSession(hostname, username, password) - for run in s.run_list(): - if run["owner"] == username: - s.run_delete(run) - for job in s.job_list(): - if job["owner"] == username: - s.job_delete(job) - for dep in s.deployment_list(): - if 
dep["owner"] == username: - s.deployment_stop(dep) - for sess in s.session_list(): - if sess["owner"] == username: - s.session_stop(sess) - plist = s.project_list() - for p in plist: - if p["name"] not in {"testproj1", "testproj2", "testproj3"} and p["owner"] == username: - s.project_delete(p["id"]) - # Make sure testproj3 is using the Jupyter editor - prec = s.project_info(f"{username}/testproj3", collaborators=True) - if prec["editor"] != "notebook" or prec["resource_profile"] != "default": - s.project_patch(prec, editor="notebook", resource_profile="default") - # Make sure testproj3 has no collaborators - if prec["_collaborators"]: - collabs = tuple(c["id"] for c in prec["_collaborators"]) - s.project_collaborator_remove(prec, collabs) - plist = s.project_list(collaborators=True) - plist = s.project_list(collaborators=True) - powned = [p for p in plist if p["owner"] == username] - pother = [p for p in plist if p["owner"] != username] - - # Assert there are exactly 3 projects owned by the test user - assert len(powned) == 3 - - # Need at least two duplicated project names to properly test sorting/filtering - assert len(set(p["name"] for p in powned).intersection(p["name"] for p in pother)) >= 2 - - # Make sure all three editors are represented - assert len(set(p["editor"] for p in powned)) == 3 - - # Make sure we have 0, 1, and 2 collaborators represented - assert set(len(p["_collaborators"]) for p in plist if p["owner"] == username).issuperset((0, 1, 2)) - - yield s - s.disconnect() - - -@pytest.fixture(scope="session") -def admin_session(): - hostname, username, password = _get_vars("AE5_HOSTNAME", "AE5_ADMIN_USERNAME", "AE5_ADMIN_PASSWORD") - s = AEAdminSession(hostname, username, password) - yield s - del s diff --git a/tests/system/ae5_tools/test_api.py b/tests/system/ae5_tools/test_api.py deleted file mode 100644 index 64f5303e..00000000 --- a/tests/system/ae5_tools/test_api.py +++ /dev/null @@ -1,685 +0,0 @@ -import glob -import os -import tarfile -import tempfile -import time -from datetime import datetime - -import pytest -import requests - -from ae5_tools.api import AEException, AEUnexpectedResponseError, AEUserSession -from tests.adsp.common.utils import _compare_tarfiles, _get_vars -from tests.system.state import load_account - - -class AttrDict(dict): - def __init__(self, *args, **kwargs): - super(AttrDict, self).__init__(*args, **kwargs) - self.__dict__ = self - - -def test_unexpected_response(user_session): - with pytest.raises(AEUnexpectedResponseError) as excinfo: - raise AEUnexpectedResponseError("string", "https://test.me", "string") - exc = str(excinfo.value).strip() - assert "Unexpected response: string" == exc - print(excinfo.value) - with pytest.raises(AEUnexpectedResponseError) as excinfo: - raise AEUnexpectedResponseError( - AttrDict({"status_code": 404, "reason": "reason", "headers": "headers", "text": "text"}), - "get", - "https://test.me", - params="params", - data="data", - json="json", - ) - exc = [x.strip() for x in str(excinfo.value).splitlines()] - assert "Unexpected response: 404 reason" in exc - assert "headers: headers" in exc - assert "text: text" in exc - assert "params: params" in exc - assert "data: data" in exc - assert "json: json" in exc - - -def test_user_session(monkeypatch, capsys): - with pytest.raises(ValueError) as excinfo: - AEUserSession("", "") - assert "Must supply hostname and username" in str(excinfo.value) - hostname: str = _get_vars("AE5_HOSTNAME") - local_account: dict = load_account(id="1") - username: str = 
local_account["username"] - password: str = local_account["password"] - with pytest.raises(AEException) as excinfo: - c = AEUserSession(hostname, username, "x" + password, persist=False) - c.authorize() - del c - assert "Invalid username or password." in str(excinfo.value) - passwords = [password, "", "x" + password] - monkeypatch.setattr("getpass.getpass", lambda x: passwords.pop()) - c = AEUserSession(hostname, username, persist=False) - c.authorize() - captured = capsys.readouterr() - assert f"Password for {username}@{hostname}" in captured.err - assert f"Invalid username or password; please try again." in captured.err - assert f"Must supply a password" in captured.err - - -@pytest.mark.skip(reason="Failing against CI - k8s gravity issue") -def test_user_k8s_session(monkeypatch, capsys): - with pytest.raises(ValueError) as excinfo: - AEUserSession("", "") - assert "Must supply hostname and username" in str(excinfo.value) - hostname: str = _get_vars("AE5_HOSTNAME") - local_account: dict = load_account(id="1") - username: str = local_account["username"] - password: str = local_account["password"] - with pytest.raises(AEException) as excinfo: - c = AEUserSession(hostname, username, "x" + password, persist=False) - c.authorize() - del c - assert "Invalid username or password." in str(excinfo.value) - passwords = [password, "", "x" + password] - monkeypatch.setattr("getpass.getpass", lambda x: passwords.pop()) - c = AEUserSession(hostname, username, persist=False) - c.authorize() - captured = capsys.readouterr() - assert f"Password for {username}@{hostname}" in captured.err - assert f"Invalid username or password; please try again." in captured.err - assert f"Must supply a password" in captured.err - - true_endpoint, c._k8s_endpoint = c._k8s_endpoint, "ssh:fakeuser" - with pytest.raises(AEException) as excinfo: - c._k8s("status") - assert "Error establishing k8s connection" in str(excinfo.value) - c._k8s_endpoint = "fakek8sendpoint" - with pytest.raises(AEException) as excinfo: - c._k8s("status") - assert "No deployment found at endpoint fakek8sendpoint" in str(excinfo.value) - with pytest.raises(AEException) as excinfo: - c._k8s("status") - assert "No k8s connection available" in str(excinfo.value) - c._k8s_endpoint = true_endpoint - assert c._k8s("status") == "Alive and kicking" - - -@pytest.fixture(scope="module") -def project_list(user_session): - return user_session.project_list(collaborators=True) - - -def test_project_list_df(user_session, project_list): - with pytest.raises(ImportError) as excinfo: - df = user_session.project_list(collaborators=True, format="_dataframe") - assert 'Pandas must be installed in order to use format="dataframe"' in str(excinfo.value) - df = user_session.project_list(collaborators=True, format="dataframe") - assert len(df) == len(project_list) - mismatch = False - for row, row_df in zip(project_list, df.itertuples()): - for k, v in row.items(): - if k.startswith("_"): - continue - v_df = getattr(row_df, k, None) - if k in ("created", "updated"): - v = v.replace("T", " ") - if str(v) != str(v_df): - print(f'{row["owner"]}/{row["name"]}, {k}: {v} != {v_df}') - mismatch = True - assert not mismatch - - -def test_project_info(user_session, project_list): - for rec0 in project_list: - id = rec0["id"] - pair = "{}/{}".format(rec0["owner"], rec0["name"]) - rec1 = user_session.project_info(id, collaborators=True) - rec2 = user_session.project_info(pair) - rec3 = user_session.project_info(f"{pair}/{id}") - assert all(rec0[k] == v for k, v in rec2.items()) - assert 
all(rec1[k] == v for k, v in rec2.items()) - assert rec2 == rec3 - - -def test_project_info_errors(user_session, project_list): - with pytest.raises(AEException) as excinfo: - result = user_session.project_info("testproj1") - print(result) - assert "Multiple projects" in str(excinfo.value) - user_session.project_info("testproj1", quiet=True) - with pytest.raises(AEException) as excinfo: - user_session.project_info("testproj4") - assert "No projects" in str(excinfo.value) - user_session.project_info("testproj4", quiet=True) - - -@pytest.fixture(scope="module") -def resource_profiles(user_session): - return user_session.resource_profile_list() - - -def test_resource_profiles(user_session, resource_profiles): - for rec in resource_profiles: - assert rec == user_session.resource_profile_info(rec["name"]) - with pytest.raises(AEException) as excinfo: - user_session.resource_profile_info("*") - assert "Multiple resource profiles found" in str(excinfo.value) - with pytest.raises(AEException) as excinfo: - user_session.resource_profile_info("") - assert "No resource profiles found" in str(excinfo.value) - - -@pytest.fixture(scope="module") -def editors(user_session): - return user_session.editor_list() - - -def test_editors(user_session, editors): - for rec in editors: - assert rec == user_session.editor_info(rec["id"]) - assert sum(rec["is_default"] for rec in editors) == 1 - assert set(rec["id"] for rec in editors).issuperset({"jupyterlab", "notebook"}) - - -@pytest.mark.xfail -def test_endpoints(user_session): - slist = user_session.endpoint_list() - for rec in slist: - rec2 = user_session.endpoint_info(rec["id"]) - assert rec == rec2 - - -def test_samples(user_session): - slist = user_session.sample_list() - assert sum(rec["is_default"] for rec in slist) == 1 - assert sum(rec["is_template"] for rec in slist) > 1 - for rec in slist: - rec2 = user_session.sample_info(rec["id"]) - rec3 = user_session.sample_info(rec["name"]) - assert rec == rec2 and rec == rec3 - - -def test_sample_clone(user_session): - cname = "NLP API" - pname = "testclone" - rrec1 = user_session.sample_clone(cname, name=pname, wait=True) - with pytest.raises(AEException) as excinfo: - user_session.sample_clone(cname, name=pname, wait=True) - rrec2 = user_session.sample_clone(cname, name=pname, make_unique=True, wait=True) - rrec3 = user_session.sample_clone(cname, wait=True) - user_session.project_delete(rrec1) - user_session.project_delete(rrec2) - user_session.project_delete(rrec3) - - -@pytest.fixture(scope="module") -def api_project(user_session, project_list): - return next(rec for rec in project_list if rec["name"] == "testproj3") - - -@pytest.fixture(scope="module") -def api_revisions(user_session, api_project): - prec = api_project - revs = user_session.revision_list(prec) - return prec, revs - - -@pytest.fixture(scope="module") -def downloaded_project(user_session, api_revisions): - prec, revs = api_revisions - with tempfile.TemporaryDirectory() as tempd: - fname = user_session.project_download(prec) - assert fname == prec["name"] + ".tar.gz" - with tarfile.open(fname, "r") as tf: - tf.extractall(path=tempd) - dnames = glob.glob(os.path.join(tempd, "*", "anaconda-project.yml")) - assert len(dnames) == 1 - dname = os.path.dirname(dnames[0]) - yield fname, dname - for r in user_session.session_list(): - if r["name"].startswith("test_upload"): - user_session.session_stop(r) - for r in user_session.project_list(): - if r["name"].startswith("test_upload"): - user_session.project_delete(r) - assert not 
any(r["name"].startswith("test_upload") for r in user_session.project_list()) - - -def test_project_download(user_session, downloaded_project): - # Use this to exercise a couple of branches in _api - pass - - -def test_project_upload(user_session, downloaded_project): - fname, dname = downloaded_project - user_session.project_upload(fname, "test_upload1", "1.2.3", wait=True) - rrec = user_session.revision_list("test_upload1") - rev = rrec[0]["name"] - fname2 = user_session.project_download(f"test_upload1:{rev}") - assert fname2 == f"test_upload1-{rev}.tar.gz" - assert os.path.exists(fname2) - _compare_tarfiles(fname, fname2) - if rev == "0.0.1": - pytest.xfail("5.4.1 revision issue") - assert rev == "1.2.3" - - -def test_project_upload_as_directory(user_session, downloaded_project): - fname, dname = downloaded_project - user_session.project_upload(dname, "test_upload2", "1.3.4", wait=True) - rrec = user_session.revision_list("test_upload2") - assert len(rrec) == 1 - rev = rrec[0]["name"] - fname2 = user_session.project_download(f"test_upload2:{rev}") - assert fname2 == f"test_upload2-{rev}.tar.gz" - assert os.path.exists(fname2) - if rev == "0.0.1": - pytest.xfail("5.4.1 revision issue") - assert rev == "1.3.4" - - -def _soft_equal(d1, d2): - if isinstance(d1, dict) and isinstance(d2, dict): - for k in set(d1) | set(d2): - if k in d1 and k in d2: - if not _soft_equal(d1[k], d2[k]): - return False - return True - else: - return d1 == d2 - - -def test_project_revisions(user_session, api_revisions): - prec, revs = api_revisions - rev0 = user_session.revision_info(prec) - # There are sometimes minor differences in the '_project' - # record due to the exact way it is retrieved. For instance, - # the project_create_status value will be missing in the - # info calls; and prec has the collaborators entries it. - # So we do a rougher verification that the project entry - # is correct. 
- assert _soft_equal(rev0["_project"], revs[0]["_project"]) - rev0["_project"] = revs[0]["_project"] - assert rev0 == revs[0] - rev0 = user_session.revision_info(f'{prec["id"]}:latest') - assert _soft_equal(rev0["_project"], revs[0]["_project"]) - rev0["_project"] = revs[0]["_project"] - assert revs[0] == rev0 - for rev in revs: - revN = user_session.revision_info(f'{prec["id"]}:{rev["id"]}') - assert _soft_equal(revN["_project"], rev["_project"]) - revN["_project"] = rev["_project"] - assert rev == revN - commands = user_session.revision_commands(prec) - assert rev0["commands"] == ", ".join(c["id"] for c in commands) - - -def test_project_revision_errors(user_session, api_revisions): - prec, revs = api_revisions - with pytest.raises(AEException) as excinfo: - user_session.revision_info("testproj1") - user_session.revision_info("testproj1", quiet=True) - assert "Multiple projects" in str(excinfo.value) - with pytest.raises(AEException) as excinfo: - user_session.revision_info("testproj4") - assert "No projects" in str(excinfo.value) - user_session.revision_info("testproj4", quiet=True) - user_session.revision_info(f'{prec["id"]}:0.*', quiet=True) - with pytest.raises(AEException) as excinfo: - user_session.revision_info(f'{prec["id"]}:a.b.c') - assert "No revisions" in str(excinfo.value) - user_session.revision_info(f'{prec["id"]}:a.b.c', quiet=True) - - -def test_project_patch(user_session, api_project, editors, resource_profiles): - prec = api_project - old, new = {}, {} - for what, wlist in ( - ("resource_profile", (r["name"] for r in resource_profiles)), - ("editor", (e["id"] for e in editors)), - ): - old[what] = prec[what] - new[what] = next(v for v in wlist if v != old) - prec2 = user_session.project_patch(prec, **new) - assert {k: prec2[k] for k in new} == new - prec3 = user_session.project_patch(prec2, **old) - assert {k: prec3[k] for k in old} == old - - -def test_project_collaborators(user_session, api_project, project_list): - prec = api_project - uname = next(rec["owner"] for rec in project_list if rec["owner"] != prec["owner"]) - with pytest.raises(AEException) as excinfo: - user_session.project_collaborator_info(prec, uname) - user_session.project_collaborator_info(prec, uname, quiet=True) - assert f"No collaborators found matching id={uname}" in str(excinfo.value) - clist = user_session.project_collaborator_add(prec, uname) - assert len(clist) == 1 - clist = user_session.project_collaborator_add(prec, "everyone", group=True, read_only=True) - assert len(clist) == 2 - assert all( - c["id"] == uname - and c["permission"] == "rw" - and c["type"] == "user" - or c["id"] == "everyone" - and c["permission"] == "r" - and c["type"] == "group" - for c in clist - ) - clist = user_session.project_collaborator_add(prec, uname, read_only=True) - assert len(clist) == 2 - assert all( - c["id"] == uname - and c["permission"] == "r" - and c["type"] == "user" - or c["id"] == "everyone" - and c["permission"] == "r" - and c["type"] == "group" - for c in clist - ) - collabs = tuple(crec["id"] for crec in clist) - clist = user_session.project_collaborator_remove(prec, collabs) - assert len(clist) == 0 - with pytest.raises(AEException) as excinfo: - user_session.project_collaborator_remove(prec, uname) - assert f"Collaborator(s) not found: {uname}" in str(excinfo.value) - - -def test_project_activity(user_session, api_project): - prec = api_project - activity = user_session.project_activity(prec) - assert 1 <= len(activity) <= 10 - activity2 = user_session.project_activity(prec, latest=True) - 
assert activity[0] == activity2 - activity3 = user_session.project_activity(prec, limit=1) - assert activity[0] == activity3[0] - with pytest.raises(AEException) as excinfo: - user_session.project_activity(prec, latest=True, all=True) - with pytest.raises(AEException) as excinfo: - user_session.project_activity(prec, latest=True, limit=2) - with pytest.raises(AEException) as excinfo: - user_session.project_activity(prec, all=True, limit=2) - - -@pytest.fixture(scope="module") -def api_session(user_session, api_project): - prec = api_project - srec = user_session.session_start(prec, wait=False) - srec2 = user_session.session_restart(srec, wait=True) - assert not any(r["id"] == srec["id"] for r in user_session.session_list()) - yield prec, srec2 - user_session.session_stop(srec2) - assert not any(r["id"] == srec2["id"] for r in user_session.session_list()) - - -# DNS resolution and ingress are currently failing in the new CI environments and is a known issue right now. -# TODO: Re-enable this test when issues have been resolved. -@pytest.mark.ci_skip -def test_session(user_session, api_session): - prec, srec = api_session - assert srec["owner"] == prec["owner"], srec - # Ensure that the session can be retrieved by its project ID as well - srec2 = user_session.session_info(f'{srec["owner"]}/*/{prec["id"]}') - assert srec["id"] == srec2["id"] - endpoint = srec["id"].rsplit("-", 1)[-1] - sdata = user_session._get("/", subdomain=endpoint, format="text") - assert "Jupyter Notebook requires JavaScript." in sdata, sdata - - -def test_session_name(user_session, api_session): - prec, srec = api_session - assert srec["name"] == prec["name"], srec - - -def test_project_sessions(user_session, api_session): - prec, srec = api_session - slist = user_session.project_sessions(prec) - assert len(slist) == 1 and slist[0]["id"] == srec["id"] - - -def test_session_branches_5_7_0(user_session, api_session): - """Behavior changed in 5.7.0""" - prec, srec = api_session - branches = user_session.session_branches(srec, format="json") - bdict = {r["branch"]: r["sha1"] for r in branches} - assert set(bdict) == {"local", "master", "parent"}, branches - assert bdict["local"] == bdict["master"], branches - - -def test_session_before_changes(user_session, api_session): - prec, srec = api_session - changes1 = user_session.session_changes(srec, format="json") - changes1 = [c for c in changes1 if c["path"] != ".projectignore"] - assert changes1 == [], changes1 - changes2 = user_session.session_changes(srec, master=True, format="json") - changes2 = [c for c in changes2 if c["path"] != ".projectignore"] - assert changes2 == [], changes2 - - -@pytest.fixture(scope="module") -def api_deployment(user_session, api_project): - prec = api_project - dname = "testdeploy" - ename = "testendpoint" - drec = user_session.deployment_start(prec, name=dname, endpoint=ename, command="default", public=False, wait=False, _skip_endpoint_test=True) - drec2 = user_session.deployment_restart(drec, wait=True) - assert not any(r["id"] == drec["id"] for r in user_session.deployment_list()) - yield prec, drec2 - user_session.deployment_stop(drec2) - assert not any(r["id"] == drec2["id"] for r in user_session.deployment_list()) - - -def test_deploy(user_session, api_deployment): - prec, drec = api_deployment - assert drec["owner"] == prec["owner"], drec - assert drec["project_name"] == prec["name"], drec - for attempt in range(3): - try: - ldata = user_session._get("/", subdomain=drec["endpoint"], format="text") - break - except 
AEUnexpectedResponseError: - time.sleep(attempt * 5) - pass - else: - raise RuntimeError("Could not get the endpoint to respond") - assert ldata.strip() == "Hello Anaconda Enterprise!", ldata - - -def test_project_deployment(user_session, api_deployment): - prec, drec = api_deployment - dlist = user_session.project_deployments(prec) - assert len(dlist) == 1 and dlist[0]["id"] == drec["id"] - - -def test_deploy_patch(user_session, api_deployment): - prec, drec = api_deployment - drec2 = user_session.deployment_patch(drec, public=not drec["public"]) - assert drec2["public"] != drec["public"] - drec3 = user_session.deployment_patch(drec, public=not drec2["public"]) - assert drec3["public"] == drec["public"] - - -def test_deploy_token(user_session, api_deployment): - prec, drec = api_deployment - token = user_session.deployment_token(drec) - resp = requests.get( - f'https://{drec["endpoint"]}.' + user_session.hostname, - headers={"Authorization": f"Bearer {token}"}, - verify=False, - ) - assert resp.status_code == 200 - assert resp.text.strip() == "Hello Anaconda Enterprise!", resp.text - with pytest.raises(AEException) as excinfo: - token = user_session.deployment_token(drec, format="table") - assert "Response is not a tabular format" in str(excinfo.value) - - -def test_deploy_logs(user_session, api_deployment): - prec, drec = api_deployment - app_prefix = "anaconda-app-" + drec["id"].rsplit("-", 1)[-1] + "-" - logs = user_session.deployment_logs(drec, format="json") - assert set(logs) == {"app", "events", "name", "proxy"}, logs - assert logs["name"].startswith(app_prefix), logs["name"] - assert "The project is ready to run commands." in logs["app"], logs["app"] - assert app_prefix in logs["events"], logs["events"] - assert "App Proxy is fully operational!" 
in logs["proxy"], logs["proxy"] - - -def test_deploy_duplicate(user_session, api_deployment): - prec, drec = api_deployment - dname = drec["name"] + "-dup" - with pytest.raises(RuntimeError) as excinfo: - user_session.deployment_start(prec, name=dname, endpoint=drec["endpoint"], command="default", public=False, wait=True) - assert f'endpoint "{drec["endpoint"]}" is already in use' in str(excinfo.value) - assert not any(r["name"] == dname for r in user_session.deployment_list()) - - -def test_deploy_collaborators(user_session, api_deployment): - uname: str = load_account(id="2")["username"] - prec, drec = api_deployment - clist = user_session.deployment_collaborator_list(drec) - assert len(clist) == 0 - clist = user_session.deployment_collaborator_add(drec, uname) - assert len(clist) == 1 - clist = user_session.deployment_collaborator_add(drec, "everyone", group=True) - assert len(clist) == 2 - clist = user_session.deployment_collaborator_add(drec, uname) - assert len(clist) == 2 - assert all(c["id"] == uname and c["type"] == "user" or c["id"] == "everyone" and c["type"] == "group" for c in clist) - for crec in clist: - crec2 = user_session.deployment_collaborator_info(drec, crec["id"]) - assert crec2["id"] == crec["id"] and crec2["type"] == crec["type"] - clist = user_session.deployment_collaborator_remove(drec, (uname, "everyone")) - assert len(clist) == 0 - with pytest.raises(AEException) as excinfo: - user_session.deployment_collaborator_remove(drec, uname) - assert f"Collaborator(s) not found: {uname}" in str(excinfo.value) - - -def test_deploy_broken(user_session, api_deployment): - prec, drec = api_deployment - dname = drec["name"] + "-broken" - with pytest.raises(RuntimeError) as excinfo: - user_session.deployment_start(prec, name=dname, command="broken", public=False, stop_on_error=True) - assert "Error completing deployment start: App failed to run" in str(excinfo.value) - assert not any(r["name"] == dname for r in user_session.deployment_list()) - - -@pytest.mark.skip(reason="Failing against CI - k8s gravity issue") -def test_k8s_node(user_session): - nlist = user_session.node_list() - for nrec in nlist: - nrec2 = user_session.node_info(nrec["name"]) - assert nrec2["name"] == nrec["name"] - - -@pytest.mark.skip(reason="Failing against CI - k8s gravity issue") -def test_k8s_pod(user_session, api_session, api_deployment): - _, srec = api_session - _, drec = api_deployment - plist = user_session.pod_list() - assert any(prec["id"] == srec["id"] for prec in plist) - assert any(prec["id"] == drec["id"] for prec in plist) - for prec in plist: - prec2 = user_session.pod_info(prec["id"]) - assert prec2["id"] == prec["id"] - srec2 = user_session.session_info(srec["id"], k8s=True) - assert srec2["id"] == srec["id"] - drec2 = user_session.deployment_info(drec["id"], k8s=True) - assert drec2["id"] == drec["id"] - - -def test_job_run1(user_session, api_project): - prec = api_project - uname = user_session.username - user_session.job_create(prec, name="testjob1", command="run", run=True, wait=True) - jrecs = user_session.job_list() - assert len(jrecs) == 1, jrecs - rrecs = user_session.run_list() - assert len(rrecs) == 1, rrecs - ldata1 = user_session.run_log(rrecs[0]["id"], format="text") - assert ldata1.endswith("Hello Anaconda Enterprise!\n"), repr(ldata1) - user_session.job_create(prec, name="testjob1", command="run", make_unique=True, run=True, wait=True) - jrecs = user_session.job_list() - assert len(jrecs) == 2, jrecs - jrecs2 = user_session.project_jobs(prec) - assert {r["id"]: r for r 
in jrecs} == {r["id"]: r for r in jrecs2} - rrecs = user_session.run_list() - rrecs2 = user_session.project_runs(prec) - assert len(rrecs) == 2, rrecs - assert {r["id"]: r for r in rrecs} == {r["id"]: r for r in rrecs2} - for rrec in rrecs: - user_session.run_delete(rrec["id"]) - for jrec in jrecs: - user_session.job_delete(jrec["id"]) - assert not user_session.job_list() - assert not user_session.run_list() - - -def test_job_run2(user_session, api_project): - prec = api_project - # Test cleanup mode and variables in jobs - variables = {"INTEGRATION_TEST_KEY_1": "value1", "INTEGRATION_TEST_KEY_2": "value2"} - user_session.job_create(prec, name="testjob2", command="run_with_env_vars", variables=variables, run=True, wait=True, cleanup=True) - # The job, and run records should have already been deleted - assert not user_session.job_list() - assert not user_session.run_list() - - -def test_job_run3(user_session, api_project): - prec = api_project - # Test cleanup mode and variables in jobs - variables = {"INTEGRATION_TEST_KEY_1": "value1", "INTEGRATION_TEST_KEY_2": "value2"} - job_create_response = user_session.job_create( - prec, name="testjob2", command="run_with_env_vars", variables=variables, run=True, wait=True, cleanup=False - ) - - rrecs = user_session.run_list() - assert len(rrecs) == 1, rrecs - ldata2 = user_session.run_log(rrecs[0]["id"], format="text") - # Confirm that the environment variables were passed through - outvars = dict(line.strip().replace(" ", "").split(":", 1) for line in ldata2.splitlines() if line.startswith("INTEGRATION_TEST_KEY_")) - assert variables == outvars, outvars - user_session.run_delete(rrecs[0]["id"]) - assert not user_session.run_list() - - user_session.job_delete(job_create_response["id"]) - assert not user_session.job_list() - - -def test_login_time(admin_session, user_session): - # The current session should already be authenticated - now = datetime.now() - plist0 = user_session.project_list() - user_list = admin_session.user_list() - urec = next((r for r in user_list if r["username"] == user_session.username), None) - assert urec is not None - ltm1 = datetime.fromtimestamp(urec["lastLogin"] / 1000.0) - assert ltm1 < now - - # Create new login session. This should change lastLogin - password: str = load_account(id="1")["password"] - user_sess2 = AEUserSession(user_session.hostname, user_session.username, password, persist=False) - plist1 = user_sess2.project_list() - urec = admin_session.user_info(urec["id"]) - ltm2 = datetime.fromtimestamp(urec["lastLogin"] / 1000.0) - assert ltm2 > ltm1 - user_sess2.disconnect() - assert plist1 == plist0 - - # Create new impersonation session. This should not change lastLogin - user_sess3 = AEUserSession(admin_session.hostname, user_session.username, admin_session, persist=False) - plist2 = user_sess3.project_list() - urec = admin_session.user_info(urec["id"]) - ltm3 = datetime.fromtimestamp(urec["lastLogin"] / 1000.0) - assert ltm3 == ltm2 - user_sess3.disconnect() - # Confirm the impersonation worked by checking the project lists are the same - assert plist2 == plist0 - - # Access the original login session. 
It should not reauthenticate - plist3 = user_session.project_list() - urec = admin_session.user_info(urec["id"]) - ltm4 = datetime.fromtimestamp(urec["lastLogin"] / 1000.0) - assert ltm4 == ltm3 - assert plist3 == plist0 diff --git a/tests/system/ae5_tools/test_api_secret_system.py b/tests/system/ae5_tools/test_api_secret_system.py deleted file mode 100644 index 90833bc0..00000000 --- a/tests/system/ae5_tools/test_api_secret_system.py +++ /dev/null @@ -1,45 +0,0 @@ -import uuid - -import pytest - -from ae5_tools.api import AEException, AEUserSession -from tests.adsp.common.utils import _get_vars -from tests.system.state import load_account - - -@pytest.fixture(scope="session") -def user_session(): - hostname: str = _get_vars("AE5_HOSTNAME") - local_account: dict = load_account(id="1") - username: str = local_account["username"] - password: str = local_account["password"] - s = AEUserSession(hostname, username, password) - yield s - s.disconnect() - - -@pytest.fixture(scope="module") -def secret_name(): - return "MOCK_SECRET" - - -##################################################### -# Test Cases For secret_add, secret_list, and secret_delete -##################################################### - - -def test_secret_add_and_list(user_session, secret_name): - user_session.secret_add(key=secret_name, value=str(uuid.uuid4())) - results: list[dict] = user_session.secret_list() - assert secret_name in [secret["secret_name"] for secret in results] - - -def test_secret_delete(user_session, secret_name): - user_session.secret_delete(key=secret_name) - - -def test_secret_delete_should_gracefully_fail(user_session, secret_name): - new_key: str = str(uuid.uuid4()).replace("-", "_") - with pytest.raises(AEException) as context: - user_session.secret_delete(key=new_key) - assert str(context.value) == f"User secret {new_key} was not found and cannot be deleted." diff --git a/tests/system/ae5_tools/test_api_user_list_system.py b/tests/system/ae5_tools/test_api_user_list_system.py deleted file mode 100644 index ba69d1d8..00000000 --- a/tests/system/ae5_tools/test_api_user_list_system.py +++ /dev/null @@ -1,38 +0,0 @@ -##################################################### -# Test Cases For user_list -##################################################### - - -def test_user_list_has_realm_roles(admin_session): - # Test Case - User list contains realm roles - - # Execute the test - user_list: list = admin_session.user_list() - - # The live system will have more than 0 users - assert len(user_list) > 0 - - # Look for the admin account - account: dict = [user for user in user_list if user["username"] == "anaconda-enterprise"][0] - - # Ensure the new property is present - assert "realm_roles" in account - - # Ensure the property has roles which would be present on the account - assert len(account["realm_roles"]) > 0 - - # Ensure the account has the expected roles assigned. 
- - # Roles - expected_roles: list[str] = [ - "offline_access", - "uma_authorization", - ] - - # Groups - expected_groups: list[str] = ["admins", "developers", "everyone"] - - for role in expected_roles: - assert role in account["realm_roles"] - for group in expected_groups: - assert group in account["realm_groups"] diff --git a/tests/system/ae5_tools/test_options.py b/tests/system/ae5_tools/test_options.py deleted file mode 100644 index 775e2ffa..00000000 --- a/tests/system/ae5_tools/test_options.py +++ /dev/null @@ -1,164 +0,0 @@ -import pytest - -from tests.adsp.common.utils import _cmd - - -@pytest.fixture(scope="module") -def project_list(user_session): - return _cmd("project list") - - -@pytest.fixture(scope="module") -def project_dup_names(project_list): - counts = {} - for p in project_list: - counts[p["name"]] = counts.get(p["name"], 0) + 1 - return sorted(p for p, v in counts.items() if v > 1) - - -def test_owner(user_session, project_list): - uname = user_session.username - first_list = None - for cmd in (f"project list {uname}/*", f"project list --filter owner={uname}"): - plist = _cmd(cmd) - if first_list is None: - assert all(p["owner"] == uname for p in plist) - first_list = plist - else: - assert plist == first_list - - -def test_name(project_dup_names): - pname = project_dup_names[0] - first_list = None - for cmd in (f"project list {pname}", f"project list */{pname}", f"project list --filter name={pname}"): - plist = _cmd(cmd) - if first_list is None: - assert len(plist) > 1 - assert all(p["name"] == pname for p in plist) - first_list = plist - else: - assert plist == first_list - - -def test_owner_name(user_session, project_dup_names): - uname = user_session.username - pname = project_dup_names[0] - first_list = None - for cmd in ( - f"project list {uname}/{pname}", - f"project list {uname}/* --filter name={pname}", - f"project list --filter name={pname} {uname}/*", - f"project list */{pname} --filter owner={uname}", - f"project list --filter owner={uname} */{pname}", - f"project list {pname} --filter owner={uname}", - f"project list --filter owner={uname} --filter name={pname}", - f"project list --filter name={pname} --filter owner={uname}", - f"project list --filter owner={uname},name={pname}", - f"project list --filter owner={uname}&name={pname}", - ): - plist = _cmd(cmd) - if first_list is None: - assert len(plist) == 1 - assert all(p["name"] == pname or p["owner"] == uname for p in plist) - first_list = plist - else: - assert plist == first_list - - -def test_boolean_pipe(user_session, project_dup_names): - uname = user_session.username - pname = project_dup_names[0] - plist = _cmd(f"project list --filter name={pname}|owner={uname}") - first_list = None - for cmd in ( - f"project list --filter name={pname}|owner={uname}", - f"project list --filter owner={uname}|name={pname}", - ): - plist = _cmd(cmd) - if first_list is None: - assert (p["name"] == pname or p["owner"] == uname for p in plist) - assert any(p["name"] != pname for p in plist) - assert any(p["owner"] != uname for p in plist) - first_list = plist - else: - assert plist == first_list - - -def test_boolean_comma(user_session, project_dup_names): - uname = user_session.username - pname = project_dup_names[0] - pname2 = project_dup_names[1] - first_list = None - # The , has lower priority than the |, and equal to multiple --filter commands - for cmd in ( - f"project list --filter name={pname}|name={pname2},owner={uname}", - f"project list --filter owner={uname},name={pname}|name={pname2}", - f"project list 
--filter name={pname}|name={pname2} --filter owner={uname}", - f"project list --filter owner={uname} --filter name={pname}|name={pname2}", - ): - plist = _cmd(cmd) - if first_list is None: - assert (p["name"] in (pname, pname2) and p["owner"] == uname for p in plist) - first_list = plist - else: - assert plist == first_list - - -def test_boolean_ampersand(user_session, project_dup_names): - uname = user_session.username - pname = project_dup_names[0] - pname2 = project_dup_names[1] - first_list = None - # The & has higher priority than the | - for cmd in ( - f"project list --filter name={pname}|name={pname2}&owner={uname}", - f"project list --filter name={pname}|owner={uname}&name={pname2}", - f"project list --filter owner={uname}&name={pname2}|name={pname}", - f"project list --filter name={pname2}&owner={uname}|name={pname}", - ): - plist = _cmd(cmd) - if first_list is None: - assert (p["name"] == pname or (p["name"] == pname2 and p["owner"] == uname) for p in plist) - first_list = plist - else: - assert plist == first_list - - -def test_columns(user_session): - uname = user_session.username - for cmd in ( - f"project list --columns name,editor,id --filter owner={uname}", - f"project list --columns name,editor,id", - ): - plist = _cmd(cmd) - assert (list(p) == ["name", "editor", "id"] for p in plist) - - -def test_sort(user_session, project_dup_names): - name_filter = "|".join(f"name={n}" for n in project_dup_names) - plist1 = _cmd(f"project list --filter {name_filter} --sort name,owner") - plist2 = _cmd(f"project list --filter {name_filter} --sort name,-owner") - plist3 = _cmd(f"project list --filter {name_filter} --sort -name,owner") - plist4 = _cmd(f"project list --filter {name_filter} --sort -name,-owner") - assert plist1 == plist4[::-1] - assert plist2 == plist3[::-1] - assert [p["name"] for p in plist1] == [p["name"] for p in plist2] - slist1 = [(p["name"], p["owner"]) for p in plist1] - assert slist1 == sorted(slist1) - slist2 = [(p["name"], p["owner"]) for p in plist2] - assert slist2 != sorted(slist2) - - -def test_filter_comparison(project_list): - owners = sorted(set(p["owner"] for p in project_list)) - plist1 = _cmd(f"project list --sort owner,name --filter owner<{owners[1]}") - plist2 = _cmd(f"project list --sort owner,name --filter owner<={owners[0]}") - assert plist1 == plist2 - plist3 = _cmd(f"project list --sort owner,name --filter owner>={owners[1]}") - plist4 = _cmd(f"project list --sort owner,name --filter owner>{owners[0]}") - plist5 = _cmd(f"project list --sort owner,name --filter owner!={owners[0]}") - assert plist3 == plist4 - assert plist3 == plist5 - plist6 = _cmd(f"project list --sort owner,name") - assert plist1 + plist3 == plist6 diff --git a/tests/system/runner.py b/tests/system/runner.py deleted file mode 100644 index f416874c..00000000 --- a/tests/system/runner.py +++ /dev/null @@ -1,147 +0,0 @@ -from __future__ import annotations - -import json -import logging -import uuid - -from dotenv import load_dotenv - -from ae5_tools import AEUserSession, demand_env_var_as_bool, get_env_var -from tests.adsp.common.fixture_manager import FixtureManager -from tests.adsp.common.utils import _process_launch_wait - -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger(__name__) - - -def run() -> None: - shell_out_cmd: str = "python -m pytest --cov=ae5_tools --show-capture=all -rP tests/system/ae5_tools --cov-append --cov-report=xml -vv" - - if get_env_var(name="CI") and demand_env_var_as_bool(name="CI"): - shell_out_cmd += " --ci-skip" - - 
_process_launch_wait(shell_out_cmd=shell_out_cmd) - - -class SystemTestFixtureSuite(FixtureManager): - """ - System Test Setup - 1. Environment Setup - A. Create test accounts: tooltest, tooltest2, tooltest3 - B. Upload projects 1,2,3 -> user accounts 1,2,3 - C. Set expected fixture attributes (tool, sharing) - 3. (Optional) Environment Teardown - - This covers the current suite of system tests. - Tests which need additions to this are expected to manage the lifecycle of those effects. - """ - - def _setup(self) -> None: - # Create Fixtures - - self._create_service_accounts() - self._upload_projects() - self._build_relationships() - self._set_project_properties() - - def _create_service_accounts(self): - # Create service accounts (and connections) - self.create_fixture_accounts(accounts=self.config["accounts"], force=self.config["force"]) - self.create_fixture_connections() - - def _upload_projects(self): - # 1. Each user gets all three projects. - for account in self.config["accounts"]: - for proj in self.config["projects"]: - self.upload_fixture_project(proj_params=proj, owner=account["username"], force=self.config["force"]) - - def _build_relationships(self): - # 2. Build our relationships. - logger.info("Building project / account relationships") - - # User 3 shares projects 1 & 2 with User 1 - source_user = self._get_account(id="3") - source_user_conn: AEUserSession = self.get_account_conn(username=source_user["username"]) - target_user_name: str = self._get_account(id="1")["username"] - - for project in self.projects: - if project["record"]["owner"] == source_user["username"] and project["record"]["name"] in [ - "testproj1", - "testproj2", - ]: - project_id: str = project["record"]["id"] - source_user_conn.project_collaborator_add(ident=project_id, userid=target_user_name) - - # User 1 shares projects to different numbers of users - source_user_conn: AEUserSession = self.get_account_conn(username=self._get_account(id="1")["username"]) - for project in self.projects: - if project["record"]["owner"] == self._get_account(id="1")["username"]: - project_name: str = project["record"]["name"] - project_id: str = project["record"]["id"] - logger.info("Configuring sharing on project %s for %s", project["record"]["name"], project["record"]["owner"]) - - if project_name == self.config["projects"][0]["name"]: - # Add user 2 - target_user_name: str = self._get_account(id="2")["username"] - source_user_conn.project_collaborator_add(ident=project_id, userid=target_user_name) - elif project_name == self.config["projects"][1]["name"]: - # Add user 2 - target_user_name: str = self._get_account(id="2")["username"] - source_user_conn.project_collaborator_add(ident=project_id, userid=target_user_name) - - # Add user 3 - target_user_name: str = self._get_account(id="3")["username"] - source_user_conn.project_collaborator_add(ident=project_id, userid=target_user_name) - - elif project_name == self.config["projects"][2]["name"]: - """""" - else: - raise NotImplementedError("Unknown project to update contributor on") - - def _set_project_properties(self): - # 3. 
Set editors for user 1's projects - source_user_conn: AEUserSession = self.get_account_conn(username=self._get_account(id="1")["username"]) - for project in self.projects: - if project["record"]["owner"] == self._get_account(id="1")["username"]: - project_name: str = project["record"]["name"] - project_id: str = project["record"]["id"] - logger.info("Setting default editor on project %s for %s", project["record"]["name"], project["record"]["owner"]) - - if project_name == self.config["projects"][0]["name"]: - source_user_conn.project_patch(ident=project_id, editor="jupyterlab") # jupyterlab, notebook, vscode - elif project_name == self.config["projects"][1]["name"]: - source_user_conn.project_patch(ident=project_id, editor="vscode") # jupyterlab, notebook, vscode - elif project_name == self.config["projects"][2]["name"]: - source_user_conn.project_patch(ident=project_id, editor="notebook") # jupyterlab, notebook, vscode - else: - raise NotImplementedError("Unknown project to update default editor on") - - @staticmethod - def gen_config(randomize: bool = False) -> dict: - # load our fixtures - with open(file="tests/fixtures/system/fixtures.json", mode="r", encoding="utf-8") as file: - config: dict = json.load(file) - - if randomize: - # randomize! - for account in config["accounts"]: - prefix: str = "ae-system-test" - account_id: str = str(uuid.uuid4()) - account["username"] = prefix + "-" + account_id - account["email"] = account["username"] + "@localhost.local" - account["firstname"] = account_id - account["lastname"] = prefix - account["password"] = str(uuid.uuid4()) - - return config - - -if __name__ == "__main__": - # Load env vars, - do NOT override previously defined ones - load_dotenv(override=False) - - with SystemTestFixtureSuite(config=SystemTestFixtureSuite.gen_config()) as manager: - # serialize to allow individual tests to operate (in other processes) - with open(file="system-test-state.json", mode="w", encoding="utf-8") as file: - file.write(str(manager)) - run() diff --git a/tests/system/state.py b/tests/system/state.py deleted file mode 100644 index e8d86e4a..00000000 --- a/tests/system/state.py +++ /dev/null @@ -1,8 +0,0 @@ -import json - -with open(file="system-test-state.json", mode="r", encoding="utf-8") as file: - FIXTURE_STATE: dict = json.load(file) - - -def load_account(id: str) -> dict: - return [account for account in FIXTURE_STATE["accounts"] if account["id"] == id][0] diff --git a/tests/unit/ae5_tools/test_api.py b/tests/unit/ae5_tools/test_api.py new file mode 100644 index 00000000..38a5564a --- /dev/null +++ b/tests/unit/ae5_tools/test_api.py @@ -0,0 +1,120 @@ +import json +import os +from unittest.mock import MagicMock + +import requests + +from ae5_tools.api import AEAdminSession, AESessionBase, AEUserSession + +base_params: dict = {"hostname": "mock-hostname", "username": "mock-username", "password": "", "persist": False} + + +def unset_cf_env_vars(): + if "CF_ACCESS_CLIENT_ID" in os.environ: + del os.environ["CF_ACCESS_CLIENT_ID"] + if "CF_ACCESS_CLIENT_SECRET" in os.environ: + del os.environ["CF_ACCESS_CLIENT_SECRET"] + + +def set_cf_env_vars(): + os.environ["CF_ACCESS_CLIENT_ID"] = "MOCK-CLIENT-ID" + os.environ["CF_ACCESS_CLIENT_SECRET"] = "MOCK-CLIENT-SECRET" + + +class AESessionBaseTester(AESessionBase): + _connected = MagicMock(return_value=True) + _connect = MagicMock(return_value=True) + _set_header = MagicMock(return_value=True) + + def __init__(self, hostname, username, password, prefix, persist, **kwargs): + super().__init__(hostname, username, 
password, prefix, persist)
+
+
+def test_cloudflare_headers_are_added_if_present():
+    # Ensure the Cloudflare Access (CF) header code path is executed
+    set_cf_env_vars()
+
+    tester: AESessionBaseTester = AESessionBaseTester(**base_params, prefix="mock-prefix")
+
+    assert tester.session.headers["CF-Access-Client-Id"] == "MOCK-CLIENT-ID"
+    assert tester.session.headers["CF-Access-Client-Secret"] == "MOCK-CLIENT-SECRET"
+
+    del tester
+    unset_cf_env_vars()
+
+    tester: AESessionBaseTester = AESessionBaseTester(**base_params, prefix="mock-prefix")
+    assert "CF-Access-Client-Id" not in tester.session.headers
+    assert "CF-Access-Client-Secret" not in tester.session.headers
+
+
+def test_cloudflare_headers_are_included_on_authorize():
+    unset_cf_env_vars()
+
+    tester: AESessionBaseTester = AESessionBaseTester(**base_params, prefix="mock-prefix")
+    assert "CF-Access-Client-Id" not in tester.session.headers
+    assert "CF-Access-Client-Secret" not in tester.session.headers
+
+    set_cf_env_vars()
+    tester.authorize()
+    assert tester.session.headers["CF-Access-Client-Id"] == "MOCK-CLIENT-ID"
+    assert tester.session.headers["CF-Access-Client-Secret"] == "MOCK-CLIENT-SECRET"
+
+    unset_cf_env_vars()
+
+
+def test_user_session_adds_cloudflare_headers_if_present():
+    unset_cf_env_vars()
+
+    user_session: AEUserSession = AEUserSession(**base_params)
+    user_session._set_header()
+    assert "CF-Access-Client-Id" not in user_session.session.headers
+    assert "CF-Access-Client-Secret" not in user_session.session.headers
+    del user_session
+
+    set_cf_env_vars()
+    user_session: AEUserSession = AEUserSession(**base_params)
+    user_session._set_header()
+    assert user_session.session.headers["CF-Access-Client-Id"] == "MOCK-CLIENT-ID"
+    assert user_session.session.headers["CF-Access-Client-Secret"] == "MOCK-CLIENT-SECRET"
+
+    unset_cf_env_vars()
+
+
+def test_admin_session_connect_succeeds_with_cloudflare():
+    admin_session: AEAdminSession = AEAdminSession(**base_params)
+    admin_session.session = MagicMock(return_value=True)
+
+    mock_response: MagicMock = MagicMock()
+    mock_response.status_code = 200
+    mock_response.json = MagicMock(return_value={"key": "value"})
+    admin_session.session.post = MagicMock(return_value=mock_response)
+
+    admin_session._connect(password=base_params["password"])
+    assert admin_session._sdata == {"key": "value"}
+
+
+def test_admin_session_gracefully_fails_deserializing_cloudflare_error():
+    admin_session: AEAdminSession = AEAdminSession(**base_params)
+    resp: requests.Response = requests.Response()
+    resp.status_code = 200
+    resp.json = MagicMock(side_effect=[json.decoder.JSONDecodeError("Boom!", "", 0)])
+    admin_session.session.post = MagicMock(return_value=resp)
+    admin_session._connect(password=base_params["password"])
+    assert admin_session._sdata == {}
+
+
+def test_admin_session_gracefully_fails_on_cloudflare_error():
+    admin_session: AEAdminSession = AEAdminSession(**base_params)
+    resp: requests.Response = requests.Response()
+    resp.status_code = 200
+    resp.json = MagicMock(side_effect=[Exception("Boom!")])
+    admin_session.session.post = MagicMock(return_value=resp)
+    admin_session._connect(password=base_params["password"])
+    assert admin_session._sdata == {}
+
+
+def test_admin_session_gracefully_fails_with_exceeded_retry_count():
+    admin_session: AEAdminSession = AEAdminSession(**base_params)
+    admin_session.session.post = MagicMock(side_effect=[requests.exceptions.RetryError("Boom!")])
+    admin_session._connect(password=base_params["password"])
+    assert admin_session._sdata == {}
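
The RetryError case exercised by test_admin_session_gracefully_fails_with_exceeded_retry_count assumes retries are handled at the transport layer of the requests Session. A minimal sketch of that pattern is shown below; the helper name build_retrying_session and the specific Retry parameters are illustrative assumptions, not the ae5_tools implementation.

import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry


def build_retrying_session() -> requests.Session:
    # Retry transient transport-level failures; all parameter values here are illustrative.
    retries = Retry(total=5, backoff_factor=0.5, status_forcelist=[502, 503, 504])
    adapter = HTTPAdapter(max_retries=retries)
    session = requests.Session()
    session.mount("https://", adapter)
    session.mount("http://", adapter)
    return session


# Once the retry budget is exhausted, the adapter raises requests.exceptions.RetryError,
# which is the failure the final unit test above simulates via MagicMock.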