From 05fa6b5975e3e758b384b254854a3f1eb4c4056e Mon Sep 17 00:00:00 2001 From: Austin Dickey Date: Wed, 15 Feb 2023 14:40:35 -0600 Subject: [PATCH] benchalerts: move from separate repo (#747) * benchalerts: initial commit from separate repo * disable integration tests that GET /compare/runs because that times out --- .github/workflows/actions.yml | 7 + .github/workflows/release.yml | 3 + README.md | 2 + benchalerts/README.md | 110 ++++ benchalerts/benchalerts/__init__.py | 25 + benchalerts/benchalerts/_version.py | 18 + benchalerts/benchalerts/clients.py | 400 ++++++++++++++ benchalerts/benchalerts/log.py | 26 + benchalerts/benchalerts/parse_conbench.py | 216 ++++++++ benchalerts/benchalerts/talk_to_conbench.py | 197 +++++++ benchalerts/benchalerts/workflows.py | 286 ++++++++++ benchalerts/requirements-dev.txt | 15 + benchalerts/requirements.txt | 17 + benchalerts/setup.py | 61 ++ benchalerts/tests/integration_tests/README.md | 54 ++ .../tests/integration_tests/conftest.py | 41 ++ .../test_clients_integration.py | 34 ++ .../test_talk_to_conbench_integration.py | 71 +++ .../test_workflows_integration.py | 123 ++++ benchalerts/tests/unit_tests/__init__.py | 13 + benchalerts/tests/unit_tests/conftest.py | 69 +++ .../unit_tests/expected_md/details_error.md | 3 + .../expected_md/details_noregressions.md | 3 + .../expected_md/details_regressions.md | 3 + .../details_workflow_noregressions.md | 3 + .../details_workflow_regressions.md | 3 + .../unit_tests/expected_md/summary_error.md | 1 + .../expected_md/summary_nobaseline.md | 10 + .../expected_md/summary_noregressions.md | 17 + .../unit_tests/expected_md/summary_pending.md | 1 + .../expected_md/summary_pending_nobaseline.md | 1 + .../expected_md/summary_regressions.md | 25 + .../summary_workflow_noregressions.md | 14 + .../summary_workflow_regressions.md | 19 + ...h_benchmarks_run_id_contender_wo_base.json | 114 ++++ ...are_runs_some_baseline_some_contender.json | 82 +++ ...seline_some_contender_threshold_z_500.json | 82 
+++ .../GET_conbench_error_with_content.json | 7 + .../GET_conbench_error_without_content.json | 3 + .../GET_conbench_runs_contender_wo_base.json | 52 ++ .../GET_conbench_runs_sha_abc.json | 53 ++ .../GET_conbench_runs_sha_no_baseline.json | 53 ++ .../GET_conbench_runs_sha_no_runs.json | 4 + .../GET_conbench_runs_some_baseline.json | 52 ++ .../GET_conbench_runs_some_contender.json | 52 ++ .../GET_github_app_installations.json | 55 ++ .../GET_github_commits_abc_pulls.json | 523 ++++++++++++++++++ .../GET_github_commits_no_prs_pulls.json | 4 + .../mocked_responses/POST_conbench_login.json | 3 + ...hub_app_installations_1_access_tokens.json | 137 +++++ .../POST_github_check-runs.json | 93 ++++ .../POST_github_issues_1347_comments.json | 34 ++ .../POST_github_statuses_abc.json | 35 ++ .../POST_github_statuses_no_baseline.json | 35 ++ benchalerts/tests/unit_tests/mocks.py | 76 +++ benchalerts/tests/unit_tests/test_clients.py | 123 ++++ .../tests/unit_tests/test_parse_conbench.py | 190 +++++++ .../tests/unit_tests/test_talk_to_conbench.py | 51 ++ benchalerts/tests/unit_tests/test_version.py | 19 + .../tests/unit_tests/test_workflows.py | 156 ++++++ 60 files changed, 3979 insertions(+) create mode 100644 benchalerts/README.md create mode 100644 benchalerts/benchalerts/__init__.py create mode 100644 benchalerts/benchalerts/_version.py create mode 100644 benchalerts/benchalerts/clients.py create mode 100644 benchalerts/benchalerts/log.py create mode 100644 benchalerts/benchalerts/parse_conbench.py create mode 100644 benchalerts/benchalerts/talk_to_conbench.py create mode 100644 benchalerts/benchalerts/workflows.py create mode 100644 benchalerts/requirements-dev.txt create mode 100644 benchalerts/requirements.txt create mode 100644 benchalerts/setup.py create mode 100644 benchalerts/tests/integration_tests/README.md create mode 100644 benchalerts/tests/integration_tests/conftest.py create mode 100644 benchalerts/tests/integration_tests/test_clients_integration.py create mode 
100644 benchalerts/tests/integration_tests/test_talk_to_conbench_integration.py create mode 100644 benchalerts/tests/integration_tests/test_workflows_integration.py create mode 100644 benchalerts/tests/unit_tests/__init__.py create mode 100644 benchalerts/tests/unit_tests/conftest.py create mode 100644 benchalerts/tests/unit_tests/expected_md/details_error.md create mode 100644 benchalerts/tests/unit_tests/expected_md/details_noregressions.md create mode 100644 benchalerts/tests/unit_tests/expected_md/details_regressions.md create mode 100644 benchalerts/tests/unit_tests/expected_md/details_workflow_noregressions.md create mode 100644 benchalerts/tests/unit_tests/expected_md/details_workflow_regressions.md create mode 100644 benchalerts/tests/unit_tests/expected_md/summary_error.md create mode 100644 benchalerts/tests/unit_tests/expected_md/summary_nobaseline.md create mode 100644 benchalerts/tests/unit_tests/expected_md/summary_noregressions.md create mode 100644 benchalerts/tests/unit_tests/expected_md/summary_pending.md create mode 100644 benchalerts/tests/unit_tests/expected_md/summary_pending_nobaseline.md create mode 100644 benchalerts/tests/unit_tests/expected_md/summary_regressions.md create mode 100644 benchalerts/tests/unit_tests/expected_md/summary_workflow_noregressions.md create mode 100644 benchalerts/tests/unit_tests/expected_md/summary_workflow_regressions.md create mode 100644 benchalerts/tests/unit_tests/mocked_responses/GET_conbench_benchmarks_run_id_contender_wo_base.json create mode 100644 benchalerts/tests/unit_tests/mocked_responses/GET_conbench_compare_runs_some_baseline_some_contender.json create mode 100644 benchalerts/tests/unit_tests/mocked_responses/GET_conbench_compare_runs_some_baseline_some_contender_threshold_z_500.json create mode 100644 benchalerts/tests/unit_tests/mocked_responses/GET_conbench_error_with_content.json create mode 100644 benchalerts/tests/unit_tests/mocked_responses/GET_conbench_error_without_content.json create 
mode 100644 benchalerts/tests/unit_tests/mocked_responses/GET_conbench_runs_contender_wo_base.json create mode 100644 benchalerts/tests/unit_tests/mocked_responses/GET_conbench_runs_sha_abc.json create mode 100644 benchalerts/tests/unit_tests/mocked_responses/GET_conbench_runs_sha_no_baseline.json create mode 100644 benchalerts/tests/unit_tests/mocked_responses/GET_conbench_runs_sha_no_runs.json create mode 100644 benchalerts/tests/unit_tests/mocked_responses/GET_conbench_runs_some_baseline.json create mode 100644 benchalerts/tests/unit_tests/mocked_responses/GET_conbench_runs_some_contender.json create mode 100644 benchalerts/tests/unit_tests/mocked_responses/GET_github_app_installations.json create mode 100644 benchalerts/tests/unit_tests/mocked_responses/GET_github_commits_abc_pulls.json create mode 100644 benchalerts/tests/unit_tests/mocked_responses/GET_github_commits_no_prs_pulls.json create mode 100644 benchalerts/tests/unit_tests/mocked_responses/POST_conbench_login.json create mode 100644 benchalerts/tests/unit_tests/mocked_responses/POST_github_app_installations_1_access_tokens.json create mode 100644 benchalerts/tests/unit_tests/mocked_responses/POST_github_check-runs.json create mode 100644 benchalerts/tests/unit_tests/mocked_responses/POST_github_issues_1347_comments.json create mode 100644 benchalerts/tests/unit_tests/mocked_responses/POST_github_statuses_abc.json create mode 100644 benchalerts/tests/unit_tests/mocked_responses/POST_github_statuses_no_baseline.json create mode 100644 benchalerts/tests/unit_tests/mocks.py create mode 100644 benchalerts/tests/unit_tests/test_clients.py create mode 100644 benchalerts/tests/unit_tests/test_parse_conbench.py create mode 100644 benchalerts/tests/unit_tests/test_talk_to_conbench.py create mode 100644 benchalerts/tests/unit_tests/test_version.py create mode 100644 benchalerts/tests/unit_tests/test_workflows.py diff --git a/.github/workflows/actions.yml b/.github/workflows/actions.yml index 
f1cc4d095..17bf331f2 100755 --- a/.github/workflows/actions.yml +++ b/.github/workflows/actions.yml @@ -144,12 +144,19 @@ jobs: # in order of dependency pip install \ -e './benchclients/python[dev]' \ + -e './benchalerts[dev]' \ -e ./benchadapt/python \ -e ./benchconnect \ -e ./benchrun/python - name: Test benchclients run: | pytest -vv benchclients/python/tests + - name: Test benchalerts + env: + GITHUB_APP_ID: ${{ secrets.CONBENCH_APP_ID }} + GITHUB_APP_PRIVATE_KEY: ${{ secrets.CONBENCH_APP_PRIVATE_KEY }} + run: | + pytest -vv --log-level DEBUG benchalerts/tests - name: Test benchadapt run: | pytest -vv benchadapt/python/tests diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 1239f6788..2fd566b7d 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -9,6 +9,7 @@ on: type: choice options: - benchadapt + - benchalerts - benchclients - benchconnect - benchrun @@ -42,6 +43,8 @@ jobs: # Find package root dir if [[ '${{ inputs.package }}' = 'benchadapt' ]]; then pkg_root=./benchadapt/python + elif [[ '${{ inputs.package }}' = 'benchalerts' ]]; then + pkg_root=./benchalerts elif [[ '${{ inputs.package }}' = 'benchclients' ]]; then pkg_root=./benchclients/python elif [[ '${{ inputs.package }}' = 'benchconnect' ]]; then diff --git a/README.md b/README.md index e92e48424..aebace2ec 100755 --- a/README.md +++ b/README.md @@ -67,6 +67,7 @@ versions, so consider pinning packages to a specific version in your code. 
```bash pip install benchadapt +pip install benchalerts pip install benchclients pip install benchconnect pip install benchrun @@ -79,6 +80,7 @@ from git like so: ```bash pip install 'benchadapt@git+https://github.com/conbench/conbench.git@main#subdirectory=benchadapt/python' +pip install 'benchalerts@git+https://github.com/conbench/conbench.git@main#subdirectory=benchalerts' pip install 'benchclients@git+https://github.com/conbench/conbench.git@main#subdirectory=benchclients/python' pip install 'benchconnect@git+https://github.com/conbench/conbench.git@main#subdirectory=benchconnect' pip install 'benchrun@git+https://github.com/conbench/conbench.git@main#subdirectory=benchrun/python' diff --git a/benchalerts/README.md b/benchalerts/README.md new file mode 100644 index 000000000..e2b9dd048 --- /dev/null +++ b/benchalerts/README.md @@ -0,0 +1,110 @@ +# benchalerts + +A package to facilitate automated alerting based on Conbench data. + +## Overview + +This package is intended to make the following steps easier in CI. Before these steps, +it is assumed that an execution environment has performed a run of benchmarks and +submitted the results to Conbench. + +- Hit the Conbench API to understand if there were any: + - errors + - regressions (with configuration for how these regressions may be detected) + - improvements (with configuration for how these improvements may be detected) +- Format and submit a summary of these findings to various places (again, with + configuration): + - GitHub Status on a commit + - GitHub Check on a commit with a Markdown summary + +In the future, there will be more places to submit alerts/reports/summaries, and more +configuration possible. 
+ +Currently, the way to configure these workflows in CI is to create and run a Python +script that imports this package and runs a workflow, like so: + +```python +import os +from benchalerts import update_github_check_based_on_regressions + +update_github_check_based_on_regressions( + contender_sha=os.environ["GITHUB_SHA"], repo="my_org/my_repo" +) +``` + +See the docstrings of each function for more details on how to configure the workflow, +including how to set up the required environment variables. + +## GitHub App Authentication + +The preferred method that `benchalerts` recommends for authenticating and posting to +GitHub is to use a machine user called a [GitHub +App](https://docs.github.com/en/developers/apps/getting-started-with-apps/about-apps). +Using an App will allow you to post using a "bot" entity without taking up a seat in +your organization, and will allow you to use the extra features of the [Checks +API](https://docs.github.com/en/rest/guides/getting-started-with-the-checks-api). These +features give much more context when analyzing benchmark runs. + +Each Conbench server must create its own GitHub App for security reasons. To do so, +follow these instructions. + +### Creating a GitHub App to work with `benchalerts` + +1. Go to the official [GitHub + instructions](https://docs.github.com/en/developers/apps/building-github-apps/creating-a-github-app) + for creating an App. + - If you are an admin of your GitHub organization, follow the instructions for "a + GitHub App owned by an organization." This method is preferred because the org + will own the app instead of a user, who may not be part of the org in the + future. (This will not affect the identity of the bot that posts to GitHub, just + the ownership of the App.) + - If not, you can follow the instructions for "a GitHub App owned by a personal + account." You will send an installation request to org admins after creating the + app. 
You can always transfer the ownership of the app to an org later. +1. For the App Name, use `conbench-`. +1. For the Homepage URL, use the link to your Conbench server. +1. Ignore the Callback URL and Setup URL. +1. Uncheck the "Active" box under Webhook. Since this App will not be an active service, + we don't need GitHub to push webhook events to the App. +1. For full use of this package, the App requires the following permissions: + - Repository > Checks > Read and Write + - Repository > Commit statuses > Read and Write + - Repository > Pull requests > Read and Write +1. After creating the App, save the App ID for later. +1. For the App's photo, use [this + one](https://avatars.githubusercontent.com/u/61704591). +1. In the App Settings, scroll down to Private Keys and generate a private key. This + will download a file to your computer. Treat the contents of this file like a + password. +1. IMPORTANT: After creation, go to + `https://github.com/apps//installations/new` to install the new App + on the repos you'd like it to be able to post to. You must be a member of the + organization to install the App on. If you are not an admin, an email request will + be sent to org admins, which must be approved. + +### Running `benchalerts` as the GitHub App you created + +All that's necessary to use `benchalerts` workflows that post to GitHub as your App is +to set the following environment variables: + +- `GITHUB_APP_ID` - the App ID from above +- `GITHUB_APP_PRIVATE_KEY` - the _contents_ of the private key file from above. This is + a multiline file, so ensure you quote the contents correctly if necessary. + +Since `benchalerts` is usually used in CI, it's recommended to set these two environment +variables in your CI pipeline as secret env vars. Most CI systems have a mechanism for +doing this. For security reasons, do not check these values into version control. + +## License information + +Copyright (c) 2022, Voltron Data. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); you may not use this +file except in compliance with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software distributed under +the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +KIND, either express or implied. See the License for the specific language governing +permissions and limitations under the License. diff --git a/benchalerts/benchalerts/__init__.py b/benchalerts/benchalerts/__init__.py new file mode 100644 index 000000000..1b8a879d1 --- /dev/null +++ b/benchalerts/benchalerts/__init__.py @@ -0,0 +1,25 @@ +# Copyright (c) 2022, Voltron Data. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from ._version import __version__ +from .workflows import ( + update_github_check_based_on_regressions, + update_github_status_based_on_regressions, +) + +__all__ = [ + "__version__", + "update_github_check_based_on_regressions", + "update_github_status_based_on_regressions", +] diff --git a/benchalerts/benchalerts/_version.py b/benchalerts/benchalerts/_version.py new file mode 100644 index 000000000..46e5a3a5e --- /dev/null +++ b/benchalerts/benchalerts/_version.py @@ -0,0 +1,18 @@ +# Copyright (c) 2022, Voltron Data. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Please do not add anything else to this file except __version__ + +__version__ = "0.4.0" diff --git a/benchalerts/benchalerts/clients.py b/benchalerts/benchalerts/clients.py new file mode 100644 index 000000000..21585ef3b --- /dev/null +++ b/benchalerts/benchalerts/clients.py @@ -0,0 +1,400 @@ +# Copyright (c) 2022, Voltron Data. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import abc +import datetime +import enum +import os +import textwrap +from json import dumps +from typing import Optional + +import jwt +import requests +from requests.adapters import HTTPAdapter +from urllib3.util.retry import Retry + +from .log import fatal_and_log, log + + +class _BaseClient(abc.ABC): + """A client to interact with an API. + + Parameters + ---------- + adapter + A requests adapter to mount to the requests session. If not given, one will be + created with a backoff retry strategy. 
+ """ + + base_url: str + timeout_s = 10 + + def __init__(self, adapter: Optional[HTTPAdapter]): + if not adapter: + retry_strategy = Retry( + total=5, + status_forcelist=frozenset((429, 502, 503, 504)), + backoff_factor=4, # will retry in 2, 4, 8, 16, 32 seconds + ) + adapter = HTTPAdapter(max_retries=retry_strategy) + + self.session = requests.Session() + self.session.mount("https://", adapter) + + def get(self, path: str, params: Optional[dict] = None) -> dict: + url = self.base_url + path + log.debug(f"GET {url} {params=}") + res = self.session.get(url=url, params=params, timeout=self.timeout_s) + self._maybe_raise(res) + return res.json() + + def post(self, path: str, json: dict = None) -> Optional[dict]: + json = json or {} + url = self.base_url + path + log.debug(f"POST {url} {dumps(json)}") + res = self.session.post(url=url, json=json, timeout=self.timeout_s) + self._maybe_raise(res) + if res.content: + return res.json() + + @staticmethod + def _maybe_raise(res: requests.Response): + try: + res.raise_for_status() + except requests.HTTPError as e: + try: + res_content = e.response.content.decode() + except AttributeError: + res_content = e.response.content + log.error(f"Response content: {res_content}") + raise + + +class GitHubAppClient(_BaseClient): + """A client to interact with a GitHub App. + + Parameters + ---------- + adapter + A requests adapter to mount to the requests session. If not given, one will be + created with a backoff retry strategy. + + Environment variables + --------------------- + GITHUB_APP_ID + The numeric GitHub App ID you can get from its settings page. + GITHUB_APP_PRIVATE_KEY + The full contents of the private key file downloaded from the App's settings + page. 
+ """ + + def __init__(self, adapter: Optional[HTTPAdapter] = None): + app_id = os.getenv("GITHUB_APP_ID") + if not app_id: + fatal_and_log("Environment variable GITHUB_APP_ID not found") + + private_key = os.getenv("GITHUB_APP_PRIVATE_KEY") + if not private_key: + fatal_and_log("Environment variable GITHUB_APP_PRIVATE_KEY not found") + + super().__init__(adapter=adapter) + encoded_jwt = self._encode_jwt(app_id=app_id, private_key=private_key) + self.session.headers = {"Authorization": f"Bearer {encoded_jwt}"} + self.base_url = "https://api.github.com/app" + + @staticmethod + def _encode_jwt(app_id: str, private_key: str) -> str: + """Create, sign, and encode a JSON web token to use for GitHub App endpoints.""" + payload = { + "iss": app_id, + "iat": datetime.datetime.utcnow() - datetime.timedelta(minutes=1), + "exp": datetime.datetime.utcnow() + datetime.timedelta(minutes=10), + } + encoded_jwt = jwt.encode(payload=payload, key=private_key, algorithm="RS256") + return encoded_jwt + + def get_app_access_token(self) -> str: + """Authenticate as the GitHub App and generate an app installation access token. + + The token lasts for 1 hour, which should be plenty of time to do anything this + package needs to do. + + Returns + ------- + str + A temporary API token to use for the GitHub API endpoints that the app has + permission to access. + """ + # We tell developers to create a new app for each organization, so they can + # control the private key. So there should be exactly 1 installation here. + # (Note: 1 installation could have multiple repos in the same organization.) 
+ installations = self.get("/installations") + install_id = installations[0]["id"] + + token_info = self.post(f"/installations/{install_id}/access_tokens") + return token_info["token"] + + +# used as inputs to some GitHubRepoClient methods +class StatusState(str, enum.Enum): + ERROR = "error" + FAILURE = "failure" + PENDING = "pending" + SUCCESS = "success" + + +# used as inputs to some GitHubRepoClient methods +class CheckStatus(str, enum.Enum): + # statuses + QUEUED = "queued" + IN_PROGRESS = "in_progress" + # conclusions + ACTION_REQUIRED = "action_required" + CANCELLED = "cancelled" + FAILURE = "failure" + NEUTRAL = "neutral" + SUCCESS = "success" + SKIPPED = "skipped" + TIMED_OUT = "timed_out" + + +class GitHubRepoClient(_BaseClient): + """A client to interact with a GitHub repo. + + You may authenticate with the GitHub API using a GitHub Personal Access Token or a + GitHub App. The correct environment variables must be set depending on which method + of authentication you're using. If all are set, the App method will be used. + + Parameters + ---------- + repo + The repo name, in the form 'owner/repo'. + adapter + A requests adapter to mount to the requests session. If not given, one will be + created with a backoff retry strategy. + + Environment variables + --------------------- + GITHUB_APP_ID + The numeric GitHub App ID you can get from its settings page. Only used for + GitHub App authentication. + GITHUB_APP_PRIVATE_KEY + The full contents of the private key file downloaded from the App's settings + page. Only used for GitHub App authentication. + GITHUB_API_TOKEN + A GitHub API token with ``repo`` access. Only used for Personal Access Token + authentication. 
+ """ + + def __init__(self, repo: str, adapter: Optional[HTTPAdapter] = None): + if os.getenv("GITHUB_APP_ID") or os.getenv("GITHUB_APP_PRIVATE_KEY"): + log.info("Attempting to authenticate as a GitHub App.") + app_client = GitHubAppClient(adapter=adapter) + token = app_client.get_app_access_token() + else: + token = os.getenv("GITHUB_API_TOKEN") + if not token: + fatal_and_log("Environment variable GITHUB_API_TOKEN not found.") + + super().__init__(adapter=adapter) + self.session.headers = {"Authorization": f"Bearer {token}"} + self.base_url = f"https://api.github.com/repos/{repo}" + + def create_pull_request_comment( + self, + comment: str, + *, + pull_number: Optional[int] = None, + commit_sha: Optional[str] = None, + ): + """Create a comment on a pull request, specified either by pull request number + or commit SHA. + + Parameters + ---------- + comment + The comment text. + pull_number + The number of the pull request. Specify either this or ``commit_sha``. + commit_sha + The SHA of a commit associated with the pull request. Specify either this + or ``pull_number``. + """ + if not pull_number and not commit_sha: + fatal_and_log("pull_number and commit_sha are both missing") + + if commit_sha: + pull_numbers = [ + pull["number"] for pull in self.get(f"/commits/{commit_sha}/pulls") + ] + if len(pull_numbers) != 1: + fatal_and_log( + "Need exactly 1 pull request associated with commit " + f"'{commit_sha}'. Found {pull_numbers}." + ) + pull_number = pull_numbers[0] + + log.info( + f"Posting the following message to pull request #{pull_number}:\n\n" + + comment + ) + return self.post(f"/issues/{pull_number}/comments", json={"body": comment}) + + def update_commit_status( + self, + commit_sha: str, + title: str, + description: str, + state: StatusState, + details_url: Optional[str] = None, + ) -> dict: + """Update the GitHub status of a commit. + + A commit may have many statuses, each with their own title. 
Updating a previous + status with the same title for a given commit will result in overwriting that + status on that commit. + + Parameters + ---------- + commit_sha + The 40-character SHA of the commit to update. + title + The title of the status. Subsequent updates with the same title will update + the same status. + description + The short description of the status. + state + The overall status of the commit. Must be one of the StatusState enum + values. + details_url + A URL to be linked to when clicking on status Details. Default None. + + Returns + ------- + dict + GitHub's details about the new status. + """ + if not isinstance(state, StatusState): + fatal_and_log("state must be a StatusState", etype=TypeError) + + json = { + "state": state.value, + "description": textwrap.shorten(description, 140), + "context": title, + } + if details_url: + json["target_url"] = details_url + + return self.post(f"/statuses/{commit_sha}", json=json) + + def update_check( + self, + name: str, + commit_sha: str, + status: CheckStatus, + title: Optional[str] = None, + summary: Optional[str] = None, + details: Optional[str] = None, + details_url: Optional[str] = None, + ): + """Adds a new (or updates an existing) GitHub Check on a commit. + + A GitHub Check is a more fully-featured commit status, but you must be + authenticated as a GitHub App to update checks. Subsequent uses of this method + with the same ``name`` will overwrite the previous check. + + Parameters + ---------- + name + The name of the check. Subsequent updates with the same name will overwrite + the previous check. + commit_sha + The 40-character SHA of the commit to update. + status + The overall check status. Must be one of the CheckStatus enum values. If + it's QUEUED or IN_PROGRESS, the "started_at" field will be sent in the + payload with the current time in UTC. If it's another value, the + "completed_at" field will be sent instead. + title + The short title of the check results. Default None. 
If supplied, summary + must be supplied. + summary + A longer summary of the check results. Supports Markdown. Default None. If + supplied, title must be supplied. + details + Details about the check results. Supports Markdown. Default None. + details_url + A URL to be linked to when clicking on check Details. Default None. + + Returns + ------- + dict + GitHub's details about the new status. + """ + json = {"name": name, "head_sha": commit_sha} + + if status in [CheckStatus.QUEUED, CheckStatus.IN_PROGRESS]: + json["status"] = status.value + json["started_at"] = datetime.datetime.utcnow().isoformat() + "Z" + elif isinstance(status, CheckStatus): + json["conclusion"] = status.value + json["completed_at"] = datetime.datetime.utcnow().isoformat() + "Z" + else: + fatal_and_log("status must be a CheckStatus", etype=TypeError) + + if title: + json["output"] = {"title": title, "summary": summary} + if details: + json["output"]["text"] = details + + if details_url: + json["details_url"] = details_url + + return self.post("/check-runs", json=json) + + +class ConbenchClient(_BaseClient): + """A client to interact with a Conbench server. + + Parameters + ---------- + adapter + A requests adapter to mount to the requests session. If not given, one will be + created with a backoff retry strategy. + + Environment variables + --------------------- + CONBENCH_URL + The URL of the Conbench server. Required. + CONBENCH_EMAIL + The email to use for Conbench login. Only required if the server is private. + CONBENCH_PASSWORD + The password to use for Conbench login. Only required if the server is private. 
+ """ + + def __init__(self, adapter: Optional[HTTPAdapter] = None): + url = os.getenv("CONBENCH_URL") + if not url: + fatal_and_log("Environment variable CONBENCH_URL not found") + + super().__init__(adapter=adapter) + self.base_url = url + "/api" + + login_creds = { + "email": os.getenv("CONBENCH_EMAIL"), + "password": os.getenv("CONBENCH_PASSWORD"), + } + if login_creds["email"] and login_creds["password"]: + self.post("/login/", json=login_creds) diff --git a/benchalerts/benchalerts/log.py b/benchalerts/benchalerts/log.py new file mode 100644 index 000000000..93c2a0f18 --- /dev/null +++ b/benchalerts/benchalerts/log.py @@ -0,0 +1,26 @@ +# Copyright (c) 2022, Voltron Data. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging + +logging.basicConfig( + format="%(levelname)s [%(asctime)s] %(message)s", level=logging.INFO +) +log = logging.getLogger(__name__) + + +def fatal_and_log(msg: str, etype: BaseException = ValueError): + """If an error occurs, log the message and raise an exception.""" + log.error(msg) + raise etype(msg) diff --git a/benchalerts/benchalerts/parse_conbench.py b/benchalerts/benchalerts/parse_conbench.py new file mode 100644 index 000000000..eb77ada89 --- /dev/null +++ b/benchalerts/benchalerts/parse_conbench.py @@ -0,0 +1,216 @@ +# Copyright (c) 2022, Voltron Data. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import textwrap +from dataclasses import dataclass +from typing import List + +from .clients import CheckStatus +from .talk_to_conbench import RunComparison + + +def _clean(text: str) -> str: + """Clean text so it displays nicely as GitHub Markdown.""" + return textwrap.fill(textwrap.dedent(text), 10000).replace(" ", "\n\n").strip() + + +@dataclass +class _CaseInfo: + run_id: str + run_reason: str + run_time: str + run_link: str + case_name: str + case_link: str + + @property + def Run_Reason(self): + return self.run_reason.title() + + +def _list_cases(case_infos: List[_CaseInfo]) -> str: + """Create a Markdown list of case information.""" + out = "" + previous_run_id = "" + + for case in case_infos: + if case.run_id != previous_run_id: + out += f"\n\n- {case.Run_Reason} Run at [{case.run_time}]({case.run_link})" + previous_run_id = case.run_id + out += f"\n - [{case.case_name}]({case.case_link})" + + if out: + out += "\n\n" + + return out + + +def benchmarks_with_errors(comparisons: List[RunComparison]) -> List[_CaseInfo]: + """Find information about benchmark cases that had errors.""" + out = [] + + for comparison in comparisons: + if comparison.compare_results: + out += [ + _CaseInfo( + run_id=comparison.contender_id, + run_reason=comparison.contender_reason, + run_time=comparison.contender_datetime, + run_link=comparison.compare_link, + case_name=case["benchmark"], + case_link=comparison.case_link(case["contender_id"]), + ) + for case in comparison.compare_results + if case["contender_error"] + ] + elif comparison.benchmark_results: + out += [ + 
_CaseInfo( + run_id=comparison.contender_id, + run_reason=comparison.contender_reason, + run_time=comparison.contender_datetime, + run_link=comparison.contender_link, + case_name=case["tags"].get("name", str(case["tags"])), + case_link=comparison.case_link(case["id"]), + ) + for case in comparison.benchmark_results + if case["error"] + ] + + return out + + +def benchmarks_with_z_regressions(comparisons: List[RunComparison]) -> List[_CaseInfo]: + """Find information about benchmark cases whose z-scores were extreme enough to + constitute a regression. + """ + out = [] + + for comparison in comparisons: + if comparison.compare_results: + out += [ + _CaseInfo( + run_id=comparison.contender_id, + run_reason=comparison.contender_reason, + run_time=comparison.contender_datetime, + run_link=comparison.compare_link, + case_name=case["benchmark"], + case_link=comparison.case_link(case["contender_id"]), + ) + for case in comparison.compare_results + if case["contender_z_regression"] + ] + + return out + + +def regression_summary( + comparisons: List[RunComparison], warn_if_baseline_isnt_parent: bool +) -> str: + """Generate a Markdown summary of what happened regarding errors and regressions.""" + sha = comparisons[0].contender_info["commit"]["sha"][:8] + errors = benchmarks_with_errors(comparisons) + regressions = benchmarks_with_z_regressions(comparisons) + summary = "" + + if errors: + summary += _clean( + """ + ## Benchmarks with errors + + These are errors that were caught while running the benchmarks. You can + click the link next to each case to go to the Conbench entry for that + benchmark, which might have more information about what the error was. + """ + ) + summary += _list_cases(errors) + + summary += "## Benchmarks with performance regressions\n\n" + + if not any(comparison.baseline_info for comparison in comparisons): + summary += _clean( + f""" + Conbench could not find a baseline run for contender commit `{sha}`. 
A + baseline run needs to be on the default branch in the same repository, with + the same hardware and context, and have at least one of the same benchmark + cases. + """ + ) + return summary + + summary += _clean( + f""" + Contender commit `{sha}` had {len(regressions)} performance regression(s) + compared to its baseline commit. + """ + ) + summary += "\n\n" + + if regressions: + summary += "### Benchmarks with regressions:" + summary += _list_cases(regressions) + + if ( + any(not comparison.baseline_is_parent for comparison in comparisons) + and warn_if_baseline_isnt_parent + ): + summary += _clean( + """ + ### Note + + The baseline commit was not the immediate parent of the contender commit. + See the link below for details. + """ + ) + + return summary + + +def regression_details(comparisons: List[RunComparison]) -> str: + """Generate Markdown details of what happened regarding regressions.""" + if not any(comparison.baseline_info for comparison in comparisons): + return None + + z_score_threshold = comparisons[0].compare_results[0]["threshold_z"] + details = _clean( + f""" + Conbench has details about {len(comparisons)} total run(s) on this commit. + + This report was generated using a z-score threshold of {z_score_threshold}. A + regression is defined as a benchmark exhibiting a z-score higher than the + threshold in the "bad" direction (e.g. down for iterations per second; up for + total time taken). 
+ """ + ) + return details + + +def regression_check_status( + comparisons: List[RunComparison], +) -> CheckStatus: + """Return a different status based on errors and regressions.""" + regressions = benchmarks_with_z_regressions(comparisons) + + if any(comparison.has_errors for comparison in comparisons): + # has errors + return CheckStatus.ACTION_REQUIRED + if not any(comparison.baseline_info for comparison in comparisons): + # no baseline runs found + return CheckStatus.SKIPPED + elif regressions: + # at least one regression + return CheckStatus.FAILURE + else: + # no regressions + return CheckStatus.SUCCESS diff --git a/benchalerts/benchalerts/talk_to_conbench.py b/benchalerts/benchalerts/talk_to_conbench.py new file mode 100644 index 000000000..23459abb2 --- /dev/null +++ b/benchalerts/benchalerts/talk_to_conbench.py @@ -0,0 +1,197 @@ +# Copyright (c) 2022, Voltron Data. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass +from typing import List, Optional + +from .clients import ConbenchClient +from .log import fatal_and_log, log + + +@dataclass +class RunComparison: + """Track info about a comparison between a contender run and its baseline. Used for + outputting information from get_comparison_to_baseline(). + + Parameters + ---------- + contender_info + The dict returned from Conbench when hitting /runs/{contender_run_id}. Contains + info about the run's ID, commit, errors, links, etc. 
+ baseline_info + The dict returned from Conbench when hitting /runs/{baseline_run_id}, if a + baseline run exists for this contender run. Contains info about the run's ID, + commit, errors, links, etc. + compare_results + The list returned from Conbench when hitting + /compare/runs/{baseline_run_id}...{contender_run_id}, if a baseline run exists + for this contender run. Contains a comparison for every case run to its + baseline, including the statistics and regression analysis. + benchmark_results + The list returned from Conbench when hitting + /benchmarks?run_id={contender_run_id}, if the contender run has errors. Contains + info about each case in the contender run, including statistics and tracebacks. + Only used when a baseline run doesn't exist, because otherwise all this + information is already in the compare_results. + """ + + contender_info: dict + baseline_info: Optional[dict] = None + compare_results: Optional[List[dict]] = None + benchmark_results: Optional[List[dict]] = None + + @property + def baseline_is_parent(self) -> Optional[bool]: + """Whether the baseline run is on a commit that's the immediate parent of the + contender commit. 
+ """ + if self.baseline_info: + return ( + self.baseline_info["commit"]["sha"] + == self.contender_info["commit"]["parent_sha"] + ) + + @property + def contender_reason(self) -> str: + """The contender run reason.""" + return self.contender_info["reason"] + + @property + def contender_datetime(self) -> str: + """The contender run datetime.""" + dt: str = self.contender_info["timestamp"] + return dt.replace("T", " ") + + @property + def contender_link(self) -> str: + """The link to the contender run page in the webapp.""" + return f"{self._app_url}/runs/{self.contender_id}" + + @property + def compare_link(self) -> Optional[str]: + """The link to the run comparison page in the webapp.""" + if self._compare_path: + # self._compare_path has a leading slash already + return f"{self._app_url}{self._compare_path}" + + def case_link(self, case_id: str) -> str: + """Get the link to a specific benchmark case result in the webapp.""" + return f"{self._app_url}/benchmarks/{case_id}" + + @property + def has_errors(self) -> bool: + """Whether this run has any benchmark errors.""" + return self.contender_info["has_errors"] + + @property + def contender_id(self) -> str: + """The contender run_id.""" + return self.contender_info["id"] + + @property + def _baseline_id(self) -> Optional[str]: + """The baseline run_id.""" + if self.baseline_info: + return self.baseline_info["id"] + + @property + def _app_url(self) -> str: + """The base URL to use for links to the webapp.""" + self_link: str = self.contender_info["links"]["self"] + return self_link.rsplit("/api/", 1)[0] + + @property + def _compare_path(self) -> Optional[str]: + """The API path to get comparisons between the baseline and contender.""" + if self._baseline_id: + return f"/compare/runs/{self._baseline_id}...{self.contender_id}/" + + @property + def _baseline_path(self) -> Optional[str]: + """The API path to get the baseline info.""" + baseline_link: Optional[str] = self.contender_info["links"].get("baseline") + if 
baseline_link: + return baseline_link.rsplit("/api", 1)[-1] + + +def get_comparison_to_baseline( + conbench: ConbenchClient, + contender_sha: str, + z_score_threshold: Optional[float] = None, +) -> List[RunComparison]: + """Get benchmark comparisons between the given contender commit and its baseline + commit. + + The baseline commit is defined by conbench, and it's typically the most recent + ancestor of the contender commit that's on the default branch. + + Parameters + ---------- + conbench + A ConbenchClient instance. + contender_sha + The commit SHA of the contender commit to compare. Needs to match EXACTLY what + conbench has stored; typically 40 characters. It can't be a shortened version of + the SHA. + z_score_threshold + The (positive) z-score threshold to send to the conbench compare endpoint. + Benchmarks with a z-score more extreme than this threshold will be marked as + regressions or improvements in the result. Default is to use whatever conbench + uses for default. + + Returns + ------- + List[RunComparison] + Information about each run associated with the contender commit, and a + comparison to its baseline run if that exists. + """ + out_list = [] + contender_run_ids = [ + run["id"] for run in conbench.get("/runs/", params={"sha": contender_sha}) + ] + if not contender_run_ids: + fatal_and_log( + f"Contender commit '{contender_sha}' doesn't have any runs in conbench." 
+ ) + + log.info(f"Getting comparisons from {len(contender_run_ids)} run(s)") + for run_id in contender_run_ids: + run_comparison = RunComparison(contender_info=conbench.get(f"/runs/{run_id}/")) + + if run_comparison._baseline_path: + run_comparison.baseline_info = conbench.get(run_comparison._baseline_path) + + compare_params = ( + {"threshold_z": z_score_threshold} if z_score_threshold else None + ) + run_comparison.compare_results = conbench.get( + run_comparison._compare_path, params=compare_params + ) + + else: + log.warning( + "Conbench could not find a baseline run for the contender run " + f"{run_id}. A baseline run needs to be on the default branch in the " + "same repository, with the same hardware and context, and have at " + "least one of the same benchmark cases." + ) + if run_comparison.has_errors: + # get more information so we have more details about errors + run_comparison.benchmark_results = conbench.get( + "/benchmarks/", params={"run_id": run_id} + ) + + out_list.append(run_comparison) + + return out_list diff --git a/benchalerts/benchalerts/workflows.py b/benchalerts/benchalerts/workflows.py new file mode 100644 index 000000000..bc9c38bbf --- /dev/null +++ b/benchalerts/benchalerts/workflows.py @@ -0,0 +1,286 @@ +# Copyright (c) 2022, Voltron Data. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +from typing import Optional + +from .clients import CheckStatus, ConbenchClient, GitHubRepoClient, StatusState +from .log import log +from .parse_conbench import ( + _clean, + benchmarks_with_z_regressions, + regression_check_status, + regression_details, + regression_summary, +) +from .talk_to_conbench import get_comparison_to_baseline + + +def update_github_status_based_on_regressions( + contender_sha: str, + z_score_threshold: Optional[float] = None, + repo: Optional[str] = None, + github: Optional[GitHubRepoClient] = None, + conbench: Optional[ConbenchClient] = None, +) -> dict: + """Grab the benchmark result comparisons for a given contender commit, and post to + GitHub whether there were any regressions, in the form of a commit status. + + Parameters + ---------- + contender_sha + The SHA of the contender commit to compare. Needs to match EXACTLY what + conbench has stored; typically 40 characters. It can't be a shortened + version of the SHA. + z_score_threshold + The (positive) z-score threshold. Benchmarks with a z-score more extreme than + this threshold will be marked as regressions. Default is to use whatever + conbench uses for default. + repo + The repo name to post the status to, in the form 'owner/repo'. Either provide + this or ``github``. + github + A GitHubRepoClient instance. Either provide this or ``repo``. + conbench + A ConbenchClient instance. If not given, one will be created using the standard + environment variables. + + Environment variables + --------------------- + BUILD_URL + The URL of the build running this code. If provided, the GitHub status will link + to the build when there's an error in this workflow. + GITHUB_APP_ID + The ID of a GitHub App that has been set up according to this package's + instructions and installed to your repo. Recommended over GITHUB_API_TOKEN. Only + required if a GitHubRepoClient is not provided. 
+ GITHUB_APP_PRIVATE_KEY + The private key file contents of a GitHub App that has been set up according to + this package's instructions and installed to your repo. Recommended over + GITHUB_API_TOKEN. Only required if GitHubRepoClient is not provided. + GITHUB_API_TOKEN + A GitHub Personal Access Token with the ``repo:status`` permission. Only + required if not going with GitHub App authentication and if a GitHubRepoClient + is not provided. + CONBENCH_URL + The URL of the Conbench server. Only required if a ConbenchClient is not + provided. + CONBENCH_EMAIL + The email to use for Conbench login. Only required if a ConbenchClient is not + provided and the server is private. + CONBENCH_PASSWORD + The password to use for Conbench login. Only required if a ConbenchClient is not + provided and the server is private. + + Returns + ------- + dict + GitHub's details about the new status. + """ + build_url = os.getenv("BUILD_URL") + github = github or GitHubRepoClient(repo=repo) + + def update_status(description, state, details_url): + """Shortcut for updating the "conbench" status on the given SHA, with debug + logging. + """ + res = github.update_commit_status( + commit_sha=contender_sha, + title="conbench", + description=description, + state=state, + details_url=details_url, + ) + log.debug(res) + return res + + # mark the task as pending + update_status( + description="Finding possible regressions", + state=StatusState.PENDING, + details_url=build_url, + ) + + # If anything above this line fails, we can't tell github that it failed. + # If anything in here fails, we can! 
+ try: + conbench = conbench or ConbenchClient() + comparisons = get_comparison_to_baseline( + conbench, contender_sha, z_score_threshold + ) + regressions = benchmarks_with_z_regressions(comparisons) + log.info(f"Found the following regressions: {regressions}") + + if not any(comparison.baseline_info for comparison in comparisons): + desc = "Could not find any baseline runs to compare to" + state = StatusState.SUCCESS + elif regressions: + desc = f"There were {len(regressions)} benchmark regressions in this commit" + state = StatusState.FAILURE + else: + desc = "There were no benchmark regressions in this commit" + state = StatusState.SUCCESS + + # point to the homepage table filtered to runs of this commit + url = f"{os.environ['CONBENCH_URL']}/?search={contender_sha}" + return update_status(description=desc, state=state, details_url=url) + + except Exception as e: + update_status( + description=f"Failed finding regressions: {e}", + state=StatusState.ERROR, + details_url=build_url, + ) + log.error(f"Updated status with error: {e}") + raise + + +def update_github_check_based_on_regressions( + contender_sha: str, + z_score_threshold: Optional[float] = None, + warn_if_baseline_isnt_parent: bool = True, + repo: Optional[str] = None, + github: Optional[GitHubRepoClient] = None, + conbench: Optional[ConbenchClient] = None, +) -> dict: + """Grab the benchmark result comparisons for a given contender commit, and post to + GitHub whether there were any regressions, in the form of a commit check. + + You must use GitHub App authentication to use this workflow. + + Parameters + ---------- + contender_sha + The SHA of the contender commit to compare. Needs to match EXACTLY what + conbench has stored; typically 40 characters. It can't be a shortened + version of the SHA. + z_score_threshold + The (positive) z-score threshold. Benchmarks with a z-score more extreme than + this threshold will be marked as regressions. Default is to use whatever + conbench uses for default. 
+ warn_if_baseline_isnt_parent + If True, will add a warning to all reports generated where the baseline commit + isn't the contender commit's direct parent. This is good to leave True for + workflows run on the default branch, but might be noisy for workflows run on + pull request commits. + repo + The repo name to post the status to, in the form 'owner/repo'. Either provide + this or ``github``. + github + A GitHubRepoClient instance. Either provide this or ``repo``. + conbench + A ConbenchClient instance. If not given, one will be created using the standard + environment variables. + + Environment variables + --------------------- + BUILD_URL + The URL of the build running this code. If provided, the GitHub Check will link + to the build when there's an error in this workflow. + GITHUB_APP_ID + The ID of a GitHub App that has been set up according to this package's + instructions and installed to your repo. Only required if a GitHubRepoClient is + not provided. + GITHUB_APP_PRIVATE_KEY + The private key file contents of a GitHub App that has been set up according to + this package's instructions and installed to your repo. Only required if + GitHubRepoClient is not provided. + CONBENCH_URL + The URL of the Conbench server. Only required if a ConbenchClient is not + provided. + CONBENCH_EMAIL + The email to use for Conbench login. Only required if a ConbenchClient is not + provided and the server is private. + CONBENCH_PASSWORD + The password to use for Conbench login. Only required if a ConbenchClient is not + provided and the server is private. + + Returns + ------- + dict + GitHub's details about the new check. + """ + build_url = os.getenv("BUILD_URL") + github = github or GitHubRepoClient(repo=repo) + + def update_check(status, title, summary, details, details_url): + """Shortcut for updating the "Conbench performance report" check on the given + SHA, with debug logging. 
+ """ + res = github.update_check( + name="Conbench performance report", + commit_sha=contender_sha, + status=status, + title=title, + summary=summary, + details=details, + details_url=details_url, + ) + log.debug(res) + return res + + # mark the task as pending + update_check( + status=CheckStatus.IN_PROGRESS, + title="Analyzing performance", + summary=f"Analyzing `{contender_sha[:8]}` for regressions...", + details=None, + details_url=build_url, + ) + + # If anything above this line fails, we can't tell github that it failed. + # If anything in here fails, we can! + try: + conbench = conbench or ConbenchClient() + comparisons = get_comparison_to_baseline( + conbench, contender_sha, z_score_threshold + ) + regressions = benchmarks_with_z_regressions(comparisons) + + status = regression_check_status(comparisons) + summary = regression_summary(comparisons, warn_if_baseline_isnt_parent) + details = regression_details(comparisons) + if any(comparison.has_errors for comparison in comparisons): + title = "Some benchmarks had errors" + else: + title = f"Found {len(regressions)} regression(s)" + # point to the homepage table filtered to runs of this commit + url = f"{os.environ['CONBENCH_URL']}/?search={contender_sha}" + + return update_check( + status=status, + title=title, + summary=summary, + details=details, + details_url=url, + ) + + except Exception as e: + summary = _clean( + """ + The CI build running the regression analysis failed. This does not + necessarily mean this commit has benchmark regressions, but there is an + error that must be resolved before we can find out. + """ + ) + details = f"Error: `{repr(e)}`\n\nSee build link below." 
+ update_check( + status=CheckStatus.NEUTRAL, + title="Error when analyzing performance", + summary=summary, + details=details, + details_url=build_url, + ) + log.error(f"Updated status with error: {e}") + raise diff --git a/benchalerts/requirements-dev.txt b/benchalerts/requirements-dev.txt new file mode 100644 index 000000000..7ed58c30d --- /dev/null +++ b/benchalerts/requirements-dev.txt @@ -0,0 +1,15 @@ +# Copyright (c) 2022, Voltron Data. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +pytest diff --git a/benchalerts/requirements.txt b/benchalerts/requirements.txt new file mode 100644 index 000000000..42be78bc0 --- /dev/null +++ b/benchalerts/requirements.txt @@ -0,0 +1,17 @@ +# Copyright (c) 2022, Voltron Data. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +requests +urllib3 +pyjwt[crypto] diff --git a/benchalerts/setup.py b/benchalerts/setup.py new file mode 100644 index 000000000..66a08d6e7 --- /dev/null +++ b/benchalerts/setup.py @@ -0,0 +1,61 @@ +# Copyright (c) 2022, Voltron Data. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pathlib +from typing import List + +import setuptools + + +def read_requirements_file(filepath: pathlib.Path) -> List[str]: + """Parse a requirements.txt file into a list of package requirements""" + with open(filepath, "r") as f: + requirements = [ + line.strip() for line in f if line.strip() and not line.startswith("#") + ] + return requirements + + +pkg_root = pathlib.Path(__file__).parent + +__version__ = "" +with open(pkg_root / "benchalerts" / "_version.py", "r") as f: + exec(f.read()) # only overwrites the __version__ variable + +with open(pkg_root / "README.md", "r") as f: + long_description = f.read() + +base_requirements = read_requirements_file(pkg_root / "requirements.txt") +dev_requirements = read_requirements_file(pkg_root / "requirements-dev.txt") + + +setuptools.setup( + name="benchalerts", + version=__version__, + description="Automated alerting for conbench", + long_description=long_description, + long_description_content_type="text/markdown", + packages=setuptools.find_packages(), + entry_points={}, + classifiers=[ + "Programming Language :: Python :: 3.8", + "License :: OSI Approved :: Apache 2 License", + ], + python_requires=">=3.8", + maintainer="Austin 
Dickey", + maintainer_email="austin@voltrondata.com", + url="https://github.com/conbench/benchalerts", + install_requires=base_requirements, + extras_require={"dev": dev_requirements}, +) diff --git a/benchalerts/tests/integration_tests/README.md b/benchalerts/tests/integration_tests/README.md new file mode 100644 index 000000000..3505ce584 --- /dev/null +++ b/benchalerts/tests/integration_tests/README.md @@ -0,0 +1,54 @@ +Integration tests +----------------- + +These tests will interact with various services like GitHub and Conbench. To run only +these tests, do + + pytest -vv --log-level=DEBUG tests/integration_tests + +To run tests that interact with GitHub, you need the following environment variables +configured correctly: + +- `GITHUB_API_TOKEN` - a Personal Access Token that has at least the `repo:status` + permission. Only used for the integration tests that need a PAT. + + If the token has insufficient permissions, the tests will fail with a 403. + + If this environment variable isn't found, the PAT tests will be skipped. This is + currently the case in our GitHub Actions CI builds. +- `GITHUB_APP_ID` - the GitHub App ID of an App that was created following the + instructions in the + [main README](../../README.md#creating-a-github-app-to-work-with-benchalerts). + The App must be installed on the `conbench` organization, with access to the + `conbench/benchalerts` repository. + + If the App has insufficient permissions, the tests will fail with a 403. + + If this environment variable isn't found, the App tests will be skipped. This + variable is populated in our GitHub Actions CI builds, so some of these tests are + run in CI. +- `GITHUB_APP_PRIVATE_KEY` - the contents of the private key file of the same app as + above. + + If this environment variable isn't found, the App tests will be skipped. +- `CI` - this env var must *NOT* be set, or the tests that post comments to PRs will be + skipped. 
By default, `CI=true` in GitHub Actions, so we'll never run these PR + comment tests in the CI build. (We still run other GitHub App-authenticated + integration tests.) + +License information +------------------- + +Copyright (c) 2022, Voltron Data. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/benchalerts/tests/integration_tests/conftest.py b/benchalerts/tests/integration_tests/conftest.py new file mode 100644 index 000000000..1cf6f218f --- /dev/null +++ b/benchalerts/tests/integration_tests/conftest.py @@ -0,0 +1,41 @@ +# Copyright (c) 2022, Voltron Data. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os + +import pytest +from _pytest.fixtures import SubRequest + + +@pytest.fixture +def github_auth(request: SubRequest, monkeypatch: pytest.MonkeyPatch) -> str: +    """Sets the correct env vars based on the requested GitHub auth type. + +    You can do @pytest.mark.parametrize("github_auth", ["pat", "app"], indirect=True) +    to parametrize this fixture.
+ """ + auth_type = request.param + + if auth_type == "pat": + if not os.getenv("GITHUB_API_TOKEN"): + pytest.skip("GITHUB_API_TOKEN not found") + monkeypatch.delenv("GITHUB_APP_ID", raising=False) + monkeypatch.delenv("GITHUB_APP_PRIVATE_KEY", raising=False) + + elif auth_type == "app": + if not os.getenv("GITHUB_APP_ID") or not os.getenv("GITHUB_APP_PRIVATE_KEY"): + pytest.skip("GITHUB_APP_ID or GITHUB_APP_PRIVATE_KEY not found") + monkeypatch.delenv("GITHUB_API_TOKEN", raising=False) + + return auth_type diff --git a/benchalerts/tests/integration_tests/test_clients_integration.py b/benchalerts/tests/integration_tests/test_clients_integration.py new file mode 100644 index 000000000..e59e70862 --- /dev/null +++ b/benchalerts/tests/integration_tests/test_clients_integration.py @@ -0,0 +1,34 @@ +# Copyright (c) 2022, Voltron Data. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os + +import pytest + +from benchalerts.clients import GitHubRepoClient + + +@pytest.mark.parametrize("github_auth", ["pat", "app"], indirect=True) +def test_create_pull_request_comment(github_auth: str): + if os.getenv("CI"): + pytest.skip("Don't post a PR comment from CI") + + gh = GitHubRepoClient("conbench/benchalerts") + res = gh.create_pull_request_comment( + "posted from an integration test", commit_sha="adc9b73" + ) + if github_auth == "pat": + assert res["user"]["type"] == "User" + elif github_auth == "app": + assert res["user"]["type"] == "Bot" diff --git a/benchalerts/tests/integration_tests/test_talk_to_conbench_integration.py b/benchalerts/tests/integration_tests/test_talk_to_conbench_integration.py new file mode 100644 index 000000000..c0a7d094a --- /dev/null +++ b/benchalerts/tests/integration_tests/test_talk_to_conbench_integration.py @@ -0,0 +1,71 @@ +# Copyright (c) 2022, Voltron Data. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import pytest + +from benchalerts.clients import ConbenchClient +from benchalerts.talk_to_conbench import get_comparison_to_baseline + + +@pytest.mark.parametrize( + ["conbench_url", "commit", "expected_len", "expected_bip"], + [ + # baseline is parent + ( + "https://conbench.ursa.dev/", + "bc7de406564fa7b2bcb9bf055cbaba31ca0ca124", + 8, + True, + ), + # baseline is not parent + ( + "https://velox-conbench.voltrondata.run", + "2319922d288c519baa3bffe59c0bedbcb6c827cd", + 1, + False, + ), + # no baseline + ( + "https://velox-conbench.voltrondata.run", + "b74e7045fade737e39b0f9867bc8b8b23fe00b78", + 1, + None, + ), + # errors + ( + "https://conbench.ursa.dev", + "9fa34df27eb1445ac11b0ab0298d421b04be80f7", + 7, + True, + ), + ], +) +def test_get_comparison_to_baseline( + monkeypatch: pytest.MonkeyPatch, conbench_url, commit, expected_len, expected_bip +): + pytest.skip( + "https://github.com/conbench/conbench/issues/745 means timeouts cause this to fail" + ) + monkeypatch.setenv("CONBENCH_URL", conbench_url) + cb = ConbenchClient() + comparisons = get_comparison_to_baseline(cb, commit) + assert len(comparisons) == expected_len + for comparison in comparisons: + assert comparison.baseline_is_parent is expected_bip + assert comparison.contender_link + assert comparison.contender_id + if comparison.compare_results: + assert comparison.compare_link + for benchmark in comparison.compare_results: + assert benchmark["contender_run_id"] diff --git a/benchalerts/tests/integration_tests/test_workflows_integration.py b/benchalerts/tests/integration_tests/test_workflows_integration.py new file mode 100644 index 000000000..bc715303f --- /dev/null +++ b/benchalerts/tests/integration_tests/test_workflows_integration.py @@ -0,0 +1,123 @@ +# Copyright (c) 2022, Voltron Data. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import time + +import pytest + +import benchalerts.workflows as flows +from benchalerts.clients import GitHubRepoClient + + +@pytest.mark.parametrize( + "arrow_commit", + [ + # no errors + "13a7b605ede88ca15b053f119909c48d0919c6f8", + # errors + "9fa34df27eb1445ac11b0ab0298d421b04be80f7", + ], +) +@pytest.mark.parametrize( + "workflow", + [ + flows.update_github_status_based_on_regressions, + flows.update_github_check_based_on_regressions, + ], +) +@pytest.mark.parametrize("github_auth", ["pat", "app"], indirect=True) +@pytest.mark.parametrize("z_score_threshold", [None, 500]) +def test_update_github_status_based_on_regressions( + monkeypatch: pytest.MonkeyPatch, + github_auth: str, + z_score_threshold, + workflow, + arrow_commit: str, +): + """While this test is running, you can watch + https://github.com/conbench/benchalerts/pull/5 to see the statuses change! + """ + pytest.skip( + "https://github.com/conbench/conbench/issues/745 means timeouts cause this to fail" + ) + if ( + workflow == flows.update_github_status_based_on_regressions + and not arrow_commit.startswith("13a") + ): + pytest.skip("Skipping redundant tests to cut down on test time") + + if ( + workflow == flows.update_github_check_based_on_regressions + and github_auth == "pat" + ): + pytest.skip("Can't use the Checks API with a PAT") + + # note: something *might* go wrong if we go past 1000 statuses on this test SHA? 
+ # https://docs.github.com/en/rest/commits/statuses#create-a-commit-status + test_status_repo = "conbench/benchalerts" + test_status_commit = "4b9543876e8c1cee54c56980c3b2363aad71a8d4" + + arrow_conbench_url = "https://conbench.ursa.dev/" + + github_run_id = os.getenv("GITHUB_RUN_ID", "2974120883") + build_url = f"https://github.com/{test_status_repo}/actions/runs/{github_run_id}" + monkeypatch.setenv("BUILD_URL", build_url) + + # first, test an error + monkeypatch.delenv("CONBENCH_URL", raising=False) + with pytest.raises(ValueError, match="CONBENCH_URL not found"): + workflow(contender_sha=test_status_commit, repo=test_status_repo) + + # sleep to see the updated status on the PR + time.sleep(1) + + # next, a success if z_score_threshold=500, or failure if z_score_threshold=None + monkeypatch.setenv("CONBENCH_URL", arrow_conbench_url) + + # Even though we're grabbing Arrow benchmarks, we want to post to our own repo for + # testing. This class overrides the methods to post statuses to a different commit. 
+ class GitHubDifferentRepoClient(GitHubRepoClient): + def update_commit_status(self, commit_sha, **kwargs): + return super().update_commit_status(commit_sha=test_status_commit, **kwargs) + + def update_check(self, commit_sha, **kwargs): + return super().update_check(commit_sha=test_status_commit, **kwargs) + + github = GitHubDifferentRepoClient(repo=test_status_repo) + + res = workflow( + contender_sha=arrow_commit, z_score_threshold=z_score_threshold, github=github + ) + if workflow == flows.update_github_status_based_on_regressions: + if z_score_threshold is None: + assert res["state"] == "failure" + else: + assert res["state"] == "success" + + if github_auth == "pat": + assert res["creator"]["type"] == "User" + elif github_auth == "app": + assert res["creator"]["type"] == "Bot" + + elif workflow == flows.update_github_check_based_on_regressions: + if arrow_commit.startswith("9fa"): + assert res["conclusion"] == "action_required" + elif z_score_threshold is None: + assert res["conclusion"] == "failure" + else: + assert res["conclusion"] == "success" + + # sleep to see the updated status on the PR + time.sleep(1) diff --git a/benchalerts/tests/unit_tests/__init__.py b/benchalerts/tests/unit_tests/__init__.py new file mode 100644 index 000000000..65164120e --- /dev/null +++ b/benchalerts/tests/unit_tests/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2022, Voltron Data. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/benchalerts/tests/unit_tests/conftest.py b/benchalerts/tests/unit_tests/conftest.py new file mode 100644 index 000000000..743c99bc7 --- /dev/null +++ b/benchalerts/tests/unit_tests/conftest.py @@ -0,0 +1,69 @@ +# Copyright (c) 2022, Voltron Data. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest +from _pytest.fixtures import SubRequest + + +@pytest.fixture +def github_auth(request: SubRequest, monkeypatch: pytest.MonkeyPatch) -> str: +    """Sets the correct env vars based on the requested GitHub auth type. + +    You can do @pytest.mark.parametrize("github_auth", ["pat", "app"], indirect=True) +    to parametrize this fixture. 
+ """ + auth_type = request.param + + if auth_type == "pat": + monkeypatch.setenv("GITHUB_API_TOKEN", "token") + monkeypatch.delenv("GITHUB_APP_ID", raising=False) + monkeypatch.delenv("GITHUB_APP_PRIVATE_KEY", raising=False) + + elif auth_type == "app": + monkeypatch.delenv("GITHUB_API_TOKEN", raising=False) + monkeypatch.setenv("GITHUB_APP_ID", "123456") + # this is fake but conforms to standards + monkeypatch.setenv( + "GITHUB_APP_PRIVATE_KEY", + """-----BEGIN RSA PRIVATE KEY----- +MIIBOgIBAAJBAKj34GkxFhD90vcNLYLInFEX6Ppy1tPf9Cnzj4p4WGeKLs1Pt8Qu +KUpRKfFLfRYC9AIKjbJTWit+CqvjWYzvQwECAwEAAQJAIJLixBy2qpFoS4DSmoEm +o3qGy0t6z09AIJtH+5OeRV1be+N4cDYJKffGzDa88vQENZiRm0GRq6a+HPGQMd2k +TQIhAKMSvzIBnni7ot/OSie2TmJLY4SwTQAevXysE2RbFDYdAiEBCUEaRQnMnbp7 +9mxDXDf6AU0cN/RPBjb9qSHDcWZHGzUCIG2Es59z8ugGrDY+pxLQnwfotadxd+Uy +v/Ow5T0q5gIJAiEAyS4RaI9YG8EWx/2w0T67ZUVAw8eOMB6BIUg0Xcu+3okCIBOs +/5OiPgoTdSy7bcF9IGpSE8ZgGKzgYQVZeN97YE00 +-----END RSA PRIVATE KEY-----""", + ) + + elif auth_type == "none": + monkeypatch.delenv("GITHUB_API_TOKEN", raising=False) + monkeypatch.delenv("GITHUB_APP_ID", raising=False) + monkeypatch.delenv("GITHUB_APP_PRIVATE_KEY", raising=False) + + return auth_type + + +@pytest.fixture +def conbench_env(monkeypatch: pytest.MonkeyPatch): + monkeypatch.setenv("CONBENCH_URL", "https://conbench.biz") + monkeypatch.setenv("CONBENCH_EMAIL", "email") + monkeypatch.setenv("CONBENCH_PASSWORD", "password") + + +@pytest.fixture +def missing_conbench_env(monkeypatch: pytest.MonkeyPatch): + monkeypatch.delenv("CONBENCH_URL", raising=False) + monkeypatch.delenv("CONBENCH_EMAIL", raising=False) + monkeypatch.delenv("CONBENCH_PASSWORD", raising=False) diff --git a/benchalerts/tests/unit_tests/expected_md/details_error.md b/benchalerts/tests/unit_tests/expected_md/details_error.md new file mode 100644 index 000000000..38d76ddd0 --- /dev/null +++ b/benchalerts/tests/unit_tests/expected_md/details_error.md @@ -0,0 +1,3 @@ +Error: `ValueError('Environment variable CONBENCH_URL not 
found')` + +See build link below. diff --git a/benchalerts/tests/unit_tests/expected_md/details_noregressions.md b/benchalerts/tests/unit_tests/expected_md/details_noregressions.md new file mode 100644 index 000000000..1d56c097b --- /dev/null +++ b/benchalerts/tests/unit_tests/expected_md/details_noregressions.md @@ -0,0 +1,3 @@ +Conbench has details about 2 total run(s) on this commit. + +This report was generated using a z-score threshold of 500. A regression is defined as a benchmark exhibiting a z-score higher than the threshold in the "bad" direction (e.g. down for iterations per second; up for total time taken). diff --git a/benchalerts/tests/unit_tests/expected_md/details_regressions.md b/benchalerts/tests/unit_tests/expected_md/details_regressions.md new file mode 100644 index 000000000..0aaab4072 --- /dev/null +++ b/benchalerts/tests/unit_tests/expected_md/details_regressions.md @@ -0,0 +1,3 @@ +Conbench has details about 2 total run(s) on this commit. + +This report was generated using a z-score threshold of 5. A regression is defined as a benchmark exhibiting a z-score higher than the threshold in the "bad" direction (e.g. down for iterations per second; up for total time taken). diff --git a/benchalerts/tests/unit_tests/expected_md/details_workflow_noregressions.md b/benchalerts/tests/unit_tests/expected_md/details_workflow_noregressions.md new file mode 100644 index 000000000..1c7515de7 --- /dev/null +++ b/benchalerts/tests/unit_tests/expected_md/details_workflow_noregressions.md @@ -0,0 +1,3 @@ +Conbench has details about 1 total run(s) on this commit. + +This report was generated using a z-score threshold of 500. A regression is defined as a benchmark exhibiting a z-score higher than the threshold in the "bad" direction (e.g. down for iterations per second; up for total time taken). 
diff --git a/benchalerts/tests/unit_tests/expected_md/details_workflow_regressions.md b/benchalerts/tests/unit_tests/expected_md/details_workflow_regressions.md new file mode 100644 index 000000000..82289a089 --- /dev/null +++ b/benchalerts/tests/unit_tests/expected_md/details_workflow_regressions.md @@ -0,0 +1,3 @@ +Conbench has details about 1 total run(s) on this commit. + +This report was generated using a z-score threshold of 5. A regression is defined as a benchmark exhibiting a z-score higher than the threshold in the "bad" direction (e.g. down for iterations per second; up for total time taken). diff --git a/benchalerts/tests/unit_tests/expected_md/summary_error.md b/benchalerts/tests/unit_tests/expected_md/summary_error.md new file mode 100644 index 000000000..9327e12d8 --- /dev/null +++ b/benchalerts/tests/unit_tests/expected_md/summary_error.md @@ -0,0 +1 @@ +The CI build running the regression analysis failed. This does not necessarily mean this commit has benchmark regressions, but there is an error that must be resolved before we can find out. diff --git a/benchalerts/tests/unit_tests/expected_md/summary_nobaseline.md b/benchalerts/tests/unit_tests/expected_md/summary_nobaseline.md new file mode 100644 index 000000000..5b6912a19 --- /dev/null +++ b/benchalerts/tests/unit_tests/expected_md/summary_nobaseline.md @@ -0,0 +1,10 @@ +## Benchmarks with errors + +These are errors that were caught while running the benchmarks. You can click the link next to each case to go to the Conbench entry for that benchmark, which might have more information about what the error was. + +- Some Run Reason Run at [2021-02-04 17:22:05.225583](http://localhost/runs/some_contender) + - [file-write](http://localhost/benchmarks/some-benchmark-uuid-2) + +## Benchmarks with performance regressions + +Conbench could not find a baseline run for contender commit `no_basel`. 
A baseline run needs to be on the default branch in the same repository, with the same hardware and context, and have at least one of the same benchmark cases. diff --git a/benchalerts/tests/unit_tests/expected_md/summary_noregressions.md b/benchalerts/tests/unit_tests/expected_md/summary_noregressions.md new file mode 100644 index 000000000..8d5801338 --- /dev/null +++ b/benchalerts/tests/unit_tests/expected_md/summary_noregressions.md @@ -0,0 +1,17 @@ +## Benchmarks with errors + +These are errors that were caught while running the benchmarks. You can click the link next to each case to go to the Conbench entry for that benchmark, which might have more information about what the error was. + +- Some Run Reason Run at [2021-02-04 17:22:05.225583](http://localhost/compare/runs/some_baseline...some_contender/) + - [snappy, nyctaxi_sample, csv, arrow](http://localhost/benchmarks/some-benchmark-uuid-4) + +- Some Run Reason Run at [2021-02-04 17:22:05.225583](http://localhost/compare/runs/some_baseline_2...some_contender_2/) + - [snappy, nyctaxi_sample, csv, arrow](http://localhost/benchmarks/some-benchmark-uuid-4) + +## Benchmarks with performance regressions + +Contender commit `abc` had 0 performance regression(s) compared to its baseline commit. + +### Note + +The baseline commit was not the immediate parent of the contender commit. See the link below for details. diff --git a/benchalerts/tests/unit_tests/expected_md/summary_pending.md b/benchalerts/tests/unit_tests/expected_md/summary_pending.md new file mode 100644 index 000000000..d5663dc59 --- /dev/null +++ b/benchalerts/tests/unit_tests/expected_md/summary_pending.md @@ -0,0 +1 @@ +Analyzing `abc` for regressions... 
diff --git a/benchalerts/tests/unit_tests/expected_md/summary_pending_nobaseline.md b/benchalerts/tests/unit_tests/expected_md/summary_pending_nobaseline.md new file mode 100644 index 000000000..7a4e38ad8 --- /dev/null +++ b/benchalerts/tests/unit_tests/expected_md/summary_pending_nobaseline.md @@ -0,0 +1 @@ +Analyzing `no_basel` for regressions... diff --git a/benchalerts/tests/unit_tests/expected_md/summary_regressions.md b/benchalerts/tests/unit_tests/expected_md/summary_regressions.md new file mode 100644 index 000000000..c2025e2f8 --- /dev/null +++ b/benchalerts/tests/unit_tests/expected_md/summary_regressions.md @@ -0,0 +1,25 @@ +## Benchmarks with errors + +These are errors that were caught while running the benchmarks. You can click the link next to each case to go to the Conbench entry for that benchmark, which might have more information about what the error was. + +- Some Run Reason Run at [2021-02-04 17:22:05.225583](http://localhost/compare/runs/some_baseline...some_contender/) + - [snappy, nyctaxi_sample, csv, arrow](http://localhost/benchmarks/some-benchmark-uuid-4) + +- Some Run Reason Run at [2021-02-04 17:22:05.225583](http://localhost/compare/runs/some_baseline_2...some_contender_2/) + - [snappy, nyctaxi_sample, csv, arrow](http://localhost/benchmarks/some-benchmark-uuid-4) + +## Benchmarks with performance regressions + +Contender commit `abc` had 2 performance regression(s) compared to its baseline commit. 
+ +### Benchmarks with regressions: + +- Some Run Reason Run at [2021-02-04 17:22:05.225583](http://localhost/compare/runs/some_baseline...some_contender/) + - [snappy, nyctaxi_sample, parquet, arrow](http://localhost/benchmarks/some-benchmark-uuid-3) + +- Some Run Reason Run at [2021-02-04 17:22:05.225583](http://localhost/compare/runs/some_baseline_2...some_contender_2/) + - [snappy, nyctaxi_sample, parquet, arrow](http://localhost/benchmarks/some-benchmark-uuid-3) + +### Note + +The baseline commit was not the immediate parent of the contender commit. See the link below for details. diff --git a/benchalerts/tests/unit_tests/expected_md/summary_workflow_noregressions.md b/benchalerts/tests/unit_tests/expected_md/summary_workflow_noregressions.md new file mode 100644 index 000000000..fd1169b2d --- /dev/null +++ b/benchalerts/tests/unit_tests/expected_md/summary_workflow_noregressions.md @@ -0,0 +1,14 @@ +## Benchmarks with errors + +These are errors that were caught while running the benchmarks. You can click the link next to each case to go to the Conbench entry for that benchmark, which might have more information about what the error was. + +- Some Run Reason Run at [2021-02-04 17:22:05.225583](http://localhost/compare/runs/some_baseline...some_contender/) + - [snappy, nyctaxi_sample, csv, arrow](http://localhost/benchmarks/some-benchmark-uuid-4) + +## Benchmarks with performance regressions + +Contender commit `abc` had 0 performance regression(s) compared to its baseline commit. + +### Note + +The baseline commit was not the immediate parent of the contender commit. See the link below for details. 
diff --git a/benchalerts/tests/unit_tests/expected_md/summary_workflow_regressions.md b/benchalerts/tests/unit_tests/expected_md/summary_workflow_regressions.md new file mode 100644 index 000000000..0ac83a971 --- /dev/null +++ b/benchalerts/tests/unit_tests/expected_md/summary_workflow_regressions.md @@ -0,0 +1,19 @@ +## Benchmarks with errors + +These are errors that were caught while running the benchmarks. You can click the link next to each case to go to the Conbench entry for that benchmark, which might have more information about what the error was. + +- Some Run Reason Run at [2021-02-04 17:22:05.225583](http://localhost/compare/runs/some_baseline...some_contender/) + - [snappy, nyctaxi_sample, csv, arrow](http://localhost/benchmarks/some-benchmark-uuid-4) + +## Benchmarks with performance regressions + +Contender commit `abc` had 1 performance regression(s) compared to its baseline commit. + +### Benchmarks with regressions: + +- Some Run Reason Run at [2021-02-04 17:22:05.225583](http://localhost/compare/runs/some_baseline...some_contender/) + - [snappy, nyctaxi_sample, parquet, arrow](http://localhost/benchmarks/some-benchmark-uuid-3) + +### Note + +The baseline commit was not the immediate parent of the contender commit. See the link below for details. 
diff --git a/benchalerts/tests/unit_tests/mocked_responses/GET_conbench_benchmarks_run_id_contender_wo_base.json b/benchalerts/tests/unit_tests/mocked_responses/GET_conbench_benchmarks_run_id_contender_wo_base.json new file mode 100644 index 000000000..a846b01b0 --- /dev/null +++ b/benchalerts/tests/unit_tests/mocked_responses/GET_conbench_benchmarks_run_id_contender_wo_base.json @@ -0,0 +1,114 @@ +{ + "status_code": 200, + "data": [ + { + "batch_id": "some-batch-uuid-1", + "error": null, + "id": "some-benchmark-uuid-1", + "links": { + "context": "http://localhost/api/contexts/some-context-uuid-1/", + "info": "http://localhost/api/info/some-info-uuid-1/", + "list": "http://localhost/api/benchmarks/", + "run": "http://localhost/api/runs/some-run-uuid-1/", + "self": "http://localhost/api/benchmarks/some-benchmark-uuid-1/" + }, + "run_id": "some-run-uuid-1", + "stats": { + "data": [ + 0.099094, + 0.037129, + 0.036381, + 0.148896, + 0.008104, + 0.005496, + 0.009871, + 0.006008, + 0.007978, + 0.004733 + ], + "iqr": 0.030442, + "iterations": 10, + "max": 0.148896, + "mean": 0.036369, + "median": 0.008988, + "min": 0.004733, + "q1": 0.0065, + "q3": 0.036942, + "stdev": 0.049194, + "time_unit": "s", + "times": [ + 0.099094, + 0.037129, + 0.036381, + 0.148896, + 0.008104, + 0.005496, + 0.009871, + 0.006008, + 0.007978, + 0.004733 + ], + "unit": "s", + "z_improvement": false, + "z_regression": false, + "z_score": null + }, + "tags": { + "compression": "snappy", + "cpu_count": 2, + "dataset": "nyctaxi_sample", + "file_type": "parquet", + "id": "some-case-uuid-1", + "input_type": "arrow", + "name": "file-read" + }, + "timestamp": "2020-11-25T21:02:42.706806" + }, + { + "batch_id": "some-batch-uuid-1", + "error": { + "fatal": true, + "message": "Something went wrong", + "regression": false, + "stack_trace": "Traceback (most recent call last):\n File \"\", line 1, in \nAssertionError" + }, + "id": "some-benchmark-uuid-2", + "links": { + "context": 
"http://localhost/api/contexts/some-context-uuid-1/", + "info": "http://localhost/api/info/some-info-uuid-1/", + "list": "http://localhost/api/benchmarks/", + "run": "http://localhost/api/runs/some-run-uuid-1/", + "self": "http://localhost/api/benchmarks/some-benchmark-uuid-2/" + }, + "run_id": "some-run-uuid-1", + "stats": { + "data": [], + "iqr": null, + "iterations": null, + "max": null, + "mean": null, + "median": null, + "min": null, + "q1": null, + "q3": null, + "stdev": null, + "time_unit": null, + "times": [], + "unit": null, + "z_improvement": false, + "z_regression": false, + "z_score": null + }, + "tags": { + "compression": "snappy", + "cpu_count": 2, + "dataset": "nyctaxi_sample", + "file_type": "parquet", + "id": "some-case-uuid-2", + "input_type": "arrow", + "name": "file-write" + }, + "timestamp": "2020-11-25T21:03:42.706806" + } + ] +} diff --git a/benchalerts/tests/unit_tests/mocked_responses/GET_conbench_compare_runs_some_baseline_some_contender.json b/benchalerts/tests/unit_tests/mocked_responses/GET_conbench_compare_runs_some_baseline_some_contender.json new file mode 100644 index 000000000..70782f288 --- /dev/null +++ b/benchalerts/tests/unit_tests/mocked_responses/GET_conbench_compare_runs_some_baseline_some_contender.json @@ -0,0 +1,82 @@ +{ + "status_code": 200, + "data": [ + { + "baseline": "0.036 s", + "baseline_batch_id": "some-batch-uuid-1", + "baseline_error": null, + "baseline_id": "some-benchmark-uuid-1", + "baseline_run_id": "some-run-uuid-1", + "baseline_z_improvement": false, + "baseline_z_regression": false, + "baseline_z_score": null, + "batch": "file-read", + "benchmark": "snappy, nyctaxi_sample, parquet, arrow", + "change": "0.000%", + "contender": "0.036 s", + "contender_batch_id": "some-batch-uuid-2", + "contender_error": null, + "contender_id": "some-benchmark-uuid-3", + "contender_run_id": "some-run-uuid-2", + "contender_z_improvement": false, + "contender_z_regression": true, + "contender_z_score": null, + "improvement": 
false, + "language": "Python", + "less_is_better": true, + "regression": true, + "tags": { + "compression": "snappy", + "cpu_count": 2, + "dataset": "nyctaxi_sample", + "file_type": "parquet", + "input_type": "arrow", + "name": "file-read" + }, + "threshold": "5.000%", + "threshold_z": 5, + "unit": "s" + }, + { + "baseline": "0.036 s", + "baseline_batch_id": "some-batch-uuid-1", + "baseline_error": null, + "baseline_id": "some-benchmark-uuid-2", + "baseline_run_id": "some-run-uuid-1", + "baseline_z_improvement": false, + "baseline_z_regression": false, + "baseline_z_score": null, + "batch": "file-write", + "benchmark": "snappy, nyctaxi_sample, csv, arrow", + "change": "0.000%", + "contender": "0.036 s", + "contender_batch_id": "some-batch-uuid-2", + "contender_error": { + "fatal": true, + "message": "Something went wrong", + "regression": false, + "stack_trace": "Traceback (most recent call last):\n File \"\", line 1, in \nAssertionError" + }, + "contender_id": "some-benchmark-uuid-4", + "contender_run_id": "some-run-uuid-2", + "contender_z_improvement": false, + "contender_z_regression": false, + "contender_z_score": null, + "improvement": false, + "language": "Python", + "less_is_better": true, + "regression": false, + "tags": { + "compression": "snappy", + "cpu_count": 2, + "dataset": "nyctaxi_sample", + "file_type": "parquet", + "input_type": "arrow", + "name": "file-write" + }, + "threshold": "5.000%", + "threshold_z": 5, + "unit": "s" + } + ] +} diff --git a/benchalerts/tests/unit_tests/mocked_responses/GET_conbench_compare_runs_some_baseline_some_contender_threshold_z_500.json b/benchalerts/tests/unit_tests/mocked_responses/GET_conbench_compare_runs_some_baseline_some_contender_threshold_z_500.json new file mode 100644 index 000000000..8f803d942 --- /dev/null +++ b/benchalerts/tests/unit_tests/mocked_responses/GET_conbench_compare_runs_some_baseline_some_contender_threshold_z_500.json @@ -0,0 +1,82 @@ +{ + "status_code": 200, + "data": [ + { + "baseline": 
"0.036 s", + "baseline_batch_id": "some-batch-uuid-1", + "baseline_error": null, + "baseline_id": "some-benchmark-uuid-1", + "baseline_run_id": "some-run-uuid-1", + "baseline_z_improvement": false, + "baseline_z_regression": false, + "baseline_z_score": null, + "batch": "file-read", + "benchmark": "snappy, nyctaxi_sample, parquet, arrow", + "change": "0.000%", + "contender": "0.036 s", + "contender_batch_id": "some-batch-uuid-2", + "contender_error": null, + "contender_id": "some-benchmark-uuid-3", + "contender_run_id": "some-run-uuid-2", + "contender_z_improvement": false, + "contender_z_regression": false, + "contender_z_score": null, + "improvement": false, + "language": "Python", + "less_is_better": true, + "regression": false, + "tags": { + "compression": "snappy", + "cpu_count": 2, + "dataset": "nyctaxi_sample", + "file_type": "parquet", + "input_type": "arrow", + "name": "file-read" + }, + "threshold": "5.000%", + "threshold_z": 500, + "unit": "s" + }, + { + "baseline": "0.036 s", + "baseline_batch_id": "some-batch-uuid-1", + "baseline_error": null, + "baseline_id": "some-benchmark-uuid-2", + "baseline_run_id": "some-run-uuid-1", + "baseline_z_improvement": false, + "baseline_z_regression": false, + "baseline_z_score": null, + "batch": "file-write", + "benchmark": "snappy, nyctaxi_sample, csv, arrow", + "change": "0.000%", + "contender": "0.036 s", + "contender_batch_id": "some-batch-uuid-2", + "contender_error": { + "fatal": true, + "message": "Something went wrong", + "regression": false, + "stack_trace": "Traceback (most recent call last):\n File \"\", line 1, in \nAssertionError" + }, + "contender_id": "some-benchmark-uuid-4", + "contender_run_id": "some-run-uuid-2", + "contender_z_improvement": false, + "contender_z_regression": false, + "contender_z_score": null, + "improvement": false, + "language": "Python", + "less_is_better": true, + "regression": false, + "tags": { + "compression": "snappy", + "cpu_count": 2, + "dataset": "nyctaxi_sample", + 
"file_type": "parquet", + "input_type": "arrow", + "name": "file-write" + }, + "threshold": "5.000%", + "threshold_z": 500, + "unit": "s" + } + ] +} diff --git a/benchalerts/tests/unit_tests/mocked_responses/GET_conbench_error_with_content.json b/benchalerts/tests/unit_tests/mocked_responses/GET_conbench_error_with_content.json new file mode 100644 index 000000000..d740f0a58 --- /dev/null +++ b/benchalerts/tests/unit_tests/mocked_responses/GET_conbench_error_with_content.json @@ -0,0 +1,7 @@ +{ + "status_code": 404, + "data": { + "code": 404, + "name": "Not Found" + } +} diff --git a/benchalerts/tests/unit_tests/mocked_responses/GET_conbench_error_without_content.json b/benchalerts/tests/unit_tests/mocked_responses/GET_conbench_error_without_content.json new file mode 100644 index 000000000..0c9fef0f7 --- /dev/null +++ b/benchalerts/tests/unit_tests/mocked_responses/GET_conbench_error_without_content.json @@ -0,0 +1,3 @@ +{ + "status_code": 404 +} diff --git a/benchalerts/tests/unit_tests/mocked_responses/GET_conbench_runs_contender_wo_base.json b/benchalerts/tests/unit_tests/mocked_responses/GET_conbench_runs_contender_wo_base.json new file mode 100644 index 000000000..f051b7996 --- /dev/null +++ b/benchalerts/tests/unit_tests/mocked_responses/GET_conbench_runs_contender_wo_base.json @@ -0,0 +1,52 @@ +{ + "status_code": 200, + "data": { + "commit": { + "author_avatar": "https://avatars.githubusercontent.com/u/878798?v=4", + "author_login": "dianaclarke", + "author_name": "Diana Clarke", + "id": "some-commit-uuid-1", + "message": "ARROW-11771: [Developer][Archery] Move benchmark tests (so CI runs them)", + "parent_sha": "4beb514d071c9beec69b8917b5265e77ade22fb3", + "repository": "https://github.com/apache/arrow", + "sha": "no_baseline", + "timestamp": "2021-02-25T01:02:51", + "url": "https://github.com/apache/arrow/commit/02addad336ba19a654f9c857ede546331be7b631" + }, + "hardware": { + "architecture_name": "x86_64", + "cpu_core_count": 2, + "cpu_frequency_max_hz": 
3500000000, + "cpu_l1d_cache_bytes": 32768, + "cpu_l1i_cache_bytes": 32768, + "cpu_l2_cache_bytes": 262144, + "cpu_l3_cache_bytes": 4194304, + "cpu_model_name": "Intel(R) Core(TM) i7-7567U CPU @ 3.50GHz", + "cpu_thread_count": 4, + "gpu_count": 2, + "gpu_product_names": [ + "Tesla T4", + "GeForce GTX 1060 3GB" + ], + "id": "some-machine-uuid-1", + "kernel_name": "19.6.0", + "memory_bytes": 17179869184, + "name": "some-machine-name", + "os_name": "macOS", + "os_version": "10.15.7", + "type": "machine" + }, + "has_errors": true, + "id": "some_contender", + "links": { + "baseline": null, + "commit": "http://localhost/api/commits/some-commit-uuid-1/", + "hardware": "http://localhost/api/hardware/some-machine-uuid-1/", + "list": "http://localhost/api/runs/", + "self": "http://localhost/api/runs/some-run-uuid-1/" + }, + "name": "some run name", + "reason": "some run reason", + "timestamp": "2021-02-04T17:22:05.225583" + } +} diff --git a/benchalerts/tests/unit_tests/mocked_responses/GET_conbench_runs_sha_abc.json b/benchalerts/tests/unit_tests/mocked_responses/GET_conbench_runs_sha_abc.json new file mode 100644 index 000000000..2f2aeaa9e --- /dev/null +++ b/benchalerts/tests/unit_tests/mocked_responses/GET_conbench_runs_sha_abc.json @@ -0,0 +1,53 @@ +{ + "status_code": 200, + "data": [ + { + "commit": { + "author_avatar": "https://avatars.githubusercontent.com/u/878798?v=4", + "author_login": "dianaclarke", + "author_name": "Diana Clarke", + "id": "some-commit-uuid-1", + "message": "ARROW-11771: [Developer][Archery] Move benchmark tests (so CI runs them)", + "parent_sha": "4beb514d071c9beec69b8917b5265e77ade22fb3", + "repository": "https://github.com/apache/arrow", + "sha": "02addad336ba19a654f9c857ede546331be7b631", + "timestamp": "2021-02-25T01:02:51", + "url": "https://github.com/apache/arrow/commit/02addad336ba19a654f9c857ede546331be7b631" + }, + "hardware": { + "architecture_name": "x86_64", + "cpu_core_count": 2, + "cpu_frequency_max_hz": 3500000000, + 
"cpu_l1d_cache_bytes": 32768, + "cpu_l1i_cache_bytes": 32768, + "cpu_l2_cache_bytes": 262144, + "cpu_l3_cache_bytes": 4194304, + "cpu_model_name": "Intel(R) Core(TM) i7-7567U CPU @ 3.50GHz", + "cpu_thread_count": 4, + "gpu_count": 2, + "gpu_product_names": [ + "Tesla T4", + "GeForce GTX 1060 3GB" + ], + "id": "some-machine-uuid-1", + "kernel_name": "19.6.0", + "memory_bytes": 17179869184, + "name": "some-machine-name", + "os_name": "macOS", + "os_version": "10.15.7", + "type": "machine" + }, + "has_errors": false, + "id": "some_contender", + "links": { + "commit": "http://localhost/api/commits/some-commit-uuid-1/", + "hardware": "http://localhost/api/hardware/some-machine-uuid-1/", + "list": "http://localhost/api/runs/", + "self": "http://localhost/api/runs/some-run-uuid-1/" + }, + "name": "some run name", + "reason": "some run reason", + "timestamp": "2021-02-04T17:22:05.225583" + } + ] +} diff --git a/benchalerts/tests/unit_tests/mocked_responses/GET_conbench_runs_sha_no_baseline.json b/benchalerts/tests/unit_tests/mocked_responses/GET_conbench_runs_sha_no_baseline.json new file mode 100644 index 000000000..0f1388099 --- /dev/null +++ b/benchalerts/tests/unit_tests/mocked_responses/GET_conbench_runs_sha_no_baseline.json @@ -0,0 +1,53 @@ +{ + "status_code": 200, + "data": [ + { + "commit": { + "author_avatar": "https://avatars.githubusercontent.com/u/878798?v=4", + "author_login": "dianaclarke", + "author_name": "Diana Clarke", + "id": "some-commit-uuid-1", + "message": "ARROW-11771: [Developer][Archery] Move benchmark tests (so CI runs them)", + "parent_sha": "4beb514d071c9beec69b8917b5265e77ade22fb3", + "repository": "https://github.com/apache/arrow", + "sha": "02addad336ba19a654f9c857ede546331be7b631", + "timestamp": "2021-02-25T01:02:51", + "url": "https://github.com/apache/arrow/commit/02addad336ba19a654f9c857ede546331be7b631" + }, + "hardware": { + "architecture_name": "x86_64", + "cpu_core_count": 2, + "cpu_frequency_max_hz": 3500000000, + 
"cpu_l1d_cache_bytes": 32768, + "cpu_l1i_cache_bytes": 32768, + "cpu_l2_cache_bytes": 262144, + "cpu_l3_cache_bytes": 4194304, + "cpu_model_name": "Intel(R) Core(TM) i7-7567U CPU @ 3.50GHz", + "cpu_thread_count": 4, + "gpu_count": 2, + "gpu_product_names": [ + "Tesla T4", + "GeForce GTX 1060 3GB" + ], + "id": "some-machine-uuid-1", + "kernel_name": "19.6.0", + "memory_bytes": 17179869184, + "name": "some-machine-name", + "os_name": "macOS", + "os_version": "10.15.7", + "type": "machine" + }, + "has_errors": false, + "id": "contender_wo_base", + "links": { + "commit": "http://localhost/api/commits/some-commit-uuid-1/", + "hardware": "http://localhost/api/hardware/some-machine-uuid-1/", + "list": "http://localhost/api/runs/", + "self": "http://localhost/api/runs/some-run-uuid-1/" + }, + "name": "some run name", + "reason": "some run reason", + "timestamp": "2021-02-04T17:22:05.225583" + } + ] +} diff --git a/benchalerts/tests/unit_tests/mocked_responses/GET_conbench_runs_sha_no_runs.json b/benchalerts/tests/unit_tests/mocked_responses/GET_conbench_runs_sha_no_runs.json new file mode 100644 index 000000000..1b9861185 --- /dev/null +++ b/benchalerts/tests/unit_tests/mocked_responses/GET_conbench_runs_sha_no_runs.json @@ -0,0 +1,4 @@ +{ + "status_code": 200, + "data": [] +} diff --git a/benchalerts/tests/unit_tests/mocked_responses/GET_conbench_runs_some_baseline.json b/benchalerts/tests/unit_tests/mocked_responses/GET_conbench_runs_some_baseline.json new file mode 100644 index 000000000..f00c52155 --- /dev/null +++ b/benchalerts/tests/unit_tests/mocked_responses/GET_conbench_runs_some_baseline.json @@ -0,0 +1,52 @@ +{ + "status_code": 200, + "data": { + "commit": { + "author_avatar": "https://avatars.githubusercontent.com/u/878798?v=4", + "author_login": "dianaclarke", + "author_name": "Diana Clarke", + "id": "some-commit-uuid-1", + "message": "ARROW-11771: [Developer][Archery] Move benchmark tests (so CI runs them)", + "parent_sha": 
"4beb514d071c9beec69b8917b5265e77ade22fb3", + "repository": "https://github.com/apache/arrow", + "sha": "02addad336ba19a654f9c857ede546331be7b631", + "timestamp": "2021-02-25T01:02:51", + "url": "https://github.com/apache/arrow/commit/02addad336ba19a654f9c857ede546331be7b631" + }, + "hardware": { + "architecture_name": "x86_64", + "cpu_core_count": 2, + "cpu_frequency_max_hz": 3500000000, + "cpu_l1d_cache_bytes": 32768, + "cpu_l1i_cache_bytes": 32768, + "cpu_l2_cache_bytes": 262144, + "cpu_l3_cache_bytes": 4194304, + "cpu_model_name": "Intel(R) Core(TM) i7-7567U CPU @ 3.50GHz", + "cpu_thread_count": 4, + "gpu_count": 2, + "gpu_product_names": [ + "Tesla T4", + "GeForce GTX 1060 3GB" + ], + "id": "some-machine-uuid-1", + "kernel_name": "19.6.0", + "memory_bytes": 17179869184, + "name": "some-machine-name", + "os_name": "macOS", + "os_version": "10.15.7", + "type": "machine" + }, + "has_errors": false, + "id": "some_baseline", + "links": { + "baseline": "http://localhost/api/runs/another_baseline/", + "commit": "http://localhost/api/commits/some-commit-uuid-1/", + "hardware": "http://localhost/api/hardware/some-machine-uuid-1/", + "list": "http://localhost/api/runs/", + "self": "http://localhost/api/runs/some-run-uuid-1/" + }, + "name": "some run name", + "reason": "some run reason", + "timestamp": "2021-02-04T17:22:05.225583" + } +} diff --git a/benchalerts/tests/unit_tests/mocked_responses/GET_conbench_runs_some_contender.json b/benchalerts/tests/unit_tests/mocked_responses/GET_conbench_runs_some_contender.json new file mode 100644 index 000000000..cd9207774 --- /dev/null +++ b/benchalerts/tests/unit_tests/mocked_responses/GET_conbench_runs_some_contender.json @@ -0,0 +1,52 @@ +{ + "status_code": 200, + "data": { + "commit": { + "author_avatar": "https://avatars.githubusercontent.com/u/878798?v=4", + "author_login": "dianaclarke", + "author_name": "Diana Clarke", + "id": "some-commit-uuid-1", + "message": "ARROW-11771: [Developer][Archery] Move benchmark tests (so 
CI runs them)", + "parent_sha": "4beb514d071c9beec69b8917b5265e77ade22fb3", + "repository": "https://github.com/apache/arrow", + "sha": "abc", + "timestamp": "2021-02-25T01:02:51", + "url": "https://github.com/apache/arrow/commit/02addad336ba19a654f9c857ede546331be7b631" + }, + "hardware": { + "architecture_name": "x86_64", + "cpu_core_count": 2, + "cpu_frequency_max_hz": 3500000000, + "cpu_l1d_cache_bytes": 32768, + "cpu_l1i_cache_bytes": 32768, + "cpu_l2_cache_bytes": 262144, + "cpu_l3_cache_bytes": 4194304, + "cpu_model_name": "Intel(R) Core(TM) i7-7567U CPU @ 3.50GHz", + "cpu_thread_count": 4, + "gpu_count": 2, + "gpu_product_names": [ + "Tesla T4", + "GeForce GTX 1060 3GB" + ], + "id": "some-machine-uuid-1", + "kernel_name": "19.6.0", + "memory_bytes": 17179869184, + "name": "some-machine-name", + "os_name": "macOS", + "os_version": "10.15.7", + "type": "machine" + }, + "has_errors": false, + "id": "some_contender", + "links": { + "baseline": "http://localhost/api/runs/some_baseline/", + "commit": "http://localhost/api/commits/some-commit-uuid-1/", + "hardware": "http://localhost/api/hardware/some-machine-uuid-1/", + "list": "http://localhost/api/runs/", + "self": "http://localhost/api/runs/some-run-uuid-1/" + }, + "name": "some run name", + "reason": "some run reason", + "timestamp": "2021-02-04T17:22:05.225583" + } +} diff --git a/benchalerts/tests/unit_tests/mocked_responses/GET_github_app_installations.json b/benchalerts/tests/unit_tests/mocked_responses/GET_github_app_installations.json new file mode 100644 index 000000000..a6ca7505a --- /dev/null +++ b/benchalerts/tests/unit_tests/mocked_responses/GET_github_app_installations.json @@ -0,0 +1,55 @@ +{ + "status_code": 200, + "data": [ + { + "id": 1, + "account": { + "login": "octocat", + "id": 1, + "node_id": "MDQ6VXNlcjE=", + "avatar_url": "https://github.com/images/error/octocat_happy.gif", + "gravatar_id": "", + "url": "https://api.github.com/users/octocat", + "html_url": "https://github.com/octocat", 
+ "followers_url": "https://api.github.com/users/octocat/followers", + "following_url": "https://api.github.com/users/octocat/following{/other_user}", + "gists_url": "https://api.github.com/users/octocat/gists{/gist_id}", + "starred_url": "https://api.github.com/users/octocat/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/octocat/subscriptions", + "organizations_url": "https://api.github.com/users/octocat/orgs", + "repos_url": "https://api.github.com/users/octocat/repos", + "events_url": "https://api.github.com/users/octocat/events{/privacy}", + "received_events_url": "https://api.github.com/users/octocat/received_events", + "type": "User", + "site_admin": false + }, + "access_tokens_url": "https://api.github.com/installations/1/access_tokens", + "repositories_url": "https://api.github.com/installation/repositories", + "html_url": "https://github.com/organizations/github/settings/installations/1", + "app_id": 1, + "target_id": 1, + "target_type": "Organization", + "permissions": { + "checks": "write", + "metadata": "read", + "contents": "read" + }, + "events": [ + "push", + "pull_request" + ], + "single_file_name": "config.yaml", + "has_multiple_single_files": true, + "single_file_paths": [ + "config.yml", + ".github/issue_TEMPLATE.md" + ], + "repository_selection": "selected", + "created_at": "2017-07-08T16:18:44-04:00", + "updated_at": "2017-07-08T16:18:44-04:00", + "app_slug": "github-actions", + "suspended_at": null, + "suspended_by": null + } + ] +} diff --git a/benchalerts/tests/unit_tests/mocked_responses/GET_github_commits_abc_pulls.json b/benchalerts/tests/unit_tests/mocked_responses/GET_github_commits_abc_pulls.json new file mode 100644 index 000000000..6663b7de9 --- /dev/null +++ b/benchalerts/tests/unit_tests/mocked_responses/GET_github_commits_abc_pulls.json @@ -0,0 +1,523 @@ +{ + "status_code": 200, + "data": [ + { + "url": "https://api.github.com/repos/octocat/Hello-World/pulls/1347", + "id": 1, + "node_id": 
"MDExOlB1bGxSZXF1ZXN0MQ==", + "html_url": "https://github.com/octocat/Hello-World/pull/1347", + "diff_url": "https://github.com/octocat/Hello-World/pull/1347.diff", + "patch_url": "https://github.com/octocat/Hello-World/pull/1347.patch", + "issue_url": "https://api.github.com/repos/octocat/Hello-World/issues/1347", + "commits_url": "https://api.github.com/repos/octocat/Hello-World/pulls/1347/commits", + "review_comments_url": "https://api.github.com/repos/octocat/Hello-World/pulls/1347/comments", + "review_comment_url": "https://api.github.com/repos/octocat/Hello-World/pulls/comments{/number}", + "comments_url": "https://api.github.com/repos/octocat/Hello-World/issues/1347/comments", + "statuses_url": "https://api.github.com/repos/octocat/Hello-World/statuses/6dcb09b5b57875f334f61aebed695e2e4193db5e", + "number": 1347, + "state": "open", + "locked": true, + "title": "Amazing new feature", + "user": { + "login": "octocat", + "id": 1, + "node_id": "MDQ6VXNlcjE=", + "avatar_url": "https://github.com/images/error/octocat_happy.gif", + "gravatar_id": "", + "url": "https://api.github.com/users/octocat", + "html_url": "https://github.com/octocat", + "followers_url": "https://api.github.com/users/octocat/followers", + "following_url": "https://api.github.com/users/octocat/following{/other_user}", + "gists_url": "https://api.github.com/users/octocat/gists{/gist_id}", + "starred_url": "https://api.github.com/users/octocat/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/octocat/subscriptions", + "organizations_url": "https://api.github.com/users/octocat/orgs", + "repos_url": "https://api.github.com/users/octocat/repos", + "events_url": "https://api.github.com/users/octocat/events{/privacy}", + "received_events_url": "https://api.github.com/users/octocat/received_events", + "type": "User", + "site_admin": false + }, + "body": "Please pull these awesome changes in!", + "labels": [ + { + "id": 208045946, + "node_id": "MDU6TGFiZWwyMDgwNDU5NDY=", + 
"url": "https://api.github.com/repos/octocat/Hello-World/labels/bug", + "name": "bug", + "description": "Something isn't working", + "color": "f29513", + "default": true + } + ], + "milestone": { + "url": "https://api.github.com/repos/octocat/Hello-World/milestones/1", + "html_url": "https://github.com/octocat/Hello-World/milestones/v1.0", + "labels_url": "https://api.github.com/repos/octocat/Hello-World/milestones/1/labels", + "id": 1002604, + "node_id": "MDk6TWlsZXN0b25lMTAwMjYwNA==", + "number": 1, + "state": "open", + "title": "v1.0", + "description": "Tracking milestone for version 1.0", + "creator": { + "login": "octocat", + "id": 1, + "node_id": "MDQ6VXNlcjE=", + "avatar_url": "https://github.com/images/error/octocat_happy.gif", + "gravatar_id": "", + "url": "https://api.github.com/users/octocat", + "html_url": "https://github.com/octocat", + "followers_url": "https://api.github.com/users/octocat/followers", + "following_url": "https://api.github.com/users/octocat/following{/other_user}", + "gists_url": "https://api.github.com/users/octocat/gists{/gist_id}", + "starred_url": "https://api.github.com/users/octocat/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/octocat/subscriptions", + "organizations_url": "https://api.github.com/users/octocat/orgs", + "repos_url": "https://api.github.com/users/octocat/repos", + "events_url": "https://api.github.com/users/octocat/events{/privacy}", + "received_events_url": "https://api.github.com/users/octocat/received_events", + "type": "User", + "site_admin": false + }, + "open_issues": 4, + "closed_issues": 8, + "created_at": "2011-04-10T20:09:31Z", + "updated_at": "2014-03-03T18:58:10Z", + "closed_at": "2013-02-12T13:22:01Z", + "due_on": "2012-10-09T23:39:01Z" + }, + "active_lock_reason": "too heated", + "created_at": "2011-01-26T19:01:12Z", + "updated_at": "2011-01-26T19:01:12Z", + "closed_at": "2011-01-26T19:01:12Z", + "merged_at": "2011-01-26T19:01:12Z", + "merge_commit_sha": 
"e5bd3914e2e596debea16f433f57875b5b90bcd6", + "assignee": { + "login": "octocat", + "id": 1, + "node_id": "MDQ6VXNlcjE=", + "avatar_url": "https://github.com/images/error/octocat_happy.gif", + "gravatar_id": "", + "url": "https://api.github.com/users/octocat", + "html_url": "https://github.com/octocat", + "followers_url": "https://api.github.com/users/octocat/followers", + "following_url": "https://api.github.com/users/octocat/following{/other_user}", + "gists_url": "https://api.github.com/users/octocat/gists{/gist_id}", + "starred_url": "https://api.github.com/users/octocat/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/octocat/subscriptions", + "organizations_url": "https://api.github.com/users/octocat/orgs", + "repos_url": "https://api.github.com/users/octocat/repos", + "events_url": "https://api.github.com/users/octocat/events{/privacy}", + "received_events_url": "https://api.github.com/users/octocat/received_events", + "type": "User", + "site_admin": false + }, + "assignees": [ + { + "login": "octocat", + "id": 1, + "node_id": "MDQ6VXNlcjE=", + "avatar_url": "https://github.com/images/error/octocat_happy.gif", + "gravatar_id": "", + "url": "https://api.github.com/users/octocat", + "html_url": "https://github.com/octocat", + "followers_url": "https://api.github.com/users/octocat/followers", + "following_url": "https://api.github.com/users/octocat/following{/other_user}", + "gists_url": "https://api.github.com/users/octocat/gists{/gist_id}", + "starred_url": "https://api.github.com/users/octocat/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/octocat/subscriptions", + "organizations_url": "https://api.github.com/users/octocat/orgs", + "repos_url": "https://api.github.com/users/octocat/repos", + "events_url": "https://api.github.com/users/octocat/events{/privacy}", + "received_events_url": "https://api.github.com/users/octocat/received_events", + "type": "User", + "site_admin": false + }, + { + "login": 
"hubot", + "id": 1, + "node_id": "MDQ6VXNlcjE=", + "avatar_url": "https://github.com/images/error/hubot_happy.gif", + "gravatar_id": "", + "url": "https://api.github.com/users/hubot", + "html_url": "https://github.com/hubot", + "followers_url": "https://api.github.com/users/hubot/followers", + "following_url": "https://api.github.com/users/hubot/following{/other_user}", + "gists_url": "https://api.github.com/users/hubot/gists{/gist_id}", + "starred_url": "https://api.github.com/users/hubot/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/hubot/subscriptions", + "organizations_url": "https://api.github.com/users/hubot/orgs", + "repos_url": "https://api.github.com/users/hubot/repos", + "events_url": "https://api.github.com/users/hubot/events{/privacy}", + "received_events_url": "https://api.github.com/users/hubot/received_events", + "type": "User", + "site_admin": true + } + ], + "requested_reviewers": [ + { + "login": "other_user", + "id": 1, + "node_id": "MDQ6VXNlcjE=", + "avatar_url": "https://github.com/images/error/other_user_happy.gif", + "gravatar_id": "", + "url": "https://api.github.com/users/other_user", + "html_url": "https://github.com/other_user", + "followers_url": "https://api.github.com/users/other_user/followers", + "following_url": "https://api.github.com/users/other_user/following{/other_user}", + "gists_url": "https://api.github.com/users/other_user/gists{/gist_id}", + "starred_url": "https://api.github.com/users/other_user/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/other_user/subscriptions", + "organizations_url": "https://api.github.com/users/other_user/orgs", + "repos_url": "https://api.github.com/users/other_user/repos", + "events_url": "https://api.github.com/users/other_user/events{/privacy}", + "received_events_url": "https://api.github.com/users/other_user/received_events", + "type": "User", + "site_admin": false + } + ], + "requested_teams": [ + { + "id": 1, + "node_id": 
"MDQ6VGVhbTE=", + "url": "https://api.github.com/teams/1", + "html_url": "https://github.com/orgs/github/teams/justice-league", + "name": "Justice League", + "slug": "justice-league", + "description": "A great team.", + "privacy": "closed", + "permission": "admin", + "members_url": "https://api.github.com/teams/1/members{/member}", + "repositories_url": "https://api.github.com/teams/1/repos", + "parent": null + } + ], + "head": { + "label": "octocat:new-topic", + "ref": "new-topic", + "sha": "6dcb09b5b57875f334f61aebed695e2e4193db5e", + "user": { + "login": "octocat", + "id": 1, + "node_id": "MDQ6VXNlcjE=", + "avatar_url": "https://github.com/images/error/octocat_happy.gif", + "gravatar_id": "", + "url": "https://api.github.com/users/octocat", + "html_url": "https://github.com/octocat", + "followers_url": "https://api.github.com/users/octocat/followers", + "following_url": "https://api.github.com/users/octocat/following{/other_user}", + "gists_url": "https://api.github.com/users/octocat/gists{/gist_id}", + "starred_url": "https://api.github.com/users/octocat/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/octocat/subscriptions", + "organizations_url": "https://api.github.com/users/octocat/orgs", + "repos_url": "https://api.github.com/users/octocat/repos", + "events_url": "https://api.github.com/users/octocat/events{/privacy}", + "received_events_url": "https://api.github.com/users/octocat/received_events", + "type": "User", + "site_admin": false + }, + "repo": { + "id": 1296269, + "node_id": "MDEwOlJlcG9zaXRvcnkxMjk2MjY5", + "name": "Hello-World", + "full_name": "octocat/Hello-World", + "owner": { + "login": "octocat", + "id": 1, + "node_id": "MDQ6VXNlcjE=", + "avatar_url": "https://github.com/images/error/octocat_happy.gif", + "gravatar_id": "", + "url": "https://api.github.com/users/octocat", + "html_url": "https://github.com/octocat", + "followers_url": "https://api.github.com/users/octocat/followers", + "following_url": 
"https://api.github.com/users/octocat/following{/other_user}", + "gists_url": "https://api.github.com/users/octocat/gists{/gist_id}", + "starred_url": "https://api.github.com/users/octocat/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/octocat/subscriptions", + "organizations_url": "https://api.github.com/users/octocat/orgs", + "repos_url": "https://api.github.com/users/octocat/repos", + "events_url": "https://api.github.com/users/octocat/events{/privacy}", + "received_events_url": "https://api.github.com/users/octocat/received_events", + "type": "User", + "site_admin": false + }, + "private": false, + "html_url": "https://github.com/octocat/Hello-World", + "description": "This your first repo!", + "fork": false, + "url": "https://api.github.com/repos/octocat/Hello-World", + "archive_url": "https://api.github.com/repos/octocat/Hello-World/{archive_format}{/ref}", + "assignees_url": "https://api.github.com/repos/octocat/Hello-World/assignees{/user}", + "blobs_url": "https://api.github.com/repos/octocat/Hello-World/git/blobs{/sha}", + "branches_url": "https://api.github.com/repos/octocat/Hello-World/branches{/branch}", + "collaborators_url": "https://api.github.com/repos/octocat/Hello-World/collaborators{/collaborator}", + "comments_url": "https://api.github.com/repos/octocat/Hello-World/comments{/number}", + "commits_url": "https://api.github.com/repos/octocat/Hello-World/commits{/sha}", + "compare_url": "https://api.github.com/repos/octocat/Hello-World/compare/{base}...{head}", + "contents_url": "https://api.github.com/repos/octocat/Hello-World/contents/{+path}", + "contributors_url": "https://api.github.com/repos/octocat/Hello-World/contributors", + "deployments_url": "https://api.github.com/repos/octocat/Hello-World/deployments", + "downloads_url": "https://api.github.com/repos/octocat/Hello-World/downloads", + "events_url": "https://api.github.com/repos/octocat/Hello-World/events", + "forks_url": 
"https://api.github.com/repos/octocat/Hello-World/forks", + "git_commits_url": "https://api.github.com/repos/octocat/Hello-World/git/commits{/sha}", + "git_refs_url": "https://api.github.com/repos/octocat/Hello-World/git/refs{/sha}", + "git_tags_url": "https://api.github.com/repos/octocat/Hello-World/git/tags{/sha}", + "git_url": "git:github.com/octocat/Hello-World.git", + "issue_comment_url": "https://api.github.com/repos/octocat/Hello-World/issues/comments{/number}", + "issue_events_url": "https://api.github.com/repos/octocat/Hello-World/issues/events{/number}", + "issues_url": "https://api.github.com/repos/octocat/Hello-World/issues{/number}", + "keys_url": "https://api.github.com/repos/octocat/Hello-World/keys{/key_id}", + "labels_url": "https://api.github.com/repos/octocat/Hello-World/labels{/name}", + "languages_url": "https://api.github.com/repos/octocat/Hello-World/languages", + "merges_url": "https://api.github.com/repos/octocat/Hello-World/merges", + "milestones_url": "https://api.github.com/repos/octocat/Hello-World/milestones{/number}", + "notifications_url": "https://api.github.com/repos/octocat/Hello-World/notifications{?since,all,participating}", + "pulls_url": "https://api.github.com/repos/octocat/Hello-World/pulls{/number}", + "releases_url": "https://api.github.com/repos/octocat/Hello-World/releases{/id}", + "ssh_url": "git@github.com:octocat/Hello-World.git", + "stargazers_url": "https://api.github.com/repos/octocat/Hello-World/stargazers", + "statuses_url": "https://api.github.com/repos/octocat/Hello-World/statuses/{sha}", + "subscribers_url": "https://api.github.com/repos/octocat/Hello-World/subscribers", + "subscription_url": "https://api.github.com/repos/octocat/Hello-World/subscription", + "tags_url": "https://api.github.com/repos/octocat/Hello-World/tags", + "teams_url": "https://api.github.com/repos/octocat/Hello-World/teams", + "trees_url": "https://api.github.com/repos/octocat/Hello-World/git/trees{/sha}", + "clone_url": 
"https://github.com/octocat/Hello-World.git", + "mirror_url": "git:git.example.com/octocat/Hello-World", + "hooks_url": "https://api.github.com/repos/octocat/Hello-World/hooks", + "svn_url": "https://svn.github.com/octocat/Hello-World", + "homepage": "https://github.com", + "language": null, + "forks_count": 9, + "stargazers_count": 80, + "watchers_count": 80, + "size": 108, + "default_branch": "master", + "open_issues_count": 0, + "is_template": true, + "topics": [ + "octocat", + "atom", + "electron", + "api" + ], + "has_issues": true, + "has_projects": true, + "has_wiki": true, + "has_pages": false, + "has_downloads": true, + "archived": false, + "disabled": false, + "visibility": "public", + "pushed_at": "2011-01-26T19:06:43Z", + "created_at": "2011-01-26T19:01:12Z", + "updated_at": "2011-01-26T19:14:43Z", + "permissions": { + "admin": false, + "push": false, + "pull": true + }, + "allow_rebase_merge": true, + "template_repository": null, + "temp_clone_token": "ABTLWHOULUVAXGTRYU7OC2876QJ2O", + "allow_squash_merge": true, + "allow_auto_merge": false, + "delete_branch_on_merge": true, + "allow_merge_commit": true, + "subscribers_count": 42, + "network_count": 0, + "license": { + "key": "mit", + "name": "MIT License", + "url": "https://api.github.com/licenses/mit", + "spdx_id": "MIT", + "node_id": "MDc6TGljZW5zZW1pdA==", + "html_url": "https://github.com/licenses/mit" + }, + "forks": 1, + "open_issues": 1, + "watchers": 1 + } + }, + "base": { + "label": "octocat:master", + "ref": "master", + "sha": "6dcb09b5b57875f334f61aebed695e2e4193db5e", + "user": { + "login": "octocat", + "id": 1, + "node_id": "MDQ6VXNlcjE=", + "avatar_url": "https://github.com/images/error/octocat_happy.gif", + "gravatar_id": "", + "url": "https://api.github.com/users/octocat", + "html_url": "https://github.com/octocat", + "followers_url": "https://api.github.com/users/octocat/followers", + "following_url": "https://api.github.com/users/octocat/following{/other_user}", + "gists_url": 
"https://api.github.com/users/octocat/gists{/gist_id}", + "starred_url": "https://api.github.com/users/octocat/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/octocat/subscriptions", + "organizations_url": "https://api.github.com/users/octocat/orgs", + "repos_url": "https://api.github.com/users/octocat/repos", + "events_url": "https://api.github.com/users/octocat/events{/privacy}", + "received_events_url": "https://api.github.com/users/octocat/received_events", + "type": "User", + "site_admin": false + }, + "repo": { + "id": 1296269, + "node_id": "MDEwOlJlcG9zaXRvcnkxMjk2MjY5", + "name": "Hello-World", + "full_name": "octocat/Hello-World", + "owner": { + "login": "octocat", + "id": 1, + "node_id": "MDQ6VXNlcjE=", + "avatar_url": "https://github.com/images/error/octocat_happy.gif", + "gravatar_id": "", + "url": "https://api.github.com/users/octocat", + "html_url": "https://github.com/octocat", + "followers_url": "https://api.github.com/users/octocat/followers", + "following_url": "https://api.github.com/users/octocat/following{/other_user}", + "gists_url": "https://api.github.com/users/octocat/gists{/gist_id}", + "starred_url": "https://api.github.com/users/octocat/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/octocat/subscriptions", + "organizations_url": "https://api.github.com/users/octocat/orgs", + "repos_url": "https://api.github.com/users/octocat/repos", + "events_url": "https://api.github.com/users/octocat/events{/privacy}", + "received_events_url": "https://api.github.com/users/octocat/received_events", + "type": "User", + "site_admin": false + }, + "private": false, + "html_url": "https://github.com/octocat/Hello-World", + "description": "This your first repo!", + "fork": false, + "url": "https://api.github.com/repos/octocat/Hello-World", + "archive_url": "https://api.github.com/repos/octocat/Hello-World/{archive_format}{/ref}", + "assignees_url": 
"https://api.github.com/repos/octocat/Hello-World/assignees{/user}", + "blobs_url": "https://api.github.com/repos/octocat/Hello-World/git/blobs{/sha}", + "branches_url": "https://api.github.com/repos/octocat/Hello-World/branches{/branch}", + "collaborators_url": "https://api.github.com/repos/octocat/Hello-World/collaborators{/collaborator}", + "comments_url": "https://api.github.com/repos/octocat/Hello-World/comments{/number}", + "commits_url": "https://api.github.com/repos/octocat/Hello-World/commits{/sha}", + "compare_url": "https://api.github.com/repos/octocat/Hello-World/compare/{base}...{head}", + "contents_url": "https://api.github.com/repos/octocat/Hello-World/contents/{+path}", + "contributors_url": "https://api.github.com/repos/octocat/Hello-World/contributors", + "deployments_url": "https://api.github.com/repos/octocat/Hello-World/deployments", + "downloads_url": "https://api.github.com/repos/octocat/Hello-World/downloads", + "events_url": "https://api.github.com/repos/octocat/Hello-World/events", + "forks_url": "https://api.github.com/repos/octocat/Hello-World/forks", + "git_commits_url": "https://api.github.com/repos/octocat/Hello-World/git/commits{/sha}", + "git_refs_url": "https://api.github.com/repos/octocat/Hello-World/git/refs{/sha}", + "git_tags_url": "https://api.github.com/repos/octocat/Hello-World/git/tags{/sha}", + "git_url": "git:github.com/octocat/Hello-World.git", + "issue_comment_url": "https://api.github.com/repos/octocat/Hello-World/issues/comments{/number}", + "issue_events_url": "https://api.github.com/repos/octocat/Hello-World/issues/events{/number}", + "issues_url": "https://api.github.com/repos/octocat/Hello-World/issues{/number}", + "keys_url": "https://api.github.com/repos/octocat/Hello-World/keys{/key_id}", + "labels_url": "https://api.github.com/repos/octocat/Hello-World/labels{/name}", + "languages_url": "https://api.github.com/repos/octocat/Hello-World/languages", + "merges_url": 
"https://api.github.com/repos/octocat/Hello-World/merges", + "milestones_url": "https://api.github.com/repos/octocat/Hello-World/milestones{/number}", + "notifications_url": "https://api.github.com/repos/octocat/Hello-World/notifications{?since,all,participating}", + "pulls_url": "https://api.github.com/repos/octocat/Hello-World/pulls{/number}", + "releases_url": "https://api.github.com/repos/octocat/Hello-World/releases{/id}", + "ssh_url": "git@github.com:octocat/Hello-World.git", + "stargazers_url": "https://api.github.com/repos/octocat/Hello-World/stargazers", + "statuses_url": "https://api.github.com/repos/octocat/Hello-World/statuses/{sha}", + "subscribers_url": "https://api.github.com/repos/octocat/Hello-World/subscribers", + "subscription_url": "https://api.github.com/repos/octocat/Hello-World/subscription", + "tags_url": "https://api.github.com/repos/octocat/Hello-World/tags", + "teams_url": "https://api.github.com/repos/octocat/Hello-World/teams", + "trees_url": "https://api.github.com/repos/octocat/Hello-World/git/trees{/sha}", + "clone_url": "https://github.com/octocat/Hello-World.git", + "mirror_url": "git:git.example.com/octocat/Hello-World", + "hooks_url": "https://api.github.com/repos/octocat/Hello-World/hooks", + "svn_url": "https://svn.github.com/octocat/Hello-World", + "homepage": "https://github.com", + "language": null, + "forks_count": 9, + "stargazers_count": 80, + "watchers_count": 80, + "size": 108, + "default_branch": "master", + "open_issues_count": 0, + "is_template": true, + "topics": [ + "octocat", + "atom", + "electron", + "api" + ], + "has_issues": true, + "has_projects": true, + "has_wiki": true, + "has_pages": false, + "has_downloads": true, + "archived": false, + "disabled": false, + "visibility": "public", + "pushed_at": "2011-01-26T19:06:43Z", + "created_at": "2011-01-26T19:01:12Z", + "updated_at": "2011-01-26T19:14:43Z", + "permissions": { + "admin": false, + "push": false, + "pull": true + }, + "allow_rebase_merge": true, + 
"template_repository": null, + "temp_clone_token": "ABTLWHOULUVAXGTRYU7OC2876QJ2O", + "allow_squash_merge": true, + "allow_auto_merge": false, + "delete_branch_on_merge": true, + "allow_merge_commit": true, + "subscribers_count": 42, + "network_count": 0, + "license": { + "key": "mit", + "name": "MIT License", + "url": "https://api.github.com/licenses/mit", + "spdx_id": "MIT", + "node_id": "MDc6TGljZW5zZW1pdA==", + "html_url": "https://github.com/licenses/mit" + }, + "forks": 1, + "open_issues": 1, + "watchers": 1 + } + }, + "_links": { + "self": { + "href": "https://api.github.com/repos/octocat/Hello-World/pulls/1347" + }, + "html": { + "href": "https://github.com/octocat/Hello-World/pull/1347" + }, + "issue": { + "href": "https://api.github.com/repos/octocat/Hello-World/issues/1347" + }, + "comments": { + "href": "https://api.github.com/repos/octocat/Hello-World/issues/1347/comments" + }, + "review_comments": { + "href": "https://api.github.com/repos/octocat/Hello-World/pulls/1347/comments" + }, + "review_comment": { + "href": "https://api.github.com/repos/octocat/Hello-World/pulls/comments{/number}" + }, + "commits": { + "href": "https://api.github.com/repos/octocat/Hello-World/pulls/1347/commits" + }, + "statuses": { + "href": "https://api.github.com/repos/octocat/Hello-World/statuses/6dcb09b5b57875f334f61aebed695e2e4193db5e" + } + }, + "author_association": "OWNER", + "auto_merge": null, + "draft": false + } + ] +} diff --git a/benchalerts/tests/unit_tests/mocked_responses/GET_github_commits_no_prs_pulls.json b/benchalerts/tests/unit_tests/mocked_responses/GET_github_commits_no_prs_pulls.json new file mode 100644 index 000000000..1b9861185 --- /dev/null +++ b/benchalerts/tests/unit_tests/mocked_responses/GET_github_commits_no_prs_pulls.json @@ -0,0 +1,4 @@ +{ + "status_code": 200, + "data": [] +} diff --git a/benchalerts/tests/unit_tests/mocked_responses/POST_conbench_login.json b/benchalerts/tests/unit_tests/mocked_responses/POST_conbench_login.json new file 
mode 100644 index 000000000..9cc9d9498 --- /dev/null +++ b/benchalerts/tests/unit_tests/mocked_responses/POST_conbench_login.json @@ -0,0 +1,3 @@ +{ + "status_code": 204 +} diff --git a/benchalerts/tests/unit_tests/mocked_responses/POST_github_app_installations_1_access_tokens.json b/benchalerts/tests/unit_tests/mocked_responses/POST_github_app_installations_1_access_tokens.json new file mode 100644 index 000000000..55fc7c50d --- /dev/null +++ b/benchalerts/tests/unit_tests/mocked_responses/POST_github_app_installations_1_access_tokens.json @@ -0,0 +1,137 @@ +{ + "status_code": 201, + "data": { + "token": "ghs_16C7e42F292c6912E7710c838347Ae178B4a", + "expires_at": "2016-07-11T22:14:10Z", + "permissions": { + "issues": "write", + "contents": "read" + }, + "repository_selection": "selected", + "repositories": [ + { + "id": 1296269, + "node_id": "MDEwOlJlcG9zaXRvcnkxMjk2MjY5", + "name": "Hello-World", + "full_name": "octocat/Hello-World", + "owner": { + "login": "octocat", + "id": 1, + "node_id": "MDQ6VXNlcjE=", + "avatar_url": "https://github.com/images/error/octocat_happy.gif", + "gravatar_id": "", + "url": "https://api.github.com/users/octocat", + "html_url": "https://github.com/octocat", + "followers_url": "https://api.github.com/users/octocat/followers", + "following_url": "https://api.github.com/users/octocat/following{/other_user}", + "gists_url": "https://api.github.com/users/octocat/gists{/gist_id}", + "starred_url": "https://api.github.com/users/octocat/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/octocat/subscriptions", + "organizations_url": "https://api.github.com/users/octocat/orgs", + "repos_url": "https://api.github.com/users/octocat/repos", + "events_url": "https://api.github.com/users/octocat/events{/privacy}", + "received_events_url": "https://api.github.com/users/octocat/received_events", + "type": "User", + "site_admin": false + }, + "private": false, + "html_url": "https://github.com/octocat/Hello-World", + 
"description": "This your first repo!", + "fork": false, + "url": "https://api.github.com/repos/octocat/Hello-World", + "archive_url": "https://api.github.com/repos/octocat/Hello-World/{archive_format}{/ref}", + "assignees_url": "https://api.github.com/repos/octocat/Hello-World/assignees{/user}", + "blobs_url": "https://api.github.com/repos/octocat/Hello-World/git/blobs{/sha}", + "branches_url": "https://api.github.com/repos/octocat/Hello-World/branches{/branch}", + "collaborators_url": "https://api.github.com/repos/octocat/Hello-World/collaborators{/collaborator}", + "comments_url": "https://api.github.com/repos/octocat/Hello-World/comments{/number}", + "commits_url": "https://api.github.com/repos/octocat/Hello-World/commits{/sha}", + "compare_url": "https://api.github.com/repos/octocat/Hello-World/compare/{base}...{head}", + "contents_url": "https://api.github.com/repos/octocat/Hello-World/contents/{+path}", + "contributors_url": "https://api.github.com/repos/octocat/Hello-World/contributors", + "deployments_url": "https://api.github.com/repos/octocat/Hello-World/deployments", + "downloads_url": "https://api.github.com/repos/octocat/Hello-World/downloads", + "events_url": "https://api.github.com/repos/octocat/Hello-World/events", + "forks_url": "https://api.github.com/repos/octocat/Hello-World/forks", + "git_commits_url": "https://api.github.com/repos/octocat/Hello-World/git/commits{/sha}", + "git_refs_url": "https://api.github.com/repos/octocat/Hello-World/git/refs{/sha}", + "git_tags_url": "https://api.github.com/repos/octocat/Hello-World/git/tags{/sha}", + "git_url": "git:github.com/octocat/Hello-World.git", + "issue_comment_url": "https://api.github.com/repos/octocat/Hello-World/issues/comments{/number}", + "issue_events_url": "https://api.github.com/repos/octocat/Hello-World/issues/events{/number}", + "issues_url": "https://api.github.com/repos/octocat/Hello-World/issues{/number}", + "keys_url": 
"https://api.github.com/repos/octocat/Hello-World/keys{/key_id}", + "labels_url": "https://api.github.com/repos/octocat/Hello-World/labels{/name}", + "languages_url": "https://api.github.com/repos/octocat/Hello-World/languages", + "merges_url": "https://api.github.com/repos/octocat/Hello-World/merges", + "milestones_url": "https://api.github.com/repos/octocat/Hello-World/milestones{/number}", + "notifications_url": "https://api.github.com/repos/octocat/Hello-World/notifications{?since,all,participating}", + "pulls_url": "https://api.github.com/repos/octocat/Hello-World/pulls{/number}", + "releases_url": "https://api.github.com/repos/octocat/Hello-World/releases{/id}", + "ssh_url": "git@github.com:octocat/Hello-World.git", + "stargazers_url": "https://api.github.com/repos/octocat/Hello-World/stargazers", + "statuses_url": "https://api.github.com/repos/octocat/Hello-World/statuses/{sha}", + "subscribers_url": "https://api.github.com/repos/octocat/Hello-World/subscribers", + "subscription_url": "https://api.github.com/repos/octocat/Hello-World/subscription", + "tags_url": "https://api.github.com/repos/octocat/Hello-World/tags", + "teams_url": "https://api.github.com/repos/octocat/Hello-World/teams", + "trees_url": "https://api.github.com/repos/octocat/Hello-World/git/trees{/sha}", + "clone_url": "https://github.com/octocat/Hello-World.git", + "mirror_url": "git:git.example.com/octocat/Hello-World", + "hooks_url": "https://api.github.com/repos/octocat/Hello-World/hooks", + "svn_url": "https://svn.github.com/octocat/Hello-World", + "homepage": "https://github.com", + "language": null, + "forks_count": 9, + "stargazers_count": 80, + "watchers_count": 80, + "size": 108, + "default_branch": "master", + "open_issues_count": 0, + "is_template": true, + "topics": [ + "octocat", + "atom", + "electron", + "api" + ], + "has_issues": true, + "has_projects": true, + "has_wiki": true, + "has_pages": false, + "has_downloads": true, + "archived": false, + "disabled": false, + 
"visibility": "public", + "pushed_at": "2011-01-26T19:06:43Z", + "created_at": "2011-01-26T19:01:12Z", + "updated_at": "2011-01-26T19:14:43Z", + "permissions": { + "admin": false, + "push": false, + "pull": true + }, + "allow_rebase_merge": true, + "template_repository": null, + "temp_clone_token": "ABTLWHOULUVAXGTRYU7OC2876QJ2O", + "allow_squash_merge": true, + "allow_auto_merge": false, + "delete_branch_on_merge": true, + "allow_merge_commit": true, + "subscribers_count": 42, + "network_count": 0, + "license": { + "key": "mit", + "name": "MIT License", + "url": "https://api.github.com/licenses/mit", + "spdx_id": "MIT", + "node_id": "MDc6TGljZW5zZW1pdA==", + "html_url": "https://github.com/licenses/mit" + }, + "forks": 1, + "open_issues": 1, + "watchers": 1 + } + ] + } +} diff --git a/benchalerts/tests/unit_tests/mocked_responses/POST_github_check-runs.json b/benchalerts/tests/unit_tests/mocked_responses/POST_github_check-runs.json new file mode 100644 index 000000000..29853c29e --- /dev/null +++ b/benchalerts/tests/unit_tests/mocked_responses/POST_github_check-runs.json @@ -0,0 +1,93 @@ +{ + "status_code": 201, + "data": { + "id": 4, + "head_sha": "ce587453ced02b1526dfb4cb910479d431683101", + "node_id": "MDg6Q2hlY2tSdW40", + "external_id": "42", + "url": "https://api.github.com/repos/github/hello-world/check-runs/4", + "html_url": "https://github.com/github/hello-world/runs/4", + "details_url": "https://example.com", + "status": "completed", + "conclusion": "neutral", + "started_at": "2018-05-04T01:14:52Z", + "completed_at": "2018-05-04T01:14:52Z", + "output": { + "title": "Mighty Readme report", + "summary": "Testing something", + "text": "You may have some misspelled words on lines 2 and 4. 
You also may want to add a section in your README about how to install your app.", + "annotations_count": 2, + "annotations_url": "https://api.github.com/repos/github/hello-world/check-runs/4/annotations" + }, + "name": "mighty_readme", + "check_suite": { + "id": 5 + }, + "app": { + "id": 1, + "slug": "octoapp", + "node_id": "MDExOkludGVncmF0aW9uMQ==", + "owner": { + "login": "github", + "id": 1, + "node_id": "MDEyOk9yZ2FuaXphdGlvbjE=", + "url": "https://api.github.com/orgs/github", + "repos_url": "https://api.github.com/orgs/github/repos", + "events_url": "https://api.github.com/orgs/github/events", + "avatar_url": "https://github.com/images/error/octocat_happy.gif", + "gravatar_id": "", + "html_url": "https://github.com/octocat", + "followers_url": "https://api.github.com/users/octocat/followers", + "following_url": "https://api.github.com/users/octocat/following{/other_user}", + "gists_url": "https://api.github.com/users/octocat/gists{/gist_id}", + "starred_url": "https://api.github.com/users/octocat/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/octocat/subscriptions", + "organizations_url": "https://api.github.com/users/octocat/orgs", + "received_events_url": "https://api.github.com/users/octocat/received_events", + "type": "User", + "site_admin": true + }, + "name": "Octocat App", + "description": "", + "external_url": "https://example.com", + "html_url": "https://github.com/apps/octoapp", + "created_at": "2017-07-08T16:18:44-04:00", + "updated_at": "2017-07-08T16:18:44-04:00", + "permissions": { + "metadata": "read", + "contents": "read", + "issues": "write", + "single_file": "write" + }, + "events": [ + "push", + "pull_request" + ] + }, + "pull_requests": [ + { + "url": "https://api.github.com/repos/github/hello-world/pulls/1", + "id": 1934, + "number": 3956, + "head": { + "ref": "say-hello", + "sha": "3dca65fa3e8d4b3da3f3d056c59aee1c50f41390", + "repo": { + "id": 526, + "url": 
"https://api.github.com/repos/github/hello-world", + "name": "hello-world" + } + }, + "base": { + "ref": "master", + "sha": "e7fdf7640066d71ad16a86fbcbb9c6a10a18af4f", + "repo": { + "id": 526, + "url": "https://api.github.com/repos/github/hello-world", + "name": "hello-world" + } + } + } + ] + } +} diff --git a/benchalerts/tests/unit_tests/mocked_responses/POST_github_issues_1347_comments.json b/benchalerts/tests/unit_tests/mocked_responses/POST_github_issues_1347_comments.json new file mode 100644 index 000000000..6801b5ebd --- /dev/null +++ b/benchalerts/tests/unit_tests/mocked_responses/POST_github_issues_1347_comments.json @@ -0,0 +1,34 @@ +{ + "status_code": 201, + "data": { + "id": 1, + "node_id": "MDEyOklzc3VlQ29tbWVudDE=", + "url": "https://api.github.com/repos/octocat/Hello-World/issues/comments/1", + "html_url": "https://github.com/octocat/Hello-World/issues/1347#issuecomment-1", + "body": "test", + "user": { + "login": "octocat", + "id": 1, + "node_id": "MDQ6VXNlcjE=", + "avatar_url": "https://github.com/images/error/octocat_happy.gif", + "gravatar_id": "", + "url": "https://api.github.com/users/octocat", + "html_url": "https://github.com/octocat", + "followers_url": "https://api.github.com/users/octocat/followers", + "following_url": "https://api.github.com/users/octocat/following{/other_user}", + "gists_url": "https://api.github.com/users/octocat/gists{/gist_id}", + "starred_url": "https://api.github.com/users/octocat/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/octocat/subscriptions", + "organizations_url": "https://api.github.com/users/octocat/orgs", + "repos_url": "https://api.github.com/users/octocat/repos", + "events_url": "https://api.github.com/users/octocat/events{/privacy}", + "received_events_url": "https://api.github.com/users/octocat/received_events", + "type": "User", + "site_admin": false + }, + "created_at": "2011-04-14T16:00:49Z", + "updated_at": "2011-04-14T16:00:49Z", + "issue_url": 
"https://api.github.com/repos/octocat/Hello-World/issues/1347", + "author_association": "COLLABORATOR" + } +} diff --git a/benchalerts/tests/unit_tests/mocked_responses/POST_github_statuses_abc.json b/benchalerts/tests/unit_tests/mocked_responses/POST_github_statuses_abc.json new file mode 100644 index 000000000..20c12da65 --- /dev/null +++ b/benchalerts/tests/unit_tests/mocked_responses/POST_github_statuses_abc.json @@ -0,0 +1,35 @@ +{ + "status_code": 201, + "data": { + "url": "https://api.github.com/repos/octocat/Hello-World/statuses/6dcb09b5b57875f334f61aebed695e2e4193db5e", + "avatar_url": "https://github.com/images/error/hubot_happy.gif", + "id": 1, + "node_id": "MDY6U3RhdHVzMQ==", + "state": "success", + "description": "Testing something", + "target_url": "https://ci.example.com/1000/output", + "context": "continuous-integration/jenkins", + "created_at": "2012-07-20T01:19:13Z", + "updated_at": "2012-07-20T01:19:13Z", + "creator": { + "login": "octocat", + "id": 1, + "node_id": "MDQ6VXNlcjE=", + "avatar_url": "https://github.com/images/error/octocat_happy.gif", + "gravatar_id": "", + "url": "https://api.github.com/users/octocat", + "html_url": "https://github.com/octocat", + "followers_url": "https://api.github.com/users/octocat/followers", + "following_url": "https://api.github.com/users/octocat/following{/other_user}", + "gists_url": "https://api.github.com/users/octocat/gists{/gist_id}", + "starred_url": "https://api.github.com/users/octocat/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/octocat/subscriptions", + "organizations_url": "https://api.github.com/users/octocat/orgs", + "repos_url": "https://api.github.com/users/octocat/repos", + "events_url": "https://api.github.com/users/octocat/events{/privacy}", + "received_events_url": "https://api.github.com/users/octocat/received_events", + "type": "User", + "site_admin": false + } + } +} diff --git 
a/benchalerts/tests/unit_tests/mocked_responses/POST_github_statuses_no_baseline.json b/benchalerts/tests/unit_tests/mocked_responses/POST_github_statuses_no_baseline.json new file mode 100644 index 000000000..1b162c9f5 --- /dev/null +++ b/benchalerts/tests/unit_tests/mocked_responses/POST_github_statuses_no_baseline.json @@ -0,0 +1,35 @@ +{ + "status_code": 201, + "data": { + "url": "https://api.github.com/repos/octocat/Hello-World/statuses/6dcb09b5b57875f334f61aebed695e2e4193db5e", + "avatar_url": "https://github.com/images/error/hubot_happy.gif", + "id": 1, + "node_id": "MDY6U3RhdHVzMQ==", + "state": "success", + "description": "Could not find any baseline runs to compare to", + "target_url": "https://ci.example.com/1000/output", + "context": "continuous-integration/jenkins", + "created_at": "2012-07-20T01:19:13Z", + "updated_at": "2012-07-20T01:19:13Z", + "creator": { + "login": "octocat", + "id": 1, + "node_id": "MDQ6VXNlcjE=", + "avatar_url": "https://github.com/images/error/octocat_happy.gif", + "gravatar_id": "", + "url": "https://api.github.com/users/octocat", + "html_url": "https://github.com/octocat", + "followers_url": "https://api.github.com/users/octocat/followers", + "following_url": "https://api.github.com/users/octocat/following{/other_user}", + "gists_url": "https://api.github.com/users/octocat/gists{/gist_id}", + "starred_url": "https://api.github.com/users/octocat/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/octocat/subscriptions", + "organizations_url": "https://api.github.com/users/octocat/orgs", + "repos_url": "https://api.github.com/users/octocat/repos", + "events_url": "https://api.github.com/users/octocat/events{/privacy}", + "received_events_url": "https://api.github.com/users/octocat/received_events", + "type": "User", + "site_admin": false + } + } +} diff --git a/benchalerts/tests/unit_tests/mocks.py b/benchalerts/tests/unit_tests/mocks.py new file mode 100644 index 000000000..f495a463a --- /dev/null +++ 
b/benchalerts/tests/unit_tests/mocks.py @@ -0,0 +1,76 @@ +# Copyright (c) 2022, Voltron Data. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import pathlib + +import requests +from requests.adapters import HTTPAdapter + +from benchalerts.log import log + +response_dir = pathlib.Path(__file__).parent / "mocked_responses" + + +class MockResponse(requests.Response): + def __init__(self, status_code, data=None): + super().__init__() + self.status_code = status_code + if data is not None: + self._content = str.encode(json.dumps(data)) + + @classmethod + def from_file(cls, file: pathlib.Path): + with open(file, "r") as f: + response: dict = json.load(f) + return cls(status_code=response["status_code"], data=response.get("data")) + + +class MockAdapter(HTTPAdapter): + @staticmethod + def clean_base_url(url: str) -> str: + bases = { + "https://api.github.com/repos/some/repo": "github", + "https://api.github.com/app": "github_app", + "https://conbench.biz/api": "conbench", + } + for base_url, base_name in bases.items(): + if url.startswith(base_url): + clean_path = url.split(base_url + "/")[1] + for char in ["/", "&", "?", "=", ".", "__", "__"]: + clean_path = clean_path.replace(char, "_") + if clean_path.endswith("_"): + clean_path = clean_path[:-1] + return base_name + "_" + clean_path if clean_path else base_name + + raise Exception(f"No base URL was mocked for this request: {url}") + + def send(self, *args, **kwargs): + req: requests.PreparedRequest = 
args[0] + log.info(f"Sent request {req}({req.__dict__}) with kwargs {kwargs}") + + # to help with test_workflows.py, log the markdowns that were posted + if req.url.endswith("check-runs"): + body = json.loads(req.body) + log.info("Summary: " + body["output"]["summary"]) + log.info("Details: " + str(body["output"].get("text"))) + + method = req.method + clean_url = self.clean_base_url(req.url) + response_path = response_dir / f"{method}_{clean_url}.json" + + if not response_path.exists(): + raise Exception(f"Mock response not found at {response_path}") + + return MockResponse.from_file(response_path) diff --git a/benchalerts/tests/unit_tests/test_clients.py b/benchalerts/tests/unit_tests/test_clients.py new file mode 100644 index 000000000..dd6a11968 --- /dev/null +++ b/benchalerts/tests/unit_tests/test_clients.py @@ -0,0 +1,123 @@ +# Copyright (c) 2022, Voltron Data. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import pytest +import requests +from _pytest.logging import LogCaptureFixture + +from benchalerts.clients import ( + CheckStatus, + ConbenchClient, + GitHubRepoClient, + StatusState, +) + +from .mocks import MockAdapter + + +@pytest.mark.parametrize("github_auth", ["pat", "app"], indirect=True) +class TestGitHubRepoClient: + @property + def gh(self): + return GitHubRepoClient("some/repo", adapter=MockAdapter()) + + def test_create_pull_request_comment_with_number(self, github_auth): + output = self.gh.create_pull_request_comment(comment="test", pull_number=1347) + assert output["body"] == "test" + + def test_create_pull_request_comment_with_sha(self, github_auth): + output = self.gh.create_pull_request_comment(comment="test", commit_sha="abc") + assert output["body"] == "test" + + def test_create_pull_request_comment_bad_input(self, github_auth): + with pytest.raises(ValueError, match="missing"): + self.gh.create_pull_request_comment(comment="test") + + def test_comment_with_sha_fails_with_no_matching_prs(self, github_auth): + with pytest.raises(ValueError, match="pull request"): + self.gh.create_pull_request_comment(comment="test", commit_sha="no_prs") + + def test_update_commit_status(self, github_auth): + res = self.gh.update_commit_status( + commit_sha="abc", + title="tests", + description="Testing something", + state=StatusState.SUCCESS, + details_url="https://conbench.biz/", + ) + assert res["description"] == "Testing something" + + def test_update_commit_status_bad_state(self, github_auth): + with pytest.raises(TypeError, match="StatusState"): + self.gh.update_commit_status( + commit_sha="abc", + title="tests", + description="Testing something", + state="sorta working", + details_url="https://conbench.biz/", + ) + + @pytest.mark.parametrize("in_progress", [True, False]) + def test_update_check(self, github_auth, in_progress): + res = self.gh.update_check( + name="tests", + commit_sha="abc", + status=CheckStatus.IN_PROGRESS if in_progress else 
CheckStatus.SUCCESS, + title="This was good", + summary="Testing something", + details="Some details", + details_url="https://conbench.biz/", + ) + assert res["output"]["summary"] == "Testing something" + + def test_update_check_bad_status(self, github_auth): + with pytest.raises(TypeError, match="CheckStatus"): + self.gh.update_check(name="tests", commit_sha="abc", status="okay") + + +@pytest.mark.parametrize("github_auth", ["none"], indirect=True) +class TestMissingGithubEnvVars: + def test_no_vars_at_all(self, github_auth): + with pytest.raises(ValueError, match="GITHUB_API_TOKEN"): + TestGitHubRepoClient().gh + + def test_no_app_id(self, github_auth, monkeypatch: pytest.MonkeyPatch): + monkeypatch.setenv("GITHUB_APP_PRIVATE_KEY", "private key") + with pytest.raises(ValueError, match="GITHUB_APP_ID"): + TestGitHubRepoClient().gh + + def test_no_app_pk(self, github_auth, monkeypatch: pytest.MonkeyPatch): + monkeypatch.setenv("GITHUB_APP_ID", "123456") + with pytest.raises(ValueError, match="GITHUB_APP_PRIVATE_KEY"): + TestGitHubRepoClient().gh + + +class TestConbenchClient: + @property + def cb(self): + return ConbenchClient(adapter=MockAdapter()) + + def test_conbench_fails_missing_env(self, missing_conbench_env): + with pytest.raises(ValueError, match="CONBENCH_URL"): + self.cb + + @pytest.mark.parametrize("path", ["/error_with_content", "/error_without_content"]) + def test_client_error_handling(self, conbench_env, path, caplog: LogCaptureFixture): + with pytest.raises(requests.HTTPError, match="404"): + self.cb.get(path) + + if path == "/error_with_content": + assert 'Response content: {"code":' in caplog.text + else: + assert "Response content: None" in caplog.text diff --git a/benchalerts/tests/unit_tests/test_parse_conbench.py b/benchalerts/tests/unit_tests/test_parse_conbench.py new file mode 100644 index 000000000..311963324 --- /dev/null +++ b/benchalerts/tests/unit_tests/test_parse_conbench.py @@ -0,0 +1,190 @@ +# Copyright (c) 2022, Voltron Data. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pathlib +from copy import deepcopy + +import pytest +from _pytest.fixtures import SubRequest + +from benchalerts.clients import CheckStatus +from benchalerts.parse_conbench import ( + benchmarks_with_errors, + benchmarks_with_z_regressions, + regression_check_status, + regression_details, + regression_summary, +) +from benchalerts.talk_to_conbench import RunComparison + +from .mocks import MockResponse, response_dir + + +@pytest.fixture +def mock_comparisons(request: SubRequest): + how: str = request.param + + def _response(basename: str): + """Get a mocked response.""" + filename = basename + ".json" + return MockResponse.from_file(response_dir / filename).json() + + def _dup_info(basename: str): + """Get a mocked response and duplicate it with a different ID.""" + info = _response(basename) + info_2 = deepcopy(info) + info_2["id"] += "_2" + return info, info_2 + + contender_info, contender_info_2 = _dup_info("GET_conbench_runs_some_contender") + baseline_info, baseline_info_2 = _dup_info("GET_conbench_runs_some_baseline") + no_baseline_info = _response("GET_conbench_runs_contender_wo_base") + compare_results_noregressions = _response( + "GET_conbench_compare_runs_some_baseline_some_contender_threshold_z_500" + ) + compare_results_regressions = _response( + "GET_conbench_compare_runs_some_baseline_some_contender" + ) + benchmark_results = _response("GET_conbench_benchmarks_run_id_contender_wo_base") + + if how 
== "noregressions": + return [ + RunComparison( + contender_info=contender_info, + baseline_info=baseline_info, + compare_results=compare_results_noregressions, + ), + RunComparison( + contender_info=contender_info_2, + baseline_info=baseline_info_2, + compare_results=compare_results_noregressions, + ), + ] + elif how == "regressions": + return [ + RunComparison( + contender_info=contender_info, + baseline_info=baseline_info, + compare_results=compare_results_regressions, + ), + RunComparison( + contender_info=contender_info_2, + baseline_info=baseline_info_2, + compare_results=compare_results_regressions, + ), + ] + elif how == "no_baseline_without_errors": + no_baseline_info["has_errors"] = False + benchmark_results[1]["error"] = None + return [ + RunComparison( + contender_info=no_baseline_info, benchmark_results=benchmark_results + ), + ] + elif how == "no_baseline": + return [ + RunComparison( + contender_info=no_baseline_info, benchmark_results=benchmark_results + ), + ] + + +def get_expected_markdown(filename: str) -> str: + if not filename: + return None + file = pathlib.Path(__file__).parent / "expected_md" / (filename + ".md") + with open(file, "r") as f: + return f.read() + + +@pytest.mark.parametrize( + ["mock_comparisons", "expected_len"], + [ + ("noregressions", 2), + ("regressions", 2), + ("no_baseline", 1), + ("no_baseline_without_errors", 0), + ], + indirect=["mock_comparisons"], +) +def test_benchmarks_with_errors(mock_comparisons, expected_len): + actual = benchmarks_with_errors(mock_comparisons) + assert len(actual) == expected_len + + +@pytest.mark.parametrize( + ["mock_comparisons", "expected_len"], + [ + ("noregressions", 0), + ("regressions", 2), + ("no_baseline", 0), + ("no_baseline_without_errors", 0), + ], + indirect=["mock_comparisons"], +) +def test_benchmarks_with_z_regressions(mock_comparisons, expected_len): + actual = benchmarks_with_z_regressions(mock_comparisons) + assert len(actual) == expected_len + + +@pytest.mark.parametrize( 
+ ["mock_comparisons", "expected_md"], + [ + ("noregressions", "summary_noregressions"), + ("regressions", "summary_regressions"), + ("no_baseline", "summary_nobaseline"), + ], + indirect=["mock_comparisons"], +) +def test_regression_summary(mock_comparisons, expected_md): + actual = regression_summary(mock_comparisons, warn_if_baseline_isnt_parent=True) + expected = get_expected_markdown(expected_md) + assert ( + actual.strip() == expected.strip() + ), f"see tests/unit_tests/expected_md/{expected_md}.md" + + +@pytest.mark.parametrize( + ["mock_comparisons", "expected_md"], + [ + ("noregressions", "details_noregressions"), + ("regressions", "details_regressions"), + ("no_baseline", None), + ("no_baseline_without_errors", None), + ], + indirect=["mock_comparisons"], +) +def test_regression_details(mock_comparisons, expected_md): + actual = regression_details(mock_comparisons) + expected = get_expected_markdown(expected_md) + if expected: + assert ( + actual.strip() == expected.strip() + ), f"see tests/unit_tests/expected_md/{expected_md}.md" + else: + assert actual is expected + + +@pytest.mark.parametrize( + ["mock_comparisons", "expected_status"], + [ + ("noregressions", CheckStatus.SUCCESS), + ("regressions", CheckStatus.FAILURE), + ("no_baseline", CheckStatus.ACTION_REQUIRED), + ("no_baseline_without_errors", CheckStatus.SKIPPED), + ], + indirect=["mock_comparisons"], +) +def test_regression_check_status(mock_comparisons, expected_status): + assert regression_check_status(mock_comparisons) == expected_status diff --git a/benchalerts/tests/unit_tests/test_talk_to_conbench.py b/benchalerts/tests/unit_tests/test_talk_to_conbench.py new file mode 100644 index 000000000..57e3857f4 --- /dev/null +++ b/benchalerts/tests/unit_tests/test_talk_to_conbench.py @@ -0,0 +1,51 @@ +# Copyright (c) 2022, Voltron Data. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest +from _pytest.logging import LogCaptureFixture + +from benchalerts.clients import ConbenchClient +from benchalerts.talk_to_conbench import get_comparison_to_baseline + +from .mocks import MockAdapter + + +@pytest.fixture +def conbench(conbench_env): + return ConbenchClient(adapter=MockAdapter()) + + +@pytest.mark.parametrize("z_score_threshold", [None, 500]) +def test_get_comparison_to_baseline(conbench, z_score_threshold): + comparisons = get_comparison_to_baseline(conbench, "abc", z_score_threshold) + assert isinstance(comparisons, list) + assert len(comparisons) == 1 + assert comparisons[0].compare_link + assert len(comparisons[0].compare_results) == 2 + + +def test_comparison_fails_when_no_runs(conbench): + with pytest.raises(ValueError, match="runs"): + get_comparison_to_baseline(conbench, "no_runs") + + +def test_comparison_warns_when_no_baseline(conbench, caplog: LogCaptureFixture): + comparisons = get_comparison_to_baseline(conbench, "no_baseline") + assert isinstance(comparisons, list) + assert len(comparisons) == 1 + assert comparisons[0].contender_link + assert not comparisons[0].compare_link + assert not comparisons[0].baseline_info + assert len(comparisons[0].benchmark_results) == 2 + assert "could not find a baseline run" in caplog.text diff --git a/benchalerts/tests/unit_tests/test_version.py b/benchalerts/tests/unit_tests/test_version.py new file mode 100644 index 000000000..75ad1d3ec --- /dev/null +++ b/benchalerts/tests/unit_tests/test_version.py @@ -0,0 +1,19 @@ +# Copyright (c) 2022, Voltron Data. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import benchalerts + + +def test_version_exists(): + assert benchalerts.__version__ diff --git a/benchalerts/tests/unit_tests/test_workflows.py b/benchalerts/tests/unit_tests/test_workflows.py new file mode 100644 index 000000000..aa62515f5 --- /dev/null +++ b/benchalerts/tests/unit_tests/test_workflows.py @@ -0,0 +1,156 @@ +# Copyright (c) 2022, Voltron Data. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pathlib +from typing import List, Tuple + +import pytest +from _pytest.logging import LogCaptureFixture + +import benchalerts.workflows as flows +from benchalerts.clients import ConbenchClient, GitHubRepoClient + +from .mocks import MockAdapter + + +def check_posted_markdown( + caplog: LogCaptureFixture, expected_markdowns: List[Tuple[str, str]] +): + """After we run a workflow, search through the logs for what markdowns we + mock-posted to GitHub, and assert they are what we expected. 
+ + expected_markdowns should look like [(summary0, details0), (summary1, details1), ] + """ + actual_summaries = [ + log_record.message[9:] + for log_record in caplog.records + if log_record.levelname == "INFO" + and log_record.filename == "mocks.py" + and log_record.message.startswith("Summary: ") + ] + actual_detailses = [ + log_record.message[9:] + for log_record in caplog.records + if log_record.levelname == "INFO" + and log_record.filename == "mocks.py" + and log_record.message.startswith("Details: ") + ] + assert len(expected_markdowns) == len(actual_summaries) == len(actual_detailses) + + for ( + (expected_summary_filename, expected_details_filename), + (actual_summary, actual_details), + ) in zip(expected_markdowns, zip(actual_summaries, actual_detailses)): + base_dir = pathlib.Path(__file__).parent / "expected_md" + + with open(base_dir / (expected_summary_filename + ".md"), "r") as f: + expected_summary = f.read() + assert ( + expected_summary.strip() == actual_summary.strip() + ), f"see tests/unit_tests/expected_md/{expected_summary_filename}.md" + + if expected_details_filename is None: + assert actual_details == "None" + else: + with open(base_dir / (expected_details_filename + ".md"), "r") as f: + expected_details = f.read() + assert ( + expected_details.strip() == actual_details.strip() + ), f"see tests/unit_tests/expected_md/{expected_details_filename}.md" + + +@pytest.mark.parametrize("z_score_threshold", [None, 500]) +@pytest.mark.parametrize("github_auth", ["pat", "app"], indirect=True) +@pytest.mark.parametrize( + "workflow", + [ + flows.update_github_status_based_on_regressions, + flows.update_github_check_based_on_regressions, + ], +) +def test_flows( + github_auth, conbench_env, z_score_threshold, workflow, caplog: LogCaptureFixture +): + caplog.set_level("DEBUG") + gh = GitHubRepoClient("some/repo", adapter=MockAdapter()) + cb = ConbenchClient(adapter=MockAdapter()) + + res = workflow( + contender_sha="abc", 
z_score_threshold=z_score_threshold, github=gh, conbench=cb + ) + if workflow == flows.update_github_status_based_on_regressions: + assert res["description"] == "Testing something" + elif workflow == flows.update_github_check_based_on_regressions: + if z_score_threshold: + expected_markdowns = [ + ("summary_pending", None), + ("summary_workflow_noregressions", "details_workflow_noregressions"), + ] + else: + expected_markdowns = [ + ("summary_pending", None), + ("summary_workflow_regressions", "details_workflow_regressions"), + ] + check_posted_markdown(caplog, expected_markdowns) + + +@pytest.mark.parametrize("github_auth", ["pat", "app"], indirect=True) +@pytest.mark.parametrize( + "workflow", + [ + flows.update_github_status_based_on_regressions, + flows.update_github_check_based_on_regressions, + ], +) +def test_flows_failure( + github_auth, missing_conbench_env, workflow, caplog: LogCaptureFixture +): + caplog.set_level("DEBUG") + gh = GitHubRepoClient("some/repo", adapter=MockAdapter()) + + with pytest.raises(ValueError, match="not found"): + workflow(contender_sha="abc", github=gh) + + if workflow == flows.update_github_check_based_on_regressions: + expected_markdowns = [ + ("summary_pending", None), + ("summary_error", "details_error"), + ] + check_posted_markdown(caplog, expected_markdowns) + + +@pytest.mark.parametrize("github_auth", ["pat", "app"], indirect=True) +@pytest.mark.parametrize( + "workflow", + [ + flows.update_github_status_based_on_regressions, + flows.update_github_check_based_on_regressions, + ], +) +def test_flows_no_baseline( + github_auth, conbench_env, workflow, caplog: LogCaptureFixture +): + caplog.set_level("DEBUG") + gh = GitHubRepoClient("some/repo", adapter=MockAdapter()) + cb = ConbenchClient(adapter=MockAdapter()) + + res = workflow(contender_sha="no_baseline", github=gh, conbench=cb) + if workflow == flows.update_github_status_based_on_regressions: + assert res["description"] == "Could not find any baseline runs to compare to" 
+ elif workflow == flows.update_github_check_based_on_regressions: + expected_markdowns = [ + ("summary_pending_nobaseline", None), + ("summary_nobaseline", None), + ] + check_posted_markdown(caplog, expected_markdowns)