diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml
index f30b98bb4f..6a8fa4a446 100644
--- a/.github/workflows/test.yaml
+++ b/.github/workflows/test.yaml
@@ -26,6 +26,9 @@ jobs:
test-general:
name: "Pytest"
runs-on: ubuntu-latest
+ defaults:
+ run:
+ shell: bash -el {0}
strategy:
matrix:
python-version:
@@ -34,21 +37,28 @@ jobs:
- "3.10"
- "3.11"
fail-fast: false
+ concurrency:
+ group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}-${{ matrix.python-version }}
+ cancel-in-progress: true
steps:
- name: "Checkout Infrastructure"
uses: actions/checkout@v3
with:
fetch-depth: 0
+
- name: Setup miniconda
uses: conda-incubator/setup-miniconda@v2
with:
+ auto-update-conda: true
python-version: ${{ matrix.python-version }}
- channel-priority: strict
- channels: conda-forge
+ channels: conda-forge,defaults
+ activate-environment: nebari-dev
+
- name: Install Nebari
run: |
+ python --version
pip install -e .[dev]
- conda install --quiet --yes conda-build
+
- name: Test Nebari
run: |
pytest --version
diff --git a/.github/workflows/test_aws_integration.yaml b/.github/workflows/test_aws_integration.yaml
index dcdfc4fe32..fa1a2332df 100644
--- a/.github/workflows/test_aws_integration.yaml
+++ b/.github/workflows/test_aws_integration.yaml
@@ -56,7 +56,6 @@ jobs:
- name: Install Nebari
run: |
pip install .[dev]
- conda install --quiet --yes conda-build
playwright install
- name: Retrieve secret from Vault
diff --git a/.github/workflows/test_conda_build.yaml b/.github/workflows/test_conda_build.yaml
new file mode 100644
index 0000000000..e34363d9a3
--- /dev/null
+++ b/.github/workflows/test_conda_build.yaml
@@ -0,0 +1,54 @@
+name: "Test Conda Build"
+
+on:
+ pull_request:
+ paths:
+ - ".github/workflows/test_conda_build.yaml"
+ - "pyproject.toml"
+ push:
+ branches:
+ - main
+ - develop
+ - release/\d{4}.\d{1,2}.\d{1,2}
+ paths:
+ - ".github/workflows/test_conda_build.yaml"
+ - "pyproject.toml"
+
+jobs:
+ test-conda-build:
+ runs-on: ubuntu-latest
+ defaults:
+ run:
+ shell: bash -el {0}
+ concurrency:
+ group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
+ cancel-in-progress: true
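+
+ # The steps below mirror the conda-forge/nebari-feedstock release process:
+ # build the sdist, generate the meta.yaml recipe with grayskull, then run conda build.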
+ steps:
+ - name: "Checkout Infrastructure"
+ uses: actions/checkout@v3
+ with:
+ fetch-depth: 0
+
+ - name: Setup miniconda
+ uses: conda-incubator/setup-miniconda@v2
+ with:
+ auto-update-conda: true
+ python-version: 3.8
+ channels: conda-forge
+ activate-environment: nebari-dev
+
+ - name: Install dependencies
+ run: |
+ conda install build grayskull conda-build
+
+ - name: Generate sdist
+ run: |
+ python -m build --sdist
+
+ - name: Generate meta.yaml
+ run: |
+ python -m grayskull pypi dist/*.tar.gz
+
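+ # grayskull writes the generated recipe to ./nebari/meta.yaml, which is the
+ # directory consumed by the conda build step below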
+ - name: Build conda package
+ run: |
+ conda build nebari
diff --git a/.github/workflows/test_do_integration.yaml b/.github/workflows/test_do_integration.yaml
new file mode 100644
index 0000000000..dbe10a3028
--- /dev/null
+++ b/.github/workflows/test_do_integration.yaml
@@ -0,0 +1,83 @@
+name: test-do-integration
+
+on:
+ schedule:
+ - cron: "0 0 * * MON"
+ workflow_dispatch:
+ inputs:
+ branch:
+ description: 'Nebari branch to deploy, test, destroy'
+ required: true
+ default: develop
+ type: string
+ image-tag:
+ description: 'Nebari image tag created by the nebari-docker-images repo'
+ required: true
+ default: main
+ type: string
+ tf-log-level:
+ description: 'Change Terraform log levels'
+ required: false
+ default: info
+ type: choice
+ options:
+ - info
+ - warn
+ - debug
+ - trace
+ - error
+
+env:
+ NEBARI_GH_BRANCH: ${{ github.event.inputs.branch || 'develop' }}
+ NEBARI_IMAGE_TAG: ${{ github.event.inputs.image-tag || 'main' }}
+ TF_LOG: ${{ github.event.inputs.tf-log-level || 'info' }}
+
+
+jobs:
+ test-do-integration:
+ runs-on: ubuntu-latest
+ permissions:
+ id-token: write
+ contents: read
+ pull-requests: write
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v3
+ with:
+ ref: ${{ env.NEBARI_GH_BRANCH }}
+ fetch-depth: 0
+ - name: Set up Python
+ uses: actions/setup-python@v4
+ with:
+ python-version: 3.11
+
+ - name: Install Nebari
+ run: |
+ pip install .[dev]
+ playwright install
+
+ - name: Retrieve secret from Vault
+ uses: hashicorp/vault-action@v2.5.0
+ with:
+ method: jwt
+ url: "https://quansight-vault-public-vault-b2379fa7.d415e30e.z1.hashicorp.cloud:8200"
+ namespace: "admin/quansight"
+ role: "repository-nebari-dev-nebari-role"
+ secrets: |
+ kv/data/repository/nebari-dev/nebari/shared_secrets DIGITALOCEAN_TOKEN | DIGITALOCEAN_TOKEN;
+ kv/data/repository/nebari-dev/nebari/cloudflare/internal-devops@quansight.com/nebari-dev-ci token | CLOUDFLARE_TOKEN;
+
+ - name: Set Environment DO
+ run: |
+ echo "SPACES_ACCESS_KEY_ID=${{ secrets.SPACES_ACCESS_KEY_ID }}" >> $GITHUB_ENV
+ echo "SPACES_SECRET_ACCESS_KEY=${{ secrets.SPACES_SECRET_ACCESS_KEY }}" >> $GITHUB_ENV
+ echo "NEBARI_K8S_VERSION"=1.25.12-do.0 >> $GITHUB_ENV
+
+ - name: Integration Tests
+ run: |
+ pytest --version
+ pytest tests/tests_integration/ -vvv -s --cloud do
+ env:
+ NEBARI_SECRET__default_images__jupyterhub: "quay.io/nebari/nebari-jupyterhub:${{ env.NEBARI_IMAGE_TAG }}"
+ NEBARI_SECRET__default_images__jupyterlab: "quay.io/nebari/nebari-jupyterlab:${{ env.NEBARI_IMAGE_TAG }}"
+ NEBARI_SECRET__default_images__dask_worker: "quay.io/nebari/nebari-dask-worker:${{ env.NEBARI_IMAGE_TAG }}"
diff --git a/.github/workflows/test_gcp_integration.yaml b/.github/workflows/test_gcp_integration.yaml
new file mode 100644
index 0000000000..57ef84288f
--- /dev/null
+++ b/.github/workflows/test_gcp_integration.yaml
@@ -0,0 +1,91 @@
+name: test-gcp-integration
+
+on:
+ schedule:
+ - cron: "0 0 * * MON"
+ workflow_dispatch:
+ inputs:
+ branch:
+ description: 'Nebari branch to deploy, test, destroy'
+ required: true
+ default: develop
+ type: string
+ image-tag:
+ description: 'Nebari image tag created by the nebari-docker-images repo'
+ required: true
+ default: main
+ type: string
+ tf-log-level:
+ description: 'Change Terraform log levels'
+ required: false
+ default: info
+ type: choice
+ options:
+ - info
+ - warn
+ - debug
+ - trace
+ - error
+
+env:
+ NEBARI_GH_BRANCH: ${{ github.event.inputs.branch || 'develop' }}
+ NEBARI_IMAGE_TAG: ${{ github.event.inputs.image-tag || 'main' }}
+ TF_LOG: ${{ github.event.inputs.tf-log-level || 'info' }}
+
+
+jobs:
+ test-gcp-integration:
+ runs-on: ubuntu-latest
+ permissions:
+ id-token: write
+ contents: read
+ pull-requests: write
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v3
+ with:
+ ref: ${{ env.NEBARI_GH_BRANCH }}
+ fetch-depth: 0
+
+ - name: Set up Python
+ uses: actions/setup-python@v4
+ with:
+ python-version: 3.11
+
+ - name: Install Nebari
+ run: |
+ pip install .[dev]
+ playwright install
+
+ - name: Retrieve secret from Vault
+ uses: hashicorp/vault-action@v2.5.0
+ with:
+ method: jwt
+ url: "https://quansight-vault-public-vault-b2379fa7.d415e30e.z1.hashicorp.cloud:8200"
+ namespace: "admin/quansight"
+ role: "repository-nebari-dev-nebari-role"
+ secrets: |
+ kv/data/repository/nebari-dev/nebari/google_cloud_platform/nebari-dev-ci/github-nebari-dev-repo-ci project_id | PROJECT_ID;
+ kv/data/repository/nebari-dev/nebari/google_cloud_platform/nebari-dev-ci/github-nebari-dev-repo-ci workload_identity_provider | GCP_WORKFLOW_PROVIDER;
+ kv/data/repository/nebari-dev/nebari/google_cloud_platform/nebari-dev-ci/github-nebari-dev-repo-ci service_account_name | GCP_SERVICE_ACCOUNT;
+ kv/data/repository/nebari-dev/nebari/cloudflare/internal-devops@quansight.com/nebari-dev-ci token | CLOUDFLARE_TOKEN;
+
+ - name: 'Authenticate to GCP'
+ uses: 'google-github-actions/auth@v1'
+ with:
+ token_format: access_token
+ workload_identity_provider: ${{ env.GCP_WORKFLOW_PROVIDER }}
+ service_account: ${{ env.GCP_SERVICE_ACCOUNT }}
+
+ - name: Set required environment variables
+ run: |
+ echo "GOOGLE_CREDENTIALS=${{ env.GOOGLE_APPLICATION_CREDENTIALS }}" >> $GITHUB_ENV
+
+ - name: Integration Tests
+ run: |
+ pytest --version
+ pytest tests/tests_integration/ -vvv -s --cloud gcp
+ env:
+ NEBARI_SECRET__default_images__jupyterhub: "quay.io/nebari/nebari-jupyterhub:${{ env.NEBARI_IMAGE_TAG }}"
+ NEBARI_SECRET__default_images__jupyterlab: "quay.io/nebari/nebari-jupyterlab:${{ env.NEBARI_IMAGE_TAG }}"
+ NEBARI_SECRET__default_images__dask_worker: "quay.io/nebari/nebari-dask-worker:${{ env.NEBARI_IMAGE_TAG }}"
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 3e4d4d9352..7284f9f7bf 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -39,7 +39,7 @@ repos:
exclude: "^src/_nebari/template/"
- repo: https://github.com/codespell-project/codespell
- rev: v2.2.5
+ rev: v2.2.6
hooks:
- id: codespell
args:
@@ -53,13 +53,13 @@ repos:
# python
- repo: https://github.com/psf/black
- rev: 23.7.0
+ rev: 23.9.1
hooks:
- id: black
args: ["--line-length=88", "--exclude=/src/_nebari/template/"]
- repo: https://github.com/astral-sh/ruff-pre-commit
- rev: v0.0.287
+ rev: v0.0.292
hooks:
- id: ruff
args: ["--fix"]
@@ -75,7 +75,7 @@ repos:
# terraform
- repo: https://github.com/antonbabenko/pre-commit-terraform
- rev: v1.83.2
+ rev: v1.83.4
hooks:
- id: terraform_fmt
args:
diff --git a/README.md b/README.md
index e96b370ed2..0ca6652d68 100644
--- a/README.md
+++ b/README.md
@@ -77,7 +77,7 @@ Amazon [AWS](https://aws.amazon.com/), [GCP](https://cloud.google.com/ "Google C
- Operating System: Currently, Nebari supports development on macOS and Linux operating systems. Windows is NOT supported.
However, we would welcome contributions that add and improve support for Windows.
-- You need Python >= 3.7 on your local machine or virtual environment to work on Nebari.
+- You need Python >= 3.8 on your local machine or virtual environment to work on Nebari.
- Adopting virtual environments ([`conda`](https://docs.conda.io/en/latest/), [`pipenv`](https://github.com/pypa/pipenv) or
[`venv`](https://docs.python.org/3/library/venv.html)) is also encouraged.
diff --git a/RELEASE.md b/RELEASE.md
index 9eb17a1cd9..9bccfa1340 100644
--- a/RELEASE.md
+++ b/RELEASE.md
@@ -441,7 +441,7 @@ Enhancements for this release include:
### Bug fixes
-This release addresses several bugs with a slight emphasis on stablizing the core services while also improving the end user experience.
+This release addresses several bugs with a slight emphasis on stabilizing the core services while also improving the end user experience.
### What's Changed
* [BUG] Adding back feature of limiting profiles for users and groups by @costrouc in [PR 1169](https://github.com/Quansight/qhub/pull/1169)
diff --git a/flake.nix b/flake.nix
index 0c9ae01bce..c6fc060f9d 100644
--- a/flake.nix
+++ b/flake.nix
@@ -36,7 +36,6 @@
pythonPackages.pytest
pythonPackages.pytest-timeout
pythonPackages.black
- pythonPackages.flake8
pythonPackages.sphinx
pythonPackages.dask-gateway
pythonPackages.paramiko
diff --git a/pyproject.toml b/pyproject.toml
index 82f2bf0132..63f0835cbf 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -45,6 +45,7 @@ classifiers = [
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
+ "Programming Language :: Python :: 3.11",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Information Technology",
@@ -81,7 +82,6 @@ dev = [
"diagrams",
"python-dotenv",
"escapism",
- "flake8==3.8.4",
"importlib-metadata<5.0",
"jhub-client",
"paramiko",
@@ -91,8 +91,6 @@ dev = [
"pytest-playwright",
"pytest-cov",
"coverage[toml]",
- "grayskull",
- "build",
"jinja2",
"setuptools==63.4.3",
]
diff --git a/pytest.ini b/pytest.ini
index 7341aae585..0555ec6b2d 100644
--- a/pytest.ini
+++ b/pytest.ini
@@ -7,7 +7,6 @@ addopts =
# turn warnings into errors
-Werror
markers =
- conda: conda required to run this test (deselect with '-m \"not conda\"')
gpu: test gpu working properly
preemptible: test preemptible instances
testpaths =
diff --git a/src/_nebari/initialize.py b/src/_nebari/initialize.py
index 7f4b7aab96..70ec8bc5db 100644
--- a/src/_nebari/initialize.py
+++ b/src/_nebari/initialize.py
@@ -47,7 +47,7 @@ def render_config(
ssl_cert_email: str = None,
):
config = {
- "provider": cloud_provider,
+ "provider": cloud_provider.value,
"namespace": namespace,
"nebari_version": __version__,
}
@@ -74,7 +74,7 @@ def render_config(
config["theme"] = {"jupyterhub": {"hub_title": f"Nebari - { project_name }"}}
config["theme"]["jupyterhub"][
"welcome"
- ] = """Welcome! Learn about Nebari's features and configurations in the documentation. If you have any questions or feedback, reach the team on Nebari's support forums."""
+ ] = """Welcome! Learn about Nebari's features and configurations in the documentation. If you have any questions or feedback, reach the team on Nebari's support forums."""
config["security"]["authentication"] = {"type": auth_provider}
diff --git a/src/_nebari/provider/cloud/amazon_web_services.py b/src/_nebari/provider/cloud/amazon_web_services.py
index f43273226f..576f72c1c6 100644
--- a/src/_nebari/provider/cloud/amazon_web_services.py
+++ b/src/_nebari/provider/cloud/amazon_web_services.py
@@ -2,7 +2,7 @@
import os
import re
import time
-from typing import Dict, List
+from typing import Dict, List, Optional
import boto3
from botocore.exceptions import ClientError, EndpointConnectionError
@@ -29,7 +29,9 @@ def check_credentials():
@functools.lru_cache()
-def aws_session(region: str = None, digitalocean_region: str = None) -> boto3.Session:
+def aws_session(
+ region: Optional[str] = None, digitalocean_region: Optional[str] = None
+) -> boto3.Session:
"""Create a boto3 session."""
if digitalocean_region:
aws_access_key_id = os.environ["SPACES_ACCESS_KEY_ID"]
@@ -126,7 +128,7 @@ def instances(region: str) -> Dict[str, str]:
return {t: t for t in instance_types}
-def aws_get_vpc_id(name: str, namespace: str, region: str) -> str:
+def aws_get_vpc_id(name: str, namespace: str, region: str) -> Optional[str]:
"""Return VPC ID for the EKS cluster namedd `{name}-{namespace}`."""
cluster_name = f"{name}-{namespace}"
session = aws_session(region=region)
@@ -138,6 +140,7 @@ def aws_get_vpc_id(name: str, namespace: str, region: str) -> str:
for tag in tags:
if tag["Key"] == "Name" and tag["Value"] == cluster_name:
return vpc["VpcId"]
+ return None
def aws_get_subnet_ids(name: str, namespace: str, region: str) -> List[str]:
@@ -216,11 +219,11 @@ def aws_get_security_group_ids(name: str, namespace: str, region: str) -> List[s
return security_group_ids
-def aws_get_load_balancer_name(vpc_id: str, region: str) -> str:
+def aws_get_load_balancer_name(vpc_id: str, region: str) -> Optional[str]:
"""Return load balancer name for the VPC ID."""
if not vpc_id:
print("No VPC ID provided. Exiting...")
- return
+ return None
session = aws_session(region=region)
client = session.client("elb")
@@ -229,6 +232,7 @@ def aws_get_load_balancer_name(vpc_id: str, region: str) -> str:
for load_balancer in response:
if load_balancer["VPCId"] == vpc_id:
return load_balancer["LoadBalancerName"]
+ return None
def aws_get_efs_ids(name: str, namespace: str, region: str) -> List[str]:
@@ -260,7 +264,7 @@ def aws_get_efs_mount_target_ids(efs_id: str, region: str) -> List[str]:
"""Return list of EFS mount target IDs for the EFS ID."""
if not efs_id:
print("No EFS ID provided. Exiting...")
- return
+ return []
session = aws_session(region=region)
client = session.client("efs")
@@ -290,7 +294,9 @@ def aws_get_ec2_volume_ids(name: str, namespace: str, region: str) -> List[str]:
return volume_ids
-def aws_get_iam_policy(region: str, name: str = None, pattern: str = None) -> str:
+def aws_get_iam_policy(
+ region: Optional[str], name: Optional[str] = None, pattern: Optional[str] = None
+) -> Optional[str]:
"""Return IAM policy ARN for the policy name or pattern."""
session = aws_session(region=region)
client = session.client("iam")
@@ -301,6 +307,7 @@ def aws_get_iam_policy(region: str, name: str = None, pattern: str = None) -> st
pattern and re.match(pattern, policy["PolicyName"])
):
return policy["Arn"]
+ return None
def aws_delete_load_balancer(name: str, namespace: str, region: str):
@@ -640,9 +647,9 @@ def aws_delete_ec2_volumes(name: str, namespace: str, region: str):
def aws_delete_s3_objects(
bucket_name: str,
- endpoint: str = None,
- region: str = None,
- digitalocean_region: str = None,
+ endpoint: Optional[str] = None,
+ region: Optional[str] = None,
+ digitalocean_region: Optional[str] = None,
):
"""
Delete all objects in the S3 bucket.
@@ -707,9 +714,9 @@ def aws_delete_s3_objects(
def aws_delete_s3_bucket(
bucket_name: str,
- endpoint: str = None,
- region: str = None,
- digitalocean_region: str = None,
+ endpoint: Optional[str] = None,
+ region: Optional[str] = None,
+ digitalocean_region: Optional[str] = None,
):
"""
Delete S3 bucket.
diff --git a/src/_nebari/provider/git.py b/src/_nebari/provider/git.py
index 7e90d06dfc..8be3b29a2a 100644
--- a/src/_nebari/provider/git.py
+++ b/src/_nebari/provider/git.py
@@ -2,16 +2,17 @@
import os
import subprocess
from pathlib import Path
+from typing import Optional
from _nebari.utils import change_directory
-def is_git_repo(path: Path = None):
+def is_git_repo(path: Optional[Path] = None):
path = path or Path.cwd()
return ".git" in os.listdir(path)
-def initialize_git(path: Path = None):
+def initialize_git(path: Optional[Path] = None):
path = path or Path.cwd()
with change_directory(path):
subprocess.check_output(["git", "init"])
@@ -19,7 +20,9 @@ def initialize_git(path: Path = None):
subprocess.check_output(["git", "checkout", "-b", "main"])
-def add_git_remote(remote_path: str, path: Path = None, remote_name: str = "origin"):
+def add_git_remote(
+ remote_path: str, path: Optional[Path] = None, remote_name: str = "origin"
+):
path = path or Path.cwd()
c = configparser.ConfigParser()
diff --git a/src/_nebari/provider/terraform.py b/src/_nebari/provider/terraform.py
index 9d5c8c78cd..6f6ad6930b 100644
--- a/src/_nebari/provider/terraform.py
+++ b/src/_nebari/provider/terraform.py
@@ -28,8 +28,8 @@ def deploy(
terraform_import: bool = False,
terraform_apply: bool = True,
terraform_destroy: bool = False,
- input_vars: Dict[str, Any] = None,
- state_imports: List = None,
+ input_vars: Dict[str, Any] = {},
+ state_imports: List[Any] = [],
):
"""Execute a given terraform directory.
@@ -52,9 +52,6 @@ def deploy(
state_imports: (addr, id) pairs for iterate through and attempt
to terraform import
"""
- input_vars = input_vars or {}
- state_imports = state_imports or []
-
with tempfile.NamedTemporaryFile(
mode="w", encoding="utf-8", suffix=".tfvars.json"
) as f:
diff --git a/src/_nebari/stages/infrastructure/__init__.py b/src/_nebari/stages/infrastructure/__init__.py
index 3b77a28700..f17d98ea0f 100644
--- a/src/_nebari/stages/infrastructure/__init__.py
+++ b/src/_nebari/stages/infrastructure/__init__.py
@@ -503,6 +503,29 @@ class ExistingProvider(schema.Base):
}
+provider_enum_model_map = {
+ schema.ProviderEnum.local: LocalProvider,
+ schema.ProviderEnum.existing: ExistingProvider,
+ schema.ProviderEnum.gcp: GoogleCloudPlatformProvider,
+ schema.ProviderEnum.aws: AmazonWebServicesProvider,
+ schema.ProviderEnum.azure: AzureProvider,
+ schema.ProviderEnum.do: DigitalOceanProvider,
+}
+
+provider_enum_name_map: Dict[schema.ProviderEnum, str] = {
+ schema.ProviderEnum.local: "local",
+ schema.ProviderEnum.existing: "existing",
+ schema.ProviderEnum.gcp: "google_cloud_platform",
+ schema.ProviderEnum.aws: "amazon_web_services",
+ schema.ProviderEnum.azure: "azure",
+ schema.ProviderEnum.do: "digital_ocean",
+}
+
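+# Reverse lookup from the full configuration section name to the provider
+# abbreviation, e.g. "google_cloud_platform" -> "gcp"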
+provider_name_abbreviation_map: Dict[str, str] = {
+ value: key.value for key, value in provider_enum_name_map.items()
+}
+
+
class InputSchema(schema.Base):
local: typing.Optional[LocalProvider]
existing: typing.Optional[ExistingProvider]
@@ -511,54 +534,36 @@ class InputSchema(schema.Base):
azure: typing.Optional[AzureProvider]
digital_ocean: typing.Optional[DigitalOceanProvider]
- @pydantic.root_validator
+ @pydantic.root_validator(pre=True)
def check_provider(cls, values):
- if (
- values["provider"] == schema.ProviderEnum.local
- and values.get("local") is None
- ):
- values["local"] = LocalProvider()
- elif (
- values["provider"] == schema.ProviderEnum.existing
- and values.get("existing") is None
- ):
- values["existing"] = ExistingProvider()
- elif (
- values["provider"] == schema.ProviderEnum.gcp
- and values.get("google_cloud_platform") is None
- ):
- values["google_cloud_platform"] = GoogleCloudPlatformProvider()
- elif (
- values["provider"] == schema.ProviderEnum.aws
- and values.get("amazon_web_services") is None
- ):
- values["amazon_web_services"] = AmazonWebServicesProvider()
- elif (
- values["provider"] == schema.ProviderEnum.azure
- and values.get("azure") is None
- ):
- values["azure"] = AzureProvider()
- elif (
- values["provider"] == schema.ProviderEnum.do
- and values.get("digital_ocean") is None
- ):
- values["digital_ocean"] = DigitalOceanProvider()
-
- if (
- sum(
- (_ in values and values[_] is not None)
- for _ in {
- "local",
- "existing",
- "google_cloud_platform",
- "amazon_web_services",
- "azure",
- "digital_ocean",
- }
- )
- != 1
- ):
- raise ValueError("multiple providers set or wrong provider fields set")
+ if "provider" in values:
+ provider: str = values["provider"]
+ if hasattr(schema.ProviderEnum, provider):
+ # TODO: all cloud providers have required fields, but local and existing don't.
+ # And there is no way to initialize a model without user input here.
+ # We preserve the original behavior here, but we should find a better way to do this.
+ if provider in ["local", "existing"]:
+ values[provider] = provider_enum_model_map[provider]()
+ else:
+ # if the provider field is invalid, it won't be set when this validator is called
+ # so we need to check for it explicitly here; this is also why the validator uses `pre=True`
+ # TODO: this is a workaround, check if there is a better way to do this in Pydantic v2
+ raise ValueError(
+ f"'{provider}' is not a valid enumeration member; permitted: local, existing, do, aws, gcp, azure"
+ )
+ else:
+ configured_providers = [
+ provider
+ for provider in provider_name_abbreviation_map.keys()
+ if provider in values
+ ]
+ num_providers = len(configured_providers)
+ if num_providers > 1:
+ raise ValueError(f"Multiple providers set: {configured_providers}")
+ elif num_providers == 1:
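+ # e.g. a config that only defines "google_cloud_platform" gets provider="gcp"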
+ values["provider"] = provider_name_abbreviation_map[setted_providers[0]]
+ elif num_providers == 0:
+ values["provider"] = schema.ProviderEnum.local.value
return values
diff --git a/src/_nebari/stages/kubernetes_services/template/modules/__init__.py b/src/_nebari/stages/kubernetes_services/template/modules/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/__init__.py b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/__init__.py b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/conda-store/__init__.py b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/conda-store/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/conda-store/config/__init__.py b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/conda-store/config/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/conda-store/worker.tf b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/conda-store/worker.tf
index ad0876f2a0..c3e725dbea 100644
--- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/conda-store/worker.tf
+++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/conda-store/worker.tf
@@ -79,7 +79,7 @@ resource "kubernetes_deployment" "worker" {
}
annotations = {
- # This lets us autorestart when the conifg changes!
+ # This lets us autorestart when the config changes!
"checksum/config-map" = sha256(jsonencode(kubernetes_config_map.conda-store-config.data))
"checksum/secret" = sha256(jsonencode(kubernetes_secret.conda-store-secret.data))
"checksum/conda-environments" = sha256(jsonencode(kubernetes_config_map.conda-store-environments.data))
diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/dask-gateway/__init__.py b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/dask-gateway/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/dask-gateway/files/__init__.py b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/dask-gateway/files/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/__init__.py b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/__init__.py b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/ipython/__init__.py b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/ipython/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyterhub/__init__.py b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyterhub/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/src/_nebari/subcommands/init.py b/src/_nebari/subcommands/init.py
index 4c3412358a..617893bf3e 100644
--- a/src/_nebari/subcommands/init.py
+++ b/src/_nebari/subcommands/init.py
@@ -326,8 +326,11 @@ def check_cloud_provider_creds(cloud_provider: ProviderEnum, disable_prompt: boo
"Paste your SPACES_SECRET_ACCESS_KEY",
hide_input=True,
)
+ # Set Spaces credentials. Spaces is API-compatible with S3, so
+ # exposing the Spaces credentials through the AWS environment
+ # variables lets us reuse the existing S3 code.
os.environ["AWS_ACCESS_KEY_ID"] = os.getenv("SPACES_ACCESS_KEY_ID")
- os.environ["AWS_SECRET_ACCESS_KEY"] = os.getenv("AWS_SECRET_ACCESS_KEY")
+ os.environ["AWS_SECRET_ACCESS_KEY"] = os.getenv("SPACES_SECRET_ACCESS_KEY")
# AZURE
elif cloud_provider == ProviderEnum.azure.value.lower() and (
diff --git a/src/nebari/plugins.py b/src/nebari/plugins.py
index 7afd1bcbe7..c5148e9e1d 100644
--- a/src/nebari/plugins.py
+++ b/src/nebari/plugins.py
@@ -76,7 +76,7 @@ def _import_module_from_filename(plugin: str):
try:
self.plugin_manager.register(mod, plugin)
except ValueError:
- # Pluin already registered
+ # Plugin already registered
pass
def get_available_stages(self):
diff --git a/tests/common/run_notebook.py b/tests/common/run_notebook.py
index e55b874d17..03c383299a 100644
--- a/tests/common/run_notebook.py
+++ b/tests/common/run_notebook.py
@@ -2,6 +2,7 @@
import re
import time
from pathlib import Path
+from typing import List, Union
from tests.common.navigator import Navigator
@@ -16,11 +17,13 @@ def __init__(self, navigator: Navigator):
def run(
self,
path,
- expected_outputs,
- conda_env,
- runtime=30000,
- retry=2,
- exact_match=True,
+ expected_outputs: List[str],
+ conda_env: str,
+ timeout: float = 1000,
+ completion_wait_time: float = 2,
+ retry: int = 2,
+ retry_wait_time: float = 5,
+ exact_match: bool = True,
):
"""Run jupyter notebook and check for expected output text anywhere on
the page.
@@ -30,10 +33,33 @@ def run(
Conda environments may still be being built shortly after deployment.
+ Parameters
+ ----------
+ path: str
+ Path to notebook relative to the root of the jupyterlab instance.
+ expected_outputs: List[str]
+ Text to look for in the output of the notebook. This can be a
+ substring of the actual output if exact_match is False.
conda_env: str
Name of conda environment. Python conda environments have the
structure "conda-env-nebari-git-nebari-git-dashboard-py" where
the actual name of the environment is "dashboard".
+ timeout: float
+ Time in seconds to wait for the expected output text to appear.
+ default: 1000
+ completion_wait_time: float
+ Time in seconds to wait between checking for expected output text.
+ default: 2
+ retry: int
+ Number of times to retry running the notebook.
+ default: 2
+ retry_wait_time: float
+ Time in seconds to wait between retries.
+ default: 5
+ exact_match: bool
+ If True, the expected output must match exactly. If False, the
+ expected output must be a substring of the actual output.
+ default: True
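+
+ Example
+ -------
+ A typical call, where ``nb`` is an instance of this class and the
+ notebook name is illustrative::
+
+ nb.run("my_notebook.ipynb", expected_outputs=["success: 6"],
+ conda_env="conda-env-default-py", timeout=500)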
"""
logger.debug(f">>> Running notebook: {path}")
filename = Path(path).name
@@ -47,13 +73,13 @@ def run(
# make sure that this notebook is one currently selected
self.nav.page.get_by_role("tab", name=filename).get_by_text(filename).click()
- for i in range(retry):
+ for _ in range(retry):
self._restart_run_all()
# Wait for a couple of seconds to make sure it's re-started
- time.sleep(2)
- self._wait_for_commands_completion()
+ time.sleep(retry_wait_time)
+ self._wait_for_commands_completion(timeout, completion_wait_time)
all_outputs = self._get_outputs()
- self.assert_match_all_outputs(expected_outputs, all_outputs)
+ assert_match_all_outputs(expected_outputs, all_outputs, exact_match)
def create_notebook(self, conda_env=None):
file_locator = self.nav.page.get_by_text("File", exact=True)
@@ -95,11 +121,36 @@ def open_notebook(self, path):
logger.debug("Path to notebook is invalid")
raise RuntimeError("Path to notebook is invalid")
- def assert_code_output(self, code, expected_output):
+ def assert_code_output(
+ self,
+ code: str,
+ expected_output: str,
+ timeout: float = 1000,
+ completion_wait_time: float = 2,
+ exact_match: bool = True,
+ ):
+ """
+ Run code in last cell and check for expected output text anywhere on
+ the page.
+
+
+ Parameters
+ ----------
+ code: str
+ Code to run in last cell.
+ expected_output: str
+ Text or regular expression to look for in the output of the last cell.
+ timeout: float
+ Time in seconds to wait for the expected output text to appear.
+ default: 1000
+ completion_wait_time: float
+ Time in seconds to wait between checking for expected output text.
+ """
self.run_in_last_cell(code)
- self._wait_for_commands_completion()
+ self._wait_for_commands_completion(timeout, completion_wait_time)
outputs = self._get_outputs()
- self.assert_match_output(expected_output, outputs[-1])
+ actual_output = outputs[-1] if outputs else ""
+ assert_match_output(expected_output, actual_output, exact_match)
def run_in_last_cell(self, code):
self._create_new_cell()
@@ -125,38 +176,40 @@ def _get_last_cell(self):
return cell
raise ValueError("Unable to get last cell")
- def _wait_for_commands_completion(self, timeout=120):
- elapsed_time = 0
- start_time = time.time()
+ def _wait_for_commands_completion(
+ self, timeout: float, completion_wait_time: float
+ ):
+ """
+ Wait for commands to finish running
+
+ Parameters
+ ----------
+ timeout: float
+ Time in seconds to wait for all commands to finish running.
+ completion_wait_time: float
+ Time in seconds to wait between checks for still-running commands.
+ """
+ elapsed_time = 0.0
still_visible = True
+ start_time = time.time()
while elapsed_time < timeout:
running = self.nav.page.get_by_text("[*]").all()
still_visible = any(list(map(lambda r: r.is_visible(), running)))
- elapsed_time = time.time() - start_time
- time.sleep(1)
if not still_visible:
break
+ elapsed_time = time.time() - start_time
+ time.sleep(completion_wait_time)
if still_visible:
raise ValueError(
f"Timeout Waited for commands to finish, "
f"but couldn't finish in {timeout} sec"
)
- def _get_outputs(self):
+ def _get_outputs(self) -> List[str]:
output_elements = self.nav.page.query_selector_all(".jp-OutputArea-output")
text_content = [element.text_content().strip() for element in output_elements]
return text_content
- def assert_match_all_outputs(self, expected_outputs, actual_outputs):
- for ex, act in zip(expected_outputs, actual_outputs):
- self.assert_match_output(ex, act)
-
- def assert_match_output(self, expected_output, actual_output):
- if isinstance(expected_output, re.Pattern):
- assert re.match(expected_output, actual_output)
- else:
- assert expected_output == actual_output
-
def _restart_run_all(self):
# restart run all cells
self.nav.page.get_by_text("Kernel", exact=True).click()
@@ -171,3 +224,61 @@ def _restart_run_all(self):
)
if restart_dialog_button.is_visible():
restart_dialog_button.click()
+
+
+def assert_match_output(
+ expected_output: str, actual_output: str, exact_match: bool
+) -> None:
+ """Assert that the expected_output is found in the actual_output.
+
+ Parameters
+ ----------
+
+ expected_output: str
+ The expected output text or regular expression to find in the
+ actual output.
+ actual_output: str
+ The actual output text to search for the expected output.
+ exact_match: bool
+ If True, then the expected_output must match the actual_output
+ exactly. Otherwise, the expected_output must be found somewhere in
+ the actual_output.
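+
+ Examples
+ --------
+ >>> assert_match_output("success: 6", "success: 6", exact_match=True)
+ >>> assert_match_output(r"success: \d+", "success: 6", exact_match=True)
+ >>> assert_match_output("success", "success: 6", exact_match=False)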
+ """
+ regex = re.compile(rf"{expected_output}")
+ match = (
+ regex.fullmatch(actual_output) if exact_match else regex.search(actual_output)
+ )
+ assert (
+ match is not None
+ ), f"Expected output: {expected_output} not found in actual output: {actual_output}"
+
+
+def assert_match_all_outputs(
+ expected_outputs: List[str],
+ actual_outputs: List[str],
+ exact_matches: Union[bool, List[bool]],
+) -> None:
+ """Assert that the expected_outputs are found in the actual_outputs.
+ The expected_outputs and actual_outputs must be the same length.
+
+ Parameters
+ ----------
+
+ expected_outputs: List[str]
+ A list of expected output text or regular expression to find in
+ the actual output.
+ actual_outputs: List[str]
+ A list of actual output text to search for the expected output.
+ exact_matches: Union[bool, List[bool]]
+ If True, then the expected_output must match the actual_output
+ exactly. Otherwise, the expected_output must be found somewhere in
+ the actual_output. If a list is provided, then it must be the same
+ length as expected_outputs and actual_outputs.
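+
+ Example
+ -------
+ >>> assert_match_all_outputs(["success", "6"], ["success: 6", "6"], False)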
+ """
+ if isinstance(exact_matches, bool):
+ exact_matches = [exact_matches] * len(expected_outputs)
+
+ for expected_output, actual_output, exact in zip(
+ expected_outputs, actual_outputs, exact_matches
+ ):
+ assert_match_output(expected_output, actual_output, exact)
diff --git a/tests/common/tests/test_notebook.py b/tests/common/tests/test_notebook.py
new file mode 100644
index 0000000000..ba8cbbbf84
--- /dev/null
+++ b/tests/common/tests/test_notebook.py
@@ -0,0 +1,35 @@
+import pytest
+
+from tests.common.run_notebook import assert_match_output
+
+
+@pytest.mark.parametrize(
+ "expected, actual, exact",
+ [
+ ("success: 6", "success: 6", True),
+ ("success", "success: 6", False),
+ ("6", "6", True),
+ ("cde", "abcde", False),
+ ("12.*5", "12345", True),
+ (".*5", "12345", True),
+ ("ab.*ef", "123abcdef123", False),
+ ],
+)
+def test_output_match(expected, actual, exact):
+ assert_match_output(expected, actual, exact_match=exact)
+
+
+@pytest.mark.parametrize(
+ "expected, actual, exact",
+ [
+ ("True", "False", True),
+ ("success: 6", "success", True),
+ ("60", "6", True),
+ ("abcde", "cde", True),
+ ("ab.*ef", "123abcdef123", True),
+ ],
+)
+def test_output_not_match(expected, actual, exact):
+ msg = f"Expected output: {expected} not found in actual output: {actual}"
+ with pytest.raises(AssertionError, match=msg):
+ assert_match_output(expected, actual, exact_match=exact)
diff --git a/tests/tests_e2e/playwright/test_playwright.py b/tests/tests_e2e/playwright/test_playwright.py
index 30035b8b58..7f4dabac08 100644
--- a/tests/tests_e2e/playwright/test_playwright.py
+++ b/tests/tests_e2e/playwright/test_playwright.py
@@ -14,5 +14,5 @@ def test_notebook(navigator, test_data_root):
path=notebook_name,
expected_outputs=["success: 6"],
conda_env="conda-env-default-py",
- runtime=60000,
+ timeout=500,
)
diff --git a/tests/tests_unit/conftest.py b/tests/tests_unit/conftest.py
index 335d84cb74..a65a4ce8aa 100644
--- a/tests/tests_unit/conftest.py
+++ b/tests/tests_unit/conftest.py
@@ -190,3 +190,8 @@ def nebari_render(nebari_config, nebari_stages, tmp_path):
write_configuration(config_filename, nebari_config)
render_template(tmp_path, nebari_config, nebari_stages)
return tmp_path, config_filename
+
+
+@pytest.fixture
+def config_schema():
+ return nebari_plugin_manager.config_schema
diff --git a/tests/tests_unit/test_cli_upgrade.py b/tests/tests_unit/test_cli_upgrade.py
index e0fe685aff..91d8c0b299 100644
--- a/tests/tests_unit/test_cli_upgrade.py
+++ b/tests/tests_unit/test_cli_upgrade.py
@@ -233,31 +233,6 @@ def test_cli_upgrade_fail_on_missing_file():
)
-def test_cli_upgrade_fail_invalid_file():
- with tempfile.TemporaryDirectory() as tmp:
- tmp_file = Path(tmp).resolve() / "nebari-config.yaml"
- assert tmp_file.exists() is False
-
- nebari_config = yaml.safe_load(
- """
-project_name: test
-provider: fake
- """
- )
-
- with open(tmp_file.resolve(), "w") as f:
- yaml.dump(nebari_config, f)
-
- assert tmp_file.exists() is True
- app = create_cli()
-
- result = runner.invoke(app, ["upgrade", "--config", tmp_file.resolve()])
-
- assert 1 == result.exit_code
- assert result.exception
- assert "provider" in str(result.exception)
-
-
def test_cli_upgrade_fail_on_downgrade():
start_version = "9999.9.9" # way in the future
end_version = _nebari.upgrade.__version__
diff --git a/tests/tests_unit/test_dependencies.py b/tests/tests_unit/test_dependencies.py
index 38e891e25c..bcde584e08 100644
--- a/tests/tests_unit/test_dependencies.py
+++ b/tests/tests_unit/test_dependencies.py
@@ -1,72 +1,7 @@
-import subprocess
import urllib
-from pathlib import Path
-
-import pytest
from _nebari.provider import terraform
-SRC_DIR = Path(__file__).parent.parent.parent
-PYPROJECT = SRC_DIR / "pyproject.toml"
-
-
-@pytest.mark.conda
-def test_build_by_conda_forge(tmp_path):
- """
- This test ensures that nebari can be built and packaged by conda-forge.
-
- This is achieved by walking through the following steps:
- 1. Use Python build package to generate the `sdist` .tar.gz file
- 2. Use grayskull package to generate the `meta.yaml` recipe file
- 3. Use conda build to attempt to build the nebari package from the `meta.yaml`
-
- These steps mimic what takes places on the conda-forge/nebari-feedstock repo whenever
- a new version of the package gets released.
-
- NOTE: this test requires conda and conda-build
- """
-
- assert PYPROJECT.exists()
-
- try:
- # build sdist
- subprocess.run(
- ["python", "-m", "build", SRC_DIR, "--outdir", tmp_path],
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- check=True,
- )
-
- # get location of sdist file built above
- sdist_loc = next(tmp_path.glob("*.tar.gz"))
- # run grayskull to create the meta.yaml using the local sdist file
- subprocess.run(
- [
- "grayskull",
- "pypi",
- "--strict-conda-forge",
- sdist_loc,
- "--output",
- tmp_path,
- ],
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- check=True,
- )
-
- # get the directory the meta.yaml is in
- meta_loc = tmp_path / "nebari"
- # try to run conda build to build package from meta.yaml
- subprocess.run(
- ["conda", "build", "--channel=conda-forge", meta_loc],
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- check=True,
- )
- except subprocess.CalledProcessError as e:
- print(e.stderr.decode("utf-8"))
- raise e
-
def test_terraform_open_source_license():
tf_version = terraform.version()
diff --git a/tests/tests_unit/test_schema.py b/tests/tests_unit/test_schema.py
index 2e733a3025..21efe47c2c 100644
--- a/tests/tests_unit/test_schema.py
+++ b/tests/tests_unit/test_schema.py
@@ -1,3 +1,8 @@
+from contextlib import nullcontext
+
+import pytest
+from pydantic.error_wrappers import ValidationError
+
from nebari import schema
from nebari.plugins import nebari_plugin_manager
@@ -48,3 +53,87 @@ def test_render_schema(nebari_config):
assert isinstance(nebari_config, schema.Main)
assert nebari_config.project_name == f"pytest{nebari_config.provider.value}"
assert nebari_config.namespace == "dev"
+
+
+@pytest.mark.parametrize(
+ "provider, exception",
+ [
+ (
+ "fake",
+ pytest.raises(
+ ValueError,
+ match="'fake' is not a valid enumeration member; permitted: local, existing, do, aws, gcp, azure",
+ ),
+ ),
+ ("aws", nullcontext()),
+ ("gcp", nullcontext()),
+ ("do", nullcontext()),
+ ("azure", nullcontext()),
+ ("existing", nullcontext()),
+ ("local", nullcontext()),
+ ],
+)
+def test_provider_validation(config_schema, provider, exception):
+ config_dict = {
+ "project_name": "test",
+ "provider": f"{provider}",
+ }
+ with exception:
+ config = config_schema(**config_dict)
+ assert config.provider == provider
+
+
+@pytest.mark.parametrize(
+ "provider, full_name, default_fields",
+ [
+ ("local", "local", {}),
+ ("existing", "existing", {}),
+ (
+ "aws",
+ "amazon_web_services",
+ {"region": "us-east-1", "kubernetes_version": "1.18"},
+ ),
+ (
+ "gcp",
+ "google_cloud_platform",
+ {
+ "region": "us-east1",
+ "project": "test-project",
+ "kubernetes_version": "1.18",
+ },
+ ),
+ (
+ "do",
+ "digital_ocean",
+ {"region": "nyc3", "kubernetes_version": "1.19.2-do.3"},
+ ),
+ (
+ "azure",
+ "azure",
+ {
+ "region": "eastus",
+ "kubernetes_version": "1.18",
+ "storage_account_postfix": "test",
+ },
+ ),
+ ],
+)
+def test_no_provider(config_schema, provider, full_name, default_fields):
+ config_dict = {
+ "project_name": "test",
+ f"{full_name}": default_fields,
+ }
+ config = config_schema(**config_dict)
+ assert config.provider == provider
+ assert full_name in config.dict()
+
+
+def test_multiple_providers(config_schema):
+ config_dict = {
+ "project_name": "test",
+ "local": {},
+ "existing": {},
+ }
+ msg = r"Multiple providers set: \['local', 'existing'\]"
+ with pytest.raises(ValidationError, match=msg):
+ config_schema(**config_dict)