diff --git a/.cirun.yml b/.cirun.yml
index bdabe6500b..dcc829bb8b 100644
--- a/.cirun.yml
+++ b/.cirun.yml
@@ -4,8 +4,8 @@ runners:
- name: run-k8s-tests
# Cloud Provider: AWS
cloud: aws
- # Instance Type has 4 vcpu, 16 GiB memory, Up to 5 Gbps Network Performance
- instance_type: t3a.xlarge
+ # Instance Type has 8 vcpu, 32 GiB memory, Up to 5 Gbps Network Performance
+ instance_type: t3a.2xlarge
# Custom AMI with docker/cypress/hub pre-installed
machine_image: ami-0a388df278199ff52
# Region: Oregon
diff --git a/.github/workflows/test_local_integration.yaml b/.github/workflows/test_local_integration.yaml
index 8ddc1f9690..05dec384b0 100644
--- a/.github/workflows/test_local_integration.yaml
+++ b/.github/workflows/test_local_integration.yaml
@@ -96,6 +96,24 @@ jobs:
sed -i -E 's/(cpu_guarantee):\s+[0-9\.]+/\1: 0.25/g' "nebari-config.yaml"
sed -i -E 's/(mem_guarantee):\s+[A-Za-z0-9\.]+/\1: 0.25G/g' "nebari-config.yaml"
+ # Change default JupyterLab theme
+ cat >> nebari-config.yaml <<- EOM
+ jupyterlab:
+ default_settings:
+ "@jupyterlab/apputils-extension:themes":
+ theme: JupyterLab Dark
+ EOM
+
+ # Change default value for minio persistence size
+ cat >> nebari-config.yaml <<- EOM
+ monitoring:
+ enabled: true
+ overrides:
+ minio:
+ persistence:
+ size: 1Gi
+ EOM
+
cat nebari-config.yaml
- name: Deploy Nebari
@@ -106,7 +124,7 @@ jobs:
- name: Basic kubectl checks after deployment
if: always()
run: |
- kubectl get all,cm,secret,ing -A
+ kubectl get all,cm,secret,pv,pvc,ing -A
- name: Check github-actions.nebari.dev resolves
run: |
@@ -167,22 +185,6 @@ jobs:
run: |
pytest tests/tests_deployment/ -v -s
- - name: JupyterHub Notebook Tests
- timeout-minutes: 2
- # run jhub-client after pytest since jhubctl can cleanup
- # the running server
- env:
- JUPYTERHUB_USERNAME: ${{ env.TEST_USERNAME }}
- JUPYTERHUB_PASSWORD: ${{ env.TEST_PASSWORD }}
- run: |
- sleep 60
- jhubctl --verbose run --hub=https://github-actions.nebari.dev\
- --auth-type=keycloak \
- --validate --no-verify-ssl \
- --kernel python3 \
- --stop-server \
- --notebook tests/tests_deployment/assets/notebook/simple.ipynb \
-
### CLEANUP AFTER TESTS
- name: Cleanup nebari deployment
if: always()
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index ca0dafbc98..2427219a8e 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -51,19 +51,19 @@ repos:
# python
- repo: https://github.com/psf/black
- rev: 23.11.0
+ rev: 24.1.1
hooks:
- id: black
args: ["--line-length=88", "--exclude=/src/_nebari/template/"]
- repo: https://github.com/astral-sh/ruff-pre-commit
- rev: v0.1.6
+ rev: v0.2.0
hooks:
- id: ruff
args: ["--fix"]
- repo: https://github.com/pycqa/isort
- rev: 5.12.0
+ rev: 5.13.2
hooks:
- id: isort
name: isort
@@ -73,7 +73,7 @@ repos:
# terraform
- repo: https://github.com/antonbabenko/pre-commit-terraform
- rev: v1.83.6
+ rev: v1.86.0
hooks:
- id: terraform_fmt
args:
diff --git a/RELEASE.md b/RELEASE.md
index 44dc102ea9..41433e9e13 100644
--- a/RELEASE.md
+++ b/RELEASE.md
@@ -11,6 +11,98 @@ This file is copied to nebari-dev/nebari-docs using a GitHub Action. -->
## Upcoming Release
+## Release 2024.3.1 - March 11, 2024
+
+### What's Changed
+* Modify Playwright test to account for changes in JupyterLab UI. by @marcelovilla in https://github.com/nebari-dev/nebari/pull/2232
+* Add favicon to jupyterhub theme. by @jbouder in https://github.com/nebari-dev/nebari/pull/2222
+* Set min nodes to 0 for worker and user. by @pt247 in https://github.com/nebari-dev/nebari/pull/2168
+* Remove `jhub-client` from pyproject.toml by @pavithraes in https://github.com/nebari-dev/nebari/pull/2242
+* Include permission validation step to programmatically cloned repos by @viniciusdc in https://github.com/nebari-dev/nebari/pull/2258
+* Expose jupyter's preferred dir as a config option by @krassowski in https://github.com/nebari-dev/nebari/pull/2251
+* Allow to configure default settings for JupyterLab (`overrides.json`) by @krassowski in https://github.com/nebari-dev/nebari/pull/2249
+* Feature/jlab menu customization by @marcelovilla in https://github.com/nebari-dev/nebari/pull/2259
+* Add cloud provider to the dask config.json file by @marcelovilla in https://github.com/nebari-dev/nebari/pull/2266
+* Fix syntax error in jupyter-server-config Python file by @krassowski in https://github.com/nebari-dev/nebari/pull/2286
+* Add "Open VS Code" entry in services by @krassowski in https://github.com/nebari-dev/nebari/pull/2267
+* Add Grafana Loki integration by @aktech in https://github.com/nebari-dev/nebari/pull/2156
+
+### New Contributors
+* @jbouder made their first contribution in https://github.com/nebari-dev/nebari/pull/2222
+* @krassowski made their first contribution in https://github.com/nebari-dev/nebari/pull/2251
+
+**Full Changelog**: https://github.com/nebari-dev/nebari/compare/2024.1.1...2024.3.1
+
+
+## Release 2024.1.1 - January 17, 2024
+
+### Feature changes and enhancements
+
+* Upgrade conda-store to latest version 2024.1.1
+* Add Jhub-Apps
+* Add Jupyterlab-pioneer
+* Minor improvements and bug fixes
+
+### Breaking Changes
+
+> WARNING: jupyterlab-videochat, retrolab, jupyter-tensorboard, jupyterlab-conda-store and jupyter-nvdashboard are no longer supported in this Nebari version and will be uninstalled.
+
+### What's Changed
+
+* [pre-commit.ci] pre-commit autoupdate by @pre-commit-ci in https://github.com/nebari-dev/nebari/pull/2176
+* Fix logic for dns lookup. by @pt247 in https://github.com/nebari-dev/nebari/pull/2166
+* Integrate JupyterHub App Launcher into Nebari by @aktech in https://github.com/nebari-dev/nebari/pull/2185
+* Pass in permissions boundary to k8s module by @aktech in https://github.com/nebari-dev/nebari/pull/2153
+* Add jupyterlab-pioneer by @aktech in https://github.com/nebari-dev/nebari/pull/2127
+* JHub Apps: Filter conda envs by user by @aktech in https://github.com/nebari-dev/nebari/pull/2187
+* update upgrade command by @dcmcand in https://github.com/nebari-dev/nebari/pull/2198
+* Remove JupyterLab from services list by @aktech in https://github.com/nebari-dev/nebari/pull/2189
+* Adding fields to ignore within keycloak_realm by @costrouc in https://github.com/nebari-dev/nebari/pull/2200
+* Add Nebari menu item configuration. by @marcelovilla in https://github.com/nebari-dev/nebari/pull/2196
+* Disable "Newer update available" popup as default setting by @viniciusdc in https://github.com/nebari-dev/nebari/pull/2192
+* Block usage of pip inside jupyterlab by @viniciusdc in https://github.com/nebari-dev/nebari/pull/2191
+* Return all environments instead of just those under the user's namespace for jhub-apps by @marcelovilla in https://github.com/nebari-dev/nebari/pull/2206
+* Adding a temporary writable directory for conda-store server /home/conda by @costrouc in https://github.com/nebari-dev/nebari/pull/2209
+* Add demo repositories mechanism to populate user's space by @viniciusdc in https://github.com/nebari-dev/nebari/pull/2207
+* update nebari_workflow_controller and conda_store tags to test rc by @dcmcand in https://github.com/nebari-dev/nebari/pull/2210
+* 2023.12.1 release notes by @dcmcand in https://github.com/nebari-dev/nebari/pull/2211
+* Make it so that jhub-apps default theme doesn't override by @costrouc in https://github.com/nebari-dev/nebari/pull/2213
+* Adding additional theme variables to jupyterhub theme config by @costrouc in https://github.com/nebari-dev/nebari/pull/2215
+* updates Current Release to 2024.1.1 by @dcmcand in https://github.com/nebari-dev/nebari/pull/2227
+
+
+**Full Changelog**: https://github.com/nebari-dev/nebari/compare/2023.12.1...2024.1.1
+
+## Release 2023.12.1 - December 15, 2023
+
+### Feature changes and enhancements
+
+* Upgrade conda-store to latest version 2023.10.1
+* Minor improvements and bug fixes
+
+### Breaking Changes
+
+> WARNING: Prefect, ClearML and kbatch were removed in this release and upgrading to this version will result in all of them being uninstalled.
+
+### What's Changed
+* BUG: fix incorrect config override #2086 by @fangchenli in https://github.com/nebari-dev/nebari/pull/2087
+* ENH: add AWS IAM permissions_boundary option #2078 by @fangchenli in https://github.com/nebari-dev/nebari/pull/2082
+* CI: cleanup local integration workflow by @fangchenli in https://github.com/nebari-dev/nebari/pull/2079
+* ENH: check missing GCP services by @fangchenli in https://github.com/nebari-dev/nebari/pull/2036
+* ENH: use packaging for version parsing, add unit tests by @fangchenli in https://github.com/nebari-dev/nebari/pull/2048
+* ENH: specify required field when retrieving available gcp regions by @fangchenli in https://github.com/nebari-dev/nebari/pull/2033
+* Upgrade conda-store to 2023.10.1 by @iameskild in https://github.com/nebari-dev/nebari/pull/2092
+* Add upgrade command for 2023.11.1 by @iameskild in https://github.com/nebari-dev/nebari/pull/2103
+* CLN: cleanup typing and typing import in init by @fangchenli in https://github.com/nebari-dev/nebari/pull/2107
+* Remove kbatch, prefect and clearml by @iameskild in https://github.com/nebari-dev/nebari/pull/2101
+* Fix integration tests, helm-validate script by @iameskild in https://github.com/nebari-dev/nebari/pull/2102
+* Re-enable AWS tags support by @iameskild in https://github.com/nebari-dev/nebari/pull/2096
+* Update upgrade instructions for 2023.11.1 by @iameskild in https://github.com/nebari-dev/nebari/pull/2112
+* Update nebari-git env pins by @iameskild in https://github.com/nebari-dev/nebari/pull/2113
+* Update release notes for 2023.11.1 by @iameskild in https://github.com/nebari-dev/nebari/pull/2114
+
+
+**Full Changelog**: https://github.com/nebari-dev/nebari/compare/2023.11.1...2023.12.1
## Release 2023.11.1 - November 15, 2023
diff --git a/pyproject.toml b/pyproject.toml
index 20d2cc6c5a..cb90bc52d0 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -84,7 +84,6 @@ dev = [
"diagrams",
"escapism",
"importlib-metadata<5.0",
- "jhub-client",
"jinja2",
"mypy==1.6.1",
"paramiko",
diff --git a/src/_nebari/constants.py b/src/_nebari/constants.py
index 02e89d607a..3ce86b9ecd 100644
--- a/src/_nebari/constants.py
+++ b/src/_nebari/constants.py
@@ -1,4 +1,4 @@
-CURRENT_RELEASE = "2023.12.1"
+CURRENT_RELEASE = "2024.3.1"
# NOTE: Terraform cannot be upgraded further due to Hashicorp licensing changes
# implemented in August 2023.
@@ -8,14 +8,14 @@
# 04-kubernetes-ingress
DEFAULT_TRAEFIK_IMAGE_TAG = "2.9.1"
-HIGHEST_SUPPORTED_K8S_VERSION = ("1", "26", "9")
+HIGHEST_SUPPORTED_K8S_VERSION = ("1", "29", "2")
DEFAULT_GKE_RELEASE_CHANNEL = "UNSPECIFIED"
DEFAULT_NEBARI_DASK_VERSION = CURRENT_RELEASE
DEFAULT_NEBARI_IMAGE_TAG = CURRENT_RELEASE
-DEFAULT_NEBARI_WORKFLOW_CONTROLLER_IMAGE_TAG = "2023.7.2"
+DEFAULT_NEBARI_WORKFLOW_CONTROLLER_IMAGE_TAG = CURRENT_RELEASE
-DEFAULT_CONDA_STORE_IMAGE_TAG = "2023.10.1"
+DEFAULT_CONDA_STORE_IMAGE_TAG = "2024.1.1"
LATEST_SUPPORTED_PYTHON_VERSION = "3.10"
diff --git a/src/_nebari/provider/cloud/amazon_web_services.py b/src/_nebari/provider/cloud/amazon_web_services.py
index 576f72c1c6..2bf905bfcb 100644
--- a/src/_nebari/provider/cloud/amazon_web_services.py
+++ b/src/_nebari/provider/cloud/amazon_web_services.py
@@ -143,6 +143,46 @@ def aws_get_vpc_id(name: str, namespace: str, region: str) -> Optional[str]:
return None
+def set_asg_tags(asg_node_group_map: Dict[str, str], region: str) -> None:
+ """Set tags for AWS node scaling from zero to work."""
+ session = aws_session(region=region)
+ autoscaling_client = session.client("autoscaling")
+ tags = []
+ for asg_name, node_group in asg_node_group_map.items():
+ tags.append(
+ {
+ "Key": "k8s.io/cluster-autoscaler/node-template/label/dedicated",
+ "Value": node_group,
+ "ResourceId": asg_name,
+ "ResourceType": "auto-scaling-group",
+ "PropagateAtLaunch": True,
+ }
+ )
+ autoscaling_client.create_or_update_tags(Tags=tags)
+
+
+def aws_get_asg_node_group_mapping(
+ name: str, namespace: str, region: str
+) -> Dict[str, str]:
+ """Return a dictionary of autoscaling groups and their associated node groups."""
+ asg_node_group_mapping = {}
+ session = aws_session(region=region)
+ eks = session.client("eks")
+ node_groups_response = eks.list_nodegroups(
+ clusterName=f"{name}-{namespace}",
+ )
+ node_groups = node_groups_response.get("nodegroups", [])
+ for nodegroup in node_groups:
+ response = eks.describe_nodegroup(
+ clusterName=f"{name}-{namespace}", nodegroupName=nodegroup
+ )
+ node_group_name = response["nodegroup"]["nodegroupName"]
+ auto_scaling_groups = response["nodegroup"]["resources"]["autoScalingGroups"]
+ for auto_scaling_group in auto_scaling_groups:
+ asg_node_group_mapping[auto_scaling_group["name"]] = node_group_name
+ return asg_node_group_mapping
+
+
def aws_get_subnet_ids(name: str, namespace: str, region: str) -> List[str]:
"""Return list of subnet IDs for the EKS cluster named `{name}-{namespace}`."""
session = aws_session(region=region)
diff --git a/src/_nebari/stages/infrastructure/__init__.py b/src/_nebari/stages/infrastructure/__init__.py
index 6d699eecfe..ada4db7c1d 100644
--- a/src/_nebari/stages/infrastructure/__init__.py
+++ b/src/_nebari/stages/infrastructure/__init__.py
@@ -144,6 +144,17 @@ class AWSInputVars(schema.Base):
tags: Dict[str, str] = {}
+def _calculate_asg_node_group_map(config: schema.Main):
+ if config.provider == schema.ProviderEnum.aws:
+ return amazon_web_services.aws_get_asg_node_group_mapping(
+ config.project_name,
+ config.namespace,
+ config.amazon_web_services.region,
+ )
+ else:
+ return {}
+
+
def _calculate_node_groups(config: schema.Main):
if config.provider == schema.ProviderEnum.aws:
return {
@@ -437,10 +448,10 @@ class AmazonWebServicesProvider(schema.Base):
node_groups: Dict[str, AWSNodeGroup] = {
"general": AWSNodeGroup(instance="m5.2xlarge", min_nodes=1, max_nodes=1),
"user": AWSNodeGroup(
- instance="m5.xlarge", min_nodes=1, max_nodes=5, single_subnet=False
+ instance="m5.xlarge", min_nodes=0, max_nodes=5, single_subnet=False
),
"worker": AWSNodeGroup(
- instance="m5.xlarge", min_nodes=1, max_nodes=5, single_subnet=False
+ instance="m5.xlarge", min_nodes=0, max_nodes=5, single_subnet=False
),
}
existing_subnet_ids: List[str] = None
@@ -810,6 +821,16 @@ def set_outputs(
outputs["node_selectors"] = _calculate_node_groups(self.config)
super().set_outputs(stage_outputs, outputs)
+ @contextlib.contextmanager
+ def post_deploy(
+ self, stage_outputs: Dict[str, Dict[str, Any]], disable_prompt: bool = False
+ ):
+ asg_node_group_map = _calculate_asg_node_group_map(self.config)
+ if asg_node_group_map:
+ amazon_web_services.set_asg_tags(
+ asg_node_group_map, self.config.amazon_web_services.region
+ )
+
@contextlib.contextmanager
def deploy(
self, stage_outputs: Dict[str, Dict[str, Any]], disable_prompt: bool = False
diff --git a/src/_nebari/stages/infrastructure/template/aws/main.tf b/src/_nebari/stages/infrastructure/template/aws/main.tf
index a2c0e929d7..2c78018f0b 100644
--- a/src/_nebari/stages/infrastructure/template/aws/main.tf
+++ b/src/_nebari/stages/infrastructure/template/aws/main.tf
@@ -94,4 +94,5 @@ module "kubernetes" {
endpoint_private_access = var.eks_endpoint_private_access
public_access_cidrs = var.eks_public_access_cidrs
+ permissions_boundary = var.permissions_boundary
}
diff --git a/src/_nebari/stages/infrastructure/template/aws/modules/kubernetes/main.tf b/src/_nebari/stages/infrastructure/template/aws/modules/kubernetes/main.tf
index fd53a636c2..848d1c0471 100644
--- a/src/_nebari/stages/infrastructure/template/aws/modules/kubernetes/main.tf
+++ b/src/_nebari/stages/infrastructure/template/aws/modules/kubernetes/main.tf
@@ -39,6 +39,10 @@ resource "aws_eks_node_group" "main" {
max_size = var.node_groups[count.index].max_size
}
+ labels = {
+ "dedicated" = var.node_groups[count.index].name
+ }
+
lifecycle {
ignore_changes = [
scaling_config[0].desired_size,
@@ -53,7 +57,9 @@ resource "aws_eks_node_group" "main" {
]
tags = merge({
- "kubernetes.io/cluster/${var.name}" = "shared"
+ # "kubernetes.io/cluster/${var.name}" = "shared"
+ "k8s.io/cluster-autoscaler/node-template/label/dedicated" = var.node_groups[count.index].name
+ propagate_at_launch = true
}, var.tags)
}
diff --git a/src/_nebari/stages/infrastructure/template/local/main.tf b/src/_nebari/stages/infrastructure/template/local/main.tf
index 9fd8bb2618..fb0d0997e1 100644
--- a/src/_nebari/stages/infrastructure/template/local/main.tf
+++ b/src/_nebari/stages/infrastructure/template/local/main.tf
@@ -1,8 +1,8 @@
terraform {
required_providers {
kind = {
- source = "kyma-incubator/kind"
- version = "0.0.11"
+ source = "tehcyx/kind"
+ version = "0.4.0"
}
docker = {
source = "kreuzwerker/docker"
@@ -48,7 +48,7 @@ resource "kind_cluster" "default" {
node {
role = "general"
- image = "kindest/node:v1.21.10"
+ image = "kindest/node:v1.29.2"
}
}
}
diff --git a/src/_nebari/stages/infrastructure/template/local/metallb.yaml b/src/_nebari/stages/infrastructure/template/local/metallb.yaml
index 9d6b6833c8..c832baebde 100644
--- a/src/_nebari/stages/infrastructure/template/local/metallb.yaml
+++ b/src/_nebari/stages/infrastructure/template/local/metallb.yaml
@@ -1,82 +1,3 @@
-apiVersion: policy/v1beta1
-kind: PodSecurityPolicy
-metadata:
- labels:
- app: metallb
- name: controller
-spec:
- allowPrivilegeEscalation: false
- allowedCapabilities: []
- allowedHostPaths: []
- defaultAddCapabilities: []
- defaultAllowPrivilegeEscalation: false
- fsGroup:
- ranges:
- - max: 65535
- min: 1
- rule: MustRunAs
- hostIPC: false
- hostNetwork: false
- hostPID: false
- privileged: false
- readOnlyRootFilesystem: true
- requiredDropCapabilities:
- - ALL
- runAsUser:
- ranges:
- - max: 65535
- min: 1
- rule: MustRunAs
- seLinux:
- rule: RunAsAny
- supplementalGroups:
- ranges:
- - max: 65535
- min: 1
- rule: MustRunAs
- volumes:
- - configMap
- - secret
- - emptyDir
----
-apiVersion: policy/v1beta1
-kind: PodSecurityPolicy
-metadata:
- labels:
- app: metallb
- name: speaker
-spec:
- allowPrivilegeEscalation: false
- allowedCapabilities:
- - NET_RAW
- allowedHostPaths: []
- defaultAddCapabilities: []
- defaultAllowPrivilegeEscalation: false
- fsGroup:
- rule: RunAsAny
- hostIPC: false
- hostNetwork: true
- hostPID: false
- hostPorts:
- - max: 7472
- min: 7472
- - max: 7946
- min: 7946
- privileged: true
- readOnlyRootFilesystem: true
- requiredDropCapabilities:
- - ALL
- runAsUser:
- rule: RunAsAny
- seLinux:
- rule: RunAsAny
- supplementalGroups:
- rule: RunAsAny
- volumes:
- - configMap
- - secret
- - emptyDir
----
apiVersion: v1
kind: ServiceAccount
metadata:
diff --git a/src/_nebari/stages/kubernetes_ingress/__init__.py b/src/_nebari/stages/kubernetes_ingress/__init__.py
index 4240c52f8c..25b23fa9df 100644
--- a/src/_nebari/stages/kubernetes_ingress/__init__.py
+++ b/src/_nebari/stages/kubernetes_ingress/__init__.py
@@ -74,15 +74,15 @@ def _attempt_dns_lookup(
):
for i in range(num_attempts):
try:
- resolved_ip = socket.gethostbyname(domain_name)
- if resolved_ip == ip:
+ _, _, resolved_ips = socket.gethostbyname_ex(domain_name)
+ if ip in resolved_ips:
print(
- f"DNS configured domain={domain_name} matches ingress ip={ip}"
+ f"DNS configured domain={domain_name} matches ingress ips={ip}"
)
return True
else:
print(
- f"Attempt {i+1} polling DNS domain={domain_name} does not match ip={ip} instead got {resolved_ip}"
+ f"Attempt {i+1} polling DNS domain={domain_name} does not match ip={ip} instead got {resolved_ips}"
)
except socket.gaierror:
print(
@@ -179,9 +179,9 @@ def input_vars(self, stage_outputs: Dict[str, Dict[str, Any]]):
cert_details["acme-email"] = self.config.certificate.acme_email
cert_details["acme-server"] = self.config.certificate.acme_server
elif cert_type == "existing":
- cert_details[
- "certificate-secret-name"
- ] = self.config.certificate.secret_name
+ cert_details["certificate-secret-name"] = (
+ self.config.certificate.secret_name
+ )
return {
**{
diff --git a/src/_nebari/stages/kubernetes_keycloak/template/modules/kubernetes/keycloak-helm/values.yaml b/src/_nebari/stages/kubernetes_keycloak/template/modules/kubernetes/keycloak-helm/values.yaml
index 94359cf451..abe7d4d3e3 100644
--- a/src/_nebari/stages/kubernetes_keycloak/template/modules/kubernetes/keycloak-helm/values.yaml
+++ b/src/_nebari/stages/kubernetes_keycloak/template/modules/kubernetes/keycloak-helm/values.yaml
@@ -4,6 +4,9 @@ ingress:
# we will need to define our own IngressRoute elsewhere.
enabled: false
+image:
+ repository: quay.io/keycloak/keycloak
+
imagePullSecrets:
- name: "extcrcreds"
diff --git a/src/_nebari/stages/kubernetes_keycloak_configuration/template/main.tf b/src/_nebari/stages/kubernetes_keycloak_configuration/template/main.tf
index fc7175ff58..95dba71810 100644
--- a/src/_nebari/stages/kubernetes_keycloak_configuration/template/main.tf
+++ b/src/_nebari/stages/kubernetes_keycloak_configuration/template/main.tf
@@ -24,6 +24,48 @@ resource "keycloak_realm" "main" {
web_authn_passwordless_policy {
}
+ lifecycle {
+ ignore_changes = [
+ # We want user to have control over attributes we are not managing
+ # If attribute is added above remove it from this list
+ # https://registry.terraform.io/providers/mrparkers/keycloak/latest/docs/resources/realm
+ attributes,
+ registration_allowed,
+ registration_email_as_username,
+ edit_username_allowed,
+ reset_password_allowed,
+ remember_me,
+ verify_email,
+ login_with_email_allowed,
+ login_theme,
+ account_theme,
+ admin_theme,
+ email_theme,
+ sso_session_idle_timeout,
+ sso_session_max_lifespan,
+ sso_session_idle_timeout_remember_me,
+ sso_session_max_lifespan_remember_me,
+ offline_session_idle_timeout,
+ offline_session_max_lifespan,
+ access_token_lifespan,
+ access_token_lifespan_for_implicit_flow,
+ access_code_lifespan,
+ access_code_lifespan_login,
+ access_code_lifespan_user_action,
+ action_token_generated_by_user_lifespan,
+ action_token_generated_by_admin_lifespan,
+ oauth2_device_code_lifespan,
+ oauth2_device_polling_interval,
+ smtp_server,
+ internationalization,
+ security_defenses,
+ password_policy,
+ otp_policy,
+ default_default_client_scopes,
+ default_optional_client_scopes,
+ ]
+ }
+
}
resource "keycloak_group" "groups" {
diff --git a/src/_nebari/stages/kubernetes_services/__init__.py b/src/_nebari/stages/kubernetes_services/__init__.py
index b4ecea4f28..fcf7ae8dfc 100644
--- a/src/_nebari/stages/kubernetes_services/__init__.py
+++ b/src/_nebari/stages/kubernetes_services/__init__.py
@@ -50,15 +50,28 @@ class Storage(schema.Base):
class JupyterHubTheme(schema.Base):
hub_title: str = "Nebari"
hub_subtitle: str = "Your open source data science platform"
- welcome: str = """Welcome! Learn about Nebari's features and configurations in the documentation. If you have any questions or feedback, reach the team on Nebari's support forums."""
- logo: str = "https://raw.githubusercontent.com/nebari-dev/nebari-design/main/logo-mark/horizontal/Nebari-Logo-Horizontal-Lockup-White-text.svg"
+ welcome: str = (
+ """Welcome! Learn about Nebari's features and configurations in the documentation. If you have any questions or feedback, reach the team on Nebari's support forums."""
+ )
+ logo: str = (
+ "https://raw.githubusercontent.com/nebari-dev/nebari-design/main/logo-mark/horizontal/Nebari-Logo-Horizontal-Lockup-White-text.svg"
+ )
+ favicon: str = (
+ "https://raw.githubusercontent.com/nebari-dev/nebari-design/main/symbol/favicon.ico"
+ )
primary_color: str = "#4f4173"
+ primary_color_dark: str = "#4f4173"
secondary_color: str = "#957da6"
+ secondary_color_dark: str = "#957da6"
accent_color: str = "#32C574"
+ accent_color_dark: str = "#32C574"
text_color: str = "#111111"
h1_color: str = "#652e8e"
h2_color: str = "#652e8e"
version: str = f"v{__version__}"
+ navbar_color: str = "#1c1d26"
+ navbar_text_color: str = "#f1f1f6"
+ navbar_hover_color: str = "#db96f3"
display_version: str = "True" # limitation of theme everything is a str
@@ -187,8 +200,29 @@ class ArgoWorkflows(schema.Base):
nebari_workflow_controller: NebariWorkflowController = NebariWorkflowController()
+class JHubApps(schema.Base):
+ enabled: bool = False
+
+
+class MonitoringOverrides(schema.Base):
+ loki: typing.Dict = {}
+ promtail: typing.Dict = {}
+ minio: typing.Dict = {}
+
+
class Monitoring(schema.Base):
enabled: bool = True
+ overrides: MonitoringOverrides = MonitoringOverrides()
+ minio_enabled: bool = True
+
+
+class JupyterLabPioneer(schema.Base):
+ enabled: bool = False
+ log_format: typing.Optional[str] = None
+
+
+class Telemetry(schema.Base):
+ jupyterlab_pioneer: JupyterLabPioneer = JupyterLabPioneer()
class JupyterHub(schema.Base):
@@ -206,7 +240,10 @@ class IdleCuller(schema.Base):
class JupyterLab(schema.Base):
+ default_settings: typing.Dict[str, typing.Any] = {}
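+    # e.g. {"@jupyterlab/apputils-extension:themes": {"theme": "JupyterLab Dark"}}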
idle_culler: IdleCuller = IdleCuller()
+ initial_repositories: typing.List[typing.Dict[str, str]] = []
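+    # each item maps a destination folder to a git URL (values illustrative), e.g.
+    # [{"examples/demo": "https://github.com/org/repo.git"}]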
+ preferred_dir: typing.Optional[str] = None
class InputSchema(schema.Base):
@@ -279,8 +316,10 @@ class InputSchema(schema.Base):
conda_store: CondaStore = CondaStore()
argo_workflows: ArgoWorkflows = ArgoWorkflows()
monitoring: Monitoring = Monitoring()
+ telemetry: Telemetry = Telemetry()
jupyterhub: JupyterHub = JupyterHub()
jupyterlab: JupyterLab = JupyterLab()
+ jhub_apps: JHubApps = JHubApps()
class OutputSchema(schema.Base):
@@ -328,6 +367,10 @@ class CondaStoreInputVars(schema.Base):
class JupyterhubInputVars(schema.Base):
jupyterhub_theme: Dict[str, Any] = Field(alias="jupyterhub-theme")
jupyterlab_image: ImageNameTag = Field(alias="jupyterlab-image")
+ jupyterlab_default_settings: Dict[str, Any] = Field(
+ alias="jupyterlab-default-settings"
+ )
+ initial_repositories: str = Field(alias="initial-repositories")
jupyterhub_overrides: List[str] = Field(alias="jupyterhub-overrides")
jupyterhub_stared_storage: str = Field(alias="jupyterhub-shared-storage")
jupyterhub_shared_endpoint: str = Field(None, alias="jupyterhub-shared-endpoint")
@@ -336,15 +379,34 @@ class JupyterhubInputVars(schema.Base):
jupyterhub_hub_extraEnv: str = Field(alias="jupyterhub-hub-extraEnv")
idle_culler_settings: Dict[str, Any] = Field(alias="idle-culler-settings")
argo_workflows_enabled: bool = Field(alias="argo-workflows-enabled")
+ jhub_apps_enabled: bool = Field(alias="jhub-apps-enabled")
+ cloud_provider: str = Field(alias="cloud-provider")
+ jupyterlab_preferred_dir: typing.Optional[str] = Field(
+ alias="jupyterlab-preferred-dir"
+ )
class DaskGatewayInputVars(schema.Base):
dask_worker_image: ImageNameTag = Field(alias="dask-worker-image")
dask_gateway_profiles: Dict[str, Any] = Field(alias="dask-gateway-profiles")
+ cloud_provider: str = Field(alias="cloud-provider")
class MonitoringInputVars(schema.Base):
monitoring_enabled: bool = Field(alias="monitoring-enabled")
+ minio_enabled: bool = Field(alias="minio-enabled")
+ grafana_loki_overrides: List[str] = Field(alias="grafana-loki-overrides")
+ grafana_promtail_overrides: List[str] = Field(alias="grafana-promtail-overrides")
+ grafana_loki_minio_overrides: List[str] = Field(
+ alias="grafana-loki-minio-overrides"
+ )
+
+
+class TelemetryInputVars(schema.Base):
+ jupyterlab_pioneer_enabled: bool = Field(alias="jupyterlab-pioneer-enabled")
+ jupyterlab_pioneer_log_format: typing.Optional[str] = Field(
+ alias="jupyterlab-pioneer-log-format"
+ )
class ArgoWorkflowsInputVars(schema.Base):
@@ -378,6 +440,7 @@ def input_vars(self, stage_outputs: Dict[str, Dict[str, Any]]):
realm_id = stage_outputs["stages/06-kubernetes-keycloak-configuration"][
"realm_id"
]["value"]
+ cloud_provider = self.config.provider.value
jupyterhub_shared_endpoint = (
stage_outputs["stages/02-infrastructure"]
.get("nfs_endpoint", {})
@@ -400,6 +463,12 @@ def input_vars(self, stage_outputs: Dict[str, Dict[str, Any]]):
"*/*": ["viewer"],
},
},
+ "jhub-apps": {
+ "primary_namespace": "",
+ "role_bindings": {
+ "*/*": ["viewer"],
+ },
+ },
}
# Compound any logout URLs from extensions so they are are logged out in succession
@@ -447,6 +516,7 @@ def input_vars(self, stage_outputs: Dict[str, Dict[str, Any]]):
),
jupyterhub_stared_storage=self.config.storage.shared_filesystem,
jupyterhub_shared_endpoint=jupyterhub_shared_endpoint,
+ cloud_provider=cloud_provider,
jupyterhub_profiles=self.config.profiles.dict()["jupyterlab"],
jupyterhub_image=_split_docker_image_name(
self.config.default_images.jupyterhub
@@ -457,6 +527,10 @@ def input_vars(self, stage_outputs: Dict[str, Dict[str, Any]]):
),
idle_culler_settings=self.config.jupyterlab.idle_culler.dict(),
argo_workflows_enabled=self.config.argo_workflows.enabled,
+ jhub_apps_enabled=self.config.jhub_apps.enabled,
+ initial_repositories=str(self.config.jupyterlab.initial_repositories),
+ jupyterlab_default_settings=self.config.jupyterlab.default_settings,
+ jupyterlab_preferred_dir=self.config.jupyterlab.preferred_dir,
)
dask_gateway_vars = DaskGatewayInputVars(
@@ -464,10 +538,24 @@ def input_vars(self, stage_outputs: Dict[str, Dict[str, Any]]):
self.config.default_images.dask_worker
),
dask_gateway_profiles=self.config.profiles.dict()["dask_worker"],
+ cloud_provider=cloud_provider,
)
monitoring_vars = MonitoringInputVars(
monitoring_enabled=self.config.monitoring.enabled,
+ minio_enabled=self.config.monitoring.minio_enabled,
+ grafana_loki_overrides=[json.dumps(self.config.monitoring.overrides.loki)],
+ grafana_promtail_overrides=[
+ json.dumps(self.config.monitoring.overrides.promtail)
+ ],
+ grafana_loki_minio_overrides=[
+ json.dumps(self.config.monitoring.overrides.minio)
+ ],
+ )
+
+ telemetry_vars = TelemetryInputVars(
+ jupyterlab_pioneer_enabled=self.config.telemetry.jupyterlab_pioneer.enabled,
+ jupyterlab_pioneer_log_format=self.config.telemetry.jupyterlab_pioneer.log_format,
)
argo_workflows_vars = ArgoWorkflowsInputVars(
@@ -485,6 +573,7 @@ def input_vars(self, stage_outputs: Dict[str, Dict[str, Any]]):
**dask_gateway_vars.dict(by_alias=True),
**monitoring_vars.dict(by_alias=True),
**argo_workflows_vars.dict(by_alias=True),
+ **telemetry_vars.dict(by_alias=True),
}
def check(
diff --git a/src/_nebari/stages/kubernetes_services/template/dask_gateway.tf b/src/_nebari/stages/kubernetes_services/template/dask_gateway.tf
index 765be2753a..b9b0a9c6c3 100644
--- a/src/_nebari/stages/kubernetes_services/template/dask_gateway.tf
+++ b/src/_nebari/stages/kubernetes_services/template/dask_gateway.tf
@@ -11,7 +11,6 @@ variable "dask-gateway-profiles" {
description = "Dask Gateway profiles to expose to user"
}
-
# =================== RESOURCES =====================
module "dask-gateway" {
source = "./modules/kubernetes/services/dask-gateway"
@@ -39,4 +38,6 @@ module "dask-gateway" {
# profiles
profiles = var.dask-gateway-profiles
+
+ cloud-provider = var.cloud-provider
}
diff --git a/src/_nebari/stages/kubernetes_services/template/jupyterhub.tf b/src/_nebari/stages/kubernetes_services/template/jupyterhub.tf
index 9fa68cbf53..4f8bebb9e4 100644
--- a/src/_nebari/stages/kubernetes_services/template/jupyterhub.tf
+++ b/src/_nebari/stages/kubernetes_services/template/jupyterhub.tf
@@ -39,6 +39,21 @@ variable "jupyterlab-profiles" {
description = "JupyterHub profiles to expose to user"
}
+variable "jupyterlab-preferred-dir" {
+ description = "Directory in which the JupyterLab should open the file browser"
+ type = string
+}
+
+variable "initial-repositories" {
+ description = "Map of folder location and git repo url to clone"
+ type = string
+}
+
+variable "jupyterlab-default-settings" {
+ description = "Default settings for JupyterLab to be placed in overrides.json"
+ type = map(any)
+}
+
variable "jupyterhub-hub-extraEnv" {
description = "Extracted overrides to merge with jupyterhub.hub.extraEnv"
type = string
@@ -50,7 +65,6 @@ variable "idle-culler-settings" {
type = any
}
-
module "kubernetes-nfs-server" {
count = var.jupyterhub-shared-endpoint == null ? 1 : 0
@@ -83,6 +97,8 @@ module "jupyterhub" {
name = var.name
namespace = var.environment
+ cloud-provider = var.cloud-provider
+
external-url = var.endpoint
realm_id = var.realm_id
@@ -99,6 +115,8 @@ module "jupyterhub" {
argo-workflows-enabled = var.argo-workflows-enabled
conda-store-argo-workflows-jupyter-scheduler-token = module.kubernetes-conda-store-server.service-tokens.argo-workflows-jupyter-scheduler
conda-store-service-name = module.kubernetes-conda-store-server.service_name
+ conda-store-jhub-apps-token = module.kubernetes-conda-store-server.service-tokens.jhub-apps
+ jhub-apps-enabled = var.jhub-apps-enabled
extra-mounts = {
"/etc/dask" = {
@@ -127,5 +145,12 @@ module "jupyterhub" {
jupyterhub-hub-extraEnv = var.jupyterhub-hub-extraEnv
idle-culler-settings = var.idle-culler-settings
+ initial-repositories = var.initial-repositories
+
+ jupyterlab-default-settings = var.jupyterlab-default-settings
+
+ jupyterlab-pioneer-enabled = var.jupyterlab-pioneer-enabled
+ jupyterlab-pioneer-log-format = var.jupyterlab-pioneer-log-format
+ jupyterlab-preferred-dir = var.jupyterlab-preferred-dir
}
diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/conda-store/server.tf b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/conda-store/server.tf
index abf1211ee4..ab9edd87e8 100644
--- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/conda-store/server.tf
+++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/conda-store/server.tf
@@ -156,6 +156,11 @@ resource "kubernetes_deployment" "server" {
name = "secret"
mount_path = "/var/lib/conda-store/"
}
+
+ volume_mount {
+ name = "home-volume"
+ mount_path = "/home/conda"
+ }
}
volume {
@@ -171,6 +176,13 @@ resource "kubernetes_deployment" "server" {
secret_name = kubernetes_secret.conda-store-secret.metadata.0.name
}
}
+
+ volume {
+ name = "home-volume"
+ empty_dir {
+ size_limit = "1Mi"
+ }
+ }
}
}
}
diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/dask-gateway/files/gateway_config.py b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/dask-gateway/files/gateway_config.py
index b1499efe44..2219d14e56 100644
--- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/dask-gateway/files/gateway_config.py
+++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/dask-gateway/files/gateway_config.py
@@ -114,9 +114,10 @@ def list_dask_environments():
def base_node_group(options):
- default_node_group = {
- config["worker-node-group"]["key"]: config["worker-node-group"]["value"]
- }
+ key = config["worker-node-group"]["key"]
+ if config.get("provider", "") == "aws":
+ key = "dedicated"
+ default_node_group = {key: config["worker-node-group"]["value"]}
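+    # On AWS, dask pods now select nodes via the "dedicated" label that the EKS
+    # module sets on each node group (see modules/kubernetes/main.tf above).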
# check `worker_extra_pod_config` first
worker_node_group = (
diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/dask-gateway/gateway.tf b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/dask-gateway/gateway.tf
index eb99f75d54..62265b350b 100644
--- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/dask-gateway/gateway.tf
+++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/dask-gateway/gateway.tf
@@ -24,6 +24,7 @@ resource "kubernetes_secret" "gateway" {
conda-store-api-token = var.conda-store-api-token
conda-store-service-name = var.conda-store-service-name
conda-store-namespace = var.namespace
+ provider = var.cloud-provider
})
}
}
diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/dask-gateway/variables.tf b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/dask-gateway/variables.tf
index 5feb72d167..7f8a4aa978 100644
--- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/dask-gateway/variables.tf
+++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/dask-gateway/variables.tf
@@ -199,3 +199,8 @@ variable "conda-store-service-name" {
description = "internal service-name:port where conda-store can be reached"
type = string
}
+
+variable "cloud-provider" {
+ description = "Name of the cloud provider to deploy to."
+ type = string
+}
diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/configmaps.tf b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/configmaps.tf
index 4f8c38464d..4b8f9145b9 100644
--- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/configmaps.tf
+++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/configmaps.tf
@@ -7,6 +7,21 @@ locals {
kernel_cull_connected = var.idle-culler-settings.kernel_cull_connected ? "True" : "False" # for Python compatible boolean values
kernel_cull_busy = var.idle-culler-settings.kernel_cull_busy ? "True" : "False" # for Python compatible boolean values
server_shutdown_no_activity_timeout = var.idle-culler-settings.server_shutdown_no_activity_timeout
+ jupyterlab_preferred_dir = var.jupyterlab-preferred-dir != null ? var.jupyterlab-preferred-dir : ""
+ }
+ )
+}
+
+locals {
+ jupyterlab-overrides-json-object = merge(
+ jsondecode(file("${path.module}/files/jupyterlab/overrides.json")),
+ var.jupyterlab-default-settings
+ )
+}
+
+locals {
+ jupyter-pioneer-config-py-template = templatefile("${path.module}/files/jupyter/jupyter_jupyterlab_pioneer_config.py.tpl", {
+ log_format = var.jupyterlab-pioneer-log-format != null ? var.jupyterlab-pioneer-log-format : ""
}
)
}
@@ -15,6 +30,27 @@ locals {
resource "local_file" "jupyter_server_config_py" {
content = local.jupyter-notebook-config-py-template
filename = "${path.module}/files/jupyter/jupyter_server_config.py"
+
+ provisioner "local-exec" {
+ # check the syntax of the config file without running it
+ command = "python -m py_compile ${self.filename}"
+ }
+}
+
+resource "local_file" "jupyter_jupyterlab_pioneer_config_py" {
+ content = local.jupyter-pioneer-config-py-template
+ filename = "${path.module}/files/jupyter/jupyter_jupyterlab_pioneer_config.py"
+
+ provisioner "local-exec" {
+ # check the syntax of the config file without running it
+ command = "python -m py_compile ${self.filename}"
+ }
+}
+
+
+resource "local_file" "overrides_json" {
+ content = jsonencode(local.jupyterlab-overrides-json-object)
+ filename = "${path.module}/files/jupyterlab/overrides.json"
}
@@ -31,9 +67,29 @@ resource "kubernetes_config_map" "etc-ipython" {
}
+locals {
+ etc-jupyter-config-data = merge(
+ {
+ "jupyter_server_config.py" = local_file.jupyter_server_config_py.content,
+ },
+ var.jupyterlab-pioneer-enabled ? {
+      # Quotes are a must here, as Terraform would otherwise treat "py" as a property
+      # of a defined resource named jupyter_jupyterlab_pioneer_config
+ "jupyter_jupyterlab_pioneer_config.py" = local_file.jupyter_jupyterlab_pioneer_config_py.content
+ } : {}
+ )
+}
+
+locals {
+ etc-jupyterlab-settings = {
+ "overrides.json" = local_file.overrides_json.content
+ }
+}
+
resource "kubernetes_config_map" "etc-jupyter" {
depends_on = [
- local_file.jupyter_server_config_py
+ local_file.jupyter_server_config_py,
+ local_file.jupyter_jupyterlab_pioneer_config_py
]
metadata {
@@ -41,9 +97,7 @@ resource "kubernetes_config_map" "etc-jupyter" {
namespace = var.namespace
}
- data = {
- "jupyter_server_config.py" : local_file.jupyter_server_config_py.content
- }
+ data = local.etc-jupyter-config-data
}
@@ -61,13 +115,25 @@ resource "kubernetes_config_map" "etc-skel" {
resource "kubernetes_config_map" "jupyterlab-settings" {
+ depends_on = [
+ local_file.overrides_json
+ ]
+
metadata {
name = "jupyterlab-settings"
namespace = var.namespace
}
+ data = local.etc-jupyterlab-settings
+}
+
+resource "kubernetes_config_map" "git_clone_update" {
+ metadata {
+ name = "git-clone-update"
+ namespace = var.namespace
+ }
+
data = {
- for filename in fileset("${path.module}/files/jupyterlab", "*") :
- filename => file("${path.module}/files/jupyterlab/${filename}")
+ "git-clone-update.sh" = "${file("${path.module}/files/extras/git_clone_update.sh")}"
}
}
diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/extras/git_clone_update.sh b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/extras/git_clone_update.sh
new file mode 100644
index 0000000000..bca1734ea2
--- /dev/null
+++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/extras/git_clone_update.sh
@@ -0,0 +1,109 @@
+#!/bin/sh
+
+################################################################################
+# Git Clone and/or Update Script
+#
+# This script automates Git repository handling with the following features:
+#
+# 1. Clones/Updates a Git repository into a specified folder;
+# 2. Creates a `.firstrun` file in the folder to mark the script's execution, ensuring it only runs once for each folder.
+#
+# Usage: ./git_clone_update.sh "<folder_path> <git_repo_url>" [...]
+# - <folder_path>: Path to the folder where the Git repository will be cloned or updated.
+# - <git_repo_url>: URL of the Git repository to clone or update.
+################################################################################
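+#
+# Example invocation (folder and URL are illustrative):
+#   ./git_clone_update.sh "examples/demo https://github.com/org/repo.git"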
+
+# Define colors for messages and command output
+GREEN='\033[0;32m'
+RED='\033[0;31m'
+YELLOW='\033[0;33m'
+NC='\033[0m'
+
+ERROR_LOG=".git-sync-errors.txt"
+
+echo -e "${GREEN}Starting execution...${NC}"
+
+# Check if at least one pair of folder and git repo URL is provided
+if [ "$#" -lt 1 ] || [ "$1" = "--help" ]; then
+ echo "Usage: $0 \" \" \" \" ..."
+
+ # Exit with status code 0 if '--help' is provided, otherwise exit with status code 1
+ [ "$1" = "--help" ] && exit 0 || exit 1
+fi
+
+fix_parent_dir_permissions() {
+ # Fix parent directory permissions to allow the JupyterLab user to access the cloned repository
+
+ local folder_path="$1"
+
+ # Retrieve the very first parent directory
+ local parent_dir=$(echo "$folder_path" | cut -d '/' -f1)
+
+ # Check if the parent directory has the correct permissions
+ if [ "$(stat -c "%u:%g" "$parent_dir")" != "1000:100" ]; then
+ echo "Fixing permissions for parent directory: $parent_dir"
+ chown -R 1000:100 "$parent_dir" || { echo "Error: Unable to set ownership for $parent_dir"; return 1; }
+ chmod -R 755 "$parent_dir" || { echo "Error: Unable to set permissions for $parent_dir"; return 1; }
+ fi
+}
+
+clone_update_repository() {
+ # Clone or update a Git repository into a specified folder,
+ # and create a `.firstrun` file to mark the script's execution.
+
+ local folder_path="$1"
+ local git_repo_url="$2"
+
+ local firstrun_file="$folder_path/.firstrun"
+
+ if [ -f "$firstrun_file" ]; then
+ echo -e "The script has already been run for ${folder_path}. Skipping. ${GREEN}✅${NC}"
+ else
+ if [ ! -d "$folder_path" ]; then
+ mkdir -p "$folder_path"
+ fi
+
+ fix_parent_dir_permissions "$folder_path" || return 1
+
+ if [ -d "$folder_path/.git" ]; then
+ echo -e "Updating Git repository in ${folder_path}..."
+ (cd "$folder_path" && git pull)
+ else
+ echo -e "Cloning Git repository to ${folder_path}..."
+ (git clone "$git_repo_url" "$folder_path")
+ fi
+
+ echo -e "Creating .firstrun file in ${folder_path}..."
+ touch "$firstrun_file"
+
+        # Give the JupyterLab user ownership of the newly created git folders
+ echo -e "Setting permissions for ${folder_path}..."
+ chown -R 1000:100 "$folder_path" || { echo "Error: Unable to set ownership for $folder_path"; return 1; }
+
+ echo -e "Execution for ${folder_path} completed. ${GREEN}✅${NC}"
+ fi
+}
+
+
+# Iterate through the "<folder_path> <git_repo_url>" pairs and process each one
+for pair in "$@"; do
+ # Split the pair into folder_path and git_repo_url using space as the delimiter
+ folder_path=$(echo "$pair" | cut -d ' ' -f1)
+ git_repo_url=$(echo "$pair" | cut -d ' ' -f2-)
+
+ if [ -z "$folder_path" ] || [ -z "$git_repo_url" ]; then
+        # Record the malformed argument pair in the error log
+ echo -e "${RED}Invalid argument format: \"${pair}\". Please provide folder path and Git repository URL in the correct order.${NC}" >> "$ERROR_LOG"
+ else
+ clone_update_repository "$folder_path" "$git_repo_url" || echo -e "${RED}Error executing for ${folder_path}.${NC}" >> "$ERROR_LOG"
+ fi
+done
+
+wait
+
+if [ -s "$ERROR_LOG" ]; then
+ echo -e "${RED}Some operations failed. See errors in '${ERROR_LOG}'.${NC}"
+ chown 1000:100 "$ERROR_LOG"
+else
+ echo -e "${GREEN}All operations completed successfully. ✅${NC}"
+fi
diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyter/jupyter_jupyterlab_pioneer_config.py.tpl b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyter/jupyter_jupyterlab_pioneer_config.py.tpl
new file mode 100644
index 0000000000..66b653b894
--- /dev/null
+++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyter/jupyter_jupyterlab_pioneer_config.py.tpl
@@ -0,0 +1,60 @@
+import logging
+import json
+
+
+default_log_format = "%(asctime)s %(levelname)9s %(lineno)4s %(module)s: %(message)s"
+log_format = "${log_format}"
+
+logging.basicConfig(
+ level=logging.INFO,
+ format=log_format if log_format else default_log_format
+)
+
+logger = logging.getLogger(__name__)
+
+CUSTOM_EXPORTER_NAME = "MyCustomExporter"
+
+
+def my_custom_exporter(args):
+ """Custom exporter to log JupyterLab events to command line."""
+ logger.info(json.dumps(args.get("data")))
+ return {
+ "exporter": CUSTOM_EXPORTER_NAME,
+ "message": ""
+ }
+
+
+c.JupyterLabPioneerApp.exporters = [
+ {
+ # sends telemetry data to the browser console
+ "type": "console_exporter",
+ },
+ {
+ # sends telemetry data (json) to the python console jupyter is running on
+ "type": "custom_exporter",
+ "args": {
+ "id": CUSTOM_EXPORTER_NAME
+ # add additional args for your exporter function here
+ },
+ }
+]
+
+c.JupyterLabPioneerApp.custom_exporter = {
+ CUSTOM_EXPORTER_NAME: my_custom_exporter,
+}
+
+c.JupyterLabPioneerApp.activeEvents = [
+ {"name": "ActiveCellChangeEvent", "logWholeNotebook": False},
+ {"name": "CellAddEvent", "logWholeNotebook": False},
+ {"name": "CellEditEvent", "logWholeNotebook": False},
+ {"name": "CellExecuteEvent", "logWholeNotebook": False},
+ {"name": "CellRemoveEvent", "logWholeNotebook": False},
+ {"name": "ClipboardCopyEvent", "logWholeNotebook": False},
+ {"name": "ClipboardCutEvent", "logWholeNotebook": False},
+ {"name": "ClipboardPasteEvent", "logWholeNotebook": False},
+ {"name": "NotebookHiddenEvent", "logWholeNotebook": False},
+ {"name": "NotebookOpenEvent", "logWholeNotebook": False},
+ {"name": "NotebookSaveEvent", "logWholeNotebook": False},
+ {"name": "NotebookScrollEvent", "logWholeNotebook": False},
+ {"name": "NotebookVisibleEvent", "logWholeNotebook": False},
+]
diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyter/jupyter_server_config.py.tpl b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyter/jupyter_server_config.py.tpl
index 79e3ec37d2..d5e089dfa3 100644
--- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyter/jupyter_server_config.py.tpl
+++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyter/jupyter_server_config.py.tpl
@@ -4,11 +4,13 @@
# Extra config available at:
# https://zero-to-jupyterhub.readthedocs.io/en/1.x/jupyterhub/customizing/user-management.html#culling-user-pods
-
# Enable Show Hidden Files menu option in View menu
c.ContentsManager.allow_hidden = True
c.FileContentsManager.allow_hidden = True
+# Set the preferred path for the frontend to start in
+c.FileContentsManager.preferred_dir = "${jupyterlab_preferred_dir}"
+
# Timeout (in seconds) in which a terminal has been inactive and ready to
# be culled.
c.TerminalManager.cull_inactive_timeout = ${terminal_cull_inactive_timeout} * 60
diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyterhub/01-theme.py b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyterhub/01-theme.py
index c57be0f573..b2c5e37032 100644
--- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyterhub/01-theme.py
+++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyterhub/01-theme.py
@@ -10,3 +10,8 @@
c.JupyterHub.template_vars = {
**jupyterhub_theme,
}
+
+if z2jh.get_config("custom.jhub-apps-enabled"):
+ from jhub_apps import themes
+
+ c.JupyterHub.template_vars = {**themes.DEFAULT_THEME, **jupyterhub_theme}
diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyterhub/02-spawner.py b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyterhub/02-spawner.py
index d4a6ce26f8..c3934aad05 100644
--- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyterhub/02-spawner.py
+++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyterhub/02-spawner.py
@@ -20,7 +20,70 @@ def get_username_hook(spawner):
)
+def get_conda_store_environments(user_info: dict):
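+    # Returns env names as "<namespace>-<name>", e.g. ["global-mypanel"] (illustrative)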
+ import urllib3
+ import yarl
+
+ external_url = z2jh.get_config("custom.conda-store-service-name")
+ token = z2jh.get_config("custom.conda-store-jhub-apps-token")
+ endpoint = "conda-store/api/v1/environment"
+
+ url = yarl.URL(f"http://{external_url}/{endpoint}/")
+
+ http = urllib3.PoolManager()
+ response = http.request(
+ "GET", str(url), headers={"Authorization": f"Bearer {token}"}
+ )
+
+ # parse response
+ j = json.loads(response.data.decode("UTF-8"))
+ # Filter and return conda environments for the user
+ return [f"{env['namespace']['name']}-{env['name']}" for env in j.get("data", [])]
+
+
c.Spawner.pre_spawn_hook = get_username_hook
c.JupyterHub.allow_named_servers = False
c.JupyterHub.spawner_class = KubeSpawner
+
+if z2jh.get_config("custom.jhub-apps-enabled"):
+ from jhub_apps import theme_template_paths
+ from jhub_apps.configuration import install_jhub_apps
+
+ domain = z2jh.get_config("custom.external-url")
+ hub_url = f"https://{domain}"
+ c.JupyterHub.bind_url = hub_url
+ c.JupyterHub.default_url = "/hub/home"
+ c.Spawner.debug = True
+
+ c.JAppsConfig.conda_envs = get_conda_store_environments
+ c.JAppsConfig.jupyterhub_config_path = (
+ "/usr/local/etc/jupyterhub/jupyterhub_config.py"
+ )
+ c.JAppsConfig.hub_host = "hub"
+ c.JAppsConfig.service_workers = 4
+
+ def service_for_jhub_apps(name, url):
+ return {
+ "name": name,
+ "display": True,
+ "info": {
+ "name": name,
+ "url": url,
+ "external": True,
+ },
+ "oauth_no_confirm": True,
+ }
+
+ c.JupyterHub.services.extend(
+ [
+ service_for_jhub_apps(name="Argo", url="/argo"),
+ service_for_jhub_apps(name="Users", url="/auth/admin/nebari/console/"),
+ service_for_jhub_apps(name="Environments", url="/conda-store"),
+ service_for_jhub_apps(name="Monitoring", url="/monitoring"),
+ service_for_jhub_apps(name="VSCode", url="/user/[USER]/vscode"),
+ ]
+ )
+
+ c.JupyterHub.template_paths = theme_template_paths
+ c = install_jhub_apps(c, spawner_to_subclass=KubeSpawner)
diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyterhub/03-profiles.py b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyterhub/03-profiles.py
index 190ccd813e..50d527b863 100644
--- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyterhub/03-profiles.py
+++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyterhub/03-profiles.py
@@ -1,3 +1,4 @@
+import ast
import copy
import functools
import json
@@ -207,12 +208,14 @@ def base_profile_extra_mounts():
extra_pod_config = {
"volumes": [
- {
- "name": volume["name"],
- "persistentVolumeClaim": {"claimName": volume["name"]},
- }
- if volume["kind"] == "persistentvolumeclaim"
- else {"name": volume["name"], "configMap": {"name": volume["name"]}}
+ (
+ {
+ "name": volume["name"],
+ "persistentVolumeClaim": {"claimName": volume["name"]},
+ }
+ if volume["kind"] == "persistentvolumeclaim"
+ else {"name": volume["name"], "configMap": {"name": volume["name"]}}
+ )
for mount_path, volume in extra_mounts.items()
]
}
@@ -232,6 +235,85 @@ def base_profile_extra_mounts():
}
+def configure_user_provisioned_repositories(username):
+ # Define paths and configurations
+ pvc_home_mount_path = f"home/{username}"
+
+ git_repos_provision_pvc = z2jh.get_config("custom.initial-repositories")
+ git_clone_update_config = {
+ "name": "git-clone-update",
+ "configMap": {"name": "git-clone-update", "defaultMode": 511},
+ }
+
+ # Convert the string configuration to a list of dictionaries
+ def string_to_objects(input_string):
+ try:
+ result = ast.literal_eval(input_string)
+ if isinstance(result, list) and all(
+ isinstance(item, dict) for item in result
+ ):
+ return result
+ else:
+ raise ValueError(
+ "Input string does not contain a list of dictionaries."
+ )
+ except (ValueError, SyntaxError):
+ # Return an error message if the input string is not a list of dictionaries
+ raise ValueError(f"Invalid input string format: {input_string}")
+
+ git_repos_provision_pvc = string_to_objects(git_repos_provision_pvc)
+
+ if not git_repos_provision_pvc:
+ return {}
+
+ # Define the extra pod configuration for the volumes
+ extra_pod_config = {
+ "volumes": [{"name": "git-clone-update", **git_clone_update_config}]
+ }
+
+ extras_git_clone_cp_path = f"/mnt/{pvc_home_mount_path}/.git-clone-update.sh"
+
+ BASH_EXECUTION = "./.git-clone-update.sh"
+
+ for local_repo_pair in git_repos_provision_pvc:
+ for path, remote_url in local_repo_pair.items():
+ BASH_EXECUTION += f" '{path} {remote_url}'"
+
+ EXEC_OWNERSHIP_CHANGE = " && ".join(
+ [
+ f"cp /mnt/extras/git-clone-update.sh {extras_git_clone_cp_path}",
+ f"chmod 777 {extras_git_clone_cp_path}",
+ f"chown -R 1000:100 {extras_git_clone_cp_path}",
+ f"cd /mnt/{pvc_home_mount_path}",
+ BASH_EXECUTION,
+ f"rm -f {extras_git_clone_cp_path}",
+ ]
+ )
+
+ # Define init containers configuration
+ init_containers = [
+ {
+ "name": "pre-populate-git-repos",
+ "image": "bitnami/git",
+ "command": ["sh", "-c", EXEC_OWNERSHIP_CHANGE],
+ "securityContext": {"runAsUser": 0},
+ "volumeMounts": [
+ {
+ "mountPath": f"/mnt/{pvc_home_mount_path}",
+ "name": "home",
+ "subPath": pvc_home_mount_path,
+ },
+ {"mountPath": "/mnt/extras", "name": "git-clone-update"},
+ ],
+ }
+ ]
+
+ return {
+ "extra_pod_config": extra_pod_config,
+ "init_containers": init_containers,
+ }
+
+
def configure_user(username, groups, uid=1000, gid=100):
environment = {
# nss_wrapper
@@ -245,6 +327,8 @@ def configure_user(username, groups, uid=1000, gid=100):
"SHELL": "/bin/bash",
# set home directory to username
"HOME": f"/home/{username}",
+ # Disable global usage of pip
+ "PIP_REQUIRE_VIRTUALENV": "true",
}
etc_passwd, etc_group = generate_nss_files(
@@ -285,9 +369,11 @@ def configure_user(username, groups, uid=1000, gid=100):
# mount the shared directories for user only if there are
# shared folders (groups) that the user is a member of
# else ensure that the `shared` folder symlink does not exist
- f"ln -sfn /shared /home/{username}/shared"
- if groups
- else f"rm -f /home/{username}/shared",
+ (
+ f"ln -sfn /shared /home/{username}/shared"
+ if groups
+ else f"rm -f /home/{username}/shared"
+ ),
# conda-store environment configuration
f"printf '{condarc}' > /home/{username}/.condarc",
# jupyter configuration
@@ -414,6 +500,7 @@ def render_profile(profile, username, groups, keycloak_profilenames):
profile_conda_store_mounts(username, groups),
base_profile_extra_mounts(),
configure_user(username, groups),
+ configure_user_provisioned_repositories(username),
profile_kubespawner_override,
],
{},
diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyterlab/overrides.json b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyterlab/overrides.json
index 69c500f744..fd6cafc624 100644
--- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyterlab/overrides.json
+++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyterlab/overrides.json
@@ -9,6 +9,106 @@
"authToken": ""
},
"@jupyterlab/apputils-extension:notification": {
+ "checkForUpdates": false,
"fetchNews": "false"
+ },
+ "@jupyterlab/mainmenu-extension:plugin": {
+ "menus": [
+ {
+ "id": "jp-mainmenu-file",
+ "items": [
+ {
+ "command": "help:open",
+ "rank": 0,
+ "args": {
+ "url": "/hub/home",
+ "text": "Home",
+ "newBrowserTab": true
+ }
+ },
+ {
+ "type": "submenu",
+ "submenu": {
+ "id": "jp-mainmenu-file-new"
+ },
+ "rank": 0.5
+ },
+ {
+ "command": "hub:control-panel",
+ "disabled": true
+ },
+ {
+ "command": "hub:logout",
+ "disabled": true
+ }
+ ]
+ },
+ {
+ "id": "jp-mainmenu-services",
+ "disabled": false,
+ "label": "Services",
+ "rank": 1000,
+ "items": [
+ {
+ "command": "help:open",
+ "rank": 1,
+ "args": {
+ "url": "/conda-store",
+ "text": "Environment Management",
+ "newBrowserTab": true
+ }
+ },
+ {
+ "command": "help:open",
+ "rank": 2,
+ "args": {
+ "url": "/auth/admin/nebari/console",
+ "text": "User Management",
+ "newBrowserTab": true
+ }
+ },
+ {
+ "command": "help:open",
+ "rank": 3,
+ "args": {
+ "url": "/monitoring",
+ "text": "Monitoring",
+ "newBrowserTab": true
+ }
+ },
+ {
+ "command": "help:open",
+ "rank": 4,
+ "args": {
+ "url": "/argo",
+ "text": "Argo Workflows",
+ "newBrowserTab": true
+ }
+ },
+ {
+ "command": "nebari:open-proxy",
+ "rank": 5,
+ "args": {
+ "name": "vscode"
+ }
+ }
+ ]
+ },
+ {
+ "id": "jp-mainmenu-help",
+ "rank": 1001,
+ "items": [
+ {
+ "command": "help:open",
+ "rank": 1001,
+ "args": {
+ "url": "https://www.nebari.dev/docs/welcome/",
+ "text": "Nebari documentation",
+ "newBrowserTab": true
+ }
+ }
+ ]
+ }
+ ]
}
}
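As a quick sanity check on the menu entries added above, the file can be loaded and the Services menu printed; a sketch, assuming it is run from a checkout of the repository:

```python
import json
from pathlib import Path

overrides_path = Path(
    "src/_nebari/stages/kubernetes_services/template/modules/kubernetes"
    "/services/jupyterhub/files/jupyterlab/overrides.json"
)
overrides = json.loads(overrides_path.read_text())

menus = overrides["@jupyterlab/mainmenu-extension:plugin"]["menus"]
services = next(m for m in menus if m["id"] == "jp-mainmenu-services")
for item in sorted(services["items"], key=lambda i: i.get("rank", 0)):
    args = item.get("args", {})
    print(args.get("text", item["command"]), "->", args.get("url") or args.get("name"))
# Environment Management -> /conda-store
# User Management -> /auth/admin/nebari/console
# Monitoring -> /monitoring
# Argo Workflows -> /argo
# nebari:open-proxy -> vscode
```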
diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/main.tf b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/main.tf
index 3c5822a461..af690112f6 100644
--- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/main.tf
+++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/main.tf
@@ -10,6 +10,46 @@ resource "random_password" "proxy_secret_token" {
special = false
}
+resource "random_password" "jhub_apps_jwt_secret" {
+ length = 32
+ special = false
+}
+
+locals {
+ jhub_apps_secrets_name = "jhub-apps-secrets"
+ jhub_apps_env_var_name = "JHUB_APP_JWT_SECRET_KEY"
+ singleuser_nodeselector_key = var.cloud-provider == "aws" ? "dedicated" : var.user-node-group.key
+ userscheduler_nodeselector_key = var.cloud-provider == "aws" ? "dedicated" : var.user-node-group.key
+ userscheduler_nodeselector_value = var.cloud-provider == "aws" ? var.general-node-group.value : var.user-node-group.value
+}
+
+resource "kubernetes_secret" "jhub_apps_secrets" {
+ metadata {
+ name = local.jhub_apps_secrets_name
+ namespace = var.namespace
+ }
+
+ data = {
+ jwt_secret_key = random_password.jhub_apps_jwt_secret.result
+ }
+
+ type = "Opaque"
+}
+
+locals {
+ jupyterhub_env_vars = [
+ {
+ name = local.jhub_apps_env_var_name,
+ valueFrom : {
+ secretKeyRef : {
+ name : local.jhub_apps_secrets_name
+ key : "jwt_secret_key"
+ }
+ }
+ }
+ ]
+}
+
resource "helm_release" "jupyterhub" {
name = "jupyterhub-${var.namespace}"
@@ -17,7 +57,7 @@ resource "helm_release" "jupyterhub" {
repository = "https://jupyterhub.github.io/helm-chart/"
chart = "jupyterhub"
- version = "2.0.0"
+ version = "3.2.1"
values = concat([
file("${path.module}/values.yaml"),
@@ -35,6 +75,9 @@ resource "helm_release" "jupyterhub" {
conda-store-mount = var.conda-store-mount
default-conda-store-namespace = var.default-conda-store-namespace
conda-store-service-name = var.conda-store-service-name
+ conda-store-jhub-apps-token = var.conda-store-jhub-apps-token
+ jhub-apps-enabled = var.jhub-apps-enabled
+ initial-repositories = var.initial-repositories
skel-mount = {
name = kubernetes_config_map.etc-skel.metadata.0.name
namespace = kubernetes_config_map.etc-skel.metadata.0.namespace
@@ -134,14 +177,14 @@ resource "helm_release" "jupyterhub" {
singleuser = {
image = var.jupyterlab-image
nodeSelector = {
- "${var.user-node-group.key}" = var.user-node-group.value
+ "${local.singleuser_nodeselector_key}" = var.user-node-group.value
}
}
scheduling = {
userScheduler = {
nodeSelector = {
- "${var.user-node-group.key}" = var.user-node-group.value
+ "${local.userscheduler_nodeselector_key}" = local.userscheduler_nodeselector_value
}
}
}
@@ -153,8 +196,10 @@ resource "helm_release" "jupyterhub" {
{
name = "OAUTH_LOGOUT_REDIRECT_URL",
value = format("%s?redirect_uri=%s", "https://${var.external-url}/auth/realms/${var.realm_id}/protocol/openid-connect/logout", urlencode(var.jupyterhub-logout-redirect-url))
- }],
- jsondecode(var.jupyterhub-hub-extraEnv))
+ },
+ ],
+ concat(local.jupyterhub_env_vars, jsondecode(var.jupyterhub-hub-extraEnv))
+ )
}
})]
)
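The JWT secret reaches the hub container as an environment variable through the `secretKeyRef` wiring above (and `jsondecode(var.jupyterhub-hub-extraEnv)` expects extra env vars in the same JSON shape). A minimal sketch of checking the wiring from inside the hub pod; the env var name comes from the locals block, the rest is assumption:

```python
import os

# local.jhub_apps_env_var_name above; the value is the 32-character
# random password stored in the jhub-apps-secrets Secret.
secret = os.environ["JHUB_APP_JWT_SECRET_KEY"]
assert len(secret) == 32
```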
diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/values.yaml b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/values.yaml
index b3a2331cae..d0bf954843 100644
--- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/values.yaml
+++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/values.yaml
@@ -5,8 +5,21 @@ hub:
pvc:
storage: 1Gi
baseUrl: "/"
+
networkPolicy:
- enabled: false
+ ingress:
+ - ports:
+ - port: 10202
+ from:
+ - podSelector:
+ matchLabels:
+ hub.jupyter.org/network-access-hub: "true"
+
+ service:
+ extraPorts:
+ - port: 10202
+ targetPort: 10202
+ name: jhub-apps
proxy:
secretToken: ""
@@ -26,6 +39,8 @@ proxy:
protocol: UDP
- port: 53
protocol: TCP
+ - port: 10202
+ protocol: TCP
- to:
- ipBlock:
cidr: 0.0.0.0/0
diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/variables.tf b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/variables.tf
index 7943d78d30..577dedc8ef 100644
--- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/variables.tf
+++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/variables.tf
@@ -102,12 +102,21 @@ variable "conda-store-service-name" {
type = string
}
+variable "conda-store-jhub-apps-token" {
+ description = "Token for conda-store to be used by jhub apps for fetching conda environments dynamically."
+ type = string
+}
+
variable "conda-store-environments" {
description = "conda environments from conda-store in filesystem namespace"
type = any
default = {}
}
+variable "jhub-apps-enabled" {
+ description = "Enable/Disable JupyterHub Apps extension to spin up apps, dashboards, etc"
+ type = bool
+}
variable "conda-store-argo-workflows-jupyter-scheduler-token" {
description = "Token for argo-workflows-jupyter-schedule to use conda-store"
@@ -148,3 +157,34 @@ variable "argo-workflows-enabled" {
description = "Enable Argo Workflows"
type = bool
}
+
+variable "jupyterlab-default-settings" {
+ description = "Default settings for JupyterLab to be placed in overrides.json"
+ type = map(any)
+}
+
+variable "jupyterlab-pioneer-enabled" {
+ description = "Enable JupyterLab Pioneer for telemetry"
+ type = bool
+}
+
+variable "jupyterlab-pioneer-log-format" {
+ description = "Logging format for JupyterLab Pioneer"
+ type = string
+}
+
+variable "jupyterlab-preferred-dir" {
+ description = "Directory in which the JupyterLab should open the file browser"
+ type = string
+}
+
+variable "cloud-provider" {
+ description = "Name of cloud provider."
+ type = string
+}
+
+variable "initial-repositories" {
+ description = "Map of folder location and git repo url to clone"
+ type = string
+ default = "[]"
+}
diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/loki/main.tf b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/loki/main.tf
new file mode 100644
index 0000000000..8180d46fb8
--- /dev/null
+++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/loki/main.tf
@@ -0,0 +1,103 @@
+resource "random_password" "minio_root_password" {
+ length = 32
+ special = false
+}
+
+locals {
+ minio-url = "http://${var.minio-release-name}:${var.minio-port}"
+ node-selector = {
+ "${var.node-group.key}" = "${var.node-group.value}"
+ }
+}
+
+resource "helm_release" "loki-minio" {
+ count = var.minio-enabled ? 1 : 0
+ name = var.minio-release-name
+ namespace = var.namespace
+ repository = "https://raw.githubusercontent.com/bitnami/charts/defb094c658024e4aa8245622dab202874880cbc/bitnami"
+ chart = "minio"
+ # last release that was Apache-2.0
+ version = var.minio-helm-chart-version
+
+ set {
+ name = "accessKey.password"
+ value = "admin"
+ }
+
+ set {
+ name = "secretKey.password"
+ value = random_password.minio_root_password.result
+ }
+
+ set {
+ name = "defaultBuckets"
+ value = join(" ", var.buckets)
+ }
+
+ set {
+ name = "persistence.size"
+ value = var.minio-storage
+ }
+
+ values = concat([
+ file("${path.module}/values_minio.yaml"),
+ jsonencode({
+ nodeSelector : local.node-selector
+ })
+ ], var.grafana-loki-minio-overrides)
+}
+
+
+resource "helm_release" "grafana-loki" {
+ name = "nebari-loki"
+ namespace = var.namespace
+ repository = "https://grafana.github.io/helm-charts"
+ chart = "loki"
+ version = var.loki-helm-chart-version
+
+ values = concat([
+ file("${path.module}/values_loki.yaml"),
+ jsonencode({
+ loki : {
+ storage : {
+ s3 : {
+ endpoint : local.minio-url,
+ accessKeyId : "admin"
+ secretAccessKey : random_password.minio_root_password.result,
+ s3ForcePathStyle : true
+ }
+ }
+ }
+ storageConfig : {
+ # We configure MinIO by using the AWS config because MinIO implements the S3 API
+ aws : {
+ s3 : local.minio-url
+ s3ForcePathStyle : true
+ }
+ }
+ write : { nodeSelector : local.node-selector }
+ read : { nodeSelector : local.node-selector }
+ backend : { nodeSelector : local.node-selector }
+ gateway : { nodeSelector : local.node-selector }
+ })
+ ], var.grafana-loki-overrides)
+
+ depends_on = [helm_release.loki-minio]
+}
+
+resource "helm_release" "grafana-promtail" {
+ # Promtail ships the contents of logs to the Loki instance
+ name = "nebari-promtail"
+ namespace = var.namespace
+ repository = "https://grafana.github.io/helm-charts"
+ chart = "promtail"
+ version = var.promtail-helm-chart-version
+
+ values = concat([
+ file("${path.module}/values_promtail.yaml"),
+ jsonencode({
+ })
+ ], var.grafana-promtail-overrides)
+
+ depends_on = [helm_release.grafana-loki]
+}
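Because MinIO implements the S3 API (which is why Loki's storage is configured through the `aws` block above), the deployment can be exercised with any S3 client. A sketch using boto3 with path-style addressing to match `s3ForcePathStyle`; the endpoint and access key mirror the module defaults, and the secret key placeholder must be filled in with the generated `minio_root_password`:

```python
import boto3
from botocore.config import Config

s3 = boto3.client(
    "s3",
    endpoint_url="http://nebari-loki-minio:9000",  # var.minio-release-name:var.minio-port
    aws_access_key_id="admin",
    aws_secret_access_key="<minio_root_password>",  # from the random_password resource
    config=Config(s3={"addressing_style": "path"}),  # path-style, as s3ForcePathStyle
)
print(sorted(b["Name"] for b in s3.list_buckets()["Buckets"]))
# Expected default buckets: ['admin', 'chunks', 'loki', 'ruler']
```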
diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/loki/values_loki.yaml b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/loki/values_loki.yaml
new file mode 100644
index 0000000000..c11ebe5d1f
--- /dev/null
+++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/loki/values_loki.yaml
@@ -0,0 +1,78 @@
+# https://github.com/grafana/loki/blob/4cae003ecedd474e4c15feab4ea2ef435afff83f/production/helm/loki/values.yaml
+
+loki:
+ storage:
+ type: s3
+ commonConfig:
+ replication_factor: 1
+ # Not required as it is inside cluster and not exposed to the public network
+ auth_enabled: false
+
+ # The Compactor deduplicates index entries and also applies granular retention.
+ compactor:
+ # Directory where marked chunks and temporary tables will be saved.
+ working_directory: /var/loki/compactor/data/retention
+ # minio s3
+ shared_store: s3
+ # how often compaction will happen
+ compaction_interval: 1h
+ # Delete old logs after the retention delete delay.
+ # Ideally we would use storage-based retention, but Loki does not currently
+ # implement it, which is why we do time-based retention instead.
+ retention_enabled: true
+ # Delay after which the Compactor will delete marked chunks.
+ retention_delete_delay: 1h
+ # Maximum number of goroutine workers instantiated to delete chunks.
+ retention_delete_worker_count: 150
+
+ limits_config:
+ # Loki's minimum retention period is 24h. The 60d default is reasonable in
+ # most cases; to retain logs for longer, override this value from
+ # nebari-config.yaml.
+ retention_period: 60d
+
+ schema_config:
+ configs:
+ # list of period_configs
+ # The date of the first day that index buckets should be created.
+ - from: "2024-03-01"
+ index:
+ period: 24h
+ prefix: loki_index_
+ object_store: s3
+ schema: v11
+ store: boltdb-shipper
+ storage_config:
+ boltdb_shipper:
+ # Directory where ingesters would write index files which would then be
+ # uploaded by shipper to configured storage
+ active_index_directory: /var/loki/compactor/data/index
+ # Cache location for restoring index files from storage for queries
+ cache_location: /var/loki/compactor/data/boltdb-cache
+ # Shared store for keeping index files
+ shared_store: s3
+
+# Configuration for the write pod(s)
+write:
+ # -- Number of replicas for the write pods.
+ # To keep the cost of running Nebari down we default to a single replica;
+ # users who need more can always override this from nebari-config.yaml.
+ replicas: 1
+
+read:
+ # -- Number of replicas for the read
+ replicas: 1
+
+backend:
+ # -- Number of replicas for the backend
+ replicas: 1
+
+minio:
+ # We are deploying minio from bitnami chart separately
+ enabled: false
+
+monitoring:
+ selfMonitoring:
+ grafanaAgent:
+ installOperator: false
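A rough sketch of what these retention settings mean for a single log line, under the assumption that a chunk becomes deletable once it exceeds `retention_period` and is removed after `retention_delete_delay` (plus up to one `compaction_interval` until the next compaction run):

```python
from datetime import datetime, timedelta

# Values from values_loki.yaml above.
retention_period = timedelta(days=60)
retention_delete_delay = timedelta(hours=1)

ingested_at = datetime(2024, 3, 1, 12, 0)
earliest_deletion = ingested_at + retention_period + retention_delete_delay
print(earliest_deletion)  # 2024-04-30 13:00:00
```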
diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/loki/values_minio.yaml b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/loki/values_minio.yaml
new file mode 100644
index 0000000000..666542bb45
--- /dev/null
+++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/loki/values_minio.yaml
@@ -0,0 +1 @@
+# https://github.com/bitnami/charts/blob/440ec159c26e4ff0748b9e9866b345d98220c40a/bitnami/minio/values.yaml
diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/loki/values_promtail.yaml b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/loki/values_promtail.yaml
new file mode 100644
index 0000000000..5a18a9bc09
--- /dev/null
+++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/loki/values_promtail.yaml
@@ -0,0 +1 @@
+# https://github.com/grafana/helm-charts/blob/3831194ba2abd2a0ca7a14ca00e578f8e9d2abc6/charts/promtail/values.yaml
diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/loki/variables.tf b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/loki/variables.tf
new file mode 100644
index 0000000000..a43695252c
--- /dev/null
+++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/loki/variables.tf
@@ -0,0 +1,84 @@
+variable "namespace" {
+ description = "deploy monitoring services on this namespace"
+ type = string
+ default = "dev"
+}
+
+variable "loki-helm-chart-version" {
+ description = "version to deploy for the loki helm chart"
+ type = string
+ default = "5.43.3"
+}
+
+variable "promtail-helm-chart-version" {
+ description = "version to deploy for the promtail helm chart"
+ type = string
+ default = "6.15.5"
+}
+
+variable "minio-helm-chart-version" {
+ description = "version to deploy for the minio helm chart"
+ type = string
+ default = "6.7.4"
+}
+
+variable "grafana-loki-overrides" {
+ description = "Grafana Loki helm chart overrides"
+ type = list(string)
+ default = []
+}
+
+variable "grafana-promtail-overrides" {
+ description = "Grafana Promtail helm chart overrides"
+ type = list(string)
+ default = []
+}
+
+variable "grafana-loki-minio-overrides" {
+ description = "Grafana Loki minio helm chart overrides"
+ type = list(string)
+ default = []
+}
+
+variable "minio-release-name" {
+ description = "Grafana Loki minio release name"
+ type = string
+ default = "nebari-loki-minio"
+}
+
+variable "minio-port" {
+ description = "Grafana Loki minio port"
+ type = number
+ default = 9000
+}
+
+variable "buckets" {
+ description = "Minio buckets"
+ type = list(string)
+ default = [
+ "chunks",
+ "ruler",
+ "admin",
+ "loki"
+ ]
+}
+
+variable "minio-storage" {
+ description = "Minio storage"
+ type = string
+ default = "50Gi"
+}
+
+variable "minio-enabled" {
+ description = "Deploy minio along with loki or not"
+ type = bool
+ default = true
+}
+
+variable "node-group" {
+ description = "Node key value pair for bound resources"
+ type = object({
+ key = string
+ value = string
+ })
+}
diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/main.tf b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/main.tf
index 7ba919ec54..413a9e08d2 100644
--- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/main.tf
+++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/main.tf
@@ -1,3 +1,8 @@
+resource "random_password" "grafana_admin_password" {
+ length = 32
+ special = false
+}
+
resource "helm_release" "prometheus-grafana" {
name = "nebari"
namespace = var.namespace
@@ -176,6 +181,9 @@ resource "helm_release" "prometheus-grafana" {
"${var.node-group.key}" = var.node-group.value
}
+ # Avoid using the default password, as that's a security risk
+ adminPassword : random_password.grafana_admin_password.result
+
sidecar = {
dashboards = {
annotations = {
diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/values.yaml b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/values.yaml
index ada868882f..f3cf47c88d 100644
--- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/values.yaml
+++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/values.yaml
@@ -1 +1,7 @@
# https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/values.yaml
+
+grafana:
+ additionalDataSources:
+ - name: Loki
+ type: loki
+ url: http://loki-gateway.dev
diff --git a/src/_nebari/stages/kubernetes_services/template/monitoring.tf b/src/_nebari/stages/kubernetes_services/template/monitoring.tf
index ec20a75ba7..39487c4bb1 100644
--- a/src/_nebari/stages/kubernetes_services/template/monitoring.tf
+++ b/src/_nebari/stages/kubernetes_services/template/monitoring.tf
@@ -14,3 +14,14 @@ module "monitoring" {
node-group = var.node_groups.general
}
+
+module "grafana-loki" {
+ count = var.monitoring-enabled ? 1 : 0
+ source = "./modules/kubernetes/services/monitoring/loki"
+ namespace = var.environment
+ grafana-loki-overrides = var.grafana-loki-overrides
+ grafana-promtail-overrides = var.grafana-promtail-overrides
+ grafana-loki-minio-overrides = var.grafana-loki-minio-overrides
+ node-group = var.node_groups.general
+ minio-enabled = var.minio-enabled
+}
diff --git a/src/_nebari/stages/kubernetes_services/template/variables.tf b/src/_nebari/stages/kubernetes_services/template/variables.tf
index 7b3394c01d..9e36e65979 100644
--- a/src/_nebari/stages/kubernetes_services/template/variables.tf
+++ b/src/_nebari/stages/kubernetes_services/template/variables.tf
@@ -39,8 +39,51 @@ variable "conda-store-default-namespace" {
type = string
}
-
variable "argo-workflows-enabled" {
description = "Enable Argo Workflows"
type = bool
}
+
+variable "jupyterlab-pioneer-enabled" {
+ description = "Enable JupyterLab Pioneer for telemetry"
+ type = bool
+}
+
+variable "jupyterlab-pioneer-log-format" {
+ description = "Logging format for JupyterLab Pioneer"
+ type = string
+}
+
+variable "jhub-apps-enabled" {
+ description = "Enable JupyterHub Apps"
+ type = bool
+}
+
+variable "cloud-provider" {
+ description = "Name of cloud provider."
+ type = string
+}
+
+variable "grafana-loki-overrides" {
+ description = "Helm chart overrides for loki"
+ type = list(string)
+ default = []
+}
+
+variable "grafana-promtail-overrides" {
+ description = "Helm chart overrides for promtail"
+ type = list(string)
+ default = []
+}
+
+variable "grafana-loki-minio-overrides" {
+ description = "Grafana Loki minio helm chart overrides"
+ type = list(string)
+ default = []
+}
+
+variable "minio-enabled" {
+ description = "Deploy minio along with loki or not"
+ type = bool
+ default = true
+}
diff --git a/src/_nebari/upgrade.py b/src/_nebari/upgrade.py
index a5227adf34..5c095f04a2 100644
--- a/src/_nebari/upgrade.py
+++ b/src/_nebari/upgrade.py
@@ -685,6 +685,37 @@ def _version_specific_upgrade(
return config
+class Upgrade_2024_1_1(UpgradeStep):
+ version = "2024.1.1"
+
+ def _version_specific_upgrade(
+ self, config, start_version, config_filename: Path, *args, **kwargs
+ ):
+ rich.print("\n ⚠️ Warning ⚠️")
+ rich.print(
+ "-> Please run the [green]rm -rf stages[/green] so that we can regenerate an updated set of Terraform scripts for your deployment."
+ )
+ rich.print("\n ⚠️ Deprecation Warning ⚠️")
+ rich.print(
+ "-> jupyterlab-videochat, retrolab, jupyter-tensorboard, jupyterlab-conda-store and jupyter-nvdashboard",
+ f"are no longer supported in Nebari version [green]{self.version}[/green] and will be uninstalled.",
+ )
+ rich.print()
+
+ return config
+
+
+class Upgrade_2024_3_1(UpgradeStep):
+ version = "2024.3.1"
+
+ def _version_specific_upgrade(
+ self, config, start_version, config_filename: Path, *args, **kwargs
+ ):
+ rich.print("Ready to upgrade to Nebari version [green]2024.3.1[/green].")
+
+ return config
+
+
__rounded_version__ = str(rounded_ver_parse(__version__))
# Manually-added upgrade steps must go above this line
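Following the pattern above, a future release would add another subclass in `src/_nebari/upgrade.py` before this marker; a sketch with a hypothetical version number:

```python
class Upgrade_2024_4_1(UpgradeStep):  # hypothetical future step
    version = "2024.4.1"

    def _version_specific_upgrade(
        self, config, start_version, config_filename: Path, *args, **kwargs
    ):
        rich.print("Ready to upgrade to Nebari version [green]2024.4.1[/green].")
        return config
```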
diff --git a/tests/common/kube_api.py b/tests/common/kube_api.py
new file mode 100644
index 0000000000..eec1d05d7b
--- /dev/null
+++ b/tests/common/kube_api.py
@@ -0,0 +1,40 @@
+import socket
+import typing
+
+from kubernetes import config
+from kubernetes.client.api import core_v1_api
+from kubernetes.client.models import V1Pod
+from kubernetes.stream import portforward
+
+
+def kubernetes_port_forward(
+ pod_labels: typing.Dict[str, str], port: int, namespace: str = "dev"
+) -> V1Pod:
+ """Given pod labels and port, finds the pod name and port forwards to
+ the given port.
+ :param pod_labels: dict of labels, by which to search the pod
+ :param port: port number to forward
+ :param namespace: kubernetes namespace name
+ :return: kubernetes pod object
+ """
+ config.load_kube_config()
+ core_v1 = core_v1_api.CoreV1Api()
+ label_selector = ",".join([f"{k}={v}" for k, v in pod_labels.items()])
+ pods = core_v1.list_namespaced_pod(
+ namespace=namespace, label_selector=label_selector
+ )
+ assert pods.items
+ pod = pods.items[0]
+ pod_name = pod.metadata.name
+
+ def kubernetes_create_connection(address, *args, **kwargs):
+ pf = portforward(
+ core_v1.connect_get_namespaced_pod_portforward,
+ pod_name,
+ namespace,
+ ports=str(port),
+ )
+ return pf.socket(port)
+
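+ # Monkeypatch socket.create_connection so that every connection made by the
+ # test process is tunnelled through the Kubernetes port-forward, regardless
+ # of the address it is given.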
+ socket.create_connection = kubernetes_create_connection
+ return pod
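A hypothetical usage sketch of this helper, mirroring how the Loki deployment tests below call it (labels and port taken from those tests):

```python
import urllib.request

from tests.common.kube_api import kubernetes_port_forward

pod = kubernetes_port_forward(
    pod_labels={
        "app.kubernetes.io/instance": "nebari-loki",
        "app.kubernetes.io/component": "gateway",
    },
    port=8080,
)
# The hostname and port in the URL are cosmetic: the patched
# create_connection routes the request through the port-forward.
url = f"http://{pod.metadata.name}.pod.dev.kubernetes:8080/loki/api/v1/labels"
print(urllib.request.urlopen(url).read().decode())
```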
diff --git a/tests/common/navigator.py b/tests/common/navigator.py
index dc2adc9eba..12a1445bd5 100644
--- a/tests/common/navigator.py
+++ b/tests/common/navigator.py
@@ -320,15 +320,13 @@ def _set_environment_via_popup(self, kernel=None):
# failure here indicates that the environment doesn't exist either
# because of incorrect naming syntax or because the env is still
# being built
- self.page.get_by_role("combobox").nth(1).select_option(
- f'{{"name":"{kernel}"}}'
- )
+ self.page.get_by_role("combobox").nth(1).select_option(kernel)
# click Select to close popup (deal with the two formats of this dialog)
try:
- self.page.get_by_role("button", name="Select", exact=True).click()
+ self.page.get_by_role("button", name="Select Kernel").click()
except Exception:
self.page.locator("div").filter(has_text="No KernelSelect").get_by_role(
- "button", name="Select"
+ "button", name="Select Kernel"
).click()
def set_environment(self, kernel):
@@ -360,10 +358,8 @@ def set_environment(self, kernel):
self._set_environment_via_popup(kernel)
# wait for the jupyter UI to catch up before moving forward
- # extract conda env name
- conda_env_label = re.search("conda-env-(.*)-py", kernel).group(1)
# see if the jupyter notebook label for the conda env is visible
- kernel_label_loc = self.page.get_by_role("button", name=conda_env_label)
+ kernel_label_loc = self.page.get_by_role("button", name=kernel)
if not kernel_label_loc.is_visible():
kernel_label_loc.wait_for(state="attached")
@@ -411,3 +407,18 @@ def write_file(self, filepath, content):
self.run_terminal_command(f"ls {filepath}")
logger.debug(f"time to complete {dt.datetime.now() - start}")
time.sleep(2)
+
+ def stop_server(self) -> None:
+ """Stops the JupyterHub server by navigating to the Hub Control Panel."""
+ self.page.get_by_text("File", exact=True).click()
+
+ with self.context.expect_page() as page_info:
+ self.page.get_by_role("menuitem", name="Home", exact=True).click()
+
+ home_page = page_info.value
+ home_page.wait_for_load_state()
+ stop_button = home_page.get_by_role("button", name="Stop My Server")
+ if not stop_button.is_visible():
+ stop_button.wait_for(state="visible")
+ stop_button.click()
+ stop_button.wait_for(state="hidden")
diff --git a/tests/common/playwright_fixtures.py b/tests/common/playwright_fixtures.py
index 388f6ef4b0..03e17a5065 100644
--- a/tests/common/playwright_fixtures.py
+++ b/tests/common/playwright_fixtures.py
@@ -48,6 +48,10 @@ def _navigator_session(request, browser_name, pytestconfig):
logger.debug(e)
raise
finally:
+ try:
+ nav.stop_server()
+ except Exception as e:
+ logger.debug(e)
nav.teardown()
diff --git a/tests/common/run_notebook.py b/tests/common/run_notebook.py
index 03c383299a..10d28d6637 100644
--- a/tests/common/run_notebook.py
+++ b/tests/common/run_notebook.py
@@ -220,7 +220,7 @@ def _restart_run_all(self):
# Restart dialog appears most, but not all of the time (e.g. set
# No Kernel, then Restart Run All)
restart_dialog_button = self.nav.page.get_by_role(
- "button", name="Restart", exact=True
+ "button", name="Confirm Kernel Restart"
)
if restart_dialog_button.is_visible():
restart_dialog_button.click()
diff --git a/tests/tests_deployment/test_grafana_api.py b/tests/tests_deployment/test_grafana_api.py
new file mode 100644
index 0000000000..cdb489f349
--- /dev/null
+++ b/tests/tests_deployment/test_grafana_api.py
@@ -0,0 +1,18 @@
+import base64
+
+import pytest
+import requests
+
+from tests.tests_deployment import constants
+
+
+@pytest.mark.filterwarnings("ignore::urllib3.exceptions.InsecureRequestWarning")
+def test_grafana_api_not_accessible_with_default_credentials():
+ """Making sure that Grafana's API is not accessible on default user/pass"""
+ user_pass_b64_encoded = base64.b64encode(b"admin:prom-operator").decode()
+ response = requests.get(
+ f"https://{constants.NEBARI_HOSTNAME}/monitoring/api/datasources",
+ headers={"Authorization": f"Basic {user_pass_b64_encoded}"},
+ verify=False,
+ )
+ assert response.status_code == 401
diff --git a/tests/tests_deployment/test_jupyterhub_ssh.py b/tests/tests_deployment/test_jupyterhub_ssh.py
index 0e90927a4c..fd6b0799d5 100644
--- a/tests/tests_deployment/test_jupyterhub_ssh.py
+++ b/tests/tests_deployment/test_jupyterhub_ssh.py
@@ -14,10 +14,14 @@
TIMEOUT_SECS = 300
+@pytest.fixture(scope="session")
+def api_token():
+ return get_jupyterhub_token("jupyterhub-ssh")
+
+
@pytest.fixture(scope="function")
-def paramiko_object():
+def paramiko_object(api_token):
"""Connects to JupyterHub ssh cluster from outside the cluster."""
- api_token = get_jupyterhub_token("jupyterhub-ssh")
try:
client = paramiko.SSHClient()
diff --git a/tests/tests_deployment/test_loki_deployment.py b/tests/tests_deployment/test_loki_deployment.py
new file mode 100644
index 0000000000..59210a8fc3
--- /dev/null
+++ b/tests/tests_deployment/test_loki_deployment.py
@@ -0,0 +1,126 @@
+import json
+import urllib.parse
+import urllib.request as urllib_request
+
+import pytest
+from kubernetes.client import V1Pod
+
+from tests.common.kube_api import kubernetes_port_forward
+
+LOKI_BACKEND_PORT = 3100
+LOKI_BACKEND_POD_LABELS = {
+ "app.kubernetes.io/instance": "nebari-loki",
+ "app.kubernetes.io/component": "backend",
+}
+
+MINIO_PORT = 9000
+MINIO_POD_LABELS = {
+ "app.kubernetes.io/instance": "nebari-loki-minio",
+ "app.kubernetes.io/name": "minio",
+}
+
+LOKI_GATEWAY_PORT = 8080
+LOKI_GATEWAY_POD_LABELS = {
+ "app.kubernetes.io/instance": "nebari-loki",
+ "app.kubernetes.io/component": "gateway",
+}
+
+
+@pytest.fixture(scope="module")
+def port_forward_fixture(request):
+ """Pytest fixture to port forward loki backend pod to make it accessible
+ on localhost so that we can run some tests on it.
+ """
+ return kubernetes_port_forward(
+ pod_labels=request.param["labels"], port=request.param["port"]
+ )
+
+
+def port_forward(labels, port):
+ params = {"labels": labels, "port": port}
+ return pytest.mark.parametrize("port_forward_fixture", [params], indirect=True)
+
+
+@pytest.mark.parametrize(
+ "endpoint_path",
+ (
+ "metrics",
+ "services",
+ "config",
+ "ready",
+ "log_level",
+ ),
+)
+@port_forward(labels=LOKI_BACKEND_POD_LABELS, port=LOKI_BACKEND_PORT)
+def test_loki_endpoint(endpoint_path: str, port_forward_fixture: V1Pod):
+ """This will hit some endpoints in the loki API and verify that we
+ get a 200 status code, to make sure Loki is working properly.
+ :param endpoint_path: a loki api endpoint path
+ :param port_forward_fixture: pytest fixture to port forward.
+ :return:
+ """
+ pod_name = port_forward_fixture.metadata.name
+ url = f"http://{pod_name}.pod.dev.kubernetes:{LOKI_BACKEND_PORT}/{endpoint_path}"
+ response = urllib_request.urlopen(url)
+ response.read().decode("utf-8")
+ assert response.code == 200
+ response.close()
+
+
+@port_forward(labels=MINIO_POD_LABELS, port=MINIO_PORT)
+def test_minio_accessible(port_forward_fixture: V1Pod):
+ """This will hit liveness endpoint of minio API and verify that we
+ get a 200 status code, to make sure minio is up and running.
+ :param port_forward_fixture: pytest fixture to port forward.
+ :return:
+ """
+ pod_name = port_forward_fixture.metadata.name
+ url = f"http://{pod_name}.pod.dev.kubernetes:{MINIO_PORT}/minio/health/live"
+ response = urllib_request.urlopen(url)
+ response.read().decode("utf-8")
+ assert response.code == 200
+ response.close()
+
+
+@port_forward(labels=LOKI_GATEWAY_POD_LABELS, port=LOKI_GATEWAY_PORT)
+def test_loki_gateway(port_forward_fixture: V1Pod):
+ """This will hit an endpoint of loki gateway API and verify that we
+ get a 200 status code, to make sure minio is up and running.
+ :param port_forward_fixture: pytest fixture to port forward.
+ :return:
+ """
+ pod_name = port_forward_fixture.metadata.name
+ url = f"http://{pod_name}.pod.dev.kubernetes:{LOKI_BACKEND_PORT}/loki/api/v1/labels"
+ response = urllib_request.urlopen(url)
+ response_content = response.read().decode("utf-8")
+ response_json = json.loads(response_content)
+ assert response.code == 200
+ assert response_json["status"] == "success"
+ response.close()
+
+
+@port_forward(labels=LOKI_GATEWAY_POD_LABELS, port=LOKI_GATEWAY_PORT)
+def test_loki_gateway_fetch_logs(port_forward_fixture: V1Pod):
+ """This will hit an endpoint of loki gateway API to fetch some logs
+ and verify logs received.
+ :param port_forward_fixture: pytest fixture to port forward.
+ :return: None
+ """
+ pod_name = port_forward_fixture.metadata.name
+ query_params = {
+ "limit": "5",
+ # Fetch logs for jupyterhub app
+ "query": '{app="jupyterhub"}',
+ }
+
+ encoded_params = urllib.parse.urlencode(query_params)
+ path = f"/loki/api/v1/query_range?{encoded_params}"
+ url = f"http://{pod_name}.pod.dev.kubernetes:{LOKI_BACKEND_PORT}/{path}"
+ response = urllib_request.urlopen(url)
+ response_content = response.read().decode("utf-8")
+ response_json = json.loads(response_content)
+ assert response.code == 200
+ assert response_json["status"] == "success"
+ # Make sure log lines received
+ assert len(response_json["data"]["result"][0]["values"]) > 0
+ response.close()
diff --git a/tests/tests_deployment/utils.py b/tests/tests_deployment/utils.py
index 327de53309..d175a2dd05 100644
--- a/tests/tests_deployment/utils.py
+++ b/tests/tests_deployment/utils.py
@@ -28,16 +28,17 @@ def get_jupyterhub_session():
def get_jupyterhub_token(note="jupyterhub-tests-deployment"):
session = get_jupyterhub_session()
+ xsrf_token = session.cookies.get("_xsrf")
+ headers = {"Referer": f"https://{constants.NEBARI_HOSTNAME}/hub/token"}
+ if xsrf_token:
+ headers["X-XSRFToken"] = xsrf_token
+ data = {"note": note, "expires_in": None}
r = session.post(
f"https://{constants.NEBARI_HOSTNAME}/hub/api/users/{constants.KEYCLOAK_USERNAME}/tokens",
- headers={
- "Referer": f"https://{constants.NEBARI_HOSTNAME}/hub/token",
- },
- json={
- "note": note,
- "expires_in": None,
- },
+ headers=headers,
+ json=data,
)
+
return r.json()["token"]
diff --git a/tests/tests_e2e/cypress/integration/main.js b/tests/tests_e2e/cypress/integration/main.js
index 1e1fbf206f..1184ba76d6 100644
--- a/tests/tests_e2e/cypress/integration/main.js
+++ b/tests/tests_e2e/cypress/integration/main.js
@@ -61,6 +61,9 @@ describe('First Test', () => {
// Click VS Code Launcher exists
cy.get('div.jp-LauncherCard[title="VS Code [↗]"]').should('exist');
+ // Should reflect theme set by default_settings
+ cy.get('body[data-jp-theme-name="JupyterLab Dark"]').should('exist');
+
// Stop my Jupyter server - must do this so PVC can be destroyed on Minikube
cy.visit('/hub/home');
diff --git a/tests/tests_e2e/playwright/test_playwright.py b/tests/tests_e2e/playwright/test_playwright.py
index 7f4dabac08..903af3f0dd 100644
--- a/tests/tests_e2e/playwright/test_playwright.py
+++ b/tests/tests_e2e/playwright/test_playwright.py
@@ -13,6 +13,6 @@ def test_notebook(navigator, test_data_root):
test_app.run(
path=notebook_name,
expected_outputs=["success: 6"],
- conda_env="conda-env-default-py",
+ conda_env="default *",
timeout=500,
)
diff --git a/tests/tests_unit/cli_validate/min.happy.jupyterlab.default_settings.yaml b/tests/tests_unit/cli_validate/min.happy.jupyterlab.default_settings.yaml
new file mode 100644
index 0000000000..4b29a53c1c
--- /dev/null
+++ b/tests/tests_unit/cli_validate/min.happy.jupyterlab.default_settings.yaml
@@ -0,0 +1,5 @@
+project_name: test
+jupyterlab:
+ default_settings:
+ "@jupyterlab/apputils-extension:themes":
+ theme: JupyterLab Dark
diff --git a/tests/tests_unit/cli_validate/min.happy.monitoring.overrides.yaml b/tests/tests_unit/cli_validate/min.happy.monitoring.overrides.yaml
new file mode 100644
index 0000000000..587c0cf5cb
--- /dev/null
+++ b/tests/tests_unit/cli_validate/min.happy.monitoring.overrides.yaml
@@ -0,0 +1,10 @@
+project_name: test
+monitoring:
+ enabled: true
+ overrides:
+ loki:
+ loki: foobar
+ promtail:
+ promtail: foobar
+ minio:
+ minio: foobar