diff --git a/.cirun.yml b/.cirun.yml
index bdabe6500b..dcc829bb8b 100644
--- a/.cirun.yml
+++ b/.cirun.yml
@@ -4,8 +4,8 @@ runners:
- name: run-k8s-tests
# Cloud Provider: AWS
cloud: aws
- # Instance Type has 4 vcpu, 16 GiB memory, Up to 5 Gbps Network Performance
- instance_type: t3a.xlarge
+ # Instance Type has 8 vcpu, 32 GiB memory, Up to 5 Gbps Network Performance
+ instance_type: t3a.2xlarge
# Custom AMI with docker/cypress/hub pre-installed
machine_image: ami-0a388df278199ff52
# Region: Oregon
diff --git a/.github/workflows/test_local_integration.yaml b/.github/workflows/test_local_integration.yaml
index 8ddc1f9690..05dec384b0 100644
--- a/.github/workflows/test_local_integration.yaml
+++ b/.github/workflows/test_local_integration.yaml
@@ -96,6 +96,24 @@ jobs:
sed -i -E 's/(cpu_guarantee):\s+[0-9\.]+/\1: 0.25/g' "nebari-config.yaml"
sed -i -E 's/(mem_guarantee):\s+[A-Za-z0-9\.]+/\1: 0.25G/g' "nebari-config.yaml"
+ # Change default JupyterLab theme
+ cat >> nebari-config.yaml <<- EOM
+ jupyterlab:
+ default_settings:
+ "@jupyterlab/apputils-extension:themes":
+ theme: JupyterLab Dark
+ EOM
+
+ # Change default value for minio persistence size
+ cat >> nebari-config.yaml <<- EOM
+ monitoring:
+ enabled: true
+ overrides:
+ minio:
+ persistence:
+ size: 1Gi
+ EOM
+
cat nebari-config.yaml
- name: Deploy Nebari
@@ -106,7 +124,7 @@ jobs:
- name: Basic kubectl checks after deployment
if: always()
run: |
- kubectl get all,cm,secret,ing -A
+ kubectl get all,cm,secret,pv,pvc,ing -A
- name: Check github-actions.nebari.dev resolves
run: |
@@ -167,22 +185,6 @@ jobs:
run: |
pytest tests/tests_deployment/ -v -s
- - name: JupyterHub Notebook Tests
- timeout-minutes: 2
- # run jhub-client after pytest since jhubctl can cleanup
- # the running server
- env:
- JUPYTERHUB_USERNAME: ${{ env.TEST_USERNAME }}
- JUPYTERHUB_PASSWORD: ${{ env.TEST_PASSWORD }}
- run: |
- sleep 60
- jhubctl --verbose run --hub=https://github-actions.nebari.dev\
- --auth-type=keycloak \
- --validate --no-verify-ssl \
- --kernel python3 \
- --stop-server \
- --notebook tests/tests_deployment/assets/notebook/simple.ipynb \
-
### CLEANUP AFTER TESTS
- name: Cleanup nebari deployment
if: always()
diff --git a/RELEASE.md b/RELEASE.md
index f3f93499ae..41433e9e13 100644
--- a/RELEASE.md
+++ b/RELEASE.md
@@ -11,6 +11,29 @@ This file is copied to nebari-dev/nebari-docs using a GitHub Action. -->
## Upcoming Release
+## Release 2024.3.1 - March 11, 2024
+
+### What's Changed
+* Modify Playwright test to account for changes in JupyterLab UI. by @marcelovilla in https://github.com/nebari-dev/nebari/pull/2232
+* Add favicon to jupyterhub theme. by @jbouder in https://github.com/nebari-dev/nebari/pull/2222
+* Set min nodes to 0 for worker and user. by @pt247 in https://github.com/nebari-dev/nebari/pull/2168
+* Remove `jhub-client` from pyproject.toml by @pavithraes in https://github.com/nebari-dev/nebari/pull/2242
+* Include permission validation step to programmatically cloned repos by @viniciusdc in https://github.com/nebari-dev/nebari/pull/2258
+* Expose jupyter's preferred dir as a config option by @krassowski in https://github.com/nebari-dev/nebari/pull/2251
+* Allow to configure default settings for JupyterLab (`overrides.json`) by @krassowski in https://github.com/nebari-dev/nebari/pull/2249
+* Feature/jlab menu customization by @marcelovilla in https://github.com/nebari-dev/nebari/pull/2259
+* Add cloud provider to the dask config.json file by @marcelovilla in https://github.com/nebari-dev/nebari/pull/2266
+* Fix syntax error in jupyter-server-config Python file by @krassowski in https://github.com/nebari-dev/nebari/pull/2286
+* Add "Open VS Code" entry in services by @krassowski in https://github.com/nebari-dev/nebari/pull/2267
+* Add Grafana Loki integration by @aktech in https://github.com/nebari-dev/nebari/pull/2156
+
+### New Contributors
+* @jbouder made their first contribution in https://github.com/nebari-dev/nebari/pull/2222
+* @krassowski made their first contribution in https://github.com/nebari-dev/nebari/pull/2251
+
+**Full Changelog**: https://github.com/nebari-dev/nebari/compare/2024.1.1...2024.3.1
+
+
## Release 2024.1.1 - January 17, 2024
### Feature changes and enhancements
diff --git a/pyproject.toml b/pyproject.toml
index 20d2cc6c5a..cb90bc52d0 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -84,7 +84,6 @@ dev = [
"diagrams",
"escapism",
"importlib-metadata<5.0",
- "jhub-client",
"jinja2",
"mypy==1.6.1",
"paramiko",
diff --git a/src/_nebari/constants.py b/src/_nebari/constants.py
index 19ed5ce7e8..0d49bc6e1b 100644
--- a/src/_nebari/constants.py
+++ b/src/_nebari/constants.py
@@ -1,4 +1,4 @@
-CURRENT_RELEASE = "2024.1.1"
+CURRENT_RELEASE = "2024.3.1"
# NOTE: Terraform cannot be upgraded further due to Hashicorp licensing changes
# implemented in August 2023.
@@ -13,7 +13,7 @@
DEFAULT_NEBARI_DASK_VERSION = CURRENT_RELEASE
DEFAULT_NEBARI_IMAGE_TAG = CURRENT_RELEASE
-DEFAULT_NEBARI_WORKFLOW_CONTROLLER_IMAGE_TAG = "2024.1.1"
+DEFAULT_NEBARI_WORKFLOW_CONTROLLER_IMAGE_TAG = CURRENT_RELEASE
DEFAULT_CONDA_STORE_IMAGE_TAG = "2024.1.1"
diff --git a/src/_nebari/provider/cloud/amazon_web_services.py b/src/_nebari/provider/cloud/amazon_web_services.py
index 576f72c1c6..2bf905bfcb 100644
--- a/src/_nebari/provider/cloud/amazon_web_services.py
+++ b/src/_nebari/provider/cloud/amazon_web_services.py
@@ -143,6 +143,46 @@ def aws_get_vpc_id(name: str, namespace: str, region: str) -> Optional[str]:
return None
+def set_asg_tags(asg_node_group_map: Dict[str, str], region: str) -> None:
+ """Set tags for AWS node scaling from zero to work."""
+ session = aws_session(region=region)
+ autoscaling_client = session.client("autoscaling")
+ tags = []
+ for asg_name, node_group in asg_node_group_map.items():
+ tags.append(
+ {
+ "Key": "k8s.io/cluster-autoscaler/node-template/label/dedicated",
+ "Value": node_group,
+ "ResourceId": asg_name,
+ "ResourceType": "auto-scaling-group",
+ "PropagateAtLaunch": True,
+ }
+ )
+ autoscaling_client.create_or_update_tags(Tags=tags)
+
+
+def aws_get_asg_node_group_mapping(
+ name: str, namespace: str, region: str
+) -> Dict[str, str]:
+ """Return a dictionary of autoscaling groups and their associated node groups."""
+ asg_node_group_mapping = {}
+ session = aws_session(region=region)
+ eks = session.client("eks")
+ node_groups_response = eks.list_nodegroups(
+ clusterName=f"{name}-{namespace}",
+ )
+ node_groups = node_groups_response.get("nodegroups", [])
+ for nodegroup in node_groups:
+ response = eks.describe_nodegroup(
+ clusterName=f"{name}-{namespace}", nodegroupName=nodegroup
+ )
+ node_group_name = response["nodegroup"]["nodegroupName"]
+ auto_scaling_groups = response["nodegroup"]["resources"]["autoScalingGroups"]
+ for auto_scaling_group in auto_scaling_groups:
+ asg_node_group_mapping[auto_scaling_group["name"]] = node_group_name
+ return asg_node_group_mapping
+
+
def aws_get_subnet_ids(name: str, namespace: str, region: str) -> List[str]:
"""Return list of subnet IDs for the EKS cluster named `{name}-{namespace}`."""
session = aws_session(region=region)
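Taken together, the two helpers above implement scale-from-zero support: discover the autoscaling groups behind each EKS node group, then tag them so the cluster autoscaler can infer node labels before any node exists. A minimal usage sketch (assuming boto3 credentials are configured; the cluster name, namespace, and region are illustrative):

```python
# Illustrative wiring of the two helpers added above; names are examples only.
from _nebari.provider.cloud.amazon_web_services import (
    aws_get_asg_node_group_mapping,
    set_asg_tags,
)

# Map each autoscaling group to its EKS node group, e.g.
# {"eks-user-abc123": "user", "eks-worker-def456": "worker"}
mapping = aws_get_asg_node_group_mapping(
    name="my-nebari", namespace="dev", region="us-west-2"
)

# Tag the ASGs with k8s.io/cluster-autoscaler/node-template/label/dedicated so
# the autoscaler knows which labels nodes scaled up from zero will carry.
set_asg_tags(mapping, region="us-west-2")
```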
diff --git a/src/_nebari/stages/infrastructure/__init__.py b/src/_nebari/stages/infrastructure/__init__.py
index 1e34cb05ef..5c1aa77f77 100644
--- a/src/_nebari/stages/infrastructure/__init__.py
+++ b/src/_nebari/stages/infrastructure/__init__.py
@@ -145,6 +145,17 @@ class AWSInputVars(schema.Base):
tags: Dict[str, str] = {}
+def _calculate_asg_node_group_map(config: schema.Main):
+ if config.provider == schema.ProviderEnum.aws:
+ return amazon_web_services.aws_get_asg_node_group_mapping(
+ config.project_name,
+ config.namespace,
+ config.amazon_web_services.region,
+ )
+ else:
+ return {}
+
+
def _calculate_node_groups(config: schema.Main):
if config.provider == schema.ProviderEnum.aws:
return {
@@ -438,10 +449,10 @@ class AmazonWebServicesProvider(schema.Base):
node_groups: Dict[str, AWSNodeGroup] = {
"general": AWSNodeGroup(instance="m5.2xlarge", min_nodes=1, max_nodes=1),
"user": AWSNodeGroup(
- instance="m5.xlarge", min_nodes=1, max_nodes=5, single_subnet=False
+ instance="m5.xlarge", min_nodes=0, max_nodes=5, single_subnet=False
),
"worker": AWSNodeGroup(
- instance="m5.xlarge", min_nodes=1, max_nodes=5, single_subnet=False
+ instance="m5.xlarge", min_nodes=0, max_nodes=5, single_subnet=False
),
}
existing_subnet_ids: List[str] = None
@@ -814,6 +825,16 @@ def set_outputs(
outputs["node_selectors"] = _calculate_node_groups(self.config)
super().set_outputs(stage_outputs, outputs)
+ @contextlib.contextmanager
+ def post_deploy(
+ self, stage_outputs: Dict[str, Dict[str, Any]], disable_prompt: bool = False
+ ):
+ asg_node_group_map = _calculate_asg_node_group_map(self.config)
+ if asg_node_group_map:
+ amazon_web_services.set_asg_tags(
+ asg_node_group_map, self.config.amazon_web_services.region
+ )
+
@contextlib.contextmanager
def deploy(
self, stage_outputs: Dict[str, Dict[str, Any]], disable_prompt: bool = False
diff --git a/src/_nebari/stages/infrastructure/template/aws/modules/kubernetes/main.tf b/src/_nebari/stages/infrastructure/template/aws/modules/kubernetes/main.tf
index fd53a636c2..848d1c0471 100644
--- a/src/_nebari/stages/infrastructure/template/aws/modules/kubernetes/main.tf
+++ b/src/_nebari/stages/infrastructure/template/aws/modules/kubernetes/main.tf
@@ -39,6 +39,10 @@ resource "aws_eks_node_group" "main" {
max_size = var.node_groups[count.index].max_size
}
+ labels = {
+ "dedicated" = var.node_groups[count.index].name
+ }
+
lifecycle {
ignore_changes = [
scaling_config[0].desired_size,
@@ -53,7 +57,9 @@ resource "aws_eks_node_group" "main" {
]
tags = merge({
- "kubernetes.io/cluster/${var.name}" = "shared"
+ # "kubernetes.io/cluster/${var.name}" = "shared"
+ "k8s.io/cluster-autoscaler/node-template/label/dedicated" = var.node_groups[count.index].name
+ propagate_at_launch = true
}, var.tags)
}
diff --git a/src/_nebari/stages/kubernetes_services/__init__.py b/src/_nebari/stages/kubernetes_services/__init__.py
index bde7163744..a9124f41ac 100644
--- a/src/_nebari/stages/kubernetes_services/__init__.py
+++ b/src/_nebari/stages/kubernetes_services/__init__.py
@@ -53,6 +53,7 @@ class JupyterHubTheme(schema.Base):
hub_subtitle: str = "Your open source data science platform"
welcome: str = """Welcome! Learn about Nebari's features and configurations in the documentation. If you have any questions or feedback, reach the team on Nebari's support forums."""
logo: str = "https://raw.githubusercontent.com/nebari-dev/nebari-design/main/logo-mark/horizontal/Nebari-Logo-Horizontal-Lockup-White-text.svg"
+ favicon: str = "https://raw.githubusercontent.com/nebari-dev/nebari-design/main/symbol/favicon.ico"
primary_color: str = "#4f4173"
primary_color_dark: str = "#4f4173"
secondary_color: str = "#957da6"
@@ -198,8 +199,16 @@ class JHubApps(schema.Base):
enabled: bool = False
+class MonitoringOverrides(schema.Base):
+ loki: typing.Dict = {}
+ promtail: typing.Dict = {}
+ minio: typing.Dict = {}
+
+
class Monitoring(schema.Base):
enabled: bool = True
+ overrides: MonitoringOverrides = MonitoringOverrides()
+ minio_enabled: bool = True
class JupyterLabPioneer(schema.Base):
@@ -226,8 +235,10 @@ class IdleCuller(schema.Base):
class JupyterLab(schema.Base):
+ default_settings: typing.Dict[str, typing.Any] = {}
idle_culler: IdleCuller = IdleCuller()
initial_repositories: typing.List[typing.Dict[str, str]] = []
+ preferred_dir: typing.Optional[str] = None
class InputSchema(schema.Base):
@@ -351,6 +362,9 @@ class CondaStoreInputVars(schema.Base):
class JupyterhubInputVars(schema.Base):
jupyterhub_theme: Dict[str, Any] = Field(alias="jupyterhub-theme")
jupyterlab_image: ImageNameTag = Field(alias="jupyterlab-image")
+ jupyterlab_default_settings: Dict[str, Any] = Field(
+ alias="jupyterlab-default-settings"
+ )
initial_repositories: str = Field(alias="initial-repositories")
jupyterhub_overrides: List[str] = Field(alias="jupyterhub-overrides")
jupyterhub_stared_storage: str = Field(alias="jupyterhub-shared-storage")
@@ -361,15 +375,26 @@ class JupyterhubInputVars(schema.Base):
idle_culler_settings: Dict[str, Any] = Field(alias="idle-culler-settings")
argo_workflows_enabled: bool = Field(alias="argo-workflows-enabled")
jhub_apps_enabled: bool = Field(alias="jhub-apps-enabled")
+ cloud_provider: str = Field(alias="cloud-provider")
+ jupyterlab_preferred_dir: typing.Optional[str] = Field(
+ alias="jupyterlab-preferred-dir"
+ )
class DaskGatewayInputVars(schema.Base):
dask_worker_image: ImageNameTag = Field(alias="dask-worker-image")
dask_gateway_profiles: Dict[str, Any] = Field(alias="dask-gateway-profiles")
+ cloud_provider: str = Field(alias="cloud-provider")
class MonitoringInputVars(schema.Base):
monitoring_enabled: bool = Field(alias="monitoring-enabled")
+ minio_enabled: bool = Field(alias="minio-enabled")
+ grafana_loki_overrides: List[str] = Field(alias="grafana-loki-overrides")
+ grafana_promtail_overrides: List[str] = Field(alias="grafana-promtail-overrides")
+ grafana_loki_minio_overrides: List[str] = Field(
+ alias="grafana-loki-minio-overrides"
+ )
class TelemetryInputVars(schema.Base):
@@ -410,6 +435,7 @@ def input_vars(self, stage_outputs: Dict[str, Dict[str, Any]]):
realm_id = stage_outputs["stages/06-kubernetes-keycloak-configuration"][
"realm_id"
]["value"]
+ cloud_provider = self.config.provider.value
jupyterhub_shared_endpoint = (
stage_outputs["stages/02-infrastructure"]
.get("nfs_endpoint", {})
@@ -485,6 +511,7 @@ def input_vars(self, stage_outputs: Dict[str, Dict[str, Any]]):
),
jupyterhub_stared_storage=self.config.storage.shared_filesystem,
jupyterhub_shared_endpoint=jupyterhub_shared_endpoint,
+ cloud_provider=cloud_provider,
jupyterhub_profiles=self.config.profiles.dict()["jupyterlab"],
jupyterhub_image=_split_docker_image_name(
self.config.default_images.jupyterhub
@@ -497,6 +524,8 @@ def input_vars(self, stage_outputs: Dict[str, Dict[str, Any]]):
argo_workflows_enabled=self.config.argo_workflows.enabled,
jhub_apps_enabled=self.config.jhub_apps.enabled,
initial_repositories=str(self.config.jupyterlab.initial_repositories),
+ jupyterlab_default_settings=self.config.jupyterlab.default_settings,
+ jupyterlab_preferred_dir=self.config.jupyterlab.preferred_dir,
)
dask_gateway_vars = DaskGatewayInputVars(
@@ -504,10 +533,19 @@ def input_vars(self, stage_outputs: Dict[str, Dict[str, Any]]):
self.config.default_images.dask_worker
),
dask_gateway_profiles=self.config.profiles.dict()["dask_worker"],
+ cloud_provider=cloud_provider,
)
monitoring_vars = MonitoringInputVars(
monitoring_enabled=self.config.monitoring.enabled,
+ minio_enabled=self.config.monitoring.minio_enabled,
+ grafana_loki_overrides=[json.dumps(self.config.monitoring.overrides.loki)],
+ grafana_promtail_overrides=[
+ json.dumps(self.config.monitoring.overrides.promtail)
+ ],
+ grafana_loki_minio_overrides=[
+ json.dumps(self.config.monitoring.overrides.minio)
+ ],
)
telemetry_vars = TelemetryInputVars(
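For reference, the new `monitoring.overrides` schema above is handed to Terraform as one-element lists of JSON strings. A rough sketch of that round trip, mirroring the `MonitoringInputVars` fields in this hunk (values are illustrative):

```python
import json

# What a user writes under `monitoring:` in nebari-config.yaml, expressed
# as the parsed Python structure (illustrative values).
monitoring_config = {
    "enabled": True,
    "minio_enabled": True,
    "overrides": {
        "loki": {},
        "promtail": {},
        "minio": {"persistence": {"size": "1Gi"}},
    },
}

# MonitoringInputVars serializes each overrides section to a one-element
# list of JSON strings, which Terraform passes straight to helm_release.
grafana_loki_minio_overrides = [json.dumps(monitoring_config["overrides"]["minio"])]
print(grafana_loki_minio_overrides)  # ['{"persistence": {"size": "1Gi"}}']
```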
diff --git a/src/_nebari/stages/kubernetes_services/template/dask_gateway.tf b/src/_nebari/stages/kubernetes_services/template/dask_gateway.tf
index 765be2753a..b9b0a9c6c3 100644
--- a/src/_nebari/stages/kubernetes_services/template/dask_gateway.tf
+++ b/src/_nebari/stages/kubernetes_services/template/dask_gateway.tf
@@ -11,7 +11,6 @@ variable "dask-gateway-profiles" {
description = "Dask Gateway profiles to expose to user"
}
-
# =================== RESOURCES =====================
module "dask-gateway" {
source = "./modules/kubernetes/services/dask-gateway"
@@ -39,4 +38,6 @@ module "dask-gateway" {
# profiles
profiles = var.dask-gateway-profiles
+
+ cloud-provider = var.cloud-provider
}
diff --git a/src/_nebari/stages/kubernetes_services/template/jupyterhub.tf b/src/_nebari/stages/kubernetes_services/template/jupyterhub.tf
index ca4176670e..4f8bebb9e4 100644
--- a/src/_nebari/stages/kubernetes_services/template/jupyterhub.tf
+++ b/src/_nebari/stages/kubernetes_services/template/jupyterhub.tf
@@ -39,11 +39,21 @@ variable "jupyterlab-profiles" {
description = "JupyterHub profiles to expose to user"
}
+variable "jupyterlab-preferred-dir" {
+ description = "Directory in which the JupyterLab should open the file browser"
+ type = string
+}
+
variable "initial-repositories" {
description = "Map of folder location and git repo url to clone"
type = string
}
+variable "jupyterlab-default-settings" {
+ description = "Default settings for JupyterLab to be placed in overrides.json"
+ type = map(any)
+}
+
variable "jupyterhub-hub-extraEnv" {
description = "Extracted overrides to merge with jupyterhub.hub.extraEnv"
type = string
@@ -55,7 +65,6 @@ variable "idle-culler-settings" {
type = any
}
-
module "kubernetes-nfs-server" {
count = var.jupyterhub-shared-endpoint == null ? 1 : 0
@@ -88,6 +97,8 @@ module "jupyterhub" {
name = var.name
namespace = var.environment
+ cloud-provider = var.cloud-provider
+
external-url = var.endpoint
realm_id = var.realm_id
@@ -136,6 +147,10 @@ module "jupyterhub" {
idle-culler-settings = var.idle-culler-settings
initial-repositories = var.initial-repositories
+ jupyterlab-default-settings = var.jupyterlab-default-settings
+
jupyterlab-pioneer-enabled = var.jupyterlab-pioneer-enabled
jupyterlab-pioneer-log-format = var.jupyterlab-pioneer-log-format
+
+ jupyterlab-preferred-dir = var.jupyterlab-preferred-dir
}
diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/dask-gateway/files/gateway_config.py b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/dask-gateway/files/gateway_config.py
index b1499efe44..2219d14e56 100644
--- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/dask-gateway/files/gateway_config.py
+++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/dask-gateway/files/gateway_config.py
@@ -114,9 +114,10 @@ def list_dask_environments():
def base_node_group(options):
- default_node_group = {
- config["worker-node-group"]["key"]: config["worker-node-group"]["value"]
- }
+ key = config["worker-node-group"]["key"]
+ if config.get("provider", "") == "aws":
+ key = "dedicated"
+ default_node_group = {key: config["worker-node-group"]["value"]}
# check `worker_extra_pod_config` first
worker_node_group = (
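The effect of the `base_node_group` change is easiest to see with concrete values: on AWS the scheduling key becomes the `dedicated` label added to the node groups elsewhere in this PR, while other providers keep their configured key. A small sketch (the provider-specific label keys below are illustrative assumptions):

```python
def base_node_group_key(config: dict) -> str:
    # Mirrors the logic above: AWS deployments schedule onto the
    # `dedicated` node label; other providers use the configured key.
    key = config["worker-node-group"]["key"]
    if config.get("provider", "") == "aws":
        key = "dedicated"
    return key


aws_config = {
    "provider": "aws",
    "worker-node-group": {"key": "eks.amazonaws.com/nodegroup", "value": "worker"},
}
gcp_config = {
    "provider": "gcp",
    "worker-node-group": {"key": "cloud.google.com/gke-nodepool", "value": "worker"},
}
assert base_node_group_key(aws_config) == "dedicated"
assert base_node_group_key(gcp_config) == "cloud.google.com/gke-nodepool"
```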
diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/dask-gateway/gateway.tf b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/dask-gateway/gateway.tf
index eb99f75d54..62265b350b 100644
--- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/dask-gateway/gateway.tf
+++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/dask-gateway/gateway.tf
@@ -24,6 +24,7 @@ resource "kubernetes_secret" "gateway" {
conda-store-api-token = var.conda-store-api-token
conda-store-service-name = var.conda-store-service-name
conda-store-namespace = var.namespace
+ provider = var.cloud-provider
})
}
}
diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/dask-gateway/variables.tf b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/dask-gateway/variables.tf
index 5feb72d167..7f8a4aa978 100644
--- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/dask-gateway/variables.tf
+++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/dask-gateway/variables.tf
@@ -199,3 +199,8 @@ variable "conda-store-service-name" {
description = "internal service-name:port where conda-store can be reached"
type = string
}
+
+variable "cloud-provider" {
+ description = "Name of the cloud provider to deploy to."
+ type = string
+}
diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/configmaps.tf b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/configmaps.tf
index 4b6561129e..4b8f9145b9 100644
--- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/configmaps.tf
+++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/configmaps.tf
@@ -7,10 +7,18 @@ locals {
kernel_cull_connected = var.idle-culler-settings.kernel_cull_connected ? "True" : "False" # for Python compatible boolean values
kernel_cull_busy = var.idle-culler-settings.kernel_cull_busy ? "True" : "False" # for Python compatible boolean values
server_shutdown_no_activity_timeout = var.idle-culler-settings.server_shutdown_no_activity_timeout
+ jupyterlab_preferred_dir = var.jupyterlab-preferred-dir != null ? var.jupyterlab-preferred-dir : ""
}
)
}
+locals {
+ jupyterlab-overrides-json-object = merge(
+ jsondecode(file("${path.module}/files/jupyterlab/overrides.json")),
+ var.jupyterlab-default-settings
+ )
+}
+
locals {
jupyter-pioneer-config-py-template = templatefile("${path.module}/files/jupyter/jupyter_jupyterlab_pioneer_config.py.tpl", {
log_format = var.jupyterlab-pioneer-log-format != null ? var.jupyterlab-pioneer-log-format : ""
@@ -22,11 +30,27 @@ locals {
resource "local_file" "jupyter_server_config_py" {
content = local.jupyter-notebook-config-py-template
filename = "${path.module}/files/jupyter/jupyter_server_config.py"
+
+ provisioner "local-exec" {
+ # check the syntax of the config file without running it
+ command = "python -m py_compile ${self.filename}"
+ }
}
resource "local_file" "jupyter_jupyterlab_pioneer_config_py" {
content = local.jupyter-pioneer-config-py-template
filename = "${path.module}/files/jupyter/jupyter_jupyterlab_pioneer_config.py"
+
+ provisioner "local-exec" {
+ # check the syntax of the config file without running it
+ command = "python -m py_compile ${self.filename}"
+ }
+}
+
+
+resource "local_file" "overrides_json" {
+ content = jsonencode(local.jupyterlab-overrides-json-object)
+ filename = "${path.module}/files/jupyterlab/overrides.json"
}
@@ -56,6 +80,12 @@ locals {
)
}
+locals {
+ etc-jupyterlab-settings = {
+ "overrides.json" = local_file.overrides_json.content
+ }
+}
+
resource "kubernetes_config_map" "etc-jupyter" {
depends_on = [
local_file.jupyter_server_config_py,
@@ -85,15 +115,16 @@ resource "kubernetes_config_map" "etc-skel" {
resource "kubernetes_config_map" "jupyterlab-settings" {
+ depends_on = [
+ local_file.overrides_json
+ ]
+
metadata {
name = "jupyterlab-settings"
namespace = var.namespace
}
- data = {
- for filename in fileset("${path.module}/files/jupyterlab", "*") :
- filename => file("${path.module}/files/jupyterlab/${filename}")
- }
+ data = local.etc-jupyterlab-settings
}
resource "kubernetes_config_map" "git_clone_update" {
diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/extras/git_clone_update.sh b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/extras/git_clone_update.sh
index 5c012c01c1..bca1734ea2 100644
--- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/extras/git_clone_update.sh
+++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/extras/git_clone_update.sh
@@ -31,6 +31,22 @@ if [ "$#" -lt 1 ] || [ "$1" = "--help" ]; then
[ "$1" = "--help" ] && exit 0 || exit 1
fi
+fix_parent_dir_permissions() {
+ # Fix parent directory permissions to allow the JupyterLab user to access the cloned repository
+
+ local folder_path="$1"
+
+ # Retrieve the top-level directory of the (relative) clone path
+ local parent_dir=$(echo "$folder_path" | cut -d '/' -f1)
+
+ # Check if the parent directory has the correct permissions
+ if [ "$(stat -c "%u:%g" "$parent_dir")" != "1000:100" ]; then
+ echo "Fixing permissions for parent directory: $parent_dir"
+ chown -R 1000:100 "$parent_dir" || { echo "Error: Unable to set ownership for $parent_dir"; return 1; }
+ chmod -R 755 "$parent_dir" || { echo "Error: Unable to set permissions for $parent_dir"; return 1; }
+ fi
+}
+
clone_update_repository() {
# Clone or update a Git repository into a specified folder,
# and create a `.firstrun` file to mark the script's execution.
@@ -47,6 +63,8 @@ clone_update_repository() {
mkdir -p "$folder_path"
fi
+ fix_parent_dir_permissions "$folder_path" || return 1
+
if [ -d "$folder_path/.git" ]; then
echo -e "Updating Git repository in ${folder_path}..."
(cd "$folder_path" && git pull)
@@ -55,7 +73,13 @@ clone_update_repository() {
(git clone "$git_repo_url" "$folder_path")
fi
+ echo -e "Creating .firstrun file in ${folder_path}..."
touch "$firstrun_file"
+
+ # Give the JupyterLab user ownership of the newly cloned git folder
+ echo -e "Setting permissions for ${folder_path}..."
+ chown -R 1000:100 "$folder_path" || { echo "Error: Unable to set ownership for $folder_path"; return 1; }
+
echo -e "Execution for ${folder_path} completed. ${GREEN}✅${NC}"
fi
}
@@ -72,7 +96,6 @@ for pair in "$@"; do
echo -e "${RED}Invalid argument format: \"${pair}\". Please provide folder path and Git repository URL in the correct order.${NC}" >> "$ERROR_LOG"
else
clone_update_repository "$folder_path" "$git_repo_url" || echo -e "${RED}Error executing for ${folder_path}.${NC}" >> "$ERROR_LOG"
- chown -R 1000:100 "$folder_path" # User permissions for JupyterLab user
fi
done
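For clarity, the ownership check the script performs can be mirrored in Python; `1000:100` is the uid:gid of the JupyterLab user inside the single-user image, and relative clone paths are assumed, as in the shell helper. A rough, root-requiring equivalent (illustrative only):

```python
import os
from pathlib import Path

JOVYAN_UID, JOVYAN_GID = 1000, 100  # uid:gid of the JupyterLab user in the image


def fix_parent_dir_permissions(folder_path: str) -> None:
    """Rough Python equivalent of the shell helper above (requires root)."""
    parent_dir = Path(folder_path).parts[0]  # top-level component, e.g. "examples"
    st = os.stat(parent_dir)
    if (st.st_uid, st.st_gid) != (JOVYAN_UID, JOVYAN_GID):
        # Equivalent of `chown -R 1000:100` followed by `chmod -R 755`.
        for root, dirs, files in os.walk(parent_dir):
            os.chown(root, JOVYAN_UID, JOVYAN_GID)
            os.chmod(root, 0o755)
            for name in files:
                path = os.path.join(root, name)
                os.chown(path, JOVYAN_UID, JOVYAN_GID)
                os.chmod(path, 0o755)
```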
diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyter/jupyter_jupyterlab_pioneer_config.py.tpl b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyter/jupyter_jupyterlab_pioneer_config.py.tpl
index 2149d298f8..66b653b894 100644
--- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyter/jupyter_jupyterlab_pioneer_config.py.tpl
+++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyter/jupyter_jupyterlab_pioneer_config.py.tpl
@@ -3,7 +3,7 @@ import json
default_log_format = "%(asctime)s %(levelname)9s %(lineno)4s %(module)s: %(message)s"
-log_format = ${log_format}
+log_format = "${log_format}"
logging.basicConfig(
level=logging.INFO,
diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyter/jupyter_server_config.py.tpl b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyter/jupyter_server_config.py.tpl
index 79e3ec37d2..d5e089dfa3 100644
--- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyter/jupyter_server_config.py.tpl
+++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyter/jupyter_server_config.py.tpl
@@ -4,11 +4,13 @@
# Extra config available at:
# https://zero-to-jupyterhub.readthedocs.io/en/1.x/jupyterhub/customizing/user-management.html#culling-user-pods
-
# Enable Show Hidden Files menu option in View menu
c.ContentsManager.allow_hidden = True
c.FileContentsManager.allow_hidden = True
+# Set the preferred path for the frontend to start in
+c.FileContentsManager.preferred_dir = "${jupyterlab_preferred_dir}"
+
# Timeout (in seconds) in which a terminal has been inactive and ready to
# be culled.
c.TerminalManager.cull_inactive_timeout = ${terminal_cull_inactive_timeout} * 60
diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyterlab/overrides.json b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyterlab/overrides.json
index bdd69047c1..fd6cafc624 100644
--- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyterlab/overrides.json
+++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyterlab/overrides.json
@@ -15,15 +15,40 @@
"@jupyterlab/mainmenu-extension:plugin": {
"menus": [
{
- "id": "jp-mainmenu-nebari",
- "disabled": false,
- "label": "Nebari",
- "rank": 1001,
+ "id": "jp-mainmenu-file",
"items": [
+ {
+ "command": "help:open",
+ "rank": 0,
+ "args": {
+ "url": "/hub/home",
+ "text": "Home",
+ "newBrowserTab": true
+ }
+ },
+ {
+ "type": "submenu",
+ "submenu": {
+ "id": "jp-mainmenu-file-new"
+ },
+ "rank": 0.5
+ },
{
"command": "hub:control-panel",
- "rank": 0
+ "disabled": true
},
+ {
+ "command": "hub:logout",
+ "disabled": true
+ }
+ ]
+ },
+ {
+ "id": "jp-mainmenu-services",
+ "disabled": false,
+ "label": "Services",
+ "rank": 1000,
+ "items": [
{
"command": "help:open",
"rank": 1,
@@ -59,6 +84,28 @@
"text": "Argo Workflows",
"newBrowserTab": true
}
+ },
+ {
+ "command": "nebari:open-proxy",
+ "rank": 5,
+ "args": {
+ "name": "vscode"
+ }
+ }
+ ]
+ },
+ {
+ "id": "jp-mainmenu-help",
+ "rank": 1001,
+ "items": [
+ {
+ "command": "help:open",
+ "rank": 1001,
+ "args": {
+ "url": "https://www.nebari.dev/docs/welcome/",
+ "text": "Nebari documentation",
+ "newBrowserTab": true
+ }
}
]
}
diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/main.tf b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/main.tf
index 7eed29caef..af690112f6 100644
--- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/main.tf
+++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/main.tf
@@ -16,8 +16,11 @@ resource "random_password" "jhub_apps_jwt_secret" {
}
locals {
- jhub_apps_secrets_name = "jhub-apps-secrets"
- jhub_apps_env_var_name = "JHUB_APP_JWT_SECRET_KEY"
+ jhub_apps_secrets_name = "jhub-apps-secrets"
+ jhub_apps_env_var_name = "JHUB_APP_JWT_SECRET_KEY"
+ singleuser_nodeselector_key = var.cloud-provider == "aws" ? "dedicated" : var.user-node-group.key
+ userscheduler_nodeselector_key = var.cloud-provider == "aws" ? "dedicated" : var.user-node-group.key
+ userscheduler_nodeselector_value = var.cloud-provider == "aws" ? var.general-node-group.value : var.user-node-group.value
}
resource "kubernetes_secret" "jhub_apps_secrets" {
@@ -174,14 +177,14 @@ resource "helm_release" "jupyterhub" {
singleuser = {
image = var.jupyterlab-image
nodeSelector = {
- "${var.user-node-group.key}" = var.user-node-group.value
+ "${local.singleuser_nodeselector_key}" = var.user-node-group.value
}
}
scheduling = {
userScheduler = {
nodeSelector = {
- "${var.user-node-group.key}" = var.user-node-group.value
+ "${local.userscheduler_nodeselector_key}" = local.userscheduler_nodeselector_value
}
}
}
diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/variables.tf b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/variables.tf
index d43a2ab9c7..577dedc8ef 100644
--- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/variables.tf
+++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/variables.tf
@@ -158,6 +158,11 @@ variable "argo-workflows-enabled" {
type = bool
}
+variable "jupyterlab-default-settings" {
+ description = "Default settings for JupyterLab to be placed in overrides.json"
+ type = map(any)
+}
+
variable "jupyterlab-pioneer-enabled" {
description = "Enable JupyterLab Pioneer for telemetry"
type = bool
@@ -168,6 +173,16 @@ variable "jupyterlab-pioneer-log-format" {
type = string
}
+variable "jupyterlab-preferred-dir" {
+ description = "Directory in which the JupyterLab should open the file browser"
+ type = string
+}
+
+variable "cloud-provider" {
+ description = "Name of cloud provider."
+ type = string
+}
+
variable "initial-repositories" {
description = "Map of folder location and git repo url to clone"
type = string
diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/loki/main.tf b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/loki/main.tf
new file mode 100644
index 0000000000..8180d46fb8
--- /dev/null
+++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/loki/main.tf
@@ -0,0 +1,103 @@
+resource "random_password" "minio_root_password" {
+ length = 32
+ special = false
+}
+
+locals {
+ minio-url = "http://${var.minio-release-name}:${var.minio-port}"
+ node-selector = {
+ "${var.node-group.key}" = "${var.node-group.value}"
+ }
+}
+
+resource "helm_release" "loki-minio" {
+ count = var.minio-enabled ? 1 : 0
+ name = var.minio-release-name
+ namespace = var.namespace
+ repository = "https://raw.githubusercontent.com/bitnami/charts/defb094c658024e4aa8245622dab202874880cbc/bitnami"
+ chart = "minio"
+ # last release that was Apache-2.0
+ version = var.minio-helm-chart-version
+
+ set {
+ name = "accessKey.password"
+ value = "admin"
+ }
+
+ set {
+ name = "secretKey.password"
+ value = random_password.minio_root_password.result
+ }
+
+ set {
+ name = "defaultBuckets"
+ value = join(" ", var.buckets)
+ }
+
+ set {
+ name = "persistence.size"
+ value = var.minio-storage
+ }
+
+ values = concat([
+ file("${path.module}/values_minio.yaml"),
+ jsonencode({
+ nodeSelector : local.node-selector
+ })
+ ], var.grafana-loki-minio-overrides)
+}
+
+
+resource "helm_release" "grafana-loki" {
+ name = "nebari-loki"
+ namespace = var.namespace
+ repository = "https://grafana.github.io/helm-charts"
+ chart = "loki"
+ version = var.loki-helm-chart-version
+
+ values = concat([
+ file("${path.module}/values_loki.yaml"),
+ jsonencode({
+ loki : {
+ storage : {
+ s3 : {
+ endpoint : local.minio-url,
+ accessKeyId : "admin"
+ secretAccessKey : random_password.minio_root_password.result,
+ s3ForcePathStyle : true
+ }
+ }
+ }
+ storageConfig : {
+ # We configure MinIO by using the AWS config because MinIO implements the S3 API
+ aws : {
+ s3 : local.minio-url
+ s3ForcePathStyle : true
+ }
+ }
+ write : { nodeSelector : local.node-selector }
+ read : { nodeSelector : local.node-selector }
+ backend : { nodeSelector : local.node-selector }
+ gateway : { nodeSelector : local.node-selector }
+ })
+ ], var.grafana-loki-overrides)
+
+ depends_on = [helm_release.loki-minio]
+}
+
+resource "helm_release" "grafana-promtail" {
+ # Promtail ships log contents to the Loki instance
+ name = "nebari-promtail"
+ namespace = var.namespace
+ repository = "https://grafana.github.io/helm-charts"
+ chart = "promtail"
+ version = var.promtail-helm-chart-version
+
+ values = concat([
+ file("${path.module}/values_promtail.yaml"),
+ jsonencode({
+ })
+ ], var.grafana-promtail-overrides)
+
+ depends_on = [helm_release.grafana-loki]
+}
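The `values = concat([...], var.grafana-loki-*-overrides)` pattern relies on Helm's values precedence: when several values documents are supplied, later ones win, and each user override arrives last as a JSON string (JSON being valid YAML). A sketch of that later-wins deep merge (values are illustrative):

```python
import json


def deep_merge(base: dict, override: dict) -> dict:
    """Later-wins recursive merge, approximating Helm's values merging."""
    out = dict(base)
    for key, value in override.items():
        if isinstance(value, dict) and isinstance(out.get(key), dict):
            out[key] = deep_merge(out[key], value)
        else:
            out[key] = value
    return out


shipped = {"persistence": {"size": "50Gi"}, "defaultBuckets": "chunks ruler admin loki"}
# User override from monitoring.overrides.minio, serialized as a JSON string.
user_override = json.loads('{"persistence": {"size": "1Gi"}}')
print(deep_merge(shipped, user_override))
# {'persistence': {'size': '1Gi'}, 'defaultBuckets': 'chunks ruler admin loki'}
```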
diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/loki/values_loki.yaml b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/loki/values_loki.yaml
new file mode 100644
index 0000000000..c11ebe5d1f
--- /dev/null
+++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/loki/values_loki.yaml
@@ -0,0 +1,78 @@
+# https://github.com/grafana/loki/blob/4cae003ecedd474e4c15feab4ea2ef435afff83f/production/helm/loki/values.yaml
+
+loki:
+ storage:
+ type: s3
+ commonConfig:
+ replication_factor: 1
+ # Not required as it is inside cluster and not exposed to the public network
+ auth_enabled: false
+
+ # The Compactor deduplicates index entries and also applies granular retention.
+ compactor:
+ # Directory where marked chunks and temporary tables are saved.
+ working_directory: /var/loki/compactor/data/retention
+ # MinIO S3
+ shared_store: s3
+ # How often compaction runs.
+ compaction_interval: 1h
+ # Delete old logs after the retention delete delay.
+ # Ideally we would use storage-based retention, but Loki does not currently
+ # implement it, which is why we use time-based retention.
+ retention_enabled: true
+ # Delay after which the Compactor deletes marked chunks.
+ retention_delete_delay: 1h
+ # Maximum number of goroutine workers instantiated to delete chunks.
+ retention_delete_worker_count: 150
+
+ limits_config:
+ # The minimum retention period is 24h.
+ # This is reasonable in most cases, but users who would like to retain logs
+ # for longer can override this value from nebari-config.yaml.
+ retention_period: 60d
+
+ schema_config:
+ configs:
+ # list of period_configs
+ # The date of the first day that index buckets should be created.
+ - from: "2024-03-01"
+ index:
+ period: 24h
+ prefix: loki_index_
+ object_store: s3
+ schema: v11
+ store: boltdb-shipper
+ storage_config:
+ boltdb_shipper:
+ # Directory where ingesters would write index files which would then be
+ # uploaded by shipper to configured storage
+ active_index_directory: /var/loki/compactor/data/index
+ # Cache location for restoring index files from storage for queries
+ cache_location: /var/loki/compactor/data/boltdb-cache
+ # Shared store for keeping index files
+ shared_store: s3
+
+# Configuration for the write pod(s)
+write:
+ # -- Number of replicas for the write
+ # Keeping cost of running Nebari in mind
+ # We don't need so many replicas, if people need it
+ # they can always override from nebari-config.yaml
+ replicas: 1
+
+read:
+ # -- Number of replicas for the read
+ replicas: 1
+
+backend:
+ # -- Number of replicas for the backend
+ replicas: 1
+
+minio:
+ # We deploy MinIO separately, from the Bitnami chart
+ enabled: false
+
+monitoring:
+ selfMonitoring:
+ grafanaAgent:
+ installOperator: false
diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/loki/values_minio.yaml b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/loki/values_minio.yaml
new file mode 100644
index 0000000000..666542bb45
--- /dev/null
+++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/loki/values_minio.yaml
@@ -0,0 +1 @@
+# https://github.com/bitnami/charts/blob/440ec159c26e4ff0748b9e9866b345d98220c40a/bitnami/minio/values.yaml
diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/loki/values_promtail.yaml b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/loki/values_promtail.yaml
new file mode 100644
index 0000000000..5a18a9bc09
--- /dev/null
+++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/loki/values_promtail.yaml
@@ -0,0 +1 @@
+# https://github.com/grafana/helm-charts/blob/3831194ba2abd2a0ca7a14ca00e578f8e9d2abc6/charts/promtail/values.yaml
diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/loki/variables.tf b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/loki/variables.tf
new file mode 100644
index 0000000000..a43695252c
--- /dev/null
+++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/loki/variables.tf
@@ -0,0 +1,84 @@
+variable "namespace" {
+ description = "deploy monitoring services on this namespace"
+ type = string
+ default = "dev"
+}
+
+variable "loki-helm-chart-version" {
+ description = "version to deploy for the loki helm chart"
+ type = string
+ default = "5.43.3"
+}
+
+variable "promtail-helm-chart-version" {
+ description = "version to deploy for the promtail helm chart"
+ type = string
+ default = "6.15.5"
+}
+
+variable "minio-helm-chart-version" {
+ description = "version to deploy for the minio helm chart"
+ type = string
+ default = "6.7.4"
+}
+
+variable "grafana-loki-overrides" {
+ description = "Grafana Loki helm chart overrides"
+ type = list(string)
+ default = []
+}
+
+variable "grafana-promtail-overrides" {
+ description = "Grafana Promtail helm chart overrides"
+ type = list(string)
+ default = []
+}
+
+variable "grafana-loki-minio-overrides" {
+ description = "Grafana Loki minio helm chart overrides"
+ type = list(string)
+ default = []
+}
+
+variable "minio-release-name" {
+ description = "Grafana Loki minio release name"
+ type = string
+ default = "nebari-loki-minio"
+}
+
+variable "minio-port" {
+ description = "Grafana Loki minio port"
+ type = number
+ default = 9000
+}
+
+variable "buckets" {
+ description = "Minio buckets"
+ type = list(string)
+ default = [
+ "chunks",
+ "ruler",
+ "admin",
+ "loki"
+ ]
+}
+
+variable "minio-storage" {
+ description = "Minio storage"
+ type = string
+ default = "50Gi"
+}
+
+variable "minio-enabled" {
+ description = "Deploy minio along with loki or not"
+ type = bool
+ default = true
+}
+
+variable "node-group" {
+ description = "Node key value pair for bound resources"
+ type = object({
+ key = string
+ value = string
+ })
+}
diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/values.yaml b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/values.yaml
index ada868882f..f3cf47c88d 100644
--- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/values.yaml
+++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/values.yaml
@@ -1 +1,7 @@
# https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/values.yaml
+
+grafana:
+ additionalDataSources:
+ - name: Loki
+ type: loki
+ url: http://loki-gateway.dev
diff --git a/src/_nebari/stages/kubernetes_services/template/monitoring.tf b/src/_nebari/stages/kubernetes_services/template/monitoring.tf
index ec20a75ba7..39487c4bb1 100644
--- a/src/_nebari/stages/kubernetes_services/template/monitoring.tf
+++ b/src/_nebari/stages/kubernetes_services/template/monitoring.tf
@@ -14,3 +14,14 @@ module "monitoring" {
node-group = var.node_groups.general
}
+
+module "grafana-loki" {
+ count = var.monitoring-enabled ? 1 : 0
+ source = "./modules/kubernetes/services/monitoring/loki"
+ namespace = var.environment
+ grafana-loki-overrides = var.grafana-loki-overrides
+ grafana-promtail-overrides = var.grafana-promtail-overrides
+ grafana-loki-minio-overrides = var.grafana-loki-minio-overrides
+ node-group = var.node_groups.general
+ minio-enabled = var.minio-enabled
+}
diff --git a/src/_nebari/stages/kubernetes_services/template/variables.tf b/src/_nebari/stages/kubernetes_services/template/variables.tf
index 7a125f1c69..9e36e65979 100644
--- a/src/_nebari/stages/kubernetes_services/template/variables.tf
+++ b/src/_nebari/stages/kubernetes_services/template/variables.tf
@@ -53,7 +53,37 @@ variable "jupyterlab-pioneer-log-format" {
description = "Logging format for JupyterLab Pioneer"
type = string
}
+
variable "jhub-apps-enabled" {
description = "Enable JupyterHub Apps"
type = bool
}
+
+variable "cloud-provider" {
+ description = "Name of cloud provider."
+ type = string
+}
+
+variable "grafana-loki-overrides" {
+ description = "Helm chart overrides for loki"
+ type = list(string)
+ default = []
+}
+
+variable "grafana-promtail-overrides" {
+ description = "Helm chart overrides for promtail"
+ type = list(string)
+ default = []
+}
+
+variable "grafana-loki-minio-overrides" {
+ description = "Grafana Loki minio helm chart overrides"
+ type = list(string)
+ default = []
+}
+
+variable "minio-enabled" {
+ description = "Deploy minio along with loki or not"
+ type = bool
+ default = true
+}
diff --git a/src/_nebari/upgrade.py b/src/_nebari/upgrade.py
index ef933f48ea..5c095f04a2 100644
--- a/src/_nebari/upgrade.py
+++ b/src/_nebari/upgrade.py
@@ -705,6 +705,17 @@ def _version_specific_upgrade(
return config
+class Upgrade_2024_3_1(UpgradeStep):
+ version = "2024.3.1"
+
+ def _version_specific_upgrade(
+ self, config, start_version, config_filename: Path, *args, **kwargs
+ ):
+ rich.print("Ready to upgrade to Nebari version [green]2024.3.1[/green].")
+
+ return config
+
+
__rounded_version__ = str(rounded_ver_parse(__version__))
# Manually-added upgrade steps must go above this line
diff --git a/tests/common/kube_api.py b/tests/common/kube_api.py
new file mode 100644
index 0000000000..eec1d05d7b
--- /dev/null
+++ b/tests/common/kube_api.py
@@ -0,0 +1,40 @@
+import socket
+import typing
+
+from kubernetes import config
+from kubernetes.client.api import core_v1_api
+from kubernetes.client.models import V1Pod
+from kubernetes.stream import portforward
+
+
+def kubernetes_port_forward(
+ pod_labels: typing.Dict[str, str], port: int, namespace: str = "dev"
+) -> V1Pod:
+ """Given pod labels and port, finds the pod name and port forwards to
+ the given port.
+ :param pod_labels: dict of labels, by which to search the pod
+ :param port: port number to forward
+ :param namespace: kubernetes namespace name
+ :return: kubernetes pod object
+ """
+ config.load_kube_config()
+ core_v1 = core_v1_api.CoreV1Api()
+ label_selector = ",".join([f"{k}={v}" for k, v in pod_labels.items()])
+ pods = core_v1.list_namespaced_pod(
+ namespace=namespace, label_selector=label_selector
+ )
+ assert pods.items
+ pod = pods.items[0]
+ pod_name = pod.metadata.name
+
+ def kubernetes_create_connection(address, *args, **kwargs):
+ pf = portforward(
+ core_v1.connect_get_namespaced_pod_portforward,
+ pod_name,
+ namespace,
+ ports=str(port),
+ )
+ return pf.socket(port)
+
+ socket.create_connection = kubernetes_create_connection
+ return pod
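The deployment tests below use this helper exactly as sketched here: forward a labelled pod, then talk to it over plain HTTP. Because the patched `socket.create_connection` routes every connection through the port-forward, the hostname and port in the URL are only cosmetic. For example (labels and port taken from `test_loki_deployment.py`; requires a reachable kubeconfig):

```python
import urllib.request

from tests.common.kube_api import kubernetes_port_forward

# Forward the Loki backend pod (labels/port as used in test_loki_deployment.py).
pod = kubernetes_port_forward(
    pod_labels={
        "app.kubernetes.io/instance": "nebari-loki",
        "app.kubernetes.io/component": "backend",
    },
    port=3100,
)

# Any hostname resolves through the forwarded socket.
url = f"http://{pod.metadata.name}.pod.dev.kubernetes:3100/ready"
with urllib.request.urlopen(url) as response:
    assert response.code == 200
```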
diff --git a/tests/common/navigator.py b/tests/common/navigator.py
index dc2adc9eba..12a1445bd5 100644
--- a/tests/common/navigator.py
+++ b/tests/common/navigator.py
@@ -320,15 +320,13 @@ def _set_environment_via_popup(self, kernel=None):
# failure here indicates that the environment doesn't exist either
# because of incorrect naming syntax or because the env is still
# being built
- self.page.get_by_role("combobox").nth(1).select_option(
- f'{{"name":"{kernel}"}}'
- )
+ self.page.get_by_role("combobox").nth(1).select_option(kernel)
# click Select to close popup (deal with the two formats of this dialog)
try:
- self.page.get_by_role("button", name="Select", exact=True).click()
+ self.page.get_by_role("button", name="Select Kernel").click()
except Exception:
self.page.locator("div").filter(has_text="No KernelSelect").get_by_role(
- "button", name="Select"
+ "button", name="Select Kernel"
).click()
def set_environment(self, kernel):
@@ -360,10 +358,8 @@ def set_environment(self, kernel):
self._set_environment_via_popup(kernel)
# wait for the jupyter UI to catch up before moving forward
- # extract conda env name
- conda_env_label = re.search("conda-env-(.*)-py", kernel).group(1)
# see if the jupyter notebook label for the conda env is visible
- kernel_label_loc = self.page.get_by_role("button", name=conda_env_label)
+ kernel_label_loc = self.page.get_by_role("button", name=kernel)
if not kernel_label_loc.is_visible():
kernel_label_loc.wait_for(state="attached")
@@ -411,3 +407,18 @@ def write_file(self, filepath, content):
self.run_terminal_command(f"ls {filepath}")
logger.debug(f"time to complete {dt.datetime.now() - start}")
time.sleep(2)
+
+ def stop_server(self) -> None:
+ """Stops the JupyterHub server by navigating to the Hub Control Panel."""
+ self.page.get_by_text("File", exact=True).click()
+
+ with self.context.expect_page() as page_info:
+ self.page.get_by_role("menuitem", name="Home", exact=True).click()
+
+ home_page = page_info.value
+ home_page.wait_for_load_state()
+ stop_button = home_page.get_by_role("button", name="Stop My Server")
+ if not stop_button.is_visible():
+ stop_button.wait_for(state="visible")
+ stop_button.click()
+ stop_button.wait_for(state="hidden")
diff --git a/tests/common/playwright_fixtures.py b/tests/common/playwright_fixtures.py
index 388f6ef4b0..03e17a5065 100644
--- a/tests/common/playwright_fixtures.py
+++ b/tests/common/playwright_fixtures.py
@@ -48,6 +48,10 @@ def _navigator_session(request, browser_name, pytestconfig):
logger.debug(e)
raise
finally:
+ try:
+ nav.stop_server()
+ except Exception as e:
+ logger.debug(e)
nav.teardown()
diff --git a/tests/common/run_notebook.py b/tests/common/run_notebook.py
index 03c383299a..10d28d6637 100644
--- a/tests/common/run_notebook.py
+++ b/tests/common/run_notebook.py
@@ -220,7 +220,7 @@ def _restart_run_all(self):
# Restart dialog appears most, but not all of the time (e.g. set
# No Kernel, then Restart Run All)
restart_dialog_button = self.nav.page.get_by_role(
- "button", name="Restart", exact=True
+ "button", name="Confirm Kernel Restart"
)
if restart_dialog_button.is_visible():
restart_dialog_button.click()
diff --git a/tests/tests_deployment/test_jupyterhub_ssh.py b/tests/tests_deployment/test_jupyterhub_ssh.py
index 0e90927a4c..fd6b0799d5 100644
--- a/tests/tests_deployment/test_jupyterhub_ssh.py
+++ b/tests/tests_deployment/test_jupyterhub_ssh.py
@@ -14,10 +14,14 @@
TIMEOUT_SECS = 300
+@pytest.fixture(scope="session")
+def api_token():
+ return get_jupyterhub_token("jupyterhub-ssh")
+
+
@pytest.fixture(scope="function")
-def paramiko_object():
+def paramiko_object(api_token):
"""Connects to JupyterHub ssh cluster from outside the cluster."""
- api_token = get_jupyterhub_token("jupyterhub-ssh")
try:
client = paramiko.SSHClient()
diff --git a/tests/tests_deployment/test_loki_deployment.py b/tests/tests_deployment/test_loki_deployment.py
new file mode 100644
index 0000000000..59210a8fc3
--- /dev/null
+++ b/tests/tests_deployment/test_loki_deployment.py
@@ -0,0 +1,126 @@
+import json
+import urllib.parse
+import urllib.request as urllib_request
+
+import pytest
+from kubernetes.client import V1Pod
+
+from tests.common.kube_api import kubernetes_port_forward
+
+LOKI_BACKEND_PORT = 3100
+LOKI_BACKEND_POD_LABELS = {
+ "app.kubernetes.io/instance": "nebari-loki",
+ "app.kubernetes.io/component": "backend",
+}
+
+MINIO_PORT = 9000
+MINIO_POD_LABELS = {
+ "app.kubernetes.io/instance": "nebari-loki-minio",
+ "app.kubernetes.io/name": "minio",
+}
+
+LOKI_GATEWAY_PORT = 8080
+LOKI_GATEWAY_POD_LABELS = {
+ "app.kubernetes.io/instance": "nebari-loki",
+ "app.kubernetes.io/component": "gateway",
+}
+
+
+@pytest.fixture(scope="module")
+def port_forward_fixture(request):
+ """Pytest fixture to port forward loki backend pod to make it accessible
+ on localhost so that we can run some tests on it.
+ """
+ return kubernetes_port_forward(
+ pod_labels=request.param["labels"], port=request.param["port"]
+ )
+
+
+def port_forward(labels, port):
+ params = {"labels": labels, "port": port}
+ return pytest.mark.parametrize("port_forward_fixture", [params], indirect=True)
+
+
+@pytest.mark.parametrize(
+ "endpoint_path",
+ (
+ "metrics",
+ "services",
+ "config",
+ "ready",
+ "log_level",
+ ),
+)
+@port_forward(labels=LOKI_BACKEND_POD_LABELS, port=LOKI_BACKEND_PORT)
+def test_loki_endpoint(endpoint_path: str, port_forward_fixture: V1Pod):
+ """This will hit some endpoints in the loki API and verify that we
+ get a 200 status code, to make sure Loki is working properly.
+ :param endpoint_path: a loki api endpoint path
+ :param port_forward_fixture: pytest fixture to port forward.
+ :return:
+ """
+ pod_name = port_forward_fixture.metadata.name
+ url = f"http://{pod_name}.pod.dev.kubernetes:{LOKI_BACKEND_PORT}/{endpoint_path}"
+ response = urllib_request.urlopen(url)
+ response.read().decode("utf-8")
+ assert response.code == 200
+ response.close()
+
+
+@port_forward(labels=MINIO_POD_LABELS, port=MINIO_PORT)
+def test_minio_accessible(port_forward_fixture: V1Pod):
+ """This will hit liveness endpoint of minio API and verify that we
+ get a 200 status code, to make sure minio is up and running.
+ :param port_forward_fixture: pytest fixture to port forward.
+ :return:
+ """
+ pod_name = port_forward_fixture.metadata.name
+ url = f"http://{pod_name}.pod.dev.kubernetes:{MINIO_PORT}/minio/health/live"
+ response = urllib_request.urlopen(url)
+ response.read().decode("utf-8")
+ assert response.code == 200
+ response.close()
+
+
+@port_forward(labels=LOKI_GATEWAY_POD_LABELS, port=LOKI_GATEWAY_PORT)
+def test_loki_gateway(port_forward_fixture: V1Pod):
+ """This will hit an endpoint of loki gateway API and verify that we
+ get a 200 status code, to make sure minio is up and running.
+ :param port_forward_fixture: pytest fixture to port forward.
+ :return:
+ """
+ pod_name = port_forward_fixture.metadata.name
+ url = f"http://{pod_name}.pod.dev.kubernetes:{LOKI_BACKEND_PORT}/loki/api/v1/labels"
+ response = urllib_request.urlopen(url)
+ response_content = response.read().decode("utf-8")
+ response_json = json.loads(response_content)
+ assert response.code == 200
+ assert response_json["status"] == "success"
+ response.close()
+
+
+@port_forward(labels=LOKI_GATEWAY_POD_LABELS, port=LOKI_GATEWAY_PORT)
+def test_loki_gateway_fetch_logs(port_forward_fixture: V1Pod):
+ """This will hit an endpoint of loki gateway API to fetch some logs
+ and verify logs received.
+ :param port_forward_fixture: pytest fixture to port forward.
+ :return: None
+ """
+ pod_name = port_forward_fixture.metadata.name
+ query_params = {
+ "limit": "5",
+ # Fetch logs for jupyterhub app
+ "query": '{app="jupyterhub"}',
+ }
+
+ encoded_params = urllib.parse.urlencode(query_params)
+ path = f"/loki/api/v1/query_range?{encoded_params}"
+ url = f"http://{pod_name}.pod.dev.kubernetes:{LOKI_BACKEND_PORT}/{path}"
+ response = urllib_request.urlopen(url)
+ response_content = response.read().decode("utf-8")
+ response_json = json.loads(response_content)
+ assert response.code == 200
+ assert response_json["status"] == "success"
+ # Make sure log lines received
+ assert len(response_json["data"]["result"][0]["values"]) > 0
+ response.close()
diff --git a/tests/tests_deployment/utils.py b/tests/tests_deployment/utils.py
index 327de53309..d175a2dd05 100644
--- a/tests/tests_deployment/utils.py
+++ b/tests/tests_deployment/utils.py
@@ -28,16 +28,17 @@ def get_jupyterhub_session():
def get_jupyterhub_token(note="jupyterhub-tests-deployment"):
session = get_jupyterhub_session()
+ xsrf_token = session.cookies.get("_xsrf")
+ headers = {"Referer": f"https://{constants.NEBARI_HOSTNAME}/hub/token"}
+ if xsrf_token:
+ headers["X-XSRFToken"] = xsrf_token
+ data = {"note": note, "expires_in": None}
r = session.post(
f"https://{constants.NEBARI_HOSTNAME}/hub/api/users/{constants.KEYCLOAK_USERNAME}/tokens",
- headers={
- "Referer": f"https://{constants.NEBARI_HOSTNAME}/hub/token",
- },
- json={
- "note": note,
- "expires_in": None,
- },
+ headers=headers,
+ json=data,
)
+
return r.json()["token"]
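The `X-XSRFToken` header is needed because newer JupyterHub (Tornado) versions reject state-changing requests that carry the `_xsrf` cookie without a matching header. The returned token is then a regular JupyterHub API token; a hedged usage sketch follows (the import paths and `verify=False` for self-signed test certificates are assumptions):

```python
import requests

from tests.tests_deployment import constants
from tests.tests_deployment.utils import get_jupyterhub_token

token = get_jupyterhub_token(note="example")

# Authenticate against the JupyterHub REST API with the freshly minted token.
response = requests.get(
    f"https://{constants.NEBARI_HOSTNAME}/hub/api/user",
    headers={"Authorization": f"token {token}"},
    verify=False,  # assumption: test deployments use self-signed certificates
)
print(response.json()["name"])
```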
diff --git a/tests/tests_e2e/cypress/integration/main.js b/tests/tests_e2e/cypress/integration/main.js
index 1e1fbf206f..1184ba76d6 100644
--- a/tests/tests_e2e/cypress/integration/main.js
+++ b/tests/tests_e2e/cypress/integration/main.js
@@ -61,6 +61,9 @@ describe('First Test', () => {
// Click VS Code Launcher exists
cy.get('div.jp-LauncherCard[title="VS Code [↗]"]').should('exist');
+ // Should reflect theme set by default_settings
+ cy.get('body[data-jp-theme-name="JupyterLab Dark"]').should('exist');
+
// Stop my Jupyter server - must do this so PVC can be destroyed on Minikube
cy.visit('/hub/home');
diff --git a/tests/tests_e2e/playwright/test_playwright.py b/tests/tests_e2e/playwright/test_playwright.py
index 7f4dabac08..903af3f0dd 100644
--- a/tests/tests_e2e/playwright/test_playwright.py
+++ b/tests/tests_e2e/playwright/test_playwright.py
@@ -13,6 +13,6 @@ def test_notebook(navigator, test_data_root):
test_app.run(
path=notebook_name,
expected_outputs=["success: 6"],
- conda_env="conda-env-default-py",
+ conda_env="default *",
timeout=500,
)
diff --git a/tests/tests_unit/cli_validate/min.happy.jupyterlab.default_settings.yaml b/tests/tests_unit/cli_validate/min.happy.jupyterlab.default_settings.yaml
new file mode 100644
index 0000000000..4b29a53c1c
--- /dev/null
+++ b/tests/tests_unit/cli_validate/min.happy.jupyterlab.default_settings.yaml
@@ -0,0 +1,5 @@
+project_name: test
+jupyterlab:
+ default_settings:
+ "@jupyterlab/apputils-extension:themes":
+ theme: JupyterLab Dark
diff --git a/tests/tests_unit/cli_validate/min.happy.monitoring.overrides.yaml b/tests/tests_unit/cli_validate/min.happy.monitoring.overrides.yaml
new file mode 100644
index 0000000000..587c0cf5cb
--- /dev/null
+++ b/tests/tests_unit/cli_validate/min.happy.monitoring.overrides.yaml
@@ -0,0 +1,10 @@
+project_name: test
+monitoring:
+ enabled: true
+ overrides:
+ loki:
+ loki: foobar
+ promtail:
+ promtail: foobar
+ minio:
+ minio: foobar