Skip to content

Commit

Permalink
Merge pull request #88 from mlinfra-io/add-prefect-on-kind
Browse files Browse the repository at this point in the history
(feat)-add-prefect-on-kind
  • Loading branch information
aliabbasjaffri authored Jul 9, 2024
2 parents 56f33e5 + 2c086d7 commit 5aeb2c7
Show file tree
Hide file tree
Showing 7 changed files with 212 additions and 0 deletions.
2 changes: 2 additions & 0 deletions examples/local/kind-advanced.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -11,3 +11,5 @@ stack:
name: lakefs
- experiment_tracking:
name: mlflow
- orchestrator:
name: prefect
2 changes: 2 additions & 0 deletions examples/local/kind.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -8,3 +8,5 @@ stack:
name: lakefs
- experiment_tracking:
name: mlflow
- orchestrator:
name: prefect
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
inputs:
- name: prefect_chart_version
user_facing: true
    description: Prefect chart version. See here for more details: https://artifacthub.io/packages/helm/prefect/prefect-server
default: "2024.6.28162841"
outputs:
- name: prefect_server_endpoint
description: Prefect server access endpoint.
export: true
Original file line number Diff line number Diff line change
@@ -0,0 +1,74 @@
# Helm values shared by the prefect-server release. Each entry maps a chart
# value path to a string; type = "auto" lets the provider infer the type.
locals {
  prefect_server_helmchart_values = [
    {
      # Deploy the bundled PostgreSQL subchart instead of an external DB.
      name  = "postgresql.useSubChart"
      value = "true"
      type  = "auto"
    },
    {
      # Persist the database so server state survives pod restarts.
      name  = "postgresql.primary.persistence.enabled"
      value = "true"
      type  = "auto"
    },
    {
      # Expose the Prefect UI/API through an ingress resource.
      name  = "ingress.enabled"
      value = "true"
      type  = "auto"
    },
    {
      name = "ingress.host.hostname"
      # Fixed: use the variable reference directly; wrapping a lone reference
      # in "${...}" is redundant interpolation and is flagged by terraform fmt.
      value = var.prefect_server_hostname
      type  = "auto"
    },
  ]
}

# Installs the prefect-server Helm chart (with its bundled PostgreSQL)
# into the target namespace via the shared local kind helm_chart module.
module "prefect_server_helmchart" {
  source = "../../../../../local/kind/helm_chart"

  name             = "prefect-server"
  namespace        = var.namespace
  create_namespace = true

  repository    = "https://prefecthq.github.io/prefect-helm"
  chart         = "prefect-server"
  chart_version = var.prefect_chart_version

  # NOTE(review): "prefect_deplyoment_type" is misspelled, but the name must
  # match the placeholder in values.yaml — rename both together if fixing.
  values = templatefile("${path.module}/values.yaml", {
    prefect_deplyoment_type = "server"
    resources               = jsonencode(var.resources)
  })

  set = concat(
    local.prefect_server_helmchart_values,
    [
      {
        name  = "postgresql.enabled"
        value = "true"
        type  = "auto"
      },
    ],
  )
}



# Helm values for the prefect-worker release: point the worker at the
# in-cluster prefect-server API and bind it to a work pool.
locals {
  prefect_worker_helmchart_values = [
    {
      name  = "worker.apiConfig"
      value = "server"
      type  = "auto"
    },
    {
      name  = "worker.config.workPool"
      value = "Kubernetes-workpool"
      type  = "auto"
    },
    {
      # In-cluster DNS name of the prefect-server service in the same namespace.
      name  = "worker.serverApiConfig.apiUrl"
      value = "http://prefect-server.${var.namespace}.svc.cluster.local:4200/api"
      type  = "auto"
    },
  ]
}

# Installs the prefect-worker Helm chart into the same namespace and
# registers it against the prefect-server release deployed above.
module "prefect_worker_helmchart" {
  source = "../../../../../local/kind/helm_chart"

  name             = "prefect-worker"
  namespace        = var.namespace
  create_namespace = false

  repository    = "https://prefecthq.github.io/prefect-helm"
  chart         = "prefect-worker"
  chart_version = var.prefect_chart_version

  # NOTE(review): "prefect_deplyoment_type" is misspelled, but the name must
  # match the placeholder in values.yaml — rename both together if fixing.
  values = templatefile("${path.module}/values.yaml", {
    prefect_deplyoment_type = "worker"
    resources               = jsonencode(var.resources)
  })

  set = local.prefect_worker_helmchart_values

  # The worker registers against the server API, so install the server first.
  depends_on = [module.prefect_server_helmchart]
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
# Hostname at which the Prefect server is exposed (wired to the ingress
# host via local.prefect_server_helmchart_values).
output "prefect_server_endpoint" {
  # Fixed: added the missing description (the module's YAML config already
  # documents this output; the output block itself did not).
  description = "Prefect server access endpoint."
  value       = var.prefect_server_hostname
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
# Helm values template rendered by templatefile(): emits a resources block
# under either the "server:" or "worker:" top-level key, depending on which
# release is being installed.
# NOTE(review): "prefect_deplyoment_type" is misspelled, but the name must
# match the templatefile() vars in main.tf — rename both together if fixing.
${prefect_deplyoment_type}:
  resources: ${resources}
Original file line number Diff line number Diff line change
@@ -0,0 +1,120 @@
# Version of the Prefect Helm charts (used for both prefect-server and
# prefect-worker), pulled from https://prefecthq.github.io/prefect-helm.
variable "prefect_chart_version" {
  type        = string
  description = "prefect Chart version."
  default     = "2024.6.28162841"
}

# Namespace hosting both the prefect-server and prefect-worker releases
# (created by the server module via create_namespace = true).
variable "namespace" {
  type = string
  # Fixed: previous description ("where the service account would be
  # installed") was copy-pasted from another module and did not describe
  # what this variable controls.
  description = "The namespace where the Prefect server and worker would be installed"
  default     = "prefect"
}

# Ingress hostname for the Prefect server; also exported as the
# prefect_server_endpoint output.
variable "prefect_server_hostname" {
  type = string
  # Fixed: added the missing description for this user-facing variable.
  description = "Hostname under which the Prefect server ingress is exposed."
  default     = "prefect-server.localhost"
}

# CPU/memory requests and limits applied to each Prefect pod, rendered into
# the chart's values via jsonencode() in main.tf.
variable "resources" {
  type        = map(any)
  description = "The resources to allocate to each prefect pod"
  default = {
    requests = {
      cpu    = "100m"
      memory = "1024Mi"
    }
    limits = {
      cpu    = "500m"
      memory = "4096Mi"
    }
  }

  validation {
    # Fixed: dropped the no-op can() wrappers around plain references (a bare
    # reference can never fail, so can(x) was always true) and the map-vs-""
    # comparison on the outer level. Fixed the memory regex: "Mb"/"Gb" are not
    # valid Kubernetes quantity suffixes, while "Ki"/"M"/"G" (etc.) are.
    condition = alltrue([
      for resource in keys(var.resources) :
      contains(["requests", "limits"], resource) &&
      alltrue([
        for subresource in keys(var.resources[resource]) :
        contains(["cpu", "memory"], subresource) &&
        var.resources[resource][subresource] != "" &&
        (subresource == "cpu" ? can(regex("^[0-9]+m?$", var.resources[resource][subresource])) : true) &&
        (subresource == "memory" ? can(regex("^[0-9]+(Ki|Mi|Gi|Ti|K|M|G|T)?$", var.resources[resource][subresource])) : true)
      ])
    ])
    error_message = "Each resource must have 'requests' or 'limits' with 'cpu' and 'memory' along with their values. CPU should be in the format '100m' or '1' and memory should be in the format '128Mi' or '1Gi'"
  }
}

# variable "nodeSelector" {
# type = map(any)
# description = "The nodeSelector to schedule prefect pods on specific nodes"
# validation {
# condition = (
# alltrue([
# for selector in keys(var.nodeSelector) :
# can(selector) && can(var.nodeSelector[selector]) && var.nodeSelector[selector] != ""
# ])
# )
# error_message = "Each nodeSelector must have a key and a non-empty value"
# }
# default = {
# "nodegroup_type" = "operations"
# }
# }

# variable "tolerations" {
# type = list(any)
# description = "The tolerations to schedule prefect pods on specific nodes"
# validation {
# condition = (
# alltrue([
# for toleration in var.tolerations :
# can(toleration.key) &&
# contains(["Equal", "Exists"], toleration.operator) &&
# can(toleration.value) &&
# contains(["NoSchedule", "PreferNoSchedule", "NoExecute"], toleration.effect)
# ])
# )
# error_message = "Each toleration must have operator set to 'Equal' or 'Exists' and effect set to 'NoSchedule', 'PreferNoSchedule' or 'NoExecute' along with key and value"
# }
# default = [{
# key = "nodegroup_type"
# operator = "Equal"
# value = "operations"
# effect = "NoSchedule"
# }]
# }

# variable "affinity" {
# type = map(any)
# description = "The affinity to schedule prefect pods on specific nodes"
# validation {
# condition = (
# alltrue([
# for affinity in var.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms :
# alltrue([
# for expression in affinity.matchExpressions :
# can(expression.key) &&
# contains(["In", "NotIn", "Exists", "DoesNotExist", "Gt", "Lt"], expression.operator) &&
# can(expression.values)
# ])
# ])
# )
# error_message = "Each matchExpression must have operator set to 'In', 'NotIn', 'Exists', 'DoesNotExist', 'Gt', 'Lt' along with key and values"
# }
# default = {
# "nodeAffinity" = {
# "requiredDuringSchedulingIgnoredDuringExecution" = {
# "nodeSelectorTerms" = [{
# "matchExpressions" = [{
# key = "nodegroup_type"
# operator = "In"
# values = ["operations"]
# }]
# }]
# }
# }
# }
# }

0 comments on commit 5aeb2c7

Please sign in to comment.