From adac5bc46ec7b7795882fd05ec71c7f3a79e0a76 Mon Sep 17 00:00:00 2001 From: Nicholas DiPiazza Date: Tue, 16 Feb 2021 23:54:04 -0500 Subject: [PATCH 1/9] add openshift --- customize_fusion_values.sh | 18 +++++++++ install_prom.sh | 18 +++++++-- setup_f5_k8s.sh | 83 +++++++++++++++++++++++--------------- upgrade_fusion.sh.example | 22 +++++----- 4 files changed, 94 insertions(+), 47 deletions(-) diff --git a/customize_fusion_values.sh b/customize_fusion_values.sh index 22e499c8..eeb22a44 100755 --- a/customize_fusion_values.sh +++ b/customize_fusion_values.sh @@ -13,6 +13,8 @@ CHART_VERSION="5.3.4" NAMESPACE=default OUTPUT_SCRIPT="" ADDITIONAL_VALUES=() +KUBECTL="kubectl" +KUBECTL_TIMEOUT_PARAM="--timeout" function print_usage() { CMD="$1" @@ -27,6 +29,7 @@ function print_usage() { echo -e " -c Cluster name (required)\n" echo -e " -n Kubernetes namespace to install Fusion 5 into, defaults to 'default'\n" echo -e " -r Helm release name for installing Fusion 5; defaults to the namespace, see -n option\n" + echo -e " -k The Kubernetes command line tool executable to use, defaults to 'kubectl'\n" echo -e " --version Fusion Helm Chart version; defaults to the latest release from Lucidworks, such as ${CHART_VERSION}\n" echo -e " --provider Name of your K8s provider, e.g. eks, aks, gke, oc; defaults to 'gke'\n" echo -e " --prometheus Enable Prometheus? true or false, defaults to true\n" @@ -69,6 +72,14 @@ if [ $# -gt 1 ]; then CLUSTER_NAME="$2" shift 2 ;; + -k) + if [[ -z "$2" || "${2:0:1}" == "-" ]]; then + print_usage "$SCRIPT_CMD" "Missing value for the -k parameter!" + exit 1 + fi + KUBECTL="$2" + shift 2 + ;; -n) if [[ -z "$2" || "${2:0:1}" == "-" ]]; then print_usage "$SCRIPT_CMD" "Missing value for the -n parameter!" @@ -181,6 +192,11 @@ if [ $# -gt 1 ]; then done fi +# Openshift cli uses --request-timeout instead of --timeout for deploys +if [ "$PROVIDER" == "oc" ]; then + KUBECTL_TIMEOUT_PARAM="--request-timeout" +fi + valid="0-9a-zA-Z\-" if [[ $NAMESPACE =~ [^$valid] ]]; then echo -e "\nERROR: Namespace $NAMESPACE must only contain 0-9, a-z, A-Z, or dash!\n" @@ -346,5 +362,7 @@ else sed -i '' -e "s||${ADDITIONAL_VALUES_STRING}|g" "$OUTPUT_SCRIPT" fi +sed -i -e "s||${KUBECTL}|g" "$OUTPUT_SCRIPT" +sed -i -e "s||${KUBECTL_TIMEOUT_PARAM}|g" "$OUTPUT_SCRIPT" echo -e "\nCreate $OUTPUT_SCRIPT for upgrading you Fusion cluster. Please keep this script along with your custom values yaml file(s) in version control.\n" diff --git a/install_prom.sh b/install_prom.sh index 37aa8ec5..e3c905c6 100755 --- a/install_prom.sh +++ b/install_prom.sh @@ -2,6 +2,7 @@ PROVIDER=gke NODE_POOL="" +KUBECTL="kubectl" UPGRADE=0 function print_usage() { @@ -15,6 +16,7 @@ function print_usage() { echo -e "\nUse this script to install Prometheus and Grafana into an existing Fusion 5 cluster" echo -e "\nUsage: $CMD [OPTIONS] ... where OPTIONS include:\n" echo -e " -c Name of the K8s cluster (required)\n" + echo -e " -k The Kubernetes command line tool executable to use, defaults to 'kubectl'\n" echo -e " -n Kubernetes namespace to install Fusion 5 into (required)\n" echo -e " -r Helm release name for installing Fusion 5; defaults to the namespace, see -n option\n" echo -e " --node-pool Node pool label to assign pods to specific nodes, this option is only useful for existing clusters" @@ -33,6 +35,14 @@ if [ $# -gt 0 ]; then CLUSTER_NAME="$2" shift 2 ;; + -k) + if [[ -z "$2" || "${2:0:1}" == "-" ]]; then + print_usage "$SCRIPT_CMD" "Missing value for the -k parameter!" 
+ exit 1 + fi + KUBECTL="$2" + shift 2 + ;; -n) if [[ -z "$2" || "${2:0:1}" == "-" ]]; then print_usage "$SCRIPT_CMD" "Missing value for the -n parameter!" @@ -120,8 +130,8 @@ if ! helm repo list | grep -q "https://kubernetes-charts.storage.googleapis.com" helm repo add stable https://charts.helm.sh/stable fi -if ! kubectl get namespace "${NAMESPACE}" > /dev/null 2>&1; then - kubectl create namespace "${NAMESPACE}" +if ! ${KUBECTL} get namespace "${NAMESPACE}" > /dev/null 2>&1; then + ${KUBECTL} create namespace "${NAMESPACE}" if [ "$PROVIDER" == "gke" ]; then who_am_i=$(gcloud auth list --filter=status:ACTIVE --format="value(account)") else @@ -129,12 +139,12 @@ if ! kubectl get namespace "${NAMESPACE}" > /dev/null 2>&1; then fi OWNER_LABEL="${who_am_i//@/-}" if [ "${OWNER_LABEL}" != "" ]; then - kubectl label namespace "${NAMESPACE}" "owner=${OWNER_LABEL}" + ${KUBECTL} label namespace "${NAMESPACE}" "owner=${OWNER_LABEL}" fi echo -e "\nCreated namespace ${NAMESPACE} with owner label ${OWNER_LABEL}\n" fi -if kubectl get sts -n "${NAMESPACE}" -l "app=prometheus" -o "jsonpath={.items[0].metadata.labels['release']}" 2>&1 | grep -q "${RELEASE}-monitoring"; then +if ${KUBECTL} get sts -n "${NAMESPACE}" -l "app=prometheus" -o "jsonpath={.items[0].metadata.labels['release']}" 2>&1 | grep -q "${RELEASE}-monitoring"; then echo -e "\nThere is already a Prometheus StatefulSet in namespace: ${NAMESPACE} with release name: ${RELEASE}-monitoring, assuming this is an upgrade\n" UPGRADE=1 fi diff --git a/setup_f5_k8s.sh b/setup_f5_k8s.sh index f2e076aa..82c3bf53 100755 --- a/setup_f5_k8s.sh +++ b/setup_f5_k8s.sh @@ -21,6 +21,8 @@ DRY_RUN="" SOLR_DISK_GB=50 SOLR_REPLICAS=1 NODE_POOL="{}" +KUBECTL="kubectl" +KUBECTL_TIMEOUT_PARAM="--timeout" function print_usage() { CMD="$1" @@ -33,6 +35,7 @@ function print_usage() { echo -e "\nUse this script to install Fusion 5 on an existing Kubernetes cluster" echo -e "\nUsage: $CMD [OPTIONS] ... where OPTIONS include:\n" echo -e " -c Name of the K8s cluster (required)\n" + echo -e " -k The Kubernetes command line tool executable to use, defaults to 'kubectl'\n" echo -e " -r Helm release name for installing Fusion 5, defaults to 'f5'\n" echo -e " -n Kubernetes namespace to install Fusion 5 into, defaults to 'default'\n" echo -e " --provider Lowercase label for your K8s platform provider, e.g. eks, aks, gke, oc; defaults to 'k8s'\n" @@ -66,6 +69,14 @@ if [ $# -gt 0 ]; then CLUSTER_NAME="$2" shift 2 ;; + -k) + if [[ -z "$2" || "${2:0:1}" == "-" ]]; then + print_usage "$SCRIPT_CMD" "Missing value for the -k parameter!" + exit 1 + fi + KUBECTL="$2" + shift 2 + ;; -n) if [[ -z "$2" || "${2:0:1}" == "-" ]]; then print_usage "$SCRIPT_CMD" "Missing value for the -n parameter!" @@ -187,6 +198,12 @@ if [ $# -gt 0 ]; then done fi +# Openshift cli uses --request-timeout instead of --timeout for deploys +if [ "$PROVIDER" == "oc" ]; then + KUBECTL_TIMEOUT_PARAM="--request-timeout" +fi + + # Sanity check we have the required variables if [ "$CLUSTER_NAME" == "" ]; then print_usage "$SCRIPT_CMD" "Please provide the Kubernetes cluster name using: -c " @@ -220,10 +237,10 @@ DEFAULT_MY_VALUES="${PROVIDER}_${CLUSTER_NAME}_${RELEASE}_fusion_values.yaml" UPGRADE_SCRIPT="${PROVIDER}_${CLUSTER_NAME}_${RELEASE}_upgrade_fusion.sh" # Check our prerequisites are in place -hash kubectl +hash ${KUBECTL} has_prereq=$? if [ $has_prereq == 1 ]; then - echo -e "\nERROR: Must install kubectl before proceeding with this script!" 
+ echo -e "\nERROR: Must install ${KUBECTL} before proceeding with this script!" exit 1 fi @@ -235,7 +252,7 @@ if [ $has_prereq == 1 ]; then fi # Log our current kube context for the user -current=$(kubectl config current-context) +current=$(${KUBECTL} config current-context) echo -e "Using kubeconfig: $current" # Setup our owner label so we can check ownership of namespaces @@ -252,12 +269,12 @@ is_helm_v3=$(helm version --short | grep v3) if [ "${is_helm_v3}" == "" ]; then # see if Tiller is deployed ... - kubectl rollout status deployment/tiller-deploy --timeout=10s -n kube-system > /dev/null 2>&1 + ${KUBECTL} rollout status deployment/tiller-deploy ${KUBECTL_TIMEOUT_PARAM}=10s -n kube-system > /dev/null 2>&1 rollout_status=$? if [ $rollout_status != 0 ]; then echo -e "\nSetting up Helm Tiller ..." - kubectl create serviceaccount --namespace kube-system tiller - kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller + ${KUBECTL} create serviceaccount --namespace kube-system tiller + ${KUBECTL} create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller helm init --service-account tiller --wait helm version fi @@ -268,20 +285,20 @@ fi # If we are upgrading if [ "${UPGRADE}" == "1" ]; then # Make sure the namespace exists - if ! kubectl get namespace "${NAMESPACE}" > /dev/null 2>&1; then + if ! ${KUBECTL} get namespace "${NAMESPACE}" > /dev/null 2>&1; then echo -e "\nNamespace ${NAMESPACE} not found, if this is a new cluster please run an install first" exit 1 fi # Check if the owner label on the namespace is the same as we are, so we cannot # accidentally upgrade a release from someone elses namespace - namespace_owner=$(kubectl get namespace "${NAMESPACE}" -o 'jsonpath={.metadata.labels.owner}') + namespace_owner=$(${KUBECTL} get namespace "${NAMESPACE}" -o 'jsonpath={.metadata.labels.owner}') if [ "${namespace_owner}" != "${OWNER_LABEL}" ] && [ "${FORCE}" != "1" ]; then echo -e "Namespace ${NAMESPACE} is owned by: ${namespace_owner}, by we are: ${OWNER_LABEL} please provide the --force parameter if you are sure you wish to upgrade this namespace" exit 1 fi elif [ "$PURGE" == "1" ]; then - kubectl get namespace "${NAMESPACE}" + ${KUBECTL} get namespace "${NAMESPACE}" namespace_exists=$? 
if [ "$namespace_exists" != "0" ]; then echo -e "\nNamespace ${NAMESPACE} not found so assuming ${RELEASE_NAME} has already been purged" @@ -290,7 +307,7 @@ elif [ "$PURGE" == "1" ]; then # Check if the owner label on the namespace is the same as we are, so we cannot # accidentally purge someone elses release - namespace_owner=$(kubectl get namespace "${NAMESPACE}" -o 'jsonpath={.metadata.labels.owner}') + namespace_owner=$(${KUBECTL} get namespace "${NAMESPACE}" -o 'jsonpath={.metadata.labels.owner}') if [ "${namespace_owner}" != "${OWNER_LABEL}" ] && [ "${FORCE}" != "1" ]; then echo -e "Namespace ${NAMESPACE} is owned by: ${namespace_owner}, by we are: ${OWNER_LABEL} please provide the --force parameter if you are sure you wish to purge this namespace" exit 1 @@ -309,16 +326,16 @@ elif [ "$PURGE" == "1" ]; then else helm del --purge "${RELEASE}" fi - kubectl delete deployments -l app.kubernetes.io/part-of=fusion --namespace "${NAMESPACE}" --grace-period=0 --force --timeout=5s - kubectl delete job "${RELEASE}-api-gateway" --namespace "${NAMESPACE}" --grace-period=0 --force --timeout=1s - kubectl delete svc -l app.kubernetes.io/part-of=fusion --namespace "${NAMESPACE}" --grace-period=0 --force --timeout=2s - kubectl delete pvc -l app.kubernetes.io/part-of=fusion --namespace "${NAMESPACE}" --grace-period=0 --force --timeout=5s - kubectl delete pvc -l "release=${RELEASE}" --namespace "${NAMESPACE}" --grace-period=0 --force --timeout=5s - kubectl delete pvc -l "app.kubernetes.io/instance=${RELEASE}" --namespace "${NAMESPACE}" --grace-period=0 --force --timeout=5s - kubectl delete pvc -l app=prometheus --namespace "${NAMESPACE}" --grace-period=0 --force --timeout=5s - kubectl delete serviceaccount --namespace "${NAMESPACE}" "${RELEASE}-api-gateway-jks-create" + ${KUBECTL} delete deployments -l app.kubernetes.io/part-of=fusion --namespace "${NAMESPACE}" --grace-period=0 --force ${KUBECTL_TIMEOUT_PARAM}=5s + ${KUBECTL} delete job "${RELEASE}-api-gateway" --namespace "${NAMESPACE}" --grace-period=0 --force ${KUBECTL_TIMEOUT_PARAM}=1s + ${KUBECTL} delete svc -l app.kubernetes.io/part-of=fusion --namespace "${NAMESPACE}" --grace-period=0 --force ${KUBECTL_TIMEOUT_PARAM}=2s + ${KUBECTL} delete pvc -l app.kubernetes.io/part-of=fusion --namespace "${NAMESPACE}" --grace-period=0 --force ${KUBECTL_TIMEOUT_PARAM}=5s + ${KUBECTL} delete pvc -l "release=${RELEASE}" --namespace "${NAMESPACE}" --grace-period=0 --force ${KUBECTL_TIMEOUT_PARAM}=5s + ${KUBECTL} delete pvc -l "app.kubernetes.io/instance=${RELEASE}" --namespace "${NAMESPACE}" --grace-period=0 --force ${KUBECTL_TIMEOUT_PARAM}=5s + ${KUBECTL} delete pvc -l app=prometheus --namespace "${NAMESPACE}" --grace-period=0 --force ${KUBECTL_TIMEOUT_PARAM}=5s + ${KUBECTL} delete serviceaccount --namespace "${NAMESPACE}" "${RELEASE}-api-gateway-jks-create" if [ "${NAMESPACE}" != "default" ] && [ "${NAMESPACE}" != "kube-public" ] && [ "${NAMESPACE}" != "kube-system" ]; then - kubectl delete namespace "${NAMESPACE}" --grace-period=0 --force --timeout=10s + ${KUBECTL} delete namespace "${NAMESPACE}" --grace-period=0 --force ${KUBECTL_TIMEOUT_PARAM}=10s fi fi exit 0 @@ -337,10 +354,10 @@ else fi # There isn't let's check if there is a fusion deployment in the namespace already - if ! kubectl get deployment -n "${NAMESPACE}" -l "app.kubernetes.io/component=query-pipeline,app.kubernetes.io/part-of=fusion" 2>&1 | grep -q "No resources"; then + if ! 
${KUBECTL} get deployment -n "${NAMESPACE}" -l "app.kubernetes.io/component=query-pipeline,app.kubernetes.io/part-of=fusion" 2>&1 | grep -q "No resources"; then # There is a fusion deployed into this namespace, try and protect against two releases being installed into # The same namespace - instance=$(kubectl get deployment -n "${NAMESPACE}" -l "app.kubernetes.io/component=query-pipeline,app.kubernetes.io/part-of=fusion" -o "jsonpath={.items[0].metadata.labels['app\.kubernetes\.io/instance']}") + instance=$(${KUBECTL} get deployment -n "${NAMESPACE}" -l "app.kubernetes.io/component=query-pipeline,app.kubernetes.io/part-of=fusion" -o "jsonpath={.items[0].metadata.labels['app\.kubernetes\.io/instance']}") echo -e "\nERROR: There is already a fusion deployment in namespace: ${NAMESPACE} with release name: ${instance}, please choose a new namespace\n" exit 1 fi @@ -350,19 +367,19 @@ fi # report_ns logs a message to the user informing them how to change the default namespace function report_ns() { if [ "${NAMESPACE}" != "default" ]; then - echo -e "\nNote: Change the default namespace for kubectl to ${NAMESPACE} by doing:\n kubectl config set-context --current --namespace=${NAMESPACE}\n" + echo -e "\nNote: Change the default namespace for ${KUBECTL} to ${NAMESPACE} by doing:\n ${KUBECTL} config set-context --current --namespace=${NAMESPACE}\n" fi } # proxy_url prints how to access the proxy via a LoadBalancer service function proxy_url() { if [ "${PROVIDER}" == "eks" ]; then - export PROXY_HOST=$(kubectl --namespace "${NAMESPACE}" get service proxy -o jsonpath='{.status.loadBalancer.ingress[0].hostname}') + export PROXY_HOST=$(${KUBECTL} --namespace "${NAMESPACE}" get service proxy -o jsonpath='{.status.loadBalancer.ingress[0].hostname}') else - export PROXY_HOST=$(kubectl --namespace "${NAMESPACE}" get service proxy -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + export PROXY_HOST=$(${KUBECTL} --namespace "${NAMESPACE}" get service proxy -o jsonpath='{.status.loadBalancer.ingress[0].ip}') fi - export PROXY_PORT=$(kubectl --namespace "${NAMESPACE}" get service proxy -o jsonpath='{.spec.ports[?(@.protocol=="TCP")].port}') + export PROXY_PORT=$(${KUBECTL} --namespace "${NAMESPACE}" get service proxy -o jsonpath='{.spec.ports[?(@.protocol=="TCP")].port}') export PROXY_URL="$PROXY_HOST:$PROXY_PORT" if [ "$PROXY_URL" != ":" ]; then @@ -381,7 +398,7 @@ function ingress_setup() { echo -ne "\nWaiting for the Loadbalancer IP to be assigned" loops=24 while (( loops > 0 )); do - ingressIp=$(kubectl --namespace "${NAMESPACE}" get ingress "${RELEASE}-api-gateway" -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + ingressIp=$(${KUBECTL} --namespace "${NAMESPACE}" get ingress "${RELEASE}-api-gateway" -o jsonpath='{.status.loadBalancer.ingress[0].ip}') if [[ ! 
-z ${ingressIp} ]]; then export INGRESS_IP="${ingressIp}" break @@ -394,13 +411,13 @@ function ingress_setup() { done else #EKS setup for supporting ALBs and nginx ingress - ALB_DNS=$(kubectl get ing ${RELEASE}-api-gateway --output=jsonpath={.status..loadBalancer..ingress[].hostname}) + ALB_DNS=$(${KUBECTL} get ing ${RELEASE}-api-gateway --output=jsonpath={.status..loadBalancer..ingress[].hostname}) echo -e "\n\nPlease ensure that the public DNS record for ${INGRESS_HOSTNAME} is updated to point to ${ALB_DNS}\n" fi if [ "$TLS_ENABLED" == "1" ]; then - echo -e "An SSL certificate will be automatically generated once the public DNS record has been updated,\nthis may take up to an hour after DNS has updated to be issued.\nYou can use kubectl get managedcertificates -o yaml to check the status of the certificate issue process." + echo -e "An SSL certificate will be automatically generated once the public DNS record has been updated,\nthis may take up to an hour after DNS has updated to be issued.\nYou can use ${KUBECTL} get managedcertificates -o yaml to check the status of the certificate issue process." fi report_ns } @@ -438,11 +455,11 @@ if [ "$UPGRADE" != "1" ]; then #Adding a retry loop because EKS takes more time to create nodes. retries=6 while (( retries > 0 )); do - find_nodes=$(kubectl get nodes -l "${node_selector}" | grep -i ready) + find_nodes=$(${KUBECTL} get nodes -l "${node_selector}" | grep -i ready) has_nodes=$? if [ "${has_nodes}" == "0" ]; then echo -e "Found at least one healthy node matching nodeSelector: ${NODE_POOL}" - num_nodes=$(kubectl get nodes -l "${node_selector}" | grep -i ready | wc -l) + num_nodes=$(${KUBECTL} get nodes -l "${node_selector}" | grep -i ready | wc -l) retries=-1 else echo -e "\nERROR: No 'Ready' nodes found matching nodeSelector: ${node_selector}! Retrying in 30 seconds" @@ -456,10 +473,10 @@ if [ "$UPGRADE" != "1" ]; then exit 1 fi else - num_nodes=$(kubectl get nodes | grep -i ready | wc -l) + num_nodes=$(${KUBECTL} get nodes | grep -i ready | wc -l) fi - ( "${SCRIPT_DIR}/customize_fusion_values.sh" "${DEFAULT_MY_VALUES}" -c "${CLUSTER_NAME}" -n "${NAMESPACE}" -r "${RELEASE}" --provider "${PROVIDER}" --prometheus "${PROMETHEUS_ON}" \ + ( "${SCRIPT_DIR}/customize_fusion_values.sh" "${DEFAULT_MY_VALUES}" -c "${CLUSTER_NAME}" -k "${KUBECTL}" -n "${NAMESPACE}" -r "${RELEASE}" --provider "${PROVIDER}" --prometheus "${PROMETHEUS_ON}" \ --num-solr "${SOLR_REPLICAS}" --solr-disk-gb "${SOLR_DISK_GB}" --node-pool "${NODE_POOL}" --version "${CHART_VERSION}" --output-script "${UPGRADE_SCRIPT}" ${VALUES_STRING} ) else echo -e "\nValues file $DEFAULT_MY_VALUES already exists, not regenerating.\n" @@ -476,7 +493,7 @@ fi # just let the user do that manually with Helm as needed if [ "$UPGRADE" != "1" ] && [ "${PROMETHEUS}" != "none" ]; then if [ "${PROMETHEUS}" == "install" ]; then - ( "${SCRIPT_DIR}/install_prom.sh" -c "${CLUSTER_NAME}" -n "${NAMESPACE}" -r "${RELEASE}" --provider "${PROVIDER}" --node-pool "${NODE_POOL}" ) + ( "${SCRIPT_DIR}/install_prom.sh" -k "${KUBECTL}" -c "${CLUSTER_NAME}" -n "${NAMESPACE}" -r "${RELEASE}" --provider "${PROVIDER}" --node-pool "${NODE_POOL}" ) fi fi diff --git a/upgrade_fusion.sh.example b/upgrade_fusion.sh.example index 21969156..e0631c22 100755 --- a/upgrade_fusion.sh.example +++ b/upgrade_fusion.sh.example @@ -7,6 +7,8 @@ CLUSTER_NAME= RELEASE= NAMESPACE= CHART_VERSION= +KUBECTL= +KUBECTL_TIMEOUT_PARAM= MY_VALUES="" @@ -24,7 +26,7 @@ if [ ! 
-z "${DRY_RUN_REQUESTED}" ]; then DRY_RUN="--dry-run" fi -current_context=$(kubectl config current-context | grep "$CLUSTER_NAME") +current_context=$(${KUBECTL} config current-context | grep "$CLUSTER_NAME") #Openshift doesn't include the cluster name as a part of the current context if [[ "${current_context}" == "" && "$PROVIDER" != "oc" ]]; then @@ -32,8 +34,8 @@ if [[ "${current_context}" == "" && "$PROVIDER" != "oc" ]]; then exit 1 fi -if ! kubectl get namespace "${NAMESPACE}" > /dev/null 2>&1; then - kubectl create namespace "${NAMESPACE}" +if ! ${KUBECTL} get namespace "${NAMESPACE}" > /dev/null 2>&1; then + ${KUBECTL} create namespace "${NAMESPACE}" if [ "$PROVIDER" == "gke" ]; then who_am_i=$(gcloud auth list --filter=status:ACTIVE --format="value(account)") else @@ -41,7 +43,7 @@ if ! kubectl get namespace "${NAMESPACE}" > /dev/null 2>&1; then fi OWNER_LABEL="${who_am_i//@/-}" if [ "${OWNER_LABEL}" != "" ]; then - kubectl label namespace "${NAMESPACE}" "owner=${OWNER_LABEL}" + ${KUBECTL} label namespace "${NAMESPACE}" "owner=${OWNER_LABEL}" fi echo -e "\nCreated namespace ${NAMESPACE} with owner label ${OWNER_LABEL}\n" fi @@ -58,8 +60,8 @@ helm repo update if [ "$PROVIDER" == "gke" ]; then # Make sure that the metric server is running - metrics_deployment=$(kubectl get deployment -n kube-system | grep metrics-server | cut -d ' ' -f1 -) - kubectl rollout status deployment/${metrics_deployment} --timeout=60s --namespace "kube-system" + metrics_deployment=$(${KUBECTL} get deployment -n kube-system | grep metrics-server | cut -d ' ' -f1 -) + ${KUBECTL} rollout status deployment/${metrics_deployment} ${KUBECTL_TIMEOUT_PARAM}=60s --namespace "kube-system" echo "" fi @@ -69,13 +71,13 @@ echo -e "\nNOTE: If this will be a long-running cluster for production purposes, helm upgrade ${DRY_RUN} ${RELEASE} "${lw_helm_repo}/fusion" --install --namespace "${NAMESPACE}" --version "${CHART_VERSION}" ${MY_VALUES} echo -e "\nWaiting up to 10 minutes to see the Fusion API Gateway deployment come online ...\n" -kubectl rollout status deployment/${RELEASE}-api-gateway --timeout=600s --namespace "${NAMESPACE}" +${KUBECTL} rollout status deployment/${RELEASE}-api-gateway ${KUBECTL_TIMEOUT_PARAM}=600s --namespace "${NAMESPACE}" echo -e "\nWaiting up to 5 minutes to see the Fusion Indexing deployment come online ...\n" -kubectl rollout status deployment/${RELEASE}-fusion-indexing --timeout=300s --namespace "${NAMESPACE}" +${KUBECTL} rollout status deployment/${RELEASE}-fusion-indexing ${KUBECTL_TIMEOUT_PARAM}=300s --namespace "${NAMESPACE}" -current_ns=$(kubectl config view --minify --output 'jsonpath={..namespace}') +current_ns=$(${KUBECTL} config view --minify --output 'jsonpath={..namespace}') if [ "$NAMESPACE" != "$current_ns" ]; then - kubectl config set-context --current --namespace=${NAMESPACE} + ${KUBECTL} config set-context --current --namespace=${NAMESPACE} fi echo "" helm ls From ab8e668c1692e35e9e4f6e8d8c882b92203639e2 Mon Sep 17 00:00:00 2001 From: Nicholas DiPiazza Date: Thu, 25 Feb 2021 09:45:21 -0600 Subject: [PATCH 2/9] add more openshift switches so that ocp on-prem works when using the "oc" cli. add a no-rbac.yaml feature for openshift on prem users that are not admins. 
--- example-values/no-rbac.yaml | 118 ++++++++++++++++++++++++++++++++++++ setup_f5_k8s.sh | 34 +++++++---- upgrade_fusion.sh.example | 34 +++++++---- 3 files changed, 162 insertions(+), 24 deletions(-) create mode 100644 example-values/no-rbac.yaml diff --git a/example-values/no-rbac.yaml b/example-values/no-rbac.yaml new file mode 100644 index 00000000..49f97f1e --- /dev/null +++ b/example-values/no-rbac.yaml @@ -0,0 +1,118 @@ +admin-ui: + rbac: + create: false +api-gateway: + rbac: + create: false +argo: + rbac: + create: false + createAggregateRoles: false +argo-common-workflows: + rbac: + create: false +auth-ui: + rbac: + create: false +classic-rest-service: + rbac: + create: false +classification: + rbac: + create: false +config-sync: + rbac: + create: false +connector-plugin-service: + rbac: + create: false +devops-ui: + rbac: + create: false +fusion-admin: + rbac: + create: false +fusion-ambassador: + rbac: + create: false +fusion-indexing: + rbac: + create: false +fusion-jupyter: + rbac: + create: false +fusion-log-forwarder: + rbac: + create: false +fusion-resources: + rbac: + create: false +insights: + rbac: + create: false +job-launcher: + rbac: + create: false +job-rest-server: + rbac: + create: false +milvus-writable: + rbac: + create: false +ml-model-service: + rbac: + create: false + runLabelNamespaceJob: false +pm-ui: + rbac: + create: false +pulsar: + rbac: + create: false +question-answering: + rbac: + create: false +recommender: + rbac: + create: false +rest-service: + rbac: + create: false +rpc-service: + rbac: + create: false + plugins: + crd: + create: false +rules-ui: + rbac: + create: false +seldon-core-operator: + rbac: + create: false + disableWebhookCreation: true + crds: + enabled: false +solr: + rbac: + create: false + setAvailabilityZone: false +solr-backup-runner: + rbac: + create: false +sql-service: + rbac: + create: false +templating: + rbac: + create: false +webapps: + rbac: + create: false +zookeeper: + rbac: + create: false +query-pipeline: + useAvailabilityZoneRouting: false + rbac: + create: false diff --git a/setup_f5_k8s.sh b/setup_f5_k8s.sh index 82c3bf53..a70579e9 100755 --- a/setup_f5_k8s.sh +++ b/setup_f5_k8s.sh @@ -201,6 +201,8 @@ fi # Openshift cli uses --request-timeout instead of --timeout for deploys if [ "$PROVIDER" == "oc" ]; then KUBECTL_TIMEOUT_PARAM="--request-timeout" + # Openshift does not have concept of a "Cluster name" so we just set it to the namespace for placeholder. + CLUSTER_NAME="${NAMESPACE}" fi @@ -286,21 +288,23 @@ fi if [ "${UPGRADE}" == "1" ]; then # Make sure the namespace exists if ! 
${KUBECTL} get namespace "${NAMESPACE}" > /dev/null 2>&1; then - echo -e "\nNamespace ${NAMESPACE} not found, if this is a new cluster please run an install first" - exit 1 + if [ "$PROVIDER" != "oc" ]; then + echo -e "\nNamespace ${NAMESPACE} not found, if this is a new cluster please run an install first" + exit 1 + fi fi # Check if the owner label on the namespace is the same as we are, so we cannot # accidentally upgrade a release from someone elses namespace namespace_owner=$(${KUBECTL} get namespace "${NAMESPACE}" -o 'jsonpath={.metadata.labels.owner}') - if [ "${namespace_owner}" != "${OWNER_LABEL}" ] && [ "${FORCE}" != "1" ]; then + if [ "${namespace_owner}" != "${OWNER_LABEL}" ] && [ "${FORCE}" != "1" ] && [ "$PROVIDER" != "oc" ]; then echo -e "Namespace ${NAMESPACE} is owned by: ${namespace_owner}, by we are: ${OWNER_LABEL} please provide the --force parameter if you are sure you wish to upgrade this namespace" exit 1 fi elif [ "$PURGE" == "1" ]; then ${KUBECTL} get namespace "${NAMESPACE}" namespace_exists=$? - if [ "$namespace_exists" != "0" ]; then + if [ "$namespace_exists" != "0"] && [ "$PROVIDER" != "oc" ]; then echo -e "\nNamespace ${NAMESPACE} not found so assuming ${RELEASE_NAME} has already been purged" exit 1 fi @@ -308,7 +312,7 @@ elif [ "$PURGE" == "1" ]; then # Check if the owner label on the namespace is the same as we are, so we cannot # accidentally purge someone elses release namespace_owner=$(${KUBECTL} get namespace "${NAMESPACE}" -o 'jsonpath={.metadata.labels.owner}') - if [ "${namespace_owner}" != "${OWNER_LABEL}" ] && [ "${FORCE}" != "1" ]; then + if [ "${namespace_owner}" != "${OWNER_LABEL}" ] && [ "${FORCE}" != "1" ] && [ "$PROVIDER" != "oc" ]; then echo -e "Namespace ${NAMESPACE} is owned by: ${namespace_owner}, by we are: ${OWNER_LABEL} please provide the --force parameter if you are sure you wish to purge this namespace" exit 1 fi @@ -344,22 +348,26 @@ else if [ "${is_helm_v3}" == "" ]; then if helm status "${RELEASE}" > /dev/null 2>&1 ; then echo -e "\nERROR: There is already a release with name: ${RELEASE} installed in the cluster, please choose a different release name or upgrade the release\n" - exit 1 + #exit 1 fi else if helm status --namespace "${NAMESPACE}" "${RELEASE}" > /dev/null 2>&1 ; then - echo -e "\nERROR: There is already a release with name: ${RELEASE} installed in namespace: ${NAMESPACE} in the cluster, please choose a different release name or upgrade the release\n" - exit 1 + if [ "$PROVIDER" != "oc" ]; then + echo -e "\nERROR: There is already a release with name: ${RELEASE} installed in namespace: ${NAMESPACE} in the cluster, please choose a different release name or upgrade the release\n" + exit 1 + fi fi fi # There isn't let's check if there is a fusion deployment in the namespace already if ! 
${KUBECTL} get deployment -n "${NAMESPACE}" -l "app.kubernetes.io/component=query-pipeline,app.kubernetes.io/part-of=fusion" 2>&1 | grep -q "No resources"; then - # There is a fusion deployed into this namespace, try and protect against two releases being installed into - # The same namespace - instance=$(${KUBECTL} get deployment -n "${NAMESPACE}" -l "app.kubernetes.io/component=query-pipeline,app.kubernetes.io/part-of=fusion" -o "jsonpath={.items[0].metadata.labels['app\.kubernetes\.io/instance']}") - echo -e "\nERROR: There is already a fusion deployment in namespace: ${NAMESPACE} with release name: ${instance}, please choose a new namespace\n" - exit 1 + if [ "$PROVIDER" != "oc" ]; then + # There is a fusion deployed into this namespace, try and protect against two releases being installed into + # The same namespace + instance=$(${KUBECTL} get deployment -n "${NAMESPACE}" -l "app.kubernetes.io/component=query-pipeline,app.kubernetes.io/part-of=fusion" -o "jsonpath={.items[0].metadata.labels['app\.kubernetes\.io/instance']}") + echo -e "\nERROR: There is already a fusion deployment in namespace: ${NAMESPACE} with release name: ${instance}, please choose a new namespace\n" + exit 1 + fi fi # We should be good to install now fi diff --git a/upgrade_fusion.sh.example b/upgrade_fusion.sh.example index e0631c22..71f8fa4e 100755 --- a/upgrade_fusion.sh.example +++ b/upgrade_fusion.sh.example @@ -56,7 +56,16 @@ if ! helm repo list | grep -q "https://charts.lucidworks.com"; then helm repo add ${lw_helm_repo} https://charts.lucidworks.com fi -helm repo update +helm_chart_to_use="${lw_helm_repo}/fusion" + +if [ "$LOCAL_HELM_CHART" != "" ]; then + helm_chart_to_use="${LOCAL_HELM_CHART}" + echo "LOCAL_HELM_CHART variable was specified. Will use ${LOCAL_HELM_CHART} as the helm chart." 
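+  # LOCAL_HELM_CHART is assumed to be provided via the environment, e.g.
+  #   export LOCAL_HELM_CHART=/path/to/fusion-chart.tgz
+  # so the upgrade can use a locally mirrored chart instead of pulling from the Lucidworks repo.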
+fi + +if [ "$LOCAL_HELM_CHART" == "" ]; then + helm repo update +fi if [ "$PROVIDER" == "gke" ]; then # Make sure that the metric server is running @@ -65,20 +74,23 @@ if [ "$PROVIDER" == "gke" ]; then echo "" fi -echo -e "Upgrading the '$RELEASE' release (Fusion chart: $CHART_VERSION) in the '$NAMESPACE' namespace in the '$CLUSTER_NAME' cluster using values:\n ${MY_VALUES//--values}" +echo -e "Upgrading the '$RELEASE' release (Fusion chart: ${helm_chart_to_use}, version: $CHART_VERSION) in the '$NAMESPACE' namespace in the '$CLUSTER_NAME' cluster using values:\n ${MY_VALUES//--values}" echo -e "\nNOTE: If this will be a long-running cluster for production purposes, you should save the following file(s) in version control:\n${MY_VALUES//--values}\n" +echo "helm upgrade ${DRY_RUN} ${RELEASE} ${helm_chart_to_use} --install --namespace ${NAMESPACE} --version ${CHART_VERSION} ${MY_VALUES}" --skip-crds +helm upgrade ${DRY_RUN} ${RELEASE} ${helm_chart_to_use} --install --namespace "${NAMESPACE}" --version "${CHART_VERSION}" ${MY_VALUES} --skip-crds -helm upgrade ${DRY_RUN} ${RELEASE} "${lw_helm_repo}/fusion" --install --namespace "${NAMESPACE}" --version "${CHART_VERSION}" ${MY_VALUES} - -echo -e "\nWaiting up to 10 minutes to see the Fusion API Gateway deployment come online ...\n" -${KUBECTL} rollout status deployment/${RELEASE}-api-gateway ${KUBECTL_TIMEOUT_PARAM}=600s --namespace "${NAMESPACE}" -echo -e "\nWaiting up to 5 minutes to see the Fusion Indexing deployment come online ...\n" -${KUBECTL} rollout status deployment/${RELEASE}-fusion-indexing ${KUBECTL_TIMEOUT_PARAM}=300s --namespace "${NAMESPACE}" +if [ "$DRY_RUN" != "--dry-run"]; then + echo -e "\nWaiting up to 10 minutes to see the Fusion API Gateway deployment come online ...\n" + ${KUBECTL} rollout status deployment/${RELEASE}-api-gateway ${KUBECTL_TIMEOUT_PARAM}=600s --namespace "${NAMESPACE}" + echo -e "\nWaiting up to 5 minutes to see the Fusion Indexing deployment come online ...\n" + ${KUBECTL} rollout status deployment/${RELEASE}-fusion-indexing ${KUBECTL_TIMEOUT_PARAM}=300s --namespace "${NAMESPACE}" -current_ns=$(${KUBECTL} config view --minify --output 'jsonpath={..namespace}') -if [ "$NAMESPACE" != "$current_ns" ]; then - ${KUBECTL} config set-context --current --namespace=${NAMESPACE} + current_ns=$(${KUBECTL} config view --minify --output 'jsonpath={..namespace}') + if [ "$NAMESPACE" != "$current_ns" ]; then + ${KUBECTL} config set-context --current --namespace=${NAMESPACE} + fi fi + echo "" helm ls echo "" From 3126f4bd6f29f240bba0ca3b0043038ebefc854e Mon Sep 17 00:00:00 2001 From: Nicholas DiPiazza Date: Thu, 25 Feb 2021 10:11:45 -0600 Subject: [PATCH 3/9] add a --skip-crds parameter --- customize_fusion_values.sh | 6 ++++++ setup_f5_k8s.sh | 7 ++++++- upgrade_fusion.sh.example | 5 +++-- 3 files changed, 15 insertions(+), 3 deletions(-) diff --git a/customize_fusion_values.sh b/customize_fusion_values.sh index eeb22a44..31dbeb53 100755 --- a/customize_fusion_values.sh +++ b/customize_fusion_values.sh @@ -15,6 +15,7 @@ OUTPUT_SCRIPT="" ADDITIONAL_VALUES=() KUBECTL="kubectl" KUBECTL_TIMEOUT_PARAM="--timeout" +SKIP_CRDS="" function print_usage() { CMD="$1" @@ -41,6 +42,7 @@ function print_usage() { echo -e " --with-replicas Flag to enable replicas yaml, defaults to off\n" echo -e " --additional-values Additional values files to add to the upgrade script, may be specified multiple times\n" echo -e " --output-script The name of the generated upgrade script, defaults to ___upgrade_fusion.sh \n" + echo -e " --skip-crds 
Set the --skip-crds flag on the helm upgrade. Use this in situations where you do no have permissions to make Custom Resource Definitions.\n" echo -e "\nIf you omit the arg, then the script will create it using the naming convention:\n ___fusion_values.yaml\n" } @@ -112,6 +114,9 @@ if [ $# -gt 1 ]; then PROVIDER="$2" shift 2 ;; + --skip-crds) + SKIP_CRDS="--skip-crds" + ;; --prometheus) if [[ -z "$2" || "${2:0:1}" == "-" ]]; then print_usage "$SCRIPT_CMD" "Missing value for the --prometheus parameter!" @@ -364,5 +369,6 @@ fi sed -i -e "s||${KUBECTL}|g" "$OUTPUT_SCRIPT" sed -i -e "s||${KUBECTL_TIMEOUT_PARAM}|g" "$OUTPUT_SCRIPT" +sed -i -e "s||${SKIP_CRDS}|g" "$OUTPUT_SCRIPT" echo -e "\nCreate $OUTPUT_SCRIPT for upgrading you Fusion cluster. Please keep this script along with your custom values yaml file(s) in version control.\n" diff --git a/setup_f5_k8s.sh b/setup_f5_k8s.sh index a70579e9..1a53ebb3 100755 --- a/setup_f5_k8s.sh +++ b/setup_f5_k8s.sh @@ -23,6 +23,7 @@ SOLR_REPLICAS=1 NODE_POOL="{}" KUBECTL="kubectl" KUBECTL_TIMEOUT_PARAM="--timeout" +SKIP_CRDS="" function print_usage() { CMD="$1" @@ -56,6 +57,7 @@ function print_usage() { echo -e " --force Force upgrade or purge a deployment if your account is not the value 'owner' label on the namespace\n" echo -e " --num-solr Number of Solr pods to deploy, defaults to 1\n" echo -e " --solr-disk-gb Size (in gigabytes) of the Solr persistent volume claim, defaults to 50\n" + echo -e " --skip-crds Set the --skip-crds flag on the helm upgrade. Use this in situations where you do no have permissions to make Custom Resource Definitions.\n" } if [ $# -gt 0 ]; then @@ -178,6 +180,9 @@ if [ $# -gt 0 ]; then FORCE=1 shift 1 ;; + --skip-crds) + SKIP_CRDS="--skip-crds" + ;; -help|-usage|--help|--usage) print_usage "$SCRIPT_CMD" exit 0 @@ -485,7 +490,7 @@ if [ "$UPGRADE" != "1" ]; then fi ( "${SCRIPT_DIR}/customize_fusion_values.sh" "${DEFAULT_MY_VALUES}" -c "${CLUSTER_NAME}" -k "${KUBECTL}" -n "${NAMESPACE}" -r "${RELEASE}" --provider "${PROVIDER}" --prometheus "${PROMETHEUS_ON}" \ - --num-solr "${SOLR_REPLICAS}" --solr-disk-gb "${SOLR_DISK_GB}" --node-pool "${NODE_POOL}" --version "${CHART_VERSION}" --output-script "${UPGRADE_SCRIPT}" ${VALUES_STRING} ) + --num-solr "${SOLR_REPLICAS}" --solr-disk-gb "${SOLR_DISK_GB}" --node-pool "${NODE_POOL}" --version "${CHART_VERSION}" --output-script "${UPGRADE_SCRIPT}" ${VALUES_STRING} ${SKIP_CRDS} ) else echo -e "\nValues file $DEFAULT_MY_VALUES already exists, not regenerating.\n" fi diff --git a/upgrade_fusion.sh.example b/upgrade_fusion.sh.example index 71f8fa4e..698d3454 100755 --- a/upgrade_fusion.sh.example +++ b/upgrade_fusion.sh.example @@ -9,6 +9,7 @@ NAMESPACE= CHART_VERSION= KUBECTL= KUBECTL_TIMEOUT_PARAM= +SKIP_CRDS= MY_VALUES="" @@ -76,8 +77,8 @@ fi echo -e "Upgrading the '$RELEASE' release (Fusion chart: ${helm_chart_to_use}, version: $CHART_VERSION) in the '$NAMESPACE' namespace in the '$CLUSTER_NAME' cluster using values:\n ${MY_VALUES//--values}" echo -e "\nNOTE: If this will be a long-running cluster for production purposes, you should save the following file(s) in version control:\n${MY_VALUES//--values}\n" -echo "helm upgrade ${DRY_RUN} ${RELEASE} ${helm_chart_to_use} --install --namespace ${NAMESPACE} --version ${CHART_VERSION} ${MY_VALUES}" --skip-crds -helm upgrade ${DRY_RUN} ${RELEASE} ${helm_chart_to_use} --install --namespace "${NAMESPACE}" --version "${CHART_VERSION}" ${MY_VALUES} --skip-crds +echo "helm upgrade ${DRY_RUN} ${RELEASE} ${helm_chart_to_use} --install --namespace 
${NAMESPACE} --version ${CHART_VERSION} ${MY_VALUES}" ${SKIP_CRDS} +helm upgrade ${DRY_RUN} ${RELEASE} ${helm_chart_to_use} --install --namespace "${NAMESPACE}" --version "${CHART_VERSION}" ${MY_VALUES} ${SKIP_CRDS} if [ "$DRY_RUN" != "--dry-run"]; then echo -e "\nWaiting up to 10 minutes to see the Fusion API Gateway deployment come online ...\n" From 776abc55e5edac1e9e813a39a2c9c1675e7301d1 Mon Sep 17 00:00:00 2001 From: Nicholas DiPiazza Date: Thu, 25 Feb 2021 15:17:27 -0600 Subject: [PATCH 4/9] fix syntax error --- upgrade_fusion.sh.example | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/upgrade_fusion.sh.example b/upgrade_fusion.sh.example index 698d3454..0201607f 100755 --- a/upgrade_fusion.sh.example +++ b/upgrade_fusion.sh.example @@ -80,7 +80,7 @@ echo -e "\nNOTE: If this will be a long-running cluster for production purposes, echo "helm upgrade ${DRY_RUN} ${RELEASE} ${helm_chart_to_use} --install --namespace ${NAMESPACE} --version ${CHART_VERSION} ${MY_VALUES}" ${SKIP_CRDS} helm upgrade ${DRY_RUN} ${RELEASE} ${helm_chart_to_use} --install --namespace "${NAMESPACE}" --version "${CHART_VERSION}" ${MY_VALUES} ${SKIP_CRDS} -if [ "$DRY_RUN" != "--dry-run"]; then +if [ "$DRY_RUN" != "--dry-run" ]; then echo -e "\nWaiting up to 10 minutes to see the Fusion API Gateway deployment come online ...\n" ${KUBECTL} rollout status deployment/${RELEASE}-api-gateway ${KUBECTL_TIMEOUT_PARAM}=600s --namespace "${NAMESPACE}" echo -e "\nWaiting up to 5 minutes to see the Fusion Indexing deployment come online ...\n" From 5412e3d19eb58ab71f793980453cb3e4ea4c1e58 Mon Sep 17 00:00:00 2001 From: Nicholas DiPiazza Date: Tue, 13 Apr 2021 11:27:09 -0500 Subject: [PATCH 5/9] fix new param --- customize_fusion_values.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/customize_fusion_values.sh b/customize_fusion_values.sh index 31dbeb53..3e1ea2f0 100755 --- a/customize_fusion_values.sh +++ b/customize_fusion_values.sh @@ -116,6 +116,7 @@ if [ $# -gt 1 ]; then ;; --skip-crds) SKIP_CRDS="--skip-crds" + shift 1 ;; --prometheus) if [[ -z "$2" || "${2:0:1}" == "-" ]]; then From 021cb9997b1edf574df77b8741ce2fbcf994f1eb Mon Sep 17 00:00:00 2001 From: Nicholas DiPiazza Date: Tue, 13 Apr 2021 11:27:46 -0500 Subject: [PATCH 6/9] fix new param --- setup_f5_k8s.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/setup_f5_k8s.sh b/setup_f5_k8s.sh index 1a53ebb3..77ed1b92 100755 --- a/setup_f5_k8s.sh +++ b/setup_f5_k8s.sh @@ -182,6 +182,7 @@ if [ $# -gt 0 ]; then ;; --skip-crds) SKIP_CRDS="--skip-crds" + shift 1 ;; -help|-usage|--help|--usage) print_usage "$SCRIPT_CMD" From 9c5c6f224e20785a0b6aecf5cee618390a88827f Mon Sep 17 00:00:00 2001 From: Nicholas DiPiazza Date: Tue, 13 Apr 2021 11:28:09 -0500 Subject: [PATCH 7/9] add a parameter for --skip-crds instead of making people make hard coded change --- example-values/replicas.yaml | 82 +++++++++++++++++++++++------------ example-values/resources.yaml | 73 +++++++++++++++++++++---------- upgrade_fusion.sh.example | 4 +- 3 files changed, 107 insertions(+), 52 deletions(-) diff --git a/example-values/replicas.yaml b/example-values/replicas.yaml index e860a7c9..e43a89da 100644 --- a/example-values/replicas.yaml +++ b/example-values/replicas.yaml @@ -6,13 +6,12 @@ # https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-custom-metrics admin-ui: - replicaCount: 1 + replicaCount: 0 api-gateway: - replicaCount: 2 + replicaCount: 0 autoscaling: enabled: true - minReplicas: 2 maxReplicas: 4 metrics: - type: Resource @@ -21,22 
+20,21 @@ api-gateway: targetAverageUtilization: 70 auth-ui: - replicaCount: 1 + replicaCount: 0 classic-rest-service: - replicaCount: 1 + replicaCount: 0 devops-ui: - replicaCount: 1 + replicaCount: 0 fusion-admin: - replicaCount: 2 + replicaCount: 0 fusion-indexing: - replicaCount: 2 + replicaCount: 0 autoscaling: enabled: true - minReplicas: 2 maxReplicas: 4 metrics: - type: Resource @@ -45,10 +43,9 @@ fusion-indexing: targetAverageUtilization: 70 insights: - replicaCount: 2 + replicaCount: 0 autoscaling: enabled: false - minReplicas: 2 maxReplicas: 3 metrics: - type: Resource @@ -57,13 +54,12 @@ insights: targetAverageUtilization: 70 job-launcher: - replicaCount: 1 + replicaCount: 0 job-rest-server: - replicaCount: 2 + replicaCount: 0 autoscaling: enabled: false - minReplicas: 2 maxReplicas: 3 metrics: - type: Resource @@ -72,10 +68,9 @@ job-rest-server: targetAverageUtilization: 70 ml-model-service: - replicaCount: 2 + replicaCount: 0 autoscaling: enabled: true - minReplicas: 2 maxReplicas: 6 metrics: - type: Resource @@ -84,10 +79,9 @@ ml-model-service: targetAverageUtilization: 70 query-pipeline: - replicaCount: 2 + replicaCount: 0 autoscaling: enabled: true - minReplicas: 2 maxReplicas: 6 metrics: - type: Resource @@ -96,10 +90,9 @@ query-pipeline: targetAverageUtilization: 60 rest-service: - replicaCount: 2 + replicaCount: 0 autoscaling: enabled: false - minReplicas: 2 maxReplicas: 3 metrics: - type: Resource @@ -108,17 +101,16 @@ rest-service: targetAverageUtilization: 70 rpc-service: - replicaCount: 2 + replicaCount: 0 plugins: # Number of replicas of each plugin service. # WARN this setting is global and will affect all plugins. # This setting only affect new plugins deployments. # Scale them up/down manually if needed. # The replicas for plugins can be modified by modifying plugin.replicas in values.yaml file for rpc-connectors - replicas: 1 + replicas: 0 autoscaling: enabled: true - minReplicas: 2 maxReplicas: 4 metrics: - type: Resource @@ -127,19 +119,18 @@ rpc-service: targetAverageUtilization: 70 rules-ui: - replicaCount: 1 + replicaCount: 0 solr: - replicaCount: 3 + replicaCount: 4 sql-service: replicaCount: 0 webapps: - replicaCount: 2 + replicaCount: 0 autoscaling: enabled: false - minReplicas: 2 maxReplicas: 3 metrics: - type: Resource @@ -150,9 +141,46 @@ webapps: zookeeper: replicaCount: 3 +fusion-ambassador: + replicaCount: 0 + +milvus-writable: + replicaCount: 0 connector-plugin-service: # the base connector-plugin-service deployment must be 0 replicaCount: 0 +argo: + replicaCount: 0 +argo-common-workflows: + replicaCount: 0 + +classification: + replicaCount: 0 +config-sync: + replicaCount: 0 +fusion-jupyter: + replicaCount: 0 +fusion-log-forwarder: + replicaCount: 0 +fusion-resources: + replicaCount: 0 +pm-ui: + replicaCount: 0 +pulsar: + bookkeeper: + replicaCount: 0 + broker: + replicaCount: 0 +question-answering: + replicaCount: 0 +recommender: + replicaCount: 0 +seldon-core-operator: + replicaCount: 0 +solr-backup-runner: + replicaCount: 0 +templating: + replicaCount: 0 diff --git a/example-values/resources.yaml b/example-values/resources.yaml index 7459c3d4..94f6fd90 100644 --- a/example-values/resources.yaml +++ b/example-values/resources.yaml @@ -16,6 +16,7 @@ solr: cpu: 800m memory: 6Gi limits: + cpu: 1500m memory: 6Gi pulsar: @@ -33,6 +34,7 @@ pulsar: cpu: 300m memory: 2300Mi limits: + cpu: 300m memory: 2300Mi bookkeeper: @@ -49,6 +51,7 @@ pulsar: cpu: 300m memory: 2300Mi limits: + cpu: 1 memory: 2300Mi argo: @@ -56,15 +59,17 @@ argo: resources: 
requests: memory: "128Mi" - cpu: "50m" + cpu: "100m" limits: + cpu: "550m" memory: "256Mi" controller: resources: requests: memory: "128Mi" - cpu: "50m" + cpu: "100m" limits: + cpu: "300m" memory: "256Mi" seldon-core-operator: @@ -78,10 +83,11 @@ seldon-core-operator: admin-ui: resources: requests: - cpu: "50m" - memory: "8Mi" + cpu: "100m" + memory: "50Mi" limits: - memory: "32Mi" + cpu: "300m" + memory: "50Mi" api-gateway: jksSetup: @@ -97,15 +103,17 @@ api-gateway: cpu: "500m" memory: "2300Mi" limits: + cpu: "700m" memory: "2300Mi" auth-ui: resources: requests: - cpu: "50m" - memory: "8Mi" + cpu: "100m" + memory: "50Mi" limits: - memory: "32Mi" + cpu: "400m" + memory: "50Mi" classic-rest-service: resources: @@ -113,15 +121,17 @@ classic-rest-service: cpu: "300m" memory: "2300Mi" limits: + cpu: "1" memory: "2300Mi" devops-ui: resources: requests: - cpu: "50m" - memory: "8Mi" + cpu: "100m" + memory: "50Mi" limits: - memory: "32Mi" + cpu: "300m" + memory: "50Mi" fusion-admin: resources: @@ -129,6 +139,7 @@ fusion-admin: cpu: "400m" memory: "2300Mi" limits: + cpu: "600m" memory: "2300Mi" fusion-indexing: @@ -137,6 +148,7 @@ fusion-indexing: cpu: "500m" memory: "800Mi" limits: + cpu: "500m" memory: "1600Mi" fusion-log-forwarder: @@ -146,20 +158,22 @@ fusion-log-forwarder: memory: "64Mi" limits: memory: "128Mi" + cpu: "100m" insights: resources: requests: - cpu: "50m" + cpu: "100m" memory: "500Mi" limits: memory: "1Gi" + cpu: "100m" job-launcher: sparkCleanup: resources: requests: - cpu: "50m" + cpu: "100m" memory: "128Mi" limits: cpu: "200m" @@ -167,7 +181,7 @@ job-launcher: argoCleanup: resources: requests: - cpu: "50m" + cpu: "100m" memory: "128Mi" limits: cpu: "200m" @@ -178,6 +192,7 @@ job-launcher: cpu: 200m limits: memory: 3Gi + cpu: 200m job-rest-server: resources: @@ -186,6 +201,7 @@ job-rest-server: cpu: 200m limits: memory: 1Gi + cpu: 200m ml-model-service: preinstall: @@ -203,6 +219,7 @@ ml-model-service: cpu: 800m limits: memory: 3Gi + cpu: 800m milvus: image: resources: @@ -220,6 +237,7 @@ query-pipeline: memory: "2300Mi" limits: memory: "3300Mi" + cpu: "700m" # if PM templating feature is being used (similar needs as query-pipeline): templating: @@ -229,6 +247,7 @@ templating: memory: "2300Mi" limits: memory: "3300Mi" + cpu: "700m" # if PM templating feature is not being used # the service is still needed but only for minimal PM functionality, @@ -248,6 +267,7 @@ rest-service: memory: "1500Mi" limits: memory: "2300Mi" + cpu: "200m" rpc-service: resources: @@ -256,30 +276,33 @@ rpc-service: memory: "1500Mi" limits: memory: "2300Mi" + cpu: "300m" pm-ui: resources: requests: - cpu: "50m" - memory: "8Mi" + cpu: "100m" + memory: "50Mi" limits: - cpu: "50m" - memory: "32Mi" + cpu: "100m" + memory: "50Mi" rules-ui: resources: requests: - cpu: "50m" - memory: "8Mi" + cpu: "100m" + memory: "50Mi" limits: - memory: "32Mi" + cpu: "100m" + memory: "50Mi" zookeeper: resources: requests: memory: 1Gi - cpu: 400m + cpu: "1" limits: + cpu: "100m" memory: 1500Mi sql-service: @@ -288,6 +311,7 @@ sql-service: cpu: "500m" memory: 2Gi limits: + cpu: "100m" memory: 2Gi webapps: @@ -296,6 +320,7 @@ webapps: cpu: "300m" memory: "800Mi" limits: + cpu: "100m" memory: "1300Mi" logstash: @@ -310,7 +335,7 @@ logstash: resources: requests: cpu: "100m" - memory: "32Mi" + memory: "50Mi" limits: cpu: "150m" memory: "64Mi" @@ -321,6 +346,7 @@ config-sync: cpu: 200m memory: 1Gi limits: + cpu: 200m memory: 2Gi connector-plugin-service: @@ -329,4 +355,5 @@ connector-plugin-service: cpu: "200m" memory: "1500Mi" 
limits: + cpu: "200m" memory: "2300Mi" diff --git a/upgrade_fusion.sh.example b/upgrade_fusion.sh.example index 0201607f..103a6bfe 100755 --- a/upgrade_fusion.sh.example +++ b/upgrade_fusion.sh.example @@ -77,8 +77,8 @@ fi echo -e "Upgrading the '$RELEASE' release (Fusion chart: ${helm_chart_to_use}, version: $CHART_VERSION) in the '$NAMESPACE' namespace in the '$CLUSTER_NAME' cluster using values:\n ${MY_VALUES//--values}" echo -e "\nNOTE: If this will be a long-running cluster for production purposes, you should save the following file(s) in version control:\n${MY_VALUES//--values}\n" -echo "helm upgrade ${DRY_RUN} ${RELEASE} ${helm_chart_to_use} --install --namespace ${NAMESPACE} --version ${CHART_VERSION} ${MY_VALUES}" ${SKIP_CRDS} -helm upgrade ${DRY_RUN} ${RELEASE} ${helm_chart_to_use} --install --namespace "${NAMESPACE}" --version "${CHART_VERSION}" ${MY_VALUES} ${SKIP_CRDS} +echo "helm upgrade ${DRY_RUN} ${RELEASE} ${helm_chart_to_use} --install --namespace ${NAMESPACE} --version ${CHART_VERSION} ${MY_VALUES}" --skip-crds +helm upgrade ${DRY_RUN} ${RELEASE} ${helm_chart_to_use} --install --namespace "${NAMESPACE}" --version "${CHART_VERSION}" ${MY_VALUES} --skip-crds if [ "$DRY_RUN" != "--dry-run" ]; then echo -e "\nWaiting up to 10 minutes to see the Fusion API Gateway deployment come online ...\n" From 9e0d2928613f84c2a641c4080e334ab6a5701feb Mon Sep 17 00:00:00 2001 From: Nicholas DiPiazza Date: Tue, 13 Apr 2021 11:36:22 -0500 Subject: [PATCH 8/9] undo accidental files --- example-values/replicas.yaml | 82 ++++++++++++----------------------- example-values/resources.yaml | 73 ++++++++++--------------------- 2 files changed, 50 insertions(+), 105 deletions(-) diff --git a/example-values/replicas.yaml b/example-values/replicas.yaml index e43a89da..e860a7c9 100644 --- a/example-values/replicas.yaml +++ b/example-values/replicas.yaml @@ -6,12 +6,13 @@ # https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-custom-metrics admin-ui: - replicaCount: 0 + replicaCount: 1 api-gateway: - replicaCount: 0 + replicaCount: 2 autoscaling: enabled: true + minReplicas: 2 maxReplicas: 4 metrics: - type: Resource @@ -20,21 +21,22 @@ api-gateway: targetAverageUtilization: 70 auth-ui: - replicaCount: 0 + replicaCount: 1 classic-rest-service: - replicaCount: 0 + replicaCount: 1 devops-ui: - replicaCount: 0 + replicaCount: 1 fusion-admin: - replicaCount: 0 + replicaCount: 2 fusion-indexing: - replicaCount: 0 + replicaCount: 2 autoscaling: enabled: true + minReplicas: 2 maxReplicas: 4 metrics: - type: Resource @@ -43,9 +45,10 @@ fusion-indexing: targetAverageUtilization: 70 insights: - replicaCount: 0 + replicaCount: 2 autoscaling: enabled: false + minReplicas: 2 maxReplicas: 3 metrics: - type: Resource @@ -54,12 +57,13 @@ insights: targetAverageUtilization: 70 job-launcher: - replicaCount: 0 + replicaCount: 1 job-rest-server: - replicaCount: 0 + replicaCount: 2 autoscaling: enabled: false + minReplicas: 2 maxReplicas: 3 metrics: - type: Resource @@ -68,9 +72,10 @@ job-rest-server: targetAverageUtilization: 70 ml-model-service: - replicaCount: 0 + replicaCount: 2 autoscaling: enabled: true + minReplicas: 2 maxReplicas: 6 metrics: - type: Resource @@ -79,9 +84,10 @@ ml-model-service: targetAverageUtilization: 70 query-pipeline: - replicaCount: 0 + replicaCount: 2 autoscaling: enabled: true + minReplicas: 2 maxReplicas: 6 metrics: - type: Resource @@ -90,9 +96,10 @@ query-pipeline: targetAverageUtilization: 60 rest-service: - replicaCount: 0 + replicaCount: 2 autoscaling: 
enabled: false + minReplicas: 2 maxReplicas: 3 metrics: - type: Resource @@ -101,16 +108,17 @@ rest-service: targetAverageUtilization: 70 rpc-service: - replicaCount: 0 + replicaCount: 2 plugins: # Number of replicas of each plugin service. # WARN this setting is global and will affect all plugins. # This setting only affect new plugins deployments. # Scale them up/down manually if needed. # The replicas for plugins can be modified by modifying plugin.replicas in values.yaml file for rpc-connectors - replicas: 0 + replicas: 1 autoscaling: enabled: true + minReplicas: 2 maxReplicas: 4 metrics: - type: Resource @@ -119,18 +127,19 @@ rpc-service: targetAverageUtilization: 70 rules-ui: - replicaCount: 0 + replicaCount: 1 solr: - replicaCount: 4 + replicaCount: 3 sql-service: replicaCount: 0 webapps: - replicaCount: 0 + replicaCount: 2 autoscaling: enabled: false + minReplicas: 2 maxReplicas: 3 metrics: - type: Resource @@ -141,46 +150,9 @@ webapps: zookeeper: replicaCount: 3 -fusion-ambassador: - replicaCount: 0 - -milvus-writable: - replicaCount: 0 connector-plugin-service: # the base connector-plugin-service deployment must be 0 replicaCount: 0 -argo: - replicaCount: 0 -argo-common-workflows: - replicaCount: 0 - -classification: - replicaCount: 0 -config-sync: - replicaCount: 0 -fusion-jupyter: - replicaCount: 0 -fusion-log-forwarder: - replicaCount: 0 -fusion-resources: - replicaCount: 0 -pm-ui: - replicaCount: 0 -pulsar: - bookkeeper: - replicaCount: 0 - broker: - replicaCount: 0 -question-answering: - replicaCount: 0 -recommender: - replicaCount: 0 -seldon-core-operator: - replicaCount: 0 -solr-backup-runner: - replicaCount: 0 -templating: - replicaCount: 0 diff --git a/example-values/resources.yaml b/example-values/resources.yaml index 94f6fd90..7459c3d4 100644 --- a/example-values/resources.yaml +++ b/example-values/resources.yaml @@ -16,7 +16,6 @@ solr: cpu: 800m memory: 6Gi limits: - cpu: 1500m memory: 6Gi pulsar: @@ -34,7 +33,6 @@ pulsar: cpu: 300m memory: 2300Mi limits: - cpu: 300m memory: 2300Mi bookkeeper: @@ -51,7 +49,6 @@ pulsar: cpu: 300m memory: 2300Mi limits: - cpu: 1 memory: 2300Mi argo: @@ -59,17 +56,15 @@ argo: resources: requests: memory: "128Mi" - cpu: "100m" + cpu: "50m" limits: - cpu: "550m" memory: "256Mi" controller: resources: requests: memory: "128Mi" - cpu: "100m" + cpu: "50m" limits: - cpu: "300m" memory: "256Mi" seldon-core-operator: @@ -83,11 +78,10 @@ seldon-core-operator: admin-ui: resources: requests: - cpu: "100m" - memory: "50Mi" + cpu: "50m" + memory: "8Mi" limits: - cpu: "300m" - memory: "50Mi" + memory: "32Mi" api-gateway: jksSetup: @@ -103,17 +97,15 @@ api-gateway: cpu: "500m" memory: "2300Mi" limits: - cpu: "700m" memory: "2300Mi" auth-ui: resources: requests: - cpu: "100m" - memory: "50Mi" + cpu: "50m" + memory: "8Mi" limits: - cpu: "400m" - memory: "50Mi" + memory: "32Mi" classic-rest-service: resources: @@ -121,17 +113,15 @@ classic-rest-service: cpu: "300m" memory: "2300Mi" limits: - cpu: "1" memory: "2300Mi" devops-ui: resources: requests: - cpu: "100m" - memory: "50Mi" + cpu: "50m" + memory: "8Mi" limits: - cpu: "300m" - memory: "50Mi" + memory: "32Mi" fusion-admin: resources: @@ -139,7 +129,6 @@ fusion-admin: cpu: "400m" memory: "2300Mi" limits: - cpu: "600m" memory: "2300Mi" fusion-indexing: @@ -148,7 +137,6 @@ fusion-indexing: cpu: "500m" memory: "800Mi" limits: - cpu: "500m" memory: "1600Mi" fusion-log-forwarder: @@ -158,22 +146,20 @@ fusion-log-forwarder: memory: "64Mi" limits: memory: "128Mi" - cpu: "100m" insights: resources: requests: - 
cpu: "100m" + cpu: "50m" memory: "500Mi" limits: memory: "1Gi" - cpu: "100m" job-launcher: sparkCleanup: resources: requests: - cpu: "100m" + cpu: "50m" memory: "128Mi" limits: cpu: "200m" @@ -181,7 +167,7 @@ job-launcher: argoCleanup: resources: requests: - cpu: "100m" + cpu: "50m" memory: "128Mi" limits: cpu: "200m" @@ -192,7 +178,6 @@ job-launcher: cpu: 200m limits: memory: 3Gi - cpu: 200m job-rest-server: resources: @@ -201,7 +186,6 @@ job-rest-server: cpu: 200m limits: memory: 1Gi - cpu: 200m ml-model-service: preinstall: @@ -219,7 +203,6 @@ ml-model-service: cpu: 800m limits: memory: 3Gi - cpu: 800m milvus: image: resources: @@ -237,7 +220,6 @@ query-pipeline: memory: "2300Mi" limits: memory: "3300Mi" - cpu: "700m" # if PM templating feature is being used (similar needs as query-pipeline): templating: @@ -247,7 +229,6 @@ templating: memory: "2300Mi" limits: memory: "3300Mi" - cpu: "700m" # if PM templating feature is not being used # the service is still needed but only for minimal PM functionality, @@ -267,7 +248,6 @@ rest-service: memory: "1500Mi" limits: memory: "2300Mi" - cpu: "200m" rpc-service: resources: @@ -276,33 +256,30 @@ rpc-service: memory: "1500Mi" limits: memory: "2300Mi" - cpu: "300m" pm-ui: resources: requests: - cpu: "100m" - memory: "50Mi" + cpu: "50m" + memory: "8Mi" limits: - cpu: "100m" - memory: "50Mi" + cpu: "50m" + memory: "32Mi" rules-ui: resources: requests: - cpu: "100m" - memory: "50Mi" + cpu: "50m" + memory: "8Mi" limits: - cpu: "100m" - memory: "50Mi" + memory: "32Mi" zookeeper: resources: requests: memory: 1Gi - cpu: "1" + cpu: 400m limits: - cpu: "100m" memory: 1500Mi sql-service: @@ -311,7 +288,6 @@ sql-service: cpu: "500m" memory: 2Gi limits: - cpu: "100m" memory: 2Gi webapps: @@ -320,7 +296,6 @@ webapps: cpu: "300m" memory: "800Mi" limits: - cpu: "100m" memory: "1300Mi" logstash: @@ -335,7 +310,7 @@ logstash: resources: requests: cpu: "100m" - memory: "50Mi" + memory: "32Mi" limits: cpu: "150m" memory: "64Mi" @@ -346,7 +321,6 @@ config-sync: cpu: 200m memory: 1Gi limits: - cpu: 200m memory: 2Gi connector-plugin-service: @@ -355,5 +329,4 @@ connector-plugin-service: cpu: "200m" memory: "1500Mi" limits: - cpu: "200m" memory: "2300Mi" From a35bce6e7b6e214473adf3bf1a42e9a9dacdfe50 Mon Sep 17 00:00:00 2001 From: Nicholas DiPiazza Date: Tue, 13 Apr 2021 11:38:59 -0500 Subject: [PATCH 9/9] add a repository.yaml example so people know how to use an internal docker repo --- example-values/repository.yaml | 157 +++++++++++++++++++++++++++++++++ 1 file changed, 157 insertions(+) create mode 100644 example-values/repository.yaml diff --git a/example-values/repository.yaml b/example-values/repository.yaml new file mode 100644 index 00000000..df29b9ed --- /dev/null +++ b/example-values/repository.yaml @@ -0,0 +1,157 @@ +zookeeper: + image: + repository: "YOUR-REPOSITORY-DOMAIN:443/zookeeper" +solr: + image: + repository: "YOUR-REPOSITORY-DOMAIN:443/solr" + initContainer: + image: + repository: "YOUR-REPOSITORY-DOMAIN:443/lucidworks" +admin-ui: + image: + repository: "YOUR-REPOSITORY-DOMAIN:443/lucidworks" +api-gateway: + image: + repository: "YOUR-REPOSITORY-DOMAIN:443/lucidworks" + keytoolUtils: + image: + repository: "YOUR-REPOSITORY-DOMAIN:443/lucidworks" +argo: + image: + repository: "YOUR-REPOSITORY-DOMAIN:443/lucidworks" +argo-common-workflows: + image: + repository: "YOUR-REPOSITORY-DOMAIN:443/lucidworks" +auth-ui: + image: + repository: "YOUR-REPOSITORY-DOMAIN:443/lucidworks" +classic-rest-service: + image: + repository: 
"YOUR-REPOSITORY-DOMAIN:443/lucidworks" +classification: + image: + repository: "YOUR-REPOSITORY-DOMAIN:443/lucidworks" +config-sync: + image: + repository: "YOUR-REPOSITORY-DOMAIN:443/lucidworks" +connector-plugin-service: + image: + repository: "YOUR-REPOSITORY-DOMAIN:443/lucidworks" +devops-ui: + image: + repository: "YOUR-REPOSITORY-DOMAIN:443/lucidworks" +fusion-admin: + image: + repository: "YOUR-REPOSITORY-DOMAIN:443/lucidworks" +fusion-ambassador: + image: + repository: "YOUR-REPOSITORY-DOMAIN:443/lucidworks" +fusion-indexing: + image: + repository: "YOUR-REPOSITORY-DOMAIN:443/lucidworks" +fusion-jupyter: + image: + repository: "YOUR-REPOSITORY-DOMAIN:443/lucidworks" +fusion-log-forwarder: + image: + repository: "YOUR-REPOSITORY-DOMAIN:443/lucidworks" +fusion-resources: + image: + repository: "YOUR-REPOSITORY-DOMAIN:443/lucidworks" +insights: + image: + repository: "YOUR-REPOSITORY-DOMAIN:443/lucidworks" +job-launcher: + image: + repository: "YOUR-REPOSITORY-DOMAIN:443/lucidworks" +job-rest-server: + image: + repository: "YOUR-REPOSITORY-DOMAIN:443/lucidworks" +milvus-writable: + image: + repository: "YOUR-REPOSITORY-DOMAIN:443/lucidworks" +pm-ui: + image: + repository: "YOUR-REPOSITORY-DOMAIN:443/lucidworks" +pulsar: + bookkeeper: + image: + repository: "YOUR-REPOSITORY-DOMAIN:443/apachepulsar/pulsar-all" + broker: + image: + repository: "YOUR-REPOSITORY-DOMAIN:443/apachepulsar/pulsar-all" + initContainer: + image: + repository: "YOUR-REPOSITORY-DOMAIN:443/lucidworks" + image: + repository: "YOUR-REPOSITORY-DOMAIN:443/apachepulsar/pulsar-all" + images: + autorecovery: + repository: "YOUR-REPOSITORY-DOMAIN:443/apachepulsar/pulsar-all" + bookie: + repository: "YOUR-REPOSITORY-DOMAIN:443/apachepulsar/pulsar-all" + broker: + repository: "YOUR-REPOSITORY-DOMAIN:443/apachepulsar/pulsar-all" + functions: + repository: "YOUR-REPOSITORY-DOMAIN:443/apachepulsar/pulsar-all" + pulsar_manager: + repository: "YOUR-REPOSITORY-DOMAIN:443/apachepulsar/pulsar-manager" + zookeeper: + repository: "YOUR-REPOSITORY-DOMAIN:443/apachepulsar/pulsar-all" + pulsar_metadata: + image: + repository: "YOUR-REPOSITORY-DOMAIN:443/apachepulsar/pulsar-all" +question-answering: + image: + repository: "YOUR-REPOSITORY-DOMAIN:443/lucidworks" +recommender: + image: + repository: "YOUR-REPOSITORY-DOMAIN:443/lucidworks" +rest-service: + image: + repository: "YOUR-REPOSITORY-DOMAIN:443/lucidworks" +rpc-service: + image: + repository: "YOUR-REPOSITORY-DOMAIN:443/lucidworks" +rules-ui: + image: + repository: "YOUR-REPOSITORY-DOMAIN:443/lucidworks" +seldon-core-operator: + image: + registry: "YOUR-REPOSITORY-DOMAIN:443" +solr-backup-runner: + image: + repository: "YOUR-REPOSITORY-DOMAIN:443/lucidworks" +sql-service: + image: + repository: "YOUR-REPOSITORY-DOMAIN:443/lucidworks" +templating: + image: + repository: "YOUR-REPOSITORY-DOMAIN:443/lucidworks" +webapps: + image: + repository: "YOUR-REPOSITORY-DOMAIN:443/lucidworks" +query-pipeline: + image: + repository: "YOUR-REPOSITORY-DOMAIN:443/lucidworks" +ml-model-service: + milvus: + admin: + image: + repository: "YOUR-REPOSITORY-DOMAIN:443/milvusdb/milvus-em" + mysql: + image: "YOUR-REPOSITORY-DOMAIN:443/mysql" + busybox: + image: "YOUR-REPOSITORY-DOMAIN:443/busybox" + initContainerImage: "YOUR-REPOSITORY-DOMAIN:443/alpine:latest" + ambassador: + initContainerImage: "YOUR-REPOSITORY-DOMAIN:443/alpine:latest" + image: + repository: "YOUR-REPOSITORY-DOMAIN:443/quay.io/datawire/ambassador" + initContainer: + image: + repository: "YOUR-REPOSITORY-DOMAIN:443/lucidworks" + image: 
+ repository: "YOUR-REPOSITORY-DOMAIN:443/lucidworks" + mysql: + image: "YOUR-REPOSITORY-DOMAIN:443/mysql"