From ccef8d6966b716aed26f9258359a0a659df8b50e Mon Sep 17 00:00:00 2001 From: Manuel Gerding Date: Wed, 6 Dec 2023 14:33:29 +0100 Subject: [PATCH] fix: Container attribute needs to refer advice target --- extadvice/cpu_limit/action_needed_summary.md | 2 +- extadvice/cpu_request/action_needed_summary.md | 2 +- extadvice/liveness_probe/action_needed_summary.md | 2 +- extadvice/memory_limit/action_needed_summary.md | 2 +- extadvice/memory_request/action_needed_summary.md | 2 +- extadvice/readiness_probe/action_needed_summary.md | 2 +- extadvice/single_replica/instructions.md | 3 +-- 7 files changed, 7 insertions(+), 8 deletions(-) diff --git a/extadvice/cpu_limit/action_needed_summary.md b/extadvice/cpu_limit/action_needed_summary.md index b509b16..5185eb5 100644 --- a/extadvice/cpu_limit/action_needed_summary.md +++ b/extadvice/cpu_limit/action_needed_summary.md @@ -1,3 +1,3 @@ When your containers of ${target.steadybit.label} use too much CPU, other pods on the same node may suffer and become unstable. -**Container affected:** ${k8s.container.spec.limit.cpu.not-set[]} +**Container affected:** ${target.k8s.container.spec.limit.cpu.not-set[]} diff --git a/extadvice/cpu_request/action_needed_summary.md b/extadvice/cpu_request/action_needed_summary.md index cd994c9..5ca281d 100644 --- a/extadvice/cpu_request/action_needed_summary.md +++ b/extadvice/cpu_request/action_needed_summary.md @@ -1,3 +1,3 @@ When your containers of ${target.steadybit.label} requests unnecessary high CPU, you're wasting resources by over-allocating and making it unlikely to schedule your pod on available node resources. 
-**Container affected:** ${k8s.container.spec.request.cpu.not-set[]} +**Container affected:** ${target.k8s.container.spec.request.cpu.not-set[]} diff --git a/extadvice/liveness_probe/action_needed_summary.md b/extadvice/liveness_probe/action_needed_summary.md index 4dd6809..20b4e5e 100644 --- a/extadvice/liveness_probe/action_needed_summary.md +++ b/extadvice/liveness_probe/action_needed_summary.md @@ -1,4 +1,4 @@ Kubernetes cannot detect unresponsive pods/container of ${target.steadybit.label} and thus will never restart them automatically. Eventually, this may cause to become unavailable. -**Container affected:** ${k8s.container.probes.liveness.not-set[]} +**Container affected:** ${target.k8s.container.probes.liveness.not-set[]} diff --git a/extadvice/memory_limit/action_needed_summary.md b/extadvice/memory_limit/action_needed_summary.md index b72da1d..2e00bb1 100644 --- a/extadvice/memory_limit/action_needed_summary.md +++ b/extadvice/memory_limit/action_needed_summary.md @@ -1,3 +1,3 @@ When your containers of ${target.steadybit.label} use too much memory, other pods on the same node may suffer and become unstable. -**Container affected:** ${k8s.container.spec.limit.memory.not-set[]} +**Container affected:** ${target.k8s.container.spec.limit.memory.not-set[]} diff --git a/extadvice/memory_request/action_needed_summary.md b/extadvice/memory_request/action_needed_summary.md index 2432de1..a3e3f75 100644 --- a/extadvice/memory_request/action_needed_summary.md +++ b/extadvice/memory_request/action_needed_summary.md @@ -1,3 +1,3 @@ When your containers of ${target.steadybit.label} requests unnecessary high memory, you're wasting resources by over-allocating and making it unlikely to schedule your pod on available node resources. 
-**Container affected:** ${k8s.container.spec.request.cpu.not-set[]} +**Container affected:** ${target.k8s.container.spec.request.memory.not-set[]} diff --git a/extadvice/readiness_probe/action_needed_summary.md b/extadvice/readiness_probe/action_needed_summary.md index 70e3c62..e53eb8b 100644 --- a/extadvice/readiness_probe/action_needed_summary.md +++ b/extadvice/readiness_probe/action_needed_summary.md @@ -1,4 +1,4 @@ When Kubernetes redeploys ${target.steadybit.label}, it can't determine when the following container are ready to accept incoming requests. They may receive requests before being able to handle them properly. -**Container affected:** ${k8s.container.probes.readiness.not-set[]} +**Container affected:** ${target.k8s.container.probes.readiness.not-set[]} diff --git a/extadvice/single_replica/instructions.md b/extadvice/single_replica/instructions.md index d06412d..3c17daf 100644 --- a/extadvice/single_replica/instructions.md +++ b/extadvice/single_replica/instructions.md @@ -1,7 +1,5 @@ Change ```ReplicaSet``` to two (or more) in your Kubernetes configuration in order to increase the scheduling of additional pods. The availability of your service ${target.steadybit.label} will most likely improve. -**If you increase the replica we strongly advice you to check if this is supported by your application.** - ```yaml apiVersion: apps/v1 kind: ReplicaSet @@ -18,3 +16,4 @@ spec: matchLabels: tier: ${target.steadybit.label} ``` +**If you increase the replica we strongly advise you to check if this is supported by your application.**