Skip to content

Commit

Permalink
Merge pull request #1990 from FabianKramm/main
Browse files Browse the repository at this point in the history
fix: migrate maps
  • Loading branch information
FabianKramm authored Jul 26, 2024
2 parents 612eba0 + d2273dc commit 0bc3291
Show file tree
Hide file tree
Showing 6 changed files with 182 additions and 28 deletions.
25 changes: 5 additions & 20 deletions .github/workflows/e2e.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -219,12 +219,7 @@ jobs:
--connect=false \
--distro=${{ matrix.distribution }}
_out=$(kubectl wait --for=condition=ready pod -l app=${{ env.VCLUSTER_SUFFIX }} -n ${{ env.VCLUSTER_NAMESPACE }} --timeout=300s 2>&1)
if [[ "${_out}" =~ "no matching resources" ]]
then
sleep 20
kubectl wait --for=condition=ready pod -l app=${{ env.VCLUSTER_SUFFIX }} -n ${{ env.VCLUSTER_NAMESPACE }} --timeout=300s
fi
./hack/wait-for-pod.sh -l app=${{ env.VCLUSTER_SUFFIX }} -n ${{ env.VCLUSTER_NAMESPACE }}
- name: upgrade with the dev cli
run: |
Expand All @@ -240,14 +235,7 @@ jobs:
--local-chart-dir ./chart \
-f ./test/commonValues.yaml
sleep 20
_out=$(kubectl wait --for=condition=ready pod -l app=${{ env.VCLUSTER_SUFFIX }} -n ${{ env.VCLUSTER_NAMESPACE }} --timeout=300s 2>&1)
if [[ "${_out}" =~ "no matching resources" ]]
then
sleep 20
kubectl wait --for=condition=ready pod -l app=${{ env.VCLUSTER_SUFFIX }} -n ${{ env.VCLUSTER_NAMESPACE }} --timeout=300s
fi
./hack/wait-for-pod.sh -l app=${{ env.VCLUSTER_SUFFIX }} -n ${{ env.VCLUSTER_NAMESPACE }}
e2e-tests:
name: Execute test suites
Expand Down Expand Up @@ -384,12 +372,9 @@ jobs:
if: steps.create-vcluster.outcome == 'success'
run: |
set -x
_out=$(kubectl wait --for=condition=ready pod -l app=${{ env.VCLUSTER_SUFFIX }} -n ${{ env.VCLUSTER_NAMESPACE }} --timeout=300s 2>&1)
if [[ "${_out}" =~ "no matching resources" ]]
then
sleep 20
kubectl wait --for=condition=ready pod -l app=${{ env.VCLUSTER_SUFFIX }} -n ${{ env.VCLUSTER_NAMESPACE }} --timeout=300s
fi
./hack/wait-for-pod.sh -l app=${{ env.VCLUSTER_SUFFIX }} -n ${{ env.VCLUSTER_NAMESPACE }}
continue-on-error: true

- name: Collect deployment information in case vcluster fails to start
Expand Down
36 changes: 36 additions & 0 deletions config/config_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,11 +2,47 @@ package config

import (
_ "embed"
"strings"
"testing"

"gotest.tools/assert"
)

// TestConfig_Diff verifies that Diff reports only the fields that differ
// between the default config and a mutated copy, rendered as YAML.
func TestConfig_Diff(t *testing.T) {
tests := []struct {
name string

// config mutates a fresh default config before diffing.
config func(c *Config)
// expected is the exact YAML diff (compared after strings.TrimSpace).
expected string
}{
{
name: "Simple",
config: func(c *Config) {
c.Sync.ToHost.Services.Enabled = false
},
expected: `sync:
toHost:
services:
enabled: false`,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Build two identical default configs: one stays pristine,
// the other is mutated by the test case.
defaultConfig, err := NewDefaultConfig()
assert.NilError(t, err)

toConfig, err := NewDefaultConfig()
assert.NilError(t, err)

tt.config(toConfig)

// Diff(default, mutated) must yield exactly the expected YAML.
expectedConfig, err := Diff(defaultConfig, toConfig)
assert.NilError(t, err)
assert.Equal(t, tt.expected, strings.TrimSpace(expectedConfig))
})
}
}

func TestConfig_UnmarshalYAMLStrict(t *testing.T) {
type args struct {
data []byte
Expand Down
6 changes: 6 additions & 0 deletions config/diff.go
Original file line number Diff line number Diff line change
Expand Up @@ -67,6 +67,12 @@ func diff(from, to any) any {
// if its an int, its 3 -> 0
case int:
retMap[k] = 0
// if its an int, its 3 -> 0
case int64:
retMap[k] = int64(0)
// if its an int, its 3 -> 0
case float64:
retMap[k] = float64(0)
}
} else if !reflect.DeepEqual(fromValue, toValue) {
switch fromValue.(type) {
Expand Down
37 changes: 29 additions & 8 deletions config/legacyconfig/migrate.go
Original file line number Diff line number Diff line change
Expand Up @@ -150,7 +150,7 @@ func convertEtcd(oldConfig EtcdValues, newConfig *config.Config) error {
}
newConfig.ControlPlane.BackingStore.Etcd.Deploy.StatefulSet.ExtraArgs = oldConfig.ExtraArgs
if oldConfig.Resources != nil {
newConfig.ControlPlane.BackingStore.Etcd.Deploy.StatefulSet.Resources = *oldConfig.Resources
newConfig.ControlPlane.BackingStore.Etcd.Deploy.StatefulSet.Resources = mergeResources(newConfig.ControlPlane.BackingStore.Etcd.Deploy.StatefulSet.Resources, *oldConfig.Resources)
}
newConfig.ControlPlane.BackingStore.Etcd.Deploy.StatefulSet.Persistence.AddVolumes = oldConfig.Volumes
if oldConfig.PriorityClassName != "" {
Expand Down Expand Up @@ -408,19 +408,19 @@ func convertBaseValues(oldConfig BaseHelm, newConfig *config.Config) error {
}

if len(oldConfig.Isolation.LimitRange.Default) > 0 {
newConfig.Policies.LimitRange.Default = oldConfig.Isolation.LimitRange.Default
newConfig.Policies.LimitRange.Default = mergeMaps(newConfig.Policies.LimitRange.Default, oldConfig.Isolation.LimitRange.Default)
}
if len(oldConfig.Isolation.LimitRange.DefaultRequest) > 0 {
newConfig.Policies.LimitRange.DefaultRequest = oldConfig.Isolation.LimitRange.DefaultRequest
newConfig.Policies.LimitRange.DefaultRequest = mergeMaps(newConfig.Policies.LimitRange.DefaultRequest, oldConfig.Isolation.LimitRange.DefaultRequest)
}
if len(oldConfig.Isolation.ResourceQuota.Quota) > 0 {
newConfig.Policies.ResourceQuota.Quota = oldConfig.Isolation.ResourceQuota.Quota
newConfig.Policies.ResourceQuota.Quota = mergeMaps(newConfig.Policies.ResourceQuota.Quota, oldConfig.Isolation.ResourceQuota.Quota)
}
if len(oldConfig.Isolation.ResourceQuota.Scopes) > 0 {
newConfig.Policies.ResourceQuota.Scopes = oldConfig.Isolation.ResourceQuota.Scopes
}
if len(oldConfig.Isolation.ResourceQuota.ScopeSelector) > 0 {
newConfig.Policies.ResourceQuota.ScopeSelector = oldConfig.Isolation.ResourceQuota.ScopeSelector
newConfig.Policies.ResourceQuota.ScopeSelector = mergeMaps(newConfig.Policies.ResourceQuota.ScopeSelector, oldConfig.Isolation.ResourceQuota.ScopeSelector)
}

if oldConfig.Isolation.Namespace != nil {
Expand Down Expand Up @@ -455,7 +455,7 @@ func convertBaseValues(oldConfig BaseHelm, newConfig *config.Config) error {
newConfig.ControlPlane.CoreDNS.Deployment.Pods.Labels = oldConfig.Coredns.PodLabels
newConfig.ControlPlane.CoreDNS.Deployment.Pods.Annotations = oldConfig.Coredns.PodAnnotations
if oldConfig.Coredns.Resources != nil {
newConfig.ControlPlane.CoreDNS.Deployment.Resources = *oldConfig.Coredns.Resources
newConfig.ControlPlane.CoreDNS.Deployment.Resources = mergeResources(newConfig.ControlPlane.CoreDNS.Deployment.Resources, *oldConfig.Coredns.Resources)
}
if oldConfig.Coredns.Plugin.Enabled {
if len(oldConfig.Coredns.Plugin.Config) > 0 {
Expand Down Expand Up @@ -703,7 +703,7 @@ func convertSyncerConfig(oldConfig SyncerValues, newConfig *config.Config) error
return fmt.Errorf("syncer.volumeMounts is not allowed anymore, please remove this field or use syncer.extraVolumeMounts")
}
if len(oldConfig.Resources.Limits) > 0 || len(oldConfig.Resources.Requests) > 0 {
newConfig.ControlPlane.StatefulSet.Resources = oldConfig.Resources
newConfig.ControlPlane.StatefulSet.Resources = mergeResources(newConfig.ControlPlane.StatefulSet.Resources, oldConfig.Resources)
}

newConfig.ControlPlane.Service.Annotations = oldConfig.ServiceAnnotations
Expand Down Expand Up @@ -1058,7 +1058,7 @@ func convertVClusterConfig(oldConfig VClusterValues, retDistroCommon *config.Dis
retDistroCommon.Env = oldConfig.Env
convertImage(oldConfig.Image, &retDistroContainer.Image)
if len(oldConfig.Resources) > 0 {
retDistroCommon.Resources = oldConfig.Resources
retDistroCommon.Resources = mergeMaps(retDistroCommon.Resources, oldConfig.Resources)
}
retDistroContainer.ExtraArgs = append(retDistroContainer.ExtraArgs, oldConfig.ExtraArgs...)
if oldConfig.ImagePullPolicy != "" {
Expand Down Expand Up @@ -1144,3 +1144,24 @@ func convertObject(from, to interface{}) error {

return json.Unmarshal(out, to)
}

// mergeResources combines two resource specs field by field. On key
// collisions within Limits or Requests the entries from "to" win,
// mirroring mergeMaps semantics.
func mergeResources(from, to config.Resources) config.Resources {
	merged := config.Resources{}
	merged.Limits = mergeMaps(from.Limits, to.Limits)
	merged.Requests = mergeMaps(from.Requests, to.Requests)
	return merged
}

// mergeMaps merges two string-keyed maps into a fresh map, with entries
// from "to" overriding entries from "from" on key collisions. It returns
// nil only when both inputs are nil, so an "unset" map stays unset.
func mergeMaps(from, to map[string]interface{}) map[string]interface{} {
	if from == nil && to == nil {
		return nil
	}
	// Copy "from" first, then "to", so later sources take precedence.
	merged := make(map[string]interface{}, len(from)+len(to))
	for _, source := range []map[string]interface{}{from, to} {
		for key, value := range source {
			merged[key] = value
		}
	}
	return merged
}
55 changes: 55 additions & 0 deletions config/legacyconfig/migrate_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -386,6 +386,61 @@ syncer:
- name: binaries
persistentVolumeClaim:
claimName: my-pvc
scheduling:
podManagementPolicy: OrderedReady`,
ExpectedErr: "",
},
{
Name: "quotas",
Distro: "k8s",
In: `isolation:
enabled: true
resourceQuota:
enabled: true
quota:
limits.cpu: 16`,
Expected: `controlPlane:
backingStore:
etcd:
deploy:
enabled: true
distro:
k8s:
enabled: true
statefulSet:
scheduling:
podManagementPolicy: OrderedReady
policies:
limitRange:
enabled: true
networkPolicy:
enabled: true
podSecurityStandard: baseline
resourceQuota:
enabled: true
quota:
limits.cpu: 16`,
ExpectedErr: "",
},
{
Name: "resources",
Distro: "k8s",
In: `syncer:
resources:
limits:
memory: 10Gi`,
Expected: `controlPlane:
backingStore:
etcd:
deploy:
enabled: true
distro:
k8s:
enabled: true
statefulSet:
resources:
limits:
memory: 10Gi
scheduling:
podManagementPolicy: OrderedReady`,
ExpectedErr: "",
Expand Down
51 changes: 51 additions & 0 deletions hack/wait-for-pod.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,51 @@
#!/bin/bash
# wait-for-pod.sh — block until a pod matching a label selector exists and
# becomes Ready in the given namespace.
#
# Usage: wait-for-pod.sh -n <namespace> -l <label-selector> [-c <context>]
#
# An empty --context falls through to kubectl's current context.

NAMESPACE=""
LABEL_SELECTOR=""
POD_NAME=""
CONTEXT=""

# Parse command-line arguments
while [[ $# -gt 0 ]]; do
  case "$1" in
  -n | --namespace)
    NAMESPACE="$2"
    shift 2
    ;;
  -l | --label-selector)
    LABEL_SELECTOR="$2"
    shift 2
    ;;
  -c | --context)
    CONTEXT="$2"
    shift 2
    ;;
  *)
    echo "Invalid argument: $1"
    exit 1
    ;;
  esac
done

# Validate required arguments
if [[ -z "$NAMESPACE" ]]; then
  echo "Namespace is required. Use the '-n' or '--namespace' flag."
  exit 1
fi

if [[ -z "$LABEL_SELECTOR" ]]; then
  echo "Label selector is required. Use the '-l' or '--label-selector' flag."
  exit 1
fi

# Loop until a pod with the given label selector is created.
# jsonpath errors (no items yet) are discarded, leaving POD_NAME empty.
# NOTE(review): this polls forever if the pod never appears; the CI job
# timeout is assumed to bound it — confirm before reusing elsewhere.
while [[ -z "$POD_NAME" ]]; do
  POD_NAME=$(kubectl get pod --context="$CONTEXT" -n "$NAMESPACE" -l "$LABEL_SELECTOR" --output=jsonpath='{.items[0].metadata.name}' 2>/dev/null)
  if [[ -z "$POD_NAME" ]]; then
    echo "Pod with label selector '$LABEL_SELECTOR' not found in context '$CONTEXT'. Waiting..."
    sleep 5
  fi
done

# Wait for the pod to be ready. Quote the expansions (the original left
# them bare here, unlike the loop above) so namespaces/selectors containing
# shell-special characters are passed through intact (ShellCheck SC2086).
kubectl wait --context="$CONTEXT" --for=condition=Ready -n "$NAMESPACE" pod -l "$LABEL_SELECTOR" --timeout=5m

0 comments on commit 0bc3291

Please sign in to comment.