diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 7136e757d..8c633a522 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -29,7 +29,8 @@ make build-deploy-operator TAG=myTag RELEASE_REGISTRY=docker.io/user1 RELEASE_NA Which will result in building the operator image `docker.io/user1/myImgName:myTag`, uploading it to your image registry and deploying to your cluster. Please be aware that it is using the cluster your current kube-context is pointing to. -**Note:** All bash variables are optional, the default values are set and will result in an image `ghcr.io/openfeature/operator:latest` +> [!NOTE] +> All bash variables are optional, the default values are set and will result in an image `ghcr.io/openfeature/operator:latest` ### Autogenerated Documentation diff --git a/chart/open-feature-operator/README.md b/chart/open-feature-operator/README.md index e39ea39fd..509f58fe2 100644 --- a/chart/open-feature-operator/README.md +++ b/chart/open-feature-operator/README.md @@ -45,6 +45,13 @@ helm repo update helm upgrade --install open-feature-operator openfeature/open-feature-operator ``` +> [!NOTE] +> If you upgrade to OFO `v0.5.4` or higher while using a `flagd-proxy` provider, the instance of +`flagd-proxy` will be automatically upgraded to the latest supported version by the `open-feature-operator`. +The upgrade of `flagd-proxy` will also consider your current `FeatureFlagSource` configuration and adapt +the `flagd-proxy` Deployment accordingly. +If you are upgrading OFO to `v0.5.3` or lower, `flagd-proxy` (if present) won't be upgraded automatically. + #### Upgrade CRDs CRDs are not upgraded automatically with helm (https://helm.sh/docs/chart_best_practices/custom_resource_definitions/). 
diff --git a/common/flagdproxy/flagdproxy.go b/common/flagdproxy/flagdproxy.go index 360d357d9..056c61d65 100644 --- a/common/flagdproxy/flagdproxy.go +++ b/common/flagdproxy/flagdproxy.go @@ -3,6 +3,7 @@ package flagdproxy import ( "context" "fmt" + "reflect" "github.com/go-logr/logr" "github.com/open-feature/open-feature-operator/common/types" @@ -28,6 +29,8 @@ type FlagdProxyHandler struct { Log logr.Logger } +type CreateUpdateFunc func(ctx context.Context, obj client.Object) error + type FlagdProxyConfiguration struct { Port int ManagementPort int @@ -62,43 +65,58 @@ func (f *FlagdProxyHandler) Config() *FlagdProxyConfiguration { return f.config } +func (f *FlagdProxyHandler) createObject(ctx context.Context, obj client.Object) error { + return f.Client.Create(ctx, obj) +} + +func (f *FlagdProxyHandler) updateObject(ctx context.Context, obj client.Object) error { + return f.Client.Update(ctx, obj) +} + func (f *FlagdProxyHandler) HandleFlagdProxy(ctx context.Context) error { - exists, err := f.doesFlagdProxyExist(ctx) + exists, deployment, err := f.doesFlagdProxyExist(ctx) if err != nil { return err } - if !exists { - return f.deployFlagdProxy(ctx) - } - return nil -} -func (f *FlagdProxyHandler) deployFlagdProxy(ctx context.Context) error { - ownerReferences := []metav1.OwnerReference{} ownerReference, err := f.getOwnerReference(ctx) if err != nil { - f.Log.Error(err, "unable to create owner reference for open-feature-operator, not appending") - } else { - ownerReferences = append(ownerReferences, ownerReference) + return err + } + newDeployment := f.newFlagdProxyManifest(ownerReference) + newService := f.newFlagdProxyServiceManifest(ownerReference) + + if !exists { + f.Log.Info("flagd-proxy Deployment does not exist, creating") + return f.deployFlagdProxy(ctx, f.createObject, newDeployment, newService) } + // flagd-proxy exists, need to check if we should update it + if f.shouldUpdateFlagdProxy(deployment, newDeployment) { + f.Log.Info("flagd-proxy Deployment 
out of sync, updating") + return f.deployFlagdProxy(ctx, f.updateObject, newDeployment, newService) + } + f.Log.Info("flagd-proxy Deployment up-to-date") + return nil +} +func (f *FlagdProxyHandler) deployFlagdProxy(ctx context.Context, createUpdateFunc CreateUpdateFunc, deployment *appsV1.Deployment, service *corev1.Service) error { f.Log.Info("deploying the flagd-proxy") - if err := f.Client.Create(ctx, f.newFlagdProxyManifest(ownerReferences)); err != nil && !errors.IsAlreadyExists(err) { + if err := createUpdateFunc(ctx, deployment); err != nil && !errors.IsAlreadyExists(err) { return err } f.Log.Info("deploying the flagd-proxy service") - if err := f.Client.Create(ctx, f.newFlagdProxyServiceManifest(ownerReferences)); err != nil && !errors.IsAlreadyExists(err) { + if err := createUpdateFunc(ctx, service); err != nil && !errors.IsAlreadyExists(err) { return err } return nil } -func (f *FlagdProxyHandler) newFlagdProxyServiceManifest(ownerReferences []metav1.OwnerReference) *corev1.Service { +func (f *FlagdProxyHandler) newFlagdProxyServiceManifest(ownerReference *metav1.OwnerReference) *corev1.Service { return &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: FlagdProxyServiceName, Namespace: f.config.Namespace, - OwnerReferences: ownerReferences, + OwnerReferences: []metav1.OwnerReference{*ownerReference}, }, Spec: corev1.ServiceSpec{ Selector: map[string]string{ @@ -116,7 +134,7 @@ func (f *FlagdProxyHandler) newFlagdProxyServiceManifest(ownerReferences []metav } } -func (f *FlagdProxyHandler) newFlagdProxyManifest(ownerReferences []metav1.OwnerReference) *appsV1.Deployment { +func (f *FlagdProxyHandler) newFlagdProxyManifest(ownerReference *metav1.OwnerReference) *appsV1.Deployment { replicas := int32(1) args := []string{ "start", @@ -135,7 +153,7 @@ func (f *FlagdProxyHandler) newFlagdProxyManifest(ownerReferences []metav1.Owner "app.kubernetes.io/managed-by": ManagedByAnnotationValue, "app.kubernetes.io/version": f.config.Tag, }, - OwnerReferences: 
ownerReferences, + OwnerReferences: []metav1.OwnerReference{*ownerReference}, }, Spec: appsV1.DeploymentSpec{ Replicas: &replicas, @@ -178,31 +196,53 @@ func (f *FlagdProxyHandler) newFlagdProxyManifest(ownerReferences []metav1.Owner } } -func (f *FlagdProxyHandler) doesFlagdProxyExist(ctx context.Context) (bool, error) { +func (f *FlagdProxyHandler) doesFlagdProxyExist(ctx context.Context) (bool, *appsV1.Deployment, error) { d := &appsV1.Deployment{} err := f.Client.Get(ctx, client.ObjectKey{Name: FlagdProxyDeploymentName, Namespace: f.config.Namespace}, d) if err != nil { if errors.IsNotFound(err) { // does not exist, is not ready, no error - return false, nil + return false, nil, nil } // does not exist, is not ready, is in error - return false, err + return false, nil, err } - // exists, at least one replica ready, no error - return true, nil + return true, d, nil } -func (f *FlagdProxyHandler) getOwnerReference(ctx context.Context) (metav1.OwnerReference, error) { +func (f *FlagdProxyHandler) shouldUpdateFlagdProxy(old, new *appsV1.Deployment) bool { + if !isDeployedByOFO(old) { + f.Log.Info("flagd-proxy Deployment not managed by OFO") + return false + } + return !reflect.DeepEqual(old.Spec, new.Spec) +} + +func (f *FlagdProxyHandler) getOperatorDeployment(ctx context.Context) (*appsV1.Deployment, error) { d := &appsV1.Deployment{} if err := f.Client.Get(ctx, client.ObjectKey{Name: f.config.OperatorDeploymentName, Namespace: f.config.Namespace}, d); err != nil { - return metav1.OwnerReference{}, fmt.Errorf("unable to fetch operator deployment to create owner reference: %w", err) + return nil, fmt.Errorf("unable to fetch operator deployment: %w", err) } - return metav1.OwnerReference{ - UID: d.GetUID(), - Name: d.GetName(), - APIVersion: d.APIVersion, - Kind: d.Kind, + return d, nil + +} + +func (f *FlagdProxyHandler) getOwnerReference(ctx context.Context) (*metav1.OwnerReference, error) { + operatorDeployment, err := f.getOperatorDeployment(ctx) + if err != 
nil { + f.Log.Error(err, "unable to create owner reference for open-feature-operator") + return nil, err + } + + return &metav1.OwnerReference{ + UID: operatorDeployment.GetUID(), + Name: operatorDeployment.GetName(), + APIVersion: operatorDeployment.APIVersion, + Kind: operatorDeployment.Kind, }, nil +} +func isDeployedByOFO(d *appsV1.Deployment) bool { + val, ok := d.Labels["app.kubernetes.io/managed-by"] + return ok && val == ManagedByAnnotationValue } diff --git a/common/flagdproxy/flagdproxy_test.go b/common/flagdproxy/flagdproxy_test.go index 49fd2594c..1991a25a5 100644 --- a/common/flagdproxy/flagdproxy_test.go +++ b/common/flagdproxy/flagdproxy_test.go @@ -104,19 +104,21 @@ func TestDoesFlagdProxyExist(t *testing.T) { require.NotNil(t, ph) - res, err := ph.doesFlagdProxyExist(context.TODO()) + res, d, err := ph.doesFlagdProxyExist(context.TODO()) require.Nil(t, err) + require.Nil(t, d) require.False(t, res) err = fakeClient.Create(context.TODO(), deployment) require.Nil(t, err) - res, err = ph.doesFlagdProxyExist(context.TODO()) + res, d, err = ph.doesFlagdProxyExist(context.TODO()) require.Nil(t, err) + require.NotNil(t, d) require.True(t, res) } -func TestFlagdProxyHandler_HandleFlagdProxy_ProxyExists(t *testing.T) { +func TestFlagdProxyHandler_HandleFlagdProxy_ProxyExistsWithBadVersion(t *testing.T) { env := types.EnvConfig{ PodNamespace: "ns", } @@ -124,7 +126,66 @@ func TestFlagdProxyHandler_HandleFlagdProxy_ProxyExists(t *testing.T) { require.NotNil(t, kpConfig) - fakeClient := fake.NewClientBuilder().WithObjects(&v1.Deployment{ + fakeClient := fake.NewClientBuilder().WithObjects(createOFOTestDeployment(env.PodNamespace)).Build() + + ownerRef, err := getTestOFODeploymentOwnerRef(fakeClient, env.PodNamespace) + require.Nil(t, err) + + proxyDeployment := &v1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: kpConfig.Namespace, + Name: FlagdProxyDeploymentName, + OwnerReferences: []metav1.OwnerReference{*ownerRef}, + Labels: map[string]string{ + 
"app.kubernetes.io/managed-by": ManagedByAnnotationValue, + }, + }, + Spec: v1.DeploymentSpec{ + Template: v12.PodTemplateSpec{ + Spec: v12.PodSpec{ + Containers: []v12.Container{ + { + Name: "my-container", + }, + }, + }, + }, + }, + } + + err = fakeClient.Create(context.TODO(), proxyDeployment) + require.Nil(t, err) + + ph := NewFlagdProxyHandler(kpConfig, fakeClient, testr.New(t)) + + require.NotNil(t, ph) + + err = ph.HandleFlagdProxy(context.Background()) + + require.Nil(t, err) + + deployment := &v1.Deployment{} + err = fakeClient.Get(context.Background(), client.ObjectKey{ + Namespace: env.PodNamespace, + Name: FlagdProxyDeploymentName, + }, deployment) + + require.Nil(t, err) + require.NotNil(t, deployment) + + // verify that the existing deployment has been changed + require.Equal(t, "flagd-proxy", deployment.Spec.Template.Spec.Containers[0].Name) +} + +func TestFlagdProxyHandler_HandleFlagdProxy_ProxyExistsWithoutLabel(t *testing.T) { + env := types.EnvConfig{ + PodNamespace: "ns", + } + kpConfig := NewFlagdProxyConfiguration(env) + + require.NotNil(t, kpConfig) + + proxyDeployment := &v1.Deployment{ ObjectMeta: metav1.ObjectMeta{ Namespace: kpConfig.Namespace, Name: FlagdProxyDeploymentName, @@ -140,7 +201,9 @@ func TestFlagdProxyHandler_HandleFlagdProxy_ProxyExists(t *testing.T) { }, }, }, - }).Build() + } + + fakeClient := fake.NewClientBuilder().WithObjects(createOFOTestDeployment(env.PodNamespace), proxyDeployment).Build() ph := NewFlagdProxyHandler(kpConfig, fakeClient, testr.New(t)) @@ -163,6 +226,45 @@ func TestFlagdProxyHandler_HandleFlagdProxy_ProxyExists(t *testing.T) { require.Equal(t, "my-container", deployment.Spec.Template.Spec.Containers[0].Name) } +func TestFlagdProxyHandler_HandleFlagdProxy_ProxyExistsWithNewestVersion(t *testing.T) { + env := types.EnvConfig{ + PodNamespace: "ns", + } + kpConfig := NewFlagdProxyConfiguration(env) + + require.NotNil(t, kpConfig) + + fakeClient := 
fake.NewClientBuilder().WithObjects(createOFOTestDeployment(env.PodNamespace)).Build() + + ph := NewFlagdProxyHandler(kpConfig, fakeClient, testr.New(t)) + + require.NotNil(t, ph) + + ownerRef, err := getTestOFODeploymentOwnerRef(fakeClient, env.PodNamespace) + require.Nil(t, err) + + proxy := ph.newFlagdProxyManifest(ownerRef) + + err = fakeClient.Create(context.TODO(), proxy) + require.Nil(t, err) + + err = ph.HandleFlagdProxy(context.Background()) + + require.Nil(t, err) + + deployment := &v1.Deployment{} + err = fakeClient.Get(context.Background(), client.ObjectKey{ + Namespace: env.PodNamespace, + Name: FlagdProxyDeploymentName, + }, deployment) + + require.Nil(t, err) + require.NotNil(t, deployment) + + // verify that the existing deployment has not been changed + require.Equal(t, "flagd-proxy", deployment.Spec.Template.Spec.Containers[0].Name) +} + func TestFlagdProxyHandler_HandleFlagdProxy_CreateProxy(t *testing.T) { env := types.EnvConfig{ PodNamespace: "ns", @@ -176,7 +278,7 @@ func TestFlagdProxyHandler_HandleFlagdProxy_CreateProxy(t *testing.T) { require.NotNil(t, kpConfig) - fakeClient := fake.NewClientBuilder().Build() + fakeClient := fake.NewClientBuilder().WithObjects(createOFOTestDeployment(env.PodNamespace)).Build() ph := NewFlagdProxyHandler(kpConfig, fakeClient, testr.New(t)) @@ -227,6 +329,13 @@ func TestFlagdProxyHandler_HandleFlagdProxy_CreateProxy(t *testing.T) { "app.kubernetes.io/version": "tag", }, ResourceVersion: "1", + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "apps/v1", + Kind: "Deployment", + Name: operatorDeploymentName, + }, + }, }, Spec: appsV1.DeploymentSpec{ Replicas: &replicas, @@ -288,6 +397,13 @@ func TestFlagdProxyHandler_HandleFlagdProxy_CreateProxy(t *testing.T) { Name: FlagdProxyServiceName, Namespace: "ns", ResourceVersion: "1", + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "apps/v1", + Kind: "Deployment", + Name: operatorDeploymentName, + }, + }, }, Spec: corev1.ServiceSpec{ Selector: 
map[string]string{ @@ -306,3 +422,25 @@ func TestFlagdProxyHandler_HandleFlagdProxy_CreateProxy(t *testing.T) { require.Equal(t, expectedService, service) } + +func createOFOTestDeployment(ns string) *v1.Deployment { + return &v1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: ns, + Name: operatorDeploymentName, + }, + } +} + +func getTestOFODeploymentOwnerRef(c client.Client, ns string) (*metav1.OwnerReference, error) { + d := &appsV1.Deployment{} + if err := c.Get(context.TODO(), client.ObjectKey{Name: operatorDeploymentName, Namespace: ns}, d); err != nil { + return nil, err + } + return &metav1.OwnerReference{ + UID: d.GetUID(), + Name: d.GetName(), + APIVersion: d.APIVersion, + Kind: d.Kind, + }, nil +} diff --git a/controllers/core/featureflagsource/controller_test.go b/controllers/core/featureflagsource/controller_test.go index baba1f4c4..ea4b1e821 100644 --- a/controllers/core/featureflagsource/controller_test.go +++ b/controllers/core/featureflagsource/controller_test.go @@ -85,9 +85,9 @@ func TestFeatureFlagSourceReconciler_Reconcile(t *testing.T) { // setting up fake k8s client var fakeClient client.Client if tt.deployment != nil { - fakeClient = fake.NewClientBuilder().WithScheme(scheme.Scheme).WithObjects(tt.fsConfig, tt.deployment).WithIndex(&appsv1.Deployment{}, fmt.Sprintf("%s/%s", common.OpenFeatureAnnotationPath, common.FeatureFlagSourceAnnotation), common.FeatureFlagSourceIndex).Build() + fakeClient = fake.NewClientBuilder().WithScheme(scheme.Scheme).WithObjects(createOFOTestDeployment(testNamespace), tt.fsConfig, tt.deployment).WithIndex(&appsv1.Deployment{}, fmt.Sprintf("%s/%s", common.OpenFeatureAnnotationPath, common.FeatureFlagSourceAnnotation), common.FeatureFlagSourceIndex).Build() } else { - fakeClient = fake.NewClientBuilder().WithScheme(scheme.Scheme).WithObjects(tt.fsConfig).WithIndex(&appsv1.Deployment{}, fmt.Sprintf("%s/%s", common.OpenFeatureAnnotationPath, common.FeatureFlagSourceAnnotation), 
common.FeatureFlagSourceIndex).Build() + fakeClient = fake.NewClientBuilder().WithScheme(scheme.Scheme).WithObjects(createOFOTestDeployment(testNamespace), tt.fsConfig).WithIndex(&appsv1.Deployment{}, fmt.Sprintf("%s/%s", common.OpenFeatureAnnotationPath, common.FeatureFlagSourceAnnotation), common.FeatureFlagSourceIndex).Build() } kpConfig := flagdproxy.NewFlagdProxyConfiguration(commontypes.EnvConfig{ FlagdProxyImage: "ghcr.io/open-feature/flagd-proxy", @@ -210,3 +210,12 @@ func createTestFSConfig(fsConfigName string, testNamespace string, rollout bool, return fsConfig } + +func createOFOTestDeployment(ns string) *appsv1.Deployment { + return &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: ns, + Name: "open-feature-operator-controller-manager", + }, + } +} diff --git a/docs/installation.md b/docs/installation.md index 86ada3c71..a84a4b3fa 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -28,6 +28,13 @@ helm upgrade --install openfeature openfeature/open-feature-operator helm upgrade --install openfeature openfeature/open-feature-operator ``` +> [!NOTE] +> If you upgrade to OFO `v0.5.4` or higher while using a `flagd-proxy` provider, the instance of +`flagd-proxy` will be automatically upgraded to the latest supported version by the `open-feature-operator`. +The upgrade of `flagd-proxy` will also consider your current `FeatureFlagSource` configuration and adapt +the `flagd-proxy` Deployment accordingly. +If you are upgrading OFO to `v0.5.3` or lower, `flagd-proxy` (if present) won't be upgraded automatically. + #### Upgrading CRDs CRDs are not upgraded automatically with helm (https://helm.sh/docs/chart_best_practices/custom_resource_definitions/). 
diff --git a/docs/quick_start.md b/docs/quick_start.md index 2dedfa004..f412946d6 100644 --- a/docs/quick_start.md +++ b/docs/quick_start.md @@ -22,7 +22,7 @@ kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/ kubectl wait --for=condition=Available=True deploy --all -n 'cert-manager' ``` -> **Note** +> [!NOTE] > Requirement of this dependency is explained in the [installation](./installation.md) guide. #### 3. Install OpenFeature Operator @@ -52,7 +52,7 @@ Next steps focus on adding feature flags, flag source configuration and a worklo kubectl create ns flags ``` -> **Note** +> [!NOTE] > We use the namespace `flags` for flag related custom resources #### 5. Install feature flags definition diff --git a/docs/v1beta_migration.md b/docs/v1beta_migration.md index efdd1a6d3..2b1c74705 100644 --- a/docs/v1beta_migration.md +++ b/docs/v1beta_migration.md @@ -76,5 +76,10 @@ We recommend following migration steps, 3. Install upgraded custom resources 4. Update annotation of your workloads to the latest supported version -If you have used `flagd-proxy` provider, then you have to upgrade the image used by the `flagd-proxy` deployment. -For this, please edit the deployment of `flagd-proxy` to version [v0.3.1](https://github.com/open-feature/flagd/pkgs/container/flagd-proxy/152333134?tag=v0.3.1) or above. +If you have used `flagd-proxy` provider, then you have to upgrade the image used by the `flagd-proxy` deployment. +For this, please edit the deployment of `flagd-proxy` to version [v0.3.1](https://github.com/open-feature/flagd/pkgs/container/flagd-proxy/152333134?tag=v0.3.1) or above. + +> [!NOTE] +> Since OFO version `v0.5.4`, the instance of `flagd-proxy` pod (if used) will be upgraded automatically to the +latest supported version by `open-feature-operator`. +For more information see the [upgrade section](./installation.md#upgrading).