From 323e3f880dc67f448cda6e62ff7c8e62a155e3b3 Mon Sep 17 00:00:00 2001 From: Joel Takvorian Date: Mon, 11 Mar 2024 08:52:52 +0100 Subject: [PATCH] NETOBSERV-1517: remove IPFIX agent mode (#579) * NETOBSERV-1517: remove IPFIX agent mode - Remove controller code for OVS configmap / CNO configuration - Remove related tests (and move console plugin related tests to its dedicated file) - Remove FLP ingester reconciler (was only used with ipfix) - Note that the IPFIX api is not removed to not introduce breaking changes in existing API, however it is now documented that using IPFIX would have no effect * Update config/descriptions/upstream.md Co-authored-by: Olivier Cazade --------- Co-authored-by: Olivier Cazade --- README.md | 11 +- .../v1beta1/flowcollector_types.go | 12 +- .../v1beta2/flowcollector_types.go | 12 +- .../flows.netobserv.io_flowcollectors.yaml | 38 +- ...observ-operator.clusterserviceversion.yaml | 11 +- .../flows.netobserv.io_flowcollectors.yaml | 38 +- config/descriptions/upstream.md | 11 +- .../consoleplugin/consoleplugin_objects.go | 40 +- controllers/ebpf/agent_controller.go | 10 +- controllers/flowcollector_controller.go | 61 - .../flowcollector_controller_console_test.go | 60 +- .../flowcollector_controller_iso_test.go | 2 +- controllers/flowcollector_controller_test.go | 313 +--- controllers/flp/flp_common_objects.go | 37 +- controllers/flp/flp_controller.go | 1 - controllers/flp/flp_controller_test.go | 36 +- controllers/flp/flp_ingest_objects.go | 85 - controllers/flp/flp_ingest_reconciler.go | 171 -- controllers/flp/flp_monolith_objects.go | 11 +- controllers/flp/flp_pipeline_builder.go | 9 - controllers/flp/flp_test.go | 52 +- controllers/ovs/flowsconfig_cno_reconciler.go | 133 -- .../ovs/flowsconfig_ovnk_reconciler.go | 132 -- controllers/ovs/flowsconfig_types.go | 51 - controllers/suite_test.go | 1 - docs/FlowCollector.md | 8 +- go.mod | 1 - go.sum | 2 - pkg/helper/flowcollector.go | 14 +- pkg/loki/labels.go | 16 +- pkg/manager/status/status_manager.go | 1 - pkg/metrics/predefined_metrics.go | 6 +- .../mitchellh/mapstructure/CHANGELOG.md | 96 - .../github.com/mitchellh/mapstructure/LICENSE | 21 - .../mitchellh/mapstructure/README.md | 46 - .../mitchellh/mapstructure/decode_hooks.go | 279 --- .../mitchellh/mapstructure/error.go | 50 - .../mitchellh/mapstructure/mapstructure.go | 1540 ----------------- vendor/modules.txt | 3 - 39 files changed, 195 insertions(+), 3226 deletions(-) delete mode 100644 controllers/flp/flp_ingest_objects.go delete mode 100644 controllers/flp/flp_ingest_reconciler.go delete mode 100644 controllers/ovs/flowsconfig_cno_reconciler.go delete mode 100644 controllers/ovs/flowsconfig_ovnk_reconciler.go delete mode 100644 controllers/ovs/flowsconfig_types.go delete mode 100644 vendor/github.com/mitchellh/mapstructure/CHANGELOG.md delete mode 100644 vendor/github.com/mitchellh/mapstructure/LICENSE delete mode 100644 vendor/github.com/mitchellh/mapstructure/README.md delete mode 100644 vendor/github.com/mitchellh/mapstructure/decode_hooks.go delete mode 100644 vendor/github.com/mitchellh/mapstructure/error.go delete mode 100644 vendor/github.com/mitchellh/mapstructure/mapstructure.go diff --git a/README.md b/README.md index f4eafa48a..9cbcbbee4 100644 --- a/README.md +++ b/README.md @@ -3,9 +3,10 @@ ![GitHub release (latest by date)](https://img.shields.io/github/v/release/netobserv/network-observability-operator) [![Go Report 
Card](https://goreportcard.com/badge/github.com/netobserv/network-observability-operator)](https://goreportcard.com/report/github.com/netobserv/network-observability-operator)
-NetObserv Operator is a Kubernetes / OpenShift operator for network observability. It deploys a monitoring pipeline to collect and enrich network flows. These flows can be produced by the NetObserv eBPF agent, or by any device or CNI able to export flows in IPFIX format, such as OVN-Kubernetes.
-
-The operator provides dashboards, metrics, and keeps flows accessible in a queryable log store, Grafana Loki. When used in OpenShift, new views are available in the Console.
+NetObserv Operator is a Kubernetes / OpenShift operator for network observability. It deploys a monitoring pipeline that consists of:
+- the NetObserv eBPF agent, which generates network flows from captured packets
+- Flowlogs-pipeline, a component that collects, enriches and exports these flows
+- when used in OpenShift, a Console plugin for flow visualization, with powerful filtering options, a topology representation and more

 ## Getting Started

@@ -122,11 +123,9 @@ As it operates cluster-wide, only a single `FlowCollector` is allowed, and it ha

 A couple of settings deserve special attention:

-- Agent (`spec.agent.type`) can be `eBPF` (default) or `IPFIX`. eBPF is recommended, as it should work in more situations and offers better performances. If you can't, or don't want to use eBPF, note that the IPFIX option is fully functional only when using [OVN-Kubernetes](https://github.com/ovn-org/ovn-kubernetes/) CNI. Other CNIs are not officially supported, but you may still be able to configure them manually if they allow IPFIX exports.
-
 - Agent features (`spec.agent.ebpf.features`) can enable more features such as tracking packet drops, TCP latency (RTT) and DNS requests and responses.

-- Sampling (`spec.agent.ebpf.sampling` and `spec.agent.ipfix.sampling`): a value of `100` means: one flow every 100 is sampled. `1` means all flows are sampled. The lower it is, the more flows you get, and the more accurate are derived metrics, but the higher amount of resources are consumed. By default, sampling is set to 50 (ie. 1:50) for eBPF and 400 (1:400) for IPFIX. Note that more sampled flows also means more storage needed. We recommend to start with default values and refine empirically, to figure out which setting your cluster can manage.
+- Sampling (`spec.agent.ebpf.sampling`): a value of `100` means one flow every 100 is sampled, and `1` means all flows are sampled. The lower the value, the more flows you get and the more accurate the derived metrics, but the more resources are consumed. By default, sampling is set to 50 (i.e. 1:50). Note that more sampled flows also means more storage is needed. We recommend starting with the default values and refining empirically, to figure out which setting your cluster can manage.

 - Loki (`spec.loki`): configure here how to reach Loki. The default URL values match the Loki quick install paths mentioned in the _Getting Started_ section, but you may have to configure differently if you used another installation method. You will find more information in our guides for deploying Loki: [with Loki Operator](https://github.com/netobserv/documents/blob/main/loki_operator.md), or an alternative ["distributed Loki" guide](https://github.com/netobserv/documents/blob/main/loki_distributed.md). You should set `spec.loki.mode` according to the chosen installation method, for instance use `LokiStack` if you use the Loki Operator.
diff --git a/apis/flowcollector/v1beta1/flowcollector_types.go b/apis/flowcollector/v1beta1/flowcollector_types.go index d442f6fa8..3c2cd4f67 100644 --- a/apis/flowcollector/v1beta1/flowcollector_types.go +++ b/apis/flowcollector/v1beta1/flowcollector_types.go @@ -86,12 +86,10 @@ type FlowCollectorSpec struct { // allow defining both fields. // +union type FlowCollectorAgent struct { - // `type` selects the flows tracing agent. Possible values are:
- // - `EBPF` (default) to use NetObserv eBPF agent.
- // - `IPFIX` [deprecated (*)] - to use the legacy IPFIX collector.
- // `EBPF` is recommended as it offers better performances and should work regardless of the CNI installed on the cluster. - // `IPFIX` works with OVN-Kubernetes CNI (other CNIs could work if they support exporting IPFIX, - // but they would require manual configuration). + // `type` [deprecated (*)] selects the flows tracing agent. The only possible value is `EBPF` (default), to use NetObserv eBPF agent.
+ // Previously, using an IPFIX collector was allowed, but was deprecated and it is now removed.
+ // Setting `IPFIX` is ignored and the eBPF agent is used anyway.
+ // Since there is only a single option here, this field will be removed in a future API version.
 // +unionDiscriminator
 // +kubebuilder:validation:Enum:="EBPF";"IPFIX"
 // +kubebuilder:default:=EBPF
@@ -443,7 +441,7 @@ type FlowCollectorFLP struct {
 EnableKubeProbes *bool `json:"enableKubeProbes,omitempty"`
 //+kubebuilder:default:=true
- // `dropUnusedFields` allows, when set to `true`, to drop fields that are known to be unused by OVS, to save storage space.
+ // `dropUnusedFields` [deprecated (*)] this setting is not used anymore.
 DropUnusedFields *bool `json:"dropUnusedFields,omitempty"`
 //+kubebuilder:validation:Minimum=0
diff --git a/apis/flowcollector/v1beta2/flowcollector_types.go b/apis/flowcollector/v1beta2/flowcollector_types.go
index b94163fca..1f553e64e 100644
--- a/apis/flowcollector/v1beta2/flowcollector_types.go
+++ b/apis/flowcollector/v1beta2/flowcollector_types.go
@@ -93,12 +93,10 @@ const (
 // allow defining both fields.
 // +union
 type FlowCollectorAgent struct {
- // `type` selects the flows tracing agent. Possible values are:
- // - `eBPF` (default) to use NetObserv eBPF agent.
- // - `IPFIX` [deprecated (*)] - to use the legacy IPFIX collector.
- // `eBPF` is recommended as it offers better performances and should work regardless of the CNI installed on the cluster. - // `IPFIX` works with OVN-Kubernetes CNI (other CNIs could work if they support exporting IPFIX, - // but they would require manual configuration). + // `type` [deprecated (*)] selects the flows tracing agent. The only possible value is `eBPF` (default), to use NetObserv eBPF agent.
+ // Previously, using an IPFIX collector was allowed, but was deprecated and it is now removed.
+ // Setting `IPFIX` is ignored and the eBPF agent is used anyway.
+ // Since there is only a single option here, this field will be removed in a future API version.
 // +unionDiscriminator
 // +kubebuilder:validation:Enum:="eBPF";"IPFIX"
 // +kubebuilder:default:=eBPF
@@ -920,7 +918,7 @@ type AdvancedProcessorConfig struct {
 //+kubebuilder:default:=true
 //+optional
- // `dropUnusedFields` allows, when set to `true`, to drop fields that are known to be unused by OVS, to save storage space.
+ // `dropUnusedFields` [deprecated (*)] this setting is not used anymore.
 DropUnusedFields *bool `json:"dropUnusedFields,omitempty"`
 //+kubebuilder:default:="30s"
diff --git a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml
index ba829a68b..718ac95cd 100644
--- a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml
+++ b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml
@@ -445,13 +445,13 @@ spec:
 type: object
 type:
 default: EBPF
- description: '`type` selects the flows tracing agent. Possible
- values are:
- `EBPF` (default) to use NetObserv eBPF agent.
- - `IPFIX` [deprecated (*)] - to use the legacy IPFIX collector.
- `EBPF` is recommended as it offers better performances and should - work regardless of the CNI installed on the cluster. `IPFIX` - works with OVN-Kubernetes CNI (other CNIs could work if they - support exporting IPFIX, but they would require manual configuration).' + description: '`type` [deprecated (*)] selects the flows tracing + agent. The only possible value is `EBPF` (default), to use NetObserv + eBPF agent.
Previously, using an IPFIX collector was allowed, + but was deprecated and it is now removed.
Setting `IPFIX`
+ is ignored and the eBPF agent is used anyway. Since there is only
+ a single option here, this field will be removed in a future
+ API version.'
 enum:
 - EBPF
 - IPFIX
@@ -1875,9 +1875,8 @@ spec:
 type: object
 dropUnusedFields:
 default: true
- description: '`dropUnusedFields` allows, when set to `true`, to
- drop fields that are known to be unused by OVS, to save storage
- space.'
+ description: '`dropUnusedFields` [deprecated (*)] this setting
+ is not used anymore.'
 type: boolean
 enableKubeProbes:
 default: true
@@ -3249,13 +3248,13 @@ spec:
 type: object
 type:
 default: eBPF
- description: '`type` selects the flows tracing agent. Possible
- values are:
- `eBPF` (default) to use NetObserv eBPF agent.
- - `IPFIX` [deprecated (*)] - to use the legacy IPFIX collector.
- `eBPF` is recommended as it offers better performances and should - work regardless of the CNI installed on the cluster. `IPFIX` - works with OVN-Kubernetes CNI (other CNIs could work if they - support exporting IPFIX, but they would require manual configuration).' + description: '`type` [deprecated (*)] selects the flows tracing + agent. The only possible value is `eBPF` (default), to use NetObserv + eBPF agent.
Previously, using an IPFIX collector was allowed, + but was deprecated and it is now removed.
Setting `IPFIX`
+ is ignored and the eBPF agent is used anyway. Since there is only
+ a single option here, this field will be removed in a future
+ API version.'
 enum:
 - eBPF
 - IPFIX
@@ -4941,9 +4940,8 @@ spec:
 type: string
 dropUnusedFields:
 default: true
- description: '`dropUnusedFields` allows, when set to `true`,
- to drop fields that are known to be unused by OVS, to save
- storage space.'
+ description: '`dropUnusedFields` [deprecated (*)] this setting
+ is not used anymore.'
 type: boolean
 enableKubeProbes:
 default: true
diff --git a/bundle/manifests/netobserv-operator.clusterserviceversion.yaml b/bundle/manifests/netobserv-operator.clusterserviceversion.yaml
index 6d0f90b3c..1231b6ead 100644
--- a/bundle/manifests/netobserv-operator.clusterserviceversion.yaml
+++ b/bundle/manifests/netobserv-operator.clusterserviceversion.yaml
@@ -773,9 +773,10 @@ spec:
 name: flowmetrics.flows.netobserv.io
 version: v1alpha1
 description: |-
- NetObserv Operator is an OpenShift / Kubernetes operator for network observability. It deploys a monitoring pipeline to collect and enrich network flows. These flows can be produced by the NetObserv eBPF agent, or by any device or CNI able to export flows in IPFIX format, such as OVN-Kubernetes.
-
- The operator provides dashboards, metrics, and keeps flows accessible in a queryable log store, Grafana Loki. When used in OpenShift, new views are available in the Console.
+ NetObserv Operator is an OpenShift / Kubernetes operator for network observability. It deploys a monitoring pipeline that consists of:
+ - the NetObserv eBPF agent, which generates network flows from captured packets
+ - Flowlogs-pipeline, a component that collects, enriches and exports these flows
+ - when used in OpenShift, a Console plugin for flow visualization, with powerful filtering options, a topology representation and more

 ## Dependencies

@@ -819,9 +820,7 @@ spec:

 A couple of settings deserve special attention:

- - Agent (`spec.agent.type`) can be `EBPF` (default) or `IPFIX`. eBPF is recommended, as it should work in more situations and offers better performances. If you can't, or don't want to use eBPF, note that the IPFIX option is fully functional only when using [OVN-Kubernetes](https://github.com/ovn-org/ovn-kubernetes/) CNI. Other CNIs are not officially supported, but you might still be able to configure them manually if they allow IPFIX exports.
-
- - Sampling (`spec.agent.ebpf.sampling` and `spec.agent.ipfix.sampling`): a value of `100` means: one flow every 100 is sampled. `1` means all flows are sampled. The lower it is, the more flows you get, and the more accurate are derived metrics, but the higher amount of resources are consumed. By default, sampling is set to 50 (ie. 1:50) for eBPF and 400 (1:400) for IPFIX. Note that more sampled flows also means more storage needed. We recommend to start with default values and refine empirically, to figure out which setting your cluster can manage.
+ - Sampling (`spec.agent.ebpf.sampling`): a value of `100` means one flow every 100 is sampled, and `1` means all flows are sampled. The lower the value, the more flows you get and the more accurate the derived metrics, but the more resources are consumed. By default, sampling is set to 50 (i.e. 1:50). Note that more sampled flows also means more storage is needed. We recommend starting with the default values and refining empirically, to figure out which setting your cluster can manage.

 - Loki (`spec.loki`): configure here how to reach Loki.
The default values match the Loki quick install paths mentioned above, but you might have to configure differently if you used another installation method. diff --git a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml index 86fd1be99..3287c13cb 100644 --- a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml +++ b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml @@ -432,13 +432,13 @@ spec: type: object type: default: EBPF - description: '`type` selects the flows tracing agent. Possible - values are:
- `EBPF` (default) to use NetObserv eBPF agent.
- - `IPFIX` [deprecated (*)] - to use the legacy IPFIX collector.
- `EBPF` is recommended as it offers better performances and should - work regardless of the CNI installed on the cluster. `IPFIX` - works with OVN-Kubernetes CNI (other CNIs could work if they - support exporting IPFIX, but they would require manual configuration).' + description: '`type` [deprecated (*)] selects the flows tracing + agent. The only possible value is `EBPF` (default), to use NetObserv + eBPF agent.
Previously, using an IPFIX collector was allowed, + but was deprecated and it is now removed.
Setting `IPFIX`
+ is ignored and the eBPF agent is used anyway. Since there is only
+ a single option here, this field will be removed in a future
+ API version.'
 enum:
 - EBPF
 - IPFIX
@@ -1862,9 +1862,8 @@ spec:
 type: object
 dropUnusedFields:
 default: true
- description: '`dropUnusedFields` allows, when set to `true`, to
- drop fields that are known to be unused by OVS, to save storage
- space.'
+ description: '`dropUnusedFields` [deprecated (*)] this setting
+ is not used anymore.'
 type: boolean
 enableKubeProbes:
 default: true
@@ -3236,13 +3235,13 @@ spec:
 type: object
 type:
 default: eBPF
- description: '`type` selects the flows tracing agent. Possible
- values are:
- `eBPF` (default) to use NetObserv eBPF agent.
- - `IPFIX` [deprecated (*)] - to use the legacy IPFIX collector.
- `eBPF` is recommended as it offers better performances and should - work regardless of the CNI installed on the cluster. `IPFIX` - works with OVN-Kubernetes CNI (other CNIs could work if they - support exporting IPFIX, but they would require manual configuration).' + description: '`type` [deprecated (*)] selects the flows tracing + agent. The only possible value is `eBPF` (default), to use NetObserv + eBPF agent.
Previously, using an IPFIX collector was allowed, + but was deprecated and it is now removed.
Setting `IPFIX`
+ is ignored and the eBPF agent is used anyway. Since there is only
+ a single option here, this field will be removed in a future
+ API version.'
 enum:
 - eBPF
 - IPFIX
@@ -4928,9 +4927,8 @@ spec:
 type: string
 dropUnusedFields:
 default: true
- description: '`dropUnusedFields` allows, when set to `true`,
- to drop fields that are known to be unused by OVS, to save
- storage space.'
+ description: '`dropUnusedFields` [deprecated (*)] this setting
+ is not used anymore.'
 type: boolean
 enableKubeProbes:
 default: true
diff --git a/config/descriptions/upstream.md b/config/descriptions/upstream.md
index bc666174d..44312258f 100644
--- a/config/descriptions/upstream.md
+++ b/config/descriptions/upstream.md
@@ -1,6 +1,7 @@
-NetObserv Operator is an OpenShift / Kubernetes operator for network observability. It deploys a monitoring pipeline to collect and enrich network flows. These flows can be produced by the NetObserv eBPF agent, or by any device or CNI able to export flows in IPFIX format, such as OVN-Kubernetes.
-
-The operator provides dashboards, metrics, and keeps flows accessible in a queryable log store, Grafana Loki. When used in OpenShift, new views are available in the Console.
+NetObserv Operator is an OpenShift / Kubernetes operator for network observability. It deploys a monitoring pipeline that consists of:
+- the NetObserv eBPF agent, which generates network flows from captured packets
+- Flowlogs-pipeline, a component that collects, enriches and exports these flows
+- when used in OpenShift, a Console plugin for flow visualization, with powerful filtering options, a topology representation and more

 ## Dependencies

@@ -44,9 +45,7 @@ As it operates cluster-wide, only a single `FlowCollector` is allowed, and it ha

 A couple of settings deserve special attention:

-- Agent (`spec.agent.type`) can be `EBPF` (default) or `IPFIX`. eBPF is recommended, as it should work in more situations and offers better performances. If you can't, or don't want to use eBPF, note that the IPFIX option is fully functional only when using [OVN-Kubernetes](https://github.com/ovn-org/ovn-kubernetes/) CNI. Other CNIs are not officially supported, but you might still be able to configure them manually if they allow IPFIX exports.
-
-- Sampling (`spec.agent.ebpf.sampling` and `spec.agent.ipfix.sampling`): a value of `100` means: one flow every 100 is sampled. `1` means all flows are sampled. The lower it is, the more flows you get, and the more accurate are derived metrics, but the higher amount of resources are consumed. By default, sampling is set to 50 (ie. 1:50) for eBPF and 400 (1:400) for IPFIX. Note that more sampled flows also means more storage needed. We recommend to start with default values and refine empirically, to figure out which setting your cluster can manage.
+- Sampling (`spec.agent.ebpf.sampling`): a value of `100` means one flow every 100 is sampled, and `1` means all flows are sampled. The lower the value, the more flows you get and the more accurate the derived metrics, but the more resources are consumed. By default, sampling is set to 50 (i.e. 1:50). Note that more sampled flows also means more storage is needed. We recommend starting with the default values and refining empirically, to figure out which setting your cluster can manage.

 - Loki (`spec.loki`): configure here how to reach Loki. The default values match the Loki quick install paths mentioned above, but you might have to configure differently if you used another installation method.
diff --git a/controllers/consoleplugin/consoleplugin_objects.go b/controllers/consoleplugin/consoleplugin_objects.go index 86ffdcaef..efaa0cb82 100644 --- a/controllers/consoleplugin/consoleplugin_objects.go +++ b/controllers/consoleplugin/consoleplugin_objects.go @@ -359,32 +359,30 @@ func (b *builder) setFrontendConfig(fconf *config.FrontendConfig) error { if err != nil { return err } - if helper.UseEBPF(b.desired) { - if helper.IsPktDropEnabled(&b.desired.Agent.EBPF) { - fconf.Features = append(fconf.Features, "pktDrop") - } + if helper.IsPktDropEnabled(&b.desired.Agent.EBPF) { + fconf.Features = append(fconf.Features, "pktDrop") + } - if helper.IsDNSTrackingEnabled(&b.desired.Agent.EBPF) { - fconf.Features = append(fconf.Features, "dnsTracking") - } + if helper.IsDNSTrackingEnabled(&b.desired.Agent.EBPF) { + fconf.Features = append(fconf.Features, "dnsTracking") + } - if helper.IsFlowRTTEnabled(&b.desired.Agent.EBPF) { - fconf.Features = append(fconf.Features, "flowRTT") - } + if helper.IsFlowRTTEnabled(&b.desired.Agent.EBPF) { + fconf.Features = append(fconf.Features, "flowRTT") + } - if b.desired.Agent.EBPF.Advanced != nil { - if v, ok := b.desired.Agent.EBPF.Advanced.Env[ebpf.EnvDedupeJustMark]; ok { - dedupJustMark, err = strconv.ParseBool(v) - if err != nil { - return err - } + if b.desired.Agent.EBPF.Advanced != nil { + if v, ok := b.desired.Agent.EBPF.Advanced.Env[ebpf.EnvDedupeJustMark]; ok { + dedupJustMark, err = strconv.ParseBool(v) + if err != nil { + return err } + } - if v, ok := b.desired.Agent.EBPF.Advanced.Env[ebpf.EnvDedupeMerge]; ok { - dedupMerge, err = strconv.ParseBool(v) - if err != nil { - return err - } + if v, ok := b.desired.Agent.EBPF.Advanced.Env[ebpf.EnvDedupeMerge]; ok { + dedupMerge, err = strconv.ParseBool(v) + if err != nil { + return err } } } diff --git a/controllers/ebpf/agent_controller.go b/controllers/ebpf/agent_controller.go index 6de3b1c60..54915060e 100644 --- a/controllers/ebpf/agent_controller.go +++ b/controllers/ebpf/agent_controller.go @@ -126,16 +126,14 @@ func (c *AgentController) Reconcile(ctx context.Context, target *flowslatest.Flo return err } - if !helper.UseEBPF(&target.Spec) || c.PreviousPrivilegedNamespace() != c.PrivilegedNamespace() { + if c.PreviousPrivilegedNamespace() != c.PrivilegedNamespace() { c.Managed.TryDeleteAll(ctx) if current == nil { - rlog.Info("nothing to do, as the requested agent is not eBPF", "currentAgent", target.Spec.Agent) + rlog.Info("nothing to do, namespace already cleaned up", "currentAgent", target.Spec.Agent) return nil } - // If the user has changed the agent type or changed the target namespace, we need to manually - // undeploy the agent - rlog.Info("user changed the agent type, or the target namespace. 
Deleting eBPF agent", "currentAgent", target.Spec.Agent) + rlog.Info("namespace cleanup: deleting eBPF agent", "currentAgent", target.Spec.Agent) if err := c.Delete(ctx, current); err != nil { if errors.IsNotFound(err) { return nil @@ -201,7 +199,7 @@ func newMountPropagationMode(m corev1.MountPropagationMode) *corev1.MountPropaga } func (c *AgentController) desired(ctx context.Context, coll *flowslatest.FlowCollector, rlog logr.Logger) (*v1.DaemonSet, error) { - if coll == nil || !helper.UseEBPF(&coll.Spec) { + if coll == nil { return nil, nil } version := helper.ExtractVersion(c.Image) diff --git a/controllers/flowcollector_controller.go b/controllers/flowcollector_controller.go index 02fb69220..6f3bb03d5 100644 --- a/controllers/flowcollector_controller.go +++ b/controllers/flowcollector_controller.go @@ -11,13 +11,11 @@ import ( corev1 "k8s.io/api/core/v1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/log" flowslatest "github.com/netobserv/network-observability-operator/apis/flowcollector/v1beta2" "github.com/netobserv/network-observability-operator/controllers/consoleplugin" "github.com/netobserv/network-observability-operator/controllers/ebpf" - "github.com/netobserv/network-observability-operator/controllers/ovs" "github.com/netobserv/network-observability-operator/controllers/reconcilers" "github.com/netobserv/network-observability-operator/pkg/cleanup" "github.com/netobserv/network-observability-operator/pkg/helper" @@ -26,11 +24,6 @@ import ( "github.com/netobserv/network-observability-operator/pkg/watchers" ) -const ( - ovsFlowsConfigMapName = "ovs-flows-config" - flowsFinalizer = "flows.netobserv.io/finalizer" -) - // FlowCollectorReconciler reconciles a FlowCollector object type FlowCollectorReconciler struct { client.Client @@ -125,10 +118,6 @@ func (r *FlowCollectorReconciler) reconcile(ctx context.Context, clh *helper.Cli loki := helper.NewLokiConfig(&desired.Spec.Loki, ns) reconcilersInfo := r.newCommonInfo(clh, ns, previousNamespace, &loki) - if ret, err := r.checkFinalizer(ctx, desired, &reconcilersInfo); ret { - return err - } - if err := cleanup.CleanPastReferences(ctx, r.Client, ns); err != nil { return err } @@ -155,19 +144,6 @@ func (r *FlowCollectorReconciler) reconcile(ctx context.Context, clh *helper.Cli } } - // OVS config map for CNO - if r.mgr.HasCNO() { - ovsConfigController := ovs.NewFlowsConfigCNOController(&reconcilersInfo, desired.Spec.Agent.IPFIX.ClusterNetworkOperator.Namespace, ovsFlowsConfigMapName) - if err := ovsConfigController.Reconcile(ctx, desired); err != nil { - return r.status.Error("ReconcileCNOFailed", err) - } - } else { - ovsConfigController := ovs.NewFlowsConfigOVNKController(&reconcilersInfo, desired.Spec.Agent.IPFIX.OVNKubernetes) - if err := ovsConfigController.Reconcile(ctx, desired); err != nil { - return r.status.Error("ReconcileOVNKFailed", err) - } - } - // eBPF agent ebpfAgentController := ebpf.NewAgentController(reconcilersInfo.NewInstance(r.mgr.Config.EBPFAgentImage, r.status)) if err := ebpfAgentController.Reconcile(ctx, desired); err != nil { @@ -185,43 +161,6 @@ func (r *FlowCollectorReconciler) reconcile(ctx context.Context, clh *helper.Cli return nil } -// checkFinalizer returns true (and/or error) if the calling function needs to return -func (r *FlowCollectorReconciler) checkFinalizer(ctx context.Context, desired *flowslatest.FlowCollector, info *reconcilers.Common) (bool, error) { - 
if !desired.ObjectMeta.DeletionTimestamp.IsZero() { - if controllerutil.ContainsFinalizer(desired, flowsFinalizer) { - // Run finalization logic - if err := r.finalize(ctx, desired, info); err != nil { - return true, err - } - // Remove finalizer - controllerutil.RemoveFinalizer(desired, flowsFinalizer) - err := r.Update(ctx, desired) - return true, err - } - return true, nil - } - - // Add finalizer for this CR - if !controllerutil.ContainsFinalizer(desired, flowsFinalizer) { - controllerutil.AddFinalizer(desired, flowsFinalizer) - if err := r.Update(ctx, desired); err != nil { - return true, err - } - } - - return false, nil -} - -func (r *FlowCollectorReconciler) finalize(ctx context.Context, desired *flowslatest.FlowCollector, info *reconcilers.Common) error { - if !r.mgr.HasCNO() { - ovsConfigController := ovs.NewFlowsConfigOVNKController(info, desired.Spec.Agent.IPFIX.OVNKubernetes) - if err := ovsConfigController.Finalize(ctx, desired); err != nil { - return fmt.Errorf("failed to finalize ovn-kubernetes reconciler: %w", err) - } - } - return nil -} - func (r *FlowCollectorReconciler) newCommonInfo(clh *helper.Client, ns, prevNs string, loki *helper.LokiConfig) reconcilers.Common { return reconcilers.Common{ Client: *clh, diff --git a/controllers/flowcollector_controller_console_test.go b/controllers/flowcollector_controller_console_test.go index f5e958674..46c077daa 100644 --- a/controllers/flowcollector_controller_console_test.go +++ b/controllers/flowcollector_controller_console_test.go @@ -8,12 +8,14 @@ import ( appsv1 "k8s.io/api/apps/v1" ascv2 "k8s.io/api/autoscaling/v2" v1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/utils/ptr" flowslatest "github.com/netobserv/network-observability-operator/apis/flowcollector/v1beta2" + "github.com/netobserv/network-observability-operator/controllers/constants" . 
"github.com/netobserv/network-observability-operator/controllers/controllerstest" ) @@ -38,6 +40,7 @@ func flowCollectorConsolePluginSpecs() { consoleCRKey := types.NamespacedName{ Name: "cluster", } + rbKeyPlugin := types.NamespacedName{Name: constants.PluginName} BeforeEach(func() { // Add any setup steps that needs to be executed before each test @@ -72,7 +75,7 @@ func flowCollectorConsolePluginSpecs() { Spec: flowslatest.FlowCollectorSpec{ Namespace: cpNamespace, DeploymentModel: flowslatest.DeploymentModelDirect, - Agent: flowslatest.FlowCollectorAgent{Type: "IPFIX"}, + Agent: flowslatest.FlowCollectorAgent{Type: flowslatest.AgentEBPF}, ConsolePlugin: flowslatest.FlowCollectorConsolePlugin{ Enable: ptr.To(true), ImagePullPolicy: "Never", @@ -138,6 +141,15 @@ func flowCollectorConsolePluginSpecs() { By("Creating the console plugin configmap") Eventually(getConfigMapData(configKey), timeout, interval).Should(ContainSubstring("url: http://loki:3100/")) + + By("Expecting to create console plugin role binding") + rb := rbacv1.ClusterRoleBinding{} + Eventually(func() interface{} { + return k8sClient.Get(ctx, rbKeyPlugin, &rb) + }, timeout, interval).Should(Succeed()) + Expect(rb.Subjects).Should(HaveLen(1)) + Expect(rb.Subjects[0].Name).Should(Equal("netobserv-plugin")) + Expect(rb.RoleRef.Name).Should(Equal("netobserv-plugin")) }) It("Should update successfully", func() { @@ -335,6 +347,52 @@ func flowCollectorConsolePluginSpecs() { }) }) + Context("Changing namespace", func() { + const otherNamespace = "other-namespace" + cpKey2 := types.NamespacedName{ + Name: "netobserv-plugin", + Namespace: otherNamespace, + } + + It("Should update namespace successfully", func() { + updateCR(crKey, func(fc *flowslatest.FlowCollector) { + fc.Spec.Namespace = otherNamespace + }) + }) + + It("Should redeploy console plugin in new namespace", func() { + By("Expecting deployment in previous namespace to be deleted") + Eventually(func() interface{} { + return k8sClient.Get(ctx, cpKey, &appsv1.Deployment{}) + }, timeout, interval).Should(MatchError(`deployments.apps "netobserv-plugin" not found`)) + + By("Expecting service in previous namespace to be deleted") + Eventually(func() interface{} { + return k8sClient.Get(ctx, cpKey, &v1.Service{}) + }, timeout, interval).Should(MatchError(`services "netobserv-plugin" not found`)) + + By("Expecting service account in previous namespace to be deleted") + Eventually(func() interface{} { + return k8sClient.Get(ctx, cpKey, &v1.ServiceAccount{}) + }, timeout, interval).Should(MatchError(`serviceaccounts "netobserv-plugin" not found`)) + + By("Expecting deployment to be created in new namespace") + Eventually(func() interface{} { + return k8sClient.Get(ctx, cpKey2, &appsv1.Deployment{}) + }, timeout, interval).Should(Succeed()) + + By("Expecting service to be created in new namespace") + Eventually(func() interface{} { + return k8sClient.Get(ctx, cpKey2, &v1.Service{}) + }, timeout, interval).Should(Succeed()) + + By("Expecting service account to be created in new namespace") + Eventually(func() interface{} { + return k8sClient.Get(ctx, cpKey2, &v1.ServiceAccount{}) + }, timeout, interval).Should(Succeed()) + }) + }) + Context("Cleanup", func() { It("Should delete CR", func() { cleanupCR(crKey) diff --git a/controllers/flowcollector_controller_iso_test.go b/controllers/flowcollector_controller_iso_test.go index 05b71e8f9..8ffb9b9b1 100644 --- a/controllers/flowcollector_controller_iso_test.go +++ b/controllers/flowcollector_controller_iso_test.go @@ -78,7 +78,7 @@ func 
flowCollectorIsoSpecs() { EnableKubeProbes: ptr.To(false), DropUnusedFields: ptr.To(false), }, - LogTypes: &outputRecordTypes, + LogTypes: ptr.To(flowslatest.LogTypeAll), Metrics: flowslatest.FLPMetrics{ Server: flowslatest.MetricsServerConfig{ Port: 12347, diff --git a/controllers/flowcollector_controller_test.go b/controllers/flowcollector_controller_test.go index a7f5f2896..0b149f205 100644 --- a/controllers/flowcollector_controller_test.go +++ b/controllers/flowcollector_controller_test.go @@ -1,37 +1,19 @@ package controllers import ( - "strings" - "time" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - appsv1 "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" - - rbacv1 "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/utils/ptr" - "sigs.k8s.io/controller-runtime/pkg/client" flowslatest "github.com/netobserv/network-observability-operator/apis/flowcollector/v1beta2" - "github.com/netobserv/network-observability-operator/controllers/constants" - . "github.com/netobserv/network-observability-operator/controllers/controllerstest" "github.com/netobserv/network-observability-operator/pkg/test" ) const ( - timeout = test.Timeout - interval = test.Interval - conntrackEndTimeout = 10 * time.Second - conntrackTerminatingTimeout = 5 * time.Second - conntrackHeartbeatInterval = 30 * time.Second + timeout = test.Timeout + interval = test.Interval ) var ( - outputRecordTypes = flowslatest.LogTypeAll - updateCR = func(key types.NamespacedName, updater func(*flowslatest.FlowCollector)) { + updateCR = func(key types.NamespacedName, updater func(*flowslatest.FlowCollector)) { test.UpdateCR(ctx, k8sClient, key, updater) } getCR = func(key types.NamespacedName) *flowslatest.FlowCollector { @@ -41,292 +23,3 @@ var ( test.CleanupCR(ctx, k8sClient, key) } ) - -// nolint:cyclop -func flowCollectorControllerSpecs() { - const operatorNamespace = "main-namespace" - const otherNamespace = "other-namespace" - crKey := types.NamespacedName{ - Name: "cluster", - } - ovsConfigMapKey := types.NamespacedName{ - Name: "ovs-flows-config", - Namespace: "openshift-network-operator", - } - cpKey1 := types.NamespacedName{ - Name: "netobserv-plugin", - Namespace: operatorNamespace, - } - cpKey2 := types.NamespacedName{ - Name: "netobserv-plugin", - Namespace: otherNamespace, - } - rbKeyPlugin := types.NamespacedName{Name: constants.PluginName} - - // Created objects to cleanup - cleanupList := []client.Object{} - - BeforeEach(func() { - // Add any setup steps that needs to be executed before each test - }) - - AfterEach(func() { - // Add any teardown steps that needs to be executed after each test - }) - - Context("Without Kafka", func() { - It("Should create successfully", func() { - created := &flowslatest.FlowCollector{ - ObjectMeta: metav1.ObjectMeta{ - Name: crKey.Name, - }, - Spec: flowslatest.FlowCollectorSpec{ - Namespace: operatorNamespace, - DeploymentModel: flowslatest.DeploymentModelDirect, - Processor: flowslatest.FlowCollectorFLP{ - ImagePullPolicy: "Never", - LogLevel: "error", - Advanced: &flowslatest.AdvancedProcessorConfig{ - Env: map[string]string{ - "GOGC": "200", - }, - Port: ptr.To(int32(9999)), - ConversationHeartbeatInterval: &metav1.Duration{ - Duration: conntrackHeartbeatInterval, - }, - ConversationEndTimeout: &metav1.Duration{ - Duration: conntrackEndTimeout, - }, - ConversationTerminatingTimeout: &metav1.Duration{ - Duration: conntrackTerminatingTimeout, - }, - }, - LogTypes: &outputRecordTypes, - - Metrics: 
flowslatest.FLPMetrics{ - IncludeList: &[]flowslatest.FLPMetric{"node_ingress_bytes_total", "namespace_ingress_bytes_total", "workload_ingress_bytes_total"}, - }, - }, - Agent: flowslatest.FlowCollectorAgent{ - Type: "IPFIX", - IPFIX: flowslatest.FlowCollectorIPFIX{ - Sampling: 200, - }, - }, - ConsolePlugin: flowslatest.FlowCollectorConsolePlugin{ - Enable: ptr.To(true), - ImagePullPolicy: "Never", - PortNaming: flowslatest.ConsolePluginPortConfig{ - Enable: ptr.To(true), - PortNames: map[string]string{ - "3100": "loki", - }, - }, - }, - }, - } - - // Create - Expect(k8sClient.Create(ctx, created)).Should(Succeed()) - - By("Expecting to create console plugin role binding") - rb3 := rbacv1.ClusterRoleBinding{} - Eventually(func() interface{} { - return k8sClient.Get(ctx, rbKeyPlugin, &rb3) - }, timeout, interval).Should(Succeed()) - Expect(rb3.Subjects).Should(HaveLen(1)) - Expect(rb3.Subjects[0].Name).Should(Equal("netobserv-plugin")) - Expect(rb3.RoleRef.Name).Should(Equal("netobserv-plugin")) - - By("Creating the ovn-flows-configmap with the configuration from the FlowCollector") - Eventually(func() interface{} { - ofc := v1.ConfigMap{} - if err := k8sClient.Get(ctx, ovsConfigMapKey, &ofc); err != nil { - return err - } - return ofc.Data - }, timeout, interval).Should(Equal(map[string]string{ - "sampling": "200", - "nodePort": "9999", - "cacheMaxFlows": "400", - "cacheActiveTimeout": "20s", - })) - }) - - It("Should update successfully", func() { - updateCR(crKey, func(fc *flowslatest.FlowCollector) { - fc.Spec.Processor = flowslatest.FlowCollectorFLP{ - ImagePullPolicy: "Never", - LogLevel: "error", - Advanced: &flowslatest.AdvancedProcessorConfig{ - Env: map[string]string{ - // we'll test that env vars are sorted, to keep idempotency - "GOMAXPROCS": "33", - "GOGC": "400", - }, - Port: ptr.To(int32(7891)), - ConversationHeartbeatInterval: &metav1.Duration{ - Duration: conntrackHeartbeatInterval, - }, - ConversationEndTimeout: &metav1.Duration{ - Duration: conntrackEndTimeout, - }, - ConversationTerminatingTimeout: &metav1.Duration{ - Duration: conntrackTerminatingTimeout, - }, - }, - LogTypes: &outputRecordTypes, - - Metrics: flowslatest.FLPMetrics{ - IncludeList: &[]flowslatest.FLPMetric{"node_ingress_bytes_total"}, - DisableAlerts: []flowslatest.FLPAlert{flowslatest.AlertLokiError}, - }, - } - fc.Spec.Loki = flowslatest.FlowCollectorLoki{} - fc.Spec.Agent.IPFIX = flowslatest.FlowCollectorIPFIX{ - Sampling: 400, - CacheActiveTimeout: "30s", - CacheMaxFlows: 1000, - } - }) - - By("Expecting to create the ovn-flows-configmap with the configuration from the FlowCollector", func() { - Eventually(func() interface{} { - ofc := v1.ConfigMap{} - if err := k8sClient.Get(ctx, ovsConfigMapKey, &ofc); err != nil { - return err - } - return ofc.Data - }, timeout, interval).Should(Equal(map[string]string{ - "sampling": "400", - "nodePort": "7891", - "cacheMaxFlows": "1000", - "cacheActiveTimeout": "30s", - })) - }) - }) - - It("Should prevent undesired sampling-everything", func() { - Eventually(func() error { - fc := flowslatest.FlowCollector{} - if err := k8sClient.Get(ctx, crKey, &fc); err != nil { - return err - } - fc.Spec.Agent.IPFIX.Sampling = 1 - return k8sClient.Update(ctx, &fc) - }).Should(Satisfy(func(err error) bool { - return err != nil && strings.Contains(err.Error(), "spec.agent.ipfix.sampling: Invalid value: 1") - }), "Error expected for invalid sampling value") - - Eventually(func() error { - fc := flowslatest.FlowCollector{} - if err := k8sClient.Get(ctx, crKey, &fc); err != nil { 
- return err - } - fc.Spec.Agent.IPFIX.Sampling = 10 - fc.Spec.Agent.IPFIX.ForceSampleAll = true - return k8sClient.Update(ctx, &fc) - }).Should(Succeed()) - - By("Expecting that ovn-flows-configmap is updated with sampling=1") - Eventually(func() interface{} { - ofc := v1.ConfigMap{} - if err := k8sClient.Get(ctx, ovsConfigMapKey, &ofc); err != nil { - return err - } - return ofc.Data["sampling"] - }, timeout, interval).Should(Equal("1")) - }) - }) - - Context("Changing namespace", func() { - It("Should update namespace successfully", func() { - updateCR(crKey, func(fc *flowslatest.FlowCollector) { - fc.Spec.Processor.Advanced.Port = ptr.To(int32(9999)) - fc.Spec.Namespace = otherNamespace - }) - }) - - It("Should redeploy console plugin in new namespace", func() { - By("Expecting deployment in previous namespace to be deleted") - Eventually(func() interface{} { - return k8sClient.Get(ctx, cpKey1, &appsv1.Deployment{}) - }, timeout, interval).Should(MatchError(`deployments.apps "netobserv-plugin" not found`)) - - By("Expecting service in previous namespace to be deleted") - Eventually(func() interface{} { - return k8sClient.Get(ctx, cpKey1, &v1.Service{}) - }, timeout, interval).Should(MatchError(`services "netobserv-plugin" not found`)) - - By("Expecting service account in previous namespace to be deleted") - Eventually(func() interface{} { - return k8sClient.Get(ctx, cpKey1, &v1.ServiceAccount{}) - }, timeout, interval).Should(MatchError(`serviceaccounts "netobserv-plugin" not found`)) - - By("Expecting deployment to be created in new namespace") - Eventually(func() interface{} { - return k8sClient.Get(ctx, cpKey2, &appsv1.Deployment{}) - }, timeout, interval).Should(Succeed()) - - By("Expecting service to be created in new namespace") - Eventually(func() interface{} { - return k8sClient.Get(ctx, cpKey2, &v1.Service{}) - }, timeout, interval).Should(Succeed()) - - By("Expecting service account to be created in new namespace") - Eventually(func() interface{} { - return k8sClient.Get(ctx, cpKey2, &v1.ServiceAccount{}) - }, timeout, interval).Should(Succeed()) - }) - }) - - Context("Checking CR ownership", func() { - It("Should be garbage collected", func() { - // Retrieve CR to get its UID - By("Getting the CR") - flowCR := getCR(crKey) - - By("Expecting console plugin deployment to be garbage collected") - Eventually(func() interface{} { - d := appsv1.Deployment{} - _ = k8sClient.Get(ctx, cpKey2, &d) - return &d - }, timeout, interval).Should(BeGarbageCollectedBy(flowCR)) - - By("Expecting console plugin service to be garbage collected") - Eventually(func() interface{} { - svc := v1.Service{} - _ = k8sClient.Get(ctx, cpKey2, &svc) - return &svc - }, timeout, interval).Should(BeGarbageCollectedBy(flowCR)) - - By("Expecting console plugin service account to be garbage collected") - Eventually(func() interface{} { - svcAcc := v1.ServiceAccount{} - _ = k8sClient.Get(ctx, cpKey2, &svcAcc) - return &svcAcc - }, timeout, interval).Should(BeGarbageCollectedBy(flowCR)) - - By("Expecting ovn-flows-configmap to be garbage collected") - Eventually(func() interface{} { - cm := v1.ConfigMap{} - _ = k8sClient.Get(ctx, ovsConfigMapKey, &cm) - return &cm - }, timeout, interval).Should(BeGarbageCollectedBy(flowCR)) - }) - }) - - Context("Cleanup", func() { - It("Should delete CR", func() { - cleanupCR(crKey) - }) - - It("Should cleanup other data", func() { - for _, obj := range cleanupList { - Eventually(func() error { - return k8sClient.Delete(ctx, obj) - }, timeout, interval).Should(Succeed()) - } - }) 
- }) -} diff --git a/controllers/flp/flp_common_objects.go b/controllers/flp/flp_common_objects.go index 036c06bda..bc0a94011 100644 --- a/controllers/flp/flp_common_objects.go +++ b/controllers/flp/flp_common_objects.go @@ -115,13 +115,6 @@ func (b *builder) serviceMonitorName() string { return serviceMonitorName(b.conf func (b *builder) prometheusRuleName() string { return prometheusRuleName(b.confKind) } func (b *builder) Pipeline() *PipelineBuilder { return b.pipeline } -func (b *builder) NewIPFIXPipeline() PipelineBuilder { - return b.initPipeline(config.NewCollectorPipeline("ipfix", api.IngestCollector{ - Port: int(*helper.GetAdvancedProcessorConfig(b.desired.Processor.Advanced).Port), - HostName: "0.0.0.0", - })) -} - func (b *builder) NewGRPCPipeline() PipelineBuilder { return b.initPipeline(config.NewGRPCPipeline("grpc", api.IngestGRPCProto{ Port: int(*helper.GetAdvancedProcessorConfig(b.desired.Processor.Advanced).Port), @@ -130,9 +123,6 @@ func (b *builder) NewGRPCPipeline() PipelineBuilder { func (b *builder) NewKafkaPipeline() PipelineBuilder { decoder := api.Decoder{Type: "protobuf"} - if helper.UseIPFIX(b.desired) { - decoder = api.Decoder{Type: "json"} - } return b.initPipeline(config.NewKafkaPipeline("kafka-read", api.IngestKafka{ Brokers: []string{b.desired.Kafka.Address}, Topic: b.desired.Kafka.Topic, @@ -151,13 +141,6 @@ func (b *builder) initPipeline(ingest config.PipelineBuilderStage) PipelineBuild return pipeline } -func (b *builder) portProtocol() corev1.Protocol { - if helper.UseEBPF(b.desired) { - return corev1.ProtocolTCP - } - return corev1.ProtocolUDP -} - func (b *builder) podTemplate(hasHostPort, hostNetwork bool, annotations map[string]string) corev1.PodTemplateSpec { debugConfig := helper.GetAdvancedProcessorConfig(b.desired.Processor.Advanced) var ports []corev1.ContainerPort @@ -167,7 +150,7 @@ func (b *builder) podTemplate(hasHostPort, hostNetwork bool, annotations map[str Name: constants.FLPPortName, HostPort: *debugConfig.Port, ContainerPort: *debugConfig.Port, - Protocol: b.portProtocol(), + Protocol: corev1.ProtocolTCP, }} // This allows deploying an instance in the master node, the same technique used in the // companion ovnkube-node daemonset definition @@ -522,3 +505,21 @@ func (b *builder) prometheusRule() *monitoringv1.PrometheusRule { } return &flpPrometheusRuleObject } + +func buildClusterRoleIngester(useOpenShiftSCC bool) *rbacv1.ClusterRole { + cr := rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: name(ConfKafkaIngester), + }, + Rules: []rbacv1.PolicyRule{}, + } + if useOpenShiftSCC { + cr.Rules = append(cr.Rules, rbacv1.PolicyRule{ + APIGroups: []string{"security.openshift.io"}, + Verbs: []string{"use"}, + Resources: []string{"securitycontextconstraints"}, + ResourceNames: []string{"hostnetwork"}, + }) + } + return &cr +} diff --git a/controllers/flp/flp_controller.go b/controllers/flp/flp_controller.go index 688f1b0a3..b65d4c6a1 100644 --- a/controllers/flp/flp_controller.go +++ b/controllers/flp/flp_controller.go @@ -144,7 +144,6 @@ func (r *Reconciler) reconcile(ctx context.Context, clh *helper.Client, fc *flow reconcilers := []subReconciler{ newMonolithReconciler(cmn.NewInstance(r.mgr.Config.FlowlogsPipelineImage, r.mgr.Status.ForComponent(status.FLPMonolith))), newTransformerReconciler(cmn.NewInstance(r.mgr.Config.FlowlogsPipelineImage, r.mgr.Status.ForComponent(status.FLPTransformOnly))), - newIngesterReconciler(cmn.NewInstance(r.mgr.Config.FlowlogsPipelineImage, r.mgr.Status.ForComponent(status.FLPIngestOnly))), } // 
Check namespace changed diff --git a/controllers/flp/flp_controller_test.go b/controllers/flp/flp_controller_test.go index a88f4cccb..8aef2e59d 100644 --- a/controllers/flp/flp_controller_test.go +++ b/controllers/flp/flp_controller_test.go @@ -58,10 +58,6 @@ func ControllerSpecs() { Name: constants.FLPName, Namespace: otherNamespace, } - flpKeyKafkaIngester := types.NamespacedName{ - Name: constants.FLPName + FlpConfSuffix[ConfKafkaIngester], - Namespace: operatorNamespace, - } flpKeyKafkaTransformer := types.NamespacedName{ Name: constants.FLPName + FlpConfSuffix[ConfKafkaTransformer], Namespace: operatorNamespace, @@ -159,12 +155,12 @@ func ControllerSpecs() { Expect(rb2.Subjects[0].Name).Should(Equal("flowlogs-pipeline")) Expect(rb2.RoleRef.Name).Should(Equal("flowlogs-pipeline-transformer")) - By("Not expecting transformer role binding") + By("Not expecting ingester role binding") Eventually(func() interface{} { return k8sClient.Get(ctx, rbKeyIngest, &rbacv1.ClusterRoleBinding{}) }, timeout, interval).Should(MatchError(`clusterrolebindings.rbac.authorization.k8s.io "flowlogs-pipeline-ingester-role" not found`)) - By("Not expecting ingester role binding") + By("Not expecting transformer role binding") Eventually(func() interface{} { return k8sClient.Get(ctx, rbKeyTransform, &rbacv1.ClusterRoleBinding{}) }, timeout, interval).Should(MatchError(`clusterrolebindings.rbac.authorization.k8s.io "flowlogs-pipeline-transformer-role" not found`)) @@ -207,8 +203,6 @@ func ControllerSpecs() { DisableAlerts: []flowslatest.FLPAlert{flowslatest.AlertLokiError}, }, } - // Using IPFIX should change proto to UDP - fc.Spec.Agent.Type = flowslatest.AgentIPFIX fc.Spec.Loki = flowslatest.FlowCollectorLoki{} }) @@ -243,7 +237,7 @@ func ControllerSpecs() { Name: constants.FLPPortName, HostPort: 7891, ContainerPort: 7891, - Protocol: "UDP", + Protocol: "TCP", })) Expect(cnt.Env).To(Equal([]v1.EnvVar{ {Name: "GOGC", Value: "400"}, {Name: "GOMAXPROCS", Value: "33"}, {Name: "GODEBUG", Value: "http2server=0"}, @@ -291,12 +285,7 @@ func ControllerSpecs() { }) }) - It("Should deploy kafka ingester and transformer", func() { - By("Expecting ingester daemonset to be created") - Eventually(func() interface{} { - return k8sClient.Get(ctx, flpKeyKafkaIngester, &appsv1.DaemonSet{}) - }, timeout, interval).Should(Succeed()) - + It("Should deploy kafka transformer", func() { By("Expecting transformer deployment to be created") Eventually(func() interface{} { return k8sClient.Get(ctx, flpKeyKafkaTransformer, &appsv1.Deployment{}) @@ -307,14 +296,10 @@ func ControllerSpecs() { return k8sClient.Get(ctx, flpKeyKafkaTransformer, &v1.Service{}) }, timeout, interval).Should(MatchError(`services "flowlogs-pipeline-transformer" not found`)) - By("Expecting to create two different flowlogs-pipeline role bindings") - rb1 := rbacv1.ClusterRoleBinding{} + By("Expecting to create transformer flowlogs-pipeline role bindings") Eventually(func() interface{} { - return k8sClient.Get(ctx, rbKeyIngest, &rb1) - }, timeout, interval).Should(Succeed()) - Expect(rb1.Subjects).Should(HaveLen(1)) - Expect(rb1.Subjects[0].Name).Should(Equal("flowlogs-pipeline-ingester")) - Expect(rb1.RoleRef.Name).Should(Equal("flowlogs-pipeline-ingester")) + return k8sClient.Get(ctx, rbKeyIngest, &rbacv1.ClusterRoleBinding{}) + }, timeout, interval).Should(MatchError(`clusterrolebindings.rbac.authorization.k8s.io "flowlogs-pipeline-ingester-role" not found`)) rb2 := rbacv1.ClusterRoleBinding{} Eventually(func() interface{} { @@ -411,12 +396,7 @@ func 
ControllerSpecs() { }, timeout, interval).Should(Succeed()) }) - It("Should delete kafka ingester and transformer", func() { - By("Expecting ingester daemonset to be deleted") - Eventually(func() interface{} { - return k8sClient.Get(ctx, flpKeyKafkaIngester, &appsv1.DaemonSet{}) - }, timeout, interval).Should(MatchError(`daemonsets.apps "flowlogs-pipeline-ingester" not found`)) - + It("Should delete kafka transformer", func() { By("Expecting transformer deployment to be deleted") Eventually(func() interface{} { return k8sClient.Get(ctx, flpKeyKafkaTransformer, &appsv1.Deployment{}) diff --git a/controllers/flp/flp_ingest_objects.go b/controllers/flp/flp_ingest_objects.go deleted file mode 100644 index a8916725b..000000000 --- a/controllers/flp/flp_ingest_objects.go +++ /dev/null @@ -1,85 +0,0 @@ -package flp - -import ( - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - flowslatest "github.com/netobserv/network-observability-operator/apis/flowcollector/v1beta2" - metricslatest "github.com/netobserv/network-observability-operator/apis/flowmetrics/v1alpha1" - "github.com/netobserv/network-observability-operator/controllers/reconcilers" - "github.com/netobserv/network-observability-operator/pkg/helper" -) - -type ingestBuilder struct { - generic builder -} - -func newIngestBuilder(info *reconcilers.Instance, desired *flowslatest.FlowCollectorSpec, flowMetrics *metricslatest.FlowMetricList) (ingestBuilder, error) { - gen, err := NewBuilder(info, desired, flowMetrics, ConfKafkaIngester) - return ingestBuilder{ - generic: gen, - }, err -} - -func (b *ingestBuilder) daemonSet(annotations map[string]string) *appsv1.DaemonSet { - pod := b.generic.podTemplate(true /*listens*/, !b.generic.info.UseOpenShiftSCC, annotations) - return &appsv1.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: b.generic.name(), - Namespace: b.generic.info.Namespace, - Labels: b.generic.labels, - }, - Spec: appsv1.DaemonSetSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: b.generic.selector, - }, - Template: pod, - }, - } -} - -func (b *ingestBuilder) configMap() (*corev1.ConfigMap, string, error) { - var pipeline PipelineBuilder - if helper.UseIPFIX(b.generic.desired) { - // IPFIX collector - pipeline = b.generic.NewIPFIXPipeline() - } else { - // GRPC collector (eBPF agent) - pipeline = b.generic.NewGRPCPipeline() - } - - pipeline.AddKafkaWriteStage("kafka-write", &b.generic.desired.Kafka) - return b.generic.ConfigMap() -} - -func (b *ingestBuilder) promService() *corev1.Service { - return b.generic.promService() -} - -func buildClusterRoleIngester(useOpenShiftSCC bool) *rbacv1.ClusterRole { - cr := rbacv1.ClusterRole{ - ObjectMeta: metav1.ObjectMeta{ - Name: name(ConfKafkaIngester), - }, - Rules: []rbacv1.PolicyRule{}, - } - if useOpenShiftSCC { - cr.Rules = append(cr.Rules, rbacv1.PolicyRule{ - APIGroups: []string{"security.openshift.io"}, - Verbs: []string{"use"}, - Resources: []string{"securitycontextconstraints"}, - ResourceNames: []string{"hostnetwork"}, - }) - } - return &cr -} - -func (b *ingestBuilder) serviceAccount() *corev1.ServiceAccount { - return b.generic.serviceAccount() -} - -func (b *ingestBuilder) clusterRoleBinding() *rbacv1.ClusterRoleBinding { - return b.generic.clusterRoleBinding(ConfKafkaIngester, false) -} diff --git a/controllers/flp/flp_ingest_reconciler.go b/controllers/flp/flp_ingest_reconciler.go deleted file mode 100644 index b867d18e6..000000000 --- a/controllers/flp/flp_ingest_reconciler.go 
+++ /dev/null @@ -1,171 +0,0 @@ -package flp - -import ( - "context" - - monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" - "k8s.io/apimachinery/pkg/api/equality" - "sigs.k8s.io/controller-runtime/pkg/log" - - flowslatest "github.com/netobserv/network-observability-operator/apis/flowcollector/v1beta2" - metricslatest "github.com/netobserv/network-observability-operator/apis/flowmetrics/v1alpha1" - "github.com/netobserv/network-observability-operator/controllers/constants" - "github.com/netobserv/network-observability-operator/controllers/reconcilers" - "github.com/netobserv/network-observability-operator/pkg/helper" - "github.com/netobserv/network-observability-operator/pkg/manager/status" -) - -type ingesterReconciler struct { - *reconcilers.Instance - daemonSet *appsv1.DaemonSet - promService *corev1.Service - serviceAccount *corev1.ServiceAccount - configMap *corev1.ConfigMap - roleBinding *rbacv1.ClusterRoleBinding - serviceMonitor *monitoringv1.ServiceMonitor - prometheusRule *monitoringv1.PrometheusRule -} - -func newIngesterReconciler(cmn *reconcilers.Instance) *ingesterReconciler { - name := name(ConfKafkaIngester) - rec := ingesterReconciler{ - Instance: cmn, - daemonSet: cmn.Managed.NewDaemonSet(name), - promService: cmn.Managed.NewService(promServiceName(ConfKafkaIngester)), - serviceAccount: cmn.Managed.NewServiceAccount(name), - configMap: cmn.Managed.NewConfigMap(configMapName(ConfKafkaIngester)), - roleBinding: cmn.Managed.NewCRB(RoleBindingName(ConfKafkaIngester)), - } - if cmn.AvailableAPIs.HasSvcMonitor() { - rec.serviceMonitor = cmn.Managed.NewServiceMonitor(serviceMonitorName(ConfKafkaIngester)) - } - if cmn.AvailableAPIs.HasPromRule() { - rec.prometheusRule = cmn.Managed.NewPrometheusRule(prometheusRuleName(ConfKafkaIngester)) - } - return &rec -} - -func (r *ingesterReconciler) context(ctx context.Context) context.Context { - l := log.FromContext(ctx).WithName("ingester") - return log.IntoContext(ctx, l) -} - -// cleanupNamespace cleans up old namespace -func (r *ingesterReconciler) cleanupNamespace(ctx context.Context) { - r.Managed.CleanupPreviousNamespace(ctx) -} - -func (r *ingesterReconciler) getStatus() *status.Instance { - return &r.Status -} - -func (r *ingesterReconciler) reconcile(ctx context.Context, desired *flowslatest.FlowCollector, flowMetrics *metricslatest.FlowMetricList) error { - // Retrieve current owned objects - err := r.Managed.FetchAll(ctx) - if err != nil { - return err - } - - if !helper.UseKafka(&desired.Spec) || helper.UseEBPF(&desired.Spec) { - r.Status.SetUnused("Ingester only used with Kafka and without eBPF") - r.Managed.TryDeleteAll(ctx) - return nil - } - - r.Status.SetReady() // will be overidden if necessary, as error or pending - - builder, err := newIngestBuilder(r.Instance, &desired.Spec, flowMetrics) - if err != nil { - return err - } - newCM, configDigest, err := builder.configMap() - if err != nil { - return err - } - annotations := map[string]string{ - constants.PodConfigurationDigest: configDigest, - } - if !r.Managed.Exists(r.configMap) { - if err := r.CreateOwned(ctx, newCM); err != nil { - return err - } - } else if !equality.Semantic.DeepDerivative(newCM.Data, r.configMap.Data) { - if err := r.UpdateIfOwned(ctx, r.configMap, newCM); err != nil { - return err - } - } - - if err := r.reconcilePermissions(ctx, &builder); err != nil { - return err - } - - err = r.reconcilePrometheusService(ctx, 
&builder) - if err != nil { - return err - } - - // Watch for Kafka certificate if necessary; need to restart pods in case of cert rotation - if err = annotateKafkaCerts(ctx, r.Common, &desired.Spec.Kafka, "kafka", annotations); err != nil { - return err - } - - // Watch for monitoring caCert - if err = reconcileMonitoringCerts(ctx, r.Common, &desired.Spec.Processor.Metrics.Server.TLS, r.Namespace); err != nil { - return err - } - - return r.reconcileDaemonSet(ctx, builder.daemonSet(annotations)) -} - -func (r *ingesterReconciler) reconcilePrometheusService(ctx context.Context, builder *ingestBuilder) error { - report := helper.NewChangeReport("FLP prometheus service") - defer report.LogIfNeeded(ctx) - - if err := r.ReconcileService(ctx, r.promService, builder.promService(), &report); err != nil { - return err - } - if r.AvailableAPIs.HasSvcMonitor() { - serviceMonitor := builder.generic.serviceMonitor() - if err := reconcilers.GenericReconcile(ctx, r.Managed, &r.Client, r.serviceMonitor, serviceMonitor, &report, helper.ServiceMonitorChanged); err != nil { - return err - } - } - if r.AvailableAPIs.HasPromRule() { - promRules := builder.generic.prometheusRule() - if err := reconcilers.GenericReconcile(ctx, r.Managed, &r.Client, r.prometheusRule, promRules, &report, helper.PrometheusRuleChanged); err != nil { - return err - } - } - return nil -} - -func (r *ingesterReconciler) reconcileDaemonSet(ctx context.Context, desiredDS *appsv1.DaemonSet) error { - report := helper.NewChangeReport("FLP DaemonSet") - defer report.LogIfNeeded(ctx) - - return reconcilers.ReconcileDaemonSet( - ctx, - r.Instance, - r.daemonSet, - desiredDS, - constants.FLPName, - &report, - ) -} - -func (r *ingesterReconciler) reconcilePermissions(ctx context.Context, builder *ingestBuilder) error { - if !r.Managed.Exists(r.serviceAccount) { - return r.CreateOwned(ctx, builder.serviceAccount()) - } // We only configure name, update is not needed for now - - cr := buildClusterRoleIngester(r.UseOpenShiftSCC) - if err := r.ReconcileClusterRole(ctx, cr); err != nil { - return err - } - - desired := builder.clusterRoleBinding() - return r.ReconcileClusterRoleBinding(ctx, desired) -} diff --git a/controllers/flp/flp_monolith_objects.go b/controllers/flp/flp_monolith_objects.go index 3c3e4db22..6c6311e8e 100644 --- a/controllers/flp/flp_monolith_objects.go +++ b/controllers/flp/flp_monolith_objects.go @@ -9,7 +9,6 @@ import ( flowslatest "github.com/netobserv/network-observability-operator/apis/flowcollector/v1beta2" metricslatest "github.com/netobserv/network-observability-operator/apis/flowmetrics/v1alpha1" "github.com/netobserv/network-observability-operator/controllers/reconcilers" - "github.com/netobserv/network-observability-operator/pkg/helper" ) type monolithBuilder struct { @@ -41,15 +40,7 @@ func (b *monolithBuilder) daemonSet(annotations map[string]string) *appsv1.Daemo } func (b *monolithBuilder) configMap() (*corev1.ConfigMap, string, error) { - var pipeline PipelineBuilder - if helper.UseIPFIX(b.generic.desired) { - // IPFIX collector - pipeline = b.generic.NewIPFIXPipeline() - } else { - // GRPC collector (eBPF agent) - pipeline = b.generic.NewGRPCPipeline() - } - + pipeline := b.generic.NewGRPCPipeline() err := pipeline.AddProcessorStages() if err != nil { return nil, "", err diff --git a/controllers/flp/flp_pipeline_builder.go b/controllers/flp/flp_pipeline_builder.go index 102664768..aa7008540 100644 --- a/controllers/flp/flp_pipeline_builder.go +++ b/controllers/flp/flp_pipeline_builder.go @@ -14,7 +14,6 @@ 
import ( metricslatest "github.com/netobserv/network-observability-operator/apis/flowmetrics/v1alpha1" "github.com/netobserv/network-observability-operator/controllers/constants" "github.com/netobserv/network-observability-operator/pkg/conversion" - "github.com/netobserv/network-observability-operator/pkg/filters" "github.com/netobserv/network-observability-operator/pkg/helper" "github.com/netobserv/network-observability-operator/pkg/loki" "github.com/netobserv/network-observability-operator/pkg/metrics" @@ -387,14 +386,6 @@ func (b *PipelineBuilder) addTransformFilter(lastStage config.PipelineBuilderSta } } - // Filter-out unused fields? - if *helper.GetAdvancedProcessorConfig(b.desired.Processor.Advanced).DropUnusedFields { - if helper.UseIPFIX(b.desired) { - rules := filters.GetOVSGoflowUnusedRules() - transformFilterRules = append(transformFilterRules, rules...) - } - // Else: nothing for eBPF at the moment - } if len(transformFilterRules) > 0 { lastStage = lastStage.TransformFilter("filter", api.TransformFilter{ Rules: transformFilterRules, diff --git a/controllers/flp/flp_test.go b/controllers/flp/flp_test.go index afdb92176..e57f5d14d 100644 --- a/controllers/flp/flp_test.go +++ b/controllers/flp/flp_test.go @@ -619,7 +619,6 @@ func TestConfigMapShouldDeserializeAsJSONWithLokiManual(t *testing.T) { ns := "namespace" cfg := getConfig() - cfg.Agent.Type = flowslatest.AgentIPFIX loki := cfg.Loki b := monoBuilder(ns, &cfg) cm, digest, err := b.configMap() @@ -639,7 +638,7 @@ func TestConfigMapShouldDeserializeAsJSONWithLokiManual(t *testing.T) { params := decoded.Parameters assert.Len(params, 6) - assert.Equal(*cfg.Processor.Advanced.Port, int32(params[0].Ingest.Collector.Port)) + assert.Equal(*cfg.Processor.Advanced.Port, int32(params[0].Ingest.GRPC.Port)) lokiCfg := params[3].Write.Loki assert.Equal(loki.Manual.IngesterURL, lokiCfg.URL) @@ -657,6 +656,7 @@ func TestConfigMapShouldDeserializeAsJSONWithLokiManual(t *testing.T) { "DstK8S_Type", "K8S_FlowLayer", "_RecordType", + "FlowDirection", }, lokiCfg.Labels) assert.Equal(`{app="netobserv-flowcollector"}`, fmt.Sprintf("%v", lokiCfg.StaticLabels)) @@ -757,7 +757,6 @@ func TestLabels(t *testing.T) { info := reconcilers.Common{Namespace: "ns"} builder, _ := newMonolithBuilder(info.NewInstance(image, status.Instance{}), &cfg, &metricslatest.FlowMetricList{}) tBuilder, _ := newTransfoBuilder(info.NewInstance(image, status.Instance{}), &cfg, &metricslatest.FlowMetricList{}) - iBuilder, _ := newIngestBuilder(info.NewInstance(image, status.Instance{}), &cfg, &metricslatest.FlowMetricList{}) // Deployment depl := tBuilder.deployment(annotate("digest")) @@ -773,13 +772,6 @@ func TestLabels(t *testing.T) { assert.Equal("dev", ds.Labels["version"]) assert.Equal("dev", ds.Spec.Template.Labels["version"]) - // DaemonSet (ingester) - ds2 := iBuilder.daemonSet(annotate("digest")) - assert.Equal("flowlogs-pipeline-ingester", ds2.Labels["app"]) - assert.Equal("flowlogs-pipeline-ingester", ds2.Spec.Template.Labels["app"]) - assert.Equal("dev", ds2.Labels["version"]) - assert.Equal("dev", ds2.Spec.Template.Labels["version"]) - // Service svc := builder.promService() assert.Equal("flowlogs-pipeline", svc.Labels["app"]) @@ -794,9 +786,6 @@ func TestLabels(t *testing.T) { smTrans := tBuilder.generic.serviceMonitor() assert.Equal("flowlogs-pipeline-transformer-monitor", smTrans.Name) assert.Equal("flowlogs-pipeline-transformer", smTrans.Spec.Selector.MatchLabels["app"]) - smIng := iBuilder.generic.serviceMonitor() - 
assert.Equal("flowlogs-pipeline-ingester-monitor", smIng.Name) - assert.Equal("flowlogs-pipeline-ingester", smIng.Spec.Selector.MatchLabels["app"]) } // This function validate that each stage has its matching parameter @@ -837,19 +826,8 @@ func TestPipelineConfig(t *testing.T) { pipeline, ) - // Kafka Ingester - cfg.DeploymentModel = flowslatest.DeploymentModelKafka - info := reconcilers.Common{Namespace: ns} - bi, _ := newIngestBuilder(info.NewInstance(image, status.Instance{}), &cfg, &metricslatest.FlowMetricList{}) - cm, _, err = bi.configMap() - assert.NoError(err) - _, pipeline = validatePipelineConfig(t, cm) - assert.Equal( - `[{"name":"grpc"},{"name":"kafka-write","follows":"grpc"}]`, - pipeline, - ) - // Kafka Transformer + cfg.DeploymentModel = flowslatest.DeploymentModelKafka bt := transfBuilder(ns, &cfg) cm, _, err = bt.configMap() assert.NoError(err) @@ -860,30 +838,6 @@ func TestPipelineConfig(t *testing.T) { ) } -func TestPipelineConfigDropUnused(t *testing.T) { - assert := assert.New(t) - - // Single config - ns := "namespace" - cfg := getConfig() - cfg.Agent.Type = flowslatest.AgentIPFIX - cfg.Processor.LogLevel = "info" - cfg.Processor.Advanced.DropUnusedFields = ptr.To(true) - b := monoBuilder(ns, &cfg) - cm, _, err := b.configMap() - assert.NoError(err) - cfs, pipeline := validatePipelineConfig(t, cm) - assert.Equal( - `[{"name":"ipfix"},{"name":"filter","follows":"ipfix"},{"name":"extract_conntrack","follows":"filter"},{"name":"enrich","follows":"extract_conntrack"},{"name":"loki","follows":"enrich"},{"name":"prometheus","follows":"enrich"}]`, - pipeline, - ) - - jsonParams, _ := json.Marshal(cfs.Parameters[1].Transform.Filter) - assert.Contains(string(jsonParams), `{"input":"CustomBytes1","type":"remove_field"}`) - assert.Contains(string(jsonParams), `{"input":"CustomInteger5","type":"remove_field"}`) - assert.Contains(string(jsonParams), `{"input":"MPLS1Label","type":"remove_field"}`) -} - func TestPipelineTraceStage(t *testing.T) { assert := assert.New(t) diff --git a/controllers/ovs/flowsconfig_cno_reconciler.go b/controllers/ovs/flowsconfig_cno_reconciler.go deleted file mode 100644 index 6725bb861..000000000 --- a/controllers/ovs/flowsconfig_cno_reconciler.go +++ /dev/null @@ -1,133 +0,0 @@ -package ovs - -import ( - "context" - "fmt" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/log" - - flowslatest "github.com/netobserv/network-observability-operator/apis/flowcollector/v1beta2" - "github.com/netobserv/network-observability-operator/controllers/reconcilers" - "github.com/netobserv/network-observability-operator/pkg/helper" -) - -type FlowsConfigCNOController struct { - *reconcilers.Common - ovsConfigMapName string - cnoNamespace string -} - -func NewFlowsConfigCNOController(common *reconcilers.Common, cnoNamespace, ovsConfigMapName string) *FlowsConfigCNOController { - return &FlowsConfigCNOController{ - Common: common, - cnoNamespace: cnoNamespace, - ovsConfigMapName: ovsConfigMapName, - } -} - -// Reconcile reconciles the status of the ovs-flows-config configmap with -// the target FlowCollector ipfix section map -func (c *FlowsConfigCNOController) Reconcile(ctx context.Context, target *flowslatest.FlowCollector) error { - rlog := log.FromContext(ctx, "component", "FlowsConfigCNOController") - - current, err := c.current(ctx) - if err != nil { - return err - } - if !helper.UseIPFIX(&target.Spec) { - if current == nil { - 
return nil - } - // If the user has changed the agent type, we need to manually undeploy the configmap - if current != nil { - return c.Delete(ctx, &corev1.ConfigMap{ - ObjectMeta: v1.ObjectMeta{ - Name: c.ovsConfigMapName, - Namespace: c.cnoNamespace, - }, - }) - } - return nil - } - - desired := c.desired(ctx, target) - - // compare current and desired - if current == nil { - rlog.Info("Provided IPFIX configuration. Creating " + c.ovsConfigMapName + " ConfigMap") - cm, err := c.flowsConfigMap(desired) - if err != nil { - return err - } - return c.Create(ctx, cm) - } - - if desired != nil && *desired != *current { - rlog.Info("Provided IPFIX configuration differs current configuration. Updating") - cm, err := c.flowsConfigMap(desired) - if err != nil { - return err - } - return c.Update(ctx, cm) - } - - rlog.Info("No changes needed") - return nil -} - -func (c *FlowsConfigCNOController) current(ctx context.Context) (*flowsConfig, error) { - curr := &corev1.ConfigMap{} - if err := c.Get(ctx, types.NamespacedName{ - Name: c.ovsConfigMapName, - Namespace: c.cnoNamespace, - }, curr); err != nil { - if errors.IsNotFound(err) { - // the map is not yet created. As it is associated to a flowCollector that already - // exists (premise to invoke this controller). We will handle accordingly this "nil" - // as an expected value - return nil, nil - } - return nil, fmt.Errorf("retrieving %s/%s configmap: %w", - c.cnoNamespace, c.ovsConfigMapName, err) - } - - return configFromMap(curr.Data) -} - -func (c *FlowsConfigCNOController) desired( - ctx context.Context, coll *flowslatest.FlowCollector) *flowsConfig { - - corrected := coll.Spec.Agent.IPFIX.DeepCopy() - corrected.Sampling = getSampling(ctx, corrected) - - return &flowsConfig{ - FlowCollectorIPFIX: *corrected, - NodePort: *helper.GetAdvancedProcessorConfig(coll.Spec.Processor.Advanced).Port, - } -} - -func (c *FlowsConfigCNOController) flowsConfigMap(fc *flowsConfig) (*corev1.ConfigMap, error) { - data, err := fc.asStringMap() - if err != nil { - return nil, err - } - cm := &corev1.ConfigMap{ - TypeMeta: v1.TypeMeta{ - Kind: "ConfigMap", - APIVersion: "v1", - }, - ObjectMeta: v1.ObjectMeta{ - Name: c.ovsConfigMapName, - Namespace: c.cnoNamespace, - }, - Data: data, - } - if err := c.SetControllerReference(cm); err != nil { - return nil, err - } - return cm, nil -} diff --git a/controllers/ovs/flowsconfig_ovnk_reconciler.go b/controllers/ovs/flowsconfig_ovnk_reconciler.go deleted file mode 100644 index dd9048a2a..000000000 --- a/controllers/ovs/flowsconfig_ovnk_reconciler.go +++ /dev/null @@ -1,132 +0,0 @@ -package ovs - -import ( - "context" - "errors" - "fmt" - "strconv" - "time" - - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - kerr "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/log" - - flowslatest "github.com/netobserv/network-observability-operator/apis/flowcollector/v1beta2" - "github.com/netobserv/network-observability-operator/controllers/reconcilers" - "github.com/netobserv/network-observability-operator/pkg/helper" -) - -type FlowsConfigOVNKController struct { - *reconcilers.Common - config flowslatest.OVNKubernetesConfig -} - -func NewFlowsConfigOVNKController(common *reconcilers.Common, config flowslatest.OVNKubernetesConfig) *FlowsConfigOVNKController { - return &FlowsConfigOVNKController{ - Common: common, - config: config, - } -} - -// Reconcile reconciles the status of the ovs-flows-config configmap with -// the target FlowCollector ipfix section map -func 
(c *FlowsConfigOVNKController) Reconcile( - ctx context.Context, target *flowslatest.FlowCollector) error { - - desiredEnv, err := c.desiredEnv(ctx, target) - if err != nil { - return err - } - - return c.updateEnv(ctx, target, desiredEnv) -} - -func (c *FlowsConfigOVNKController) updateEnv(ctx context.Context, target *flowslatest.FlowCollector, desiredEnv map[string]string) error { - rlog := log.FromContext(ctx, "component", "FlowsConfigOVNKController") - - ds := &appsv1.DaemonSet{} - if err := c.Get(ctx, types.NamespacedName{ - Name: c.config.DaemonSetName, - Namespace: c.config.Namespace, - }, ds); err != nil { - if kerr.IsNotFound(err) && !helper.UseIPFIX(&target.Spec) { - // If we don't want IPFIX and ovn-k daemonset is not found, assume there no ovn-k, just succeed - rlog.Info("Skip reconciling OVN: OVN DaemonSet not found") - return nil - } - return fmt.Errorf("retrieving %s/%s daemonset: %w", c.config.Namespace, c.config.DaemonSetName, err) - } - - ovnkubeNode := helper.FindContainer(&ds.Spec.Template.Spec, target.Spec.Agent.IPFIX.OVNKubernetes.ContainerName) - if ovnkubeNode == nil { - return errors.New("could not find container ovnkube-node") - } - - anyUpdate := false - for k, v := range desiredEnv { - if checkUpdateEnv(k, v, ovnkubeNode) { - anyUpdate = true - } - } - if anyUpdate { - rlog.Info("Provided IPFIX configuration differs current configuration. Updating") - return c.Update(ctx, ds) - } - - rlog.Info("No changes needed") - return nil -} - -func (c *FlowsConfigOVNKController) desiredEnv(ctx context.Context, coll *flowslatest.FlowCollector) (map[string]string, error) { - cacheTimeout, err := time.ParseDuration(coll.Spec.Agent.IPFIX.CacheActiveTimeout) - if err != nil { - return nil, err - } - sampling := getSampling(ctx, &coll.Spec.Agent.IPFIX) - - envs := map[string]string{ - "OVN_IPFIX_TARGETS": "", - "OVN_IPFIX_CACHE_ACTIVE_TIMEOUT": strconv.Itoa(int(cacheTimeout.Seconds())), - "OVN_IPFIX_CACHE_MAX_FLOWS": strconv.Itoa(int(coll.Spec.Agent.IPFIX.CacheMaxFlows)), - "OVN_IPFIX_SAMPLING": strconv.Itoa(int(sampling)), - } - - if !helper.UseIPFIX(&coll.Spec) { - // No IPFIX => leave target empty and return - return envs, nil - } - - envs["OVN_IPFIX_TARGETS"] = fmt.Sprintf(":%d", *helper.GetAdvancedProcessorConfig(coll.Spec.Processor.Advanced).Port) - return envs, nil -} - -func checkUpdateEnv(name, value string, container *corev1.Container) bool { - for i, env := range container.Env { - if env.Name == name { - if env.Value == value { - return false - } - container.Env[i].Value = value - return true - } - } - container.Env = append(container.Env, corev1.EnvVar{ - Name: name, - Value: value, - }) - return true -} - -// Finalize will remove IPFIX config from ovn pods env -func (c *FlowsConfigOVNKController) Finalize(ctx context.Context, target *flowslatest.FlowCollector) error { - // Remove all env - desiredEnv := map[string]string{ - "OVN_IPFIX_TARGETS": "", - "OVN_IPFIX_CACHE_ACTIVE_TIMEOUT": "", - "OVN_IPFIX_CACHE_MAX_FLOWS": "", - "OVN_IPFIX_SAMPLING": "", - } - return c.updateEnv(ctx, target, desiredEnv) -} diff --git a/controllers/ovs/flowsconfig_types.go b/controllers/ovs/flowsconfig_types.go deleted file mode 100644 index e1bdc78b3..000000000 --- a/controllers/ovs/flowsconfig_types.go +++ /dev/null @@ -1,51 +0,0 @@ -package ovs - -import ( - "context" - "fmt" - "reflect" - - "github.com/mitchellh/mapstructure" - "sigs.k8s.io/controller-runtime/pkg/log" - - flowslatest "github.com/netobserv/network-observability-operator/apis/flowcollector/v1beta2" -) - -type 
flowsConfig struct { - flowslatest.FlowCollectorIPFIX `json:",inline" mapstructure:",squash"` - SharedTarget string `json:"sharedTarget,omitempty" mapstructure:"sharedTarget,omitempty"` - NodePort int32 `json:"nodePort,omitempty" mapstructure:"nodePort,omitempty"` -} - -func configFromMap(data map[string]string) (*flowsConfig, error) { - config := flowsConfig{} - err := mapstructure.WeakDecode(data, &config) - return &config, err -} - -func (fc *flowsConfig) asStringMap() (map[string]string, error) { - vals := map[string]interface{}{} - if err := mapstructure.WeakDecode(fc, &vals); err != nil { - return nil, err - } - stringVals := map[string]string{} - for k, v := range vals { - if reflect.ValueOf(v).IsZero() { - continue - } - stringVals[k] = fmt.Sprint(v) - } - return stringVals, nil -} - -// getSampling returns the configured sampling, or 1 if ipfix.forceSampleAll is true -// Note that configured sampling has a minimum value of 2. -// See also https://bugzilla.redhat.com/show_bug.cgi?id=2103136 , https://bugzilla.redhat.com/show_bug.cgi?id=2104943 -func getSampling(ctx context.Context, cfg *flowslatest.FlowCollectorIPFIX) int32 { - rlog := log.FromContext(ctx) - if cfg.ForceSampleAll { - rlog.Info("Warning, sampling is set to 1. This may put cluster stability at risk.") - return 1 - } - return cfg.Sampling -} diff --git a/controllers/suite_test.go b/controllers/suite_test.go index 1e00f4869..d7261fe82 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -50,7 +50,6 @@ func TestAPIs(t *testing.T) { // go test ./... runs always Ginkgo test suites in parallel and they would interfere // this way we make sure that both test sub-suites are executed serially var _ = Describe("FlowCollector Controller", Ordered, Serial, func() { - flowCollectorControllerSpecs() flowCollectorConsolePluginSpecs() flowCollectorEBPFSpecs() flowCollectorEBPFKafkaSpecs() diff --git a/docs/FlowCollector.md b/docs/FlowCollector.md index d05289d95..4fcc554e8 100644 --- a/docs/FlowCollector.md +++ b/docs/FlowCollector.md @@ -183,7 +183,7 @@ Agent configuration for flows extraction. type enum - `type` selects the flows tracing agent. Possible values are:
- `EBPF` (default) to use NetObserv eBPF agent.
- `IPFIX` [deprecated (*)] - to use the legacy IPFIX collector.
`EBPF` is recommended as it offers better performances and should work regardless of the CNI installed on the cluster. `IPFIX` works with OVN-Kubernetes CNI (other CNIs could work if they support exporting IPFIX, but they would require manual configuration).
+ `type` [deprecated (*)] selects the flows tracing agent. The only possible value is `EBPF` (default), to use the NetObserv eBPF agent.<br>
Previously, using an IPFIX collector was allowed, but this option was deprecated and has now been removed.<br>
Setting `IPFIX` is ignored and the eBPF agent is still used. Since there is only a single option here, this field will be removed in a future API version.<br>

Enum: EBPF, IPFIX
Default: EBPF
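
For illustration, here is a minimal Go sketch of the only supported agent configuration after this change, assuming the v1beta2 `flowslatest` types referenced elsewhere in this patch (a sketch, not operator code):

```go
package main

import (
	"fmt"

	flowslatest "github.com/netobserv/network-observability-operator/apis/flowcollector/v1beta2"
)

func main() {
	// Only the eBPF agent type is honored now; setting `IPFIX` would be ignored.
	spec := flowslatest.FlowCollectorSpec{}
	spec.Agent.Type = flowslatest.AgentEBPF

	fmt.Println("agent type:", spec.Agent.Type)
}
```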
@@ -3391,7 +3391,7 @@ TLS client configuration for Loki URL. dropUnusedFields boolean - `dropUnusedFields` allows, when set to `true`, to drop fields that are known to be unused by OVS, to save storage space.
+ `dropUnusedFields` [deprecated (*)]: this setting is not used anymore.<br>

Default: true
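
Since `dropUnusedFields` no longer has any effect, here is a short hedged sketch of how a caller could still read the flag through the existing helper; the identifiers come from this patch, but the function below is hypothetical and only illustrates that reading the value triggers no pipeline change:

```go
package main

import (
	flowslatest "github.com/netobserv/network-observability-operator/apis/flowcollector/v1beta2"
	"github.com/netobserv/network-observability-operator/pkg/helper"
)

// dropUnusedFieldsRequested is a hypothetical helper: users can still set the flag,
// but after this change the operator adds no filter stage for it.
func dropUnusedFieldsRequested(spec *flowslatest.FlowCollectorSpec) bool {
	adv := helper.GetAdvancedProcessorConfig(spec.Processor.Advanced)
	return adv.DropUnusedFields != nil && *adv.DropUnusedFields
}

func main() {}
```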
@@ -5087,7 +5087,7 @@ Agent configuration for flows extraction. type enum - `type` selects the flows tracing agent. Possible values are:
- `eBPF` (default) to use NetObserv eBPF agent.
- `IPFIX` [deprecated (*)] - to use the legacy IPFIX collector.
`eBPF` is recommended as it offers better performances and should work regardless of the CNI installed on the cluster. `IPFIX` works with OVN-Kubernetes CNI (other CNIs could work if they support exporting IPFIX, but they would require manual configuration).
+ `type` [deprecated (*)] selects the flows tracing agent. The only possible value is `eBPF` (default), to use the NetObserv eBPF agent.<br>
Previously, using an IPFIX collector was allowed, but this option was deprecated and has now been removed.<br>
Setting `IPFIX` is ignored and the eBPF agent is still used. Since there is only a single option here, this field will be removed in a future API version.<br>

Enum: eBPF, IPFIX
Default: eBPF
@@ -9009,7 +9009,7 @@ TLS client configuration for Loki URL. dropUnusedFields boolean - `dropUnusedFields` allows, when set to `true`, to drop fields that are known to be unused by OVS, to save storage space.
+ `dropUnusedFields` [deprecated (*)]: this setting is not used anymore.<br>

Default: true
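
The `pkg/helper/flowcollector.go` hunk below simplifies `GetSampling` to the eBPF-only path, falling back to 50 when the value is unset. A minimal usage sketch, assuming the patched helper and the v1beta2 spec types shown in this diff:

```go
package main

import (
	"fmt"

	flowslatest "github.com/netobserv/network-observability-operator/apis/flowcollector/v1beta2"
	"github.com/netobserv/network-observability-operator/pkg/helper"
	"k8s.io/utils/ptr"
)

func main() {
	spec := flowslatest.FlowCollectorSpec{}

	// With no explicit sampling, the helper falls back to the 1:50 default.
	fmt.Println(helper.GetSampling(&spec)) // 50

	// An explicit value is returned as-is.
	spec.Agent.EBPF.Sampling = ptr.To(int32(1))
	fmt.Println(helper.GetSampling(&spec)) // 1
}
```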
diff --git a/go.mod b/go.mod index 2375f02ff..8b05f217a 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,6 @@ go 1.20 require ( github.com/go-logr/logr v1.4.1 - github.com/mitchellh/mapstructure v1.5.0 github.com/netobserv/flowlogs-pipeline v0.1.12-0.20240305083238-24bf8cec8807 github.com/onsi/ginkgo/v2 v2.15.0 github.com/onsi/gomega v1.31.1 diff --git a/go.sum b/go.sum index 69be5b1b2..fef3b8033 100644 --- a/go.sum +++ b/go.sum @@ -136,8 +136,6 @@ github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJ github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= -github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= diff --git a/pkg/helper/flowcollector.go b/pkg/helper/flowcollector.go index eec4e2efe..118ccfffc 100644 --- a/pkg/helper/flowcollector.go +++ b/pkg/helper/flowcollector.go @@ -10,18 +10,10 @@ import ( ) func GetSampling(spec *flowslatest.FlowCollectorSpec) int { - if UseEBPF(spec) { - return int(*spec.Agent.EBPF.Sampling) + if spec.Agent.EBPF.Sampling == nil { + return 50 } - return int(spec.Agent.IPFIX.Sampling) -} - -func UseEBPF(spec *flowslatest.FlowCollectorSpec) bool { - return spec.Agent.Type == flowslatest.AgentEBPF -} - -func UseIPFIX(spec *flowslatest.FlowCollectorSpec) bool { - return spec.Agent.Type == flowslatest.AgentIPFIX + return int(*spec.Agent.EBPF.Sampling) } func UseKafka(spec *flowslatest.FlowCollectorSpec) bool { diff --git a/pkg/loki/labels.go b/pkg/loki/labels.go index 2d25fded5..6a5829217 100644 --- a/pkg/loki/labels.go +++ b/pkg/loki/labels.go @@ -24,17 +24,15 @@ func GetLokiLabels(desired *flowslatest.FlowCollectorSpec) []string { indexFields = append(indexFields, constants.LokiZoneIndexFields...) } - if helper.UseEBPF(desired) { - dedupJustMark, _ := strconv.ParseBool(ebpf.DedupeJustMarkDefault) - if desired.Agent.EBPF.Advanced != nil { - if v, ok := desired.Agent.EBPF.Advanced.Env[ebpf.EnvDedupeJustMark]; ok { - dedupJustMark, _ = strconv.ParseBool(v) - } - } - if dedupJustMark { - indexFields = append(indexFields, constants.LokiDeduperMarkIndexFields...) + dedupJustMark, _ := strconv.ParseBool(ebpf.DedupeJustMarkDefault) + if desired.Agent.EBPF.Advanced != nil { + if v, ok := desired.Agent.EBPF.Advanced.Env[ebpf.EnvDedupeJustMark]; ok { + dedupJustMark, _ = strconv.ParseBool(v) } } + if dedupJustMark { + indexFields = append(indexFields, constants.LokiDeduperMarkIndexFields...) 
+ } return indexFields } diff --git a/pkg/manager/status/status_manager.go b/pkg/manager/status/status_manager.go index 125e4b098..1742ace0a 100644 --- a/pkg/manager/status/status_manager.go +++ b/pkg/manager/status/status_manager.go @@ -23,7 +23,6 @@ const ( FlowCollectorLegacy ComponentName = "FlowCollectorLegacy" FLPParent ComponentName = "FLPParent" FLPMonolith ComponentName = "FLPMonolith" - FLPIngestOnly ComponentName = "FLPIngestOnly" FLPTransformOnly ComponentName = "FLPTransformOnly" Monitoring ComponentName = "Monitoring" ) diff --git a/pkg/metrics/predefined_metrics.go b/pkg/metrics/predefined_metrics.go index ff5166648..d45db369e 100644 --- a/pkg/metrics/predefined_metrics.go +++ b/pkg/metrics/predefined_metrics.go @@ -207,13 +207,13 @@ func GetIncludeList(spec *flowslatest.FlowCollectorSpec) []string { list = append(list, string(m)) } } - if !helper.UseEBPF(spec) || !helper.IsPktDropEnabled(&spec.Agent.EBPF) { + if !helper.IsPktDropEnabled(&spec.Agent.EBPF) { list = removeMetricsByPattern(list, "_drop_") } - if !helper.UseEBPF(spec) || !helper.IsFlowRTTEnabled(&spec.Agent.EBPF) { + if !helper.IsFlowRTTEnabled(&spec.Agent.EBPF) { list = removeMetricsByPattern(list, "_rtt_") } - if !helper.UseEBPF(spec) || !helper.IsDNSTrackingEnabled(&spec.Agent.EBPF) { + if !helper.IsDNSTrackingEnabled(&spec.Agent.EBPF) { list = removeMetricsByPattern(list, "_dns_") } return list diff --git a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md deleted file mode 100644 index c75823490..000000000 --- a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md +++ /dev/null @@ -1,96 +0,0 @@ -## 1.5.0 - -* New option `IgnoreUntaggedFields` to ignore decoding to any fields - without `mapstructure` (or the configured tag name) set [GH-277] -* New option `ErrorUnset` which makes it an error if any fields - in a target struct are not set by the decoding process. [GH-225] -* New function `OrComposeDecodeHookFunc` to help compose decode hooks. [GH-240] -* Decoding to slice from array no longer crashes [GH-265] -* Decode nested struct pointers to map [GH-271] -* Fix issue where `,squash` was ignored if `Squash` option was set. [GH-280] -* Fix issue where fields with `,omitempty` would sometimes decode - into a map with an empty string key [GH-281] - -## 1.4.3 - -* Fix cases where `json.Number` didn't decode properly [GH-261] - -## 1.4.2 - -* Custom name matchers to support any sort of casing, formatting, etc. for - field names. [GH-250] -* Fix possible panic in ComposeDecodeHookFunc [GH-251] - -## 1.4.1 - -* Fix regression where `*time.Time` value would be set to empty and not be sent - to decode hooks properly [GH-232] - -## 1.4.0 - -* A new decode hook type `DecodeHookFuncValue` has been added that has - access to the full values. [GH-183] -* Squash is now supported with embedded fields that are struct pointers [GH-205] -* Empty strings will convert to 0 for all numeric types when weakly decoding [GH-206] - -## 1.3.3 - -* Decoding maps from maps creates a settable value for decode hooks [GH-203] - -## 1.3.2 - -* Decode into interface type with a struct value is supported [GH-187] - -## 1.3.1 - -* Squash should only squash embedded structs. [GH-194] - -## 1.3.0 - -* Added `",omitempty"` support. This will ignore zero values in the source - structure when encoding. [GH-145] - -## 1.2.3 - -* Fix duplicate entries in Keys list with pointer values. 
[GH-185] - -## 1.2.2 - -* Do not add unsettable (unexported) values to the unused metadata key - or "remain" value. [GH-150] - -## 1.2.1 - -* Go modules checksum mismatch fix - -## 1.2.0 - -* Added support to capture unused values in a field using the `",remain"` value - in the mapstructure tag. There is an example to showcase usage. -* Added `DecoderConfig` option to always squash embedded structs -* `json.Number` can decode into `uint` types -* Empty slices are preserved and not replaced with nil slices -* Fix panic that can occur in when decoding a map into a nil slice of structs -* Improved package documentation for godoc - -## 1.1.2 - -* Fix error when decode hook decodes interface implementation into interface - type. [GH-140] - -## 1.1.1 - -* Fix panic that can happen in `decodePtr` - -## 1.1.0 - -* Added `StringToIPHookFunc` to convert `string` to `net.IP` and `net.IPNet` [GH-133] -* Support struct to struct decoding [GH-137] -* If source map value is nil, then destination map value is nil (instead of empty) -* If source slice value is nil, then destination slice value is nil (instead of empty) -* If source pointer is nil, then destination pointer is set to nil (instead of - allocated zero value of type) - -## 1.0.0 - -* Initial tagged stable release. diff --git a/vendor/github.com/mitchellh/mapstructure/LICENSE b/vendor/github.com/mitchellh/mapstructure/LICENSE deleted file mode 100644 index f9c841a51..000000000 --- a/vendor/github.com/mitchellh/mapstructure/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Mitchell Hashimoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/mapstructure/README.md b/vendor/github.com/mitchellh/mapstructure/README.md deleted file mode 100644 index 0018dc7d9..000000000 --- a/vendor/github.com/mitchellh/mapstructure/README.md +++ /dev/null @@ -1,46 +0,0 @@ -# mapstructure [![Godoc](https://godoc.org/github.com/mitchellh/mapstructure?status.svg)](https://godoc.org/github.com/mitchellh/mapstructure) - -mapstructure is a Go library for decoding generic map values to structures -and vice versa, while providing helpful error handling. - -This library is most useful when decoding values from some data stream (JSON, -Gob, etc.) where you don't _quite_ know the structure of the underlying data -until you read a part of it. You can therefore read a `map[string]interface{}` -and use this library to decode it into the proper underlying native Go -structure. 
- -## Installation - -Standard `go get`: - -``` -$ go get github.com/mitchellh/mapstructure -``` - -## Usage & Example - -For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure). - -The `Decode` function has examples associated with it there. - -## But Why?! - -Go offers fantastic standard libraries for decoding formats such as JSON. -The standard method is to have a struct pre-created, and populate that struct -from the bytes of the encoded format. This is great, but the problem is if -you have configuration or an encoding that changes slightly depending on -specific fields. For example, consider this JSON: - -```json -{ - "type": "person", - "name": "Mitchell" -} -``` - -Perhaps we can't populate a specific structure without first reading -the "type" field from the JSON. We could always do two passes over the -decoding of the JSON (reading the "type" first, and the rest later). -However, it is much simpler to just decode this into a `map[string]interface{}` -structure, read the "type" key, then use something like this library -to decode it into the proper structure. diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go deleted file mode 100644 index 3a754ca72..000000000 --- a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go +++ /dev/null @@ -1,279 +0,0 @@ -package mapstructure - -import ( - "encoding" - "errors" - "fmt" - "net" - "reflect" - "strconv" - "strings" - "time" -) - -// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns -// it into the proper DecodeHookFunc type, such as DecodeHookFuncType. -func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { - // Create variables here so we can reference them with the reflect pkg - var f1 DecodeHookFuncType - var f2 DecodeHookFuncKind - var f3 DecodeHookFuncValue - - // Fill in the variables into this interface and the rest is done - // automatically using the reflect package. - potential := []interface{}{f1, f2, f3} - - v := reflect.ValueOf(h) - vt := v.Type() - for _, raw := range potential { - pt := reflect.ValueOf(raw).Type() - if vt.ConvertibleTo(pt) { - return v.Convert(pt).Interface() - } - } - - return nil -} - -// DecodeHookExec executes the given decode hook. This should be used -// since it'll naturally degrade to the older backwards compatible DecodeHookFunc -// that took reflect.Kind instead of reflect.Type. -func DecodeHookExec( - raw DecodeHookFunc, - from reflect.Value, to reflect.Value) (interface{}, error) { - - switch f := typedDecodeHook(raw).(type) { - case DecodeHookFuncType: - return f(from.Type(), to.Type(), from.Interface()) - case DecodeHookFuncKind: - return f(from.Kind(), to.Kind(), from.Interface()) - case DecodeHookFuncValue: - return f(from, to) - default: - return nil, errors.New("invalid decode hook signature") - } -} - -// ComposeDecodeHookFunc creates a single DecodeHookFunc that -// automatically composes multiple DecodeHookFuncs. -// -// The composed funcs are called in order, with the result of the -// previous transformation. 
-func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { - return func(f reflect.Value, t reflect.Value) (interface{}, error) { - var err error - data := f.Interface() - - newFrom := f - for _, f1 := range fs { - data, err = DecodeHookExec(f1, newFrom, t) - if err != nil { - return nil, err - } - newFrom = reflect.ValueOf(data) - } - - return data, nil - } -} - -// OrComposeDecodeHookFunc executes all input hook functions until one of them returns no error. In that case its value is returned. -// If all hooks return an error, OrComposeDecodeHookFunc returns an error concatenating all error messages. -func OrComposeDecodeHookFunc(ff ...DecodeHookFunc) DecodeHookFunc { - return func(a, b reflect.Value) (interface{}, error) { - var allErrs string - var out interface{} - var err error - - for _, f := range ff { - out, err = DecodeHookExec(f, a, b) - if err != nil { - allErrs += err.Error() + "\n" - continue - } - - return out, nil - } - - return nil, errors.New(allErrs) - } -} - -// StringToSliceHookFunc returns a DecodeHookFunc that converts -// string to []string by splitting on the given sep. -func StringToSliceHookFunc(sep string) DecodeHookFunc { - return func( - f reflect.Kind, - t reflect.Kind, - data interface{}) (interface{}, error) { - if f != reflect.String || t != reflect.Slice { - return data, nil - } - - raw := data.(string) - if raw == "" { - return []string{}, nil - } - - return strings.Split(raw, sep), nil - } -} - -// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts -// strings to time.Duration. -func StringToTimeDurationHookFunc() DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - if t != reflect.TypeOf(time.Duration(5)) { - return data, nil - } - - // Convert it by parsing - return time.ParseDuration(data.(string)) - } -} - -// StringToIPHookFunc returns a DecodeHookFunc that converts -// strings to net.IP -func StringToIPHookFunc() DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - if t != reflect.TypeOf(net.IP{}) { - return data, nil - } - - // Convert it by parsing - ip := net.ParseIP(data.(string)) - if ip == nil { - return net.IP{}, fmt.Errorf("failed parsing ip %v", data) - } - - return ip, nil - } -} - -// StringToIPNetHookFunc returns a DecodeHookFunc that converts -// strings to net.IPNet -func StringToIPNetHookFunc() DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - if t != reflect.TypeOf(net.IPNet{}) { - return data, nil - } - - // Convert it by parsing - _, net, err := net.ParseCIDR(data.(string)) - return net, err - } -} - -// StringToTimeHookFunc returns a DecodeHookFunc that converts -// strings to time.Time. -func StringToTimeHookFunc(layout string) DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - if t != reflect.TypeOf(time.Time{}) { - return data, nil - } - - // Convert it by parsing - return time.Parse(layout, data.(string)) - } -} - -// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to -// the decoder. -// -// Note that this is significantly different from the WeaklyTypedInput option -// of the DecoderConfig. 
-func WeaklyTypedHook( - f reflect.Kind, - t reflect.Kind, - data interface{}) (interface{}, error) { - dataVal := reflect.ValueOf(data) - switch t { - case reflect.String: - switch f { - case reflect.Bool: - if dataVal.Bool() { - return "1", nil - } - return "0", nil - case reflect.Float32: - return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil - case reflect.Int: - return strconv.FormatInt(dataVal.Int(), 10), nil - case reflect.Slice: - dataType := dataVal.Type() - elemKind := dataType.Elem().Kind() - if elemKind == reflect.Uint8 { - return string(dataVal.Interface().([]uint8)), nil - } - case reflect.Uint: - return strconv.FormatUint(dataVal.Uint(), 10), nil - } - } - - return data, nil -} - -func RecursiveStructToMapHookFunc() DecodeHookFunc { - return func(f reflect.Value, t reflect.Value) (interface{}, error) { - if f.Kind() != reflect.Struct { - return f.Interface(), nil - } - - var i interface{} = struct{}{} - if t.Type() != reflect.TypeOf(&i).Elem() { - return f.Interface(), nil - } - - m := make(map[string]interface{}) - t.Set(reflect.ValueOf(m)) - - return f.Interface(), nil - } -} - -// TextUnmarshallerHookFunc returns a DecodeHookFunc that applies -// strings to the UnmarshalText function, when the target type -// implements the encoding.TextUnmarshaler interface -func TextUnmarshallerHookFunc() DecodeHookFuncType { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - result := reflect.New(t).Interface() - unmarshaller, ok := result.(encoding.TextUnmarshaler) - if !ok { - return data, nil - } - if err := unmarshaller.UnmarshalText([]byte(data.(string))); err != nil { - return nil, err - } - return result, nil - } -} diff --git a/vendor/github.com/mitchellh/mapstructure/error.go b/vendor/github.com/mitchellh/mapstructure/error.go deleted file mode 100644 index 47a99e5af..000000000 --- a/vendor/github.com/mitchellh/mapstructure/error.go +++ /dev/null @@ -1,50 +0,0 @@ -package mapstructure - -import ( - "errors" - "fmt" - "sort" - "strings" -) - -// Error implements the error interface and can represents multiple -// errors that occur in the course of a single decode. -type Error struct { - Errors []string -} - -func (e *Error) Error() string { - points := make([]string, len(e.Errors)) - for i, err := range e.Errors { - points[i] = fmt.Sprintf("* %s", err) - } - - sort.Strings(points) - return fmt.Sprintf( - "%d error(s) decoding:\n\n%s", - len(e.Errors), strings.Join(points, "\n")) -} - -// WrappedErrors implements the errwrap.Wrapper interface to make this -// return value more useful with the errwrap and go-multierror libraries. -func (e *Error) WrappedErrors() []error { - if e == nil { - return nil - } - - result := make([]error, len(e.Errors)) - for i, e := range e.Errors { - result[i] = errors.New(e) - } - - return result -} - -func appendErrors(errors []string, err error) []string { - switch e := err.(type) { - case *Error: - return append(errors, e.Errors...) - default: - return append(errors, e.Error()) - } -} diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go deleted file mode 100644 index 1efb22ac3..000000000 --- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go +++ /dev/null @@ -1,1540 +0,0 @@ -// Package mapstructure exposes functionality to convert one arbitrary -// Go type into another, typically to convert a map[string]interface{} -// into a native Go structure. 
-// -// The Go structure can be arbitrarily complex, containing slices, -// other structs, etc. and the decoder will properly decode nested -// maps and so on into the proper structures in the native Go struct. -// See the examples to see what the decoder is capable of. -// -// The simplest function to start with is Decode. -// -// Field Tags -// -// When decoding to a struct, mapstructure will use the field name by -// default to perform the mapping. For example, if a struct has a field -// "Username" then mapstructure will look for a key in the source value -// of "username" (case insensitive). -// -// type User struct { -// Username string -// } -// -// You can change the behavior of mapstructure by using struct tags. -// The default struct tag that mapstructure looks for is "mapstructure" -// but you can customize it using DecoderConfig. -// -// Renaming Fields -// -// To rename the key that mapstructure looks for, use the "mapstructure" -// tag and set a value directly. For example, to change the "username" example -// above to "user": -// -// type User struct { -// Username string `mapstructure:"user"` -// } -// -// Embedded Structs and Squashing -// -// Embedded structs are treated as if they're another field with that name. -// By default, the two structs below are equivalent when decoding with -// mapstructure: -// -// type Person struct { -// Name string -// } -// -// type Friend struct { -// Person -// } -// -// type Friend struct { -// Person Person -// } -// -// This would require an input that looks like below: -// -// map[string]interface{}{ -// "person": map[string]interface{}{"name": "alice"}, -// } -// -// If your "person" value is NOT nested, then you can append ",squash" to -// your tag value and mapstructure will treat it as if the embedded struct -// were part of the struct directly. Example: -// -// type Friend struct { -// Person `mapstructure:",squash"` -// } -// -// Now the following input would be accepted: -// -// map[string]interface{}{ -// "name": "alice", -// } -// -// When decoding from a struct to a map, the squash tag squashes the struct -// fields into a single map. Using the example structs from above: -// -// Friend{Person: Person{Name: "alice"}} -// -// Will be decoded into a map: -// -// map[string]interface{}{ -// "name": "alice", -// } -// -// DecoderConfig has a field that changes the behavior of mapstructure -// to always squash embedded structs. -// -// Remainder Values -// -// If there are any unmapped keys in the source value, mapstructure by -// default will silently ignore them. You can error by setting ErrorUnused -// in DecoderConfig. If you're using Metadata you can also maintain a slice -// of the unused keys. -// -// You can also use the ",remain" suffix on your tag to collect all unused -// values in a map. The field with this tag MUST be a map type and should -// probably be a "map[string]interface{}" or "map[interface{}]interface{}". -// See example below: -// -// type Friend struct { -// Name string -// Other map[string]interface{} `mapstructure:",remain"` -// } -// -// Given the input below, Other would be populated with the other -// values that weren't used (everything but "name"): -// -// map[string]interface{}{ -// "name": "bob", -// "address": "123 Maple St.", -// } -// -// Omit Empty Values -// -// When decoding from a struct to any other value, you may use the -// ",omitempty" suffix on your tag to omit that value if it equates to -// the zero value. The zero value of all types is specified in the Go -// specification. 
-// -// For example, the zero type of a numeric type is zero ("0"). If the struct -// field value is zero and a numeric type, the field is empty, and it won't -// be encoded into the destination type. -// -// type Source struct { -// Age int `mapstructure:",omitempty"` -// } -// -// Unexported fields -// -// Since unexported (private) struct fields cannot be set outside the package -// where they are defined, the decoder will simply skip them. -// -// For this output type definition: -// -// type Exported struct { -// private string // this unexported field will be skipped -// Public string -// } -// -// Using this map as input: -// -// map[string]interface{}{ -// "private": "I will be ignored", -// "Public": "I made it through!", -// } -// -// The following struct will be decoded: -// -// type Exported struct { -// private: "" // field is left with an empty string (zero value) -// Public: "I made it through!" -// } -// -// Other Configuration -// -// mapstructure is highly configurable. See the DecoderConfig struct -// for other features and options that are supported. -package mapstructure - -import ( - "encoding/json" - "errors" - "fmt" - "reflect" - "sort" - "strconv" - "strings" -) - -// DecodeHookFunc is the callback function that can be used for -// data transformations. See "DecodeHook" in the DecoderConfig -// struct. -// -// The type must be one of DecodeHookFuncType, DecodeHookFuncKind, or -// DecodeHookFuncValue. -// Values are a superset of Types (Values can return types), and Types are a -// superset of Kinds (Types can return Kinds) and are generally a richer thing -// to use, but Kinds are simpler if you only need those. -// -// The reason DecodeHookFunc is multi-typed is for backwards compatibility: -// we started with Kinds and then realized Types were the better solution, -// but have a promise to not break backwards compat so we now support -// both. -type DecodeHookFunc interface{} - -// DecodeHookFuncType is a DecodeHookFunc which has complete information about -// the source and target types. -type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error) - -// DecodeHookFuncKind is a DecodeHookFunc which knows only the Kinds of the -// source and target types. -type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) - -// DecodeHookFuncValue is a DecodeHookFunc which has complete access to both the source and target -// values. -type DecodeHookFuncValue func(from reflect.Value, to reflect.Value) (interface{}, error) - -// DecoderConfig is the configuration that is used to create a new decoder -// and allows customization of various aspects of decoding. -type DecoderConfig struct { - // DecodeHook, if set, will be called before any decoding and any - // type conversion (if WeaklyTypedInput is on). This lets you modify - // the values before they're set down onto the resulting struct. The - // DecodeHook is called for every map and value in the input. This means - // that if a struct has embedded fields with squash tags the decode hook - // is called only once with all of the input data, not once for each - // embedded struct. - // - // If an error is returned, the entire decode will fail with that error. - DecodeHook DecodeHookFunc - - // If ErrorUnused is true, then it is an error for there to exist - // keys in the original map that were unused in the decoding process - // (extra keys). 
- ErrorUnused bool - - // If ErrorUnset is true, then it is an error for there to exist - // fields in the result that were not set in the decoding process - // (extra fields). This only applies to decoding to a struct. This - // will affect all nested structs as well. - ErrorUnset bool - - // ZeroFields, if set to true, will zero fields before writing them. - // For example, a map will be emptied before decoded values are put in - // it. If this is false, a map will be merged. - ZeroFields bool - - // If WeaklyTypedInput is true, the decoder will make the following - // "weak" conversions: - // - // - bools to string (true = "1", false = "0") - // - numbers to string (base 10) - // - bools to int/uint (true = 1, false = 0) - // - strings to int/uint (base implied by prefix) - // - int to bool (true if value != 0) - // - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F, - // FALSE, false, False. Anything else is an error) - // - empty array = empty map and vice versa - // - negative numbers to overflowed uint values (base 10) - // - slice of maps to a merged map - // - single values are converted to slices if required. Each - // element is weakly decoded. For example: "4" can become []int{4} - // if the target type is an int slice. - // - WeaklyTypedInput bool - - // Squash will squash embedded structs. A squash tag may also be - // added to an individual struct field using a tag. For example: - // - // type Parent struct { - // Child `mapstructure:",squash"` - // } - Squash bool - - // Metadata is the struct that will contain extra metadata about - // the decoding. If this is nil, then no metadata will be tracked. - Metadata *Metadata - - // Result is a pointer to the struct that will contain the decoded - // value. - Result interface{} - - // The tag name that mapstructure reads for field names. This - // defaults to "mapstructure" - TagName string - - // IgnoreUntaggedFields ignores all struct fields without explicit - // TagName, comparable to `mapstructure:"-"` as default behaviour. - IgnoreUntaggedFields bool - - // MatchName is the function used to match the map key to the struct - // field name or tag. Defaults to `strings.EqualFold`. This can be used - // to implement case-sensitive tag values, support snake casing, etc. - MatchName func(mapKey, fieldName string) bool -} - -// A Decoder takes a raw interface value and turns it into structured -// data, keeping track of rich error information along the way in case -// anything goes wrong. Unlike the basic top-level Decode method, you can -// more finely control how the Decoder behaves using the DecoderConfig -// structure. The top-level Decode method is just a convenience that sets -// up the most basic Decoder. -type Decoder struct { - config *DecoderConfig -} - -// Metadata contains information about decoding a structure that -// is tedious or difficult to get otherwise. -type Metadata struct { - // Keys are the keys of the structure which were successfully decoded - Keys []string - - // Unused is a slice of keys that were found in the raw value but - // weren't decoded since there was no matching field in the result interface - Unused []string - - // Unset is a slice of field names that were found in the result interface - // but weren't set in the decoding process since there was no matching value - // in the input - Unset []string -} - -// Decode takes an input structure and uses reflection to translate it to -// the output structure. output must be a pointer to a map or struct. 
[Remainder of the deleted vendored file vendor/github.com/mitchellh/mapstructure/mapstructure.go omitted: the Decode/WeakDecode/DecodeMetadata/WeakDecodeMetadata shorthands, NewDecoder, and the Decoder implementation (decode methods for bool, string, int, uint, float, map, pointer, func, slice, array and struct, plus the isEmptyValue/getKind helpers).]
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 832d03ce5..25c9c62a2 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -97,9 +97,6 @@ github.com/json-iterator/go
 github.com/mailru/easyjson/buffer
 github.com/mailru/easyjson/jlexer
 github.com/mailru/easyjson/jwriter
-# github.com/mitchellh/mapstructure v1.5.0
-## explicit; go 1.14
-github.com/mitchellh/mapstructure
 # github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd
 ## explicit
 github.com/modern-go/concurrent