diff --git a/apis/flowcollector/v1beta1/flowcollector_types.go b/apis/flowcollector/v1beta1/flowcollector_types.go index 78e45fb19..12624823e 100644 --- a/apis/flowcollector/v1beta1/flowcollector_types.go +++ b/apis/flowcollector/v1beta1/flowcollector_types.go @@ -154,7 +154,7 @@ type FlowCollectorIPFIX struct { // - `NetworkEvents`, to track Network events.
// - `PacketTranslation`, to enrich flows with packets translation information.
// - `EbpfManager`, to enable using EBPF Manager to manage netobserv ebpf programs [Developer Preview].
-// - `UDNMapping`, to enable interfaces mappind to udn [Developer Preview].
+// - `UDNMapping`, to enable interfaces mapping to udn [Developer Preview].
// +kubebuilder:validation:Enum:="PacketDrop";"DNSTracking";"FlowRTT";"NetworkEvents";"PacketTranslation";"EbpfManager";"UDNMapping" type AgentFeature string @@ -272,13 +272,15 @@ type EBPFFlowFilter struct { // Set `enable` to `true` to enable the eBPF flow filtering feature. Enable *bool `json:"enable,omitempty"` - // [deprecated (*)] this setting is not used anymore. + // [deprecated (*)] this setting is not used anymore. It is replaced with the `rules` list. EBPFFlowFilterRule `json:",inline"` - // `flowFilterRules` defines a list of ebpf agent flow filtering rules + // `rules` defines a list of filtering rules on the eBPF Agents. + // When filtering is enabled, by default, flows that don't match any rule are rejected. + // To change the default, you can define a rule that accepts everything: `{ action: "Accept", cidr: "0.0.0.0/0" }`, and then refine with rejecting rules. // +kubebuilder:validation:MinItems:=1 // +kubebuilder:validation:MaxItems:=16 - FlowFilterRules []EBPFFlowFilterRule `json:"rules,omitempty"` + Rules []EBPFFlowFilterRule `json:"rules,omitempty"` } // `FlowCollectorEBPF` defines a FlowCollector that uses eBPF to collect the flows information @@ -364,7 +366,7 @@ type FlowCollectorEBPF struct { // the kernel debug filesystem, so the eBPF pod has to run as privileged. // - `PacketTranslation`: enable enriching flows with packet's translation information.
// - `EbpfManager`: allow using eBPF manager to manage netobserv ebpf programs.
- // - `UDNMapping`, to enable interfaces mappind to udn.
+ // - `UDNMapping`, to enable interfaces mapping to udn.
// +optional Features []AgentFeature `json:"features,omitempty"` diff --git a/apis/flowcollector/v1beta1/zz_generated.conversion.go b/apis/flowcollector/v1beta1/zz_generated.conversion.go index 96f30bc6b..aa0ccb4a1 100644 --- a/apis/flowcollector/v1beta1/zz_generated.conversion.go +++ b/apis/flowcollector/v1beta1/zz_generated.conversion.go @@ -530,7 +530,7 @@ func autoConvert_v1beta1_EBPFFlowFilter_To_v1beta2_EBPFFlowFilter(in *EBPFFlowFi if err := Convert_v1beta1_EBPFFlowFilterRule_To_v1beta2_EBPFFlowFilterRule(&in.EBPFFlowFilterRule, &out.EBPFFlowFilterRule, s); err != nil { return err } - out.FlowFilterRules = *(*[]v1beta2.EBPFFlowFilterRule)(unsafe.Pointer(&in.FlowFilterRules)) + out.Rules = *(*[]v1beta2.EBPFFlowFilterRule)(unsafe.Pointer(&in.Rules)) return nil } @@ -544,7 +544,7 @@ func autoConvert_v1beta2_EBPFFlowFilter_To_v1beta1_EBPFFlowFilter(in *v1beta2.EB if err := Convert_v1beta2_EBPFFlowFilterRule_To_v1beta1_EBPFFlowFilterRule(&in.EBPFFlowFilterRule, &out.EBPFFlowFilterRule, s); err != nil { return err } - out.FlowFilterRules = *(*[]EBPFFlowFilterRule)(unsafe.Pointer(&in.FlowFilterRules)) + out.Rules = *(*[]EBPFFlowFilterRule)(unsafe.Pointer(&in.Rules)) return nil } diff --git a/apis/flowcollector/v1beta1/zz_generated.deepcopy.go b/apis/flowcollector/v1beta1/zz_generated.deepcopy.go index e79aca3d5..50bbbdbc5 100644 --- a/apis/flowcollector/v1beta1/zz_generated.deepcopy.go +++ b/apis/flowcollector/v1beta1/zz_generated.deepcopy.go @@ -131,8 +131,8 @@ func (in *EBPFFlowFilter) DeepCopyInto(out *EBPFFlowFilter) { **out = **in } in.EBPFFlowFilterRule.DeepCopyInto(&out.EBPFFlowFilterRule) - if in.FlowFilterRules != nil { - in, out := &in.FlowFilterRules, &out.FlowFilterRules + if in.Rules != nil { + in, out := &in.Rules, &out.Rules *out = make([]EBPFFlowFilterRule, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) diff --git a/apis/flowcollector/v1beta2/flowcollector_types.go b/apis/flowcollector/v1beta2/flowcollector_types.go index 
d6d6120b7..9e481f4c4 100644 --- a/apis/flowcollector/v1beta2/flowcollector_types.go +++ b/apis/flowcollector/v1beta2/flowcollector_types.go @@ -80,7 +80,7 @@ type FlowCollectorSpec struct { // +optional Kafka FlowCollectorKafka `json:"kafka,omitempty"` - // `exporters` define additional optional exporters for custom consumption or storage. + // `exporters` defines additional optional exporters for custom consumption or storage. // +optional // +k8s:conversion-gen=false Exporters []*FlowCollectorExporter `json:"exporters"` @@ -174,10 +174,10 @@ type FlowCollectorIPFIX struct { // - `PacketDrop`, to track packet drops.
// - `DNSTracking`, to track specific information on DNS traffic.
// - `FlowRTT`, to track TCP latency.
-// - `NetworkEvents`, to track Network events [Developer Preview].
-// - `PacketTranslation`, to enrich flows with packets translation information.
-// - `EbpfManager`, to enable using EBPF Manager to manage netobserv ebpf programs [Developer Preview].
-// - `UDNMapping`, to enable interfaces mappind to udn [Developer Preview].
+// - `NetworkEvents`, to track network events [Technology Preview].
+// - `PacketTranslation`, to enrich flows with packet translation information, such as Service NAT.
+// - `EbpfManager`, to enable using eBPF Manager to manage NetObserv eBPF programs. [Unsupported (*)].
+// - `UDNMapping`, to enable interfaces mapping to UDN. [Unsupported (*)].
// +kubebuilder:validation:Enum:="PacketDrop";"DNSTracking";"FlowRTT";"NetworkEvents";"PacketTranslation";"EbpfManager";"UDNMapping" type AgentFeature string @@ -285,7 +285,7 @@ type EBPFFlowFilterRule struct { // +optional PktDrops *bool `json:"pktDrops,omitempty"` - // `sampling` sampling rate for the matched flow + // `sampling` sampling rate for the matched flows, overriding the global sampling defined at `spec.agent.ebpf.sampling`. // +optional Sampling *uint32 `json:"sampling,omitempty"` } @@ -295,13 +295,16 @@ type EBPFFlowFilter struct { // Set `enable` to `true` to enable the eBPF flow filtering feature. Enable *bool `json:"enable,omitempty"` - // [deprecated (*)] this setting is not used anymore. + // [Deprecated (*)]. This setting is not used anymore. It is replaced with the `rules` list. EBPFFlowFilterRule `json:",inline"` - // `flowFilterRules` defines a list of ebpf agent flow filtering rules + // `rules` defines a list of filtering rules on the eBPF Agents. + // When filtering is enabled, by default, flows that don't match any rule are rejected. + // To change the default, you can define a rule that accepts everything: `{ action: "Accept", cidr: "0.0.0.0/0" }`, and then refine with rejecting rules. + // [Unsupported (*)]. // +kubebuilder:validation:MinItems:=1 // +kubebuilder:validation:MaxItems:=16 - FlowFilterRules []EBPFFlowFilterRule `json:"rules,omitempty"` + Rules []EBPFFlowFilterRule `json:"rules,omitempty"` } // `FlowCollectorEBPF` defines a FlowCollector that uses eBPF to collect the flows information @@ -378,22 +381,20 @@ type FlowCollectorEBPF struct { Advanced *AdvancedAgentConfig `json:"advanced,omitempty"` // List of additional features to enable. They are all disabled by default. Enabling additional features might have performance impacts. Possible values are:
- // - `PacketDrop`: enable the packets drop flows logging feature. This feature requires mounting - // the kernel debug filesystem, so the eBPF agent pods have to run as privileged. + // - `PacketDrop`: Enable the packets drop flows logging feature. This feature requires mounting + // the kernel debug filesystem, so the eBPF agent pods must run as privileged. // If the `spec.agent.ebpf.privileged` parameter is not set, an error is reported.
- // - `DNSTracking`: enable the DNS tracking feature.
- // - `FlowRTT`: enable flow latency (sRTT) extraction in the eBPF agent from TCP traffic.
- // - `NetworkEvents`: enable the network events monitoring feature, such as correlating flows and network policies. - // This feature requires mounting the kernel debug filesystem, so the eBPF agent pods have to run as privileged. + // - `DNSTracking`: Enable the DNS tracking feature.
+ // - `FlowRTT`: Enable flow latency (sRTT) extraction in the eBPF agent from TCP traffic.
+ // - `NetworkEvents`: Enable the network events monitoring feature, such as correlating flows and network policies. + // This feature requires mounting the kernel debug filesystem, so the eBPF agent pods must run as privileged. // It requires using the OVN-Kubernetes network plugin with the Observability feature. - // IMPORTANT: This feature is available as a Developer Preview.
- // - `PacketTranslation`: enable enriching flows with packet's translation information.
- // - `EbpfManager`: allow using eBPF manager to manage netobserv ebpf programs.
- // IMPORTANT: This feature is available as a Developer Preview.
- // - `UDNMapping`, to enable interfaces mappind to udn.
- // This feature requires mounting the kernel debug filesystem, so the eBPF agent pods have to run as privileged. + // IMPORTANT: This feature is available as a Technology Preview.
+ // - `PacketTranslation`: Enable enriching flows with packet translation information, such as Service NAT.
+ // - `EbpfManager`: [Unsupported (*)]. Use eBPF Manager to manage NetObserv eBPF programs. Prerequisite: the eBPF Manager operator (or upstream bpfman operator) must be installed.
+ // - `UDNMapping`: [Unsupported (*)]. Enable interfaces mapping to User Defined Networks (UDN).
+ // This feature requires mounting the kernel debug filesystem, so the eBPF agent pods must run as privileged. // It requires using the OVN-Kubernetes network plugin with the Observability feature. - // IMPORTANT: This feature is available as a Developer Preview.
// +optional Features []AgentFeature `json:"features,omitempty"` @@ -649,7 +650,7 @@ type FlowCollectorFLP struct { KafkaConsumerBatchSize int `json:"kafkaConsumerBatchSize"` // `logTypes` defines the desired record types to generate. Possible values are:
- // - `Flows` (default) to export regular network flows.
+ // - `Flows` to export regular network flows. This is the default.
// - `Conversations` to generate events for started conversations, ended conversations as well as periodic "tick" updates.
// - `EndedConversations` to generate only ended conversations events.
// - `All` to generate both network flows and all conversations events. It is not recommended due to the impact on resources footprint.
@@ -678,13 +679,15 @@ type FlowCollectorFLP struct { SubnetLabels SubnetLabels `json:"subnetLabels,omitempty"` //+optional - // `deduper` allows to sample or drop flows identified as duplicates, in order to save on resource usage. - // IMPORTANT: This feature is available as a Developer Preview. + // `deduper` allows you to sample or drop flows identified as duplicates, in order to save on resource usage. + // [Unsupported (*)]. Deduper *FLPDeduper `json:"deduper,omitempty"` // +optional - // `filters` let you define custom filters to limit the amount of generated flows. - // IMPORTANT: This feature is available as a Developer Preview. + // `filters` lets you define custom filters to limit the amount of generated flows. + // These filters provide more flexibility than the eBPF Agent filters (in `spec.agent.ebpf.flowFilter`), such as allowing to filter by Kubernetes namespace, + // but with a lesser improvement in performance. + // [Unsupported (*)]. Filters []FLPFilterSet `json:"filters"` // `advanced` allows setting some aspects of the internal configuration of the flow processor. @@ -702,11 +705,11 @@ const ( FLPDeduperSample FLPDeduperMode = "Sample" ) -// `FLPDeduper` defines the desired configuration for FLP-based deduper +// `FLPDeduper` defines the desired configuration for FLP-based deduper. type FLPDeduper struct { // Set the Processor de-duplication mode. It comes in addition to the Agent-based deduplication because the Agent cannot de-duplicate same flows reported from different nodes.
- // - Use `Drop` to drop every flow considered as duplicates, allowing saving more on resource usage but potentially loosing some information such as the network interfaces used from peer, or network events.
- // - Use `Sample` to randomly keep only 1 flow on 50 (by default) among the ones considered as duplicates. This is a compromise between dropping every duplicates or keeping every duplicates. This sampling action comes in addition to the Agent-based sampling. If both Agent and Processor sampling are 50, the combined sampling is 1:2500.
+ // - Use `Drop` to drop every flow considered as duplicates, allowing saving more on resource usage but potentially losing some information such as the network interfaces used from peer, or network events.
+ // - Use `Sample` to randomly keep only one flow out of 50, which is the default, among the ones considered as duplicates. This is a compromise between dropping every duplicate and keeping every duplicate. This sampling action comes in addition to the Agent-based sampling. If both Agent and Processor sampling values are `50`, the combined sampling is 1:2500.
// - Use `Disabled` to turn off Processor-based de-duplication.
// +kubebuilder:validation:Enum:="Disabled";"Drop";"Sample" // +kubebuilder:default:=Disabled @@ -734,13 +737,13 @@ const ( FLPFilterTargetExporters FLPFilterTarget = "Exporters" ) -// `FLPFilterSet` defines the desired configuration for FLP-based filtering satisfying all conditions +// `FLPFilterSet` defines the desired configuration for FLP-based filtering satisfying all conditions. type FLPFilterSet struct { // `filters` is a list of matches that must be all satisfied in order to remove a flow. // +optional AllOf []FLPSingleFilter `json:"allOf"` - // If specified, this filters only target a single output: `Loki`, `Metrics` or `Exporters`. By default, all outputs are targeted. + // If specified, these filters only target a single output: `Loki`, `Metrics` or `Exporters`. By default, all outputs are targeted. // +optional // +kubebuilder:validation:Enum:="";"Loki";"Metrics";"Exporters" OutputTarget FLPFilterTarget `json:"outputTarget,omitempty"` @@ -751,15 +754,15 @@ type FLPFilterSet struct { Sampling int32 `json:"sampling,omitempty"` } -// `FLPSingleFilter` defines the desired configuration for a single FLP-based filter +// `FLPSingleFilter` defines the desired configuration for a single FLP-based filter. type FLPSingleFilter struct { - // Type of matching to apply + // Type of matching to apply. // +kubebuilder:validation:Enum:="Equal";"NotEqual";"Presence";"Absence";"MatchRegex";"NotMatchRegex" // +kubebuilder:default:="Equal" MatchType FLPFilterMatch `json:"matchType"` - // Name of the field to filter on - // Refer to the documentation for the list of available fields: https://docs.openshift.com/container-platform/latest/observability/network_observability/json-flows-format-reference.html. + // Name of the field to filter on. + // Refer to the documentation for the list of available fields: https://github.com/netobserv/network-observability-operator/blob/main/docs/flows-format.adoc. 
// +required Field string `json:"field"` @@ -1329,7 +1332,7 @@ type AdvancedProcessorConfig struct { // +optional Scheduling *SchedulingConfig `json:"scheduling,omitempty"` - // Define secondary networks to be checked for resources identification. + // Defines secondary networks to be checked for resources identification. // To guarantee a correct identification, indexed values must form an unique identifier across the cluster. // If the same index is used by several resources, those resources might be incorrectly labeled. // +optional @@ -1419,7 +1422,7 @@ type AdvancedPluginConfig struct { Scheduling *SchedulingConfig `json:"scheduling,omitempty"` } -// `SubnetLabels` allows to define custom labels on subnets and IPs or to enable automatic labelling of recognized subnets in OpenShift. +// `SubnetLabels` allows you to define custom labels on subnets and IPs or to enable automatic labelling of recognized subnets in OpenShift. type SubnetLabels struct { // `openShiftAutoDetect` allows, when set to `true`, to detect automatically the machines, pods and services subnets based on the // OpenShift install configuration and the Cluster Network Operator configuration. Indirectly, this is a way to accurately detect @@ -1477,7 +1480,7 @@ type FlowCollectorExporter struct { type FlowCollectorStatus struct { // Important: Run "make" to regenerate code after modifying this file - // `conditions` represent the latest available observations of an object's state + // `conditions` represents the latest available observations of an object's state Conditions []metav1.Condition `json:"conditions"` // Namespace where console plugin and flowlogs-pipeline have been deployed. 
diff --git a/apis/flowcollector/v1beta2/flowcollector_validation_webhook.go b/apis/flowcollector/v1beta2/flowcollector_validation_webhook.go index 3d9e66c82..480e0fc4a 100644 --- a/apis/flowcollector/v1beta2/flowcollector_validation_webhook.go +++ b/apis/flowcollector/v1beta2/flowcollector_validation_webhook.go @@ -119,8 +119,8 @@ func (r *FlowCollector) validateAgent(_ context.Context, fc *FlowCollectorSpec) var errs []error if fc.Agent.EBPF.FlowFilter != nil && fc.Agent.EBPF.FlowFilter.Enable != nil && *fc.Agent.EBPF.FlowFilter.Enable { m := make(map[string]bool) - for i := range fc.Agent.EBPF.FlowFilter.FlowFilterRules { - rule := fc.Agent.EBPF.FlowFilter.FlowFilterRules[i] + for i := range fc.Agent.EBPF.FlowFilter.Rules { + rule := fc.Agent.EBPF.FlowFilter.Rules[i] key := rule.CIDR + "-" + rule.PeerCIDR if found := m[key]; found { errs = append(errs, fmt.Errorf("flow filter rule CIDR and PeerCIDR %s already exists", diff --git a/apis/flowcollector/v1beta2/flowcollector_validation_webhook_test.go b/apis/flowcollector/v1beta2/flowcollector_validation_webhook_test.go index 0ff2dac08..02c1d3993 100644 --- a/apis/flowcollector/v1beta2/flowcollector_validation_webhook_test.go +++ b/apis/flowcollector/v1beta2/flowcollector_validation_webhook_test.go @@ -44,7 +44,7 @@ func TestValidateAgent(t *testing.T) { Sampling: ptr.To(int32(100)), FlowFilter: &EBPFFlowFilter{ Enable: ptr.To(true), - FlowFilterRules: []EBPFFlowFilterRule{ + Rules: []EBPFFlowFilterRule{ { Action: "Accept", CIDR: "0.0.0.0/0", @@ -73,7 +73,7 @@ func TestValidateAgent(t *testing.T) { Sampling: ptr.To(int32(100)), FlowFilter: &EBPFFlowFilter{ Enable: ptr.To(true), - FlowFilterRules: []EBPFFlowFilterRule{ + Rules: []EBPFFlowFilterRule{ { Action: "Accept", CIDR: "0.0.0.0/0", @@ -181,7 +181,7 @@ func TestValidateAgent(t *testing.T) { EBPF: FlowCollectorEBPF{ FlowFilter: &EBPFFlowFilter{ Enable: ptr.To(true), - FlowFilterRules: []EBPFFlowFilterRule{ + Rules: []EBPFFlowFilterRule{ { Action: "Accept", CIDR: 
"0.0.0.0/0", @@ -208,7 +208,7 @@ func TestValidateAgent(t *testing.T) { EBPF: FlowCollectorEBPF{ FlowFilter: &EBPFFlowFilter{ Enable: ptr.To(true), - FlowFilterRules: []EBPFFlowFilterRule{ + Rules: []EBPFFlowFilterRule{ { Ports: intstr.FromString("abcd"), }, @@ -232,7 +232,7 @@ func TestValidateAgent(t *testing.T) { EBPF: FlowCollectorEBPF{ FlowFilter: &EBPFFlowFilter{ Enable: ptr.To(true), - FlowFilterRules: []EBPFFlowFilterRule{ + Rules: []EBPFFlowFilterRule{ { Ports: intstr.FromString("80-255"), }, @@ -255,7 +255,7 @@ func TestValidateAgent(t *testing.T) { EBPF: FlowCollectorEBPF{ FlowFilter: &EBPFFlowFilter{ Enable: ptr.To(true), - FlowFilterRules: []EBPFFlowFilterRule{ + Rules: []EBPFFlowFilterRule{ { Ports: intstr.FromString("255-80"), }, @@ -279,7 +279,7 @@ func TestValidateAgent(t *testing.T) { EBPF: FlowCollectorEBPF{ FlowFilter: &EBPFFlowFilter{ Enable: ptr.To(true), - FlowFilterRules: []EBPFFlowFilterRule{ + Rules: []EBPFFlowFilterRule{ { Ports: intstr.FromString("80-?"), }, @@ -303,7 +303,7 @@ func TestValidateAgent(t *testing.T) { EBPF: FlowCollectorEBPF{ FlowFilter: &EBPFFlowFilter{ Enable: ptr.To(true), - FlowFilterRules: []EBPFFlowFilterRule{ + Rules: []EBPFFlowFilterRule{ { Ports: intstr.FromString("255,80"), }, @@ -326,7 +326,7 @@ func TestValidateAgent(t *testing.T) { EBPF: FlowCollectorEBPF{ FlowFilter: &EBPFFlowFilter{ Enable: ptr.To(true), - FlowFilterRules: []EBPFFlowFilterRule{ + Rules: []EBPFFlowFilterRule{ { Ports: intstr.FromString("80,100,250"), }, @@ -350,7 +350,7 @@ func TestValidateAgent(t *testing.T) { EBPF: FlowCollectorEBPF{ FlowFilter: &EBPFFlowFilter{ Enable: ptr.To(true), - FlowFilterRules: []EBPFFlowFilterRule{ + Rules: []EBPFFlowFilterRule{ { CIDR: "1.1.1.1", }, diff --git a/apis/flowcollector/v1beta2/zz_generated.deepcopy.go b/apis/flowcollector/v1beta2/zz_generated.deepcopy.go index 6461b82b2..3960ab41f 100644 --- a/apis/flowcollector/v1beta2/zz_generated.deepcopy.go +++ b/apis/flowcollector/v1beta2/zz_generated.deepcopy.go 
@@ -290,8 +290,8 @@ func (in *EBPFFlowFilter) DeepCopyInto(out *EBPFFlowFilter) { **out = **in } in.EBPFFlowFilterRule.DeepCopyInto(&out.EBPFFlowFilterRule) - if in.FlowFilterRules != nil { - in, out := &in.FlowFilterRules, &out.FlowFilterRules + if in.Rules != nil { + in, out := &in.Rules, &out.Rules *out = make([]EBPFFlowFilterRule, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) diff --git a/apis/flowmetrics/v1alpha1/flowmetric_types.go b/apis/flowmetrics/v1alpha1/flowmetric_types.go index 6b8ce9a67..dee58a025 100644 --- a/apis/flowmetrics/v1alpha1/flowmetric_types.go +++ b/apis/flowmetrics/v1alpha1/flowmetric_types.go @@ -93,7 +93,7 @@ type FlowMetricSpec struct { // +optional Labels []string `json:"labels"` - // `flatten` is a list of list-type fields that must be flattened, such as Interfaces and NetworkEvents. Flattened fields generate one metric per item in that field. + // `flatten` is a list of array-type fields that must be flattened, such as Interfaces or NetworkEvents. Flattened fields generate one metric per item in that field. // For instance, when flattening `Interfaces` on a bytes counter, a flow having Interfaces [br-ex, ens5] increases one counter for `br-ex` and another for `ens5`. // +optional Flatten []string `json:"flatten"` diff --git a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml index 3529b1c6e..d2298edf7 100644 --- a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml +++ b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml @@ -137,7 +137,7 @@ spec: the kernel debug filesystem, so the eBPF pod has to run as privileged. - `PacketTranslation`: enable enriching flows with packet's translation information.
- `EbpfManager`: allow using eBPF manager to manage netobserv ebpf programs.
- - `UDNMapping`, to enable interfaces mappind to udn.
+ - `UDNMapping`, to enable interfaces mapping to udn.
items: description: |- Agent feature, can be one of:
@@ -147,7 +147,7 @@ spec: - `NetworkEvents`, to track Network events.
- `PacketTranslation`, to enrich flows with packets translation information.
- `EbpfManager`, to enable using EBPF Manager to manage netobserv ebpf programs [Developer Preview].
- - `UDNMapping`, to enable interfaces mappind to udn [Developer Preview].
+ - `UDNMapping`, to enable interfaces mapping to udn [Developer Preview].
enum: - PacketDrop - DNSTracking @@ -242,8 +242,10 @@ spec: - SCTP type: string rules: - description: '`flowFilterRules` defines a list of ebpf - agent flow filtering rules' + description: |- + `rules` defines a list of filtering rules on the eBPF Agents. + When filtering is enabled, by default, flows that don't match any rule are rejected. + To change the default, you can define a rule that accepts everything: `{ action: "Accept", cidr: "0.0.0.0/0" }`, and then refine with rejecting rules. items: description: '`EBPFFlowFilterRule` defines the desired eBPF agent configuration regarding flow filtering @@ -3950,32 +3952,30 @@ spec: features: description: |- List of additional features to enable. They are all disabled by default. Enabling additional features might have performance impacts. Possible values are:
- - `PacketDrop`: enable the packets drop flows logging feature. This feature requires mounting - the kernel debug filesystem, so the eBPF agent pods have to run as privileged. + - `PacketDrop`: Enable the packets drop flows logging feature. This feature requires mounting + the kernel debug filesystem, so the eBPF agent pods must run as privileged. If the `spec.agent.ebpf.privileged` parameter is not set, an error is reported.
- - `DNSTracking`: enable the DNS tracking feature.
- - `FlowRTT`: enable flow latency (sRTT) extraction in the eBPF agent from TCP traffic.
- - `NetworkEvents`: enable the network events monitoring feature, such as correlating flows and network policies. - This feature requires mounting the kernel debug filesystem, so the eBPF agent pods have to run as privileged. + - `DNSTracking`: Enable the DNS tracking feature.
+ - `FlowRTT`: Enable flow latency (sRTT) extraction in the eBPF agent from TCP traffic.
+ - `NetworkEvents`: Enable the network events monitoring feature, such as correlating flows and network policies. + This feature requires mounting the kernel debug filesystem, so the eBPF agent pods must run as privileged. It requires using the OVN-Kubernetes network plugin with the Observability feature. - IMPORTANT: This feature is available as a Developer Preview.
- - `PacketTranslation`: enable enriching flows with packet's translation information.
- - `EbpfManager`: allow using eBPF manager to manage netobserv ebpf programs.
- IMPORTANT: This feature is available as a Developer Preview.
- - `UDNMapping`, to enable interfaces mappind to udn.
- This feature requires mounting the kernel debug filesystem, so the eBPF agent pods have to run as privileged. + IMPORTANT: This feature is available as a Technology Preview.
+ - `PacketTranslation`: Enable enriching flows with packet translation information, such as Service NAT.
+ - `EbpfManager`: [Unsupported (*)]. Use eBPF Manager to manage NetObserv eBPF programs. Prerequisite: the eBPF Manager operator (or upstream bpfman operator) must be installed.
+ - `UDNMapping`: [Unsupported (*)]. Enable interfaces mapping to User Defined Networks (UDN).
+ This feature requires mounting the kernel debug filesystem, so the eBPF agent pods must run as privileged. It requires using the OVN-Kubernetes network plugin with the Observability feature. - IMPORTANT: This feature is available as a Developer Preview.
items: description: |- Agent feature, can be one of:
- `PacketDrop`, to track packet drops.
- `DNSTracking`, to track specific information on DNS traffic.
- `FlowRTT`, to track TCP latency.
- - `NetworkEvents`, to track Network events [Developer Preview].
- - `PacketTranslation`, to enrich flows with packets translation information.
- - `EbpfManager`, to enable using EBPF Manager to manage netobserv ebpf programs [Developer Preview].
- - `UDNMapping`, to enable interfaces mappind to udn [Developer Preview].
+ - `NetworkEvents`, to track network events [Technology Preview].
+ - `PacketTranslation`, to enrich flows with packet translation information, such as Service NAT.
+ - `EbpfManager`, to enable using eBPF Manager to manage NetObserv eBPF programs. [Unsupported (*)].
+ - `UDNMapping`, to enable interfaces mapping to UDN. [Unsupported (*)].
enum: - PacketDrop - DNSTracking @@ -4070,8 +4070,11 @@ spec: - SCTP type: string rules: - description: '`flowFilterRules` defines a list of ebpf - agent flow filtering rules' + description: |- + `rules` defines a list of filtering rules on the eBPF Agents. + When filtering is enabled, by default, flows that don't match any rule are rejected. + To change the default, you can define a rule that accepts everything: `{ action: "Accept", cidr: "0.0.0.0/0" }`, and then refine with rejecting rules. + [Unsupported (*)]. items: description: '`EBPFFlowFilterRule` defines the desired eBPF agent configuration regarding flow filtering @@ -4155,7 +4158,8 @@ spec: type: string sampling: description: '`sampling` sampling rate for the matched - flow' + flows, overriding the global sampling defined + at `spec.agent.ebpf.sampling`.' format: int32 type: integer sourcePorts: @@ -4190,7 +4194,7 @@ spec: type: array sampling: description: '`sampling` sampling rate for the matched - flow' + flows, overriding the global sampling defined at `spec.agent.ebpf.sampling`.' format: int32 type: integer sourcePorts: @@ -6052,7 +6056,7 @@ spec: - Kafka type: string exporters: - description: '`exporters` define additional optional exporters for + description: '`exporters` defines additional optional exporters for custom consumption or storage.' items: description: '`FlowCollectorExporter` defines an additional exporter @@ -8157,7 +8161,7 @@ spec: type: object secondaryNetworks: description: |- - Define secondary networks to be checked for resources identification. + Defines secondary networks to be checked for resources identification. To guarantee a correct identification, indexed values must form an unique identifier across the cluster. If the same index is used by several resources, those resources might be incorrectly labeled. 
items: @@ -8195,15 +8199,15 @@ spec: type: string deduper: description: |- - `deduper` allows to sample or drop flows identified as duplicates, in order to save on resource usage. - IMPORTANT: This feature is available as a Developer Preview. + `deduper` allows you to sample or drop flows identified as duplicates, in order to save on resource usage. + [Unsupported (*)]. properties: mode: default: Disabled description: |- Set the Processor de-duplication mode. It comes in addition to the Agent-based deduplication because the Agent cannot de-duplicate same flows reported from different nodes.
- - Use `Drop` to drop every flow considered as duplicates, allowing saving more on resource usage but potentially loosing some information such as the network interfaces used from peer, or network events.
- - Use `Sample` to randomly keep only 1 flow on 50 (by default) among the ones considered as duplicates. This is a compromise between dropping every duplicates or keeping every duplicates. This sampling action comes in addition to the Agent-based sampling. If both Agent and Processor sampling are 50, the combined sampling is 1:2500.
+ - Use `Drop` to drop every flow considered as duplicates, allowing saving more on resource usage but potentially losing some information such as the network interfaces used from peer, or network events.
+ - Use `Sample` to randomly keep only one flow on 50, which is the default, among the ones considered as duplicates. This is a compromise between dropping every duplicate or keeping every duplicate. This sampling action comes in addition to the Agent-based sampling. If both Agent and Processor sampling values are `50`, the combined sampling is 1:2500.
- Use `Disabled` to turn off Processor-based de-duplication.
enum: - Disabled @@ -8220,27 +8224,29 @@ spec: type: object filters: description: |- - `filters` let you define custom filters to limit the amount of generated flows. - IMPORTANT: This feature is available as a Developer Preview. + `filters` lets you define custom filters to limit the amount of generated flows. + These filters provide more flexibility than the eBPF Agent filters (in `spec.agent.ebpf.flowFilter`), such as allowing to filter by Kubernetes namespace, + but with a lesser improvement in performance. + [Unsupported (*)]. items: description: '`FLPFilterSet` defines the desired configuration - for FLP-based filtering satisfying all conditions' + for FLP-based filtering satisfying all conditions.' properties: allOf: description: '`filters` is a list of matches that must be all satisfied in order to remove a flow.' items: description: '`FLPSingleFilter` defines the desired configuration - for a single FLP-based filter' + for a single FLP-based filter.' properties: field: description: |- - Name of the field to filter on - Refer to the documentation for the list of available fields: https://docs.openshift.com/container-platform/latest/observability/network_observability/json-flows-format-reference.html. + Name of the field to filter on. + Refer to the documentation for the list of available fields: https://github.com/netobserv/network-observability-operator/blob/main/docs/flows-format.adoc. type: string matchType: default: Equal - description: Type of matching to apply + description: Type of matching to apply. enum: - Equal - NotEqual @@ -8261,8 +8267,8 @@ spec: type: object type: array outputTarget: - description: 'If specified, this filters only target a single - output: `Loki`, `Metrics` or `Exporters`. By default, + description: 'If specified, these filters only target a + single output: `Loki`, `Metrics` or `Exporters`. By default, all outputs are targeted.' 
enum: - "" @@ -8625,7 +8631,7 @@ spec: default: Flows description: |- `logTypes` defines the desired record types to generate. Possible values are:
- - `Flows` (default) to export regular network flows.
+ - `Flows` to export regular network flows. This is the default.
- `Conversations` to generate events for started conversations, ended conversations as well as periodic "tick" updates.
- `EndedConversations` to generate only ended conversations events.
- `All` to generate both network flows and all conversations events. It is not recommended due to the impact on resources footprint.
@@ -9043,7 +9049,7 @@ spec: description: '`FlowCollectorStatus` defines the observed state of FlowCollector' properties: conditions: - description: '`conditions` represent the latest available observations + description: '`conditions` represents the latest available observations of an object''s state' items: description: Condition contains details for one aspect of the current diff --git a/bundle/manifests/flows.netobserv.io_flowmetrics.yaml b/bundle/manifests/flows.netobserv.io_flowmetrics.yaml index ad3b13f7d..818da1b67 100644 --- a/bundle/manifests/flows.netobserv.io_flowmetrics.yaml +++ b/bundle/manifests/flows.netobserv.io_flowmetrics.yaml @@ -198,7 +198,7 @@ spec: type: array flatten: description: |- - `flatten` is a list of list-type fields that must be flattened, such as Interfaces and NetworkEvents. Flattened fields generate one metric per item in that field. + `flatten` is a list of array-type fields that must be flattened, such as Interfaces or NetworkEvents. Flattened fields generate one metric per item in that field. For instance, when flattening `Interfaces` on a bytes counter, a flow having Interfaces [br-ex, ens5] increases one counter for `br-ex` and another for `ens5`. items: type: string diff --git a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml index a48070ee9..a481e14fd 100644 --- a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml +++ b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml @@ -122,7 +122,7 @@ spec: the kernel debug filesystem, so the eBPF pod has to run as privileged. - `PacketTranslation`: enable enriching flows with packet's translation information.
- `EbpfManager`: allow using eBPF manager to manage netobserv ebpf programs.
- - `UDNMapping`, to enable interfaces mappind to udn.
+ - `UDNMapping`, to enable interfaces mapping to udn.
items: description: |- Agent feature, can be one of:
@@ -132,7 +132,7 @@ spec: - `NetworkEvents`, to track Network events.
- `PacketTranslation`, to enrich flows with packets translation information.
- `EbpfManager`, to enable using EBPF Manager to manage netobserv ebpf programs [Developer Preview].
- - `UDNMapping`, to enable interfaces mappind to udn [Developer Preview].
+ - `UDNMapping`, to enable interfaces mapping to udn [Developer Preview].
enum: - PacketDrop - DNSTracking @@ -215,7 +215,10 @@ spec: - SCTP type: string rules: - description: '`flowFilterRules` defines a list of ebpf agent flow filtering rules' + description: |- + `rules` defines a list of filtering rules on the eBPF Agents. + When filtering is enabled, by default, flows that don't match any rule are rejected. + To change the default, you can define a rule that accepts everything: `{ action: "Accept", cidr: "0.0.0.0/0" }`, and then refine with rejecting rules. items: description: '`EBPFFlowFilterRule` defines the desired eBPF agent configuration regarding flow filtering rule.' properties: @@ -3632,32 +3635,30 @@ spec: features: description: |- List of additional features to enable. They are all disabled by default. Enabling additional features might have performance impacts. Possible values are:
- - `PacketDrop`: enable the packets drop flows logging feature. This feature requires mounting - the kernel debug filesystem, so the eBPF agent pods have to run as privileged. + - `PacketDrop`: Enable the packets drop flows logging feature. This feature requires mounting + the kernel debug filesystem, so the eBPF agent pods must run as privileged. If the `spec.agent.ebpf.privileged` parameter is not set, an error is reported.
- - `DNSTracking`: enable the DNS tracking feature.
- - `FlowRTT`: enable flow latency (sRTT) extraction in the eBPF agent from TCP traffic.
- - `NetworkEvents`: enable the network events monitoring feature, such as correlating flows and network policies. - This feature requires mounting the kernel debug filesystem, so the eBPF agent pods have to run as privileged. + - `DNSTracking`: Enable the DNS tracking feature.
+ - `FlowRTT`: Enable flow latency (sRTT) extraction in the eBPF agent from TCP traffic.
+ - `NetworkEvents`: Enable the network events monitoring feature, such as correlating flows and network policies. + This feature requires mounting the kernel debug filesystem, so the eBPF agent pods must run as privileged. It requires using the OVN-Kubernetes network plugin with the Observability feature. - IMPORTANT: This feature is available as a Developer Preview.
- - `PacketTranslation`: enable enriching flows with packet's translation information.
- - `EbpfManager`: allow using eBPF manager to manage netobserv ebpf programs.
- IMPORTANT: This feature is available as a Developer Preview.
- - `UDNMapping`, to enable interfaces mappind to udn.
- This feature requires mounting the kernel debug filesystem, so the eBPF agent pods have to run as privileged. + IMPORTANT: This feature is available as a Technology Preview.
+ - `PacketTranslation`: Enable enriching flows with packet translation information, such as Service NAT.
+ - `EbpfManager`: [Unsupported (*)]. Use eBPF Manager to manage NetObserv eBPF programs. Prerequisite: the eBPF Manager operator (or upstream bpfman operator) must be installed.
+ - `UDNMapping`: [Unsupported (*)]. Enable interfaces mapping to User Defined Networks (UDN).
+ This feature requires mounting the kernel debug filesystem, so the eBPF agent pods must run as privileged. It requires using the OVN-Kubernetes network plugin with the Observability feature. - IMPORTANT: This feature is available as a Developer Preview.
items: description: |- Agent feature, can be one of:
- `PacketDrop`, to track packet drops.
- `DNSTracking`, to track specific information on DNS traffic.
- `FlowRTT`, to track TCP latency.
- - `NetworkEvents`, to track Network events [Developer Preview].
- - `PacketTranslation`, to enrich flows with packets translation information.
- - `EbpfManager`, to enable using EBPF Manager to manage netobserv ebpf programs [Developer Preview].
- - `UDNMapping`, to enable interfaces mappind to udn [Developer Preview].
+ - `NetworkEvents`, to track network events [Technology Preview].
+ - `PacketTranslation`, to enrich flows with packets translation information, such as Service NAT.
+ - `EbpfManager`, to enable using eBPF Manager to manage NetObserv eBPF programs. [Unsupported (*)].
+ - `UDNMapping`, to enable interfaces mapping to UDN. [Unsupported (*)].
enum: - PacketDrop - DNSTracking @@ -3740,7 +3741,11 @@ spec: - SCTP type: string rules: - description: '`flowFilterRules` defines a list of ebpf agent flow filtering rules' + description: |- + `rules` defines a list of filtering rules on the eBPF Agents. + When filtering is enabled, by default, flows that don't match any rule are rejected. + To change the default, you can define a rule that accepts everything: `{ action: "Accept", cidr: "0.0.0.0/0" }`, and then refine with rejecting rules. + [Unsupported (*)]. items: description: '`EBPFFlowFilterRule` defines the desired eBPF agent configuration regarding flow filtering rule.' properties: @@ -3810,7 +3815,7 @@ spec: - SCTP type: string sampling: - description: '`sampling` sampling rate for the matched flow' + description: '`sampling` sampling rate for the matched flows, overriding the global sampling defined at `spec.agent.ebpf.sampling`.' format: int32 type: integer sourcePorts: @@ -3844,7 +3849,7 @@ spec: minItems: 1 type: array sampling: - description: '`sampling` sampling rate for the matched flow' + description: '`sampling` sampling rate for the matched flows, overriding the global sampling defined at `spec.agent.ebpf.sampling`.' format: int32 type: integer sourcePorts: @@ -5604,7 +5609,7 @@ spec: - Kafka type: string exporters: - description: '`exporters` define additional optional exporters for custom consumption or storage.' + description: '`exporters` defines additional optional exporters for custom consumption or storage.' items: description: '`FlowCollectorExporter` defines an additional exporter to send enriched flows to.' properties: @@ -7501,7 +7506,7 @@ spec: type: object secondaryNetworks: description: |- - Define secondary networks to be checked for resources identification. + Defines secondary networks to be checked for resources identification. To guarantee a correct identification, indexed values must form an unique identifier across the cluster. 
If the same index is used by several resources, those resources might be incorrectly labeled. items: @@ -7534,15 +7539,15 @@ spec: type: string deduper: description: |- - `deduper` allows to sample or drop flows identified as duplicates, in order to save on resource usage. - IMPORTANT: This feature is available as a Developer Preview. + `deduper` allows you to sample or drop flows identified as duplicates, in order to save on resource usage. + [Unsupported (*)]. properties: mode: default: Disabled description: |- Set the Processor de-duplication mode. It comes in addition to the Agent-based deduplication because the Agent cannot de-duplicate same flows reported from different nodes.
- - Use `Drop` to drop every flow considered as duplicates, allowing saving more on resource usage but potentially loosing some information such as the network interfaces used from peer, or network events.
- - Use `Sample` to randomly keep only 1 flow on 50 (by default) among the ones considered as duplicates. This is a compromise between dropping every duplicates or keeping every duplicates. This sampling action comes in addition to the Agent-based sampling. If both Agent and Processor sampling are 50, the combined sampling is 1:2500.
+ - Use `Drop` to drop every flow considered as duplicates, allowing saving more on resource usage but potentially losing some information such as the network interfaces used from peer, or network events.
+ - Use `Sample` to randomly keep only one flow in 50, which is the default, among the ones considered as duplicates. This is a compromise between dropping every duplicate and keeping every duplicate. This sampling action comes in addition to the Agent-based sampling. If both Agent and Processor sampling values are `50`, the combined sampling is 1:2500.
- Use `Disabled` to turn off Processor-based de-duplication.
enum: - Disabled @@ -7558,24 +7563,26 @@ spec: type: object filters: description: |- - `filters` let you define custom filters to limit the amount of generated flows. - IMPORTANT: This feature is available as a Developer Preview. + `filters` lets you define custom filters to limit the amount of generated flows. + These filters provide more flexibility than the eBPF Agent filters (in `spec.agent.ebpf.flowFilter`), such as allowing to filter by Kubernetes namespace, + but with a lesser improvement in performance. + [Unsupported (*)]. items: - description: '`FLPFilterSet` defines the desired configuration for FLP-based filtering satisfying all conditions' + description: '`FLPFilterSet` defines the desired configuration for FLP-based filtering satisfying all conditions.' properties: allOf: description: '`filters` is a list of matches that must be all satisfied in order to remove a flow.' items: - description: '`FLPSingleFilter` defines the desired configuration for a single FLP-based filter' + description: '`FLPSingleFilter` defines the desired configuration for a single FLP-based filter.' properties: field: description: |- - Name of the field to filter on - Refer to the documentation for the list of available fields: https://docs.openshift.com/container-platform/latest/observability/network_observability/json-flows-format-reference.html. + Name of the field to filter on. + Refer to the documentation for the list of available fields: https://github.com/netobserv/network-observability-operator/blob/main/docs/flows-format.adoc. type: string matchType: default: Equal - description: Type of matching to apply + description: Type of matching to apply. enum: - Equal - NotEqual @@ -7593,7 +7600,7 @@ spec: type: object type: array outputTarget: - description: 'If specified, this filters only target a single output: `Loki`, `Metrics` or `Exporters`. By default, all outputs are targeted.' 
+ description: 'If specified, these filters only target a single output: `Loki`, `Metrics` or `Exporters`. By default, all outputs are targeted.' enum: - "" - Loki @@ -7946,7 +7953,7 @@ spec: default: Flows description: |- `logTypes` defines the desired record types to generate. Possible values are:
- - `Flows` (default) to export regular network flows.
+ - `Flows` to export regular network flows. This is the default.
- `Conversations` to generate events for started conversations, ended conversations as well as periodic "tick" updates.
- `EndedConversations` to generate only ended conversations events.
- `All` to generate both network flows and all conversations events. It is not recommended due to the impact on resources footprint.
@@ -8325,7 +8332,7 @@ spec: description: '`FlowCollectorStatus` defines the observed state of FlowCollector' properties: conditions: - description: '`conditions` represent the latest available observations of an object''s state' + description: '`conditions` represents the latest available observations of an object''s state' items: description: Condition contains details for one aspect of the current state of this API Resource. properties: diff --git a/config/crd/bases/flows.netobserv.io_flowmetrics.yaml b/config/crd/bases/flows.netobserv.io_flowmetrics.yaml index f8d868b2a..5adc45fa1 100644 --- a/config/crd/bases/flows.netobserv.io_flowmetrics.yaml +++ b/config/crd/bases/flows.netobserv.io_flowmetrics.yaml @@ -188,7 +188,7 @@ spec: type: array flatten: description: |- - `flatten` is a list of list-type fields that must be flattened, such as Interfaces and NetworkEvents. Flattened fields generate one metric per item in that field. + `flatten` is a list of array-type fields that must be flattened, such as Interfaces or NetworkEvents. Flattened fields generate one metric per item in that field. For instance, when flattening `Interfaces` on a bytes counter, a flow having Interfaces [br-ex, ens5] increases one counter for `br-ex` and another for `ens5`. items: type: string diff --git a/controllers/consoleplugin/config/static-frontend-config.yaml b/controllers/consoleplugin/config/static-frontend-config.yaml index b9339b571..7bd8064b3 100644 --- a/controllers/consoleplugin/config/static-frontend-config.yaml +++ b/controllers/consoleplugin/config/static-frontend-config.yaml @@ -1038,22 +1038,22 @@ filters: name: Xlat Zone Id component: number - id: xlat_src_address - name: Xlat src address + name: Xlat source address component: text category: source hint: Specify a single IP or range. - id: xlat_dst_address - name: Xlat dst address + name: Xlat destination address component: text category: destination hint: Specify a single IP or range. 
- id: xlat_src_port - name: Xlat src port + name: Xlat source port component: autocomplete category: source hint: Specify a single port number or name. - id: xlat_dst_port - name: Xlat dst port + name: Xlat destination port component: autocomplete category: destination hint: Specify a single port number or name. @@ -1402,22 +1402,22 @@ fields: description: packet translation zone id - name: XlatSrcPort type: number - description: packet translation src port + description: packet translation source port - name: XlatDstPort type: number - description: packet translation dst port + description: packet translation destination port - name: XlatSrcAddr type: string - description: packet translation src address + description: packet translation source address - name: XlatDstAddr type: string - description: packet translation dst address + description: packet translation destination address - name: K8S_ClusterName type: string description: Cluster name or identifier - name: _RecordType type: string - description: "Type of record: 'flowLog' for regular flow logs, or 'newConnection', 'heartbeat', 'endConnection' for conversation tracking" + description: "Type of record: `flowLog` for regular flow logs, or `newConnection`, `heartbeat`, `endConnection` for conversation tracking" - name: _HashId type: string description: In conversation tracking, the conversation identifier diff --git a/controllers/ebpf/agent_controller.go b/controllers/ebpf/agent_controller.go index e4cea0f27..1d9412ff9 100644 --- a/controllers/ebpf/agent_controller.go +++ b/controllers/ebpf/agent_controller.go @@ -504,8 +504,8 @@ func (c *AgentController) envConfig(ctx context.Context, coll *flowslatest.FlowC if helper.IsEBFPFlowFilterEnabled(&coll.Spec.Agent.EBPF) { config = append(config, corev1.EnvVar{Name: envEnableFlowFilter, Value: "true"}) - if len(coll.Spec.Agent.EBPF.FlowFilter.FlowFilterRules) != 0 { - if filterRules := c.configureFlowFiltersRules(coll.Spec.Agent.EBPF.FlowFilter.FlowFilterRules); 
filterRules != nil { + if len(coll.Spec.Agent.EBPF.FlowFilter.Rules) != 0 { + if filterRules := c.configureFlowFiltersRules(coll.Spec.Agent.EBPF.FlowFilter.Rules); filterRules != nil { config = append(config, filterRules...) } } else { diff --git a/docs/FlowCollector.md b/docs/FlowCollector.md index f1cad2c0c..a9267f768 100644 --- a/docs/FlowCollector.md +++ b/docs/FlowCollector.md @@ -294,7 +294,7 @@ If the `spec.agent.ebpf.privileged` parameter is not set, an error is reported.< the kernel debug filesystem, so the eBPF pod has to run as privileged. - `PacketTranslation`: enable enriching flows with packet's translation information.
- `EbpfManager`: allow using eBPF manager to manage netobserv ebpf programs.
-- `UDNMapping`, to enable interfaces mappind to udn.

+- `UDNMapping`, to enable interfaces mapping to udn.

false @@ -537,7 +537,9 @@ To filter two ports, use a "port1,port2" in string format. For example, `ports: rules []object - `flowFilterRules` defines a list of ebpf agent flow filtering rules
+ `rules` defines a list of filtering rules on the eBPF Agents. +When filtering is enabled, by default, flows that don't match any rule are rejected. +To change the default, you can define a rule that accepts everything: `{ action: "Accept", cidr: "0.0.0.0/0" }`, and then refine with rejecting rules.
false @@ -6106,7 +6108,7 @@ Kafka can provide better scalability, resiliency, and high availability (for mor exporters []object - `exporters` define additional optional exporters for custom consumption or storage.
+ `exporters` defines additional optional exporters for custom consumption or storage.
false @@ -6271,22 +6273,20 @@ Otherwise it is matched as a case-sensitive string.
[]enum List of additional features to enable. They are all disabled by default. Enabling additional features might have performance impacts. Possible values are:
-- `PacketDrop`: enable the packets drop flows logging feature. This feature requires mounting -the kernel debug filesystem, so the eBPF agent pods have to run as privileged. +- `PacketDrop`: Enable the packets drop flows logging feature. This feature requires mounting +the kernel debug filesystem, so the eBPF agent pods must run as privileged. If the `spec.agent.ebpf.privileged` parameter is not set, an error is reported.
-- `DNSTracking`: enable the DNS tracking feature.
-- `FlowRTT`: enable flow latency (sRTT) extraction in the eBPF agent from TCP traffic.
-- `NetworkEvents`: enable the network events monitoring feature, such as correlating flows and network policies. -This feature requires mounting the kernel debug filesystem, so the eBPF agent pods have to run as privileged. +- `DNSTracking`: Enable the DNS tracking feature.
+- `FlowRTT`: Enable flow latency (sRTT) extraction in the eBPF agent from TCP traffic.
+- `NetworkEvents`: Enable the network events monitoring feature, such as correlating flows and network policies. +This feature requires mounting the kernel debug filesystem, so the eBPF agent pods must run as privileged. It requires using the OVN-Kubernetes network plugin with the Observability feature. -IMPORTANT: This feature is available as a Developer Preview.
-- `PacketTranslation`: enable enriching flows with packet's translation information.
-- `EbpfManager`: allow using eBPF manager to manage netobserv ebpf programs.
-IMPORTANT: This feature is available as a Developer Preview.
-- `UDNMapping`, to enable interfaces mappind to udn.
-This feature requires mounting the kernel debug filesystem, so the eBPF agent pods have to run as privileged. -It requires using the OVN-Kubernetes network plugin with the Observability feature. -IMPORTANT: This feature is available as a Developer Preview.

+IMPORTANT: This feature is available as a Technology Preview.
+- `PacketTranslation`: Enable enriching flows with packet translation information, such as Service NAT.
+- `EbpfManager`: [Unsupported (*)]. Use eBPF Manager to manage NetObserv eBPF programs. Prerequisite: the eBPF Manager operator (or upstream bpfman operator) must be installed.
+- `UDNMapping`: [Unsupported (*)]. Enable interfaces mapping to User Defined Networks (UDN).
+This feature requires mounting the kernel debug filesystem, so the eBPF agent pods must run as privileged. +It requires using the OVN-Kubernetes network plugin with the Observability feature.
false @@ -8283,14 +8283,17 @@ To filter two ports, use a "port1,port2" in string format. For example, `ports: rules []object - `flowFilterRules` defines a list of ebpf agent flow filtering rules
+ `rules` defines a list of filtering rules on the eBPF Agents. +When filtering is enabled, by default, flows that don't match any rule are rejected. +To change the default, you can define a rule that accepts everything: `{ action: "Accept", cidr: "0.0.0.0/0" }`, and then refine with rejecting rules. +[Unsupported (*)].
false sampling integer - `sampling` sampling rate for the matched flow
+ `sampling` sampling rate for the matched flows, overriding the global sampling defined at `spec.agent.ebpf.sampling`.

Format: int32
@@ -8431,7 +8434,7 @@ To filter two ports, use a "port1,port2" in string format. For example, `ports: sampling integer - `sampling` sampling rate for the matched flow
+ `sampling` sampling rate for the matched flows, overriding the global sampling defined at `spec.agent.ebpf.sampling`.

Format: int32
@@ -14387,16 +14390,18 @@ such as `GOGC` and `GOMAXPROCS` env vars. Set these values at your own risk.
deduper object - `deduper` allows to sample or drop flows identified as duplicates, in order to save on resource usage. -IMPORTANT: This feature is available as a Developer Preview.
+ `deduper` allows you to sample or drop flows identified as duplicates, in order to save on resource usage. +[Unsupported (*)].
false filters []object - `filters` let you define custom filters to limit the amount of generated flows. -IMPORTANT: This feature is available as a Developer Preview.
+ `filters` lets you define custom filters to limit the amount of generated flows. +These filters provide more flexibility than the eBPF Agent filters (in `spec.agent.ebpf.flowFilter`), such as allowing you to filter by Kubernetes namespace, +but with a lesser improvement in performance. +[Unsupported (*)].
false @@ -14462,7 +14467,7 @@ This setting is ignored when Kafka is disabled.
enum `logTypes` defines the desired record types to generate. Possible values are:
-- `Flows` (default) to export regular network flows.
+- `Flows` to export regular network flows. This is the default.
- `Conversations` to generate events for started conversations, ended conversations as well as periodic "tick" updates.
- `EndedConversations` to generate only ended conversations events.
- `All` to generate both network flows and all conversations events. It is not recommended due to the impact on resources footprint.

@@ -14632,7 +14637,7 @@ By convention, some values are forbidden. It must be greater than 1024 and diffe secondaryNetworks []object - Define secondary networks to be checked for resources identification. + Defines secondary networks to be checked for resources identification. To guarantee a correct identification, indexed values must form an unique identifier across the cluster. If the same index is used by several resources, those resources might be incorrectly labeled.
@@ -16429,8 +16434,8 @@ Fields absent from the 'k8s.v1.cni.cncf.io/network-status' annotation must not b -`deduper` allows to sample or drop flows identified as duplicates, in order to save on resource usage. -IMPORTANT: This feature is available as a Developer Preview. +`deduper` allows you to sample or drop flows identified as duplicates, in order to save on resource usage. +[Unsupported (*)]. @@ -16446,8 +16451,8 @@ IMPORTANT: This feature is available as a Developer Preview.
enum Set the Processor de-duplication mode. It comes in addition to the Agent-based deduplication because the Agent cannot de-duplicate same flows reported from different nodes.
-- Use `Drop` to drop every flow considered as duplicates, allowing saving more on resource usage but potentially loosing some information such as the network interfaces used from peer, or network events.
-- Use `Sample` to randomly keep only 1 flow on 50 (by default) among the ones considered as duplicates. This is a compromise between dropping every duplicates or keeping every duplicates. This sampling action comes in addition to the Agent-based sampling. If both Agent and Processor sampling are 50, the combined sampling is 1:2500.
+- Use `Drop` to drop every flow considered as duplicates, allowing saving more on resource usage but potentially losing some information such as the network interfaces used from peer, or network events.
+- Use `Sample` to randomly keep only one flow in 50, which is the default, among the ones considered as duplicates. This is a compromise between dropping every duplicate and keeping every duplicate. This sampling action comes in addition to the Agent-based sampling. If both Agent and Processor sampling values are `50`, the combined sampling is 1:2500.
- Use `Disabled` to turn off Processor-based de-duplication.


Enum: Disabled, Drop, Sample
@@ -16474,7 +16479,7 @@ IMPORTANT: This feature is available as a Developer Preview. -`FLPFilterSet` defines the desired configuration for FLP-based filtering satisfying all conditions +`FLPFilterSet` defines the desired configuration for FLP-based filtering satisfying all conditions. @@ -16496,7 +16501,7 @@ IMPORTANT: This feature is available as a Developer Preview. @@ -16520,7 +16525,7 @@ IMPORTANT: This feature is available as a Developer Preview. -`FLPSingleFilter` defines the desired configuration for a single FLP-based filter +`FLPSingleFilter` defines the desired configuration for a single FLP-based filter.
outputTarget enum - If specified, this filters only target a single output: `Loki`, `Metrics` or `Exporters`. By default, all outputs are targeted.
+ If specified, these filters only target a single output: `Loki`, `Metrics` or `Exporters`. By default, all outputs are targeted.

Enum: , Loki, Metrics, Exporters
@@ -16535,15 +16540,15 @@ IMPORTANT: This feature is available as a Developer Preview. diff --git a/docs/FlowMetric.md b/docs/FlowMetric.md index afa5f9097..31c95eb70 100644 --- a/docs/FlowMetric.md +++ b/docs/FlowMetric.md @@ -155,7 +155,7 @@ Refer to the documentation for the list of available fields: https://docs.opensh diff --git a/docs/flowcollector-flows-netobserv-io-v1beta2.adoc b/docs/flowcollector-flows-netobserv-io-v1beta2.adoc index 76865c55f..762d9fb54 100644 --- a/docs/flowcollector-flows-netobserv-io-v1beta2.adoc +++ b/docs/flowcollector-flows-netobserv-io-v1beta2.adoc @@ -102,7 +102,7 @@ Kafka can provide better scalability, resiliency, and high availability (for mor | `exporters` | `array` -| `exporters` define additional optional exporters for custom consumption or storage. +| `exporters` defines additional optional exporters for custom consumption or storage. | `kafka` | `object` @@ -204,19 +204,27 @@ Otherwise it is matched as a case-sensitive string. | `array (string)` | List of additional features to enable. They are all disabled by default. Enabling additional features might have performance impacts. Possible values are: + -- `PacketDrop`: enable the packets drop flows logging feature. This feature requires mounting -the kernel debug filesystem, so the eBPF agent pods have to run as privileged. +- `PacketDrop`: Enable the packets drop flows logging feature. This feature requires mounting +the kernel debug filesystem, so the eBPF agent pods must run as privileged. If the `spec.agent.ebpf.privileged` parameter is not set, an error is reported. + -- `DNSTracking`: enable the DNS tracking feature. + +- `DNSTracking`: Enable the DNS tracking feature. + -- `FlowRTT`: enable flow latency (sRTT) extraction in the eBPF agent from TCP traffic. + +- `FlowRTT`: Enable flow latency (sRTT) extraction in the eBPF agent from TCP traffic. + -- `NetworkEvents`: enable the network events monitoring feature, such as correlating flows and network policies. 
-This feature requires mounting the kernel debug filesystem, so the eBPF agent pods have to run as privileged. +- `NetworkEvents`: Enable the network events monitoring feature, such as correlating flows and network policies. +This feature requires mounting the kernel debug filesystem, so the eBPF agent pods must run as privileged. It requires using the OVN-Kubernetes network plugin with the Observability feature. -IMPORTANT: This feature is available as a Developer Preview. + +IMPORTANT: This feature is available as a Technology Preview. + +- `PacketTranslation`: Enable enriching flows with packet translation information, such as Service NAT. + + +- `EbpfManager`: [Unsupported (*)]. Use eBPF Manager to manage Network Observability eBPF programs. Pre-requisite: the eBPF Manager operator (or upstream bpfman operator) must be installed. + + +- `UDNMapping`: [Unsupported (*)]. Enable interfaces mapping to User Defined Networks (UDN). + + +This feature requires mounting the kernel debug filesystem, so the eBPF agent pods must run as privileged. +It requires using the OVN-Kubernetes network plugin with the Observability feature. | `flowFilter` | `object` @@ -407,6 +415,11 @@ To filter two ports, use a "port1,port2" in string format. For example, `ports: | `integer` | `icmpType`, for ICMP traffic, optionally defines the ICMP type to filter flows by. +| `peerCIDR` +| `string` +| `peerCIDR` defines the Peer IP CIDR to filter flows by. +Examples: `10.10.10.0/24` or `100:100:100:100::/64` + | `peerIP` | `string` | `peerIP` optionally defines the remote IP address to filter flows by. @@ -427,6 +440,120 @@ To filter two ports, use a "port1,port2" in string format. For example, `ports: | `string` | `protocol` optionally defines a protocol to filter flows by. The available options are `TCP`, `UDP`, `ICMP`, `ICMPv6`, and `SCTP`. +| `rules` +| `array` +| `rules` defines a list of filtering rules on the eBPF Agents. 
+When filtering is enabled, by default, flows that don't match any rule are rejected. +To change the default, you can define a rule that accepts everything: `{ action: "Accept", cidr: "0.0.0.0/0" }`, and then refine with rejecting rules. +[Unsupported (*)]. + +| `sampling` +| `integer` +| `sampling` sampling rate for the matched flows, overriding the global sampling defined at `spec.agent.ebpf.sampling`. + +| `sourcePorts` +| `integer-or-string` +| `sourcePorts` optionally defines the source ports to filter flows by. +To filter a single port, set a single port as an integer value. For example, `sourcePorts: 80`. +To filter a range of ports, use a "start-end" range in string format. For example, `sourcePorts: "80-100"`. +To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`. + +| `tcpFlags` +| `string` +| `tcpFlags` optionally defines TCP flags to filter flows by. +In addition to the standard flags (RFC-9293), you can also filter by one of the three following combinations: `SYN-ACK`, `FIN-ACK`, and `RST-ACK`. + +|=== +== .spec.agent.ebpf.flowFilter.rules +Description:: ++ +-- +`rules` defines a list of filtering rules on the eBPF Agents. +When filtering is enabled, by default, flows that don't match any rule are rejected. +To change the default, you can define a rule that accepts everything: `{ action: "Accept", cidr: "0.0.0.0/0" }`, and then refine with rejecting rules. +[Unsupported (*)]. +-- + +Type:: + `array` + + + + +== .spec.agent.ebpf.flowFilter.rules[] +Description:: ++ +-- +`EBPFFlowFilterRule` defines the desired eBPF agent configuration regarding flow filtering rule. +-- + +Type:: + `object` + + + + +[cols="1,1,1",options="header"] +|=== +| Property | Type | Description + +| `action` +| `string` +| `action` defines the action to perform on the flows that match the filter. The available options are `Accept`, which is the default, and `Reject`. + +| `cidr` +| `string` +| `cidr` defines the IP CIDR to filter flows by. 
+Examples: `10.10.10.0/24` or `100:100:100:100::/64` + +| `destPorts` +| `integer-or-string` +| `destPorts` optionally defines the destination ports to filter flows by. +To filter a single port, set a single port as an integer value. For example, `destPorts: 80`. +To filter a range of ports, use a "start-end" range in string format. For example, `destPorts: "80-100"`. +To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`. + +| `direction` +| `string` +| `direction` optionally defines a direction to filter flows by. The available options are `Ingress` and `Egress`. + +| `icmpCode` +| `integer` +| `icmpCode`, for Internet Control Message Protocol (ICMP) traffic, optionally defines the ICMP code to filter flows by. + +| `icmpType` +| `integer` +| `icmpType`, for ICMP traffic, optionally defines the ICMP type to filter flows by. + +| `peerCIDR` +| `string` +| `peerCIDR` defines the Peer IP CIDR to filter flows by. +Examples: `10.10.10.0/24` or `100:100:100:100::/64` + +| `peerIP` +| `string` +| `peerIP` optionally defines the remote IP address to filter flows by. +Example: `10.10.10.10`. + +| `pktDrops` +| `boolean` +| `pktDrops` optionally filters only flows containing packet drops. + +| `ports` +| `integer-or-string` +| `ports` optionally defines the ports to filter flows by. It is used both for source and destination ports. +To filter a single port, set a single port as an integer value. For example, `ports: 80`. +To filter a range of ports, use a "start-end" range in string format. For example, `ports: "80-100"`. +To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`. + +| `protocol` +| `string` +| `protocol` optionally defines a protocol to filter flows by. The available options are `TCP`, `UDP`, `ICMP`, `ICMPv6`, and `SCTP`. + +| `sampling` +| `integer` +| `sampling` sampling rate for the matched flows, overriding the global sampling defined at `spec.agent.ebpf.sampling`. 
+ | `sourcePorts` | `integer-or-string` | `sourcePorts` optionally defines the source ports to filter flows by. @@ -937,7 +1064,7 @@ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-co Description:: + -- -`exporters` define additional optional exporters for custom consumption or storage. +`exporters` defines additional optional exporters for custom consumption or storage. -- Type:: @@ -2575,6 +2702,18 @@ such as `GOGC` and `GOMAXPROCS` env vars. Set these values at your own risk. | `string` | `clusterName` is the name of the cluster to appear in the flows data. This is useful in a multi-cluster context. When using {product-title}, leave empty to make it automatically determined. +| `deduper` +| `object` +| `deduper` allows you to sample or drop flows identified as duplicates, in order to save on resource usage. +[Unsupported (*)]. + +| `filters` +| `array` +| `filters` lets you define custom filters to limit the amount of generated flows. +These filters provide more flexibility than the eBPF Agent filters (in `spec.agent.ebpf.flowFilter`), such as allowing to filter by Kubernetes namespace, +but with a lesser improvement in performance. +[Unsupported (*)]. + | `imagePullPolicy` | `string` | `imagePullPolicy` is the Kubernetes pull policy for the image defined above @@ -2605,13 +2744,13 @@ This setting is ignored when Kafka is disabled. | `string` | `logTypes` defines the desired record types to generate. Possible values are: + -- `Flows` (default) to export regular network flows + +- `Flows` to export regular network flows. This is the default. + -- `Conversations` to generate events for started conversations, ended conversations as well as periodic "tick" updates + +- `Conversations` to generate events for started conversations, ended conversations as well as periodic "tick" updates. + -- `EndedConversations` to generate only ended conversations events + +- `EndedConversations` to generate only ended conversations events. 
+ -- `All` to generate both network flows and all conversations events + +- `All` to generate both network flows and all conversations events. It is not recommended due to the impact on resources footprint. + | `metrics` @@ -2700,7 +2839,7 @@ By convention, some values are forbidden. It must be greater than 1024 and diffe | `secondaryNetworks` | `array` -| Define secondary networks to be checked for resources identification. +| Defines secondary networks to be checked for resources identification. To guarantee a correct identification, indexed values must form an unique identifier across the cluster. If the same index is used by several resources, those resources might be incorrectly labeled. @@ -2773,7 +2912,7 @@ Type:: Description:: + -- -Define secondary networks to be checked for resources identification. +Defines secondary networks to be checked for resources identification. To guarantee a correct identification, indexed values must form an unique identifier across the cluster. If the same index is used by several resources, those resources might be incorrectly labeled. -- @@ -2814,6 +2953,133 @@ Fields absent from the 'k8s.v1.cni.cncf.io/network-status' annotation must not b | `string` | `name` should match the network name as visible in the pods annotation 'k8s.v1.cni.cncf.io/network-status'. +|=== +== .spec.processor.deduper +Description:: ++ +-- +`deduper` allows you to sample or drop flows identified as duplicates, in order to save on resource usage. +[Unsupported (*)]. +-- + +Type:: + `object` + + + + +[cols="1,1,1",options="header"] +|=== +| Property | Type | Description + +| `mode` +| `string` +| Set the Processor de-duplication mode. It comes in addition to the Agent-based deduplication because the Agent cannot de-duplicate same flows reported from different nodes. 
+ + +- Use `Drop` to drop every flow considered as duplicates, allowing saving more on resource usage but potentially losing some information such as the network interfaces used from peer, or network events. + + +- Use `Sample` to randomly keep only one flow on 50, which is the default, among the ones considered as duplicates. This is a compromise between dropping every duplicate or keeping every duplicate. This sampling action comes in addition to the Agent-based sampling. If both Agent and Processor sampling values are `50`, the combined sampling is 1:2500. + + +- Use `Disabled` to turn off Processor-based de-duplication. + + + +| `sampling` +| `integer` +| `sampling` is the sampling rate when deduper `mode` is `Sample`. + +|=== +== .spec.processor.filters +Description:: ++ +-- +`filters` lets you define custom filters to limit the amount of generated flows. +These filters provide more flexibility than the eBPF Agent filters (in `spec.agent.ebpf.flowFilter`), such as allowing to filter by Kubernetes namespace, +but with a lesser improvement in performance. +[Unsupported (*)]. +-- + +Type:: + `array` + + + + +== .spec.processor.filters[] +Description:: ++ +-- +`FLPFilterSet` defines the desired configuration for FLP-based filtering satisfying all conditions. +-- + +Type:: + `object` + + + + +[cols="1,1,1",options="header"] +|=== +| Property | Type | Description + +| `allOf` +| `array` +| `filters` is a list of matches that must be all satisfied in order to remove a flow. + +| `outputTarget` +| `string` +| If specified, these filters only target a single output: `Loki`, `Metrics` or `Exporters`. By default, all outputs are targeted. + +| `sampling` +| `integer` +| `sampling` is an optional sampling rate to apply to this filter. + +|=== +== .spec.processor.filters[].allOf +Description:: ++ +-- +`filters` is a list of matches that must be all satisfied in order to remove a flow. 
+-- + +Type:: + `array` + + + + +== .spec.processor.filters[].allOf[] +Description:: ++ +-- +`FLPSingleFilter` defines the desired configuration for a single FLP-based filter. +-- + +Type:: + `object` + +Required:: + - `field` + - `matchType` + + + +[cols="1,1,1",options="header"] +|=== +| Property | Type | Description + +| `field` +| `string` +| Name of the field to filter on. +Refer to the documentation for the list of available fields: https://github.com/netobserv/network-observability-operator/blob/main/docs/flows-format.adoc. + +| `matchType` +| `string` +| Type of matching to apply. + +| `value` +| `string` +| Value to filter on. When `matchType` is `Equal` or `NotEqual`, you can use field injection with `$(SomeField)` to refer to any other field of the flow. + |=== == .spec.processor.kafkaConsumerAutoscaler Description:: @@ -2865,7 +3131,8 @@ Note that the more metrics you add, the bigger is the impact on Prometheus workl Metrics enabled by default are: `namespace_flows_total`, `node_ingress_bytes_total`, `node_egress_bytes_total`, `workload_ingress_bytes_total`, `workload_egress_bytes_total`, `namespace_drop_packets_total` (when `PacketDrop` feature is enabled), -`namespace_rtt_seconds` (when `FlowRTT` feature is enabled), `namespace_dns_latency_seconds` (when `DNSTracking` feature is enabled). +`namespace_rtt_seconds` (when `FlowRTT` feature is enabled), `namespace_dns_latency_seconds` (when `DNSTracking` feature is enabled), +`namespace_network_policy_events_total` (when `NetworkEvents` feature is enabled). 
More information, with full list of available metrics: https://github.com/netobserv/network-observability-operator/blob/main/docs/Metrics.md | `server` diff --git a/docs/flowmetric-flows-netobserv-io-v1alpha1.adoc b/docs/flowmetric-flows-netobserv-io-v1alpha1.adoc index 6bda2865c..88647448d 100644 --- a/docs/flowmetric-flows-netobserv-io-v1alpha1.adoc +++ b/docs/flowmetric-flows-netobserv-io-v1alpha1.adoc @@ -107,6 +107,11 @@ When set to `Egress`, it is equivalent to adding the regular expression filter o be used to eliminate duplicates: `Duplicate != "true"` and `FlowDirection = "0"`. Refer to the documentation for the list of available fields: https://docs.openshift.com/container-platform/latest/observability/network_observability/json-flows-format-reference.html. +| `flatten` +| `array (string)` +| `flatten` is a list of array-type fields that must be flattened, such as Interfaces or NetworkEvents. Flattened fields generate one metric per item in that field. +For instance, when flattening `Interfaces` on a bytes counter, a flow having Interfaces [br-ex, ens5] increases one counter for `br-ex` and another for `ens5`. + | `labels` | `array (string)` | `labels` is a list of fields that should be used as Prometheus labels, also known as dimensions. diff --git a/docs/flows-format.adoc b/docs/flows-format.adoc index 76f5c85d3..d87ab52ab 100644 --- a/docs/flows-format.adoc +++ b/docs/flows-format.adoc @@ -99,6 +99,13 @@ The "Cardinality" column gives information about the implied metric cardinality | yes | fine | destination.k8s.namespace.name +| `DstK8S_NetworkName` +| string +| Destination network name +| `dst_network` +| no +| fine +| n/a | `DstK8S_OwnerName` | string | Name of the destination owner, such as Deployment name, StatefulSet name, etc. 
@@ -156,14 +163,14 @@ The "Cardinality" column gives information about the implied metric cardinality | fine | n/a | `Flags` -| number -| Logical OR combination of unique TCP flags comprised in the flow, as per RFC-9293, with additional custom flags to represent the following per-packet combinations: + -- SYN+ACK (0x100) + -- FIN+ACK (0x200) + -- RST+ACK (0x400) +| string[] +| List of TCP flags comprised in the flow, as per RFC-9293, with additional custom flags to represent the following per-packet combinations: + +- SYN_ACK + +- FIN_ACK + +- RST_ACK | `tcp_flags` | no -| fine +| careful | tcp.flags | `FlowDirection` | number @@ -190,7 +197,7 @@ The "Cardinality" column gives information about the implied metric cardinality | fine | icmp.type | `IfDirections` -| number +| number[] | Flow directions from the network interface observation point. Can be one of: + - 0: Ingress (interface incoming traffic) + - 1: Egress (interface outgoing traffic) @@ -199,7 +206,7 @@ The "Cardinality" column gives information about the implied metric cardinality | fine | interface.directions | `Interfaces` -| string +| string[] | Network interfaces | `interfaces` | no @@ -220,8 +227,14 @@ The "Cardinality" column gives information about the implied metric cardinality | fine | k8s.layer | `NetworkEvents` -| string -| Network events flow monitoring +| object[] +| Network events, such as network policy actions, composed of nested fields: + +- Feature (such as "acl" for network policies) + +- Type (such as an "AdminNetworkPolicy") + +- Namespace (namespace where the event applies, if any) + +- Name (name of the resource that triggered the event) + +- Action (such as "allow" or "drop") + +- Direction (Ingress or Egress) | `network_events` | no | avoid @@ -229,7 +242,7 @@ The "Cardinality" column gives information about the implied metric cardinality | `Packets` | number | Number of packets -| n/a +| `pkt_drop_cause` | no | avoid | packets @@ -275,6 +288,13 @@ The "Cardinality" column gives 
information about the implied metric cardinality | no | fine | protocol +| `Sampling` +| number +| Sampling rate used for this flow +| n/a +| no +| fine +| n/a | `SrcAddr` | string | Source IP address (ipv4 or ipv6) @@ -310,6 +330,13 @@ The "Cardinality" column gives information about the implied metric cardinality | yes | fine | source.k8s.namespace.name +| `SrcK8S_NetworkName` +| string +| Source network name +| `src_network` +| no +| fine +| n/a | `SrcK8S_OwnerName` | string | Name of the source owner, such as Deployment name, StatefulSet name, etc. @@ -387,6 +414,48 @@ The "Cardinality" column gives information about the implied metric cardinality | no | avoid | timereceived +| `Udns` +| string[] +| List of User Defined Networks +| `udns` +| no +| careful +| n/a +| `XlatDstAddr` +| string +| packet translation destination address +| `xlat_dst_address` +| no +| avoid +| n/a +| `XlatDstPort` +| number +| packet translation destination port +| `xlat_dst_port` +| no +| careful +| n/a +| `XlatSrcAddr` +| string +| packet translation source address +| `xlat_src_address` +| no +| avoid +| n/a +| `XlatSrcPort` +| number +| packet translation source port +| `xlat_src_port` +| no +| careful +| n/a +| `ZoneId` +| number +| packet translation zone id +| `xlat_zone_id` +| no +| avoid +| n/a | `_HashId` | string | In conversation tracking, the conversation identifier @@ -396,7 +465,7 @@ The "Cardinality" column gives information about the implied metric cardinality | n/a | `_RecordType` | string -| Type of record: 'flowLog' for regular flow logs, or 'newConnection', 'heartbeat', 'endConnection' for conversation tracking +| Type of record: `flowLog` for regular flow logs, or `newConnection`, `heartbeat`, `endConnection` for conversation tracking | `type` | yes | fine diff --git a/hack/asciidoc-flows-gen.sh b/hack/asciidoc-flows-gen.sh index 95443f702..7019f4414 100755 --- a/hack/asciidoc-flows-gen.sh +++ b/hack/asciidoc-flows-gen.sh @@ -63,9 +63,9 @@ for i in $(seq 0 $(( 
$nbfields-1 )) ); do echo -e "| $otel" >> $ADOC done +echo -e '|===' >> $ADOC + if [[ $errors != "" ]]; then echo -e $errors exit 1 fi - -echo -e '|===' >> $ADOC diff --git a/pkg/helper/cardinality/cardinality.json b/pkg/helper/cardinality/cardinality.json index 1f19d555e..84088a788 100644 --- a/pkg/helper/cardinality/cardinality.json +++ b/pkg/helper/cardinality/cardinality.json @@ -13,6 +13,7 @@ "SrcK8S_HostIP": "fine", "SrcK8S_HostName": "fine", "SrcK8S_Zone": "fine", + "SrcK8S_NetworkName": "fine", "SrcSubnetLabel": "fine", "DstK8S_Name": "careful", "DstK8S_Type": "fine", @@ -25,6 +26,7 @@ "DstK8S_HostIP": "fine", "DstK8S_HostName": "fine", "DstK8S_Zone": "fine", + "DstK8S_NetworkName": "fine", "DstSubnetLabel": "fine", "K8S_FlowLayer": "fine", "Proto": "fine", @@ -64,6 +66,7 @@ "XlatIcmpId": "avoid", "XlatDstPort": "careful", "XlatDstAddr": "avoid", + "Udns": "careful", "_RecordType": "fine", "_HashId": "avoid" }
field string - Name of the field to filter on -Refer to the documentation for the list of available fields: https://docs.openshift.com/container-platform/latest/observability/network_observability/json-flows-format-reference.html.
+ Name of the field to filter on. +Refer to the documentation for the list of available fields: https://github.com/netobserv/network-observability-operator/blob/main/docs/flows-format.adoc.
true
matchType enum - Type of matching to apply
+ Type of matching to apply.

Enum: Equal, NotEqual, Presence, Absence, MatchRegex, NotMatchRegex
Default: Equal
@@ -18235,7 +18240,7 @@ If the namespace is different, the config map or the secret is copied so that it
conditions []object - `conditions` represent the latest available observations of an object's state
+ `conditions` represents the latest available observations of an object's state
true
flatten []string - `flatten` is a list of list-type fields that must be flattened, such as Interfaces and NetworkEvents. Flattened fields generate one metric per item in that field. + `flatten` is a list of array-type fields that must be flattened, such as Interfaces or NetworkEvents. Flattened fields generate one metric per item in that field. For instance, when flattening `Interfaces` on a bytes counter, a flow having Interfaces [br-ex, ens5] increases one counter for `br-ex` and another for `ens5`.
false