diff --git a/apis/flowcollector/v1beta1/flowcollector_types.go b/apis/flowcollector/v1beta1/flowcollector_types.go
index 78e45fb19..12624823e 100644
--- a/apis/flowcollector/v1beta1/flowcollector_types.go
+++ b/apis/flowcollector/v1beta1/flowcollector_types.go
@@ -154,7 +154,7 @@ type FlowCollectorIPFIX struct {
// - `NetworkEvents`, to track Network events.
// - `PacketTranslation`, to enrich flows with packets translation information.
// - `EbpfManager`, to enable using EBPF Manager to manage netobserv ebpf programs [Developer Preview].
-// - `UDNMapping`, to enable interfaces mappind to udn [Developer Preview].
+// - `UDNMapping`, to enable interfaces mapping to udn [Developer Preview].
// +kubebuilder:validation:Enum:="PacketDrop";"DNSTracking";"FlowRTT";"NetworkEvents";"PacketTranslation";"EbpfManager";"UDNMapping"
type AgentFeature string
@@ -272,13 +272,15 @@ type EBPFFlowFilter struct {
// Set `enable` to `true` to enable the eBPF flow filtering feature.
Enable *bool `json:"enable,omitempty"`
- // [deprecated (*)] this setting is not used anymore.
+ // [deprecated (*)] this setting is not used anymore. It is replaced with the `rules` list.
EBPFFlowFilterRule `json:",inline"`
- // `flowFilterRules` defines a list of ebpf agent flow filtering rules
+ // `rules` defines a list of filtering rules on the eBPF Agents.
+ // When filtering is enabled, by default, flows that don't match any rule are rejected.
+ // To change the default, you can define a rule that accepts everything: `{ action: "Accept", cidr: "0.0.0.0/0" }`, and then refine with rejecting rules.
// +kubebuilder:validation:MinItems:=1
// +kubebuilder:validation:MaxItems:=16
- FlowFilterRules []EBPFFlowFilterRule `json:"rules,omitempty"`
+ Rules []EBPFFlowFilterRule `json:"rules,omitempty"`
}
// `FlowCollectorEBPF` defines a FlowCollector that uses eBPF to collect the flows information
@@ -364,7 +366,7 @@ type FlowCollectorEBPF struct {
// the kernel debug filesystem, so the eBPF pod has to run as privileged.
// - `PacketTranslation`: enable enriching flows with packet's translation information.
// - `EbpfManager`: allow using eBPF manager to manage netobserv ebpf programs.
- // - `UDNMapping`, to enable interfaces mappind to udn.
+ // - `UDNMapping`, to enable interfaces mapping to udn.
// +optional
Features []AgentFeature `json:"features,omitempty"`
diff --git a/apis/flowcollector/v1beta1/zz_generated.conversion.go b/apis/flowcollector/v1beta1/zz_generated.conversion.go
index 96f30bc6b..aa0ccb4a1 100644
--- a/apis/flowcollector/v1beta1/zz_generated.conversion.go
+++ b/apis/flowcollector/v1beta1/zz_generated.conversion.go
@@ -530,7 +530,7 @@ func autoConvert_v1beta1_EBPFFlowFilter_To_v1beta2_EBPFFlowFilter(in *EBPFFlowFi
if err := Convert_v1beta1_EBPFFlowFilterRule_To_v1beta2_EBPFFlowFilterRule(&in.EBPFFlowFilterRule, &out.EBPFFlowFilterRule, s); err != nil {
return err
}
- out.FlowFilterRules = *(*[]v1beta2.EBPFFlowFilterRule)(unsafe.Pointer(&in.FlowFilterRules))
+ out.Rules = *(*[]v1beta2.EBPFFlowFilterRule)(unsafe.Pointer(&in.Rules))
return nil
}
@@ -544,7 +544,7 @@ func autoConvert_v1beta2_EBPFFlowFilter_To_v1beta1_EBPFFlowFilter(in *v1beta2.EB
if err := Convert_v1beta2_EBPFFlowFilterRule_To_v1beta1_EBPFFlowFilterRule(&in.EBPFFlowFilterRule, &out.EBPFFlowFilterRule, s); err != nil {
return err
}
- out.FlowFilterRules = *(*[]EBPFFlowFilterRule)(unsafe.Pointer(&in.FlowFilterRules))
+ out.Rules = *(*[]EBPFFlowFilterRule)(unsafe.Pointer(&in.Rules))
return nil
}
diff --git a/apis/flowcollector/v1beta1/zz_generated.deepcopy.go b/apis/flowcollector/v1beta1/zz_generated.deepcopy.go
index e79aca3d5..50bbbdbc5 100644
--- a/apis/flowcollector/v1beta1/zz_generated.deepcopy.go
+++ b/apis/flowcollector/v1beta1/zz_generated.deepcopy.go
@@ -131,8 +131,8 @@ func (in *EBPFFlowFilter) DeepCopyInto(out *EBPFFlowFilter) {
**out = **in
}
in.EBPFFlowFilterRule.DeepCopyInto(&out.EBPFFlowFilterRule)
- if in.FlowFilterRules != nil {
- in, out := &in.FlowFilterRules, &out.FlowFilterRules
+ if in.Rules != nil {
+ in, out := &in.Rules, &out.Rules
*out = make([]EBPFFlowFilterRule, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
diff --git a/apis/flowcollector/v1beta2/flowcollector_types.go b/apis/flowcollector/v1beta2/flowcollector_types.go
index d6d6120b7..9e481f4c4 100644
--- a/apis/flowcollector/v1beta2/flowcollector_types.go
+++ b/apis/flowcollector/v1beta2/flowcollector_types.go
@@ -80,7 +80,7 @@ type FlowCollectorSpec struct {
// +optional
Kafka FlowCollectorKafka `json:"kafka,omitempty"`
- // `exporters` define additional optional exporters for custom consumption or storage.
+ // `exporters` defines additional optional exporters for custom consumption or storage.
// +optional
// +k8s:conversion-gen=false
Exporters []*FlowCollectorExporter `json:"exporters"`
@@ -174,10 +174,10 @@ type FlowCollectorIPFIX struct {
// - `PacketDrop`, to track packet drops.
// - `DNSTracking`, to track specific information on DNS traffic.
// - `FlowRTT`, to track TCP latency.
-// - `NetworkEvents`, to track Network events [Developer Preview].
-// - `PacketTranslation`, to enrich flows with packets translation information.
-// - `EbpfManager`, to enable using EBPF Manager to manage netobserv ebpf programs [Developer Preview].
-// - `UDNMapping`, to enable interfaces mappind to udn [Developer Preview].
+// - `NetworkEvents`, to track network events [Technology Preview].
+// - `PacketTranslation`, to enrich flows with packet translation information, such as Service NAT.
+// - `EbpfManager`, to enable using eBPF Manager to manage NetObserv eBPF programs. [Unsupported (*)].
+// - `UDNMapping`, to enable interfaces mapping to UDN. [Unsupported (*)].
// +kubebuilder:validation:Enum:="PacketDrop";"DNSTracking";"FlowRTT";"NetworkEvents";"PacketTranslation";"EbpfManager";"UDNMapping"
type AgentFeature string
@@ -285,7 +285,7 @@ type EBPFFlowFilterRule struct {
// +optional
PktDrops *bool `json:"pktDrops,omitempty"`
- // `sampling` sampling rate for the matched flow
+	// `sampling` is the sampling rate for the matched flows, overriding the global sampling defined at `spec.agent.ebpf.sampling`.
// +optional
Sampling *uint32 `json:"sampling,omitempty"`
}
@@ -295,13 +295,16 @@ type EBPFFlowFilter struct {
// Set `enable` to `true` to enable the eBPF flow filtering feature.
Enable *bool `json:"enable,omitempty"`
- // [deprecated (*)] this setting is not used anymore.
+ // [Deprecated (*)]. This setting is not used anymore. It is replaced with the `rules` list.
EBPFFlowFilterRule `json:",inline"`
- // `flowFilterRules` defines a list of ebpf agent flow filtering rules
+ // `rules` defines a list of filtering rules on the eBPF Agents.
+ // When filtering is enabled, by default, flows that don't match any rule are rejected.
+ // To change the default, you can define a rule that accepts everything: `{ action: "Accept", cidr: "0.0.0.0/0" }`, and then refine with rejecting rules.
+ // [Unsupported (*)].
// +kubebuilder:validation:MinItems:=1
// +kubebuilder:validation:MaxItems:=16
- FlowFilterRules []EBPFFlowFilterRule `json:"rules,omitempty"`
+ Rules []EBPFFlowFilterRule `json:"rules,omitempty"`
}
// `FlowCollectorEBPF` defines a FlowCollector that uses eBPF to collect the flows information
@@ -378,22 +381,20 @@ type FlowCollectorEBPF struct {
Advanced *AdvancedAgentConfig `json:"advanced,omitempty"`
// List of additional features to enable. They are all disabled by default. Enabling additional features might have performance impacts. Possible values are:
- // - `PacketDrop`: enable the packets drop flows logging feature. This feature requires mounting
- // the kernel debug filesystem, so the eBPF agent pods have to run as privileged.
+ // - `PacketDrop`: Enable the packets drop flows logging feature. This feature requires mounting
+ // the kernel debug filesystem, so the eBPF agent pods must run as privileged.
// If the `spec.agent.ebpf.privileged` parameter is not set, an error is reported.
- // - `DNSTracking`: enable the DNS tracking feature.
- // - `FlowRTT`: enable flow latency (sRTT) extraction in the eBPF agent from TCP traffic.
- // - `NetworkEvents`: enable the network events monitoring feature, such as correlating flows and network policies.
- // This feature requires mounting the kernel debug filesystem, so the eBPF agent pods have to run as privileged.
+ // - `DNSTracking`: Enable the DNS tracking feature.
+ // - `FlowRTT`: Enable flow latency (sRTT) extraction in the eBPF agent from TCP traffic.
+ // - `NetworkEvents`: Enable the network events monitoring feature, such as correlating flows and network policies.
+ // This feature requires mounting the kernel debug filesystem, so the eBPF agent pods must run as privileged.
// It requires using the OVN-Kubernetes network plugin with the Observability feature.
- // IMPORTANT: This feature is available as a Developer Preview.
- // - `PacketTranslation`: enable enriching flows with packet's translation information.
- // - `EbpfManager`: allow using eBPF manager to manage netobserv ebpf programs.
- // IMPORTANT: This feature is available as a Developer Preview.
- // - `UDNMapping`, to enable interfaces mappind to udn.
- // This feature requires mounting the kernel debug filesystem, so the eBPF agent pods have to run as privileged.
+ // IMPORTANT: This feature is available as a Technology Preview.
+ // - `PacketTranslation`: Enable enriching flows with packet translation information, such as Service NAT.
+	// - `EbpfManager`: [Unsupported (*)]. Use eBPF Manager to manage NetObserv eBPF programs. Prerequisite: the eBPF Manager operator (or upstream bpfman operator) must be installed.
+ // - `UDNMapping`: [Unsupported (*)]. Enable interfaces mapping to User Defined Networks (UDN).
+ // This feature requires mounting the kernel debug filesystem, so the eBPF agent pods must run as privileged.
// It requires using the OVN-Kubernetes network plugin with the Observability feature.
- // IMPORTANT: This feature is available as a Developer Preview.
// +optional
Features []AgentFeature `json:"features,omitempty"`
@@ -649,7 +650,7 @@ type FlowCollectorFLP struct {
KafkaConsumerBatchSize int `json:"kafkaConsumerBatchSize"`
// `logTypes` defines the desired record types to generate. Possible values are:
- // - `Flows` (default) to export regular network flows.
+ // - `Flows` to export regular network flows. This is the default.
// - `Conversations` to generate events for started conversations, ended conversations as well as periodic "tick" updates.
// - `EndedConversations` to generate only ended conversations events.
// - `All` to generate both network flows and all conversations events. It is not recommended due to the impact on resources footprint.
@@ -678,13 +679,15 @@ type FlowCollectorFLP struct {
SubnetLabels SubnetLabels `json:"subnetLabels,omitempty"`
//+optional
- // `deduper` allows to sample or drop flows identified as duplicates, in order to save on resource usage.
- // IMPORTANT: This feature is available as a Developer Preview.
+ // `deduper` allows you to sample or drop flows identified as duplicates, in order to save on resource usage.
+ // [Unsupported (*)].
Deduper *FLPDeduper `json:"deduper,omitempty"`
// +optional
- // `filters` let you define custom filters to limit the amount of generated flows.
- // IMPORTANT: This feature is available as a Developer Preview.
+ // `filters` lets you define custom filters to limit the amount of generated flows.
+	// These filters provide more flexibility than the eBPF Agent filters (in `spec.agent.ebpf.flowFilter`), such as the ability to filter by Kubernetes namespace,
+ // but with a lesser improvement in performance.
+ // [Unsupported (*)].
Filters []FLPFilterSet `json:"filters"`
// `advanced` allows setting some aspects of the internal configuration of the flow processor.
@@ -702,11 +705,11 @@ const (
FLPDeduperSample FLPDeduperMode = "Sample"
)
-// `FLPDeduper` defines the desired configuration for FLP-based deduper
+// `FLPDeduper` defines the desired configuration for FLP-based deduper.
type FLPDeduper struct {
// Set the Processor de-duplication mode. It comes in addition to the Agent-based deduplication because the Agent cannot de-duplicate same flows reported from different nodes.
- // - Use `Drop` to drop every flow considered as duplicates, allowing saving more on resource usage but potentially loosing some information such as the network interfaces used from peer, or network events.
- // - Use `Sample` to randomly keep only 1 flow on 50 (by default) among the ones considered as duplicates. This is a compromise between dropping every duplicates or keeping every duplicates. This sampling action comes in addition to the Agent-based sampling. If both Agent and Processor sampling are 50, the combined sampling is 1:2500.
+	// - Use `Drop` to drop every flow considered as duplicates, allowing you to save more on resource usage but potentially losing some information such as the network interfaces used from peer, or network events.
+	// - Use `Sample` to randomly keep only one flow out of 50, which is the default, among the ones considered as duplicates. This is a compromise between dropping every duplicate and keeping every duplicate. This sampling action comes in addition to the Agent-based sampling. If both Agent and Processor sampling values are `50`, the combined sampling is 1:2500.
// - Use `Disabled` to turn off Processor-based de-duplication.
// +kubebuilder:validation:Enum:="Disabled";"Drop";"Sample"
// +kubebuilder:default:=Disabled
@@ -734,13 +737,13 @@ const (
FLPFilterTargetExporters FLPFilterTarget = "Exporters"
)
-// `FLPFilterSet` defines the desired configuration for FLP-based filtering satisfying all conditions
+// `FLPFilterSet` defines the desired configuration for FLP-based filtering satisfying all conditions.
type FLPFilterSet struct {
// `filters` is a list of matches that must be all satisfied in order to remove a flow.
// +optional
AllOf []FLPSingleFilter `json:"allOf"`
- // If specified, this filters only target a single output: `Loki`, `Metrics` or `Exporters`. By default, all outputs are targeted.
+ // If specified, these filters only target a single output: `Loki`, `Metrics` or `Exporters`. By default, all outputs are targeted.
// +optional
// +kubebuilder:validation:Enum:="";"Loki";"Metrics";"Exporters"
OutputTarget FLPFilterTarget `json:"outputTarget,omitempty"`
@@ -751,15 +754,15 @@ type FLPFilterSet struct {
Sampling int32 `json:"sampling,omitempty"`
}
-// `FLPSingleFilter` defines the desired configuration for a single FLP-based filter
+// `FLPSingleFilter` defines the desired configuration for a single FLP-based filter.
type FLPSingleFilter struct {
- // Type of matching to apply
+ // Type of matching to apply.
// +kubebuilder:validation:Enum:="Equal";"NotEqual";"Presence";"Absence";"MatchRegex";"NotMatchRegex"
// +kubebuilder:default:="Equal"
MatchType FLPFilterMatch `json:"matchType"`
- // Name of the field to filter on
- // Refer to the documentation for the list of available fields: https://docs.openshift.com/container-platform/latest/observability/network_observability/json-flows-format-reference.html.
+ // Name of the field to filter on.
+ // Refer to the documentation for the list of available fields: https://github.com/netobserv/network-observability-operator/blob/main/docs/flows-format.adoc.
// +required
Field string `json:"field"`
@@ -1329,7 +1332,7 @@ type AdvancedProcessorConfig struct {
// +optional
Scheduling *SchedulingConfig `json:"scheduling,omitempty"`
- // Define secondary networks to be checked for resources identification.
+ // Defines secondary networks to be checked for resources identification.
// To guarantee a correct identification, indexed values must form an unique identifier across the cluster.
// If the same index is used by several resources, those resources might be incorrectly labeled.
// +optional
@@ -1419,7 +1422,7 @@ type AdvancedPluginConfig struct {
Scheduling *SchedulingConfig `json:"scheduling,omitempty"`
}
-// `SubnetLabels` allows to define custom labels on subnets and IPs or to enable automatic labelling of recognized subnets in OpenShift.
+// `SubnetLabels` allows you to define custom labels on subnets and IPs or to enable automatic labelling of recognized subnets in OpenShift.
type SubnetLabels struct {
// `openShiftAutoDetect` allows, when set to `true`, to detect automatically the machines, pods and services subnets based on the
// OpenShift install configuration and the Cluster Network Operator configuration. Indirectly, this is a way to accurately detect
@@ -1477,7 +1480,7 @@ type FlowCollectorExporter struct {
type FlowCollectorStatus struct {
// Important: Run "make" to regenerate code after modifying this file
- // `conditions` represent the latest available observations of an object's state
+ // `conditions` represents the latest available observations of an object's state
Conditions []metav1.Condition `json:"conditions"`
// Namespace where console plugin and flowlogs-pipeline have been deployed.
diff --git a/apis/flowcollector/v1beta2/flowcollector_validation_webhook.go b/apis/flowcollector/v1beta2/flowcollector_validation_webhook.go
index 3d9e66c82..480e0fc4a 100644
--- a/apis/flowcollector/v1beta2/flowcollector_validation_webhook.go
+++ b/apis/flowcollector/v1beta2/flowcollector_validation_webhook.go
@@ -119,8 +119,8 @@ func (r *FlowCollector) validateAgent(_ context.Context, fc *FlowCollectorSpec)
var errs []error
if fc.Agent.EBPF.FlowFilter != nil && fc.Agent.EBPF.FlowFilter.Enable != nil && *fc.Agent.EBPF.FlowFilter.Enable {
m := make(map[string]bool)
- for i := range fc.Agent.EBPF.FlowFilter.FlowFilterRules {
- rule := fc.Agent.EBPF.FlowFilter.FlowFilterRules[i]
+ for i := range fc.Agent.EBPF.FlowFilter.Rules {
+ rule := fc.Agent.EBPF.FlowFilter.Rules[i]
key := rule.CIDR + "-" + rule.PeerCIDR
if found := m[key]; found {
errs = append(errs, fmt.Errorf("flow filter rule CIDR and PeerCIDR %s already exists",
diff --git a/apis/flowcollector/v1beta2/flowcollector_validation_webhook_test.go b/apis/flowcollector/v1beta2/flowcollector_validation_webhook_test.go
index 0ff2dac08..02c1d3993 100644
--- a/apis/flowcollector/v1beta2/flowcollector_validation_webhook_test.go
+++ b/apis/flowcollector/v1beta2/flowcollector_validation_webhook_test.go
@@ -44,7 +44,7 @@ func TestValidateAgent(t *testing.T) {
Sampling: ptr.To(int32(100)),
FlowFilter: &EBPFFlowFilter{
Enable: ptr.To(true),
- FlowFilterRules: []EBPFFlowFilterRule{
+ Rules: []EBPFFlowFilterRule{
{
Action: "Accept",
CIDR: "0.0.0.0/0",
@@ -73,7 +73,7 @@ func TestValidateAgent(t *testing.T) {
Sampling: ptr.To(int32(100)),
FlowFilter: &EBPFFlowFilter{
Enable: ptr.To(true),
- FlowFilterRules: []EBPFFlowFilterRule{
+ Rules: []EBPFFlowFilterRule{
{
Action: "Accept",
CIDR: "0.0.0.0/0",
@@ -181,7 +181,7 @@ func TestValidateAgent(t *testing.T) {
EBPF: FlowCollectorEBPF{
FlowFilter: &EBPFFlowFilter{
Enable: ptr.To(true),
- FlowFilterRules: []EBPFFlowFilterRule{
+ Rules: []EBPFFlowFilterRule{
{
Action: "Accept",
CIDR: "0.0.0.0/0",
@@ -208,7 +208,7 @@ func TestValidateAgent(t *testing.T) {
EBPF: FlowCollectorEBPF{
FlowFilter: &EBPFFlowFilter{
Enable: ptr.To(true),
- FlowFilterRules: []EBPFFlowFilterRule{
+ Rules: []EBPFFlowFilterRule{
{
Ports: intstr.FromString("abcd"),
},
@@ -232,7 +232,7 @@ func TestValidateAgent(t *testing.T) {
EBPF: FlowCollectorEBPF{
FlowFilter: &EBPFFlowFilter{
Enable: ptr.To(true),
- FlowFilterRules: []EBPFFlowFilterRule{
+ Rules: []EBPFFlowFilterRule{
{
Ports: intstr.FromString("80-255"),
},
@@ -255,7 +255,7 @@ func TestValidateAgent(t *testing.T) {
EBPF: FlowCollectorEBPF{
FlowFilter: &EBPFFlowFilter{
Enable: ptr.To(true),
- FlowFilterRules: []EBPFFlowFilterRule{
+ Rules: []EBPFFlowFilterRule{
{
Ports: intstr.FromString("255-80"),
},
@@ -279,7 +279,7 @@ func TestValidateAgent(t *testing.T) {
EBPF: FlowCollectorEBPF{
FlowFilter: &EBPFFlowFilter{
Enable: ptr.To(true),
- FlowFilterRules: []EBPFFlowFilterRule{
+ Rules: []EBPFFlowFilterRule{
{
Ports: intstr.FromString("80-?"),
},
@@ -303,7 +303,7 @@ func TestValidateAgent(t *testing.T) {
EBPF: FlowCollectorEBPF{
FlowFilter: &EBPFFlowFilter{
Enable: ptr.To(true),
- FlowFilterRules: []EBPFFlowFilterRule{
+ Rules: []EBPFFlowFilterRule{
{
Ports: intstr.FromString("255,80"),
},
@@ -326,7 +326,7 @@ func TestValidateAgent(t *testing.T) {
EBPF: FlowCollectorEBPF{
FlowFilter: &EBPFFlowFilter{
Enable: ptr.To(true),
- FlowFilterRules: []EBPFFlowFilterRule{
+ Rules: []EBPFFlowFilterRule{
{
Ports: intstr.FromString("80,100,250"),
},
@@ -350,7 +350,7 @@ func TestValidateAgent(t *testing.T) {
EBPF: FlowCollectorEBPF{
FlowFilter: &EBPFFlowFilter{
Enable: ptr.To(true),
- FlowFilterRules: []EBPFFlowFilterRule{
+ Rules: []EBPFFlowFilterRule{
{
CIDR: "1.1.1.1",
},
diff --git a/apis/flowcollector/v1beta2/zz_generated.deepcopy.go b/apis/flowcollector/v1beta2/zz_generated.deepcopy.go
index 6461b82b2..3960ab41f 100644
--- a/apis/flowcollector/v1beta2/zz_generated.deepcopy.go
+++ b/apis/flowcollector/v1beta2/zz_generated.deepcopy.go
@@ -290,8 +290,8 @@ func (in *EBPFFlowFilter) DeepCopyInto(out *EBPFFlowFilter) {
**out = **in
}
in.EBPFFlowFilterRule.DeepCopyInto(&out.EBPFFlowFilterRule)
- if in.FlowFilterRules != nil {
- in, out := &in.FlowFilterRules, &out.FlowFilterRules
+ if in.Rules != nil {
+ in, out := &in.Rules, &out.Rules
*out = make([]EBPFFlowFilterRule, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
diff --git a/apis/flowmetrics/v1alpha1/flowmetric_types.go b/apis/flowmetrics/v1alpha1/flowmetric_types.go
index 6b8ce9a67..dee58a025 100644
--- a/apis/flowmetrics/v1alpha1/flowmetric_types.go
+++ b/apis/flowmetrics/v1alpha1/flowmetric_types.go
@@ -93,7 +93,7 @@ type FlowMetricSpec struct {
// +optional
Labels []string `json:"labels"`
- // `flatten` is a list of list-type fields that must be flattened, such as Interfaces and NetworkEvents. Flattened fields generate one metric per item in that field.
+ // `flatten` is a list of array-type fields that must be flattened, such as Interfaces or NetworkEvents. Flattened fields generate one metric per item in that field.
// For instance, when flattening `Interfaces` on a bytes counter, a flow having Interfaces [br-ex, ens5] increases one counter for `br-ex` and another for `ens5`.
// +optional
Flatten []string `json:"flatten"`
diff --git a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml
index 3529b1c6e..d2298edf7 100644
--- a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml
+++ b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml
@@ -137,7 +137,7 @@ spec:
the kernel debug filesystem, so the eBPF pod has to run as privileged.
- `PacketTranslation`: enable enriching flows with packet's translation information.
- `EbpfManager`: allow using eBPF manager to manage netobserv ebpf programs.
- - `UDNMapping`, to enable interfaces mappind to udn.
+ - `UDNMapping`, to enable interfaces mapping to udn.
items:
description: |-
Agent feature, can be one of:
@@ -147,7 +147,7 @@ spec:
- `NetworkEvents`, to track Network events.
- `PacketTranslation`, to enrich flows with packets translation information.
- `EbpfManager`, to enable using EBPF Manager to manage netobserv ebpf programs [Developer Preview].
- - `UDNMapping`, to enable interfaces mappind to udn [Developer Preview].
+ - `UDNMapping`, to enable interfaces mapping to udn [Developer Preview].
enum:
- PacketDrop
- DNSTracking
@@ -242,8 +242,10 @@ spec:
- SCTP
type: string
rules:
- description: '`flowFilterRules` defines a list of ebpf
- agent flow filtering rules'
+ description: |-
+ `rules` defines a list of filtering rules on the eBPF Agents.
+ When filtering is enabled, by default, flows that don't match any rule are rejected.
+ To change the default, you can define a rule that accepts everything: `{ action: "Accept", cidr: "0.0.0.0/0" }`, and then refine with rejecting rules.
items:
description: '`EBPFFlowFilterRule` defines the desired
eBPF agent configuration regarding flow filtering
@@ -3950,32 +3952,30 @@ spec:
features:
description: |-
List of additional features to enable. They are all disabled by default. Enabling additional features might have performance impacts. Possible values are:
- - `PacketDrop`: enable the packets drop flows logging feature. This feature requires mounting
- the kernel debug filesystem, so the eBPF agent pods have to run as privileged.
+ - `PacketDrop`: Enable the packets drop flows logging feature. This feature requires mounting
+ the kernel debug filesystem, so the eBPF agent pods must run as privileged.
If the `spec.agent.ebpf.privileged` parameter is not set, an error is reported.
- - `DNSTracking`: enable the DNS tracking feature.
- - `FlowRTT`: enable flow latency (sRTT) extraction in the eBPF agent from TCP traffic.
- - `NetworkEvents`: enable the network events monitoring feature, such as correlating flows and network policies.
- This feature requires mounting the kernel debug filesystem, so the eBPF agent pods have to run as privileged.
+ - `DNSTracking`: Enable the DNS tracking feature.
+ - `FlowRTT`: Enable flow latency (sRTT) extraction in the eBPF agent from TCP traffic.
+ - `NetworkEvents`: Enable the network events monitoring feature, such as correlating flows and network policies.
+ This feature requires mounting the kernel debug filesystem, so the eBPF agent pods must run as privileged.
It requires using the OVN-Kubernetes network plugin with the Observability feature.
- IMPORTANT: This feature is available as a Developer Preview.
- - `PacketTranslation`: enable enriching flows with packet's translation information.
- - `EbpfManager`: allow using eBPF manager to manage netobserv ebpf programs.
- IMPORTANT: This feature is available as a Developer Preview.
- - `UDNMapping`, to enable interfaces mappind to udn.
- This feature requires mounting the kernel debug filesystem, so the eBPF agent pods have to run as privileged.
+ IMPORTANT: This feature is available as a Technology Preview.
+ - `PacketTranslation`: Enable enriching flows with packet translation information, such as Service NAT.
+ - `EbpfManager`: [Unsupported (*)]. Use eBPF Manager to manage NetObserv eBPF programs. Pre-requisite: the eBPF Manager operator (or upstream bpfman operator) must be installed.
+ - `UDNMapping`: [Unsupported (*)]. Enable interfaces mapping to User Defined Networks (UDN).
+ This feature requires mounting the kernel debug filesystem, so the eBPF agent pods must run as privileged.
It requires using the OVN-Kubernetes network plugin with the Observability feature.
- IMPORTANT: This feature is available as a Developer Preview.
items:
description: |-
Agent feature, can be one of:
- `PacketDrop`, to track packet drops.
- `DNSTracking`, to track specific information on DNS traffic.
- `FlowRTT`, to track TCP latency.
- - `NetworkEvents`, to track Network events [Developer Preview].
- - `PacketTranslation`, to enrich flows with packets translation information.
- - `EbpfManager`, to enable using EBPF Manager to manage netobserv ebpf programs [Developer Preview].
- - `UDNMapping`, to enable interfaces mappind to udn [Developer Preview].
+ - `NetworkEvents`, to track network events [Technology Preview].
+ - `PacketTranslation`, to enrich flows with packets translation information, such as Service NAT.
+ - `EbpfManager`, to enable using eBPF Manager to manage NetObserv eBPF programs. [Unsupported (*)].
+ - `UDNMapping`, to enable interfaces mapping to UDN. [Unsupported (*)].
enum:
- PacketDrop
- DNSTracking
@@ -4070,8 +4070,11 @@ spec:
- SCTP
type: string
rules:
- description: '`flowFilterRules` defines a list of ebpf
- agent flow filtering rules'
+ description: |-
+ `rules` defines a list of filtering rules on the eBPF Agents.
+ When filtering is enabled, by default, flows that don't match any rule are rejected.
+ To change the default, you can define a rule that accepts everything: `{ action: "Accept", cidr: "0.0.0.0/0" }`, and then refine with rejecting rules.
+ [Unsupported (*)].
items:
description: '`EBPFFlowFilterRule` defines the desired
eBPF agent configuration regarding flow filtering
@@ -4155,7 +4158,8 @@ spec:
type: string
sampling:
description: '`sampling` sampling rate for the matched
- flow'
+ flows, overriding the global sampling defined
+ at `spec.agent.ebpf.sampling`.'
format: int32
type: integer
sourcePorts:
@@ -4190,7 +4194,7 @@ spec:
type: array
sampling:
description: '`sampling` sampling rate for the matched
- flow'
+ flows, overriding the global sampling defined at `spec.agent.ebpf.sampling`.'
format: int32
type: integer
sourcePorts:
@@ -6052,7 +6056,7 @@ spec:
- Kafka
type: string
exporters:
- description: '`exporters` define additional optional exporters for
+ description: '`exporters` defines additional optional exporters for
custom consumption or storage.'
items:
description: '`FlowCollectorExporter` defines an additional exporter
@@ -8157,7 +8161,7 @@ spec:
type: object
secondaryNetworks:
description: |-
- Define secondary networks to be checked for resources identification.
+ Defines secondary networks to be checked for resources identification.
To guarantee a correct identification, indexed values must form an unique identifier across the cluster.
If the same index is used by several resources, those resources might be incorrectly labeled.
items:
@@ -8195,15 +8199,15 @@ spec:
type: string
deduper:
description: |-
- `deduper` allows to sample or drop flows identified as duplicates, in order to save on resource usage.
- IMPORTANT: This feature is available as a Developer Preview.
+ `deduper` allows you to sample or drop flows identified as duplicates, in order to save on resource usage.
+ [Unsupported (*)].
properties:
mode:
default: Disabled
description: |-
Set the Processor de-duplication mode. It comes in addition to the Agent-based deduplication because the Agent cannot de-duplicate same flows reported from different nodes.
- - Use `Drop` to drop every flow considered as duplicates, allowing saving more on resource usage but potentially loosing some information such as the network interfaces used from peer, or network events.
- - Use `Sample` to randomly keep only 1 flow on 50 (by default) among the ones considered as duplicates. This is a compromise between dropping every duplicates or keeping every duplicates. This sampling action comes in addition to the Agent-based sampling. If both Agent and Processor sampling are 50, the combined sampling is 1:2500.
+ - Use `Drop` to drop every flow considered as duplicates, allowing saving more on resource usage but potentially losing some information such as the network interfaces used from peer, or network events.
+ - Use `Sample` to randomly keep only one flow on 50, which is the default, among the ones considered as duplicates. This is a compromise between dropping every duplicate or keeping every duplicate. This sampling action comes in addition to the Agent-based sampling. If both Agent and Processor sampling values are `50`, the combined sampling is 1:2500.
- Use `Disabled` to turn off Processor-based de-duplication.
enum:
- Disabled
@@ -8220,27 +8224,29 @@ spec:
type: object
filters:
description: |-
- `filters` let you define custom filters to limit the amount of generated flows.
- IMPORTANT: This feature is available as a Developer Preview.
+ `filters` lets you define custom filters to limit the amount of generated flows.
+ These filters provide more flexibility than the eBPF Agent filters (in `spec.agent.ebpf.flowFilter`), such as allowing to filter by Kubernetes namespace,
+ but with a lesser improvement in performance.
+ [Unsupported (*)].
items:
description: '`FLPFilterSet` defines the desired configuration
- for FLP-based filtering satisfying all conditions'
+ for FLP-based filtering satisfying all conditions.'
properties:
allOf:
description: '`filters` is a list of matches that must be
all satisfied in order to remove a flow.'
items:
description: '`FLPSingleFilter` defines the desired configuration
- for a single FLP-based filter'
+ for a single FLP-based filter.'
properties:
field:
description: |-
- Name of the field to filter on
- Refer to the documentation for the list of available fields: https://docs.openshift.com/container-platform/latest/observability/network_observability/json-flows-format-reference.html.
+ Name of the field to filter on.
+ Refer to the documentation for the list of available fields: https://github.com/netobserv/network-observability-operator/blob/main/docs/flows-format.adoc.
type: string
matchType:
default: Equal
- description: Type of matching to apply
+ description: Type of matching to apply.
enum:
- Equal
- NotEqual
@@ -8261,8 +8267,8 @@ spec:
type: object
type: array
outputTarget:
- description: 'If specified, this filters only target a single
- output: `Loki`, `Metrics` or `Exporters`. By default,
+ description: 'If specified, these filters only target a
+ single output: `Loki`, `Metrics` or `Exporters`. By default,
all outputs are targeted.'
enum:
- ""
@@ -8625,7 +8631,7 @@ spec:
default: Flows
description: |-
`logTypes` defines the desired record types to generate. Possible values are:
- - `Flows` (default) to export regular network flows.
+ - `Flows` to export regular network flows. This is the default.
- `Conversations` to generate events for started conversations, ended conversations as well as periodic "tick" updates.
- `EndedConversations` to generate only ended conversations events.
- `All` to generate both network flows and all conversations events. It is not recommended due to the impact on resources footprint.
@@ -9043,7 +9049,7 @@ spec:
description: '`FlowCollectorStatus` defines the observed state of FlowCollector'
properties:
conditions:
- description: '`conditions` represent the latest available observations
+ description: '`conditions` represents the latest available observations
of an object''s state'
items:
description: Condition contains details for one aspect of the current
diff --git a/bundle/manifests/flows.netobserv.io_flowmetrics.yaml b/bundle/manifests/flows.netobserv.io_flowmetrics.yaml
index ad3b13f7d..818da1b67 100644
--- a/bundle/manifests/flows.netobserv.io_flowmetrics.yaml
+++ b/bundle/manifests/flows.netobserv.io_flowmetrics.yaml
@@ -198,7 +198,7 @@ spec:
type: array
flatten:
description: |-
- `flatten` is a list of list-type fields that must be flattened, such as Interfaces and NetworkEvents. Flattened fields generate one metric per item in that field.
+ `flatten` is a list of array-type fields that must be flattened, such as Interfaces or NetworkEvents. Flattened fields generate one metric per item in that field.
For instance, when flattening `Interfaces` on a bytes counter, a flow having Interfaces [br-ex, ens5] increases one counter for `br-ex` and another for `ens5`.
items:
type: string
diff --git a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml
index a48070ee9..a481e14fd 100644
--- a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml
+++ b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml
@@ -122,7 +122,7 @@ spec:
the kernel debug filesystem, so the eBPF pod has to run as privileged.
- `PacketTranslation`: enable enriching flows with packet's translation information.
- `EbpfManager`: allow using eBPF manager to manage netobserv ebpf programs.
- - `UDNMapping`, to enable interfaces mappind to udn.
+ - `UDNMapping`, to enable interfaces mapping to udn.
items:
description: |-
Agent feature, can be one of:
@@ -132,7 +132,7 @@ spec:
- `NetworkEvents`, to track Network events.
- `PacketTranslation`, to enrich flows with packets translation information.
- `EbpfManager`, to enable using EBPF Manager to manage netobserv ebpf programs [Developer Preview].
- - `UDNMapping`, to enable interfaces mappind to udn [Developer Preview].
+ - `UDNMapping`, to enable interfaces mapping to udn [Developer Preview].
enum:
- PacketDrop
- DNSTracking
@@ -215,7 +215,10 @@ spec:
- SCTP
type: string
rules:
- description: '`flowFilterRules` defines a list of ebpf agent flow filtering rules'
+ description: |-
+ `rules` defines a list of filtering rules on the eBPF Agents.
+ When filtering is enabled, by default, flows that don't match any rule are rejected.
+ To change the default, you can define a rule that accepts everything: `{ action: "Accept", cidr: "0.0.0.0/0" }`, and then refine with rejecting rules.
items:
description: '`EBPFFlowFilterRule` defines the desired eBPF agent configuration regarding flow filtering rule.'
properties:
@@ -3632,32 +3635,30 @@ spec:
features:
description: |-
List of additional features to enable. They are all disabled by default. Enabling additional features might have performance impacts. Possible values are:
- - `PacketDrop`: enable the packets drop flows logging feature. This feature requires mounting
- the kernel debug filesystem, so the eBPF agent pods have to run as privileged.
+ - `PacketDrop`: Enable the packets drop flows logging feature. This feature requires mounting
+ the kernel debug filesystem, so the eBPF agent pods must run as privileged.
If the `spec.agent.ebpf.privileged` parameter is not set, an error is reported.
- - `DNSTracking`: enable the DNS tracking feature.
- - `FlowRTT`: enable flow latency (sRTT) extraction in the eBPF agent from TCP traffic.
- - `NetworkEvents`: enable the network events monitoring feature, such as correlating flows and network policies.
- This feature requires mounting the kernel debug filesystem, so the eBPF agent pods have to run as privileged.
+ - `DNSTracking`: Enable the DNS tracking feature.
+ - `FlowRTT`: Enable flow latency (sRTT) extraction in the eBPF agent from TCP traffic.
+ - `NetworkEvents`: Enable the network events monitoring feature, such as correlating flows and network policies.
+ This feature requires mounting the kernel debug filesystem, so the eBPF agent pods must run as privileged.
It requires using the OVN-Kubernetes network plugin with the Observability feature.
- IMPORTANT: This feature is available as a Developer Preview.
- - `PacketTranslation`: enable enriching flows with packet's translation information.
- - `EbpfManager`: allow using eBPF manager to manage netobserv ebpf programs.
- IMPORTANT: This feature is available as a Developer Preview.
- - `UDNMapping`, to enable interfaces mappind to udn.
- This feature requires mounting the kernel debug filesystem, so the eBPF agent pods have to run as privileged.
+ IMPORTANT: This feature is available as a Technology Preview.
+ - `PacketTranslation`: Enable enriching flows with packet translation information, such as Service NAT.
+ - `EbpfManager`: [Unsupported (*)]. Use eBPF Manager to manage NetObserv eBPF programs. Prerequisite: the eBPF Manager operator (or upstream bpfman operator) must be installed.
+ - `UDNMapping`: [Unsupported (*)]. Enable interfaces mapping to User Defined Networks (UDN).
+ This feature requires mounting the kernel debug filesystem, so the eBPF agent pods must run as privileged.
It requires using the OVN-Kubernetes network plugin with the Observability feature.
- IMPORTANT: This feature is available as a Developer Preview.
items:
description: |-
Agent feature, can be one of:
- `PacketDrop`, to track packet drops.
- `DNSTracking`, to track specific information on DNS traffic.
- `FlowRTT`, to track TCP latency.
- - `NetworkEvents`, to track Network events [Developer Preview].
- - `PacketTranslation`, to enrich flows with packets translation information.
- - `EbpfManager`, to enable using EBPF Manager to manage netobserv ebpf programs [Developer Preview].
- - `UDNMapping`, to enable interfaces mappind to udn [Developer Preview].
+ - `NetworkEvents`, to track network events [Technology Preview].
+ - `PacketTranslation`, to enrich flows with packet translation information, such as Service NAT.
+ - `EbpfManager`, to enable using eBPF Manager to manage NetObserv eBPF programs. [Unsupported (*)].
+ - `UDNMapping`, to enable interfaces mapping to UDN. [Unsupported (*)].
enum:
- PacketDrop
- DNSTracking
@@ -3740,7 +3741,11 @@ spec:
- SCTP
type: string
rules:
- description: '`flowFilterRules` defines a list of ebpf agent flow filtering rules'
+ description: |-
+ `rules` defines a list of filtering rules on the eBPF Agents.
+ When filtering is enabled, by default, flows that don't match any rule are rejected.
+ To change the default, you can define a rule that accepts everything: `{ action: "Accept", cidr: "0.0.0.0/0" }`, and then refine with rejecting rules.
+ [Unsupported (*)].
items:
description: '`EBPFFlowFilterRule` defines the desired eBPF agent configuration regarding flow filtering rule.'
properties:
@@ -3810,7 +3815,7 @@ spec:
- SCTP
type: string
sampling:
- description: '`sampling` sampling rate for the matched flow'
+ description: '`sampling` sampling rate for the matched flows, overriding the global sampling defined at `spec.agent.ebpf.sampling`.'
format: int32
type: integer
sourcePorts:
@@ -3844,7 +3849,7 @@ spec:
minItems: 1
type: array
sampling:
- description: '`sampling` sampling rate for the matched flow'
+ description: '`sampling` sampling rate for the matched flows, overriding the global sampling defined at `spec.agent.ebpf.sampling`.'
format: int32
type: integer
sourcePorts:
@@ -5604,7 +5609,7 @@ spec:
- Kafka
type: string
exporters:
- description: '`exporters` define additional optional exporters for custom consumption or storage.'
+ description: '`exporters` defines additional optional exporters for custom consumption or storage.'
items:
description: '`FlowCollectorExporter` defines an additional exporter to send enriched flows to.'
properties:
@@ -7501,7 +7506,7 @@ spec:
type: object
secondaryNetworks:
description: |-
- Define secondary networks to be checked for resources identification.
+ Defines secondary networks to be checked for resource identification.
To guarantee a correct identification, indexed values must form an unique identifier across the cluster.
If the same index is used by several resources, those resources might be incorrectly labeled.
items:
@@ -7534,15 +7539,15 @@ spec:
type: string
deduper:
description: |-
- `deduper` allows to sample or drop flows identified as duplicates, in order to save on resource usage.
- IMPORTANT: This feature is available as a Developer Preview.
+ `deduper` allows you to sample or drop flows identified as duplicates, in order to save on resource usage.
+ [Unsupported (*)].
properties:
mode:
default: Disabled
description: |-
Set the Processor de-duplication mode. It comes in addition to the Agent-based deduplication because the Agent cannot de-duplicate same flows reported from different nodes.
- - Use `Drop` to drop every flow considered as duplicates, allowing saving more on resource usage but potentially loosing some information such as the network interfaces used from peer, or network events.
- - Use `Sample` to randomly keep only 1 flow on 50 (by default) among the ones considered as duplicates. This is a compromise between dropping every duplicates or keeping every duplicates. This sampling action comes in addition to the Agent-based sampling. If both Agent and Processor sampling are 50, the combined sampling is 1:2500.
+ - Use `Drop` to drop every flow considered a duplicate, allowing saving more on resource usage but potentially losing some information such as the network interfaces used by the peer, or network events.
+ - Use `Sample` to randomly keep only one flow in 50, which is the default, among the ones considered as duplicates. This is a compromise between dropping every duplicate and keeping every duplicate. This sampling action comes in addition to the Agent-based sampling. If both Agent and Processor sampling values are `50`, the combined sampling is 1:2500.
- Use `Disabled` to turn off Processor-based de-duplication.
enum:
- Disabled
@@ -7558,24 +7563,26 @@ spec:
type: object
filters:
description: |-
- `filters` let you define custom filters to limit the amount of generated flows.
- IMPORTANT: This feature is available as a Developer Preview.
+ `filters` lets you define custom filters to limit the amount of generated flows.
+ These filters provide more flexibility than the eBPF Agent filters (in `spec.agent.ebpf.flowFilter`), such as the ability to filter by Kubernetes namespace,
+ but with a smaller improvement in performance.
+ [Unsupported (*)].
items:
- description: '`FLPFilterSet` defines the desired configuration for FLP-based filtering satisfying all conditions'
+ description: '`FLPFilterSet` defines the desired configuration for FLP-based filtering satisfying all conditions.'
properties:
allOf:
description: '`filters` is a list of matches that must be all satisfied in order to remove a flow.'
items:
- description: '`FLPSingleFilter` defines the desired configuration for a single FLP-based filter'
+ description: '`FLPSingleFilter` defines the desired configuration for a single FLP-based filter.'
properties:
field:
description: |-
- Name of the field to filter on
- Refer to the documentation for the list of available fields: https://docs.openshift.com/container-platform/latest/observability/network_observability/json-flows-format-reference.html.
+ Name of the field to filter on.
+ Refer to the documentation for the list of available fields: https://github.com/netobserv/network-observability-operator/blob/main/docs/flows-format.adoc.
type: string
matchType:
default: Equal
- description: Type of matching to apply
+ description: Type of matching to apply.
enum:
- Equal
- NotEqual
@@ -7593,7 +7600,7 @@ spec:
type: object
type: array
outputTarget:
- description: 'If specified, this filters only target a single output: `Loki`, `Metrics` or `Exporters`. By default, all outputs are targeted.'
+ description: 'If specified, these filters only target a single output: `Loki`, `Metrics` or `Exporters`. By default, all outputs are targeted.'
enum:
- ""
- Loki
@@ -7946,7 +7953,7 @@ spec:
default: Flows
description: |-
`logTypes` defines the desired record types to generate. Possible values are:
- - `Flows` (default) to export regular network flows.
+ - `Flows` to export regular network flows. This is the default.
- `Conversations` to generate events for started conversations, ended conversations as well as periodic "tick" updates.
- `EndedConversations` to generate only ended conversations events.
- `All` to generate both network flows and all conversations events. It is not recommended due to the impact on resources footprint.
@@ -8325,7 +8332,7 @@ spec:
description: '`FlowCollectorStatus` defines the observed state of FlowCollector'
properties:
conditions:
- description: '`conditions` represent the latest available observations of an object''s state'
+ description: '`conditions` represents the latest available observations of an object''s state'
items:
description: Condition contains details for one aspect of the current state of this API Resource.
properties:
diff --git a/config/crd/bases/flows.netobserv.io_flowmetrics.yaml b/config/crd/bases/flows.netobserv.io_flowmetrics.yaml
index f8d868b2a..5adc45fa1 100644
--- a/config/crd/bases/flows.netobserv.io_flowmetrics.yaml
+++ b/config/crd/bases/flows.netobserv.io_flowmetrics.yaml
@@ -188,7 +188,7 @@ spec:
type: array
flatten:
description: |-
- `flatten` is a list of list-type fields that must be flattened, such as Interfaces and NetworkEvents. Flattened fields generate one metric per item in that field.
+ `flatten` is a list of array-type fields that must be flattened, such as Interfaces or NetworkEvents. Flattened fields generate one metric per item in that field.
For instance, when flattening `Interfaces` on a bytes counter, a flow having Interfaces [br-ex, ens5] increases one counter for `br-ex` and another for `ens5`.
items:
type: string
diff --git a/controllers/consoleplugin/config/static-frontend-config.yaml b/controllers/consoleplugin/config/static-frontend-config.yaml
index b9339b571..7bd8064b3 100644
--- a/controllers/consoleplugin/config/static-frontend-config.yaml
+++ b/controllers/consoleplugin/config/static-frontend-config.yaml
@@ -1038,22 +1038,22 @@ filters:
name: Xlat Zone Id
component: number
- id: xlat_src_address
- name: Xlat src address
+ name: Xlat source address
component: text
category: source
hint: Specify a single IP or range.
- id: xlat_dst_address
- name: Xlat dst address
+ name: Xlat destination address
component: text
category: destination
hint: Specify a single IP or range.
- id: xlat_src_port
- name: Xlat src port
+ name: Xlat source port
component: autocomplete
category: source
hint: Specify a single port number or name.
- id: xlat_dst_port
- name: Xlat dst port
+ name: Xlat destination port
component: autocomplete
category: destination
hint: Specify a single port number or name.
@@ -1402,22 +1402,22 @@ fields:
description: packet translation zone id
- name: XlatSrcPort
type: number
- description: packet translation src port
+ description: packet translation source port
- name: XlatDstPort
type: number
- description: packet translation dst port
+ description: packet translation destination port
- name: XlatSrcAddr
type: string
- description: packet translation src address
+ description: packet translation source address
- name: XlatDstAddr
type: string
- description: packet translation dst address
+ description: packet translation destination address
- name: K8S_ClusterName
type: string
description: Cluster name or identifier
- name: _RecordType
type: string
- description: "Type of record: 'flowLog' for regular flow logs, or 'newConnection', 'heartbeat', 'endConnection' for conversation tracking"
+ description: "Type of record: `flowLog` for regular flow logs, or `newConnection`, `heartbeat`, `endConnection` for conversation tracking"
- name: _HashId
type: string
description: In conversation tracking, the conversation identifier
diff --git a/controllers/ebpf/agent_controller.go b/controllers/ebpf/agent_controller.go
index e4cea0f27..1d9412ff9 100644
--- a/controllers/ebpf/agent_controller.go
+++ b/controllers/ebpf/agent_controller.go
@@ -504,8 +504,8 @@ func (c *AgentController) envConfig(ctx context.Context, coll *flowslatest.FlowC
if helper.IsEBFPFlowFilterEnabled(&coll.Spec.Agent.EBPF) {
config = append(config, corev1.EnvVar{Name: envEnableFlowFilter, Value: "true"})
- if len(coll.Spec.Agent.EBPF.FlowFilter.FlowFilterRules) != 0 {
- if filterRules := c.configureFlowFiltersRules(coll.Spec.Agent.EBPF.FlowFilter.FlowFilterRules); filterRules != nil {
+ if len(coll.Spec.Agent.EBPF.FlowFilter.Rules) != 0 {
+ if filterRules := c.configureFlowFiltersRules(coll.Spec.Agent.EBPF.FlowFilter.Rules); filterRules != nil {
config = append(config, filterRules...)
}
} else {
diff --git a/docs/FlowCollector.md b/docs/FlowCollector.md
index f1cad2c0c..a9267f768 100644
--- a/docs/FlowCollector.md
+++ b/docs/FlowCollector.md
@@ -294,7 +294,7 @@ If the `spec.agent.ebpf.privileged` parameter is not set, an error is reported.<
the kernel debug filesystem, so the eBPF pod has to run as privileged.
- `PacketTranslation`: enable enriching flows with packet's translation information.
- `EbpfManager`: allow using eBPF manager to manage netobserv ebpf programs.
-- `UDNMapping`, to enable interfaces mappind to udn.
+- `UDNMapping`, to enable interfaces mapping to udn.
enum |
Set the Processor de-duplication mode. It comes in addition to the Agent-based deduplication because the Agent cannot de-duplicate same flows reported from different nodes. -- Use `Drop` to drop every flow considered as duplicates, allowing saving more on resource usage but potentially loosing some information such as the network interfaces used from peer, or network events. -- Use `Sample` to randomly keep only 1 flow on 50 (by default) among the ones considered as duplicates. This is a compromise between dropping every duplicates or keeping every duplicates. This sampling action comes in addition to the Agent-based sampling. If both Agent and Processor sampling are 50, the combined sampling is 1:2500. +- Use `Drop` to drop every flow considered as duplicates, allowing saving more on resource usage but potentially losing some information such as the network interfaces used from peer, or network events. +- Use `Sample` to randomly keep only one flow on 50, which is the default, among the ones considered as duplicates. This is a compromise between dropping every duplicate or keeping every duplicate. This sampling action comes in addition to the Agent-based sampling. If both Agent and Processor sampling values are `50`, the combined sampling is 1:2500. - Use `Disabled` to turn off Processor-based de-duplication. Enum: Disabled, Drop, Sample @@ -16474,7 +16479,7 @@ IMPORTANT: This feature is available as a Developer Preview. -`FLPFilterSet` defines the desired configuration for FLP-based filtering satisfying all conditions +`FLPFilterSet` defines the desired configuration for FLP-based filtering satisfying all conditions.
|