Commit 750a8a0
remove relevancy manager
Signed-off-by: Matthias Bertschy <[email protected]>
matthyx committed Oct 1, 2024
1 parent e4fe0c7 commit 750a8a0
Showing 30 changed files with 165 additions and 165,150 deletions.
5 changes: 2 additions & 3 deletions go.mod
@@ -21,11 +21,10 @@ require (
github.com/goradd/maps v0.1.5
github.com/hashicorp/golang-lru/v2 v2.0.7
github.com/inspektor-gadget/inspektor-gadget v0.32.1-0.20240910080600-c7396e29cbf6
github.com/kinbiko/jsonassert v1.1.1
github.com/kubescape/backend v0.0.20
github.com/kubescape/go-logger v0.0.23
github.com/kubescape/k8s-interface v0.0.170
github.com/kubescape/storage v0.0.119
github.com/kubescape/k8s-interface v0.0.176
github.com/kubescape/storage v0.0.127
github.com/panjf2000/ants/v2 v2.9.1
github.com/prometheus/alertmanager v0.27.0
github.com/prometheus/client_golang v1.20.3
10 changes: 4 additions & 6 deletions go.sum
@@ -529,8 +529,6 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kinbiko/jsonassert v1.1.1 h1:DB12divY+YB+cVpHULLuKePSi6+ui4M/shHSzJISkSE=
github.com/kinbiko/jsonassert v1.1.1/go.mod h1:NO4lzrogohtIdNUNzx8sdzB55M4R4Q1bsrWVdqQ7C+A=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
@@ -551,10 +549,10 @@ github.com/kubescape/backend v0.0.20 h1:E3nZGqWW8ELSh/n3ZRitlkmuZq33Lyx/42Lm4gpg
github.com/kubescape/backend v0.0.20/go.mod h1:FpazfN+c3Ucuvv4jZYCnk99moSBRNMVIxl5aWCZAEBo=
github.com/kubescape/go-logger v0.0.23 h1:5xh+Nm8eGImhFbtippRKLaFgsvlKE1ufvQhNM2P/570=
github.com/kubescape/go-logger v0.0.23/go.mod h1:Ayg7g769c7sXVB+P3fkJmbsJpoEmMmaUf9jeo+XuC3U=
github.com/kubescape/k8s-interface v0.0.170 h1:EtzomWoeeIWDz7QrAEsqUDpLHQwoh2m3tZITfrE/tiE=
github.com/kubescape/k8s-interface v0.0.170/go.mod h1:VoEoHI4Va08NiGAkYzbITF50aFMT5y4fPHRb4x2LtME=
github.com/kubescape/storage v0.0.119 h1:7qCSxMRfuCG35H3o832q69hBA06KKHyyLVW76nFy5YA=
github.com/kubescape/storage v0.0.119/go.mod h1:DAR1CmSDhRRBK26nNU4MrVpRAst5nN7IuPuvcnw9XeI=
github.com/kubescape/k8s-interface v0.0.176 h1:o8vKztPZtVff06qRK9H9h2M8auD7sQfbY36X0oFG9ws=
github.com/kubescape/k8s-interface v0.0.176/go.mod h1:1jA1z5uLLyVkGxWjlT9WibUvcE+chK4g9Hrxg+GajP4=
github.com/kubescape/storage v0.0.127 h1:yMY9I/s1hz0bz2O0wrE9T0Rr9sIXgaMZQ5EsBIh/qdM=
github.com/kubescape/storage v0.0.127/go.mod h1:Gkd2FkRb2pkSAvd4ezg/IKIGFame3EMTLdX//t8iA7o=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs=
25 changes: 2 additions & 23 deletions main.go
@@ -16,7 +16,6 @@ import (
"github.com/kubescape/node-agent/pkg/containerwatcher/v1"
"github.com/kubescape/node-agent/pkg/dnsmanager"
"github.com/kubescape/node-agent/pkg/exporters"
"github.com/kubescape/node-agent/pkg/filehandler/v1"
"github.com/kubescape/node-agent/pkg/healthmanager"
"github.com/kubescape/node-agent/pkg/malwaremanager"
malwaremanagerv1 "github.com/kubescape/node-agent/pkg/malwaremanager/v1"
@@ -32,13 +31,10 @@ import (
"github.com/kubescape/node-agent/pkg/objectcache/k8scache"
"github.com/kubescape/node-agent/pkg/objectcache/networkneighborhoodcache"
objectcachev1 "github.com/kubescape/node-agent/pkg/objectcache/v1"
"github.com/kubescape/node-agent/pkg/relevancymanager"
relevancymanagerv1 "github.com/kubescape/node-agent/pkg/relevancymanager/v1"
rulebinding "github.com/kubescape/node-agent/pkg/rulebindingmanager"
rulebindingcachev1 "github.com/kubescape/node-agent/pkg/rulebindingmanager/cache"
"github.com/kubescape/node-agent/pkg/rulemanager"
rulemanagerv1 "github.com/kubescape/node-agent/pkg/rulemanager/v1"
"github.com/kubescape/node-agent/pkg/sbomhandler/syfthandler"
"github.com/kubescape/node-agent/pkg/seccompmanager"
seccompmanagerv1 "github.com/kubescape/node-agent/pkg/seccompmanager/v1"
"github.com/kubescape/node-agent/pkg/storage/v1"
@@ -172,23 +168,6 @@ func main() {
applicationProfileManager = applicationprofilemanager.CreateApplicationProfileManagerMock()
}

// Create the relevancy manager
var relevancyManager relevancymanager.RelevancyManagerClient
if cfg.EnableRelevancy {
fileHandler, err := filehandler.CreateInMemoryFileHandler()
if err != nil {
logger.L().Ctx(ctx).Fatal("failed to create the filehandler for relevancy manager", helpers.Error(err))
}

sbomHandler := syfthandler.CreateSyftSBOMHandler(storageClient)
relevancyManager, err = relevancymanagerv1.CreateRelevancyManager(ctx, cfg, clusterData.ClusterName, fileHandler, k8sClient, sbomHandler, preRunningContainersIDs)
if err != nil {
logger.L().Ctx(ctx).Fatal("error creating the relevancy manager", helpers.Error(err))
}
} else {
relevancyManager = relevancymanager.CreateRelevancyManagerMock()
}

// Create the network and DNS managers
var networkManagerClient networkmanager.NetworkManagerClient
var dnsManagerClient dnsmanager.DNSManagerClient
@@ -250,7 +229,7 @@ func main() {
var profileManager nodeprofilemanager.NodeProfileManagerClient
if cfg.EnableNodeProfile {
// FIXME validate the HTTPExporterConfig before we use it ?
profileManager = nodeprofilemanagerv1.NewNodeProfileManager(cfg, *clusterData, nodeName, k8sObjectCache, relevancyManager, ruleManager)
profileManager = nodeprofilemanagerv1.NewNodeProfileManager(cfg, *clusterData, nodeName, k8sObjectCache, ruleManager)
} else {
profileManager = nodeprofilemanager.NewNodeProfileManagerMock()
}
@@ -269,7 +248,7 @@ func main() {
}

// Create the container handler
mainHandler, err := containerwatcher.CreateIGContainerWatcher(cfg, applicationProfileManager, k8sClient, relevancyManager, networkManagerClient, dnsManagerClient, prometheusExporter, ruleManager, malwareManager, preRunningContainersIDs, &ruleBindingNotify, containerRuntime, nil)
mainHandler, err := containerwatcher.CreateIGContainerWatcher(cfg, applicationProfileManager, k8sClient, networkManagerClient, dnsManagerClient, prometheusExporter, ruleManager, malwareManager, preRunningContainersIDs, &ruleBindingNotify, containerRuntime, nil)
if err != nil {
logger.L().Ctx(ctx).Fatal("error creating the container watcher", helpers.Error(err))
}
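After this commit, main.go constructs the node profile manager and the container watcher without a relevancy manager. A minimal sketch of the two affected call sites, assuming the surrounding setup (cfg, clusterData, nodeName, k8sObjectCache and the various manager clients) is unchanged from the rest of main.go:

    // Sketch of the simplified wiring; the surrounding setup is assumed from main.go.
    profileManager := nodeprofilemanagerv1.NewNodeProfileManager(
        cfg, *clusterData, nodeName, k8sObjectCache, ruleManager)

    // The container watcher likewise loses its relevancyManager argument.
    mainHandler, err := containerwatcher.CreateIGContainerWatcher(
        cfg, applicationProfileManager, k8sClient,
        networkManagerClient, dnsManagerClient, prometheusExporter,
        ruleManager, malwareManager, preRunningContainersIDs,
        &ruleBindingNotify, containerRuntime, nil)
    if err != nil {
        logger.L().Ctx(ctx).Fatal("error creating the container watcher", helpers.Error(err))
    }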
50 changes: 29 additions & 21 deletions pkg/applicationprofilemanager/v1/applicationprofile_manager.go
@@ -87,10 +87,14 @@ func (am *ApplicationProfileManager) ensureInstanceID(container *containercollec
return fmt.Errorf("failed to get workload: %w", err)
}
pod := wl.(*workloadinterface.Workload)

// fill container type, index and names
if watchedContainer.ContainerType == utils.Unknown {
if err := watchedContainer.SetContainerInfo(pod, container.K8s.ContainerName); err != nil {
return fmt.Errorf("failed to set container info: %w", err)
}
}
// get pod template hash
watchedContainer.TemplateHash, _ = pod.GetLabel("pod-template-hash")

// find parentWlid
kind, name, err := am.k8sClient.CalculateWorkloadParentRecursive(pod)
if err != nil {
@@ -107,7 +111,7 @@ func (am *ApplicationProfileManager) ensureInstanceID(container *containercollec
return fmt.Errorf("failed to validate WLID: %w", err)
}
watchedContainer.ParentResourceVersion = w.GetResourceVersion()
// find instanceID
// find instanceID - this has to be the last one
instanceIDs, err := instanceidhandler.GenerateInstanceID(pod)
if err != nil {
return fmt.Errorf("failed to generate instanceID: %w", err)
@@ -118,10 +122,6 @@ func (am *ApplicationProfileManager) ensureInstanceID(container *containercollec
watchedContainer.InstanceID = instanceIDs[i]
}
}
// fill container type, index and names
if watchedContainer.ContainerType == utils.Unknown {
watchedContainer.SetContainerInfo(pod, container.K8s.ContainerName)
}
return nil
}

@@ -143,6 +143,7 @@ func (am *ApplicationProfileManager) deleteResources(watchedContainer *utils.Wat
am.toSaveOpens.Delete(watchedContainer.K8sContainerID)
am.watchedContainerChannels.Delete(watchedContainer.ContainerID)
}

func (am *ApplicationProfileManager) ContainerReachedMaxTime(containerID string) {
if channel := am.watchedContainerChannels.Get(containerID); channel != nil {
channel <- utils.ContainerReachedMaxTime
@@ -332,16 +333,17 @@ func (am *ApplicationProfileManager) saveProfile(ctx context.Context, watchedCon
ObjectMeta: metav1.ObjectMeta{
Name: slug,
Annotations: map[string]string{
helpersv1.InstanceIDMetadataKey: watchedContainer.InstanceID.GetStringNoContainer(),
helpersv1.WlidMetadataKey: watchedContainer.Wlid,
helpersv1.CompletionMetadataKey: string(watchedContainer.GetCompletionStatus()),
helpersv1.StatusMetadataKey: string(watchedContainer.GetStatus()),
},
Labels: utils.GetLabels(watchedContainer, true),
},
}
addContainers := func(containers []v1beta1.ApplicationProfileContainer, containerNames []string) []v1beta1.ApplicationProfileContainer {
for _, name := range containerNames {
seccompProfile, err := am.seccompManager.GetSeccompProfile(name, watchedContainer.SeccompProfilePath)
addContainers := func(containers []v1beta1.ApplicationProfileContainer, containerInfos []utils.ContainerInfo) []v1beta1.ApplicationProfileContainer {
for _, info := range containerInfos {
seccompProfile, err := am.seccompManager.GetSeccompProfile(info.Name, watchedContainer.SeccompProfilePath)
if err != nil {
logger.L().Ctx(ctx).Error("ApplicationProfileManager - failed to get seccomp profile", helpers.Error(err),
helpers.String("slug", slug),
@@ -350,21 +352,23 @@ func (am *ApplicationProfileManager) saveProfile(ctx context.Context, watchedCon
helpers.String("k8s workload", watchedContainer.K8sContainerID))
}
containers = append(containers, v1beta1.ApplicationProfileContainer{
Name: name,
Name: info.Name,
Endpoints: make([]v1beta1.HTTPEndpoint, 0),
Execs: make([]v1beta1.ExecCalls, 0),
Opens: make([]v1beta1.OpenCalls, 0),
Capabilities: make([]string, 0),
Syscalls: make([]string, 0),
SeccompProfile: seccompProfile,
ImageTag: info.ImageTag,
ImageID: info.ImageID,
})
}
return containers
}
newObject.Spec.Architectures = []string{runtime.GOARCH}
newObject.Spec.Containers = addContainers(newObject.Spec.Containers, watchedContainer.ContainerNames[utils.Container])
newObject.Spec.InitContainers = addContainers(newObject.Spec.InitContainers, watchedContainer.ContainerNames[utils.InitContainer])
newObject.Spec.EphemeralContainers = addContainers(newObject.Spec.EphemeralContainers, watchedContainer.ContainerNames[utils.EphemeralContainer])
newObject.Spec.Containers = addContainers(newObject.Spec.Containers, watchedContainer.ContainerInfos[utils.Container])
newObject.Spec.InitContainers = addContainers(newObject.Spec.InitContainers, watchedContainer.ContainerInfos[utils.InitContainer])
newObject.Spec.EphemeralContainers = addContainers(newObject.Spec.EphemeralContainers, watchedContainer.ContainerInfos[utils.EphemeralContainer])
// enrich container
newContainer := utils.GetApplicationProfileContainer(newObject, watchedContainer.ContainerType, watchedContainer.ContainerIndex)
utils.EnrichApplicationProfileContainer(newContainer, capabilities, observedSyscalls, execs, opens, endpoints)
@@ -394,12 +398,12 @@ func (am *ApplicationProfileManager) saveProfile(ctx context.Context, watchedCon
helpers.String("k8s workload", watchedContainer.K8sContainerID))
} else {
var replaceOperations []utils.PatchOperation
containerNames := watchedContainer.ContainerNames[watchedContainer.ContainerType]
containerNames := watchedContainer.ContainerInfos[watchedContainer.ContainerType]
// check existing container
existingContainer := utils.GetApplicationProfileContainer(existingObject, watchedContainer.ContainerType, watchedContainer.ContainerIndex)
if existingContainer == nil {
name := containerNames[watchedContainer.ContainerIndex]
seccompProfile, err := am.seccompManager.GetSeccompProfile(name, watchedContainer.SeccompProfilePath)
info := containerNames[watchedContainer.ContainerIndex]
seccompProfile, err := am.seccompManager.GetSeccompProfile(info.Name, watchedContainer.SeccompProfilePath)
if err != nil {
logger.L().Ctx(ctx).Error("ApplicationProfileManager - failed to get seccomp profile", helpers.Error(err),
helpers.String("slug", slug),
@@ -409,13 +413,15 @@ func (am *ApplicationProfileManager) saveProfile(ctx context.Context, watchedCon
}
logger.L().Debug("ApplicationProfileManager - got seccomp profile", helpers.Interface("profile", seccompProfile))
existingContainer = &v1beta1.ApplicationProfileContainer{
Name: containerNames[watchedContainer.ContainerIndex],
Name: info.Name,
Endpoints: make([]v1beta1.HTTPEndpoint, 0),
Execs: make([]v1beta1.ExecCalls, 0),
Opens: make([]v1beta1.OpenCalls, 0),
Capabilities: make([]string, 0),
Syscalls: make([]string, 0),
SeccompProfile: seccompProfile,
ImageTag: info.ImageTag,
ImageID: info.ImageID,
}
}
// update it
@@ -440,8 +446,8 @@ func (am *ApplicationProfileManager) saveProfile(ctx context.Context, watchedCon
}
// 3b. ensure the slice has all the containers
for i := len(existingContainers); i < len(containerNames); i++ {
name := containerNames[i]
seccompProfile, err := am.seccompManager.GetSeccompProfile(name, watchedContainer.SeccompProfilePath)
info := containerNames[i]
seccompProfile, err := am.seccompManager.GetSeccompProfile(info.Name, watchedContainer.SeccompProfilePath)
if err != nil {
logger.L().Ctx(ctx).Error("ApplicationProfileManager - failed to get seccomp profile", helpers.Error(err),
helpers.String("slug", slug),
@@ -453,13 +459,15 @@ func (am *ApplicationProfileManager) saveProfile(ctx context.Context, watchedCon
Op: "add",
Path: fmt.Sprintf("/spec/%s/%d", watchedContainer.ContainerType, i),
Value: v1beta1.ApplicationProfileContainer{
Name: name,
Name: info.Name,
Endpoints: make([]v1beta1.HTTPEndpoint, 0),
Execs: make([]v1beta1.ExecCalls, 0),
Opens: make([]v1beta1.OpenCalls, 0),
Capabilities: make([]string, 0),
Syscalls: make([]string, 0),
SeccompProfile: seccompProfile,
ImageTag: info.ImageTag,
ImageID: info.ImageID,
},
})
}
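The profile manager now reads container metadata from watchedContainer.ContainerInfos instead of plain name slices, and copies the image tag and image ID into each ApplicationProfileContainer. The concrete type lives in pkg/utils and is not part of this diff; a hedged sketch of what it presumably looks like, inferred only from the fields used above (info.Name, info.ImageTag, info.ImageID):

    // Hypothetical shape of utils.ContainerInfo, inferred from this diff;
    // the real definition in pkg/utils may differ.
    type ContainerInfo struct {
        Name     string // container name from the pod spec
        ImageTag string // image reference as written in the workload spec
        ImageID  string // image ID as reported by the container runtime
    }

Under that assumption, WatchedContainerData exposes something like ContainerInfos map[ContainerType][]ContainerInfo, which lets addContainers copy Name, ImageTag and ImageID straight into the regular, init and ephemeral container entries it builds.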
7 changes: 1 addition & 6 deletions pkg/containerwatcher/v1/container_watcher.go
@@ -43,7 +43,6 @@ import (
"github.com/kubescape/node-agent/pkg/malwaremanager"
"github.com/kubescape/node-agent/pkg/metricsmanager"
"github.com/kubescape/node-agent/pkg/networkmanager"
"github.com/kubescape/node-agent/pkg/relevancymanager"
rulebinding "github.com/kubescape/node-agent/pkg/rulebindingmanager"
"github.com/kubescape/node-agent/pkg/rulemanager"
"github.com/kubescape/node-agent/pkg/utils"
@@ -86,7 +85,6 @@ type IGContainerWatcher struct {
// Clients
applicationProfileManager applicationprofilemanager.ApplicationProfileManagerClient
k8sClient *k8sinterface.KubernetesApi
relevancyManager relevancymanager.RelevancyManagerClient
networkManager networkmanager.NetworkManagerClient
dnsManager dnsmanager.DNSManagerClient
ruleManager rulemanager.RuleManagerClient
@@ -149,7 +147,7 @@ type IGContainerWatcher struct {

var _ containerwatcher.ContainerWatcher = (*IGContainerWatcher)(nil)

func CreateIGContainerWatcher(cfg config.Config, applicationProfileManager applicationprofilemanager.ApplicationProfileManagerClient, k8sClient *k8sinterface.KubernetesApi, relevancyManager relevancymanager.RelevancyManagerClient, networkManagerClient networkmanager.NetworkManagerClient, dnsManagerClient dnsmanager.DNSManagerClient, metrics metricsmanager.MetricsManager, ruleManager rulemanager.RuleManagerClient, malwareManager malwaremanager.MalwareManagerClient, preRunningContainers mapset.Set[string], ruleBindingPodNotify *chan rulebinding.RuleBindingNotify, runtime *containerutilsTypes.RuntimeConfig, thirdPartyEventReceivers *maps.SafeMap[utils.EventType, mapset.Set[containerwatcher.EventReceiver]]) (*IGContainerWatcher, error) {
func CreateIGContainerWatcher(cfg config.Config, applicationProfileManager applicationprofilemanager.ApplicationProfileManagerClient, k8sClient *k8sinterface.KubernetesApi, networkManagerClient networkmanager.NetworkManagerClient, dnsManagerClient dnsmanager.DNSManagerClient, metrics metricsmanager.MetricsManager, ruleManager rulemanager.RuleManagerClient, malwareManager malwaremanager.MalwareManagerClient, preRunningContainers mapset.Set[string], ruleBindingPodNotify *chan rulebinding.RuleBindingNotify, runtime *containerutilsTypes.RuntimeConfig, thirdPartyEventReceivers *maps.SafeMap[utils.EventType, mapset.Set[containerwatcher.EventReceiver]]) (*IGContainerWatcher, error) {
// Use container collection to get notified for new containers
containerCollection := &containercollection.ContainerCollection{}
// Create a tracer collection instance
@@ -196,7 +194,6 @@ func CreateIGContainerWatcher(cfg config.Config, applicationProfileManager appli
}
metrics.ReportEvent(utils.ExecveEventType)
applicationProfileManager.ReportFileExec(k8sContainerID, path, event.Args)
relevancyManager.ReportFileExec(event.Runtime.ContainerID, k8sContainerID, path)
ruleManager.ReportEvent(utils.ExecveEventType, &event)
malwareManager.ReportEvent(utils.ExecveEventType, &event)

@@ -227,7 +224,6 @@ func CreateIGContainerWatcher(cfg config.Config, applicationProfileManager appli

metrics.ReportEvent(utils.OpenEventType)
applicationProfileManager.ReportFileOpen(k8sContainerID, path, event.Flags)
relevancyManager.ReportFileOpen(event.Runtime.ContainerID, k8sContainerID, path)
ruleManager.ReportEvent(utils.OpenEventType, &event)
malwareManager.ReportEvent(utils.OpenEventType, &event)

@@ -385,7 +381,6 @@ func CreateIGContainerWatcher(cfg config.Config, applicationProfileManager appli
// Clients
applicationProfileManager: applicationProfileManager,
k8sClient: k8sClient,
relevancyManager: relevancyManager,
networkManager: networkManagerClient,
dnsManager: dnsManagerClient,
ruleManager: ruleManager,
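Inside the watcher, exec and open events now fan out to one consumer fewer. A condensed sketch of the exec callback body after this commit, assuming the surrounding tracer plumbing shown above (k8sContainerID, path and event come from the enclosing callback):

    // Exec-event fan-out with relevancy reporting removed.
    metrics.ReportEvent(utils.ExecveEventType)
    applicationProfileManager.ReportFileExec(k8sContainerID, path, event.Args)
    ruleManager.ReportEvent(utils.ExecveEventType, &event)
    malwareManager.ReportEvent(utils.ExecveEventType, &event)

The open-event path follows the same pattern with utils.OpenEventType and ReportFileOpen(k8sContainerID, path, event.Flags).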
2 changes: 0 additions & 2 deletions pkg/containerwatcher/v1/container_watcher_private.go
@@ -57,7 +57,6 @@ func (ch *IGContainerWatcher) containerCallback(notif containercollection.PubSub
logger.L().Info("monitoring time ended", helpers.String("container ID", notif.Container.Runtime.ContainerID), helpers.String("k8s workload", k8sContainerID))
ch.timeBasedContainers.Remove(notif.Container.Runtime.ContainerID)
ch.applicationProfileManager.ContainerReachedMaxTime(notif.Container.Runtime.ContainerID)
ch.relevancyManager.ContainerReachedMaxTime(notif.Container.Runtime.ContainerID)
ch.networkManager.ContainerReachedMaxTime(notif.Container.Runtime.ContainerID)
ch.unregisterContainer(notif.Container)
})
@@ -83,7 +82,6 @@ func (ch *IGContainerWatcher) startContainerCollection(ctx context.Context) erro
containerEventFuncs := []containercollection.FuncNotify{
ch.containerCallback,
ch.applicationProfileManager.ContainerCallback,
ch.relevancyManager.ContainerCallback,
ch.networkManager.ContainerCallback,
ch.malwareManager.ContainerCallback,
ch.ruleManager.ContainerCallback,
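Lifecycle handling shrinks accordingly: the relevancy manager is dropped both from the max-monitoring-time handling and from the container event subscribers. A sketch of the subscriber registration after this commit, assuming the slice otherwise matches the file (any further entries beyond those visible in this diff are unchanged):

    // Container event subscribers with the relevancy manager removed.
    containerEventFuncs := []containercollection.FuncNotify{
        ch.containerCallback,
        ch.applicationProfileManager.ContainerCallback,
        ch.networkManager.ContainerCallback,
        ch.malwareManager.ContainerCallback,
        ch.ruleManager.ContainerCallback,
    }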