diff --git a/configurations/k8s_workloads/smart-remediation/c0016-fixed.yaml b/configurations/k8s_workloads/smart-remediation/c0016-fixed.yaml new file mode 100644 index 00000000..d1e26cb0 --- /dev/null +++ b/configurations/k8s_workloads/smart-remediation/c0016-fixed.yaml @@ -0,0 +1,35 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx +spec: + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: ["NET_ADMIN"] + privileged: false + volumeMounts: + - name: docker-socket + mountPath: /host-docker.sock + - name: host-volume + mountPath: /host-etc + volumes: + - name: docker-socket + hostPath: + path: /run/containerd/containerd.sock + type: Socket + - name: host-volume + hostPath: + path: /etc + type: Directory diff --git a/configurations/k8s_workloads/smart-remediation/c0017-fixed.yaml b/configurations/k8s_workloads/smart-remediation/c0017-fixed.yaml new file mode 100644 index 00000000..2910b70b --- /dev/null +++ b/configurations/k8s_workloads/smart-remediation/c0017-fixed.yaml @@ -0,0 +1,43 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx +spec: + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx + securityContext: + capabilities: + add: ["NET_ADMIN"] + privileged: true + readOnlyRootFilesystem: true + volumeMounts: + - name: cache + mountPath: /var/cache/nginx + - name: run + mountPath: /var/run + - name: docker-socket + mountPath: /host-docker.sock + - name: host-volume + mountPath: /host-etc + volumes: + - name: cache + emptyDir: {} + - name: run + emptyDir: {} + - name: docker-socket + hostPath: + path: /run/containerd/containerd.sock + type: Socket + - name: host-volume + hostPath: + path: /etc + type: Directory diff --git 
a/configurations/k8s_workloads/smart-remediation/c0034-fixed.yaml b/configurations/k8s_workloads/smart-remediation/c0034-fixed.yaml new file mode 100644 index 00000000..e815e339 --- /dev/null +++ b/configurations/k8s_workloads/smart-remediation/c0034-fixed.yaml @@ -0,0 +1,35 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx +spec: + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx + securityContext: + capabilities: + add: ["NET_ADMIN"] + privileged: true + volumeMounts: + - name: docker-socket + mountPath: /host-docker.sock + - name: host-volume + mountPath: /host-etc + automountServiceAccountToken: false + volumes: + - name: docker-socket + hostPath: + path: /run/containerd/containerd.sock + type: Socket + - name: host-volume + hostPath: + path: /etc + type: Directory diff --git a/configurations/k8s_workloads/smart-remediation/c0045-fixed.yaml b/configurations/k8s_workloads/smart-remediation/c0045-fixed.yaml new file mode 100644 index 00000000..4de90019 --- /dev/null +++ b/configurations/k8s_workloads/smart-remediation/c0045-fixed.yaml @@ -0,0 +1,36 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx +spec: + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx + securityContext: + capabilities: + add: ["NET_ADMIN"] + privileged: true + volumeMounts: + - name: docker-socket + mountPath: /host-docker.sock + readOnly: true + - name: host-volume + mountPath: /host-etc + readOnly: true + volumes: + - name: docker-socket + hostPath: + path: /run/containerd/containerd.sock + type: Socket + - name: host-volume + hostPath: + path: /etc + type: Directory diff --git a/configurations/k8s_workloads/smart-remediation/c0046-fixed.yaml b/configurations/k8s_workloads/smart-remediation/c0046-fixed.yaml new file mode 100644 index 00000000..3a88db15 --- /dev/null +++ 
b/configurations/k8s_workloads/smart-remediation/c0046-fixed.yaml @@ -0,0 +1,32 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx +spec: + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx + securityContext: + privileged: true + volumeMounts: + - name: docker-socket + mountPath: /host-docker.sock + - name: host-volume + mountPath: /host-etc + volumes: + - name: docker-socket + hostPath: + path: /run/containerd/containerd.sock + type: Socket + - name: host-volume + hostPath: + path: /etc + type: Directory diff --git a/configurations/k8s_workloads/smart-remediation/c0048-fixed.yaml b/configurations/k8s_workloads/smart-remediation/c0048-fixed.yaml new file mode 100644 index 00000000..ef94d02d --- /dev/null +++ b/configurations/k8s_workloads/smart-remediation/c0048-fixed.yaml @@ -0,0 +1,20 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx +spec: + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx + securityContext: + capabilities: + add: ["NET_ADMIN"] + privileged: true diff --git a/configurations/k8s_workloads/smart-remediation/c0057-fixed.yaml b/configurations/k8s_workloads/smart-remediation/c0057-fixed.yaml new file mode 100644 index 00000000..6bc74807 --- /dev/null +++ b/configurations/k8s_workloads/smart-remediation/c0057-fixed.yaml @@ -0,0 +1,34 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx +spec: + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx + securityContext: + capabilities: + add: ["NET_ADMIN"] + privileged: false + volumeMounts: + - name: docker-socket + mountPath: /host-docker.sock + - name: host-volume + mountPath: /host-etc + volumes: + - name: docker-socket + hostPath: + path: /run/containerd/containerd.sock + type: Socket + - name: 
host-volume + hostPath: + path: /etc + type: Directory diff --git a/configurations/k8s_workloads/smart-remediation/c0074-fixed.yaml b/configurations/k8s_workloads/smart-remediation/c0074-fixed.yaml new file mode 100644 index 00000000..183f0636 --- /dev/null +++ b/configurations/k8s_workloads/smart-remediation/c0074-fixed.yaml @@ -0,0 +1,28 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx +spec: + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx + securityContext: + capabilities: + add: ["NET_ADMIN"] + privileged: true + volumeMounts: + - name: host-volume + mountPath: /host-etc + volumes: + - name: host-volume + hostPath: + path: /etc + type: Directory diff --git a/configurations/k8s_workloads/smart-remediation/nginx-deployment.yaml b/configurations/k8s_workloads/smart-remediation/nginx-deployment.yaml new file mode 100644 index 00000000..0da50b37 --- /dev/null +++ b/configurations/k8s_workloads/smart-remediation/nginx-deployment.yaml @@ -0,0 +1,34 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx +spec: + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx + securityContext: + capabilities: + add: ["NET_ADMIN"] + privileged: true + volumeMounts: + - name: docker-socket + mountPath: /host-docker.sock + - name: host-volume + mountPath: /host-etc + volumes: + - name: docker-socket + hostPath: + path: /run/containerd/containerd.sock + type: Socket + - name: host-volume + hostPath: + path: /etc + type: Directory diff --git a/configurations/system/tests.py b/configurations/system/tests.py index 682b8ae4..fa714ce0 100644 --- a/configurations/system/tests.py +++ b/configurations/system/tests.py @@ -1,4 +1,5 @@ from configurations.system.tests_cases.network_policy_tests import NetworkPolicyTests +from configurations.system.tests_cases.smart_remediation_tests import 
SmartRemediationTests from configurations.system.tests_cases.synchronizer_tests import SynchronizerTests from systest_utils import TestUtil @@ -22,6 +23,7 @@ def all_tests_names(): tests.extend(TestUtil.get_class_methods(RelevantVulnerabilityScanningTests)) tests.extend(TestUtil.get_class_methods(NetworkPolicyTests)) tests.extend(TestUtil.get_class_methods(NotificationSTests)) + tests.extend(TestUtil.get_class_methods(SmartRemediationTests)) tests.extend(TestUtil.get_class_methods(SynchronizerTests)) return tests @@ -44,6 +46,8 @@ def get_test(test_name): return NetworkPolicyTests().__getattribute__(test_name)() if test_name in TestUtil.get_class_methods(NotificationSTests): return NotificationSTests().__getattribute__(test_name)() + if test_name in TestUtil.get_class_methods(SmartRemediationTests): + return SmartRemediationTests().__getattribute__(test_name)() if test_name in TestUtil.get_class_methods(SynchronizerTests): return SynchronizerTests().__getattribute__(test_name)() diff --git a/configurations/system/tests_cases/smart_remediation_tests.py b/configurations/system/tests_cases/smart_remediation_tests.py new file mode 100644 index 00000000..c3aeb14e --- /dev/null +++ b/configurations/system/tests_cases/smart_remediation_tests.py @@ -0,0 +1,103 @@ +import inspect + +from .structures import TestConfiguration +from systest_utils import statics + + +class SmartRemediationTests(object): + + # C-0016 - Allow privilege escalation + @staticmethod + def smart_remediation_c0016(): + from tests_scripts.helm.smart_remediation import SmartRemediation + from os.path import join + return TestConfiguration( + name=inspect.currentframe().f_code.co_name, + control="C-0016", + workload=join(statics.DEFAULT_SMART_REMEDIATION_PATH, "nginx-deployment.yaml"), + workload_fix=join(statics.DEFAULT_SMART_REMEDIATION_PATH, "c0016-fixed.yaml"), + test_obj=SmartRemediation) + + # C-0017 - Immutable container filesystem + @staticmethod + def smart_remediation_c0017(): + from 
tests_scripts.helm.smart_remediation import SmartRemediation + from os.path import join + return TestConfiguration( + name=inspect.currentframe().f_code.co_name, + control="C-0017", + workload=join(statics.DEFAULT_SMART_REMEDIATION_PATH, "nginx-deployment.yaml"), + workload_fix=join(statics.DEFAULT_SMART_REMEDIATION_PATH, "c0017-fixed.yaml"), + test_obj=SmartRemediation) + + # C-0034 - Automatic mapping of service account + @staticmethod + def smart_remediation_c0034(): + from tests_scripts.helm.smart_remediation import SmartRemediation + from os.path import join + return TestConfiguration( + name=inspect.currentframe().f_code.co_name, + control="C-0034", + workload=join(statics.DEFAULT_SMART_REMEDIATION_PATH, "nginx-deployment.yaml"), + workload_fix=join(statics.DEFAULT_SMART_REMEDIATION_PATH, "c0034-fixed.yaml"), + test_obj=SmartRemediation) + + # C-0045 - Writable hostPath mount + @staticmethod + def smart_remediation_c0045(): + from tests_scripts.helm.smart_remediation import SmartRemediation + from os.path import join + return TestConfiguration( + name=inspect.currentframe().f_code.co_name, + control="C-0045", + workload=join(statics.DEFAULT_SMART_REMEDIATION_PATH, "nginx-deployment.yaml"), + workload_fix=join(statics.DEFAULT_SMART_REMEDIATION_PATH, "c0045-fixed.yaml"), + test_obj=SmartRemediation) + + # C-0046 - Insecure capabilities + @staticmethod + def smart_remediation_c0046(): + from tests_scripts.helm.smart_remediation import SmartRemediation + from os.path import join + return TestConfiguration( + name=inspect.currentframe().f_code.co_name, + control="C-0046", + workload=join(statics.DEFAULT_SMART_REMEDIATION_PATH, "nginx-deployment.yaml"), + workload_fix=join(statics.DEFAULT_SMART_REMEDIATION_PATH, "c0046-fixed.yaml"), + test_obj=SmartRemediation) + + # C-0048 - HostPath mount + @staticmethod + def smart_remediation_c0048(): + from tests_scripts.helm.smart_remediation import SmartRemediation + from os.path import join + return 
TestConfiguration( + name=inspect.currentframe().f_code.co_name, + control="C-0048", + workload=join(statics.DEFAULT_SMART_REMEDIATION_PATH, "nginx-deployment.yaml"), + workload_fix=join(statics.DEFAULT_SMART_REMEDIATION_PATH, "c0048-fixed.yaml"), + test_obj=SmartRemediation) + + # C-0057 - Privileged container + @staticmethod + def smart_remediation_c0057(): + from tests_scripts.helm.smart_remediation import SmartRemediation + from os.path import join + return TestConfiguration( + name=inspect.currentframe().f_code.co_name, + control="C-0057", + workload=join(statics.DEFAULT_SMART_REMEDIATION_PATH, "nginx-deployment.yaml"), + workload_fix=join(statics.DEFAULT_SMART_REMEDIATION_PATH, "c0057-fixed.yaml"), + test_obj=SmartRemediation) + + # C-0074 - Container runtime socket mounted + @staticmethod + def smart_remediation_c0074(): + from tests_scripts.helm.smart_remediation import SmartRemediation + from os.path import join + return TestConfiguration( + name=inspect.currentframe().f_code.co_name, + control="C-0074", + workload=join(statics.DEFAULT_SMART_REMEDIATION_PATH, "nginx-deployment.yaml"), + workload_fix=join(statics.DEFAULT_SMART_REMEDIATION_PATH, "c0074-fixed.yaml"), + test_obj=SmartRemediation) diff --git a/infrastructure/backend_api.py b/infrastructure/backend_api.py index 2ebef7cf..1ffa26d0 100644 --- a/infrastructure/backend_api.py +++ b/infrastructure/backend_api.py @@ -43,7 +43,7 @@ class NotExistingCustomer(Exception): API_TENANT_DETAILS = "/api/v1/tenants/tenantDetails" API_TENANT_CREATE= "/api/v1/tenants/createTenant" API_CLUSTER = "/api/v1/cluster" -API_IMAGE_SCAN_STATS = "/api/v1/customerState/reports/imageScan" +API_IMAGE_SCAN_STATS = "/api/v1/customerState/reports/imageScan" API_POSTURE_CLUSTERSOVERTIME = "/api/v1/posture/clustersOvertime" API_POSTURE_FRAMEWORKS = "/api/v1/posture/frameworks" API_POSTURE_CONTROLS = "/api/v1/posture/controls" @@ -57,7 +57,7 @@ class NotExistingCustomer(Exception): API_VULNERABILITY_SCANRESULTSDETAILS = 
"/api/v1/vulnerability/scanResultsDetails" API_VULNERABILITY_UNIQUE_VALUES_SUMMARY = "/api/v1/uniqueValues/vulnerability/scanResultsSumSummary" -API_VULNERABILITY_V2_WORKLOAD = "/api/v1/vulnerability_v2/workload" +API_VULNERABILITY_V2_WORKLOAD = "/api/v1/vulnerability_v2/workload" API_VULNERABILITY_V2 = "/api/v1/vulnerability_v2/vulnerability" API_VULNERABILITY_V2_IMAGE = "/api/v1/vulnerability_v2/image" API_VULNERABILITY_V2_COMPONENT = "/api/v1/vulnerability_v2/component" @@ -111,7 +111,7 @@ def apply_cookie(*args, **kwargs): ControlPanelAPIObj = args[0] if type(ControlPanelAPIObj) != ControlPanelAPI: raise Exception("In 'apply_cookie': First argument must be ControlPanelAPI object") - + if "params" not in kwargs: kwargs["params"] = {} @@ -160,7 +160,7 @@ class ControlPanelAPI(object): secret_key : str the secret_key to use for login_method "frontegg" login_customer_cookie : str - the cookie used for login the API. + the cookie used for login the API. login_customer_guid : str the customer_guid used for login the API. In order to access the admin APIS, this customer must be configured in AllowedAnyCustomer. selected_tenant_cookie : str @@ -176,7 +176,7 @@ class ControlPanelAPI(object): api_login : APILogin the api_login object constructed from the login_method. 
- + """ def __init__(self, user_name, password, customer, client_id, secret_key, url, auth_url=None, login_method=LOGIN_METHOD_KEYCLOAK, customer_guid=None): @@ -190,7 +190,7 @@ def __init__(self, user_name, password, customer, client_id, secret_key, url, au self.customer = customer # Required for login_method == LOGIN_METHOD_FRONTEGG - self.client_id = client_id + self.client_id = client_id self.secret_key = secret_key self.auth_url = auth_url @@ -209,9 +209,9 @@ def __init__(self, user_name, password, customer, client_id, secret_key, url, au self.api_login = APILogin() self.verify = True - + self.login(self.login_method) - + def login(self, login_method): if login_method == LOGIN_METHOD_KEYCLOAK: @@ -222,8 +222,8 @@ def login(self, login_method): self.api_login = FrontEggUsernameAPILogin(server=self.server, username=self.username, password=self.password, customer=self.customer, customer_guid=self.customer_guid) else: raise Exception(f"Login method '{login_method}' not supported") - - self.login_customer_guid, self.login_customer_cookie, auth = self.api_login.login() + + self.login_customer_guid, self.login_customer_cookie, auth = self.api_login.login() Logger.logger.info(f"Customer guid {self.login_customer_guid} authenticated successfully") if login_method == LOGIN_METHOD_FRONTEGG_USERNAME: self.auth = {"Cookie" : "auth=" + auth} @@ -232,7 +232,7 @@ def login(self, login_method): self.selected_tenant_id = self.login_customer_guid self.selected_tenant_cookie = self.login_customer_cookie - + # set access keys access_keys_response = self.get_access_keys() access_keys = access_keys_response.json() @@ -255,26 +255,26 @@ def get_selected_tenant(self) -> str: """ return self.selected_tenant_id - + def get_tenant_cookie(self, tenant_id: str) -> requests.Response: """ Get the cookie of the given tenant. 
""" if tenant_id == self.selected_tenant_id: return self.selected_tenant_cookie - + return self.api_login.getCookie(self.server, self.api_login.frontEgg_auth, tenant_id) def select_tenant(self, tenant_id: str): - """ - Configure tenant_id as the current selected tenant. + """ + Configure tenant_id as the current selected tenant. Once selected, all APIs will be executed on this tenant except for the admin APIs. """ if self.get_selected_tenant() != tenant_id: self.selected_tenant_cookie = self.get_tenant_cookie(tenant_id) self.selected_tenant_id = tenant_id Logger.logger.info(f"Selected tenant: {tenant_id}") - + def get_tenant_details(self, tenant_id=None) -> requests.Response: """ Get tenant details for tenant_id. If None, get details of the selected tenant. @@ -289,7 +289,7 @@ def get_tenant_details(self, tenant_id=None) -> requests.Response: raise Exception(f"Requested {tenant_id} details, got {res.text['guid']}. Make sure to first select the customer using select_tenant()") return res - + def create_tenant(self, tenantName: str): """ Creates a new tenant with name tenantName. 
@@ -302,7 +302,7 @@ def create_tenant(self, tenantName: str): if self.login_method != LOGIN_METHOD_FRONTEGG_SECRET: raise Exception(f"create_tenant() is only supported for {LOGIN_METHOD_FRONTEGG_SECRET} login_method") - + res = self.post(API_TENANT_CREATE, json={"customerName": tenantName, "userId": self.api_login.get_frontEgg_user_id()}, cookies=None, headers={"Authorization": f"Bearer {self.api_login.get_frontEgg_auth_user_id()}"}) assert res.status_code in [client.CREATED, client.OK], f"Failed to create tenant {tenantName}: {res.text}" @@ -310,7 +310,7 @@ def create_tenant(self, tenantName: str): assert json_response.get("tenantId", {}) != {}, f"tenantId is empty: {res.text}" assert json_response.get("agentAccessKey", {}).get("value", {}) != {}, f"agentAccessKey['value'] is empty: {res.text}" return json_response["tenantId"], json_response["agentAccessKey"]["value"] - + def delete_tenant(self, tenant_id) -> requests.Response: """ Deletes a tenant. . @@ -318,7 +318,7 @@ def delete_tenant(self, tenant_id) -> requests.Response: params: tenant_id: The id of the tenant to delete. - returns: + returns: The response of the request. Exceptions: @@ -333,7 +333,7 @@ def delete_tenant(self, tenant_id) -> requests.Response: assert res.status_code == client.OK, f"delete tenant failed {tenant_id}: {res.status_code} {res.text}" return res - + def get_access_keys(self) -> requests.Response: """ Returns the access keys of the selected tenant. @@ -341,14 +341,14 @@ def get_access_keys(self) -> requests.Response: res = self.get(API_ACCESS_KEYS) assert res.status_code == client.OK, f"failed to get access keys for tenant_id {self.selected_tenant_id}. Response: {res.text}" return res - + ## ************** Stripe Backend APIs ************** ## def stripe_billing_portal(self) -> requests.Response: """ Creates a stripe billing portal url for the selected tenant. 
""" - res = self.get(API_STRIPE_BILLING_PORTAL) + res = self.get(API_STRIPE_BILLING_PORTAL) assert res.status_code == client.CREATED, f"stripe billing portal failed to create url for tenant_id {self.selected_tenant_id}. Response: {res.text}" return res @@ -359,7 +359,7 @@ def stripe_checkout(self, priceID: str, qauntity: int) -> requests.Response: res = self.post(API_STRIPE_CHECKOUT, json={"priceID": priceID, "quantity": qauntity},) assert res.status_code == client.CREATED, f"stripe checkout failed to create url for tenant_id {self.selected_tenant_id}. Response: {res.text}" return res - + def get_stripe_plans(self) -> requests.Response: """ Get all stripe plans. @@ -368,7 +368,7 @@ def get_stripe_plans(self) -> requests.Response: assert res.status_code == client.OK, f"get_stripe_plans Failed. expected status code 200, found {res.status_code}. response: {res.text} Make sure you have a valid stripe secret key and priceIdsMap is well configured" return res - + def create_subscription(self, priceID: str, stripeCustomerID: str, quantity: int, tenantID: str)-> requests.Response: """ Creates a subscription for a tenant. @@ -391,7 +391,7 @@ def create_subscription(self, priceID: str, stripeCustomerID: str, quantity: int ) assert res.status_code == client.OK, f"stripe create subscription failed with priceID: {priceID}, response.text: {res.text}" return res - + def cancel_subscription(self, tenantID: str)-> dict: """ Cancels a subscription for a tenant. @@ -407,7 +407,7 @@ def cancel_subscription(self, tenantID: str)-> dict: ) assert res.status_code == client.OK, f"cancel subscription failed for tenantID: {tenantID}" return res - + def renew_subscription(self, tenantID: str)-> dict: """ Renews a subscription for a tenant. 
@@ -420,10 +420,10 @@ def renew_subscription(self, tenantID: str)-> dict: json={ "tenantID": tenantID }, - ) + ) assert res.status_code == client.OK, f"renew subscription failed for tenantID: {tenantID}" return res - + def get_customer_guid(self): return self.selected_tenant_id @@ -435,7 +435,7 @@ def get_secret_key(self): def get_access_key(self): return self.access_key - + def cleanup(self, namespace=str(), ca_cluster=str()): Logger.logger.info("ControlPanelAPI Clean Up") @@ -815,7 +815,7 @@ def get_repository_posture_resources(self, report_guid: str): % (self.customer, r.status_code, r.text) ) result_length = r.json()['total']['value'] - + result = [] for i in range(1, math.ceil(result_length / page_size)+1): params['pageNum'] = i @@ -826,13 +826,13 @@ def get_repository_posture_resources(self, report_guid: str): % (self.customer, r.status_code, r.text) ) result.extend(r.json()['response']) - + return result def get_posture_frameworks(self, report_guid: str, framework_name: str = ""): params = {"pageNum": 1, "pageSize": 1000, "orderBy": "timestamp:desc", "innerFilters": [{ "reportGUID": report_guid, "name": framework_name}]} - + if framework_name in statics.SECURITY_FRAMEWORKS: params["innerFilters"][0]["typeTags"] = statics.SECURITY_FRAMEWORK_TYPETAG @@ -872,7 +872,7 @@ def get_posture_controls(self, framework_name: str, report_guid: str): return r.json()['response'] def get_top_controls_results(self, cluster_name): - # TODO: change to "topControls" when it will be deprecated + # TODO: change to "topControls" when it will be deprecated r = self.post(API_POSTURE_TOPFAILEDCONTROLS, params={"customerGUID": self.selected_tenant_id, "cluster": cluster_name}, json={"pageNum": 1, "pageSize": 5, "innerFilters": [{ }]}) @@ -887,12 +887,12 @@ def get_top_controls_results(self, cluster_name): def get_posture_resources(self, framework_name: str, report_guid: str, resource_name: str = "", related_exceptions: str = "false", namespace=None, order_by=None): - + if order_by is 
None: order_by = "timestamp:desc" - - body={"pageNum": 1, - "pageSize": 150, + + body={"pageNum": 1, + "pageSize": 150, "orderBy": order_by, "innerFilters": [{ "frameworkName": framework_name, "reportGUID": report_guid, @@ -901,7 +901,7 @@ def get_posture_resources(self, framework_name: str, report_guid: str, resource_ body["innerFilters"][0]["designators.attributes.namespace"] = namespace r = self.post(API_POSTURE_RESOURCES, params={"customerGUID": self.customer_guid, "relatedExceptions": related_exceptions, "ignoreRulesSummary": related_exceptions}, json=body) - + if not 200 <= r.status_code < 300: raise Exception( 'Error accessing dashboard. Request: results of posture resources "%s" (code: %d, message: %s)' % ( @@ -929,7 +929,7 @@ def get_posture_resources_by_control(self, framework_name: str, report_guid: str 'Error accessing dashboard. Request: results of posture resources by control is empty') return r.json()['response'] - + def get_image_scan_stats(self): r = self.get(API_IMAGE_SCAN_STATS, params={"customerGUID": self.selected_tenant_id, "includeLastReport": False}) if not 200 <= r.status_code < 300: @@ -1233,11 +1233,11 @@ def get_registry_container_layers(self, container_scan_id: str): raise Exception( 'Error accessing layers summery. Request: get scan layer summery "%s" (code: %d, message: %s). Url: "%s" ContainersScanID "%s" ' % ( self.customer, r.status_code, r.text, self.server + API_REGISTRY_SCANRESULTSLAYERSUMMARY, container_scan_id)) - + Logger.logger.info( 'layers of container scan id : {} response {}'.format(container_scan_id, r.json())) return r.json() - + def get_unique_values_for_field_scan_summary(self, since_time, field, customer_guid): params = {"customerGUID": customer_guid} body = { @@ -1253,9 +1253,9 @@ def get_unique_values_for_field_scan_summary(self, since_time, field, customer_g raise Exception( 'Error accessing dashboard. 
Request: get scan results details "%s" (code: %d, message: %s)' % ( self.customer, r.status_code, r.text)) - + return r - + def get_summary_with_inner_filters(self, since_time, filter, customer_guid): params = {"customerGUID": customer_guid} body = { @@ -1273,9 +1273,9 @@ def get_summary_with_inner_filters(self, since_time, filter, customer_guid): def get_scan_results_details(self, since_time: str, containers_scan_id: str, expected_results, total_cve): params = {"customerGUID": self.selected_tenant_id, "ignoreRulesSummary": "true", "relatedExceptions": "true"} page_size = 100 - body = {"pageNum": 1, + body = {"pageNum": 1, "orderBy": "timestamp:desc,name:desc", - "pageSize": page_size, + "pageSize": page_size, "since": since_time, "innerFilters": [{"containersScanID": containers_scan_id}]} result_length = self.get_length_of_post_response(url=API_VULNERABILITY_SCANRESULTSDETAILS, params=params, body=body) @@ -1297,7 +1297,7 @@ def get_scan_results_details(self, since_time: str, containers_scan_id: str, exp 'container scan id : {} len(result):{}, len(expected_results):{} '.format(containers_scan_id, result, total_cve)) - + if len(result) < total_cve: raise Exception( f'wait for aggregation to end in the backend, number of CVEs is lower than expected. 
' \ @@ -1338,9 +1338,9 @@ def get_scan_results_sum_summary_CSV(self, namespace: str, expected_results: int message = {"innerFilters": [{'cluster': cluster_name, 'namespace': namespace}]} if severity is not None: - message['innerFilters'][0]['severitiesStats.severity'] = severity + message['innerFilters'][0]['severitiesStats.severity'] = severity if fixable: - message['innerFilters'][0]['severitiesStats.fixedTotal'] = "1|greater" + message['innerFilters'][0]['severitiesStats.fixedTotal'] = "1|greater" self.ws_send(ws, json.dumps(message)) result = self.ws_extract_receive(ws) @@ -1427,7 +1427,7 @@ def create_vuln_scan_job_request(self, cluster_name, namespaces_list: list, sche def create_registry_scan_job_request_deprecated(self, cluster_name, registry_name: str, schedule_string: str = ''): params = {"customerGUID": self.selected_tenant_id} body = [] - + body.append({"clusterName": cluster_name, "registryName": registry_name, "cronTabSchedule": schedule_string}) @@ -1439,7 +1439,7 @@ def create_registry_scan_job_request_deprecated(self, cluster_name, registry_nam self.customer, r.status_code, r.text)) return r - def create_registry_scan_job_request(self, cluster_name, registry_name: str, auth_method: dict, schedule_string: str, registry_type: str, + def create_registry_scan_job_request(self, cluster_name, registry_name: str, auth_method: dict, schedule_string: str, registry_type: str, excluded_repositories: list = []): return self.send_registry_command(command=statics.CREATE_REGISTRY_CJ_COMMAND, cluster_name=cluster_name,registry_name= registry_name, excluded_repositories= excluded_repositories, registry_type=registry_type, auth_method=auth_method, schedule_string=schedule_string) @@ -1475,7 +1475,7 @@ def send_registry_command(self, command, cluster_name, registry_name: str, re command, self.customer, r.status_code, r.text)) return r - + def get_vuln_scan_cronjob_list(self, cluster_name: str, expected_cjs): @@ -1514,7 +1514,7 @@ def 
get_registry_scan_cronjob_list(self, cluster_name: str, expected_cjs): self.customer, r.status_code, r.text)) cronjob_list = r.json() registry_scan_cronjob_list = [cj for cj in cronjob_list if cj[statics.CA_VULN_SCAN_CRONJOB_CLUSTER_NAME_FILED] == cluster_name] - self.compare_registry_be_cjs_to_expected(actual_cjs=registry_scan_cronjob_list, expected_cjs=expected_cjs, cluster_name=cluster_name) + self.compare_registry_be_cjs_to_expected(actual_cjs=registry_scan_cronjob_list, expected_cjs=expected_cjs, cluster_name=cluster_name) return registry_scan_cronjob_list @@ -1527,7 +1527,7 @@ def get_registry_scan_cronjob_list_deprecated(self, cluster_name: str, expected_ self.customer, r.status_code, r.text)) cronjob_list = r.json() registry_scan_cronjob_list = [cj for cj in cronjob_list if cj[statics.CA_VULN_SCAN_CRONJOB_CLUSTER_NAME_FILED] == cluster_name] - self.compare_registry_be_cjs_to_expected(actual_cjs=registry_scan_cronjob_list, expected_cjs=expected_cjs, cluster_name=cluster_name) + self.compare_registry_be_cjs_to_expected(actual_cjs=registry_scan_cronjob_list, expected_cjs=expected_cjs, cluster_name=cluster_name) return registry_scan_cronjob_list @@ -1544,8 +1544,8 @@ def compare_registry_be_cjs_to_expected(self, actual_cjs, expected_cjs, cluster_ assert expected.spec.schedule == actual[statics.CA_VULN_SCAN_CRONJOB_CRONTABSCHEDULE_FILED], f'cronjob schedule is not as expected' assert actual[statics.CA_VULN_SCAN_CRONJOB_NAME_FILED].startswith("kubescape-registry-scan"), f'cronjob name is not as expected' assert actual[statics.CA_REGISTRY_SCAN_CRONJOB_REGISTRY_NAME_FIELD] == expected.spec.job_template.spec.template.metadata.annotations[statics.CA_REGISTRY_SCAN_CRONJOB_REGISTRY_NAME_ANNOTATION_FIELD], f'registry name is not as expected' - - + + def get_vuln_scan_cronjob(self, cj_name: str, expect_to_results: bool = True): params = {"customerGUID": self.selected_tenant_id} @@ -1621,7 +1621,7 @@ def update_registry_scan_cronjob(self, cj_name, cj_id, cluster_name, 
registry_na statics.UPDATE_REGISTRY_CJ_COMMAND, self.customer, r.status_code, r.text)) return r - + def update_registry_scan_cronjob_deprecated(self, cj): cj = [cj] if isinstance(cj, dict) else cj @@ -1642,7 +1642,7 @@ def delete_vuln_scan_cronjob(self, cj): 'Error accessing dashboard. Request: delete vuln scan cronjob "%s" (code: %d, message: %s)' % ( self.customer, r.status_code, r.text)) return r.json() - + def delete_registry_scan_cronjob(self, cj): params = {"customerGUID": self.selected_tenant_id} body = [ @@ -1694,7 +1694,7 @@ def is_ks_cronjob_created_in_backend(self, cluster_name: str, framework_name:str if cj[statics.CA_VULN_SCAN_CRONJOB_CLUSTER_NAME_FILED] == cluster_name and "ks-scheduled-scan-{}".format(framework_name.lower()) in cj[statics.CA_VULN_SCAN_CRONJOB_NAME_FILED]: return True return False - + def is__backend_returning_only_ks_cronjob(self, cluster_name: str): params = {"customerGUID": self.selected_tenant_id} r = self.get(API_POSTURE_SCAN, params=params) @@ -1742,13 +1742,13 @@ def delete_kubescape_job_request(self, cluster_name, schedule, cronjobs_name): def get_component(self, component): return Component(self, component.guid, component.solution_guid) - + @deco_cookie def post(self, url, **args): if not url.startswith("http://") and not url.startswith("https://"): url = self.server + url return requests.post(url, **args) - + @deco_cookie def get(self, url, **args): if not url.startswith("http://") and not url.startswith("https://"): @@ -1799,7 +1799,7 @@ def ws_extract_receive(self, ws): r = ws.recv() if r: r = json.loads(r) - Logger.logger.debug("request chunk: {}".format(r)) + Logger.logger.debug("request chunk: {}".format(r)) result.extend(r['response']) nbmsg += 1 assert nbmsg == totalChunks, 'Excepted %d chunks, receive %d' % (totalChunks, nbmsg) @@ -1826,11 +1826,11 @@ def get_job_report_request(self, job_id): if 200 <= r.status_code < 300: return r.json() time.sleep(5) - + raise Exception( 'Error accessing dashboard. 
Request: get job report status "%s" (code: %d, message: %s, jobID: "%s")' % ( self.customer, r.status_code, r.text, job_id)) - + def get_repositories_list(self, job_id): params = {"customerGUID": self.selected_tenant_id, "jobID": job_id} @@ -1854,11 +1854,11 @@ def test_registry_connectivity_request(self, cluster_name, registry_name, auth_m { "registryProvider": provider, "action": "testRegistryConnectivity", - "clusterName": cluster_name, - "registryName": registry_name, + "clusterName": cluster_name, + "registryName": registry_name, "cronTabSchedule": "", "registryType": "public", - "depth": 3, + "depth": 3, "include":[], "exclude": excluded_repositories, "kind":"", @@ -1893,23 +1893,23 @@ def delete_registry_scan(self, containers_scan_id): 'Error accessing dashboard. Request: get scan results sum summary "%s" (code: %d, message: %s)' % ( self.customer, r.status_code, r.text)) return r - - def get_notifications_unsubscribed(self) -> requests.Response: + + def get_notifications_unsubscribed(self) -> requests.Response: res = self.get(API_NOTIFICATIONS_UNSUBSCRIBE, cookies=self.selected_tenant_cookie) if not 200 <= res.status_code < 300: raise Exception( 'Error accessing dashboard. Request: get scan notifications unsubscribe "%s" (code: %d, message: %s)' % ( self.customer, res.status_code, res.text)) return res - - def add_notifications_unsubscribed(self, notifications_identifiers) -> requests.Response: + + def add_notifications_unsubscribed(self, notifications_identifiers) -> requests.Response: res = self.post(API_NOTIFICATIONS_UNSUBSCRIBE, cookies=self.selected_tenant_cookie, json=notifications_identifiers) if not 200 <= res.status_code < 300: raise Exception( 'Error accessing dashboard. 
Request: get scan notifications unsubscribe "%s" (code: %d, message: %s)' % ( self.customer, res.status_code, res.text)) return res - def remove_notifications_unsubscribed(self, notifications_identifiers) -> requests.Response: + def remove_notifications_unsubscribed(self, notifications_identifiers) -> requests.Response: res = self.delete(API_NOTIFICATIONS_UNSUBSCRIBE, cookies=self.selected_tenant_cookie, json=notifications_identifiers) if not 200 <= res.status_code < 300: raise Exception( @@ -2008,7 +2008,7 @@ def get_network_policies(self, cluster_name, namespace) -> (requests.Response, d raise Exception( 'Error accessing dashboard. Request: get network policies generate "%s" (code: %d, message: %s)' % ( self.customer, r.status_code, r.text)) - + response = json.loads(r.text) workloads_list = response.get("response", None) @@ -2033,12 +2033,12 @@ def get_network_policies_generate(self, cluster_name, workload_name, namespace) raise Exception( 'Error accessing dashboard. Request: get network policies generate "%s" (code: %d, message: %s)' % ( self.customer, r.status_code, r.text)) - + response = json.loads(r.text) # verify there is a response assert len(response) > 0, "network policies generate response is empty '%s' (code: %d, message: %s)" % (self.customer, r.status_code, r.text) - + np = response[0].get("networkPolicies", None).get("kubernetes", None).get("new", None) # verify there is a 'new' network policy assert np is not None, "no 'new' NetworkPolicy '%s' (code: %d, message: %s)" % (self.customer, r.status_code, r.text) @@ -2077,12 +2077,12 @@ def has_active_attack_chains(self, cluster_name=None) -> bool: assert response['total']['value'] == 0, f"attack-chains not fixed yet" return True - + def get_kubernetes_resources(self, cluster_name: str, namespace:str=None, with_resource: bool=False): params = {"customerGUID": self.selected_tenant_id} if with_resource: params["enrichObjects"] = "true" - + payload = { "innerFilters": [{"cluster": cluster_name}], } @@ 
-2099,17 +2099,17 @@ def get_kubernetes_resources(self, cluster_name: str, namespace:str=None, with_r raise Exception( 'Error accessing dashboard. Request: POST kubernetes resources generate "%s" (code: %d, message: %s)' % ( self.customer, r.status_code, r.text)) - + response = json.loads(r.text) be_resources = response.get("response", None) assert be_resources is not None, "kubernetes resources response is empty '%s' (code: %d, message: %s)" % (self.customer, r.status_code, r.text) return be_resources - - def post_details_request(self,url, body: dict): + + def post_details_request(self,url, body: dict): r = self.post(url + '/details', params={"customerGUID": self.customer_guid}, - json=body) + json=body) if not 200 <= r.status_code < 300: raise Exception( 'Error accessing dashboard. Request: results of vuln workload details "%s" (code: %d, message: %s)' % ( @@ -2117,11 +2117,11 @@ def post_details_request(self,url, body: dict): j = r.json() if not j: raise Exception('Request: results of vuln workload details is empty body: %s' % body) - return j - + return j + def post_list_request(self, url, body: dict, expected_results: int = 0): r = self.post(url + "/list", params={"customerGUID": self.customer_guid}, - json=body) + json=body) if not 200 <= r.status_code < 300: raise Exception( 'Error accessing dashboard. 
Request to: %s "%s" (code: %d, message: %s)' % ( @@ -2133,28 +2133,39 @@ def post_list_request(self, url, body: dict, expected_results: int = 0): raise Exception('Request: results is empty') if len(j['response']) < expected_results: raise Exception('Excepted %d workloads, receive %d' % (expected_results, len(j['response']))) - return j['response'] - - def get_vuln_v2_workloads(self, body: dict, expected_results: int = 0): + return j['response'] + + def get_vuln_v2_workloads(self, body: dict, expected_results: int = 0): return self.post_list_request(API_VULNERABILITY_V2_WORKLOAD, body, expected_results) - - def get_vuln_v2_workload_details(self, body: dict): - return self.post_details_request(API_VULNERABILITY_V2_WORKLOAD, body) - - def get_vulns_v2(self, body: dict, expected_results: int = 0): + + def get_vuln_v2_workload_details(self, body: dict): + return self.post_details_request(API_VULNERABILITY_V2_WORKLOAD, body) + + def get_vulns_v2(self, body: dict, expected_results: int = 0): return self.post_list_request(API_VULNERABILITY_V2, body, expected_results) - + def get_vuln_v2_details(self, body: dict): return self.post_details_request(API_VULNERABILITY_V2, body) - - def get_vuln_v2_images(self, body: dict, expected_results: int = 0): + + def get_vuln_v2_images(self, body: dict, expected_results: int = 0): return self.post_list_request(API_VULNERABILITY_V2_IMAGE, body, expected_results) - - def get_vuln_v2_components(self, body: dict, expected_results: int = 0): + + def get_vuln_v2_components(self, body: dict, expected_results: int = 0): return self.post_list_request(API_VULNERABILITY_V2_COMPONENT, body, expected_results) - - - + + def get_posture_resources_highlights(self, body: dict): + r = self.post(API_POSTURE_RESOURCES + '/highlights', + params={"smEnabled": "true", "customerGUID": self.selected_tenant_id}, + json=body) + if not 200 <= r.status_code < 300: + raise Exception( + 'Error accessing smart remediation. 
Request: results of posture resources highlights "%s" (code: %d, message: %s)' % ( + self.customer, r.status_code, r.text)) + j = r.json() + if not j: + raise Exception('Request: results of posture resources highlights is empty body: %s' % body) + return j + class Solution(object): """docstring for Solution""" diff --git a/system_test_mapping.json b/system_test_mapping.json index 8de85172..e6dea128 100644 --- a/system_test_mapping.json +++ b/system_test_mapping.json @@ -576,6 +576,134 @@ "description": "", "skip_on_environment": "" }, + "smart_remediation_c0016": { + "target": [ + "In cluster", + "Backend" + ], + "target_repositories": [ + "helm-chart", + "node-agent", + "storage", + "cadashboardbe", + "event-ingester-service", + "synchronizer" + ], + "description": "Checks smart remediation C0016", + "skip_on_environment": "" + }, + "smart_remediation_c0017": { + "target": [ + "In cluster", + "Backend" + ], + "target_repositories": [ + "helm-chart", + "node-agent", + "storage", + "cadashboardbe", + "event-ingester-service", + "synchronizer" + ], + "description": "Checks smart remediation C0017", + "skip_on_environment": "" + }, + "smart_remediation_c0034": { + "target": [ + "In cluster", + "Backend" + ], + "target_repositories": [ + "helm-chart", + "node-agent", + "storage", + "cadashboardbe", + "event-ingester-service", + "synchronizer" + ], + "description": "Checks smart remediation C0034", + "skip_on_environment": "" + }, + "smart_remediation_c0045": { + "target": [ + "In cluster", + "Backend" + ], + "target_repositories": [ + "helm-chart", + "node-agent", + "storage", + "cadashboardbe", + "event-ingester-service", + "synchronizer" + ], + "description": "Checks smart remediation C0045", + "skip_on_environment": "" + }, + "smart_remediation_c0046": { + "target": [ + "In cluster", + "Backend" + ], + "target_repositories": [ + "helm-chart", + "node-agent", + "storage", + "cadashboardbe", + "event-ingester-service", + "synchronizer" + ], + "description": "Checks 
smart remediation C0046", + "skip_on_environment": "" + }, + "smart_remediation_c0048": { + "target": [ + "In cluster", + "Backend" + ], + "target_repositories": [ + "helm-chart", + "node-agent", + "storage", + "cadashboardbe", + "event-ingester-service", + "synchronizer" + ], + "description": "Checks smart remediation C0048", + "skip_on_environment": "" + }, + "smart_remediation_c0057": { + "target": [ + "In cluster", + "Backend" + ], + "target_repositories": [ + "helm-chart", + "node-agent", + "storage", + "cadashboardbe", + "event-ingester-service", + "synchronizer" + ], + "description": "Checks smart remediation C0057", + "skip_on_environment": "" + }, + "smart_remediation_c0074": { + "target": [ + "In cluster", + "Backend" + ], + "target_repositories": [ + "helm-chart", + "node-agent", + "storage", + "cadashboardbe", + "event-ingester-service", + "synchronizer" + ], + "description": "Checks smart remediation C0074", + "skip_on_environment": "" + }, "synchronizer": { "target": [ "In cluster", diff --git a/systest_utils/statics.py b/systest_utils/statics.py index e7c7c67b..1f90d479 100644 --- a/systest_utils/statics.py +++ b/systest_utils/statics.py @@ -43,6 +43,9 @@ DEFAULT_NAMESPACE_PATH = os.path.join(DEFAULT_K8S_PATHS, 'namespaces') DEFAULT_CONFIGMAP_PATH = os.path.join(DEFAULT_K8S_PATHS, 'config-map') +# smart remediation tests +DEFAULT_SMART_REMEDIATION_PATH = os.path.join(DEFAULT_K8S_PATHS, 'smart-remediation') + # synchronizer tests DEFAULT_SYNCHRONIZER_PATH = os.path.join(DEFAULT_K8S_PATHS, 'synchronizer') DEFAULT_SYNCHRONIZER_CRDS_PATH = os.path.abspath(os.path.join('configurations', 'kubescape-crds', 'supported')) diff --git a/tests_scripts/helm/smart_remediation.py b/tests_scripts/helm/smart_remediation.py new file mode 100644 index 00000000..d3bc64c8 --- /dev/null +++ b/tests_scripts/helm/smart_remediation.py @@ -0,0 +1,122 @@ +from .base_helm import BaseHelm +from ..kubescape.base_kubescape import BaseKubescape +from systest_utils import statics, 
class SmartRemediation(BaseKubescape, BaseHelm):
    """System test: verify the backend offers (and later clears) smart-remediation
    suggestions for a misconfigured workload.

    Flow: install the Helm chart, apply a deliberately misconfigured workload,
    trigger a posture scan and assert the control's resource exposes a
    ``smartRemediations`` entry; then apply the fixed workload, rescan, and
    assert the entry is gone.
    """

    def __init__(
        self, test_obj=None, backend=None, kubernetes_obj=None, test_driver=None
    ):
        super(SmartRemediation, self).__init__(
            test_driver=test_driver,
            test_obj=test_obj,
            backend=backend,
            kubernetes_obj=kubernetes_obj,
        )

        # Helm values: enable only the capabilities this test exercises.
        self.helm_kwargs = {
            "capabilities.relevancy": "enable",
            "capabilities.configurationScan": "enable",
            "capabilities.continuousScan": "disable",
            "capabilities.nodeScan": "disable",
            "capabilities.vulnerabilityScan": "disable",
            "capabilities.runtimeObservability": "enable",
            "grypeOfflineDB.enabled": "false",
        }

        # Per-test overrides win over the defaults above.
        test_helm_kwargs = self.test_obj.get_arg("helm_kwargs")
        if test_helm_kwargs:
            self.helm_kwargs.update(test_helm_kwargs)

    def cleanup(self, **kwargs):
        """Tear down Helm/K8s resources and report success."""
        super().cleanup(**kwargs)
        return statics.SUCCESS, ""

    def check_smart_remediation(self, body, want=True, retries=0):
        """Poll the highlights endpoint until the presence of the
        ``smartRemediations`` key matches *want*.

        :param body: request body for ``get_posture_resources_highlights``.
        :param want: True to wait for the key to appear, False for it to vanish.
        :param retries: number of polling attempts; at least one check is
            always performed (the previous code performed none for retries=0
            and returned False without ever querying the backend).
        :return: True once the expectation is met, False after exhausting retries.
        """
        attempts = max(retries, 1)
        for attempt in range(attempts):
            hl = self.backend.get_posture_resources_highlights(body)
            if len(hl["response"]) > 0 and (want == ("smartRemediations" in hl["response"][0])):
                return True
            # Don't waste 10s sleeping after the final (failed) attempt.
            if attempt < attempts - 1:
                TestUtil.sleep(10, "wait for smart remediation")
        return False

    def _highlights_body(self, namespace, workload, report_guid):
        """Build the highlights request body (deduplicates steps 4 and 7)."""
        return {
            "pageNum": 1,
            "pageSize": 1,
            "cursor": "",
            "orderBy": "",
            "innerFilters": [{
                "resourceID": "apps/v1/" + namespace + "/Deployment/" + workload["metadata"]["name"],
                "controlID": self.test_obj["control"],
                "reportGUID": report_guid,
                "frameworkName": "AllControls",
            }],
        }

    def start(self):
        """
        Test plan:
        1. Install Helm chart
        2. Apply a misconfigured workload
        3. Trigger a posture scan and get the report guid
        4. Check a smart remediation is offered
        5. Apply the fixed workload
        6. Trigger another scan and get the new report guid
        7. Check the smart remediation is gone
        """
        assert (
            self.backend is not None
        ), f"the test {self.test_driver.test_name} must run with backend"

        cluster, namespace = self.setup(apply_services=False)
        # was a stray print("Debug: ...") — use the project logger instead
        Logger.logger.debug(f"cluster: {cluster}")

        Logger.logger.info("1. Install Helm Chart")
        self.add_and_upgrade_armo_to_repo()
        self.install_armo_helm_chart(helm_kwargs=self.helm_kwargs)
        self.verify_running_pods(
            namespace=statics.CA_NAMESPACE_FROM_HELM_NAME, timeout=360
        )

        Logger.logger.info("2. Apply workload")
        workload = self.apply_yaml_file(
            yaml_file=self.test_obj["workload"], namespace=namespace
        )
        self.verify_all_pods_are_running(
            namespace=namespace, workload=workload, timeout=300
        )

        Logger.logger.info("3. Trigger a scan")
        self.backend.trigger_posture_scan(
            cluster_name=cluster,
            framework_list=["AllControls"],
            with_host_sensor="false",
        )

        Logger.logger.info("3.1. Get report guid")
        report_guid = self.get_report_guid(
            cluster_name=cluster, wait_to_result=True, framework_name="AllControls"
        )
        assert report_guid != "", "report guid is empty"

        Logger.logger.info("4. Check smart remediation is available")
        body = self._highlights_body(namespace, workload, report_guid)
        assert self.check_smart_remediation(body, retries=30), "smartRemediations is not found"

        Logger.logger.info("5. Correct the issue")
        workload_fix = self.apply_yaml_file(
            yaml_file=self.test_obj["workload_fix"], namespace=namespace, replace=True
        )
        self.verify_all_pods_are_running(namespace=namespace, workload=workload_fix, timeout=60)

        Logger.logger.info("6. Trigger another scan")
        self.backend.trigger_posture_scan(
            cluster_name=cluster,
            framework_list=["AllControls"],
            with_host_sensor="false",
        )

        Logger.logger.info("6.1. Get report guid")
        report_guid = self.get_report_guid(
            cluster_name=cluster, wait_to_result=True, framework_name="AllControls"
        )
        assert report_guid != "", "report guid is empty"

        Logger.logger.info("7. Check the issue is resolved")
        body = self._highlights_body(namespace, workload, report_guid)
        assert self.check_smart_remediation(body, want=False, retries=30), "smartRemediations should be empty"

        return self.cleanup()