diff --git a/backend/dataall/modules/dataset_sharing/services/dataset_alarm_service.py b/backend/dataall/modules/dataset_sharing/services/dataset_alarm_service.py
index d568dd4d8..0d31f3516 100644
--- a/backend/dataall/modules/dataset_sharing/services/dataset_alarm_service.py
+++ b/backend/dataall/modules/dataset_sharing/services/dataset_alarm_service.py
@@ -19,11 +19,9 @@ def trigger_table_sharing_failure_alarm(
         target_environment: Environment,
     ):
         log.info('Triggering share failure alarm...')
-        subject = (
-            f'ALARM: DATAALL Table {table.GlueTableName} Sharing Failure Notification'
-        )
+        subject = f'Data.all Share Failure for Table {table.GlueTableName}'[:100]
         message = f"""
-    You are receiving this email because your DATAALL {self.envname} environment in the {self.region} region has entered the ALARM state, because it failed to share the table {table.GlueTableName} with Lake Formation.
+    You are receiving this email because your Data.all {self.envname} environment in the {self.region} region has entered the ALARM state, because it failed to share the table {table.GlueTableName} with Lake Formation.
 
     Alarm Details:
     - State Change: OK -> ALARM
@@ -51,9 +49,9 @@ def trigger_revoke_table_sharing_failure_alarm(
         target_environment: Environment,
     ):
         log.info('Triggering share failure alarm...')
-        subject = f'ALARM: DATAALL Table {table.GlueTableName} Revoking LF permissions Failure Notification'
+        subject = f'Data.all Revoke LF Permissions Failure for Table {table.GlueTableName}'[:100]
         message = f"""
-    You are receiving this email because your DATAALL {self.envname} environment in the {self.region} region has entered the ALARM state, because it failed to revoke Lake Formation permissions for table {table.GlueTableName} with Lake Formation.
+    You are receiving this email because your Data.all {self.envname} environment in the {self.region} region has entered the ALARM state, because it failed to revoke Lake Formation permissions for table {table.GlueTableName} with Lake Formation.
 
     Alarm Details:
     - State Change: OK -> ALARM
@@ -76,11 +74,9 @@ def trigger_revoke_table_sharing_failure_alarm(
 
     def trigger_dataset_sync_failure_alarm(self, dataset: Dataset, error: str):
         log.info(f'Triggering dataset {dataset.name} tables sync failure alarm...')
-        subject = (
-            f'ALARM: DATAALL Dataset {dataset.name} Tables Sync Failure Notification'
-        )
+        subject = f'Data.all Dataset Tables Sync Failure for {dataset.name}'[:100]
         message = f"""
-You are receiving this email because your DATAALL {self.envname} environment in the {self.region} region has entered the ALARM state, because it failed to synchronize Dataset {dataset.name} tables from AWS Glue to the Search Catalog.
+You are receiving this email because your Data.all {self.envname} environment in the {self.region} region has entered the ALARM state, because it failed to synchronize Dataset {dataset.name} tables from AWS Glue to the Search Catalog.
 
 Alarm Details:
     - State Change: OK -> ALARM
@@ -101,11 +97,9 @@ def trigger_folder_sharing_failure_alarm(
         target_environment: Environment,
     ):
         log.info('Triggering share failure alarm...')
-        subject = (
-            f'ALARM: DATAALL Folder {folder.S3Prefix} Sharing Failure Notification'
-        )
+        subject = f'Data.all Folder Share Failure for {folder.S3Prefix}'[:100]
         message = f"""
-You are receiving this email because your DATAALL {self.envname} environment in the {self.region} region has entered the ALARM state, because it failed to share the folder {folder.S3Prefix} with S3 Access Point.
+You are receiving this email because your Data.all {self.envname} environment in the {self.region} region has entered the ALARM state, because it failed to share the folder {folder.S3Prefix} with S3 Access Point.
 
 Alarm Details:
     - State Change: OK -> ALARM
     - Reason for State Change: S3 Folder sharing failure
@@ -129,11 +123,9 @@ def trigger_revoke_folder_sharing_failure_alarm(
         target_environment: Environment,
     ):
         log.info('Triggering share failure alarm...')
-        subject = (
-            f'ALARM: DATAALL Folder {folder.S3Prefix} Sharing Revoke Failure Notification'
-        )
+        subject = f'Data.all Folder Share Revoke Failure for {folder.S3Prefix}'[:100]
         message = f"""
-You are receiving this email because your DATAALL {self.envname} environment in the {self.region} region has entered the ALARM state, because it failed to share the folder {folder.S3Prefix} with S3 Access Point.
+You are receiving this email because your Data.all {self.envname} environment in the {self.region} region has entered the ALARM state, because it failed to share the folder {folder.S3Prefix} with S3 Access Point.
 
 Alarm Details:
     - State Change: OK -> ALARM
     - Reason for State Change: S3 Folder sharing Revoke failure
@@ -173,11 +165,9 @@ def handle_bucket_sharing_failure(self, bucket: DatasetBucket,
                                       target_environment: Environment, alarm_type: str):
         log.info(f'Triggering {alarm_type} failure alarm...')
-        subject = (
-            f'ALARM: DATAALL S3 Bucket {bucket.S3BucketName} {alarm_type} Failure Notification'
-        )
+        subject = f'Data.all S3 Bucket Failure for {bucket.S3BucketName} {alarm_type}'[:100]
         message = f"""
-You are receiving this email because your DATAALL {self.envname} environment in the {self.region} region has entered the ALARM state, because it failed to {alarm_type} the S3 Bucket {bucket.S3BucketName}.
+You are receiving this email because your Data.all {self.envname} environment in the {self.region} region has entered the ALARM state, because it failed to {alarm_type} the S3 Bucket {bucket.S3BucketName}.
 
 Alarm Details:
     - State Change: OK -> ALARM
     - Reason for State Change: S3 Bucket {alarm_type} failure
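Note on the [:100] slices introduced above: these alarm notifications are published to the SNS alarms topic (the share managers below refer to it as alarmsTopic), and SNS caps e-mail subjects at roughly 100 characters, so a long table, folder or bucket name interpolated into the subject can make the publish call fail. A minimal sketch of the idea, assuming a plain boto3 client; the helper name and topic ARN are illustrative and not part of this change:

    import boto3

    def publish_alarm(topic_arn: str, subject: str, message: str) -> None:
        # Illustrative helper, not data.all's actual publishing code.
        sns = boto3.client('sns')
        # SNS rejects over-long subjects, so truncate defensively before publishing,
        # mirroring the [:100] slices applied to the subjects in this change.
        sns.publish(TopicArn=topic_arn, Subject=subject[:100], Message=message)
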
diff --git a/backend/dataall/modules/dataset_sharing/services/share_managers/s3_access_point_share_manager.py b/backend/dataall/modules/dataset_sharing/services/share_managers/s3_access_point_share_manager.py
index ae1c02ca4..d090dd6f8 100644
--- a/backend/dataall/modules/dataset_sharing/services/share_managers/s3_access_point_share_manager.py
+++ b/backend/dataall/modules/dataset_sharing/services/share_managers/s3_access_point_share_manager.py
@@ -510,7 +510,7 @@ def handle_share_failure(self, error: Exception) -> None:
             self.target_folder, self.share, self.target_environment
         )
 
-    def handle_revoke_failure(self, error: Exception) -> None:
+    def handle_revoke_failure(self, error: Exception) -> bool:
         """
         Handles share failure by raising an alarm to alarmsTopic
         Returns
@@ -526,6 +526,7 @@ def handle_revoke_failure(self, error: Exception) -> None:
         DatasetAlarmService().trigger_revoke_folder_sharing_failure_alarm(
             self.target_folder, self.share, self.target_environment
         )
+        return True
 
     @staticmethod
     def generate_default_kms_decrypt_policy_statement(target_requester_arn):
diff --git a/backend/dataall/modules/dataset_sharing/services/share_managers/s3_bucket_share_manager.py b/backend/dataall/modules/dataset_sharing/services/share_managers/s3_bucket_share_manager.py
index 375c1b94c..924dcd80c 100644
--- a/backend/dataall/modules/dataset_sharing/services/share_managers/s3_bucket_share_manager.py
+++ b/backend/dataall/modules/dataset_sharing/services/share_managers/s3_bucket_share_manager.py
@@ -444,7 +444,7 @@ def handle_revoke_failure(self, error: Exception) -> bool:
             f'with target account {self.target_environment.AwsAccountId}/{self.target_environment.region} '
             f'due to: {error}'
         )
-        DatasetAlarmService().trigger_revoke_folder_sharing_failure_alarm(
+        DatasetAlarmService().trigger_revoke_s3_bucket_sharing_failure_alarm(
            self.target_bucket, self.share, self.target_environment
         )
         return True
diff --git a/backend/dataall/modules/dataset_sharing/services/share_processors/lf_process_cross_account_share.py b/backend/dataall/modules/dataset_sharing/services/share_processors/lf_process_cross_account_share.py
index 51ba97cc7..950396b9d 100644
--- a/backend/dataall/modules/dataset_sharing/services/share_processors/lf_process_cross_account_share.py
+++ b/backend/dataall/modules/dataset_sharing/services/share_processors/lf_process_cross_account_share.py
@@ -111,11 +111,14 @@ def process_approved_shares(self) -> bool:
                     shared_item_SM.update_state_single_item(self.session, share_item, new_state)
 
                 except Exception as e:
-                    self.handle_share_failure(table=table, share_item=share_item, error=e)
+                    # must run first to ensure state transitions to failed
                     new_state = shared_item_SM.run_transition(ShareItemActions.Failure.value)
                     shared_item_SM.update_state_single_item(self.session, share_item, new_state)
                     success = False
 
+                    # statements which can throw exceptions but are not critical
+                    self.handle_share_failure(table=table, share_item=share_item, error=e)
+
         return success
 
     def process_revoked_shares(self) -> bool:
@@ -178,9 +181,12 @@ def process_revoked_shares(self) -> bool:
                     revoked_item_SM.update_state_single_item(self.session, share_item, new_state)
 
                 except Exception as e:
-                    self.handle_revoke_failure(share_item=share_item, table=table, error=e)
+                    # must run first to ensure state transitions to failed
                     new_state = revoked_item_SM.run_transition(ShareItemActions.Failure.value)
                     revoked_item_SM.update_state_single_item(self.session, share_item, new_state)
                     success = False
 
+                    # statements which can throw exceptions but are not critical
+                    self.handle_revoke_failure(share_item=share_item, table=table, error=e)
+
         return success
diff --git a/backend/dataall/modules/dataset_sharing/services/share_processors/lf_process_same_account_share.py b/backend/dataall/modules/dataset_sharing/services/share_processors/lf_process_same_account_share.py
index 54df2d900..ac23d5349 100644
--- a/backend/dataall/modules/dataset_sharing/services/share_processors/lf_process_same_account_share.py
+++ b/backend/dataall/modules/dataset_sharing/services/share_processors/lf_process_same_account_share.py
@@ -95,11 +95,14 @@ def process_approved_shares(self) -> bool:
                     shared_item_SM.update_state_single_item(self.session, share_item, new_state)
 
                 except Exception as e:
-                    self.handle_share_failure(table, share_item, e)
+                    # must run first to ensure state transitions to failed
                     new_state = shared_item_SM.run_transition(ShareItemActions.Failure.value)
                     shared_item_SM.update_state_single_item(self.session, share_item, new_state)
                     success = False
 
+                    # statements which can throw exceptions but are not critical
+                    self.handle_share_failure(table, share_item, e)
+
         return success
 
     def process_revoked_shares(self) -> bool:
@@ -151,9 +154,12 @@ def process_revoked_shares(self) -> bool:
                     revoked_item_SM.update_state_single_item(self.session, share_item, new_state)
 
                 except Exception as e:
-                    self.handle_revoke_failure(share_item, table, e)
+                    # must run first to ensure state transitions to failed
                     new_state = revoked_item_SM.run_transition(ShareItemActions.Failure.value)
                     revoked_item_SM.update_state_single_item(self.session, share_item, new_state)
                     success = False
 
+                    # statements which can throw exceptions but are not critical
+                    self.handle_revoke_failure(share_item, table, e)
+
         return success
diff --git a/backend/dataall/modules/dataset_sharing/services/share_processors/s3_access_point_process_share.py b/backend/dataall/modules/dataset_sharing/services/share_processors/s3_access_point_process_share.py
index da02c5a3c..620c0217e 100644
--- a/backend/dataall/modules/dataset_sharing/services/share_processors/s3_access_point_process_share.py
+++ b/backend/dataall/modules/dataset_sharing/services/share_processors/s3_access_point_process_share.py
@@ -97,11 +97,14 @@ def process_approved_shares(
                 shared_item_SM.update_state_single_item(session, sharing_item, new_state)
 
             except Exception as e:
-                sharing_folder.handle_share_failure(e)
+                # must run first to ensure state transitions to failed
                 new_state = shared_item_SM.run_transition(ShareItemActions.Failure.value)
                 shared_item_SM.update_state_single_item(session, sharing_item, new_state)
                 success = False
 
+                # statements which can throw exceptions but are not critical
+                sharing_folder.handle_share_failure(e)
+
         return success
 
     @classmethod
@@ -160,11 +163,14 @@ def process_revoked_shares(
                 revoked_item_SM.update_state_single_item(session, removing_item, new_state)
 
             except Exception as e:
-                removing_folder.handle_revoke_failure(e)
+                # must run first to ensure state transitions to failed
                 new_state = revoked_item_SM.run_transition(ShareItemActions.Failure.value)
                 revoked_item_SM.update_state_single_item(session, removing_item, new_state)
                 success = False
 
+                # statements which can throw exceptions but are not critical
+                removing_folder.handle_revoke_failure(e)
+
         return success
 
     @classmethod
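The recurring change in these share processors reorders the except blocks so that the state-machine transition to a failed state runs before the alarm handler, which may itself raise; the failed state is then already persisted even if the notification breaks. A rough sketch of the pattern with placeholder names (do_work, notify_failure and state_machine stand in for the real processor pieces and are not part of this change):

    def process_items(items, state_machine, session, do_work, notify_failure):
        # Illustrative only: persist the Failure transition before any
        # follow-up call that might itself throw.
        success = True
        for item in items:
            try:
                do_work(item)
            except Exception as error:
                # must run first so the item is recorded as failed
                new_state = state_machine.run_transition('Failure')
                state_machine.update_state_single_item(session, item, new_state)
                success = False

                # non-critical: if this raises, the failed state is already saved
                notify_failure(item, error)
        return success
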
diff --git a/backend/dataall/modules/dataset_sharing/services/share_processors/s3_bucket_process_share.py b/backend/dataall/modules/dataset_sharing/services/share_processors/s3_bucket_process_share.py
index 5a43f2b5c..08ee9532b 100644
--- a/backend/dataall/modules/dataset_sharing/services/share_processors/s3_bucket_process_share.py
+++ b/backend/dataall/modules/dataset_sharing/services/share_processors/s3_bucket_process_share.py
@@ -91,10 +91,14 @@ def process_approved_shares(
                 shared_item_SM.update_state_single_item(session, sharing_item, new_state)
 
             except Exception as e:
-                sharing_bucket.handle_share_failure(e)
+                # must run first to ensure state transitions to failed
                 new_state = shared_item_SM.run_transition(ShareItemActions.Failure.value)
                 shared_item_SM.update_state_single_item(session, sharing_item, new_state)
                 success = False
+
+                # statements which can throw exceptions but are not critical
+                sharing_bucket.handle_share_failure(e)
+
         return success
 
     @classmethod
@@ -161,9 +165,12 @@ def process_revoked_shares(
                 revoked_item_SM.update_state_single_item(session, removing_item, new_state)
 
             except Exception as e:
-                removing_bucket.handle_revoke_failure(e)
+                # must run first to ensure state transitions to failed
                 new_state = revoked_item_SM.run_transition(ShareItemActions.Failure.value)
                 revoked_item_SM.update_state_single_item(session, removing_item, new_state)
                 success = False
 
+                # statements which can throw exceptions but are not critical
+                removing_bucket.handle_revoke_failure(e)
+
         return success
diff --git a/deploy/stacks/aurora.py b/deploy/stacks/aurora.py
index d1b5d36a8..4d2d1630b 100644
--- a/deploy/stacks/aurora.py
+++ b/deploy/stacks/aurora.py
@@ -30,9 +30,7 @@ def __init__(
         super().__init__(scope, id, **kwargs)
 
         # if exclude_characters property is set make sure that the pwd regex in DbConfig is changed accordingly
-        db_credentials = rds.DatabaseSecret(
-            self, f'{resource_prefix}-{envname}-aurora-db', username='dtaadmin'
-        )
+
         db_subnet_group = rds.SubnetGroup(
             self,
@@ -65,6 +63,10 @@ def __init__(
             alias=f'{resource_prefix}-{envname}-aurora',
             enable_key_rotation=True,
         )
+
+        db_credentials = rds.DatabaseSecret(
+            self, f'{resource_prefix}-{envname}-aurora-db', username='dtaadmin', encryption_key=key
+        )
 
         database = rds.ServerlessCluster(
             self,
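In aurora.py above, the admin secret is now declared after the customer-managed KMS key so that it can be encrypted with that key rather than the default Secrets Manager key. A minimal CDK sketch of the same ordering, assuming CDK v2 imports; the stack name and construct IDs are illustrative, not the ones used by data.all:

    from aws_cdk import Stack
    from aws_cdk import aws_kms as kms
    from aws_cdk import aws_rds as rds
    from constructs import Construct

    class AuroraSecretSketch(Stack):
        # Illustrative stack: create the key first, then point the secret at it.
        def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
            super().__init__(scope, construct_id, **kwargs)

            key = kms.Key(
                self,
                'aurora-kms-key',
                alias='example-aurora',
                enable_key_rotation=True,
            )

            # Declaring the secret after the key lets it reference the key directly.
            rds.DatabaseSecret(
                self,
                'aurora-db-secret',
                username='dtaadmin',
                encryption_key=key,
            )
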
diff --git a/deploy/stacks/cognito.py b/deploy/stacks/cognito.py
index 331604aa2..7e65ed6a3 100644
--- a/deploy/stacks/cognito.py
+++ b/deploy/stacks/cognito.py
@@ -160,8 +160,8 @@ def __init__(
 
         cross_account_frontend_config_role = iam.Role(
             self,
-            f'{resource_prefix}-{envname}-frontend-config-role',
-            role_name=f'{resource_prefix}-{envname}-frontend-config-role',
+            f'{resource_prefix}-{envname}-cognito-config-role',
+            role_name=f'{resource_prefix}-{envname}-cognito-config-role',
             assumed_by=iam.AccountPrincipal(tooling_account_id),
         )
         cross_account_frontend_config_role.add_to_policy(
diff --git a/deploy/stacks/pipeline.py b/deploy/stacks/pipeline.py
index d8504901e..51d9703db 100644
--- a/deploy/stacks/pipeline.py
+++ b/deploy/stacks/pipeline.py
@@ -893,7 +893,7 @@ def cognito_config_action(self, target_env):
                        f'export enable_cw_canaries={target_env.get("enable_cw_canaries", False)}',
                        'mkdir ~/.aws/ && touch ~/.aws/config',
                        'echo "[profile buildprofile]" > ~/.aws/config',
-                       f'echo "role_arn = arn:aws:iam::{target_env["account"]}:role/{self.resource_prefix}-{target_env["envname"]}-frontend-config-role" >> ~/.aws/config',
+                       f'echo "role_arn = arn:aws:iam::{target_env["account"]}:role/{self.resource_prefix}-{target_env["envname"]}-cognito-config-role" >> ~/.aws/config',
                        'echo "credential_source = EcsContainer" >> ~/.aws/config',
                        'aws sts get-caller-identity --profile buildprofile',
                        'export AWS_PROFILE=buildprofile',
@@ -906,6 +906,10 @@ def cognito_config_action(self, target_env):
         )
 
     def set_albfront_stage(self, target_env, repository_name):
+        if target_env.get('custom_auth', None) is None:
+            frontend_deployment_role_arn = f'arn:aws:iam::{target_env["account"]}:role/{self.resource_prefix}-{target_env["envname"]}-cognito-config-role'
+        else:
+            frontend_deployment_role_arn = f'arn:aws:iam::{target_env["account"]}:role/{self.resource_prefix}-{target_env["envname"]}-frontend-config-role'
         albfront_stage = self.pipeline.add_stage(
             AlbFrontStage(
                 self,
@@ -956,7 +960,7 @@ def set_albfront_stage(self, target_env, repository_name):
                        f'export custom_auth_claims_mapping_user_id={str(target_env.get("custom_auth", {}).get("claims_mapping", {}).get("user_id", "None"))}',
                        'mkdir ~/.aws/ && touch ~/.aws/config',
                        'echo "[profile buildprofile]" > ~/.aws/config',
-                       f'echo "role_arn = arn:aws:iam::{target_env["account"]}:role/{self.resource_prefix}-{target_env["envname"]}-frontend-config-role" >> ~/.aws/config',
+                       f'echo "role_arn = {frontend_deployment_role_arn}" >> ~/.aws/config',
                        'echo "credential_source = EcsContainer" >> ~/.aws/config',
                        'aws sts get-caller-identity --profile buildprofile',
                        'export AWS_PROFILE=buildprofile',
diff --git a/frontend/package-lock.json b/frontend/package-lock.json
index b3c46ca3f..c0b75f6f4 100644
--- a/frontend/package-lock.json
+++ b/frontend/package-lock.json
@@ -32,6 +32,7 @@
         "classnames": "^2.3.1",
         "date-fns": "^2.28.0",
         "dayjs": "^1.11.0",
+        "dompurify": "^3.0.6",
         "formik": "^2.2.9",
         "graphql-tag": "^2.12.6",
         "json5": "^2.2.2",
@@ -16236,6 +16237,11 @@
         "url": "https://github.com/fb55/domhandler?sponsor=1"
       }
     },
+    "node_modules/dompurify": {
+      "version": "3.0.6",
+      "resolved": "https://registry.npmjs.org/dompurify/-/dompurify-3.0.6.tgz",
+      "integrity": "sha512-ilkD8YEnnGh1zJ240uJsW7AzE+2qpbOUYjacomn3AvJ6J4JhKGSZ2nh4wUIXPZrEPppaCLx5jFe8T89Rk8tQ7w=="
+    },
     "node_modules/domutils": {
       "version": "2.8.0",
       "resolved": "https://registry.npmjs.org/domutils/-/domutils-2.8.0.tgz",
@@ -42417,6 +42423,11 @@
         "domelementtype": "^2.2.0"
       }
     },
+    "dompurify": {
+      "version": "3.0.6",
+      "resolved": "https://registry.npmjs.org/dompurify/-/dompurify-3.0.6.tgz",
+      "integrity": "sha512-ilkD8YEnnGh1zJ240uJsW7AzE+2qpbOUYjacomn3AvJ6J4JhKGSZ2nh4wUIXPZrEPppaCLx5jFe8T89Rk8tQ7w=="
+    },
     "domutils": {
       "version": "2.8.0",
       "resolved": "https://registry.npmjs.org/domutils/-/domutils-2.8.0.tgz",
diff --git a/frontend/package.json b/frontend/package.json
index a2cd31ea9..babc566e7 100644
--- a/frontend/package.json
+++ b/frontend/package.json
@@ -62,6 +62,7 @@
     "react-router": "6.0.0",
     "react-router-dom": "6.0.0",
     "react-scripts": "^5.0.1",
+    "dompurify": "^3.0.6",
     "simplebar": "^5.3.6",
     "simplebar-react": "^2.3.6",
     "web-vitals": "^2.1.4",
diff --git a/frontend/src/design/components/SanitizedHTML.js b/frontend/src/design/components/SanitizedHTML.js
new file mode 100644
index 000000000..a802d9ba0
--- /dev/null
+++ b/frontend/src/design/components/SanitizedHTML.js
@@ -0,0 +1,12 @@
+import DOMPurify from 'dompurify';
+
+export const SanitizedHTML = ({ dirtyHTML }) => {
+  const defaultOptions = {
+    ALLOWED_TAGS: ['b', 'i', 'em', 'strong', 'a'],
+    ALLOWED_ATTR: ['href']
+  };
+
+  const sanitizedHtml = DOMPurify.sanitize(dirtyHTML, defaultOptions);
+
+  return <div dangerouslySetInnerHTML={{ __html: sanitizedHtml }} />;
+};
diff --git a/frontend/src/design/components/index.js b/frontend/src/design/components/index.js
index 9fcddb9f1..b9c460dd7 100644
--- a/frontend/src/design/components/index.js
+++ b/frontend/src/design/components/index.js
@@ -28,3 +28,4 @@ export * from './UpVotesReadOnly';
 export * from './defaults';
 export * from './layout';
 export * from './popovers';
+export * from './SanitizedHTML';
diff --git a/frontend/src/modules/Catalog/components/RequestAccessModal.js b/frontend/src/modules/Catalog/components/RequestAccessModal.js
index 70d20f14e..3dda9cb98 100644
--- a/frontend/src/modules/Catalog/components/RequestAccessModal.js
+++ b/frontend/src/modules/Catalog/components/RequestAccessModal.js
@@ -35,29 +35,37 @@ export const RequestAccessModal = (props) => {
   const client = useClient();
   const [environmentOptions, setEnvironmentOptions] = useState([]);
   const [loadingGroups, setLoadingGroups] = useState(false);
+  const [loadingEnvs, setLoadingEnvs] = useState(false);
   const [groupOptions, setGroupOptions] = useState([]);
   const [loadingRoles, setLoadingRoles] = useState(false);
   const [roleOptions, setRoleOptions] = useState([]);
 
   const fetchEnvironments = useCallback(async () => {
-    const response = await client.query(
-      listValidEnvironments({
-        filter: Defaults.selectListFilter
-      })
-    );
-    if (!response.errors) {
-      setEnvironmentOptions(
-        response.data.listValidEnvironments.nodes.map((e) => ({
-          ...e,
-          value: e.environmentUri,
-          label: e.label
-        }))
+    setLoadingEnvs(true);
+    try {
+      const response = await client.query(
+        listValidEnvironments({
+          filter: Defaults.selectListFilter
+        })
       );
-    } else {
-      dispatch({ type: SET_ERROR, error: response.errors[0].message });
-    }
-    if (stopLoader) {
-      stopLoader();
+      if (!response.errors) {
+        setEnvironmentOptions(
+          response.data.listValidEnvironments.nodes.map((e) => ({
+            ...e,
+            value: e.environmentUri,
+            label: e.label
+          }))
+        );
+      } else {
+        dispatch({ type: SET_ERROR, error: response.errors[0].message });
+      }
+    } catch (e) {
+      dispatch({ type: SET_ERROR, error: e.message });
+    } finally {
+      if (stopLoader) {
+        stopLoader();
+      }
+      setLoadingEnvs(false);
     }
   }, [client, dispatch, stopLoader]);
 
@@ -210,7 +218,7 @@
     }
   }
 
-  if (!hit) {
+  if (!hit || loadingEnvs) {
     return null;
   }
diff --git a/frontend/src/modules/Environments/views/EnvironmentCreateForm.js b/frontend/src/modules/Environments/views/EnvironmentCreateForm.js
index a16cdfa7e..cde6968ef 100644
--- a/frontend/src/modules/Environments/views/EnvironmentCreateForm.js
+++ b/frontend/src/modules/Environments/views/EnvironmentCreateForm.js
@@ -39,6 +39,7 @@ import {
   getCDKExecPolicyPresignedUrl
 } from '../services';
 import {
+  SanitizedHTML,
   ArrowLeftIcon,
   ChevronRightIcon,
   ChipInput,
@@ -57,6 +58,7 @@ import {
   isModuleEnabled,
   ModuleNames
 } from 'utils';
+import config from '../../../generated/config.json';
 
 const EnvironmentCreateForm = (props) => {
   const dispatch = useDispatch();
@@ -305,171 +307,190 @@ const EnvironmentCreateForm = (props) => {