diff --git a/app-infrastructure/app-security-groups.tf b/app-infrastructure/app-security-groups.tf
deleted file mode 100644
index 02c39791..00000000
--- a/app-infrastructure/app-security-groups.tf
+++ /dev/null
@@ -1,85 +0,0 @@
-resource "aws_security_group" "inbound-from-edge" {
- name = "allow_inbound_from_edge_subnet_to_app_subnet_${var.stack_githash}"
- description = "Allow inbound traffic from edge-private-subnets on port 8080 until we have TLS in place for app server"
- vpc_id = var.target-vpc
-
- ingress {
- from_port = 8080
- to_port = 8080
- protocol = "tcp"
- cidr_blocks = [
- var.edge-subnet-us-east-1a-cidr,
- var.edge-subnet-us-east-1b-cidr
- ]
- }
-
- tags = {
- Owner = "Avillach_Lab"
- Environment = "development"
- Name = "FISMA Terraform Playground - ${var.stack_githash} - inbound-from-edge Security Group - ${var.target-stack}"
- }
-}
-resource "aws_security_group" "outbound-to-hpds" {
- name = "allow_outbound_from_app_subnets_to_hpds_port_in_hpds_subnets_${var.stack_githash}"
- description = "Allow outbound traffic to data-hpds-subnets on port 8080 until we have TLS in place for app server"
- vpc_id = var.target-vpc
-
- egress {
- from_port = 8080
- to_port = 8080
- protocol = "tcp"
- cidr_blocks = [
- var.db-subnet-us-east-1a-cidr,
- var.db-subnet-us-east-1b-cidr
- ]
- }
-
- tags = {
- Owner = "Avillach_Lab"
- Environment = "development"
- Name = "FISMA Terraform Playground - ${var.stack_githash} - outbound-to-hpds Security Group - ${var.target-stack}"
- }
-}
-
-resource "aws_security_group" "inbound-app-ssh-from-nessus" {
- name = "allow_inbound_from_lma_subnet_to_app_server_${var.stack_githash}"
- description = "Allow inbound traffic from LMA on port 22"
- vpc_id = var.target-vpc
-
- ingress {
- from_port = 22
- to_port = 22
- protocol = "tcp"
- cidr_blocks = [
- "172.25.255.73/32"
- ]
- }
-
- tags = {
- Owner = "Avillach_Lab"
- Environment = "development"
- Name = "FISMA Terraform Playground - ${var.stack_githash} - inbound-app-ssh-from-nessus - ${var.target-stack}"
- }
-}
-
-resource "aws_security_group" "outbound-to-aurora" {
- name = "allow_outbound_from_app_subneets_to_mysql_port_in_db_subnets_${var.stack_githash}"
- description = "Allow outbound traffic to data-db-subnets on port 3306"
- vpc_id = var.target-vpc
-
- egress {
- from_port = 3306
- to_port = 3306
- protocol = "tcp"
- cidr_blocks = [
- var.db-subnet-us-east-1a-cidr,
- var.db-subnet-us-east-1b-cidr
- ]
- }
-
- tags = {
- Owner = "Avillach_Lab"
- Environment = "development"
- Name = "FISMA Terraform Playground - ${var.stack_githash} - outbound-to-aurora Security Group - ${var.target-stack}"
- }
-}
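
The CIDR-scoped groups deleted above are superseded by narrower per-service groups referenced later in this diff (inbound-httpd-from-alb, inbound-hpds-from-wildfly, inbound-dictionary-from-wildfly, inbound-mysql-from-wildfly). A minimal sketch of the replacement pattern, assuming the new groups key ingress to the wildfly node rather than to whole subnets (the new security-group file itself is not shown in this diff):

    resource "aws_security_group" "inbound-hpds-from-wildfly" {
      name_prefix = "allow_inbound_from_wildfly_to_hpds_"
      description = "Allow inbound traffic to HPDS on port 8080 from the wildfly node only"
      vpc_id      = local.target_vpc

      ingress {
        from_port   = 8080
        to_port     = 8080
        protocol    = "tcp"
        cidr_blocks = ["${aws_instance.wildfly-ec2.private_ip}/32"]
      }
    }
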
diff --git a/app-infrastructure/aurora-rds.tf b/app-infrastructure/aurora-rds.tf
index 80a74774..051f74fe 100644
--- a/app-infrastructure/aurora-rds.tf
+++ b/app-infrastructure/aurora-rds.tf
@@ -17,7 +17,7 @@
#
# tags = {
# Owner = "Avillach_Lab"
-# Environment = "development"
+# Environment = var.environment_name
# Name = "FISMA Terraform Playground - ${var.stack_githash} - RDS Aurora Cluster"
# }
#}
@@ -35,7 +35,7 @@
#
# tags = {
# Owner = "Avillach_Lab"
-# Environment = "development"
+# Environment = var.environment_name
# Name = "FISMA Terraform Playground - ${var.stack_githash} - RDS Aurora DB Instance - ${count.index}"
# }
#}
\ No newline at end of file
diff --git a/app-infrastructure/auth-hpds-instance.tf b/app-infrastructure/auth-hpds-instance.tf
index ae9fb5a2..a13fc7ab 100644
--- a/app-infrastructure/auth-hpds-instance.tf
+++ b/app-infrastructure/auth-hpds-instance.tf
@@ -1,12 +1,12 @@
-
data "template_file" "auth_hpds-user_data" {
template = file("scripts/auth_hpds-user_data.sh")
vars = {
- stack_githash = var.stack_githash_long
- dataset_s3_object_key = var.dataset-s3-object-key
- genomic_dataset_s3_object_key = var.genomic-dataset-s3-object-key
- stack_s3_bucket = var.stack_s3_bucket
- target-stack = var.target-stack
+ stack_githash = var.stack_githash_long
+ dataset_s3_object_key = var.dataset_s3_object_key
+ genomic_dataset_s3_object_key = var.genomic_dataset_s3_object_key
+ stack_s3_bucket = var.stack_s3_bucket
+ target_stack = var.target_stack
+ gss_prefix = "bdc_${var.env_is_open_access ? "open" : "auth"}_${var.environment_name}"
}
}
@@ -23,41 +23,39 @@ data "template_cloudinit_config" "auth_hpds-user-data" {
}
resource "aws_instance" "auth-hpds-ec2" {
- ami = var.ami-id
- instance_type = "m5.12xlarge"
+ count = var.include_auth_hpds ? 1 : 0
- key_name = "biodata_nessus"
-
- associate_public_ip_address = false
+ ami = local.ami_id
+ instance_type = "m5.12xlarge"
- subnet_id = var.db-subnet-us-east-1a-id
+ subnet_id = local.private2_subnet_ids[0]
- iam_instance_profile = "auth-hpds-deployment-s3-profile-${var.target-stack}-${var.stack_githash}"
+ iam_instance_profile = "auth-hpds-deployment-s3-profile-${var.target_stack}-${local.uniq_name}"
user_data = data.template_cloudinit_config.auth_hpds-user-data.rendered
vpc_security_group_ids = [
aws_security_group.outbound-to-internet.id,
- aws_security_group.inbound-hpds-from-app.id,
- aws_security_group.outbound-to-trend-micro.id,
- aws_security_group.inbound-data-ssh-from-nessus.id
+ aws_security_group.inbound-hpds-from-wildfly.id,
]
+
root_block_device {
delete_on_termination = true
- encrypted = true
- volume_size = 1000
+ encrypted = true
+ volume_size = 1000
}
tags = {
Owner = "Avillach_Lab"
- Environment = "development"
- Name = "FISMA Terraform Playground - ${var.stack_githash} - Auth HPDS - ${var.target-stack}"
+ Environment = var.environment_name
+ Stack = var.target_stack
+ Project = local.project
+ Name = "Auth HPDS - ${var.target_stack} - ${local.uniq_name}"
}
metadata_options {
- http_endpoint = "enabled"
- http_tokens = "required"
- instance_metadata_tags = "enabled"
+ http_endpoint = "enabled"
+ http_tokens = "required"
+ instance_metadata_tags = "enabled"
}
-}
-
+}
\ No newline at end of file
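
Since the instance is now created with count = var.include_auth_hpds ? 1 : 0, downstream references must index a possibly-empty list; outputs.tf and route53-template.tf below handle this with a length() guard and [0]:

    locals {
      auth_hpds_instance_id = length(aws_instance.auth-hpds-ec2) > 0 ? aws_instance.auth-hpds-ec2[0].id : ""
    }
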
diff --git a/app-infrastructure/configs/aggregate-resource.properties b/app-infrastructure/configs/aggregate-resource.properties
index b1ad769c..00bdef75 100644
--- a/app-infrastructure/configs/aggregate-resource.properties
+++ b/app-infrastructure/configs/aggregate-resource.properties
@@ -1,4 +1,4 @@
-target.picsure.url=http://open-hpds.${target-stack}.datastage.hms.harvard.edu:8080/PIC-SURE/
+target.picsure.url=http://open-hpds.${target_stack}.${env_private_dns_name}:8080/PIC-SURE/
target.picsure.token=
target.picsure.obfuscation_threshold=10
target.picsure.obfuscation_variance=3
diff --git a/app-infrastructure/configs/httpd-vhosts.conf b/app-infrastructure/configs/httpd-vhosts.conf
index f2e9993c..05ad9d65 100644
--- a/app-infrastructure/configs/httpd-vhosts.conf
+++ b/app-infrastructure/configs/httpd-vhosts.conf
@@ -45,7 +45,7 @@ ServerTokens Prod
- ServerName picsure.biodatacatalyst.nhlbi.nih.gov
+ ServerName ${env_public_dns_name}
SSLProxyEngine on
SSLProxyCheckPeerCN off
@@ -63,17 +63,14 @@ ServerTokens Prod
# unsafe-inline - Allows inline JavaScript, CSS, and event handlers
# style-src - Allows inline styles but only from the same origin
# img-src - Allows images from the same origin and data: URIs
- Header always set Content-Security-Policy "frame-ancestors 'none'; default-src 'self'; style-src 'self' 'unsafe-inline'; worker-src 'self' blob:; script-src 'self' 'unsafe-eval' 'unsafe-inline' https://*.googletagmanager.com; img-src 'self' data: https://public.era.nih.gov blob: https://*.google-analytics.com https://*.googletagmanager.com; connect-src 'self' https://*.google-analytics.com https://*.analytics.google.com https://*.googletagmanager.com;"
+	# https://www.googletagmanager.com is needed for Google Analytics
+	Header always set Content-Security-Policy "frame-ancestors 'none'; default-src 'self'; style-src 'self' 'unsafe-inline'; worker-src 'self' blob:; script-src 'self' 'unsafe-eval' 'unsafe-inline' data: https://*.googletagmanager.com; img-src 'self' data: https://public.era.nih.gov blob: https://*.google-analytics.com https://*.googletagmanager.com; connect-src 'self' https://*.google-analytics.com https://*.analytics.google.com https://*.googletagmanager.com;"
- # A fall back for legacy browsers that don't yet support CSP frame-ancestors.
- Header always set X-Frame-Options "DENY"
+ # Attempt to prevent some MIME-type confusion attacks. There is no perfect solution to this problem.
+ Header always set X-Content-Type-Options "nosniff"
- # Attempt to prevent some MIME-type confusion attacks. There is no perfect solution to this problem.
- Header always set X-Content-Type-Options "nosniff"
-
- # Enables built-in XSS protection in modern web browsers.
- # If a XSS is detected mode=block will block the entire page.
- Header always set X-XSS-Protection "1; mode=block;"
+ # Enables built-in XSS protection in modern web browsers.
+ # If a XSS is detected mode=block will block the entire page.
# A fall back for legacy browsers that don't yet support CSP frame-ancestors.
Header always set X-Frame-Options "DENY"
@@ -81,19 +78,14 @@ ServerTokens Prod
RewriteEngine On
ProxyPreserveHost On
- # Validate the Host header
- RewriteCond %%{HTTP_HOST} !^$
- RewriteCond %%{HTTP_HOST} !^(www\.)?(${allowed_hosts})$ [NC]
- RewriteRule ^ - [E=HOST:%%{HTTP_HOST},E=ALLOWED_HOSTS:${allowed_hosts},F]
-
#Dont allow httpd debug methods
RewriteCond %%{REQUEST_METHOD} ^TRACK
RewriteRule .* - [F]
RewriteCond %%{REQUEST_METHOD} ^TRACE
RewriteRule .* - [F]
- RewriteRule ^/picsure/(.*)$ "http://wildfly.${target-stack}.datastage.hms.harvard.edu:8080/pic-sure-api-2/PICSURE/$1" [P]
- RewriteRule ^/psama/(.*)$ "http://wildfly.${target-stack}.datastage.hms.harvard.edu:8080/pic-sure-auth-services/auth/$1" [P]
+ RewriteRule ^/picsure/(.*)$ "http://wildfly.${target_stack}.${env_private_dns_name}:8080/pic-sure-api-2/PICSURE/$1" [P]
+ RewriteRule ^/psama/(.*)$ "http://wildfly.${target_stack}.${env_private_dns_name}:8080/pic-sure-auth-services/auth/$1" [P]
RewriteCond %%{DOCUMENT_ROOT}/%%{REQUEST_FILENAME} !-f
RewriteCond %%{DOCUMENT_ROOT}/%%{REQUEST_FILENAME} !-d
@@ -107,10 +99,17 @@ ServerTokens Prod
DocumentRoot "$${HTTPD_PREFIX}/htdocs"
+ LogFormat "%%{X-Forwarded-For}i %t %%{SSL_PROTOCOL}x %%{SSL_CIPHER}x \"%r\" %b" proxy-ssl
+ LogFormat "%h %l %u %t \"%r\" %>s %b \"%%{Referer}i\" \"%%{User-Agent}i\"" combined
+ LogFormat "%%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b \"%%{Referer}i\" \"%%{User-Agent}i\"" proxy
+ SetEnvIf X-Forwarded-For "^.*\..*\..*\..*" forwarded
+ CustomLog "$${HTTPD_PREFIX}/logs/access_log" combined env=!forwarded
+ CustomLog "$${HTTPD_PREFIX}/logs/access_log" proxy env=forwarded
+ CustomLog "$${HTTPD_PREFIX}/logs/ssl_request_log" proxy-ssl env=forwarded
+ CustomLog "$${HTTPD_PREFIX}/logs/ssl_request_log" \
+ "%t %h %%{SSL_PROTOCOL}x %%{SSL_CIPHER}x \"%r\" %b" env=!forwarded
ErrorLog "$${HTTPD_PREFIX}/logs/error_log"
TransferLog "$${HTTPD_PREFIX}/logs/access_log"
- CustomLog "$${HTTPD_PREFIX}/logs/ssl_request_log" \
- "%t %h %%{SSL_PROTOCOL}x %%{SSL_CIPHER}x \"%r\" %b"
BrowserMatch "MSIE [2-5]" \
nokeepalive ssl-unclean-shutdown \
@@ -119,7 +118,7 @@ ServerTokens Prod
- ServerName preprod.picsure.biodatacatalyst.nhlbi.nih.gov
+ ServerName ${env_public_dns_name_staging}
SSLProxyEngine on
SSLProxyCheckPeerCN off
@@ -137,34 +136,28 @@ ServerTokens Prod
# unsafe-inline - Allows inline JavaScript, CSS, and event handlers
# style-src - Allows inline styles but only from the same origin
# img-src - Allows images from the same origin and data: URIs
- Header always set Content-Security-Policy "frame-ancestors 'none'; default-src 'self'; style-src 'self' 'unsafe-inline'; worker-src 'self' blob:; script-src 'self' 'unsafe-eval' 'unsafe-inline'; img-src 'self' data: https://public.era.nih.gov;"
-
- # A fall back for legacy browsers that don't yet support CSP frame-ancestors.
- Header always set X-Frame-Options "DENY"
+	# https://www.googletagmanager.com is needed for Google Analytics
+	Header always set Content-Security-Policy "frame-ancestors 'none'; default-src 'self'; style-src 'self' 'unsafe-inline'; worker-src 'self' blob:; script-src 'self' 'unsafe-eval' 'unsafe-inline' data: https://*.googletagmanager.com; img-src 'self' data: https://public.era.nih.gov blob: https://*.google-analytics.com https://*.googletagmanager.com; connect-src 'self' https://*.google-analytics.com https://*.analytics.google.com https://*.googletagmanager.com;"
# Attempt to prevent some MIME-type confusion attacks. There is no perfect solution to this problem.
Header always set X-Content-Type-Options "nosniff"
# Enables built-in XSS protection in modern web browsers.
# If a XSS is detected mode=block will block the entire page.
- Header always set X-XSS-Protection "1; mode=block;"
+ # A fall back for legacy browsers that don't yet support CSP frame-ancestors.
+ Header always set X-Frame-Options "DENY"
RewriteEngine On
ProxyPreserveHost On
- # Validate the Host header
- RewriteCond %%{HTTP_HOST} !^$
- RewriteCond %%{HTTP_HOST} !^(www\.)?(${allowed_hosts})$ [NC]
- RewriteRule ^ - [E=HOST:%%{HTTP_HOST},E=ALLOWED_HOSTS:${allowed_hosts},F]
-
#Dont allow httpd debug methods
RewriteCond %%{REQUEST_METHOD} ^TRACK
RewriteRule .* - [F]
RewriteCond %%{REQUEST_METHOD} ^TRACE
RewriteRule .* - [F]
- RewriteRule ^/picsure/(.*)$ "http://wildfly.${target-stack}.datastage.hms.harvard.edu:8080/pic-sure-api-2/PICSURE/$1" [P]
- RewriteRule ^/psama/(.*)$ "http://wildfly.${target-stack}.datastage.hms.harvard.edu:8080/pic-sure-auth-services/auth/$1" [P]
+ RewriteRule ^/picsure/(.*)$ "http://wildfly.${target_stack}.${env_private_dns_name}:8080/pic-sure-api-2/PICSURE/$1" [P]
+ RewriteRule ^/psama/(.*)$ "http://wildfly.${target_stack}.${env_private_dns_name}:8080/pic-sure-auth-services/auth/$1" [P]
RewriteCond %%{DOCUMENT_ROOT}/%%{REQUEST_FILENAME} !-f
RewriteCond %%{DOCUMENT_ROOT}/%%{REQUEST_FILENAME} !-d
@@ -178,13 +171,22 @@ ServerTokens Prod
DocumentRoot "$${HTTPD_PREFIX}/htdocs"
- ErrorLog "$${HTTPD_PREFIX}/logs/preprod_error_log"
- TransferLog "$${HTTPD_PREFIX}/logs/preprod_access_log"
- CustomLog "$${HTTPD_PREFIX}/logs/preprod_ssl_request_log" \
- "%t %h %%{SSL_PROTOCOL}x %%{SSL_CIPHER}x \"%r\" %b"
+ LogFormat "%%{X-Forwarded-For}i %t %%{SSL_PROTOCOL}x %%{SSL_CIPHER}x \"%r\" %b" proxy-ssl
+ LogFormat "%h %l %u %t \"%r\" %>s %b \"%%{Referer}i\" \"%%{User-Agent}i\"" combined
+ LogFormat "%%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b \"%%{Referer}i\" \"%%{User-Agent}i\"" proxy
+ SetEnvIf X-Forwarded-For "^.*\..*\..*\..*" forwarded
+ CustomLog "$${HTTPD_PREFIX}/logs/access_log" combined env=!forwarded
+ CustomLog "$${HTTPD_PREFIX}/logs/access_log" proxy env=forwarded
+ CustomLog "$${HTTPD_PREFIX}/logs/ssl_request_log" proxy-ssl env=forwarded
+ CustomLog "$${HTTPD_PREFIX}/logs/ssl_request_log" \
+ "%t %h %%{SSL_PROTOCOL}x %%{SSL_CIPHER}x \"%r\" %b" env=!forwarded
+ ErrorLog "$${HTTPD_PREFIX}/logs/error_log"
+ TransferLog "$${HTTPD_PREFIX}/logs/access_log"
BrowserMatch "MSIE [2-5]" \
nokeepalive ssl-unclean-shutdown \
downgrade-1.0 force-response-1.0
+
+
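
A note on the doubled sigils throughout this vhost file: it is rendered as a Terraform template, where $$ and %% escape to literal $ and %, letting Apache's own ${...} and %{...} syntax survive rendering. For example:

    # In the template source:
    LogFormat "%%{X-Forwarded-For}i %t %%{SSL_PROTOCOL}x %%{SSL_CIPHER}x \"%r\" %b" proxy-ssl
    CustomLog "$${HTTPD_PREFIX}/logs/ssl_request_log" proxy-ssl env=forwarded

    # What httpd actually reads after rendering:
    LogFormat "%{X-Forwarded-For}i %t %{SSL_PROTOCOL}x %{SSL_CIPHER}x \"%r\" %b" proxy-ssl
    CustomLog "${HTTPD_PREFIX}/logs/ssl_request_log" proxy-ssl env=forwarded
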
diff --git a/app-infrastructure/configs/pic-sure-schema.sql b/app-infrastructure/configs/pic-sure-schema.sql
index 13ebc778..6e2d9a70 100644
--- a/app-infrastructure/configs/pic-sure-schema.sql
+++ b/app-infrastructure/configs/pic-sure-schema.sql
@@ -101,12 +101,11 @@ CREATE TABLE `resource` (
--
-- Dumping data for table `resource`
--
-
LOCK TABLES `resource` WRITE;
/*!40000 ALTER TABLE `resource` DISABLE KEYS */;
-INSERT INTO `resource` VALUES (0x02E23F52F3544E8B992CD37C8B9BA140,NULL,'http://auth-hpds.${target-stack}.datastage.hms.harvard.edu:8080/PIC-SURE/','Authorized Access HPDS resource','auth-hpds',NULL, NULL, NULL);
-INSERT INTO `resource` VALUES (0x70c837be5ffc11ebae930242ac130002,NULL,'http://localhost:8080/pic-sure-aggregate-resource/pic-sure/aggregate-data-sharing','Open Access (aggregate) resource','open-hpds',NULL, NULL, NULL);
-INSERT INTO `resource` VALUES (0x36363664623161342d386538652d3131,NULL,'http://dictionary.${target-stack}.datastage.hms.harvard.edu:8080/dictionary/pic-sure','Dictionary','dictionary',NULL, NULL, NULL);
+${include_auth_hpds ? "INSERT INTO `resource` VALUES (0x02E23F52F3544E8B992CD37C8B9BA140,NULL,'http://auth-hpds.${target_stack}.${env_private_dns_name}:8080/PIC-SURE/','Authorized Access HPDS resource','auth-hpds',NULL, NULL, NULL);" : ""}
+${include_open_hpds ? "INSERT INTO `resource` VALUES (0x70c837be5ffc11ebae930242ac130002,NULL,'http://localhost:8080/pic-sure-aggregate-resource/pic-sure/aggregate-data-sharing','Open Access (aggregate) resource','open-hpds',NULL, NULL, NULL);" : ""}
+INSERT INTO `resource` VALUES (0x36363664623161342d386538652d3131,NULL,'http://dictionary.${target_stack}.${env_private_dns_name}:8080/dictionary/pic-sure','Dictionary','dictionary',NULL, NULL, NULL);
INSERT INTO `resource` VALUES (0xCA0AD4A9130A3A8AAE00E35B07F1108B,NULL,'http://localhost:8080/pic-sure-visualization-resource/pic-sure/visualization','Visualization','visualization',NULL, NULL, NULL);
/*!40000 ALTER TABLE `resource` ENABLE KEYS */;
UNLOCK TABLES;
@@ -674,25 +673,3 @@ SELECT privilege.uuid, unhex(@uuidGate) from privilege, role_privilege, role
where privilege.uuid = role_privilege.privilege_id
AND role_privilege.role_id = role.uuid
AND role.name = 'FENCE_ROLE_OPEN_ACCESS';
-
-SET @searchValuesAccessRuleUUID = REPLACE(uuid(),'-','');
-INSERT INTO access_rule (uuid, name, description, rule, type, value, checkMapKeyOnly, checkMapNode, subAccessRuleParent_uuid, isEvaluateOnlyByGates, isGateAnyRelation)
-VALUES (
- unhex(@searchValuesAccessRuleUUID),
- 'ALLOW_SEARCH_VALUES_ACCESS',
- 'Allow access to search values endpoint',
- '$.path',
- 11,
- '/search/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}/values',
- false,
- true,
- NULL,
- true,
- false
- );
-
-INSERT INTO accessRule_privilege (privilege_id, accessRule_id)
-SELECT privilege.uuid, unhex(@searchValuesAccessRuleUUID) from privilege, role_privilege, role
-where privilege.uuid = role_privilege.privilege_id
- AND role_privilege.role_id = role.uuid
- AND role.name = 'FENCE_ROLE_OPEN_ACCESS';
\ No newline at end of file
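
The ${include_auth_hpds ? "INSERT ..." : ""} lines above lean on Terraform's template conditional: when the flag is false the line renders empty, so the INSERT is omitted entirely. A minimal sketch of rendering this template with templatefile() (illustrative values; the repo wires the real ones up in picsure-db.tf):

    locals {
      schema_sql = templatefile("configs/pic-sure-schema.sql", {
        target_stack                      = "a"
        env_private_dns_name              = "example.internal"
        include_auth_hpds                 = true   # keeps the auth-hpds INSERT
        include_open_hpds                 = false  # drops the open-hpds INSERT
        picsure_token_introspection_token = "dummy"
      })
    }
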
diff --git a/app-infrastructure/configs/picsureui_settings.json b/app-infrastructure/configs/picsureui_settings.json
index 3c87b2ea..f5f7fca9 100644
--- a/app-infrastructure/configs/picsureui_settings.json
+++ b/app-infrastructure/configs/picsureui_settings.json
@@ -11,7 +11,7 @@
"picSureResourceId":"02e23f52-f354-4e8b-992c-d37c8b9ba140",
"openAccessResourceId":"70c837be-5ffc-11eb-ae93-0242ac130002",
"visualizationResourceId":"ca0ad4a9-130a-3a8a-ae00-e35b07f1108b",
- "applicationIdForBaseQuery":"8b5722c9-62fd-48d6-b0bf-4f67e53efb2b",
+ "applicationIdForBaseQuery": "${application_id_for_base_query}",
"helpLink": "https://biodatacatalyst.nhlbi.nih.gov/contact\" aria-label='Contact Us, this link will open a new browser tab' target='_blank'",
"pdfLink": "https://tinyurl.com/BDC-PIC-SURE-User-Guide\" aria-label='User Guide, this link will open a new browser tab' target='_blank'",
"videoLink": "https://www.youtube.com/playlist?list=PLJ6YccH8TEufZ5L-ctxzFF7vuZRLVacKw\" aria-label='Video Demonstration, this link will open a new browser tab' target='_blank'",
@@ -87,19 +87,18 @@
"phs002383": "Walk-PHaSST",
"phs002348": "MSH"
},
- "categorySearchResultList" : [
- "DCC_Harmonized_data_set",
- "Coronary_Artery_Risk_Development_in_Young_Adults_CARDIA",
- "Framingham_Cohort",
- "Genetic_Epidemiology_of_COPD_COPDGene_",
- "Multi_Ethnic_Study_of_Atherosclerosis_MESA_Cohort",
- "The_Jackson_Heart_Study_JHS_",
- "_Consents"
- ],
- "idp_provider": "fence",
- "idp_provider_uri": "https://gen3.biodatacatalyst.nhlbi.nih.gov",
- "fence_client_id": "${fence_client_id}",
- "fence_redirect_url": "https://biodatacatalyst.integration.hms.harvard.edu/psamaui/login/",
- "analyticsId": "${analytics_id}",
+ "categorySearchResultList" : [
+ "DCC_Harmonized_data_set",
+ "Coronary_Artery_Risk_Development_in_Young_Adults_CARDIA",
+ "Framingham_Cohort",
+ "Genetic_Epidemiology_of_COPD_COPDGene_",
+ "Multi_Ethnic_Study_of_Atherosclerosis_MESA_Cohort",
+ "The_Jackson_Heart_Study_JHS_",
+ "_Consents"
+ ],
+ "idp_provider":"${idp_provider}",
+ "idp_provider_uri":"${idp_provider_uri}",
+ "fence_client_id":"${fence_client_id}",
+ "analyticsId":"${analytics_id}",
"tagManagerId": "${tag_manager_id}"
}
diff --git a/app-infrastructure/configs/resources-registration.sql b/app-infrastructure/configs/resources-registration.sql
new file mode 100644
index 00000000..4424a6db
--- /dev/null
+++ b/app-infrastructure/configs/resources-registration.sql
@@ -0,0 +1,14 @@
+/**
+This script needs to run if an application is using a persistent application RDS.
+The resources are not stored in a dynamic fashion, so they need to be upserted with each deployment.
+
+**/
+
+USE `picsure`;
+-- Upsert into the resource table to handle stack hardcodings.
+-- We have multiple stacks, so we CANNOT use the same UUID for both if they both rely on the same RDS.
+-- If we ever want to make this compatible with other interfaces, we must make resource configuration more abstract and dynamic. - TD
+/*!40000 ALTER TABLE `resource` DISABLE KEYS */;
+${include_auth_hpds ? "INSERT INTO `resource` VALUES (0x02E23F52F3544E8B992CD37C8B9BA140,NULL,'http://auth-hpds.${target_stack}.${env_private_dns_name}:8080/PIC-SURE/','Authorized Access HPDS resource','auth-hpds',NULL, NULL, NULL) ON DUPLICATE KEY UPDATE `resourceRSPath` = VALUES(`resourceRSPath`);" : ""}
+INSERT INTO `resource` VALUES (0x36363664623161342d386538652d3131, NULL, 'http://dictionary.${target_stack}.${env_private_dns_name}:8080/dictionary/pic-sure', 'Dictionary', 'dictionary', NULL, NULL, NULL) ON DUPLICATE KEY UPDATE `resourceRSPath` = VALUES(`resourceRSPath`);
+/*!40000 ALTER TABLE `resource` ENABLE KEYS */;
\ No newline at end of file
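
The ON DUPLICATE KEY UPDATE clause is what makes this script idempotent across deployments: if the primary-key uuid already exists, MySQL updates only resourceRSPath instead of failing the insert. Schematically:

    INSERT INTO `resource` VALUES (<uuid>, ..., '<resource URL>', ...)
    ON DUPLICATE KEY UPDATE `resourceRSPath` = VALUES(`resourceRSPath`);
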
diff --git a/app-infrastructure/configs/standalone.xml b/app-infrastructure/configs/standalone.xml
index c4f869e8..812abdd3 100644
--- a/app-infrastructure/configs/standalone.xml
+++ b/app-infrastructure/configs/standalone.xml
@@ -159,7 +159,7 @@
- jdbc:mysql://picsure-db.${target-stack}.datastage.hms.harvard.edu:3306/auth?useUnicode=true&characterEncoding=UTF-8&autoReconnect=true&autoReconnectForPools=true
+ jdbc:mysql://picsure-db.${target_stack}.${env_private_dns_name}:3306/auth?useUnicode=true&characterEncoding=UTF-8&autoReconnect=true&autoReconnectForPools=true
mysql
2
@@ -179,7 +179,7 @@
- jdbc:mysql://picsure-db.${target-stack}.datastage.hms.harvard.edu:3306/picsure?useUnicode=true&characterEncoding=UTF-8&autoReconnect=true&autoReconnectForPools=true
+ jdbc:mysql://picsure-db.${target_stack}.${env_private_dns_name}:3306/picsure?useUnicode=true&characterEncoding=UTF-8&autoReconnect=true&autoReconnectForPools=true
mysql
3
@@ -469,12 +469,12 @@
-
-
+
+
-
+
@@ -484,7 +484,7 @@
-
+
@@ -575,13 +575,18 @@
-
+
-
+
+
+
+
+
+
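
The XML bodies of these standalone.xml hunks were stripped in rendering; the surviving JDBC URLs show they re-point the auth and picsure datasources from datastage.hms.harvard.edu to the templated ${env_private_dns_name}. For orientation, a WildFly datasource of this shape looks roughly like the following (assumed structure, not the file's literal content):

    <datasource jndi-name="java:jboss/datasources/PicsureDS" pool-name="PicsureDS">
        <connection-url>jdbc:mysql://picsure-db.${target_stack}.${env_private_dns_name}:3306/picsure?useUnicode=true&amp;characterEncoding=UTF-8&amp;autoReconnect=true&amp;autoReconnectForPools=true</connection-url>
        <driver>mysql</driver>
        <security>
            <user-name>root</user-name>
            <password><!-- injected at deploy time --></password>
        </security>
    </datasource>
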
diff --git a/app-infrastructure/configs/visualization-resource.properties b/app-infrastructure/configs/visualization-resource.properties
index a7e389a5..f129d815 100644
--- a/app-infrastructure/configs/visualization-resource.properties
+++ b/app-infrastructure/configs/visualization-resource.properties
@@ -1,4 +1,4 @@
target.origin.id=http://localhost:8080/pic-sure-api-2/PICSURE/
visualization.resource.id=ca0ad4a9-130a-3a8a-ae00-e35b07f1108b
auth.hpds.resource.id=02e23f52-f354-4e8b-992c-d37c8b9ba140
-open.hpds.resource.id=70c837be-5ffc-11eb-ae93-0242ac130002
\ No newline at end of file
+open.hpds.resource.id=70c837be-5ffc-11eb-ae93-0242ac130002
diff --git a/app-infrastructure/configs/wildfly_mysql_module.xml b/app-infrastructure/configs/wildfly_mysql_module.xml
new file mode 100644
index 00000000..b48dad97
--- /dev/null
+++ b/app-infrastructure/configs/wildfly_mysql_module.xml
@@ -0,0 +1,8 @@
+
+
+
+
+
+
+
+
\ No newline at end of file
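
The new module descriptor's XML did not survive rendering either. A typical WildFly module.xml for the MySQL driver, consistent with the mysql-connector-java-5.1.38.jar added below (assumed content, not the file's literal text):

    <?xml version="1.0" encoding="UTF-8"?>
    <module xmlns="urn:jboss:module:1.3" name="com.mysql">
        <resources>
            <resource-root path="mysql-connector-java-5.1.38.jar"/>
        </resources>
        <dependencies>
            <module name="javax.api"/>
            <module name="javax.transaction.api"/>
        </dependencies>
    </module>
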
diff --git a/app-infrastructure/data-security-groups.tf b/app-infrastructure/data-security-groups.tf
deleted file mode 100644
index af69b78b..00000000
--- a/app-infrastructure/data-security-groups.tf
+++ /dev/null
@@ -1,65 +0,0 @@
-resource "aws_security_group" "inbound-hpds-from-app" {
- name = "allow_inbound_from_app_subnet_to_hpds_${var.stack_githash}"
- description = "Allow inbound traffic from app-subnets on port 8080 until we have TLS in place for hpds server"
- vpc_id = var.target-vpc
-
- ingress {
- from_port = 8080
- to_port = 8080
- protocol = "tcp"
- cidr_blocks = [
- var.app-subnet-us-east-1a-cidr,
- var.app-subnet-us-east-1b-cidr
- ]
- }
-
- tags = {
- Owner = "Avillach_Lab"
- Environment = "development"
- Name = "FISMA Terraform Playground - ${var.stack_githash} - inbound-hpds-from-app Security Group - ${var.target-stack}"
- }
-}
-
-resource "aws_security_group" "inbound-data-ssh-from-nessus" {
- name = "allow_inbound_from_lma_subnet_to_hpds_server_${var.stack_githash}"
- description = "Allow inbound traffic from LMA on port 22"
- vpc_id = var.target-vpc
-
- ingress {
- from_port = 22
- to_port = 22
- protocol = "tcp"
- cidr_blocks = [
- "172.25.255.73/32"
- ]
- }
-
- tags = {
- Owner = "Avillach_Lab"
- Environment = "development"
- Name = "FISMA Terraform Playground - ${var.stack_githash} - inbound-data-ssh-from-nessus - ${var.target-stack}"
- }
-}
-
-resource "aws_security_group" "inbound-mysql-from-app" {
- name = "allow_inbound_from_app_subnet_to_mysql_${var.stack_githash}"
- description = "Allow inbound traffic from app-subnets on port 3306"
- vpc_id = var.target-vpc
-
- ingress {
- from_port = 3306
- to_port = 3306
- protocol = "tcp"
- cidr_blocks = [
- var.app-subnet-us-east-1a-cidr,
- var.app-subnet-us-east-1b-cidr
- ]
- }
-
- tags = {
- Owner = "Avillach_Lab"
- Environment = "development"
- Name = "FISMA Terraform Playground - ${var.stack_githash} - inbound-mysql-from-app Security Group - ${var.target-stack}"
- }
-}
-
diff --git a/app-infrastructure/dictionary-instance.tf b/app-infrastructure/dictionary-instance.tf
index 1a3158e1..24707ec3 100644
--- a/app-infrastructure/dictionary-instance.tf
+++ b/app-infrastructure/dictionary-instance.tf
@@ -1,15 +1,14 @@
-
-data "template_file" "dictionary-user_data" {
+data "template_file" "dictionary-user-data-template" {
template = file("scripts/dictionary-user_data.sh")
vars = {
- stack_githash = var.stack_githash_long
+ stack_githash = var.stack_githash_long
stack_s3_bucket = var.stack_s3_bucket
- dataset_s3_object_key = var.dataset-s3-object-key
- target-stack = var.target-stack
+ target_stack = var.target_stack
+ dataset_s3_object_key = var.dataset_s3_object_key
+ gss_prefix = "bdc_${var.env_is_open_access ? "open" : "auth"}_${var.environment_name}"
}
}
-
data "template_cloudinit_config" "dictionary-user-data" {
gzip = true
base64_encode = true
@@ -17,48 +16,44 @@ data "template_cloudinit_config" "dictionary-user-data" {
# user_data
part {
content_type = "text/x-shellscript"
- content = data.template_file.dictionary-user_data.rendered
+ content = data.template_file.dictionary-user-data-template.rendered
}
}
resource "aws_instance" "dictionary-ec2" {
-
- ami = var.ami-id
+ ami = local.ami_id
//TODO double check this value at runtime to check that performance not impacted
instance_type = "m5.xlarge"
- key_name = "biodata_nessus"
+ subnet_id = local.private2_subnet_ids[0]
- associate_public_ip_address = false
-
- subnet_id = var.db-subnet-us-east-1a-id
-
- iam_instance_profile = "dictionary-deployment-s3-profile-${var.target-stack}-${var.stack_githash}"
+ iam_instance_profile = "dictionary-deployment-s3-profile-${var.target_stack}-${local.uniq_name}"
user_data = data.template_cloudinit_config.dictionary-user-data.rendered
vpc_security_group_ids = [
aws_security_group.outbound-to-internet.id,
- aws_security_group.inbound-hpds-from-app.id,
- aws_security_group.outbound-to-trend-micro.id,
- aws_security_group.inbound-data-ssh-from-nessus.id
+ aws_security_group.inbound-dictionary-from-wildfly.id,
]
+
root_block_device {
delete_on_termination = true
- encrypted = true
- volume_size = 100
+ encrypted = true
+ volume_size = 100
}
tags = {
Owner = "Avillach_Lab"
- Environment = "development"
- Name = "FISMA Terraform Playground - ${var.stack_githash} - Dictionary - ${var.target-stack}"
+ Environment = var.environment_name
+ Stack = var.target_stack
+ Project = local.project
+ Name = "Dictionary - ${var.target_stack} - ${local.uniq_name}"
}
metadata_options {
- http_endpoint = "enabled"
- http_tokens = "required"
- instance_metadata_tags = "enabled"
+ http_endpoint = "enabled"
+ http_tokens = "required"
+ instance_metadata_tags = "enabled"
}
}
diff --git a/app-infrastructure/edge-security-groups.tf b/app-infrastructure/edge-security-groups.tf
deleted file mode 100644
index d0239201..00000000
--- a/app-infrastructure/edge-security-groups.tf
+++ /dev/null
@@ -1,70 +0,0 @@
-resource "aws_security_group" "inbound-from-public-internet" {
- name = "allow_inbound_from_public_internet_to_httpd_${var.stack_githash}"
- description = "Allow inbound traffic from public internet to httpd servers"
- vpc_id = var.target-vpc
-
- ingress {
- from_port = 80
- to_port = 80
- protocol = "tcp"
- cidr_blocks = [
- "0.0.0.0/0"
- ]
- }
- ingress {
- from_port = 443
- to_port = 443
- protocol = "tcp"
- cidr_blocks = [
- "0.0.0.0/0"
- ]
- }
-
- tags = {
- Owner = "Avillach_Lab"
- Environment = "development"
- Name = "FISMA Terraform Playground - ${var.stack_githash} - inbound-from-public-internet Security Group - ${var.target-stack}"
- }
-}
-
-resource "aws_security_group" "inbound-edge-ssh-from-nessus" {
- name = "allow_inbound_from_lma_subnet_to_edge_server_${var.stack_githash}"
- description = "Allow inbound traffic from LMA on port 22"
- vpc_id = var.target-vpc
-
- ingress {
- from_port = 22
- to_port = 22
- protocol = "tcp"
- cidr_blocks = [
- "172.25.255.73/32"
- ]
- }
-
- tags = {
- Owner = "Avillach_Lab"
- Environment = "development"
- Name = "FISMA Terraform Playground - ${var.stack_githash} - inbound-edge-ssh-from-nessus - ${var.target-stack}"
- }
-}
-
-resource "aws_security_group" "outbound-to-app" {
- name = "allow_outbound_from_edge_to_wildfly_port_in_app_vpc_${var.stack_githash}"
- description = "Allow outbound traffic to app-subnets on port 8080 until we have TLS in place for app server"
- vpc_id = var.target-vpc
-
- egress {
- from_port = 8080
- to_port = 8080
- protocol = "tcp"
- cidr_blocks = [
- var.app-subnet-us-east-1a-cidr,
- var.app-subnet-us-east-1b-cidr
- ]
- }
- tags = {
- Owner = "Avillach_Lab"
- Environment = "development"
- Name = "FISMA Terraform Playground - ${var.stack_githash} - outbound-to-app Security Group - ${var.target-stack}"
- }
-}
diff --git a/app-infrastructure/httpd-instance.tf b/app-infrastructure/httpd-instance.tf
index 28385474..65e10666 100644
--- a/app-infrastructure/httpd-instance.tf
+++ b/app-infrastructure/httpd-instance.tf
@@ -1,12 +1,11 @@
-
data "template_file" "httpd-user_data" {
template = file("scripts/httpd-user_data.sh")
vars = {
- stack_githash = var.stack_githash_long
- fence_client_id = var.fence_client_id
- stack_s3_bucket = var.stack_s3_bucket
- dataset_s3_object_key = var.dataset-s3-object-key
- target-stack = var.target-stack
+ stack_githash = var.stack_githash_long
+ stack_s3_bucket = var.stack_s3_bucket
+ dataset_s3_object_key = var.dataset_s3_object_key
+ target_stack = var.target_stack
+ gss_prefix = "bdc_${var.env_is_open_access ? "open" : "auth"}_${var.environment_name}"
}
}
@@ -23,25 +22,20 @@ data "template_cloudinit_config" "httpd-user-data" {
}
resource "aws_instance" "httpd-ec2" {
- ami = var.ami-id
+ ami = local.ami_id
instance_type = "m5.large"
- key_name = "biodata_nessus"
-
- associate_public_ip_address = false
+ subnet_id = local.private1_subnet_ids[0]
- subnet_id = var.edge-subnet-us-east-1a-id
-
- iam_instance_profile = "httpd-deployment-s3-profile-${var.target-stack}-${var.stack_githash}"
+ iam_instance_profile = "httpd-deployment-s3-profile-${var.target_stack}-${local.uniq_name}"
user_data = data.template_cloudinit_config.httpd-user-data.rendered
vpc_security_group_ids = [
aws_security_group.outbound-to-internet.id,
- aws_security_group.inbound-from-public-internet.id,
- aws_security_group.outbound-to-app.id,
- aws_security_group.inbound-edge-ssh-from-nessus.id
+ aws_security_group.inbound-httpd-from-alb.id,
]
+
root_block_device {
delete_on_termination = true
encrypted = true
@@ -49,15 +43,18 @@ resource "aws_instance" "httpd-ec2" {
}
tags = {
+ Node = "HTTPD"
Owner = "Avillach_Lab"
- Environment = "development"
- Name = "FISMA Terraform Playground - ${var.stack_githash} - Apache HTTPD - ${var.target-stack}"
+ Environment = var.environment_name
+ Stack = var.target_stack
+ Project = local.project
+ Name = "Apache HTTPD - ${var.target_stack} - ${local.uniq_name}"
}
-
+
metadata_options {
- http_endpoint = "enabled"
- http_tokens = "required"
- instance_metadata_tags = "enabled"
+ http_endpoint = "enabled"
+ http_tokens = "required"
+ instance_metadata_tags = "enabled"
}
}
@@ -65,30 +62,35 @@ resource "aws_instance" "httpd-ec2" {
data "template_file" "httpd-vhosts-conf" {
template = file("configs/httpd-vhosts.conf")
vars = {
+
wildfly-base-url = "http://${aws_instance.wildfly-ec2.private_ip}:8080"
- target-stack = var.target-stack
+ target_stack = var.target_stack
release-id = var.stack_githash_long
- allowed_hosts = var.allowed_hosts
+ env_private_dns_name = var.env_private_dns_name
+ env_public_dns_name = var.env_public_dns_name
+ env_public_dns_name_staging = var.env_public_dns_name_staging
}
}
resource "local_file" "httpd-vhosts-conf-file" {
- content = data.template_file.httpd-vhosts-conf.rendered
- filename = "httpd-vhosts.conf"
+ content = data.template_file.httpd-vhosts-conf.rendered
+ filename = "httpd-vhosts.conf"
}
data "template_file" "picsureui_settings" {
template = file("configs/picsureui_settings.json")
vars = {
- fence_client_id = var.fence_client_id
- analytics_id = var.analytics_id
- tag_manager_id = var.tag_manager_id
+    analytics_id                  = var.analytics_id
+ tag_manager_id = var.tag_manager_id
+ fence_client_id = var.fence_client_id
+ idp_provider = var.idp_provider
+ idp_provider_uri = var.idp_provider_uri
+ application_id_for_base_query = var.application_id_for_base_query
}
}
resource "local_file" "picsureui-settings-json" {
- content = data.template_file.picsureui_settings.rendered
- filename = "picsureui-settings.json"
+ content = data.template_file.picsureui_settings.rendered
+ filename = "picsureui-settings.json"
}
-
diff --git a/app-infrastructure/locals.tf b/app-infrastructure/locals.tf
new file mode 100644
index 00000000..97b024f1
--- /dev/null
+++ b/app-infrastructure/locals.tf
@@ -0,0 +1,91 @@
+
+# Look up the latest CentOS golden AMI
+data "aws_ami" "centos" {
+ most_recent = true
+ owners = ["752463128620"]
+ name_regex = "^srce-centos7-golden-*"
+}
+
+# Random string to use for dynamic names.
+# Used instead of the git hash in names; the git hash causes conflicts when different environments use the same release.
+resource "random_string" "random" {
+ length = 6
+ special = false
+}
+locals {
+ uniq_name = random_string.random.result
+}
+
+data "aws_vpc" "target_vpc" {
+ filter {
+ name = "tag:Name"
+ values = ["*-picsure-${var.environment_name}-${var.target_stack}-vpc"]
+ }
+ filter {
+ name = "tag:ApplicationName"
+ values = [local.project]
+ }
+}
+
+data "aws_vpc" "alb_vpc" {
+ filter {
+ name = "tag:Name"
+ values = ["*-picsure-${var.environment_name}-a-vpc"]
+ }
+ filter {
+ name = "tag:ApplicationName"
+ values = [local.project]
+ }
+}
+
+data "aws_subnets" "private1" {
+ filter {
+ name = "vpc-id"
+ values = [local.target_vpc]
+ }
+ filter {
+ name = "tag:Name"
+ values = ["*private1*"]
+ }
+}
+
+data "aws_subnets" "private2" {
+ filter {
+ name = "vpc-id"
+ values = [local.target_vpc]
+ }
+ filter {
+ name = "tag:Name"
+ values = ["*private2*"]
+ }
+}
+
+data "aws_subnets" "public" {
+ filter {
+ name = "vpc-id"
+ values = [local.alb_vpc]
+ }
+ filter {
+ name = "tag:Name"
+ values = ["*public*"]
+ }
+}
+
+data "aws_subnet" "public" {
+ for_each = toset(data.aws_subnets.public.ids)
+ id = each.value
+}
+
+# Valid project values: "Open PIC-SURE" or "Auth PIC-SURE".
+# Better to set this explicitly so we can deploy any project's resources in an environment;
+# we won't be able to look up the correct VPC tags otherwise.
+locals {
+ ami_id = data.aws_ami.centos.id
+ target_vpc = data.aws_vpc.target_vpc.id
+ alb_vpc = data.aws_vpc.alb_vpc.id
+ private1_subnet_ids = data.aws_subnets.private1.ids
+ private2_subnet_ids = data.aws_subnets.private2.ids
+ public_subnet_cidrs = values(data.aws_subnet.public).*.cidr_block
+ project = var.env_project
+ db_subnet_group_name = local.project == "Open PIC-SURE" ? "open-pic-sure-${var.environment_name}-${var.target_stack}" : "auth-pic-sure-${var.environment_name}-${var.target_stack}"
+}
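
These locals drive the rest of the stack: instances land in the private1/private2 subnets, and public_subnet_cidrs collects the CIDR blocks of the ALB VPC's public subnets, presumably to scope the inbound-httpd-from-alb rule. A sketch of that consumption (the rule body is an assumption; the group is referenced but not defined in this diff):

    resource "aws_security_group" "inbound-httpd-from-alb" {
      name_prefix = "allow_inbound_from_alb_to_httpd_"
      vpc_id      = local.target_vpc

      ingress {
        from_port   = 443
        to_port     = 443
        protocol    = "tcp"
        cidr_blocks = local.public_subnet_cidrs
      }
    }
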
diff --git a/app-infrastructure/mysql-connector-java-5.1.38.jar b/app-infrastructure/mysql-connector-java-5.1.38.jar
new file mode 100644
index 00000000..be09493c
Binary files /dev/null and b/app-infrastructure/mysql-connector-java-5.1.38.jar differ
diff --git a/app-infrastructure/open-hpds-instance.tf b/app-infrastructure/open-hpds-instance.tf
index dce184e5..6115e94b 100644
--- a/app-infrastructure/open-hpds-instance.tf
+++ b/app-infrastructure/open-hpds-instance.tf
@@ -1,11 +1,11 @@
-
data "template_file" "open_hpds-user_data" {
template = file("scripts/open_hpds-user_data.sh")
vars = {
- stack_githash = var.stack_githash_long
- destigmatized_dataset_s3_object_key = var.destigmatized-dataset-s3-object-key
- stack_s3_bucket = var.stack_s3_bucket
- target-stack = var.target-stack
+ stack_githash = var.stack_githash_long
+ destigmatized_dataset_s3_object_key = var.destigmatized_dataset_s3_object_key
+ stack_s3_bucket = var.stack_s3_bucket
+ target_stack = var.target_stack
+ gss_prefix = "bdc_${var.env_is_open_access ? "open" : "auth"}_${var.environment_name}"
}
}
@@ -22,42 +22,40 @@ data "template_cloudinit_config" "open_hpds-user-data" {
}
resource "aws_instance" "open-hpds-ec2" {
- ami = var.ami-id
- instance_type = "m5.2xlarge"
+ count = var.include_open_hpds ? 1 : 0
- key_name = "biodata_nessus"
-
- associate_public_ip_address = false
+ ami = local.ami_id
+ instance_type = "m5.2xlarge"
- subnet_id = var.db-subnet-us-east-1a-id
+ subnet_id = local.private2_subnet_ids[0]
- iam_instance_profile = "open-hpds-deployment-s3-profile-${var.target-stack}-${var.stack_githash}"
+ iam_instance_profile = "open-hpds-deployment-s3-profile-${var.target_stack}-${local.uniq_name}"
user_data = data.template_cloudinit_config.open_hpds-user-data.rendered
vpc_security_group_ids = [
aws_security_group.outbound-to-internet.id,
- aws_security_group.inbound-hpds-from-app.id,
- aws_security_group.outbound-to-trend-micro.id,
- aws_security_group.inbound-data-ssh-from-nessus.id
+ aws_security_group.inbound-hpds-from-wildfly.id,
]
+
root_block_device {
delete_on_termination = true
- encrypted = true
- volume_size = 1000
+ encrypted = true
+ volume_size = 1000
}
tags = {
Owner = "Avillach_Lab"
- Environment = "development"
- Name = "FISMA Terraform Playground - ${var.stack_githash} - Open HPDS - ${var.target-stack}"
+ Environment = var.environment_name
+ Project = local.project
+ Stack = var.target_stack
+ Name = "Open HPDS - ${var.target_stack} - ${local.uniq_name}"
}
-
+
metadata_options {
- http_endpoint = "enabled"
- http_tokens = "required"
- instance_metadata_tags = "enabled"
+ http_endpoint = "enabled"
+ http_tokens = "required"
+ instance_metadata_tags = "enabled"
}
}
-
diff --git a/app-infrastructure/outbound-to-internet-security-group.tf b/app-infrastructure/outbound-to-internet-security-group.tf
deleted file mode 100644
index 349b9fd9..00000000
--- a/app-infrastructure/outbound-to-internet-security-group.tf
+++ /dev/null
@@ -1,19 +0,0 @@
-
-resource "aws_security_group" "outbound-to-internet" {
- name = "allow_outbound_to_public_internet_${var.stack_githash}"
- description = "Allow outbound traffic to public internet"
- vpc_id = var.target-vpc
-
- egress {
- from_port = 0
- to_port = 0
- protocol = "-1"
- cidr_blocks = ["0.0.0.0/0"]
- }
-
- tags = {
- Owner = "Avillach_Lab"
- Environment = "development"
- Name = "FISMA Terraform Playground - ${var.stack_githash} - outbound-to-internet Security Group - ${var.target-stack}"
- }
-}
diff --git a/app-infrastructure/output.tf b/app-infrastructure/output.tf
deleted file mode 100644
index 7ef3be9f..00000000
--- a/app-infrastructure/output.tf
+++ /dev/null
@@ -1,17 +0,0 @@
-#output "cluster_endpoint" {
-# value = join("", aws_rds_cluster.aurora-db-cluster.*.endpoint)
-#}
-#
-#output "all_instance_endpoints_list" {
-# value = [concat(
-# aws_rds_cluster_instance.aurora-cluster-instance.*.endpoint,
-# )]
-#}
-#
-#output "reader_endpoint" {
-# value = join("", aws_rds_cluster.aurora-db-cluster.*.reader_endpoint)
-#}
-#
-#output "cluster_identifier" {
-# value = join("", aws_rds_cluster.aurora-db-cluster.*.id)
-#}
\ No newline at end of file
diff --git a/app-infrastructure/outputs.tf b/app-infrastructure/outputs.tf
new file mode 100644
index 00000000..a5a13771
--- /dev/null
+++ b/app-infrastructure/outputs.tf
@@ -0,0 +1,24 @@
+output "httpd-ec2-id" {
+ value = aws_instance.httpd-ec2.id
+}
+
+output "wildfly-ec2-id" {
+ value = aws_instance.wildfly-ec2.id
+}
+
+output "dictionary-ec2-id" {
+ value = aws_instance.dictionary-ec2.id
+}
+
+output "hpds-ec2-open-id" {
+ value = local.open_hpds_instance_id
+}
+
+output "hpds-ec2-auth-id" {
+ value = local.auth_hpds_instance_id
+}
+
+locals {
+ open_hpds_instance_id = length(aws_instance.open-hpds-ec2) > 0 ? aws_instance.open-hpds-ec2[0].id : ""
+ auth_hpds_instance_id = length(aws_instance.auth-hpds-ec2) > 0 ? aws_instance.auth-hpds-ec2[0].id : ""
+}
\ No newline at end of file
diff --git a/app-infrastructure/picsure-db.tf b/app-infrastructure/picsure-db.tf
index 2b7271b8..e2ac4d7b 100644
--- a/app-infrastructure/picsure-db.tf
+++ b/app-infrastructure/picsure-db.tf
@@ -1,26 +1,69 @@
resource "aws_db_instance" "pic-sure-mysql" {
- allocated_storage = 50
- storage_type = "gp2"
- engine = "mysql"
- engine_version = "5.7"
- instance_class = "db.t3.small"
- name = "picsure"
- username = "root"
- password = random_password.picsure-db-password.result
- parameter_group_name = "default.mysql5.7"
- storage_encrypted = true
- db_subnet_group_name = "main-${var.target-stack}"
- copy_tags_to_snapshot = true
- skip_final_snapshot = true
- vpc_security_group_ids = [aws_security_group.inbound-mysql-from-app.id]
+ allocated_storage = 50
+ storage_type = "gp2"
+ engine = "mysql"
+ engine_version = "5.7"
+ instance_class = "db.t3.small"
+ name = "picsure"
+ username = "root"
+ password = random_password.picsure-db-password.result
+ parameter_group_name = "default.mysql5.7"
+ storage_encrypted = true
+ db_subnet_group_name = local.db_subnet_group_name
+ copy_tags_to_snapshot = true
+ skip_final_snapshot = true
+ vpc_security_group_ids = [aws_security_group.inbound-mysql-from-wildfly.id]
+
+ snapshot_identifier = var.picsure_rds_snapshot_id
+
tags = {
Owner = "Avillach_Lab"
- Environment = "development"
- Name = "FISMA Terraform Playground - ${var.stack_githash} - PIC-SURE DB Instance - ${var.target-stack}"
+ Environment = var.environment_name
+ Stack = var.target_stack
+ Project = var.env_project
+ Name = "PIC-SURE DB Instance - ${var.target_stack} - ${local.uniq_name}"
}
}
resource "random_password" "picsure-db-password" {
- length = 16
+ length = 16
special = false
}
+
+data "template_file" "pic-sure-schema-sql" {
+ template = file("configs/pic-sure-schema.sql")
+ vars = {
+ picsure_token_introspection_token = var.picsure_token_introspection_token
+ target_stack = var.target_stack
+ env_private_dns_name = var.env_private_dns_name
+ include_auth_hpds = var.include_auth_hpds
+ include_open_hpds = var.include_open_hpds
+ }
+}
+
+resource "local_file" "pic-sure-schema-sql-file" {
+ content = data.template_file.pic-sure-schema-sql.rendered
+ filename = "pic-sure-schema.sql"
+}
+
+# Need to handle the case where a stack is using the hardcoded URLs for resources in the db.
+# Upserts on deployment will at least ensure a newly deployed stack is using the correct resource URLs.
+# This will not be a good methodology for standalone, as each stack cannot use the same UUID.
+# Need a more dynamic and abstract method to find resources instead of hardcoding them into config files:
+# e.g. another table with simple unique names that the app can use to look up the UUIDs (sketched below).
+#
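+# A hedged sketch of that proposed lookup table (purely illustrative; nothing in this diff creates it):
+#   CREATE TABLE resource_alias (
+#     name          VARCHAR(255) NOT NULL PRIMARY KEY,  -- stable name, e.g. 'auth-hpds'
+#     resource_uuid BINARY(16)   NOT NULL                -- points at resource.uuid
+#   );
+#   -- The app would then resolve UUIDs by name instead of hardcoding them:
+#   -- SELECT resource_uuid FROM resource_alias WHERE name = 'auth-hpds';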
+data "template_file" "resources-registration" {
+ template = file("configs/resources-registration.sql")
+ vars = {
+ picsure_token_introspection_token = var.picsure_token_introspection_token
+ target_stack = var.target_stack
+ env_private_dns_name = var.env_private_dns_name
+ include_auth_hpds = var.include_auth_hpds
+ include_open_hpds = var.include_open_hpds
+ }
+}
+
+resource "local_file" "resources-registration-file" {
+ content = data.template_file.resources-registration.rendered
+ filename = "resources-registration.sql"
+}
\ No newline at end of file
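
A note on the new snapshot_identifier argument above: when var.picsure_rds_snapshot_id is set, the instance is created by restoring that RDS snapshot; when it is null, the argument is a no-op and an empty database is created. Illustrative tfvars (values assumed):

    # terraform.tfvars
    picsure_rds_snapshot_id = "picsure-pre-deploy-snapshot"   # restore from an existing snapshot
    # picsure_rds_snapshot_id = null                          # or provision a fresh, empty DB
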
diff --git a/app-infrastructure/provider.tf b/app-infrastructure/provider.tf
index 25fcf963..10bbc9ed 100644
--- a/app-infrastructure/provider.tf
+++ b/app-infrastructure/provider.tf
@@ -1,5 +1,6 @@
provider "aws" {
- region = "us-east-1"
- profile = "avillachlab-secure-infrastructure"
- version = "3.74"
+ region = "us-east-1"
+ profile = "avillachlab-secure-infrastructure"
+ version = "3.74"
+
}
diff --git a/app-infrastructure/route53-template.tf b/app-infrastructure/route53-template.tf
index f1043fe5..16f0698d 100644
--- a/app-infrastructure/route53-template.tf
+++ b/app-infrastructure/route53-template.tf
@@ -1,16 +1,70 @@
-data "template_file" "route53-ip-vars" {
- template = file("route53-variables.template")
- vars = {
- pic-sure-mysql-address = aws_db_instance.pic-sure-mysql.address
- wildfly-ec2-private_ip = aws_instance.wildfly-ec2.private_ip
- httpd-ec2-private_ip = aws_instance.httpd-ec2.private_ip
- open-hpds-ec2-private_ip = aws_instance.open-hpds-ec2.private_ip
- auth-hpds-ec2-private_ip = aws_instance.auth-hpds-ec2.private_ip
- dictionary-ec2-private_ip = aws_instance.dictionary-ec2.private_ip
- }
+# This needs to be refactored:
+# we should just make the Route53 changes in Terraform
+# instead of outputting this file and using Jenkins + the AWS CLI in the Teardown and Rebuild job to make the record changes. Yikes!!
+#data "template_file" "route53-ip-vars" {
+# template = file("route53-variables.template")
+# vars = {
+# pic-sure-mysql-address = aws_db_instance.pic-sure-mysql.address
+# wildfly-ec2-private_ip = aws_instance.wildfly-ec2.private_ip
+# httpd-ec2-private_ip = aws_instance.httpd-ec2.private_ip
+# open-hpds-ec2-private_ip = aws_instance.open-hpds-ec2.private_ip
+# auth-hpds-ec2-private_ip = aws_instance.auth-hpds-ec2.private_ip
+# dictionary-ec2-private_ip = aws_instance.dictionary-ec2.private_ip
+# }
+#}
+
+#resource "local_file" "route53-ip-vars-file" {
+# content = data.template_file.route53-ip-vars.rendered
+# filename = "ip-vars.properties"
+#}
+
+# DNS RECORDS for all nodes
+resource "aws_route53_record" "httpd-addr-record" {
+ zone_id = var.env_hosted_zone_id
+ name = "httpd.${var.target_stack}.${var.env_private_dns_name}"
+ type = "A"
+ ttl = 60
+ records = [aws_instance.httpd-ec2.private_ip]
+}
+
+resource "aws_route53_record" "wildfly-addr-record" {
+ zone_id = var.env_hosted_zone_id
+ name = "wildfly.${var.target_stack}.${var.env_private_dns_name}"
+ type = "A"
+ ttl = 60
+ records = [aws_instance.wildfly-ec2.private_ip]
+}
+
+resource "aws_route53_record" "open-hpds-addr-record" {
+ count = var.include_open_hpds ? 1 : 0
+ zone_id = var.env_hosted_zone_id
+ name = "open-hpds.${var.target_stack}.${var.env_private_dns_name}"
+ type = "A"
+ ttl = 60
+ records = [aws_instance.open-hpds-ec2[0].private_ip]
+}
+
+resource "aws_route53_record" "auth-hpds-addr-record" {
+ count = var.include_auth_hpds ? 1 : 0
+ zone_id = var.env_hosted_zone_id
+ name = "auth-hpds.${var.target_stack}.${var.env_private_dns_name}"
+ type = "A"
+ ttl = 60
+ records = [aws_instance.auth-hpds-ec2[0].private_ip]
+}
+
+resource "aws_route53_record" "dictionary-addr-record" {
+ zone_id = var.env_hosted_zone_id
+ name = "dictionary.${var.target_stack}.${var.env_private_dns_name}"
+ type = "A"
+ ttl = 60
+ records = [aws_instance.dictionary-ec2.private_ip]
}
-resource "local_file" "route53-ip-vars-file" {
- content = data.template_file.route53-ip-vars.rendered
- filename = "ip-vars.properties"
+resource "aws_route53_record" "picsure-db-cname-record" {
+ zone_id = var.env_hosted_zone_id
+ name = "picsure-db.${var.target_stack}.${var.env_private_dns_name}"
+ type = "CNAME"
+ ttl = 60
+ records = [aws_db_instance.pic-sure-mysql.address]
}
diff --git a/app-infrastructure/s3_roles.tf b/app-infrastructure/s3_roles.tf
new file mode 100644
index 00000000..a5eb19e3
--- /dev/null
+++ b/app-infrastructure/s3_roles.tf
@@ -0,0 +1,520 @@
+
+resource "aws_iam_instance_profile" "wildfly-deployment-s3-profile" {
+ name = "wildfly-deployment-s3-profile-${var.target_stack}-${local.uniq_name}"
+ role = aws_iam_role.wildfly-deployment-s3-role.name
+}
+
+resource "aws_iam_role_policy" "wildfly-deployment-s3-policy" {
+ name = "wildfly-deployment-s3-policy-${var.target_stack}-${local.uniq_name}"
+ role = aws_iam_role.wildfly-deployment-s3-role.id
+  policy = <<EOF
diff --git a/app-infrastructure/scripts/auth_hpds-user_data.sh b/app-infrastructure/scripts/auth_hpds-user_data.sh
--- a/app-infrastructure/scripts/auth_hpds-user_data.sh
+++ b/app-infrastructure/scripts/auth_hpds-user_data.sh
- > /opt/aws/amazon-cloudwatch-agent/etc/custom_config.json
-sudo /opt/aws/amazon-cloudwatch-agent/bin/amazon-cloudwatch-agent-ctl -a fetch-config -m ec2 -c file:/opt/aws/amazon-cloudwatch-agent/etc/custom_config.json -s
-
-#!/bin/bash
-
-ACTIVATIONURL='dsm://dsm.datastage.hms.harvard.edu:4120/'
-MANAGERURL='https://dsm.datastage.hms.harvard.edu:443'
-CURLOPTIONS='--silent --tlsv1.2'
-linuxPlatform='';
-isRPM='';
-
-if [[ $(/usr/bin/id -u) -ne 0 ]]; then
- echo You are not running as the root user. Please try again with root privileges.;
- logger -t You are not running as the root user. Please try again with root privileges.;
- exit 1;
-fi;
-
-if ! type curl >/dev/null 2>&1; then
- echo "Please install CURL before running this script."
- logger -t Please install CURL before running this script
- exit 1
-fi
-
-curl $MANAGERURL/software/deploymentscript/platform/linuxdetectscriptv1/ -o /tmp/PlatformDetection $CURLOPTIONS --insecure
-
-if [ -s /tmp/PlatformDetection ]; then
- . /tmp/PlatformDetection
-else
- echo "Failed to download the agent installation support script."
- logger -t Failed to download the Deep Security Agent installation support script
- exit 1
-fi
-platform_detect
-if [[ -z "$${linuxPlatform}" ]] || [[ -z "$${isRPM}" ]]; then
- echo Unsupported platform is detected
- logger -t Unsupported platform is detected
- exit 1
-fi
+echo "SPLUNK_INDEX=hms_aws_${gss_prefix}" | sudo tee /opt/srce/startup.config
+echo "NESSUS_GROUP=${gss_prefix}_${target_stack}" | sudo tee -a /opt/srce/startup.config
-echo Downloading agent package...
-if [[ $isRPM == 1 ]]; then package='agent.rpm'
- else package='agent.deb'
-fi
-curl -H "Agent-Version-Control: on" $MANAGERURL/software/agent/$${runningPlatform}$${majorVersion}/$${archType}/$package?tenantID= -o /tmp/$package $CURLOPTIONS --insecure
-
-echo Installing agent package...
-rc=1
-if [[ $isRPM == 1 && -s /tmp/agent.rpm ]]; then
- rpm -ihv /tmp/agent.rpm
- rc=$?
-elif [[ -s /tmp/agent.deb ]]; then
- dpkg -i /tmp/agent.deb
- rc=$?
-else
- echo Failed to download the agent package. Please make sure the package is imported in the Deep Security Manager
- logger -t Failed to download the agent package. Please make sure the package is imported in the Deep Security Manager
- exit 1
-fi
-if [[ $${rc} != 0 ]]; then
- echo Failed to install the agent package
- logger -t Failed to install the agent package
- exit 1
-fi
-
-echo Install the agent package successfully
-
-sleep 15
-/opt/ds_agent/dsa_control -r
-/opt/ds_agent/dsa_control -a $ACTIVATIONURL "policyid:14"
-# /opt/ds_agent/dsa_control -a dsm://dsm01.dbmi-datastage.local:4120/ "policyid:11"
-
-echo "starting Splunk configuration"
-
-useradd -r -m splunk
-
-for i in 1 2 3 4 5; do echo "trying to download Splunk local forwarder from s3://${stack_s3_bucket}/splunk_config/splunkforwarder-9.0.3-dd0128b1f8cd-Linux-x86_64.tgz" && sudo /usr/local/bin/aws --region us-east-1 s3 cp s3://${stack_s3_bucket}/splunk_config/splunkforwarder-9.0.3-dd0128b1f8cd-Linux-x86_64.tgz /opt/ && break || sleep 60; done
-echo "pulled Splunk tar file, extracting"
-
-cd /opt
-sudo tar -xf splunkforwarder-9.0.3-dd0128b1f8cd-Linux-x86_64.tgz
-
-echo "changing splunk permissions"
-chown -R splunk:splunk splunkforwarder
-echo "starting splunk UF as splunk user"
-sudo -u splunk /opt/splunkforwarder/bin/splunk start --accept-license --answer-yes --no-prompt
-echo "stopping service again to enable boot-start"
-sudo -u splunk /opt/splunkforwarder/bin/splunk stop
-echo "enabling boot-start"
-sudo /opt/splunkforwarder/bin/splunk enable boot-start -systemd-managed 1 -user splunk
-
-echo "Configuring inputs and outputs"
+sudo sh /opt/srce/scripts/start-gsstools.sh
echo "
-[default]
-host = $(curl http://169.254.169.254/latest/meta-data/instance-id)
[monitor:///var/log/hpds-docker-logs]
sourcetype = hms_app_logs
source = hpds_logs
-index=hms_aws_bdcprod
-" > /opt/splunkforwarder/etc/system/local/inputs.conf
-
-echo "updating permissions for app logs using ACL"
-mkdir -p /var/log/hpds-docker-logs
-sudo setfacl -R -m g:splunk:rx /var/log/hpds-docker-logs
-
-echo "starting splunk as a service"
-sudo systemctl start SplunkForwarder
+index=hms_aws_${gss_prefix}
+" | sudo tee -a /opt/splunkforwarder/etc/system/local/inputs.conf
+sudo systemctl restart SplunkForwarder || true
-echo "completed Splunk configuration"
+echo "user-data progress starting update"
+sudo yum -y update
-## Download and Install Nessus
-for i in {1..5}; do sudo /usr/local/bin/aws --region us-east-1 s3 cp s3://${stack_s3_bucket}/nessus_config/setup.sh /opt/nessus_setup.sh && break || sleep 45; done
-sh /opt/nessus_setup.sh "${stack_s3_bucket}" "BDC_Prod_$(echo ${target-stack}|tr '[a-b]' '[A-B]')"
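+# Retry an S3 copy up to 5 times, sleeping between attempts, to ride out transient S3/network failures.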
+s3_copy() {
+ for i in {1..5}; do
+    sudo /usr/bin/aws --region us-east-1 s3 cp "$@" && break || sleep 30
+ done
+}
-for i in 1 2 3 4 5; do sudo /usr/local/bin/aws --region us-east-1 s3 cp s3://${stack_s3_bucket}/releases/jenkins_pipeline_build_${stack_githash}/pic-sure-hpds.tar.gz /home/centos/pic-sure-hpds.tar.gz && break || sleep 45; done
mkdir -p /opt/local/hpds/all
-for i in 1 2 3 4 5; do sudo /usr/local/bin/aws --region us-east-1 s3 cp s3://${stack_s3_bucket}/data/${dataset_s3_object_key}/javabins_rekeyed.tar.gz /opt/local/hpds/javabins_rekeyed.tar.gz && break || sleep 45; done
-aws s3 cp s3://${stack_s3_bucket}/data/${genomic_dataset_s3_object_key}/all/chr0masks.bin /opt/local/hpds/all/
-aws s3 cp s3://${stack_s3_bucket}/data/${genomic_dataset_s3_object_key}/all/chr1masks.bin /opt/local/hpds/all/
-aws s3 cp s3://${stack_s3_bucket}/data/${genomic_dataset_s3_object_key}/all/chr2masks.bin /opt/local/hpds/all/
-aws s3 cp s3://${stack_s3_bucket}/data/${genomic_dataset_s3_object_key}/all/chr3masks.bin /opt/local/hpds/all/
-aws s3 cp s3://${stack_s3_bucket}/data/${genomic_dataset_s3_object_key}/all/chr4masks.bin /opt/local/hpds/all/
-aws s3 cp s3://${stack_s3_bucket}/data/${genomic_dataset_s3_object_key}/all/chr5masks.bin /opt/local/hpds/all/
-aws s3 cp s3://${stack_s3_bucket}/data/${genomic_dataset_s3_object_key}/all/chr6masks.bin /opt/local/hpds/all/
-aws s3 cp s3://${stack_s3_bucket}/data/${genomic_dataset_s3_object_key}/all/chr7masks.bin /opt/local/hpds/all/
-aws s3 cp s3://${stack_s3_bucket}/data/${genomic_dataset_s3_object_key}/all/chr8masks.bin /opt/local/hpds/all/
-aws s3 cp s3://${stack_s3_bucket}/data/${genomic_dataset_s3_object_key}/all/chr9masks.bin /opt/local/hpds/all/
-aws s3 cp s3://${stack_s3_bucket}/data/${genomic_dataset_s3_object_key}/all/chr10masks.bin /opt/local/hpds/all/
-aws s3 cp s3://${stack_s3_bucket}/data/${genomic_dataset_s3_object_key}/all/chr11masks.bin /opt/local/hpds/all/
-aws s3 cp s3://${stack_s3_bucket}/data/${genomic_dataset_s3_object_key}/all/chr12masks.bin /opt/local/hpds/all/
-aws s3 cp s3://${stack_s3_bucket}/data/${genomic_dataset_s3_object_key}/all/chr13masks.bin /opt/local/hpds/all/
-aws s3 cp s3://${stack_s3_bucket}/data/${genomic_dataset_s3_object_key}/all/chr14masks.bin /opt/local/hpds/all/
-aws s3 cp s3://${stack_s3_bucket}/data/${genomic_dataset_s3_object_key}/all/chr15masks.bin /opt/local/hpds/all/
-aws s3 cp s3://${stack_s3_bucket}/data/${genomic_dataset_s3_object_key}/all/chr16masks.bin /opt/local/hpds/all/
-aws s3 cp s3://${stack_s3_bucket}/data/${genomic_dataset_s3_object_key}/all/chr17masks.bin /opt/local/hpds/all/
-aws s3 cp s3://${stack_s3_bucket}/data/${genomic_dataset_s3_object_key}/all/chr18masks.bin /opt/local/hpds/all/
-aws s3 cp s3://${stack_s3_bucket}/data/${genomic_dataset_s3_object_key}/all/chr19masks.bin /opt/local/hpds/all/
-aws s3 cp s3://${stack_s3_bucket}/data/${genomic_dataset_s3_object_key}/all/chr20masks.bin /opt/local/hpds/all/
-aws s3 cp s3://${stack_s3_bucket}/data/${genomic_dataset_s3_object_key}/all/chr21masks.bin /opt/local/hpds/all/
-aws s3 cp s3://${stack_s3_bucket}/data/${genomic_dataset_s3_object_key}/all/chr22masks.bin /opt/local/hpds/all/
-aws s3 cp s3://${stack_s3_bucket}/data/${genomic_dataset_s3_object_key}/all/BucketIndexBySample.javabin /opt/local/hpds/all/
-aws s3 cp s3://${stack_s3_bucket}/data/${genomic_dataset_s3_object_key}/all/Gene_with_variant_infoStore.javabin /opt/local/hpds/all/
-aws s3 cp s3://${stack_s3_bucket}/data/${genomic_dataset_s3_object_key}/all/Variant_class_infoStore.javabin /opt/local/hpds/all/
-aws s3 cp s3://${stack_s3_bucket}/data/${genomic_dataset_s3_object_key}/all/Variant_consequence_calculated_infoStore.javabin /opt/local/hpds/all/
-aws s3 cp s3://${stack_s3_bucket}/data/${genomic_dataset_s3_object_key}/all/Variant_frequency_as_text_infoStore.javabin /opt/local/hpds/all/
-aws s3 cp s3://${stack_s3_bucket}/data/${genomic_dataset_s3_object_key}/all/Variant_severity_infoStore.javabin /opt/local/hpds/all/
-aws s3 cp s3://${stack_s3_bucket}/data/${genomic_dataset_s3_object_key}/all/BucketIndexBySampleStorage.javabin /opt/local/hpds/all/
-aws s3 cp s3://${stack_s3_bucket}/data/${genomic_dataset_s3_object_key}/all/Gene_with_variant_infoStoreStorage.javabin /opt/local/hpds/all/
-aws s3 cp s3://${stack_s3_bucket}/data/${genomic_dataset_s3_object_key}/all/Variant_class_infoStoreStorage.javabin /opt/local/hpds/all/
-aws s3 cp s3://${stack_s3_bucket}/data/${genomic_dataset_s3_object_key}/all/Variant_consequence_calculated_infoStoreStorage.javabin /opt/local/hpds/all/
-aws s3 cp s3://${stack_s3_bucket}/data/${genomic_dataset_s3_object_key}/all/Variant_frequency_as_text_infoStoreStorage.javabin /opt/local/hpds/all/
-aws s3 cp s3://${stack_s3_bucket}/data/${genomic_dataset_s3_object_key}/all/Variant_severity_infoStoreStorage.javabin /opt/local/hpds/all/
-aws s3 cp s3://${stack_s3_bucket}/data/${genomic_dataset_s3_object_key}/all/variantIndex_fbbis.javabin /opt/local/hpds/all/
-aws s3 cp s3://${stack_s3_bucket}/data/${genomic_dataset_s3_object_key}/all/variantIndex_fbbis_storage.javabin /opt/local/hpds/all/
-aws s3 cp s3://${stack_s3_bucket}/data/${genomic_dataset_s3_object_key}/all/variantStore.javabin /opt/local/hpds/all/
+s3_copy s3://${stack_s3_bucket}/releases/jenkins_pipeline_build_${stack_githash}/pic-sure-hpds.tar.gz /home/centos/pic-sure-hpds.tar.gz
+s3_copy s3://${stack_s3_bucket}/data/${dataset_s3_object_key}/javabins_rekeyed.tar.gz /opt/local/hpds/javabins_rekeyed.tar.gz
+s3_copy s3://${stack_s3_bucket}/data/${genomic_dataset_s3_object_key}/all/ /opt/local/hpds/all/ --recursive
cd /opt/local/hpds
tar -xvzf javabins_rekeyed.tar.gz
cd ~
-for i in 1 2 3 4 5; do sudo /usr/local/bin/aws --region us-east-1 s3 cp s3://${stack_s3_bucket}/domain-join.sh /root/domain-join.sh && break || sleep 45; done
-sudo bash /root/domain-join.sh
+# Load and run the docker container, then wait for initialization before tagging the instance as init complete.
+echo "Loading and running docker container"
+INIT_MESSAGE="WebApplicationContext: initialization completed"
+INIT_TIMEOUT_SECS=2400 # Set your desired timeout in seconds
+INIT_START_TIME=$(date +%s)
-sudo mkdir -p /var/log/hpds-docker-logs
+CONTAINER_NAME="auth-hpds"
HPDS_IMAGE=`sudo docker load < /home/centos/pic-sure-hpds.tar.gz | cut -d ' ' -f 3`
-sudo docker run --name=auth-hpds \
+sudo docker run --name=$CONTAINER_NAME \
--restart unless-stopped \
--log-driver syslog --log-opt tag=auth-hpds \
-v /opt/local/hpds:/opt/local/hpds \
@@ -260,8 +48,26 @@ sudo docker run --name=auth-hpds \
-e CATALINA_OPTS=" -XX:+UseParallelGC -XX:SurvivorRatio=250 -Xms10g -Xmx110g -DCACHE_SIZE=2500 -DSMALL_TASK_THREADS=1 -DLARGE_TASK_THREADS=1 -DSMALL_JOB_LIMIT=100 -DID_BATCH_SIZE=5000 '-DALL_IDS_CONCEPT=NONE' '-DID_CUBE_NAME=NONE'" \
-d $HPDS_IMAGE
-sudo docker logs -f hpds > /var/log/hpds-docker-logs/hpds.log &
-
-INSTANCE_ID=$(curl -H "X-aws-ec2-metadata-token: $(curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600")" --silent http://169.254.169.254/latest/meta-data/instance-id)
-sudo /usr/local/bin/aws --region=us-east-1 ec2 create-tags --resources $${INSTANCE_ID} --tags Key=InitComplete,Value=true
-
+echo "Waiting for container to initialize"
+while true; do
+  status=$(sudo docker logs "$CONTAINER_NAME" 2>&1 | grep "$INIT_MESSAGE")
+
+  if [ -n "$status" ]; then
+ echo "$CONTAINER_NAME container has initialized."
+
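+    # Fetch the instance ID via an IMDSv2 token-authenticated metadata request, then tag the instance as initialized.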
+ INSTANCE_ID=$(curl -H "X-aws-ec2-metadata-token: $(curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600")" --silent http://169.254.169.254/latest/meta-data/instance-id)
+ sudo /usr/bin/aws --region=us-east-1 ec2 create-tags --resources $INSTANCE_ID --tags Key=InitComplete,Value=true
+ break
+ else
+ CURRENT_TIME=$(date +%s)
+ ELAPSED_TIME=$((CURRENT_TIME - INIT_START_TIME))
+
+ if [ "$ELAPSED_TIME" -ge "$INIT_TIMEOUT_SECS" ]; then
+ echo "Timeout reached ($INIT_TIMEOUT_SECS seconds). The $CONTAINER_NAME container initialization didn't complete."
+ INSTANCE_ID=$(curl -H "X-aws-ec2-metadata-token: $(curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600")" --silent http://169.254.169.254/latest/meta-data/instance-id)
+ sudo /usr/bin/aws --region=us-east-1 ec2 create-tags --resources $INSTANCE_ID --tags Key=InitComplete,Value=failed
+
+ break
+    fi
+    # Sleep between checks; 10s is an arbitrary poll interval so we don't busy-loop on docker logs.
+    sleep 10
+ fi
+done
diff --git a/app-infrastructure/scripts/dictionary-user_data.sh b/app-infrastructure/scripts/dictionary-user_data.sh
index 60372042..b7df918f 100644
--- a/app-infrastructure/scripts/dictionary-user_data.sh
+++ b/app-infrastructure/scripts/dictionary-user_data.sh
@@ -1,224 +1,41 @@
#!/bin/bash
-sudo yum install wget -y
-sudo yum install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm
-sudo systemctl enable amazon-ssm-agent
-sudo systemctl start amazon-ssm-agent
-wget https://s3.amazonaws.com/amazoncloudwatch-agent/centos/amd64/latest/amazon-cloudwatch-agent.rpm
-sudo rpm -U amazon-cloudwatch-agent.rpm
-sudo touch /opt/aws/amazon-cloudwatch-agent/etc/custom_config.json
-echo "
-
-{
- \"metrics\": {
-
- \"metrics_collected\": {
- \"cpu\": {
- \"measurement\": [
- \"cpu_usage_idle\",
- \"cpu_usage_user\",
- \"cpu_usage_system\"
- ],
- \"metrics_collection_interval\": 300,
- \"totalcpu\": false
- },
- \"disk\": {
- \"measurement\": [
- \"used_percent\"
- ],
- \"metrics_collection_interval\": 600,
- \"resources\": [
- \"*\"
- ]
- },
- \"mem\": {
- \"measurement\": [
- \"mem_used_percent\",
- \"mem_available\",
- \"mem_available_percent\",
- \"mem_total\",
- \"mem_used\"
-
- ],
- \"metrics_collection_interval\": 600
- }
- }
- },
- \"logs\":{
- \"logs_collected\":{
- \"files\":{
- \"collect_list\":[
- {
- \"file_path\":\"/var/log/secure\",
- \"log_group_name\":\"secure\",
- \"log_stream_name\":\"{instance_id} secure\",
- \"timestamp_format\":\"UTC\"
- },
- {
- \"file_path\":\"/var/log/messages\",
- \"log_group_name\":\"messages\",
- \"log_stream_name\":\"{instance_id} messages\",
- \"timestamp_format\":\"UTC\"
- },
- {
- \"file_path\":\"/var/log/audit/audit.log\",
- \"log_group_name\":\"audit.log\",
- \"log_stream_name\":\"{instance_id} audit.log\",
- \"timestamp_format\":\"UTC\"
- },
- {
- \"file_path\":\"/var/log/yum.log\",
- \"log_group_name\":\"yum.log\",
- \"log_stream_name\":\"{instance_id} yum.log\",
- \"timestamp_format\":\"UTC\"
- },
- {
- \"file_path\":\"/var/log/dictionary-docker-logs/*\",
- \"log_group_name\":\"dictionary-logs\",
- \"log_stream_name\":\"{instance_id} ${stack_githash} dictionary-app-logs\",
- \"timestamp_format\":\"UTC\"
- }
- ]
- }
- }
- }
-
-
-}
-
-" > /opt/aws/amazon-cloudwatch-agent/etc/custom_config.json
-sudo /opt/aws/amazon-cloudwatch-agent/bin/amazon-cloudwatch-agent-ctl -a fetch-config -m ec2 -c file:/opt/aws/amazon-cloudwatch-agent/etc/custom_config.json -s
-
-#!/bin/bash
-
-ACTIVATIONURL='dsm://dsm.datastage.hms.harvard.edu:4120/'
-MANAGERURL='https://dsm.datastage.hms.harvard.edu:443'
-CURLOPTIONS='--silent --tlsv1.2'
-linuxPlatform='';
-isRPM='';
-
-if [[ $(/usr/bin/id -u) -ne 0 ]]; then
- echo You are not running as the root user. Please try again with root privileges.;
- logger -t You are not running as the root user. Please try again with root privileges.;
- exit 1;
-fi;
-
-if ! type curl >/dev/null 2>&1; then
- echo "Please install CURL before running this script."
- logger -t Please install CURL before running this script
- exit 1
-fi
-
-curl $MANAGERURL/software/deploymentscript/platform/linuxdetectscriptv1/ -o /tmp/PlatformDetection $CURLOPTIONS --insecure
-
-if [ -s /tmp/PlatformDetection ]; then
- . /tmp/PlatformDetection
-else
- echo "Failed to download the agent installation support script."
- logger -t Failed to download the Deep Security Agent installation support script
- exit 1
-fi
-
-platform_detect
-if [[ -z "$${linuxPlatform}" ]] || [[ -z "$${isRPM}" ]]; then
- echo Unsupported platform is detected
- logger -t Unsupported platform is detected
- exit 1
-fi
-
-echo Downloading agent package...
-if [[ $isRPM == 1 ]]; then package='agent.rpm'
- else package='agent.deb'
-fi
-curl -H "Agent-Version-Control: on" $MANAGERURL/software/agent/$${runningPlatform}$${majorVersion}/$${archType}/$package?tenantID= -o /tmp/$package $CURLOPTIONS --insecure
-
-echo Installing agent package...
-rc=1
-if [[ $isRPM == 1 && -s /tmp/agent.rpm ]]; then
- rpm -ihv /tmp/agent.rpm
- rc=$?
-elif [[ -s /tmp/agent.deb ]]; then
- dpkg -i /tmp/agent.deb
- rc=$?
-else
- echo Failed to download the agent package. Please make sure the package is imported in the Deep Security Manager
- logger -t Failed to download the agent package. Please make sure the package is imported in the Deep Security Manager
- exit 1
-fi
-if [[ $${rc} != 0 ]]; then
- echo Failed to install the agent package
- logger -t Failed to install the agent package
- exit 1
-fi
-echo Install the agent package successfully
-
-sleep 15
-/opt/ds_agent/dsa_control -r
-/opt/ds_agent/dsa_control -a $ACTIVATIONURL "policyid:14"
-# /opt/ds_agent/dsa_control -a dsm://dsm01.dbmi-datastage.local:4120/ "policyid:11"
-
-echo "starting Splunk configuration"
-
-useradd -r -m splunk
-
-for i in 1 2 3 4 5; do echo "trying to download Splunk local forwarder from s3://${stack_s3_bucket}/splunk_config/splunkforwarder-9.0.3-dd0128b1f8cd-Linux-x86_64.tgz" && sudo /usr/local/bin/aws --region us-east-1 s3 cp s3://${stack_s3_bucket}/splunk_config/splunkforwarder-9.0.3-dd0128b1f8cd-Linux-x86_64.tgz /opt/ && break || sleep 60; done
-echo "pulled Splunk tar file, extracting"
-
-for i in 1 2 3 4 5; do echo "trying to download fence mapping from s3://${stack_s3_bucket}/data/${dataset_s3_object_key}/fence_mapping.json" && sudo /usr/local/bin/aws --region us-east-1 s3 cp s3://${stack_s3_bucket}/data/${dataset_s3_object_key}/fence_mapping.json /home/centos/fence_mapping.json && break || sleep 45; done
-echo "pulled fence mapping"
-
-cd /opt
-sudo tar -xf splunkforwarder-9.0.3-dd0128b1f8cd-Linux-x86_64.tgz
-
-echo "changing splunk permissions"
-chown -R splunk:splunk splunkforwarder
-echo "starting splunk UF as splunk user"
-sudo -u splunk /opt/splunkforwarder/bin/splunk start --accept-license --answer-yes --no-prompt
-echo "stopping service again to enable boot-start"
-sudo -u splunk /opt/splunkforwarder/bin/splunk stop
-echo "enabling boot-start"
-sudo /opt/splunkforwarder/bin/splunk enable boot-start -systemd-managed 1 -user splunk
-
-echo "Configuring inputs and outputs"
+echo "SPLUNK_INDEX=hms_aws_${gss_prefix}" | sudo tee /opt/srce/startup.config
+echo "NESSUS_GROUP=${gss_prefix}_${target_stack}" | sudo tee -a /opt/srce/startup.config
+sudo sh /opt/srce/scripts/start-gsstools.sh
echo "
-[default]
-host = $(curl http://169.254.169.254/latest/meta-data/instance-id)
-[monitor:///var/log/hpds-docker-logs]
+[monitor:///var/log/dictionary-docker-logs/]
sourcetype = hms_app_logs
-source = hpds_logs
-index=hms_aws_bdcprod
-" > /opt/splunkforwarder/etc/system/local/inputs.conf
-
-echo "updating permissions for app logs using ACL"
-mkdir -p /var/log/hpds-docker-logs
-sudo setfacl -R -m g:splunk:rx /var/log/hpds-docker-logs
-
-echo "starting splunk as a service"
-sudo systemctl start SplunkForwarder
-
-echo "completed Splunk configuration"
-
-## Download and Install Nessus
-for i in {1..5}; do sudo /usr/local/bin/aws --region us-east-1 s3 cp s3://${stack_s3_bucket}/nessus_config/setup.sh /opt/nessus_setup.sh && break || sleep 45; done
-sh /opt/nessus_setup.sh "${stack_s3_bucket}" "BDC_Prod_$(echo ${target-stack}|tr '[a-b]' '[A-B]')"
-
-for i in 1 2 3 4 5; do sudo /usr/local/bin/aws --region us-east-1 s3 cp s3://${stack_s3_bucket}/domain-join.sh /root/domain-join.sh && break || sleep 45; done
-sudo bash /root/domain-join.sh
+source = dictionary_logs
+index=hms_aws_${gss_prefix}
+" | sudo tee -a /opt/splunkforwarder/etc/system/local/inputs.conf
+sudo systemctl restart SplunkForwarder || true
+
+echo "user-data progress starting update"
+sudo yum -y update
+
+s3_copy() {
+  for i in {1..5}; do
+    sudo /usr/bin/aws --region us-east-1 s3 cp "$@" && break || sleep 30
+  done
+}
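+# Usage: s3_copy <source> <destination> [extra `aws s3 cp` flags, e.g. --recursive]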
-sudo mkdir -p /var/log/dictionary-docker-logs
-for i in 1 2 3 4 5 6 7 8 9; do sudo /usr/local/bin/aws --region us-east-1 s3 cp s3://${stack_s3_bucket}/releases/jenkins_pipeline_build_${stack_githash}/pic-sure-hpds-dictionary-resource.tar.gz . && break || sleep 45; done
+s3_copy s3://${stack_s3_bucket}/releases/jenkins_pipeline_build_${stack_githash}/pic-sure-hpds-dictionary-resource.tar.gz /home/centos/pic-sure-hpds-dictionary-resource.tar.gz
-for i in 1 2 3 4 5; do echo "trying to download fence mapping from s3://${stack_s3_bucket}/data/${dataset_s3_object_key}/fence_mapping.json" && sudo /usr/local/bin/aws --region us-east-1 s3 cp s3://${stack_s3_bucket}/data/${dataset_s3_object_key}/fence_mapping.json /home/centos/fence_mapping.json && break || sleep 45; done
+s3_copy s3://${stack_s3_bucket}/data/${dataset_s3_object_key}/fence_mapping.json /home/centos/fence_mapping.json
echo "pulled fence mapping"
-#sudo mkdir -p /usr/local/docker-config/search/
-#for i in 1 2 3 4 5 6 7 8 9; do sudo /usr/local/bin/aws --region us-east-1 s3 cp s3://${stack_s3_bucket}/data/${stack_githash}/dictionary.javabin.tar.gz /usr/local/docker-config/search/dictionary.javabin.tar.gz && break || sleep 45; done
-#sudo tar -xvzf /usr/local/docker-config/search/dictionary.javabin.tar.gz -C /usr/local/docker-config/search/
-for i in 1 2 3 4 5 6 7 8 9; do sudo /usr/local/bin/aws --region us-east-1 s3 cp s3://${stack_s3_bucket}/releases/jenkins_pipeline_build_${stack_githash}/pic-sure-hpds-dictionary-resource.tar.gz /home/centos/pic-sure-hpds-dictionary-resource.tar.gz && break || sleep 45; done
+
sudo mkdir -p /usr/local/docker-config/search/
+sudo mkdir -p /var/log/dictionary-docker-logs
DICTIONARY_IMAGE=`sudo docker load < /home/centos/pic-sure-hpds-dictionary-resource.tar.gz | cut -d ' ' -f 3`
-sudo docker run --name=dictionary -v /var/log/dictionary-docker-logs/:/usr/local/tomcat/logs/ -v /home/centos/fence_mapping.json:/usr/local/docker-config/search/fence_mapping.json -e CATALINA_OPTS=" -Xms1g -Xmx12g " -p 8080:8080 -d $DICTIONARY_IMAGE
+sudo docker run --name=dictionary \
+ --log-driver syslog --log-opt tag=dictionary \
+ -v /var/log/dictionary-docker-logs/:/usr/local/tomcat/logs/ \
+ -v /home/centos/fence_mapping.json:/usr/local/docker-config/search/fence_mapping.json \
+ -e CATALINA_OPTS=" -Xms1g -Xmx12g " \
+ -p 8080:8080 -d $DICTIONARY_IMAGE
INSTANCE_ID=$(curl -H "X-aws-ec2-metadata-token: $(curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600")" --silent http://169.254.169.254/latest/meta-data/instance-id)
-sudo /usr/local/bin/aws --region=us-east-1 ec2 create-tags --resources $${INSTANCE_ID} --tags Key=InitComplete,Value=true
+sudo /usr/bin/aws --region=us-east-1 ec2 create-tags --resources $${INSTANCE_ID} --tags Key=InitComplete,Value=true
diff --git a/app-infrastructure/scripts/httpd-user_data.sh b/app-infrastructure/scripts/httpd-user_data.sh
index 35103540..c6a66cca 100644
--- a/app-infrastructure/scripts/httpd-user_data.sh
+++ b/app-infrastructure/scripts/httpd-user_data.sh
@@ -1,231 +1,55 @@
#!/bin/bash
-sudo yum install wget -y
-sudo yum install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm
-sudo systemctl enable amazon-ssm-agent
-sudo systemctl start amazon-ssm-agent
-wget https://s3.amazonaws.com/amazoncloudwatch-agent/centos/amd64/latest/amazon-cloudwatch-agent.rpm
-sudo rpm -U amazon-cloudwatch-agent.rpm
-sudo touch /opt/aws/amazon-cloudwatch-agent/etc/custom_config.json
-echo "
-
-{
- \"metrics\": {
-
- \"metrics_collected\": {
- \"cpu\": {
- \"measurement\": [
- \"cpu_usage_idle\",
- \"cpu_usage_user\",
- \"cpu_usage_system\"
- ],
- \"metrics_collection_interval\": 300,
- \"totalcpu\": false
- },
- \"disk\": {
- \"measurement\": [
- \"used_percent\"
- ],
- \"metrics_collection_interval\": 600,
- \"resources\": [
- \"*\"
- ]
- },
- \"mem\": {
- \"measurement\": [
- \"mem_used_percent\",
- \"mem_available\",
- \"mem_available_percent\",
- \"mem_total\",
- \"mem_used\"
-
- ],
- \"metrics_collection_interval\": 600
- }
- }
- },
- \"logs\":{
- \"logs_collected\":{
- \"files\":{
- \"collect_list\":[
- {
- \"file_path\":\"/var/log/secure\",
- \"log_group_name\":\"secure\",
- \"log_stream_name\":\"{instance_id} secure\",
- \"timestamp_format\":\"UTC\"
- },
- {
- \"file_path\":\"/var/log/messages\",
- \"log_group_name\":\"messages\",
- \"log_stream_name\":\"{instance_id} messages\",
- \"timestamp_format\":\"UTC\"
- },
- {
- \"file_path\":\"/var/log/audit/audit.log\",
- \"log_group_name\":\"audit.log\",
- \"log_stream_name\":\"{instance_id} audit.log\",
- \"timestamp_format\":\"UTC\"
- },
- {
- \"file_path\":\"/var/log/yum.log\",
- \"log_group_name\":\"yum.log\",
- \"log_stream_name\":\"{instance_id} yum.log\",
- \"timestamp_format\":\"UTC\"
- },
- {
- \"file_path\":\"/var/log/httpd-docker-logs/*\",
- \"log_group_name\":\"httpd-logs\",
- \"log_stream_name\":\"{instance_id} ${stack_githash} httpd-app-logs\",
- \"timestamp_format\":\"UTC\"
- }
- ]
- }
- }
- }
-
-
-}
-
-" > /opt/aws/amazon-cloudwatch-agent/etc/custom_config.json
-sudo /opt/aws/amazon-cloudwatch-agent/bin/amazon-cloudwatch-agent-ctl -a fetch-config -m ec2 -c file:/opt/aws/amazon-cloudwatch-agent/etc/custom_config.json -s
-
-
-#!/bin/bash
-
-ACTIVATIONURL='dsm://dsm.datastage.hms.harvard.edu:4120/'
-MANAGERURL='https://dsm.datastage.hms.harvard.edu:443'
-CURLOPTIONS='--silent --tlsv1.2'
-linuxPlatform='';
-isRPM='';
-
-if [[ $(/usr/bin/id -u) -ne 0 ]]; then
- echo You are not running as the root user. Please try again with root privileges.;
- logger -t You are not running as the root user. Please try again with root privileges.;
- exit 1;
-fi;
-
-if ! type curl >/dev/null 2>&1; then
- echo "Please install CURL before running this script."
- logger -t Please install CURL before running this script
- exit 1
-fi
-
-curl $MANAGERURL/software/deploymentscript/platform/linuxdetectscriptv1/ -o /tmp/PlatformDetection $CURLOPTIONS --insecure
-
-if [ -s /tmp/PlatformDetection ]; then
- . /tmp/PlatformDetection
-else
- echo "Failed to download the agent installation support script."
- logger -t Failed to download the Deep Security Agent installation support script
- exit 1
-fi
-
-platform_detect
-if [[ -z "$${linuxPlatform}" ]] || [[ -z "$${isRPM}" ]]; then
- echo Unsupported platform is detected
- logger -t Unsupported platform is detected
- exit 1
-fi
-echo Downloading agent package...
-if [[ $isRPM == 1 ]]; then package='agent.rpm'
- else package='agent.deb'
-fi
-curl -H "Agent-Version-Control: on" $MANAGERURL/software/agent/$${runningPlatform}$${majorVersion}/$${archType}/$package?tenantID= -o /tmp/$package $CURLOPTIONS --insecure
+echo "SPLUNK_INDEX=hms_aws_${gss_prefix}" | sudo tee /opt/srce/startup.config
+echo "NESSUS_GROUP=${gss_prefix}_${target_stack}" | sudo tee -a /opt/srce/startup.config
-echo Installing agent package...
-rc=1
-if [[ $isRPM == 1 && -s /tmp/agent.rpm ]]; then
- rpm -ihv /tmp/agent.rpm
- rc=$?
-elif [[ -s /tmp/agent.deb ]]; then
- dpkg -i /tmp/agent.deb
- rc=$?
-else
- echo Failed to download the agent package. Please make sure the package is imported in the Deep Security Manager
- logger -t Failed to download the agent package. Please make sure the package is imported in the Deep Security Manager
- exit 1
-fi
-if [[ $${rc} != 0 ]]; then
- echo Failed to install the agent package
- logger -t Failed to install the agent package
- exit 1
-fi
-
-echo Install the agent package successfully
-
-sleep 15
-/opt/ds_agent/dsa_control -r
-/opt/ds_agent/dsa_control -a $ACTIVATIONURL "policyid:14"
-# /opt/ds_agent/dsa_control -a dsm://dsm01.dbmi-datastage.local:4120/ "policyid:11"
-
-mkdir -p /usr/local/docker-config/cert
-mkdir -p /var/log/httpd-docker-logs/ssl_mutex
-
-echo "starting Splunk configuration"
-
-useradd -r -m splunk
-
-for i in 1 2 3 4 5; do echo "trying to download Splunk local forwarder from s3://${stack_s3_bucket}/splunk_config/splunkforwarder-9.0.3-dd0128b1f8cd-Linux-x86_64.tgz" && sudo /usr/local/bin/aws --region us-east-1 s3 cp s3://${stack_s3_bucket}/splunk_config/splunkforwarder-9.0.3-dd0128b1f8cd-Linux-x86_64.tgz /opt/ && break || sleep 60; done
-echo "pulled Splunk tar file, extracting"
-
-cd /opt
-sudo tar -xf splunkforwarder-9.0.3-dd0128b1f8cd-Linux-x86_64.tgz
-
-echo "changing splunk permissions"
-chown -R splunk:splunk splunkforwarder
-echo "starting splunk UF as splunk user"
-sudo -u splunk /opt/splunkforwarder/bin/splunk start --accept-license --answer-yes --no-prompt
-echo "stopping service again to enable boot-start"
-sudo -u splunk /opt/splunkforwarder/bin/splunk stop
-echo "enabling boot-start"
-sudo /opt/splunkforwarder/bin/splunk enable boot-start -systemd-managed 1 -user splunk
-
-echo "Configuring inputs and outputs"
+sudo sh /opt/srce/scripts/start-gsstools.sh
echo "
-[default]
-host = $(curl http://169.254.169.254/latest/meta-data/instance-id)
-[monitor:///var/log/httpd-docker-logs]
+[monitor:///var/log/httpd-docker-logs/]
sourcetype = hms_app_logs
source = httpd_logs
-index=hms_aws_bdcprod
-" > /opt/splunkforwarder/etc/system/local/inputs.conf
-
-echo "updating permissions for app logs using ACL"
-mkdir -p /var/log/httpd-docker-logs
-sudo setfacl -R -m g:splunk:rx /var/log/httpd-docker-logs
-
-echo "starting splunk as a service"
-sudo systemctl start SplunkForwarder
+index=hms_aws_${gss_prefix}
+" | sudo tee -a /opt/splunkforwarder/etc/system/local/inputs.conf
+sudo systemctl restart SplunkForwarder || true
-echo "completed Splunk configuration"
+echo "user-data progress starting update"
+sudo yum -y update
-## Download and Install Nessus
-for i in {1..5}; do sudo /usr/local/bin/aws --region us-east-1 s3 cp s3://${stack_s3_bucket}/nessus_config/setup.sh /opt/nessus_setup.sh && break || sleep 45; done
-sh /opt/nessus_setup.sh "${stack_s3_bucket}" "BDC_Prod_$(echo ${target-stack}|tr '[a-b]' '[A-B]')"
+mkdir -p /usr/local/docker-config/cert
+mkdir -p /var/log/httpd-docker-logs/ssl_mutex
-for i in 1 2 3 4 5 6 7 8 9; do sudo /usr/local/bin/aws --region us-east-1 s3 cp s3://${stack_s3_bucket}/releases/jenkins_pipeline_build_${stack_githash}/pic-sure-ui.tar.gz /home/centos/pic-sure-ui.tar.gz && break || sleep 45; done
-for i in 1 2 3 4 5; do sudo /usr/local/bin/aws --region us-east-1 s3 cp s3://${stack_s3_bucket}/configs/jenkins_pipeline_build_${stack_githash}/httpd-vhosts.conf /usr/local/docker-config/httpd-vhosts.conf && break || sleep 15; done
-for i in 1 2 3 4 5; do sudo /usr/local/bin/aws --region us-east-1 s3 cp s3://${stack_s3_bucket}/certs/httpd/server.chain /usr/local/docker-config/cert/server.chain && break || sleep 15; done
-for i in 1 2 3 4 5; do sudo /usr/local/bin/aws --region us-east-1 s3 cp s3://${stack_s3_bucket}/certs/httpd/server.crt /usr/local/docker-config/cert/server.crt && break || sleep 15; done
-for i in 1 2 3 4 5; do sudo /usr/local/bin/aws --region us-east-1 s3 cp s3://${stack_s3_bucket}/certs/httpd/server.key /usr/local/docker-config/cert/server.key && break || sleep 15; done
-for i in 1 2 3 4 5; do sudo /usr/local/bin/aws --region us-east-1 s3 cp s3://${stack_s3_bucket}/certs/httpd/preprod_server.chain /usr/local/docker-config/cert/preprod_server.chain && break || sleep 15; done
-for i in 1 2 3 4 5; do sudo /usr/local/bin/aws --region us-east-1 s3 cp s3://${stack_s3_bucket}/certs/httpd/preprod_server.crt /usr/local/docker-config/cert/preprod_server.crt && break || sleep 15; done
-for i in 1 2 3 4 5; do sudo /usr/local/bin/aws --region us-east-1 s3 cp s3://${stack_s3_bucket}/certs/httpd/preprod_server.key /usr/local/docker-config/cert/preprod_server.key && break || sleep 15; done
-for i in 1 2 3 4 5; do sudo /usr/local/bin/aws --region us-east-1 s3 cp s3://${stack_s3_bucket}/configs/jenkins_pipeline_build_${stack_githash}/picsureui_settings.json /usr/local/docker-config/picsureui_settings.json && break || sleep 15; done
-for i in 1 2 3 4 5; do sudo /usr/local/bin/aws --region us-east-1 s3 cp s3://${stack_s3_bucket}/configs/jenkins_pipeline_build_${stack_githash}/banner_config.json /usr/local/docker-config/banner_config.json && break || sleep 15; done
-for i in 1 2 3 4 5; do echo "trying to download fence mapping from s3://${stack_s3_bucket}/data/${dataset_s3_object_key}/fence_mapping.json" && sudo /usr/local/bin/aws --region us-east-1 s3 cp s3://${stack_s3_bucket}/data/${dataset_s3_object_key}/fence_mapping.json /home/centos/fence_mapping.json && break || sleep 45; done
+s3_copy() {
+  for i in {1..5}; do
+    sudo /usr/bin/aws --region us-east-1 s3 cp "$@" && break || sleep 30
+  done
+}
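+# Usage: s3_copy <source> <destination> [extra `aws s3 cp` flags, e.g. --recursive]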
+# Sleep for a while, as these files could still be in the process of being rendered.
+echo "waiting for terraform to render files"
+sleep 1200
+s3_copy s3://${stack_s3_bucket}/releases/jenkins_pipeline_build_${stack_githash}/pic-sure-ui.tar.gz /home/centos/pic-sure-ui.tar.gz
+s3_copy s3://${stack_s3_bucket}/configs/jenkins_pipeline_build_${stack_githash}/httpd-vhosts.conf /usr/local/docker-config/httpd-vhosts.conf
+s3_copy s3://${stack_s3_bucket}/certs/httpd/ /usr/local/docker-config/cert/ --recursive
+s3_copy s3://${stack_s3_bucket}/configs/jenkins_pipeline_build_${stack_githash}/picsureui_settings.json /usr/local/docker-config/picsureui_settings.json
+s3_copy s3://${stack_s3_bucket}/configs/jenkins_pipeline_build_${stack_githash}/banner_config.json /usr/local/docker-config/banner_config.json
+s3_copy s3://${stack_s3_bucket}/data/${dataset_s3_object_key}/fence_mapping.json /home/centos/fence_mapping.json
for i in 1 2 3 4 5; do echo "confirming wildfly resolvable" && sudo curl --connect-timeout 1 $(grep -A30 preprod /usr/local/docker-config/httpd-vhosts.conf | grep wildfly | grep api | cut -d "\"" -f 2 | sed 's/pic-sure-api-2.*//') || if [ $? = 6 ]; then (exit 1); fi && break || sleep 60; done
-for i in 1 2 3 4 5; do sudo /usr/local/bin/aws --region us-east-1 s3 cp s3://${stack_s3_bucket}/domain-join.sh /root/domain-join.sh && break || sleep 45; done
-sudo bash /root/domain-join.sh
+sudo mkdir -p /var/log/httpd-docker-logs
HTTPD_IMAGE=`sudo docker load < /home/centos/pic-sure-ui.tar.gz | cut -d ' ' -f 3`
-sudo docker run --restart unless-stopped --name=httpd -v /var/log/httpd-docker-logs/:/usr/local/apache2/logs/ -v /usr/local/docker-config/picsureui_settings.json:/usr/local/apache2/htdocs/picsureui/settings/settings.json -v /usr/local/docker-config/banner_config.json:/usr/local/apache2/htdocs/picsureui/settings/banner_config.json -v /home/centos/fence_mapping.json:/usr/local/apache2/htdocs/picsureui/studyAccess/studies-data.json -v /usr/local/docker-config/cert:/usr/local/apache2/cert/ -v /usr/local/docker-config/httpd-vhosts.conf:/usr/local/apache2/conf/extra/httpd-vhosts.conf -p 80:80 -p 443:443 -d $HTTPD_IMAGE
-sudo docker logs -f httpd > /var/log/httpd-docker-logs/httpd.log &
+sudo docker run --name=httpd \
+ --restart unless-stopped \
+ --log-driver syslog --log-opt tag=httpd \
+ -v /var/log/httpd-docker-logs/:/usr/local/apache2/logs/ \
+ -v /usr/local/docker-config/picsureui_settings.json:/usr/local/apache2/htdocs/picsureui/settings/settings.json \
+ -v /usr/local/docker-config/banner_config.json:/usr/local/apache2/htdocs/picsureui/settings/banner_config.json \
+ -v /home/centos/fence_mapping.json:/usr/local/apache2/htdocs/picsureui/studyAccess/studies-data.json \
+ -v /usr/local/docker-config/cert:/usr/local/apache2/cert/ \
+ -v /usr/local/docker-config/httpd-vhosts.conf:/usr/local/apache2/conf/extra/httpd-vhosts.conf \
+ -p 443:443 -d $HTTPD_IMAGE
INSTANCE_ID=$(curl -H "X-aws-ec2-metadata-token: $(curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600")" --silent http://169.254.169.254/latest/meta-data/instance-id)
-sudo /usr/local/bin/aws --region=us-east-1 ec2 create-tags --resources $${INSTANCE_ID} --tags Key=InitComplete,Value=true
-
+sudo /usr/bin/aws --region=us-east-1 ec2 create-tags --resources $${INSTANCE_ID} --tags Key=InitComplete,Value=true
diff --git a/app-infrastructure/scripts/open_hpds-user_data.sh b/app-infrastructure/scripts/open_hpds-user_data.sh
index e798da86..c5f96f65 100644
--- a/app-infrastructure/scripts/open_hpds-user_data.sh
+++ b/app-infrastructure/scripts/open_hpds-user_data.sh
@@ -1,227 +1,71 @@
#!/bin/bash
-sudo yum install wget -y
-sudo yum install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm
-sudo systemctl enable amazon-ssm-agent
-sudo systemctl start amazon-ssm-agent
-wget https://s3.amazonaws.com/amazoncloudwatch-agent/centos/amd64/latest/amazon-cloudwatch-agent.rpm
-sudo rpm -U amazon-cloudwatch-agent.rpm
-sudo touch /opt/aws/amazon-cloudwatch-agent/etc/custom_config.json
-echo "
-
-{
- \"metrics\": {
-
- \"metrics_collected\": {
- \"cpu\": {
- \"measurement\": [
- \"cpu_usage_idle\",
- \"cpu_usage_user\",
- \"cpu_usage_system\"
- ],
- \"metrics_collection_interval\": 300,
- \"totalcpu\": false
- },
- \"disk\": {
- \"measurement\": [
- \"used_percent\"
- ],
- \"metrics_collection_interval\": 600,
- \"resources\": [
- \"*\"
- ]
- },
- \"mem\": {
- \"measurement\": [
- \"mem_used_percent\",
- \"mem_available\",
- \"mem_available_percent\",
- \"mem_total\",
- \"mem_used\"
-
- ],
- \"metrics_collection_interval\": 600
- }
- }
- },
- \"logs\":{
- \"logs_collected\":{
- \"files\":{
- \"collect_list\":[
- {
- \"file_path\":\"/var/log/secure\",
- \"log_group_name\":\"secure\",
- \"log_stream_name\":\"{instance_id} secure\",
- \"timestamp_format\":\"UTC\"
- },
- {
- \"file_path\":\"/var/log/messages\",
- \"log_group_name\":\"messages\",
- \"log_stream_name\":\"{instance_id} messages\",
- \"timestamp_format\":\"UTC\"
- },
- {
- \"file_path\":\"/var/log/audit/audit.log\",
- \"log_group_name\":\"audit.log\",
- \"log_stream_name\":\"{instance_id} audit.log\",
- \"timestamp_format\":\"UTC\"
- },
- {
- \"file_path\":\"/var/log/yum.log\",
- \"log_group_name\":\"yum.log\",
- \"log_stream_name\":\"{instance_id} yum.log\",
- \"timestamp_format\":\"UTC\"
- },
- {
- \"file_path\":\"/var/log/hpds-docker-logs/*\",
- \"log_group_name\":\"hpds-logs\",
- \"log_stream_name\":\"{instance_id} ${stack_githash} hpds-app-logs\",
- \"timestamp_format\":\"UTC\"
- }
- ]
- }
- }
- }
-
-}
-
-" > /opt/aws/amazon-cloudwatch-agent/etc/custom_config.json
-sudo /opt/aws/amazon-cloudwatch-agent/bin/amazon-cloudwatch-agent-ctl -a fetch-config -m ec2 -c file:/opt/aws/amazon-cloudwatch-agent/etc/custom_config.json -s
+echo "SPLUNK_INDEX=hms_aws_${gss_prefix}" | sudo tee /opt/srce/startup.config
+echo "NESSUS_GROUP=${gss_prefix}_${target_stack}" | sudo tee -a /opt/srce/startup.config
-#!/bin/bash
-
-ACTIVATIONURL='dsm://dsm.datastage.hms.harvard.edu:4120/'
-MANAGERURL='https://dsm.datastage.hms.harvard.edu:443'
-CURLOPTIONS='--silent --tlsv1.2'
-linuxPlatform='';
-isRPM='';
-
-if [[ $(/usr/bin/id -u) -ne 0 ]]; then
- echo You are not running as the root user. Please try again with root privileges.;
- logger -t You are not running as the root user. Please try again with root privileges.;
- exit 1;
-fi;
-
-if ! type curl >/dev/null 2>&1; then
- echo "Please install CURL before running this script."
- logger -t Please install CURL before running this script
- exit 1
-fi
-
-curl $MANAGERURL/software/deploymentscript/platform/linuxdetectscriptv1/ -o /tmp/PlatformDetection $CURLOPTIONS --insecure
-
-if [ -s /tmp/PlatformDetection ]; then
- . /tmp/PlatformDetection
-else
- echo "Failed to download the agent installation support script."
- logger -t Failed to download the Deep Security Agent installation support script
- exit 1
-fi
-
-platform_detect
-if [[ -z "$${linuxPlatform}" ]] || [[ -z "$${isRPM}" ]]; then
- echo Unsupported platform is detected
- logger -t Unsupported platform is detected
- exit 1
-fi
-
-echo Downloading agent package...
-if [[ $isRPM == 1 ]]; then package='agent.rpm'
- else package='agent.deb'
-fi
-curl -H "Agent-Version-Control: on" $MANAGERURL/software/agent/$${runningPlatform}$${majorVersion}/$${archType}/$package?tenantID= -o /tmp/$package $CURLOPTIONS --insecure
-
-echo Installing agent package...
-rc=1
-if [[ $isRPM == 1 && -s /tmp/agent.rpm ]]; then
- rpm -ihv /tmp/agent.rpm
- rc=$?
-elif [[ -s /tmp/agent.deb ]]; then
- dpkg -i /tmp/agent.deb
- rc=$?
-else
- echo Failed to download the agent package. Please make sure the package is imported in the Deep Security Manager
- logger -t Failed to download the agent package. Please make sure the package is imported in the Deep Security Manager
- exit 1
-fi
-if [[ $${rc} != 0 ]]; then
- echo Failed to install the agent package
- logger -t Failed to install the agent package
- exit 1
-fi
-
-echo Install the agent package successfully
-
-sleep 15
-/opt/ds_agent/dsa_control -r
-/opt/ds_agent/dsa_control -a $ACTIVATIONURL "policyid:14"
-# /opt/ds_agent/dsa_control -a dsm://dsm01.dbmi-datastage.local:4120/ "policyid:11"
-
-echo "starting Splunk configuration"
-
-useradd -r -m splunk
-
-for i in 1 2 3 4 5; do echo "trying to download Splunk local forwarder from s3://${stack_s3_bucket}/splunk_config/splunkforwarder-9.0.3-dd0128b1f8cd-Linux-x86_64.tgz" && sudo /usr/local/bin/aws --region us-east-1 s3 cp s3://${stack_s3_bucket}/splunk_config/splunkforwarder-9.0.3-dd0128b1f8cd-Linux-x86_64.tgz /opt/ && break || sleep 60; done
-echo "pulled Splunk tar file, extracting"
-
-cd /opt
-sudo tar -xf splunkforwarder-9.0.3-dd0128b1f8cd-Linux-x86_64.tgz
-
-echo "changing splunk permissions"
-chown -R splunk:splunk splunkforwarder
-echo "starting splunk UF as splunk user"
-sudo -u splunk /opt/splunkforwarder/bin/splunk start --accept-license --answer-yes --no-prompt
-echo "stopping service again to enable boot-start"
-sudo -u splunk /opt/splunkforwarder/bin/splunk stop
-echo "enabling boot-start"
-sudo /opt/splunkforwarder/bin/splunk enable boot-start -systemd-managed 1 -user splunk
-
-echo "Configuring inputs and outputs"
+sudo sh /opt/srce/scripts/start-gsstools.sh
echo "
-[default]
-host = $(curl http://169.254.169.254/latest/meta-data/instance-id)
[monitor:///var/log/hpds-docker-logs]
sourcetype = hms_app_logs
source = hpds_logs
-index=hms_aws_bdcprod
-" > /opt/splunkforwarder/etc/system/local/inputs.conf
-
-echo "updating permissions for app logs using ACL"
-mkdir -p /var/log/hpds-docker-logs
-sudo setfacl -R -m g:splunk:rx /var/log/hpds-docker-logs
+index=hms_aws_${gss_prefix}
+" | sudo tee -a /opt/splunkforwarder/etc/system/local/inputs.conf
+sudo systemctl restart SplunkForwarder || true
-echo "starting splunk as a service"
-sudo systemctl start SplunkForwarder
+echo "user-data progress starting update"
+sudo yum -y update
-echo "completed Splunk configuration"
+s3_copy() {
+  for i in {1..5}; do
+    sudo /usr/bin/aws --region us-east-1 s3 cp "$@" && break || sleep 30
+  done
+}
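+# Usage: s3_copy <source> <destination> [extra `aws s3 cp` flags, e.g. --recursive]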
-## Download and Install Nessus
-for i in {1..5}; do sudo /usr/local/bin/aws --region us-east-1 s3 cp s3://${stack_s3_bucket}/nessus_config/setup.sh /opt/nessus_setup.sh && break || sleep 45; done
-sh /opt/nessus_setup.sh "${stack_s3_bucket}" "BDC_Prod_$(echo ${target-stack}|tr '[a-b]' '[A-B]')"
+s3_copy s3://${stack_s3_bucket}/releases/jenkins_pipeline_build_${stack_githash}/pic-sure-hpds.tar.gz /home/centos/pic-sure-hpds.tar.gz
-for i in 1 2 3 4 5; do sudo /usr/local/bin/aws --region us-east-1 s3 cp s3://${stack_s3_bucket}/releases/jenkins_pipeline_build_${stack_githash}/pic-sure-hpds.tar.gz /home/centos/pic-sure-hpds.tar.gz && break || sleep 45; done
+s3_copy s3://${stack_s3_bucket}/data/${destigmatized_dataset_s3_object_key}/destigmatized_javabins_rekeyed.tar.gz /opt/local/hpds/destigmatized_javabins_rekeyed.tar.gz
-for i in 1 2 3 4 5; do sudo /usr/local/bin/aws --region us-east-1 s3 cp s3://${stack_s3_bucket}/data/${destigmatized_dataset_s3_object_key}/destigmatized_javabins_rekeyed.tar.gz /opt/local/hpds/destigmatized_javabins_rekeyed.tar.gz && break || sleep 45; done
cd /opt/local/hpds
tar -xvzf destigmatized_javabins_rekeyed.tar.gz
cd ~
-sudo mkdir -p /var/log/hpds-docker-logs
+# Wait for the application to finish initializing before tagging the instance as init complete.
+INIT_MESSAGE="WebApplicationContext: initialization completed"
+INIT_TIMEOUT_SECS=2400 # Set your desired timeout in seconds
+INIT_START_TIME=$(date +%s)
-for i in 1 2 3 4 5; do sudo /usr/local/bin/aws --region us-east-1 s3 cp s3://${stack_s3_bucket}/domain-join.sh /root/domain-join.sh && break || sleep 45; done
-sudo bash /root/domain-join.sh
+CONTAINER_NAME="open-hpds"
HPDS_IMAGE=`sudo docker load < /home/centos/pic-sure-hpds.tar.gz | cut -d ' ' -f 3`
-sudo docker run --name=open-hpds \
+sudo docker run --name=$CONTAINER_NAME \
--restart unless-stopped \
--log-driver syslog --log-opt tag=open-hpds \
- -v /opt/local/hpds:/opt/local/hpds -p 8080:8080 \
+ -v /opt/local/hpds:/opt/local/hpds \
+ -p 8080:8080 \
-e CATALINA_OPTS=" -XX:+UseParallelGC -XX:SurvivorRatio=250 -Xms10g -Xmx40g -DCACHE_SIZE=2500 -DSMALL_TASK_THREADS=1 -DLARGE_TASK_THREADS=1 -DSMALL_JOB_LIMIT=100 -DID_BATCH_SIZE=5000 " \
-d $HPDS_IMAGE
-sudo docker logs -f hpds > /var/log/hpds-docker-logs/hpds.log &
-
-INSTANCE_ID=$(curl -H "X-aws-ec2-metadata-token: $(curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600")" --silent http://169.254.169.254/latest/meta-data/instance-id)
-sudo /usr/local/bin/aws --region=us-east-1 ec2 create-tags --resources $${INSTANCE_ID} --tags Key=InitComplete,Value=true
-
+echo "Waiting for container to initialize"
+while true; do
+  status=$(sudo docker logs "$CONTAINER_NAME" 2>&1 | grep "$INIT_MESSAGE")
+
+  if [ -n "$status" ]; then
+ echo "$CONTAINER_NAME container has initialized."
+
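+    # Fetch the instance ID via an IMDSv2 token-authenticated metadata request, then tag the instance as initialized.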
+ INSTANCE_ID=$(curl -H "X-aws-ec2-metadata-token: $(curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600")" --silent http://169.254.169.254/latest/meta-data/instance-id)
+ sudo /usr/bin/aws --region=us-east-1 ec2 create-tags --resources $INSTANCE_ID --tags Key=InitComplete,Value=true
+ break
+ else
+ CURRENT_TIME=$(date +%s)
+ ELAPSED_TIME=$((CURRENT_TIME - INIT_START_TIME))
+
+ if [ "$ELAPSED_TIME" -ge "$INIT_TIMEOUT_SECS" ]; then
+ echo "Timeout reached ($INIT_TIMEOUT_SECS seconds). The $CONTAINER_NAME container initialization didn't complete."
+ INSTANCE_ID=$(curl -H "X-aws-ec2-metadata-token: $(curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600")" --silent http://169.254.169.254/latest/meta-data/instance-id)
+ sudo /usr/bin/aws --region=us-east-1 ec2 create-tags --resources $INSTANCE_ID --tags Key=InitComplete,Value=failed
+
+ break
+    fi
+    # Sleep between checks; 10s is an arbitrary poll interval so we don't busy-loop on docker logs.
+    sleep 10
+ fi
+done
diff --git a/app-infrastructure/scripts/wildfly-user_data.sh b/app-infrastructure/scripts/wildfly-user_data.sh
index 5aba7073..8611c8f0 100644
--- a/app-infrastructure/scripts/wildfly-user_data.sh
+++ b/app-infrastructure/scripts/wildfly-user_data.sh
@@ -1,250 +1,78 @@
#!/bin/bash
-sudo yum install wget -y
-sudo yum install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm
-sudo systemctl enable amazon-ssm-agent
-sudo systemctl start amazon-ssm-agent
-wget https://s3.amazonaws.com/amazoncloudwatch-agent/centos/amd64/latest/amazon-cloudwatch-agent.rpm
-sudo rpm -U amazon-cloudwatch-agent.rpm
-sudo touch /opt/aws/amazon-cloudwatch-agent/etc/custom_config.json
-echo "
-
-{
- \"metrics\": {
-
- \"metrics_collected\": {
- \"cpu\": {
- \"measurement\": [
- \"cpu_usage_idle\",
- \"cpu_usage_user\",
- \"cpu_usage_system\"
- ],
- \"metrics_collection_interval\": 300,
- \"totalcpu\": false
- },
- \"disk\": {
- \"measurement\": [
- \"used_percent\"
- ],
- \"metrics_collection_interval\": 600,
- \"resources\": [
- \"*\"
- ]
- },
- \"mem\": {
- \"measurement\": [
- \"mem_used_percent\",
- \"mem_available\",
- \"mem_available_percent\",
- \"mem_total\",
- \"mem_used\"
- ],
- \"metrics_collection_interval\": 600
- }
- }
- },
- \"logs\":{
- \"logs_collected\":{
- \"files\":{
- \"collect_list\":[
- {
- \"file_path\":\"/var/log/secure\",
- \"log_group_name\":\"secure\",
- \"log_stream_name\":\"{instance_id} secure\",
- \"timestamp_format\":\"UTC\"
- },
- {
- \"file_path\":\"/var/log/messages\",
- \"log_group_name\":\"messages\",
- \"log_stream_name\":\"{instance_id} messages\",
- \"timestamp_format\":\"UTC\"
- },
- {
- \"file_path\":\"/var/log/audit/audit.log\",
- \"log_group_name\":\"audit.log\",
- \"log_stream_name\":\"{instance_id} audit.log\",
- \"timestamp_format\":\"UTC\"
- },
- {
- \"file_path\":\"/var/log/yum.log\",
- \"log_group_name\":\"yum.log\",
- \"log_stream_name\":\"{instance_id} yum.log\",
- \"timestamp_format\":\"UTC\"
- },
- {
- \"file_path\":\"/var/log/wildfly-docker-logs/*\",
- \"log_group_name\":\"wildfly-logs\",
- \"log_stream_name\":\"{instance_id} ${stack_githash} wildfly-app-logs\",
- \"timestamp_format\":\"UTC\"
- }
- ]
- }
- }
- }
-
-
-}
-" > /opt/aws/amazon-cloudwatch-agent/etc/custom_config.json
-sudo /opt/aws/amazon-cloudwatch-agent/bin/amazon-cloudwatch-agent-ctl -a fetch-config -m ec2 -c file:/opt/aws/amazon-cloudwatch-agent/etc/custom_config.json -s
+echo "SPLUNK_INDEX=hms_aws_${gss_prefix}" | sudo tee /opt/srce/startup.config
+echo "NESSUS_GROUP=${gss_prefix}_${target_stack}" | sudo tee -a /opt/srce/startup.config
-#!/bin/bash
-
-ACTIVATIONURL='dsm://dsm.datastage.hms.harvard.edu:4120/'
-MANAGERURL='https://dsm.datastage.hms.harvard.edu:443'
-CURLOPTIONS='--silent --tlsv1.2'
-linuxPlatform='';
-isRPM='';
-
-if [[ $(/usr/bin/id -u) -ne 0 ]]; then
- echo You are not running as the root user. Please try again with root privileges.;
- logger -t You are not running as the root user. Please try again with root privileges.;
- exit 1;
-fi;
-
-if ! type curl >/dev/null 2>&1; then
- echo "Please install CURL before running this script."
- logger -t Please install CURL before running this script
- exit 1
-fi
-
-curl $MANAGERURL/software/deploymentscript/platform/linuxdetectscriptv1/ -o /tmp/PlatformDetection $CURLOPTIONS --insecure
-
-if [ -s /tmp/PlatformDetection ]; then
- . /tmp/PlatformDetection
-else
- echo "Failed to download the agent installation support script."
- logger -t Failed to download the Deep Security Agent installation support script
- exit 1
-fi
-
-platform_detect
-if [[ -z "$${linuxPlatform}" ]] || [[ -z "$${isRPM}" ]]; then
- echo Unsupported platform is detected
- logger -t Unsupported platform is detected
- exit 1
-fi
-
-echo Downloading agent package...
-if [[ $isRPM == 1 ]]; then package='agent.rpm'
- else package='agent.deb'
-fi
-curl -H "Agent-Version-Control: on" $MANAGERURL/software/agent/$${runningPlatform}$${majorVersion}/$${archType}/$package?tenantID= -o /tmp/$package $CURLOPTIONS --insecure
-
-echo Installing agent package...
-rc=1
-if [[ $isRPM == 1 && -s /tmp/agent.rpm ]]; then
- rpm -ihv /tmp/agent.rpm
- rc=$?
-elif [[ -s /tmp/agent.deb ]]; then
- dpkg -i /tmp/agent.deb
- rc=$?
-else
- echo Failed to download the agent package. Please make sure the package is imported in the Deep Security Manager
- logger -t Failed to download the agent package. Please make sure the package is imported in the Deep Security Manager
- exit 1
-fi
-if [[ $${rc} != 0 ]]; then
- echo Failed to install the agent package
- logger -t Failed to install the agent package
- exit 1
-fi
-
-echo Install the agent package successfully
-
-sleep 15
-/opt/ds_agent/dsa_control -r
-/opt/ds_agent/dsa_control -a $ACTIVATIONURL "policyid:14"
-# /opt/ds_agent/dsa_control -a dsm://dsm01.dbmi-datastage.local:4120/ "policyid:11"
-
-echo "started cloudwatch agent"
-
-echo "starting Splunk configuration"
-
-useradd -r -m splunk
-
-for i in 1 2 3 4 5; do echo "trying to download Splunk local forwarder from s3://${stack_s3_bucket}/splunk_config/splunkforwarder-9.0.3-dd0128b1f8cd-Linux-x86_64.tgz" && sudo /usr/local/bin/aws --region us-east-1 s3 cp s3://${stack_s3_bucket}/splunk_config/splunkforwarder-9.0.3-dd0128b1f8cd-Linux-x86_64.tgz /opt/ && break || sleep 60; done
-echo "pulled Splunk tar file, extracting"
-
-cd /opt
-sudo tar -xf splunkforwarder-9.0.3-dd0128b1f8cd-Linux-x86_64.tgz
-
-echo "changing splunk permissions"
-chown -R splunk:splunk splunkforwarder
-echo "starting splunk UF as splunk user"
-sudo -u splunk /opt/splunkforwarder/bin/splunk start --accept-license --answer-yes --no-prompt
-echo "stopping service again to enable boot-start"
-sudo -u splunk /opt/splunkforwarder/bin/splunk stop
-echo "enabling boot-start"
-sudo /opt/splunkforwarder/bin/splunk enable boot-start -systemd-managed 1 -user splunk
-
-echo "Configuring inputs and outputs"
+sudo sh /opt/srce/scripts/start-gsstools.sh
echo "
-[default]
-host = $(curl http://169.254.169.254/latest/meta-data/instance-id)
[monitor:///var/log/wildfly-docker-logs]
sourcetype = hms_app_logs
source = wildfly_logs
-index=hms_aws_bdcprod
-" > /opt/splunkforwarder/etc/system/local/inputs.conf
-
-echo "updating permissions for app logs using ACL"
-mkdir -p /var/log/wildfly-docker-logs
-sudo setfacl -R -m g:splunk:rx /var/log/wildfly-docker-logs
-
-echo "starting splunk as a service"
-sudo systemctl start SplunkForwarder
-
-echo "completed Splunk configuration"
-
-## Download and Install Nessus
-for i in {1..5}; do sudo /usr/local/bin/aws --region us-east-1 s3 cp s3://${stack_s3_bucket}/nessus_config/setup.sh /opt/nessus_setup.sh && break || sleep 45; done
-sh /opt/nessus_setup.sh "${stack_s3_bucket}" "BDC_Prod_$(echo ${target-stack}|tr '[a-b]' '[A-B]')"
+index=hms_aws_${gss_prefix}
-for i in 1 2 3 4 5; do echo "trying to download docker image from s3://${stack_s3_bucket}/releases/jenkins_pipeline_build_${stack_githash}/pic-sure-wildfly.tar.gz" && sudo /usr/local/bin/aws --region us-east-1 s3 cp s3://${stack_s3_bucket}/releases/jenkins_pipeline_build_${stack_githash}/pic-sure-wildfly.tar.gz /home/centos/pic-sure-wildfly.tar.gz && break || sleep 60; done
-echo "pulled wildfly docker image"
-for i in 1 2 3 4 5; do echo "trying to download standalone.xml from s3://${stack_s3_bucket}/configs/jenkins_pipeline_build_${stack_githash}/standalone.xml" && sudo /usr/local/bin/aws --region us-east-1 s3 cp s3://${stack_s3_bucket}/configs/jenkins_pipeline_build_${stack_githash}/standalone.xml /home/centos/standalone.xml && break || sleep 60; done
-echo "pulled standalone"
-for i in 1 2 3 4 5; do echo "trying to download schema from s3://${stack_s3_bucket}/configs/jenkins_pipeline_build_${stack_githash}/pic-sure-schema.sql" && sudo /usr/local/bin/aws --region us-east-1 s3 cp s3://${stack_s3_bucket}/configs/jenkins_pipeline_build_${stack_githash}/pic-sure-schema.sql /home/centos/pic-sure-schema.sql && break || sleep 45; done
-echo "pulled pic-sure-schema"
-for i in 1 2 3 4 5; do echo "trying to download mysql_module from s3://${stack_s3_bucket}/modules/mysql/module.xml /home/centos/mysql_module.xml" && sudo /usr/local/bin/aws --region us-east-1 s3 cp s3://${stack_s3_bucket}/modules/mysql/module.xml /home/centos/mysql_module.xml && break || sleep 45; done
-echo "pulled mysql_module"
-for i in 1 2 3 4 5; do echo "trying to download driver from s3://${stack_s3_bucket}/modules/mysql/mysql-connector-java-5.1.38.jar" && sudo /usr/local/bin/aws --region us-east-1 s3 cp s3://${stack_s3_bucket}/modules/mysql/mysql-connector-java-5.1.38.jar /home/centos/mysql-connector-java-5.1.38.jar && break || sleep 45; done
-echo "pulled mysql driver"
-for i in 1 2 3 4 5; do echo "trying to download fence mapping from s3://${stack_s3_bucket}/data/${dataset_s3_object_key}/fence_mapping.json" && sudo /usr/local/bin/aws --region us-east-1 s3 cp s3://${stack_s3_bucket}/data/${dataset_s3_object_key}/fence_mapping.json /home/centos/fence_mapping.json && break || sleep 45; done
-echo "pulled fence mapping"
-for i in 1 2 3 4 5; do echo "trying to download driver from s3://${stack_s3_bucket}/configs/jenkins_pipeline_build_${stack_githash}/aggregate-resource.properties" && sudo /usr/local/bin/aws --region us-east-1 s3 cp s3://${stack_s3_bucket}/configs/jenkins_pipeline_build_${stack_githash}/aggregate-resource.properties /home/centos/aggregate-resource.properties && break || sleep 45; done
-echo "pulled aggregate resource config"
-for i in 1 2 3 4 5; do echo "trying to download driver from s3://${stack_s3_bucket}/configs/jenkins_pipeline_build_${stack_githash}/visualization-resource.properties" && sudo /usr/local/bin/aws --region us-east-1 s3 cp s3://${stack_s3_bucket}/configs/jenkins_pipeline_build_${stack_githash}/visualization-resource.properties /home/centos/visualization-resource.properties && break || sleep 45; done
-echo "pulled visualization resource config"
+[monitor:///var/log/wildfly-docker-os-logs]
+sourcetype = hms_app_logs
+source = wildfly_logs
+index=hms_aws_${gss_prefix}
+" | sudo tee -a /opt/splunkforwarder/etc/system/local/inputs.conf
+sudo systemctl restart SplunkForwarder || true
-for i in 1 2 3 4 5; do sudo /usr/local/bin/aws --region us-east-1 s3 cp s3://${stack_s3_bucket}/domain-join.sh /root/domain-join.sh && break || sleep 45; done
-sudo bash /root/domain-join.sh
+echo "user-data progress starting update"
+sudo yum -y update
-sudo docker run -d --name schema-init -e "MYSQL_RANDOM_ROOT_PASSWORD=yes" --rm mysql
-sudo docker exec -i schema-init mysql -hpicsure-db.${target-stack}.datastage.hms.harvard.edu -uroot -p${mysql-instance-password} < /home/centos/pic-sure-schema.sql
-sudo docker stop schema-init
-echo "init'd mysql schemas"
+s3_copy() {
+  for i in {1..5}; do
+    sudo /usr/bin/aws --region us-east-1 s3 cp "$@" && break || sleep 30
+  done
+}
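+# Usage: s3_copy <source> <destination> [extra `aws s3 cp` flags, e.g. --recursive]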
+# Sleep for a while, as these files could still be in the process of being rendered.
+# TODO: containerize this already.
+echo "waiting for terraform to render files"
+sleep 600
+
+s3_copy s3://${stack_s3_bucket}/releases/jenkins_pipeline_build_${stack_githash}/pic-sure-wildfly.tar.gz /home/centos/pic-sure-wildfly.tar.gz
+s3_copy s3://${stack_s3_bucket}/configs/jenkins_pipeline_build_${stack_githash}/standalone.xml /home/centos/standalone.xml
+s3_copy s3://${stack_s3_bucket}/configs/jenkins_pipeline_build_${stack_githash}/pic-sure-schema.sql /home/centos/pic-sure-schema.sql
+s3_copy s3://${stack_s3_bucket}/configs/jenkins_pipeline_build_${stack_githash}/resources-registration.sql /home/centos/resources-registration.sql
+s3_copy s3://${stack_s3_bucket}/modules/mysql/module.xml /home/centos/mysql_module.xml
+s3_copy s3://${stack_s3_bucket}/modules/mysql/mysql-connector-java-5.1.38.jar /home/centos/mysql-connector-java-5.1.38.jar
+s3_copy s3://${stack_s3_bucket}/data/${dataset_s3_object_key}/fence_mapping.json /home/centos/fence_mapping.json
+s3_copy s3://${stack_s3_bucket}/configs/jenkins_pipeline_build_${stack_githash}/aggregate-resource.properties /home/centos/aggregate-resource.properties
+s3_copy s3://${stack_s3_bucket}/configs/jenkins_pipeline_build_${stack_githash}/visualization-resource.properties /home/centos/visualization-resource.properties
+
+# If no snapshot is used, the DB will be empty; use this to at least initialize the schema somewhere for now. - TD
+if [ -z "${picsure_rds_snapshot_id}" ]; then
+ sudo docker run -d --name schema-init -e "MYSQL_RANDOM_ROOT_PASSWORD=yes" --rm mysql
+ sudo docker exec -i schema-init mysql -hpicsure-db.${target_stack}.${env_private_dns_name} -uroot -p${mysql-instance-password} < /home/centos/pic-sure-schema.sql
+ sudo docker stop schema-init
+ echo "init'd mysql schemas"
+else
+# If a snapshot of a live stack's RDS is used, we need to configure the target stack's RDS resource table with the deployed stack's resources.
+# We cannot and should not be doing CORS between stacks. -TD
+ sudo docker run -d --name schema-init -e "MYSQL_RANDOM_ROOT_PASSWORD=yes" --rm mysql
+ sudo docker exec -i schema-init mysql -hpicsure-db.${target_stack}.${env_private_dns_name} -uroot -p${mysql-instance-password} < /home/centos/resources-registration.sql
+ sudo docker stop schema-init
+ echo "updated resources"
+fi
WILDFLY_IMAGE=`sudo docker load < /home/centos/pic-sure-wildfly.tar.gz | cut -d ' ' -f 3`
JAVA_OPTS="-Xms2g -Xmx26g -XX:MetaspaceSize=96M -XX:MaxMetaspaceSize=1024m -Djava.net.preferIPv4Stack=true"
-mkdir -p /var/log/wildfly-docker-logs
-mkdir -p /var/log/wildfly-docker-os-logs
-chmod 776 /var/log/wildfly-docker-logs
-chmod 776 /var/log/wildfly-docker-os-logs
+sudo mkdir -p /var/log/{wildfly-docker-logs,wildfly-docker-os-logs}
sudo docker run -u root --name=wildfly \
---restart unless-stopped \
--v /var/log/wildfly-docker-logs/:/opt/jboss/wildfly/standalone/log/ \
--v /home/centos/standalone.xml:/opt/jboss/wildfly/standalone/configuration/standalone.xml \
--v /home/centos/fence_mapping.json:/usr/local/docker-config/fence_mapping.json \
--v /home/centos/aggregate-resource.properties:/opt/jboss/wildfly/standalone/configuration/aggregate-data-sharing/pic-sure-aggregate-resource/resource.properties \
--v /home/centos/visualization-resource.properties:/opt/jboss/wildfly/standalone/configuration/visualization/pic-sure-visualization-resource/resource.properties \
--v /home/centos/mysql_module.xml:/opt/jboss/wildfly/modules/system/layers/base/com/sql/mysql/main/module.xml \
--v /home/centos/mysql-connector-java-5.1.38.jar:/opt/jboss/wildfly/modules/system/layers/base/com/sql/mysql/main/mysql-connector-java-5.1.38.jar \
--v /var/log/wildfly-docker-os-logs/:/var/log/ \
--p 8080:8080 -e JAVA_OPTS="$JAVA_OPTS" -d $WILDFLY_IMAGE
+ --restart unless-stopped \
+ --log-driver syslog --log-opt tag=wildfly \
+ -v /var/log/wildfly-docker-logs/:/opt/jboss/wildfly/standalone/log/ \
+ -v /home/centos/standalone.xml:/opt/jboss/wildfly/standalone/configuration/standalone.xml \
+ -v /home/centos/fence_mapping.json:/usr/local/docker-config/fence_mapping.json \
+ -v /home/centos/aggregate-resource.properties:/opt/jboss/wildfly/standalone/configuration/aggregate-data-sharing/pic-sure-aggregate-resource/resource.properties \
+ -v /home/centos/mysql_module.xml:/opt/jboss/wildfly/modules/system/layers/base/com/sql/mysql/main/module.xml \
+ -v /home/centos/mysql-connector-java-5.1.38.jar:/opt/jboss/wildfly/modules/system/layers/base/com/sql/mysql/main/mysql-connector-java-5.1.38.jar \
+ -v /var/log/wildfly-docker-os-logs/:/var/log/ \
+ -v /home/centos/visualization-resource.properties:/opt/jboss/wildfly/standalone/configuration/visualization/pic-sure-visualization-resource/resource.properties \
+ -p 8080:8080 -e JAVA_OPTS="$JAVA_OPTS" -d $WILDFLY_IMAGE
INSTANCE_ID=$(curl -H "X-aws-ec2-metadata-token: $(curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600")" --silent http://169.254.169.254/latest/meta-data/instance-id)
-sudo /usr/local/bin/aws --region=us-east-1 ec2 create-tags --resources $${INSTANCE_ID} --tags Key=InitComplete,Value=true
+sudo /usr/bin/aws --region=us-east-1 ec2 create-tags --resources $${INSTANCE_ID} --tags Key=InitComplete,Value=true
diff --git a/app-infrastructure/security-groups.tf b/app-infrastructure/security-groups.tf
new file mode 100644
index 00000000..c3832065
--- /dev/null
+++ b/app-infrastructure/security-groups.tf
@@ -0,0 +1,121 @@
+### Inbound
+resource "aws_security_group" "inbound-httpd-from-alb" {
+ name = "allow_inbound_from_public_subnet_to_httpd_${local.uniq_name}"
+ description = "Allow inbound traffic from public subnets to httpd servers"
+ vpc_id = local.target_vpc
+
+ ingress {
+ from_port = 443
+ to_port = 443
+ protocol = "tcp"
+ cidr_blocks = local.public_subnet_cidrs
+ }
+
+ tags = {
+ Owner = "Avillach_Lab"
+ Environment = var.environment_name
+    Name        = "inbound-httpd Security Group - ${var.target_stack} - ${local.uniq_name}"
+ }
+}
+
+resource "aws_security_group" "inbound-wildfly-from-httpd" {
+ name = "allow_inbound_from_httpd_subnet_${local.uniq_name}"
+ description = "Allow inbound traffic from httpd to port 8080 on wildfly"
+ vpc_id = local.target_vpc
+
+ ingress {
+ from_port = 8080
+ to_port = 8080
+ protocol = "tcp"
+ #cidr_blocks = local.private1_subnet_ids
+ security_groups = [aws_security_group.inbound-httpd-from-alb.id]
+ }
+
+ tags = {
+ Owner = "Avillach_Lab"
+ Environment = var.environment_name
+    Name        = "inbound-wildfly Security Group - ${var.target_stack} - ${local.uniq_name}"
+ }
+}
+
+
+resource "aws_security_group" "inbound-hpds-from-wildfly" {
+ name = "allow_inbound_from_private_subnet_to_hpds_${local.uniq_name}"
+  description = "Allow inbound traffic from wildfly on port 8080 for hpds"
+ vpc_id = local.target_vpc
+
+ ingress {
+ from_port = 8080
+ to_port = 8080
+ protocol = "tcp"
+ security_groups = [aws_security_group.inbound-wildfly-from-httpd.id]
+ }
+
+ tags = {
+ Owner = "Avillach_Lab"
+ Environment = var.environment_name
+ Name = "inbound-hpds Security Group - ${var.target_stack} - ${local.uniq_name}"
+ }
+}
+
+
+resource "aws_security_group" "inbound-dictionary-from-wildfly" {
+ name = "allow_inbound_from_dictionary_to_hpds_${local.uniq_name}"
+ description = "Allow inbound traffic from private-subnets on port 8080 for hpds"
+ vpc_id = local.target_vpc
+
+ ingress {
+ from_port = 8080
+ to_port = 8080
+ protocol = "tcp"
+ security_groups = [aws_security_group.inbound-wildfly-from-httpd.id]
+ }
+
+ tags = {
+ Owner = "Avillach_Lab"
+ Environment = var.environment_name
+ Name = "inbound-wildfly Security Group - ${var.target_stack} - ${local.uniq_name}"
+ }
+}
+
+
+resource "aws_security_group" "inbound-mysql-from-wildfly" {
+ name = "allow_inbound_from_wildfly_to_mysql_${local.uniq_name}"
+ description = "Allow inbound traffic from wildfly to mysql on port 3306"
+ vpc_id = local.target_vpc
+
+ ingress {
+ from_port = 3306
+ to_port = 3306
+ protocol = "tcp"
+ security_groups = [aws_security_group.inbound-wildfly-from-httpd.id]
+ }
+
+ tags = {
+ Owner = "Avillach_Lab"
+ Environment = var.environment_name
+ Name = "inbound-mysql Security Group - ${var.target_stack} - ${local.uniq_name}"
+ }
+}
+
+
+### Outbound
+resource "aws_security_group" "outbound-to-internet" {
+ name = "allow_outbound_to_public_internet_${local.uniq_name}"
+ description = "Allow outbound traffic to public internet"
+ vpc_id = local.target_vpc
+
+ egress {
+ from_port = 0
+ to_port = 0
+ protocol = "-1"
+ cidr_blocks = ["0.0.0.0/0"]
+ }
+
+ tags = {
+ Owner = "Avillach_Lab"
+ Environment = var.environment_name
+ Name = "outbound-to-internet Security Group - ${var.target_stack} - ${local.uniq_name}"
+ }
+}
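Unlike the deleted CIDR-based rules, these groups chain by referencing the upstream security group (ALB subnets -> httpd -> wildfly -> hpds/dictionary/mysql), so reachability follows instances rather than subnet ranges. A hypothetical consumer, reusing local.ami_id and local.private2_subnet_ids as wildfly-instance.tf does (the instance below is illustrative, not part of this diff):

resource "aws_instance" "hpds-ec2-example" {
  ami           = local.ami_id
  instance_type = "m5.2xlarge"
  subnet_id     = local.private2_subnet_ids[0]

  # Only traffic originating from the wildfly security group reaches port 8080.
  vpc_security_group_ids = [
    aws_security_group.inbound-hpds-from-wildfly.id,
    aws_security_group.outbound-to-internet.id,
  ]
}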
diff --git a/app-infrastructure/trend-micro-dsm-security-groups.tf b/app-infrastructure/trend-micro-dsm-security-groups.tf
deleted file mode 100644
index 35d45543..00000000
--- a/app-infrastructure/trend-micro-dsm-security-groups.tf
+++ /dev/null
@@ -1,51 +0,0 @@
-
-resource "aws_security_group" "inbound-from-trend-micro" {
- name = "inbound-from-trend-micro_${var.stack_githash}"
- description = "Allow inbound traffic from TrendMicro DSA"
- vpc_id = var.target-vpc
-
- ingress {
- from_port = 4118
- to_port = 4118
- protocol = "tcp"
- cidr_blocks = [
- "172.25.255.78/32"
- ]
- }
-
- tags = {
- Owner = "Avillach_Lab"
- Environment = "development"
- Name = "FISMA Terraform Playground - ${var.stack_githash} - inbound-from-trend-micro Security Group - ${var.target-stack}"
- }
-}
-
-resource "aws_security_group" "outbound-to-trend-micro" {
- name = "outbound-to-trend-micro_${var.stack_githash}"
- description = "Allow outbound traffic from TrendMicro DSA"
- vpc_id = var.target-vpc
-
- egress {
- from_port = 4120
- to_port = 4120
- protocol = "tcp"
- cidr_blocks = [
- "172.25.255.78/32"
- ]
- }
-
- egress {
- from_port = 5274
- to_port = 5274
- protocol = "tcp"
- cidr_blocks = [
- "172.25.255.78/32"
- ]
- }
-
- tags = {
- Owner = "Avillach_Lab"
- Environment = "development"
- Name = "FISMA Terraform Playground - ${var.stack_githash} - outbound-to-trend-micro Security Group - ${var.target-stack}"
- }
-}
\ No newline at end of file
diff --git a/app-infrastructure/variables.tf b/app-infrastructure/variables.tf
index 1ae0ba79..40e6b0dd 100644
--- a/app-infrastructure/variables.tf
+++ b/app-infrastructure/variables.tf
@@ -1,40 +1,49 @@
variable "stack_githash" {
type = string
}
+
variable "stack_githash_long" {
type = string
}
-variable "target-stack" {
- description = "The stack identifier"
+variable "picsure_rds_snapshot_id" {
+ description = "Snapshot id to use for picsure rds instance. leave blank to create rds without a snapshot"
type = string
}
-variable "dataset-s3-object-key" {
- description = "The s3 object key within the environment s3 bucket"
+variable "target_stack" {
+ description = "Green stack to target for deployment"
type = string
}
-variable "destigmatized-dataset-s3-object-key" {
- description = "The s3 object key within the environment s3 bucket"
+variable "stack_s3_bucket" {
+ description = "S3 bucket for deployments"
type = string
}
-variable "genomic-dataset-s3-object-key" {
+variable "dataset_s3_object_key" {
description = "The s3 object key within the environment s3 bucket"
type = string
}
+variable "destigmatized_dataset_s3_object_key" {
+ description = "The s3 object key within the environment s3 bucket"
+ type = string
+}
-variable "ami-id" {
- description = "AMI to use for all ec2s"
+variable "genomic_dataset_s3_object_key" {
+ description = "The s3 object key within the environment s3 bucket"
type = string
}
variable "environment_name" {
description = "The name of the environment"
type = string
- default = "picsure"
+}
+
+variable "env_staging_subdomain" {
+ description = "Add Stack Tag"
+ type = string
}
variable "rds_master_username" {
@@ -46,21 +55,81 @@ variable "rds_master_username" {
variable "rds_master_password" {
description = "Master Password"
type = string
- default = "picsure!98765"
-}
+ default = "picsure!98765"
+}
-variable "allowed_hosts" {
- description = "List of allowed hosts for hosts header validation"
- type = string
- default = ""
+variable "env_public_dns_name" {
+ type = string
}
-variable "analytics_id" {
+variable "env_public_dns_name_staging" {
+ type = string
+}
+
+variable "env_private_dns_name" {
+ type = string
+}
+
+variable "env_hosted_zone_id" {
+ type = string
+}
+
+variable "env_is_open_access" {
+ type = bool
+}
+
+variable "include_auth_hpds" {
+ type = bool
+}
+
+variable "include_open_hpds" {
+ type = bool
+}
+
+# These secrets are supplied by stack_variables; the empty-string defaults below are placeholders.
+variable "picsure_token_introspection_token" {
+ type = string
+ default = ""
+}
+
+variable "picsure_client_secret" {
+ type = string
+ default = ""
+}
+
+variable "fence_client_secret" {
type = string
+ default = ""
+}
+variable "fence_client_id" {
+ type = string
+ default = ""
+}
+
+variable "idp_provider_uri" {
+ type = string
+ default = "https://gen3.biodatacatalyst.nhlbi.nih.gov"
+}
+
+variable "idp_provider" {
+ type = string
+ default = "fence"
+}
+
+variable "application_id_for_base_query" {
+ type = string
+}
+
+variable "analytics_id" {
+ type = string
default = "__ANALYTICS_ID__"
}
variable "tag_manager_id" {
- type = string
+ type = string
default = "__TAG_MANAGER_ID__"
}
+
+variable "env_project" {
+ type = string
+}
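A hypothetical terraform.tfvars showing how a CI pipeline might populate the new variables; every value below is a placeholder, secret-bearing variables are deliberately left to stack_variables, and required inputs such as stack_githash and the dataset object keys are omitted for brevity:

target_stack                  = "a"
stack_s3_bucket               = "example-deployment-bucket"
environment_name              = "example-env"
env_staging_subdomain         = "staging"
env_public_dns_name           = "picsure.example.org"
env_public_dns_name_staging   = "staging.picsure.example.org"
env_private_dns_name          = "example.internal"
env_hosted_zone_id            = "Z0000000EXAMPLE"
env_is_open_access            = false
include_auth_hpds             = true
include_open_hpds             = false
application_id_for_base_query = "00000000-0000-0000-0000-000000000000"
env_project                   = "example-project"
picsure_rds_snapshot_id       = "" # create the RDS instance without a snapshot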
diff --git a/app-infrastructure/wildfly-instance.tf b/app-infrastructure/wildfly-instance.tf
index 36b096ca..1dd8b3b7 100644
--- a/app-infrastructure/wildfly-instance.tf
+++ b/app-infrastructure/wildfly-instance.tf
@@ -1,13 +1,16 @@
-
data "template_file" "wildfly-user_data" {
template = file("scripts/wildfly-user_data.sh")
vars = {
- stack_githash = var.stack_githash_long
- stack_s3_bucket = var.stack_s3_bucket
- dataset_s3_object_key = var.dataset-s3-object-key
- mysql-instance-address = aws_db_instance.pic-sure-mysql.address
+ stack_githash = var.stack_githash_long
+ stack_s3_bucket = var.stack_s3_bucket
+ dataset_s3_object_key = var.dataset_s3_object_key
+ mysql-instance-address = aws_db_instance.pic-sure-mysql.address
mysql-instance-password = random_password.picsure-db-password.result
- target-stack = var.target-stack
+ target_stack = var.target_stack
+ gss_prefix = "bdc_${var.env_is_open_access ? "open" : "auth"}_${var.environment_name}"
+ env_private_dns_name = var.env_private_dns_name
+ env_public_dns_name = var.env_public_dns_name
+ picsure_rds_snapshot_id = var.picsure_rds_snapshot_id
}
}
@@ -24,28 +27,20 @@ data "template_cloudinit_config" "wildfly-user-data" {
}
resource "aws_instance" "wildfly-ec2" {
- ami = var.ami-id
+ ami = local.ami_id
instance_type = "m5.2xlarge"
- key_name = "biodata_nessus"
-
- associate_public_ip_address = false
+ subnet_id = local.private2_subnet_ids[0]
- subnet_id = var.app-subnet-us-east-1a-id
-
- iam_instance_profile = "wildfly-deployment-s3-profile-${var.target-stack}-${var.stack_githash}"
+ iam_instance_profile = "wildfly-deployment-s3-profile-${var.target_stack}-${local.uniq_name}"
user_data = data.template_cloudinit_config.wildfly-user-data.rendered
vpc_security_group_ids = [
aws_security_group.outbound-to-internet.id,
- aws_security_group.inbound-from-edge.id,
- aws_security_group.outbound-to-hpds.id,
- aws_security_group.outbound-to-aurora.id,
- aws_security_group.outbound-to-trend-micro.id,
- aws_security_group.inbound-app-ssh-from-nessus.id,
- aws_security_group.inbound-hpds-from-app.id
+ aws_security_group.inbound-wildfly-from-httpd.id
]
+
root_block_device {
delete_on_termination = true
encrypted = true
@@ -54,14 +49,16 @@ resource "aws_instance" "wildfly-ec2" {
tags = {
Owner = "Avillach_Lab"
- Environment = "development"
- Name = "FISMA Terraform Playground - ${var.stack_githash} - Wildfly - ${var.target-stack}"
+ Environment = var.environment_name
+ Project = local.project
+ Stack = var.target_stack
+ Name = "Wildfly - ${var.target_stack} - ${local.uniq_name}"
}
-
+
metadata_options {
- http_endpoint = "enabled"
- http_tokens = "required"
- instance_metadata_tags = "enabled"
+ http_endpoint = "enabled"
+ http_tokens = "required"
+ instance_metadata_tags = "enabled"
}
}
@@ -73,52 +70,44 @@ data "template_file" "wildfly-standalone-xml" {
picsure_client_secret = var.picsure_client_secret
fence_client_secret = var.fence_client_secret
fence_client_id = var.fence_client_id
- target-stack = var.target-stack
+ target_stack = var.target_stack
picsure_token_introspection_token = var.picsure_token_introspection_token
mysql-instance-address = aws_db_instance.pic-sure-mysql.address
- }
-}
+ env_private_dns_name = var.env_private_dns_name
+ env_public_dns_name = var.env_public_dns_name
+ idp_provider = var.idp_provider
+ idp_provider_uri = var.idp_provider_uri
+ application_id_for_base_query = var.application_id_for_base_query
-resource "local_file" "wildfly-standalone-xml-file" {
- content = data.template_file.wildfly-standalone-xml.rendered
- filename = "standalone.xml"
-}
-
-data "template_file" "pic-sure-schema-sql" {
- template = file("configs/pic-sure-schema.sql")
- vars = {
- picsure_token_introspection_token = var.picsure_token_introspection_token
- target-stack = var.target-stack
}
}
-resource "local_file" "pic-sure-schema-sql-file" {
- content = data.template_file.pic-sure-schema-sql.rendered
- filename = "pic-sure-schema.sql"
+resource "local_file" "wildfly-standalone-xml-file" {
+ content = data.template_file.wildfly-standalone-xml.rendered
+ filename = "standalone.xml"
}
-
data "template_file" "aggregate-resource-properties" {
template = file("configs/aggregate-resource.properties")
vars = {
- target-stack = var.target-stack
+ target_stack = var.target_stack
+ env_private_dns_name = var.env_private_dns_name
}
}
resource "local_file" "aggregate-resource-properties-file" {
- content = data.template_file.aggregate-resource-properties.rendered
- filename = "aggregate-resource.properties"
+ content = data.template_file.aggregate-resource-properties.rendered
+ filename = "aggregate-resource.properties"
}
data "template_file" "visualization-resource-properties" {
template = file("configs/visualization-resource.properties")
vars = {
- target-stack = var.target-stack
+ target_stack = var.target_stack
}
}
resource "local_file" "visualization-resource-properties-file" {
- content = data.template_file.visualization-resource-properties.rendered
- filename = "visualization-resource.properties"
+ content = data.template_file.visualization-resource-properties.rendered
+ filename = "visualization-resource.properties"
}
-
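The template_file/local_file pairs above render configuration onto the deployment runner; how the files reach the instance is not shown in this diff. One plausible hand-off, matching the /home/centos/standalone.xml path mounted by the wildfly user_data (the key below is a hypothetical placeholder, not the pipeline's confirmed layout):

resource "aws_s3_bucket_object" "wildfly-standalone-xml-upload" {
  bucket  = var.stack_s3_bucket
  key     = "/configs/standalone.xml" # hypothetical key
  content = data.template_file.wildfly-standalone-xml.rendered
}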
diff --git a/deployment-s3-bucket/s3_baseline.tf b/deployment-s3-bucket/s3_baseline.tf
index 3d1cc7b0..be27a545 100644
--- a/deployment-s3-bucket/s3_baseline.tf
+++ b/deployment-s3-bucket/s3_baseline.tf
@@ -1,116 +1,62 @@
-
resource "aws_s3_bucket_object" "certs-folder" {
bucket = var.stack_s3_bucket
key = "certs/"
content_type = "application/x-directory"
+ lifecycle {
+ prevent_destroy = true
+ }
}
resource "aws_s3_bucket_object" "configs-folder" {
bucket = var.stack_s3_bucket
key = "configs/"
content_type = "application/x-directory"
+ lifecycle {
+ prevent_destroy = true
+ }
}
resource "aws_s3_bucket_object" "data-folder" {
bucket = var.stack_s3_bucket
key = "data/"
content_type = "application/x-directory"
+ lifecycle {
+ prevent_destroy = true
+ }
}
resource "aws_s3_bucket_object" "modules-folder" {
bucket = var.stack_s3_bucket
key = "modules/"
content_type = "application/x-directory"
+ lifecycle {
+ prevent_destroy = true
+ }
}
resource "aws_s3_bucket_object" "releases-folder" {
bucket = var.stack_s3_bucket
key = "releases/"
content_type = "application/x-directory"
-}
-
-resource "aws_s3_bucket_object" "tfstate-baseline-a" {
- bucket = var.stack_s3_bucket
- key = "/deployment_state_metadata/a/terraform.tfstate"
- content = file("terraform.tfstate_baseline")
-}
-
-resource "aws_s3_bucket_object" "tfstate-baseline-b" {
- bucket = var.stack_s3_bucket
- key = "/deployment_state_metadata/b/terraform.tfstate"
- content = file("terraform.tfstate_baseline")
-}
-
-resource "random_password" "picsure-client-secret" {
- length = 32
- special = false
-}
-
-data "template_file" "stack_variables_template" {
- template = file("stack_variables.tf_template")
- vars = {
- picsure_client_secret = random_password.picsure-client-secret.result
- stack_s3_bucket = var.stack_s3_bucket
+ lifecycle {
+ prevent_destroy = true
}
}
-resource "aws_s3_bucket_object" "stack-variables-baseline-a" {
- bucket = var.stack_s3_bucket
- key = "/deployment_state_metadata/a/stack_variables.tf"
- content = data.template_file.stack_variables_template.rendered
-}
-
-resource "aws_s3_bucket_object" "stack-variables-baseline-b" {
- bucket = var.stack_s3_bucket
- key = "/deployment_state_metadata/b/stack_variables.tf"
- content = data.template_file.stack_variables_template.rendered
-}
-
-resource "aws_s3_bucket_object" "subnet-variables-baseline-a" {
- bucket = var.stack_s3_bucket
- key = "/deployment_state_metadata/a/subnet_variables.tf"
- content = file("subnet_variables_a.tf_template")
-}
-
-resource "aws_s3_bucket_object" "subnet-variables-baseline-b" {
- bucket = var.stack_s3_bucket
- key = "/deployment_state_metadata/b/subnet_variables.tf"
- content = file("subnet_variables_b.tf_template")
-}
-
-resource "aws_s3_bucket_object" "stacks-json" {
- bucket = var.stack_s3_bucket
- key = "/deployment_state_metadata/stacks.json"
- object_lock_legal_hold_status = "OFF"
- content = file("stacks.json")
-}
-
resource "aws_s3_bucket_object" "mysql-connector-jar" {
bucket = var.stack_s3_bucket
key = "/modules/mysql/mysql-connector-java-5.1.38.jar"
content_base64 = filebase64("mysql-connector-java-5.1.38.jar")
+ lifecycle {
+ prevent_destroy = true
+ }
}
resource "aws_s3_bucket_object" "mysql-module-xml" {
bucket = var.stack_s3_bucket
key = "/modules/mysql/module.xml"
- content = file("wildfly_mysql_module.xml")
-}
-
-resource "aws_s3_bucket_object" "server-cert" {
- bucket = var.stack_s3_bucket
- key = "/certs/httpd/server.crt"
- content = file("server.crt")
-}
-
-resource "aws_s3_bucket_object" "server-key" {
- bucket = var.stack_s3_bucket
- key = "/certs/httpd/server.key"
- content = file("server.key")
-}
-
-resource "aws_s3_bucket_object" "server-chain" {
- bucket = var.stack_s3_bucket
- key = "/certs/httpd/server.chain"
- content = file("server.chain")
+ content = file("configs/wildfly_mysql_module.xml")
+ lifecycle {
+ prevent_destroy = true
+ }
}
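The lifecycle guard added to every surviving object means any plan that would delete one of them, including a full terraform destroy, now fails instead of silently dropping shared deployment artifacts. A minimal sketch of the pattern with a hypothetical key:

resource "aws_s3_bucket_object" "example-guarded-folder" {
  bucket       = var.stack_s3_bucket
  key          = "example/" # hypothetical
  content_type = "application/x-directory"

  # Terraform errors out on any plan that would destroy this object. To
  # retire it intentionally: set prevent_destroy = false, apply, then
  # remove the resource block.
  lifecycle {
    prevent_destroy = true
  }
}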
diff --git a/deployment-s3-bucket/s3_baseline.tf_old b/deployment-s3-bucket/s3_baseline.tf_old
new file mode 100644
index 00000000..3d1cc7b0
--- /dev/null
+++ b/deployment-s3-bucket/s3_baseline.tf_old
@@ -0,0 +1,116 @@
+
+resource "aws_s3_bucket_object" "certs-folder" {
+ bucket = var.stack_s3_bucket
+ key = "certs/"
+ content_type = "application/x-directory"
+}
+
+resource "aws_s3_bucket_object" "configs-folder" {
+ bucket = var.stack_s3_bucket
+ key = "configs/"
+ content_type = "application/x-directory"
+}
+
+resource "aws_s3_bucket_object" "data-folder" {
+ bucket = var.stack_s3_bucket
+ key = "data/"
+ content_type = "application/x-directory"
+}
+
+resource "aws_s3_bucket_object" "modules-folder" {
+ bucket = var.stack_s3_bucket
+ key = "modules/"
+ content_type = "application/x-directory"
+}
+
+resource "aws_s3_bucket_object" "releases-folder" {
+ bucket = var.stack_s3_bucket
+ key = "releases/"
+ content_type = "application/x-directory"
+}
+
+resource "aws_s3_bucket_object" "tfstate-baseline-a" {
+ bucket = var.stack_s3_bucket
+ key = "/deployment_state_metadata/a/terraform.tfstate"
+ content = file("terraform.tfstate_baseline")
+}
+
+resource "aws_s3_bucket_object" "tfstate-baseline-b" {
+ bucket = var.stack_s3_bucket
+ key = "/deployment_state_metadata/b/terraform.tfstate"
+ content = file("terraform.tfstate_baseline")
+}
+
+resource "random_password" "picsure-client-secret" {
+ length = 32
+ special = false
+}
+
+data "template_file" "stack_variables_template" {
+ template = file("stack_variables.tf_template")
+ vars = {
+ picsure_client_secret = random_password.picsure-client-secret.result
+ stack_s3_bucket = var.stack_s3_bucket
+ }
+}
+
+resource "aws_s3_bucket_object" "stack-variables-baseline-a" {
+ bucket = var.stack_s3_bucket
+ key = "/deployment_state_metadata/a/stack_variables.tf"
+ content = data.template_file.stack_variables_template.rendered
+}
+
+resource "aws_s3_bucket_object" "stack-variables-baseline-b" {
+ bucket = var.stack_s3_bucket
+ key = "/deployment_state_metadata/b/stack_variables.tf"
+ content = data.template_file.stack_variables_template.rendered
+}
+
+resource "aws_s3_bucket_object" "subnet-variables-baseline-a" {
+ bucket = var.stack_s3_bucket
+ key = "/deployment_state_metadata/a/subnet_variables.tf"
+ content = file("subnet_variables_a.tf_template")
+}
+
+resource "aws_s3_bucket_object" "subnet-variables-baseline-b" {
+ bucket = var.stack_s3_bucket
+ key = "/deployment_state_metadata/b/subnet_variables.tf"
+ content = file("subnet_variables_b.tf_template")
+}
+
+resource "aws_s3_bucket_object" "stacks-json" {
+ bucket = var.stack_s3_bucket
+ key = "/deployment_state_metadata/stacks.json"
+ object_lock_legal_hold_status = "OFF"
+ content = file("stacks.json")
+}
+
+resource "aws_s3_bucket_object" "mysql-connector-jar" {
+ bucket = var.stack_s3_bucket
+ key = "/modules/mysql/mysql-connector-java-5.1.38.jar"
+ content_base64 = filebase64("mysql-connector-java-5.1.38.jar")
+}
+
+resource "aws_s3_bucket_object" "mysql-module-xml" {
+ bucket = var.stack_s3_bucket
+ key = "/modules/mysql/module.xml"
+ content = file("wildfly_mysql_module.xml")
+}
+
+resource "aws_s3_bucket_object" "server-cert" {
+ bucket = var.stack_s3_bucket
+ key = "/certs/httpd/server.crt"
+ content = file("server.crt")
+}
+
+resource "aws_s3_bucket_object" "server-key" {
+ bucket = var.stack_s3_bucket
+ key = "/certs/httpd/server.key"
+ content = file("server.key")
+}
+
+resource "aws_s3_bucket_object" "server-chain" {
+ bucket = var.stack_s3_bucket
+ key = "/certs/httpd/server.chain"
+ content = file("server.chain")
+}
diff --git a/prod-dns-pointer/prod-httpd-dns-record.tf b/prod-dns-pointer/httpd-dns-record.tf
similarity index 60%
rename from prod-dns-pointer/prod-httpd-dns-record.tf
rename to prod-dns-pointer/httpd-dns-record.tf
index b67c805b..53245b12 100644
--- a/prod-dns-pointer/prod-httpd-dns-record.tf
+++ b/prod-dns-pointer/httpd-dns-record.tf
@@ -1,20 +1,26 @@
-variable "target-prod-stack" {
+# passed in as a CI pipeline parameter
+variable "target_stack" {
description = "The stack identifier to become the current prod"
type = string
}
-variable "target-next-prod-stack" {
+# passed in as a CI pipeline parameter
+variable "target_next_stack" {
description = "The stack identifier to become the next prod(or stage)"
type = string
}
+variable "env_private_dns_name" {
+ type = string
+}
+
resource "aws_route53_record" "prod-httpd-dns-record" {
zone_id = var.internal-dns-zone-id
name = "prod-httpd"
type = "CNAME"
ttl = "60"
- records = ["httpd.${var.target-prod-stack}.datastage.hms.harvard.edu"]
+ records = ["httpd.${var.target_stack}.${var.env_private_dns_name}"]
}
resource "aws_route53_record" "next-prod-httpd-dns-record" {
@@ -22,5 +28,5 @@ resource "aws_route53_record" "next-prod-httpd-dns-record" {
name = "next-prod-httpd"
type = "CNAME"
ttl = "60"
- records = ["httpd.${var.target-next-prod-stack}.datastage.hms.harvard.edu"]
+ records = ["httpd.${var.target_next_stack}.${var.env_private_dns_name}"]
}
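These two CNAMEs are the blue/green cutover point: repointing them at a stack's httpd record moves traffic without touching the stacks themselves. With hypothetical inputs, the records resolve as:

# target_stack = "a", target_next_stack = "b", env_private_dns_name = "example.internal"
#   prod-httpd       CNAME  httpd.a.example.internal
#   next-prod-httpd  CNAME  httpd.b.example.internal
# Promoting stack b is then a variable change and a re-apply; no instances move.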
diff --git a/s3-deployment-roles/s3_roles.tf b/s3-deployment-roles/s3_roles.tf
index 5fc965af..66496e10 100644
--- a/s3-deployment-roles/s3_roles.tf
+++ b/s3-deployment-roles/s3_roles.tf
@@ -5,33 +5,33 @@ variable "stack_githash_long" {
type = string
}
-variable "target-stack" {
+variable "target_stack" {
description = "The stack identifier"
type = string
}
-variable "dataset-s3-object-key" {
+variable "dataset_s3_object_key" {
description = "The s3 object key within the environment s3 bucket"
type = string
}
-variable "destigmatized-dataset-s3-object-key" {
+variable "destigmatized_dataset_s3_object_key" {
description = "The s3 object key within the environment s3 bucket"
type = string
}
-variable "genomic-dataset-s3-object-key" {
+variable "genomic_dataset_s3_object_key" {
description = "The s3 object key within the environment s3 bucket"
type = string
}
resource "aws_iam_instance_profile" "wildfly-deployment-s3-profile" {
- name = "wildfly-deployment-s3-profile-${var.target-stack}-${var.stack_githash}"
+ name = "wildfly-deployment-s3-profile-${var.target_stack}-${var.stack_githash}"
role = aws_iam_role.wildfly-deployment-s3-role.name
}
resource "aws_iam_role_policy" "wildfly-deployment-s3-policy" {
- name = "wildfly-deployment-s3-policy-${var.target-stack}-${var.stack_githash}"
+ name = "wildfly-deployment-s3-policy-${var.target_stack}-${var.stack_githash}"
role = aws_iam_role.wildfly-deployment-s3-role.id
policy = <