From 1065b06d8b6c77ed48754739503f2c04cff3e0e4 Mon Sep 17 00:00:00 2001 From: DecFox <33030671+DecFox@users.noreply.github.com> Date: Wed, 14 Aug 2024 18:59:03 +0530 Subject: [PATCH 01/10] feat: add oonifindings migration script (#89) This diff adds the migration script to transfer data from the current clickhouse incidents table to the postgres table we have now. --- scripts/migrate-oonifindings.py | 92 +++++++++++++++++++++++++++++++++ 1 file changed, 92 insertions(+) create mode 100644 scripts/migrate-oonifindings.py diff --git a/scripts/migrate-oonifindings.py b/scripts/migrate-oonifindings.py new file mode 100644 index 00000000..1191f194 --- /dev/null +++ b/scripts/migrate-oonifindings.py @@ -0,0 +1,92 @@ +""" +Dump OONI findings for clickhouse into postgres by performing appropriate +transformations. + +To setup run: + +pip install psycopg2 clickhouse-driver + +Then: + +OONI_PG_PASSWORD=XXXX python migrate-oonifindings.py +""" +import os +import json + +from clickhouse_driver import Client as Clickhouse +import psycopg2 + + +def dump_oonifindings_clickhouse(): + client = Clickhouse("localhost") + + rows, cols = client.execute("SELECT * FROM incidents", with_column_types=True) + col_names = list(map(lambda x: x[0], cols)) + + findings = [] + for row in rows: + d = dict(zip(col_names, row)) + + row = { + "finding_id": d["id"], + "update_time": d["update_time"], + "start_time": d["start_time"], + "end_time": d["end_time"], + "create_time": d["create_time"], + "creator_account_id": d["creator_account_id"], + "reported_by": d["reported_by"], + "title": d["title"], + "short_description": d["short_description"], + "text": d["text"], + "event_type": d["event_type"], + "published": d["published"], + "deleted": d["deleted"], + "email_address": d["email_address"], + "country_codes": json.dumps(d["CCs"]), + "asns": json.dumps(d["ASNs"]), + "domains": json.dumps(d["domains"]), + "links": json.dumps(d["links"]), + "test_names": json.dumps(d["test_names"]), + } + findings.append(row) + + return findings + + +def insert_findings_postgresql(data_to_insert): + db_params = { + 'dbname': 'oonipg', + 'user': 'oonipg', + 'password': os.environ["OONI_PG_PASSWORD"], + 'host': 'ooni-tier0-postgres' + } + + conn = psycopg2.connect(**db_params) + cur = conn.cursor() + + col_names = list(data_to_insert[0].keys()) + col_values = ["%s"]*len(col_names) + insert_query = f'INSERT INTO oonifinding ({",".join(col_names)}) VALUES ({",".join(col_values)})' + + insert_count = 0 + try: + for row in data_to_insert: + values = [row[cn] for cn in col_names] + cur.execute(insert_query, values) + insert_count += 1 + conn.commit() + print("Data inserted successfully") + except Exception as e: + conn.rollback() + print(f"Failed after {insert_count} rows at row:") + print(row) + print(f"An error occurred: {e}") + raise e + finally: + # Close the cursor and connection + cur.close() + conn.close() + + +valid_links = dump_oonifindings_clickhouse() +insert_findings_postgresql(valid_links) From c57d12d9b9bf3c7ed3f4e45f6731f189bb18e434 Mon Sep 17 00:00:00 2001 From: DecFox <33030671+DecFox@users.noreply.github.com> Date: Thu, 15 Aug 2024 00:07:47 +0530 Subject: [PATCH 02/10] fix(scripts): add missing tags column while migrating (#90) We were missing a `tags` column which led to a pydantic validation error in the API. This should be resolved here. 
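A column mismatch like this one only surfaces once the API tries to validate the migrated rows. A small pre-flight check along the following lines could catch it before any data is written. This is only a sketch: it reuses the connection parameters and the oonifinding table name from the script above, and assumes the transformed rows are the dictionaries returned by dump_oonifindings_clickhouse().

import os

import psycopg2


def column_mismatch(findings, table="oonifinding"):
    """Compare the keys of the transformed rows against the target table's columns."""
    conn = psycopg2.connect(
        dbname="oonipg",
        user="oonipg",
        password=os.environ["OONI_PG_PASSWORD"],
        host="ooni-tier0-postgres",
    )
    try:
        with conn.cursor() as cur:
            cur.execute(
                "SELECT column_name FROM information_schema.columns WHERE table_name = %s",
                (table,),
            )
            table_columns = {row[0] for row in cur.fetchall()}
    finally:
        conn.close()
    row_columns = set(findings[0].keys())
    # Columns the table defines but the transform never sets (e.g. the missing "tags";
    # some of these may be intentionally left to their defaults), and columns the
    # transform emits that the table does not have.
    return table_columns - row_columns, row_columns - table_columns

# Usage sketch:
# not_populated, unknown = column_mismatch(dump_oonifindings_clickhouse())
# if unknown:
#     raise SystemExit(f"transform emits columns not in target table: {unknown}")
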
--- scripts/migrate-oonifindings.py | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/migrate-oonifindings.py b/scripts/migrate-oonifindings.py index 1191f194..94f5158d 100644 --- a/scripts/migrate-oonifindings.py +++ b/scripts/migrate-oonifindings.py @@ -43,6 +43,7 @@ def dump_oonifindings_clickhouse(): "deleted": d["deleted"], "email_address": d["email_address"], "country_codes": json.dumps(d["CCs"]), + "tags": json.dumps(d["tags"]), "asns": json.dumps(d["ASNs"]), "domains": json.dumps(d["domains"]), "links": json.dumps(d["links"]), From 85f0331647dbb4b2ef669fc38190e1539c637aca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Arturo=20Filast=C3=B2?= Date: Wed, 28 Aug 2024 12:12:51 +0200 Subject: [PATCH 03/10] Add vasilis to data.ooni.org host --- ansible/host_vars/data.ooni.org | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/ansible/host_vars/data.ooni.org b/ansible/host_vars/data.ooni.org index 10369666..c3d9417f 100644 --- a/ansible/host_vars/data.ooni.org +++ b/ansible/host_vars/data.ooni.org @@ -47,6 +47,13 @@ ssh_users: [ "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKqG1VepfzDdSp3zG27jZq3S9/62CKPLh93F///ht9rf", ] + vasilis: + login: vasilis + comment: "Vasilis Ververis" + keys: + [ + "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMJYsbeTjdma5cKyZISOFQfHbwwlZbWugPx9haeOx1UR" + ] admin_usernames: [ art, majakomel, mehul, norbel ] non_admin_usernames: [ ain, siti, ingrid, joss ] jupyterhub_allowed_users: "{{ ssh_users }}" From 55c0b6a442a86f60c137de7e2a8259a9750e3dbf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Arturo=20Filast=C3=B2?= Date: Wed, 28 Aug 2024 12:14:09 +0200 Subject: [PATCH 04/10] Add vasilis to user list --- ansible/host_vars/data.ooni.org | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/host_vars/data.ooni.org b/ansible/host_vars/data.ooni.org index c3d9417f..48e20c09 100644 --- a/ansible/host_vars/data.ooni.org +++ b/ansible/host_vars/data.ooni.org @@ -55,6 +55,6 @@ ssh_users: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMJYsbeTjdma5cKyZISOFQfHbwwlZbWugPx9haeOx1UR" ] admin_usernames: [ art, majakomel, mehul, norbel ] -non_admin_usernames: [ ain, siti, ingrid, joss ] +non_admin_usernames: [ ain, siti, ingrid, joss, vasilis ] jupyterhub_allowed_users: "{{ ssh_users }}" admin_group_name: adm From b14a122efacdfb900be605e1c98ef5e5989cbd69 Mon Sep 17 00:00:00 2001 From: DecFox <33030671+DecFox@users.noreply.github.com> Date: Fri, 6 Sep 2024 02:54:58 +0530 Subject: [PATCH 05/10] refactor: remove individual load balancers from services (#97) This diff removes individual load balancers from the ooniapi services and only allows a single load balancer for routing based on api paths. 
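For reference, path-based forwarding on a shared ALB comes down to one listener rule per service. The repository wires this up in Terraform (in the ooniapi_frontend module, which is not part of this patch); the boto3 call below is purely an illustration of the shape of such a rule, with placeholder ARNs and an assumed path pattern for the OONI Run service.

import boto3

elbv2 = boto3.client("elbv2")

# Forward OONI Run API paths on the shared listener to the oonirun target group.
# The listener and target group ARNs are placeholders.
elbv2.create_rule(
    ListenerArn="arn:aws:elasticloadbalancing:...:listener/app/ooniapi/...",
    Priority=100,
    Conditions=[
        {
            "Field": "path-pattern",
            "PathPatternConfig": {"Values": ["/api/v2/oonirun/*"]},
        },
    ],
    Actions=[
        {
            "Type": "forward",
            "TargetGroupArn": "arn:aws:elasticloadbalancing:...:targetgroup/oonirun/...",
        },
    ],
)
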
--- tf/modules/ooniapi_service/main.tf | 88 --------------------------- tf/modules/ooniapi_service/outputs.tf | 8 --- 2 files changed, 96 deletions(-) diff --git a/tf/modules/ooniapi_service/main.tf b/tf/modules/ooniapi_service/main.tf index 34cfc70e..eb49cf1f 100644 --- a/tf/modules/ooniapi_service/main.tf +++ b/tf/modules/ooniapi_service/main.tf @@ -131,10 +131,6 @@ resource "aws_ecs_service" "ooniapi_service" { container_port = "80" } - depends_on = [ - aws_alb_listener.ooniapi_service_http, - ] - force_new_deployment = true tags = var.tags @@ -169,87 +165,3 @@ resource "aws_alb_target_group" "ooniapi_service_mapped" { tags = var.tags } - -resource "aws_alb" "ooniapi_service" { - name = local.name - subnets = var.public_subnet_ids - security_groups = var.ooniapi_service_security_groups - - tags = var.tags -} - -resource "aws_alb_listener" "ooniapi_service_http" { - load_balancer_arn = aws_alb.ooniapi_service.id - port = "80" - protocol = "HTTP" - - default_action { - target_group_arn = aws_alb_target_group.ooniapi_service_direct.id - type = "forward" - } - - tags = var.tags -} - -resource "aws_alb_listener" "front_end_https" { - load_balancer_arn = aws_alb.ooniapi_service.id - port = "443" - protocol = "HTTPS" - ssl_policy = "ELBSecurityPolicy-2016-08" - certificate_arn = aws_acm_certificate.ooniapi_service.arn - - default_action { - target_group_arn = aws_alb_target_group.ooniapi_service_direct.id - type = "forward" - } - - tags = var.tags -} - -resource "aws_route53_record" "ooniapi_service" { - zone_id = var.dns_zone_ooni_io - name = "${var.service_name}.api.${var.stage}.ooni.io" - type = "A" - - alias { - name = aws_alb.ooniapi_service.dns_name - zone_id = aws_alb.ooniapi_service.zone_id - evaluate_target_health = true - } -} - -resource "aws_acm_certificate" "ooniapi_service" { - domain_name = "${var.service_name}.api.${var.stage}.ooni.io" - validation_method = "DNS" - - tags = var.tags - - lifecycle { - create_before_destroy = true - } -} - -resource "aws_route53_record" "ooniapi_service_validation" { - for_each = { - for dvo in aws_acm_certificate.ooniapi_service.domain_validation_options : dvo.domain_name => { - name = dvo.resource_record_name - record = dvo.resource_record_value - type = dvo.resource_record_type - } - } - - allow_overwrite = true - name = each.value.name - records = [each.value.record] - ttl = 60 - type = each.value.type - zone_id = var.dns_zone_ooni_io -} - -resource "aws_acm_certificate_validation" "ooniapi_service" { - certificate_arn = aws_acm_certificate.ooniapi_service.arn - validation_record_fqdns = [for record in aws_route53_record.ooniapi_service_validation : record.fqdn] - depends_on = [ - aws_route53_record.ooniapi_service - ] -} diff --git a/tf/modules/ooniapi_service/outputs.tf b/tf/modules/ooniapi_service/outputs.tf index 90fcabd2..e035171d 100644 --- a/tf/modules/ooniapi_service/outputs.tf +++ b/tf/modules/ooniapi_service/outputs.tf @@ -1,11 +1,3 @@ -output "ooni_io_fqdn" { - value = aws_route53_record.ooniapi_service.name -} - -output "dns_name" { - value = aws_alb.ooniapi_service.dns_name -} - output "ecs_service_name" { value = aws_ecs_service.ooniapi_service.name } From 8edde89dca47e52906a1fc038fd294a5234280db Mon Sep 17 00:00:00 2001 From: DecFox <33030671+DecFox@users.noreply.github.com> Date: Fri, 6 Sep 2024 18:57:48 +0530 Subject: [PATCH 06/10] fix: remove microservice hosts from alerts (#98) This diff removes the microservice hosts from the prometheus alerts. 
Part of https://github.com/ooni/devops/issues/93 --- ansible/roles/prometheus/templates/prometheus.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/ansible/roles/prometheus/templates/prometheus.yml b/ansible/roles/prometheus/templates/prometheus.yml index ee19e61a..6ad9cfd9 100755 --- a/ansible/roles/prometheus/templates/prometheus.yml +++ b/ansible/roles/prometheus/templates/prometheus.yml @@ -99,9 +99,6 @@ scrape_configs: password: '{{ prometheus_metrics_password_dev }}' static_configs: - targets: - - ooniauth.api.dev.ooni.io - - oonirun.api.dev.ooni.io - - ooniprobe.api.dev.ooni.io - oohelperd.th.dev.ooni.io - job_name: 'ooniapi-services-prod' From 99cd52ddc82ed9c5bc8fa5a44f4ff28eea63940d Mon Sep 17 00:00:00 2001 From: DecFox <33030671+DecFox@users.noreply.github.com> Date: Sun, 8 Sep 2024 01:30:25 +0530 Subject: [PATCH 07/10] feat: add clickhouse proxy instance (#100) This diff adds a clickhouse proxy server config to the existing oonibackend proxy to establish the connection between AWS and the clickhouse DB. Part of #95 --- tf/environments/dev/main.tf | 8 +++- tf/modules/ooni_backendproxy/main.tf | 45 +++++++++++-------- tf/modules/ooni_backendproxy/outputs.tf | 5 ++- .../templates/setup-backend-proxy.sh | 19 ++++++++ tf/modules/ooni_backendproxy/variables.tf | 27 +++++++++-- 5 files changed, 80 insertions(+), 24 deletions(-) diff --git a/tf/environments/dev/main.tf b/tf/environments/dev/main.tf index 6c809f9d..c0f896a7 100644 --- a/tf/environments/dev/main.tf +++ b/tf/environments/dev/main.tf @@ -251,13 +251,19 @@ moved { module "ooni_backendproxy" { source = "../../modules/ooni_backendproxy" + stage = local.environment + vpc_id = module.network.vpc_id - subnet_ids = module.network.vpc_subnet_public[*].id + subnet_id = module.network.vpc_subnet_public[0].id + private_subnet_cidr = module.network.vpc_subnet_private[*].cidr_block + dns_zone_ooni_io = local.dns_zone_ooni_io key_name = module.adm_iam_roles.oonidevops_key_name instance_type = "t2.micro" backend_url = "https://backend-hel.ooni.org/" + clickhouse_url = "backend-fsn.ooni.org" + clickhouse_port = "9000" tags = merge( local.tags, diff --git a/tf/modules/ooni_backendproxy/main.tf b/tf/modules/ooni_backendproxy/main.tf index 2f933ceb..4689efc0 100644 --- a/tf/modules/ooni_backendproxy/main.tf +++ b/tf/modules/ooni_backendproxy/main.tf @@ -17,6 +17,13 @@ resource "aws_security_group" "nginx_sg" { cidr_blocks = ["0.0.0.0/0"] } + ingress { + protocol = "tcp" + from_port = 9000 + to_port = 9000 + cidr_blocks = var.private_subnet_cidr + } + ingress { protocol = "tcp" from_port = 22 @@ -55,7 +62,9 @@ resource "aws_launch_template" "ooni_backendproxy" { key_name = var.key_name user_data = base64encode(templatefile("${path.module}/templates/setup-backend-proxy.sh", { - backend_url = var.backend_url + backend_url = var.backend_url, + clickhouse_url = var.clickhouse_url, + clickhouse_port = var.clickhouse_port })) lifecycle { @@ -65,6 +74,7 @@ resource "aws_launch_template" "ooni_backendproxy" { network_interfaces { delete_on_termination = true associate_public_ip_address = true + subnet_id = var.subnet_id security_groups = [ aws_security_group.nginx_sg.id, ] @@ -76,7 +86,7 @@ resource "aws_launch_template" "ooni_backendproxy" { } } -resource "aws_autoscaling_group" "oonibackend_proxy" { +resource "aws_instance" "oonibackend_proxy" { launch_template { id = aws_launch_template.ooni_backendproxy.id version = "$Latest" @@ -86,19 +96,7 @@ resource "aws_autoscaling_group" "oonibackend_proxy" { create_before_destroy = true } - 
name_prefix = "${var.name}-asg-" - - min_size = 1 - max_size = 2 - desired_capacity = 1 - vpc_zone_identifier = var.subnet_ids - - instance_refresh { - strategy = "Rolling" - preferences { - min_healthy_percentage = 50 - } - } + tags = var.tags } resource "aws_alb_target_group" "oonibackend_proxy" { @@ -114,7 +112,18 @@ resource "aws_alb_target_group" "oonibackend_proxy" { tags = var.tags } -resource "aws_autoscaling_attachment" "oonibackend_proxy" { - autoscaling_group_name = aws_autoscaling_group.oonibackend_proxy.id - lb_target_group_arn = aws_alb_target_group.oonibackend_proxy.arn +resource "aws_lb_target_group_attachment" "oonibackend_proxy" { + target_id = aws_instance.oonibackend_proxy.id + target_group_arn = aws_alb_target_group.oonibackend_proxy.arn +} + +resource "aws_route53_record" "clickhouse_proxy_alias" { + zone_id = var.dns_zone_ooni_io + name = "clickhouse.${var.stage}.ooni.io" + type = "CNAME" + ttl = 300 + + records = [ + aws_instance.oonibackend_proxy.public_dns + ] } diff --git a/tf/modules/ooni_backendproxy/outputs.tf b/tf/modules/ooni_backendproxy/outputs.tf index 54295fae..792e6958 100644 --- a/tf/modules/ooni_backendproxy/outputs.tf +++ b/tf/modules/ooni_backendproxy/outputs.tf @@ -1,6 +1,7 @@ -output "autoscaling_group_id" { - value = aws_autoscaling_group.oonibackend_proxy.id +output "aws_instance_id" { + value = aws_instance.oonibackend_proxy.id } + output "alb_target_group_id" { value = aws_alb_target_group.oonibackend_proxy.id } diff --git a/tf/modules/ooni_backendproxy/templates/setup-backend-proxy.sh b/tf/modules/ooni_backendproxy/templates/setup-backend-proxy.sh index 30e8a274..c32b3c68 100644 --- a/tf/modules/ooni_backendproxy/templates/setup-backend-proxy.sh +++ b/tf/modules/ooni_backendproxy/templates/setup-backend-proxy.sh @@ -22,5 +22,24 @@ server { EOF sudo mv $tmpfile /etc/nginx/sites-available/default + +tmpfile_stream=$(mktemp /tmp/nginx-stream-config.XXXXXX) +cat > $tmpfile_stream < Date: Tue, 1 Oct 2024 16:46:30 +0200 Subject: [PATCH 08/10] Consolidate dev and prod deployments and switch th back to DO (#96) Changes: * Add support for creating digital ocean droplets for test helpers * Switch networking config to NAT gateway less setup Fixes: * https://github.com/ooni/devops/issues/92 * https://github.com/ooni/devops/issues/91 * https://github.com/ooni/devops/issues/93 Checklist for doing it: * [x] Create terraform module for deploying test helpers to digital ocean * [x] Tweak ECS task sizes to reduce instance consumption * [x] Drop ECS cluster for test helpers * [x] Setup direct load balancer rules to address services based on hostname * [x] Drop test helper monitoring on AWS in monitoring host * [x] Add support for SAN in ACM certificates by creating new module * [x] Setup nginx based load balancing for test helpers * [x] Refactor EC2 instance deployment to use cloud-init Since AWS costs are too high for the test helpers with IPv6 support, we switch them back to digital ocean. How this is implemented is by adding a new rule to the oonibackend proxy that acts as a load balancer towards the test helpers on digital ocean. The reason to do this is so that we don't have to complicate the TLS setup by having to do certificate provisioning on the test helpers, but rather are able to keep it in AWS. Moreover by having a single entry point to the test helpers it means we can implement a cache which works across all the test helper backends, instead of having a per-test helper caching layer. 
What is missing is adding the rules that perform routing on a per domain basis to the load balancer config. --------- Co-authored-by: decfox --- .../roles/prometheus/templates/prometheus.yml | 26 +- ansible/roles/prometheus/vars/main.yml | 25 ++ tf/environments/dev/.terraform.lock.hcl | 43 ++++ tf/environments/dev/main.tf | 187 +++++++++----- tf/environments/dev/versions.tf | 5 +- tf/environments/prod/.terraform.lock.hcl | 43 ++++ tf/environments/prod/dns_records.tf | 8 - tf/environments/prod/main.tf | 242 ++++++++++++------ tf/modules/adm_iam_roles/main.tf | 9 +- tf/modules/adm_iam_roles/outputs.tf | 1 - tf/modules/network/main.tf | 54 +--- tf/modules/network_noipv6/main.tf | 145 ----------- tf/modules/network_noipv6/outputs.tf | 19 -- tf/modules/network_noipv6/variables.tf | 26 -- tf/modules/ooni_backendproxy/main.tf | 35 ++- .../templates/cloud-init.yml | 58 +++++ .../templates/setup-backend-proxy.sh | 45 ---- tf/modules/ooni_backendproxy/variables.tf | 16 +- tf/modules/ooni_th_droplet/main.tf | 48 ++++ tf/modules/ooni_th_droplet/outputs.tf | 10 + .../templates/cloud-init-docker.yml | 175 +++++++++++++ .../ooni_th_droplet/templates/cloud-init.yml | 59 +++++ tf/modules/ooni_th_droplet/variables.tf | 40 +++ tf/modules/ooniapi_frontend/main.tf | 138 ++++++---- tf/modules/ooniapi_frontend/outputs.tf | 8 +- tf/modules/ooniapi_frontend/variables.tf | 12 + tf/modules/ooniapi_service/main.tf | 30 +-- tf/modules/ooniapi_service/outputs.tf | 2 +- tf/modules/ooniapi_service/variables.tf | 2 +- tf/modules/postgresql/variables.tf | 2 +- 30 files changed, 977 insertions(+), 536 deletions(-) delete mode 100644 tf/modules/network_noipv6/main.tf delete mode 100644 tf/modules/network_noipv6/outputs.tf delete mode 100644 tf/modules/network_noipv6/variables.tf create mode 100644 tf/modules/ooni_backendproxy/templates/cloud-init.yml delete mode 100644 tf/modules/ooni_backendproxy/templates/setup-backend-proxy.sh create mode 100644 tf/modules/ooni_th_droplet/main.tf create mode 100644 tf/modules/ooni_th_droplet/outputs.tf create mode 100644 tf/modules/ooni_th_droplet/templates/cloud-init-docker.yml create mode 100644 tf/modules/ooni_th_droplet/templates/cloud-init.yml create mode 100644 tf/modules/ooni_th_droplet/variables.tf diff --git a/ansible/roles/prometheus/templates/prometheus.yml b/ansible/roles/prometheus/templates/prometheus.yml index 6ad9cfd9..e8f9cd30 100755 --- a/ansible/roles/prometheus/templates/prometheus.yml +++ b/ansible/roles/prometheus/templates/prometheus.yml @@ -99,7 +99,9 @@ scrape_configs: password: '{{ prometheus_metrics_password_dev }}' static_configs: - targets: - - oohelperd.th.dev.ooni.io + - ooniauth.dev.ooni.io + - oonirun.dev.ooni.io + - ooniprobe.dev.ooni.io - job_name: 'ooniapi-services-prod' scrape_interval: 5s @@ -110,11 +112,25 @@ scrape_configs: password: '{{ prometheus_metrics_password_prod }}' static_configs: - targets: - - ooniauth.api.prod.ooni.io - - oonirun.api.prod.ooni.io - - ooniprobe.api.prod.ooni.io - - oohelperd.th.prod.ooni.io + - ooniauth.prod.ooni.io + - oonirun.prod.ooni.io + - ooniprobe.prod.ooni.io + - job_name: 'oonith-prod' + scrape_interval: 5s + scheme: http + metrics_path: "/metrics" + basic_auth: + username: 'prom' + password: '{{ prometheus_metrics_password_prod }}' + static_configs: + - targets: + - 0.do.th.prod.ooni.io:9001 + - 0.do.th.prod.ooni.io + - 1.do.th.prod.ooni.io:9001 + - 1.do.th.prod.ooni.io + - 2.do.th.prod.ooni.io + - 2.do.th.prod.ooni.io:9001 - job_name: 'ooni-web' scrape_interval: 5m diff --git 
a/ansible/roles/prometheus/vars/main.yml b/ansible/roles/prometheus/vars/main.yml index 66483d75..6550cd51 100644 --- a/ansible/roles/prometheus/vars/main.yml +++ b/ansible/roles/prometheus/vars/main.yml @@ -28,6 +28,8 @@ blackbox_jobs: - "https://2.th.ooni.org/" - "https://3.th.ooni.org/" - "https://4.th.ooni.org/" + - "https://5.th.ooni.org/" + - "https://6.th.ooni.org/" - name: "ooni collector" module: "ooni_collector_ok" @@ -73,21 +75,44 @@ blackbox_jobs: module: "http_2xx" targets: - "https://api.ooni.io/api/v1/measurements" + - "https://api.ooni.org/api/v1/measurements" - name: "ooni API test-list urls" module: "https_2xx_json_meta" targets: - "https://api.ooni.io/api/v1/test-list/urls?country_code=US" + - "https://api.ooni.org/api/v1/test-list/urls?country_code=US" - name: "ooni API test-helpers" module: "https_2xx_json" targets: - "https://api.ooni.io/api/v1/test-helpers" + - "https://api.ooni.org/api/v1/test-helpers" - name: "ooni API priv global overview" module: "https_2xx_json" targets: - "https://api.ooni.io/api/_/global_overview" + - "https://api.ooni.org/api/_/global_overview" + + # Note: this always returns true by design + - name: "OONI API check_report_id" + module: "https_2xx_json" + targets: + - "https://api.ooni.io/api/_/check_report_id?report_id=RANDOM" + - "https://api.ooni.org/api/_/check_report_id?report_id=RANDOM" + + - name: "OONI API raw_measurement" + module: "https_2xx_json" + targets: + - "https://api.ooni.io/api/v1/raw_measurement?measurement_uid=20240924151005.116855_IT_httpinvalidrequestline_f63463817af9eebe" + - "https://api.ooni.org/api/v1/raw_measurement?measurement_uid=20240924151005.116855_IT_httpinvalidrequestline_f63463817af9eebe" + + - name: "OONI Run v2 API" + module: "https_2xx_json" + targets: + - "https://api.ooni.org/api/v2/oonirun/links/10009" + - "https://api.ooni.org/api/v2/oonirun/links/10009/revisions" # end of API # diff --git a/tf/environments/dev/.terraform.lock.hcl b/tf/environments/dev/.terraform.lock.hcl index 036d2518..cb0159a0 100644 --- a/tf/environments/dev/.terraform.lock.hcl +++ b/tf/environments/dev/.terraform.lock.hcl @@ -1,6 +1,30 @@ # This file is maintained automatically by "terraform init". # Manual edits may be lost in future updates. 
+provider "registry.terraform.io/digitalocean/digitalocean" { + version = "2.40.0" + constraints = "~> 2.0" + hashes = [ + "h1:71yfpCVVq+OoNzl7SX/7ObnFUQeZL4vHOOalLzEZ4U0=", + "zh:00235830abae70642ebefc4d9c00e5eb978e28b74abc6b34f16b078f242aa217", + "zh:09d77785f768bd568f85a121d3d79316083befe903ce4ccd5567689a23236fb0", + "zh:0c9c4e19b411702d316a6bd044903e2ec506a69d38495ed32cc31e3f3f26acae", + "zh:12b34c88faad5b6149e9a3ad1396680588e1bae263b20d6b19835460f111c190", + "zh:15f041fc57ea46673a828919efe2ef3f05f7c4b863b7d7881336b93e92bd1159", + "zh:45e01972de2fab1687a09ea8fb3e4519be11c93ef93a63f28665630850858a20", + "zh:4e18bf5c1d2ec1ec6b6a9f4b58045309006f510edf770168fc18e273e6a09289", + "zh:575528b7e36e3489d2309e0c6cb9bd9952595cac5459b914f2d2827de1a1e4fc", + "zh:67462192212f810875d556462c79f574a8f5713b7a869ba4fce25953bfcf2dd2", + "zh:7024637b31e8276b653265fdf3f479220182edde4b300b034562b4c287faefa5", + "zh:a7904721b2680be8330dde98dd826be15c67eb274da7876f042cbcd6592ac970", + "zh:b225d4b67037a19392b0ab00d1f5fc9e729db4dfc32d18d4b36225693270ef52", + "zh:bd1e8768819d6113b2ec16f939196a1f2ae6d2803824fde463a20d06e071b212", + "zh:c5da40dc0749548ee2e1943776fb41b952c994e50bbc404251df20a81f730242", + "zh:dabc3387392aaba297739e1e97fadf059258fc3efb4dff2f499dbc407b6e088d", + "zh:f42137cf424c3e7c9c935b3f73618e51096bd0367a8d364073e2d70588d2cbf2", + ] +} + provider "registry.terraform.io/hashicorp/aws" { version = "5.40.0" constraints = ">= 4.9.0, >= 4.66.1" @@ -25,6 +49,25 @@ provider "registry.terraform.io/hashicorp/aws" { ] } +provider "registry.terraform.io/hashicorp/cloudinit" { + version = "2.3.4" + hashes = [ + "h1:S3j8poSaLbaftlKq2STBkQEkZH253ZLaHhBHBifdpBQ=", + "zh:09f1f1e1d232da96fbf9513b0fb5263bc2fe9bee85697aa15d40bb93835efbeb", + "zh:381e74b90d7a038c3a8dcdcc2ce8c72d6b86da9f208a27f4b98cabe1a1032773", + "zh:398eb321949e28c4c5f7c52e9b1f922a10d0b2b073b7db04cb69318d24ffc5a9", + "zh:4a425679614a8f0fe440845828794e609b35af17db59134c4f9e56d61e979813", + "zh:4d955d8608ece4984c9f1dacda2a59fdb4ea6b0243872f049b388181aab8c80a", + "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", + "zh:a48fbee1d58d55a1f4c92c2f38c83a37c8b2f2701ed1a3c926cefb0801fa446a", + "zh:b748fe6631b16a1dafd35a09377c3bffa89552af584cf95f47568b6cd31fc241", + "zh:d4b931f7a54603fa4692a2ec6e498b95464babd2be072bed5c7c2e140a280d99", + "zh:f1c9337fcfe3a7be39d179eb7986c22a979cfb2c587c05f1b3b83064f41785c5", + "zh:f58fc57edd1ee3250a28943cd84de3e4b744cdb52df0356a53403fc240240636", + "zh:f5f50de0923ff530b03e1bca0ac697534d61bb3e5fc7f60e13becb62229097a9", + ] +} + provider "registry.terraform.io/hashicorp/local" { version = "2.4.1" constraints = ">= 2.0.0" diff --git a/tf/environments/dev/main.tf b/tf/environments/dev/main.tf index c0f896a7..d877a5da 100644 --- a/tf/environments/dev/main.tf +++ b/tf/environments/dev/main.tf @@ -34,6 +34,11 @@ provider "aws" { # source_profile = oonidevops_user } +# In order for this provider to work you have to set the following environment +# variable to your DigitalOcean API token: +# DIGITALOCEAN_ACCESS_TOKEN= +provider "digitalocean" {} + data "aws_availability_zones" "available" {} ### !!! IMPORTANT !!! 
@@ -107,7 +112,7 @@ module "ansible_inventory" { } module "network" { - source = "../../modules/network_noipv6" + source = "../../modules/network" az_count = var.az_count vpc_main_cidr_block = "10.0.0.0/16" @@ -202,6 +207,9 @@ resource "aws_secretsmanager_secret_version" "prometheus_metrics_password" { secret_string = random_password.prometheus_metrics_password.result } +data "aws_secretsmanager_secret_version" "prometheus_metrics_password" { + secret_id = aws_secretsmanager_secret.prometheus_metrics_password.id +} resource "aws_secretsmanager_secret" "oonipg_url" { name = "oonidevops/ooni-tier0-postgres/postgresql_url" @@ -230,6 +238,11 @@ resource "aws_s3_bucket" "oonith_codepipeline_bucket" { bucket = "codepipeline-oonith-${var.aws_region}-${random_id.artifact_id.hex}" } +data "aws_secretsmanager_secret_version" "deploy_key" { + secret_id = module.adm_iam_roles.oonidevops_deploy_key_arn + depends_on = [module.adm_iam_roles] +} + # The aws_codestarconnections_connection resource is created in the state # PENDING. Authentication with the connection provider must be completed in the # AWS Console. @@ -248,22 +261,40 @@ moved { ### OONI Tier0 Backend Proxy +module "ooni_th_droplet" { + source = "../../modules/ooni_th_droplet" + + stage = local.environment + instance_location = "fra1" + instance_size = "s-1vcpu-1gb" + droplet_count = 1 + deployer_key = jsondecode(data.aws_secretsmanager_secret_version.deploy_key.secret_string)["public_key"] + metrics_password = data.aws_secretsmanager_secret_version.prometheus_metrics_password.secret_string + ssh_keys = [ + "3d:81:99:17:b5:d1:20:a5:fe:2b:14:96:67:93:d6:34", + "f6:4b:8b:e2:0e:d2:97:c5:45:5c:07:a6:fe:54:60:0e" + ] + dns_zone_ooni_io = local.dns_zone_ooni_io +} + module "ooni_backendproxy" { source = "../../modules/ooni_backendproxy" stage = local.environment - vpc_id = module.network.vpc_id - subnet_id = module.network.vpc_subnet_public[0].id - private_subnet_cidr = module.network.vpc_subnet_private[*].cidr_block - dns_zone_ooni_io = local.dns_zone_ooni_io + vpc_id = module.network.vpc_id + subnet_id = module.network.vpc_subnet_public[0].id + private_subnet_cidr = module.network.vpc_subnet_private[*].cidr_block + dns_zone_ooni_io = local.dns_zone_ooni_io key_name = module.adm_iam_roles.oonidevops_key_name instance_type = "t2.micro" - backend_url = "https://backend-hel.ooni.org/" - clickhouse_url = "backend-fsn.ooni.org" - clickhouse_port = "9000" + backend_url = "https://backend-hel.ooni.org/" + wcth_addresses = module.ooni_th_droplet.droplet_ipv4_address + wcth_domain_suffix = "th.dev.ooni.io" + clickhouse_url = "backend-fsn.ooni.org" + clickhouse_port = "9000" tags = merge( local.tags, @@ -281,11 +312,11 @@ module "ooniapi_cluster" { vpc_id = module.network.vpc_id subnet_ids = module.network.vpc_subnet_private[*].id - asg_min = 3 + asg_min = 2 asg_max = 6 asg_desired = 3 - instance_type = "t3.small" + instance_type = "t3.micro" tags = merge( local.tags, @@ -293,26 +324,6 @@ module "ooniapi_cluster" { ) } -module "oonith_cluster" { - source = "../../modules/ecs_cluster" - - name = "oonith-ecs-cluster" - key_name = module.adm_iam_roles.oonidevops_key_name - vpc_id = module.network.vpc_id - subnet_ids = module.network.vpc_subnet_private[*].id - - asg_min = 1 - asg_max = 4 - asg_desired = 1 - - instance_type = "t3.small" - - tags = merge( - local.tags, - { Name = "ooni-tier0-th-ecs-cluster" } - ) -} - #### OONI Tier0 #### OONI Probe service @@ -335,6 +346,9 @@ module "ooniapi_ooniprobe_deployer" { module "ooniapi_ooniprobe" { source = 
"../../modules/ooniapi_service" + task_cpu = 256 + task_memory = 512 + # First run should be set on first run to bootstrap the task definition # first_run = true @@ -386,6 +400,9 @@ module "ooniapi_oonirun_deployer" { module "ooniapi_oonirun" { source = "../../modules/ooniapi_service" + task_cpu = 256 + task_memory = 512 + vpc_id = module.network.vpc_id public_subnet_ids = module.network.vpc_subnet_public[*].id private_subnet_ids = module.network.vpc_subnet_private[*].id @@ -434,6 +451,9 @@ module "ooniapi_oonifindings_deployer" { module "ooniapi_oonifindings" { source = "../../modules/ooniapi_service" + task_cpu = 256 + task_memory = 512 + vpc_id = module.network.vpc_id public_subnet_ids = module.network.vpc_subnet_public[*].id private_subnet_ids = module.network.vpc_subnet_private[*].id @@ -482,6 +502,9 @@ module "ooniapi_ooniauth_deployer" { module "ooniapi_ooniauth" { source = "../../modules/ooniapi_service" + task_cpu = 256 + task_memory = 512 + vpc_id = module.network.vpc_id public_subnet_ids = module.network.vpc_subnet_public[*].id private_subnet_ids = module.network.vpc_subnet_private[*].id @@ -536,16 +559,20 @@ module "ooniapi_frontend" { vpc_id = module.network.vpc_id subnet_ids = module.network.vpc_subnet_public[*].id - oonibackend_proxy_target_group_arn = module.ooni_backendproxy.alb_target_group_id - ooniapi_oonirun_target_group_arn = module.ooniapi_oonirun.alb_target_group_id - ooniapi_ooniauth_target_group_arn = module.ooniapi_ooniauth.alb_target_group_id - ooniapi_ooniprobe_target_group_arn = module.ooniapi_ooniprobe.alb_target_group_id + oonibackend_proxy_target_group_arn = module.ooni_backendproxy.alb_target_group_id + ooniapi_oonirun_target_group_arn = module.ooniapi_oonirun.alb_target_group_id + ooniapi_ooniauth_target_group_arn = module.ooniapi_ooniauth.alb_target_group_id + ooniapi_ooniprobe_target_group_arn = module.ooniapi_ooniprobe.alb_target_group_id ooniapi_oonifindings_target_group_arn = module.ooniapi_oonifindings.alb_target_group_id ooniapi_service_security_groups = [ module.ooniapi_cluster.web_security_group_id ] + ooniapi_acm_certificate_arn = aws_acm_certificate.ooniapi_frontend.arn + + oonith_domains = ["*.th.dev.ooni.io"] + stage = local.environment dns_zone_ooni_io = local.dns_zone_ooni_io @@ -555,53 +582,73 @@ module "ooniapi_frontend" { ) } -#### OONI oohelperd service +locals { + ooniapi_frontend_alternative_domains = { + "ooniauth.${local.environment}.ooni.io" : local.dns_zone_ooni_io, + "ooniprobe.${local.environment}.ooni.io" : local.dns_zone_ooni_io, + "oonirun.${local.environment}.ooni.io" : local.dns_zone_ooni_io, + "8.th.dev.ooni.io" : local.dns_zone_ooni_io, + } + ooniapi_frontend_main_domain_name = "api.${local.environment}.ooni.io" + ooniapi_frontend_main_domain_name_zone_id = local.dns_zone_ooni_io -module "oonith_oohelperd_deployer" { - source = "../../modules/oonith_service_deployer" +} - service_name = "oohelperd" - repo = "ooni/probe-cli" - branch_name = "master" - buildspec_path = "oonith/buildspec.yml" - codestar_connection_arn = aws_codestarconnections_connection.oonidevops.arn +resource "aws_route53_record" "ooniapi_frontend_main" { + name = local.ooniapi_frontend_main_domain_name - codepipeline_bucket = aws_s3_bucket.oonith_codepipeline_bucket.bucket + zone_id = local.ooniapi_frontend_main_domain_name_zone_id + type = "A" - ecs_service_name = module.oonith_oohelperd.ecs_service_name - ecs_cluster_name = module.oonith_cluster.cluster_name + alias { + name = module.ooniapi_frontend.ooniapi_dns_name + zone_id = 
module.ooniapi_frontend.ooniapi_dns_zone_id + evaluate_target_health = true + } } -module "oonith_oohelperd" { - source = "../../modules/oonith_service" +resource "aws_route53_record" "ooniapi_frontend_alt" { + for_each = local.ooniapi_frontend_alternative_domains - vpc_id = module.network.vpc_id - public_subnet_ids = module.network.vpc_subnet_public[*].id - private_subnet_ids = module.network.vpc_subnet_private[*].id + name = each.key + zone_id = each.value + type = "A" - service_name = "oohelperd" - default_docker_image_url = "ooni/oonith-oohelperd:latest" - stage = local.environment - dns_zone_ooni_io = local.dns_zone_ooni_io - key_name = module.adm_iam_roles.oonidevops_key_name - ecs_cluster_id = module.oonith_cluster.cluster_id - - task_secrets = { - PROMETHEUS_METRICS_PASSWORD = aws_secretsmanager_secret_version.prometheus_metrics_password.arn + alias { + name = module.ooniapi_frontend.ooniapi_dns_name + zone_id = module.ooniapi_frontend.ooniapi_dns_zone_id + evaluate_target_health = true } +} - oonith_service_security_groups = [ - module.oonith_cluster.web_security_group_id - ] +resource "aws_acm_certificate" "ooniapi_frontend" { + domain_name = local.ooniapi_frontend_main_domain_name + validation_method = "DNS" + + tags = local.tags - // Note: Since we do not have a dns zone for ooni org, we test on io domains here - alternative_names = { - "5.th.dev.ooni.io" = local.dns_zone_ooni_io, - "6.th.dev.ooni.io" = local.dns_zone_ooni_io, + subject_alternative_names = keys(local.ooniapi_frontend_alternative_domains) +} + +resource "aws_route53_record" "ooniapi_frontend_cert_validation" { + for_each = { + for dvo in aws_acm_certificate.ooniapi_frontend.domain_validation_options : dvo.domain_name => { + name = dvo.resource_record_name + record = dvo.resource_record_value + type = dvo.resource_record_type + domain_name = dvo.domain_name + } } - tags = merge( - local.tags, - { Name = "ooni-tier0-oohelperd" } - ) + allow_overwrite = true + name = each.value.name + records = [each.value.record] + ttl = 60 + type = each.value.type + zone_id = lookup(local.ooniapi_frontend_alternative_domains, each.value.domain_name, local.dns_zone_ooni_io) } + +resource "aws_acm_certificate_validation" "ooniapi_frontend" { + certificate_arn = aws_acm_certificate.ooniapi_frontend.arn + validation_record_fqdns = [for record in aws_route53_record.ooniapi_frontend_cert_validation : record.fqdn] +} \ No newline at end of file diff --git a/tf/environments/dev/versions.tf b/tf/environments/dev/versions.tf index 682191e7..a712029e 100644 --- a/tf/environments/dev/versions.tf +++ b/tf/environments/dev/versions.tf @@ -1,7 +1,10 @@ terraform { required_version = ">= 1.0" - required_providers { + digitalocean = { + source = "digitalocean/digitalocean" + version = "~> 2.0" + } aws = { source = "hashicorp/aws" version = ">= 4.66.1" diff --git a/tf/environments/prod/.terraform.lock.hcl b/tf/environments/prod/.terraform.lock.hcl index ed72cdfd..6f3c4ce4 100644 --- a/tf/environments/prod/.terraform.lock.hcl +++ b/tf/environments/prod/.terraform.lock.hcl @@ -1,6 +1,30 @@ # This file is maintained automatically by "terraform init". # Manual edits may be lost in future updates. 
+provider "registry.terraform.io/digitalocean/digitalocean" { + version = "2.41.0" + constraints = "~> 2.0" + hashes = [ + "h1:Ne6nxvygwwHbNEO9My9uukE/YtlwAVMr/Bud1FIc6uc=", + "zh:13bfbca765a302a8fdf9ca0e4c5d25c7ee62d21b2bc7fbc241e298215c78e5f7", + "zh:45ef1602bb56fde0b6755f99847da0549144ebdd4af2da695e44d1a06d24d685", + "zh:4a6d81c462a11e710dd6138bb18573f60af456e83c5af0c1158578b4dc8e07f9", + "zh:5827b9463f7fce29bf4d9eb9264771d3aec103ed25e2151e570e8bee27b2dc6a", + "zh:639e59ffddb267a5255d66b93c816b713df96a304c23757364a96a65159ee177", + "zh:6876c162f2e4f850c4acede81857c72665710af2f552f19b1de56bcd5addc86a", + "zh:6a23b529309d6e8f59339d9572504e08f5c90491dfa0d1b1468a6fd7bd6b1b3d", + "zh:7d6e2c103f097a694b81d0e22ecd24ec2778a307e64dbef8de4f956d53219274", + "zh:8203577b5ad891e84afa994a47c6aba85401edf4bdd5aaf7f5e30e59e1393880", + "zh:88672feeae8ac9f4f99391b99957426c9c0a667021c658c4c9dad23abd5b5832", + "zh:ae3703123073a7808cea5a7a89289973e58a4fd83e94680091d4a8420ad521f5", + "zh:b59dd8675402e49a1fba5d2cf14596553c21f104bbb90a1167aa44c39693e7a5", + "zh:bb608cf1db63f985709e0052dbc3d16e9c801a23ebbf4d0a687c8a89d09e3769", + "zh:f1164e25518c00a640a8a375b2214d9bfc86297d2d726a6d35ed6d5de334ef96", + "zh:fc8a0a0375b26095e78ecfd987b79e6ef26c9c5d2e4393d437a9601ea1f3c5c5", + "zh:ffae2daa3ef366047885ace62f2fd0d126d6581d253996ef78c11bc5acbb3999", + ] +} + provider "registry.terraform.io/hashicorp/aws" { version = "5.44.0" constraints = ">= 4.9.0, >= 4.66.1" @@ -24,6 +48,25 @@ provider "registry.terraform.io/hashicorp/aws" { ] } +provider "registry.terraform.io/hashicorp/cloudinit" { + version = "2.3.5" + hashes = [ + "h1:Sf1Lt21oTADbzsnlU38ylpkl8YXP0Beznjcy5F/Yx64=", + "zh:17c20574de8eb925b0091c9b6a4d859e9d6e399cd890b44cfbc028f4f312ac7a", + "zh:348664d9a900f7baf7b091cf94d657e4c968b240d31d9e162086724e6afc19d5", + "zh:5a876a468ffabff0299f8348e719cb704daf81a4867f8c6892f3c3c4add2c755", + "zh:6ef97ee4c8c6a69a3d36746ba5c857cf4f4d78f32aa3d0e1ce68f2ece6a5dba5", + "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", + "zh:8283e5a785e3c518a440f6ac6e7cc4fc07fe266bf34974246f4e2ef05762feda", + "zh:a44eb5077950168b571b7eb65491246c00f45409110f0f172cc3a7605f19dba9", + "zh:aa0806cbff72b49c1b389c0b8e6904586e5259c08dabb7cb5040418568146530", + "zh:bec4613c3beaad9a7be7ca99cdb2852073f782355b272892e6ee97a22856aec1", + "zh:d7fe368577b6c8d1ae44c751ed42246754c10305c7f001cc0109833e95aa107d", + "zh:df2409fc6a364b1f0a0f8a9cd8a86e61e80307996979ce3790243c4ce88f2915", + "zh:ed3c263396ff1f4d29639cc43339b655235acf4d06296a7c120a80e4e0fd6409", + ] +} + provider "registry.terraform.io/hashicorp/local" { version = "2.5.1" constraints = ">= 2.0.0" diff --git a/tf/environments/prod/dns_records.tf b/tf/environments/prod/dns_records.tf index 2129b5c3..06f68e81 100644 --- a/tf/environments/prod/dns_records.tf +++ b/tf/environments/prod/dns_records.tf @@ -30,14 +30,6 @@ resource "aws_route53_record" "ams-slack-1-ooni-org-_A_" { zone_id = local.dns_root_zone_ooni_org } -resource "aws_route53_record" "api-ooni-org-_A_" { - name = "api.ooni.org" - records = ["142.93.237.101"] - ttl = "1799" - type = "A" - zone_id = local.dns_root_zone_ooni_org -} - resource "aws_route53_record" "backend-fsn-ooni-org-_A_" { name = "backend-fsn.ooni.org" records = ["162.55.247.208"] diff --git a/tf/environments/prod/main.tf b/tf/environments/prod/main.tf index 19a09a72..e899839c 100644 --- a/tf/environments/prod/main.tf +++ b/tf/environments/prod/main.tf @@ -208,6 +208,9 @@ resource "aws_secretsmanager_secret_version" "prometheus_metrics_password" { secret_string 
= random_password.prometheus_metrics_password.result } +data "aws_secretsmanager_secret_version" "prometheus_metrics_password" { + secret_id = aws_secretsmanager_secret.prometheus_metrics_password.id +} resource "aws_secretsmanager_secret" "oonipg_url" { name = "oonidevops/ooni-tier0-postgres/postgresql_url" @@ -236,6 +239,11 @@ resource "aws_s3_bucket" "oonith_codepipeline_bucket" { bucket = "codepipeline-oonith-${var.aws_region}-${random_id.artifact_id.hex}" } +data "aws_secretsmanager_secret_version" "deploy_key" { + secret_id = module.adm_iam_roles.oonidevops_deploy_key_arn + depends_on = [module.adm_iam_roles] +} + # The aws_codestarconnections_connection resource is created in the state # PENDING. Authentication with the connection provider must be completed in the # AWS Console. @@ -254,14 +262,40 @@ moved { ### OONI Tier0 Backend Proxy +module "ooni_th_droplet" { + source = "../../modules/ooni_th_droplet" + + stage = local.environment + instance_location = "fra1" + instance_size = "s-1vcpu-1gb" + droplet_count = 3 + deployer_key = jsondecode(data.aws_secretsmanager_secret_version.deploy_key.secret_string)["public_key"] + metrics_password = data.aws_secretsmanager_secret_version.prometheus_metrics_password.secret_string + ssh_keys = [ + "3d:81:99:17:b5:d1:20:a5:fe:2b:14:96:67:93:d6:34", + "f6:4b:8b:e2:0e:d2:97:c5:45:5c:07:a6:fe:54:60:0e" + ] + dns_zone_ooni_io = local.dns_zone_ooni_io +} + module "ooni_backendproxy" { source = "../../modules/ooni_backendproxy" - vpc_id = module.network.vpc_id - subnet_ids = module.network.vpc_subnet_public[*].id + stage = local.environment + + vpc_id = module.network.vpc_id + subnet_id = module.network.vpc_subnet_public[0].id + private_subnet_cidr = module.network.vpc_subnet_private[*].cidr_block + dns_zone_ooni_io = local.dns_zone_ooni_io key_name = module.adm_iam_roles.oonidevops_key_name - instance_type = "t2.micro" + instance_type = "t3.micro" + + backend_url = "https://backend-fsn.ooni.org/" + wcth_addresses = module.ooni_th_droplet.droplet_ipv4_address + wcth_domain_suffix = "th.ooni.org" + clickhouse_url = "backend-fsn.ooni.org" + clickhouse_port = "9000" tags = merge( local.tags, @@ -280,35 +314,15 @@ module "ooniapi_cluster" { subnet_ids = module.network.vpc_subnet_public[*].id # You need be careful how these are tweaked. 
- asg_min = 5 - asg_max = 12 - asg_desired = 5 - - instance_type = "t3.small" - - tags = merge( - local.tags, - { Name = "ooni-tier0-api-ecs-cluster" } - ) -} - -module "oonith_cluster" { - source = "../../modules/ecs_cluster" - - name = "oonith-ecs-cluster" - key_name = module.adm_iam_roles.oonidevops_key_name - vpc_id = module.network.vpc_id - subnet_ids = module.network.vpc_subnet_public[*].id - asg_min = 3 - asg_max = 7 + asg_max = 8 asg_desired = 3 - instance_type = "t3.small" + instance_type = "t3.micro" tags = merge( local.tags, - { Name = "ooni-tier0-th-ecs-cluster" } + { Name = "ooni-tier0-api-ecs-cluster" } ) } @@ -417,6 +431,55 @@ module "ooniapi_oonirun" { ) } +#### OONI Findings service + +module "ooniapi_oonifindings_deployer" { + source = "../../modules/ooniapi_service_deployer" + + service_name = "oonifindings" + repo = "ooni/backend" + branch_name = "master" + buildspec_path = "ooniapi/services/oonifindings/buildspec.yml" + codestar_connection_arn = aws_codestarconnections_connection.oonidevops.arn + + codepipeline_bucket = aws_s3_bucket.ooniapi_codepipeline_bucket.bucket + + ecs_service_name = module.ooniapi_oonifindings.ecs_service_name + ecs_cluster_name = module.ooniapi_cluster.cluster_name +} + +module "ooniapi_oonifindings" { + source = "../../modules/ooniapi_service" + + first_run = true + vpc_id = module.network.vpc_id + public_subnet_ids = module.network.vpc_subnet_public[*].id + private_subnet_ids = module.network.vpc_subnet_private[*].id + + service_name = "oonifindings" + default_docker_image_url = "ooni/api-oonifindings:latest" + stage = local.environment + dns_zone_ooni_io = local.dns_zone_ooni_io + key_name = module.adm_iam_roles.oonidevops_key_name + ecs_cluster_id = module.ooniapi_cluster.cluster_id + + task_secrets = { + POSTGRESQL_URL = aws_secretsmanager_secret_version.oonipg_url.arn + JWT_ENCRYPTION_KEY = aws_secretsmanager_secret_version.jwt_secret.arn + PROMETHEUS_METRICS_PASSWORD = aws_secretsmanager_secret_version.prometheus_metrics_password.arn + } + + ooniapi_service_security_groups = [ + module.ooniapi_cluster.web_security_group_id + ] + + tags = merge( + local.tags, + { Name = "ooni-tier0-oonifindings" } + ) +} + + #### OONI Auth service module "ooniapi_ooniauth_deployer" { @@ -494,15 +557,22 @@ module "ooniapi_frontend" { vpc_id = module.network.vpc_id subnet_ids = module.network.vpc_subnet_public[*].id - oonibackend_proxy_target_group_arn = module.ooni_backendproxy.alb_target_group_id - ooniapi_oonirun_target_group_arn = module.ooniapi_oonirun.alb_target_group_id - ooniapi_ooniauth_target_group_arn = module.ooniapi_ooniauth.alb_target_group_id - ooniapi_ooniprobe_target_group_arn = module.ooniapi_ooniprobe.alb_target_group_id + oonibackend_proxy_target_group_arn = module.ooni_backendproxy.alb_target_group_id + ooniapi_oonirun_target_group_arn = module.ooniapi_oonirun.alb_target_group_id + ooniapi_ooniauth_target_group_arn = module.ooniapi_ooniauth.alb_target_group_id + ooniapi_ooniprobe_target_group_arn = module.ooniapi_ooniprobe.alb_target_group_id + ooniapi_oonifindings_target_group_arn = module.ooniapi_oonifindings.alb_target_group_id ooniapi_service_security_groups = [ module.ooniapi_cluster.web_security_group_id ] + ooniapi_acm_certificate_arn = aws_acm_certificate.ooniapi_frontend.arn + + oonith_domains = [ + "*.th.ooni.org", + ] + stage = local.environment dns_zone_ooni_io = local.dns_zone_ooni_io @@ -512,62 +582,90 @@ module "ooniapi_frontend" { ) } -#### OONI oohelperd service - -module "oonith_oohelperd_deployer" { - source = 
"../../modules/oonith_service_deployer" - service_name = "oohelperd" - repo = "ooni/probe-cli" - branch_name = "codedeploy/prod" - buildspec_path = "oonith/buildspec.yml" - codestar_connection_arn = aws_codestarconnections_connection.oonidevops.arn +## DNS - codepipeline_bucket = aws_s3_bucket.oonith_codepipeline_bucket.bucket +locals { + ooniapi_frontend_alternative_domains = { + "api.ooni.org" : local.dns_root_zone_ooni_org + "0.th.ooni.org" : local.dns_root_zone_ooni_org, + "1.th.ooni.org" : local.dns_root_zone_ooni_org, + "2.th.ooni.org" : local.dns_root_zone_ooni_org, + "3.th.ooni.org" : local.dns_root_zone_ooni_org, + "4.th.ooni.org" : local.dns_root_zone_ooni_org, + "5.th.ooni.org" : local.dns_root_zone_ooni_org, + "6.th.ooni.org" : local.dns_root_zone_ooni_org, + # TODO: add these once we unlock the quota for maximum certificates + #"ooniauth.${local.environment}.ooni.io" : local.dns_zone_ooni_io, + #"ooniprobe.${local.environment}.ooni.io" : local.dns_zone_ooni_io, + #"oonirun.${local.environment}.ooni.io" : local.dns_zone_ooni_io, + } + ooniapi_frontend_main_domain_name = "api.${local.environment}.ooni.io" + ooniapi_frontend_main_domain_name_zone_id = local.dns_zone_ooni_io - ecs_service_name = module.oonith_oohelperd.ecs_service_name - ecs_cluster_name = module.oonith_cluster.cluster_name } -module "oonith_oohelperd" { - source = "../../modules/oonith_service" - #first_run = true +resource "aws_route53_record" "ooniapi_frontend_main" { + name = local.ooniapi_frontend_main_domain_name - vpc_id = module.network.vpc_id - private_subnet_ids = module.network.vpc_subnet_private[*].id - public_subnet_ids = module.network.vpc_subnet_public[*].id + zone_id = local.ooniapi_frontend_main_domain_name_zone_id + type = "A" - service_name = "oohelperd" - default_docker_image_url = "ooni/oonith-oohelperd:latest" - stage = local.environment - dns_zone_ooni_io = local.dns_zone_ooni_io - key_name = module.adm_iam_roles.oonidevops_key_name - ecs_cluster_id = module.oonith_cluster.cluster_id + alias { + name = module.ooniapi_frontend.ooniapi_dns_name + zone_id = module.ooniapi_frontend.ooniapi_dns_zone_id + evaluate_target_health = true + } +} - service_desired_count = 3 +resource "aws_route53_record" "ooniapi_frontend_alt" { + for_each = local.ooniapi_frontend_alternative_domains - task_secrets = { - PROMETHEUS_METRICS_PASSWORD = aws_secretsmanager_secret_version.prometheus_metrics_password.arn + name = each.key + zone_id = each.value + type = "A" + + alias { + name = module.ooniapi_frontend.ooniapi_dns_name + zone_id = module.ooniapi_frontend.ooniapi_dns_zone_id + evaluate_target_health = true } +} + +# TODO: currently the certificate is hardcoded +resource "aws_acm_certificate" "ooniapi_frontend" { + domain_name = local.ooniapi_frontend_main_domain_name + validation_method = "DNS" + + tags = local.tags + + subject_alternative_names = keys(local.ooniapi_frontend_alternative_domains) +} - alternative_names = { - "0.th.ooni.org" = local.dns_root_zone_ooni_org, - "1.th.ooni.org" = local.dns_root_zone_ooni_org, - "2.th.ooni.org" = local.dns_root_zone_ooni_org, - "3.th.ooni.org" = local.dns_root_zone_ooni_org, - "4.th.ooni.org" = local.dns_root_zone_ooni_org +resource "aws_route53_record" "ooniapi_frontend_cert_validation" { + for_each = { + for dvo in aws_acm_certificate.ooniapi_frontend.domain_validation_options : dvo.domain_name => { + name = dvo.resource_record_name + record = dvo.resource_record_value + type = dvo.resource_record_type + domain_name = dvo.domain_name + } } - 
oonith_service_security_groups = [ - module.oonith_cluster.web_security_group_id - ] + allow_overwrite = true + name = each.value.name + records = [each.value.record] + ttl = 60 + type = each.value.type + zone_id = lookup(local.ooniapi_frontend_alternative_domains, each.value.domain_name, local.dns_zone_ooni_io) +} - tags = merge( - local.tags, - { Name = "ooni-tier0-oohelperd" } - ) +resource "aws_acm_certificate_validation" "ooniapi_frontend" { + certificate_arn = aws_acm_certificate.ooniapi_frontend.arn + validation_record_fqdns = [for record in aws_route53_record.ooniapi_frontend_cert_validation : record.fqdn] } + ## Code signing setup module "codesigning" { @@ -584,9 +682,9 @@ module "codesigning" { module "ansible_controller" { source = "../../modules/ansible_controller" - vpc_id = module.network.vpc_id + vpc_id = module.network.vpc_id subnet_id = module.network.vpc_subnet_public[0].id - key_name = module.adm_iam_roles.oonidevops_key_name + key_name = module.adm_iam_roles.oonidevops_key_name dns_zone_ooni_io = local.dns_zone_ooni_io } diff --git a/tf/modules/adm_iam_roles/main.tf b/tf/modules/adm_iam_roles/main.tf index 78504275..bcafaf6c 100644 --- a/tf/modules/adm_iam_roles/main.tf +++ b/tf/modules/adm_iam_roles/main.tf @@ -79,12 +79,15 @@ resource "aws_key_pair" "oonidevops" { } resource "aws_secretsmanager_secret" "oonidevops_deploy_key" { - name = "oonidevops/deploy_key/ssh_key_private" + name = "oonidevops/deploy_key" tags = var.tags } resource "aws_secretsmanager_secret_version" "oonidevops_deploy_key" { - secret_id = aws_secretsmanager_secret.oonidevops_deploy_key.id - secret_string = tls_private_key.oonidevops.private_key_openssh + secret_id = aws_secretsmanager_secret.oonidevops_deploy_key.id + secret_string = jsonencode({ + private_key = tls_private_key.oonidevops.private_key_openssh, + public_key = tls_private_key.oonidevops.public_key_openssh, + }) } diff --git a/tf/modules/adm_iam_roles/outputs.tf b/tf/modules/adm_iam_roles/outputs.tf index a99f7905..6fc36f29 100644 --- a/tf/modules/adm_iam_roles/outputs.tf +++ b/tf/modules/adm_iam_roles/outputs.tf @@ -9,4 +9,3 @@ output "oonidevops_key_name" { output "oonidevops_deploy_key_arn" { value = aws_secretsmanager_secret.oonidevops_deploy_key.id } - diff --git a/tf/modules/network/main.tf b/tf/modules/network/main.tf index e4427670..f224fda2 100644 --- a/tf/modules/network/main.tf +++ b/tf/modules/network/main.tf @@ -7,7 +7,7 @@ resource "aws_vpc" "main" { cidr_block = var.vpc_main_cidr_block enable_dns_hostnames = true enable_dns_support = true - + assign_generated_ipv6_cidr_block = true tags = var.tags @@ -17,9 +17,7 @@ resource "aws_subnet" "public" { count = var.az_count cidr_block = cidrsubnet(aws_vpc.main.cidr_block, 8, count.index) - ipv6_cidr_block = cidrsubnet(aws_vpc.main.ipv6_cidr_block, 8, count.index) - assign_ipv6_address_on_creation = true availability_zone = element(var.aws_availability_zones_available.names, count.index) vpc_id = aws_vpc.main.id @@ -42,11 +40,10 @@ resource "aws_subnet" "private" { cidr_block = cidrsubnet(aws_vpc.main.cidr_block, 8, local.private_net_offset + count.index) ipv6_cidr_block = cidrsubnet(aws_vpc.main.ipv6_cidr_block, 8, local.private_net_offset + count.index) - assign_ipv6_address_on_creation = true availability_zone = element(var.aws_availability_zones_available.names, count.index) vpc_id = aws_vpc.main.id - map_public_ip_on_launch = false + map_public_ip_on_launch = true depends_on = [aws_internet_gateway.gw] @@ -59,26 +56,6 @@ resource "aws_subnet" "private" { } } - -resource 
"aws_eip" "nat" { - count = var.az_count - domain = "vpc" - depends_on = [aws_internet_gateway.gw] -} - -resource "aws_nat_gateway" "nat_gw" { - count = var.az_count - - allocation_id = element(aws_eip.nat[*].id, count.index) - subnet_id = element(aws_subnet.public[*].id, count.index) - - depends_on = [aws_internet_gateway.gw] - - tags = { - Name = "ooni-nat-gw" - } -} - resource "aws_internet_gateway" "gw" { vpc_id = aws_vpc.main.id tags = { @@ -86,14 +63,6 @@ resource "aws_internet_gateway" "gw" { } } -resource "aws_egress_only_internet_gateway" "egress_gw" { - vpc_id = aws_vpc.main.id - - tags = { - Name = "ooni-egressonly-gw" - } -} - resource "aws_route_table" "public" { vpc_id = aws_vpc.main.id @@ -102,11 +71,6 @@ resource "aws_route_table" "public" { gateway_id = aws_internet_gateway.gw.id } - route { - ipv6_cidr_block = "::/0" - egress_only_gateway_id = aws_egress_only_internet_gateway.egress_gw.id - } - tags = { Name = "ooni-public-route-table" } @@ -119,28 +83,22 @@ resource "aws_route_table_association" "public" { } resource "aws_route_table" "private" { - count = var.az_count vpc_id = aws_vpc.main.id route { - cidr_block = "0.0.0.0/0" - nat_gateway_id = element(aws_nat_gateway.nat_gw[*].id, count.index) - } - - route { - ipv6_cidr_block = "::/0" - egress_only_gateway_id = aws_egress_only_internet_gateway.egress_gw.id + cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.gw.id } tags = { - Name = "ooni-private-route-table-${count.index}" + Name = "ooni-private-route-table" } } resource "aws_route_table_association" "private" { count = var.az_count subnet_id = element(aws_subnet.private[*].id, count.index) - route_table_id = element(aws_route_table.private[*].id, count.index) + route_table_id = aws_route_table.private.id lifecycle { create_before_destroy = true diff --git a/tf/modules/network_noipv6/main.tf b/tf/modules/network_noipv6/main.tf deleted file mode 100644 index 447284c7..00000000 --- a/tf/modules/network_noipv6/main.tf +++ /dev/null @@ -1,145 +0,0 @@ -locals { - private_net_offset = 100 - cloudhsm_net_offset = 200 -} - -resource "aws_vpc" "main" { - cidr_block = var.vpc_main_cidr_block - enable_dns_hostnames = true - enable_dns_support = true - - tags = var.tags -} - -resource "aws_subnet" "public" { - count = var.az_count - - cidr_block = cidrsubnet(aws_vpc.main.cidr_block, 8, count.index) - - availability_zone = element(var.aws_availability_zones_available.names, count.index) - vpc_id = aws_vpc.main.id - map_public_ip_on_launch = true - - depends_on = [aws_internet_gateway.gw] - - lifecycle { - create_before_destroy = true - } - - tags = { - Name = "ooni-public-subnet-${count.index}" - } -} - -resource "aws_subnet" "private" { - count = var.az_count - - cidr_block = cidrsubnet(aws_vpc.main.cidr_block, 8, local.private_net_offset + count.index) - - availability_zone = element(var.aws_availability_zones_available.names, count.index) - vpc_id = aws_vpc.main.id - map_public_ip_on_launch = true - - depends_on = [aws_internet_gateway.gw] - - lifecycle { - create_before_destroy = true - } - - tags = { - Name = "ooni-private-subnet-${count.index}" - } -} - -resource "aws_internet_gateway" "gw" { - vpc_id = aws_vpc.main.id - tags = { - Name = "ooni-internet-gw" - } -} - -resource "aws_route_table" "public" { - vpc_id = aws_vpc.main.id - - route { - cidr_block = "0.0.0.0/0" - gateway_id = aws_internet_gateway.gw.id - } - - tags = { - Name = "ooni-public-route-table" - } -} - -resource "aws_route_table_association" "public" { - count = var.az_count - subnet_id = 
element(aws_subnet.public[*].id, count.index) - route_table_id = aws_route_table.public.id -} - -resource "aws_route_table" "private" { - vpc_id = aws_vpc.main.id - - route { - cidr_block = "0.0.0.0/0" - gateway_id = aws_internet_gateway.gw.id - } - - tags = { - Name = "ooni-private-route-table" - } -} - -resource "aws_route_table_association" "private" { - count = var.az_count - subnet_id = element(aws_subnet.private[*].id, count.index) - route_table_id = aws_route_table.private.id - - lifecycle { - create_before_destroy = true - } -} - -locals { - cloudhsm_network_count = (var.enable_codesign_network ? 1 : 0) * var.az_count -} - -resource "aws_subnet" "cloudhsm" { - count = local.cloudhsm_network_count - cidr_block = cidrsubnet(aws_vpc.main.cidr_block, 8, local.cloudhsm_net_offset + count.index) - - availability_zone = var.aws_availability_zones_available.names[count.index] - vpc_id = aws_vpc.main.id - map_public_ip_on_launch = false - - depends_on = [aws_internet_gateway.gw] - - lifecycle { - create_before_destroy = true - } - - tags = { - Name = "ooni-cloudhsm-subnet-${count.index}" - } -} - -resource "aws_route_table" "cloudhsm" { - count = local.cloudhsm_network_count - - vpc_id = aws_vpc.main.id - - route { - cidr_block = "0.0.0.0/0" - gateway_id = aws_internet_gateway.gw.id - } - - tags = { - Name = "ooni-cloudhsm-route-table" - } -} - -resource "aws_route_table_association" "cloudhsm" { - count = local.cloudhsm_network_count - subnet_id = element(aws_subnet.cloudhsm[*].id, count.index) - route_table_id = aws_route_table.cloudhsm[count.index].id -} diff --git a/tf/modules/network_noipv6/outputs.tf b/tf/modules/network_noipv6/outputs.tf deleted file mode 100644 index 555991dd..00000000 --- a/tf/modules/network_noipv6/outputs.tf +++ /dev/null @@ -1,19 +0,0 @@ -output "vpc_id" { - description = "The ID of the VPC" - value = aws_vpc.main.id -} - -output "vpc_subnet_public" { - description = "The value of the public subnet associated to the VPC" - value = aws_subnet.public -} - -output "vpc_subnet_private" { - description = "The value of the private subnet associated to the VPC" - value = aws_subnet.private -} - -output "vpc_subnet_cloudhsm" { - description = "The value of the cloudhsm subnet associated to the VPC" - value = aws_subnet.cloudhsm -} diff --git a/tf/modules/network_noipv6/variables.tf b/tf/modules/network_noipv6/variables.tf deleted file mode 100644 index 1416be87..00000000 --- a/tf/modules/network_noipv6/variables.tf +++ /dev/null @@ -1,26 +0,0 @@ -variable "az_count" { - description = "Number of AZs to cover in a given AWS region" - type = number - default = "2" -} - -variable "aws_availability_zones_available" { - description = "content of data.aws_availability_zones.available" -} - -variable "vpc_main_cidr_block" { - description = "the start address of the main VPC cidr" - default = "10.0.0.0/16" -} - -variable "tags" { - description = "tags to apply to the resources" - default = {} - type = map(string) -} - -variable "enable_codesign_network" { - description = "Enable codesign network" - default = false - type = bool -} diff --git a/tf/modules/ooni_backendproxy/main.tf b/tf/modules/ooni_backendproxy/main.tf index 4689efc0..ad5b9bec 100644 --- a/tf/modules/ooni_backendproxy/main.tf +++ b/tf/modules/ooni_backendproxy/main.tf @@ -17,7 +17,7 @@ resource "aws_security_group" "nginx_sg" { cidr_blocks = ["0.0.0.0/0"] } - ingress { + ingress { protocol = "tcp" from_port = 9000 to_port = 9000 @@ -40,7 +40,7 @@ resource "aws_security_group" "nginx_sg" { "0.0.0.0/0", ] } - + 
egress { from_port = 0 to_port = 0 @@ -55,17 +55,30 @@ resource "aws_security_group" "nginx_sg" { tags = var.tags } +data "cloudinit_config" "ooni_backendproxy" { + base64_encode = true + + part { + filename = "init.cfg" + content_type = "text/cloud-config" + content = templatefile("${path.module}/templates/cloud-init.yml", { + wcth_addresses = var.wcth_addresses, + wcth_domain_suffix = var.wcth_domain_suffix, + backend_url = var.backend_url, + clickhouse_url = var.clickhouse_url, + clickhouse_port = var.clickhouse_port + }) + } + +} + resource "aws_launch_template" "ooni_backendproxy" { - name_prefix = "${var.name}-nginx-tmpl-" + name_prefix = "${var.name}-bkprx-tmpl-" image_id = data.aws_ssm_parameter.ubuntu_22_ami.value instance_type = var.instance_type key_name = var.key_name - user_data = base64encode(templatefile("${path.module}/templates/setup-backend-proxy.sh", { - backend_url = var.backend_url, - clickhouse_url = var.clickhouse_url, - clickhouse_port = var.clickhouse_port - })) + user_data = data.cloudinit_config.ooni_backendproxy.rendered lifecycle { create_before_destroy = true @@ -74,7 +87,7 @@ resource "aws_launch_template" "ooni_backendproxy" { network_interfaces { delete_on_termination = true associate_public_ip_address = true - subnet_id = var.subnet_id + subnet_id = var.subnet_id security_groups = [ aws_security_group.nginx_sg.id, ] @@ -113,8 +126,8 @@ resource "aws_alb_target_group" "oonibackend_proxy" { } resource "aws_lb_target_group_attachment" "oonibackend_proxy" { - target_id = aws_instance.oonibackend_proxy.id - target_group_arn = aws_alb_target_group.oonibackend_proxy.arn + target_id = aws_instance.oonibackend_proxy.id + target_group_arn = aws_alb_target_group.oonibackend_proxy.arn } resource "aws_route53_record" "clickhouse_proxy_alias" { diff --git a/tf/modules/ooni_backendproxy/templates/cloud-init.yml b/tf/modules/ooni_backendproxy/templates/cloud-init.yml new file mode 100644 index 00000000..49663223 --- /dev/null +++ b/tf/modules/ooni_backendproxy/templates/cloud-init.yml @@ -0,0 +1,58 @@ +package_update: true + +packages: + - nginx + - libnginx-mod-stream + +write_files: + - path: /etc/nginx/sites-available/default + content: | + server { + listen 80; + + server_name _; + + location / { + proxy_pass ${backend_url}; + proxy_http_version 1.1; + proxy_set_header Host \$host; + } + error_log /var/log/nginx/error.log; + } + + %{ if length(wcth_addresses) > 0 } + upstream wcths { + %{ for address in wcth_addresses } + server ${ address }; + %{ endfor } + } + server { + server_name *.${ wcth_domain_suffix }; + listen 80; + + location / { + proxy_pass http://wcths; + proxy_http_version 1.1; + proxy_set_header Host \$host; + } + } + %{ endif } + + - path: /etc/nginx/modules-enabled/99-stream.conf + content: | + stream { + upstream clickhouse_backend { + server ${clickhouse_url}:${clickhouse_port}; + } + + server { + listen 9000; + + proxy_pass clickhouse_backend; + } + + error_log /var/log/nginx/error.log; + } + +runcmd: + - service nginx restart diff --git a/tf/modules/ooni_backendproxy/templates/setup-backend-proxy.sh b/tf/modules/ooni_backendproxy/templates/setup-backend-proxy.sh deleted file mode 100644 index c32b3c68..00000000 --- a/tf/modules/ooni_backendproxy/templates/setup-backend-proxy.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/bin/bash -set -e - -sudo apt update -sudo apt install -y nginx - -tmpfile=$(mktemp /tmp/nginx-config.XXXXXX) -cat > $tmpfile < $tmpfile_stream < d.ipv4_address + } + records = [each.value] +} diff --git 
a/tf/modules/ooni_th_droplet/outputs.tf b/tf/modules/ooni_th_droplet/outputs.tf new file mode 100644 index 00000000..fdcad4af --- /dev/null +++ b/tf/modules/ooni_th_droplet/outputs.tf @@ -0,0 +1,10 @@ +output "droplet_ipv4_address" { + value = digitalocean_droplet.ooni_th_docker[*].ipv4_address +} + +output "droplet_addresses" { + # for why we use values, + # see: https://github.com/hashicorp/terraform/issues/23245#issuecomment-548391304 + # https://github.com/hashicorp/terraform/issues/22476 + value = values(aws_route53_record.ooni_th)[*].fqdn +} diff --git a/tf/modules/ooni_th_droplet/templates/cloud-init-docker.yml b/tf/modules/ooni_th_droplet/templates/cloud-init-docker.yml new file mode 100644 index 00000000..4f82bcc6 --- /dev/null +++ b/tf/modules/ooni_th_droplet/templates/cloud-init-docker.yml @@ -0,0 +1,175 @@ +apt: + sources: + docker.list: + source: "deb [arch=amd64 signed-by=$KEY_FILE] https://download.docker.com/linux/ubuntu $RELEASE stable" + key: | + -----BEGIN PGP PUBLIC KEY BLOCK----- + + mQINBFit2ioBEADhWpZ8/wvZ6hUTiXOwQHXMAlaFHcPH9hAtr4F1y2+OYdbtMuth + lqqwp028AqyY+PRfVMtSYMbjuQuu5byyKR01BbqYhuS3jtqQmljZ/bJvXqnmiVXh + 38UuLa+z077PxyxQhu5BbqntTPQMfiyqEiU+BKbq2WmANUKQf+1AmZY/IruOXbnq + L4C1+gJ8vfmXQt99npCaxEjaNRVYfOS8QcixNzHUYnb6emjlANyEVlZzeqo7XKl7 + UrwV5inawTSzWNvtjEjj4nJL8NsLwscpLPQUhTQ+7BbQXAwAmeHCUTQIvvWXqw0N + cmhh4HgeQscQHYgOJjjDVfoY5MucvglbIgCqfzAHW9jxmRL4qbMZj+b1XoePEtht + ku4bIQN1X5P07fNWzlgaRL5Z4POXDDZTlIQ/El58j9kp4bnWRCJW0lya+f8ocodo + vZZ+Doi+fy4D5ZGrL4XEcIQP/Lv5uFyf+kQtl/94VFYVJOleAv8W92KdgDkhTcTD + G7c0tIkVEKNUq48b3aQ64NOZQW7fVjfoKwEZdOqPE72Pa45jrZzvUFxSpdiNk2tZ + XYukHjlxxEgBdC/J3cMMNRE1F4NCA3ApfV1Y7/hTeOnmDuDYwr9/obA8t016Yljj + q5rdkywPf4JF8mXUW5eCN1vAFHxeg9ZWemhBtQmGxXnw9M+z6hWwc6ahmwARAQAB + tCtEb2NrZXIgUmVsZWFzZSAoQ0UgZGViKSA8ZG9ja2VyQGRvY2tlci5jb20+iQI3 + BBMBCgAhBQJYrefAAhsvBQsJCAcDBRUKCQgLBRYCAwEAAh4BAheAAAoJEI2BgDwO + v82IsskP/iQZo68flDQmNvn8X5XTd6RRaUH33kXYXquT6NkHJciS7E2gTJmqvMqd + tI4mNYHCSEYxI5qrcYV5YqX9P6+Ko+vozo4nseUQLPH/ATQ4qL0Zok+1jkag3Lgk + jonyUf9bwtWxFp05HC3GMHPhhcUSexCxQLQvnFWXD2sWLKivHp2fT8QbRGeZ+d3m + 6fqcd5Fu7pxsqm0EUDK5NL+nPIgYhN+auTrhgzhK1CShfGccM/wfRlei9Utz6p9P + XRKIlWnXtT4qNGZNTN0tR+NLG/6Bqd8OYBaFAUcue/w1VW6JQ2VGYZHnZu9S8LMc + FYBa5Ig9PxwGQOgq6RDKDbV+PqTQT5EFMeR1mrjckk4DQJjbxeMZbiNMG5kGECA8 + g383P3elhn03WGbEEa4MNc3Z4+7c236QI3xWJfNPdUbXRaAwhy/6rTSFbzwKB0Jm + ebwzQfwjQY6f55MiI/RqDCyuPj3r3jyVRkK86pQKBAJwFHyqj9KaKXMZjfVnowLh + 9svIGfNbGHpucATqREvUHuQbNnqkCx8VVhtYkhDb9fEP2xBu5VvHbR+3nfVhMut5 + G34Ct5RS7Jt6LIfFdtcn8CaSas/l1HbiGeRgc70X/9aYx/V/CEJv0lIe8gP6uDoW + FPIZ7d6vH+Vro6xuWEGiuMaiznap2KhZmpkgfupyFmplh0s6knymuQINBFit2ioB + EADneL9S9m4vhU3blaRjVUUyJ7b/qTjcSylvCH5XUE6R2k+ckEZjfAMZPLpO+/tF + M2JIJMD4SifKuS3xck9KtZGCufGmcwiLQRzeHF7vJUKrLD5RTkNi23ydvWZgPjtx + Q+DTT1Zcn7BrQFY6FgnRoUVIxwtdw1bMY/89rsFgS5wwuMESd3Q2RYgb7EOFOpnu + w6da7WakWf4IhnF5nsNYGDVaIHzpiqCl+uTbf1epCjrOlIzkZ3Z3Yk5CM/TiFzPk + z2lLz89cpD8U+NtCsfagWWfjd2U3jDapgH+7nQnCEWpROtzaKHG6lA3pXdix5zG8 + eRc6/0IbUSWvfjKxLLPfNeCS2pCL3IeEI5nothEEYdQH6szpLog79xB9dVnJyKJb + VfxXnseoYqVrRz2VVbUI5Blwm6B40E3eGVfUQWiux54DspyVMMk41Mx7QJ3iynIa + 1N4ZAqVMAEruyXTRTxc9XW0tYhDMA/1GYvz0EmFpm8LzTHA6sFVtPm/ZlNCX6P1X + zJwrv7DSQKD6GGlBQUX+OeEJ8tTkkf8QTJSPUdh8P8YxDFS5EOGAvhhpMBYD42kQ + pqXjEC+XcycTvGI7impgv9PDY1RCC1zkBjKPa120rNhv/hkVk/YhuGoajoHyy4h7 + ZQopdcMtpN2dgmhEegny9JCSwxfQmQ0zK0g7m6SHiKMwjwARAQABiQQ+BBgBCAAJ + BQJYrdoqAhsCAikJEI2BgDwOv82IwV0gBBkBCAAGBQJYrdoqAAoJEH6gqcPyc/zY + 1WAP/2wJ+R0gE6qsce3rjaIz58PJmc8goKrir5hnElWhPgbq7cYIsW5qiFyLhkdp + 
YcMmhD9mRiPpQn6Ya2w3e3B8zfIVKipbMBnke/ytZ9M7qHmDCcjoiSmwEXN3wKYI + mD9VHONsl/CG1rU9Isw1jtB5g1YxuBA7M/m36XN6x2u+NtNMDB9P56yc4gfsZVES + KA9v+yY2/l45L8d/WUkUi0YXomn6hyBGI7JrBLq0CX37GEYP6O9rrKipfz73XfO7 + JIGzOKZlljb/D9RX/g7nRbCn+3EtH7xnk+TK/50euEKw8SMUg147sJTcpQmv6UzZ + cM4JgL0HbHVCojV4C/plELwMddALOFeYQzTif6sMRPf+3DSj8frbInjChC3yOLy0 + 6br92KFom17EIj2CAcoeq7UPhi2oouYBwPxh5ytdehJkoo+sN7RIWua6P2WSmon5 + U888cSylXC0+ADFdgLX9K2zrDVYUG1vo8CX0vzxFBaHwN6Px26fhIT1/hYUHQR1z + VfNDcyQmXqkOnZvvoMfz/Q0s9BhFJ/zU6AgQbIZE/hm1spsfgvtsD1frZfygXJ9f + irP+MSAI80xHSf91qSRZOj4Pl3ZJNbq4yYxv0b1pkMqeGdjdCYhLU+LZ4wbQmpCk + SVe2prlLureigXtmZfkqevRz7FrIZiu9ky8wnCAPwC7/zmS18rgP/17bOtL4/iIz + QhxAAoAMWVrGyJivSkjhSGx1uCojsWfsTAm11P7jsruIL61ZzMUVE2aM3Pmj5G+W + 9AcZ58Em+1WsVnAXdUR//bMmhyr8wL/G1YO1V3JEJTRdxsSxdYa4deGBBY/Adpsw + 24jxhOJR+lsJpqIUeb999+R8euDhRHG9eFO7DRu6weatUJ6suupoDTRWtr/4yGqe + dKxV3qQhNLSnaAzqW/1nA3iUB4k7kCaKZxhdhDbClf9P37qaRW467BLCVO/coL3y + Vm50dwdrNtKpMBh3ZpbB1uJvgi9mXtyBOMJ3v8RZeDzFiG8HdCtg9RvIt/AIFoHR + H3S+U79NT6i0KPzLImDfs8T7RlpyuMc4Ufs8ggyg9v3Ae6cN3eQyxcK3w0cbBwsh + /nQNfsA6uu+9H7NhbehBMhYnpNZyrHzCmzyXkauwRAqoCbGCNykTRwsur9gS41TQ + M8ssD1jFheOJf3hODnkKU+HKjvMROl1DK7zdmLdNzA1cvtZH/nCC9KPj1z8QC47S + xx+dTZSx4ONAhwbS/LN3PoKtn8LPjY9NP9uDWI+TWYquS2U+KHDrBDlsgozDbs/O + jCxcpDzNmXpWQHEtHU7649OXHP7UeNST1mCUCH5qdank0V1iejF6/CfTFU4MfcrG + YT90qFF93M3v01BbxP+EIY2/9tiIPbrd + =0YYh + -----END PGP PUBLIC KEY BLOCK----- +package_update: true +packages: + - docker-ce + - docker-ce-cli + - containerd.io + - docker-buildx-plugin + - docker-compose-plugin + - prometheus-node-exporter + - nginx + +users: + - name: deployer + ssh-authorized-keys: + - ${deployer_key} + sudo: ["ALL=(ALL) NOPASSWD:ALL"] + groups: + - sudo + - docker + shell: /bin/bash + +write_files: + - path: /etc/ssh/sshd_config + content: | + PermitRootLogin no + PermitEmptyPasswords no + PasswordAuthentication no + KbdInteractiveAuthentication no + UsePAM yes + PubkeyAuthentication yes + X11Forwarding no + PrintMotd no + AcceptEnv LANG LC_* + AllowUsers deployer + + - path: /etc/docker/daemon.json + content: | + { + "ipv6": true, + "fixed-cidr-v6": "2001:db8:1::/64" + } + + - path: /etc/nginx/sites-available/default + content: | + proxy_cache_path /var/cache/nginx levels=1:2 keys_zone=thcache:100M + max_size=5g inactive=24h use_temp_path=off; + + server { + listen 80; + server_name _; + gzip on; + resolver 127.0.0.1; + + # test helper application metrics + location /metrics { + allow ${monitoring_ip}; + deny all; + + proxy_pass http://127.0.0.1:8080; + } + + # local test helper + location / { + proxy_set_header X-Forwarded-Proto $scheme; + proxy_read_timeout 900; + proxy_pass http://127.0.0.1:8080; + + proxy_cache thcache; + proxy_cache_min_uses 1; + proxy_cache_lock on; + proxy_cache_lock_timeout 30; + proxy_cache_lock_age 30; + proxy_cache_use_stale error timeout invalid_header updating; + # Cache POST without headers set by the test helper! 
+ proxy_cache_methods POST; + proxy_cache_key "$request_uri|$request_body"; + proxy_cache_valid 200 10m; + proxy_cache_valid any 0; + add_header X-Cache-Status $upstream_cache_status; + } + } + + server { + listen 9001; + server_name localhost; + + allow ${monitoring_ip}; + deny all; + + # Metrics from node_exporter + location = /metrics { + proxy_pass http://127.0.0.1:9100; + } + } + +runcmd: + - sshd -t + - systemctl restart sshd + - systemctl restart docker + - ufw default deny incoming + - ufw default allow outgoing + - ufw allow 22/tcp + - ufw allow 80/tcp + - ufw allow 443/tcp + - ufw allow from ${monitoring_ip} proto tcp to any port 9001 + - ufw enable + - service nginx restart + - docker container rm -f oonith + - docker run -d -e PROMETHEUS_METRICS_PASSWORD='${metrics_password}' -p 8080:80 --restart unless-stopped --name oonith ooni/oonith-oohelperd:latest diff --git a/tf/modules/ooni_th_droplet/templates/cloud-init.yml b/tf/modules/ooni_th_droplet/templates/cloud-init.yml new file mode 100644 index 00000000..111502ef --- /dev/null +++ b/tf/modules/ooni_th_droplet/templates/cloud-init.yml @@ -0,0 +1,59 @@ +apt: + sources: + ooni.list: + source: "deb [trusted=yes] https://ooni-internal-deb.s3.eu-central-1.amazonaws.com unstable main" + key: | + -----BEGIN PGP PUBLIC KEY BLOCK----- + + mDMEYGISFRYJKwYBBAHaRw8BAQdA4VxoR0gSsH56BbVqYdK9HNQ0Dj2YFVbvKIIZ + JKlaW920Mk9PTkkgcGFja2FnZSBzaWduaW5nIDxjb250YWN0QG9wZW5vYnNlcnZh + dG9yeS5vcmc+iJYEExYIAD4WIQS1oI8BeW5/UhhhtEk3LR/ycfLdUAUCYGISFQIb + AwUJJZgGAAULCQgHAgYVCgkICwIEFgIDAQIeAQIXgAAKCRA3LR/ycfLdUFk+AQCb + gsUQsAQGxUFvxk1XQ4RgEoh7wy2yTuK8ZCkSHJ0HWwD/f2OAjDigGq07uJPYw7Uo + Ih9+mJ/ubwiPMzUWF6RSdgu4OARgYhIVEgorBgEEAZdVAQUBAQdAx4p1KerwcIhX + HfM9LbN6Gi7z9j4/12JKYOvr0d0yC30DAQgHiH4EGBYIACYWIQS1oI8BeW5/Uhhh + tEk3LR/ycfLdUAUCYGISFQIbDAUJJZgGAAAKCRA3LR/ycfLdUL4cAQCs53fLphhy + 6JMwVhRs02LXi1lntUtw1c+EMn6t7XNM6gD+PXpbgSZwoV3ZViLqr58o9fZQtV3s + oN7jfdbznrWVigE= + =PtYb + -----END PGP PUBLIC KEY BLOCK----- +package_update: true +packages: + - oohelperd + - nginx + +write_files: + - path: /etc/nginx/sites-available/default + content: | + proxy_cache_path /var/cache/nginx levels=1:2 keys_zone=thcache:100M + max_size=5g inactive=24h use_temp_path=off; + + server { + listen 80; + server_name _; + gzip on; + resolver 127.0.0.1; + # local test helper + location / { + proxy_set_header X-Forwarded-Proto $scheme; + proxy_read_timeout 900; + proxy_pass http://127.0.0.1:8080; + + proxy_cache thcache; + proxy_cache_min_uses 1; + proxy_cache_lock on; + proxy_cache_lock_timeout 30; + proxy_cache_lock_age 30; + proxy_cache_use_stale error timeout invalid_header updating; + # Cache POST without headers set by the test helper! 
+ proxy_cache_methods POST; + proxy_cache_key "$request_uri|$request_body"; + proxy_cache_valid 200 10m; + proxy_cache_valid any 0; + add_header X-Cache-Status $upstream_cache_status; + + } + } + +runcmd: + - service nginx restart diff --git a/tf/modules/ooni_th_droplet/variables.tf b/tf/modules/ooni_th_droplet/variables.tf new file mode 100644 index 00000000..522e3333 --- /dev/null +++ b/tf/modules/ooni_th_droplet/variables.tf @@ -0,0 +1,40 @@ +variable "stage" { + type = string +} + +variable "name" { + description = "Name of the droplets" + type = string + default = "ooni-wcth" +} + +variable "instance_location" { + type = string + default = "fra1" +} + +variable "instance_size" { + # s-2vcpu-4gb + type = string + default = "s-1vcpu-1gb" +} + +variable "droplet_count" { + default = 1 +} + +variable "ssh_keys" { + type = list(string) +} + +variable "deployer_key" { + type = string +} + +variable "metrics_password" { + type = string +} + +variable "dns_zone_ooni_io" { + type = string +} diff --git a/tf/modules/ooniapi_frontend/main.tf b/tf/modules/ooniapi_frontend/main.tf index 465e3ca7..c72937a2 100644 --- a/tf/modules/ooniapi_frontend/main.tf +++ b/tf/modules/ooniapi_frontend/main.tf @@ -1,5 +1,6 @@ locals { - name = "ooni-tier0-api-frontend" + name = "ooni-tier0-api-frontend" + direct_domain_suffix = "${var.stage}.ooni.io" } resource "aws_alb" "ooniapi" { @@ -33,7 +34,8 @@ resource "aws_alb_listener" "ooniapi_listener_https" { port = "443" protocol = "HTTPS" ssl_policy = "ELBSecurityPolicy-2016-08" - certificate_arn = aws_acm_certificate_validation.ooniapi.certificate_arn + certificate_arn = var.ooniapi_acm_certificate_arn + # In prod this has been manually applied default_action { target_group_arn = var.oonibackend_proxy_target_group_arn @@ -43,25 +45,27 @@ resource "aws_alb_listener" "ooniapi_listener_https" { tags = var.tags } -resource "aws_lb_listener_rule" "ooniapi_oonirun_rule" { +resource "aws_alb_listener_rule" "ooniapi_th" { listener_arn = aws_alb_listener.ooniapi_listener_https.arn - priority = 100 + priority = 90 action { type = "forward" - target_group_arn = var.ooniapi_oonirun_target_group_arn + target_group_arn = var.oonibackend_proxy_target_group_arn } condition { - path_pattern { - values = ["/api/v2/oonirun/*"] + host_header { + values = var.oonith_domains } } + + tags = var.tags } resource "aws_lb_listener_rule" "ooniapi_ooniauth_rule" { listener_arn = aws_alb_listener.ooniapi_listener_https.arn - priority = 101 + priority = 108 action { type = "forward" @@ -81,83 +85,119 @@ resource "aws_lb_listener_rule" "ooniapi_ooniauth_rule" { } } -resource "aws_lb_listener_rule" "ooniapi_ooniprobe_rule" { +resource "aws_lb_listener_rule" "ooniapi_ooniauth_rule_host" { listener_arn = aws_alb_listener.ooniapi_listener_https.arn - priority = 102 + priority = 109 action { type = "forward" - target_group_arn = var.ooniapi_ooniprobe_target_group_arn + target_group_arn = var.ooniapi_ooniauth_target_group_arn } condition { - path_pattern { - values = [ - "/api/v2/ooniprobe/*", - ] + host_header { + values = ["ooniauth.${local.direct_domain_suffix}"] } } } -resource "aws_lb_listener_rule" "ooniapi_oonifindings_rule" { +resource "aws_lb_listener_rule" "ooniapi_oonirun_rule" { listener_arn = aws_alb_listener.ooniapi_listener_https.arn - priority = 103 + priority = 110 action { type = "forward" - target_group_arn = var.ooniapi_oonifindings_target_group_arn + target_group_arn = var.ooniapi_oonirun_target_group_arn } condition { path_pattern { - values = ["/api/v1/incidents/*"] + values = 
["/api/v2/oonirun/*"] } + } } -## DNS +resource "aws_lb_listener_rule" "ooniapi_oonirun_rule_host" { + listener_arn = aws_alb_listener.ooniapi_listener_https.arn + priority = 111 -resource "aws_route53_record" "ooniapi" { - zone_id = var.dns_zone_ooni_io - name = "api.${var.stage}.ooni.io" - type = "A" + action { + type = "forward" + target_group_arn = var.ooniapi_oonirun_target_group_arn + } - alias { - name = aws_alb.ooniapi.dns_name - zone_id = aws_alb.ooniapi.zone_id - evaluate_target_health = true + condition { + host_header { + values = ["oonirun.${local.direct_domain_suffix}"] + } } + } -resource "aws_acm_certificate" "ooniapi" { - domain_name = "api.${var.stage}.ooni.io" - validation_method = "DNS" +resource "aws_lb_listener_rule" "ooniapi_ooniprobe_rule" { + listener_arn = aws_alb_listener.ooniapi_listener_https.arn + priority = 120 - tags = var.tags + action { + type = "forward" + target_group_arn = var.ooniapi_ooniprobe_target_group_arn + } - lifecycle { - create_before_destroy = true + condition { + path_pattern { + values = [ + "/api/v2/ooniprobe/*", + ] + } } } -resource "aws_route53_record" "ooniapi_cert_validation" { - for_each = { - for dvo in aws_acm_certificate.ooniapi.domain_validation_options : dvo.domain_name => { - name = dvo.resource_record_name - record = dvo.resource_record_value - type = dvo.resource_record_type +resource "aws_lb_listener_rule" "ooniapi_ooniprobe_rule_host" { + listener_arn = aws_alb_listener.ooniapi_listener_https.arn + priority = 121 + + action { + type = "forward" + target_group_arn = var.ooniapi_ooniprobe_target_group_arn + } + + + condition { + host_header { + values = ["ooniprobe.${local.direct_domain_suffix}"] } } - allow_overwrite = true - name = each.value.name - records = [each.value.record] - ttl = 60 - type = each.value.type - zone_id = var.dns_zone_ooni_io } -resource "aws_acm_certificate_validation" "ooniapi" { - certificate_arn = aws_acm_certificate.ooniapi.arn - validation_record_fqdns = [for record in aws_route53_record.ooniapi_cert_validation : record.fqdn] +resource "aws_lb_listener_rule" "ooniapi_oonifindings_rule" { + listener_arn = aws_alb_listener.ooniapi_listener_https.arn + priority = 130 + + action { + type = "forward" + target_group_arn = var.ooniapi_oonifindings_target_group_arn + } + + condition { + path_pattern { + values = ["/api/v1/incidents/*"] + } + } } + +resource "aws_lb_listener_rule" "ooniapi_oonifindings_rule_host" { + listener_arn = aws_alb_listener.ooniapi_listener_https.arn + priority = 131 + + action { + type = "forward" + target_group_arn = var.ooniapi_oonifindings_target_group_arn + } + condition { + host_header { + values = ["oonifindings.${local.direct_domain_suffix}"] + } + } +} \ No newline at end of file diff --git a/tf/modules/ooniapi_frontend/outputs.tf b/tf/modules/ooniapi_frontend/outputs.tf index 732c7ad5..17c1717b 100644 --- a/tf/modules/ooniapi_frontend/outputs.tf +++ b/tf/modules/ooniapi_frontend/outputs.tf @@ -1,11 +1,11 @@ -output "ooniapi_ooni_io_fqdn" { - value = aws_route53_record.ooniapi.name -} - output "ooniapi_dns_name" { value = aws_alb.ooniapi.dns_name } +output "ooniapi_dns_zone_id" { + value = aws_alb.ooniapi.zone_id +} + output "ooniapi_listener_http_arn" { value = aws_alb_listener.ooniapi_listener_http.arn } diff --git a/tf/modules/ooniapi_frontend/variables.tf b/tf/modules/ooniapi_frontend/variables.tf index e29568e2..10d9bef7 100644 --- a/tf/modules/ooniapi_frontend/variables.tf +++ b/tf/modules/ooniapi_frontend/variables.tf @@ -19,12 +19,15 @@ variable 
"oonibackend_proxy_target_group_arn" { variable "ooniapi_oonirun_target_group_arn" { description = "arn for the target group of the oonirun service" } + variable "ooniapi_ooniauth_target_group_arn" { description = "arn for the target group of the ooniauth service" } + variable "ooniapi_ooniprobe_target_group_arn" { description = "arn for the target group of the ooniprobe service" } + variable "ooniapi_oonifindings_target_group_arn" { description = "arn for the target group of the oonifindings service" } @@ -41,3 +44,12 @@ variable "ooniapi_service_security_groups" { description = "the shared web security group from the ecs cluster" type = list(string) } + +variable "oonith_domains" { + type = list(string) + default = ["*.th.dev.ooni.io"] +} + +variable "ooniapi_acm_certificate_arn" { + type = string +} \ No newline at end of file diff --git a/tf/modules/ooniapi_service/main.tf b/tf/modules/ooniapi_service/main.tf index eb49cf1f..ad429a01 100644 --- a/tf/modules/ooniapi_service/main.tf +++ b/tf/modules/ooniapi_service/main.tf @@ -4,7 +4,7 @@ locals { # vocals are stripped. stripped_name = replace(replace(var.service_name, "ooni", ""), "[aeiou]", "") # Short prefix should be less than 5 characters - short_prefix = "oo${substr(var.service_name, 0, 3)}" + short_prefix = "O${substr(local.stripped_name, 0, 3)}" } resource "aws_iam_role" "ooniapi_service_task" { @@ -120,40 +120,22 @@ resource "aws_ecs_service" "ooniapi_service" { } load_balancer { - target_group_arn = aws_alb_target_group.ooniapi_service_direct.id + target_group_arn = aws_alb_target_group.ooniapi_service.id container_name = local.name container_port = "80" } - load_balancer { - target_group_arn = aws_alb_target_group.ooniapi_service_mapped.id - container_name = local.name - container_port = "80" - } - - force_new_deployment = true - - tags = var.tags -} - -# The direct target group is used for the direct domain name mapping -resource "aws_alb_target_group" "ooniapi_service_direct" { - name_prefix = "${local.short_prefix}D" - port = 80 - protocol = "HTTP" - vpc_id = var.vpc_id - target_type = "instance" - lifecycle { create_before_destroy = true } + force_new_deployment = true + tags = var.tags } -# The mapped target group is used for mapping it in the main API load balancer -resource "aws_alb_target_group" "ooniapi_service_mapped" { - name_prefix = "${local.short_prefix}M" +resource "aws_alb_target_group" "ooniapi_service" { + name_prefix = "${local.short_prefix}M-" port = 80 protocol = "HTTP" vpc_id = var.vpc_id diff --git a/tf/modules/ooniapi_service/outputs.tf b/tf/modules/ooniapi_service/outputs.tf index e035171d..85f5994d 100644 --- a/tf/modules/ooniapi_service/outputs.tf +++ b/tf/modules/ooniapi_service/outputs.tf @@ -3,5 +3,5 @@ output "ecs_service_name" { } output "alb_target_group_id" { - value = aws_alb_target_group.ooniapi_service_mapped.id + value = aws_alb_target_group.ooniapi_service.id } diff --git a/tf/modules/ooniapi_service/variables.tf b/tf/modules/ooniapi_service/variables.tf index d5e55067..f83e16d7 100644 --- a/tf/modules/ooniapi_service/variables.tf +++ b/tf/modules/ooniapi_service/variables.tf @@ -45,7 +45,7 @@ variable "service_desired_count" { } variable "task_cpu" { - default = 1024 + default = 256 description = "https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html#task_size" } diff --git a/tf/modules/postgresql/variables.tf b/tf/modules/postgresql/variables.tf index f72b62bd..279e39c3 100644 --- a/tf/modules/postgresql/variables.tf +++ 
b/tf/modules/postgresql/variables.tf @@ -48,7 +48,7 @@ variable "db_max_allocated_storage" { } variable "db_engine_version" { - default = "16.1" + default = "16.3" } variable "db_parameter_group" { From 48857b32652757e0784b9f31622336021360680a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Arturo=20Filast=C3=B2?= Date: Wed, 2 Oct 2024 10:31:55 +0200 Subject: [PATCH 09/10] Data.ooni.org (#105) Add support for the production OONI data pipeline v5 deployment in Ansible. It also moves some of the ooni/sysadmin roles to devops and updates them to the new patterns. There is also a new node_exporter role, included as part of the bootstrap role, for host monitoring. Eventually this should be deployed on every new host that we bootstrap manually outside of AWS. Implements: https://github.com/ooni/devops/issues/82 --- ansible/README.md | 7 +- ansible/group_vars/all/vars.yml | 2 +- ansible/group_vars/dev/vars.yml | 1 + ansible/group_vars/prod/vars.yml | 1 + ansible/host_vars/data.ooni.org | 4 +- ansible/host_vars/oonidata.ooni.org | 2 + ansible/inventory | 11 +- ansible/playbook-bootstrap.yml | 8 ++ ansible/playbook.yml | 12 ++ ansible/requirements.yml | 3 + ansible/roles/bootstrap/tasks/main.yml | 45 +++++++ ansible/roles/jupyterhub/tasks/main.yml | 76 ----------- .../templates/jupyterhub_config.py.j2 | 2 - ansible/roles/jupyterhub/vars/main.yml | 8 -- ansible/roles/miniconda/defaults/main.yml | 2 + ansible/roles/miniconda/tasks/install.yml | 23 ++++ ansible/roles/miniconda/tasks/main.yml | 21 +++ ansible/roles/nftables/README.adoc | 25 ++++ ansible/roles/nftables/handlers/main.yml | 5 + ansible/roles/nftables/tasks/main.yml | 45 +++++++ .../roles/nftables/templates/nftables.conf | 41 ++++++ .../roles/nginx/files/ffdhe2048_dhparam.pem | 8 ++ .../roles/nginx/files/ssl_intermediate.conf | 3 + ansible/roles/nginx/files/ssl_modern.conf | 4 + ansible/roles/nginx/handlers/main.yml | 15 +++ ansible/roles/nginx/tasks/main.yml | 39 ++++++ ansible/roles/nginx/templates/nginx.conf | 122 ++++++++++++++++++ ansible/roles/oonidata/defaults/main.yml | 8 ++ ansible/roles/oonidata/handlers/main.yml | 16 +++ ansible/roles/oonidata/tasks/jupyterhub.yml | 82 ++++++++++++ ansible/roles/oonidata/tasks/main.yml | 59 +++++++++ .../oonidata/tasks/oonipipeline-worker.yml | 57 ++++++++ .../oonidata/templates/jupyterhub.service.j2 | 17 +++ .../templates/jupyterhub_config.py.j2 | 3 + .../oonidata/templates/nginx-jupyterhub.j2 | 40 ++++++ .../templates/oonipipeline-config.toml.j2 | 7 + .../templates/oonipipeline-worker.service.j2 | 17 +++ .../handlers/main.yml | 21 +++ .../prometheus_node_exporter/tasks/main.yml | 56 ++++++++ .../templates/nginx-prometheus.j2 | 20 +++ ansible/roles/ssh_users/tasks/main.yml | 2 +- tf/environments/prod/dns_records.tf | 8 ++ 42 files changed, 855 insertions(+), 93 deletions(-) create mode 100644 ansible/group_vars/dev/vars.yml create mode 100644 ansible/group_vars/prod/vars.yml create mode 100644 ansible/host_vars/oonidata.ooni.org create mode 100644 ansible/playbook-bootstrap.yml create mode 100644 ansible/roles/bootstrap/tasks/main.yml delete mode 100644 ansible/roles/jupyterhub/tasks/main.yml delete mode 100644 ansible/roles/jupyterhub/templates/jupyterhub_config.py.j2 delete mode 100644 ansible/roles/jupyterhub/vars/main.yml create mode 100644 ansible/roles/miniconda/defaults/main.yml create mode 100644 ansible/roles/miniconda/tasks/install.yml create mode 100644 ansible/roles/miniconda/tasks/main.yml create mode 100644 ansible/roles/nftables/README.adoc create mode 100644 
ansible/roles/nftables/handlers/main.yml create mode 100644 ansible/roles/nftables/tasks/main.yml create mode 100755 ansible/roles/nftables/templates/nftables.conf create mode 100644 ansible/roles/nginx/files/ffdhe2048_dhparam.pem create mode 100644 ansible/roles/nginx/files/ssl_intermediate.conf create mode 100644 ansible/roles/nginx/files/ssl_modern.conf create mode 100644 ansible/roles/nginx/handlers/main.yml create mode 100644 ansible/roles/nginx/tasks/main.yml create mode 100644 ansible/roles/nginx/templates/nginx.conf create mode 100644 ansible/roles/oonidata/defaults/main.yml create mode 100644 ansible/roles/oonidata/handlers/main.yml create mode 100644 ansible/roles/oonidata/tasks/jupyterhub.yml create mode 100644 ansible/roles/oonidata/tasks/main.yml create mode 100644 ansible/roles/oonidata/tasks/oonipipeline-worker.yml create mode 100644 ansible/roles/oonidata/templates/jupyterhub.service.j2 create mode 100644 ansible/roles/oonidata/templates/jupyterhub_config.py.j2 create mode 100644 ansible/roles/oonidata/templates/nginx-jupyterhub.j2 create mode 100644 ansible/roles/oonidata/templates/oonipipeline-config.toml.j2 create mode 100644 ansible/roles/oonidata/templates/oonipipeline-worker.service.j2 create mode 100644 ansible/roles/prometheus_node_exporter/handlers/main.yml create mode 100644 ansible/roles/prometheus_node_exporter/tasks/main.yml create mode 100644 ansible/roles/prometheus_node_exporter/templates/nginx-prometheus.j2 diff --git a/ansible/README.md b/ansible/README.md index 59b04f95..60da1de5 100644 --- a/ansible/README.md +++ b/ansible/README.md @@ -8,7 +8,12 @@ pyenv activate ooni-devops Install deps: ``` -pip install ansible dnspython boto3 +pip install ansible dnspython boto3 passlib +``` + +Install ansible galaxy modules: +``` +ansible-galaxy install -r requirements.yml ``` Setup AWS credentials, you should add 2 profiles called `oonidevops_user_dev` and `oonidevops_user_prod` which have access to the development and production environment respectively diff --git a/ansible/group_vars/all/vars.yml b/ansible/group_vars/all/vars.yml index de9d63b7..936fd374 100644 --- a/ansible/group_vars/all/vars.yml +++ b/ansible/group_vars/all/vars.yml @@ -26,4 +26,4 @@ ssh_users: admin_usernames: [ art, majakomel, mehul, norbel ] root_usernames: [ art, mehul ] non_admin_usernames: [ agrabeli ] -deactivated_usernames: [ sbs, federico, sarath ] +deactivated_usernames: [ sbs, federico, sarath ] \ No newline at end of file diff --git a/ansible/group_vars/dev/vars.yml b/ansible/group_vars/dev/vars.yml new file mode 100644 index 00000000..a952a5d4 --- /dev/null +++ b/ansible/group_vars/dev/vars.yml @@ -0,0 +1 @@ +prometheus_metrics_password: "{{ lookup('amazon.aws.aws_secret', 'oonidevops/ooni_services/prometheus_metrics_password', profile='oonidevops_user_dev') }}" \ No newline at end of file diff --git a/ansible/group_vars/prod/vars.yml b/ansible/group_vars/prod/vars.yml new file mode 100644 index 00000000..0248a20a --- /dev/null +++ b/ansible/group_vars/prod/vars.yml @@ -0,0 +1 @@ +prometheus_metrics_password: "{{ lookup('amazon.aws.aws_secret', 'oonidevops/ooni_services/prometheus_metrics_password', profile='oonidevops_user_prod') }}" \ No newline at end of file diff --git a/ansible/host_vars/data.ooni.org b/ansible/host_vars/data.ooni.org index c3d9417f..7763cdf7 100644 --- a/ansible/host_vars/data.ooni.org +++ b/ansible/host_vars/data.ooni.org @@ -55,6 +55,6 @@ ssh_users: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMJYsbeTjdma5cKyZISOFQfHbwwlZbWugPx9haeOx1UR" ] admin_usernames: [ art, 
majakomel, mehul, norbel ] -non_admin_usernames: [ ain, siti, ingrid, joss ] +non_admin_usernames: [ ain, siti, ingrid, joss, vasilis ] jupyterhub_allowed_users: "{{ ssh_users }}" -admin_group_name: adm +admin_group_name: adm \ No newline at end of file diff --git a/ansible/host_vars/oonidata.ooni.org b/ansible/host_vars/oonidata.ooni.org new file mode 100644 index 00000000..4dd0e917 --- /dev/null +++ b/ansible/host_vars/oonidata.ooni.org @@ -0,0 +1,2 @@ +admin_group_name: adm +tls_cert_dir: /var/lib/dehydrated/certs diff --git a/ansible/inventory b/ansible/inventory index 77e90223..1e13c160 100644 --- a/ansible/inventory +++ b/ansible/inventory @@ -1,6 +1,13 @@ [all] -monitoring.ooni.org -openvpn-server1.ooni.io # This requires manual setup of ~/.ssh/config #codesign-box + +[prod] data.ooni.org +oonidata.ooni.org +monitoring.ooni.org +openvpn-server1.ooni.io +notebook.ooni.org + +[dev] +oonidatatest.ooni.nu diff --git a/ansible/playbook-bootstrap.yml b/ansible/playbook-bootstrap.yml new file mode 100644 index 00000000..ab0d34d3 --- /dev/null +++ b/ansible/playbook-bootstrap.yml @@ -0,0 +1,8 @@ +# This playbook is to be run on hosts that don't support bootstrapping the base +# OS setup with something other than ansible (eg. cloud-init) +- name: Bootstrap the ssh_users on target host + hosts: all + remote_user: root + roles: + - ssh_users + - bootstrap diff --git a/ansible/playbook.yml b/ansible/playbook.yml index 5f4ee32a..dece3fbe 100644 --- a/ansible/playbook.yml +++ b/ansible/playbook.yml @@ -1,4 +1,10 @@ --- +- name: Ensure all hosts are bootstrapped correctly + hosts: all + become: yes + roles: + - bootstrap + - name: ClickHouse servers hosts: clickhouse_servers user: admin @@ -36,6 +42,12 @@ roles: - ssh_users +- name: Deploy oonidata hosts + hosts: oonidata.ooni.org + become: true + roles: + - oonidata + # commented out due to the fact it requires manual config of ~/.ssh/config #- name: Setup codesign box # hosts: codesign-box diff --git a/ansible/requirements.yml b/ansible/requirements.yml index 04ff726b..3b4d5ae0 100644 --- a/ansible/requirements.yml +++ b/ansible/requirements.yml @@ -1 +1,4 @@ - src: willshersystems.sshd +- src: nginxinc.nginx +- src: geerlingguy.certbot +- src: geerlingguy.node_exporter \ No newline at end of file diff --git a/ansible/roles/bootstrap/tasks/main.yml b/ansible/roles/bootstrap/tasks/main.yml new file mode 100644 index 00000000..88cd3a78 --- /dev/null +++ b/ansible/roles/bootstrap/tasks/main.yml @@ -0,0 +1,45 @@ +- ansible.builtin.include_role: + name: ssh_users + tags: + - ssh_users + +- name: Set the hostname to inventory_hostname + ansible.builtin.hostname: + name: "{{ inventory_hostname }}" + +- name: Install common packages + ansible.builtin.apt: + name: + - bash-completion + - ca-certificates + - curl + - file + - git + - htop + - iotop + - lsof + - lvm2 + - man-db + - mtr + - net-tools + - openssl + - python3-passlib + - rsync + - screen + - strace + - tcpdump + - tmux + - vim + state: latest + update_cache: yes + install_recommends: no + +- ansible.builtin.include_role: + name: nftables + tags: + - nftables + +- ansible.builtin.include_role: + name: prometheus_node_exporter + tags: + - node_exporter diff --git a/ansible/roles/jupyterhub/tasks/main.yml b/ansible/roles/jupyterhub/tasks/main.yml deleted file mode 100644 index 96ea8900..00000000 --- a/ansible/roles/jupyterhub/tasks/main.yml +++ /dev/null @@ -1,76 +0,0 @@ ---- -- name: Check if TLJH is installed - ansible.builtin.stat: - path: "{{ jupyterhub_tljh_prefix }}" - register: 
tljh_directory - -- name: Install required packages for TLJH - become: true - ansible.builtin.apt: - name: - - curl - - python3 - - python3-pip - - python3-dev - - python3-venv - - build-essential - - cifs-utils - state: present - update_cache: true - -- name: Download the TLJH installer - become: true - ansible.builtin.get_url: - url: "https://tljh.jupyter.org/bootstrap.py" - dest: "/tmp/tljh-bootstrap.py" - checksum: "sha256:2e20bf204c94e1b6eef31499c93f6a14324117deec2eb398a142cb14acbeedd1" - mode: "0700" - when: not tljh_directory.stat.exists - -- name: Run the TLJH installer - become: true - ansible.builtin.shell: | - python3 /tmp/tljh-bootstrap.py --admin {{ jupyterhub_tljh_admin_user }}:{{ jupyterhub_tljh_admin_password }} - creates: "{{ jupyterhub_tljh_prefix }}" - when: not tljh_directory.stat.exists - -- name: Restart the JupyterHub service with daemon-reload - become: true - tags: - - config - ansible.builtin.systemd: - name: jupyterhub - state: restarted - enabled: true - daemon_reload: true - when: not tljh_directory.stat.exists - -- name: Configure Let's Encrypt email and domain - become: true - ansible.builtin.shell: | - tljh-config set https.enabled true - tljh-config set https.letsencrypt.email {{ jupyterhub_letsencrypt_email }} - tljh-config add-item https.letsencrypt.domains {{ jupyterhub_letsencrypt_domain }} - tljh-config reload proxy - vars: - jupyterhub_letsencrypt_domain: "{{ inventory_hostname }}" - register: tljh_letsencrypt - changes_when: tljh_letsencrypt.rc != 0 - when: not tljh_directory.stat.exists - -- name: Copy the JupyterHub config - become: true - ansible.builtin.template: - src: jupyterhub_config.py.j2 - dest: "{{ jupyterhub_config_dest }}" - mode: preserve - -- name: Restart the JupyterHub service with daemon-reload - become: true - tags: - - config - ansible.builtin.systemd: - name: jupyterhub - state: restarted - enabled: true - daemon_reload: true diff --git a/ansible/roles/jupyterhub/templates/jupyterhub_config.py.j2 b/ansible/roles/jupyterhub/templates/jupyterhub_config.py.j2 deleted file mode 100644 index 0f5d7d36..00000000 --- a/ansible/roles/jupyterhub/templates/jupyterhub_config.py.j2 +++ /dev/null @@ -1,2 +0,0 @@ -# c.Spawner.cmd = ['/srv/jupyterhub/conda/bin/jupyterhub-singleuser'] -c.Authenticator.allowed_users = { {{jupyterhub_allowed_users | join(",")}} } diff --git a/ansible/roles/jupyterhub/vars/main.yml b/ansible/roles/jupyterhub/vars/main.yml deleted file mode 100644 index 015989bf..00000000 --- a/ansible/roles/jupyterhub/vars/main.yml +++ /dev/null @@ -1,8 +0,0 @@ -jupyterhub_letsencrypt_email: admin@openobservatory.org - -jupyterhub_tljh_admin_user: admin -jupyterhub_tljh_admin_pass: oonity! 
-jupyterhub_tljh_prefix: /opt/tljh -jupyterhub_config_dest: /opt/tljh/config/jupyterhub_config.d/tljh.py - -jupyterhub_allowed_users: [] diff --git a/ansible/roles/miniconda/defaults/main.yml b/ansible/roles/miniconda/defaults/main.yml new file mode 100644 index 00000000..988c38eb --- /dev/null +++ b/ansible/roles/miniconda/defaults/main.yml @@ -0,0 +1,2 @@ +miniconda_install_dir: /opt/miniconda +admin_group_name: admin diff --git a/ansible/roles/miniconda/tasks/install.yml b/ansible/roles/miniconda/tasks/install.yml new file mode 100644 index 00000000..7366e2ff --- /dev/null +++ b/ansible/roles/miniconda/tasks/install.yml @@ -0,0 +1,23 @@ +--- +- name: Ensure miniconda directory exists + ansible.builtin.file: + path: "{{ miniconda_install_dir }}" + state: directory + owner: miniconda + group: "{{ admin_group_name }}" + +- name: Download the miniconda installer + ansible.builtin.get_url: + url: "https://repo.anaconda.com/miniconda/Miniconda3-py312_24.7.1-0-Linux-x86_64.sh" + dest: "{{ miniconda_install_dir }}/miniconda.sh" + checksum: "sha256:33442cd3813df33dcbb4a932b938ee95398be98344dff4c30f7e757cd2110e4f" + mode: "0700" + +- name: Run the miniconda installer + ansible.builtin.shell: | + bash {{ miniconda_install_dir }}/miniconda.sh -b -u -p {{ miniconda_install_dir }} + +- name: Delete installer + ansible.builtin.file: + path: "{{ miniconda_install_dir }}/miniconda.sh" + state: absent diff --git a/ansible/roles/miniconda/tasks/main.yml b/ansible/roles/miniconda/tasks/main.yml new file mode 100644 index 00000000..0ea358b3 --- /dev/null +++ b/ansible/roles/miniconda/tasks/main.yml @@ -0,0 +1,21 @@ +--- +- name: Ensure miniconda user exists + ansible.builtin.user: + name: miniconda + shell: /bin/false + +- name: Check if Miniconda is installed + ansible.builtin.stat: + path: "{{ miniconda_install_dir }}/bin/conda" + register: miniconda_bin + +- include_tasks: install.yml + when: not miniconda_bin.stat.exists + +- name: "install conda packages" + ansible.builtin.shell: + cmd: "{{ miniconda_install_dir }}/bin/conda install -y {{ item }}" + loop: + - pandas + - numpy + - altair diff --git a/ansible/roles/nftables/README.adoc b/ansible/roles/nftables/README.adoc new file mode 100644 index 00000000..e3bef58f --- /dev/null +++ b/ansible/roles/nftables/README.adoc @@ -0,0 +1,25 @@ +Install nftables based firewall + +Set up /etc/ooni/nftables/ + +Rules for specific services are *not* configured by this role + +When creating rules to accept TCP traffic from any IPv4/6 address, +files are named with the port number to detect collisions. 
+ +Example (also see roles/nftables/tasks/main.yml): + +/etc/ooni/nftables/tcp/8080.nft + +``` +add rule inet filter input tcp dport 8080 counter accept comment "MyService" +``` + + +Otherwise: + +/etc/ooni/nftables/tcp/5432_postgres_internal.nft + +``` +add rule inet filter input ip saddr { 10.0.0.0/8, 192.168.0.0/16 } tcp dport 5432 counter accept comment "Internal PostgreSQL" +``` diff --git a/ansible/roles/nftables/handlers/main.yml b/ansible/roles/nftables/handlers/main.yml new file mode 100644 index 00000000..a5b0e4bf --- /dev/null +++ b/ansible/roles/nftables/handlers/main.yml @@ -0,0 +1,5 @@ +- name: Reload nftables + tags: nftables + ansible.builtin.systemd_service: + name: nftables + state: reloaded diff --git a/ansible/roles/nftables/tasks/main.yml b/ansible/roles/nftables/tasks/main.yml new file mode 100644 index 00000000..2789b150 --- /dev/null +++ b/ansible/roles/nftables/tasks/main.yml @@ -0,0 +1,45 @@ +--- +- name: Install nftables + ansible.builtin.apt: + cache_valid_time: 86400 + name: nftables + tags: + - nftables + +- name: create config dir + ansible.builtin.file: + path: /etc/ooni/nftables/tcp + state: directory + owner: root + group: root + mode: 0755 + tags: + - nftables + +- name: allow SSH + ansible.builtin.blockinfile: + path: /etc/ooni/nftables/tcp/22.nft + create: yes + block: | + add rule inet filter input tcp dport 22 counter accept comment "Incoming SSH" + tags: + - nftables + +- name: Overwrite nftables.conf + ansible.builtin.template: + src: templates/nftables.conf + dest: /etc/nftables.conf + mode: 0755 + owner: root + notify: + - Reload nftables + tags: + - nftables + +- name: enable nftables service + ansible.builtin.systemd_service: + name: nftables + enabled: yes + state: started + tags: + - nftables diff --git a/ansible/roles/nftables/templates/nftables.conf b/ansible/roles/nftables/templates/nftables.conf new file mode 100755 index 00000000..5f7b50cc --- /dev/null +++ b/ansible/roles/nftables/templates/nftables.conf @@ -0,0 +1,41 @@ +#!/usr/sbin/nft -f +# +# Nftables configuration script +# +# Managed by ansible +# roles/nftables/templates/nftables.conf +# +# The ruleset is applied atomically + +flush ruleset + +table inet filter { + chain input { + type filter hook input priority 0; + policy drop; + iif lo accept comment "Accept incoming traffic from localhost" + ct state invalid drop + ct state established,related accept comment "Accept traffic related to outgoing connections" + icmp type echo-request accept + icmpv6 type echo-request counter packets 0 bytes 0 accept + icmpv6 type { nd-router-advert, nd-neighbor-solicit, nd-neighbor-advert } ip6 hoplimit 1 accept + icmpv6 type { nd-router-advert, nd-neighbor-solicit, nd-neighbor-advert } ip6 hoplimit 255 counter packets 1 bytes 72 accept + } + + chain forward { + type filter hook forward priority 0; + policy accept; + } + + chain output { + type filter hook output priority 0; + policy accept; + } +} + +# Configure TCP traffic rules +include "/etc/ooni/nftables/tcp/*.nft" + +# Configure any other rule +include "/etc/ooni/nftables/*.nft" + diff --git a/ansible/roles/nginx/files/ffdhe2048_dhparam.pem b/ansible/roles/nginx/files/ffdhe2048_dhparam.pem new file mode 100644 index 00000000..9b182b72 --- /dev/null +++ b/ansible/roles/nginx/files/ffdhe2048_dhparam.pem @@ -0,0 +1,8 @@ +-----BEGIN DH PARAMETERS----- +MIIBCAKCAQEA//////////+t+FRYortKmq/cViAnPTzx2LnFg84tNpWp4TZBFGQz ++8yTnc4kmz75fS/jY2MMddj2gbICrsRhetPfHtXV/WVhJDP1H18GbtCFY2VVPe0a 
+87VXE15/V8k1mE8McODmi3fipona8+/och3xWKE2rec1MKzKT0g6eXq8CrGCsyT7 +YdEIqUuyyOP7uWrat2DX9GgdT0Kj3jlN9K5W7edjcrsZCwenyO4KbXCeAvzhzffi +7MA0BM0oNC9hkXL+nOmFg/+OTxIy7vKBg8P+OxtMb61zO7X8vC7CIAXFjvGDfRaD +ssbzSibBsu/6iGtCOGEoXJf//////////wIBAg== +-----END DH PARAMETERS----- diff --git a/ansible/roles/nginx/files/ssl_intermediate.conf b/ansible/roles/nginx/files/ssl_intermediate.conf new file mode 100644 index 00000000..96d2e6e2 --- /dev/null +++ b/ansible/roles/nginx/files/ssl_intermediate.conf @@ -0,0 +1,3 @@ +# Oldest compatible clients: Firefox 1, Chrome 1, IE 7, Opera 5, Safari 1, Windows XP IE8, Android 2.3, Java 7 +ssl_protocols TLSv1 TLSv1.1 TLSv1.2; # Dropping SSLv3, ref: POODLE +ssl_ciphers 'ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-RSA-AES256-SHA:ECDHE-ECDSA-DES-CBC3-SHA:ECDHE-RSA-DES-CBC3-SHA:EDH-RSA-DES-CBC3-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:DES-CBC3-SHA:!DSS'; diff --git a/ansible/roles/nginx/files/ssl_modern.conf b/ansible/roles/nginx/files/ssl_modern.conf new file mode 100644 index 00000000..9ad7c11d --- /dev/null +++ b/ansible/roles/nginx/files/ssl_modern.conf @@ -0,0 +1,4 @@ +# Oldest compatible clients: Firefox 27, Chrome 30, IE 11 on Windows 7, Edge, Opera 17, Safari 9, Android 5.0, and Java 8 +ssl_protocols TLSv1.2; +ssl_ciphers 'ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256'; +# NB: technically, it does not require ssl_dhparam as it has no DHE, only ECDHE. 
diff --git a/ansible/roles/nginx/handlers/main.yml b/ansible/roles/nginx/handlers/main.yml new file mode 100644 index 00000000..eb1d1671 --- /dev/null +++ b/ansible/roles/nginx/handlers/main.yml @@ -0,0 +1,15 @@ +- name: test nginx config + command: /usr/sbin/nginx -t -c /etc/nginx/nginx.conf + listen: + - restart nginx + - reload nginx + +- name: restart nginx + service: + name: nginx + state: restarted + +- name: reload nginx + service: + name: nginx + state: reloaded diff --git a/ansible/roles/nginx/tasks/main.yml b/ansible/roles/nginx/tasks/main.yml new file mode 100644 index 00000000..b93304c1 --- /dev/null +++ b/ansible/roles/nginx/tasks/main.yml @@ -0,0 +1,39 @@ +--- +- name: install nginx + include_role: + name: nginxinc.nginx + +# https://ssl-config.mozilla.org/#server=nginx&version=1.14.2&config=intermediate&openssl=1.1.1d&guideline=5.4 +# Guide https://wiki.mozilla.org/Security/Server_Side_TLS#Pre-defined_DHE_groups +# suggests ffdhe2048 instead of `openssl dhparam` to avoid https://weakdh.org/ +- name: copy nginx configuration snippets + copy: src={{item}} dest=/etc/nginx/{{ item }} mode=0444 owner=root group=root + with_items: + - ffdhe2048_dhparam.pem # ffdhe2048 Diffie-Hellman parameters + - ssl_intermediate.conf + - ssl_modern.conf + tags: + - nginx + +- name: remove `default` vhost + file: path={{item}} state=absent + notify: reload nginx + with_items: + - /etc/nginx/conf.d/default.conf + - /etc/nginx/sites-available/default + - /etc/nginx/sites-enabled/default + tags: + - nginx + +- name: Create nginx sites directory + ansible.builtin.file: + path: "/etc/nginx/sites-enabled/" + state: directory + tags: + - nginx + +- name: set nginx.conf + template: src=nginx.conf dest=/etc/nginx/nginx.conf mode=0444 + notify: reload nginx + tags: + - nginx diff --git a/ansible/roles/nginx/templates/nginx.conf b/ansible/roles/nginx/templates/nginx.conf new file mode 100644 index 00000000..f43bf7c5 --- /dev/null +++ b/ansible/roles/nginx/templates/nginx.conf @@ -0,0 +1,122 @@ +# NB: system nginx uses `www-data` user! +user nginx; +worker_processes 2; + +error_log /var/log/nginx/error.log warn; +pid /var/run/nginx.pid; + +events { + worker_connections 1024; +} + +http { + include /etc/nginx/mime.types; + default_type application/octet-stream; + + geo $is_ooni { + # TODO: this is not implemented ATM + default 0; + } + + map $http_x_request_id $has_request_id { # check for `X-Request-ID` + "" 0; + default 1; + } + + map "$is_ooni:$has_request_id" $ooni_request_id { + "1:1" $http_x_request_id; # use `X-Request-ID` if it's okay + default $request_id; + } + + # IPv4 is anonymized to /24, IPv6 to /48 - according to OONI Data Policy. + # https://ooni.torproject.org/about/data-policy/ + # IP is recorded to track possible abusers, not to distinguish users, so the + # address is truncated down to ISP (min routable prefix) instead of hashing. + map $remote_addr $ooni_remote_addr { + default "0.0.0.0"; + # variables in map value require nginx/1.11.0+ + "~(?P\d+\.\d+\.\d+)\.\d+" "$ip.0"; + # :: means at least TWO zero 16bit fields, https://tools.ietf.org/html/rfc5952#section-4.2.2 + "~(?P[0-9a-f]+:[0-9a-f]+:[0-9a-f]+):[0-9a-f:]+" "$ip::"; + "~(?P[0-9a-f]+:[0-9a-f]+)::[0-9a-f:]+" "$ip::"; + "~(?P[0-9a-f]+)::[0-9a-f:]+" "$ip::"; + } + + # $server_name is important as mtail does not distinguish log lines from + # different files, $host is required to log actual `Host` header. + # $request is split into separate fields to ease awk and mtail parsing. + # $scheme is used instead of $https to ease eye-reading. 
+ # TCP_INFO is logged for random fun. + log_format mtail_pub + '$time_iso8601\t$msec\t$server_name\t' + '$ooni_remote_addr\t' # pub/int diff + '$request_completion\t$request_time\t$status\t$bytes_sent\t$body_bytes_sent\t' + '$upstream_cache_status\t$upstream_addr\t$upstream_status\t$upstream_connect_time\t$upstream_header_time\t$upstream_response_time\t' + '$scheme\t$server_protocol\t$request_length\t$request_method\t$host\t$request_uri\t' + '$tcpinfo_rtt\t$tcpinfo_rttvar\t' + '$http_referer\t$http_user_agent\t$ooni_request_id'; + + log_format mtail_int + '$time_iso8601\t$msec\t$server_name\t' + '$remote_addr\t' # pub/int diff + '$request_completion\t$request_time\t$status\t$bytes_sent\t$body_bytes_sent\t' + '$upstream_cache_status\t$upstream_addr\t$upstream_status\t$upstream_connect_time\t$upstream_header_time\t$upstream_response_time\t' + '$scheme\t$server_protocol\t$request_length\t$request_method\t$host\t$request_uri\t' + '$tcpinfo_rtt\t$tcpinfo_rttvar\t' + '$http_referer\t$http_user_agent\t$ooni_request_id'; + + log_format oolog '$ooni_remote_addr - $remote_user [$time_local] ' + '"$request" $status $body_bytes_sent ' + '"$http_referer" "$http_user_agent" "$host"'; + + log_format oolog_mtail '$time_iso8601\t$msec\t$server_name\t' + '$ooni_remote_addr\t' # pub/int diff + '$request_completion\t$request_time\t$status\t$bytes_sent\t$body_bytes_sent\t' + '$upstream_cache_status\t$upstream_addr\t$upstream_status\t$upstream_connect_time\t$upstream_header_time\t$upstream_response_time\t' + '$scheme\t$server_protocol\t$request_length\t$request_method\t$host\t$request_uri\t' + '$tcpinfo_rtt\t$tcpinfo_rttvar\t' + '$http_referer\t$http_user_agent\t$ooni_request_id'; + + access_log /var/log/nginx/access.log mtail_int; + + sendfile on; + tcp_nopush on; # TCP_CORK HTTP headers with sendfile() body into single packet + + keepalive_timeout 120 120; # Firefox has 115s, http://kb.mozillazine.org/Network.http.keep-alive.timeout + + server_tokens off; + + # SSL based on https://wiki.mozilla.org/Security/Server_Side_TLS (doc v4.1) + ssl_session_timeout 1d; + ssl_session_cache shared:GLOBAL:1m; # 1m of cache is ~4000 sessions + ssl_session_tickets off; # needs accurate key rotation + ssl_dhparam /etc/nginx/ffdhe2048_dhparam.pem; # https://tools.ietf.org/html/rfc7919 + ssl_prefer_server_ciphers on; + #TODO: ssl_stapling on; # needs `resolver` or `ssl_stapling_file` + #TODO: ssl_stapling_verify on; # needs `ssl_trusted_certificate` + #TODO: resolver ; + # Define in server{} + # - include /etc/nginx/ssl_modern.conf | /etc/nginx/ssl_intermediate.conf + # - ssl_certificate /etc/letsencrypt/live/example.org/fullchain.pem; + # - ssl_certificate_key /etc/letsencrypt/live/example.org/privkey.pem + # - ssl_trusted_certificate /etc/letsencrypt/live/example.org/chain.pem; # for ssl_stapling_verify + # - add_header Strict-Transport-Security max-age=15768000; # HSTS (15768000 seconds = 6 months) + ### + + gzip on; + gzip_types text/html text/plain text/css text/xml text/javascript application/x-javascript application/json application/xml; # default is only `text/html` + gzip_disable "msie6"; + #gzip_proxied any; + + # Host, X-Real-IP, X-Forwarded-For, X-Forwarded-Proto are from + # file /etc/nginx/proxy_params from nginx-common package + # NB: adding `proxy_set_header` in another location overwrites whole set! 
+ proxy_set_header Host $http_host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Request-ID $ooni_request_id; + + include /etc/nginx/conf.d/*.conf; + include /etc/nginx/sites-enabled/*; +} diff --git a/ansible/roles/oonidata/defaults/main.yml b/ansible/roles/oonidata/defaults/main.yml new file mode 100644 index 00000000..c2b0d9d8 --- /dev/null +++ b/ansible/roles/oonidata/defaults/main.yml @@ -0,0 +1,8 @@ +miniconda_install_dir: /opt/miniconda +jupyterhub_config_dir: /etc/jupyterhub +jupyterhub_runtime_dir: /srv/jupyterhub +oonipipeline_runtime_dir: /srv/oonipipeline +tls_cert_dir: /etc/letsencrypt/live +admin_group_name: admin +enable_oonipipeline_worker: true +enable_jupyterhub: true diff --git a/ansible/roles/oonidata/handlers/main.yml b/ansible/roles/oonidata/handlers/main.yml new file mode 100644 index 00000000..f12d0aa6 --- /dev/null +++ b/ansible/roles/oonidata/handlers/main.yml @@ -0,0 +1,16 @@ +- name: Restart jupyterhub + ansible.builtin.systemd_service: + name: jupyterhub + state: restarted + daemon_reload: true + +- name: Restart oonipipeline-worker + ansible.builtin.systemd_service: + name: oonipipeline-worker + state: restarted + daemon_reload: true + +- name: Reload nginx + ansible.builtin.systemd_service: + name: nginx + state: reloaded diff --git a/ansible/roles/oonidata/tasks/jupyterhub.yml b/ansible/roles/oonidata/tasks/jupyterhub.yml new file mode 100644 index 00000000..b6fa2f07 --- /dev/null +++ b/ansible/roles/oonidata/tasks/jupyterhub.yml @@ -0,0 +1,82 @@ +--- +- name: Install jupyterhub + ansible.builtin.shell: + cmd: "{{ miniconda_install_dir }}/bin/conda install -c conda-forge -y jupyterhub" + tags: + - jupyterhub + +- name: Install jupyterlab and notebook + ansible.builtin.shell: + cmd: "{{ miniconda_install_dir }}/bin/conda install -y jupyterlab notebook" + tags: + - jupyterhub + +- name: Install jupyterhub packages + ansible.builtin.apt: + name: + - npm + tags: + - jupyterhub + +- name: Install configurable-http-proxy + ansible.builtin.shell: + cmd: "npm install -g configurable-http-proxy" + tags: + - jupyterhub + +- name: Create jupyterhub directories + ansible.builtin.file: + path: "{{ item }}" + state: directory + loop: + - "{{ jupyterhub_config_dir }}" + - "{{ jupyterhub_runtime_dir }}" + - "{{ jupyterhub_runtime_dir }}/state" + tags: + - jupyterhub + +- name: Write jupyterhub config + ansible.builtin.template: + src: jupyterhub_config.py.j2 + dest: "{{ jupyterhub_config_dir }}/config.py" + owner: root + mode: "0640" + notify: + - Restart jupyterhub + tags: + - jupyterhub + - config + +- name: Write jupyterhub service + ansible.builtin.template: + src: jupyterhub.service.j2 + dest: "/etc/systemd/system/jupyterhub.service" + owner: root + group: root + mode: "0644" + notify: + - Restart jupyterhub + tags: + - jupyterhub + +- name: Ensure the JupyterHub service is started with daemon-reload + ansible.builtin.systemd: + name: jupyterhub + state: started + enabled: true + daemon_reload: true + tags: + - jupyterhub + - config + +- name: Setup oonidata nginx config + ansible.builtin.template: + src: nginx-jupyterhub.j2 + dest: /etc/nginx/sites-enabled/01-jupyterhub + owner: oonipipeline + mode: "0655" + notify: + - Reload nginx + tags: + - jupyterhub + - config \ No newline at end of file diff --git a/ansible/roles/oonidata/tasks/main.yml b/ansible/roles/oonidata/tasks/main.yml new file mode 100644 index 00000000..fa19b49b --- /dev/null 
+++ b/ansible/roles/oonidata/tasks/main.yml
@@ -0,0 +1,59 @@
+---
+- name: create oonipipeline user
+  ansible.builtin.user:
+    name: oonipipeline
+    state: present
+    shell: /bin/false
+    createhome: no
+  tags:
+    - oonipipeline
+    - jupyterhub
+
+- ansible.builtin.include_role:
+    name: miniconda
+  tags:
+    - conda
+
+- ansible.builtin.import_tasks: jupyterhub.yml
+  when: enable_jupyterhub
+  tags:
+    - jupyterhub
+
+- ansible.builtin.include_role:
+    name: nginx
+  tags:
+    - nginx
+
+- ansible.builtin.include_role:
+    name: geerlingguy.certbot
+  tags:
+    - certbot
+  vars:
+    certbot_admin_email: admin@ooni.org
+    certbot_create_extra_args: ""
+    certbot_create_if_missing: true
+    certbot_create_standalone_stop_services:
+      - nginx
+    certbot_certs:
+      - domains:
+          - "{{ inventory_hostname }}"
+
+- name: Install oonipipeline requirements
+  ansible.builtin.apt:
+    name:
+      - net-tools
+      - curl
+      - git
+  tags:
+    - oonipipeline
+
+- name: Install OONI pipeline from pip
+  ansible.builtin.shell:
+    cmd: "{{ miniconda_install_dir }}/bin/pip install -e 'git+https://github.com/ooni/data#egg=oonipipeline&subdirectory=oonipipeline'"
+  tags:
+    - oonipipeline
+
+- ansible.builtin.import_tasks: oonipipeline-worker.yml
+  when: enable_oonipipeline_worker
+  tags:
+    - oonipipeline
diff --git a/ansible/roles/oonidata/tasks/oonipipeline-worker.yml b/ansible/roles/oonidata/tasks/oonipipeline-worker.yml
new file mode 100644
index 00000000..cbb5ef70
--- /dev/null
+++ b/ansible/roles/oonidata/tasks/oonipipeline-worker.yml
@@ -0,0 +1,57 @@
+- name: create pipeline configuration
+  ansible.builtin.file:
+    path: "/etc/ooni/pipeline/"
+    state: directory
+    owner: oonipipeline
+  tags:
+    - oonipipeline
+
+- name: create pipeline configuration
+  ansible.builtin.file:
+    path: "{{ oonipipeline_runtime_dir }}"
+    state: directory
+    owner: oonipipeline
+  tags:
+    - oonipipeline
+
+- name: copy configuration files
+  ansible.builtin.copy:
+    content: "{{ lookup('amazon.aws.aws_secret', 'oonidevops/{{ item }}', profile='oonidevops_user_prod') }}"
+    dest: /etc/ooni/pipeline/{{item}}
+    owner: oonipipeline
+    mode: "0600"
+  loop:
+    - ooni-pipeline.uuhzf.crt
+    - ooni-pipeline.uuhzf.key
+  tags:
+    - oonipipeline
+
+- name: write oonipipeline configuration
+  ansible.builtin.template:
+    src: oonipipeline-config.toml.j2
+    dest: /etc/ooni/pipeline/oonipipeline-config.toml
+    owner: oonipipeline
+    mode: "0600"
+  tags:
+    - oonipipeline
+
+- name: Write oonipipeline service
+  ansible.builtin.template:
+    src: oonipipeline-worker.service.j2
+    dest: "/etc/systemd/system/oonipipeline-worker.service"
+    owner: root
+    group: root
+    mode: "0644"
+  notify:
+    - Restart oonipipeline-worker
+  tags:
+    - oonipipeline
+
+- name: Ensure the OONI pipeline worker service is started with daemon-reload
+  ansible.builtin.systemd:
+    name: oonipipeline-worker
+    state: started
+    enabled: true
+    daemon_reload: true
+  tags:
+    - oonipipeline
diff --git a/ansible/roles/oonidata/templates/jupyterhub.service.j2 b/ansible/roles/oonidata/templates/jupyterhub.service.j2
new file mode 100644
index 00000000..479a48f9
--- /dev/null
+++ b/ansible/roles/oonidata/templates/jupyterhub.service.j2
@@ -0,0 +1,17 @@
+# JupyterHub systemd service
+[Unit]
+
+[Service]
+User=root
+Restart=always
+WorkingDirectory={{ jupyterhub_runtime_dir}}/state
+PrivateTmp=yes
+ProtectKernelTunables=yes
+ProtectKernelModules=yes
+# Run upgrade-db before starting, in case Hub version has changed
+# This is a no-op when no db exists or no upgrades are needed
+ExecStart={{ miniconda_install_dir }}/bin/python -m jupyterhub.app -f {{ jupyterhub_config_dir }}/config.py --upgrade-db
+
+[Install]
+# Start service when system boots
+WantedBy=multi-user.target
diff --git a/ansible/roles/oonidata/templates/jupyterhub_config.py.j2 b/ansible/roles/oonidata/templates/jupyterhub_config.py.j2
new file mode 100644
index 00000000..ec6bd238
--- /dev/null
+++ b/ansible/roles/oonidata/templates/jupyterhub_config.py.j2
@@ -0,0 +1,3 @@
+c.JupyterHub.bind_url = 'http://127.0.0.1:8888'
+c.Spawner.cmd = ['{{ miniconda_install_dir }}/bin/jupyterhub-singleuser']
+c.Authenticator.allow_all = True
diff --git a/ansible/roles/oonidata/templates/nginx-jupyterhub.j2 b/ansible/roles/oonidata/templates/nginx-jupyterhub.j2
new file mode 100644
index 00000000..1d6ae57a
--- /dev/null
+++ b/ansible/roles/oonidata/templates/nginx-jupyterhub.j2
@@ -0,0 +1,40 @@
+# ansible-managed in ooni/devops.git
+
+map $http_upgrade $connection_upgrade {
+  default upgrade;
+  '' close;
+}
+
+server {
+  listen 443 ssl http2;
+
+  include /etc/nginx/ssl_intermediate.conf;
+
+  ssl_certificate {{ tls_cert_dir }}/{{ inventory_hostname }}/fullchain.pem;
+  ssl_certificate_key {{ tls_cert_dir }}/{{ inventory_hostname }}/privkey.pem;
+  ssl_trusted_certificate {{ tls_cert_dir }}/{{ inventory_hostname }}/chain.pem;
+
+  server_name _;
+  access_log /var/log/nginx/{{ inventory_hostname }}.access.log;
+  error_log /var/log/nginx/{{ inventory_hostname }}.log warn;
+
+  add_header Access-Control-Allow-Origin *;
+
+  ## JupyterHub configuration
+  location / {
+    proxy_pass http://127.0.0.1:8888;
+
+    proxy_set_header X-Real-IP $remote_addr;
+    proxy_set_header Host $host;
+    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+
+    client_max_body_size 100M;
+
+    # WebSocket support
+    proxy_http_version 1.1;
+    proxy_set_header Upgrade $http_upgrade;
+    proxy_set_header Connection $connection_upgrade;
+    proxy_set_header X-Scheme $scheme;
+    proxy_buffering off;
+  }
+}
diff --git a/ansible/roles/oonidata/templates/oonipipeline-config.toml.j2 b/ansible/roles/oonidata/templates/oonipipeline-config.toml.j2
new file mode 100644
index 00000000..a41dcb43
--- /dev/null
+++ b/ansible/roles/oonidata/templates/oonipipeline-config.toml.j2
@@ -0,0 +1,7 @@
+temporal_address = "ooni-pipeline.uuhzf.tmprl.cloud:7233"
+temporal_namespace = "ooni-pipeline.uuhzf"
+temporal_tls_client_cert_path = "/etc/ooni/pipeline/ooni-pipeline.uuhzf.crt"
+temporal_tls_client_key_path = "/etc/ooni/pipeline/ooni-pipeline.uuhzf.key"
+clickhouse_write_batch_size = 30000
+prometheus_bind_address = "127.0.0.1:9998"
+data_dir = "/srv/oonipipeline/data_dir"
\ No newline at end of file
diff --git a/ansible/roles/oonidata/templates/oonipipeline-worker.service.j2 b/ansible/roles/oonidata/templates/oonipipeline-worker.service.j2
new file mode 100644
index 00000000..fcde42dc
--- /dev/null
+++ b/ansible/roles/oonidata/templates/oonipipeline-worker.service.j2
@@ -0,0 +1,17 @@
+# OONI Pipeline worker service
+[Unit]
+
+[Service]
+User=oonipipeline
+Restart=always
+WorkingDirectory={{ oonipipeline_runtime_dir }}
+PrivateTmp=yes
+PrivateDevices=yes
+ProtectKernelTunables=yes
+ProtectKernelModules=yes
+Environment="CONFIG_FILE=/etc/ooni/pipeline/oonipipeline-config.toml"
+ExecStart={{ miniconda_install_dir }}/bin/python -m oonipipeline.main startworkers
+
+[Install]
+# Start service when system boots
+WantedBy=multi-user.target
diff --git a/ansible/roles/prometheus_node_exporter/handlers/main.yml b/ansible/roles/prometheus_node_exporter/handlers/main.yml
new file mode 100644
index 00000000..8face3a3
--- /dev/null
+++ b/ansible/roles/prometheus_node_exporter/handlers/main.yml
@@ -0,0 +1,21 @@
+- name: Test nginx config
+  command: /usr/sbin/nginx -t -c /etc/nginx/nginx.conf
+  listen:
+    - Restart nginx-prometheus
+    - Reload nginx-prometheus
+
+- name: Reload nginx
+  ansible.builtin.systemd_service:
+    name: nginx
+    state: reloaded
+
+- name: Restart nginx
+  ansible.builtin.systemd_service:
+    name: nginx
+    state: restarted
+
+- name: Reload nftables
+  tags: nftables
+  ansible.builtin.systemd_service:
+    name: nftables
+    state: reloaded
diff --git a/ansible/roles/prometheus_node_exporter/tasks/main.yml b/ansible/roles/prometheus_node_exporter/tasks/main.yml
new file mode 100644
index 00000000..d33fe013
--- /dev/null
+++ b/ansible/roles/prometheus_node_exporter/tasks/main.yml
@@ -0,0 +1,56 @@
+- ansible.builtin.include_role:
+    name: nginx
+  tags:
+    - nginx
+    - node_exporter
+
+- ansible.builtin.include_role:
+    name: geerlingguy.node_exporter
+  vars:
+    node_exporter_host: "localhost"
+    node_exporter_port: 8100
+  tags:
+    - node_exporter
+
+- name: create ooni configuration directory
+  ansible.builtin.file:
+    path: "/etc/ooni/"
+    state: directory
+    owner: root
+  tags:
+    - node_exporter
+
+- name: Add a user to a password file and ensure permissions are set
+  community.general.htpasswd:
+    path: /etc/ooni/prometheus_passwd
+    name: prom
+    password: "{{ prometheus_metrics_password }}"
+    owner: root
+    group: www-data
+    mode: 0640
+  tags:
+    - node_exporter
+
+- name: Setup oonidata nginx config
+  ansible.builtin.template:
+    src: nginx-prometheus.j2
+    dest: /etc/nginx/sites-enabled/01-prometheus
+    mode: "0655"
+  notify:
+    - Restart nginx
+  tags:
+    - node_exporter
+    - config
+
+- name: Allow prometheus monitoring
+  ansible.builtin.blockinfile:
+    path: /etc/ooni/nftables/tcp/9100.nft
+    create: yes
+    block: |
+      add rule inet filter input tcp dport 9100 counter accept comment "Incoming prometheus monitoring"
+  notify:
+    - Reload nftables
+  tags:
+    - nftables
+    - node_exporter
+    - config
diff --git a/ansible/roles/prometheus_node_exporter/templates/nginx-prometheus.j2 b/ansible/roles/prometheus_node_exporter/templates/nginx-prometheus.j2
new file mode 100644
index 00000000..7d9fbab1
--- /dev/null
+++ b/ansible/roles/prometheus_node_exporter/templates/nginx-prometheus.j2
@@ -0,0 +1,20 @@
+# ansible-managed in ooni/devops.git
+
+server {
+  listen 9100;
+
+  server_name _;
+  access_log /var/log/nginx/{{ inventory_hostname }}.access.log;
+  error_log /var/log/nginx/{{ inventory_hostname }}.log warn;
+
+  location /metrics {
+    auth_basic "Administrator’s Area";
+    auth_basic_user_file /etc/ooni/prometheus_passwd;
+
+    proxy_pass http://127.0.0.1:8100;
+
+    proxy_set_header X-Real-IP $remote_addr;
+    proxy_set_header Host $host;
+    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+  }
+}
\ No newline at end of file
diff --git a/ansible/roles/ssh_users/tasks/main.yml b/ansible/roles/ssh_users/tasks/main.yml
index a4a701a2..0d994377 100644
--- a/ansible/roles/ssh_users/tasks/main.yml
+++ b/ansible/roles/ssh_users/tasks/main.yml
@@ -81,7 +81,7 @@
     state: absent
 
 - name: configure sshd
-  include_role: 
+  include_role:
     name: willshersystems.sshd
   vars:
     sshd_skip_defaults: false
diff --git a/tf/environments/prod/dns_records.tf b/tf/environments/prod/dns_records.tf
index 06f68e81..c2d680a7 100644
--- a/tf/environments/prod/dns_records.tf
+++ b/tf/environments/prod/dns_records.tf
@@ -997,3 +997,11 @@ resource "aws_route53_record" "openvpn-server1-ooni-io-_A_" {
   type    = "A"
   zone_id = local.dns_root_zone_ooni_io
 }
+
+resource "aws_route53_record" "notebook-ooni-org-_A_" {
+  name    = "notebook.ooni.org"
+  records = ["138.201.19.39"]
+  ttl     = "60"
+  type    = "A"
+  zone_id = local.dns_root_zone_ooni_org
+}

From 815d9e43307a545f6d801ba7056aa699b67dadad Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Arturo=20Filast=C3=B2?=
Date: Wed, 2 Oct 2024 10:45:51 +0200
Subject: [PATCH 10/10] Add full list of SANs to TLS cert creation

---
 tf/environments/prod/main.tf | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/tf/environments/prod/main.tf b/tf/environments/prod/main.tf
index e899839c..70e0ae5d 100644
--- a/tf/environments/prod/main.tf
+++ b/tf/environments/prod/main.tf
@@ -595,10 +595,9 @@ locals {
     "4.th.ooni.org" : local.dns_root_zone_ooni_org,
     "5.th.ooni.org" : local.dns_root_zone_ooni_org,
     "6.th.ooni.org" : local.dns_root_zone_ooni_org,
-    # TODO: add these once we unlock the quota for maximum certificates
-    #"ooniauth.${local.environment}.ooni.io" : local.dns_zone_ooni_io,
-    #"ooniprobe.${local.environment}.ooni.io" : local.dns_zone_ooni_io,
-    #"oonirun.${local.environment}.ooni.io" : local.dns_zone_ooni_io,
+    "ooniauth.${local.environment}.ooni.io" : local.dns_zone_ooni_io,
+    "ooniprobe.${local.environment}.ooni.io" : local.dns_zone_ooni_io,
+    "oonirun.${local.environment}.ooni.io" : local.dns_zone_ooni_io,
   }
   ooniapi_frontend_main_domain_name         = "api.${local.environment}.ooni.io"
   ooniapi_frontend_main_domain_name_zone_id = local.dns_zone_ooni_io