Skip to content

Commit

Permalink
Merge branch 'feature/PI-506-upgrade_lambdas' into release/2024-09-13
Browse files Browse the repository at this point in the history
  • Loading branch information
megan-bower4 committed Sep 13, 2024
2 parents 10d9fed + b887849 commit 4d85185
Show file tree
Hide file tree
Showing 13 changed files with 5,281 additions and 58 deletions.
7 changes: 4 additions & 3 deletions infrastructure/terraform/etc/prod.tfvars
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
account_name = "prod"
environment = "prod"
domain = "api.cpm.national.nhs.uk"
account_name = "prod"
environment = "prod"
domain = "api.cpm.national.nhs.uk"
lambda_memory_size = 1536
7 changes: 4 additions & 3 deletions infrastructure/terraform/etc/qa.tfvars
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
account_name = "qa"
environment = "qa"
domain = "api.cpm.qa.national.nhs.uk"
account_name = "qa"
environment = "qa"
domain = "api.cpm.qa.national.nhs.uk"
lambda_memory_size = 1536
1 change: 1 addition & 0 deletions infrastructure/terraform/per_workspace/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -112,6 +112,7 @@ module "lambdas" {
resources = local.permission_resource_map[replace(file, ".json", "")]
}
}
memory_size = var.lambda_memory_size
}

module "authoriser" {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ module "lambda_function" {
handler = "api.${var.name}.index.handler"
runtime = var.python_version
timeout = 10
memory_size = var.memory_size

timeouts = {
create = "5m"
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -40,3 +40,7 @@ variable "attach_policy_statements" {
variable "policy_statements" {
default = {}
}

variable "memory_size" {
default = 128
}
4 changes: 4 additions & 0 deletions infrastructure/terraform/per_workspace/vars.tf
Original file line number Diff line number Diff line change
Expand Up @@ -65,3 +65,7 @@ variable "python_version" {
variable "domain" {
type = string
}

variable "lambda_memory_size" {
default = 128
}
3 changes: 2 additions & 1 deletion scripts/test/test.mk
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@ SDS_DEV_APIKEY =
USE_CPM_PROD ?= FALSE
TEST_COUNT =
COMPARISON_ENV ?= local
RUN_SPEEDTEST ?= FALSE

_pytest:
AWS_DEFAULT_REGION=$(AWS_DEFAULT_REGION) AWS_ACCESS_KEY_ID=$(AWS_ACCESS_KEY_ID) AWS_SECRET_ACCESS_KEY=$(AWS_SECRET_ACCESS_KEY) AWS_SESSION_TOKEN=$(AWS_SESSION_TOKEN) poetry run python -m pytest $(PYTEST_FLAGS) $(_INTERNAL_FLAGS) $(_CACHE_CLEAR)
Expand Down Expand Up @@ -40,4 +41,4 @@ test--feature--%--auto-retry: ## Autoretry of failed feature (gherkin) tests
$(MAKE) test--feature--$* _INTERNAL_FLAGS="--define='auto_retry=true'"

test--sds--matrix: ## Run end-to-end smoke tests that check data matches between cpm and ldap
SDS_PROD_APIKEY=$(SDS_PROD_APIKEY) SDS_DEV_APIKEY=$(SDS_DEV_APIKEY) USE_CPM_PROD=$(USE_CPM_PROD) TEST_COUNT=$(TEST_COUNT) COMPARISON_ENV=$(COMPARISON_ENV) poetry run python -m pytest $(PYTEST_FLAGS) -m 'matrix' --ignore=src/layers --ignore=src/etl $(_CACHE_CLEAR)
SDS_PROD_APIKEY=$(SDS_PROD_APIKEY) SDS_DEV_APIKEY=$(SDS_DEV_APIKEY) USE_CPM_PROD=$(USE_CPM_PROD) TEST_COUNT=$(TEST_COUNT) COMPARISON_ENV=$(COMPARISON_ENV) RUN_SPEEDTEST=$(RUN_SPEEDTEST) poetry run python -m pytest $(PYTEST_FLAGS) -m 'matrix' --ignore=src/layers --ignore=src/etl $(_CACHE_CLEAR)
133 changes: 133 additions & 0 deletions src/api/tests/sds_data_tests/calculation.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,133 @@
import ast
import glob
import json
import math
import os
import statistics

from event.json import json_loads


def preprocess_json_file(file_path):
    """Read a file of comma-separated JSON objects and parse it as a list.

    The test hooks append objects to the file as ``{...},{...},`` so the raw
    content is not valid JSON on its own.  Wrapping it in ``[`` ``]`` and
    stripping the trailing comma turns it into a parseable JSON array.

    Re-raises the ``json.JSONDecodeError`` (after logging the file name) if
    the repaired content still fails to parse.
    """
    with open(file_path, "r") as f:
        content = f.read().strip()

    # Add brackets around the content to form a valid JSON array
    content = f"[{content}]"

    # Remove the trailing comma before the closing bracket (`,]` is not
    # valid JSON).  The previous ``content.replace("},{", "},{")`` call was
    # a no-op and has been removed.
    content = content.replace(",]", "]")

    try:
        data = json_loads(content)
    except json.JSONDecodeError as e:
        print(f"Error parsing JSON in file {file_path}: {e}")  # noqa: T201
        raise
    return data


def transform_params(params_str):
    """Convert a stringified params dict into ``request.params.*`` keys.

    ``params_str`` is the ``repr`` of a dict captured during the test run
    (e.g. ``"{'organization': 'X', 'use_cpm': 'true'}"``).  Returns a new
    dict with each key prefixed by ``request.params.`` and the internal
    ``use_cpm`` flag dropped.  Returns ``{}`` on any parse failure.
    """
    # Convert the string representation of the dictionary to an actual dictionary
    try:
        params_dict = ast.literal_eval(params_str)
    except (ValueError, SyntaxError) as e:
        print(f"Error parsing string to dictionary: {e}")  # noqa: T201
        return {}

    # literal_eval can legally produce non-dict values (lists, numbers, ...);
    # treat those like a parse failure instead of raising AttributeError
    # on .items() below.
    if not isinstance(params_dict, dict):
        print(f"Expected a dict, got {type(params_dict).__name__}")  # noqa: T201
        return {}

    # Transform the dictionary, excluding "use_cpm"
    return {
        f"request.params.{key}": value
        for key, value in params_dict.items()
        if key != "use_cpm"  # internal routing flag, not a real query param
    }


def extract_response_times(json_files):
    """Collect LDAP/CPM timings and request params from result files.

    Returns three parallel lists: ldap response times, cpm response times,
    and the transformed request parameters (with the request path) for
    each recorded entry across all *json_files*.
    """
    ldap_times, cpm_times, params = [], [], []

    for path in json_files:
        for record in preprocess_json_file(path):
            ldap_times.append(record["ldap_response_time"])
            cpm_times.append(record["cpm_response_time"])
            entry_params = transform_params(record["params"])
            entry_params["path"] = record["path"]
            params.append(entry_params)

    return ldap_times, cpm_times, params


def format_value(value):
    """Render a millisecond timing as a string with two decimal places."""
    return "{:.2f} ms".format(value)


def calculate_statistics(times_list):
    """Summarise a list of response times (in ms) as formatted strings.

    Returns a dict with mean, mean of sub-second samples, mode, lowest,
    highest and median.  An empty input yields zeroed placeholder values
    instead of raising.
    """
    if not times_list:
        return {
            "mean": "0.00 ms",
            "mean_under_1s": "0.00 ms",
            "mode": "N/A",
            "lowest": "0.00 ms",
            "highest": "0.00 ms",
            "median": "0.00 ms",
        }

    try:
        mode_value = statistics.mode(times_list)
    except statistics.StatisticsError:
        mode_value = "N/A"  # No unique mode found

    # NOTE: a geometric mean was previously computed here but never
    # reported, and it raised ValueError (math.log) whenever a timing
    # was <= 0 — it has been removed.

    # Filter times under 1000ms
    times_under_1s = [time for time in times_list if time < 1000]

    mean_under_1s = sum(times_under_1s) / len(times_under_1s) if times_under_1s else 0

    return {
        "mean": format_value(sum(times_list) / len(times_list)),
        "mean_under_1s": format_value(mean_under_1s),
        "mode": format_value(mode_value) if mode_value != "N/A" else mode_value,
        "lowest": format_value(min(times_list)),
        "highest": format_value(max(times_list)),
        "median": format_value(statistics.median(times_list)),
    }


def write_to_json_file(output_file_path, data_list):
    """Dump *data_list* to *output_file_path*, unless the file already exists.

    An existing file is never overwritten; a message is printed instead.
    """
    if not os.path.exists(output_file_path):
        # First run: persist the collected query parameters.
        with open(output_file_path, "w") as handle:
            json.dump(data_list, handle, indent=4)
        return

    print(  # noqa: T201
        f"The file '{output_file_path}' already exists. No action will be taken."
    )


def main():
    """Aggregate speed-test result files and report LDAP vs CPM statistics.

    Reads every test_success_*.json file produced by conftest.py, prints
    summary statistics for the LDAP and CPM response times, and persists
    the de-duplicating query-parameter list (only if not already present).
    """
    # Get all result files written during the pytest session
    json_files = glob.glob("src/api/tests/sds_data_tests/test_success_*.json")

    # Extract response times
    ldap_times, cpm_times, params = extract_response_times(json_files)
    output_file_path = (
        "src/api/tests/sds_data_tests/data/sds_fhir_api.speed_test_queries.device.json"
    )

    # Calculate statistics
    ldap_stats = calculate_statistics(ldap_times)
    cpm_stats = calculate_statistics(cpm_times)

    print(f"LDAP Response Time Stats: {ldap_stats}")  # noqa: T201
    print(f"CPM Response Time Stats: {cpm_stats}")  # noqa: T201
    write_to_json_file(output_file_path, params)


# Guarded so the module can be imported without side effects; conftest.py
# runs this file as a script (`python .../calculation.py`), which still works.
if __name__ == "__main__":
    main()
15 changes: 13 additions & 2 deletions src/api/tests/sds_data_tests/conftest.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,22 @@
import json
import subprocess

import pytest

current_file_index = 0
entry_count = 0
max_entries_per_file = 2000
file_name_template = "test_success_{}.json"
file_name_template = "src/api/tests/sds_data_tests/test_success_{}.json"


@pytest.fixture(scope="session", autouse=True)
def run_after_tests():
    """Session-scoped autouse fixture: aggregate results after all tests finish."""
    # This code will run after all tests have finished
    yield
    # Use the interpreter running the tests rather than whatever "python"
    # resolves to on PATH, so the script executes in the same virtualenv.
    import sys

    subprocess.run(
        [sys.executable, "src/api/tests/sds_data_tests/calculation.py"], check=True
    )


def get_current_file():
Expand Down Expand Up @@ -41,7 +52,7 @@ def pytest_runtest_logreport(report):
str(report.longreprtext)
)
output = {"failed_request": failed_request, "error": assertion_error}
with open("test_failure.json", "a") as f:
with open("src/api/tests/sds_data_tests/test_failure.json", "a") as f:
f.write(json.dumps(output))
f.write(",")
if report.when == "call" and report.passed and hasattr(pytest, "success_message"):
Expand Down
Loading

0 comments on commit 4d85185

Please sign in to comment.