first commit with e2e test cleanup improvements #1368
```diff
@@ -4,6 +4,10 @@
 import allure
 import pytest
 from dotenv import load_dotenv
+from src.dynamoDB.dynamo_db_helper import (
+    cleanup_failed_audit_records_before_tests,
+    cleanup_failed_audit_records_for_filename,
+)
 from utilities.api_fhir_immunization_helper import (
     empty_folder,
     get_response_body_for_display,
```
```diff
@@ -76,6 +80,12 @@ def global_context():
     s3_env = os.getenv("S3_env")
     aws_account_id = os.getenv("aws_account_id")
     mns_validation_required = os.getenv("mns_validation_required", "false").strip().lower() == "true"
+
+    # Pre-test cleanup: update any 'Failed' audit records from the last 48 hours
+    # so they don't block the batch_processor_filter from processing new files.
+    if s3_env and aws_profile_name:
+        cleanup_failed_audit_records_before_tests(aws_profile_name, s3_env)
+
     if s3_env and aws_account_id and mns_validation_required:
         purge_all_queues(s3_env, aws_account_id)
```
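For context, a minimal sketch of what a pre-test helper like this might do, assuming a boto3 scan over the audit table. The table name, key, and attribute names below are illustrative guesses, not the real schema in src/dynamoDB/dynamo_db_helper.py:

```python
from datetime import datetime, timedelta, timezone

import boto3
from boto3.dynamodb.conditions import Attr


def cleanup_failed_audit_records_before_tests(profile_name: str, env: str) -> None:
    """Sketch: find 'Failed' audit records from the last 48 hours so they
    can be reset before the run (table/attribute names are assumptions)."""
    session = boto3.Session(profile_name=profile_name)
    table = session.resource("dynamodb").Table(f"imms-{env}-batch-audit")  # assumed table name
    cutoff = (datetime.now(timezone.utc) - timedelta(hours=48)).isoformat()
    response = table.scan(
        FilterExpression=Attr("status").eq("Failed") & Attr("timestamp").gte(cutoff)
    )
    for record in response.get("Items", []):
        # Each matching record would then be reset (see the update sketch in
        # the review thread below) so batch_processor_filter no longer sees it.
        print(f"Resetting failed audit record for {record.get('filename')}")
```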
```diff
@@ -84,7 +94,12 @@ def global_context():
 def temp_apigee_apps():
     if use_temp_apigee_apps():
         apigee_app_mgr = ApigeeOnDemandAppManager()
-        created_apps = apigee_app_mgr.setup_apps_and_product()
+        try:
+            created_apps = apigee_app_mgr.setup_apps_and_product()
+        except Exception as e:
+            print(f"[WARN] Apigee on-demand app setup failed — tests requiring dynamic apps will fail individually: {e}")
+            yield None
+            return

         for test_app in created_apps:
             os.environ[f"{test_app.supplier}_client_Id"] = test_app.client_id
```
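When the fixture yields None this way, each dependent test hits the failure at its own call site. A hypothetical guard in such a test could surface the cause explicitly instead of an AttributeError; the test name here is made up for illustration:

```python
import pytest


def test_requiring_dynamic_apps(temp_apigee_apps):
    # Hypothetical guard: fail fast with a clear message when on-demand
    # app provisioning was skipped, rather than erroring on attribute access.
    if temp_apigee_apps is None:
        pytest.fail("Apigee on-demand apps were not provisioned for this run")
```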
```diff
@@ -150,40 +165,80 @@ def pytest_bdd_after_scenario(request, feature, scenario):
     if "Delete_cleanUp" in tags:
         if context.ImmsID is not None:
             print(f"\n Delete Request is {context.url}/{context.ImmsID}")
-            context.response = http_requests_session.delete(f"{context.url}/{context.ImmsID}", headers=context.headers)
-            assert context.response.status_code == 204, (
-                f"Expected status code 204, but got {context.response.status_code}. Response: {get_response_body_for_display(context.response)}"
-            )
-            if context.mns_validation_required.strip().lower() == "true":
-                mns_event_will_be_triggered_with_correct_data_for_deleted_event(context)
-            else:
-                print("MNS validation not required, skipping MNS event verification for deleted event.")
+            try:
+                context.response = http_requests_session.delete(
+                    f"{context.url}/{context.ImmsID}", headers=context.headers
+                )
+                if context.response.status_code in (401, 403):
+                    # Apigee token has expired during a long test session (~13 min run).
+                    # Refresh the token and retry the DELETE once before giving up.
+                    print(
+                        f"[TEARDOWN][WARN] DELETE returned {context.response.status_code} for "
+                        f"{context.ImmsID} — Apigee token likely expired. Refreshing token and retrying..."
+                    )
+                    try:
+                        get_tokens(context, context.supplier_name)
+                        get_delete_url_header(context)
+                        context.response = http_requests_session.delete(
+                            f"{context.url}/{context.ImmsID}", headers=context.headers
+                        )
+                    except Exception as refresh_err:
+                        print(
+                            f"[TEARDOWN][WARN] Token refresh failed for {context.ImmsID}: {refresh_err}. Skipping teardown."
+                        )
+                        context.response = None
+
+                if context.response is None:
+                    pass
+                elif context.response.status_code in (401, 403):
+                    print(
+                        f"[TEARDOWN][WARN] DELETE still returned {context.response.status_code} for "
+                        f"{context.ImmsID} after token refresh. Skipping teardown assertion."
+                    )
+                else:
+                    assert context.response.status_code == 204, (
+                        f"Expected status code 204, but got {context.response.status_code}. "
+                        f"Response: {get_response_body_for_display(context.response)}"
+                    )
+                    if context.mns_validation_required.strip().lower() == "true":
+                        mns_event_will_be_triggered_with_correct_data_for_deleted_event(context)
+                    else:
+                        print("MNS validation not required, skipping MNS event verification for deleted event.")
+            except AssertionError:
+                raise
+            except Exception as e:
+                print(f"[TEARDOWN][WARN] Delete cleanup error for {context.ImmsID}: {e}")
         else:
             print("Skipping delete: ImmsID is None")

     if "delete_cleanup_batch" in tags:
         if "IMMS_ID" in context.vaccine_df.columns and context.vaccine_df["IMMS_ID"].notna().any():
             get_tokens(context, context.supplier_name)

-            df = context.vaccine_df.dropna(subset=["IMMS_ID"]).copy()
-            df["IMMS_ID_CLEAN"] = df["IMMS_ID"].astype(str).str.replace("Immunization#", "", regex=False)
+            context.vaccine_df["IMMS_ID_CLEAN"] = (
+                context.vaccine_df["IMMS_ID"].astype(str).str.replace("Immunization#", "", regex=False)
+            )

-            for row in df.itertuples(index=False):
-                imms_id = row.IMMS_ID_CLEAN
+            for imms_id in context.vaccine_df["IMMS_ID_CLEAN"].dropna().unique():
                 delete_url = f"{context.url}/{imms_id}"

                 print(f"Sending DELETE request to: {delete_url}")

                 response = http_requests_session.delete(delete_url, headers=context.headers)

                 if response.status_code != 204:
                     print(
-                        f"Cleanup DELETE returned {response.status_code} for {imms_id} "
-                        f"(teardown best-effort, not failing test). "
-                        f"Response: {get_response_body_for_display(response)}"
+                        f"Cleanup DELETE returned {response.status_code} for {imms_id} (teardown best-effort, not failing test). Response: {get_response_body_for_display(response)}"
                     )
                 else:
                     print(f"Deleted {imms_id} successfully.")
-            print("Batch cleanup finished.")

+            print("Batch cleanup finished.")
         else:
-            print("No IMMS_ID values available — skipping delete cleanup.")
+            print("No IMMS_ID column or no values present as test failed due to an exception — skipping delete cleanup.")
+
+        # Unconditional audit table cleanup for every batch scenario.
+        # This handles the case where the @when archive-wait assert raised before
+        # the @then step containing the old inline cleanup call could execute,
+        # leaving a "Failed" record in the next test run's DynamoDB query.
+    if hasattr(context, "filename") and context.filename and hasattr(context, "S3_env") and context.S3_env:
+        cleanup_failed_audit_records_for_filename(context.filename, context.aws_profile_name, context.S3_env)
```
Contributor:

I thought we agreed not to clear the audit table after the test run, since we rely on that data for debugging — especially when we are doing cleanup before each test run.
Contributor (Author):

You can filter by status = "Not processed - Automation testing" to see all test-generated failures. I've also added a cleaned_at timestamp to the update expression, recording when the automation reset the record. At the start of every test run we can check for the cleaned_at timestamp to see where the update happened. The Allure report will also capture the failed test, so we should be good in terms of debugging.
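For illustration, a minimal boto3 sketch of the update described above, assuming the helper sets the status and a cleaned_at timestamp in a single update expression. The table name, key attribute, and timestamp format are guesses, not the real schema:

```python
import time

import boto3


def reset_failed_audit_record(profile_name: str, table_name: str, message_id: str) -> None:
    """Sketch: mark a 'Failed' audit record as reset by the automation,
    stamping cleaned_at so later runs can see when the reset happened."""
    table = boto3.Session(profile_name=profile_name).resource("dynamodb").Table(table_name)
    table.update_item(
        Key={"message_id": message_id},  # assumed key attribute
        UpdateExpression="SET #s = :status, cleaned_at = :ts",
        ExpressionAttributeNames={"#s": "status"},  # 'status' is a DynamoDB reserved word
        ExpressionAttributeValues={
            ":status": "Not processed - Automation testing",
            ":ts": int(time.time()),  # epoch seconds; the real format is not shown in the diff
        },
    )
```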
Contributor:

Thanks for the detailed explanation. One recommendation from my side: since this issue only occurs in the Batch File Validation tests, it would be cleaner and more efficient to scope the after-scenario cleanup using the Batch_File_Validation_Feature tag rather than running it for every scenario.

If we continue to run the cleanup globally in pytest_bdd_after_scenario, then the pre-test execution cleanup becomes redundant and just adds extra delay to the test run. We also need to remember that this is test code, and some failures are expected by design — so we shouldn't over-clean in a way that hides useful debugging information.

Using the tag-based approach keeps the behaviour targeted, avoids unnecessary scans, and still gives us the audit visibility we rely on for debugging.
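For illustration, the tag-scoped variant the reviewer suggests might look like this inside pytest_bdd_after_scenario, assuming the same tags collection and context attributes used elsewhere in the hook:

```python
# Sketch of the reviewer's suggestion: run the audit cleanup only for
# batch file validation scenarios, identified by their feature tag.
if "Batch_File_Validation_Feature" in tags:
    if hasattr(context, "filename") and context.filename and hasattr(context, "S3_env") and context.S3_env:
        cleanup_failed_audit_records_for_filename(
            context.filename, context.aws_profile_name, context.S3_env
        )
```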