
Commit 0174644

Refactor Lambda event source mappings and update deployment workflows
- Removed the management of shared Lambda triggers for blue/green deployments from the deploy-backend.yml workflow.
- Introduced a new workflow step to apply Lambda event source mappings from a dedicated directory.
- Added a script to adopt existing event source mappings for Lambda functions, streamlining the deployment process.
- Updated the pr-teardown.yml workflow to include the destruction of Lambda event source mappings.
- Created new Terraform configuration files for managing event source mappings, including variables and outputs.
- Updated README.md to reflect changes in the Lambda trigger management process.
1 parent 4f72ae9 commit 0174644

13 files changed

Lines changed: 424 additions & 183 deletions


.github/workflows/deploy-backend.yml

Lines changed: 8 additions & 9 deletions
@@ -251,10 +251,6 @@ jobs:
         working-directory: infrastructure/instance
         run: make workspace

-      - name: Adopt shared Lambda triggers for blue/green deploys
-        working-directory: infrastructure/instance
-        run: bash ../../utilities/scripts/manage_blue_green_event_source_mappings.sh prepare-state
-
       - name: Terraform Plan
         # Ignore cancellations to prevent Terraform from being killed while it holds a state lock
         # A stuck process can still be killed with the force-cancel API operation
@@ -305,11 +301,6 @@ jobs:
         working-directory: infrastructure/instance
         run: make workspace

-      - name: Remove stale Lambda triggers for blue/green deploys
-        if: ${{ !failure() }}
-        working-directory: infrastructure/instance
-        run: bash ../../utilities/scripts/manage_blue_green_event_source_mappings.sh cleanup-stale
-
       - name: Terraform Apply
         # Ignore cancellations to prevent Terraform from being killed while it holds a state lock
         # A stuck process can still be killed with the force-cancel API operation
@@ -319,6 +310,14 @@
           make apply-ci
           echo "ID_SYNC_QUEUE_ARN=$(make -s output name=id_sync_queue_arn)" >> $GITHUB_ENV

+      - name: Apply Lambda event source mappings
+        if: ${{ !failure() }}
+        working-directory: infrastructure/event_source_mappings
+        run: |
+          make init
+          make adopt
+          make apply
+
       - name: Install poetry
         if: ${{ inputs.environment == 'dev' && inputs.create_mns_subscription }}
         run: pip install poetry==2.1.4
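
The `adopt_event_source_mappings.sh` script invoked by `make adopt` is not shown in this commit. As a rough sketch of the adoption pattern it implies — importing an already-deployed mapping into the new workspace's state so Terraform does not recreate the trigger — something like the following could work. The function name, UUID lookup, and idempotency guard are all assumptions, not the actual script; only the resource address comes from the new Terraform configuration.

#!/usr/bin/env bash
# Sketch only: bring an existing delta trigger under management in the
# event_source_mappings workspace without recreating it.
set -euo pipefail

# Assumed: the function name follows the locals in the new Terraform config.
function_name="imms-${SUB_ENVIRONMENT}-delta-lambda"

# Find the UUID of the mapping currently attached to the function.
uuid=$(aws lambda list-event-source-mappings \
  --function-name "$function_name" \
  --query 'EventSourceMappings[0].UUID' --output text)

# Import only if a mapping exists and is not already in state, keeping the
# adopt step idempotent across repeated deploys. "$@" carries the
# -var/-var-file flags the Makefile passes through.
if [ "$uuid" != "None" ] && \
   ! terraform state show aws_lambda_event_source_mapping.delta_trigger >/dev/null 2>&1; then
  terraform import "$@" aws_lambda_event_source_mapping.delta_trigger "$uuid"
fi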

.github/workflows/pr-teardown.yml

Lines changed: 6 additions & 0 deletions
@@ -92,6 +92,12 @@ jobs:
           echo "Unsubscribing SQS to MNS for notifications..."
           make unsubscribe

+      - name: Destroy Lambda event source mappings
+        working-directory: infrastructure/event_source_mappings
+        run: |
+          make init apigee_environment=$APIGEE_ENVIRONMENT environment=$BACKEND_ENVIRONMENT sub_environment=$BACKEND_SUB_ENVIRONMENT
+          make destroy apigee_environment=$APIGEE_ENVIRONMENT environment=$BACKEND_ENVIRONMENT sub_environment=$BACKEND_SUB_ENVIRONMENT
+
       - name: Terraform Destroy
         working-directory: infrastructure/instance
         run: |
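
The same teardown can be run locally from `infrastructure/event_source_mappings`; for example (values are hypothetical, for a PR workspace in the dev account):

# Hypothetical values: a pr-57 workspace in the dev account.
make init apigee_environment=internal-dev environment=dev sub_environment=pr-57
make destroy apigee_environment=internal-dev environment=dev sub_environment=pr-57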

infrastructure/event_source_mappings/.terraform.lock.hcl

Lines changed: 25 additions & 0 deletions
Some generated files are not rendered by default.
infrastructure/event_source_mappings/Makefile

Lines changed: 46 additions & 0 deletions
-include .env

apigee_environment ?= $(APIGEE_ENVIRONMENT)
environment ?= $(ENVIRONMENT)
sub_environment ?= $(SUB_ENVIRONMENT)
sub_environment_dir := $(if $(findstring pr-,$(sub_environment)),pr,$(sub_environment))
tf_var_file := ../instance/environments/$(environment)/$(sub_environment_dir)/variables.tfvars
has_sub_environment_scope = $(shell awk -F= '/^has_sub_environment_scope/ { gsub(/[[:space:]]/, "", $$2); print $$2 }' "$(tf_var_file)")
workspace_name = $(if $(filter false,$(has_sub_environment_scope)),$(environment),$(sub_environment))

tf_cmd = AWS_PROFILE=$(AWS_PROFILE) terraform

bucket_name = $(if $(filter dev,$(environment)),immunisation-$(apigee_environment),immunisation-$(environment))-terraform-state-files

tf_state = \
	-backend-config="bucket=$(bucket_name)" \
	-backend-config="key=event-source-mappings/state"

tf_vars = \
	-var="sub_environment=$(sub_environment)" \
	-var-file="$(tf_var_file)"

init:
	$(tf_cmd) init $(tf_state) -upgrade

workspace:
	$(tf_cmd) workspace select -or-create $(workspace_name) && echo "Switched to workspace/environment: $(workspace_name)"

adopt: workspace
	ENVIRONMENT='$(environment)' SUB_ENVIRONMENT='$(sub_environment)' RESOURCE_SCOPE='$(workspace_name)' bash ../../utilities/scripts/adopt_event_source_mappings.sh $(tf_vars)

plan: workspace
	$(tf_cmd) plan $(tf_vars)

apply: workspace
	$(tf_cmd) apply $(tf_vars) --auto-approve

destroy: workspace
	$(tf_cmd) destroy $(tf_vars) -auto-approve
	$(tf_cmd) workspace select default
	$(tf_cmd) workspace delete $(workspace_name)

output:
	$(tf_cmd) output -raw $(name)

.PHONY : init workspace adopt plan apply destroy output
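
To illustrate how `workspace_name` resolves for shared blue/green state (the tfvars path and its contents below are hypothetical):

# Given environment=prod and sub_environment=blue, the Makefile reads
# ../instance/environments/prod/blue/variables.tfvars. If that file contains
#   has_sub_environment_scope = false
# the awk extraction prints "false":
awk -F= '/^has_sub_environment_scope/ { gsub(/[[:space:]]/, "", $2); print $2 }' \
  ../instance/environments/prod/blue/variables.tfvars
# ...so workspace_name falls back to the shared environment name "prod"
# rather than "blue", and blue and green deploys share one trigger state.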
infrastructure/event_source_mappings/main.tf

Lines changed: 78 additions & 0 deletions
terraform {
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 6"
    }
  }
  backend "s3" {
    region       = "eu-west-2"
    key          = "event-source-mappings/state"
    use_lockfile = true
  }
  required_version = ">= 1.5.0"
}

provider "aws" {
  region = var.aws_region
  default_tags {
    tags = {
      Project     = var.project_name
      Environment = local.resource_scope
      Service     = var.service
    }
  }
}

locals {
  resource_scope      = var.has_sub_environment_scope ? var.sub_environment : var.environment
  short_prefix        = "${var.project_short_name}-${var.sub_environment}"
  events_table_name   = "imms-${local.resource_scope}-imms-events"
  id_sync_queue_name  = "imms-${local.resource_scope}-id-sync-queue"
  delta_lambda_name   = "${local.short_prefix}-delta-lambda"
  delta_dlq_name      = "${local.short_prefix}-delta-dlq"
  id_sync_lambda_name = "${local.short_prefix}-id-sync-lambda"
}

data "aws_dynamodb_table" "events" {
  name = local.events_table_name
}

data "aws_sqs_queue" "delta_dlq" {
  name = local.delta_dlq_name
}

data "aws_sqs_queue" "id_sync" {
  name = local.id_sync_queue_name
}

data "aws_lambda_function" "delta" {
  function_name = local.delta_lambda_name
}

data "aws_lambda_function" "id_sync" {
  function_name = local.id_sync_lambda_name
}

resource "aws_lambda_event_source_mapping" "delta_trigger" {
  event_source_arn  = data.aws_dynamodb_table.events.stream_arn
  function_name     = data.aws_lambda_function.delta.function_name
  starting_position = "TRIM_HORIZON"

  destination_config {
    on_failure {
      destination_arn = data.aws_sqs_queue.delta_dlq.arn
    }
  }

  maximum_retry_attempts = 0
}

resource "aws_lambda_event_source_mapping" "id_sync_sqs_trigger" {
  event_source_arn = data.aws_sqs_queue.id_sync.arn
  function_name    = data.aws_lambda_function.id_sync.arn

  batch_size                         = 10
  maximum_batching_window_in_seconds = 5
  function_response_types            = ["ReportBatchItemFailures"]
}
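
One way to sanity-check the mappings this configuration manages is the AWS CLI; the function name follows the locals above, with `pr-57` as a hypothetical sub-environment:

# List the mappings attached to a delta Lambda (sub-environment hypothetical).
aws lambda list-event-source-mappings \
  --function-name imms-pr-57-delta-lambda \
  --query 'EventSourceMappings[].{uuid: UUID, source: EventSourceArn, state: State}'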
infrastructure/event_source_mappings/outputs.tf

Lines changed: 4 additions & 0 deletions
output "id_sync_queue_arn" {
  description = "The ARN of the ID Sync (MNS NHS Number change) SQS queue"
  value       = data.aws_sqs_queue.id_sync.arn
}
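
This output can be read locally through the directory's Makefile `output` target, mirroring how the deploy workflow reads the equivalent instance output:

# Prints the raw queue ARN from the event-source-mappings workspace state.
make -s output name=id_sync_queue_arn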
infrastructure/event_source_mappings/variables.tf

Lines changed: 97 additions & 0 deletions
variable "environment" {
  type        = string
  description = "Environment (AWS Account) name - dev, preprod or prod"
}

variable "sub_environment" {
  type        = string
  description = "Sub-environment name, e.g. internal-dev, int-blue, blue"
}

variable "has_sub_environment_scope" {
  description = "True if resources are scoped to the sub-environment. False for blue/green shared resources."
  type        = bool
  default     = false
}

variable "project_name" {
  type    = string
  default = "immunisation"
}

variable "project_short_name" {
  type    = string
  default = "imms"
}

variable "service" {
  type    = string
  default = "fhir-api"
}

variable "aws_region" {
  type    = string
  default = "eu-west-2"

  validation {
    condition     = var.aws_region == "eu-west-2"
    error_message = "AWS Region must be set to eu-west-2."
  }
}

variable "immunisation_account_id" {
  type        = string
  description = "Immunisation AWS Account ID"
}

variable "dspp_core_account_id" {
  type        = string
  description = "DSPP Core AWS Account ID"
}

variable "mns_account_id" {
  type        = string
  description = "MNS AWS account ID - trusted source for MNS notifications"
  default     = "631615744739"
}

variable "pds_environment" {
  type    = string
  default = "int"
}

variable "mns_environment" {
  type    = string
  default = "int"
}

variable "error_alarm_notifications_enabled" {
  default     = true
  description = "Switch to enable error alarm notifications to Slack"
  type        = bool
}

variable "create_mesh_processor" {
  type    = bool
  default = false
}

variable "mesh_no_invocation_period_seconds" {
  type    = number
  default = 300
}

variable "dspp_submission_s3_bucket_name" {
  type    = string
  default = "nhsd-dspp-core-ref-s3-submission-upload"
}

variable "dspp_submission_kms_key_alias" {
  type    = string
  default = "nhsd-dspp-core-ref-s3-submission-upload-key"
}

variable "dynamodb_point_in_time_recovery_enabled" {
  type    = bool
  default = false
}

infrastructure/instance/README.md

Lines changed: 2 additions & 4 deletions
@@ -34,8 +34,6 @@ Note: If you switch environment configuration in .env ensure that you run `make
 If you want to apply Terraform to a workspace created by a PR you can set the above SUB_ENVIRONMENT to the `PR-number` and ENVIRONMENT set to `dev`.
 E.g. `pr-57`. You can use this to test out changes when tests fail in CI.

-## Blue/Green Lambda Trigger Handoff
+## Lambda Trigger Handoff

-For split sub-environments such as `int-blue`/`int-green` and `prod-blue`/`prod-green`, the deploy workflow now reimports the shared `delta_trigger` and `id_sync_sqs_trigger` resources into the target Terraform workspace before planning. On apply, it also deletes any stale trigger that still points at the target side's old dedicated Lambda function.
-
-This removes the release-time `Disable delta` and `Disable ID sync` steps from the repository-managed deployment flow. The remaining operational follow-up is outside this repository: update the Jira Smart Checklist release templates to remove those manual checklist items once the automated flow has been rolled out.
+The `delta_trigger` and `id_sync_sqs_trigger` event source mappings are managed from `../event_source_mappings` so the main instance plan does not rewrite shared backend state. The deploy workflow applies the main instance first, then adopts or updates the trigger mappings from the dedicated trigger workspace.
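
As a sketch, that ordering corresponds roughly to the following shell sequence run from the `infrastructure/` directory; the targets exist in the respective Makefiles, but this is illustrative rather than a supported entry point:

# Apply the main instance first...
cd instance && make apply-ci
# ...then adopt and apply the shared trigger mappings from the dedicated workspace.
cd ../event_source_mappings && make init && make adopt && make apply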

infrastructure/instance/delta.tf

Lines changed: 0 additions & 14 deletions
@@ -70,20 +70,6 @@ resource "aws_lambda_function" "delta_sync_lambda" {
   ]
 }

-
-resource "aws_lambda_event_source_mapping" "delta_trigger" {
-  event_source_arn  = aws_dynamodb_table.events-dynamodb-table.stream_arn
-  function_name     = aws_lambda_function.delta_sync_lambda.function_name
-  starting_position = "TRIM_HORIZON"
-  destination_config {
-    on_failure {
-      destination_arn = aws_sqs_queue.dlq.arn
-    }
-  }
-  maximum_retry_attempts = 0
-}
-
-
 resource "aws_sqs_queue" "dlq" {
   name = "${local.short_prefix}-${local.dlq_name}"
 }

infrastructure/instance/id_sync_lambda.tf

Lines changed: 0 additions & 17 deletions
@@ -258,20 +258,3 @@ resource "aws_cloudwatch_metric_alarm" "id_sync_error_alarm" {
   alarm_actions      = [data.aws_sns_topic.imms_system_alert_errors.arn]
   treat_missing_data = "notBreaching"
 }
-
-
-
-# delete config_lambda_notification / new_s3_invoke_permission - not required; duplicate
-
-# NEW
-resource "aws_lambda_event_source_mapping" "id_sync_sqs_trigger" {
-  event_source_arn = aws_sqs_queue.id_sync_queue.arn
-  function_name    = aws_lambda_function.id_sync_lambda.arn
-
-  # Optional: Configure batch size and other settings
-  batch_size                         = 10
-  maximum_batching_window_in_seconds = 5
-
-  # Optional: Configure error handling
-  function_response_types = ["ReportBatchItemFailures"]
-}
