diff --git a/infrastructure/modules/kinesis_firehose/data.tf b/infrastructure/modules/kinesis_firehose/data.tf new file mode 100644 index 000000000..8fc4b38cc --- /dev/null +++ b/infrastructure/modules/kinesis_firehose/data.tf @@ -0,0 +1 @@ +data "aws_caller_identity" "current" {} diff --git a/infrastructure/modules/kinesis_firehose/default_variables.tf b/infrastructure/modules/kinesis_firehose/default_variables.tf new file mode 120000 index 000000000..062daf614 --- /dev/null +++ b/infrastructure/modules/kinesis_firehose/default_variables.tf @@ -0,0 +1 @@ +../_shared/default_variables.tf \ No newline at end of file diff --git a/infrastructure/modules/kinesis_firehose/kinesis_firehose_delivery_stream.tf b/infrastructure/modules/kinesis_firehose/kinesis_firehose_delivery_stream.tf new file mode 100644 index 000000000..a0937d692 --- /dev/null +++ b/infrastructure/modules/kinesis_firehose/kinesis_firehose_delivery_stream.tf @@ -0,0 +1,29 @@ +resource "aws_kinesis_firehose_delivery_stream" "eligibility_audit_firehose_delivery_stream" { + name = "${terraform.workspace == "default" ? 
"" : "${terraform.workspace}-"}${var.project_name}-${var.environment}-${var.audit_firehose_delivery_stream_name}" + destination = "extended_s3" + + extended_s3_configuration { + role_arn = var.audit_firehose_role_arn + bucket_arn = var.s3_audit_bucket_arn + + buffering_size = 1 + buffering_interval = 60 + compression_format = "UNCOMPRESSED" + + kms_key_arn = aws_kms_key.firehose_cmk.arn + + cloudwatch_logging_options { + enabled = true + log_group_name = var.kinesis_cloud_watch_log_group_name + log_stream_name = var.kinesis_cloud_watch_log_stream + } + } + + server_side_encryption { + enabled = true + key_arn = aws_kms_key.firehose_cmk.arn + key_type = "CUSTOMER_MANAGED_CMK" + } + + tags = var.tags +} diff --git a/infrastructure/modules/kinesis_firehose/kms.tf b/infrastructure/modules/kinesis_firehose/kms.tf new file mode 100644 index 000000000..3eadd10ba --- /dev/null +++ b/infrastructure/modules/kinesis_firehose/kms.tf @@ -0,0 +1,94 @@ +resource "aws_kms_key" "firehose_cmk" { + description = "${terraform.workspace == "default" ? "" : "${terraform.workspace}-"}${var.audit_firehose_delivery_stream_name} Master Key" + deletion_window_in_days = 14 + is_enabled = true + enable_key_rotation = true + tags = var.tags +} + + +resource "aws_kms_alias" "firehose_cmk" { + name = "alias/${terraform.workspace == "default" ? 
"" : "${terraform.workspace}-"}${var.audit_firehose_delivery_stream_name}-cmk" + target_key_id = aws_kms_key.firehose_cmk.key_id +} + +resource "aws_kms_key_policy" "firehose_key_policy" { + key_id = aws_kms_key.firehose_cmk.id + policy = data.aws_iam_policy_document.firehose_kms_key_policy.json +} + + +data "aws_iam_policy_document" "firehose_kms_key_policy" { + #checkov:skip=CKV_AWS_111: Root user needs full KMS key management + #checkov:skip=CKV_AWS_356: Root user needs full KMS key management + #checkov:skip=CKV_AWS_109: Root user needs full KMS key management + statement { + sid = "EnableIamUserPermissions" + effect = "Allow" + principals { + type = "AWS" + identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"] + } + actions = ["kms:*"] + resources = ["*"] + } + statement { + sid = "EnableRootUserPermissions" + effect = "Allow" + + principals { + type = "AWS" + identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"] + } + + actions = ["kms:*"] + resources = ["*"] + } + + # Your existing statements below... 
+ statement { + sid = "AllowFirehoseAccess" + effect = "Allow" + principals { + type = "Service" + identifiers = ["firehose.amazonaws.com"] + } + actions = [ + "kms:Encrypt", + "kms:Decrypt", + "kms:GenerateDataKey*", + "kms:DescribeKey" + ] + resources = [aws_kms_key.firehose_cmk.arn] + } + + statement { + sid = "AllowFirehoseRoleUsage" + effect = "Allow" + principals { + type = "AWS" + identifiers = [var.audit_firehose_role_arn] + } + actions = ["kms:*"] + resources = [aws_kms_key.firehose_cmk.arn] + } + + statement { + sid = "AllowCloudWatchLogsUseOfTheKey" + effect = "Allow" + principals { + type = "Service" + identifiers = ["logs.${var.region}.amazonaws.com"] + } + actions = [ + "kms:Encrypt", + "kms:Decrypt", + "kms:ReEncrypt*", + "kms:GenerateDataKey*", + "kms:DescribeKey" + ] + resources = [aws_kms_key.firehose_cmk.arn] + } +} + + diff --git a/infrastructure/modules/kinesis_firehose/outputs.tf b/infrastructure/modules/kinesis_firehose/outputs.tf new file mode 100644 index 000000000..d457b669a --- /dev/null +++ b/infrastructure/modules/kinesis_firehose/outputs.tf @@ -0,0 +1,7 @@ +output "firehose_stream_name" { + value = aws_kinesis_firehose_delivery_stream.eligibility_audit_firehose_delivery_stream.name +} + +output "kinesis_firehose_cmk_arn" { + value = aws_kms_key.firehose_cmk.arn +} diff --git a/infrastructure/modules/kinesis_firehose/variables.tf b/infrastructure/modules/kinesis_firehose/variables.tf new file mode 100644 index 000000000..69740d182 --- /dev/null +++ b/infrastructure/modules/kinesis_firehose/variables.tf @@ -0,0 +1,27 @@ +variable "audit_firehose_delivery_stream_name" { + description = "audit firehose delivery stream name" + type = string +} + +variable "audit_firehose_role_arn" { + description = "audit firehose role arn" + type = string +} + +variable "s3_audit_bucket_arn" { + description = "s3 audit bucket arn" + type = string +} + +variable "kinesis_cloud_watch_log_group_name" { + description = "kinesis cloud watch log group name" + 
type = string +} + +variable "kinesis_cloud_watch_log_stream" { + description = "kinesis cloud watch log stream" + type = string +} + + + diff --git a/infrastructure/modules/lambda/kms.tf b/infrastructure/modules/lambda/kms.tf index 7a963a9ae..55c5133f0 100644 --- a/infrastructure/modules/lambda/kms.tf +++ b/infrastructure/modules/lambda/kms.tf @@ -17,6 +17,19 @@ resource "aws_kms_key_policy" "lambda_cmk" { } data "aws_iam_policy_document" "lambda_cmk" { + #checkov:skip=CKV_AWS_111: Root user needs full KMS key management + #checkov:skip=CKV_AWS_356: Root user needs full KMS key management + #checkov:skip=CKV_AWS_109: Root user needs full KMS key management + statement { + sid = "EnableIamUserPermissions" + effect = "Allow" + principals { + type = "AWS" + identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"] + } + actions = ["kms:*"] + resources = ["*"] + } statement { sid = "Enable IAM User Permissions for Lambda CMK" effect = "Allow" diff --git a/infrastructure/modules/lambda/lambda.tf b/infrastructure/modules/lambda/lambda.tf index e7f98f3f4..924de6c47 100644 --- a/infrastructure/modules/lambda/lambda.tf +++ b/infrastructure/modules/lambda/lambda.tf @@ -17,10 +17,11 @@ resource "aws_lambda_function" "eligibility_signposting_lambda" { environment { variables = { - PERSON_TABLE_NAME = var.eligibility_status_table_name, - RULES_BUCKET_NAME = var.eligibility_rules_bucket_name, - ENV = var.environment - LOG_LEVEL = var.log_level + PERSON_TABLE_NAME = var.eligibility_status_table_name, + RULES_BUCKET_NAME = var.eligibility_rules_bucket_name, + KINESIS_AUDIT_STREAM_TO_S3 = var.kinesis_audit_stream_to_s3_name + ENV = var.environment + LOG_LEVEL = var.log_level } } diff --git a/infrastructure/modules/lambda/variables.tf b/infrastructure/modules/lambda/variables.tf index f9c440e16..ca6d9b95d 100644 --- a/infrastructure/modules/lambda/variables.tf +++ b/infrastructure/modules/lambda/variables.tf @@ -38,6 +38,11 @@ variable 
"eligibility_status_table_name" { type = string } +variable "kinesis_audit_stream_to_s3_name" { + description = "kinesis audit stream to s3 name" + type = string +} + variable "log_level" { description = "log level" type = string diff --git a/infrastructure/stacks/api-layer/api_gateway.tf b/infrastructure/stacks/api-layer/api_gateway.tf index 745210a25..285d2d089 100644 --- a/infrastructure/stacks/api-layer/api_gateway.tf +++ b/infrastructure/stacks/api-layer/api_gateway.tf @@ -46,9 +46,9 @@ resource "aws_api_gateway_deployment" "eligibility_signposting_api" { resource "aws_api_gateway_stage" "eligibility-signposting-api" { #checkov:skip=CKV2_AWS_51: mTLS is enforced at the custom domain, not at the stage level #checkov:skip=CKV_AWS_120: We're not enabling caching for this API Gateway, yet - deployment_id = aws_api_gateway_deployment.eligibility_signposting_api.id - rest_api_id = module.eligibility_signposting_api_gateway.rest_api_id - stage_name = "${local.workspace}-eligibility-signposting-api-live" + deployment_id = aws_api_gateway_deployment.eligibility_signposting_api.id + rest_api_id = module.eligibility_signposting_api_gateway.rest_api_id + stage_name = "${local.workspace}-eligibility-signposting-api-live" xray_tracing_enabled = true access_log_settings { diff --git a/infrastructure/stacks/api-layer/cloudwatch.tf b/infrastructure/stacks/api-layer/cloudwatch.tf index 04e00d344..fb1af396b 100644 --- a/infrastructure/stacks/api-layer/cloudwatch.tf +++ b/infrastructure/stacks/api-layer/cloudwatch.tf @@ -9,3 +9,19 @@ resource "aws_cloudwatch_log_group" "lambda_logs" { Stack = local.stack_name } } + +resource "aws_cloudwatch_log_group" "firehose_audit" { + name = "/aws/kinesisfirehose/${var.project_name}-${var.environment}-audit" + retention_in_days = 365 + kms_key_id = module.eligibility_audit_firehose_delivery_stream.kinesis_firehose_cmk_arn + + tags = { + Name = "kinesis-firehose-logs" + Stack = local.stack_name + } +} + +resource "aws_cloudwatch_log_stream" 
"firehose_audit_stream" { + name = "audit_stream_log" + log_group_name = aws_cloudwatch_log_group.firehose_audit.name +} diff --git a/infrastructure/stacks/api-layer/cloudwatch_metrics.tf b/infrastructure/stacks/api-layer/cloudwatch_metrics.tf index 6805603e7..d45bddeea 100644 --- a/infrastructure/stacks/api-layer/cloudwatch_metrics.tf +++ b/infrastructure/stacks/api-layer/cloudwatch_metrics.tf @@ -1,120 +1,120 @@ locals { - cloudtrail_custom_metrics = [ - { - name = "UnauthorizedApiCalls" - namespace = "security" - filter = "{($.errorCode=\"*UnauthorizedOperation\") || ($.errorCode=\"AccessDenied*\")}" - log_group_name = "NHSDAudit_trail_log_group" - }, - { - name = "ConsoleAuthenticationFailures" - namespace = "security" - filter = "{($.eventName=ConsoleLogin) && ($.errorMessage=\"Failed authentication\")}" - log_group_name = "NHSDAudit_trail_log_group" - }, - { - name = "CloudTrailConfigChanges" - namespace = "security" - filter = "{($.eventName=CreateTrail) || ($.eventName=UpdateTrail) || ($.eventName=DeleteTrail) || ($.eventName=StartLogging) || ($.eventName=StopLogging)}" - log_group_name = "NHSDAudit_trail_log_group" - }, - { - name = "VPCChanges" - namespace = "security" - filter = "{($.eventName=CreateVpc) || ($.eventName=DeleteVpc) || ($.eventName=ModifyVpcAttribute) || ($.eventName=AcceptVpcPeeringConnection) || ($.eventName=CreateVpcPeeringConnection) || ($.eventName=DeleteVpcPeeringConnection) || ($.eventName=RejectVpcPeeringConnection) || ($.eventName=AttachClassicLinkVpc) || ($.eventName=DetachClassicLinkVpc) || ($.eventName=DisableVpcClassicLink) || ($.eventName=EnableVpcClassicLink)}" - log_group_name = "NHSDAudit_trail_log_group" - }, - { - name = "AWSConfigChanges" - namespace = "security" - filter = "{($.eventSource=config.amazonaws.com) && (($.eventName=StopConfigurationRecorder) || ($.eventName=DeleteDeliveryChannel) || ($.eventName=PutDeliveryChannel) || ($.eventName=PutConfigurationRecorder))}" - log_group_name = "NHSDAudit_trail_log_group" 
- }, - { - name = "ModificationOfCMKs" - namespace = "security" - filter = "{($.eventSource=kms.amazonaws.com) && (($.eventName=DisableKey) || ($.eventName=ScheduleKeyDeletion))}" - log_group_name = "NHSDAudit_trail_log_group" - }, - { - name = "UnsuccessfulSwitchRole" - namespace = "security" - filter = "{ ( $.eventName = SwitchRole && $.responseElements.SwitchRole = Failure ) }" - log_group_name = "NHSDAudit_trail_log_group" - }, - { - name = "ConsoleLoginNoMFA" - namespace = "security" - filter = "{ ($.eventName = \"ConsoleLogin\") && ($.additionalEventData.MFAUsed != \"Yes\") && ($.userIdentity.type = \"IAMUser\") && ($.responseElements.ConsoleLogin = \"Success\") }" - log_group_name = "NHSDAudit_trail_log_group" - }, - { - name = "RootAccountUsage" - namespace = "security" - filter = "{$.userIdentity.type=\"Root\" && $.userIdentity.invokedBy NOT EXISTS && $.eventType !=\"AwsServiceEvent\"}" - log_group_name = "NHSDAudit_trail_log_group" - }, - { - name = "SecurityGroupChange" - namespace = "security" - filter = "{($.eventName=AuthorizeSecurityGroupIngress) || ($.eventName=AuthorizeSecurityGroupEgress) || ($.eventName=RevokeSecurityGroupIngress) || ($.eventName=RevokeSecurityGroupEgress) || ($.eventName=CreateSecurityGroup) || ($.eventName=DeleteSecurityGroup)}" - log_group_name = "NHSDAudit_trail_log_group" - }, - { - name = "RouteTableChanges" - namespace = "security" - filter = "{($.eventSource=ec2.amazonaws.com) && (($.eventName=CreateRoute) || ($.eventName=CreateRouteTable) || ($.eventName=ReplaceRoute) || ($.eventName=ReplaceRouteTableAssociation) || ($.eventName=DeleteRouteTable) || ($.eventName=DeleteRoute) || ($.eventName=DisassociateRouteTable))}" - log_group_name = "NHSDAudit_trail_log_group" - }, - { - name = "IAMPolicyChanges" - namespace = "security" - filter = "{($.eventSource=iam.amazonaws.com) && (($.eventName=DeleteGroupPolicy) || ($.eventName=DeleteRolePolicy) || ($.eventName=DeleteUserPolicy) || ($.eventName=PutGroupPolicy) || 
($.eventName=PutRolePolicy) || ($.eventName=PutUserPolicy) || ($.eventName=CreatePolicy) || ($.eventName=DeletePolicy) || ($.eventName=CreatePolicyVersion) || ($.eventName=DeletePolicyVersion) || ($.eventName=AttachRolePolicy) || ($.eventName=DetachRolePolicy) || ($.eventName=AttachUserPolicy) || ($.eventName=DetachUserPolicy) || ($.eventName=AttachGroupPolicy) || ($.eventName=DetachGroupPolicy))}" - log_group_name = "NHSDAudit_trail_log_group" - }, - { - name = "s3BucketPolicyChanges" - namespace = "security" - filter = "{($.eventSource=s3.amazonaws.com) && (($.eventName=PutBucketAcl) || ($.eventName=PutBucketPolicy) || ($.eventName=PutBucketCors) || ($.eventName=PutBucketLifecycle) || ($.eventName=PutBucketReplication) || ($.eventName=DeleteBucketPolicy) || ($.eventName=DeleteBucketCors) || ($.eventName=DeleteBucketLifecycle) || ($.eventName=DeleteBucketReplication))}" - log_group_name = "NHSDAudit_trail_log_group" - }, - { - name = "ChangesToNetworkGateways" - namespace = "security" - filter = "{($.eventName=CreateCustomerGateway) || ($.eventName=DeleteCustomerGateway) || ($.eventName=AttachInternetGateway) || ($.eventName=CreateInternetGateway) || ($.eventName=DeleteInternetGateway) || ($.eventName=DetachInternetGateway)}" - log_group_name = "NHSDAudit_trail_log_group" - }, - { - name = "ChangesToNACLs" - namespace = "security" - filter = "{($.eventName=CreateNetworkAcl) || ($.eventName=CreateNetworkAclEntry) || ($.eventName=DeleteNetworkAcl) || ($.eventName=DeleteNetworkAclEntry) || ($.eventName=ReplaceNetworkAclEntry) || ($.eventName=ReplaceNetworkAclAssociation)}" - log_group_name = "NHSDAudit_trail_log_group" - }, - { - name = "KMSKeyPolicyChanges" - namespace = "security" - filter = "{($.eventSource=kms.amazonaws.com) && (($.eventName=PutKeyPolicy) || ($.eventName=DeleteKeyPolicy))}" - log_group_name = "NHSDAudit_trail_log_group" - }, - { - name = "s3PublicAccessChanges" - namespace = "security" - filter = "{($.eventSource=s3.amazonaws.com) && 
(($.eventName=PutBucketAcl) || ($.eventName=PutObjectAcl))}" - log_group_name = "NHSDAudit_trail_log_group" - }, - { - name = "CloudWatchAlarmChanges" - namespace = "security" - filter = "{($.eventSource=cloudwatch.amazonaws.com) && (($.eventName=PutMetricAlarm) || ($.eventName=DeleteAlarms) || ($.eventName=SetAlarmState))}" - log_group_name = "NHSDAudit_trail_log_group" - }, - { - name = "LambdaFunctionChanges" - namespace = "security" - filter = "{($.eventSource=lambda.amazonaws.com) && (($.eventName=CreateFunction20150331) || ($.eventName=DeleteFunction20150331) || ($.eventName=UpdateFunctionCode20150331) || ($.eventName=UpdateFunctionConfiguration20150331))}" - log_group_name = "NHSDAudit_trail_log_group" - }, - ] + cloudtrail_custom_metrics = [ + { + name = "UnauthorizedApiCalls" + namespace = "security" + filter = "{($.errorCode=\"*UnauthorizedOperation\") || ($.errorCode=\"AccessDenied*\")}" + log_group_name = "NHSDAudit_trail_log_group" + }, + { + name = "ConsoleAuthenticationFailures" + namespace = "security" + filter = "{($.eventName=ConsoleLogin) && ($.errorMessage=\"Failed authentication\")}" + log_group_name = "NHSDAudit_trail_log_group" + }, + { + name = "CloudTrailConfigChanges" + namespace = "security" + filter = "{($.eventName=CreateTrail) || ($.eventName=UpdateTrail) || ($.eventName=DeleteTrail) || ($.eventName=StartLogging) || ($.eventName=StopLogging)}" + log_group_name = "NHSDAudit_trail_log_group" + }, + { + name = "VPCChanges" + namespace = "security" + filter = "{($.eventName=CreateVpc) || ($.eventName=DeleteVpc) || ($.eventName=ModifyVpcAttribute) || ($.eventName=AcceptVpcPeeringConnection) || ($.eventName=CreateVpcPeeringConnection) || ($.eventName=DeleteVpcPeeringConnection) || ($.eventName=RejectVpcPeeringConnection) || ($.eventName=AttachClassicLinkVpc) || ($.eventName=DetachClassicLinkVpc) || ($.eventName=DisableVpcClassicLink) || ($.eventName=EnableVpcClassicLink)}" + log_group_name = "NHSDAudit_trail_log_group" + }, + { + name = 
"AWSConfigChanges" + namespace = "security" + filter = "{($.eventSource=config.amazonaws.com) && (($.eventName=StopConfigurationRecorder) || ($.eventName=DeleteDeliveryChannel) || ($.eventName=PutDeliveryChannel) || ($.eventName=PutConfigurationRecorder))}" + log_group_name = "NHSDAudit_trail_log_group" + }, + { + name = "ModificationOfCMKs" + namespace = "security" + filter = "{($.eventSource=kms.amazonaws.com) && (($.eventName=DisableKey) || ($.eventName=ScheduleKeyDeletion))}" + log_group_name = "NHSDAudit_trail_log_group" + }, + { + name = "UnsuccessfulSwitchRole" + namespace = "security" + filter = "{ ( $.eventName = SwitchRole && $.responseElements.SwitchRole = Failure ) }" + log_group_name = "NHSDAudit_trail_log_group" + }, + { + name = "ConsoleLoginNoMFA" + namespace = "security" + filter = "{ ($.eventName = \"ConsoleLogin\") && ($.additionalEventData.MFAUsed != \"Yes\") && ($.userIdentity.type = \"IAMUser\") && ($.responseElements.ConsoleLogin = \"Success\") }" + log_group_name = "NHSDAudit_trail_log_group" + }, + { + name = "RootAccountUsage" + namespace = "security" + filter = "{$.userIdentity.type=\"Root\" && $.userIdentity.invokedBy NOT EXISTS && $.eventType !=\"AwsServiceEvent\"}" + log_group_name = "NHSDAudit_trail_log_group" + }, + { + name = "SecurityGroupChange" + namespace = "security" + filter = "{($.eventName=AuthorizeSecurityGroupIngress) || ($.eventName=AuthorizeSecurityGroupEgress) || ($.eventName=RevokeSecurityGroupIngress) || ($.eventName=RevokeSecurityGroupEgress) || ($.eventName=CreateSecurityGroup) || ($.eventName=DeleteSecurityGroup)}" + log_group_name = "NHSDAudit_trail_log_group" + }, + { + name = "RouteTableChanges" + namespace = "security" + filter = "{($.eventSource=ec2.amazonaws.com) && (($.eventName=CreateRoute) || ($.eventName=CreateRouteTable) || ($.eventName=ReplaceRoute) || ($.eventName=ReplaceRouteTableAssociation) || ($.eventName=DeleteRouteTable) || ($.eventName=DeleteRoute) || ($.eventName=DisassociateRouteTable))}" + 
log_group_name = "NHSDAudit_trail_log_group" + }, + { + name = "IAMPolicyChanges" + namespace = "security" + filter = "{($.eventSource=iam.amazonaws.com) && (($.eventName=DeleteGroupPolicy) || ($.eventName=DeleteRolePolicy) || ($.eventName=DeleteUserPolicy) || ($.eventName=PutGroupPolicy) || ($.eventName=PutRolePolicy) || ($.eventName=PutUserPolicy) || ($.eventName=CreatePolicy) || ($.eventName=DeletePolicy) || ($.eventName=CreatePolicyVersion) || ($.eventName=DeletePolicyVersion) || ($.eventName=AttachRolePolicy) || ($.eventName=DetachRolePolicy) || ($.eventName=AttachUserPolicy) || ($.eventName=DetachUserPolicy) || ($.eventName=AttachGroupPolicy) || ($.eventName=DetachGroupPolicy))}" + log_group_name = "NHSDAudit_trail_log_group" + }, + { + name = "s3BucketPolicyChanges" + namespace = "security" + filter = "{($.eventSource=s3.amazonaws.com) && (($.eventName=PutBucketAcl) || ($.eventName=PutBucketPolicy) || ($.eventName=PutBucketCors) || ($.eventName=PutBucketLifecycle) || ($.eventName=PutBucketReplication) || ($.eventName=DeleteBucketPolicy) || ($.eventName=DeleteBucketCors) || ($.eventName=DeleteBucketLifecycle) || ($.eventName=DeleteBucketReplication))}" + log_group_name = "NHSDAudit_trail_log_group" + }, + { + name = "ChangesToNetworkGateways" + namespace = "security" + filter = "{($.eventName=CreateCustomerGateway) || ($.eventName=DeleteCustomerGateway) || ($.eventName=AttachInternetGateway) || ($.eventName=CreateInternetGateway) || ($.eventName=DeleteInternetGateway) || ($.eventName=DetachInternetGateway)}" + log_group_name = "NHSDAudit_trail_log_group" + }, + { + name = "ChangesToNACLs" + namespace = "security" + filter = "{($.eventName=CreateNetworkAcl) || ($.eventName=CreateNetworkAclEntry) || ($.eventName=DeleteNetworkAcl) || ($.eventName=DeleteNetworkAclEntry) || ($.eventName=ReplaceNetworkAclEntry) || ($.eventName=ReplaceNetworkAclAssociation)}" + log_group_name = "NHSDAudit_trail_log_group" + }, + { + name = "KMSKeyPolicyChanges" + namespace = 
"security" + filter = "{($.eventSource=kms.amazonaws.com) && (($.eventName=PutKeyPolicy) || ($.eventName=DeleteKeyPolicy))}" + log_group_name = "NHSDAudit_trail_log_group" + }, + { + name = "s3PublicAccessChanges" + namespace = "security" + filter = "{($.eventSource=s3.amazonaws.com) && (($.eventName=PutBucketAcl) || ($.eventName=PutObjectAcl))}" + log_group_name = "NHSDAudit_trail_log_group" + }, + { + name = "CloudWatchAlarmChanges" + namespace = "security" + filter = "{($.eventSource=cloudwatch.amazonaws.com) && (($.eventName=PutMetricAlarm) || ($.eventName=DeleteAlarms) || ($.eventName=SetAlarmState))}" + log_group_name = "NHSDAudit_trail_log_group" + }, + { + name = "LambdaFunctionChanges" + namespace = "security" + filter = "{($.eventSource=lambda.amazonaws.com) && (($.eventName=CreateFunction20150331) || ($.eventName=DeleteFunction20150331) || ($.eventName=UpdateFunctionCode20150331) || ($.eventName=UpdateFunctionConfiguration20150331))}" + log_group_name = "NHSDAudit_trail_log_group" + }, + ] } resource "aws_cloudwatch_log_metric_filter" "cloudtrail_custom_metrics" { diff --git a/infrastructure/stacks/api-layer/data.tf b/infrastructure/stacks/api-layer/data.tf index f4dfb5827..6b159ad98 100644 --- a/infrastructure/stacks/api-layer/data.tf +++ b/infrastructure/stacks/api-layer/data.tf @@ -1,10 +1,10 @@ data "aws_caller_identity" "current" {} data "aws_acm_certificate" "imported_cert" { - domain = "${var.environment}.${local.api_domain_name}" - types = ["IMPORTED"] - provider = aws.eu-west-2 - key_types = ["RSA_4096"] + domain = "${var.environment}.${local.api_domain_name}" + types = ["IMPORTED"] + provider = aws.eu-west-2 + key_types = ["RSA_4096"] } data "aws_acm_certificate" "validation_cert" { @@ -20,11 +20,11 @@ data "aws_kms_alias" "networking_ssm_key" { } data "aws_ssm_parameter" "mtls_api_client_cert" { - name = "/${var.environment}/mtls/api_client_cert" + name = "/${var.environment}/mtls/api_client_cert" with_decryption = true } data 
"aws_ssm_parameter" "mtls_api_ca_cert" { - name = "/${var.environment}/mtls/api_ca_cert" + name = "/${var.environment}/mtls/api_ca_cert" with_decryption = true } diff --git a/infrastructure/stacks/api-layer/iam_policies.tf b/infrastructure/stacks/api-layer/iam_policies.tf index 4890bb3d6..46e1d9e46 100644 --- a/infrastructure/stacks/api-layer/iam_policies.tf +++ b/infrastructure/stacks/api-layer/iam_policies.tf @@ -56,6 +56,41 @@ resource "aws_iam_role_policy" "lambda_s3_read_policy" { policy = data.aws_iam_policy_document.s3_rules_bucket_policy.json } +# Attach s3 write policy to kinesis firehose role +resource "aws_iam_role_policy" "kinesis_firehose_s3_write_policy" { + name = "S3WriteAccess" + role = aws_iam_role.eligibility_audit_firehose_role.id + policy = data.aws_iam_policy_document.s3_audit_bucket_policy.json +} + +# Policy doc for firehose logging +resource "aws_iam_role_policy" "kinesis_firehose_logs_policy" { + name = "CloudWatchLogsAccess" + role = aws_iam_role.eligibility_audit_firehose_role.id + + policy = jsonencode({ + Version = "2012-10-17", + Statement = [ + { + Effect = "Allow", + Action = [ + "logs:CreateLogStream", + "logs:PutLogEvents" + ], + Resource = "arn:aws:logs:${var.default_aws_region}:${data.aws_caller_identity.current.account_id}:log-group:/aws/kinesisfirehose/${module.eligibility_audit_firehose_delivery_stream.firehose_stream_name}:log-stream:*" + }, + { + Effect = "Allow", + Action = [ + "logs:DescribeLogGroups", + "logs:DescribeLogStreams" + ], + Resource = "arn:aws:logs:${var.default_aws_region}:${data.aws_caller_identity.current.account_id}:log-group:/aws/kinesisfirehose/${module.eligibility_audit_firehose_delivery_stream.firehose_stream_name}" + } + ] + }) +} + # Attach AWSLambdaVPCAccessExecutionRole to Lambda resource "aws_iam_role_policy_attachment" "AWSLambdaVPCAccessExecutionRole" { role = aws_iam_role.eligibility_lambda_role.id @@ -64,8 +99,8 @@ resource "aws_iam_role_policy_attachment" "AWSLambdaVPCAccessExecutionRole" { 
#Attach AWSLambdaBasicExecutionRole to Lambda resource "aws_iam_role_policy_attachment" "lambda_logs_policy_attachment" { - policy_arn = "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" role = aws_iam_role.eligibility_lambda_role.name + policy_arn = "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" } # Policy doc for S3 Audit bucket @@ -178,9 +213,9 @@ data "aws_iam_policy_document" "s3_audit_kms_key_policy" { effect = "Allow" principals { type = "AWS" - identifiers = [aws_iam_role.eligibility_lambda_role.arn] + identifiers = [aws_iam_role.eligibility_lambda_role.arn, aws_iam_role.eligibility_audit_firehose_role.arn] } - actions = [ + actions = [ "kms:Decrypt", "kms:Encrypt", "kms:GenerateDataKey", @@ -194,3 +229,29 @@ resource "aws_kms_key_policy" "s3_audit_kms_key" { key_id = module.s3_audit_bucket.storage_bucket_kms_key_arn policy = data.aws_iam_policy_document.s3_audit_kms_key_policy.json } + +data "aws_iam_policy_document" "lambda_firehose_write_policy" { + statement { + sid = "AllowLambdaToPutToFirehose" + effect = "Allow" + actions = [ + "firehose:PutRecord", + "firehose:PutRecordBatch" + ] + resources = [ + "arn:aws:firehose:${var.default_aws_region}:${data.aws_caller_identity.current.account_id}:deliverystream/${module.eligibility_audit_firehose_delivery_stream.firehose_stream_name}" + ] + } +} + +resource "aws_iam_role_policy" "lambda_firehose_policy" { + name = "LambdaFirehoseWritePolicy" + role = aws_iam_role.eligibility_lambda_role.id + policy = data.aws_iam_policy_document.lambda_firehose_write_policy.json +} + + + + + + diff --git a/infrastructure/stacks/api-layer/iam_roles.tf b/infrastructure/stacks/api-layer/iam_roles.tf index c53f9102e..2fe2618dc 100644 --- a/infrastructure/stacks/api-layer/iam_roles.tf +++ b/infrastructure/stacks/api-layer/iam_roles.tf @@ -22,6 +22,18 @@ data "aws_iam_policy_document" "dps_assume_role" { } } +# Trust policy kinesis firehose +data "aws_iam_policy_document" "firehose_assume_role" 
{ + statement { + actions = ["sts:AssumeRole"] + principals { + type = "Service" + identifiers = ["firehose.amazonaws.com"] + } + } +} + +# Roles resource "aws_iam_role" "eligibility_lambda_role" { name = "eligibility_lambda-role${terraform.workspace == "default" ? "" : "-${terraform.workspace}"}" @@ -36,3 +48,9 @@ resource "aws_iam_role" "write_access_role" { assume_role_policy = data.aws_iam_policy_document.dps_assume_role.json permissions_boundary = aws_iam_policy.assumed_role_permissions_boundary.arn } + +resource "aws_iam_role" "eligibility_audit_firehose_role" { + name = "eligibility_audit_firehose-role${terraform.workspace == "default" ? "" : "-${terraform.workspace}"}" + assume_role_policy = data.aws_iam_policy_document.firehose_assume_role.json + permissions_boundary = aws_iam_policy.assumed_role_permissions_boundary.arn +} diff --git a/infrastructure/stacks/api-layer/kinesis_firehose.tf b/infrastructure/stacks/api-layer/kinesis_firehose.tf new file mode 100644 index 000000000..90196d0e3 --- /dev/null +++ b/infrastructure/stacks/api-layer/kinesis_firehose.tf @@ -0,0 +1,12 @@ +module "eligibility_audit_firehose_delivery_stream" { + source = "../../modules/kinesis_firehose" + audit_firehose_delivery_stream_name = "audit_stream_to_s3" + audit_firehose_role_arn = aws_iam_role.eligibility_audit_firehose_role.arn + s3_audit_bucket_arn = module.s3_audit_bucket.storage_bucket_arn + environment = local.environment + stack_name = local.stack_name + workspace = local.workspace + tags = local.tags + kinesis_cloud_watch_log_group_name = aws_cloudwatch_log_group.firehose_audit.name + kinesis_cloud_watch_log_stream = aws_cloudwatch_log_stream.firehose_audit_stream.name +} diff --git a/infrastructure/stacks/api-layer/lambda.tf b/infrastructure/stacks/api-layer/lambda.tf index b12d9dd70..09f56ac03 100644 --- a/infrastructure/stacks/api-layer/lambda.tf +++ b/infrastructure/stacks/api-layer/lambda.tf @@ -11,17 +11,18 @@ data "aws_subnet" "private_subnets" { } module 
"eligibility_signposting_lambda_function" { - source = "../../modules/lambda" - eligibility_lambda_role_arn = aws_iam_role.eligibility_lambda_role.arn - workspace = local.workspace - environment = var.environment - lambda_func_name = "${terraform.workspace == "default" ? "" : "${terraform.workspace}-"}eligibility_signposting_api" - security_group_ids = [data.aws_security_group.main_sg.id] - vpc_intra_subnets = [for v in data.aws_subnet.private_subnets : v.id] - file_name = "../../../dist/lambda.zip" - handler = "eligibility_signposting_api.app.lambda_handler" - eligibility_rules_bucket_name = module.s3_rules_bucket.storage_bucket_name - eligibility_status_table_name = module.eligibility_status_table.table_name - log_level = "INFO" - stack_name = local.stack_name + source = "../../modules/lambda" + eligibility_lambda_role_arn = aws_iam_role.eligibility_lambda_role.arn + workspace = local.workspace + environment = var.environment + lambda_func_name = "${terraform.workspace == "default" ? "" : "${terraform.workspace}-"}eligibility_signposting_api" + security_group_ids = [data.aws_security_group.main_sg.id] + vpc_intra_subnets = [for v in data.aws_subnet.private_subnets : v.id] + file_name = "../../../dist/lambda.zip" + handler = "eligibility_signposting_api.app.lambda_handler" + eligibility_rules_bucket_name = module.s3_rules_bucket.storage_bucket_name + eligibility_status_table_name = module.eligibility_status_table.table_name + kinesis_audit_stream_to_s3_name = module.eligibility_audit_firehose_delivery_stream.firehose_stream_name + log_level = "INFO" + stack_name = local.stack_name } diff --git a/infrastructure/stacks/api-layer/patient_check.tf b/infrastructure/stacks/api-layer/patient_check.tf index 17b0254a3..a0cf18a8e 100644 --- a/infrastructure/stacks/api-layer/patient_check.tf +++ b/infrastructure/stacks/api-layer/patient_check.tf @@ -1,8 +1,8 @@ resource "aws_api_gateway_request_validator" "patient_check_validator" { - rest_api_id = 
module.eligibility_signposting_api_gateway.rest_api_id - name = "validate-path-params" - validate_request_body = false + rest_api_id = module.eligibility_signposting_api_gateway.rest_api_id + name = "validate-path-params" + validate_request_body = false validate_request_parameters = true } @@ -17,7 +17,7 @@ resource "aws_api_gateway_method" "get_patient_check" { request_validator_id = aws_api_gateway_request_validator.patient_check_validator.id request_parameters = { - "method.request.path.id" = true # Require the 'id' path parameter + "method.request.path.id" = true # Require the 'id' path parameter } depends_on = [ diff --git a/infrastructure/stacks/networking/locals.tf b/infrastructure/stacks/networking/locals.tf index 549b9b390..5acda7626 100644 --- a/infrastructure/stacks/networking/locals.tf +++ b/infrastructure/stacks/networking/locals.tf @@ -14,13 +14,14 @@ locals { # VPC Interface Endpoints vpc_interface_endpoints = { - kms = "com.amazonaws.${local.region}.kms" - cloudwatch-logs = "com.amazonaws.${local.region}.logs" - ssm = "com.amazonaws.${local.region}.ssm" - secrets-manager = "com.amazonaws.${local.region}.secretsmanager" - lambda = "com.amazonaws.${local.region}.lambda" - sts = "com.amazonaws.${local.region}.sts" - sqs = "com.amazonaws.${local.region}.sqs" + kms = "com.amazonaws.${local.region}.kms" + cloudwatch-logs = "com.amazonaws.${local.region}.logs" + ssm = "com.amazonaws.${local.region}.ssm" + secrets-manager = "com.amazonaws.${local.region}.secretsmanager" + lambda = "com.amazonaws.${local.region}.lambda" + sts = "com.amazonaws.${local.region}.sts" + sqs = "com.amazonaws.${local.region}.sqs" + kinesis-firehose = "com.amazonaws.${local.region}.kinesis-firehose" } # VPC Gateway Endpoints diff --git a/src/eligibility_signposting_api/config/config.py b/src/eligibility_signposting_api/config/config.py index 1922bc06d..722e90133 100644 --- a/src/eligibility_signposting_api/config/config.py +++ b/src/eligibility_signposting_api/config/config.py @@ 
-15,13 +15,18 @@ AwsRegion = NewType("AwsRegion", str) AwsAccessKey = NewType("AwsAccessKey", str) AwsSecretAccessKey = NewType("AwsSecretAccessKey", str) +AwsKinesisFirehoseStreamName = NewType("AwsKinesisFirehoseStreamName", str) @cache def config() -> dict[str, Any]: person_table_name = TableName(os.getenv("PERSON_TABLE_NAME", "test_eligibility_datastore")) rules_bucket_name = BucketName(os.getenv("RULES_BUCKET_NAME", "test-rules-bucket")) + audit_bucket_name = BucketName(os.getenv("AUDIT_BUCKET_NAME", "test-audit-bucket")) aws_default_region = AwsRegion(os.getenv("AWS_DEFAULT_REGION", "eu-west-1")) + kinesis_audit_stream_to_s3 = AwsKinesisFirehoseStreamName( + os.getenv("KINESIS_AUDIT_STREAM_TO_S3", "test_kinesis_audit_stream_to_s3") + ) log_level = LOG_LEVEL if os.getenv("ENV"): @@ -33,6 +38,9 @@ def config() -> dict[str, Any]: "person_table_name": person_table_name, "s3_endpoint": None, "rules_bucket_name": rules_bucket_name, + "audit_bucket_name": audit_bucket_name, + "firehose_endpoint": None, + "kinesis_audit_stream_to_s3": kinesis_audit_stream_to_s3, "log_level": log_level, } @@ -44,6 +52,9 @@ def config() -> dict[str, Any]: "person_table_name": person_table_name, "s3_endpoint": URL(os.getenv("S3_ENDPOINT", "http://localhost:4566")), "rules_bucket_name": rules_bucket_name, + "audit_bucket_name": audit_bucket_name, + "firehose_endpoint": URL(os.getenv("FIREHOSE_ENDPOINT", "http://localhost:4566")), + "kinesis_audit_stream_to_s3": kinesis_audit_stream_to_s3, "log_level": log_level, } diff --git a/src/eligibility_signposting_api/config/contants.py b/src/eligibility_signposting_api/config/contants.py index 9756b3081..3ac359875 100644 --- a/src/eligibility_signposting_api/config/contants.py +++ b/src/eligibility_signposting_api/config/contants.py @@ -1,3 +1,3 @@ MAGIC_COHORT_LABEL = "elid_all_people" RULE_STOP_DEFAULT = False -NHS_NUMBER_HEADER_NAME = "nhs-login-nhs-number" +NHS_NUMBER_HEADER = "nhs-login-nhs-number" diff --git 
a/src/eligibility_signposting_api/repos/factory.py b/src/eligibility_signposting_api/repos/factory.py index 9da79a5e8..22adbd154 100644 --- a/src/eligibility_signposting_api/repos/factory.py +++ b/src/eligibility_signposting_api/repos/factory.py @@ -35,3 +35,11 @@ def dynamodb_resource_factory( def s3_service_factory(session: Session, s3_endpoint: Annotated[URL, Inject(param="s3_endpoint")]) -> BaseClient: endpoint_url = str(s3_endpoint) if s3_endpoint is not None else None return session.client("s3", endpoint_url=endpoint_url) + + +@service(qualifier="firehose") +def firehose_client_factory( + session: Session, firehose_endpoint: Annotated[URL, Inject(param="firehose_endpoint")] +) -> BaseClient: + endpoint_url = str(firehose_endpoint) if firehose_endpoint is not None else None + return session.client("firehose", endpoint_url=endpoint_url) diff --git a/src/eligibility_signposting_api/services/audit_service.py b/src/eligibility_signposting_api/services/audit_service.py new file mode 100644 index 000000000..b1d8b411e --- /dev/null +++ b/src/eligibility_signposting_api/services/audit_service.py @@ -0,0 +1,38 @@ +import json +import logging +from typing import Annotated + +from botocore.client import BaseClient +from wireup import Inject, service + +from eligibility_signposting_api.config.config import AwsKinesisFirehoseStreamName + +logger = logging.getLogger(__name__) + + +@service +class AuditService: # pragma: no cover + def __init__( + self, + firehose: Annotated[BaseClient, Inject(qualifier="firehose")], + audit_delivery_stream: Annotated[AwsKinesisFirehoseStreamName, Inject(param="kinesis_audit_stream_to_s3")], + ) -> None: + super().__init__() + self.firehose = firehose + self.audit_delivery_stream = audit_delivery_stream + + def audit(self, audit_record: dict) -> None: + """ + Sends an audit record to the configured Firehose delivery stream. + + Args: + audit_record (dict): The audit data to send. + + Returns: + None. The Firehose record ID is logged on success. 
+ """ + response = self.firehose.put_record( + DeliveryStreamName=self.audit_delivery_stream, + Record={"Data": (json.dumps(audit_record) + "\n").encode("utf-8")}, + ) + logger.info("Successfully sent to the Firehose", extra={"firehose_record_id": response["RecordId"]}) diff --git a/src/eligibility_signposting_api/services/eligibility_services.py b/src/eligibility_signposting_api/services/eligibility_services.py index c8d9c5b50..032b68816 100644 --- a/src/eligibility_signposting_api/services/eligibility_services.py +++ b/src/eligibility_signposting_api/services/eligibility_services.py @@ -4,6 +4,7 @@ from eligibility_signposting_api.model import eligibility from eligibility_signposting_api.repos import CampaignRepo, NotFoundError, PersonRepo +from eligibility_signposting_api.services.audit_service import AuditService from eligibility_signposting_api.services.calculators import eligibility_calculator as calculator logger = logging.getLogger(__name__) @@ -23,11 +24,13 @@ def __init__( self, person_repo: PersonRepo, campaign_repo: CampaignRepo, + audit_service: AuditService, calculator_factory: calculator.EligibilityCalculatorFactory, ) -> None: super().__init__() self.person_repo = person_repo self.campaign_repo = campaign_repo + self.audit_service = audit_service self.calculator_factory = calculator_factory def get_eligibility_status( @@ -51,6 +54,7 @@ def get_eligibility_status( raise UnknownPersonError from e else: calc: calculator.EligibilityCalculator = self.calculator_factory.get(person_data, campaign_configs) + self.audit_service.audit({"test_audit": "check if audit works"}) return calc.evaluate_eligibility(include_actions_flag=include_actions_flag) raise UnknownPersonError # pragma: no cover diff --git a/src/eligibility_signposting_api/wrapper.py b/src/eligibility_signposting_api/wrapper.py index ff4ff63bc..f5b0c8b57 100644 --- a/src/eligibility_signposting_api/wrapper.py +++ b/src/eligibility_signposting_api/wrapper.py @@ -4,7 +4,7 @@ from mangum.types 
import LambdaContext, LambdaEvent -from eligibility_signposting_api.config.contants import NHS_NUMBER_HEADER_NAME +from eligibility_signposting_api.config.contants import NHS_NUMBER_HEADER logger = logging.getLogger(__name__) @@ -20,7 +20,7 @@ def wrapper(event: LambdaEvent, context: LambdaContext) -> dict[str, int | str]: headers = event.get("headers", {}) path_params = event.get("pathParameters", {}) - header_nhs = headers.get(NHS_NUMBER_HEADER_NAME) + header_nhs = headers.get(NHS_NUMBER_HEADER) path_nhs = path_params.get("id") logger.info("nhs numbers from the request", extra={"header_nhs": header_nhs, "path_nhs": path_nhs}) diff --git a/tests/docker-compose.yml b/tests/docker-compose.yml index 8cf0ec1e8..e6b90dd5a 100644 --- a/tests/docker-compose.yml +++ b/tests/docker-compose.yml @@ -9,7 +9,7 @@ services: # LocalStack configuration: https://docs.localstack.cloud/references/configuration/ - DEBUG=${LOCALSTACK_DEBUG:-0} - DEFAULT_REGION=${AWS_DEFAULT_REGION:-eu-west-1} - - LAMBDA_EXECUTOR=local + - LAMBDA_EXECUTOR=docker volumes: - "${LOCALSTACK_VOLUME_DIR:-../volume}:/var/lib/localstack" - "/var/run/docker.sock:/var/run/docker.sock" diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index 6aaf6c4f7..d6ebc0f4f 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -97,6 +97,11 @@ def s3_client(boto3_session: Session, localstack: URL) -> BaseClient: return boto3_session.client("s3", endpoint_url=str(localstack)) +@pytest.fixture(scope="session") +def firehose_client(boto3_session: Session, localstack: URL) -> BaseClient: + return boto3_session.client("firehose", endpoint_url=str(localstack)) + + @pytest.fixture(scope="session") def iam_role(iam_client: BaseClient) -> Generator[str]: role_name = "LambdaExecutionRole" @@ -190,6 +195,7 @@ def flask_function(lambda_client: BaseClient, iam_role: str, lambda_zip: Path) - "Variables": { "DYNAMODB_ENDPOINT": os.getenv("LOCALSTACK_INTERNAL_ENDPOINT", 
"http://localstack:4566/"), "S3_ENDPOINT": os.getenv("LOCALSTACK_INTERNAL_ENDPOINT", "http://localstack:4566/"), + "FIREHOSE_ENDPOINT": os.getenv("LOCALSTACK_INTERNAL_ENDPOINT", "http://localstack:4566/"), "AWS_REGION": AWS_REGION, "LOG_LEVEL": "DEBUG", } @@ -372,15 +378,43 @@ def persisted_person_pc_sw19(person_table: Any, faker: Faker) -> Generator[eligi @pytest.fixture(scope="session") -def bucket(s3_client: BaseClient) -> Generator[BucketName]: +def rules_bucket(s3_client: BaseClient) -> Generator[BucketName]: bucket_name = BucketName(os.getenv("RULES_BUCKET_NAME", "test-rules-bucket")) s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration={"LocationConstraint": AWS_REGION}) yield bucket_name s3_client.delete_bucket(Bucket=bucket_name) +@pytest.fixture(scope="session") +def audit_bucket(s3_client: BaseClient) -> Generator[BucketName]: + bucket_name = BucketName(os.getenv("AUDIT_BUCKET_NAME", "test-audit-bucket")) + s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration={"LocationConstraint": AWS_REGION}) + yield bucket_name + + # Delete all objects in the bucket before deletion + objects = s3_client.list_objects_v2(Bucket=bucket_name).get("Contents", []) + for obj in objects: + s3_client.delete_object(Bucket=bucket_name, Key=obj["Key"]) + s3_client.delete_bucket(Bucket=bucket_name) + + +@pytest.fixture(autouse=True) +def firehose_delivery_stream(firehose_client: BaseClient, audit_bucket: BucketName) -> dict[str, Any]: + return firehose_client.create_delivery_stream( + DeliveryStreamName="test_kinesis_audit_stream_to_s3", + DeliveryStreamType="DirectPut", + ExtendedS3DestinationConfiguration={ + "BucketARN": f"arn:aws:s3:::{audit_bucket}", + "RoleARN": "arn:aws:iam::000000000000:role/firehose_delivery_role", + "Prefix": "audit-logs/", + "BufferingHints": {"SizeInMBs": 1, "IntervalInSeconds": 60}, + "CompressionFormat": "UNCOMPRESSED", + }, + ) + + @pytest.fixture(scope="class") -def campaign_config(s3_client: BaseClient, bucket: 
BucketName) -> Generator[rules.CampaignConfig]: +def campaign_config(s3_client: BaseClient, rules_bucket: BucketName) -> Generator[rules.CampaignConfig]: campaign: rules.CampaignConfig = rule.CampaignConfigFactory.build( target="RSV", iterations=[ @@ -402,14 +436,16 @@ def campaign_config(s3_client: BaseClient, bucket: BucketName) -> Generator[rule ) campaign_data = {"CampaignConfig": campaign.model_dump(by_alias=True)} s3_client.put_object( - Bucket=bucket, Key=f"{campaign.name}.json", Body=json.dumps(campaign_data), ContentType="application/json" + Bucket=rules_bucket, Key=f"{campaign.name}.json", Body=json.dumps(campaign_data), ContentType="application/json" ) yield campaign - s3_client.delete_object(Bucket=bucket, Key=f"{campaign.name}.json") + s3_client.delete_object(Bucket=rules_bucket, Key=f"{campaign.name}.json") @pytest.fixture(scope="class") -def campaign_config_with_magic_cohort(s3_client: BaseClient, bucket: BucketName) -> Generator[rules.CampaignConfig]: +def campaign_config_with_magic_cohort( + s3_client: BaseClient, rules_bucket: BucketName +) -> Generator[rules.CampaignConfig]: campaign: rules.CampaignConfig = rule.CampaignConfigFactory.build( target="COVID", iterations=[ @@ -424,15 +460,15 @@ def campaign_config_with_magic_cohort(s3_client: BaseClient, bucket: BucketName) ) campaign_data = {"CampaignConfig": campaign.model_dump(by_alias=True)} s3_client.put_object( - Bucket=bucket, Key=f"{campaign.name}.json", Body=json.dumps(campaign_data), ContentType="application/json" + Bucket=rules_bucket, Key=f"{campaign.name}.json", Body=json.dumps(campaign_data), ContentType="application/json" ) yield campaign - s3_client.delete_object(Bucket=bucket, Key=f"{campaign.name}.json") + s3_client.delete_object(Bucket=rules_bucket, Key=f"{campaign.name}.json") @pytest.fixture(scope="class") def campaign_config_with_missing_descriptions_missing_rule_text( - s3_client: BaseClient, bucket: BucketName + s3_client: BaseClient, rules_bucket: BucketName ) -> 
Generator[rules.CampaignConfig]: campaign: rules.CampaignConfig = rule.CampaignConfigFactory.build( target="FLU", @@ -456,7 +492,7 @@ def campaign_config_with_missing_descriptions_missing_rule_text( ) campaign_data = {"CampaignConfig": campaign.model_dump(by_alias=True)} s3_client.put_object( - Bucket=bucket, Key=f"{campaign.name}.json", Body=json.dumps(campaign_data), ContentType="application/json" + Bucket=rules_bucket, Key=f"{campaign.name}.json", Body=json.dumps(campaign_data), ContentType="application/json" ) yield campaign - s3_client.delete_object(Bucket=bucket, Key=f"{campaign.name}.json") + s3_client.delete_object(Bucket=rules_bucket, Key=f"{campaign.name}.json") diff --git a/tests/integration/lambda/test_app_running_as_lambda.py b/tests/integration/lambda/test_app_running_as_lambda.py index 360410f4b..cdd1149d3 100644 --- a/tests/integration/lambda/test_app_running_as_lambda.py +++ b/tests/integration/lambda/test_app_running_as_lambda.py @@ -15,6 +15,7 @@ from eligibility_signposting_api.model.eligibility import NHSNumber from eligibility_signposting_api.model.rules import CampaignConfig +from eligibility_signposting_api.repos.campaign_repo import BucketName logger = logging.getLogger(__name__) @@ -152,10 +153,12 @@ def get_log_messages(flask_function: str, logs_client: BaseClient) -> list[str]: return [e["message"] for e in log_events["events"]] -def test_given_nhs_number_in_path_matches_with_nhs_number_in_headers( +def test_given_nhs_number_in_path_matches_with_nhs_number_in_headers( # noqa: PLR0913 lambda_client: BaseClient, # noqa:ARG001 persisted_person: NHSNumber, campaign_config: CampaignConfig, # noqa:ARG001 + s3_client: BaseClient, + audit_bucket: BucketName, api_gateway_endpoint: URL, ): # Given @@ -173,6 +176,12 @@ def test_given_nhs_number_in_path_matches_with_nhs_number_in_headers( is_response().with_status_code(HTTPStatus.OK).and_body(is_json_that(has_key("processedSuggestions"))), ) + objects = 
s3_client.list_objects_v2(Bucket=audit_bucket).get("Contents", []) + object_keys = [obj["Key"] for obj in objects] + latest_key = sorted(object_keys)[-1] + audit_data = json.loads(s3_client.get_object(Bucket=audit_bucket, Key=latest_key)["Body"].read()) + assert_that(audit_data, has_entries(test_audit="check if audit works")) + def test_given_nhs_number_in_path_does_not_match_with_nhs_number_in_headers_results_in_error_response( lambda_client: BaseClient, # noqa:ARG001 diff --git a/tests/integration/repo/test_campaign_repo.py b/tests/integration/repo/test_campaign_repo.py index 30eaa8871..5870d1b32 100644 --- a/tests/integration/repo/test_campaign_repo.py +++ b/tests/integration/repo/test_campaign_repo.py @@ -12,19 +12,19 @@ @pytest.fixture(scope="module") -def campaign_config(s3_client: BaseClient, bucket: BucketName) -> Generator[CampaignConfig]: +def campaign_config(s3_client: BaseClient, rules_bucket: BucketName) -> Generator[CampaignConfig]: campaign: CampaignConfig = CampaignConfigFactory.build() campaign_data = {"CampaignConfig": campaign.model_dump(by_alias=True)} s3_client.put_object( - Bucket=bucket, Key=f"{campaign.name}.json", Body=json.dumps(campaign_data), ContentType="application/json" + Bucket=rules_bucket, Key=f"{campaign.name}.json", Body=json.dumps(campaign_data), ContentType="application/json" ) yield campaign - s3_client.delete_object(Bucket=bucket, Key=f"{campaign.name}.json") + s3_client.delete_object(Bucket=rules_bucket, Key=f"{campaign.name}.json") -def test_get_campaign_config(s3_client: BaseClient, bucket: BucketName, campaign_config: CampaignConfig): +def test_get_campaign_config(s3_client: BaseClient, rules_bucket: BucketName, campaign_config: CampaignConfig): # Given - repo = CampaignRepo(s3_client, bucket) + repo = CampaignRepo(s3_client, rules_bucket) # When actual = list(repo.get_campaign_configs()) diff --git a/tests/unit/repos/test_factory.py b/tests/unit/repos/test_factory.py index cd16bc07c..b6a8a7c62 100644 --- 
a/tests/unit/repos/test_factory.py +++ b/tests/unit/repos/test_factory.py @@ -6,7 +6,11 @@ from botocore.client import BaseClient from yarl import URL -from eligibility_signposting_api.repos.factory import dynamodb_resource_factory, s3_service_factory +from eligibility_signposting_api.repos.factory import ( + dynamodb_resource_factory, + firehose_client_factory, + s3_service_factory, +) @pytest.fixture @@ -54,3 +58,24 @@ def test_s3_service_factory_without_endpoint(mock_session): mock_session.client.assert_called_once_with("s3", endpoint_url=None) assert result is mock_client + + +def test_firehose_service_factory_with_endpoint(mock_session): + mock_client = MagicMock(spec=BaseClient) + mock_session.client = MagicMock(return_value=mock_client) + endpoint = URL("http://localhost:4566") + + result = firehose_client_factory(mock_session, endpoint) + + mock_session.client.assert_called_once_with("firehose", endpoint_url="http://localhost:4566") + assert result is mock_client + + +def test_firehose_service_factory_without_endpoint(mock_session): + mock_client = MagicMock(spec=BaseClient) + mock_session.client = MagicMock(return_value=mock_client) + + result = firehose_client_factory(mock_session, None) + + mock_session.client.assert_called_once_with("firehose", endpoint_url=None) + assert result is mock_client diff --git a/tests/unit/services/test_eligibility_services.py b/tests/unit/services/test_eligibility_services.py index c99f3b73a..a96c8b2bd 100644 --- a/tests/unit/services/test_eligibility_services.py +++ b/tests/unit/services/test_eligibility_services.py @@ -6,6 +6,7 @@ from eligibility_signposting_api.model.eligibility import NHSNumber from eligibility_signposting_api.repos import CampaignRepo, NotFoundError, PersonRepo from eligibility_signposting_api.services import EligibilityService, UnknownPersonError +from eligibility_signposting_api.services.audit_service import AuditService from eligibility_signposting_api.services.calculators.eligibility_calculator 
import EligibilityCalculatorFactory from tests.fixtures.matchers.eligibility import is_eligibility_status @@ -14,8 +15,9 @@ def test_eligibility_service_returns_from_repo(): # Given person_repo = MagicMock(spec=PersonRepo) campaign_repo = MagicMock(spec=CampaignRepo) + audit_service = MagicMock(spec=AuditService) person_repo.get_eligibility = MagicMock(return_value=[]) - service = EligibilityService(person_repo, campaign_repo, EligibilityCalculatorFactory()) + service = EligibilityService(person_repo, campaign_repo, audit_service, EligibilityCalculatorFactory()) # When actual = service.get_eligibility_status(NHSNumber("1234567890")) @@ -28,8 +30,9 @@ def test_eligibility_service_for_nonexistent_nhs_number(): # Given person_repo = MagicMock(spec=PersonRepo) campaign_repo = MagicMock(spec=CampaignRepo) + audit_service = MagicMock(spec=AuditService) person_repo.get_eligibility_data = MagicMock(side_effect=NotFoundError) - service = EligibilityService(person_repo, campaign_repo, EligibilityCalculatorFactory()) + service = EligibilityService(person_repo, campaign_repo, audit_service, EligibilityCalculatorFactory()) # When with pytest.raises(UnknownPersonError):