diff --git a/.github/workflows/cicd-2-publish.yaml b/.github/workflows/cicd-2-publish.yaml index d41b1de16..979cd9832 100644 --- a/.github/workflows/cicd-2-publish.yaml +++ b/.github/workflows/cicd-2-publish.yaml @@ -141,7 +141,7 @@ jobs: # asset_name: lambda-${{ needs.metadata.outputs.version }}.zip # asset_content_type: application/zip - name: "Notify Slack on PR merge" - uses: slackapi/slack-github-action@v2.1.0 + uses: slackapi/slack-github-action@v2.1.1 with: webhook: ${{ secrets.SLACK_WEBHOOK_URL }} webhook-type: webhook-trigger diff --git a/.tool-versions b/.tool-versions index 4ee8707ce..2bb2a6b76 100644 --- a/.tool-versions +++ b/.tool-versions @@ -3,8 +3,9 @@ terraform 1.12.1 pre-commit 4.2.0 vale 3.11.2 -poetry 2.1.3 +poetry 2.1.4 act 0.2.77 +nodejs 22.18.0 # ============================================================================== # The section below is reserved for Docker image versions. diff --git a/README.md b/README.md index 28b9197d3..d27ef9560 100644 --- a/README.md +++ b/README.md @@ -189,15 +189,25 @@ graph TB direction TB App["app.py (WireUp DI)"] Config["config.py, error_handler.py"] + subgraph "Audit Layer" + direction TB + Audit["audit/audit_service.py"] + AuditModels["audit/audit_models.py"] + end + subgraph "Validation Layer" + direction TB + Validator["common/request_validator.py"] + ApiErrResp["common/api_error_response.py"] + end subgraph "Presentation Layer" direction TB View["views/eligibility.py"] - ResponseModel["views/response_model/eligibility.py"] + ResponseModel["views/response_model/eligibility_response.py"] end subgraph "Business Logic Layer" direction TB Service["services/eligibility_services.py"] - Operators["services/rules/operators.py"] + Operators["services/operators/operators.py"] end subgraph "Data Access Layer" direction TB @@ -207,24 +217,30 @@ graph TB end subgraph "Models" direction TB - ModelElig["model/eligibility.py"] - ModelRules["model/rules.py"] + ModelElig["model/eligibility_status.py"] + 
ModelRules["model/campaign_config.py"] end end Lambda -->|"loads"| App App -->|injects| View View -->|calls| Service + View -->|validates via| Validator + View -->|audits via| Audit + View -->|uses| ResponseModel + Audit -->|uses| AuditModels + Validator -->|uses| ApiErrResp + Service -->|calls| Operators Service -->|calls| PersonRepo Service -->|calls| CampaignRepo PersonRepo -->|uses| DynamoDB CampaignRepo -->|uses| S3Bucket - View -->|uses| ResponseModel App -->|reads| Config + App -->|wires| Factory + Service -->|uses| ModelElig Operators -->|uses| ModelRules - App -->|wires| Factory ``` diff --git a/infrastructure/modules/dynamodb/dynamodb.tf b/infrastructure/modules/dynamodb/dynamodb.tf index 6f8f39a80..4730d2f8d 100644 --- a/infrastructure/modules/dynamodb/dynamodb.tf +++ b/infrastructure/modules/dynamodb/dynamodb.tf @@ -2,6 +2,7 @@ resource "aws_dynamodb_table" "dynamodb_table" { name = "${terraform.workspace == "default" ? "" : "${terraform.workspace}-"}${var.project_name}-${var.environment}-${var.table_name_suffix}" billing_mode = "PAY_PER_REQUEST" hash_key = var.partition_key + deletion_protection_enabled = var.environment == "prod" attribute { name = var.partition_key diff --git a/infrastructure/modules/lambda/lambda.tf b/infrastructure/modules/lambda/lambda.tf index 9013c8386..f31a6e762 100644 --- a/infrastructure/modules/lambda/lambda.tf +++ b/infrastructure/modules/lambda/lambda.tf @@ -22,6 +22,7 @@ resource "aws_lambda_function" "eligibility_signposting_lambda" { KINESIS_AUDIT_STREAM_TO_S3 = var.kinesis_audit_stream_to_s3_name ENV = var.environment LOG_LEVEL = var.log_level + ENABLE_XRAY_PATCHING = var.enable_xray_patching } } diff --git a/infrastructure/modules/lambda/variables.tf b/infrastructure/modules/lambda/variables.tf index ca6d9b95d..229c1fbb4 100644 --- a/infrastructure/modules/lambda/variables.tf +++ b/infrastructure/modules/lambda/variables.tf @@ -47,3 +47,8 @@ variable "log_level" { description = "log level" type = string } + +variable 
"enable_xray_patching"{ + description = "flag to enable xray tracing, which puts an entry for dynamodb, s3 and firehose in trace map" + type = string +} diff --git a/infrastructure/modules/s3/s3.tf b/infrastructure/modules/s3/s3.tf index e0138c065..8dc3c8744 100644 --- a/infrastructure/modules/s3/s3.tf +++ b/infrastructure/modules/s3/s3.tf @@ -105,6 +105,49 @@ data "aws_iam_policy_document" "access_logs_s3_bucket_policy" { variable = "aws:SecureTransport" } } + + # Allow S3 Log Delivery service to write access logs + statement { + sid = "S3ServerAccessLogsPolicy" + effect = "Allow" + principals { + type = "Service" + identifiers = ["logging.s3.amazonaws.com"] + } + actions = [ + "s3:PutObject" + ] + resources = [ + "${aws_s3_bucket.storage_bucket_access_logs.arn}/*" + ] + condition { + test = "ArnEquals" + variable = "aws:SourceArn" + values = [aws_s3_bucket.storage_bucket.arn] + } + } + + # Allow S3 Log Delivery service to check bucket location and get bucket ACL + statement { + sid = "S3ServerAccessLogsDeliveryRootAccess" + effect = "Allow" + principals { + type = "Service" + identifiers = ["logging.s3.amazonaws.com"] + } + actions = [ + "s3:GetBucketAcl", + "s3:ListBucket" + ] + resources = [ + aws_s3_bucket.storage_bucket_access_logs.arn + ] + condition { + test = "ArnEquals" + variable = "aws:SourceArn" + values = [aws_s3_bucket.storage_bucket.arn] + } + } } resource "aws_s3_bucket_server_side_encryption_configuration" "storage_bucket_access_logs_server_side_encryption_config" { @@ -112,7 +155,7 @@ resource "aws_s3_bucket_server_side_encryption_configuration" "storage_bucket_ac rule { apply_server_side_encryption_by_default { - sse_algorithm = "aws:kms" + sse_algorithm = "aws:kms" kms_master_key_id = aws_kms_key.storage_bucket_cmk.arn } } diff --git a/infrastructure/stacks/api-layer/assumed_role_permissions_boundary.tf b/infrastructure/stacks/api-layer/assumed_role_permissions_boundary.tf index 2fd4e8454..980bf8e61 100644 --- 
a/infrastructure/stacks/api-layer/assumed_role_permissions_boundary.tf +++ b/infrastructure/stacks/api-layer/assumed_role_permissions_boundary.tf @@ -33,7 +33,8 @@ data "aws_iam_policy_document" "assumed_role_permissions_boundary" { "support:*", "sqs:*", "tag:*", - "trustedadvisor:*" + "trustedadvisor:*", + "xray:*" ] resources = ["*"] diff --git a/infrastructure/stacks/api-layer/cloudwatch_alarms.tf b/infrastructure/stacks/api-layer/cloudwatch_alarms.tf new file mode 100644 index 000000000..4c252a38d --- /dev/null +++ b/infrastructure/stacks/api-layer/cloudwatch_alarms.tf @@ -0,0 +1,420 @@ +locals { + # Security alarms based on CloudTrail custom metrics + cloudwatch_alarm_config = { + UnauthorizedApiCalls = { + threshold = 1 + comparison_operator = "GreaterThanOrEqualToThreshold" + evaluation_periods = 1 + period = 300 + statistic = "Sum" + alarm_description = "Unauthorized API calls detected - immediate alert on any occurrence" + actions_enabled = false # Disabling as cloudhealth role is triggering this alarm + } + ConsoleAuthenticationFailures = { + threshold = 3 + comparison_operator = "GreaterThanOrEqualToThreshold" + evaluation_periods = 1 + period = 300 + statistic = "Sum" + alarm_description = "Multiple console authentication failures detected within 5 minutes" + actions_enabled = true + } + CloudTrailConfigChanges = { + threshold = 1 + comparison_operator = "GreaterThanOrEqualToThreshold" + evaluation_periods = 1 + period = 300 + statistic = "Sum" + alarm_description = "CloudTrail configuration changes detected - immediate alert" + actions_enabled = true + } + VPCChanges = { + threshold = 1 + comparison_operator = "GreaterThanOrEqualToThreshold" + evaluation_periods = 1 + period = 300 + statistic = "Sum" + alarm_description = "VPC configuration changes detected" + actions_enabled = true + } + AWSConfigChanges = { + threshold = 1 + comparison_operator = "GreaterThanOrEqualToThreshold" + evaluation_periods = 1 + period = 300 + statistic = "Sum" + 
alarm_description = "AWS Config service changes detected" + actions_enabled = true + } + ModificationOfCMKs = { + threshold = 1 + comparison_operator = "GreaterThanOrEqualToThreshold" + evaluation_periods = 1 + period = 300 + statistic = "Sum" + alarm_description = "KMS Customer Managed Key modifications detected - critical security alert" + actions_enabled = true + } + UnsuccessfulSwitchRole = { + threshold = 5 + comparison_operator = "GreaterThanOrEqualToThreshold" + evaluation_periods = 1 + period = 900 + statistic = "Sum" + alarm_description = "Multiple unsuccessful role switch attempts detected within 15 minutes" + actions_enabled = true + } + ConsoleLoginNoMFA = { + threshold = 1 + comparison_operator = "GreaterThanOrEqualToThreshold" + evaluation_periods = 1 + period = 300 + statistic = "Sum" + alarm_description = "Console login without MFA detected - security policy violation" + actions_enabled = true + } + RootAccountUsage = { + threshold = 1 + comparison_operator = "GreaterThanOrEqualToThreshold" + evaluation_periods = 1 + period = 300 + statistic = "Sum" + alarm_description = "Root account usage detected - immediate critical alert" + actions_enabled = true + } + SecurityGroupChange = { + threshold = 1 + comparison_operator = "GreaterThanOrEqualToThreshold" + evaluation_periods = 1 + period = 300 + statistic = "Sum" + alarm_description = "Security group changes detected" + actions_enabled = true + } + RouteTableChanges = { + threshold = 1 + comparison_operator = "GreaterThanOrEqualToThreshold" + evaluation_periods = 1 + period = 300 + statistic = "Sum" + alarm_description = "Route table changes detected" + actions_enabled = true + } + IAMPolicyChanges = { + threshold = 1 + comparison_operator = "GreaterThanOrEqualToThreshold" + evaluation_periods = 1 + period = 300 + statistic = "Sum" + alarm_description = "IAM policy changes detected - immediate security alert" + actions_enabled = true + } + s3BucketPolicyChanges = { + threshold = 1 + comparison_operator 
= "GreaterThanOrEqualToThreshold" + evaluation_periods = 1 + period = 300 + statistic = "Sum" + alarm_description = "S3 bucket policy changes detected" + actions_enabled = true + } + ChangesToNetworkGateways = { + threshold = 1 + comparison_operator = "GreaterThanOrEqualToThreshold" + evaluation_periods = 1 + period = 300 + statistic = "Sum" + alarm_description = "Network gateway changes detected" + actions_enabled = true + } + ChangesToNACLs = { + threshold = 1 + comparison_operator = "GreaterThanOrEqualToThreshold" + evaluation_periods = 1 + period = 300 + statistic = "Sum" + alarm_description = "Network ACL changes detected" + actions_enabled = true + } + KMSKeyPolicyChanges = { + threshold = 1 + comparison_operator = "GreaterThanOrEqualToThreshold" + evaluation_periods = 1 + period = 300 + statistic = "Sum" + alarm_description = "KMS key policy changes detected - critical security alert" + actions_enabled = true + } + s3PublicAccessChanges = { + threshold = 1 + comparison_operator = "GreaterThanOrEqualToThreshold" + evaluation_periods = 1 + period = 300 + statistic = "Sum" + alarm_description = "S3 public access changes detected - potential data exposure risk" + actions_enabled = true + } + CloudWatchAlarmChanges = { + threshold = 1 + comparison_operator = "GreaterThanOrEqualToThreshold" + evaluation_periods = 1 + period = 300 + statistic = "Sum" + alarm_description = "CloudWatch alarm configuration changes detected" + actions_enabled = true + } + LambdaFunctionChanges = { + threshold = 2 + comparison_operator = "GreaterThanOrEqualToThreshold" + evaluation_periods = 1 + period = 600 + statistic = "Sum" + alarm_description = "Multiple Lambda function changes detected within 10 minutes" + actions_enabled = true + } + } + + # API Gateway alarm configuration + api_gateway_alarm_config = { + "5XXError" = { + metric_name = "5XXError" + namespace = "AWS/ApiGateway" + statistic = "Sum" + threshold = 0 + comparison_operator = "GreaterThanThreshold" + evaluation_periods 
= 1 + period = 300 + alarm_description = "API Gateway 5XX errors detected - critical server-side issues" + severity = "critical" + treat_missing_data = "notBreaching" + } + "4XXError" = { + metric_name = "4XXError" + namespace = "AWS/ApiGateway" + statistic = "Sum" + threshold = 50 # Adjust based on expected traffic + comparison_operator = "GreaterThanThreshold" + evaluation_periods = 2 + period = 300 + alarm_description = "High rate of API Gateway 4XX errors - client-side issues or auth problems" + severity = "high" + treat_missing_data = "notBreaching" + } + "LatencyP95" = { + metric_name = "Latency" + namespace = "AWS/ApiGateway" + statistic = "Average" # Use Average for ExtendedStatistic + extended_statistic = "p95" + threshold = 1000 + comparison_operator = "GreaterThanThreshold" + evaluation_periods = 1 + period = 300 + alarm_description = "API Gateway P95 latency > 1000ms - performance degradation" + severity = "high" + treat_missing_data = "notBreaching" + } + "IntegrationLatencyP95" = { + metric_name = "IntegrationLatency" + namespace = "AWS/ApiGateway" + statistic = "Average" # Use Average for ExtendedStatistic + extended_statistic = "p95" + threshold = 900 + comparison_operator = "GreaterThanThreshold" + evaluation_periods = 1 + period = 300 + alarm_description = "API Gateway backend (Lambda) P95 latency > 900ms - backend performance issues" + severity = "high" + treat_missing_data = "notBreaching" + } + "CountDrop" = { + metric_name = "Count" + namespace = "AWS/ApiGateway" + statistic = "Sum" + threshold = 10 # Minimum expected requests per 5min - adjust when live + comparison_operator = "LessThanThreshold" + evaluation_periods = 2 + period = 300 + alarm_description = "API Gateway request volume drop - possible outage (enable when service is live)" + severity = "high" + treat_missing_data = "breaching" # Missing data could indicate outage + actions_enabled = false # Disable until service is live + } + } + + # Lambda alarm configuration + 
lambda_alarm_config = { + "Errors" = { + metric_name = "Errors" + namespace = "AWS/Lambda" + statistic = "Sum" + threshold = 0 + comparison_operator = "GreaterThanThreshold" + evaluation_periods = 1 + period = 300 + alarm_description = "Lambda invocation errors detected - critical function failures" + severity = "critical" + treat_missing_data = "notBreaching" + } + "Throttles" = { + metric_name = "Throttles" + namespace = "AWS/Lambda" + statistic = "Sum" + threshold = 0 + comparison_operator = "GreaterThanThreshold" + evaluation_periods = 1 + period = 300 + alarm_description = "Lambda throttling detected - concurrency limits reached" + severity = "critical" + treat_missing_data = "notBreaching" + } + "Duration" = { + metric_name = "Duration" + namespace = "AWS/Lambda" + statistic = "Average" + threshold = 27000 # 90% of 30s timeout (adjust based on actual timeout) + comparison_operator = "GreaterThanThreshold" + evaluation_periods = 2 + period = 300 + alarm_description = "Lambda duration approaching timeout - function performance warning" + severity = "warning" + treat_missing_data = "notBreaching" + } + "InvocationsDrop" = { + metric_name = "Invocations" + namespace = "AWS/Lambda" + statistic = "Sum" + threshold = 5 # Minimum expected invocations per 5min - adjust when live + comparison_operator = "LessThanThreshold" + evaluation_periods = 2 + period = 300 + alarm_description = "Lambda invocation volume drop - possible outage (enable when service is live)" + severity = "high" + treat_missing_data = "breaching" # Missing data could indicate outage + actions_enabled = false # Disable until service is live + } + } +} + +# SNS Topic for CloudWatch Alarms +resource "aws_sns_topic" "cloudwatch_alarms" { + name = "cloudwatch-security-alarms" + + kms_master_key_id = aws_kms_key.sns_encryption_key.id + + tags = { + Environment = var.environment + Purpose = "security-alerting" + ManagedBy = "terraform" + } +} + +resource "aws_kms_key" "sns_encryption_key" { + description = 
"KMS key for encrypting CloudWatch alarms SNS topic" + deletion_window_in_days = 7 + enable_key_rotation = true + + + tags = { + Name = "cloudwatch-alarms-sns-encryption-key" + Environment = var.environment + Purpose = "sns-encryption" + ManagedBy = "terraform" + } +} + +# Security Alarms (CloudTrail-based) +resource "aws_cloudwatch_metric_alarm" "cloudtrail_custom_metric_alarms" { + # checkov:skip=CKV_AWS_319: Disabling some alarms until service is live + for_each = local.cloudwatch_alarm_config + + alarm_name = "SecurityAlert-${each.key}" + alarm_description = each.value.alarm_description + actions_enabled = each.value.actions_enabled + metric_name = each.key + namespace = "security" + statistic = each.value.statistic + period = each.value.period + evaluation_periods = each.value.evaluation_periods + threshold = each.value.threshold + comparison_operator = each.value.comparison_operator + + # Treat missing data as not breaching (common for security metrics) + treat_missing_data = "notBreaching" + + # Add standard tags for organization + tags = { + Environment = "production" + AlertType = "security" + Severity = contains(["RootAccountUsage", "ModificationOfCMKs", "KMSKeyPolicyChanges", "ConsoleLoginNoMFA"], each.key) ? "critical" : "high" + ManagedBy = "terraform" + } + + alarm_actions = [aws_sns_topic.cloudwatch_alarms.arn] +} + +# API Gateway CloudWatch Alarms +resource "aws_cloudwatch_metric_alarm" "api_gateway_alarms" { + # checkov:skip=CKV_AWS_319: Disabling some alarms until service is live + for_each = local.api_gateway_alarm_config + + alarm_name = "APIGateway-${each.key}" + alarm_description = each.value.alarm_description + actions_enabled = lookup(each.value, "actions_enabled", true) + metric_name = each.value.metric_name + namespace = each.value.namespace + statistic = lookup(each.value, "extended_statistic", null) == null ? 
each.value.statistic : null + extended_statistic = lookup(each.value, "extended_statistic", null) + period = each.value.period + evaluation_periods = each.value.evaluation_periods + threshold = each.value.threshold + comparison_operator = each.value.comparison_operator + treat_missing_data = each.value.treat_missing_data + + # Add dimensions for API Gateway + dimensions = { + ApiName = "eligibility-signposting-api" + } + + tags = { + Environment = var.environment + AlertType = "performance" + Service = "api-gateway" + Severity = each.value.severity + ManagedBy = "terraform" + } + + alarm_actions = [aws_sns_topic.cloudwatch_alarms.arn] +} + +# Lambda CloudWatch Alarms +resource "aws_cloudwatch_metric_alarm" "lambda_alarms" { + # checkov:skip=CKV_AWS_319: Disabling some alarms until service is live + for_each = local.lambda_alarm_config + + alarm_name = "Lambda-${each.key}" + alarm_description = each.value.alarm_description + actions_enabled = lookup(each.value, "actions_enabled", true) + metric_name = each.value.metric_name + namespace = each.value.namespace + statistic = each.value.statistic + period = each.value.period + evaluation_periods = each.value.evaluation_periods + threshold = each.value.threshold + comparison_operator = each.value.comparison_operator + treat_missing_data = each.value.treat_missing_data + + # Add dimensions for Lambda + dimensions = { + FunctionName = module.eligibility_signposting_lambda_function.aws_lambda_function_name + } + + tags = { + Environment = var.environment + AlertType = "performance" + Service = "lambda" + Severity = each.value.severity + ManagedBy = "terraform" + } + + alarm_actions = [aws_sns_topic.cloudwatch_alarms.arn] +} diff --git a/infrastructure/stacks/api-layer/iam_policies.tf b/infrastructure/stacks/api-layer/iam_policies.tf index 00b5d914f..5f384895c 100644 --- a/infrastructure/stacks/api-layer/iam_policies.tf +++ b/infrastructure/stacks/api-layer/iam_policies.tf @@ -1,7 +1,7 @@ # Read-only policy for DynamoDB data 
"aws_iam_policy_document" "dynamodb_read_policy_doc" { statement { - actions = ["dynamodb:GetItem", "dynamodb:Query", "dynamodb:Scan"] + actions = ["dynamodb:GetItem", "dynamodb:Query", "dynamodb:Scan"] resources = [module.eligibility_status_table.arn] } } @@ -16,7 +16,7 @@ resource "aws_iam_role_policy" "lambda_dynamodb_read_policy" { # Write-only policy for DynamoDB data "aws_iam_policy_document" "dynamodb_write_policy_doc" { statement { - actions = ["dynamodb:PutItem", "dynamodb:UpdateItem", "dynamodb:DeleteItem", "dynamodb:BatchWriteItem"] + actions = ["dynamodb:PutItem", "dynamodb:UpdateItem", "dynamodb:DeleteItem", "dynamodb:BatchWriteItem"] resources = [module.eligibility_status_table.arn] } } @@ -37,7 +37,7 @@ data "aws_iam_policy_document" "dynamo_kms_access_policy_doc" { # Attach dynamoDB write policy to external write role resource "aws_iam_role_policy" "external_dynamodb_write_policy" { - count = length(aws_iam_role.write_access_role) + count = length(aws_iam_role.write_access_role) name = "DynamoDBWriteAccess" role = aws_iam_role.write_access_role[count.index].id policy = data.aws_iam_policy_document.dynamodb_write_policy_doc.json @@ -45,7 +45,7 @@ resource "aws_iam_role_policy" "external_dynamodb_write_policy" { # Attach dynamo KMS policy to external write role resource "aws_iam_role_policy" "external_kms_access_policy" { - count = length(aws_iam_role.write_access_role) + count = length(aws_iam_role.write_access_role) name = "KMSAccessForDynamoDB" role = aws_iam_role.write_access_role[count.index].id policy = data.aws_iam_policy_document.dynamo_kms_access_policy_doc.json @@ -65,7 +65,7 @@ data "aws_iam_policy_document" "s3_rules_bucket_policy" { ] condition { test = "Bool" - values = ["true"] + values = ["true"] variable = "aws:SecureTransport" } } @@ -90,7 +90,7 @@ data "aws_iam_policy_document" "rules_s3_bucket_policy" { "${module.s3_rules_bucket.storage_bucket_arn}/*", ] principals { - type = "*" + type = "*" identifiers = ["*"] } condition { @@ 
-121,7 +121,7 @@ data "aws_iam_policy_document" "audit_s3_bucket_policy" { "${module.s3_audit_bucket.storage_bucket_arn}/*", ] principals { - type = "*" + type = "*" identifiers = ["*"] } condition { @@ -192,7 +192,7 @@ resource "aws_iam_role_policy_attachment" "lambda_logs_policy_attachment" { # Policy doc for S3 Audit bucket data "aws_iam_policy_document" "s3_audit_bucket_policy" { statement { - sid = "AllowSSLRequestsOnly" + sid = "AllowSSLRequestsOnly" actions = ["s3:*"] resources = [ module.s3_audit_bucket.storage_bucket_arn, @@ -200,7 +200,7 @@ data "aws_iam_policy_document" "s3_audit_bucket_policy" { ] condition { test = "Bool" - values = ["true"] + values = ["true"] variable = "aws:SecureTransport" } } @@ -222,10 +222,10 @@ data "aws_iam_policy_document" "dynamodb_kms_key_policy" { sid = "EnableIamUserPermissions" effect = "Allow" principals { - type = "AWS" + type = "AWS" identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"] } - actions = ["kms:*"] + actions = ["kms:*"] resources = ["*"] } @@ -233,7 +233,7 @@ data "aws_iam_policy_document" "dynamodb_kms_key_policy" { sid = "AllowLambdaDecrypt" effect = "Allow" principals { - type = "AWS" + type = "AWS" identifiers = [aws_iam_role.eligibility_lambda_role.arn] } actions = [ @@ -260,10 +260,10 @@ data "aws_iam_policy_document" "s3_rules_kms_key_policy" { sid = "EnableIamUserPermissions" effect = "Allow" principals { - type = "AWS" + type = "AWS" identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"] } - actions = ["kms:*"] + actions = ["kms:*"] resources = ["*"] } @@ -271,10 +271,10 @@ data "aws_iam_policy_document" "s3_rules_kms_key_policy" { sid = "AllowLambdaDecrypt" effect = "Allow" principals { - type = "AWS" + type = "AWS" identifiers = [aws_iam_role.eligibility_lambda_role.arn] } - actions = ["kms:Decrypt"] + actions = ["kms:Decrypt"] resources = ["*"] } } @@ -293,17 +293,17 @@ data "aws_iam_policy_document" "s3_audit_kms_key_policy" { sid = 
"EnableIamUserPermissions" effect = "Allow" principals { - type = "AWS" + type = "AWS" identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"] } - actions = ["kms:*"] + actions = ["kms:*"] resources = ["*"] } statement { sid = "AllowLambdaFullWrite" effect = "Allow" principals { - type = "AWS" + type = "AWS" identifiers = [aws_iam_role.eligibility_lambda_role.arn, aws_iam_role.eligibility_audit_firehose_role.arn] } actions = [ @@ -340,3 +340,70 @@ resource "aws_iam_role_policy" "lambda_firehose_policy" { role = aws_iam_role.eligibility_lambda_role.id policy = data.aws_iam_policy_document.lambda_firehose_write_policy.json } + +data "aws_iam_policy_document" "lambda_xray_tracing_permissions_policy" { + statement { + sid = "AllowLambdaToPutToXRay" + effect = "Allow" + actions = [ + "xray:PutTraceSegments", + "xray:PutTelemetryRecords" + ] + resources = ["*"] + } +} + +resource "aws_iam_role_policy" "lambda_xray_tracing_policy" { + name = "LambdaXRayWritePolicy" + role = aws_iam_role.eligibility_lambda_role.id + policy = data.aws_iam_policy_document.lambda_xray_tracing_permissions_policy.json +} + +# KMS Key Policy for SNS encryption +resource "aws_kms_key_policy" "sns_encryption_key_policy" { + key_id = aws_kms_key.sns_encryption_key.id + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Sid = "EnableIAMRootPermissions" + Effect = "Allow" + Principal = { + AWS = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:root" + } + Action = "kms:*" + Resource = "*" + }, + { + Sid = "AllowCloudWatchAlarmsAccess" + Effect = "Allow" + Principal = { + Service = "cloudwatch.amazonaws.com" + } + Action = [ + "kms:Encrypt", + "kms:Decrypt", + "kms:ReEncrypt*", + "kms:GenerateDataKey*", + "kms:DescribeKey" + ] + Resource = "*" + }, + { + Sid = "AllowSNSServiceAccess" + Effect = "Allow" + Principal = { + Service = "sns.amazonaws.com" + } + Action = [ + "kms:Encrypt", + "kms:Decrypt", + "kms:ReEncrypt*", + "kms:GenerateDataKey*", 
+ "kms:DescribeKey" + ] + Resource = "*" + } + ] + }) +} diff --git a/infrastructure/stacks/api-layer/lambda.tf b/infrastructure/stacks/api-layer/lambda.tf index 09f56ac03..68885b6d7 100644 --- a/infrastructure/stacks/api-layer/lambda.tf +++ b/infrastructure/stacks/api-layer/lambda.tf @@ -24,5 +24,6 @@ module "eligibility_signposting_lambda_function" { eligibility_status_table_name = module.eligibility_status_table.table_name kinesis_audit_stream_to_s3_name = module.eligibility_audit_firehose_delivery_stream.firehose_stream_name log_level = "INFO" + enable_xray_patching = "true" stack_name = local.stack_name } diff --git a/infrastructure/stacks/api-layer/patient_check.tf b/infrastructure/stacks/api-layer/patient_check.tf index a0cf18a8e..030e69a65 100644 --- a/infrastructure/stacks/api-layer/patient_check.tf +++ b/infrastructure/stacks/api-layer/patient_check.tf @@ -1,4 +1,3 @@ - resource "aws_api_gateway_request_validator" "patient_check_validator" { rest_api_id = module.eligibility_signposting_api_gateway.rest_api_id name = "validate-path-params" @@ -27,12 +26,12 @@ resource "aws_api_gateway_method" "get_patient_check" { } resource "aws_api_gateway_integration" "get_patient_check" { - rest_api_id = module.eligibility_signposting_api_gateway.rest_api_id - resource_id = aws_api_gateway_resource.patient.id - http_method = aws_api_gateway_method.get_patient_check.http_method + rest_api_id = module.eligibility_signposting_api_gateway.rest_api_id + resource_id = aws_api_gateway_resource.patient.id + http_method = aws_api_gateway_method.get_patient_check.http_method integration_http_method = "POST" # Needed for lambda proxy integration - type = "AWS_PROXY" - uri = module.eligibility_signposting_lambda_function.aws_lambda_invoke_arn + type = "AWS_PROXY" + uri = module.eligibility_signposting_lambda_function.aws_lambda_invoke_arn depends_on = [ aws_api_gateway_method.get_patient_check @@ -47,3 +46,43 @@ resource "aws_lambda_permission" "get_patient_check" { source_arn = 
"${module.eligibility_signposting_api_gateway.execution_arn}/*/*" } + +resource "aws_api_gateway_gateway_response" "bad_request_parameters" { + rest_api_id = module.eligibility_signposting_api_gateway.rest_api_id + response_type = "BAD_REQUEST_PARAMETERS" + status_code = "400" + + response_templates = { + "application/json" = jsonencode({ + resourceType = "OperationOutcome" + id = "$context.requestId" + meta = { + lastUpdated = "$context.requestTime" + } + issue = [ + { + severity = "error" + code = "invalid" + details = { + coding = [ + { + system = "https://fhir.nhs.uk/STU3/ValueSet/Spine-ErrorOrWarningCode-1", + code = "BAD_REQUEST", + display = "Bad Request" + } + ] + } + diagnostics = "Missing required NHS Number from path parameters", + location = [ + "parameters/id" + ] + } + ] + }) + } + + response_parameters = { + "gatewayresponse.header.Access-Control-Allow-Origin" = "'*'" + "gatewayresponse.header.Content-Type" = "'application/fhir+json'" + } +} diff --git a/infrastructure/stacks/iams-developer-roles/github_actions_policies.tf b/infrastructure/stacks/iams-developer-roles/github_actions_policies.tf index 78e909d86..6b69b5025 100644 --- a/infrastructure/stacks/iams-developer-roles/github_actions_policies.tf +++ b/infrastructure/stacks/iams-developer-roles/github_actions_policies.tf @@ -190,6 +190,7 @@ resource "aws_iam_policy" "api_infrastructure" { "ssm:DescribeParameters", "ec2:Describe*", "ec2:DescribeVpcs", + "ec2:ModifyVpcBlockPublicAccessOptions", # API Gateway domain and deployment "apigateway:*", # ACM for certs @@ -204,6 +205,7 @@ resource "aws_iam_policy" "api_infrastructure" { "logs:PutLogEvents", # IAM PassRole for logging role association (if needed) "iam:PassRole" + ], Resource = "*" #checkov:skip=CKV_AWS_289: Actions require wildcard resource @@ -473,6 +475,50 @@ resource "aws_iam_policy" "firehose_readonly" { tags = merge(local.tags, { Name = "firehose-describe-access" }) } +resource "aws_iam_policy" "cloudwatch_alarms" { + name = 
"cloudwatch-alarms-management" + description = "Allow GitHub Actions to manage CloudWatch alarms and SNS topics" + path = "/service-policies/" + + policy = jsonencode({ + Version = "2012-10-17", + Statement = [ + { + Effect = "Allow", + Action = [ + # CloudWatch Alarms management + "cloudwatch:PutMetricAlarm", + "cloudwatch:DeleteAlarms", + "cloudwatch:DescribeAlarms", + "cloudwatch:DescribeAlarmsForMetric", + "cloudwatch:ListTagsForResource", + "cloudwatch:TagResource", + "cloudwatch:UntagResource", + # SNS Topic management for alarm notifications + "sns:CreateTopic", + "sns:DeleteTopic", + "sns:GetTopicAttributes", + "sns:SetTopicAttributes", + "sns:ListTopics", + "sns:ListTagsForResource", + "sns:TagResource", + "sns:UntagResource", + "sns:Subscribe", + "sns:Unsubscribe", + "sns:ListSubscriptions", + "sns:ListSubscriptionsByTopic" + ], + Resource = [ + "arn:aws:cloudwatch:${var.default_aws_region}:${data.aws_caller_identity.current.account_id}:alarm:*", + "arn:aws:sns:${var.default_aws_region}:${data.aws_caller_identity.current.account_id}:cloudwatch-security-alarms*" + ] + } + ] + }) + + tags = merge(local.tags, { Name = "cloudwatch-alarms-management" }) +} + # Attach the policies to the role resource "aws_iam_role_policy_attachment" "terraform_state" { role = aws_iam_role.github_actions.name @@ -518,3 +564,8 @@ resource "aws_iam_role_policy_attachment" "firehose_readonly_attach" { role = aws_iam_role.github_actions.name policy_arn = aws_iam_policy.firehose_readonly.arn } + +resource "aws_iam_role_policy_attachment" "cloudwatch_alarms" { + role = aws_iam_role.github_actions.name + policy_arn = aws_iam_policy.cloudwatch_alarms.arn +} diff --git a/infrastructure/stacks/iams-developer-roles/iams_permissions_boundary.tf b/infrastructure/stacks/iams-developer-roles/iams_permissions_boundary.tf index 27909c885..8d7940668 100644 --- a/infrastructure/stacks/iams-developer-roles/iams_permissions_boundary.tf +++ 
b/infrastructure/stacks/iams-developer-roles/iams_permissions_boundary.tf @@ -33,7 +33,8 @@ data "aws_iam_policy_document" "permissions_boundary" { "support:*", "sqs:*", "tag:*", - "trustedadvisor:*" + "trustedadvisor:*", + "xray:*" ] resources = ["*"] diff --git a/infrastructure/stacks/networking/locals.tf b/infrastructure/stacks/networking/locals.tf index 5acda7626..9114c1144 100644 --- a/infrastructure/stacks/networking/locals.tf +++ b/infrastructure/stacks/networking/locals.tf @@ -22,6 +22,8 @@ locals { sts = "com.amazonaws.${local.region}.sts" sqs = "com.amazonaws.${local.region}.sqs" kinesis-firehose = "com.amazonaws.${local.region}.kinesis-firehose" + xray = "com.amazonaws.${local.region}.xray" + } # VPC Gateway Endpoints diff --git a/infrastructure/stacks/networking/vpc.tf b/infrastructure/stacks/networking/vpc.tf index a3ad8a04d..e88ce7894 100644 --- a/infrastructure/stacks/networking/vpc.tf +++ b/infrastructure/stacks/networking/vpc.tf @@ -21,3 +21,8 @@ resource "aws_default_security_group" "default_vpc" { } ) } + +# EC2.172 - block internet gateway access at the account level +resource "aws_vpc_block_public_access_options" "default_vpc" { + internet_gateway_block_mode = "block-bidirectional" +} diff --git a/poetry.lock b/poetry.lock index 963595b81..641bd9b07 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.1.4 and should not be changed by hand. 
[[package]] name = "aiohappyeyeballs" @@ -14,103 +14,103 @@ files = [ [[package]] name = "aiohttp" -version = "3.12.13" +version = "3.12.15" description = "Async http client/server framework (asyncio)" optional = false python-versions = ">=3.9" groups = ["dev"] files = [ - {file = "aiohttp-3.12.13-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:5421af8f22a98f640261ee48aae3a37f0c41371e99412d55eaf2f8a46d5dad29"}, - {file = "aiohttp-3.12.13-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0fcda86f6cb318ba36ed8f1396a6a4a3fd8f856f84d426584392083d10da4de0"}, - {file = "aiohttp-3.12.13-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4cd71c9fb92aceb5a23c4c39d8ecc80389c178eba9feab77f19274843eb9412d"}, - {file = "aiohttp-3.12.13-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:34ebf1aca12845066c963016655dac897651e1544f22a34c9b461ac3b4b1d3aa"}, - {file = "aiohttp-3.12.13-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:893a4639694c5b7edd4bdd8141be296042b6806e27cc1d794e585c43010cc294"}, - {file = "aiohttp-3.12.13-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:663d8ee3ffb3494502ebcccb49078faddbb84c1d870f9c1dd5a29e85d1f747ce"}, - {file = "aiohttp-3.12.13-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f0f8f6a85a0006ae2709aa4ce05749ba2cdcb4b43d6c21a16c8517c16593aabe"}, - {file = "aiohttp-3.12.13-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1582745eb63df267c92d8b61ca655a0ce62105ef62542c00a74590f306be8cb5"}, - {file = "aiohttp-3.12.13-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d59227776ee2aa64226f7e086638baa645f4b044f2947dbf85c76ab11dcba073"}, - {file = "aiohttp-3.12.13-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:06b07c418bde1c8e737d8fa67741072bd3f5b0fb66cf8c0655172188c17e5fa6"}, - {file = 
"aiohttp-3.12.13-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:9445c1842680efac0f81d272fd8db7163acfcc2b1436e3f420f4c9a9c5a50795"}, - {file = "aiohttp-3.12.13-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:09c4767af0b0b98c724f5d47f2bf33395c8986995b0a9dab0575ca81a554a8c0"}, - {file = "aiohttp-3.12.13-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:f3854fbde7a465318ad8d3fc5bef8f059e6d0a87e71a0d3360bb56c0bf87b18a"}, - {file = "aiohttp-3.12.13-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:2332b4c361c05ecd381edb99e2a33733f3db906739a83a483974b3df70a51b40"}, - {file = "aiohttp-3.12.13-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:1561db63fa1b658cd94325d303933553ea7d89ae09ff21cc3bcd41b8521fbbb6"}, - {file = "aiohttp-3.12.13-cp310-cp310-win32.whl", hash = "sha256:a0be857f0b35177ba09d7c472825d1b711d11c6d0e8a2052804e3b93166de1ad"}, - {file = "aiohttp-3.12.13-cp310-cp310-win_amd64.whl", hash = "sha256:fcc30ad4fb5cb41a33953292d45f54ef4066746d625992aeac33b8c681173178"}, - {file = "aiohttp-3.12.13-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:7c229b1437aa2576b99384e4be668af1db84b31a45305d02f61f5497cfa6f60c"}, - {file = "aiohttp-3.12.13-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:04076d8c63471e51e3689c93940775dc3d12d855c0c80d18ac5a1c68f0904358"}, - {file = "aiohttp-3.12.13-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:55683615813ce3601640cfaa1041174dc956d28ba0511c8cbd75273eb0587014"}, - {file = "aiohttp-3.12.13-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:921bc91e602d7506d37643e77819cb0b840d4ebb5f8d6408423af3d3bf79a7b7"}, - {file = "aiohttp-3.12.13-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e72d17fe0974ddeae8ed86db297e23dba39c7ac36d84acdbb53df2e18505a013"}, - {file = "aiohttp-3.12.13-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0653d15587909a52e024a261943cf1c5bdc69acb71f411b0dd5966d065a51a47"}, - {file = 
"aiohttp-3.12.13-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a77b48997c66722c65e157c06c74332cdf9c7ad00494b85ec43f324e5c5a9b9a"}, - {file = "aiohttp-3.12.13-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d6946bae55fd36cfb8e4092c921075cde029c71c7cb571d72f1079d1e4e013bc"}, - {file = "aiohttp-3.12.13-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4f95db8c8b219bcf294a53742c7bda49b80ceb9d577c8e7aa075612b7f39ffb7"}, - {file = "aiohttp-3.12.13-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:03d5eb3cfb4949ab4c74822fb3326cd9655c2b9fe22e4257e2100d44215b2e2b"}, - {file = "aiohttp-3.12.13-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:6383dd0ffa15515283c26cbf41ac8e6705aab54b4cbb77bdb8935a713a89bee9"}, - {file = "aiohttp-3.12.13-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:6548a411bc8219b45ba2577716493aa63b12803d1e5dc70508c539d0db8dbf5a"}, - {file = "aiohttp-3.12.13-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:81b0fcbfe59a4ca41dc8f635c2a4a71e63f75168cc91026c61be665945739e2d"}, - {file = "aiohttp-3.12.13-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:6a83797a0174e7995e5edce9dcecc517c642eb43bc3cba296d4512edf346eee2"}, - {file = "aiohttp-3.12.13-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a5734d8469a5633a4e9ffdf9983ff7cdb512524645c7a3d4bc8a3de45b935ac3"}, - {file = "aiohttp-3.12.13-cp311-cp311-win32.whl", hash = "sha256:fef8d50dfa482925bb6b4c208b40d8e9fa54cecba923dc65b825a72eed9a5dbd"}, - {file = "aiohttp-3.12.13-cp311-cp311-win_amd64.whl", hash = "sha256:9a27da9c3b5ed9d04c36ad2df65b38a96a37e9cfba6f1381b842d05d98e6afe9"}, - {file = "aiohttp-3.12.13-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0aa580cf80558557285b49452151b9c69f2fa3ad94c5c9e76e684719a8791b73"}, - {file = "aiohttp-3.12.13-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b103a7e414b57e6939cc4dece8e282cfb22043efd0c7298044f6594cf83ab347"}, - 
{file = "aiohttp-3.12.13-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:78f64e748e9e741d2eccff9597d09fb3cd962210e5b5716047cbb646dc8fe06f"}, - {file = "aiohttp-3.12.13-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29c955989bf4c696d2ededc6b0ccb85a73623ae6e112439398935362bacfaaf6"}, - {file = "aiohttp-3.12.13-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:d640191016763fab76072c87d8854a19e8e65d7a6fcfcbf017926bdbbb30a7e5"}, - {file = "aiohttp-3.12.13-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4dc507481266b410dede95dd9f26c8d6f5a14315372cc48a6e43eac652237d9b"}, - {file = "aiohttp-3.12.13-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8a94daa873465d518db073bd95d75f14302e0208a08e8c942b2f3f1c07288a75"}, - {file = "aiohttp-3.12.13-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:177f52420cde4ce0bb9425a375d95577fe082cb5721ecb61da3049b55189e4e6"}, - {file = "aiohttp-3.12.13-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0f7df1f620ec40f1a7fbcb99ea17d7326ea6996715e78f71a1c9a021e31b96b8"}, - {file = "aiohttp-3.12.13-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3062d4ad53b36e17796dce1c0d6da0ad27a015c321e663657ba1cc7659cfc710"}, - {file = "aiohttp-3.12.13-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:8605e22d2a86b8e51ffb5253d9045ea73683d92d47c0b1438e11a359bdb94462"}, - {file = "aiohttp-3.12.13-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:54fbbe6beafc2820de71ece2198458a711e224e116efefa01b7969f3e2b3ddae"}, - {file = "aiohttp-3.12.13-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:050bd277dfc3768b606fd4eae79dd58ceda67d8b0b3c565656a89ae34525d15e"}, - {file = "aiohttp-3.12.13-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:2637a60910b58f50f22379b6797466c3aa6ae28a6ab6404e09175ce4955b4e6a"}, - {file = 
"aiohttp-3.12.13-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e986067357550d1aaa21cfe9897fa19e680110551518a5a7cf44e6c5638cb8b5"}, - {file = "aiohttp-3.12.13-cp312-cp312-win32.whl", hash = "sha256:ac941a80aeea2aaae2875c9500861a3ba356f9ff17b9cb2dbfb5cbf91baaf5bf"}, - {file = "aiohttp-3.12.13-cp312-cp312-win_amd64.whl", hash = "sha256:671f41e6146a749b6c81cb7fd07f5a8356d46febdaaaf07b0e774ff04830461e"}, - {file = "aiohttp-3.12.13-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:d4a18e61f271127465bdb0e8ff36e8f02ac4a32a80d8927aa52371e93cd87938"}, - {file = "aiohttp-3.12.13-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:532542cb48691179455fab429cdb0d558b5e5290b033b87478f2aa6af5d20ace"}, - {file = "aiohttp-3.12.13-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d7eea18b52f23c050ae9db5d01f3d264ab08f09e7356d6f68e3f3ac2de9dfabb"}, - {file = "aiohttp-3.12.13-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ad7c8e5c25f2a26842a7c239de3f7b6bfb92304593ef997c04ac49fb703ff4d7"}, - {file = "aiohttp-3.12.13-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:6af355b483e3fe9d7336d84539fef460120c2f6e50e06c658fe2907c69262d6b"}, - {file = "aiohttp-3.12.13-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a95cf9f097498f35c88e3609f55bb47b28a5ef67f6888f4390b3d73e2bac6177"}, - {file = "aiohttp-3.12.13-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b8ed8c38a1c584fe99a475a8f60eefc0b682ea413a84c6ce769bb19a7ff1c5ef"}, - {file = "aiohttp-3.12.13-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a0b9170d5d800126b5bc89d3053a2363406d6e327afb6afaeda2d19ee8bb103"}, - {file = "aiohttp-3.12.13-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:372feeace612ef8eb41f05ae014a92121a512bd5067db8f25101dd88a8db11da"}, - {file = 
"aiohttp-3.12.13-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a946d3702f7965d81f7af7ea8fb03bb33fe53d311df48a46eeca17e9e0beed2d"}, - {file = "aiohttp-3.12.13-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:a0c4725fae86555bbb1d4082129e21de7264f4ab14baf735278c974785cd2041"}, - {file = "aiohttp-3.12.13-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:9b28ea2f708234f0a5c44eb6c7d9eb63a148ce3252ba0140d050b091b6e842d1"}, - {file = "aiohttp-3.12.13-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:d4f5becd2a5791829f79608c6f3dc745388162376f310eb9c142c985f9441cc1"}, - {file = "aiohttp-3.12.13-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:60f2ce6b944e97649051d5f5cc0f439360690b73909230e107fd45a359d3e911"}, - {file = "aiohttp-3.12.13-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:69fc1909857401b67bf599c793f2183fbc4804717388b0b888f27f9929aa41f3"}, - {file = "aiohttp-3.12.13-cp313-cp313-win32.whl", hash = "sha256:7d7e68787a2046b0e44ba5587aa723ce05d711e3a3665b6b7545328ac8e3c0dd"}, - {file = "aiohttp-3.12.13-cp313-cp313-win_amd64.whl", hash = "sha256:5a178390ca90419bfd41419a809688c368e63c86bd725e1186dd97f6b89c2706"}, - {file = "aiohttp-3.12.13-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:36f6c973e003dc9b0bb4e8492a643641ea8ef0e97ff7aaa5c0f53d68839357b4"}, - {file = "aiohttp-3.12.13-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6cbfc73179bd67c229eb171e2e3745d2afd5c711ccd1e40a68b90427f282eab1"}, - {file = "aiohttp-3.12.13-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1e8b27b2d414f7e3205aa23bb4a692e935ef877e3a71f40d1884f6e04fd7fa74"}, - {file = "aiohttp-3.12.13-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eabded0c2b2ef56243289112c48556c395d70150ce4220d9008e6b4b3dd15690"}, - {file = "aiohttp-3.12.13-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:003038e83f1a3ff97409999995ec02fe3008a1d675478949643281141f54751d"}, - {file = 
"aiohttp-3.12.13-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1b6f46613031dbc92bdcaad9c4c22c7209236ec501f9c0c5f5f0b6a689bf50f3"}, - {file = "aiohttp-3.12.13-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c332c6bb04650d59fb94ed96491f43812549a3ba6e7a16a218e612f99f04145e"}, - {file = "aiohttp-3.12.13-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3fea41a2c931fb582cb15dc86a3037329e7b941df52b487a9f8b5aa960153cbd"}, - {file = "aiohttp-3.12.13-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:846104f45d18fb390efd9b422b27d8f3cf8853f1218c537f36e71a385758c896"}, - {file = "aiohttp-3.12.13-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5d6c85ac7dd350f8da2520bac8205ce99df4435b399fa7f4dc4a70407073e390"}, - {file = "aiohttp-3.12.13-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:5a1ecce0ed281bec7da8550da052a6b89552db14d0a0a45554156f085a912f48"}, - {file = "aiohttp-3.12.13-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:5304d74867028cca8f64f1cc1215eb365388033c5a691ea7aa6b0dc47412f495"}, - {file = "aiohttp-3.12.13-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:64d1f24ee95a2d1e094a4cd7a9b7d34d08db1bbcb8aa9fb717046b0a884ac294"}, - {file = "aiohttp-3.12.13-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:119c79922a7001ca6a9e253228eb39b793ea994fd2eccb79481c64b5f9d2a055"}, - {file = "aiohttp-3.12.13-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:bb18f00396d22e2f10cd8825d671d9f9a3ba968d708a559c02a627536b36d91c"}, - {file = "aiohttp-3.12.13-cp39-cp39-win32.whl", hash = "sha256:0022de47ef63fd06b065d430ac79c6b0bd24cdae7feaf0e8c6bac23b805a23a8"}, - {file = "aiohttp-3.12.13-cp39-cp39-win_amd64.whl", hash = "sha256:29e08111ccf81b2734ae03f1ad1cb03b9615e7d8f616764f22f71209c094f122"}, - {file = "aiohttp-3.12.13.tar.gz", hash = "sha256:47e2da578528264a12e4e3dd8dd72a7289e5f812758fe086473fab037a10fcce"}, + {file = 
"aiohttp-3.12.15-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:b6fc902bff74d9b1879ad55f5404153e2b33a82e72a95c89cec5eb6cc9e92fbc"}, + {file = "aiohttp-3.12.15-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:098e92835b8119b54c693f2f88a1dec690e20798ca5f5fe5f0520245253ee0af"}, + {file = "aiohttp-3.12.15-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:40b3fee496a47c3b4a39a731954c06f0bd9bd3e8258c059a4beb76ac23f8e421"}, + {file = "aiohttp-3.12.15-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ce13fcfb0bb2f259fb42106cdc63fa5515fb85b7e87177267d89a771a660b79"}, + {file = "aiohttp-3.12.15-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3beb14f053222b391bf9cf92ae82e0171067cc9c8f52453a0f1ec7c37df12a77"}, + {file = "aiohttp-3.12.15-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4c39e87afe48aa3e814cac5f535bc6199180a53e38d3f51c5e2530f5aa4ec58c"}, + {file = "aiohttp-3.12.15-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5f1b4ce5bc528a6ee38dbf5f39bbf11dd127048726323b72b8e85769319ffc4"}, + {file = "aiohttp-3.12.15-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1004e67962efabbaf3f03b11b4c43b834081c9e3f9b32b16a7d97d4708a9abe6"}, + {file = "aiohttp-3.12.15-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8faa08fcc2e411f7ab91d1541d9d597d3a90e9004180edb2072238c085eac8c2"}, + {file = "aiohttp-3.12.15-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:fe086edf38b2222328cdf89af0dde2439ee173b8ad7cb659b4e4c6f385b2be3d"}, + {file = "aiohttp-3.12.15-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:79b26fe467219add81d5e47b4a4ba0f2394e8b7c7c3198ed36609f9ba161aecb"}, + {file = "aiohttp-3.12.15-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:b761bac1192ef24e16706d761aefcb581438b34b13a2f069a6d343ec8fb693a5"}, + {file = 
"aiohttp-3.12.15-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:e153e8adacfe2af562861b72f8bc47f8a5c08e010ac94eebbe33dc21d677cd5b"}, + {file = "aiohttp-3.12.15-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:fc49c4de44977aa8601a00edbf157e9a421f227aa7eb477d9e3df48343311065"}, + {file = "aiohttp-3.12.15-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:2776c7ec89c54a47029940177e75c8c07c29c66f73464784971d6a81904ce9d1"}, + {file = "aiohttp-3.12.15-cp310-cp310-win32.whl", hash = "sha256:2c7d81a277fa78b2203ab626ced1487420e8c11a8e373707ab72d189fcdad20a"}, + {file = "aiohttp-3.12.15-cp310-cp310-win_amd64.whl", hash = "sha256:83603f881e11f0f710f8e2327817c82e79431ec976448839f3cd05d7afe8f830"}, + {file = "aiohttp-3.12.15-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:d3ce17ce0220383a0f9ea07175eeaa6aa13ae5a41f30bc61d84df17f0e9b1117"}, + {file = "aiohttp-3.12.15-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:010cc9bbd06db80fe234d9003f67e97a10fe003bfbedb40da7d71c1008eda0fe"}, + {file = "aiohttp-3.12.15-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3f9d7c55b41ed687b9d7165b17672340187f87a773c98236c987f08c858145a9"}, + {file = "aiohttp-3.12.15-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bc4fbc61bb3548d3b482f9ac7ddd0f18c67e4225aaa4e8552b9f1ac7e6bda9e5"}, + {file = "aiohttp-3.12.15-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:7fbc8a7c410bb3ad5d595bb7118147dfbb6449d862cc1125cf8867cb337e8728"}, + {file = "aiohttp-3.12.15-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:74dad41b3458dbb0511e760fb355bb0b6689e0630de8a22b1b62a98777136e16"}, + {file = "aiohttp-3.12.15-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3b6f0af863cf17e6222b1735a756d664159e58855da99cfe965134a3ff63b0b0"}, + {file = "aiohttp-3.12.15-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:b5b7fe4972d48a4da367043b8e023fb70a04d1490aa7d68800e465d1b97e493b"}, + {file = "aiohttp-3.12.15-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6443cca89553b7a5485331bc9bedb2342b08d073fa10b8c7d1c60579c4a7b9bd"}, + {file = "aiohttp-3.12.15-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6c5f40ec615e5264f44b4282ee27628cea221fcad52f27405b80abb346d9f3f8"}, + {file = "aiohttp-3.12.15-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:2abbb216a1d3a2fe86dbd2edce20cdc5e9ad0be6378455b05ec7f77361b3ab50"}, + {file = "aiohttp-3.12.15-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:db71ce547012a5420a39c1b744d485cfb823564d01d5d20805977f5ea1345676"}, + {file = "aiohttp-3.12.15-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:ced339d7c9b5030abad5854aa5413a77565e5b6e6248ff927d3e174baf3badf7"}, + {file = "aiohttp-3.12.15-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:7c7dd29c7b5bda137464dc9bfc738d7ceea46ff70309859ffde8c022e9b08ba7"}, + {file = "aiohttp-3.12.15-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:421da6fd326460517873274875c6c5a18ff225b40da2616083c5a34a7570b685"}, + {file = "aiohttp-3.12.15-cp311-cp311-win32.whl", hash = "sha256:4420cf9d179ec8dfe4be10e7d0fe47d6d606485512ea2265b0d8c5113372771b"}, + {file = "aiohttp-3.12.15-cp311-cp311-win_amd64.whl", hash = "sha256:edd533a07da85baa4b423ee8839e3e91681c7bfa19b04260a469ee94b778bf6d"}, + {file = "aiohttp-3.12.15-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:802d3868f5776e28f7bf69d349c26fc0efadb81676d0afa88ed00d98a26340b7"}, + {file = "aiohttp-3.12.15-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f2800614cd560287be05e33a679638e586a2d7401f4ddf99e304d98878c29444"}, + {file = "aiohttp-3.12.15-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8466151554b593909d30a0a125d638b4e5f3836e5aecde85b66b80ded1cb5b0d"}, + {file = "aiohttp-3.12.15-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:2e5a495cb1be69dae4b08f35a6c4579c539e9b5706f606632102c0f855bcba7c"}, + {file = "aiohttp-3.12.15-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:6404dfc8cdde35c69aaa489bb3542fb86ef215fc70277c892be8af540e5e21c0"}, + {file = "aiohttp-3.12.15-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3ead1c00f8521a5c9070fcb88f02967b1d8a0544e6d85c253f6968b785e1a2ab"}, + {file = "aiohttp-3.12.15-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6990ef617f14450bc6b34941dba4f12d5613cbf4e33805932f853fbd1cf18bfb"}, + {file = "aiohttp-3.12.15-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd736ed420f4db2b8148b52b46b88ed038d0354255f9a73196b7bbce3ea97545"}, + {file = "aiohttp-3.12.15-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c5092ce14361a73086b90c6efb3948ffa5be2f5b6fbcf52e8d8c8b8848bb97c"}, + {file = "aiohttp-3.12.15-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:aaa2234bb60c4dbf82893e934d8ee8dea30446f0647e024074237a56a08c01bd"}, + {file = "aiohttp-3.12.15-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:6d86a2fbdd14192e2f234a92d3b494dd4457e683ba07e5905a0b3ee25389ac9f"}, + {file = "aiohttp-3.12.15-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a041e7e2612041a6ddf1c6a33b883be6a421247c7afd47e885969ee4cc58bd8d"}, + {file = "aiohttp-3.12.15-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:5015082477abeafad7203757ae44299a610e89ee82a1503e3d4184e6bafdd519"}, + {file = "aiohttp-3.12.15-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:56822ff5ddfd1b745534e658faba944012346184fbfe732e0d6134b744516eea"}, + {file = "aiohttp-3.12.15-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b2acbbfff69019d9014508c4ba0401822e8bae5a5fdc3b6814285b71231b60f3"}, + {file = "aiohttp-3.12.15-cp312-cp312-win32.whl", hash = 
"sha256:d849b0901b50f2185874b9a232f38e26b9b3d4810095a7572eacea939132d4e1"}, + {file = "aiohttp-3.12.15-cp312-cp312-win_amd64.whl", hash = "sha256:b390ef5f62bb508a9d67cb3bba9b8356e23b3996da7062f1a57ce1a79d2b3d34"}, + {file = "aiohttp-3.12.15-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:9f922ffd05034d439dde1c77a20461cf4a1b0831e6caa26151fe7aa8aaebc315"}, + {file = "aiohttp-3.12.15-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:2ee8a8ac39ce45f3e55663891d4b1d15598c157b4d494a4613e704c8b43112cd"}, + {file = "aiohttp-3.12.15-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3eae49032c29d356b94eee45a3f39fdf4b0814b397638c2f718e96cfadf4c4e4"}, + {file = "aiohttp-3.12.15-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b97752ff12cc12f46a9b20327104448042fce5c33a624f88c18f66f9368091c7"}, + {file = "aiohttp-3.12.15-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:894261472691d6fe76ebb7fcf2e5870a2ac284c7406ddc95823c8598a1390f0d"}, + {file = "aiohttp-3.12.15-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5fa5d9eb82ce98959fc1031c28198b431b4d9396894f385cb63f1e2f3f20ca6b"}, + {file = "aiohttp-3.12.15-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f0fa751efb11a541f57db59c1dd821bec09031e01452b2b6217319b3a1f34f3d"}, + {file = "aiohttp-3.12.15-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5346b93e62ab51ee2a9d68e8f73c7cf96ffb73568a23e683f931e52450e4148d"}, + {file = "aiohttp-3.12.15-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:049ec0360f939cd164ecbfd2873eaa432613d5e77d6b04535e3d1fbae5a9e645"}, + {file = "aiohttp-3.12.15-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b52dcf013b57464b6d1e51b627adfd69a8053e84b7103a7cd49c030f9ca44461"}, + {file = "aiohttp-3.12.15-cp313-cp313-musllinux_1_2_armv7l.whl", hash = 
"sha256:9b2af240143dd2765e0fb661fd0361a1b469cab235039ea57663cda087250ea9"}, + {file = "aiohttp-3.12.15-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ac77f709a2cde2cc71257ab2d8c74dd157c67a0558a0d2799d5d571b4c63d44d"}, + {file = "aiohttp-3.12.15-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:47f6b962246f0a774fbd3b6b7be25d59b06fdb2f164cf2513097998fc6a29693"}, + {file = "aiohttp-3.12.15-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:760fb7db442f284996e39cf9915a94492e1896baac44f06ae551974907922b64"}, + {file = "aiohttp-3.12.15-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ad702e57dc385cae679c39d318def49aef754455f237499d5b99bea4ef582e51"}, + {file = "aiohttp-3.12.15-cp313-cp313-win32.whl", hash = "sha256:f813c3e9032331024de2eb2e32a88d86afb69291fbc37a3a3ae81cc9917fb3d0"}, + {file = "aiohttp-3.12.15-cp313-cp313-win_amd64.whl", hash = "sha256:1a649001580bdb37c6fdb1bebbd7e3bc688e8ec2b5c6f52edbb664662b17dc84"}, + {file = "aiohttp-3.12.15-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:691d203c2bdf4f4637792efbbcdcd157ae11e55eaeb5e9c360c1206fb03d4d98"}, + {file = "aiohttp-3.12.15-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8e995e1abc4ed2a454c731385bf4082be06f875822adc4c6d9eaadf96e20d406"}, + {file = "aiohttp-3.12.15-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:bd44d5936ab3193c617bfd6c9a7d8d1085a8dc8c3f44d5f1dcf554d17d04cf7d"}, + {file = "aiohttp-3.12.15-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:46749be6e89cd78d6068cdf7da51dbcfa4321147ab8e4116ee6678d9a056a0cf"}, + {file = "aiohttp-3.12.15-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:0c643f4d75adea39e92c0f01b3fb83d57abdec8c9279b3078b68a3a52b3933b6"}, + {file = "aiohttp-3.12.15-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0a23918fedc05806966a2438489dcffccbdf83e921a1170773b6178d04ade142"}, + {file = "aiohttp-3.12.15-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", 
hash = "sha256:74bdd8c864b36c3673741023343565d95bfbd778ffe1eb4d412c135a28a8dc89"}, + {file = "aiohttp-3.12.15-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0a146708808c9b7a988a4af3821379e379e0f0e5e466ca31a73dbdd0325b0263"}, + {file = "aiohttp-3.12.15-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b7011a70b56facde58d6d26da4fec3280cc8e2a78c714c96b7a01a87930a9530"}, + {file = "aiohttp-3.12.15-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:3bdd6e17e16e1dbd3db74d7f989e8af29c4d2e025f9828e6ef45fbdee158ec75"}, + {file = "aiohttp-3.12.15-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:57d16590a351dfc914670bd72530fd78344b885a00b250e992faea565b7fdc05"}, + {file = "aiohttp-3.12.15-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:bc9a0f6569ff990e0bbd75506c8d8fe7214c8f6579cca32f0546e54372a3bb54"}, + {file = "aiohttp-3.12.15-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:536ad7234747a37e50e7b6794ea868833d5220b49c92806ae2d7e8a9d6b5de02"}, + {file = "aiohttp-3.12.15-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:f0adb4177fa748072546fb650d9bd7398caaf0e15b370ed3317280b13f4083b0"}, + {file = "aiohttp-3.12.15-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:14954a2988feae3987f1eb49c706bff39947605f4b6fa4027c1d75743723eb09"}, + {file = "aiohttp-3.12.15-cp39-cp39-win32.whl", hash = "sha256:b784d6ed757f27574dca1c336f968f4e81130b27595e458e69457e6878251f5d"}, + {file = "aiohttp-3.12.15-cp39-cp39-win_amd64.whl", hash = "sha256:86ceded4e78a992f835209e236617bffae649371c4a50d5e5a3987f237db84b8"}, + {file = "aiohttp-3.12.15.tar.gz", hash = "sha256:4fc61385e9c98d72fcdf47e6dd81833f47b2f77c114c29cd64a361be57a763a2"}, ] [package.dependencies] aiohappyeyeballs = ">=2.5.0" -aiosignal = ">=1.1.2" +aiosignal = ">=1.4.0" attrs = ">=17.3.0" frozenlist = ">=1.1.1" multidict = ">=4.5,<7.0" @@ -122,14 +122,14 @@ speedups = ["Brotli ; platform_python_implementation == \"CPython\"", "aiodns (> 
[[package]] name = "aiosignal" -version = "1.3.2" +version = "1.4.0" description = "aiosignal: a list of registered asynchronous callbacks" optional = false python-versions = ">=3.9" groups = ["dev"] files = [ - {file = "aiosignal-1.3.2-py2.py3-none-any.whl", hash = "sha256:45cde58e409a301715980c2b01d0c28bdde3770d8290b5eb2173759d9acb31a5"}, - {file = "aiosignal-1.3.2.tar.gz", hash = "sha256:a8c255c66fafb1e499c9351d0bf32ff2d8a0321595ebac3b93713656d2436f54"}, + {file = "aiosignal-1.4.0-py3-none-any.whl", hash = "sha256:053243f8b92b990551949e63930a839ff0cf0b0ebbe0597b0f3fb19e1a0fe82e"}, + {file = "aiosignal-1.4.0.tar.gz", hash = "sha256:f47eecd9468083c2029cc99945502cb7708b082c232f9aca65da147157b251c7"}, ] [package.dependencies] @@ -168,40 +168,20 @@ doc = ["Sphinx (>=8.2,<9.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", test = ["anyio[trio]", "blockbuster (>=1.5.23)", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "trustme", "truststore (>=0.9.1) ; python_version >= \"3.10\"", "uvloop (>=0.21) ; platform_python_implementation == \"CPython\" and platform_system != \"Windows\" and python_version < \"3.14\""] trio = ["trio (>=0.26.1)"] -[[package]] -name = "arrow" -version = "1.3.0" -description = "Better dates & times for Python" -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "arrow-1.3.0-py3-none-any.whl", hash = "sha256:c728b120ebc00eb84e01882a6f5e7927a53960aa990ce7dd2b10f39005a67f80"}, - {file = "arrow-1.3.0.tar.gz", hash = "sha256:d4540617648cb5f895730f1ad8c82a65f2dad0166f57b75f3ca54759c4d67a85"}, -] - -[package.dependencies] -python-dateutil = ">=2.7.0" -types-python-dateutil = ">=2.8.10" - -[package.extras] -doc = ["doc8", "sphinx (>=7.0.0)", "sphinx-autobuild", "sphinx-autodoc-typehints", "sphinx_rtd_theme (>=1.3.0)"] -test = ["dateparser (==1.*)", "pre-commit", "pytest", "pytest-cov", "pytest-mock", "pytz (==2021.1)", "simplejson (==3.*)"] - [[package]] 
name = "asgiref" -version = "3.8.1" +version = "3.9.1" description = "ASGI specs, helper code, and adapters" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" groups = ["main"] files = [ - {file = "asgiref-3.8.1-py3-none-any.whl", hash = "sha256:3e1e3ecc849832fe52ccf2cb6686b7a55f82bb1d6aee72a58826471390335e47"}, - {file = "asgiref-3.8.1.tar.gz", hash = "sha256:c343bd80a0bec947a9860adb4c432ffa7db769836c64238fc34bdc3fec84d590"}, + {file = "asgiref-3.9.1-py3-none-any.whl", hash = "sha256:f3bba7092a48005b5f5bacd747d36ee4a5a61f4a269a6df590b43144355ebd2c"}, + {file = "asgiref-3.9.1.tar.gz", hash = "sha256:a5ab6582236218e5ef1648f242fd9f10626cfd4de8dc377db215d5d5098e3142"}, ] [package.extras] -tests = ["mypy (>=0.800)", "pytest", "pytest-asyncio"] +tests = ["mypy (>=1.14.0)", "pytest", "pytest-asyncio"] [[package]] name = "attrs" @@ -209,7 +189,7 @@ version = "25.3.0" description = "Classes Without Boilerplate" optional = false python-versions = ">=3.8" -groups = ["main", "dev"] +groups = ["dev"] files = [ {file = "attrs-25.3.0-py3-none-any.whl", hash = "sha256:427318ce031701fea540783410126f03899a97ffc6f61596ad581ac2e40e3bc3"}, {file = "attrs-25.3.0.tar.gz", hash = "sha256:75d7cefc7fb576747b2c81b4442d4d4a1ce0900973527c011d1030fd3bf4af1b"}, @@ -238,20 +218,36 @@ files = [ [package.dependencies] cryptography = "*" +[[package]] +name = "aws-xray-sdk" +version = "2.14.0" +description = "The AWS X-Ray SDK for Python (the SDK) enables Python developers to record and emit information from within their applications to the AWS X-Ray service." 
+optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "aws_xray_sdk-2.14.0-py2.py3-none-any.whl", hash = "sha256:cfbe6feea3d26613a2a869d14c9246a844285c97087ad8f296f901633554ad94"}, + {file = "aws_xray_sdk-2.14.0.tar.gz", hash = "sha256:aab843c331af9ab9ba5cefb3a303832a19db186140894a523edafc024cc0493c"}, +] + +[package.dependencies] +botocore = ">=1.11.3" +wrapt = "*" + [[package]] name = "awscli" -version = "1.40.41" +version = "1.40.45" description = "Universal Command Line Environment for AWS." optional = false python-versions = ">=3.9" groups = ["dev"] files = [ - {file = "awscli-1.40.41-py3-none-any.whl", hash = "sha256:d75cc6c654418ac4d30eb996081033e90024fa7a661db8ab40de4b5a545eaa79"}, - {file = "awscli-1.40.41.tar.gz", hash = "sha256:553c3a3ba7879be18c5db219f9a710daf90d750044eb604297b25805b05ebc42"}, + {file = "awscli-1.40.45-py3-none-any.whl", hash = "sha256:017cdb820e9d1a1ff72abd968b27eea8c36f5d0a2f30dad555d027a8c53c18fe"}, + {file = "awscli-1.40.45.tar.gz", hash = "sha256:41c06b168de2bb4e573804de8034f061e1d856bd0a362609badef8f47cd33bed"}, ] [package.dependencies] -botocore = "1.38.42" +botocore = "1.38.46" colorama = ">=0.2.5,<0.4.7" docutils = ">=0.18.1,<=0.19" PyYAML = ">=3.10,<6.1" @@ -275,18 +271,6 @@ localstack-client = "*" [package.extras] ver1 = ["awscli"] -[[package]] -name = "backoff" -version = "2.2.1" -description = "Function decoration for backoff and retry" -optional = false -python-versions = ">=3.7,<4.0" -groups = ["main"] -files = [ - {file = "backoff-2.2.1-py3-none-any.whl", hash = "sha256:63579f9a0628e06278f7e47b7d7d5b6ce20dc65c5e96a6f3ca99a6adca0396e8"}, - {file = "backoff-2.2.1.tar.gz", hash = "sha256:03f829f5bb1923180821643f8753b0502c3b682293992485b0eef2807afa5cba"}, -] - [[package]] name = "beautifulsoup4" version = "4.13.4" @@ -365,14 +349,14 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] [[package]] name = "botocore" -version = "1.38.42" +version = "1.38.46" description = "Low-level, data-driven core of 
boto 3." optional = false python-versions = ">=3.9" groups = ["main", "dev"] files = [ - {file = "botocore-1.38.42-py3-none-any.whl", hash = "sha256:fbbeac30c045b5c19f1c3bb063ea2b6315ce2d6fcb3d898e87d1c1846297961c"}, - {file = "botocore-1.38.42.tar.gz", hash = "sha256:3a14188e48f6e26be561164373d34150fa9cb39f7ad32cc745dcd3ab05f43683"}, + {file = "botocore-1.38.46-py3-none-any.whl", hash = "sha256:89ca782ffbf2e8769ca9c89234cfa5ca577f1987d07d913ee3c68c4776b1eb5b"}, + {file = "botocore-1.38.46.tar.gz", hash = "sha256:8798e5a418c27cf93195b077153644aea44cb171fcd56edc1ecebaa1e49e226e"}, ] [package.dependencies] @@ -547,25 +531,13 @@ files = [ [package.dependencies] pycparser = "*" -[[package]] -name = "chardet" -version = "5.2.0" -description = "Universal encoding detector for Python 3" -optional = false -python-versions = ">=3.7" -groups = ["main"] -files = [ - {file = "chardet-5.2.0-py3-none-any.whl", hash = "sha256:e1cf59446890a00105fe7b7912492ea04b6e6f06d4b742b2c788469e34c82970"}, - {file = "chardet-5.2.0.tar.gz", hash = "sha256:1b3b6ff479a8c414bc3fa2c0852995695c4a026dcd6d0633b2dd092ca39c1cf7"}, -] - [[package]] name = "charset-normalizer" version = "3.4.2" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
optional = false python-versions = ">=3.7" -groups = ["main", "dev"] +groups = ["dev"] files = [ {file = "charset_normalizer-3.4.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7c48ed483eb946e6c04ccbe02c6b4d1d48e51944b6db70f697e089c193404941"}, {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2d318c11350e10662026ad0eb71bb51c7812fc8590825304ae0bdd4ac283acd"}, @@ -687,6 +659,7 @@ files = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] +markers = {main = "platform_system == \"Windows\""} [[package]] name = "coverage" @@ -1025,18 +998,6 @@ werkzeug = ">=3.1.0" async = ["asgiref (>=3.2)"] dotenv = ["python-dotenv"] -[[package]] -name = "fqdn" -version = "1.5.1" -description = "Validates fully-qualified domain names against RFC 1123, so that they are acceptable to modern bowsers" -optional = false -python-versions = ">=2.7, !=3.0, !=3.1, !=3.2, !=3.3, !=3.4, <4" -groups = ["main"] -files = [ - {file = "fqdn-1.5.1-py3-none-any.whl", hash = "sha256:3a179af3761e4df6eb2e026ff9e1a3033d3587bf980a0b1b2e1e5d08d7358014"}, - {file = "fqdn-1.5.1.tar.gz", hash = "sha256:105ed3677e767fb5ca086a0c1f4bb66ebc3c100be518f0e0d755d9eae164d89f"}, -] - [[package]] name = "freezegun" version = "1.5.2" @@ -1199,14 +1160,14 @@ smmap = ">=3.0.1,<6" [[package]] name = "gitpython" -version = "3.1.44" +version = "3.1.45" description = "GitPython is a Python library used to interact with Git repositories" optional = false python-versions = ">=3.7" groups = ["dev"] files = [ - {file = "GitPython-3.1.44-py3-none-any.whl", hash = "sha256:9e0e10cda9bed1ee64bc9a6de50e7e38a9c9943241cd7f585f6df3ed28011110"}, - {file = "gitpython-3.1.44.tar.gz", hash = "sha256:c87e30b26253bf5418b01b0660f818967f3c503193838337fe5e573331249269"}, + {file = 
"gitpython-3.1.45-py3-none-any.whl", hash = "sha256:8908cb2e02fb3b93b7eb0f2827125cb699869470432cc885f019b8fd0fccff77"}, + {file = "gitpython-3.1.45.tar.gz", hash = "sha256:85b0ee964ceddf211c41b9f27a49086010a190fd8132a24e21f362a4b36a791c"}, ] [package.dependencies] @@ -1216,18 +1177,6 @@ gitdb = ">=4.0.1,<5" doc = ["sphinx (>=7.1.2,<7.2)", "sphinx-autodoc-typehints", "sphinx_rtd_theme"] test = ["coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mock ; python_version < \"3.8\"", "mypy", "pre-commit", "pytest (>=7.3.1)", "pytest-cov", "pytest-instafail", "pytest-mock", "pytest-sugar", "typing-extensions ; python_version < \"3.11\""] -[[package]] -name = "graphql-core" -version = "3.2.6" -description = "GraphQL implementation for Python, a port of GraphQL.js, the JavaScript reference implementation for GraphQL." -optional = false -python-versions = "<4,>=3.6" -groups = ["main"] -files = [ - {file = "graphql_core-3.2.6-py3-none-any.whl", hash = "sha256:78b016718c161a6fb20a7d97bbf107f331cd1afe53e45566c59f776ed7f0b45f"}, - {file = "graphql_core-3.2.6.tar.gz", hash = "sha256:c08eec22f9e40f0bd61d805907e3b3b1b9a320bc606e23dc145eebca07c8fbab"}, -] - [[package]] name = "h11" version = "0.16.0" @@ -1240,24 +1189,6 @@ files = [ {file = "h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1"}, ] -[[package]] -name = "harfile" -version = "0.3.0" -description = "Writer for HTTP Archive (HAR) files" -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "harfile-0.3.0-py3-none-any.whl", hash = "sha256:ac11177e06c88c9553c8c73c16ab20428a176d1d2ebe00b41ce527ff0bdc47e6"}, - {file = "harfile-0.3.0.tar.gz", hash = "sha256:23be8037e1296bb4787a15543a37835ed91f408c8296988f9ba022a44accad9e"}, -] - -[package.extras] -bench = ["pytest-codspeed (==2.2.1)"] -cov = ["coverage-enable-subprocess", "coverage[toml] (>=7)"] -dev = ["coverage (>=7)", "coverage-enable-subprocess", "coverage[toml] (>=7)", "hypothesis (>=6)", 
"hypothesis-jsonschema (>=0.23.1)", "jsonschema (>=4.18.0)", "pytest (>=6.2.0,<8)", "pytest-codspeed (==2.2.1)"] -tests = ["coverage (>=7)", "hypothesis (>=6)", "hypothesis-jsonschema (>=0.23.1)", "jsonschema (>=4.18.0)", "pytest (>=6.2.0,<8)"] - [[package]] name = "httpcore" version = "1.0.9" @@ -1305,77 +1236,6 @@ http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] zstd = ["zstandard (>=0.18.0)"] -[[package]] -name = "hypothesis" -version = "6.137.1" -description = "A library for property-based testing" -optional = false -python-versions = ">=3.9" -groups = ["main"] -files = [ - {file = "hypothesis-6.137.1-py3-none-any.whl", hash = "sha256:7cbda6a98ed4d32aad31a5fc5bff5e119b9275fe2579a7b08863cba313a4b9be"}, - {file = "hypothesis-6.137.1.tar.gz", hash = "sha256:b086e644456da79ad460fdaf8fbf90a41a661e8a4076232dd4ea64cfbc0d0529"}, -] - -[package.dependencies] -attrs = ">=22.2.0" -sortedcontainers = ">=2.1.0,<3.0.0" - -[package.extras] -all = ["black (>=19.10b0)", "click (>=7.0)", "crosshair-tool (>=0.0.93)", "django (>=4.2)", "dpcontracts (>=0.4)", "hypothesis-crosshair (>=0.0.24)", "lark (>=0.10.1)", "libcst (>=0.3.16)", "numpy (>=1.19.3)", "pandas (>=1.1)", "pytest (>=4.6)", "python-dateutil (>=1.4)", "pytz (>=2014.1)", "redis (>=3.0.0)", "rich (>=9.0.0)", "tzdata (>=2025.2) ; sys_platform == \"win32\" or sys_platform == \"emscripten\"", "watchdog (>=4.0.0)"] -cli = ["black (>=19.10b0)", "click (>=7.0)", "rich (>=9.0.0)"] -codemods = ["libcst (>=0.3.16)"] -crosshair = ["crosshair-tool (>=0.0.93)", "hypothesis-crosshair (>=0.0.24)"] -dateutil = ["python-dateutil (>=1.4)"] -django = ["django (>=4.2)"] -dpcontracts = ["dpcontracts (>=0.4)"] -ghostwriter = ["black (>=19.10b0)"] -lark = ["lark (>=0.10.1)"] -numpy = ["numpy (>=1.19.3)"] -pandas = ["pandas (>=1.1)"] -pytest = ["pytest (>=4.6)"] -pytz = ["pytz (>=2014.1)"] -redis = ["redis (>=3.0.0)"] -watchdog = ["watchdog (>=4.0.0)"] -zoneinfo = ["tzdata (>=2025.2) ; sys_platform == \"win32\" or sys_platform == 
\"emscripten\""] - -[[package]] -name = "hypothesis-graphql" -version = "0.11.1" -description = "Hypothesis strategies for GraphQL queries" -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "hypothesis_graphql-0.11.1-py3-none-any.whl", hash = "sha256:a6968f703bcdc31fbe1b26be69185aa2c824eb3b478057a66aa85967c81cadca"}, - {file = "hypothesis_graphql-0.11.1.tar.gz", hash = "sha256:bd49ab6804a3f488ecab2e39c20dba6dfc2101525c6742f5831cfa9eff95285a"}, -] - -[package.dependencies] -graphql-core = ">=3.1.0,<3.3.0" -hypothesis = ">=6.84.3,<7.0" - -[package.extras] -cov = ["coverage-enable-subprocess", "coverage[toml] (>=7)"] -dev = ["coverage (>=7)", "coverage-enable-subprocess", "coverage[toml] (>=7)", "pytest (>=6.2.0,<8)", "pytest-xdist (>=2.5,<3.0)"] -tests = ["coverage (>=7)", "pytest (>=6.2.0,<8)", "pytest-xdist (>=2.5,<3.0)"] - -[[package]] -name = "hypothesis-jsonschema" -version = "0.23.1" -description = "Generate test data from JSON schemata with Hypothesis" -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "hypothesis-jsonschema-0.23.1.tar.gz", hash = "sha256:f4ac032024342a4149a10253984f5a5736b82b3fe2afb0888f3834a31153f215"}, - {file = "hypothesis_jsonschema-0.23.1-py3-none-any.whl", hash = "sha256:a4d74d9516dd2784fbbae82e009f62486c9104ac6f4e3397091d98a1d5ee94a2"}, -] - -[package.dependencies] -hypothesis = ">=6.84.3" -jsonschema = ">=4.18.0" - [[package]] name = "idna" version = "3.10" @@ -1397,27 +1257,12 @@ version = "2.1.0" description = "brain-dead simple config-ini parsing" optional = false python-versions = ">=3.8" -groups = ["main", "dev"] +groups = ["dev"] files = [ {file = "iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760"}, {file = "iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7"}, ] -[[package]] -name = "isoduration" -version = "20.11.0" -description = 
"Operations with ISO 8601 durations" -optional = false -python-versions = ">=3.7" -groups = ["main"] -files = [ - {file = "isoduration-20.11.0-py3-none-any.whl", hash = "sha256:b2904c2a4228c3d44f409c8ae8e2370eb21a26f7ac2ec5446df141dde3452042"}, - {file = "isoduration-20.11.0.tar.gz", hash = "sha256:ac2f9015137935279eac671f94f89eb00584f940f5dc49462a0c4ee692ba1bd9"}, -] - -[package.dependencies] -arrow = ">=0.15.0" - [[package]] name = "itsdangerous" version = "2.2.0" @@ -1476,47 +1321,27 @@ decorator = "*" ply = "*" six = "*" -[[package]] -name = "jsonpointer" -version = "3.0.0" -description = "Identify specific nodes in a JSON document (RFC 6901)" -optional = false -python-versions = ">=3.7" -groups = ["main"] -files = [ - {file = "jsonpointer-3.0.0-py2.py3-none-any.whl", hash = "sha256:13e088adc14fca8b6aa8177c044e12701e6ad4b28ff10e65f2267a90109c9942"}, - {file = "jsonpointer-3.0.0.tar.gz", hash = "sha256:2b2d729f2091522d61c3b31f82e11870f60b68f43fbc705cb76bf4b832af59ef"}, -] - [[package]] name = "jsonschema" -version = "4.25.0" +version = "4.24.0" description = "An implementation of JSON Schema validation for Python" optional = false python-versions = ">=3.9" -groups = ["main", "dev"] +groups = ["dev"] files = [ - {file = "jsonschema-4.25.0-py3-none-any.whl", hash = "sha256:24c2e8da302de79c8b9382fee3e76b355e44d2a4364bb207159ce10b517bd716"}, - {file = "jsonschema-4.25.0.tar.gz", hash = "sha256:e63acf5c11762c0e6672ffb61482bdf57f0876684d8d249c0fe2d730d48bc55f"}, + {file = "jsonschema-4.24.0-py3-none-any.whl", hash = "sha256:a462455f19f5faf404a7902952b6f0e3ce868f3ee09a359b05eca6673bd8412d"}, + {file = "jsonschema-4.24.0.tar.gz", hash = "sha256:0b4e8069eb12aedfa881333004bccaec24ecef5a8a6a4b6df142b2cc9599d196"}, ] [package.dependencies] attrs = ">=22.2.0" -fqdn = {version = "*", optional = true, markers = "extra == \"format\""} -idna = {version = "*", optional = true, markers = "extra == \"format\""} -isoduration = {version = "*", optional = true, markers = "extra == 
\"format\""} -jsonpointer = {version = ">1.13", optional = true, markers = "extra == \"format\""} jsonschema-specifications = ">=2023.03.6" referencing = ">=0.28.4" -rfc3339-validator = {version = "*", optional = true, markers = "extra == \"format\""} -rfc3987 = {version = "*", optional = true, markers = "extra == \"format\""} rpds-py = ">=0.7.1" -uri-template = {version = "*", optional = true, markers = "extra == \"format\""} -webcolors = {version = ">=1.11", optional = true, markers = "extra == \"format\""} [package.extras] format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"] -format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "rfc3987-syntax (>=1.1.0)", "uri-template", "webcolors (>=24.6.0)"] +format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=24.6.0)"] [[package]] name = "jsonschema-path" @@ -1542,7 +1367,7 @@ version = "2025.4.1" description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry" optional = false python-versions = ">=3.9" -groups = ["main", "dev"] +groups = ["dev"] files = [ {file = "jsonschema_specifications-2025.4.1-py3-none-any.whl", hash = "sha256:4653bffbd6584f7de83a67e0d620ef16900b390ddc7939d56684d6c81e33f1af"}, {file = "jsonschema_specifications-2025.4.1.tar.gz", hash = "sha256:630159c9f4dbea161a6a2205c3011cc4f18ff381b189fff48bb39b9bf26ae608"}, @@ -1551,21 +1376,6 @@ files = [ [package.dependencies] referencing = ">=0.31.0" -[[package]] -name = "junit-xml" -version = "1.9" -description = "Creates JUnit XML test result documents that can be read by tools such as Jenkins" -optional = false -python-versions = "*" -groups = ["main"] -files = [ - {file = "junit-xml-1.9.tar.gz", hash = "sha256:de16a051990d4e25a3982b2dd9e89d671067548718866416faec14d9de56db9f"}, - {file = 
"junit_xml-1.9-py2.py3-none-any.whl", hash = "sha256:ec5ca1a55aefdd76d28fcc0b135251d156c7106fa979686a4b48d62b761b4732"}, -] - -[package.dependencies] -six = "*" - [[package]] name = "lazy-object-proxy" version = "1.11.0" @@ -1592,21 +1402,21 @@ files = [ [[package]] name = "localstack" -version = "4.5.0" +version = "4.6.0" description = "LocalStack - A fully functional local Cloud stack" optional = false python-versions = "*" groups = ["dev"] files = [ - {file = "localstack-4.5.0.tar.gz", hash = "sha256:f8ebf3a9af1826c595cfe4196c6d52792152db374e437e1a574ac52aedc53a18"}, + {file = "localstack-4.6.0.tar.gz", hash = "sha256:2b0ba609816241dba507d7d7b20ca44ee598b4c730f6c56bed3e6f5472dc6c7d"}, ] [package.dependencies] localstack-core = "*" -localstack-ext = "4.5.0" +localstack-ext = "4.6.0" [package.extras] -runtime = ["localstack-core[runtime]", "localstack-ext[runtime] (==4.5.0)"] +runtime = ["localstack-core[runtime]", "localstack-ext[runtime] (==4.6.0)"] [[package]] name = "localstack-client" @@ -1627,14 +1437,14 @@ test = ["black", "coverage", "flake8", "isort", "localstack", "pytest"] [[package]] name = "localstack-core" -version = "4.5.0" +version = "4.6.0" description = "The core library and runtime of LocalStack" optional = false python-versions = ">=3.9" groups = ["dev"] files = [ - {file = "localstack_core-4.5.0-py3-none-any.whl", hash = "sha256:ab0099d840ff9e718a268315bd9965152f92241dfaf3124b7eaa1305dd58b0f6"}, - {file = "localstack_core-4.5.0.tar.gz", hash = "sha256:2930c0a67dad7f88d2690c0b8720a8b605774c6edf819593ab0328fd14a3e395"}, + {file = "localstack_core-4.6.0-py3-none-any.whl", hash = "sha256:98c1b8b6406f7b72a3906434dfb9fcf3644ff9191211a607073dd76f6f6e51a4"}, + {file = "localstack_core-4.6.0.tar.gz", hash = "sha256:e324a8ece8acb1b15a8961ea1a103a50e24e61bbbd30e608b9f5a03ff3b0b0ad"}, ] [package.dependencies] @@ -1655,21 +1465,21 @@ semver = ">=2.10" tailer = ">=0.4.1" [package.extras] -base-runtime = ["Werkzeug (>=3.1.3)", "awscrt (>=0.13.14,!=0.27.1)", 
"boto3 (==1.38.27)", "botocore (==1.38.27)", "cbor2 (>=5.5.0)", "dnspython (>=1.16.0)", "docker (>=6.1.1)", "hypercorn (>=0.14.4)", "jsonpatch (>=1.24)", "localstack-twisted (>=23.0)", "openapi-core (>=0.19.2)", "pyopenssl (>=23.0.0)", "readerwriterlock (>=1.0.7)", "requests-aws4auth (>=1.0)", "rolo (>=0.7)", "urllib3 (>=2.0.7)", "xmltodict (>=0.13.0)"] +base-runtime = ["Werkzeug (>=3.1.3)", "awscrt (>=0.13.14,!=0.27.1)", "boto3 (==1.38.46)", "botocore (==1.38.46)", "cbor2 (>=5.5.0)", "dnspython (>=1.16.0)", "docker (>=6.1.1)", "hypercorn (>=0.14.4)", "jsonpatch (>=1.24)", "localstack-twisted (>=23.0)", "openapi-core (>=0.19.2)", "pyopenssl (>=23.0.0)", "readerwriterlock (>=1.0.7)", "requests-aws4auth (>=1.0)", "rolo (>=0.7)", "urllib3 (>=2.0.7)", "xmltodict (>=0.13.0)"] dev = ["Cython", "coveralls (>=3.3.1)", "localstack-core[test]", "mypy", "networkx (>=2.8.4)", "openapi-spec-validator (>=0.7.1)", "pandoc", "pre-commit (>=3.5.0)", "pypandoc", "rstr (>=3.2.0)", "ruff (>=0.3.3)"] -runtime = ["airspeed-ext (>=0.6.3)", "antlr4-python3-runtime (==4.13.2)", "apispec (>=5.1.1)", "aws-sam-translator (>=1.15.1)", "awscli (>=1.37.0)", "crontab (>=0.22.6)", "cryptography (>=41.0.5)", "jpype1-ext (>=0.0.1)", "json5 (>=0.9.11)", "jsonpath-ng (>=1.6.1)", "jsonpath-rw (>=1.4.0)", "kclpy-ext (>=3.0.0)", "localstack-core[base-runtime]", "moto-ext[all] (==5.1.5.post1)", "opensearch-py (>=2.4.1)", "pymongo (>=4.2.0)", "pyopenssl (>=23.0.0)"] +runtime = ["airspeed-ext (>=0.6.3)", "antlr4-python3-runtime (==4.13.2)", "apispec (>=5.1.1)", "aws-sam-translator (>=1.15.1)", "awscli (>=1.37.0)", "crontab (>=0.22.6)", "cryptography (>=41.0.5)", "jpype1-ext (>=0.0.1)", "json5 (>=0.9.11)", "jsonpath-ng (>=1.6.1)", "jsonpath-rw (>=1.4.0)", "kclpy-ext (>=3.0.0)", "localstack-core[base-runtime]", "moto-ext[all] (==5.1.6.post2)", "opensearch-py (>=2.4.1)", "pymongo (>=4.2.0)", "pyopenssl (>=23.0.0)"] test = ["aws-cdk-lib (>=2.88.0)", "coverage[toml] (>=5.5)", "deepdiff (>=6.4.1)", "httpx[http2] 
(>=0.25)", "localstack-core[runtime]", "localstack-snapshot (>=0.1.1)", "pluggy (>=1.3.0)", "pytest (>=7.4.2)", "pytest-httpserver (>=1.1.2)", "pytest-rerunfailures (>=12.0)", "pytest-split (>=0.8.0)", "pytest-tinybird (>=0.5.0)", "websocket-client (>=1.7.0)"] typehint = ["boto3-stubs[acm,acm-pca,amplify,apigateway,apigatewayv2,appconfig,appconfigdata,application-autoscaling,appsync,athena,autoscaling,backup,batch,ce,cloudcontrol,cloudformation,cloudfront,cloudtrail,cloudwatch,codebuild,codecommit,codeconnections,codedeploy,codepipeline,codestar-connections,cognito-identity,cognito-idp,dms,docdb,dynamodb,dynamodbstreams,ec2,ecr,ecs,efs,eks,elasticache,elasticbeanstalk,elbv2,emr,emr-serverless,es,events,firehose,fis,glacier,glue,iam,identitystore,iot,iot-data,iotanalytics,iotwireless,kafka,kinesis,kinesisanalytics,kinesisanalyticsv2,kms,lakeformation,lambda,logs,managedblockchain,mediaconvert,mediastore,mq,mwaa,neptune,opensearch,organizations,pi,pinpoint,pipes,qldb,qldb-session,rds,rds-data,redshift,redshift-data,resource-groups,resourcegroupstaggingapi,route53,route53resolver,s3,s3control,sagemaker,sagemaker-runtime,secretsmanager,serverlessrepo,servicediscovery,ses,sesv2,sns,sqs,ssm,sso-admin,stepfunctions,sts,timestream-query,timestream-write,transcribe,verifiedpermissions,wafv2,xray]", "localstack-core[dev]"] [[package]] name = "localstack-ext" -version = "4.5.0" +version = "4.6.0" description = "Extensions for LocalStack" optional = false python-versions = ">=3.9" groups = ["dev"] files = [ - {file = "localstack_ext-4.5.0.tar.gz", hash = "sha256:7d7c30ce3edbe822a5ff3db063d323ae3360c346d6719c83447fbd9188462556"}, + {file = "localstack_ext-4.6.0.tar.gz", hash = "sha256:7b9fcd712877bd5678b394bb24e8ebdb1df9fb4c669177049c0d97e005c8615c"}, ] [package.dependencies] @@ -1677,7 +1487,7 @@ build = "*" dill = ">=0.3.2" dnslib = ">=0.9.10" dnspython = ">=1.16.0" -localstack-core = "4.5.0" +localstack-core = "4.6.0" packaging = "*" plux = ">=1.10.0" PyJWT = {version = 
">=1.7.0", extras = ["crypto"]} @@ -1689,8 +1499,8 @@ windows-curses = {version = "*", markers = "platform_system == \"Windows\""} [package.extras] package = ["python-minifier (<2.11.3)"] -runtime = ["Whoosh (>=2.7.4)", "amazon.ion (>=0.9.3)", "avro (>=1.11.0)", "aws-encryption-sdk (>=3.1.0)", "aws-json-term-matcher (>=0.1.5)", "cedarpy (>=4.1.0)", "confluent-kafka", "dirtyjson (>=1.0.7)", "distro", "dulwich (>=0.19.16)", "graphql-core (>=3.0.3)", "janus (>=0.5.0)", "jsonpatch (>=1.32)", "kafka-python", "kubernetes (>=21.7.0)", "libvirt-python", "localstack-core[runtime] (==4.5.0)", "mysql-replication", "paho-mqtt (>=1.5)", "parquet[snappy] (>=1.3.1)", "parse (>=1.19.0)", "pg8000 (>=1.10)", "postgres (>=2.2.2)", "postgresql-proxy (>=0.2.0)", "pproxy-ext (>=2.7.9)", "presto-python-client (>=0.7.0)", "pure-sasl (>=0.6.2)", "pycdlib (>=1.14.0)", "pycognito (>=2024.5.1)", "pyftpdlib (>=1.5.6)", "pyhive[hive-pure-sasl] (>=0.7.0)", "pyion2json (>=0.0.2)", "pymysql", "pyqldb (>=3.2,<4.0)", "python-dxf (>=12.1.1)", "python-snappy (>=0.6)", "readerwriterlock (>=1.0.7)", "redis (>=5.0)", "rsa (>=4.0)", "sql-metadata (>=2.6.0)", "sqlglot[rs]", "srp-ext (>=1.0.7.1)", "testing.common.database (>=1.1.0)", "thrift (>=0.10.0)", "thrift_sasl (>=0.1.0)", "tornado (>=6.0)", "websockets (>=8.1,<14)"] -test = ["PyAthena[pandas]", "aiohttp", "async-timeout", "aws-cdk-lib (>=2.88.0)", "aws-cdk.aws-cognito-identitypool-alpha", "aws_cdk.aws_neptune_alpha", "aws_cdk.aws_redshift_alpha", "aws_xray_sdk (>=2.4.2)", "awsiotsdk", "awsiotsdk", "awswrangler (>=3.5.2)", "coverage[toml] (>=5.0.0)", "deepdiff (>=5.5.0)", "gremlinpython", "jws (>=0.1.3)", "localstack-core[test] (==4.5.0)", "localstack-ext[runtime]", "msal", "msal-extensions", "msrest", "mysql-connector-python", "neo4j", "nest-asyncio (>=1.4.1)", "paramiko", "playwright", "portalocker", "pre-commit (>=3.5.0)", "pyarrow", "pymongo", "pymssql (>=2.2.8)", "pytest-httpserver (>=1.0.1)", "pytest-instafail (>=0.4.2)", "pytest-mock 
(>=3.14.0)", "pytest-playwright", "python-terraform", "redshift_connector", "ruff (>=0.1.0)", "stomp.py (>=8.0.1)"] +runtime = ["Whoosh (>=2.7.4)", "amazon.ion (>=0.9.3)", "avro (>=1.11.0)", "aws-encryption-sdk (>=3.1.0)", "aws-json-term-matcher (>=0.1.5)", "cedarpy (>=4.1.0)", "confluent-kafka", "dirtyjson (>=1.0.7)", "distro", "dulwich (>=0.19.16)", "graphql-core (>=3.0.3)", "janus (>=0.5.0)", "javascript", "jsonpatch (>=1.32)", "kafka-python", "kubernetes (>=21.7.0)", "libvirt-python", "localstack-core[runtime] (==4.6.0)", "mysql-replication", "paho-mqtt (>=1.5)", "parquet[snappy] (>=1.3.1)", "parse (>=1.19.0)", "pg8000 (>=1.10)", "postgres (>=2.2.2)", "postgresql-proxy (>=0.2.0)", "pproxy-ext (>=2.7.9)", "presto-python-client (>=0.7.0)", "pure-sasl (>=0.6.2)", "pycdlib (>=1.14.0)", "pycognito (>=2024.5.1)", "pyftpdlib (>=1.5.6)", "pyhive[hive-pure-sasl] (>=0.7.0)", "pyion2json (>=0.0.2)", "pymysql", "pyqldb (>=3.2,<4.0)", "python-dxf (>=12.1.1)", "python-snappy (>=0.6)", "readerwriterlock (>=1.0.7)", "redis (>=5.0)", "rsa (>=4.0)", "sql-metadata (>=2.6.0)", "sqlglot[rs]", "srp-ext (>=1.0.7.1)", "testing.common.database (>=1.1.0)", "thrift (>=0.10.0)", "thrift_sasl (>=0.1.0)", "tornado (>=6.0)", "websockets (>=8.1,<14)"] +test = ["PyAthena[pandas]", "aiohttp", "async-timeout", "aws-cdk-lib (>=2.88.0)", "aws-cdk.aws-cognito-identitypool-alpha", "aws_cdk.aws_neptune_alpha", "aws_cdk.aws_redshift_alpha", "aws_xray_sdk (>=2.4.2)", "awsiotsdk", "awsiotsdk", "awswrangler (>=3.5.2)", "coverage[toml] (>=5.0.0)", "deepdiff (>=5.5.0)", "gremlinpython", "jws (>=0.1.3)", "localstack-core[test] (==4.6.0)", "localstack-ext[runtime]", "msal", "msal-extensions", "msrest", "mysql-connector-python", "neo4j", "nest-asyncio (>=1.4.1)", "paramiko", "playwright", "portalocker", "pre-commit (>=3.5.0)", "pyarrow", "pymongo", "pymssql (>=2.2.8)", "pytest-httpserver (>=1.0.1)", "pytest-instafail (>=0.4.2)", "pytest-mock (>=3.14.0)", "pytest-playwright", "python-terraform", 
"redshift_connector", "ruff (>=0.1.0)", "stomp.py (>=8.0.1)"] typehint = ["boto3-stubs[acm,amplify,apigateway,apigatewayv2,appconfig,appsync,athena,autoscaling,backup,batch,bedrock,bedrock-runtime,ce,cloudcontrol,cloudformation,cloudfront,cloudtrail,cloudwatch,codecommit,cognito-identity,cognito-idp,dms,docdb,dynamodb,dynamodbstreams,ec2,ecr,ecs,efs,eks,elasticache,elasticbeanstalk,elbv2,emr,emr-serverless,es,events,firehose,fis,glacier,glue,iam,iot,iot-data,iotanalytics,iotwireless,kafka,kinesis,kinesisanalytics,kinesisanalyticsv2,kms,lakeformation,lambda,logs,mediaconvert,mediastore,mq,mwaa,neptune,opensearch,organizations,pi,qldb,qldb-session,rds,rds-data,redshift,redshift-data,resource-groups,resourcegroupstaggingapi,route53,route53resolver,s3,s3control,sagemaker,sagemaker-runtime,secretsmanager,serverlessrepo,servicediscovery,ses,sesv2,sns,sqs,ssm,sso-admin,stepfunctions,sts,timestream-query,timestream-write,transcribe,xray]", "localstack-ext[test]"] [[package]] @@ -1779,8 +1589,11 @@ files = [ {file = "lxml-5.4.0-cp36-cp36m-win_amd64.whl", hash = "sha256:7ce1a171ec325192c6a636b64c94418e71a1964f56d002cc28122fceff0b6121"}, {file = "lxml-5.4.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:795f61bcaf8770e1b37eec24edf9771b307df3af74d1d6f27d812e15a9ff3872"}, {file = "lxml-5.4.0-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:29f451a4b614a7b5b6c2e043d7b64a15bd8304d7e767055e8ab68387a8cacf4e"}, + {file = "lxml-5.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:891f7f991a68d20c75cb13c5c9142b2a3f9eb161f1f12a9489c82172d1f133c0"}, {file = "lxml-5.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4aa412a82e460571fad592d0f93ce9935a20090029ba08eca05c614f99b0cc92"}, + {file = "lxml-5.4.0-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:ac7ba71f9561cd7d7b55e1ea5511543c0282e2b6450f122672a2694621d63b7e"}, {file = 
"lxml-5.4.0-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:c5d32f5284012deaccd37da1e2cd42f081feaa76981f0eaa474351b68df813c5"}, + {file = "lxml-5.4.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:ce31158630a6ac85bddd6b830cffd46085ff90498b397bd0a259f59d27a12188"}, {file = "lxml-5.4.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:31e63621e073e04697c1b2d23fcb89991790eef370ec37ce4d5d469f40924ed6"}, {file = "lxml-5.4.0-cp37-cp37m-win32.whl", hash = "sha256:be2ba4c3c5b7900246a8f866580700ef0d538f2ca32535e991027bdaba944063"}, {file = "lxml-5.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:09846782b1ef650b321484ad429217f5154da4d6e786636c38e434fa32e94e49"}, @@ -1860,7 +1673,7 @@ version = "3.0.0" description = "Python port of markdown-it. Markdown parsing, done right!" optional = false python-versions = ">=3.8" -groups = ["main", "dev"] +groups = ["dev"] files = [ {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"}, {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"}, @@ -1956,7 +1769,7 @@ version = "0.1.2" description = "Markdown URL utilities" optional = false python-versions = ">=3.7" -groups = ["main", "dev"] +groups = ["dev"] files = [ {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, @@ -1964,14 +1777,14 @@ files = [ [[package]] name = "moto" -version = "5.1.6" +version = "5.1.9" description = "A library that allows you to easily mock out tests based on AWS infrastructure" optional = false python-versions = ">=3.9" groups = ["dev"] files = [ - {file = "moto-5.1.6-py3-none-any.whl", hash = "sha256:e4a3092bc8fe9139caa77cd34cdcbad804de4d9671e2270ea3b4d53f5c645047"}, - {file = "moto-5.1.6.tar.gz", hash = 
"sha256:baf7afa9d4a92f07277b29cf466d0738f25db2ed2ee12afcb1dc3f2c540beebd"}, + {file = "moto-5.1.9-py3-none-any.whl", hash = "sha256:e9ba7e4764a6088ccc34e3cc846ae719861ca202409fa865573de40a3e805b9b"}, + {file = "moto-5.1.9.tar.gz", hash = "sha256:0c4f0387b06b5d24c0ce90f8f89f31a565cc05789189c5d59b5df02594f2e371"}, ] [package.dependencies] @@ -2196,7 +2009,7 @@ version = "25.0" description = "Core utilities for Python packages" optional = false python-versions = ">=3.8" -groups = ["main", "dev"] +groups = ["dev"] files = [ {file = "packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484"}, {file = "packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f"}, @@ -2253,7 +2066,7 @@ version = "1.6.0" description = "plugin and hook calling mechanisms for python" optional = false python-versions = ">=3.9" -groups = ["main", "dev"] +groups = ["dev"] files = [ {file = "pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746"}, {file = "pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3"}, @@ -2315,32 +2128,6 @@ odmantic = ["odmantic (<1.0.0)", "pydantic[email]"] pydantic = ["pydantic[email] (>=1.10)"] sqlalchemy = ["sqlalchemy (>=1.4.29)"] -[[package]] -name = "prance" -version = "25.4.8.0" -description = "Resolving Swagger/OpenAPI 2.0 and 3.0.0 Parser" -optional = false -python-versions = ">=3.10" -groups = ["main"] -files = [ - {file = "prance-25.4.8.0-py3-none-any.whl", hash = "sha256:d3c362036d625b12aeee495621cb1555fd50b2af3632af3d825176bfb50e073b"}, - {file = "prance-25.4.8.0.tar.gz", hash = "sha256:2f72d2983d0474b6f53fd604eb21690c1ebdb00d79a6331b7ec95fb4f25a1f65"}, -] - -[package.dependencies] -chardet = ">=5.2" -packaging = ">=24.2" -requests = ">=2.32.3" -"ruamel.yaml" = ">=0.18.10" - -[package.extras] -cli = ["click (>=8.1.8)"] -dev = ["bumpversion 
(>=0.6.0)", "pytest (>=8.3.5)", "pytest-cov (>=6.0)", "sphinx (>=8.1.3)", "towncrier (>=24.8)", "tox (>=4.23.2)"] -flex = ["flex (>=6.14.1,<6.15.0)"] -icu = ["PyICU (>=2.14,<3.0)"] -osv = ["openapi-spec-validator (>=0.7.1,<0.8.0)"] -ssv = ["swagger-spec-validator (>=3.0.4,<3.1.0)"] - [[package]] name = "propcache" version = "0.3.2" @@ -2713,7 +2500,7 @@ version = "2.19.1" description = "Pygments is a syntax highlighting package written in Python." optional = false python-versions = ">=3.8" -groups = ["main", "dev"] +groups = ["dev"] files = [ {file = "pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c"}, {file = "pygments-2.19.1.tar.gz", hash = "sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f"}, @@ -2788,32 +2575,16 @@ files = [ {file = "pyproject_hooks-1.2.0.tar.gz", hash = "sha256:1e859bd5c40fae9448642dd871adf459e5e2084186e8d2c2a79a824c970da1f8"}, ] -[[package]] -name = "pyrate-limiter" -version = "3.9.0" -description = "Python Rate-Limiter using Leaky-Bucket Algorithm" -optional = false -python-versions = "<4.0,>=3.8" -groups = ["main"] -files = [ - {file = "pyrate_limiter-3.9.0-py3-none-any.whl", hash = "sha256:77357840c8cf97a36d67005d4e090787043f54000c12c2b414ff65657653e378"}, - {file = "pyrate_limiter-3.9.0.tar.gz", hash = "sha256:6b882e2c77cda07a241d3730975daea4258344b39c878f1dd8849df73f70b0ce"}, -] - -[package.extras] -all = ["filelock (>=3.0)", "psycopg[pool] (>=3.1.18,<4.0.0)", "redis (>=5.0.0,<6.0.0)"] -docs = ["furo (>=2022.3.4,<2023.0.0)", "myst-parser (>=0.17)", "sphinx (>=4.3.0,<5.0.0)", "sphinx-autodoc-typehints (>=1.17,<2.0)", "sphinx-copybutton (>=0.5)", "sphinxcontrib-apidoc (>=0.3,<0.4)"] - [[package]] name = "pyright" -version = "1.1.402" +version = "1.1.403" description = "Command line wrapper for pyright" optional = false python-versions = ">=3.7" groups = ["dev"] files = [ - {file = "pyright-1.1.402-py3-none-any.whl", hash = 
"sha256:2c721f11869baac1884e846232800fe021c33f1b4acb3929cff321f7ea4e2982"}, - {file = "pyright-1.1.402.tar.gz", hash = "sha256:85a33c2d40cd4439c66aa946fd4ce71ab2f3f5b8c22ce36a623f59ac22937683"}, + {file = "pyright-1.1.403-py3-none-any.whl", hash = "sha256:c0eeca5aa76cbef3fcc271259bbd785753c7ad7bcac99a9162b4c4c7daed23b3"}, + {file = "pyright-1.1.403.tar.gz", hash = "sha256:3ab69b9f41c67fb5bbb4d7a36243256f0d549ed3608678d381d5f51863921104"}, ] [package.dependencies] @@ -2831,7 +2602,7 @@ version = "8.4.1" description = "pytest: simple powerful testing with Python" optional = false python-versions = ">=3.9" -groups = ["main", "dev"] +groups = ["dev"] files = [ {file = "pytest-8.4.1-py3-none-any.whl", hash = "sha256:539c70ba6fcead8e78eebbf1115e8b589e7565830d7d006a8723f19ac8a0afb7"}, {file = "pytest-8.4.1.tar.gz", hash = "sha256:7c67fd69174877359ed9371ec3af8a3d2b04741818c51e5e99cc1742251fa93c"}, @@ -2849,14 +2620,14 @@ dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "requests [[package]] name = "pytest-asyncio" -version = "1.0.0" +version = "1.1.0" description = "Pytest support for asyncio" optional = false python-versions = ">=3.9" groups = ["dev"] files = [ - {file = "pytest_asyncio-1.0.0-py3-none-any.whl", hash = "sha256:4f024da9f1ef945e680dc68610b52550e36590a67fd31bb3b4943979a1f90ef3"}, - {file = "pytest_asyncio-1.0.0.tar.gz", hash = "sha256:d15463d13f4456e1ead2594520216b225a16f781e144f8fdf6c5bb4667c48b3f"}, + {file = "pytest_asyncio-1.1.0-py3-none-any.whl", hash = "sha256:5fe2d69607b0bd75c656d1211f969cadba035030156745ee09e7d71740e58ecf"}, + {file = "pytest_asyncio-1.1.0.tar.gz", hash = "sha256:796aa822981e01b68c12e4827b8697108f7205020f24b5793b3c41555dab68ea"}, ] [package.dependencies] @@ -2888,14 +2659,14 @@ testing = ["fields", "hunter", "process-tests", "pytest-xdist", "virtualenv"] [[package]] name = "pytest-docker" -version = "3.2.2" +version = "3.2.3" description = "Simple pytest fixtures for Docker and Docker Compose based tests" 
optional = false python-versions = ">=3.8" groups = ["dev"] files = [ - {file = "pytest_docker-3.2.2-py3-none-any.whl", hash = "sha256:2926033d48a10de611070fce17f6e67b9e81af2d8ccc59debbbf39872b8ebef9"}, - {file = "pytest_docker-3.2.2.tar.gz", hash = "sha256:58ce79f3173209634bfff8ccaed2ce5593463d5272325c912e1b52a53154f452"}, + {file = "pytest_docker-3.2.3-py3-none-any.whl", hash = "sha256:f973c35e6f2b674c8fc87e8b3354b02c15866a21994c0841a338c240a05de1eb"}, + {file = "pytest_docker-3.2.3.tar.gz", hash = "sha256:26a1c711d99ef01e86e7c9c007f69641552c1554df4fccb065b35581cca24206"}, ] [package.dependencies] @@ -2949,22 +2720,6 @@ toml = ">=0.10.2,<0.11.0" typing-extensions = ">=4.11.0,<5.0.0" wheel = ">=0.37.1,<0.39.0" -[[package]] -name = "pytest-subtests" -version = "0.14.2" -description = "unittest subTest() support and subtests fixture" -optional = false -python-versions = ">=3.9" -groups = ["main"] -files = [ - {file = "pytest_subtests-0.14.2-py3-none-any.whl", hash = "sha256:8da0787c994ab372a13a0ad7d390533ad2e4385cac167b3ac501258c885d0b66"}, - {file = "pytest_subtests-0.14.2.tar.gz", hash = "sha256:7154a8665fd528ee70a76d00216a44d139dc3c9c83521a0f779f7b0ad4f800de"}, -] - -[package.dependencies] -attrs = ">=19.2.0" -pytest = ">=7.4" - [[package]] name = "python-dateutil" version = "2.9.0.post0" @@ -3016,7 +2771,7 @@ version = "6.0.2" description = "YAML parser and emitter for Python" optional = false python-versions = ">=3.8" -groups = ["main", "dev"] +groups = ["dev"] files = [ {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, @@ -3079,7 +2834,7 @@ version = "0.36.2" description = "JSON Referencing + Python" optional = false python-versions = ">=3.9" -groups = ["main", "dev"] +groups = ["dev"] files = [ {file = 
"referencing-0.36.2-py3-none-any.whl", hash = "sha256:e8699adbbf8b5c7de96d8ffa0eb5c158b3beafce084968e2ea8bb08c6794dcd0"}, {file = "referencing-0.36.2.tar.gz", hash = "sha256:df2e89862cd09deabbdba16944cc3f10feb6b3e6f18e902f7cc25609a34775aa"}, @@ -3095,7 +2850,7 @@ version = "2.32.4" description = "Python HTTP for Humans." optional = false python-versions = ">=3.8" -groups = ["main", "dev"] +groups = ["dev"] files = [ {file = "requests-2.32.4-py3-none-any.whl", hash = "sha256:27babd3cda2a6d50b30443204ee89830707d396671944c998b5975b031ac2b2c"}, {file = "requests-2.32.4.tar.gz", hash = "sha256:27d0316682c8a29834d3264820024b62a36942083d52caf2f14c0591336d3422"}, @@ -3137,7 +2892,7 @@ version = "0.1.4" description = "A pure python RFC3339 validator" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" -groups = ["main", "dev"] +groups = ["dev"] files = [ {file = "rfc3339_validator-0.1.4-py2.py3-none-any.whl", hash = "sha256:24f6ec1eda14ef823da9e36ec7113124b39c04d50a4d3d3a3c2859577e7791fa"}, {file = "rfc3339_validator-0.1.4.tar.gz", hash = "sha256:138a2abdf93304ad60530167e51d2dfb9549521a836871b88d7f4695d0022f6b"}, @@ -3146,25 +2901,13 @@ files = [ [package.dependencies] six = "*" -[[package]] -name = "rfc3987" -version = "1.3.8" -description = "Parsing and validation of URIs (RFC 3986) and IRIs (RFC 3987)" -optional = false -python-versions = "*" -groups = ["main"] -files = [ - {file = "rfc3987-1.3.8-py2.py3-none-any.whl", hash = "sha256:10702b1e51e5658843460b189b185c0366d2cf4cff716f13111b0ea9fd2dce53"}, - {file = "rfc3987-1.3.8.tar.gz", hash = "sha256:d3c4d257a560d544e9826b38bc81db676890c79ab9d7ac92b39c7a253d5ca733"}, -] - [[package]] name = "rich" version = "14.0.0" description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" optional = false python-versions = ">=3.8.0" -groups = ["main", "dev"] +groups = ["dev"] files = [ {file = "rich-14.0.0-py3-none-any.whl", hash = 
"sha256:1c9491e1951aac09caffd42f448ee3d04e58923ffe14993f6e83068dc395d7e0"}, {file = "rich-14.0.0.tar.gz", hash = "sha256:82f1bc23a6a21ebca4ae0c45af9bdbc492ed20231dcb63f297d6d1021a9d5725"}, @@ -3183,7 +2926,7 @@ version = "0.25.1" description = "Python bindings to Rust's persistent data structures (rpds)" optional = false python-versions = ">=3.9" -groups = ["main", "dev"] +groups = ["dev"] files = [ {file = "rpds_py-0.25.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:f4ad628b5174d5315761b67f212774a32f5bad5e61396d38108bd801c0a8f5d9"}, {file = "rpds_py-0.25.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8c742af695f7525e559c16f1562cf2323db0e3f0fbdcabdf6865b095256b2d40"}, @@ -3319,82 +3062,6 @@ files = [ [package.dependencies] pyasn1 = ">=0.1.3" -[[package]] -name = "ruamel-yaml" -version = "0.18.14" -description = "ruamel.yaml is a YAML parser/emitter that supports roundtrip preservation of comments, seq/map flow style, and map key order" -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "ruamel.yaml-0.18.14-py3-none-any.whl", hash = "sha256:710ff198bb53da66718c7db27eec4fbcc9aa6ca7204e4c1df2f282b6fe5eb6b2"}, - {file = "ruamel.yaml-0.18.14.tar.gz", hash = "sha256:7227b76aaec364df15936730efbf7d72b30c0b79b1d578bbb8e3dcb2d81f52b7"}, -] - -[package.dependencies] -"ruamel.yaml.clib" = {version = ">=0.2.7", markers = "platform_python_implementation == \"CPython\" and python_version < \"3.14\""} - -[package.extras] -docs = ["mercurial (>5.7)", "ryd"] -jinja2 = ["ruamel.yaml.jinja2 (>=0.2)"] - -[[package]] -name = "ruamel-yaml-clib" -version = "0.2.12" -description = "C version of reader, parser and emitter for ruamel.yaml derived from libyaml" -optional = false -python-versions = ">=3.9" -groups = ["main"] -markers = "platform_python_implementation == \"CPython\" and python_version < \"3.14\"" -files = [ - {file = "ruamel.yaml.clib-0.2.12-cp310-cp310-macosx_13_0_arm64.whl", hash = 
"sha256:11f891336688faf5156a36293a9c362bdc7c88f03a8a027c2c1d8e0bcde998e5"}, - {file = "ruamel.yaml.clib-0.2.12-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:a606ef75a60ecf3d924613892cc603b154178ee25abb3055db5062da811fd969"}, - {file = "ruamel.yaml.clib-0.2.12-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd5415dded15c3822597455bc02bcd66e81ef8b7a48cb71a33628fc9fdde39df"}, - {file = "ruamel.yaml.clib-0.2.12-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f66efbc1caa63c088dead1c4170d148eabc9b80d95fb75b6c92ac0aad2437d76"}, - {file = "ruamel.yaml.clib-0.2.12-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:22353049ba4181685023b25b5b51a574bce33e7f51c759371a7422dcae5402a6"}, - {file = "ruamel.yaml.clib-0.2.12-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:932205970b9f9991b34f55136be327501903f7c66830e9760a8ffb15b07f05cd"}, - {file = "ruamel.yaml.clib-0.2.12-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a52d48f4e7bf9005e8f0a89209bf9a73f7190ddf0489eee5eb51377385f59f2a"}, - {file = "ruamel.yaml.clib-0.2.12-cp310-cp310-win32.whl", hash = "sha256:3eac5a91891ceb88138c113f9db04f3cebdae277f5d44eaa3651a4f573e6a5da"}, - {file = "ruamel.yaml.clib-0.2.12-cp310-cp310-win_amd64.whl", hash = "sha256:ab007f2f5a87bd08ab1499bdf96f3d5c6ad4dcfa364884cb4549aa0154b13a28"}, - {file = "ruamel.yaml.clib-0.2.12-cp311-cp311-macosx_13_0_arm64.whl", hash = "sha256:4a6679521a58256a90b0d89e03992c15144c5f3858f40d7c18886023d7943db6"}, - {file = "ruamel.yaml.clib-0.2.12-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:d84318609196d6bd6da0edfa25cedfbabd8dbde5140a0a23af29ad4b8f91fb1e"}, - {file = "ruamel.yaml.clib-0.2.12-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb43a269eb827806502c7c8efb7ae7e9e9d0573257a46e8e952f4d4caba4f31e"}, - {file = "ruamel.yaml.clib-0.2.12-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", 
hash = "sha256:811ea1594b8a0fb466172c384267a4e5e367298af6b228931f273b111f17ef52"}, - {file = "ruamel.yaml.clib-0.2.12-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:cf12567a7b565cbf65d438dec6cfbe2917d3c1bdddfce84a9930b7d35ea59642"}, - {file = "ruamel.yaml.clib-0.2.12-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7dd5adc8b930b12c8fc5b99e2d535a09889941aa0d0bd06f4749e9a9397c71d2"}, - {file = "ruamel.yaml.clib-0.2.12-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1492a6051dab8d912fc2adeef0e8c72216b24d57bd896ea607cb90bb0c4981d3"}, - {file = "ruamel.yaml.clib-0.2.12-cp311-cp311-win32.whl", hash = "sha256:bd0a08f0bab19093c54e18a14a10b4322e1eacc5217056f3c063bd2f59853ce4"}, - {file = "ruamel.yaml.clib-0.2.12-cp311-cp311-win_amd64.whl", hash = "sha256:a274fb2cb086c7a3dea4322ec27f4cb5cc4b6298adb583ab0e211a4682f241eb"}, - {file = "ruamel.yaml.clib-0.2.12-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:20b0f8dc160ba83b6dcc0e256846e1a02d044e13f7ea74a3d1d56ede4e48c632"}, - {file = "ruamel.yaml.clib-0.2.12-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:943f32bc9dedb3abff9879edc134901df92cfce2c3d5c9348f172f62eb2d771d"}, - {file = "ruamel.yaml.clib-0.2.12-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95c3829bb364fdb8e0332c9931ecf57d9be3519241323c5274bd82f709cebc0c"}, - {file = "ruamel.yaml.clib-0.2.12-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:749c16fcc4a2b09f28843cda5a193e0283e47454b63ec4b81eaa2242f50e4ccd"}, - {file = "ruamel.yaml.clib-0.2.12-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bf165fef1f223beae7333275156ab2022cffe255dcc51c27f066b4370da81e31"}, - {file = "ruamel.yaml.clib-0.2.12-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:32621c177bbf782ca5a18ba4d7af0f1082a3f6e517ac2a18b3974d4edf349680"}, - {file = "ruamel.yaml.clib-0.2.12-cp312-cp312-musllinux_1_2_aarch64.whl", hash = 
"sha256:b82a7c94a498853aa0b272fd5bc67f29008da798d4f93a2f9f289feb8426a58d"}, - {file = "ruamel.yaml.clib-0.2.12-cp312-cp312-win32.whl", hash = "sha256:e8c4ebfcfd57177b572e2040777b8abc537cdef58a2120e830124946aa9b42c5"}, - {file = "ruamel.yaml.clib-0.2.12-cp312-cp312-win_amd64.whl", hash = "sha256:0467c5965282c62203273b838ae77c0d29d7638c8a4e3a1c8bdd3602c10904e4"}, - {file = "ruamel.yaml.clib-0.2.12-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:4c8c5d82f50bb53986a5e02d1b3092b03622c02c2eb78e29bec33fd9593bae1a"}, - {file = "ruamel.yaml.clib-0.2.12-cp313-cp313-manylinux2014_aarch64.whl", hash = "sha256:e7e3736715fbf53e9be2a79eb4db68e4ed857017344d697e8b9749444ae57475"}, - {file = "ruamel.yaml.clib-0.2.12-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b7e75b4965e1d4690e93021adfcecccbca7d61c7bddd8e22406ef2ff20d74ef"}, - {file = "ruamel.yaml.clib-0.2.12-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:96777d473c05ee3e5e3c3e999f5d23c6f4ec5b0c38c098b3a5229085f74236c6"}, - {file = "ruamel.yaml.clib-0.2.12-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:3bc2a80e6420ca8b7d3590791e2dfc709c88ab9152c00eeb511c9875ce5778bf"}, - {file = "ruamel.yaml.clib-0.2.12-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:e188d2699864c11c36cdfdada94d781fd5d6b0071cd9c427bceb08ad3d7c70e1"}, - {file = "ruamel.yaml.clib-0.2.12-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4f6f3eac23941b32afccc23081e1f50612bdbe4e982012ef4f5797986828cd01"}, - {file = "ruamel.yaml.clib-0.2.12-cp313-cp313-win32.whl", hash = "sha256:6442cb36270b3afb1b4951f060eccca1ce49f3d087ca1ca4563a6eb479cb3de6"}, - {file = "ruamel.yaml.clib-0.2.12-cp313-cp313-win_amd64.whl", hash = "sha256:e5b8daf27af0b90da7bb903a876477a9e6d7270be6146906b276605997c7e9a3"}, - {file = "ruamel.yaml.clib-0.2.12-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:fc4b630cd3fa2cf7fce38afa91d7cfe844a9f75d7f0f36393fa98815e911d987"}, - {file = 
"ruamel.yaml.clib-0.2.12-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:bc5f1e1c28e966d61d2519f2a3d451ba989f9ea0f2307de7bc45baa526de9e45"}, - {file = "ruamel.yaml.clib-0.2.12-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a0e060aace4c24dcaf71023bbd7d42674e3b230f7e7b97317baf1e953e5b519"}, - {file = "ruamel.yaml.clib-0.2.12-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e2f1c3765db32be59d18ab3953f43ab62a761327aafc1594a2a1fbe038b8b8a7"}, - {file = "ruamel.yaml.clib-0.2.12-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:d85252669dc32f98ebcd5d36768f5d4faeaeaa2d655ac0473be490ecdae3c285"}, - {file = "ruamel.yaml.clib-0.2.12-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e143ada795c341b56de9418c58d028989093ee611aa27ffb9b7f609c00d813ed"}, - {file = "ruamel.yaml.clib-0.2.12-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2c59aa6170b990d8d2719323e628aaf36f3bfbc1c26279c0eeeb24d05d2d11c7"}, - {file = "ruamel.yaml.clib-0.2.12-cp39-cp39-win32.whl", hash = "sha256:beffaed67936fbbeffd10966a4eb53c402fafd3d6833770516bf7314bc6ffa12"}, - {file = "ruamel.yaml.clib-0.2.12-cp39-cp39-win_amd64.whl", hash = "sha256:040ae85536960525ea62868b642bdb0c2cc6021c9f9d507810c0c604e66f5a7b"}, - {file = "ruamel.yaml.clib-0.2.12.tar.gz", hash = "sha256:6c8fbb13ec503f99a91901ab46e0b07ae7941cd527393187039aec586fdfd36f"}, -] - [[package]] name = "ruff" version = "0.11.13" @@ -3441,47 +3108,6 @@ botocore = ">=1.37.4,<2.0a.0" [package.extras] crt = ["botocore[crt] (>=1.37.4,<2.0a.0)"] -[[package]] -name = "schemathesis" -version = "4.0.21" -description = "Property-based testing framework for Open API and GraphQL based apps" -optional = false -python-versions = ">=3.9" -groups = ["main"] -files = [ - {file = "schemathesis-4.0.21-py3-none-any.whl", hash = "sha256:329ad303f0d119e3d8f187e48479027c9de27c6d04cb7112fc15867e5486c08d"}, - {file = "schemathesis-4.0.21.tar.gz", hash = 
"sha256:2004f6b0bba6508ec48377d00d22aca2eeaa0d1fc9613b6f0c8dd1f52c69c1a8"}, -] - -[package.dependencies] -backoff = ">=2.1.2,<3.0" -click = ">=8.0,<9" -colorama = ">=0.4,<1.0" -harfile = ">=0.3.0,<1.0" -httpx = ">=0.22.0,<1.0" -hypothesis = ">=6.108.0,<7" -hypothesis-graphql = ">=0.11.1,<1" -hypothesis-jsonschema = ">=0.23.1,<0.24" -jsonschema = {version = ">=4.18.0,<5.0", extras = ["format"]} -junit-xml = ">=1.9,<2.0" -pyrate-limiter = ">=3.0,<4.0" -pytest = ">=8,<9" -pytest-subtests = ">=0.11,<0.15.0" -pyyaml = ">=5.1,<7.0" -requests = ">=2.22,<3" -rich = ">=13.9.4" -starlette-testclient = ">=0.4.1,<1" -tomli = ">=2.2.1" -typing-extensions = ">=4.12.2" -werkzeug = ">=0.16.0,<4" - -[package.extras] -bench = ["pytest-codspeed (==2.2.1)"] -cov = ["coverage-enable-subprocess", "coverage[toml] (>=5.3)"] -dev = ["aiohttp (>=3.9.1,<4.0)", "coverage (>=6)", "coverage-enable-subprocess", "coverage[toml] (>=5.3)", "fastapi (>=0.86.0)", "flask (>=2.1.1,<3.0)", "hypothesis-openapi (>=0.2,<1) ; python_version >= \"3.10\"", "mkdocs-material", "mkdocstrings[python]", "pydantic (>=1.10.2)", "pytest-asyncio (>=0.18.0,<1.0)", "pytest-codspeed (==2.2.1)", "pytest-httpserver (>=1.0,<2.0)", "pytest-mock (>=3.7.0,<4.0)", "pytest-trio (>=0.8,<1.0)", "pytest-xdist (>=3,<4.0)", "strawberry-graphql[fastapi] (>=0.109.0)", "syrupy (>=2,<5.0)", "tomli-w (>=1.2.0)", "trustme (>=0.9.0,<1.0)"] -docs = ["mkdocs-material", "mkdocstrings[python]"] -tests = ["aiohttp (>=3.9.1,<4.0)", "coverage (>=6)", "fastapi (>=0.86.0)", "flask (>=2.1.1,<3.0)", "hypothesis-openapi (>=0.2,<1) ; python_version >= \"3.10\"", "pydantic (>=1.10.2)", "pytest-asyncio (>=0.18.0,<1.0)", "pytest-httpserver (>=1.0,<2.0)", "pytest-mock (>=3.7.0,<4.0)", "pytest-trio (>=0.8,<1.0)", "pytest-xdist (>=3,<4.0)", "strawberry-graphql[fastapi] (>=0.109.0)", "syrupy (>=2,<5.0)", "tomli-w (>=1.2.0)", "trustme (>=0.9.0,<1.0)"] - [[package]] name = "semver" version = "3.0.4" @@ -3530,18 +3156,6 @@ files = [ {file = 
"sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, ] -[[package]] -name = "sortedcontainers" -version = "2.4.0" -description = "Sorted Containers -- Sorted List, Sorted Dict, Sorted Set" -optional = false -python-versions = "*" -groups = ["main"] -files = [ - {file = "sortedcontainers-2.4.0-py2.py3-none-any.whl", hash = "sha256:a163dcaede0f1c021485e957a39245190e74249897e2ae4b2aa38595db237ee0"}, - {file = "sortedcontainers-2.4.0.tar.gz", hash = "sha256:25caa5a06cc30b6b83d11423433f65d1f9d76c4c6a0c90e3379eaa43b9bfdb88"}, -] - [[package]] name = "soupsieve" version = "2.7" @@ -3575,40 +3189,6 @@ docs = ["furo", "myst-parser", "prometheus-client", "sphinx (>=7.2.2)", "sphinx- tests = ["anyio", "dirty-equals", "pytest"] typing = ["mypy (>=1.4)"] -[[package]] -name = "starlette" -version = "0.47.2" -description = "The little ASGI library that shines." -optional = false -python-versions = ">=3.9" -groups = ["main"] -files = [ - {file = "starlette-0.47.2-py3-none-any.whl", hash = "sha256:c5847e96134e5c5371ee9fac6fdf1a67336d5815e09eb2a01fdb57a351ef915b"}, - {file = "starlette-0.47.2.tar.gz", hash = "sha256:6ae9aa5db235e4846decc1e7b79c4f346adf41e9777aebeb49dfd09bbd7023d8"}, -] - -[package.dependencies] -anyio = ">=3.6.2,<5" - -[package.extras] -full = ["httpx (>=0.27.0,<0.29.0)", "itsdangerous", "jinja2", "python-multipart (>=0.0.18)", "pyyaml"] - -[[package]] -name = "starlette-testclient" -version = "0.4.1" -description = "A backport of Starlette TestClient using requests! 
⏪️" -optional = false -python-versions = ">=3.7" -groups = ["main"] -files = [ - {file = "starlette_testclient-0.4.1-py3-none-any.whl", hash = "sha256:dcf0eb237dc47f062ef5925f98330af46f67e547cb587119c9ae78c17ae6c1d1"}, - {file = "starlette_testclient-0.4.1.tar.gz", hash = "sha256:9e993ffe12fab45606116257813986612262fe15c1bb6dc9e39cc68693ac1fc5"}, -] - -[package.dependencies] -requests = "*" -starlette = ">=0.20.1" - [[package]] name = "tabulate" version = "0.9.0" @@ -3663,60 +3243,6 @@ files = [ {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"}, ] -[[package]] -name = "tomli" -version = "2.2.1" -description = "A lil' TOML parser" -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"}, - {file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"}, - {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a"}, - {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee"}, - {file = "tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e"}, - {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4"}, - {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106"}, - {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = 
"sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8"}, - {file = "tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff"}, - {file = "tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b"}, - {file = "tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea"}, - {file = "tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8"}, - {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192"}, - {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222"}, - {file = "tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77"}, - {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6"}, - {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd"}, - {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e"}, - {file = "tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98"}, - {file = "tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4"}, - {file = "tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = 
"sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7"}, - {file = "tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c"}, - {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13"}, - {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281"}, - {file = "tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272"}, - {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140"}, - {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2"}, - {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744"}, - {file = "tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec"}, - {file = "tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69"}, - {file = "tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc"}, - {file = "tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff"}, -] - -[[package]] -name = "types-python-dateutil" -version = "2.9.0.20250708" -description = "Typing stubs for python-dateutil" -optional = false -python-versions = ">=3.9" -groups = ["main"] -files = [ - {file = "types_python_dateutil-2.9.0.20250708-py3-none-any.whl", hash = 
"sha256:4d6d0cc1cc4d24a2dc3816024e502564094497b713f7befda4d5bc7a8e3fd21f"}, - {file = "types_python_dateutil-2.9.0.20250708.tar.gz", hash = "sha256:ccdbd75dab2d6c9696c350579f34cffe2c281e4c5f27a585b2a2438dd1d5c8ab"}, -] - [[package]] name = "typing-extensions" version = "4.14.0" @@ -3756,21 +3282,6 @@ files = [ {file = "tzdata-2025.2.tar.gz", hash = "sha256:b60a638fcc0daffadf82fe0f57e53d06bdec2f36c4df66280ae79bce6bd6f2b9"}, ] -[[package]] -name = "uri-template" -version = "1.3.0" -description = "RFC 6570 URI Template Processor" -optional = false -python-versions = ">=3.7" -groups = ["main"] -files = [ - {file = "uri-template-1.3.0.tar.gz", hash = "sha256:0e00f8eb65e18c7de20d595a14336e9f337ead580c70934141624b6d1ffdacc7"}, - {file = "uri_template-1.3.0-py3-none-any.whl", hash = "sha256:a44a133ea12d44a0c0f06d7d42a52d71282e77e2f937d8abd5655b8d56fc1363"}, -] - -[package.extras] -dev = ["flake8", "flake8-annotations", "flake8-bandit", "flake8-bugbear", "flake8-commas", "flake8-comprehensions", "flake8-continuation", "flake8-datetimez", "flake8-docstrings", "flake8-import-order", "flake8-literal", "flake8-modern-annotations", "flake8-noqa", "flake8-pyproject", "flake8-requirements", "flake8-typechecking-import", "flake8-use-fstring", "mypy", "pep8-naming", "types-PyYAML"] - [[package]] name = "urllib3" version = "2.5.0" @@ -3789,18 +3300,6 @@ h2 = ["h2 (>=4,<5)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] -[[package]] -name = "webcolors" -version = "24.11.1" -description = "A library for working with the color formats defined by HTML and CSS." 
-optional = false -python-versions = ">=3.9" -groups = ["main"] -files = [ - {file = "webcolors-24.11.1-py3-none-any.whl", hash = "sha256:515291393b4cdf0eb19c155749a096f779f7d909f7cceea072791cb9095b92e9"}, - {file = "webcolors-24.11.1.tar.gz", hash = "sha256:ecb3d768f32202af770477b8b65f318fa4f566c22948673a977b00d589dd80f6"}, -] - [[package]] name = "werkzeug" version = "3.1.3" @@ -3882,7 +3381,7 @@ version = "1.17.2" description = "Module for decorators, wrappers and monkey patching." optional = false python-versions = ">=3.8" -groups = ["dev"] +groups = ["main", "dev"] files = [ {file = "wrapt-1.17.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3d57c572081fed831ad2d26fd430d565b76aa277ed1d30ff4d40670b1c0dd984"}, {file = "wrapt-1.17.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b5e251054542ae57ac7f3fba5d10bfff615b6c2fb09abeb37d2f1463f841ae22"}, @@ -4099,4 +3598,4 @@ propcache = ">=0.2.1" [metadata] lock-version = "2.1" python-versions = "^3.13" -content-hash = "8a3beb3f554d833b708cd02fe37440bb95158f42a995715351e2293e124ad087" +content-hash = "8e6e98116871c55e44eddc6f7312d0e01b268dfdb0b410d55d9a30035ac98a07" diff --git a/pyproject.toml b/pyproject.toml index 9931737ff..2de1b730d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -25,9 +25,9 @@ flask = {extras = ["async"], version = "^3.1.1"} httpx = "^0.28.1" yarl = "^1.18.3" pydantic = "^2.11.7" -asgiref = "^3.8.1" +asgiref = "^3.9.1" boto3 = "^1.37.3" -botocore = "^1.38.40" +botocore = "^1.38.46" eval-type-backport = "^0.2.2" mangum = "^0.19.0" wireup = "^2.0.0" @@ -35,31 +35,29 @@ python-json-logger = "^3.3.0" fhir-resources = "^8.0.0" python-dateutil = "^2.9.0" pyhamcrest = "^2.1.0" -schemathesis = "^4.0.21" -prance = "^25.4.8.0" -jsonschema = "^4.25.0" +aws-xray-sdk = "2.14.0" [tool.poetry.group.dev.dependencies] ruff = "^0.11.13" docopt = "^0.6.2" jsonpath-rw = "^1.4.0" semver = "^3.0.4" -gitpython = "^3.1.44" +gitpython = "^3.1.45" pytest = "^8.4.1" -pytest-asyncio = "^1.0.0" 
+pytest-asyncio = "^1.1.0" pytest-cov = "^6.0.0" pytest-nhsd-apim = "^5.0.0" -aiohttp = "^3.12.13" +aiohttp = "^3.12.15" awscli = "^1.37.24" awscli-local = "^0.22.0" polyfactory = "^2.20.0" -pyright = "^1.1.394" +pyright = "^1.1.403" brunns-matchers = "^2.9.0" -localstack = "^4.1.1" -pytest-docker = "^3.2.0" +localstack = "^4.6.0" +pytest-docker = "^3.2.3" stamina = "^25.1.0" pytest-freezer = "^0.4.9" -moto = "^5.1.5" +moto = "^5.1.9" requests = "^2.31.0" jsonschema = "^4.24.0" behave = "^1.2.6" diff --git a/scripts/config/gitleaks.toml b/scripts/config/gitleaks.toml index 175e20678..66a3d7e94 100644 --- a/scripts/config/gitleaks.toml +++ b/scripts/config/gitleaks.toml @@ -17,4 +17,4 @@ regexes = [ [allowlist] paths = ['''.terraform.lock.hcl''', '''poetry.lock''', '''yarn.lock'''] -stopwords = ['''dummy_key''', '''dummy_secret''', '''192.0.0.1'''] +stopwords = ['''dummy_key''', '''dummy_secret''', '''192.0.0.1''', '''prance = "^25.4.8.0"''', '''25.4.8.0'''] diff --git a/scripts/dependencies.sh b/scripts/dependencies.sh index 1977ab67a..9a35eb4e2 100755 --- a/scripts/dependencies.sh +++ b/scripts/dependencies.sh @@ -2,11 +2,19 @@ set -euo pipefail +# Use the python from PATH (set by setup-python) +PYTHON_BIN="${PYTHON_BIN:-python}" + + if ! [ -x "$(command -v poetry)" ]; then if ! 
[ -x "$(command -v pipx)" ]; then - python -m pip install --user pipx --isolated - python -m pipx ensurepath + $PYTHON_BIN -m pip install --user pipx --isolated + $PYTHON_BIN -m pipx ensurepath fi - pipx install poetry + pipx install poetry --python $PYTHON_BIN fi + +# Ensure poetry uses the correct python environment +poetry env use $PYTHON_BIN + poetry self add poetry-plugin-lambda-build@2.1.0 poetry-plugin-export@1.9.0 diff --git a/src/eligibility_signposting_api/app.py b/src/eligibility_signposting_api/app.py index 9073bb779..ffa3cd14b 100644 --- a/src/eligibility_signposting_api/app.py +++ b/src/eligibility_signposting_api/app.py @@ -1,17 +1,25 @@ import logging +import os from typing import Any import wireup.integration.flask from asgiref.wsgi import WsgiToAsgi +from aws_xray_sdk.core import patch_all from flask import Flask from mangum import Mangum from mangum.types import LambdaContext, LambdaEvent from eligibility_signposting_api import audit, repos, services -from eligibility_signposting_api.config.config import config, init_logging -from eligibility_signposting_api.error_handler import handle_exception +from eligibility_signposting_api.common.error_handler import handle_exception +from eligibility_signposting_api.common.request_validator import validate_request_params +from eligibility_signposting_api.config.config import config +from eligibility_signposting_api.logging.logs_helper import log_request_ids_from_headers +from eligibility_signposting_api.logging.logs_manager import add_lambda_request_id_to_logger, init_logging +from eligibility_signposting_api.logging.tracing_helper import tracing_setup from eligibility_signposting_api.views import eligibility_blueprint -from eligibility_signposting_api.wrapper import validate_request_params + +if os.getenv("ENABLE_XRAY_PATCHING"): + patch_all() init_logging() logger = logging.getLogger(__name__) @@ -23,12 +31,16 @@ def main() -> None: # pragma: no cover app.run(debug=config()["log_level"] == logging.DEBUG) 
+@add_lambda_request_id_to_logger() +@tracing_setup() +@log_request_ids_from_headers() @validate_request_params() def lambda_handler(event: LambdaEvent, context: LambdaContext) -> dict[str, Any]: # pragma: no cover """Run the Flask app as an AWS Lambda.""" app = create_app() app.debug = config()["log_level"] == logging.DEBUG handler = Mangum(WsgiToAsgi(app), lifespan="off") + handler.config["text_mime_types"].append("application/fhir+json") return handler(event, context) diff --git a/src/eligibility_signposting_api/audit/audit_context.py b/src/eligibility_signposting_api/audit/audit_context.py index b52edbe9b..58bb49578 100644 --- a/src/eligibility_signposting_api/audit/audit_context.py +++ b/src/eligibility_signposting_api/audit/audit_context.py @@ -18,14 +18,16 @@ RequestAuditQueryParams, ) from eligibility_signposting_api.audit.audit_service import AuditService -from eligibility_signposting_api.model.eligibility import ( +from eligibility_signposting_api.model.eligibility_status import ( + BestIterationResult, CohortGroupResult, ConditionName, IterationResult, + MatchedActionDetail, + Reason, Status, SuggestedAction, ) -from eligibility_signposting_api.model.rules import CampaignID, CampaignVersion, Iteration, RuleName, RulePriority logger = logging.getLogger(__name__) @@ -59,17 +61,15 @@ def add_request_details(request: Request) -> None: @staticmethod def append_audit_condition( - suggested_actions: list[SuggestedAction] | None, condition_name: ConditionName, - best_results: tuple[Iteration | None, IterationResult | None, dict[str, CohortGroupResult] | None], - campaign_details: tuple[CampaignID | None, CampaignVersion | None], - redirect_rule_details: tuple[RulePriority | None, RuleName | None], + best_iteration_result: BestIterationResult, + action_detail: MatchedActionDetail, + cohort_results: list[CohortGroupResult], ) -> None: - audit_eligibility_cohorts, audit_eligibility_cohort_groups = [], [] - audit_filter_rule, audit_suitability_rule, 
audit_redirect_rule = None, None, None - best_active_iteration = best_results[0] - best_candidate = best_results[1] - best_cohort_results = best_results[2] + audit_eligibility_cohorts, audit_eligibility_cohort_groups, audit_actions = [], [], [] + best_active_iteration = best_iteration_result.active_iteration + best_candidate = best_iteration_result.iteration_result + best_cohort_results = best_iteration_result.cohort_results if best_cohort_results: for cohort_label, result in sorted(best_cohort_results.items(), key=lambda item: item[1].cohort_code): @@ -84,35 +84,53 @@ def append_audit_condition( ) ) - if result.audit_rules and best_candidate: - audit_filter_rule = AuditContext.create_audit_filter_rule(best_candidate, result) - audit_suitability_rule = AuditContext.create_audit_suitability_rule(best_candidate, result) + filter_audit_rules, suitability_audit_rules = [], [] + for result in cohort_results: + if result.status.name == Status.not_eligible.name: + filter_audit_rules.extend(result.audit_rules) + if result.status.name == Status.not_actionable.name: + suitability_audit_rules.extend(result.audit_rules) - if best_candidate and best_candidate.status and best_candidate.status.name == Status.actionable.name: - audit_redirect_rule = AuditRedirectRule( - rule_priority=str(redirect_rule_details[0]), rule_name=redirect_rule_details[1] - ) + audit_filter_rule = AuditContext.create_audit_filter_rule(filter_audit_rules) + audit_suitability_rule = AuditContext.create_audit_suitability_rule(suitability_audit_rules) + + audit_action_rule = AuditContext.add_rule_name_and_priority_to_audit(best_candidate, action_detail) - audit_actions = AuditContext.create_audit_actions(suggested_actions) + audit_actions = AuditContext.create_audit_actions(action_detail.actions) audit_condition = AuditCondition( - campaign_id=campaign_details[0], - campaign_version=campaign_details[1], + campaign_id=best_iteration_result.campaign_id, + 
campaign_version=best_iteration_result.campaign_version, iteration_id=best_active_iteration.id if best_active_iteration else None, iteration_version=best_active_iteration.version if best_active_iteration else None, condition_name=condition_name, status=best_candidate.status.name if best_candidate and best_candidate.status else None, - status_text=best_candidate.status.name if best_candidate and best_candidate.status else None, + status_text=best_candidate.status.get_status_text(condition_name) if best_candidate else None, eligibility_cohorts=audit_eligibility_cohorts, eligibility_cohort_groups=audit_eligibility_cohort_groups, filter_rules=audit_filter_rule, suitability_rules=audit_suitability_rule, - action_rule=audit_redirect_rule, + action_rule=audit_action_rule, actions=audit_actions, ) g.audit_log.response.condition.append(audit_condition) + @staticmethod + def add_rule_name_and_priority_to_audit( + best_candidate: IterationResult | None, + action_detail: MatchedActionDetail, + ) -> AuditRedirectRule | None: + audit_action_rule = None + if best_candidate and best_candidate.status: + if action_detail.rule_priority is None and action_detail.rule_name is None: + audit_action_rule = None + else: + audit_action_rule = AuditRedirectRule( + rule_priority=str(action_detail.rule_priority), rule_name=action_detail.rule_name + ) + return audit_action_rule + @staticmethod def add_response_details(response_id: UUID, last_updated: datetime) -> None: g.audit_log.response.response_id = response_id @@ -142,24 +160,46 @@ def create_audit_actions(suggested_actions: list[SuggestedAction] | None) -> lis return audit_actions @staticmethod - def create_audit_suitability_rule( - best_candidate: IterationResult, result: CohortGroupResult - ) -> AuditSuitabilityRule | None: - audit_suitability_rule = None - if best_candidate.status and best_candidate.status.name == Status.not_actionable.name: - audit_suitability_rule = AuditSuitabilityRule( - 
rule_priority=result.audit_rules[0].rule_priority, - rule_name=result.audit_rules[0].rule_name, - rule_message=result.audit_rules[0].rule_description, + def create_audit_suitability_rule(reasons: list[Reason]) -> list[AuditSuitabilityRule] | None: + unique_reasons = AuditContext.deduplicate_reasons(reasons) + + suitability_audit = [ + AuditSuitabilityRule( + rule_priority=rule.rule_priority, + rule_name=rule.rule_name, + rule_message=rule.rule_description, ) - return audit_suitability_rule + for rule in unique_reasons + ] + + return suitability_audit if suitability_audit else None @staticmethod - def create_audit_filter_rule(best_candidate: IterationResult, result: CohortGroupResult) -> AuditFilterRule | None: - audit_filter_rule = None - if best_candidate.status and best_candidate.status.name == Status.not_eligible.name: - audit_filter_rule = AuditFilterRule( - rule_priority=result.audit_rules[0].rule_priority, - rule_name=result.audit_rules[0].rule_name, - ) - return audit_filter_rule + def create_audit_filter_rule(reasons: list[Reason]) -> list[AuditFilterRule] | None: + unique_reasons = AuditContext.deduplicate_reasons(reasons) + + filter_audit = [ + AuditFilterRule(rule_priority=rule.rule_priority, rule_name=rule.rule_name) for rule in unique_reasons + ] + + return filter_audit if len(filter_audit) > 0 else None + + @staticmethod + def deduplicate_reasons(reasons: list[Reason]) -> list[Reason]: + unique_rule_codes = set() + deduplicated_reasons = [] + + for reason in reasons: + if reason.rule_name not in unique_rule_codes and reason.rule_description: + unique_rule_codes.add(reason.rule_name) + deduplicated_reasons.append( + Reason( + reason.rule_type, + reason.rule_name, + reason.rule_priority, + reason.rule_description, + reason.matcher_matched, + ) + ) + + return deduplicated_reasons diff --git a/src/eligibility_signposting_api/audit/audit_models.py b/src/eligibility_signposting_api/audit/audit_models.py index 17467130f..2f1b0ee2d 100644 --- 
a/src/eligibility_signposting_api/audit/audit_models.py +++ b/src/eligibility_signposting_api/audit/audit_models.py @@ -70,23 +70,23 @@ class AuditAction(CamelCaseBaseModel): class AuditCondition(CamelCaseBaseModel): campaign_id: str | None = None - campaign_version: str | None = None + campaign_version: int | None = None iteration_id: str | None = None - iteration_version: str | None = None + iteration_version: int | None = None condition_name: str | None = None status: str | None = None status_text: str | None = None eligibility_cohorts: list[AuditEligibilityCohorts] | None = None eligibility_cohort_groups: list[AuditEligibilityCohortGroups] | None = None - filter_rules: AuditFilterRule | None = None - suitability_rules: AuditSuitabilityRule | None = None + filter_rules: list[AuditFilterRule] | None = None + suitability_rules: list[AuditSuitabilityRule] | None = None action_rule: AuditRedirectRule | None = None actions: list[AuditAction] | None = Field(default_factory=list) class ResponseAuditData(CamelCaseBaseModel): response_id: UUID | None = None - last_updated: str | None = None + last_updated: datetime | None = None condition: list[AuditCondition] = Field(default_factory=list) diff --git a/src/eligibility_signposting_api/services/rules/__init__.py b/src/eligibility_signposting_api/common/__init__.py similarity index 100% rename from src/eligibility_signposting_api/services/rules/__init__.py rename to src/eligibility_signposting_api/common/__init__.py diff --git a/src/eligibility_signposting_api/common/api-error-response-readme.md b/src/eligibility_signposting_api/common/api-error-response-readme.md new file mode 100644 index 000000000..08473002c --- /dev/null +++ b/src/eligibility_signposting_api/common/api-error-response-readme.md @@ -0,0 +1,88 @@ +# How to Use the API Error Response Module + +This document outlines how to use the `api_error_response.py` module for standardized error handling within the Eligibility Signposting API. 
The module ensures that all API errors are consistent, logged appropriately, and conform to the FHIR `OperationOutcome` standard. + +## Core Concepts + +The error handling mechanism is built around the class `APIErrorResponse`. + +1. **`APIErrorResponse` Class**: This class is a constructor for a specific type of error. An instance of this class holds configuration for an error, such as the `HTTPStatus`, severity, and various FHIR-specific codes. +2. **Pre-defined Error Instances**: The module defines several singleton instances of `APIErrorResponse` for common, application-specific errors. Examples include: + - `INVALID_CATEGORY_ERROR` + - `NHS_NUMBER_MISMATCH_ERROR` + - `INTERNAL_SERVER_ERROR` +3. **`log_and_generate_response()` Method**: This is the primary method to be used. When called on an `APIErrorResponse` instance, it performs two actions: + - Logs the error with a detailed internal message. + - Generates a complete HTTP response dictionary (`statusCode`, `headers`, `body`) containing a FHIR-compliant `OperationOutcome` payload. + +## How to Use + +The primary way to handle errors is to import a pre-defined error object from `api_error_response.py` and call its `log_and_generate_response()` method. + +### 1. Handling Specific, Known Errors + +For handling validation failures or other expected error conditions, use one of the pre-defined error instances. +The `request_validator.py` module uses this pattern to validate query parameters. If a parameter is invalid, it calls the corresponding error function.
+ +#### Example: Invalid "category" parameter + +``` python +# request_validator.py + +from eligibility_signposting_api.common.api_error_response import INVALID_CATEGORY_ERROR + +def get_category_error_response(category: str) -> dict[str, Any]: + """Generates an error response for an invalid category.""" + + return INVALID_CATEGORY_ERROR.log_and_generate_response( + log_message=f"Invalid category query param: '{category}'", + diagnostics=f"{category} is not a category that is supported by the API", + location_param="category" + ) +``` + +#### Key Parameters for `log_and_generate_response()` + +- `log_message`: A detailed message for internal logging. This should contain specific information useful for debugging. +- `diagnostics`: The user-facing error message that will be included in the API response body. +- `location_param` (optional): The name of the parameter that caused the error. This helps pinpoint the issue for API consumers. + +### 2. Handling Unexpected Exceptions (Global Error Handler) + +For unexpected errors, a global exception handler in `error_handler.py` catches any unhandled exception and returns a generic 500 Internal Server Error. This prevents sensitive information from leaking in stack traces. + +``` python +# error_handler.py + +from eligibility_signposting_api.common.api_error_response import INTERNAL_SERVER_ERROR + +def handle_exception(e: Exception) -> ResponseReturnValue | HTTPException: + # Generate a generic, safe response for the client + response = INTERNAL_SERVER_ERROR.log_and_generate_response( + log_message=f"An unexpected error occurred: {traceback.format_exception(e)}", + diagnostics="An unexpected error occurred." + ) + return make_response(response.get("body"), response.get("statusCode"), response.get("headers")) +``` + +### 3.
Creating New Error Types + +If a new, reusable error condition is identified, you should add a new instance of `APIErrorResponse` to `api_error_response.py`. +Follow the existing pattern: + +``` python +# api_error_response.py + +# ... (other error definitions) + +SOME_NEW_ERROR = APIErrorResponse( + status_code=HTTPStatus.BAD_REQUEST, + fhir_issue_code=FHIRIssueCode.VALUE, + fhir_issue_severity=FHIRIssueSeverity.ERROR, + # Note: fhir_coding_system is set internally by APIErrorResponse + fhir_error_code=FHIRSpineErrorCode.INVALID_PARAMETER, + fhir_display_message="A new specific error message for display", +) +``` + +By centralizing error definitions, we ensure that the API provides a consistent and predictable experience for its consumers. diff --git a/src/eligibility_signposting_api/api_error_response.py b/src/eligibility_signposting_api/common/api_error_response.py similarity index 85% rename from src/eligibility_signposting_api/api_error_response.py rename to src/eligibility_signposting_api/common/api_error_response.py index 9b81a740c..afdbfe962 100644 --- a/src/eligibility_signposting_api/api_error_response.py +++ b/src/eligibility_signposting_api/common/api_error_response.py @@ -22,29 +22,30 @@ class FHIRIssueCode(str, Enum): FORBIDDEN = "forbidden" PROCESSING = "processing" VALUE = "value" + INVALID = "invalid" class FHIRSpineErrorCode(str, Enum): - INVALID_NHS_NUMBER = "INVALID_NHS_NUMBER" + ACCESS_DENIED = "ACCESS_DENIED" INVALID_PARAMETER = "INVALID_PARAMETER" + BAD_REQUEST = "BAD_REQUEST" INTERNAL_SERVER_ERROR = "INTERNAL_SERVER_ERROR" REFERENCE_NOT_FOUND = "REFERENCE_NOT_FOUND" class APIErrorResponse: - def __init__( # noqa: PLR0913 + def __init__( self, status_code: HTTPStatus, fhir_issue_code: FHIRIssueCode, fhir_issue_severity: FHIRIssueSeverity, - fhir_coding_system: str, fhir_error_code: str, fhir_display_message: str, ) -> None: self.status_code = status_code self.fhir_issue_code = fhir_issue_code self.fhir_issue_severity = fhir_issue_severity -
self.fhir_coding_system = fhir_coding_system + self.fhir_coding_system = "https://fhir.nhs.uk/STU3/ValueSet/Spine-ErrorOrWarningCode-1" self.fhir_error_code = fhir_error_code self.fhir_display_message = fhir_display_message @@ -94,7 +95,6 @@ def log_and_generate_response( status_code=HTTPStatus.UNPROCESSABLE_ENTITY, fhir_issue_code=FHIRIssueCode.VALUE, fhir_issue_severity=FHIRIssueSeverity.ERROR, - fhir_coding_system="https://fhir.nhs.uk/STU3/ValueSet/Spine-ErrorOrWarningCode-1", fhir_error_code=FHIRSpineErrorCode.INVALID_PARAMETER, fhir_display_message="The supplied value was not recognised by the API.", ) @@ -103,7 +103,6 @@ def log_and_generate_response( status_code=HTTPStatus.UNPROCESSABLE_ENTITY, fhir_issue_code=FHIRIssueCode.VALUE, fhir_issue_severity=FHIRIssueSeverity.ERROR, - fhir_coding_system="https://fhir.nhs.uk/STU3/ValueSet/Spine-ErrorOrWarningCode-1", fhir_error_code=FHIRSpineErrorCode.INVALID_PARAMETER, fhir_display_message="The supplied category was not recognised by the API.", ) @@ -112,7 +111,6 @@ def log_and_generate_response( status_code=HTTPStatus.BAD_REQUEST, fhir_issue_code=FHIRIssueCode.VALUE, fhir_issue_severity=FHIRIssueSeverity.ERROR, - fhir_coding_system="https://fhir.nhs.uk/STU3/ValueSet/Spine-ErrorOrWarningCode-1", fhir_error_code=FHIRSpineErrorCode.INVALID_PARAMETER, fhir_display_message="The given conditions were not in the expected format.", ) @@ -121,7 +119,6 @@ def log_and_generate_response( status_code=HTTPStatus.NOT_FOUND, fhir_issue_code=FHIRIssueCode.PROCESSING, fhir_issue_severity=FHIRIssueSeverity.ERROR, - fhir_coding_system="https://fhir.nhs.uk/STU3/ValueSet/Spine-ErrorOrWarningCode-1", fhir_error_code=FHIRSpineErrorCode.REFERENCE_NOT_FOUND, fhir_display_message="The given NHS number was not found in our datasets. 
" "This could be because the number is incorrect or " @@ -132,7 +129,6 @@ def log_and_generate_response( status_code=HTTPStatus.INTERNAL_SERVER_ERROR, fhir_issue_code=FHIRIssueCode.PROCESSING, fhir_issue_severity=FHIRIssueSeverity.ERROR, - fhir_coding_system="https://fhir.nhs.uk/STU3/ValueSet/Spine-ErrorOrWarningCode-1", fhir_error_code=FHIRSpineErrorCode.INTERNAL_SERVER_ERROR, fhir_display_message="An unexpected internal server error occurred.", ) @@ -141,7 +137,15 @@ def log_and_generate_response( status_code=HTTPStatus.FORBIDDEN, fhir_issue_code=FHIRIssueCode.FORBIDDEN, fhir_issue_severity=FHIRIssueSeverity.ERROR, - fhir_coding_system="https://fhir.nhs.uk/STU3/ValueSet/Spine-ErrorOrWarningCode-1", - fhir_error_code=FHIRSpineErrorCode.INVALID_NHS_NUMBER, - fhir_display_message="The provided NHS number does not match the record.", + fhir_error_code=FHIRSpineErrorCode.ACCESS_DENIED, + fhir_display_message="Access has been denied to process this request.", +) + + +NHS_NUMBER_MISSING_ERROR = APIErrorResponse( + status_code=HTTPStatus.BAD_REQUEST, + fhir_issue_code=FHIRIssueCode.INVALID, + fhir_issue_severity=FHIRIssueSeverity.ERROR, + fhir_error_code=FHIRSpineErrorCode.BAD_REQUEST, + fhir_display_message="Bad Request", ) diff --git a/src/eligibility_signposting_api/error_handler.py b/src/eligibility_signposting_api/common/error_handler.py similarity index 85% rename from src/eligibility_signposting_api/error_handler.py rename to src/eligibility_signposting_api/common/error_handler.py index 5ff156d5b..662e8bdda 100644 --- a/src/eligibility_signposting_api/error_handler.py +++ b/src/eligibility_signposting_api/common/error_handler.py @@ -5,7 +5,7 @@ from flask.typing import ResponseReturnValue from werkzeug.exceptions import HTTPException -from eligibility_signposting_api.api_error_response import INTERNAL_SERVER_ERROR +from eligibility_signposting_api.common.api_error_response import INTERNAL_SERVER_ERROR logger = logging.getLogger(__name__) @@ -21,4 +21,4 @@ def 
handle_exception(e: Exception) -> ResponseReturnValue | HTTPException: response = INTERNAL_SERVER_ERROR.log_and_generate_response( log_message=f"An unexpected error occurred: {full_traceback}", diagnostics="An unexpected error occurred." ) - return make_response(response.get("body"), response.get("statusCode")) + return make_response(response.get("body"), response.get("statusCode"), response.get("headers")) diff --git a/src/eligibility_signposting_api/wrapper.py b/src/eligibility_signposting_api/common/request_validator.py similarity index 88% rename from src/eligibility_signposting_api/wrapper.py rename to src/eligibility_signposting_api/common/request_validator.py index c3437afea..9375fd729 100644 --- a/src/eligibility_signposting_api/wrapper.py +++ b/src/eligibility_signposting_api/common/request_validator.py @@ -6,17 +6,18 @@ from mangum.types import LambdaContext, LambdaEvent -from eligibility_signposting_api.api_error_response import ( +from eligibility_signposting_api.common.api_error_response import ( INVALID_CATEGORY_ERROR, INVALID_CONDITION_FORMAT_ERROR, INVALID_INCLUDE_ACTIONS_ERROR, NHS_NUMBER_MISMATCH_ERROR, + NHS_NUMBER_MISSING_ERROR, ) from eligibility_signposting_api.config.contants import NHS_NUMBER_HEADER logger = logging.getLogger(__name__) -condition_pattern = re.compile(r"^\s*[a-zA-Z0-9]+\s*$", re.IGNORECASE) +condition_pattern = re.compile(r"^\s*[a-z0-9]+\s*$", re.IGNORECASE) category_pattern = re.compile(r"^\s*(VACCINATIONS|SCREENING|ALL)\s*$", re.IGNORECASE) include_actions_pattern = re.compile(r"^\s*([YN])\s*$", re.IGNORECASE) @@ -59,12 +60,16 @@ def wrapper(event: LambdaEvent, context: LambdaContext) -> dict[str, Any] | None path_nhs_no = event.get("pathParameters", {}).get("id") header_nhs_no = event.get("headers", {}).get(NHS_NUMBER_HEADER) - if not validate_nhs_number(path_nhs_no, header_nhs_no): - message = f"NHS Number {path_nhs_no or ''} does not match the header NHS Number {header_nhs_no or ''}" - return 
NHS_NUMBER_MISMATCH_ERROR.log_and_generate_response( + if not path_nhs_no: + message = "Missing required NHS Number from path parameters" + return NHS_NUMBER_MISSING_ERROR.log_and_generate_response( log_message=message, diagnostics=message, location_param="id" ) + if not validate_nhs_number(path_nhs_no, header_nhs_no): + message = "You are not authorised to request information for the supplied NHS Number" + return NHS_NUMBER_MISMATCH_ERROR.log_and_generate_response(log_message=message, diagnostics=message) + query_params = event.get("queryStringParameters") if query_params: is_valid, problem = validate_query_params(query_params) diff --git a/src/eligibility_signposting_api/config/config.py b/src/eligibility_signposting_api/config/config.py index 722e90133..58be70258 100644 --- a/src/eligibility_signposting_api/config/config.py +++ b/src/eligibility_signposting_api/config/config.py @@ -1,10 +1,8 @@ import logging import os -from collections.abc import Sequence from functools import cache from typing import Any, NewType -from pythonjsonlogger.json import JsonFormatter from yarl import URL from eligibility_signposting_api.repos.campaign_repo import BucketName @@ -24,6 +22,7 @@ def config() -> dict[str, Any]: rules_bucket_name = BucketName(os.getenv("RULES_BUCKET_NAME", "test-rules-bucket")) audit_bucket_name = BucketName(os.getenv("AUDIT_BUCKET_NAME", "test-audit-bucket")) aws_default_region = AwsRegion(os.getenv("AWS_DEFAULT_REGION", "eu-west-1")) + enable_xray_patching = bool(os.getenv("ENABLE_XRAY_PATCHING", "false")) kinesis_audit_stream_to_s3 = AwsKinesisFirehoseStreamName( os.getenv("KINESIS_AUDIT_STREAM_TO_S3", "test_kinesis_audit_stream_to_s3") ) @@ -41,32 +40,22 @@ def config() -> dict[str, Any]: "audit_bucket_name": audit_bucket_name, "firehose_endpoint": None, "kinesis_audit_stream_to_s3": kinesis_audit_stream_to_s3, + "enable_xray_patching": enable_xray_patching, "log_level": log_level, } + local_stack_endpoint = "http://localhost:4566" return { 
"aws_access_key_id": AwsAccessKey(os.getenv("AWS_ACCESS_KEY_ID", "dummy_key")), "aws_default_region": aws_default_region, "aws_secret_access_key": AwsSecretAccessKey(os.getenv("AWS_SECRET_ACCESS_KEY", "dummy_secret")), - "dynamodb_endpoint": URL(os.getenv("DYNAMODB_ENDPOINT", "http://localhost:4566")), + "dynamodb_endpoint": URL(os.getenv("DYNAMODB_ENDPOINT", local_stack_endpoint)), "person_table_name": person_table_name, - "s3_endpoint": URL(os.getenv("S3_ENDPOINT", "http://localhost:4566")), + "s3_endpoint": URL(os.getenv("S3_ENDPOINT", local_stack_endpoint)), "rules_bucket_name": rules_bucket_name, "audit_bucket_name": audit_bucket_name, - "firehose_endpoint": URL(os.getenv("FIREHOSE_ENDPOINT", "http://localhost:4566")), + "firehose_endpoint": URL(os.getenv("FIREHOSE_ENDPOINT", local_stack_endpoint)), "kinesis_audit_stream_to_s3": kinesis_audit_stream_to_s3, + "enable_xray_patching": enable_xray_patching, "log_level": log_level, } - - -def init_logging(quieten: Sequence[str] = ("asyncio", "botocore", "boto3", "mangum", "urllib3")) -> None: - log_format = "%(asctime)s %(levelname)-8s %(name)s %(module)s.py:%(funcName)s():%(lineno)d %(message)s" - formatter = JsonFormatter(log_format) - handler = logging.StreamHandler() - handler.setFormatter(formatter) - logging.root.handlers = [] # Clear any existing handlers - logging.root.setLevel(LOG_LEVEL) # Set log level - logging.root.addHandler(handler) # Add handler - - for q in quieten: - logging.getLogger(q).setLevel(logging.WARNING) diff --git a/src/eligibility_signposting_api/logging/__init__.py b/src/eligibility_signposting_api/logging/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/eligibility_signposting_api/logging/logs_helper.py b/src/eligibility_signposting_api/logging/logs_helper.py new file mode 100644 index 000000000..12d8a48db --- /dev/null +++ b/src/eligibility_signposting_api/logging/logs_helper.py @@ -0,0 +1,29 @@ +import logging +from collections.abc import Callable +from 
functools import wraps +from typing import Any + +from mangum.types import LambdaContext, LambdaEvent + +logger = logging.getLogger(__name__) + + +def log_request_ids_from_headers() -> Callable: + def decorator(func: Callable) -> Callable: + @wraps(func) + def wrapper(event: LambdaEvent, context: LambdaContext) -> dict[str, Any] | None: + gateway_request_id = (event.get("requestContext") or {}).get("requestId") + headers = event.get("headers") or {} + logger.info( + "request trace metadata", + extra={ + "x_request_id": headers.get("X-Request-ID"), + "x_correlation_id": headers.get("X-Correlation-ID"), + "gateway_request_id": gateway_request_id, + }, + ) + return func(event, context) + + return wrapper + + return decorator diff --git a/src/eligibility_signposting_api/logging/logs_manager.py b/src/eligibility_signposting_api/logging/logs_manager.py new file mode 100644 index 000000000..6ca253ef2 --- /dev/null +++ b/src/eligibility_signposting_api/logging/logs_manager.py @@ -0,0 +1,48 @@ +import logging +from collections.abc import Callable, Sequence +from contextvars import ContextVar +from functools import wraps +from typing import Any + +from mangum.types import LambdaContext, LambdaEvent +from pythonjsonlogger.json import JsonFormatter + +from eligibility_signposting_api.config.config import LOG_LEVEL + +request_id_context_var: ContextVar[str | None] = ContextVar("request_id", default=None) + +LOG_FORMAT = "%(asctime)s %(levelname)-8s %(name)s %(module)s.py:%(funcName)s():%(lineno)d %(message)s" + + +def add_lambda_request_id_to_logger() -> Callable: + def decorator(func: Callable) -> Callable: + @wraps(func) + def wrapper(event: LambdaEvent, context: LambdaContext) -> dict[str, Any] | None: + aws_request_id = request_id_context_var.set(context.aws_request_id) + try: + return func(event, context) + finally: + request_id_context_var.reset(aws_request_id) + + return wrapper + + return decorator + + +class EnrichedJsonFormatter(JsonFormatter): + def add_fields(self, 
log_record: dict[str, Any], record: logging.LogRecord, message_dict: dict[str, Any]) -> None: + log_record["request_id"] = request_id_context_var.get() or "-" + super().add_fields(log_record, record, message_dict) + + +def init_logging(quieten: Sequence[str] = ("asyncio", "botocore", "boto3", "mangum", "urllib3")) -> None: + formatter = EnrichedJsonFormatter(LOG_FORMAT) + handler = logging.StreamHandler() + handler.setFormatter(formatter) + + logging.root.handlers = [] # Remove default handlers + logging.root.setLevel(LOG_LEVEL) + logging.root.addHandler(handler) + + for q in quieten: + logging.getLogger(q).setLevel(logging.WARNING) diff --git a/src/eligibility_signposting_api/logging/tracing_helper.py b/src/eligibility_signposting_api/logging/tracing_helper.py new file mode 100644 index 000000000..888adc507 --- /dev/null +++ b/src/eligibility_signposting_api/logging/tracing_helper.py @@ -0,0 +1,21 @@ +from collections.abc import Callable +from functools import wraps +from typing import Any + +from aws_xray_sdk.core import xray_recorder +from mangum.types import LambdaContext, LambdaEvent + + +def tracing_setup() -> Callable: + def decorator(func: Callable) -> Callable: + @wraps(func) + def wrapper(event: LambdaEvent, context: LambdaContext) -> dict[str, Any] | None: + xray_recorder.begin_subsegment("Lambda") + try: + return func(event, context) + finally: + xray_recorder.end_subsegment() + + return wrapper + + return decorator diff --git a/src/eligibility_signposting_api/model/rules.py b/src/eligibility_signposting_api/model/campaign_config.py similarity index 90% rename from src/eligibility_signposting_api/model/rules.py rename to src/eligibility_signposting_api/model/campaign_config.py index 541db6263..989f2e53d 100644 --- a/src/eligibility_signposting_api/model/rules.py +++ b/src/eligibility_signposting_api/model/campaign_config.py @@ -17,10 +17,10 @@ from pydantic import SerializationInfo CampaignName = NewType("CampaignName", str) -CampaignVersion = 
NewType("CampaignVersion", str) +CampaignVersion = NewType("CampaignVersion", int) CampaignID = NewType("CampaignID", str) IterationName = NewType("IterationName", str) -IterationVersion = NewType("IterationVersion", str) +IterationVersion = NewType("IterationVersion", int) IterationID = NewType("IterationID", str) IterationDate = NewType("IterationDate", date) RuleName = NewType("RuleName", str) @@ -42,6 +42,8 @@ class RuleType(StrEnum): filter = "F" suppression = "S" redirect = "R" + not_eligible_actions = "X" + not_actionable_actions = "Y" class RuleOperator(StrEnum): @@ -153,6 +155,8 @@ class Iteration(BaseModel): approval_maximum: int | None = Field(None, alias="ApprovalMaximum") type: Literal["A", "M", "S", "O"] = Field(..., alias="Type") default_comms_routing: str = Field(..., alias="DefaultCommsRouting") + default_not_eligible_routing: str = Field(..., alias="DefaultNotEligibleRouting") + default_not_actionable_routing: str = Field(..., alias="DefaultNotActionableRouting") iteration_cohorts: list[IterationCohort] = Field(..., alias="IterationCohorts") iteration_rules: list[IterationRule] = Field(..., alias="IterationRules") actions_mapper: ActionsMapper = Field(..., alias="ActionsMapper") @@ -181,9 +185,9 @@ class CampaignConfig(BaseModel): name: CampaignName = Field(..., alias="Name") type: Literal["V", "S"] = Field(..., alias="Type") target: Literal["COVID", "FLU", "MMR", "RSV"] = Field(..., alias="Target") - manager: str | None = Field(None, alias="Manager") - approver: str | None = Field(None, alias="Approver") - reviewer: str | None = Field(None, alias="Reviewer") + manager: list[str] | None = Field(None, alias="Manager") + approver: list[str] | None = Field(None, alias="Approver") + reviewer: list[str] | None = Field(None, alias="Reviewer") iteration_frequency: Literal["X", "D", "W", "M", "Q", "A"] = Field(..., alias="IterationFrequency") iteration_type: Literal["A", "M", "S", "O"] = Field(..., alias="IterationType") iteration_time: str | None = 
Field(None, alias="IterationTime") @@ -224,21 +228,6 @@ def check_no_overlapping_iterations(self) -> typing.Self: raise ValueError(message) return self - @model_validator(mode="after") - def check_has_iteration_from_start(self) -> typing.Self: - iterations_by_date = sorted(self.iterations, key=attrgetter("iteration_date")) - if first_iteration := next(iter(iterations_by_date), None): - if first_iteration.iteration_date > self.start_date: - message = ( - f"campaign {self.id} starts on {self.start_date}, " - f"1st iteration starts later - {first_iteration.iteration_date}" - ) - raise ValueError(message) - return self - # Should never happen, since we are constraining self.iterations with a min_length of 1 - message = f"campaign {self.id} has no iterations." - raise ValueError(message) - @cached_property def campaign_live(self) -> bool: today = datetime.now(tz=UTC).date() diff --git a/src/eligibility_signposting_api/model/eligibility.py b/src/eligibility_signposting_api/model/eligibility_status.py similarity index 64% rename from src/eligibility_signposting_api/model/eligibility.py rename to src/eligibility_signposting_api/model/eligibility_status.py index bad948361..9cc8809fc 100644 --- a/src/eligibility_signposting_api/model/eligibility.py +++ b/src/eligibility_signposting_api/model/eligibility_status.py @@ -4,10 +4,14 @@ from datetime import date from enum import Enum, StrEnum, auto from functools import total_ordering -from typing import NewType, Self +from typing import TYPE_CHECKING, NewType, Self from pydantic import HttpUrl +if TYPE_CHECKING: + from eligibility_signposting_api.model import campaign_config + from eligibility_signposting_api.model.campaign_config import CampaignID, CampaignVersion, CohortLabel, Iteration + NHSNumber = NewType("NHSNumber", str) DateOfBirth = NewType("DateOfBirth", date) Postcode = NewType("Postcode", str) @@ -24,11 +28,15 @@ UrlLink = NewType("UrlLink", HttpUrl) UrlLabel = NewType("UrlLabel", str) +StatusText = 
NewType("StatusText", str) + class RuleType(StrEnum): filter = "F" suppression = "S" redirect = "R" + not_eligible_actions = "X" + not_actionable_actions = "Y" @total_ordering @@ -65,6 +73,21 @@ def best(*statuses: Status) -> Status: """ return max(statuses) + def get_status_text(self, condition_name: ConditionName) -> StatusText: + status_to_text_mapping = { + self.not_eligible: lambda: StatusText("We do not believe you can have it"), + self.not_actionable: lambda: StatusText(f"You should have the {condition_name} vaccine"), + self.actionable: lambda: StatusText(f"You should have the {condition_name} vaccine"), + } + return status_to_text_mapping.get(self, lambda: StatusText("Unknown status provided"))() + + def get_action_rule_type(self) -> RuleType: + return { + self.not_eligible: RuleType.not_eligible_actions, + self.not_actionable: RuleType.not_actionable_actions, + self.actionable: RuleType.redirect, + }[self] + @dataclass class Reason: @@ -90,6 +113,8 @@ class Condition: condition_name: ConditionName status: Status cohort_results: list[CohortGroupResult] + suitability_rules: list[Reason] + status_text: StatusText actions: list[SuggestedAction] | None = None @@ -109,6 +134,22 @@ class IterationResult: actions: list[SuggestedAction] | None +@dataclass +class BestIterationResult: + iteration_result: IterationResult + active_iteration: Iteration | None = None + campaign_id: CampaignID | None = None + campaign_version: CampaignVersion | None = None + cohort_results: dict[CohortLabel, CohortGroupResult] | None = None + + +@dataclass +class MatchedActionDetail: + rule_name: campaign_config.RuleName | None = None + rule_priority: campaign_config.RulePriority | None = None + actions: list[SuggestedAction] | None = None + + @dataclass class EligibilityStatus: """Represents a person's eligibility for vaccination.""" diff --git a/src/eligibility_signposting_api/model/person.py b/src/eligibility_signposting_api/model/person.py new file mode 100644 index 
000000000..eaaff6c64 --- /dev/null +++ b/src/eligibility_signposting_api/model/person.py @@ -0,0 +1,7 @@ +from dataclasses import dataclass +from typing import Any + + +@dataclass +class Person: + data: list[dict[str, Any]] diff --git a/src/eligibility_signposting_api/repos/campaign_repo.py b/src/eligibility_signposting_api/repos/campaign_repo.py index 8a2a212fd..26b701962 100644 --- a/src/eligibility_signposting_api/repos/campaign_repo.py +++ b/src/eligibility_signposting_api/repos/campaign_repo.py @@ -5,7 +5,7 @@ from botocore.client import BaseClient from wireup import Inject, service -from eligibility_signposting_api.model.rules import CampaignConfig, Rules +from eligibility_signposting_api.model.campaign_config import CampaignConfig, Rules BucketName = NewType("BucketName", str) diff --git a/src/eligibility_signposting_api/repos/person_repo.py b/src/eligibility_signposting_api/repos/person_repo.py index 41ea20745..9867b2844 100644 --- a/src/eligibility_signposting_api/repos/person_repo.py +++ b/src/eligibility_signposting_api/repos/person_repo.py @@ -5,7 +5,8 @@ from boto3.resources.base import ServiceResource from wireup import Inject, service -from eligibility_signposting_api.model.eligibility import NHSNumber +from eligibility_signposting_api.model.eligibility_status import NHSNumber +from eligibility_signposting_api.model.person import Person from eligibility_signposting_api.repos.exceptions import NotFoundError logger = logging.getLogger(__name__) @@ -35,7 +36,7 @@ def __init__(self, table: Annotated[Any, Inject(qualifier="person_table")]) -> N super().__init__() self.table = table - def get_eligibility_data(self, nhs_number: NHSNumber) -> list[dict[str, Any]]: + def get_eligibility_data(self, nhs_number: NHSNumber) -> Person: response = self.table.query(KeyConditionExpression=Key("NHS_NUMBER").eq(nhs_number)) logger.debug("response %r for %r", response, nhs_number, extra={"response": response, "nhs_number": nhs_number}) @@ -44,4 +45,5 @@ def 
get_eligibility_data(self, nhs_number: NHSNumber) -> list[dict[str, Any]]: raise NotFoundError(message) logger.debug("returning items %s", items, extra={"items": items}) - return items + + return Person(data=items) diff --git a/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py b/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py index a64a8b878..66cba4664 100644 --- a/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py +++ b/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py @@ -1,101 +1,68 @@ from __future__ import annotations -from _operator import attrgetter +import logging from collections import defaultdict -from collections.abc import Collection, Iterable, Iterator, Mapping from dataclasses import dataclass, field -from itertools import groupby -from typing import TYPE_CHECKING, Any - -from eligibility_signposting_api.audit.audit_context import AuditContext - -if TYPE_CHECKING: - from eligibility_signposting_api.model.rules import ( - ActionsMapper, - CampaignID, - CampaignVersion, - Iteration, - IterationCohort, - RuleName, - RulePriority, - ) +from itertools import chain +from typing import TYPE_CHECKING from wireup import service -from eligibility_signposting_api.model import eligibility, rules -from eligibility_signposting_api.model.eligibility import ( - ActionCode, - ActionDescription, - ActionType, +from eligibility_signposting_api.audit.audit_context import AuditContext +from eligibility_signposting_api.model import eligibility_status +from eligibility_signposting_api.model.eligibility_status import ( + BestIterationResult, CohortGroupResult, Condition, ConditionName, - InternalActionCode, + EligibilityStatus, IterationResult, + Reason, Status, - SuggestedAction, - UrlLabel, - UrlLink, -) -from eligibility_signposting_api.services.calculators.rule_calculator import ( - RuleCalculator, ) +from 
eligibility_signposting_api.services.processors.action_rule_handler import ActionRuleHandler +from eligibility_signposting_api.services.processors.campaign_evaluator import CampaignEvaluator +from eligibility_signposting_api.services.processors.rule_processor import RuleProcessor -Row = Collection[Mapping[str, Any]] +if TYPE_CHECKING: + from collections.abc import Collection + + from eligibility_signposting_api.model.campaign_config import ( + CampaignConfig, + CohortLabel, + IterationName, + ) + from eligibility_signposting_api.model.person import Person + +logger = logging.getLogger(__name__) @service class EligibilityCalculatorFactory: @staticmethod - def get(person_data: Row, campaign_configs: Collection[rules.CampaignConfig]) -> EligibilityCalculator: - return EligibilityCalculator(person_data=person_data, campaign_configs=campaign_configs) + def get(person: Person, campaign_configs: Collection[CampaignConfig]) -> EligibilityCalculator: + return EligibilityCalculator(person=person, campaign_configs=campaign_configs) @dataclass class EligibilityCalculator: - person_data: Row - campaign_configs: Collection[rules.CampaignConfig] - - results: list[eligibility.Condition] = field(default_factory=list) + person: Person + campaign_configs: Collection[CampaignConfig] - @property - def active_campaigns(self) -> list[rules.CampaignConfig]: - return [cc for cc in self.campaign_configs if cc.campaign_live] + campaign_evaluator: CampaignEvaluator = field(default_factory=CampaignEvaluator) + rule_processor: RuleProcessor = field(default_factory=RuleProcessor) + action_rule_handler: ActionRuleHandler = field(default_factory=ActionRuleHandler) - def campaigns_grouped_by_condition_name( - self, conditions: list[str], category: str - ) -> Iterator[tuple[eligibility.ConditionName, list[rules.CampaignConfig]]]: - """Generator that yields campaign groups filtered by condition names and campaign category.""" - - allowed_types = ( - {"V", "S"} if category == "ALL" else {category[0]} 
if category in {"VACCINATIONS", "SCREENING"} else set() - ) - filter_all_conditions = "ALL" in conditions - - for condition_name, campaign_group in groupby( - sorted(self.active_campaigns, key=attrgetter("target")), - key=attrgetter("target"), - ): - campaigns = list(campaign_group) - if campaigns[0].type in allowed_types and (filter_all_conditions or str(condition_name) in conditions): - yield condition_name, campaigns - - @property - def person_cohorts(self) -> set[str]: - cohorts_row: Mapping[str, dict[str, dict[str, dict[str, Any]]]] = next( - (row for row in self.person_data if row.get("ATTRIBUTE_TYPE") == "COHORTS"), - {}, - ) - return set(cohorts_row.get("COHORT_MAP", {}).get("cohorts", {}).get("M", {}).keys()) + results: list[eligibility_status.Condition] = field(default_factory=list) @staticmethod def get_the_best_cohort_memberships( - cohort_results: dict[str, CohortGroupResult], + cohort_results: dict[CohortLabel, CohortGroupResult], ) -> tuple[Status, list[CohortGroupResult]]: if not cohort_results: - return eligibility.Status.not_eligible, [] + return eligibility_status.Status.not_eligible, [] - best_status = eligibility.Status.best(*[result.status for result in cohort_results.values()]) + best_status = eligibility_status.Status.best(*[result.status for result in cohort_results.values()]) best_cohorts = [result for result in cohort_results.values() if result.status == best_status] best_cohorts = [ @@ -111,310 +78,137 @@ def get_the_best_cohort_memberships( return best_status, best_cohorts - @staticmethod - def get_exclusion_rules( - cohort: IterationCohort, filter_rules: Iterable[rules.IterationRule] - ) -> Iterator[rules.IterationRule]: - return ( - ir - for ir in filter_rules - if ir.cohort_label is None - or cohort.cohort_label == ir.cohort_label - or (isinstance(ir.cohort_label, (list, set, tuple)) and cohort.cohort_label in ir.cohort_label) - ) + def get_eligibility_status(self, include_actions: str, conditions: list[str], category: str) -> 
EligibilityStatus: + include_actions_flag = include_actions.upper() == "Y" + condition_results: dict[ConditionName, IterationResult] = {} + final_result = [] - @staticmethod - def get_rules_by_type( - active_iteration: Iteration, - ) -> tuple[tuple[rules.IterationRule, ...], tuple[rules.IterationRule, ...]]: - filter_rules, suppression_rules = ( - tuple(rule for rule in active_iteration.iteration_rules if attrgetter("type")(rule) == rule_type) - for rule_type in (rules.RuleType.filter, rules.RuleType.suppression) + requested_grouped_campaigns = self.campaign_evaluator.get_requested_grouped_campaigns( + self.campaign_configs, conditions, category ) - return filter_rules, suppression_rules + for condition_name, campaign_group in requested_grouped_campaigns: + best_iteration_result = self.get_best_iteration_result(campaign_group) - @staticmethod - def get_redirect_rules( - active_iteration: Iteration, - ) -> tuple[tuple[rules.IterationRule, ...], ActionsMapper, str]: - redirect_rules = tuple( - rule for rule in active_iteration.iteration_rules if rule.type in rules.RuleType.redirect - ) - default_comms = active_iteration.default_comms_routing - action_mapper = active_iteration.actions_mapper - return redirect_rules, action_mapper, default_comms + if best_iteration_result is None: + continue - def evaluate_eligibility( - self, include_actions: str, conditions: list[str], category: str - ) -> eligibility.EligibilityStatus: - include_actions_flag = include_actions.upper() == "Y" - condition_results: dict[ConditionName, IterationResult] = {} - actions: list[SuggestedAction] | None = [] - redirect_rule_priority, redirect_rule_name = None, None - - for condition_name, campaign_group in self.campaigns_grouped_by_condition_name(conditions, category): - best_active_iteration: Iteration | None - best_candidate: IterationResult - best_campaign_id: CampaignID | None - best_campaign_version: CampaignVersion | None - best_cohort_results: dict[str, CohortGroupResult] | None + 
matched_action_detail = self.action_rule_handler.get_actions( + self.person, + best_iteration_result.active_iteration, + best_iteration_result.iteration_result, + include_actions_flag=include_actions_flag, + ) - iteration_results: dict[ - str, tuple[Iteration, IterationResult, CampaignID, CampaignVersion, dict[str, CohortGroupResult]] - ] = {} + condition_results[condition_name] = best_iteration_result.iteration_result + condition_results[condition_name].actions = matched_action_detail.actions - for cc in campaign_group: - active_iteration = cc.current_iteration - cohort_results: dict[str, CohortGroupResult] = self.get_cohort_results(active_iteration) - - # Determine Result between cohorts - get the best - status, best_cohorts = self.get_the_best_cohort_memberships(cohort_results) - iteration_results[active_iteration.name] = ( - active_iteration, - IterationResult(status, best_cohorts, actions), - cc.id, - cc.version, - cohort_results, - ) + condition: Condition = self.build_condition( + iteration_result=condition_results[condition_name], condition_name=condition_name + ) + final_result.append(condition) - # Determine results between iterations - get the best - if iteration_results: - ( - best_iteration_name, - ( - best_active_iteration, - best_candidate, - best_campaign_id, - best_campaign_version, - best_cohort_results, - ), - ) = max(iteration_results.items(), key=lambda item: item[1][1].status.value) - else: - best_candidate = IterationResult(eligibility.Status.not_eligible, [], actions) - best_campaign_id = None - best_campaign_version = None - best_active_iteration = None - best_cohort_results = None - - condition_results[condition_name] = best_candidate - - if best_candidate.status == Status.actionable and best_active_iteration is not None: - if include_actions_flag: - actions, matched_r_rule_priority, matched_r_rule_name = self.handle_redirect_rules( - best_active_iteration - ) - redirect_rule_name = matched_r_rule_name - redirect_rule_priority = 
matched_r_rule_priority - else: - actions = None - - if best_candidate.status in (Status.not_eligible, Status.not_actionable) and not include_actions_flag: - actions = None - - # add actions to condition results - condition_results[condition_name].actions = actions - # reset actions for the next condition - actions: list[SuggestedAction] | None = [] - - # add audit data AuditContext.append_audit_condition( - condition_results[condition_name].actions, condition_name, - (best_active_iteration, best_candidate, best_cohort_results), - (best_campaign_id, best_campaign_version), - (redirect_rule_priority, redirect_rule_name), + best_iteration_result, + matched_action_detail, + condition_results[condition_name].cohort_results, ) # Consolidate all the results and return - final_result = self.build_condition_results(condition_results) - return eligibility.EligibilityStatus(conditions=final_result) - - def handle_redirect_rules( - self, best_active_iteration: Iteration - ) -> tuple[list[SuggestedAction] | None, RulePriority | None, RuleName | None]: - redirect_rules, action_mapper, default_comms = self.get_redirect_rules(best_active_iteration) - priority_getter = attrgetter("priority") - sorted_rules_by_priority = sorted(redirect_rules, key=priority_getter) - - actions: list[SuggestedAction] | None = self.get_actions_from_comms(action_mapper, default_comms) - matched_redirect_rule_priority, matched_redirect_rule_name = None, None - for _, rule_group in groupby(sorted_rules_by_priority, key=priority_getter): - rule_group_list = list(rule_group) - matcher_matched_list = [ - RuleCalculator(person_data=self.person_data, rule=rule).evaluate_exclusion()[1].matcher_matched - for rule in rule_group_list - ] - - comms_routing = rule_group_list[0].comms_routing - if comms_routing and all(matcher_matched_list): - rule_actions = self.get_actions_from_comms(action_mapper, comms_routing) - if rule_actions and len(rule_actions) > 0: - actions = rule_actions - matched_redirect_rule_priority 
= rule_group_list[0].priority - matched_redirect_rule_name = rule_group_list[0].name - break - - return actions, matched_redirect_rule_priority, matched_redirect_rule_name - - def get_cohort_results(self, active_iteration: rules.Iteration) -> dict[str, CohortGroupResult]: - cohort_results: dict[str, CohortGroupResult] = {} - filter_rules, suppression_rules = self.get_rules_by_type(active_iteration) - for cohort in sorted(active_iteration.iteration_cohorts, key=attrgetter("priority")): - # Base Eligibility - check - if cohort.cohort_label in self.person_cohorts or cohort.is_magic_cohort: - # Eligibility - check - if self.is_eligible_by_filter_rules(cohort, cohort_results, filter_rules): - # Actionability - evaluation - self.evaluate_suppression_rules(cohort, cohort_results, suppression_rules) - - # Not base eligible - elif cohort.cohort_label is not None: - cohort_results[cohort.cohort_label] = CohortGroupResult( - cohort.cohort_group, - Status.not_eligible, - [], - cohort.negative_description, - [], - ) - return cohort_results + return eligibility_status.EligibilityStatus(conditions=final_result) + + def get_best_iteration_result(self, campaign_group: list[CampaignConfig]) -> BestIterationResult | None: + iteration_results = self.get_iteration_results(campaign_group) + + if not iteration_results: + return None + + (best_iteration_name, best_iteration_result) = max( + iteration_results.items(), + key=lambda item: next(iter(item[1].cohort_results.values())).status.value + # Below handles the case where there are no cohort results + if item[1].cohort_results + else -1, + ) + + return best_iteration_result + + def get_iteration_results(self, campaign_group: list[CampaignConfig]) -> dict[IterationName, BestIterationResult]: + iteration_results: dict[IterationName, BestIterationResult] = {} + + for cc in campaign_group: + try: + active_iteration = cc.current_iteration + except StopIteration: + logger.info("Skipping campaign ID %s as no active iteration was found.", 
cc.id) + continue + cohort_results: dict[CohortLabel, CohortGroupResult] = self.rule_processor.get_cohort_group_results( + self.person, active_iteration + ) + + # Determine Result between cohorts - get the best + status, best_cohorts = self.get_the_best_cohort_memberships(cohort_results) + iteration_results[active_iteration.name] = BestIterationResult( + IterationResult(status, best_cohorts, []), active_iteration, cc.id, cc.version, cohort_results + ) + return iteration_results + + @staticmethod + def build_condition(iteration_result: IterationResult, condition_name: ConditionName) -> Condition: + grouped_cohort_results = defaultdict(list) + + for cohort_result in iteration_result.cohort_results: + if iteration_result.status == cohort_result.status: + grouped_cohort_results[cohort_result.cohort_code].append(cohort_result) + + deduplicated_cohort_results: list[CohortGroupResult] = EligibilityCalculator.deduplicate_cohort_results( + grouped_cohort_results + ) + + overall_deduplicated_reasons_for_condition = EligibilityCalculator.deduplicate_reasons( + deduplicated_cohort_results + ) + + return Condition( + condition_name=condition_name, + status=iteration_result.status, + cohort_results=list(deduplicated_cohort_results), + suitability_rules=list(overall_deduplicated_reasons_for_condition), + actions=iteration_result.actions, + status_text=iteration_result.status.get_status_text(condition_name), + ) @staticmethod - def build_condition_results(condition_results: dict[ConditionName, IterationResult]) -> list[Condition]: - conditions: list[Condition] = [] - # iterate over conditions - for condition_name, active_iteration_result in condition_results.items(): - grouped_cohort_results = defaultdict(list) - # iterate over cohorts and group them by status and cohort_group - for cohort_result in active_iteration_result.cohort_results: - if active_iteration_result.status == cohort_result.status: - grouped_cohort_results[cohort_result.cohort_code].append(cohort_result) - - # 
deduplicate grouped cohort results by cohort_code - deduplicated_cohort_results = [ + def deduplicate_cohort_results( + grouped_cohort_results: dict[str, list[CohortGroupResult]], + ) -> list[CohortGroupResult]: + results = [] + + for cohort_code, group_results in grouped_cohort_results.items(): + if not group_results: + continue + + deduped_reasons: list[Reason] = EligibilityCalculator.deduplicate_reasons(group_results) + + description = next((c.description for c in group_results if c.description), group_results[0].description) + + results.append( CohortGroupResult( - cohort_code=group_cohort_code, - status=group[0].status, - # Flatten all reasons from the group - reasons=[reason for cohort in group for reason in cohort.reasons], - # get the first nonempty description - description=next((c.description for c in group if c.description), group[0].description), + cohort_code=cohort_code, + status=group_results[0].status, + reasons=list(deduped_reasons), + description=description, audit_rules=[], ) - for group_cohort_code, group in grouped_cohort_results.items() - if group - ] - - # return condition with cohort results - conditions.append( - Condition( - condition_name=condition_name, - status=active_iteration_result.status, - cohort_results=list(deduplicated_cohort_results), - actions=condition_results[condition_name].actions, - ) - ) - return conditions - - def is_eligible_by_filter_rules( - self, - cohort: IterationCohort, - cohort_results: dict[str, CohortGroupResult], - filter_rules: Iterable[rules.IterationRule], - ) -> bool: - is_eligible = True - priority_getter = attrgetter("priority") - sorted_rules_by_priority = sorted(self.get_exclusion_rules(cohort, filter_rules), key=priority_getter) - - for _, rule_group in groupby(sorted_rules_by_priority, key=priority_getter): - status, group_inclusion_reasons, group_exclusion_reasons, rule_stop = self.evaluate_rules_priority_group( - rule_group - ) - if status.is_exclusion: - if cohort.cohort_label is not None: - 
cohort_results[cohort.cohort_label] = CohortGroupResult( - (cohort.cohort_group), - Status.not_eligible, - [], - cohort.negative_description, - group_exclusion_reasons, - ) - is_eligible = False - break - return is_eligible - - def evaluate_suppression_rules( - self, - cohort: IterationCohort, - cohort_results: dict[str, CohortGroupResult], - suppression_rules: Iterable[rules.IterationRule], - ) -> None: - is_actionable: bool = True - priority_getter = attrgetter("priority") - suppression_reasons = [] - - sorted_rules_by_priority = sorted(self.get_exclusion_rules(cohort, suppression_rules), key=priority_getter) - - for _, rule_group in groupby(sorted_rules_by_priority, key=priority_getter): - status, group_inclusion_reasons, group_exclusion_reasons, rule_stop = self.evaluate_rules_priority_group( - rule_group ) - if status.is_exclusion: - is_actionable = False - suppression_reasons.extend(group_exclusion_reasons) - if rule_stop: - break - - if cohort.cohort_label is not None: - key = cohort.cohort_label - if is_actionable: - cohort_results[key] = CohortGroupResult( - cohort.cohort_group, Status.actionable, [], cohort.positive_description, suppression_reasons - ) - else: - cohort_results[key] = CohortGroupResult( - cohort.cohort_group, - Status.not_actionable, - suppression_reasons, - cohort.positive_description, - suppression_reasons, - ) - def evaluate_rules_priority_group( - self, rules_group: Iterator[rules.IterationRule] - ) -> tuple[eligibility.Status, list[eligibility.Reason], list[eligibility.Reason], bool]: - is_rule_stop = False - inclusion_reasons, exclusion_reasons = [], [] - best_status = eligibility.Status.not_eligible - - for rule in rules_group: - is_rule_stop = rule.rule_stop or is_rule_stop - rule_calculator = RuleCalculator(person_data=self.person_data, rule=rule) - status, reason = rule_calculator.evaluate_exclusion() - if status.is_exclusion: - best_status = eligibility.Status.best(status, best_status) - exclusion_reasons.append(reason) - else: 
- best_status = eligibility.Status.actionable - inclusion_reasons.append(reason) - - return best_status, inclusion_reasons, exclusion_reasons, is_rule_stop + return results @staticmethod - def get_actions_from_comms(action_mapper: ActionsMapper, comms: str) -> list[SuggestedAction] | None: - suggested_actions: list[SuggestedAction] = [] - for comm in comms.split("|"): - action = action_mapper.get(comm) - if action is not None: - suggested_actions.append( - SuggestedAction( - internal_action_code=InternalActionCode(comm), - action_type=ActionType(action.action_type), - action_code=ActionCode(action.action_code), - action_description=ActionDescription(action.action_description) - if action.action_description - else None, - url_link=UrlLink(action.url_link) if action.url_link else None, - url_label=UrlLabel(action.url_label) if action.url_label else None, - ) - ) - return suggested_actions + def deduplicate_reasons(group_results: list[CohortGroupResult]) -> list[Reason]: + all_reasons = chain.from_iterable(group_result.reasons for group_result in group_results) + deduped = {} + for reason in all_reasons: + key = (reason.rule_type, reason.rule_priority) + deduped.setdefault(key, reason) + return list(deduped.values()) diff --git a/src/eligibility_signposting_api/services/calculators/rule_calculator.py b/src/eligibility_signposting_api/services/calculators/rule_calculator.py index 145a1e89f..c9fd7f41b 100644 --- a/src/eligibility_signposting_api/services/calculators/rule_calculator.py +++ b/src/eligibility_signposting_api/services/calculators/rule_calculator.py @@ -1,31 +1,37 @@ from __future__ import annotations -from collections.abc import Collection, Mapping -from dataclasses import dataclass -from typing import Any +from dataclasses import dataclass, field +from typing import TYPE_CHECKING from hamcrest.core.string_description import StringDescription -from eligibility_signposting_api.model import eligibility, rules -from 
eligibility_signposting_api.services.rules.operators import OperatorRegistry +from eligibility_signposting_api.model import eligibility_status +from eligibility_signposting_api.model.campaign_config import IterationRule, RuleAttributeLevel, RuleType +from eligibility_signposting_api.services.operators.operators import OperatorRegistry +from eligibility_signposting_api.services.processors.person_data_reader import PersonDataReader -Row = Collection[Mapping[str, Any]] +if TYPE_CHECKING: + from collections.abc import Mapping + + from eligibility_signposting_api.model.person import Person @dataclass class RuleCalculator: - person_data: Row - rule: rules.IterationRule + person: Person + rule: IterationRule + + person_data_reader: PersonDataReader = field(default_factory=PersonDataReader) - def evaluate_exclusion(self) -> tuple[eligibility.Status, eligibility.Reason]: + def evaluate_exclusion(self) -> tuple[eligibility_status.Status, eligibility_status.Reason]: """Evaluate if a particular rule excludes this person. 
Return the result, and the reason for the result.""" attribute_value = self.get_attribute_value() status, reason, matcher_matched = self.evaluate_rule(attribute_value) - reason = eligibility.Reason( - rule_name=eligibility.RuleName(self.rule.name), - rule_type=eligibility.RuleType(self.rule.type), - rule_priority=eligibility.RulePriority(str(self.rule.priority)), - rule_description=eligibility.RuleDescription(self.rule.description), + reason = eligibility_status.Reason( + rule_name=eligibility_status.RuleName(self.rule.name), + rule_type=eligibility_status.RuleType(self.rule.type), + rule_priority=eligibility_status.RulePriority(str(self.rule.priority)), + rule_description=eligibility_status.RuleDescription(self.rule.description), matcher_matched=matcher_matched, ) return status, reason @@ -33,32 +39,24 @@ def evaluate_exclusion(self) -> tuple[eligibility.Status, eligibility.Reason]: def get_attribute_value(self) -> str | None: """Pull out the correct attribute for a rule from the person's data.""" match self.rule.attribute_level: - case rules.RuleAttributeLevel.PERSON: + case RuleAttributeLevel.PERSON: person: Mapping[str, str | None] | None = next( - (r for r in self.person_data if r.get("ATTRIBUTE_TYPE", "") == "PERSON"), None + (r for r in self.person.data if r.get("ATTRIBUTE_TYPE", "") == "PERSON"), None ) attribute_value = person.get(str(self.rule.attribute_name)) if person else None - case rules.RuleAttributeLevel.COHORT: + case RuleAttributeLevel.COHORT: cohorts: Mapping[str, str | None] | None = next( - (r for r in self.person_data if r.get("ATTRIBUTE_TYPE", "") == "COHORTS"), None + (r for r in self.person.data if r.get("ATTRIBUTE_TYPE", "") == "COHORTS"), None ) if cohorts: - attr_name = ( - "COHORT_MAP" - if not self.rule.attribute_name or self.rule.attribute_name == "COHORT_LABEL" - else self.rule.attribute_name - ) - cohort_map = self.get_value(cohorts, attr_name) - cohorts_dict = self.get_value(cohort_map, "cohorts") - m_dict = 
self.get_value(cohorts_dict, "M") - person_cohorts: set[str] = set(m_dict.keys()) + person_cohorts = self.person_data_reader.get_person_cohorts(self.person) attribute_value = ",".join(person_cohorts) else: attribute_value = None - case rules.RuleAttributeLevel.TARGET: + case RuleAttributeLevel.TARGET: target: Mapping[str, str | None] | None = next( - (r for r in self.person_data if r.get("ATTRIBUTE_TYPE", "") == self.rule.attribute_target), None + (r for r in self.person.data if r.get("ATTRIBUTE_TYPE", "") == self.rule.attribute_target), None ) attribute_value = target.get(str(self.rule.attribute_name)) if target else None case _: # pragma: no cover @@ -66,12 +64,7 @@ def get_attribute_value(self) -> str | None: raise NotImplementedError(msg) return attribute_value - @staticmethod - def get_value(dictionary: Mapping[str, Any] | None, key: str) -> dict: - v = dictionary.get(key, {}) if isinstance(dictionary, dict) else {} - return v if isinstance(v, dict) else {} - - def evaluate_rule(self, attribute_value: str | None) -> tuple[eligibility.Status, str, bool]: + def evaluate_rule(self, attribute_value: str | None) -> tuple[eligibility_status.Status, str, bool]: """Evaluate a rule against a person data attribute. 
Return the result, and the reason for the result.""" matcher_class = OperatorRegistry.get(self.rule.operator) matcher = matcher_class(rule_value=self.rule.comparator) @@ -81,10 +74,12 @@ def evaluate_rule(self, attribute_value: str | None) -> tuple[eligibility.Status if matcher_matched: matcher.describe_match(attribute_value, reason) status = { - rules.RuleType.filter: eligibility.Status.not_eligible, - rules.RuleType.suppression: eligibility.Status.not_actionable, - rules.RuleType.redirect: eligibility.Status.actionable, + RuleType.filter: eligibility_status.Status.not_eligible, + RuleType.suppression: eligibility_status.Status.not_actionable, + RuleType.redirect: eligibility_status.Status.actionable, + RuleType.not_eligible_actions: eligibility_status.Status.not_eligible, + RuleType.not_actionable_actions: eligibility_status.Status.not_actionable, }[self.rule.type] return status, str(reason), matcher_matched matcher.describe_mismatch(attribute_value, reason) - return eligibility.Status.actionable, str(reason), matcher_matched + return eligibility_status.Status.actionable, str(reason), matcher_matched diff --git a/src/eligibility_signposting_api/services/eligibility_services.py b/src/eligibility_signposting_api/services/eligibility_services.py index 48586290b..465f73b08 100644 --- a/src/eligibility_signposting_api/services/eligibility_services.py +++ b/src/eligibility_signposting_api/services/eligibility_services.py @@ -2,7 +2,7 @@ from wireup import service -from eligibility_signposting_api.model import eligibility +from eligibility_signposting_api.model import eligibility_status from eligibility_signposting_api.repos import CampaignRepo, NotFoundError, PersonRepo from eligibility_signposting_api.services.calculators import eligibility_calculator as calculator @@ -32,11 +32,11 @@ def __init__( def get_eligibility_status( self, - nhs_number: eligibility.NHSNumber, + nhs_number: eligibility_status.NHSNumber, include_actions: str, conditions: list[str], category: 
str, - ) -> eligibility.EligibilityStatus: + ) -> eligibility_status.EligibilityStatus: """Calculate a person's eligibility for vaccination given an NHS number.""" if nhs_number: try: @@ -55,6 +55,6 @@ def get_eligibility_status( raise UnknownPersonError from e else: calc: calculator.EligibilityCalculator = self.calculator_factory.get(person_data, campaign_configs) - return calc.evaluate_eligibility(include_actions, conditions, category) + return calc.get_eligibility_status(include_actions, conditions, category) raise UnknownPersonError # pragma: no cover diff --git a/src/eligibility_signposting_api/services/operators/__init__.py b/src/eligibility_signposting_api/services/operators/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/eligibility_signposting_api/services/rules/operators.py b/src/eligibility_signposting_api/services/operators/operators.py similarity index 99% rename from src/eligibility_signposting_api/services/rules/operators.py rename to src/eligibility_signposting_api/services/operators/operators.py index 1f9c4af85..565503093 100644 --- a/src/eligibility_signposting_api/services/rules/operators.py +++ b/src/eligibility_signposting_api/services/operators/operators.py @@ -11,7 +11,7 @@ from hamcrest.core.base_matcher import BaseMatcher from hamcrest.core.description import Description -from eligibility_signposting_api.model.rules import RuleOperator +from eligibility_signposting_api.model.campaign_config import RuleOperator logger = logging.getLogger(__name__) diff --git a/src/eligibility_signposting_api/services/processors/__init__.py b/src/eligibility_signposting_api/services/processors/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/eligibility_signposting_api/services/processors/action_rule_handler.py b/src/eligibility_signposting_api/services/processors/action_rule_handler.py new file mode 100644 index 000000000..6ff38bd05 --- /dev/null +++ 
b/src/eligibility_signposting_api/services/processors/action_rule_handler.py @@ -0,0 +1,103 @@ +from itertools import groupby +from operator import attrgetter + +from eligibility_signposting_api.model.campaign_config import ( + ActionsMapper, + Iteration, + IterationRule, +) +from eligibility_signposting_api.model.eligibility_status import ( + ActionCode, + ActionDescription, + ActionType, + InternalActionCode, + IterationResult, + MatchedActionDetail, + RuleType, + SuggestedAction, + UrlLabel, + UrlLink, +) +from eligibility_signposting_api.model.person import Person +from eligibility_signposting_api.services.calculators.rule_calculator import RuleCalculator + + +class ActionRuleHandler: + def get_actions( + self, + person: Person, + active_iteration: Iteration | None, + best_iteration_result: IterationResult, + *, + include_actions_flag: bool, + ) -> MatchedActionDetail: + action_detail = MatchedActionDetail() + + if active_iteration is not None and include_actions_flag: + rule_type = best_iteration_result.status.get_action_rule_type() + action_detail = self._handle(person, active_iteration, rule_type) + + return action_detail + + def _handle(self, person: Person, best_active_iteration: Iteration, rule_type: RuleType) -> MatchedActionDetail: + action_rules, action_mapper, default_comms = self._get_action_rules_components(best_active_iteration, rule_type) + + priority_getter = attrgetter("priority") + sorted_rules_by_priority = sorted(action_rules, key=priority_getter) + + actions: list[SuggestedAction] | None = self._get_actions_from_comms(action_mapper, default_comms) # pyright: ignore[reportArgumentType] + + matched_action_rule_priority, matched_action_rule_name = None, None + for _, rule_group in groupby(sorted_rules_by_priority, key=priority_getter): + rule_group_list = list(rule_group) + matcher_matched_list = [ + RuleCalculator(person=person, rule=rule).evaluate_exclusion()[1].matcher_matched + for rule in rule_group_list + ] + + comms_routing = 
rule_group_list[0].comms_routing + if comms_routing and all(matcher_matched_list): + rule_actions = self._get_actions_from_comms(action_mapper, comms_routing) + if rule_actions and len(rule_actions) > 0: + actions = rule_actions + matched_action_rule_priority = rule_group_list[0].priority + matched_action_rule_name = rule_group_list[0].name + break + + return MatchedActionDetail(matched_action_rule_name, matched_action_rule_priority, actions) + + @staticmethod + def _get_action_rules_components( + active_iteration: Iteration, rule_type: RuleType + ) -> tuple[tuple[IterationRule, ...], ActionsMapper, str | None]: + action_rules = tuple(rule for rule in active_iteration.iteration_rules if rule.type in rule_type) + + routing_map = { + RuleType.redirect: active_iteration.default_comms_routing, + RuleType.not_eligible_actions: active_iteration.default_not_eligible_routing, + RuleType.not_actionable_actions: active_iteration.default_not_actionable_routing, + } + + default_comms = routing_map.get(rule_type) + action_mapper = active_iteration.actions_mapper + return action_rules, action_mapper, default_comms + + @staticmethod + def _get_actions_from_comms(action_mapper: ActionsMapper, comms: str) -> list[SuggestedAction] | None: + suggested_actions: list[SuggestedAction] = [] + for comm in comms.split("|"): + action = action_mapper.get(comm) + if action is not None: + suggested_actions.append( + SuggestedAction( + internal_action_code=InternalActionCode(comm), + action_type=ActionType(action.action_type), + action_code=ActionCode(action.action_code), + action_description=ActionDescription(action.action_description) + if action.action_description + else None, + url_link=UrlLink(action.url_link) if action.url_link else None, + url_label=UrlLabel(action.url_label) if action.url_label else None, + ) + ) + return suggested_actions diff --git a/src/eligibility_signposting_api/services/processors/campaign_evaluator.py 
b/src/eligibility_signposting_api/services/processors/campaign_evaluator.py new file mode 100644 index 000000000..864d45c8c --- /dev/null +++ b/src/eligibility_signposting_api/services/processors/campaign_evaluator.py @@ -0,0 +1,43 @@ +from collections.abc import Collection, Iterator +from itertools import groupby +from operator import attrgetter + +from wireup import service + +from eligibility_signposting_api.model import eligibility_status +from eligibility_signposting_api.model.campaign_config import CampaignConfig + + +@service +class CampaignEvaluator: + """Filters and groups campaign configurations.""" + + def get_active_campaigns(self, campaign_configs: Collection[CampaignConfig]) -> list[CampaignConfig]: + return [cc for cc in campaign_configs if cc.campaign_live] + + def get_requested_grouped_campaigns( + self, campaign_configs: Collection[CampaignConfig], conditions: list[str], category: str + ) -> Iterator[tuple[eligibility_status.ConditionName, list[CampaignConfig]]]: + mapping = { + "ALL": {"V", "S"}, + "VACCINATIONS": {"V"}, + "SCREENING": {"S"}, + } + + allowed_types = mapping.get(category, set()) + + filter_all_conditions = "ALL" in conditions + + active_campaigns = self.get_active_campaigns(campaign_configs) + + for condition_name, campaign_group in groupby( + sorted(active_campaigns, key=attrgetter("target")), + key=attrgetter("target"), + ): + campaigns = list(campaign_group) + if ( + campaigns + and campaigns[0].type in allowed_types + and (filter_all_conditions or str(condition_name) in conditions) + ): + yield condition_name, campaigns diff --git a/src/eligibility_signposting_api/services/processors/cohort_handler.py b/src/eligibility_signposting_api/services/processors/cohort_handler.py new file mode 100644 index 000000000..d5848e52b --- /dev/null +++ b/src/eligibility_signposting_api/services/processors/cohort_handler.py @@ -0,0 +1,110 @@ +from __future__ import annotations + +from abc import ABC, abstractmethod +from typing import 
TYPE_CHECKING + +from eligibility_signposting_api.model.eligibility_status import CohortGroupResult, Status + +if TYPE_CHECKING: + from collections.abc import Iterable + + from eligibility_signposting_api.model.campaign_config import CohortLabel, IterationCohort, IterationRule + from eligibility_signposting_api.model.person import Person + from eligibility_signposting_api.services.processors.rule_processor import RuleProcessor + + +class CohortEligibilityHandler(ABC): + """Abstract base class for eligibility/actionability handlers.""" + + def __init__(self, next_handler: CohortEligibilityHandler | None = None) -> None: + self.next_handler = next_handler + + @abstractmethod + def handle( + self, + person: Person, + cohort: IterationCohort, + cohort_results: dict[CohortLabel, CohortGroupResult], + rules_processor: RuleProcessor, + ) -> None: + """Handles a part of the eligibility/actionability check or passes to the next handler.""" + + def next(self, next_handler: CohortEligibilityHandler) -> CohortEligibilityHandler: + """Sets the next handler in the chain and returns this handler for chaining.""" + self.next_handler = next_handler + return next_handler + + def pass_to_next( + self, + person: Person, + cohort: IterationCohort, + cohort_results: dict[CohortLabel, CohortGroupResult], + rules_processor: RuleProcessor, + ) -> None: + """Passes the request to the next handler in the chain if one exists.""" + if self.next_handler: + self.next_handler.handle(person, cohort, cohort_results, rules_processor) + + +class BaseEligibilityHandler(CohortEligibilityHandler): + """Handles the base eligibility check (person in cohort or magic cohort).""" + + def handle( + self, + person: Person, + cohort: IterationCohort, + cohort_results: dict[CohortLabel, CohortGroupResult], + rules_processor: RuleProcessor, + ) -> None: + if not rules_processor.is_base_eligible(person, cohort): + cohort_results[cohort.cohort_label] = CohortGroupResult( + cohort.cohort_group, + 
Status.not_eligible, + [], + cohort.negative_description, + [], + ) + return + + self.pass_to_next(person, cohort, cohort_results, rules_processor) + + +class FilterRuleHandler(CohortEligibilityHandler): + """Handles the eligibility check based on filter rules.""" + + def __init__( + self, filter_rules: Iterable[IterationRule], next_handler: CohortEligibilityHandler | None = None + ) -> None: + super().__init__(next_handler) + self.filter_rules = filter_rules + + def handle( + self, + person: Person, + cohort: IterationCohort, + cohort_results: dict[CohortLabel, CohortGroupResult], + rules_processor: RuleProcessor, + ) -> None: + if not rules_processor.is_eligible(person, cohort, cohort_results, self.filter_rules): + return + + self.pass_to_next(person, cohort, cohort_results, rules_processor) + + +class SuppressionRuleHandler(CohortEligibilityHandler): + """Handles the actionability check based on suppression rules.""" + + def __init__( + self, suppression_rules: Iterable[IterationRule], next_handler: CohortEligibilityHandler | None = None + ) -> None: + super().__init__(next_handler) + self.suppression_rules = suppression_rules + + def handle( + self, + person: Person, + cohort: IterationCohort, + cohort_results: dict[CohortLabel, CohortGroupResult], + rules_processor: RuleProcessor, + ) -> None: + rules_processor.is_actionable(person, cohort, cohort_results, self.suppression_rules) diff --git a/src/eligibility_signposting_api/services/processors/person_data_reader.py b/src/eligibility_signposting_api/services/processors/person_data_reader.py new file mode 100644 index 000000000..6b365ae7f --- /dev/null +++ b/src/eligibility_signposting_api/services/processors/person_data_reader.py @@ -0,0 +1,25 @@ +from __future__ import annotations + +from wireup import service + +from eligibility_signposting_api.model.person import Person + + +@service +class PersonDataReader: + """Handles extracting and interpreting person data.""" + + def get_person_cohorts(self, person: 
Person) -> set[str]: + cohorts_row: Person = Person([]) + for data in person.data: + if data.get("ATTRIBUTE_TYPE") == "COHORTS": + cohorts_row.data.append(data) + + person_cohorts = set() + + if cohorts_row.data: + for membership in cohorts_row.data[0].get("COHORT_MEMBERSHIPS", []): + if membership.get("COHORT_LABEL"): + person_cohorts.add(membership.get("COHORT_LABEL")) + + return person_cohorts diff --git a/src/eligibility_signposting_api/services/processors/rule_processor.py b/src/eligibility_signposting_api/services/processors/rule_processor.py new file mode 100644 index 000000000..8b49d2778 --- /dev/null +++ b/src/eligibility_signposting_api/services/processors/rule_processor.py @@ -0,0 +1,184 @@ +from __future__ import annotations + +from dataclasses import dataclass, field +from itertools import groupby +from operator import attrgetter +from typing import TYPE_CHECKING + +from wireup import service + +from eligibility_signposting_api.model import eligibility_status +from eligibility_signposting_api.model.campaign_config import ( + CohortLabel, + Iteration, + IterationCohort, + IterationRule, + RuleType, +) +from eligibility_signposting_api.model.eligibility_status import CohortGroupResult, Status +from eligibility_signposting_api.services.calculators.rule_calculator import RuleCalculator +from eligibility_signposting_api.services.processors.cohort_handler import ( + BaseEligibilityHandler, + FilterRuleHandler, + SuppressionRuleHandler, +) +from eligibility_signposting_api.services.processors.person_data_reader import PersonDataReader + +if TYPE_CHECKING: + from collections.abc import Iterable, Iterator + + from eligibility_signposting_api.model.person import Person + + +@service +@dataclass +class RuleProcessor: + """Handles the processing and evaluation of different rules (filter, suppression) against person data.""" + + person_data_reader: PersonDataReader = field(default_factory=PersonDataReader) + + def is_base_eligible(self, person: Person, cohort: 
IterationCohort) -> bool: + person_cohorts = self.person_data_reader.get_person_cohorts(person) + return cohort.cohort_label in person_cohorts or cohort.is_magic_cohort + + def is_eligible( + self, + person: Person, + cohort: IterationCohort, + cohort_results: dict[CohortLabel, CohortGroupResult], + filter_rules: Iterable[IterationRule], + ) -> bool: + is_eligible = True + priority_getter = attrgetter("priority") + sorted_rules_by_priority = sorted(filter_rules, key=priority_getter) + + for _, rule_group in groupby(sorted_rules_by_priority, key=priority_getter): + group_rules = list(rule_group) + if self._should_skip_rule_group(cohort, group_rules): + continue + status, group_exclusion_reasons, _ = self.evaluate_rules_priority_group(person, iter(group_rules)) + if status.is_exclusion: + if cohort.cohort_label is not None: + cohort_results[cohort.cohort_label] = CohortGroupResult( + cohort.cohort_group, + Status.not_eligible, + [], + cohort.negative_description, + group_exclusion_reasons, + ) + is_eligible = False + break + + return is_eligible + + def is_actionable( + self, + person: Person, + cohort: IterationCohort, + cohort_results: dict[CohortLabel, CohortGroupResult], + suppression_rules: Iterable[IterationRule], + ) -> None: + is_actionable: bool = True + priority_getter = attrgetter("priority") + suppression_reasons = [] + + sorted_rules_by_priority = sorted(suppression_rules, key=priority_getter) + + for _, rule_group in groupby(sorted_rules_by_priority, key=priority_getter): + group_rules = list(rule_group) + if self._should_skip_rule_group(cohort, group_rules): + continue + + status, group_exclusion_reasons, rule_stop = self.evaluate_rules_priority_group(person, iter(group_rules)) + if status.is_exclusion: + is_actionable = False + suppression_reasons.extend(group_exclusion_reasons) + if rule_stop: + break + + if cohort.cohort_label is not None: + key = cohort.cohort_label + if is_actionable: + cohort_results[key] = CohortGroupResult( + 
cohort.cohort_group, Status.actionable, [], cohort.positive_description, suppression_reasons + ) + else: + cohort_results[key] = CohortGroupResult( + cohort.cohort_group, + Status.not_actionable, + suppression_reasons, + cohort.positive_description, + suppression_reasons, + ) + + @staticmethod + def _should_skip_rule_group(cohort: IterationCohort, group_rules: list[IterationRule]) -> bool: + cohort_specific_rules = [rule for rule in group_rules if rule.cohort_label is not None] + matching_specific_rules = [rule for rule in cohort_specific_rules if rule.cohort_label == cohort.cohort_label] + return bool(cohort_specific_rules and not matching_specific_rules) + + def evaluate_rules_priority_group( + self, person: Person, rules_group: Iterator[IterationRule] + ) -> tuple[eligibility_status.Status, list[eligibility_status.Reason], bool]: + is_rule_stop = False + exclusion_reasons = [] + best_status = eligibility_status.Status.not_eligible + + for rule in rules_group: + is_rule_stop = rule.rule_stop or is_rule_stop + rule_calculator = RuleCalculator(person=person, rule=rule) + status, reason = rule_calculator.evaluate_exclusion() + if status.is_exclusion: + best_status = eligibility_status.Status.best(status, best_status) + exclusion_reasons.append(reason) + else: + best_status = eligibility_status.Status.actionable + + return best_status, exclusion_reasons, is_rule_stop + + @staticmethod + def get_exclusion_rules(cohort: IterationCohort, rules: Iterable[IterationRule]) -> Iterator[IterationRule]: + return ( + ir + for ir in rules + if ir.cohort_label is None + or cohort.cohort_label == ir.cohort_label + or (isinstance(ir.cohort_label, (list, set, tuple)) and cohort.cohort_label in ir.cohort_label) + ) + + def get_cohort_group_results( + self, person: Person, active_iteration: Iteration + ) -> dict[CohortLabel, CohortGroupResult]: + cohort_results: dict[CohortLabel, CohortGroupResult] = {} + filter_rules, suppression_rules = self.get_rules_by_type(active_iteration) + + 
cohort_base_handler = BaseEligibilityHandler() + filter_rule_handler = FilterRuleHandler(filter_rules=filter_rules) + suppression_rule_handler = SuppressionRuleHandler(suppression_rules=suppression_rules) + + cohort_base_handler.next(filter_rule_handler).next(suppression_rule_handler) + + for cohort in sorted(active_iteration.iteration_cohorts, key=attrgetter("priority")): + cohort_base_handler.handle(person, cohort, cohort_results, self) + + return cohort_results + + def get_not_base_eligible_results( + self, cohort: IterationCohort, cohort_results: dict[str, CohortGroupResult] + ) -> dict[str, CohortGroupResult]: + cohort_results[cohort.cohort_label] = CohortGroupResult( + cohort.cohort_group, + Status.not_eligible, + [], + cohort.negative_description, + [], + ) + return cohort_results + + @staticmethod + def get_rules_by_type(active_iteration: Iteration) -> tuple[tuple[IterationRule, ...], tuple[IterationRule, ...]]: + filter_rules, suppression_rules = ( + tuple(rule for rule in active_iteration.iteration_rules if attrgetter("type")(rule) == rule_type) + for rule_type in (RuleType.filter, RuleType.suppression) + ) + return filter_rules, suppression_rules diff --git a/src/eligibility_signposting_api/views/eligibility.py b/src/eligibility_signposting_api/views/eligibility.py index 0f507f65c..383d73d13 100644 --- a/src/eligibility_signposting_api/views/eligibility.py +++ b/src/eligibility_signposting_api/views/eligibility.py @@ -8,18 +8,18 @@ from flask.typing import ResponseReturnValue from wireup import Injected -from eligibility_signposting_api.api_error_response import NHS_NUMBER_NOT_FOUND_ERROR from eligibility_signposting_api.audit.audit_context import AuditContext from eligibility_signposting_api.audit.audit_service import AuditService -from eligibility_signposting_api.model.eligibility import Condition, EligibilityStatus, NHSNumber, Status +from eligibility_signposting_api.common.api_error_response import NHS_NUMBER_NOT_FOUND_ERROR +from 
eligibility_signposting_api.model.eligibility_status import Condition, EligibilityStatus, NHSNumber, Status from eligibility_signposting_api.services import EligibilityService, UnknownPersonError -from eligibility_signposting_api.views.response_model import eligibility -from eligibility_signposting_api.views.response_model.eligibility import ProcessedSuggestion +from eligibility_signposting_api.views.response_model import eligibility_response +from eligibility_signposting_api.views.response_model.eligibility_response import ProcessedSuggestion STATUS_MAPPING = { - Status.actionable: eligibility.Status.actionable, - Status.not_actionable: eligibility.Status.not_actionable, - Status.not_eligible: eligibility.Status.not_eligible, + Status.actionable: eligibility_response.Status.actionable, + Status.not_actionable: eligibility_response.Status.not_actionable, + Status.not_eligible: eligibility_response.Status.not_eligible, } logger = logging.getLogger(__name__) @@ -29,13 +29,6 @@ @eligibility_blueprint.before_request def before_request() -> None: - logger.info( - "request details", - extra={ - "X-Request-ID": request.headers.get("X-Request-ID"), - "X-Correlation-ID": request.headers.get("X-Correlation-ID"), - }, - ) AuditContext.add_request_details(request) @@ -56,11 +49,9 @@ def check_eligibility( except UnknownPersonError: return handle_unknown_person_error(nhs_number) else: - eligibility_response: eligibility.EligibilityResponse = build_eligibility_response(eligibility_status) + response: eligibility_response.EligibilityResponse = build_eligibility_response(eligibility_status) AuditContext.write_to_firehose(audit_service) - return make_response( - eligibility_response.model_dump(by_alias=True, mode="json", exclude_none=True), HTTPStatus.OK - ) + return make_response(response.model_dump(by_alias=True, mode="json", exclude_none=True), HTTPStatus.OK) def get_or_default_query_params() -> dict[str, Any]: @@ -100,10 +91,10 @@ def handle_unknown_person_error(nhs_number: 
NHSNumber) -> ResponseReturnValue: response = NHS_NUMBER_NOT_FOUND_ERROR.log_and_generate_response( log_message=diagnostics, diagnostics=diagnostics, location_param="id" ) - return make_response(response.get("body"), response.get("statusCode")) + return make_response(response.get("body"), response.get("statusCode"), response.get("headers")) -def build_eligibility_response(eligibility_status: EligibilityStatus) -> eligibility.EligibilityResponse: +def build_eligibility_response(eligibility_status: EligibilityStatus) -> eligibility_response.EligibilityResponse: """Return an object representing the API response we are going to send, given an evaluation of the person's eligibility.""" @@ -111,9 +102,9 @@ def build_eligibility_response(eligibility_status: EligibilityStatus) -> eligibi for condition in eligibility_status.conditions: suggestions = ProcessedSuggestion( # pyright: ignore[reportCallIssue] - condition=eligibility.ConditionName(condition.condition_name), # pyright: ignore[reportCallIssue] + condition=eligibility_response.ConditionName(condition.condition_name), # pyright: ignore[reportCallIssue] status=STATUS_MAPPING[condition.status], - statusText=eligibility.StatusText(f"{condition.status}"), # pyright: ignore[reportCallIssue] + statusText=eligibility_response.StatusText(condition.status_text), # pyright: ignore[reportCallIssue] eligibilityCohorts=build_eligibility_cohorts(condition), # pyright: ignore[reportCallIssue] suitabilityRules=build_suitability_results(condition), # pyright: ignore[reportCallIssue] actions=build_actions(condition), @@ -122,27 +113,29 @@ def build_eligibility_response(eligibility_status: EligibilityStatus) -> eligibi processed_suggestions.append(suggestions) response_id = uuid.uuid4() - updated = eligibility.LastUpdated(datetime.now(tz=UTC)) + updated = eligibility_response.LastUpdated(datetime.now(tz=UTC)) AuditContext.add_response_details(response_id, updated) - return eligibility.EligibilityResponse( # pyright: 
ignore[reportCallIssue] + return eligibility_response.EligibilityResponse( # pyright: ignore[reportCallIssue] responseId=response_id, # pyright: ignore[reportCallIssue] - meta=eligibility.Meta(lastUpdated=updated), + meta=eligibility_response.Meta(lastUpdated=updated), # pyright: ignore[reportCallIssue] processedSuggestions=processed_suggestions, ) -def build_actions(condition: Condition) -> list[eligibility.Action] | None: +def build_actions(condition: Condition) -> list[eligibility_response.Action] | None: if condition.actions is not None: return [ - eligibility.Action( - actionType=eligibility.ActionType(action.action_type), - actionCode=eligibility.ActionCode(action.action_code), - description=eligibility.Description(action.action_description or ""), - urlLabel=eligibility.UrlLabel(action.url_label or ""), - urlLink=eligibility.UrlLink(str(action.url_link)) if action.url_link else eligibility.UrlLink(""), + eligibility_response.Action( + actionType=eligibility_response.ActionType(action.action_type), + actionCode=eligibility_response.ActionCode(action.action_code), + description=eligibility_response.Description(action.action_description or ""), + urlLabel=eligibility_response.UrlLabel(action.url_label or ""), + urlLink=eligibility_response.UrlLink(str(action.url_link)) + if action.url_link + else eligibility_response.UrlLink(""), ) for action in condition.actions ] @@ -150,13 +143,13 @@ def build_actions(condition: Condition) -> list[eligibility.Action] | None: return None -def build_eligibility_cohorts(condition: Condition) -> list[eligibility.EligibilityCohort]: +def build_eligibility_cohorts(condition: Condition) -> list[eligibility_response.EligibilityCohort]: """Group Iteration cohorts and make only one entry per cohort group""" return [ - eligibility.EligibilityCohort( - cohortCode=eligibility.CohortCode(cohort_result.cohort_code), - cohortText=eligibility.CohortText(cohort_result.description), + eligibility_response.EligibilityCohort( + 
cohortCode=eligibility_response.CohortCode(cohort_result.cohort_code), + cohortText=eligibility_response.CohortText(cohort_result.description), cohortStatus=STATUS_MAPPING[cohort_result.status], ) for cohort_result in condition.cohort_results @@ -164,25 +157,16 @@ def build_eligibility_cohorts(condition: Condition) -> list[eligibility.Eligibil ] -def build_suitability_results(condition: Condition) -> list[eligibility.SuitabilityRule]: - """Make only one entry if there are duplicate rules""" +def build_suitability_results(condition: Condition) -> list[eligibility_response.SuitabilityRule]: if condition.status != Status.not_actionable: return [] - unique_rule_codes = set() - suitability_results = [] - - for cohort_result in condition.cohort_results: - if cohort_result.status == Status.not_actionable: - for reason in cohort_result.reasons: - if reason.rule_name not in unique_rule_codes and reason.rule_description: - unique_rule_codes.add(reason.rule_name) - suitability_results.append( - eligibility.SuitabilityRule( - ruleType=eligibility.RuleType(reason.rule_type.value), - ruleCode=eligibility.RuleCode(reason.rule_name), - ruleText=eligibility.RuleText(reason.rule_description), - ) - ) - - return suitability_results + return [ + eligibility_response.SuitabilityRule( + ruleType=eligibility_response.RuleType(reason.rule_type.value), + ruleCode=eligibility_response.RuleCode(reason.rule_name), + ruleText=eligibility_response.RuleText(reason.rule_description), + ) + for reason in condition.suitability_rules + if reason.rule_description + ] diff --git a/src/eligibility_signposting_api/views/response_model/eligibility.py b/src/eligibility_signposting_api/views/response_model/eligibility_response.py similarity index 100% rename from src/eligibility_signposting_api/views/response_model/eligibility.py rename to src/eligibility_signposting_api/views/response_model/eligibility_response.py diff --git a/src/rules_validation_api/README.md b/src/rules_validation_api/README.md new 
file mode 100644 index 000000000..68d314d1a --- /dev/null +++ b/src/rules_validation_api/README.md @@ -0,0 +1,26 @@ +# 🧪 Campaign-config Validation + +This Python script is designed to validate a campaign configuration JSON file. + +## 🛠 Requirements + +- Python 3.13 +- `rules_validation_api` must be installed and accessible +- Campaign configuration JSON file to verify + +## Steps to verify + +- Get to the `rules_validation_api` folder +- Run `python app.py --config_path <path_to_campaign_config.json>` + +## Results + +- `On success`: + + ```text + "Valid Config" is printed + +- `On Failure`: + + ```text + "Errors" is printed diff --git a/src/rules_validation_api/__init__.py b/src/rules_validation_api/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/rules_validation_api/app.py b/src/rules_validation_api/app.py new file mode 100644 index 000000000..feb9464ba --- /dev/null +++ b/src/rules_validation_api/app.py @@ -0,0 +1,29 @@ +import argparse +import json +import sys +from pathlib import Path + +from rules_validation_api.validators.rules_validator import RulesValidation + +GREEN = "\033[92m"  # pragma: no cover +RESET = "\033[0m"  # pragma: no cover +YELLOW = "\033[93m"  # pragma: no cover +RED = "\033[91m"  # pragma: no cover + + +def main() -> None:  # pragma: no cover +    parser = argparse.ArgumentParser(description="Validate campaign configuration.") +    parser.add_argument("--config_path", required=True, help="Path to the campaign config JSON file") +    args = parser.parse_args() + +    try: +        with Path(args.config_path).open() as file: +            json_data = json.load(file) +        RulesValidation(**json_data) +        sys.stdout.write(f"{GREEN}Valid Config{RESET}\n") +    except ValueError as e: +        sys.stderr.write(f"{YELLOW}Validation Error:{RESET} {RED}{e}{RESET}\n") + + +if __name__ == "__main__":  # pragma: no cover +    main() diff --git a/src/rules_validation_api/validators/__init__.py b/src/rules_validation_api/validators/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git
a/src/rules_validation_api/validators/actions_mapper_validator.py b/src/rules_validation_api/validators/actions_mapper_validator.py new file mode 100644 index 000000000..200b056d1 --- /dev/null +++ b/src/rules_validation_api/validators/actions_mapper_validator.py @@ -0,0 +1,13 @@ +from pydantic import model_validator + +from eligibility_signposting_api.model.campaign_config import ActionsMapper + + +class ActionsMapperValidation(ActionsMapper): + @model_validator(mode="after") + def validate_keys(self) -> "ActionsMapperValidation": + invalid_keys = [key for key in self.root if key is None or key == ""] + if invalid_keys: + msg = f"Invalid keys found in ActionsMapper: {invalid_keys}" + raise ValueError(msg) + return self diff --git a/src/rules_validation_api/validators/available_action_validator.py b/src/rules_validation_api/validators/available_action_validator.py new file mode 100644 index 000000000..c8bf788d4 --- /dev/null +++ b/src/rules_validation_api/validators/available_action_validator.py @@ -0,0 +1,5 @@ +from eligibility_signposting_api.model.campaign_config import AvailableAction + + +class AvailableActionValidation(AvailableAction): + pass diff --git a/src/rules_validation_api/validators/campaign_config_validator.py b/src/rules_validation_api/validators/campaign_config_validator.py new file mode 100644 index 000000000..94b007346 --- /dev/null +++ b/src/rules_validation_api/validators/campaign_config_validator.py @@ -0,0 +1,29 @@ +import typing +from operator import attrgetter + +from pydantic import field_validator, model_validator + +from eligibility_signposting_api.model.campaign_config import CampaignConfig, Iteration +from rules_validation_api.validators.iteration_validator import IterationValidation + + +class CampaignConfigValidation(CampaignConfig): + @field_validator("iterations") + @classmethod + def validate_iterations(cls, iterations: list[Iteration]) -> list[IterationValidation]: + return [IterationValidation(**i.model_dump()) for i in 
iterations] + + @model_validator(mode="after") + def check_has_iteration_from_start(self) -> typing.Self: + iterations_by_date = sorted(self.iterations, key=attrgetter("iteration_date")) + if first_iteration := next(iter(iterations_by_date), None): + if first_iteration.iteration_date > self.start_date: + message = ( + f"campaign {self.id} starts on {self.start_date}, " + f"1st iteration starts later - {first_iteration.iteration_date}" + ) + raise ValueError(message) + return self + # Should never happen, since we are constraining self.iterations with a min_length of 1 + message = f"campaign {self.id} has no iterations." + raise ValueError(message) diff --git a/src/rules_validation_api/validators/iteration_cohort_validator.py b/src/rules_validation_api/validators/iteration_cohort_validator.py new file mode 100644 index 000000000..32e1a4b3a --- /dev/null +++ b/src/rules_validation_api/validators/iteration_cohort_validator.py @@ -0,0 +1,5 @@ +from eligibility_signposting_api.model.campaign_config import IterationCohort + + +class IterationCohortValidation(IterationCohort): + pass diff --git a/src/rules_validation_api/validators/iteration_rules_validator.py b/src/rules_validation_api/validators/iteration_rules_validator.py new file mode 100644 index 000000000..341a08c1f --- /dev/null +++ b/src/rules_validation_api/validators/iteration_rules_validator.py @@ -0,0 +1,18 @@ +from typing import Self + +from pydantic import model_validator + +from eligibility_signposting_api.model.campaign_config import IterationRule, RuleAttributeLevel, RuleAttributeName + + +class IterationRuleValidation(IterationRule): + @model_validator(mode="after") + def check_cohort_attribute_name(self) -> Self: + if ( + self.attribute_level == RuleAttributeLevel.COHORT + and self.attribute_name + and self.attribute_name != RuleAttributeName("COHORT_LABEL") + ): + msg = "When attribute_level is COHORT, attribute_name must be COHORT_LABEL or None (default:COHORT_LABEL)" + raise ValueError(msg) + return 
self diff --git a/src/rules_validation_api/validators/iteration_validator.py b/src/rules_validation_api/validators/iteration_validator.py new file mode 100644 index 000000000..c16286ab2 --- /dev/null +++ b/src/rules_validation_api/validators/iteration_validator.py @@ -0,0 +1,152 @@ +import typing + +from pydantic import Field, ValidationError, field_validator, model_validator +from pydantic_core import InitErrorDetails + +from eligibility_signposting_api.model.campaign_config import ( + ActionsMapper, + Iteration, + IterationCohort, + IterationRule, + RuleType, +) +from rules_validation_api.validators.actions_mapper_validator import ActionsMapperValidation +from rules_validation_api.validators.iteration_cohort_validator import IterationCohortValidation +from rules_validation_api.validators.iteration_rules_validator import IterationRuleValidation + + +class IterationValidation(Iteration): + iteration_cohorts: list[IterationCohort] = Field(..., alias="IterationCohorts") + iteration_rules: list[IterationRule] = Field(..., alias="IterationRules") + actions_mapper: ActionsMapper = Field(..., alias="ActionsMapper") + + @field_validator("iteration_rules") + @classmethod + def validate_iteration_rules(cls, iteration_rules: list[IterationRule]) -> list[IterationRuleValidation]: + return [IterationRuleValidation(**i.model_dump()) for i in iteration_rules] + + @field_validator("iteration_cohorts") + @classmethod + def validate_iteration_cohorts(cls, iteration_cohorts: list[IterationCohort]) -> list[IterationCohortValidation]: + return [IterationCohortValidation(**i.model_dump()) for i in iteration_cohorts] + + @field_validator("actions_mapper", mode="after") + @classmethod + def transform_actions_mapper(cls, action_mapper: ActionsMapper) -> ActionsMapper: + ActionsMapperValidation.model_validate(action_mapper.model_dump()) + return action_mapper + + @model_validator(mode="after") + def action_mapper_validation(self) -> typing.Self: + all_errors = [] + + for validator in [ + 
self.validate_default_comms_routing_in_actions_mapper, + self.validate_default_not_eligible_routing_in_actions_mapper, + self.validate_default_not_actionable_routing_in_actions_mapper, + self.validate_iteration_rules_against_actions_mapper, + ]: + try: + validator() + except ValidationError as ve: + all_errors.extend(ve.errors(include_input=False)) + + if all_errors: + raise ValidationError.from_exception_data(title="IterationValidation", line_errors=all_errors) + + return self + + def validate_default_comms_routing_in_actions_mapper(self) -> typing.Self: + default_routes = self.default_comms_routing + actions_keys = list(self.actions_mapper.root.keys()) + line_errors = [] + + for routing in default_routes.split("|"): + cleaned_routing = routing.strip() + if cleaned_routing and (not actions_keys or cleaned_routing not in actions_keys): + error = InitErrorDetails( + type="value_error", + loc=("actions_mapper",), + input=actions_keys, + ctx={"error": f"Missing entry for DefaultCommsRouting '{cleaned_routing}' in ActionsMapper"}, + ) + line_errors.append(error) + + if line_errors: + raise ValidationError.from_exception_data(title="IterationValidation", line_errors=line_errors) + + return self + + def validate_default_not_eligible_routing_in_actions_mapper(self) -> typing.Self: + default_not_eligibile_routes = self.default_not_eligible_routing + actions_keys = list(self.actions_mapper.root.keys()) + line_errors = [] + + for routing in default_not_eligibile_routes.split("|"): + cleaned_routing = routing.strip() + if cleaned_routing and (not actions_keys or cleaned_routing not in actions_keys): + error = InitErrorDetails( + type="value_error", + loc=("actions_mapper",), + input=actions_keys, + ctx={"error": f"Missing entry for DefaultNotEligibleRouting '{cleaned_routing}' in ActionsMapper"}, + ) + line_errors.append(error) + + if line_errors: + raise ValidationError.from_exception_data(title="IterationValidation", line_errors=line_errors) + + return self + + def 
validate_default_not_actionable_routing_in_actions_mapper(self) -> typing.Self: + default_not_actionable_routes = self.default_not_actionable_routing + actions_keys = list(self.actions_mapper.root.keys()) + line_errors = [] + + for routing in default_not_actionable_routes.split("|"): + cleaned_routing = routing.strip() + if cleaned_routing and (not actions_keys or cleaned_routing not in actions_keys): + error = InitErrorDetails( + type="value_error", + loc=("actions_mapper",), + input=actions_keys, + ctx={ + "error": f"Missing entry for DefaultNotActionableRouting '{cleaned_routing}' in ActionsMapper" + }, + ) + line_errors.append(error) + + if line_errors: + raise ValidationError.from_exception_data(title="IterationValidation", line_errors=line_errors) + + return self + + def validate_iteration_rules_against_actions_mapper(self) -> typing.Self: + actions_keys = list(self.actions_mapper.root.keys()) + line_errors = [] + + for rule in self.iteration_rules: + if ( + rule.type + in [ + RuleType.redirect, + RuleType.not_actionable_actions, + RuleType.not_eligible_actions, + ] + and rule.comms_routing + ): + for routing in rule.comms_routing.split("|"): + cleaned_routing = routing.strip() + if cleaned_routing and (not actions_keys or cleaned_routing not in actions_keys): + error = InitErrorDetails( + type="value_error", + loc=("iteration_rules",), + input=actions_keys, + ctx={"error": f"Missing entry for CommsRouting '{cleaned_routing}' in ActionsMapper"}, + ) + line_errors.append(error) + + if line_errors: + raise ValidationError.from_exception_data(title="IterationValidation", line_errors=line_errors) + + return self diff --git a/src/rules_validation_api/validators/rules_validator.py b/src/rules_validation_api/validators/rules_validator.py new file mode 100644 index 000000000..cacb143d0 --- /dev/null +++ b/src/rules_validation_api/validators/rules_validator.py @@ -0,0 +1,11 @@ +from pydantic import field_validator + +from 
eligibility_signposting_api.model.campaign_config import CampaignConfig, Rules +from rules_validation_api.validators.campaign_config_validator import CampaignConfigValidation + + +class RulesValidation(Rules): + @field_validator("campaign_config") + @classmethod + def validate_campaign_config(cls, campaign_config: CampaignConfig) -> CampaignConfig: + return CampaignConfigValidation(**campaign_config.model_dump()) diff --git a/tests/e2e/.hypothesis/unicode_data/15.1.0/charmap.json.gz b/tests/e2e/.hypothesis/unicode_data/15.1.0/charmap.json.gz new file mode 100644 index 000000000..d9cff3e81 Binary files /dev/null and b/tests/e2e/.hypothesis/unicode_data/15.1.0/charmap.json.gz differ diff --git a/tests/e2e/data/configs/storyTestConfigs/AUTO_RSV_ELI-365.json b/tests/e2e/data/configs/storyTestConfigs/AUTO_RSV_ELI-365.json index 0385102c8..534320670 100644 --- a/tests/e2e/data/configs/storyTestConfigs/AUTO_RSV_ELI-365.json +++ b/tests/e2e/data/configs/storyTestConfigs/AUTO_RSV_ELI-365.json @@ -301,6 +301,17 @@ "AttributeName": "ICB", "CommsRouting": "BOOK_LOCAL|BOOK_NBS|HELP_SUPPORT" }, + { + "Type": "R", + "Name": "Within CP Expansion ICB not 80 plus", + "Description": "Book an appointment on NBS as within CP expansion", + "Priority": 1200, + "AttributeLevel": "PERSON", + "AttributeName": "DATE_OF_BIRTH", + "Operator": "Y>", + "Comparator": "-80", + "CommsRouting": "BOOK_LOCAL|BOOK_NBS|HELP_SUPPORT" + }, { "Type": "R", "Name": "Within CP Expansion Local Authority", @@ -312,6 +323,17 @@ "AttributeName": "LOCAL_AUTHORITY", "CommsRouting": "BOOK_LOCAL|BOOK_NBS|HELP_SUPPORT" }, + { + "Type": "R", + "Name": "Within CP Expansion ICB not 80 plus", + "Description": "Book an appointment on NBS as within CP expansion", + "Priority": 1300, + "AttributeLevel": "PERSON", + "AttributeName": "DATE_OF_BIRTH", + "Operator": "Y>", + "Comparator": "-80", + "CommsRouting": "BOOK_LOCAL|BOOK_NBS|HELP_SUPPORT" + }, { "Type": "Y", "Name": "Already vaccinated default text", diff --git 
a/tests/e2e/data/dynamoDB/storyTestData/AUTO_RSV_ELI-320-12.json b/tests/e2e/data/dynamoDB/storyTestData/AUTO_RSV_ELI-320-12.json deleted file mode 100644 index 186f1a402..000000000 --- a/tests/e2e/data/dynamoDB/storyTestData/AUTO_RSV_ELI-320-12.json +++ /dev/null @@ -1,60 +0,0 @@ -{ - "scenario_name": "ELI-320 - Multiple Category Campaigns - Category=VACCINATIONS,screening", - "request_headers": { - "nhs-login-nhs-number": "9990032012" - }, - "query_params": { - "category": "VACCINATIONS,screening" - }, - "config_filenames": ["AUTO_RSV_ELI-320-COVID.json","AUTO_RSV_ELI-320-MMR.json", "AUTO_RSV_ELI-320-SCREENING-1.json", "AUTO_RSV_ELI-320-SCREENING-2.json"], - "data": [ - { - "NHS_NUMBER": "9990032012", - "ATTRIBUTE_TYPE": "COHORTS", - "COHORT_MEMBERSHIPS": [ - { - "COHORT_LABEL": "covid_cohort", - "DATE_JOINED": "20231020" - }, - { - "COHORT_LABEL": "rsv_cohort", - "DATE_JOINED": "20231020" - }, - { - "COHORT_LABEL": "FLU_screening_cohort", - "DATE_JOINED": "20231020" - } - ] - }, - { - "NHS_NUMBER": "9990032012", - "ATTRIBUTE_TYPE": "PERSON", - "DATE_OF_BIRTH": "19500601", - "GENDER": "0", - "POSTCODE": "SG8 6EG", - "POSTCODE_SECTOR": "SG86", - "POSTCODE_OUTCODE": "SG8", - "MSOA": "E02003792", - "LSOA": "E01018267", - "GP_PRACTICE_CODE": "D81046", - "PCN": "U75549", - "ICB": "QUE", - "COMMISSIONING_REGION": "Y61", - "13Q_FLAG": "N", - "CARE_HOME_FLAG": "N", - "DE_FLAG": "N" - }, - { - "NHS_NUMBER": "9990032012", - "ATTRIBUTE_TYPE": "RSV", - "BOOKED_APPOINTMENT_DATE": "<>", - "LAST_SUCCESSFUL_DATE": "<>" - }, - { - "NHS_NUMBER": "9990032012", - "ATTRIBUTE_TYPE": "COVID", - "BOOKED_APPOINTMENT_DATE": "<>", - "LAST_SUCCESSFUL_DATE": "<>" - } - ] -} diff --git a/tests/e2e/data/dynamoDB/storyTestData/AUTO_RSV_ELI-365_023.json b/tests/e2e/data/dynamoDB/storyTestData/AUTO_RSV_ELI-365_023.json index 6091129e0..a8d19465a 100644 --- a/tests/e2e/data/dynamoDB/storyTestData/AUTO_RSV_ELI-365_023.json +++ b/tests/e2e/data/dynamoDB/storyTestData/AUTO_RSV_ELI-365_023.json @@ 
-3,7 +3,9 @@ "request_headers": { "nhs-login-nhs-number": "9900036523" }, - "config_filenames": ["AUTO_RSV_ELI-365.json"], + "config_filenames": [ + "AUTO_RSV_ELI-365.json" + ], "data": [ { "NHS_NUMBER": "9900036523", @@ -12,6 +14,10 @@ { "COHORT_LABEL": "rsv_80_since_02_Sept_2024", "DATE_JOINED": "20231020" + }, + { + "COHORT_LABEL": "rsv_75to79", + "DATE_JOINED": "20231020" } ] }, @@ -38,7 +44,7 @@ "NHS_NUMBER": "9900036523", "ATTRIBUTE_TYPE": "RSV", "LAST_SUCCESSFUL_DATE": null, - "BOOKED_APPOINTMENT_DATE" : null, + "BOOKED_APPOINTMENT_DATE": null, "BOOKED_APPOINTMENT_PROVIDER": null } ] diff --git a/tests/e2e/data/dynamoDB/storyTestData/AUTO_RSV_ELI-365_024.json b/tests/e2e/data/dynamoDB/storyTestData/AUTO_RSV_ELI-365_024.json new file mode 100644 index 000000000..13b4135b9 --- /dev/null +++ b/tests/e2e/data/dynamoDB/storyTestData/AUTO_RSV_ELI-365_024.json @@ -0,0 +1,47 @@ +{ + "scenario_name": "RSV - Actionable - In 3 actions - under 80 - Local Authority", + "request_headers": { + "nhs-login-nhs-number": "9900036526" + }, + "config_filenames": [ + "AUTO_RSV_ELI-365.json" + ], + "data": [ + { + "NHS_NUMBER": "9900036526", + "ATTRIBUTE_TYPE": "COHORTS", + "COHORT_MEMBERSHIPS": [ + { + "COHORT_LABEL": "rsv_80_since_02_Sept_2024", + "DATE_JOINED": "20231020" + } + ] + }, + { + "NHS_NUMBER": "9900036526", + "ATTRIBUTE_TYPE": "PERSON", + "DATE_OF_BIRTH": "<>", + "GENDER": "0", + "POSTCODE": "SG8 6EG", + "POSTCODE_SECTOR": "SG86", + "POSTCODE_OUTCODE": "SG8", + "MSOA": "E02003792", + "LSOA": "E01018267", + "GP_PRACTICE_CODE": "D81046", + "PCN": "U75549", + "ICB": "zz1", + "LOCAL_AUTHORITY": "E08000014", + "COMMISSIONING_REGION": "Y61", + "13Q_FLAG": "N", + "CARE_HOME_FLAG": "N", + "DE_FLAG": "N" + }, + { + "NHS_NUMBER": "9900036526", + "ATTRIBUTE_TYPE": "RSV", + "LAST_SUCCESSFUL_DATE": null, + "BOOKED_APPOINTMENT_DATE": null, + "BOOKED_APPOINTMENT_PROVIDER": null + } + ] +} diff --git a/tests/e2e/data/dynamoDB/storyTestData/AUTO_RSV_ELI-365_025.json 
b/tests/e2e/data/dynamoDB/storyTestData/AUTO_RSV_ELI-365_025.json new file mode 100644 index 000000000..80a5ec4e2 --- /dev/null +++ b/tests/e2e/data/dynamoDB/storyTestData/AUTO_RSV_ELI-365_025.json @@ -0,0 +1,47 @@ +{ + "scenario_name": "RSV - Actionable - In 3 actions - under 80 - ICB", + "request_headers": { + "nhs-login-nhs-number": "9900036527" + }, + "config_filenames": [ + "AUTO_RSV_ELI-365.json" + ], + "data": [ + { + "NHS_NUMBER": "9900036527", + "ATTRIBUTE_TYPE": "COHORTS", + "COHORT_MEMBERSHIPS": [ + { + "COHORT_LABEL": "rsv_80_since_02_Sept_2024", + "DATE_JOINED": "20231020" + } + ] + }, + { + "NHS_NUMBER": "9900036527", + "ATTRIBUTE_TYPE": "PERSON", + "DATE_OF_BIRTH": "<>", + "GENDER": "0", + "POSTCODE": "SG8 6EG", + "POSTCODE_SECTOR": "SG86", + "POSTCODE_OUTCODE": "SG8", + "MSOA": "E02003792", + "LSOA": "E01018267", + "GP_PRACTICE_CODE": "D81046", + "PCN": "U75549", + "ICB": "QH8", + "LOCAL_AUTHORITY": "ZZ8000014", + "COMMISSIONING_REGION": "Y61", + "13Q_FLAG": "N", + "CARE_HOME_FLAG": "N", + "DE_FLAG": "N" + }, + { + "NHS_NUMBER": "9900036527", + "ATTRIBUTE_TYPE": "RSV", + "LAST_SUCCESSFUL_DATE": null, + "BOOKED_APPOINTMENT_DATE": null, + "BOOKED_APPOINTMENT_PROVIDER": null + } + ] +} diff --git a/tests/e2e/data/dynamoDB/storyTestData/AUTO_RSV_ELI-365_026.json b/tests/e2e/data/dynamoDB/storyTestData/AUTO_RSV_ELI-365_026.json new file mode 100644 index 000000000..892196765 --- /dev/null +++ b/tests/e2e/data/dynamoDB/storyTestData/AUTO_RSV_ELI-365_026.json @@ -0,0 +1,47 @@ +{ + "scenario_name": "RSV - Actionable - 2 actions - 80 or over - ICB", + "request_headers": { + "nhs-login-nhs-number": "9900036524" + }, + "config_filenames": [ + "AUTO_RSV_ELI-365.json" + ], + "data": [ + { + "NHS_NUMBER": "9900036524", + "ATTRIBUTE_TYPE": "COHORTS", + "COHORT_MEMBERSHIPS": [ + { + "COHORT_LABEL": "rsv_80_since_02_Sept_2024", + "DATE_JOINED": "20231020" + } + ] + }, + { + "NHS_NUMBER": "9900036524", + "ATTRIBUTE_TYPE": "PERSON", + "DATE_OF_BIRTH": "<>", + 
"GENDER": "0", + "POSTCODE": "SG8 6EG", + "POSTCODE_SECTOR": "SG86", + "POSTCODE_OUTCODE": "SG8", + "MSOA": "E02003792", + "LSOA": "E01018267", + "GP_PRACTICE_CODE": "D81046", + "PCN": "U75549", + "ICB": "QH8", + "LOCAL_AUTHORITY": "ZZ8000011", + "COMMISSIONING_REGION": "Y61", + "13Q_FLAG": "N", + "CARE_HOME_FLAG": "N", + "DE_FLAG": "N" + }, + { + "NHS_NUMBER": "9900036524", + "ATTRIBUTE_TYPE": "RSV", + "LAST_SUCCESSFUL_DATE": null, + "BOOKED_APPOINTMENT_DATE": null, + "BOOKED_APPOINTMENT_PROVIDER": null + } + ] +} diff --git a/tests/e2e/data/dynamoDB/storyTestData/AUTO_RSV_ELI-365_027.json b/tests/e2e/data/dynamoDB/storyTestData/AUTO_RSV_ELI-365_027.json new file mode 100644 index 000000000..0c90eab68 --- /dev/null +++ b/tests/e2e/data/dynamoDB/storyTestData/AUTO_RSV_ELI-365_027.json @@ -0,0 +1,47 @@ +{ + "scenario_name": "RSV - Actionable - 2 actions - 80 or over - Local Authority", + "request_headers": { + "nhs-login-nhs-number": "9900036525" + }, + "config_filenames": [ + "AUTO_RSV_ELI-365.json" + ], + "data": [ + { + "NHS_NUMBER": "9900036525", + "ATTRIBUTE_TYPE": "COHORTS", + "COHORT_MEMBERSHIPS": [ + { + "COHORT_LABEL": "rsv_80_since_02_Sept_2024", + "DATE_JOINED": "20231020" + } + ] + }, + { + "NHS_NUMBER": "9900036525", + "ATTRIBUTE_TYPE": "PERSON", + "DATE_OF_BIRTH": "<>", + "GENDER": "0", + "POSTCODE": "SG8 6EG", + "POSTCODE_SECTOR": "SG86", + "POSTCODE_OUTCODE": "SG8", + "MSOA": "E02003792", + "LSOA": "E01018267", + "GP_PRACTICE_CODE": "D81046", + "PCN": "U75549", + "ICB": "zz1", + "LOCAL_AUTHORITY": "E08000014", + "COMMISSIONING_REGION": "Y61", + "13Q_FLAG": "N", + "CARE_HOME_FLAG": "N", + "DE_FLAG": "N" + }, + { + "NHS_NUMBER": "9900036525", + "ATTRIBUTE_TYPE": "RSV", + "LAST_SUCCESSFUL_DATE": null, + "BOOKED_APPOINTMENT_DATE": null, + "BOOKED_APPOINTMENT_PROVIDER": null + } + ] +} diff --git a/tests/e2e/data/dynamoDB/storyTestData/AUTO_RSV_ELI-365_028.json b/tests/e2e/data/dynamoDB/storyTestData/AUTO_RSV_ELI-365_028.json new file mode 100644 index 
000000000..083693b2c --- /dev/null +++ b/tests/e2e/data/dynamoDB/storyTestData/AUTO_RSV_ELI-365_028.json @@ -0,0 +1,47 @@ +{ + "scenario_name": "RSV - Actionable - 2 actions - 80 or over - No ICB or Local Authority", + "request_headers": { + "nhs-login-nhs-number": "9900036528" + }, + "config_filenames": [ + "AUTO_RSV_ELI-365.json" + ], + "data": [ + { + "NHS_NUMBER": "9900036528", + "ATTRIBUTE_TYPE": "COHORTS", + "COHORT_MEMBERSHIPS": [ + { + "COHORT_LABEL": "rsv_80_since_02_Sept_2024", + "DATE_JOINED": "20231020" + } + ] + }, + { + "NHS_NUMBER": "9900036528", + "ATTRIBUTE_TYPE": "PERSON", + "DATE_OF_BIRTH": "<>", + "GENDER": "0", + "POSTCODE": "SG8 6EG", + "POSTCODE_SECTOR": "SG86", + "POSTCODE_OUTCODE": "SG8", + "MSOA": "E02003792", + "LSOA": "E01018267", + "GP_PRACTICE_CODE": "D81046", + "PCN": "U75549", + "ICB": "zz1", + "LOCAL_AUTHORITY": "ZZ8000014", + "COMMISSIONING_REGION": "Y61", + "13Q_FLAG": "N", + "CARE_HOME_FLAG": "N", + "DE_FLAG": "N" + }, + { + "NHS_NUMBER": "9900036528", + "ATTRIBUTE_TYPE": "RSV", + "LAST_SUCCESSFUL_DATE": null, + "BOOKED_APPOINTMENT_DATE": null, + "BOOKED_APPOINTMENT_PROVIDER": null + } + ] +} diff --git a/tests/e2e/data/responses/storyTestResponses/AUTO_RSV_ELI-320-12.json b/tests/e2e/data/responses/storyTestResponses/AUTO_RSV_ELI-320-12.json deleted file mode 100644 index c57312555..000000000 --- a/tests/e2e/data/responses/storyTestResponses/AUTO_RSV_ELI-320-12.json +++ /dev/null @@ -1,78 +0,0 @@ -{ - "meta": { - "lastUpdated": "2025-07-15T14:52:52.785698+00:00" - }, - "processedSuggestions": [ - { - "actions": [], - "condition": "COVID", - "eligibilityCohorts": [ - { - "cohortCode": "covid_cohort_group", - "cohortStatus": "NotActionable", - "cohortText": "You are currently in a covid cohort" - } - ], - "status": "NotActionable", - "statusText": "You should have the COVID vaccine", - "suitabilityRules": [ - { - "ruleCode": "AlreadyVaccinated", - "ruleText": "##You've had your COVID vaccination\nWe believe you already had your 
COVID vaccination.", - "ruleType": "S" - } - ] - }, - { - "actions": [ - { - "actionCode": "AmendNBS", - "actionType": "ButtonWithAuthLink", - "description": "##You have an flu screening appointment\nYou can view, change or cancel your appointment below.", - "urlLabel": "Manage your appointment", - "urlLink": "http://www.nhs.uk/book-bs" - } - ], - "condition": "FLU", - "eligibilityCohorts": [ - { - "cohortCode": "FLU_screening_cohort_group", - "cohortStatus": "Actionable", - "cohortText": "You are currently in an flu SCREENING cohort" - } - ], - "status": "Actionable", - "statusText": "You should have the FLU vaccine", - "suitabilityRules": [] - }, - { - "actions": [], - "condition": "MMR", - "eligibilityCohorts": [ - { - "cohortCode": "mmr_cohort_group", - "cohortStatus": "NotEligible", - "cohortText": "You are not currently in an mmr cohort" - } - ], - "status": "NotEligible", - "statusText": "We do not believe you can have it", - "suitabilityRules": [] - }, - { - "actions": [], - "condition": "RSV", - "eligibilityCohorts": [ - { - "cohortCode": "rsv_screening_cohort_group", - "cohortStatus": "NotEligible", - "cohortText": "You are not currently in an RSV SCREENING cohort" - } - ], - "status": "NotEligible", - "statusText": "We do not believe you can have it", - "suitabilityRules": [] - } - ], - "responseId": "5c8d1cb3-8326-40b1-93ad-1b7fa24c2595" -} diff --git a/tests/e2e/data/responses/storyTestResponses/AUTO_RSV_ELI-365_023.json b/tests/e2e/data/responses/storyTestResponses/AUTO_RSV_ELI-365_023.json index a4f4656b8..fe6f609a4 100644 --- a/tests/e2e/data/responses/storyTestResponses/AUTO_RSV_ELI-365_023.json +++ b/tests/e2e/data/responses/storyTestResponses/AUTO_RSV_ELI-365_023.json @@ -22,6 +22,11 @@ ], "condition": "RSV", "eligibilityCohorts": [ + { + "cohortCode": "rsv_age", + "cohortStatus": "Actionable", + "cohortText": "are aged 75 to 79 years old" + }, { "cohortCode": "rsv_age_catchup", "cohortStatus": "Actionable", diff --git 
a/tests/e2e/data/responses/storyTestResponses/AUTO_RSV_ELI-365_024.json b/tests/e2e/data/responses/storyTestResponses/AUTO_RSV_ELI-365_024.json new file mode 100644 index 000000000..c0bb698d4 --- /dev/null +++ b/tests/e2e/data/responses/storyTestResponses/AUTO_RSV_ELI-365_024.json @@ -0,0 +1,44 @@ +{ + "meta": { + "lastUpdated": "" + }, + "processedSuggestions": [ + { + "actions": [ + { + "actionCode": "BookLocal", + "actionType": "InfoText", + "description": "##Getting the vaccine\n\nYou can get an RSV vaccination at your GP surgery.\nYour GP surgery may contact you about getting the RSV vaccine. This may be by letter, text, phone call, email or through the NHS App. You do not need to wait to be contacted before booking your vaccination.", + "urlLabel": "", + "urlLink": "" + }, + { + "actionCode": "BookNBS", + "actionType": "ButtonWithAuthLink", + "description": "", + "urlLabel": "Continue to booking", + "urlLink": "http://www.nhs.uk/book-rsv" + }, + { + "actionCode": "HelpSupportInfo", + "actionType": "InfoText", + "description": "## CONTENT TBC\n\nBlah blah blah.", + "urlLabel": "", + "urlLink": "" + } + ], + "condition": "RSV", + "eligibilityCohorts": [ + { + "cohortCode": "rsv_age_catchup", + "cohortStatus": "Actionable", + "cohortText": "turned 80 after 1st September 2024" + } + ], + "status": "Actionable", + "statusText": "You should have the RSV vaccine", + "suitabilityRules": [] + } + ], + "responseId": "" +} diff --git a/tests/e2e/data/responses/storyTestResponses/AUTO_RSV_ELI-365_025.json b/tests/e2e/data/responses/storyTestResponses/AUTO_RSV_ELI-365_025.json new file mode 100644 index 000000000..c0bb698d4 --- /dev/null +++ b/tests/e2e/data/responses/storyTestResponses/AUTO_RSV_ELI-365_025.json @@ -0,0 +1,44 @@ +{ + "meta": { + "lastUpdated": "" + }, + "processedSuggestions": [ + { + "actions": [ + { + "actionCode": "BookLocal", + "actionType": "InfoText", + "description": "##Getting the vaccine\n\nYou can get an RSV vaccination at your GP surgery.\nYour 
GP surgery may contact you about getting the RSV vaccine. This may be by letter, text, phone call, email or through the NHS App. You do not need to wait to be contacted before booking your vaccination.", + "urlLabel": "", + "urlLink": "" + }, + { + "actionCode": "BookNBS", + "actionType": "ButtonWithAuthLink", + "description": "", + "urlLabel": "Continue to booking", + "urlLink": "http://www.nhs.uk/book-rsv" + }, + { + "actionCode": "HelpSupportInfo", + "actionType": "InfoText", + "description": "## CONTENT TBC\n\nBlah blah blah.", + "urlLabel": "", + "urlLink": "" + } + ], + "condition": "RSV", + "eligibilityCohorts": [ + { + "cohortCode": "rsv_age_catchup", + "cohortStatus": "Actionable", + "cohortText": "turned 80 after 1st September 2024" + } + ], + "status": "Actionable", + "statusText": "You should have the RSV vaccine", + "suitabilityRules": [] + } + ], + "responseId": "" +} diff --git a/tests/e2e/data/responses/storyTestResponses/AUTO_RSV_ELI-365_026.json b/tests/e2e/data/responses/storyTestResponses/AUTO_RSV_ELI-365_026.json new file mode 100644 index 000000000..2800f4974 --- /dev/null +++ b/tests/e2e/data/responses/storyTestResponses/AUTO_RSV_ELI-365_026.json @@ -0,0 +1,37 @@ +{ + "meta": { + "lastUpdated": "" + }, + "processedSuggestions": [ + { + "actions": [ + { + "actionCode": "BookLocal", + "actionType": "InfoText", + "description": "##Getting the vaccine\n\nYou can get an RSV vaccination at your GP surgery.\nYour GP surgery may contact you about getting the RSV vaccine. This may be by letter, text, phone call, email or through the NHS App. 
You do not need to wait to be contacted before booking your vaccination.", + "urlLabel": "", + "urlLink": "" + }, + { + "actionCode": "HelpSupportInfo", + "actionType": "InfoText", + "description": "## CONTENT TBC\n\nBlah blah blah.", + "urlLabel": "", + "urlLink": "" + } + ], + "condition": "RSV", + "eligibilityCohorts": [ + { + "cohortCode": "rsv_age_catchup", + "cohortStatus": "Actionable", + "cohortText": "turned 80 after 1st September 2024" + } + ], + "status": "Actionable", + "statusText": "You should have the RSV vaccine", + "suitabilityRules": [] + } + ], + "responseId": "" +} diff --git a/tests/e2e/data/responses/storyTestResponses/AUTO_RSV_ELI-365_027.json b/tests/e2e/data/responses/storyTestResponses/AUTO_RSV_ELI-365_027.json new file mode 100644 index 000000000..2800f4974 --- /dev/null +++ b/tests/e2e/data/responses/storyTestResponses/AUTO_RSV_ELI-365_027.json @@ -0,0 +1,37 @@ +{ + "meta": { + "lastUpdated": "" + }, + "processedSuggestions": [ + { + "actions": [ + { + "actionCode": "BookLocal", + "actionType": "InfoText", + "description": "##Getting the vaccine\n\nYou can get an RSV vaccination at your GP surgery.\nYour GP surgery may contact you about getting the RSV vaccine. This may be by letter, text, phone call, email or through the NHS App. 
You do not need to wait to be contacted before booking your vaccination.", + "urlLabel": "", + "urlLink": "" + }, + { + "actionCode": "HelpSupportInfo", + "actionType": "InfoText", + "description": "## CONTENT TBC\n\nBlah blah blah.", + "urlLabel": "", + "urlLink": "" + } + ], + "condition": "RSV", + "eligibilityCohorts": [ + { + "cohortCode": "rsv_age_catchup", + "cohortStatus": "Actionable", + "cohortText": "turned 80 after 1st September 2024" + } + ], + "status": "Actionable", + "statusText": "You should have the RSV vaccine", + "suitabilityRules": [] + } + ], + "responseId": "" +} diff --git a/tests/e2e/data/responses/storyTestResponses/AUTO_RSV_ELI-365_028.json b/tests/e2e/data/responses/storyTestResponses/AUTO_RSV_ELI-365_028.json new file mode 100644 index 000000000..2800f4974 --- /dev/null +++ b/tests/e2e/data/responses/storyTestResponses/AUTO_RSV_ELI-365_028.json @@ -0,0 +1,37 @@ +{ + "meta": { + "lastUpdated": "" + }, + "processedSuggestions": [ + { + "actions": [ + { + "actionCode": "BookLocal", + "actionType": "InfoText", + "description": "##Getting the vaccine\n\nYou can get an RSV vaccination at your GP surgery.\nYour GP surgery may contact you about getting the RSV vaccine. This may be by letter, text, phone call, email or through the NHS App. 
You do not need to wait to be contacted before booking your vaccination.", + "urlLabel": "", + "urlLink": "" + }, + { + "actionCode": "HelpSupportInfo", + "actionType": "InfoText", + "description": "## CONTENT TBC\n\nBlah blah blah.", + "urlLabel": "", + "urlLink": "" + } + ], + "condition": "RSV", + "eligibilityCohorts": [ + { + "cohortCode": "rsv_age_catchup", + "cohortStatus": "Actionable", + "cohortText": "turned 80 after 1st September 2024" + } + ], + "status": "Actionable", + "statusText": "You should have the RSV vaccine", + "suitabilityRules": [] + } + ], + "responseId": "" +} diff --git a/tests/e2e/tests/conftest.py b/tests/e2e/tests/conftest.py index 30dd5808e..7be5cad4e 100644 --- a/tests/e2e/tests/conftest.py +++ b/tests/e2e/tests/conftest.py @@ -1,9 +1,7 @@ -import json import logging import os from pathlib import Path -import boto3 import pytest from dotenv import load_dotenv @@ -25,6 +23,7 @@ logger = logging.getLogger(__name__) + @pytest.fixture(scope="session") def eligibility_client(): return EligibilityApiClient(BASE_URL, cert_dir="certs") diff --git a/tests/e2e/tests/test_in_progress.py b/tests/e2e/tests/test_in_progress.py index 4204f4519..f3a6a5917 100644 --- a/tests/e2e/tests/test_in_progress.py +++ b/tests/e2e/tests/test_in_progress.py @@ -16,7 +16,9 @@ @pytest.mark.parametrize(("filename", "scenario"), param_list, ids=id_list) def test_run_in_progress_tests(filename, scenario, eligibility_client, get_scenario_params): - nhs_number, config_filenames, request_headers, query_params, expected_response_code = get_scenario_params(scenario, config_path) + nhs_number, config_filenames, request_headers, query_params, expected_response_code = get_scenario_params( + scenario, config_path + ) actual_response = eligibility_client.make_request( nhs_number, headers=request_headers, query_params=query_params, strict_ssl=False diff --git a/tests/e2e/tests/test_regression_tests.py b/tests/e2e/tests/test_regression_tests.py index d4a17f288..e9a0ba88c 100644 
--- a/tests/e2e/tests/test_regression_tests.py +++ b/tests/e2e/tests/test_regression_tests.py @@ -17,7 +17,9 @@ @pytest.mark.functionale2eregression @pytest.mark.parametrize(("filename", "scenario"), param_list, ids=id_list) def test_run_regression_tests(filename, scenario, eligibility_client, get_scenario_params): - nhs_number, config_filenames, request_headers, query_params, expected_response_code = get_scenario_params(scenario, config_path) + nhs_number, config_filenames, request_headers, query_params, expected_response_code = get_scenario_params( + scenario, config_path + ) actual_response = eligibility_client.make_request(nhs_number, headers=request_headers, strict_ssl=False) expected_response = all_expected_responses.get(filename).get("response_items", {}) diff --git a/tests/e2e/tests/test_smoke_tests.py b/tests/e2e/tests/test_smoke_tests.py index 8b2271cf3..278fd8b52 100644 --- a/tests/e2e/tests/test_smoke_tests.py +++ b/tests/e2e/tests/test_smoke_tests.py @@ -17,7 +17,9 @@ @pytest.mark.sandboxtests @pytest.mark.parametrize(("filename", "scenario"), param_list, ids=id_list) def test_run_smoke_case(filename, scenario, eligibility_client, get_scenario_params): - nhs_number, config_filenames, request_headers, query_params, expected_response_code = get_scenario_params(scenario, config_path) + nhs_number, config_filenames, request_headers, query_params, expected_response_code = get_scenario_params( + scenario, config_path + ) actual_response = eligibility_client.make_request( nhs_number, headers=request_headers, query_params=query_params, strict_ssl=False diff --git a/tests/e2e/tests/test_story_tests.py b/tests/e2e/tests/test_story_tests.py index 541fb7c77..689c51161 100644 --- a/tests/e2e/tests/test_story_tests.py +++ b/tests/e2e/tests/test_story_tests.py @@ -17,7 +17,9 @@ @pytest.mark.functionale2eregression @pytest.mark.parametrize(("filename", "scenario"), param_list, ids=id_list) def test_run_story_test_cases(filename, scenario, eligibility_client, 
get_scenario_params): - nhs_number, config_filenames, request_headers, query_params, expected_response_code = get_scenario_params(scenario, config_path) + nhs_number, config_filenames, request_headers, query_params, expected_response_code = get_scenario_params( + scenario, config_path + ) actual_response = eligibility_client.make_request( nhs_number=nhs_number, headers=request_headers, query_params=query_params, strict_ssl=False diff --git a/tests/e2e/tests/test_vita_integration_tests.py b/tests/e2e/tests/test_vita_integration_tests.py index 5162ee839..63175fbc9 100644 --- a/tests/e2e/tests/test_vita_integration_tests.py +++ b/tests/e2e/tests/test_vita_integration_tests.py @@ -17,7 +17,9 @@ @pytest.mark.functionale2eregression @pytest.mark.parametrize(("filename", "scenario"), param_list, ids=id_list) def test_run_story_test_cases(filename, scenario, eligibility_client, get_scenario_params): - nhs_number, config_filenames, request_headers, query_params, expected_response_code = get_scenario_params(scenario, config_path) + nhs_number, config_filenames, request_headers, query_params, expected_response_code = get_scenario_params( + scenario, config_path + ) actual_response = eligibility_client.make_request( nhs_number=nhs_number, headers=request_headers, query_params=query_params, strict_ssl=False diff --git a/tests/fixtures/builders/model/eligibility.py b/tests/fixtures/builders/model/eligibility.py index a1fd81f7d..7406bc38e 100644 --- a/tests/fixtures/builders/model/eligibility.py +++ b/tests/fixtures/builders/model/eligibility.py @@ -4,23 +4,40 @@ from polyfactory import Use from polyfactory.factories import DataclassFactory -from eligibility_signposting_api.model import eligibility -from eligibility_signposting_api.model.eligibility import UrlLink +from eligibility_signposting_api.model import eligibility_status +from eligibility_signposting_api.model.eligibility_status import ( + RuleDescription, + RuleName, + RulePriority, + RuleType, + UrlLink, +) -class 
SuggestedActionFactory(DataclassFactory[eligibility.SuggestedAction]): +class SuggestedActionFactory(DataclassFactory[eligibility_status.SuggestedAction]): url_link = UrlLink("https://test-example.com") -class ConditionFactory(DataclassFactory[eligibility.Condition]): - actions = Use(SuggestedActionFactory.batch, size=2) +class ReasonFactory(DataclassFactory[eligibility_status.Reason]): + rule_type = RuleType.filter + rule_name = RuleName("name") + rule_priority = RulePriority("1") + rule_description = RuleDescription("description") + matcher_matched = False -class EligibilityStatusFactory(DataclassFactory[eligibility.EligibilityStatus]): - conditions = Use(ConditionFactory.batch, size=2) +class CohortResultFactory(DataclassFactory[eligibility_status.CohortGroupResult]): + reasons = Use(ReasonFactory.batch, size=2) + + +class ConditionFactory(DataclassFactory[eligibility_status.Condition]): + actions = Use(SuggestedActionFactory.batch, size=2) + cohort_results = Use(CohortResultFactory.batch, size=2) + suitability_rules = Use(ReasonFactory.batch, size=2) -class CohortResultFactory(DataclassFactory[eligibility.CohortGroupResult]): ... 
+class EligibilityStatusFactory(DataclassFactory[eligibility_status.EligibilityStatus]): + conditions = Use(ConditionFactory.batch, size=2) def random_str(length: int) -> str: diff --git a/tests/fixtures/builders/model/rule.py b/tests/fixtures/builders/model/rule.py index 5388b113b..4cc5a0ac1 100644 --- a/tests/fixtures/builders/model/rule.py +++ b/tests/fixtures/builders/model/rule.py @@ -5,7 +5,26 @@ from polyfactory import Use from polyfactory.factories.pydantic_factory import ModelFactory -from eligibility_signposting_api.model import rules +from eligibility_signposting_api.model.campaign_config import ( + ActionsMapper, + AvailableAction, + CampaignConfig, + CohortGroup, + CohortLabel, + CommsRouting, + Description, + Iteration, + IterationCohort, + IterationRule, + RuleAttributeLevel, + RuleAttributeName, + RuleComparator, + RuleDescription, + RuleName, + RuleOperator, + RulePriority, + RuleType, +) def past_date(days_behind: int = 365) -> date: @@ -16,18 +35,20 @@ def future_date(days_ahead: int = 365) -> date: return datetime.now(tz=UTC).date() + timedelta(days=randint(1, days_ahead)) -class IterationCohortFactory(ModelFactory[rules.IterationCohort]): - priority = rules.RulePriority(0) +class IterationCohortFactory(ModelFactory[IterationCohort]): + priority = RulePriority(0) -class IterationRuleFactory(ModelFactory[rules.IterationRule]): +class IterationRuleFactory(ModelFactory[IterationRule]): attribute_target = None - attribute_name = None + attribute_name = "DATE_OF_BIRTH" + operator = "Y>" + comparator = "-1" cohort_label = None rule_stop = False -class AvailableActionDetailFactory(ModelFactory[rules.AvailableAction]): +class AvailableActionDetailFactory(ModelFactory[AvailableAction]): action_type = "defaultcomms" action_code = "action_code" action_description = None @@ -35,11 +56,11 @@ class AvailableActionDetailFactory(ModelFactory[rules.AvailableAction]): url_label = None -class ActionsMapperFactory(ModelFactory[rules.ActionsMapper]): +class 
ActionsMapperFactory(ModelFactory[ActionsMapper]): root = Use(lambda: {"defaultcomms": AvailableActionDetailFactory.build()}) -class IterationFactory(ModelFactory[rules.Iteration]): +class IterationFactory(ModelFactory[Iteration]): iteration_cohorts = Use(IterationCohortFactory.batch, size=2) iteration_rules = Use(IterationRuleFactory.batch, size=2) iteration_date = Use(past_date) @@ -47,7 +68,7 @@ class IterationFactory(ModelFactory[rules.Iteration]): actions_mapper = Use(ActionsMapperFactory.build) -class RawCampaignConfigFactory(ModelFactory[rules.CampaignConfig]): +class RawCampaignConfigFactory(ModelFactory[CampaignConfig]): iterations = Use(IterationFactory.batch, size=2) start_date = Use(past_date) @@ -56,13 +77,13 @@ class RawCampaignConfigFactory(ModelFactory[rules.CampaignConfig]): class CampaignConfigFactory(RawCampaignConfigFactory): @classmethod - def build(cls, **kwargs) -> rules.CampaignConfig: + def build(cls, **kwargs) -> CampaignConfig: """Ensure invariants are met: * no iterations with duplicate iteration dates * must have iteration active from campaign start date""" processed_kwargs = cls.process_kwargs(**kwargs) start_date: date = processed_kwargs["start_date"] - iterations: list[rules.Iteration] = processed_kwargs["iterations"] + iterations: list[Iteration] = processed_kwargs["iterations"] CampaignConfigFactory.fix_iteration_date_invariants(iterations, start_date) @@ -70,7 +91,7 @@ def build(cls, **kwargs) -> rules.CampaignConfig: return cls.__model__(**data) @staticmethod - def fix_iteration_date_invariants(iterations: list[rules.Iteration], start_date: date) -> None: + def fix_iteration_date_invariants(iterations: list[Iteration], start_date: date) -> None: iterations.sort(key=attrgetter("iteration_date")) iterations[0].iteration_date = start_date @@ -87,89 +108,113 @@ def fix_iteration_date_invariants(iterations: list[rules.Iteration], start_date: # Iteration cohort factories class MagicCohortFactory(IterationCohortFactory): - cohort_label 
= rules.CohortLabel("elid_all_people") - cohort_group = rules.CohortGroup("magic cohort group") - positive_description = rules.Description("magic positive description") - negative_description = rules.Description("magic negative description") + cohort_label = CohortLabel("elid_all_people") + cohort_group = CohortGroup("magic cohort group") + positive_description = Description("magic positive description") + negative_description = Description("magic negative description") priority = 1 class Rsv75RollingCohortFactory(IterationCohortFactory): - cohort_label = rules.CohortLabel("rsv_75_rolling") - cohort_group = rules.CohortGroup("rsv_age_range") - positive_description = rules.Description("rsv_age_range positive description") - negative_description = rules.Description("rsv_age_range negative description") + cohort_label = CohortLabel("rsv_75_rolling") + cohort_group = CohortGroup("rsv_age_range") + positive_description = Description("rsv_age_range positive description") + negative_description = Description("rsv_age_range negative description") priority = 2 class Rsv75to79CohortFactory(IterationCohortFactory): - cohort_label = rules.CohortLabel("rsv_75to79_2024") - cohort_group = rules.CohortGroup("rsv_age_range") - positive_description = rules.Description("rsv_age_range positive description") - negative_description = rules.Description("rsv_age_range negative description") + cohort_label = CohortLabel("rsv_75to79_2024") + cohort_group = CohortGroup("rsv_age_range") + positive_description = Description("rsv_age_range positive description") + negative_description = Description("rsv_age_range negative description") priority = 3 class RsvPretendClinicalCohortFactory(IterationCohortFactory): - cohort_label = rules.CohortLabel("rsv_pretend_clinical_cohort") - cohort_group = rules.CohortGroup("rsv_clinical_cohort") - positive_description = rules.Description("rsv_clinical_cohort positive description") - negative_description = rules.Description("rsv_clinical_cohort negative 
description") + cohort_label = CohortLabel("rsv_pretend_clinical_cohort") + cohort_group = CohortGroup("rsv_clinical_cohort") + positive_description = Description("rsv_clinical_cohort positive description") + negative_description = Description("rsv_clinical_cohort negative description") priority = 4 # Iteration rule factories class PersonAgeSuppressionRuleFactory(IterationRuleFactory): - type = rules.RuleType.suppression - name = rules.RuleName("Exclude too young less than 75") - description = rules.RuleDescription("Exclude too young less than 75") - priority = rules.RulePriority(10) - operator = rules.RuleOperator.year_gt - attribute_level = rules.RuleAttributeLevel.PERSON - attribute_name = rules.RuleAttributeName("DATE_OF_BIRTH") - comparator = rules.RuleComparator("-75") + type = RuleType.suppression + name = RuleName("Exclude too young less than 75") + description = RuleDescription("Exclude too young less than 75") + priority = RulePriority(10) + operator = RuleOperator.year_gt + attribute_level = RuleAttributeLevel.PERSON + attribute_name = RuleAttributeName("DATE_OF_BIRTH") + comparator = RuleComparator("-75") class PostcodeSuppressionRuleFactory(IterationRuleFactory): - type = rules.RuleType.suppression - name = rules.RuleName("Excluded postcode In SW19") - description = rules.RuleDescription("In SW19") - priority = rules.RulePriority(10) - operator = rules.RuleOperator.starts_with - attribute_level = rules.RuleAttributeLevel.PERSON - attribute_name = rules.RuleAttributeName("POSTCODE") - comparator = rules.RuleComparator("SW19") + type = RuleType.suppression + name = RuleName("Excluded postcode In SW19") + description = RuleDescription("In SW19") + priority = RulePriority(10) + operator = RuleOperator.starts_with + attribute_level = RuleAttributeLevel.PERSON + attribute_name = RuleAttributeName("POSTCODE") + comparator = RuleComparator("SW19") class DetainedEstateSuppressionRuleFactory(IterationRuleFactory): - type = rules.RuleType.suppression - name = 
rules.RuleName("Detained - Suppress Individuals In Detained Estates") - description = rules.RuleDescription("Suppress where individual is identified as being in a Detained Estate") - priority = rules.RulePriority(160) - attribute_level = rules.RuleAttributeLevel.PERSON - attribute_name = rules.RuleAttributeName("DE_FLAG") - operator = rules.RuleOperator.equals - comparator = rules.RuleComparator("Y") + type = RuleType.suppression + name = RuleName("Detained - Suppress Individuals In Detained Estates") + description = RuleDescription("Suppress where individual is identified as being in a Detained Estate") + priority = RulePriority(160) + attribute_level = RuleAttributeLevel.PERSON + attribute_name = RuleAttributeName("DE_FLAG") + operator = RuleOperator.equals + comparator = RuleComparator("Y") class ICBFilterRuleFactory(IterationRuleFactory): - type = rules.RuleType.filter - name = rules.RuleName("Not in QE1") - description = rules.RuleDescription("Not in QE1") - priority = rules.RulePriority(10) - operator = rules.RuleOperator.ne - attribute_level = rules.RuleAttributeLevel.PERSON - attribute_name = rules.RuleAttributeName("ICB") - comparator = rules.RuleComparator("QE1") + type = RuleType.filter + name = RuleName("Not in QE1") + description = RuleDescription("Not in QE1") + priority = RulePriority(10) + operator = RuleOperator.ne + attribute_level = RuleAttributeLevel.PERSON + attribute_name = RuleAttributeName("ICB") + comparator = RuleComparator("QE1") class ICBRedirectRuleFactory(IterationRuleFactory): - type = rules.RuleType.redirect - name = rules.RuleName("In QE1") - description = rules.RuleDescription("In QE1") - priority = rules.RulePriority(20) - operator = rules.RuleOperator.equals - attribute_level = rules.RuleAttributeLevel.PERSON - attribute_name = rules.RuleAttributeName("ICB") - comparator = rules.RuleComparator("QE1") - comms_routing = rules.CommsRouting("ActionCode1") + type = RuleType.redirect + name = RuleName("In QE1") + description = 
RuleDescription("In QE1") + priority = RulePriority(20) + operator = RuleOperator.equals + attribute_level = RuleAttributeLevel.PERSON + attribute_name = RuleAttributeName("ICB") + comparator = RuleComparator("QE1") + comms_routing = CommsRouting("ActionCode1") + + +class ICBNonEligibleActionRuleFactory(IterationRuleFactory): + type = RuleType.not_eligible_actions + name = RuleName("In QE1") + description = RuleDescription("In QE1") + priority = RulePriority(20) + operator = RuleOperator.equals + attribute_level = RuleAttributeLevel.PERSON + attribute_name = RuleAttributeName("ICB") + comparator = RuleComparator("QE1") + comms_routing = CommsRouting("ActionCode1") + + +class ICBNonActionableActionRuleFactory(IterationRuleFactory): + type = RuleType.not_actionable_actions + name = RuleName("In QE1") + description = RuleDescription("In QE1") + priority = RulePriority(20) + operator = RuleOperator.equals + attribute_level = RuleAttributeLevel.PERSON + attribute_name = RuleAttributeName("ICB") + comparator = RuleComparator("QE1") + comms_routing = CommsRouting("ActionCode1") diff --git a/tests/fixtures/builders/repos/person.py b/tests/fixtures/builders/repos/person.py index 6cc418e19..eb2b96d64 100644 --- a/tests/fixtures/builders/repos/person.py +++ b/tests/fixtures/builders/repos/person.py @@ -5,6 +5,7 @@ from faker import Faker +from eligibility_signposting_api.model.person import Person from tests.conftest import PersonDetailProvider Gender = Literal["0", "1", "2", "9"] # 0 - Not known, 1- Male, 2 - Female, 9 - Not specified. I know, right? 
@@ -27,7 +28,7 @@ def person_rows_builder( # noqa:PLR0913 de: bool | None = ..., msoa: str | None = ..., lsoa: str | None = ..., -) -> list[dict[str, Any]]: +) -> Person: faker = Faker("en_UK") faker.add_provider(PersonDetailProvider) @@ -66,13 +67,9 @@ def person_rows_builder( # noqa:PLR0913 { "NHS_NUMBER": key, "ATTRIBUTE_TYPE": "COHORTS", - "COHORT_MAP": { - "cohorts": { - "M": { - cohort: {"M": {"dateJoined": {"S": faker.past_date().strftime("%Y%m%d")}}} for cohort in cohorts - } - } - }, + "COHORT_MEMBERSHIPS": [ + {"COHORT_LABEL": cohort, "DATE_JOINED": faker.past_date().strftime("%Y%m%d")} for cohort in cohorts + ], }, ] rows.extend( @@ -89,4 +86,5 @@ def person_rows_builder( # noqa:PLR0913 ) shuffle(rows) - return rows + + return Person(data=rows) diff --git a/tests/fixtures/builders/views/response_model/eligibility.py b/tests/fixtures/builders/views/response_model/eligibility.py index 061036908..3b8bff75b 100644 --- a/tests/fixtures/builders/views/response_model/eligibility.py +++ b/tests/fixtures/builders/views/response_model/eligibility.py @@ -1,23 +1,23 @@ from polyfactory import Use from polyfactory.factories.pydantic_factory import ModelFactory -from eligibility_signposting_api.views.response_model import eligibility +from eligibility_signposting_api.views.response_model import eligibility_response -class EligibilityCohortFactory(ModelFactory[eligibility.EligibilityCohort]): ... +class EligibilityCohortFactory(ModelFactory[eligibility_response.EligibilityCohort]): ... -class SuitabilityRuleFactory(ModelFactory[eligibility.SuitabilityRule]): ... +class SuitabilityRuleFactory(ModelFactory[eligibility_response.SuitabilityRule]): ... -class ActionFactory(ModelFactory[eligibility.Action]): ... +class ActionFactory(ModelFactory[eligibility_response.Action]): ... 
-class ProcessedSuggestionFactory(ModelFactory[eligibility.ProcessedSuggestion]): +class ProcessedSuggestionFactory(ModelFactory[eligibility_response.ProcessedSuggestion]): eligibility_cohorts = Use(EligibilityCohortFactory.batch, size=2) suitability_rules = Use(SuitabilityRuleFactory.batch, size=2) actions = Use(ActionFactory.batch, size=2) -class EligibilityResponseFactory(ModelFactory[eligibility.EligibilityResponse]): +class EligibilityResponseFactory(ModelFactory[eligibility_response.EligibilityResponse]): processed_suggestions = Use(ProcessedSuggestionFactory.batch, size=2) diff --git a/tests/fixtures/matchers/eligibility.py b/tests/fixtures/matchers/eligibility.py index 98bfb4138..731a3de76 100644 --- a/tests/fixtures/matchers/eligibility.py +++ b/tests/fixtures/matchers/eligibility.py @@ -1,7 +1,11 @@ from hamcrest.core.matcher import Matcher -from eligibility_signposting_api.model.eligibility import CohortGroupResult, Condition, EligibilityStatus, Reason -from eligibility_signposting_api.views.response_model.eligibility import Action, EligibilityCohort, SuitabilityRule +from eligibility_signposting_api.model.eligibility_status import CohortGroupResult, Condition, EligibilityStatus, Reason +from eligibility_signposting_api.views.response_model.eligibility_response import ( + Action, + EligibilityCohort, + SuitabilityRule, +) from .meta import BaseAutoMatcher diff --git a/tests/fixtures/matchers/rules.py b/tests/fixtures/matchers/rules.py index 4aaadf311..d289f0b7e 100644 --- a/tests/fixtures/matchers/rules.py +++ b/tests/fixtures/matchers/rules.py @@ -1,6 +1,6 @@ from hamcrest.core.matcher import Matcher -from eligibility_signposting_api.model.rules import CampaignConfig, Iteration, IterationRule +from eligibility_signposting_api.model.campaign_config import CampaignConfig, Iteration, IterationRule from .meta import BaseAutoMatcher diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index 75cf97878..59d696121 100644 --- 
a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -1,3 +1,4 @@ +import datetime import json import logging import os @@ -16,7 +17,13 @@ from httpx import RequestError from yarl import URL -from eligibility_signposting_api.model import eligibility, rules +from eligibility_signposting_api.model import eligibility_status +from eligibility_signposting_api.model.campaign_config import ( + CampaignConfig, + EndDate, + RuleType, + StartDate, +) from eligibility_signposting_api.repos.campaign_repo import BucketName from eligibility_signposting_api.repos.person_repo import TableName from tests.fixtures.builders.model import rule @@ -330,12 +337,12 @@ def person_table(dynamodb_resource: ServiceResource) -> Generator[Any]: @pytest.fixture -def persisted_person(person_table: Any, faker: Faker) -> Generator[eligibility.NHSNumber]: - nhs_number = eligibility.NHSNumber(faker.nhs_number()) - date_of_birth = eligibility.DateOfBirth(faker.date_of_birth(minimum_age=18, maximum_age=65)) +def persisted_person(person_table: Any, faker: Faker) -> Generator[eligibility_status.NHSNumber]: + nhs_number = eligibility_status.NHSNumber(faker.nhs_number()) + date_of_birth = eligibility_status.DateOfBirth(faker.date_of_birth(minimum_age=18, maximum_age=65)) for row in ( - rows := person_rows_builder(nhs_number, date_of_birth=date_of_birth, postcode="hp1", cohorts=["cohort1"]) + rows := person_rows_builder(nhs_number, date_of_birth=date_of_birth, postcode="hp1", cohorts=["cohort1"]).data ): person_table.put_item(Item=row) @@ -346,9 +353,9 @@ def persisted_person(person_table: Any, faker: Faker) -> Generator[eligibility.N @pytest.fixture -def persisted_77yo_person(person_table: Any, faker: Faker) -> Generator[eligibility.NHSNumber]: - nhs_number = eligibility.NHSNumber(faker.nhs_number()) - date_of_birth = eligibility.DateOfBirth(faker.date_of_birth(minimum_age=77, maximum_age=77)) +def persisted_77yo_person(person_table: Any, faker: Faker) -> 
Generator[eligibility_status.NHSNumber]: + nhs_number = eligibility_status.NHSNumber(faker.nhs_number()) + date_of_birth = eligibility_status.DateOfBirth(faker.date_of_birth(minimum_age=77, maximum_age=77)) for row in ( rows := person_rows_builder( @@ -356,7 +363,7 @@ def persisted_77yo_person(person_table: Any, faker: Faker) -> Generator[eligibil date_of_birth=date_of_birth, postcode="hp1", cohorts=["cohort1", "cohort2"], - ) + ).data ): person_table.put_item(Item=row) @@ -367,18 +374,18 @@ def persisted_77yo_person(person_table: Any, faker: Faker) -> Generator[eligibil @pytest.fixture -def persisted_person_all_cohorts(person_table: Any, faker: Faker) -> Generator[eligibility.NHSNumber]: - nhs_number = eligibility.NHSNumber(faker.nhs_number()) - date_of_birth = eligibility.DateOfBirth(faker.date_of_birth(minimum_age=74, maximum_age=74)) +def persisted_person_all_cohorts(person_table: Any, faker: Faker) -> Generator[eligibility_status.NHSNumber]: + nhs_number = eligibility_status.NHSNumber(faker.nhs_number()) + date_of_birth = eligibility_status.DateOfBirth(faker.date_of_birth(minimum_age=74, maximum_age=74)) for row in ( rows := person_rows_builder( nhs_number, date_of_birth=date_of_birth, - postcode="hp1", - cohorts=["cohort_label1", "cohort_label2", "cohort_label3"], + postcode="SW19", + cohorts=["cohort_label1", "cohort_label2", "cohort_label3", "cohort_label4", "cohort_label5"], icb="QE1", - ) + ).data ): person_table.put_item(Item=row) @@ -389,10 +396,10 @@ def persisted_person_all_cohorts(person_table: Any, faker: Faker) -> Generator[e @pytest.fixture -def persisted_person_no_cohorts(person_table: Any, faker: Faker) -> Generator[eligibility.NHSNumber]: - nhs_number = eligibility.NHSNumber(faker.nhs_number()) +def persisted_person_no_cohorts(person_table: Any, faker: Faker) -> Generator[eligibility_status.NHSNumber]: + nhs_number = eligibility_status.NHSNumber(faker.nhs_number()) - for row in (rows := person_rows_builder(nhs_number)): + for row in (rows := 
person_rows_builder(nhs_number).data): person_table.put_item(Item=row) yield nhs_number @@ -402,11 +409,11 @@ def persisted_person_no_cohorts(person_table: Any, faker: Faker) -> Generator[el @pytest.fixture -def persisted_person_pc_sw19(person_table: Any, faker: Faker) -> Generator[eligibility.NHSNumber]: - nhs_number = eligibility.NHSNumber( +def persisted_person_pc_sw19(person_table: Any, faker: Faker) -> Generator[eligibility_status.NHSNumber]: + nhs_number = eligibility_status.NHSNumber( faker.nhs_number(), ) - for row in (rows := person_rows_builder(nhs_number, postcode="SW19", cohorts=["cohort1"])): + for row in (rows := person_rows_builder(nhs_number, postcode="SW19", cohorts=["cohort1"]).data): person_table.put_item(Item=row) yield nhs_number @@ -452,13 +459,13 @@ def firehose_delivery_stream(firehose_client: BaseClient, audit_bucket: BucketNa @pytest.fixture(scope="class") -def campaign_config(s3_client: BaseClient, rules_bucket: BucketName) -> Generator[rules.CampaignConfig]: - campaign: rules.CampaignConfig = rule.CampaignConfigFactory.build( +def campaign_config(s3_client: BaseClient, rules_bucket: BucketName) -> Generator[CampaignConfig]: + campaign: CampaignConfig = rule.CampaignConfigFactory.build( target="RSV", iterations=[ rule.IterationFactory.build( iteration_rules=[ - rule.PostcodeSuppressionRuleFactory.build(type=rules.RuleType.filter), + rule.PostcodeSuppressionRuleFactory.build(type=RuleType.filter), rule.PersonAgeSuppressionRuleFactory.build(), ], iteration_cohorts=[ @@ -481,14 +488,99 @@ def campaign_config(s3_client: BaseClient, rules_bucket: BucketName) -> Generato @pytest.fixture(scope="class") -def multiple_campaign_configs(s3_client: BaseClient, rules_bucket: BucketName) -> Generator[list[rules.CampaignConfig]]: +def inactive_iteration_config(s3_client: BaseClient, rules_bucket: BucketName) -> Generator[list[CampaignConfig]]: + campaigns, campaign_data_keys = [], [] + + target_iteration_dates = { + "start_date": ("RSV", 
datetime.date(2025, 1, 1)), # Active Iteration Date + "start_date_plus_one_day": ("COVID", datetime.date(2025, 1, 2)), # Active Iteration Date + "today": ("FLU", datetime.date(2025, 8, 8)), # Active Iteration Date + "tomorrow": ("MMR", datetime.date(2025, 8, 9)), # Inactive Iteration Date + } + + for target, data in target_iteration_dates.items(): + campaign = rule.CampaignConfigFactory.build( + id=f"campaign_{target}", + target=data[0], + type="V", + iterations=[ + rule.IterationFactory.build( + iteration_rules=[rule.PersonAgeSuppressionRuleFactory.build()], + iteration_cohorts=[rule.IterationCohortFactory.build(cohort_label="cohort_label1")], + ) + ], + ) + + campaign.start_date = StartDate(datetime.date(2025, 1, 1)) + campaign.end_date = EndDate(datetime.date(2026, 1, 1)) + campaign.iterations[0].iteration_date = data[1] + + campaign_data = {"CampaignConfig": campaign.model_dump(by_alias=True)} + key = f"{campaign.name}.json" + s3_client.put_object( + Bucket=rules_bucket, Key=key, Body=json.dumps(campaign_data), ContentType="application/json" + ) + campaigns.append(campaign) + campaign_data_keys.append(key) + + yield campaigns + + for key in campaign_data_keys: + s3_client.delete_object(Bucket=rules_bucket, Key=key) + + +@pytest.fixture(scope="class") +def campaign_config_with_and_rule(s3_client: BaseClient, rules_bucket: BucketName) -> Generator[CampaignConfig]: + campaign: CampaignConfig = rule.CampaignConfigFactory.build( + target="RSV", + iterations=[ + rule.IterationFactory.build( + iteration_rules=[ + rule.PostcodeSuppressionRuleFactory.build( + cohort_label="cohort2", + ), + rule.PersonAgeSuppressionRuleFactory.build(), + ], + iteration_cohorts=[ + rule.IterationCohortFactory.build( + cohort_label="cohort1", + cohort_group="cohort_group1", + positive_description="positive_description", + negative_description="negative_description", + ), + rule.IterationCohortFactory.build( + cohort_label="cohort2", + cohort_group="cohort_group2", + 
positive_description="positive_description", + negative_description="negative_description", + ), + ], + ) + ], + ) + campaign_data = {"CampaignConfig": campaign.model_dump(by_alias=True)} + s3_client.put_object( + Bucket=rules_bucket, Key=f"{campaign.name}.json", Body=json.dumps(campaign_data), ContentType="application/json" + ) + yield campaign + s3_client.delete_object(Bucket=rules_bucket, Key=f"{campaign.name}.json") + + +@pytest.fixture(scope="class") +def multiple_campaign_configs(s3_client: BaseClient, rules_bucket: BucketName) -> Generator[list[CampaignConfig]]: """Create and upload multiple campaign configs to S3, then clean up after tests.""" campaigns, campaign_data_keys = [], [] targets = ["RSV", "COVID", "FLU"] target_rules_map = { - targets[0]: [rule.PersonAgeSuppressionRuleFactory.build(type=rules.RuleType.filter)], - targets[1]: [rule.PersonAgeSuppressionRuleFactory.build()], + targets[0]: [ + rule.PersonAgeSuppressionRuleFactory.build(type=RuleType.filter, description="TOO YOUNG"), + rule.PostcodeSuppressionRuleFactory.build(type=RuleType.filter, priority=8, cohort_label="cohort_label4"), + ], + targets[1]: [ + rule.PersonAgeSuppressionRuleFactory.build(description="TOO YOUNG"), + rule.PostcodeSuppressionRuleFactory.build(priority=12, cohort_label="cohort_label2"), + ], targets[2]: [rule.ICBRedirectRuleFactory.build()], } @@ -506,7 +598,13 @@ def multiple_campaign_configs(s3_client: BaseClient, rules_bucket: BucketName) - cohort_group=f"cohort_group{i + 1}", positive_description=f"positive_desc_{i + 1}", negative_description=f"negative_desc_{i + 1}", - ) + ), + rule.IterationCohortFactory.build( + cohort_label="cohort_label4", + cohort_group="cohort_group4", + positive_description="positive_desc_4", + negative_description="negative_desc_4", + ), ], ) ], @@ -526,15 +624,13 @@ def multiple_campaign_configs(s3_client: BaseClient, rules_bucket: BucketName) - @pytest.fixture(scope="class") -def campaign_config_with_magic_cohort( - s3_client: BaseClient, 
rules_bucket: BucketName -) -> Generator[rules.CampaignConfig]: - campaign: rules.CampaignConfig = rule.CampaignConfigFactory.build( +def campaign_config_with_magic_cohort(s3_client: BaseClient, rules_bucket: BucketName) -> Generator[CampaignConfig]: + campaign: CampaignConfig = rule.CampaignConfigFactory.build( target="COVID", iterations=[ rule.IterationFactory.build( iteration_rules=[ - rule.PostcodeSuppressionRuleFactory.build(type=rules.RuleType.filter), + rule.PostcodeSuppressionRuleFactory.build(type=RuleType.filter), rule.PersonAgeSuppressionRuleFactory.build(), ], iteration_cohorts=[rule.MagicCohortFactory.build(cohort_label="elid_all_people")], @@ -552,13 +648,13 @@ def campaign_config_with_magic_cohort( @pytest.fixture(scope="class") def campaign_config_with_missing_descriptions_missing_rule_text( s3_client: BaseClient, rules_bucket: BucketName -) -> Generator[rules.CampaignConfig]: - campaign: rules.CampaignConfig = rule.CampaignConfigFactory.build( +) -> Generator[CampaignConfig]: + campaign: CampaignConfig = rule.CampaignConfigFactory.build( target="FLU", iterations=[ rule.IterationFactory.build( iteration_rules=[ - rule.PostcodeSuppressionRuleFactory.build(type=rules.RuleType.filter), + rule.PostcodeSuppressionRuleFactory.build(type=RuleType.filter), rule.PersonAgeSuppressionRuleFactory.build(), rule.PersonAgeSuppressionRuleFactory.build(name="Exclude 76 rolling", description=""), ], diff --git a/tests/integration/in_process/test_eligibility_endpoint.py b/tests/integration/in_process/test_eligibility_endpoint.py index 5356eed59..5290563f1 100644 --- a/tests/integration/in_process/test_eligibility_endpoint.py +++ b/tests/integration/in_process/test_eligibility_endpoint.py @@ -11,10 +11,10 @@ has_key, ) -from eligibility_signposting_api.model.eligibility import ( +from eligibility_signposting_api.model.campaign_config import CampaignConfig +from eligibility_signposting_api.model.eligibility_status import ( NHSNumber, ) -from 
eligibility_signposting_api.model.rules import CampaignConfig class TestBaseLine: @@ -85,7 +85,7 @@ def test_not_base_eligible( ], "actions": [], "suitabilityRules": [], - "statusText": "Status.not_eligible", + "statusText": "We do not believe you can have it", } ] ), @@ -128,7 +128,7 @@ def test_not_eligible_by_rule( ], "actions": [], "suitabilityRules": [], - "statusText": "Status.not_eligible", + "statusText": "We do not believe you can have it", } ] ), @@ -177,7 +177,7 @@ def test_not_actionable( "ruleType": "S", } ], - "statusText": "Status.not_actionable", + "statusText": "You should have the RSV vaccine", } ] ), @@ -228,7 +228,58 @@ def test_actionable( } ], "suitabilityRules": [], - "statusText": "Status.actionable", + "statusText": "You should have the RSV vaccine", + } + ] + ), + ) + ) + ), + ) + + def test_actionable_with_and_rule( + self, + client: FlaskClient, + persisted_person: NHSNumber, + campaign_config_with_and_rule: CampaignConfig, # noqa: ARG002 + ): + # Given + + # When + response = client.get(f"/patient-check/{persisted_person}?includeActions=Y") + + # Then + assert_that( + response, + is_response() + .with_status_code(HTTPStatus.OK) + .and_text( + is_json_that( + has_entry( + "processedSuggestions", + equal_to( + [ + { + "condition": "RSV", + "status": "Actionable", + "eligibilityCohorts": [ + { + "cohortCode": "cohort_group1", + "cohortStatus": "Actionable", + "cohortText": "positive_description", + } + ], + "actions": [ + { + "actionCode": "action_code", + "actionType": "defaultcomms", + "description": "", + "urlLabel": "", + "urlLink": "", + } + ], + "suitabilityRules": [], + "statusText": "You should have the RSV vaccine", } ] ), @@ -273,7 +324,7 @@ def test_not_eligible_by_rule_when_only_magic_cohort_is_present( ], "actions": [], "suitabilityRules": [], - "statusText": "Status.not_eligible", + "statusText": "We do not believe you can have it", } ] ), @@ -322,7 +373,7 @@ def test_not_actionable_when_only_magic_cohort_is_present( 
"ruleType": "S", } ], - "statusText": "Status.not_actionable", + "statusText": "You should have the COVID vaccine", } ] ), @@ -373,7 +424,7 @@ def test_actionable_when_only_magic_cohort_is_present( } ], "suitabilityRules": [], - "statusText": "Status.actionable", + "statusText": "You should have the COVID vaccine", } ] ), @@ -412,7 +463,7 @@ def test_not_base_eligible( "eligibilityCohorts": [], "actions": [], "suitabilityRules": [], - "statusText": "Status.not_eligible", + "statusText": "We do not believe you can have it", } ] ), @@ -449,7 +500,7 @@ def test_not_eligible_by_rule( "eligibilityCohorts": [], "actions": [], "suitabilityRules": [], - "statusText": "Status.not_eligible", + "statusText": "We do not believe you can have it", } ] ), @@ -492,7 +543,7 @@ def test_not_actionable( "ruleType": "S", } ], - "statusText": "Status.not_actionable", + "statusText": "You should have the FLU vaccine", } ] ), @@ -537,7 +588,7 @@ def test_actionable( } ], "suitabilityRules": [], - "statusText": "Status.actionable", + "statusText": "You should have the FLU vaccine", } ] ), @@ -573,7 +624,7 @@ def test_actionable_no_actions( "status": "Actionable", "eligibilityCohorts": [], "suitabilityRules": [], - "statusText": "Status.actionable", + "statusText": "You should have the FLU vaccine", } ] ), diff --git a/tests/integration/lambda/test_app_running_as_lambda.py b/tests/integration/lambda/test_app_running_as_lambda.py index 54e0370f8..c54ea08c2 100644 --- a/tests/integration/lambda/test_app_running_as_lambda.py +++ b/tests/integration/lambda/test_app_running_as_lambda.py @@ -10,6 +10,7 @@ from brunns.matchers.data import json_matching as is_json_that from brunns.matchers.response import is_response from faker import Faker +from freezegun import freeze_time from hamcrest import ( assert_that, contains_exactly, @@ -23,8 +24,8 @@ ) from yarl import URL -from eligibility_signposting_api.model.eligibility import NHSNumber -from eligibility_signposting_api.model.rules import 
CampaignConfig +from eligibility_signposting_api.model.campaign_config import CampaignConfig +from eligibility_signposting_api.model.eligibility_status import NHSNumber from eligibility_signposting_api.repos.campaign_repo import BucketName logger = logging.getLogger(__name__) @@ -124,11 +125,11 @@ def test_install_and_call_flask_lambda_with_unknown_nhs_number( timeout=10, ) - # Then assert_that( response, is_response() .with_status_code(HTTPStatus.NOT_FOUND) + .with_headers(has_entries({"Content-Type": "application/fhir+json"})) .and_body( is_json_that( has_entries( @@ -153,7 +154,7 @@ def test_install_and_call_flask_lambda_with_unknown_nhs_number( ) ), ) - ) + ), ), ) @@ -232,7 +233,7 @@ def test_given_nhs_number_in_path_matches_with_nhs_number_in_headers_and_check_i "iterationVersion": campaign_config.iterations[0].version, "conditionName": campaign_config.target, "status": "not_actionable", - "statusText": "not_actionable", + "statusText": f"You should have the {campaign_config.target} vaccine", "eligibilityCohorts": [{"cohortCode": "cohort1", "cohortStatus": "not_actionable"}], "eligibilityCohortGroups": [ { @@ -242,11 +243,13 @@ def test_given_nhs_number_in_path_matches_with_nhs_number_in_headers_and_check_i } ], "filterRules": None, - "suitabilityRules": { - "rulePriority": "10", - "ruleName": "Exclude too young less than 75", - "ruleMessage": "Exclude too young less than 75", - }, + "suitabilityRules": [ + { + "rulePriority": "10", + "ruleName": "Exclude too young less than 75", + "ruleMessage": "Exclude too young less than 75", + } + ], "actionRule": None, "actions": [], } @@ -292,6 +295,7 @@ def test_given_nhs_number_in_path_does_not_match_with_nhs_number_in_headers_resu response, is_response() .with_status_code(HTTPStatus.FORBIDDEN) + .with_headers(has_entries({"Content-Type": "application/fhir+json"})) .and_body( is_json_that( has_entries( @@ -300,14 +304,13 @@ def test_given_nhs_number_in_path_does_not_match_with_nhs_number_in_headers_resu has_entries( 
severity="error", code="forbidden", - diagnostics=f"NHS Number {persisted_person} does " - f"not match the header NHS Number 123{persisted_person!s}", + diagnostics="You are not authorised to request information for the supplied NHS Number", details={ "coding": [ { "system": "https://fhir.nhs.uk/STU3/ValueSet/Spine-ErrorOrWarningCode-1", - "code": "INVALID_NHS_NUMBER", - "display": "The provided NHS number does not match the record.", + "code": "ACCESS_DENIED", + "display": "Access has been denied to process this request.", } ] }, @@ -338,6 +341,7 @@ def test_given_nhs_number_not_present_in_headers_results_in_error_response( response, is_response() .with_status_code(HTTPStatus.FORBIDDEN) + .with_headers(has_entries({"Content-Type": "application/fhir+json"})) .and_body( is_json_that( has_entries( @@ -346,13 +350,13 @@ def test_given_nhs_number_not_present_in_headers_results_in_error_response( has_entries( severity="error", code="forbidden", - diagnostics=f"NHS Number {persisted_person} does not match the header NHS Number ", + diagnostics="You are not authorised to request information for the supplied NHS Number", details={ "coding": [ { "system": "https://fhir.nhs.uk/STU3/ValueSet/Spine-ErrorOrWarningCode-1", - "code": "INVALID_NHS_NUMBER", - "display": "The provided NHS number does not match the record.", + "code": "ACCESS_DENIED", + "display": "Access has been denied to process this request.", } ] }, @@ -456,16 +460,19 @@ def test_given_person_has_unique_status_for_different_conditions_with_audit( # "iterationVersion": rsv_campaign.iterations[0].version, "conditionName": rsv_campaign.target, "status": "not_eligible", - "statusText": "not_eligible", - "eligibilityCohorts": [{"cohortCode": "cohort_label1", "cohortStatus": "not_eligible"}], + "statusText": "We do not believe you can have it", + "eligibilityCohorts": [ + {"cohortCode": "cohort_label1", "cohortStatus": "not_eligible"}, + {"cohortCode": "cohort_label4", "cohortStatus": "not_eligible"}, + ], 
"eligibilityCohortGroups": [ - { - "cohortCode": "cohort_group1", - "cohortText": "negative_desc_1", - "cohortStatus": "not_eligible", - } + {"cohortCode": "cohort_group1", "cohortText": "negative_desc_1", "cohortStatus": "not_eligible"}, + {"cohortCode": "cohort_group4", "cohortText": "negative_desc_4", "cohortStatus": "not_eligible"}, + ], + "filterRules": [ + {"rulePriority": "10", "ruleName": "Exclude too young less than 75"}, + {"rulePriority": "8", "ruleName": "Excluded postcode In SW19"}, ], - "filterRules": {"rulePriority": "10", "ruleName": "Exclude too young less than 75"}, "suitabilityRules": None, "actionRule": None, "actions": [], @@ -477,21 +484,20 @@ def test_given_person_has_unique_status_for_different_conditions_with_audit( # "iterationVersion": covid_campaign.iterations[0].version, "conditionName": covid_campaign.target, "status": "not_actionable", - "statusText": "not_actionable", - "eligibilityCohorts": [{"cohortCode": "cohort_label2", "cohortStatus": "not_actionable"}], + "statusText": f"You should have the {covid_campaign.target} vaccine", + "eligibilityCohorts": [ + {"cohortCode": "cohort_label2", "cohortStatus": "not_actionable"}, + {"cohortCode": "cohort_label4", "cohortStatus": "not_actionable"}, + ], "eligibilityCohortGroups": [ - { - "cohortCode": "cohort_group2", - "cohortText": "positive_desc_2", - "cohortStatus": "not_actionable", - } + {"cohortCode": "cohort_group2", "cohortText": "positive_desc_2", "cohortStatus": "not_actionable"}, + {"cohortCode": "cohort_group4", "cohortText": "positive_desc_4", "cohortStatus": "not_actionable"}, ], "filterRules": None, - "suitabilityRules": { - "rulePriority": "10", - "ruleName": "Exclude too young less than 75", - "ruleMessage": "Exclude too young less than 75", - }, + "suitabilityRules": [ + {"rulePriority": "10", "ruleName": "Exclude too young less than 75", "ruleMessage": "TOO YOUNG"}, + {"rulePriority": "12", "ruleName": "Excluded postcode In SW19", "ruleMessage": "In SW19"}, + ], 
"actionRule": None, "actions": [], }, @@ -502,14 +508,14 @@ def test_given_person_has_unique_status_for_different_conditions_with_audit( # "iterationVersion": flu_campaign.iterations[0].version, "conditionName": flu_campaign.target, "status": "actionable", - "statusText": "actionable", - "eligibilityCohorts": [{"cohortCode": "cohort_label3", "cohortStatus": "actionable"}], + "statusText": f"You should have the {flu_campaign.target} vaccine", + "eligibilityCohorts": [ + {"cohortCode": "cohort_label3", "cohortStatus": "actionable"}, + {"cohortCode": "cohort_label4", "cohortStatus": "actionable"}, + ], "eligibilityCohortGroups": [ - { - "cohortCode": "cohort_group3", - "cohortText": "positive_desc_3", - "cohortStatus": "actionable", - } + {"cohortCode": "cohort_group3", "cohortText": "positive_desc_3", "cohortStatus": "actionable"}, + {"cohortCode": "cohort_group4", "cohortText": "positive_desc_4", "cohortStatus": "actionable"}, ], "filterRules": None, "suitabilityRules": None, @@ -534,3 +540,40 @@ def test_given_person_has_unique_status_for_different_conditions_with_audit( # assert_that(audit_data["response"]["responseId"], is_not(equal_to(""))) assert_that(audit_data["response"]["lastUpdated"], is_not(equal_to(""))) assert_that(audit_data["response"]["condition"], contains_inanyorder(*expected_conditions)) + + +@freeze_time("2025-08-08") +def test_no_active_iteration_returns_empty_processed_suggestions( + lambda_client: BaseClient, # noqa:ARG001 + persisted_person_all_cohorts: NHSNumber, + inactive_iteration_config: list[CampaignConfig], # noqa:ARG001 + api_gateway_endpoint: URL, +): + invoke_url = f"{api_gateway_endpoint}/patient-check/{persisted_person_all_cohorts}" + response = httpx.get( + invoke_url, + headers={ + "nhs-login-nhs-number": str(persisted_person_all_cohorts), + "x_request_id": "x_request_id", + "x_correlation_id": "x_correlation_id", + "nhsd_end_user_organisation_ods": "nhsd_end_user_organisation_ods", + "nhsd_application_id": 
"nhsd_application_id", + }, + params={"includeActions": "Y", "category": "VACCINATIONS", "conditions": "COVID,FLU,RSV"}, + timeout=10, + ) + + assert_that( + response, + is_response().with_status_code(HTTPStatus.OK).and_body(is_json_that(has_key("processedSuggestions"))), + ) + + body = response.json() + assert_that( + body["processedSuggestions"], + contains_inanyorder( + has_entries("condition", "COVID"), + has_entries("condition", "RSV"), + has_entries("condition", "FLU"), + ), + ) diff --git a/tests/integration/repo/test_campaign_repo.py b/tests/integration/repo/test_campaign_repo.py index 5870d1b32..96742d38a 100644 --- a/tests/integration/repo/test_campaign_repo.py +++ b/tests/integration/repo/test_campaign_repo.py @@ -5,7 +5,7 @@ from botocore.client import BaseClient from hamcrest import assert_that, has_item -from eligibility_signposting_api.model.rules import CampaignConfig +from eligibility_signposting_api.model.campaign_config import CampaignConfig from eligibility_signposting_api.repos.campaign_repo import BucketName, CampaignRepo from tests.fixtures.builders.model.rule import CampaignConfigFactory from tests.fixtures.matchers.rules import is_campaign_config, is_iteration, is_iteration_rule diff --git a/tests/integration/repo/test_person_repo.py b/tests/integration/repo/test_person_repo.py index 7a444d20e..25905e9cf 100644 --- a/tests/integration/repo/test_person_repo.py +++ b/tests/integration/repo/test_person_repo.py @@ -4,7 +4,7 @@ from faker import Faker from hamcrest import assert_that, contains_inanyorder, has_entries -from eligibility_signposting_api.model.eligibility import NHSNumber +from eligibility_signposting_api.model.eligibility_status import NHSNumber from eligibility_signposting_api.repos import NotFoundError from eligibility_signposting_api.repos.person_repo import PersonRepo @@ -18,7 +18,7 @@ def test_person_found(person_table: Any, persisted_person: NHSNumber): # Then assert_that( - actual, + actual.data, contains_inanyorder( 
has_entries({"NHS_NUMBER": persisted_person, "ATTRIBUTE_TYPE": "PERSON"}), has_entries({"NHS_NUMBER": persisted_person, "ATTRIBUTE_TYPE": "COHORTS"}), diff --git a/tests/test_data/test_config/test_config.json b/tests/test_data/test_config/test_config.json index ab2672df5..8f9cb1445 100644 --- a/tests/test_data/test_config/test_config.json +++ b/tests/test_data/test_config/test_config.json @@ -5,22 +5,38 @@ "Name": "Test Config", "Type": "V", "Target": "RSV", - "Manager": "person@test.com", - "Approver": "person@test.com", - "Reviewer": "person@test.com", + "Manager": ["person@test.com"], + "Approver": ["person@test.com"], + "Reviewer": ["person@test.com"], + "StartDate": "20250101", + "EndDate": "20260101", + "ApprovalMinimum": 1, + "ApprovalMaximum": 5000000, "IterationFrequency": "X", "IterationType": "M", "IterationTime": "07:00:00", - "DefaultCommsRouting": "Default_Comms_1", "Iterations": [ { "ID": "id_100", + "Version": "1", + "Name": "Test Config", + "Type": "M", + "IterationDate": "20250101", + "IterationNumber": 1, + "CommsType": "R", + "ApprovalMinimum": 1, + "ApprovalMaximum": 5000000, "DefaultCommsRouting": "INTERNALCONTACTGP1", + "DefaultNotActionableRouting": "INTERNALCONTACTGP1", + "DefaultNotEligibleRouting": "INTERNALCONTACTGP1", "ActionsMapper": { "INTERNALCONTACTGP1": {"ExternalRoutingCode": "CONTACTGP","ActionDescription":"Contact GP Text1 description", "ActionType":"text1"}, - "INTERNALCONTACTGP2": {"ExternalRoutingCode": "CONTACTGP","ActionDescription":"Contact GP Link description", "ActionType":"link", "UrlLink": "link123", "UrlLabel": "link label"}, - "INTERNALTESCO": {"ExternalRoutingCode": "TESCO","ActionDescription":"Tesco description", "ActionType":"link", "UrlLink": "tesco link", "UrlLabel": "link label"}, - "INTERNALFINDWALKIN": {"ExternalRoutingCode": "FINDWALKIN","ActionDescription":"Find walkin description", "ActionType":"button"} + "INTERNALCONTACTGP2": {"ExternalRoutingCode": "CONTACTGP","ActionDescription":"Contact GP Link 
description", "ActionType":"link", "UrlLink": "https://www.link123.example", "UrlLabel": "link label"}, + "INTERNALTESCO": {"ExternalRoutingCode": "TESCO","ActionDescription":"Tesco description", "ActionType":"link", "UrlLink": "https://www.tesco_link.example", "UrlLabel": "link label"}, + "INTERNALFINDWALKIN": {"ExternalRoutingCode": "FINDWALKIN","ActionDescription":"Find walkin description", "ActionType":"button"}, + + "XRULEID1": {"ExternalRoutingCode": "FINDWALKIN","ActionDescription":"Find walkin description", "ActionType":"button"}, + "YRULEID1": {"ExternalRoutingCode": "FINDWALKIN","ActionDescription":"Find walkin description", "ActionType":"button"} }, "IterationCohorts": [ { @@ -91,21 +107,31 @@ "Operator": ">", "Comparator": "19000101", "CommsRouting": "INTERNALCONTACTGP1|INTERNALTESCO" + }, + { + "Type": "X", + "Name": "Test X Rule for not eligible", + "Description": "Test X Rule Desc", + "Priority": 20, + "AttributeLevel": "PERSON", + "AttributeName": "DATE_OF_BIRTH", + "Operator": ">", + "Comparator": "19000101", + "CommsRouting": "XRULEID1|INTERNALTESCO" + }, + { + "Type": "Y", + "Name": "Test Y Rule for not actionable", + "Description": "Test Y Rule Desc", + "Priority": 20, + "AttributeLevel": "PERSON", + "AttributeName": "DATE_OF_BIRTH", + "Operator": ">", + "Comparator": "19000101", + "CommsRouting": "YRULEID1|INTERNALTESCO" } - ], - "Version": "1", - "Name": "Test Config", - "Type": "M", - "IterationDate": "20250101", - "IterationNumber": 1, - "CommsType": "R", - "ApprovalMinimum": 1, - "ApprovalMaximum": 5000000 + ] } - ], - "StartDate": "20250101", - "EndDate": "20260101", - "ApprovalMinimum": 1, - "ApprovalMaximum": 5000000 + ] } } diff --git a/tests/unit/audit/test_audit_context.py b/tests/unit/audit/test_audit_context.py index 2ee9aafdc..05d78bd8e 100644 --- a/tests/unit/audit/test_audit_context.py +++ b/tests/unit/audit/test_audit_context.py @@ -9,14 +9,17 @@ from eligibility_signposting_api.audit.audit_context import AuditContext from 
eligibility_signposting_api.audit.audit_models import AuditAction, AuditEvent from eligibility_signposting_api.audit.audit_service import AuditService -from eligibility_signposting_api.model.eligibility import ( +from eligibility_signposting_api.model.campaign_config import CampaignID, CampaignVersion, RuleType +from eligibility_signposting_api.model.eligibility_status import ( ActionCode, ActionDescription, ActionType, + BestIterationResult, CohortGroupResult, ConditionName, InternalActionCode, IterationResult, + MatchedActionDetail, Reason, RuleDescription, RuleName, @@ -26,7 +29,6 @@ UrlLabel, UrlLink, ) -from eligibility_signposting_api.model.rules import CampaignID, CampaignVersion, Iteration, RuleType from tests.fixtures.builders.model.rule import IterationFactory @@ -83,9 +85,7 @@ def test_add_request_details_when_headers_are_empty_sets_audit_log_on_g(app): def test_append_audit_condition_adds_condition_to_audit_log_on_g(app): suggested_actions: list[SuggestedAction] | None condition_name: ConditionName - best_results: tuple[Iteration, IterationResult, dict[str, CohortGroupResult]] campaign_details: tuple[CampaignID | None, CampaignVersion | None] - redirect_rule_details: tuple[RulePriority | None, RuleName | None] suggested_actions = [ SuggestedAction( @@ -99,7 +99,7 @@ def test_append_audit_condition_adds_condition_to_audit_log_on_g(app): ] condition_name = ConditionName("Condition1") - iteration = IterationFactory.build() + iteration = IterationFactory.build(version=12345) audit_rules = [ Reason( rule_type=RuleType.filter, @@ -119,15 +119,18 @@ def test_append_audit_condition_adds_condition_to_audit_log_on_g(app): iteration_result = IterationResult( status=Status.actionable, cohort_results=[cohort_group_result], actions=suggested_actions ) - best_results = (iteration, iteration_result, {"CohortCode1": cohort_group_result}) - campaign_details = (CampaignID("CampaignID1"), CampaignVersion("CampaignVersion1")) - redirect_rule_details = (RulePriority("1"), 
RuleName("RedirectRuleName1")) + campaign_details = (CampaignID("CampaignID1"), CampaignVersion(123)) + matched_action_detail = MatchedActionDetail(RuleName("RedirectRuleName1"), RulePriority("1"), suggested_actions) + + best_iteration_results = BestIterationResult( + iteration_result, iteration, campaign_details[0], campaign_details[1], {"CohortCode1": cohort_group_result} + ) with app.app_context(): g.audit_log = AuditEvent() AuditContext.append_audit_condition( - suggested_actions, condition_name, best_results, campaign_details, redirect_rule_details + condition_name, best_iteration_results, matched_action_detail, [cohort_group_result] ) expected_audit_action = [ @@ -148,8 +151,8 @@ def test_append_audit_condition_adds_condition_to_audit_log_on_g(app): assert cond.campaign_version == campaign_details[1] assert cond.iteration_id == iteration.id assert cond.iteration_version == iteration.version - assert cond.status == best_results[1].status.name - assert cond.status_text == best_results[1].status.name + assert cond.status == "actionable" + assert cond.status_text == "You should have the Condition1 vaccine" assert cond.actions == expected_audit_action assert cond.action_rule.rule_priority == "1" assert cond.action_rule.rule_name == "RedirectRuleName1" @@ -190,3 +193,44 @@ def test_write_to_firehose_calls_audit_service_with_correct_data_from_g(app): assert g.audit_log.response.last_updated == last_updated mock_audit_service.audit.assert_called_once_with(g.audit_log.model_dump(by_alias=True)) + + +def test_no_duplicates_returns_same_list(): + reasons = [ + Reason(RuleType("F"), RuleName("code1"), RulePriority("1"), RuleDescription("desc1"), matcher_matched=True), + Reason(RuleType("S"), RuleName("code2"), RulePriority("2"), RuleDescription("desc2"), matcher_matched=False), + ] + expected = reasons + assert AuditContext.deduplicate_reasons(reasons) == expected + + +def test_duplicates_are_removed(): + reasons = [ + Reason(RuleType("F"), RuleName("code1"), 
RulePriority("1"), RuleDescription("desc1"), matcher_matched=True), + Reason(RuleType("S"), RuleName("code1"), RulePriority("2"), RuleDescription("desc2"), matcher_matched=False), + Reason(RuleType("R"), RuleName("code3"), RulePriority("3"), RuleDescription("desc3"), matcher_matched=True), + ] + expected = [ + Reason(RuleType("F"), RuleName("code1"), RulePriority("1"), RuleDescription("desc1"), matcher_matched=True), + Reason(RuleType("R"), RuleName("code3"), RulePriority("3"), RuleDescription("desc3"), matcher_matched=True), + ] + assert AuditContext.deduplicate_reasons(reasons) == expected + + +def test_empty_list_returns_empty_list(): + reasons = [] + expected = [] + assert AuditContext.deduplicate_reasons(reasons) == expected + + +def test_reasons_with_no_description_are_filtered_out(): + reasons = [ + Reason(RuleType("F"), RuleName("code1"), RulePriority("1"), RuleDescription("desc1"), matcher_matched=True), + Reason(RuleType("S"), RuleName("code2"), RulePriority("2"), None, matcher_matched=False), + Reason(RuleType("R"), RuleName("code3"), RulePriority("3"), RuleDescription("desc3"), matcher_matched=True), + ] + expected = [ + Reason(RuleType("F"), RuleName("code1"), RulePriority("1"), RuleDescription("desc1"), matcher_matched=True), + Reason(RuleType("R"), RuleName("code3"), RulePriority("3"), RuleDescription("desc3"), matcher_matched=True), + ] + assert AuditContext.deduplicate_reasons(reasons) == expected diff --git a/tests/unit/common/__init__.py b/tests/unit/common/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/unit/common/test_request_validator.py b/tests/unit/common/test_request_validator.py new file mode 100644 index 000000000..0ec6726d5 --- /dev/null +++ b/tests/unit/common/test_request_validator.py @@ -0,0 +1,387 @@ +import json +import logging +from http import HTTPStatus +from unittest.mock import MagicMock + +import pytest + +from eligibility_signposting_api.common import request_validator +from 
eligibility_signposting_api.common.request_validator import logger + + +@pytest.fixture(autouse=True) +def setup_logging_for_tests(): + logger.handlers = [] + logger.setLevel(logging.INFO) + logger.addHandler(logging.NullHandler()) + + +class TestValidateNHSNumber: + @pytest.mark.parametrize( + ("path_nhs", "header_nhs", "expected_result", "expected_log_msg"), + [ + (None, None, False, "NHS number is not present"), + ("1234567890", None, False, "NHS number is not present"), + (None, "1234567890", False, "NHS number is not present"), + ("1234567890", "0987654321", False, "NHS number mismatch"), + ("1234567890", "1234567890", True, None), + ], + ) + def test_validate_nhs_number(self, path_nhs, header_nhs, expected_result, expected_log_msg, caplog): + with caplog.at_level(logging.ERROR): + result = request_validator.validate_nhs_number(path_nhs, header_nhs) + + assert result == expected_result + + if expected_log_msg: + assert any(expected_log_msg in record.message for record in caplog.records) + else: + assert not caplog.records + + +class TestValidateRequestParams: + def test_validate_request_params_success(self, caplog): + mock_handler = MagicMock() + mock_handler.__name__ = "mock_handler" + + mock_event_valid = { + "pathParameters": {"id": "1234567890"}, + "headers": {"nhs-login-nhs-number": "1234567890"}, + } + mock_context = {} + + decorator = request_validator.validate_request_params() + wrapped_handler = decorator(mock_handler) + with caplog.at_level(logging.INFO): + wrapped_handler(mock_event_valid, mock_context) + + assert any("NHS numbers from the request" in record.message for record in caplog.records) + assert not any(record.levelname == "ERROR" for record in caplog.records) + + def test_validate_request_params_nhs_mismatch(self, caplog): + mock_handler = MagicMock() + mock_context = {} + event = { + "pathParameters": {"id": "0987654321"}, + "headers": {"nhs-login-nhs-number": "1234567890"}, + } + + decorator = request_validator.validate_request_params() 
+ wrapped_handler = decorator(mock_handler) + + with caplog.at_level(logging.ERROR): + response = wrapped_handler(event, mock_context) + + mock_handler.assert_not_called() + + assert response is not None + assert response["statusCode"] == HTTPStatus.FORBIDDEN + response_body = json.loads(response["body"]) + issue = response_body["issue"][0] + assert issue["code"] == "forbidden" + assert issue["details"]["coding"][0]["code"] == "ACCESS_DENIED" + assert issue["details"]["coding"][0]["display"] == "Access has been denied to process this request." + assert issue["diagnostics"] == "You are not authorised to request information for the supplied NHS Number" + + def test_validate_request_params_nhs_missing_in_path(self, caplog): + mock_handler = MagicMock() + mock_context = {} + event = { + "headers": {"nhs-login-nhs-number": "1234567890"}, + } + + decorator = request_validator.validate_request_params() + wrapped_handler = decorator(mock_handler) + + with caplog.at_level(logging.ERROR): + response = wrapped_handler(event, mock_context) + + mock_handler.assert_not_called() + + assert response is not None + assert response["statusCode"] == HTTPStatus.BAD_REQUEST + response_body = json.loads(response["body"]) + issue = response_body["issue"][0] + assert issue["code"] == "invalid" + assert issue["severity"] == "error" + assert issue["details"]["coding"][0]["code"] == "BAD_REQUEST" + assert issue["details"]["coding"][0]["display"] == "Bad Request" + assert issue["diagnostics"] == "Missing required NHS Number from path parameters" + assert issue["location"][0] == "parameters/id" + assert any( + (record.levelname == "ERROR" and "Missing required NHS Number from path parameters" in record.message) + for record in caplog.records + ) + + +class TestValidateQueryParameters: + @pytest.mark.parametrize( + ("conditions_input", "is_valid_expected", "expected_log_msg"), + [ + ("ALL", True, None), + ("COVID", True, None), + ("covid19", True, None), + ("FLU,MMR", True, None), + (" RSV , 
COVID19", True, None), + (" condition_with_spaces ", False, "Invalid condition query param: ' condition_with_spaces '"), + ("CONDITION_A,ANOTHER_ONE,123ABC", False, "Invalid condition query param: 'CONDITION_A'"), + ("condition1,", False, "Invalid condition query param: ''"), + (",condition2", False, "Invalid condition query param: ''"), + ("condition-invalid", False, "Invalid condition query param: 'condition-invalid'"), + ("condition with spaces", False, "Invalid condition query param: 'condition with spaces'"), + ("condition!", False, "Invalid condition query param: 'condition!'"), + ("condition@#$", False, "Invalid condition query param: 'condition@#$'"), + ], + ) + def test_validate_query_params_conditions(self, conditions_input, is_valid_expected, expected_log_msg, caplog): + params = {"conditions": conditions_input} + + with caplog.at_level(logging.ERROR): + is_valid, problem = request_validator.validate_query_params(params) + + assert is_valid == is_valid_expected + if is_valid_expected: + assert problem is None + assert not caplog.records + else: + assert problem is not None + assert any( + (record.levelname == "ERROR" and expected_log_msg in record.message) for record in caplog.records + ) + + def test_validate_query_params_conditions_default(self, caplog): + params = {"category": "ALL", "includeActions": "Y"} + with caplog.at_level(logging.ERROR): + is_valid, problem = request_validator.validate_query_params(params) + assert is_valid is True + assert problem is None + assert not caplog.records + + @pytest.mark.parametrize( + ("category_input", "is_valid_expected", "expected_log_msg"), + [ + ("VACCINATIONS", True, None), + ("SCREENING", True, None), + ("ALL", True, None), + ("vaccinations", True, None), + ("screening", True, None), + ("all", True, None), + (" VACCINATIONS ", True, None), + ("OTHER_CATEGORY ", False, "Invalid category query param: 'OTHER_CATEGORY '"), + ("invalid!", False, "Invalid category query param: 'invalid!'"), + ("VACCINATION", 
False, "Invalid category query param: 'VACCINATION'"), + ], + ) + def test_validate_query_params_category(self, category_input, is_valid_expected, expected_log_msg, caplog): + params = {"category": category_input} + with caplog.at_level(logging.ERROR): + is_valid, problem = request_validator.validate_query_params(params) + assert is_valid == is_valid_expected + + if is_valid_expected: + assert problem is None + assert not caplog.records + else: + assert problem is not None + assert any( + (record.levelname == "ERROR" and expected_log_msg in record.message) for record in caplog.records + ) + + def test_validate_query_params_category_default(self, caplog): + params = {"conditions": "ALL", "includeActions": "Y"} + with caplog.at_level(logging.ERROR): + is_valid, problem = request_validator.validate_query_params(params) + assert is_valid is True + assert problem is None + assert not caplog.records + + @pytest.mark.parametrize( + ("include_actions_input", "is_valid_expected", "expected_log_msg"), + [ + ("Y", True, None), + ("N", True, None), + ("y", True, None), + ("n", True, None), + ("n ", True, None), + ("TRUE", False, "Invalid include actions query param: 'TRUE'"), + ("YES", False, "Invalid include actions query param: 'YES'"), + ("0", False, "Invalid include actions query param: '0'"), + ("1", False, "Invalid include actions query param: '1'"), + ("", False, "Invalid include actions query param: ''"), + (" ", False, "Invalid include actions query param: ' '"), + ], + ) + def test_validate_query_params_include_actions( + self, include_actions_input, is_valid_expected, expected_log_msg, caplog + ): + params = {"includeActions": include_actions_input} + with caplog.at_level(logging.ERROR): + is_valid, problem = request_validator.validate_query_params(params) + assert is_valid == is_valid_expected + + if is_valid_expected: + assert problem is None + assert not caplog.records + else: + assert problem is not None + assert any( + (record.levelname == "ERROR" and 
expected_log_msg in record.message) for record in caplog.records + ) + + def test_validate_query_params_include_actions_default(self, caplog): + params = {"conditions": "ALL", "category": "ALL"} + with caplog.at_level(logging.ERROR): + is_valid, problem = request_validator.validate_query_params(params) + assert is_valid is True + assert problem is None + assert not caplog.records + + def test_validate_query_params_all_valid_params(self, caplog): + params = {"conditions": "COND1,COND2", "category": "SCREENING", "includeActions": "N"} + with caplog.at_level(logging.ERROR): + is_valid, problem = request_validator.validate_query_params(params) + assert is_valid is True + assert problem is None + assert not caplog.records + + def test_validate_query_params_mixed_valid_invalid_conditions_fail_first(self, caplog): + params = {"conditions": "VALID_COND,INVALID!,ANOTHER_VALID", "category": "SCREENING", "includeActions": "N"} + with caplog.at_level(logging.ERROR): + is_valid, problem = request_validator.validate_query_params(params) + assert is_valid is False + assert problem is not None + assert any( + (record.levelname == "ERROR" and "Invalid condition query param: " in record.message) + for record in caplog.records + ) + + def test_validate_query_params_valid_conditions_invalid_category_fail_second(self, caplog): + params = {"conditions": "CONDITION", "category": "BAD_CAT", "includeActions": "N"} + with caplog.at_level(logging.ERROR): + is_valid, problem = request_validator.validate_query_params(params) + assert is_valid is False + assert problem is not None + assert any( + (record.levelname == "ERROR" and "Invalid category query param: " in record.message) + for record in caplog.records + ) + error_logs = [r for r in caplog.records if r.levelname == "ERROR"] + assert len(error_logs) == 1 + + def test_validate_query_params_valid_conditions_category_invalid_actions_fail_third(self, caplog): + params = {"conditions": "CONDITION", "category": "VACCINATIONS", 
"includeActions": "Nope"} + with caplog.at_level(logging.ERROR): + is_valid, problem = request_validator.validate_query_params(params) + assert is_valid is False + assert problem is not None + assert any( + (record.levelname == "ERROR" and "Invalid include actions query param: " in record.message) + for record in caplog.records + ) + error_logs = [r for r in caplog.records if r.levelname == "ERROR"] + assert len(error_logs) == 1 + + def test_validate_query_params_returns_correct_problem_details_for_conditions_error(self): + invalid_condition = "FLU&COVID" + params = {"conditions": invalid_condition} + + is_valid, problem = request_validator.validate_query_params(params) + + assert is_valid is False + assert problem is not None + assert problem["statusCode"] == HTTPStatus.BAD_REQUEST + assert problem["headers"]["Content-Type"] == "application/fhir+json" + + response_body = json.loads(problem["body"]) + + assert response_body["resourceType"] == "OperationOutcome" + assert "id" in response_body + assert "meta" in response_body + assert "lastUpdated" in response_body["meta"] + + assert len(response_body["issue"]) == 1 + issue = response_body["issue"][0] + + assert issue["severity"] == "error" + assert issue["code"] == "value" + assert issue["diagnostics"] == ( + f"{invalid_condition} should be a single or comma separated list of condition " + f"strings with no other punctuation or special characters" + ) + assert issue["location"] == ["parameters/conditions"] + assert "details" in issue + assert "coding" in issue["details"] + assert len(issue["details"]["coding"]) == 1 + coding = issue["details"]["coding"][0] + + assert coding["system"] == "https://fhir.nhs.uk/STU3/ValueSet/Spine-ErrorOrWarningCode-1" + assert coding["code"] == "INVALID_PARAMETER" + assert coding["display"] == "The given conditions were not in the expected format." 
+ + def test_validate_query_params_returns_correct_problem_details_for_category_error(self): + invalid_category = "HEALTHCHECKS" + params = {"category": invalid_category} + + is_valid, problem = request_validator.validate_query_params(params) + + assert is_valid is False + assert problem is not None + assert problem["statusCode"] == HTTPStatus.UNPROCESSABLE_ENTITY + assert problem["headers"]["Content-Type"] == "application/fhir+json" + + response_body = json.loads(problem["body"]) + + assert response_body["resourceType"] == "OperationOutcome" + assert "id" in response_body + assert "meta" in response_body + assert "lastUpdated" in response_body["meta"] + + assert len(response_body["issue"]) == 1 + issue = response_body["issue"][0] + + assert issue["severity"] == "error" + assert issue["code"] == "value" + assert issue["diagnostics"] == f"{invalid_category} is not a category that is supported by the API" + assert issue["location"] == ["parameters/category"] + assert "details" in issue + assert "coding" in issue["details"] + assert len(issue["details"]["coding"]) == 1 + coding = issue["details"]["coding"][0] + + assert coding["system"] == "https://fhir.nhs.uk/STU3/ValueSet/Spine-ErrorOrWarningCode-1" + assert coding["code"] == "INVALID_PARAMETER" + assert coding["display"] == "The supplied category was not recognised by the API." 
+ + def test_validate_query_params_returns_correct_problem_details_for_include_actions_error(self): + invalid_include_actions = "NAH" + params = {"includeActions": invalid_include_actions} + + is_valid, problem = request_validator.validate_query_params(params) + + assert is_valid is False + assert problem is not None + assert problem["statusCode"] == HTTPStatus.UNPROCESSABLE_ENTITY + assert problem["headers"]["Content-Type"] == "application/fhir+json" + + response_body = json.loads(problem["body"]) + + assert response_body["resourceType"] == "OperationOutcome" + assert "id" in response_body + assert "meta" in response_body + assert "lastUpdated" in response_body["meta"] + + assert len(response_body["issue"]) == 1 + issue = response_body["issue"][0] + + assert issue["severity"] == "error" + assert issue["code"] == "value" + assert issue["diagnostics"] == f"{invalid_include_actions} is not a value that is supported by the API" + assert issue["location"] == ["parameters/includeActions"] + assert "details" in issue + assert "coding" in issue["details"] + assert len(issue["details"]["coding"]) == 1 + coding = issue["details"]["coding"][0] + + assert coding["system"] == "https://fhir.nhs.uk/STU3/ValueSet/Spine-ErrorOrWarningCode-1" + assert coding["code"] == "INVALID_PARAMETER" + assert coding["display"] == "The supplied value was not recognised by the API." 
diff --git a/tests/unit/config/__init__.py b/tests/unit/config/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/unit/test_config.py b/tests/unit/config/test_config.py similarity index 100% rename from tests/unit/test_config.py rename to tests/unit/config/test_config.py diff --git a/tests/unit/logging/__init__.py b/tests/unit/logging/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/unit/logging/test_logs_helper.py b/tests/unit/logging/test_logs_helper.py new file mode 100644 index 000000000..d11885bc9 --- /dev/null +++ b/tests/unit/logging/test_logs_helper.py @@ -0,0 +1,70 @@ +import logging +from http import HTTPStatus +from unittest.mock import Mock + +import pytest +from mangum.types import LambdaContext + + +@pytest.fixture +def lambda_context(): + context = Mock(spec=LambdaContext) + context.aws_request_id = "test-request-id" + return context + + +@pytest.mark.parametrize( + ("headers", "gateway_request_id", "expected_extra"), + [ + ( + {"X-Request-ID": "req-123", "X-Correlation-ID": "corr-abc"}, + "gw-id-999", + { + "x_request_id": "req-123", + "x_correlation_id": "corr-abc", + "gateway_request_id": "gw-id-999", + }, + ), + ( + {}, # No headers + "gw-id-000", + { + "x_request_id": None, + "x_correlation_id": None, + "gateway_request_id": "gw-id-000", + }, + ), + ( + {"X-Request-ID": "req-local"}, + None, # No requestContext (non-Gateway trigger) + { + "x_request_id": "req-local", + "x_correlation_id": None, + "gateway_request_id": None, + }, + ), + ], +) +def test_log_request_ids_decorator_logs_metadata(headers, gateway_request_id, expected_extra, lambda_context, caplog): + from eligibility_signposting_api.app import log_request_ids_from_headers + + event = {"headers": headers} + if gateway_request_id is not None: + event["requestContext"] = {"requestId": gateway_request_id} + + @log_request_ids_from_headers() + def test_handler(event, context): # noqa : ARG001 + logger = 
logging.getLogger("test_logger") + logger.info("Inside test handler") + return HTTPStatus.OK + + with caplog.at_level(logging.INFO): + test_handler(event, lambda_context) + + for record in caplog.records: + if record.message == "request trace metadata": + for key, val in expected_extra.items(): + assert getattr(record, key) == val + break + else: + pytest.fail("'request trace metadata' log not found") diff --git a/tests/unit/logging/test_logs_manager.py b/tests/unit/logging/test_logs_manager.py new file mode 100644 index 000000000..f78c13451 --- /dev/null +++ b/tests/unit/logging/test_logs_manager.py @@ -0,0 +1,120 @@ +import io +import json +import logging +import threading +from http import HTTPStatus +from unittest.mock import MagicMock, Mock + +import pytest +from mangum.types import LambdaContext + +from eligibility_signposting_api.logging.logs_manager import ( + LOG_FORMAT, + EnrichedJsonFormatter, + add_lambda_request_id_to_logger, + request_id_context_var, +) + + +def test_decorator_sets_request_id_in_context(): + test_request_id = "test-id-12345" + mock_context = MagicMock() + mock_context.aws_request_id = test_request_id + + @add_lambda_request_id_to_logger() + def decorated_handler(event, context): # noqa : ARG001 + return request_id_context_var.get() + + result = decorated_handler({}, mock_context) + + assert result == test_request_id + + +def test_decorator_preserves_function_return_value(): + expected_result = {"statusCode": 200, "body": "Success"} + mock_context = MagicMock() + mock_context.aws_request_id = "any-id" + + @add_lambda_request_id_to_logger() + def decorated_handler(event, context): # noqa : ARG001 + return expected_result + + result = decorated_handler({}, mock_context) + + assert result == expected_result + + +def test_request_id_context_is_properly_isolated(): + results = {} + + @add_lambda_request_id_to_logger() + def decorated_handler(event, context): # noqa : ARG001 + rid = request_id_context_var.get() + 
results[threading.current_thread().name] = rid + return rid + + def thread_func(name, rid): # noqa : ARG001 + mock_context = MagicMock(aws_request_id=rid) + decorated_handler({}, mock_context) + + threads = [ + threading.Thread(target=thread_func, name="Thread-A", args=("Thread-A", "id-A")), + threading.Thread(target=thread_func, name="Thread-B", args=("Thread-B", "id-B")), + threading.Thread(target=thread_func, name="Thread-C", args=("Thread-C", "id-C")), + ] + + for t in threads: + t.start() + for t in threads: + t.join() + + assert results["Thread-A"] == "id-A" + assert request_id_context_var.get() is None + + assert results["Thread-B"] == "id-B" + assert request_id_context_var.get() is None + + assert results["Thread-C"] == "id-C" + assert request_id_context_var.get() is None + + +@pytest.fixture +def lambda_context(): + context = Mock(spec=LambdaContext) + context.aws_request_id = "test-request-id" + return context + + +def test_enriched_json_formatter_adds_all_fields(lambda_context): + @add_lambda_request_id_to_logger() + def test_handler(event, context): # noqa : ARG001 + logger = logging.getLogger("test_logger") + logger.info("Test log inside handler") + return HTTPStatus.OK + + log_stream = io.StringIO() + handler = logging.StreamHandler(log_stream) + handler.setFormatter(EnrichedJsonFormatter(LOG_FORMAT)) + + test_logger = logging.getLogger("test_logger") + test_logger.handlers = [] + test_logger.addHandler(handler) + test_logger.setLevel(logging.INFO) + + result = test_handler({}, lambda_context) + log_output = log_stream.getvalue() + + test_logger.removeHandler(handler) + + assert result == HTTPStatus.OK + logged_json = json.loads(log_output) + + assert logged_json["request_id"] == lambda_context.aws_request_id + assert "asctime" in logged_json + assert logged_json["levelname"] == "INFO" + assert logged_json["name"] == "test_logger" + assert logged_json["module"] == "test_logs_manager" + assert logged_json["funcName"] == "test_handler" + assert "lineno" 
in logged_json + assert logged_json["message"] == "Test log inside handler" + assert request_id_context_var.get() is None diff --git a/tests/unit/model/test_rules.py b/tests/unit/model/test_campaign_config.py similarity index 81% rename from tests/unit/model/test_rules.py rename to tests/unit/model/test_campaign_config.py index 6419455d1..dbeebc73f 100644 --- a/tests/unit/model/test_rules.py +++ b/tests/unit/model/test_campaign_config.py @@ -5,7 +5,7 @@ from faker import Faker from hamcrest import assert_that -from eligibility_signposting_api.model.rules import IterationRule +from eligibility_signposting_api.model.campaign_config import IterationRule from tests.fixtures.builders.model.rule import IterationFactory, RawCampaignConfigFactory from tests.fixtures.matchers.rules import is_iteration_rule @@ -52,20 +52,6 @@ def test_iteration_with_overlapping_start_dates_not_allowed(faker: Faker): RawCampaignConfigFactory.build(start_date=start_date, iterations=[iteration1, iteration2]) -def test_iteration_must_have_active_iteration_from_its_start(faker: Faker): - # Given - start_date = faker.date_object() - iteration = IterationFactory.build(iteration_date=start_date + relativedelta(days=1)) - - # When, Then - with pytest.raises( - ValueError, - match=r"1 validation error for CampaignConfig\n" - r".*1st iteration starts later", - ): - RawCampaignConfigFactory.build(start_date=start_date, iterations=[iteration]) - - @pytest.mark.parametrize( ("rule_stop", "expected"), [ @@ -92,3 +78,23 @@ def test_iteration_rule_deserialisation(rule_stop: str, expected): # Then assert_that(actual, is_iteration_rule().with_rule_stop(expected)) + + +@pytest.mark.parametrize( + ("field_name", "value"), + [ + ("manager", "manager@test.com"), + ("approver", "approver@test.com"), + ("reviewer", "reviewer@test.com"), + ], +) +def test_campaign_should_accept_list_of_strings_for_different_role_emails(field_name: str, value: str): + # Given + kwargs = {field_name: value} + + # When, Then + with 
pytest.raises( + ValueError, + match=rf"1 validation error for CampaignConfig\n{field_name}\n\s+Input should be a valid list.*", + ): + RawCampaignConfigFactory.build(**kwargs) diff --git a/tests/unit/model/test_status.py b/tests/unit/model/test_status.py new file mode 100644 index 000000000..cc3c018d9 --- /dev/null +++ b/tests/unit/model/test_status.py @@ -0,0 +1,47 @@ +from eligibility_signposting_api.model.eligibility_status import ConditionName, RuleType, Status, StatusText + + +class TestStatus: + def test_ordering(self): + assert Status.not_eligible < Status.not_actionable + assert Status.not_actionable < Status.actionable + assert Status.actionable > Status.not_actionable + assert Status.not_actionable > Status.not_eligible + assert Status.not_eligible == Status.not_eligible + + def test_is_exclusion(self): + assert Status.not_eligible.is_exclusion + assert Status.not_actionable.is_exclusion + assert not Status.actionable.is_exclusion + + def test_worst_status(self): + assert Status.worst(Status.not_eligible, Status.actionable) == Status.not_eligible + assert Status.worst(Status.actionable, Status.not_actionable) == Status.not_actionable + assert Status.worst(Status.not_eligible, Status.not_actionable, Status.actionable) == Status.not_eligible + assert Status.worst(Status.actionable) == Status.actionable + + def test_best_status(self): + assert Status.best(Status.not_eligible, Status.actionable) == Status.actionable + assert Status.best(Status.actionable, Status.not_actionable) == Status.actionable + assert Status.best(Status.not_eligible, Status.not_actionable, Status.actionable) == Status.actionable + assert Status.best(Status.not_eligible) == Status.not_eligible + + def test_get_status_text(self): + assert Status.not_eligible.get_status_text(ConditionName("COVID")) == StatusText( + "We do not believe you can have it" + ) + + assert Status.not_actionable.get_status_text(ConditionName("FLU")) == StatusText( + "You should have the FLU vaccine" + ) + + assert 
Status.actionable.get_status_text(ConditionName("COVID")) == StatusText( + "You should have the COVID vaccine" + ) + + def test_get_action_rule_type(self): + assert Status.not_eligible.get_action_rule_type() == RuleType(RuleType.not_eligible_actions) + + assert Status.not_actionable.get_action_rule_type() == RuleType(RuleType.not_actionable_actions) + + assert Status.actionable.get_action_rule_type() == RuleType(RuleType.redirect) diff --git a/tests/unit/services/calculators/test_eligibility_calculator.py b/tests/unit/services/calculators/test_eligibility_calculator.py index c93edb77c..37a13e8ff 100644 --- a/tests/unit/services/calculators/test_eligibility_calculator.py +++ b/tests/unit/services/calculators/test_eligibility_calculator.py @@ -1,110 +1,58 @@ import datetime +import logging from typing import Any import pytest from faker import Faker +from flask import Flask from freezegun import freeze_time -from hamcrest import assert_that, contains_exactly, contains_inanyorder, equal_to, has_item, has_items, is_, is_in -from pydantic import HttpUrl, ValidationError - -from eligibility_signposting_api.model import rules -from eligibility_signposting_api.model import rules as rules_model -from eligibility_signposting_api.model.eligibility import ( +from hamcrest import assert_that, contains_exactly, contains_inanyorder, has_item, has_items, is_, is_in + +from eligibility_signposting_api.model import campaign_config as rules_model +from eligibility_signposting_api.model import eligibility_status +from eligibility_signposting_api.model.campaign_config import ( + CohortLabel, + Description, + RuleAttributeLevel, + RuleAttributeName, + RuleAttributeTarget, + RuleComparator, + RuleName, + RuleOperator, + RuleType, +) +from eligibility_signposting_api.model.eligibility_status import ( ActionCode, ActionDescription, ActionType, + CohortGroupResult, + Condition, ConditionName, DateOfBirth, InternalActionCode, + IterationResult, NHSNumber, Postcode, + Reason, RuleDescription, 
+ RulePriority, Status, SuggestedAction, - UrlLabel, - UrlLink, ) -from eligibility_signposting_api.model.rules import ActionsMapper, AvailableAction from eligibility_signposting_api.services.calculators.eligibility_calculator import EligibilityCalculator from tests.fixtures.builders.model import rule as rule_builder +from tests.fixtures.builders.model.eligibility import ReasonFactory from tests.fixtures.builders.repos.person import person_rows_builder from tests.fixtures.matchers.eligibility import ( is_cohort_result, is_condition, is_eligibility_status, - is_reason, ) -from tests.fixtures.matchers.rules import is_iteration_rule - - -class TestEligibilityCalculator: - @staticmethod - def test_get_redirect_rules(): - # Given - - iteration = rule_builder.IterationFactory.build( - iteration_cohorts=[rule_builder.IterationCohortFactory.build(cohort_label="cohort2")], - default_comms_routing="defaultcomms", - actions_mapper=rule_builder.ActionsMapperFactory.build( - root={ - "ActionCode1": AvailableAction( - ActionType="ActionType1", - ExternalRoutingCode="ActionCode1", - ActionDescription="ActionDescription1", - UrlLink=HttpUrl("https://www.ActionUrl1.com"), - UrlLabel="ActionLabel1", - ), - "defaultcomms": AvailableAction( - ActionType="ActionType2", - ExternalRoutingCode="defaultcomms", - ActionDescription="ActionDescription2", - UrlLink=HttpUrl("https://www.ActionUrl2.com"), - UrlLabel="ActionLabel2", - ), - } - ), - iteration_rules=[rule_builder.ICBRedirectRuleFactory.build()], - ) - - # when - actual_rules, actual_action_mapper, actual_default_comms = EligibilityCalculator.get_redirect_rules(iteration) - - # then - assert_that(actual_rules, has_item(is_iteration_rule().with_name(iteration.iteration_rules[0].name))) - assert actual_action_mapper == iteration.actions_mapper - assert actual_default_comms == iteration.default_comms_routing - - -def test_not_base_eligible(faker: Faker): - # Given - nhs_number = NHSNumber(faker.nhs_number()) - - person_rows = 
person_rows_builder(nhs_number, cohorts=["cohort1"]) - campaign_configs = [ - ( - rule_builder.CampaignConfigFactory.build( - target="RSV", - iterations=[ - rule_builder.IterationFactory.build( - iteration_cohorts=[rule_builder.IterationCohortFactory.build(cohort_label="cohort2")] - ) - ], - ) - ) - ] - - calculator = EligibilityCalculator(person_rows, campaign_configs) - # When - actual = calculator.evaluate_eligibility("Y", ["ALL"], "ALL") - # Then - assert_that( - actual, - is_eligibility_status().with_conditions( - has_item(is_condition().with_condition_name(ConditionName("RSV")).and_status(Status.not_eligible)) - ), - ) +@pytest.fixture +def app(): + return Flask(__name__) @pytest.mark.parametrize( @@ -141,7 +89,7 @@ def test_base_eligible_with_when_magic_cohort_is_present( calculator = EligibilityCalculator(person_rows, campaign_configs) # When - actual = calculator.evaluate_eligibility("Y", ["ALL"], "ALL") + actual = calculator.get_eligibility_status("Y", ["ALL"], "ALL") # Then assert_that( @@ -153,54 +101,6 @@ def test_base_eligible_with_when_magic_cohort_is_present( ) -@freeze_time("2025-04-25") -def test_only_live_campaigns_considered(faker: Faker): - # Given - nhs_number = NHSNumber(faker.nhs_number()) - - person_rows = person_rows_builder(nhs_number, cohorts=["cohort1"]) - campaign_configs = [ - rule_builder.CampaignConfigFactory.build( - name="Live", - target="RSV", - iterations=[ - rule_builder.IterationFactory.build( - iteration_cohorts=[rule_builder.IterationCohortFactory.build(cohort_label="cohort2")], - ) - ], - start_date=datetime.date(2025, 4, 20), - end_date=datetime.date(2025, 4, 30), - ), - rule_builder.CampaignConfigFactory.build( - name="No longer live", - target="RSV", - iterations=[ - rule_builder.IterationFactory.build( - iteration_cohorts=[ - rule_builder.IterationCohortFactory.build(cohort_label="cohort1"), - rule_builder.IterationCohortFactory.build(cohort_label="cohort2"), - ], - ) - ], - start_date=datetime.date(2025, 4, 1), - 
end_date=datetime.date(2025, 4, 24), - ), - ] - - calculator = EligibilityCalculator(person_rows, campaign_configs) - - # When - actual = calculator.evaluate_eligibility("Y", ["ALL"], "ALL") - - # Then - assert_that( - actual, - is_eligibility_status().with_conditions( - has_item(is_condition().with_condition_name(ConditionName("RSV")).and_status(Status.not_eligible)) - ), - ) - - @pytest.mark.parametrize( "iteration_type", ["A", "M", "S", "O"], @@ -209,46 +109,13 @@ def test_campaigns_with_applicable_iteration_types_in_campaign_level_considered( # Given nhs_number = NHSNumber(faker.nhs_number()) - person_rows = person_rows_builder(nhs_number) + person_rows = person_rows_builder(nhs_number, cohorts=[]) campaign_configs = [rule_builder.CampaignConfigFactory.build(target="RSV", iteration_type=iteration_type)] calculator = EligibilityCalculator(person_rows, campaign_configs) # When - actual = calculator.evaluate_eligibility("Y", ["ALL"], "ALL") - - # Then - assert_that( - actual, - is_eligibility_status().with_conditions( - has_item( - is_condition() - .with_condition_name(ConditionName("RSV")) - .and_status(is_in([Status.actionable, Status.not_actionable, Status.not_eligible])) - ), - ), - ) - - -@pytest.mark.parametrize( - "iteration_type", - ["A", "M", "S", "O"], -) -def test_campaigns_with_applicable_iteration_types_in_iteration_level_considered(iteration_type: str, faker: Faker): - # Given - nhs_number = NHSNumber(faker.nhs_number()) - - person_rows = person_rows_builder(nhs_number) - campaign_configs = [ - rule_builder.CampaignConfigFactory.build( - target="RSV", iterations=[rule_builder.IterationFactory.build(type=iteration_type)] - ) - ] - - calculator = EligibilityCalculator(person_rows, campaign_configs) - - # When - actual = calculator.evaluate_eligibility("Y", ["ALL"], "ALL") + actual = calculator.get_eligibility_status("Y", ["ALL"], "ALL") # Then assert_that( @@ -263,90 +130,6 @@ def 
test_campaigns_with_applicable_iteration_types_in_iteration_level_considered ) -@pytest.mark.parametrize( - "iteration_type", - ["NA", "N", "FAKE", "F"], -) -def test_invalid_iteration_types_in_campaign_level_raises_validation_error(iteration_type: str): - with pytest.raises(ValidationError): - rule_builder.CampaignConfigFactory.build(target="RSV", iteration_type=iteration_type) - - -@pytest.mark.parametrize( - "iteration_type", - ["NA", "N", "FAKE", "F"], -) -def test_invalid_iteration_types_in_iteration_level_raises_validation_error(iteration_type: str): - with pytest.raises(ValidationError): - rule_builder.CampaignConfigFactory.build( - target="RSV", iterations=[rule_builder.IterationFactory.build(type=iteration_type)] - ) - - -def test_base_eligible_and_simple_rule_includes(faker: Faker): - # Given - nhs_number = NHSNumber(faker.nhs_number()) - date_of_birth = DateOfBirth(faker.date_of_birth(minimum_age=76, maximum_age=79)) - - person_rows = person_rows_builder(nhs_number, date_of_birth=date_of_birth, cohorts=["cohort1"]) - campaign_configs = [ - rule_builder.CampaignConfigFactory.build( - target="RSV", - iterations=[ - rule_builder.IterationFactory.build( - iteration_rules=[rule_builder.PersonAgeSuppressionRuleFactory.build()], - iteration_cohorts=[rule_builder.IterationCohortFactory.build(cohort_label="cohort1")], - ) - ], - ) - ] - - calculator = EligibilityCalculator(person_rows, campaign_configs) - - # When - actual = calculator.evaluate_eligibility("Y", ["ALL"], "ALL") - - # Then - assert_that( - actual, - is_eligibility_status().with_conditions( - has_item(is_condition().with_condition_name(ConditionName("RSV")).and_status(Status.actionable)) - ), - ) - - -def test_base_eligible_but_simple_rule_excludes(faker: Faker): - # Given - nhs_number = NHSNumber(faker.nhs_number()) - date_of_birth = DateOfBirth(faker.date_of_birth(minimum_age=18, maximum_age=74)) - - person_rows = person_rows_builder(nhs_number, date_of_birth=date_of_birth, cohorts=["cohort1"]) - 
campaign_configs = [ - rule_builder.CampaignConfigFactory.build( - target="RSV", - iterations=[ - rule_builder.IterationFactory.build( - iteration_rules=[rule_builder.PersonAgeSuppressionRuleFactory.build()], - iteration_cohorts=[rule_builder.IterationCohortFactory.build(cohort_label="cohort1")], - ) - ], - ) - ] - - calculator = EligibilityCalculator(person_rows, campaign_configs) - - # When - actual = calculator.evaluate_eligibility("Y", ["ALL"], "ALL") - - # Then - assert_that( - actual, - is_eligibility_status().with_conditions( - has_item(is_condition().with_condition_name(ConditionName("RSV")).and_status(Status.not_actionable)) - ), - ) - - @freeze_time("2025-04-25") def test_simple_rule_only_excludes_from_live_iteration(faker: Faker): # Given @@ -383,7 +166,7 @@ def test_simple_rule_only_excludes_from_live_iteration(faker: Faker): calculator = EligibilityCalculator(person_rows, campaign_configs) # When - actual = calculator.evaluate_eligibility("Y", ["ALL"], "ALL") + actual = calculator.get_eligibility_status("Y", ["ALL"], "ALL") # Then assert_that( @@ -394,86 +177,6 @@ def test_simple_rule_only_excludes_from_live_iteration(faker: Faker): ) -@pytest.mark.parametrize( - ("rule_type", "expected_status"), - [(rules_model.RuleType.suppression, Status.not_actionable), (rules_model.RuleType.filter, Status.not_eligible)], -) -def test_rule_types_cause_correct_statuses(rule_type: rules_model.RuleType, expected_status: Status, faker: Faker): - # Given - nhs_number = NHSNumber(faker.nhs_number()) - date_of_birth = DateOfBirth(faker.date_of_birth(minimum_age=18, maximum_age=74)) - - person_rows = person_rows_builder(nhs_number, date_of_birth=date_of_birth, cohorts=["cohort1"]) - campaign_configs = [ - rule_builder.CampaignConfigFactory.build( - target="RSV", - iterations=[ - rule_builder.IterationFactory.build( - iteration_rules=[rule_builder.PersonAgeSuppressionRuleFactory.build(type=rule_type)], - 
iteration_cohorts=[rule_builder.IterationCohortFactory.build(cohort_label="cohort1")], - ) - ], - ) - ] - - calculator = EligibilityCalculator(person_rows, campaign_configs) - - # When - actual = calculator.evaluate_eligibility("Y", ["ALL"], "ALL") - - # Then - assert_that( - actual, - is_eligibility_status().with_conditions( - has_item( - is_condition().with_condition_name(ConditionName("RSV")).and_status(expected_status).and_actions([]) - ) - ), - ) - - -def test_multiple_rule_types_cause_correct_status(faker: Faker): - # Given - nhs_number = NHSNumber(faker.nhs_number()) - date_of_birth = DateOfBirth(faker.date_of_birth(minimum_age=18, maximum_age=74)) - - person_rows = person_rows_builder(nhs_number, date_of_birth=date_of_birth, cohorts=["cohort1"]) - campaign_configs = [ - rule_builder.CampaignConfigFactory.build( - target="RSV", - iterations=[ - rule_builder.IterationFactory.build( - iteration_rules=[ - rule_builder.PersonAgeSuppressionRuleFactory.build( - priority=rules_model.RulePriority(5), type=rules_model.RuleType.suppression - ), - rule_builder.PersonAgeSuppressionRuleFactory.build( - priority=rules_model.RulePriority(10), type=rules_model.RuleType.filter - ), - rule_builder.PersonAgeSuppressionRuleFactory.build( - priority=rules_model.RulePriority(15), type=rules_model.RuleType.suppression - ), - ], - iteration_cohorts=[rule_builder.IterationCohortFactory.build(cohort_label="cohort1")], - ) - ], - ) - ] - - calculator = EligibilityCalculator(person_rows, campaign_configs) - - # When - actual = calculator.evaluate_eligibility("Y", ["ALL"], "ALL") - - # Then - assert_that( - actual, - is_eligibility_status().with_conditions( - has_item(is_condition().with_condition_name(ConditionName("RSV")).and_status(Status.not_eligible)) - ), - ) - - @pytest.mark.parametrize( ("test_comment", "rule1", "rule2", "expected_status"), [ @@ -552,7 +255,7 @@ def test_rules_with_same_priority_must_all_match_to_exclude( calculator = EligibilityCalculator(person_rows, 
campaign_configs) # When - actual = calculator.evaluate_eligibility("Y", ["ALL"], "ALL") + actual = calculator.get_eligibility_status("Y", ["ALL"], "ALL") # Then assert_that( @@ -564,251 +267,216 @@ def test_rules_with_same_priority_must_all_match_to_exclude( ) -def test_multiple_conditions_where_both_are_actionable(faker: Faker): +@pytest.mark.parametrize( + ("vaccine", "last_successful_date", "expected_status", "test_comment"), + [ + ("RSV", "20240601", Status.not_actionable, "last_successful_date is a past date"), + ("RSV", "20250101", Status.not_actionable, "last_successful_date is today"), + # Below is a non-ideal situation (might be due to a data entry error), so considered as actionable. + ("RSV", "20260101", Status.actionable, "last_successful_date is a future date"), + ("RSV", "20230601", Status.actionable, "last_successful_date is a long past"), + ("RSV", "", Status.actionable, "last_successful_date is empty"), + ("RSV", None, Status.actionable, "last_successful_date is none"), + ("COVID", "20240601", Status.actionable, "No RSV row"), + ], +) +@freeze_time("2025-01-01") +def test_status_on_target_based_on_last_successful_date( + vaccine: str, last_successful_date: str, expected_status: Status, test_comment: str, faker: Faker +): # Given nhs_number = NHSNumber(faker.nhs_number()) - date_of_birth = DateOfBirth(faker.date_of_birth(minimum_age=76, maximum_age=78)) - person_rows = person_rows_builder(nhs_number, date_of_birth=date_of_birth, cohorts=["cohort1"], icb="QE1") + target_rows = person_rows_builder( + nhs_number, + cohorts=["cohort1"], + vaccines=[ + ( + vaccine, + datetime.datetime.strptime(last_successful_date, "%Y%m%d").replace(tzinfo=datetime.UTC) + if last_successful_date + else None, + ) + ], + ) + campaign_configs = [ rule_builder.CampaignConfigFactory.build( target="RSV", iterations=[ rule_builder.IterationFactory.build( - iteration_cohorts=[rule_builder.IterationCohortFactory.build(cohort_label="cohort1")], - 
iteration_rules=[rule_builder.PersonAgeSuppressionRuleFactory.build()], - default_comms_routing="defaultcomms", - actions_mapper=rule_builder.ActionsMapperFactory.build( - root={"rule_1_comms_routing": book_nbs_comms, "defaultcomms": default_comms_detail} - ), - ) - ], - ), - rule_builder.CampaignConfigFactory.build( - target="COVID", - iterations=[ - rule_builder.IterationFactory.build( - iteration_cohorts=[rule_builder.IterationCohortFactory.build(cohort_label="cohort1")], iteration_rules=[ - rule_builder.PersonAgeSuppressionRuleFactory.build(), - rule_builder.ICBRedirectRuleFactory.build(), + rule_builder.IterationRuleFactory.build( + type=RuleType.suppression, + name=RuleName("You have already been vaccinated against RSV in the last year"), + description=RuleDescription("Exclude anyone Completed RSV Vaccination in the last year"), + priority=10, + operator=RuleOperator.day_gte, + attribute_level=RuleAttributeLevel.TARGET, + attribute_name=RuleAttributeName("LAST_SUCCESSFUL_DATE"), + comparator=RuleComparator("-365"), + attribute_target=RuleAttributeTarget("RSV"), + ), + rule_builder.IterationRuleFactory.build( + type=RuleType.suppression, + name=RuleName("You have a vaccination date in the future for RSV"), + description=RuleDescription("Exclude anyone with future Completed RSV Vaccination"), + priority=10, + operator=RuleOperator.day_lte, + attribute_level=RuleAttributeLevel.TARGET, + attribute_name=RuleAttributeName("LAST_SUCCESSFUL_DATE"), + comparator=RuleComparator("0"), + attribute_target=RuleAttributeTarget("RSV"), + ), ], - default_comms_routing="defaultcomms", - actions_mapper=rule_builder.ActionsMapperFactory.build( - root={"ActionCode1": book_nbs_comms, "defaultcomms": default_comms_detail} - ), + iteration_cohorts=[rule_builder.IterationCohortFactory.build(cohort_label="cohort1")], ) ], - ), + ) ] - calculator = EligibilityCalculator(person_rows, campaign_configs) + calculator = EligibilityCalculator(target_rows, campaign_configs) # When - actual = 
calculator.evaluate_eligibility("Y", ["ALL"], "ALL") + actual = calculator.get_eligibility_status("Y", ["ALL"], "ALL") # Then assert_that( actual, is_eligibility_status().with_conditions( - has_items( - is_condition() - .with_condition_name(ConditionName("RSV")) - .and_status(Status.actionable) - .and_actions( - [ - SuggestedAction( - internal_action_code=InternalActionCode("defaultcomms"), - action_type=ActionType("CareCardWithText"), - action_code=ActionCode("BookLocal"), - action_description=ActionDescription("You can get an RSV vaccination at your GP surgery"), - url_link=None, - url_label=None, - ) - ] - ), - is_condition() - .with_condition_name(ConditionName("COVID")) - .and_status(Status.actionable) - .and_actions( - [ - SuggestedAction( - internal_action_code=InternalActionCode("ActionCode1"), - action_type=ActionType("ButtonAuthLink"), - action_code=ActionCode("BookNBS"), - action_description=ActionDescription("Action description"), - url_link=UrlLink(HttpUrl("https://www.nhs.uk/book-rsv")), - url_label=UrlLabel("Continue to booking"), - ) - ] - ), - ) + has_item(is_condition().with_condition_name(ConditionName("RSV")).and_status(expected_status)) ), + test_comment, ) -def test_multiple_conditions_where_all_give_unique_statuses(faker: Faker): +@pytest.mark.parametrize( + ("person_cohorts", "expected_status", "test_comment"), + [ + (["cohort1", "cohort2"], Status.actionable, "cohort1 is not actionable, cohort 2 is actionable"), + (["cohort3", "cohort2"], Status.actionable, "cohort3 is not eligible, cohort 2 is actionable"), + (["cohort1"], Status.not_actionable, "cohort1 is not actionable"), + ], +) +def test_status_if_iteration_rules_contains_cohort_label_field( + person_cohorts, expected_status: Status, test_comment: str, faker: Faker +): # Given nhs_number = NHSNumber(faker.nhs_number()) - date_of_birth = DateOfBirth(faker.date_of_birth(minimum_age=76, maximum_age=78)) + date_of_birth = DateOfBirth(faker.date_of_birth(minimum_age=66, maximum_age=74)) - 
person_rows = person_rows_builder(nhs_number, date_of_birth=date_of_birth, cohorts=["cohort1"]) + person_rows = person_rows_builder(nhs_number, date_of_birth=date_of_birth, cohorts=person_cohorts) campaign_configs = [ rule_builder.CampaignConfigFactory.build( target="RSV", iterations=[ rule_builder.IterationFactory.build( - iteration_cohorts=[rule_builder.IterationCohortFactory.build(cohort_label="cohort1")], - iteration_rules=[rule_builder.PersonAgeSuppressionRuleFactory.build()], - ) - ], - ), - rule_builder.CampaignConfigFactory.build( - target="COVID", - iterations=[ - rule_builder.IterationFactory.build( - iteration_cohorts=[rule_builder.IterationCohortFactory.build(cohort_label="cohort1")], - iteration_rules=[rule_builder.PersonAgeSuppressionRuleFactory.build(comparator="-85")], - ) - ], - ), - rule_builder.CampaignConfigFactory.build( - target="FLU", - iterations=[ - rule_builder.IterationFactory.build( - iteration_cohorts=[rule_builder.IterationCohortFactory.build(cohort_label="cohort2")], - iteration_rules=[rule_builder.PersonAgeSuppressionRuleFactory.build(comparator="-85")], + iteration_cohorts=[ + rule_builder.IterationCohortFactory.build(cohort_label="cohort1"), + rule_builder.IterationCohortFactory.build(cohort_label="cohort2"), + ], + iteration_rules=[rule_builder.PersonAgeSuppressionRuleFactory.build(cohort_label="cohort1")], ) ], - ), + ) ] calculator = EligibilityCalculator(person_rows, campaign_configs) # When - actual = calculator.evaluate_eligibility("N", ["ALL"], "ALL") + actual = calculator.get_eligibility_status("Y", ["ALL"], "ALL") # Then assert_that( actual, is_eligibility_status().with_conditions( - has_items( - is_condition() - .with_condition_name(ConditionName("RSV")) - .and_status(Status.actionable) - .and_actions(None), - is_condition() - .with_condition_name(ConditionName("COVID")) - .and_status(Status.not_actionable) - .and_actions(None), - is_condition() - .with_condition_name(ConditionName("FLU")) - 
.and_status(Status.not_eligible) - .and_actions(None), - ) + has_items(is_condition().with_condition_name(ConditionName("RSV")).and_status(expected_status)) ), + test_comment, ) @pytest.mark.parametrize( - ("test_comment", "campaign1", "campaign2"), + ("person_rows", "expected_status", "expected_cohort_group_and_description", "test_comment"), [ ( - "1st campaign allows, 2nd excludes", - rule_builder.CampaignConfigFactory.build( - target="RSV", - iterations=[ - rule_builder.IterationFactory.build( - iteration_cohorts=[rule_builder.IterationCohortFactory.build(cohort_label="cohort1")], - iteration_rules=[rule_builder.PersonAgeSuppressionRuleFactory.build()], - ) - ], - ), - rule_builder.CampaignConfigFactory.build( - target="RSV", - iterations=[ - rule_builder.IterationFactory.build( - iteration_cohorts=[rule_builder.IterationCohortFactory.build(cohort_label="cohort1")], - iteration_rules=[rule_builder.PersonAgeSuppressionRuleFactory.build(comparator="-85")], - ) - ], - ), + person_rows_builder(nhs_number="123", cohorts=[], postcode="AC01", de=True, icb="QE1"), + Status.not_eligible, + [ + ("magic cohort group", "magic negative description"), + ("rsv_age_range", "rsv_age_range negative description"), + ], + "rsv_75_rolling is not base-eligible & magic cohort group not eligible by F rules ", ), ( - "1st campaign excludes, 2nd allows", - rule_builder.CampaignConfigFactory.build( - target="RSV", - iterations=[ - rule_builder.IterationFactory.build( - iteration_cohorts=[rule_builder.IterationCohortFactory.build(cohort_label="cohort1")], - iteration_rules=[rule_builder.PersonAgeSuppressionRuleFactory.build(comparator="-85")], - ) - ], - ), - rule_builder.CampaignConfigFactory.build( - target="RSV", - iterations=[ - rule_builder.IterationFactory.build( - iteration_cohorts=[rule_builder.IterationCohortFactory.build(cohort_label="cohort1")], - iteration_rules=[rule_builder.PersonAgeSuppressionRuleFactory.build()], - ) - ], - ), + person_rows_builder(nhs_number="123", 
cohorts=["rsv_75_rolling"], postcode="AC01", de=True, icb="QE1"), + Status.not_eligible, + [ + ("magic cohort group", "magic negative description"), + ("rsv_age_range", "rsv_age_range negative description"), + ], + "all the cohorts are not-eligible by F rules", ), - ], -) -def test_multiple_campaigns_for_single_condition( - test_comment: str, campaign1: rules_model.CampaignConfig, campaign2: rules_model.CampaignConfig, faker: Faker -): - # Given - nhs_number = NHSNumber(faker.nhs_number()) - date_of_birth = DateOfBirth(faker.date_of_birth(minimum_age=76, maximum_age=78)) - - person_rows = person_rows_builder(nhs_number, date_of_birth=date_of_birth, cohorts=["cohort1"]) - campaign_configs = [campaign1, campaign2] - - calculator = EligibilityCalculator(person_rows, campaign_configs) - - # When - actual = calculator.evaluate_eligibility("Y", ["ALL"], "ALL") - - # Then - assert_that( - actual, - is_eligibility_status().with_conditions( - contains_exactly(is_condition().with_condition_name(ConditionName("RSV")).and_status(Status.actionable)) + ( + person_rows_builder(nhs_number="123", cohorts=["rsv_75_rolling"], postcode="SW19", de=False, icb="QE1"), + Status.not_actionable, + [ + ("magic cohort group", "magic positive description"), + ("rsv_age_range", "rsv_age_range positive description"), + ], + "all the cohorts are not-actionable", + ), + ( + person_rows_builder(nhs_number="123", cohorts=["rsv_75_rolling"], postcode="AC01", de=False, icb="QE1"), + Status.actionable, + [ + ("magic cohort group", "magic positive description"), + ("rsv_age_range", "rsv_age_range positive description"), + ], + "all the cohorts are actionable", + ), + ( + person_rows_builder(nhs_number="123", cohorts=["rsv_75_rolling"], postcode="AC01", de=False, icb="NOT_QE1"), + Status.actionable, + [("magic cohort group", "magic positive description")], + "magic_cohort is actionable, but not others", + ), + ( + person_rows_builder(nhs_number="123", cohorts=["rsv_75_rolling"], postcode="SW19", 
de=False, icb="NOT_QE1"), + Status.not_actionable, + [("magic cohort group", "magic positive description")], + "magic_cohort is not-actionable, but others are not eligible", ), - test_comment, - ) - - -@pytest.mark.parametrize( - ("icb", "rule_type", "expected_status"), - [ - ("QE1", rules_model.RuleType.suppression, Status.actionable), - ("QWU", rules_model.RuleType.suppression, Status.not_actionable), - ("", rules_model.RuleType.suppression, Status.not_actionable), - (None, rules_model.RuleType.suppression, Status.not_actionable), - ("QE1", rules_model.RuleType.filter, Status.actionable), - ("QWU", rules_model.RuleType.filter, Status.not_eligible), - ("", rules_model.RuleType.filter, Status.not_eligible), - (None, rules_model.RuleType.filter, Status.not_eligible), ], ) -def test_base_eligible_and_icb_example( - icb: str | None, rule_type: rules_model.RuleType, expected_status: Status, faker: Faker +def test_cohort_groups_and_their_descriptions_when_magic_cohort_is_present( + person_rows: list[dict[str, Any]], + expected_status: str, + expected_cohort_group_and_description: list[tuple[str, str]], + test_comment: str, ): # Given - nhs_number = NHSNumber(faker.nhs_number()) - - person_rows = person_rows_builder(nhs_number, cohorts=["cohort1"], icb=icb) campaign_configs = [ rule_builder.CampaignConfigFactory.build( target="RSV", iterations=[ rule_builder.IterationFactory.build( - iteration_rules=[rule_builder.ICBFilterRuleFactory.build(type=rule_type)], - iteration_cohorts=[rule_builder.IterationCohortFactory.build(cohort_label="cohort1")], + iteration_cohorts=[ + rule_builder.Rsv75RollingCohortFactory.build(), + rule_builder.MagicCohortFactory.build(), + ], + iteration_rules=[ + # F common rule + rule_builder.DetainedEstateSuppressionRuleFactory.build(type=RuleType.filter), + # F rules for rsv_75_rolling + rule_builder.ICBFilterRuleFactory.build( + type=RuleType.filter, cohort_label=CohortLabel("rsv_75_rolling") + ), + # S common rule + 
rule_builder.PostcodeSuppressionRuleFactory.build( + comparator=RuleComparator("SW19"), + ), + ], ) ], ) @@ -817,1590 +485,549 @@ def test_base_eligible_and_icb_example( calculator = EligibilityCalculator(person_rows, campaign_configs) # When - actual = calculator.evaluate_eligibility("Y", ["ALL"], "ALL") + actual = calculator.get_eligibility_status("Y", ["ALL"], "ALL") # Then assert_that( actual, is_eligibility_status().with_conditions( - has_item(is_condition().with_condition_name(ConditionName("RSV")).and_status(expected_status)) + has_items( + is_condition() + .with_condition_name(ConditionName("RSV")) + .and_cohort_results( + contains_exactly( + *[ + is_cohort_result() + .with_cohort_code(item[0]) + .with_description(item[1]) + .with_status(expected_status) + for item in expected_cohort_group_and_description + ] + ) + ) + ) ), + test_comment, ) @pytest.mark.parametrize( - ("vaccine", "last_successful_date", "expected_status", "test_comment"), + ("person_rows", "expected_description", "test_comment"), [ - ("RSV", "20240601", Status.not_actionable, "last_successful_date is a past date"), - ("RSV", "20250101", Status.not_actionable, "last_successful_date is today"), - # Below is a non-ideal situation (might be due to a data entry error), so considered as actionable. 
- ("RSV", "20260101", Status.actionable, "last_successful_date is a future date"), - ("RSV", "20230601", Status.actionable, "last_successful_date is a long past"), - ("RSV", "", Status.actionable, "last_successful_date is empty"), - ("RSV", None, Status.actionable, "last_successful_date is none"), - ("COVID", "20240601", Status.actionable, "No RSV row"), + ( + person_rows_builder(nhs_number="123", cohorts=[]), + "rsv_age_range negative description 1", + "status - not eligible", + ), + ( + person_rows_builder(nhs_number="123", cohorts=["rsv_75_rolling", "rsv_75to79_2024"], postcode="SW19"), + "rsv_age_range positive description 1", + "status - not actionable", + ), + ( + person_rows_builder(nhs_number="123", cohorts=["rsv_75_rolling", "rsv_75to79_2024"], postcode="hp"), + "rsv_age_range positive description 1", + "status - actionable", + ), + ( + person_rows_builder(nhs_number="123", cohorts=["rsv_75to79_2024"], postcode="hp"), + "rsv_age_range positive description 2", + "rsv_75to79_2024 - actionable and rsv_75_rolling is not eligible", + ), ], ) -@freeze_time("2025-01-01") -def test_status_on_target_based_on_last_successful_date( - vaccine: str, last_successful_date: str, expected_status: Status, test_comment: str, faker: Faker +def test_cohort_group_descriptions_are_selected_based_on_priority_when_cohorts_have_different_non_empty_descriptions( + person_rows: list[dict[str, Any]], expected_description: str, test_comment: str ): # Given - nhs_number = NHSNumber(faker.nhs_number()) - - target_rows = person_rows_builder( - nhs_number, - cohorts=["cohort1"], - vaccines=[ - ( - vaccine, - datetime.datetime.strptime(last_successful_date, "%Y%m%d").replace(tzinfo=datetime.UTC) - if last_successful_date - else None, - ) - ], - ) - campaign_configs = [ rule_builder.CampaignConfigFactory.build( target="RSV", iterations=[ rule_builder.IterationFactory.build( - iteration_rules=[ - rule_builder.IterationRuleFactory.build( - type=rules.RuleType.suppression, - 
name=rules.RuleName("You have already been vaccinated against RSV in the last year"), - description=rules.RuleDescription( - "Exclude anyone Completed RSV Vaccination in the last year" - ), - priority=10, - operator=rules.RuleOperator.day_gte, - attribute_level=rules.RuleAttributeLevel.TARGET, - attribute_name=rules.RuleAttributeName("LAST_SUCCESSFUL_DATE"), - comparator=rules.RuleComparator("-365"), - attribute_target=rules.RuleAttributeTarget("RSV"), + iteration_cohorts=[ + rule_builder.Rsv75to79CohortFactory.build( + positive_description=Description("rsv_age_range positive description 2"), + negative_description=Description("rsv_age_range negative description 2"), + priority=2, ), - rule_builder.IterationRuleFactory.build( - type=rules.RuleType.suppression, - name=rules.RuleName("You have a vaccination date in the future for RSV"), - description=rules.RuleDescription("Exclude anyone with future Completed RSV Vaccination"), - priority=10, - operator=rules.RuleOperator.day_lte, - attribute_level=rules.RuleAttributeLevel.TARGET, - attribute_name=rules.RuleAttributeName("LAST_SUCCESSFUL_DATE"), - comparator=rules.RuleComparator("0"), - attribute_target=rules.RuleAttributeTarget("RSV"), + rule_builder.Rsv75RollingCohortFactory.build( + positive_description=Description("rsv_age_range positive description 1"), + negative_description=Description("rsv_age_range negative description 1"), + priority=1, ), ], - iteration_cohorts=[rule_builder.IterationCohortFactory.build(cohort_label="cohort1")], + iteration_rules=[rule_builder.PostcodeSuppressionRuleFactory.build()], ) ], ) ] - calculator = EligibilityCalculator(target_rows, campaign_configs) + calculator = EligibilityCalculator(person_rows, campaign_configs) # When - actual = calculator.evaluate_eligibility("Y", ["ALL"], "ALL") + actual = calculator.get_eligibility_status("Y", ["ALL"], "ALL") # Then assert_that( actual, is_eligibility_status().with_conditions( - 
has_item(is_condition().with_condition_name(ConditionName("RSV")).and_status(expected_status)) - ), - test_comment, - ) - - -@pytest.mark.parametrize( - ("attribute_name", "expected_status", "test_comment"), - [ - ( - rules.RuleAttributeName("COHORT_LABEL"), - Status.not_eligible, - "cohort label provided", - ), - ( - None, - Status.not_eligible, - "cohort label is the default attribute name for the cohort attribute level", - ), - ( - rules.RuleAttributeName("LOCATION"), - Status.actionable, - "attribute name that is not cohort label", + has_items( + is_condition() + .with_condition_name(ConditionName("RSV")) + .and_cohort_results( + contains_exactly( + is_cohort_result().with_cohort_code("rsv_age_range").with_description(expected_description) + ) + ) + ) ), - ], -) -def test_status_on_cohort_attribute_level( - attribute_name: rules.RuleAttributeName, expected_status: Status, test_comment: str, faker: Faker -): - # Given - nhs_number = NHSNumber(faker.nhs_number()) - - person_row: list[dict[str, Any]] = person_rows_builder( - nhs_number, cohorts=["cohort1", "covid_eligibility_complaint_list"] + test_comment, ) - person_row_with_extra_items_in_cohort_row = [ - {**r, "LOCATION": "HP1"} for r in person_row if r.get("ATTRIBUTE_TYPE", "") == "COHORTS" - ] + +@freeze_time("2025-04-25") +def test_no_active_iteration_returns_empty_conditions_with_single_active_campaign(faker: Faker): + # Given + person_rows = person_rows_builder(NHSNumber(faker.nhs_number())) campaign_configs = [ rule_builder.CampaignConfigFactory.build( target="RSV", iterations=[ rule_builder.IterationFactory.build( + name="inactive iteration", + iteration_rules=[], iteration_cohorts=[rule_builder.IterationCohortFactory.build(cohort_label="cohort1")], - iteration_rules=[ - rule_builder.IterationRuleFactory.build( - type=rules.RuleType.filter, - name=rules.RuleName("Exclude those in a complaint cohort"), - description=rules.RuleDescription( - "Ensure anyone who has registered a complaint is not shown as 
eligible" - ), - priority=15, - operator=rules.RuleOperator.member_of, - attribute_level=rules.RuleAttributeLevel.COHORT, - attribute_name=attribute_name, - comparator=rules.RuleComparator("covid_eligibility_complaint_list"), - ) - ], ) ], ) ] + # Need to set the iteration date to override CampaignConfigFactory.fix_iteration_date_invariants behavior + campaign_configs[0].iterations[0].iteration_date = datetime.date(2025, 5, 10) - calculator = EligibilityCalculator(person_row_with_extra_items_in_cohort_row, campaign_configs) + calculator = EligibilityCalculator(person_rows, campaign_configs) # When - actual = calculator.evaluate_eligibility("Y", ["ALL"], "ALL") + actual = calculator.get_eligibility_status("Y", ["ALL"], "ALL") # Then - assert_that( - actual, - is_eligibility_status().with_conditions( - has_item(is_condition().with_condition_name(ConditionName("RSV")).and_status(expected_status)) - ), - test_comment, - ) + assert_that(actual, is_eligibility_status().with_conditions([])) -@pytest.mark.parametrize( - ("person_cohorts", "expected_status", "test_comment"), - [ - (["cohort1", "cohort2"], Status.actionable, "cohort1 is not actionable, cohort 2 is actionable"), - (["cohort3", "cohort2"], Status.actionable, "cohort3 is not eligible, cohort 2 is actionable"), - (["cohort1"], Status.not_actionable, "cohort1 is not actionable"), - ], -) -def test_status_if_iteration_rules_contains_cohort_label_field( - person_cohorts, expected_status: Status, test_comment: str, faker: Faker -): +@pytest.mark.usefixtures("caplog") +@freeze_time("2025-04-25") +def test_returns_no_condition_data_for_campaign_without_active_iteration(faker: Faker, caplog): # Given - nhs_number = NHSNumber(faker.nhs_number()) - date_of_birth = DateOfBirth(faker.date_of_birth(minimum_age=66, maximum_age=74)) - - person_rows = person_rows_builder(nhs_number, date_of_birth=date_of_birth, cohorts=person_cohorts) + person_rows = person_rows_builder(NHSNumber(faker.nhs_number())) campaign_configs = [ 
rule_builder.CampaignConfigFactory.build( target="RSV", iterations=[ rule_builder.IterationFactory.build( - iteration_cohorts=[ - rule_builder.IterationCohortFactory.build(cohort_label="cohort1"), - rule_builder.IterationCohortFactory.build(cohort_label="cohort2"), - ], - iteration_rules=[rule_builder.PersonAgeSuppressionRuleFactory.build(cohort_label="cohort1")], + name="inactive iteration", + iteration_rules=[], + iteration_cohorts=[rule_builder.IterationCohortFactory.build(cohort_label="cohort1")], ) ], - ) + ), + rule_builder.CampaignConfigFactory.build( + target="COVID", + iterations=[ + rule_builder.IterationFactory.build( + name="active iteration", + iteration_rules=[], + iteration_cohorts=[rule_builder.IterationCohortFactory.build(cohort_label="cohort1")], + ) + ], + ), ] + # Need to set the iteration date to override CampaignConfigFactory.fix_iteration_date_invariants behavior + rsv_campaign = campaign_configs[0] + rsv_campaign.iterations[0].iteration_date = datetime.date(2025, 5, 10) calculator = EligibilityCalculator(person_rows, campaign_configs) # When - actual = calculator.evaluate_eligibility("Y", ["ALL"], "ALL") + with caplog.at_level(logging.INFO): + actual = calculator.get_eligibility_status("Y", ["ALL"], "ALL") # Then - assert_that( - actual, - is_eligibility_status().with_conditions( - has_items(is_condition().with_condition_name(ConditionName("RSV")).and_status(expected_status)) - ), - test_comment, - ) + condition_names = [condition.condition_name for condition in actual.conditions] + assert ConditionName("RSV") not in condition_names + assert ConditionName("COVID") in condition_names + assert f"Skipping campaign ID {rsv_campaign.id} as no active iteration was found." 
in caplog.text -@pytest.mark.parametrize( - ("rule_stop", "expected_reason_results", "test_comment"), # Changed expected_reasons to expected_reason_results - [ - ( - rules.RuleStop(True), # noqa: FBT003 - [ - RuleDescription("reason 1"), - RuleDescription("reason 2"), - ], - "rule_stop is True, last rule should not run", - ), - ( - rules.RuleStop(False), # noqa: FBT003 - [ - RuleDescription("reason 1"), - RuleDescription("reason 2"), - RuleDescription("reason 3"), - ], - "rule_stop is False, last rule should run", - ), - ], -) -def test_rules_stop_behavior( - rule_stop: rules.RuleStop, expected_reason_results: list[RuleDescription], test_comment: str, faker: Faker -) -> None: - # Given - nhs_number = NHSNumber(faker.nhs_number()) - date_of_birth = DateOfBirth(faker.date_of_birth(minimum_age=66, maximum_age=74)) - person_rows = person_rows_builder(nhs_number, date_of_birth=date_of_birth, cohorts=["cohort1"]) - # Build campaign configuration - campaign_config = rule_builder.CampaignConfigFactory.build( - target="RSV", - iterations=[ - rule_builder.IterationFactory.build( - iteration_rules=[ - rule_builder.PersonAgeSuppressionRuleFactory.build( - priority=10, description="reason 1", rule_stop=rule_stop - ), - rule_builder.PersonAgeSuppressionRuleFactory.build(priority=10, description="reason 2"), - rule_builder.PersonAgeSuppressionRuleFactory.build(priority=15, description="reason 3"), - ], - iteration_cohorts=[ - rule_builder.IterationCohortFactory.build(cohort_group="cohort_group1", cohort_label="cohort1") - ], - ) - ], - ) +@freeze_time("2025-04-25") +def test_no_active_campaign(faker: Faker): + # Given + person_rows = person_rows_builder(NHSNumber(faker.nhs_number())) + campaign_configs = [rule_builder.CampaignConfigFactory.build()] + # Need to set the campaign dates to override CampaignConfigFactory.fix_iteration_date_invariants behavior + campaign_configs[0].start_date = datetime.date(2025, 5, 10) - calculator = EligibilityCalculator(person_rows, 
[campaign_config]) + calculator = EligibilityCalculator(person_rows, campaign_configs) # When - actual = calculator.evaluate_eligibility("Y", ["ALL"], "ALL") + actual = calculator.get_eligibility_status("Y", ["ALL"], "ALL") # Then - assert_that( - actual, - is_eligibility_status().with_conditions( - has_items( - is_condition() - .with_condition_name(ConditionName("RSV")) - .and_status(equal_to(Status.not_actionable)) - .and_cohort_results( - has_items( - is_cohort_result().with_reasons( - contains_inanyorder( - *[ - is_reason().with_rule_description(equal_to(result)) - for result in expected_reason_results - ] - ) - ) - ) - ) + assert_that(actual, is_eligibility_status().with_conditions([])) + + +class TestEligibilityResultBuilder: + def test_build_condition_results_single_condition_single_cohort_actionable(self): + cohort_group_results = [CohortGroupResult("COHORT_A", Status.actionable, [], "Cohort A Description", [])] + suggested_actions = [ + SuggestedAction( + internal_action_code=InternalActionCode("default_action_code"), + action_type=ActionType("CareCardWithText"), + action_code=ActionCode("BookLocal"), + action_description=ActionDescription("You can get an RSV vaccination at your GP surgery"), + url_link=None, + url_label=None, ) - ), - test_comment, - ) + ] + iteration_result = IterationResult(Status.actionable, cohort_group_results, suggested_actions) + + result = EligibilityCalculator.build_condition(iteration_result, ConditionName("RSV")) + + assert_that(result.condition_name, is_(ConditionName("RSV"))) + assert_that(result.status, is_(Status.actionable)) + assert_that(result.actions, is_(suggested_actions)) + assert_that(result.status_text, is_(Status.actionable.get_status_text(ConditionName("RSV")))) + + assert_that(len(result.cohort_results), is_(1)) + deduplicated_cohort = result.cohort_results[0] + assert_that(deduplicated_cohort.cohort_code, is_("COHORT_A")) + assert_that(deduplicated_cohort.status, is_(Status.actionable)) + 
assert_that(deduplicated_cohort.reasons, is_([])) + assert_that(deduplicated_cohort.description, is_("Cohort A Description")) + assert_that(deduplicated_cohort.audit_rules, is_([])) + assert_that(result.suitability_rules, is_([])) + + def test_build_condition_results_single_condition_single_cohort_not_eligible_with_reasons(self): + cohort_group_results = [CohortGroupResult("COHORT_A", Status.not_eligible, [], "Cohort A Description", [])] + suggested_actions = [ + SuggestedAction( + internal_action_code=InternalActionCode("default_action_code"), + action_type=ActionType("CareCardWithText"), + action_code=ActionCode("BookLocal"), + action_description=ActionDescription("You can get an RSV vaccination at your GP surgery"), + url_link=None, + url_label=None, + ) + ] + iteration_result = IterationResult(Status.not_eligible, cohort_group_results, suggested_actions) + + result = EligibilityCalculator.build_condition(iteration_result, ConditionName("RSV")) + + assert_that(result.condition_name, is_(ConditionName("RSV"))) + assert_that(result.status, is_(Status.not_eligible)) + assert_that(result.actions, is_(suggested_actions)) + assert_that(result.status_text, is_(Status.not_eligible.get_status_text(ConditionName("RSV")))) + + assert_that(len(result.cohort_results), is_(1)) + deduplicated_cohort = result.cohort_results[0] + assert_that(deduplicated_cohort.cohort_code, is_("COHORT_A")) + assert_that(deduplicated_cohort.status, is_(Status.not_eligible)) + assert_that(deduplicated_cohort.reasons, is_([])) + assert_that(deduplicated_cohort.description, is_("Cohort A Description")) + assert_that(deduplicated_cohort.audit_rules, is_([])) + assert_that(result.suitability_rules, is_([])) + + def test_build_condition_results_single_condition_multiple_cohorts_same_cohort_code_same_status(self): + reason_1 = Reason( + RuleType.suppression, + eligibility_status.RuleName("Filter Rule 1"), + RulePriority("1"), + RuleDescription("Filter Rule Description 2"), + matcher_matched=True, + ) + 
reason_2 = Reason( + RuleType.suppression, + eligibility_status.RuleName("Filter Rule 2"), + RulePriority("2"), + RuleDescription("Filter Rule Description 2"), + matcher_matched=True, + ) + cohort_group_results = [ + CohortGroupResult("COHORT_A", Status.not_eligible, [reason_1], "", []), + # The below description will be picked up as the first one is empty + CohortGroupResult("COHORT_A", Status.not_eligible, [reason_2], "Cohort A Description 2", []), + CohortGroupResult("COHORT_A", Status.not_eligible, [], "Cohort A Description 3", []), + ] + suggested_actions = [ + SuggestedAction( + internal_action_code=InternalActionCode("default_action_code"), + action_type=ActionType("CareCardWithText"), + action_code=ActionCode("BookLocal"), + action_description=ActionDescription("You can get an RSV vaccination at your GP surgery"), + url_link=None, + url_label=None, + ) + ] + iteration_result = IterationResult(Status.not_eligible, cohort_group_results, suggested_actions) + + result: Condition = EligibilityCalculator.build_condition(iteration_result, ConditionName("RSV")) + + assert_that(len(result.cohort_results), is_(1)) + + deduplicated_cohort = result.cohort_results[0] + assert_that(deduplicated_cohort.cohort_code, is_("COHORT_A")) + assert_that(deduplicated_cohort.status, is_(Status.not_eligible)) + assert_that(deduplicated_cohort.reasons, contains_inanyorder(reason_1, reason_2)) + assert_that(deduplicated_cohort.description, is_("Cohort A Description 2")) + assert_that(deduplicated_cohort.audit_rules, is_([])) + assert_that(result.suitability_rules, contains_inanyorder(reason_1, reason_2)) + + def test_build_condition_results_multiple_cohorts_different_cohort_code_same_status(self): + reason_1 = Reason( + RuleType.suppression, + eligibility_status.RuleName("Filter Rule 1"), + RulePriority("1"), + RuleDescription("Filter Rule Description 2"), + matcher_matched=True, + ) + reason_2 = Reason( + RuleType.suppression, + eligibility_status.RuleName("Filter Rule 2"), + 
RulePriority("2"), + RuleDescription("Filter Rule Description 2"), + matcher_matched=True, + ) + cohort_group_results = [ + CohortGroupResult("COHORT_X", Status.not_eligible, [reason_1], "Cohort X Description", []), + CohortGroupResult("COHORT_Y", Status.not_eligible, [reason_2], "Cohort Y Description", []), + ] + suggested_actions = [ + SuggestedAction( + internal_action_code=InternalActionCode("default_action_code"), + action_type=ActionType("CareCardWithText"), + action_code=ActionCode("BookLocal"), + action_description=ActionDescription("You can get an RSV vaccination at your GP surgery"), + url_link=None, + url_label=None, + ) + ] + iteration_result = IterationResult(Status.not_eligible, cohort_group_results, suggested_actions) + + result = EligibilityCalculator.build_condition(iteration_result, ConditionName("RSV")) + + assert_that(len(result.cohort_results), is_(2)) + + expected_deduplicated_cohorts = [ + CohortGroupResult("COHORT_X", Status.not_eligible, [reason_1], "Cohort X Description", []), + CohortGroupResult("COHORT_Y", Status.not_eligible, [reason_2], "Cohort Y Description", []), + ] + assert_that(result.cohort_results, contains_inanyorder(*expected_deduplicated_cohorts)) + + def test_build_condition_results_cohorts_status_not_matching_iteration_status(self): + reason_1 = Reason( + RuleType.suppression, + eligibility_status.RuleName("Filter Rule 1"), + RulePriority("1"), + RuleDescription("Matching"), + matcher_matched=True, + ) + reason_2 = Reason( + RuleType.suppression, + eligibility_status.RuleName("Filter Rule 2"), + RulePriority("2"), + RuleDescription("Not matching"), + matcher_matched=True, + ) + cohort_group_results = [ + CohortGroupResult("COHORT_X", Status.not_eligible, [reason_1], "Cohort X Description", []), + CohortGroupResult("COHORT_Y", Status.not_actionable, [reason_2], "Cohort Y Description", []), + ] + + iteration_result = IterationResult(Status.not_eligible, cohort_group_results, []) + + result = 
EligibilityCalculator.build_condition(iteration_result, ConditionName("RSV")) + + assert_that(len(result.cohort_results), is_(1)) + assert_that(result.cohort_results[0].cohort_code, is_("COHORT_X")) + assert_that(result.cohort_results[0].status, is_(Status.not_eligible)) @pytest.mark.parametrize( - ("person_cohorts", "iteration_cohorts", "expected_status", "expected_cohorts"), + ("reason_1", "reason_2", "reason_3", "expected_reasons"), [ + # Same rule name, type, and priority, different description ( - ["covid_cohort", "flu_cohort"], - ["rsv_clinical_cohort", "rsv_75_rolling"], - Status.not_eligible, - ["rsv_clinical_cohort_group", "rsv_75_rolling_group"], - ), - ( - ["rsv_clinical_cohort", "rsv_75_rolling"], - ["rsv_clinical_cohort", "rsv_75_rolling"], - Status.actionable, - ["rsv_clinical_cohort_group"], + ReasonFactory.build(rule_description="description1", matcher_matched=True), + ReasonFactory.build(rule_description="description2", matcher_matched=True), + ReasonFactory.build(rule_description="description3", matcher_matched=True), + [ReasonFactory.build(rule_description="description1", matcher_matched=True)], ), + # Different rule name, same type, same priority ( - ["covid_cohort", "rsv_75_rolling"], - ["rsv_clinical_cohort", "rsv_75_rolling"], - Status.not_actionable, - ["rsv_75_rolling_group"], + ReasonFactory.build(rule_name="Supress Rule 1", rule_description="description1", matcher_matched=True), + ReasonFactory.build(rule_name="Supress Rule 2", rule_description="description2", matcher_matched=True), + ReasonFactory.build(rule_name="Supress Rule 1", rule_description="description3", matcher_matched=True), + [ReasonFactory.build(rule_name="Supress Rule 1", rule_description="description1", matcher_matched=True)], ), + # Same rule name, same type, different priority ( - ["covid_cohort", "rsv_clinical_cohort"], - ["rsv_clinical_cohort", "rsv_75_rolling"], - Status.actionable, - ["rsv_clinical_cohort_group"], + ReasonFactory.build(rule_priority="1", 
rule_description="description1", matcher_matched=True), + ReasonFactory.build(rule_priority="2", rule_description="description2", matcher_matched=True), + ReasonFactory.build(rule_priority="1", rule_description="description3", matcher_matched=True), + [ + ReasonFactory.build(rule_priority="1", rule_description="description1", matcher_matched=True), + ReasonFactory.build(rule_priority="2", rule_description="description2", matcher_matched=True), + ], ), + # Same rule name, same priority, different type ( - ["rsv_75to79_2024", "rsv_75_rolling"], - ["rsv_75to79_2024", "rsv_75_rolling"], - Status.not_actionable, - ["rsv_75_rolling_group", "rsv_75to79_2024_group"], + ReasonFactory.build(rule_type=RuleType.suppression, rule_description="description1", matcher_matched=True), + ReasonFactory.build(rule_type=RuleType.filter, rule_description="description2", matcher_matched=True), + ReasonFactory.build(rule_type=RuleType.suppression, rule_description="description3", matcher_matched=True), + [ + ReasonFactory.build( + rule_type=RuleType.suppression, rule_description="description1", matcher_matched=True + ), + ReasonFactory.build(rule_type=RuleType.filter, rule_description="description2", matcher_matched=True), + ], ), ], ) -def test_eligibility_results_when_multiple_cohorts( - person_cohorts: list[str], - iteration_cohorts: list[str], - expected_status: Status, - expected_cohorts: list[str], - faker: Faker, -): - # Given - nhs_number = NHSNumber(faker.nhs_number()) - dob_person_less_than_75 = DateOfBirth(faker.date_of_birth(minimum_age=66, maximum_age=74)) - - person_rows = person_rows_builder(nhs_number, date_of_birth=dob_person_less_than_75, cohorts=person_cohorts) - campaign_configs = [ - rule_builder.CampaignConfigFactory.build( - target="RSV", - iterations=[ - rule_builder.IterationFactory.build( - iteration_cohorts=[ - rule_builder.IterationCohortFactory.build( - cohort_group=f"{cohorts}_group", - cohort_label=cohorts, - positive_description="positive description", - 
negative_description="negative description", - ) - for cohorts in iteration_cohorts - ], - iteration_rules=[ - rule_builder.PersonAgeSuppressionRuleFactory.build(cohort_label="rsv_75_rolling"), - rule_builder.PersonAgeSuppressionRuleFactory.build(cohort_label="rsv_75to79_2024"), - ], - ) - ], - ) +def test_build_condition_results_grouping_reasons(reason_1, reason_2, reason_3, expected_reasons): + cohort_group_results = [ + CohortGroupResult( + "COHORT_X", + Status.not_actionable, + [reason_1, reason_3], + "Cohort X Description", + [], + ), + CohortGroupResult( + "COHORT_Y", + Status.not_actionable, + [reason_2, reason_3], + "Cohort Y Description", + [], + ), ] - calculator = EligibilityCalculator(person_rows, campaign_configs) + iteration_result = IterationResult(Status.not_actionable, cohort_group_results, []) - # When - actual = calculator.evaluate_eligibility("Y", ["ALL"], "ALL") + result: Condition = EligibilityCalculator.build_condition(iteration_result, ConditionName("RSV")) - # Then - assert_that( - actual, - is_eligibility_status().with_conditions( - has_items( - is_condition() - .with_condition_name(ConditionName("RSV")) - .and_status(equal_to(expected_status)) - .and_cohort_results( - contains_inanyorder( - *[ - is_cohort_result().with_cohort_code(equal_to(cohort_label)) - for cohort_label in expected_cohorts - ] - ) - ) - ) - ), - ) + assert_that(result.suitability_rules, contains_inanyorder(*expected_reasons)) @pytest.mark.parametrize( - ("person_rows", "expected_status", "expected_cohort_group_and_description", "test_comment"), + ("reason_2", "expected_reasons"), [ + # Same rule name, type, and priority, different description ( - person_rows_builder(nhs_number="123", cohorts=[], postcode="AC01", de=True, icb="QE1"), - Status.not_eligible, + ReasonFactory.build( + rule_type=RuleType.suppression, + rule_description="Matching", + rule_name="Supress Rule 1", + rule_priority="1", + matcher_matched=True, + ), [ - ("magic cohort group", "magic negative 
description"), - ("rsv_age_range", "rsv_age_range negative description"), + ReasonFactory.build( + rule_type=RuleType.suppression, + rule_description="Not matching", + rule_name="Supress Rule 1", + rule_priority="1", + matcher_matched=True, + ) ], - "rsv_75_rolling is not base-eligible & magic cohort group not eligible by F rules ", ), + # Different rule name ( - person_rows_builder(nhs_number="123", cohorts=["rsv_75_rolling"], postcode="AC01", de=True, icb="QE1"), - Status.not_eligible, + ReasonFactory.build( + rule_type=RuleType.suppression, + rule_description="Matching", + rule_name="Supress Rule 2", + rule_priority="1", + matcher_matched=True, + ), [ - ("magic cohort group", "magic negative description"), - ("rsv_age_range", "rsv_age_range negative description"), + ReasonFactory.build( + rule_type=RuleType.suppression, + rule_description="Not matching", + rule_name="Supress Rule 1", + rule_priority="1", + matcher_matched=True, + ) ], - "all the cohorts are not-eligible by F rules", ), + # Different priority ( - person_rows_builder(nhs_number="123", cohorts=["rsv_75_rolling"], postcode="SW19", de=False, icb="QE1"), - Status.not_actionable, + ReasonFactory.build( + rule_type=RuleType.suppression, + rule_description="Matching", + rule_name="Supress Rule 1", + rule_priority="2", + matcher_matched=True, + ), [ - ("magic cohort group", "magic positive description"), - ("rsv_age_range", "rsv_age_range positive description"), + ReasonFactory.build( + rule_type=RuleType.suppression, + rule_description="Not matching", + rule_name="Supress Rule 1", + rule_priority="1", + matcher_matched=True, + ), + ReasonFactory.build( + rule_type=RuleType.suppression, + rule_description="Matching", + rule_name="Supress Rule 1", + rule_priority="2", + matcher_matched=True, + ), ], - "all the cohorts are not-actionable", ), + # Different type ( - person_rows_builder(nhs_number="123", cohorts=["rsv_75_rolling"], postcode="AC01", de=False, icb="QE1"), - Status.actionable, + 
ReasonFactory.build( + rule_type=RuleType.filter, + rule_description="Matching", + rule_name="Supress Rule 1", + rule_priority="2", + matcher_matched=True, + ), [ - ("magic cohort group", "magic positive description"), - ("rsv_age_range", "rsv_age_range positive description"), + ReasonFactory.build( + rule_type=RuleType.suppression, + rule_description="Not matching", + rule_name="Supress Rule 1", + rule_priority="1", + matcher_matched=True, + ), + ReasonFactory.build( + rule_type=RuleType.filter, + rule_description="Matching", + rule_name="Supress Rule 1", + rule_priority="2", + matcher_matched=True, + ), ], - "all the cohorts are actionable", - ), - ( - person_rows_builder(nhs_number="123", cohorts=["rsv_75_rolling"], postcode="AC01", de=False, icb="NOT_QE1"), - Status.actionable, - [("magic cohort group", "magic positive description")], - "magic_cohort is actionable, but not others", - ), - ( - person_rows_builder(nhs_number="123", cohorts=["rsv_75_rolling"], postcode="SW19", de=False, icb="NOT_QE1"), - Status.not_actionable, - [("magic cohort group", "magic positive description")], - "magic_cohort is not-actionable, but others are not eligible", ), ], ) -def test_cohort_groups_and_their_descriptions_when_magic_cohort_is_present( - person_rows: list[dict[str, Any]], - expected_status: str, - expected_cohort_group_and_description: list[tuple[str, str]], - test_comment: str, -): - # Given - campaign_configs = [ - rule_builder.CampaignConfigFactory.build( - target="RSV", - iterations=[ - rule_builder.IterationFactory.build( - iteration_cohorts=[ - rule_builder.Rsv75RollingCohortFactory.build(), - rule_builder.MagicCohortFactory.build(), - ], - iteration_rules=[ - # F common rule - rule_builder.DetainedEstateSuppressionRuleFactory.build(type=rules.RuleType.filter), - # F rules for rsv_75_rolling - rule_builder.ICBFilterRuleFactory.build( - type=rules.RuleType.filter, cohort_label=rules.CohortLabel("rsv_75_rolling") - ), - # S common rule - 
rule_builder.PostcodeSuppressionRuleFactory.build( - comparator=rules.RuleComparator("SW19"), - ), - ], - ) - ], - ) - ] - - calculator = EligibilityCalculator(person_rows, campaign_configs) - - # When - actual = calculator.evaluate_eligibility("Y", ["ALL"], "ALL") +def test_build_condition_results_single_cohort(reason_2, expected_reasons): + reason_1 = ReasonFactory.build( + rule_type=RuleType.suppression, + rule_description="Not matching", + rule_name="Supress Rule 1", + rule_priority="1", + matcher_matched=True, + ) - # Then - assert_that( - actual, - is_eligibility_status().with_conditions( - has_items( - is_condition() - .with_condition_name(ConditionName("RSV")) - .and_cohort_results( - contains_exactly( - *[ - is_cohort_result() - .with_cohort_code(item[0]) - .with_description(item[1]) - .with_status(expected_status) - for item in expected_cohort_group_and_description - ] - ) - ) - ) - ), - test_comment, - ) - - -def test_cohort_groups_and_their_descriptions_when_best_status_is_not_eligible( - faker: Faker, -): - # Given - nhs_number = NHSNumber(faker.nhs_number()) - date_of_birth = DateOfBirth(faker.date_of_birth(minimum_age=66, maximum_age=74)) - - person_rows = person_rows_builder(nhs_number, date_of_birth=date_of_birth, cohorts=[]) - campaign_configs = [ - rule_builder.CampaignConfigFactory.build( - target="RSV", - iterations=[ - rule_builder.IterationFactory.build( - iteration_cohorts=[ - rule_builder.Rsv75RollingCohortFactory.build(), - rule_builder.Rsv75to79CohortFactory.build(), - rule_builder.RsvPretendClinicalCohortFactory.build(), - ], - iteration_rules=[rule_builder.PostcodeSuppressionRuleFactory.build()], - ) - ], - ) - ] - - calculator = EligibilityCalculator(person_rows, campaign_configs) - - # When - actual = calculator.evaluate_eligibility("Y", ["ALL"], "ALL") - - # Then - assert_that( - actual, - is_eligibility_status().with_conditions( - has_items( - is_condition() - .with_condition_name(ConditionName("RSV")) - 
.and_status(Status.not_eligible) - .and_cohort_results( - contains_exactly( - is_cohort_result() - .with_cohort_code("rsv_age_range") - .with_description("rsv_age_range negative description"), - is_cohort_result() - .with_cohort_code("rsv_clinical_cohort") - .with_description("rsv_clinical_cohort negative description"), - ) - ) - ) - ), - ) - - -@pytest.mark.parametrize( - ("person_cohorts", "expected_cohort_group_and_description_and_s_rule_names", "test_comment"), - [ - ( - ["rsv_75_rolling"], - [("rsv_age_range", "rsv_age_range positive description", ["Excluded postcode In SW19"])], - "rsv_75_rolling is not-actionable, others are not-eligible", - ), - ( - ["rsv_75_rolling", "rsv_75to79_2024"], - [ - ( - "rsv_age_range", - "rsv_age_range positive description", - ["Excluded postcode In SW19", "Excluded postcode In SW19"], - ) - ], - "rsv_75_rolling, rsv_75to79_2024 is not-actionable, rsv_pretend_clinical_cohort are not-eligible", - ), - ( - ["rsv_75_rolling", "rsv_75to79_2024", "rsv_pretend_clinical_cohort"], - [ - ( - "rsv_age_range", - "rsv_age_range positive description", - ["Excluded postcode In SW19", "Excluded postcode In SW19"], - ), - ("rsv_clinical_cohort", "rsv_clinical_cohort positive description", ["Excluded postcode In SW19"]), - ], - "all are not-actionable", - ), - ], -) -def test_cohort_groups_and_their_descriptions_and_the_collection_of_s_rules_when_best_status_is_not_actionable( - person_cohorts: list[str], - expected_cohort_group_and_description_and_s_rule_names: list[tuple[str, str, list[str]]], - test_comment: str, - faker: Faker, -): - # Given - nhs_number = NHSNumber(faker.nhs_number()) - date_of_birth = DateOfBirth(faker.date_of_birth(minimum_age=66, maximum_age=74)) - - person_rows = person_rows_builder(nhs_number, date_of_birth=date_of_birth, cohorts=person_cohorts, postcode="SW19") - campaign_configs = [ - rule_builder.CampaignConfigFactory.build( - target="RSV", - iterations=[ - rule_builder.IterationFactory.build( - iteration_cohorts=[ 
- rule_builder.Rsv75RollingCohortFactory.build(), - rule_builder.Rsv75to79CohortFactory.build(), - rule_builder.RsvPretendClinicalCohortFactory.build(), - ], - iteration_rules=[rule_builder.PostcodeSuppressionRuleFactory.build()], - ) - ], - ) - ] - - calculator = EligibilityCalculator(person_rows, campaign_configs) - - # When - actual = calculator.evaluate_eligibility("Y", ["ALL"], "ALL") - - # Then - assert_that( - actual, - is_eligibility_status().with_conditions( - has_items( - is_condition() - .with_condition_name(ConditionName("RSV")) - .and_status(Status.not_actionable) - .and_cohort_results( - contains_exactly( - *[ - is_cohort_result() - .with_cohort_code(item[0]) - .and_description(item[1]) - .and_reasons( - contains_exactly(*[is_reason().with_rule_name(rule_name) for rule_name in item[2]]) - ) - for item in expected_cohort_group_and_description_and_s_rule_names - ] - ) - ), - ) - ), - test_comment, - ) - - -@pytest.mark.parametrize( - ("person_cohorts", "expected_cohort_group_and_description", "test_comment"), - [ - ( - ["rsv_75_rolling"], - [("rsv_age_range", "rsv_age_range positive description")], - "rsv_75_rolling is actionable, others are not-eligible", - ), - ( - ["rsv_75_rolling", "rsv_75to79_2024"], - [("rsv_age_range", "rsv_age_range positive description")], - "rsv_75_rolling, rsv_75to79_2024 is actionable, rsv_pretend_clinical_cohort are not-eligible", - ), - ( - ["rsv_75_rolling", "rsv_75to79_2024", "rsv_pretend_clinical_cohort"], - [ - ("rsv_age_range", "rsv_age_range positive description"), - ("rsv_clinical_cohort", "rsv_clinical_cohort positive description"), - ], - "all are actionable", - ), - ], -) -def test_cohort_group_and_descriptions_when_best_status_is_actionable( - person_cohorts: list[str], - expected_cohort_group_and_description: list[tuple[str, str]], - test_comment: str, - faker: Faker, -): - # Given - nhs_number = NHSNumber(faker.nhs_number()) - date_of_birth = DateOfBirth(faker.date_of_birth(minimum_age=66, maximum_age=74)) - - 
person_rows = person_rows_builder(nhs_number, date_of_birth=date_of_birth, cohorts=person_cohorts, postcode="hp") - campaign_configs = [ - rule_builder.CampaignConfigFactory.build( - target="RSV", - iterations=[ - rule_builder.IterationFactory.build( - iteration_cohorts=[ - rule_builder.Rsv75RollingCohortFactory.build(), - rule_builder.Rsv75to79CohortFactory.build(), - rule_builder.RsvPretendClinicalCohortFactory.build(), - ], - iteration_rules=[rule_builder.PostcodeSuppressionRuleFactory.build()], - ) - ], - ) - ] - - calculator = EligibilityCalculator(person_rows, campaign_configs) - - # When - actual = calculator.evaluate_eligibility("Y", ["ALL"], "ALL") - - # Then - assert_that( - actual, - is_eligibility_status().with_conditions( - has_items( - is_condition() - .with_condition_name(ConditionName("RSV")) - .and_status(Status.actionable) - .and_cohort_results( - contains_exactly( - *[ - is_cohort_result().with_cohort_code(item[0]).with_description(item[1]) - for item in expected_cohort_group_and_description - ] - ) - ) - ) - ), - test_comment, - ) - - -@pytest.mark.parametrize( - ("person_rows", "expected_description", "test_comment"), - [ - ( - person_rows_builder(nhs_number="123", cohorts=[]), - "rsv_age_range negative description 1", - "status - not eligible", - ), - ( - person_rows_builder(nhs_number="123", cohorts=["rsv_75_rolling", "rsv_75to79_2024"], postcode="SW19"), - "rsv_age_range positive description 1", - "status - not actionable", - ), - ( - person_rows_builder(nhs_number="123", cohorts=["rsv_75_rolling", "rsv_75to79_2024"], postcode="hp"), - "rsv_age_range positive description 1", - "status - actionable", - ), - ( - person_rows_builder(nhs_number="123", cohorts=["rsv_75to79_2024"], postcode="hp"), - "rsv_age_range positive description 2", - "rsv_75to79_2024 - actionable and rsv_75_rolling is not eligible", - ), - ], -) -def test_cohort_group_descriptions_are_selected_based_on_priority_when_cohorts_have_different_non_empty_descriptions( - 
person_rows: list[dict[str, Any]], expected_description: str, test_comment: str -): - # Given - campaign_configs = [ - rule_builder.CampaignConfigFactory.build( - target="RSV", - iterations=[ - rule_builder.IterationFactory.build( - iteration_cohorts=[ - rule_builder.Rsv75to79CohortFactory.build( - positive_description=rules.Description("rsv_age_range positive description 2"), - negative_description=rules.Description("rsv_age_range negative description 2"), - priority=2, - ), - rule_builder.Rsv75RollingCohortFactory.build( - positive_description=rules.Description("rsv_age_range positive description 1"), - negative_description=rules.Description("rsv_age_range negative description 1"), - priority=1, - ), - ], - iteration_rules=[rule_builder.PostcodeSuppressionRuleFactory.build()], - ) - ], - ) - ] - - calculator = EligibilityCalculator(person_rows, campaign_configs) - - # When - actual = calculator.evaluate_eligibility("Y", ["ALL"], "ALL") - - # Then - assert_that( - actual, - is_eligibility_status().with_conditions( - has_items( - is_condition() - .with_condition_name(ConditionName("RSV")) - .and_cohort_results( - contains_exactly( - is_cohort_result().with_cohort_code("rsv_age_range").with_description(expected_description) - ) - ) - ) - ), - test_comment, - ) - - -@pytest.mark.parametrize( - ("person_rows", "iteration_cohorts", "expected_cohort_group_and_description", "expected_status", "test_comment"), - [ - ( - person_rows_builder("123", postcode="SW19", cohorts=[], de=False), - [rule_builder.Rsv75to79CohortFactory.build(negative_description=None, priority=2)], - [("rsv_age_range", "")], - Status.not_eligible, - "if group has one cohort, with no description, expect no description", - ), - ( - person_rows_builder("123", postcode="SW19", cohorts=["rsv_75to79_2024", "rsv_75_rolling"], de=False), - [rule_builder.Rsv75to79CohortFactory.build(negative_description=None, priority=2)], - [("rsv_age_range", "")], - Status.not_eligible, - "if group has one cohort, with no 
description, expect no description", - ), - ( - person_rows_builder("123", postcode="HP1", cohorts=["rsv_75to79_2024", "rsv_75_rolling"], de=True), - [rule_builder.Rsv75to79CohortFactory.build(positive_description=None, priority=2)], - [("rsv_age_range", "")], - Status.not_actionable, - "if group has one cohort, with no description, expect no description", - ), - ( - person_rows_builder("123", postcode="HP1", cohorts=["rsv_75to79_2024", "rsv_75_rolling"], de=False), - [rule_builder.Rsv75to79CohortFactory.build(positive_description=None, priority=2)], - [("rsv_age_range", "")], - Status.actionable, - "if group has one cohort, with no description, expect no description", - ), - ( - person_rows_builder("123", postcode="SW19", cohorts=[], de=False), - [ - rule_builder.Rsv75to79CohortFactory.build(priority=2, negative_description=None), - rule_builder.Rsv75RollingCohortFactory.build(priority=3, negative_description="rsv age range -ve 1"), - rule_builder.Rsv75RollingCohortFactory.build( - cohort_label="rsv_75_rolling_2", priority=4, negative_description="rsv age range -ve 2" - ), - ], - [("rsv_age_range", "rsv age range -ve 1")], - Status.not_eligible, - "if group has more than one cohort, at least one has description, expect first non empty description", - ), - ( - person_rows_builder("123", postcode="SW19", cohorts=["rsv_75to79_2024", "rsv_75_rolling"], de=False), - [ - rule_builder.Rsv75to79CohortFactory.build(priority=2, negative_description=None), - rule_builder.Rsv75RollingCohortFactory.build(priority=3, negative_description="rsv age range -ve 1"), - rule_builder.Rsv75RollingCohortFactory.build( - cohort_label="rsv_75_rolling_2", priority=4, negative_description="rsv age range -ve 2" - ), - ], - [("rsv_age_range", "rsv age range -ve 1")], - Status.not_eligible, - "if group has more than one cohort, at least one has description, expect first non empty description", - ), - ( - person_rows_builder("123", postcode="HP1", cohorts=["rsv_75to79_2024", "rsv_75_rolling"], 
de=True), - [ - rule_builder.Rsv75to79CohortFactory.build(priority=2, positive_description=None), - rule_builder.Rsv75RollingCohortFactory.build(priority=3, positive_description="rsv age range +ve 1"), - rule_builder.Rsv75RollingCohortFactory.build( - cohort_label="rsv_75_rolling_2", priority=4, positive_description="rsv age range +ve 2" - ), - ], - [("rsv_age_range", "rsv age range +ve 1")], - Status.not_actionable, - "if group has more than one cohort, at least one has description, expect first non empty description", - ), - ( - person_rows_builder("123", postcode="HP1", cohorts=["rsv_75to79_2024", "rsv_75_rolling"], de=False), - [ - rule_builder.Rsv75to79CohortFactory.build(priority=2, positive_description=None), - rule_builder.Rsv75RollingCohortFactory.build(priority=3, positive_description="rsv age range +ve 1"), - rule_builder.Rsv75RollingCohortFactory.build( - cohort_label="rsv_75_rolling_2", priority=4, positive_description="rsv age range +ve 2" - ), - ], - [("rsv_age_range", "rsv age range +ve 1")], - Status.actionable, - "if group has more than one cohort, at least one has description, expect first non empty description", - ), - ], -) -def test_cohort_group_descriptions_pick_first_non_empty_if_available( - person_rows: list[dict[str, Any]], - iteration_cohorts: list[rules.IterationCohort], - expected_cohort_group_and_description: list[tuple[str, str]], - expected_status: Status, - test_comment: str, -): - # Given - campaign_configs = [ - rule_builder.CampaignConfigFactory.build( - target="RSV", - iterations=[ - rule_builder.IterationFactory.build( - iteration_cohorts=iteration_cohorts, - iteration_rules=[ - rule_builder.PostcodeSuppressionRuleFactory.build(type=rules.RuleType.filter), - rule_builder.DetainedEstateSuppressionRuleFactory.build(), - ], - ) - ], - ) - ] - - calculator = EligibilityCalculator(person_rows, campaign_configs) - - # When - actual = calculator.evaluate_eligibility("Y", ["ALL"], "ALL") - - # Then - assert_that( - actual, - 
is_eligibility_status().with_conditions( - has_items( - is_condition() - .with_condition_name(ConditionName("RSV")) - .and_status(expected_status) - .and_cohort_results( - contains_exactly( - *[ - is_cohort_result() - .with_cohort_code(item[0]) - .with_description(item[1]) - .with_status(expected_status) - for item in expected_cohort_group_and_description - ] - ) - ) - ) - ), - test_comment, - ) - - -book_nbs_comms = AvailableAction( - ActionType="ButtonAuthLink", - ExternalRoutingCode="BookNBS", - ActionDescription="Action description", - UrlLink=HttpUrl("https://www.nhs.uk/book-rsv"), - UrlLabel="Continue to booking", -) - -default_comms_detail = AvailableAction( - ActionType="CareCardWithText", - ExternalRoutingCode="BookLocal", - ActionDescription="You can get an RSV vaccination at your GP surgery", -) - - -@pytest.mark.parametrize( - ("test_comment", "default_comms_routing", "comms_routing", "actions_mapper", "expected_actions"), - [ - ( - """Rule match: default_comms_routing present, action_mapper present, - return actions from matching comms from rule""", - "defaultcomms", - "InternalBookNBS", - {"InternalBookNBS": book_nbs_comms, "defaultcomms": default_comms_detail}, - [ - SuggestedAction( - internal_action_code=InternalActionCode("InternalBookNBS"), - action_type=ActionType("ButtonAuthLink"), - action_code=ActionCode("BookNBS"), - action_description=ActionDescription("Action description"), - url_link=UrlLink(HttpUrl("https://www.nhs.uk/book-rsv")), - url_label=UrlLabel("Continue to booking"), - ) - ], - ), - ( - """Rule match: default_comms_routing has multiple values, - comms missing in rule, all default comms should be returned in actions""", - "defaultcomms1|defaultcomms2", - None, - {"defaultcomms1": default_comms_detail, "defaultcomms2": default_comms_detail}, - [ - SuggestedAction( - internal_action_code=InternalActionCode("defaultcomms1"), - action_type=ActionType("CareCardWithText"), - action_code=ActionCode("BookLocal"), - 
action_description=ActionDescription("You can get an RSV vaccination at your GP surgery"), - url_link=None, - url_label=None, - ), - SuggestedAction( - internal_action_code=InternalActionCode("defaultcomms2"), - action_type=ActionType("CareCardWithText"), - action_code=ActionCode("BookLocal"), - action_description=ActionDescription("You can get an RSV vaccination at your GP surgery"), - url_link=None, - url_label=None, - ), - ], - ), - ( - """Rule match: default_comms_routing has multiple values, - comms is empty string, all default comms should be returned in actions""", - "defaultcomms1", - "", - {"defaultcomms1": default_comms_detail}, - [ - SuggestedAction( - internal_action_code=InternalActionCode("defaultcomms1"), - action_type=ActionType("CareCardWithText"), - action_code=ActionCode("BookLocal"), - action_description=ActionDescription("You can get an RSV vaccination at your GP surgery"), - url_link=None, - url_label=None, - ) - ], - ), - ( - """Rule match: default_comms_routing present, - action_mapper missing for matching comms, return default_comms in actions""", - "defaultcomms", - "InternalBookNBS", - {"defaultcomms": default_comms_detail}, - [ - SuggestedAction( - internal_action_code=InternalActionCode("defaultcomms"), - action_type=ActionType("CareCardWithText"), - action_code=ActionCode("BookLocal"), - action_description=ActionDescription("You can get an RSV vaccination at your GP surgery"), - url_link=None, - url_label=None, - ) - ], - ), - ( - """Rule match: default_comms_routing present, - rule has an incorrect comms key, return default_comms in actions""", - "defaultcomms", - "InvalidCode", - {"defaultcomms": default_comms_detail}, - [ - SuggestedAction( - internal_action_code=InternalActionCode("defaultcomms"), - action_type=ActionType("CareCardWithText"), - action_code=ActionCode("BookLocal"), - action_description=ActionDescription("You can get an RSV vaccination at your GP surgery"), - url_link=None, - url_label=None, - ) - ], - ), - ( - 
"""Rule match: action_mapper present without url, - return actions from matching comms from rule""", - "defaultcomms", - "InternalBookNBS", - { - "InternalBookNBS": AvailableAction( - ActionType=book_nbs_comms.action_type, - ExternalRoutingCode=book_nbs_comms.action_code, - ActionDescription=book_nbs_comms.action_description, - ) - }, - [ - SuggestedAction( - internal_action_code=InternalActionCode("InternalBookNBS"), - action_type=ActionType(book_nbs_comms.action_type), - action_code=ActionCode(book_nbs_comms.action_code), - action_description=ActionDescription(book_nbs_comms.action_description), - url_link=None, - url_label=None, - ) - ], - ), - ( - """Rule match: default_comms_routing missing, - comms present in rule, action_mapper missing, return no actions""", - "", - "InternalBookNBS", - {}, - [], - ), - ( - """Rule match: default_comms_routing missing, but action_mapper present, - return actions from matching comms from rule""", - "", - "InternalBookNBS", - {"InternalBookNBS": book_nbs_comms}, - [ - SuggestedAction( - internal_action_code=InternalActionCode("InternalBookNBS"), - action_type=ActionType("ButtonAuthLink"), - action_code=ActionCode("BookNBS"), - action_description=ActionDescription("Action description"), - url_link=UrlLink(HttpUrl("https://www.nhs.uk/book-rsv")), - url_label=UrlLabel("Continue to booking"), - ) - ], - ), - ( - """Rule match: default_comms_routing present, - comms present in rule, but action_mapper missing, return no actions""", - "defaultcommskeywithoutactionmapper", - "InternalBookNBS", - {}, - [], - ), - ( - """Rule match: default_comms_routing has multiple values, - one of the value is invalid, valid values should be returned in actions""", - "defaultcomms1|invaliddefault", - None, - {"defaultcomms1": default_comms_detail}, - [ - SuggestedAction( - internal_action_code=InternalActionCode("defaultcomms1"), - action_type=ActionType("CareCardWithText"), - action_code=ActionCode("BookLocal"), - 
action_description=ActionDescription("You can get an RSV vaccination at your GP surgery"), - url_link=None, - url_label=None, - ) - ], - ), - ], -) -def test_correct_actions_determined_from_redirect_r_rules( # noqa: PLR0913 - test_comment: str, - default_comms_routing: str, - comms_routing: str, - actions_mapper: ActionsMapper, - expected_actions: list[SuggestedAction], - faker: Faker, -): - # Given - nhs_number = NHSNumber(faker.nhs_number()) - - person_rows = person_rows_builder(nhs_number, cohorts=["cohort1"], icb="QE1") - - campaign_configs = [ - ( - rule_builder.CampaignConfigFactory.build( - target="RSV", - iterations=[ - rule_builder.IterationFactory.build( - iteration_cohorts=[rule_builder.IterationCohortFactory.build(cohort_label="cohort1")], - default_comms_routing=default_comms_routing, - actions_mapper=rule_builder.ActionsMapperFactory.build(root=actions_mapper), - iteration_rules=[rule_builder.ICBRedirectRuleFactory.build(comms_routing=comms_routing)], - ) - ], - ) - ) - ] - - calculator = EligibilityCalculator(person_rows, campaign_configs) - - # When - actual = calculator.evaluate_eligibility("Y", ["ALL"], "ALL") - - # Then - assert_that( - actual, - is_eligibility_status().with_conditions( - has_items( - is_condition() - .with_condition_name(ConditionName("RSV")) - .and_status(equal_to(Status.actionable)) - .and_actions(equal_to(expected_actions)) - ) - ), - test_comment, - ) - - -@pytest.mark.parametrize( - ("test_comment", "redirect_r_rule_cohort_label"), - [ - ("cohort_label matches person cohort, result action ActionCode1", "cohort1"), - ("cohort_label NOT matches person cohort, result action ActionCode1", "cohort2"), - ], -) -def test_cohort_label_not_supported_used_in_r_rules(test_comment: str, redirect_r_rule_cohort_label: str, faker: Faker): - # Given - nhs_number = NHSNumber(faker.nhs_number()) - - person_rows = person_rows_builder(nhs_number, cohorts=["cohort1"], icb="QE1") - campaign_configs = [ - ( - 
rule_builder.CampaignConfigFactory.build( - target="RSV", - iterations=[ - rule_builder.IterationFactory.build( - iteration_cohorts=[rule_builder.IterationCohortFactory.build(cohort_label="cohort1")], - default_comms_routing="defaultcomms", - actions_mapper=rule_builder.ActionsMapperFactory.build( - root={ - "ActionCode1": book_nbs_comms, - "defaultcomms": default_comms_detail, - } - ), - iteration_rules=[ - rule_builder.ICBRedirectRuleFactory.build( - cohort_label=rules.CohortLabel(redirect_r_rule_cohort_label) - ) - ], - ) - ], - ) - ) - ] - - calculator = EligibilityCalculator(person_rows, campaign_configs) - - # When - actual = calculator.evaluate_eligibility("Y", ["ALL"], "ALL") - - # Then - assert_that( - actual, - is_eligibility_status().with_conditions( - has_items( - is_condition() - .with_condition_name(ConditionName("RSV")) - .and_status(equal_to(Status.actionable)) - .and_actions( - equal_to( - [ - SuggestedAction( - internal_action_code=InternalActionCode("ActionCode1"), - action_type=ActionType("ButtonAuthLink"), - action_code=ActionCode("BookNBS"), - action_description=ActionDescription("Action description"), - url_link=UrlLink(HttpUrl("https://www.nhs.uk/book-rsv")), - url_label=UrlLabel("Continue to booking"), - ) - ] - ) - ) - ) - ), - test_comment, - ) - - -def test_multiple_r_rules_match_with_same_priority(faker: Faker): - # Given - nhs_number = NHSNumber(faker.nhs_number()) - - person_rows = person_rows_builder(nhs_number, cohorts=["cohort1"], icb="QE1") - campaign_configs = [ - ( - rule_builder.CampaignConfigFactory.build( - target="RSV", - iterations=[ - rule_builder.IterationFactory.build( - iteration_cohorts=[rule_builder.IterationCohortFactory.build(cohort_label="cohort1")], - default_comms_routing="defaultcomms", - actions_mapper=rule_builder.ActionsMapperFactory.build( - root={ - "rule_1_comms_routing": book_nbs_comms, - "rule_2_comms_routing": book_nbs_comms, - "rule_3_comms_routing": book_nbs_comms, - "defaultcomms": 
default_comms_detail, - } - ), - iteration_rules=[ - rule_builder.ICBRedirectRuleFactory.build(comms_routing="rule_1_comms_routing"), - rule_builder.ICBRedirectRuleFactory.build(comms_routing="rule_2_comms_routing"), - rule_builder.ICBRedirectRuleFactory.build( - priority=2, - attribute_name=rules.RuleAttributeName("ICBMismatch"), - comms_routing="rule_3_comms_routing", - ), - ], - ) - ], - ) - ) - ] - - calculator = EligibilityCalculator(person_rows, campaign_configs) - - # When - actual = calculator.evaluate_eligibility("Y", ["ALL"], "ALL") - - # Then - assert_that( - actual, - is_eligibility_status().with_conditions( - has_items( - is_condition() - .with_condition_name(ConditionName("RSV")) - .and_status(equal_to(Status.actionable)) - .and_actions( - equal_to( - [ - SuggestedAction( - internal_action_code=InternalActionCode("rule_1_comms_routing"), - action_type=ActionType("ButtonAuthLink"), - action_code=ActionCode("BookNBS"), - action_description=ActionDescription("Action description"), - url_link=UrlLink(HttpUrl("https://www.nhs.uk/book-rsv")), - url_label=UrlLabel("Continue to booking"), - ) - ] - ) - ) - ) - ), - ) - - -def test_multiple_r_rules_with_same_priority_one_rule_mismatch_should_return_default_comms(faker: Faker): - # Given - nhs_number = NHSNumber(faker.nhs_number()) - - person_rows = person_rows_builder(nhs_number, cohorts=["cohort1"], icb="QE1") - campaign_configs = [ - ( - rule_builder.CampaignConfigFactory.build( - target="RSV", - iterations=[ - rule_builder.IterationFactory.build( - iteration_cohorts=[rule_builder.IterationCohortFactory.build(cohort_label="cohort1")], - default_comms_routing="defaultcomms", - actions_mapper=rule_builder.ActionsMapperFactory.build( - root={ - "rule_1_comms_routing": book_nbs_comms, - "rule_2_comms_routing": book_nbs_comms, - "rule_3_comms_routing": book_nbs_comms, - "defaultcomms": default_comms_detail, - } - ), - iteration_rules=[ - 
rule_builder.ICBRedirectRuleFactory.build(comms_routing="rule_1_comms_routing"), - rule_builder.ICBRedirectRuleFactory.build(comms_routing="rule_2_comms_routing"), - rule_builder.ICBRedirectRuleFactory.build( - attribute_name=rules.RuleAttributeName("ICBMismatch"), - comms_routing="rule_3_comms_routing", - ), - ], - ) - ], - ) - ) - ] - - calculator = EligibilityCalculator(person_rows, campaign_configs) - - # When - actual = calculator.evaluate_eligibility("Y", ["ALL"], "ALL") - - # Then - assert_that( - actual, - is_eligibility_status().with_conditions( - has_items( - is_condition() - .with_condition_name(ConditionName("RSV")) - .and_status(equal_to(Status.actionable)) - .and_actions( - equal_to( - [ - SuggestedAction( - internal_action_code=InternalActionCode("defaultcomms"), - action_type=ActionType("CareCardWithText"), - action_code=ActionCode("BookLocal"), - action_description=ActionDescription( - "You can get an RSV vaccination at your GP surgery" - ), - url_link=None, - url_label=None, - ) - ] - ) - ) - ) - ), - ) - - -def test_only_highest_priority_rule_is_applied_and_return_actions_only_for_that_rule(faker: Faker): - # Given - nhs_number = NHSNumber(faker.nhs_number()) - - person_rows = person_rows_builder(nhs_number, cohorts=["cohort1"], icb="QE1") - campaign_configs = [ - ( - rule_builder.CampaignConfigFactory.build( - target="RSV", - iterations=[ - rule_builder.IterationFactory.build( - iteration_cohorts=[rule_builder.IterationCohortFactory.build(cohort_label="cohort1")], - default_comms_routing="defaultcomms", - actions_mapper=rule_builder.ActionsMapperFactory.build( - root={ - "rule_1_comms_routing": AvailableAction( - ActionType="ButtonAuthLink", - ExternalRoutingCode="BookNBS", - ActionDescription="Action description", - ), - "rule_2_comms_routing": AvailableAction( - ActionType="AuthLink", - ExternalRoutingCode="BookNBS", - ActionDescription="Action description", - UrlLink=HttpUrl("https://www.nhs.uk/book-rsv"), - UrlLabel="Continue to booking", - 
), - "defaultcomms": default_comms_detail, - } - ), - iteration_rules=[ - rule_builder.ICBRedirectRuleFactory.build(priority=2, comms_routing="rule_2_comms_routing"), - rule_builder.ICBRedirectRuleFactory.build(priority=1, comms_routing="rule_1_comms_routing"), - ], - ) - ], - ) - ) - ] - - calculator = EligibilityCalculator(person_rows, campaign_configs) - - # When - actual = calculator.evaluate_eligibility("Y", ["ALL"], "ALL") - - expected_actions = SuggestedAction( - internal_action_code=InternalActionCode("rule_1_comms_routing"), - action_type=ActionType("ButtonAuthLink"), - action_code=ActionCode("BookNBS"), - action_description=ActionDescription("Action description"), - url_link=None, - url_label=None, - ) - - # Then - assert_that( - actual, - is_eligibility_status().with_conditions( - has_items( - is_condition() - .with_condition_name(ConditionName("RSV")) - .and_status(equal_to(Status.actionable)) - .and_actions(equal_to([expected_actions])) - ) - ), - ) - - -def test_should_include_actions_when_include_actions_flag_is_true_when_status_is_actionable(faker: Faker): - # Given - nhs_number = NHSNumber(faker.nhs_number()) - - person_rows = person_rows_builder(nhs_number, cohorts=["cohort1"], icb="QE1") - campaign_configs = [ - ( - rule_builder.CampaignConfigFactory.build( - target="RSV", - iterations=[ - rule_builder.IterationFactory.build( - iteration_cohorts=[rule_builder.IterationCohortFactory.build(cohort_label="cohort1")], - default_comms_routing="defaultcomms", - actions_mapper=rule_builder.ActionsMapperFactory.build( - root={ - "book_nbs": book_nbs_comms, - "defaultcomms": default_comms_detail, - } - ), - iteration_rules=[ - rule_builder.ICBRedirectRuleFactory.build(priority=2, comms_routing="book_nbs"), - ], - ) - ], - ) - ) - ] - - calculator = EligibilityCalculator(person_rows, campaign_configs) - - # When - actual = calculator.evaluate_eligibility("Y", ["ALL"], "ALL") - - # Then - assert_that( - actual, - is_eligibility_status().with_conditions( - 
has_items( - is_condition() - .with_condition_name(ConditionName("RSV")) - .and_status(equal_to(Status.actionable)) - .and_actions( - equal_to( - [ - SuggestedAction( - internal_action_code=InternalActionCode("book_nbs"), - action_type=ActionType("ButtonAuthLink"), - action_code=ActionCode("BookNBS"), - action_description=ActionDescription("Action description"), - url_link=UrlLink(HttpUrl("https://www.nhs.uk/book-rsv")), - url_label=UrlLabel("Continue to booking"), - ) - ] - ) - ) - ) - ), - ) - - -def test_should_not_include_actions_when_include_actions_flag_is_false_when_status_is_actionable(faker: Faker): - # Given - nhs_number = NHSNumber(faker.nhs_number()) - - person_rows = person_rows_builder(nhs_number, cohorts=["cohort1"], icb="QE1") - campaign_configs = [ - ( - rule_builder.CampaignConfigFactory.build( - target="RSV", - iterations=[ - rule_builder.IterationFactory.build( - iteration_cohorts=[rule_builder.IterationCohortFactory.build(cohort_label="cohort1")], - default_comms_routing="defaultcomms", - actions_mapper=rule_builder.ActionsMapperFactory.build( - root={ - "book_nbs": book_nbs_comms, - "defaultcomms": default_comms_detail, - } - ), - iteration_rules=[ - rule_builder.ICBRedirectRuleFactory.build(priority=2, comms_routing="book_nbs"), - ], - ) - ], - ) - ) + cohort_group_results = [ + CohortGroupResult("COHORT_Y", Status.not_actionable, [reason_1, reason_2], "Cohort Y Description", []) ] - calculator = EligibilityCalculator(person_rows, campaign_configs) - - # When - actual = calculator.evaluate_eligibility("N", ["ALL"], "ALL") - - # Then - assert_that( - actual, - is_eligibility_status().with_conditions( - has_items( - is_condition() - .with_condition_name(ConditionName("RSV")) - .and_status(equal_to(Status.actionable)) - .and_actions(equal_to(None)) - ) - ), - ) - - -@pytest.mark.parametrize( - ("campaign_target", "campaign_type", "conditions_filter", "category_filter", "expected_result"), - [ - # Multiple matching campaigns under the same 
condition - ("RSV", "V", ["RSV"], "VACCINATIONS", [("RSV", "V")]), - ("RSV", "V", ["COVID"], "VACCINATIONS", []), - ("RSV", "S", ["RSV"], "ALL", [("RSV", "S")]), - ("RSV", "S", ["ALL"], "ALL", [("RSV", "S")]), - ("RSV", "S", ["RSV"], "VACCINATIONS", []), - # Multiple campaigns with different types under the same condition name - ("RSV", "V", ["RSV"], "ALL", [("RSV", "V")]), - # Campaign is live but condition not in filter (no yield) - ("FLU", "V", ["COVID", "RSV"], "ALL", []), - # Category is ALL and condition filter includes ALL (everything matches) - ("FLU", "S", ["ALL"], "ALL", [("FLU", "S")]), - # Condition filter is unknown (should not match anything) - ("COVID", "V", ["UNKNOWN"], "VACCINATIONS", []), - # Campaign with the target matching one of several condition filters - ("FLU", "V", ["COVID", "FLU"], "VACCINATIONS", [("FLU", "V")]), - ], -) -def test_campaigns_grouped_by_condition_name_filters_correctly( - campaign_target, campaign_type, conditions_filter, category_filter, expected_result -): - campaign = rule_builder.CampaignConfigFactory.build(target=campaign_target, type=campaign_type, campaign_live=True) - - calculator = EligibilityCalculator(person_data=[], campaign_configs=[campaign]) - result = list(calculator.campaigns_grouped_by_condition_name(conditions_filter, category_filter)) + iteration_result = IterationResult(Status.not_actionable, cohort_group_results, []) + result = EligibilityCalculator.build_condition(iteration_result, ConditionName("RSV")) - assert_that([(str(name), group[0].type) for name, group in result], is_(expected_result)) + assert_that(len(result.cohort_results), is_(1)) + assert_that(result.cohort_results[0].reasons, contains_inanyorder(*expected_reasons)) diff --git a/tests/unit/services/calculators/test_rule_calculator.py b/tests/unit/services/calculators/test_rule_calculator.py index b8069a16a..8e42013d9 100644 --- a/tests/unit/services/calculators/test_rule_calculator.py +++ 
b/tests/unit/services/calculators/test_rule_calculator.py @@ -1,31 +1,27 @@ -from collections.abc import Collection, Mapping -from typing import Any - import pytest -from eligibility_signposting_api.model import rules +from eligibility_signposting_api.model.campaign_config import IterationRule, RuleAttributeLevel +from eligibility_signposting_api.model.person import Person from eligibility_signposting_api.services.calculators.rule_calculator import RuleCalculator from tests.fixtures.builders.model import rule as rule_builder -Row = Collection[Mapping[str, Any]] - @pytest.mark.parametrize( ("person_data", "rule", "expected"), [ # PERSON attribute level ( - [{"ATTRIBUTE_TYPE": "PERSON", "POSTCODE": "SW19"}], + Person([{"ATTRIBUTE_TYPE": "PERSON", "POSTCODE": "SW19"}]), rule_builder.IterationRuleFactory.build( - attribute_level=rules.RuleAttributeLevel.PERSON, attribute_name="POSTCODE" + attribute_level=RuleAttributeLevel.PERSON, attribute_name="POSTCODE" ), "SW19", ), # TARGET attribute level ( - [{"ATTRIBUTE_TYPE": "RSV", "LAST_SUCCESSFUL_DATE": "20240101"}], + Person([{"ATTRIBUTE_TYPE": "RSV", "LAST_SUCCESSFUL_DATE": "20240101"}]), rule_builder.IterationRuleFactory.build( - attribute_level=rules.RuleAttributeLevel.TARGET, + attribute_level=RuleAttributeLevel.TARGET, attribute_name="LAST_SUCCESSFUL_DATE", attribute_target="RSV", ), @@ -33,17 +29,17 @@ ), # COHORT attribute level ( - [{"ATTRIBUTE_TYPE": "COHORTS", "COHORT_LABEL": ""}], + Person([{"ATTRIBUTE_TYPE": "COHORTS", "COHORT_LABEL": ""}]), rule_builder.IterationRuleFactory.build( - attribute_level=rules.RuleAttributeLevel.COHORT, attribute_name="COHORT_LABEL" + attribute_level=RuleAttributeLevel.COHORT, attribute_name="COHORT_LABEL" ), "", ), ], ) -def test_get_attribute_value_for_all_attribute_levels(person_data: Row, rule: rules.IterationRule, expected: str): +def test_get_attribute_value_for_all_attribute_levels(person_data: Person, rule: IterationRule, expected: str): # Given - calc = 
RuleCalculator(person_data=person_data, rule=rule) + calc = RuleCalculator(person=person_data, rule=rule) # When actual = calc.get_attribute_value() # Then diff --git a/tests/unit/services/operators/test_operators.py b/tests/unit/services/operators/test_operators.py index 1c2b2ba70..e1c74164e 100644 --- a/tests/unit/services/operators/test_operators.py +++ b/tests/unit/services/operators/test_operators.py @@ -2,8 +2,8 @@ from freezegun import freeze_time from hamcrest import assert_that, equal_to -from eligibility_signposting_api.model.rules import RuleOperator -from eligibility_signposting_api.services.rules.operators import Operator, OperatorRegistry +from eligibility_signposting_api.model.campaign_config import RuleOperator +from eligibility_signposting_api.services.operators.operators import Operator, OperatorRegistry # Test cases: person_data, rule_operator, rule_value, expected, test_comment cases: list[tuple[str | None, RuleOperator, str | None, bool, str]] = [] diff --git a/tests/unit/services/processors/__init__.py b/tests/unit/services/processors/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/unit/services/processors/test_action_rule_handler.py b/tests/unit/services/processors/test_action_rule_handler.py new file mode 100644 index 000000000..13e9d4592 --- /dev/null +++ b/tests/unit/services/processors/test_action_rule_handler.py @@ -0,0 +1,805 @@ +from unittest.mock import Mock, call, patch + +import pytest +from hamcrest import assert_that, is_ +from pydantic import HttpUrl + +from eligibility_signposting_api.model.campaign_config import AvailableAction, RuleName, RulePriority, RuleType +from eligibility_signposting_api.model.eligibility_status import ( + ActionCode, + ActionDescription, + ActionType, + InternalActionCode, + IterationResult, + MatchedActionDetail, + Status, + SuggestedAction, + UrlLabel, + UrlLink, +) +from eligibility_signposting_api.model.person import Person +from 
eligibility_signposting_api.services.processors.action_rule_handler import ActionRuleHandler +from tests.fixtures.builders.model import rule as rule_builder +from tests.fixtures.builders.model.rule import ActionsMapperFactory, IterationFactory + +# flake8: noqa: SLF001 + + +@pytest.fixture +def handler(): + return ActionRuleHandler() + + +MOCK_PERSON = Person([{"ATTRIBUTE_TYPE": "PERSON", "AGE": "30"}]) + +BOOK_NBS_COMMS = AvailableAction( + ActionType="ButtonAuthLink", + ExternalRoutingCode="BookNBS", + ActionDescription="Action description", + UrlLink=HttpUrl("https://www.nhs.uk/book-rsv"), + UrlLabel="Continue to booking", +) + +DEFAULT_COMMS_DETAIL = AvailableAction( + ActionType="CareCardWithText", + ExternalRoutingCode="BookLocal", + ActionDescription="You can get an RSV vaccination at your GP surgery", +) + + +def test_get_action_rules_components_redirect_type(): + iteration = rule_builder.IterationFactory.build( + default_comms_routing="default_redirect", + default_not_eligible_routing="default_not_eligible", + default_not_actionable_routing="default_not_actionable", + actions_mapper=ActionsMapperFactory.build(), + iteration_rules=[rule_builder.ICBRedirectRuleFactory.build(name="RedirectRule")], + ) + rules_found, mapper, default_comms = ActionRuleHandler._get_action_rules_components(iteration, RuleType.redirect) + assert_that(len(rules_found), is_(1)) + assert_that(rules_found[0].name, is_(RuleName("RedirectRule"))) + assert_that(mapper, is_(iteration.actions_mapper)) + assert_that(default_comms, is_("default_redirect")) + + +def test_get_action_rules_components_not_eligible_actions_type(): + iteration = rule_builder.IterationFactory.build( + default_comms_routing="default_redirect", + default_not_eligible_routing="default_not_eligible", + default_not_actionable_routing="default_not_actionable", + actions_mapper=ActionsMapperFactory.build(), + iteration_rules=[rule_builder.ICBNonEligibleActionRuleFactory.build(name="NonEligibleRule")], + ) + rules_found, 
mapper, default_comms = ActionRuleHandler._get_action_rules_components( + iteration, RuleType.not_eligible_actions + ) + assert_that(len(rules_found), is_(1)) + assert_that(rules_found[0].name, is_(RuleName("NonEligibleRule"))) + assert_that(mapper, is_(iteration.actions_mapper)) + assert_that(default_comms, is_("default_not_eligible")) + + +def test_get_action_rules_components_not_actionable_actions_type(): + iteration = rule_builder.IterationFactory.build( + default_comms_routing="default_redirect", + default_not_eligible_routing="default_not_eligible", + default_not_actionable_routing="default_not_actionable", + actions_mapper=ActionsMapperFactory.build(), + iteration_rules=[rule_builder.ICBNonActionableActionRuleFactory.build(name="NonActionableRule")], + ) + rules_found, mapper, default_comms = ActionRuleHandler._get_action_rules_components( + iteration, RuleType.not_actionable_actions + ) + assert_that(len(rules_found), is_(1)) + assert_that(rules_found[0].name, is_(RuleName("NonActionableRule"))) + assert_that(mapper, is_(iteration.actions_mapper)) + assert_that(default_comms, is_("default_not_actionable")) + + +def test_get_action_rules_components_no_matching_rules(): + iteration = rule_builder.IterationFactory.build( + iteration_rules=[rule_builder.PersonAgeSuppressionRuleFactory.build()] + ) + rules_found, _, _ = ActionRuleHandler._get_action_rules_components(iteration, RuleType.redirect) + assert_that(len(rules_found), is_(0)) + + +def test_get_actions_from_comms_single_comm(): + action_mapper = ActionsMapperFactory.build(root={"book_nbs": BOOK_NBS_COMMS}) + actions = ActionRuleHandler._get_actions_from_comms(action_mapper, "book_nbs") + assert_that(len(actions), is_(1)) + assert_that(actions[0].internal_action_code, is_(InternalActionCode("book_nbs"))) + assert_that(actions[0].action_code, is_(ActionCode("BookNBS"))) + + +def test_get_actions_from_comms_multiple_comms(): + action_mapper = ActionsMapperFactory.build(root={"book_nbs": BOOK_NBS_COMMS, 
"default_comms": DEFAULT_COMMS_DETAIL}) + actions = ActionRuleHandler._get_actions_from_comms(action_mapper, "book_nbs|default_comms") + assert_that(len(actions), is_(2)) + assert_that(actions[0].internal_action_code, is_(InternalActionCode("book_nbs"))) + assert_that(actions[1].internal_action_code, is_(InternalActionCode("default_comms"))) + + +def test_get_actions_from_comms_unknown_comm_code(): + action_mapper = ActionsMapperFactory.build(root={"book_nbs": BOOK_NBS_COMMS}) + actions = ActionRuleHandler._get_actions_from_comms(action_mapper, "book_nbs|unknown_code") + assert_that(len(actions), is_(1)) + assert_that(actions[0].internal_action_code, is_(InternalActionCode("book_nbs"))) + + +def test_get_actions_from_comms_empty_string(): + action_mapper = ActionsMapperFactory.build(root={"book_nbs": BOOK_NBS_COMMS}) + actions = ActionRuleHandler._get_actions_from_comms(action_mapper, "") + assert_that(len(actions), is_(0)) + + +def test_get_actions_from_comms_no_actions_found(): + action_mapper = ActionsMapperFactory.build(root={}) + actions = ActionRuleHandler._get_actions_from_comms(action_mapper, "unknown_code") + assert_that(len(actions), is_(0)) + + +@patch("eligibility_signposting_api.services.calculators.rule_calculator.RuleCalculator") +@patch.object(ActionRuleHandler, "_get_actions_from_comms") +@patch.object(ActionRuleHandler, "_get_action_rules_components") +def test_handle_actions_no_matching_rules_returns_default( + mock_get_action_rules_components, + mock_get_actions_from_comms, + mock_rule_calculator_class, + handler: ActionRuleHandler, +): + active_iteration = rule_builder.IterationFactory.build( + default_comms_routing="default_action_code", + actions_mapper=ActionsMapperFactory.build(root={"default_action_code": DEFAULT_COMMS_DETAIL}), + iteration_rules=[], + ) + + mock_get_action_rules_components.return_value = ( + [], + active_iteration.actions_mapper, + active_iteration.default_comms_routing, + ) + + mock_get_actions_from_comms.side_effect = [ 
+ [ + SuggestedAction( + internal_action_code=InternalActionCode("default_action_code"), + action_type=ActionType(DEFAULT_COMMS_DETAIL.action_type), + action_code=ActionCode(DEFAULT_COMMS_DETAIL.action_code), + action_description=ActionDescription(DEFAULT_COMMS_DETAIL.action_description), + url_link=DEFAULT_COMMS_DETAIL.url_link, + url_label=DEFAULT_COMMS_DETAIL.url_label, + ) + ], + [], + ] + + matched_action_detail = handler._handle(MOCK_PERSON, active_iteration, RuleType.redirect) + + assert_that(len(matched_action_detail.actions), is_(1)) + assert_that(matched_action_detail.actions[0].internal_action_code, is_(InternalActionCode("default_action_code"))) + assert_that(matched_action_detail.rule_priority, is_(None)) + assert_that(matched_action_detail.rule_name, is_(None)) + mock_get_action_rules_components.assert_called_once_with(active_iteration, RuleType.redirect) + mock_get_actions_from_comms.assert_called_once_with(active_iteration.actions_mapper, "default_action_code") + mock_rule_calculator_class.assert_not_called() + + +@patch("eligibility_signposting_api.services.processors.action_rule_handler.RuleCalculator") +@patch.object(ActionRuleHandler, "_get_actions_from_comms") +@patch.object(ActionRuleHandler, "_get_action_rules_components") +def test_handle_actions_matching_redirect_rule_overrides_default( + mock_get_action_rules_components, + mock_get_actions_from_comms, + mock_rule_calculator_class, + handler: ActionRuleHandler, +): + matching_rule = rule_builder.ICBRedirectRuleFactory.build( + priority=10, comms_routing="rule_specific_action", name="RuleSpecificAction" + ) + active_iteration = rule_builder.IterationFactory.build( + default_comms_routing="default_action_code", + actions_mapper=ActionsMapperFactory.build( + root={"default_action_code": DEFAULT_COMMS_DETAIL, "rule_specific_action": BOOK_NBS_COMMS} + ), + iteration_rules=[matching_rule], + ) + mock_get_action_rules_components.return_value = ( + (matching_rule,), + 
active_iteration.actions_mapper, + active_iteration.default_comms_routing, + ) + + mock_get_actions_from_comms.side_effect = [ + [ + SuggestedAction( + internal_action_code=InternalActionCode("default_action_code"), + action_type=ActionType(DEFAULT_COMMS_DETAIL.action_type), + action_code=ActionCode(DEFAULT_COMMS_DETAIL.action_code), + action_description=ActionDescription(DEFAULT_COMMS_DETAIL.action_description), + url_link=DEFAULT_COMMS_DETAIL.url_link, + url_label=DEFAULT_COMMS_DETAIL.url_label, + ) + ], + [ + SuggestedAction( + internal_action_code=InternalActionCode("rule_specific_action"), + action_type=ActionType(BOOK_NBS_COMMS.action_type), + action_code=ActionCode(BOOK_NBS_COMMS.action_code), + action_description=ActionDescription(BOOK_NBS_COMMS.action_description), + url_link=BOOK_NBS_COMMS.url_link, + url_label=BOOK_NBS_COMMS.url_label, + ) + ], + ] + + mock_rule_instance = Mock() + mock_rule_instance.evaluate_exclusion.return_value = (Status.actionable, Mock(matcher_matched=True)) + mock_rule_calculator_class.return_value = mock_rule_instance + + matched_action_detail = handler._handle(MOCK_PERSON, active_iteration, RuleType.redirect) + + assert_that(len(matched_action_detail.actions), is_(1)) + assert_that(matched_action_detail.actions[0].internal_action_code, is_(InternalActionCode("rule_specific_action"))) + assert_that(matched_action_detail.rule_priority, is_(RulePriority(10))) + assert_that(matched_action_detail.rule_name, is_(RuleName("RuleSpecificAction"))) + + mock_get_action_rules_components.assert_called_once_with(active_iteration, RuleType.redirect) + assert_that(mock_get_actions_from_comms.call_count, is_(2)) + mock_get_actions_from_comms.assert_any_call(active_iteration.actions_mapper, "default_action_code") + mock_get_actions_from_comms.assert_any_call(active_iteration.actions_mapper, "rule_specific_action") + mock_rule_calculator_class.assert_called_once_with(person=MOCK_PERSON, rule=matching_rule) + + 
+@patch("eligibility_signposting_api.services.processors.action_rule_handler.RuleCalculator") +@patch.object(ActionRuleHandler, "_get_actions_from_comms") +@patch.object(ActionRuleHandler, "_get_action_rules_components") +def test_handle_actions_matching_not_eligible_rule_overrides_default( + mock_get_action_rules_components, + mock_get_actions_from_comms, + mock_rule_calculator_class, + handler: ActionRuleHandler, +): + matching_rule = rule_builder.ICBNonEligibleActionRuleFactory.build( + priority=10, comms_routing="rule_specific_action", name="RuleSpecificAction" + ) + active_iteration = rule_builder.IterationFactory.build( + default_not_eligible_routing="default_not_eligible", + actions_mapper=ActionsMapperFactory.build( + root={"default_not_eligible": DEFAULT_COMMS_DETAIL, "rule_specific_action": BOOK_NBS_COMMS} + ), + iteration_rules=[matching_rule], + ) + mock_get_action_rules_components.return_value = ( + (matching_rule,), + active_iteration.actions_mapper, + active_iteration.default_not_eligible_routing, + ) + + mock_get_actions_from_comms.side_effect = [ + [ + SuggestedAction( + internal_action_code=InternalActionCode("default_not_eligible"), + action_type=ActionType(DEFAULT_COMMS_DETAIL.action_type), + action_code=ActionCode(DEFAULT_COMMS_DETAIL.action_code), + action_description=ActionDescription(DEFAULT_COMMS_DETAIL.action_description), + url_link=DEFAULT_COMMS_DETAIL.url_link, + url_label=DEFAULT_COMMS_DETAIL.url_label, + ) + ], + [ + SuggestedAction( + internal_action_code=InternalActionCode("rule_specific_action"), + action_type=ActionType(BOOK_NBS_COMMS.action_type), + action_code=ActionCode(BOOK_NBS_COMMS.action_code), + action_description=ActionDescription(BOOK_NBS_COMMS.action_description), + url_link=BOOK_NBS_COMMS.url_link, + url_label=BOOK_NBS_COMMS.url_label, + ) + ], + ] + + mock_rule_instance = Mock() + mock_rule_instance.evaluate_exclusion.return_value = (Status.actionable, Mock(matcher_matched=True)) + 
mock_rule_calculator_class.return_value = mock_rule_instance + + matched_action_detail = handler._handle(MOCK_PERSON, active_iteration, RuleType.not_eligible_actions) + + assert_that(len(matched_action_detail.actions), is_(1)) + assert_that(matched_action_detail.actions[0].internal_action_code, is_(InternalActionCode("rule_specific_action"))) + assert_that(matched_action_detail.rule_priority, is_(RulePriority(10))) + assert_that(matched_action_detail.rule_name, is_(RuleName("RuleSpecificAction"))) + + mock_get_action_rules_components.assert_called_once_with(active_iteration, RuleType.not_eligible_actions) + assert_that(mock_get_actions_from_comms.call_count, is_(2)) + mock_get_actions_from_comms.assert_any_call(active_iteration.actions_mapper, "default_not_eligible") + mock_get_actions_from_comms.assert_any_call(active_iteration.actions_mapper, "rule_specific_action") + mock_rule_calculator_class.assert_called_once_with(person=MOCK_PERSON, rule=matching_rule) + + +@patch("eligibility_signposting_api.services.processors.action_rule_handler.RuleCalculator") +@patch.object(ActionRuleHandler, "_get_actions_from_comms") +@patch.object(ActionRuleHandler, "_get_action_rules_components") +def test_handle_actions_matching_not_actionable_rule_overrides_default( + mock_get_action_rules_components, + mock_get_actions_from_comms, + mock_rule_calculator_class, + handler: ActionRuleHandler, +): + matching_rule = rule_builder.ICBNonActionableActionRuleFactory.build( + priority=10, comms_routing="rule_specific_action", name="RuleSpecificAction" + ) + active_iteration = rule_builder.IterationFactory.build( + default_not_actionable_routing="default_not_actionable", + actions_mapper=ActionsMapperFactory.build( + root={"default_not_actionable": DEFAULT_COMMS_DETAIL, "rule_specific_action": BOOK_NBS_COMMS} + ), + iteration_rules=[matching_rule], + ) + mock_get_action_rules_components.return_value = ( + (matching_rule,), + active_iteration.actions_mapper, + 
active_iteration.default_not_actionable_routing, + ) + + mock_get_actions_from_comms.side_effect = [ + [ + SuggestedAction( + internal_action_code=InternalActionCode("default_not_actionable"), + action_type=ActionType(DEFAULT_COMMS_DETAIL.action_type), + action_code=ActionCode(DEFAULT_COMMS_DETAIL.action_code), + action_description=ActionDescription(DEFAULT_COMMS_DETAIL.action_description), + url_link=DEFAULT_COMMS_DETAIL.url_link, + url_label=DEFAULT_COMMS_DETAIL.url_label, + ) + ], + [ + SuggestedAction( + internal_action_code=InternalActionCode("rule_specific_action"), + action_type=ActionType(BOOK_NBS_COMMS.action_type), + action_code=ActionCode(BOOK_NBS_COMMS.action_code), + action_description=ActionDescription(BOOK_NBS_COMMS.action_description), + url_link=BOOK_NBS_COMMS.url_link, + url_label=BOOK_NBS_COMMS.url_label, + ) + ], + ] + + mock_rule_instance = Mock() + mock_rule_instance.evaluate_exclusion.return_value = (Status.actionable, Mock(matcher_matched=True)) + mock_rule_calculator_class.return_value = mock_rule_instance + + matched_action_detail = handler._handle(MOCK_PERSON, active_iteration, RuleType.not_actionable_actions) + + assert_that(len(matched_action_detail.actions), is_(1)) + assert_that(matched_action_detail.actions[0].internal_action_code, is_(InternalActionCode("rule_specific_action"))) + assert_that(matched_action_detail.rule_priority, is_(RulePriority(10))) + assert_that(matched_action_detail.rule_name, is_(RuleName("RuleSpecificAction"))) + + mock_get_action_rules_components.assert_called_once_with(active_iteration, RuleType.not_actionable_actions) + assert_that(mock_get_actions_from_comms.call_count, is_(2)) + mock_get_actions_from_comms.assert_any_call(active_iteration.actions_mapper, "default_not_actionable") + mock_get_actions_from_comms.assert_any_call(active_iteration.actions_mapper, "rule_specific_action") + mock_rule_calculator_class.assert_called_once_with(person=MOCK_PERSON, rule=matching_rule) + + 
+@patch("eligibility_signposting_api.services.processors.action_rule_handler.RuleCalculator") +@patch.object(ActionRuleHandler, "_get_actions_from_comms") +@patch.object(ActionRuleHandler, "_get_action_rules_components") +def test_handle_non_matching_rule_returns_default( + mock_get_action_rules_components, + mock_get_actions_from_comms, + mock_rule_calculator_class, + handler: ActionRuleHandler, +): + non_matching_rule = rule_builder.ICBRedirectRuleFactory.build( + priority=10, comms_routing="rule_specific_action", name="RuleSpecificAction" + ) + active_iteration = rule_builder.IterationFactory.build( + default_comms_routing="default_action_code", + actions_mapper=ActionsMapperFactory.build( + root={"default_action_code": DEFAULT_COMMS_DETAIL, "rule_specific_action": BOOK_NBS_COMMS} + ), + iteration_rules=[non_matching_rule], + ) + rule_type = RuleType.redirect + + mock_get_action_rules_components.return_value = ( + (non_matching_rule,), + active_iteration.actions_mapper, + active_iteration.default_comms_routing, + ) + + mock_get_actions_from_comms.side_effect = [ + [ + SuggestedAction( + internal_action_code=InternalActionCode("default_action_code"), + action_type=ActionType(DEFAULT_COMMS_DETAIL.action_type), + action_code=ActionCode(DEFAULT_COMMS_DETAIL.action_code), + action_description=ActionDescription(DEFAULT_COMMS_DETAIL.action_description), + url_link=DEFAULT_COMMS_DETAIL.url_link, + url_label=DEFAULT_COMMS_DETAIL.url_label, + ) + ], + [ + SuggestedAction( + internal_action_code=InternalActionCode("rule_specific_action"), + action_type=ActionType(BOOK_NBS_COMMS.action_type), + action_code=ActionCode(BOOK_NBS_COMMS.action_code), + action_description=ActionDescription(BOOK_NBS_COMMS.action_description), + url_link=BOOK_NBS_COMMS.url_link, + url_label=BOOK_NBS_COMMS.url_label, + ) + ], + ] + + mock_rule_calculator_class.return_value.evaluate_exclusion.return_value = ( + Status.actionable, + Mock(matcher_matched=False), + ) + + matched_action_detail = 
handler._handle(MOCK_PERSON, active_iteration, rule_type) + + assert_that(len(matched_action_detail.actions), is_(1)) + assert_that(matched_action_detail.actions[0].internal_action_code, is_(InternalActionCode("default_action_code"))) + assert_that(matched_action_detail.rule_priority, is_(None)) + assert_that(matched_action_detail.rule_name, is_(None)) + + mock_get_action_rules_components.assert_called_once_with(active_iteration, rule_type) + assert_that(mock_get_actions_from_comms.call_count, is_(1)) + mock_get_actions_from_comms.assert_called_once_with(active_iteration.actions_mapper, "default_action_code") + mock_rule_calculator_class.assert_called_once_with(person=MOCK_PERSON, rule=non_matching_rule) + + +@patch("eligibility_signposting_api.services.processors.action_rule_handler.RuleCalculator") +@patch.object(ActionRuleHandler, "_get_actions_from_comms") +@patch.object(ActionRuleHandler, "_get_action_rules_components") +def test_handle_multiple_rules_same_priority_all_match( + mock_get_action_rules_components, + mock_get_actions_from_comms, + mock_rule_calculator_class, + handler: ActionRuleHandler, +): + rule1 = rule_builder.ICBRedirectRuleFactory.build(priority=10, comms_routing="action_a", name="RuleA") + rule2 = rule_builder.ICBRedirectRuleFactory.build(priority=10, comms_routing="action_b", name="RuleB") + active_iteration = rule_builder.IterationFactory.build( + default_comms_routing="default_action_code", + actions_mapper=ActionsMapperFactory.build( + root={ + "default_action_code": DEFAULT_COMMS_DETAIL, + "action_a": BOOK_NBS_COMMS, + "action_b": DEFAULT_COMMS_DETAIL, + } + ), + iteration_rules=[rule1, rule2], + ) + + mock_get_action_rules_components.return_value = ( + (rule1, rule2), + active_iteration.actions_mapper, + active_iteration.default_comms_routing, + ) + + mock_get_actions_from_comms.side_effect = [ + [ + SuggestedAction( + internal_action_code=InternalActionCode("default_action_code"), + 
action_type=ActionType(DEFAULT_COMMS_DETAIL.action_type), + action_code=ActionCode(DEFAULT_COMMS_DETAIL.action_code), + action_description=ActionDescription(DEFAULT_COMMS_DETAIL.action_description), + url_link=DEFAULT_COMMS_DETAIL.url_link, + url_label=DEFAULT_COMMS_DETAIL.url_label, + ) + ], + [ + SuggestedAction( + internal_action_code=InternalActionCode("action_a"), + action_type=ActionType(BOOK_NBS_COMMS.action_type), + action_code=ActionCode(BOOK_NBS_COMMS.action_code), + action_description=ActionDescription(BOOK_NBS_COMMS.action_description), + url_link=BOOK_NBS_COMMS.url_link, + url_label=BOOK_NBS_COMMS.url_label, + ), + SuggestedAction( + internal_action_code=InternalActionCode("action_b"), + action_type=ActionType(DEFAULT_COMMS_DETAIL.action_type), + action_code=ActionCode(DEFAULT_COMMS_DETAIL.action_code), + action_description=ActionDescription(DEFAULT_COMMS_DETAIL.action_description), + url_link=DEFAULT_COMMS_DETAIL.url_link, + url_label=DEFAULT_COMMS_DETAIL.url_label, + ), + ], + ] + + mock_rule_calculator_class.side_effect = [ + Mock(evaluate_exclusion=Mock(return_value=(Status.actionable, Mock(matcher_matched=True)))), + Mock(evaluate_exclusion=Mock(return_value=(Status.actionable, Mock(matcher_matched=True)))), + ] + + matched_action_detail = handler._handle(MOCK_PERSON, active_iteration, RuleType.redirect) + + assert_that(len(matched_action_detail.actions), is_(2)) + assert_that(matched_action_detail.actions[0].internal_action_code, is_(InternalActionCode("action_a"))) + assert_that(matched_action_detail.actions[1].internal_action_code, is_(InternalActionCode("action_b"))) + assert_that(matched_action_detail.rule_priority, is_(RulePriority(10))) + assert_that(matched_action_detail.rule_name, is_(RuleName("RuleA"))) + + assert_that(mock_rule_calculator_class.call_count, is_(2)) + assert_that(mock_get_actions_from_comms.call_count, is_(2)) + mock_get_actions_from_comms.assert_any_call(active_iteration.actions_mapper, "default_action_code") + 
mock_get_actions_from_comms.assert_any_call(active_iteration.actions_mapper, "action_a") + + +@patch("eligibility_signposting_api.services.processors.action_rule_handler.RuleCalculator") +@patch.object(ActionRuleHandler, "_get_actions_from_comms") +@patch.object(ActionRuleHandler, "_get_action_rules_components") +def test_handle_multiple_rules_same_priority_one_mismatch( + mock_get_action_rules_components, + mock_get_actions_from_comms, + mock_rule_calculator_class, + handler: ActionRuleHandler, +): + rule1 = rule_builder.ICBRedirectRuleFactory.build(priority=10, comms_routing="action_a", name="RuleA") + rule2 = rule_builder.ICBRedirectRuleFactory.build(priority=10, comms_routing="action_b", name="RuleB") + active_iteration = rule_builder.IterationFactory.build( + default_comms_routing="default_action_code", + actions_mapper=ActionsMapperFactory.build( + root={ + "default_action_code": DEFAULT_COMMS_DETAIL, + "action_a": BOOK_NBS_COMMS, + "action_b": DEFAULT_COMMS_DETAIL, + } + ), + iteration_rules=[rule1, rule2], + ) + rule_type = RuleType.redirect + + mock_get_action_rules_components.return_value = ( + (rule1, rule2), + active_iteration.actions_mapper, + active_iteration.default_comms_routing, + ) + + mock_get_actions_from_comms.side_effect = [ + [ + SuggestedAction( + internal_action_code=InternalActionCode("default_action_code"), + action_type=ActionType("DefaultInfoText"), + action_code=ActionCode("DefaultHealthcareProInfo"), + action_description=ActionDescription("Default Speak to your healthcare professional."), + url_link=None, + url_label=None, + ) + ] + ] + + mock_rule_calculator_class.side_effect = [ + Mock(evaluate_exclusion=Mock(return_value=(Status.actionable, Mock(matcher_matched=True)))), + Mock(evaluate_exclusion=Mock(return_value=(Status.actionable, Mock(matcher_matched=False)))), + ] + + matched_action_detail = handler._handle(MOCK_PERSON, active_iteration, rule_type) + + assert_that(len(matched_action_detail.actions), is_(1)) + 
assert_that(matched_action_detail.actions[0].internal_action_code, is_(InternalActionCode("default_action_code"))) + assert_that(matched_action_detail.rule_priority, is_(None)) + assert_that(matched_action_detail.rule_name, is_(None)) + + mock_get_action_rules_components.assert_called_once_with(active_iteration, rule_type) + assert_that(mock_get_actions_from_comms.call_count, is_(1)) + mock_get_actions_from_comms.assert_called_once_with(active_iteration.actions_mapper, "default_action_code") + assert_that( + mock_rule_calculator_class.call_args_list, + is_([call(person=MOCK_PERSON, rule=rule1), call(person=MOCK_PERSON, rule=rule2)]), + ) + + +@patch("eligibility_signposting_api.services.processors.action_rule_handler.RuleCalculator") +@patch.object(ActionRuleHandler, "_get_actions_from_comms") +@patch.object(ActionRuleHandler, "_get_action_rules_components") +def test_handle_different_priority_rules_highest_priority_wins( + mock_get_action_rules_components, + mock_get_actions_from_comms, + mock_rule_calculator_class, + handler: ActionRuleHandler, +): + lower_priority_rule = rule_builder.ICBRedirectRuleFactory.build( + priority=20, comms_routing="action_low", name="LowP" + ) + higher_priority_rule = rule_builder.ICBRedirectRuleFactory.build( + priority=10, comms_routing="action_high", name="HighP" + ) + active_iteration = rule_builder.IterationFactory.build( + default_comms_routing="default_action_code", + actions_mapper=ActionsMapperFactory.build( + root={ + "default_action_code": DEFAULT_COMMS_DETAIL, + "action_low": DEFAULT_COMMS_DETAIL, + "action_high": BOOK_NBS_COMMS, + } + ), + iteration_rules=[lower_priority_rule, higher_priority_rule], + ) + rule_type = RuleType.redirect + + mock_get_action_rules_components.return_value = ( + (lower_priority_rule, higher_priority_rule), + active_iteration.actions_mapper, + active_iteration.default_comms_routing, + ) + + mock_get_actions_from_comms.side_effect = [ + [ + SuggestedAction( + 
internal_action_code=InternalActionCode("default_action_code"), + action_type=ActionType("DefaultInfoText"), + action_code=ActionCode("DefaultHealthcareProInfo"), + action_description=ActionDescription("Default Speak to your healthcare professional."), + url_link=None, + url_label=None, + ) + ], + [ + SuggestedAction( + internal_action_code=InternalActionCode("action_high"), + action_type=ActionType("ButtonAuthLink"), + action_code=ActionCode("BookNBS"), + action_description=ActionDescription("Action description"), + url_link=UrlLink(HttpUrl("https://www.nhs.uk/book-rsv")), + url_label=UrlLabel("Continue to booking"), + ) + ], + [ + SuggestedAction( + internal_action_code=InternalActionCode("action_low"), + action_type=ActionType("CareCardWithText"), + action_code=ActionCode("BookLocal"), + action_description=ActionDescription("You can get an RSV vaccination at your GP surgery"), + url_link=None, + url_label=None, + ) + ], + ] + + mock_rule_calculator_class.side_effect = [ + Mock(evaluate_exclusion=Mock(return_value=(Status.actionable, Mock(matcher_matched=True)))), + Mock(evaluate_exclusion=Mock(return_value=(Status.actionable, Mock(matcher_matched=True)))), + ] + + matched_action_detail = handler._handle(MOCK_PERSON, active_iteration, rule_type) + + assert_that(len(matched_action_detail.actions), is_(1)) + assert_that(matched_action_detail.actions[0].internal_action_code, is_(InternalActionCode("action_high"))) + assert_that(matched_action_detail.rule_priority, is_(RulePriority(10))) + assert_that(matched_action_detail.rule_name, is_(RuleName("HighP"))) + + assert_that(mock_rule_calculator_class.call_count, is_(1)) + mock_rule_calculator_class.assert_called_once_with(person=MOCK_PERSON, rule=higher_priority_rule) + assert_that(mock_get_actions_from_comms.call_count, is_(2)) + mock_get_actions_from_comms.assert_any_call(active_iteration.actions_mapper, "default_action_code") + mock_get_actions_from_comms.assert_any_call(active_iteration.actions_mapper, 
"action_high") + + +def test_handle_no_actions_mapper_entry_for_rule_comms_returns_default(handler: ActionRuleHandler): + matching_rule = rule_builder.ICBRedirectRuleFactory.build( + priority=10, comms_routing="non_existent_action", name="RuleSpecificAction" + ) + active_iteration = rule_builder.IterationFactory.build( + default_comms_routing="default_action_code", + actions_mapper=ActionsMapperFactory.build(root={"default_action_code": DEFAULT_COMMS_DETAIL}), + iteration_rules=[matching_rule], + ) + rule_type = RuleType.redirect + + with ( + patch.object(ActionRuleHandler, "_get_action_rules_components") as mock_get_action_rules_components, + patch.object(ActionRuleHandler, "_get_actions_from_comms") as mock_get_actions_from_comms, + patch( + "eligibility_signposting_api.services.processors.action_rule_handler.RuleCalculator" + ) as mock_rule_calculator_class, + ): + mock_get_action_rules_components.return_value = ( + (matching_rule,), + active_iteration.actions_mapper, + active_iteration.default_comms_routing, + ) + mock_get_actions_from_comms.side_effect = [ + [ + SuggestedAction( + internal_action_code=InternalActionCode("default_action_code"), + action_type=ActionType("DefaultInfoText"), + action_code=ActionCode("DefaultHealthcareProInfo"), + action_description=ActionDescription("Default Speak to your healthcare professional."), + url_link=None, + url_label=None, + ) + ], + None, + ] + mock_rule_calculator_class.return_value.evaluate_exclusion.return_value = ( + Status.actionable, + Mock(matcher_matched=True), + ) + + matched_action_detail = handler._handle(MOCK_PERSON, active_iteration, rule_type) + + assert_that(len(matched_action_detail.actions), is_(1)) + assert_that( + matched_action_detail.actions[0].internal_action_code, is_(InternalActionCode("default_action_code")) + ) + assert_that(matched_action_detail.rule_priority, is_(RulePriority(10))) + assert_that(matched_action_detail.rule_name, is_(RuleName("RuleSpecificAction"))) + + 
assert_that(mock_get_actions_from_comms.call_count, is_(2)) + mock_get_actions_from_comms.assert_any_call(active_iteration.actions_mapper, "default_action_code") + mock_get_actions_from_comms.assert_any_call(active_iteration.actions_mapper, "non_existent_action") + mock_rule_calculator_class.assert_called_once() + + +def test_handle_no_default_comms_and_no_matching_rule(handler: ActionRuleHandler): + active_iteration = rule_builder.IterationFactory.build( + default_comms_routing="", + actions_mapper=ActionsMapperFactory.build(root={}), + iteration_rules=[rule_builder.ICBRedirectRuleFactory.build(comms_routing="some_action")], + ) + rule_type = RuleType.redirect + + with ( + patch.object(ActionRuleHandler, "_get_action_rules_components") as mock_get_action_rules_components, + patch.object(ActionRuleHandler, "_get_actions_from_comms") as mock_get_actions_from_comms, + patch( + "eligibility_signposting_api.services.processors.action_rule_handler.RuleCalculator" + ) as mock_rule_calculator_class, + ): + mock_get_action_rules_components.return_value = ( + (rule_builder.ICBRedirectRuleFactory.build(comms_routing="some_action"),), + active_iteration.actions_mapper, + None, + ) + mock_get_actions_from_comms.side_effect = [None, None] + mock_rule_calculator_class.return_value.evaluate_exclusion.return_value = ( + Status.actionable, + Mock(matcher_matched=True), + ) + + matched_action_detail = handler._handle(MOCK_PERSON, active_iteration, rule_type) + + assert_that(matched_action_detail.actions, is_(None)) + assert_that(matched_action_detail.rule_priority, is_(RulePriority(20))) + assert_that(matched_action_detail.rule_name, is_(RuleName("In QE1"))) + + assert_that(mock_get_actions_from_comms.call_count, is_(2)) + mock_get_actions_from_comms.assert_any_call(active_iteration.actions_mapper, None) + mock_get_actions_from_comms.assert_any_call(active_iteration.actions_mapper, "some_action") + mock_rule_calculator_class.assert_called_once() + + +@patch.object(ActionRuleHandler, 
"_handle") +def test_handle_when_active_iteration_present_and_include_actions_is_true(mock_handle, handler: ActionRuleHandler): + mock_handle.side_effect = [MatchedActionDetail()] + + handler.get_actions( + MOCK_PERSON, IterationFactory.build(), IterationResult(Status.actionable, [], []), include_actions_flag=True + ) + + assert_that(mock_handle.call_count, is_(1)) + + +@patch.object(ActionRuleHandler, "_handle") +def test_handle_when_active_iteration_absent_and_include_actions_is_true(mock_handle, handler: ActionRuleHandler): + mock_handle.side_effect = [MatchedActionDetail()] + + handler.get_actions(MOCK_PERSON, None, IterationResult(Status.actionable, [], []), include_actions_flag=True) + + assert_that(mock_handle.call_count, is_(0)) + + +@patch.object(ActionRuleHandler, "_handle") +def test_handle_is_not_called_when_include_actions_is_false(mock_handle, handler: ActionRuleHandler): + mock_handle.side_effect = [MatchedActionDetail()] + + handler.get_actions( + MOCK_PERSON, IterationFactory.build(), IterationResult(Status.actionable, [], []), include_actions_flag=False + ) + + assert_that(mock_handle.call_count, is_(0)) diff --git a/tests/unit/services/processors/test_campaign_evaluator.py b/tests/unit/services/processors/test_campaign_evaluator.py new file mode 100644 index 000000000..a0b59a53a --- /dev/null +++ b/tests/unit/services/processors/test_campaign_evaluator.py @@ -0,0 +1,117 @@ +import datetime + +import pytest +from hamcrest import assert_that, is_ + +from eligibility_signposting_api.model.campaign_config import CampaignID +from eligibility_signposting_api.services.processors.campaign_evaluator import CampaignEvaluator +from tests.fixtures.builders.model import rule + + +@pytest.fixture +def campaign_evaluator(): + return CampaignEvaluator() + + +@pytest.mark.parametrize( + ("campaign_target", "campaign_type", "conditions_filter", "category_filter", "expected_result"), + [ + ("RSV", "V", ["RSV"], "VACCINATIONS", [("RSV", "V")]), + ("RSV", "V", 
["COVID"], "VACCINATIONS", []), + ("RSV", "S", ["RSV"], "ALL", [("RSV", "S")]), + ("RSV", "S", ["ALL"], "ALL", [("RSV", "S")]), + ("RSV", "S", ["RSV"], "VACCINATIONS", []), + ("RSV", "V", ["RSV"], "ALL", [("RSV", "V")]), + ("FLU", "V", ["COVID", "RSV"], "ALL", []), + ("FLU", "S", ["ALL"], "ALL", [("FLU", "S")]), + ("COVID", "V", ["UNKNOWN"], "VACCINATIONS", []), + ("FLU", "V", ["COVID", "FLU"], "VACCINATIONS", [("FLU", "V")]), + ], +) +def test_campaigns_grouped_by_condition_name_filters_correctly( # noqa: PLR0913 + campaign_evaluator, campaign_target, campaign_type, conditions_filter, category_filter, expected_result +): + campaign = rule.CampaignConfigFactory.build(target=campaign_target, type=campaign_type) + + result = campaign_evaluator.get_requested_grouped_campaigns([campaign], conditions_filter, category_filter) + assert_that([(str(name), group[0].type) for name, group in result], is_(expected_result)) + + +def test_campaigns_grouped_by_condition_name_with_no_campaigns(campaign_evaluator): + result = campaign_evaluator.get_requested_grouped_campaigns([], ["RSV"], "VACCINATIONS") + assert_that(list(result), is_([])) + + +def test_campaigns_grouped_by_condition_name_with_no_active_campaigns(campaign_evaluator): + campaign = rule.CampaignConfigFactory.build( + target="RSV", type="V", start_date=datetime.date(2025, 4, 20), end_date=datetime.date(2025, 4, 21) + ) + + result = campaign_evaluator.get_requested_grouped_campaigns([campaign], ["RSV"], "VACCINATIONS") + assert_that(list(result), is_([])) + + +@pytest.mark.parametrize( + ("category_filter", "campaign_type", "expected_count"), + [ + ("SCREENING", "S", 1), + ("SCREENING", "V", 0), + ("INVALID_CATEGORY", "S", 0), + ], +) +def test_campaigns_grouped_by_condition_name_with_various_categories( + campaign_evaluator, category_filter, campaign_type, expected_count +): + campaign = rule.CampaignConfigFactory.build(target="COVID", type=campaign_type) + result = 
list(campaign_evaluator.get_requested_grouped_campaigns([campaign], ["COVID"], category_filter)) + assert_that(len(result), is_(expected_count)) + if expected_count > 0: + assert_that(str(result[0][0]), is_("COVID")) + + +def test_campaigns_grouped_by_condition_name_with_empty_conditions_filter(campaign_evaluator): + campaign = rule.CampaignConfigFactory.build(target="RSV", type="V") + result = campaign_evaluator.get_requested_grouped_campaigns([campaign], [], "VACCINATIONS") + assert_that(list(result), is_([])) + + +def test_campaigns_grouped_by_condition_name_groups_multiple_campaigns_for_same_target(campaign_evaluator): + campaign1 = rule.CampaignConfigFactory.build(target="COVID", type="V", id="C1") + campaign2 = rule.CampaignConfigFactory.build(target="COVID", type="V", id="C2") + campaign3 = rule.CampaignConfigFactory.build(target="FLU", type="V", id="F1") + inactive_campaign = rule.CampaignConfigFactory.build( + target="COVID", type="V", id="C3", start_date=datetime.date(2025, 4, 20), end_date=datetime.date(2025, 4, 21) + ) + + all_campaigns = [campaign1, campaign2, campaign3, inactive_campaign] + result = list(campaign_evaluator.get_requested_grouped_campaigns(all_campaigns, ["COVID", "FLU"], "VACCINATIONS")) + + assert_that(len(result), is_(2)) + + result_dict = {str(name): campaigns for name, campaigns in result} + assert_that("COVID" in result_dict) + assert_that("FLU" in result_dict) + + assert_that(len(result_dict["COVID"]), is_(2)) + assert_that({c.id for c in result_dict["COVID"]}, is_({CampaignID("C1"), CampaignID("C2")})) + + assert_that(len(result_dict["FLU"]), is_(1)) + assert_that(result_dict["FLU"][0].id, is_(CampaignID("F1"))) + + +def test_campaign_grouping_is_affected_by_order_for_mixed_types(campaign_evaluator): + campaign_v = rule.CampaignConfigFactory.build(target="RSV", type="V") + campaign_s = rule.CampaignConfigFactory.build(target="RSV", type="S") + + evaluator_s_first = campaign_evaluator + result_s_first = list( + 
evaluator_s_first.get_requested_grouped_campaigns([campaign_s, campaign_v], ["RSV"], "VACCINATIONS") + ) + assert_that(result_s_first, is_([])) + + evaluator_v_first = campaign_evaluator + result_v_first = list( + evaluator_v_first.get_requested_grouped_campaigns([campaign_v, campaign_s], ["RSV"], "VACCINATIONS") + ) + assert_that(len(result_v_first), is_(1)) + assert_that(len(result_v_first[0][1]), is_(2)) diff --git a/tests/unit/services/processors/test_cohort_handler.py b/tests/unit/services/processors/test_cohort_handler.py new file mode 100644 index 000000000..8eb25ca32 --- /dev/null +++ b/tests/unit/services/processors/test_cohort_handler.py @@ -0,0 +1,122 @@ +from unittest.mock import Mock + +import pytest +from hamcrest import assert_that, has_length, is_ + +from eligibility_signposting_api.model.eligibility_status import CohortGroupResult, Status +from eligibility_signposting_api.model.person import Person +from eligibility_signposting_api.services.processors.cohort_handler import ( + BaseEligibilityHandler, + CohortEligibilityHandler, + FilterRuleHandler, + SuppressionRuleHandler, +) +from eligibility_signposting_api.services.processors.rule_processor import RuleProcessor +from tests.fixtures.builders.model import rule as rule_builder + +MOCK_PERSON = Person([{"ATTRIBUTE_TYPE": "PERSON", "AGE": "30"}]) + + +@pytest.fixture +def mock_rule_processor_for_handlers(): + return Mock(spec=RuleProcessor) + + +@pytest.fixture +def mock_next_handler(): + return Mock(spec=CohortEligibilityHandler) + + +def test_base_eligibility_handler_is_base_eligible(mock_rule_processor_for_handlers, mock_next_handler): + handler = BaseEligibilityHandler(next_handler=mock_next_handler) + cohort = rule_builder.IterationCohortFactory.build(cohort_label="cohort1") + cohort_results = {} + + mock_rule_processor_for_handlers.is_base_eligible.return_value = True + + handler.handle(MOCK_PERSON, cohort, cohort_results, mock_rule_processor_for_handlers) + + 
mock_rule_processor_for_handlers.is_base_eligible.assert_called_once_with(MOCK_PERSON, cohort) + assert_that(cohort_results, is_({})) + + mock_next_handler.handle.assert_called_once_with( + MOCK_PERSON, cohort, cohort_results, mock_rule_processor_for_handlers + ) + + +def test_base_eligibility_handler_is_not_base_eligible(mock_rule_processor_for_handlers, mock_next_handler): + handler = BaseEligibilityHandler(next_handler=mock_next_handler) + cohort = rule_builder.IterationCohortFactory.build(cohort_label="cohort1", negative_description="Not Base eligible") + cohort_results = {} + + mock_rule_processor_for_handlers.is_base_eligible.return_value = False + + handler.handle(MOCK_PERSON, cohort, cohort_results, mock_rule_processor_for_handlers) + + mock_rule_processor_for_handlers.is_base_eligible.assert_called_once_with(MOCK_PERSON, cohort) + assert_that(cohort_results, has_length(1)) + assert_that(cohort_results["cohort1"].status, is_(Status.not_eligible)) + assert_that(cohort_results["cohort1"].description, is_("Not Base eligible")) + mock_next_handler.handle.assert_not_called() + + +def test_filter_rule_handler_is_eligible(mock_rule_processor_for_handlers, mock_next_handler): + cohort = rule_builder.IterationCohortFactory.build(cohort_label="cohort1") + cohort_results = {} + filter_rules = [Mock()] + handler = FilterRuleHandler(next_handler=mock_next_handler, filter_rules=filter_rules) + + mock_rule_processor_for_handlers.is_eligible.return_value = True + + handler.handle(MOCK_PERSON, cohort, cohort_results, mock_rule_processor_for_handlers) + + mock_rule_processor_for_handlers.is_eligible.assert_called_once_with( + MOCK_PERSON, cohort, cohort_results, filter_rules + ) + assert_that(cohort_results, is_({})) + + mock_next_handler.handle.assert_called_once_with( + MOCK_PERSON, cohort, cohort_results, mock_rule_processor_for_handlers + ) + + +def test_filter_rule_handler_is_not_eligible(mock_rule_processor_for_handlers, mock_next_handler): + filter_rules = [Mock()] + 
handler = FilterRuleHandler(next_handler=mock_next_handler, filter_rules=filter_rules) + cohort = rule_builder.IterationCohortFactory.build(cohort_label="cohort1", negative_description="Not Eligible") + cohort_results = {} + + mock_rule_processor_for_handlers.is_eligible.side_effect = ( + lambda p, c, cr, fr: cr.update( # noqa: ARG005 + {c.cohort_label: CohortGroupResult(c.cohort_group, Status.not_eligible, [], c.negative_description, [])} + ) + or False + ) + + handler.handle(MOCK_PERSON, cohort, cohort_results, mock_rule_processor_for_handlers) + + mock_rule_processor_for_handlers.is_eligible.assert_called_once_with( + MOCK_PERSON, cohort, cohort_results, filter_rules + ) + assert_that(cohort_results, has_length(1)) + assert_that(cohort_results["cohort1"].status, is_(Status.not_eligible)) + mock_next_handler.handle.assert_not_called() + + +def test_suppression_rule_handler_is_actionable(mock_rule_processor_for_handlers): + suppression_rules = [Mock()] + handler = SuppressionRuleHandler(suppression_rules=suppression_rules) + cohort = rule_builder.IterationCohortFactory.build(cohort_label="cohort1", positive_description="Actionable") + cohort_results = {} + + mock_rule_processor_for_handlers.is_actionable.side_effect = lambda p, c, cr, sr: cr.update( # noqa: ARG005 + {c.cohort_label: CohortGroupResult(c.cohort_group, Status.actionable, [], c.positive_description, [])} + ) + + handler.handle(MOCK_PERSON, cohort, cohort_results, mock_rule_processor_for_handlers) + + mock_rule_processor_for_handlers.is_actionable.assert_called_once_with( + MOCK_PERSON, cohort, cohort_results, suppression_rules + ) + assert_that(cohort_results, has_length(1)) + assert_that(cohort_results["cohort1"].status, is_(Status.actionable)) diff --git a/tests/unit/services/processors/test_person_data_reader.py b/tests/unit/services/processors/test_person_data_reader.py new file mode 100644 index 000000000..6219cd8a3 --- /dev/null +++ b/tests/unit/services/processors/test_person_data_reader.py @@ 
-0,0 +1,102 @@ +import pytest +from hamcrest import assert_that, is_ + +from eligibility_signposting_api.model.person import Person +from eligibility_signposting_api.services.processors.person_data_reader import PersonDataReader + + +@pytest.fixture +def person_data_reader(): + return PersonDataReader() + + +def test_get_person_cohorts_empty_data(person_data_reader): + result = person_data_reader.get_person_cohorts(Person([])) + assert_that(result, is_(set())) + + +def test_get_person_cohorts_no_cohorts_attribute_type(person_data_reader): + no_cohorts_type = Person( + [ + {"ATTRIBUTE_TYPE": "NAME", "VALUE": "John Doe"}, + {"ATTRIBUTE_TYPE": "AGE", "VALUE": 30}, + ] + ) + result = person_data_reader.get_person_cohorts(no_cohorts_type) + assert_that(result, is_(set())) + + +def test_get_person_cohorts_no_cohort_map_key(person_data_reader): + no_cohorts_map = Person( + [ + {"ATTRIBUTE_TYPE": "COHORTS", "OTHER_FIELD": "value"}, + ] + ) + result = person_data_reader.get_person_cohorts(no_cohorts_map) + assert_that(result, is_(set())) + + +def test_get_person_cohorts_single_cohort(person_data_reader): + single_cohorts = Person( + [ + { + "ATTRIBUTE_TYPE": "COHORTS", + "COHORT_MEMBERSHIPS": [{"COHORT_LABEL": "flu_65+_autumnwinter2023", "DATE_JOINED": "20231020"}], + }, + {"ATTRIBUTE_TYPE": "NAME", "VALUE": "Jane Smith"}, + ] + ) + result = person_data_reader.get_person_cohorts(single_cohorts) + assert_that(result, is_({"flu_65+_autumnwinter2023"})) + + +def test_get_person_cohorts_multiple_cohorts(person_data_reader): + multiple_cohorts = Person( + [ + { + "ATTRIBUTE_TYPE": "COHORTS", + "COHORT_MEMBERSHIPS": [ + {"COHORT_LABEL": "COHORT_B", "DATE_JOINED": "20231020"}, + {"COHORT_LABEL": "COHORT_C", "DATE_JOINED": "20241020"}, + ], + }, + {"ATTRIBUTE_TYPE": "AGE", "VALUE": 45}, + ] + ) + result = person_data_reader.get_person_cohorts(multiple_cohorts) + assert_that(result, is_({"COHORT_B", "COHORT_C"})) + + +def test_get_person_cohorts_mixed_data(person_data_reader): + 
mixed_data = Person( + [ + { + "ATTRIBUTE_TYPE": "COHORTS", + "COHORT_MEMBERSHIPS": [ + {"COHORT_LABEL": "COHORT_D", "DATE_JOINED": "20231020"}, + {"COHORT_LABEL": "COHORT_E", "DATE_JOINED": "20241020"}, + ], + }, + {"ATTRIBUTE_TYPE": "NAME", "VALUE": "Alice"}, + {"ATTRIBUTE_TYPE": "ADDRESS", "VALUE": "123 Main St"}, + ] + ) + + result = person_data_reader.get_person_cohorts(mixed_data) + assert_that(result, is_({"COHORT_D", "COHORT_E"})) + + +def test_get_person_cohorts_with_other_attribute_types_present(person_data_reader): + data = Person( + [ + { + "ATTRIBUTE_TYPE": "COHORTS", + "COHORT_MEMBERSHIPS": [{"COHORT_LABEL": "COHORT_F", "DATE_JOINED": "20231020"}], + }, + {"ATTRIBUTE_TYPE": "NAME", "VALUE": "Charlie"}, + {"ATTRIBUTE_TYPE": "AGE", "VALUE": 25}, + ] + ) + + result = person_data_reader.get_person_cohorts(data) + assert_that(result, is_({"COHORT_F"})) diff --git a/tests/unit/services/processors/test_rule_processor.py b/tests/unit/services/processors/test_rule_processor.py new file mode 100644 index 000000000..592322770 --- /dev/null +++ b/tests/unit/services/processors/test_rule_processor.py @@ -0,0 +1,657 @@ +from unittest.mock import Mock, patch + +import pytest +from hamcrest import assert_that, empty, is_ + +from eligibility_signposting_api.model.campaign_config import CohortLabel, RuleType +from eligibility_signposting_api.model.eligibility_status import CohortGroupResult, Reason, RuleName, Status +from eligibility_signposting_api.model.person import Person +from eligibility_signposting_api.services.processors.person_data_reader import PersonDataReader +from eligibility_signposting_api.services.processors.rule_processor import RuleProcessor +from tests.fixtures.builders.model import rule as rule_builder +from tests.fixtures.builders.model.eligibility import ReasonFactory + + +@pytest.fixture +def mock_person_data_reader(): + return Mock(spec=PersonDataReader) + + +@pytest.fixture +def rule_processor(mock_person_data_reader): + return 
RuleProcessor(mock_person_data_reader) + + +MOCK_PERSON_DATA = Person([{"ATTRIBUTE_TYPE": "PERSON", "AGE": "30"}]) + + +def test_get_exclusion_rules_no_rules(): + cohort = rule_builder.IterationCohortFactory.build(cohort_label="COHORT_A") + rules_to_filter = [] + result = list(RuleProcessor.get_exclusion_rules(cohort, rules_to_filter)) + assert_that(result, is_([])) + + +def test_get_exclusion_rules_general_rule(): + cohort = rule_builder.IterationCohortFactory.build(cohort_label="COHORT_A") + no_cohort_label_rule = rule_builder.IterationRuleFactory.build(cohort_label=None) + rules_to_filter = [no_cohort_label_rule] + result = list(RuleProcessor.get_exclusion_rules(cohort, rules_to_filter)) + assert_that(result, is_([no_cohort_label_rule])) + + +def test_get_exclusion_rules_matching_cohort_label(): + cohort = rule_builder.IterationCohortFactory.build(cohort_label="COHORT_A") + matching_rule = rule_builder.IterationRuleFactory.build(cohort_label="COHORT_A") + rules_to_filter = [matching_rule] + result = list(RuleProcessor.get_exclusion_rules(cohort, rules_to_filter)) + assert_that(result, is_([matching_rule])) + + +def test_get_exclusion_rules_non_matching_cohort_label(): + cohort = rule_builder.IterationCohortFactory.build(cohort_label="COHORT_A") + non_matching_rule = rule_builder.IterationRuleFactory.build(cohort_label="COHORT_B") + rules_to_filter = [non_matching_rule] + result = list(RuleProcessor.get_exclusion_rules(cohort, rules_to_filter)) + assert_that(result, is_([])) + + +def test_get_exclusion_rules_matching_from_list_cohort_label(): + cohort = rule_builder.IterationCohortFactory.build(cohort_label="COHORT_A") + rule1 = rule_builder.IterationRuleFactory.build(cohort_label="COHORT_A") + rule2 = rule_builder.IterationRuleFactory.build(cohort_label="COHORT_B") + rules_to_filter = [rule1, rule2] + result = list(RuleProcessor.get_exclusion_rules(cohort, rules_to_filter)) + assert_that(result, is_([rule1])) + + +def test_get_exclusion_rules_mixed_rules(): + 
cohort = rule_builder.IterationCohortFactory.build(cohort_label="COHORT_A") + no_cohort_label_rule = rule_builder.IterationRuleFactory.build(cohort_label=None, name="General") + matching_rule = rule_builder.IterationRuleFactory.build(cohort_label="COHORT_A", name="Matching") + non_matching_rule = rule_builder.IterationRuleFactory.build(cohort_label="COHORT_B", name="NonMatching") + + rules_to_filter = [no_cohort_label_rule, matching_rule, non_matching_rule] + result = list(RuleProcessor.get_exclusion_rules(cohort, rules_to_filter)) + assert_that({r.name for r in result}, is_({"General", "Matching"})) + + +@patch("eligibility_signposting_api.services.processors.rule_processor.RuleCalculator") +def test_evaluate_rules_priority_group_all_actionable(mock_rule_calculator_class, rule_processor): + mock_rule_calculator_class.return_value.evaluate_exclusion.return_value = ( + Status.actionable, + Mock(spec=Reason, matcher_matched=False), + ) + + rule1 = rule_builder.IterationRuleFactory.build(priority=1, type=RuleType.filter) + rule2 = rule_builder.IterationRuleFactory.build(priority=1, type=RuleType.filter) + rules_group = iter([rule1, rule2]) + + status, reasons, is_rule_stop = rule_processor.evaluate_rules_priority_group(MOCK_PERSON_DATA, rules_group) + + assert_that(status, is_(Status.actionable)) + assert_that(reasons, is_([])) + assert_that(is_rule_stop, is_(False)) + assert_that(mock_rule_calculator_class.call_count, is_(2)) + + +@patch("eligibility_signposting_api.services.processors.rule_processor.RuleCalculator") +def test_evaluate_rules_priority_group_one_not_eligible(mock_rule_calculator_class, rule_processor): + mock_rule_calculator_class.side_effect = [ + Mock(evaluate_exclusion=Mock(return_value=(Status.actionable, Mock(spec=Reason, matcher_matched=False)))), + Mock( + evaluate_exclusion=Mock( + return_value=( + Status.not_eligible, + ReasonFactory.build(rule_name="ExclusionReason", matcher_matched=True), + ) + ) + ), + ] + + rule1 = 
rule_builder.IterationRuleFactory.build(priority=1, type=RuleType.filter, name="Rule1") + rule2 = rule_builder.IterationRuleFactory.build(priority=1, type=RuleType.filter, name="Rule2") + rules_group = iter([rule1, rule2]) + + status, reasons, is_rule_stop = rule_processor.evaluate_rules_priority_group(MOCK_PERSON_DATA, rules_group) + + assert_that(status, is_(Status.actionable)) + assert_that(len(reasons), is_(1)) + assert_that(reasons[0].rule_name, is_(RuleName("ExclusionReason"))) + assert_that(is_rule_stop, is_(False)) + assert_that(mock_rule_calculator_class.call_count, is_(2)) + + +@patch("eligibility_signposting_api.services.processors.rule_processor.RuleCalculator") +def test_evaluate_rules_priority_group_with_rule_stop(mock_rule_calculator_class, rule_processor): + mock_rule_calculator_class.side_effect = [ + Mock(evaluate_exclusion=Mock(return_value=(Status.actionable, Mock(spec=Reason, matcher_matched=False)))), + Mock( + evaluate_exclusion=Mock( + return_value=(Status.not_eligible, ReasonFactory.build(rule_name="StopReason", matcher_matched=True)) + ) + ), + ] + + rule1 = rule_builder.IterationRuleFactory.build(priority=1, type=RuleType.suppression, rule_stop=False) + rule2 = rule_builder.IterationRuleFactory.build(priority=1, type=RuleType.suppression, rule_stop=True) + rules_group = iter([rule1, rule2]) + + status, reasons, is_rule_stop = rule_processor.evaluate_rules_priority_group(MOCK_PERSON_DATA, rules_group) + + assert_that(status, is_(Status.actionable)) + assert_that(len(reasons), is_(1)) + assert_that(is_rule_stop, is_(True)) + + +@patch.object(RuleProcessor, "evaluate_rules_priority_group") +def test_general_suppression_rule_should_not_evaluate_in_isolation_without_matching_specific_rule( + mock_evaluate_rules_priority_group, + rule_processor, +): + # Person is in COHORT_B + cohort = rule_builder.IterationCohortFactory.build(cohort_label="COHORT_B", positive_description="Eligible") + cohort_results = {} + + # Rule 1: Non-matching rule 
cohort-specific to COHORT_A — should not be evaluated + rule_specific = rule_builder.IterationRuleFactory.build( + priority=510, type=RuleType.suppression, cohort_label="COHORT_A", name="SPECIFIC_RULE" + ) + + # Rule 2: Matching general rule of the same priority as cohort-specific rule + # - should also not be evaluated + rule_general = rule_builder.IterationRuleFactory.build( + priority=510, type=RuleType.suppression, cohort_label=None, name="GENERAL_RULE" + ) + + suppression_rules = [rule_specific, rule_general] + + # Act + rule_processor.is_actionable(MOCK_PERSON_DATA, cohort, cohort_results, suppression_rules) + + # None of the rules should be evaluated + mock_evaluate_rules_priority_group.assert_not_called() + # Cohort remains actionable + assert_that(cohort_results["COHORT_B"].status, is_(Status.actionable)) + + +@patch.object(RuleProcessor, "evaluate_rules_priority_group") +def test_general_filter_rule_should_not_evaluate_in_isolation_without_matching_specific_rule( + mock_evaluate_rules_priority_group, + rule_processor, +): + # Person is in COHORT_B + cohort = rule_builder.IterationCohortFactory.build(cohort_label="COHORT_B", positive_description="Eligible") + cohort_results = {} + + # Rule 1: Non-matching rule cohort-specific to COHORT_A — should not be evaluated + rule_specific = rule_builder.IterationRuleFactory.build( + priority=510, type=RuleType.filter, cohort_label="COHORT_A", name="SPECIFIC_RULE" + ) + + # Rule 2: Matching general rule of the same priority as cohort-specific rule + # - should also not be evaluated + rule_general = rule_builder.IterationRuleFactory.build( + priority=510, type=RuleType.filter, cohort_label=None, name="GENERAL_RULE" + ) + + filter_rules = [rule_specific, rule_general] + + # Act + rule_processor.is_eligible(MOCK_PERSON_DATA, cohort, cohort_results, filter_rules) + + # None of the rules should be evaluated + mock_evaluate_rules_priority_group.assert_not_called() + + +@patch.object(RuleProcessor, 
"evaluate_rules_priority_group") +@patch.object(RuleProcessor, "_should_skip_rule_group", return_value=False) +def test_is_eligible_by_filter_rules_eligible( + mock_should_skip_rule_group, mock_evaluate_rules_priority_group, rule_processor +): + cohort = rule_builder.IterationCohortFactory.build(cohort_label="COHORT_A") + cohort_results = {} + filter_rule = rule_builder.IterationRuleFactory.build(priority=1, type=RuleType.filter) + filter_rules = [filter_rule] + + mock_evaluate_rules_priority_group.return_value = (Status.actionable, [], False) + + is_eligible = rule_processor.is_eligible(MOCK_PERSON_DATA, cohort, cohort_results, filter_rules) + + assert_that(is_eligible, is_(True)) + assert_that(cohort_results, is_({})) + mock_should_skip_rule_group.assert_called_once_with(cohort, filter_rules) + mock_evaluate_rules_priority_group.assert_called_once() + + +@patch.object(RuleProcessor, "evaluate_rules_priority_group") +@patch.object(RuleProcessor, "_should_skip_rule_group", return_value=False) +def test_is_eligible_by_filter_rules_not_eligible( + mock_should_skip_rule_group, mock_evaluate_rules_priority_group, rule_processor +): + cohort = rule_builder.IterationCohortFactory.build(cohort_label="COHORT_A", negative_description="Not Eligible") + cohort_results = {} + filter_rule = rule_builder.IterationRuleFactory.build(priority=1, type=RuleType.filter, name="F1") + filter_rules = [filter_rule] + mock_reason = ReasonFactory.build(rule_name="F1_Reason") + + mock_evaluate_rules_priority_group.return_value = (Status.not_eligible, [mock_reason], False) + + is_eligible = rule_processor.is_eligible(MOCK_PERSON_DATA, cohort, cohort_results, filter_rules) + + assert_that(is_eligible, is_(False)) + assert_that(len(cohort_results), is_(1)) + assert_that(cohort_results["COHORT_A"].status, is_(Status.not_eligible)) + assert_that(cohort_results["COHORT_A"].description, is_("Not Eligible")) + assert_that(cohort_results["COHORT_A"].audit_rules, is_([mock_reason])) + 
mock_should_skip_rule_group.assert_called_once_with(cohort, filter_rules) + mock_evaluate_rules_priority_group.assert_called_once() + + +@patch.object(RuleProcessor, "evaluate_rules_priority_group") +@patch.object(RuleProcessor, "_should_skip_rule_group", return_value=False) +def test_evaluate_suppression_rules_actionable( + mock_should_skip_rule_group, mock_evaluate_rules_priority_group, rule_processor +): + cohort = rule_builder.IterationCohortFactory.build(cohort_label="COHORT_A", positive_description="Actionable") + cohort_results = {} + suppression_rule = rule_builder.IterationRuleFactory.build(priority=1, type=RuleType.suppression) + suppression_rules = [suppression_rule] + + mock_evaluate_rules_priority_group.return_value = (Status.actionable, [], False) + + rule_processor.is_actionable(MOCK_PERSON_DATA, cohort, cohort_results, suppression_rules) + + assert_that(len(cohort_results), is_(1)) + assert_that(cohort_results["COHORT_A"].status, is_(Status.actionable)) + assert_that(cohort_results["COHORT_A"].description, is_("Actionable")) + assert_that(cohort_results["COHORT_A"].reasons, is_([])) + assert_that(cohort_results["COHORT_A"].audit_rules, is_([])) + mock_should_skip_rule_group.assert_called_once_with(cohort, suppression_rules) + mock_evaluate_rules_priority_group.assert_called_once() + + +@patch.object(RuleProcessor, "evaluate_rules_priority_group") +@patch.object(RuleProcessor, "_should_skip_rule_group", return_value=False) +def test_evaluate_suppression_rules_not_actionable( + mock_should_skip_rule_group, mock_evaluate_rules_priority_group, rule_processor +): + cohort = rule_builder.IterationCohortFactory.build( + cohort_label="COHORT_A", positive_description="Positive Description" + ) + cohort_results = {} + suppression_rule = rule_builder.IterationRuleFactory.build(priority=1, type=RuleType.suppression, name="S1") + suppression_rules = [suppression_rule] + mock_reason = ReasonFactory.build(rule_name="S1_Reason") + + 
mock_evaluate_rules_priority_group.return_value = (Status.not_actionable, [mock_reason], False) + + rule_processor.is_actionable(MOCK_PERSON_DATA, cohort, cohort_results, suppression_rules) + + assert_that(len(cohort_results), is_(1)) + assert_that(cohort_results["COHORT_A"].status, is_(Status.not_actionable)) + assert_that(cohort_results["COHORT_A"].description, is_("Positive Description")) + assert_that(cohort_results["COHORT_A"].reasons, is_([mock_reason])) + assert_that(cohort_results["COHORT_A"].audit_rules, is_([mock_reason])) + mock_should_skip_rule_group.assert_called_once_with(cohort, suppression_rules) + mock_evaluate_rules_priority_group.assert_called_once() + + +@patch.object(RuleProcessor, "evaluate_rules_priority_group") +@patch.object(RuleProcessor, "_should_skip_rule_group", return_value=False) +def test_evaluate_suppression_rules_stops_on_rule_stop( + mock_should_skip_rule_group, mock_evaluate_rules_priority_group, rule_processor +): + cohort = rule_builder.IterationCohortFactory.build(cohort_label="COHORT_A") + cohort_results = {} + suppression_rule_p1 = rule_builder.IterationRuleFactory.build( + priority=1, type=RuleType.suppression, rule_stop=True, name="S1" + ) + suppression_rule_p2 = rule_builder.IterationRuleFactory.build(priority=2, type=RuleType.suppression, name="S2") + suppression_rules = [suppression_rule_p1, suppression_rule_p2] + + mock_reason_p1 = ReasonFactory.build(rule_name="S1_Reason") + mock_reason_p2 = ReasonFactory.build(rule_name="S2_Reason") + + mock_evaluate_rules_priority_group.side_effect = [ + (Status.not_actionable, [mock_reason_p1], True), + (Status.not_actionable, [mock_reason_p2], False), + ] + + rule_processor.is_actionable(MOCK_PERSON_DATA, cohort, cohort_results, suppression_rules) + + assert_that(len(cohort_results), is_(1)) + assert_that(cohort_results["COHORT_A"].status, is_(Status.not_actionable)) + assert_that(cohort_results["COHORT_A"].reasons, is_([mock_reason_p1])) + 
assert_that(cohort_results["COHORT_A"].audit_rules, is_([mock_reason_p1])) + assert_that(mock_evaluate_rules_priority_group.call_count, is_(1)) + mock_should_skip_rule_group.assert_called_once_with(cohort, [suppression_rule_p1]) + + +@patch.object(RuleProcessor, "evaluate_rules_priority_group") +@patch.object(RuleProcessor, "_should_skip_rule_group", return_value=False) +def test_evaluate_suppression_rules_does_not_stop_on_rule_stop_when_status_is_actionable( + mock_should_skip_rule_group, mock_evaluate_rules_priority_group, rule_processor +): + cohort = rule_builder.IterationCohortFactory.build(cohort_label="COHORT_A") + cohort_results = {} + suppression_rule_p1 = rule_builder.IterationRuleFactory.build( + priority=1, type=RuleType.suppression, rule_stop=True, name="S1" + ) + suppression_rule_p2 = rule_builder.IterationRuleFactory.build(priority=2, type=RuleType.suppression, name="S2") + suppression_rules = [suppression_rule_p1, suppression_rule_p2] + + mock_reason_p1 = ReasonFactory.build(rule_name="S1_Reason") + mock_reason_p2 = ReasonFactory.build(rule_name="S2_Reason") + + mock_evaluate_rules_priority_group.side_effect = [ + (Status.actionable, [mock_reason_p1], True), + (Status.not_actionable, [mock_reason_p2], False), + ] + + rule_processor.is_actionable(MOCK_PERSON_DATA, cohort, cohort_results, suppression_rules) + + assert_that(len(cohort_results), is_(1)) + assert_that(cohort_results["COHORT_A"].status, is_(Status.not_actionable)) + assert_that(cohort_results["COHORT_A"].reasons, is_([mock_reason_p2])) + assert_that(cohort_results["COHORT_A"].audit_rules, is_([mock_reason_p2])) + + assert_that(mock_evaluate_rules_priority_group.call_count, is_(2)) + assert_that(mock_should_skip_rule_group.call_count, is_(2)) + + +def test_is_base_eligible(mock_person_data_reader): + person = Person( + [ + {"ATTRIBUTE_TYPE": "PERSON", "AGE": "30"}, + { + "ATTRIBUTE_TYPE": "COHORTS", + "COHORT_MEMBERSHIPS": [ + {"COHORT_LABEL": "COHORT_A"}, + {"COHORT_LABEL": "COHORT_C"}, + 
], + }, + ] + ) + + rule_processor = RuleProcessor(mock_person_data_reader) + mock_person_data_reader.get_person_cohorts.return_value = {"COHORT_A", "COHORT_C"} + + cohort = rule_builder.IterationCohortFactory.build(cohort_label="COHORT_A") + + assert_that(rule_processor.is_base_eligible(person, cohort), is_(True)) + mock_person_data_reader.get_person_cohorts.assert_called_once_with(person) + + +def test_is_not_base_eligible(mock_person_data_reader): + person = Person( + [ + {"ATTRIBUTE_TYPE": "PERSON", "AGE": "30"}, + { + "ATTRIBUTE_TYPE": "COHORTS", + "COHORT_MEMBERSHIPS": [ + {"COHORT_LABEL": "COHORT_C"}, + ], + }, + ] + ) + + rule_processor = RuleProcessor(mock_person_data_reader) + mock_person_data_reader.get_person_cohorts.return_value = {"COHORT_C"} + + cohort = rule_builder.IterationCohortFactory.build(cohort_label="COHORT_A") + + assert_that(rule_processor.is_base_eligible(person, cohort), is_(False)) + mock_person_data_reader.get_person_cohorts.assert_called_once_with(person) + + +def test_rules_get_group_by_types_of_rules(rule_processor): + active_iteration = rule_builder.IterationFactory.build() + iteration_rules = active_iteration.iteration_rules + iteration_rules.append(rule_builder.IterationRuleFactory.build()) + + iteration_rules[0].type = RuleType.filter + iteration_rules[1].type = RuleType.suppression + iteration_rules[2].type = RuleType.filter + + rules_by_type = rule_processor.get_rules_by_type(active_iteration) + + assert_that(len(rules_by_type), is_(2)) + + assert_that(rules_by_type[0][0].type, is_(RuleType.filter)) + assert_that(rules_by_type[0][1].type, is_(RuleType.filter)) + assert_that(rules_by_type[1][0].type, is_(RuleType.suppression)) + + +@patch.object(RuleProcessor, "evaluate_rules_priority_group") +@patch.object(RuleProcessor, "_should_skip_rule_group", return_value=False) +def test_is_eligible_by_filter_rules(mock_should_skip_rule_group, mock_evaluate_rules_priority_group, rule_processor): + cohort = 
rule_builder.IterationCohortFactory.build(cohort_label="COHORT_A") + cohort_results = {} + filter_rule = rule_builder.IterationRuleFactory.build(priority=1, type=RuleType.filter) + filter_rules = [filter_rule] + + mock_evaluate_rules_priority_group.return_value = (Status.actionable, [], False) + + is_eligible = rule_processor.is_eligible(MOCK_PERSON_DATA, cohort, cohort_results, filter_rules) + + assert_that(is_eligible, is_(True)) + assert_that(cohort_results, is_({})) + mock_should_skip_rule_group.assert_called_once_with(cohort, filter_rules) + mock_evaluate_rules_priority_group.assert_called_once() + + +@patch.object(RuleProcessor, "evaluate_rules_priority_group") +@patch.object(RuleProcessor, "_should_skip_rule_group", return_value=False) +def test_is_not_eligible_by_filter_rules( + mock_should_skip_rule_group, mock_evaluate_rules_priority_group, rule_processor +): + cohort = rule_builder.IterationCohortFactory.build(cohort_label="COHORT_A", negative_description="Not Eligible") + cohort_results = {} + filter_rule = rule_builder.IterationRuleFactory.build(priority=1, type=RuleType.filter, name="F1") + filter_rules = [filter_rule] + mock_reason = ReasonFactory.build(rule_name="F1_Reason") + + def mock_evaluate_side_effect(person, rules_group): # noqa: ARG001 + cohort_results[cohort.cohort_label] = CohortGroupResult( + cohort.cohort_group, + Status.not_eligible, + [], + cohort.negative_description, + [mock_reason], + ) + return Status.not_eligible, [mock_reason], False + + mock_evaluate_rules_priority_group.side_effect = mock_evaluate_side_effect + + is_eligible = rule_processor.is_eligible(MOCK_PERSON_DATA, cohort, cohort_results, filter_rules) + + assert_that(is_eligible, is_(False)) + assert_that(len(cohort_results), is_(1)) + assert_that(cohort_results["COHORT_A"].status, is_(Status.not_eligible)) + assert_that(cohort_results["COHORT_A"].description, is_("Not Eligible")) + assert_that(cohort_results["COHORT_A"].audit_rules, is_([mock_reason])) + 
mock_should_skip_rule_group.assert_called_once_with(cohort, filter_rules) + mock_evaluate_rules_priority_group.assert_called_once() + + +@patch.object(RuleProcessor, "evaluate_rules_priority_group") +@patch.object(RuleProcessor, "_should_skip_rule_group", return_value=False) +def test_is_actionable_by_suppression_rules( + mock_should_skip_rule_group, mock_evaluate_rules_priority_group, rule_processor +): + cohort = rule_builder.IterationCohortFactory.build(cohort_label="COHORT_A", positive_description="Actionable") + cohort_results = {} + suppression_rule = rule_builder.IterationRuleFactory.build(priority=1, type=RuleType.suppression) + suppression_rules = [suppression_rule] + + mock_evaluate_rules_priority_group.return_value = (Status.actionable, [], False) + + rule_processor.is_actionable(MOCK_PERSON_DATA, cohort, cohort_results, suppression_rules) + + assert_that(len(cohort_results), is_(1)) + assert_that(cohort_results["COHORT_A"].status, is_(Status.actionable)) + assert_that(cohort_results["COHORT_A"].description, is_("Actionable")) + assert_that(cohort_results["COHORT_A"].reasons, is_(empty())) + assert_that(cohort_results["COHORT_A"].audit_rules, is_(empty())) + mock_should_skip_rule_group.assert_called_once_with(cohort, suppression_rules) + mock_evaluate_rules_priority_group.assert_called_once() + + +@patch.object(RuleProcessor, "evaluate_rules_priority_group") +@patch.object(RuleProcessor, "_should_skip_rule_group", return_value=False) +def test_is_not_actionable_by_suppression_rules( + mock_should_skip_rule_group, mock_evaluate_rules_priority_group, rule_processor +): + cohort = rule_builder.IterationCohortFactory.build( + cohort_label="COHORT_A", positive_description="Positive Description" + ) + cohort_results = {} + suppression_rule = rule_builder.IterationRuleFactory.build(priority=1, type=RuleType.suppression, name="S1") + suppression_rules = [suppression_rule] + mock_reason = ReasonFactory.build(rule_name="S1_Reason") + + def 
mock_evaluate_side_effect(person, rules_group): # noqa: ARG001 + cohort_results[cohort.cohort_label] = CohortGroupResult( + cohort.cohort_group, + Status.not_actionable, + [mock_reason], + cohort.positive_description, + [mock_reason], + ) + return Status.not_actionable, [mock_reason], False + + mock_evaluate_rules_priority_group.side_effect = mock_evaluate_side_effect + + rule_processor.is_actionable(MOCK_PERSON_DATA, cohort, cohort_results, suppression_rules) + + assert_that(len(cohort_results), is_(1)) + assert_that(cohort_results["COHORT_A"].status, is_(Status.not_actionable)) + assert_that(cohort_results["COHORT_A"].description, is_("Positive Description")) + assert_that(cohort_results["COHORT_A"].reasons, is_([mock_reason])) + assert_that(cohort_results["COHORT_A"].audit_rules, is_([mock_reason])) + mock_should_skip_rule_group.assert_called_once_with(cohort, suppression_rules) + mock_evaluate_rules_priority_group.assert_called_once() + + +@patch.object(RuleProcessor, "get_rules_by_type") +@patch("eligibility_signposting_api.services.processors.rule_processor.BaseEligibilityHandler") +@patch("eligibility_signposting_api.services.processors.rule_processor.FilterRuleHandler") +@patch("eligibility_signposting_api.services.processors.rule_processor.SuppressionRuleHandler") +def test_get_cohort_group_results( + mock_suppression_handler_class, + mock_filter_handler_class, + mock_base_handler_class, + mock_get_rules_by_type, + rule_processor, +): + mock_base_handler_instance = mock_base_handler_class.return_value + mock_filter_handler_instance = mock_filter_handler_class.return_value + mock_suppression_handler_instance = mock_suppression_handler_class.return_value + + mock_base_handler_instance.next.return_value = mock_filter_handler_instance + mock_filter_handler_instance.next.return_value = mock_suppression_handler_instance + + cohort_a = rule_builder.IterationCohortFactory.build( + cohort_label="COHORT_A", priority=1, cohort_group="common_cohort" + ) + cohort_b = 
rule_builder.IterationCohortFactory.build( + cohort_label="COHORT_B", priority=2, cohort_group="common_cohort" + ) + active_iteration = rule_builder.IterationFactory.build( + iteration_cohorts=[cohort_a, cohort_b], + iteration_rules=[ + rule_builder.IterationRuleFactory.build(type=RuleType.filter, priority=1), + rule_builder.IterationRuleFactory.build(type=RuleType.suppression, priority=1), + ], + ) + + filter_rules = (rule_builder.IterationRuleFactory.build(type=RuleType.filter),) + suppression_rules = (rule_builder.IterationRuleFactory.build(type=RuleType.suppression),) + mock_get_rules_by_type.return_value = (filter_rules, suppression_rules) + + def mock_handle_side_effect(person, cohort, cohort_results_dict, rule_processor_instance): # noqa: ARG001 + if cohort.cohort_label == CohortLabel("COHORT_A"): + cohort_results_dict[CohortLabel("COHORT_A")] = CohortGroupResult( + cohort_code=cohort.cohort_group, + status=Status.actionable, + reasons=[], + description="Cohort A Description", + audit_rules=[], + ) + elif cohort.cohort_label == CohortLabel("COHORT_B"): + cohort_results_dict[CohortLabel("COHORT_B")] = CohortGroupResult( + cohort_code=cohort.cohort_group, + status=Status.not_eligible, + reasons=[], + description="Cohort B Description", + audit_rules=[], + ) + + mock_base_handler_instance.handle.side_effect = mock_handle_side_effect + + result = rule_processor.get_cohort_group_results(MOCK_PERSON_DATA, active_iteration) + + mock_get_rules_by_type.assert_called_once_with(active_iteration) + + mock_base_handler_class.assert_called_once_with() + mock_filter_handler_class.assert_called_once_with(filter_rules=filter_rules) + mock_suppression_handler_class.assert_called_once_with(suppression_rules=suppression_rules) + + mock_base_handler_instance.next.assert_called_once_with(mock_filter_handler_instance) + mock_filter_handler_instance.next.assert_called_once_with(mock_suppression_handler_instance) + + assert_that(mock_base_handler_instance.handle.call_count, is_(2)) 
+ calls = mock_base_handler_instance.handle.call_args_list + assert_that(calls[0].args[1], is_(cohort_a)) + assert_that(calls[1].args[1], is_(cohort_b)) + + assert_that(len(result), is_(2)) + expected_result = { + CohortLabel("COHORT_A"): CohortGroupResult( + cohort_code=cohort_a.cohort_group, + status=Status.actionable, + reasons=[], + description="Cohort A Description", + audit_rules=[], + ), + CohortLabel("COHORT_B"): CohortGroupResult( + cohort_code=cohort_b.cohort_group, + status=Status.not_eligible, + reasons=[], + description="Cohort B Description", + audit_rules=[], + ), + } + assert_that(result, is_(expected_result)) + + assert_that(result[CohortLabel("COHORT_A")].status, is_(Status.actionable)) + assert_that(result[CohortLabel("COHORT_B")].status, is_(Status.not_eligible)) + + assert_that(result[CohortLabel("COHORT_A")].status, is_(Status.actionable)) + assert_that(result[CohortLabel("COHORT_B")].status, is_(Status.not_eligible)) + + +@patch.object(RuleProcessor, "get_rules_by_type", return_value=((), ())) +@patch("eligibility_signposting_api.services.processors.rule_processor.BaseEligibilityHandler") +@patch("eligibility_signposting_api.services.processors.rule_processor.FilterRuleHandler") +@patch("eligibility_signposting_api.services.processors.rule_processor.SuppressionRuleHandler") +def test_get_cohort_group_results_no_rules_no_cohorts( + mock_suppression_handler_class, + mock_filter_handler_class, + mock_base_handler_class, + mock_get_rules_by_type, + rule_processor, +): + mock_base_handler_instance = mock_base_handler_class.return_value + active_iteration = rule_builder.IterationFactory.build(iteration_cohorts=[], iteration_rules=[]) + + result = rule_processor.get_cohort_group_results(MOCK_PERSON_DATA, active_iteration) + + mock_get_rules_by_type.assert_called_once_with(active_iteration) + mock_base_handler_class.assert_called_once_with() + mock_filter_handler_class.assert_called_once_with(filter_rules=()) + 
mock_suppression_handler_class.assert_called_once_with(suppression_rules=()) + + mock_base_handler_instance.handle.assert_not_called() + assert_that(result, is_({})) diff --git a/tests/unit/services/test_eligibility_services.py b/tests/unit/services/test_eligibility_services.py index 872347a00..504888f12 100644 --- a/tests/unit/services/test_eligibility_services.py +++ b/tests/unit/services/test_eligibility_services.py @@ -3,7 +3,7 @@ import pytest from hamcrest import assert_that, empty -from eligibility_signposting_api.model.eligibility import NHSNumber +from eligibility_signposting_api.model.eligibility_status import NHSNumber from eligibility_signposting_api.repos import CampaignRepo, NotFoundError, PersonRepo from eligibility_signposting_api.services import EligibilityService, UnknownPersonError from eligibility_signposting_api.services.calculators.eligibility_calculator import EligibilityCalculatorFactory diff --git a/tests/unit/test_wrapper.py b/tests/unit/test_wrapper.py deleted file mode 100644 index 18ef5d477..000000000 --- a/tests/unit/test_wrapper.py +++ /dev/null @@ -1,313 +0,0 @@ -import json -import logging -from http import HTTPStatus - -import pytest - -from eligibility_signposting_api import wrapper -from eligibility_signposting_api.wrapper import logger - - -@pytest.fixture(autouse=True) -def setup_logging_for_tests(): - logger.handlers = [] - logger.setLevel(logging.INFO) - logger.addHandler(logging.NullHandler()) - - -@pytest.mark.parametrize( - ("path_nhs", "header_nhs", "expected_result", "expected_log_msg"), - [ - (None, None, False, "NHS number is not present"), - ("1234567890", None, False, "NHS number is not present"), - (None, "1234567890", False, "NHS number is not present"), - ("1234567890", "0987654321", False, "NHS number mismatch"), - ("1234567890", "1234567890", True, None), - ], -) -def test_validate_nhs_number(path_nhs, header_nhs, expected_result, expected_log_msg, caplog): - with caplog.at_level(logging.ERROR): - result = 
wrapper.validate_nhs_number(path_nhs, header_nhs) - - assert result == expected_result - - if expected_log_msg: - assert any(expected_log_msg in record.message for record in caplog.records) - else: - assert not caplog.records - - -@pytest.mark.parametrize( - ("conditions_input", "is_valid_expected", "expected_log_msg"), - [ - ("ALL", True, None), - ("COVID", True, None), - ("covid19", True, None), - ("FLU,MMR", True, None), - (" RSV , COVID19", True, None), - (" condition_with_spaces ", False, "Invalid condition query param: ' condition_with_spaces '"), - ("CONDITION_A,ANOTHER_ONE,123ABC", False, "Invalid condition query param: 'CONDITION_A'"), - ("condition1,", False, "Invalid condition query param: ''"), - (",condition2", False, "Invalid condition query param: ''"), - ("condition-invalid", False, "Invalid condition query param: 'condition-invalid'"), - ("condition with spaces", False, "Invalid condition query param: 'condition with spaces'"), - ("condition!", False, "Invalid condition query param: 'condition!'"), - ("condition@#$", False, "Invalid condition query param: 'condition@#$'"), - ], -) -def test_validate_query_params_conditions(conditions_input, is_valid_expected, expected_log_msg, caplog): - params = {"conditions": conditions_input} - - with caplog.at_level(logging.ERROR): - is_valid, problem = wrapper.validate_query_params(params) - - assert is_valid == is_valid_expected - if is_valid_expected: - assert problem is None - assert not caplog.records - else: - assert problem is not None - assert any((record.levelname == "ERROR" and expected_log_msg in record.message) for record in caplog.records) - - -def test_validate_query_params_conditions_default(caplog): - params = {"category": "ALL", "includeActions": "Y"} - with caplog.at_level(logging.ERROR): - is_valid, problem = wrapper.validate_query_params(params) - assert is_valid is True - assert problem is None - assert not caplog.records - - -@pytest.mark.parametrize( - ("category_input", 
"is_valid_expected", "expected_log_msg"), - [ - ("VACCINATIONS", True, None), - ("SCREENING", True, None), - ("ALL", True, None), - ("vaccinations", True, None), - ("screening", True, None), - ("all", True, None), - (" VACCINATIONS ", True, None), - ("OTHER_CATEGORY ", False, "Invalid category query param: 'OTHER_CATEGORY '"), - ("invalid!", False, "Invalid category query param: 'invalid!'"), - ("VACCINATION", False, "Invalid category query param: 'VACCINATION'"), - ], -) -def test_validate_query_params_category(category_input, is_valid_expected, expected_log_msg, caplog): - params = {"category": category_input} - with caplog.at_level(logging.ERROR): - is_valid, problem = wrapper.validate_query_params(params) - assert is_valid == is_valid_expected - - if is_valid_expected: - assert problem is None - assert not caplog.records - else: - assert problem is not None - assert any((record.levelname == "ERROR" and expected_log_msg in record.message) for record in caplog.records) - - -def test_validate_query_params_category_default(caplog): - params = {"conditions": "ALL", "includeActions": "Y"} - with caplog.at_level(logging.ERROR): - is_valid, problem = wrapper.validate_query_params(params) - assert is_valid is True - assert problem is None - assert not caplog.records - - -@pytest.mark.parametrize( - ("include_actions_input", "is_valid_expected", "expected_log_msg"), - [ - ("Y", True, None), - ("N", True, None), - ("y", True, None), - ("n", True, None), - ("n ", True, None), - ("TRUE", False, "Invalid include actions query param: 'TRUE'"), - ("YES", False, "Invalid include actions query param: 'YES'"), - ("0", False, "Invalid include actions query param: '0'"), - ("1", False, "Invalid include actions query param: '1'"), - ("", False, "Invalid include actions query param: ''"), - (" ", False, "Invalid include actions query param: ' '"), - ], -) -def test_validate_query_params_include_actions(include_actions_input, is_valid_expected, expected_log_msg, caplog): - params = 
{"includeActions": include_actions_input} - with caplog.at_level(logging.ERROR): - is_valid, problem = wrapper.validate_query_params(params) - assert is_valid == is_valid_expected - - if is_valid_expected: - assert problem is None - assert not caplog.records - else: - assert problem is not None - assert any((record.levelname == "ERROR" and expected_log_msg in record.message) for record in caplog.records) - - -def test_validate_query_params_include_actions_default(caplog): - params = {"conditions": "ALL", "category": "ALL"} - with caplog.at_level(logging.ERROR): - is_valid, problem = wrapper.validate_query_params(params) - assert is_valid is True - assert problem is None - assert not caplog.records - - -def test_validate_query_params_all_valid_params(caplog): - params = {"conditions": "COND1,COND2", "category": "SCREENING", "includeActions": "N"} - with caplog.at_level(logging.ERROR): - is_valid, problem = wrapper.validate_query_params(params) - assert is_valid is True - assert problem is None - assert not caplog.records - - -def test_validate_query_params_mixed_valid_invalid_conditions_fail_first(caplog): - params = {"conditions": "VALID_COND,INVALID!,ANOTHER_VALID", "category": "SCREENING", "includeActions": "N"} - with caplog.at_level(logging.ERROR): - is_valid, problem = wrapper.validate_query_params(params) - assert is_valid is False - assert problem is not None - assert any( - (record.levelname == "ERROR" and "Invalid condition query param: " in record.message) - for record in caplog.records - ) - - -def test_validate_query_params_valid_conditions_invalid_category_fail_second(caplog): - params = {"conditions": "CONDITION", "category": "BAD_CAT", "includeActions": "N"} - with caplog.at_level(logging.ERROR): - is_valid, problem = wrapper.validate_query_params(params) - assert is_valid is False - assert problem is not None - assert any( - (record.levelname == "ERROR" and "Invalid category query param: " in record.message) - for record in caplog.records - ) - 
error_logs = [r for r in caplog.records if r.levelname == "ERROR"] - assert len(error_logs) == 1 - - -def test_validate_query_params_valid_conditions_category_invalid_actions_fail_third(caplog): - params = {"conditions": "CONDITION", "category": "VACCINATIONS", "includeActions": "Nope"} - with caplog.at_level(logging.ERROR): - is_valid, problem = wrapper.validate_query_params(params) - assert is_valid is False - assert problem is not None - assert any( - (record.levelname == "ERROR" and "Invalid include actions query param: " in record.message) - for record in caplog.records - ) - error_logs = [r for r in caplog.records if r.levelname == "ERROR"] - assert len(error_logs) == 1 - - -def test_validate_query_params_returns_correct_problem_details_for_conditions_error(): - invalid_condition = "FLU&COVID" - params = {"conditions": invalid_condition} - - is_valid, problem = wrapper.validate_query_params(params) - - assert is_valid is False - assert problem is not None - assert problem["statusCode"] == HTTPStatus.BAD_REQUEST - assert problem["headers"]["Content-Type"] == "application/fhir+json" - - response_body = json.loads(problem["body"]) - - assert response_body["resourceType"] == "OperationOutcome" - assert "id" in response_body - assert "meta" in response_body - assert "lastUpdated" in response_body["meta"] - - assert len(response_body["issue"]) == 1 - issue = response_body["issue"][0] - - assert issue["severity"] == "error" - assert issue["code"] == "value" - assert issue["diagnostics"] == ( - f"{invalid_condition} should be a single or comma separated list of condition " - f"strings with no other punctuation or special characters" - ) - assert issue["location"] == ["parameters/conditions"] - assert "details" in issue - assert "coding" in issue["details"] - assert len(issue["details"]["coding"]) == 1 - coding = issue["details"]["coding"][0] - - assert coding["system"] == "https://fhir.nhs.uk/STU3/ValueSet/Spine-ErrorOrWarningCode-1" - assert coding["code"] == 
"INVALID_PARAMETER" - assert coding["display"] == "The given conditions were not in the expected format." - - -def test_validate_query_params_returns_correct_problem_details_for_category_error(): - invalid_category = "HEALTHCHECKS" - params = {"category": invalid_category} - - is_valid, problem = wrapper.validate_query_params(params) - - assert is_valid is False - assert problem is not None - assert problem["statusCode"] == HTTPStatus.UNPROCESSABLE_ENTITY - assert problem["headers"]["Content-Type"] == "application/fhir+json" - - response_body = json.loads(problem["body"]) - - assert response_body["resourceType"] == "OperationOutcome" - assert "id" in response_body - assert "meta" in response_body - assert "lastUpdated" in response_body["meta"] - - assert len(response_body["issue"]) == 1 - issue = response_body["issue"][0] - - assert issue["severity"] == "error" - assert issue["code"] == "value" - assert issue["diagnostics"] == f"{invalid_category} is not a category that is supported by the API" - assert issue["location"] == ["parameters/category"] - assert "details" in issue - assert "coding" in issue["details"] - assert len(issue["details"]["coding"]) == 1 - coding = issue["details"]["coding"][0] - - assert coding["system"] == "https://fhir.nhs.uk/STU3/ValueSet/Spine-ErrorOrWarningCode-1" - assert coding["code"] == "INVALID_PARAMETER" - assert coding["display"] == "The supplied category was not recognised by the API." 
- - -def test_validate_query_params_returns_correct_problem_details_for_include_actions_error(): - invalid_include_actions = "NAH" - params = {"includeActions": invalid_include_actions} - - is_valid, problem = wrapper.validate_query_params(params) - - assert is_valid is False - assert problem is not None - assert problem["statusCode"] == HTTPStatus.UNPROCESSABLE_ENTITY - assert problem["headers"]["Content-Type"] == "application/fhir+json" - - response_body = json.loads(problem["body"]) - - assert response_body["resourceType"] == "OperationOutcome" - assert "id" in response_body - assert "meta" in response_body - assert "lastUpdated" in response_body["meta"] - - assert len(response_body["issue"]) == 1 - issue = response_body["issue"][0] - - assert issue["severity"] == "error" - assert issue["code"] == "value" - assert issue["diagnostics"] == f"{invalid_include_actions} is not a value that is supported by the API" - assert issue["location"] == ["parameters/includeActions"] - assert "details" in issue - assert "coding" in issue["details"] - assert len(issue["details"]["coding"]) == 1 - coding = issue["details"]["coding"][0] - - assert coding["system"] == "https://fhir.nhs.uk/STU3/ValueSet/Spine-ErrorOrWarningCode-1" - assert coding["code"] == "INVALID_PARAMETER" - assert coding["display"] == "The supplied value was not recognised by the API." 
diff --git a/tests/unit/validation/__init__.py b/tests/unit/validation/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/unit/validation/conftest.py b/tests/unit/validation/conftest.py new file mode 100644 index 000000000..cb711c20d --- /dev/null +++ b/tests/unit/validation/conftest.py @@ -0,0 +1,61 @@ +import pytest + + +@pytest.fixture +def valid_campaign_config_with_only_mandatory_fields(): + return { + "ID": "CAMP001", + "Version": 1, + "Name": "Spring Campaign", + "Type": "V", + "Target": "COVID", + "IterationFrequency": "M", + "IterationType": "A", + "StartDate": "20250101", + "EndDate": "20250331", + "Iterations": [ + { + "ID": "ITER001", + "Version": 1, + "Name": "Mid-January Push", + "IterationDate": "20250101", + "IterationNumber": 1, + "ApprovalMinimum": 10, + "ApprovalMaximum": 100, + "Type": "A", + "DefaultCommsRouting": "", + "DefaultNotEligibleRouting": "", + "DefaultNotActionableRouting": "", + "IterationCohorts": [], + "IterationRules": [], + "ActionsMapper": {}, + } + ], + } + + +@pytest.fixture +def valid_iteration_rule_with_only_mandatory_fields(): + return { + "Type": "F", + "Name": "Assure only already vaccinated taken from magic cohort", + "Description": "Exclude anyone who has NOT been given a dose of RSV Vaccination from the magic cohort", + "Operator": "is_empty", + "Comparator": "", + "AttributeTarget": "RSV", + "AttributeLevel": "TARGET", + "AttributeName": "LAST_SUCCESSFUL_DATE", + "CohortLabel": "elid_all_people", + "Priority": 100, + } + + +@pytest.fixture +def valid_available_action(): + return { + "ExternalRoutingCode": "BookNBS", + "ActionDescription": "", + "ActionType": "ButtonWithAuthLink", + "UrlLink": "http://www.nhs.uk/book-rsv", + "UrlLabel": "Continue to booking", + } diff --git a/tests/unit/validation/test_actions_mapper_validator.py b/tests/unit/validation/test_actions_mapper_validator.py new file mode 100644 index 000000000..89af4958f --- /dev/null +++ 
b/tests/unit/validation/test_actions_mapper_validator.py @@ -0,0 +1,64 @@ +import pytest +from pydantic import ValidationError + +from eligibility_signposting_api.model.campaign_config import AvailableAction +from rules_validation_api.validators.actions_mapper_validator import ActionsMapperValidation + + +@pytest.fixture +def valid_available_action(): + return { + "ExternalRoutingCode": "BookNBS", + "ActionDescription": "", + "ActionType": "ButtonWithAuthLink", + "UrlLink": "http://www.nhs.uk/book-rsv", + "UrlLabel": "Continue to booking", + } + + +class TestBUCValidations: + def make_action(self, data: dict) -> AvailableAction: + return AvailableAction(**data) + + def test_valid_actions_mapper(self, valid_available_action): + data = { + "action1": self.make_action(valid_available_action), + "action2": self.make_action({**valid_available_action, "ExternalRoutingCode": "AltCode"}), + } + mapper = ActionsMapperValidation(root=data) + + expected_action_count = 2 + assert isinstance(mapper, ActionsMapperValidation) + assert len(mapper.root) == expected_action_count + + @pytest.mark.parametrize( + "invalid_action", + [ + {"action1": ""}, + {"action1": "invalid_action"}, + {"action3": None}, + {"action1": "", "action3": None}, + {"action1": "invalid_action", "action2": ""}, + ], + ) + def test_if_exception_raised_when_adding_invalid_actions_to_action_mapper(self, invalid_action): + data = {"": invalid_action} + with pytest.raises(ValidationError): + ActionsMapperValidation(root=data) + + def test_invalid_actions_mapper_empty_key(self, valid_available_action): + data = {"": self.make_action(valid_available_action), "action2": self.make_action(valid_available_action)} + with pytest.raises(ValidationError) as exc_info: + ActionsMapperValidation(root=data) + assert "Invalid keys found in ActionsMapper" in str(exc_info.value) + assert "['']" in str(exc_info.value) + + @pytest.mark.parametrize("bad_key", [""]) + def test_invalid_keys_parametrized(self, bad_key, 
valid_available_action): + data = { + bad_key: self.make_action(valid_available_action), + "valid_key": self.make_action(valid_available_action), + } + with pytest.raises(ValidationError) as exc_info: + ActionsMapperValidation(root=data) + assert "Invalid keys found in ActionsMapper" in str(exc_info.value) diff --git a/tests/unit/validation/test_available_action_validator.py b/tests/unit/validation/test_available_action_validator.py new file mode 100644 index 000000000..468ead7b9 --- /dev/null +++ b/tests/unit/validation/test_available_action_validator.py @@ -0,0 +1,55 @@ +import copy + +import pytest +from pydantic import ValidationError + +from rules_validation_api.validators.available_action_validator import AvailableActionValidation + + +# 🔍 Mandatory Fields +class TestMandatoryFieldsSchemaValidations: + def test_valid_minimal_input(self, valid_available_action): + data = copy.deepcopy(valid_available_action) + data.pop("ActionDescription") + data.pop("UrlLink") + data.pop("UrlLabel") + action = AvailableActionValidation(**data) + assert action.action_type == "ButtonWithAuthLink" + assert action.action_code == "BookNBS" + assert action.action_description is None + assert action.url_link is None + assert action.url_label is None + + def test_missing_required_fields(self, valid_available_action): + data = copy.deepcopy(valid_available_action) + data.pop("ActionType") + data.pop("ExternalRoutingCode") + with pytest.raises(ValidationError) as exc_info: + AvailableActionValidation(**data) + error_msg = str(exc_info.value) + assert "ActionType" in error_msg + assert "ExternalRoutingCode" in error_msg + + +# 🔍 Optional Fields +class TestOptionalFieldsSchemaValidations: + def test_valid_full_input(self, valid_available_action): + action = AvailableActionValidation(**valid_available_action) + assert action.action_type == "ButtonWithAuthLink" + assert action.action_code == "BookNBS" + assert action.action_description == "" + assert str(action.url_link) == 
"http://www.nhs.uk/book-rsv" + assert action.url_label == "Continue to booking" + + def test_empty_string_is_valid_for_optional_fields(self, valid_available_action): + action = AvailableActionValidation(**valid_available_action) + assert action.action_description == "" + assert action.url_label == "Continue to booking" + + @pytest.mark.parametrize("bad_url", ["not-a-url", "ftp://bad", "123"]) + def test_invalid_url_raises_validation_error(self, valid_available_action, bad_url): + data = copy.deepcopy(valid_available_action) + data["UrlLink"] = bad_url + with pytest.raises(ValidationError) as exc_info: + AvailableActionValidation(**data) + assert "UrlLink" in str(exc_info.value) diff --git a/tests/unit/validation/test_campaign_config_validator.py b/tests/unit/validation/test_campaign_config_validator.py new file mode 100644 index 000000000..61bb75ca7 --- /dev/null +++ b/tests/unit/validation/test_campaign_config_validator.py @@ -0,0 +1,235 @@ +import pytest +from pydantic import ValidationError + +from rules_validation_api.validators.campaign_config_validator import CampaignConfigValidation + + +class TestMandatoryFieldsSchemaValidations: + def test_campaign_config_with_only_mandatory_fields_configuration( + self, valid_campaign_config_with_only_mandatory_fields + ): + try: + CampaignConfigValidation(**valid_campaign_config_with_only_mandatory_fields) + except ValidationError as e: + pytest.fail(f"Unexpected error during model instantiation: {e}") + + @pytest.mark.parametrize( + "mandatory_field", + [ + "ID", + "Version", + "Name", + "Type", + "Target", + "IterationFrequency", + "IterationType", + "StartDate", + "EndDate", + "Iterations", + ], + ) + def test_missing_mandatory_fields(self, mandatory_field, valid_campaign_config_with_only_mandatory_fields): + data = valid_campaign_config_with_only_mandatory_fields.copy() + data.pop(mandatory_field, None) # Simulate missing field + with pytest.raises(ValidationError): + CampaignConfigValidation(**data) + + # ID + 
@pytest.mark.parametrize("id_value", ["CAMP001", "12345", "X001"]) + def test_valid_id(self, id_value, valid_campaign_config_with_only_mandatory_fields): + data = {**valid_campaign_config_with_only_mandatory_fields, "ID": id_value} + model = CampaignConfigValidation(**data) + assert model.id == id_value + + # Version + @pytest.mark.parametrize("version_value", [1, 2, 100]) + def test_valid_version(self, version_value, valid_campaign_config_with_only_mandatory_fields): + data = {**valid_campaign_config_with_only_mandatory_fields, "Version": version_value} + model = CampaignConfigValidation(**data) + assert model.version == version_value + + # Name + @pytest.mark.parametrize("name_value", ["Spring Campaign", "COVID-Alert", "Mass Outreach"]) + def test_valid_name(self, name_value, valid_campaign_config_with_only_mandatory_fields): + data = {**valid_campaign_config_with_only_mandatory_fields, "Name": name_value} + model = CampaignConfigValidation(**data) + assert model.name == name_value + + # Type + @pytest.mark.parametrize("type_value", ["V", "S"]) + def test_valid_type(self, type_value, valid_campaign_config_with_only_mandatory_fields): + data = {**valid_campaign_config_with_only_mandatory_fields, "Type": type_value} + model = CampaignConfigValidation(**data) + assert model.type == type_value + + @pytest.mark.parametrize("type_value", ["X", "", None]) + def test_invalid_type(self, type_value, valid_campaign_config_with_only_mandatory_fields): + data = {**valid_campaign_config_with_only_mandatory_fields, "Type": type_value} + with pytest.raises(ValidationError): + CampaignConfigValidation(**data) + + # Target + @pytest.mark.parametrize("target_value", ["COVID", "FLU", "MMR", "RSV"]) + def test_valid_target(self, target_value, valid_campaign_config_with_only_mandatory_fields): + data = {**valid_campaign_config_with_only_mandatory_fields, "Target": target_value} + model = CampaignConfigValidation(**data) + assert model.target == target_value + + 
@pytest.mark.parametrize("target_value", ["XYZ", "ABC", "", None]) + def test_invalid_target(self, target_value, valid_campaign_config_with_only_mandatory_fields): + data = {**valid_campaign_config_with_only_mandatory_fields, "Target": target_value} + with pytest.raises(ValidationError): + CampaignConfigValidation(**data) + + # IterationFrequency + @pytest.mark.parametrize("freq_value", ["X", "D", "W", "M", "Q", "A"]) + def test_valid_iteration_frequency(self, freq_value, valid_campaign_config_with_only_mandatory_fields): + data = {**valid_campaign_config_with_only_mandatory_fields, "IterationFrequency": freq_value} + model = CampaignConfigValidation(**data) + assert model.iteration_frequency == freq_value + + @pytest.mark.parametrize("freq_value", ["Z", "", None]) + def test_invalid_iteration_frequency(self, freq_value, valid_campaign_config_with_only_mandatory_fields): + data = {**valid_campaign_config_with_only_mandatory_fields, "IterationFrequency": freq_value} + with pytest.raises(ValidationError): + CampaignConfigValidation(**data) + + # IterationType + @pytest.mark.parametrize("iter_type", ["A", "M", "S", "O"]) + def test_valid_iteration_type(self, iter_type, valid_campaign_config_with_only_mandatory_fields): + data = {**valid_campaign_config_with_only_mandatory_fields, "IterationType": iter_type} + model = CampaignConfigValidation(**data) + assert model.iteration_type == iter_type + + @pytest.mark.parametrize("iter_type", ["B", "", None]) + def test_invalid_iteration_type(self, iter_type, valid_campaign_config_with_only_mandatory_fields): + data = {**valid_campaign_config_with_only_mandatory_fields, "IterationType": iter_type} + with pytest.raises(ValidationError): + CampaignConfigValidation(**data) + + # StartDate + @pytest.mark.parametrize( + "start_date", + [ + "", # empty string + "invalid-date", # malformed value + ], + ) + def test_invalid_start_date(self, start_date, valid_campaign_config_with_only_mandatory_fields): + data = 
valid_campaign_config_with_only_mandatory_fields.copy() + data["StartDate"] = start_date + + with pytest.raises(ValidationError) as exc_info: + CampaignConfigValidation(**data) + + errors = exc_info.value.errors() + for error in errors: + assert error["loc"][0] == "StartDate" + + # EndDates + @pytest.mark.parametrize( + "end_date", + [ + "", # empty string + "31032025", # malformed value + ], + ) + def test_invalid_end_date(self, end_date, valid_campaign_config_with_only_mandatory_fields): + data = valid_campaign_config_with_only_mandatory_fields.copy() + data["EndDate"] = end_date + + with pytest.raises(ValidationError) as exc_info: + CampaignConfigValidation(**data) + + errors = exc_info.value.errors() + for error in errors: + assert error["loc"][0] == "EndDate" + + +class TestOptionalFieldsSchemaValidations: + @pytest.mark.parametrize("manager", [["alice"], ["bob"], ["carol"]]) + def test_manager_field(self, manager, valid_campaign_config_with_only_mandatory_fields): + data = {**valid_campaign_config_with_only_mandatory_fields, "Manager": manager} + model = CampaignConfigValidation(**data) + assert model.manager == manager + + @pytest.mark.parametrize("approver", [["alice"], ["bob"], ["carol"]]) + def test_approver_field(self, approver, valid_campaign_config_with_only_mandatory_fields): + data = {**valid_campaign_config_with_only_mandatory_fields, "Approver": approver} + model = CampaignConfigValidation(**data) + assert model.approver == approver + + @pytest.mark.parametrize("reviewer", [["alice"], ["bob"], ["carol"]]) + def test_reviewer_field(self, reviewer, valid_campaign_config_with_only_mandatory_fields): + data = {**valid_campaign_config_with_only_mandatory_fields, "Reviewer": reviewer} + model = CampaignConfigValidation(**data) + assert model.reviewer == reviewer + + @pytest.mark.parametrize("iteration_time", ["14:00", "09:30", "18:45"]) + def test_iteration_time_field(self, iteration_time, valid_campaign_config_with_only_mandatory_fields): + data = 
{**valid_campaign_config_with_only_mandatory_fields, "IterationTime": iteration_time} + model = CampaignConfigValidation(**data) + assert model.iteration_time == iteration_time + + @pytest.mark.parametrize("routing", ["email", "sms", "push"]) + def test_default_comms_routing_field(self, routing, valid_campaign_config_with_only_mandatory_fields): + data = {**valid_campaign_config_with_only_mandatory_fields, "DefaultCommsRouting": routing} + model = CampaignConfigValidation(**data) + assert model.default_comms_routing == routing + + @pytest.mark.parametrize("min_approval", [0, 1, 2]) + def test_approval_minimum_field(self, min_approval, valid_campaign_config_with_only_mandatory_fields): + data = {**valid_campaign_config_with_only_mandatory_fields, "ApprovalMinimum": min_approval} + model = CampaignConfigValidation(**data) + assert model.approval_minimum == min_approval + + @pytest.mark.parametrize("max_approval", [5, 10, 15]) + def test_approval_maximum_field(self, max_approval, valid_campaign_config_with_only_mandatory_fields): + data = {**valid_campaign_config_with_only_mandatory_fields, "ApprovalMaximum": max_approval} + model = CampaignConfigValidation(**data) + assert model.approval_maximum == max_approval + + +class TestBUCValidations: + # StartDate and EndDates + @pytest.mark.parametrize( + ("start_date", "end_date"), + [ + ("20250101", "20250331"), # valid range + ("20250601", "20250630"), # valid short range + ("20250101", "20250101"), # same day + ], + ) + def test_valid_start_and_end_dates_and_iteration_dates_relation( + self, start_date, end_date, valid_campaign_config_with_only_mandatory_fields + ): + data = valid_campaign_config_with_only_mandatory_fields.copy() + data["StartDate"] = start_date + data["EndDate"] = end_date + data["Iterations"][0]["IterationDate"] = "20241231" + CampaignConfigValidation(**data) + + @pytest.mark.parametrize( + ("start_date", "end_date"), + [ + ("20241230", "20250101"), # campaign start date is after the iteration date + 
("20250331", "20250101"), # end before start + ], + ) + def test_invalid_start_and_end_dates_and_iteration_dates_relation( + self, start_date, end_date, valid_campaign_config_with_only_mandatory_fields + ): + data = valid_campaign_config_with_only_mandatory_fields.copy() + data["StartDate"] = start_date + data["EndDate"] = end_date + data["Iterations"][0]["IterationDate"] = "20241231" + with pytest.raises(ValidationError): + CampaignConfigValidation(**data) + + # Iteration + def test_validate_iterations_non_empty(self, valid_campaign_config_with_only_mandatory_fields): + data = {**valid_campaign_config_with_only_mandatory_fields, "Iterations": []} + with pytest.raises(ValidationError) as error: + CampaignConfigValidation(**data) + errors = error.value.errors() + assert any(e["loc"][-1] == "Iterations" for e in errors), "Expected validation error on 'Iterations'" diff --git a/tests/unit/validation/test_iteration_cohorts_validator.py b/tests/unit/validation/test_iteration_cohorts_validator.py new file mode 100644 index 000000000..2b8c2ac4c --- /dev/null +++ b/tests/unit/validation/test_iteration_cohorts_validator.py @@ -0,0 +1,65 @@ +import pytest +from pydantic import ValidationError + +from rules_validation_api.validators.iteration_cohort_validator import IterationCohortValidation + + +class TestMandatoryFieldsSchemaValidations: + def test_missing_cohort_label_raises_error(self): + data = {"CohortGroup": "rsv_age_rolling"} + with pytest.raises(ValidationError) as exc_info: + IterationCohortValidation(**data) + assert "CohortLabel" in str(exc_info.value) + + def test_missing_cohort_group_raises_error(self): + data = {"CohortLabel": "rsv_75_rolling"} + with pytest.raises(ValidationError) as exc_info: + IterationCohortValidation(**data) + assert "CohortGroup" in str(exc_info.value) + + def test_valid_with_only_mandatory_fields(self): + data = {"CohortLabel": "rsv_75_rolling", "CohortGroup": "rsv_age_rolling"} + cohort = IterationCohortValidation(**data) + assert 
cohort.cohort_label == "rsv_75_rolling" + assert cohort.cohort_group == "rsv_age_rolling" + + +class TestOptionalFieldsSchemaValidations: + def test_positive_description_can_be_none(self): + data = {"CohortLabel": "rsv_75_rolling", "CohortGroup": "rsv_age_rolling", "PositiveDescription": None} + cohort = IterationCohortValidation(**data) + assert cohort.positive_description is None + + def test_negative_description_can_be_none(self): + data = {"CohortLabel": "rsv_75_rolling", "CohortGroup": "rsv_age_rolling", "NegativeDescription": None} + cohort = IterationCohortValidation(**data) + assert cohort.negative_description is None + + def test_priority_can_be_none(self): + data = {"CohortLabel": "rsv_75_rolling", "CohortGroup": "rsv_age_rolling", "Priority": None} + cohort = IterationCohortValidation(**data) + assert cohort.priority is None + + def test_positive_description_accepts_valid_value(self): + data = { + "CohortLabel": "rsv_75_rolling", + "CohortGroup": "rsv_age_rolling", + "PositiveDescription": "Eligible for benefits", + } + cohort = IterationCohortValidation(**data) + assert cohort.positive_description == "Eligible for benefits" + + def test_negative_description_accepts_valid_value(self): + data = { + "CohortLabel": "rsv_75_rolling", + "CohortGroup": "rsv_age_rolling", + "NegativeDescription": "Not eligible", + } + cohort = IterationCohortValidation(**data) + assert cohort.negative_description == "Not eligible" + + def test_priority_accepts_valid_value(self): + cohort_priority = 10 + data = {"CohortLabel": "rsv_75_rolling", "CohortGroup": "rsv_age_rolling", "Priority": cohort_priority} + cohort = IterationCohortValidation(**data) + assert cohort.priority == cohort_priority diff --git a/tests/unit/validation/test_iteration_rules_validator.py b/tests/unit/validation/test_iteration_rules_validator.py new file mode 100644 index 000000000..fd544528e --- /dev/null +++ b/tests/unit/validation/test_iteration_rules_validator.py @@ -0,0 +1,247 @@ +import pytest +from 
pydantic import ValidationError + +from rules_validation_api.validators.iteration_validator import IterationRuleValidation + + +class TestMandatoryFieldsSchemaValidations: + def test_campaign_config_with_only_mandatory_fields_configuration( + self, valid_iteration_rule_with_only_mandatory_fields + ): + try: + IterationRuleValidation(**valid_iteration_rule_with_only_mandatory_fields) + except ValidationError as e: + pytest.fail(f"Unexpected error during model instantiation: {e}") + + @pytest.mark.parametrize( + "mandatory_field", + ["Type", "Name", "Description", "Priority", "AttributeLevel", "Operator", "Comparator"], + ) + def test_missing_mandatory_fields(self, mandatory_field, valid_iteration_rule_with_only_mandatory_fields): + data = valid_iteration_rule_with_only_mandatory_fields.copy() + data.pop(mandatory_field, None) # Simulate missing field + with pytest.raises(ValidationError): + IterationRuleValidation(**data) + assert mandatory_field.lower() + + @pytest.mark.parametrize("type_value", ["F", "S", "R", "X", "Y"]) + def test_valid_type(self, type_value, valid_iteration_rule_with_only_mandatory_fields): + data = valid_iteration_rule_with_only_mandatory_fields.copy() + data["Type"] = type_value + result = IterationRuleValidation(**data) + assert result.type.value == type_value + + @pytest.mark.parametrize("type_value", ["Z", 123, None]) + def test_invalid_type(self, type_value, valid_iteration_rule_with_only_mandatory_fields): + data = valid_iteration_rule_with_only_mandatory_fields.copy() + data["Type"] = type_value + with pytest.raises(ValidationError): + IterationRuleValidation(**data) + + @pytest.mark.parametrize("name_value", ["", "ValidName", "Test_Rule_01"]) + def test_valid_name(self, name_value, valid_iteration_rule_with_only_mandatory_fields): + data = valid_iteration_rule_with_only_mandatory_fields.copy() + data["Name"] = name_value + result = IterationRuleValidation(**data) + assert result.name == name_value + + 
@pytest.mark.parametrize("name_value", [None, 42]) + def test_invalid_name(self, name_value, valid_iteration_rule_with_only_mandatory_fields): + data = valid_iteration_rule_with_only_mandatory_fields.copy() + data["Name"] = name_value + with pytest.raises(ValidationError): + IterationRuleValidation(**data) + + @pytest.mark.parametrize("description_value", ["", "A rule description", "Sample text"]) + def test_valid_description(self, description_value, valid_iteration_rule_with_only_mandatory_fields): + data = valid_iteration_rule_with_only_mandatory_fields.copy() + data["Description"] = description_value + result = IterationRuleValidation(**data) + assert result.description == description_value + + @pytest.mark.parametrize("description_value", [None]) + def test_invalid_description(self, description_value, valid_iteration_rule_with_only_mandatory_fields): + data = valid_iteration_rule_with_only_mandatory_fields.copy() + data["Description"] = description_value + with pytest.raises(ValidationError): + IterationRuleValidation(**data) + + @pytest.mark.parametrize("priority_value", [-1, -5, 1, 100, 999]) + def test_valid_priority(self, priority_value, valid_iteration_rule_with_only_mandatory_fields): + data = valid_iteration_rule_with_only_mandatory_fields.copy() + data["Priority"] = priority_value + result = IterationRuleValidation(**data) + assert result.priority == priority_value + + @pytest.mark.parametrize("priority_value", ["high", None]) + def test_invalid_priority(self, priority_value, valid_iteration_rule_with_only_mandatory_fields): + data = valid_iteration_rule_with_only_mandatory_fields.copy() + data["Priority"] = priority_value + with pytest.raises(ValidationError): + IterationRuleValidation(**data) + + @pytest.mark.parametrize("attribute_level", ["PERSON", "TARGET", "COHORT"]) + def test_valid_attribute_level(self, attribute_level, valid_iteration_rule_with_only_mandatory_fields): + data = valid_iteration_rule_with_only_mandatory_fields.copy() + 
data["AttributeLevel"] = attribute_level + data["AttributeName"] = None # Ignoring the validation constraint btw AttributeLevel and AttributeName + result = IterationRuleValidation(**data) + assert result.attribute_level == attribute_level + + @pytest.mark.parametrize("attribute_level", ["", None, 42, "basic", "BASIC"]) + def test_invalid_attribute_level(self, attribute_level, valid_iteration_rule_with_only_mandatory_fields): + data = valid_iteration_rule_with_only_mandatory_fields.copy() + data["AttributeLevel"] = attribute_level + data["AttributeName"] = None # Ignoring the validation constraint btw AttributeLevel and AttributeName + with pytest.raises(ValidationError): + IterationRuleValidation(**data) + + @pytest.mark.parametrize("operator_value", ["=", "!=", ">", "<=", "contains", "is_true"]) + def test_valid_operator(self, operator_value, valid_iteration_rule_with_only_mandatory_fields): + data = valid_iteration_rule_with_only_mandatory_fields.copy() + data["Operator"] = operator_value + result = IterationRuleValidation(**data) + assert result.operator.value == operator_value + + @pytest.mark.parametrize("operator_value", ["approx", "", None]) + def test_invalid_operator(self, operator_value, valid_iteration_rule_with_only_mandatory_fields): + data = valid_iteration_rule_with_only_mandatory_fields.copy() + data["Operator"] = operator_value + with pytest.raises(ValidationError): + IterationRuleValidation(**data) + + @pytest.mark.parametrize("comparator_value", ["status", "true", "0"]) + def test_valid_comparator(self, comparator_value, valid_iteration_rule_with_only_mandatory_fields): + data = valid_iteration_rule_with_only_mandatory_fields.copy() + data["Comparator"] = comparator_value + result = IterationRuleValidation(**data) + assert result.comparator == comparator_value + + @pytest.mark.parametrize("comparator_value", [None, 123]) + def test_invalid_comparator(self, comparator_value, valid_iteration_rule_with_only_mandatory_fields): + data = 
valid_iteration_rule_with_only_mandatory_fields.copy() + data["Comparator"] = comparator_value + with pytest.raises(ValidationError): + IterationRuleValidation(**data) + + @pytest.mark.parametrize( + ("rule_stop_input", "expected_bool"), + [ + (True, True), + (False, False), + ("Y", True), + ("N", False), + ("YES", False), + ("NO", False), + ("YEAH", False), + ("ONE", False), + ], + ) + def test_rule_stop_boolean_resolution( + self, rule_stop_input, expected_bool, valid_iteration_rule_with_only_mandatory_fields + ): + data = valid_iteration_rule_with_only_mandatory_fields.copy() + data["RuleStop"] = rule_stop_input + result = IterationRuleValidation(**data) + assert result.rule_stop is expected_bool + + +class TestOptionalFieldsSchemaValidations: + # AttributeName + @pytest.mark.parametrize("attr_name", ["status", "user_type", None]) + def test_valid_attribute_name(self, attr_name, valid_iteration_rule_with_only_mandatory_fields): + data = valid_iteration_rule_with_only_mandatory_fields.copy() + data["AttributeName"] = attr_name + result = IterationRuleValidation(**data) + assert result.attribute_name == attr_name + + @pytest.mark.parametrize("attr_name", [123, {}, []]) + def test_invalid_attribute_name(self, attr_name, valid_iteration_rule_with_only_mandatory_fields): + data = valid_iteration_rule_with_only_mandatory_fields.copy() + data["AttributeName"] = attr_name + with pytest.raises(ValidationError): + IterationRuleValidation(**data) + + # CohortLabel + @pytest.mark.parametrize("label", ["Cohort_A", "Segment_2025", None, ""]) + def test_valid_cohort_label(self, label, valid_iteration_rule_with_only_mandatory_fields): + data = valid_iteration_rule_with_only_mandatory_fields.copy() + data["CohortLabel"] = label + result = IterationRuleValidation(**data) + assert result.cohort_label == label + + @pytest.mark.parametrize("label", [123, [], {}]) + def test_invalid_cohort_label(self, label, valid_iteration_rule_with_only_mandatory_fields): + data = 
valid_iteration_rule_with_only_mandatory_fields.copy() + data["CohortLabel"] = label + with pytest.raises(ValidationError): + IterationRuleValidation(**data) + + # AttributeTarget + @pytest.mark.parametrize("target", ["target_value", None]) + def test_valid_attribute_target(self, target, valid_iteration_rule_with_only_mandatory_fields): + data = valid_iteration_rule_with_only_mandatory_fields.copy() + data["AttributeTarget"] = target + result = IterationRuleValidation(**data) + assert result.attribute_target == target + + @pytest.mark.parametrize("target", [123, [], {}]) + def test_invalid_attribute_target(self, target, valid_iteration_rule_with_only_mandatory_fields): + data = valid_iteration_rule_with_only_mandatory_fields.copy() + data["AttributeTarget"] = target + with pytest.raises(ValidationError): + IterationRuleValidation(**data) + + # RuleStop + @pytest.mark.parametrize("rule_stop_value", [True, False, "Y", "N", "YES", "NO", "YEAH", "ONE"]) + def test_valid_rule_stop(self, rule_stop_value, valid_iteration_rule_with_only_mandatory_fields): + data = valid_iteration_rule_with_only_mandatory_fields.copy() + data["RuleStop"] = rule_stop_value + result = IterationRuleValidation(**data) + assert isinstance(result.rule_stop, bool) + + @pytest.mark.parametrize("rule_stop_value", [{}, None]) + def test_invalid_rule_stop(self, rule_stop_value, valid_iteration_rule_with_only_mandatory_fields): + data = valid_iteration_rule_with_only_mandatory_fields.copy() + data["RuleStop"] = rule_stop_value + with pytest.raises(ValidationError): + IterationRuleValidation(**data) + + # CommsRouting + @pytest.mark.parametrize("routing_value", ["route_A", None]) + def test_valid_comms_routing(self, routing_value, valid_iteration_rule_with_only_mandatory_fields): + data = valid_iteration_rule_with_only_mandatory_fields.copy() + data["CommsRouting"] = routing_value + result = IterationRuleValidation(**data) + assert result.comms_routing == routing_value + + 
@pytest.mark.parametrize("routing_value", [123, [], {}]) + def test_invalid_comms_routing(self, routing_value, valid_iteration_rule_with_only_mandatory_fields): + data = valid_iteration_rule_with_only_mandatory_fields.copy() + data["CommsRouting"] = routing_value + with pytest.raises(ValidationError): + IterationRuleValidation(**data) + + +class TestBUCValidations: + @pytest.mark.parametrize("attribute_name", [None, "", "COHORT_LABEL"]) + def test_valid_when_attribute_level_is_cohort_then_attribute_name_should_be_none_or_cohort_label( + self, attribute_name, valid_iteration_rule_with_only_mandatory_fields + ): + data = valid_iteration_rule_with_only_mandatory_fields.copy() + data["AttributeLevel"] = "COHORT" + data["AttributeName"] = attribute_name + result = IterationRuleValidation(**data) + assert result.attribute_name == attribute_name + + @pytest.mark.parametrize("attribute_name", ["LAST_SUCCESSFUL_DATE", "cohort_label"]) + def test_invalid_when_attribute_level_is_cohort_but_attribute_name_is_neither_none_nor_cohort_label( + self, attribute_name, valid_iteration_rule_with_only_mandatory_fields + ): + data = valid_iteration_rule_with_only_mandatory_fields.copy() + data["AttributeLevel"] = "COHORT" + data["AttributeName"] = attribute_name + with pytest.raises(ValidationError) as error: + IterationRuleValidation(**data) + msg = "When attribute_level is COHORT, attribute_name must be COHORT_LABEL or None (default:COHORT_LABEL)" + assert msg in str(error.value) diff --git a/tests/unit/validation/test_iteration_validator.py b/tests/unit/validation/test_iteration_validator.py new file mode 100644 index 000000000..bc2d54633 --- /dev/null +++ b/tests/unit/validation/test_iteration_validator.py @@ -0,0 +1,408 @@ +from datetime import UTC, datetime +from typing import ClassVar + +import pytest +from pydantic import ValidationError + +from rules_validation_api.validators.iteration_validator import IterationValidation + + +class TestMandatoryFieldsSchemaValidations: + def 
test_campaign_config_with_only_mandatory_fields_configuration( + self, valid_campaign_config_with_only_mandatory_fields + ): + try: + IterationValidation(**(valid_campaign_config_with_only_mandatory_fields["Iterations"][0])) + except ValidationError as e: + pytest.fail(f"Unexpected error during model instantiation: {e}") + + @pytest.mark.parametrize( + "mandatory_field", + [ + "ID", + "Version", + "Name", + "IterationDate", + "Type", + "DefaultCommsRouting", + "DefaultNotEligibleRouting", + "DefaultNotActionableRouting", + "IterationCohorts", + "IterationRules", + "ActionsMapper", + ], + ) + def test_missing_mandatory_fields(self, mandatory_field, valid_campaign_config_with_only_mandatory_fields): + data = valid_campaign_config_with_only_mandatory_fields["Iterations"][0].copy() + data.pop(mandatory_field, None) # Simulate missing field + with pytest.raises(ValidationError): + IterationValidation(**data) + assert mandatory_field.lower() + + # ID + @pytest.mark.parametrize("id_value", ["ITER001", "X123", "IT01"]) + def test_valid_id(self, id_value, valid_campaign_config_with_only_mandatory_fields): + data = {**valid_campaign_config_with_only_mandatory_fields["Iterations"][0], "ID": id_value} + model = IterationValidation(**data) + assert model.id == id_value + + # Version + @pytest.mark.parametrize("version_value", [1, 2, 100]) + def test_valid_version(self, version_value, valid_campaign_config_with_only_mandatory_fields): + data = {**valid_campaign_config_with_only_mandatory_fields["Iterations"][0], "Version": version_value} + model = IterationValidation(**data) + assert model.version == version_value + + # Name + @pytest.mark.parametrize("name_value", ["Mid-January Push", "Spring Surge", "Early Outreach"]) + def test_valid_name(self, name_value, valid_campaign_config_with_only_mandatory_fields): + data = {**valid_campaign_config_with_only_mandatory_fields["Iterations"][0], "Name": name_value} + model = IterationValidation(**data) + assert model.name == name_value + 
+ # IterationDate + @pytest.mark.parametrize("date_value", ["20250101", "20250215", "20250301"]) + def test_valid_iteration_date(self, date_value, valid_campaign_config_with_only_mandatory_fields): + data = {**valid_campaign_config_with_only_mandatory_fields["Iterations"][0], "IterationDate": date_value} + model = IterationValidation(**data) + expected_date = datetime.strptime(str(date_value), "%Y%m%d").replace(tzinfo=UTC).date() + assert model.iteration_date == expected_date, f"Expected {expected_date}, got {model.iteration_date}" + + # Type + @pytest.mark.parametrize("type_value", ["A", "M", "S", "O"]) + def test_valid_type(self, type_value, valid_campaign_config_with_only_mandatory_fields): + data = {**valid_campaign_config_with_only_mandatory_fields["Iterations"][0], "Type": type_value} + model = IterationValidation(**data) + assert model.type == type_value + + @pytest.mark.parametrize("type_value", ["", "Z", None]) + def test_invalid_type(self, type_value, valid_campaign_config_with_only_mandatory_fields): + data = {**valid_campaign_config_with_only_mandatory_fields["Iterations"][0], "Type": type_value} + with pytest.raises(ValidationError): + IterationValidation(**data) + + # DefaultCommsRouting + @pytest.mark.parametrize("routing_value", ["BOOK_NBS"]) + def test_valid_default_comms_routing(self, routing_value, valid_campaign_config_with_only_mandatory_fields): + data = { + **valid_campaign_config_with_only_mandatory_fields["Iterations"][0], + "DefaultCommsRouting": routing_value, + "ActionsMapper": { + "BOOK_NBS": { + "ExternalRoutingCode": "BookNBS", + "ActionDescription": "", + "ActionType": "ButtonWithAuthLink", + "UrlLink": "http://www.nhs.uk/book-rsv", + "UrlLabel": "Continue to booking", + } + }, + } + model = IterationValidation(**data) + assert model.default_comms_routing == routing_value + + # DefaultNotEligibleRouting + @pytest.mark.parametrize("routing_value", ["", "BOOK_NBS"]) + def test_valid_default_not_eligible_routing(self, routing_value, 
valid_campaign_config_with_only_mandatory_fields): + data = { + **valid_campaign_config_with_only_mandatory_fields["Iterations"][0], + "DefaultNotEligibleRouting": routing_value, + "ActionsMapper": { + "BOOK_NBS": { + "ExternalRoutingCode": "BookNBS", + "ActionDescription": "", + "ActionType": "ButtonWithAuthLink", + "UrlLink": "http://www.nhs.uk/book-rsv", + "UrlLabel": "Continue to booking", + } + }, + } + model = IterationValidation(**data) + assert model.default_not_eligible_routing == routing_value + + # DefaultNotActionableRouting + @pytest.mark.parametrize("routing_value", ["", "BOOK_NBS"]) + def test_valid_default_not_actionable_routing( + self, routing_value, valid_campaign_config_with_only_mandatory_fields + ): + data = { + **valid_campaign_config_with_only_mandatory_fields["Iterations"][0], + "DefaultNotActionableRouting": routing_value, + "ActionsMapper": { + "BOOK_NBS": { + "ExternalRoutingCode": "BookNBS", + "ActionDescription": "", + "ActionType": "ButtonWithAuthLink", + "UrlLink": "http://www.nhs.uk/book-rsv", + "UrlLabel": "Continue to booking", + } + }, + } + model = IterationValidation(**data) + assert model.default_not_actionable_routing == routing_value + + def test_invalid_actions_mapper_empty_key( + self, valid_campaign_config_with_only_mandatory_fields, valid_available_action + ): + actions_mapper = {"": valid_available_action, "action2": valid_available_action} + data = { + **valid_campaign_config_with_only_mandatory_fields["Iterations"][0], + "ActionsMapper": actions_mapper, + } + with pytest.raises(ValidationError): + IterationValidation(**data) + + +class TestOptionalFieldsSchemaValidations: + @pytest.mark.parametrize("iteration_number", [1, 5, 10]) + def test_iteration_number(self, iteration_number, valid_campaign_config_with_only_mandatory_fields): + data = { + **valid_campaign_config_with_only_mandatory_fields["Iterations"][0], + "IterationNumber": iteration_number, + } + model = IterationValidation(**data) + assert 
model.iteration_number == iteration_number + + @pytest.mark.parametrize("approval_minimum", [0, 25, 99]) + def test_approval_minimum(self, approval_minimum, valid_campaign_config_with_only_mandatory_fields): + data = { + **valid_campaign_config_with_only_mandatory_fields["Iterations"][0], + "ApprovalMinimum": approval_minimum, + } + model = IterationValidation(**data) + assert model.approval_minimum == approval_minimum + + @pytest.mark.parametrize("approval_maximum", [100, 250, 999]) + def test_approval_maximum(self, approval_maximum, valid_campaign_config_with_only_mandatory_fields): + data = { + **valid_campaign_config_with_only_mandatory_fields["Iterations"][0], + "ApprovalMaximum": approval_maximum, + } + model = IterationValidation(**data) + assert model.approval_maximum == approval_maximum + + +class TestIterationCohortsSchemaValidations: + book_local_1_action: ClassVar[dict] = { + "ExternalRoutingCode": "BookLocal_1", + "ActionDescription": "##Getting the vaccine\n" + "You can get an RSV vaccination at your GP surgery.\n" + "Your GP surgery may contact you about getting the RSV vaccine. " + "This may be by letter, text, phone call, email or through the NHS App. " + "You do not need to wait to be contacted before booking your vaccination.", + "ActionType": "InfoText", + } + + book_local_2_action: ClassVar[dict] = { + "ExternalRoutingCode": "BookLocal_2", + "ActionDescription": "##Getting the vaccine\n" + "You can get an RSV vaccination at your GP surgery.\n" + "Your GP surgery may contact you about getting the RSV vaccine. " + "This may be by letter, text, phone call, email or through the NHS App. 
" + "You do not need to wait to be contacted before booking your vaccination.", + "ActionType": "InfoText", + } + + def test_valid_iteration_if_actions_mapper_has_entry_for_the_provided_default_routing_key( + self, valid_campaign_config_with_only_mandatory_fields + ): + data = { + **valid_campaign_config_with_only_mandatory_fields["Iterations"][0], + "DefaultCommsRouting": "BOOK_LOCAL_1|BOOK_LOCAL_2", + "ActionsMapper": {"BOOK_LOCAL_1": self.book_local_1_action, "BOOK_LOCAL_2": self.book_local_2_action}, + } + IterationValidation(**data) + + def test_invalid_iteration_if_actions_mapper_has_doesnt_have_entries_for_every_default_not_default_routing_keys( + self, valid_campaign_config_with_only_mandatory_fields + ): + data = { + **valid_campaign_config_with_only_mandatory_fields["Iterations"][0], + "DefaultCommsRouting": "BOOK_LOCAL_1|BOOK_LOCAL_2", + "ActionsMapper": {"BOOK_LOCAL_1": self.book_local_1_action}, + } + with pytest.raises(ValidationError) as error: + IterationValidation(**data) + + errors = error.value.errors() + assert any(e["loc"][-1] == "actions_mapper" and "BOOK_LOCAL_2" in str(e["msg"]) for e in errors), ( + "Expected validation error for missing BOOK_LOCAL_2 entry in ActionsMapper" + ) + + def test_invalid_iteration_if_actions_mapper_has_no_entry_for_the_provided_default_routing_key( + self, valid_campaign_config_with_only_mandatory_fields + ): + data = { + **valid_campaign_config_with_only_mandatory_fields["Iterations"][0], + "DefaultCommsRouting": "BOOK_LOCAL", + "ActionsMapper": {}, + } # Missing BOOK_LOCAL in ActionsMapper + + with pytest.raises(ValidationError) as error: + IterationValidation(**data) + + errors = error.value.errors() + assert any(e["loc"][-1] == "actions_mapper" and "BOOK_LOCAL" in str(e["msg"]) for e in errors), ( + "Expected validation error for missing BOOK_LOCAL entry in ActionsMapper" + ) + + def test_valid_iteration_if_actions_mapper_has_entry_for_the_provided_default_not_eligible_routing_key( + self, 
valid_campaign_config_with_only_mandatory_fields + ): + data = { + **valid_campaign_config_with_only_mandatory_fields["Iterations"][0], + "DefaultNotEligibleRouting": "BOOK_LOCAL_1|BOOK_LOCAL_2", + "ActionsMapper": {"BOOK_LOCAL_1": self.book_local_1_action, "BOOK_LOCAL_2": self.book_local_2_action}, + } + IterationValidation(**data) + + def test_invalid_iteration_if_actions_mapper_has_doesnt_have_entries_for_every_default_not_eligible_routing_keys( + self, valid_campaign_config_with_only_mandatory_fields + ): + data = { + **valid_campaign_config_with_only_mandatory_fields["Iterations"][0], + "DefaultNotEligibleRouting": "BOOK_LOCAL_1|BOOK_LOCAL_2", + "ActionsMapper": {"BOOK_LOCAL_1": self.book_local_1_action}, + } + with pytest.raises(ValidationError) as error: + IterationValidation(**data) + + errors = error.value.errors() + assert any(e["loc"][-1] == "actions_mapper" and "BOOK_LOCAL_2" in str(e["msg"]) for e in errors), ( + "Expected validation error for missing BOOK_LOCAL_2 entry in ActionsMapper" + ) + + def test_invalid_iteration_if_actions_mapper_has_no_entry_for_the_provided_default_not_eligible_routing( + self, valid_campaign_config_with_only_mandatory_fields + ): + data = { + **valid_campaign_config_with_only_mandatory_fields["Iterations"][0], + "DefaultNotEligibleRouting": "BOOK_LOCAL", + "ActionsMapper": {}, + } # Missing BOOK_LOCAL in ActionsMapper + + with pytest.raises(ValidationError) as error: + IterationValidation(**data) + + errors = error.value.errors() + assert any(e["loc"][-1] == "actions_mapper" and "BOOK_LOCAL" in str(e["msg"]) for e in errors), ( + "Expected validation error for missing BOOK_LOCAL entry in ActionsMapper" + ) + + def test_valid_iteration_if_actions_mapper_has_entry_for_the_provided_default_not_actionable_routing_key( + self, valid_campaign_config_with_only_mandatory_fields + ): + data = { + **valid_campaign_config_with_only_mandatory_fields["Iterations"][0], + "DefaultNotActionableRouting": "BOOK_LOCAL_1|BOOK_LOCAL_2", + 
"ActionsMapper": {"BOOK_LOCAL_1": self.book_local_1_action, "BOOK_LOCAL_2": self.book_local_2_action}, + } + IterationValidation(**data) + + def test_invalid_iteration_if_actions_mapper_has_doesnt_have_entries_for_every_default_not_actionable_routing_keys( + self, valid_campaign_config_with_only_mandatory_fields + ): + data = { + **valid_campaign_config_with_only_mandatory_fields["Iterations"][0], + "DefaultNotActionableRouting": "BOOK_LOCAL_1|BOOK_LOCAL_2", + "ActionsMapper": {"BOOK_LOCAL_1": self.book_local_1_action}, + } + with pytest.raises(ValidationError) as error: + IterationValidation(**data) + + errors = error.value.errors() + assert any(e["loc"][-1] == "actions_mapper" and "BOOK_LOCAL_2" in str(e["msg"]) for e in errors), ( + "Expected validation error for missing BOOK_LOCAL_2 entry in ActionsMapper" + ) + + def test_invalid_iteration_if_actions_mapper_has_no_entry_for_the_provided_default_not_actionable_routing( + self, valid_campaign_config_with_only_mandatory_fields + ): + data = { + **valid_campaign_config_with_only_mandatory_fields["Iterations"][0], + "DefaultNotActionableRouting": "BOOK_LOCAL", + "ActionsMapper": {}, + } # Missing BOOK_LOCAL in ActionsMapper + + with pytest.raises(ValidationError) as error: + IterationValidation(**data) + + errors = error.value.errors() + assert any(e["loc"][-1] == "actions_mapper" and "BOOK_LOCAL" in str(e["msg"]) for e in errors), ( + "Expected validation error for missing BOOK_LOCAL entry in ActionsMapper" + ) + + @pytest.mark.parametrize("rule_type", ["R", "X", "Y", "F"]) + @pytest.mark.parametrize( + ("default_routing", "actions_mapper"), + [ + ("BOOK_LOCAL_1|BOOK_LOCAL_2", {"BOOK_LOCAL_1": book_local_1_action, "BOOK_LOCAL_2": book_local_2_action}), + ("BOOK_LOCAL_1", {"BOOK_LOCAL_1": book_local_1_action}), + ("", {"BOOK_LOCAL_1": book_local_1_action}), + ], + ) + def test_valid_iteration_if_actions_mapper_exists_for_rule_routing( + self, valid_campaign_config_with_only_mandatory_fields, rule_type, 
default_routing, actions_mapper + ): + iteration_rule = { + "Type": rule_type, + "Name": "Test Rule", + "Description": "Test rule description", + "Operator": "is_empty", + "Comparator": "", + "AttributeTarget": "RSV", + "AttributeLevel": "TARGET", + "AttributeName": "LAST_SUCCESSFUL_DATE", + "CohortLabel": "elid_all_people", + "Priority": 100, + "CommsRouting": default_routing, + } + + iteration_data = { + **valid_campaign_config_with_only_mandatory_fields["Iterations"][0], + "IterationRules": [iteration_rule], + "ActionsMapper": actions_mapper, + } + + iteration = IterationValidation(**iteration_data) + assert iteration is not None, ( + f"Expected iteration to be valid for rule type '{rule_type}' with routing '{default_routing}'" + ) + + @pytest.mark.parametrize("rule_type", ["R", "X", "Y"]) + @pytest.mark.parametrize( + ("default_routing", "actions_mapper"), + [ + ("BOOK_LOCAL_1|BOOK_LOCAL_2", {"BOOK_LOCAL_2": book_local_2_action}), + ("BOOK_LOCAL_1", {"BOOK_LOCAL_2": book_local_2_action}), + ], + ) + def test_invalid_iteration_if_actions_mapper_exists_for_rule_routing( + self, valid_campaign_config_with_only_mandatory_fields, rule_type, default_routing, actions_mapper + ): + iteration_rule = { + "Type": rule_type, + "Name": "Test Rule", + "Description": "Test rule description", + "Operator": "is_empty", + "Comparator": "", + "AttributeTarget": "RSV", + "AttributeLevel": "TARGET", + "AttributeName": "LAST_SUCCESSFUL_DATE", + "CohortLabel": "elid_all_people", + "Priority": 100, + "CommsRouting": default_routing, + } + + iteration_data = { + **valid_campaign_config_with_only_mandatory_fields["Iterations"][0], + "IterationRules": [iteration_rule], + "ActionsMapper": actions_mapper, + } + + with pytest.raises(ValidationError) as error: + IterationValidation(**iteration_data) + + errors = error.value.errors() + assert any(e["loc"][-1] == "iteration_rules" and "BOOK_LOCAL_1" in str(e["msg"]) for e in errors), ( + "Expected validation error for missing BOOK_LOCAL entry 
in ActionsMapper" + ) diff --git a/tests/unit/validation/test_rule_validator.py b/tests/unit/validation/test_rule_validator.py new file mode 100644 index 000000000..a24419824 --- /dev/null +++ b/tests/unit/validation/test_rule_validator.py @@ -0,0 +1,17 @@ +import pytest +from pydantic import ValidationError + +from rules_validation_api.validators.rules_validator import RulesValidation + + +def test_valid_campaign_config(valid_campaign_config_with_only_mandatory_fields): + config_data = {"campaign_config": valid_campaign_config_with_only_mandatory_fields} + validated = RulesValidation(**config_data) + assert validated.campaign_config.name is not None + + +def test_invalid_campaign_config_missing_field(): + invalid_data = {} + + with pytest.raises(ValidationError): + RulesValidation(**invalid_data) diff --git a/tests/unit/views/test_eligibility.py b/tests/unit/views/test_eligibility.py index cd0dda37b..04d223236 100644 --- a/tests/unit/views/test_eligibility.py +++ b/tests/unit/views/test_eligibility.py @@ -14,7 +14,7 @@ from wireup.integration.flask import get_app_container from eligibility_signposting_api.audit.audit_service import AuditService -from eligibility_signposting_api.model.eligibility import ( +from eligibility_signposting_api.model.eligibility_status import ( ActionCode, ActionDescription, ActionType, @@ -22,11 +22,6 @@ Condition, EligibilityStatus, NHSNumber, - Reason, - RuleDescription, - RuleName, - RulePriority, - RuleType, Status, SuggestedAction, UrlLabel, @@ -39,13 +34,13 @@ build_suitability_results, get_or_default_query_params, ) -from eligibility_signposting_api.views.response_model import eligibility +from eligibility_signposting_api.views.response_model import eligibility_response from tests.fixtures.builders.model.eligibility import ( CohortResultFactory, ConditionFactory, EligibilityStatusFactory, ) -from tests.fixtures.matchers.eligibility import is_eligibility_cohort, is_suitability_rule +from tests.fixtures.matchers.eligibility import 
is_eligibility_cohort logger = logging.getLogger(__name__) @@ -121,6 +116,7 @@ def test_no_nhs_number_given(app: Flask, client: FlaskClient): response, is_response() .with_status_code(HTTPStatus.NOT_FOUND) + .with_headers(has_entries({"Content-Type": "application/fhir+json"})) .and_text( is_json_that( has_entries( @@ -158,6 +154,7 @@ def test_unexpected_error(app: Flask, client: FlaskClient): response, is_response() .with_status_code(HTTPStatus.INTERNAL_SERVER_ERROR) + .with_headers(has_entries({"Content-Type": "application/fhir+json"})) .and_text( is_json_that( has_entries( @@ -251,166 +248,6 @@ def test_build_eligibility_cohorts_results_consider_only_cohorts_groups_that_has ) -def test_build_suitability_results_with_deduplication(): - condition: Condition = ConditionFactory.build( - status=Status.not_actionable, - cohort_results=[ - CohortResultFactory.build( - cohort_code="cohort_group1", - status=Status.not_actionable, - reasons=[ - Reason( - rule_type=RuleType.suppression, - rule_name=RuleName("Exclude too young less than 75"), - rule_description=RuleDescription("your age is greater than 75"), - matcher_matched=False, - rule_priority=RulePriority(1), - ), - Reason( - rule_type=RuleType.suppression, - rule_name=RuleName("Exclude too young less than 75"), - rule_description=RuleDescription("your age is greater than 75"), - matcher_matched=False, - rule_priority=RulePriority(1), - ), - Reason( - rule_type=RuleType.suppression, - rule_name=RuleName("Exclude more than 100"), - rule_description=RuleDescription("your age is greater than 100"), - matcher_matched=False, - rule_priority=RulePriority(1), - ), - ], - ), - CohortResultFactory.build( - cohort_code="cohort_group2", - status=Status.not_actionable, - reasons=[ - Reason( - rule_type=RuleType.suppression, - rule_name=RuleName("Exclude too young less than 75"), - rule_description=RuleDescription("your age is greater than 75"), - matcher_matched=False, - rule_priority=RulePriority(1), - ) - ], - ), - 
CohortResultFactory.build( - cohort_code="cohort_group3", - status=Status.not_eligible, - reasons=[ - Reason( - rule_type=RuleType.filter, - rule_name=RuleName("Exclude is present in sw1"), - rule_description=RuleDescription("your a member of sw1"), - matcher_matched=False, - rule_priority=RulePriority(1), - ) - ], - ), - CohortResultFactory.build( - cohort_code="cohort_group4", - description="", - status=Status.not_actionable, - reasons=[ - Reason( - rule_type=RuleType.filter, - rule_name=RuleName("Already vaccinated"), - rule_description=RuleDescription("you have already vaccinated"), - matcher_matched=False, - rule_priority=RulePriority(1), - ) - ], - ), - ], - ) - - results = build_suitability_results(condition) - - assert_that( - results, - contains_exactly( - is_suitability_rule() - .with_rule_code("Exclude too young less than 75") - .and_rule_text("your age is greater than 75"), - is_suitability_rule().with_rule_code("Exclude more than 100").and_rule_text("your age is greater than 100"), - is_suitability_rule().with_rule_code("Already vaccinated").and_rule_text("you have already vaccinated"), - ), - ) - - -def test_build_suitability_results_when_rule_text_is_empty_or_null(): - condition: Condition = ConditionFactory.build( - status=Status.not_actionable, - cohort_results=[ - CohortResultFactory.build( - cohort_code="cohort_group1", - status=Status.not_actionable, - reasons=[ - Reason( - rule_type=RuleType.suppression, - rule_name=RuleName("Exclude too young less than 75"), - rule_description=RuleDescription("your age is greater than 75"), - matcher_matched=False, - rule_priority=RulePriority(1), - ), - Reason( - rule_type=RuleType.suppression, - rule_name=RuleName("Exclude more than 100"), - rule_description=RuleDescription(""), - matcher_matched=False, - rule_priority=RulePriority(1), - ), - Reason( - rule_type=RuleType.suppression, - rule_name=RuleName("Exclude more than 100"), - matcher_matched=False, - rule_description=None, - 
rule_priority=RulePriority(1), - ), - ], - ), - CohortResultFactory.build( - cohort_code="cohort_group2", - status=Status.not_actionable, - reasons=[ - Reason( - rule_type=RuleType.filter, - rule_name=RuleName("Exclude is present in sw1"), - rule_description=RuleDescription(""), - matcher_matched=False, - rule_priority=RulePriority(1), - ) - ], - ), - CohortResultFactory.build( - cohort_code="cohort_group3", - status=Status.not_actionable, - reasons=[ - Reason( - rule_type=RuleType.filter, - rule_name=RuleName("Exclude is present in sw1"), - rule_description=None, - matcher_matched=False, - rule_priority=RulePriority(1), - ) - ], - ), - ], - ) - - results = build_suitability_results(condition) - - assert_that( - results, - contains_exactly( - is_suitability_rule() - .with_rule_code("Exclude too young less than 75") - .and_rule_text("your age is greater than 75") - ), - ) - - def test_no_suitability_rules_for_actionable(): condition = ConditionFactory.build(status=Status.actionable, cohort_results=[]) @@ -433,12 +270,12 @@ def test_no_suitability_rules_for_actionable(): ) ], [ - eligibility.Action( - actionType=eligibility.ActionType("TYPE_A"), - actionCode=eligibility.ActionCode("CODE123"), - description=eligibility.Description("Some description"), - urlLink=eligibility.UrlLink("https://example.com"), - urlLabel=eligibility.UrlLabel("Learn more"), + eligibility_response.Action( + actionType=eligibility_response.ActionType("TYPE_A"), + actionCode=eligibility_response.ActionCode("CODE123"), + description=eligibility_response.Description("Some description"), + urlLink=eligibility_response.UrlLink("https://example.com"), + urlLabel=eligibility_response.UrlLabel("Learn more"), ) ], ), @@ -453,9 +290,9 @@ def test_no_suitability_rules_for_actionable(): ) ], [ - eligibility.Action( - actionType=eligibility.ActionType("TYPE_B"), - actionCode=eligibility.ActionCode("CODE123"), + eligibility_response.Action( + actionType=eligibility_response.ActionType("TYPE_B"), + 
actionCode=eligibility_response.ActionCode("CODE123"), description="", urlLink="", urlLabel="", @@ -481,23 +318,23 @@ def test_build_actions(suggested_actions, expected): def test_excludes_nulls_via_build_response(client: FlaskClient): - mocked_response = eligibility.EligibilityResponse( + mocked_response = eligibility_response.EligibilityResponse( responseId=uuid4(), - meta=eligibility.Meta(lastUpdated=eligibility.LastUpdated(datetime(2023, 1, 1, tzinfo=UTC))), + meta=eligibility_response.Meta(lastUpdated=eligibility_response.LastUpdated(datetime(2023, 1, 1, tzinfo=UTC))), processedSuggestions=[ - eligibility.ProcessedSuggestion( - condition=eligibility.ConditionName("ConditionA"), - status=eligibility.Status.actionable, - statusText=eligibility.StatusText("Go ahead"), + eligibility_response.ProcessedSuggestion( + condition=eligibility_response.ConditionName("ConditionA"), + status=eligibility_response.Status.actionable, + statusText=eligibility_response.StatusText("Go ahead"), eligibilityCohorts=[], suitabilityRules=[], actions=[ - eligibility.Action( - actionType=eligibility.ActionType("TYPE_A"), - actionCode=eligibility.ActionCode("CODE123"), - description=eligibility.Description(""), # Should be an empty string - urlLink=eligibility.UrlLink(""), # Should be an empty string - urlLabel=eligibility.UrlLabel(""), # Should be an empty string + eligibility_response.Action( + actionType=eligibility_response.ActionType("TYPE_A"), + actionCode=eligibility_response.ActionCode("CODE123"), + description=eligibility_response.Description(""), # Should be an empty string + urlLink=eligibility_response.UrlLink(""), # Should be an empty string + urlLabel=eligibility_response.UrlLabel(""), # Should be an empty string ) ], ) @@ -533,23 +370,23 @@ def test_excludes_nulls_via_build_response(client: FlaskClient): def test_build_response_include_values_that_are_not_null(client: FlaskClient): - mocked_response = eligibility.EligibilityResponse( + mocked_response = 
eligibility_response.EligibilityResponse( responseId=uuid4(), - meta=eligibility.Meta(lastUpdated=eligibility.LastUpdated(datetime(2023, 1, 1, tzinfo=UTC))), + meta=eligibility_response.Meta(lastUpdated=eligibility_response.LastUpdated(datetime(2023, 1, 1, tzinfo=UTC))), processedSuggestions=[ - eligibility.ProcessedSuggestion( - condition=eligibility.ConditionName("ConditionA"), - status=eligibility.Status.actionable, - statusText=eligibility.StatusText("Go ahead"), + eligibility_response.ProcessedSuggestion( + condition=eligibility_response.ConditionName("ConditionA"), + status=eligibility_response.Status.actionable, + statusText=eligibility_response.StatusText("Go ahead"), eligibilityCohorts=[], suitabilityRules=[], actions=[ - eligibility.Action( - actionType=eligibility.ActionType("TYPE_A"), - actionCode=eligibility.ActionCode("CODE123"), - description=eligibility.Description("Contact GP"), - urlLink=eligibility.UrlLink("https://example.dummy/"), - urlLabel=eligibility.UrlLabel("GP contact"), + eligibility_response.Action( + actionType=eligibility_response.ActionType("TYPE_A"), + actionCode=eligibility_response.ActionCode("CODE123"), + description=eligibility_response.Description("Contact GP"), + urlLink=eligibility_response.UrlLink("https://example.dummy/"), + urlLabel=eligibility_response.UrlLabel("GP contact"), ) ], ) @@ -584,46 +421,6 @@ def test_build_response_include_values_that_are_not_null(client: FlaskClient): assert action["urlLabel"] == "GP contact" -@pytest.mark.parametrize( - ("headers", "expected_request_id"), - [ - ({"X-Request-ID": "test-request-id-123"}, "test-request-id-123"), - ( - {"X-Request-ID": ""}, - "", - ), - ( - {}, # No headers provided - None, - ), - ], -) -def test_request_id_from_header_logging_variants( - app: Flask, client: FlaskClient, caplog, headers: dict[str, str], expected_request_id: str -): - """ - This test checks that the x-request-ID is logged so that it can be used to correlate logs - with that of the logs from 
api-gateway - """ - with ( - get_app_container(app).override.service(EligibilityService, new=FakeEligibilityService()), - get_app_container(app).override.service(AuditService, new=FakeAuditService()), - ): - with caplog.at_level(logging.INFO): - response = client.get("/patient-check/12345", headers=headers) - - request_id_logged = False - for record in caplog.records: - request_id = getattr(record, "X-Request-ID", None) - - if request_id == expected_request_id: - request_id_logged = True - break - - assert request_id_logged - assert response.status_code == HTTPStatus.OK - - def test_get_or_default_query_params_with_no_args(app: Flask): with app.test_request_context("/patient-check"): result = get_or_default_query_params()