|
| 1 | +import http |
| 2 | + |
| 3 | +import pytest |
| 4 | + |
| 5 | +from tests.e2e.tests import test_config |
| 6 | +from tests.e2e.utils.data_helper import initialise_tests, load_all_expected_responses |
| 7 | + |
# Update the below with the configuration values specified in test_config.py
all_data, dto = initialise_tests(test_config.IN_PROGRESS_TEST_DATA)
all_expected_responses = load_all_expected_responses(test_config.IN_PROGRESS_RESPONSES)
config_path = test_config.IN_PROGRESS_CONFIGS

# One parametrize entry per scenario file. The id embeds the data filename and
# the scenario name so a failing test can be traced straight back to its data.
# (Previously the id used a hard-coded "(unknown)" placeholder and left the
# bound `filename` variable unused.)
param_list = list(all_data.items())
id_list = [f"{filename} - {scenario.get('scenario_name', 'No Scenario')}" for filename, scenario in param_list]
| 15 | + |
| 16 | + |
@pytest.mark.parametrize(("filename", "scenario"), param_list, ids=id_list)
def test_run_in_progress_tests(filename, scenario, eligibility_client, get_scenario_params):
    """Run one in-progress scenario end-to-end against the eligibility API.

    Resolves the scenario's request parameters via the ``get_scenario_params``
    fixture, issues the request through ``eligibility_client``, and asserts the
    status code and response body match the recorded expected response for
    this scenario file.
    """
    nhs_number, config_filenames, request_headers, query_params, expected_response_code = get_scenario_params(
        scenario, config_path
    )

    actual_response = eligibility_client.make_request(
        nhs_number, headers=request_headers, query_params=query_params, strict_ssl=False
    )
    # Default to {} at both levels: a missing expected-response entry then
    # produces a readable assertion failure instead of an AttributeError on
    # None (the previous `.get(filename).get(...)` chain crashed in that case).
    expected_response = all_expected_responses.get(filename, {}).get("response_items", {})

    # Scenarios that do not specify an expected status code default to 200 OK.
    expected_response_code = expected_response_code or http.HTTPStatus.OK

    assert actual_response["status_code"] == expected_response_code
    assert actual_response["body"] == expected_response, (
        f"\n❌ Mismatch in test: {filename}\n"
        f"NHS Number: {nhs_number}\n"
        f"Expected: {expected_response}\n"
        f"Actual: {actual_response}\n"
    )
0 commit comments