"""
This plugin allows users to query metrics from the monitoring service for various services.
"""

import json
import os
import sys
from argparse import ArgumentParser
from typing import List, Optional

import requests
import urllib3

from linodecli.exit_codes import ExitCodes
from linodecli.help_formatter import SortingHelpFormatter
from linodecli.helpers import register_debug_arg

PLUGIN_BASE = "linode-cli get_metrics"

# API Configuration
API_BASE_URL = "https://monitor-api.linode.com/v2/monitor/services"

# Aggregate functions accepted in the mandatory "metric:function" syntax
# of the --metrics option.
AGGREGATE_FUNCTIONS = ["sum", "avg", "max", "min", "count"]

# SECURITY NOTE(review): make_api_request() disables TLS certificate
# verification (verify=False).  Silence the resulting InsecureRequestWarning
# noise here (this is why urllib3 is imported), but certificate
# verification should be re-enabled before production use.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)


def get_auth_token() -> str:
    """
    Return the authentication token from the JWE_TOKEN environment variable.

    Raises:
        ValueError: if the environment variable is not set (or empty).
    """
    token = os.getenv("JWE_TOKEN")
    if not token:
        raise ValueError(
            "JWE_TOKEN environment variable is required but not set. "
            "Please set it with: export JWE_TOKEN='your_token_here'"
        )
    return token


def make_api_request(
    service_name: str,
    endpoint: str,
    method: str = "POST",
    data: Optional[dict] = None,
    token: Optional[str] = None,
) -> tuple:
    """
    Make an API request to the monitoring service.

    Args:
        service_name: The service name (nodebalancer, netloadbalancer, etc.)
        endpoint: The API endpoint to call
        method: HTTP method ("POST" or "GET")
        data: Request payload for POST requests
        token: Bearer token for authentication; looked up from the
            JWE_TOKEN environment variable when omitted.

    Returns:
        Tuple of (status_code, response_data).  Network-level failures are
        reported as (500, {"error": ...}) rather than raised.
    """
    url = f"{API_BASE_URL}/{service_name}/{endpoint}"

    headers = {
        "Authorization": f"Bearer {token or get_auth_token()}",
        "Authentication-type": "jwe",
        "Pragma": "akamai-x-get-extracted-values",
        "Content-Type": "application/json",
    }

    try:
        if method.upper() == "POST":
            response = requests.post(
                url, headers=headers, json=data, timeout=30, verify=False
            )
        else:
            response = requests.get(
                url, headers=headers, timeout=30, verify=False
            )

        # Try to parse JSON response, fall back to wrapping the raw text
        # so callers always receive a dict.
        try:
            response_data = response.json() if response.content else {}
        except json.JSONDecodeError:
            response_data = {"error": f"Non-JSON response: {response.text}"}

        return response.status_code, response_data
    except requests.exceptions.RequestException as e:
        print(f"Error making API request: {e}", file=sys.stderr)
        return 500, {"error": str(e)}


def get_metrics(
    service_name: str,
    entity_ids: List,
    duration: Optional[int],
    duration_unit: Optional[str],
    start_time: Optional[str],
    end_time: Optional[str],
    metrics: List[str],
    granularity: Optional[int],
    granularity_unit: Optional[str],
    filters: Optional[List[str]] = None,
    group_by: Optional[List[str]] = None,
    entity_region: Optional[str] = None,
    associated_entity_region: Optional[str] = None,
    token: Optional[str] = None,
):
    """
    Get metrics for specified service entities.

    Builds the request payload from the parsed CLI options, POSTs it to the
    monitoring API's ``metrics`` endpoint, and prints the response.  Exits
    with ExitCodes.REQUEST_FAILED on any validation or API error.
    """

    # Parse metrics with mandatory aggregate functions
    parsed_metrics = []
    for metric in metrics:
        if ":" in metric:
            metric_name, agg_func = metric.split(":", 1)
            parsed_metrics.append(
                {"aggregate_function": agg_func, "name": metric_name.strip()}
            )
        else:
            # No aggregate function specified - this is an error
            print(
                f"Aggregate function required for metric '{metric}'",
                file=sys.stderr,
            )
            print(
                f"Format: 'metric_name:function' where function is one of: {', '.join(AGGREGATE_FUNCTIONS)}",
                file=sys.stderr,
            )
            sys.exit(ExitCodes.REQUEST_FAILED)

    # Build request payload
    payload = {"metrics": parsed_metrics}
    if entity_ids:
        payload["entity_ids"] = entity_ids
    if group_by:
        payload["group_by"] = group_by
    if entity_region:
        payload["entity_region"] = entity_region
    if associated_entity_region:
        payload["associated_entity_region"] = associated_entity_region

    # Add time duration - either relative or absolute
    if start_time and end_time:
        payload["absolute_time_duration"] = {
            "start": start_time,
            "end": end_time,
        }
    elif duration is not None and duration_unit is not None:
        payload["relative_time_duration"] = {
            "unit": duration_unit,
            "value": duration,
        }
    else:
        print(
            "Either (--duration and --duration-unit) or (--start-time and --end-time) must be provided",
            file=sys.stderr,
        )
        sys.exit(ExitCodes.REQUEST_FAILED)

    # Add time_granularity only if both granularity and granularity_unit are provided
    if granularity is not None and granularity_unit is not None:
        payload["time_granularity"] = {
            "unit": granularity_unit,
            "value": granularity,
        }

    # Add filters if provided
    if filters:
        parsed_filters = []
        for filter_str in filters:
            # Split into max 3 parts: dimension, operator, value
            parts = filter_str.split(":", 2)
            if len(parts) != 3:
                print(
                    f"Invalid filter format: '{filter_str}'. "
                    "Expected format: 'dimension:operator:value'",
                    file=sys.stderr,
                )
                sys.exit(ExitCodes.REQUEST_FAILED)

            dimension_label, operator, value = parts
            parsed_filters.append(
                {
                    "dimension_label": dimension_label.strip(),
                    "operator": operator.strip(),
                    "value": value.strip(),
                }
            )

        payload["filters"] = parsed_filters

    if entity_ids:
        print(f"Fetching metrics for {service_name} entities: {entity_ids}")
    else:
        print(f"Fetching metrics for {service_name} (all entities)")
    print(f"Request payload: {json.dumps(payload, indent=2)}")
    try:
        status, response = make_api_request(
            service_name, "metrics", "POST", payload, token
        )
    except ValueError as e:
        # Raised by get_auth_token() when JWE_TOKEN is missing.
        print(f"Authentication Error: {e}", file=sys.stderr)
        sys.exit(ExitCodes.REQUEST_FAILED)

    if status != 200:
        print(f"API request failed with status {status}", file=sys.stderr)
        print(
            f"Error response: {json.dumps(response, indent=2)}", file=sys.stderr
        )
        print("Exiting due to API error...", file=sys.stderr)
        sys.exit(ExitCodes.REQUEST_FAILED)

    print_metrics_response(response)


def print_metrics_response(data: dict):
    """
    Print metrics data as formatted JSON.

    Handles the monitoring API's envelope: on ``status == "success"`` prints
    the query stats and the ``data`` payload; otherwise reports the error.
    """
    if not data:
        print("No response received")
        return

    if data.get("status") == "success":
        metrics_data = data.get("data", {}).get("result", [])
        stats = data.get("stats", {})

        if not metrics_data:
            print("No metrics data found for the specified parameters")
            print(f"Execution time: {stats.get('executionTimeMsec', 0)}ms")
            print(f"Series fetched: {stats.get('seriesFetched', 0)}")
        else:
            print(f"Series fetched: {stats.get('seriesFetched', 0)}")
            print("\nMetrics Data:")
            print(json.dumps(data.get("data"), indent=2))
    else:
        print(f"API returned error status: {data.get('status', 'unknown')}")
        if "error" in data:
            print(f"Error: {data['error']}")


def print_help(parser: ArgumentParser):
    """
    Print argparse help followed by worked usage examples.
    """
    parser.print_help()

    print("\nExamples:")
    print("  # Get metrics with relative time duration")
    print(
        "  linode-cli get_metrics dbaas --entity-ids 123 --duration 15 --duration-unit min --metrics cpu_usage:avg"
    )

    print("\n  # Get metrics for all entities (only allowed for objectstorage service)")
    print(
        "  linode-cli get_metrics objectstorage --duration 15 --duration-unit min --metrics obj_requests_num:avg --entity-region us-east-1"
    )

    print("\n  # Get metrics with absolute time duration")
    print(
        "  linode-cli get_metrics dbaas --entity-ids 123 --start-time 2024-10-10T00:00:01Z --end-time 2024-10-10T23:59:59Z --metrics cpu_usage:avg,memory_usage:sum"
    )

    print("\n  # Get metrics with filters")
    print(
        "  linode-cli get_metrics dbaas --entity-ids 123 --duration 15 --duration-unit min --metrics cpu_usage:avg --filters 'node_type:in:primary,secondary'"
    )

    print("\n  # Get metrics with multiple filters")
    print(
        "  linode-cli get_metrics dbaas --entity-ids 123 --duration 15 --duration-unit min --metrics cpu_usage:avg --filters 'node_type:in:primary,secondary;status:eq:active'"
    )

    print("\n  # Get metrics with granularity")
    print(
        "  linode-cli get_metrics netloadbalancer --entity-ids 123 --duration 1 --duration-unit hour --metrics nlb_ingress_traffic:sum --granularity 10 --granularity-unit min"
    )

    print("\n  # Get metrics with entity region (required ObjectStorage)")
    print(
        "  linode-cli get_metrics objectstorage --entity-region us-east-1 --duration 15 --duration-unit min --metrics obj_requests_num:sum"
    )

    print("\n  # Get metrics with associated entity region (mandatory for cloud firewall service)")
    print(
        "  linode-cli get_metrics firewall --entity-region us-east-1 --associated-entity-region us-west-1 --duration 15 --duration-unit min --metrics fw_active_connections:sum"
    )


def get_metrics_parser():
    """
    Build the argument parser for the metrics plugin.

    Returns:
        ArgumentParser with the service positional plus all metric-query
        options.  add_help is disabled so the plugin can render its own
        extended help (see print_help).
    """
    parser = ArgumentParser(
        PLUGIN_BASE, add_help=False, formatter_class=SortingHelpFormatter
    )

    register_debug_arg(parser)

    # Service name as positional argument
    parser.add_argument(
        "service",
        nargs="?",
        help="Service name (Dbaas, Nodebalancer, NetLoadBalancer, Linode, Firewall, ObjectStorage, Blockstorage,LKE)",
    )

    # Optional arguments for get-metrics functionality
    parser.add_argument(
        "--entity-ids",
        help="Comma-separated list of entity IDs (can be integers or strings depending on service type)",
        required=False,
    )

    parser.add_argument(
        "--entity-region",
        help="Region for entities (required for services like ObjectStorage)",
        required=False,
    )

    parser.add_argument(
        "--associated-entity-region",
        help="Associated region for entities (Required for cloud firewall service)",
        required=False,
    )

    # Time duration options - either relative or absolute
    parser.add_argument(
        "--duration",
        type=int,
        help="Relative time duration to look back (e.g., 15 for 15 minutes)",
    )
    parser.add_argument(
        "--duration-unit", help="Unit for relative duration: min, hr, day"
    )
    parser.add_argument(
        "--start-time",
        help="Absolute start time (ISO format: 2024-10-10T00:00:01Z)",
    )
    parser.add_argument(
        "--end-time",
        help="Absolute end time (ISO format: 2024-10-10T23:59:59Z)",
    )

    parser.add_argument(
        "--metrics",
        help="Comma-separated list of metrics with mandatory aggregate functions. Format: 'metric1:function1,metric2:function2' (e.g., 'cpu_usage:avg,memory_usage:sum')",
    )
    parser.add_argument(
        "--granularity",
        type=int,
        help="Time granularity for data points (optional)",
    )
    parser.add_argument(
        "--granularity-unit",
        help="Unit for granularity: min, hr, day (optional)",
    )
    parser.add_argument(
        "--filters",
        help="Optional filters in format 'dimension:operator:value'. Multiple filters separated by semicolons. Example: 'node_type:in:primary,secondary;status:eq:active'",
    )
    # BUGFIX: the integration tests (and every other option in this parser)
    # use kebab-case (--group-by); keep the original underscore spelling as
    # a hidden alias for backward compatibility.
    parser.add_argument(
        "--group-by",
        "--group_by",
        dest="group_by",
        help="Comma-separated list of fields to group by (default: entity_id)",
    )

    return parser


def call(args, context):
    """
    The entrypoint for this plugin.

    Parses CLI arguments, validates the required option combinations, and
    dispatches to get_metrics().  Exits with ExitCodes.SUCCESS after
    printing help, or ExitCodes.REQUEST_FAILED on validation errors.
    """
    parser = get_metrics_parser()
    parsed, remaining_args = parser.parse_known_args(args)

    # Handle help cases
    if not parsed.service or parsed.service == "help" or "--help" in args:
        print_help(parser)
        sys.exit(ExitCodes.SUCCESS)

    if remaining_args:
        print(f"Unknown arguments: {' '.join(remaining_args)}", file=sys.stderr)
        print_help(parser)
        sys.exit(ExitCodes.REQUEST_FAILED)

    # Validate required arguments for get-metrics functionality
    if not parsed.metrics:
        print(
            "Missing required arguments for metrics retrieval:", file=sys.stderr
        )
        print("  --metrics: required", file=sys.stderr)
        print_help(parser)
        sys.exit(ExitCodes.REQUEST_FAILED)

    # Validate time duration arguments - either relative or absolute required
    has_relative = (
        parsed.duration is not None and parsed.duration_unit is not None
    )
    has_absolute = parsed.start_time is not None and parsed.end_time is not None

    if not has_relative and not has_absolute:
        print("Time duration required:", file=sys.stderr)
        print("  Either: --duration and --duration-unit", file=sys.stderr)
        print("  Or: --start-time and --end-time", file=sys.stderr)
        print_help(parser)
        sys.exit(ExitCodes.REQUEST_FAILED)

    if has_relative and has_absolute:
        print(
            "Cannot specify both relative and absolute time duration",
            file=sys.stderr,
        )
        sys.exit(ExitCodes.REQUEST_FAILED)

    # Parse entity IDs (can be integers or strings depending on service type)
    entity_ids = []
    if parsed.entity_ids:
        for entity_id in parsed.entity_ids.split(","):
            entity_id = entity_id.strip()
            # Try to convert to int first, if that fails keep as string
            try:
                entity_ids.append(int(entity_id))
            except ValueError:
                entity_ids.append(entity_id)

    # Parse metrics
    metrics = [x.strip() for x in parsed.metrics.split(",")]

    # Parse group_by if provided
    group_by = None
    if parsed.group_by:
        group_by = [x.strip() for x in parsed.group_by.split(",")]

    # Parse filters if provided
    filters = None
    if parsed.filters:
        filters = [x.strip() for x in parsed.filters.split(";")]

    get_metrics(
        service_name=parsed.service,
        entity_ids=entity_ids,
        duration=parsed.duration,
        duration_unit=parsed.duration_unit,
        start_time=parsed.start_time,
        end_time=parsed.end_time,
        metrics=metrics,
        granularity=parsed.granularity,
        granularity_unit=parsed.granularity_unit,
        filters=filters,
        group_by=group_by,
        entity_region=parsed.entity_region,
        associated_entity_region=parsed.associated_entity_region,
    )
expected_code=ExitCodes.REQUEST_FAILED + ) + + # Missing metrics + exec_failing_test_command( + BASE_CMD + [ + "nodebalancer", + "--entity-ids", "123", + "--duration", "15", + "--duration-unit", "min" + ], + expected_code=ExitCodes.REQUEST_FAILED + ) + + # Missing duration and time parameters + exec_failing_test_command( + BASE_CMD + [ + "nodebalancer", + "--entity-ids", "123", + "--metrics", "cpu_usage:avg" + ], + expected_code=ExitCodes.REQUEST_FAILED + ) + + +def test_invalid_service(): + """Test error handling for invalid service name""" + exec_failing_test_command( + BASE_CMD + [ + "invalid_service", + "--entity-ids", "123", + "--metrics", "cpu_usage:avg", + "--duration", "15", + "--duration-unit", "min" + ], + expected_code=ExitCodes.REQUEST_FAILED + ) + + +def test_invalid_aggregate_function(): + """Test error handling for metrics without aggregate functions""" + exec_failing_test_command( + BASE_CMD + [ + "nodebalancer", + "--entity-ids", "123", + "--metrics", "cpu_usage", # Missing :avg + "--duration", "15", + "--duration-unit", "min" + ], + expected_code=ExitCodes.REQUEST_FAILED + ) + + +def test_invalid_duration_unit(): + """Test handling of invalid duration unit""" + exec_failing_test_command( + BASE_CMD + [ + "nodebalancer", + "--entity-ids", "123", + "--metrics", "cpu_usage:avg", + "--duration", "15", + "--duration-unit", "invalid_unit" + ], + expected_code=ExitCodes.REQUEST_FAILED + ) + + +def test_conflicting_time_params(): + """Test handling of conflicting time parameters""" + exec_failing_test_command( + BASE_CMD + [ + "nodebalancer", + "--entity-ids", "123", + "--metrics", "cpu_usage:avg", + "--duration", "15", + "--duration-unit", "min", + "--start-time", "2025-12-22T00:00:00Z", + "--end-time", "2025-12-22T12:00:00Z" + ], + expected_code=ExitCodes.REQUEST_FAILED + ) + + +@pytest.mark.skipif( + not os.getenv('JWE_TOKEN'), + reason="JWE_TOKEN environment variable required for monitoring tests" +) +@pytest.mark.smoke +def 
test_nodebalancer_metrics_basic(): + """Test get_metrics with nodebalancer service (with authentication)""" + # Use a non-existent entity ID to avoid side effects + # This will test the complete command flow but fail gracefully + try: + output = exec_test_command(BASE_CMD + [ + "nodebalancer", + "--entity-ids", "999999", + "--metrics", "nb_ingress_traffic_rate:sum", + "--duration", "15", + "--duration-unit", "min" + ]) + + # If it succeeds, check for expected output structure + assert "Fetching metrics" in output or "data" in output.lower() + + except RuntimeError as e: + # Expected to fail with entity not found or similar API error + # Ensure it's not a command structure error + error_output = str(e) + assert "API request failed" in error_output or "entity" in error_output.lower() + # Should not be argument parsing errors + assert "unrecognized arguments" not in error_output + assert "invalid choice" not in error_output + + +@pytest.mark.skipif( + not os.getenv('JWE_TOKEN'), + reason="JWE_TOKEN environment variable required for monitoring tests" +) +def test_dbaas_metrics_with_filters(): + """Test get_metrics with dbaas service and filters""" + try: + output = exec_test_command(BASE_CMD + [ + "dbaas", + "--entity-ids", "999999", + "--metrics", "cpu_usage:avg,memory_usage:max", + "--duration", "30", + "--duration-unit", "min", + "--filters", "node_type:in:primary,secondary", + "--group-by", "entity_id,node_type" + ]) + + assert "Fetching metrics" in output or "data" in output.lower() + + except RuntimeError as e: + error_output = str(e) + assert "API request failed" in error_output or "entity" in error_output.lower() + assert "unrecognized arguments" not in error_output + + +@pytest.mark.skipif( + not os.getenv('JWE_TOKEN'), + reason="JWE_TOKEN environment variable required for monitoring tests" +) +def test_absolute_time_metrics(): + """Test get_metrics with absolute time range""" + try: + output = exec_test_command(BASE_CMD + [ + "linodes", + "--entity-ids", 
"999999", + "--metrics", "cpu_percent:avg", + "--start-time", "2025-12-22T00:00:00Z", + "--end-time", "2025-12-22T12:00:00Z", + "--granularity", "5", + "--granularity-unit", "min" + ]) + + assert "Fetching metrics" in output or "data" in output.lower() + + except RuntimeError as e: + error_output = str(e) + assert "API request failed" in error_output or "entity" in error_output.lower() + assert "unrecognized arguments" not in error_output + + +@pytest.mark.skipif( + not os.getenv('JWE_TOKEN'), + reason="JWE_TOKEN environment variable required for monitoring tests" +) +def test_multiple_entity_ids(): + """Test get_metrics with multiple entity IDs""" + try: + output = exec_test_command(BASE_CMD + [ + "nodebalancer", + "--entity-ids", "999999,888888,777777", + "--metrics", "nb_ingress_traffic_rate:sum,nb_egress_traffic_rate:avg", + "--duration", "1", + "--duration-unit", "hr", + "--granularity", "15", + "--granularity-unit", "min" + ]) + + assert "Fetching metrics" in output or "data" in output.lower() + + except RuntimeError as e: + error_output = str(e) + assert "API request failed" in error_output or "entity" in error_output.lower() + assert "unrecognized arguments" not in error_output + + +@pytest.mark.skipif( + not os.getenv('JWE_TOKEN'), + reason="JWE_TOKEN environment variable required for monitoring tests" +) +def test_complex_filters(): + """Test get_metrics with complex filter combinations""" + try: + output = exec_test_command(BASE_CMD + [ + "dbaas", + "--entity-ids", "999999", + "--metrics", "cpu_usage:avg,memory_usage:avg,connections:count", + "--duration", "2", + "--duration-unit", "hr", + "--filters", "node_type:in:primary,secondary;status:eq:active;environment:ne:test", + "--group-by", "entity_id,node_type,environment", + "--granularity", "30", + "--granularity-unit", "min" + ]) + + assert "Fetching metrics" in output or "data" in output.lower() + + except RuntimeError as e: + error_output = str(e) + assert "API request failed" in error_output or 
"entity" in error_output.lower() + assert "unrecognized arguments" not in error_output + + +def test_missing_token_error(): + """Test error handling when JWE_TOKEN is missing""" + # Temporarily remove token + original_token = os.getenv('JWE_TOKEN') + if 'JWE_TOKEN' in os.environ: + del os.environ['JWE_TOKEN'] + + try: + exec_failing_test_command( + BASE_CMD + [ + "nodebalancer", + "--entity-ids", "123", + "--metrics", "cpu_usage:avg", + "--duration", "15", + "--duration-unit", "min" + ], + expected_code=ExitCodes.REQUEST_FAILED + ) + finally: + # Restore token + if original_token: + os.environ['JWE_TOKEN'] = original_token + + +def test_empty_entity_ids(): + """Test handling of empty entity IDs""" + exec_failing_test_command( + BASE_CMD + [ + "nodebalancer", + "--entity-ids", "", + "--metrics", "cpu_usage:avg", + "--duration", "15", + "--duration-unit", "min" + ], + expected_code=ExitCodes.REQUEST_FAILED + ) + + +def test_malformed_filters(): + """Test handling of malformed filter syntax""" + exec_failing_test_command( + BASE_CMD + [ + "dbaas", + "--entity-ids", "123", + "--metrics", "cpu_usage:avg", + "--duration", "15", + "--duration-unit", "min", + "--filters", "invalid_filter_format" + ], + expected_code=ExitCodes.REQUEST_FAILED + ) + + +def test_service_validation(): + """Test that valid services are recognized correctly""" + valid_services = ["nodebalancer", "netloadbalancer", "linodes", "dbaas"] + + for service in valid_services: + # This should fail due to missing authentication, not service validation + try: + exec_failing_test_command( + BASE_CMD + [ + service, + "--entity-ids", "123", + "--metrics", "cpu_usage:avg", + "--duration", "15", + "--duration-unit", "min" + ], + expected_code=ExitCodes.REQUEST_FAILED + ) + except AssertionError as e: + # If it fails with wrong exit code, check it's not service validation error + error_msg = str(e).lower() + assert "invalid choice" not in error_msg + assert f"invalid choice: '{service}'" not in error_msg diff 
--git a/tests/unit/test_plugin_get_metrics.py b/tests/unit/test_plugin_get_metrics.py new file mode 100644 index 000000000..afd09a128 --- /dev/null +++ b/tests/unit/test_plugin_get_metrics.py @@ -0,0 +1,251 @@ +""" +Unit tests for the get_metrics plugin +""" + +import json +import os +from unittest.mock import MagicMock, Mock, patch + +import pytest +from pytest import CaptureFixture + +from linodecli.plugins.get_metrics import ( + call, + get_auth_token, + get_metrics, + get_metrics_parser, + make_api_request, + print_metrics_response, +) + + +class TestAuthToken: + """Test authentication token handling""" + + def test_get_auth_token_success(self): + """Test successful token retrieval from environment""" + with patch.dict(os.environ, {"JWE_TOKEN": "test_token"}): + token = get_auth_token() + assert token == "test_token" + + def test_get_auth_token_missing(self): + """Test error when token is missing""" + with patch.dict(os.environ, {}, clear=True): + with pytest.raises(ValueError) as excinfo: + get_auth_token() + assert "JWE_TOKEN environment variable is required" in str( + excinfo.value + ) + + +class TestAPIRequest: + """Test API request functionality""" + + @patch("linodecli.plugins.get_metrics.requests.post") + def test_make_api_request_success(self, mock_post): + """Test successful API request""" + # Mock successful response + mock_response = Mock() + mock_response.status_code = 200 + mock_response.content = b'{"data": {"test": "data"}}' + mock_response.json.return_value = {"data": {"test": "data"}} + mock_post.return_value = mock_response + + status_code, result = make_api_request( + "nodebalancer", "metrics", "POST", {"test": "data"}, "test_token" + ) + + assert status_code == 200 + assert result == {"data": {"test": "data"}} + mock_post.assert_called_once() + + @patch("linodecli.plugins.get_metrics.requests.post") + def test_make_api_request_http_error(self, mock_post): + """Test API request with HTTP error""" + mock_response = Mock() + 
mock_response.status_code = 401 + mock_response.content = b"Unauthorized" + mock_response.json.return_value = {"error": "Unauthorized"} + mock_post.return_value = mock_response + + status_code, result = make_api_request( + "nodebalancer", "metrics", "POST", {}, "test_token" + ) + assert status_code == 401 + + +class TestGetMetrics: + """Test get_metrics function""" + + @patch("linodecli.plugins.get_metrics.print_metrics_response") + @patch("linodecli.plugins.get_metrics.make_api_request") + def test_get_metrics_relative_time(self, mock_api_request, mock_print): + """Test get_metrics with relative time duration""" + mock_api_request.return_value = (200, {"data": {"test": "data"}}) + + # get_metrics doesn't return data, it calls print_metrics_response + get_metrics( + service_name="nodebalancer", + entity_ids=[123, 456], + duration=15, + duration_unit="min", + start_time=None, + end_time=None, + metrics=["cpu_usage:avg"], + granularity=None, + granularity_unit=None, + token="test_token", + ) + + # Verify that print_metrics_response was called with the response + mock_print.assert_called_once_with({"data": {"test": "data"}}) + mock_api_request.assert_called_once() + + @patch("linodecli.plugins.get_metrics.print_metrics_response") + @patch("linodecli.plugins.get_metrics.make_api_request") + def test_get_metrics_absolute_time(self, mock_api_request, mock_print): + """Test get_metrics with absolute time range""" + mock_api_request.return_value = (200, {"data": {"test": "data"}}) + + get_metrics( + service_name="dbaas", + entity_ids=[789], + duration=None, + duration_unit=None, + start_time="2025-12-22T00:00:00Z", + end_time="2025-12-22T12:00:00Z", + metrics=["memory_usage:max"], + granularity=None, + granularity_unit=None, + token="test_token", + ) + + # Verify print_metrics_response was called + mock_print.assert_called_once_with({"data": {"test": "data"}}) + + @patch("linodecli.plugins.get_metrics.print_metrics_response") + 
@patch("linodecli.plugins.get_metrics.make_api_request") + def test_get_metrics_with_filters_and_groupby( + self, mock_api_request, mock_print + ): + """Test get_metrics with filters and group_by""" + mock_api_request.return_value = (200, {"data": {"test": "data"}}) + + get_metrics( + service_name="dbaas", + entity_ids=[123], + duration=1, + duration_unit="hr", + start_time=None, + end_time=None, + metrics=["cpu_usage:avg"], + granularity=None, + granularity_unit=None, + filters=["node_type:in:primary,secondary", "status:eq:active"], + group_by=["entity_id", "node_type"], + token="test_token", + ) + + # Verify print_metrics_response was called + mock_print.assert_called_once_with({"data": {"test": "data"}}) + + @patch("linodecli.plugins.get_metrics.make_api_request") + @patch("builtins.print") + @patch("sys.exit") + def test_get_metrics_api_error( + self, mock_exit, mock_print, mock_api_request + ): + """Test get_metrics with API error response""" + mock_api_request.return_value = (401, {"error": "Unauthorized"}) + + get_metrics( + service_name="nodebalancer", + entity_ids=[123], + duration=15, + duration_unit="min", + start_time=None, + end_time=None, + metrics=["cpu_usage:avg"], + granularity=None, + granularity_unit=None, + token="test_token", + ) + + # Verify that sys.exit was called due to API error + mock_exit.assert_called_with(2) # ExitCodes.REQUEST_FAILED + + +class TestArgumentParsing: + """Test argument parsing""" + + def test_get_metrics_parser(self): + """Test parser creation""" + parser = get_metrics_parser() + + # Test that parser has all expected arguments + args = parser.parse_args( + [ + "nodebalancer", + "--entity-ids", + "123,456", + "--metrics", + "cpu_usage:avg,memory_usage:max", + "--duration", + "15", + "--duration-unit", + "min", + ] + ) + + assert args.service == "nodebalancer" + assert args.entity_ids == "123,456" + assert args.metrics == "cpu_usage:avg,memory_usage:max" + assert args.duration == 15 + assert args.duration_unit == "min" + 
+ +class TestPrintResponse: + """Test response printing""" + + def test_print_metrics_response_success(self, capsys: CaptureFixture): + """Test metrics response printing for successful response""" + response_data = { + "status": "success", + "data": { + "result": [ + { + "entity_id": 123, + "cpu_usage": [ + {"timestamp": "2025-12-22T10:00:00Z", "value": 45.2} + ], + } + ] + }, + "stats": {"executionTimeMsec": 150, "seriesFetched": 1}, + } + + print_metrics_response(response_data) + captured = capsys.readouterr() + + # Verify success output + assert "✓ Success" in captured.out + assert "150ms" in captured.out + assert "Metrics Data:" in captured.out + + def test_print_metrics_response_error(self, capsys: CaptureFixture): + """Test metrics response printing for error response""" + response_data = {"status": "error", "error": "Invalid parameters"} + + print_metrics_response(response_data) + captured = capsys.readouterr() + + # Verify error output + assert "API returned error status: error" in captured.out + assert "Error: Invalid parameters" in captured.out + + def test_print_metrics_response_empty(self, capsys: CaptureFixture): + """Test metrics response printing for empty response""" + print_metrics_response({}) + captured = capsys.readouterr() + + assert "No response received" in captured.out From e98bc3615cc938edc802d8b0a4a916e866be5231 Mon Sep 17 00:00:00 2001 From: Priya Majali Date: Wed, 7 Jan 2026 13:48:33 +0530 Subject: [PATCH 02/11] updating tests --- .../monitor/test_plugin_get_metrics.py | 224 +++--------------- tests/unit/test_plugin_get_metrics.py | 35 +-- 2 files changed, 39 insertions(+), 220 deletions(-) diff --git a/tests/integration/monitor/test_plugin_get_metrics.py b/tests/integration/monitor/test_plugin_get_metrics.py index 537220593..54cfece8e 100644 --- a/tests/integration/monitor/test_plugin_get_metrics.py +++ b/tests/integration/monitor/test_plugin_get_metrics.py @@ -1,7 +1,3 @@ -""" -Integration tests for the get_metrics plugin -""" - import 
json import os @@ -129,184 +125,59 @@ def test_conflicting_time_params(): ) -@pytest.mark.skipif( - not os.getenv('JWE_TOKEN'), - reason="JWE_TOKEN environment variable required for monitoring tests" -) @pytest.mark.smoke -def test_nodebalancer_metrics_basic(): - """Test get_metrics with nodebalancer service (with authentication)""" - # Use a non-existent entity ID to avoid side effects - # This will test the complete command flow but fail gracefully - try: - output = exec_test_command(BASE_CMD + [ - "nodebalancer", - "--entity-ids", "999999", - "--metrics", "nb_ingress_traffic_rate:sum", - "--duration", "15", - "--duration-unit", "min" - ]) +def test_objstorage_metrics_basic(): + """Test get_metrics with objectstorage service (with authentication)""" + # Use objectstorage service which doesn't require entity-ids + output = exec_test_command(BASE_CMD + [ + "objectstorage", + "--metrics", "obj_requests_num:sum", + "--duration", "15", + "--duration-unit", "min", + "--entity-region", "us-east" + ]) - # If it succeeds, check for expected output structure - assert "Fetching metrics" in output or "data" in output.lower() + print(f"SUCCESS: {output}") + assert "Fetching metrics" in output or "data" in output.lower() - except RuntimeError as e: - # Expected to fail with entity not found or similar API error - # Ensure it's not a command structure error - error_output = str(e) - assert "API request failed" in error_output or "entity" in error_output.lower() - # Should not be argument parsing errors - assert "unrecognized arguments" not in error_output - assert "invalid choice" not in error_output +def test_obj_metrics_with_filters(): + """Test get_metrics with objectstorage service and filters""" + output = exec_test_command(BASE_CMD + [ + "objectstorage", + "--metrics", "obj_requests_num:sum", + "--duration", "30", + "--duration-unit", "min", + "--entity-region", "us-west", + "--filters", "request_type:eq:get" + ]) -@pytest.mark.skipif( - not os.getenv('JWE_TOKEN'), - 
reason="JWE_TOKEN environment variable required for monitoring tests" -) -def test_dbaas_metrics_with_filters(): - """Test get_metrics with dbaas service and filters""" - try: - output = exec_test_command(BASE_CMD + [ - "dbaas", - "--entity-ids", "999999", - "--metrics", "cpu_usage:avg,memory_usage:max", - "--duration", "30", - "--duration-unit", "min", - "--filters", "node_type:in:primary,secondary", - "--group-by", "entity_id,node_type" - ]) - - assert "Fetching metrics" in output or "data" in output.lower() + assert "Fetching metrics" in output or "data" in output.lower() - except RuntimeError as e: - error_output = str(e) - assert "API request failed" in error_output or "entity" in error_output.lower() - assert "unrecognized arguments" not in error_output -@pytest.mark.skipif( - not os.getenv('JWE_TOKEN'), - reason="JWE_TOKEN environment variable required for monitoring tests" -) def test_absolute_time_metrics(): - """Test get_metrics with absolute time range""" - try: - output = exec_test_command(BASE_CMD + [ - "linodes", - "--entity-ids", "999999", - "--metrics", "cpu_percent:avg", - "--start-time", "2025-12-22T00:00:00Z", - "--end-time", "2025-12-22T12:00:00Z", - "--granularity", "5", - "--granularity-unit", "min" - ]) - - assert "Fetching metrics" in output or "data" in output.lower() - - except RuntimeError as e: - error_output = str(e) - assert "API request failed" in error_output or "entity" in error_output.lower() - assert "unrecognized arguments" not in error_output - - -@pytest.mark.skipif( - not os.getenv('JWE_TOKEN'), - reason="JWE_TOKEN environment variable required for monitoring tests" -) -def test_multiple_entity_ids(): - """Test get_metrics with multiple entity IDs""" - try: - output = exec_test_command(BASE_CMD + [ - "nodebalancer", - "--entity-ids", "999999,888888,777777", - "--metrics", "nb_ingress_traffic_rate:sum,nb_egress_traffic_rate:avg", - "--duration", "1", - "--duration-unit", "hr", - "--granularity", "15", - "--granularity-unit", 
"min" - ]) + """Test get_metrics with objectstorage service and absolute time range""" + output = exec_test_command(BASE_CMD + [ + "objectstorage", + "--metrics", "obj_requests_num:sum", + "--start-time", "2025-12-22T00:00:00Z", + "--end-time", "2025-12-22T12:00:00Z", + "--entity-region", "us-southeast", + "--granularity", "5", + "--granularity-unit", "min" + ]) - assert "Fetching metrics" in output or "data" in output.lower() + assert "Fetching metrics" in output or "data" in output.lower() - except RuntimeError as e: - error_output = str(e) - assert "API request failed" in error_output or "entity" in error_output.lower() - assert "unrecognized arguments" not in error_output - - -@pytest.mark.skipif( - not os.getenv('JWE_TOKEN'), - reason="JWE_TOKEN environment variable required for monitoring tests" -) -def test_complex_filters(): - """Test get_metrics with complex filter combinations""" - try: - output = exec_test_command(BASE_CMD + [ - "dbaas", - "--entity-ids", "999999", - "--metrics", "cpu_usage:avg,memory_usage:avg,connections:count", - "--duration", "2", - "--duration-unit", "hr", - "--filters", "node_type:in:primary,secondary;status:eq:active;environment:ne:test", - "--group-by", "entity_id,node_type,environment", - "--granularity", "30", - "--granularity-unit", "min" - ]) - - assert "Fetching metrics" in output or "data" in output.lower() - - except RuntimeError as e: - error_output = str(e) - assert "API request failed" in error_output or "entity" in error_output.lower() - assert "unrecognized arguments" not in error_output - - -def test_missing_token_error(): - """Test error handling when JWE_TOKEN is missing""" - # Temporarily remove token - original_token = os.getenv('JWE_TOKEN') - if 'JWE_TOKEN' in os.environ: - del os.environ['JWE_TOKEN'] - - try: - exec_failing_test_command( - BASE_CMD + [ - "nodebalancer", - "--entity-ids", "123", - "--metrics", "cpu_usage:avg", - "--duration", "15", - "--duration-unit", "min" - ], - 
expected_code=ExitCodes.REQUEST_FAILED - ) - finally: - # Restore token - if original_token: - os.environ['JWE_TOKEN'] = original_token - - -def test_empty_entity_ids(): - """Test handling of empty entity IDs""" - exec_failing_test_command( - BASE_CMD + [ - "nodebalancer", - "--entity-ids", "", - "--metrics", "cpu_usage:avg", - "--duration", "15", - "--duration-unit", "min" - ], - expected_code=ExitCodes.REQUEST_FAILED - ) def test_malformed_filters(): """Test handling of malformed filter syntax""" exec_failing_test_command( BASE_CMD + [ - "dbaas", - "--entity-ids", "123", - "--metrics", "cpu_usage:avg", + "objectstorage", + "--metrics", "obj_requests_num:sum", "--duration", "15", "--duration-unit", "min", "--filters", "invalid_filter_format" @@ -315,25 +186,4 @@ def test_malformed_filters(): ) -def test_service_validation(): - """Test that valid services are recognized correctly""" - valid_services = ["nodebalancer", "netloadbalancer", "linodes", "dbaas"] - - for service in valid_services: - # This should fail due to missing authentication, not service validation - try: - exec_failing_test_command( - BASE_CMD + [ - service, - "--entity-ids", "123", - "--metrics", "cpu_usage:avg", - "--duration", "15", - "--duration-unit", "min" - ], - expected_code=ExitCodes.REQUEST_FAILED - ) - except AssertionError as e: - # If it fails with wrong exit code, check it's not service validation error - error_msg = str(e).lower() - assert "invalid choice" not in error_msg - assert f"invalid choice: '{service}'" not in error_msg + diff --git a/tests/unit/test_plugin_get_metrics.py b/tests/unit/test_plugin_get_metrics.py index afd09a128..5c66ecc14 100644 --- a/tests/unit/test_plugin_get_metrics.py +++ b/tests/unit/test_plugin_get_metrics.py @@ -1,7 +1,3 @@ -""" -Unit tests for the get_metrics plugin -""" - import json import os from unittest.mock import MagicMock, Mock, patch @@ -11,7 +7,6 @@ from linodecli.plugins.get_metrics import ( call, - get_auth_token, get_metrics, 
get_metrics_parser, make_api_request, @@ -19,25 +14,6 @@ ) -class TestAuthToken: - """Test authentication token handling""" - - def test_get_auth_token_success(self): - """Test successful token retrieval from environment""" - with patch.dict(os.environ, {"JWE_TOKEN": "test_token"}): - token = get_auth_token() - assert token == "test_token" - - def test_get_auth_token_missing(self): - """Test error when token is missing""" - with patch.dict(os.environ, {}, clear=True): - with pytest.raises(ValueError) as excinfo: - get_auth_token() - assert "JWE_TOKEN environment variable is required" in str( - excinfo.value - ) - - class TestAPIRequest: """Test API request functionality""" @@ -68,7 +44,7 @@ def test_make_api_request_http_error(self, mock_post): mock_response.json.return_value = {"error": "Unauthorized"} mock_post.return_value = mock_response - status_code, result = make_api_request( + status_code, _ = make_api_request( "nodebalancer", "metrics", "POST", {}, "test_token" ) assert status_code == 401 @@ -83,7 +59,6 @@ def test_get_metrics_relative_time(self, mock_api_request, mock_print): """Test get_metrics with relative time duration""" mock_api_request.return_value = (200, {"data": {"test": "data"}}) - # get_metrics doesn't return data, it calls print_metrics_response get_metrics( service_name="nodebalancer", entity_ids=[123, 456], @@ -97,7 +72,6 @@ def test_get_metrics_relative_time(self, mock_api_request, mock_print): token="test_token", ) - # Verify that print_metrics_response was called with the response mock_print.assert_called_once_with({"data": {"test": "data"}}) mock_api_request.assert_called_once() @@ -120,7 +94,6 @@ def test_get_metrics_absolute_time(self, mock_api_request, mock_print): token="test_token", ) - # Verify print_metrics_response was called mock_print.assert_called_once_with({"data": {"test": "data"}}) @patch("linodecli.plugins.get_metrics.print_metrics_response") @@ -146,7 +119,6 @@ def test_get_metrics_with_filters_and_groupby( 
token="test_token", ) - # Verify print_metrics_response was called mock_print.assert_called_once_with({"data": {"test": "data"}}) @patch("linodecli.plugins.get_metrics.make_api_request") @@ -171,7 +143,6 @@ def test_get_metrics_api_error( token="test_token", ) - # Verify that sys.exit was called due to API error mock_exit.assert_called_with(2) # ExitCodes.REQUEST_FAILED @@ -182,7 +153,6 @@ def test_get_metrics_parser(self): """Test parser creation""" parser = get_metrics_parser() - # Test that parser has all expected arguments args = parser.parse_args( [ "nodebalancer", @@ -228,8 +198,7 @@ def test_print_metrics_response_success(self, capsys: CaptureFixture): captured = capsys.readouterr() # Verify success output - assert "✓ Success" in captured.out - assert "150ms" in captured.out + assert "Series fetched: 1" in captured.out assert "Metrics Data:" in captured.out def test_print_metrics_response_error(self, capsys: CaptureFixture): From 6f3b11584e4150b44fb2f0723bc7f091ef2dae6c Mon Sep 17 00:00:00 2001 From: Priya Majali Date: Mon, 12 Jan 2026 14:04:08 +0530 Subject: [PATCH 03/11] refactoring --- linodecli/plugins/get_metrics.py | 256 +++++++++++++++----------- tests/unit/test_plugin_get_metrics.py | 13 +- 2 files changed, 159 insertions(+), 110 deletions(-) diff --git a/linodecli/plugins/get_metrics.py b/linodecli/plugins/get_metrics.py index ce36470e6..d1fa5d93d 100644 --- a/linodecli/plugins/get_metrics.py +++ b/linodecli/plugins/get_metrics.py @@ -7,10 +7,10 @@ import os import sys from argparse import ArgumentParser +from dataclasses import dataclass from typing import List, Optional import requests -import urllib3 from linodecli.exit_codes import ExitCodes from linodecli.help_formatter import SortingHelpFormatter @@ -41,6 +41,25 @@ def get_auth_token(): AGGREGATE_FUNCTIONS = ["sum", "avg", "max", "min", "count"] +@dataclass +class MetricsConfig: + """Configuration for metrics request""" + service_name: str + entity_ids: List + duration: Optional[int] + 
duration_unit: Optional[str] + start_time: Optional[str] + end_time: Optional[str] + metrics: List[str] + granularity: Optional[int] + granularity_unit: Optional[str] + filters: Optional[List[str]] = None + group_by: Optional[List[str]] = None + entity_region: Optional[str] = None + associated_entity_region: Optional[str] = None + token: Optional[str] = None + + def make_api_request( service_name: str, endpoint: str, @@ -73,11 +92,11 @@ def make_api_request( try: if method.upper() == "POST": response = requests.post( - url, headers=headers, json=data, timeout=30, verify=False + url, headers=headers, json=data, timeout=30, verify=True ) else: response = requests.get( - url, headers=headers, timeout=30, verify=False + url, headers=headers, timeout=30, verify=True ) # Try to parse JSON response, fallback to text if it fails @@ -92,27 +111,8 @@ def make_api_request( return 500, {"error": str(e)} -def get_metrics( - service_name: str, - entity_ids: List, - duration: Optional[int], - duration_unit: Optional[str], - start_time: Optional[str], - end_time: Optional[str], - metrics: List[str], - granularity: Optional[int], - granularity_unit: Optional[str], - filters: Optional[List[str]] = None, - group_by: Optional[List[str]] = None, - entity_region: Optional[str] = None, - associated_entity_region: Optional[str] = None, - token: Optional[str] = None, -): - """ - Get metrics for specified service entities - """ - - # Parse metrics with mandatory aggregate functions +def parse_metrics(metrics: List[str]) -> List[dict]: + """Parse metrics with mandatory aggregate functions""" parsed_metrics = [] for metric in metrics: if ":" in metric: @@ -121,63 +121,68 @@ def get_metrics( {"aggregate_function": agg_func, "name": metric_name.strip()} ) else: - # No aggregate function specified - this is an error print( f"Aggregate function required for metric '{metric}'", file=sys.stderr, ) print( - f"Format: 'metric_name:function' where function is one of: {', '.join(AGGREGATE_FUNCTIONS)}", 
+ f"Format: 'metric_name:function' where function is one of: " + f"{', '.join(AGGREGATE_FUNCTIONS)}", file=sys.stderr, ) sys.exit(ExitCodes.REQUEST_FAILED) + return parsed_metrics + - # Build request payload +def build_payload(config: MetricsConfig) -> dict: + """Build API request payload from configuration""" + parsed_metrics = parse_metrics(config.metrics) payload = {"metrics": parsed_metrics} - if entity_ids: - payload["entity_ids"] = entity_ids - if group_by: - payload["group_by"] = group_by - if entity_region: - payload["entity_region"] = entity_region - if associated_entity_region: - payload["associated_entity_region"] = associated_entity_region + + if config.entity_ids: + payload["entity_ids"] = config.entity_ids + if config.group_by: + payload["group_by"] = config.group_by + if config.entity_region: + payload["entity_region"] = config.entity_region + if config.associated_entity_region: + payload["associated_entity_region"] = config.associated_entity_region # Add time duration - either relative or absolute - if start_time and end_time: + if config.start_time and config.end_time: payload["absolute_time_duration"] = { - "start": start_time, - "end": end_time, + "start": config.start_time, + "end": config.end_time, } - elif duration is not None and duration_unit is not None: + elif config.duration is not None and config.duration_unit is not None: payload["relative_time_duration"] = { - "unit": duration_unit, - "value": duration, + "unit": config.duration_unit, + "value": config.duration, } else: print( - "Either (--duration and --duration-unit) or (--start-time and --end-time) must be provided", + "Either (--duration and --duration-unit) or " + "(--start-time and --end-time) must be provided", file=sys.stderr, ) sys.exit(ExitCodes.REQUEST_FAILED) - # Add time_granularity only if both granularity and granularity_unit are provided - if granularity is not None and granularity_unit is not None: + # Add time_granularity if provided + if config.granularity is not 
None and config.granularity_unit is not None: payload["time_granularity"] = { - "unit": granularity_unit, - "value": granularity, + "unit": config.granularity_unit, + "value": config.granularity, } # Add filters if provided - if filters: + if config.filters: parsed_filters = [] - for filter_str in filters: - parts = filter_str.split( - ":", 2 - ) # Split into max 3 parts: dimension, operator, value + for filter_str in config.filters: + parts = filter_str.split(":", 2) if len(parts) != 3: print( - f"Invalid filter format: '{filter_str}'. Expected format: 'dimension:operator:value'", + f"Invalid filter format: '{filter_str}'. " + "Expected format: 'dimension:operator:value'", file=sys.stderr, ) sys.exit(ExitCodes.REQUEST_FAILED) @@ -190,17 +195,24 @@ def get_metrics( "value": value.strip(), } ) - payload["filters"] = parsed_filters - if entity_ids: - print(f"Fetching metrics for {service_name} entities: {entity_ids}") + return payload + + +def get_metrics(config: MetricsConfig): + """Get metrics for specified service entities""" + payload = build_payload(config) + + if config.entity_ids: + print(f"Fetching metrics for {config.service_name} entities: {config.entity_ids}") else: - print(f"Fetching metrics for {service_name} (all entities)") + print(f"Fetching metrics for {config.service_name} (all entities)") print(f"Request payload: {json.dumps(payload, indent=2)}") + try: status, response = make_api_request( - service_name, "metrics", "POST", payload, token + config.service_name, "metrics", "POST", payload, config.token ) except ValueError as e: print(f"Authentication Error: {e}", file=sys.stderr) @@ -216,7 +228,6 @@ def get_metrics( print_metrics_response(response) - def print_metrics_response(data: dict): """ Print metrics data as formatted JSON @@ -252,42 +263,62 @@ def print_help(parser: ArgumentParser): print("\nExamples:") print(" # Get metrics with relative time duration") print( - " linode-cli get_metrics dbaas --entity-ids 123 --duration 15 --duration-unit 
min --metrics cpu_usage:avg" + " linode-cli get_metrics dbaas --entity-ids 123 --duration 15 " + "--duration-unit min --metrics cpu_usage:avg" ) - print("\n # Get metrics for all entities (only allowed for objectstorage service)") print( - " linode-cli get_metrics objectstorage --duration 15 --duration-unit min --metrics obj_requests_num:avg --entity-region us-east-1" + "\n # Get metrics for all entities " + "(only allowed for objectstorage service)" + ) + print( + " linode-cli get_metrics objectstorage --duration 15 " + "--duration-unit min --metrics obj_requests_num:avg " + "--entity-region us-east-1" ) print("\n # Get metrics with absolute time duration") print( - " linode-cli get_metrics dbaas --entity-ids 123 --start-time 2024-10-10T00:00:01Z --end-time 2024-10-10T23:59:59Z --metrics cpu_usage:avg,memory_usage:sum" + " linode-cli get_metrics dbaas --entity-ids 123 " + "--start-time 2024-10-10T00:00:01Z --end-time 2024-10-10T23:59:59Z " + "--metrics cpu_usage:avg,memory_usage:sum" ) print("\n # Get metrics with filters") print( - " linode-cli get_metrics dbaas --entity-ids 123 --duration 15 --duration-unit min --metrics cpu_usage:avg --filters 'node_type:in:primary,secondary'" + " linode-cli get_metrics dbaas --entity-ids 123 --duration 15 " + "--duration-unit min --metrics cpu_usage:avg " + "--filters 'node_type:in:primary,secondary'" ) print("\n # Get metrics with multiple filters") print( - " linode-cli get_metrics dbaas --entity-ids 123 --duration 15 --duration-unit min --metrics cpu_usage:avg --filters 'node_type:in:primary,secondary;status:eq:active'" + " linode-cli get_metrics dbaas --entity-ids 123 --duration 15 " + "--duration-unit min --metrics cpu_usage:avg " + "--filters 'node_type:in:primary,secondary;status:eq:active'" ) print("\n # Get metrics with granularity") print( - " linode-cli get_metrics netloadbalancer --entity-ids 123 --duration 1 --duration-unit hour --metrics nlb_ingress_traffic:sum --granularity 10 --granularity-unit min" + " 
linode-cli get_metrics netloadbalancer --entity-ids 123 " + "--duration 1 --duration-unit hour --metrics nlb_ingress_traffic:sum " + "--granularity 10 --granularity-unit min" ) print("\n # Get metrics with entity region (required ObjectStorage)") print( - " linode-cli get_metrics objectstorage --entity-region us-east-1 --duration 15 --duration-unit min --metrics obj_requests_num:sum" + " linode-cli get_metrics objectstorage --entity-region us-east-1 " + "--duration 15 --duration-unit min --metrics obj_requests_num:sum" ) - print("\n # Get metrics with associated entity region (mandatory for cloud firewall service)") print( - " linode-cli get_metrics firewall --entity-region us-east-1 --associated-entity-region us-west-1 --duration 15 --duration-unit min --metrics fw_active_connections:sum" + "\n # Get metrics with associated entity region " + "(mandatory for cloud firewall service)" + ) + print( + " linode-cli get_metrics firewall --entity-region us-east-1 " + "--associated-entity-region us-west-1 --duration 15 " + "--duration-unit min --metrics fw_active_connections:sum" ) @@ -305,13 +336,15 @@ def get_metrics_parser(): parser.add_argument( "service", nargs="?", - help="Service name (Dbaas, Nodebalancer, NetLoadBalancer, Linode, Firewall, ObjectStorage, Blockstorage,LKE)", + help="Service name (Dbaas, Nodebalancer, NetLoadBalancer, Linode, " + "Firewall, ObjectStorage, Blockstorage,LKE)", ) # Optional arguments for get-metrics functionality parser.add_argument( "--entity-ids", - help="Comma-separated list of entity IDs (can be integers or strings depending on service type)", + help="Comma-separated list of entity IDs " + "(can be integers or strings depending on service type)", required=False, ) @@ -347,7 +380,9 @@ def get_metrics_parser(): parser.add_argument( "--metrics", - help="Comma-separated list of metrics with mandatory aggregate functions. 
Format: 'metric1:function1,metric2:function2' (e.g., 'cpu_usage:avg,memory_usage:sum')", + help="Comma-separated list of metrics with mandatory aggregate functions. " + "Format: 'metric1:function1,metric2:function2' " + "(e.g., 'cpu_usage:avg,memory_usage:sum')", ) parser.add_argument( "--granularity", @@ -360,7 +395,9 @@ def get_metrics_parser(): ) parser.add_argument( "--filters", - help="Optional filters in format 'dimension:operator:value'. Multiple filters separated by semicolons. Example: 'node_type:in:primary,secondary;status:eq:active'", + help="Optional filters in format 'dimension:operator:value'. " + "Multiple filters separated by semicolons. " + "Example: 'node_type:in:primary,secondary;status:eq:active'", ) parser.add_argument( "--group_by", @@ -370,33 +407,16 @@ def get_metrics_parser(): return parser -def call(args, context): - """ - The entrypoint for this plugin - """ - parser = get_metrics_parser() - parsed, remaining_args = parser.parse_known_args(args) - - # Handle help cases - if not parsed.service or parsed.service == "help" or "--help" in args: - print_help(parser) - sys.exit(ExitCodes.SUCCESS) - - if remaining_args: - print(f"Unknown arguments: {' '.join(remaining_args)}", file=sys.stderr) - print_help(parser) - sys.exit(ExitCodes.REQUEST_FAILED) - - # Validate required arguments for get-metrics functionality +def validate_arguments(parsed): + """Validate required arguments""" if not parsed.metrics: print( "Missing required arguments for metrics retrieval:", file=sys.stderr ) print(" --metrics: required", file=sys.stderr) - print_help(parser) - sys.exit(ExitCodes.REQUEST_FAILED) + return False - # Validate time duration arguments - either relative or absolute required + # Validate time duration arguments has_relative = ( parsed.duration is not None and parsed.duration_unit is not None ) @@ -406,41 +426,63 @@ def call(args, context): print("Time duration required:", file=sys.stderr) print(" Either: --duration and --duration-unit", 
file=sys.stderr) print(" Or: --start-time and --end-time", file=sys.stderr) - print_help(parser) - sys.exit(ExitCodes.REQUEST_FAILED) + return False if has_relative and has_absolute: print( "Cannot specify both relative and absolute time duration", file=sys.stderr, ) - sys.exit(ExitCodes.REQUEST_FAILED) + return False - # Parse entity IDs (can be integers or strings depending on service type) + return True + + +def parse_entity_ids(entity_ids_str: Optional[str]) -> List: + """Parse entity IDs from string""" entity_ids = [] - if parsed.entity_ids: - for entity_id in parsed.entity_ids.split(","): + if entity_ids_str: + for entity_id in entity_ids_str.split(","): entity_id = entity_id.strip() - # Try to convert to int first, if that fails keep as string try: entity_ids.append(int(entity_id)) except ValueError: entity_ids.append(entity_id) + return entity_ids - # Parse metrics - metrics = [x.strip() for x in parsed.metrics.split(",")] - # Parse group_by if provided +def call(args, context=None): # pylint: disable=unused-argument + """The entrypoint for this plugin""" + parser = get_metrics_parser() + parsed, remaining_args = parser.parse_known_args(args) + + # Handle help cases + if not parsed.service or parsed.service == "help" or "--help" in args: + print_help(parser) + sys.exit(ExitCodes.SUCCESS) + + if remaining_args: + print(f"Unknown arguments: {' '.join(remaining_args)}", file=sys.stderr) + print_help(parser) + sys.exit(ExitCodes.REQUEST_FAILED) + + # Validate arguments + if not validate_arguments(parsed): + print_help(parser) + sys.exit(ExitCodes.REQUEST_FAILED) + + # Parse arguments + entity_ids = parse_entity_ids(parsed.entity_ids) + metrics = [x.strip() for x in parsed.metrics.split(",")] group_by = None if parsed.group_by: group_by = [x.strip() for x in parsed.group_by.split(",")] - - # Parse filters if provided filters = None if parsed.filters: filters = [x.strip() for x in parsed.filters.split(";")] - get_metrics( + # Create config and call get_metrics + 
config = MetricsConfig( service_name=parsed.service, entity_ids=entity_ids, duration=parsed.duration, @@ -455,3 +497,5 @@ def call(args, context): entity_region=parsed.entity_region, associated_entity_region=parsed.associated_entity_region, ) + + get_metrics(config) diff --git a/tests/unit/test_plugin_get_metrics.py b/tests/unit/test_plugin_get_metrics.py index 5c66ecc14..f4fcb44c5 100644 --- a/tests/unit/test_plugin_get_metrics.py +++ b/tests/unit/test_plugin_get_metrics.py @@ -11,6 +11,7 @@ get_metrics_parser, make_api_request, print_metrics_response, + MetricsConfig, ) @@ -59,7 +60,7 @@ def test_get_metrics_relative_time(self, mock_api_request, mock_print): """Test get_metrics with relative time duration""" mock_api_request.return_value = (200, {"data": {"test": "data"}}) - get_metrics( + config = MetricsConfig( service_name="nodebalancer", entity_ids=[123, 456], duration=15, @@ -71,6 +72,7 @@ def test_get_metrics_relative_time(self, mock_api_request, mock_print): granularity_unit=None, token="test_token", ) + get_metrics(config) mock_print.assert_called_once_with({"data": {"test": "data"}}) mock_api_request.assert_called_once() @@ -81,7 +83,7 @@ def test_get_metrics_absolute_time(self, mock_api_request, mock_print): """Test get_metrics with absolute time range""" mock_api_request.return_value = (200, {"data": {"test": "data"}}) - get_metrics( + config = MetricsConfig( service_name="dbaas", entity_ids=[789], duration=None, @@ -93,6 +95,7 @@ def test_get_metrics_absolute_time(self, mock_api_request, mock_print): granularity_unit=None, token="test_token", ) + get_metrics(config) mock_print.assert_called_once_with({"data": {"test": "data"}}) @@ -104,7 +107,7 @@ def test_get_metrics_with_filters_and_groupby( """Test get_metrics with filters and group_by""" mock_api_request.return_value = (200, {"data": {"test": "data"}}) - get_metrics( + config = MetricsConfig( service_name="dbaas", entity_ids=[123], duration=1, @@ -118,6 +121,7 @@ def 
test_get_metrics_with_filters_and_groupby( group_by=["entity_id", "node_type"], token="test_token", ) + get_metrics(config) mock_print.assert_called_once_with({"data": {"test": "data"}}) @@ -130,7 +134,7 @@ def test_get_metrics_api_error( """Test get_metrics with API error response""" mock_api_request.return_value = (401, {"error": "Unauthorized"}) - get_metrics( + config = MetricsConfig( service_name="nodebalancer", entity_ids=[123], duration=15, @@ -142,6 +146,7 @@ def test_get_metrics_api_error( granularity_unit=None, token="test_token", ) + get_metrics(config) mock_exit.assert_called_with(2) # ExitCodes.REQUEST_FAILED From d21bd221063cfe368f4431145a791320a82d4de3 Mon Sep 17 00:00:00 2001 From: susharma Date: Wed, 8 Apr 2026 10:16:23 +0530 Subject: [PATCH 04/11] Resolving review comments - Changed plugin base from 'linode-cli get_metrics' to 'linode-cli monitor-api' with 'get-metrics' as a subcommand - Refactored API_BASE_URL to be more generic, allowing API version specification - Updated all help text and examples to reflect new command structure - Updated unit and integration tests accordingly --- .../{get_metrics.py => monitor-api.py} | 56 +++-- .../monitor/test_plugin_get_metrics.py | 20 +- tests/unit/test_plugin_get_metrics.py | 223 +++++++++--------- 3 files changed, 149 insertions(+), 150 deletions(-) rename linodecli/plugins/{get_metrics.py => monitor-api.py} (89%) diff --git a/linodecli/plugins/get_metrics.py b/linodecli/plugins/monitor-api.py similarity index 89% rename from linodecli/plugins/get_metrics.py rename to linodecli/plugins/monitor-api.py index d1fa5d93d..4d433c71a 100644 --- a/linodecli/plugins/get_metrics.py +++ b/linodecli/plugins/monitor-api.py @@ -1,6 +1,11 @@ """ -This plugin allows users to query metrics from the monitoring service for various services. +This plugin provides access to the Linode Monitor API. +Commands: + get-metrics: Query metrics from the monitoring service for various services. 
+ +Usage: + linode-cli monitor-api get-metrics [options] """ import json @@ -16,10 +21,11 @@ from linodecli.help_formatter import SortingHelpFormatter from linodecli.helpers import register_debug_arg -PLUGIN_BASE = "linode-cli get_metrics" +PLUGIN_BASE = "linode-cli monitor-api" # API Configuration -API_BASE_URL = "https://monitor-api.linode.com/v2/monitor/services" +API_BASE_URL = "https://monitor-api.linode.com" +API_VERSION = "v2" def get_auth_token(): @@ -80,7 +86,7 @@ def make_api_request( Returns: Tuple of (status_code, response_data) """ - url = f"{API_BASE_URL}/{service_name}/{endpoint}" + url = f"{API_BASE_URL}/{API_VERSION}/monitor/services/{service_name}/{endpoint}" headers = { "Authorization": f"Bearer {token or get_auth_token()}", @@ -263,7 +269,7 @@ def print_help(parser: ArgumentParser): print("\nExamples:") print(" # Get metrics with relative time duration") print( - " linode-cli get_metrics dbaas --entity-ids 123 --duration 15 " + " linode-cli monitor-api get-metrics dbaas --entity-ids 123 --duration 15 " "--duration-unit min --metrics cpu_usage:avg" ) @@ -272,42 +278,42 @@ def print_help(parser: ArgumentParser): "(only allowed for objectstorage service)" ) print( - " linode-cli get_metrics objectstorage --duration 15 " + " linode-cli monitor-api get-metrics objectstorage --duration 15 " "--duration-unit min --metrics obj_requests_num:avg " "--entity-region us-east-1" ) print("\n # Get metrics with absolute time duration") print( - " linode-cli get_metrics dbaas --entity-ids 123 " + " linode-cli monitor-api get-metrics dbaas --entity-ids 123 " "--start-time 2024-10-10T00:00:01Z --end-time 2024-10-10T23:59:59Z " "--metrics cpu_usage:avg,memory_usage:sum" ) print("\n # Get metrics with filters") print( - " linode-cli get_metrics dbaas --entity-ids 123 --duration 15 " + " linode-cli monitor-api get-metrics dbaas --entity-ids 123 --duration 15 " "--duration-unit min --metrics cpu_usage:avg " "--filters 'node_type:in:primary,secondary'" ) print("\n # 
Get metrics with multiple filters") print( - " linode-cli get_metrics dbaas --entity-ids 123 --duration 15 " + " linode-cli monitor-api get-metrics dbaas --entity-ids 123 --duration 15 " "--duration-unit min --metrics cpu_usage:avg " "--filters 'node_type:in:primary,secondary;status:eq:active'" ) print("\n # Get metrics with granularity") print( - " linode-cli get_metrics netloadbalancer --entity-ids 123 " + " linode-cli monitor-api get-metrics netloadbalancer --entity-ids 123 " "--duration 1 --duration-unit hour --metrics nlb_ingress_traffic:sum " "--granularity 10 --granularity-unit min" ) print("\n # Get metrics with entity region (required ObjectStorage)") print( - " linode-cli get_metrics objectstorage --entity-region us-east-1 " + " linode-cli monitor-api get-metrics objectstorage --entity-region us-east-1 " "--duration 15 --duration-unit min --metrics obj_requests_num:sum" ) @@ -316,7 +322,7 @@ def print_help(parser: ArgumentParser): "(mandatory for cloud firewall service)" ) print( - " linode-cli get_metrics firewall --entity-region us-east-1 " + " linode-cli monitor-api get-metrics firewall --entity-region us-east-1 " "--associated-entity-region us-west-1 --duration 15 " "--duration-unit min --metrics fw_active_connections:sum" ) @@ -332,12 +338,19 @@ def get_metrics_parser(): register_debug_arg(parser) - # Service name as positional argument + # Command as first positional argument + parser.add_argument( + "command", + nargs="?", + help="Command to execute (get-metrics)", + ) + + # Service name as second positional argument parser.add_argument( "service", nargs="?", help="Service name (Dbaas, Nodebalancer, NetLoadBalancer, Linode, " - "Firewall, ObjectStorage, Blockstorage,LKE)", + "Firewall, ObjectStorage, Blockstorage, LKE)", ) # Optional arguments for get-metrics functionality @@ -457,10 +470,23 @@ def call(args, context=None): # pylint: disable=unused-argument parsed, remaining_args = parser.parse_known_args(args) # Handle help cases - if not 
parsed.service or parsed.service == "help" or "--help" in args: + if not parsed.command or parsed.command == "help" or "--help" in args: print_help(parser) sys.exit(ExitCodes.SUCCESS) + # Validate command + if parsed.command != "get-metrics": + print(f"Unknown command: {parsed.command}", file=sys.stderr) + print("Available commands: get-metrics", file=sys.stderr) + print_help(parser) + sys.exit(ExitCodes.REQUEST_FAILED) + + # Validate service is provided + if not parsed.service: + print("Service name is required", file=sys.stderr) + print_help(parser) + sys.exit(ExitCodes.REQUEST_FAILED) + if remaining_args: print(f"Unknown arguments: {' '.join(remaining_args)}", file=sys.stderr) print_help(parser) diff --git a/tests/integration/monitor/test_plugin_get_metrics.py b/tests/integration/monitor/test_plugin_get_metrics.py index 54cfece8e..2ba6e6b51 100644 --- a/tests/integration/monitor/test_plugin_get_metrics.py +++ b/tests/integration/monitor/test_plugin_get_metrics.py @@ -1,19 +1,3 @@ -import json -import os - -import pytest - -from tests.integration.helpers import ( - exec_failing_test_command, - exec_test_command, - get_random_text, -) -from linodecli.exit_codes import ExitCodes - -# Base command for get_metrics plugin -BASE_CMD = ["linode-cli", "get_metrics"] - - """ Integration tests for the get_metrics plugin """ @@ -28,8 +12,8 @@ ) from linodecli.exit_codes import ExitCodes -# Base command for get_metrics plugin -BASE_CMD = ["linode-cli", "get_metrics"] +# Base command for monitor-api plugin +BASE_CMD = ["linode-cli", "monitor-api", "get-metrics"] def test_missing_required_args(): diff --git a/tests/unit/test_plugin_get_metrics.py b/tests/unit/test_plugin_get_metrics.py index f4fcb44c5..995ee1b20 100644 --- a/tests/unit/test_plugin_get_metrics.py +++ b/tests/unit/test_plugin_get_metrics.py @@ -1,154 +1,141 @@ import json import os +from importlib import import_module from unittest.mock import MagicMock, Mock, patch import pytest from pytest import 
CaptureFixture -from linodecli.plugins.get_metrics import ( - call, - get_metrics, - get_metrics_parser, - make_api_request, - print_metrics_response, - MetricsConfig, -) +# Import the monitor-api module using importlib (hyphens not allowed in regular imports) +monitor_api = import_module("linodecli.plugins.monitor-api") + +call = monitor_api.call +get_metrics = monitor_api.get_metrics +get_metrics_parser = monitor_api.get_metrics_parser +make_api_request = monitor_api.make_api_request +print_metrics_response = monitor_api.print_metrics_response +MetricsConfig = monitor_api.MetricsConfig class TestAPIRequest: """Test API request functionality""" - @patch("linodecli.plugins.get_metrics.requests.post") - def test_make_api_request_success(self, mock_post): + def test_make_api_request_success(self): """Test successful API request""" # Mock successful response mock_response = Mock() mock_response.status_code = 200 mock_response.content = b'{"data": {"test": "data"}}' mock_response.json.return_value = {"data": {"test": "data"}} - mock_post.return_value = mock_response - - status_code, result = make_api_request( - "nodebalancer", "metrics", "POST", {"test": "data"}, "test_token" - ) + + with patch.object(monitor_api.requests, 'post', return_value=mock_response) as mock_post: + status_code, result = make_api_request( + "nodebalancer", "metrics", "POST", {"test": "data"}, "test_token" + ) - assert status_code == 200 - assert result == {"data": {"test": "data"}} - mock_post.assert_called_once() + assert status_code == 200 + assert result == {"data": {"test": "data"}} + mock_post.assert_called_once() - @patch("linodecli.plugins.get_metrics.requests.post") - def test_make_api_request_http_error(self, mock_post): + def test_make_api_request_http_error(self): """Test API request with HTTP error""" mock_response = Mock() mock_response.status_code = 401 mock_response.content = b"Unauthorized" mock_response.json.return_value = {"error": "Unauthorized"} - mock_post.return_value = 
mock_response - - status_code, _ = make_api_request( - "nodebalancer", "metrics", "POST", {}, "test_token" - ) - assert status_code == 401 + + with patch.object(monitor_api.requests, 'post', return_value=mock_response): + status_code, _ = make_api_request( + "nodebalancer", "metrics", "POST", {}, "test_token" + ) + assert status_code == 401 class TestGetMetrics: """Test get_metrics function""" - @patch("linodecli.plugins.get_metrics.print_metrics_response") - @patch("linodecli.plugins.get_metrics.make_api_request") - def test_get_metrics_relative_time(self, mock_api_request, mock_print): + def test_get_metrics_relative_time(self): """Test get_metrics with relative time duration""" - mock_api_request.return_value = (200, {"data": {"test": "data"}}) - - config = MetricsConfig( - service_name="nodebalancer", - entity_ids=[123, 456], - duration=15, - duration_unit="min", - start_time=None, - end_time=None, - metrics=["cpu_usage:avg"], - granularity=None, - granularity_unit=None, - token="test_token", - ) - get_metrics(config) - - mock_print.assert_called_once_with({"data": {"test": "data"}}) - mock_api_request.assert_called_once() - - @patch("linodecli.plugins.get_metrics.print_metrics_response") - @patch("linodecli.plugins.get_metrics.make_api_request") - def test_get_metrics_absolute_time(self, mock_api_request, mock_print): + with patch.object(monitor_api, 'make_api_request', return_value=(200, {"data": {"test": "data"}})): + with patch.object(monitor_api, 'print_metrics_response') as mock_print: + config = MetricsConfig( + service_name="nodebalancer", + entity_ids=[123, 456], + duration=15, + duration_unit="min", + start_time=None, + end_time=None, + metrics=["cpu_usage:avg"], + granularity=None, + granularity_unit=None, + token="test_token", + ) + get_metrics(config) + + mock_print.assert_called_once_with({"data": {"test": "data"}}) + + def test_get_metrics_absolute_time(self): """Test get_metrics with absolute time range""" - mock_api_request.return_value = (200, 
{"data": {"test": "data"}}) - - config = MetricsConfig( - service_name="dbaas", - entity_ids=[789], - duration=None, - duration_unit=None, - start_time="2025-12-22T00:00:00Z", - end_time="2025-12-22T12:00:00Z", - metrics=["memory_usage:max"], - granularity=None, - granularity_unit=None, - token="test_token", - ) - get_metrics(config) - - mock_print.assert_called_once_with({"data": {"test": "data"}}) - - @patch("linodecli.plugins.get_metrics.print_metrics_response") - @patch("linodecli.plugins.get_metrics.make_api_request") - def test_get_metrics_with_filters_and_groupby( - self, mock_api_request, mock_print - ): + with patch.object(monitor_api, 'make_api_request', return_value=(200, {"data": {"test": "data"}})): + with patch.object(monitor_api, 'print_metrics_response') as mock_print: + config = MetricsConfig( + service_name="dbaas", + entity_ids=[789], + duration=None, + duration_unit=None, + start_time="2025-12-22T00:00:00Z", + end_time="2025-12-22T12:00:00Z", + metrics=["memory_usage:max"], + granularity=None, + granularity_unit=None, + token="test_token", + ) + get_metrics(config) + + mock_print.assert_called_once_with({"data": {"test": "data"}}) + + def test_get_metrics_with_filters_and_groupby(self): """Test get_metrics with filters and group_by""" - mock_api_request.return_value = (200, {"data": {"test": "data"}}) - - config = MetricsConfig( - service_name="dbaas", - entity_ids=[123], - duration=1, - duration_unit="hr", - start_time=None, - end_time=None, - metrics=["cpu_usage:avg"], - granularity=None, - granularity_unit=None, - filters=["node_type:in:primary,secondary", "status:eq:active"], - group_by=["entity_id", "node_type"], - token="test_token", - ) - get_metrics(config) - - mock_print.assert_called_once_with({"data": {"test": "data"}}) - - @patch("linodecli.plugins.get_metrics.make_api_request") - @patch("builtins.print") - @patch("sys.exit") - def test_get_metrics_api_error( - self, mock_exit, mock_print, mock_api_request - ): + with 
patch.object(monitor_api, 'make_api_request', return_value=(200, {"data": {"test": "data"}})): + with patch.object(monitor_api, 'print_metrics_response') as mock_print: + config = MetricsConfig( + service_name="dbaas", + entity_ids=[123], + duration=1, + duration_unit="hr", + start_time=None, + end_time=None, + metrics=["cpu_usage:avg"], + granularity=None, + granularity_unit=None, + filters=["node_type:in:primary,secondary", "status:eq:active"], + group_by=["entity_id", "node_type"], + token="test_token", + ) + get_metrics(config) + + mock_print.assert_called_once_with({"data": {"test": "data"}}) + + def test_get_metrics_api_error(self): """Test get_metrics with API error response""" - mock_api_request.return_value = (401, {"error": "Unauthorized"}) - - config = MetricsConfig( - service_name="nodebalancer", - entity_ids=[123], - duration=15, - duration_unit="min", - start_time=None, - end_time=None, - metrics=["cpu_usage:avg"], - granularity=None, - granularity_unit=None, - token="test_token", - ) - get_metrics(config) - - mock_exit.assert_called_with(2) # ExitCodes.REQUEST_FAILED + with patch.object(monitor_api, 'make_api_request', return_value=(401, {"error": "Unauthorized"})): + with patch('builtins.print'): + with patch.object(monitor_api.sys, 'exit') as mock_exit: + config = MetricsConfig( + service_name="nodebalancer", + entity_ids=[123], + duration=15, + duration_unit="min", + start_time=None, + end_time=None, + metrics=["cpu_usage:avg"], + granularity=None, + granularity_unit=None, + token="test_token", + ) + get_metrics(config) + + mock_exit.assert_called_with(2) # ExitCodes.REQUEST_FAILED class TestArgumentParsing: @@ -160,6 +147,7 @@ def test_get_metrics_parser(self): args = parser.parse_args( [ + "get-metrics", "nodebalancer", "--entity-ids", "123,456", @@ -172,6 +160,7 @@ def test_get_metrics_parser(self): ] ) + assert args.command == "get-metrics" assert args.service == "nodebalancer" assert args.entity_ids == "123,456" assert args.metrics == 
"cpu_usage:avg,memory_usage:max" From 1d2c52e3cb07f938a88d03abac39b002e877388b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 1 Apr 2026 10:28:37 +0200 Subject: [PATCH 05/11] build(deps): bump docker/build-push-action from 6.19.2 to 7.0.0 (#873) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index ef0bd5002..373b5c888 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -67,7 +67,7 @@ jobs: result-encoding: string - name: Build and push to DockerHub - uses: docker/build-push-action@10e90e3645eae34f1e60eeb005ba3a3d33f178e8 # pin@v6.19.2 + uses: docker/build-push-action@d08e5c354a6adb9ed34480a06d141179aa583294 # pin@v7.0.0 with: context: . file: Dockerfile From f5c289a1ec18c6b2a0240683eff61ead19d9bd8a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 1 Apr 2026 01:19:11 +0000 Subject: [PATCH 06/11] build(deps): bump docker/setup-buildx-action from 3.12.0 to 4.0.0 Bumps [docker/setup-buildx-action](https://github.com/docker/setup-buildx-action) from 3.12.0 to 4.0.0. - [Release notes](https://github.com/docker/setup-buildx-action/releases) - [Commits](https://github.com/docker/setup-buildx-action/compare/8d2750c68a42422c14e847fe6c8ac0403b4cbd6f...4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd) --- updated-dependencies: - dependency-name: docker/setup-buildx-action dependency-version: 4.0.0 dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- .github/workflows/release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 373b5c888..315c24cb2 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -42,7 +42,7 @@ jobs: uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # pin@v3.7.0 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # pin@v3.12.0 + uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # pin@v4.0.0 - name: Login to Docker Hub uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # pin@v3.7.0 From f48b50eb848ede96e2cc58fc9de8f3adb27e7624 Mon Sep 17 00:00:00 2001 From: Erik Zilber Date: Wed, 1 Apr 2026 10:08:26 -0400 Subject: [PATCH 07/11] TPT-4298: Added PR title checking to lint workflow and new clean up release notes workflow (#871) --- .github/workflows/ci.yml | 28 +++++++++++++++++ .github/workflows/clean-release-notes.yml | 37 +++++++++++++++++++++++ 2 files changed, 65 insertions(+) create mode 100644 .github/workflows/clean-release-notes.yml diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e6030adda..f7fd86cf9 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -15,7 +15,35 @@ jobs: lint: runs-on: ubuntu-latest + permissions: + contents: read + pull-requests: read steps: + # Enforce TPT-1234: prefix on PR titles, with the following exemptions: + # - PRs labeled 'dependencies' (e.g. 
Dependabot PRs) + # - PRs labeled 'hotfix' (urgent fixes that may not have a ticket) + # - PRs labeled 'community-contribution' (external contributors without TPT tickets) + # - PRs labeled 'ignore-for-release' (release PRs that don't need a ticket prefix) + - name: Validate PR Title + if: github.event_name == 'pull_request' + uses: amannn/action-semantic-pull-request@v6 + with: + types: | + TPT-\d+ + requireScope: false + # Override the default header pattern to allow hyphens and digits in the type + # (e.g. "TPT-4298: Description"). The default pattern only matches word + # characters (\w) which excludes hyphens. + headerPattern: '^([\w-]+):\s?(.*)$' + headerPatternCorrespondence: type, subject + ignoreLabels: | + dependencies + hotfix + community-contribution + ignore-for-release + env: + GITHUB_TOKEN: ${{ github.token }} + - name: checkout repo uses: actions/checkout@v6 diff --git a/.github/workflows/clean-release-notes.yml b/.github/workflows/clean-release-notes.yml new file mode 100644 index 000000000..9006a5daf --- /dev/null +++ b/.github/workflows/clean-release-notes.yml @@ -0,0 +1,37 @@ +name: Clean Release Notes + +on: + release: + types: [published] + +jobs: + clean-release-notes: + runs-on: ubuntu-latest + permissions: + contents: write + + steps: + - name: Remove ticket prefixes from release notes + uses: actions/github-script@v8 + with: + script: | + const release = context.payload.release; + + let body = release.body; + + if (!body) { + console.log("Release body empty, nothing to clean."); + return; + } + + // Remove ticket prefixes like "TPT-1234: " or "TPT-1234:" + body = body.replace(/TPT-\d+:\s*/g, ''); + + await github.rest.repos.updateRelease({ + owner: context.repo.owner, + repo: context.repo.repo, + release_id: release.id, + body: body + }); + + console.log("Release notes cleaned."); From 42e16fbe07bee0b9753cb63af9bc9b30d70521e7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 7 
Apr 2026 13:20:33 -0400 Subject: [PATCH 08/11] build(deps): bump docker/login-action from 3.7.0 to 4.0.0 (#876) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 315c24cb2..11c68f5af 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -45,7 +45,7 @@ jobs: uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # pin@v4.0.0 - name: Login to Docker Hub - uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # pin@v3.7.0 + uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # pin@v4.0.0 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} From 0a19f042afbaf32a4576293249177499f02ff802 Mon Sep 17 00:00:00 2001 From: susharma Date: Wed, 8 Apr 2026 14:28:44 +0530 Subject: [PATCH 09/11] Fix import sorting in integration tests --- tests/integration/monitor/test_plugin_get_metrics.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/monitor/test_plugin_get_metrics.py b/tests/integration/monitor/test_plugin_get_metrics.py index 2ba6e6b51..75f0dbde0 100644 --- a/tests/integration/monitor/test_plugin_get_metrics.py +++ b/tests/integration/monitor/test_plugin_get_metrics.py @@ -6,11 +6,11 @@ import pytest +from linodecli.exit_codes import ExitCodes from tests.integration.helpers import ( exec_failing_test_command, exec_test_command, ) -from linodecli.exit_codes import ExitCodes # Base command for monitor-api plugin BASE_CMD = ["linode-cli", "monitor-api", "get-metrics"] From 7ff8e9012a82ae55b3265e72b87c491fabcfbae0 Mon Sep 17 00:00:00 2001 From: susharma Date: Wed, 8 Apr 2026 14:40:38 +0530 Subject: [PATCH 10/11] Fix lint issues: remove unused imports and format code --- linodecli/plugins/monitor-api.py | 
9 +- .../monitor/test_plugin_get_metrics.py | 207 +++++++++++------- tests/unit/test_plugin_get_metrics.py | 69 ++++-- 3 files changed, 183 insertions(+), 102 deletions(-) diff --git a/linodecli/plugins/monitor-api.py b/linodecli/plugins/monitor-api.py index 4d433c71a..46b7105f8 100644 --- a/linodecli/plugins/monitor-api.py +++ b/linodecli/plugins/monitor-api.py @@ -42,7 +42,6 @@ def get_auth_token(): return token - # Aggregate functions AGGREGATE_FUNCTIONS = ["sum", "avg", "max", "min", "count"] @@ -50,6 +49,7 @@ def get_auth_token(): @dataclass class MetricsConfig: """Configuration for metrics request""" + service_name: str entity_ids: List duration: Optional[int] @@ -79,7 +79,7 @@ def make_api_request( Args: service_name: The service name (nodebalancer, netloadbalancer, etc.) endpoint: The API endpoint to call - method: HTTP method + method: HTTP method data: Request payload for POST requests token: Bearer token for authentication @@ -211,7 +211,9 @@ def get_metrics(config: MetricsConfig): payload = build_payload(config) if config.entity_ids: - print(f"Fetching metrics for {config.service_name} entities: {config.entity_ids}") + print( + f"Fetching metrics for {config.service_name} entities: {config.entity_ids}" + ) else: print(f"Fetching metrics for {config.service_name} (all entities)") print(f"Request payload: {json.dumps(payload, indent=2)}") @@ -234,6 +236,7 @@ def get_metrics(config: MetricsConfig): print_metrics_response(response) + def print_metrics_response(data: dict): """ Print metrics data as formatted JSON diff --git a/tests/integration/monitor/test_plugin_get_metrics.py b/tests/integration/monitor/test_plugin_get_metrics.py index 75f0dbde0..84041dc3c 100644 --- a/tests/integration/monitor/test_plugin_get_metrics.py +++ b/tests/integration/monitor/test_plugin_get_metrics.py @@ -2,8 +2,6 @@ Integration tests for the get_metrics plugin """ -import os - import pytest from linodecli.exit_codes import ExitCodes @@ -20,92 +18,119 @@ def 
test_missing_required_args(): """Test error handling for missing required arguments""" # Missing entity-ids exec_failing_test_command( - BASE_CMD + [ + BASE_CMD + + [ "nodebalancer", - "--metrics", "cpu_usage:avg", - "--duration", "15", - "--duration-unit", "min" + "--metrics", + "cpu_usage:avg", + "--duration", + "15", + "--duration-unit", + "min", ], - expected_code=ExitCodes.REQUEST_FAILED + expected_code=ExitCodes.REQUEST_FAILED, ) # Missing metrics exec_failing_test_command( - BASE_CMD + [ + BASE_CMD + + [ "nodebalancer", - "--entity-ids", "123", - "--duration", "15", - "--duration-unit", "min" + "--entity-ids", + "123", + "--duration", + "15", + "--duration-unit", + "min", ], - expected_code=ExitCodes.REQUEST_FAILED + expected_code=ExitCodes.REQUEST_FAILED, ) # Missing duration and time parameters exec_failing_test_command( - BASE_CMD + [ - "nodebalancer", - "--entity-ids", "123", - "--metrics", "cpu_usage:avg" - ], - expected_code=ExitCodes.REQUEST_FAILED + BASE_CMD + + ["nodebalancer", "--entity-ids", "123", "--metrics", "cpu_usage:avg"], + expected_code=ExitCodes.REQUEST_FAILED, ) def test_invalid_service(): """Test error handling for invalid service name""" exec_failing_test_command( - BASE_CMD + [ + BASE_CMD + + [ "invalid_service", - "--entity-ids", "123", - "--metrics", "cpu_usage:avg", - "--duration", "15", - "--duration-unit", "min" + "--entity-ids", + "123", + "--metrics", + "cpu_usage:avg", + "--duration", + "15", + "--duration-unit", + "min", ], - expected_code=ExitCodes.REQUEST_FAILED + expected_code=ExitCodes.REQUEST_FAILED, ) def test_invalid_aggregate_function(): """Test error handling for metrics without aggregate functions""" exec_failing_test_command( - BASE_CMD + [ + BASE_CMD + + [ "nodebalancer", - "--entity-ids", "123", - "--metrics", "cpu_usage", # Missing :avg - "--duration", "15", - "--duration-unit", "min" + "--entity-ids", + "123", + "--metrics", + "cpu_usage", # Missing :avg + "--duration", + "15", + "--duration-unit", + "min", ], 
- expected_code=ExitCodes.REQUEST_FAILED + expected_code=ExitCodes.REQUEST_FAILED, ) def test_invalid_duration_unit(): """Test handling of invalid duration unit""" exec_failing_test_command( - BASE_CMD + [ + BASE_CMD + + [ "nodebalancer", - "--entity-ids", "123", - "--metrics", "cpu_usage:avg", - "--duration", "15", - "--duration-unit", "invalid_unit" + "--entity-ids", + "123", + "--metrics", + "cpu_usage:avg", + "--duration", + "15", + "--duration-unit", + "invalid_unit", ], - expected_code=ExitCodes.REQUEST_FAILED + expected_code=ExitCodes.REQUEST_FAILED, ) def test_conflicting_time_params(): """Test handling of conflicting time parameters""" exec_failing_test_command( - BASE_CMD + [ + BASE_CMD + + [ "nodebalancer", - "--entity-ids", "123", - "--metrics", "cpu_usage:avg", - "--duration", "15", - "--duration-unit", "min", - "--start-time", "2025-12-22T00:00:00Z", - "--end-time", "2025-12-22T12:00:00Z" + "--entity-ids", + "123", + "--metrics", + "cpu_usage:avg", + "--duration", + "15", + "--duration-unit", + "min", + "--start-time", + "2025-12-22T00:00:00Z", + "--end-time", + "2025-12-22T12:00:00Z", ], - expected_code=ExitCodes.REQUEST_FAILED + expected_code=ExitCodes.REQUEST_FAILED, ) @@ -113,13 +138,20 @@ def test_conflicting_time_params(): def test_objstorage_metrics_basic(): """Test get_metrics with objectstorage service (with authentication)""" # Use objectstorage service which doesn't require entity-ids - output = exec_test_command(BASE_CMD + [ - "objectstorage", - "--metrics", "obj_requests_num:sum", - "--duration", "15", - "--duration-unit", "min", - "--entity-region", "us-east" - ]) + output = exec_test_command( + BASE_CMD + + [ + "objectstorage", + "--metrics", + "obj_requests_num:sum", + "--duration", + "15", + "--duration-unit", + "min", + "--entity-region", + "us-east", + ] + ) print(f"SUCCESS: {output}") assert "Fetching metrics" in output or "data" in output.lower() @@ -127,47 +159,64 @@ def test_objstorage_metrics_basic(): def 
test_obj_metrics_with_filters(): """Test get_metrics with objectstorage service and filters""" - output = exec_test_command(BASE_CMD + [ - "objectstorage", - "--metrics", "obj_requests_num:sum", - "--duration", "30", - "--duration-unit", "min", - "--entity-region", "us-west", - "--filters", "request_type:eq:get" - ]) + output = exec_test_command( + BASE_CMD + + [ + "objectstorage", + "--metrics", + "obj_requests_num:sum", + "--duration", + "30", + "--duration-unit", + "min", + "--entity-region", + "us-west", + "--filters", + "request_type:eq:get", + ] + ) assert "Fetching metrics" in output or "data" in output.lower() - def test_absolute_time_metrics(): """Test get_metrics with objectstorage service and absolute time range""" - output = exec_test_command(BASE_CMD + [ - "objectstorage", - "--metrics", "obj_requests_num:sum", - "--start-time", "2025-12-22T00:00:00Z", - "--end-time", "2025-12-22T12:00:00Z", - "--entity-region", "us-southeast", - "--granularity", "5", - "--granularity-unit", "min" - ]) + output = exec_test_command( + BASE_CMD + + [ + "objectstorage", + "--metrics", + "obj_requests_num:sum", + "--start-time", + "2025-12-22T00:00:00Z", + "--end-time", + "2025-12-22T12:00:00Z", + "--entity-region", + "us-southeast", + "--granularity", + "5", + "--granularity-unit", + "min", + ] + ) assert "Fetching metrics" in output or "data" in output.lower() - def test_malformed_filters(): """Test handling of malformed filter syntax""" exec_failing_test_command( - BASE_CMD + [ + BASE_CMD + + [ "objectstorage", - "--metrics", "obj_requests_num:sum", - "--duration", "15", - "--duration-unit", "min", - "--filters", "invalid_filter_format" + "--metrics", + "obj_requests_num:sum", + "--duration", + "15", + "--duration-unit", + "min", + "--filters", + "invalid_filter_format", ], - expected_code=ExitCodes.REQUEST_FAILED + expected_code=ExitCodes.REQUEST_FAILED, ) - - - diff --git a/tests/unit/test_plugin_get_metrics.py b/tests/unit/test_plugin_get_metrics.py index 
995ee1b20..5c68b6ed9 100644 --- a/tests/unit/test_plugin_get_metrics.py +++ b/tests/unit/test_plugin_get_metrics.py @@ -1,15 +1,11 @@ -import json -import os from importlib import import_module -from unittest.mock import MagicMock, Mock, patch +from unittest.mock import Mock, patch -import pytest from pytest import CaptureFixture # Import the monitor-api module using importlib (hyphens not allowed in regular imports) monitor_api = import_module("linodecli.plugins.monitor-api") -call = monitor_api.call get_metrics = monitor_api.get_metrics get_metrics_parser = monitor_api.get_metrics_parser make_api_request = monitor_api.make_api_request @@ -27,10 +23,16 @@ def test_make_api_request_success(self): mock_response.status_code = 200 mock_response.content = b'{"data": {"test": "data"}}' mock_response.json.return_value = {"data": {"test": "data"}} - - with patch.object(monitor_api.requests, 'post', return_value=mock_response) as mock_post: + + with patch.object( + monitor_api.requests, "post", return_value=mock_response + ) as mock_post: status_code, result = make_api_request( - "nodebalancer", "metrics", "POST", {"test": "data"}, "test_token" + "nodebalancer", + "metrics", + "POST", + {"test": "data"}, + "test_token", ) assert status_code == 200 @@ -43,8 +45,10 @@ def test_make_api_request_http_error(self): mock_response.status_code = 401 mock_response.content = b"Unauthorized" mock_response.json.return_value = {"error": "Unauthorized"} - - with patch.object(monitor_api.requests, 'post', return_value=mock_response): + + with patch.object( + monitor_api.requests, "post", return_value=mock_response + ): status_code, _ = make_api_request( "nodebalancer", "metrics", "POST", {}, "test_token" ) @@ -56,8 +60,14 @@ class TestGetMetrics: def test_get_metrics_relative_time(self): """Test get_metrics with relative time duration""" - with patch.object(monitor_api, 'make_api_request', return_value=(200, {"data": {"test": "data"}})): - with patch.object(monitor_api, 
'print_metrics_response') as mock_print: + with patch.object( + monitor_api, + "make_api_request", + return_value=(200, {"data": {"test": "data"}}), + ): + with patch.object( + monitor_api, "print_metrics_response" + ) as mock_print: config = MetricsConfig( service_name="nodebalancer", entity_ids=[123, 456], @@ -76,8 +86,14 @@ def test_get_metrics_relative_time(self): def test_get_metrics_absolute_time(self): """Test get_metrics with absolute time range""" - with patch.object(monitor_api, 'make_api_request', return_value=(200, {"data": {"test": "data"}})): - with patch.object(monitor_api, 'print_metrics_response') as mock_print: + with patch.object( + monitor_api, + "make_api_request", + return_value=(200, {"data": {"test": "data"}}), + ): + with patch.object( + monitor_api, "print_metrics_response" + ) as mock_print: config = MetricsConfig( service_name="dbaas", entity_ids=[789], @@ -96,8 +112,14 @@ def test_get_metrics_absolute_time(self): def test_get_metrics_with_filters_and_groupby(self): """Test get_metrics with filters and group_by""" - with patch.object(monitor_api, 'make_api_request', return_value=(200, {"data": {"test": "data"}})): - with patch.object(monitor_api, 'print_metrics_response') as mock_print: + with patch.object( + monitor_api, + "make_api_request", + return_value=(200, {"data": {"test": "data"}}), + ): + with patch.object( + monitor_api, "print_metrics_response" + ) as mock_print: config = MetricsConfig( service_name="dbaas", entity_ids=[123], @@ -108,7 +130,10 @@ def test_get_metrics_with_filters_and_groupby(self): metrics=["cpu_usage:avg"], granularity=None, granularity_unit=None, - filters=["node_type:in:primary,secondary", "status:eq:active"], + filters=[ + "node_type:in:primary,secondary", + "status:eq:active", + ], group_by=["entity_id", "node_type"], token="test_token", ) @@ -118,9 +143,13 @@ def test_get_metrics_with_filters_and_groupby(self): def test_get_metrics_api_error(self): """Test get_metrics with API error response""" - with 
patch.object(monitor_api, 'make_api_request', return_value=(401, {"error": "Unauthorized"})): - with patch('builtins.print'): - with patch.object(monitor_api.sys, 'exit') as mock_exit: + with patch.object( + monitor_api, + "make_api_request", + return_value=(401, {"error": "Unauthorized"}), + ): + with patch("builtins.print"): + with patch.object(monitor_api.sys, "exit") as mock_exit: config = MetricsConfig( service_name="nodebalancer", entity_ids=[123], From 041db659a4e193c76d77ea1fa8cfc67a420ec212 Mon Sep 17 00:00:00 2001 From: susharma Date: Thu, 9 Apr 2026 11:43:02 +0530 Subject: [PATCH 11/11] Address GitHub Copilot review comments - Add validation for aggregate function values (must be in AGGREGATE_FUNCTIONS) - Add validation for duration-unit and granularity-unit (must be min/hr/day) - Enforce entity-ids requirement for non-objectstorage services - Validate that granularity and granularity-unit are provided together - Add -h flag handling for help (in addition to --help) - Make payload printing debug-only (only shown with --debug flag) - Add @requires_jwe_token skip decorator for integration tests - Add new test cases for validation improvements --- linodecli/plugins/monitor-api.py | 95 ++++++++++++++++++- .../monitor/test_plugin_get_metrics.py | 91 ++++++++++++++++++ 2 files changed, 181 insertions(+), 5 deletions(-) diff --git a/linodecli/plugins/monitor-api.py b/linodecli/plugins/monitor-api.py index 46b7105f8..9c1cd799c 100644 --- a/linodecli/plugins/monitor-api.py +++ b/linodecli/plugins/monitor-api.py @@ -45,6 +45,9 @@ def get_auth_token(): # Aggregate functions AGGREGATE_FUNCTIONS = ["sum", "avg", "max", "min", "count"] +# Allowed time units +ALLOWED_TIME_UNITS = ["min", "hr", "day"] + @dataclass class MetricsConfig: @@ -123,8 +126,32 @@ def parse_metrics(metrics: List[str]) -> List[dict]: for metric in metrics: if ":" in metric: metric_name, agg_func = metric.split(":", 1) + metric_name = metric_name.strip() + agg_func = agg_func.strip().lower() + if 
not metric_name: + print( + f"Metric name is required for metric '{metric}'", + file=sys.stderr, + ) + print( + f"Format: 'metric_name:function' where function is one of: " + f"{', '.join(AGGREGATE_FUNCTIONS)}", + file=sys.stderr, + ) + sys.exit(ExitCodes.REQUEST_FAILED) + if agg_func not in AGGREGATE_FUNCTIONS: + print( + f"Invalid aggregate function '{agg_func}' for metric '{metric}'", + file=sys.stderr, + ) + print( + f"Aggregate function must be one of: " + f"{', '.join(AGGREGATE_FUNCTIONS)}", + file=sys.stderr, + ) + sys.exit(ExitCodes.REQUEST_FAILED) parsed_metrics.append( - {"aggregate_function": agg_func, "name": metric_name.strip()} + {"aggregate_function": agg_func, "name": metric_name} ) else: print( @@ -206,7 +233,7 @@ def build_payload(config: MetricsConfig) -> dict: return payload -def get_metrics(config: MetricsConfig): +def get_metrics(config: MetricsConfig, debug: bool = False): """Get metrics for specified service entities""" payload = build_payload(config) @@ -216,7 +243,12 @@ def get_metrics(config: MetricsConfig): ) else: print(f"Fetching metrics for {config.service_name} (all entities)") - print(f"Request payload: {json.dumps(payload, indent=2)}") + + if debug: + print( + f"Request payload: {json.dumps(payload, indent=2)}", + file=sys.stderr, + ) try: status, response = make_api_request( @@ -432,6 +464,18 @@ def validate_arguments(parsed): print(" --metrics: required", file=sys.stderr) return False + # Validate entity_ids requirement (only objectstorage allows querying all entities) + if not parsed.entity_ids and parsed.service.lower() != "objectstorage": + print( + f"--entity-ids is required for service '{parsed.service}'", + file=sys.stderr, + ) + print( + "Only 'objectstorage' service allows querying all entities without --entity-ids", + file=sys.stderr, + ) + return False + # Validate time duration arguments has_relative = ( parsed.duration is not None and parsed.duration_unit is not None @@ -451,6 +495,42 @@ def 
validate_arguments(parsed): ) return False + # Validate duration-unit if provided + if parsed.duration is not None and parsed.duration_unit is not None: + if parsed.duration_unit not in ALLOWED_TIME_UNITS: + print( + f"Invalid duration unit '{parsed.duration_unit}'", + file=sys.stderr, + ) + print( + f"Allowed units: {', '.join(ALLOWED_TIME_UNITS)}", + file=sys.stderr, + ) + return False + + # Validate granularity arguments + if (parsed.granularity is not None) != ( + parsed.granularity_unit is not None + ): + print( + "Both --granularity and --granularity-unit must be provided together", + file=sys.stderr, + ) + return False + + # Validate granularity-unit if provided + if parsed.granularity_unit is not None: + if parsed.granularity_unit not in ALLOWED_TIME_UNITS: + print( + f"Invalid granularity unit '{parsed.granularity_unit}'", + file=sys.stderr, + ) + print( + f"Allowed units: {', '.join(ALLOWED_TIME_UNITS)}", + file=sys.stderr, + ) + return False + return True @@ -473,7 +553,12 @@ def call(args, context=None): # pylint: disable=unused-argument parsed, remaining_args = parser.parse_known_args(args) # Handle help cases - if not parsed.command or parsed.command == "help" or "--help" in args: + if ( + not parsed.command + or parsed.command == "help" + or "--help" in args + or "-h" in args + ): print_help(parser) sys.exit(ExitCodes.SUCCESS) @@ -527,4 +612,4 @@ def call(args, context=None): # pylint: disable=unused-argument associated_entity_region=parsed.associated_entity_region, ) - get_metrics(config) + get_metrics(config, debug=parsed.debug) diff --git a/tests/integration/monitor/test_plugin_get_metrics.py b/tests/integration/monitor/test_plugin_get_metrics.py index 84041dc3c..3be3a2413 100644 --- a/tests/integration/monitor/test_plugin_get_metrics.py +++ b/tests/integration/monitor/test_plugin_get_metrics.py @@ -2,6 +2,8 @@ Integration tests for the get_metrics plugin """ +import os + import pytest from linodecli.exit_codes import ExitCodes @@ -13,6 +15,12 @@ 
# Base command for monitor-api plugin BASE_CMD = ["linode-cli", "monitor-api", "get-metrics"] +# Skip decorator for tests that require JWE_TOKEN +requires_jwe_token = pytest.mark.skipif( + not os.getenv("JWE_TOKEN"), + reason="JWE_TOKEN environment variable not set", +) + def test_missing_required_args(): """Test error handling for missing required arguments""" @@ -135,6 +143,7 @@ def test_conflicting_time_params(): @pytest.mark.smoke +@requires_jwe_token def test_objstorage_metrics_basic(): """Test get_metrics with objectstorage service (with authentication)""" # Use objectstorage service which doesn't require entity-ids @@ -157,6 +166,7 @@ def test_objstorage_metrics_basic(): assert "Fetching metrics" in output or "data" in output.lower() +@requires_jwe_token def test_obj_metrics_with_filters(): """Test get_metrics with objectstorage service and filters""" output = exec_test_command( @@ -179,6 +189,7 @@ def test_obj_metrics_with_filters(): assert "Fetching metrics" in output or "data" in output.lower() +@requires_jwe_token def test_absolute_time_metrics(): """Test get_metrics with objectstorage service and absolute time range""" output = exec_test_command( @@ -220,3 +231,83 @@ def test_malformed_filters(): ], expected_code=ExitCodes.REQUEST_FAILED, ) + + +def test_invalid_aggregate_function_value(): + """Test handling of invalid aggregate function values""" + exec_failing_test_command( + BASE_CMD + + [ + "nodebalancer", + "--entity-ids", + "123", + "--metrics", + "cpu_usage:invalid_func", # Invalid aggregate function + "--duration", + "15", + "--duration-unit", + "min", + ], + expected_code=ExitCodes.REQUEST_FAILED, + ) + + +def test_entity_ids_required_for_non_objectstorage(): + """Test that entity-ids is required for non-objectstorage services""" + exec_failing_test_command( + BASE_CMD + + [ + "dbaas", + "--metrics", + "cpu_usage:avg", + "--duration", + "15", + "--duration-unit", + "min", + ], + expected_code=ExitCodes.REQUEST_FAILED, + ) + + +def 
test_invalid_granularity_unit(): + """Test handling of invalid granularity unit""" + exec_failing_test_command( + BASE_CMD + + [ + "nodebalancer", + "--entity-ids", + "123", + "--metrics", + "cpu_usage:avg", + "--duration", + "15", + "--duration-unit", + "min", + "--granularity", + "5", + "--granularity-unit", + "invalid_unit", + ], + expected_code=ExitCodes.REQUEST_FAILED, + ) + + +def test_granularity_without_unit(): + """Test that granularity requires granularity-unit""" + exec_failing_test_command( + BASE_CMD + + [ + "nodebalancer", + "--entity-ids", + "123", + "--metrics", + "cpu_usage:avg", + "--duration", + "15", + "--duration-unit", + "min", + "--granularity", + "5", + ], + expected_code=ExitCodes.REQUEST_FAILED, + )