diff --git a/mcp_servers/integration_test_generator/README.md b/mcp_servers/integration_test_generator/README.md
index 12b9e7357fd..41ce6c5786c 100644
--- a/mcp_servers/integration_test_generator/README.md
+++ b/mcp_servers/integration_test_generator/README.md
@@ -41,7 +41,7 @@ Add to your MCP configuration file:
     "integration-test-generator": {
       "command": "python3",
       "args": [
-        "/Users//system-tests/mcp_servers/integration_test_generator/server.py"
+        "/system-tests/mcp_servers/integration_test_generator/server.py"
       ]
     }
   }
@@ -55,7 +55,7 @@ Add to your MCP configuration file:
     "integration-test-generator": {
      "command": "python3",
       "args": [
-        "/Users/quinna.halim/system-tests/mcp_servers/integration_test_generator/server.py"
+        "/system-tests/mcp_servers/integration_test_generator/server.py"
       ]
     }
   }
@@ -180,17 +180,8 @@ def setup_main(self) -> None:
 
 ### Step 5: Add Feature to utils/_features.py
 
-If the feature doesn't exist, add it:
-
-```python
-@staticmethod
-def postgres_receiver_metrics(test_object):
-    """OpenTelemetry semantic conventions for Postgres receiver metrics
-
-    https://feature-parity.us1.prod.dog/#/?feature=498
-    """
-    return _mark_test_object(test_object, feature_id=498, owner=_Owner.idm)
-```
+If the feature doesn't exist, add it by following this [doc](https://github.com/DataDog/system-tests/blob/main/docs/edit/features.md).
+>Each new feature should be defined in _features.py: create the feature in the Feature Parity Dashboard to get its feature ID, then copy one of the existing feature entries and update the name, the feature ID in the URL, and the feature number.
 
 ### Step 6: Format and Test
 
diff --git a/mcp_servers/integration_test_generator/constants.py b/mcp_servers/integration_test_generator/constants.py
index cd870da2206..49b4b180448 100644
--- a/mcp_servers/integration_test_generator/constants.py
+++ b/mcp_servers/integration_test_generator/constants.py
@@ -1,6 +1,6 @@
 from pathlib import Path
 
-METRIC_TYPES = {"sum", "gauge", "histogram"}
+METRIC_TYPES = {"sum", "gauge"}
 
 GH_BASE_API = "https://api.github.com/repos/open-telemetry/opentelemetry-collector-contrib"
 
 # Path to reference test files
diff --git a/mcp_servers/integration_test_generator/server.py b/mcp_servers/integration_test_generator/server.py
index 563632a585f..54cb0a6da0d 100644
--- a/mcp_servers/integration_test_generator/server.py
+++ b/mcp_servers/integration_test_generator/server.py
@@ -6,6 +6,7 @@
 """
 
 import json
+import sys
 from pathlib import Path
 from typing import Any
 import requests
@@ -14,20 +15,19 @@
 from templates.test_integration_file_template import get_test_file_template
 from templates.prompt_template import get_generate_with_reference_prompt
 from metric_operations_generator import generate_smoke_operations_from_metrics
+from utils._logger import logger
+import yaml
+import base64  # The content is base64-encoded per GitHub API
 
 # MCP SDK imports
 try:
     from mcp.server import Server
     from mcp.types import Tool, TextContent, Resource
+    from mcp.types import Prompt, PromptArgument
     import mcp.server.stdio
 except ImportError:
-    print("Error: MCP SDK not installed. Install with: pip install mcp")
-    exit(1)
-
-# Path to reference test files
-SYSTEM_TESTS_ROOT = Path(__file__).parent.parent.parent
-POSTGRES_TEST_PATH = SYSTEM_TESTS_ROOT / "tests/otel_postgres_metrics_e2e/test_postgres_metrics.py"
-MYSQL_TEST_PATH = SYSTEM_TESTS_ROOT / "tests/otel_mysql_metrics_e2e/test_otel_mysql_metrics.py"
+    logger.error("Error: MCP SDK not installed. Install with: pip install mcp")
+    sys.exit(1)
 
 
 def generate_test_file(
@@ -38,6 +38,21 @@
 ) -> str:
     """Generate a test file for the specified integration."""
 
+    # Get integration config or use defaults
+    config = constants.INTEGRATION_CONFIGS.get(
+        integration_name.lower(),
+        {
+            "container_name": f"{integration_name.lower()}_container",
+            "smoke_test_operations": [
+                f'logger.info("Add specific {integration_name} operations here")',
+            ],
+            "expected_smoke_metrics": [
+                f"{integration_name.lower()}.metric1",
+                f"{integration_name.lower()}.metric2",
+            ],
+        },
+    )
+
     integration_title = integration_name.title()
     integration_lower = integration_name.lower()
     integration_upper = integration_name.upper()
@@ -166,6 +181,81 @@ def generate_metrics_file(integration_name: str) -> str:
     return metric_template
 
 
+def generate_metrics_file(integration_name: str) -> str:
+    """Generate a metrics JSON file for a receiver from the latest otel-collector-contrib release.
+
+    A receiver's metrics are defined in its metadata.yaml file. Fetch that file from the
+    latest release via the GitHub contents API, parse the YAML, and return the metrics
+    in the following format:
+    {
+        "<metric_name>": {
+            "data_type": "<data_type>",
+            "description": "<description>"
+        },
+    }
+
+    """
+    url = f"{constants.GH_BASE_API}/releases"
+
+    headers = {
+        "Accept": "application/vnd.github+json",
+    }
+
+    response = requests.get(url, headers=headers, timeout=2)
+    response.raise_for_status()
+
+    releases = response.json()  # list[dict]
+    if not releases:
+        return "No releases found for opentelemetry-collector-contrib."
+
+    latest = releases[0]
+
+    receiver_path = f"receiver/{integration_name.lower()}receiver/metadata.yaml"
+    metadata_url = f"{constants.GH_BASE_API}/contents/{receiver_path}?ref={latest.get('tag_name')}"
+
+    response = requests.get(metadata_url, headers=headers, timeout=2)
+    if response.status_code != 200:
+        return f"Failed to fetch metadata.yaml for {integration_name}."
+
+    metadata_content = response.json().get("content")
+    if not metadata_content:
+        return "No content found in metadata.yaml."
+
+    try:
+        decoded_yaml = base64.b64decode(metadata_content).decode("utf-8")
+    except Exception as e:
+        return f"Error decoding metadata.yaml content: {e}"
+
+    # Parse YAML to get metrics info
+    try:
+        yaml_data = yaml.safe_load(decoded_yaml)
+    except Exception as e:
+        return f"Error parsing YAML: {e}"
+
+    metric_template = {}
+    metrics_dict = yaml_data.get("metrics", {})
+    for metric_name, metric_info in metrics_dict.items():
+        metric_type = set(metric_info.keys()) & constants.METRIC_TYPES
+        metric_template[metric_name] = {
+            "data_type": metric_type.pop() if metric_type else None,
+            "description": metric_info.get("description", ""),
+        }
+
+    # Return the dict as pretty-printed JSON
+    result_json = json.dumps(metric_template, indent=2)
+
+    metric_file_name = f"{integration_name}_metrics.json"
+    metric_file_path = constants.SYSTEM_TESTS_ROOT / f"tests/otel_{integration_name}_metrics_e2e/{metric_file_name}"
+    if metric_file_path.exists():
+        return "There is already a metric file created. Please delete it and try again"
+    # Create the parent directory if it doesn't exist
+    metric_file_path.parent.mkdir(parents=True, exist_ok=True)
+    with open(metric_file_path, "x") as file:
+        file.write(result_json)
+
+    return metric_template
+
+
 def generate_init_file() -> str:
     """Generate __init__.py file."""
     return ""
@@ -209,7 +299,10 @@ async def list_tools() -> list[Tool]:
                     },
                     "feature_name": {
                         "type": "string",
-                        "description": "Feature name for the @features decorator (optional, defaults to <integration>_receiver_metrics)",
+                        "description": (
+                            "Feature name for the @features decorator "
+                            "(optional, defaults to <integration>_receiver_metrics)"
+                        ),
                     },
                 },
                 "required": ["integration_name", "metrics_json_file"],
@@ -224,30 +317,20 @@ async def list_tools() -> list[Tool]:
             },
         ),
         Tool(
-            name="generate_metrics_json_template",
-            description="Generate a template metrics JSON file structure",
+            name="get_shared_utility_info",
+            description="Get information about the shared OtelMetricsValidator utility",
             inputSchema={
                 "type": "object",
-                "properties": {
-                    "integration_name": {
-                        "type": "string",
-                        "description": "Name of the integration",
-                    },
-                    "sample_metrics": {
-                        "type": "array",
-                        "items": {"type": "string"},
-                        "description": "List of sample metric names",
-                    },
-                },
-                "required": ["integration_name", "sample_metrics"],
+                "properties": {},
             },
         ),
         Tool(
-            name="get_shared_utility_info",
-            description="Get information about the shared OtelMetricsValidator utility",
+            name="generate_metrics_json",
+            description="Generates a metrics JSON file for a particular OpenTelemetry receiver",
             inputSchema={
                 "type": "object",
-                "properties": {},
+                "properties": {"integration_name": {"type": "string"}},
+                "required": ["integration_name"],
             },
         ),
     ]
@@ -257,40 +340,51 @@ async def list_tools() -> list[Tool]:
 async def list_resources() -> list[Resource]:
     """List available reference resources."""
     resources = []
-
-    if POSTGRES_TEST_PATH.exists():
+
+    if constants.POSTGRES_TEST_PATH.exists():
         resources.append(
             Resource(
                 uri=f"file://{constants.POSTGRES_TEST_PATH}",
                 name="PostgreSQL Metrics Test (Reference)",
                 description="Reference implementation of OTel metrics test. Use this as the gold standard for structure and patterns.",
-                mimeType="text/x-python"
+                mimeType="text/x-python",
             )
         )
-
-    if MYSQL_TEST_PATH.exists():
+
+    if constants.MYSQL_TEST_PATH.exists():
         resources.append(
             Resource(
-                uri=f"file://{MYSQL_TEST_PATH}",
+                uri=f"file://{constants.MYSQL_TEST_PATH}",
                 name="MySQL Metrics Test (Reference)",
                 description="MySQL metrics test implementation following PostgreSQL patterns",
-                mimeType="text/x-python"
+                mimeType="text/x-python",
             )
         )
-
+
     # Add OtelMetricsValidator reference
-    validator_path = SYSTEM_TESTS_ROOT / "utils/otel_metrics_validator.py"
+    validator_path = constants.SYSTEM_TESTS_ROOT / "utils/otel_metrics_validator.py"
     if validator_path.exists():
         resources.append(
             Resource(
                 uri=f"file://{validator_path}",
                 name="OtelMetricsValidator Utility",
                 description="Shared utility for validating OTel metrics. All tests should use this.",
-                mimeType="text/x-python"
+                mimeType="text/x-python",
             )
         )
-
-
+
+    # Add improvements document
+    improvements_path = Path(__file__).parent / "IMPROVEMENTS.md"
+    if improvements_path.exists():
+        resources.append(
+            Resource(
+                uri=f"file://{improvements_path}",
+                name="Integration Test Improvements",
+                description="Design document with improvements and patterns for test generation",
+                mimeType="text/markdown",
+            )
+        )
+
     return resources
 
 
@@ -300,10 +394,10 @@ async def read_resource(uri: str) -> str:
     # Extract path from file:// URI
     path = uri.replace("file://", "")
     path_obj = Path(path)
-
+
     if not path_obj.exists():
         raise ValueError(f"Resource not found: {uri}")
-
+
     with open(path_obj, "r", encoding="utf-8") as f:
         return f.read()
 
 
@@ -311,8 +405,7 @@ async def read_resource(uri: str) -> str:
 @app.list_prompts()
 async def list_prompts():
     """List available prompts."""
-    from mcp.types import Prompt, PromptArgument
-
+
     return [
         Prompt(
             name="generate_with_reference",
@@ -321,14 +414,10 @@ async def list_prompts():
                 PromptArgument(
                     name="integration_name",
                     description="Name of the integration (e.g., redis, kafka, mongodb)",
-                    required=True
+                    required=True,
                 ),
-                PromptArgument(
-                    name="metrics_json_file",
-                    description="Name of the metrics JSON file",
-                    required=True
-                ),
-            ]
+                PromptArgument(name="metrics_json_file", description="Name of the metrics JSON file", required=True),
+            ],
         )
     ]
 
 
@@ -337,15 +426,17 @@ async def list_prompts():
 async def get_prompt(name: str, arguments: dict[str, str] | None = None):
     """Get a specific prompt."""
     from mcp.types import PromptMessage, TextContent as PromptTextContent
-
+
     if name == "generate_with_reference":
         integration_name = arguments.get("integration_name", "example") if arguments else "example"
-        metrics_json_file = arguments.get("metrics_json_file", "example_metrics.json") if arguments else "example_metrics.json"
-
+        metrics_json_file = (
+            arguments.get("metrics_json_file", "example_metrics.json") if arguments else "example_metrics.json"
+        )
+
         # Read the PostgreSQL test as reference
         postgres_test_content = ""
-        if POSTGRES_TEST_PATH.exists():
-            with open(POSTGRES_TEST_PATH, "r", encoding="utf-8") as f:
+        if constants.POSTGRES_TEST_PATH.exists():
+            with open(constants.POSTGRES_TEST_PATH, "r", encoding="utf-8") as f:
                 postgres_test_content = f.read()
 
         prompt_text = get_generate_with_reference_prompt(
@@ -479,24 +570,24 @@ async def call_tool(name: str, arguments: Any) -> list[TextContent]:
         }
         return [TextContent(type="text", text=json.dumps(result, indent=2))]
 
-    if name == "generate_metrics_json_template":
-        integration_name = arguments["integration_name"]
-        sample_metrics = arguments["sample_metrics"]
+    # if name == "generate_metrics_json_template":
+    #     integration_name = arguments["integration_name"]
+    #     sample_metrics = arguments["sample_metrics"]
 
-        metrics_template = {}
-        for metric_name in sample_metrics:
-            metrics_template[metric_name] = {
-                "data_type": "Sum",  # or "Gauge"
-                "description": f"Description for {metric_name}",
-            }
+    #     metrics_template = {}
+    #     for metric_name in sample_metrics:
+    #         metrics_template[metric_name] = {
+    #             "data_type": "Sum",  # or "Gauge"
+    #             "description": f"Description for {metric_name}",
+    #         }
 
-        result = {
-            "filename": f"{integration_name.lower()}_metrics.json",
-            "content": metrics_template,
-            "note": "Update data_type to 'Sum' or 'Gauge' and provide accurate descriptions",
-        }
+    #     result = {
+    #         "filename": f"{integration_name.lower()}_metrics.json",
+    #         "content": metrics_template,
+    #         "note": "Update data_type to 'Sum' or 'Gauge' and provide accurate descriptions",
+    #     }
 
-        return [TextContent(type="text", text=json.dumps(result, indent=2))]
+    #     return [TextContent(type="text", text=json.dumps(result, indent=2))]
 
     if name == "get_shared_utility_info":
         result = {
@@ -550,6 +641,10 @@ async def call_tool(name: str, arguments: Any) -> list[TextContent]:
 
         return [TextContent(type="text", text=json.dumps(result, indent=2))]
 
+    if name == "generate_metrics_json":
+        integration_name = arguments["integration_name"]
+        result = generate_metrics_file(integration_name)
+        return [TextContent(type="text", text=json.dumps(result, indent=2))]
 
     raise ValueError(f"Unknown tool: {name}")