-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathconftest.py
More file actions
349 lines (281 loc) · 12.3 KB
/
conftest.py
File metadata and controls
349 lines (281 loc) · 12.3 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
"""
Pytest plugin for automatic test failure metrics collection.
This plugin automatically collects test failures and logs them to the metrics system.
"""
import logging
import os
from pathlib import Path
from typing import Optional
import pytest
logger = logging.getLogger(__name__)
def pytest_configure(config):
    """Register the metrics plugin exactly once per session."""
    manager = config.pluginmanager
    if not manager.hasplugin("metrics_plugin"):
        manager.register(MetricsPlugin(config), "metrics_plugin")
def pytest_addoption(parser):
    """Add command-line options for metrics collection."""
    option_specs = (
        (
            "--metrics-output",
            {
                "action": "store",
                "default": None,
                "help": "Output file for collected metrics",
            },
        ),
        (
            "--enable-metrics",
            {
                "action": "store_true",
                "default": False,
                "help": "Enable automatic metrics collection",
            },
        ),
    )
    try:
        for flag, kwargs in option_specs:
            parser.addoption(flag, **kwargs)
    except ValueError:
        # Options were already registered (e.g. by another conftest).
        pass
class MetricsPlugin:
    """Plugin to collect test failure metrics.

    Hooks into pytest to record every failing test (call phase only) via a
    MetricsCollector, then persists the collected data to a JSON file and
    optionally triggers an automatic analysis when the session finishes.
    """

    def __init__(self, config):
        """Initialize the metrics plugin.

        Args:
            config: pytest config object
        """
        self.config = config
        self.metrics_output = config.getoption("--metrics-output")
        self.enable_metrics = config.getoption("--enable-metrics")

        # Auto-enable if metrics output is specified
        if self.metrics_output:
            self.enable_metrics = True

        # Auto-enable metrics based on configuration or environment
        if not self.enable_metrics:
            try:
                from metrics.config_manager import ConfigManager

                config_manager = ConfigManager.get_instance()
                if config_manager.should_auto_enable_metrics():
                    self.enable_metrics = True
                    logger.info("Auto-enabled metrics collection (config or history detected)")
            except ImportError:
                # Config manager not available; fall back to the environment
                # variable and on-disk markers.  (`os` is already imported at
                # module level.)
                if os.getenv("FEEDBACK_LOOP_AUTO_METRICS") == "1":
                    self.enable_metrics = True
                # Also check if data/metrics_data.json exists
                elif Path("data/metrics_data.json").exists():
                    self.enable_metrics = True
                # Check for .feedback-loop/auto-metrics marker
                elif Path(".feedback-loop/auto-metrics").exists():
                    self.enable_metrics = True

        # Default output file if metrics enabled but no file specified
        if self.enable_metrics and not self.metrics_output:
            try:
                from metrics.config_manager import ConfigManager

                config_manager = ConfigManager.get_instance()
                self.metrics_output = config_manager.get(
                    "auto_metrics.output_file", "data/metrics_data.json"
                )
            except ImportError:
                self.metrics_output = "data/metrics_data.json"

        self.collector = None
        if self.enable_metrics:
            try:
                from metrics.collector import MetricsCollector

                self.collector = MetricsCollector()
                # Load existing metrics if output file exists
                if self.metrics_output and Path(self.metrics_output).exists():
                    import json

                    try:
                        with open(self.metrics_output, "r") as f:
                            self.collector.data = json.load(f)
                    except (OSError, ValueError) as e:
                        # A corrupt or unreadable metrics file must not abort
                        # the whole pytest session; start with a fresh collector.
                        logger.warning(f"Could not load existing metrics: {e}")
                logger.info("Metrics collection enabled")
            except ImportError:
                logger.warning("Could not import MetricsCollector, metrics disabled")
                self.enable_metrics = False

        # NOTE: Auto-analysis feature
        # When metrics collection is enabled (via --enable-metrics or --metrics-output),
        # the plugin automatically analyzes test results after each pytest session.
        # This provides immediate feedback on pattern violations and effectiveness.
        # To disable auto-analysis while keeping metrics collection, set the
        # environment variable: FEEDBACK_LOOP_SKIP_AUTO_ANALYSIS=1

    @pytest.hookimpl(tryfirst=True, hookwrapper=True)
    def pytest_runtest_makereport(self, item, call):
        """Hook to capture test execution results.

        Args:
            item: Test item
            call: Call information
        """
        outcome = yield
        report = outcome.get_result()

        if not self.enable_metrics or not self.collector:
            return

        # Only process test call phase (not setup/teardown)
        if report.when != "call":
            return

        # Log test failures
        if report.failed:
            self._log_test_failure(item, report, call)

    def _log_test_failure(self, item, report, call):
        """Log a test failure to the metrics collector.

        Args:
            item: Test item
            report: Test report
            call: Call information
        """
        if not self.collector:
            return

        # Extract test information
        test_name = item.nodeid
        failure_reason = str(report.longrepr) if report.longrepr else "Unknown failure"

        # Try to detect pattern violated
        pattern_violated = self._detect_pattern_from_failure(failure_reason, report)

        # Extract code snippet if available
        code_snippet = self._extract_code_snippet(item, report)

        # Log the failure
        try:
            self.collector.log_test_failure(
                test_name=test_name,
                failure_reason=failure_reason[:500],  # Limit length
                pattern_violated=pattern_violated,
                code_snippet=code_snippet,
            )
            logger.debug(f"Logged test failure: {test_name}")
        except Exception as e:
            logger.warning(f"Failed to log test failure: {e}")

    def _detect_pattern_from_failure(self, failure_reason: str, report) -> Optional[str]:
        """Detect which pattern was violated from the failure.

        Args:
            failure_reason: Failure reason text
            report: Test report

        Returns:
            Pattern name or None
        """
        failure_lower = failure_reason.lower()

        # Pattern detection heuristics (keyword matching on the failure text)
        if "typeerror" in failure_lower and "json" in failure_lower:
            if "float64" in failure_lower or "int64" in failure_lower or "numpy" in failure_lower:
                return "numpy_json_serialization"
        if "indexerror" in failure_lower or "list index out of range" in failure_lower:
            return "bounds_checking"
        if "exception" in failure_lower:
            # Check if it's a bare except issue
            if "bare except" in failure_lower or "exception:" in failure_lower:
                return "specific_exceptions"
        if "print" in failure_lower and "logger" in failure_lower:
            return "logger_debug"
        if "file" in failure_lower and ("not closed" in failure_lower or "leak" in failure_lower):
            return "temp_file_handling"
        if "memory" in failure_lower or "memoryerror" in failure_lower:
            return "large_file_processing"

        # Fall back to matching known pattern names against the test name
        if hasattr(report, "location"):
            test_name = str(report.location[2]) if len(report.location) > 2 else ""
            for pattern in [
                "numpy_json",
                "bounds_checking",
                "specific_exceptions",
                "logger_debug",
                "temp_file",
                "large_file",
            ]:
                if pattern in test_name.lower():
                    # (The original called pattern.replace("_", "_") here,
                    # which was a no-op; the pattern name is returned as-is.)
                    return pattern
        return None

    def _extract_code_snippet(self, item, report) -> Optional[str]:
        """Extract code snippet related to the failure.

        Args:
            item: Test item
            report: Test report

        Returns:
            Code snippet or None
        """
        try:
            # Try to extract from traceback
            if hasattr(report, "longreprtext"):
                lines = report.longreprtext.split("\n")
                # Find lines with code (usually indented)
                code_lines = [
                    line for line in lines if line.startswith("    ") or line.startswith("> ")
                ]
                if code_lines:
                    return "\n".join(code_lines[:10])  # Limit to 10 lines
        except Exception:
            pass
        return None

    def pytest_sessionfinish(self, session, exitstatus):
        """Hook called after test session finishes.

        Saves collected metrics to `self.metrics_output` and, unless skipped
        via FEEDBACK_LOOP_SKIP_AUTO_ANALYSIS or config, runs auto-analysis.

        Args:
            session: Test session
            exitstatus: Exit status
        """
        if not self.enable_metrics or not self.collector:
            return

        # Save metrics to file
        if self.metrics_output:
            try:
                with open(self.metrics_output, "w") as f:
                    f.write(self.collector.export_json())
                summary = self.collector.get_summary()
                logger.info(f"Metrics saved to {self.metrics_output}")
                logger.info(f"Test failures logged: {summary['test_failures']}")

                # Automatic Analysis (can be skipped via environment variable)
                if os.getenv("FEEDBACK_LOOP_SKIP_AUTO_ANALYSIS"):
                    logger.info("Auto-analysis skipped (FEEDBACK_LOOP_SKIP_AUTO_ANALYSIS set)")
                    if not self._is_quiet():
                        print("\n✓ Metrics saved. Auto-analysis skipped.")
                    return

                # Check if auto-analysis should run based on config
                # Cache ConfigManager instance to avoid redundant singleton lookups
                failure_count = summary.get("test_failures", 0)
                should_analyze = True
                is_quiet = False
                config_manager = None
                try:
                    from metrics.config_manager import ConfigManager

                    config_manager = ConfigManager.get_instance()
                    should_analyze = config_manager.should_auto_analyze(failure_count)
                    is_quiet = config_manager.is_quiet()
                except ImportError:
                    # Config manager not available, use default behavior
                    pass

                if not should_analyze:
                    if not is_quiet:
                        print(f"\n✓ Metrics saved ({failure_count} failures, below threshold)")
                    return

                try:
                    from metrics.integrate import MetricsIntegration

                    if not is_quiet:
                        print("\n🔄 Feedback Loop: Analyzing results...")
                    integration = MetricsIntegration(
                        metrics_file=self.metrics_output,
                        patterns_file="data/patterns.json",
                    )
                    integration.analyze_metrics(update_patterns=True)

                    # Show dashboard if configured (reuse cached config_manager)
                    if config_manager:
                        try:
                            if config_manager.should_show_dashboard():
                                from bin.fl_dashboard import main as dashboard_main

                                dashboard_main()
                        except Exception:
                            # Dashboard is best-effort only; never fail the session.
                            pass
                except Exception as e:
                    logger.error(f"Analysis failed: {e}")
                    if not is_quiet:
                        print(f"\n❌ Analysis failed: {e}")
            except Exception as e:
                logger.error(f"Failed to save metrics: {e}")

    def _is_quiet(self, config_manager=None) -> bool:
        """Check if output should be quiet.

        Args:
            config_manager: Optional cached ConfigManager instance
        """
        if config_manager:
            try:
                return config_manager.is_quiet()
            except Exception as e:
                logger.error(f"Failed to check quiet mode: {e}")
                return False
        try:
            from metrics.config_manager import ConfigManager

            return ConfigManager.get_instance().is_quiet()
        except ImportError:
            return False
        except Exception as e:
            logger.error(f"Failed to check quiet mode: {e}")
            return False