diff --git a/CHANGELOG.md b/CHANGELOG.md index 75847a47..adf52b5f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,8 @@ +# 7.5.0 - 2026-01-06 + +feat: Capture Langchain, OpenAI and Anthropic errors as exceptions (if exception autocapture is enabled) +feat: Add reference to exception in LLMA trace and span events + # 7.4.3 - 2026-01-02 Fixes cache creation cost for Langchain with Anthropic diff --git a/posthog/__init__.py b/posthog/__init__.py index f2ca6ecb..8dbed815 100644 --- a/posthog/__init__.py +++ b/posthog/__init__.py @@ -29,6 +29,9 @@ from posthog.contexts import ( tag as inner_tag, ) +from posthog.contexts import ( + get_tags as inner_get_tags, +) from posthog.exception_utils import ( DEFAULT_CODE_VARIABLES_IGNORE_PATTERNS, DEFAULT_CODE_VARIABLES_MASK_PATTERNS, @@ -190,6 +193,19 @@ def tag(name: str, value: Any): return inner_tag(name, value) +def get_tags() -> Dict[str, Any]: + """ + Get all tags from the current context. + + Returns: + Dict of all tags in the current context + + Category: + Contexts + """ + return inner_get_tags() + + """Settings.""" api_key = None # type: Optional[str] host = None # type: Optional[str] diff --git a/posthog/ai/langchain/callbacks.py b/posthog/ai/langchain/callbacks.py index d92dc8f0..044adfdf 100644 --- a/posthog/ai/langchain/callbacks.py +++ b/posthog/ai/langchain/callbacks.py @@ -22,8 +22,8 @@ try: # LangChain 1.0+ and modern 0.x with langchain-core - from langchain_core.callbacks.base import BaseCallbackHandler from langchain_core.agents import AgentAction, AgentFinish + from langchain_core.callbacks.base import BaseCallbackHandler except (ImportError, ModuleNotFoundError): # Fallback for older LangChain versions from langchain.callbacks.base import BaseCallbackHandler @@ -35,15 +35,15 @@ FunctionMessage, HumanMessage, SystemMessage, - ToolMessage, ToolCall, + ToolMessage, ) from langchain_core.outputs import ChatGeneration, LLMResult from pydantic import BaseModel from posthog import setup -from 
posthog.ai.utils import get_model_params, with_privacy_mode from posthog.ai.sanitization import sanitize_langchain +from posthog.ai.utils import get_model_params, with_privacy_mode from posthog.client import Client log = logging.getLogger("posthog") @@ -506,6 +506,14 @@ def _capture_trace_or_span( if isinstance(outputs, BaseException): event_properties["$ai_error"] = _stringify_exception(outputs) event_properties["$ai_is_error"] = True + event_properties = _capture_exception_and_update_properties( + self._ph_client, + outputs, + self._distinct_id, + self._groups, + event_properties, + ) + elif outputs is not None: event_properties["$ai_output_state"] = with_privacy_mode( self._ph_client, self._privacy_mode, outputs @@ -576,10 +584,24 @@ def _capture_generation( if run.tools: event_properties["$ai_tools"] = run.tools + if self._properties: + event_properties.update(self._properties) + + if self._distinct_id is None: + event_properties["$process_person_profile"] = False + if isinstance(output, BaseException): event_properties["$ai_http_status"] = _get_http_status(output) event_properties["$ai_error"] = _stringify_exception(output) event_properties["$ai_is_error"] = True + + event_properties = _capture_exception_and_update_properties( + self._ph_client, + output, + self._distinct_id, + self._groups, + event_properties, + ) else: # Add usage usage = _parse_usage(output, run.provider, run.model) @@ -607,12 +629,6 @@ def _capture_generation( self._ph_client, self._privacy_mode, completions ) - if self._properties: - event_properties.update(self._properties) - - if self._distinct_id is None: - event_properties["$process_person_profile"] = False - self._ph_client.capture( distinct_id=self._distinct_id or trace_id, event="$ai_generation", @@ -863,6 +879,27 @@ def _parse_usage( return llm_usage +def _capture_exception_and_update_properties( + client: Client, + exception: BaseException, + distinct_id: Optional[Union[str, int, UUID]], + groups: Optional[Dict[str, Any]], + 
event_properties: Dict[str, Any], +): + if client.enable_exception_autocapture: + exception_id = client.capture_exception( + exception, + distinct_id=distinct_id, + groups=groups, + properties=event_properties, + ) + + if exception_id: + event_properties["$exception_event_id"] = exception_id + + return event_properties + + def _get_http_status(error: BaseException) -> int: # OpenAI: https://github.com/openai/openai-python/blob/main/src/openai/_exceptions.py # Anthropic: https://github.com/anthropics/anthropic-sdk-python/blob/main/src/anthropic/_exceptions.py diff --git a/posthog/ai/utils.py b/posthog/ai/utils.py index 5f8a4b14..de110146 100644 --- a/posthog/ai/utils.py +++ b/posthog/ai/utils.py @@ -2,14 +2,15 @@ import uuid from typing import Any, Callable, Dict, List, Optional, cast -from posthog.client import Client as PostHogClient -from posthog.ai.types import FormattedMessage, StreamingEventData, TokenUsage +from posthog import get_tags, identify_context, new_context, tag from posthog.ai.sanitization import ( - sanitize_openai, sanitize_anthropic, sanitize_gemini, sanitize_langchain, + sanitize_openai, ) +from posthog.ai.types import FormattedMessage, StreamingEventData, TokenUsage +from posthog.client import Client as PostHogClient def merge_usage_stats( @@ -256,94 +257,108 @@ def call_llm_and_track_usage( usage: TokenUsage = TokenUsage() error_params: Dict[str, Any] = {} - try: - response = call_method(**kwargs) - except Exception as exc: - error = exc - http_status = getattr( - exc, "status_code", 0 - ) # default to 0 becuase its likely an SDK error - error_params = { - "$ai_is_error": True, - "$ai_error": exc.__str__(), - } - finally: - end_time = time.time() - latency = end_time - start_time - - if posthog_trace_id is None: - posthog_trace_id = str(uuid.uuid4()) - - if response and ( - hasattr(response, "usage") - or (provider == "gemini" and hasattr(response, "usage_metadata")) - ): - usage = get_usage(response, provider) - - messages = 
merge_system_prompt(kwargs, provider) - sanitized_messages = sanitize_messages(messages, provider) - - event_properties = { - "$ai_provider": provider, - "$ai_model": kwargs.get("model") or getattr(response, "model", None), - "$ai_model_parameters": get_model_params(kwargs), - "$ai_input": with_privacy_mode( - ph_client, posthog_privacy_mode, sanitized_messages - ), - "$ai_output_choices": with_privacy_mode( - ph_client, posthog_privacy_mode, format_response(response, provider) - ), - "$ai_http_status": http_status, - "$ai_input_tokens": usage.get("input_tokens", 0), - "$ai_output_tokens": usage.get("output_tokens", 0), - "$ai_latency": latency, - "$ai_trace_id": posthog_trace_id, - "$ai_base_url": str(base_url), - **(posthog_properties or {}), - **(error_params or {}), - } - - available_tool_calls = extract_available_tool_calls(provider, kwargs) - - if available_tool_calls: - event_properties["$ai_tools"] = available_tool_calls - - cache_read = usage.get("cache_read_input_tokens") - if cache_read is not None and cache_read > 0: - event_properties["$ai_cache_read_input_tokens"] = cache_read - - cache_creation = usage.get("cache_creation_input_tokens") - if cache_creation is not None and cache_creation > 0: - event_properties["$ai_cache_creation_input_tokens"] = cache_creation - - reasoning = usage.get("reasoning_tokens") - if reasoning is not None and reasoning > 0: - event_properties["$ai_reasoning_tokens"] = reasoning - - web_search_count = usage.get("web_search_count") - if web_search_count is not None and web_search_count > 0: - event_properties["$ai_web_search_count"] = web_search_count - - if posthog_distinct_id is None: - event_properties["$process_person_profile"] = False - - # Process instructions for Responses API - if provider == "openai" and kwargs.get("instructions") is not None: - event_properties["$ai_instructions"] = with_privacy_mode( - ph_client, posthog_privacy_mode, kwargs.get("instructions") + with new_context(client=ph_client, 
capture_exceptions=False): + if posthog_distinct_id: + identify_context(posthog_distinct_id) + + try: + response = call_method(**kwargs) + except Exception as exc: + error = exc + http_status = getattr( + exc, "status_code", 0 + ) # default to 0 because its likely an SDK error + error_params = { + "$ai_is_error": True, + "$ai_error": exc.__str__(), + } + # TODO: Add exception capture for OpenAI/Anthropic/Gemini wrappers when + # enable_exception_autocapture is True, similar to LangChain callbacks. + # See _capture_exception_and_update_properties in langchain/callbacks.py + finally: + end_time = time.time() + latency = end_time - start_time + + if posthog_trace_id is None: + posthog_trace_id = str(uuid.uuid4()) + + if response and ( + hasattr(response, "usage") + or (provider == "gemini" and hasattr(response, "usage_metadata")) + ): + usage = get_usage(response, provider) + + messages = merge_system_prompt(kwargs, provider) + sanitized_messages = sanitize_messages(messages, provider) + + tag("$ai_provider", provider) + tag("$ai_model", kwargs.get("model") or getattr(response, "model", None)) + tag("$ai_model_parameters", get_model_params(kwargs)) + tag( + "$ai_input", + with_privacy_mode(ph_client, posthog_privacy_mode, sanitized_messages), ) - - # send the event to posthog - if hasattr(ph_client, "capture") and callable(ph_client.capture): - ph_client.capture( - distinct_id=posthog_distinct_id or posthog_trace_id, - event="$ai_generation", - properties=event_properties, - groups=posthog_groups, + tag( + "$ai_output_choices", + with_privacy_mode( + ph_client, posthog_privacy_mode, format_response(response, provider) + ), ) + tag("$ai_http_status", http_status) + tag("$ai_input_tokens", usage.get("input_tokens", 0)) + tag("$ai_output_tokens", usage.get("output_tokens", 0)) + tag("$ai_latency", latency) + tag("$ai_trace_id", posthog_trace_id) + tag("$ai_base_url", str(base_url)) + + available_tool_calls = extract_available_tool_calls(provider, kwargs) + + if 
available_tool_calls: + tag("$ai_tools", available_tool_calls) + + cache_read = usage.get("cache_read_input_tokens") + if cache_read is not None and cache_read > 0: + tag("$ai_cache_read_input_tokens", cache_read) + + cache_creation = usage.get("cache_creation_input_tokens") + if cache_creation is not None and cache_creation > 0: + tag("$ai_cache_creation_input_tokens", cache_creation) + + reasoning = usage.get("reasoning_tokens") + if reasoning is not None and reasoning > 0: + tag("$ai_reasoning_tokens", reasoning) + + web_search_count = usage.get("web_search_count") + if web_search_count is not None and web_search_count > 0: + tag("$ai_web_search_count", web_search_count) + + if posthog_distinct_id is None: + tag("$process_person_profile", False) + + # Process instructions for Responses API + if provider == "openai" and kwargs.get("instructions") is not None: + tag( + "$ai_instructions", + with_privacy_mode( + ph_client, posthog_privacy_mode, kwargs.get("instructions") + ), + ) + + # send the event to posthog + if hasattr(ph_client, "capture") and callable(ph_client.capture): + ph_client.capture( + distinct_id=posthog_distinct_id or posthog_trace_id, + event="$ai_generation", + properties={ + **get_tags(), + **(posthog_properties or {}), + **(error_params or {}), + }, + groups=posthog_groups, + ) - if error: - raise error + if error: + raise error return response @@ -367,94 +382,108 @@ async def call_llm_and_track_usage_async( usage: TokenUsage = TokenUsage() error_params: Dict[str, Any] = {} - try: - response = await call_async_method(**kwargs) - except Exception as exc: - error = exc - http_status = getattr( - exc, "status_code", 0 - ) # default to 0 because its likely an SDK error - error_params = { - "$ai_is_error": True, - "$ai_error": exc.__str__(), - } - finally: - end_time = time.time() - latency = end_time - start_time - - if posthog_trace_id is None: - posthog_trace_id = str(uuid.uuid4()) - - if response and ( - hasattr(response, "usage") - or (provider 
== "gemini" and hasattr(response, "usage_metadata")) - ): - usage = get_usage(response, provider) - - messages = merge_system_prompt(kwargs, provider) - sanitized_messages = sanitize_messages(messages, provider) - - event_properties = { - "$ai_provider": provider, - "$ai_model": kwargs.get("model") or getattr(response, "model", None), - "$ai_model_parameters": get_model_params(kwargs), - "$ai_input": with_privacy_mode( - ph_client, posthog_privacy_mode, sanitized_messages - ), - "$ai_output_choices": with_privacy_mode( - ph_client, posthog_privacy_mode, format_response(response, provider) - ), - "$ai_http_status": http_status, - "$ai_input_tokens": usage.get("input_tokens", 0), - "$ai_output_tokens": usage.get("output_tokens", 0), - "$ai_latency": latency, - "$ai_trace_id": posthog_trace_id, - "$ai_base_url": str(base_url), - **(posthog_properties or {}), - **(error_params or {}), - } - - available_tool_calls = extract_available_tool_calls(provider, kwargs) - - if available_tool_calls: - event_properties["$ai_tools"] = available_tool_calls - - cache_read = usage.get("cache_read_input_tokens") - if cache_read is not None and cache_read > 0: - event_properties["$ai_cache_read_input_tokens"] = cache_read - - cache_creation = usage.get("cache_creation_input_tokens") - if cache_creation is not None and cache_creation > 0: - event_properties["$ai_cache_creation_input_tokens"] = cache_creation - - reasoning = usage.get("reasoning_tokens") - if reasoning is not None and reasoning > 0: - event_properties["$ai_reasoning_tokens"] = reasoning - - web_search_count = usage.get("web_search_count") - if web_search_count is not None and web_search_count > 0: - event_properties["$ai_web_search_count"] = web_search_count - - if posthog_distinct_id is None: - event_properties["$process_person_profile"] = False - - # Process instructions for Responses API - if provider == "openai" and kwargs.get("instructions") is not None: - event_properties["$ai_instructions"] = with_privacy_mode( - 
ph_client, posthog_privacy_mode, kwargs.get("instructions") + with new_context(client=ph_client, capture_exceptions=False): + if posthog_distinct_id: + identify_context(posthog_distinct_id) + + try: + response = await call_async_method(**kwargs) + except Exception as exc: + error = exc + http_status = getattr( + exc, "status_code", 0 + ) # default to 0 because its likely an SDK error + error_params = { + "$ai_is_error": True, + "$ai_error": exc.__str__(), + } + # TODO: Add exception capture for OpenAI/Anthropic/Gemini wrappers when + # enable_exception_autocapture is True, similar to LangChain callbacks. + # See _capture_exception_and_update_properties in langchain/callbacks.py + finally: + end_time = time.time() + latency = end_time - start_time + + if posthog_trace_id is None: + posthog_trace_id = str(uuid.uuid4()) + + if response and ( + hasattr(response, "usage") + or (provider == "gemini" and hasattr(response, "usage_metadata")) + ): + usage = get_usage(response, provider) + + messages = merge_system_prompt(kwargs, provider) + sanitized_messages = sanitize_messages(messages, provider) + + tag("$ai_provider", provider) + tag("$ai_model", kwargs.get("model") or getattr(response, "model", None)) + tag("$ai_model_parameters", get_model_params(kwargs)) + tag( + "$ai_input", + with_privacy_mode(ph_client, posthog_privacy_mode, sanitized_messages), ) - - # send the event to posthog - if hasattr(ph_client, "capture") and callable(ph_client.capture): - ph_client.capture( - distinct_id=posthog_distinct_id or posthog_trace_id, - event="$ai_generation", - properties=event_properties, - groups=posthog_groups, + tag( + "$ai_output_choices", + with_privacy_mode( + ph_client, posthog_privacy_mode, format_response(response, provider) + ), ) + tag("$ai_http_status", http_status) + tag("$ai_input_tokens", usage.get("input_tokens", 0)) + tag("$ai_output_tokens", usage.get("output_tokens", 0)) + tag("$ai_latency", latency) + tag("$ai_trace_id", posthog_trace_id) + 
tag("$ai_base_url", str(base_url)) + + available_tool_calls = extract_available_tool_calls(provider, kwargs) + + if available_tool_calls: + tag("$ai_tools", available_tool_calls) + + cache_read = usage.get("cache_read_input_tokens") + if cache_read is not None and cache_read > 0: + tag("$ai_cache_read_input_tokens", cache_read) + + cache_creation = usage.get("cache_creation_input_tokens") + if cache_creation is not None and cache_creation > 0: + tag("$ai_cache_creation_input_tokens", cache_creation) + + reasoning = usage.get("reasoning_tokens") + if reasoning is not None and reasoning > 0: + tag("$ai_reasoning_tokens", reasoning) + + web_search_count = usage.get("web_search_count") + if web_search_count is not None and web_search_count > 0: + tag("$ai_web_search_count", web_search_count) + + if posthog_distinct_id is None: + tag("$process_person_profile", False) + + # Process instructions for Responses API + if provider == "openai" and kwargs.get("instructions") is not None: + tag( + "$ai_instructions", + with_privacy_mode( + ph_client, posthog_privacy_mode, kwargs.get("instructions") + ), + ) + + # send the event to posthog + if hasattr(ph_client, "capture") and callable(ph_client.capture): + ph_client.capture( + distinct_id=posthog_distinct_id or posthog_trace_id, + event="$ai_generation", + properties={ + **get_tags(), + **(posthog_properties or {}), + **(error_params or {}), + }, + groups=posthog_groups, + ) - if error: - raise error + if error: + raise error return response diff --git a/posthog/contexts.py b/posthog/contexts.py index 1051b1e1..39f9bfde 100644 --- a/posthog/contexts.py +++ b/posthog/contexts.py @@ -62,14 +62,13 @@ def get_distinct_id(self) -> Optional[str]: return None def collect_tags(self) -> Dict[str, Any]: - tags = self.tags.copy() if self.parent and not self.fresh: # We want child tags to take precedence over parent tags, - # so we can't use a simple update here, instead collecting - # the parent tags and then updating with the child tags. 
- new_tags = self.parent.collect_tags() - tags.update(new_tags) - return tags + # so collect parent tags first, then update with child tags. + tags = self.parent.collect_tags() + tags.update(self.tags) + return tags + return self.tags.copy() def get_capture_exception_code_variables(self) -> Optional[bool]: if self.capture_exception_code_variables is not None: diff --git a/posthog/test/ai/langchain/test_callbacks.py b/posthog/test/ai/langchain/test_callbacks.py index 1f7edba7..a96e71ee 100644 --- a/posthog/test/ai/langchain/test_callbacks.py +++ b/posthog/test/ai/langchain/test_callbacks.py @@ -2441,3 +2441,206 @@ def test_billable_with_real_chain(mock_client): assert props["$ai_billable"] is True assert props["$ai_model"] == "fake-model" assert props["$ai_provider"] == "fake" + + +# Exception Capture Integration Tests + + +def test_exception_autocapture_on_span_error(): + """Test that capture_exception is called when a span errors and autocapture is enabled.""" + mock_client = MagicMock() + mock_client.privacy_mode = False + mock_client.enable_exception_autocapture = True + mock_client.capture_exception.return_value = "exception-uuid-123" + + def failing_span(_): + raise ValueError("test error") + + callbacks = [CallbackHandler(mock_client)] + chain = RunnableLambda(failing_span) + + try: + chain.invoke({}, config={"callbacks": callbacks}) + except ValueError: + pass + + # Verify capture_exception was called + assert mock_client.capture_exception.call_count == 1 + exception_call = mock_client.capture_exception.call_args + assert isinstance(exception_call[0][0], ValueError) + assert str(exception_call[0][0]) == "test error" + + +def test_exception_autocapture_adds_exception_id_to_span_event(): + """Test that $exception_event_id is added to the span event properties.""" + mock_client = MagicMock() + mock_client.privacy_mode = False + mock_client.enable_exception_autocapture = True + mock_client.capture_exception.return_value = "exception-uuid-456" + + def 
failing_span(_): + raise ValueError("test error") + + callbacks = [CallbackHandler(mock_client)] + chain = RunnableLambda(failing_span) + + try: + chain.invoke({}, config={"callbacks": callbacks}) + except ValueError: + pass + + # Find the span event (should have $ai_is_error=True) + span_calls = [ + call + for call in mock_client.capture.call_args_list + if call[1].get("properties", {}).get("$ai_is_error") is True + ] + assert len(span_calls) >= 1 + + span_props = span_calls[0][1]["properties"] + assert span_props["$exception_event_id"] == "exception-uuid-456" + assert span_props["$ai_error"] == "ValueError: test error" + + +def test_exception_autocapture_disabled_does_not_capture(): + """Test that capture_exception is NOT called when autocapture is disabled.""" + mock_client = MagicMock() + mock_client.privacy_mode = False + mock_client.enable_exception_autocapture = False + + def failing_span(_): + raise ValueError("test error") + + callbacks = [CallbackHandler(mock_client)] + chain = RunnableLambda(failing_span) + + try: + chain.invoke({}, config={"callbacks": callbacks}) + except ValueError: + pass + + # Verify capture_exception was NOT called + assert mock_client.capture_exception.call_count == 0 + + # But the span event should still have error info + span_calls = [ + call + for call in mock_client.capture.call_args_list + if call[1].get("properties", {}).get("$ai_is_error") is True + ] + assert len(span_calls) >= 1 + + span_props = span_calls[0][1]["properties"] + assert "$exception_event_id" not in span_props + assert span_props["$ai_error"] == "ValueError: test error" + + +def test_exception_autocapture_on_llm_generation_error(mock_client): + """Test that capture_exception is called when an LLM generation fails.""" + mock_client.privacy_mode = False + mock_client.enable_exception_autocapture = True + mock_client.capture_exception.return_value = "exception-uuid-789" + + callbacks = CallbackHandler(mock_client) + run_id = uuid.uuid4() + + # Simulate LLM 
start + callbacks.on_llm_start( + serialized={"kwargs": {"openai_api_base": "https://api.openai.com"}}, + prompts=["Hello"], + run_id=run_id, + ) + + # Simulate LLM error + error = Exception("API rate limit exceeded") + callbacks.on_llm_error(error, run_id=run_id) + + # Verify capture_exception was called + assert mock_client.capture_exception.call_count == 1 + exception_call = mock_client.capture_exception.call_args + assert exception_call[0][0] is error + + # Verify the generation event has $exception_event_id + generation_calls = [ + call + for call in mock_client.capture.call_args_list + if call[1].get("event") == "$ai_generation" + ] + assert len(generation_calls) == 1 + + gen_props = generation_calls[0][1]["properties"] + assert gen_props["$exception_event_id"] == "exception-uuid-789" + assert gen_props["$ai_is_error"] is True + + +def test_exception_autocapture_passes_ai_properties_to_exception(): + """Test that AI properties are passed to the exception event.""" + mock_client = MagicMock() + mock_client.privacy_mode = False + mock_client.enable_exception_autocapture = True + mock_client.capture_exception.return_value = "exception-uuid-abc" + + callbacks = CallbackHandler( + mock_client, + distinct_id="user-123", + properties={"custom_prop": "custom_value"}, + ) + run_id = uuid.uuid4() + + # Simulate LLM start + callbacks.on_llm_start( + serialized={"kwargs": {"openai_api_base": "https://api.openai.com"}}, + prompts=["Hello"], + run_id=run_id, + ) + + # Simulate LLM error + error = Exception("API error") + callbacks.on_llm_error(error, run_id=run_id) + + # Verify capture_exception received the properties + exception_call = mock_client.capture_exception.call_args + props = exception_call[1]["properties"] + + # Should have AI-related properties + assert "$ai_trace_id" in props + assert "$ai_is_error" in props + assert props["$ai_is_error"] is True + + # Should have distinct_id passed through + assert exception_call[1]["distinct_id"] == "user-123" + + +def 
test_exception_autocapture_none_return_no_exception_id(): + """Test that when capture_exception returns None, no $exception_event_id is added.""" + mock_client = MagicMock() + mock_client.privacy_mode = False + mock_client.enable_exception_autocapture = True + mock_client.capture_exception.return_value = ( + None # e.g., exception already captured + ) + + def failing_span(_): + raise ValueError("test error") + + callbacks = [CallbackHandler(mock_client)] + chain = RunnableLambda(failing_span) + + try: + chain.invoke({}, config={"callbacks": callbacks}) + except ValueError: + pass + + # capture_exception was called but returned None + assert mock_client.capture_exception.call_count == 1 + + # Span event should NOT have $exception_event_id + span_calls = [ + call + for call in mock_client.capture.call_args_list + if call[1].get("properties", {}).get("$ai_is_error") is True + ] + assert len(span_calls) >= 1 + + span_props = span_calls[0][1]["properties"] + assert "$exception_event_id" not in span_props diff --git a/posthog/test/ai/test_system_prompts.py b/posthog/test/ai/test_system_prompts.py index 2f37ccc4..eb049c86 100644 --- a/posthog/test/ai/test_system_prompts.py +++ b/posthog/test/ai/test_system_prompts.py @@ -11,7 +11,10 @@ import time import unittest -from unittest.mock import patch, MagicMock +from unittest.mock import MagicMock, patch + +from posthog.client import Client +from posthog.test.test_utils import FAKE_TEST_API_KEY class TestSystemPromptCapture(unittest.TestCase): @@ -24,7 +27,8 @@ def setUp(self): self.test_response = "I'm doing well, thank you!" 
# Create mock PostHog client - self.client = MagicMock() + self.client = Client(FAKE_TEST_API_KEY) + self.client._enqueue = MagicMock() self.client.privacy_mode = False def _assert_system_prompt_captured(self, captured_input): @@ -53,10 +57,11 @@ def _assert_system_prompt_captured(self, captured_input): def test_openai_messages_array_system_prompt(self): """Test OpenAI with system prompt in messages array.""" try: - from posthog.ai.openai import OpenAI from openai.types.chat import ChatCompletion, ChatCompletionMessage from openai.types.chat.chat_completion import Choice from openai.types.completion_usage import CompletionUsage + + from posthog.ai.openai import OpenAI except ImportError: self.skipTest("OpenAI package not available") @@ -94,17 +99,18 @@ def test_openai_messages_array_system_prompt(self): model="gpt-4", messages=messages, posthog_distinct_id="test-user" ) - self.assertEqual(len(self.client.capture.call_args_list), 1) - properties = self.client.capture.call_args_list[0][1]["properties"] + self.assertEqual(len(self.client._enqueue.call_args_list), 1) + properties = self.client._enqueue.call_args_list[0][0][0]["properties"] self._assert_system_prompt_captured(properties["$ai_input"]) def test_openai_separate_system_parameter(self): """Test OpenAI with system prompt as separate parameter.""" try: - from posthog.ai.openai import OpenAI from openai.types.chat import ChatCompletion, ChatCompletionMessage from openai.types.chat.chat_completion import Choice from openai.types.completion_usage import CompletionUsage + + from posthog.ai.openai import OpenAI except ImportError: self.skipTest("OpenAI package not available") @@ -142,18 +148,21 @@ def test_openai_separate_system_parameter(self): posthog_distinct_id="test-user", ) - self.assertEqual(len(self.client.capture.call_args_list), 1) - properties = self.client.capture.call_args_list[0][1]["properties"] + self.assertEqual(len(self.client._enqueue.call_args_list), 1) + properties = 
self.client._enqueue.call_args_list[0][0][0]["properties"] self._assert_system_prompt_captured(properties["$ai_input"]) def test_openai_streaming_system_parameter(self): """Test OpenAI streaming with system parameter.""" try: - from posthog.ai.openai import OpenAI - from openai.types.chat.chat_completion_chunk import ChatCompletionChunk + from openai.types.chat.chat_completion_chunk import ( + ChatCompletionChunk, + ChoiceDelta, + ) from openai.types.chat.chat_completion_chunk import Choice as ChoiceChunk - from openai.types.chat.chat_completion_chunk import ChoiceDelta from openai.types.completion_usage import CompletionUsage + + from posthog.ai.openai import OpenAI except ImportError: self.skipTest("OpenAI package not available") @@ -206,8 +215,8 @@ def test_openai_streaming_system_parameter(self): list(response_generator) # Consume generator - self.assertEqual(len(self.client.capture.call_args_list), 1) - properties = self.client.capture.call_args_list[0][1]["properties"] + self.assertEqual(len(self.client._enqueue.call_args_list), 1) + properties = self.client._enqueue.call_args_list[0][0][0]["properties"] self._assert_system_prompt_captured(properties["$ai_input"]) # Anthropic Tests @@ -239,8 +248,8 @@ def test_anthropic_messages_array_system_prompt(self): posthog_distinct_id="test-user", ) - self.assertEqual(len(self.client.capture.call_args_list), 1) - properties = self.client.capture.call_args_list[0][1]["properties"] + self.assertEqual(len(self.client._enqueue.call_args_list), 1) + properties = self.client._enqueue.call_args_list[0][0][0]["properties"] self._assert_system_prompt_captured(properties["$ai_input"]) def test_anthropic_separate_system_parameter(self): @@ -269,8 +278,8 @@ def test_anthropic_separate_system_parameter(self): posthog_distinct_id="test-user", ) - self.assertEqual(len(self.client.capture.call_args_list), 1) - properties = self.client.capture.call_args_list[0][1]["properties"] + 
self.assertEqual(len(self.client._enqueue.call_args_list), 1) + properties = self.client._enqueue.call_args_list[0][0][0]["properties"] self._assert_system_prompt_captured(properties["$ai_input"]) # Gemini Tests @@ -310,8 +319,8 @@ def test_gemini_contents_array_system_prompt(self): posthog_distinct_id="test-user", ) - self.assertEqual(len(self.client.capture.call_args_list), 1) - properties = self.client.capture.call_args_list[0][1]["properties"] + self.assertEqual(len(self.client._enqueue.call_args_list), 1) + properties = self.client._enqueue.call_args_list[0][0][0]["properties"] self._assert_system_prompt_captured(properties["$ai_input"]) def test_gemini_system_instruction_parameter(self): @@ -349,6 +358,6 @@ def test_gemini_system_instruction_parameter(self): posthog_distinct_id="test-user", ) - self.assertEqual(len(self.client.capture.call_args_list), 1) - properties = self.client.capture.call_args_list[0][1]["properties"] + self.assertEqual(len(self.client._enqueue.call_args_list), 1) + properties = self.client._enqueue.call_args_list[0][0][0]["properties"] self._assert_system_prompt_captured(properties["$ai_input"]) diff --git a/posthog/test/test_contexts.py b/posthog/test/test_contexts.py index 113e86c5..3ff6fddd 100644 --- a/posthog/test/test_contexts.py +++ b/posthog/test/test_contexts.py @@ -191,6 +191,32 @@ def test_context_inheritance_non_fresh_context(self): assert get_context_distinct_id() == "user123" assert get_context_session_id() == "session456" + def test_child_tags_override_parent_tags_in_non_fresh_context(self): + with new_context(fresh=True): + tag("shared_key", "parent_value") + tag("parent_only", "parent") + + with new_context(fresh=False): + # Child should inherit parent tags + assert get_tags()["parent_only"] == "parent" + + # Child sets same key - should override parent + tag("shared_key", "child_value") + tag("child_only", "child") + + tags = get_tags() + # Child value should win for shared key + assert tags["shared_key"] == 
"child_value" + # Both parent and child tags should be present + assert tags["parent_only"] == "parent" + assert tags["child_only"] == "child" + + # Parent context should be unchanged + parent_tags = get_tags() + assert parent_tags["shared_key"] == "parent_value" + assert parent_tags["parent_only"] == "parent" + assert "child_only" not in parent_tags + def test_scoped_decorator_with_context_ids(self): @scoped() def function_with_context(): diff --git a/posthog/version.py b/posthog/version.py index 9efec676..337cf760 100644 --- a/posthog/version.py +++ b/posthog/version.py @@ -1,4 +1,4 @@ -VERSION = "7.4.3" +VERSION = "7.5.0" if __name__ == "__main__": print(VERSION, end="") # noqa: T201