Python: Update Cross Language Test to get raw request and now raw response (#10799)

### Motivation and Context

In our cross language tests, we were capturing the raw request with a custom
logging transport class. That approach didn't show how to get the raw HTTP
response back after a call through the OpenAI SDK.
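For context, the OpenAI SDK accepts any `httpx.AsyncClient`, which is the hook
that makes transport-level capture possible. A minimal sketch of the wiring
(the plain transport and placeholder API key below are illustrative, not the
test's actual setup):

```python
import httpx
from openai import AsyncOpenAI

# Any httpx.AsyncBaseTransport subclass can sit here and observe traffic.
transport = httpx.AsyncHTTPTransport()
http_client = httpx.AsyncClient(transport=transport)

# The SDK routes every HTTP call through the supplied client.
client = AsyncOpenAI(api_key="sk-placeholder", http_client=http_client)
```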


### Description

This PR updates the cross language test to capture both the raw request and
the raw response.
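As a rough sketch of how a test can now assert on both sides of the exchange,
using the `LoggingAsyncClient` defined in the diff below (the model id,
message, and API key are illustrative placeholders):

```python
from openai import AsyncOpenAI


async def capture_round_trip() -> None:
    # LoggingAsyncClient comes from test_cross_language.py (see the diff below).
    logging_client = LoggingAsyncClient()
    async with AsyncOpenAI(api_key="sk-placeholder", http_client=logging_client) as client:
        await client.chat.completions.create(
            model="gpt-4o",
            messages=[{"role": "user", "content": "Hello"}],
        )
    # The transport keeps the captured bodies after the client closes.
    assert logging_client.request_content is not None
    assert logging_client.response_content is not None
```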


### Contribution Checklist


- [X] The code builds clean without any errors or warnings
- [X] The PR follows the [SK Contribution
Guidelines](https://github.com/microsoft/semantic-kernel/blob/main/CONTRIBUTING.md)
and the [pre-submission formatting
script](https://github.com/microsoft/semantic-kernel/blob/main/CONTRIBUTING.md#development-scripts)
raises no violations
- [X] All unit tests pass, and I have added new tests where possible
- [X] I didn't break anyone 😄
moonbox3 authored Mar 5, 2025
1 parent fb70825 commit e7beb6a
Showing 1 changed file with 111 additions and 35 deletions.
146 changes: 111 additions & 35 deletions python/tests/integration/cross_language/test_cross_language.py
@@ -5,6 +5,7 @@
 import json
 import logging
 import os
+from collections.abc import AsyncGenerator
 
 import httpx
 import pytest
@@ -40,22 +41,39 @@
 
 
 class LoggingTransport(httpx.AsyncBaseTransport):
-    def __init__(self, inner: httpx.AsyncBaseTransport):
-        self.inner = inner
+    def __init__(self, inner=None):
+        self.inner = inner or httpx.AsyncHTTPTransport()
+        self.request_headers = {}
+        self.request_content = None
+        self.response_headers = {}
+        self.response_content = None
 
     async def handle_async_request(self, request: httpx.Request) -> httpx.Response:
-        logger.info(f"Request: {request.method} {request.url}")
-        if request.content:
-            self.request_content = request.content.decode("utf-8")
-            logger.info(f"Request Body: {self.request_content}")
-        elif request.stream:
-            stream_content = await request.stream.aread()
-            self.request_content = stream_content.decode("utf-8")
-            logger.info(f"Request Stream Content: {self.request_content}")
-            request.stream = httpx.AsyncByteStream(stream_content)
-
-        return await self.inner.handle_async_request(request)
+        self.request_headers = dict(request.headers)
+        self.request_content = request.content.decode("utf-8") if request.content else None
+
+        logger.info(f"Request URL: {request.url}")
+        logger.info(f"Request Headers: {self.request_headers}")
+        logger.info(f"Request Content: {self.request_content}")
+
+        response = await self.inner.handle_async_request(request)
+
+        raw_response_bytes = await response.aread()
+        self.response_headers = dict(response.headers)
+        self.response_content = raw_response_bytes.decode(response.encoding or "utf-8", errors="replace")
+
+        logger.info(f"Response Headers: {self.response_headers}")
+        logger.info(f"Response Content: {self.response_content}")
+
+        headers_without_encoding = {k: v for k, v in response.headers.items() if k.lower() != "content-encoding"}
+
+        return httpx.Response(
+            status_code=response.status_code,
+            headers=headers_without_encoding,
+            content=raw_response_bytes,
+            request=request,
+            extensions=response.extensions,
+        )
 
 
 class LoggingAsyncClient(httpx.AsyncClient):
@@ -64,23 +82,36 @@ def __init__(self, *args, **kwargs):
         self.logging_transport = LoggingTransport(transport or httpx.AsyncHTTPTransport())
         super().__init__(*args, **kwargs, transport=self.logging_transport)
 
-    def get_request_content(self):
-        return self.logging_transport.request_content
+    @property
+    def request_headers(self):
+        return self.logging_transport.request_headers
+
+    @property
+    def request_content(self):
+        return self.logging_transport.request_content
+
+    @property
+    def response_headers(self):
+        return self.logging_transport.response_headers
+
+    @property
+    def response_content(self):
+        return self.logging_transport.response_content
 
 
 # endregion
 
 # region Test Helper Methods
 
 
 @pytest_asyncio.fixture
-async def async_client():
+async def async_clients() -> AsyncGenerator[tuple[AsyncOpenAI, LoggingAsyncClient], None]:
     openai_settings = OpenAISettings.create()
     logging_async_client = LoggingAsyncClient()
     async with AsyncOpenAI(
         api_key=openai_settings.api_key.get_secret_value(), http_client=logging_async_client
-    ) as async_client:
-        yield async_client
+    ) as client:
+        yield client, logging_async_client
 
 
 async def run_prompt(
@@ -206,11 +237,14 @@ def __init__(self, name):
         ),
     ],
 )
-async def test_prompt_with_chat_roles(is_inline, is_streaming, template_format, prompt, async_client):
+async def test_prompt_with_chat_roles(
+    is_inline, is_streaming, template_format, prompt, async_clients: tuple[AsyncOpenAI, LoggingAsyncClient]
+):
+    client, logging_async_client = async_clients
     ai_service = OpenAIChatCompletion(
         service_id="test",
         ai_model_id=OPENAI_MODEL_ID,
-        async_client=async_client,
+        async_client=client,
     )
 
     kernel = Kernel()
@@ -225,9 +259,12 @@ async def test_prompt_with_chat_roles(is_inline, is_streaming, template_format,
         prompt=prompt,
     )
 
-    request_content = async_client._client.get_request_content()
+    request_content = logging_async_client.request_content
     assert request_content is not None
 
+    response_content = logging_async_client.response_content
+    assert response_content is not None
+
     obtained_object = json.loads(request_content)
     assert obtained_object is not None

@@ -307,11 +344,15 @@ async def test_prompt_with_chat_roles(is_inline, is_streaming, template_format,
         ),
     ],
 )
-async def test_prompt_with_complex_objects(is_inline, is_streaming, template_format, prompt, async_client):
+async def test_prompt_with_complex_objects(
+    is_inline, is_streaming, template_format, prompt, async_clients: tuple[AsyncOpenAI, LoggingAsyncClient]
+):
+    client, logging_async_client = async_clients
+
     ai_service = OpenAIChatCompletion(
         service_id="default",
         ai_model_id=OPENAI_MODEL_ID,
-        async_client=async_client,
+        async_client=client,
     )
 
     kernel = Kernel()
@@ -327,9 +368,12 @@ async def test_prompt_with_complex_objects(is_inline, is_streaming, template_for
         arguments=KernelArguments(city=City("Seattle")),
     )
 
-    request_content = async_client._client.get_request_content()
+    request_content = logging_async_client.request_content
     assert request_content is not None
 
+    response_content = logging_async_client.response_content
+    assert response_content is not None
+
     obtained_object = json.loads(request_content)
     assert obtained_object is not None

@@ -372,11 +416,15 @@ async def test_prompt_with_complex_objects(is_inline, is_streaming, template_for
         pytest.param(False, True, "jinja2", j2_prompt, id="j2_non_inline_streaming"),
     ],
 )
-async def test_prompt_with_helper_functions(is_inline, is_streaming, template_format, prompt, async_client):
+async def test_prompt_with_helper_functions(
+    is_inline, is_streaming, template_format, prompt, async_clients: tuple[AsyncOpenAI, LoggingAsyncClient]
+):
+    client, logging_async_client = async_clients
+
     ai_service = OpenAIChatCompletion(
         service_id="default",
         ai_model_id=OPENAI_MODEL_ID,
-        async_client=async_client,
+        async_client=client,
     )
 
     kernel = Kernel()
@@ -403,9 +451,12 @@ async def test_prompt_with_helper_functions(is_inline, is_streaming, template_fo
         arguments=KernelArguments(city="Seattle"),
     )
 
-    request_content = async_client._client.get_request_content()
+    request_content = logging_async_client.request_content
     assert request_content is not None
 
+    response_content = logging_async_client.response_content
+    assert response_content is not None
+
     obtained_object = json.loads(request_content)
     assert obtained_object is not None

@@ -441,11 +492,15 @@ async def test_prompt_with_helper_functions(is_inline, is_streaming, template_fo
         pytest.param(False, True, "jinja2", j2_simple_prompt, id="j2_non_inline_streaming"),
     ],
 )
-async def test_prompt_with_simple_variable(is_inline, is_streaming, template_format, prompt, async_client):
+async def test_prompt_with_simple_variable(
+    is_inline, is_streaming, template_format, prompt, async_clients: tuple[AsyncOpenAI, LoggingAsyncClient]
+):
+    client, logging_async_client = async_clients
+
     ai_service = OpenAIChatCompletion(
         service_id="default",
         ai_model_id=OPENAI_MODEL_ID,
-        async_client=async_client,
+        async_client=client,
     )
 
     kernel = Kernel()
@@ -461,9 +516,12 @@ async def test_prompt_with_simple_variable(is_inline, is_streaming, template_for
         arguments=KernelArguments(city="Seattle"),
     )
 
-    request_content = async_client._client.get_request_content()
+    request_content = logging_async_client.request_content
     assert request_content is not None
 
+    response_content = logging_async_client.response_content
+    assert response_content is not None
+
     obtained_object = json.loads(request_content)
     assert obtained_object is not None

@@ -499,11 +557,15 @@ async def test_prompt_with_simple_variable(is_inline, is_streaming, template_for
         pytest.param(False, True, "jinja2", simple_prompt, id="j2_non_inline_streaming"),
     ],
 )
-async def test_simple_prompt(is_inline, is_streaming, template_format, prompt, async_client):
+async def test_simple_prompt(
+    is_inline, is_streaming, template_format, prompt, async_clients: tuple[AsyncOpenAI, LoggingAsyncClient]
+):
+    client, logging_async_client = async_clients
+
     ai_service = OpenAIChatCompletion(
         service_id="default",
         ai_model_id=OPENAI_MODEL_ID,
-        async_client=async_client,
+        async_client=client,
     )
 
     kernel = Kernel()
@@ -518,9 +580,12 @@ async def test_simple_prompt(is_inline, is_streaming, template_format, prompt, a
         prompt=prompt,
     )
 
-    request_content = async_client._client.get_request_content()
+    request_content = logging_async_client.request_content
     assert request_content is not None
 
+    response_content = logging_async_client.response_content
+    assert response_content is not None
+
     obtained_object = json.loads(request_content)
     assert obtained_object is not None

@@ -588,11 +653,19 @@ async def test_simple_prompt(is_inline, is_streaming, template_format, prompt, a
         ),
     ],
 )
-async def test_yaml_prompt(is_streaming, prompt_path, expected_result_path, kernel: Kernel, async_client):
+async def test_yaml_prompt(
+    is_streaming,
+    prompt_path,
+    expected_result_path,
+    kernel: Kernel,
+    async_clients: tuple[AsyncOpenAI, LoggingAsyncClient],
+):
+    client, logging_async_client = async_clients
+
     ai_service = OpenAIChatCompletion(
         service_id="default",
         ai_model_id=OPENAI_MODEL_ID,
-        async_client=async_client,
+        async_client=client,
     )
 
     kernel.add_service(ai_service)
@@ -604,9 +677,12 @@ async def test_yaml_prompt(is_streaming, prompt_path, expected_result_path, kern
 
     await run_function(kernel=kernel, is_streaming=is_streaming, function=function)
 
-    request_content = async_client._client.get_request_content()
+    request_content = logging_async_client.request_content
     assert request_content is not None
 
+    response_content = logging_async_client.response_content
+    assert response_content is not None
+
     obtained_object = json.loads(request_content)
     assert obtained_object is not None

Expand All @@ -629,7 +705,7 @@ async def test_yaml_prompt(is_streaming, prompt_path, expected_result_path, kern
# region Test OpenAPI Plugin Load


async def setup_openapi_function_call(kernel, function_name, arguments):
async def setup_openapi_function_call(kernel: Kernel, function_name, arguments):
from semantic_kernel.connectors.openapi_plugin import OpenAPIFunctionExecutionParameters

openapi_spec_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), "data", "light_bulb_api.json")
Expand Down
