
Commit 426a22b

draft - first draft
1 parent 98fa6d0 commit 426a22b

19 files changed: +58 −51 lines

README.md

Lines changed: 2 additions & 2 deletions
@@ -49,14 +49,14 @@ try:
         model="gpt-5",
         messages=[{"role": "user", "content": "Hello world"}],
     )
-    print(chat.llm_response.choices[0].message.content)
+    print(chat.choices[0].message.content)

     # Or with the Responses API
     resp = client.responses.create(
         model="gpt-5",
         input="What are the main features of your premium plan?",
     )
-    print(resp.llm_response.output_text)
+    print(resp.output_text)
 except GuardrailTripwireTriggered as e:
     print(f"Guardrail triggered: {e}")
 ```
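
Throughout this commit, call sites stop reaching through `.llm_response` and read OpenAI fields directly off the wrapped response, while `guardrail_results` stays available on the same object. The wrapper itself is not part of this diff; the following is a minimal sketch, assuming a `GuardrailsResponse`-style dataclass that forwards unknown attributes to the underlying OpenAI response via `__getattr__` (the field names mirror those used in the examples; the implementation is illustrative, not taken from the library):

    from dataclasses import dataclass
    from typing import Any

    @dataclass
    class GuardrailsResponse:
        """Hypothetical wrapper: keeps guardrail metadata, proxies everything else."""

        llm_response: Any       # the underlying OpenAI response object
        guardrail_results: Any  # results of the guardrail checks

        def __getattr__(self, name: str) -> Any:
            # __getattr__ runs only when normal lookup fails, so llm_response and
            # guardrail_results resolve directly; anything else (output_text,
            # choices, id, ...) is forwarded to the wrapped OpenAI response.
            return getattr(self.llm_response, name)

With delegation like this, `response.output_text` and `response.choices[0].message.content` resolve against the OpenAI object, which is exactly the access pattern the updated docs and examples in this commit use.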

docs/index.md

Lines changed: 1 addition & 1 deletion
@@ -35,7 +35,7 @@ response = await client.responses.create(
     input="Hello"
 )
 # Guardrails run automatically
-print(response.llm_response.output_text)
+print(response.output_text)
 ```

 ## Next Steps

docs/quickstart.md

Lines changed: 3 additions & 4 deletions
@@ -70,16 +70,15 @@ async def main():
             input="Hello world"
         )

-        # Access OpenAI response via .llm_response
-        print(response.llm_response.output_text)
+        print(response.output_text)

     except GuardrailTripwireTriggered as exc:
         print(f"Guardrail triggered: {exc.guardrail_result.info}")

 asyncio.run(main())
 ```

-**That's it!** Your existing OpenAI code now includes automatic guardrail validation based on your pipeline configuration. Just use `response.llm_response` instead of `response`.
+**That's it!** Your existing OpenAI code now includes automatic guardrail validation based on your pipeline configuration.

 ## Multi-Turn Conversations

@@ -98,7 +97,7 @@ while True:
         model="gpt-4o"
     )

-    response_content = response.llm_response.choices[0].message.content
+    response_content = response.choices[0].message.content
     print(f"Assistant: {response_content}")

     # ✅ Only append AFTER guardrails pass

docs/ref/checks/hallucination_detection.md

Lines changed: 1 addition & 1 deletion
@@ -76,7 +76,7 @@ response = await client.responses.create(
 )

 # Guardrails automatically validate against your reference documents
-print(response.llm_response.output_text)
+print(response.output_text)
 ```

 ### How It Works

docs/tripwires.md

Lines changed: 1 addition & 1 deletion
@@ -25,7 +25,7 @@ try:
         model="gpt-5",
         input="Tell me a secret"
     )
-    print(response.llm_response.output_text)
+    print(response.output_text)

 except GuardrailTripwireTriggered as exc:
     print(f"Guardrail triggered: {exc.guardrail_result.info}")

examples/basic/azure_implementation.py

Lines changed: 1 addition & 1 deletion
@@ -75,7 +75,7 @@ async def process_input(
     )

     # Extract the response content from the GuardrailsResponse
-    response_text = response.llm_response.choices[0].message.content
+    response_text = response.choices[0].message.content

     # Only show output if all guardrails pass
     print(f"\nAssistant: {response_text}")

examples/basic/hello_world.py

Lines changed: 2 additions & 2 deletions
@@ -49,13 +49,13 @@ async def process_input(
             previous_response_id=response_id,
         )

-        console.print(f"\nAssistant output: {response.llm_response.output_text}", end="\n\n")
+        console.print(f"\nAssistant output: {response.output_text}", end="\n\n")

         # Show guardrail results if any were run
         if response.guardrail_results.all_results:
            console.print(f"[dim]Guardrails checked: {len(response.guardrail_results.all_results)}[/dim]")

-        return response.llm_response.id
+        return response.id

     except GuardrailTripwireTriggered:
         raise
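
The hello_world.py hunk reads `response.guardrail_results.all_results` right next to the delegated `response.output_text` and `response.id`, so guardrail metadata and native OpenAI fields live on the same object. A short usage sketch under the same assumption as the wrapper above (only `all_results` appears in this diff; other fields on the result objects are not shown):

    # Native OpenAI field, reached through attribute delegation
    print(response.output_text)

    # Guardrail metadata defined on the wrapper itself
    for result in response.guardrail_results.all_results:
        print(result)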

examples/basic/local_model.py

Lines changed: 1 addition & 1 deletion
@@ -48,7 +48,7 @@ async def process_input(
     )

     # Access response content using standard OpenAI API
-    response_content = response.llm_response.choices[0].message.content
+    response_content = response.choices[0].message.content
     console.print(f"\nAssistant output: {response_content}", end="\n\n")

     # Add to conversation history

examples/basic/multi_bundle.py

Lines changed: 5 additions & 5 deletions
@@ -66,15 +66,15 @@ async def process_input(
     with Live(output_text, console=console, refresh_per_second=10) as live:
         try:
             async for chunk in stream:
-                # Access streaming response exactly like native OpenAI API through .llm_response
-                if hasattr(chunk.llm_response, "delta") and chunk.llm_response.delta:
-                    output_text += chunk.llm_response.delta
+                # Access streaming response exactly like native OpenAI API
+                if hasattr(chunk, "delta") and chunk.delta:
+                    output_text += chunk.delta
                 live.update(output_text)

             # Get the response ID from the final chunk
             response_id_to_return = None
-            if hasattr(chunk.llm_response, "response") and hasattr(chunk.llm_response.response, "id"):
-                response_id_to_return = chunk.llm_response.response.id
+            if hasattr(chunk, "response") and hasattr(chunk.response, "id"):
+                response_id_to_return = chunk.response.id

             return response_id_to_return

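The multi_bundle.py hunk shows that streaming chunks get the same direct access: the loop now reads `chunk.delta` and `chunk.response.id` without the `.llm_response` hop. The streaming side of the wrapper is not in this commit either; a minimal sketch, reusing the hypothetical `GuardrailsResponse` from the earlier sketch and assuming chunks are re-wrapped as they are yielded:

    from typing import Any, AsyncIterator

    async def wrap_stream(
        stream: AsyncIterator[Any], guardrail_results: Any
    ) -> AsyncIterator[GuardrailsResponse]:
        # Each raw streaming event is re-wrapped so callers can keep using
        # chunk.delta and chunk.response.id directly, while guardrail results
        # stay attached to every chunk.
        async for event in stream:
            yield GuardrailsResponse(llm_response=event, guardrail_results=guardrail_results)

Consumption then stays exactly as in the hunk above: `async for chunk in stream:` followed by direct attribute access on each chunk.
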
examples/basic/multiturn_chat_with_alignment.py

Lines changed: 2 additions & 2 deletions
@@ -235,7 +235,7 @@ async def main(malicious: bool = False) -> None:
         tools=tools,
     )
     print_guardrail_results("initial", resp)
-    choice = resp.llm_response.choices[0]
+    choice = resp.choices[0]
     message = choice.message
     tool_calls = getattr(message, "tool_calls", []) or []

@@ -327,7 +327,7 @@ async def main(malicious: bool = False) -> None:
     )

     print_guardrail_results("final", resp)
-    final_message = resp.llm_response.choices[0].message
+    final_message = resp.choices[0].message
     console.print(
         Panel(
             final_message.content or "(no output)",
