4 changes: 2 additions & 2 deletions README.md
@@ -51,14 +51,14 @@ try:
model="gpt-5",
messages=[{"role": "user", "content": "Hello world"}],
)
-print(chat.llm_response.choices[0].message.content)
+print(chat.choices[0].message.content)

# Or with the Responses API
resp = client.responses.create(
model="gpt-5",
input="What are the main features of your premium plan?",
)
-print(resp.llm_response.output_text)
+print(resp.output_text)
except GuardrailTripwireTriggered as e:
print(f"Guardrail triggered: {e}")
```
2 changes: 1 addition & 1 deletion docs/index.md
@@ -35,7 +35,7 @@ response = await client.responses.create(
input="Hello"
)
# Guardrails run automatically
-print(response.llm_response.output_text)
+print(response.output_text)
```

## Next Steps
8 changes: 4 additions & 4 deletions docs/quickstart.md
@@ -70,16 +70,16 @@ async def main():
input="Hello world"
)

-# Access OpenAI response via .llm_response
-print(response.llm_response.output_text)
+# Access OpenAI response attributes directly
+print(response.output_text)

except GuardrailTripwireTriggered as exc:
print(f"Guardrail triggered: {exc.guardrail_result.info}")

asyncio.run(main())
```

-**That's it!** Your existing OpenAI code now includes automatic guardrail validation based on your pipeline configuration. Just use `response.llm_response` instead of `response`.
+**That's it!** Your existing OpenAI code now includes automatic guardrail validation based on your pipeline configuration. The response object acts as a drop-in replacement for OpenAI responses with added guardrail results.

## Multi-Turn Conversations

@@ -98,7 +98,7 @@ while True:
model="gpt-4o"
)

-response_content = response.llm_response.choices[0].message.content
+response_content = response.choices[0].message.content
print(f"Assistant: {response_content}")

# ✅ Only append AFTER guardrails pass
2 changes: 1 addition & 1 deletion docs/ref/checks/hallucination_detection.md
@@ -76,7 +76,7 @@ response = await client.responses.create(
)

# Guardrails automatically validate against your reference documents
-print(response.llm_response.output_text)
+print(response.output_text)
```

### How It Works
2 changes: 1 addition & 1 deletion docs/tripwires.md
@@ -25,7 +25,7 @@ try:
model="gpt-5",
input="Tell me a secret"
)
-print(response.llm_response.output_text)
+print(response.output_text)

except GuardrailTripwireTriggered as exc:
print(f"Guardrail triggered: {exc.guardrail_result.info}")
2 changes: 1 addition & 1 deletion examples/basic/azure_implementation.py
@@ -75,7 +75,7 @@ async def process_input(
)

# Extract the response content from the GuardrailsResponse
-response_text = response.llm_response.choices[0].message.content
+response_text = response.choices[0].message.content

# Only show output if all guardrails pass
print(f"\nAssistant: {response_text}")
6 changes: 2 additions & 4 deletions examples/basic/hello_world.py
@@ -48,14 +48,12 @@ async def process_input(
model="gpt-4.1-mini",
previous_response_id=response_id,
)
-
-console.print(f"\nAssistant output: {response.llm_response.output_text}", end="\n\n")
-
+console.print(f"\nAssistant output: {response.output_text}", end="\n\n")
# Show guardrail results if any were run
if response.guardrail_results.all_results:
console.print(f"[dim]Guardrails checked: {len(response.guardrail_results.all_results)}[/dim]")

-return response.llm_response.id
+return response.id

except GuardrailTripwireTriggered:
raise
2 changes: 1 addition & 1 deletion examples/basic/local_model.py
@@ -48,7 +48,7 @@ async def process_input(
)

# Access response content using standard OpenAI API
-response_content = response.llm_response.choices[0].message.content
+response_content = response.choices[0].message.content
console.print(f"\nAssistant output: {response_content}", end="\n\n")

# Add to conversation history
10 changes: 5 additions & 5 deletions examples/basic/multi_bundle.py
@@ -66,15 +66,15 @@ async def process_input(
with Live(output_text, console=console, refresh_per_second=10) as live:
try:
async for chunk in stream:
-# Access streaming response exactly like native OpenAI API through .llm_response
-if hasattr(chunk.llm_response, "delta") and chunk.llm_response.delta:
-output_text += chunk.llm_response.delta
+# Access streaming response exactly like native OpenAI API (flattened)
+if hasattr(chunk, "delta") and chunk.delta:
+output_text += chunk.delta
live.update(output_text)

# Get the response ID from the final chunk
response_id_to_return = None
-if hasattr(chunk.llm_response, "response") and hasattr(chunk.llm_response.response, "id"):
-response_id_to_return = chunk.llm_response.response.id
+if hasattr(chunk, "response") and hasattr(chunk.response, "id"):
+response_id_to_return = chunk.response.id

return response_id_to_return

4 changes: 2 additions & 2 deletions examples/basic/multiturn_chat_with_alignment.py
@@ -235,7 +235,7 @@ async def main(malicious: bool = False) -> None:
tools=tools,
)
print_guardrail_results("initial", resp)
-choice = resp.llm_response.choices[0]
+choice = resp.choices[0]
message = choice.message
tool_calls = getattr(message, "tool_calls", []) or []

@@ -327,7 +327,7 @@ async def main(malicious: bool = False) -> None:
)

print_guardrail_results("final", resp)
-final_message = resp.llm_response.choices[0].message
+final_message = resp.choices[0].message
console.print(
Panel(
final_message.content or "(no output)",
2 changes: 1 addition & 1 deletion examples/basic/pii_mask_example.py
@@ -90,7 +90,7 @@ async def process_input(
)

# Show the LLM response (already masked if PII was detected)
-content = response.llm_response.choices[0].message.content
+content = response.choices[0].message.content
console.print(f"\n[bold blue]Assistant output:[/bold blue] {content}\n")

# Show PII masking information if detected in pre-flight
4 changes: 2 additions & 2 deletions examples/basic/structured_outputs_example.py
@@ -56,11 +56,11 @@ async def extract_user_info(
)

# Access the parsed structured output
-user_info = response.llm_response.output_parsed
+user_info = response.output_parsed
print(f"✅ Successfully extracted: {user_info.name}, {user_info.age}, {user_info.email}")

# Return user info and response ID (only returned if guardrails pass)
-return user_info, response.llm_response.id
+return user_info, response.id

except GuardrailTripwireTriggered:
# Guardrail blocked - no response ID returned, conversation history unchanged
4 changes: 2 additions & 2 deletions examples/basic/suppress_tripwire.py
@@ -68,8 +68,8 @@ async def process_input(
else:
console.print("[bold green]No guardrails triggered.[/bold green]")

-console.print(f"\n[bold blue]Assistant output:[/bold blue] {response.llm_response.output_text}\n")
-return response.llm_response.id
+console.print(f"\n[bold blue]Assistant output:[/bold blue] {response.output_text}\n")
+return response.id

except Exception as e:
console.print(f"[bold red]Error: {e}[/bold red]")
@@ -52,7 +52,7 @@ async def main():
model="gpt-4.1-mini",
)

-response_content = response.llm_response.choices[0].message.content
+response_content = response.choices[0].message.content
console.print(
Panel(
f"[bold green]Tripwire not triggered[/bold green]\n\nResponse: {response_content}",
@@ -25,7 +25,7 @@ async def process_input(
model="gpt-4.1-mini",
)

-response_content = response.llm_response.choices[0].message.content
+response_content = response.choices[0].message.content
print(f"\nAssistant: {response_content}")

# Guardrails passed - now safe to add to conversation history
4 changes: 2 additions & 2 deletions examples/implementation_code/blocking/blocking_responses.py
@@ -18,9 +18,9 @@ async def process_input(guardrails_client: GuardrailsAsyncOpenAI, user_input: st
# including pre-flight, input, and output stages, plus the LLM call
response = await guardrails_client.responses.create(input=user_input, model="gpt-4.1-mini", previous_response_id=response_id)

-print(f"\nAssistant: {response.llm_response.output_text}")
+print(f"\nAssistant: {response.output_text}")

-return response.llm_response.id
+return response.id

except GuardrailTripwireTriggered:
# GuardrailsClient automatically handles tripwire exceptions
@@ -30,8 +30,8 @@ async def process_input(
# Stream with output guardrail checks and accumulate response
response_content = ""
async for chunk in stream:
-if chunk.llm_response.choices[0].delta.content:
-delta = chunk.llm_response.choices[0].delta.content
+if chunk.choices[0].delta.content:
+delta = chunk.choices[0].delta.content
print(delta, end="", flush=True)
response_content += delta

10 changes: 5 additions & 5 deletions examples/implementation_code/streaming/streaming_responses.py
@@ -26,15 +26,15 @@ async def process_input(guardrails_client: GuardrailsAsyncOpenAI, user_input: st

# Stream with output guardrail checks
async for chunk in stream:
-# Access streaming response exactly like native OpenAI API through .llm_response
+# Access streaming response exactly like native OpenAI API
# For responses API streaming, check for delta content
-if hasattr(chunk.llm_response, "delta") and chunk.llm_response.delta:
-print(chunk.llm_response.delta, end="", flush=True)
+if hasattr(chunk, "delta") and chunk.delta:
+print(chunk.delta, end="", flush=True)

# Get the response ID from the final chunk
response_id_to_return = None
-if hasattr(chunk.llm_response, "response") and hasattr(chunk.llm_response.response, "id"):
-response_id_to_return = chunk.llm_response.response.id
+if hasattr(chunk, "response") and hasattr(chunk.response, "id"):
+response_id_to_return = chunk.response.id

return response_id_to_return

2 changes: 1 addition & 1 deletion examples/internal_examples/custom_context.py
@@ -58,7 +58,7 @@ async def main() -> None:
model="gpt-4.1-nano",
messages=messages + [{"role": "user", "content": user_input}],
)
-response_content = response.llm_response.choices[0].message.content
+response_content = response.choices[0].message.content
print("Assistant:", response_content)

# Guardrails passed - now safe to add to conversation history
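Every hunk above makes the same mechanical change: attributes previously read through `response.llm_response` are now read directly off the `GuardrailsResponse` object, which the quickstart describes as a drop-in replacement for the OpenAI response with added guardrail results. The PR does not show the wrapper's implementation; as a minimal sketch, such flattening is commonly achieved with `__getattr__` delegation (the constructor signature and type hints here are assumptions, not code from this PR):

```python
from typing import Any


class GuardrailsResponse:
    """Hypothetical sketch: wraps an OpenAI response, carries guardrail
    results, and forwards all other attribute access to the wrapped response."""

    def __init__(self, llm_response: Any, guardrail_results: Any) -> None:
        self.llm_response = llm_response            # old access path still works
        self.guardrail_results = guardrail_results  # e.g. .all_results

    def __getattr__(self, name: str) -> Any:
        # __getattr__ fires only when normal attribute lookup fails, so the
        # wrapper's own fields take precedence while output_text, choices,
        # id, output_parsed, etc. are delegated to the OpenAI response.
        return getattr(self.llm_response, name)
```

Under this pattern both `response.output_text` and the older `response.llm_response.output_text` resolve to the same value, which is why every change in the diff is a pure find-and-replace with no behavioral difference.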