# Ensure OPENAI_API_KEY is set in your environment for this example
import openai
from guardrails import Guard
from guardrails.hub import TwoWords  # Example validator


def main() -> None:
    """Run one Guardrails-validated OpenAI chat completion and print the result.

    Requires OPENAI_API_KEY in the environment. Prints the validated output on
    success, or the raw LLM output plus the validation error on failure.
    """
    # Attach the TwoWords validator so the LLM response is checked post-call.
    guard = Guard().use(
        TwoWords()  # Using a simple validator from the hub
    )

    # Make a call through Guardrails.
    # This will also make an underlying call to the OpenAI LLM.
    response = guard(
        llm_api=openai.chat.completions.create,
        prompt="What is another name for America?",
        model="gpt-3.5-turbo",
        max_tokens=1024,
        # You can add instructions or other parameters as needed by your spec
    )

    # Print the validated (or processed) response.
    if response.validation_passed:
        print(f"Validated Output: {response.validated_output}")
    else:
        print(f"Validation Failed. Raw LLM Output: {response.raw_llm_output}")
        # Fix: ValidationOutcome has no `validation_details` attribute
        # (the original would raise AttributeError here); the failure
        # reason is exposed via `error`.
        print(f"Validation Error: {response.error}")


# Guarded entry point so importing this module does not trigger an LLM call.
if __name__ == "__main__":
    main()