Requires `pip install openinference-instrumentation-openai`.
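Note that `using_attributes` only takes effect once the OpenAI instrumentor is active. A minimal setup sketch (the console exporter is purely illustrative; in practice you would export to your tracing backend):

```python
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor

from openinference.instrumentation.openai import OpenAIInstrumentor

# Illustrative tracer setup; swap the console exporter for your own backend.
tracer_provider = TracerProvider()
tracer_provider.add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter()))

OpenAIInstrumentor().instrument(tracer_provider=tracer_provider)
```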
Once you define your OpenAI client, any call inside our context managers will attach the corresponding attributes to the spans.

```python
import openai

from openinference.instrumentation import using_attributes

client = openai.OpenAI()

# Defining a Session
with using_attributes(session_id="my-session-id"):
    response = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Write a haiku."}],
        max_tokens=20,
    )

# Defining a User
with using_attributes(user_id="my-user-id"):
    response = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Write a haiku."}],
        max_tokens=20,
    )

# Defining a Session AND a User
with using_attributes(
    session_id="my-session-id",
    user_id="my-user-id",
):
    response = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Write a haiku."}],
        max_tokens=20,
    )
```
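To confirm the attributes actually land on the spans, you can capture spans in memory and inspect them. A sketch, assuming the OpenInference semantic-convention keys `session.id` and `user.id`:

```python
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import SimpleSpanProcessor
from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter

# Route spans to an in-memory exporter so they can be inspected, e.g. in tests.
exporter = InMemorySpanExporter()
tracer_provider = TracerProvider()
tracer_provider.add_span_processor(SimpleSpanProcessor(exporter))

# ...instrument OpenAI with this tracer_provider and run a call inside
# using_attributes(session_id="my-session-id", user_id="my-user-id")...

for span in exporter.get_finished_spans():
    print(span.attributes.get("session.id"), span.attributes.get("user.id"))
```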
Alternatively, if you wrap your calls inside functions, you can use them as decorators:

```python
import openai

from openinference.instrumentation import using_attributes

client = openai.OpenAI()

# Defining a Session
@using_attributes(session_id="my-session-id")
def call_fn(client, *args, **kwargs):
    return client.chat.completions.create(*args, **kwargs)

# Defining a User
@using_attributes(user_id="my-user-id")
def call_fn(client, *args, **kwargs):
    return client.chat.completions.create(*args, **kwargs)

# Defining a Session AND a User
@using_attributes(
    session_id="my-session-id",
    user_id="my-user-id",
)
def call_fn(client, *args, **kwargs):
    return client.chat.completions.create(*args, **kwargs)
```
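The decorated function is then called like any other; for example, the session-scoped variant above could be invoked as:

```python
response = call_fn(
    client,
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Write a haiku."}],
    max_tokens=20,
)
```

The same invocation pattern applies to the decorator examples in the remaining sections.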
Requires `pip install openinference-instrumentation-langchain`.
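As with OpenAI, the LangChain instrumentor must be active before attributes can be attached. A minimal sketch:

```python
from openinference.instrumentation.langchain import LangChainInstrumentor

# Uses the globally configured tracer provider by default; see the OpenAI
# section for an explicit tracer_provider example.
LangChainInstrumentor().instrument()
```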
Once you define your LangChain chain, any call inside our context managers will attach the corresponding attributes to the spans.

```python
from langchain.chains import LLMChain
from langchain_core.prompts import PromptTemplate
from langchain_openai import OpenAI

from openinference.instrumentation import using_attributes

prompt_template = "Tell me a {adjective} joke"
prompt = PromptTemplate(input_variables=["adjective"], template=prompt_template)
llm = LLMChain(llm=OpenAI(), prompt=prompt, metadata={"category": "jokes"})

# Defining a Session
with using_attributes(session_id="my-session-id"):
    response = llm.predict(adjective="funny")

# Defining a User
with using_attributes(user_id="my-user-id"):
    response = llm.predict(adjective="funny")

# Defining a Session AND a User
with using_attributes(
    session_id="my-session-id",
    user_id="my-user-id",
):
    response = llm.predict(adjective="funny")
```
Alternatively, if you wrap your calls inside functions, you can use them as decorators:

```python
from langchain.chains import LLMChain
from langchain_core.prompts import PromptTemplate
from langchain_openai import OpenAI

from openinference.instrumentation import using_attributes

prompt_template = "Tell me a {adjective} joke"
prompt = PromptTemplate(input_variables=["adjective"], template=prompt_template)
llm = LLMChain(llm=OpenAI(), prompt=prompt, metadata={"category": "jokes"})

# Defining a Session
@using_attributes(session_id="my-session-id")
def call_fn(llm, *args, **kwargs):
    return llm.predict(*args, **kwargs)

# Defining a User
@using_attributes(user_id="my-user-id")
def call_fn(llm, *args, **kwargs):
    return llm.predict(*args, **kwargs)

# Defining a Session AND a User
@using_attributes(
    session_id="my-session-id",
    user_id="my-user-id",
)
def call_fn(llm, *args, **kwargs):
    return llm.predict(*args, **kwargs)
```
Requires `pip install openinference-instrumentation-llama-index`.
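Here too, the LlamaIndex instrumentor must be active first. A minimal sketch:

```python
from openinference.instrumentation.llama_index import LlamaIndexInstrumentor

# Uses the globally configured tracer provider by default.
LlamaIndexInstrumentor().instrument()
```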
Once you define your LlamaIndex chat engine, any call inside our context managers will attach the corresponding attributes to the spans.

```python
from llama_index.core.chat_engine import SimpleChatEngine

from openinference.instrumentation import using_attributes

chat_engine = SimpleChatEngine.from_defaults()

# Defining a Session
with using_attributes(session_id="my-session-id"):
    response = chat_engine.chat(
        "Say something profound and romantic about the Fourth of July"
    )

# Defining a User
with using_attributes(user_id="my-user-id"):
    response = chat_engine.chat(
        "Say something profound and romantic about the Fourth of July"
    )

# Defining a Session AND a User
with using_attributes(
    session_id="my-session-id",
    user_id="my-user-id",
):
    response = chat_engine.chat(
        "Say something profound and romantic about the Fourth of July"
    )
```
Alternatively, if you wrap your calls inside functions, you can use them as decorators:

```python
from llama_index.core.chat_engine import SimpleChatEngine

from openinference.instrumentation import using_attributes

chat_engine = SimpleChatEngine.from_defaults()

# Defining a Session
@using_attributes(session_id="my-session-id")
def call_fn(chat_engine, *args, **kwargs):
    return chat_engine.chat(*args, **kwargs)

# Defining a User
@using_attributes(user_id="my-user-id")
def call_fn(chat_engine, *args, **kwargs):
    return chat_engine.chat(*args, **kwargs)

# Defining a Session AND a User
@using_attributes(
    session_id="my-session-id",
    user_id="my-user-id",
)
def call_fn(chat_engine, *args, **kwargs):
    return chat_engine.chat(*args, **kwargs)
```
Requires `pip install openinference-instrumentation-bedrock`.
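The same prerequisite applies: activate the Bedrock instrumentor before making calls. A minimal sketch:

```python
from openinference.instrumentation.bedrock import BedrockInstrumentor

# Uses the globally configured tracer provider by default.
BedrockInstrumentor().instrument()
```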
Once you define your boto3 Bedrock client, any call inside our context managers will attach the corresponding attributes to the spans.

```python
import boto3

from openinference.instrumentation import using_attributes

session = boto3.session.Session()
client = session.client("bedrock-runtime", region_name="us-west-2")

# Defining a Session
with using_attributes(session_id="my-session-id"):
    response = client.invoke_model(
        modelId="anthropic.claude-v2",
        body=(
            b'{"prompt": "Human: Hello there, how are you? Assistant:", "max_tokens_to_sample": 1024}'
        ),
    )

# Defining a User
with using_attributes(user_id="my-user-id"):
    response = client.invoke_model(
        modelId="anthropic.claude-v2",
        body=(
            b'{"prompt": "Human: Hello there, how are you? Assistant:", "max_tokens_to_sample": 1024}'
        ),
    )

# Defining a Session AND a User
with using_attributes(
    session_id="my-session-id",
    user_id="my-user-id",
):
    response = client.invoke_model(
        modelId="anthropic.claude-v2",
        body=(
            b'{"prompt": "Human: Hello there, how are you? Assistant:", "max_tokens_to_sample": 1024}'
        ),
    )
```
Alternatively, if you wrap your calls inside functions, you can use them as decorators:

```python
import boto3

from openinference.instrumentation import using_attributes

session = boto3.session.Session()
client = session.client("bedrock-runtime", region_name="us-west-2")

# Defining a Session
@using_attributes(session_id="my-session-id")
def call_fn(client, *args, **kwargs):
    return client.invoke_model(*args, **kwargs)

# Defining a User
@using_attributes(user_id="my-user-id")
def call_fn(client, *args, **kwargs):
    return client.invoke_model(*args, **kwargs)

# Defining a Session AND a User
@using_attributes(
    session_id="my-session-id",
    user_id="my-user-id",
)
def call_fn(client, *args, **kwargs):
    return client.invoke_model(*args, **kwargs)
```
Requires `pip install openinference-instrumentation-mistralai`.
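As before, activate the Mistral instrumentor first. A minimal sketch:

```python
from openinference.instrumentation.mistralai import MistralAIInstrumentor

# Uses the globally configured tracer provider by default.
MistralAIInstrumentor().instrument()
```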
Once you define your Mistral client, any call inside our context managers will attach the corresponding attributes to the spans.

```python
from mistralai.client import MistralClient
from mistralai.models.chat_completion import ChatMessage

from openinference.instrumentation import using_attributes

client = MistralClient()

# Defining a Session
with using_attributes(session_id="my-session-id"):
    response = client.chat(
        model="mistral-large-latest",
        messages=[
            ChatMessage(
                content="Who won the World Cup in 2018?",
                role="user",
            )
        ],
    )

# Defining a User
with using_attributes(user_id="my-user-id"):
    response = client.chat(
        model="mistral-large-latest",
        messages=[
            ChatMessage(
                content="Who won the World Cup in 2018?",
                role="user",
            )
        ],
    )

# Defining a Session AND a User
with using_attributes(
    session_id="my-session-id",
    user_id="my-user-id",
):
    response = client.chat(
        model="mistral-large-latest",
        messages=[
            ChatMessage(
                content="Who won the World Cup in 2018?",
                role="user",
            )
        ],
    )
```
Alternatively, if you wrap your calls inside functions, you can use them as decorators:

```python
from mistralai.client import MistralClient

from openinference.instrumentation import using_attributes

client = MistralClient()

# Defining a Session
@using_attributes(session_id="my-session-id")
def call_fn(client, *args, **kwargs):
    return client.chat(*args, **kwargs)

# Defining a User
@using_attributes(user_id="my-user-id")
def call_fn(client, *args, **kwargs):
    return client.chat(*args, **kwargs)

# Defining a Session AND a User
@using_attributes(
    session_id="my-session-id",
    user_id="my-user-id",
)
def call_fn(client, *args, **kwargs):
    return client.chat(*args, **kwargs)
```
Requires `pip install openinference-instrumentation-dspy`.
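Finally, activate the DSPy instrumentor before running your program. A minimal sketch:

```python
from openinference.instrumentation.dspy import DSPyInstrumentor

# Uses the globally configured tracer provider by default.
DSPyInstrumentor().instrument()
```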
Once you define your DSPy predictor, any call inside our context managers will attach the corresponding attributes to the spans.

```python
import dspy

from openinference.instrumentation import using_attributes

class BasicQA(dspy.Signature):
    """Answer questions with short factoid answers."""

    question = dspy.InputField()
    answer = dspy.OutputField(desc="often between 1 and 5 words")

turbo = dspy.OpenAI(model="gpt-3.5-turbo")
dspy.settings.configure(lm=turbo)
predictor = dspy.Predict(BasicQA)  # Define the predictor.

# Defining a Session
with using_attributes(session_id="my-session-id"):
    response = predictor(
        question="What is the capital of the United States?"
    )

# Defining a User
with using_attributes(user_id="my-user-id"):
    response = predictor(
        question="What is the capital of the United States?"
    )

# Defining a Session AND a User
with using_attributes(
    session_id="my-session-id",
    user_id="my-user-id",
):
    response = predictor(
        question="What is the capital of the United States?"
    )
```
Alternatively, if you wrap your calls inside functions, you can use them as decorators:

```python
import dspy

from openinference.instrumentation import using_attributes

# Defining a Session
@using_attributes(session_id="my-session-id")
def call_fn(predictor, *args, **kwargs):
    return predictor(*args, **kwargs)

# Defining a User
@using_attributes(user_id="my-user-id")
def call_fn(predictor, *args, **kwargs):
    return predictor(*args, **kwargs)

# Defining a Session AND a User
@using_attributes(
    session_id="my-session-id",
    user_id="my-user-id",
)
def call_fn(predictor, *args, **kwargs):
    return predictor(*args, **kwargs)
```