import os
import weave
from openai import OpenAI
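
# weave.init() starts tracing and logs every @weave.op call to the named project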
weave.init("heroku-ai-project")

client = OpenAI(
    base_url=os.getenv("INFERENCE_URL") + "/v1",
    api_key=os.getenv("INFERENCE_KEY"),
)
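
# @weave.op() records each decorated call's inputs, outputs, and latency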
@weave.op()
def generate_response(prompt: str, system_prompt: str = "You are a helpful assistant.") -> str:
    """Generate a response using Heroku AI."""
    response = client.chat.completions.create(
        model=os.getenv("INFERENCE_MODEL_ID"),
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": prompt},
        ],
    )
    return response.choices[0].message.content

@weave.op()
def summarize_text(text: str) -> str:
    """Summarize the given text."""
    return generate_response(f"Summarize this text in 2-3 sentences:\n\n{text}")

# Both the decorated function calls and the underlying LLM request are traced
result = summarize_text("Heroku is a cloud platform that lets companies build, deliver, monitor and scale apps.")
print(result)
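# The nested trace (summarize_text -> generate_response -> LLM call) can be inspected in the Weave UI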