import OpenAI from "openai";
import { Maxim } from "@maximai/maxim-js";
import { MaximOpenAIClient } from "@maximai/maxim-js/openai-sdk";
import { v4 as uuid } from "uuid";
/**
 * Demonstrates a two-step OpenAI tool-call flow instrumented with Maxim:
 * both generations are grouped under one Maxim trace via request headers.
 *
 * Requires MAXIM_API_KEY, MAXIM_LOG_REPO_ID and OPENAI_API_KEY to be set.
 *
 * @throws Error if required env vars are missing, the Maxim logger is
 *   unavailable, or the model does not return a tool call.
 */
async function main() {
  // Fail fast with a clear message instead of passing `undefined` into the
  // SDK constructors (process.env values are `string | undefined` under strict).
  const maximApiKey = process.env.MAXIM_API_KEY;
  const maximLogRepoId = process.env.MAXIM_LOG_REPO_ID;
  const openaiApiKey = process.env.OPENAI_API_KEY;
  if (!maximApiKey || !maximLogRepoId || !openaiApiKey) {
    throw new Error(
      "MAXIM_API_KEY, MAXIM_LOG_REPO_ID and OPENAI_API_KEY must be set"
    );
  }
  const maxim = new Maxim({ apiKey: maximApiKey });
  const logger = await maxim.logger({ id: maximLogRepoId });
  if (!logger) {
    throw new Error("Logger is not available");
  }
  const openai = new OpenAI({ apiKey: openaiApiKey });
  const client = new MaximOpenAIClient(openai, logger);
  // Create a trace to group all generations in the tool call flow
  const trace = logger.trace({ id: uuid() });
  try {
    // Define tools
    const tools: OpenAI.Chat.Completions.ChatCompletionTool[] = [
      {
        type: "function",
        function: {
          name: "get_weather",
          description: "Get the current weather in a given location",
          parameters: {
            type: "object",
            properties: {
              location: {
                type: "string",
                description: "The city and state, e.g. San Francisco, CA",
              },
              unit: {
                type: "string",
                enum: ["celsius", "fahrenheit"],
              },
            },
            required: ["location"],
          },
        },
      },
    ];
    const messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[] = [
      { role: "user", content: "What's the weather like in San Francisco?" },
    ];
    // First call - model requests a tool call
    const firstResponse = await client.chat.completions.create(
      {
        model: "gpt-4o-mini",
        messages: messages,
        tools: tools,
        tool_choice: "required",
        max_tokens: 200,
      },
      {
        headers: {
          "maxim-trace-id": trace.id,
          "maxim-generation-name": "tool-call-request",
        },
      }
    );
    const assistantMessage = firstResponse.choices[0]?.message;
    const toolCall = assistantMessage?.tool_calls?.[0];
    // Guard instead of `toolCall!`: even with tool_choice "required" a
    // malformed/filtered response would otherwise crash with an opaque
    // TypeError deep in the messages array construction.
    if (!assistantMessage || !toolCall) {
      throw new Error("Model did not return a tool call");
    }
    // Simulate tool execution (a real app would dispatch on toolCall.function.name)
    const toolResult = JSON.stringify({
      location: "San Francisco, CA",
      temperature: 72,
      unit: "fahrenheit",
      condition: "sunny",
    });
    // Second call - pass the assistant's tool-call message and the tool
    // result back to the model so it can produce the final answer
    const secondMessages: OpenAI.Chat.Completions.ChatCompletionMessageParam[] = [
      ...messages,
      assistantMessage,
      {
        role: "tool",
        tool_call_id: toolCall.id,
        content: toolResult,
      },
    ];
    const secondResponse = await client.chat.completions.create(
      {
        model: "gpt-4o-mini",
        messages: secondMessages,
        tools: tools,
        max_tokens: 200,
      },
      {
        headers: {
          "maxim-trace-id": trace.id,
          "maxim-generation-name": "tool-call-response",
        },
      }
    );
    console.log("Response:", secondResponse.choices[0]?.message?.content);
    // Set trace output before ending
    trace.output(secondResponse.choices[0]?.message?.content || "");
  } finally {
    // Always close the trace and flush both SDKs, even on error —
    // otherwise a failed API call silently loses all buffered logs.
    trace.end();
    await logger.cleanup();
    await maxim.cleanup();
  }
}
// Report failure via the exit code as well as stderr — with bare
// `catch(console.error)` the process would exit 0 on error, so shells/CI
// would treat the run as successful.
main().catch((err: unknown) => {
  console.error(err);
  process.exitCode = 1;
});