curl --request POST \
--url https://api.getmaxim.ai/v1/prompts/run \
--header 'Content-Type: application/json' \
--header 'x-maxim-api-key: <api-key>' \
--data '
{
"promptId": "<string>",
"versionId": "<string>",
"workspaceId": "<string>",
"messages": [
{
"role": "assistant",
"content": "<string>",
"tool_call_id": "<string>",
"tool_calls": [
{
"id": "<string>",
"type": "function",
"function": {
"name": "<string>",
"arguments": "<string>"
}
}
]
}
],
"modelName": "<string>",
"modelProvider": "openai",
"modelParameters": {}
}
'

{
"data": {
"usage": {
"totalTokens": 123,
"promptTokens": 123,
"completionTokens": 123
},
"output": null
}
}

Run a specific version of a prompt
curl --request POST \
--url https://api.getmaxim.ai/v1/prompts/run \
--header 'Content-Type: application/json' \
--header 'x-maxim-api-key: <api-key>' \
--data '
{
"promptId": "<string>",
"versionId": "<string>",
"workspaceId": "<string>",
"messages": [
{
"role": "assistant",
"content": "<string>",
"tool_call_id": "<string>",
"tool_calls": [
{
"id": "<string>",
"type": "function",
"function": {
"name": "<string>",
"arguments": "<string>"
}
}
]
}
],
"modelName": "<string>",
"modelProvider": "openai",
"modelParameters": {}
}
'

{
"data": {
"usage": {
"totalTokens": 123,
"promptTokens": 123,
"completionTokens": 123
},
"output": null
}
}

API key for authentication
Unique identifier for the prompt
Unique identifier for the version
Unique identifier for the workspace
Array of messages
Show child attributes
Name of the model to use
Provider of the model
openai, azure, huggingface, anthropic, together, google, groq, bedrock, maxim, cohere, ollama, lmstudio, xai, vertex, mistral, fireworks

Model parameters configuration
Show child attributes
Prompt version executed successfully
Show child attributes
Was this page helpful?