Retrieves a run.
`GET https://api.openai.com/v1/threads/{thread_id}/runs/{run_id}`
Parameters
path Path Parameters
| Name | Type | Required | Description |
|---|---|---|---|
| thread_id | string | required | The ID of the thread that was run. |
| run_id | string | required | The ID of the run to retrieve. |
Responses
200 application/json
OK
/**
 * Represents an execution run on a thread.
 *
 * Response body (200, application/json) for
 * `GET /threads/{thread_id}/runs/{run_id}`.
 */
interface RunObject {
  /** The identifier, referenced in API endpoints. */
  id: string;
  /** The object type, always "thread.run". */
  object: "thread.run";
  /** Unix timestamp (seconds) for when the run was created. */
  created_at: number;
  /** The ID of the thread that was executed as part of this run. */
  thread_id: string;
  /** The ID of the assistant used for execution of this run. */
  assistant_id: string;
  /** Current lifecycle state of the run. */
  status: "queued" | "in_progress" | "requires_action" | "cancelling" | "cancelled" | "failed" | "completed" | "incomplete" | "expired";
  /** Details on the action required to continue the run; null if none required. */
  required_action: { type: "submit_tool_outputs"; submit_tool_outputs: { tool_calls: RunToolCallObject[]; }; } | null;
  /** The last error associated with this run; null if no error. */
  last_error: { code: "server_error" | "rate_limit_exceeded" | "invalid_prompt"; message: string; } | null;
  /** Unix timestamp (seconds) for when the run will expire; null if not set. */
  expires_at: number | null;
  started_at: number | null;
  cancelled_at: number | null;
  failed_at: number | null;
  completed_at: number | null;
  /** Why the run is incomplete; null unless status is "incomplete". */
  incomplete_details: { reason?: "max_completion_tokens" | "max_prompt_tokens"; } | null;
  /** The model used for this run. */
  model: string;
  /** The instructions used for this run. */
  instructions: string;
  /** The list of tools used for this run. */
  tools: (AssistantToolsCode | AssistantToolsFileSearch | AssistantToolsFunction)[];
  metadata: Metadata;
  // NOTE(review): per the API reference, usage is null while the run is in
  // progress — confirm whether this should be `RunCompletionUsage | null`.
  usage: RunCompletionUsage;
  temperature?: number | null;
  top_p?: number | null;
  max_prompt_tokens: number | null;
  max_completion_tokens: number | null;
  // Fixed: original declared `TruncationObject & null` — an intersection with
  // null collapses to `never` under strict TS. The field is nullable: `| null`.
  truncation_strategy: TruncationObject | null;
  // Fixed: same `& null` → `| null` nullability defect as above.
  tool_choice: AssistantsApiToolChoiceOption | null;
  parallel_tool_calls: ParallelToolCalls;
  response_format: AssistantsApiResponseFormatOption;
}
id: string;
object: "thread.run";
created_at: number;
thread_id: string;
assistant_id: string;
status: "queued" | "in_progress" | "requires_action" | "cancelling" | "cancelled" | "failed" | "completed" | "incomplete" | "expired";
required_action: { type: "submit_tool_outputs"; submit_tool_outputs: { tool_calls: RunToolCallObject[]; }; } | null;
last_error: { code: "server_error" | "rate_limit_exceeded" | "invalid_prompt"; message: string; } | null;
expires_at: number | null;
started_at: number | null;
cancelled_at: number | null;
failed_at: number | null;
completed_at: number | null;
incomplete_details: { reason?: "max_completion_tokens" | "max_prompt_tokens"; } | null;
model: string;
instructions: string;
tools: (AssistantToolsCode | AssistantToolsFileSearch | AssistantToolsFunction)[];
metadata: Metadata;
usage: RunCompletionUsage;
temperature?: number | null;
top_p?: number | null;
max_prompt_tokens: number | null;
max_completion_tokens: number | null;
truncation_strategy: TruncationObject | null;
tool_choice: AssistantsApiToolChoiceOption | null;
parallel_tool_calls: ParallelToolCalls;
response_format: AssistantsApiResponseFormatOption;
}