The Vercel AI SDK (version 4.2+) supports MCP, letting you add Toolcog’s API capabilities to Next.js and React applications.
npm install ai @ai-sdk/openai

The Vercel AI SDK provides experimental_createMCPClient for connecting to MCP servers via SSE.
import { experimental_createMCPClient as createMCPClient } from "ai";import { openai } from "@ai-sdk/openai";import { generateText } from "ai";
async function main() { // Connect to Toolcog MCP server const toolcog = await createMCPClient({ transport: { type: "sse", url: "https://mcp.toolcog.com", }, });
// Get tools from Toolcog const tools = await toolcog.tools();
// Use with any AI provider const result = await generateText({ model: openai("gpt-4o"), tools, prompt: "Find Stripe operations for managing subscriptions", });
console.log(result.text);
// Clean up await toolcog.close();}import { experimental_createMCPClient as createMCPClient } from "ai";import { openai } from "@ai-sdk/openai";import { streamText } from "ai";
export async function POST(req: Request) {
  const { messages } = await req.json();

  // Connect to Toolcog
  const toolcog = await createMCPClient({
    transport: {
      type: "sse",
      url: "https://mcp.toolcog.com",
    },
  });

  try {
    const tools = await toolcog.tools();

    const result = streamText({
      model: openai("gpt-4o"),
      tools,
      messages,
      // Close the MCP connection once the stream completes.
      onFinish: async () => {
        await toolcog.close();
      },
    });

    return result.toDataStreamResponse();
  } catch (error) {
    // If tool discovery or stream setup throws, onFinish never runs —
    // close the connection here so it does not leak.
    await toolcog.close();
    throw error;
  }
}

import { experimental_createMCPClient as createMCPClient } from "ai";
import { openai } from "@ai-sdk/openai";
import { streamText } from "ai";
export async function POST(req: Request) { const { messages } = await req.json();
const toolcog = await createMCPClient({ transport: { type: "sse", url: "https://mcp.toolcog.com", }, });
const tools = await toolcog.tools();
const result = streamText({ model: openai("gpt-4o"), tools, maxSteps: 10, // Allow multiple tool calls messages, onFinish: async () => { await toolcog.close(); }, });
return result.toDataStreamResponse();}"use client";
import { useChat } from "ai/react";
export default function Chat() { const { messages, input, handleInputChange, handleSubmit } = useChat();
return ( <div> {messages.map((m) => ( <div key={m.id}> <strong>{m.role}:</strong> {m.content} </div> ))}
<form onSubmit={handleSubmit}> <input value={input} onChange={handleInputChange} placeholder="Ask me to do something with APIs..." /> <button type="submit">Send</button> </form> </div> );}Toolcog works with any AI provider supported by the Vercel AI SDK:
import { anthropic } from "@ai-sdk/anthropic";import { google } from "@ai-sdk/google";import { openai } from "@ai-sdk/openai";
// Use with Claudeconst result = await generateText({ model: anthropic("claude-sonnet-4-20250514"), tools, prompt: "Create a GitHub issue",});
// Use with Geminiconst result = await generateText({ model: google("gemini-2.0-flash"), tools, prompt: "Create a GitHub issue",});
// Use with GPT-4const result = await generateText({ model: openai("gpt-4o"), tools, prompt: "Create a GitHub issue",});When Toolcog needs authorization, the tool result includes an authorization URL. Handle this in your UI:
const result = streamText({ model: openai("gpt-4o"), tools, messages, onStepFinish: async ({ toolResults }) => { for (const result of toolResults || []) { if (result.result?.authorization_url) { // Client will receive this in the stream // and can present it to the user } } },});Connect to a specific catalog:
// Target a specific catalog by appending its path to the MCP endpoint.
const toolcog = await createMCPClient({
  transport: {
    type: "sse",
    url: "https://mcp.toolcog.com/mycompany/internal-apis",
  },
});

For production apps, manage MCP connections carefully:
import { experimental_createMCPClient as createMCPClient } from "ai";
let toolcogClient: Awaited<ReturnType<typeof createMCPClient>> | null = null;
export async function getToolcogClient() { if (!toolcogClient) { toolcogClient = await createMCPClient({ transport: { type: "sse", url: "https://mcp.toolcog.com", }, }); } return toolcogClient;}
// In your routeexport async function POST(req: Request) { const toolcog = await getToolcogClient(); const tools = await toolcog.tools(); // ...}Server-side only — MCP connections should happen in API routes or server components, not client-side code.
Connection lifecycle — Close connections when done, or reuse them across requests in production.
Error boundaries — Wrap chat components in error boundaries to handle connection failures gracefully.
Streaming — Use streamText for better UX during multi-step tool executions.
Max steps — Set maxSteps to allow the AI to make multiple tool calls in sequence.