|
| 1 | +import { OpenAICompatibleChatLanguageModel } from "@ai-sdk/openai-compatible" |
| 2 | +import type { LanguageModelV2, LanguageModelV2StreamPart, SharedV2ProviderMetadata } from "@ai-sdk/provider" |
| 3 | +import { type FetchFunction, withoutTrailingSlash, withUserAgentSuffix } from "@ai-sdk/provider-utils" |
| 4 | +import { OpenAIResponsesLanguageModel } from "../openai-compatible/src/responses/openai-responses-language-model" |
| 5 | +import { ProviderTransform } from "../../transform" |
| 6 | + |
// Minimal shape of a raw Copilot chat-completion payload, covering only the
// fields read in this file: non-streaming results carry `reasoning_opaque` on
// choices[0].message, streaming deltas carry it on choices[0].delta.
type RawChunk = {
  choices?: Array<{
    message?: { reasoning_opaque?: string }
    delta?: { reasoning_opaque?: string }
  }>
}
| 13 | + |
| 14 | +const extractor = { |
| 15 | + async extractMetadata({ parsedBody }: { parsedBody: unknown }): Promise<SharedV2ProviderMetadata | undefined> { |
| 16 | + const body = parsedBody as RawChunk |
| 17 | + const opaque = body?.choices?.[0]?.message?.reasoning_opaque |
| 18 | + if (!opaque) return undefined |
| 19 | + return { openaiCompatible: { reasoning_opaque: opaque } } |
| 20 | + }, |
| 21 | + createStreamExtractor: () => ({ processChunk() {}, buildMetadata: () => undefined }), |
| 22 | +} |
| 23 | + |
| 24 | +function wrapStream(stream: ReadableStream<LanguageModelV2StreamPart>) { |
| 25 | + const state = { opaque: undefined as string | undefined } |
| 26 | + return stream.pipeThrough( |
| 27 | + new TransformStream<LanguageModelV2StreamPart, LanguageModelV2StreamPart>({ |
| 28 | + transform(chunk, controller) { |
| 29 | + if (chunk.type === "raw") { |
| 30 | + const raw = chunk.rawValue as RawChunk |
| 31 | + state.opaque ??= raw?.choices?.[0]?.delta?.reasoning_opaque |
| 32 | + } |
| 33 | + if (chunk.type === "reasoning-end" && state.opaque) { |
| 34 | + controller.enqueue({ |
| 35 | + ...chunk, |
| 36 | + providerMetadata: { ...chunk.providerMetadata, openaiCompatible: { reasoning_opaque: state.opaque } }, |
| 37 | + }) |
| 38 | + return |
| 39 | + } |
| 40 | + controller.enqueue(chunk) |
| 41 | + }, |
| 42 | + }), |
| 43 | + ) |
| 44 | +} |
| 45 | + |
| 46 | +function createFetchAdapter(base?: FetchFunction, modelId?: string): FetchFunction { |
| 47 | + const fetcher = base ?? globalThis.fetch |
| 48 | + const isGemini = modelId?.toLowerCase().includes("gemini") |
| 49 | + |
| 50 | + return (async (url, init) => { |
| 51 | + // catch MCP tools not sanitized in transform.ts |
| 52 | + if (isGemini && init?.body && url.toString().includes("/chat/completions")) { |
| 53 | + const body = JSON.parse(init.body as string) |
| 54 | + if (body.tools) { |
| 55 | + body.tools = body.tools.map((t: any) => ({ |
| 56 | + ...t, |
| 57 | + function: { ...t.function, parameters: ProviderTransform.sanitizeGeminiSchema(t.function.parameters) }, |
| 58 | + })) |
| 59 | + init = { ...init, body: JSON.stringify(body) } |
| 60 | + } |
| 61 | + } |
| 62 | + |
| 63 | + const response = await fetcher(url, init) |
| 64 | + if (!url.toString().includes("/chat/completions")) return response |
| 65 | + |
| 66 | + const contentType = response.headers.get("content-type") ?? "" |
| 67 | + |
| 68 | + if (contentType.includes("text/event-stream")) { |
| 69 | + return new Response( |
| 70 | + response.body!.pipeThrough( |
| 71 | + new TransformStream({ |
| 72 | + transform(chunk, controller) { |
| 73 | + const text = new TextDecoder().decode(chunk) |
| 74 | + controller.enqueue(new TextEncoder().encode(text.replace(/"reasoning_text":/g, '"reasoning_content":'))) |
| 75 | + }, |
| 76 | + }), |
| 77 | + ), |
| 78 | + { status: response.status, headers: response.headers }, |
| 79 | + ) |
| 80 | + } |
| 81 | + |
| 82 | + const text = await response.text() |
| 83 | + return new Response(text.replace(/"reasoning_text":/g, '"reasoning_content":'), { |
| 84 | + status: response.status, |
| 85 | + headers: response.headers, |
| 86 | + }) |
| 87 | + }) as FetchFunction |
| 88 | +} |
| 89 | + |
| 90 | +export function createCopilot( |
| 91 | + options: { |
| 92 | + apiKey?: string |
| 93 | + baseURL?: string |
| 94 | + name?: string |
| 95 | + headers?: Record<string, string> |
| 96 | + fetch?: FetchFunction |
| 97 | + } = {}, |
| 98 | +) { |
| 99 | + const baseURL = withoutTrailingSlash(options.baseURL ?? "https://api.openai.com/v1") |
| 100 | + const headers = { |
| 101 | + ...(options.apiKey && { Authorization: `Bearer ${options.apiKey}` }), |
| 102 | + ...options.headers, |
| 103 | + } |
| 104 | + const getHeaders = () => withUserAgentSuffix(headers, "opencode/copilot") |
| 105 | + |
| 106 | + const createChatModel = (id: string): LanguageModelV2 => { |
| 107 | + const copilotFetch = createFetchAdapter(options.fetch, id) |
| 108 | + const model = new OpenAICompatibleChatLanguageModel(id, { |
| 109 | + provider: "openai.chat", |
| 110 | + headers: getHeaders, |
| 111 | + url: ({ path }) => `${baseURL}${path}`, |
| 112 | + fetch: copilotFetch, |
| 113 | + metadataExtractor: extractor, |
| 114 | + }) |
| 115 | + |
| 116 | + return { |
| 117 | + specificationVersion: model.specificationVersion, |
| 118 | + modelId: model.modelId, |
| 119 | + provider: model.provider, |
| 120 | + get supportedUrls() { |
| 121 | + return model.supportedUrls |
| 122 | + }, |
| 123 | + doGenerate: model.doGenerate.bind(model), |
| 124 | + async doStream(opts) { |
| 125 | + const result = await model.doStream({ ...opts, includeRawChunks: true }) |
| 126 | + return { ...result, stream: wrapStream(result.stream) } |
| 127 | + }, |
| 128 | + } |
| 129 | + } |
| 130 | + |
| 131 | + const createResponsesModel = (id: string): LanguageModelV2 => { |
| 132 | + return new OpenAIResponsesLanguageModel(id, { |
| 133 | + provider: `${options.name ?? "copilot"}.responses`, |
| 134 | + headers: getHeaders, |
| 135 | + url: ({ path }) => `${baseURL}${path}`, |
| 136 | + fetch: options.fetch, |
| 137 | + }) |
| 138 | + } |
| 139 | + |
| 140 | + return Object.assign((id: string) => createChatModel(id), { |
| 141 | + languageModel: createChatModel, |
| 142 | + chat: createChatModel, |
| 143 | + responses: createResponsesModel, |
| 144 | + }) |
| 145 | +} |
0 commit comments