14 changes: 14 additions & 0 deletions packages/types/src/provider-settings.ts
@@ -17,6 +17,7 @@ import {
ioIntelligenceModels,
mistralModels,
moonshotModels,
openAiCodexModels,
openAiNativeModels,
qwenCodeModels,
sambaNovaModels,
@@ -133,6 +134,7 @@ export const providerNames = [
"mistral",
"moonshot",
"minimax",
"openai-codex",
"openai-native",
"qwen-code",
"roo",
@@ -289,6 +291,10 @@ const geminiCliSchema = apiModelIdProviderModelSchema.extend({
geminiCliProjectId: z.string().optional(),
})

const openAiCodexSchema = apiModelIdProviderModelSchema.extend({
// No additional settings needed - uses OAuth authentication
})

const openAiNativeSchema = apiModelIdProviderModelSchema.extend({
openAiNativeApiKey: z.string().optional(),
openAiNativeBaseUrl: z.string().optional(),
@@ -436,6 +442,7 @@ export const providerSettingsSchemaDiscriminated = z.discriminatedUnion("apiProv
lmStudioSchema.merge(z.object({ apiProvider: z.literal("lmstudio") })),
geminiSchema.merge(z.object({ apiProvider: z.literal("gemini") })),
geminiCliSchema.merge(z.object({ apiProvider: z.literal("gemini-cli") })),
openAiCodexSchema.merge(z.object({ apiProvider: z.literal("openai-codex") })),
openAiNativeSchema.merge(z.object({ apiProvider: z.literal("openai-native") })),
mistralSchema.merge(z.object({ apiProvider: z.literal("mistral") })),
deepSeekSchema.merge(z.object({ apiProvider: z.literal("deepseek") })),
@@ -477,6 +484,7 @@ export const providerSettingsSchema = z.object({
...lmStudioSchema.shape,
...geminiSchema.shape,
...geminiCliSchema.shape,
...openAiCodexSchema.shape,
...openAiNativeSchema.shape,
...mistralSchema.shape,
...deepSeekSchema.shape,
@@ -559,6 +567,7 @@ export const modelIdKeysByProvider: Record<TypicalProvider, ModelIdKey> = {
openrouter: "openRouterModelId",
bedrock: "apiModelId",
vertex: "apiModelId",
"openai-codex": "apiModelId",
"openai-native": "openAiModelId",
ollama: "ollamaModelId",
lmstudio: "lmStudioModelId",
@@ -684,6 +693,11 @@ export const MODELS_BY_PROVIDER: Record<
label: "MiniMax",
models: Object.keys(minimaxModels),
},
"openai-codex": {
id: "openai-codex",
label: "OpenAI - ChatGPT Plus/Pro",
models: Object.keys(openAiCodexModels),
},
"openai-native": {
id: "openai-native",
label: "OpenAI",
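Taken together, the provider-settings changes follow the existing pattern: the new "openai-codex" entry adds no credential fields because authentication is handled by OAuth. A minimal, self-contained sketch of how the discriminated-union validation behaves for the new provider — the base schema's fields here are illustrative stand-ins, not the repo's full definitions:

```ts
import { z } from "zod"

// Illustrative stand-in for the repo's apiModelIdProviderModelSchema
// (the real schema carries more fields than shown here).
const apiModelIdProviderModelSchema = z.object({
	apiModelId: z.string().optional(),
})

// Mirrors the diff: no extra settings, since the provider authenticates via OAuth.
const openAiCodexSchema = apiModelIdProviderModelSchema.extend({})

const providerSettingsSchemaDiscriminated = z.discriminatedUnion("apiProvider", [
	openAiCodexSchema.merge(z.object({ apiProvider: z.literal("openai-codex") })),
	// ...other providers omitted for brevity
])

// A valid openai-codex config needs only the provider tag and, optionally, a model id.
providerSettingsSchemaDiscriminated.parse({
	apiProvider: "openai-codex",
	apiModelId: "gpt-5.2-codex",
})
```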
4 changes: 4 additions & 0 deletions packages/types/src/providers/index.ts
@@ -18,6 +18,7 @@ export * from "./mistral.js"
export * from "./moonshot.js"
export * from "./ollama.js"
export * from "./openai.js"
export * from "./openai-codex.js"
export * from "./openrouter.js"
export * from "./qwen-code.js"
export * from "./requesty.js"
@@ -48,6 +49,7 @@ import { ioIntelligenceDefaultModelId } from "./io-intelligence.js"
import { litellmDefaultModelId } from "./lite-llm.js"
import { mistralDefaultModelId } from "./mistral.js"
import { moonshotDefaultModelId } from "./moonshot.js"
import { openAiCodexDefaultModelId } from "./openai-codex.js"
import { openRouterDefaultModelId } from "./openrouter.js"
import { qwenCodeDefaultModelId } from "./qwen-code.js"
import { requestyDefaultModelId } from "./requesty.js"
@@ -111,6 +113,8 @@ export function getProviderDefaultModelId(
return options?.isChina ? mainlandZAiDefaultModelId : internationalZAiDefaultModelId
case "openai-native":
return "gpt-4o" // Based on openai-native patterns
case "openai-codex":
return openAiCodexDefaultModelId
case "mistral":
return mistralDefaultModelId
case "openai":
92 changes: 92 additions & 0 deletions packages/types/src/providers/openai-codex.ts
@@ -0,0 +1,92 @@
import type { ModelInfo } from "../model.js"

/**
* OpenAI Codex Provider
*
* This provider uses OAuth authentication via ChatGPT Plus/Pro subscription
* instead of direct API keys. Requests are routed to the Codex backend at
* https://chatgpt.com/backend-api/codex/responses
*
* Key differences from openai-native:
* - Uses OAuth Bearer tokens instead of API keys
* - Subscription-based pricing (no per-token costs)
* - Limited model subset available
* - Custom routing to Codex backend
*/

export type OpenAiCodexModelId = keyof typeof openAiCodexModels

export const openAiCodexDefaultModelId: OpenAiCodexModelId = "gpt-5.2-codex"

/**
* Models available through the Codex OAuth flow.
* These models are accessible to ChatGPT Plus/Pro subscribers.
* Costs are 0 as they are covered by the subscription.
*/
export const openAiCodexModels = {
"gpt-5.1-codex-max": {
maxTokens: 128000,
contextWindow: 400000,
supportsNativeTools: true,
defaultToolProtocol: "native",
includedTools: ["apply_patch"],
excludedTools: ["apply_diff", "write_to_file"],
supportsImages: true,
supportsPromptCache: true,
supportsReasoningEffort: ["low", "medium", "high", "xhigh"],
reasoningEffort: "xhigh",
// Subscription-based: no per-token costs
inputPrice: 0,
outputPrice: 0,
supportsTemperature: false,
description: "GPT-5.1 Codex Max: Maximum capability coding model via ChatGPT subscription",
},
"gpt-5.2-codex": {
maxTokens: 128000,
contextWindow: 400000,
supportsNativeTools: true,
defaultToolProtocol: "native",
includedTools: ["apply_patch"],
excludedTools: ["apply_diff", "write_to_file"],
supportsImages: true,
supportsPromptCache: true,
supportsReasoningEffort: ["low", "medium", "high", "xhigh"],
reasoningEffort: "medium",
inputPrice: 0,
outputPrice: 0,
supportsTemperature: false,
description: "GPT-5.2 Codex: OpenAI's flagship coding model via ChatGPT subscription",
},
"gpt-5.1-codex-mini": {
maxTokens: 128000,
contextWindow: 400000,
supportsNativeTools: true,
defaultToolProtocol: "native",
includedTools: ["apply_patch"],
excludedTools: ["apply_diff", "write_to_file"],
supportsImages: true,
supportsPromptCache: true,
supportsReasoningEffort: ["low", "medium", "high"],
reasoningEffort: "medium",
inputPrice: 0,
outputPrice: 0,
supportsTemperature: false,
description: "GPT-5.1 Codex Mini: Faster version for coding tasks via ChatGPT subscription",
},
"gpt-5.2": {
maxTokens: 128000,
contextWindow: 400000,
supportsNativeTools: true,
defaultToolProtocol: "native",
includedTools: ["apply_patch"],
excludedTools: ["apply_diff", "write_to_file"],
supportsImages: true,
supportsPromptCache: true,
supportsReasoningEffort: ["none", "low", "medium", "high", "xhigh"],
reasoningEffort: "medium",
inputPrice: 0,
outputPrice: 0,
supportsTemperature: false,
description: "GPT-5.2: Latest GPT model via ChatGPT subscription",
},
} as const satisfies Record<string, ModelInfo>
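The header comment in this file describes the intended request path: an OAuth bearer token obtained from the ChatGPT Plus/Pro login, sent to the Codex backend rather than authenticated with an API key. A rough sketch of that flow, using the OAuth manager exercised in the tests below — import paths and the payload field names are assumptions; only the endpoint URL comes from this PR:

```ts
import { openAiCodexDefaultModelId } from "./openai-codex.js"
// Path illustrative; the real manager lives under src/integrations/openai-codex/oauth.
import { openAiCodexOAuthManager } from "../../integrations/openai-codex/oauth"

// Sketch only: bearer token from the ChatGPT subscription, routed to the Codex backend.
const accessToken = await openAiCodexOAuthManager.getAccessToken()

const response = await fetch("https://chatgpt.com/backend-api/codex/responses", {
	method: "POST",
	headers: {
		Authorization: `Bearer ${accessToken}`,
		"Content-Type": "application/json",
	},
	// Field names here are assumed; the handler builds the real Responses-API payload.
	body: JSON.stringify({ model: openAiCodexDefaultModelId, stream: true }),
})
```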
3 changes: 3 additions & 0 deletions packages/types/src/vscode-extension-host.ts
@@ -325,6 +325,7 @@ export type ExtensionState = Pick<
taskSyncEnabled: boolean
featureRoomoteControlEnabled: boolean
claudeCodeIsAuthenticated?: boolean
openAiCodexIsAuthenticated?: boolean
debug?: boolean
}

@@ -454,6 +455,8 @@ export interface WebviewMessage {
| "rooCloudManualUrl"
| "claudeCodeSignIn"
| "claudeCodeSignOut"
| "openAiCodexSignIn"
| "openAiCodexSignOut"
| "switchOrganization"
| "condenseTaskContextRequest"
| "requestIndexingStatus"
3 changes: 3 additions & 0 deletions src/api/index.ts
@@ -13,6 +13,7 @@ import {
VertexHandler,
AnthropicVertexHandler,
OpenAiHandler,
OpenAiCodexHandler,
LmStudioHandler,
GeminiHandler,
OpenAiNativeHandler,
@@ -149,6 +150,8 @@ export function buildApiHandler(configuration: ProviderSettings): ApiHandler {
return new LmStudioHandler(options)
case "gemini":
return new GeminiHandler(options)
case "openai-codex":
return new OpenAiCodexHandler(options)
case "openai-native":
return new OpenAiNativeHandler(options)
case "deepseek":
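With the new case in buildApiHandler, a configuration tagged "openai-codex" resolves to OpenAiCodexHandler without any API-key field. A usage sketch under the assumption that ProviderSettings is exported from the types package (import paths are illustrative):

```ts
import { buildApiHandler } from "./index"
import type { ProviderSettings } from "@roo-code/types" // package name assumed

const settings: ProviderSettings = {
	apiProvider: "openai-codex",
	apiModelId: "gpt-5.2-codex",
	// No API key: the handler relies on the ChatGPT Plus/Pro OAuth session.
}

// Expected to return an OpenAiCodexHandler instance via the switch above.
const handler = buildApiHandler(settings)
```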
101 changes: 101 additions & 0 deletions src/api/providers/__tests__/openai-codex-native-tool-calls.spec.ts
@@ -0,0 +1,101 @@
// cd src && npx vitest run api/providers/__tests__/openai-codex-native-tool-calls.spec.ts

import { beforeEach, describe, expect, it, vi } from "vitest"

import { OpenAiCodexHandler } from "../openai-codex"
import type { ApiHandlerOptions } from "../../../shared/api"
import { NativeToolCallParser } from "../../../core/assistant-message/NativeToolCallParser"
import { openAiCodexOAuthManager } from "../../../integrations/openai-codex/oauth"

describe("OpenAiCodexHandler native tool calls", () => {
let handler: OpenAiCodexHandler
let mockOptions: ApiHandlerOptions

beforeEach(() => {
vi.restoreAllMocks()
NativeToolCallParser.clearRawChunkState()
NativeToolCallParser.clearAllStreamingToolCalls()

mockOptions = {
apiModelId: "gpt-5.2-2025-12-11",
// minimal settings; OAuth is mocked below
}
handler = new OpenAiCodexHandler(mockOptions)
})

it("yields tool_call_partial chunks when API returns function_call-only response", async () => {
vi.spyOn(openAiCodexOAuthManager, "getAccessToken").mockResolvedValue("test-token")
vi.spyOn(openAiCodexOAuthManager, "getAccountId").mockResolvedValue("acct_test")

// Mock OpenAI SDK streaming (preferred path).
;(handler as any).client = {
responses: {
create: vi.fn().mockResolvedValue({
async *[Symbol.asyncIterator]() {
yield {
type: "response.output_item.added",
item: {
type: "function_call",
call_id: "call_1",
name: "attempt_completion",
arguments: "",
},
output_index: 0,
}
yield {
type: "response.function_call_arguments.delta",
delta: '{"result":"hi"}',
// Note: intentionally omit call_id + name to simulate tool-call-only streams.
item_id: "fc_1",
output_index: 0,
}
yield {
type: "response.completed",
response: {
id: "resp_1",
status: "completed",
output: [
{
type: "function_call",
call_id: "call_1",
name: "attempt_completion",
arguments: '{"result":"hi"}',
},
],
usage: { input_tokens: 1, output_tokens: 1 },
},
}
},
}),
},
}

const stream = handler.createMessage("system", [{ role: "user", content: "hello" } as any], {
taskId: "t",
toolProtocol: "native",
tools: [],
})

const chunks: any[] = []
for await (const chunk of stream) {
chunks.push(chunk)
if (chunk.type === "tool_call_partial") {
// Simulate Task.ts behavior so finish_reason handling can emit tool_call_end elsewhere
NativeToolCallParser.processRawChunk({
index: chunk.index,
id: chunk.id,
name: chunk.name,
arguments: chunk.arguments,
})
}
}

const toolChunks = chunks.filter((c) => c.type === "tool_call_partial")
expect(toolChunks.length).toBeGreaterThan(0)
expect(toolChunks[0]).toMatchObject({
type: "tool_call_partial",
id: "call_1",
name: "attempt_completion",
})
})
})
1 change: 1 addition & 0 deletions src/api/providers/index.ts
@@ -15,6 +15,7 @@ export { IOIntelligenceHandler } from "./io-intelligence"
export { LiteLLMHandler } from "./lite-llm"
export { LmStudioHandler } from "./lm-studio"
export { MistralHandler } from "./mistral"
export { OpenAiCodexHandler } from "./openai-codex"
export { OpenAiNativeHandler } from "./openai-native"
export { OpenAiHandler } from "./openai"
export { OpenRouterHandler } from "./openrouter"