diff --git a/.changeset/olive-wolves-love.md b/.changeset/olive-wolves-love.md
new file mode 100644
index 00000000..4c686d77
--- /dev/null
+++ b/.changeset/olive-wolves-love.md
@@ -0,0 +1,5 @@
+---
+'@tanstack/ai-openai': minor
+---
+
+allows additional configuration options when creating an openAI client
diff --git a/packages/typescript/ai-openai/src/adapters/image.ts b/packages/typescript/ai-openai/src/adapters/image.ts
index 4726683a..585e8a72 100644
--- a/packages/typescript/ai-openai/src/adapters/image.ts
+++ b/packages/typescript/ai-openai/src/adapters/image.ts
@@ -3,13 +3,13 @@ import {
   createOpenAIClient,
   generateId,
   getOpenAIApiKeyFromEnv,
-} from '../utils'
+} from '../utils/client'
 import {
   validateImageSize,
   validateNumberOfImages,
   validatePrompt,
 } from '../image/image-provider-options'
-import type { OPENAI_IMAGE_MODELS } from '../model-meta'
+import type { OpenAIImageModel } from '../model-meta'
 import type {
   OpenAIImageModelProviderOptionsByName,
   OpenAIImageModelSizeByName,
@@ -21,16 +21,13 @@ import type {
   ImageGenerationResult,
 } from '@tanstack/ai'
 import type OpenAI_SDK from 'openai'
-import type { OpenAIClientConfig } from '../utils'
+import type { OpenAIClientConfig } from '../utils/client'
 
 /**
  * Configuration for OpenAI image adapter
  */
 export interface OpenAIImageConfig extends OpenAIClientConfig {}
 
-/** Model type for OpenAI Image */
-export type OpenAIImageModel = (typeof OPENAI_IMAGE_MODELS)[number]
-
 /**
  * OpenAI Image Generation Adapter
  *
diff --git a/packages/typescript/ai-openai/src/adapters/summarize.ts b/packages/typescript/ai-openai/src/adapters/summarize.ts
index d507c534..944e8dea 100644
--- a/packages/typescript/ai-openai/src/adapters/summarize.ts
+++ b/packages/typescript/ai-openai/src/adapters/summarize.ts
@@ -1,13 +1,13 @@
 import { BaseSummarizeAdapter } from '@tanstack/ai/adapters'
-import { getOpenAIApiKeyFromEnv } from '../utils'
+import { getOpenAIApiKeyFromEnv } from '../utils/client'
 import { OpenAITextAdapter } from './text'
-import type { OPENAI_CHAT_MODELS } from '../model-meta'
+import type { OpenAIChatModel } from '../model-meta'
 import type {
   StreamChunk,
   SummarizationOptions,
   SummarizationResult,
 } from '@tanstack/ai'
-import type { OpenAIClientConfig } from '../utils'
+import type { OpenAIClientConfig } from '../utils/client'
 
 /**
  * Configuration for OpenAI summarize adapter
@@ -24,9 +24,6 @@ export interface OpenAISummarizeProviderOptions {
   maxTokens?: number
 }
 
-/** Model type for OpenAI summarization */
-export type OpenAISummarizeModel = (typeof OPENAI_CHAT_MODELS)[number]
-
 /**
  * OpenAI Summarize Adapter
  *
  * Provides summarization using OpenAI's chat completions API.
  * Delegates all API calls to the OpenAITextAdapter.
  */
 export class OpenAISummarizeAdapter<
-  TModel extends OpenAISummarizeModel,
+  TModel extends OpenAIChatModel,
 > extends BaseSummarizeAdapter<TModel> {
   readonly kind = 'summarize' as const
   readonly name = 'openai' as const
@@ -133,7 +130,7 @@ export class OpenAISummarizeAdapter<
  * const adapter = createOpenaiSummarize('gpt-4o-mini', "sk-...");
  * ```
  */
-export function createOpenaiSummarize<TModel extends OpenAISummarizeModel>(
+export function createOpenaiSummarize<TModel extends OpenAIChatModel>(
   model: TModel,
   apiKey: string,
   config?: Omit<OpenAISummarizeConfig, 'apiKey'>,
 ): OpenAISummarizeAdapter<TModel> {
@@ -165,7 +162,7 @@ export function createOpenaiSummarize(
  * });
  * ```
  */
-export function openaiSummarize<TModel extends OpenAISummarizeModel>(
+export function openaiSummarize<TModel extends OpenAIChatModel>(
   model: TModel,
   config?: Omit<OpenAISummarizeConfig, 'apiKey'>,
 ): OpenAISummarizeAdapter<TModel> {
diff --git a/packages/typescript/ai-openai/src/adapters/text.ts b/packages/typescript/ai-openai/src/adapters/text.ts
index 8ca7cfd5..e48fc2cd 100644
--- a/packages/typescript/ai-openai/src/adapters/text.ts
+++ b/packages/typescript/ai-openai/src/adapters/text.ts
@@ -5,11 +5,14 @@ import {
   createOpenAIClient,
   generateId,
   getOpenAIApiKeyFromEnv,
+} from '../utils/client'
+import {
   makeOpenAIStructuredOutputCompatible,
   transformNullsToUndefined,
-} from '../utils'
+} from '../utils/schema-converter'
 import type {
   OPENAI_CHAT_MODELS,
+  OpenAIChatModel,
   OpenAIChatModelProviderOptionsByName,
   OpenAIModelInputModalitiesByName,
 } from '../model-meta'
@@ -34,7 +37,7 @@ import type {
   OpenAIImageMetadata,
   OpenAIMessageMetadataByModality,
 } from '../message-types'
-import type { OpenAIClientConfig } from '../utils'
+import type { OpenAIClientConfig } from '../utils/client'
 
 /**
  * Configuration for OpenAI text adapter
@@ -79,7 +82,7 @@ type ResolveInputModalities =
  * Import only what you need for smaller bundle sizes.
  */
 export class OpenAITextAdapter<
-  TModel extends (typeof OPENAI_CHAT_MODELS)[number],
+  TModel extends OpenAIChatModel,
 > extends BaseTextAdapter<
   TModel,
   ResolveProviderOptions<TModel>,
diff --git a/packages/typescript/ai-openai/src/adapters/transcription.ts b/packages/typescript/ai-openai/src/adapters/transcription.ts
index c59b3eca..796bc0b2 100644
--- a/packages/typescript/ai-openai/src/adapters/transcription.ts
+++ b/packages/typescript/ai-openai/src/adapters/transcription.ts
@@ -3,8 +3,8 @@ import {
   createOpenAIClient,
   generateId,
   getOpenAIApiKeyFromEnv,
-} from '../utils'
-import type { OPENAI_TRANSCRIPTION_MODELS } from '../model-meta'
+} from '../utils/client'
+import type { OpenAITranscriptionModel } from '../model-meta'
 import type { OpenAITranscriptionProviderOptions } from '../audio/transcription-provider-options'
 import type {
   TranscriptionOptions,
@@ -12,17 +12,13 @@ import type {
   TranscriptionSegment,
 } from '@tanstack/ai'
 import type OpenAI_SDK from 'openai'
-import type { OpenAIClientConfig } from '../utils'
+import type { OpenAIClientConfig } from '../utils/client'
 
 /**
  * Configuration for OpenAI Transcription adapter
  */
 export interface OpenAITranscriptionConfig extends OpenAIClientConfig {}
 
-/** Model type for OpenAI Transcription */
-export type OpenAITranscriptionModel =
-  (typeof OPENAI_TRANSCRIPTION_MODELS)[number]
-
 /**
  * OpenAI Transcription (Speech-to-Text) Adapter
  *
diff --git a/packages/typescript/ai-openai/src/adapters/tts.ts b/packages/typescript/ai-openai/src/adapters/tts.ts
index c320b526..2f34e50f 100644
--- a/packages/typescript/ai-openai/src/adapters/tts.ts
+++ b/packages/typescript/ai-openai/src/adapters/tts.ts
@@ -3,13 +3,13 @@ import {
   createOpenAIClient,
   generateId,
   getOpenAIApiKeyFromEnv,
-} from '../utils'
+} from '../utils/client'
 import {
   validateAudioInput,
   validateInstructions,
   validateSpeed,
 } from '../audio/audio-provider-options'
-import type { OPENAI_TTS_MODELS } from '../model-meta'
+import type { OpenAITTSModel } from '../model-meta'
 import type {
   OpenAITTSFormat,
   OpenAITTSProviderOptions,
@@ -17,16 +17,13 @@ import type {
 } from '../audio/tts-provider-options'
 import type { TTSOptions, TTSResult } from '@tanstack/ai'
 import type OpenAI_SDK from 'openai'
-import type { OpenAIClientConfig } from '../utils'
+import type { OpenAIClientConfig } from '../utils/client'
 
 /**
  * Configuration for OpenAI TTS adapter
  */
 export interface OpenAITTSConfig extends OpenAIClientConfig {}
 
-/** Model type for OpenAI TTS */
-export type OpenAITTSModel = (typeof OPENAI_TTS_MODELS)[number]
-
 /**
  * OpenAI Text-to-Speech Adapter
  *
diff --git a/packages/typescript/ai-openai/src/adapters/video.ts b/packages/typescript/ai-openai/src/adapters/video.ts
index bf0693c5..c0c2aaa2 100644
--- a/packages/typescript/ai-openai/src/adapters/video.ts
+++ b/packages/typescript/ai-openai/src/adapters/video.ts
@@ -1,12 +1,12 @@
 import { BaseVideoAdapter } from '@tanstack/ai/adapters'
-import { createOpenAIClient, getOpenAIApiKeyFromEnv } from '../utils'
+import { createOpenAIClient, getOpenAIApiKeyFromEnv } from '../utils/client'
 import {
   toApiSeconds,
   validateVideoSeconds,
   validateVideoSize,
 } from '../video/video-provider-options'
 import type { VideoModel } from 'openai/resources'
-import type { OPENAI_VIDEO_MODELS } from '../model-meta'
+import type { OpenAIVideoModel } from '../model-meta'
 import type {
   OpenAIVideoModelProviderOptionsByName,
   OpenAIVideoProviderOptions,
@@ -18,7 +18,7 @@ import type {
   VideoUrlResult,
 } from '@tanstack/ai'
 import type OpenAI_SDK from 'openai'
-import type { OpenAIClientConfig } from '../utils'
+import type { OpenAIClientConfig } from '../utils/client'
 
 /**
  * Configuration for OpenAI video adapter.
@@ -27,9 +27,6 @@ import type { OpenAIClientConfig } from '../utils'
  */
 export interface OpenAIVideoConfig extends OpenAIClientConfig {}
 
-/** Model type for OpenAI Video */
-export type OpenAIVideoModel = (typeof OPENAI_VIDEO_MODELS)[number]
-
 /**
  * OpenAI Video Generation Adapter
  *
diff --git a/packages/typescript/ai-openai/src/index.ts b/packages/typescript/ai-openai/src/index.ts
index cf2759f2..ffba8da8 100644
--- a/packages/typescript/ai-openai/src/index.ts
+++ b/packages/typescript/ai-openai/src/index.ts
@@ -78,6 +78,11 @@ export type { OpenAITranscriptionProviderOptions } from './audio/transcription-p
 export type {
   OpenAIChatModelProviderOptionsByName,
   OpenAIModelInputModalitiesByName,
+  OpenAIChatModel,
+  OpenAIImageModel,
+  OpenAIVideoModel,
+  OpenAITTSModel,
+  OpenAITranscriptionModel,
 } from './model-meta'
 export {
   OPENAI_IMAGE_MODELS,
@@ -94,3 +99,4 @@ export type {
   OpenAIDocumentMetadata,
   OpenAIMessageMetadataByModality,
 } from './message-types'
+export type { OpenAIClientConfig } from './utils/client'
diff --git a/packages/typescript/ai-openai/src/model-meta.ts b/packages/typescript/ai-openai/src/model-meta.ts
index d6ed3312..3557a9b4 100644
--- a/packages/typescript/ai-openai/src/model-meta.ts
+++ b/packages/typescript/ai-openai/src/model-meta.ts
@@ -1696,6 +1698,8 @@ export const OPENAI_CHAT_MODELS = [
   O1_PRO.name,
 ] as const
 
+export type OpenAIChatModel = (typeof OPENAI_CHAT_MODELS)[number]
+
 // Image generation models (based on endpoints: "image-generation" or "image-edit")
 export const OPENAI_IMAGE_MODELS = [
   GPT_IMAGE_1.name,
@@ -1704,6 +1706,8 @@ export const OPENAI_IMAGE_MODELS = [
   DALL_E_2.name,
 ] as const
 
+export type OpenAIImageModel = (typeof OPENAI_IMAGE_MODELS)[number]
+
 // Audio models (based on endpoints: "transcription", "speech_generation", or "realtime")
 /* const OPENAI_AUDIO_MODELS = [
   // Transcription models
@@ -1734,6 +1738,8 @@ export const OPENAI_IMAGE_MODELS = [
 */
 export const OPENAI_VIDEO_MODELS = [SORA2.name, SORA2_PRO.name] as const
 
+export type OpenAIVideoModel = (typeof OPENAI_VIDEO_MODELS)[number]
+
 /**
  * Text-to-speech models (based on endpoints: "speech_generation")
  */
@@ -1743,6 +1749,8 @@ export const OPENAI_TTS_MODELS = [
   'gpt-4o-audio-preview',
 ] as const
 
+export type OpenAITTSModel = (typeof OPENAI_TTS_MODELS)[number]
+
 /**
  * Transcription models (based on endpoints: "transcription")
  */
@@ -1753,6 +1761,9 @@ export const OPENAI_TRANSCRIPTION_MODELS = [
   'gpt-4o-transcribe-diarize',
 ] as const
 
+export type OpenAITranscriptionModel =
+  (typeof OPENAI_TRANSCRIPTION_MODELS)[number]
+
 /**
  * Type-only map from chat model name to its provider options type.
  * Used by the core AI types (via the adapter) to narrow
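
The model-name unions added in model-meta.ts above are re-exported from the package root by the index.ts hunk earlier in this patch. A minimal sketch of how a consumer could use them for compile-time model validation; the `pickChatModel` helper is hypothetical and not part of the package, and the model names used are ones that already appear in this diff:

```ts
// Sketch only: `pickChatModel` is an illustrative helper, not part of the package.
import type {
  OpenAIChatModel,
  OpenAITranscriptionModel,
} from '@tanstack/ai-openai'

// Accepts only members of OPENAI_CHAT_MODELS, rejecting arbitrary strings at compile time.
function pickChatModel(preferred: OpenAIChatModel): OpenAIChatModel {
  return preferred
}

const chatModel = pickChatModel('gpt-4o-mini')
const transcriptionModel: OpenAITranscriptionModel = 'gpt-4o-transcribe-diarize'
// pickChatModel('some-unknown-model') // type error
```
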
diff --git a/packages/typescript/ai-openai/src/utils/client.ts b/packages/typescript/ai-openai/src/utils/client.ts
index 828541e2..3915e2ea 100644
--- a/packages/typescript/ai-openai/src/utils/client.ts
+++ b/packages/typescript/ai-openai/src/utils/client.ts
@@ -1,20 +1,15 @@
 import OpenAI_SDK from 'openai'
+import type { ClientOptions } from 'openai'
 
-export interface OpenAIClientConfig {
+export interface OpenAIClientConfig extends ClientOptions {
   apiKey: string
-  organization?: string
-  baseURL?: string
 }
 
 /**
  * Creates an OpenAI SDK client instance
  */
 export function createOpenAIClient(config: OpenAIClientConfig): OpenAI_SDK {
-  return new OpenAI_SDK({
-    apiKey: config.apiKey,
-    organization: config.organization,
-    baseURL: config.baseURL,
-  })
+  return new OpenAI_SDK(config)
 }
 
 /**
diff --git a/packages/typescript/ai-openai/src/utils/index.ts b/packages/typescript/ai-openai/src/utils/index.ts
deleted file mode 100644
index 8314cf5a..00000000
--- a/packages/typescript/ai-openai/src/utils/index.ts
+++ /dev/null
@@ -1,10 +0,0 @@
-export {
-  createOpenAIClient,
-  getOpenAIApiKeyFromEnv,
-  generateId,
-  type OpenAIClientConfig,
-} from './client'
-export {
-  makeOpenAIStructuredOutputCompatible,
-  transformNullsToUndefined,
-} from './schema-converter'
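
Since `OpenAIClientConfig` now extends the OpenAI SDK's `ClientOptions` and `createOpenAIClient` forwards the whole object to `new OpenAI_SDK(config)`, any option the `openai` package accepts (for example `baseURL`, `organization`, `timeout`, `maxRetries`, `defaultHeaders`) can ride along in an adapter's config. A hedged sketch, assuming `createOpenaiSummarize` is re-exported from the package root as its JSDoc example suggests; the option values are placeholders:

```ts
// Sketch only: assumes the root re-export of createOpenaiSummarize.
// OpenAIClientConfig itself is re-exported from the package root in this patch.
import { createOpenaiSummarize } from '@tanstack/ai-openai'
import type { OpenAIClientConfig } from '@tanstack/ai-openai'

// Everything besides apiKey now comes from the SDK's ClientOptions and is
// forwarded untouched to `new OpenAI_SDK(config)`.
const extraOptions: Omit<OpenAIClientConfig, 'apiKey'> = {
  baseURL: 'https://my-gateway.example.com/v1',
  organization: 'org-placeholder',
  timeout: 30_000,
  maxRetries: 1,
  defaultHeaders: { 'x-request-source': 'tanstack-ai' },
}

const summarizer = createOpenaiSummarize('gpt-4o-mini', 'sk-...', extraOptions)
```
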