From e2619a23506a3611087634eb7abef0c3a1ed1c3a Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Wed, 28 May 2025 20:56:00 +0100 Subject: [PATCH 1/3] refactor(functions): rename function helper methods to include tools --- examples/tool-call-helpers-zod.ts | 6 ++-- examples/tool-call-helpers.ts | 6 ++-- src/lib/AbstractChatCompletionRunner.ts | 34 ++++++++++---------- src/lib/ChatCompletionRunner.ts | 9 +----- src/lib/ChatCompletionStreamingRunner.ts | 9 +----- src/resources/beta/chat/completions.ts | 12 ++----- tests/lib/ChatCompletionRunFunctions.test.ts | 34 ++++++++++---------- 7 files changed, 46 insertions(+), 64 deletions(-) diff --git a/examples/tool-call-helpers-zod.ts b/examples/tool-call-helpers-zod.ts index 0d31b4933..4ac1bc41c 100755 --- a/examples/tool-call-helpers-zod.ts +++ b/examples/tool-call-helpers-zod.ts @@ -73,8 +73,10 @@ async function main() { ], }) .on('message', (msg) => console.log('msg', msg)) - .on('functionCall', (functionCall) => console.log('functionCall', functionCall)) - .on('functionCallResult', (functionCallResult) => console.log('functionCallResult', functionCallResult)) + .on('finalFunctionToolCall', (functionCall) => console.log('functionCall', functionCall)) + .on('finalFunctionToolCallResult', (functionCallResult) => + console.log('functionCallResult', functionCallResult), + ) .on('content', (diff) => process.stdout.write(diff)); const result = await runner.finalChatCompletion(); diff --git a/examples/tool-call-helpers.ts b/examples/tool-call-helpers.ts index 21b86f8fb..a680f6da9 100755 --- a/examples/tool-call-helpers.ts +++ b/examples/tool-call-helpers.ts @@ -82,8 +82,10 @@ async function main() { ], }) .on('message', (msg) => console.log('msg', msg)) - .on('functionCall', (functionCall) => console.log('functionCall', functionCall)) - .on('functionCallResult', (functionCallResult) => console.log('functionCallResult', functionCallResult)) + .on('functionToolCall', (functionCall) => console.log('functionCall', functionCall)) + .on('functionToolCallResult', (functionCallResult) => + console.log('functionCallResult', functionCallResult), + ) .on('content', (diff) => process.stdout.write(diff)); const result = await runner.finalChatCompletion(); diff --git a/src/lib/AbstractChatCompletionRunner.ts b/src/lib/AbstractChatCompletionRunner.ts index 622112257..7a3286cc1 100644 --- a/src/lib/AbstractChatCompletionRunner.ts +++ b/src/lib/AbstractChatCompletionRunner.ts @@ -12,7 +12,7 @@ import { type RunnableFunction, isRunnableFunctionWithParse, type BaseFunctionsArgs, - RunnableToolFunction, + type RunnableToolFunction, } from './RunnableFunction'; import type { ChatCompletionToolRunnerParams } from './ChatCompletionRunner'; import type { ChatCompletionStreamingToolRunnerParams } from './ChatCompletionStreamingRunner'; @@ -60,11 +60,11 @@ export class AbstractChatCompletionRunner< this._emit('message', message); if (isToolMessage(message) && message.content) { // Note, this assumes that {role: 'tool', content: …} is always the result of a call of tool of type=function. 
- this._emit('functionCallResult', message.content as string); + this._emit('functionToolCallResult', message.content as string); } else if (isAssistantMessage(message) && message.tool_calls) { for (const tool_call of message.tool_calls) { if (tool_call.type === 'function') { - this._emit('functionCall', tool_call.function); + this._emit('functionToolCall', tool_call.function); } } } @@ -121,7 +121,7 @@ export class AbstractChatCompletionRunner< return this.#getFinalMessage(); } - #getFinalFunctionCall(): ChatCompletionMessageToolCall.Function | undefined { + #getFinalFunctionToolCall(): ChatCompletionMessageToolCall.Function | undefined { for (let i = this.messages.length - 1; i >= 0; i--) { const message = this.messages[i]; if (isAssistantMessage(message) && message?.tool_calls?.length) { @@ -136,12 +136,12 @@ export class AbstractChatCompletionRunner< * @returns a promise that resolves with the content of the final FunctionCall, or rejects * if an error occurred or the stream ended prematurely without producing a ChatCompletionMessage. */ - async finalFunctionCall(): Promise { + async finalFunctionToolCall(): Promise { await this.done(); - return this.#getFinalFunctionCall(); + return this.#getFinalFunctionToolCall(); } - #getFinalFunctionCallResult(): string | undefined { + #getFinalFunctionToolCallResult(): string | undefined { for (let i = this.messages.length - 1; i >= 0; i--) { const message = this.messages[i]; if ( @@ -161,9 +161,9 @@ export class AbstractChatCompletionRunner< return; } - async finalFunctionCallResult(): Promise { + async finalFunctionToolCallResult(): Promise { await this.done(); - return this.#getFinalFunctionCallResult(); + return this.#getFinalFunctionToolCallResult(); } #calculateTotalUsage(): CompletionUsage { @@ -201,11 +201,11 @@ export class AbstractChatCompletionRunner< const finalContent = this.#getFinalContent(); if (finalContent) this._emit('finalContent', finalContent); - const finalFunctionCall = this.#getFinalFunctionCall(); - if (finalFunctionCall) this._emit('finalFunctionCall', finalFunctionCall); + const finalFunctionCall = this.#getFinalFunctionToolCall(); + if (finalFunctionCall) this._emit('finalFunctionToolCall', finalFunctionCall); - const finalFunctionCallResult = this.#getFinalFunctionCallResult(); - if (finalFunctionCallResult != null) this._emit('finalFunctionCallResult', finalFunctionCallResult); + const finalFunctionCallResult = this.#getFinalFunctionToolCallResult(); + if (finalFunctionCallResult != null) this._emit('finalFunctionToolCallResult', finalFunctionCallResult); if (this._chatCompletions.some((c) => c.usage)) { this._emit('totalUsage', this.#calculateTotalUsage()); @@ -390,14 +390,14 @@ export class AbstractChatCompletionRunner< } export interface AbstractChatCompletionRunnerEvents extends BaseEvents { - functionCall: (functionCall: ChatCompletionMessageToolCall.Function) => void; + functionToolCall: (functionCall: ChatCompletionMessageToolCall.Function) => void; message: (message: ChatCompletionMessageParam) => void; chatCompletion: (completion: ChatCompletion) => void; finalContent: (contentSnapshot: string) => void; finalMessage: (message: ChatCompletionMessageParam) => void; finalChatCompletion: (completion: ChatCompletion) => void; - finalFunctionCall: (functionCall: ChatCompletionMessageToolCall.Function) => void; - functionCallResult: (content: string) => void; - finalFunctionCallResult: (content: string) => void; + finalFunctionToolCall: (functionCall: ChatCompletionMessageToolCall.Function) => void; + 
functionToolCallResult: (content: string) => void; + finalFunctionToolCallResult: (content: string) => void; totalUsage: (usage: CompletionUsage) => void; } diff --git a/src/lib/ChatCompletionRunner.ts b/src/lib/ChatCompletionRunner.ts index 1318d9eae..a5edaf741 100644 --- a/src/lib/ChatCompletionRunner.ts +++ b/src/lib/ChatCompletionRunner.ts @@ -2,7 +2,7 @@ import { type ChatCompletionMessageParam, type ChatCompletionCreateParamsNonStreaming, } from '../resources/chat/completions'; -import { type RunnableFunctions, type BaseFunctionsArgs, RunnableTools } from './RunnableFunction'; +import { type BaseFunctionsArgs, RunnableTools } from './RunnableFunction'; import { AbstractChatCompletionRunner, AbstractChatCompletionRunnerEvents, @@ -16,13 +16,6 @@ export interface ChatCompletionRunnerEvents extends AbstractChatCompletionRunner content: (content: string) => void; } -export type ChatCompletionFunctionRunnerParams = Omit< - ChatCompletionCreateParamsNonStreaming, - 'functions' -> & { - functions: RunnableFunctions; -}; - export type ChatCompletionToolRunnerParams = Omit< ChatCompletionCreateParamsNonStreaming, 'tools' diff --git a/src/lib/ChatCompletionStreamingRunner.ts b/src/lib/ChatCompletionStreamingRunner.ts index cbacdce63..eb8fcc357 100644 --- a/src/lib/ChatCompletionStreamingRunner.ts +++ b/src/lib/ChatCompletionStreamingRunner.ts @@ -4,7 +4,7 @@ import { } from '../resources/chat/completions'; import { RunnerOptions, type AbstractChatCompletionRunnerEvents } from './AbstractChatCompletionRunner'; import { type ReadableStream } from '../internal/shim-types'; -import { RunnableTools, type BaseFunctionsArgs, type RunnableFunctions } from './RunnableFunction'; +import { RunnableTools, type BaseFunctionsArgs } from './RunnableFunction'; import { ChatCompletionSnapshot, ChatCompletionStream } from './ChatCompletionStream'; import OpenAI from '../index'; import { AutoParseableTool } from '../lib/parser'; @@ -14,13 +14,6 @@ export interface ChatCompletionStreamEvents extends AbstractChatCompletionRunner chunk: (chunk: ChatCompletionChunk, snapshot: ChatCompletionSnapshot) => void; } -export type ChatCompletionStreamingFunctionRunnerParams = Omit< - ChatCompletionCreateParamsStreaming, - 'functions' -> & { - functions: RunnableFunctions; -}; - export type ChatCompletionStreamingToolRunnerParams = Omit< ChatCompletionCreateParamsStreaming, 'tools' diff --git a/src/resources/beta/chat/completions.ts b/src/resources/beta/chat/completions.ts index 2b3f619e0..dd9a02521 100644 --- a/src/resources/beta/chat/completions.ts +++ b/src/resources/beta/chat/completions.ts @@ -15,13 +15,8 @@ import { } from '../../chat/completions'; import { ExtractParsedContentFromParams, parseChatCompletion, validateInputTools } from '../../../lib/parser'; +export { ChatCompletionStreamingRunner } from '../../../lib/ChatCompletionStreamingRunner'; export { - ChatCompletionStreamingRunner, - type ChatCompletionStreamingFunctionRunnerParams, -} from '../../../lib/ChatCompletionStreamingRunner'; -export { - type RunnableFunction, - type RunnableFunctions, type RunnableFunctionWithParse, type RunnableFunctionWithoutParse, ParsingToolFunction, @@ -29,10 +24,7 @@ export { export { type ChatCompletionToolRunnerParams } from '../../../lib/ChatCompletionRunner'; export { type ChatCompletionStreamingToolRunnerParams } from '../../../lib/ChatCompletionStreamingRunner'; export { ChatCompletionStream, type ChatCompletionStreamParams } from '../../../lib/ChatCompletionStream'; -export { - ChatCompletionRunner, - type 
ChatCompletionFunctionRunnerParams, -} from '../../../lib/ChatCompletionRunner'; +export { ChatCompletionRunner } from '../../../lib/ChatCompletionRunner'; import { RequestOptions } from '../../../internal/request-options'; import { type APIPromise } from '../../../index'; diff --git a/tests/lib/ChatCompletionRunFunctions.test.ts b/tests/lib/ChatCompletionRunFunctions.test.ts index 51f648f55..ffe17fb98 100644 --- a/tests/lib/ChatCompletionRunFunctions.test.ts +++ b/tests/lib/ChatCompletionRunFunctions.test.ts @@ -151,13 +151,13 @@ class RunnerListener { .on('content', (content) => this.contents.push(content)) .on('message', (message) => this.messages.push(message)) .on('chatCompletion', (completion) => this.chatCompletions.push(completion)) - .on('functionCall', (functionCall) => this.functionCalls.push(functionCall)) - .on('functionCallResult', (result) => this.functionCallResults.push(result)) + .on('functionToolCall', (functionCall) => this.functionCalls.push(functionCall)) + .on('functionToolCallResult', (result) => this.functionCallResults.push(result)) .on('finalContent', (content) => (this.finalContent = content)) .on('finalMessage', (message) => (this.finalMessage = message)) .on('finalChatCompletion', (completion) => (this.finalChatCompletion = completion)) - .on('finalFunctionCall', (functionCall) => (this.finalFunctionCall = functionCall)) - .on('finalFunctionCallResult', (result) => (this.finalFunctionCallResult = result)) + .on('finalFunctionToolCall', (functionCall) => (this.finalFunctionCall = functionCall)) + .on('finalFunctionToolCallResult', (result) => (this.finalFunctionCallResult = result)) .on('totalUsage', (usage) => (this.totalUsage = usage)) .on('error', (error) => (this.error = error)) .on('abort', (error) => ((this.error = error), (this.gotAbort = true))) @@ -175,8 +175,8 @@ class RunnerListener { await expect(this.runner.finalChatCompletion()).rejects.toThrow(error); await expect(this.runner.finalMessage()).rejects.toThrow(error); await expect(this.runner.finalContent()).rejects.toThrow(error); - await expect(this.runner.finalFunctionCall()).rejects.toThrow(error); - await expect(this.runner.finalFunctionCallResult()).rejects.toThrow(error); + await expect(this.runner.finalFunctionToolCall()).rejects.toThrow(error); + await expect(this.runner.finalFunctionToolCallResult()).rejects.toThrow(error); await expect(this.runner.totalUsage()).rejects.toThrow(error); await expect(this.runner.done()).rejects.toThrow(error); } else { @@ -214,11 +214,11 @@ class RunnerListener { expect(this.finalChatCompletion).toEqual(this.chatCompletions[this.chatCompletions.length - 1]); expect(await this.runner.finalChatCompletion()).toEqual(this.finalChatCompletion); expect(this.finalFunctionCall).toEqual(this.functionCalls[this.functionCalls.length - 1]); - expect(await this.runner.finalFunctionCall()).toEqual(this.finalFunctionCall); + expect(await this.runner.finalFunctionToolCall()).toEqual(this.finalFunctionCall); expect(this.finalFunctionCallResult).toEqual( this.functionCallResults[this.functionCallResults.length - 1], ); - expect(await this.runner.finalFunctionCallResult()).toEqual(this.finalFunctionCallResult); + expect(await this.runner.finalFunctionToolCallResult()).toEqual(this.finalFunctionCallResult); expect(this.chatCompletions).toEqual(this.runner.allChatCompletions()); expect(this.messages).toEqual(this.runner.messages.slice(-this.messages.length)); if (this.chatCompletions.some((c) => c.usage)) { @@ -266,13 +266,13 @@ class StreamingRunnerListener { .on('content', 
(delta, snapshot) => this.eventContents.push([delta, snapshot])) .on('message', (message) => this.eventMessages.push(message)) .on('chatCompletion', (completion) => this.eventChatCompletions.push(completion)) - .on('functionCall', (functionCall) => this.eventFunctionCalls.push(functionCall)) - .on('functionCallResult', (result) => this.eventFunctionCallResults.push(result)) + .on('functionToolCall', (functionCall) => this.eventFunctionCalls.push(functionCall)) + .on('functionToolCallResult', (result) => this.eventFunctionCallResults.push(result)) .on('finalContent', (content) => (this.finalContent = content)) .on('finalMessage', (message) => (this.finalMessage = message)) .on('finalChatCompletion', (completion) => (this.finalChatCompletion = completion)) - .on('finalFunctionCall', (functionCall) => (this.finalFunctionCall = functionCall)) - .on('finalFunctionCallResult', (result) => (this.finalFunctionCallResult = result)) + .on('finalFunctionToolCall', (functionCall) => (this.finalFunctionCall = functionCall)) + .on('finalFunctionToolCallResult', (result) => (this.finalFunctionCallResult = result)) .on('error', (error) => (this.error = error)) .on('abort', (abort) => (this.error = abort)) .on('end', () => (this.gotEnd = true)); @@ -285,8 +285,8 @@ class StreamingRunnerListener { await expect(this.runner.finalChatCompletion()).rejects.toThrow(error); await expect(this.runner.finalMessage()).rejects.toThrow(error); await expect(this.runner.finalContent()).rejects.toThrow(error); - await expect(this.runner.finalFunctionCall()).rejects.toThrow(error); - await expect(this.runner.finalFunctionCallResult()).rejects.toThrow(error); + await expect(this.runner.finalFunctionToolCall()).rejects.toThrow(error); + await expect(this.runner.finalFunctionToolCallResult()).rejects.toThrow(error); await expect(this.runner.done()).rejects.toThrow(error); } else { expect(this.error).toBeUndefined(); @@ -318,11 +318,11 @@ class StreamingRunnerListener { expect(this.finalChatCompletion).toEqual(this.eventChatCompletions[this.eventChatCompletions.length - 1]); expect(await this.runner.finalChatCompletion()).toEqual(this.finalChatCompletion); expect(this.finalFunctionCall).toEqual(this.eventFunctionCalls[this.eventFunctionCalls.length - 1]); - expect(await this.runner.finalFunctionCall()).toEqual(this.finalFunctionCall); + expect(await this.runner.finalFunctionToolCall()).toEqual(this.finalFunctionCall); expect(this.finalFunctionCallResult).toEqual( this.eventFunctionCallResults[this.eventFunctionCallResults.length - 1], ); - expect(await this.runner.finalFunctionCallResult()).toEqual(this.finalFunctionCallResult); + expect(await this.runner.finalFunctionToolCallResult()).toEqual(this.finalFunctionCallResult); expect(this.eventChatCompletions).toEqual(this.runner.allChatCompletions()); expect(this.eventMessages).toEqual(this.runner.messages.slice(-this.eventMessages.length)); if (error) { @@ -1603,7 +1603,7 @@ describe('resource completions', () => { }, { signal: controller.signal }, ); - runner.on('functionCallResult', () => controller.abort()); + runner.on('functionToolCallResult', () => controller.abort()); const listener = new StreamingRunnerListener(runner); await handleRequest(async function* (request): AsyncIterable { From bb6413f573c23a03fd8f959083ebc37dcfb3bde8 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Wed, 28 May 2025 21:24:29 +0100 Subject: [PATCH 2/3] docs(migration): mention function renames --- MIGRATION.md | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff 
--git a/MIGRATION.md b/MIGRATION.md index 3560899b4..28b58a93a 100644 --- a/MIGRATION.md +++ b/MIGRATION.md @@ -382,6 +382,42 @@ If you were importing these classes at runtime, you'll need to switch to importi The deprecated `client.beta.chat.completions.runFunctions()` method and all of it's surrounding types have been removed, instead you should use `client.beta.chat.completions.runTools()`. +### `.runTools()` event / method names + +To better align with the tool-based API, several event names in the ChatCompletionRunner have been renamed: + +```ts +// Before +openai.beta.chat.completions + .runTools({ + // .. + }) + .on('functionCall', (functionCall) => console.log('functionCall', functionCall)) + .on('functionCallResult', (functionCallResult) => console.log('functionCallResult', functionCallResult)) + .on('finalFunctionCall', (functionCall) => console.log('finalFunctionCall', functionCall)) + .on('finalFunctionCallResult', (result) => console.log('finalFunctionCallResult', result)); + +// After +openai.beta.chat.completions + .runTools({ + // .. + }) + .on('functionToolCall', (functionCall) => console.log('functionCall', functionCall)) + .on('functionToolCallResult', (functionCallResult) => console.log('functionCallResult', functionCallResult)) + .on('finalFunctionToolCall', (functionCall) => console.log('finalFunctionCall', functionCall)) + .on('finalFunctionToolCallResult', (result) => console.log('finalFunctionCallResult', result)); +``` + +The following event names have been changed: +- `functionCall` → `functionToolCall` +- `functionCallResult` → `functionToolCallResult` +- `finalFunctionCall` → `finalFunctionToolCall` +- `finalFunctionCallResult` → `finalFunctionToolCallResult` + +Additionally, the following methods have been renamed: +- `runner.finalFunctionCall()` → `runner.finalFunctionToolCall()` +- `runner.finalFunctionCallResult()` → `runner.finalFunctionToolCallResult()` + ### `openai/src` directory removed Previously IDEs may have auto-completed imports from the `openai/src` directory, however this From 88f3c0b0c53972b51efba0df3a358326e4ab7e42 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Wed, 28 May 2025 21:42:40 +0100 Subject: [PATCH 3/3] feat(client): promote beta completions methods to GA --- MIGRATION.md | 38 +++++- .../node-ts-cjs-auto/tests/test.ts | 4 +- examples/logprobs.ts | 2 +- examples/parsing-run-tools.ts | 2 +- examples/parsing-stream.ts | 2 +- examples/parsing-tools-stream.ts | 2 +- examples/parsing-tools.ts | 2 +- examples/parsing.ts | 2 +- examples/stream-to-client-express.ts | 2 +- examples/stream-to-client-next.ts | 2 +- examples/stream.ts | 2 +- examples/tool-call-helpers-zod.ts | 2 +- examples/tool-call-helpers.ts | 2 +- examples/ui-generation.ts | 2 +- helpers.md | 12 +- src/helpers/zod.ts | 2 +- src/lib/AbstractChatCompletionRunner.ts | 2 +- src/lib/ChatCompletionStream.ts | 2 +- src/lib/parser.ts | 2 +- src/resources/beta/beta.ts | 5 - src/resources/beta/chat/chat.ts | 12 -- src/resources/beta/chat/completions.ts | 118 ------------------ src/resources/beta/chat/index.ts | 4 - src/resources/beta/index.ts | 1 - src/resources/chat/completions/completions.ts | 106 ++++++++++++++++ src/resources/chat/completions/index.ts | 1 + tests/lib/ChatCompletionRunFunctions.test.ts | 42 +++---- tests/lib/ChatCompletionStream.test.ts | 6 +- tests/lib/parser.test.ts | 12 +- 29 files changed, 193 insertions(+), 200 deletions(-) delete mode 100644 src/resources/beta/chat/chat.ts delete mode 100644 src/resources/beta/chat/completions.ts delete mode 100644 
src/resources/beta/chat/index.ts diff --git a/MIGRATION.md b/MIGRATION.md index 28b58a93a..9d48435e3 100644 --- a/MIGRATION.md +++ b/MIGRATION.md @@ -315,7 +315,7 @@ The `openai/shims` imports have been removed. Your global types must now be [cor Previously, the following code would just output a warning to the console, now it will throw an error. ```ts -const completion = await client.beta.chat.completions.parse({ +const completion = await client.chat.completions.parse({ // ... response_format: zodResponseFormat( z.object({ @@ -329,7 +329,7 @@ const completion = await client.beta.chat.completions.parse({ You must mark optional properties with `.nullable()` as purely optional fields are not supported by the [API](https://platform.openai.com/docs/guides/structured-outputs?api-mode=responses#all-fields-must-be-required). ```ts -const completion = await client.beta.chat.completions.parse({ +const completion = await client.chat.completions.parse({ // ... response_format: zodResponseFormat( z.object({ @@ -377,10 +377,36 @@ export type FineTuningJobsPage = CursorPage; If you were importing these classes at runtime, you'll need to switch to importing the base class or only import them at the type-level. +### Beta chat namespace removed + +The `beta.chat` namespace has been removed. All chat completion methods that were previously in beta have been moved to the main `chat.completions` namespace: + +```ts +// Before +client.beta.chat.completions.parse() +client.beta.chat.completions.stream() +client.beta.chat.completions.runTools() + +// After +client.chat.completions.parse() +client.chat.completions.stream() +client.chat.completions.runTools() +``` + +Additionally, related types have been moved: + +```ts +// Before +import { ParsedChatCompletion, ParsedChoice, ParsedFunction } from 'openai/resources/beta/chat/completions'; + +// After +import { ParsedChatCompletion, ParsedChoice, ParsedFunction } from 'openai/resources/chat/completions'; +``` + ### Removed deprecated `.runFunctions` methods -The deprecated `client.beta.chat.completions.runFunctions()` method and all of it's surrounding types have been removed, instead you should use -`client.beta.chat.completions.runTools()`. +The deprecated `client.chat.completions.runFunctions()` method and all of it's surrounding types have been removed, instead you should use +`client.chat.completions.runTools()`. ### `.runTools()` event / method names @@ -388,7 +414,7 @@ To better align with the tool-based API, several event names in the ChatCompleti ```ts // Before -openai.beta.chat.completions +openai.chat.completions .runTools({ // .. }) @@ -398,7 +424,7 @@ openai.beta.chat.completions .on('finalFunctionCallResult', (result) => console.log('finalFunctionCallResult', result)); // After -openai.beta.chat.completions +openai.chat.completions .runTools({ // .. 
}) diff --git a/ecosystem-tests/node-ts-cjs-auto/tests/test.ts b/ecosystem-tests/node-ts-cjs-auto/tests/test.ts index 7782ae1d2..e9a7f94e1 100644 --- a/ecosystem-tests/node-ts-cjs-auto/tests/test.ts +++ b/ecosystem-tests/node-ts-cjs-auto/tests/test.ts @@ -76,7 +76,7 @@ it(`ChatCompletionStream works`, async function () { let finalMessage: OpenAI.Chat.ChatCompletionMessageParam | undefined; let finalChatCompletion: OpenAI.Chat.ChatCompletion | undefined; - const stream = client.beta.chat.completions + const stream = client.chat.completions .stream({ model: 'gpt-4', messages: [{ role: 'user', content: 'Say this is a test' }], @@ -120,7 +120,7 @@ it(`aborting ChatCompletionStream works`, async function () { let emittedError: any; let caughtError: any; const controller = new AbortController(); - const stream = client.beta.chat.completions + const stream = client.chat.completions .stream( { model: 'gpt-4', diff --git a/examples/logprobs.ts b/examples/logprobs.ts index 8cf274a14..47dcad259 100755 --- a/examples/logprobs.ts +++ b/examples/logprobs.ts @@ -6,7 +6,7 @@ import OpenAI from 'openai'; const openai = new OpenAI(); async function main() { - const stream = await openai.beta.chat.completions + const stream = await openai.chat.completions .stream({ model: 'gpt-4', messages: [{ role: 'user', content: 'Say this is a test' }], diff --git a/examples/parsing-run-tools.ts b/examples/parsing-run-tools.ts index a3c544c3d..f94e932ad 100644 --- a/examples/parsing-run-tools.ts +++ b/examples/parsing-run-tools.ts @@ -28,7 +28,7 @@ const Condition = z.object({ const openai = new OpenAI(); async function main() { - const runner = openai.beta.chat.completions + const runner = openai.chat.completions .runTools({ model: 'gpt-4o-2024-08-06', messages: [{ role: 'user', content: `What are the last 10 orders?` }], diff --git a/examples/parsing-stream.ts b/examples/parsing-stream.ts index d9eda0a4b..08c95d91b 100644 --- a/examples/parsing-stream.ts +++ b/examples/parsing-stream.ts @@ -15,7 +15,7 @@ const MathResponse = z.object({ async function main() { const client = new OpenAI(); - const stream = client.beta.chat.completions + const stream = client.chat.completions .stream({ model: 'gpt-4o-2024-08-06', messages: [ diff --git a/examples/parsing-tools-stream.ts b/examples/parsing-tools-stream.ts index c527abd00..971e22c40 100644 --- a/examples/parsing-tools-stream.ts +++ b/examples/parsing-tools-stream.ts @@ -12,7 +12,7 @@ async function main() { const client = new OpenAI(); const refusal = process.argv.includes('refusal'); - const stream = client.beta.chat.completions + const stream = client.chat.completions .stream({ model: 'gpt-4o-2024-08-06', messages: [ diff --git a/examples/parsing-tools.ts b/examples/parsing-tools.ts index 8eaea3807..e570d8b6b 100644 --- a/examples/parsing-tools.ts +++ b/examples/parsing-tools.ts @@ -38,7 +38,7 @@ const Query = z.object({ async function main() { const client = new OpenAI(); - const completion = await client.beta.chat.completions.parse({ + const completion = await client.chat.completions.parse({ model: 'gpt-4o-2024-08-06', messages: [ { diff --git a/examples/parsing.ts b/examples/parsing.ts index d92cc2720..3e71f74f3 100644 --- a/examples/parsing.ts +++ b/examples/parsing.ts @@ -15,7 +15,7 @@ const MathResponse = z.object({ async function main() { const client = new OpenAI(); - const completion = await client.beta.chat.completions.parse({ + const completion = await client.chat.completions.parse({ model: 'gpt-4o-2024-08-06', messages: [ { role: 'system', content: 'You are a 
helpful math tutor.' }, diff --git a/examples/stream-to-client-express.ts b/examples/stream-to-client-express.ts index f688f42e7..22bf210c1 100755 --- a/examples/stream-to-client-express.ts +++ b/examples/stream-to-client-express.ts @@ -30,7 +30,7 @@ app.post('/', async (req: Request, res: Response) => { try { console.log('Received request:', req.body); - const stream = openai.beta.chat.completions.stream({ + const stream = openai.chat.completions.stream({ model: 'gpt-3.5-turbo', stream: true, messages: [{ role: 'user', content: req.body }], diff --git a/examples/stream-to-client-next.ts b/examples/stream-to-client-next.ts index c5c1ff317..5ea91b2d7 100755 --- a/examples/stream-to-client-next.ts +++ b/examples/stream-to-client-next.ts @@ -25,7 +25,7 @@ export const runtime = 'edge'; export default async function handler(req: NextApiRequest, res: NextApiResponse) { const openai = new OpenAI(); - const stream = openai.beta.chat.completions.stream({ + const stream = openai.chat.completions.stream({ model: 'gpt-3.5-turbo', stream: true, // @ts-ignore diff --git a/examples/stream.ts b/examples/stream.ts index 86dbde8b8..a5210e643 100644 --- a/examples/stream.ts +++ b/examples/stream.ts @@ -5,7 +5,7 @@ import OpenAI from 'openai'; const openai = new OpenAI(); async function main() { - const runner = openai.beta.chat.completions + const runner = openai.chat.completions .stream({ model: 'gpt-3.5-turbo', messages: [{ role: 'user', content: 'Say this is a test' }], diff --git a/examples/tool-call-helpers-zod.ts b/examples/tool-call-helpers-zod.ts index 4ac1bc41c..b4e1aedcf 100755 --- a/examples/tool-call-helpers-zod.ts +++ b/examples/tool-call-helpers-zod.ts @@ -34,7 +34,7 @@ async function getBook({ id }: GetParams) { } async function main() { - const runner = openai.beta.chat.completions + const runner = openai.chat.completions .runTools({ model: 'gpt-4-1106-preview', stream: true, diff --git a/examples/tool-call-helpers.ts b/examples/tool-call-helpers.ts index a680f6da9..18baacdb4 100755 --- a/examples/tool-call-helpers.ts +++ b/examples/tool-call-helpers.ts @@ -63,7 +63,7 @@ const tools: RunnableToolFunction[] = [ ]; async function main() { - const runner = await openai.beta.chat.completions + const runner = await openai.chat.completions .runTools({ model: 'gpt-4-1106-preview', stream: true, diff --git a/examples/ui-generation.ts b/examples/ui-generation.ts index 84636b1f0..003bb7568 100644 --- a/examples/ui-generation.ts +++ b/examples/ui-generation.ts @@ -31,7 +31,7 @@ const UISchema: z.ZodType = z.lazy(() => ); async function main() { - const completion = await openai.beta.chat.completions.parse({ + const completion = await openai.chat.completions.parse({ model: 'gpt-4o-2024-08-06', messages: [ { diff --git a/helpers.md b/helpers.md index 41b352e5e..8b25fe0a5 100644 --- a/helpers.md +++ b/helpers.md @@ -2,7 +2,7 @@ The OpenAI API supports extracting JSON from the model with the `response_format` request param, for more details on the API, see [this guide](https://platform.openai.com/docs/guides/structured-outputs). -The SDK provides a `client.beta.chat.completions.parse()` method which is a wrapper over the `client.chat.completions.create()` that +The SDK provides a `client.chat.completions.parse()` method which is a wrapper over the `client.chat.completions.create()` that provides richer integrations with TS specific types & returns a `ParsedChatCompletion` object, which is an extension of the standard `ChatCompletion` type. 
## Auto-parsing response content with Zod schemas @@ -27,7 +27,7 @@ const MathResponse = z.object({ const client = new OpenAI(); -const completion = await client.beta.chat.completions.parse({ +const completion = await client.chat.completions.parse({ model: 'gpt-4o-2024-08-06', messages: [ { role: 'system', content: 'You are a helpful math tutor.' }, @@ -93,7 +93,7 @@ const Query = z.object({ }); const client = new OpenAI(); -const completion = await client.beta.chat.completions.parse({ +const completion = await client.chat.completions.parse({ model: 'gpt-4o-2024-08-06', messages: [ { @@ -122,7 +122,7 @@ main(); ### Differences from `.create()` -The `beta.chat.completions.parse()` method imposes some additional restrictions on it's usage that `chat.completions.create()` does not. +The `chat.completions.parse()` method imposes some additional restrictions on it's usage that `chat.completions.create()` does not. - If the completion completes with `finish_reason` set to `length` or `content_filter`, the `LengthFinishReasonError` / `ContentFilterFinishReasonError` errors will be raised. - Only strict function tools can be passed, e.g. `{type: 'function', function: {..., strict: true}}` @@ -304,7 +304,7 @@ See an example of streaming helpers in action in [`examples/stream.ts`](examples ### Automated function calls -We provide the `openai.beta.chat.completions.runTools({…})` +We provide the `openai.chat.completions.runTools({…})` convenience helper for using function tool calls with the `/chat/completions` endpoint which automatically call the JavaScript functions you provide and sends their results back to the `/chat/completions` endpoint, @@ -323,7 +323,7 @@ import OpenAI from 'openai'; const client = new OpenAI(); async function main() { - const runner = client.beta.chat.completions + const runner = client.chat.completions .runTools({ model: 'gpt-4o', messages: [{ role: 'user', content: 'How is the weather this week?' }], diff --git a/src/helpers/zod.ts b/src/helpers/zod.ts index de4c3ba93..d12e7f3ca 100644 --- a/src/helpers/zod.ts +++ b/src/helpers/zod.ts @@ -32,7 +32,7 @@ function zodToJsonSchema(schema: ZodType, options: { name: string }): Record extends ChatCompletionMessage { - parsed: ParsedT | null; - tool_calls?: Array; -} - -export interface ParsedChoice extends ChatCompletion.Choice { - message: ParsedChatCompletionMessage; -} - -export interface ParsedChatCompletion extends ChatCompletion { - choices: Array>; -} - -export type ChatCompletionParseParams = ChatCompletionCreateParamsNonStreaming; - -export class Completions extends APIResource { - parse>( - body: Params, - options?: RequestOptions, - ): APIPromise> { - validateInputTools(body.tools); - - return this._client.chat.completions - .create(body, { - ...options, - headers: { - ...options?.headers, - 'X-Stainless-Helper-Method': 'beta.chat.completions.parse', - }, - }) - ._thenUnwrap((completion) => parseChatCompletion(completion, body)); - } - - /** - * A convenience helper for using tool calls with the /chat/completions endpoint - * which automatically calls the JavaScript functions you provide and sends their - * results back to the /chat/completions endpoint, looping as long as the model - * requests function calls. 
- * - * For more details and examples, see - * [the docs](https://github.com/openai/openai-node#automated-function-calls) - */ - runTools< - Params extends ChatCompletionToolRunnerParams, - ParsedT = ExtractParsedContentFromParams, - >(body: Params, options?: RunnerOptions): ChatCompletionRunner; - - runTools< - Params extends ChatCompletionStreamingToolRunnerParams, - ParsedT = ExtractParsedContentFromParams, - >(body: Params, options?: RunnerOptions): ChatCompletionStreamingRunner; - - runTools< - Params extends ChatCompletionToolRunnerParams | ChatCompletionStreamingToolRunnerParams, - ParsedT = ExtractParsedContentFromParams, - >( - body: Params, - options?: RunnerOptions, - ): ChatCompletionRunner | ChatCompletionStreamingRunner { - if (body.stream) { - return ChatCompletionStreamingRunner.runTools( - this._client, - body as ChatCompletionStreamingToolRunnerParams, - options, - ); - } - - return ChatCompletionRunner.runTools(this._client, body as ChatCompletionToolRunnerParams, options); - } - - /** - * Creates a chat completion stream - */ - stream>( - body: Params, - options?: RequestOptions, - ): ChatCompletionStream { - return ChatCompletionStream.createChatCompletion(this._client, body, options); - } -} diff --git a/src/resources/beta/chat/index.ts b/src/resources/beta/chat/index.ts deleted file mode 100644 index 23b1b8ff3..000000000 --- a/src/resources/beta/chat/index.ts +++ /dev/null @@ -1,4 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -export { Chat } from './chat'; -export { Completions } from './completions'; diff --git a/src/resources/beta/index.ts b/src/resources/beta/index.ts index 18746c1d4..a4d5a7ea6 100644 --- a/src/resources/beta/index.ts +++ b/src/resources/beta/index.ts @@ -20,7 +20,6 @@ export { } from './assistants'; export { Beta } from './beta'; export { Realtime } from './realtime/index'; -export { Chat } from './chat/index'; export { Threads, type AssistantResponseFormatOption, diff --git a/src/resources/chat/completions/completions.ts b/src/resources/chat/completions/completions.ts index 20908614e..82bb2e27c 100644 --- a/src/resources/chat/completions/completions.ts +++ b/src/resources/chat/completions/completions.ts @@ -12,6 +12,14 @@ import { Stream } from '../../../core/streaming'; import { RequestOptions } from '../../../internal/request-options'; import { path } from '../../../internal/utils/path'; +import { ChatCompletionRunner } from '../../../lib/ChatCompletionRunner'; +import { ChatCompletionStreamingRunner } from '../../../lib/ChatCompletionStreamingRunner'; +import { RunnerOptions } from '../../../lib/AbstractChatCompletionRunner'; +import { ChatCompletionToolRunnerParams } from '../../../lib/ChatCompletionRunner'; +import { ChatCompletionStreamingToolRunnerParams } from '../../../lib/ChatCompletionStreamingRunner'; +import { ChatCompletionStream, type ChatCompletionStreamParams } from '../../../lib/ChatCompletionStream'; +import { ExtractParsedContentFromParams, parseChatCompletion, validateInputTools } from '../../../lib/parser'; + export class Completions extends APIResource { messages: MessagesAPI.Messages = new MessagesAPI.Messages(this._client); @@ -129,8 +137,106 @@ export class Completions extends APIResource { delete(completionID: string, options?: RequestOptions): APIPromise { return this._client.delete(path`/chat/completions/${completionID}`, options); } + + parse>( + body: Params, + options?: RequestOptions, + ): APIPromise> { + validateInputTools(body.tools); + + return 
this._client.chat.completions + .create(body, { + ...options, + headers: { + ...options?.headers, + 'X-Stainless-Helper-Method': 'chat.completions.parse', + }, + }) + ._thenUnwrap((completion) => parseChatCompletion(completion, body)); + } + + /** + * A convenience helper for using tool calls with the /chat/completions endpoint + * which automatically calls the JavaScript functions you provide and sends their + * results back to the /chat/completions endpoint, looping as long as the model + * requests function calls. + * + * For more details and examples, see + * [the docs](https://github.com/openai/openai-node#automated-function-calls) + */ + runTools< + Params extends ChatCompletionToolRunnerParams, + ParsedT = ExtractParsedContentFromParams, + >(body: Params, options?: RunnerOptions): ChatCompletionRunner; + + runTools< + Params extends ChatCompletionStreamingToolRunnerParams, + ParsedT = ExtractParsedContentFromParams, + >(body: Params, options?: RunnerOptions): ChatCompletionStreamingRunner; + + runTools< + Params extends ChatCompletionToolRunnerParams | ChatCompletionStreamingToolRunnerParams, + ParsedT = ExtractParsedContentFromParams, + >( + body: Params, + options?: RunnerOptions, + ): ChatCompletionRunner | ChatCompletionStreamingRunner { + if (body.stream) { + return ChatCompletionStreamingRunner.runTools( + this._client, + body as ChatCompletionStreamingToolRunnerParams, + options, + ); + } + + return ChatCompletionRunner.runTools(this._client, body as ChatCompletionToolRunnerParams, options); + } + + /** + * Creates a chat completion stream + */ + stream>( + body: Params, + options?: RequestOptions, + ): ChatCompletionStream { + return ChatCompletionStream.createChatCompletion(this._client, body, options); + } +} + +export interface ParsedFunction extends ChatCompletionMessageToolCall.Function { + parsed_arguments?: unknown; } +export interface ParsedFunctionToolCall extends ChatCompletionMessageToolCall { + function: ParsedFunction; +} + +export interface ParsedChatCompletionMessage extends ChatCompletionMessage { + parsed: ParsedT | null; + tool_calls?: Array; +} + +export interface ParsedChoice extends ChatCompletion.Choice { + message: ParsedChatCompletionMessage; +} + +export interface ParsedChatCompletion extends ChatCompletion { + choices: Array>; +} + +export type ChatCompletionParseParams = ChatCompletionCreateParamsNonStreaming; + +export { ChatCompletionStreamingRunner } from '../../../lib/ChatCompletionStreamingRunner'; +export { + type RunnableFunctionWithParse, + type RunnableFunctionWithoutParse, + ParsingToolFunction, +} from '../../../lib/RunnableFunction'; +export { type ChatCompletionToolRunnerParams } from '../../../lib/ChatCompletionRunner'; +export { type ChatCompletionStreamingToolRunnerParams } from '../../../lib/ChatCompletionStreamingRunner'; +export { ChatCompletionStream, type ChatCompletionStreamParams } from '../../../lib/ChatCompletionStream'; +export { ChatCompletionRunner } from '../../../lib/ChatCompletionRunner'; + export type ChatCompletionsPage = CursorPage; export type ChatCompletionStoreMessagesPage = CursorPage; diff --git a/src/resources/chat/completions/index.ts b/src/resources/chat/completions/index.ts index 32d0eb408..ce1897258 100644 --- a/src/resources/chat/completions/index.ts +++ b/src/resources/chat/completions/index.ts @@ -39,4 +39,5 @@ export { type ChatCompletionStoreMessagesPage, type ChatCompletionsPage, } from './completions'; +export * from './completions'; export { Messages, type MessageListParams } from './messages'; 
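The `runTools()` helper promoted above loops automatically: it invokes the JavaScript function registered for each function tool call and sends the result back to `/chat/completions` until the model stops requesting tools. A minimal usage sketch of the GA surface after this change, wired to the event names renamed earlier in this series — the model name, tool schema, and `getWeather` stub are illustrative, not part of this patch:

```ts
import OpenAI from 'openai';

const client = new OpenAI();

// Illustrative stub; a real implementation would call a weather API.
async function getWeather(args: { location: string }) {
  return { location: args.location, forecast: 'sunny' };
}

async function main() {
  const runner = client.chat.completions // previously client.beta.chat.completions
    .runTools({
      model: 'gpt-4o',
      messages: [{ role: 'user', content: 'How is the weather in Berlin?' }],
      tools: [
        {
          type: 'function',
          function: {
            name: 'getWeather',
            description: 'Get the weather for a location',
            function: getWeather,
            parse: JSON.parse,
            parameters: {
              type: 'object',
              properties: { location: { type: 'string' } },
            },
          },
        },
      ],
    })
    // events renamed from 'functionCall' / 'functionCallResult'
    .on('functionToolCall', (toolCall) => console.log('tool call', toolCall))
    .on('functionToolCallResult', (result) => console.log('tool result', result));

  console.log(await runner.finalContent());
}

main();
```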
diff --git a/tests/lib/ChatCompletionRunFunctions.test.ts b/tests/lib/ChatCompletionRunFunctions.test.ts index ffe17fb98..42ea3de98 100644 --- a/tests/lib/ChatCompletionRunFunctions.test.ts +++ b/tests/lib/ChatCompletionRunFunctions.test.ts @@ -7,7 +7,7 @@ import { type ChatCompletionToolRunnerParams, ChatCompletionStreamingRunner, type ChatCompletionStreamingToolRunnerParams, -} from 'openai/resources/beta/chat/completions'; +} from 'openai/resources/chat/completions'; import type { ChatCompletionMessageParam } from 'openai/resources/chat/completions'; import { isAssistantMessage } from '../../src/lib/chatCompletionUtils'; import { mockFetch } from '../utils/mock-fetch'; @@ -339,7 +339,7 @@ class StreamingRunnerListener { function _typeTests() { const openai = new OpenAI(); - openai.beta.chat.completions.runTools({ + openai.chat.completions.runTools({ messages: [ { role: 'user', content: 'can you tell me how many properties are in {"a": 1, "b": 2, "c": 3}' }, ], @@ -380,7 +380,7 @@ function _typeTests() { }, ], }); - openai.beta.chat.completions.runTools({ + openai.chat.completions.runTools({ messages: [ { role: 'user', content: 'can you tell me how many properties are in {"a": 1, "b": 2, "c": 3}' }, ], @@ -396,7 +396,7 @@ function _typeTests() { }), ], }); - openai.beta.chat.completions.runTools({ + openai.chat.completions.runTools({ messages: [ { role: 'user', content: 'can you tell me how many properties are in {"a": 1, "b": 2, "c": 3}' }, ], @@ -438,7 +438,7 @@ function _typeTests() { }), ], }); - openai.beta.chat.completions.runTools({ + openai.chat.completions.runTools({ messages: [ { role: 'user', content: 'can you tell me how many properties are in {"a": 1, "b": 2, "c": 3}' }, ], @@ -500,7 +500,7 @@ describe('resource completions', () => { const openai = new OpenAI({ apiKey: 'something1234', baseURL: 'http://127.0.0.1:4010', fetch }); - const runner = openai.beta.chat.completions.runTools({ + const runner = openai.chat.completions.runTools({ messages: [{ role: 'user', content: 'tell me what the weather is like' }], model: 'gpt-3.5-turbo', tools: [ @@ -639,7 +639,7 @@ describe('resource completions', () => { const openai = new OpenAI({ apiKey: 'something1234', baseURL: 'http://127.0.0.1:4010', fetch }); const controller = new AbortController(); - const runner = openai.beta.chat.completions.runTools( + const runner = openai.chat.completions.runTools( { messages: [{ role: 'user', content: 'tell me what the weather is like' }], model: 'gpt-3.5-turbo', @@ -726,7 +726,7 @@ describe('resource completions', () => { const openai = new OpenAI({ apiKey: 'something1234', baseURL: 'http://127.0.0.1:4010', fetch }); - const runner = openai.beta.chat.completions.runTools({ + const runner = openai.chat.completions.runTools({ messages: [ { role: 'user', @@ -886,7 +886,7 @@ describe('resource completions', () => { const openai = new OpenAI({ apiKey: 'something1234', baseURL: 'http://127.0.0.1:4010', fetch }); - const runner = openai.beta.chat.completions.runTools({ + const runner = openai.chat.completions.runTools({ messages: [ { role: 'user', @@ -1135,7 +1135,7 @@ describe('resource completions', () => { const openai = new OpenAI({ apiKey: 'something1234', baseURL: 'http://127.0.0.1:4010', fetch }); - const runner = openai.beta.chat.completions.runTools({ + const runner = openai.chat.completions.runTools({ messages: [{ role: 'user', content: 'tell me what the weather is like' }], model: 'gpt-3.5-turbo', tool_choice: { @@ -1223,7 +1223,7 @@ describe('resource completions', () => { const openai = 
new OpenAI({ apiKey: 'something1234', baseURL: 'http://127.0.0.1:4010', fetch }); - const runner = openai.beta.chat.completions.runTools({ + const runner = openai.chat.completions.runTools({ messages: [{ role: 'user', content: 'tell me what the weather is like' }], model: 'gpt-3.5-turbo', tools: [ @@ -1459,7 +1459,7 @@ describe('resource completions', () => { const openai = new OpenAI({ apiKey: 'something1234', baseURL: 'http://127.0.0.1:4010', fetch }); - const runner = openai.beta.chat.completions.runTools({ + const runner = openai.chat.completions.runTools({ stream: true, messages: [{ role: 'user', content: 'tell me what the weather is like' }], model: 'gpt-3.5-turbo', @@ -1583,7 +1583,7 @@ describe('resource completions', () => { const openai = new OpenAI({ apiKey: 'something1234', baseURL: 'http://127.0.0.1:4010', fetch }); const controller = new AbortController(); - const runner = openai.beta.chat.completions.runTools( + const runner = openai.chat.completions.runTools( { stream: true, messages: [{ role: 'user', content: 'tell me what the weather is like' }], @@ -1667,7 +1667,7 @@ describe('resource completions', () => { const openai = new OpenAI({ apiKey: 'something1234', baseURL: 'http://127.0.0.1:4010', fetch }); - const runner = openai.beta.chat.completions.runTools({ + const runner = openai.chat.completions.runTools({ stream: true, messages: [ { @@ -1805,7 +1805,7 @@ describe('resource completions', () => { const openai = new OpenAI({ apiKey: 'something1234', baseURL: 'http://127.0.0.1:4010', fetch }); - const runner = openai.beta.chat.completions.runTools({ + const runner = openai.chat.completions.runTools({ stream: true, messages: [ { @@ -2007,7 +2007,7 @@ describe('resource completions', () => { const openai = new OpenAI({ apiKey: 'something1234', baseURL: 'http://127.0.0.1:4010', fetch }); - const runner = openai.beta.chat.completions.runTools({ + const runner = openai.chat.completions.runTools({ stream: true, messages: [{ role: 'user', content: 'tell me what the weather is like' }], model: 'gpt-3.5-turbo', @@ -2093,7 +2093,7 @@ describe('resource completions', () => { const openai = new OpenAI({ apiKey: 'something1234', baseURL: 'http://127.0.0.1:4010', fetch }); - const runner = openai.beta.chat.completions.runTools({ + const runner = openai.chat.completions.runTools({ stream: true, messages: [{ role: 'user', content: 'tell me what the weather is like' }], model: 'gpt-3.5-turbo', @@ -2317,7 +2317,7 @@ describe('resource completions', () => { const openai = new OpenAI({ apiKey: 'something1234', baseURL: 'http://127.0.0.1:4010', fetch }); - const runner = openai.beta.chat.completions.stream({ + const runner = openai.chat.completions.stream({ stream: true, messages: [{ role: 'user', content: 'tell me what the weather is like' }], model: 'gpt-3.5-turbo', @@ -2355,7 +2355,7 @@ describe('resource completions', () => { const openai = new OpenAI({ apiKey: 'something1234', baseURL: 'http://127.0.0.1:4010', fetch }); - const runner = openai.beta.chat.completions.stream({ + const runner = openai.chat.completions.stream({ stream: true, messages: [{ role: 'user', content: 'tell me what the weather is like' }], model: 'gpt-3.5-turbo', @@ -2394,7 +2394,7 @@ describe('resource completions', () => { const openai = new OpenAI({ apiKey: '...', fetch }); - const stream = openai.beta.chat.completions.stream( + const stream = openai.chat.completions.stream( { max_tokens: 1024, model: 'gpt-3.5-turbo', @@ -2418,7 +2418,7 @@ describe('resource completions', () => { const openai = new OpenAI({ 
apiKey: '...', fetch }); - const stream = openai.beta.chat.completions.stream( + const stream = openai.chat.completions.stream( { max_tokens: 1024, model: 'gpt-3.5-turbo', diff --git a/tests/lib/ChatCompletionStream.test.ts b/tests/lib/ChatCompletionStream.test.ts index 34c5fd204..7d78b712a 100644 --- a/tests/lib/ChatCompletionStream.test.ts +++ b/tests/lib/ChatCompletionStream.test.ts @@ -8,7 +8,7 @@ jest.setTimeout(1000 * 30); describe('.stream()', () => { it('works', async () => { const stream = await makeStreamSnapshotRequest((openai) => - openai.beta.chat.completions.stream({ + openai.chat.completions.stream({ model: 'gpt-4o-2024-08-06', messages: [ { @@ -49,7 +49,7 @@ describe('.stream()', () => { const stream = ( await makeStreamSnapshotRequest((openai) => - openai.beta.chat.completions.stream({ + openai.chat.completions.stream({ model: 'gpt-4o-2024-08-06', messages: [ { @@ -208,7 +208,7 @@ describe('.stream()', () => { const stream = ( await makeStreamSnapshotRequest((openai) => - openai.beta.chat.completions.stream({ + openai.chat.completions.stream({ model: 'gpt-4o-2024-08-06', messages: [ { diff --git a/tests/lib/parser.test.ts b/tests/lib/parser.test.ts index fa8123f5c..74cca7253 100644 --- a/tests/lib/parser.test.ts +++ b/tests/lib/parser.test.ts @@ -8,7 +8,7 @@ describe('.parse()', () => { describe('zod', () => { it('deserialises response_format', async () => { const completion = await makeSnapshotRequest((openai) => - openai.beta.chat.completions.parse({ + openai.chat.completions.parse({ model: 'gpt-4o-2024-08-06', messages: [ { @@ -60,7 +60,7 @@ describe('.parse()', () => { ); const completion = await makeSnapshotRequest((openai) => - openai.beta.chat.completions.parse({ + openai.chat.completions.parse({ model: 'gpt-4o-2024-08-06', messages: [ { @@ -448,7 +448,7 @@ describe('.parse()', () => { const completion = await makeSnapshotRequest( (openai) => - openai.beta.chat.completions.parse({ + openai.chat.completions.parse({ model: 'gpt-4o-2024-08-06', messages: [ { @@ -698,7 +698,7 @@ describe('.parse()', () => { const completion = await makeSnapshotRequest( (openai) => - openai.beta.chat.completions.parse({ + openai.chat.completions.parse({ model: 'gpt-4o-2024-08-06', messages: [ { @@ -905,7 +905,7 @@ describe('.parse()', () => { const completion = await makeSnapshotRequest( (openai) => - openai.beta.chat.completions.parse({ + openai.chat.completions.parse({ model: 'gpt-4o-2024-08-06', messages: [ { @@ -1030,7 +1030,7 @@ describe('.parse()', () => { const completion = await makeSnapshotRequest( (openai) => - openai.beta.chat.completions.parse({ + openai.chat.completions.parse({ model: 'gpt-4o-2024-08-06', messages: [ {