
Commit c0bd324

spec 1.0.4 + build

1 parent a41d007 · commit c0bd324
15 files changed: +588 −19 lines

api.ts

Lines changed: 257 additions & 3 deletions
Large diffs are not rendered by default.

base.ts

Lines changed: 1 addition & 1 deletion
@@ -4,7 +4,7 @@
  * OpenAI API
  * APIs for sampling from and fine-tuning language models
  *
- * The version of the OpenAPI document: 1.0.3
+ * The version of the OpenAPI document: 1.0.4
  *
  *
  * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).

common.ts

Lines changed: 1 addition & 1 deletion
@@ -4,7 +4,7 @@
  * OpenAI API
  * APIs for sampling from and fine-tuning language models
  *
- * The version of the OpenAPI document: 1.0.3
+ * The version of the OpenAPI document: 1.0.4
  *
  *
  * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).

configuration.ts

Lines changed: 1 addition & 1 deletion
@@ -4,7 +4,7 @@
  * OpenAI API
  * APIs for sampling from and fine-tuning language models
  *
- * The version of the OpenAPI document: 1.0.3
+ * The version of the OpenAPI document: 1.0.4
  *
  *
  * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).

dist/api.d.ts

Lines changed: 184 additions & 3 deletions
@@ -2,7 +2,7 @@
  * OpenAI API
  * APIs for sampling from and fine-tuning language models
  *
- * The version of the OpenAPI document: 1.0.3
+ * The version of the OpenAPI document: 1.0.4
  *
  *
  * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
@@ -361,7 +361,13 @@ export interface CreateCompletionFromModelRequest {
      */
     'prompt'?: string | Array<string> | Array<number> | Array<any> | null;
     /**
-     * The maximum number of [tokens](/tokenizer) to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model\'s context length. Most models have a context length of 2048 tokens (except `code-davinci-001`, which supports 4096).
+     * The suffix that comes after a completion of inserted text, encoded as a string or array of strings.
+     * @type {string | Array<string>}
+     * @memberof CreateCompletionFromModelRequest
+     */
+    'suffix'?: string | Array<string> | null;
+    /**
+     * The maximum number of [tokens](/tokenizer) to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model\'s context length. Most models have a context length of 2048 tokens (except for the newest models, which support 4096).
      * @type {number}
      * @memberof CreateCompletionFromModelRequest
      */
@@ -471,7 +477,13 @@ export interface CreateCompletionRequest {
      */
     'prompt'?: string | Array<string> | Array<number> | Array<any> | null;
     /**
-     * The maximum number of [tokens](/tokenizer) to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model\'s context length. Most models have a context length of 2048 tokens (except `code-davinci-001`, which supports 4096).
+     * The suffix that comes after a completion of inserted text, encoded as a string or array of strings.
+     * @type {string | Array<string>}
+     * @memberof CreateCompletionRequest
+     */
+    'suffix'?: string | Array<string> | null;
+    /**
+     * The maximum number of [tokens](/tokenizer) to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model\'s context length. Most models have a context length of 2048 tokens (except for the newest models, which support 4096).
      * @type {number}
      * @memberof CreateCompletionRequest
      */
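
Both completion request types above now take an optional `suffix`, which enables insert-mode completions: text before the insertion point goes in `prompt`, text after it in `suffix`. A minimal sketch of how the field might be used, assuming the published package exposes the generated Configuration and OpenAIApi classes and that CreateCompletionFromModelRequest also carries a `model` field (not shown in this diff); the model name and strings are illustrative:

import { Configuration, OpenAIApi } from "openai"; // assumed package entry point

const openai = new OpenAIApi(new Configuration({ apiKey: process.env.OPENAI_API_KEY }));

async function insertBetween(before: string, after: string): Promise<string | undefined> {
  const res = await openai.createCompletionFromModel({
    model: "text-davinci-002", // illustrative model name, not taken from this diff
    prompt: before,            // text before the insertion point
    suffix: after,             // new in 1.0.4: text after the insertion point
    max_tokens: 32,
  });
  return res.data.choices?.[0]?.text; // completion to splice between before and after
}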
@@ -648,6 +660,74 @@ export interface CreateCompletionResponseLogprobs {
      */
     'text_offset'?: Array<number>;
 }
+/**
+ *
+ * @export
+ * @interface CreateEditRequest
+ */
+export interface CreateEditRequest {
+    /**
+     * The input text to use as a starting point for the edit.
+     * @type {string}
+     * @memberof CreateEditRequest
+     */
+    'input'?: string | null;
+    /**
+     * The instruction that tells the model how to edit the prompt.
+     * @type {string}
+     * @memberof CreateEditRequest
+     */
+    'instruction': string;
+    /**
+     * What [sampling temperature](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277) to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. We generally recommend altering this or `top_p` but not both.
+     * @type {number}
+     * @memberof CreateEditRequest
+     */
+    'temperature'?: number | null;
+    /**
+     * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
+     * @type {number}
+     * @memberof CreateEditRequest
+     */
+    'top_p'?: number | null;
+}
+/**
+ *
+ * @export
+ * @interface CreateEditResponse
+ */
+export interface CreateEditResponse {
+    /**
+     *
+     * @type {string}
+     * @memberof CreateEditResponse
+     */
+    'id'?: string;
+    /**
+     *
+     * @type {string}
+     * @memberof CreateEditResponse
+     */
+    'object'?: string;
+    /**
+     *
+     * @type {number}
+     * @memberof CreateEditResponse
+     */
+    'created'?: number;
+    /**
+     *
+     * @type {string}
+     * @memberof CreateEditResponse
+     */
+    'model'?: string;
+    /**
+     *
+     * @type {Array<CreateCompletionResponseChoices>}
+     * @memberof CreateEditResponse
+     */
+    'choices'?: Array<CreateCompletionResponseChoices>;
+}
 /**
  *
  * @export
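
In the new CreateEditRequest, `instruction` is the only required field; `input`, `temperature`, and `top_p` are optional and nullable, and CreateEditResponse reuses CreateCompletionResponseChoices for its `choices`. A typed request literal, as a sketch (the import path assumes the package re-exports the generated interfaces; the values are illustrative):

import type { CreateEditRequest } from "openai"; // assumed re-export of the generated types

const editRequest: CreateEditRequest = {
  input: "What day of the wek is it?",      // optional starting text to edit
  instruction: "Fix the spelling mistakes", // required
  temperature: 0,                           // argmax sampling for a deterministic edit
};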
@@ -789,6 +869,12 @@ export interface CreateFineTuneRequest {
      * @memberof CreateFineTuneRequest
      */
     'classification_betas'?: Array<number> | null;
+    /**
+     * A string of up to 40 characters that will be added to your fine-tuned model name. For example, a `suffix` of \"custom-model-name\" would produce a model name like `ada:ft-your-org:custom-model-name-2022-02-15-04-21-04`.
+     * @type {string}
+     * @memberof CreateFineTuneRequest
+     */
+    'suffix'?: string | null;
 }
 /**
  *
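
CreateFineTuneRequest also gains an optional `suffix`, folded into the resulting model name. A sketch of a request using it; the client's createFineTune method and the `training_file` field are assumed here (they are not part of this diff), and the file ID is a placeholder:

import { Configuration, OpenAIApi } from "openai"; // assumed package entry point

const openai = new OpenAIApi(new Configuration({ apiKey: process.env.OPENAI_API_KEY }));

async function startNamedFineTune(): Promise<void> {
  const res = await openai.createFineTune({
    training_file: "file-abc123", // placeholder ID of a previously uploaded training file
    suffix: "custom-model-name",  // new in 1.0.4: yields a name like ada:ft-your-org:custom-model-name-...
  });
  console.log(res.data);
}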
@@ -908,6 +994,31 @@ export interface DeleteFileResponse {
      */
     'deleted'?: boolean;
 }
+/**
+ *
+ * @export
+ * @interface DeleteModelResponse
+ */
+export interface DeleteModelResponse {
+    /**
+     *
+     * @type {string}
+     * @memberof DeleteModelResponse
+     */
+    'id'?: string;
+    /**
+     *
+     * @type {string}
+     * @memberof DeleteModelResponse
+     */
+    'object'?: string;
+    /**
+     *
+     * @type {boolean}
+     * @memberof DeleteModelResponse
+     */
+    'deleted'?: boolean;
+}
 /**
  *
  * @export
@@ -1232,6 +1343,15 @@ export declare const OpenAIApiAxiosParamCreator: (configuration?: Configuration)
     * @throws {RequiredError}
     */
    createCompletionFromModel: (createCompletionFromModelRequest: CreateCompletionFromModelRequest, options?: AxiosRequestConfig) => Promise<RequestArgs>;
+   /**
+    *
+    * @summary Creates a new edit for the provided input, instruction, and parameters
+    * @param {string} engineId The ID of the engine to use for this request
+    * @param {CreateEditRequest} createEditRequest
+    * @param {*} [options] Override http request option.
+    * @throws {RequiredError}
+    */
+   createEdit: (engineId: string, createEditRequest: CreateEditRequest, options?: AxiosRequestConfig) => Promise<RequestArgs>;
    /**
     *
     * @summary Creates an embedding vector representing the input text.
@@ -1275,6 +1395,14 @@ export declare const OpenAIApiAxiosParamCreator: (configuration?: Configuration)
     * @throws {RequiredError}
     */
    deleteFile: (fileId: string, options?: AxiosRequestConfig) => Promise<RequestArgs>;
+   /**
+    *
+    * @summary Delete a fine-tuned model. You must have the Owner role in your organization.
+    * @param {string} model The model to delete
+    * @param {*} [options] Override http request option.
+    * @throws {RequiredError}
+    */
+   deleteModel: (model: string, options?: AxiosRequestConfig) => Promise<RequestArgs>;
    /**
     *
     * @summary Returns the contents of the specified file
@@ -1384,6 +1512,15 @@ export declare const OpenAIApiFp: (configuration?: Configuration) => {
     * @throws {RequiredError}
     */
    createCompletionFromModel(createCompletionFromModelRequest: CreateCompletionFromModelRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateCompletionResponse>>;
+   /**
+    *
+    * @summary Creates a new edit for the provided input, instruction, and parameters
+    * @param {string} engineId The ID of the engine to use for this request
+    * @param {CreateEditRequest} createEditRequest
+    * @param {*} [options] Override http request option.
+    * @throws {RequiredError}
+    */
+   createEdit(engineId: string, createEditRequest: CreateEditRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateEditResponse>>;
    /**
     *
     * @summary Creates an embedding vector representing the input text.
@@ -1427,6 +1564,14 @@ export declare const OpenAIApiFp: (configuration?: Configuration) => {
     * @throws {RequiredError}
     */
    deleteFile(fileId: string, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<DeleteFileResponse>>;
+   /**
+    *
+    * @summary Delete a fine-tuned model. You must have the Owner role in your organization.
+    * @param {string} model The model to delete
+    * @param {*} [options] Override http request option.
+    * @throws {RequiredError}
+    */
+   deleteModel(model: string, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<DeleteModelResponse>>;
    /**
     *
     * @summary Returns the contents of the specified file
@@ -1536,6 +1681,15 @@ export declare const OpenAIApiFactory: (configuration?: Configuration, basePath?
     * @throws {RequiredError}
     */
    createCompletionFromModel(createCompletionFromModelRequest: CreateCompletionFromModelRequest, options?: any): AxiosPromise<CreateCompletionResponse>;
+   /**
+    *
+    * @summary Creates a new edit for the provided input, instruction, and parameters
+    * @param {string} engineId The ID of the engine to use for this request
+    * @param {CreateEditRequest} createEditRequest
+    * @param {*} [options] Override http request option.
+    * @throws {RequiredError}
+    */
+   createEdit(engineId: string, createEditRequest: CreateEditRequest, options?: any): AxiosPromise<CreateEditResponse>;
    /**
     *
     * @summary Creates an embedding vector representing the input text.
@@ -1579,6 +1733,14 @@ export declare const OpenAIApiFactory: (configuration?: Configuration, basePath?
     * @throws {RequiredError}
     */
    deleteFile(fileId: string, options?: any): AxiosPromise<DeleteFileResponse>;
+   /**
+    *
+    * @summary Delete a fine-tuned model. You must have the Owner role in your organization.
+    * @param {string} model The model to delete
+    * @param {*} [options] Override http request option.
+    * @throws {RequiredError}
+    */
+   deleteModel(model: string, options?: any): AxiosPromise<DeleteModelResponse>;
    /**
     *
     * @summary Returns the contents of the specified file
@@ -1695,6 +1857,16 @@ export declare class OpenAIApi extends BaseAPI {
      * @memberof OpenAIApi
      */
     createCompletionFromModel(createCompletionFromModelRequest: CreateCompletionFromModelRequest, options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<CreateCompletionResponse, any>>;
+    /**
+     *
+     * @summary Creates a new edit for the provided input, instruction, and parameters
+     * @param {string} engineId The ID of the engine to use for this request
+     * @param {CreateEditRequest} createEditRequest
+     * @param {*} [options] Override http request option.
+     * @throws {RequiredError}
+     * @memberof OpenAIApi
+     */
+    createEdit(engineId: string, createEditRequest: CreateEditRequest, options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<CreateEditResponse, any>>;
     /**
      *
      * @summary Creates an embedding vector representing the input text.
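
At the class level, createEdit is engine-scoped: the engine ID is the first argument and the CreateEditRequest the second. A minimal sketch of a call, with an illustrative engine ID and the response read back through the CreateEditResponse shape declared earlier:

import { Configuration, OpenAIApi } from "openai"; // assumed package entry point

const openai = new OpenAIApi(new Configuration({ apiKey: process.env.OPENAI_API_KEY }));

async function fixSpelling(text: string): Promise<string | undefined> {
  const res = await openai.createEdit("text-davinci-edit-001", { // illustrative engine ID
    input: text,
    instruction: "Fix the spelling mistakes",
  });
  // CreateEditResponse reuses CreateCompletionResponseChoices, so the edited
  // text comes back on choices[n].text.
  return res.data.choices?.[0]?.text;
}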
@@ -1743,6 +1915,15 @@ export declare class OpenAIApi extends BaseAPI {
      * @memberof OpenAIApi
      */
     deleteFile(fileId: string, options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<DeleteFileResponse, any>>;
+    /**
+     *
+     * @summary Delete a fine-tuned model. You must have the Owner role in your organization.
+     * @param {string} model The model to delete
+     * @param {*} [options] Override http request option.
+     * @throws {RequiredError}
+     * @memberof OpenAIApi
+     */
+    deleteModel(model: string, options?: AxiosRequestConfig): Promise<import("axios").AxiosResponse<DeleteModelResponse, any>>;
     /**
      *
      * @summary Returns the contents of the specified file
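
deleteModel takes the name of the model to delete and resolves to a DeleteModelResponse whose `deleted` flag confirms the removal. A sketch; the model name is a placeholder, and per the summary above the call requires the Owner role in your organization:

import { Configuration, OpenAIApi } from "openai"; // assumed package entry point

const openai = new OpenAIApi(new Configuration({ apiKey: process.env.OPENAI_API_KEY }));

async function removeFineTunedModel(model: string): Promise<void> {
  const res = await openai.deleteModel(model); // e.g. "ada:ft-your-org:custom-model-name-2022-02-15-04-21-04"
  // DeleteModelResponse echoes back the id and a `deleted` flag.
  if (res.data.deleted) {
    console.log(`Deleted ${res.data.id}`);
  }
}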
