@@ -10,6 +10,26 @@ type Example = {
10
10
validate ?: boolean
11
11
} ;
12
12
13
+ const r : OpenAI . Chat . CompletionCreateParams . CreateChatCompletionRequestNonStreaming = {
14
+ "model" : "gpt-3.5-turbo" ,
15
+ "temperature" : 0 ,
16
+ "functions" : [
17
+ {
18
+ "name" : "do_stuff" ,
19
+ "parameters" : {
20
+ "type" : "object" ,
21
+ "properties" : { }
22
+ }
23
+ }
24
+ ] ,
25
+ "messages" : [
26
+ {
27
+ "role" : "system" ,
28
+ "content" : "hello:"
29
+ } ,
30
+ ]
31
+ } ;
32
+
13
33
const TEST_CASES : Example [ ] = [
14
34
{
15
35
messages : [
@@ -23,6 +43,18 @@ const TEST_CASES: Example[] = [
23
43
] ,
24
44
tokens : 9
25
45
} ,
46
+ {
47
+ messages : [
48
+ { role : "system" , content : "hello" }
49
+ ] ,
50
+ tokens : 8 ,
51
+ } ,
52
+ {
53
+ messages : [
54
+ { role : "system" , content : "hello:" }
55
+ ] ,
56
+ tokens : 9 ,
57
+ } ,
26
58
{
27
59
messages : [
28
60
{ role : "system" , content : "# Important: you're the best robot" } ,
@@ -161,10 +193,32 @@ const TEST_CASES: Example[] = [
161
193
}
162
194
] ,
163
195
tokens : 35 ,
164
- }
196
+ } ,
197
+ {
198
+ messages : [
199
+ { "role" : "system" , "content" : "Hello:" } ,
200
+ { "role" : "user" , "content" : "Hi there" } ,
201
+ ] ,
202
+ functions : [
203
+ { "name" : "do_stuff" , "parameters" : { "type" : "object" , "properties" : { } } }
204
+ ] ,
205
+ tokens : 35 ,
206
+ } ,
207
+ {
208
+ messages : [
209
+ { "role" : "system" , "content" : "Hello:" } ,
210
+ { "role" : "system" , "content" : "Hello" } ,
211
+ { "role" : "user" , "content" : "Hi there" } ,
212
+ ] ,
213
+ functions : [
214
+ { "name" : "do_stuff" , "parameters" : { "type" : "object" , "properties" : { } } }
215
+ ] ,
216
+ tokens : 40 ,
217
+ } ,
165
218
] ;
166
219
167
220
// When true, every test case is validated against the live OpenAI API
// (each case's `validate` flag selects test vs test.skip below); keep false
// by default so CI does not require network access or an API key.
const validateAll = false ;
221
// Per-test timeout in milliseconds for tests that call the live OpenAI API,
// which can exceed Jest's default 5s timeout.
const openAITimeout = 10000 ;
168
222
169
223
describe . each ( TEST_CASES ) ( "token counts (%j)" , ( example ) => {
170
224
const validateTest = ( ( validateAll || example . validate ) ? test : test . skip )
@@ -174,10 +228,10 @@ describe.each(TEST_CASES)("token counts (%j)", (example) => {
174
228
model : "gpt-3.5-turbo" ,
175
229
messages : example . messages ,
176
230
functions : example . functions as any ,
177
- max_tokens : 1 ,
231
+ max_tokens : 10 ,
178
232
} ) ;
179
233
expect ( response . usage ?. prompt_tokens ) . toBe ( example . tokens ) ;
180
- } ) ;
234
+ } , openAITimeout ) ;
181
235
182
236
test ( "estimate is correct" , async ( ) => {
183
237
expect ( promptTokensEstimate ( { messages : example . messages , functions : example . functions } ) ) . toBe ( example . tokens ) ;
0 commit comments