/**
 * Ollama provider implementation
 */

import { TokenUsage } from '../../tokens.js';
import { LLMProvider } from '../provider.js';
import {
  GenerateOptions,
  LLMResponse,
  Message,
  ProviderOptions,
} from '../types.js';

/**
 * Ollama-specific options
 */
export interface OllamaOptions extends ProviderOptions {
  baseUrl?: string;
}
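
// Example (hypothetical host): the baseUrl option takes precedence over the
// OLLAMA_BASE_URL environment variable and the http://localhost:11434 default.
//
//   new OllamaProvider('llama3', { baseUrl: 'http://10.0.0.5:11434' });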

/**
 * Ollama provider implementation
 */
export class OllamaProvider implements LLMProvider {
  name: string = 'ollama';
  provider: string = 'ollama.chat';
  model: string;
  private baseUrl: string;

  constructor(model: string, options: OllamaOptions = {}) {
    this.model = model;
    this.baseUrl =
      options.baseUrl || process.env.OLLAMA_BASE_URL || 'http://localhost:11434';

    // Ensure baseUrl doesn't end with a slash
    if (this.baseUrl.endsWith('/')) {
      this.baseUrl = this.baseUrl.slice(0, -1);
    }
  }

  /**
   * Generate text using the Ollama chat API
   */
  async generateText(options: GenerateOptions): Promise<LLMResponse> {
    const {
      messages,
      functions,
      temperature = 0.7,
      maxTokens,
      topP,
      frequencyPenalty,
      presencePenalty,
    } = options;

    // Format messages for the Ollama API
    const formattedMessages = this.formatMessages(messages);

    try {
      // Prepare request options
      const requestOptions: any = {
        model: this.model,
        messages: formattedMessages,
        stream: false,
        options: {
          temperature: temperature,
          // Ollama supports top_p alongside top_k; include the optional
          // sampling parameters only when they are provided
          ...(topP !== undefined && { top_p: topP }),
          ...(frequencyPenalty !== undefined && { frequency_penalty: frequencyPenalty }),
          ...(presencePenalty !== undefined && { presence_penalty: presencePenalty }),
        },
      };

      // Ollama caps generation length via num_predict, so map maxTokens onto it
      if (maxTokens !== undefined) {
        requestOptions.options.num_predict = maxTokens;
      }

      // Add functions/tools if provided, using Ollama's OpenAI-style tool format
      if (functions && functions.length > 0) {
        requestOptions.tools = functions.map((fn) => ({
          type: 'function',
          function: {
            name: fn.name,
            description: fn.description,
            parameters: fn.parameters,
          },
        }));
      }
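
      // At this point the request body looks roughly like this (sketch with
      // hypothetical values):
      //   { "model": "llama3", "messages": [...], "stream": false,
      //     "options": { "temperature": 0.7, ... }, "tools": [...] }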

      // Make the API request
      const response = await fetch(`${this.baseUrl}/api/chat`, {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
        },
        body: JSON.stringify(requestOptions),
      });

      if (!response.ok) {
        const errorText = await response.text();
        throw new Error(`Ollama API error: ${response.status} ${errorText}`);
      }

      const data = await response.json();

      // Extract content and tool calls. Ollama returns tool calls under
      // message.tool_calls without ids, so generate an id when missing.
      const content = data.message?.content || '';
      const toolCalls =
        data.message?.tool_calls?.map((toolCall: any) => ({
          id:
            toolCall.id ||
            `tool-${Date.now()}-${Math.random().toString(36).substring(2, 11)}`,
          name: toolCall.function?.name ?? toolCall.name,
          content: JSON.stringify(
            toolCall.function?.arguments ?? toolCall.args ?? toolCall.arguments ?? {},
          ),
        })) || [];

      // Create token usage from the response counters: prompt_eval_count is
      // the input token count, eval_count the output token count
      const tokenUsage = new TokenUsage();
      tokenUsage.input = data.prompt_eval_count || 0;
      tokenUsage.output = data.eval_count || 0;

      return {
        text: content,
        toolCalls: toolCalls,
        tokenUsage: tokenUsage,
      };
    } catch (error) {
      throw new Error(`Error calling Ollama API: ${(error as Error).message}`);
    }
  }

  /**
   * Format messages for the Ollama API.
   *
   * Role mapping: user, assistant, and system messages pass through
   * unchanged; tool_result becomes a 'tool' message; tool_use becomes an
   * assistant message carrying a tool_calls entry.
   */
  private formatMessages(messages: Message[]): any[] {
    return messages.map((msg) => {
      if (msg.role === 'user' || msg.role === 'assistant' || msg.role === 'system') {
        return {
          role: msg.role,
          content: msg.content,
        };
      } else if (msg.role === 'tool_result') {
        // Ollama expects tool results as a 'tool' role
        return {
          role: 'tool',
          content: msg.content,
          tool_call_id: msg.tool_use_id,
        };
      } else if (msg.role === 'tool_use') {
        // Convert tool_use into an assistant message with tool_calls
        return {
          role: 'assistant',
          content: '',
          tool_calls: [
            {
              id: msg.id,
              name: msg.name,
              arguments: msg.content,
            },
          ],
        };
      }
      // Default fallback: treat any unknown role as a user message
      return {
        role: 'user',
        content: msg.content,
      };
    });
  }
}
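
// Example usage (a minimal sketch, assuming a locally running Ollama server
// and a hypothetical 'llama3' model that has already been pulled; only the
// messages field of GenerateOptions is assumed required here):
//
//   const provider = new OllamaProvider('llama3');
//   const response = await provider.generateText({
//     messages: [{ role: 'user', content: 'Why is the sky blue?' }],
//   });
//   console.log(response.text);
//   console.log(`tokens: ${response.tokenUsage.input} in, ${response.tokenUsage.output} out`);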