Skip to content

Commit 1bffa4d

Browse files
authored
Merge pull request #247 from drivecore/fix/ollama-provider-implementation
fix: update Ollama provider to use official npm package API correctly
2 parents 53d9afb + f15d0e3 commit 1bffa4d

File tree

10 files changed

+235
-126
lines changed

10 files changed

+235
-126
lines changed

mycoder.config.js

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -9,12 +9,13 @@ export default {
99
pageFilter: 'none', // 'simple', 'none', or 'readability'
1010

1111
// Model settings
12-
provider: 'anthropic',
13-
model: 'claude-3-7-sonnet-20250219',
12+
//provider: 'anthropic',
13+
//model: 'claude-3-7-sonnet-20250219',
1414
//provider: 'openai',
1515
//model: 'gpt-4o',
1616
//provider: 'ollama',
1717
//model: 'medragondot/Sky-T1-32B-Preview:latest',
18+
//model: 'llama3.2:3b',
1819
maxTokens: 4096,
1920
temperature: 0.7,
2021

packages/agent/CHANGELOG.md

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,21 @@
1+
# [mycoder-agent-v1.3.0](https://github.com/drivecore/mycoder/compare/mycoder-agent-v1.2.0...mycoder-agent-v1.3.0) (2025-03-12)
2+
3+
### Features
4+
5+
- implement MCP tools support ([2d99ac8](https://github.com/drivecore/mycoder/commit/2d99ac8cefaa770e368d469355a509739aafe6a3))
6+
7+
# [mycoder-agent-v1.2.0](https://github.com/drivecore/mycoder/compare/mycoder-agent-v1.1.0...mycoder-agent-v1.2.0) (2025-03-12)
8+
9+
### Bug Fixes
10+
11+
- Fix TypeScript errors in MCP implementation ([f5837d3](https://github.com/drivecore/mycoder/commit/f5837d3a5dd219efc8e1d811e467f4bb695a1d94))
12+
13+
### Features
14+
15+
- Add basic Model Context Protocol (MCP) support ([8ec9619](https://github.com/drivecore/mycoder/commit/8ec9619c3cc63df8f14222762f5da0bcabe273a5))
16+
- **agent:** implement incremental resource cleanup for agent lifecycle ([576436e](https://github.com/drivecore/mycoder/commit/576436ef2c7c5f234f088b7dba2e7fd65590738f)), closes [#236](https://github.com/drivecore/mycoder/issues/236)
17+
- background tools is now scope to agents ([e55817f](https://github.com/drivecore/mycoder/commit/e55817f32b373fdbff8bb1ac90105b272044d33f))
18+
119
# [mycoder-agent-v1.1.0](https://github.com/drivecore/mycoder/compare/mycoder-agent-v1.0.0...mycoder-agent-v1.1.0) (2025-03-12)
220

321
### Bug Fixes

packages/agent/package.json

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
{
22
"name": "mycoder-agent",
3-
"version": "1.1.0",
3+
"version": "1.3.0",
44
"description": "Agent module for mycoder - an AI-powered software development assistant",
55
"type": "module",
66
"main": "dist/index.js",
@@ -52,6 +52,7 @@
5252
"chalk": "^5.4.1",
5353
"dotenv": "^16",
5454
"jsdom": "^26.0.0",
55+
"ollama": "^0.5.14",
5556
"openai": "^4.87.3",
5657
"playwright": "^1.50.1",
5758
"uuid": "^11",

packages/agent/src/core/executeToolCall.ts

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,10 @@ export const executeToolCall = async (
1414
): Promise<string> => {
1515
const tool = tools.find((t) => t.name === toolCall.name);
1616
if (!tool) {
17-
throw new Error(`No tool with the name '${toolCall.name}' exists.`);
17+
return JSON.stringify({
18+
error: true,
19+
message: `No tool with the name '${toolCall.name}' exists.`,
20+
});
1821
}
1922

2023
const logger = new Logger({
Lines changed: 159 additions & 106 deletions
Original file line numberDiff line numberDiff line change
@@ -1,14 +1,25 @@
11
/**
2-
* Ollama provider implementation
2+
* Ollama provider implementation using the official Ollama npm package
33
*/
44

5+
import {
6+
ChatRequest as OllamaChatRequest,
7+
ChatResponse as OllamaChatResponse,
8+
Ollama,
9+
ToolCall as OllamaTooCall,
10+
Tool as OllamaTool,
11+
Message as OllamaMessage,
12+
} from 'ollama';
13+
514
import { TokenUsage } from '../../tokens.js';
15+
import { ToolCall } from '../../types.js';
616
import { LLMProvider } from '../provider.js';
717
import {
818
GenerateOptions,
919
LLMResponse,
1020
Message,
1121
ProviderOptions,
22+
FunctionDefinition,
1223
} from '../types.js';
1324

1425
/**
@@ -19,29 +30,26 @@ export interface OllamaOptions extends ProviderOptions {
1930
}
2031

2132
/**
22-
* Ollama provider implementation
33+
* Ollama provider implementation using the official Ollama npm package
2334
*/
2435
export class OllamaProvider implements LLMProvider {
2536
name: string = 'ollama';
2637
provider: string = 'ollama.chat';
2738
model: string;
28-
private baseUrl: string;
39+
private client: Ollama;
2940

3041
constructor(model: string, options: OllamaOptions = {}) {
3142
this.model = model;
32-
this.baseUrl =
43+
const baseUrl =
3344
options.baseUrl ||
3445
process.env.OLLAMA_BASE_URL ||
3546
'http://localhost:11434';
3647

37-
// Ensure baseUrl doesn't end with a slash
38-
if (this.baseUrl.endsWith('/')) {
39-
this.baseUrl = this.baseUrl.slice(0, -1);
40-
}
48+
this.client = new Ollama({ host: baseUrl });
4149
}
4250

4351
/**
44-
* Generate text using Ollama API
52+
* Generate text using Ollama API via the official npm package
4553
*/
4654
async generateText(options: GenerateOptions): Promise<LLMResponse> {
4755
const {
@@ -52,126 +60,171 @@ export class OllamaProvider implements LLMProvider {
5260
topP,
5361
frequencyPenalty,
5462
presencePenalty,
63+
stopSequences,
5564
} = options;
5665

5766
// Format messages for Ollama API
5867
const formattedMessages = this.formatMessages(messages);
5968

60-
try {
61-
// Prepare request options
62-
const requestOptions: any = {
63-
model: this.model,
64-
messages: formattedMessages,
65-
stream: false,
66-
options: {
67-
temperature: temperature,
68-
// Ollama uses top_k instead of top_p, but we'll include top_p if provided
69-
...(topP !== undefined && { top_p: topP }),
70-
...(frequencyPenalty !== undefined && {
71-
frequency_penalty: frequencyPenalty,
72-
}),
73-
...(presencePenalty !== undefined && {
74-
presence_penalty: presencePenalty,
75-
}),
76-
},
69+
// Prepare request options
70+
const requestOptions: OllamaChatRequest = {
71+
model: this.model,
72+
messages: formattedMessages,
73+
stream: false,
74+
options: {
75+
temperature: temperature,
76+
...(topP !== undefined && { top_p: topP }),
77+
...(frequencyPenalty !== undefined && {
78+
frequency_penalty: frequencyPenalty,
79+
}),
80+
...(presencePenalty !== undefined && {
81+
presence_penalty: presencePenalty,
82+
}),
83+
...(stopSequences &&
84+
stopSequences.length > 0 && { stop: stopSequences }),
85+
},
86+
};
87+
88+
// Add max_tokens if provided
89+
if (maxTokens !== undefined) {
90+
requestOptions.options = {
91+
...requestOptions.options,
92+
num_predict: maxTokens,
7793
};
94+
}
7895

79-
// Add max_tokens if provided
80-
if (maxTokens !== undefined) {
81-
requestOptions.options.num_predict = maxTokens;
82-
}
96+
// Add functions/tools if provided
97+
if (functions && functions.length > 0) {
98+
requestOptions.tools = this.convertFunctionsToTools(functions);
99+
}
83100

84-
// Add functions/tools if provided
85-
if (functions && functions.length > 0) {
86-
requestOptions.tools = functions.map((fn) => ({
87-
name: fn.name,
88-
description: fn.description,
89-
parameters: fn.parameters,
90-
}));
91-
}
101+
// Make the API request using the Ollama client
102+
const response: OllamaChatResponse = await this.client.chat({
103+
...requestOptions,
104+
stream: false,
105+
});
92106

93-
// Make the API request
94-
const response = await fetch(`${this.baseUrl}/api/chat`, {
95-
method: 'POST',
96-
headers: {
97-
'Content-Type': 'application/json',
98-
},
99-
body: JSON.stringify(requestOptions),
100-
});
101-
102-
if (!response.ok) {
103-
const errorText = await response.text();
104-
throw new Error(`Ollama API error: ${response.status} ${errorText}`);
105-
}
107+
// Extract content and tool calls
108+
const content = response.message?.content || '';
109+
110+
// Handle tool calls if present
111+
const toolCalls = this.extractToolCalls(response);
112+
113+
// Create token usage from response data
114+
const tokenUsage = new TokenUsage();
115+
tokenUsage.output = response.eval_count || 0;
116+
tokenUsage.input = response.prompt_eval_count || 0;
106117

107-
const data = await response.json();
118+
return {
119+
text: content,
120+
toolCalls: toolCalls,
121+
tokenUsage: tokenUsage,
122+
};
123+
}
124+
125+
/*
126+
interface Tool {
127+
type: string;
128+
function: {
129+
name: string;
130+
description: string;
131+
parameters: {
132+
type: string;
133+
required: string[];
134+
properties: {
135+
[key: string]: {
136+
type: string;
137+
description: string;
138+
enum?: string[];
139+
};
140+
};
141+
};
142+
};
143+
}*/
108144

109-
// Extract content and tool calls
110-
const content = data.message?.content || '';
111-
const toolCalls =
112-
data.message?.tool_calls?.map((toolCall: any) => ({
113-
id:
114-
toolCall.id ||
115-
`tool-${Date.now()}-${Math.random().toString(36).substring(2, 11)}`,
116-
name: toolCall.name,
117-
content: JSON.stringify(toolCall.args || toolCall.arguments || {}),
118-
})) || [];
145+
/**
146+
* Convert our function definitions to Ollama tool format
147+
*/
148+
private convertFunctionsToTools(
149+
functions: FunctionDefinition[],
150+
): OllamaTool[] {
151+
return functions.map(
152+
(fn) =>
153+
({
154+
type: 'function',
155+
function: {
156+
name: fn.name,
157+
description: fn.description,
158+
parameters: fn.parameters,
159+
},
160+
}) as OllamaTool,
161+
);
162+
}
119163

120-
// Create token usage from response data
121-
const tokenUsage = new TokenUsage();
122-
tokenUsage.input = data.prompt_eval_count || 0;
123-
tokenUsage.output = data.eval_count || 0;
164+
/**
165+
* Extract tool calls from Ollama response
166+
*/
167+
private extractToolCalls(response: OllamaChatResponse): ToolCall[] {
168+
if (!response.message?.tool_calls) {
169+
return [];
170+
}
124171

172+
return response.message.tool_calls.map((toolCall: OllamaTooCall) => {
173+
//console.log('ollama tool call', toolCall);
125174
return {
126-
text: content,
127-
toolCalls: toolCalls,
128-
tokenUsage: tokenUsage,
175+
id: `tool-${Date.now()}-${Math.random().toString(36).substring(2, 11)}`,
176+
name: toolCall.function?.name,
177+
content:
178+
typeof toolCall.function?.arguments === 'string'
179+
? toolCall.function.arguments
180+
: JSON.stringify(toolCall.function?.arguments || {}),
129181
};
130-
} catch (error) {
131-
throw new Error(`Error calling Ollama API: ${(error as Error).message}`);
132-
}
182+
});
133183
}
134184

135185
/**
136186
* Format messages for Ollama API
137187
*/
138-
private formatMessages(messages: Message[]): any[] {
139-
return messages.map((msg) => {
140-
if (
141-
msg.role === 'user' ||
142-
msg.role === 'assistant' ||
143-
msg.role === 'system'
144-
) {
145-
return {
146-
role: msg.role,
147-
content: msg.content,
148-
};
149-
} else if (msg.role === 'tool_result') {
150-
// Ollama expects tool results as a 'tool' role
151-
return {
152-
role: 'tool',
153-
content: msg.content,
154-
tool_call_id: msg.tool_use_id,
155-
};
156-
} else if (msg.role === 'tool_use') {
157-
// We'll convert tool_use to assistant messages with tool_calls
158-
return {
159-
role: 'assistant',
160-
content: '',
161-
tool_calls: [
188+
private formatMessages(messages: Message[]): OllamaMessage[] {
189+
const output: OllamaMessage[] = [];
190+
191+
messages.forEach((msg) => {
192+
switch (msg.role) {
193+
case 'user':
194+
case 'assistant':
195+
case 'system':
196+
output.push({
197+
role: msg.role,
198+
content: msg.content,
199+
} satisfies OllamaMessage);
200+
break;
201+
case 'tool_result':
202+
// Ollama expects tool results as a 'tool' role
203+
output.push({
204+
role: 'tool',
205+
content:
206+
typeof msg.content === 'string'
207+
? msg.content
208+
: JSON.stringify(msg.content),
209+
} as OllamaMessage);
210+
break;
211+
case 'tool_use': {
212+
// So there is an issue here is that ollama expects tool calls to be part of the assistant message
213+
// get last message and add tool call to it
214+
const lastMessage: OllamaMessage = output[output.length - 1]!;
215+
lastMessage.tool_calls = [
162216
{
163-
id: msg.id,
164-
name: msg.name,
165-
arguments: msg.content,
217+
function: {
218+
name: msg.name,
219+
arguments: JSON.parse(msg.content),
220+
},
166221
},
167-
],
168-
};
222+
];
223+
break;
224+
}
169225
}
170-
// Default fallback for unknown message types
171-
return {
172-
role: 'user',
173-
content: (msg as any).content || '',
174-
};
175226
});
227+
228+
return output;
176229
}
177230
}

0 commit comments

Comments (0)