Skip to content

Commit 586fe82

Browse files
committed
fix: redo ollama llm provider using ollama sdk
1 parent 738a84a commit 586fe82

File tree

8 files changed

+183
-127
lines changed

8 files changed

+183
-127
lines changed

mycoder.config.js

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -9,12 +9,13 @@ export default {
99
pageFilter: 'none', // 'simple', 'none', or 'readability'
1010

1111
// Model settings
12-
provider: 'anthropic',
13-
model: 'claude-3-7-sonnet-20250219',
12+
//provider: 'anthropic',
13+
//model: 'claude-3-7-sonnet-20250219',
1414
//provider: 'openai',
1515
//model: 'gpt-4o',
1616
//provider: 'ollama',
1717
//model: 'medragondot/Sky-T1-32B-Preview:latest',
18+
//model: 'llama3.2:3b',
1819
maxTokens: 4096,
1920
temperature: 0.7,
2021

packages/agent/CHANGELOG.md

Lines changed: 5 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1,23 +1,20 @@
11
# [mycoder-agent-v1.3.0](https://github.com/drivecore/mycoder/compare/mycoder-agent-v1.2.0...mycoder-agent-v1.3.0) (2025-03-12)
22

3-
43
### Features
54

6-
* implement MCP tools support ([2d99ac8](https://github.com/drivecore/mycoder/commit/2d99ac8cefaa770e368d469355a509739aafe6a3))
5+
- implement MCP tools support ([2d99ac8](https://github.com/drivecore/mycoder/commit/2d99ac8cefaa770e368d469355a509739aafe6a3))
76

87
# [mycoder-agent-v1.2.0](https://github.com/drivecore/mycoder/compare/mycoder-agent-v1.1.0...mycoder-agent-v1.2.0) (2025-03-12)
98

10-
119
### Bug Fixes
1210

13-
* Fix TypeScript errors in MCP implementation ([f5837d3](https://github.com/drivecore/mycoder/commit/f5837d3a5dd219efc8e1d811e467f4bb695a1d94))
14-
11+
- Fix TypeScript errors in MCP implementation ([f5837d3](https://github.com/drivecore/mycoder/commit/f5837d3a5dd219efc8e1d811e467f4bb695a1d94))
1512

1613
### Features
1714

18-
* Add basic Model Context Protocol (MCP) support ([8ec9619](https://github.com/drivecore/mycoder/commit/8ec9619c3cc63df8f14222762f5da0bcabe273a5))
19-
* **agent:** implement incremental resource cleanup for agent lifecycle ([576436e](https://github.com/drivecore/mycoder/commit/576436ef2c7c5f234f088b7dba2e7fd65590738f)), closes [#236](https://github.com/drivecore/mycoder/issues/236)
20-
* background tools is now scope to agents ([e55817f](https://github.com/drivecore/mycoder/commit/e55817f32b373fdbff8bb1ac90105b272044d33f))
15+
- Add basic Model Context Protocol (MCP) support ([8ec9619](https://github.com/drivecore/mycoder/commit/8ec9619c3cc63df8f14222762f5da0bcabe273a5))
16+
- **agent:** implement incremental resource cleanup for agent lifecycle ([576436e](https://github.com/drivecore/mycoder/commit/576436ef2c7c5f234f088b7dba2e7fd65590738f)), closes [#236](https://github.com/drivecore/mycoder/issues/236)
17+
- background tools is now scope to agents ([e55817f](https://github.com/drivecore/mycoder/commit/e55817f32b373fdbff8bb1ac90105b272044d33f))
2118

2219
# [mycoder-agent-v1.1.0](https://github.com/drivecore/mycoder/compare/mycoder-agent-v1.0.0...mycoder-agent-v1.1.0) (2025-03-12)
2320

packages/agent/package.json

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -52,6 +52,7 @@
5252
"chalk": "^5.4.1",
5353
"dotenv": "^16",
5454
"jsdom": "^26.0.0",
55+
"ollama": "^0.5.14",
5556
"openai": "^4.87.3",
5657
"playwright": "^1.50.1",
5758
"uuid": "^11",

packages/agent/src/core/executeToolCall.ts

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,10 @@ export const executeToolCall = async (
1414
): Promise<string> => {
1515
const tool = tools.find((t) => t.name === toolCall.name);
1616
if (!tool) {
17-
throw new Error(`No tool with the name '${toolCall.name}' exists.`);
17+
return JSON.stringify({
18+
error: true,
19+
message: `No tool with the name '${toolCall.name}' exists.`,
20+
});
1821
}
1922

2023
const logger = new Logger({

packages/agent/src/core/llm/providers/ollama.ts

Lines changed: 147 additions & 106 deletions
Original file line numberDiff line numberDiff line change
@@ -2,15 +2,24 @@
22
* Ollama provider implementation using the official Ollama npm package
33
*/
44

5-
import ollama, { Ollama, ChatResponse, Tool } from 'ollama';
5+
import {
6+
ChatRequest as OllamaChatRequest,
7+
ChatResponse as OllamaChatResponse,
8+
Ollama,
9+
ToolCall as OllamaTooCall,
10+
Tool as OllamaTool,
11+
Message as OllamaMessage,
12+
} from 'ollama';
13+
614
import { TokenUsage } from '../../tokens.js';
15+
import { ToolCall } from '../../types.js';
716
import { LLMProvider } from '../provider.js';
817
import {
9-
FunctionDefinition,
1018
GenerateOptions,
1119
LLMResponse,
1220
Message,
1321
ProviderOptions,
22+
FunctionDefinition,
1423
} from '../types.js';
1524

1625
/**
@@ -31,9 +40,9 @@ export class OllamaProvider implements LLMProvider {
3140

3241
constructor(model: string, options: OllamaOptions = {}) {
3342
this.model = model;
34-
const baseUrl =
35-
options.baseUrl ||
36-
process.env.OLLAMA_BASE_URL ||
43+
const baseUrl =
44+
options.baseUrl ||
45+
process.env.OLLAMA_BASE_URL ||
3746
'http://localhost:11434';
3847

3948
this.client = new Ollama({ host: baseUrl });
@@ -57,133 +66,165 @@ export class OllamaProvider implements LLMProvider {
5766
// Format messages for Ollama API
5867
const formattedMessages = this.formatMessages(messages);
5968

60-
try {
61-
// Prepare chat options
62-
const ollamaOptions: Record<string, any> = {
63-
temperature,
64-
};
65-
66-
// Add optional parameters if provided
67-
if (topP !== undefined) ollamaOptions.top_p = topP;
68-
if (frequencyPenalty !== undefined) ollamaOptions.frequency_penalty = frequencyPenalty;
69-
if (presencePenalty !== undefined) ollamaOptions.presence_penalty = presencePenalty;
70-
if (maxTokens !== undefined) ollamaOptions.num_predict = maxTokens;
71-
if (stopSequences && stopSequences.length > 0) ollamaOptions.stop = stopSequences;
72-
73-
// Prepare request parameters
74-
const requestParams: any = {
75-
model: this.model,
76-
messages: formattedMessages,
77-
stream: false,
78-
options: ollamaOptions,
69+
// Prepare request options
70+
const requestOptions: OllamaChatRequest = {
71+
model: this.model,
72+
messages: formattedMessages,
73+
stream: false,
74+
options: {
75+
temperature: temperature,
76+
...(topP !== undefined && { top_p: topP }),
77+
...(frequencyPenalty !== undefined && {
78+
frequency_penalty: frequencyPenalty,
79+
}),
80+
...(presencePenalty !== undefined && {
81+
presence_penalty: presencePenalty,
82+
}),
83+
...(stopSequences &&
84+
stopSequences.length > 0 && { stop: stopSequences }),
85+
},
86+
};
87+
88+
// Add max_tokens if provided
89+
if (maxTokens !== undefined) {
90+
requestOptions.options = {
91+
...requestOptions.options,
92+
num_predict: maxTokens,
7993
};
94+
}
8095

81-
// Add functions/tools if provided
82-
if (functions && functions.length > 0) {
83-
requestParams.tools = this.convertFunctionsToTools(functions);
84-
}
96+
// Add functions/tools if provided
97+
if (functions && functions.length > 0) {
98+
requestOptions.tools = this.convertFunctionsToTools(functions);
99+
}
85100

86-
// Make the API request using the Ollama client
87-
const response = await this.client.chat(requestParams);
101+
// Make the API request using the Ollama client
102+
const response: OllamaChatResponse = await this.client.chat({
103+
...requestOptions,
104+
stream: false,
105+
});
88106

89-
// Extract content from response
90-
const content = response.message?.content || '';
91-
92-
// Process tool calls if present
93-
const toolCalls = this.processToolCalls(response);
107+
// Extract content and tool calls
108+
const content = response.message?.content || '';
94109

95-
// Create token usage from response data
96-
const tokenUsage = new TokenUsage();
97-
if (response.prompt_eval_count) {
98-
tokenUsage.input = response.prompt_eval_count;
99-
}
100-
if (response.eval_count) {
101-
tokenUsage.output = response.eval_count;
102-
}
110+
// Handle tool calls if present
111+
const toolCalls = this.extractToolCalls(response);
103112

104-
return {
105-
text: content,
106-
toolCalls: toolCalls,
107-
tokenUsage: tokenUsage,
108-
};
109-
} catch (error) {
110-
throw new Error(`Error calling Ollama API: ${(error as Error).message}`);
111-
}
113+
// Create token usage from response data
114+
const tokenUsage = new TokenUsage();
115+
tokenUsage.output = response.eval_count || 0;
116+
tokenUsage.input = response.prompt_eval_count || 0;
117+
118+
return {
119+
text: content,
120+
toolCalls: toolCalls,
121+
tokenUsage: tokenUsage,
122+
};
112123
}
113124

125+
/*
126+
interface Tool {
127+
type: string;
128+
function: {
129+
name: string;
130+
description: string;
131+
parameters: {
132+
type: string;
133+
required: string[];
134+
properties: {
135+
[key: string]: {
136+
type: string;
137+
description: string;
138+
enum?: string[];
139+
};
140+
};
141+
};
142+
};
143+
}*/
144+
114145
/**
115-
* Convert our FunctionDefinition format to Ollama's Tool format
146+
* Convert our function definitions to Ollama tool format
116147
*/
117-
private convertFunctionsToTools(functions: FunctionDefinition[]): Tool[] {
118-
return functions.map((fn) => ({
119-
type: 'function',
120-
function: {
121-
name: fn.name,
122-
description: fn.description,
123-
parameters: fn.parameters,
124-
}
125-
}));
148+
private convertFunctionsToTools(
149+
functions: FunctionDefinition[],
150+
): OllamaTool[] {
151+
return functions.map(
152+
(fn) =>
153+
({
154+
type: 'function',
155+
function: {
156+
name: fn.name,
157+
description: fn.description,
158+
parameters: fn.parameters,
159+
},
160+
}) as OllamaTool,
161+
);
126162
}
127163

128164
/**
129-
* Process tool calls from the Ollama response
165+
* Extract tool calls from Ollama response
130166
*/
131-
private processToolCalls(response: ChatResponse): any[] {
132-
if (!response.message?.tool_calls || response.message.tool_calls.length === 0) {
167+
private extractToolCalls(response: OllamaChatResponse): ToolCall[] {
168+
if (!response.message?.tool_calls) {
133169
return [];
134170
}
135171

136-
return response.message.tool_calls.map((toolCall) => ({
137-
id: toolCall.function?.name
138-
? `tool-${Date.now()}-${Math.random().toString(36).substring(2, 11)}`
139-
: toolCall.id,
140-
name: toolCall.function?.name,
141-
content: JSON.stringify(toolCall.function?.arguments || {}),
142-
}));
172+
return response.message.tool_calls.map((toolCall: OllamaTooCall) => {
173+
//console.log('ollama tool call', toolCall);
174+
return {
175+
id: `tool-${Date.now()}-${Math.random().toString(36).substring(2, 11)}`,
176+
name: toolCall.function?.name,
177+
content:
178+
typeof toolCall.function?.arguments === 'string'
179+
? toolCall.function.arguments
180+
: JSON.stringify(toolCall.function?.arguments || {}),
181+
};
182+
});
143183
}
144184

145185
/**
146186
* Format messages for Ollama API
147187
*/
148-
private formatMessages(messages: Message[]): any[] {
149-
return messages.map((msg) => {
150-
if (
151-
msg.role === 'user' ||
152-
msg.role === 'assistant' ||
153-
msg.role === 'system'
154-
) {
155-
return {
156-
role: msg.role,
157-
content: msg.content,
158-
};
159-
} else if (msg.role === 'tool_result') {
160-
// Ollama expects tool results as a 'tool' role
161-
return {
162-
role: 'tool',
163-
content: msg.content,
164-
tool_call_id: msg.tool_use_id,
165-
};
166-
} else if (msg.role === 'tool_use') {
167-
// We'll convert tool_use to assistant messages with tool_calls
168-
return {
169-
role: 'assistant',
170-
content: '',
171-
tool_calls: [
188+
private formatMessages(messages: Message[]): OllamaMessage[] {
189+
const output: OllamaMessage[] = [];
190+
191+
messages.forEach((msg) => {
192+
switch (msg.role) {
193+
case 'user':
194+
case 'assistant':
195+
case 'system':
196+
output.push({
197+
role: msg.role,
198+
content: msg.content,
199+
} satisfies OllamaMessage);
200+
break;
201+
case 'tool_result':
202+
// Ollama expects tool results as a 'tool' role
203+
output.push({
204+
role: 'tool',
205+
content:
206+
typeof msg.content === 'string'
207+
? msg.content
208+
: JSON.stringify(msg.content),
209+
} as OllamaMessage);
210+
break;
211+
case 'tool_use': {
212+
// The issue here is that Ollama expects tool calls to be part of the assistant message,
214+
// so get the last message and append the tool call to it
214+
const lastMessage: OllamaMessage = output[output.length - 1]!;
215+
lastMessage.tool_calls = [
172216
{
173-
id: msg.id,
174217
function: {
175218
name: msg.name,
176-
arguments: msg.content,
177-
}
219+
arguments: JSON.parse(msg.content),
220+
},
178221
},
179-
],
180-
};
222+
];
223+
break;
224+
}
181225
}
182-
// Default fallback for unknown message types
183-
return {
184-
role: 'user',
185-
content: (msg as any).content || '',
186-
};
187226
});
227+
228+
return output;
188229
}
189-
}
230+
}

packages/agent/src/tools/getTools.ts

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -4,8 +4,7 @@ import { Tool } from '../core/types.js';
44
// Import tools
55
import { browseMessageTool } from './browser/browseMessage.js';
66
import { browseStartTool } from './browser/browseStart.js';
7-
import { agentMessageTool } from './interaction/agentMessage.js';
8-
import { agentStartTool } from './interaction/agentStart.js';
7+
import { subAgentTool } from './interaction/subAgent.js';
98
import { userPromptTool } from './interaction/userPrompt.js';
109
import { fetchTool } from './io/fetch.js';
1110
import { textEditorTool } from './io/textEditor.js';
@@ -31,8 +30,9 @@ export function getTools(options?: GetToolsOptions): Tool[] {
3130
// Force cast to Tool type to avoid TypeScript issues
3231
const tools: Tool[] = [
3332
textEditorTool as unknown as Tool,
34-
agentStartTool as unknown as Tool,
35-
agentMessageTool as unknown as Tool,
33+
subAgentTool as unknown as Tool,
34+
/*agentStartTool as unknown as Tool,
35+
agentMessageTool as unknown as Tool,*/
3636
sequenceCompleteTool as unknown as Tool,
3737
fetchTool as unknown as Tool,
3838
shellStartTool as unknown as Tool,

0 commit comments

Comments
 (0)