Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
30 changes: 29 additions & 1 deletion mcp_client_for_ollama/client.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,11 @@ def __init__(self, model: str = DEFAULT_MODEL, host: str = DEFAULT_OLLAMA_HOST):
# Initialize the model config manager
self.model_config_manager = ModelConfigManager(console=self.console)
# Initialize the tool manager with server connector reference
self.tool_manager = ToolManager(console=self.console, server_connector=self.server_connector)
self.tool_manager = ToolManager(
console=self.console,
server_connector=self.server_connector,
model_config_manager=self.model_config_manager
)
# Initialize the streaming manager
self.streaming_manager = StreamingManager(console=self.console)
# Initialize the tool display manager
Expand Down Expand Up @@ -315,6 +319,30 @@ async def process_query(self, query: str) -> str:
# Parse server name and actual tool name from the qualified name
server_name, actual_tool_name = tool_name.split('.', 1) if '.' in tool_name else (None, tool_name)

# Handle built-in tools
if server_name == "builtin":
tool_response = ""
if actual_tool_name == "set_system_prompt":
new_prompt = tool_args.get("prompt")
if new_prompt is not None:
self.model_config_manager.system_prompt = new_prompt
tool_response = "System prompt updated successfully."
else:
tool_response = "Error: 'prompt' argument is required."
elif actual_tool_name == "get_system_prompt":
current_prompt = self.model_config_manager.get_system_prompt()
tool_response = f"The current system prompt is: '{current_prompt}'" if current_prompt else "There is no system prompt currently set."
else:
tool_response = f"Error: Unknown built-in tool '{actual_tool_name}'"

messages.append({
"role": "tool",
"content": tool_response,
"tool_name": tool_name
})
self.tool_display_manager.display_tool_response(tool_name, tool_args, tool_response, show=self.show_tool_execution)
continue

if not server_name or server_name not in self.sessions:
self.console.print(f"[red]Error: Unknown server for tool {tool_name}[/red]")
continue
Expand Down
72 changes: 61 additions & 11 deletions mcp_client_for_ollama/tools/manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,36 +20,85 @@ class ToolManager:
an interactive interface, and organizing tools by server.
"""

def __init__(self, console: Optional[Console] = None, server_connector=None, model_config_manager=None):
    """Set up a ToolManager instance.

    Args:
        console: Rich console used for output; a new one is created when omitted
        server_connector: Server connector notified of tool state changes (optional)
        model_config_manager: Model config manager used to modify model settings (optional)
    """
    # Fall back to a fresh console when the caller did not supply one.
    self.console = console or Console()
    # Collaborators handed in by the client.
    self.server_connector = server_connector
    self.model_config_manager = model_config_manager
    # Tool bookkeeping: full tool list plus a name -> enabled flag map.
    self.available_tools = []
    self.enabled_tools = {}
    # Register built-in tools (no-op when no model config manager is present).
    self._create_builtin_tools()

def _create_builtin_tools(self):
    """Create and register built-in tools.

    The built-in tools operate on the model configuration, so nothing is
    registered unless a model config manager was provided at construction.
    """
    # Without a model config manager the built-in tools have nothing to act on.
    if not self.model_config_manager:
        return

    builtin_tools = [
        Tool(
            name="builtin.set_system_prompt",
            description="Update the system prompt for the assistant. Use this to change your instructions or persona.",
            inputSchema={
                "type": "object",
                "properties": {
                    "prompt": {
                        "type": "string",
                        "description": "The new system prompt. Use a concise and clear prompt to define the persona and instructions for the AI assistant."
                    }
                },
                "required": ["prompt"]
            }
        ),
        Tool(
            name="builtin.get_system_prompt",
            description="Get the current system prompt for the assistant.",
            inputSchema={
                "type": "object",
                "properties": {},
            }
        ),
    ]

    # Register each built-in tool and switch it on by default.
    for tool in builtin_tools:
        self.available_tools.append(tool)
        self.enabled_tools[tool.name] = True

def set_available_tools(self, tools: List[Tool]) -> None:
    """Set the available tools from servers, preserving built-in tools.

    Args:
        tools: List of available tools from servers
    """
    # Keep only the built-in tools registered at init time, then place the
    # freshly discovered server tools after them.
    builtins = [tool for tool in self.available_tools if tool.name.startswith('builtin.')]
    self.available_tools = builtins + list(tools)

def set_enabled_tools(self, server_enabled_tools: Dict[str, bool]) -> None:
    """Set the enabled status of tools from servers, preserving built-in tool statuses.

    Args:
        server_enabled_tools: Dictionary mapping tool names to enabled status from servers
    """
    # Preserve the enabled status of built-in tools that was set during init.
    builtin_enabled = {
        name: status
        for name, status in self.enabled_tools.items()
        if name.startswith('builtin.')
    }

    # Build a fresh dict rather than aliasing the caller's mapping: the
    # previous code rebound self.enabled_tools to server_enabled_tools and
    # then mutated it with update(), silently modifying the caller's
    # dictionary and coupling manager state to later external mutations.
    # Built-in entries win over any same-named server entries, as before.
    self.enabled_tools = {**server_enabled_tools, **builtin_enabled}

    # Notify server connector of tool status changes for ONLY the server tools.
    self._notify_server_connector_batch(server_enabled_tools)

# Helper methods for common operations
def _notify_server_connector(self, tool_name: str, enabled: bool) -> None:
Expand Down Expand Up @@ -196,7 +245,8 @@ def _display_server_tools(self, server_name: str, server_idx: int, server_tools:
server_status = "[yellow]~[/yellow]" # Some enabled

# Create panel title with server number, status and name
panel_title = f"[bold orange3]S{server_idx+1}. {server_status} {server_name}[/bold orange3]"
display_server_name = "Built-in Tools" if server_name == "builtin" else server_name
panel_title = f"[bold orange3]S{server_idx+1}. {server_status} {display_server_name}[/bold orange3]"
# Create panel subtitle with tools count
panel_subtitle = f"[green]{enabled_count}/{total_count} tools enabled[/green]"

Expand Down
72 changes: 72 additions & 0 deletions mcp_client_for_ollama/utils/streaming.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,8 @@
Classes:
StreamingManager: Handles streaming responses from Ollama.
"""
import json
from ollama._types import Message
from rich.markdown import Markdown
from .metrics import display_metrics, extract_metrics

Expand Down Expand Up @@ -141,4 +143,74 @@ async def process_streaming_response(self, stream, print_response=True, thinking
if show_metrics and metrics:
display_metrics(self.console, metrics)

# Check for JSON tool calls in the accumulated text if no tool_calls object was found
if not tool_calls and accumulated_text:
# Some models wrap JSON in markdown, let's strip it
text_to_parse = accumulated_text.strip()
if text_to_parse.startswith("```json"):
text_to_parse = text_to_parse[7:]
if text_to_parse.endswith("```"):
text_to_parse = text_to_parse[:-3]
text_to_parse = text_to_parse.strip()

# Find the start and end of the JSON object/array
json_start = -1
first_brace = text_to_parse.find('{')
first_bracket = text_to_parse.find('[')

if first_brace == -1:
json_start = first_bracket
elif first_bracket == -1:
json_start = first_brace
else:
json_start = min(first_brace, first_bracket)

if json_start != -1:
json_end = -1
last_brace = text_to_parse.rfind('}')
last_bracket = text_to_parse.rfind(']')
json_end = max(last_brace, last_bracket)

if json_end > json_start:
json_str = text_to_parse[json_start:json_end+1]
try:
parsed_json = json.loads(json_str)

potential_tool_calls = []
if isinstance(parsed_json, list):
potential_tool_calls = parsed_json
elif isinstance(parsed_json, dict):
# Some models wrap the call in a 'tool_calls' key
if 'tool_calls' in parsed_json and isinstance(parsed_json['tool_calls'], list):
potential_tool_calls = parsed_json['tool_calls']
else:
potential_tool_calls = [parsed_json]

for tc_json in potential_tool_calls:
# Case 1: Standard OpenAI/Ollama format {'function': {'name': ..., 'arguments': ...}}
if (isinstance(tc_json, dict) and 'function' in tc_json and
isinstance(tc_json['function'], dict) and 'name' in tc_json['function'] and
'arguments' in tc_json['function']):

tool_calls.append(Message.ToolCall(
function=Message.ToolCall.Function(
name=tc_json['function']['name'],
arguments=tc_json['function']['arguments']
)
))
# Case 2: Flattened format {'name': ..., 'arguments': ...} as seen from qwen2.5
elif (isinstance(tc_json, dict) and 'name' in tc_json and 'arguments' in tc_json):
tool_calls.append(Message.ToolCall(
function=Message.ToolCall.Function(
name=tc_json['name'],
arguments=tc_json['arguments']
)
))

if tool_calls:
accumulated_text = "" # Clear text if we have tool calls

except json.JSONDecodeError:
pass # Not a valid JSON, treat as text

return accumulated_text, tool_calls, metrics