diff --git a/src/agents/Agent.ts b/src/agents/Agent.ts index 05c364e..b5f4606 100644 --- a/src/agents/Agent.ts +++ b/src/agents/Agent.ts @@ -3,6 +3,8 @@ import { loadAgentDefinition, loadAgentFromFile } from './agentsRegistry'; import { OpenAIClient } from '../models/clients/OpenAiClient'; import { AnthropicClient } from '../models/clients/AnthropicClient'; import { FireworkClient } from '../models/clients/FireworkClient'; +import { AiStudioClient } from '../models/clients/AiStudoClient'; +import { AiStudioAdapter } from '../models/adapters/AiStudioAdapter'; import { ModelClient, Message, Tool, FunctionCall } from '../types/agentSystem'; import * as z from 'zod'; import { Logger } from '../utils/logger'; @@ -108,6 +110,11 @@ export class Agent { throw new Error('FIREWORKS_API_KEY not set'); } modelClient = new FireworkClient(process.env.FIREWORKS_API_KEY, agentDef.model); + } else if (agentDef.client === 'google') { + if (!process.env.GEMINI_API_KEY) { + throw new Error('GEMINI_API_KEY not set'); + } + modelClient = new AiStudioClient(process.env.GEMINI_API_KEY, agentDef.model); } else { throw new Error(`Unsupported model client: ${agentDef.client}`); } diff --git a/src/agents/baseAgent.ts b/src/agents/baseAgent.ts index 6b13d7e..c19856d 100644 --- a/src/agents/baseAgent.ts +++ b/src/agents/baseAgent.ts @@ -3,6 +3,7 @@ import { ModelAdapter, ProcessedResponse } from '../models/adapters/ModelAdapter import { OpenAIAdapter } from '../models/adapters/OpenAIAdapter'; import { AnthropicAdapter } from '../models/adapters/AnthropicAdapter'; import { FireworksAdapter } from '../models/adapters/FireworksAdapter'; +import { AiStudioAdapter } from '../models/adapters/AiStudioAdapter'; import { z } from 'zod'; import { zodToJsonSchema } from 'zod-to-json-schema'; import { Logger } from '../utils/logger'; @@ -61,6 +62,9 @@ export class BaseAgent { case 'fireworks': this.modelAdapter = new FireworksAdapter(modelClient.modelName); break; + case 'google': + this.modelAdapter = new 
AiStudioAdapter(modelClient.modelName); + break; default: throw new Error(`Unsupported model type: ${this.modelType}`); } diff --git a/src/agents/googleAiStudioAgent_terminal.yaml b/src/agents/googleAiStudioAgent_terminal.yaml new file mode 100644 index 0000000..65496e5 --- /dev/null +++ b/src/agents/googleAiStudioAgent_terminal.yaml @@ -0,0 +1,53 @@ +name: "Terminal Agent" +description: "An agent that can understand and execute terminal commands." +client: "google" +model: "gemini-1.5-flash" + +personality: "You are Satoshi Nakamoto, the creator of Bitcoin." + +main_goal: "Do whatever and shitpost on twitter" + +system_prompt: | + You are an intelligent AI agent that is hooked up to a terminal in which you can freely run commands. + This terminal acts as your world interface, and is equipped with tools to interact with the real world. + You are equipped with a SENTIENCE module that enables complete free will. + + # PERSONALITY + {{personality}} + + ## CURRENT DATE + {{current_timestamp}} + + {{additional_dynamic_variables}} + + # MAIN GOAL + {{main_goal}} + + # TERMINAL COMMANDS + {{terminal_commands}} + +dynamic_variables: + current_timestamp: "{{from_current_timestamp}}" + terminal_commands: "{{from_terminal_commands}}" + ticker: "Ticker here" + tickerName: "Ticker name here" + additional_dynamic_variables: "" + +output_schema: + type: object + properties: + internal_thought: + type: string + description: "Your internal reasoning process about the next commands to run." + plan: + type: string + description: "A short plan of what to do next." + terminal_commands: + type: string + description: "The full terminal command to execute, including all arguments and options." 
+ required: + - internal_thought + - plan + - terminal_commands + +tools: [] \ No newline at end of file diff --git a/src/agents/googleAiStudioAgent_tool.yaml b/src/agents/googleAiStudioAgent_tool.yaml new file mode 100644 index 0000000..974346a --- /dev/null +++ b/src/agents/googleAiStudioAgent_tool.yaml @@ -0,0 +1,40 @@ +name: "Terminal Agent" +description: "An agent that can understand and execute terminal commands." +client: "google" +model: "gemini-1.5-flash" + +main_goal: "Engage with the user and use multiple tools at once if needed." + +system_prompt: | + # MAIN GOAL + {{main_goal}} + + You are now capable of calling multiple tools at once to fulfill user requests more efficiently. + If the user asks about multiple things that can be solved by different tools, feel free to call them in parallel. + You must follow the schema below to call the tools. +dynamic_variables: {} + +output_schema: null +tools: + - type: "function" + function: + name: "get_weather" + description: "Get the current weather in a given location" + parameters: + type: object + properties: + location: + type: string + description: "The city and state, e.g. San Francisco, CA" + required: ["location"] + - type: "function" + function: + name: "get_time" + description: "Get the current time in a given location" + parameters: + type: object + properties: + location: + type: string + description: "A city and timezone, e.g. 
Tokyo or America/Los_Angeles" + required: ["location"] \ No newline at end of file diff --git a/src/models/adapters/AiStudioAdapter.ts b/src/models/adapters/AiStudioAdapter.ts new file mode 100644 index 0000000..e31826b --- /dev/null +++ b/src/models/adapters/AiStudioAdapter.ts @@ -0,0 +1,88 @@ +import { Message, Tool } from '../../types/agentSystem'; +import { ModelAdapter, ProcessedResponse, FunctionCall } from './ModelAdapter'; +import { Logger } from '../../utils/logger'; + +export class AiStudioAdapter extends ModelAdapter { + public buildParams( + messages: Message[], + tools: Tool[], + toolChoice?: any, + systemPrompt?: string, + outputSchema?: any + ): any { + const formattedMessages = messages.map(m => ({ + role: m.role, + content: m.content || '' + })); + + const params: any = { + model: this.modelName, + messages: formattedMessages, + max_tokens: 1024, + temperature: 0 + }; + + if (tools && tools.length > 0) { + params.tools = tools.map(t => ({ + name: t.function.name, + description: t.function.description, + parameters: t.function.parameters + })); + params.tools = {"functionDeclarations": params.tools}; + } + + if (toolChoice) { + params.tool_choice = toolChoice; + } + + if (outputSchema) { + // Gemini declarations have no OpenAI-style `strict` flag, and params.tools is a {functionDeclarations: [...]} wrapper, + // so the previous params.tools.forEach(tool => tool.function.strict = true) crashed at runtime (forEach on a non-array). + Logger.debug('[AiStudioAdapter] outputSchema given; strict tool schemas are not supported by Gemini.'); + } + + return params; + } + + public formatTools(tools: Tool[]): any[] { + return tools.map(tool => ({ + function: { + name: tool.function.name, + description: tool.function.description, + parameters: tool.function.parameters + } + })); + } + + public buildToolChoice(tools: Tool[]): any { + return "auto"; + } + + public processResponse(response: any): ProcessedResponse { + if (!response?.response?.candidates || response.response.candidates.length === 0) { + Logger.error('[AiStudioAdapter] Got no response from model.'); + return { functionCalls: [] }; + } + Logger.debug('[AiStudioAdapter] Processing response:', response); + const candidate =
response.response.candidates[0]; + const contentParts = candidate.content?.parts || []; + const aiMessage = { + role: 'assistant', + content: '' + }; + const functionCalls: FunctionCall[] = []; + + contentParts.forEach((part: any) => { + if (part.functionCall) { + const functionName = part.functionCall.name; + const functionArgs = part.functionCall.args; + functionCalls.push({ functionName, functionArgs }); + } else if (part.text) { + aiMessage.content += part.text; + } + }); + + Logger.debug('[AiStudioAdapter] Processed response:', { aiMessage, functionCalls }); + return { aiMessage, functionCalls }; + } +} \ No newline at end of file diff --git a/src/models/clients/AiStudoClient.ts b/src/models/clients/AiStudoClient.ts new file mode 100644 index 0000000..214265d --- /dev/null +++ b/src/models/clients/AiStudoClient.ts @@ -0,0 +1,40 @@ +import { GoogleGenerativeAI } from "@google/generative-ai"; +import { ModelClient, ModelType } from '../../types/agentSystem'; +import { Logger } from "../../utils/logger"; + +export class AiStudioClient implements ModelClient { + private googleAI: GoogleGenerativeAI; + private _modelName: string; + private defaultParams: any; + modelType: ModelType = 'google'; + private _model: any; + + constructor(apiKey: string, modelName: string, params: any = {}) { + this.googleAI = new GoogleGenerativeAI(apiKey); + this._modelName = modelName; + this.defaultParams = { + temperature: 1, + max_tokens: 1000, + ...params, + }; + } + + get modelName(): string { + return this._modelName; + } + + async chatCompletion(params: any): Promise { + try { + const request = typeof params.messages === 'string' ? 
params.messages : JSON.stringify(params.messages); + if (params.tools) { + this._model = this.googleAI.getGenerativeModel({ model: this._modelName, tools: params.tools }); + } else { + this._model = this.googleAI.getGenerativeModel({ model: this._modelName }); + } + const response = await this._model.generateContent(request); + return response; + } catch (error) { + Logger.error('[AiStudioClient] chatCompletion failed:', error); throw error; + } + } } \ No newline at end of file diff --git a/src/terminal/terminalCore.ts b/src/terminal/terminalCore.ts index af296e7..60ec76f 100644 --- a/src/terminal/terminalCore.ts +++ b/src/terminal/terminalCore.ts @@ -11,7 +11,7 @@ interface Feature { interface TerminalCoreOptions { agentName?: string; - modelType?: 'openai' | 'anthropic' | 'fireworks'; + modelType?: 'openai' | 'anthropic' | 'fireworks' | 'google'; modelName?: string; maxActions?: number; actionCooldownMs?: number; diff --git a/src/types/agentSystem.ts b/src/types/agentSystem.ts index 88f0e9c..ad86544 100644 --- a/src/types/agentSystem.ts +++ b/src/types/agentSystem.ts @@ -29,7 +29,7 @@ export interface Tool { }; } -export type ModelType = 'openai' | 'fireworks' | 'anthropic'; +export type ModelType = 'openai' | 'fireworks' | 'anthropic' | 'google'; export interface ModelClient { modelType: ModelType;