Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 10 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,16 @@

All notable changes to this project will be documented in this file.

## [v0.2.0] - 2025-11-05

### Added

- `feat(memory)`: Add persistent JSON memory to save and load chat history.

### Fixed

- `fix(memory)`: Use the correct `get_history()` method to save chat session.

## [v0.1.0] - 2025-11-02

### Added
Expand Down
34 changes: 23 additions & 11 deletions main.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,17 +20,29 @@ def run_terminal_chat():
print(f"{ASSISTANT_NAME} is online. Type 'exit' or 'quit' to end.")

while True:
try:
user_prompt = input(f"{USER_NAME}: ")
if user_prompt.lower() in EXIT_WORD_LIST:
print(f"Goodbye from {ASSISTANT_NAME}!")
break
print(f"{ASSISTANT_NAME}: ", end="", flush=True)
for chunk in assistant.send_message_stream(user_prompt):
print(chunk, end="", flush=True)
print()
except Exception as e:
print(f"\nAn error occurred: {e}")
try: # KeyboardInterrupt
try: # other errors
user_prompt = input(f"{USER_NAME}: ")

if user_prompt.lower() in EXIT_WORD_LIST:
assistant.save_history()
print(f"Goodbye from {ASSISTANT_NAME}!")
break

print(f"{ASSISTANT_NAME}: ", end="", flush=True)

for chunk in assistant.send_message_stream(user_prompt):
print(chunk, end="", flush=True)

print()

except Exception as e:
print(f"\nAn error occurred: {e}")

except KeyboardInterrupt:
assistant.save_history()
print(f"\n[Interrupted] Goodbye from {ASSISTANT_NAME}!")
break


if __name__ == "__main__":
Expand Down
54 changes: 53 additions & 1 deletion src/assistant.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,10 @@
import os
import sys
import json
from dotenv import load_dotenv
from google import genai
from google.genai import types
from src.config import MEMORY_FILE


class ChatAssistant:
Expand Down Expand Up @@ -39,9 +41,12 @@ def _initialize_client(self):

def _create_chat_session(self):
    """Create a new chat session, seeded with any persisted history.

    Returns:
        A google-genai chat session configured with the system prompt and
        pre-loaded with the turns restored by `_load_history()` (an empty
        list when no memory file exists).
    """
    # Restore prior conversation turns so the session resumes with context.
    history = self._load_history()
    gen_config = types.GenerateContentConfig(system_instruction=self.system_prompt)
    print(f"Initializing chat with {self.model_name}...")
    return self.client.chats.create(
        model=self.model_name, config=gen_config, history=history
    )

def send_message_stream(self, prompt: str):
"""sends a message and yields the response chunks."""
Expand All @@ -51,3 +56,50 @@ def send_message_stream(self, prompt: str):
yield chunk.text
except Exception as e:
print(f"\n[Stream Error: {e}]")

def _load_history(self):
    """Load chat history from the JSON memory file.

    Returns:
        list: Reconstructed `types.Content` turns (text parts only), or an
        empty list when the memory file is missing or unreadable.
    """
    if not os.path.exists(MEMORY_FILE):
        return []

    try:
        # Explicit UTF-8 so saved non-ASCII text round-trips on any platform.
        with open(MEMORY_FILE, "r", encoding="utf-8") as f:
            serializable_history = json.load(f)

        print(f"[Memory loaded from {MEMORY_FILE}]")

        # Reconstruct 'types.Content' objects (simplified for text-only).
        loaded_history = []
        for item in serializable_history:
            parts = [
                types.Part(text=part_data["text"]) for part_data in item["parts"]
            ]
            loaded_history.append(types.Content(role=item["role"], parts=parts))

        return loaded_history

    # Narrowed from bare Exception: cover unreadable file, malformed JSON,
    # and a JSON document that doesn't match the expected schema. A broken
    # memory file must not kill startup — deliberately start fresh instead.
    except (OSError, json.JSONDecodeError, KeyError, TypeError) as e:
        print(f"Error loading memory: {e}. Starting fresh.")
        return []

def save_history(self):
    """Save the current chat history to the JSON memory file.

    Only text parts are persisted (mirroring `_load_history`); turns that
    carry no text content are skipped entirely. Failures are reported but
    never raised — this runs on the shutdown path.
    """
    try:
        serializable_history = []
        for content in self.chat_session.get_history():
            # Keep only text parts; other part kinds aren't serialized here.
            parts_list = [
                {"text": part.text} for part in content.parts if part.text
            ]

            if parts_list:
                serializable_history.append(
                    {"role": content.role, "parts": parts_list}
                )

        # Explicit UTF-8 + ensure_ascii=False keeps non-ASCII chat text
        # human-readable in the memory file instead of \uXXXX escapes.
        with open(MEMORY_FILE, "w", encoding="utf-8") as f:
            json.dump(serializable_history, f, indent=4, ensure_ascii=False)

        print(f"\n[Memory saved to {MEMORY_FILE}]")

    except Exception as e:
        # Deliberately broad: a failed save (API error from get_history(),
        # disk full, permissions) must not crash the exit/interrupt path.
        print(f"\nError saving memory: {e}")
4 changes: 3 additions & 1 deletion src/config.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,11 @@
# Configuration constants for the terminal chat assistant.
ASSISTANT_NAME = "Vector"
MODEL_NAME = "gemini-2.5-flash"
USER_NAME = "Ubeyidah"
# Words that end the chat loop; "quite" also catches a common typo of "quit".
EXIT_WORD_LIST = ["exit", "quit", "bye", "goodbye", "stop", "end", "quite"]
SYSTEM_PROMPT = f"""
You are {ASSISTANT_NAME}, a helpful and friendly AI assistant.
You are speaking to your creator, {USER_NAME}.
Be calm, thoughtful, and slightly encouraging.
"""

# Path of the JSON file used for persistent chat memory.
MEMORY_FILE = "vector_memory.json"