Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions .env.example
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
SERPER_API_KEY=
OPENAI_API_KEY=
TAVILY_API_KEY=
15 changes: 14 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
@@ -1,3 +1,16 @@
*.pyc
# ---------------------------
# Python
# ---------------------------
__pycache__/
*.py[cod]
*.pyo
*.pyd
*.so
*.egg
*.egg-info/
dist/
build/
.eggs/

.env

53 changes: 53 additions & 0 deletions main.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,53 @@
import streamlit as st
from streamlit_option_menu import option_menu

# ---- CONFIG ----
st.set_page_config(page_title="Card Navigation App", layout="wide")

# Page names mapped to the emoji shown in their title headers.
_PAGE_EMOJI = {
    "Page 1": "📘",
    "Page 2": "📙",
    "Page 3": "📗",
    "Page 4": "📕",
}

# ---- SIDEBAR MENU ----
with st.sidebar:
    selected = option_menu(
        "Navigation",
        ["Home"] + list(_PAGE_EMOJI),
        icons=["house", "1-circle", "2-circle", "3-circle", "4-circle"],
        menu_icon="list",
        default_index=0,
    )

# ---- MAIN CONTENT ----
if selected == "Home":
    st.markdown(
        "<h2 style='text-align:center;'>Choose a Page</h2>", unsafe_allow_html=True
    )

    cols = st.columns(4, gap="large")

    # Four cards in the center, one column per page.
    for column, page_name in zip(cols, _PAGE_EMOJI):
        with column:
            if st.button(page_name, use_container_width=True):
                # NOTE(review): this key is written but never read back —
                # navigation is driven solely by the sidebar option_menu,
                # so clicking a card currently has no visible effect. Confirm
                # whether card clicks were meant to switch pages.
                st.session_state["selected"] = page_name

elif selected in _PAGE_EMOJI:
    st.title(f"{_PAGE_EMOJI[selected]} {selected}")
    st.write(f"Welcome to {selected} content")
10 changes: 10 additions & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -12,13 +12,19 @@ authors = [

dependencies = [
"black>=25.1.0",
"ddgs>=9.5.2",
"duckduckgo-search>=8.1.1",
"langchain>=0.3.26",
"langchain-community>=0.3.27",
"langchain-ollama>=0.3.3",
"langchain-tavily>=0.2.11",
"langgraph>=0.6.4",
"pytest>=8.4.1",
"pytest-mock>=3.14.1",
"python-dotenv>=1.1.0",
"ruff>=0.12.0",
"streamlit>=1.46.0",
"streamlit-option-menu>=0.4.0",
]

classifiers = [
Expand All @@ -37,3 +43,7 @@ addopts = "-ra -q"
testpaths = ["tests"]
pythonpath = [".", "src"]


[tool.ruff.lint]
extend-ignore = ["F401"]
per-file-ignores = { "__init__.py" = ["F401"] }
Binary file modified src/__pycache__/__init__.cpython-312.pyc
Binary file not shown.
2 changes: 2 additions & 0 deletions src/agents/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
from src.agents.weather_assistant import weather_assistant_agen_config
from src.agents.agent import Agent
14 changes: 14 additions & 0 deletions src/agents/agent.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
from langchain_core.tools.base import BaseTool
from typing import List


class Agent:
def __init__(
self,
prompt: str,
name: str = None,
tools: list[BaseTool] = [],
):
self.system_prompt: str = prompt
self.tools: List[BaseTool] = tools
self.name: str = name
13 changes: 13 additions & 0 deletions src/agents/weather_assistant.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
from src.agents.agent import Agent
from src.tools import current_weather_tool, weather_alerts_tool


# Agent configuration for the weather assistant.
# NOTE(review): the exported name keeps the existing "agen_config" spelling
# because src/agents/__init__.py imports it under exactly that name.
weather_assistant_agen_config = Agent(
    name="Weather_Assistant_Agent",
    tools=[current_weather_tool, weather_alerts_tool],
    # Fixed prompt typo: "lways" -> "Always" (the truncated word weakened the
    # instruction that forces a tool call on location queries).
    prompt="""You are a helpful weather assistant.
You should identify for which countries weather information is needed from the user messages {messages}.
Use only the country name or postal code if there is any in user message, for tool calling.
Always call the weather tool when you detect a location query. Do not stop after reasoning; fetch and return the weather in the same response.
Also fetch if there is any weather alerts.""",
)
252 changes: 252 additions & 0 deletions src/bins/logs/app.log

Large diffs are not rendered by default.

4 changes: 4 additions & 0 deletions src/components/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
from src.components.sidebar import sidebar


__all__ = ["sidebar"]
13 changes: 13 additions & 0 deletions src/components/sidebar.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
import streamlit as st


def sidebar(get_ollama_names, update_model):
    """Render the app sidebar: a model picker inside a settings expander.

    Args:
        get_ollama_names: zero-argument callable returning available model names.
        update_model: callback fired when the selectbox value changes; the
            chosen value lands in ``st.session_state["selected_model"]``.
    """
    with st.sidebar:
        settings = st.expander("⚙️ Model Settings", expanded=True)
        with settings:
            st.selectbox(
                "Select LLM model",
                options=get_ollama_names(),
                key="selected_model",
                on_change=update_model,
            )
        st.divider()
1 change: 1 addition & 0 deletions src/constants/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
from src.constants.variables import WEATHER_API_KEY, WEATHER_BASE_URL, OPENAI_KEY
9 changes: 9 additions & 0 deletions src/constants/variables.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
import os

from dotenv import load_dotenv

# Pull variables from a local .env file into the process environment before
# any of the constants below are read.
load_dotenv()

# Weather API configuration. Both default to "" so the annotated `str` type
# holds even when the variable is unset (previously WEATHER_BASE_URL could be
# None despite its str annotation); callers must treat "" as "not configured".
WEATHER_API_KEY: str = os.getenv("WEATHER_API_KEY", "")
WEATHER_BASE_URL: str = os.getenv("WEATHER_BASE_URL", "")

# NOTE(review): .env.example declares OPENAI_API_KEY, but this reads
# OPENAI_KEY — confirm which variable name is intended.
OPENAI_KEY: str = os.getenv("OPENAI_KEY", "")
2 changes: 2 additions & 0 deletions src/llm/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,3 +3,5 @@
contact: joshanjohn2003@gmail.com
subjected to copyright@2025
"""

from src.llm.llm import LLM
Binary file removed src/llm/__pycache__/llm.cpython-312.pyc
Binary file not shown.
17 changes: 11 additions & 6 deletions src/llm/llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,21 +5,26 @@
"""

from langchain_ollama import ChatOllama
from src.utils import logger


class LLM:
    """Thin wrapper around ChatOllama: holds a model name and generates replies.

    The pasted diff interleaved pre- and post-change lines (a call to the
    removed ``load_model``, duplicate ``def run``/``def __call__`` headers);
    this is the reconstructed post-change class (11 additions / 6 deletions).
    """

    def __init__(self, model: str) -> None:
        # Only the model name is stored; the ChatOllama client is created
        # lazily on each call via get_llm_model().
        self.model: str = model

    def get_llm_model(self) -> ChatOllama:
        """Build and return a ChatOllama client for the configured model.

        Raises:
            ValueError: if no model name was provided.
        """
        if not self.model:
            logger.error("Model name is required to load ChatOllama.")
            raise ValueError("Model name is required to load ChatOllama.")
        logger.info(f"LLM = {self.model}")
        return ChatOllama(model=self.model)

    def __call__(self, msg: str) -> str:
        """Generate a response for *msg*.

        Returns the model's reply content, or a logged error string when the
        input message is empty (deliberate best-effort, not an exception).
        """
        if not msg:
            _error = "Failed to generate LLM response"
            logger.error(_error)
            return _error

        llm = self.get_llm_model()
        logger.debug(f"Model {llm.get_name()} loaded ... ")
        return llm.invoke(msg).content
153 changes: 89 additions & 64 deletions src/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,20 +6,38 @@

import streamlit as st
from utils.get_models import get_ollama_names
from src.workflows.workflow import WorkFlow
from src.workflows import Workflow
from src.components import sidebar
from dotenv import load_dotenv

load_dotenv()


def update_model():
    """Selectbox callback: persist the chosen model and rebuild the workflow."""
    chosen = st.session_state.selected_model
    st.session_state.model = chosen
    # A new model requires a freshly-initialized workflow instance.
    st.session_state.workflow = init_workflow(chosen)


def init_workflow(llm_model: str):
    """Create a Workflow bound to the given Ollama model name.

    (Removed diff residue: the superseded ``WorkFlow(llm=llm_model)`` line
    from the old revision had been pasted alongside the current call.)
    """
    return Workflow(llm_model)


def main():
col1, col2 = st.columns([3, 1])
def run_workflow(workflow: Workflow, query: str, messages: list) -> str:
    """Run the compiled workflow over the chat history and return the reply.

    Args:
        workflow: Workflow whose compiled graph will be invoked.
        query: the latest user prompt. Currently unused here — the caller has
            already appended it to *messages*; kept for interface stability.
        messages: full chat history, passed to the graph as its AgentState.

    Returns:
        Content of the final message produced by the graph, or "" when the
        resulting state holds no messages.
    """
    graph = workflow.get_workflow()

    # The graph consumes the whole history, not just the latest query.
    final_state = graph.invoke({"messages": messages})

    history = final_state["messages"]
    if not history:
        return ""
    return history[-1].content


def init_session_states():
# Initialize model and dropdown selection
if "model" not in st.session_state:
default_model = get_ollama_names()[0]
Expand All @@ -28,73 +46,80 @@ def main():
if "selected_model" not in st.session_state:
st.session_state.selected_model = st.session_state.model

# Initialize workflow
if "workflow" not in st.session_state:
st.session_state.workflow = init_workflow(st.session_state.model)

# Initialize chat history
if "messages" not in st.session_state:
st.session_state.messages = []

# UI - Left: Title + Model info
with col1:
st.title("Cllama")
st.write(f"Current model: `{st.session_state.model}`")

# UI - Right: Model selector
with col2:
st.selectbox(
"Select LLM model",
get_ollama_names(),
key="selected_model",
on_change=update_model,
)

# Display chat history
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])

# Chat input
prompt = st.chat_input("Ask me...")

if prompt:
# Show user input
with st.chat_message("user"):
st.markdown(prompt)
st.session_state.messages.append({"role": "user", "content": prompt})

# Process AI response
with st.spinner("AI is typing..."):
workflow = init_workflow(st.session_state.model)
response = workflow.invoke(prompt)

# Show assistant response
with st.chat_message("assistant"):
st.markdown(response)
st.session_state.messages.append({"role": "assistant", "content": response})

# Author credit footer at bottom-right
st.markdown(
"""
<style>
.footer-credit {
position: fixed;
bottom: 8px;
right: 16px;
font-size: 12px;
color: gray;
z-index: 9999;
}
.footer-credit a {
color: yellow;
text-decoration: none;
font-size: 14px;
}
</style>
<div class="footer-credit">
Author <a href="https://github.com/joshanjohn" target="_blank">Joshan John</a>
</div>
""",
unsafe_allow_html=True,
def main():
    """Entry point: render the Cllama chat UI and drive one chat turn per rerun."""

    # Seed session state (model, selection, workflow, message history) so the
    # widgets below always have values to read.
    init_session_states()

    # Sidebar model picker; changing the selection rebuilds the workflow via
    # the update_model callback.
    sidebar(
        get_ollama_names=get_ollama_names,
        update_model=update_model,
    )

    st.title("Cllama")
    st.write(f"Current model: `{st.session_state.model}`")

    with st.container():
        # Chat input (returns None until the user submits a prompt).
        prompt = st.chat_input("Ask me...")

        # Display chat history accumulated in session state.
        for message in st.session_state.messages:
            with st.chat_message(message["role"]):
                st.markdown(message["content"])

        if prompt:
            # Show user input immediately, then record it so the workflow
            # (and the next rerun's history loop) sees it.
            with st.chat_message("user"):
                st.markdown(prompt)
            st.session_state.messages.append({"role": "user", "content": prompt})

            # Process AI response: reuse the session-scoped workflow rather
            # than rebuilding it per message.
            with st.spinner("AI is typing..."):
                response = run_workflow(
                    st.session_state.workflow,
                    prompt,
                    st.session_state.messages,
                )

            # Show assistant response and persist it in the history.
            with st.chat_message("assistant"):
                st.markdown(response)
            st.session_state.messages.append({"role": "assistant", "content": response})

    # Author credit footer pinned to the bottom-right of the viewport.
    st.markdown(
        """
        <style>
        .footer-credit {
            position: fixed;
            bottom: 8px;
            right: 16px;
            font-size: 12px;
            color: gray;
            z-index: 9999;
        }
        .footer-credit a {
            color: green;
            text-decoration: none;
            font-size: 14px;
        }
        </style>
        <div class="footer-credit">
            Author <a href="https://github.com/joshanjohn" target="_blank">Joshan John</a>
        </div>
        """,
        unsafe_allow_html=True,
    )


if __name__ == "__main__":
    main()
1 change: 1 addition & 0 deletions src/states/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
from src.states.agent_state import AgentState
Loading