2 changes: 1 addition & 1 deletion .github/workflows/image_smoke.yml
@@ -92,7 +92,7 @@ jobs:
# Upload security results to GitHub Security tab
- name: Upload Trivy Results to GitHub Security
if: matrix.build.name == 'aio'
uses: github/codeql-action/upload-sarif@v3
uses: github/codeql-action/upload-sarif@v4
with:
sarif_file: trivy-results-aio.sarif
category: trivy-aio
2 changes: 1 addition & 1 deletion .pylintrc
@@ -52,7 +52,7 @@ ignore=CVS,.venv
# ignore-list. The regex matches against paths and can be in Posix or Windows
# format. Because '\\' represents the directory delimiter on Windows systems,
# it can't be used as an escape character.
ignore-paths=.*[/\\]wip[/\\].*,src/client/mcp,docs/themes/relearn,docs/public,docs/static/demoware,src/server/agents/chatbot.py
ignore-paths=.*[/\\]wip[/\\].*,src/client/mcp,docs/themes/relearn,docs/public,docs/static/demoware,src/server/agents

# Files or directories matching the regular expression patterns are skipped.
# The regex matches against base names, not paths. The default value ignores
19 changes: 17 additions & 2 deletions pytest.ini
@@ -4,7 +4,22 @@
; spell-checker: disable

[pytest]
pythonpath = src
pythonpath = src tests
addopts = --disable-warnings --import-mode=importlib
filterwarnings =
ignore::DeprecationWarning
asyncio_default_fixture_loop_scope = function
asyncio_default_fixture_loop_scope = function

; Test markers for selective test execution
; Usage examples:
; pytest -m "unit" # Run only unit tests
; pytest -m "integration" # Run only integration tests
; pytest -m "not slow" # Skip slow tests
; pytest -m "not db" # Skip tests requiring database
; pytest -m "unit and not slow" # Fast unit tests only
markers =
unit: Unit tests (mocked dependencies, fast execution)
integration: Integration tests (real components, may require external services)
slow: Slow tests (deselect with '-m "not slow"')
db: Tests requiring Oracle database container (deselect with '-m "not db"')
db_container: Alias for db marker - tests requiring database container
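The usage comments above cover command-line selection; as a minimal sketch of how a test module might opt into the new markers (the test names and bodies below are hypothetical, not part of this change):

```python
import pytest


@pytest.mark.unit
def test_normalize_alias():
    # Fast, fully mocked check -- selectable with `pytest -m "unit"`.
    assert "db".upper() == "DB"


@pytest.mark.integration
@pytest.mark.db
def test_database_round_trip():
    # Needs the Oracle database container; skipped with `pytest -m "not db"`.
    pytest.skip("placeholder: would exercise the database container")
```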
4 changes: 2 additions & 2 deletions src/client/content/chatbot.py
@@ -16,7 +16,7 @@
from streamlit import session_state as state

from client.content.config.tabs.models import get_models
from client.utils import st_common, api_call, client, vs_options
from client.utils import st_common, api_call, client, vs_options, tool_options
from client.utils.st_footer import render_chat_footer
from common import logging_config

@@ -82,7 +82,7 @@ def setup_sidebar():
st.stop()

state.enable_client = True
st_common.tools_sidebar()
tool_options.tools_sidebar()
st_common.history_sidebar()
st_common.ll_sidebar()
vs_options.vector_search_sidebar()
4 changes: 2 additions & 2 deletions src/client/content/testbed.py
@@ -17,7 +17,7 @@

from client.content.config.tabs.models import get_models

from client.utils import st_common, api_call, vs_options
from client.utils import st_common, api_call, vs_options, tool_options

from common import logging_config

@@ -496,7 +496,7 @@ def render_evaluation_ui(available_ll_models: list) -> None:

st.subheader("Q&A Evaluation", divider="red")
st.info("Use the sidebar settings for chatbot evaluation parameters", icon="⬅️")
st_common.tools_sidebar()
tool_options.tools_sidebar()
st_common.ll_sidebar()
vs_options.vector_search_sidebar()
st.write("Choose a model to judge the correctness of the chatbot answer, then start evaluation.")
87 changes: 38 additions & 49 deletions src/client/utils/api_call.py
@@ -42,19 +42,6 @@ def sanitize_sensitive_data(data):
return data


def _handle_json_response(response, method: str):
"""Parse JSON response and handle parsing errors."""
try:
data = response.json()
logger.debug("%s Data: %s", method, data)
return response
except (json.JSONDecodeError, ValueError) as json_ex:
error_msg = f"Server returned invalid JSON response. Status: {response.status_code}"
logger.error("Response text: %s", response.text[:500])
error_msg += f". Response preview: {response.text[:200]}"
raise ApiError(error_msg) from json_ex


def _handle_http_error(ex: requests.exceptions.HTTPError):
"""Extract error message from HTTP error response."""
try:
@@ -66,6 +53,12 @@ def _handle_http_error(ex: requests.exceptions.HTTPError):
return failure


def _error_response(message: str) -> None:
"""Display error to user and raise ApiError."""
st.error(f"API Error: {message}")
raise ApiError(message)


def send_request(
method: str,
endpoint: str,
@@ -75,68 +68,65 @@
retries: int = 3,
backoff_factor: float = 2.0,
) -> dict:
"""Send API requests with retry logic."""
"""Send API requests with retry logic. Returns JSON response or error dict."""
method_map = {"GET": requests.get, "POST": requests.post, "PATCH": requests.patch, "DELETE": requests.delete}
if method not in method_map:
return _error_response(f"Unsupported HTTP method: {method}")

url = urljoin(f"{state.server['url']}:{state.server['port']}/", endpoint)
payload = payload or {}
token = state.server["key"]
headers = {"Authorization": f"Bearer {token}"}
# Send client in header if it exists
headers = {"Authorization": f"Bearer {state.server['key']}"}
if getattr(state, "client_settings", {}).get("client"):
headers["Client"] = state.client_settings["client"]

method_map = {"GET": requests.get, "POST": requests.post, "PATCH": requests.patch, "DELETE": requests.delete}

if method not in method_map:
raise ApiError(f"Unsupported HTTP method: {method}")

args = {
args = {k: v for k, v in {
"url": url,
"headers": headers,
"timeout": timeout,
"params": params,
"files": payload.get("files") if method == "POST" else None,
"json": payload.get("json") if method in ["POST", "PATCH"] else None,
}
args = {k: v for k, v in args.items() if v is not None}
# Avoid logging out binary data in files
}.items() if v is not None}

log_args = sanitize_sensitive_data(args.copy())
try:
if log_args.get("files"):
log_args["files"] = [(field_name, (f[0], "<binary_data>", f[2])) for field_name, f in log_args["files"]]
except (ValueError, IndexError):
pass
logger.info("%s Request: %s", method, log_args)

result = None
for attempt in range(retries + 1):
try:
response = method_map[method](**args)
logger.info("%s Response: %s", method, response)
response.raise_for_status()
return _handle_json_response(response, method)
result = response.json()
logger.debug("%s Data: %s", method, result)
break

except requests.exceptions.HTTPError as ex:
logger.error("HTTP Error: %s", ex)
raise ApiError(_handle_http_error(ex)) from ex
_error_response(_handle_http_error(ex))

except requests.exceptions.ConnectionError as ex:
logger.error("Attempt %d: Connection Error: %s", attempt + 1, ex)
if attempt < retries:
sleep_time = backoff_factor * (2**attempt)
logger.info("Retrying in %.1f seconds...", sleep_time)
time.sleep(sleep_time)
time.sleep(backoff_factor * (2**attempt))
continue
raise ApiError(f"Connection failed after {retries + 1} attempts: {str(ex)}") from ex
_error_response(f"Connection failed after {retries + 1} attempts")

except requests.exceptions.RequestException as ex:
logger.error("Request Error: %s", ex)
raise ApiError(f"Request failed: {str(ex)}") from ex
except (requests.exceptions.RequestException, json.JSONDecodeError, ValueError) as ex:
logger.error("Request/JSON Error: %s", ex)
_error_response(f"Request failed: {str(ex)}")

raise ApiError("An unexpected error occurred.")
return result if result is not None else _error_response("An unexpected error occurred.")


def get(endpoint: str, params: Optional[dict] = None, retries: int = 3, backoff_factor: float = 2.0) -> json:
def get(endpoint: str, params: Optional[dict] = None, retries: int = 3, backoff_factor: float = 2.0) -> dict:
"""GET Requests"""
response = send_request("GET", endpoint, params=params, retries=retries, backoff_factor=backoff_factor)
return response.json()
return send_request("GET", endpoint, params=params, retries=retries, backoff_factor=backoff_factor)


def post(
@@ -146,9 +136,9 @@ def post(
timeout: int = 60,
retries: int = 5,
backoff_factor: float = 1.5,
) -> json:
) -> dict:
"""POST Requests"""
response = send_request(
return send_request(
"POST",
endpoint,
params=params,
Expand All @@ -157,7 +147,6 @@ def post(
retries=retries,
backoff_factor=backoff_factor,
)
return response.json()


def patch(
@@ -168,9 +157,9 @@ def patch(
retries: int = 5,
backoff_factor: float = 1.5,
toast=True,
) -> None:
) -> dict:
"""PATCH Requests"""
response = send_request(
result = send_request(
"PATCH",
endpoint,
params=params,
Expand All @@ -182,13 +171,13 @@ def patch(
if toast:
st.toast("Update Successful.", icon="✅")
time.sleep(1)
return response.json()
return result


def delete(endpoint: str, timeout: int = 60, retries: int = 5, backoff_factor: float = 1.5, toast=True) -> None:
def delete(endpoint: str, timeout: int = 60, retries: int = 5, backoff_factor: float = 1.5, toast=True) -> dict:
"""DELETE Requests"""
response = send_request("DELETE", endpoint, timeout=timeout, retries=retries, backoff_factor=backoff_factor)
success = response.json()["message"]
result = send_request("DELETE", endpoint, timeout=timeout, retries=retries, backoff_factor=backoff_factor)
if toast:
st.toast(success, icon="✅")
st.toast(result.get("message", "Deleted."), icon="✅")
time.sleep(1)
return result
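After this refactor, `send_request` and the `get`/`post`/`patch`/`delete` wrappers return the parsed JSON payload (a dict) directly instead of a `requests.Response`, and failures are surfaced through `st.error` plus `ApiError`. A minimal sketch of how a caller might consume the new interface (the endpoint name and response keys are illustrative assumptions, not taken from this diff):

```python
# Hypothetical caller -- the endpoint and keys are assumptions for illustration.
from client.utils import api_call

try:
    settings = api_call.get("v1/settings")     # returns a dict now; no .json() needed
    tools = settings.get("tools_enabled", [])
except api_call.ApiError:
    # send_request has already displayed st.error(); fall back gracefully here.
    tools = []
```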
58 changes: 0 additions & 58 deletions src/client/utils/st_common.py
@@ -232,61 +232,3 @@ def ll_sidebar() -> None:
key="selected_ll_model_presence_penalty",
on_change=update_client_settings("ll_model"),
)


#####################################################
# Tools Options
#####################################################
def tools_sidebar() -> None:
"""Tools Sidebar Settings"""

# Setup Tool Box
state.tool_box = {
"LLM Only": {"description": "Do not use tools", "enabled": True},
"Vector Search": {"description": "Use AI with Unstructured Data", "enabled": True},
"NL2SQL": {"description": "Use AI with Structured Data", "enabled": True},
}

def _update_set_tool():
"""Update user settings as to which tool is being used"""
state.client_settings["tools_enabled"] = [state.selected_tool]

def _disable_tool(tool: str, reason: str = None) -> None:
"""Disable a tool in the tool box"""
if reason:
logger.debug("%s Disabled (%s)", tool, reason)
st.warning(f"{reason}. Disabling {tool}.", icon="⚠️")
state.tool_box[tool]["enabled"] = False

if not is_db_configured():
logger.debug("Vector Search/NL2SQL Disabled (Database not configured)")
st.warning("Database is not configured. Disabling Vector Search and NL2SQL tools.", icon="⚠️")
_disable_tool("Vector Search")
_disable_tool("NL2SQL")
else:
# Check to enable Vector Store
embed_models_enabled = enabled_models_lookup("embed")
db_alias = state.client_settings.get("database", {}).get("alias")
database_lookup = state_configs_lookup("database_configs", "name")
if not embed_models_enabled:
_disable_tool("Vector Search", "No embedding models are configured and/or enabled.")
elif not database_lookup[db_alias].get("vector_stores"):
_disable_tool("Vector Search", "Database has no vector stores.")
else:
# Check if any vector stores use an enabled embedding model
vector_stores = database_lookup[db_alias].get("vector_stores", [])
usable_vector_stores = [vs for vs in vector_stores if vs.get("model") in embed_models_enabled]
if not usable_vector_stores:
_disable_tool("Vector Search", "No vector stores match the enabled embedding models")

tool_box = [key for key, val in state.tool_box.items() if val["enabled"]]
current_tool = state.client_settings["tools_enabled"][0]
tool_index = tool_box.index(current_tool) if current_tool in tool_box else 0
st.sidebar.selectbox(
"Tool Selection",
tool_box,
index=tool_index,
label_visibility="collapsed",
on_change=_update_set_tool,
key="selected_tool",
)
70 changes: 70 additions & 0 deletions src/client/utils/tool_options.py
@@ -0,0 +1,70 @@
"""
Copyright (c) 2024, 2025, Oracle and/or its affiliates.
Licensed under the Universal Permissive License v1.0 as shown at http://oss.oracle.com/licenses/upl.
"""
# spell-checker:ignore selectbox

import streamlit as st
from streamlit import session_state as state

from client.utils import st_common
from common import logging_config

logger = logging_config.logging.getLogger("client.utils.st_common")


def tools_sidebar() -> None:
"""Tools Sidebar Settings"""

# Setup Tool Box
state.tool_box = {
"LLM Only": {"description": "Do not use tools", "enabled": True},
"Vector Search": {"description": "Use AI with Unstructured Data", "enabled": True},
"NL2SQL": {"description": "Use AI with Structured Data", "enabled": True},
}

def _update_set_tool():
"""Update user settings as to which tool is being used"""
state.client_settings["tools_enabled"] = [state.selected_tool]

def _disable_tool(tool: str, reason: str = None) -> None:
"""Disable a tool in the tool box"""
if reason:
logger.debug("%s Disabled (%s)", tool, reason)
st.warning(f"{reason}. Disabling {tool}.", icon="⚠️")
state.tool_box[tool]["enabled"] = False

if not st_common.is_db_configured():
logger.debug("Vector Search/NL2SQL Disabled (Database not configured)")
st.warning("Database is not configured. Disabling Vector Search and NL2SQL tools.", icon="⚠️")
_disable_tool("Vector Search")
_disable_tool("NL2SQL")
else:
# Check to enable Vector Store
embed_models_enabled = st_common.enabled_models_lookup("embed")
db_alias = state.client_settings.get("database", {}).get("alias")
database_lookup = st_common.state_configs_lookup("database_configs", "name")
if not embed_models_enabled:
_disable_tool("Vector Search", "No embedding models are configured and/or enabled.")
elif not database_lookup[db_alias].get("vector_stores"):
_disable_tool("Vector Search", "Database has no vector stores.")
else:
# Check if any vector stores use an enabled embedding model
vector_stores = database_lookup[db_alias].get("vector_stores", [])
usable_vector_stores = [vs for vs in vector_stores if vs.get("model") in embed_models_enabled]
if not usable_vector_stores:
_disable_tool("Vector Search", "No vector stores match the enabled embedding models")

tool_box = [key for key, val in state.tool_box.items() if val["enabled"]]
current_tool = state.client_settings["tools_enabled"][0]
if current_tool not in tool_box:
state.client_settings["tools_enabled"] = ["LLM Only"]
tool_index = tool_box.index(current_tool) if current_tool in tool_box else 0
st.sidebar.selectbox(
"Tool Selection",
tool_box,
index=tool_index,
label_visibility="collapsed",
on_change=_update_set_tool,
key="selected_tool",
)
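As the chatbot.py and testbed.py hunks above show, pages now pull the tools sidebar from this new module while the other sidebars stay in `st_common`. A minimal sketch of a hypothetical page wired the new way (the page itself is illustrative; only the import path and sidebar calls reflect this diff):

```python
# Hypothetical Streamlit page -- only the tool_options import and sidebar calls mirror this PR.
import streamlit as st

from client.utils import st_common, tool_options


def setup_sidebar() -> None:
    tool_options.tools_sidebar()   # moved here from st_common.tools_sidebar()
    st_common.history_sidebar()
    st_common.ll_sidebar()


st.title("Chatbot")
setup_sidebar()
```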