
Commit c53ceda

test: eliminate remaining test warnings for warning-free test suite
This completes the test warning cleanup effort started in #1344 and #1342, achieving a fully warning-free test suite when running 'make test'. Changes include:

- Added explicit type hints for session registry attributes
- Fixed AsyncMock usage in session_registry tests with proper SSETransport fixtures
- Added done_callback to fire-and-forget asyncio futures to prevent warnings
- Implemented proper logging handler cleanup in LoggingService.shutdown()
- Replaced deprecated Pydantic v1 .dict() with v2 .model_dump()
- Fixed line continuation formatting in content_moderation plugin
- Added pytest-integration-mark dependency to pyproject.toml

All tests now pass without warnings, improving test output clarity and ensuring proper resource cleanup during test execution.

Signed-off-by: Jonathan Springer <jonpspri@gmail.com>
1 parent bc2f512 commit c53ceda
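
Of the items above, the AsyncMock/SSETransport fixture work lands in test files that are not expanded on this page. A minimal sketch of that kind of fixture, assuming SSETransport is importable from mcpgateway.transports.sse_transport (the fixture name and body are illustrative, not taken from the commit):

from unittest.mock import AsyncMock

import pytest

from mcpgateway.transports.sse_transport import SSETransport  # assumed import path


@pytest.fixture
def sse_transport_mock() -> AsyncMock:
    # spec=SSETransport limits the mock's surface to real SSETransport
    # attributes, and async methods return awaitables, so tests don't leave
    # un-awaited coroutines behind to surface as warnings later.
    return AsyncMock(spec=SSETransport)

Constraining the mock with spec is what keeps typos and sync/async mismatches from degrading into the late warnings the commit message describes.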

12 files changed: +220, −258 lines

mcpgateway/cache/session_registry.py

Lines changed: 23 additions & 22 deletions

@@ -50,6 +50,7 @@

 # Standard
 import asyncio
+from asyncio import Task
 from datetime import datetime, timezone
 import json
 import logging

@@ -184,7 +185,7 @@ def __init__(
         # Set up backend-specific components
         if self._backend == "memory":
             # Nothing special needed for memory backend
-            self._session_message = None
+            self._session_message: dict[str, Any] | None = None

         elif self._backend == "none":
             # No session tracking - this is just a dummy registry

@@ -295,7 +296,7 @@ def __init__(
         super().__init__(backend=backend, redis_url=redis_url, database_url=database_url, session_ttl=session_ttl, message_ttl=message_ttl)
         self._sessions: Dict[str, Any] = {}  # Local transport cache
         self._lock = asyncio.Lock()
-        self._cleanup_task = None
+        self._cleanup_task: Task | None = None

     async def initialize(self) -> None:
         """Initialize the registry with async setup.

@@ -697,7 +698,7 @@ async def broadcast(self, session_id: str, message: Dict[str, Any]) -> None:
             else:
                 msg_json = json.dumps(str(message))

-            self._session_message: Dict[str, Any] = {"session_id": session_id, "message": msg_json}
+            self._session_message: Dict[str, Any] | None = {"session_id": session_id, "message": msg_json}

         elif self._backend == "redis":
             try:

@@ -835,7 +836,7 @@ async def respond(
         elif self._backend == "memory":
             # if self._session_message:
             transport = self.get_session_sync(session_id)
-            if transport:
+            if transport and self._session_message:
                 message = json.loads(str(self._session_message.get("message")))
                 await self.generate_response(message=message, transport=transport, server_id=server_id, user=user, base_url=base_url)

@@ -863,7 +864,7 @@ async def respond(

         elif self._backend == "database":

-            def _db_read_session(session_id: str) -> SessionRecord:
+            def _db_read_session(session_id: str) -> SessionRecord | None:
                 """Check if session still exists in the database.

                 Queries the SessionRecord table to verify that the session

@@ -898,7 +899,7 @@ def _db_read_session(session_id: str) -> SessionRecord:
                 finally:
                     db_session.close()

-            def _db_read(session_id: str) -> SessionMessageRecord:
+            def _db_read(session_id: str) -> SessionMessageRecord | None:
                 """Read pending message for a session from the database.

                 Retrieves the first (oldest) unprocessed message for the given

@@ -1284,23 +1285,23 @@ async def generate_response(self, message: Dict[str, Any], transport: SSETranspo
         result = {}

         if "method" in message and "id" in message:
+            method = message["method"]
+            params = message.get("params", {})
+            params["server_id"] = server_id
+            req_id = message["id"]
+
+            rpc_input = {
+                "jsonrpc": "2.0",
+                "method": method,
+                "params": params,
+                "id": req_id,
+            }
+            # Get the token from the current authentication context
+            # The user object doesn't contain the token directly, we need to reconstruct it
+            # Since we don't have access to the original headers here, we need a different approach
+            # We'll extract the token from the session or create a new admin token
+            token = None
             try:
-                method = message["method"]
-                params = message.get("params", {})
-                params["server_id"] = server_id
-                req_id = message["id"]
-
-                rpc_input = {
-                    "jsonrpc": "2.0",
-                    "method": method,
-                    "params": params,
-                    "id": req_id,
-                }
-                # Get the token from the current authentication context
-                # The user object doesn't contain the token directly, we need to reconstruct it
-                # Since we don't have access to the original headers here, we need a different approach
-                # We'll extract the token from the session or create a new admin token
-                token = None
                 if hasattr(user, "get") and "auth_token" in user:
                     token = user["auth_token"]
                 else:
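
The thread running through this file is one small typing pattern: attributes that begin life as None (self._session_message, self._cleanup_task) now carry an explicit Optional annotation, and code that dereferences them first checks that they are set. A standalone sketch of the pattern (illustrative class, not repository code):

import asyncio
from asyncio import Task


class Registry:
    def __init__(self) -> None:
        # Declared as Task | None up front, so the later assignment of a real
        # Task matches the annotation instead of fighting an inferred None type.
        self._cleanup_task: Task | None = None

    async def initialize(self) -> None:
        self._cleanup_task = asyncio.create_task(self._cleanup_loop())

    async def shutdown(self) -> None:
        if self._cleanup_task:  # guard before use, mirroring the respond() fix above
            self._cleanup_task.cancel()

    async def _cleanup_loop(self) -> None:
        await asyncio.sleep(0)  # placeholder body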

mcpgateway/services/logging_service.py

Lines changed: 15 additions & 6 deletions

@@ -154,8 +154,8 @@ def emit(self, record: logging.LogRecord) -> None:
                 # No running loop, can't store
                 return

-            # Schedule the coroutine
-            asyncio.run_coroutine_threadsafe(
+            # Schedule the coroutine and store the future (fire-and-forget)
+            future = asyncio.run_coroutine_threadsafe(
                 self.storage.add_log(
                     level=log_level,
                     message=message,

@@ -167,6 +167,8 @@ def emit(self, record: logging.LogRecord) -> None:
                 ),
                 self.loop,
             )
+            # Add a done callback to catch any exceptions without blocking
+            future.add_done_callback(lambda f: f.exception() if not f.cancelled() else None)
         except Exception:
             # Silently fail to avoid logging recursion
             pass  # nosec B110 - Intentional to prevent logging recursion

@@ -204,6 +206,7 @@ def __init__(self) -> None:
         self._subscribers: List[asyncio.Queue[_LogMessage]] = []
         self._loggers: Dict[str, logging.Logger] = {}
         self._storage: LogStorageService | None = None  # Will be initialized if admin UI is enabled
+        self._storage_handler: Optional[StorageHandler] = None  # Track the storage handler for cleanup

     async def initialize(self) -> None:
         """Initialize logging service.

@@ -249,10 +252,10 @@ async def initialize(self) -> None:
             self._storage = LogStorageService()

             # Add storage handler to capture all logs
-            storage_handler = StorageHandler(self._storage)
-            storage_handler.setFormatter(text_formatter)
-            storage_handler.setLevel(getattr(logging, settings.log_level.upper()))
-            root_logger.addHandler(storage_handler)
+            self._storage_handler = StorageHandler(self._storage)
+            self._storage_handler.setFormatter(text_formatter)
+            self._storage_handler.setLevel(getattr(logging, settings.log_level.upper()))
+            root_logger.addHandler(self._storage_handler)

             logging.info(f"Log storage initialized with {settings.log_buffer_size_mb}MB buffer")

@@ -271,6 +274,12 @@ async def shutdown(self) -> None:
         >>> asyncio.run(service.shutdown())

         """
+        # Remove storage handler from root logger if it was added
+        if self._storage_handler:
+            root_logger = logging.getLogger()
+            root_logger.removeHandler(self._storage_handler)
+            self._storage_handler = None
+
         # Clear subscribers
         self._subscribers.clear()
         logging.info("Logging service shutdown")

mcpgateway/transports/streamablehttp_transport.py

Lines changed: 1 addition & 1 deletion

@@ -502,7 +502,7 @@ async def get_prompt(prompt_id: str, arguments: dict[str, str] | None = None) ->
         if not result or not result.messages:
             logger.warning(f"No content returned by prompt: {prompt_id}")
             return []
-        message_dicts = [message.dict() for message in result.messages]
+        message_dicts = [message.model_dump() for message in result.messages]
         return types.GetPromptResult(messages=message_dicts, description=result.description)
     except Exception as e:
         logger.exception(f"Error getting prompt '{prompt_id}': {e}")

plugins/content_moderation/content_moderation.py

Lines changed: 9 additions & 4 deletions

@@ -523,7 +523,12 @@ async def _moderate_with_patterns(self, text: str) -> ModerationResult:
                 break

         return ModerationResult(
-            flagged=flagged, categories=categories, action=action, provider=ModerationProvider.IBM_WATSON, confidence=max_score, details={"method": "pattern_matching"}  # Default fallback
+            flagged=flagged,
+            categories=categories,
+            action=action,
+            provider=ModerationProvider.IBM_WATSON,
+            confidence=max_score,
+            details={"method": "pattern_matching"},  # Default fallback
         )

     async def _extract_text_content(self, payload: Any) -> List[str]:

@@ -555,7 +560,7 @@ async def prompt_pre_fetch(self, payload: PromptPrehookPayload, _context: Plugin

         if self._cfg.audit_decisions:
             logger.info(
-                f"Content moderation - Prompt: {payload.prompt_id}, Result: {result.flagged}, " f"Action: {result.action}, Provider: {result.provider}, " f"Confidence: {result.confidence:.2f}"
+                f"Content moderation - Prompt: {payload.prompt_id}, Result: {result.flagged}, Action: {result.action}, Provider: {result.provider}, Confidence: {result.confidence:.2f}"
             )

         if result.action == ModerationAction.BLOCK:

@@ -572,7 +577,7 @@ async def prompt_pre_fetch(self, payload: PromptPrehookPayload, _context: Plugin
                         "flagged_text_preview": text[:100] + "..." if len(text) > 100 else text,
                     },
                 ),
-                metadata={"moderation_result": result.dict(), "provider": result.provider.value},
+                metadata={"moderation_result": result.model_dump(), "provider": result.provider.value},
             )
         elif result.modified_content:
             # Modify the payload with redacted/transformed content

@@ -598,7 +603,7 @@ async def tool_pre_invoke(self, payload: ToolPreInvokePayload, _context: PluginC
         result = await self._moderate_content(text)

         if self._cfg.audit_decisions:
-            logger.info(f"Content moderation - Tool: {payload.name}, Result: {result.flagged}, " f"Action: {result.action}, Provider: {result.provider}")
+            logger.info(f"Content moderation - Tool: {payload.name}, Result: {result.flagged}, Action: {result.action}, Provider: {result.provider}")

         if result.action == ModerationAction.BLOCK:
             return ToolPreInvokeResult(

pyproject.toml

Lines changed: 1 addition & 0 deletions

@@ -129,6 +129,7 @@ dev = [
     "pytest-env>=1.1.5",
     "pytest-examples>=0.0.18",
     "pytest-httpx>=0.35.0",
+    "pytest-integration-mark>=0.2.0",
     "pytest-md-report>=0.7.0",
     "pytest-rerunfailures>=16.0.1",
     "pytest-timeout>=2.4.0",
