109 changes: 91 additions & 18 deletions litellm/_logging.py
@@ -41,12 +41,74 @@ def format(self, record):
return json.dumps(json_record)


# Global handler for JSON exception logging
_json_error_handler = None


def _get_json_error_handler():
"""Get or create the JSON error handler."""
global _json_error_handler
if _json_error_handler is None:
_json_error_handler = logging.StreamHandler()
_json_error_handler.setFormatter(JsonFormatter())
return _json_error_handler


def _set_json_error_handler(handler):
"""Set the JSON error handler (for testing)."""
global _json_error_handler
_json_error_handler = handler


def _async_json_exception_handler(loop, context):
"""
Asyncio exception handler that logs exceptions as JSON with full stacktrace.

This handler extracts the traceback from the exception object and includes
it in the JSON log output via exc_info.
"""
error_handler = _get_json_error_handler()
exception = context.get("exception")
if exception:
# Extract traceback from the exception object
exc_info = (type(exception), exception, exception.__traceback__)
# Use the message from context if available, otherwise use str(exception)
message = context.get("message", str(exception))
record = logging.LogRecord(
name="LiteLLM",
level=logging.ERROR,
pathname="",
lineno=0,
msg=message,
args=(),
exc_info=exc_info,
)
error_handler.handle(record)
else:
# For non-exception errors, still log as JSON
message = context.get("message", "Unknown asyncio error")
record = logging.LogRecord(
name="LiteLLM",
level=logging.ERROR,
pathname="",
lineno=0,
msg=message,
args=(),
exc_info=None,
)
error_handler.handle(record)


# Function to set up exception handlers for JSON logging
def _setup_json_exception_handlers(formatter):
# Create a handler with JSON formatting for exceptions
error_handler = logging.StreamHandler()
error_handler.setFormatter(formatter)

# Update global handler
global _json_error_handler
_json_error_handler = error_handler

# Setup excepthook for uncaught exceptions
def json_excepthook(exc_type, exc_value, exc_traceback):
record = logging.LogRecord(
@@ -62,27 +124,38 @@ def json_excepthook(exc_type, exc_value, exc_traceback):

sys.excepthook = json_excepthook

# Configure asyncio exception handler if possible
# Note: asyncio exception handler is set separately via _setup_asyncio_json_exception_handler()
# because at import time, the event loop may not be the one used by uvicorn/FastAPI


def _setup_asyncio_json_exception_handler():
"""
Set up the asyncio exception handler for JSON logging.

This should be called AFTER the event loop is created (e.g., in a FastAPI startup event)
to ensure the handler is set on the correct event loop used by the ASGI server.

The issue is that at module import time, asyncio.get_event_loop() may return a
different event loop than the one uvicorn/FastAPI will use. By calling this function
in a startup event, we ensure the handler is set on the actual running event loop.
"""
if not json_logs:
return

try:
import asyncio

def async_json_exception_handler(loop, context):
exception = context.get("exception")
if exception:
record = logging.LogRecord(
name="LiteLLM",
level=logging.ERROR,
pathname="",
lineno=0,
msg=str(exception),
args=(),
exc_info=None,
)
error_handler.handle(record)
else:
loop.default_exception_handler(context)

asyncio.get_event_loop().set_exception_handler(async_json_exception_handler)
# Get the running event loop and set the exception handler
try:
loop = asyncio.get_running_loop()
loop.set_exception_handler(_async_json_exception_handler)
except RuntimeError:
# No running event loop, try get_event_loop as fallback
try:
loop = asyncio.get_event_loop()
loop.set_exception_handler(_async_json_exception_handler)
except Exception:
pass
except Exception:
pass

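For context, the core of this change to `_async_json_exception_handler` is rebuilding `exc_info` from `exception.__traceback__` so the JSON formatter can emit a `stacktrace` field (the old inline handler passed `exc_info=None`, which dropped the traceback). Below is a minimal, self-contained sketch of that pattern; `MiniJsonFormatter` is a simplified stand-in for litellm's `JsonFormatter`, emitting only the `message`, `level`, and `stacktrace` fields that the tests in this PR assert on, and the raising `call_soon` callback is just a convenient way to make the loop invoke its exception handler.

```python
import asyncio
import json
import logging
import traceback


class MiniJsonFormatter(logging.Formatter):
    """Simplified stand-in for litellm's JsonFormatter (not the real implementation)."""

    def format(self, record):
        payload = {"message": record.getMessage(), "level": record.levelname}
        if record.exc_info:
            # This is what the fix enables: exc_info carries the traceback.
            payload["stacktrace"] = "".join(traceback.format_exception(*record.exc_info))
        return json.dumps(payload)


handler = logging.StreamHandler()
handler.setFormatter(MiniJsonFormatter())


def json_exception_handler(loop, context):
    # Rebuild exc_info from the exception object carried in the asyncio context.
    exc = context.get("exception")
    exc_info = (type(exc), exc, exc.__traceback__) if exc else None
    record = logging.LogRecord(
        name="demo",
        level=logging.ERROR,
        pathname="",
        lineno=0,
        msg=context.get("message", str(exc)),
        args=(),
        exc_info=exc_info,
    )
    handler.handle(record)


async def main():
    # Install the handler on the *running* loop, mirroring the new setup helper.
    loop = asyncio.get_running_loop()
    loop.set_exception_handler(json_exception_handler)

    def bad_callback():
        raise RuntimeError("boom in callback")

    # A raising call_soon callback makes the loop call its exception handler.
    loop.call_soon(bad_callback)
    await asyncio.sleep(0.1)


asyncio.run(main())
# Prints one JSON line with "message", "level", and a "stacktrace" containing RuntimeError.
```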
10 changes: 9 additions & 1 deletion litellm/proxy/proxy_server.py
@@ -151,7 +151,11 @@ def generate_feedback_box():

import litellm
from litellm import Router
from litellm._logging import verbose_proxy_logger, verbose_router_logger
from litellm._logging import (
_setup_asyncio_json_exception_handler,
verbose_proxy_logger,
verbose_router_logger,
)
from litellm.caching.caching import DualCache, RedisCache
from litellm.caching.redis_cluster_cache import RedisClusterCache
from litellm.constants import (
@@ -662,6 +666,10 @@ async def proxy_startup_event(app: FastAPI):
global prisma_client, master_key, use_background_health_checks, llm_router, llm_model_list, general_settings, proxy_budget_rescheduler_min_time, proxy_budget_rescheduler_max_time, litellm_proxy_admin_name, db_writer_client, store_model_in_db, premium_user, _license_check, proxy_batch_polling_interval, shared_aiohttp_session
import json

# Set up asyncio exception handler for JSON logging on the running event loop
# This must be done here (not at import time) because uvicorn creates its own event loop
_setup_asyncio_json_exception_handler()

init_verbose_loggers()
## CHECK PREMIUM USER
verbose_proxy_logger.debug(
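The call site above is the key part of the fix: the handler is installed from inside the server's running event loop rather than at import time. As a rough illustration of the same wiring outside of litellm's proxy (a hypothetical standalone app, not the proxy's actual startup code), a FastAPI lifespan hook could do the equivalent; note the helper is a no-op unless JSON logging is enabled, since it returns early when `json_logs` is false.

```python
# Hypothetical standalone app, shown for illustration only; litellm's proxy does
# the equivalent inside proxy_startup_event, as in the diff above.
from contextlib import asynccontextmanager

from fastapi import FastAPI

from litellm._logging import _setup_asyncio_json_exception_handler


@asynccontextmanager
async def lifespan(app: FastAPI):
    # Runs inside uvicorn's event loop, so asyncio.get_running_loop() inside the
    # helper resolves to the loop that will actually serve requests.
    _setup_asyncio_json_exception_handler()  # no-op unless JSON_LOGS=true
    yield


app = FastAPI(lifespan=lifespan)
```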
240 changes: 240 additions & 0 deletions tests/litellm/logging/test_json_logging.py
@@ -0,0 +1,240 @@
"""
Tests for JSON logging functionality in litellm/_logging.py

This tests the fix for asyncio exception handling with JSON logging.

There are two issues being fixed:
1. The asyncio exception handler was set at import time on a different event loop
than the one uvicorn uses, causing asyncio exceptions to be logged as plain text
instead of JSON.
2. The original handler had exc_info=None, so even if it was on the correct loop,
the JSON output would be missing the stacktrace field.

The fix:
1. Added _setup_asyncio_json_exception_handler() that sets the handler on the running
event loop, called from proxy_startup_event after uvicorn's event loop is created.
2. Fixed the handler to extract traceback from exception.__traceback__ and include it
in the JSON output via exc_info.
"""
import asyncio
import json
import logging
import os
import sys
from io import StringIO
from unittest.mock import patch

import pytest


class TestJsonLogging:
"""Tests for JSON logging configuration."""

def test_json_formatter_includes_stacktrace_on_exception(self):
"""Test that JsonFormatter includes stacktrace when exc_info is provided."""
# Import after setting JSON_LOGS
with patch.dict(os.environ, {"JSON_LOGS": "true"}):
# Force reimport to pick up JSON_LOGS
if "litellm._logging" in sys.modules:
del sys.modules["litellm._logging"]
from litellm._logging import JsonFormatter

formatter = JsonFormatter()

# Create a log record with exception info
try:
raise RuntimeError("Test exception")
except RuntimeError:
exc_info = sys.exc_info()

record = logging.LogRecord(
name="test",
level=logging.ERROR,
pathname="test.py",
lineno=1,
msg="Test message",
args=(),
exc_info=exc_info,
)

output = formatter.format(record)
data = json.loads(output)

assert "stacktrace" in data
assert "RuntimeError" in data["stacktrace"]
assert "Test exception" in data["stacktrace"]

def test_json_formatter_no_stacktrace_without_exception(self):
"""Test that JsonFormatter doesn't include stacktrace when no exception."""
with patch.dict(os.environ, {"JSON_LOGS": "true"}):
if "litellm._logging" in sys.modules:
del sys.modules["litellm._logging"]
from litellm._logging import JsonFormatter

formatter = JsonFormatter()

record = logging.LogRecord(
name="test",
level=logging.INFO,
pathname="test.py",
lineno=1,
msg="Test message",
args=(),
exc_info=None,
)

output = formatter.format(record)
data = json.loads(output)

assert "stacktrace" not in data
assert data["message"] == "Test message"
assert data["level"] == "INFO"

def test_async_json_exception_handler_includes_traceback(self):
"""Test that async_json_exception_handler includes traceback from exception."""
with patch.dict(os.environ, {"JSON_LOGS": "true"}):
if "litellm._logging" in sys.modules:
del sys.modules["litellm._logging"]
from litellm._logging import (
_async_json_exception_handler,
_set_json_error_handler,
JsonFormatter,
)

# Capture output
captured = StringIO()
handler = logging.StreamHandler(captured)
handler.setFormatter(JsonFormatter())

# Set our custom handler
_set_json_error_handler(handler)

# Simulate asyncio exception context with traceback
try:
raise ValueError("Async task failed")
except ValueError as e:
context = {
"message": "Task exception was never retrieved",
"exception": e,
}

# Create a mock loop
class MockLoop:
def default_exception_handler(self, context):
pass

_async_json_exception_handler(MockLoop(), context)

output = captured.getvalue()
data = json.loads(output.strip())

assert "stacktrace" in data
assert "ValueError" in data["stacktrace"]
assert "Async task failed" in data["stacktrace"]

def test_async_json_exception_handler_handles_non_exception_context(self):
"""Test that async_json_exception_handler handles context without exception."""
with patch.dict(os.environ, {"JSON_LOGS": "true"}):
if "litellm._logging" in sys.modules:
del sys.modules["litellm._logging"]
from litellm._logging import (
_async_json_exception_handler,
_set_json_error_handler,
JsonFormatter,
)

captured = StringIO()
handler = logging.StreamHandler(captured)
handler.setFormatter(JsonFormatter())

# Set our custom handler
_set_json_error_handler(handler)

# Context without exception
context = {
"message": "Some asyncio warning",
}

class MockLoop:
def default_exception_handler(self, context):
pass

_async_json_exception_handler(MockLoop(), context)

output = captured.getvalue()
data = json.loads(output.strip())

assert data["message"] == "Some asyncio warning"
assert "stacktrace" not in data


@pytest.mark.asyncio
class TestAsyncioJsonExceptionHandler:
"""Tests for asyncio exception handler with JSON logging."""

async def test_setup_asyncio_json_exception_handler_sets_handler(self):
"""Test that _setup_asyncio_json_exception_handler sets the handler on running loop."""
with patch.dict(os.environ, {"JSON_LOGS": "true"}):
if "litellm._logging" in sys.modules:
del sys.modules["litellm._logging"]
from litellm._logging import _setup_asyncio_json_exception_handler

loop = asyncio.get_running_loop()
original_handler = loop.get_exception_handler()

try:
_setup_asyncio_json_exception_handler()

new_handler = loop.get_exception_handler()
assert new_handler is not None
assert new_handler != original_handler
finally:
# Restore original handler
loop.set_exception_handler(original_handler)

async def test_asyncio_exception_logged_as_json_with_stacktrace(self):
"""Test that asyncio exceptions are logged as JSON with stacktrace."""
with patch.dict(os.environ, {"JSON_LOGS": "true"}):
if "litellm._logging" in sys.modules:
del sys.modules["litellm._logging"]
from litellm._logging import (
_setup_asyncio_json_exception_handler,
_set_json_error_handler,
JsonFormatter,
)

captured = StringIO()
handler = logging.StreamHandler(captured)
handler.setFormatter(JsonFormatter())

# Set our custom handler to capture output
_set_json_error_handler(handler)

loop = asyncio.get_running_loop()
original_handler = loop.get_exception_handler()

try:
_setup_asyncio_json_exception_handler()

async def failing_task():
await asyncio.sleep(0.01)
raise RuntimeError("Test async exception")

task = asyncio.create_task(failing_task())
await asyncio.sleep(0.1)

# Force garbage collection to trigger exception handler
import gc

del task
gc.collect()
await asyncio.sleep(0.1)

output = captured.getvalue()
if output:
data = json.loads(output.strip())
assert "stacktrace" in data
assert "RuntimeError" in data["stacktrace"]
assert "Test async exception" in data["stacktrace"]
finally:
loop.set_exception_handler(original_handler)
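These tests can be run directly with, e.g., `pytest tests/litellm/logging/test_json_logging.py -v`; the `TestAsyncioJsonExceptionHandler` cases assume `pytest-asyncio` is available for the `@pytest.mark.asyncio` marker, and `JSON_LOGS` is patched inside each test, so no extra environment setup should be needed.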