Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 10 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,15 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [0.2.1] - 2025-12-25

### Fixed
- **Key Generation Bug**: Fixed an issue where `TTLCache` and `SWRCache` would fail to correctly generate cache keys when using named placeholders (e.g., `"user:{id}"`) if the function was called with positional arguments.
- **Performance**: Optimized cache key generation logic to avoid expensive signature binding on every call, using a fast-path for common patterns and efficient argument merging for complex cases.

### Added
- `configure()` class method on all decorators to easily create pre-configured cache instances (e.g., `MyCache = TTLCache.configure(cache=RedisCache(...))`).

## [0.2.0] - 2025-12-23

### Changed
Expand All @@ -16,6 +25,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

### Added
- `AsyncTTLCache`, `AsyncStaleWhileRevalidateCache`, `AsyncBackgroundCache` classes (aliased to `TTLCache`, `SWRCache`, `BGCache`).
- `configure()` class method on all decorators to easily create pre-configured cache instances (e.g., `MyCache = TTLCache.configure(cache=RedisCache(...))`).
- `SharedAsyncScheduler` for managing async background jobs.
- `pytest-asyncio` configuration in `pyproject.toml`.

Expand Down
56 changes: 50 additions & 6 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -73,19 +73,34 @@ def load_inventory() -> list[dict]:
@BGCache.register_loader("inventory_async", interval_seconds=300)
async def load_inventory_async() -> list[dict]:
return await warehouse_api.get_all_items()

# Configured Cache (Reusable Backend)
# Create a decorator pre-wired with a specific cache (e.g., Redis)
RedisTTL = TTLCache.configure(cache=RedisCache(redis_client))

@RedisTTL.cached("user:{}", ttl=300)
async def get_user_redis(user_id: int):
return await db.fetch(user_id)
```

---

## Key Templates

* `"user:{}"` → first positional argument
* `"user:{user_id}"` → named argument
* Custom:
The library supports smart key generation that handles both positional and keyword arguments seamlessly.

```python
key=lambda *a, **k: f"user:{k.get('user_id', a[0])}"
```
* **Positional Placeholder**: `"user:{}"`
* Uses the first argument, whether passed positionally or as a keyword.
* Example: `get_user(123)` or `get_user(user_id=123)` -> `"user:123"`

* **Named Placeholder**: `"user:{user_id}"`
* Resolves `user_id` from keyword arguments OR positional arguments (by inspecting the function signature).
* Example: `def get_user(user_id): ...` called as `get_user(123)` -> `"user:123"`

* **Custom Function**:
* For complex logic, pass a callable.
  * Example 1 — functions called with a mix of positional/keyword arguments (or defaults): `key=lambda *a, **k: f"user:{k.get('user_id', a[0])}"`
  * Example 2 — functions with a fixed signature and no defaults: `key=lambda user_id: f"user:{user_id}"`

---

Expand Down Expand Up @@ -214,6 +229,35 @@ db_host = load_config_map().get("db", {}).get("host")

---

## Advanced Configuration

To avoid repeating complex cache configurations (like HybridCache setup) in every decorator, you can create a pre-configured cache instance.

```python
from advanced_caching import SWRCache, HybridCache, InMemCache, RedisCache

# 1. Define your cache factory
def create_hybrid_cache():
return HybridCache(
l1_cache=InMemCache(),
l2_cache=RedisCache(redis_client),
l1_ttl=300,
l2_ttl=3600
)

# 2. Create a configured decorator
MySWRCache = SWRCache.configure(cache=create_hybrid_cache)

# 3. Use it cleanly
@MySWRCache.cached("users:{}", ttl=300)
def get_users(code: str):
return db.get_users(code)
```

This works for `TTLCache`, `SWRCache`, and `BGCache`.

---

### Custom Storage

Implement the `CacheStorage` protocol.
Expand Down
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ build-backend = "hatchling.build"

[project]
name = "advanced-caching"
version = "0.2.0"
version = "0.2.1"
description = "Production-ready composable caching with TTL, SWR, and background refresh patterns for Python."
readme = "README.md"
requires-python = ">=3.10"
Expand Down
2 changes: 1 addition & 1 deletion src/advanced_caching/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
Expose storage backends, decorators, and scheduler utilities under `advanced_caching`.
"""

__version__ = "0.2.0"
__version__ = "0.2.1"

from .storage import (
InMemCache,
Expand Down
170 changes: 143 additions & 27 deletions src/advanced_caching/decorators.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,10 +10,11 @@
from __future__ import annotations

import asyncio
import inspect
import logging
import time
from datetime import datetime, timedelta
from typing import Callable, TypeVar
from typing import Callable, TypeVar, Any

from apscheduler.triggers.interval import IntervalTrigger

Expand All @@ -28,56 +29,84 @@


# Helper to normalize cache key builders for all decorators.
def _create_key_fn(key: str | Callable[..., str]) -> Callable[..., str]:
def _create_smart_key_fn(
key: str | Callable[..., str], func: Callable[..., Any]
) -> Callable[..., str]:
# If the key is already a function (e.g., lambda u: f"user:{u}"), return it directly.
if callable(key):
return key # type: ignore[assignment]

template = key
# Optimization: Static key (e.g., "global_config")
# If there are no placeholders, we don't need to format anything.
if "{" not in template:

def key_fn(*args, **kwargs) -> str:
# Always return the static string, ignoring arguments.
return template

return key_fn

if (
template.count("{}") == 1
and template.count("{") == 1
and template.count("}") == 1
):
# Optimization: Simple positional key "prefix:{}" (e.g., "user:{}")
# This is a very common pattern, so we optimize it to avoid full string formatting.
if template.count("{}") == 1 and template.count("{") == 1:
prefix, suffix = template.split("{}", 1)

def key_fn(*args, **kwargs) -> str:
# If positional args are provided (e.g., get_user(123)), use the first one.
if args:
return prefix + str(args[0]) + suffix
return f"{prefix}{args[0]}{suffix}"
# If keyword args are provided (e.g., get_user(user_id=123)), use the first value.
# This supports the case where a positional placeholder is used but the function is called with kwargs.
if kwargs:
if len(kwargs) == 1:
return prefix + str(next(iter(kwargs.values()))) + suffix
return template
# Fallback for single kwarg usage with positional template
return f"{prefix}{next(iter(kwargs.values()))}{suffix}"
# If no arguments are provided, return the raw template (e.g., "user:{}").
return template

return key_fn

# General case: Named placeholders (e.g., "user:{id}") or complex positional (e.g., "{}:{}" or "{0}")
# We need to inspect the function signature to map positional arguments to parameter names.
sig = inspect.signature(func)
param_names = list(sig.parameters.keys())

# Pre-compute defaults to handle cases where arguments are omitted but have default values.
# e.g., def func(a=1): ... with key="{a}"
defaults = {
k: v.default
for k, v in sig.parameters.items()
if v.default is not inspect.Parameter.empty
}

def key_fn(*args, **kwargs) -> str:
# Fast merge of arguments to support named placeholders.
# 1. Start with defaults (e.g., {'a': 1})
merged = defaults.copy() if defaults else {}

# 2. Map positional args to names (e.g., func(2) -> {'a': 2})
# This allows us to use named placeholders even when the function is called positionally.
if args:
try:
return template.format(args[0])
except Exception:
try:
return template.format(*args)
except Exception:
return template
merged.update(zip(param_names, args))

# 3. Update with explicit kwargs (e.g., func(a=3) -> {'a': 3})
if kwargs:
merged.update(kwargs)

try:
# Try formatting with named arguments (e.g., "user:{id}".format(id=123))
return template.format(**merged)
except (KeyError, ValueError, IndexError):
# Fallback: Try raw positional args (for "{}" templates or mixed usage)
# e.g., "user:{}".format(123) if named formatting failed.
try:
return template.format(**kwargs)
return template.format(*args)
except Exception:
if len(kwargs) == 1:
try:
return template.format(next(iter(kwargs.values())))
except Exception:
return template
# If formatting fails entirely, return the raw template to avoid crashing.
return template
return template
except Exception:
# Catch-all for other formatting errors.
return template

return key_fn

Expand All @@ -104,6 +133,33 @@ async def get_user(user_id):
return await db.fetch_user(user_id)
"""

@classmethod
def configure(
    cls, cache: CacheStorage | Callable[[], CacheStorage]
) -> type[AsyncTTLCache]:
    """
    Create a configured version of TTLCache with a default cache backend.

    Args:
        cache: A cache instance or a zero-arg factory producing one; used
            whenever a ``cached()`` call does not supply its own backend.

    Returns:
        A subclass whose ``cached()`` defaults to the configured backend.

    Example:
        MyCache = TTLCache.configure(cache=RedisCache(...))
        @MyCache.cached("key", ttl=60)
        def func(): ...
    """

    class ConfiguredTTLCache(cls):
        @classmethod
        def cached(
            cls_inner,
            key: str | Callable[..., str],
            ttl: int,
            cache: CacheStorage | Callable[[], CacheStorage] | None = None,
        ) -> Callable[[Callable[..., T]], Callable[..., T]]:
            # Fall back to the configured cache only when none is provided.
            # Compare against None explicitly: a cache object could be falsy
            # (e.g. if its class defines __len__ and the cache is empty).
            effective = cache if cache is not None else cls_inner._configured_cache
            return cls.cached(key, ttl, cache=effective)

    ConfiguredTTLCache._configured_cache = cache  # type: ignore
    return ConfiguredTTLCache

@classmethod
def cached(
cls,
Expand All @@ -119,10 +175,10 @@ def cached(
ttl: Time-to-live in seconds
cache: Optional cache backend (defaults to InMemCache)
"""
key_fn = _create_key_fn(key)
cache_factory = normalize_cache_factory(cache, default_factory=InMemCache)

def decorator(func: Callable[..., T]) -> Callable[..., T]:
key_fn = _create_smart_key_fn(key, func)
cache_obj = cache_factory()
cache_get_entry = cache_obj.get_entry
cache_set = cache_obj.set
Expand Down Expand Up @@ -183,6 +239,35 @@ class AsyncStaleWhileRevalidateCache:
Supports both sync and async functions.
"""

@classmethod
def configure(
    cls, cache: CacheStorage | Callable[[], CacheStorage]
) -> type[AsyncStaleWhileRevalidateCache]:
    """
    Create a configured version of SWRCache with a default cache backend.

    Args:
        cache: A cache instance or a zero-arg factory producing one; used
            whenever a ``cached()`` call does not supply its own backend.

    Returns:
        A subclass whose ``cached()`` defaults to the configured backend.
    """

    class ConfiguredSWRCache(cls):
        @classmethod
        def cached(
            cls_inner,
            key: str | Callable[..., str],
            ttl: int,
            stale_ttl: int = 0,
            cache: CacheStorage | Callable[[], CacheStorage] | None = None,
            enable_lock: bool = True,
        ) -> Callable[[Callable[..., T]], Callable[..., T]]:
            # Fall back to the configured cache only when none is provided.
            # Compare against None explicitly: a cache object could be falsy
            # (e.g. if its class defines __len__ and the cache is empty).
            effective = cache if cache is not None else cls_inner._configured_cache
            return cls.cached(
                key,
                ttl,
                stale_ttl=stale_ttl,
                cache=effective,
                enable_lock=enable_lock,
            )

    ConfiguredSWRCache._configured_cache = cache  # type: ignore
    return ConfiguredSWRCache

@classmethod
def cached(
cls,
Expand All @@ -192,10 +277,10 @@ def cached(
cache: CacheStorage | Callable[[], CacheStorage] | None = None,
enable_lock: bool = True,
) -> Callable[[Callable[..., T]], Callable[..., T]]:
key_fn = _create_key_fn(key)
cache_factory = normalize_cache_factory(cache, default_factory=InMemCache)

def decorator(func: Callable[..., T]) -> Callable[..., T]:
key_fn = _create_smart_key_fn(key, func)
cache_obj = cache_factory()
get_entry = cache_obj.get_entry
set_entry = cache_obj.set_entry
Expand Down Expand Up @@ -360,6 +445,37 @@ def shutdown(cls, wait: bool = True) -> None:
SharedAsyncScheduler.shutdown(wait)
SharedScheduler.shutdown(wait)

@classmethod
def configure(
    cls, cache: CacheStorage | Callable[[], CacheStorage]
) -> type[AsyncBackgroundCache]:
    """
    Create a configured version of BGCache with a default cache backend.

    Args:
        cache: A cache instance or a zero-arg factory producing one; used
            whenever a ``register_loader()`` call does not supply its own
            backend.

    Returns:
        A subclass whose ``register_loader()`` defaults to the configured
        backend.
    """

    class ConfiguredBGCache(cls):
        @classmethod
        def register_loader(
            cls_inner,
            key: str,
            interval_seconds: int,
            ttl: int | None = None,
            run_immediately: bool = True,
            on_error: Callable[[Exception], None] | None = None,
            cache: CacheStorage | Callable[[], CacheStorage] | None = None,
        ) -> Callable[[Callable[[], T]], Callable[[], T]]:
            # Fall back to the configured cache only when none is provided.
            # Compare against None explicitly: a cache object could be falsy
            # (e.g. if its class defines __len__ and the cache is empty).
            effective = cache if cache is not None else cls_inner._configured_cache
            return cls.register_loader(
                key,
                interval_seconds,
                ttl=ttl,
                run_immediately=run_immediately,
                on_error=on_error,
                cache=effective,
            )

    ConfiguredBGCache._configured_cache = cache  # type: ignore
    return ConfiguredBGCache

@classmethod
def register_loader(
cls,
Expand Down
Loading
Loading