Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 12 additions & 3 deletions dev/.readthedocs.yaml → .readthedocs.yaml
Original file line number Diff line number Diff line change
@@ -1,8 +1,7 @@
# Read the Docs configuration file
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
#
# Note: This file must be in the root directory (Read the Docs requirement)
# but references dev/mkdocs.yml for the MkDocs configuration
# It references dev/mkdocs.yml for the MkDocs configuration

version: 2

Expand All @@ -14,6 +13,7 @@ build:
commands:
# Use the patched build script to ensure i18n plugin works correctly
# This applies patches to mkdocs-static-i18n before building
# Dependencies are installed via python.install below BEFORE this runs
- python dev/build_docs_patched_clean.py

# MkDocs configuration
Expand All @@ -24,9 +24,10 @@ mkdocs:
configuration: dev/mkdocs.yml

# Python environment configuration
# These steps run BEFORE build.commands
python:
install:
# Install dependencies from requirements file
# Install dependencies from requirements file (relative to project root)
- requirements: dev/requirements-rtd.txt
# Install the project itself (needed for mkdocstrings to parse code)
# Use editable install to ensure imports work correctly
Expand All @@ -39,3 +40,11 @@ formats:
- htmlzip
- pdf









38 changes: 30 additions & 8 deletions ccbt/cli/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -1435,10 +1435,21 @@ def cli(ctx, config, verbose, debug):
)
@click.option("--unchoke-interval", type=float, help=_("Unchoke interval (s)"))
@click.option("--metrics-interval", type=float, help=_("Metrics interval (s)"))
@click.option("--enable-v2", "enable_v2", is_flag=True, help=_("Enable Protocol v2 (BEP 52)"))
@click.option("--disable-v2", "disable_v2", is_flag=True, help=_("Disable Protocol v2 (BEP 52)"))
@click.option("--prefer-v2", "prefer_v2", is_flag=True, help=_("Prefer Protocol v2 when available"))
@click.option("--v2-only", "v2_only", is_flag=True, help=_("Use Protocol v2 only (disable v1)"))
@click.option(
"--enable-v2", "enable_v2", is_flag=True, help=_("Enable Protocol v2 (BEP 52)")
)
@click.option(
"--disable-v2", "disable_v2", is_flag=True, help=_("Disable Protocol v2 (BEP 52)")
)
@click.option(
"--prefer-v2",
"prefer_v2",
is_flag=True,
help=_("Prefer Protocol v2 when available"),
)
@click.option(
"--v2-only", "v2_only", is_flag=True, help=_("Use Protocol v2 only (disable v1)")
)
@click.pass_context
def download(
ctx,
Expand Down Expand Up @@ -1775,10 +1786,21 @@ async def _add_torrent_to_daemon():
)
@click.option("--unchoke-interval", type=float, help=_("Unchoke interval (s)"))
@click.option("--metrics-interval", type=float, help=_("Metrics interval (s)"))
@click.option("--enable-v2", "enable_v2", is_flag=True, help=_("Enable Protocol v2 (BEP 52)"))
@click.option("--disable-v2", "disable_v2", is_flag=True, help=_("Disable Protocol v2 (BEP 52)"))
@click.option("--prefer-v2", "prefer_v2", is_flag=True, help=_("Prefer Protocol v2 when available"))
@click.option("--v2-only", "v2_only", is_flag=True, help=_("Use Protocol v2 only (disable v1)"))
@click.option(
"--enable-v2", "enable_v2", is_flag=True, help=_("Enable Protocol v2 (BEP 52)")
)
@click.option(
"--disable-v2", "disable_v2", is_flag=True, help=_("Disable Protocol v2 (BEP 52)")
)
@click.option(
"--prefer-v2",
"prefer_v2",
is_flag=True,
help=_("Prefer Protocol v2 when available"),
)
@click.option(
"--v2-only", "v2_only", is_flag=True, help=_("Use Protocol v2 only (disable v1)")
)
@click.pass_context
def magnet(
ctx,
Expand Down
6 changes: 0 additions & 6 deletions ccbt/consensus/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,9 +25,3 @@
"RaftState",
"RaftStateType",
]






16 changes: 16 additions & 0 deletions ccbt/i18n/manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -65,3 +65,19 @@ def _initialize_locale(self) -> None:
# get_locale() will handle the fallback chain
final_locale = get_locale()
logger.debug("Using locale: %s", final_locale)

def reload(self) -> None:
    """Reload translations for the current locale.

    Drops the module-level translation cache so the next translation
    lookup rebuilds it, then re-runs locale initialization so the
    active locale reflects the current environment.
    """
    import ccbt.i18n as _i18n_pkg

    # Invalidate the cached translation object; it is rebuilt lazily
    # on the next translation call.
    _i18n_pkg._translation = None  # type: ignore[attr-defined]

    # Re-run locale detection so the reload picks up any locale change.
    self._initialize_locale()

    logger.debug("Translation manager reloaded")
3 changes: 1 addition & 2 deletions ccbt/nat/port_mapping.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,9 +5,8 @@
import asyncio
import logging
import time
from collections.abc import Awaitable, Callable
from dataclasses import dataclass, field
from typing import Optional, Tuple
from typing import Awaitable, Callable, Optional, Tuple

logger = logging.getLogger(__name__)

Expand Down
112 changes: 106 additions & 6 deletions ccbt/session/checkpointing.py
Original file line number Diff line number Diff line change
Expand Up @@ -458,6 +458,15 @@ async def resume_from_checkpoint(
session: AsyncTorrentSession instance

"""
# #region agent log
import json
log_path = r"c:\Users\MeMyself\bittorrentclient\.cursor\debug.log"
try:
with open(log_path, "a", encoding="utf-8") as f:
f.write(json.dumps({"sessionId": "debug-session", "runId": "run1", "hypothesisId": "RESUME", "location": "checkpointing.py:451", "message": "resume_from_checkpoint entry", "data": {"checkpoint_rate_limits": str(checkpoint.rate_limits) if hasattr(checkpoint, "rate_limits") else None, "has_ctx": hasattr(self, "_ctx"), "has_ctx_info": hasattr(self, "_ctx") and hasattr(self._ctx, "info")}, "timestamp": __import__("time").time() * 1000}) + "\n")
except Exception:
pass
# #endregion
try:
if self._ctx.logger:
self._ctx.logger.info(
Expand Down Expand Up @@ -680,6 +689,15 @@ async def resume_from_checkpoint(
await self._restore_security_state(checkpoint, session)

# Restore rate limits if available
# #region agent log
import json
log_path = r"c:\Users\MeMyself\bittorrentclient\.cursor\debug.log"
try:
with open(log_path, "a", encoding="utf-8") as f:
f.write(json.dumps({"sessionId": "debug-session", "runId": "run1", "hypothesisId": "RESUME", "location": "checkpointing.py:683", "message": "About to call _restore_rate_limits", "data": {"has_checkpoint_rate_limits": bool(checkpoint.rate_limits) if hasattr(checkpoint, "rate_limits") else False, "checkpoint_rate_limits": str(checkpoint.rate_limits) if hasattr(checkpoint, "rate_limits") else None}, "timestamp": __import__("time").time() * 1000}) + "\n")
except Exception:
pass
# #endregion
await self._restore_rate_limits(checkpoint, session)

# Restore session state if available
Expand All @@ -693,7 +711,16 @@ async def resume_from_checkpoint(
len(checkpoint.verified_pieces),
)

except Exception:
except Exception as e:
# #region agent log
import json
log_path = r"c:\Users\MeMyself\bittorrentclient\.cursor\debug.log"
try:
with open(log_path, "a", encoding="utf-8") as f:
f.write(json.dumps({"sessionId": "debug-session", "runId": "run1", "hypothesisId": "EXCEPTION", "location": "checkpointing.py:714", "message": "Exception in resume_from_checkpoint", "data": {"exception_type": str(type(e)), "exception_msg": str(e)}, "timestamp": __import__("time").time() * 1000}) + "\n")
except Exception:
pass
# #endregion
if self._ctx.logger:
self._ctx.logger.exception("Failed to resume from checkpoint")
raise
Expand Down Expand Up @@ -1113,18 +1140,72 @@ async def _restore_rate_limits(
self, checkpoint: TorrentCheckpoint, session: Any
) -> None:
"""Restore rate limits from checkpoint."""
# #region agent log
import json
log_path = r"c:\Users\MeMyself\bittorrentclient\.cursor\debug.log"
try:
with open(log_path, "a", encoding="utf-8") as f:
f.write(json.dumps({"sessionId": "debug-session", "runId": "run1", "hypothesisId": "A", "location": "checkpointing.py:1112", "message": "_restore_rate_limits entry", "data": {"checkpoint_rate_limits": str(checkpoint.rate_limits) if hasattr(checkpoint, "rate_limits") else None}, "timestamp": __import__("time").time() * 1000}) + "\n")
except Exception:
pass
# #endregion
try:
if not checkpoint.rate_limits:
# #region agent log
try:
with open(log_path, "a", encoding="utf-8") as f:
f.write(json.dumps({"sessionId": "debug-session", "runId": "run1", "hypothesisId": "C", "location": "checkpointing.py:1117", "message": "Early return: checkpoint.rate_limits is None/empty", "data": {}, "timestamp": __import__("time").time() * 1000}) + "\n")
except Exception:
pass
# #endregion
return

# Get session manager
session_manager = getattr(session, "session_manager", None)
# #region agent log
try:
with open(log_path, "a", encoding="utf-8") as f:
f.write(json.dumps({"sessionId": "debug-session", "runId": "run1", "hypothesisId": "B", "location": "checkpointing.py:1121", "message": "Session manager check", "data": {"has_session_manager": session_manager is not None, "has_set_rate_limits": hasattr(session_manager, "set_rate_limits") if session_manager else False}, "timestamp": __import__("time").time() * 1000}) + "\n")
except Exception:
pass
# #endregion
if not session_manager:
# #region agent log
try:
with open(log_path, "a", encoding="utf-8") as f:
f.write(json.dumps({"sessionId": "debug-session", "runId": "run1", "hypothesisId": "B", "location": "checkpointing.py:1123", "message": "Early return: session_manager is None", "data": {}, "timestamp": __import__("time").time() * 1000}) + "\n")
except Exception:
pass
# #endregion
return

# Get info hash
info_hash = getattr(self._ctx.info, "info_hash", None)
# Get info hash - try ctx.info first, fall back to checkpoint.info_hash
# #region agent log
try:
with open(log_path, "a", encoding="utf-8") as f:
f.write(json.dumps({"sessionId": "debug-session", "runId": "run1", "hypothesisId": "A", "location": "checkpointing.py:1125", "message": "Before info hash check", "data": {"has_ctx": hasattr(self, "_ctx"), "has_ctx_info": hasattr(self._ctx, "info") if hasattr(self, "_ctx") else False, "ctx_info": str(getattr(self._ctx, "info", None)) if hasattr(self, "_ctx") else None, "checkpoint_info_hash": str(checkpoint.info_hash) if hasattr(checkpoint, "info_hash") else None}, "timestamp": __import__("time").time() * 1000}) + "\n")
except Exception:
pass
# #endregion
info_hash = getattr(self._ctx.info, "info_hash", None) if hasattr(self._ctx, "info") and self._ctx.info else None
# Fall back to checkpoint.info_hash if ctx.info.info_hash is not available
if not info_hash and hasattr(checkpoint, "info_hash"):
info_hash = checkpoint.info_hash
# #region agent log
try:
with open(log_path, "a", encoding="utf-8") as f:
f.write(json.dumps({"sessionId": "debug-session", "runId": "run1", "hypothesisId": "A", "location": "checkpointing.py:1126", "message": "Info hash check", "data": {"has_ctx_info": hasattr(self._ctx, "info"), "info_hash": str(info_hash) if info_hash else None, "ctx_info_type": str(type(getattr(self._ctx, "info", None))), "used_checkpoint_fallback": not getattr(self._ctx.info, "info_hash", None) if hasattr(self._ctx, "info") and self._ctx.info else False}, "timestamp": __import__("time").time() * 1000}) + "\n")
except Exception:
pass
# #endregion
if not info_hash:
# #region agent log
try:
with open(log_path, "a", encoding="utf-8") as f:
f.write(json.dumps({"sessionId": "debug-session", "runId": "run1", "hypothesisId": "A", "location": "checkpointing.py:1128", "message": "Early return: info_hash is None", "data": {}, "timestamp": __import__("time").time() * 1000}) + "\n")
except Exception:
pass
# #endregion
return

# Convert info hash to hex string for set_rate_limits
Expand All @@ -1134,16 +1215,35 @@ async def _restore_rate_limits(
if hasattr(session_manager, "set_rate_limits"):
down_kib = checkpoint.rate_limits.get("down_kib", 0)
up_kib = checkpoint.rate_limits.get("up_kib", 0)
await session_manager.set_rate_limits(
info_hash_hex, down_kib, up_kib
)
# #region agent log
try:
with open(log_path, "a", encoding="utf-8") as f:
f.write(json.dumps({"sessionId": "debug-session", "runId": "run1", "hypothesisId": "D", "location": "checkpointing.py:1137", "message": "Calling set_rate_limits", "data": {"info_hash_hex": info_hash_hex, "down_kib": down_kib, "up_kib": up_kib}, "timestamp": __import__("time").time() * 1000}) + "\n")
except Exception:
pass
# #endregion
await session_manager.set_rate_limits(info_hash_hex, down_kib, up_kib)
# #region agent log
try:
with open(log_path, "a", encoding="utf-8") as f:
f.write(json.dumps({"sessionId": "debug-session", "runId": "run1", "hypothesisId": "D", "location": "checkpointing.py:1138", "message": "set_rate_limits completed", "data": {}, "timestamp": __import__("time").time() * 1000}) + "\n")
except Exception:
pass
# #endregion
if self._ctx.logger:
self._ctx.logger.debug(
"Restored rate limits: down=%d KiB/s, up=%d KiB/s",
down_kib,
up_kib,
)
except Exception as e:
# #region agent log
try:
with open(log_path, "a", encoding="utf-8") as f:
f.write(json.dumps({"sessionId": "debug-session", "runId": "run1", "hypothesisId": "E", "location": "checkpointing.py:1144", "message": "Exception in _restore_rate_limits", "data": {"exception_type": str(type(e)), "exception_msg": str(e)}, "timestamp": __import__("time").time() * 1000}) + "\n")
except Exception:
pass
# #endregion
if self._ctx.logger:
self._ctx.logger.debug("Failed to restore rate limits: %s", e)

Expand Down
6 changes: 0 additions & 6 deletions ccbt/session/download_startup.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,9 +3,3 @@
This module handles the initialization and startup sequence for torrent downloads,
including metadata retrieval, piece manager setup, and initial peer connections.
"""






6 changes: 0 additions & 6 deletions ccbt/session/manager_startup.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,9 +3,3 @@
This module handles the startup sequence for the session manager, including
component initialization, service startup, and background task coordination.
"""






23 changes: 23 additions & 0 deletions ccbt/session/session.py
Original file line number Diff line number Diff line change
Expand Up @@ -2679,8 +2679,31 @@ async def get_status(self) -> dict[str, Any]:

async def _resume_from_checkpoint(self, checkpoint: TorrentCheckpoint) -> None:
"""Resume download from checkpoint."""
# #region agent log
import json
log_path = r"c:\Users\MeMyself\bittorrentclient\.cursor\debug.log"
try:
with open(log_path, "a", encoding="utf-8") as f:
f.write(json.dumps({"sessionId": "debug-session", "runId": "run1", "hypothesisId": "SESSION", "location": "session.py:2680", "message": "_resume_from_checkpoint entry", "data": {"has_checkpoint_controller": self.checkpoint_controller is not None, "checkpoint_rate_limits": str(checkpoint.rate_limits) if hasattr(checkpoint, "rate_limits") else None}, "timestamp": __import__("time").time() * 1000}) + "\n")
except Exception:
pass
# #endregion
if self.checkpoint_controller:
# #region agent log
try:
with open(log_path, "a", encoding="utf-8") as f:
f.write(json.dumps({"sessionId": "debug-session", "runId": "run1", "hypothesisId": "SESSION", "location": "session.py:2683", "message": "About to call checkpoint_controller.resume_from_checkpoint", "data": {}, "timestamp": __import__("time").time() * 1000}) + "\n")
except Exception:
pass
# #endregion
await self.checkpoint_controller.resume_from_checkpoint(checkpoint, self)
# #region agent log
try:
with open(log_path, "a", encoding="utf-8") as f:
f.write(json.dumps({"sessionId": "debug-session", "runId": "run1", "hypothesisId": "SESSION", "location": "session.py:2683", "message": "checkpoint_controller.resume_from_checkpoint completed", "data": {}, "timestamp": __import__("time").time() * 1000}) + "\n")
except Exception:
pass
# #endregion
else:
self.logger.error("Checkpoint controller not initialized")
msg = "Checkpoint controller not initialized"
Expand Down
16 changes: 7 additions & 9 deletions ccbt/utils/network_optimizer.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@
from collections import deque
from dataclasses import dataclass
from enum import Enum
from typing import Any, Optional
from typing import Any, ClassVar, Optional

from ccbt.utils.exceptions import NetworkError
from ccbt.utils.logging_config import get_logger
Expand Down Expand Up @@ -367,7 +367,7 @@ class ConnectionPool:
"""Connection pool for efficient connection management."""

# Track all active instances for debugging and forced cleanup
_active_instances: set = set()
_active_instances: ClassVar[set[ConnectionPool]] = set()

def __init__(
self,
Expand Down Expand Up @@ -801,14 +801,12 @@ def reset_network_optimizer() -> None:

def force_cleanup_all_connection_pools() -> None:
    """Force cleanup all ConnectionPool instances (emergency use for test teardown).

    This function should be used in test fixtures to ensure all ConnectionPool
    instances are properly stopped, preventing thread leaks and test timeouts.
    """
    # Iterate over a snapshot: pool.stop() may remove the pool from the
    # _active_instances registry while we loop.
    for pool in list(ConnectionPool._active_instances):  # noqa: SLF001
        # Best effort cleanup - ignore errors to ensure all pools are attempted
        with contextlib.suppress(Exception):
            pool.stop()
    ConnectionPool._active_instances.clear()  # noqa: SLF001
Loading
Loading