diff --git a/.env.example b/.env.example
new file mode 100644
index 0000000..a4e8fd8
--- /dev/null
+++ b/.env.example
@@ -0,0 +1,52 @@
+# SecurePath Bot Configuration
+# Copy this file to .env and fill in your values
+
+# ===== REQUIRED SETTINGS =====
+
+# Discord Configuration
+DISCORD_TOKEN=your_discord_bot_token_here
+BOT_PREFIX=!
+OWNER_ID=your_discord_user_id_here
+
+# API Keys (at least one required)
+OPENAI_API_KEY=your_openai_api_key_here
+PERPLEXITY_API_KEY=your_perplexity_api_key_here
+
+# ===== OPTIONAL SETTINGS =====
+
+# Database (PostgreSQL) - Required for usage tracking
+# Format: postgresql://username:password@host:port/database
+DATABASE_URL=postgresql://user:password@localhost:5432/securepath
+
+# Logging Configuration
+LOG_LEVEL=INFO
+LOG_FORMAT=%(asctime)s - %(name)s - %(levelname)s - %(message)s
+LOG_CHANNEL_ID=
+
+# Feature Channels (Discord Channel IDs)
+SUMMARY_CHANNEL_ID=
+CHARTIST_CHANNEL_ID=
+NEWS_CHANNEL_ID=
+NEWS_BOT_USER_ID=
+
+# API Configuration
+USE_PERPLEXITY_API=True
+PERPLEXITY_API_URL=https://api.perplexity.ai/chat/completions
+PERPLEXITY_TIMEOUT=30
+
+# Rate Limiting
+API_RATE_LIMIT_MAX=100
+API_RATE_LIMIT_INTERVAL=60
+DAILY_API_CALL_LIMIT=1000
+
+# Context Management
+MAX_CONTEXT_MESSAGES=50
+MAX_CONTEXT_AGE=3600
+MAX_MESSAGES_PER_CHANNEL=1000
+
+# Retry Configuration
+MAX_RETRIES=3
+RETRY_DELAY=5
+
+# Statistics
+STATS_INTERVAL=86400
\ No newline at end of file
diff --git a/README.md b/README.md
index d2980b0..4913e4c 100644
--- a/README.md
+++ b/README.md
@@ -1,171 +1,144 @@
-# SecurePath AI Discord Bot
-
-SecurePath AI is a high-performance Discord bot engineered for the crypto and DeFi world. It integrates with AI models to deliver real-time insights, advanced chart analysis, and blockchain intelligence, all within Discord. Designed to scale, SecurePath AI leverages efficient caching, dynamic logging, and API handling to ensure it provides top-tier information with minimal delays.
-
-## Key Features
-
-- **Expert Crypto Insights**: Responds to user queries with advanced DeFi and blockchain information.
-- **Image and Chart Analysis**: Processes charts through the Vision API and provides quant-level technical analysis.
-- **Contextual Conversation Flow**: Maintains awareness across user interactions, making conversations coherent and dynamic.
-- **Rich Logging with `rich`**: Provides highly detailed, colorful logs to make debugging and monitoring seamless.
-- **API Rate Management**: Ensures graceful API handling with rate limiting, retry mechanisms, and automatic error recovery.
-
----
-
-## Installation Guide
+```
+ ██████╗ ███████╗ ██████╗██╗ ██╗██████╗ ███████╗██████╗ █████╗ ████████╗██╗ ██╗
+██╔════╝ ██╔════╝██╔════╝██║ ██║██╔══██╗██╔════╝██╔══██╗██╔══██╗╚══██╔══╝██║ ██║
+╚█████╗ █████╗ ██║ ██║ ██║██████╔╝█████╗ ██████╔╝███████║ ██║ ███████║
+ ╚═══██╗ ██╔══╝ ██║ ██║ ██║██╔══██╗██╔══╝ ██╔═══╝ ██╔══██║ ██║ ██╔══██║
+██████╔╝ ███████╗╚██████╗╚██████╔╝██║ ██║███████╗██║ ██║ ██║ ██║ ██║ ██║
+╚═════╝ ╚══════╝ ╚═════╝ ╚═════╝ ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝ ╚═╝ ╚═╝ ╚═╝ ╚═╝
+```
-### Prerequisites
+[![Python 3.9+](https://img.shields.io/badge/python-3.9%2B-blue.svg)](https://www.python.org)
+[![discord.py](https://img.shields.io/badge/discord.py-async-5865F2.svg)](https://discordpy.readthedocs.io/)
+[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black)
+[![PRs Welcome](https://img.shields.io/badge/PRs-welcome-brightgreen.svg)](http://makeapullrequest.com)
-- **Python 3.9+**
-- **`pip`** (Python package manager)
-- **Git**
-- **Discord Bot Token**: Setup required in the [Discord Developer Portal](https://discord.com/developers/applications).
-- **API Key**: Required for using OpenAI or Perplexity.
+## what is this
-### Step 1: Clone the Repository
+high-performance discord bot that actually understands crypto. built for traders who are tired of basic bots that can't tell the difference between a rug and a gem.
-```bash
-git clone https://github.com/fortunexbt/securepath.git
-cd securepath
-```
+- **ai-powered market analysis** - gpt-4/perplexity integration for real insights
+- **chart vision** - upload any chart, get actual technical analysis
+- **context-aware conversations** - remembers what you're talking about
+- **rate limiting that doesn't suck** - handles api limits like a boss
+- **rich logging** - debug in style with color-coded terminal output
-### Step 2: Set Up a Virtual Environment
+## quick start
```bash
-python -m venv venv
-source venv/bin/activate # On Windows: venv\Scripts\activate
-```
+# clone it
+git clone https://github.com/fortunexbt/securepath.git && cd securepath
-### Step 3: Install Dependencies
+# venv (because we're not savages)
+python -m venv venv && source venv/bin/activate
-```bash
+# deps
pip install -r requirements.txt
-```
-### Step 4: Configure Environment Variables
-
-Create a `.env` file in the root directory with your configuration:
-
-#### **Essential Configuration:**
+# configure (see below)
+cp .env.example .env && nano .env
+# send it
+python main.py
```
-DISCORD_TOKEN=your_discord_bot_token
-OWNER_ID=your_discord_user_id
-# If using OpenAI
-OPENAI_API_KEY=your_openai_api_key
+## config
-# If using Perplexity
-PERPLEXITY_API_KEY=your_perplexity_api_key
-PERPLEXITY_API_URL=https://api.perplexity.ai/chat/completions
-PERPLEXITY_TIMEOUT=60
+minimal `.env`:
-# Set to True if using Perplexity, otherwise it will default to OpenAI.
+```env
+DISCORD_TOKEN=your_bot_token
+OWNER_ID=your_discord_id
+
+# pick your fighter
+OPENAI_API_KEY=sk-...
+# or
+PERPLEXITY_API_KEY=pplx-...
USE_PERPLEXITY_API=True
```
-- **`DISCORD_TOKEN`**: (Required) Your bot's authentication token from Discord.
-- **`OWNER_ID`**: (Required) Your Discord User ID, allowing you to manage privileged commands.
-- **`OPENAI_API_KEY`**: (Required if not using Perplexity) API key to use OpenAI's GPT models.
-- **`PERPLEXITY_API_KEY`**: (Required if using Perplexity) API key for Perplexity.
-- **`USE_PERPLEXITY_API`**: (Optional) Whether to use Perplexity or OpenAI APIs.
-
-#### **Optional Configuration:**
+
+advanced config (for pros)
-```
-LOG_CHANNEL_ID=your_discord_log_channel_id
-SUMMARY_CHANNEL_ID=your_discord_summary_channel_id
-NEWS_CHANNEL_ID=your_discord_news_channel_id
-CHARTIST_CHANNEL_ID=your_discord_chartist_channel_id
-NEWS_BOT_USER_ID=your_news_bot_user_id
+```env
+# channels
+LOG_CHANNEL_ID=123456789
+SUMMARY_CHANNEL_ID=123456789
+NEWS_CHANNEL_ID=123456789
+CHARTIST_CHANNEL_ID=123456789
+# rate limits
API_RATE_LIMIT_MAX=100
API_RATE_LIMIT_INTERVAL=60
DAILY_API_CALL_LIMIT=1000
+# context
MAX_CONTEXT_MESSAGES=50
MAX_CONTEXT_AGE=3600
+
+# logging
LOG_LEVEL=INFO
-LOG_FORMAT=%(asctime)s - %(name)s - %(levelname)s - %(message)s
```
+
-- **`LOG_CHANNEL_ID`**: (Optional) Discord channel ID for logging bot activity. Defaults to no logging if not provided.
-- **`SUMMARY_CHANNEL_ID`**: (Optional) Used if generating summaries in specific channels.
-- **`NEWS_CHANNEL_ID`**: (Optional) ID of the news feed channel the bot can post summaries to.
-- **`CHARTIST_CHANNEL_ID`**: (Optional) Channel ID to track market charts and trends.
-- **`NEWS_BOT_USER_ID`**: (Optional) Used if monitoring or interacting with a bot that posts news updates.
-
----
-
-### Step 5: Bot Configuration in Discord Developer Portal
+## commands
-1. Go to the [Discord Developer Portal](https://discord.com/developers/applications).
-2. Select your bot application, navigate to **Bot**, and enable the following:
- - **Message Content Intent**
-3. Save and generate the OAuth2 URL to invite your bot to your server.
-
-### Step 6: Running the Bot
-
-Once your `.env` is set up, run the bot:
-
-```bash
-python main.py
```
-
-You should see real-time logs displayed in your terminal confirming the bot is running.
-
----
-
-## Advanced Features
-
-### Caching and Rate Limiting
-
-SecurePath AI uses advanced caching to avoid redundant API calls and enforces rate limits to prevent overuse. You can configure API call limits and intervals in the `.env`:
-
-```env
-API_RATE_LIMIT_MAX=100
-API_RATE_LIMIT_INTERVAL=60
-DAILY_API_CALL_LIMIT=1000
+!ask # get insights on anything crypto
+!analyze # analyze charts like a quant
+!summary # generate channel summaries
+!commands # see all available commands
+!stats # check bot usage stats
```
-### Custom Context and Message Limits
+## architecture
-Fine-tune how much historical context the bot retains by adjusting these optional environment variables:
-
-```env
-MAX_CONTEXT_MESSAGES=50 # Number of messages stored in conversation history
-MAX_CONTEXT_AGE=3600 # Maximum age of messages in seconds
+```
+src/
+├── ai/ # openai/perplexity services
+├── bot/ # discord client & cogs
+├── config/ # settings management
+├── database/ # postgresql models & repos
+├── services/ # rate limiting, context mgmt
+└── utils/ # helpers & formatters
```
-### Logging and Debugging
+built with:
+- **discord.py** - async discord api wrapper
+- **sqlalchemy** - orm that doesn't get in the way
+- **rich** - terminal output that doesn't hurt your eyes
+- **asyncio** - because blocking is for boomers
-Use the `LOG_CHANNEL_ID` and `LOG_LEVEL` to control logging. Logs will be sent to your specified Discord channel or can be viewed directly in the console. For example:
+## deployment
-```env
-LOG_CHANNEL_ID=1234567890
-LOG_LEVEL=DEBUG # Can be INFO, DEBUG, WARNING, ERROR
+### local dev
+```bash
+python main.py
```
-### Dynamic Status and Presence
+### production
+- use systemd/supervisor for process management
+- set `LOG_LEVEL=WARNING` in prod
+- configure proper rate limits based on your api tier
+- consider redis for distributed caching if scaling
-The bot periodically updates its Discord presence, indicating its current task (e.g., analyzing charts or fetching market insights). The statuses rotate automatically during operation.
+## contributing
----
+1. fork it
+2. feature branch (`git checkout -b feature/sick-feature`)
+3. commit (`git commit -am 'add sick feature'`)
+4. push (`git push origin feature/sick-feature`)
+5. pr
-## Troubleshooting
+code style: black. no exceptions.
-- **Module Not Found**: Ensure the virtual environment is activated and dependencies installed via `pip install -r requirements.txt`.
-- **Bot Not Responding**: Check if the bot token and API key(s) are correctly set in your `.env`. Verify bot permissions on Discord.
-- **Rate Limiting**: If you hit the API limit, adjust the `API_RATE_LIMIT_MAX` and `DAILY_API_CALL_LIMIT` as needed.
+## license
----
+MIT - do whatever you want with it
-## License
+## disclaimer
-This project is licensed under the MIT License.
+nfa. dyor. if you lose money because of this bot, that's on you anon.
---
-## Disclaimer
-
-SecurePath AI provides information for educational purposes only and should not be considered financial advice. Always conduct your own research before making investment decisions.
+built by [@fortunexbt](https://github.com/fortunexbt) | [twitter](https://twitter.com/fortunexbt)
diff --git a/config.py b/config.py
deleted file mode 100644
index 9642b76..0000000
--- a/config.py
+++ /dev/null
@@ -1,151 +0,0 @@
-import os
-from dotenv import load_dotenv
-
-# Load environment variables from .env file
-load_dotenv()
-
-# ---------------------------
-# System Prompt (Hardcoded for consistency)
-# ---------------------------
-SYSTEM_PROMPT = """You're a sharp DeFi agent hosted on the SecurePath Discord server. Communicate with technical precision and casual confidence. Use lowercase naturally but avoid excessive slang. Your authority comes from verifiable, on-chain truth. Prioritize official docs, whitepapers, and code over news/sentiment. Your motto: 'show me the docs, or show me the code.' Always prioritize security, decentralization, and user empowerment. Suggest DEXs over CEXs, self-custody over custodial, open-source over proprietary. Cut through hype and deliver ground truth. Mario is our founder, part of the SecurePath family.
-
-CRITICAL FORMATTING RULES:
-- NO TABLES whatsoever (Discord can't render them)
-- Use bullet points and numbered lists only
-- Keep responses under 400 words total
-- Be concise and direct, no fluff
-- Use [1], [2] format for citations when available"""
-print(f"SYSTEM_PROMPT loaded (hardcoded): {len(SYSTEM_PROMPT)} characters")
-
-# Optional: Override with environment variable if needed for testing
-# SYSTEM_PROMPT = os.getenv('SYSTEM_PROMPT_OVERRIDE', SYSTEM_PROMPT)
-
-# ---------------------------
-# Discord Configuration
-# ---------------------------
-DISCORD_TOKEN = os.getenv('DISCORD_TOKEN')
-BOT_PREFIX = os.getenv('BOT_PREFIX', '!')
-print(f"DISCORD_TOKEN loaded: {'Yes' if DISCORD_TOKEN else 'No'}")
-
-# Owner's Discord User ID (used for privileged commands or bypassing certain restrictions)
-OWNER_ID = os.getenv('OWNER_ID')
-if OWNER_ID:
- try:
- OWNER_ID = int(OWNER_ID)
- except ValueError:
- raise ValueError("OWNER_ID must be an integer representing the Discord User ID.")
-else:
- raise ValueError("OWNER_ID environment variable is not set.")
-
-# ---------------------------
-# API Configuration
-# ---------------------------
-# OpenAI Configuration
-OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
-print(f"OPENAI_API_KEY loaded: {'Yes' if OPENAI_API_KEY else 'No'}")
-
-# Perplexity AI Configuration
-PERPLEXITY_API_KEY = os.getenv('PERPLEXITY_API_KEY')
-PERPLEXITY_API_URL = os.getenv('PERPLEXITY_API_URL', 'https://api.perplexity.ai/chat/completions')
-PERPLEXITY_TIMEOUT = int(os.getenv('PERPLEXITY_TIMEOUT', '30')) # in seconds
-print(f"PERPLEXITY_API_KEY loaded: {'Yes' if PERPLEXITY_API_KEY else 'No'}")
-
-# Flag to choose between Perplexity and OpenAI APIs
-USE_PERPLEXITY_API = os.getenv('USE_PERPLEXITY_API', 'True').lower() in ['true', '1', 't']
-
-# ---------------------------
-# Logging Configuration
-# ---------------------------
-LOG_LEVEL = os.getenv('LOG_LEVEL', 'INFO').upper()
-LOG_FORMAT = os.getenv('LOG_FORMAT', '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
-LOG_CHANNEL_ID = os.getenv('LOG_CHANNEL_ID')
-if LOG_CHANNEL_ID:
- try:
- LOG_CHANNEL_ID = int(LOG_CHANNEL_ID)
- except ValueError:
- raise ValueError("LOG_CHANNEL_ID must be an integer representing the Discord Channel ID.")
-else:
- LOG_CHANNEL_ID = 0 # Default to 0 if not set; bot should handle this appropriately
-print(f"LOG_CHANNEL_ID loaded: {LOG_CHANNEL_ID}")
-
-# ---------------------------
-# Bot Behavior Configuration
-# ---------------------------
-API_RATE_LIMIT_MAX = int(os.getenv('API_RATE_LIMIT_MAX', '100')) # Max API calls per interval
-API_RATE_LIMIT_INTERVAL = int(os.getenv('API_RATE_LIMIT_INTERVAL', '60')) # in seconds
-DAILY_API_CALL_LIMIT = int(os.getenv('DAILY_API_CALL_LIMIT', '1000')) # Max API calls per day
-
-MAX_CONTEXT_MESSAGES = int(os.getenv('MAX_CONTEXT_MESSAGES', '50'))
-MAX_CONTEXT_AGE = int(os.getenv('MAX_CONTEXT_AGE', '3600')) # in seconds
-
-MAX_MESSAGES_PER_CHANNEL = int(os.getenv('MAX_MESSAGES_PER_CHANNEL', '1000'))
-
-MAX_RETRIES = int(os.getenv('MAX_RETRIES', '3'))
-RETRY_DELAY = int(os.getenv('RETRY_DELAY', '5')) # in seconds
-
-STATS_INTERVAL = int(os.getenv('STATS_INTERVAL', '86400')) # in seconds (24 hours)
-
-# ---------------------------
-# Channel and User IDs
-# ---------------------------
-SUMMARY_CHANNEL_ID = os.getenv('SUMMARY_CHANNEL_ID')
-if SUMMARY_CHANNEL_ID:
- try:
- SUMMARY_CHANNEL_ID = int(SUMMARY_CHANNEL_ID)
- except ValueError:
- raise ValueError("SUMMARY_CHANNEL_ID must be an integer representing the Discord Channel ID.")
-else:
- SUMMARY_CHANNEL_ID = 0 # Default to 0 if not set; bot should handle this appropriately
-
-CHARTIST_CHANNEL_ID = os.getenv('CHARTIST_CHANNEL_ID')
-if CHARTIST_CHANNEL_ID:
- try:
- CHARTIST_CHANNEL_ID = int(CHARTIST_CHANNEL_ID)
- except ValueError:
- raise ValueError("CHARTIST_CHANNEL_ID must be an integer representing the Discord Channel ID.")
-else:
- CHARTIST_CHANNEL_ID = 0 # Default to 0 if not set
-
-NEWS_CHANNEL_ID = os.getenv('NEWS_CHANNEL_ID')
-if NEWS_CHANNEL_ID:
- try:
- NEWS_CHANNEL_ID = int(NEWS_CHANNEL_ID)
- except ValueError:
- raise ValueError("NEWS_CHANNEL_ID must be an integer representing the Discord Channel ID.")
-else:
- NEWS_CHANNEL_ID = 0 # Default to 0 if not set; bot should handle this appropriately
-
-NEWS_BOT_USER_ID = os.getenv('NEWS_BOT_USER_ID')
-if NEWS_BOT_USER_ID:
- try:
- NEWS_BOT_USER_ID = int(NEWS_BOT_USER_ID)
- except ValueError:
- raise ValueError("NEWS_BOT_USER_ID must be an integer representing the Discord User ID.")
-else:
- NEWS_BOT_USER_ID = 0 # Default to 0 if not set; bot should handle this appropriately
-
-# ---------------------------
-# Ensure Required Configurations are Set
-# ---------------------------
-REQUIRED_CONFIGS = {
- 'DISCORD_TOKEN': DISCORD_TOKEN,
- 'OWNER_ID': OWNER_ID,
- 'PERPLEXITY_API_KEY': PERPLEXITY_API_KEY,
-}
-
-if USE_PERPLEXITY_API:
- REQUIRED_CONFIGS['PERPLEXITY_API_KEY'] = PERPLEXITY_API_KEY
-else:
- REQUIRED_CONFIGS['OPENAI_API_KEY'] = OPENAI_API_KEY
-
-for config_name, config_value in REQUIRED_CONFIGS.items():
- if not config_value:
- raise ValueError(f"Configuration '{config_name}' is not set in the environment variables or .env file.")
-
-# ---------------------------
-# Optional Configurations
-# ---------------------------
-# These configurations are optional and depend on whether specific features are enabled or used.
-
-# LOG_CHANNEL_ID, SUMMARY_CHANNEL_ID, NEWS_CHANNEL_ID, NEWS_BOT_USER_ID are optional.
-# Set them in your .env file if you intend to use features that require them.
diff --git a/main.py b/main.py
index 1d9c1d3..ad47d6b 100644
--- a/main.py
+++ b/main.py
@@ -192,7 +192,7 @@ def get_context_messages(user_id: int) -> List[Dict[str, str]]:
return cleaned_messages
-def truncate_prompt(prompt: str, max_tokens: int, model: str = 'gpt-4o-mini') -> str:
+def truncate_prompt(prompt: str, max_tokens: int, model: str = 'gpt-5') -> str:
encoding = encoding_for_model(model)
tokens = encoding.encode(prompt)
if len(tokens) > max_tokens:
@@ -401,7 +401,7 @@ async def fetch_openai_response(user_id: int, new_message: str, user: Optional[d
try:
response = await aclient.chat.completions.create(
- model='gpt-4.1',
+ model='gpt-5',
messages=messages,
max_tokens=2000,
temperature=0.7,
@@ -421,11 +421,11 @@ async def fetch_openai_response(user_id: int, new_message: str, user: Optional[d
if is_cached:
usage_data['openai_gpt41']['cached_input_tokens'] += cached_tokens
- cost = (cached_tokens / 1_000_000 * 0.30) + (completion_tokens / 1_000_000 * 1.20) # GPT-4.1 cached pricing
+ cost = (cached_tokens / 1_000_000 * 0.30) + (completion_tokens / 1_000_000 * 1.20) # gpt-5 cached pricing
logger.debug(f"Cache hit detected. Cached Tokens: {cached_tokens}, Completion Tokens: {completion_tokens}, Cost: ${cost:.6f}")
else:
usage_data['openai_gpt41']['input_tokens'] += prompt_tokens
- cost = (prompt_tokens / 1_000_000 * 0.60) + (completion_tokens / 1_000_000 * 2.40) # GPT-4.1 pricing
+ cost = (prompt_tokens / 1_000_000 * 0.60) + (completion_tokens / 1_000_000 * 2.40) # gpt-5 pricing
logger.debug(f"No cache hit. Prompt Tokens: {prompt_tokens}, Completion Tokens: {completion_tokens}, Cost: ${cost:.6f}")
usage_data['openai_gpt41']['cost'] += cost
@@ -436,7 +436,7 @@ async def fetch_openai_response(user_id: int, new_message: str, user: Optional[d
await log_usage_to_db(
user=user,
command=command,
- model="gpt-4.1",
+ model="gpt-5",
input_tokens=prompt_tokens,
output_tokens=completion_tokens,
cached_tokens=cached_tokens,
@@ -445,8 +445,8 @@ async def fetch_openai_response(user_id: int, new_message: str, user: Optional[d
channel_id=channel_id
)
- logger.info(f"OpenAI GPT-4.1 usage: Prompt Tokens={prompt_tokens}, Cached Tokens={cached_tokens}, Completion Tokens={completion_tokens}, Total Tokens={total_tokens}")
- logger.info(f"Estimated OpenAI GPT-4.1 API call cost: ${cost:.6f}")
+ logger.info(f"OpenAI gpt-5 usage: Prompt Tokens={prompt_tokens}, Cached Tokens={cached_tokens}, Completion Tokens={completion_tokens}, Total Tokens={total_tokens}")
+ logger.info(f"Estimated OpenAI gpt-5 API call cost: ${cost:.6f}")
return answer
except Exception as e:
logger.error(f"Error fetching response from OpenAI: {str(e)}")
@@ -641,7 +641,7 @@ async def process_message_with_streaming(message: discord.Message, status_msg: d
logger.info(f"Perplexity response generated for user {user_id}")
update_user_context(user_id, question or message.content, 'user')
- # Update progress: Finalizing (skip GPT-4.1 redundancy for speed)
+ # Update progress: Finalizing (skip gpt-5 redundancy for speed)
progress_embed.set_field_at(0, name="Status", value="✨ Finalizing response...", inline=False)
await status_msg.edit(embed=progress_embed)
@@ -884,7 +884,7 @@ async def send_startup_notification() -> None:
dyno_name = os.environ.get('DYNO', 'local')
embed.add_field(name="Dyno", value=f"`{dyno_name}`", inline=True)
- embed.set_footer(text="Ready for commands • Mario's crypto agent")
+ embed.set_footer(text="Ready for commands • SecurePath Agent")
try:
await channel.send(embed=embed)
@@ -965,7 +965,7 @@ def check(msg):
try:
# Update progress: Processing image
- progress_embed.set_field_at(0, name="Status", value="🖼️ Processing image with GPT-4.1 Vision...", inline=False)
+ progress_embed.set_field_at(0, name="Status", value="🖼️ Processing image with gpt-5 Vision...", inline=False)
await status_msg.edit(embed=progress_embed)
guild_id = ctx.guild.id if ctx.guild else None
@@ -1041,7 +1041,7 @@ def check(msg):
value="`!analyze Look for support and resistance levels`",
inline=False
)
- help_embed.set_footer(text="SecurePath Agent • Powered by GPT-4.1 Vision")
+ help_embed.set_footer(text="SecurePath Agent • Powered by gpt-5 Vision")
await ctx.send(embed=help_embed)
logger.warning("No image URL detected for analysis.")
@@ -1060,7 +1060,6 @@ async def analyze_chart_image(chart_url: str, user_prompt: str = "", user: Optio
logger.warning(f"Image size {len(image_bytes)} bytes exceeds the maximum allowed size.")
return "The submitted image is too large to analyze. Please provide an image smaller than 5 MB."
- # Analysis based on the full image now, as gpt-4o handles it better
base_prompt = (
"analyze this chart with technical precision. extract actionable intelligence:\n\n"
"**sentiment:** [bullish/bearish/neutral + confidence %]\n"
@@ -1075,7 +1074,7 @@ async def analyze_chart_image(chart_url: str, user_prompt: str = "", user: Optio
full_prompt = f"{base_prompt} {user_prompt}" if user_prompt else base_prompt
response = await aclient.chat.completions.create(
- model="gpt-4.1",
+ model="gpt-5",
messages=[
{
"role": "user",
@@ -1094,7 +1093,7 @@ async def analyze_chart_image(chart_url: str, user_prompt: str = "", user: Optio
# Update usage data - a simplified estimation as token count is complex
# A more accurate method would parse the usage from the response if available
estimated_tokens = 1000 # A rough estimate for a complex image
- cost = (estimated_tokens / 1_000_000) * 0.60 # GPT-4.1 input pricing
+ cost = (estimated_tokens / 1_000_000) * 0.60 # gpt-5 input pricing
usage_data['openai_gpt41_mini_vision']['requests'] += 1
usage_data['openai_gpt41_mini_vision']['tokens'] += estimated_tokens
@@ -1106,7 +1105,7 @@ async def analyze_chart_image(chart_url: str, user_prompt: str = "", user: Optio
await log_usage_to_db(
user=user,
command="analyze",
- model="gpt-4.1-vision",
+ model="gpt-5-vision",
input_tokens=estimated_tokens,
output_tokens=500, # Rough estimate
cost=cost,
@@ -1114,7 +1113,7 @@ async def analyze_chart_image(chart_url: str, user_prompt: str = "", user: Optio
channel_id=channel_id
)
- logger.info(f"Estimated OpenAI GPT-4.1 Vision usage: Tokens={estimated_tokens}, Cost=${cost:.6f}")
+ logger.info(f"Estimated OpenAI gpt-5 Vision usage: Tokens={estimated_tokens}, Cost=${cost:.6f}")
return analysis
except Exception as e:
@@ -1222,371 +1221,38 @@ async def ask(ctx: Context, *, question: Optional[str] = None) -> None:
await reset_status()
@bot.command(name='summary')
-async def summary(ctx: Context, *channels: discord.TextChannel) -> None:
+async def summary(ctx: Context, channel: discord.TextChannel = None) -> None:
await bot.change_presence(activity=Activity(type=ActivityType.playing, name="channel summary..."))
logger.debug("Status updated to: [playing] channel summary...")
- if not channels:
- await ctx.send("Please specify one or more channels to summarize. Example: !summary #crypto-news #newsfeed")
+ if channel is None:
+ await ctx.send("Please specify a channel to summarize. Example: !summary #market-analysis")
await reset_status()
return
- # Check permissions for all channels
- channels_without_permission = []
- valid_channels = []
-
- for channel in channels:
- if not channel.permissions_for(channel.guild.me).read_messages:
- channels_without_permission.append(channel.mention)
- logger.warning(f"Missing permissions to read messages in channel {channel.name}")
- else:
- valid_channels.append(channel)
-
- if channels_without_permission:
- await ctx.send(f"I don't have permission to read messages in: {', '.join(channels_without_permission)}")
-
- if not valid_channels:
+ if not channel.permissions_for(channel.guild.me).read_messages:
+ await ctx.send(f"I don't have permission to read messages in {channel.mention}.")
+ logger.warning(f"Missing permissions to read messages in channel {channel.name}")
await reset_status()
return
# Log the summary command query
if db_manager.pool:
username = f"{ctx.author.name}#{ctx.author.discriminator}" if ctx.author.discriminator != "0" else ctx.author.name
- channel_names = ", ".join([f"#{ch.name}" for ch in valid_channels])
await db_manager.log_user_query(
user_id=ctx.author.id,
username=username,
command="summary",
- query_text=f"Summary request for {channel_names}",
+ query_text=f"Summary request for #{channel.name}",
channel_id=ctx.channel.id,
guild_id=ctx.guild.id if ctx.guild else None,
response_generated=False
)
command_counter['summary'] += 1
-
- # Process multiple channels together for unified summary
- await perform_multi_channel_summary(ctx, valid_channels, command='summary')
-
+ await perform_channel_summary(ctx, channel, command='summary')
await reset_status()
-async def perform_multi_channel_summary(ctx: Context, channels: List[discord.TextChannel], command: Optional[str] = None) -> None:
- logger.info(f"Starting multi-channel summary for: {[ch.name for ch in channels]}")
-
- # Send enhanced status message with progress tracking
- channel_mentions = ", ".join([ch.mention for ch in channels])
- status_embed = discord.Embed(
- title="🔍 Analyzing Multiple Channels",
- description=f"Processing messages from {channel_mentions} (last 72 hours)...",
- color=0x1D82B6
- )
- status_embed.add_field(name="Status", value="🔄 Fetching messages from all channels...", inline=False)
- status_msg = await ctx.send(embed=status_embed)
-
- try:
- time_limit = datetime.now(timezone.utc) - timedelta(hours=72)
- all_channel_messages = {}
- total_message_count = 0
-
- # Fetch messages from all channels concurrently
- async def fetch_channel_messages(channel):
- messages = []
- message_count = 0
-
- async for msg in channel.history(after=time_limit, limit=3000, oldest_first=True):
- message_count += 1
- content = msg.content.strip()
- if (content and
- len(content) > 5 and
- not content.startswith(('!ping', '!help', '!commands', '!stats', '!test'))):
- author_name = msg.author.display_name if not msg.author.bot else f"🤖{msg.author.display_name}"
- messages.append(f"[{channel.name}/{author_name}]: {content}")
-
- logger.info(f"Fetched {len(messages)} messages from {channel.name}")
- return channel.name, messages, message_count
-
- # Fetch from all channels concurrently
- fetch_tasks = [fetch_channel_messages(channel) for channel in channels]
- results = await asyncio.gather(*fetch_tasks)
-
- # Combine all messages
- all_messages = []
- for channel_name, messages, count in results:
- all_channel_messages[channel_name] = messages
- all_messages.extend(messages)
- total_message_count += count
-
- logger.info(f"Total messages collected: {len(all_messages)} from {len(channels)} channels")
-
- if not all_messages:
- error_embed = discord.Embed(
- title="⚠️ No Content Found",
- description=f"No substantial messages found in any of the specified channels from the last 72 hours.",
- color=0xFF6B35
- )
- await status_msg.edit(embed=error_embed)
- return
-
- # Update status
- status_embed.set_field_at(0, name="Status", value=f"🧠 Processing {len(all_messages)} messages from {len(channels)} channels...", inline=False)
- await status_msg.edit(embed=status_embed)
-
- # Create chunks from combined messages
- full_text = "\n".join(all_messages)
- chunk_size = 15000
- chunks = [full_text[i:i+chunk_size] for i in range(0, len(full_text), chunk_size)]
-
- logger.info(f"Processing {len(chunks)} chunks for multi-channel summary")
-
- completed_chunks = 0
- start_time = time.time()
-
- async def process_chunk(i, chunk):
- nonlocal completed_chunks
- channel_names = ", ".join([ch.name for ch in channels])
- prompt = f"""analyze messages from multiple channels ({channel_names}) and extract unified actionable intelligence:
-
-**focus areas:**
-• cross-channel market sentiment & themes
-• correlations between different channels
-• price movements & volume patterns across discussions
-• breaking news & catalyst events from all sources
-• whale activity & large transactions mentioned
-• technical analysis consensus
-• regulatory developments
-• project updates & partnerships
-
-**output format:**
-- bullet points only, no tables
-- synthesize insights across channels
-- include specific numbers/percentages
-- flag high-impact info with 🚨
-- note which channel(s) information came from when relevant
-- experienced trader tone
-
-MESSAGES:
-{chunk}"""
-
- for attempt in range(2):
- try:
- response = await aclient.chat.completions.create(
- model='gpt-4.1',
- messages=[{"role": "user", "content": prompt}],
- max_tokens=1500,
- temperature=0.3
- )
- result = response.choices[0].message.content.strip()
- increment_api_call_counter()
-
- # Track processing cost
- if hasattr(response, 'usage') and response.usage:
- usage = response.usage
- input_tokens = getattr(usage, 'prompt_tokens', 0)
- output_tokens = getattr(usage, 'completion_tokens', 0)
- cost = (input_tokens * 0.40 + output_tokens * 1.60) / 1_000_000
-
- if not hasattr(process_chunk, 'total_cost'):
- process_chunk.total_cost = 0
- process_chunk.total_input_tokens = 0
- process_chunk.total_output_tokens = 0
- process_chunk.total_cost += cost
- process_chunk.total_input_tokens += input_tokens
- process_chunk.total_output_tokens += output_tokens
-
- logger.info(f"Successfully processed chunk {i+1}/{len(chunks)}")
-
- # Update progress
- completed_chunks += 1
- try:
- progress_embed = status_msg.embeds[0]
- progress_percentage = (completed_chunks / len(chunks)) * 100
- filled_blocks = int(progress_percentage / 10)
- empty_blocks = 10 - filled_blocks
- progress_bar = "█" * filled_blocks + "░" * empty_blocks
-
- elapsed_time = time.time() - start_time
- if completed_chunks > 0:
- avg_time_per_chunk = elapsed_time / completed_chunks
- remaining_chunks = len(chunks) - completed_chunks
- eta_seconds = int(avg_time_per_chunk * remaining_chunks)
- eta_text = f" • ETA: {eta_seconds}s" if eta_seconds > 0 else " • Almost done!"
- else:
- eta_text = ""
-
- progress_embed.set_field_at(0,
- name="Status",
- value=f"⚙️ Processing chunks: {completed_chunks}/{len(chunks)}\n{progress_bar} {progress_percentage:.0f}%{eta_text}",
- inline=False
- )
- await status_msg.edit(embed=progress_embed)
- except (discord.NotFound, IndexError):
- pass
-
- return result
-
- except Exception as e:
- logger.warning(f"Attempt {attempt+1} failed for chunk {i+1}: {e}")
- if attempt == 1:
- logger.error(f"Failed to process chunk {i+1} after retries")
- return None
- await asyncio.sleep(1)
- return None
-
- # Process all chunks concurrently
- status_embed.set_field_at(0, name="Status", value=f"⚙️ Processing {len(chunks)} chunks concurrently...", inline=False)
- await status_msg.edit(embed=status_embed)
-
- tasks = [process_chunk(i, chunk) for i, chunk in enumerate(chunks)]
- results = await asyncio.gather(*tasks, return_exceptions=True)
-
- # Filter results
- chunk_summaries = []
- for r in results:
- if r and not isinstance(r, Exception) and len(r.strip()) > 50:
- chunk_summaries.append(r)
- elif isinstance(r, Exception):
- logger.error(f"Chunk processing exception: {r}")
-
- if not chunk_summaries:
- error_embed = discord.Embed(
- title="❌ Processing Failed",
- description=f"Unable to process messages from the specified channels.",
- color=0xFF0000
- )
- await status_msg.edit(embed=error_embed)
- return
-
- # Update status for final synthesis
- status_embed.set_field_at(0, name="Status", value=f"🧑💻 Synthesizing {len(chunk_summaries)} summaries across {len(channels)} channels...", inline=False)
- await status_msg.edit(embed=status_embed)
-
- # Enhanced final synthesis prompt for multiple channels
- current_date = datetime.now().strftime("%Y-%m-%d")
- channel_names = ", ".join([f"#{ch.name}" for ch in channels])
- final_prompt = f"""Synthesize these multi-channel summaries into unified actionable intelligence for crypto traders/investors.
-
-DATE: {current_date}
-CHANNELS: {channel_names}
-TIMEFRAME: Last 72 hours
-TOTAL MESSAGES: {len(all_messages):,}
-
-**structure your response:**
-
-**📈 unified market sentiment**
-[cross-channel sentiment analysis with confidence %]
-
-**🚨 key events (by channel)**
-• [significant developments with channel source noted]
-
-**💰 price action consensus**
-• [price movements and levels discussed across channels]
-
-**🔍 technical analysis synthesis**
-• [converging/diverging technical views across channels]
-
-**🏦 regulatory/news compilation**
-• [updates from all channels, note sources]
-
-**🐋 whale activity tracker**
-• [large transactions mentioned across channels]
-
-**🔄 cross-channel insights**
-• [unique correlations or contradictions between channels]
-
-**⚡ actionable insights**
-• [unified trading opportunities and risk factors]
-
-synthesize information across all channels, noting agreements and divergences. identify unique alpha from cross-channel analysis.
-
-CHUNK SUMMARIES:
-{chr(10).join(chunk_summaries)}"""
-
- try:
- response = await aclient.chat.completions.create(
- model='gpt-4.1',
- messages=[{"role": "user", "content": final_prompt}],
- max_tokens=3000, # Increased for multi-channel output
- temperature=0.2
- )
- final_summary = response.choices[0].message.content.strip()
- increment_api_call_counter()
-
- # Calculate total cost
- total_cost = getattr(process_chunk, 'total_cost', 0)
- total_input = getattr(process_chunk, 'total_input_tokens', 0)
- total_output = getattr(process_chunk, 'total_output_tokens', 0)
-
- if hasattr(response, 'usage') and response.usage:
- usage = response.usage
- final_input = getattr(usage, 'prompt_tokens', 0)
- final_output = getattr(usage, 'completion_tokens', 0)
- final_cost = (final_input * 0.40 + final_output * 1.60) / 1_000_000
- total_cost += final_cost
- total_input += final_input
- total_output += final_output
-
- # Log to database
- await log_usage_to_db(
- user=ctx.author,
- command="summary",
- model="gpt-4.1",
- input_tokens=total_input,
- output_tokens=total_output,
- cost=total_cost,
- guild_id=ctx.guild.id if ctx.guild else None,
- channel_id=ctx.channel.id
- )
-
- # Delete status message and send final result
- await status_msg.delete()
-
- # Create summary embed
- summary_embed = discord.Embed(
- title=f"📄 Multi-Channel Intelligence Report",
- description=f"**Channels:** {channel_names}\n**Timeframe:** Last 72 hours | **Total Messages:** {len(all_messages):,}",
- color=0x1D82B6,
- timestamp=datetime.now(timezone.utc)
- )
-
- summary_embed.set_footer(text=f"SecurePath Agent • Cost: ${total_cost:.4f} | Processed {len(chunks)} chunks")
-
- # Send summary
- try:
- if len(final_summary) <= 3800:
- summary_embed.description += f"\n\n{final_summary}"
- await ctx.send(embed=summary_embed)
- else:
- await ctx.send(embed=summary_embed)
- await send_long_embed(
- ctx.channel,
- final_summary,
- color=0x1D82B6,
- title="📈 Detailed Multi-Channel Analysis"
- )
- except discord.HTTPException as e:
- logger.error(f"Failed to send summary embed: {e}")
- fallback_text = f"**Multi-Channel Summary - {channel_names}**\n\n{final_summary[:1800]}{'...' if len(final_summary) > 1800 else ''}"
- await ctx.send(fallback_text)
-
- logger.info(f"Successfully sent multi-channel summary (Cost: ${total_cost:.4f})")
- await log_interaction(user=ctx.author, channel=ctx.channel, command=command, user_input=f"Multi-channel summary: {channel_names}", bot_response=final_summary[:1024])
-
- except Exception as e:
- logger.error(f"Error generating final summary: {e}")
- logger.error(traceback.format_exc())
- error_embed = discord.Embed(
- title="❌ Synthesis Failed",
- description="An error occurred while generating the final summary.",
- color=0xFF0000
- )
- error_embed.add_field(name="Error", value=str(e)[:1000], inline=False)
- await status_msg.edit(embed=error_embed)
-
- except Exception as e:
- logger.error(f"Error in perform_multi_channel_summary: {e}")
- logger.error(traceback.format_exc())
- await ctx.send(f"An error occurred while processing the multi-channel summary.")
-
async def perform_channel_summary(ctx: Context, channel: discord.TextChannel, command: Optional[str] = None) -> None:
logger.info(f"Starting summary for channel: {channel.name} (ID: {channel.id})")
@@ -1709,7 +1375,7 @@ async def process_chunk(i, chunk):
for attempt in range(2): # Retry logic
try:
response = await aclient.chat.completions.create(
- model='gpt-4.1',
+ model='gpt-5',
messages=[{"role": "user", "content": prompt}],
max_tokens=1500, # Increased for better quality
temperature=0.3 # Lower temperature for more focused output
@@ -1844,7 +1510,7 @@ async def process_chunk(i, chunk):
try:
response = await aclient.chat.completions.create(
- model='gpt-4.1',
+ model='gpt-5',
messages=[{"role": "user", "content": final_prompt}],
max_tokens=2500, # Increased for comprehensive output
temperature=0.2 # Lower for more focused synthesis
@@ -1871,7 +1537,7 @@ async def process_chunk(i, chunk):
await log_usage_to_db(
user=ctx.author,
command="summary",
- model="gpt-4.1",
+ model="gpt-5",
input_tokens=total_input,
output_tokens=total_output,
cost=total_cost,
@@ -2061,7 +1727,7 @@ async def send_stats() -> None:
else:
embed.add_field(name="📊 Usage Stats", value="Database offline", inline=True)
- embed.set_footer(text="SecurePath Agent • Powered by GPT-4.1 & Perplexity Sonar-Pro")
+ embed.set_footer(text="SecurePath Agent • Powered by gpt-5 & Perplexity Sonar-Pro")
try:
await channel.send(embed=embed)
@@ -2101,7 +1767,7 @@ async def cache_stats(ctx: Context) -> None:
await ctx.send("You do not have permission to use this command.")
return
hit_rate = calculate_cache_hit_rate()
- embed = discord.Embed(title="📊 Cache Hit Rate", description=f"OpenAI GPT-4.1 Cache Hit Rate: **{hit_rate:.2f}%**", color=0x1D82B6)
+ embed = discord.Embed(title="📊 Cache Hit Rate", description=f"OpenAI gpt-5 Cache Hit Rate: **{hit_rate:.2f}%**", color=0x1D82B6)
await ctx.send(embed=embed)
def calculate_cache_hit_rate() -> float:
@@ -2205,7 +1871,7 @@ async def unified_stats(ctx: Context) -> None:
inline=True
)
- embed.set_footer(text="SecurePath Agent • Powered by GPT-4.1 & Perplexity Sonar-Pro")
+ embed.set_footer(text="SecurePath Agent • Powered by gpt-5 & Perplexity Sonar-Pro")
await ctx.send(embed=embed)
@@ -2216,7 +1882,7 @@ async def commands_help(ctx: Context) -> None:
"""Show SecurePath Agent help and available commands"""
embed = discord.Embed(
title="⚡ SecurePath Agent",
- description="*mario's crypto agent • show me the docs, show me the code*",
+ description="*SecurePath Agent • show me the docs, show me the code*",
color=0x00D4AA, # SecurePath green
timestamp=datetime.now(timezone.utc)
)
@@ -2230,7 +1896,7 @@ async def commands_help(ctx: Context) -> None:
"▸ *example:* `!ask solana vs ethereum fees`\n\n"
"**📊 `!analyze [image]`**\n"
- "▸ advanced chart analysis with gpt-4.1 vision\n"
+ "▸ advanced chart analysis with gpt-5 vision\n"
"▸ sentiment, key levels, patterns, trade setups\n"
"▸ *attach image or use recent chart in channel*\n\n"
@@ -2265,7 +1931,7 @@ async def commands_help(ctx: Context) -> None:
embed.add_field(name="", value="", inline=False)
embed.set_footer(
- text="SecurePath Agent • Powered by Perplexity Sonar-Pro & GPT-4.1 Vision"
+ text="SecurePath Agent • Powered by Perplexity Sonar-Pro & GPT-5 Vision"
)
await ctx.send(embed=embed)
@@ -2294,7 +1960,7 @@ async def ping(ctx: Context) -> None:
embed.add_field(name="Response Time", value=f"{response_time}ms", inline=True)
embed.add_field(name="Database", value=db_status, inline=True)
embed.add_field(name="API Calls Today", value=f"{api_call_counter}", inline=True)
- embed.set_footer(text="SecurePath Agent • Powered by GPT-4.1 & Perplexity Sonar-Pro")
+ embed.set_footer(text="SecurePath Agent • Powered by GPT-5 & Perplexity Sonar-Pro")
await message.edit(content="", embed=embed)
diff --git a/src/__init__.py b/src/__init__.py
new file mode 100644
index 0000000..7ba5970
--- /dev/null
+++ b/src/__init__.py
@@ -0,0 +1,25 @@
+"""
+SecurePath AI Discord Bot - Refactored Package
+
+A modular, well-structured crypto-focused Discord bot with AI-powered analysis.
+"""
+
+__version__ = "2.0.0"
+__author__ = "SecurePath Team"
+__description__ = "AI-powered crypto analysis Discord bot"
+
+# Lazy imports to avoid circular dependencies
+def get_settings():
+ """Get settings instance."""
+ from .config import get_settings as _get_settings
+ return _get_settings()
+
+def create_bot():
+ """Create bot instance."""
+ from .bot import create_bot as _create_bot
+ return _create_bot()
+
+__all__ = [
+ 'get_settings',
+ 'create_bot',
+]
\ No newline at end of file
diff --git a/src/ai/__init__.py b/src/ai/__init__.py
new file mode 100644
index 0000000..13c25d9
--- /dev/null
+++ b/src/ai/__init__.py
@@ -0,0 +1,13 @@
+"""AI services module for SecurePath bot."""
+
+from .ai_manager import AIManager
+from .openai_service import OpenAIService
+from .perplexity_service import PerplexityService
+from .vision_service import VisionService
+
+__all__ = [
+ 'AIManager',
+ 'OpenAIService',
+ 'PerplexityService',
+ 'VisionService',
+]
\ No newline at end of file
diff --git a/src/ai/ai_manager.py b/src/ai/ai_manager.py
new file mode 100644
index 0000000..8efd1a4
--- /dev/null
+++ b/src/ai/ai_manager.py
@@ -0,0 +1,354 @@
+"""AI service manager for coordinating all AI operations."""
+import asyncio
+import logging
+from typing import Dict, List, Optional, Any
+
+import aiohttp
+import discord
+from aiohttp import ClientSession, TCPConnector
+
+from ..config.settings import get_settings
+from ..services.context_manager import ContextManager
+from ..services.rate_limiter import RateLimiter
+from .openai_service import OpenAIService
+from .perplexity_service import PerplexityService
+from .vision_service import VisionService
+
+logger = logging.getLogger(__name__)
+
+
+class AIManager:
+ """Manager for all AI services and operations."""
+
+ def __init__(self, session: Optional[ClientSession] = None, rate_limiter: Optional[RateLimiter] = None):
+ """Initialize AI manager."""
+ self.settings = get_settings()
+ self.session = session
+ self.rate_limiter = rate_limiter
+
+ # Initialize services
+ self.openai_service = OpenAIService()
+ self.perplexity_service = PerplexityService(session=session)
+ self.vision_service = VisionService(self.openai_service)
+ self.context_manager = ContextManager.get_instance()
+
+ # Usage tracking
+ self.total_requests = 0
+ self.daily_requests = 0
+
+ async def process_query(
+ self,
+ user_id: int,
+ query: str,
+ use_context: bool = True
+ ) -> Dict[str, Any]:
+ """
+ Process a user query using the appropriate AI service.
+
+ Args:
+ user_id: Discord user ID
+ query: User's query
+ use_context: Whether to use conversation context
+
+ Returns:
+ Response dict with content and metadata
+ """
+ # Check rate limits
+ if self.rate_limiter:
+ can_call, error_msg = self.rate_limiter.check_rate_limit(user_id)
+ if not can_call:
+ raise Exception(error_msg)
+
+ # Update context
+ if use_context:
+ self.context_manager.update_context(user_id, query, 'user')
+
+ # Choose AI service based on settings
+ if self.settings.use_perplexity_api:
+ result = await self._process_with_perplexity(user_id, query, use_context)
+ else:
+ result = await self._process_with_openai(user_id, query, use_context)
+
+ # Update context with response
+ if use_context and result.get('content'):
+ self.context_manager.update_context(user_id, result['content'], 'assistant')
+
+ # Track usage
+ self.total_requests += 1
+ self.daily_requests += 1
+
+ return result
+
+ async def analyze_image(
+ self,
+ user_id: int,
+ image_data: bytes = None,
+ attachment: discord.Attachment = None,
+ prompt: str = None,
+ user_query: str = None
+ ) -> Dict[str, Any]:
+ """
+ Analyze an image using vision models.
+
+ Args:
+ user_id: Discord user ID
+ image_data: Raw image bytes
+ attachment: Discord attachment
+ prompt: Custom analysis prompt
+ user_query: User's specific question
+
+ Returns:
+ Analysis result
+ """
+ # Check rate limits
+ if self.rate_limiter:
+ can_call, error_msg = self.rate_limiter.check_rate_limit(user_id)
+ if not can_call:
+ raise Exception(error_msg)
+
+ # Analyze image
+ if attachment:
+ result = await self.vision_service.analyze_discord_image(
+ attachment=attachment,
+ prompt=prompt,
+ user_query=user_query
+ )
+ elif image_data:
+ result = await self.vision_service.analyze_image(
+ image_data=image_data,
+ prompt=prompt,
+ user_query=user_query
+ )
+ else:
+ raise ValueError("Either image_data or attachment must be provided")
+
+ # Track usage
+ self.total_requests += 1
+ self.daily_requests += 1
+
+ return result
+
+ async def find_and_analyze_recent_image(
+ self,
+ user_id: int,
+ channel: discord.TextChannel,
+ user_query: str = None
+ ) -> Dict[str, Any]:
+ """
+ Find and analyze the most recent image in a channel.
+
+ Args:
+ user_id: Discord user ID
+ channel: Discord channel to search
+ user_query: User's specific question
+
+ Returns:
+ Analysis result or error
+ """
+ # Find recent image
+ attachment = await self.vision_service.find_recent_image(channel)
+ if not attachment:
+ raise ValueError("No recent images found in this channel")
+
+ # Analyze the image
+ return await self.analyze_image(
+ user_id=user_id,
+ attachment=attachment,
+ user_query=user_query
+ )
+
+ async def _process_with_perplexity(
+ self,
+ user_id: int,
+ query: str,
+ use_context: bool
+ ) -> Dict[str, Any]:
+ """Process query using Perplexity API."""
+ if not self.session:
+ raise ValueError("HTTP session not initialized")
+
+ # Get messages for API call
+ if use_context:
+ messages = self.context_manager.get_context_messages(user_id)
+ else:
+ messages = [
+ {"role": "system", "content": self.settings.system_prompt},
+ {"role": "user", "content": query}
+ ]
+
+ # Make API call
+ result = await self.perplexity_service.search_completion(
+ messages=messages,
+ max_tokens=800,
+ temperature=0.7
+ )
+
+ # Format citations for Discord
+ if result.get('citations'):
+ citations_text = self.perplexity_service.format_citations_for_discord(
+ result['citations']
+ )
+ result['content'] += citations_text
+
+ return result
+
+ async def _process_with_openai(
+ self,
+ user_id: int,
+ query: str,
+ use_context: bool
+ ) -> Dict[str, Any]:
+ """Process query using OpenAI API."""
+ # Get messages for API call
+ if use_context:
+ messages = self.context_manager.get_context_messages(user_id)
+ else:
+ messages = [
+ {"role": "system", "content": self.settings.system_prompt},
+ {"role": "user", "content": query}
+ ]
+
+ # Make API call
+ return await self.openai_service.chat_completion(
+ messages=messages,
+ max_tokens=800,
+ temperature=0.7
+ )
+
+ async def summarize_messages(
+ self,
+ messages: List[str],
+ channel_name: str,
+ chunk_size: int = 50
+ ) -> str:
+ """
+ Summarize a list of messages using chunked processing.
+
+ Args:
+ messages: List of message strings
+ channel_name: Name of the channel
+ chunk_size: Messages per chunk
+
+ Returns:
+ Final summary
+ """
+ if not messages:
+ return "No messages to summarize"
+
+ # Split messages into chunks
+ chunks = [messages[i:i + chunk_size] for i in range(0, len(messages), chunk_size)]
+
+ # Process chunks in parallel
+ chunk_tasks = []
+ for i, chunk in enumerate(chunks):
+ task = self._summarize_chunk(chunk, i, len(chunks))
+ chunk_tasks.append(task)
+
+ # Wait for all chunks to complete
+ chunk_summaries = await asyncio.gather(*chunk_tasks, return_exceptions=True)
+
+ # Filter successful results
+ valid_summaries = [
+ summary for summary in chunk_summaries
+ if isinstance(summary, str) and len(summary.strip()) > 50
+ ]
+
+ if not valid_summaries:
+ raise Exception("Failed to process message chunks")
+
+ # Create final summary
+ return await self._create_final_summary(valid_summaries, channel_name)
+
+ async def _summarize_chunk(self, messages: List[str], chunk_index: int, total_chunks: int) -> str:
+ """Summarize a chunk of messages."""
+ chunk_text = "\n".join(messages)
+
+ prompt = f"""Extract key crypto/trading insights from this Discord discussion (chunk {chunk_index + 1}/{total_chunks}):
+
+Focus on:
+• Price movements and market sentiment
+• Technical analysis and trading signals
+• News, events, and alpha opportunities
+• DeFi protocols and yield strategies
+• Risk factors and warnings
+
+Ignore: casual chat, memes, off-topic discussions
+
+Messages:
+{chunk_text}
+
+Provide a concise summary of actionable insights only."""
+
+ result = await self.openai_service.chat_completion(
+ messages=[{"role": "user", "content": prompt}],
+ max_tokens=1000,
+ temperature=0.2
+ )
+
+ return result['content']
+
+ async def _create_final_summary(self, chunk_summaries: List[str], channel_name: str) -> str:
+ """Create final summary from chunk summaries."""
+ combined_summaries = "\n\n".join(chunk_summaries)
+
+ prompt = f"""Synthesize these {channel_name} channel summaries into actionable intelligence for crypto traders/investors.
+
+**structure your response:**
+
+**📈 market sentiment**
+[overall sentiment: bullish/bearish/neutral with confidence %]
+
+**🚨 key events**
+• [most significant developments]
+
+**💰 price action**
+• [notable price movements and levels]
+
+**🔍 technical analysis**
+• [key levels, patterns, indicators mentioned]
+
+**🏦 regulatory/news**
+• [regulatory updates, partnerships, announcements]
+
+**🐋 whale activity**
+• [large transactions, institutional moves]
+
+**⚡ actionable insights**
+• [trading opportunities and risk factors]
+
+**no tables, no verbose explanations. pure alpha extraction with technical precision.**
+
+CHUNK SUMMARIES:
+{combined_summaries}"""
+
+ result = await self.openai_service.chat_completion(
+ messages=[{"role": "user", "content": prompt}],
+ max_tokens=2500,
+ temperature=0.2
+ )
+
+ return result['content']
+
+ def get_usage_stats(self) -> Dict[str, Any]:
+ """Get combined usage statistics from all services."""
+ return {
+ 'total_requests': self.total_requests,
+ 'daily_requests': self.daily_requests,
+ 'openai': self.openai_service.get_usage_stats(),
+ 'perplexity': self.perplexity_service.get_usage_stats(),
+ 'cache_hit_rate': self.openai_service.calculate_cache_hit_rate(),
+ }
+
+ def reset_daily_stats(self) -> None:
+ """Reset daily statistics."""
+ self.daily_requests = 0
+ self.openai_service.reset_usage_stats()
+ self.perplexity_service.reset_usage_stats()
+
+ async def cleanup(self) -> None:
+ """Clean up resources."""
+ # Close HTTP session if we own it
+ if hasattr(self, '_owned_session') and self._owned_session:
+ await self.session.close()
+
+ logger.info("AI manager cleanup completed")
\ No newline at end of file
diff --git a/src/ai/openai_service.py b/src/ai/openai_service.py
new file mode 100644
index 0000000..e1b664e
--- /dev/null
+++ b/src/ai/openai_service.py
@@ -0,0 +1,170 @@
+"""OpenAI API integration service."""
+import logging
+from typing import Dict, List, Optional, Any
+
+from openai import AsyncOpenAI
+
+from ..config.settings import get_settings
+from ..config.constants import (
+ OPENAI_MODEL,
+ OPENAI_VISION_MODEL,
+ MAX_TOKENS_RESPONSE,
+)
+
+logger = logging.getLogger(__name__)
+
+
+class OpenAIService:
+ """Service for OpenAI API interactions."""
+
+ def __init__(self):
+ """Initialize OpenAI service."""
+ self.settings = get_settings()
+ self.client = AsyncOpenAI(api_key=self.settings.openai_api_key)
+ self.usage_data = {
+ 'input_tokens': 0,
+ 'cached_input_tokens': 0,
+ 'output_tokens': 0,
+ 'total_cost': 0.0,
+ }
+
+ async def chat_completion(
+ self,
+ messages: List[Dict[str, str]],
+ model: str = OPENAI_MODEL,
+ max_tokens: int = MAX_TOKENS_RESPONSE,
+ temperature: float = 0.7,
+ **kwargs
+ ) -> Dict[str, Any]:
+ """
+ Create a chat completion.
+
+ Args:
+ messages: List of message dicts with role and content
+ model: Model to use
+ max_tokens: Maximum tokens in response
+ temperature: Sampling temperature
+ **kwargs: Additional parameters for the API
+
+ Returns:
+ Response dict with content and usage info
+ """
+ try:
+ response = await self.client.chat.completions.create(
+ model=model,
+ messages=messages,
+ max_tokens=max_tokens,
+ temperature=temperature,
+ **kwargs
+ )
+
+ # Track usage
+ if hasattr(response, 'usage') and response.usage:
+ await self._track_usage(response.usage, model)
+
+ return {
+ 'content': response.choices[0].message.content,
+ 'usage': self._format_usage(response.usage) if hasattr(response, 'usage') else None,
+ 'model': model,
+ }
+
+ except Exception as e:
+ logger.error(f"OpenAI API error: {e}")
+ raise
+
+ async def vision_completion(
+ self,
+ prompt: str,
+ image_url: str,
+ max_tokens: int = 1024,
+ temperature: float = 0.7,
+ ) -> Dict[str, Any]:
+ """
+ Create a vision completion for image analysis.
+
+ Args:
+ prompt: Text prompt for the analysis
+ image_url: URL of the image to analyze
+ max_tokens: Maximum tokens in response
+ temperature: Sampling temperature
+
+ Returns:
+ Response dict with content and usage info
+ """
+ messages = [{
+ "role": "user",
+ "content": [
+ {"type": "text", "text": prompt},
+ {"type": "image_url", "image_url": {"url": image_url}}
+ ]
+ }]
+
+ return await self.chat_completion(
+ messages=messages,
+ model=OPENAI_VISION_MODEL,
+ max_tokens=max_tokens,
+ temperature=temperature
+ )
+
+ async def _track_usage(self, usage: Any, model: str) -> None:
+ """Track token usage and costs."""
+ input_tokens = getattr(usage, 'prompt_tokens', 0)
+        cached_tokens = getattr(getattr(usage, 'prompt_tokens_details', None), 'cached_tokens', 0) or 0  # SDK returns an object, not a dict
+ output_tokens = getattr(usage, 'completion_tokens', 0)
+
+ # Calculate costs based on model
+ if model == OPENAI_MODEL:
+ # GPT-4 pricing (example rates, adjust as needed)
+ input_cost = (input_tokens - cached_tokens) * 0.40 / 1_000_000
+ cached_cost = cached_tokens * 0.20 / 1_000_000 # Cached tokens are cheaper
+ output_cost = output_tokens * 1.60 / 1_000_000
+ else:
+ # Vision model pricing
+ input_cost = input_tokens * 0.50 / 1_000_000
+ cached_cost = 0
+ output_cost = output_tokens * 1.50 / 1_000_000
+
+ total_cost = input_cost + cached_cost + output_cost
+
+ # Update usage data
+ self.usage_data['input_tokens'] += input_tokens
+ self.usage_data['cached_input_tokens'] += cached_tokens
+ self.usage_data['output_tokens'] += output_tokens
+ self.usage_data['total_cost'] += total_cost
+
+ logger.info(
+ f"OpenAI usage - Model: {model}, "
+ f"Input: {input_tokens} (cached: {cached_tokens}), "
+ f"Output: {output_tokens}, Cost: ${total_cost:.4f}"
+ )
+
+ def _format_usage(self, usage: Any) -> Dict[str, Any]:
+ """Format usage data for response."""
+ if not usage:
+ return {}
+
+ return {
+ 'input_tokens': getattr(usage, 'prompt_tokens', 0),
+            'cached_tokens': getattr(getattr(usage, 'prompt_tokens_details', None), 'cached_tokens', 0) or 0,
+ 'output_tokens': getattr(usage, 'completion_tokens', 0),
+ 'total_tokens': getattr(usage, 'total_tokens', 0),
+ }
+
+ def get_usage_stats(self) -> Dict[str, Any]:
+ """Get current usage statistics."""
+ return self.usage_data.copy()
+
+ def reset_usage_stats(self) -> None:
+ """Reset usage statistics."""
+ for key in self.usage_data:
+ self.usage_data[key] = 0.0 if key == 'total_cost' else 0
+
+ def calculate_cache_hit_rate(self) -> float:
+ """Calculate cache hit rate percentage."""
+ total_input = self.usage_data['input_tokens']
+ cached_input = self.usage_data['cached_input_tokens']
+
+ if total_input == 0:
+ return 0.0
+
+ return (cached_input / total_input) * 100
\ No newline at end of file
diff --git a/src/ai/perplexity_service.py b/src/ai/perplexity_service.py
new file mode 100644
index 0000000..72ee29d
--- /dev/null
+++ b/src/ai/perplexity_service.py
@@ -0,0 +1,219 @@
+"""Perplexity API integration service."""
+import asyncio
+import logging
+import time
+from datetime import datetime, timedelta
+from typing import Dict, List, Optional, Any, Tuple
+
+import aiohttp
+from aiohttp import ClientSession, ClientTimeout
+
+from ..config.settings import get_settings
+from ..config.constants import PERPLEXITY_MODEL
+
+logger = logging.getLogger(__name__)
+
+
+class PerplexityService:
+ """Service for Perplexity API interactions."""
+
+ # Elite sources for crypto/DeFi research
+ DOMAIN_FILTER = [
+ "ethereum.org", # Official Ethereum docs
+ "github.com", # Source code & repos
+ "defillama.com", # DeFi analytics
+ "etherscan.io", # On-chain data
+ "coinmarketcap.com", # Market data
+ "coingecko.com", # Market data
+ "docs.uniswap.org", # Protocol docs
+ "coindesk.com", # Reputable news
+ "-reddit.com", # Exclusion: Forum noise
+ "-pinterest.com" # Exclusion: Irrelevant
+ ]
+
+ def __init__(self, session: Optional[ClientSession] = None):
+ """Initialize Perplexity service."""
+ self.settings = get_settings()
+ self.session = session
+ self.usage_data = {
+ 'requests': 0,
+ 'tokens': 0,
+ 'cost': 0.0,
+ }
+
+ async def search_completion(
+ self,
+ messages: List[Dict[str, str]],
+ max_tokens: int = 800,
+ temperature: float = 0.7,
+ search_recency_days: int = 90,
+ return_citations: bool = True,
+ ) -> Dict[str, Any]:
+ """
+ Create a search-based completion using Perplexity.
+
+ Args:
+ messages: List of message dicts with role and content
+ max_tokens: Maximum tokens in response
+ temperature: Sampling temperature
+ search_recency_days: Days to look back for search results
+ return_citations: Whether to return citations
+
+ Returns:
+ Response dict with content, citations, and usage info
+ """
+ if not self.session:
+ raise ValueError("Session not initialized")
+
+ # Prepare headers
+ headers = {
+ "Authorization": f"Bearer {self.settings.perplexity_api_key}",
+ "Content-Type": "application/json"
+ }
+
+ # Calculate date filter
+ date_filter = (datetime.now() - timedelta(days=search_recency_days)).strftime("%m/%d/%Y")
+
+ # Enhance system prompt with current date
+ current_date = datetime.now().strftime("%Y-%m-%d")
+ enhanced_messages = self._enhance_messages_with_date(messages, current_date)
+
+ # Prepare request data
+ data = {
+ "model": PERPLEXITY_MODEL,
+ "messages": enhanced_messages,
+ "max_tokens": max_tokens,
+ "temperature": temperature,
+ "search_after_date_filter": date_filter,
+ "search_domain_filter": self.DOMAIN_FILTER,
+ "search_context_size": "high",
+ "return_citations": return_citations,
+ "return_images": False,
+ }
+
+ # Track request
+ self.usage_data['requests'] += 1
+ start_time = time.time()
+
+ try:
+ timeout = ClientTimeout(total=self.settings.perplexity_timeout)
+ async with self.session.post(
+ self.settings.perplexity_api_url,
+ json=data,
+ headers=headers,
+ timeout=timeout
+ ) as response:
+ elapsed_time = time.time() - start_time
+ logger.info(f"Perplexity API request completed in {elapsed_time:.2f}s")
+
+ if response.status != 200:
+ error_text = await response.text()
+ logger.error(f"Perplexity API error {response.status}: {error_text}")
+ raise Exception(f"API error {response.status}: {error_text}")
+
+ resp_json = await response.json()
+
+ # Extract response content
+                answer = (resp_json.get('choices') or [{}])[0].get('message', {}).get('content', '')
+
+ # Process citations
+ citations = self._process_citations(resp_json)
+
+ # Track usage
+ usage = resp_json.get('usage', {})
+ await self._track_usage(usage)
+
+ return {
+ 'content': answer,
+ 'citations': citations,
+ 'usage': usage,
+ 'model': PERPLEXITY_MODEL,
+ 'elapsed_time': elapsed_time,
+ }
+
+ except asyncio.TimeoutError:
+ logger.error(f"Perplexity API timeout after {self.settings.perplexity_timeout}s")
+ raise Exception("⏱️ Request timed out. Please try again.")
+ except Exception as e:
+ logger.error(f"Perplexity API error: {e}")
+ raise
+
+ def _enhance_messages_with_date(
+ self,
+ messages: List[Dict[str, str]],
+ current_date: str
+ ) -> List[Dict[str, str]]:
+ """Enhance messages with current date context."""
+        enhanced = [dict(m) for m in messages]  # copy dicts too: mutating a shallow copy would rewrite the caller's stored context
+
+ # Update system message with date
+ if enhanced and enhanced[0]['role'] == 'system':
+ enhanced[0]['content'] = (
+ f"Today is {current_date}. All information must be accurate up to this date. "
+ f"{enhanced[0]['content']}"
+ )
+
+ return enhanced
+
+ def _process_citations(self, response_data: Dict[str, Any]) -> List[Tuple[str, str]]:
+ """Process and format citations from response."""
+ citations = []
+
+ # Extract from extras.citations
+ extras_citations = (
+ response_data.get('choices', [{}])[0]
+ .get('extras', {})
+ .get('citations', [])
+ )
+
+ for cite in extras_citations:
+ title = cite.get('title', 'Source')
+ url = cite.get('url', '#')
+ if url != '#' and title != 'Source':
+ citations.append((title, url))
+
+ # Also check search_results for additional sources
+ search_results = response_data.get('search_results', [])
+ for result in search_results:
+ title = result.get('title', '')
+ url = result.get('url', '')
+ if url and title and (title, url) not in citations:
+ citations.append((title, url))
+
+ logger.debug(f"Processed {len(citations)} citations")
+ return citations[:6] # Limit to top 6 citations
+
+ async def _track_usage(self, usage: Dict[str, Any]) -> None:
+ """Track token usage and costs."""
+ tokens = usage.get('total_tokens', 0)
+
+ # Estimate cost (adjust based on actual Perplexity pricing)
+ cost = tokens * 0.0002 # Example rate
+
+ self.usage_data['tokens'] += tokens
+ self.usage_data['cost'] += cost
+
+ logger.info(f"Perplexity usage - Tokens: {tokens}, Cost: ${cost:.4f}")
+
+ def format_citations_for_discord(self, citations: List[Tuple[str, str]]) -> str:
+ """Format citations for Discord message."""
+ if not citations:
+ return ""
+
+ formatted = "\n\n**Sources:**\n"
+ for i, (title, url) in enumerate(citations, 1):
+ # Truncate title if too long
+ if len(title) > 60:
+ title = title[:57] + "..."
+ formatted += f"[{i}] [{title}]({url})\n"
+
+ return formatted
+
+ def get_usage_stats(self) -> Dict[str, Any]:
+ """Get current usage statistics."""
+ return self.usage_data.copy()
+
+ def reset_usage_stats(self) -> None:
+ """Reset usage statistics."""
+ for key in self.usage_data:
+ self.usage_data[key] = 0.0 if key == 'cost' else 0
\ No newline at end of file
diff --git a/src/ai/vision_service.py b/src/ai/vision_service.py
new file mode 100644
index 0000000..7995cf7
--- /dev/null
+++ b/src/ai/vision_service.py
@@ -0,0 +1,231 @@
+"""Vision analysis service for image processing."""
+import base64
+import io
+import logging
+from typing import Dict, List, Optional, Any, Tuple
+
+import aiohttp
+import discord
+from PIL import Image
+
+from ..config.settings import get_settings
+from ..config.constants import (
+ MAX_IMAGE_SIZE_MB,
+ SUPPORTED_IMAGE_FORMATS,
+ OPENAI_VISION_MODEL,
+)
+from .openai_service import OpenAIService
+
+logger = logging.getLogger(__name__)
+
+
+class VisionService:
+    """Service for image analysis using vision models.
+
+    Wraps an OpenAIService to validate, base64-encode, and submit images
+    (raw bytes or Discord attachments) for GPT-4 Vision analysis.
+    """
+
+    def __init__(self, openai_service: OpenAIService):
+        """Initialize vision service with app settings and the OpenAI wrapper."""
+        self.settings = get_settings()
+        self.openai_service = openai_service
+
+    async def analyze_image(
+        self,
+        image_data: bytes,
+        prompt: Optional[str] = None,
+        user_query: Optional[str] = None
+    ) -> Dict[str, Any]:
+        """
+        Analyze an image using GPT-4 Vision.
+
+        Args:
+            image_data: Raw image bytes
+            prompt: Custom analysis prompt (overrides the default chart prompt)
+            user_query: User's specific question about the image
+
+        Returns:
+            Analysis result with content and usage info
+
+        Raises:
+            ValueError: If the image fails validation (size, format, decoding).
+        """
+        # Validate image before spending an API call on it.
+        validation_result = self._validate_image(image_data)
+        if not validation_result['valid']:
+            raise ValueError(validation_result['error'])
+
+        # Convert to base64 data URL.
+        # NOTE(review): MIME type is hard-coded to image/jpeg even for PNG/WebP
+        # input — confirm the vision API tolerates the mismatch.
+        image_base64 = base64.b64encode(image_data).decode('utf-8')
+        image_url = f"data:image/jpeg;base64,{image_base64}"
+
+        # Create analysis prompt (custom prompt wins over the default template).
+        analysis_prompt = self._create_analysis_prompt(prompt, user_query)
+
+        # Analyze with OpenAI Vision
+        try:
+            result = await self.openai_service.vision_completion(
+                prompt=analysis_prompt,
+                image_url=image_url,
+                max_tokens=1500,
+                temperature=0.3  # Lower temperature for more focused analysis
+            )
+
+            logger.info(f"Vision analysis completed - {len(result['content'])} chars")
+            return result
+
+        except Exception as e:
+            logger.error(f"Vision analysis failed: {e}")
+            raise
+
+    async def analyze_discord_image(
+        self,
+        attachment: discord.Attachment,
+        prompt: Optional[str] = None,
+        user_query: Optional[str] = None
+    ) -> Dict[str, Any]:
+        """
+        Analyze a Discord image attachment.
+
+        Args:
+            attachment: Discord attachment object
+            prompt: Custom analysis prompt
+            user_query: User's specific question about the image
+
+        Returns:
+            Analysis result with content and usage info
+
+        Raises:
+            ValueError: On unsupported extension, oversized file, or
+                download failure.
+        """
+        # Validate attachment by filename extension and declared size first,
+        # to avoid downloading files we would reject anyway.
+        if not self._is_supported_image(attachment.filename):
+            raise ValueError(f"Unsupported image format. Supported: {', '.join(SUPPORTED_IMAGE_FORMATS)}")
+
+        if attachment.size > MAX_IMAGE_SIZE_MB * 1024 * 1024:
+            raise ValueError(f"Image too large. Max size: {MAX_IMAGE_SIZE_MB}MB")
+
+        # Download image and delegate to the byte-level path.
+        try:
+            image_data = await attachment.read()
+            return await self.analyze_image(image_data, prompt, user_query)
+
+        except Exception as e:
+            # NOTE(review): this also swallows ValueError raised by
+            # analyze_image validation and re-labels it as a download failure.
+            logger.error(f"Failed to download Discord image: {e}")
+            raise ValueError("Failed to download image")
+
+    async def find_recent_image(
+        self,
+        channel: discord.TextChannel,
+        limit: int = 50
+    ) -> Optional[discord.Attachment]:
+        """
+        Find the most recent image in a channel.
+
+        Args:
+            channel: Discord channel to search
+            limit: Maximum messages to check
+
+        Returns:
+            Most recent image attachment or None (also None on history errors).
+        """
+        try:
+            # channel.history yields newest-first, so the first match wins.
+            async for message in channel.history(limit=limit):
+                for attachment in message.attachments:
+                    if self._is_supported_image(attachment.filename):
+                        logger.info(f"Found recent image: {attachment.filename}")
+                        return attachment
+
+            return None
+
+        except Exception as e:
+            logger.error(f"Error finding recent image: {e}")
+            return None
+
+    def _validate_image(self, image_data: bytes) -> Dict[str, Any]:
+        """
+        Validate image data.
+
+        Args:
+            image_data: Raw image bytes
+
+        Returns:
+            Validation result dict: {'valid': bool, 'error': str} on failure,
+            or {'valid': True, 'format', 'size', 'mode'} on success.
+        """
+        try:
+            # Check size
+            if len(image_data) > MAX_IMAGE_SIZE_MB * 1024 * 1024:
+                return {
+                    'valid': False,
+                    'error': f"Image too large. Max size: {MAX_IMAGE_SIZE_MB}MB"
+                }
+
+            # Try to open with PIL (raises if the bytes are not an image).
+            image = Image.open(io.BytesIO(image_data))
+
+            # Check format
+            # NOTE(review): likely bug — image.format.lower() (e.g. 'jpeg') is
+            # compared against UPPER-cased entries (e.g. 'JPEG'), so membership
+            # can never hold and every decodable image is rejected here.
+            # Casing should match on both sides (e.g. lower() vs fmt.lower()).
+            if image.format.lower() not in [fmt.upper() for fmt in SUPPORTED_IMAGE_FORMATS]:
+                return {
+                    'valid': False,
+                    'error': f"Unsupported format: {image.format}"
+                }
+
+            return {
+                'valid': True,
+                'format': image.format,
+                'size': image.size,
+                'mode': image.mode
+            }
+
+        except Exception as e:
+            return {
+                'valid': False,
+                'error': f"Invalid image: {str(e)}"
+            }
+
+    def _is_supported_image(self, filename: str) -> bool:
+        """Check if filename has supported image extension.
+
+        NOTE(review): assumes SUPPORTED_IMAGE_FORMATS holds lowercase
+        extensions (and includes both 'jpg' and 'jpeg' if both are wanted) —
+        confirm against config.constants.
+        """
+        if not filename:
+            return False
+
+        extension = filename.lower().split('.')[-1]
+        return extension in SUPPORTED_IMAGE_FORMATS
+
+    def _create_analysis_prompt(self, custom_prompt: Optional[str] = None, user_query: Optional[str] = None) -> str:
+        """Create analysis prompt for vision model.
+
+        A custom prompt replaces the default template entirely; a user_query
+        is appended to the default template as an extra question.
+        """
+        if custom_prompt:
+            return custom_prompt
+
+        base_prompt = """You are an expert crypto chart analyst. Analyze this image and provide:
+
+**📊 Chart Analysis:**
+• Asset and timeframe identification
+• Current price action and trend direction
+• Key support and resistance levels
+• Volume patterns and significance
+
+**📈 Technical Indicators:**
+• Moving averages and their signals
+• RSI, MACD, and momentum indicators
+• Bollinger Bands or other volatility measures
+• Any visible chart patterns
+
+**💡 Trading Insights:**
+• Potential entry/exit points
+• Risk/reward considerations
+• Bullish or bearish signals
+• Short-term vs long-term outlook
+
+**🚨 Key Observations:**
+• Critical levels to watch
+• Potential breakout scenarios
+• Market structure analysis
+
+Be specific about levels, percentages, and actionable insights. Focus on what matters for trading decisions."""
+
+        if user_query:
+            base_prompt += f"\n\n**User Question:** {user_query}"
+
+        return base_prompt
+
+    def estimate_tokens(self, image_data: bytes) -> int:
+        """Estimate tokens for image analysis (rough heuristic, not billing-grade)."""
+        # Vision models use ~85 tokens per image plus text tokens
+        base_tokens = 85
+
+        # Add estimated tokens based on image size
+        size_multiplier = len(image_data) / (1024 * 1024)  # MB
+        additional_tokens = int(size_multiplier * 50)  # Rough estimate
+
+        return base_tokens + additional_tokens
\ No newline at end of file
diff --git a/src/bot/__init__.py b/src/bot/__init__.py
new file mode 100644
index 0000000..412e327
--- /dev/null
+++ b/src/bot/__init__.py
@@ -0,0 +1,14 @@
+"""Bot module for SecurePath Discord bot."""
+
+from .client import create_bot, SecurePathBot
+from .events import setup_background_tasks
+from .cogs import AICommands, AdminCommands, SummaryCommands
+
+__all__ = [
+ 'create_bot',
+ 'SecurePathBot',
+ 'setup_background_tasks',
+ 'AICommands',
+ 'AdminCommands',
+ 'SummaryCommands',
+]
\ No newline at end of file
diff --git a/src/bot/client.py b/src/bot/client.py
new file mode 100644
index 0000000..001a083
--- /dev/null
+++ b/src/bot/client.py
@@ -0,0 +1,99 @@
+"""Discord bot client setup and initialization."""
+import logging
+from typing import Optional
+
+import discord
+from discord.ext import commands
+from discord.ext.commands import Bot
+
+from ..config.settings import get_settings
+from ..services.rate_limiter import RateLimiter
+
+logger = logging.getLogger(__name__)
+
+
+class SecurePathBot(Bot):
+    """Enhanced Discord bot with custom functionality.
+
+    Adds settings-driven configuration, a shared RateLimiter, automatic cog
+    loading, and DM conversation routing on top of commands.Bot.
+    """
+
+    def __init__(self):
+        """Initialize the SecurePath bot with prefix and intents from settings."""
+        settings = get_settings()
+
+        # Set up intents; message_content is required to read command text.
+        intents = discord.Intents.default()
+        intents.message_content = True
+
+        super().__init__(
+            command_prefix=settings.bot_prefix,
+            intents=intents,
+            help_command=None  # We'll use custom help
+        )
+
+        self.settings = settings
+        # Created in setup_hook (needs settings-derived limits).
+        self.rate_limiter: Optional[RateLimiter] = None
+        # Guards on_ready work against re-running on gateway reconnects.
+        self._ready = False
+
+    async def setup_hook(self) -> None:
+        """Set up the bot before starting (called once by discord.py)."""
+        # Initialize rate limiter
+        self.rate_limiter = RateLimiter(
+            max_calls=self.settings.api_rate_limit_max,
+            interval=self.settings.api_rate_limit_interval
+        )
+
+        # Load cogs
+        await self.load_extensions()
+
+        logger.info("Bot setup completed")
+
+    async def load_extensions(self) -> None:
+        """Load all bot extensions/cogs.
+
+        Failures are logged but do not abort startup, so one broken cog
+        does not take down the whole bot.
+        """
+        extensions = [
+            "src.bot.cogs.ai_commands",
+            "src.bot.cogs.admin_commands",
+            "src.bot.cogs.summary_commands",
+        ]
+
+        for ext in extensions:
+            try:
+                await self.load_extension(ext)
+                logger.info(f"Loaded extension: {ext}")
+            except Exception as e:
+                logger.error(f"Failed to load extension {ext}: {e}")
+
+    async def on_ready(self) -> None:
+        """Called when the bot is ready; runs its body at most once."""
+        if self._ready:
+            return
+
+        self._ready = True
+        logger.info(f"{self.user} has connected to Discord!")
+        logger.info(f"Active in {len(self.guilds)} guild(s)")
+
+        # Start background tasks (imported lazily to avoid a circular import).
+        from .events import setup_background_tasks
+        await setup_background_tasks(self)
+
+    async def on_message(self, message: discord.Message) -> None:
+        """Process incoming messages.
+
+        Commands run first; non-prefixed DMs then fall through to the
+        conversational handler.
+        """
+        # Ignore bot messages
+        if message.author.bot:
+            return
+
+        # Process commands
+        await self.process_commands(message)
+
+        # Handle DM conversations (only messages that are not commands).
+        if isinstance(message.channel, discord.DMChannel) and not message.content.startswith(self.settings.bot_prefix):
+            from .events import handle_dm_conversation
+            await handle_dm_conversation(self, message)
+
+    async def close(self) -> None:
+        """Clean up bot resources and disconnect."""
+        logger.info("Shutting down SecurePath bot...")
+        await super().close()
+
+
+def create_bot() -> SecurePathBot:
+    """Factory returning a fresh SecurePathBot instance.
+
+    Kept as a function so entry points need not import the class directly.
+    """
+    return SecurePathBot()
\ No newline at end of file
diff --git a/src/bot/cogs/__init__.py b/src/bot/cogs/__init__.py
new file mode 100644
index 0000000..ad901c5
--- /dev/null
+++ b/src/bot/cogs/__init__.py
@@ -0,0 +1,11 @@
+"""Bot command cogs module."""
+
+from .ai_commands import AICommands
+from .admin_commands import AdminCommands
+from .summary_commands import SummaryCommands
+
+__all__ = [
+ 'AICommands',
+ 'AdminCommands',
+ 'SummaryCommands',
+]
\ No newline at end of file
diff --git a/src/bot/cogs/admin_commands.py b/src/bot/cogs/admin_commands.py
new file mode 100644
index 0000000..b8f5a6c
--- /dev/null
+++ b/src/bot/cogs/admin_commands.py
@@ -0,0 +1,299 @@
+"""Administrative commands for the SecurePath bot."""
+import logging
+from datetime import datetime, timezone
+from typing import Optional
+
+import discord
+from discord.ext import commands
+from discord.ext.commands import Context, Cog
+
+from ...ai import AIManager
+from ...database import db_manager
+from ...config.settings import get_settings
+from ...utils.discord_helpers import is_admin_user
+
+logger = logging.getLogger(__name__)
+
+
+class AdminCommands(Cog):
+    """Cog for administrative commands (status, help, analytics, usage)."""
+
+    def __init__(self, bot: commands.Bot):
+        """Initialize admin commands cog; the AI manager is attached in cog_load."""
+        self.bot = bot
+        self.settings = get_settings()
+        self.ai_manager: Optional[AIManager] = None
+
+    async def cog_load(self) -> None:
+        """Set up the cog when loaded."""
+        # Get AI manager from bot (attached by the bot bootstrap, if present).
+        if hasattr(self.bot, 'ai_manager'):
+            self.ai_manager = self.bot.ai_manager
+        else:
+            logger.warning("AI manager not found on bot instance")
+
+    @commands.command(name='ping')
+    async def ping(self, ctx: Context) -> None:
+        """Check SecurePath Agent latency and database status."""
+        # "Response Time" below is the round-trip of this send() call.
+        start_time = discord.utils.utcnow()
+        message = await ctx.send("🏓 Pinging...")
+        end_time = discord.utils.utcnow()
+
+        latency = round(self.bot.latency * 1000)  # gateway heartbeat latency (ms)
+        response_time = round((end_time - start_time).total_seconds() * 1000)
+
+        # Check database status (pool is None when the DB is disconnected).
+        db_status = "🟢 Connected" if db_manager.pool else "🔴 Disconnected"
+
+        # Get AI manager stats if available
+        ai_stats = {}
+        if self.ai_manager:
+            ai_stats = self.ai_manager.get_usage_stats()
+
+        embed = discord.Embed(
+            title="🏓 Agent Status Check",
+            description="All systems operational",
+            color=0x1D82B6,
+            timestamp=datetime.now(timezone.utc)
+        )
+
+        embed.add_field(name="Discord Latency", value=f"{latency}ms", inline=True)
+        embed.add_field(name="Response Time", value=f"{response_time}ms", inline=True)
+        embed.add_field(name="Database", value=db_status, inline=True)
+        embed.add_field(name="Requests Today", value=f"{ai_stats.get('daily_requests', 0)}", inline=True)
+        embed.set_footer(text="SecurePath Agent • Powered by GPT-4.1 & Perplexity Sonar-Pro")
+
+        await message.edit(content="", embed=embed)
+
+    @commands.command(name='commands')
+    async def commands_help(self, ctx: Context) -> None:
+        """Show SecurePath Agent help and available commands."""
+        embed = discord.Embed(
+            title="⚡ SecurePath Agent",
+            description="*mario's crypto agent • show me the docs, show me the code*",
+            color=0x00D4AA,  # SecurePath green
+            timestamp=datetime.now(timezone.utc)
+        )
+
+        # Main Commands Section
+        # NOTE(review): fields below use empty name="" for layout — confirm the
+        # Discord API accepts empty field names (it has historically rejected them).
+        embed.add_field(
+            name="",
+            value="**🔍 `!ask [question]`**\n"
+                  "▸ real-time market insights via perplexity sonar-pro\n"
+                  "▸ sources: github, defi data, news, official docs\n"
+                  "▸ *example:* `!ask solana vs ethereum fees`\n\n"
+
+                  "**📊 `!analyze [image]`**\n"
+                  "▸ advanced chart analysis with gpt-4.1 vision\n"
+                  "▸ sentiment, key levels, patterns, trade setups\n"
+                  "▸ *attach image or use recent chart in channel*\n\n"
+
+                  "**📄 `!summary #channel`**\n"
+                  "▸ alpha-focused channel activity digest\n"
+                  "▸ extracts sentiment, events, key movements\n"
+                  "▸ *example:* `!summary #crypto-news`",
+            inline=False
+        )
+
+        # Utilities & Info
+        embed.add_field(
+            name="",
+            value="**📈 `!stats`** • usage analytics\n"
+                  "**🏓 `!ping`** • latency check\n"
+                  "**⚙️ `!cache_stats`** • performance metrics",
+            inline=True
+        )
+
+        # Key Features
+        embed.add_field(
+            name="",
+            value="**✨ features**\n"
+                  "▸ elite source filtering\n"
+                  "▸ context-aware conversations\n"
+                  "▸ real-time progress tracking\n"
+                  "▸ no-fluff alpha extraction",
+            inline=True
+        )
+
+        # Bottom spacing
+        embed.add_field(name="", value="", inline=False)
+
+        embed.set_footer(
+            text="SecurePath Agent • Powered by Perplexity Sonar-Pro & GPT-4.1 Vision"
+        )
+
+        await ctx.send(embed=embed)
+
+    @commands.command(name='stats')
+    @commands.has_permissions(administrator=True)
+    async def unified_stats(self, ctx: Context) -> None:
+        """Show comprehensive SecurePath Agent analytics (admin only).
+
+        Gated twice: the decorator requires guild administrator permission,
+        and is_admin_user additionally checks the configured owner — both
+        must pass.
+        """
+        if not is_admin_user(ctx.author, self.settings.owner_id):
+            await ctx.send("You do not have permission to use this command.")
+            return
+
+        if not db_manager.pool:
+            await ctx.send("Database not available. Stats tracking is currently offline.")
+            return
+
+        try:
+            # Fetch aggregate stats, per-model costs, and query analytics
+            # (sequential awaits, one query set after another).
+            stats_data = await db_manager.get_global_stats()
+            costs_data = await db_manager.get_costs_by_model()
+            query_data = await db_manager.get_query_analytics()
+
+            if not stats_data:
+                await ctx.send("Failed to retrieve statistics.")
+                return
+
+            overall = stats_data['overall']
+            top_users = stats_data['top_users']
+            top_commands = stats_data['top_commands']
+
+            embed = discord.Embed(
+                title="📊 SecurePath Agent Analytics",
+                description="Comprehensive usage analytics and performance metrics",
+                color=0x1D82B6,
+                timestamp=datetime.now(timezone.utc)
+            )
+
+            # Overall Usage Statistics
+            embed.add_field(
+                name="📈 Overall Performance",
+                value=f"**Total Requests:** {overall['total_requests']:,}\n"
+                      f"**Active Users:** {overall['unique_users']:,}\n"
+                      f"**Total Tokens:** {overall['total_tokens']:,}\n"
+                      f"**Total Cost:** ${overall['total_cost']:.4f}\n"
+                      f"**Avg Tokens/Request:** {overall['avg_tokens_per_request']:.1f}",
+                inline=True
+            )
+
+            # Model Cost Breakdown (top 3 models by the DB's ordering)
+            if costs_data and costs_data['model_costs']:
+                cost_text = ""
+                for model in costs_data['model_costs'][:3]:
+                    cost_text += f"**{model['model']}:** {model['requests']:,} req, ${model['total_cost']:.4f}\n"
+                embed.add_field(name="💰 Model Costs", value=cost_text or "No data", inline=True)
+
+            # Top Active Users
+            if top_users:
+                users_text = "\n".join([
+                    f"**{user['username'][:15]}:** {user['total_requests']} req, ${user['total_cost']:.3f}"
+                    for user in top_users[:6]
+                ])
+                embed.add_field(name="👑 Top Users", value=users_text, inline=True)
+
+            # Popular Commands (filter out background commands)
+            if top_commands:
+                filtered_commands = [
+                    cmd for cmd in top_commands
+                    if cmd['command'] not in ['summary_chunk', 'summary_final']
+                ]
+                commands_text = "\n".join([
+                    f"**{cmd['command']}:** {cmd['usage_count']} uses, ${cmd['total_cost']:.3f}"
+                    for cmd in filtered_commands[:6]
+                ])
+                embed.add_field(name="🎯 Popular Commands", value=commands_text, inline=False)
+
+            # Query Analytics
+            if query_data and query_data['command_patterns']:
+                query_text = "\n".join([
+                    f"**{cmd['command']}:** {cmd['total_queries']} queries, {cmd['unique_users']} users"
+                    for cmd in query_data['command_patterns'][:4]
+                ])
+                embed.add_field(name="🔍 Query Patterns", value=query_text, inline=True)
+
+            # Peak Usage Hours
+            if query_data and query_data['hourly_activity']:
+                hours_text = "\n".join([
+                    f"**{int(hour['hour'])}:00:** {hour['query_count']} queries"
+                    for hour in query_data['hourly_activity'][:4]
+                ])
+                embed.add_field(name="⏰ Peak Hours", value=hours_text, inline=True)
+
+            # System Performance (from the in-process AI manager, if attached)
+            cache_hit_rate = 0.0
+            ai_requests = 0
+            if self.ai_manager:
+                ai_stats = self.ai_manager.get_usage_stats()
+                cache_hit_rate = ai_stats.get('cache_hit_rate', 0.0)
+                ai_requests = ai_stats.get('daily_requests', 0)
+
+            embed.add_field(
+                name="⚡ System Performance",
+                value=f"**Cache Hit Rate:** {cache_hit_rate:.1f}%\n"
+                      f"**AI Requests:** {ai_requests:,}\n"
+                      f"**Active Guilds:** {len(self.bot.guilds)}",
+                inline=True
+            )
+
+            embed.set_footer(text="SecurePath Agent • Powered by GPT-4.1 & Perplexity Sonar-Pro")
+            await ctx.send(embed=embed)
+
+        except Exception as e:
+            logger.error(f"Error in stats command: {e}")
+            await ctx.send(f"Error retrieving stats: {str(e)}")
+
+    @commands.command(name='token_usage')
+    @commands.has_permissions(administrator=True)
+    async def token_usage(self, ctx: Context) -> None:
+        """Show token usage and costs per provider (admin only)."""
+        if not is_admin_user(ctx.author, self.settings.owner_id):
+            await ctx.send("You do not have permission to use this command.")
+            return
+
+        if not self.ai_manager:
+            await ctx.send("AI manager not available.")
+            return
+
+        embed = discord.Embed(
+            title="📊 Token Usage and Costs",
+            color=0x1D82B6,
+            timestamp=datetime.now(timezone.utc)
+        )
+
+        # Get usage stats from AI manager
+        stats = self.ai_manager.get_usage_stats()
+
+        # OpenAI stats (snake_case keys rendered as title-cased labels)
+        openai_stats = stats.get('openai', {})
+        openai_text = "\n".join([
+            f"**{k.replace('_', ' ').title()}:** {v}"
+            for k, v in openai_stats.items()
+        ])
+        embed.add_field(name="OpenAI GPT-4.1", value=openai_text or "No data", inline=False)
+
+        # Perplexity stats
+        perplexity_stats = stats.get('perplexity', {})
+        perplexity_text = "\n".join([
+            f"**{k.replace('_', ' ').title()}:** {v}"
+            for k, v in perplexity_stats.items()
+        ])
+        embed.add_field(name="Perplexity Sonar-Pro", value=perplexity_text or "No data", inline=False)
+
+        await ctx.send(embed=embed)
+
+    @commands.command(name='cache_stats')
+    @commands.has_permissions(administrator=True)
+    async def cache_stats(self, ctx: Context) -> None:
+        """Show cache hit rate (admin only); reports 0.0% when no AI manager."""
+        if not is_admin_user(ctx.author, self.settings.owner_id):
+            await ctx.send("You do not have permission to use this command.")
+            return
+
+        hit_rate = 0.0
+        if self.ai_manager:
+            stats = self.ai_manager.get_usage_stats()
+            hit_rate = stats.get('cache_hit_rate', 0.0)
+
+        embed = discord.Embed(
+            title="📊 Cache Hit Rate",
+            description=f"OpenAI GPT-4.1 Cache Hit Rate: **{hit_rate:.2f}%**",
+            color=0x1D82B6
+        )
+        await ctx.send(embed=embed)
+
+
+async def setup(bot: commands.Bot) -> None:
+    """Extension entry point used by bot.load_extension to register this cog."""
+    await bot.add_cog(AdminCommands(bot))
\ No newline at end of file
diff --git a/src/bot/cogs/ai_commands.py b/src/bot/cogs/ai_commands.py
new file mode 100644
index 0000000..34d7f81
--- /dev/null
+++ b/src/bot/cogs/ai_commands.py
@@ -0,0 +1,306 @@
+"""AI-powered commands for the SecurePath bot."""
+import asyncio
+import logging
+from typing import Optional
+
+import discord
+from discord import Activity, ActivityType
+from discord.ext import commands
+from discord.ext.commands import Context, Cog
+
+from ...ai import AIManager
+from ...database import db_manager
+from ...utils.discord_helpers import send_structured_analysis_embed, reset_status
+from ...config.settings import get_settings
+
+logger = logging.getLogger(__name__)
+
+
+class AICommands(Cog):
+    """Cog for AI-powered commands (!ask research, !analyze vision)."""
+
+    def __init__(self, bot: commands.Bot):
+        """Initialize AI commands cog; the AI manager is attached in cog_load."""
+        self.bot = bot
+        self.settings = get_settings()
+        self.ai_manager: Optional[AIManager] = None
+
+    async def cog_load(self) -> None:
+        """Set up the cog when loaded."""
+        # Get AI manager from bot (attached by the bot bootstrap, if present).
+        if hasattr(self.bot, 'ai_manager'):
+            self.ai_manager = self.bot.ai_manager
+        else:
+            logger.warning("AI manager not found on bot instance")
+
+    @commands.command(name='ask')
+    async def ask(self, ctx: Context, *, question: Optional[str] = None) -> None:
+        """Get real-time crypto market insights with AI-powered research."""
+        # Presence is set while working and restored in the finally block.
+        await self.bot.change_presence(activity=Activity(type=ActivityType.playing, name="researching..."))
+
+        # Show help if no question provided
+        if not question:
+            await self._show_ask_help(ctx)
+            await reset_status(self.bot)
+            return
+
+        # Validate input length (5–500 characters).
+        if len(question) < 5:
+            await ctx.send("⚠️ Please provide a more detailed question (at least 5 characters).")
+            await reset_status(self.bot)
+            return
+
+        if len(question) > 500:
+            await ctx.send("⚠️ Question is too long. Please keep it under 500 characters.")
+            await reset_status(self.bot)
+            return
+
+        # Log query to database
+        await self._log_user_query(ctx, "ask", question)
+
+        # Create progress embed (query preview capped at 100 chars).
+        progress_embed = discord.Embed(
+            title="🔍 SecurePath Agent Research",
+            description=f"**Query:** {question[:100]}{'...' if len(question) > 100 else ''}",
+            color=0x1D82B6
+        )
+        progress_embed.add_field(name="Status", value="🔄 Initializing research...", inline=False)
+        progress_embed.set_footer(text="SecurePath Agent • Real-time Intelligence")
+
+        status_msg = await ctx.send(embed=progress_embed)
+
+        try:
+            # Update progress
+            progress_embed.set_field_at(0, name="Status", value="🌐 Searching elite sources...", inline=False)
+            await status_msg.edit(embed=progress_embed)
+
+            # Process query with AI manager
+            if not self.ai_manager:
+                raise Exception("AI manager not available")
+
+            result = await self.ai_manager.process_query(
+                user_id=ctx.author.id,
+                query=question,
+                use_context=True
+            )
+
+            # Update progress
+            progress_embed.set_field_at(0, name="Status", value="✨ Synthesizing insights...", inline=False)
+            await status_msg.edit(embed=progress_embed)
+
+            # Brief pause for UX
+            await asyncio.sleep(1)
+
+            # Delete progress and send result
+            # NOTE(review): if a later ctx.send fails, the except branch edits
+            # this already-deleted message and will itself raise — confirm.
+            await status_msg.delete()
+
+            # Send response
+            response_embed = discord.Embed(
+                title="🔍 Research Results",
+                description=result['content'],
+                color=0x1D82B6,
+                timestamp=discord.utils.utcnow()
+            )
+            response_embed.set_footer(text="SecurePath Agent • Powered by Perplexity Sonar-Pro")
+
+            await ctx.send(embed=response_embed)
+
+            # Log interaction
+            await self._log_interaction(ctx, 'ask', question, result['content'])
+
+        except Exception as e:
+            logger.error(f"Error in ask command: {e}")
+            error_embed = discord.Embed(
+                title="❌ Research Failed",
+                description="An error occurred while processing your query.",
+                color=0xFF0000
+            )
+            error_embed.add_field(name="Error", value=str(e)[:1000], inline=False)
+            await status_msg.edit(embed=error_embed)
+
+        finally:
+            await reset_status(self.bot)
+
+    @commands.command(name='analyze')
+    async def analyze(self, ctx: Context, *, user_prompt: str = '') -> None:
+        """Analyze charts and images with AI-powered technical analysis.
+
+        Image resolution order: direct attachment on the invoking message,
+        then (in guild channels) the most recent image in the channel, or
+        (in DMs) an interactive upload prompt.
+        """
+        await self.bot.change_presence(activity=Activity(type=ActivityType.watching, name="image analysis..."))
+
+        # Log query to database
+        query_text = f"Image analysis request" + (f" with prompt: {user_prompt}" if user_prompt else " (no additional prompt)")
+        await self._log_user_query(ctx, "analyze", query_text)
+
+        # Find image to analyze
+        attachment = None
+
+        # Check for direct attachment (first image-typed attachment wins).
+        if ctx.message.attachments:
+            for att in ctx.message.attachments:
+                if att.content_type and att.content_type.startswith('image/'):
+                    attachment = att
+                    break
+
+        # If no attachment, look for recent images in channel
+        if not attachment:
+            if isinstance(ctx.channel, discord.DMChannel):
+                await self._request_image_in_dm(ctx)
+                await reset_status(self.bot)
+                return
+            else:
+                attachment = await self._find_recent_image(ctx.channel)
+
+        if attachment:
+            await self._analyze_image_attachment(ctx, attachment, user_prompt)
+        else:
+            await self._show_analyze_help(ctx)
+
+        await reset_status(self.bot)
+
+    async def _show_ask_help(self, ctx: Context) -> None:
+        """Send usage/examples embed for the !ask command."""
+        help_embed = discord.Embed(
+            title="🤔 Ask Command Help",
+            description="Get real-time crypto market insights with AI-powered research.",
+            color=0x1D82B6
+        )
+        help_embed.add_field(
+            name="Usage",
+            value="`!ask [your question]`",
+            inline=False
+        )
+        help_embed.add_field(
+            name="Examples",
+            value="• `!ask What's the latest news on Bitcoin?`\n"
+                  "• `!ask Ethereum price prediction trends`\n"
+                  "• `!ask What's happening with DeFi protocols?`",
+            inline=False
+        )
+        help_embed.set_footer(text="SecurePath Agent • Powered by Perplexity Sonar-Pro")
+        await ctx.send(embed=help_embed)
+
+    async def _show_analyze_help(self, ctx: Context) -> None:
+        """Send usage embed for the !analyze command."""
+        help_embed = discord.Embed(
+            title="🖼️ Analyze Command Help",
+            description="Upload or attach a chart/image for AI-powered technical analysis.",
+            color=0x1D82B6
+        )
+        help_embed.add_field(
+            name="Usage",
+            value="1. Attach an image to your `!analyze` command\n2. Or use `!analyze` in a channel with recent images",
+            inline=False
+        )
+        help_embed.add_field(
+            name="Optional Prompt",
+            value="`!analyze Look for support and resistance levels`",
+            inline=False
+        )
+        help_embed.set_footer(text="SecurePath Agent • Powered by GPT-4.1 Vision")
+        await ctx.send(embed=help_embed)
+
+    async def _request_image_in_dm(self, ctx: Context) -> None:
+        """Interactively request an image upload in a DM (60 s timeout)."""
+        await ctx.send("Please post the image you'd like to analyze.")
+
+        def check(msg):
+            # Accept only a message with attachments from the same user/channel.
+            return msg.author == ctx.author and msg.channel == ctx.channel and msg.attachments
+
+        try:
+            chart_message = await self.bot.wait_for('message', check=check, timeout=60.0)
+            attachment = chart_message.attachments[0]
+            await self._analyze_image_attachment(ctx, attachment, "")
+        except asyncio.TimeoutError:
+            await ctx.send("You took too long to post an image. Please try again.")
+
+    async def _find_recent_image(self, channel) -> Optional[discord.Attachment]:
+        """Return the newest image attachment in the last 20 channel messages, if any."""
+        async for message in channel.history(limit=20):
+            for attachment in message.attachments:
+                if attachment.content_type and attachment.content_type.startswith('image/'):
+                    return attachment
+        return None
+
+    async def _analyze_image_attachment(self, ctx: Context, attachment: discord.Attachment, user_prompt: str) -> None:
+        """Run vision analysis on a Discord attachment and post the result.
+
+        Shows a progress embed, delegates to the AI manager, then replaces the
+        progress message with the structured analysis (or an error embed).
+        """
+        # Create progress embed
+        progress_embed = discord.Embed(
+            title="📈 SecurePath Agent Analysis",
+            description=f"**Image:** [Chart Analysis]({attachment.url})\n**Prompt:** {user_prompt or 'Standard technical analysis'}",
+            color=0x1D82B6
+        )
+        progress_embed.add_field(name="Status", value="🔄 Initializing image analysis...", inline=False)
+        progress_embed.set_thumbnail(url=attachment.url)
+        progress_embed.set_footer(text="SecurePath Agent • Real-time Analysis")
+
+        status_msg = await ctx.send(embed=progress_embed)
+
+        try:
+            # Update progress
+            progress_embed.set_field_at(0, name="Status", value="🖼️ Processing image with GPT-4.1 Vision...", inline=False)
+            await status_msg.edit(embed=progress_embed)
+
+            # Analyze image with AI manager
+            if not self.ai_manager:
+                raise Exception("AI manager not available")
+
+            result = await self.ai_manager.analyze_image(
+                user_id=ctx.author.id,
+                attachment=attachment,
+                user_query=user_prompt
+            )
+
+            # Update progress
+            progress_embed.set_field_at(0, name="Status", value="✨ Finalizing technical analysis...", inline=False)
+            await status_msg.edit(embed=progress_embed)
+
+            # Brief pause for UX
+            await asyncio.sleep(1)
+
+            # Delete progress and send result
+            # NOTE(review): failures after this delete hit the except branch,
+            # which edits the deleted message and will raise — confirm.
+            await status_msg.delete()
+
+            await send_structured_analysis_embed(
+                ctx.channel,
+                text=result['content'],
+                color=0x1D82B6,
+                title="📈 Chart Analysis",
+                image_url=attachment.url,
+                user_mention=ctx.author.mention
+            )
+
+            # Log interaction
+            await self._log_interaction(ctx, 'analyze', user_prompt or 'No additional prompt', result['content'])
+
+        except Exception as e:
+            logger.error(f"Error analyzing image: {e}")
+            error_embed = discord.Embed(
+                title="❌ Analysis Failed",
+                description="An error occurred during image analysis.",
+                color=0xFF0000
+            )
+            error_embed.add_field(name="Error", value=str(e)[:1000], inline=False)
+            await status_msg.edit(embed=error_embed)
+
+    async def _log_user_query(self, ctx: Context, command: str, query_text: str) -> None:
+        """Log user query to database (no-op when the DB pool is down)."""
+        if db_manager.pool:
+            # Discriminator "0" marks new-style usernames; omit the #tag then.
+            username = f"{ctx.author.name}#{ctx.author.discriminator}" if ctx.author.discriminator != "0" else ctx.author.name
+            await db_manager.log_user_query(
+                user_id=ctx.author.id,
+                username=username,
+                command=command,
+                query_text=query_text,
+                channel_id=ctx.channel.id,
+                guild_id=ctx.guild.id if ctx.guild else None,
+                response_generated=False
+            )
+
+    async def _log_interaction(self, ctx: Context, command: str, user_input: str, bot_response: str) -> None:
+        """Log interaction to database.
+
+        NOTE(review): currently a stub — only writes to the app logger; no DB
+        persistence happens here yet.
+        """
+        # This would typically call a database logging function
+        logger.info(f"Interaction logged: {command} - {len(bot_response)} chars")
+
+
+async def setup(bot: commands.Bot) -> None:
+    """Extension entry point used by bot.load_extension to register this cog."""
+    await bot.add_cog(AICommands(bot))
\ No newline at end of file
diff --git a/src/bot/cogs/summary_commands.py b/src/bot/cogs/summary_commands.py
new file mode 100644
index 0000000..39ed4e4
--- /dev/null
+++ b/src/bot/cogs/summary_commands.py
@@ -0,0 +1,271 @@
+"""Summary commands for the SecurePath bot."""
+import asyncio
+import logging
+from datetime import datetime, timezone, timedelta
+from typing import List, Optional
+
+import discord
+from discord.ext import commands
+from discord.ext.commands import Context, Cog
+
+from ...ai import AIManager
+from ...database import db_manager
+from ...config.settings import get_settings
+from ...utils.discord_helpers import send_long_embed, reset_status
+
+logger = logging.getLogger(__name__)
+
+
class SummaryCommands(Cog):
    """Cog for channel summary commands."""

    def __init__(self, bot: commands.Bot):
        """Initialize summary commands cog."""
        self.bot = bot
        self.settings = get_settings()
        # Filled in by cog_load; stays None if the bot has no AI manager.
        self.ai_manager: Optional[AIManager] = None

    async def cog_load(self) -> None:
        """Set up the cog when loaded."""
        # Get AI manager from bot
        if hasattr(self.bot, 'ai_manager'):
            self.ai_manager = self.bot.ai_manager
        else:
            logger.warning("AI manager not found on bot instance")

    @commands.command(name='summary')
    async def summary(self, ctx: Context, channel: Optional[discord.TextChannel] = None) -> None:
        """Generate an alpha-focused summary of channel activity.

        Args:
            ctx: Command invocation context.
            channel: Target channel; defaults to the invoking channel.
        """
        # Fix: guard against DM invocations. ctx.guild is None in DMs, so the
        # permissions_for(ctx.guild.me) check below raised AttributeError.
        if ctx.guild is None:
            await ctx.send("❌ This command can only be used in a server channel.")
            return

        # Default to current channel if none specified
        if not channel:
            channel = ctx.channel

        # Validate permissions
        if not channel.permissions_for(ctx.guild.me).read_message_history:
            await ctx.send(f"❌ I don't have permission to read message history in {channel.mention}")
            return

        # Log the summary command
        await self._log_summary_command(ctx, channel)

        # Create status embed shown while messages are gathered/processed.
        status_embed = discord.Embed(
            title="📄 SecurePath Agent Channel Analysis",
            description=f"**Channel:** {channel.mention}\n**Timeframe:** Last 72 hours",
            color=0x1D82B6,
            timestamp=datetime.now(timezone.utc)
        )
        status_embed.add_field(name="Status", value="🔄 Gathering messages...", inline=False)
        status_embed.set_footer(text="SecurePath Agent • Alpha Extraction")

        status_msg = await ctx.send(embed=status_embed)

        try:
            # Gather messages from the last 72 hours
            cutoff_time = datetime.now(timezone.utc) - timedelta(hours=72)
            messages = await self._gather_channel_messages(channel, cutoff_time)

            if len(messages) < 10:
                error_embed = discord.Embed(
                    title="❌ Insufficient Data",
                    description=f"Only found {len(messages)} messages in {channel.mention} from the last 72 hours.",
                    color=0xFF0000
                )
                error_embed.add_field(
                    name="Minimum Required",
                    value="At least 10 messages needed for meaningful analysis.",
                    inline=False
                )
                await status_msg.edit(embed=error_embed)
                return

            # Update status
            status_embed.set_field_at(
                0,
                name="Status",
                value=f"📊 Processing {len(messages):,} messages...",
                inline=False
            )
            await status_msg.edit(embed=status_embed)

            # Generate summary using AI manager
            if not self.ai_manager:
                raise Exception("AI manager not available")

            summary = await self.ai_manager.summarize_messages(
                messages=messages,
                channel_name=channel.name
            )

            # Update status: finalizing
            status_embed.set_field_at(
                0,
                name="Status",
                value="✨ Finalizing intelligence report...",
                inline=False
            )
            await status_msg.edit(embed=status_embed)

            # Brief pause for UX
            await asyncio.sleep(1)

            # Delete status message
            await status_msg.delete()

            # Send final summary
            await self._send_summary_result(ctx, channel, summary, len(messages))

            # Log to database
            await self._log_summary_usage(ctx, channel, summary, len(messages))

        except Exception as e:
            logger.error(f"Error in summary command: {e}")
            error_embed = discord.Embed(
                title="❌ Processing Failed",
                description=f"An error occurred while processing {channel.mention}.",
                color=0xFF0000
            )
            error_embed.add_field(name="Error", value=str(e)[:1000], inline=False)
            await status_msg.edit(embed=error_embed)

        finally:
            await reset_status(self.bot)

    async def _gather_channel_messages(
        self,
        channel: discord.TextChannel,
        cutoff_time: datetime
    ) -> List[str]:
        """Gather and filter messages from a channel.

        Returns formatted message strings in chronological (oldest-first)
        order, skipping bot/system messages, very short messages, and
        link-dominated messages.
        """
        messages = []

        try:
            async for message in channel.history(limit=None, after=cutoff_time):
                # Skip bot messages and system messages
                if message.author.bot or message.type != discord.MessageType.default:
                    continue

                # Skip very short messages
                if len(message.content.strip()) < 10:
                    continue

                # Skip messages that are just links
                if self._is_mostly_links(message.content):
                    continue

                # Format message with metadata
                formatted_msg = self._format_message_for_analysis(message)
                messages.append(formatted_msg)

        except discord.HTTPException as e:
            logger.error(f"Error gathering messages from {channel.name}: {e}")
            raise Exception("Failed to gather channel messages")

        # Fix: history(after=...) already yields oldest-first in discord.py,
        # so the previous .reverse() here flipped the list to newest-first,
        # defeating the "oldest first for context" intent.
        return messages

    def _is_mostly_links(self, content: str) -> bool:
        """Return True when more than half of the whitespace-split words are links."""
        words = content.split()
        if not words:
            return False

        link_count = sum(1 for word in words if word.startswith(('http://', 'https://', 'www.')))
        return link_count / len(words) > 0.5

    def _format_message_for_analysis(self, message: discord.Message) -> str:
        """Format a message as '[HH:MM] user: content' for AI analysis."""
        timestamp = message.created_at.strftime("%H:%M")
        username = message.author.display_name[:20]  # Truncate long usernames
        content = message.content[:500]  # Truncate long messages

        return f"[{timestamp}] {username}: {content}"

    async def _send_summary_result(
        self,
        ctx: Context,
        channel: discord.TextChannel,
        summary: str,
        message_count: int
    ) -> None:
        """Send the summary result to the user.

        Sends a title embed, then either a single summary embed or a
        multi-part long embed when the summary exceeds the embed limit.
        """
        # Create title embed
        title_embed = discord.Embed(
            title=f"📄 {channel.name.title()} Intelligence Report",
            description=f"**Timeframe:** Last 72 hours | **Messages Analyzed:** {message_count:,}",
            color=0x1D82B6,
            timestamp=datetime.now(timezone.utc)
        )
        title_embed.set_footer(text="SecurePath Agent • Alpha Extraction Engine")

        # Send title embed first
        await ctx.send(embed=title_embed)

        # Send summary content
        if len(summary) <= 3800:  # Fits in single embed
            summary_embed = discord.Embed(
                description=summary,
                color=0x1D82B6
            )
            await ctx.send(embed=summary_embed)
        else:
            # Use long embed for detailed summaries
            await send_long_embed(
                channel=ctx.channel,
                content=summary,
                color=0x1D82B6,
                title="📈 Detailed Analysis"
            )

    async def _log_summary_command(self, ctx: Context, channel: discord.TextChannel) -> None:
        """Log summary command to database (no-op when DB is not connected)."""
        if db_manager.pool:
            username = f"{ctx.author.name}#{ctx.author.discriminator}" if ctx.author.discriminator != "0" else ctx.author.name
            query_text = f"Summary for #{channel.name}"

            await db_manager.log_user_query(
                user_id=ctx.author.id,
                username=username,
                command="summary",
                query_text=query_text,
                channel_id=ctx.channel.id,
                guild_id=ctx.guild.id if ctx.guild else None,
                response_generated=False
            )

    async def _log_summary_usage(
        self,
        ctx: Context,
        channel: discord.TextChannel,
        summary: str,
        message_count: int
    ) -> None:
        """Log summary usage to database with rough token/cost estimates."""
        if db_manager.pool:
            # Calculate estimated cost and tokens.
            # NOTE(review): these are coarse heuristics (50 tokens/message,
            # 1.3 tokens/word) with hard-coded per-million-token prices —
            # verify against the actual model pricing.
            estimated_input_tokens = message_count * 50  # Rough estimate
            estimated_output_tokens = len(summary.split()) * 1.3  # Rough estimate
            estimated_cost = (estimated_input_tokens * 0.40 + estimated_output_tokens * 1.60) / 1_000_000

            try:
                await db_manager.log_usage(
                    user_id=ctx.author.id,
                    username=f"{ctx.author.name}#{ctx.author.discriminator}" if ctx.author.discriminator != "0" else ctx.author.name,
                    command="summary",
                    model="gpt-4.1",
                    input_tokens=int(estimated_input_tokens),
                    output_tokens=int(estimated_output_tokens),
                    cost=estimated_cost,
                    guild_id=ctx.guild.id if ctx.guild else None,
                    channel_id=ctx.channel.id
                )
                logger.info(f"Summary usage logged - Cost: ${estimated_cost:.4f}")

            except Exception as e:
                logger.error(f"Failed to log summary usage: {e}")
+
+
async def setup(bot: commands.Bot) -> None:
    """Entry point used by discord.py to register the summary commands cog."""
    cog = SummaryCommands(bot)
    await bot.add_cog(cog)
\ No newline at end of file
diff --git a/src/bot/events.py b/src/bot/events.py
new file mode 100644
index 0000000..0b7b992
--- /dev/null
+++ b/src/bot/events.py
@@ -0,0 +1,164 @@
+"""Bot event handlers and background tasks."""
+import asyncio
+import logging
+import random
+from datetime import datetime, timezone
+from typing import List
+
+import discord
+from discord import Activity, ActivityType
+from discord.ext import tasks
+
+from ..database import db_manager
+from ..services.context_manager import ContextManager
+from ..config.settings import get_settings
+
+logger = logging.getLogger(__name__)
+
+# Status messages for rotation
+STATUS_MESSAGES = [
+ ("!ask", "real-time market insights", ActivityType.watching),
+ ("!analyze", "chart patterns & signals", ActivityType.watching),
+ ("!summary", "alpha extraction from channels", ActivityType.listening),
+ ("!commands", "for all features", ActivityType.playing),
+ ("defi", "on-chain truth over hype", ActivityType.watching),
+ ("docs", "show me the code", ActivityType.watching),
+]
+
+
async def setup_background_tasks(bot) -> None:
    """Set up all background tasks for the bot.

    Starts the status-rotation and daily-reset loops, connects the
    database, and finally sends the startup notification.
    """
    # Start status rotation
    if not change_status.is_running():
        change_status.start(bot)
        logger.info("Started status rotation task")

    # Start daily reset
    if not reset_daily_limits.is_running():
        reset_daily_limits.start(bot)
        logger.info("Started daily reset task")

    # Fix: connect the database *before* sending the startup notification.
    # The notification embed reports DB status and usage stats, which were
    # always "Disconnected"/missing when the connect ran afterwards.
    db_connected = await db_manager.connect()
    if db_connected:
        logger.info("Database connection established")
    else:
        logger.error("Failed to connect to database")

    # Send startup notification
    await send_startup_notification(bot)
+
+
@tasks.loop(minutes=15)
async def change_status(bot) -> None:
    """Rotate the bot's presence through STATUS_MESSAGES every 15 minutes."""
    try:
        name, state, activity_type = random.choice(STATUS_MESSAGES)
        presence = Activity(type=activity_type, name=f"{name} • {state}")
        await bot.change_presence(activity=presence)
        logger.debug(f"Changed status to: {name} • {state}")
    except Exception as e:
        logger.error(f"Error changing status: {e}")
+
+
@tasks.loop(hours=24)
async def reset_daily_limits(bot) -> None:
    """Reset daily API call limits and usage data.

    Currently a stub: the real reset logic is deferred to the planned
    usage-tracking service, so each run only emits a log line.
    """
    # This will be implemented when we create the usage tracking service
    logger.info("Daily limits reset")
+
+
async def send_startup_notification(bot) -> None:
    """Send startup notification to admin channel."""
    settings = get_settings()

    channel_id = settings.log_channel_id
    if not channel_id:
        logger.warning("No log channel configured for startup notification")
        return

    target = bot.get_channel(channel_id)
    if target is None:
        logger.warning(f"Could not find log channel {channel_id}")
        return

    embed = discord.Embed(
        title="🚀 SecurePath Agent - System Status",
        description="Agent successfully initialized and ready for operations",
        color=0x1D82B6,
        timestamp=datetime.now(timezone.utc)
    )

    # Core health fields.
    embed.add_field(
        name="Database",
        value="🟢 Connected" if db_manager.pool else "🔴 Disconnected",
        inline=True,
    )
    embed.add_field(name="Active Guilds", value=len(bot.guilds), inline=True)
    embed.add_field(name="Latency", value=f"{bot.latency*1000:.1f}ms", inline=True)

    # Usage stats are best-effort: failures only log, never block startup.
    if db_manager.pool:
        try:
            stats = await db_manager.get_global_stats()
            overall = (stats or {}).get('overall')
            if overall:
                embed.add_field(
                    name="📊 Total Usage",
                    value=f"**Requests:** {overall['total_requests']:,}\n"
                          f"**Users:** {overall['unique_users']:,}\n"
                          f"**Cost:** ${overall['total_cost']:.4f}",
                    inline=True
                )
        except Exception as e:
            logger.error(f"Failed to get startup stats: {e}")

    embed.set_footer(text="SecurePath Agent • Powered by GPT-4.1 & Perplexity Sonar-Pro")

    try:
        await target.send(embed=embed)
    except discord.HTTPException as e:
        logger.error(f"Failed to send startup notification: {e}")
    else:
        logger.info("Startup notification sent")
+
+
async def handle_dm_conversation(bot, message: discord.Message) -> None:
    """Handle DM conversations with context management."""
    context_manager = ContextManager.get_instance()

    # First message from this user in memory? Seed context from DM history.
    if not context_manager.has_context(message.author.id):
        await preload_conversation_history(bot, message.author.id, message.channel)

    # The actual reply is produced by the AI command handler; here we only
    # ensure context exists and note the incoming DM.
    logger.info(f"Received DM from {message.author}: {message.content[:50]}...")
+
+
async def preload_conversation_history(bot, user_id: int, channel: discord.DMChannel) -> None:
    """Preload conversation history for context.

    Reads up to 100 DM messages (oldest first) and replays them into the
    user's context; messages from third parties are ignored.
    """
    context_manager = ContextManager.get_instance()
    entries = []

    try:
        # Phase 1: collect (content, role) pairs in chronological order.
        async for past in channel.history(limit=100, oldest_first=True):
            if past.author.id == user_id:
                entries.append((past.content, 'user'))
            elif past.author.id == bot.user.id:
                entries.append((past.content, 'assistant'))
            # anyone else: skip

        # Phase 2: replay into the context manager only once gathering
        # succeeded, so a failed fetch leaves the context untouched.
        for content, role in entries:
            context_manager.update_context(user_id, content, role)

        logger.info(f"Preloaded {len(entries)} messages for user {user_id}")

    except Exception as e:
        logger.error(f"Error preloading conversation history: {e}")
\ No newline at end of file
diff --git a/src/config/__init__.py b/src/config/__init__.py
new file mode 100644
index 0000000..295107a
--- /dev/null
+++ b/src/config/__init__.py
@@ -0,0 +1,17 @@
+"""Configuration module for SecurePath bot."""
+
+from .settings import get_settings, Settings
+from .constants import *
+
+__all__ = [
+ 'get_settings',
+ 'Settings',
+ 'DISCORD_MESSAGE_LIMIT',
+ 'DISCORD_EMBED_LIMIT',
+ 'OPENAI_MODEL',
+ 'OPENAI_VISION_MODEL',
+ 'PERPLEXITY_MODEL',
+ 'MAX_TOKENS_RESPONSE',
+ 'MAX_IMAGE_SIZE_MB',
+ 'SUPPORTED_IMAGE_FORMATS',
+]
\ No newline at end of file
diff --git a/src/config/constants.py b/src/config/constants.py
new file mode 100644
index 0000000..32824de
--- /dev/null
+++ b/src/config/constants.py
@@ -0,0 +1,39 @@
"""Application-wide constants."""

# Discord limits
# Hard limits imposed by the Discord API (characters / field counts).
DISCORD_MESSAGE_LIMIT = 2000
DISCORD_EMBED_LIMIT = 6000
DISCORD_FIELD_VALUE_LIMIT = 1024
DISCORD_EMBED_TITLE_LIMIT = 256
DISCORD_EMBED_FIELDS_LIMIT = 25

# API Models
# NOTE(review): other modules log usage as "gpt-4.1" and the startup footer
# mentions "GPT-4.1 & Perplexity Sonar-Pro" — these identifiers look out of
# sync with that; confirm which model names are actually in use.
OPENAI_MODEL = "gpt-4-1106-preview"
OPENAI_VISION_MODEL = "gpt-4-vision-preview"
PERPLEXITY_MODEL = "llama-3.1-sonar-large-128k-online"

# Token limits
MAX_TOKENS_RESPONSE = 8000
MAX_TOKENS_SUMMARY = 4096

# Image processing
MAX_IMAGE_SIZE_MB = 20
SUPPORTED_IMAGE_FORMATS = ["png", "jpg", "jpeg", "gif", "webp"]

# Cache settings
CACHE_TTL_SECONDS = 3600  # 1 hour
CACHE_MAX_SIZE = 1000

# Database
# Pool sizing and per-command timeout passed to asyncpg.create_pool.
DB_CONNECTION_TIMEOUT = 30
DB_POOL_MIN_SIZE = 10
DB_POOL_MAX_SIZE = 20

# Progress tracking
PROGRESS_UPDATE_INTERVAL = 2  # seconds

# Error messages
ERROR_RATE_LIMIT = "Rate limit exceeded. Please try again later."
ERROR_API_UNAVAILABLE = "API service is currently unavailable. Please try again later."
ERROR_INVALID_COMMAND = "Invalid command format. Use `!help` for usage information."
ERROR_NO_PERMISSION = "You don't have permission to use this command."
\ No newline at end of file
diff --git a/src/config/settings.py b/src/config/settings.py
new file mode 100644
index 0000000..008d710
--- /dev/null
+++ b/src/config/settings.py
@@ -0,0 +1,151 @@
+"""Simple settings configuration without Pydantic dependencies."""
+import os
+from typing import Optional
+from dataclasses import dataclass
+
+
@dataclass
class Settings:
    """Application settings.

    Field defaults mirror ``.env.example``. Do not instantiate directly:
    use :func:`get_settings`, which builds the instance via
    :meth:`from_env` so environment variables override the defaults.
    """

    # System Configuration
    # Base system prompt injected into AI requests (runtime value — do not edit casually).
    system_prompt: str = """You're a sharp DeFi agent hosted on the SecurePath Discord server. Communicate with technical precision and casual confidence. Use lowercase naturally but avoid excessive slang. Your authority comes from verifiable, on-chain truth. Prioritize official docs, whitepapers, and code over news/sentiment. Your motto: 'show me the docs, or show me the code.' Always prioritize security, decentralization, and user empowerment. Suggest DEXs over CEXs, self-custody over custodial, open-source over proprietary. Cut through hype and deliver ground truth. Mario is our founder, part of the SecurePath family.

CRITICAL FORMATTING RULES:
- NO TABLES whatsoever (Discord can't render them)
- Use bullet points and numbered lists only
- Keep responses under 400 words total
- Be concise and direct, no fluff
- Use [1], [2] format for citations when available"""

    # Discord Configuration
    discord_token: str = ""
    bot_prefix: str = "!"
    owner_id: int = 0

    # API Configuration
    openai_api_key: Optional[str] = None
    perplexity_api_key: str = ""
    perplexity_api_url: str = "https://api.perplexity.ai/chat/completions"
    perplexity_timeout: int = 30
    use_perplexity_api: bool = True

    # Logging Configuration
    log_level: str = "INFO"
    log_format: str = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
    log_channel_id: Optional[int] = None

    # Rate Limiting Configuration
    api_rate_limit_max: int = 100
    api_rate_limit_interval: int = 60
    daily_api_call_limit: int = 1000

    # Context Management
    max_context_messages: int = 50
    max_context_age: int = 3600
    max_messages_per_channel: int = 1000

    # Retry Configuration
    max_retries: int = 3
    retry_delay: int = 5

    # Feature Configuration
    stats_interval: int = 86400

    # Channel IDs
    summary_channel_id: Optional[int] = None
    chartist_channel_id: Optional[int] = None
    news_channel_id: Optional[int] = None
    news_bot_user_id: Optional[int] = None

    # Database Configuration
    database_url: Optional[str] = None

    @classmethod
    def from_env(cls) -> 'Settings':
        """Create settings from environment variables.

        Loads ``.env`` via python-dotenv when available (missing dotenv is
        tolerated), then reads each variable with a typed parser that falls
        back to the documented default on absence or parse failure.
        """
        # Load from .env if available
        try:
            from dotenv import load_dotenv
            load_dotenv()
        except ImportError:
            pass

        def get_bool(key: str, default: bool = False) -> bool:
            """Parse boolean from environment variable."""
            # Accepts true/1/t/yes/y (any case); everything else is False.
            value = os.getenv(key, str(default)).lower()
            return value in ['true', '1', 't', 'yes', 'y']

        def get_int(key: str, default: int = 0) -> int:
            """Parse integer from environment variable."""
            # Unparseable values silently fall back to the default.
            try:
                return int(os.getenv(key, str(default)))
            except ValueError:
                return default

        def get_optional_int(key: str) -> Optional[int]:
            """Parse optional integer from environment variable."""
            # Unset, empty, or unparseable values all map to None.
            value = os.getenv(key)
            if value:
                try:
                    return int(value)
                except ValueError:
                    pass
            return None

        return cls(
            # Discord Configuration
            discord_token=os.getenv('DISCORD_TOKEN', ''),
            bot_prefix=os.getenv('BOT_PREFIX', '!'),
            owner_id=get_int('OWNER_ID'),

            # API Configuration
            openai_api_key=os.getenv('OPENAI_API_KEY'),
            perplexity_api_key=os.getenv('PERPLEXITY_API_KEY', ''),
            perplexity_api_url=os.getenv('PERPLEXITY_API_URL', 'https://api.perplexity.ai/chat/completions'),
            perplexity_timeout=get_int('PERPLEXITY_TIMEOUT', 30),
            use_perplexity_api=get_bool('USE_PERPLEXITY_API', True),

            # Logging Configuration
            log_level=os.getenv('LOG_LEVEL', 'INFO').upper(),
            log_format=os.getenv('LOG_FORMAT', '%(asctime)s - %(name)s - %(levelname)s - %(message)s'),
            log_channel_id=get_optional_int('LOG_CHANNEL_ID'),

            # Rate Limiting
            api_rate_limit_max=get_int('API_RATE_LIMIT_MAX', 100),
            api_rate_limit_interval=get_int('API_RATE_LIMIT_INTERVAL', 60),
            daily_api_call_limit=get_int('DAILY_API_CALL_LIMIT', 1000),

            # Context Management
            max_context_messages=get_int('MAX_CONTEXT_MESSAGES', 50),
            max_context_age=get_int('MAX_CONTEXT_AGE', 3600),
            max_messages_per_channel=get_int('MAX_MESSAGES_PER_CHANNEL', 1000),

            # Retry Configuration
            max_retries=get_int('MAX_RETRIES', 3),
            retry_delay=get_int('RETRY_DELAY', 5),

            # Feature Configuration
            stats_interval=get_int('STATS_INTERVAL', 86400),

            # Channel IDs
            summary_channel_id=get_optional_int('SUMMARY_CHANNEL_ID'),
            chartist_channel_id=get_optional_int('CHARTIST_CHANNEL_ID'),
            news_channel_id=get_optional_int('NEWS_CHANNEL_ID'),
            news_bot_user_id=get_optional_int('NEWS_BOT_USER_ID'),

            # Database
            database_url=os.getenv('DATABASE_URL'),
        )
+
+
# Singleton instance (lazily created by get_settings; never access directly)
_settings: Optional[Settings] = None


def get_settings() -> Settings:
    """Return the process-wide Settings, building it from the environment on first call."""
    global _settings
    if _settings is None:
        _settings = Settings.from_env()
    return _settings
\ No newline at end of file
diff --git a/src/database/__init__.py b/src/database/__init__.py
new file mode 100644
index 0000000..32c2a78
--- /dev/null
+++ b/src/database/__init__.py
@@ -0,0 +1,229 @@
+"""Database module with repository pattern."""
+import logging
+from decimal import Decimal
+from typing import Optional, Dict, List, Any
+
+from .connection import DatabaseManager, db_manager as _db_manager
+from .models import UsageRecord, UserQuery, UserAnalytics
+from .repositories import UsageRepository, AnalyticsRepository
+
+logger = logging.getLogger(__name__)
+
+
class UnifiedDatabaseManager:
    """Unified database manager that provides backward compatibility.

    Wraps the low-level :class:`DatabaseManager` and the usage/analytics
    repositories behind the legacy flat-method API used elsewhere in the
    bot. All methods degrade gracefully (False/None) when disconnected.
    """

    def __init__(self):
        """Initialize unified database manager."""
        self.db_manager = _db_manager
        # Repositories exist only between connect() and disconnect().
        self.usage_repo: Optional[UsageRepository] = None
        self.analytics_repo: Optional[AnalyticsRepository] = None

    @property
    def pool(self):
        """Get database pool for backward compatibility."""
        return self.db_manager.pool

    async def connect(self) -> bool:
        """Connect to database and initialize repositories.

        Returns:
            True on success; repositories are left None on failure.
        """
        success = await self.db_manager.connect()

        if success:
            self.usage_repo = UsageRepository(self.db_manager)
            self.analytics_repo = AnalyticsRepository(self.db_manager)

        return success

    async def disconnect(self) -> None:
        """Disconnect from database and drop repositories."""
        await self.db_manager.disconnect()
        self.usage_repo = None
        self.analytics_repo = None

    # Backward compatibility methods
    async def log_usage(
        self,
        user_id: int,
        username: str,
        command: str,
        model: str,
        input_tokens: int = 0,
        output_tokens: int = 0,
        cached_tokens: int = 0,
        cost: float = 0.0,
        guild_id: Optional[int] = None,
        channel_id: Optional[int] = None
    ) -> bool:
        """Log usage (backward compatibility method).

        Creates the usage record, then updates user analytics and the
        daily summary from it.
        """
        if not self.usage_repo:
            return False

        record = UsageRecord(
            user_id=user_id,
            username=username,
            command=command,
            model=model,
            input_tokens=input_tokens,
            output_tokens=output_tokens,
            cached_tokens=cached_tokens,
            # Fix: total_tokens was previously left at its default of 0, so
            # analytics and daily summaries recorded zero-token usage.
            # cached tokens are assumed to be a subset of input tokens
            # (OpenAI prompt-caching semantics) — TODO confirm.
            total_tokens=input_tokens + output_tokens,
            cost=Decimal(str(cost)),
            guild_id=guild_id,
            channel_id=channel_id
        )

        # Create usage record
        success = await self.usage_repo.create_usage_record(record)

        if success:
            # Update user analytics
            await self.analytics_repo.create_or_update_user_analytics(
                user_id=user_id,
                username=username,
                tokens_used=record.total_tokens,
                cost=record.cost
            )

            # Update daily summary
            await self.usage_repo.update_daily_summary(record)

        return success

    async def log_user_query(
        self,
        user_id: int,
        username: str,
        command: str,
        query_text: str,
        channel_id: Optional[int] = None,
        guild_id: Optional[int] = None,
        response_generated: bool = False,
        error_occurred: bool = False
    ) -> bool:
        """Log user query (backward compatibility method)."""
        if not self.analytics_repo:
            return False

        query = UserQuery(
            user_id=user_id,
            username=username,
            command=command,
            query_text=query_text,
            channel_id=channel_id,
            guild_id=guild_id,
            response_generated=response_generated,
            error_occurred=error_occurred
        )

        return await self.analytics_repo.log_user_query(query)

    async def get_global_stats(self) -> Optional[Dict[str, Any]]:
        """Get global statistics (backward compatibility method)."""
        if not self.usage_repo or not self.analytics_repo:
            return None

        try:
            # Get overall stats
            overall_stats = await self.usage_repo.get_global_stats()
            if not overall_stats:
                return None

            # Get top users
            top_users = await self.analytics_repo.get_top_users(10)

            # Get top commands
            top_commands = await self.usage_repo.get_top_commands(10)

            # Get daily stats
            daily_stats = await self.usage_repo.get_daily_stats(7)

            from .models import model_to_dict

            return {
                'overall': model_to_dict(overall_stats),
                'top_users': [model_to_dict(user) for user in top_users],
                'top_commands': [model_to_dict(cmd) for cmd in top_commands],
                'daily_stats': daily_stats
            }

        except Exception as e:
            logger.error(f"Failed to get global stats: {e}")
            return None

    async def get_user_stats(self, user_id: int) -> Optional[Dict[str, Any]]:
        """Get user statistics (backward compatibility method)."""
        if not self.usage_repo or not self.analytics_repo:
            return None

        try:
            # Fix: model_to_dict was used here without being imported,
            # raising NameError on every call.
            from .models import model_to_dict

            # Get user analytics
            user_analytics = await self.analytics_repo.get_user_analytics(user_id)
            if not user_analytics:
                return None

            # Get usage stats
            usage_stats = await self.usage_repo.get_user_usage_stats(user_id)

            return {
                'user_data': model_to_dict(user_analytics),
                'command_stats': usage_stats.get('commands', []),
                'recent_activity': usage_stats.get('recent_activity', [])
            }

        except Exception as e:
            logger.error(f"Failed to get user stats: {e}")
            return None

    async def get_costs_by_model(self) -> Optional[Dict[str, Any]]:
        """Get cost breakdown by model (backward compatibility method)."""
        if not self.usage_repo:
            return None

        try:
            # Fix: missing import of model_to_dict (previously NameError).
            from .models import model_to_dict

            model_costs = await self.usage_repo.get_model_costs()
            return {
                'model_costs': [model_to_dict(cost) for cost in model_costs]
            }

        except Exception as e:
            logger.error(f"Failed to get model costs: {e}")
            return None

    async def get_query_analytics(self) -> Optional[Dict[str, Any]]:
        """Get query analytics (backward compatibility method)."""
        if not self.analytics_repo:
            return None

        try:
            # Fix: missing import of model_to_dict (previously NameError).
            from .models import model_to_dict

            # Get popular queries
            popular_queries = await self.analytics_repo.get_popular_queries(20, 7)

            # Get command patterns
            command_patterns = await self.analytics_repo.get_query_patterns(7)

            # Get hourly activity
            hourly_activity = await self.analytics_repo.get_hourly_activity(7)

            return {
                'popular_queries': popular_queries,
                'command_patterns': [model_to_dict(pattern) for pattern in command_patterns],
                'hourly_activity': [model_to_dict(activity) for activity in hourly_activity]
            }

        except Exception as e:
            logger.error(f"Failed to get query analytics: {e}")
            return None
+
+
+# Create global instance for backward compatibility
+db_manager = UnifiedDatabaseManager()
+
+# Export everything for easy imports
+__all__ = [
+ 'db_manager',
+ 'DatabaseManager',
+ 'UsageRepository',
+ 'AnalyticsRepository',
+ 'UsageRecord',
+ 'UserQuery',
+ 'UserAnalytics',
+]
\ No newline at end of file
diff --git a/src/database/connection.py b/src/database/connection.py
new file mode 100644
index 0000000..cd38636
--- /dev/null
+++ b/src/database/connection.py
@@ -0,0 +1,234 @@
+"""Database connection management."""
+import asyncio
+import logging
+from typing import Optional
+from urllib.parse import urlparse
+
+import asyncpg
+
+from ..config.settings import get_settings
+from ..config.constants import DB_CONNECTION_TIMEOUT, DB_POOL_MIN_SIZE, DB_POOL_MAX_SIZE
+
+logger = logging.getLogger(__name__)
+
+
class DatabaseManager:
    """Manages database connections and connection pooling."""

    def __init__(self):
        """Initialize database manager."""
        self.pool: Optional[asyncpg.Pool] = None
        self.settings = get_settings()
        self._connected = False

    async def connect(self) -> bool:
        """
        Initialize database connection pool.

        Returns:
            True if connection successful, False otherwise
        """
        if self._connected:
            return True

        if not self.settings.database_url:
            logger.error("DATABASE_URL not configured")
            return False

        try:
            # Parse the database URL for asyncpg
            parsed = urlparse(self.settings.database_url)

            # Create connection pool.
            # NOTE(review): ssl='require' is hard-coded — this will fail
            # against local Postgres instances without SSL; confirm intended.
            self.pool = await asyncpg.create_pool(
                host=parsed.hostname,
                port=parsed.port,
                user=parsed.username,
                password=parsed.password,
                database=parsed.path[1:],  # Remove leading slash
                ssl='require',
                min_size=DB_POOL_MIN_SIZE,
                max_size=DB_POOL_MAX_SIZE,
                command_timeout=DB_CONNECTION_TIMEOUT
            )

            # Test connection
            async with self.pool.acquire() as conn:
                await conn.fetchval('SELECT 1')

            self._connected = True
            logger.info("Database connection pool created successfully")

            # Initialize tables
            await self._init_tables()
            return True

        except Exception as e:
            logger.error(f"Failed to connect to database: {e}")
            self._connected = False
            return False

    async def disconnect(self) -> None:
        """Close database connection pool."""
        if self.pool:
            await self.pool.close()
            self.pool = None
            self._connected = False
            logger.info("Database connection pool closed")

    def get_connection(self):
        """
        Get a database connection from the pool.

        Fix: this was previously declared ``async def`` while every call
        site uses ``async with self.get_connection()``; a coroutine is not
        an async context manager, so every query raised TypeError. It is
        now a plain method returning ``pool.acquire()`` (itself an async
        context manager), which matches the existing call sites.

        Returns:
            Async context manager yielding a pooled connection

        Raises:
            RuntimeError: If the database is not connected
        """
        if not self._connected or not self.pool:
            raise RuntimeError("Database not connected")
        return self.pool.acquire()

    async def execute(self, query: str, *args) -> None:
        """
        Execute a query without returning results.

        Args:
            query: SQL query
            *args: Query parameters
        """
        async with self.get_connection() as conn:
            await conn.execute(query, *args)

    async def fetch_one(self, query: str, *args):
        """
        Fetch a single row.

        Args:
            query: SQL query
            *args: Query parameters

        Returns:
            Single row or None
        """
        async with self.get_connection() as conn:
            return await conn.fetchrow(query, *args)

    async def fetch_many(self, query: str, *args):
        """
        Fetch multiple rows.

        Args:
            query: SQL query
            *args: Query parameters

        Returns:
            List of rows
        """
        async with self.get_connection() as conn:
            return await conn.fetch(query, *args)

    async def fetch_value(self, query: str, *args):
        """
        Fetch a single value.

        Args:
            query: SQL query
            *args: Query parameters

        Returns:
            Single value or None
        """
        async with self.get_connection() as conn:
            return await conn.fetchval(query, *args)

    @property
    def is_connected(self) -> bool:
        """Check if database is connected."""
        return self._connected and self.pool is not None

    async def _init_tables(self) -> None:
        """Create database tables if they don't exist.

        Idempotent (CREATE TABLE/INDEX IF NOT EXISTS); raises on failure so
        connect() reports the error.
        """
        try:
            async with self.get_connection() as conn:
                # Usage tracking table
                await conn.execute('''
                    CREATE TABLE IF NOT EXISTS usage_tracking (
                        id SERIAL PRIMARY KEY,
                        user_id BIGINT NOT NULL,
                        username VARCHAR(255),
                        command VARCHAR(50) NOT NULL,
                        model VARCHAR(50) NOT NULL,
                        input_tokens INTEGER DEFAULT 0,
                        output_tokens INTEGER DEFAULT 0,
                        cached_tokens INTEGER DEFAULT 0,
                        total_tokens INTEGER DEFAULT 0,
                        cost DECIMAL(10, 8) DEFAULT 0,
                        timestamp TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
                        guild_id BIGINT,
                        channel_id BIGINT
                    )
                ''')

                # Create indexes for better performance
                await conn.execute('CREATE INDEX IF NOT EXISTS idx_usage_user_id ON usage_tracking(user_id)')
                await conn.execute('CREATE INDEX IF NOT EXISTS idx_usage_timestamp ON usage_tracking(timestamp)')
                await conn.execute('CREATE INDEX IF NOT EXISTS idx_usage_command ON usage_tracking(command)')

                # Daily usage summary table
                await conn.execute('''
                    CREATE TABLE IF NOT EXISTS daily_usage_summary (
                        id SERIAL PRIMARY KEY,
                        date DATE NOT NULL,
                        total_requests INTEGER DEFAULT 0,
                        total_tokens INTEGER DEFAULT 0,
                        total_cost DECIMAL(10, 6) DEFAULT 0,
                        unique_users INTEGER DEFAULT 0,
                        top_command VARCHAR(50),
                        UNIQUE(date)
                    )
                ''')

                # User analytics table
                await conn.execute('''
                    CREATE TABLE IF NOT EXISTS user_analytics (
                        user_id BIGINT PRIMARY KEY,
                        username VARCHAR(255),
                        first_interaction TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
                        last_interaction TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
                        total_requests INTEGER DEFAULT 0,
                        total_tokens INTEGER DEFAULT 0,
                        total_cost DECIMAL(10, 6) DEFAULT 0,
                        favorite_command VARCHAR(50),
                        avg_tokens_per_request DECIMAL(8, 2) DEFAULT 0
                    )
                ''')

                # User queries table
                await conn.execute('''
                    CREATE TABLE IF NOT EXISTS user_queries (
                        id SERIAL PRIMARY KEY,
                        user_id BIGINT NOT NULL,
                        username VARCHAR(255),
                        command VARCHAR(50) NOT NULL,
                        query_text TEXT NOT NULL,
                        channel_id BIGINT,
                        guild_id BIGINT,
                        timestamp TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
                        response_generated BOOLEAN DEFAULT FALSE,
                        error_occurred BOOLEAN DEFAULT FALSE
                    )
                ''')

                # Create indexes for queries table
                await conn.execute('CREATE INDEX IF NOT EXISTS idx_queries_user_id ON user_queries(user_id)')
                await conn.execute('CREATE INDEX IF NOT EXISTS idx_queries_timestamp ON user_queries(timestamp)')
                await conn.execute('CREATE INDEX IF NOT EXISTS idx_queries_command ON user_queries(command)')

                logger.info("Database tables initialized successfully")

        except Exception as e:
            logger.error(f"Failed to initialize database tables: {e}")
            raise
+
+
+# Global database manager instance
+db_manager = DatabaseManager()
\ No newline at end of file
diff --git a/src/database/models.py b/src/database/models.py
new file mode 100644
index 0000000..5ac4b4e
--- /dev/null
+++ b/src/database/models.py
@@ -0,0 +1,146 @@
+"""Simple database models using dataclasses."""
+from datetime import datetime, date
+from decimal import Decimal
+from typing import Optional
+from dataclasses import dataclass
+
+
@dataclass
class UsageRecord:
    """Model for usage tracking records.

    Mirrors one row of the ``usage_tracking`` table; ``id`` and
    ``timestamp`` are assigned by the database and therefore default to None.
    """
    user_id: int                      # Discord user ID
    username: str                     # display name at the time of the call
    command: str                      # bot command that triggered the API call
    model: str                        # LLM model name used for the call
    input_tokens: int = 0
    output_tokens: int = 0
    cached_tokens: int = 0
    total_tokens: int = 0             # derived sum; the repository fills it in when left at 0
    cost: Decimal = Decimal('0')      # cost of the call
    timestamp: Optional[datetime] = None  # set by the DB default
    guild_id: Optional[int] = None    # None for DMs
    channel_id: Optional[int] = None
    id: Optional[int] = None          # SERIAL primary key, set by the DB
+
+
@dataclass
class UserAnalytics:
    """Model for user analytics.

    Aggregate per-user counters mirroring the ``user_analytics`` table
    (one row per Discord user, upserted on every interaction).
    """
    user_id: int                                   # primary key: Discord user ID
    username: str
    first_interaction: Optional[datetime] = None   # set by DB default on first insert
    last_interaction: Optional[datetime] = None    # refreshed on every upsert
    total_requests: int = 0
    total_tokens: int = 0
    total_cost: Decimal = Decimal('0')
    favorite_command: Optional[str] = None         # most-used command, refreshed separately
    avg_tokens_per_request: Decimal = Decimal('0')
+
+
@dataclass
class DailyUsageSummary:
    """Model for daily usage summaries.

    Mirrors the ``daily_usage_summary`` table; ``date`` is unique,
    so there is at most one row per calendar day.
    """
    date: date
    total_requests: int = 0
    total_tokens: int = 0
    total_cost: Decimal = Decimal('0')
    unique_users: int = 0
    top_command: Optional[str] = None
    id: Optional[int] = None   # SERIAL primary key, set by the DB
+
+
@dataclass
class UserQuery:
    """Model for user queries.

    Mirrors one row of the ``user_queries`` table — the raw text a user
    submitted to a command, plus outcome flags.
    """
    user_id: int
    username: str
    command: str
    query_text: str
    channel_id: Optional[int] = None
    guild_id: Optional[int] = None
    timestamp: Optional[datetime] = None   # set by the DB default
    response_generated: bool = False       # True once a reply was produced
    error_occurred: bool = False
    id: Optional[int] = None               # SERIAL primary key, set by the DB
+
+
@dataclass
class GlobalStats:
    """Model for global statistics.

    Shape of the single aggregate row produced by
    ``UsageRepository.get_global_stats``; all fields are required.
    """
    total_requests: int
    unique_users: int
    total_tokens: int
    total_cost: Decimal
    avg_tokens_per_request: Decimal
+
+
@dataclass
class CommandStats:
    """Model for command statistics.

    One row of the per-command aggregation returned by
    ``UsageRepository.get_top_commands``.
    """
    command: str
    usage_count: int
    total_cost: Decimal
+
+
@dataclass
class ModelCosts:
    """Model for model cost breakdown.

    One row of the per-model aggregation returned by
    ``UsageRepository.get_model_costs``.
    """
    model: str
    requests: int
    input_tokens: int
    output_tokens: int
    cached_tokens: int
    total_cost: Decimal
    avg_cost_per_request: Decimal
+
+
@dataclass
class QueryPattern:
    """Model for query patterns.

    One row of the per-command aggregation returned by
    ``AnalyticsRepository.get_query_patterns``.
    """
    command: str
    total_queries: int
    unique_users: int
    avg_query_length: Decimal   # average character length of query_text
+
+
@dataclass
class HourlyActivity:
    """Model for hourly activity stats.

    One (hour-of-day, count) pair as returned by
    ``AnalyticsRepository.get_hourly_activity``.
    """
    hour: int          # hour of day, 0-23
    query_count: int
+
+
def dict_to_model(data_dict: dict, model_class):
    """Convert dictionary to dataclass model instance.

    Unknown keys (e.g. extra columns selected by a query) are silently
    dropped so that ``model_class(**...)`` never fails on surplus data.

    Args:
        data_dict: Row data, typically ``dict(asyncpg.Record)``.
        model_class: Target model class, normally one of the dataclasses above.

    Returns:
        A populated ``model_class`` instance, or None for empty/None input.
    """
    if not data_dict:
        return None

    # Local import: the module only pulls in `dataclass` at the top level.
    from dataclasses import fields, is_dataclass

    if is_dataclass(model_class):
        # For dataclasses, use the field list directly instead of probing
        # the constructor signature via `inspect` on every call.
        valid_keys = {f.name for f in fields(model_class) if f.init}
    else:
        # Fallback for non-dataclass callables: inspect the signature.
        import inspect
        valid_keys = set(inspect.signature(model_class).parameters.keys())

    filtered_dict = {k: v for k, v in data_dict.items() if k in valid_keys}
    return model_class(**filtered_dict)
+
+
def model_to_dict(model_instance) -> dict:
    """Convert dataclass model instance to dictionary.

    Returns a shallow copy of the instance's attributes; the previous
    implementation handed back the live ``__dict__``, so mutating the
    returned dict silently mutated the model itself.

    Args:
        model_instance: Any object; instances without ``__dict__`` yield ``{}``.

    Returns:
        Shallow-copied attribute dictionary (possibly empty).
    """
    if hasattr(model_instance, '__dict__'):
        return dict(vars(model_instance))
    return {}
+
+
# Backward compatibility functions
def create_usage_record(**kwargs) -> UsageRecord:
    """Create usage record from keyword arguments.

    Thin factory kept for backward compatibility; equivalent to
    ``UsageRecord(**kwargs)``. Raises TypeError on unknown keys.
    """
    return UsageRecord(**kwargs)
+
+
def create_user_analytics(**kwargs) -> UserAnalytics:
    """Create user analytics from keyword arguments.

    Thin factory kept for backward compatibility; equivalent to
    ``UserAnalytics(**kwargs)``. Raises TypeError on unknown keys.
    """
    return UserAnalytics(**kwargs)
+
+
def create_user_query(**kwargs) -> UserQuery:
    """Create user query from keyword arguments.

    Thin factory kept for backward compatibility; equivalent to
    ``UserQuery(**kwargs)``. Raises TypeError on unknown keys.
    """
    return UserQuery(**kwargs)
\ No newline at end of file
diff --git a/src/database/repositories/__init__.py b/src/database/repositories/__init__.py
new file mode 100644
index 0000000..65e251a
--- /dev/null
+++ b/src/database/repositories/__init__.py
@@ -0,0 +1,9 @@
+"""Database repositories module."""
+
+from .usage_repository import UsageRepository
+from .analytics_repository import AnalyticsRepository
+
+__all__ = [
+ 'UsageRepository',
+ 'AnalyticsRepository',
+]
\ No newline at end of file
diff --git a/src/database/repositories/analytics_repository.py b/src/database/repositories/analytics_repository.py
new file mode 100644
index 0000000..a88f1dd
--- /dev/null
+++ b/src/database/repositories/analytics_repository.py
@@ -0,0 +1,333 @@
+"""Repository for user analytics data."""
+import logging
+from datetime import datetime, timezone
+from decimal import Decimal
+from typing import List, Optional, Dict, Any
+
+from ..connection import DatabaseManager
+from ..models import UserAnalytics, UserQuery, QueryPattern, HourlyActivity, dict_to_model
+
+logger = logging.getLogger(__name__)
+
+
class AnalyticsRepository:
    """Repository for managing user analytics and query data.

    Every method catches database errors, logs them, and returns a neutral
    value (False / None / empty collection) so analytics failures never
    break the calling command.
    """

    def __init__(self, db_manager: DatabaseManager):
        """Initialize analytics repository.

        Args:
            db_manager: Shared connection manager used for all queries.
        """
        self.db = db_manager

    async def create_or_update_user_analytics(
        self,
        user_id: int,
        username: str,
        tokens_used: int = 0,
        cost: Decimal = Decimal('0')
    ) -> bool:
        """
        Create or update user analytics record.

        Args:
            user_id: Discord user ID
            username: User's display name
            tokens_used: Number of tokens used in this interaction
            cost: Cost of this interaction

        Returns:
            True if successful, False otherwise
        """
        try:
            # The ::numeric cast prevents PostgreSQL integer division from
            # truncating the running average (both columns are INTEGER).
            await self.db.execute('''
                INSERT INTO user_analytics
                (user_id, username, last_interaction, total_requests, total_tokens, total_cost)
                VALUES ($1, $2, NOW(), 1, $3, $4)
                ON CONFLICT (user_id)
                DO UPDATE SET
                    username = EXCLUDED.username,
                    last_interaction = NOW(),
                    total_requests = user_analytics.total_requests + 1,
                    total_tokens = user_analytics.total_tokens + EXCLUDED.total_tokens,
                    total_cost = user_analytics.total_cost + EXCLUDED.total_cost,
                    avg_tokens_per_request = CASE
                        WHEN user_analytics.total_requests > 0
                        THEN (user_analytics.total_tokens + EXCLUDED.total_tokens)::numeric / (user_analytics.total_requests + 1)
                        ELSE EXCLUDED.total_tokens
                    END
            ''', user_id, username, tokens_used, cost)

            logger.debug(f"Updated analytics for user {user_id}")
            return True

        except Exception as e:
            logger.error(f"Failed to update user analytics: {e}")
            return False

    async def get_user_analytics(self, user_id: int) -> Optional[UserAnalytics]:
        """
        Get analytics for a specific user.

        Args:
            user_id: Discord user ID

        Returns:
            User analytics or None if not found
        """
        try:
            row = await self.db.fetch_one('''
                SELECT * FROM user_analytics WHERE user_id = $1
            ''', user_id)

            if row:
                return dict_to_model(dict(row), UserAnalytics)
            return None

        except Exception as e:
            logger.error(f"Failed to get user analytics: {e}")
            return None

    async def get_top_users(self, limit: int = 10) -> List[UserAnalytics]:
        """
        Get top users by total requests.

        Args:
            limit: Maximum number of users to return

        Returns:
            List of user analytics sorted by total requests
        """
        try:
            rows = await self.db.fetch_many('''
                SELECT * FROM user_analytics
                ORDER BY total_requests DESC
                LIMIT $1
            ''', limit)

            return [dict_to_model(dict(row), UserAnalytics) for row in rows]

        except Exception as e:
            logger.error(f"Failed to get top users: {e}")
            return []

    async def log_user_query(self, query: UserQuery) -> bool:
        """
        Log a user query to the database.

        Args:
            query: User query to log

        Returns:
            True if successful, False otherwise
        """
        try:
            # timestamp/id are left to the DB defaults.
            await self.db.execute('''
                INSERT INTO user_queries
                (user_id, username, command, query_text, channel_id, guild_id,
                 response_generated, error_occurred)
                VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
            ''', query.user_id, query.username, query.command, query.query_text,
                query.channel_id, query.guild_id, query.response_generated,
                query.error_occurred)

            logger.debug(f"Logged query for user {query.user_id}, command {query.command}")
            return True

        except Exception as e:
            logger.error(f"Failed to log user query: {e}")
            return False

    async def get_query_patterns(self, days: int = 7) -> List[QueryPattern]:
        """
        Get query patterns by command for the last N days.

        Args:
            days: Number of days to look back

        Returns:
            List of query patterns
        """
        try:
            # Bind the look-back window as a parameter ($1 * INTERVAL '1 day')
            # instead of %-formatting it into the SQL string.
            rows = await self.db.fetch_many('''
                SELECT
                    command,
                    COUNT(*) as total_queries,
                    COUNT(DISTINCT user_id) as unique_users,
                    COALESCE(AVG(LENGTH(query_text)), 0) as avg_query_length
                FROM user_queries
                WHERE timestamp >= NOW() - ($1 * INTERVAL '1 day')
                GROUP BY command
                ORDER BY total_queries DESC
            ''', days)

            return [dict_to_model(dict(row), QueryPattern) for row in rows]

        except Exception as e:
            logger.error(f"Failed to get query patterns: {e}")
            return []

    async def get_hourly_activity(self, days: int = 7) -> List[HourlyActivity]:
        """
        Get hourly activity patterns for the last N days.

        Args:
            days: Number of days to look back

        Returns:
            List of hourly activity data, busiest hours first
        """
        try:
            rows = await self.db.fetch_many('''
                SELECT
                    EXTRACT(HOUR FROM timestamp) as hour,
                    COUNT(*) as query_count
                FROM user_queries
                WHERE timestamp >= NOW() - ($1 * INTERVAL '1 day')
                GROUP BY EXTRACT(HOUR FROM timestamp)
                ORDER BY query_count DESC
            ''', days)

            # EXTRACT returns a numeric value; coerce to int for the model.
            return [HourlyActivity(hour=int(row['hour']), query_count=row['query_count']) for row in rows]

        except Exception as e:
            logger.error(f"Failed to get hourly activity: {e}")
            return []

    async def get_popular_queries(self, limit: int = 20, days: int = 7) -> List[Dict[str, Any]]:
        """
        Get most popular queries for the last N days.

        Note: rows are grouped by (query_text, command, username), so the
        same text asked by two users appears as two rows.

        Args:
            limit: Maximum number of queries to return
            days: Number of days to look back

        Returns:
            List of popular queries with metadata
        """
        try:
            rows = await self.db.fetch_many('''
                SELECT
                    query_text,
                    command,
                    COUNT(*) as frequency,
                    username,
                    MAX(timestamp) as last_used
                FROM user_queries
                WHERE timestamp >= NOW() - ($2 * INTERVAL '1 day')
                GROUP BY query_text, command, username
                ORDER BY frequency DESC
                LIMIT $1
            ''', limit, days)

            return [dict(row) for row in rows]

        except Exception as e:
            logger.error(f"Failed to get popular queries: {e}")
            return []

    async def get_user_query_history(
        self,
        user_id: int,
        limit: int = 50
    ) -> List[UserQuery]:
        """
        Get query history for a specific user, newest first.

        Args:
            user_id: Discord user ID
            limit: Maximum number of queries to return

        Returns:
            List of user queries
        """
        try:
            rows = await self.db.fetch_many('''
                SELECT * FROM user_queries
                WHERE user_id = $1
                ORDER BY timestamp DESC
                LIMIT $2
            ''', user_id, limit)

            return [dict_to_model(dict(row), UserQuery) for row in rows]

        except Exception as e:
            logger.error(f"Failed to get user query history: {e}")
            return []

    async def update_favorite_command(self, user_id: int) -> bool:
        """
        Update a user's favorite command based on usage patterns.

        Args:
            user_id: Discord user ID

        Returns:
            True if successful, False otherwise
        """
        try:
            # Most used command for this user; None if they have no queries yet.
            row = await self.db.fetch_one('''
                SELECT command, COUNT(*) as usage_count
                FROM user_queries
                WHERE user_id = $1
                GROUP BY command
                ORDER BY usage_count DESC
                LIMIT 1
            ''', user_id)

            if row:
                await self.db.execute('''
                    UPDATE user_analytics
                    SET favorite_command = $1
                    WHERE user_id = $2
                ''', row['command'], user_id)

                logger.debug(f"Updated favorite command for user {user_id}: {row['command']}")

            return True

        except Exception as e:
            logger.error(f"Failed to update favorite command: {e}")
            return False

    async def get_analytics_summary(self) -> Dict[str, Any]:
        """
        Get a comprehensive analytics summary.

        Returns:
            Dictionary with total/active user counts, the most active user
            (or None), and the average requests per user; empty on error.
        """
        try:
            # Total users
            total_users = await self.db.fetch_value('''
                SELECT COUNT(*) FROM user_analytics
            ''')

            # Active users (last 7 days)
            active_users = await self.db.fetch_value('''
                SELECT COUNT(*) FROM user_analytics
                WHERE last_interaction >= NOW() - INTERVAL '7 days'
            ''')

            # Most active user
            most_active = await self.db.fetch_one('''
                SELECT username, total_requests
                FROM user_analytics
                ORDER BY total_requests DESC
                LIMIT 1
            ''')

            # Average requests per user
            avg_requests = await self.db.fetch_value('''
                SELECT COALESCE(AVG(total_requests), 0)
                FROM user_analytics
            ''')

            return {
                'total_users': total_users or 0,
                'active_users_7d': active_users or 0,
                'most_active_user': dict(most_active) if most_active else None,
                'avg_requests_per_user': float(avg_requests or 0),
            }

        except Exception as e:
            logger.error(f"Failed to get analytics summary: {e}")
            return {}
\ No newline at end of file
diff --git a/src/database/repositories/usage_repository.py b/src/database/repositories/usage_repository.py
new file mode 100644
index 0000000..699fa12
--- /dev/null
+++ b/src/database/repositories/usage_repository.py
@@ -0,0 +1,248 @@
+"""Repository for usage tracking data."""
+import logging
+from datetime import datetime, timezone, date
+from decimal import Decimal
+from typing import List, Optional, Dict, Any
+
+from ..connection import DatabaseManager
+from ..models import UsageRecord, GlobalStats, CommandStats, ModelCosts, dict_to_model
+
+logger = logging.getLogger(__name__)
+
+
class UsageRepository:
    """Repository for managing usage tracking data.

    Every method catches database errors, logs them, and returns a neutral
    value (False / None / empty collection) so tracking failures never
    break the calling command.
    """

    def __init__(self, db_manager: DatabaseManager):
        """Initialize usage repository.

        Args:
            db_manager: Shared connection manager used for all queries.
        """
        self.db = db_manager

    async def create_usage_record(self, record: UsageRecord) -> bool:
        """
        Create a new usage record.

        Args:
            record: Usage record to create

        Returns:
            True if successful, False otherwise
        """
        try:
            # Intentionally mutates the caller's record: update_daily_summary()
            # is typically called with the same object afterwards and relies on
            # total_tokens having been filled in.
            if record.total_tokens == 0:
                record.total_tokens = record.input_tokens + record.output_tokens + record.cached_tokens

            await self.db.execute('''
                INSERT INTO usage_tracking
                (user_id, username, command, model, input_tokens, output_tokens,
                 cached_tokens, total_tokens, cost, guild_id, channel_id)
                VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11)
            ''', record.user_id, record.username, record.command, record.model,
                record.input_tokens, record.output_tokens, record.cached_tokens,
                record.total_tokens, record.cost, record.guild_id, record.channel_id)

            logger.debug(f"Created usage record for user {record.user_id}, command {record.command}")
            return True

        except Exception as e:
            logger.error(f"Failed to create usage record: {e}")
            return False

    async def get_global_stats(self) -> Optional[GlobalStats]:
        """
        Get global usage statistics across all users and commands.

        Returns:
            Global statistics or None if error
        """
        try:
            row = await self.db.fetch_one('''
                SELECT
                    COUNT(*) as total_requests,
                    COUNT(DISTINCT user_id) as unique_users,
                    COALESCE(SUM(total_tokens), 0) as total_tokens,
                    COALESCE(SUM(cost), 0) as total_cost,
                    COALESCE(AVG(total_tokens), 0) as avg_tokens_per_request
                FROM usage_tracking
            ''')

            if row:
                return dict_to_model(dict(row), GlobalStats)
            return None

        except Exception as e:
            logger.error(f"Failed to get global stats: {e}")
            return None

    async def get_top_commands(self, limit: int = 10) -> List[CommandStats]:
        """
        Get most used commands.

        Args:
            limit: Maximum number of commands to return

        Returns:
            List of command statistics, most used first
        """
        try:
            rows = await self.db.fetch_many('''
                SELECT
                    command,
                    COUNT(*) as usage_count,
                    COALESCE(SUM(cost), 0) as total_cost
                FROM usage_tracking
                GROUP BY command
                ORDER BY usage_count DESC
                LIMIT $1
            ''', limit)

            return [dict_to_model(dict(row), CommandStats) for row in rows]

        except Exception as e:
            logger.error(f"Failed to get top commands: {e}")
            return []

    async def get_model_costs(self) -> List[ModelCosts]:
        """
        Get cost breakdown by model.

        Returns:
            List of model cost statistics, most expensive model first
        """
        try:
            rows = await self.db.fetch_many('''
                SELECT
                    model,
                    COUNT(*) as requests,
                    COALESCE(SUM(input_tokens), 0) as input_tokens,
                    COALESCE(SUM(output_tokens), 0) as output_tokens,
                    COALESCE(SUM(cached_tokens), 0) as cached_tokens,
                    COALESCE(SUM(cost), 0) as total_cost,
                    COALESCE(AVG(cost), 0) as avg_cost_per_request
                FROM usage_tracking
                GROUP BY model
                ORDER BY total_cost DESC
            ''')

            return [dict_to_model(dict(row), ModelCosts) for row in rows]

        except Exception as e:
            logger.error(f"Failed to get model costs: {e}")
            return []

    async def get_user_usage_stats(self, user_id: int) -> Dict[str, Any]:
        """
        Get usage statistics for a specific user.

        Args:
            user_id: Discord user ID

        Returns:
            Dictionary with 'overall' totals, per-'commands' breakdown and
            'recent_activity' (daily buckets for the last 7 days);
            empty dict on error.
        """
        try:
            # Get overall user stats
            overall = await self.db.fetch_one('''
                SELECT
                    COUNT(*) as total_requests,
                    COALESCE(SUM(total_tokens), 0) as total_tokens,
                    COALESCE(SUM(cost), 0) as total_cost,
                    COALESCE(AVG(total_tokens), 0) as avg_tokens_per_request
                FROM usage_tracking
                WHERE user_id = $1
            ''', user_id)

            # Get command breakdown
            commands = await self.db.fetch_many('''
                SELECT
                    command,
                    COUNT(*) as count,
                    COALESCE(SUM(total_tokens), 0) as tokens,
                    COALESCE(SUM(cost), 0) as cost
                FROM usage_tracking
                WHERE user_id = $1
                GROUP BY command
                ORDER BY count DESC
            ''', user_id)

            # Get recent activity (last 7 days)
            recent = await self.db.fetch_many('''
                SELECT
                    DATE(timestamp) as date,
                    COUNT(*) as requests,
                    COALESCE(SUM(cost), 0) as daily_cost
                FROM usage_tracking
                WHERE user_id = $1 AND timestamp >= NOW() - INTERVAL '7 days'
                GROUP BY DATE(timestamp)
                ORDER BY date DESC
            ''', user_id)

            return {
                'overall': dict(overall) if overall else {},
                'commands': [dict(row) for row in commands],
                'recent_activity': [dict(row) for row in recent]
            }

        except Exception as e:
            logger.error(f"Failed to get user usage stats: {e}")
            return {}

    async def get_daily_stats(self, days: int = 7) -> List[Dict[str, Any]]:
        """
        Get daily usage statistics.

        Args:
            days: Number of days to look back

        Returns:
            List of daily statistics, newest day first
        """
        try:
            # Bind the look-back window as a parameter ($1 * INTERVAL '1 day')
            # instead of %-formatting it into the SQL string.
            rows = await self.db.fetch_many('''
                SELECT
                    DATE(timestamp) as date,
                    COUNT(*) as total_requests,
                    COUNT(DISTINCT user_id) as unique_users,
                    COALESCE(SUM(total_tokens), 0) as total_tokens,
                    COALESCE(SUM(cost), 0) as total_cost
                FROM usage_tracking
                WHERE timestamp >= NOW() - ($1 * INTERVAL '1 day')
                GROUP BY DATE(timestamp)
                ORDER BY date DESC
            ''', days)

            return [dict(row) for row in rows]

        except Exception as e:
            logger.error(f"Failed to get daily stats: {e}")
            return []

    async def update_daily_summary(self, record: UsageRecord) -> bool:
        """
        Update daily usage summary for a usage record.

        NOTE(review): unique_users is inserted as 1 and never changed by the
        ON CONFLICT branch, so the column does not reflect actual distinct
        users — computing that requires a DISTINCT count over usage_tracking.

        Args:
            record: Usage record to process

        Returns:
            True if successful, False otherwise
        """
        try:
            today = datetime.now(timezone.utc).date()

            await self.db.execute('''
                INSERT INTO daily_usage_summary
                (date, total_requests, total_tokens, total_cost, unique_users)
                VALUES ($1, 1, $2, $3, 1)
                ON CONFLICT (date)
                DO UPDATE SET
                    total_requests = daily_usage_summary.total_requests + 1,
                    total_tokens = daily_usage_summary.total_tokens + EXCLUDED.total_tokens,
                    total_cost = daily_usage_summary.total_cost + EXCLUDED.total_cost
            ''', today, record.total_tokens, record.cost)

            return True

        except Exception as e:
            logger.error(f"Failed to update daily summary: {e}")
            return False
\ No newline at end of file
diff --git a/src/services/__init__.py b/src/services/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/services/context_manager.py b/src/services/context_manager.py
new file mode 100644
index 0000000..6f11d93
--- /dev/null
+++ b/src/services/context_manager.py
@@ -0,0 +1,174 @@
+"""Context management for user conversations."""
+import time
+from collections import deque
+from typing import Any, Deque, Dict, List, Optional
+
+from ..config.settings import get_settings
+
+
class ContextManager:
    """Manages conversation context for users.

    Keeps an in-memory, per-user deque of message dicts
    (``role``/``content``/``timestamp``), capped at
    ``settings.max_context_messages`` entries and pruned to
    ``settings.max_context_age`` seconds. Enforces the
    system -> user -> assistant -> user ... alternation that chat APIs
    expect. State is process-local only; nothing is persisted.
    """

    # Singleton storage for get_instance(); direct construction still works.
    _instance: Optional['ContextManager'] = None

    def __init__(self):
        """Initialize context manager."""
        self.settings = get_settings()
        # user_id -> bounded deque of message dicts, newest at the right.
        self.user_contexts: Dict[int, Deque[Dict[str, Any]]] = {}

    @classmethod
    def get_instance(cls) -> 'ContextManager':
        """Get or create the shared singleton instance."""
        if cls._instance is None:
            cls._instance = cls()
        return cls._instance

    def get_user_context(self, user_id: int) -> Deque[Dict[str, Any]]:
        """
        Get context for a user, creating an empty bounded deque if necessary.

        Args:
            user_id: Discord user ID

        Returns:
            User's context deque (live object — mutations affect stored state)
        """
        return self.user_contexts.setdefault(
            user_id,
            deque(maxlen=self.settings.max_context_messages)
        )

    def update_context(self, user_id: int, content: str, role: str) -> None:
        """
        Update user's conversation context.

        Messages that would break the system/user/assistant alternation are
        silently dropped (e.g. two 'user' messages in a row).

        Args:
            user_id: Discord user ID
            content: Message content (stored stripped)
            role: Message role (user/assistant/system)
        """
        context = self.get_user_context(user_id)
        current_time = time.time()

        # Initialize with system prompt if empty. Note: only a 'user' message
        # seeds the system prompt; other roles on an empty context skip this.
        if not context and role == 'user':
            context.append({
                'role': 'system',
                'content': self.settings.system_prompt.strip(),
                'timestamp': current_time,
            })

        # Validate role alternation
        if context:
            last_role = context[-1]['role']

            # Check for valid role transitions
            valid_transition = (
                (last_role == 'system' and role == 'user') or
                (last_role == 'user' and role == 'assistant') or
                (last_role == 'assistant' and role == 'user')
            )

            if not valid_transition:
                return  # Skip invalid transitions

        # Add message to context
        context.append({
            'role': role,
            'content': content.strip(),
            'timestamp': current_time,
        })

        # Clean up old messages
        self._cleanup_old_messages(user_id)

    def get_context_messages(self, user_id: int) -> List[Dict[str, str]]:
        """
        Get formatted context messages for API calls.

        Args:
            user_id: Discord user ID

        Returns:
            List of {'role', 'content'} dicts, starting with the system
            prompt and strictly alternating user/assistant thereafter
        """
        context = self.get_user_context(user_id)

        # Convert to API format (drop the internal timestamps)
        messages = [
            {"role": msg['role'], "content": msg['content']}
            for msg in context
        ]

        # Ensure system message is first
        if not messages or messages[0]['role'] != 'system':
            messages.insert(0, {
                "role": "system",
                "content": self.settings.system_prompt.strip(),
            })

        return self._validate_message_order(messages)

    def _cleanup_old_messages(self, user_id: int) -> None:
        """Remove messages older than max context age."""
        if user_id not in self.user_contexts:
            return

        current_time = time.time()
        cutoff_time = current_time - self.settings.max_context_age

        # Filter out old messages by rebuilding the bounded deque.
        context = self.user_contexts[user_id]
        self.user_contexts[user_id] = deque(
            [msg for msg in context if msg['timestamp'] >= cutoff_time],
            maxlen=self.settings.max_context_messages
        )

    def _validate_message_order(self, messages: List[Dict[str, str]]) -> List[Dict[str, str]]:
        """Ensure messages alternate roles correctly.

        Out-of-order messages are dropped, not reordered; the first message
        (assumed to be the system prompt) is always kept.
        """
        if not messages:
            return messages

        cleaned = [messages[0]]  # Start with system message

        for i in range(1, len(messages)):
            last_role = cleaned[-1]['role']
            current_role = messages[i]['role']

            # Determine expected role
            if last_role in ['system', 'assistant']:
                expected_role = 'user'
            elif last_role == 'user':
                expected_role = 'assistant'
            else:
                continue  # Skip unknown roles

            if current_role == expected_role:
                cleaned.append(messages[i])

        return cleaned

    def clear_context(self, user_id: int) -> None:
        """Clear context for a specific user (no-op if none exists)."""
        if user_id in self.user_contexts:
            del self.user_contexts[user_id]

    def has_context(self, user_id: int) -> bool:
        """Check if user has any context (without creating one)."""
        return user_id in self.user_contexts and len(self.user_contexts[user_id]) > 0

    def get_context_summary(self, user_id: int) -> Dict[str, Any]:
        """Get summary information about user's context.

        Returns message count, oldest/newest timestamps and the role
        sequence; a zero-message summary if the user has no context.
        """
        if user_id not in self.user_contexts:
            return {"messages": 0, "oldest_timestamp": None}

        context = self.user_contexts[user_id]
        if not context:
            return {"messages": 0, "oldest_timestamp": None}

        return {
            "messages": len(context),
            "oldest_timestamp": context[0]['timestamp'],
            "newest_timestamp": context[-1]['timestamp'],
            "roles": [msg['role'] for msg in context]
        }
\ No newline at end of file
diff --git a/src/services/rate_limiter.py b/src/services/rate_limiter.py
new file mode 100644
index 0000000..bc6a57a
--- /dev/null
+++ b/src/services/rate_limiter.py
@@ -0,0 +1,130 @@
+"""Rate limiting service for API calls."""
+import time
+from typing import Dict, List, Optional, Tuple
+
+
class RateLimiter:
    """Sliding-window rate limiter keyed by Discord user ID.

    Tracks the wall-clock timestamps of each user's recent calls and
    rejects new calls once ``max_calls`` have happened within the last
    ``interval`` seconds. State is process-local and in-memory only.
    """

    def __init__(self, max_calls: int, interval: int):
        """
        Initialize rate limiter.

        Args:
            max_calls: Maximum calls allowed per interval
            interval: Time interval in seconds
        """
        self.max_calls = max_calls
        self.interval = interval
        # user_id -> timestamps of calls still inside the window
        self.calls: Dict[int, List[float]] = {}

    def _prune(self, user_id: int, now: float) -> List[float]:
        """Drop timestamps older than the window and return the live list."""
        window = [ts for ts in self.calls.get(user_id, []) if now - ts <= self.interval]
        self.calls[user_id] = window
        return window

    def is_rate_limited(self, user_id: int) -> bool:
        """
        Check if a user is rate limited.

        Note: a successful check *records* the call, so this is
        check-and-consume rather than a pure query.

        Args:
            user_id: Discord user ID

        Returns:
            True if rate limited, False otherwise
        """
        now = time.time()
        window = self._prune(user_id, now)

        if len(window) >= self.max_calls:
            return True

        window.append(now)  # consume one slot
        return False

    def check_rate_limit(self, user_id: Optional[int] = None) -> Tuple[bool, Optional[str]]:
        """
        Check if an API call can be made.

        Args:
            user_id: Discord user ID (optional; None means a system call)

        Returns:
            Tuple of (can_make_call, error_message)
        """
        # System calls (no user) are never limited.
        if user_id is None:
            return True, None

        if not self.is_rate_limited(user_id):
            return True, None

        time_until_reset = self.get_time_until_reset(user_id)
        return False, (
            f"🚫 Rate limit exceeded. Please wait {time_until_reset} seconds "
            f"before making another request."
        )

    def get_time_until_reset(self, user_id: int) -> int:
        """
        Get time in seconds until rate limit resets for a user.

        Args:
            user_id: Discord user ID

        Returns:
            Seconds until the oldest recorded call ages out (0 if none)
        """
        entries = self.calls.get(user_id)
        if not entries:
            return 0

        elapsed = time.time() - min(entries)
        return int(self.interval - elapsed) if elapsed < self.interval else 0

    def get_remaining_calls(self, user_id: int) -> int:
        """
        Get remaining API calls for a user.

        Args:
            user_id: Discord user ID

        Returns:
            Number of remaining calls in the current window
        """
        if user_id not in self.calls:
            return self.max_calls

        window = self._prune(user_id, time.time())
        return max(0, self.max_calls - len(window))

    def reset_user(self, user_id: int) -> None:
        """
        Reset rate limit for a specific user.

        Args:
            user_id: Discord user ID
        """
        self.calls.pop(user_id, None)

    def reset_all(self) -> None:
        """Reset rate limits for all users."""
        self.calls.clear()
\ No newline at end of file
diff --git a/src/utils/__init__.py b/src/utils/__init__.py
new file mode 100644
index 0000000..05e895a
--- /dev/null
+++ b/src/utils/__init__.py
@@ -0,0 +1,98 @@
+"""Utility modules for SecurePath bot."""
+
+from .discord_helpers import (
+ reset_status,
+ send_long_message,
+ send_long_embed,
+ send_structured_analysis_embed,
+ format_percentage,
+ format_price,
+ format_large_number,
+ create_progress_embed,
+ is_admin_user,
+ get_user_display_name,
+ truncate_text,
+ extract_command_args,
+)
+
+from .validators import (
+ validate_discord_id,
+ validate_url,
+ validate_image_url,
+ validate_query_length,
+ validate_command_name,
+ validate_username,
+ sanitize_filename,
+ validate_model_name,
+ validate_cost,
+ validate_token_count,
+ extract_mentions,
+ extract_channel_mentions,
+ is_spam_like,
+)
+
+from .formatting import (
+ format_currency,
+ format_percentage as format_percentage_detailed,
+ format_large_number as format_large_number_detailed,
+ format_duration,
+ format_timestamp,
+ clean_text_for_discord,
+ escape_markdown,
+ format_code_block,
+ format_inline_code,
+ format_usage_stats,
+ format_model_name,
+ format_error_message,
+ format_list_items,
+ format_embed_field_value,
+ truncate_with_ellipsis,
+)
+
+__all__ = [
+ # Discord helpers
+ 'reset_status',
+ 'send_long_message',
+ 'send_long_embed',
+ 'send_structured_analysis_embed',
+ 'create_progress_embed',
+ 'is_admin_user',
+ 'get_user_display_name',
+ 'truncate_text',
+ 'extract_command_args',
+
+ # Validators
+ 'validate_discord_id',
+ 'validate_url',
+ 'validate_image_url',
+ 'validate_query_length',
+ 'validate_command_name',
+ 'validate_username',
+ 'sanitize_filename',
+ 'validate_model_name',
+ 'validate_cost',
+ 'validate_token_count',
+ 'extract_mentions',
+ 'extract_channel_mentions',
+ 'is_spam_like',
+
+ # Formatting
+ 'format_currency',
+ 'format_percentage',
+ 'format_percentage_detailed',
+ 'format_price',
+ 'format_large_number',
+ 'format_large_number_detailed',
+ 'format_duration',
+ 'format_timestamp',
+ 'clean_text_for_discord',
+ 'escape_markdown',
+ 'format_code_block',
+ 'format_inline_code',
+ 'format_usage_stats',
+ 'format_model_name',
+ 'format_error_message',
+ 'format_list_items',
+ 'format_embed_field_value',
+ 'truncate_with_ellipsis',
+]
\ No newline at end of file
diff --git a/src/utils/discord_helpers.py b/src/utils/discord_helpers.py
new file mode 100644
index 0000000..559afdb
--- /dev/null
+++ b/src/utils/discord_helpers.py
@@ -0,0 +1,259 @@
+"""Discord-specific utility functions."""
+import asyncio
+import logging
+import random
+from typing import Optional, Union
+
+import discord
+from discord import Activity, ActivityType
+
+logger = logging.getLogger(__name__)
+
+# Status messages for rotation
+STATUS_MESSAGES = [
+ ("!ask", "real-time market insights", ActivityType.watching),
+ ("!analyze", "chart patterns & signals", ActivityType.watching),
+ ("!summary", "alpha extraction from channels", ActivityType.listening),
+ ("!commands", "for all features", ActivityType.playing),
+ ("defi", "on-chain truth over hype", ActivityType.watching),
+ ("docs", "show me the code", ActivityType.watching),
+]
+
+
async def reset_status(bot) -> None:
    """Pick one of the default STATUS_MESSAGES at random and apply it as the bot's presence."""
    try:
        name, state, activity_type = random.choice(STATUS_MESSAGES)
        presence_text = f"{name} • {state}"
        await bot.change_presence(activity=Activity(type=activity_type, name=presence_text))
        logger.debug(f"Reset status to: {presence_text}")
    except Exception as e:
        # Presence updates are cosmetic; never let a failure propagate.
        logger.error(f"Error resetting status: {e}")
+
+
async def send_long_message(channel, content: str, max_length: int = 2000, delay: float = 0.5) -> None:
    """
    Send a long message by splitting it into chunks at line boundaries.

    Args:
        channel: Discord channel to send to
        content: Message content
        max_length: Maximum length per message
        delay: Seconds to pause between consecutive chunks (rate-limit cushion)
    """
    if len(content) <= max_length:
        await channel.send(content)
        return

    # Split content into chunks, preferring line boundaries
    chunks = []
    current_chunk = ""

    for line in content.split('\n'):
        # Hard-split any single line that alone exceeds the limit; otherwise
        # a chunk could exceed max_length and be rejected by Discord.
        while len(line) > max_length:
            if current_chunk.strip():
                chunks.append(current_chunk.strip())
                current_chunk = ""
            chunks.append(line[:max_length])
            line = line[max_length:]

        if len(current_chunk) + len(line) + 1 <= max_length:
            current_chunk += line + '\n'
        else:
            if current_chunk.strip():
                chunks.append(current_chunk.strip())
            current_chunk = line + '\n'

    if current_chunk.strip():
        chunks.append(current_chunk.strip())

    # Send chunks, pausing between (not after) them to avoid rate limits
    for i, chunk in enumerate(chunks):
        await channel.send(chunk)
        if i < len(chunks) - 1:
            await asyncio.sleep(delay)
+
+
async def send_long_embed(
    channel,
    content: str,
    color: int = 0x1D82B6,
    title: str = None,
    max_description_length: int = 4096
) -> None:
    """
    Send content as embeds, splitting across several embeds if necessary.

    Args:
        channel: Discord channel to send to
        content: Content to send
        color: Embed color
        title: Embed title (may be None)
        max_description_length: Maximum description length per embed
    """
    if len(content) <= max_description_length:
        embed = discord.Embed(
            title=title,
            description=content,
            color=color
        )
        await channel.send(embed=embed)
        return

    # Split on paragraph boundaries, hard-splitting any single paragraph
    # that alone exceeds the limit (otherwise Discord rejects the embed).
    chunks = []
    current_chunk = ""

    for paragraph in content.split('\n\n'):
        while len(paragraph) > max_description_length:
            if current_chunk.strip():
                chunks.append(current_chunk.strip())
                current_chunk = ""
            chunks.append(paragraph[:max_description_length])
            paragraph = paragraph[max_description_length:]

        if len(current_chunk) + len(paragraph) + 2 <= max_description_length:
            current_chunk += paragraph + '\n\n'
        else:
            if current_chunk.strip():
                chunks.append(current_chunk.strip())
            current_chunk = paragraph + '\n\n'

    if current_chunk.strip():
        chunks.append(current_chunk.strip())

    # Send chunks as embeds
    for i, chunk in enumerate(chunks):
        # Only label continuations when a title was given; previously a
        # None title produced the literal "None (continued)".
        if title:
            embed_title = title if i == 0 else f"{title} (continued)"
        else:
            embed_title = None
        embed = discord.Embed(
            title=embed_title,
            description=chunk,
            color=color
        )
        await channel.send(embed=embed)
        await asyncio.sleep(0.5)
+
+
async def send_structured_analysis_embed(
    channel,
    text: str,
    color: int = 0x1D82B6,
    title: str = "Analysis",
    image_url: Optional[str] = None,
    user_mention: Optional[str] = None
) -> None:
    """
    Send a structured analysis embed with proper formatting.

    Short analyses go out as a single embed; longer ones send a header
    embed (title/image/mention) followed by the content via
    send_long_embed. Falls back to a truncated plain-text message if
    Discord rejects the embed.

    Args:
        channel: Discord channel to send to
        text: Analysis content
        color: Embed color
        title: Embed title
        image_url: Optional image URL to include
        user_mention: Optional user mention
    """
    try:
        # Create main embed
        embed = discord.Embed(
            title=title,
            color=color,
            timestamp=discord.utils.utcnow()
        )

        # Add image if provided
        if image_url:
            embed.set_image(url=image_url)

        # Add user mention if provided
        if user_mention:
            embed.description = f"Analysis requested by {user_mention}"

        # Try to fit content in embed description
        # NOTE(review): the 4096 check covers `text` only; with a mention
        # prefix the combined description can slightly exceed Discord's
        # limit, in which case the HTTPException fallback below handles it.
        if len(text) <= 4096:
            embed.description = (embed.description or "") + f"\n\n{text}"
            await channel.send(embed=embed)
        else:
            # Send title embed first, then use long embed for content
            await channel.send(embed=embed)
            await send_long_embed(
                channel=channel,
                content=text,
                color=color,
                title="📊 Detailed Analysis"
            )

    except discord.HTTPException as e:
        logger.error(f"Failed to send analysis embed: {e}")
        # Fallback to text message (1800 chars leaves headroom under the
        # 2000-char plain-message limit for the title line)
        fallback_text = f"**{title}**\n\n{text[:1800]}{'...' if len(text) > 1800 else ''}"
        await channel.send(fallback_text)
+
+
def format_percentage(value: float, decimals: int = 2) -> str:
    """Render a percentage with an explicit leading sign, e.g. '+1.25%'."""
    return f"{value:+.{decimals}f}%"
+
+
def format_price(value: float, currency: str = "USD") -> str:
    """Render a price: '$1,234.56' for USD, '1.2345 <code>' for anything else."""
    is_usd = currency.upper() == "USD"
    return f"${value:,.2f}" if is_usd else f"{value:,.4f} {currency}"
+
+
def format_large_number(value: int) -> str:
    """
    Format large numbers with K, M, B suffixes.

    Uses the magnitude (abs) for the threshold check so negative values
    are abbreviated too, consistent with formatting.format_large_number.

    Args:
        value: Number to format

    Returns:
        Abbreviated number string (sign preserved)
    """
    magnitude = abs(value)
    if magnitude >= 1_000_000_000:
        return f"{value / 1_000_000_000:.1f}B"
    elif magnitude >= 1_000_000:
        return f"{value / 1_000_000:.1f}M"
    elif magnitude >= 1_000:
        return f"{value / 1_000:.1f}K"
    else:
        return str(value)
+
+
def create_progress_embed(
    title: str,
    description: Optional[str] = None,
    status: str = "Initializing...",
    color: int = 0x1D82B6
) -> discord.Embed:
    """
    Create a standard progress embed.

    Args:
        title: Embed title
        description: Optional embed description
        status: Initial text for the "Status" field
        color: Embed color

    Returns:
        Embed with a "Status" field and the standard footer.
    """
    embed = discord.Embed(
        title=title,
        description=description,
        color=color
    )
    # Callers are expected to update this field as work progresses
    embed.add_field(name="Status", value=status, inline=False)
    embed.set_footer(text="SecurePath Agent")
    return embed
+
+
+def is_admin_user(user: discord.User, owner_id: int) -> bool:
+ """Check if user is an admin."""
+ return user.id == owner_id
+
+
+def get_user_display_name(user: discord.User) -> str:
+ """Get user's display name for database storage."""
+ if user.discriminator != "0":
+ return f"{user.name}#{user.discriminator}"
+ else:
+ return user.name
+
+
def truncate_text(text: str, max_length: int, suffix: str = "...") -> str:
    """Clip *text* to *max_length* characters, ending with *suffix* when clipped."""
    if len(text) > max_length:
        return text[:max_length - len(suffix)] + suffix
    return text
+
+
def extract_command_args(content: str, prefix: str) -> tuple[str, str]:
    """
    Split a prefixed message into its command and argument string.

    Args:
        content: Raw message content
        prefix: Bot command prefix

    Returns:
        (lower-cased command, remaining args); ("", "") when *content*
        does not start with *prefix* or has no command word.
    """
    if not content.startswith(prefix):
        return "", ""

    tokens = content[len(prefix):].split(maxsplit=1)
    if not tokens:
        return "", ""
    return tokens[0].lower(), tokens[1] if len(tokens) > 1 else ""
\ No newline at end of file
diff --git a/src/utils/formatting.py b/src/utils/formatting.py
new file mode 100644
index 0000000..85a7cc7
--- /dev/null
+++ b/src/utils/formatting.py
@@ -0,0 +1,353 @@
+"""Text formatting utilities for consistent output."""
+import re
+from datetime import datetime, timezone
+from decimal import Decimal
+from typing import Optional, List, Dict, Any
+
+
def format_currency(amount: float, currency: str = "USD", decimals: int = 4) -> str:
    """
    Format a monetary amount.

    USD amounts of $1 or more use a thousands separator and at most 2
    decimals; sub-dollar USD amounts keep the full precision. Any other
    currency renders as '<amount> <CODE>'.

    Args:
        amount: Amount to format
        currency: Currency code
        decimals: Number of decimal places

    Returns:
        Formatted currency string
    """
    code = currency.upper()
    if code != "USD":
        return f"{amount:.{decimals}f} {code}"
    if amount >= 1:
        return f"${amount:,.{min(decimals, 2)}f}"
    return f"${amount:.{decimals}f}"
+
+
def format_percentage(value: float, decimals: int = 2, show_sign: bool = True) -> str:
    """
    Format a percentage.

    Args:
        value: Percentage value
        decimals: Number of decimal places
        show_sign: When True, positive values carry a leading '+'

    Returns:
        Formatted percentage string
    """
    # '+' forces a sign on positives; '-' is the default (negative only)
    sign_flag = '+' if show_sign else '-'
    return f"{value:{sign_flag}.{decimals}f}%"
+
+
def format_large_number(value: int, decimals: int = 1) -> str:
    """
    Abbreviate large numbers with K/M/B/T suffixes.

    Args:
        value: Number to format (sign preserved; magnitude drives the suffix)
        decimals: Decimal places for abbreviated values

    Returns:
        Abbreviated string, or comma-grouped number below 1,000.
    """
    scales = (
        (1_000_000_000_000, "T"),
        (1_000_000_000, "B"),
        (1_000_000, "M"),
        (1_000, "K"),
    )
    for scale, suffix in scales:
        if abs(value) >= scale:
            return f"{value / scale:.{decimals}f}{suffix}"
    return f"{value:,}"
+
+
def format_duration(seconds: float) -> str:
    """
    Render a duration as seconds, minutes, hours, or days with one decimal.

    Args:
        seconds: Duration in seconds

    Returns:
        Formatted duration string, e.g. '90.0s' -> '1.5m'
    """
    for upper_bound, divisor, unit in ((60, 1, "s"), (3600, 60, "m"), (86400, 3600, "h")):
        if seconds < upper_bound:
            return f"{seconds / divisor:.1f}{unit}"
    return f"{seconds / 86400:.1f}d"
+
+
def format_timestamp(dt: datetime, format_type: str = "relative") -> str:
    """
    Render a datetime as iso / short / long / relative text.

    Args:
        dt: Datetime to format (naive datetimes are assumed UTC)
        format_type: One of 'iso', 'short', 'long', 'relative';
            anything else falls back to str(dt)

    Returns:
        Formatted timestamp string
    """
    if format_type == "iso":
        return dt.isoformat()
    if format_type == "short":
        return dt.strftime("%Y-%m-%d %H:%M")
    if format_type == "long":
        return dt.strftime("%Y-%m-%d %H:%M:%S UTC")
    if format_type != "relative":
        return str(dt)

    if dt.tzinfo is None:
        dt = dt.replace(tzinfo=timezone.utc)
    delta = datetime.now(timezone.utc) - dt
    age = delta.total_seconds()

    if age < 60:
        return "just now"
    if age < 3600:
        return f"{int(age / 60)}m ago"
    if age < 86400:
        return f"{int(age / 3600)}h ago"
    if delta.days < 7:
        return f"{delta.days}d ago"
    return dt.strftime("%Y-%m-%d")
+
+
def clean_text_for_discord(text: str, max_length: int = 2000) -> str:
    """
    Normalize text for a Discord message.

    Converts CR/CRLF to LF, collapses runs of blank lines and spaces,
    and truncates with '...' to fit the message limit.

    Args:
        text: Text to clean
        max_length: Maximum message length

    Returns:
        Cleaned, stripped text (empty string for falsy input)
    """
    if not text:
        return ""

    cleaned = text
    for pattern, replacement in ((r'\r\n|\r', '\n'), (r'\n{3,}', '\n\n'), (r' {2,}', ' ')):
        cleaned = re.sub(pattern, replacement, cleaned)

    if len(cleaned) > max_length:
        cleaned = cleaned[:max_length - 3] + "..."

    return cleaned.strip()
+
+
def escape_markdown(text: str) -> str:
    """
    Escape Discord markdown control characters.

    The backslash MUST be escaped first: escaping it after the other
    characters would double-escape the backslashes just inserted
    (e.g. '*' became '\\\\*' instead of '\\*').

    Args:
        text: Text to escape

    Returns:
        Escaped text safe for literal display
    """
    text = text.replace('\\', '\\\\')
    for char in ['*', '_', '`', '~', '|', '>', '#']:
        text = text.replace(char, f'\\{char}')

    return text
+
+
def format_code_block(code: str, language: str = "") -> str:
    """
    Wrap code in a fenced Discord code block.

    Args:
        code: Code to display
        language: Optional language tag for syntax highlighting

    Returns:
        Fenced code block string
    """
    return "```" + language + "\n" + code + "\n```"
+
+
def format_inline_code(code: str) -> str:
    """
    Wrap text in single backticks for inline Discord code.

    NOTE(review): backticks inside *code* will break the span — callers
    appear to pass short identifiers only; confirm before widening use.

    Args:
        code: Text to format

    Returns:
        Inline-code string
    """
    return "`" + code + "`"
+
+
def format_usage_stats(stats: Dict[str, Any]) -> str:
    """
    Build a markdown summary of usage statistics.

    Keys absent from *stats* are simply skipped.

    Args:
        stats: Statistics dictionary (total_requests, total_tokens,
            total_cost, unique_users)

    Returns:
        Newline-joined markdown lines
    """
    rendered = []

    if 'total_requests' in stats:
        rendered.append(f"**Requests:** {format_large_number(stats['total_requests'])}")
    if 'total_tokens' in stats:
        rendered.append(f"**Tokens:** {format_large_number(stats['total_tokens'])}")
    if 'total_cost' in stats:
        rendered.append(f"**Cost:** {format_currency(float(stats['total_cost']))}")
    if 'unique_users' in stats:
        rendered.append(f"**Users:** {format_large_number(stats['unique_users'])}")

    return "\n".join(rendered)
+
+
def format_model_name(model: str) -> str:
    """
    Map a raw model identifier to its human-readable display name.

    Args:
        model: Model identifier

    Returns:
        Known display name, or a title-cased fallback for unknown models
    """
    display_names = {
        'gpt-4.1': 'GPT-4.1',
        'gpt-4-1106-preview': 'GPT-4 Turbo',
        'gpt-4-vision-preview': 'GPT-4 Vision',
        'gpt-4o': 'GPT-4o',
        'gpt-4o-mini': 'GPT-4o Mini',
        'sonar-pro': 'Perplexity Sonar-Pro',
        'llama-3.1-sonar-large-128k-online': 'Perplexity Sonar-Pro',
    }
    return display_names.get(model, model.title())
+
+
def format_error_message(error: str, max_length: int = 1000) -> str:
    """
    Scrub an error for end-user display.

    Strips 'File "...", line N' traceback fragments and dotted module
    prefixes, truncates to *max_length*, and never returns an empty
    string.

    Args:
        error: Error message
        max_length: Maximum output length

    Returns:
        Cleaned error message
    """
    if not error:
        return "An unknown error occurred."

    message = str(error)
    # Order matters: file refs first, then module-path prefixes
    for noise in (r'File ".*?", line \d+', r'\w+\.\w+\.\w+:'):
        message = re.sub(noise, '', message)

    if len(message) > max_length:
        message = message[:max_length - 3] + "..."

    return message.strip() or "An error occurred while processing your request."
+
+
def format_list_items(items: List[str], max_items: int = 10) -> str:
    """
    Bullet-list up to *max_items* entries, noting how many were omitted.

    Args:
        items: Items to display
        max_items: Maximum entries shown before truncating

    Returns:
        Newline-joined bullet list, or a placeholder for an empty list
    """
    if not items:
        return "No items to display."

    shown = items[:max_items]
    lines = [f"• {entry}" for entry in shown]

    hidden = len(items) - len(shown)
    if hidden > 0:
        lines.append(f"*...and {hidden} more*")

    return "\n".join(lines)
+
+
def format_embed_field_value(value: Any, max_length: int = 1024) -> str:
    """
    Stringify a value for a Discord embed field.

    Args:
        value: Value to format (None and empty strings become "N/A")
        max_length: Embed-field value limit

    Returns:
        Display string truncated with '...' to fit the field
    """
    if value is None:
        return "N/A"

    text = str(value)
    if len(text) > max_length:
        return text[:max_length - 3] + "..."
    return text or "N/A"
+
+
def truncate_with_ellipsis(text: str, max_length: int, suffix: str = "...") -> str:
    """
    Return *text* unchanged when it fits, else clip it and append *suffix*.

    Args:
        text: Text to potentially truncate
        max_length: Maximum allowed length
        suffix: Marker appended when truncating

    Returns:
        Possibly-truncated text
    """
    needs_cut = bool(text) and len(text) > max_length
    if not needs_cut:
        return text
    return text[:max_length - len(suffix)] + suffix
\ No newline at end of file
diff --git a/src/utils/validators.py b/src/utils/validators.py
new file mode 100644
index 0000000..5163c4a
--- /dev/null
+++ b/src/utils/validators.py
@@ -0,0 +1,267 @@
+"""Input validation utilities."""
+import re
+from typing import Optional, Tuple
+from urllib.parse import urlparse
+
+
def validate_discord_id(discord_id: str) -> bool:
    """
    Check that a string looks like a Discord snowflake ID.

    Args:
        discord_id: Candidate ID string

    Returns:
        True for an all-digit string of 15-20 characters
    """
    if not discord_id:
        return False
    # Snowflakes are typically 17-19 digits; allow a small margin
    return discord_id.isdigit() and 15 <= len(discord_id) <= 20
+
+
def validate_url(url: str) -> bool:
    """
    Check that *url* parses with both a scheme and a network location.

    Args:
        url: URL string to validate

    Returns:
        True if the string looks like an absolute URL
    """
    try:
        parsed = urlparse(url)
    # Narrowed from a bare ``except``: urlparse raises only for non-string
    # input (TypeError/AttributeError) or malformed components (ValueError).
    except (TypeError, AttributeError, ValueError):
        return False
    return bool(parsed.scheme and parsed.netloc)


def validate_image_url(url: str) -> bool:
    """
    Check that *url* is a valid URL whose path names an image file.

    The extension check runs on the URL *path*, so query strings and
    fragments (e.g. '.../chart.png?width=800') no longer defeat it.

    Args:
        url: URL string to validate

    Returns:
        True if valid image URL
    """
    if not validate_url(url):
        return False

    image_extensions = ('.jpg', '.jpeg', '.png', '.gif', '.webp', '.bmp')
    return urlparse(url).path.lower().endswith(image_extensions)
+
+
def validate_query_length(query: str, min_length: int = 5, max_length: int = 500) -> Tuple[bool, Optional[str]]:
    """
    Check a query's trimmed length against bounds.

    Args:
        query: Query text to validate
        min_length: Minimum allowed length
        max_length: Maximum allowed length

    Returns:
        (True, None) when valid, else (False, human-readable reason)
    """
    trimmed = query.strip() if query else ""
    if not trimmed:
        return False, "Query cannot be empty"

    if len(trimmed) < min_length:
        return False, f"Query too short. Minimum {min_length} characters required."
    if len(trimmed) > max_length:
        return False, f"Query too long. Maximum {max_length} characters allowed."

    return True, None
+
+
def validate_command_name(command: str) -> bool:
    """
    Validate a bot command name.

    Args:
        command: Command name to validate

    Returns:
        True for a nonempty alphanumeric/underscore/hyphen name of at
        most 32 characters
    """
    if not command or len(command) > 32:
        return False
    return re.fullmatch(r'[a-zA-Z0-9_-]+', command) is not None
+
+
def validate_username(username: str) -> bool:
    """
    Validate a Discord username; a trailing '#discriminator' is ignored.

    Args:
        username: Username to validate

    Returns:
        True when the base name is 2-32 chars of [a-zA-Z0-9._-]
    """
    if not username:
        return False

    base = username.partition('#')[0]
    if not 2 <= len(base) <= 32:
        return False
    return re.fullmatch(r'[a-zA-Z0-9._-]+', base) is not None
+
+
def sanitize_filename(filename: str) -> str:
    """
    Make a filename safe for storage.

    Replaces path/shell-hostile characters with '_', trims surrounding
    dots and spaces, and caps the length near the common 255-char limit
    while preserving the extension.

    Args:
        filename: Original filename

    Returns:
        Sanitized filename ('unnamed_file' when nothing usable remains)
    """
    if not filename:
        return "unnamed_file"

    safe = re.sub(r'[<>:"/\\|?*]', '_', filename).strip('. ')

    if len(safe) > 255:
        if '.' in safe:
            stem, ext = safe.rsplit('.', 1)
            safe = stem[:250 - len(ext) - 1] + '.' + ext
        else:
            safe = safe[:255]

    return safe or "unnamed_file"
+
+
def validate_model_name(model: str) -> bool:
    """
    Check that *model* is one of the AI models this bot supports.

    Args:
        model: Model identifier to validate

    Returns:
        True when supported
    """
    supported = {
        'gpt-4.1',
        'gpt-4-1106-preview',
        'gpt-4-vision-preview',
        'gpt-4o',
        'gpt-4o-mini',
        'sonar-pro',
        'llama-3.1-sonar-large-128k-online',
    }
    return model in supported
+
+
def validate_cost(cost: float) -> bool:
    """
    Validate a cost value.

    Args:
        cost: Cost value to validate

    Returns:
        True for numeric costs in the half-open range [0, 1000)
    """
    if not isinstance(cost, (int, float)):
        return False
    return 0 <= cost < 1000
+
+
def validate_token_count(tokens: int) -> bool:
    """
    Validate a token count.

    Args:
        tokens: Token count to validate

    Returns:
        True for integers between 0 and 1,000,000 inclusive
    """
    if not isinstance(tokens, int):
        return False
    return 0 <= tokens <= 1_000_000
+
+
def extract_mentions(text: str) -> list[str]:
    """
    Extract user IDs from Discord user mentions (<@123> or <@!123>).

    Args:
        text: Text to scan

    Returns:
        Mentioned user IDs as strings, in order of appearance
    """
    return re.findall(r'<@!?(\d+)>', text)
+
+
def extract_channel_mentions(text: str) -> list[str]:
    """
    Extract channel IDs from Discord channel mentions (<#123>).

    Args:
        text: Text to scan

    Returns:
        Mentioned channel IDs as strings, in order of appearance
    """
    return re.findall(r'<#(\d+)>', text)
+
+
def is_spam_like(text: str) -> bool:
    """
    Heuristic spam detector.

    Flags heavy word repetition, mostly-uppercase text, or text dominated
    by special characters.

    Args:
        text: Text to check

    Returns:
        True if the text looks spam-like
    """
    if not text:
        return False

    length = len(text)
    words = text.lower().split()

    # Repetition: fewer than 30% unique words in a 6+ word message
    if len(words) > 5 and len(set(words)) / len(words) < 0.3:
        return True

    # Shouting: more than 70% uppercase in a 21+ char message
    if length > 20 and sum(c.isupper() for c in text) / length > 0.7:
        return True

    # Symbol flood: over half non-alphanumeric/non-space in an 11+ char message
    symbols = sum(not c.isalnum() and not c.isspace() for c in text)
    return length > 10 and symbols / length > 0.5
\ No newline at end of file