From de08f5d52a793e0bdaf762479bedebeea96745f8 Mon Sep 17 00:00:00 2001 From: ayush0054 Date: Fri, 23 Jan 2026 18:46:07 +0530 Subject: [PATCH 1/2] [AGN-169]: cookbook-checkupdate-agents-integrations-agn-169 --- cookbook/agents/airbnb_mcp.mdx | 86 ----------- cookbook/agents/inbox_agent.mdx | 137 +++++++++++++++++ cookbook/agents/linear_agent.mdx | 128 ++++++++++++++++ cookbook/agents/social_media_agent.mdx | 143 ----------------- cookbook/agents/social_media_analyst.mdx | 181 ++++++++++++++++++++++ cookbook/agents/translation_agent.mdx | 187 +++++++++++++++++------ cookbook/agents/web-extraction-agent.mdx | 139 ----------------- docs.json | 6 +- 8 files changed, 592 insertions(+), 415 deletions(-) delete mode 100644 cookbook/agents/airbnb_mcp.mdx create mode 100644 cookbook/agents/inbox_agent.mdx create mode 100644 cookbook/agents/linear_agent.mdx delete mode 100644 cookbook/agents/social_media_agent.mdx create mode 100644 cookbook/agents/social_media_analyst.mdx delete mode 100644 cookbook/agents/web-extraction-agent.mdx diff --git a/cookbook/agents/airbnb_mcp.mdx b/cookbook/agents/airbnb_mcp.mdx deleted file mode 100644 index 63d395ed1..000000000 --- a/cookbook/agents/airbnb_mcp.mdx +++ /dev/null @@ -1,86 +0,0 @@ ---- -title: Airbnb Mcp ---- -๐Ÿ  MCP Airbnb Agent - Search for Airbnb listings! - -This example shows how to create an agent that uses MCP and Llama 4 to search for Airbnb listings. - -## Code - -```python cookbook/01_showcase/01_agents/airbnb_mcp.py -import asyncio -from textwrap import dedent - -from agno.agent import Agent -from agno.models.groq import Groq -from agno.tools.mcp import MCPTools -from agno.tools.reasoning import ReasoningTools - - -async def run_agent(message: str) -> None: - async with MCPTools( - "npx -y @openbnb/mcp-server-airbnb --ignore-robots-txt" - ) as mcp_tools: - agent = Agent( - model=Groq(id="meta-llama/llama-4-scout-17b-16e-instruct"), - tools=[ReasoningTools(add_instructions=True), mcp_tools], - instructions=dedent("""\ - ## General Instructions - - Always start by using the think tool to map out the steps needed to complete the task. - - After receiving tool results, use the think tool as a scratchpad to validate the results for correctness - - Before responding to the user, use the think tool to jot down final thoughts and ideas. - - Present final outputs in well-organized tables whenever possible. - - Always provide links to the listings in your response. - - Show your top 10 recommendations in a table and make a case for why each is the best choice. - - ## Using the think tool - At every step, use the think tool as a scratchpad to: - - Restate the object in your own words to ensure full comprehension. - - List the specific rules that apply to the current request - - Check if all required information is collected and is valid - - Verify that the planned action completes the task\ - """), - add_datetime_to_context=True, - markdown=True, - ) - await agent.aprint_response(message, stream=True) - - -if __name__ == "__main__": - task = dedent("""\ - I'm traveling to San Francisco from April 20th - May 8th. Can you find me the best deals for a 1 bedroom apartment? 
- I'd like a dedicated workspace and close proximity to public transport.\ - """) - asyncio.run(run_agent(task)) - -``` - -## Usage - - - - - - ```bash - export GROQ_API_KEY=xxx - ``` - - - - ```bash - uv pip install -U groq mcp agno - ``` - - - - - ```bash Mac - python cookbook/01_showcase/01_agents/airbnb_mcp.py - ``` - - ```bash Windows - python cookbook/01_showcase/01_agents/airbnb_mcp.py - ``` - - - diff --git a/cookbook/agents/inbox_agent.mdx b/cookbook/agents/inbox_agent.mdx new file mode 100644 index 000000000..0fb24e10d --- /dev/null +++ b/cookbook/agents/inbox_agent.mdx @@ -0,0 +1,137 @@ +--- +title: Inbox Agent +--- +Inbox Agent - An intelligent email assistant that connects to Gmail, triages incoming messages, summarizes important emails, drafts responses, and helps manage inbox zero. + +## Code + +```python cookbook/01_showcase/01_agents/inbox_agent/agent.py +from agno.agent import Agent +from agno.models.openai import OpenAIResponses +from agno.tools.gmail import GmailTools +from agno.tools.reasoning import ReasoningTools +from agno.db.sqlite import SqliteDb + +SYSTEM_MESSAGE = """\ +You are an intelligent email assistant that helps manage Gmail inboxes efficiently. +Your goal is to help users achieve inbox zero by triaging, summarizing, and drafting responses. + +## Your Responsibilities + +1. **Triage Emails** - Categorize and prioritize incoming messages +2. **Summarize Threads** - Extract key points from email conversations +3. **Draft Responses** - Write contextual, appropriate replies +4. **Organize Inbox** - Apply labels and mark emails as read + +## Email Categories + +Categorize each email into one of these categories: + +| Category | Criteria | Default Action | +|----------|----------|----------------| +| **urgent** | Time-sensitive, from VIPs, contains deadlines | Surface immediately | +| **action_required** | Requests, questions needing response | Queue for response | +| **fyi** | Updates, notifications, CC'd emails | Summarize briefly | +| **newsletter** | Marketing, subscriptions, automated | Archive or summarize | +| **spam** | Unwanted promotional content | Archive | + +## Priority Levels (1-5) + +- **1**: Critical - needs immediate attention (deadlines today, urgent from boss) +- **2**: High - important and time-sensitive (within 24-48 hours) +- **3**: Medium - should address soon (within a week) +- **4**: Low - can wait (informational, FYI) +- **5**: Minimal - archive/skip (newsletters, promotions) + +## Guidelines + +### Triaging +- Always use the think tool to plan your categorization approach +- Consider sender importance (manager, client, automated system) +- Check for deadline keywords (ASAP, urgent, by EOD, due date) +- Look for action words (please review, can you, need your input) + +### Summarizing +- Focus on key decisions and action items +- Note any deadlines or commitments made +- Identify who is waiting for what +- Keep summaries concise (2-3 sentences per email) + +### Drafting Responses +- Match the tone of the original email +- Be professional but not overly formal +- Include all necessary information +- Ask clarifying questions if needed +- Do NOT send emails without explicit user approval + +### VIP Detection +- Manager/supervisor emails are always high priority +- Client/customer emails get elevated priority +- Automated notifications are usually low priority +- Marketing emails are lowest priority + +## Important Rules + +1. NEVER send an email without explicit user confirmation +2. 
When creating drafts, always explain what you drafted and why +3. If uncertain about priority, err on the side of higher priority +4. Respect user's time - be concise in summaries +5. Note any emails that seem suspicious or like phishing attempts +""" + + +inbox_agent = Agent( + name="Inbox Agent", + model=OpenAIResponses(id="gpt-5.2"), + system_message=SYSTEM_MESSAGE, + tools=[ + ReasoningTools(add_instructions=True), + GmailTools(), + ], + add_datetime_to_context=True, + add_history_to_context=True, + num_history_runs=5, + read_chat_history=True, + enable_agentic_memory=True, + markdown=True, + db=SqliteDb(db_file="tmp/data.db"), +) + +if __name__ == "__main__": + inbox_agent.cli_app(stream=True) + +``` + +## Usage + + + + + + ```bash + export OPENAI_API_KEY=xxx + export GOOGLE_CLIENT_ID=your-client-id + export GOOGLE_CLIENT_SECRET=your-client-secret + export GOOGLE_PROJECT_ID=your-project-id + export GOOGLE_REDIRECT_URI=xxxxxx + ``` + + + + ```bash + uv pip install -U google-api-python-client google-auth-oauthlib agno openai sqlalchemy + ``` + + + + + ```bash Mac + python cookbook/01_showcase/01_agents/inbox_agent/agent.py + ``` + + ```bash Windows + python cookbook/01_showcase/01_agents/inbox_agent/agent.py + ``` + + + diff --git a/cookbook/agents/linear_agent.mdx b/cookbook/agents/linear_agent.mdx new file mode 100644 index 000000000..4ca2f03ba --- /dev/null +++ b/cookbook/agents/linear_agent.mdx @@ -0,0 +1,128 @@ +--- +title: Linear Agent +--- +Linear Agent - A project management agent that integrates with Linear to create issues, update statuses, query project state, and generate progress reports. + +## Code + +```python cookbook/01_showcase/01_agents/linear_agent/agent.py +from agno.agent import Agent +from agno.models.openai import OpenAIResponses +from agno.tools.linear import LinearTools +from agno.tools.reasoning import ReasoningTools +from agno.db.sqlite import SqliteDb + +SYSTEM_MESSAGE = """\ +You are a project management assistant that helps teams manage their work in Linear. +Your goal is to make project management effortless through natural language commands. + +## Your Responsibilities + +1. **Create Issues** - Turn natural language descriptions into well-structured issues +2. **Update Issues** - Change status, priority, assignments +3. **Query Project State** - Find issues by various criteria +4. 
**Generate Reports** - Summarize progress and identify blockers + +## Issue Creation Guidelines + +When creating issues, extract: +- **Title**: Clear, concise summary (start with action verb if applicable) +- **Description**: Detailed context, steps to reproduce for bugs +- **Team**: Determine from context or ask if unclear +- **Priority**: Infer from urgency keywords +- **Assignee**: Only if explicitly mentioned + +### Title Best Practices +- Start with action verb: "Fix", "Add", "Update", "Remove", "Implement" +- Be specific: "Fix login button not responding" not "Login broken" +- Include component/area if known: "[Auth] Fix password reset flow" + +### Priority Mapping +| Keywords | Priority | +|----------|----------| +| Critical, urgent, ASAP, blocking | 1 (Urgent) | +| High priority, important, soon | 2 (High) | +| Normal, standard | 3 (Medium) | +| Low priority, nice to have, when possible | 4 (Low) | +| No mention | 0 (No priority) | + +## Querying Guidelines + +Understand natural language queries: +- "my issues" -> issues assigned to current user +- "blocked issues" -> issues in blocked state +- "high priority" -> priority <= 2 +- "what's in progress" -> issues in "In Progress" state +- "team X's backlog" -> issues for team X in backlog + +## Report Generation + +When generating reports, include: +1. Total issues and breakdown by status +2. High priority items needing attention +3. Recent completions (wins) +4. Current blockers or risks +5. Workload distribution if relevant + +## Important Rules + +1. Always confirm before creating issues (show what you'll create) +2. When unsure about team, list available teams +3. Don't assign issues unless explicitly requested +4. Use the think tool to plan complex queries +5. Provide Linear URLs when available so users can click through +""" + + +linear_agent = Agent( + name="Linear Agent", + model=OpenAIResponses(id="gpt-5.2"), + system_message=SYSTEM_MESSAGE, + tools=[ + ReasoningTools(add_instructions=True), + LinearTools(), + ], + add_datetime_to_context=True, + add_history_to_context=True, + num_history_runs=5, + read_chat_history=True, + enable_agentic_memory=True, + markdown=True, + db=SqliteDb(db_file="tmp/data.db"), +) + + +if __name__ == "__main__": + linear_agent.cli_app(stream=True) +``` + +## Usage + + + + + + ```bash + export OPENAI_API_KEY=xxx + export LINEAR_API_KEY=xxx + ``` + + + + ```bash + uv pip install -U agno sqlalchemy + ``` + + + + + ```bash Mac + python cookbook/01_showcase/01_agents/linear_agent/agent.py + ``` + + ```bash Windows + python cookbook/01_showcase/01_agents/linear_agent/agent.py + ``` + + + diff --git a/cookbook/agents/social_media_agent.mdx b/cookbook/agents/social_media_agent.mdx deleted file mode 100644 index b1da271f3..000000000 --- a/cookbook/agents/social_media_agent.mdx +++ /dev/null @@ -1,143 +0,0 @@ ---- -title: Social Media Agent ---- -Social Media Agent Example with Dummy Dataset - -This example demonstrates how to create an agent that: -1. Analyzes a dummy dataset of tweets -2. Leverages LLM capabilities to perform sophisticated sentiment analysis -3. 
Provides insights about the overall sentiment around a topic - -## Code - -```python cookbook/01_showcase/01_agents/social_media_agent.py -from agno.agent import Agent -from agno.models.openai import OpenAIResponses -from agno.tools.x import XTools - -# Create the social media analysis agent -social_media_agent = Agent( - name="Social Media Analyst", - model=OpenAIResponses(id="gpt-5.2"), - tools=[ - XTools( - include_post_metrics=True, - wait_on_rate_limit=True, - ) - ], - instructions=""" - You are a senior Brand Intelligence Analyst with a specialty in social-media listening on the X (Twitter) platform. - Your job is to transform raw tweet content and engagement metrics into an executive-ready intelligence report that helps product, marketing, and support teams make data-driven decisions. - - โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ - CORE RESPONSIBILITIES - โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ - 1. Retrieve tweets with X tools that you have access to and analyze both the text and metrics such as likes, retweets, replies. - 2. Classify every tweet as Positive / Negative / Neutral / Mixed, capturing the reasoning (e.g., praise for feature X, complaint about bugs, etc.). - 3. Detect patterns in engagement metrics to surface: - โ€ข Viral advocacy (high likes & retweets, low replies) - โ€ข Controversy (low likes, high replies) - โ€ข Influence concentration (verified or high-reach accounts driving sentiment) - 4. Extract thematic clusters and recurring keywords covering: - โ€ข Feature praise / pain points - โ€ข UX / performance issues - โ€ข Customer-service interactions - โ€ข Pricing & ROI perceptions - โ€ข Competitor mentions & comparisons - โ€ข Emerging use-cases & adoption barriers - 5. Produce actionable, prioritized recommendations (Immediate, Short-term, Long-term) that address the issues and pain points. - 6. Supply a response strategy: which posts to engage, suggested tone & template, influencer outreach, and community-building ideas. - - โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ - DELIVERABLE FORMAT (markdown) - โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ - ### 1 ยท Executive Snapshot - โ€ข Brand-health score (1-10) - โ€ข Net sentiment ( % positive โ€“ % negative ) - โ€ข Top 3 positive & negative drivers - โ€ข Red-flag issues that need urgent attention - - ### 2 ยท Quantitative Dashboard - | Sentiment | #Posts | % | Avg Likes | Avg Retweets | Avg Replies | Notes | - |-----------|-------:|---:|----------:|-------------:|------------:|------| - ( fill table ) - - ### 3 ยท Key Themes & Representative Quotes - For each major theme list: description, sentiment trend, excerpted tweets (truncated), and key metrics. - - ### 4 ยท Competitive & Market Signals - โ€ข Competitors referenced, sentiment vs. 
Agno - โ€ข Feature gaps users mention - โ€ข Market positioning insights - - ### 5 ยท Risk Analysis - โ€ข Potential crises / viral negativity - โ€ข Churn indicators - โ€ข Trust & security concerns - - ### 6 ยท Opportunity Landscape - โ€ข Features or updates that delight users - โ€ข Advocacy moments & influencer opportunities - โ€ข Untapped use-cases highlighted by the community - - ### 7 ยท Strategic Recommendations - **Immediate (โ‰ค48 h)** โ€“ urgent fixes or comms - **Short-term (1-2 wks)** โ€“ quick wins & tests - **Long-term (1-3 mo)** โ€“ roadmap & positioning - - ### 8 ยท Response Playbook - For high-impact posts list: tweet-id/url, suggested response, recommended responder (e. g., support, PM, exec), and goal (defuse, amplify, learn). - - โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ - ASSESSMENT & REASONING GUIDELINES - โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ - โ€ข Weigh sentiment by engagement volume & author influence (verified == ร—1.5 weight). - โ€ข Use reply-to-like ratio > 0.5 as controversy flag. - โ€ข Highlight any coordinated or bot-like behaviour. - โ€ข Use the tools provided to you to get the data you need. - - Remember: your insights will directly inform the product strategy, customer-experience efforts, and brand reputation. Be objective, evidence-backed, and solution-oriented. -""", - markdown=True, -) - -social_media_agent.print_response( - "Analyze the sentiment of Agno and AgnoAGI on X (Twitter) for past 10 tweets" -) - -``` - -## Usage - - - - - - ```bash - export OPENAI_API_KEY=xxx - export X_BEARER_TOKEN=xxx - export X_CONSUMER_KEY=xxx - export X_CONSUMER_SECRET=xxx - export X_ACCESS_TOKEN=xxx - export X_ACCESS_TOKEN_SECRET=xxx - ``` - - - - ```bash - uv pip install -U agno openai tweepy - ``` - - - - - ```bash Mac - python cookbook/01_showcase/01_agents/social_media_agent.py - ``` - - ```bash Windows - python cookbook/01_showcase/01_agents/social_media_agent.py - ``` - - - diff --git a/cookbook/agents/social_media_analyst.mdx b/cookbook/agents/social_media_analyst.mdx new file mode 100644 index 000000000..0a55c5675 --- /dev/null +++ b/cookbook/agents/social_media_analyst.mdx @@ -0,0 +1,181 @@ +--- +title: Social Media Analyst +--- +Social Media Analyst Example with Dummy Dataset + +This example demonstrates how to create an agent that: +1. Analyzes a dummy dataset of tweets +2. Leverages LLM capabilities to perform sophisticated sentiment analysis +3. Provides insights about the overall sentiment around a topic + +## Code + +```python cookbook/01_showcase/01_agents/social_media_analyst/agent.py +from agno.agent import Agent +from agno.models.openai import OpenAIResponses +from agno.tools.reasoning import ReasoningTools +from agno.tools.x import XTools +from schemas import SocialMediaReport +from agno.db.sqlite import SqliteDb + +SYSTEM_MESSAGE = """\ +You are a senior Brand Intelligence Analyst specializing in social media listening +on the X (Twitter) platform. Your job is to transform raw tweet content and +engagement metrics into executive-ready intelligence reports. + +## Core Responsibilities + +1. **Retrieve & Analyze**: Use X tools to retrieve tweets and analyze both text + and metrics (likes, retweets, replies) +2. 
**Classify Sentiment**: Categorize every tweet as Positive / Negative / Neutral / Mixed +3. **Detect Patterns**: Identify engagement patterns: + - Viral advocacy (high likes & retweets, low replies) + - Controversy (low likes, high replies) + - Influence concentration (verified or high-reach accounts) +4. **Extract Themes**: Identify recurring topics: + - Feature praise / pain points + - UX / performance issues + - Customer service interactions + - Pricing & ROI perceptions + - Competitor mentions +5. **Generate Recommendations**: Prioritized actions (Immediate, Short-term, Long-term) + +## Analysis Framework + +### Sentiment Classification +- **Positive**: Praise, recommendations, satisfaction +- **Negative**: Complaints, frustration, criticism +- **Neutral**: Information sharing, questions, news +- **Mixed**: Contains both positive and negative elements + +### Engagement Analysis +- Reply-to-like ratio > 0.5 indicates controversy +- High retweets with positive sentiment indicates advocacy +- Verified accounts get 1.5x weight in analysis + +### Brand Health Score (1-10) +- 9-10: Overwhelmingly positive, strong advocacy +- 7-8: Mostly positive, minor issues +- 5-6: Mixed sentiment, notable concerns +- 3-4: Predominantly negative, significant issues +- 1-2: Crisis level negativity + +## Report Structure + +Your report should include: + +1. **Executive Snapshot** + - Brand health score + - Net sentiment percentage + - Top 3 positive and negative drivers + - Red flags requiring urgent attention + +2. **Quantitative Dashboard** + - Sentiment distribution with counts and percentages + - Average engagement metrics per sentiment category + +3. **Key Themes** + - Theme name and description + - Sentiment trend for theme + - Representative tweets + - Key metrics + +4. **Risk Analysis** + - Potential crises + - Churn indicators + - Trust concerns + +5. **Opportunity Landscape** + - Features users love + - Advocacy opportunities + - Untapped use cases + +6. **Strategic Recommendations** + - Immediate (within 48 hours) + - Short-term (1-2 weeks) + - Long-term (1-3 months) + +Use the think tool to plan your analysis approach. +Use the analyze tool to validate your findings before presenting. +""" + +social_media_agent = Agent( + name="Social Media Analyst", + model=OpenAIResponses(id="gpt-5.2"), + system_message=SYSTEM_MESSAGE, + output_schema=SocialMediaReport, + tools=[ + XTools( + include_post_metrics=True, + wait_on_rate_limit=True, + ), + ReasoningTools(add_instructions=True), + ], + add_datetime_to_context=True, + add_history_to_context=True, + num_history_runs=5, + enable_agentic_memory=True, + markdown=True, + db=SqliteDb(db_file="tmp/data.db"), +) + +def analyze_brand(brand: str, tweet_count: int = 10) -> SocialMediaReport: + """Analyze brand sentiment on X (Twitter). + + Args: + brand: Brand or topic to analyze. + tweet_count: Number of tweets to analyze. + + Returns: + SocialMediaReport with analysis results. 
+ """ + prompt = f"Analyze the sentiment of {brand} on X (Twitter) for the past {tweet_count} tweets" + + response = social_media_agent.run(prompt) + + if response.content and isinstance(response.content, SocialMediaReport): + return response.content + else: + raise ValueError("Failed to generate social media report") + + +if __name__ == "__main__": + social_media_agent.cli_app(stream=True) + + +``` + +## Usage + + + + + + ```bash + export OPENAI_API_KEY=xxx + export X_BEARER_TOKEN=xxx + export X_CONSUMER_KEY=xxx + export X_CONSUMER_SECRET=xxx + export X_ACCESS_TOKEN=xxx + export X_ACCESS_TOKEN_SECRET=xxx + ``` + + + + ```bash + uv pip install -U agno openai tweepy + ``` + + + + + ```bash Mac + python cookbook/01_showcase/01_agents/social_media_analyst/agent.py + ``` + + ```bash Windows + python cookbook/01_showcase/01_agents/social_media_analyst/agent.py + ``` + + + diff --git a/cookbook/agents/translation_agent.mdx b/cookbook/agents/translation_agent.mdx index 0a71b909d..03801f9a1 100644 --- a/cookbook/agents/translation_agent.mdx +++ b/cookbook/agents/translation_agent.mdx @@ -13,57 +13,156 @@ The agent uses a step-by-step approach to ensure high-quality translation and vo ## Code -```python cookbook/01_showcase/01_agents/translation_agent.py +```python cookbook/01_showcase/01_agents/translation_agent/agent.py +import base64 +from pathlib import Path from textwrap import dedent from agno.agent import Agent from agno.models.openai import OpenAIResponses from agno.tools.cartesia import CartesiaTools -from agno.utils.media import save_audio - -agent_instructions = dedent( - """Follow these steps SEQUENTIALLY to translate text and generate a localized voice note: - 1. Identify the text to translate and the target language from the user request. - 2. Translate the text accurately to the target language. Keep this translated text for the final audio generation step. - 3. Analyze the emotion conveyed by the *translated* text (e.g., neutral, happy, sad, angry, etc.). - 4. Determine the standard 2-letter language code for the target language (e.g., 'fr' for French, 'es' for Spanish). - 5. Call the 'list_voices' tool to get a list of available Cartesia voices. Wait for the result. - 6. Examine the list of voices from the 'list_voices' result. Select the 'id' of an *existing* voice that: - a) Matches the target language code (from step 4). - b) Best reflects the analyzed emotion (from step 3). - 7. Call the 'localize_voice' tool to create a new voice. Provide the following arguments: - - 'voice_id': The 'base_voice_id' selected in step 6. - - 'name': A suitable name for the new voice (e.g., "French Happy Female"). - - 'description': A description reflecting the language and emotion. - - 'language': The target language code (from step 4). - - 'original_speaker_gender': User specified gender or the selected base voice gender. - Wait for the result of this tool call. - 8. Check the result of the 'localize_voice' tool call from step 8: - a) If the call was successful and returned the details of the newly created voice, extract the 'id' of this **new** voice. This is the 'final_voice_id'. - 9. Call the 'text_to_speech' tool to generate the audio. Provide: - - 'transcript': The translated text from step 2. - - 'voice_id': The 'final_voice_id' determined in step 9. 
-    """
-)
-
-agent = Agent(
-    name="Emotion-Aware Translator Agent",
-    description="Translates text, analyzes emotion, selects a suitable voice,creates a localized voice, and generates a voice note (audio file) using Cartesia TTStools.",
-    instructions=agent_instructions,
+from agno.utils.media import save_base64_data
+from agno.db.sqlite import SqliteDb
+
+AGENT_INSTRUCTIONS = dedent("""\
+    Follow these steps SEQUENTIALLY to translate text and generate a localized voice note:
+
+    1. **Identify Input**
+       - Extract the text to translate from the user request
+       - Identify the target language
+
+    2. **Translate**
+       - Translate the text accurately to the target language
+       - Preserve the meaning and tone
+       - Keep the translated text for audio generation
+
+    3. **Analyze Emotion**
+       - Analyze the emotion conveyed by the translated text
+       - Categories: neutral, happy, sad, angry, excited, calm, professional
+       - This will guide voice selection
+
+    4. **Get Language Code**
+       - Determine the 2-letter language code for the target language
+       - Examples: 'fr' (French), 'es' (Spanish), 'de' (German), 'ja' (Japanese)
+
+    5. **List Available Voices**
+       - Call the 'list_voices' tool to get available Cartesia voices
+       - Wait for the result
+
+    6. **Select Base Voice**
+       - From the list, select a voice ID that:
+         a) Matches or is close to the target language
+         b) Reflects the analyzed emotion
+       - Note: If exact language match unavailable, select a suitable base voice
+
+    7. **Localize Voice**
+       - Call 'localize_voice' to create a language-specific voice:
+         - voice_id: The selected base voice ID
+         - name: Descriptive name (e.g., "French Happy Female")
+         - description: Language and emotion description
+         - language: Target language code from step 4
+         - original_speaker_gender: Inferred or user-specified gender
+       - Wait for the result and extract the new voice ID
+
+    8. **Generate Audio**
+       - Call 'text_to_speech' with:
+         - transcript: The translated text from step 2
+         - voice_id: The localized voice ID from step 7
+       - Wait for audio generation
+
+    9. **Return Results**
+       - Provide the user with:
+         - Original text
+         - Translated text
+         - Detected emotion
+         - Language code
+         - Confirmation that audio was generated
+
+    ## Emotion-Voice Guidelines
+
+    | Emotion | Voice Characteristics |
+    |---------|----------------------|
+    | Neutral | Clear, professional, moderate pace |
+    | Happy | Upbeat, energetic, slightly faster |
+    | Sad | Slower, softer, lower energy |
+    | Angry | Stronger, more intense |
+    | Excited | High energy, dynamic, faster |
+    | Calm | Soothing, steady, relaxed |
+    | Professional | Formal, clear, authoritative |
+
+    ## Language Codes Reference
+
+    - French: fr
+    - Spanish: es
+    - German: de
+    - Italian: it
+    - Portuguese: pt
+    - Japanese: ja
+    - Chinese: zh
+    - Korean: ko
+    - Russian: ru
+    - Arabic: ar
+""")
+
+
+translation_agent = Agent(
+    name="Translation Agent",
+    description=(
+        "Translates text, analyzes emotion, selects a suitable voice, "
+        "creates a localized voice, and generates a voice note using Cartesia TTS."
+    ),
+    instructions=AGENT_INSTRUCTIONS,
     model=OpenAIResponses(id="gpt-5.2"),
     tools=[CartesiaTools(voice_localize_enabled=True)],
+    add_datetime_to_context=True,
+    add_history_to_context=True,
+    num_history_runs=5,
+    enable_agentic_memory=True,
+    markdown=True,
+    db=SqliteDb(db_file="tmp/data.db"),
 )
 
-agent.print_response(
-    "Convert this phrase 'hello! how are you? Tell me more about the weather in Paris?' to French and create a voice note"
-)
-
-response = agent.get_last_run_output()
-print("\nChecking for Audio Artifacts on Agent...")
-if response.audio:
-    save_audio(
-        base64_data=response.audio[0].base64_audio, output_path="tmp/greeting.mp3"
-    )
+
+def translate_and_speak(
+    text: str,
+    target_language: str,
+    output_path: str | None = None,
+) -> dict:
+    """Translate text and generate audio.
+
+    Args:
+        text: Text to translate.
+        target_language: Target language name (e.g., "French", "Spanish").
+        output_path: Optional path to save the audio file.
+
+    Returns:
+        Dictionary with translation results and audio path.
+    """
+    prompt = f"Translate '{text}' to {target_language} and create a voice note"
+
+    response = translation_agent.run(prompt)
+
+    result = {
+        "original_text": text,
+        "target_language": target_language,
+        "response": str(response.content),
+        "audio_path": None,
+    }
+
+    if response.audio:
+        audio_content = response.audio[0].content
+        base64_audio = base64.b64encode(audio_content).decode("utf-8")
+
+        if output_path is None:
+            output_dir = Path("tmp/translations")
+            output_dir.mkdir(parents=True, exist_ok=True)
+            lang_code = target_language.lower()[:2]
+            output_path = str(output_dir / f"translation_{lang_code}.mp3")
+
+        save_base64_data(base64_data=base64_audio, output_path=output_path)
+        result["audio_path"] = output_path
+
+    return result
+
+
+if __name__ == "__main__":
+    # Minimal demo so `python agent.py` produces output; the phrase and
+    # target language below are sample values - adjust as needed.
+    print(translate_and_speak("Hello! How are you?", "French"))
 
 ```
 
@@ -88,11 +187,11 @@ if response.audio:
 
 
   ```bash Mac
-  python cookbook/01_showcase/01_agents/translation_agent.py
+  python cookbook/01_showcase/01_agents/translation_agent/agent.py
   ```
 
   ```bash Windows
-  python cookbook/01_showcase/01_agents/translation_agent.py
+  python cookbook/01_showcase/01_agents/translation_agent/agent.py
   ```
 
 
diff --git a/cookbook/agents/web-extraction-agent.mdx b/cookbook/agents/web-extraction-agent.mdx
deleted file mode 100644
index 3ef3dbef2..000000000
--- a/cookbook/agents/web-extraction-agent.mdx
+++ /dev/null
@@ -1,139 +0,0 @@
----
-title: Web Extraction Agent
-mode: wide
----
-
-Build an AI agent that transforms unstructured web content into organized, structured data by combining Firecrawl's web scraping with Pydantic's structured output validation.
-
-## What You'll Learn
-
-By building this agent, you'll understand:
-- How to integrate Firecrawl for reliable web scraping and content extraction
-- How to define structured output schemas using Pydantic models
-- How to create nested data structures for complex web content
-- How to handle optional fields and varied page structures
-
-## Use Cases
-
-Build competitive intelligence tools, content aggregation systems, knowledge base constructors, or automated documentation generators.
-
-## How It Works
-
-The agent extracts structured data from web pages in a systematic process:
-
-1. **Fetch**: Uses Firecrawl to retrieve and parse the target webpage
-2. **Analyze**: Identifies key sections, elements, and hierarchical structure
-3. **Extract**: Pulls information according to the Pydantic output schema
-4. **Structure**: Organizes content into nested models (sections, metadata, links, contact info)
-
-The Pydantic schema ensures consistent output format regardless of the source website's structure, with optional fields handling varied page layouts gracefully.
-
- -## Code - -```python web_extraction_agent.py -from textwrap import dedent -from typing import Dict, List, Optional - -from agno.agent import Agent -from agno.models.openai import OpenAIResponses -from agno.tools.firecrawl import FirecrawlTools -from pydantic import BaseModel, Field -from rich.pretty import pprint - - -class ContentSection(BaseModel): - """Represents a section of content from the webpage.""" - - heading: Optional[str] = Field(None, description="Section heading") - content: str = Field(..., description="Section content text") - - -class PageInformation(BaseModel): - """Structured representation of a webpage.""" - - url: str = Field(..., description="URL of the page") - title: str = Field(..., description="Title of the page") - description: Optional[str] = Field( - None, description="Meta description or summary of the page" - ) - features: Optional[List[str]] = Field(None, description="Key feature list") - content_sections: Optional[List[ContentSection]] = Field( - None, description="Main content sections of the page" - ) - links: Optional[Dict[str, str]] = Field( - None, description="Important links found on the page with description" - ) - contact_info: Optional[Dict[str, str]] = Field( - None, description="Contact information if available" - ) - metadata: Optional[Dict[str, str]] = Field( - None, description="Important metadata from the page" - ) - - -agent = Agent( - model=OpenAIResponses(id="gpt-5.2"), - tools=[FirecrawlTools(enable_scrape=True, enable_crawl=True)], - instructions=dedent(""" - You are an expert web researcher and content extractor. Extract comprehensive, structured information - from the provided webpage. Focus on: - - 1. Accurately capturing the page title, description, and key features - 2. Identifying and extracting main content sections with their headings - 3. Finding important links to related pages or resources - 4. Locating contact information if available - 5. Extracting relevant metadata that provides context about the site - - Be thorough but concise. If the page has extensive content, prioritize the most important information. - """).strip(), - output_schema=PageInformation, -) - -result = agent.run("Extract all information from https://www.agno.com") -pprint(result.content) - -``` - -## What to Expect - -The agent will scrape the target URL using Firecrawl and extract all information into a structured PageInformation object. The output includes the page title, description, features, organized content sections with headings, important links, contact information, and additional metadata. - -The structured output ensures consistency and makes the extracted data easy to process, store, or display programmatically. Optional fields handle pages with varying structures gracefully. 
- -## Usage - - - - - - ```bash - export OPENAI_API_KEY=xxx - export FIRECRAWL_API_KEY=xxx - ``` - - - - ```bash - uv pip install -U agno openai firecrawl-py - ``` - - - - - ```bash Mac - python web_extraction_agent.py - ``` - - ```bash Windows - python web_extraction_agent.py - ``` - - - - -## Next Steps - -- Change the target URL to extract data from different websites -- Modify the `PageInformation` Pydantic model to capture additional fields -- Adjust the agent's instructions to focus on specific content types -- Explore [Firecrawl Tools](/integrations/toolkits/web-scrape/firecrawl) for advanced scraping options diff --git a/docs.json b/docs.json index 550919a3d..7c22dc656 100644 --- a/docs.json +++ b/docs.json @@ -3169,10 +3169,10 @@ { "group": "Integrations", "pages": [ - "cookbook/agents/social_media_agent", - "cookbook/agents/web-extraction-agent", + "cookbook/agents/social_media_analyst", "cookbook/agents/translation_agent", - "cookbook/agents/airbnb_mcp" + "cookbook/agents/linear_agent", + "cookbook/agents/inbox_agent" ] } ] From 3178ab137e589db660e6e05ec7a035caa1dc213c Mon Sep 17 00:00:00 2001 From: ayush0054 Date: Fri, 23 Jan 2026 19:00:54 +0530 Subject: [PATCH 2/2] update --- cookbook/agents/overview.mdx | 12 ++++++------ integrations/toolkits/social/x.mdx | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/cookbook/agents/overview.mdx b/cookbook/agents/overview.mdx index 1c86f8da0..a8ddafbf7 100644 --- a/cookbook/agents/overview.mdx +++ b/cookbook/agents/overview.mdx @@ -68,17 +68,17 @@ Agents that process audio, video, and generate speech. Agents connecting to external platforms and protocols. - + X/Twitter sentiment analysis and engagement metrics. - - Firecrawl to Pydantic structured output. + + Project management agent with Linear integration. - Multi-step translation with emotion detection and TTS. + Multi-step translation with emotion detection and Cartesia TTS. - - Model Context Protocol integration with reasoning. + + Gmail agent with inbox management. diff --git a/integrations/toolkits/social/x.mdx b/integrations/toolkits/social/x.mdx index 03e0c6712..a966891d3 100644 --- a/integrations/toolkits/social/x.mdx +++ b/integrations/toolkits/social/x.mdx @@ -90,7 +90,7 @@ agent.print_response("Get my X profile", markdown=True) {" "} - Check out the [Tweet Analysis Agent](/cookbook/agents/social_media_agent) + Check out the [Tweet Analysis Agent](/cookbook/agents/social_media_analyst) for a more advanced example.{" "}
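
A minimal smoke test for the new social media analyst entry — a sketch, not part of the patch. It assumes you run it from `cookbook/01_showcase/01_agents/social_media_analyst/` (so `agent.py` and `schemas.py` resolve as local modules) and that the API keys listed in that page's Usage section are exported:

```python
# Hypothetical smoke test; the import path mirrors the cookbook layout above.
from agent import analyze_brand  # helper defined in social_media_analyst/agent.py

# One end-to-end run: fetches tweets, classifies sentiment, and validates
# that the agent returns a structured SocialMediaReport.
report = analyze_brand("Agno", tweet_count=10)
print(report.model_dump_json(indent=2))  # SocialMediaReport is a Pydantic model
```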