diff --git a/README.md b/README.md index e072218..726faff 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -## Agent Api +## Agent API This repo contains the code for a production-grade agentic system built with: @@ -42,13 +42,7 @@ Required: Set the `OPENAI_API_KEY` environment variable using export OPENAI_API_KEY=*** ``` -> you may use any model provider, just need to update the agents in the /agents folder - -Optional: Set the `EXA_API_KEY` if you'd like to use Exa search - -```sh -export EXA_API_KEY=*** -``` +> You may use any supported model provider, just need to update the respective Agent, Team or Workflow. 3. Start the workspace: diff --git a/agents/sage.py b/agents/sage.py index d58108b..573214b 100644 --- a/agents/sage.py +++ b/agents/sage.py @@ -7,11 +7,12 @@ from agno.tools.duckduckgo import DuckDuckGoTools from agno.vectordb.pgvector import PgVector, SearchType +from agents.settings import agent_settings from db.session import db_url def get_sage( - model_id: str = "gpt-4o", + model_id: Optional[str] = None, user_id: Optional[str] = None, session_id: Optional[str] = None, debug_mode: bool = True, @@ -22,12 +23,18 @@ def get_sage( additional_context += f"You are interacting with the user: {user_id}" additional_context += "" + model_id = model_id or agent_settings.gpt_4 + return Agent( name="Sage", agent_id="sage", user_id=user_id, session_id=session_id, - model=OpenAIChat(id=model_id), + model=OpenAIChat( + id=model_id, + max_tokens=agent_settings.default_max_completion_tokens, + temperature=agent_settings.default_temperature, + ), # Tools available to the agent tools=[DuckDuckGoTools()], # Storage for the agent diff --git a/agents/scholar.py b/agents/scholar.py index 477de6b..c459f8a 100644 --- a/agents/scholar.py +++ b/agents/scholar.py @@ -6,15 +6,18 @@ from agno.storage.agent.postgres import PostgresAgentStorage from agno.tools.duckduckgo import DuckDuckGoTools +from agents.settings import agent_settings from db.session import db_url def 
get_scholar( - model_id: str = "gpt-4o", + model_id: Optional[str] = None, user_id: Optional[str] = None, session_id: Optional[str] = None, debug_mode: bool = True, ) -> Agent: + model_id = model_id or agent_settings.gpt_4 + additional_context = "" if user_id: additional_context += "" @@ -26,7 +29,11 @@ def get_scholar( agent_id="scholar", user_id=user_id, session_id=session_id, - model=OpenAIChat(id=model_id), + model=OpenAIChat( + id=model_id, + max_tokens=agent_settings.default_max_completion_tokens, + temperature=agent_settings.default_temperature, + ), # Tools available to the agent tools=[DuckDuckGoTools()], # Storage for the agent diff --git a/agents/settings.py b/agents/settings.py new file mode 100644 index 0000000..4986e7c --- /dev/null +++ b/agents/settings.py @@ -0,0 +1,18 @@ +from pydantic_settings import BaseSettings + + +class AgentSettings(BaseSettings): + """Agent settings that can be set using environment variables. + + Reference: https://pydantic-docs.helpmanual.io/usage/settings/ + """ + + gpt_4_mini: str = "gpt-4o-mini" + gpt_4: str = "gpt-4o" + embedding_model: str = "text-embedding-3-small" + default_max_completion_tokens: int = 16000 + default_temperature: float = 0 + + +# Create an AgentSettings object +agent_settings = AgentSettings() diff --git a/api/routes/playground.py b/api/routes/playground.py index b0d5bb6..a7235fa 100644 --- a/api/routes/playground.py +++ b/api/routes/playground.py @@ -4,8 +4,8 @@ from agents.sage import get_sage from agents.scholar import get_scholar -from teams.finance_researcher_team import get_finance_researcher_team -from teams.multi_language_team import get_multi_language_team +from teams.finance_researcher import get_finance_researcher_team +from teams.multi_language import get_multi_language_team from workflows.blog_post_generator import get_blog_post_generator from workflows.investment_report_generator import get_investment_report_generator from workspace.dev_resources import dev_fastapi diff --git 
a/api/routes/teams.py b/api/routes/teams.py new file mode 100644 index 0000000..0e96369 --- /dev/null +++ b/api/routes/teams.py @@ -0,0 +1,98 @@ +from enum import Enum +from typing import AsyncGenerator, List, Optional + +from agno.team import Team +from fastapi import APIRouter, HTTPException, status +from fastapi.responses import StreamingResponse +from pydantic import BaseModel + +from teams.operator import TeamType, get_available_teams, get_team +from utils.log import logger + +###################################################### +## Router for the Teams Interface +###################################################### + +teams_router = APIRouter(prefix="/teams", tags=["Teams"]) + + +class Model(str, Enum): + gpt_4o = "gpt-4o" + o3_mini = "o3-mini" + + +@teams_router.get("", response_model=List[str]) +async def list_teams(): + """ + Returns a list of all available team IDs. + + Returns: + List[str]: List of team identifiers + """ + return get_available_teams() + + +async def chat_response_streamer(team: Team, message: str) -> AsyncGenerator: + """ + Stream team responses chunk by chunk. + + Args: + team: The team instance to interact with + message: User message to process + + Yields: + Text chunks from the team response + """ + run_response = await team.arun(message, stream=True) + async for chunk in run_response: + # chunk.content only contains the text response from the Agent. + # For advanced use cases, we should yield the entire chunk + # that contains the tool calls and intermediate steps. + yield chunk.content + + +class RunRequest(BaseModel): + """Request model for running a team""" + + message: str + stream: bool = True + model: Model = Model.gpt_4o + user_id: Optional[str] = None + session_id: Optional[str] = None + + +@teams_router.post("/{team_id}/runs", status_code=status.HTTP_200_OK) +async def run_team(team_id: TeamType, body: RunRequest): + """ + Sends a message to a specific team and returns the response. 
+ + Args: + team_id: The ID of the team to interact with + body: Request parameters including the message + + Returns: + Either a streaming response or the complete team response + """ + logger.debug(f"RunRequest: {body}") + + try: + team: Team = get_team( + model_id=body.model.value, + team_id=team_id, + user_id=body.user_id, + session_id=body.session_id, + ) + except Exception as e: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"Team not found: {str(e)}") + + if body.stream: + return StreamingResponse( + chat_response_streamer(team, body.message), + media_type="text/event-stream", + ) + else: + response = await team.arun(body.message, stream=False) + # response.content only contains the text response from the Agent. + # For advanced use cases, we should yield the entire response + # that contains the tool calls and intermediate steps. + return response.content diff --git a/api/routes/v1_router.py b/api/routes/v1_router.py index d76ca9a..0af814b 100644 --- a/api/routes/v1_router.py +++ b/api/routes/v1_router.py @@ -3,8 +3,10 @@ from api.routes.agents import agents_router from api.routes.playground import playground_router from api.routes.status import status_router +from api.routes.teams import teams_router v1_router = APIRouter(prefix="/v1") v1_router.include_router(status_router) v1_router.include_router(agents_router) +v1_router.include_router(teams_router) v1_router.include_router(playground_router) diff --git a/pyproject.toml b/pyproject.toml index afc3e1c..80c3b31 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,12 +6,11 @@ readme = "README.md" authors = [{ name = "Agno", email = "hello@agno.com" }] dependencies = [ - "agno[aws]==1.3.5", + "agno[aws]==1.4.6", "aiofiles", "alembic", "beautifulsoup4", "duckduckgo-search", - "exa_py", "fastapi[standard]", "googlesearch-python", "lxml_html_clean", @@ -20,10 +19,7 @@ dependencies = [ "pgvector", "psycopg[binary]", "pycountry", - "pypdf", "sqlalchemy", - "streamlit", - "tiktoken", 
"typer", "yfinance", ] diff --git a/requirements.txt b/requirements.txt index b634bf7..141fda1 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,19 +1,15 @@ # This file was autogenerated by uv via the following command: # ./scripts/generate_requirements.sh -agno==1.3.5 +agno==1.4.6 agno-aws==0.0.1 agno-docker==0.0.1 aiofiles==24.1.0 alembic==1.15.1 -altair==5.5.0 annotated-types==0.7.0 anyio==4.9.0 -attrs==25.3.0 beautifulsoup4==4.13.3 -blinker==1.9.0 boto3==1.37.19 botocore==1.37.19 -cachetools==5.5.2 certifi==2025.1.31 charset-normalizer==3.4.1 click==8.1.8 @@ -23,7 +19,6 @@ docker==7.1.0 docstring-parser==0.16 duckduckgo-search==7.5.3 email-validator==2.2.0 -exa-py==1.9.1 fastapi==0.115.12 fastapi-cli==0.0.7 feedparser==6.0.11 @@ -37,13 +32,10 @@ httpcore==1.0.7 httptools==0.6.4 httpx==0.28.1 idna==3.10 -iniconfig==2.1.0 jinja2==3.1.6 jiter==0.9.0 jmespath==1.0.1 joblib==1.4.2 -jsonschema==4.23.0 -jsonschema-specifications==2024.10.1 lxml==5.3.1 lxml-html-clean==0.4.2 mako==1.3.9 @@ -51,44 +43,33 @@ markdown-it-py==3.0.0 markupsafe==3.0.2 mdurl==0.1.2 multitasking==0.0.11 -narwhals==1.32.0 newspaper4k==0.9.3.1 nltk==3.9.1 numpy==2.2.4 openai==1.68.2 -packaging==24.2 pandas==2.2.3 peewee==3.17.9 pgvector==0.4.0 pillow==11.1.0 platformdirs==4.3.7 -pluggy==1.5.0 primp==0.14.0 -protobuf==5.29.4 psycopg==3.2.6 psycopg-binary==3.2.6 -pyarrow==19.0.1 pycountry==24.6.1 pydantic==2.10.6 pydantic-core==2.27.2 pydantic-settings==2.8.1 -pydeck==0.9.1 pygments==2.19.1 -pypdf==5.4.0 -pytest==8.3.5 -pytest-mock==3.14.0 python-dateutil==2.9.0.post0 python-dotenv==1.1.0 python-multipart==0.0.20 pytz==2025.2 pyyaml==6.0.2 -referencing==0.36.2 regex==2024.11.6 requests==2.32.3 requests-file==2.1.0 rich==13.9.4 rich-toolkit==0.13.2 -rpds-py==0.23.1 s3transfer==0.11.4 sgmllib3k==1.0.0 shellingham==1.5.4 @@ -98,13 +79,8 @@ sniffio==1.3.1 soupsieve==2.6 sqlalchemy==2.0.39 starlette==0.46.1 -streamlit==1.43.2 -tenacity==9.0.0 -tiktoken==0.9.0 tldextract==5.2.0 -toml==0.10.2 
tomli==2.2.1 -tornado==6.4.2 tqdm==4.67.1 typer==0.15.2 typing-extensions==4.12.2 diff --git a/teams/finance_researcher_team.py b/teams/finance_researcher.py similarity index 82% rename from teams/finance_researcher_team.py rename to teams/finance_researcher.py index b0de5ac..9289a25 100644 --- a/teams/finance_researcher_team.py +++ b/teams/finance_researcher.py @@ -1,4 +1,5 @@ from textwrap import dedent +from typing import Optional from agno.agent import Agent from agno.models.openai import OpenAIChat @@ -63,7 +64,11 @@ web_agent = Agent( name="Web Agent", role="Search the web for information", - model=OpenAIChat(id=team_settings.gpt_4), + model=OpenAIChat( + id=team_settings.gpt_4, + max_tokens=team_settings.default_max_completion_tokens, + temperature=team_settings.default_temperature, + ), tools=[DuckDuckGoTools(cache_results=True)], agent_id="web-agent", instructions=[ @@ -75,7 +80,14 @@ ) -def get_finance_researcher_team(debug_mode: bool = False): +def get_finance_researcher_team( + model_id: Optional[str] = None, + user_id: Optional[str] = None, + session_id: Optional[str] = None, + debug_mode: bool = True, +): + model_id = model_id or team_settings.gpt_4 + return Team( name="Finance Researcher Team", team_id="financial-researcher-team", @@ -84,8 +96,14 @@ def get_finance_researcher_team(debug_mode: bool = False): instructions=[ "You are a team of finance researchers!", ], + session_id=session_id, + user_id=user_id, description="You are a team of finance researchers!", - model=OpenAIChat(id=team_settings.gpt_4), + model=OpenAIChat( + id=model_id, + max_tokens=team_settings.default_max_completion_tokens, + temperature=team_settings.default_temperature, + ), success_criteria="A good financial research report.", enable_agentic_context=True, expected_output="A good financial research report.", diff --git a/teams/multi_language_team.py b/teams/multi_language.py similarity index 63% rename from teams/multi_language_team.py rename to teams/multi_language.py index 
adc6815..5706209 100644 --- a/teams/multi_language_team.py +++ b/teams/multi_language.py @@ -1,3 +1,5 @@ +from typing import Optional + from agno.agent import Agent from agno.models.openai import OpenAIChat from agno.storage.postgres import PostgresStorage @@ -10,40 +12,71 @@ name="Japanese Agent", agent_id="japanese-agent", role="You only answer in Japanese", - model=OpenAIChat(id="gpt-4o"), + model=OpenAIChat( + id="gpt-4o", + max_tokens=team_settings.default_max_completion_tokens, + temperature=team_settings.default_temperature, + ), ) chinese_agent = Agent( name="Chinese Agent", agent_id="chinese-agent", role="You only answer in Chinese", - model=OpenAIChat(id="gpt-4o"), + model=OpenAIChat( + id="gpt-4o", + max_tokens=team_settings.default_max_completion_tokens, + temperature=team_settings.default_temperature, + ), ) spanish_agent = Agent( name="Spanish Agent", agent_id="spanish-agent", role="You only answer in Spanish", - model=OpenAIChat(id="gpt-4o"), + model=OpenAIChat( + id="gpt-4o", + max_tokens=team_settings.default_max_completion_tokens, + temperature=team_settings.default_temperature, + ), ) french_agent = Agent( name="French Agent", agent_id="french-agent", role="You only answer in French", - model=OpenAIChat(id="gpt-4o"), + model=OpenAIChat( + id="gpt-4o", + max_tokens=team_settings.default_max_completion_tokens, + temperature=team_settings.default_temperature, + ), ) german_agent = Agent( name="German Agent", agent_id="german-agent", role="You only answer in German", - model=OpenAIChat(id="gpt-4o"), + model=OpenAIChat( + id="gpt-4o", + max_tokens=team_settings.default_max_completion_tokens, + temperature=team_settings.default_temperature, + ), ) -def get_multi_language_team(debug_mode: bool = False): +def get_multi_language_team( + model_id: Optional[str] = None, + user_id: Optional[str] = None, + session_id: Optional[str] = None, + debug_mode: bool = True, +): + model_id = model_id or team_settings.gpt_4 + return Team( name="Multi Language Team", 
mode="route", team_id="multi-language-team", - model=OpenAIChat(id=team_settings.gpt_4), + model=OpenAIChat( + id=model_id, + max_tokens=team_settings.default_max_completion_tokens, + temperature=team_settings.default_temperature, + ), members=[ spanish_agent, japanese_agent, @@ -61,6 +94,8 @@ def get_multi_language_team(debug_mode: bool = False): "Always check the language of the user's input before routing to an agent.", "For unsupported languages like Italian, respond in English with the above message.", ], + session_id=session_id, + user_id=user_id, markdown=True, show_tool_calls=True, show_members_responses=True, diff --git a/teams/operator.py b/teams/operator.py new file mode 100644 index 0000000..1f2cbbd --- /dev/null +++ b/teams/operator.py @@ -0,0 +1,30 @@ +from enum import Enum +from typing import List, Optional + +from teams.finance_researcher import get_finance_researcher_team +from teams.multi_language import get_multi_language_team + + +class TeamType(Enum): + FINANCE_RESEARCHER = "finance-researcher" + MULTI_LANGUAGE = "multi-language" + + +def get_available_teams() -> List[str]: + """Returns a list of all available team IDs.""" + return [team.value for team in TeamType] + + +def get_team( + model_id: Optional[str] = None, + team_id: Optional[TeamType] = None, + user_id: Optional[str] = None, + session_id: Optional[str] = None, + debug_mode: bool = True, +): + if team_id == TeamType.FINANCE_RESEARCHER: + return get_finance_researcher_team( + model_id=model_id, user_id=user_id, session_id=session_id, debug_mode=debug_mode + ) + else: + return get_multi_language_team(model_id=model_id, user_id=user_id, session_id=session_id, debug_mode=debug_mode) diff --git a/workflows/blog_post_generator.py b/workflows/blog_post_generator.py index 1420c53..5f377fb 100644 --- a/workflows/blog_post_generator.py +++ b/workflows/blog_post_generator.py @@ -1,38 +1,10 @@ -"""🎨 Blog Post Generator - Your AI Content Creation Studio! 
- -This advanced example demonstrates how to build a sophisticated blog post generator that combines -web research capabilities with professional writing expertise. The workflow uses a multi-stage -approach: -1. Intelligent web research and source gathering -2. Content extraction and processing -3. Professional blog post writing with proper citations - -Key capabilities: -- Advanced web research and source evaluation -- Content scraping and processing -- Professional writing with SEO optimization -- Automatic content caching for efficiency -- Source attribution and fact verification - -Example blog topics to try: -- "The Rise of Artificial General Intelligence: Latest Breakthroughs" -- "How Quantum Computing is Revolutionizing Cybersecurity" -- "Sustainable Living in 2024: Practical Tips for Reducing Carbon Footprint" -- "The Future of Work: AI and Human Collaboration" -- "Space Tourism: From Science Fiction to Reality" -- "Mindfulness and Mental Health in the Digital Age" -- "The Evolution of Electric Vehicles: Current State and Future Trends" - -Run `pip install openai duckduckgo-search newspaper4k lxml_html_clean sqlalchemy agno` to install dependencies. 
-""" - import json from textwrap import dedent from typing import Dict, Iterator, Optional from agno.agent import Agent from agno.models.openai import OpenAIChat -from agno.storage.workflow.postgres import PostgresWorkflowStorage +from agno.storage.postgres import PostgresStorage from agno.tools.duckduckgo import DuckDuckGoTools from agno.tools.newspaper4k import Newspaper4kTools from agno.utils.log import logger @@ -370,7 +342,7 @@ def write_blog_post(self, topic: str, scraped_articles: Dict[str, ScrapedArticle def get_blog_post_generator(debug_mode: bool = False) -> BlogPostGenerator: return BlogPostGenerator( workflow_id="generate-blog-post-on", - storage=PostgresWorkflowStorage( + storage=PostgresStorage( table_name="blog_post_generator_workflows", db_url=db_url, auto_upgrade_schema=True, diff --git a/workflows/investment_report_generator.py b/workflows/investment_report_generator.py index fd1c911..f7d693e 100644 --- a/workflows/investment_report_generator.py +++ b/workflows/investment_report_generator.py @@ -3,7 +3,7 @@ from agno.agent import Agent, RunResponse from agno.models.openai import OpenAIChat -from agno.storage.workflow.postgres import PostgresWorkflowStorage +from agno.storage.postgres import PostgresStorage from agno.tools.yfinance import YFinanceTools from agno.utils.log import logger from agno.workflow import Workflow @@ -144,7 +144,7 @@ def run(self, companies: str) -> Iterator[RunResponse]: # type: ignore def get_investment_report_generator(debug_mode: bool = False) -> InvestmentReportGenerator: return InvestmentReportGenerator( workflow_id="generate-investment-report", - storage=PostgresWorkflowStorage( + storage=PostgresStorage( table_name="investment_report_generator_workflows", db_url=db_url, auto_upgrade_schema=True,