Legion Examples
Explore practical examples of Legion's core components to help you build powerful AI applications.
Agents
Agents are autonomous entities that can understand, plan, and execute tasks using natural language. Here's how to create a basic agent:
```python
from legion.agents import agent

@agent(
    model="openai:gpt-4o-mini",
    temperature=0.2,
)
class MathHelper:
    """You are an expert at math who helps users solve math problems.

    You should always show your work and explain your reasoning step by step.
    When appropriate, provide multiple approaches to solving problems.
    """

# Create an instance of the agent
math_helper = MathHelper()

# Use the agent
response = await math_helper.aprocess(
    "What is the derivative of f(x) = x^3 + 2x^2 - 5x + 3?"
)
print(response.content)
```
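The snippet above uses top-level `await`, which works in a notebook or inside an async function but not in a plain script. A minimal sketch of a script entry point, using only the standard `asyncio` module and the `aprocess` call shown above:

```python
import asyncio

async def main():
    math_helper = MathHelper()
    response = await math_helper.aprocess(
        "What is the derivative of f(x) = x^3 + 2x^2 - 5x + 3?"
    )
    print(response.content)

if __name__ == "__main__":
    # asyncio.run drives the coroutine to completion from a synchronous entry point
    asyncio.run(main())
```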
Agent with Memory
You can create agents with memory to maintain context across multiple interactions:
```python
from legion.agents import agent
from legion.memory import ConversationBufferMemory

@agent(
    model="openai:gpt-4o",
    temperature=0.7,
    memory=ConversationBufferMemory(memory_key="chat_history")
)
class TravelAdvisor:
    """You are a helpful travel advisor who provides personalized travel recommendations.

    You should remember user preferences from previous messages and tailor your
    recommendations accordingly.
    """

# Create an instance of the agent
advisor = TravelAdvisor()

# First interaction
response1 = await advisor.aprocess(
    "I'm planning a trip to Japan in April. I love food and history."
)
print(response1.content)

# Second interaction (agent remembers previous context)
response2 = await advisor.aprocess("What cities would you recommend I visit?")
print(response2.content)
```
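Because the advisor keeps a conversation buffer, later turns can lean on earlier ones without restating the details. A follow-up sketch, using only the `aprocess` call shown above:

```python
# Third interaction: relies on the remembered preferences (food, history, April trip)
response3 = await advisor.aprocess(
    "Which of those cities best matches the interests I mentioned earlier?"
)
print(response3.content)
```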
Tools
Tools are reusable functions that agents can leverage to interact with external systems and APIs. Here's how to create and use tools:
```python
from typing import Annotated, List

from pydantic import Field

from legion.agents import agent
from legion.interface.decorators import tool

# Define standalone tools
@tool
def search_weather(
    location: Annotated[str, Field(description="City and country to get weather for")]
) -> str:
    """Search for current weather in a location."""
    # In a real implementation, this would call a weather API
    return f"Weather in {location}: 72°F, Sunny"

@tool
def convert_currency(
    amount: Annotated[float, Field(description="Amount to convert")],
    from_currency: Annotated[str, Field(description="Source currency code (e.g., USD)")],
    to_currency: Annotated[str, Field(description="Target currency code (e.g., EUR)")]
) -> str:
    """Convert an amount from one currency to another."""
    # In a real implementation, this would call a currency API
    conversion_rate = 0.92  # Example: USD to EUR
    converted = amount * conversion_rate
    return f"{amount} {from_currency} = {converted:.2f} {to_currency}"

# Create an agent with tools
@agent(
    model="openai:gpt-4o",
    temperature=0.3,
    tools=[search_weather, convert_currency]
)
class TravelAssistant:
    """You are a travel assistant that helps users plan their trips."""

    # Agent-specific tool
    @tool
    def format_itinerary(
        self,
        destinations: Annotated[List[str], Field(description="List of destinations to visit")],
        days: Annotated[int, Field(description="Number of days for the trip")]
    ) -> str:
        """Format a travel itinerary based on destinations and days."""
        itinerary = "Travel Itinerary:\n\n"
        days_per_destination = max(1, days // len(destinations))
        current_day = 1
        for destination in destinations:
            itinerary += f"Days {current_day}-{current_day + days_per_destination - 1}: {destination}\n"
            current_day += days_per_destination
        return itinerary

# Use the agent with tools
assistant = TravelAssistant()
response = await assistant.aprocess(
    "I'm planning a trip to Paris for 5 days. What's the weather like, and how much is 100 USD in EUR?"
)
print(response.content)
```
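Depending on how the `@tool` decorator wraps the function, the standalone tools may still be callable as ordinary Python functions, which is handy for sanity-checking their output before handing them to an agent. A quick sketch, assuming the decorator preserves the underlying callable:

```python
# Quick sanity checks, assuming @tool leaves the functions directly callable
print(search_weather("Paris, France"))        # e.g. "Weather in Paris, France: 72°F, Sunny"
print(convert_currency(100.0, "USD", "EUR"))  # e.g. "100.0 USD = 92.00 EUR" with the stub rate above
```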
Blocks
Blocks are composable units of functionality that can be chained together to create complex behaviors. Here's how to create and use blocks:
```python
from typing import List

from pydantic import BaseModel, Field

from legion.blocks import block

# Define input/output schemas
class TextInput(BaseModel):
    text: str = Field(description="Input text to process")

class SentimentOutput(BaseModel):
    sentiment: str = Field(description="Detected sentiment (positive, negative, neutral)")
    confidence: float = Field(description="Confidence score (0-1)")

class KeywordsOutput(BaseModel):
    keywords: List[str] = Field(description="Extracted keywords from text")

# Create blocks using decorator syntax
@block(
    input_schema=TextInput,
    output_schema=SentimentOutput,
    tags=["text", "analysis"]
)
def analyze_sentiment(input_data: TextInput) -> SentimentOutput:
    """Analyze the sentiment of input text."""
    # In a real implementation, this would use an NLP model
    text = input_data.text.lower()

    # Simple rule-based sentiment analysis
    positive_words = ["good", "great", "excellent", "happy", "love"]
    negative_words = ["bad", "terrible", "awful", "sad", "hate"]

    positive_count = sum(1 for word in positive_words if word in text)
    negative_count = sum(1 for word in negative_words if word in text)

    if positive_count > negative_count:
        return SentimentOutput(sentiment="positive", confidence=0.8)
    elif negative_count > positive_count:
        return SentimentOutput(sentiment="negative", confidence=0.8)
    else:
        return SentimentOutput(sentiment="neutral", confidence=0.6)

@block(
    input_schema=TextInput,
    output_schema=KeywordsOutput,
    tags=["text", "extraction"]
)
def extract_keywords(input_data: TextInput) -> KeywordsOutput:
    """Extract keywords from input text."""
    # In a real implementation, this would use an NLP model
    text = input_data.text

    # Simple keyword extraction (just for demonstration)
    words = [word.strip('.,!?()[]{}":;') for word in text.split()]

    # Filter out common stop words and short words
    stop_words = ["the", "and", "is", "in", "to", "a", "of", "for"]
    keywords = [word for word in words if word.lower() not in stop_words and len(word) > 3]

    # Remove duplicates and limit to top 5
    unique_keywords = list(set(keywords))[:5]

    return KeywordsOutput(keywords=unique_keywords)

# Use blocks individually
text_input = TextInput(text="I really love this product! It's excellent and makes me happy.")
sentiment_result = analyze_sentiment(text_input)
keywords_result = extract_keywords(text_input)

print(f"Sentiment: {sentiment_result.sentiment} (Confidence: {sentiment_result.confidence})")
print(f"Keywords: {', '.join(keywords_result.keywords)}")
```
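Since the example above already calls the decorated blocks directly, you can exercise the other sentiment branches the same way when testing:

```python
# Exercise the negative and neutral branches of analyze_sentiment
negative = analyze_sentiment(TextInput(text="This was a terrible, awful experience."))
neutral = analyze_sentiment(TextInput(text="The package arrived on Tuesday."))

print(negative.sentiment, negative.confidence)  # negative 0.8
print(neutral.sentiment, neutral.confidence)    # neutral 0.6
```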
Chains
Chains are sequential workflows that connect blocks and agents to accomplish complex tasks. Here's how to create and use chains:
```python
from typing import List

from pydantic import BaseModel, Field

from legion.blocks import block
from legion.groups.decorators import chain

# Define input/output schemas
class TextInput(BaseModel):
    text: str = Field(description="Input text to process")

class TextAnalysisOutput(BaseModel):
    sentiment: str = Field(description="Detected sentiment")
    keywords: List[str] = Field(description="Extracted keywords")
    summary: str = Field(description="Text summary")

# Define blocks (reusing the analyze_sentiment and extract_keywords blocks
# defined in the previous example)

@block(
    input_schema=TextInput,
    output_schema=BaseModel,  # Using dynamic output
    tags=["text", "summarization"]
)
def summarize_text(input_data: TextInput) -> BaseModel:
    """Summarize the input text."""
    # In a real implementation, this would use an NLP model
    text = input_data.text

    # Simple summarization (just for demonstration)
    words = text.split()
    if len(words) > 10:
        summary = " ".join(words[:10]) + "..."
    else:
        summary = text

    class SummaryOutput(BaseModel):
        summary: str = Field(description="Text summary")

    return SummaryOutput(summary=summary)

# Create a chain that combines the blocks
@chain
class TextAnalysisChain:
    """A chain that analyzes text by determining sentiment, extracting keywords,
    and generating a summary."""

    # Define the blocks to be executed in sequence
    members = [
        analyze_sentiment,
        extract_keywords,
        summarize_text
    ]

    # Define how to combine the outputs from each block
    def combine_outputs(self, outputs):
        return TextAnalysisOutput(
            sentiment=outputs[0].sentiment,
            keywords=outputs[1].keywords,
            summary=outputs[2].summary
        )

# Use the chain
analysis_chain = TextAnalysisChain()
input_data = TextInput(
    text="The new smartphone has an excellent camera and impressive battery life. "
         "I'm really happy with my purchase and would recommend it to anyone looking "
         "for a high-quality device."
)
result = await analysis_chain.aprocess(input_data)

print(f"Sentiment: {result.sentiment}")
print(f"Keywords: {', '.join(result.keywords)}")
print(f"Summary: {result.summary}")
```
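Because the chain exposes the same `aprocess` coroutine as agents, you can fan it out over several inputs with standard asyncio. A minimal sketch, assuming nothing beyond the `aprocess` call shown above:

```python
import asyncio

reviews = [
    TextInput(text="I love the excellent screen, but the battery is bad."),
    TextInput(text="Terrible support experience, I hate waiting on hold."),
]

async def analyze_all(inputs):
    # Run the chain over each input concurrently
    return await asyncio.gather(*(analysis_chain.aprocess(i) for i in inputs))

results = await analyze_all(reviews)
for r in results:
    print(r.sentiment, r.keywords, r.summary)
```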
Teams
Teams are collaborative groups of agents that work together to solve complex problems. Here's how to create and use teams:
```python
from typing import Annotated, List

from pydantic import Field

from legion.agents import agent
from legion.groups.decorators import team
from legion.interface.decorators import tool

# Define specialized agents
@agent(
    model="openai:gpt-4o-mini",
    temperature=0.3
)
class Researcher:
    """You are a research specialist who excels at finding and organizing information.
    Your role is to gather relevant facts and data on a given topic.
    You should be thorough and objective in your research."""

    @tool
    def search_information(
        self,
        query: Annotated[str, Field(description="Search query")]
    ) -> List[str]:
        """Search for information on a given query."""
        # In a real implementation, this would use a search API
        if "climate change" in query.lower():
            return [
                "Global temperatures have risen by 1.1°C since pre-industrial times.",
                "Sea levels are rising at a rate of 3.7mm per year.",
                "Arctic sea ice is declining at a rate of 13.1% per decade.",
                "CO2 levels in the atmosphere are at their highest in 800,000 years."
            ]
        elif "renewable energy" in query.lower():
            return [
                "Solar and wind power are the fastest-growing renewable energy sources.",
                "Renewable energy provided about 28% of global electricity in 2021.",
                "The cost of solar panels has decreased by 90% since 2009.",
                "Many countries have pledged to reach net-zero emissions by 2050."
            ]
        else:
            return ["No specific information found for this query."]

@agent(
    model="openai:gpt-4o-mini",
    temperature=0.7
)
class Writer:
    """You are a skilled writer who excels at creating engaging and informative content.
    Your role is to transform raw information into well-structured, compelling narratives.
    You should adapt your writing style to the target audience and purpose."""

    @tool
    def draft_content(
        self,
        topic: Annotated[str, Field(description="Content topic")],
        facts: Annotated[List[str], Field(description="Facts to include")],
        style: Annotated[str, Field(description="Writing style (formal, casual, technical)")] = "formal"
    ) -> str:
        """Draft content based on provided facts and style."""
        # In a real implementation, this would call an LLM
        intro = f"Here's a draft on {topic} in a {style} style:\n\n"

        if style == "formal":
            content = f"# {topic.title()}\n\n"
            content += "## Introduction\n\n"
            content += f"This document provides an overview of key facts regarding {topic}.\n\n"
            content += "## Key Findings\n\n"
            for fact in facts:
                content += f"- {fact}\n"
        else:
            content = f"# Let's Talk About {topic.title()}!\n\n"
            content += f"Hey there! Today we're diving into {topic} - here's what you need to know:\n\n"
            for fact in facts:
                content += f"* {fact}\n"

        return intro + content

@agent(
    model="openai:gpt-4o-mini",
    temperature=0.5
)
class Editor:
    """You are a meticulous editor who ensures content is accurate, clear, and polished.
    Your role is to review and refine content, checking for errors and improving clarity.
    You should maintain the original voice while enhancing the overall quality."""

    @tool
    def review_content(
        self,
        content: Annotated[str, Field(description="Content to review")],
        focus_areas: Annotated[List[str], Field(description="Areas to focus on (grammar, clarity, structure)")] = ["grammar", "clarity"]
    ) -> str:
        """Review and improve content based on focus areas."""
        # In a real implementation, this would call an LLM
        return (
            f"Reviewed content with focus on {', '.join(focus_areas)}:\n\n"
            f"{content}\n\n[Minor grammar and formatting improvements applied]"
        )

# Create a team of agents
@team
class ContentCreationTeam:
    """A team that collaborates to research, write, and edit content on various topics."""

    # Define team members
    members = {
        "researcher": Researcher(),
        "writer": Writer(),
        "editor": Editor()
    }

    # Define team workflow
    async def process_topic(self, topic: str, style: str = "formal"):
        # Step 1: Research the topic
        research_results = await self.members["researcher"].aprocess(
            f"Find information about {topic}"
        )
        facts = research_results.tool_results["search_information"]

        # Step 2: Write content based on research
        writing_results = await self.members["writer"].aprocess(
            f"Write content about {topic} in a {style} style using these facts: {facts}"
        )
        draft = writing_results.tool_results["draft_content"]

        # Step 3: Edit and finalize content
        editing_results = await self.members["editor"].aprocess(
            f"Review this content: {draft}"
        )
        final_content = editing_results.tool_results["review_content"]

        return final_content

# Use the team
content_team = ContentCreationTeam()
final_article = await content_team.process_topic("renewable energy", style="casual")
print(final_article)
```
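The researcher stub above also recognizes "climate change" queries, so the same team can be reused for a second topic. Wrapped in `asyncio.run` for a plain script, a usage sketch looks like this:

```python
import asyncio

async def main():
    team = ContentCreationTeam()
    # The default style is "formal", which exercises the Writer's formal branch
    article = await team.process_topic("climate change")
    print(article)

asyncio.run(main())
```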