Skip to content

Commit

Permalink
Merge pull request #318 from MervinPraison/develop
Browse files Browse the repository at this point in the history
Update PraisonAI to version 2.0.56 and enhance agent functionality
  • Loading branch information
MervinPraison authored Jan 22, 2025
2 parents e97e443 + 969d2d0 commit 0529f92
Show file tree
Hide file tree
Showing 28 changed files with 2,130 additions and 274 deletions.
2 changes: 1 addition & 1 deletion Dockerfile
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
# Minimal Flask/PraisonAI service image served by gunicorn.
FROM python:3.11-slim
WORKDIR /app
COPY . .
# Diff artifact resolved: keep only the post-change pin (2.0.56);
# the pre-change 2.0.55 line was the removed side of the diff.
RUN pip install flask praisonai==2.0.56 gunicorn markdown
EXPOSE 8080
CMD ["gunicorn", "-b", "0.0.0.0:8080", "api:app"]
45 changes: 45 additions & 0 deletions agents/any-llm-agents-advanced.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,45 @@
from praisonaiagents import Agent

# Detailed LLM configuration, passed through to the agent's LLM layer.
llm_config = {
    # NOTE(review): the value DOES carry a provider prefix ("gemini/"),
    # litellm-style — the original comment claimed the opposite; confirm
    # which form the agent's LLM wrapper expects.
    "model": "gemini/gemini-1.5-flash-latest",

    # Core settings
    "temperature": 0.7,        # Sampling temperature: higher => more random output
    "timeout": 30,             # Request timeout in seconds
    "top_p": 0.9,              # Nucleus sampling parameter
    "max_tokens": 1000,        # Max tokens in response

    # Advanced parameters
    "presence_penalty": 0.1,   # Penalize repetition of topics (-2.0 to 2.0)
    "frequency_penalty": 0.1,  # Penalize token repetition (-2.0 to 2.0)

    # API settings (optional)
    "api_key": None,           # Your API key (or use environment variable)
    "base_url": None,          # Custom API endpoint if needed

    # Response formatting
    "response_format": {       # Force specific response format
        "type": "text"         # Options: "text", "json_object"
    },

    # Additional controls
    "seed": 42,                # For reproducible responses
    # NOTE(review): litellm's standard key for stop sequences is "stop" —
    # verify "stop_phrases" is actually honored by the LLM wrapper.
    "stop_phrases": ["##", "END"],  # Custom stop sequences
}

# Self-reflecting assistant; reflection re-runs the answer 1–3 times.
agent = Agent(
    instructions="You are a helpful Assistant specialized in scientific explanations. "
    "Provide clear, accurate, and engaging responses.",
    llm=llm_config,      # Pass the detailed configuration
    verbose=True,        # Enable detailed output
    markdown=True,       # Format responses in markdown
    self_reflect=True,   # Enable self-reflection
    max_reflect=3,       # Maximum reflection iterations
    min_reflect=1        # Minimum reflection iterations
)

# Test the agent — start() performs a live LLM call.
response = agent.start("Why is the sky blue? Please explain in simple terms.")

print(response)
18 changes: 18 additions & 0 deletions agents/any-llm-agents-async.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
import asyncio
from praisonaiagents import Agent


async def main():
    """Run one asynchronous chat round-trip with a Gemini-backed agent."""
    assistant = Agent(
        instructions="You are a helpful assistant",
        llm="gemini/gemini-1.5-flash-8b",
        self_reflect=True,
        verbose=True,
    )

    # achat is the async counterpart of start/chat.
    answer = await assistant.achat("Why sky is Blue in 1000 words?")
    print(answer)


if __name__ == "__main__":
    # Drive the coroutine to completion on a fresh event loop.
    asyncio.run(main())
10 changes: 10 additions & 0 deletions agents/any-llm-agents.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
from praisonaiagents import Agent

# Configuration for a Gemini-backed assistant with self-reflection enabled.
_settings = dict(
    instructions="You are a helpful assistant",
    llm="gemini/gemini-1.5-flash-8b",
    self_reflect=True,
    verbose=True,
)

agent = Agent(**_settings)

# One-shot question: start() performs the full LLM round-trip.
agent.start("Why sky is Blue?")
27 changes: 24 additions & 3 deletions agents/async_example.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,14 @@
)
from duckduckgo_search import DDGS
from pydantic import BaseModel
import logging

# Configure logging with more detail
# NOTE(review): DEBUG on the root logger is very chatty (it also surfaces
# third-party library logs) — consider INFO for normal runs.
logging.basicConfig(
    level=logging.DEBUG,
    format='%(asctime)s - %(levelname)s - %(name)s:%(lineno)d - %(message)s'
)
# Module-level logger, named after this module per stdlib convention.
logger = logging.getLogger(__name__)

console = Console()

Expand Down Expand Up @@ -48,31 +56,44 @@ def sync_search_tool(query: str) -> List[Dict]:
error_logs.append(error_msg)
return []

async def async_search_tool(query: str) -> Dict:
    """
    Asynchronous search using DuckDuckGo.

    Args:
        query (str): The search query.

    Returns:
        dict: {"query": <query>, "results": [<title/url/snippet dicts>],
               "total_results": <int>}. On failure "results" is empty and
               "total_results" is 0, so callers always get the same shape.
    """
    logger.debug(f"Starting async_search_tool with query: {query}")
    display_tool_call(f"Running async search for: {query}", console)
    await asyncio.sleep(1)  # Simulate network delay
    try:
        results = []
        ddgs = DDGS()
        logger.debug("Performing DuckDuckGo search")
        # Cap at 5 hits; normalize DDG's field names (href/body) to url/snippet.
        for result in ddgs.text(keywords=query, max_results=5):
            results.append({
                "title": result.get("title", ""),
                "url": result.get("href", ""),
                "snippet": result.get("body", "")
            })

        logger.debug(f"Search completed, found {len(results)} results")
        return {
            "query": query,
            "results": results,
            "total_results": len(results)
        }
    except Exception as e:
        error_msg = f"Error during async search: {e}"
        logger.error(f"Error in async_search_tool: {e}", exc_info=True)
        display_error(error_msg, console)
        error_logs.append(error_msg)
        # Error path mirrors the success-path dict shape (the diff's stale
        # `return results` / `return []` removed lines are dropped here).
        return {
            "query": query,
            "results": [],
            "total_results": 0
        }

# 3. Define both sync and async callbacks
def sync_callback(output: TaskOutput):
Expand Down
7 changes: 7 additions & 0 deletions agents/async_example_full.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,13 @@
from praisonaiagents import Agent, Task, PraisonAIAgents, TaskOutput
from duckduckgo_search import DDGS
from pydantic import BaseModel
import logging

# Configure logging
# NOTE(review): root logger at DEBUG — noisy; fine for an example script,
# consider INFO elsewhere.
logging.basicConfig(
    level=logging.DEBUG,
    format='%(asctime)s - %(levelname)s - %(message)s'
)

# 1. Define output model for structured results
class SearchResult(BaseModel):
Expand Down
8 changes: 8 additions & 0 deletions agents/basic-agents.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
from praisonaiagents import Agent

# Minimal PraisonAI example: one gpt-4o-mini assistant, default settings.
SYSTEM_INSTRUCTIONS = "You are a helpful assistant"
MODEL = "gpt-4o-mini"

agent = Agent(instructions=SYSTEM_INSTRUCTIONS, llm=MODEL)

# Fire a one-shot question through the agent.
agent.start("Why sky is Blue?")
Loading

0 comments on commit 0529f92

Please sign in to comment.