Commit fa7d2c0
Add new agent scripts and update version to 0.0.55
- Introduced three new Python scripts: `deepseek-reasoning-output-tokens.py`, `llm-deepseek-reasoning-steps.py`, and `llm-langchain-toolcall.py` to enhance reasoning capabilities and multi-source information retrieval.
- Updated `pyproject.toml` and `uv.lock` to reflect the new version 0.0.55.
- Enhanced `wikipedia-agent.py` to use the `gpt-4o-mini` model for improved performance.
- Improved `agent.py` to support tool calls in chat completions, refining the overall functionality of the agent framework.

These changes aim to expand the capabilities of the PraisonAI framework by integrating new reasoning features and improving the user experience with updated versioning.
1 parent 190fa50 commit fa7d2c0

8 files changed: +186 −65 lines changed
+19
@@ -0,0 +1,19 @@
+from praisonaiagents import Agent, Task, PraisonAIAgents
+
+llm_config = {
+    "model": "deepseek/deepseek-reasoner",
+    "max_tokens": 1
+}
+
+reasoning_agent = Agent(role="Helpful Assistant", llm=llm_config, reasoning_steps=True)
+small_agent = Agent(role="Helpful Assistant", llm="openai/gpt-3.5-turbo")
+
+reasoning_task = Task(description="How many r's in the word 'Strawberry'?", agent=reasoning_agent)
+small_task = Task(description="With the provided reasoning tell me how many r's in the word 'Strawberry'?", agent=small_agent)
+
+agents = PraisonAIAgents(
+    agents=[reasoning_agent, small_agent],
+    tasks=[reasoning_task, small_task]
+)
+
+agents.start()
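The script above uses `reasoning_steps=True` so the DeepSeek reasoner's chain-of-thought is exposed and can be handed to the smaller model in the second task. For reference, a minimal sketch (not part of this commit) of how `deepseek-reasoner` returns that field through its OpenAI-compatible API; the base URL and the `DEEPSEEK_API_KEY` variable follow DeepSeek's published documentation and are assumptions here:

import os
from openai import OpenAI

# Assumes DeepSeek's OpenAI-compatible endpoint and a DEEPSEEK_API_KEY environment variable.
client = OpenAI(api_key=os.environ["DEEPSEEK_API_KEY"], base_url="https://api.deepseek.com")

response = client.chat.completions.create(
    model="deepseek-reasoner",
    messages=[{"role": "user", "content": "How many r's are in the word 'Strawberry'?"}],
)

message = response.choices[0].message
print("Reasoning:", message.reasoning_content)  # chain-of-thought surfaced by the reasoner
print("Answer:", message.content)               # final answer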

agents/llm-langchain-toolcall.py

+30
@@ -0,0 +1,30 @@
+from praisonaiagents import Agent, Task, PraisonAIAgents
+from langchain_community.tools import YouTubeSearchTool
+from langchain_community.utilities import WikipediaAPIWrapper
+
+# Create an agent with both tools
+agent = Agent(
+    name="SearchAgent",
+    role="Research Assistant",
+    goal="Search for information from multiple sources",
+    backstory="I am an AI assistant that can search YouTube and Wikipedia.",
+    tools=[YouTubeSearchTool, WikipediaAPIWrapper],
+    llm="openai/gpt-4o-mini"
+)
+
+# Create tasks to demonstrate both tools
+task = Task(
+    name="search_task",
+    description="Search for information about 'AI advancements' on both YouTube and Wikipedia",
+    expected_output="Combined information from YouTube videos and Wikipedia articles",
+    agent=agent
+)
+
+# Create and start the workflow
+agents = PraisonAIAgents(
+    agents=[agent],
+    tasks=[task],
+    verbose=True
+)
+
+agents.start()
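The agent above receives the LangChain tool classes directly. As a point of comparison, the same tools can be exercised on their own, which is roughly what the agent does when it decides to call one of them; a minimal sketch, assuming the `youtube_search` and `wikipedia` packages these wrappers depend on are installed:

from langchain_community.tools import YouTubeSearchTool
from langchain_community.utilities import WikipediaAPIWrapper

# YouTubeSearchTool takes a "query,number_of_results" string and returns links to matching videos.
youtube = YouTubeSearchTool()
print(youtube.run("AI advancements,3"))

# WikipediaAPIWrapper returns summaries of the top matching articles.
wikipedia = WikipediaAPIWrapper()
print(wikipedia.run("AI advancements"))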

agents/praisonaiagents/agent/agent.py

+132 −63
@@ -97,36 +97,79 @@ def process_stream_chunks(chunks):
 
     content_list = []
     reasoning_list = []
+    tool_calls = []
+    current_tool_call = None
 
+    # First pass: Get initial tool call data
     for chunk in chunks:
         if not hasattr(chunk, "choices") or not chunk.choices:
             continue
 
-        # Track usage from each chunk
-        if hasattr(chunk, "usage"):
-            completion_tokens += getattr(chunk.usage, "completion_tokens", 0)
-            prompt_tokens += getattr(chunk.usage, "prompt_tokens", 0)
-
         delta = getattr(chunk.choices[0], "delta", None)
         if not delta:
             continue
-
+
+        # Handle content and reasoning
         if hasattr(delta, "content") and delta.content:
             content_list.append(delta.content)
         if hasattr(delta, "reasoning_content") and delta.reasoning_content:
             reasoning_list.append(delta.reasoning_content)
+
+        # Handle tool calls
+        if hasattr(delta, "tool_calls") and delta.tool_calls:
+            for tool_call_delta in delta.tool_calls:
+                if tool_call_delta.index is not None and tool_call_delta.id:
+                    # Found the initial tool call
+                    current_tool_call = {
+                        "id": tool_call_delta.id,
+                        "type": "function",
+                        "function": {
+                            "name": tool_call_delta.function.name,
+                            "arguments": ""
+                        }
+                    }
+                    while len(tool_calls) <= tool_call_delta.index:
+                        tool_calls.append(None)
+                    tool_calls[tool_call_delta.index] = current_tool_call
+                    current_tool_call = tool_calls[tool_call_delta.index]
+                elif current_tool_call is not None and hasattr(tool_call_delta.function, "arguments"):
+                    if tool_call_delta.function.arguments:
+                        current_tool_call["function"]["arguments"] += tool_call_delta.function.arguments
+
+    # Remove any None values and empty tool calls
+    tool_calls = [tc for tc in tool_calls if tc and tc["id"] and tc["function"]["name"]]
 
     combined_content = "".join(content_list) if content_list else ""
     combined_reasoning = "".join(reasoning_list) if reasoning_list else None
     finish_reason = getattr(last_chunk.choices[0], "finish_reason", None) if hasattr(last_chunk, "choices") and last_chunk.choices else None
 
+    # Create ToolCall objects
+    processed_tool_calls = []
+    if tool_calls:
+        try:
+            from openai.types.chat import ChatCompletionMessageToolCall
+            for tc in tool_calls:
+                tool_call = ChatCompletionMessageToolCall(
+                    id=tc["id"],
+                    type=tc["type"],
+                    function={
+                        "name": tc["function"]["name"],
+                        "arguments": tc["function"]["arguments"]
+                    }
+                )
+                processed_tool_calls.append(tool_call)
+        except Exception as e:
+            print(f"Error processing tool call: {e}")
+
     message = ChatCompletionMessage(
         content=combined_content,
-        reasoning_content=combined_reasoning
+        role="assistant",
+        reasoning_content=combined_reasoning,
+        tool_calls=processed_tool_calls if processed_tool_calls else None
     )
 
     choice = Choice(
-        finish_reason=finish_reason,
+        finish_reason=finish_reason or "tool_calls" if processed_tool_calls else None,
         index=0,
         message=message
     )
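The first-pass loop above deals with how streaming APIs split a single tool call across chunks: the first delta for an index carries the call id and function name, while later deltas carry only argument fragments. A standalone sketch of that accumulation pattern with mocked deltas (illustrative only, not code from this repository):

from types import SimpleNamespace as NS

# Mock streamed tool-call deltas: the first chunk carries id and name, the rest only argument pieces.
deltas = [
    NS(index=0, id="call_1", function=NS(name="get_weather", arguments="")),
    NS(index=0, id=None, function=NS(name=None, arguments='{"city": "Par')),
    NS(index=0, id=None, function=NS(name=None, arguments='is"}')),
]

tool_calls, current = [], None
for d in deltas:
    if d.index is not None and d.id:
        current = {"id": d.id, "type": "function",
                   "function": {"name": d.function.name, "arguments": ""}}
        while len(tool_calls) <= d.index:
            tool_calls.append(None)
        tool_calls[d.index] = current
    elif current is not None and d.function.arguments:
        current["function"]["arguments"] += d.function.arguments

print(tool_calls)
# [{'id': 'call_1', 'type': 'function', 'function': {'name': 'get_weather', 'arguments': '{"city": "Paris"}'}}]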
@@ -528,6 +571,53 @@ def clear_history(self):
     def __str__(self):
         return f"Agent(name='{self.name}', role='{self.role}', goal='{self.goal}')"
 
+    def _process_stream_response(self, messages, temperature, start_time, formatted_tools=None, reasoning_steps=False):
+        """Process streaming response and return final response"""
+        try:
+            # Create the response stream
+            response_stream = client.chat.completions.create(
+                model=self.llm,
+                messages=messages,
+                temperature=temperature,
+                tools=formatted_tools if formatted_tools else None,
+                stream=True
+            )
+
+            full_response_text = ""
+            reasoning_content = ""
+            chunks = []
+
+            # Create Live display with proper configuration
+            with Live(
+                display_generating("", start_time),
+                console=self.console,
+                refresh_per_second=4,
+                transient=True,
+                vertical_overflow="ellipsis",
+                auto_refresh=True
+            ) as live:
+                for chunk in response_stream:
+                    chunks.append(chunk)
+                    if chunk.choices[0].delta.content:
+                        full_response_text += chunk.choices[0].delta.content
+                        live.update(display_generating(full_response_text, start_time))
+
+                    # Update live display with reasoning content if enabled
+                    if reasoning_steps and hasattr(chunk.choices[0].delta, "reasoning_content"):
+                        rc = chunk.choices[0].delta.reasoning_content
+                        if rc:
+                            reasoning_content += rc
+                            live.update(display_generating(f"{full_response_text}\n[Reasoning: {reasoning_content}]", start_time))
+
+            # Clear the last generating display with a blank line
+            self.console.print()
+            final_response = process_stream_chunks(chunks)
+            return final_response
+
+        except Exception as e:
+            display_error(f"Error in stream processing: {e}")
+            return None
+
     def _chat_completion(self, messages, temperature=0.2, tools=None, stream=True, reasoning_steps=False):
         start_time = time.time()
         logging.debug(f"{self.name} sending messages to LLM: {messages}")
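The new `_process_stream_response` helper renders partial output with Rich's `Live` display while chunks arrive. A self-contained sketch of that display pattern, using a fake token stream and plain `rich` renderables in place of PraisonAI's `display_generating` helper:

import time
from rich.console import Console
from rich.live import Live
from rich.text import Text

console = Console()

def fake_stream():
    # Stand-in for an LLM token stream.
    for token in "Streaming tokens render incrementally in place.".split():
        time.sleep(0.2)
        yield token + " "

full_text = ""
with Live(Text(""), console=console, refresh_per_second=4, transient=True) as live:
    for token in fake_stream():
        full_text += token
        live.update(Text(full_text))  # redraw the partial response in place

console.print(full_text)  # the final text remains after the transient display clears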
@@ -554,20 +644,31 @@ def _chat_completion(self, messages, temperature=0.2, tools=None, stream=True, reasoning_steps=False):
                 logging.warning(f"Tool {tool} not recognized")
 
         try:
-            initial_response = client.chat.completions.create(
-                model=self.llm,
-                messages=messages,
-                temperature=temperature,
-                tools=formatted_tools if formatted_tools else None,
-                stream=False
-            )
+            if stream:
+                # Process as streaming response with formatted tools
+                final_response = self._process_stream_response(
+                    messages,
+                    temperature,
+                    start_time,
+                    formatted_tools=formatted_tools if formatted_tools else None,
+                    reasoning_steps=reasoning_steps
+                )
+            else:
+                # Process as regular non-streaming response
+                final_response = client.chat.completions.create(
+                    model=self.llm,
+                    messages=messages,
+                    temperature=temperature,
+                    tools=formatted_tools if formatted_tools else None,
+                    stream=False
+                )
 
-            tool_calls = getattr(initial_response.choices[0].message, 'tool_calls', None)
+            tool_calls = getattr(final_response.choices[0].message, 'tool_calls', None)
 
             if tool_calls:
                 messages.append({
-                    "role": "assistant",
-                    "content": initial_response.choices[0].message.content,
+                    "role": "assistant",
+                    "content": final_response.choices[0].message.content,
                     "tool_calls": tool_calls
                 })
 
@@ -590,55 +691,24 @@ def _chat_completion(self, messages, temperature=0.2, tools=None, stream=True, reasoning_steps=False):
                         "content": results_str
                     })
 
-            if stream:
-                response_stream = client.chat.completions.create(
-                    model=self.llm,
-                    messages=messages,
-                    temperature=temperature,
-                    stream=True
-                )
-                full_response_text = ""
-                reasoning_content = ""
-                chunks = []
-
-                # Create Live display with proper configuration
-                with Live(
-                    display_generating("", start_time),
-                    console=self.console,
-                    refresh_per_second=4,
-                    transient=True,
-                    vertical_overflow="ellipsis",
-                    auto_refresh=True
-                ) as live:
-                    for chunk in response_stream:
-                        chunks.append(chunk)
-                        if chunk.choices[0].delta.content:
-                            full_response_text += chunk.choices[0].delta.content
-                            live.update(display_generating(full_response_text, start_time))
-
-                        # Update live display with reasoning content if enabled
-                        if reasoning_steps and hasattr(chunk.choices[0].delta, "reasoning_content"):
-                            rc = chunk.choices[0].delta.reasoning_content
-                            if rc:
-                                reasoning_content += rc
-                                live.update(display_generating(f"{full_response_text}\n[Reasoning: {reasoning_content}]", start_time))
-
-                # Clear the last generating display with a blank line
-                self.console.print()
-
-                final_response = process_stream_chunks(chunks)
-                return final_response
-            else:
-                if tool_calls:
+                # Get final response after tool calls
+                if stream:
+                    final_response = self._process_stream_response(
+                        messages,
+                        temperature,
+                        start_time,
+                        formatted_tools=formatted_tools if formatted_tools else None,
+                        reasoning_steps=reasoning_steps
+                    )
+                else:
                     final_response = client.chat.completions.create(
                         model=self.llm,
                         messages=messages,
                         temperature=temperature,
                         stream=False
                     )
-                    return final_response
-                else:
-                    return initial_response
+
+            return final_response
 
         except Exception as e:
             display_error(f"Error in chat completion: {e}")
@@ -758,8 +828,7 @@ def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pyd
 
         tool_calls = getattr(response.choices[0].message, 'tool_calls', None)
         response_text = response.choices[0].message.content.strip()
-
-        if tool_calls:
+        if tool_calls: ## TODO: Most likely this tool call is already called in _chat_completion, so maybe we can remove this.
             messages.append({
                 "role": "assistant",
                 "content": response_text,

agents/praisonaiagents/llm/llm.py

+2
@@ -17,6 +17,8 @@
 from rich.console import Console
 from rich.live import Live
 
+# TODO: Include in-build tool calling in LLM class
+# TODO: Restructure so that duplicate calls are not made (Sync with agent.py)
 class LLMContextLengthExceededException(Exception):
     """Raised when LLM context length is exceeded"""
     def __init__(self, message: str):

agents/pyproject.toml

+1 −1
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "praisonaiagents"
-version = "0.0.54"
+version = "0.0.55"
 description = "Praison AI agents for completing complex tasks with Self Reflection Agents"
 authors = [
     { name="Mervin Praison" }

agents/uv.lock

+1 −1
Some generated files are not rendered by default.

agents/wikipedia-agent.py

+1
@@ -7,6 +7,7 @@
     self_reflect=True,
     min_reflect=3,
     max_reflect=5,
+    llm="gpt-4o-mini"
 )
 agent.start(
     "What is the history of AI?"

0 commit comments