
Commit fab4aca

fix: support chat without stream

yugasun authored Mar 10, 2025
1 parent 99fee4d
Showing 2 changed files with 10 additions and 8 deletions.
src/praisonai-agents/praisonaiagents/agent/agent.py: 8 changes (4 additions, 4 deletions)
@@ -713,7 +713,7 @@ def _chat_completion(self, messages, temperature=0.2, tools=None, stream=True, r
             display_error(f"Error in chat completion: {e}")
             return None
 
-    def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False):
+    def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False, stream=True):
         # Log all parameter values when in debug mode
         if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
             param_info = {
@@ -842,7 +842,7 @@ def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pyd
                     agent_tools=agent_tools
                 )
 
-            response = self._chat_completion(messages, temperature=temperature, tools=tools if tools else None, reasoning_steps=reasoning_steps)
+            response = self._chat_completion(messages, temperature=temperature, tools=tools if tools else None, reasoning_steps=reasoning_steps, stream=stream)
             if not response:
                 return None
@@ -879,7 +879,7 @@ def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pyd
                             "content": "Function returned an empty output"
                         })
 
-                response = self._chat_completion(messages, temperature=temperature)
+                response = self._chat_completion(messages, temperature=temperature, stream=stream)
                 if not response:
                     return None
                 response_text = response.choices[0].message.content.strip()
Expand Down Expand Up @@ -1129,7 +1129,7 @@ async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None
model=self.llm,
messages=messages,
temperature=temperature,
tools=formatted_tools
tools=formatted_tools,
)
result = await self._achat_completion(response, tools)
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
Expand Down
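
Taken together, the agent.py changes thread a new stream flag (defaulting to True) from chat() into both _chat_completion() calls, so a caller can request a single non-streamed completion. A minimal usage sketch; the import path and Agent constructor arguments below are assumptions for illustration, and only the chat() signature comes from this diff:

    from praisonaiagents import Agent  # import path assumed

    # Constructor arguments here are hypothetical placeholders.
    agent = Agent(instructions="You are a helpful assistant")

    # stream defaults to True (the previous behavior); pass stream=False
    # to receive the full response as one non-streamed completion.
    reply = agent.chat("Summarize this repository in two sentences", stream=False)
    print(reply)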
src/praisonai-agents/praisonaiagents/agents/agents.py: 10 changes (6 additions, 4 deletions)
@@ -45,7 +45,7 @@ def process_video(video_path: str, seconds_per_frame=2):
     return base64_frames
 
 class PraisonAIAgents:
-    def __init__(self, agents, tasks=None, verbose=0, completion_checker=None, max_retries=5, process="sequential", manager_llm=None, memory=False, memory_config=None, embedder=None, user_id=None, max_iter=10):
+    def __init__(self, agents, tasks=None, verbose=0, completion_checker=None, max_retries=5, process="sequential", manager_llm=None, memory=False, memory_config=None, embedder=None, user_id=None, max_iter=10, stream=True):
         # Add check at the start if memory is requested
         if memory:
             try:
@@ -68,15 +68,16 @@ def __init__(self, agents, tasks=None, verbose=0, completion_checker=None, max_r
         for agent in agents:
             agent.user_id = self.user_id
 
-        self.agents = agents
-        self.tasks = {}
+        self.agents: List[Agent] = agents
+        self.tasks: Dict[int, Task] = {}
         if max_retries < 3:
             max_retries = 3
         self.completion_checker = completion_checker if completion_checker else self.default_completion_checker
         self.task_id_counter = 0
         self.verbose = verbose
         self.max_retries = max_retries
         self.process = process
+        self.stream = stream
 
         # Check for manager_llm in environment variable if not provided
         self.manager_llm = manager_llm or os.getenv('OPENAI_MODEL_NAME', 'gpt-4o')
@@ -665,7 +666,8 @@ def _get_multimodal_message(text_prompt, images):
                     task_prompt,
                     tools=task.tools,
                     output_json=task.output_json,
-                    output_pydantic=task.output_pydantic
+                    output_pydantic=task.output_pydantic,
+                    stream=self.stream,
                 )
 
                 if agent_output:
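
At the orchestrator level, PraisonAIAgents now stores the flag as self.stream and forwards it to each agent.chat() call made while executing tasks. A minimal sketch under the same caveats: the import path, the Agent and Task constructor arguments, and the start() entry point are assumptions, while the stream keyword in __init__ is what this diff adds:

    from praisonaiagents import Agent, Task, PraisonAIAgents  # import path assumed

    agent = Agent(instructions="You are a researcher")               # hypothetical args
    task = Task(description="List three key findings", agent=agent)  # hypothetical args

    # stream=False disables streaming for every chat() call this run makes;
    # omitting it keeps the default stream=True.
    crew = PraisonAIAgents(agents=[agent], tasks=[task], stream=False)
    result = crew.start()  # start() is assumed; only __init__ appears in this diff
    print(result)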
