diff --git a/src/praisonai-agents/praisonaiagents/agent/agent.py b/src/praisonai-agents/praisonaiagents/agent/agent.py
index 261fc61f..cdd75e34 100644
--- a/src/praisonai-agents/praisonaiagents/agent/agent.py
+++ b/src/praisonai-agents/praisonaiagents/agent/agent.py
@@ -713,7 +713,7 @@ def _chat_completion(self, messages, temperature=0.2, tools=None, stream=True, r
             display_error(f"Error in chat completion: {e}")
             return None

-    def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False):
+    def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False, stream=None):
         # Log all parameter values when in debug mode
         if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
             param_info = {
@@ -842,7 +842,7 @@ def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pyd
                     agent_tools=agent_tools
                 )

-                response = self._chat_completion(messages, temperature=temperature, tools=tools if tools else None, reasoning_steps=reasoning_steps)
+                response = self._chat_completion(messages, temperature=temperature, tools=tools if tools else None, reasoning_steps=reasoning_steps, stream=stream)
                 if not response:
                     return None

@@ -879,7 +879,7 @@ def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pyd
                                 "content": "Function returned an empty output"
                             })

-                        response = self._chat_completion(messages, temperature=temperature)
+                        response = self._chat_completion(messages, temperature=temperature, stream=stream)
                         if not response:
                             return None
                         response_text = response.choices[0].message.content.strip()
@@ -949,7 +949,7 @@ def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pyd
                         logging.debug(f"{self.name} reflection count {reflection_count + 1}, continuing reflection process")
                         messages.append({"role": "user", "content": "Now regenerate your response using the reflection you made"})
-                        response = self._chat_completion(messages, temperature=temperature, tools=None, stream=True)
+                        response = self._chat_completion(messages, temperature=temperature, tools=None, stream=stream)
                         response_text = response.choices[0].message.content.strip()
                         reflection_count += 1
                         continue  # Continue the loop for more reflections
@@ -1129,7 +1129,7 @@ async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None
                     model=self.llm,
                     messages=messages,
                     temperature=temperature,
-                    tools=formatted_tools
+                    tools=formatted_tools,
                 )
                 result = await self._achat_completion(response, tools)
                 if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
diff --git a/src/praisonai-agents/praisonaiagents/agents/agents.py b/src/praisonai-agents/praisonaiagents/agents/agents.py
index 3aeec8a6..9587eb7a 100644
--- a/src/praisonai-agents/praisonaiagents/agents/agents.py
+++ b/src/praisonai-agents/praisonaiagents/agents/agents.py
@@ -45,7 +45,7 @@ def process_video(video_path: str, seconds_per_frame=2):
     return base64_frames

 class PraisonAIAgents:
-    def __init__(self, agents, tasks=None, verbose=0, completion_checker=None, max_retries=5, process="sequential", manager_llm=None, memory=False, memory_config=None, embedder=None, user_id=None, max_iter=10):
+    def __init__(self, agents, tasks=None, verbose=0, completion_checker=None, max_retries=5, process="sequential", manager_llm=None, memory=False, memory_config=None, embedder=None, user_id=None, max_iter=10, stream=None):
         # Add check at the start if memory is requested
         if memory:
             try:
@@ -68,8 +68,8 @@ def __init__(self, agents, tasks=None, verbose=0, completion_checker=None, max_r
         for agent in agents:
             agent.user_id = self.user_id

-        self.agents = agents
-        self.tasks = {}
+        self.agents: List[Agent] = agents
+        self.tasks: Dict[int, Task] = {}
         if max_retries < 3:
             max_retries = 3
         self.completion_checker = completion_checker if completion_checker else self.default_completion_checker
@@ -77,6 +77,7 @@ def __init__(self, agents, tasks=None, verbose=0, completion_checker=None, max_r
         self.verbose = verbose
         self.max_retries = max_retries
         self.process = process
+        self.stream = stream

         # Check for manager_llm in environment variable if not provided
         self.manager_llm = manager_llm or os.getenv('OPENAI_MODEL_NAME', 'gpt-4o')
@@ -665,7 +666,8 @@ def _get_multimodal_message(text_prompt, images):
                         task_prompt,
                         tools=task.tools,
                         output_json=task.output_json,
-                        output_pydantic=task.output_pydantic
+                        output_pydantic=task.output_pydantic,
+                        stream=self.stream,
                     )
                     if agent_output:
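
A minimal usage sketch of the new parameter, assuming the public `Agent`/`Task`/`PraisonAIAgents` constructors otherwise behave as in the project README (the agent and task definitions below are illustrative, not part of this diff):

```python
from praisonaiagents import Agent, Task, PraisonAIAgents

# Illustrative wiring; only the stream kwarg comes from this diff.
summarizer = Agent(
    name="Summarizer",
    role="Text summarizer",
    goal="Summarize input text concisely",
)
task = Task(
    description="Summarize the latest release notes",
    agent=summarizer,
)

# stream=False is stored as self.stream and forwarded into every
# agent.chat(..., stream=self.stream) call made during task execution;
# the default stream=None leaves the previous behavior in place.
workflow = PraisonAIAgents(agents=[summarizer], tasks=[task], stream=False)
workflow.start()
```

Threading the value through `PraisonAIAgents.__init__` rather than per-task keeps a single switch for disabling token streaming across the whole workflow run.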