Commit

Merge remote-tracking branch 'origin/multi-agents' into multi-agents
yhjun1026 committed Dec 27, 2023
2 parents 5485918 + 07828aa · commit d6847ea
Showing 11 changed files with 61 additions and 52 deletions.
30 changes: 16 additions & 14 deletions dbgpt/agent/agents/agent.py
@@ -6,9 +6,11 @@
 from typing import Any, Dict, List, Optional, Union
 
 from ..memory.gpts_memory import GptsMemory
+from dbgpt.core import LLMClient
+from dbgpt.core.interface.llm import ModelMetadata
 
 
 class Agent:
     """
     An interface for AI agent.
@@ -48,25 +50,25 @@ def describe(self):
     async def a_send(
         self,
         message: Union[Dict, str],
-        recipient: "Agent",
-        reviewer: "Agent",
+        recipient: Agent,
+        reviewer: Agent,
         request_reply: Optional[bool] = True,
         is_recovery: Optional[bool] = False,
     ):
         """(Abstract async method) Send a message to another agent."""
 
     async def a_receive(
         self,
-        message: Union[Dict],
-        sender: "Agent",
-        reviewer: "Agent",
+        message: Optional[Dict],
+        sender: Agent,
+        reviewer: Agent,
         request_reply: Optional[bool] = None,
         silent: Optional[bool] = False,
         is_recovery: Optional[bool] = False,
     ):
         """(Abstract async method) Receive a message from another agent."""
 
-    async def a_review(self, message: Union[Dict, str], censored: "Agent"):
+    async def a_review(self, message: Union[Dict, str], censored: Agent):
         """
         Args:
@@ -84,21 +86,21 @@ async def a_generate_reply(
         self,
         message: Optional[Dict],
         sender: Agent,
-        reviewer: "Agent",
+        reviewer: Agent,
         silent: Optional[bool] = False,
         **kwargs,
     ) -> Union[str, Dict, None]:
         """(Abstract async method) Generate a reply based on the received messages.
         Args:
-            messages (list[dict]): a list of messages received.
+            messages (Optional[Dict]): a dict of messages received from other agents.
             sender: sender of an Agent instance.
         Returns:
             str or dict or None: the generated reply. If None, no reply is generated.
         """
 
     async def a_reasoning_reply(
-        self, messages: Union[List[str]]
+        self, messages: Optional[List[Dict]]
     ) -> Union[str, Dict, None]:
         """
         Based on the requirements of the current agent, reason about the current task goal through LLM
@@ -112,7 +114,7 @@ async def a_reasoning_reply(
     async def a_action_reply(
         self,
         messages: Optional[str],
-        sender: "Agent",
+        sender: Agent,
         **kwargs,
     ) -> Union[str, Dict, None]:
         """
@@ -128,8 +130,8 @@ async def a_action_reply(
     async def a_verify_reply(
         self,
         message: Optional[Dict],
-        sender: "Agent",
-        reviewer: "Agent",
+        sender: Agent,
+        reviewer: Agent,
         **kwargs,
     ) -> Union[str, Dict, None]:
         """
@@ -151,7 +153,7 @@ class AgentResource:
     introduce: str
 
     @staticmethod
-    def from_dict(d: Dict[str, Any]) -> AgentResource:
+    def from_dict(d: Dict[str, Any]) -> Optional[AgentResource]:
         if d is None:
             return None
         return AgentResource(
@@ -167,7 +169,7 @@ def to_dict(self) -> Dict[str, Any]:
 @dataclass
 class AgentContext:
     conv_id: str
-    llm_provider: Optional["LLMClient"]
+    llm_provider: LLMClient
 
     gpts_name: Optional[str] = None
     resource_db: Optional[AgentResource] = None
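
Note on the agent.py hunks above: the quoted forward references such as "Agent" are replaced with bare Agent annotations inside the Agent class itself. That only works when annotation evaluation is postponed; the standalone sketch below (not the DB-GPT source, and the __future__ import is an assumption about the module) shows why.

# Minimal sketch, not the DB-GPT module. With postponed annotation evaluation
# (PEP 563) the annotations below are never evaluated at definition time, so an
# unquoted self-reference is safe. Without the __future__ import,
# `recipient: Agent` would raise NameError, because the name Agent is not
# bound until the class body finishes executing.
from __future__ import annotations

from typing import Dict, Optional, Union


class Agent:
    async def a_send(
        self,
        message: Union[Dict, str],
        recipient: Agent,   # unquoted self-reference, resolved lazily
        reviewer: Agent,
        request_reply: Optional[bool] = True,
    ) -> None:
        ...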
22 changes: 12 additions & 10 deletions dbgpt/agent/agents/base_agent.py
@@ -33,9 +33,9 @@ class ConversableAgent(Agent):
     def __init__(
         self,
         name: str,
-        describe: Optional[str] = DEFAULT_SYSTEM_MESSAGE,
+        describe: str = DEFAULT_SYSTEM_MESSAGE,
         memory: GptsMemory = GptsMemory(),
-        agent_context: Optional[AgentContext] = None,
+        agent_context: AgentContext = None,
         system_message: Optional[str] = DEFAULT_SYSTEM_MESSAGE,
         is_termination_msg: Optional[Callable[[Dict], bool]] = None,
         max_consecutive_auto_reply: Optional[int] = None,
@@ -55,7 +55,7 @@ def __init__(
             if is_termination_msg is not None
             else (lambda x: x.get("content") == "TERMINATE")
         )
 
+        self.client = AIWrapper(llm_client=agent_context.llm_provider)
 
         self.human_input_mode = human_input_mode
@@ -72,8 +72,9 @@ def __init__(
         self.dialogue_memory_rounds = 5
         self._default_auto_reply = default_auto_reply
         self._reply_func_list = []
+        self._max_consecutive_auto_reply_dict = {}
 
-        self.agent_context: AgentContext = agent_context
+        self.agent_context = agent_context
 
     def register_reply(
         self,
@@ -136,7 +137,8 @@ def max_consecutive_auto_reply(self, sender: Optional[Agent] = None) -> int:
         )
 
     @property
-    def chat_messages(self) -> Dict[Agent, List[Dict]]:
+    # def chat_messages(self) -> Dict[Agent, List[Dict]]:
+    def chat_messages(self) -> Any:
         """A dictionary of conversations from agent to list of messages."""
         all_gpts_messages = self.memory.message_memory.get_by_agent(
             self.agent_context.conv_id, self.name
@@ -189,13 +191,13 @@ def _message_to_dict(message: Union[Dict, str]):
         else:
             return dict(message)
 
-    def append_rely_message(self, message: Optional[Dict], role) -> bool:
+    def append_rely_message(self, message: Union[Dict, str], role) -> None:
         message = self._message_to_dict(message)
         message["role"] = role
         # create oai message to be appended to the oai conversation that can be passed to oai directly.
         self._rely_messages.append(message)
 
-    def reset_rely_message(self) -> bool:
+    def reset_rely_message(self) -> None:
         # create oai message to be appended to the oai conversation that can be passed to oai directly.
         self._rely_messages = []
 
@@ -355,9 +357,9 @@ def _process_action_reply(self, action_reply: Optional[Union[str, Dict, None]]):
         return dict(action_reply)
 
     def _gpts_message_to_ai_message(
-        self, gpts_messages: List[GptsMessage]
+        self, gpts_messages: Optional[List[GptsMessage]]
     ) -> List[Dict]:
-        oai_messages: list[dict] = []
+        oai_messages: List[Dict] = []
         ###Based on the current agent, all messages received are user, and all messages sent are assistant.
         for item in gpts_messages:
             role = ""
@@ -668,7 +670,7 @@ def _select_llm_model(self, old_model: str = None):
         return now_model.model
 
     async def a_reasoning_reply(
-        self, messages: Union[List[Dict]]
+        self, messages: Optional[List[Dict]] = None
     ) -> Union[str, Dict, None]:
         """(async) Reply based on the conversation history and the sender.
         Args:
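
The base_agent.py hunks above also correct two return annotations: append_rely_message and reset_rely_message only mutate internal state, so -> bool becomes -> None. A minimal standalone sketch of that pattern follows (hypothetical class, not the DB-GPT implementation):

from typing import Dict, List, Union


class RelyMessageStore:
    # Hypothetical illustration of the -> None pattern used above: methods that
    # only mutate state and return nothing are annotated -> None, so a type
    # checker flags any caller that tries to use their result.
    def __init__(self) -> None:
        self._rely_messages: List[Dict] = []

    def append_rely_message(self, message: Union[Dict, str], role: str) -> None:
        if isinstance(message, str):
            message = {"content": message}
        message["role"] = role
        self._rely_messages.append(message)

    def reset_rely_message(self) -> None:
        self._rely_messages = []


store = RelyMessageStore()
store.append_rely_message("hello", role="user")
store.reset_rely_message()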
8 changes: 4 additions & 4 deletions dbgpt/agent/agents/expand/code_assistant_agent.py
@@ -6,7 +6,7 @@
 from dbgpt.util.string_utils import str_to_bool
 
 from ...memory.gpts_memory import GptsMemory
-from ..agent import Agent
+from ..agent import Agent, AgentContext
 from ..base_agent import ConversableAgent
 from dbgpt.core.interface.message import ModelMessageRoleType
 
@@ -59,8 +59,8 @@ class CodeAssistantAgent(ConversableAgent):
 
     def __init__(
         self,
-        agent_context: "AgentContext",
-        memory: GptsMemory = None,
+        agent_context: AgentContext,
+        memory: Optional[GptsMemory] = None,
         describe: Optional[str] = DEFAULT_DESCRIBE,
         is_termination_msg: Optional[Callable[[Dict], bool]] = None,
         max_consecutive_auto_reply: Optional[int] = None,
@@ -115,7 +115,7 @@ async def generate_code_execution_reply(
         self,
         message: Optional[str] = None,
         sender: Optional[Agent] = None,
-        reviewer: "Agent" = None,
+        reviewer: Optional[Agent] = None,
         config: Optional[Union[Dict, Literal[False]]] = None,
     ):
         """Generate a reply using code execution."""
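
The change from memory: GptsMemory = None to memory: Optional[GptsMemory] = None follows PEP 484, which discourages implicit Optional; recent mypy versions treat a None default on a non-Optional annotation as an error. A small sketch of the explicit form (dummy Memory class, not the DB-GPT types):

from typing import Optional


class Memory:
    """Stand-in for GptsMemory, purely for illustration."""


# Implicit Optional (flagged by modern type checkers):
#     def make_agent(memory: Memory = None): ...
# Explicit Optional, as in the diff above:
def make_agent(memory: Optional[Memory] = None) -> Memory:
    # Fall back to a fresh Memory when the caller passes nothing.
    return memory if memory is not None else Memory()


default_memory = make_agent()           # uses the default
shared_memory = make_agent(Memory())    # caller-supplied instance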
6 changes: 3 additions & 3 deletions dbgpt/agent/agents/expand/dashboard_assistant_agent.py
@@ -5,7 +5,7 @@
 from dbgpt.util.json_utils import find_json_objects
 
 from ...memory.gpts_memory import GptsMemory
-from ..agent import Agent
+from ..agent import Agent, AgentContext
 from ..base_agent import ConversableAgent
 
 try:
@@ -54,7 +54,7 @@ class DashboardAssistantAgent(ConversableAgent):
     def __init__(
         self,
         memory: GptsMemory,
-        agent_context: "AgentContext",
+        agent_context: AgentContext,
         describe: Optional[str] = DEFAULT_DESCRIBE,
         is_termination_msg: Optional[Callable[[Dict], bool]] = None,
         max_consecutive_auto_reply: Optional[int] = None,
@@ -82,7 +82,7 @@ async def generate_dashboard_reply(
         self,
         message: Optional[str] = None,
         sender: Optional[Agent] = None,
-        reviewer: "Agent" = None,
+        reviewer: Optional[Agent] = None,
         config: Optional[Union[Dict, Literal[False]]] = None,
     ):
         """Generate a reply using code execution."""
6 changes: 3 additions & 3 deletions dbgpt/agent/agents/expand/data_scientist_agent.py
@@ -8,7 +8,7 @@
 from dbgpt.util.json_utils import find_json_objects
 
 from ...memory.gpts_memory import GptsMemory
-from ..agent import Agent
+from ..agent import Agent, AgentContext
 from ..base_agent import ConversableAgent
 
 try:
@@ -56,7 +56,7 @@ class DataScientistAgent(ConversableAgent):
     def __init__(
         self,
         memory: GptsMemory,
-        agent_context: "AgentContext",
+        agent_context: AgentContext,
         describe: Optional[str] = DEFAULT_DESCRIBE,
         is_termination_msg: Optional[Callable[[Dict], bool]] = None,
         max_consecutive_auto_reply: Optional[int] = None,
@@ -93,7 +93,7 @@ async def generate_analysis_chart_reply(
         self,
         message: Optional[str] = None,
         sender: Optional[Agent] = None,
-        reviewer: "Agent" = None,
+        reviewer: Optional[Agent] = None,
         config: Optional[Union[Dict, Literal[False]]] = None,
     ):
         """Generate a reply using code execution."""
6 changes: 3 additions & 3 deletions dbgpt/agent/agents/expand/plugin_assistant_agent.py
@@ -6,7 +6,7 @@
 from dbgpt.util.json_utils import find_json_objects
 
 from ...memory.gpts_memory import GptsMemory
-from ..agent import Agent
+from ..agent import Agent, AgentContext
 from ..base_agent import ConversableAgent
 
 try:
@@ -65,7 +65,7 @@ class PluginAgent(ConversableAgent):
     def __init__(
         self,
         memory: GptsMemory,
-        agent_context: "AgentContext",
+        agent_context: AgentContext,
         describe: Optional[str] = DEFAULT_DESCRIBE,
         is_termination_msg: Optional[Callable[[Dict], bool]] = None,
         max_consecutive_auto_reply: Optional[int] = None,
@@ -98,7 +98,7 @@ async def tool_call(
         self,
         message: Optional[str] = None,
         sender: Optional[Agent] = None,
-        reviewer: "Agent" = None,
+        reviewer: Optional[Agent] = None,
         config: Optional[Union[Dict, Literal[False]]] = None,
     ):
         """Generate a reply using code execution."""
6 changes: 3 additions & 3 deletions dbgpt/agent/agents/expand/sql_assistant_agent.py
@@ -5,7 +5,7 @@
 
 from dbgpt.agent.plugin.commands.command_mange import ApiCall
 from ...memory.gpts_memory import GptsMemory
-from ..agent import Agent
+from ..agent import Agent, AgentContext
 
 try:
     from termcolor import colored
@@ -48,7 +48,7 @@ class SQLAssistantAgent(ConversableAgent):
     def __init__(
         self,
         memory: GptsMemory,
-        agent_context: "AgentContext",
+        agent_context: AgentContext,
         describe: Optional[str] = DEFAULT_DESCRIBE,
         is_termination_msg: Optional[Callable[[Dict], bool]] = None,
         max_consecutive_auto_reply: Optional[int] = None,
@@ -84,7 +84,7 @@ async def generate_analysis_chart_reply(
         self,
         message: Optional[str] = None,
         sender: Optional[Agent] = None,
-        reviewer: "Agent" = None,
+        reviewer: Optional[Agent] = None,
         config: Optional[Union[Dict, Literal[False]]] = None,
     ):
         """Generate a reply using code execution."""
4 changes: 2 additions & 2 deletions dbgpt/agent/agents/llm/llm_client.py
@@ -1,7 +1,7 @@
 import json
 import logging
 import traceback
-from typing import Callable, Dict, Optional
+from typing import Callable, Dict, Optional, Union
 
 from dbgpt.core import LLMClient
 from dbgpt.core.interface.output_parser import BaseOutputParser
@@ -34,7 +34,7 @@ def __init__(
     @classmethod
     def instantiate(
         cls,
-        template: str | Callable | None,
+        template: Optional[Union[str, Callable]] = None,
        context: Optional[Dict] = None,
         allow_format_str_template: Optional[bool] = False,
     ):
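
The llm_client.py hunk swaps template: str | Callable | None for Optional[Union[str, Callable]] and adds Union to the imports. The X | Y union syntax (PEP 604) is only usable at runtime on Python 3.10+, so spelling the type with typing.Optional and typing.Union keeps the signature importable on older interpreters. A small sketch of the equivalent forms (hypothetical function, not the AIWrapper code):

from typing import Callable, Optional, Union


def render_template(
    template: Optional[Union[str, Callable[[], str]]] = None,
) -> str:
    # Works on Python 3.8+; `template: str | Callable[[], str] | None` would
    # need Python 3.10+ (or postponed annotation evaluation) to import.
    if template is None:
        return ""
    if callable(template):
        return template()
    return template


print(render_template("hello"))
print(render_template(lambda: "from a callable"))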
2 changes: 1 addition & 1 deletion dbgpt/agent/agents/plan_group_chat.py
@@ -250,7 +250,7 @@ def __init__(
         self.planner = planner
 
     async def a_reasoning_reply(
-        self, messages: Union[List[Dict]]
+        self, messages: Optional[List[Dict]] = None
     ) -> Union[str, Dict, None]:
         if messages is None or len(messages) <= 0:
             message = None
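
Both a_reasoning_reply signatures above now default messages to None, so the caller may omit the argument and the body guards against an empty or missing history. A minimal sketch of that guard (hypothetical reasoning function, not the PlanChat implementation):

import asyncio
from typing import Dict, List, Optional, Union


async def a_reasoning_reply(
    messages: Optional[List[Dict]] = None,
) -> Union[str, Dict, None]:
    # Treat "no argument" and "empty history" the same way, as the diff above does.
    if messages is None or len(messages) <= 0:
        return None
    # Hypothetical: reason over the latest message only.
    return messages[-1].get("content")


print(asyncio.run(a_reasoning_reply()))                       # None
print(asyncio.run(a_reasoning_reply([{"content": "plan"}])))  # "plan"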
4 changes: 2 additions & 2 deletions dbgpt/agent/agents/planner_agent.py
@@ -83,7 +83,7 @@ def __init__(
         self,
         memory: GptsMemory,
         plan_chat: PlanChat,
-        agent_context: "AgentContext",
+        agent_context: AgentContext,
         is_termination_msg: Optional[Callable[[Dict], bool]] = None,
         max_consecutive_auto_reply: Optional[int] = None,
         human_input_mode: Optional[str] = "NEVER",
@@ -136,7 +136,7 @@ async def _a_planning(
         self,
         message: Optional[str] = None,
         sender: Optional[Agent] = None,
-        reviewer: "Agent" = None,
+        reviewer: Optional[Agent] = None,
         config: Optional[Any] = None,
     ) -> Tuple[bool, Union[str, Dict, None]]:
         json_objects = find_json_objects(message)
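
The planner hunk keeps the _a_planning return type of Tuple[bool, Union[str, Dict, None]], a (success, reply) pair. A hypothetical sketch of a reply function with that shape (the JSON parsing here is a stand-in, not dbgpt.util.json_utils.find_json_objects):

import json
from typing import Dict, Optional, Tuple, Union


def plan_from_message(message: Optional[str] = None) -> Tuple[bool, Union[str, Dict, None]]:
    # (success, reply): False with an error string when parsing fails,
    # True with the parsed plan dict when it succeeds.
    if not message:
        return False, "No planning message received."
    try:
        plan = json.loads(message)
    except json.JSONDecodeError as exc:
        return False, f"Could not parse plan JSON: {exc}"
    return True, plan


ok, reply = plan_from_message('{"tasks": ["collect data", "draw chart"]}')
print(ok, reply)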