Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 9 additions & 3 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,12 @@ llm_contents_report.json
/user_questions.txt
/traces/

evalate_threads/evaluate.py
evalate_threads/quick_test.py
evalate_threads/test_adk_intergration.py
/evaluate_threads/evaluate.py
/evaluate_threads/quick_test.py
/evaluate_threads/test_adk_intergration.py

agents/matmaster_agent/.adk/

agents/matmaster_agent/test_mat.evalset.json
evaluate_threads/test_adk_integration.py
tests/
250 changes: 250 additions & 0 deletions agents/langgraph/progressive_agent_demo/agent.ipynb

Large diffs are not rendered by default.

41 changes: 41 additions & 0 deletions agents/langgraph/progressive_agent_demo/agent.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
import os
from dotenv import load_dotenv
from loguru import logger
from langchain.chat_models import init_chat_model

from typing import Annotated

from typing_extensions import TypedDict

from langgraph.graph import StateGraph, START, END
from langgraph.graph.message import add_messages


class State(TypedDict):
    """Graph state for the chatbot: one key holding the running conversation.

    The ``add_messages`` reducer in the annotation makes LangGraph *append*
    incoming messages to the list when merging state updates, instead of
    overwriting the whole key.
    """

    messages: Annotated[list, add_messages]


graph_builder = StateGraph(State)

# Load variables from .env before the Azure environment lookup below.
load_dotenv()

# NOTE(review): os.environ[...] raises KeyError if
# AZURE_OPENAI_DEPLOYMENT_NAME is absent from the environment/.env —
# confirm deployment configuration is always present.
llm = init_chat_model(
    "azure_openai:gpt-4.1",
    azure_deployment=os.environ["AZURE_OPENAI_DEPLOYMENT_NAME"],
)

def chatbot(state: State):
    """Run one LLM turn over the accumulated conversation.

    Returns a partial state update; the ``add_messages`` reducer on
    ``State.messages`` appends the model's reply to the history.
    """
    reply = llm.invoke(state["messages"])
    return {"messages": [reply]}

# Wire the single-node graph: START -> chatbot -> END.
graph_builder.add_node("chatbot", chatbot)
graph_builder.add_edge(START, "chatbot")
graph_builder.add_edge("chatbot", END)

graph = graph_builder.compile()

if __name__ == "__main__":
    # Smoke test: send one user turn through the compiled graph and
    # print the resulting state (original messages plus the LLM reply).
    res = graph.invoke({"messages": [{"role": "user", "content": "Hello, how are you?"}]})
    print(res)
110 changes: 110 additions & 0 deletions agents/langgraph/progressive_agent_demo/main.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,110 @@
"""
agent 调用一个复杂参数的工具 abacus_prepare,然后用 progressive 的方式进行优化参数填写的例子

"""

import os
from dotenv import load_dotenv
from pydantic import BaseModel
from pathlib import Path
from loguru import logger

# Load .env before importing/constructing clients that read credentials
# from the environment (Azure OpenAI, mem0).
load_dotenv()

from langchain_openai import AzureChatOpenAI
from mem0 import MemoryClient

# NOTE(review): MemoryClient() presumably reads its API key from the
# environment — confirm the required variable is present in .env.
memory_client = MemoryClient()



class AbacusPrepareInput(BaseModel):
    # Argument bundle for abacus_prepare (ABACUS input preparation).
    # All fields are declared required here. NOTE(review):
    # test_bare_toolcall() below passes a plain dict (without "name")
    # straight to abacus_prepare, so this model is never actually
    # instantiated/validated in this file — confirm intended usage.
    stru_file: Path
    stru_type: str
    job_type: str
    lcao: bool
    nspin: int
    soc: bool
    dftu: bool
    dftu_param: dict
    init_mag: dict
    afm: bool
    extra_input: dict
    name: str

def abacus_prepare(input: AbacusPrepareInput):
    """Stub tool body: log the received arguments and return a marker string."""
    logger.info(input)
    return "abacus_prepare"

def test_llm():
    """Smoke-test the Azure OpenAI chat deployment configured in the environment."""
    deployment = os.environ.get("AZURE_OPENAI_DEPLOYMENT_NAME")
    print(deployment)

    model = AzureChatOpenAI(deployment_name=deployment)

    # Ask the model to identify itself (prompt kept verbatim).
    answer = model.invoke("你是什么模型,告诉我你是 gpt-4 还是 gpt-5,精确版本号")
    print(answer)


def test_bare_toolcall():
    """Call abacus_prepare directly with a raw dict (bypasses pydantic validation)."""
    payload = {
        "stru_file": "./demo.cif",
        "stru_type": "cif",
        "job_type": "scf",
        "lcao": True,
        "nspin": 1,
        "soc": False,
        "dftu": True,
        "dftu_param": {"Fe": 7.0, "Ni": ("d", 4.0)},
        "init_mag": {"Fe": 2.0, "Ni": 2.0},
        "afm": False,
        "extra_input": {"smearing_sigma": 0.01, "dft_functional": "pbe"},
    }
    abacus_prepare(payload)


def test_mem0():
    """Query the mem0 service for the "ting" user's stored breakfast memories.

    The memories were seeded once via ``memory_client.add(...)`` with
    ``user_id="ting"`` and ``metadata={"category": "breakfast"}``; only the
    read path is exercised here.
    """
    query = "What can I cook?"

    hits = memory_client.search(
        query,
        user_id="ting",
        metadata={"category": "breakfast"},
    )
    print("search_result")
    print(hits)

    stored = memory_client.get_all(user_id="ting")
    print("all memories")
    print(stored)



def main():
    """Entry point: run the currently enabled experiment.

    Uncomment the other calls to exercise the LLM or bare tool-call paths.
    """
    # test_llm()
    # test_bare_toolcall()
    test_mem0()


if __name__ == "__main__":
    main()


71 changes: 71 additions & 0 deletions agents/langgraph/progressive_agent_demo/tools.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,71 @@
from pydantic import BaseModel
from pathlib import Path
from langchain_core.tools import tool
from loguru import logger
from pprint import pprint
from typing import Optional

class AbacusPrepareInput(BaseModel):
    """Validated arguments for ABACUS input preparation.

    The first seven fields are required; the remaining ones are optional
    refinements and default to None.
    """

    stru_file: Path  # path to the structure file (e.g. a .cif file)
    stru_type: str   # structure file format, e.g. "cif"
    job_type: str    # calculation type, e.g. "scf"
    lcao: bool
    nspin: int
    soc: bool
    dftu: bool
    # In pydantic v2 an Optional annotation alone does NOT make a field
    # optional — it must still be supplied by the caller. Add explicit
    # None defaults so these fields are genuinely optional; callers that
    # already pass every field (abacus_prepare_tool) are unaffected.
    dftu_param: Optional[dict] = None
    init_mag: Optional[dict] = None
    afm: Optional[bool] = None
    extra_input: Optional[dict] = None
    name: Optional[str] = None

def abacus_prepare(input: AbacusPrepareInput):
    """Stub tool body: pretty-print the validated arguments and return a marker."""
    dumped = input.model_dump()
    pprint(dumped)
    return "abacus_prepare"

@tool
def abacus_prepare_tool(
    stru_file: Path,
    stru_type: str,
    job_type: str,
    lcao: bool,
    nspin: int,
    soc: bool,
    dftu: bool,
    # The original signature used mutable defaults (dict = {}), which are
    # created once at definition time and shared across every call — a
    # classic Python pitfall. Use None sentinels and build a fresh dict per
    # call instead; omitting these arguments still yields {} downstream.
    dftu_param: Optional[dict] = None,
    init_mag: Optional[dict] = None,
    afm: bool = False,
    extra_input: Optional[dict] = None,
    name: str = "",
):
    """
    Prepare ABACUS input file directory from structure file and provided information.
    """
    input = AbacusPrepareInput(
        stru_file=stru_file,
        stru_type=stru_type,
        job_type=job_type,
        lcao=lcao,
        nspin=nspin,
        soc=soc,
        dftu=dftu,
        dftu_param={} if dftu_param is None else dftu_param,
        init_mag={} if init_mag is None else init_mag,
        afm=afm,
        extra_input={} if extra_input is None else extra_input,
        name=name,
    )
    return abacus_prepare(input)

if __name__ == "__main__":
input_dict = {
"stru_file": "./demo.cif",
"stru_type": "cif",
"job_type": "scf",
"lcao": True,
"nspin": 1,
"soc": False,
"dftu": True,
}
res = abacus_prepare_tool.invoke(input_dict)
print(res)
10 changes: 10 additions & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,16 @@ dependencies = [
"anyio>=4.10.0",
"pytest-asyncio>=1.1.0",
"litellm>=1.74.15.post2",
"langgraph>=0.6.6",
"langchain-azure-ai>=0.1.4",
"langchain-litellm>=0.2.2",
"loguru>=0.7.3",
"mem0ai>=0.1.116",
"langsmith>=0.4.20",
"langchain[azure-ai,openai]>=0.3.27",
"ipykernel>=6.30.1",
"pip>=25.2",
"langchain-tavily>=0.2.11",
]

[tool.setuptools]
Expand Down
Loading