Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -92,3 +92,6 @@ websockets==15.0.1
Werkzeug==3.1.3
wsproto==1.2.0
zipp==3.23.0
# Memory module dependencies
mem0ai>=0.1.115
qdrant-client>=1.7.0
107 changes: 104 additions & 3 deletions slaver/agents/slaver_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@
from rich.panel import Panel
from rich.text import Text
from tools.memory import ActionStep, AgentMemory, SceneMemory
from tools.memory import MemoryManager
from tools.monitoring import AgentLogger, LogLevel, Monitor

logger = getLogger(__name__)
Expand Down Expand Up @@ -40,6 +41,7 @@ def __init__(
verbosity_level: LogLevel = LogLevel.INFO,
step_callbacks: Optional[List[Callable]] = None,
log_file: Optional[str] = None,
config: Optional[Dict[str, Any]] = None,
):
self.tools = tools
self.model = model
Expand All @@ -50,8 +52,9 @@ def __init__(
self.max_steps = max_steps
self.step_number = 0
self.state = {}
self.memory = AgentMemory()
self.scene = SceneMemory(collaborator)
self.memory = AgentMemory() # Maintain backward compatibility
self.memory_manager = MemoryManager(config=config) # New memory manager with config
self.scene = SceneMemory(collaborator) # Scene memory for environment tracking
self.logger = AgentLogger(level=verbosity_level, log_file=log_file)
self.monitor = Monitor(self.model, self.logger)
self.step_callbacks = step_callbacks if step_callbacks is not None else []
Expand Down Expand Up @@ -85,6 +88,16 @@ async def run(

if reset:
self.memory.reset()
# Record task start to new memory system
await self.memory_manager.add_message(
role="system",
content=f"Starting task: {task}",
metadata={
"task_type": "task_start",
"robot_name": self.robot_name,
"step_number": self.step_number
}
)
self.step_number = 1

self.logger.log_task(
Expand All @@ -103,12 +116,34 @@ async def run(
)
answer = await self.step(step)
if answer == "final_answer":
# Record task completion to new memory system
await self.memory_manager.add_message(
role="system",
content=f"Task completed: {self.task}",
metadata={
"task_type": "task_complete",
"robot_name": self.robot_name,
"steps_taken": self.step_number,
"message_type": "task_completion"
}
)
return "Mission accomplished"

self.collaborator.record_agent_status(self.robot_name, answer)
step.end_time = time.time()
self.step_number += 1

# Record task failure to new memory system
await self.memory_manager.add_message(
role="system",
content=f"Task failed: {self.task} - Maximum steps reached",
metadata={
"task_type": "task_failed",
"robot_name": self.robot_name,
"steps_taken": self.step_number,
"message_type": "task_failure"
}
)
return "Maximum number of attempts reached, Mission not completed"

def step(self) -> Optional[Any]:
Expand Down Expand Up @@ -208,6 +243,19 @@ async def step(self, memory_step: ActionStep) -> Union[None, Any]:
stop_sequences=["Observation:"],
)
memory_step.model_output_message = model_message

# Record model output to new memory system
await self.memory_manager.add_message(
role="assistant",
content=model_message.content or str(model_message.raw),
metadata={
"step_number": self.step_number,
"robot_name": self.robot_name,
"model_path": self.model_path,
"message_type": "model_output"
}
)

self.logger.log_markdown(
content=(
model_message.content
Expand All @@ -230,5 +278,58 @@ async def step(self, memory_step: ActionStep) -> Union[None, Any]:
return "final_answer"
else:
self.tool_call.append(current_call)

# Record tool call to new memory system
await self.memory_manager.add_message(
role="assistant",
content=f"Calling tool: {tool_name}",
metadata={
"step_number": self.step_number,
"tool_name": tool_name,
"tool_arguments": tool_arguments,
"robot_name": self.robot_name,
"message_type": "tool_call"
}
)

return await self._execute_tool_call(tool_name, tool_arguments, memory_step)
observation = await self._execute_tool_call(tool_name, tool_arguments, memory_step)

# Record tool execution result to new memory system
await self.memory_manager.add_message(
role="system",
content=f"Tool {tool_name} execution result: {observation}",
metadata={
"step_number": self.step_number,
"tool_name": tool_name,
"observation_type": "tool_result",
"robot_name": self.robot_name,
"message_type": "observation"
}
)

return observation

# Convenience methods for new memory system
async def search_memory(self, query: str, limit: int = 10):
"""Search messages in memory"""
return await self.memory_manager.search_messages(query, limit)

async def get_memory_stats(self):
"""Get memory statistics"""
return await self.memory_manager.get_memory_stats()

async def record_important_info(self, thinking: str, content: list, **kwargs):
"""Record important information to long-term memory"""
return await self.memory_manager.record_important_info(thinking, content, **kwargs)

async def retrieve_important_info(self, keywords: list, limit: int = 5):
"""Retrieve important information from long-term memory"""
return await self.memory_manager.retrieve_important_info(keywords, limit)

def save_memory_state(self, filepath: str):
"""Save memory state"""
self.memory_manager.save_state(filepath)

def load_memory_state(self, filepath: str):
"""Load memory state"""
self.memory_manager.load_state(filepath)
59 changes: 59 additions & 0 deletions slaver/config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -54,5 +54,64 @@ robot:
call_type: remote
path: "http://127.0.0.1:8000"

# Memory System Configuration
memory:
# Short-term memory configuration
short_term:
# Maximum number of messages to store in short-term memory
max_size: 1000
# Auto-migrate to long-term memory when short-term exceeds this count
auto_migrate_threshold: 100
# Auto-migrate to long-term memory after this many hours
migration_age_hours: 24

# Long-term memory configuration
long_term:
# Agent identifier
agent_name: "roboos_agent"
# User identifier
user_name: "default_user"
# Run identifier
run_name: "default_run"

# Vector store configuration
vector_store:
# Vector store provider (qdrant, chroma, etc.)
provider: "qdrant"
config:
# Store vectors on disk for persistence
on_disk: true
# Collection name for storing vectors
collection_name: "roboos_memory"
# Qdrant server configuration
host: "localhost"
port: 6333
# API key for Qdrant (if required)
api_key: null

# mem0 configuration
mem0:
# Embedding model configuration
embedder:
provider: "huggingface"
config:
model: "sentence-transformers/all-MiniLM-L6-v2"

# LLM model configuration for memory operations
llm:
provider: "openai"
config:
# Use local vLLM service with OpenAI-compatible API
model: "RoboBrain2.0-7B"
api_key: "EMPTY" # vLLM does not require a real API key
base_url: "http://localhost:4567/v1"

# Memory retrieval configuration
retrieval:
# Number of memories to retrieve by default
default_limit: 5
# Similarity threshold for memory retrieval
similarity_threshold: 0.7

# Output reasoning context, time cost and other information
profiling: true
160 changes: 160 additions & 0 deletions slaver/tools/config_loader.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,160 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Configuration Loader

Utility for loading and managing configuration from config.yaml file.
"""

import os
import yaml
from typing import Any, Dict, Optional
from logging import getLogger

logger = getLogger(__name__)


class ConfigLoader:
    """Configuration loader for RoboOS system.

    Parses ``config.yaml`` once and caches the result; all section accessors
    read from the cached dictionary and return ``{}`` when a section is absent.
    """

    def __init__(self, config_path: Optional[str] = None):
        """Initialize configuration loader

        Args:
            config_path: Path to config.yaml file. If None, uses the default
                path ``../config.yaml`` relative to this file.
        """
        if config_path is None:
            # Default config path relative to this file
            current_dir = os.path.dirname(os.path.abspath(__file__))
            config_path = os.path.join(current_dir, '..', 'config.yaml')

        self.config_path = config_path
        # Parsed-configuration cache; stays None until load_config() succeeds.
        self._config: Optional[Dict[str, Any]] = None

    def load_config(self) -> Dict[str, Any]:
        """Load configuration from YAML file (cached after the first call).

        Returns:
            Configuration dictionary (``{}`` for an empty config file)

        Raises:
            FileNotFoundError: If config file doesn't exist
            yaml.YAMLError: If config file is invalid YAML
        """
        if self._config is not None:
            return self._config

        if not os.path.exists(self.config_path):
            raise FileNotFoundError(f"Config file not found: {self.config_path}")

        try:
            with open(self.config_path, 'r', encoding='utf-8') as f:
                # safe_load returns None for an empty document; normalize to {}
                # so the cache check above works and callers always get a dict.
                self._config = yaml.safe_load(f) or {}

            logger.info("Configuration loaded from %s", self.config_path)
            return self._config

        except yaml.YAMLError:
            logger.error("Failed to parse config file %s", self.config_path)
            raise
        except Exception:
            logger.error("Failed to load config file %s", self.config_path)
            raise

    def _get_section(self, *keys: str) -> Dict[str, Any]:
        """Walk nested config keys, returning {} for any missing level."""
        section: Any = self.load_config()
        for key in keys:
            section = section.get(key, {}) if isinstance(section, dict) else {}
        return section

    def get_memory_config(self) -> Dict[str, Any]:
        """Get memory system configuration

        Returns:
            Memory configuration dictionary
        """
        return self._get_section("memory")

    def get_short_term_config(self) -> Dict[str, Any]:
        """Get short-term memory configuration

        Returns:
            Short-term memory configuration dictionary
        """
        return self._get_section("memory", "short_term")

    def get_long_term_config(self) -> Dict[str, Any]:
        """Get long-term memory configuration

        Returns:
            Long-term memory configuration dictionary
        """
        return self._get_section("memory", "long_term")

    def get_model_config(self) -> Dict[str, Any]:
        """Get model configuration

        Returns:
            Model configuration dictionary
        """
        return self._get_section("model")

    def get_tool_config(self) -> Dict[str, Any]:
        """Get tool configuration

        Returns:
            Tool configuration dictionary
        """
        return self._get_section("tool")

    def get_collaborator_config(self) -> Dict[str, Any]:
        """Get collaborator configuration

        Returns:
            Collaborator configuration dictionary
        """
        return self._get_section("collaborator")

    def get_robot_config(self) -> Dict[str, Any]:
        """Get robot configuration

        Returns:
            Robot configuration dictionary
        """
        return self._get_section("robot")

    def reload_config(self) -> Dict[str, Any]:
        """Reload configuration from file, discarding the cache

        Returns:
            Updated configuration dictionary
        """
        self._config = None
        return self.load_config()


def load_config(config_path: Optional[str] = None) -> Dict[str, Any]:
    """Convenience wrapper that loads the configuration in one call

    Args:
        config_path: Path to config.yaml file

    Returns:
        Configuration dictionary
    """
    return ConfigLoader(config_path).load_config()


def get_memory_config(config_path: Optional[str] = None) -> Dict[str, Any]:
    """Convenience wrapper that returns the memory section of the config

    Args:
        config_path: Path to config.yaml file

    Returns:
        Memory configuration dictionary
    """
    return ConfigLoader(config_path).get_memory_config()
Loading