This commit is contained in:
tjb-tech 2025-02-16 22:45:21 +08:00
parent d85d99659b
commit a465a51bbc
13 changed files with 453 additions and 323 deletions

View file

@ -3,6 +3,45 @@ import importlib
from autoagent import MetaChain
from autoagent.util import debug_print
import asyncio
from constant import DOCKER_WORKPLACE_NAME
from autoagent.io_utils import read_yaml_file, get_md5_hash_bytext, read_file
from autoagent.environment.utils import setup_metachain
from autoagent.types import Response
from autoagent import MetaChain
from autoagent.util import ask_text, single_select_menu, print_markdown, debug_print, UserCompleter
from prompt_toolkit import PromptSession
from prompt_toolkit.completion import Completer, Completion
from prompt_toolkit.formatted_text import HTML
from prompt_toolkit.styles import Style
from rich.progress import Progress, SpinnerColumn, TextColumn
import json
import argparse
from datetime import datetime
from autoagent.agents.meta_agent import tool_editor, agent_editor
from autoagent.tools.meta.edit_tools import list_tools
from autoagent.tools.meta.edit_agents import list_agents
from loop_utils.font_page import MC_LOGO, version_table, NOTES, GOODBYE_LOGO
from rich.live import Live
from autoagent.environment.docker_env import DockerEnv, DockerConfig, check_container_ports
from autoagent.environment.browser_env import BrowserEnv
from autoagent.environment.markdown_browser import RequestsMarkdownBrowser
from evaluation.utils import update_progress, check_port_available, run_evaluation, clean_msg
import os
import os.path as osp
from autoagent.agents import get_system_triage_agent
from autoagent.logger import LoggerManager, MetaChainLogger
from rich.console import Console
from rich.markdown import Markdown
from rich.table import Table
from rich.columns import Columns
from rich.text import Text
from rich.panel import Panel
import re
from autoagent.cli_utils.metachain_meta_agent import meta_agent
from autoagent.cli_utils.metachain_meta_workflow import meta_workflow
from autoagent.cli_utils.file_select import select_and_copy_files
from evaluation.utils import update_progress, check_port_available, run_evaluation, clean_msg
from constant import COMPLETION_MODEL
@click.group()
def cli():
"""The command line interface for autoagent"""
@ -60,4 +99,301 @@ async def async_workflow(workflow_name: str, system_input: str):
result = await workflow_func(system_input) # 使用 await 等待异步函数完成
debug_print(True, result, title=f'Result of running {workflow_name} workflow', color='pink3')
return result
return result
def clear_screen():
    """Restore the previously saved cursor position, erase everything after
    it and make the cursor visible again (counterpart of the
    '\\033[s\\033[?25l' sequence printed before the progress bar)."""
    Console().print("[bold green]Coming soon...[/bold green]")
    # \033[u restore saved cursor, \033[J clear to end of screen, \033[?25h show cursor
    print('\033[u\033[J\033[?25h', end='')
def get_config(container_name, port):
    """Build the DockerConfig for `container_name`.

    If the container is already running, its published port is reused;
    otherwise a free port is allocated starting from `port`, guarded by a
    file lock so concurrent launches don't race for the same port.

    Args:
        container_name: name of the docker container to configure.
        port: preferred communication port (starting point of the search).

    Returns:
        DockerConfig: configuration with the resolved port and a local
        workspace rooted at ./workspace_meta_showcase/showcase_<name>.
    """
    port_info = check_container_ports(container_name)
    if port_info:
        # Container already exists: reuse its published port.
        port = port_info[0]
    else:
        # Use a file lock to make port allocation atomic across processes.
        import filelock
        lock_file = os.path.join(os.getcwd(), ".port_lock")
        lock = filelock.FileLock(lock_file)
        with lock:
            # Print BEFORE incrementing so the message names the port that
            # was actually found busy (the original printed after the
            # increment, making the message off by one).
            while not check_port_available(port):
                print(f'{port} is not available, trying {port + 1}')
                port += 1
            # Immediately mark this port as taken for other processes.
            with open(os.path.join(os.getcwd(), f".port_{port}"), 'w') as f:
                f.write(container_name)
    local_root = os.path.join(os.getcwd(), "workspace_meta_showcase", f"showcase_{container_name}")
    os.makedirs(local_root, exist_ok=True)
    print("port: ", port)
    docker_config = DockerConfig(
        workplace_name=DOCKER_WORKPLACE_NAME,
        container_name=container_name,
        communication_port=port,
        conda_path='/root/miniconda3',
        local_root=local_root,
    )
    return docker_config
def create_environment(docker_config: DockerConfig):
    """
    Create the three runtime environments for an agent session:

    1. create the code environment (docker container, started here)
    2. create the web environment (browser)
    3. create the file environment (markdown file browser)

    Args:
        docker_config: container/workspace settings shared by all three.

    Returns:
        tuple: (code_env, web_env, file_env).
    """
    code_env = DockerEnv(docker_config)
    code_env.init_container()
    web_env = BrowserEnv(browsergym_eval_env = None, local_root=docker_config.local_root, workplace_name=docker_config.workplace_name)
    # Downloads land in <local_root>/<workplace_name>/downloads so fetched
    # files are visible inside the shared workspace.
    file_env = RequestsMarkdownBrowser(viewport_size=1024 * 5, local_root=docker_config.local_root, workplace_name=docker_config.workplace_name, downloads_folder=os.path.join(docker_config.local_root, docker_config.workplace_name, "downloads"))
    return code_env, web_env, file_env
def update_guidance(context_variables):
    """Render the landing screen: logo panel, version table and usage notes.

    NOTE(review): `context_variables` is currently unused in this body —
    presumably kept for signature compatibility with callers; confirm.
    """
    console = Console()
    # print the logo
    logo_text = Text(MC_LOGO, justify="center")
    console.print(Panel(logo_text, style="bold salmon1", expand=True))
    console.print(version_table)
    console.print(Panel(NOTES,title="Important Notes", expand=True))
@cli.command(name='main') # hyphenated command names are used for the CLI
@click.option('--container_name', default='quick_start', help='the function to get the agent')
@click.option('--port', default=12345, help='the port to run the container')
@click.option('--test_pull_name', default='autoagent_mirror', help='the name of the test pull')
@click.option('--git_clone', default=True, help='whether to clone a mirror of the repository')
def main(container_name: str, port: int, test_pull_name: str, git_clone: bool):
    """
    Interactive entry point: set up the docker/web/file environments, then
    loop over the mode-selection menu (user mode / agent editor / workflow
    editor) until the user chooses exit.

    NOTE(review): `test_pull_name` and `git_clone` are accepted but not used
    in this body — confirm whether they should reach DockerConfig.
    """
    print(f"port: {port}")
    model = COMPLETION_MODEL
    print('\033[s\033[?25l', end='') # Save cursor position and hide cursor
    with Progress(
        SpinnerColumn(),
        TextColumn("[progress.description]{task.description}"),
        transient=True # the progress bar disappears once finished
    ) as progress:
        task = progress.add_task("[cyan]Initializing...", total=None)
        progress.update(task, description="[cyan]Initializing config...[/cyan]\n")
        docker_config = get_config(container_name, port)
        progress.update(task, description="[cyan]Setting up logger...[/cyan]\n")
        # NOTE(review): `log_path` is computed but the logger is created with
        # log_path=None — confirm whether file logging was meant to be off.
        log_path = osp.join("casestudy_results", 'logs', f'agent_{container_name}_{model}.log')
        LoggerManager.set_logger(MetaChainLogger(log_path = None))
        progress.update(task, description="[cyan]Creating environment...[/cyan]\n")
        code_env, web_env, file_env = create_environment(docker_config)
        progress.update(task, description="[cyan]Setting up autoagent...[/cyan]\n")
    clear_screen()
    context_variables = {"working_dir": docker_config.workplace_name, "code_env": code_env, "web_env": web_env, "file_env": file_env}
    # select the mode
    while True:
        update_guidance(context_variables)
        mode = single_select_menu(['user mode', 'agent editor', 'workflow editor', 'exit'], "Please select the mode:")
        match mode:
            case 'user mode':
                clear_screen()
                user_mode(model, context_variables, False)
            case 'agent editor':
                clear_screen()
                meta_agent(model, context_variables, False)
            case 'workflow editor':
                clear_screen()
                meta_workflow(model, context_variables, False)
            case 'exit':
                console = Console()
                logo_text = Text(GOODBYE_LOGO, justify="center")
                console.print(Panel(logo_text, style="bold salmon1", expand=True))
                break
def user_mode(model: str, context_variables: dict, debug: bool = True):
    """Interactive chat loop with the system triage agent and its team.

    The user types free-form requests; mentioning an agent with `@Name`
    routes subsequent requests to that agent, and the special
    `@Upload_files` entry opens the file-selection dialog instead of
    calling an agent. Typing `exit` leaves the loop.

    Args:
        model: completion model name passed to the agent factory.
        context_variables: shared state (working dir plus code/web/file
            environments) forwarded to every agent run.
        debug: forwarded to `client.run`.
    """
    logger = LoggerManager.get_logger()
    console = Console()
    system_triage_agent = get_system_triage_agent(model)
    assert system_triage_agent.agent_teams != {}, "System Triage Agent must have agent teams"
    messages = []
    agent = system_triage_agent
    # Map "@"-mentionable names (spaces replaced by underscores) to agents.
    agents = {system_triage_agent.name.replace(' ', '_'): system_triage_agent}
    for agent_name in system_triage_agent.agent_teams.keys():
        agents[agent_name.replace(' ', '_')] = system_triage_agent.agent_teams[agent_name]("placeholder").agent
    # Sentinel entry: selecting it triggers the file-upload flow below.
    agents["Upload_files"] = "select"
    style = Style.from_dict({
        'bottom-toolbar': 'bg:#333333 #ffffff',
    })
    # Create the prompt session with @-mention completion.
    session = PromptSession(
        completer=UserCompleter(agents.keys()),
        complete_while_typing=True,
        style=style
    )
    client = MetaChain(log_path=logger)
    while True:
        # query = ask_text("Tell me what you want to do:")
        query = session.prompt(
            'Tell me what you want to do (type "exit" to quit): ',
            bottom_toolbar=HTML('<b>Prompt:</b> Enter <b>@</b> to mention Agents')
        )
        if query.strip().lower() == 'exit':
            # logger.info('User mode completed. See you next time! :waving_hand:', color='green', title='EXIT')
            logo_text = "User mode completed. See you next time! :waving_hand:"
            console.print(Panel(logo_text, style="bold salmon1", expand=True))
            break
        words = query.split()
        console.print(f"[bold green]Your request: {query}[/bold green]", end=" ")
        # The last @-mention in the query wins; otherwise the previous agent is kept.
        for word in words:
            if word.startswith('@') and word[1:] in agents.keys():
                # print(f"[bold magenta]{word}[bold magenta]", end=' ')
                agent = agents[word.replace('@', '')]
            else:
                # print(word, end=' ')
                pass
        print()
        if hasattr(agent, "name"):
            agent_name = agent.name
            console.print(f"[bold green][bold magenta]@{agent_name}[/bold magenta] will help you, be patient...[/bold green]")
            messages.append({"role": "user", "content": query})
            response = client.run(agent, messages, context_variables, debug=debug)
            messages.extend(response.messages)
            model_answer_raw = response.messages[-1]['content']
            # attempt to parse model_answer
            if model_answer_raw.startswith('Case resolved'):
                # Prefer the answer inside <solution>...</solution> tags if present.
                model_answer = re.findall(r'<solution>(.*?)</solution>', model_answer_raw, re.DOTALL)
                if len(model_answer) == 0:
                    model_answer = model_answer_raw
                else:
                    model_answer = model_answer[0]
            else:
                model_answer = model_answer_raw
            console.print(f"[bold green][bold magenta]@{agent_name}[/bold magenta] has finished with the response:\n[/bold green] [bold blue]{model_answer}[/bold blue]")
            # Continue the conversation with whichever agent the run handed off to.
            agent = response.agent
        elif agent == "select":
            code_env: DockerEnv = context_variables["code_env"]
            local_workplace = code_env.local_workplace
            files_dir = os.path.join(local_workplace, "files")
            os.makedirs(files_dir, exist_ok=True)
            select_and_copy_files(files_dir, console)
        else:
            console.print(f"[bold red]Unknown agent: {agent}[/bold red]")
@cli.command(name='deep-research') # hyphenated command names are used for the CLI
@click.option('--container_name', default='deepresearch', help='the function to get the agent')
@click.option('--port', default=12346, help='the port to run the container')
def deep_research(container_name: str, port: int):
    """
    Run deep research with a given model, container name, port.

    NOTE(review): the interactive loop below duplicates `user_mode` almost
    line-for-line (with debug hardwired to False and a shorter goodbye
    message) — consider extracting a shared helper; confirm the two are
    meant to stay in sync.
    """
    print(f"port: {port}")
    model = COMPLETION_MODEL
    print('\033[s\033[?25l', end='') # Save cursor position and hide cursor
    with Progress(
        SpinnerColumn(),
        TextColumn("[progress.description]{task.description}"),
        transient=True # the progress bar disappears once finished
    ) as progress:
        task = progress.add_task("[cyan]Initializing...", total=None)
        progress.update(task, description="[cyan]Initializing config...[/cyan]\n")
        docker_config = get_config(container_name, port)
        progress.update(task, description="[cyan]Setting up logger...[/cyan]\n")
        # NOTE(review): `log_path` is computed but the logger is created with
        # log_path=None — confirm whether file logging was meant to be off.
        log_path = osp.join("casestudy_results", 'logs', f'agent_{container_name}_{model}.log')
        LoggerManager.set_logger(MetaChainLogger(log_path = None))
        progress.update(task, description="[cyan]Creating environment...[/cyan]\n")
        code_env, web_env, file_env = create_environment(docker_config)
        progress.update(task, description="[cyan]Setting up autoagent...[/cyan]\n")
    clear_screen()
    context_variables = {"working_dir": docker_config.workplace_name, "code_env": code_env, "web_env": web_env, "file_env": file_env}
    update_guidance(context_variables)
    logger = LoggerManager.get_logger()
    console = Console()
    system_triage_agent = get_system_triage_agent(model)
    assert system_triage_agent.agent_teams != {}, "System Triage Agent must have agent teams"
    messages = []
    agent = system_triage_agent
    # Map "@"-mentionable names (spaces replaced by underscores) to agents.
    agents = {system_triage_agent.name.replace(' ', '_'): system_triage_agent}
    for agent_name in system_triage_agent.agent_teams.keys():
        agents[agent_name.replace(' ', '_')] = system_triage_agent.agent_teams[agent_name]("placeholder").agent
    # Sentinel entry: selecting it triggers the file-upload flow below.
    agents["Upload_files"] = "select"
    style = Style.from_dict({
        'bottom-toolbar': 'bg:#333333 #ffffff',
    })
    # Create the prompt session with @-mention completion.
    session = PromptSession(
        completer=UserCompleter(agents.keys()),
        complete_while_typing=True,
        style=style
    )
    client = MetaChain(log_path=logger)
    while True:
        # query = ask_text("Tell me what you want to do:")
        query = session.prompt(
            'Tell me what you want to do (type "exit" to quit): ',
            bottom_toolbar=HTML('<b>Prompt:</b> Enter <b>@</b> to mention Agents')
        )
        if query.strip().lower() == 'exit':
            # logger.info('User mode completed. See you next time! :waving_hand:', color='green', title='EXIT')
            logo_text = "See you next time! :waving_hand:"
            console.print(Panel(logo_text, style="bold salmon1", expand=True))
            break
        words = query.split()
        console.print(f"[bold green]Your request: {query}[/bold green]", end=" ")
        # The last @-mention in the query wins; otherwise the previous agent is kept.
        for word in words:
            if word.startswith('@') and word[1:] in agents.keys():
                # print(f"[bold magenta]{word}[bold magenta]", end=' ')
                agent = agents[word.replace('@', '')]
            else:
                # print(word, end=' ')
                pass
        print()
        if hasattr(agent, "name"):
            agent_name = agent.name
            console.print(f"[bold green][bold magenta]@{agent_name}[/bold magenta] will help you, be patient...[/bold green]")
            messages.append({"role": "user", "content": query})
            response = client.run(agent, messages, context_variables, debug=False)
            messages.extend(response.messages)
            model_answer_raw = response.messages[-1]['content']
            # attempt to parse model_answer
            if model_answer_raw.startswith('Case resolved'):
                # Prefer the answer inside <solution>...</solution> tags if present.
                model_answer = re.findall(r'<solution>(.*?)</solution>', model_answer_raw, re.DOTALL)
                if len(model_answer) == 0:
                    model_answer = model_answer_raw
                else:
                    model_answer = model_answer[0]
            else:
                model_answer = model_answer_raw
            console.print(f"[bold green][bold magenta]@{agent_name}[/bold magenta] has finished with the response:\n[/bold green] [bold blue]{model_answer}[/bold blue]")
            # Continue the conversation with whichever agent the run handed off to.
            agent = response.agent
        elif agent == "select":
            code_env: DockerEnv = context_variables["code_env"]
            local_workplace = code_env.local_workplace
            files_dir = os.path.join(local_workplace, "files")
            os.makedirs(files_dir, exist_ok=True)
            select_and_copy_files(files_dir, console)
        else:
            console.print(f"[bold red]Unknown agent: {agent}[/bold red]")

View file

@ -5,12 +5,12 @@ def setup_metachain(workplace_name: str, env: DockerEnv):
cmd = "pip list | grep autoagent"
response = env.run_command(cmd, print_stream)
if response['status'] == 0:
print("Metachain is already installed.")
print("AutoAgent is already installed.")
return
cmd = f"cd /{workplace_name}/MetaChain && pip install -e ."
response = env.run_command(cmd, print_stream)
if response['status'] == 0:
print("Metachain is installed.")
print("AutoAgent is installed.")
return
else:
raise Exception(f"Failed to install autoagent. {response['result']}")

View file

@ -3,7 +3,7 @@ from rich.console import Console
from rich.markup import escape
import json
from typing import List
from constant import DEBUG, DEFAULT_LOG, LOG_PATH
from constant import DEBUG, DEFAULT_LOG, LOG_PATH, MC_MODE
from pathlib import Path
BAR_LENGTH = 60
class MetaChainLogger:
@ -31,12 +31,15 @@ class MetaChainLogger:
timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
message = "\n".join(map(str, args))
color = kwargs.get("color", "white")
if MC_MODE: color = "grey58"
title = kwargs.get("title", "INFO")
log_str = f"[{timestamp}]\n{message}"
if self.debug:
# print_in_box(log_str, color=color, title=title)
self.console.print(self._wrap_title(title, f"bold {color}"))
self.console.print(escape(log_str), highlight=True, emoji=True)
print_str = escape(log_str)
if MC_MODE: print_str = f"[grey58]{print_str}[/grey58]"
self.console.print(print_str, highlight=True, emoji=True)
log_str = self._wrap_title(title) + "\n" + log_str
if self.log_path: self._write_log(log_str)
def lprint(self, *args: str, **kwargs: dict):
@ -44,42 +47,61 @@ class MetaChainLogger:
timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
message = "\n".join(map(str, args))
color = kwargs.get("color", "white")
if MC_MODE: color = "grey58"
title = kwargs.get("title", "")
log_str = f"[{timestamp}]\n{message}"
# print_in_box(log_str, color=color, title=title)
self.console.print(self._wrap_title(title, f"bold {color}"))
self.console.print(escape(log_str), highlight=True, emoji=True)
print_str = escape(log_str)
if MC_MODE: print_str = f"[grey58]{print_str}[/grey58]"
self.console.print(print_str, highlight=True, emoji=True)
def _wrap_timestamp(self, timestamp: str, color: bool = True):
color_bos = "[grey58]" if color else ""
color_eos = "[/grey58]" if color else ""
return f"{color_bos}[{timestamp}]{color_eos}"
def _print_tool_execution(self, message, timestamp: str):
self.console.print(self._wrap_title("Tool Execution", "bold pink3"))
if MC_MODE: colors = ["grey58"] * 3
else: colors = ["pink3", "blue", "purple"]
self.console.print(self._wrap_title("Tool Execution", f"bold {colors[0]}"))
self.console.print(self._wrap_timestamp(timestamp, color=True))
self.console.print("[bold blue]Tool Execution:[/bold blue]", end=" ")
self.console.print(f"[bold purple]{message['name']}[/bold purple]\n[bold blue]Result:[/bold blue]")
self.console.print(f"---\n{escape(message['content'])}\n---")
self.console.print(f"[bold {colors[1]}]Tool Execution:[/bold {colors[1]}]", end=" ")
self.console.print(f"[bold {colors[2]}]{message['name']}[/bold {colors[2]}]\n[bold {colors[1]}]Result:[/bold {colors[1]}]")
print_str = f"---\n{escape(message['content'])}\n---"
if MC_MODE: print_str = f"[grey58]{print_str}[/grey58]"
self.console.print(print_str, highlight=True, emoji=True)
def _save_tool_execution(self, message, timestamp: str):
self._write_log(self._wrap_title("Tool Execution"))
self._write_log(f"{self._wrap_timestamp(timestamp, color=False)}\ntool execution: {message['name']}\nResult:\n---\n{message['content']}\n---")
def _print_assistant_message(self, message, timestamp: str):
self.console.print(self._wrap_title("Assistant Message", "bold light_salmon3"))
self.console.print(f"{self._wrap_timestamp(timestamp, color=True)}\n[bold blue]{message['sender']}[/bold blue]:", end=" ")
if message["content"]: self.console.print(escape(message["content"]), highlight=True, emoji=True)
else: self.console.print(None, highlight=True, emoji=True)
if MC_MODE: colors = ["grey58"] * 3
else: colors = ["light_salmon3", "blue", "purple"]
self.console.print(self._wrap_title("Assistant Message", f"bold {colors[0]}"))
self.console.print(f"{self._wrap_timestamp(timestamp, color=True)}\n[bold {colors[1]}]{message['sender']}[/bold {colors[1]}]:", end=" ")
if message["content"]:
print_str = escape(message["content"])
if MC_MODE: print_str = f"[grey58]{print_str}[/grey58]"
self.console.print(print_str, highlight=True, emoji=True)
else:
print_str = None
if MC_MODE: print_str = "[grey58]None[/grey58]"
self.console.print(print_str, highlight=True, emoji=True)
def _save_assistant_message(self, message, timestamp: str):
self._write_log(self._wrap_title("Assistant Message"))
content = message["content"] if message["content"] else None
self._write_log(f"{self._wrap_timestamp(timestamp, color=False)}\n{message['sender']}: {content}")
def _print_tool_call(self, tool_calls: List, timestamp: str):
if len(tool_calls) >= 1: self.console.print(self._wrap_title("Tool Calls", "bold light_pink1"))
if MC_MODE: colors = ["grey58"] * 3
else: colors = ["light_pink1", "blue", "purple"]
if len(tool_calls) >= 1: self.console.print(self._wrap_title("Tool Calls", f"bold {colors[0]}"))
for tool_call in tool_calls:
f = tool_call["function"]
name, args = f["name"], f["arguments"]
arg_str = self._warp_args(args)
self.console.print(f"{self._wrap_timestamp(timestamp, color=True)}\n[bold purple]{name}[/bold purple]({escape(arg_str)})")
print_arg_str = escape(arg_str)
if MC_MODE: print_arg_str = f"[grey58]{print_arg_str}[/grey58]"
self.console.print(f"{self._wrap_timestamp(timestamp, color=True)}\n[bold {colors[2]}]{name}[/bold {colors[2]}]({print_arg_str})")
def _save_tool_call(self, tool_calls: List, timestamp: str):
if len(tool_calls) >= 1: self._write_log(self._wrap_title("Tool Calls"))

View file

@ -331,17 +331,31 @@ def function_to_json(func) -> dict:
# )
# parameters[param.name] = {"type": param_type}
for param in signature.parameters.values():
if param.name == "context_variables":
continue
try:
param_info = get_type_info(param.annotation, type_map)
if isinstance(param_info, dict) and "additionalProperties" in param_info:
del param_info["additionalProperties"]
parameters[param.name] = get_type_info(param.annotation, type_map)
except KeyError as e:
raise KeyError(f"Unknown type annotation {param.annotation} for parameter {param.name}: {str(e)}")
required = [
param.name
for param in signature.parameters.values()
if param.default == inspect._empty
]
if not parameters:
parameters["dummy"] = {
"type": "string",
"description": "Dummy parameter (not used). Added to satisfy non-empty schema requirements."
}
required = []
return {
"type": "function",
"function": {

View file

@ -1,5 +1,6 @@
import os
from dotenv import load_dotenv
import platform
# utils:
load_dotenv() # 加载.env文件
def str_to_bool(value):
@ -10,8 +11,8 @@ def str_to_bool(value):
if isinstance(value, bool):
return value
if not value:
return False
if value == None:
return None
value = str(value).lower().strip()
if value in true_values:
@ -21,33 +22,70 @@ def str_to_bool(value):
return True # default return True
DOCKER_WORKPLACE_NAME = os.getenv('DOCKER_WORKPLACE_NAME', 'workplace_meta')
DOCKER_WORKPLACE_NAME = os.getenv('DOCKER_WORKPLACE_NAME', 'workplace')
GITHUB_AI_TOKEN = os.getenv('GITHUB_AI_TOKEN', None)
AI_USER = os.getenv('AI_USER', None)
LOCAL_ROOT = os.getenv('LOCAL_ROOT', os.getcwd())
DEBUG = str_to_bool(os.getenv('DEBUG', True))
DEBUG = str_to_bool(os.getenv('DEBUG', False))
DEFAULT_LOG = str_to_bool(os.getenv('DEFAULT_LOG', False))
LOG_PATH = os.getenv('LOG_PATH', None)
EVAL_MODE = str_to_bool(os.getenv('EVAL_MODE', False))
BASE_IMAGES = os.getenv('BASE_IMAGES', "tjbtech1/gaia-bookworm:v2")
BASE_IMAGES = os.getenv('BASE_IMAGES', None)
def get_architecture():
    """Pick the default docker base image tag for the host CPU architecture."""
    machine = platform.machine().lower()
    # x86-family hosts get the amd64 image; everything else (arm and any
    # unrecognized machine string) falls back to the default tag.
    is_x86 = any(tag in machine for tag in ('x86', 'amd64', 'i386'))
    return "tjbtech1/metachain:amd64_latest" if is_x86 else "tjbtech1/metachain:latest"
if BASE_IMAGES is None:
BASE_IMAGES = get_architecture()
COMPLETION_MODEL = os.getenv('COMPLETION_MODEL', "claude-3-5-haiku-20241022")
EMBEDDING_MODEL = os.getenv('EMBEDDING_MODEL', "text-embedding-3-small")
MC_MODE = str_to_bool(os.getenv('MC_MODE', False))
MC_MODE = str_to_bool(os.getenv('MC_MODE', True))
# add Env for function call and non-function call
FN_CALL = str_to_bool(os.getenv('FN_CALL', True))
FN_CALL = str_to_bool(os.getenv('FN_CALL', None))
API_BASE_URL = os.getenv('API_BASE_URL', None)
ADD_USER = str_to_bool(os.getenv('ADD_USER', False))
ADD_USER = str_to_bool(os.getenv('ADD_USER', None))
NON_FN_CALL = str_to_bool(os.getenv('NON_FN_CALL', False))
NOT_SUPPORT_SENDER = ["mistral", "groq"]
MUST_ADD_USER = ["deepseek-reasoner", "o1-mini", "deepseek-r1"]
NOT_SUPPORT_FN_CALL = ["o1-mini", "deepseek-reasoner", "deepseek-r1", "llama", "grok-2"]
NOT_USE_FN_CALL = [ "deepseek-chat"] + NOT_SUPPORT_FN_CALL
if ADD_USER is None:
ADD_USER = False
for model in MUST_ADD_USER:
if model in COMPLETION_MODEL:
ADD_USER = True
break
if FN_CALL is None:
FN_CALL = True
for model in NOT_USE_FN_CALL:
if model in COMPLETION_MODEL:
FN_CALL = False
break
NON_FN_CALL = False
for model in NOT_SUPPORT_FN_CALL:
if model in COMPLETION_MODEL:
NON_FN_CALL = True
break
if EVAL_MODE:
DEFAULT_LOG = False
# print(FN_CALL, NON_FN_CALL, ADD_USER)

View file

@ -6,32 +6,24 @@ from rich.style import Style
from rich.console import Console
from rich.box import DOUBLE
from rich.markdown import Markdown
# MC_LOGO = """\
# ███╗ ███╗███████╗████████╗ █████╗ ██████╗██╗ ██╗ █████╗ ██╗███╗ ██╗
# ████╗ ████║██╔════╝╚══██╔══╝██╔══██╗██╔════╝██║ ██║██╔══██╗██║████╗ ██║
# ██╔████╔██║█████╗ ██║ ███████║██║ ███████║███████║██║██╔██╗ ██║
# ██║╚██╔╝██║██╔══╝ ██║ ██╔══██║██║ ██╔══██║██╔══██║██║██║╚██╗██║
# ██║ ╚═╝ ██║███████╗ ██║ ██║ ██║╚██████╗██║ ██║██║ ██║██║██║ ╚████║
# ╚═╝ ╚═╝╚══════╝ ╚═╝ ╚═╝ ╚═╝ ╚═════╝╚═╝ ╚═╝╚═╝ ╚═╝╚═╝╚═╝ ╚═══╝
# ╔═══ 𝒞𝓇𝑒𝒶𝓉𝑒 𝒜𝑔𝑒𝓃𝓉𝒾𝒸 𝒜 𝓊𝓈𝒾𝓃𝑔 𝒶𝓃𝑔𝓊𝒶𝑔𝑒 ═══╗
# """.strip()
MC_LOGO = """\
𝒞𝓇𝑒𝒶𝓉𝑒 𝒜𝑔𝑒𝓃𝓉𝒾𝒸 𝒜 𝓊𝓈𝒾𝓃𝑔 𝒶𝓃𝑔𝓊𝒶𝑔𝑒
""".strip()
𝒞𝓇𝑒𝒶𝓉𝑒 𝒜𝑔𝑒𝓃𝓉𝒾𝒸 𝒜 𝓊𝓈𝒾𝓃𝑔 𝒶𝓃𝑔𝓊𝒶𝑔𝑒 \
"""
version_table = Table(show_header=False, box=DOUBLE, expand=True)
version_table.add_column("Key", style="cyan")
version_table.add_column("Value", style="green")
version_table.add_row("Version", "0.1.0")
version_table.add_row("Author", "MetaChain Team@HKU")
version_table.add_row("Version", "0.2.0")
version_table.add_row("Author", "AutoAgent Team@HKU")
version_table.add_row("License", "MIT")
NOTES = """\
@ -52,3 +44,4 @@ GOODBYE_LOGO = """\
· 𝓜𝓮𝓽𝓪𝓒𝓱𝓪𝓲𝓷-𝓐𝓘 ·
""".strip()

View file

View file

@ -1,262 +0,0 @@
from constant import DOCKER_WORKPLACE_NAME
from autoagent.io_utils import read_yaml_file, get_md5_hash_bytext, read_file
from autoagent.environment.utils import setup_metachain
from autoagent.types import Response
from autoagent import MetaChain
from autoagent.util import ask_text, single_select_menu, print_markdown, debug_print, UserCompleter
from prompt_toolkit import PromptSession
from prompt_toolkit.completion import Completer, Completion
from prompt_toolkit.formatted_text import HTML
from prompt_toolkit.styles import Style
from rich.progress import Progress, SpinnerColumn, TextColumn
import json
import argparse
from datetime import datetime
from autoagent.agents.meta_agent import tool_editor, agent_editor
from autoagent.tools.meta.edit_tools import list_tools
from autoagent.tools.meta.edit_agents import list_agents
from loop_utils.font_page import MC_LOGO, version_table, NOTES, GOODBYE_LOGO
from rich.live import Live
from autoagent.environment.docker_env import DockerEnv, DockerConfig, check_container_ports
from autoagent.environment.browser_env import BrowserEnv
from autoagent.environment.markdown_browser import RequestsMarkdownBrowser
from evaluation.utils import update_progress, check_port_available, run_evaluation, clean_msg
import os
import os.path as osp
from autoagent.agents import get_system_triage_agent
from autoagent.logger import LoggerManager, MetaChainLogger
from rich.console import Console
from rich.markdown import Markdown
from rich.table import Table
from rich.columns import Columns
from rich.text import Text
from rich.panel import Panel
import re
from playground.cli.metachain_meta_agent import meta_agent
from playground.cli.metachain_meta_workflow import meta_workflow
from playground.cli.file_select import select_and_copy_files
def get_args():
    """Parse the command-line options for the showcase CLI run."""
    parser = argparse.ArgumentParser(description="working@tjb-tech")
    # String-valued options and their defaults.
    string_options = {
        '--container_name': 'gpu_test',
        '--model': 'gpt-4o-2024-08-06',
        '--test_pull_name': 'test_pull_1010',
    }
    for flag, default in string_options.items():
        parser.add_argument(flag, type=str, default=default)
    # Boolean and numeric options (declared in the original order).
    parser.add_argument('--debug', action='store_true', default=False)
    parser.add_argument('--port', type=int, default=12350)
    parser.add_argument('--git_clone', action='store_true', default=False)
    return parser.parse_args()
def clear_screen():
    """Print a placeholder banner, then restore the saved cursor position,
    clear everything after it and re-show the cursor."""
    console = Console()
    console.print("[bold green]Coming soon...[/bold green]")
    print('\033[u\033[J\033[?25h', end='') # Restore cursor and clear everything after it, show cursor
def get_config(args):
    """Build the DockerConfig from parsed CLI args.

    Reuses the container's published port when it is already running,
    otherwise allocates a free port starting from `args.port` under a file
    lock so concurrent launches don't race.
    """
    container_name = args.container_name
    port_info = check_container_ports(container_name)
    port = args.port
    if port_info:
        # Container already exists: reuse its published port.
        port = port_info[0]
    else:
        # while not check_port_available(port):
        #     port += 1
        # Use a file lock to make port allocation atomic across processes.
        import filelock
        lock_file = os.path.join(os.getcwd(), ".port_lock")
        lock = filelock.FileLock(lock_file)
        with lock:
            port = args.port
            while not check_port_available(port):
                port += 1
                # NOTE(review): printed after the increment, so the message
                # is off by one relative to the port actually found busy.
                print(f'{port} is not available, trying {port+1}')
            # Immediately mark this port as taken for other processes.
            with open(os.path.join(os.getcwd(), f".port_{port}"), 'w') as f:
                f.write(container_name)
    local_root = os.path.join(os.getcwd(), f"workspace_meta_showcase", f"showcase_{container_name}")
    os.makedirs(local_root, exist_ok=True)
    docker_config = DockerConfig(
        workplace_name=DOCKER_WORKPLACE_NAME,
        container_name=container_name,
        communication_port=port,
        conda_path='/root/miniconda3',
        local_root=local_root,
        git_clone=args.git_clone,
        test_pull_name=args.test_pull_name,
    )
    return docker_config
def create_environment(docker_config: DockerConfig):
    """
    Create the three runtime environments for an agent session:

    1. create the code environment (docker container, started here)
    2. create the web environment (browser)
    3. create the file environment (markdown file browser)

    Returns:
        tuple: (code_env, web_env, file_env).
    """
    code_env = DockerEnv(docker_config)
    code_env.init_container()
    web_env = BrowserEnv(browsergym_eval_env = None, local_root=docker_config.local_root, workplace_name=docker_config.workplace_name)
    # Downloads land in <local_root>/<workplace_name>/downloads so fetched
    # files are visible inside the shared workspace.
    file_env = RequestsMarkdownBrowser(viewport_size=1024 * 5, local_root=docker_config.local_root, workplace_name=docker_config.workplace_name, downloads_folder=os.path.join(docker_config.local_root, docker_config.workplace_name, "downloads"))
    return code_env, web_env, file_env
def user_mode(model: str, context_variables: dict, debug: bool = True):
    """Interactive chat loop with the system triage agent and its team.

    Mentioning an agent with `@Name` routes subsequent requests to that
    agent; the special `@Upload_files` entry opens the file-selection
    dialog instead of calling an agent. Typing `exit` leaves the loop.
    """
    logger = LoggerManager.get_logger()
    console = Console()
    system_triage_agent = get_system_triage_agent(model)
    assert system_triage_agent.agent_teams != {}, "System Triage Agent must have agent teams"
    messages = []
    agent = system_triage_agent
    # Map "@"-mentionable names (spaces replaced by underscores) to agents.
    agents = {system_triage_agent.name.replace(' ', '_'): system_triage_agent}
    for agent_name in system_triage_agent.agent_teams.keys():
        agents[agent_name.replace(' ', '_')] = system_triage_agent.agent_teams[agent_name]("placeholder").agent
    # Sentinel entry: selecting it triggers the file-upload flow below.
    agents["Upload_files"] = "select"
    style = Style.from_dict({
        'bottom-toolbar': 'bg:#333333 #ffffff',
    })
    # Create the prompt session with @-mention completion.
    session = PromptSession(
        completer=UserCompleter(agents.keys()),
        complete_while_typing=True,
        style=style
    )
    client = MetaChain(log_path=logger)
    while True:
        # query = ask_text("Tell me what you want to do:")
        query = session.prompt(
            'Tell me what you want to do (type "exit" to quit): ',
            bottom_toolbar=HTML('<b>Prompt:</b> Enter <b>@</b> to mention Agents')
        )
        if query.strip().lower() == 'exit':
            # logger.info('User mode completed. See you next time! :waving_hand:', color='green', title='EXIT')
            logo_text = "User mode completed. See you next time! :waving_hand:"
            console.print(Panel(logo_text, style="bold salmon1", expand=True))
            break
        words = query.split()
        console.print(f"[bold green]Your request: {query}[/bold green]", end=" ")
        # The last @-mention in the query wins; otherwise the previous agent is kept.
        for word in words:
            if word.startswith('@') and word[1:] in agents.keys():
                # print(f"[bold magenta]{word}[bold magenta]", end=' ')
                agent = agents[word.replace('@', '')]
            else:
                # print(word, end=' ')
                pass
        print()
        if hasattr(agent, "name"):
            agent_name = agent.name
            console.print(f"[bold green][bold magenta]@{agent_name}[/bold magenta] will help you, be patient...[/bold green]")
            messages.append({"role": "user", "content": query})
            response = client.run(agent, messages, context_variables, debug=debug)
            messages.extend(response.messages)
            model_answer_raw = response.messages[-1]['content']
            # attempt to parse model_answer
            if model_answer_raw.startswith('Case resolved'):
                # Prefer the answer inside <solution>...</solution> tags if present.
                model_answer = re.findall(r'<solution>(.*?)</solution>', model_answer_raw, re.DOTALL)
                if len(model_answer) == 0:
                    model_answer = model_answer_raw
                else:
                    model_answer = model_answer[0]
            else:
                model_answer = model_answer_raw
            console.print(f"[bold green][bold magenta]@{agent_name}[/bold magenta] has finished with the response:\n[/bold green] [bold blue]{model_answer}[/bold blue]")
            # Continue the conversation with whichever agent the run handed off to.
            agent = response.agent
        elif agent == "select":
            code_env: DockerEnv = context_variables["code_env"]
            local_workplace = code_env.local_workplace
            files_dir = os.path.join(local_workplace, "files")
            os.makedirs(files_dir, exist_ok=True)
            select_and_copy_files(files_dir, console)
        else:
            console.print(f"[bold red]Unknown agent: {agent}[/bold red]")
def tool_to_table(tool_dict: dict):
    """Render the tool registry as a two-column table (name, docstring),
    skipping the placeholder 'tool_dummy' entry."""
    table = Table(title="Tool List", show_lines=True)
    table.add_column("Tool Name")
    table.add_column("Description")
    for name, info in tool_dict.items():
        if name != "tool_dummy":
            table.add_row(name, info["docstring"])
    return table
def agent_to_table(agent_dict: dict):
    """Render the agent registry as a two-column table (name, docstring),
    skipping the placeholder 'get_dummy_agent' entry."""
    table = Table(title="Agent List", show_lines=True)
    table.add_column("Agent Name")
    table.add_column("Description")
    for name, info in agent_dict.items():
        if name != "get_dummy_agent":
            table.add_row(name, info["docstring"])
    return table
def update_guidance(context_variables):
    """Render the landing screen: logo panel, version table and usage notes."""
    tool_dict = json.loads(list_tools(context_variables))
    # print(tool_dict)
    tool_table = tool_to_table(tool_dict)
    agent_dict = json.loads(list_agents(context_variables))
    agent_table = agent_to_table(agent_dict)
    console = Console()
    # NOTE(review): `columns` is built but never printed — presumably the
    # tool/agent overview was disabled; confirm before removing.
    columns = Columns([tool_table, agent_table])
    # print the logo
    logo_text = Text(MC_LOGO, justify="center")
    console.print(Panel(logo_text, style="bold salmon1", expand=True))
    console.print(version_table)
    console.print(Panel(NOTES,title="Important Notes", expand=True))
def main(args):
    """Set up the docker/web/file environments from parsed CLI args, then
    loop over the mode-selection menu (user mode / agent editor / workflow
    editor) until the user chooses exit."""
    print('\033[s\033[?25l', end='') # Save cursor position and hide cursor
    with Progress(
        SpinnerColumn(),
        TextColumn("[progress.description]{task.description}"),
        transient=True # the progress bar disappears once finished
    ) as progress:
        task = progress.add_task("[cyan]Initializing...", total=None)
        progress.update(task, description="[cyan]Initializing config...[/cyan]\n")
        docker_config = get_config(args)
        progress.update(task, description="[cyan]Setting up logger...[/cyan]\n")
        # NOTE(review): `log_path` is computed but the logger is created with
        # log_path=None — confirm whether file logging was meant to be off.
        log_path = osp.join("casestudy_results", 'logs', f'agent_{args.container_name}_{args.model}.log')
        LoggerManager.set_logger(MetaChainLogger(log_path = None))
        progress.update(task, description="[cyan]Creating environment...[/cyan]\n")
        code_env, web_env, file_env = create_environment(docker_config)
        progress.update(task, description="[cyan]Setting up autoagent...[/cyan]\n")
        setup_metachain(workplace_name=docker_config.workplace_name, env=code_env)
    clear_screen()
    # console = Console()
    # console.clear()
    # print('\033[H\033[J') # ANSI escape sequence to clear the screen
    # print('\033[3J\033[H\033[2J')
    # clear_screen()
    context_variables = {"working_dir": docker_config.workplace_name, "code_env": code_env, "web_env": web_env, "file_env": file_env}
    # select the mode
    while True:
        update_guidance(context_variables)
        mode = single_select_menu(['user mode', 'agent editor', 'workflow editor', 'exit'], "Please select the mode:")
        match mode:
            case 'user mode':
                clear_screen()
                user_mode(args.model, context_variables, args.debug)
            case 'agent editor':
                clear_screen()
                meta_agent(args.model, context_variables, args.debug)
            case 'workflow editor':
                clear_screen()
                meta_workflow(args.model, context_variables, args.debug)
            case 'exit':
                console = Console()
                logo_text = Text(GOODBYE_LOGO, justify="center")
                console.print(Panel(logo_text, style="bold salmon1", expand=True))
                break
if __name__ == "__main__":
    # Script entry point: parse CLI options, then start the interactive loop.
    args = get_args()
    main(args)

View file

@ -1,14 +0,0 @@
# Resolve the directory containing this script, then run from the repo root.
current_dir=$(dirname "$(readlink -f "$0")")
cd $current_dir
cd ../..
# Environment for the quick-start showcase run.
export DOCKER_WORKPLACE_NAME=workplace
export EVAL_MODE=True
export BASE_IMAGES=tjbtech1/gaia-bookworm:v2
export COMPLETION_MODEL=claude-3-5-haiku-20241022
# export COMPLETION_MODEL=gpt-4o-2024-08-06
export DEBUG=False
export MC_MODE=True
export AI_USER=tjb-tech
# Launch the interactive CLI against the quick_start container.
python playground/cli/metachain_cli.py --container_name quick_start --model ${COMPLETION_MODEL} --test_pull_name mirror_branch_0207 --debug --port 12345 --git_clone

View file

@ -10,8 +10,7 @@ license = MIT
[options]
package_dir =
= .
packages =
autoagent
packages = find_namespace:
zip_safe = True
include_package_data = True
install_requires =
@ -64,12 +63,16 @@ install_requires =
moviepy
faster_whisper
sentence_transformers
[options.packages.find]
where = .
include = autoagent*
python_requires = >=3.10
[options.entry_points]
console_scripts =
mc = autoagent.cli:cli
auto = autoagent.cli:cli
[tool.autopep8]
max_line_length = 120
ignore = E501,W6