This commit is contained in:
tjb-tech 2025-02-07 21:37:18 +08:00
commit 8da954425d
168 changed files with 25157 additions and 0 deletions

27
.gitignore vendored Normal file
View file

@@ -0,0 +1,27 @@
workplace_*/
workspace_*/
*.log
code_db/*
results/*
__pycache__/
tmp/*
logs/*
*.tar.gz
*.egg-info
.DS_Store
*.csv
eval_data/*
evaluation_results/*
casestudy_results/*
evaluation/*/data/
evaluation/*/data/*
evaluation/**/data/
.env
terminal_tmp/*

53
constant.py Normal file
View file

@@ -0,0 +1,53 @@
import os
from dotenv import load_dotenv
# utils:
load_dotenv()  # load the .env file so os.getenv below picks up local overrides
def str_to_bool(value):
    """Coerce a string-like configuration value to a bool.

    Booleans pass through unchanged; empty/None/0 map to False;
    recognized false tokens map to False; everything else (including
    the recognized true tokens) defaults to True.
    """
    if isinstance(value, bool):
        return value
    if not value:
        return False
    token = str(value).lower().strip()
    if token in {'false', 'no', '0', 'off', 'f', 'n'}:
        return False
    # Explicit true tokens and unrecognized tokens both yield True.
    return True
# --- Environment-driven configuration (all overridable via .env) -----------
DOCKER_WORKPLACE_NAME = os.getenv('DOCKER_WORKPLACE_NAME', 'workplace_meta')  # workspace dir name inside the container
GITHUB_AI_TOKEN = os.getenv('GITHUB_AI_TOKEN', None)
AI_USER = os.getenv('AI_USER', None)
LOCAL_ROOT = os.getenv('LOCAL_ROOT', os.getcwd())
DEBUG = str_to_bool(os.getenv('DEBUG', True))
DEFAULT_LOG = str_to_bool(os.getenv('DEFAULT_LOG', False))
LOG_PATH = os.getenv('LOG_PATH', None)
EVAL_MODE = str_to_bool(os.getenv('EVAL_MODE', False))
BASE_IMAGES = os.getenv('BASE_IMAGES', "tjb-gaia-bookworm:v2")  # default docker base image tag
COMPLETION_MODEL = os.getenv('COMPLETION_MODEL', "claude-3-5-haiku-20241022")
EMBEDDING_MODEL = os.getenv('EMBEDDING_MODEL', "text-embedding-3-small")
MC_MODE = str_to_bool(os.getenv('MC_MODE', False))
# add Env for function call and non-function call
FN_CALL = str_to_bool(os.getenv('FN_CALL', True))
API_BASE_URL = os.getenv('API_BASE_URL', None)
ADD_USER = str_to_bool(os.getenv('ADD_USER', False))
NON_FN_CALL = str_to_bool(os.getenv('NON_FN_CALL', False))
# presumably providers whose API rejects a 'sender' field — TODO confirm
NOT_SUPPORT_SENDER = ["mistral", "groq"]
# Evaluation runs force per-step logging off.
if EVAL_MODE:
    DEFAULT_LOG = False

0
evaluation/README.md Normal file
View file

View file

@ -0,0 +1,49 @@
import argparse
import json
def main():
    """Read a GAIA output.jsonl and print overall and per-level success rates.

    Bug fix: the per-level percentages previously raised ZeroDivisionError
    whenever a level had no instances in the log; empty levels now report
    0/0 = 0.0000% instead of crashing.
    """
    parser = argparse.ArgumentParser(description="Get agent's gaia score")
    parser.add_argument('--file', default='/Users/tangjiabin/Documents/reasoning/metachain/evaluation_results/gaia/system_triage_agent/claude-3-5-sonnet-20241022_maxiter/output.jsonl', type=str, help="Path to the agent's output.jsonl")
    args = parser.parse_args()
    this_log = args.file

    # Each line of the jsonl is one evaluated instance; the first line also
    # carries the run metadata.
    with open(this_log, 'r') as f:
        outs = [json.loads(line) for line in f]
    print(f'Reading {this_log}')
    print(f'Metadata:\n {outs[0]["metadata"]}')

    def _rate(success, total):
        # Guard against empty buckets: 0/0 reports as 0% instead of crashing.
        return success / total * 100 if total else 0.0

    total = 0
    success = 0
    # per-level (total, success) counters for GAIA levels 1-3
    level_counts = {'1': [0, 0], '2': [0, 0], '3': [0, 0]}
    for out in outs:
        total += 1
        scored = bool(out['test_result']['score'])
        if scored:
            success += 1
        level = out['instance']['Level']
        if level in level_counts:
            level_counts[level][0] += 1
            if scored:
                level_counts[level][1] += 1
    print(f'Success rate: {success}/{total} = {_rate(success, total):.4f}%')
    for level, (lvl_total, lvl_success) in level_counts.items():
        print(f'L{level} success rate: {lvl_success}/{lvl_total} = {_rate(lvl_success, lvl_total):.4f}%')


if __name__ == '__main__':
    main()

View file

@ -0,0 +1,299 @@
from metachain.environment.docker_container import init_container
import argparse
from constant import DOCKER_WORKPLACE_NAME
from datasets import load_dataset
import huggingface_hub
from metachain import MetaChain
from metachain.logger import MetaChainLogger, LoggerManager
from evaluation.utils import make_metadata, prepare_dataset, update_progress, check_port_available, run_evaluation, clean_msg
from evaluation.types import EvalMetadata, EvalOutput
import metachain.agents as agenthub
import os.path as osp
import pandas as pd
import asyncio
import re
import os
import shutil
from metachain.registry import registry
from evaluation.gaia.scorer import question_scorer
import json
# from metachain.util import run_command_in_container
from metachain.environment.docker_env import DockerEnv, DockerConfig, check_container_ports, check_container_exist, check_container_running
from metachain.environment.browser_env import BrowserEnv
from metachain.environment.markdown_browser import RequestsMarkdownBrowser
from metachain.types import Response
from metachain.util import function_to_json
from metachain.main import run_in_client, run_in_client_non_async
from metachain.agents.meta_agent.tool_editor import get_tool_editor_agent
from metachain.environment.utils import setup_metachain
import subprocess
DATASET_CACHE_DIR = osp.join(osp.dirname(__file__), 'data')
# Note: You should run this script in the root directory of the project metachain
def get_args():
    """Parse the CLI options for a GAIA evaluation run."""
    p = argparse.ArgumentParser()
    # container / repository options
    p.add_argument('--container_name', type=str, default='gaia_test')
    p.add_argument('--model', type=str, default='claude-3-5-sonnet-20241022')
    p.add_argument('--git_clone', action='store_true', default=False)
    p.add_argument('--setup_package', type=str, default=None)
    p.add_argument('--test_pull_name', type=str, default='main')
    p.add_argument('--debug', action='store_true', default=False)
    # metadata
    p.add_argument('--agent_func', type=str, default='get_system_triage_agent')
    p.add_argument('--eval_note', type=str, default=None)
    p.add_argument('--eval_output_dir', type=str, default='./evaluation_results')
    p.add_argument('--data_split', type=str, default=None)
    # gaia level / scheduling
    p.add_argument('--level', type=str, default='1')
    p.add_argument('--eval_n_limit', type=int, default=None)
    p.add_argument('--port', type=int, default=12345)
    p.add_argument('--eval_num_workers', type=int, default=1)
    return p.parse_args()
def get_config(metadata: EvalMetadata, instance_id: str):
    """Build the DockerConfig for one GAIA instance.

    Reuses the published port of an already-existing container when one is
    found; otherwise allocates a free port under a file lock so parallel
    workers cannot race on the same port.

    Bug fix: the "not available" message was printed AFTER incrementing, so
    it named the next candidate instead of the port that actually failed.
    """
    container_name = metadata.container_name + f'_{instance_id}'
    port_info = check_container_ports(container_name)
    port = metadata.port
    if port_info:
        # Container already exists: reuse its published port.
        port = port_info[0]
    else:
        # Use a file lock so port allocation is atomic across workers.
        import filelock
        lock_file = os.path.join(os.getcwd(), ".port_lock")
        lock = filelock.FileLock(lock_file)
        with lock:
            port = metadata.port
            while not check_port_available(port):
                # Log before incrementing so the message names the failing port.
                print(f'{port} is not available, trying {port + 1}')
                port += 1
            # Immediately mark the port as taken; the marker file is removed
            # in process_instance's cleanup.
            with open(os.path.join(os.getcwd(), f".port_{port}"), 'w') as f:
                f.write(container_name)
    local_root = os.path.join(os.getcwd(), f"workspace_gaia_whole", f"gaia_eval_{instance_id}")
    os.makedirs(local_root, exist_ok=True)
    docker_config = DockerConfig(
        workplace_name=DOCKER_WORKPLACE_NAME,
        container_name=container_name,
        communication_port=port,
        conda_path='/root/miniconda3',
        local_root=local_root,
        git_clone=metadata.git_clone,
        test_pull_name=metadata.test_pull_name,
    )
    return docker_config
def process_instance(
    instance: pd.Series,
    metadata: EvalMetadata,
    logger: MetaChainLogger,
) -> EvalOutput:
    """Run one GAIA instance end-to-end in its own docker environment.

    Creates the code/web/file environments, builds the instruction prompt
    (copying any attached dataset file into the workspace), runs the system
    triage agent, parses the <solution>...</solution> answer, scores it
    against ground truth, and always tears down the container and the port
    marker file in the finally block.
    """
    docker_config = get_config(metadata, instance_id=instance['instance_id'])
    code_env = None
    try:
        code_env, web_env, file_env = create_environment(docker_config)
        local_workplace = code_env.local_workplace
        docker_workplace = code_env.docker_workplace
        # Setup the logger properly, so you can run multi-processing to parallelize the evaluation
        logger.info(f'Starting evaluation for instance {instance["instance_id"]}.')
        if instance['file_name'] != '':
            assert metadata.data_split is not None
            src_file = os.path.join(
                DATASET_CACHE_DIR, '2023', metadata.data_split, instance['file_name']
            )
            assert os.path.exists(src_file)
            # Copy the attached file into the local workspace as file.<ext>.
            extension_name = instance['file_name'].split('.')[-1]
            dest_file = os.path.join(local_workplace, f'file.{extension_name}')
            shutil.copy(src_file, dest_file)
            file_name = dest_file.split('/')[-1]
        else:
            dest_file = None
        # Prepare instruction
        instruction = f"{instance['Question']}\n"
        logger.info(f'Instruction: {instruction}')
        if dest_file:
            instruction += f"\n\nThe mentioned file is provided in the workspace at: {osp.join(docker_workplace, file_name)}"
        # NOTE(review): 'how to do bot do it' below looks like a typo for 'but do it' in the prompt text — confirm and fix upstream.
        instruction += 'IMPORTANT: Any agent cannot stop using tools until the task is done. Don\'t tell me how to do bot do it using tools!\n'
        instruction += 'IMPORTANT: System Triage Agent must hand off the task to the suitable agents, and finally answer the question util there is no more sub-task to do.\n'
        instruction += 'IMPORTANT: When you meet something you are not sure about, you should use the `Web Surfer Agent` to search the web. And when you are required to compute something, you should use the `Programming Agent` to compute. Take Advantage of agents as much as possible.\n'
        instruction += 'IMPORTANT: You should ONLY interact with the environment provided to you AND NEVER ASK FOR HUMAN HELP.\n'
        instruction += 'Please encapsulate your final answer (answer ONLY) within <solution> and </solution>.\n'
        instruction += (
            'For example: The answer to the question is <solution> 42 </solution>.\n'
        )
        logger.info(f'Instruction:\n{instruction}')
        system_triage_agent = registry.agents[metadata.agent_func](model=metadata.model)
        messages = [
            {
                'role': 'user',
                'content': instruction
            }
        ]
        context_variables = {"code_env": code_env, "web_env": web_env, "file_env": file_env}
        # Here's how you can run the agent (similar to the `main` function) and get the final task state
        tool_editor_agent = get_tool_editor_agent(model=metadata.model)
        response: Response | None = asyncio.run(
            run_in_client(
                agent=system_triage_agent,
                messages=messages,
                context_variables = context_variables,
                logger=logger,
                meta_agent=tool_editor_agent,
                docker_config=docker_config,
                code_env=code_env,
            )
        )
        # response: Response | None = run_in_client_non_async(
        #     agent=system_triage_agent,
        #     messages=messages,
        #     context_variables = context_variables,
        #     logger=logger
        # )
        messages.extend(response.messages)
        # save messages to a file
        messages_file = osp.join(metadata.eval_output_dir, f"gaia_{instance['instance_id']}", f'messages_{metadata.agent_func.replace("get_", "")}.json')
        os.makedirs(osp.dirname(messages_file), exist_ok=True)
        messages = clean_msg(messages)
        with open(messages_file, 'w', encoding='utf-8') as f:
            json.dump(messages, f, ensure_ascii=False, indent=4)
        # ======= Attempt to evaluate the agent's edits =======
        # If you are working on simpler benchmark that only evaluates the final model output (e.g., in a MessageAction)
        # You can simply get the LAST `MessageAction` from the returned `state.history` and parse it for evaluation.
        if response is None:
            raise ValueError('Response should not be None.')
        model_answer_raw = response.messages[-1]['content']
        # attempt to parse model_answer
        model_answer = re.findall(r'<solution>(.*?)</solution>', model_answer_raw)
        if len(model_answer) == 0:
            # Fall back to the raw message when no <solution> tags were emitted.
            logger.info(f'Failed to parse model answer: {model_answer_raw}', title='WARNING', color='yellow')
            model_answer = model_answer_raw
        else:
            model_answer = model_answer[0]
        logger.info(
            f'Final message: {model_answer} | Ground truth: {instance["Final answer"]}',
            title='INFO', color='green'
        )
        score = question_scorer(
            model_answer=model_answer, ground_truth=instance['Final answer']
        )
        test_result = {
            'score': score,
            'model_answer_raw': model_answer_raw,
            'model_answer': model_answer,
            'ground_truth': instance['Final answer'],
        }
        # Save the output
        output = EvalOutput(
            instance_id=instance['instance_id'],
            instance=instance.to_dict(),
            instruction=instance['Question'],
            metadata=metadata,
            messages=messages,
            test_result=test_result,
        )
    finally:
        # Clean up resources
        if code_env is not None:
            try:
                # Stop the container
                code_env.stop_container()
                logger.info(f"Container {docker_config.container_name} stopped successfully")
                # Optional: remove the container
                # subprocess.run(["docker", "rm", docker_config.container_name],
                #               capture_output=True, text=True)
                # logger.info(f"Container {docker_config.container_name} removed successfully")
                # Optional: remove the working directory
            except Exception as e:
                logger.error(f"Error during cleanup: {str(e)}")
        # Remove the port marker file created in get_config.
        port_file = os.path.join(os.getcwd(), f".port_{docker_config.communication_port}")
        if os.path.exists(port_file):
            os.remove(port_file)
            logger.info(f"Port {docker_config.communication_port} released")
    return output
def map_instance_to_port(dataset: pd.DataFrame, metadata: EvalMetadata):
port_dict = {}
for idx, row in dataset.iterrows():
port_dict[row['instance_id']] = metadata.port + idx
def create_environment(docker_config: DockerConfig):
    """
    1. create the code environment
    2. create the web environment
    3. create the file environment
    """
    # Container is initialized first; presumably the browser/file envs rely on
    # the same local_root/workplace layout being in place — confirm.
    code_env = DockerEnv(docker_config)
    code_env.init_container()
    # Browser environment for web tasks (no browsergym eval task attached).
    web_env = BrowserEnv(browsergym_eval_env = None, local_root=docker_config.local_root, workplace_name=docker_config.workplace_name)
    # Markdown-based file browser; downloads land under <local_root>/<workplace>/downloads.
    file_env = RequestsMarkdownBrowser(viewport_size=1024 * 5, local_root=docker_config.local_root, workplace_name=docker_config.workplace_name, downloads_folder=os.path.join(docker_config.local_root, docker_config.workplace_name, "downloads"))
    return code_env, web_env, file_env
def main(args):
    """Entry point: build run metadata, download the GAIA dataset, and run the evaluation."""
    metadata: EvalMetadata = make_metadata(
        model=args.model,
        dataset_name="gaia",
        agent_func=args.agent_func,
        eval_note=args.eval_note,
        eval_output_dir=args.eval_output_dir,
        data_split=args.data_split,
        details={'gaia-level': args.level},
        port=args.port,
        container_name=args.container_name,
        git_clone=args.git_clone,
        test_pull_name=args.test_pull_name,
    )
    log_path = osp.join(metadata.eval_output_dir, 'logs', f'agent_{metadata.model}.log')
    LoggerManager.set_logger(MetaChainLogger(log_path))
    # Load the benchmark and mirror its attachments into the local cache dir.
    dataset = load_dataset('gaia-benchmark/GAIA', args.level)
    huggingface_hub.snapshot_download(
        'gaia-benchmark/GAIA',
        repo_type='dataset',
        local_dir=DATASET_CACHE_DIR,
    )
    gaia_tests = dataset[metadata.data_split].to_pandas()
    # GAIA uses 'task_id'; the evaluation harness expects 'instance_id'.
    gaia_tests.rename(columns={'task_id': 'instance_id'}, inplace=True)
    output_file = osp.join(metadata.eval_output_dir, 'output.jsonl')
    # presumably filters already-completed instances and applies --eval_n_limit — confirm against evaluation.utils.prepare_dataset
    prepared_dataset = prepare_dataset(gaia_tests, output_file, args.eval_n_limit)
    run_evaluation(
        dataset=prepared_dataset,
        metadata=metadata,
        output_file=output_file,
        num_workers=args.eval_num_workers,
        process_instance_func=process_instance,
    )


if __name__ == "__main__":
    args = get_args()
    main(args)
    # print(check_container_exist('gaia_lite_eval_c61d22de-5f6c-4958-a7f6-5e9707bd3466'))

View file

@ -0,0 +1,235 @@
from metachain.environment.docker_container import init_container
import argparse
from constant import DOCKER_WORKPLACE_NAME
from datasets import load_dataset
import huggingface_hub
from metachain import MetaChain
from metachain.logger import MetaChainLogger, LoggerManager
from evaluation.utils import make_metadata, prepare_dataset, update_progress, check_port_available, run_evaluation, clean_msg
from evaluation.types import EvalMetadata, EvalOutput
import metachain.agents as agenthub
import os.path as osp
import pandas as pd
import asyncio
import re
import os
import shutil
from metachain.registry import registry
from evaluation.gaia.scorer import question_scorer
import json
# from metachain.util import run_command_in_container
from metachain.environment.docker_env import DockerEnv, DockerConfig, check_container_ports, check_container_exist, check_container_running
from metachain.environment.browser_env import BrowserEnv
from metachain.environment.markdown_browser import RequestsMarkdownBrowser
from metachain.types import Response
from metachain.util import function_to_json
from metachain.main import run_in_client
import subprocess
DATASET_CACHE_DIR = osp.join(osp.dirname(__file__), 'data')
# Note: You should run this script in the root directory of the project metachain
def get_args():
    """Parse the CLI options for a GAIA evaluation run."""
    arg_parser = argparse.ArgumentParser()
    # container / repository options
    arg_parser.add_argument('--container_name', type=str, default='gaia_test')
    arg_parser.add_argument('--model', type=str, default='gpt-4o-2024-08-06')
    arg_parser.add_argument('--git_clone', action='store_true', default=False)
    arg_parser.add_argument('--setup_package', type=str, default=None)
    arg_parser.add_argument('--test_pull_name', type=str, default='main')
    arg_parser.add_argument('--debug', action='store_true', default=False)
    # metadata
    arg_parser.add_argument('--agent_func', type=str, default='get_system_triage_agent')
    arg_parser.add_argument('--eval_note', type=str, default=None)
    arg_parser.add_argument('--eval_output_dir', type=str, default='./evaluation_results')
    arg_parser.add_argument('--data_split', type=str, default=None)
    # gaia level / scheduling
    arg_parser.add_argument('--level', type=str, default='1')
    arg_parser.add_argument('--eval_n_limit', type=int, default=None)
    arg_parser.add_argument('--port', type=int, default=12345)
    arg_parser.add_argument('--eval_num_workers', type=int, default=1)
    return arg_parser.parse_args()
def get_config(metadata: EvalMetadata, instance_id: str):
    """Build the DockerConfig for one GAIA instance.

    Reuses the published port of an already-existing container; otherwise
    scans upward from metadata.port for a free one.

    NOTE(review): unlike the variant in the other run_infer script, this port
    scan is not protected by a file lock — two parallel workers could pick
    the same port. Confirm before raising --eval_num_workers above 1.
    """
    container_name = metadata.container_name+f'_{instance_id}'
    port_info = check_container_ports(container_name)
    port = metadata.port
    if port_info:
        # Container already exists: reuse its published port.
        port = port_info[0]
    else:
        while not check_port_available(port):
            port += 1
    # Per-instance workspace directory on the host.
    local_root = os.path.join(os.getcwd(), f"workspace_gaia_whole", f"gaia_eval_{instance_id}")
    os.makedirs(local_root, exist_ok=True)
    docker_config = DockerConfig(
        workplace_name=DOCKER_WORKPLACE_NAME,
        container_name=container_name,
        communication_port=port,
        conda_path='/root/miniconda3',
        local_root=local_root,
    )
    return docker_config
def process_instance(
    instance: pd.Series,
    metadata: EvalMetadata,
    logger: MetaChainLogger,
) -> EvalOutput:
    """Run one GAIA instance end-to-end in its own docker environment.

    Builds the instruction prompt (copying any attached dataset file into the
    workspace), runs the system triage agent, parses the
    <solution>...</solution> answer, and scores it against ground truth.

    NOTE(review): unlike the other run_infer variant, this version never stops
    the container it creates — containers accumulate across a run. Confirm
    whether cleanup is handled elsewhere.
    """
    docker_config = get_config(metadata, instance_id=instance['instance_id'])
    code_env, web_env, file_env = create_environment(docker_config)
    local_workplace = code_env.local_workplace
    docker_workplace = code_env.docker_workplace
    # Setup the logger properly, so you can run multi-processing to parallelize the evaluation
    logger.info(f'Starting evaluation for instance {instance["instance_id"]}.')
    if instance['file_name'] != '':
        assert metadata.data_split is not None
        src_file = os.path.join(
            DATASET_CACHE_DIR, '2023', metadata.data_split, instance['file_name']
        )
        assert os.path.exists(src_file)
        # Copy the attached file into the local workspace as file.<ext>.
        extension_name = instance['file_name'].split('.')[-1]
        dest_file = os.path.join(local_workplace, f'file.{extension_name}')
        shutil.copy(src_file, dest_file)
        file_name = dest_file.split('/')[-1]
    else:
        dest_file = None
    # Prepare instruction
    instruction = f"{instance['Question']}\n"
    logger.info(f'Instruction: {instruction}')
    if dest_file:
        instruction += f"\n\nThe mentioned file is provided in the workspace at: {osp.join(docker_workplace, file_name)}"
    instruction += 'IMPORTANT: You should ONLY interact with the environment provided to you AND NEVER ASK FOR HUMAN HELP.\n'
    instruction += 'Please encapsulate your final answer (answer ONLY) within <solution> and </solution>.\n'
    instruction += (
        'For example: The answer to the question is <solution> 42 </solution>.\n'
    )
    logger.info(f'Instruction:\n{instruction}')
    # In this variant the environments are injected into the agent factory directly.
    system_triage_agent = registry.agents[metadata.agent_func](model=metadata.model, file_env=file_env, web_env=web_env, code_env=code_env)
    messages = [
        {
            'role': 'user',
            'content': instruction
        }
    ]
    context_variables = {}
    # Here's how you can run the agent (similar to the `main` function) and get the final task state
    response: Response | None = asyncio.run(
        run_in_client(
            agent=system_triage_agent,
            messages=messages,
            context_variables = context_variables,
            logger=logger
        )
    )
    messages.extend(response.messages)
    # save messages to a file
    messages_file = osp.join(metadata.eval_output_dir, f"gaia_{instance['instance_id']}", f'messages_{metadata.agent_func.replace("get_", "")}.json')
    os.makedirs(osp.dirname(messages_file), exist_ok=True)
    messages = clean_msg(messages)
    with open(messages_file, 'w', encoding='utf-8') as f:
        json.dump(messages, f, ensure_ascii=False, indent=4)
    # ======= Attempt to evaluate the agent's edits =======
    # If you are working on simpler benchmark that only evaluates the final model output (e.g., in a MessageAction)
    # You can simply get the LAST `MessageAction` from the returned `state.history` and parse it for evaluation.
    if response is None:
        raise ValueError('Response should not be None.')
    model_answer_raw = response.messages[-1]['content']
    # attempt to parse model_answer
    model_answer = re.findall(r'<solution>(.*?)</solution>', model_answer_raw)
    if len(model_answer) == 0:
        # Fall back to the raw message when no <solution> tags were emitted.
        logger.info(f'Failed to parse model answer: {model_answer_raw}', title='WARNING', color='yellow')
        model_answer = model_answer_raw
    else:
        model_answer = model_answer[0]
    logger.info(
        f'Final message: {model_answer} | Ground truth: {instance["Final answer"]}',
        title='INFO', color='green'
    )
    score = question_scorer(
        model_answer=model_answer, ground_truth=instance['Final answer']
    )
    test_result = {
        'score': score,
        'model_answer_raw': model_answer_raw,
        'model_answer': model_answer,
        'ground_truth': instance['Final answer'],
    }
    # Save the output
    output = EvalOutput(
        instance_id=instance['instance_id'],
        instance=instance.to_dict(),
        instruction=instance['Question'],
        metadata=metadata,
        messages=messages,
        test_result=test_result,
    )
    return output
def create_environment(docker_config: DockerConfig):
    """
    1. create the code environment
    2. create the web environment
    3. create the file environment
    """
    # Container is initialized first; presumably the browser/file envs rely on
    # the same local_root/workplace layout being in place — confirm.
    code_env = DockerEnv(docker_config)
    code_env.init_container()
    # Browser environment for web tasks (no browsergym eval task attached).
    web_env = BrowserEnv(browsergym_eval_env = None, local_root=docker_config.local_root, workplace_name=docker_config.workplace_name)
    # Markdown-based file browser; downloads land under <local_root>/<workplace>/downloads.
    file_env = RequestsMarkdownBrowser(viewport_size=1024 * 5, local_root=docker_config.local_root, workplace_name=docker_config.workplace_name, downloads_folder=os.path.join(docker_config.local_root, docker_config.workplace_name, "downloads"))
    return code_env, web_env, file_env
def main(args):
    """Entry point: build run metadata, download the GAIA dataset, and run the evaluation."""
    metadata: EvalMetadata = make_metadata(
        model=args.model,
        dataset_name="gaia",
        agent_func=args.agent_func,
        eval_note=args.eval_note,
        eval_output_dir=args.eval_output_dir,
        data_split=args.data_split,
        details={'gaia-level': args.level},
        port=args.port,
        container_name=args.container_name,
    )
    log_path = osp.join(metadata.eval_output_dir, 'logs', f'agent_{metadata.model}.log')
    LoggerManager.set_logger(MetaChainLogger(log_path))
    # Load the benchmark and mirror its attachments into the local cache dir.
    dataset = load_dataset('gaia-benchmark/GAIA', args.level)
    huggingface_hub.snapshot_download(
        'gaia-benchmark/GAIA',
        repo_type='dataset',
        local_dir=DATASET_CACHE_DIR,
    )
    gaia_tests = dataset[metadata.data_split].to_pandas()
    # GAIA uses 'task_id'; the evaluation harness expects 'instance_id'.
    gaia_tests.rename(columns={'task_id': 'instance_id'}, inplace=True)
    output_file = osp.join(metadata.eval_output_dir, 'output.jsonl')
    # presumably filters already-completed instances and applies --eval_n_limit — confirm against evaluation.utils.prepare_dataset
    prepared_dataset = prepare_dataset(gaia_tests, output_file, args.eval_n_limit)
    run_evaluation(
        dataset=prepared_dataset,
        metadata=metadata,
        output_file=output_file,
        num_workers=args.eval_num_workers,
        process_instance_func=process_instance,
    )


if __name__ == "__main__":
    args = get_args()
    main(args)
    # print(check_container_exist('gaia_lite_eval_c61d22de-5f6c-4958-a7f6-5e9707bd3466'))

102
evaluation/gaia/scorer.py Normal file
View file

@ -0,0 +1,102 @@
import re
import string
import warnings
def normalize_number_str(number_str: str) -> float:
# we replace these common units and commas to allow
# conversion to float
for char in ['$', '%', ',']:
number_str = number_str.replace(char, '')
try:
return float(number_str)
except ValueError:
print(f'String {number_str} cannot be normalized to number str.')
return float('inf')
def split_string(
s: str,
char_list: list[str] = None,
) -> list[str]:
if char_list is None:
char_list = [',', ';']
pattern = f"[{''.join(char_list)}]"
return re.split(pattern, s)
def question_scorer(
    model_answer: str,
    ground_truth: str,
) -> bool:
    """Score a GAIA model answer against the ground truth.

    The comparison mode is chosen from the ground truth's shape:
    - numeric: float equality after stripping $/%/, from the model answer;
    - comma/semicolon separated list: element-wise comparison;
    - otherwise: normalized string equality (whitespace/punct/case removed).
    """
    def is_float(element) -> bool:
        # Fix: the parameter was annotated with the builtin function `any`,
        # which is not a type; the annotation is simply dropped.
        try:
            float(element)
            return True
        except ValueError:
            return False
    # if gt is a number
    if is_float(ground_truth):
        print(f'Evaluating {model_answer} as a number.')
        normalized_answer = normalize_number_str(model_answer)
        return normalized_answer == float(ground_truth)
    # if gt is a list
    elif any(char in ground_truth for char in [',', ';']):
        print(f'Evaluating {model_answer} as a comma separated list.')
        # question with the fish: normalization removes punct
        gt_elems = split_string(ground_truth)
        ma_elems = split_string(model_answer)
        # check length is the same
        if len(gt_elems) != len(ma_elems):
            warnings.warn(
                'Answer lists have different lengths, returning False.',
                UserWarning,
                stacklevel=2,
            )
            return False
        # compare each element as float or str
        comparisons = []
        for ma_elem, gt_elem in zip(ma_elems, gt_elems):
            if is_float(gt_elem):
                normalized_ma_elem = normalize_number_str(ma_elem)
                comparisons.append(normalized_ma_elem == float(gt_elem))
            else:
                # we do not remove punct since comparisons can include punct
                comparisons.append(
                    normalize_str(ma_elem, remove_punct=False)
                    == normalize_str(gt_elem, remove_punct=False)
                )
        return all(comparisons)
    # if gt is a str
    else:
        print(f'Evaluating {model_answer} as a string.')
        return normalize_str(model_answer) == normalize_str(ground_truth)
def normalize_str(input_str, remove_punct=True) -> str:
    """Normalize a string for comparison.

    Removes all whitespace (so 'sea gull' equals 'seagull'), lowercases,
    and — unless remove_punct is False — strips all punctuation.

    Parameters:
      - input_str: str, the string to normalize
      - remove_punct: bool, whether to remove punctuation (default: True)

    Returns:
      - str, the normalized string
    """
    collapsed = re.sub(r'\s', '', input_str).lower()
    if not remove_punct:
        return collapsed
    return collapsed.translate(str.maketrans('', '', string.punctuation))

View file

@ -0,0 +1,12 @@
# Resolve this script's own directory, then run from the repository root.
# Fix: paths are now quoted (breaks on directories containing spaces otherwise)
# and a failed cd aborts instead of running the evaluation from the wrong dir.
current_dir=$(dirname "$(readlink -f "$0")")
cd "$current_dir" || exit 1
cd ../../../ || exit 1
export DOCKER_WORKPLACE_NAME=workplace
export EVAL_MODE=True
export DEBUG=True
export BASE_IMAGES=tjbtech1/gaia-bookworm:v2
export COMPLETION_MODEL=claude-3-5-sonnet-20241022
python evaluation/gaia/run_infer.py --container_name gaia_lite_eval --model "${COMPLETION_MODEL}" --test_pull_name test_pull_1225 --debug --eval_num_workers 1 --port 12345 --data_split validation --level 2023_all --agent_func get_system_triage_agent --git_clone

View file

@ -0,0 +1,17 @@
from metachain.environment.docker_env import DockerEnv, DockerConfig, check_container_ports, check_container_exist, check_container_running
from metachain.tools.files import create_file
if __name__ == "__main__":
    import os
    # Force the amd64 base image before DockerEnv reads the env var.
    os.environ["BASE_IMAGES"] = "tjbtech1/gaia-bookworm:amd64"
    config = DockerConfig(container_name = "gaia_amd64_test",
        workplace_name = "workplace_gaia_amd64_test",
        communication_port = 12345,
        conda_path = "/root/miniconda3"
    )
    env = DockerEnv(config)
    env.init_container()
    # Smoke test: create a file inside the container and print the result.
    res = create_file(path = 'test.py', content = 'print("hello world")', env = env)
    print(res)

View file

@ -0,0 +1,125 @@
from pathlib import Path
from tqdm import tqdm
import multiprocessing
from copy import deepcopy
import re
from lm_eval.tasks.minerva_math.utils import (
last_boxed_only_string,
normalize_final_answer,
get_unnormalized_answer,
remove_boxed,
is_equiv,
)
import yaml
import argparse
def load_yaml(path: Path):
    """Load a YAML result file.

    Fix: yaml.CLoader only exists when PyYAML was built against libyaml;
    fall back to the equivalent pure-Python Loader instead of raising
    AttributeError on such installs.
    NOTE(review): yaml.Loader executes arbitrary-tag construction — only use
    on result files this pipeline wrote itself, never on untrusted input.
    """
    loader = getattr(yaml, "CLoader", yaml.Loader)
    with open(path, "r") as f:
        data = yaml.load(f, Loader=loader)
    return data
def save_yaml(path: Path, data, sort_keys=True):
    """Serialize *data* as YAML to *path* (keys sorted by default)."""
    with open(path, "w") as out_file:
        yaml.dump(data, out_file, sort_keys=sort_keys)
# Regex for the canonical GSM8K answer marker: "#### <number>".
ANS_RE_GSM8k = re.compile(r"#### (\-?[\$0-9\.\,]+)")
# Sentinel returned when no answer marker is found in a completion.
INVALID_ANS_GSM8k = "[invalid]"
# Patterns stripped from extracted answers before comparison (commas, dollar
# signs, and a trailing period).
GSM8K_IGNORE_REGEXES = [",", "\\$", "\\.$"]
def filter_ignores(st, regexes_to_ignore):
    """Remove every regex in *regexes_to_ignore* from *st*; None is a no-op."""
    if regexes_to_ignore is None:
        return st
    for pattern in regexes_to_ignore:
        st = re.sub(pattern, "", st)
    return st
def extract_answer_gsm8k(completion):
    """Extract the '#### <answer>' value from a GSM8K completion.

    Returns the cleaned answer string, or the invalid sentinel when the
    completion contains no answer marker.
    """
    match = ANS_RE_GSM8k.search(completion)
    if match is None:
        return INVALID_ANS_GSM8k
    return filter_ignores(match.group(1).strip(), GSM8K_IGNORE_REGEXES)
def is_correct_gsm8k(model_completion, gt_example):
    """True when the model's extracted GSM8K answer matches the ground truth.

    The ground-truth example must itself contain a valid '#### <answer>'
    marker; the comparison accepts exact string match or is_equiv.
    """
    gt_answer = extract_answer_gsm8k(gt_example)
    assert gt_answer != INVALID_ANS_GSM8k
    predicted = extract_answer_gsm8k(model_completion)
    return predicted == gt_answer or is_equiv(predicted, gt_answer)
def my_get_unnormalized_answer(og_pred):
    """Extract the raw final answer, additionally unwrapping \\( \\) / \\[ \\]
    LaTeX math delimiters around it.

    Wraps lm_eval's get_unnormalized_answer.
    Fix: removed a leftover debug print that dumped every prediction to stdout.
    """
    og_pred = get_unnormalized_answer(og_pred)
    return re.sub(r"\\+[\(\[](.+?)\\+[\)\]]", "\\1", og_pred)
def is_correct_minerva(og_pred, gt):
    """True when the normalized MATH answer matches the ground-truth string.

    Fix: removed leftover debug prints of pred/gt on every comparison.
    """
    pred = normalize_final_answer(my_get_unnormalized_answer(og_pred))
    # gt = normalize_final_answer(remove_boxed(last_boxed_only_string(gt)))
    # string equality check needed because of https://github.com/EleutherAI/lm-evaluation-harness/issues/2212
    return pred == gt or is_equiv(pred, gt)
def is_correct(sample: str, gt_answer: str, dset: str):
    """Dispatch correctness checking to the dataset-specific scorer."""
    checkers = {
        "gsm8k": is_correct_gsm8k,
        "math": is_correct_minerva,
    }
    checker = checkers.get(dset)
    if checker is None:
        raise ValueError(f"Dataset {dset} not supported")
    return checker(sample, gt_answer)
def get_tasks(config):
    """Build one per-sample task config for every generation YAML in samples_dir.

    Each task is a deep copy of *config* annotated with the sample's source
    path and its destination path under save_dir.
    """
    tasks = []
    sample_iter = tqdm(Path(config.samples_dir).glob("*.yaml"), desc="Loading generations")
    for sample_path in sample_iter:
        task_config = deepcopy(config)
        task_config.sample_path = sample_path
        task_config.save_path = config.save_dir / sample_path.name
        tasks.append(task_config)
    return tasks
def main(args):
    """Score every result YAML under save_dir and print overall accuracy.

    Fix: an empty save_dir previously raised ZeroDivisionError; it now
    prints a message and returns.
    """
    tasks = Path(args.save_dir).glob("*.yaml")
    corrects = []
    for task in tqdm(tasks, desc="Evaluating"):
        result = load_yaml(task)
        corrects.append(is_correct(result["answer"], result["gt_answer"], "math"))
    if not corrects:
        print("No result files found; accuracy undefined.")
        return
    print(f"Accuracy: {sum(corrects) / len(corrects)}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--save_dir", type=str, default="evaluation_results/math500/math_solver")
    args = parser.parse_args()
    main(args)

View file

@ -0,0 +1,23 @@
# Four-shot chain-of-thought prompt for MATH-style problems. Each exemplar
# ends with the exact "Final Answer: The final answer is ... I hope it is
# correct." pattern that the downstream answer extractor matches on.
MATH_COT_PROMPT = """Problem:
Find the domain of the expression $\\frac{\\sqrt{x-2}}{\\sqrt{5-x}}$.}
Solution:
The expressions inside each square root must be non-negative. Therefore, $x-2 \\ge 0$, so $x\\ge2$, and $5 - x \\ge 0$, so $x \\le 5$. Also, the denominator cannot be equal to zero, so $5-x>0$, which gives $x<5$. Therefore, the domain of the expression is $\\boxed{[2,5)}$.\nFinal Answer: The final answer is $[2,5)$. I hope it is correct.
Problem:
If $\\det \\mathbf{A} = 2$ and $\\det \\mathbf{B} = 12,$ then find $\\det (\\mathbf{A} \\mathbf{B}).$
Solution:
We have that $\\det (\\mathbf{A} \\mathbf{B}) = (\\det \\mathbf{A})(\\det \\mathbf{B}) = (2)(12) = \\boxed{24}.$\nFinal Answer: The final answer is $24$. I hope it is correct.
Problem:
Terrell usually lifts two 20-pound weights 12 times. If he uses two 15-pound weights instead, how many times must Terrell lift them in order to lift the same total weight?
Solution:
If Terrell lifts two 20-pound weights 12 times, he lifts a total of $2\\cdot 12\\cdot20=480$ pounds of weight. If he lifts two 15-pound weights instead for $n$ times, he will lift a total of $2\\cdot15\\cdot n=30n$ pounds of weight. Equating this to 480 pounds, we can solve for $n$:\n\\begin{align*}\n30n&=480\\\n\\Rightarrow\\qquad n&=480/30=\\boxed{16}\n\\end{align*}\nFinal Answer: The final answer is $16$. I hope it is correct.
Problem:
If the system of equations\n\n\\begin{align*}\n6x-4y&=a,\\\n6y-9x &=b.\n\\end{align*}has a solution $(x, y)$ where $x$ and $y$ are both nonzero,\nfind $\\frac{a}{b},$ assuming $b$ is nonzero.
Solution:
If we multiply the first equation by $-\\frac{3}{2}$, we obtain\n\n$$6y-9x=-\\frac{3}{2}a.$$Since we also know that $6y-9x=b$, we have\n\n$$-\\frac{3}{2}a=b\\Rightarrow\\frac{a}{b}=\\boxed{-\\frac{2}{3}}.$$\nFinal Answer: The final answer is $-\\frac{2}{3}$. I hope it is correct."""

View file

@ -0,0 +1,121 @@
import torch
from datasets import load_dataset
from tqdm import tqdm
import multiprocessing
import random
import requests
from functools import partial
import argparse
from pathlib import Path
import yaml
from metachain.agents.math.math_solver_agent import get_math_solver_agent
from metachain import MetaChain
from metachain.workflows.math_solver_workflow_flow import majority_voting
import importlib
import os
import asyncio
from evaluation.math500.prompts import MATH_COT_PROMPT
def save_yaml(path: Path, data, sort_keys=True):
    """Write *data* to *path* as YAML (keys sorted by default)."""
    with open(path, "w") as handle:
        yaml.dump(data, handle, sort_keys=sort_keys)
async def run_inference(item, save_dir, workflow):
    """Solve one MATH-500 item and persist the result as a YAML file.

    Skips items whose output file already exists, so the script can be
    re-run to resume an interrupted evaluation.

    Args:
        item: dataset record with at least 'id', 'problem' and 'answer' keys.
        save_dir: Path to the directory where per-item YAML files go.
        workflow: None for the plain math-solver agent, or
            "majority_voting" for the majority-voting workflow.

    Raises:
        ValueError: if *workflow* is not one of the supported values.
    """
    outpath = save_dir / f"{item['id']}.yaml"
    if outpath.exists():
        return
    prompt = MATH_COT_PROMPT + f"\n\nProblem:\n{item['problem']}\n\nYour task is to solve this problem."
    prompt += "Please given your final answer (answer ONLY) within the format of `Final Answer: The final answer is <answer>. I hope it is correct.` after your reasoning \n"
    prompt += "For example: According to ...\nFinal Answer: The final answer is $24$. I hope it is correct.\n"
    if workflow == "majority_voting":
        answer = await majority_voting(prompt)
    elif workflow is None:  # fixed: identity comparison with None, not ==
        agent = get_math_solver_agent(model="deepseek/deepseek-chat")
        client = MetaChain()
        messages = [
            {"role": "user", "content": prompt},
        ]
        context_variables = {}
        response = await client.run_async(agent, messages, context_variables)
        answer = response.messages[-1]['content']
    else:
        raise ValueError(f"Unknown workflow: {workflow}")
    out = {
        "prompt": prompt,
        "question": item["problem"],
        "answer": answer,
        "gt_answer": item["answer"],
    }
    save_yaml(outpath, out)
async def main(args):
    """Run MATH-500 inference over a shuffled, optionally sliced test set."""
    test_dataset = list(
        load_dataset(
            "HuggingFaceH4/MATH-500", "default", split="test", trust_remote_code=True
        )
    )
    print(f"Number of test items: {len(test_dataset)}")
    # Assign stable ids BEFORE shuffling so output filenames are reproducible
    # across runs (the seed fixes the shuffle order).
    random.seed(12345)
    for i, data in enumerate(test_dataset):
        data["id"] = i
    random.shuffle(test_dataset)
    # Collapse the three verbose if/else fallbacks into conditional expressions.
    limit = args.limit if args.limit is not None else len(test_dataset)
    stride = args.stride if args.stride is not None else 1
    offset = args.offset if args.offset is not None else 0
    test_dataset = test_dataset[offset:limit:stride]
    print(f"Total number of items to process: {len(test_dataset)}")
    # One sub-directory per workflow; the plain agent goes to "math_solver".
    # (Fixed `== None` comparison and de-duplicated the two identical branches.)
    sub_dir = "math_solver" if args.workflow is None else args.workflow
    save_dir = Path(args.save_dir) / sub_dir
    save_dir.mkdir(parents=True, exist_ok=True)
    predictions = []
    for item in tqdm(test_dataset):
        predictions.append(await run_inference(item, save_dir, args.workflow))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Number of few-shot examples in the prompt (currently unused by main()).
    parser.add_argument("--num_few_shot", type=int, default=2)
    # Dataset slicing: items [offset:limit:stride] of the shuffled test set.
    parser.add_argument("--limit", type=int, default=3)
    parser.add_argument("--stride", type=int, default=1)
    parser.add_argument("--offset", type=int, default=0)
    # Root directory for the per-item YAML outputs.
    parser.add_argument("--save_dir", type=str, default="evaluation_results/math500")
    # None = plain math-solver agent; "majority_voting" = voting workflow.
    parser.add_argument("--workflow", type=str, default=None)
    args = parser.parse_args()
    asyncio.run(main(args))

65
evaluation/types.py Normal file
View file

@ -0,0 +1,65 @@
from pydantic import BaseModel
from typing import Any, List
import json
import logging
logger = logging.getLogger(__name__)
class EvalMetadata(BaseModel):
    """Metadata describing one evaluation run (persisted as metadata.json)."""

    # Name of the factory used to build the agent (e.g. "get_xxx_agent").
    agent_func: str
    model: str
    eval_output_dir: str
    # Human-readable start timestamp, e.g. "2025-01-01 12:00:00".
    start_time: str
    dataset: str | None = None
    data_split: str | None = None
    details: dict[str, Any] | None = None
    # Docker container / port used for sandboxed runs, if any.
    container_name: str | None = None
    port: int | None = None
    git_clone: bool | None = None
    test_pull_name: str | None = None

    def model_dump(self, *args, **kwargs):
        """Dump to a dict; kept as a hook for redacting sensitive fields."""
        dumped_dict = super().model_dump(*args, **kwargs)
        # avoid leaking sensitive information
        return dumped_dict

    def model_dump_json(self, *args, **kwargs):
        """Dump to JSON text, logging the payload at debug level."""
        dumped = super().model_dump_json(*args, **kwargs)
        dumped_dict = json.loads(dumped)
        logger.debug(f'Dumped metadata: {dumped_dict}')
        return json.dumps(dumped_dict)
class EvalOutput(BaseModel):
    """Result of evaluating one instance (serialized as one JSONL line)."""

    # NOTE: User-specified
    instance_id: str
    # output of the evaluation
    # store anything that is needed for the score calculation
    test_result: dict[str, Any]
    instruction: str | None = None
    # Interaction info
    metadata: EvalMetadata | None = None
    # list[tuple[dict[str, Any], dict[str, Any]]] - for compatibility with the old format
    messages: List | None = None
    error: str | None = None
    # Optionally save the input test instance
    instance: dict[str, Any] | None = None

    def model_dump(self, *args, **kwargs):
        """Dump to a dict, dropping None fields and redacting metadata."""
        dumped_dict = super().model_dump(*args, **kwargs)
        # Remove None values
        dumped_dict = {k: v for k, v in dumped_dict.items() if v is not None}
        # Apply custom serialization for metadata (to avoid leaking sensitive information)
        if self.metadata is not None:
            dumped_dict['metadata'] = self.metadata.model_dump()
        return dumped_dict

    def model_dump_json(self, *args, **kwargs):
        """Dump to JSON text, routing metadata through its own serializer."""
        dumped = super().model_dump_json(*args, **kwargs)
        dumped_dict = json.loads(dumped)
        # Apply custom serialization for metadata (to avoid leaking sensitive information).
        # Fixed: guard on the attribute, not on key presence — pydantic keeps
        # None fields in the JSON dump, so 'metadata' could be present but None,
        # and `self.metadata.model_dump_json()` would raise AttributeError.
        if self.metadata is not None:
            dumped_dict['metadata'] = json.loads(self.metadata.model_dump_json())
        return json.dumps(dumped_dict)

379
evaluation/utils.py Normal file
View file

@ -0,0 +1,379 @@
import os
import pathlib
import subprocess
import time
from typing import Any, TextIO, List, Dict
from .types import EvalMetadata, EvalOutput
import pandas as pd
import json
from typing import Callable, Awaitable
from tqdm import tqdm
from metachain.logger import MetaChainLogger, LoggerManager
import multiprocessing as mp
import psutil
import traceback
import socket
import queue  # needed for queue.Empty when draining the results queue
def make_metadata(
    model: str,
    dataset_name: str,
    agent_func: str,
    eval_note: str | None,
    eval_output_dir: str,
    data_split: str | None = None,
    details: dict[str, Any] | None = None,
    port: int | None = None,
    container_name: str | None = None,
    git_clone: bool = False,
    test_pull_name: str | None = None,
) -> EvalMetadata:
    """Build the EvalMetadata for a run and persist it as metadata.json.

    Side effects: creates the run's output directory tree
    <eval_output_dir>/<dataset>/<agent_func minus 'get_'>/<model>_maxiter<note>
    plus a 'logs' subdirectory, and writes metadata.json inside it.
    """
    # An optional note is embedded into the output directory name.
    eval_note = f'_N_{eval_note}' if eval_note else ''
    eval_output_path = os.path.join(
        eval_output_dir,
        dataset_name,
        agent_func.replace('get_', ''),
        f'{model}_maxiter{eval_note}',
    )
    pathlib.Path(eval_output_path).mkdir(parents=True, exist_ok=True)
    pathlib.Path(os.path.join(eval_output_path, 'logs')).mkdir(
        parents=True, exist_ok=True
    )
    metadata = EvalMetadata(
        agent_func=agent_func,
        model=model,
        eval_output_dir=eval_output_path,
        start_time=time.strftime('%Y-%m-%d %H:%M:%S'),
        dataset=dataset_name,
        data_split=data_split,
        details=details,
        port=port,
        container_name=container_name,
        git_clone=git_clone,
        test_pull_name=test_pull_name,
    )
    # Persist the metadata next to the run outputs for later inspection.
    metadata_json = metadata.model_dump_json()
    with open(os.path.join(eval_output_path, 'metadata.json'), 'w') as f:
        f.write(metadata_json)
    return metadata
def prepare_dataset(
    dataset: pd.DataFrame,
    output_file: str,
    eval_n_limit: int,
    eval_ids: list[str] | None = None,
    skip_num: int | None = None,
):
    """Select the instances that still need to be evaluated.

    Already-finished instance ids are read from *output_file* (JSONL) so an
    interrupted run can resume where it stopped. Filtering precedence:
    explicit *eval_ids* wins; otherwise *skip_num* rows are dropped from the
    front; *eval_n_limit* then caps the number of rows.
    """
    assert (
        'instance_id' in dataset.columns
    ), "Expected 'instance_id' column in the dataset. You should define your own unique identifier for each instance and use it as the 'instance_id' column."
    logger = LoggerManager.get_logger()
    id_column = 'instance_id'
    logger.info(f'Writing evaluation output to {output_file}')
    # Ids already present in the output file are skipped on this run.
    finished_ids: set[str] = set()
    if os.path.exists(output_file):
        with open(output_file, 'r') as f:
            for line in f:
                data = json.loads(line)
                finished_ids.add(str(data[id_column]))
        logger.info(
            f'\nOutput file {output_file} already exists. Loaded {len(finished_ids)} finished instances.', title='Warning', color='red'
        )
    if eval_ids:
        # Convert requested ids to the column's dtype so .isin() matches.
        eval_ids_converted = [dataset[id_column].dtype.type(id) for id in eval_ids]
        dataset = dataset[dataset[id_column].isin(eval_ids_converted)]
        logger.info(f'Limiting evaluation to {len(eval_ids)} specific instances.')
    elif skip_num and skip_num >= 0:
        skip_num = min(skip_num, len(dataset))
        dataset = dataset.iloc[skip_num:]
        logger.info(
            f'Starting evaluation with skipping first {skip_num} instances ({len(dataset)} instances to run).'
        )
        if eval_n_limit and eval_n_limit > 0:
            dataset = dataset.head(eval_n_limit)
            logger.info(f'Limiting evaluation to {eval_n_limit} instances.')
    elif eval_n_limit and eval_n_limit > 0:
        dataset = dataset.head(eval_n_limit)
        logger.info(f'Limiting evaluation to first {eval_n_limit} instances.')
    # NOTE(review): when eval_ids is given, eval_n_limit is never applied —
    # confirm that asymmetry is intended.
    new_dataset = [
        instance
        for _, instance in dataset.iterrows()
        if str(instance[id_column]) not in finished_ids
    ]
    logger.info(
        f'Finished instances: {len(finished_ids)}, Remaining instances: {len(new_dataset)}'
    )
    return pd.DataFrame(new_dataset)
def _process_and_queue(process_instance_func, instance, metadata, use_mp, max_retries, queue):
    """Worker-process entry point: run one instance and put its result on *queue*."""
    try:
        result = _process_instance_wrapper(
            process_instance_func, instance, metadata, use_mp, max_retries
        )
        queue.put(result)
    except Exception as e:
        print(f"Error processing instance {instance.get('instance_id', 'unknown')}: {str(e)}")
        traceback.print_exc()
        # On error, still put something on the queue so the parent process
        # does not wait forever.
        queue.put(None)  # or enqueue a dedicated sentinel value for errors
    # finally:
    #     # make sure the child process's resources are released
    #     queue.close()
def run_evaluation(
    dataset: pd.DataFrame,
    metadata: EvalMetadata | None,
    output_file: str,
    num_workers: int,
    process_instance_func: Callable[
        [pd.Series, EvalMetadata, bool], Awaitable[EvalOutput]
    ],
    max_retries: int = 3,  # number of retries for each instance
):
    """Run *process_instance_func* over every row of *dataset*.

    With num_workers > 1 each instance runs in its own non-daemon child
    process and results come back through a multiprocessing queue;
    otherwise instances run sequentially in this process. Each result is
    appended to *output_file* as a JSON line via update_progress().
    """
    logger = LoggerManager.get_logger()
    use_multiprocessing = num_workers > 1
    if metadata is not None:
        logger.info(
            f'Evaluation started with Agent {metadata.agent_func}\n'
        )
    else:
        logger.info('Running evaluation without metadata.', title='Warning', color='red')
    logger.info(f'Evaluation started with {num_workers} workers.')
    total_instances = len(dataset)
    pbar = tqdm(total=total_instances, desc='Instances processed')
    output_fp = open(output_file, 'a')
    try:
        if use_multiprocessing:
            # Collect results from worker processes through a queue.
            results_queue = mp.Queue()
            active_processes = []
            instances_iter = dataset.iterrows()
            instances_completed = 0
            while instances_completed < total_instances:
                # Spawn new workers until the worker-count limit is reached.
                while len(active_processes) < num_workers and instances_completed < total_instances:
                    try:
                        _, instance = next(instances_iter)
                        # Create a non-daemon process.
                        p = mp.Process(
                            target=_process_and_queue,
                            args=(process_instance_func, instance, metadata, True, max_retries, results_queue),
                            daemon=False  # key point: non-daemon so the worker may spawn its own children
                        )
                        p.start()
                        time.sleep(3)
                        active_processes.append((p, time.time()))  # record the process start time
                    except StopIteration:
                        break
                # Reap worker processes that have exited.
                for p, start_time in active_processes[:]:
                    if not p.is_alive():
                        try:
                            # Give the process one minute to clean up its resources.
                            p.join(timeout=60)
                            if p.is_alive():
                                logger.warning(f"Process {p.pid} cleanup timeout, force terminating...")
                                p.terminate()
                                p.join(timeout=5)
                                if p.is_alive():
                                    p.kill()
                        except Exception as e:
                            logger.warning(f"Error cleaning up process {p.pid}: {str(e)}")
                            p.kill()
                        finally:
                            active_processes.remove((p, start_time))
                # Drain any results currently waiting in the queue.
                try:
                    while not results_queue.empty():
                        result = results_queue.get_nowait()
                        update_progress(result, pbar, output_fp)
                        instances_completed += 1
                except Exception as e:
                    logger.error(f"Error processing results: {str(e)}")
                time.sleep(0.1)  # avoid busy-waiting on the CPU
            # Clean up any remaining worker processes.
            logger.info("Cleaning up remaining processes...")
            for p, _ in active_processes:
                try:
                    # Give each process a short timeout to exit on its own.
                    p.join(timeout=5)
                    if p.is_alive():
                        p.terminate()
                        p.join(timeout=1)
                        if p.is_alive():
                            p.kill()
                except Exception as e:
                    logger.info(f"Error cleaning up process {p.pid}: {str(e)}", title='warning', color='red')
                    try:
                        p.kill()
                    except:
                        pass
            # Drain the queue one last time (queue.Empty ends the loop).
            try:
                while True:
                    try:
                        result = results_queue.get_nowait()
                        update_progress(result, pbar, output_fp)
                        instances_completed += 1
                    except queue.Empty:
                        break
            except Exception as e:
                logger.info(f"Error processing final results: {str(e)}", title='Warning', color='red')
        else:
            # Sequential path: no child processes, shared logger.
            for _, instance in dataset.iterrows():
                result = _process_instance_wrapper(
                    process_instance_func=process_instance_func,
                    instance=instance,
                    metadata=metadata,
                    use_mp=False,
                    max_retries=max_retries,
                )
                update_progress(result, pbar, output_fp)
    except KeyboardInterrupt:
        print('\nKeyboardInterrupt received. Cleaning up...\n')
        if use_multiprocessing:
            for p, _ in active_processes:
                try:
                    p.terminate()
                    p.join(timeout=1)
                except Exception:
                    p.kill()
        cleanup()
    finally:
        # Make sure file and queue resources are released.
        output_fp.close()
        if use_multiprocessing:
            results_queue.close()
            results_queue.join_thread()
    # NOTE(review): output_fp is already closed in the finally block above;
    # this second close is a harmless no-op in CPython — confirm intended.
    output_fp.close()
    logger.info('\nEvaluation finished.\n')
def _process_instance_wrapper_mp(args):
    """Wrapper for multiprocessing, especially for imap_unordered.

    *args* is the positional-argument tuple for _process_instance_wrapper.
    """
    return _process_instance_wrapper(*args)
def _process_instance_wrapper(
    process_instance_func: Callable[[pd.Series, EvalMetadata, bool], EvalOutput],
    instance: pd.Series,
    metadata: EvalMetadata,
    use_mp: bool,
    max_retries: int = 5,
) -> EvalOutput:
    """Wrap the process_instance_func to handle retries and errors.

    Retry an instance up to max_retries times if it fails (e.g., due to
    transient network/runtime issues); raise RuntimeError once all retries
    are exhausted.
    """
    if use_mp:
        # Each worker process writes to its own log file to avoid interleaving.
        log_path = os.path.join(metadata.eval_output_dir, 'logs', f'agent_{metadata.model}_did_{instance["instance_id"]}.log')
        logger = MetaChainLogger(log_path)
    else:
        logger = LoggerManager.get_logger()
    for attempt in range(max_retries + 1):
        try:
            result = process_instance_func(instance, metadata, logger)
            return result
        except Exception as e:
            error = str(e)
            stacktrace = traceback.format_exc()
            if attempt == max_retries:
                msg = (
                    '-' * 10
                    + '\n'
                    + f'Error in instance [{instance.instance_id}]: {error}. Stacktrace:\n{stacktrace}'
                    + '\n'
                    + f'[Encountered after {max_retries} retries. Please check the logs and report the issue.]'
                    + '-' * 10
                )
                # Fixed: log the full message (it was previously built but never
                # used, and the bare error string was logged twice instead).
                logger.info(msg, title='Error', color='red')
                # Raise an error after all retries & stop the evaluation
                raise RuntimeError(
                    f'Maximum error retries reached for instance {instance.instance_id}'
                ) from e
            msg = (
                '-' * 10
                + '\n'
                + f'Error in instance [{instance.instance_id}]: {error}. Stacktrace:\n{stacktrace}'
                + '\n'
                + '-' * 10
                + f'[The above error occurred. Retrying... (attempt {attempt + 1} of {max_retries})]'
                + '-' * 10
                + '\n'
            )
            logger.info(msg, title='Error', color='red')
            if use_mp:
                print(msg)  # use print to directly print to console
            time.sleep(5)
def update_progress(
    result: EvalOutput,
    pbar: tqdm,
    output_fp: TextIO,
):
    """Update the progress bar and write the result to the output file."""
    logger = LoggerManager.get_logger()
    pbar.update(1)
    pbar.set_description(f'Instance {result.instance_id}')
    pbar.set_postfix_str(f'Test Result: {str(result.test_result)[:300]}...')
    logger.info(
        f'Finished evaluation for instance {result.instance_id}: {str(result.test_result)[:300]}...\n'
    )
    # Append as one JSON line and flush immediately so interrupted runs
    # can be resumed from the partial output file.
    output_fp.write(json.dumps(result.model_dump()) + '\n')
    output_fp.flush()
def cleanup():
    """Terminate and join every live child process of this process."""
    print('Cleaning up child processes...')
    for child in mp.active_children():
        print(f'Terminating child process: {child.name}')
        child.terminate()
        child.join()
def check_port_available(port):
    """Return True if TCP *port* can be bound on all interfaces, else False."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        try:
            # Allow rebinding sockets lingering in TIME_WAIT so the check is
            # not confused by recently-closed connections.
            s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            # Try to bind the port; success means it is free.
            s.bind(('0.0.0.0', port))
            # Fixed: removed the redundant s.close() — the `with` block
            # already closes the socket on exit.
            return True  # the port is available
        except socket.error:
            return False  # the port is not available
def clean_msg(msg: List[Dict[str, Any]]):
    """Return a copy of *msg* with image payloads replaced by placeholders.

    Text content is kept as-is; any 'image_url' part of a multi-part message
    is replaced by the string 'placeholder' so logs/outputs do not carry
    large base64 screenshots. Messages whose content is neither str nor list
    are dropped, matching the original behavior.

    Fixed: the input list is no longer mutated — the original reassigned
    m['content'] in place, corrupting the caller's message objects.
    """
    new_msg = []
    for m in msg:
        msg_content = m['content']
        if isinstance(msg_content, str):
            new_msg.append({**m})
        elif isinstance(msg_content, list):
            new_content = []
            for c in msg_content:
                if c['type'] == 'text':
                    new_content.append(c.copy())
                elif c['type'] == 'image_url':
                    new_content.append({'type': 'image_url', 'image_url': 'placeholder'})
            new_msg.append({**m, 'content': new_content})
    return new_msg

44
loop_utils/font_page.py Normal file
View file

@ -0,0 +1,44 @@
from rich.table import Table
from rich.console import Console
from rich.text import Text
from rich.panel import Panel
from rich.style import Style
from rich.box import DOUBLE
from rich.markdown import Markdown
# Fixed: removed the duplicated `from rich.console import Console` import.

# Fancy-script banner shown on the landing page.
MC_LOGO = """\
𝒞𝓇𝑒𝒶𝓉𝑒 𝒜𝑔𝑒𝓃𝓉𝒾𝒸 𝒜 𝓊𝓈𝒾𝓃𝑔 𝒶𝓃𝑔𝓊𝒶𝑔𝑒
""".strip()

# Version/author info table rendered under the logo.
version_table = Table(show_header=False, box=DOUBLE, expand=True)
version_table.add_column("Key", style="cyan")
version_table.add_column("Value", style="green")
version_table.add_row("Version", "0.1.0")
version_table.add_row("Author", "MetaChain Team@HKU")
version_table.add_row("License", "MIT")

# Usage notes shown on the mode-selection menu.
NOTES = """\
* Choose `user mode` if you just want to let a general yet powerful AI Assistant to help you
* Choose `agent editor` to create your own AI Agent with language.
* Choose `workflow editor` to create your own AI Workflow with language.
* Choose `exit` to exit the program
"""
NOTES = Markdown(NOTES)

# Farewell banner printed on exit.
GOODBYE_LOGO = """\
· 𝓜𝓮𝓽𝓪𝓒𝓱𝓪𝓲𝓷-𝓐𝓘 ·
""".strip()

9
metachain/__init__.py Normal file
View file

@ -0,0 +1,9 @@
from .core import MetaChain
from .types import Agent, Response
# from .workflow import Graph, meta_workflow, FlowEngine
from .flow import default_drive
import metachain.workflows
import metachain.tools
import metachain.agents
__all__ = ["MetaChain", "Agent", "Response", "default_drive", ]

View file

@ -0,0 +1,64 @@
# from metachain.agents.programming_agent import get_programming_agent
# from metachain.agents.tool_retriver_agent import get_tool_retriver_agent
# from metachain.agents.agent_check_agent import get_agent_check_agent
# from metachain.agents.tool_check_agent import get_tool_check_agent
# from metachain.agents.github_agent import get_github_agent
# from metachain.agents.programming_triage_agent import get_programming_triage_agent
# from metachain.agents.plan_agent import get_plan_agent
# import os
# import importlib
# from metachain.registry import registry
# # 获取当前目录下的所有 .py 文件
# current_dir = os.path.dirname(__file__)
# for file in os.listdir(current_dir):
# if file.endswith('.py') and not file.startswith('__'):
# module_name = file[:-3]
# importlib.import_module(f'metachain.agents.{module_name}')
# # 导出所有注册的 agent 创建函数
# globals().update(registry.agents)
# __all__ = list(registry.agents.keys())
import os
import importlib
from metachain.registry import registry
def import_agents_recursively(base_dir: str, base_package: str):
    """Walk *base_dir* and import every non-dunder .py module it contains.

    Args:
        base_dir: the root directory to start searching.
        base_package: the dotted Python package name that *base_dir* maps to.
    """
    for root, _dirs, files in os.walk(base_dir):
        # Path of this directory relative to the search root.
        rel_path = os.path.relpath(root, base_dir)
        candidates = [f for f in files if f.endswith('.py') and not f.startswith('__')]
        for filename in candidates:
            module_stem = filename[:-3]
            if rel_path == '.':
                # Module sits directly in the root package.
                module_path = f"{base_package}.{module_stem}"
            else:
                # Module sits in a sub-package: translate path separators to dots.
                package_path = rel_path.replace(os.path.sep, '.')
                module_path = f"{base_package}.{package_path}.{module_stem}"
            try:
                importlib.import_module(module_path)
            except Exception as e:
                print(f"Warning: Failed to import {module_path}: {e}")
# get the current directory and import all agents
current_dir = os.path.dirname(__file__)
import_agents_recursively(current_dir, 'metachain.agents')
# export all agent creation functions (built-in and plugin registries)
globals().update(registry.agents)
globals().update(registry.plugin_agents)
# NOTE(review): plugin agents are injected into globals() but their names are
# not listed in __all__ — confirm that is intended.
__all__ = list(registry.agents.keys())

View file

@ -0,0 +1,25 @@
from metachain.types import Agent
from metachain.tools import (
get_api_plugin_tools_doc, check_agent
)
from metachain.registry import register_agent
@register_agent(name = "Agent Check Agent", func_name="get_agent_check_agent")
def get_agent_check_agent(model: str):
    """Build an agent that decides whether a new agent must be developed for a task.

    Args:
        model: model identifier used by the returned Agent.
    """
    def instructions(context_variables):
        # Fixed prompt typos: "of of" -> "of", "an pre-built" -> "a pre-built",
        # "apprioriate" -> "appropriate", and repaired the dangling sentence.
        return \
f"""You are a developer working on a project named 'metachain'.
You are given a user request and required to use existing project code to solve the task.
Your goal is to enrich the functionality of existing list of agents in the `agents` folder as much as possible, so that once the similar task occurs again, the agent can solve it directly without developing new agents.
You should first decide whether you should develop a new agent to solve the task.
If you already have a pre-built agent in the `agents` folder and suitable actions in the `actions` folder you could use with it, you should not develop a new agent.
Note that the key of agent is the appropriate `instructions` and `functions` using existing tools.
Answer 'Needed' or 'Not needed' first and then give your reason.
"""
    return Agent(
        name="Agent Check Agent",
        model=model,
        instructions=instructions,
        functions=[check_agent],
        parallel_tool_calls = False
    )

View file

@ -0,0 +1,147 @@
from metachain.types import Agent
from metachain.registry import register_agent
from browsergym.core.action.highlevel import HighLevelActionSet
from metachain.util import function_to_json
import gymnasium as gym
import browsergym.miniwob # register miniwob tasks as gym environments
import importlib
import json
from functools import wraps
from typing import Callable, Union
from metachain.environment.browser_env import BrowserEnv
import inspect
from metachain.types import Result
from browsergym.utils.obs import flatten_axtree_to_str
def get_error_prefix(last_browser_action: str) -> str:
    """Build the warning prefix shown to the agent after a failed browser action."""
    return (
        'IMPORTANT! Last action is incorrect:\n'
        + last_browser_action
        + '\nThink again with the current observation of the page.\n'
    )
def wrap_browser_action(action_func: Callable, env: BrowserEnv) -> Callable:
    """Wrap a browsergym action function so calls execute against *env*.

    The wrapper serializes the call into a browsergym action string, steps
    the environment with it, and converts the resulting observation into a
    Result carrying the page URL, accessibility tree and a screenshot.

    Args:
        action_func: the raw browsergym action function (used for its name,
            signature and docstring; it is not invoked directly).
        env: the BrowserEnv instance the action is executed in.

    Returns:
        A function with the same signature that interacts with the environment.
    """
    @wraps(action_func)
    def wrapper(*args, **kwargs) -> Union[Result, str]:
        error_prefix = ""
        try:
            # Serialize the call into an action string.
            # NOTE(review): only keyword arguments are forwarded here —
            # positional *args are silently dropped; confirm callers always
            # pass keywords.
            action_str = f"{action_func.__name__}({', '.join([f'{repr(v)}' for k, v in kwargs.items()])})"
            # Step the environment with the serialized action.
            obs = env.step(action_str)
            obs_dict = dict(
                content=obs['text_content'],  # text content of the page
                url=obs.get('url', ''),  # URL of the page
                screenshot=obs.get('screenshot', None),  # base64-encoded screenshot, png
                open_pages_urls=obs.get('open_pages_urls', []),  # list of open pages
                active_page_index=obs.get(
                    'active_page_index', -1
                ),  # index of the active page
                dom_object=obs.get('dom_object', {}),  # DOM object
                axtree_object=obs.get('axtree_object', {}),  # accessibility tree object
                extra_element_properties=obs.get('extra_element_properties', {}),
                focused_element_bid=obs.get(
                    'focused_element_bid', None
                ),  # focused element bid
                last_browser_action=obs.get(
                    'last_action', ''
                ),  # last browser env action performed
                last_browser_action_error=obs.get('last_action_error', ''),
                error=True if obs.get('last_action_error', '') else False,  # error flag
            )
        except Exception as e:
            # Fixed: include every key read below ('last_browser_action',
            # 'url', 'axtree_object', 'extra_element_properties') so the
            # error path no longer raises KeyError.
            obs_dict = dict(
                content=str(e),
                url='',
                screenshot='',
                axtree_object={},
                extra_element_properties={},
                last_browser_action='',
                error=True,
                last_browser_action_error=str(e),
            )
        if obs_dict['error']:
            # add error recovery prompt prefix
            error_prefix = get_error_prefix(obs_dict['last_browser_action'])
        cur_url = obs_dict['url']
        try:
            cur_axtree_txt = flatten_axtree_to_str(
                obs_dict['axtree_object'],
                extra_properties=obs_dict['extra_element_properties'],
                with_clickable=True,
                filter_visible_only=True,
            )
        except Exception as e:
            print(
                'Error when trying to process the accessibility tree: %s', e
            )
            return 'Error encountered when browsing.'
        ret_value = f"""\
{error_prefix}
# Current Page URL:
{cur_url}
# Current Accessibility Tree:
{cur_axtree_txt}
Here is an example with chain of thought of a valid action when clicking on a button:
"
In order to accomplish my goal I need to click on the button with bid 12
```click("12")```
"
""".strip()
        return Result(
            value=ret_value,
            image=obs_dict['screenshot'],
        )
    # Preserve the original function's signature and docstring so the
    # function schema exposed to the LLM stays accurate.
    wrapper.__signature__ = inspect.signature(action_func)
    wrapper.__doc__ = action_func.__doc__
    return wrapper
@register_agent(name = "Browsing Agent", func_name="get_browsing_agent")
def get_browsing_agent(model: str):
    """Create an agent whose tools are browsergym actions bound to a live BrowserEnv."""
    env = BrowserEnv()
    demo_mode = "off"
    action_set = HighLevelActionSet(
        subsets=["chat", "nav", "bid"],  # define a subset of the action space
        # subsets=["chat", "bid", "coord", "infeas"] # allow the agent to also use x,y coordinates
        strict=False,  # less strict on the parsing of the actions
        multiaction=False,  # does not enable the agent to take multiple actions at once
        demo_mode=demo_mode,  # add visual effects
    )
    # Resolve each action name to its implementation in browsergym, then wrap
    # it so calling the tool actually steps the shared browser environment.
    func_list = [act for act in action_set.action_set.keys()]
    func_module = importlib.import_module("browsergym.core.action.functions")
    func_list = [getattr(func_module, func) for func in func_list]
    wrap_func_list = [wrap_browser_action(func, env) for func in func_list]
    def instructions(context_variables):
        # The goal is supplied by the caller through context_variables.
        goal = context_variables.get("goal", "")
        action_space = action_set.describe(with_long_description=False, with_examples=True)
        return \
f"""Review the current state of the page and all other information to find the best
possible next action to accomplish your goal. Your answer will be interpreted
and executed by a program, make sure to follow the formatting instructions.
# Goal:
{goal}
# Action Space
{action_space}
"""
    return Agent(
        name="Browsing Agent",
        model=model,
        instructions=instructions,
        functions=wrap_func_list
    )

View file

@ -0,0 +1,36 @@
from metachain.types import Agent
from metachain.tools import tool_dummy
from typing import Union
from metachain.registry import register_plugin_agent # import the register_agent function from the registry
@register_plugin_agent(name = "Dummy Agent", func_name="get_dummy_agent") # You must register the agent in the registry, otherwise the agent will not be loaded. The name of register_agent is get_xxx_agent.
def get_dummy_agent(model: str):
    """
    This is a dummy agent, it's used for demonstrating the usage of the metachain.
    Args:
        model: The model to be used for the agent.
    Returns:
        An agent instance.
    """
    def dummy_instructions(context_variables: dict):
        """
        The function should take the context_variables as an argument, and return a string. The context_variables is a dictionary, and it's track the important variables of the agent in the whole conversation.
        The instructions should be concise and clear, and it's very important for the agent to follow the instructions.
        """
        tmp_variables = context_variables.get("tmp_variables", {})
        return f"""..."""
    return Agent(
        name="Dummy Agent", # The name of the agent, you can change it in different scenes.
        model=model, # The default model is gpt-4o-2024-08-06, you can change it to other models if user specified.
        # NOTE(review): "..." is a truthy string, so `"..." or dummy_instructions`
        # always evaluates to the literal "..." and dummy_instructions is never
        # used — confirm this template behavior is intended.
        instructions="..." or dummy_instructions, # the instructions of the agent, the instructions can be a string or a function that returns a string. If it is a function, the function should take the context_variables as an argument, and return a string. The instructions should be concise and clear, and it's very important for the agent to follow the instructions.
        functions=[tool_dummy], # The tools of the agent, you can add different tools in different scenes.
    )
"""
Form to create an agent:
agent_name = "Dummy Agent"
agent_description = "This is a dummy agent, it's used for demonstrating the usage of the metachain."
agent_instructions = "..." | "...{global_variables}..."
agent_tools = [tool_dummy]
"""

View file

@ -0,0 +1,25 @@
from metachain.types import Agent
from metachain.tools import (
push_changes, submit_pull_request
)
from metachain.registry import register_agent
@register_agent(name = "Github Agent", func_name="get_github_agent")
def get_github_agent(model: str):
    """Create an agent that pushes repo changes and optionally opens a pull request.

    Args:
        model: model identifier used by the returned Agent.
    """
    def instructions(context_variables):
        # Static routine; context_variables is unused but kept to satisfy the
        # instructions-callable interface.
        return \
f"""You are an agent that helps user to manage the GitHub repository named 'metachain'.
The user will give you the suggestion of the changes to be pushed to the repository.
Follow the following routine with the user:
1. First, use `push_changes` to push the changes to the repository. (If the user want to push all the changes, use `push_changes` with `file_paths=None` as the argument.)
2. Then, ask the user whether to submit a pull request to a target branch. (If yes, give the `target_branch`)
3. If the user wants to submit a pull request, use `submit_pull_request` to submit the pull request, if not, just ignore this step.
"""
    return Agent(
        name="Github Agent",
        model=model,
        instructions=instructions,
        functions=[push_changes, submit_pull_request],
        parallel_tool_calls = False  # tools must run sequentially (push before PR)
    )

View file

@ -0,0 +1,12 @@
from metachain import Agent
from metachain.registry import register_plugin_agent
@register_plugin_agent(name="Condition Extraction Agent", func_name = "get_condition_extraction_agent")
def get_condition_extraction_agent(model):
    """Create an agent that extracts conditions, values and constraints from a math problem."""
    instruction = """
    This agent is specialized in identifying and extracting all relevant conditions, given values, and constraints from the math problem.
    """
    return Agent(
        name="Condition Extraction Agent",
        # NOTE(review): the text is passed as description=, not instructions= —
        # confirm the Agent type treats description as the prompt here.
        description=instruction,
        model=model,
    )

View file

@ -0,0 +1,17 @@
from metachain.types import Agent
from metachain.registry import register_plugin_agent
@register_plugin_agent(name="Math Solver Agent", func_name="get_math_solver_agent")
def get_math_solver_agent(model: str):
    """Create an agent that solves math problems with a systematic, step-by-step approach."""
    solver_prompt = 'You are responsible for solving mathematical problems using a systematic approach. You should:\n1. Use the provided conditions and objective to formulate a solution strategy\n2. Break down complex problems into smaller steps\n3. Apply appropriate mathematical concepts and formulas\n4. Show clear step-by-step work and explanations\n5. Verify the solution matches the problem requirements'
    return Agent(
        name="Math Solver Agent",
        model=model,
        instructions=solver_prompt,
        functions=[],
    )

View file

@ -0,0 +1,12 @@
from metachain import Agent
from metachain.registry import register_plugin_agent
@register_plugin_agent(name="Math Solver Agent", func_name = "get_math_solver_agent")
def get_math_solver_agent(model):
    """Create an agent that checks condition sufficiency and solves math problems.

    NOTE(review): another module in this repository also registers
    func_name="get_math_solver_agent" — the two registrations collide in the
    plugin registry; confirm which one is intended to win.
    """
    instruction = """
    This agent is specialized in evaluating whether conditions are sufficient and solving math problems using appropriate mathematical methods.
    """
    return Agent(
        name="Math Solver Agent",
        description=instruction,
        model=model,
    )

View file

@ -0,0 +1,12 @@
from metachain import Agent
from metachain.registry import register_plugin_agent
@register_plugin_agent(name="Objective Extraction Agent", func_name = "get_objective_extraction_agent")
def get_objective_extraction_agent(model):
    """Create an agent that extracts the main objective/question from a math problem."""
    instruction = """
    This agent is specialized in analyzing math problems and extracting the main objective or question being asked.
    """
    return Agent(
        name="Objective Extraction Agent",
        description=instruction,
        model=model,
    )

View file

@ -0,0 +1,12 @@
from metachain import Agent
from metachain.registry import register_plugin_agent
@register_plugin_agent(name="Result Aggregator Agent", func_name = "get_result_aggregator_agent")
def get_result_aggregator_agent(model):
    """Create an agent that aggregates results from different models via majority voting."""
    instruction = """
    This agent is specialized in aggregating results from different models and determining the final answer through majority voting.
    """
    return Agent(
        # Fixed copy-paste bug: this agent was named "Math Solver Agent",
        # which clashed with the actual math-solver agent's name.
        name="Result Aggregator Agent",
        description=instruction,
        model=model,
    )

View file

@ -0,0 +1,17 @@
from metachain.types import Agent
from metachain.registry import register_plugin_agent
@register_plugin_agent(name="Vote Aggregator Agent", func_name="get_vote_aggregator_agent")
def get_vote_aggregator_agent(model: str):
    """Create an agent that merges multiple candidate solutions by majority vote."""
    aggregator_prompt = 'You are a solution aggregator specializing in combining and analyzing multiple solutions to determine the most accurate answer. Your responsibilities include:\n\n1. Carefully review all provided solutions\n2. Compare the reasoning and calculations in each solution\n3. Identify commonalities and differences between solutions\n4. Implement majority voting when solutions differ\n5. Evaluate the confidence level of each solution\n6. Provide justification for the final selected answer\n\nWhen aggregating solutions:\n1. List all solutions received\n2. Compare the approach and methodology used in each\n3. Identify the final answer from each solution\n4. Apply majority voting to determine the consensus\n5. If no clear majority, analyze the reasoning quality to break ties\n6. Present the final selected answer with explanation of the selection process'
    return Agent(
        name="Vote Aggregator Agent",
        model=model,
        instructions=aggregator_prompt,
        functions=[],
    )

View file

@ -0,0 +1,78 @@
from metachain.registry import register_agent
from metachain.tools.meta.edit_agents import list_agents, create_agent, delete_agent, run_agent, read_agent, create_orchestrator_agent
from metachain.tools.meta.edit_tools import list_tools, create_tool, delete_tool, run_tool
from metachain.tools.terminal_tools import execute_command, terminal_page_down, terminal_page_up, terminal_page_to
from metachain.types import Agent
from metachain.io_utils import read_file
@register_agent(name = "Agent Creator Agent", func_name="get_agent_creator_agent")
def get_agent_creator_agent(model: str) -> Agent:
    """
    The agent creator is an agent that can be used to create the agents.

    It parses XML agent forms and creates, updates, orchestrates and runs
    agents in the MetaChain framework.

    Args:
        model: Name of the LLM that backs the agent.

    Returns:
        An Agent wired with the agent-management, execution and terminal
        paging tools listed in ``tool_list`` below.
    """
    def instructions(context_variables):
        # Callable instructions: re-evaluated with the runtime context
        # variables each time the agent is invoked.
        return f"""\
You are an Agent Creator specialized in the MetaChain framework. Your primary responsibility is to create, manage, and orchestrate agents based on XML-formatted agent forms.
CORE RESPONSIBILITIES:
1. Parse and implement agent forms
2. Create and manage individual agents
3. Orchestrate multi-agent systems
4. Handle dependencies and system requirements
AVAILABLE FUNCTIONS:
1. Agent Management:
- `create_agent`: Create new agents or update existing ones strictly following the given agent form.
- `read_agent`: Retrieve existing agent definitions. Note that if you want to use `create_agent` to update an existing agent, you MUST use the `read_agent` function to get the definition of the agent first.
- `delete_agent`: Remove unnecessary agents.
- `list_agents`: Display all available agents and their information.
- `create_orchestrator_agent`: Create orchestrator for multi-agent systems. If the request is to create MORE THAN ONE agent, after you create ALL required agents, you MUST use the `create_orchestrator_agent` function to create an orchestrator agent that can orchestrate the workflow of the agents. And then use the `run_agent` function to run the orchestrator agent to complete the user task.
2. Execution:
- run_agent: Execute agent to complete the user task. The agent could be a single agent (single agent form) or an orchestrator agent (multi-agent form).
- execute_command: Handle system dependencies and requirements
- terminal_page_down: Move the terminal page down when the terminal output is too long.
- terminal_page_up: Move the terminal page up when the terminal output is too long.
- terminal_page_to: Move the terminal page to the specific page when the terminal output is too long, and you want to move to the specific page with the meaningful content.
WORKFLOW GUIDELINES:
1. Single Agent Implementation:
- Carefully read the agent form and understand the requirements.
- Create/update agent using create_agent
- Execute task using run_agent
- Monitor and handle any errors
2. Multi-Agent Implementation:
- Create all required agents individually using `create_agent`
- MUST create an orchestrator agent using `create_orchestrator_agent`
- Execute task through the `run_agent` function to execute the created orchestrator agent
- Monitor system performance
3. Error Handling:
- Check for missing dependencies using `execute_command`
- Install required packages using execute_command
- Validate agent creation and execution
- Report any issues clearly
BEST PRACTICES:
1. Always verify existing agents using `read_agent` before updates
2. Create orchestrator agents for ANY multi-agent scenario using `create_orchestrator_agent`
3. Handle dependencies proactively using `execute_command`
4. Maintain clear documentation of created agents
5. Follow the exact specifications from the agent form XML
Remember: Your success is measured by both the accurate creation of agents and their effective execution of the given tasks.
"""
    # Agent CRUD + orchestration + shell + terminal paging helpers.
    tool_list = [list_agents, create_agent, delete_agent, run_agent, execute_command, read_agent, create_orchestrator_agent, terminal_page_down, terminal_page_up, terminal_page_to]
    return Agent(
        name="Agent Creator Agent",
        model=model,
        instructions=instructions,
        functions=tool_list,
        # Force a tool call on every turn, and only one call at a time.
        tool_choice = "required",
        parallel_tool_calls = False
    )

View file

@ -0,0 +1,38 @@
from metachain.registry import register_agent
from metachain.tools.meta.edit_agents import list_agents, create_agent, delete_agent, run_agent
from metachain.tools.terminal_tools import execute_command
from metachain.types import Agent
from metachain.io_utils import read_file
@register_agent(name = "Agent Editor Agent", func_name="get_agent_editor_agent")
def get_agent_editor_agent(model: str) -> Agent:
    """
    The agent editor is an agent that can be used to edit the agents.

    Args:
        model: Name of the LLM that backs the agent.

    Returns:
        An Agent that can list, create, delete and run MetaChain agents and
        install missing dependencies via the terminal.
    """
    def instructions(context_variables):
        # Callable instructions: the agent list and the dummy-agent template
        # are read at call time so the prompt reflects current on-disk state.
        return f"""\
You are an agent editor agent that can be used to edit the agents. You are working on a Agent framework named MetaChain, and your responsibility is to edit the agents in the MetaChain, so that the agents can be used to help the user with their request.
The existing agents are shown below:
{list_agents(context_variables)}
If you want to create a new agent, you should:
1. follow the format of the `get_dummy_agent` below:
```python
{read_file('metachain/agents/dummy_agent.py')}
```
2. you successfully create the agent only after you have successfully run the agent with the `run_agent` function to satisfy the user's request.
3. If you encounter any error while creating and running the agent, like dependency missing, you should use the `execute_command` function to install the dependency.
[IMPORTANT] The `register_plugin_agent` registry function is strictly required for a agent implementation to be recognized by the MetaChain framework.
"""
    tool_list = [list_agents, create_agent, delete_agent, run_agent, execute_command]
    return Agent(
        name="Agent Editor Agent",
        model=model,
        instructions=instructions,
        functions=tool_list,
        # Force a tool call on every turn, and only one call at a time.
        tool_choice = "required",
        parallel_tool_calls = False
    )

View file

@ -0,0 +1,68 @@
<agents>
<system_input>
The user request from the specific user about the product or service, mainly categorized into 2 types:
- Purchase a product or service
- Refund a product or service
</system_input>
<system_output>
<key>response</key>
<description>The response of the agent to the user's request.</description>
</system_output>
<global_variables>
<variable>
<key>user_name</key>
<description>The name of the user.</description>
<value>John Doe</value>
</variable>
</global_variables>
<agent>
<name>Personal Sales Agent</name>
<description>The personal sales agent is an agent that serves as a personal sales agent for a specific user.</description>
<instructions>You are a personal sales agent that can be used to help the user {user_name} with their request.</instructions>
<tools category="new">
<tool>
<name>recommend_product</name>
<description>Recommend a product to the user.</description>
</tool>
<tool>
<name>recommend_service</name>
<description>Recommend a service to the user.</description>
</tool>
<tool>
<name>conduct_sales</name>
<description>Conduct sales with the user.</description>
</tool>
</tools>
<agent_input>
<key>user_request</key>
<description>Request from the specific user for purchasing a product or service.</description>
</agent_input>
<agent_output>
<key>response</key>
<description>The response of the agent to the user's request.</description>
</agent_output>
</agent>
<agent>
<name>Personal Refunds Agent</name>
<description>The personal refunds agent is an agent that serves as a personal refunds agent for a specific user.</description>
<instructions>Help the user {user_name} with a refund. If the reason is that it was too expensive, offer the user a discount. If they insist, then process the refund.</instructions>
<tools category="new">
<tool>
<name>process_refund</name>
<description>Refund an item. Make sure you have the item_id of the form item_... Ask for user confirmation before processing the refund.</description>
</tool>
<tool>
<name>apply_discount</name>
<description>Apply a discount to the user's cart.</description>
</tool>
</tools>
<agent_input>
<key>user_request</key>
<description>Request from the specific user for refunding a product or service.</description>
</agent_input>
<agent_output>
<key>response</key>
<description>The response of the agent to the user's request.</description>
</agent_output>
</agent>
</agents>

View file

@ -0,0 +1,85 @@
<agents>
<system_input>
Two types of financial requests:
1. Managing private financial documents stored in the 'financial_docs' folder
2. Searching online financial information for specific company tickers
</system_input>
<system_output>
<key>financial_response</key>
<description>Comprehensive response containing either document analysis results or requested financial information.</description>
</system_output>
<agent>
<name>Financial Agent</name>
<description>A specialized agent that handles both private financial document management and online financial information retrieval.</description>
<instructions>You are a financial assistant with two primary responsibilities:
1. For private financial documents:
- Process and analyze documents in the 'financial_docs' folder
- Store document content for efficient retrieval
- Answer questions about stored financial documents
- Maintain document confidentiality and security
2. For online financial information:
- Retrieve accurate financial data for specified company tickers
- Format and present financial statements clearly
- Ensure data accuracy and proper citation
- Provide context for financial metrics when needed</instructions>
<tools category="existing">
<tool>
<name>save_raw_docs_to_vector_db</name>
<description>Process and store private financial documents into the vector database for efficient retrieval.</description>
</tool>
<tool>
<name>query_db</name>
<description>Search through stored financial documents to find relevant information.</description>
</tool>
<tool>
<name>modify_query</name>
<description>Refine search queries for better document retrieval results.</description>
</tool>
<tool>
<name>answer_query</name>
<description>Provide answers based on information found in stored documents.</description>
</tool>
<tool>
<name>can_answer</name>
<description>Verify if sufficient information exists in stored documents to answer a query.</description>
</tool>
<tool>
<name>get_historical_stock_price</name>
<description>Retrieve historical stock price data for specified tickers.</description>
</tool>
<tool>
<name>visualizer</name>
<description>Visualize financial data and documents when needed.</description>
</tool>
</tools>
<tools category="new">
<tool>
<name>get_balance_sheet</name>
<description>Retrieve balance sheet data for a specific company ticker over a given period.</description>
</tool>
<tool>
<name>get_cash_flow</name>
<description>Retrieve cash flow statement data for a specific company ticker over a given period.</description>
</tool>
<tool>
<name>get_income_statement</name>
<description>Retrieve income statement data for a specific company ticker over a given period.</description>
</tool>
</tools>
<agent_input>
<key>financial_request</key>
<description>User request for either document management or financial information retrieval, including:
- Document analysis requests for private financial documents
- Requests for specific financial statements with company ticker and time period</description>
</agent_input>
<agent_output>
<key>financial_response</key>
<description>Comprehensive response containing either:
- Analysis results from private financial documents
- Requested financial statements and data
- Relevant visualizations or summaries as needed</description>
</agent_output>
</agent>
</agents>

View file

@ -0,0 +1,87 @@
{
"system_input": "Two types of financial requests:\n 1. Managing and analyzing private financial documents stored in the `financial_docs` folder\n 2. Retrieving and analyzing public financial information for specific company tickers",
"system_output": {
"key": "financial_analysis",
"description": "The comprehensive financial analysis or response based on either private documents or public financial data."
},
"global_variables": {},
"agents": [
{
"name": "Private Financial Document Manager",
"description": "An agent specialized in managing and analyzing private financial documents stored locally.",
"instructions": "You are a financial document manager responsible for:\n1. Processing and organizing financial documents from the local `financial_docs` folder\n2. Storing document content in a searchable format using vector database\n3. Retrieving relevant financial information from stored documents\n4. Providing detailed analysis based on the stored financial documents\n\nAlways verify document processing success and maintain data confidentiality.",
"tools": {
"existing": [
{
"name": "save_raw_docs_to_vector_db",
"description": "Save the financial documents to the vector database for efficient retrieval and analysis."
},
{
"name": "query_db",
"description": "Search for specific financial information within stored documents."
},
{
"name": "modify_query",
"description": "Refine search queries to get more accurate financial information."
},
{
"name": "answer_query",
"description": "Provide detailed answers based on the financial documents."
},
{
"name": "can_answer",
"description": "Verify if sufficient information exists in stored documents to answer a query."
}
],
"new": []
},
"agent_input": {
"key": "document_request",
"description": "User's request related to private financial documents, including document processing, searching, or analysis needs."
},
"agent_output": {
"key": "financial_analysis",
"description": "Analysis, insights, or information retrieved from private financial documents."
}
},
{
"name": "Public Financial Data Analyst",
"description": "An agent specialized in retrieving and analyzing public financial information for specific company tickers.",
"instructions": "You are a financial data analyst responsible for:\n1. Retrieving public financial data including balance sheets, cash flow statements, and income statements\n2. Analyzing financial metrics and trends\n3. Providing detailed financial analysis based on public data\n4. Ensuring accuracy in financial data retrieval and calculations\n\nAlways verify data accuracy and provide clear sources for financial information.",
"tools": {
"existing": [
{
"name": "get_historical_stock_price",
"description": "Retrieve historical stock price data for analysis."
}
],
"new": [
{
"name": "get_balance_sheet",
"description": "Retrieve balance sheet data for a specific ticker over a given period."
},
{
"name": "get_cash_flow_statement",
"description": "Retrieve cash flow statement data for a specific ticker over a given period."
},
{
"name": "get_income_statement",
"description": "Retrieve income statement data for a specific ticker over a given period."
},
{
"name": "calculate_financial_metrics",
"description": "Calculate key financial metrics and ratios from the retrieved financial statements."
}
]
},
"agent_input": {
"key": "market_request",
"description": "User's request for public financial data, including specific ticker symbols and time periods for analysis."
},
"agent_output": {
"key": "financial_analysis",
"description": "Analysis and insights based on public financial data, including financial statements and calculated metrics."
}
}
]
}

View file

@ -0,0 +1,98 @@
<agents>
<system_input>
Two types of financial requests:
1. Managing and analyzing private financial documents stored in the `financial_docs` folder
2. Retrieving and analyzing public financial information for specific company tickers
</system_input>
<system_output>
<key>financial_analysis</key>
<description>The comprehensive financial analysis or response based on either private documents or public financial data.</description>
</system_output>
<agent>
<name>Private Financial Document Manager</name>
<description>An agent specialized in managing and analyzing private financial documents stored locally.</description>
<instructions>You are a financial document manager responsible for:
1. Processing and organizing financial documents from the local `financial_docs` folder
2. Storing document content in a searchable format using vector database
3. Retrieving relevant financial information from stored documents
4. Providing detailed analysis based on the stored financial documents
Always verify document processing success and maintain data confidentiality.</instructions>
<tools category="existing">
<tool>
<name>save_raw_docs_to_vector_db</name>
<description>Save the financial documents to the vector database for efficient retrieval and analysis.</description>
</tool>
<tool>
<name>query_db</name>
<description>Search for specific financial information within stored documents.</description>
</tool>
<tool>
<name>modify_query</name>
<description>Refine search queries to get more accurate financial information.</description>
</tool>
<tool>
<name>answer_query</name>
<description>Provide detailed answers based on the financial documents.</description>
</tool>
<tool>
<name>can_answer</name>
<description>Verify if sufficient information exists in stored documents to answer a query.</description>
</tool>
</tools>
<agent_input>
<key>document_request</key>
<description>User's request related to private financial documents, including document processing, searching, or analysis needs.</description>
</agent_input>
<agent_output>
<key>financial_analysis</key>
<description>Analysis, insights, or information retrieved from private financial documents.</description>
</agent_output>
</agent>
<agent>
<name>Public Financial Data Analyst</name>
<description>An agent specialized in retrieving and analyzing public financial information for specific company tickers.</description>
<instructions>You are a financial data analyst responsible for:
1. Retrieving public financial data including balance sheets, cash flow statements, and income statements
2. Analyzing financial metrics and trends
3. Providing detailed financial analysis based on public data
4. Ensuring accuracy in financial data retrieval and calculations
Always verify data accuracy and provide clear sources for financial information.</instructions>
<tools category="existing">
<tool>
<name>get_historical_stock_price</name>
<description>Retrieve historical stock price data for analysis.</description>
</tool>
</tools>
<tools category="new">
<tool>
<name>get_balance_sheet</name>
<description>Retrieve balance sheet data for a specific ticker over a given period.</description>
</tool>
<tool>
<name>get_cash_flow_statement</name>
<description>Retrieve cash flow statement data for a specific ticker over a given period.</description>
</tool>
<tool>
<name>get_income_statement</name>
<description>Retrieve income statement data for a specific ticker over a given period.</description>
</tool>
<tool>
<name>calculate_financial_metrics</name>
<description>Calculate key financial metrics and ratios from the retrieved financial statements.</description>
</tool>
</tools>
<agent_input>
<key>market_request</key>
<description>User's request for public financial data, including specific ticker symbols and time periods for analysis.</description>
</agent_input>
<agent_output>
<key>financial_analysis</key>
<description>Analysis and insights based on public financial data, including financial statements and calculated metrics.</description>
</agent_output>
</agent>
</agents>

View file

@ -0,0 +1,54 @@
<agents>
<system_input>
Questions from the user about the OpenAI products. The document of the OpenAI products is available at `/workspace/docs/openai_products/`.
</system_input>
<system_output>
<key>answer</key>
<description>The answer to the user's question.</description>
</system_output>
<agent>
<name>Helper Center Agent</name>
<description>The helper center agent is an agent that serves as a helper center agent for a specific user to answer the user's question about the OpenAI products.</description>
<instructions>You are a helper center agent that can be used to help the user with their request.</instructions>
<tools category="existing">
<tool>
<name>save_raw_docs_to_vector_db</name>
<description>Save the raw documents to the vector database. The documents could be:
- ANY text document with the extension of pdf, docx, txt, etc.
- A zip file containing multiple text documents
- a directory containing multiple text documents
All documents will be converted to raw text format and saved to the vector database in the chunks of 4096 tokens.</description>
</tool>
<tool>
<name>query_db</name>
<description>Query the vector database to find the answer to the user's question.</description>
</tool>
<tool>
<name>modify_query</name>
<description>Modify the user's question to a more specific question.</description>
</tool>
<tool>
<name>answer_query</name>
<description>Answer the user's question based on the answer from the vector database.</description>
</tool>
<tool>
<name>can_answer</name>
<description>Check if the user's question can be answered by the vector database.</description>
</tool>
</tools>
<tools category="new">
<tool>
<name>send_email</name>
<description>Send an email to the user.</description>
</tool>
</tools>
<agent_input>
<key>user_question</key>
<description>The question from the user about the OpenAI products.</description>
</agent_input>
<agent_output>
<key>answer</key>
<description>The answer to the user's question.</description>
</agent_output>
</agent>
</agents>

View file

@ -0,0 +1,257 @@
from metachain.registry import register_agent
from metachain.tools.meta.edit_agents import list_agents, create_agent, delete_agent, run_agent, read_agent
from metachain.tools.meta.edit_tools import list_tools, create_tool, delete_tool, run_tool
from metachain.tools.terminal_tools import execute_command
from metachain.types import Agent
from metachain.io_utils import read_file
from pydantic import BaseModel, Field
from typing import List
@register_agent(name = "Agent Former Agent", func_name="get_agent_former_agent")
def get_agent_former_agent(model: str) -> Agent:
    """
    This agent is used to complete a form that can be used to create an agent.

    It turns a natural-language user request into a structured XML agent
    form (single- or multi-agent) for the MetaChain framework.

    Args:
        model: Name of the LLM that backs the agent.

    Returns:
        An Agent with no tools whose instructions teach the form schema by
        specification plus two worked examples.
    """
    def instructions(context_variables):
        # Callable instructions: the available tools/agents are listed at
        # call time so the prompt always reflects the current registry.
        # The prompt is three concatenated segments: raw schema spec,
        # f-string registry listing, raw examples.
        return r"""\
You are an agent specialized in creating agent forms for the MetaChain framework.
Your task is to analyze user requests and generate structured creation forms for either single or multi-agent systems.
KEY COMPONENTS OF THE FORM:
1. <agents> - Root element containing all agent definitions
2. <system_input> - Defines what the system receives
- Must describe the overall input that the system accepts
- For single agent: Same as agent_input
- For multi-agent: Should encompass all possible inputs that will be routed to different agents
3. <system_output> - Specifies system response format
- Must contain exactly ONE key-description pair
- <key>: Single identifier for the system's output
- <description>: Explanation of the output
- For single agent: Same as agent_output
- For multi-agent: Should represent the unified output format from all agents
4. <agent> - Individual agent definition
- name: Agent's identifier
- description: Agent's purpose and capabilities
- instructions: Agent's behavioral guidelines
* To reference global variables, use format syntax: {variable_key}
* Example: "Help the user {user_name} with his/her request"
* All referenced keys must exist in global_variables
- tools: Available tools (existing/new)
- agent_input:
* Must contain exactly ONE key-description pair
* <key>: Identifier for the input this agent accepts
* <description>: Detailed explanation of the input format
- agent_output:
* Must contain exactly ONE key-description pair
* <key>: Identifier for what this agent produces
* <description>: Detailed explanation of the output format
5. <global_variables> - Shared variables across agents (optional)
- Used for constants or shared values accessible by all agents
- Variables defined here can be referenced in instructions using {key}
- Example:
```xml
<global_variables>
<variable>
<key>user_name</key>
<description>The name of the user</description>
<value>John Doe</value>
</variable>
</global_variables>
```
- Usage in instructions: "You are a personal assistant for {user_name}."
IMPORTANT RULES:
- For single agent systems:
* system_input/output must match agent_input/output exactly
- For multi-agent systems:
* system_input should describe the complete input space
* Each agent_input should specify which subset of the system_input it handles
* system_output should represent the unified response format
""" + \
f"""
Existing tools you can use is:
{list_tools(context_variables)}
Existing agents you can use is:
{list_agents(context_variables)}
""" + \
r"""
EXAMPLE 1 - SINGLE AGENT:
User: I want to build an agent that can answer the user's question about the OpenAI products. The document of the OpenAI products is available at `/workspace/docs/openai_products/`.
The agent should be able to:
1. query and answer the user's question about the OpenAI products based on the document.
2. send email to the user if the sending email is required in the user's request.
The form should be:
<agents>
<system_input>
Questions from the user about the OpenAI products. The document of the OpenAI products is available at `/workspace/docs/openai_products/`.
</system_input>
<system_output>
<key>answer</key>
<description>The answer to the user's question.</description>
</system_output>
<agent>
<name>Helper Center Agent</name>
<description>The helper center agent is an agent that serves as a helper center agent for a specific user to answer the user's question about the OpenAI products.</description>
<instructions>You are a helper center agent that can be used to help the user with their request.</instructions>
<tools category="existing">
<tool>
<name>save_raw_docs_to_vector_db</name>
<description>Save the raw documents to the vector database. The documents could be:
- ANY text document with the extension of pdf, docx, txt, etcs.
- A zip file containing multiple text documents
- a directory containing multiple text documents
All documents will be converted to raw text format and saved to the vector database in the chunks of 4096 tokens.</description>
</tool>
<tool>
<name>query_db</name>
<description>Query the vector database to find the answer to the user's question.</description>
</tool>
<tool>
<name>modify_query</name>
<description>Modify the user's question to a more specific question.</description>
</tool>
<tool>
<name>answer_query</name>
<description>Answer the user's question based on the answer from the vector database.</description>
</tool>
<tool>
<name>can_answer</name>
<description>Check if the user's question can be answered by the vector database.</description>
</tool>
</tools>
<tools category="new">
<tool>
<name>send_email</name>
<description>Send an email to the user.</description>
</tool>
</tools>
<agent_input>
<key>user_question</key>
<description>The question from the user about the OpenAI products.</description>
</agent_input>
<agent_output>
<key>answer</key>
<description>The answer to the user's question.</description>
</agent_output>
</agent>
</agents>
EXAMPLE 2 - MULTI-AGENT:
User: I want to build a multi-agent system that can handle two types of requests for the specific user:
1. Purchase a product or service
2. Refund a product or service
The specific user worked for is named John Doe.
The form should be:
<agents>
<system_input>
The user request from the specific user about the product or service, mainly categorized into 2 types:
- Purchase a product or service
- Refund a product or service
</system_input>
<system_output>
<key>response</key>
<description>The response of the agent to the user's request.</description>
</system_output>
<global_variables>
<variable>
<key>user_name</key>
<description>The name of the user.</description>
<value>John Doe</value>
</variable>
</global_variables>
<agent>
<name>Personal Sales Agent</name>
<description>The personal sales agent is an agent that serves as a personal sales agent for a specific user.</description>
<instructions>You are a personal sales agent that can be used to help the user {user_name} with their request.</instructions>
<tools category="new">
<tool>
<name>recommend_product</name>
<description>Recommend a product to the user.</description>
</tool>
<tool>
<name>recommend_service</name>
<description>Recommend a service to the user.</description>
</tool>
<tool>
<name>conduct_sales</name>
<description>Conduct sales with the user.</description>
</tool>
</tools>
<agent_input>
<key>user_request</key>
<description>Request from the specific user for purchasing a product or service.</description>
</agent_input>
<agent_output>
<key>response</key>
<description>The response of the agent to the user's request.</description>
</agent_output>
</agent>
<agent>
<name>Personal Refunds Agent</name>
<description>The personal refunds agent is an agent that serves as a personal refunds agent for a specific user.</description>
<instructions>Help the user {user_name} with a refund. If the reason is that it was too expensive, offer the user a discount. If they insist, then process the refund.</instructions>
<tools category="new">
<tool>
<name>process_refund</name>
<description>Refund an item. Refund an item. Make sure you have the item_id of the form item_... Ask for user confirmation before processing the refund.</description>
</tool>
<tool>
<name>apply_discount</name>
<description>Apply a discount to the user's cart.</description>
</tool>
</tools>
<agent_input>
<key>user_request</key>
<description>Request from the specific user for refunding a product or service.</description>
</agent_input>
<agent_output>
<key>response</key>
<description>The response of the agent to the user's request.</description>
</agent_output>
</agent>
</agents>
GUIDELINES:
1. Each agent must have clear, focused responsibilities
2. Tool selections should be minimal but sufficient
3. Instructions should be specific and actionable
4. Input/Output definitions must be precise
5. Use global_variables for shared context across agents
Follow these examples and guidelines to create appropriate agent forms based on user requirements.
"""
    # No functions: this agent only writes the form; the creator agent acts on it.
    return Agent(
        name = "Agent Former Agent",
        model = model,
        instructions = instructions,
    )
if __name__ == "__main__":
    # Ad-hoc smoke test: ask the former agent to draft a two-agent form
    # (private document management + online financial data) and print it.
    from metachain import MetaChain
    agent = get_agent_former_agent("claude-3-5-sonnet-20241022")
    client = MetaChain()
    task_yaml = """\
I want to create two agents that can help me to do two kinds of tasks:
1. Manage the private financial docs. I have a folder called `financial_docs` in my local machine, and I want to help me to manage the financial docs.
2. Search the financial information online. You may help me to:
- get balance sheets for a given ticker over a given period.
- get cash flow statements for a given ticker over a given period.
- get income statements for a given ticker over a given period.
"""
    task_yaml = task_yaml + """\
Directly output the form in the XML format.
"""
    messages = [{"role": "user", "content": task_yaml}]
    response = client.run(agent, messages)
    # Print only the final assistant message (the XML form).
    print(response.messages[-1]["content"])

View file

@ -0,0 +1,162 @@
from pydantic import BaseModel, Field, validator, field_validator, ValidationInfo
from typing import List, Dict, Optional, Literal
import xml.etree.ElementTree as ET
class KeyDescription(BaseModel):
    """An identifier paired with its human-readable description.

    Used for system/agent input and output declarations in an agent form.
    """
    key: str  # identifier (e.g. an input/output key name)
    description: str  # explanation of what the key holds
class Tool(BaseModel):
    """A single tool referenced by an agent form."""
    name: str  # tool function name
    description: str  # what the tool does
class ToolSet(BaseModel):
    """Tools grouped by origin, matching <tools category="..."> in the form."""
    existing: List[Tool] = Field(default_factory=list)  # tools already available
    new: List[Tool] = Field(default_factory=list)  # tools that must be created
class GlobalVariable(BaseModel):
    """A shared variable available to all agents in the form."""
    key: str  # variable name referenced in instructions as {key}
    description: str  # meaning of the variable
    value: str  # concrete value substituted at runtime
class Agent(BaseModel):
    """One agent definition parsed from an <agent> element."""
    name: str  # agent identifier
    description: str  # purpose and capabilities
    instructions: str  # behavioral guidelines; may reference global variables via {key}
    tools: ToolSet  # existing/new tools available to the agent
    agent_input: KeyDescription  # the input this agent accepts
    agent_output: KeyDescription  # the output this agent produces
class AgentForm(BaseModel):
    """Top-level parsed agent form: system I/O, shared variables and agents."""
    system_input: str  # description of the overall input the system accepts
    system_output: KeyDescription  # unified system output key/description
    global_variables: Dict[str, GlobalVariable] = Field(default_factory=dict)  # keyed by variable name
    agents: List[Agent]  # one (single-agent) or more (multi-agent) definitions

    @field_validator('agents')
    def validate_single_agent_io(cls, v, info: ValidationInfo):
        """Validate that a single-agent system's output key matches the system output key."""
        if len(v) == 1:
            agent = v[0]
            system_output = info.data.get('system_output')
            if system_output and agent.agent_output.key != system_output.key:
                raise ValueError("Single agent system must have matching system and agent output keys")
        return v

    # def validate_global_ctx_instructions(cls, v, info: ValidationInfo):
    #     """Validate that global variables match what the instructions reference"""
class XMLParser:
    """Parse an agent-form XML document into an AgentForm model."""

    @staticmethod
    def parse_key_description(elem: ET.Element, tag_name: str) -> KeyDescription:
        """Parse the <tag_name> child (holding <key>/<description>) of elem.

        Raises:
            ValueError: if the <tag_name> element itself is missing.
        """
        node = elem.find(tag_name)
        if node is None:
            raise ValueError(f"Missing {tag_name}")
        # NOTE(review): assumes <key> and <description> children exist and have
        # text; a missing child raises AttributeError here, not ValueError.
        return KeyDescription(
            key=node.find('key').text.strip(),
            description=node.find('description').text.strip()
        )

    @staticmethod
    def parse_tools(agent_elem: ET.Element) -> ToolSet:
        """Collect <tools category="existing|new"> entries into a ToolSet.

        <tools> elements with any other category value are silently skipped.
        """
        tools = ToolSet()
        for tools_elem in agent_elem.findall('tools'):
            category = tools_elem.get('category')
            if category not in ('existing', 'new'):
                continue
            tool_list = []
            for tool_elem in tools_elem.findall('tool'):
                tool = Tool(
                    name=tool_elem.find('name').text.strip(),
                    description=tool_elem.find('description').text.strip()
                )
                tool_list.append(tool)
            # Last element of each category wins if duplicated in the XML.
            if category == 'existing':
                tools.existing = tool_list
            else:
                tools.new = tool_list
        return tools

    @staticmethod
    def parse_global_variables(root: ET.Element) -> Dict[str, GlobalVariable]:
        """Parse the optional <global_variables> block into a dict keyed by variable key.

        Returns an empty dict when the block is absent.
        """
        variables = {}
        global_vars = root.find('global_variables')
        if global_vars is not None:
            for var in global_vars.findall('variable'):
                key = var.find('key').text.strip()
                variables[key] = GlobalVariable(
                    key=key,
                    description=var.find('description').text.strip(),
                    value=var.find('value').text.strip()
                )
        return variables

    @classmethod
    def parse_agent(cls, agent_elem: ET.Element) -> Agent:
        """Parse one <agent> element into an Agent model."""
        return Agent(
            name=agent_elem.find('name').text.strip(),
            description=agent_elem.find('description').text.strip(),
            instructions=agent_elem.find('instructions').text.strip(),
            tools=cls.parse_tools(agent_elem),
            agent_input=cls.parse_key_description(agent_elem, 'agent_input'),
            agent_output=cls.parse_key_description(agent_elem, 'agent_output')
        )

    @classmethod
    def parse_xml(cls, xml_content: str) -> AgentForm:
        """Parse a complete agent-form XML string into an AgentForm.

        Raises:
            ET.ParseError: if xml_content is not well-formed XML.
            pydantic.ValidationError: if the parsed data fails model validation.
        """
        root = ET.fromstring(xml_content)
        return AgentForm(
            system_input=root.find('system_input').text.strip(),
            system_output=cls.parse_key_description(root, 'system_output'),
            global_variables=cls.parse_global_variables(root),
            agents=[cls.parse_agent(agent_elem) for agent_elem in root.findall('agent')]
        )
def parse_agent_form(xml_content: str) -> Optional[AgentForm]:
    """Parse an agent-form XML string.

    Args:
        xml_content: The XML document content.

    Returns:
        The parsed AgentForm, or None if parsing fails (errors are printed;
        best-effort by design).
    """
    try:
        return XMLParser.parse_xml(xml_content)
    except ET.ParseError as e:
        print(f"Error parsing XML: {e}")
        return None
    except Exception as e:
        print(f"Unexpected error: {e}")
        return None
# Usage example
if __name__ == "__main__":
    # Read and parse the XML file, then dump the validated form as JSON.
    import json

    xml_path = "/Users/tangjiabin/Documents/reasoning/metachain/metachain/agents/meta_agent/agent_form/financial_agent_2.xml"
    with open(xml_path, 'r', encoding='utf-8') as f:
        xml_content = f.read()
    form = parse_agent_form(xml_content)

    if form:
        # Bug fix: dump only after confirming parsing succeeded — the original
        # called form.model_dump() before the None check and crashed on failure.
        json_path = "/Users/tangjiabin/Documents/reasoning/metachain/metachain/agents/meta_agent/agent_form/financial_agent_2.json"
        with open(json_path, 'w', encoding='utf-8') as f:
            json.dump(form.model_dump(), f, indent=4)

        # Typed, validated access to the parsed data.
        print(f"System input: {form.system_input}")
        print(f"Number of agents: {len(form.agents)}")
        first_agent = form.agents[0]
        print(f"First agent name: {first_agent.name}")
        print(f"First agent tools: {first_agent.tools}")
        # Convert to a plain dict.
        form_dict = form.model_dump()
        print(json.dumps(form_dict, indent=4))

View file

@ -0,0 +1,137 @@
import xml.etree.ElementTree as ET
from typing import Dict, List, Optional
class AgentForm:
    """Dict-based representation of an agent-form XML document.

    Parses system I/O, optional global variables, and the agent list from an
    XML string into plain dicts (no pydantic dependency).
    """
    def __init__(self, xml_string: str):
        """Parse *xml_string* eagerly; propagates errors on malformed input."""
        # Parse XML string
        root = ET.fromstring(xml_string)
        # Parse system input/output
        self.system_input = root.find('system_input').text.strip()
        system_output = root.find('system_output')
        self.system_output = {
            'key': system_output.find('key').text.strip(),
            'description': system_output.find('description').text.strip()
        }
        # Parse global variables (optional)
        global_vars = root.find('global_variables')
        self.global_variables = {}
        if global_vars is not None:
            for var in global_vars.findall('variable'):
                self.global_variables[var.find('key').text.strip()] = {
                    'description': var.find('description').text.strip(),
                    'value': var.find('value').text.strip()
                }
        # Parse agents
        self.agents = []
        for agent_elem in root.findall('agent'):
            agent = {
                'name': agent_elem.find('name').text.strip(),
                'description': agent_elem.find('description').text.strip(),
                'instructions': agent_elem.find('instructions').text.strip(),
                # Parse tools
                'tools': {
                    'existing': [],
                    'new': []
                },
                # Parse agent input/output
                'input': {
                    'key': agent_elem.find('agent_input/key').text.strip(),
                    'description': agent_elem.find('agent_input/description').text.strip()
                },
                'output': {
                    'key': agent_elem.find('agent_output/key').text.strip(),
                    'description': agent_elem.find('agent_output/description').text.strip()
                }
            }
            # Parse tools for both existing and new categories
            # NOTE(review): an unrecognized category attribute (e.g. the
            # "exsiting" typo seen in sample XML) raises KeyError on the
            # dict lookup below rather than being skipped — confirm intended.
            for tools_category in agent_elem.findall('tools'):
                category = tools_category.get('category')
                for tool in tools_category.findall('tool'):
                    tool_info = {
                        'name': tool.find('name').text.strip(),
                        'description': tool.find('description').text.strip()
                    }
                    agent['tools'][category].append(tool_info)
            self.agents.append(agent)
    def validate(self) -> bool:
        """Validate the parsed form.

        Rules (translated from the original comment):
        1. system_output must be a single key-description pair.
        2. Each agent's input/output must be a single key-description pair.
        3. For a single-agent system, system output must match the agent output.

        NOTE(review): the original rule 3 also mentions matching *inputs*, but
        only the output key is checked below — confirm whether intended.
        """
        try:
            # Check whether this is a single-agent system.
            if len(self.agents) == 1:
                agent = self.agents[0]
                # System and agent output keys must match.
                if agent['output']['key'] != self.system_output['key']:
                    return False
            # Every agent needs a non-empty input/output key and description.
            for agent in self.agents:
                if not agent['input'].get('key') or not agent['input'].get('description'):
                    return False
                if not agent['output'].get('key') or not agent['output'].get('description'):
                    return False
            return True
        except Exception:
            # Any structural surprise (missing keys, wrong types) means invalid.
            return False
    def to_dict(self) -> Dict:
        """Convert the form into a plain dictionary."""
        return {
            'system_input': self.system_input,
            'system_output': self.system_output,
            'global_variables': self.global_variables,
            'agents': self.agents
        }
# Usage example
def parse_agent_form(xml_path: str) -> Optional[Dict]:
    """Read and parse an agent-form XML file.

    Args:
        xml_path: Path to the XML file.

    Returns:
        The parsed form as a dict, or None when reading, parsing, or
        validation fails (diagnostics are printed).
    """
    try:
        with open(xml_path, 'r', encoding='utf-8') as fh:
            parsed = AgentForm(fh.read())
        if parsed.validate():
            return parsed.to_dict()
        print("Error: Invalid agent form format")
        return None
    except ET.ParseError as err:
        print(f"Error parsing XML: {err}")
        return None
    except Exception as err:
        print(f"Unexpected error: {err}")
        return None
# Usage example
if __name__ == "__main__":
    import json

    parsed = parse_agent_form("/Users/tangjiabin/Documents/reasoning/metachain/metachain/agents/meta_agent/agent_form/customer_service.xml")
    if parsed:
        print("Successfully parsed agent form:")
        print(json.dumps(parsed, indent=4))

View file

@ -0,0 +1,118 @@
from metachain.registry import register_agent
from metachain.types import Agent, Result
from metachain.environment import DockerEnv, LocalEnv
from metachain.tools.meta.edit_tools import list_tools
from metachain.tools.meta.edit_agents import list_agents
from metachain.agents.meta_agent.agent_editor import get_agent_editor_agent
from metachain.agents.meta_agent.tool_editor import get_tool_editor_agent
from typing import Union
from metachain.tools.inner import case_resolved, case_not_resolved
from pydantic import BaseModel
from metachain.util import function_to_json
from metachain.agents.meta_agent.meta_plan_agent import get_meta_plan_agent
class AgentDescription(BaseModel):
    """Planned agent: the tool names it needs and whether it already exists."""
    tools: list[str]
    existing: bool
class ToolDescription(BaseModel):
    """Planned tool: what it does, whether it exists, and its documentation."""
    tool_functionalities: str
    existing: bool
    tool_docs: str
class ToolPlan(BaseModel):
    """One step of the tool-development plan: a tool name plus its description."""
    tool_name: str
    tool_description: ToolDescription
class AgentPlan(BaseModel):
    """One step of the agent-development plan: an agent name plus its description."""
    agent_name: str
    agent_description: AgentDescription
@register_agent(name = "Meta Agent", func_name="get_meta_agent")
def get_meta_agent(model: str) -> Agent:
    """
    The meta agent is an agent that can be used to create and run other agents.
    """
    # NOTE: the docstrings of the nested transfer_* helpers below are exposed
    # to the LLM as tool descriptions, so their text is left untouched.
    def instructions(context_variables):
        # Resolved for parity with sibling agent factories; not referenced in
        # the prompt below — TODO confirm whether it can be dropped.
        code_env: Union[DockerEnv, LocalEnv] = context_variables.get("code_env", LocalEnv())
        instructions = f"""\
You are a helpful assistant that can help the user with their request by creating and running agents in the Metachain agent framework. Your responsibility is to determine which agent is best suited to handle the user's request under the current context, and transfer the conversation to that agent. And you should not stop to try to solve the user's request by transferring to another agent only until the task is completed.
Existing tools you already have:
{list_tools(context_variables)}
Existing agents you already have:
{list_agents(context_variables)}
You should first transfer the conversation to the `Meta Plan Agent` to plan how to use MetaChain to solve the user's request, and the plan should follow the following constraints:
1. If exising agents are enough for your task, you can directly use them to solve the user's request.
2. If exising agents are not enough for your task but there are enough existing tools, you can transfer the conversation to the `Agent Editor Agent` to develop new agents by using the existing tools.
3. If exising agents and existing tools are not enough for your task, you should first transfer the conversation to the `Tool Editor Agent` to develop new tools, then transfer the conversation to the `Agent Editor Agent` to develop new agents by using the new tools.
"""
        return instructions
    # Sub-agents this meta agent can hand the conversation off to.
    tool_editor_agent: Agent = get_tool_editor_agent(model)
    agent_editor_agent: Agent = get_agent_editor_agent(model)
    meta_plan_agent: Agent = get_meta_plan_agent(model)
    # Returning an Agent from a tool function hands the conversation to it.
    def transfer_to_tool_editor_agent(sub_task: str):
        """
        Args:
            sub_task: The detailed description of the sub-task that the `Meta Agent` will ask the `Tool Editor Agent` to do.
        """
        return tool_editor_agent
    def transfer_to_agent_editor_agent(sub_task: str):
        """
        Args:
            sub_task: The detailed description of the sub-task that the `Meta Agent` will ask the `Agent Editor Agent` to do.
        """
        return agent_editor_agent
    def transfer_to_meta_plan_agent(sub_task: str):
        """
        Use this function when you want to plan how to use MetaChain to solve the user's request.
        Args:
            sub_task: The detailed description of the sub-task that the `Meta Agent` will ask the `Meta Plan Agent` to do.
        """
        return meta_plan_agent
    meta_agent = Agent(
        name="Meta Agent",
        model=model,
        instructions=instructions,
        functions=[transfer_to_meta_plan_agent, transfer_to_tool_editor_agent, transfer_to_agent_editor_agent, case_resolved, case_not_resolved],
        tool_choice = "required",
        parallel_tool_calls = False
    )
    def transfer_back_to_meta_agent(task_status: str):
        """
        Args:
            task_status: The status of the task that the `Meta Agent` will ask the `Meta Agent` to do.
        """
        return meta_agent
    def transfer_back_to_meta_agent_with_plans(tool_development_steps: list[ToolPlan]) -> str:
        """
        This function is used to plan how to use MetaChain to solve the user's request. You can use this function only after you have fully understood the user's request and have try your best to search information from exsiting resources.
        Args:
            tool_development_steps: The steps of tool development. It is a list of dictionaries, each dictionary contains the tools name you should use in the exsiting MetaChain or the tools name you should develop. If the tool is not existing, dictionaries should contain the tool documentation.
        """
        # NOTE(review): the annotation says list[ToolPlan], but the items are
        # subscripted like dicts below; this works only if the runner passes
        # raw JSON dicts rather than validated ToolPlan models — confirm.
        tool_str = "\n".join([f"{tool['tool_name']}: {tool['tool_description']['tool_functionalities']} [{tool['tool_description']['existing']}]" for tool in tool_development_steps])
        ret_val = f"""\
Receiving user's request, I have the following plans to use MetaChain to solve the user's request:
As for using existing tools, I have the following plans:
{tool_str}
"""
        return Result(
            value=ret_val,
            agent=meta_agent
        )
    # Wire the return paths so sub-agents can hand control back to the meta agent.
    tool_editor_agent.functions.append(transfer_back_to_meta_agent)
    agent_editor_agent.functions.append(transfer_back_to_meta_agent)
    meta_plan_agent.functions.append(transfer_back_to_meta_agent_with_plans)
    return meta_agent

View file

@ -0,0 +1,38 @@
from metachain.types import Agent
from pydantic import BaseModel
from metachain.tools.meta.tool_retriever import get_api_plugin_tools_doc
from metachain.tools.meta.search_tools import search_trending_models_on_huggingface, get_hf_model_tools_doc
from metachain.tools.meta.edit_tools import list_tools
from typing import Union
from metachain.environment import DockerEnv, LocalEnv
def get_meta_plan_agent(model: str) -> Agent:
    """Build the `Meta Plan Agent`, which plans how MetaChain should solve a request."""
    def instructions(context_variables):
        # Resolved for parity with sibling agent factories; not referenced in
        # the prompt below — TODO confirm whether it can be dropped.
        code_env: Union[DockerEnv, LocalEnv] = context_variables.get("code_env", LocalEnv())
        instructions = f"""\
You are a helpful planner that can help `Tool Editor Agent` how to use MetaChain to solve the user's request.
Existing tools you already have:
{list_tools(context_variables)}
You should first fully understand the user's request, then analyze the existing tools and determine which tools are needed to solve the user's request, finally, you should transfer the conversation to the `Meta Agent` with the plan of using the tools.
If existing tools are not enough for your task, you should develop new tools.
1. [IMPORTANT] If you want to use third-party api, especially for some tasks related to Finance, Entertainment, eCommerce, Food, Travel, Sports, you MUST use the `get_api_plugin_tools_doc` tool to search information from existing api documents, it contains how to implement the api and API keys.
2. [IMPORTANT] If you want to use Hugging Face models, especially for some tasks related to vision, audio, video, you should use the `search_trending_models_on_huggingface` tool to search trending models related to the specific task on Hugging Face, and then use the `get_hf_model_tools_doc` tool to get the detailed information about the specific model.
3. [IMPORTANT] You can not use `transfer_back_to_meta_agent_with_plans` util you have fully understood the user's request and have try your best to search information from exsiting resources if you want to create a new tool.
"""
        return instructions
    # NOTE(review): `transfer_back_to_meta_agent_with_plans`, referenced in the
    # prompt, is appended to this agent's functions by `get_meta_agent`, not here.
    return Agent(
        name="Meta Plan Agent",
        model=model,
        instructions=instructions,
        functions=[get_api_plugin_tools_doc, search_trending_models_on_huggingface, get_hf_model_tools_doc],
        tool_choice = "required",
        parallel_tool_calls = False
    )

View file

@ -0,0 +1,257 @@
from metachain.registry import register_agent
from metachain.tools.meta.edit_tools import list_tools, create_tool, delete_tool, run_tool, get_metachain_path
from metachain.tools.meta.tool_retriever import get_api_plugin_tools_doc
from metachain.tools.meta.search_tools import search_trending_models_on_huggingface, get_hf_model_tools_doc
from metachain.types import Agent
from metachain.io_utils import read_file
from metachain.tools.terminal_tools import execute_command, terminal_page_down, terminal_page_up, terminal_page_to
@register_agent(name = "Tool Editor Agent", func_name="get_tool_editor_agent")
def get_tool_editor_agent(model: str) -> Agent:
    """
    The tool editor is an agent that can be used to edit the tools.
    """
    # The prompt is built lazily so the embedded tool list and the dummy-tool
    # template reflect the current state at call time.
    def instructions(context_variables):
        return f"""\
You are a tool editor agent responsible for managing plugin tools in the MetaChain framework. Your core responsibility is to edit, create, and manage plugin tools that can be used by other agents.
[PLUGIN TOOLS SYSTEM]
- Plugin tools are the building blocks of MetaChain
- All available plugin tools are as follows:
{list_tools(context_variables)}
- Plugin tools can ONLY be executed using `run_tool(tool_name, run_code)`. You should import `run_tool` by `from metachain.tools import run_tool`.
- NEVER try to import and run plugin tools directly - always use `run_tool`
[TOOL CREATION WORKFLOW]
1. ALWAYS start with `list_tools()` to check existing tools
2. For NEW plugin tool creation, FOLLOW THIS ORDER:
a. For third-party API integration (e.g., RapidAPI, external services):
- MUST FIRST use `get_api_plugin_tools_doc` to get API documentation and keys
- API keys should be embedded IN the function body, NOT as parameters.
- The API keys are always in the retrieved information from `get_api_plugin_tools_doc`, DO NOT guess the API keys by yourself.
- Follow the API implementation details from the documentation
b. For modal transformation tasks (image/video/audio generation/processing):
- FIRST use `search_trending_models_on_huggingface` to find suitable models, only support the following tags: ['audio-text-to-text', 'text-to-image', 'image-to-image', 'image-to-video', 'text-to-video', 'text-to-speech', 'text-to-audio', 'automatic-speech-recognition', 'audio-to-audio'].
- Then use `get_hf_model_tools_doc` for detailed model information
- Only use internal knowledge if no suitable models are found
c. For visual analysis tasks (images/videos):
- MUST use the existing `visual_question_answering` plugin tool by `run_tool("visual_question_answering", "from metachain.tools import visual_question_answering; ...")`. DO NOT use it directly without `run_tool`.
- NO direct implementation of visual processing
- Chain with other tools as needed
3. Plugin Tool Implementation Requirements:
- Use @register_plugin_tool decorator (REQUIRED). You should import `register_plugin_tool` by `from metachain.registry import register_plugin_tool`.
- Follow this template:
```python
{read_file('metachain/tools/dummy_tool.py')}
```
- Include clear type hints
- Make tools abstract and reusable
- Use generic names (e.g., 'process_media' not 'process_youtube_video')
- Handle dependencies with `execute_command`
[AVAILABLE TOOLS]
1. get_api_plugin_tools_doc:
- PRIMARY tool for third-party API integration
- MUST be used FIRST for Finance, Entertainment, eCommerce, etc.
- Provides API documentation AND authentication keys
- API keys should be embedded in tool implementation
2. search_trending_models_on_huggingface:
- Use for finding models for media transformation tasks
- Supported tags: ['text-to-image', 'image-to-image', 'text-to-video', etc.]
- Use AFTER checking no suitable API exists via `get_api_plugin_tools_doc`
3. get_hf_model_tools_doc:
- Get the detailed information of a model on Hugging Face, such as the detailed usage of the model containing the model's README.md.
- You should use this tool after you have used `search_trending_models_on_huggingface` to find the model you want to use.
4. Other management tools:
- list_tools(): Check existing tools
- create_tool(tool_name, tool_code): Create new tools
- run_tool(tool_name, run_code): REQUIRED method to execute any plugin tool
- delete_tool(tool_name): Remove tools
- execute_command: Install dependencies. Handles system-level operations
- terminal_page_* tools: Navigate long outputs
5. case_resolved & case_not_resolved:
- case_resolved: after you have created all the tools and tested them using `run_tool` successfully (with the expected output rather than just run it), you should use the `case_resolved` tool to brief the result.
- case_not_resolved: after you have tried your best to create the tools but failed, you should use the `case_not_resolved` tool to tell the failure reason.
[CRITICAL RULES]
1. Tool Creation Priority:
- FIRST: Check existing tools via list_tools()
- SECOND: Use `get_api_plugin_tools_doc` for API-based tools
- THIRD: Use `search_trending_models_on_huggingface` for media tasks
- LAST: Use internal knowledge if no other options available
2. API Implementation:
- NEVER expose API keys as parameters
- ALWAYS embed API keys in function body
- Get keys from `get_api_plugin_tools_doc`
3. Tool Design:
- Tools MUST be abstract, modular, and reusable:
- Use generic function names (e.g., `download_media` instead of `download_youtube_video`)
- Break complex tasks into smaller, reusable components
- Avoid task-specific implementations
- Use parameters instead of hardcoded values
- Include proper error handling
[TESTING]
Test new tools using `run_tool`:
`run_tool(tool_name="your_tool", run_code="from metachain.tools import your_tool; print(your_tool(param1='value1'))")`
"""
    # NOTE(review): the prompt references case_resolved/case_not_resolved, but
    # they are not in tool_list below — confirm whether they should be added.
    tool_list = [list_tools, create_tool, run_tool, delete_tool, get_api_plugin_tools_doc, execute_command, terminal_page_down, terminal_page_up, terminal_page_to, search_trending_models_on_huggingface, get_hf_model_tools_doc]
    return Agent(
        name="Tool Editor Agent",
        model=model,
        instructions=instructions,
        functions=tool_list,
        tool_choice = "required",
        parallel_tool_calls = False
    )
"""
5. [IMPORTANT] If you want to use Hugging Face models, especially for some tasks related to vision, audio, video, you should use the `search_trending_models_on_huggingface` tool to search trending models related to the specific task on Hugging Face, and then use the `get_hf_model_tools_doc` tool to get the detailed information about the specific model.
6. [IMPORTANT] As for the tags ['image-text-to-text', 'visual-question-answering', 'video-text-to-text'] and ANY visual tasks, you should use `visual_question_answering` tool instead of Hugging Face models.
"""
"""\
You are a tool editor agent that can be used to edit the tools. You are working on a Agent framework named MetaChain, and your responsibility is to edit the tools in the MetaChain, so that the tools can be used by the agents to help the user with their request.
The existing tools are shown below:
{list_tools(context_variables)}
If you want to create a new tool, you should:
1. follow the format of the `tool_dummy` below. Note that if the tool should be used with third-part api key, you should write the api key inside the definition of the tool:
```python
{read_file('metachain/tools/dummy_tool.py')}
```
2. you successfully create the tool only after you have successfully run the tool with the `run_tool` function, and an example of testing the tool is shown below.:
```python
from metachain.tools import tool_dummy
if __name__ == "__main__":
... # some pre-operations
print(run_tool(tool_name="tool_dummy", run_code="from metachain.tools import tool_dummy; print(tool_dummy(args1=args1, args2=args1, ...))"))
```
3. If you encounter any error while creating and running the tool, like dependency missing, you should use the `execute_command` function to install the dependency.
4. [IMPORTANT] If you want to use third-party api, especially for some tasks related to Finance, Entertainment, eCommerce, Food, Travel, Sports, you MUST use the `get_api_plugin_tools_doc` tool to search information from existing api documents, it contains how to implement the api and API keys.
[IMPORTANT] The `register_plugin_tool` registry function is strictly required for a tool implementation to be recognized by the MetaChain framework.
[IMPORTANT] The tool you create should be abstract, modular, and reusable. Specifically, the function name must be generic (e.g.,
`count_objects` instead of `count_apples`). The function must use parameters instead of hard-coded values. The
function body must be self-contained.
[IMPORTANT] Explicitly declare input and output data types using type hints.
[IMPORTANT] For ANY visual tasks related to image and video, you should use `visual_question_answering` tool.
"""
"""\
You are a tool editor agent responsible for managing plugin tools in the MetaChain framework. Your core responsibility is to edit, create, and manage plugin tools that can be used by other agents.
[PLUGIN TOOLS SYSTEM]
- Plugin tools are the building blocks of MetaChain
- All available plugin tools are as follows:
{list_tools(context_variables)}
- Plugin tools can ONLY be executed using `run_tool(tool_name, run_code)`
- NEVER try to import and run tools directly - always use `run_tool`
[AVAILABLE MANAGEMENT TOOLS]
1. list_tools():
- Lists all existing plugin tools
- Returns: tool name, arguments, docstring, implementation details
- Use this FIRST to check existing tools
2. create_tool(tool_name: str, tool_code: str):
- Creates new plugin tools
- Requires proper registration using @register_plugin_tool, and you MUST import `register_plugin_tool` by `from metachain.registry import register_plugin_tool`
3. run_tool(tool_name: str, run_code: str,):
- REQUIRED method to execute any plugin tool
- Format: run_tool("tool_name", "from metachain.tools import tool_name; print(tool_name(args))")
4. delete_tool(tool_name: str,):
- Removes existing plugin tools
- Use with caution
5. get_api_plugin_tools_doc:
- Required for third-party API integrations, e.g. RapidAPI.
- MUST be used for Finance, Entertainment, etc.
6. execute_command:
- Handles system-level operations
- Use for dependency installation
7. terminal_page_down:
- Move the terminal page down when the terminal output is too long.
8. terminal_page_up:
- Move the terminal page up when the terminal output is too long.
9. terminal_page_to:
- Move the terminal page to the specific page when the terminal output is too long, and you want to move to the specific page with the meaningful content.
10. search_trending_models_on_huggingface:
- Search trending models on Hugging Face.
- Use this tool when you want to use Hugging Face models to generate images, videos, audios, etc.
- Do NOT use this tool for text-to-text or image-to-text tasks.
11. get_hf_model_tools_doc:
- Get the detailed information about the specific model on Hugging Face.
- Use this tool when you want to use Hugging Face models to generate images, videos, audios, etc.
[CRITICAL PRINCIPLES FOR PLUGIN TOOLS]
1. Tools MUST be abstract, modular, and reusable:
- Use generic function names (e.g., `download_media` instead of `download_youtube_video`)
- Break complex tasks into smaller, reusable components
- Avoid task-specific implementations
- Use parameters instead of hardcoded values
2. For ALL visual tasks (images, videos, visual analysis):
- MUST use the existing `visual_question_answering` plugin tool
- NO direct implementation of visual processing
- Chain `visual_question_answering` with other tools as needed
[WORKFLOW FOR PLUGIN TOOL MANAGEMENT]
1. Always start with `list_tools()` to check existing tools
2. For new plugin tools:
a. Design generic, reusable interface
b. Follow the template format:
```python
{read_file('metachain/tools/dummy_tool.py')}
```
c. Create using `create_tool`
d. Test using `run_tool`
e. Handle dependencies with `execute_command`
[IMPORTANT RULES]
- ALL tools must be registered with @register_plugin_tool
- ALL tools must have type hints
- Each tool does ONE thing well
- Create modular tools that can be combined
- ALWAYS use `run_tool` to execute plugin tools
- NEVER modify the `visual_question_answering` tool
[TOOL TESTING EXAMPLE]
Correct way to test a plugin tool:
```python
result = run_tool(
tool_name="your_tool",
run_code="from metachain.tools import your_tool; print(your_tool(param1='value1'))",
context_variables=context_variables
)
```
"""

View file

@ -0,0 +1,121 @@
from metachain.registry import register_agent
from metachain.tools.meta.edit_tools import list_tools, create_tool, delete_tool, run_tool, get_metachain_path
from metachain.tools.meta.tool_retriever import get_api_plugin_tools_doc
from metachain.tools.meta.search_tools import search_trending_models_on_huggingface, get_hf_model_tools_doc
from metachain.types import Agent
from metachain.io_utils import read_file
from metachain.tools.terminal_tools import execute_command, terminal_page_down, terminal_page_up, terminal_page_to
@register_agent(name = "Tool Editor Agent", func_name="get_tool_editor_agent")
def get_tool_editor_agent(model: str) -> Agent:
    """
    The tool editor is an agent that can be used to edit the tools.
    """
    def instructions(context_variables):
        # The first segment is an f-string (interpolates the live tool list and
        # the dummy-tool template); the second is a raw plain string so the
        # literal braces in the testing template are not interpreted.
        return f"""\
You are a Tool Editor specialized in the MetaChain framework. Your role is to create, modify, and maintain tools that agents can use effectively.
CURRENT TOOLS:
{list_tools(context_variables)}
TOOL CREATION GUIDELINES:
1. STRUCTURE AND FORMATTING
- Follow the template structure below:
```python
{read_file('metachain/tools/dummy_tool.py')}
```
- MUST use @register_plugin_tool decorator
- Include clear docstrings with args and returns
- Handle errors gracefully
- If the tool should be used with third-part api key, you should write the api key inside the definition of the tool
- DO NOT direct return too long output (e.g., the raw content of the download file for `download_file_from_url`), instead, save the output to a file in the `workplace/outputs` directory
2. IMPLEMENTATION PRINCIPLES:
- Keep tools GENERIC and REUSABLE
- Avoid over-specific implementations
- Focus on single responsibility
- Ensure proper error handling
- Include input validation
- Return clear, structured outputs
""" + \
r"""
3. TESTING REQUIREMENTS:
- All tools MUST be tested before deployment
- Use this testing template:
```python
from metachain.tools import your_tool_name
if __name__ == "__main__":
# Setup test environment
test_args = {
"arg1": value1,
"arg2": value2
}
# Execute test
result = your_tool_name(**test_args)
print(f"Test result: {result}")
```
- if the output of the tool is too long, you should use the `terminal_page_down` or `terminal_page_up` or `terminal_page_to` function to move the terminal page to the specific page with the meaningful content.
4. DEPENDENCY MANAGEMENT:
- Use execute_command for installing dependencies
- Document all required packages
- Verify compatibility with MetaChain
CRITICAL RULES:
1. ALWAYS use @register_plugin_tool decorator
2. NEVER create overly specific tools
3. ALWAYS test before finalizing
4. ALWAYS handle errors gracefully
5. ALWAYS document clearly
BEST PRACTICES:
1. Keep tools modular and focused
2. Provide clear error messages
3. Include usage examples in docstrings
4. Follow Python PEP 8 style guide
5. Use type hints for better clarity
Remember: A good tool is reusable, reliable, and well-documented. Focus on creating tools that can be effectively used across different agents and scenarios.
"""
    # Full management/search toolset exposed to the agent.
    tool_list = [list_tools, create_tool, delete_tool, run_tool, get_api_plugin_tools_doc, search_trending_models_on_huggingface, get_hf_model_tools_doc, execute_command, terminal_page_down, terminal_page_up, terminal_page_to]
    return Agent(
        name="Tool Editor Agent",
        model=model,
        instructions=instructions,
        functions=tool_list,
        tool_choice = "required",
        parallel_tool_calls = False
    )
"""\
You are a tool editor agent that can be used to edit the tools. You are working on a Agent framework named MetaChain, and your responsibility is to edit the tools in the MetaChain, so that the tools can be used by the agents to help the user with their request.
The existing tools are shown below:
{list_tools(context_variables)}
If you want to create a new tool, you should:
1. follow the format of the `tool_dummy` below. Note that if the tool should be used with third-part api key, you should write the api key inside the definition of the tool:
```python
{read_file('metachain/tools/dummy_tool.py')}
```
2. you successfully create the tool only after you have successfully tested the tool with the `test_tool` function, and an example of testing the tool is shown below.:
```python
from metachain.tools import tool_dummy
if __name__ == "__main__":
... # some pre-operations
print(tool_dummy(args1=args1, args2=args1, ...))
```
3. If you encounter any error while creating and running the tool, like dependency missing, you should use the `execute_command` function to install the dependency.
[IMPORTANT] The `register_plugin_tool` registry function is strictly required for a tool implementation to be recognized by the MetaChain framework.
[IMPORTANT] Tools you create should be as general as possible, and you should not create too specific tools, so that the tools can be reused by other agents or other related tasks.
"""

View file

@ -0,0 +1,53 @@
<agents>
<agent>
<name>Personal Sales Agent</name>
<description>The personal sales agent is an agent that serves as a personal sales agent for a specific user.</description>
<instructions>You are a personal sales agent that can be used to help the user {user_name} with their request.</instructions>
<tools category="exsiting">
<tool>
<name>recommend_product</name>
<description>Recommend a product to the user.</description>
</tool>
<tool>
<name>recommend_service</name>
<description>Recommend a service to the user.</description>
</tool>
<tool>
<name>transfer_customer</name>
<description>Transfer a customer to another sales agent.</description>
</tool>
<tool>
<name>search_information</name>
<description>Search for information for the user.</description>
</tool>
</tools>
<tools category="new">
<tool>
<name>create_agent</name>
<description>Create a new agent.</description>
</tool>
</tools>
<global_variables>
<variable>
<key>user_name</key>
<description>The name of the user.</description>
<value>John Doe</value>
</variable>
<variable>
<key>user_email</key>
<description>The email of the user.</description>
<value>john.doe@example.com</value>
</variable>
</global_variables>
</agent>
</agents>
<orchestrate>
<transition>
<from>personal_sales_agent</from>
<to>agent_former_agent</to>
<condition>
<type>user_request</type>
<value>create_agent</value>
</condition>
</transition>
</orchestrate>

View file

@ -0,0 +1,81 @@
from metachain.registry import register_agent
from metachain.tools.meta.edit_agents import list_agents, create_agent, delete_agent, run_agent, read_agent, create_orchestrator_agent
from metachain.tools.meta.edit_workflow import list_workflows, create_workflow, run_workflow
from metachain.tools.terminal_tools import execute_command, terminal_page_down, terminal_page_up, terminal_page_to
from metachain.types import Agent
from metachain.io_utils import read_file
@register_agent(name="Workflow Creator Agent", func_name="get_workflow_creator_agent")
def get_workflow_creator_agent(model: str) -> Agent:
    """
    Build the Workflow Creator Agent.

    The agent parses XML-formatted workflow forms, creates any agents the
    form requires, then creates and executes the corresponding workflow
    within the MetaChain framework.

    Args:
        model: Name of the LLM that backs the agent (passed through to ``Agent``).

    Returns:
        Agent: configured with workflow-management, agent-management and
        terminal tools; the original annotation said ``str`` but an ``Agent``
        instance is what is actually returned.
    """
    def instructions(context_variables):
        # NOTE(review): f-string currently interpolates nothing from
        # context_variables; kept as an f-string for parity with other
        # agent factories that do interpolate.
        return f"""\
You are a Workflow Creator specialized in the MetaChain framework. Your primary responsibility is to create and manage workflows based on XML-formatted workflow forms.
CORE RESPONSIBILITIES:
1. Parse and implement workflow forms
2. Create necessary agents if specified in the workflow
3. Create and manage workflows
4. Execute workflows as needed
AVAILABLE FUNCTIONS:
1. Workflow Management:
- `create_workflow`: Create new workflows based on the workflow form
- `run_workflow`: Execute the created workflow
- `list_workflows`: Display all available workflows
2. Agent Management (when needed):
- `create_agent`: Create new agents if specified in the workflow form. If no tools are explicitly specified, use empty tool list ([])
- `read_agent`: Retrieve existing agent definitions before updates
- `list_agents`: Display all available agents
3. System Tools:
- `execute_command`: Handle system dependencies
- `terminal_page_down`, `terminal_page_up`, `terminal_page_to`: Navigate terminal output
WORKFLOW CREATION PROCESS:
1. Parse Workflow Form:
- Analyze the workflow form carefully
- Identify any new agents that need to be created
- Understand the workflow structure and requirements
2. Create Required Agents:
- For each new agent in the workflow form:
* Use `create_agent` with appropriate parameters
* If no tools specified, use empty tool list ([])
* Verify agent creation success
3. Create Workflow:
- Use `create_workflow` to generate the workflow
- Ensure all required agents exist
- Validate workflow structure
4. Execute Workflow:
- Use `run_workflow` to execute the created workflow
- Monitor execution progress
- Handle any errors appropriately
BEST PRACTICES:
1. Always check if required agents exist before creating new ones
2. Use empty tool list ([]) when no specific tools are mentioned
3. Validate workflow creation before execution
4. Follow the exact specifications from the workflow form XML
5. Handle errors and dependencies appropriately
Remember: Your primary goal is to create and execute workflows according to the provided workflow forms, creating any necessary agents along the way.
"""
    # Tools exposed to the agent: agent management, terminal access, and
    # workflow management, matching the AVAILABLE FUNCTIONS in the prompt.
    tool_list = [
        list_agents,
        create_agent,
        execute_command,
        read_agent,
        terminal_page_down,
        terminal_page_up,
        terminal_page_to,
        list_workflows,
        create_workflow,
        run_workflow,
    ]
    return Agent(
        name="Workflow Creator Agent",
        model=model,
        instructions=instructions,
        functions=tool_list,
        tool_choice="required",        # the agent must always respond with a tool call
        parallel_tool_calls=False,     # execute tools one at a time, in order
    )

View file

@ -0,0 +1,184 @@
{
"name": "math_solver_chain_workflow",
"system_input": {
"key": "math_problem",
"description": "The math problem that needs to be solved."
},
"system_output": {
"key": "solution",
"description": "The complete solution to the math problem."
},
"global_variables": {},
"agents": [
{
"name": "Objective Extraction Agent",
"description": "This agent analyzes the math problem and extracts its main objective or goal.",
"category": "existing",
"tools": null
},
{
"name": "Condition Extraction Agent",
"description": "This agent identifies and extracts all relevant conditions and given information from the math problem.",
"category": "existing",
"tools": null
},
{
"name": "Condition Evaluator Agent",
"description": "This agent evaluates whether the extracted conditions are sufficient to solve the math problem.",
"category": "existing",
"tools": null
},
{
"name": "Math Solver Agent",
"description": "This agent solves mathematical problems using analytical and systematic approaches.",
"category": "existing",
"tools": null
}
],
"events": [
{
"name": "on_start",
"inputs": [
{
"key": "math_problem",
"description": "The math problem that needs to be solved."
}
],
"task": null,
"outputs": [
{
"key": "math_problem",
"description": "The math problem that needs to be solved.",
"condition": null,
"action": {
"type": "RESULT",
"value": null
}
}
],
"listen": null,
"agent": null
},
{
"name": "extract_objective",
"inputs": [
{
"key": "math_problem",
"description": "The math problem that needs to be solved."
}
],
"task": "Extract and clearly state the main objective of the math problem.",
"outputs": [
{
"key": "objective",
"description": "The main objective or question that needs to be answered.",
"condition": null,
"action": {
"type": "RESULT",
"value": null
}
}
],
"listen": [
"on_start"
],
"agent": {
"name": "Objective Extraction Agent",
"model": "claude-3-5-sonnet-20241022"
}
},
{
"name": "extract_conditions",
"inputs": [
{
"key": "objective",
"description": "The main objective or question that needs to be answered."
}
],
"task": "Extract all relevant conditions and given information from the math problem.",
"outputs": [
{
"key": "conditions",
"description": "The complete set of conditions and information extracted from the problem.",
"condition": null,
"action": {
"type": "RESULT",
"value": null
}
}
],
"listen": [
"extract_objective"
],
"agent": {
"name": "Condition Extraction Agent",
"model": "claude-3-5-sonnet-20241022"
}
},
{
"name": "evaluate_conditions",
"inputs": [
{
"key": "conditions",
"description": "The complete set of conditions and information extracted from the problem."
}
],
"task": "Evaluate if the extracted conditions are sufficient to solve the problem.",
"outputs": [
{
"key": "merged_conditions",
"description": "The merged and organized conditions ready for problem-solving.",
"condition": "When conditions are sufficient to solve the problem.",
"action": {
"type": "RESULT",
"value": null
}
},
{
"key": "insufficient_feedback",
"description": "Feedback on what additional information is needed.",
"condition": "When conditions are insufficient to solve the problem.",
"action": {
"type": "GOTO",
"value": "extract_conditions"
}
}
],
"listen": [
"extract_conditions"
],
"agent": {
"name": "Condition Evaluator Agent",
"model": "claude-3-5-sonnet-20241022"
}
},
{
"name": "solve_problem",
"inputs": [
{
"key": "merged_conditions",
"description": "The merged and organized conditions ready for problem-solving."
}
],
"task": "Solve the math problem using the complete set of conditions.",
"outputs": [
{
"key": "solution",
"description": "The complete solution to the math problem.",
"condition": null,
"action": {
"type": "RESULT",
"value": null
}
}
],
"listen": [
"evaluate_conditions"
],
"agent": {
"name": "Math Solver Agent",
"model": "claude-3-5-sonnet-20241022"
}
}
]
}

View file

@ -0,0 +1,169 @@
<workflow>
<name>math_solver_chain_workflow</name>
<system_input>
<key>math_problem</key>
<description>The math problem that needs to be solved.</description>
</system_input>
<system_output>
<key>solution</key>
<description>The complete solution to the math problem.</description>
</system_output>
<agents>
<agent category="existing">
<name>Objective Extraction Agent</name>
<description>This agent analyzes the math problem and extracts its main objective or goal.</description>
</agent>
<agent category="existing">
<name>Condition Extraction Agent</name>
<description>This agent identifies and extracts all relevant conditions and given information from the math problem.</description>
</agent>
<agent category="existing">
<name>Condition Evaluator Agent</name>
<description>This agent evaluates whether the extracted conditions are sufficient to solve the math problem.</description>
</agent>
<agent category="existing">
<name>Math Solver Agent</name>
<description>This agent solves mathematical problems using analytical and systematic approaches.</description>
</agent>
</agents>
<events>
<event>
<name>on_start</name>
<inputs>
<input>
<key>math_problem</key>
<description>The math problem that needs to be solved.</description>
</input>
</inputs>
<outputs>
<output>
<key>math_problem</key>
<description>The math problem that needs to be solved.</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
</event>
<event>
<name>extract_objective</name>
<inputs>
<input>
<key>math_problem</key>
<description>The math problem that needs to be solved.</description>
</input>
</inputs>
<task>Extract and clearly state the main objective of the math problem.</task>
<outputs>
<output>
<key>objective</key>
<description>The main objective or question that needs to be answered.</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>on_start</event>
</listen>
<agent>
<name>Objective Extraction Agent</name>
<model>claude-3-5-sonnet-20241022</model>
</agent>
</event>
<event>
<name>extract_conditions</name>
<inputs>
<input>
<key>objective</key>
<description>The main objective or question that needs to be answered.</description>
</input>
</inputs>
<task>Extract all relevant conditions and given information from the math problem.</task>
<outputs>
<output>
<key>conditions</key>
<description>The complete set of conditions and information extracted from the problem.</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>extract_objective</event>
</listen>
<agent>
<name>Condition Extraction Agent</name>
<model>claude-3-5-sonnet-20241022</model>
</agent>
</event>
<event>
<name>evaluate_conditions</name>
<inputs>
<input>
<key>conditions</key>
<description>The complete set of conditions and information extracted from the problem.</description>
</input>
</inputs>
<task>Evaluate if the extracted conditions are sufficient to solve the problem.</task>
<outputs>
<output>
<key>merged_conditions</key>
<description>The merged and organized conditions ready for problem-solving.</description>
<condition>When conditions are sufficient to solve the problem.</condition>
<action>
<type>RESULT</type>
</action>
</output>
<output>
<key>insufficient_feedback</key>
<description>Feedback on what additional information is needed.</description>
<condition>When conditions are insufficient to solve the problem.</condition>
<action>
<type>GOTO</type>
<value>extract_conditions</value>
</action>
</output>
</outputs>
<listen>
<event>extract_conditions</event>
</listen>
<agent>
<name>Condition Evaluator Agent</name>
<model>claude-3-5-sonnet-20241022</model>
</agent>
</event>
<event>
<name>solve_problem</name>
<inputs>
<input>
<key>merged_conditions</key>
<description>The merged and organized conditions ready for problem-solving.</description>
</input>
</inputs>
<task>Solve the math problem using the complete set of conditions.</task>
<outputs>
<output>
<key>solution</key>
<description>The complete solution to the math problem.</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>evaluate_conditions</event>
</listen>
<agent>
<name>Math Solver Agent</name>
<model>claude-3-5-sonnet-20241022</model>
</agent>
</event>
</events>
</workflow>

View file

@ -0,0 +1,158 @@
{
"name": "math_condition_mining_workflow",
"system_input": {
"key": "math_problem",
"description": "The mathematical problem that needs to be solved."
},
"system_output": {
"key": "solution",
"description": "The detailed solution to the mathematical problem."
},
"global_variables": {},
"agents": [
{
"name": "Objective Extraction Agent",
"description": "This agent is specialized in analyzing math problems and extracting the main objective or question being asked.",
"category": "existing",
"tools": null
},
{
"name": "Condition Extraction Agent",
"description": "This agent is specialized in identifying and extracting all relevant conditions, given values, and constraints from the math problem.",
"category": "existing",
"tools": null
},
{
"name": "Math Solver Agent",
"description": "This agent is specialized in evaluating whether conditions are sufficient and solving math problems using appropriate mathematical methods.",
"category": "existing",
"tools": null
}
],
"events": [
{
"name": "on_start",
"inputs": [
{
"key": "math_problem",
"description": "The mathematical problem that needs to be solved."
}
],
"task": null,
"outputs": [
{
"key": "math_problem",
"description": "The mathematical problem that needs to be solved.",
"condition": null,
"action": {
"type": "RESULT",
"value": null
}
}
],
"listen": null,
"agent": null
},
{
"name": "extract_objective",
"inputs": [
{
"key": "math_problem",
"description": "The mathematical problem that needs to be solved."
}
],
"task": "Analyze the math problem and extract the main objective or question being asked.",
"outputs": [
{
"key": "objective",
"description": "The main objective or question extracted from the math problem.",
"condition": null,
"action": {
"type": "RESULT",
"value": null
}
}
],
"listen": [
"on_start"
],
"agent": {
"name": "Objective Extraction Agent",
"model": "claude-3-5-sonnet-20241022"
}
},
{
"name": "extract_conditions",
"inputs": [
{
"key": "math_problem",
"description": "The mathematical problem that needs to be solved."
},
{
"key": "objective",
"description": "The main objective or question extracted from the math problem."
}
],
"task": "Extract all relevant conditions, given values, and constraints from the math problem.",
"outputs": [
{
"key": "conditions",
"description": "The extracted conditions, values, and constraints from the math problem.",
"condition": null,
"action": {
"type": "RESULT",
"value": null
}
}
],
"listen": [
"extract_objective"
],
"agent": {
"name": "Condition Extraction Agent",
"model": "claude-3-5-sonnet-20241022"
}
},
{
"name": "solve_problem",
"inputs": [
{
"key": "objective",
"description": "The main objective or question extracted from the math problem."
},
{
"key": "conditions",
"description": "The extracted conditions, values, and constraints from the math problem."
}
],
"task": "Evaluate whether conditions are sufficient and solve the math problem if possible.",
"outputs": [
{
"key": "solution",
"description": "The complete solution to the math problem.",
"condition": "When conditions are sufficient to solve the problem.",
"action": {
"type": "RESULT",
"value": null
}
},
{
"key": "insufficient_conditions",
"description": "Feedback about missing or unclear conditions.",
"condition": "When conditions are insufficient to solve the problem.",
"action": {
"type": "GOTO",
"value": "extract_conditions"
}
}
],
"listen": [
"extract_conditions"
],
"agent": {
"name": "Math Solver Agent",
"model": "claude-3-5-sonnet-20241022"
}
}
]
}

View file

@ -0,0 +1,148 @@
<workflow>
<name>math_problem_solver_workflow</name>
<system_input>
<key>math_problem</key>
<description>The mathematical problem that needs to be solved.</description>
</system_input>
<system_output>
<key>solution</key>
<description>The detailed solution to the mathematical problem.</description>
</system_output>
<agents>
<agent category="existing">
<name>Objective Extraction Agent</name>
<description>This agent is specialized in analyzing math problems and extracting the main objective or question being asked.</description>
</agent>
<agent category="existing">
<name>Condition Extraction Agent</name>
<description>This agent is specialized in identifying and extracting all relevant conditions, given values, and constraints from the math problem.</description>
</agent>
<agent category="existing">
<name>Math Solver Agent</name>
<description>This agent is specialized in evaluating whether conditions are sufficient and solving math problems using appropriate mathematical methods.</description>
</agent>
</agents>
<events>
<event>
<name>on_start</name>
<inputs>
<input>
<key>math_problem</key>
<description>The mathematical problem that needs to be solved.</description>
</input>
</inputs>
<outputs>
<output>
<key>math_problem</key>
<description>The mathematical problem that needs to be solved.</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
</event>
<event>
<name>extract_objective</name>
<inputs>
<input>
<key>math_problem</key>
<description>The mathematical problem that needs to be solved.</description>
</input>
</inputs>
<task>Analyze the math problem and extract the main objective or question being asked.</task>
<outputs>
<output>
<key>objective</key>
<description>The main objective or question extracted from the math problem.</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>on_start</event>
</listen>
<agent>
<name>Objective Extraction Agent</name>
<model>gpt-4o-2024-08-06</model>
</agent>
</event>
<event>
<name>extract_conditions</name>
<inputs>
<input>
<key>math_problem</key>
<description>The mathematical problem that needs to be solved.</description>
</input>
<input>
<key>objective</key>
<description>The main objective or question extracted from the math problem.</description>
</input>
</inputs>
<task>Extract all relevant conditions, given values, and constraints from the math problem.</task>
<outputs>
<output>
<key>conditions</key>
<description>The extracted conditions, values, and constraints from the math problem.</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>extract_objective</event>
</listen>
<agent>
<name>Condition Extraction Agent</name>
<model>gpt-4o-2024-08-06</model>
</agent>
</event>
<event>
<name>solve_problem</name>
<inputs>
<input>
<key>objective</key>
<description>The main objective or question extracted from the math problem.</description>
</input>
<input>
<key>conditions</key>
<description>The extracted conditions, values, and constraints from the math problem.</description>
</input>
</inputs>
<task>Evaluate whether conditions are sufficient and solve the math problem if possible.</task>
<outputs>
<output>
<key>solution</key>
<description>The complete solution to the math problem.</description>
<condition>When conditions are sufficient to solve the problem.</condition>
<action>
<type>RESULT</type>
</action>
</output>
<output>
<key>insufficient_conditions</key>
<description>Feedback about missing or unclear conditions.</description>
<condition>When conditions are insufficient to solve the problem.</condition>
<action>
<type>GOTO</type>
<value>extract_conditions</value>
</action>
</output>
</outputs>
<listen>
<event>extract_conditions</event>
</listen>
<agent>
<name>Math Solver Agent</name>
<model>gpt-4o-2024-08-06</model>
</agent>
</event>
</events>
</workflow>

View file

@ -0,0 +1,173 @@
{
"name": "math_problem_solver_workflow",
"system_input": {
"key": "math_problem",
"description": "The math problem that needs to be solved."
},
"system_output": {
"key": "final_solution",
"description": "The final solution to the math problem determined by majority voting."
},
"global_variables": {},
"agents": [
{
"name": "Math Solver Agent",
"description": "This agent is specialized in solving math problems using appropriate mathematical methods.",
"category": "existing",
"tools": null
},
{
"name": "Result Aggregator Agent",
"description": "This agent aggregates results from different models and determines the final answer through majority voting.",
"category": "existing",
"tools": null
}
],
"events": [
{
"name": "on_start",
"inputs": [
{
"key": "math_problem",
"description": "The math problem that needs to be solved."
}
],
"task": null,
"outputs": [
{
"key": "math_problem",
"description": "The math problem that needs to be solved.",
"condition": null,
"action": {
"type": "RESULT",
"value": null
}
}
],
"listen": null,
"agent": null
},
{
"name": "solve_with_gpt4",
"inputs": [
{
"key": "math_problem",
"description": "The math problem that needs to be solved."
}
],
"task": "Solve the math problem using GPT-4 model.",
"outputs": [
{
"key": "gpt4_solution",
"description": "The solution provided by GPT-4 model.",
"condition": null,
"action": {
"type": "RESULT",
"value": null
}
}
],
"listen": [
"on_start"
],
"agent": {
"name": "Math Solver Agent",
"model": "gpt-4o-2024-08-06"
}
},
{
"name": "solve_with_claude",
"inputs": [
{
"key": "math_problem",
"description": "The math problem that needs to be solved."
}
],
"task": "Solve the math problem using Claude model.",
"outputs": [
{
"key": "claude_solution",
"description": "The solution provided by Claude model.",
"condition": null,
"action": {
"type": "RESULT",
"value": null
}
}
],
"listen": [
"on_start"
],
"agent": {
"name": "Math Solver Agent",
"model": "claude-3-5-sonnet-20241022"
}
},
{
"name": "solve_with_deepseek",
"inputs": [
{
"key": "math_problem",
"description": "The math problem that needs to be solved."
}
],
"task": "Solve the math problem using Deepseek model.",
"outputs": [
{
"key": "deepseek_solution",
"description": "The solution provided by Deepseek model.",
"condition": null,
"action": {
"type": "RESULT",
"value": null
}
}
],
"listen": [
"on_start"
],
"agent": {
"name": "Math Solver Agent",
"model": "deepseek/deepseek-chat"
}
},
{
"name": "aggregate_results",
"inputs": [
{
"key": "gpt4_solution",
"description": "The solution provided by GPT-4 model."
},
{
"key": "claude_solution",
"description": "The solution provided by Claude model."
},
{
"key": "deepseek_solution",
"description": "The solution provided by Deepseek model."
}
],
"task": "Aggregate the solutions from different models and determine the final answer through majority voting.",
"outputs": [
{
"key": "final_solution",
"description": "The final solution determined by majority voting.",
"condition": null,
"action": {
"type": "RESULT",
"value": null
}
}
],
"listen": [
"solve_with_gpt4",
"solve_with_claude",
"solve_with_deepseek"
],
"agent": {
"name": "Result Aggregator Agent",
"model": "gpt-4o-2024-08-06"
}
}
]
}

View file

@ -0,0 +1,161 @@
<workflow>
<name>math_problem_solver_workflow</name>
<system_input>
<key>math_problem</key>
<description>The math problem that needs to be solved.</description>
</system_input>
<system_output>
<key>final_solution</key>
<description>The final solution to the math problem determined by majority voting.</description>
</system_output>
<agents>
<agent category="existing">
<name>Math Solver Agent</name>
<description>This agent is specialized in solving math problems using appropriate mathematical methods.</description>
</agent>
<agent category="existing">
<name>Result Aggregator Agent</name>
<description>This agent aggregates results from different models and determines the final answer through majority voting.</description>
</agent>
</agents>
<events>
<event>
<name>on_start</name>
<inputs>
<input>
<key>math_problem</key>
<description>The math problem that needs to be solved.</description>
</input>
</inputs>
<outputs>
<output>
<key>math_problem</key>
<description>The math problem that needs to be solved.</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
</event>
<event>
<name>solve_with_gpt4</name>
<inputs>
<input>
<key>math_problem</key>
<description>The math problem that needs to be solved.</description>
</input>
</inputs>
<task>Solve the math problem using GPT-4 model.</task>
<outputs>
<output>
<key>gpt4_solution</key>
<description>The solution provided by GPT-4 model.</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>on_start</event>
</listen>
<agent>
<name>Math Solver Agent</name>
<model>gpt-4o-2024-08-06</model>
</agent>
</event>
<event>
<name>solve_with_claude</name>
<inputs>
<input>
<key>math_problem</key>
<description>The math problem that needs to be solved.</description>
</input>
</inputs>
<task>Solve the math problem using Claude model.</task>
<outputs>
<output>
<key>claude_solution</key>
<description>The solution provided by Claude model.</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>on_start</event>
</listen>
<agent>
<name>Math Solver Agent</name>
<model>claude-3-5-sonnet-20241022</model>
</agent>
</event>
<event>
<name>solve_with_deepseek</name>
<inputs>
<input>
<key>math_problem</key>
<description>The math problem that needs to be solved.</description>
</input>
</inputs>
<task>Solve the math problem using Deepseek model.</task>
<outputs>
<output>
<key>deepseek_solution</key>
<description>The solution provided by Deepseek model.</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>on_start</event>
</listen>
<agent>
<name>Math Solver Agent</name>
<model>deepseek/deepseek-chat</model>
</agent>
</event>
<event>
<name>aggregate_results</name>
<inputs>
<input>
<key>gpt4_solution</key>
<description>The solution provided by GPT-4 model.</description>
</input>
<input>
<key>claude_solution</key>
<description>The solution provided by Claude model.</description>
</input>
<input>
<key>deepseek_solution</key>
<description>The solution provided by Deepseek model.</description>
</input>
</inputs>
<task>Aggregate the solutions from different models and determine the final answer through majority voting.</task>
<outputs>
<output>
<key>final_solution</key>
<description>The final solution determined by majority voting.</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>solve_with_gpt4</event>
<event>solve_with_claude</event>
<event>solve_with_deepseek</event>
</listen>
<agent>
<name>Result Aggregator Agent</name>
<model>gpt-4o-2024-08-06</model>
</agent>
</event>
</events>
</workflow>

View file

@ -0,0 +1,159 @@
<workflow>
<system_input>
<key>problem_path</key>
<description>The URL of the dataset containing math problems to solve: https://huggingface.co/datasets/openai/gsm8k</description>
</system_input>
<system_output>
<key>solution_results</key>
<description>The aggregated solution results with majority voting from multiple models</description>
</system_output>
<agents>
<agent category="new">
<name>Data Fetcher Agent</name>
<description>This agent is responsible for downloading and processing the dataset from HuggingFace</description>
</agent>
<agent category="new">
<name>Math Solver Agent</name>
<description>This agent is specialized in solving mathematical problems step by step</description>
</agent>
<agent category="new">
<name>Result Aggregator Agent</name>
<description>This agent aggregates solutions from different models and performs majority voting</description>
</agent>
</agents>
<global_variables>
<variable>
<key>dataset_url</key>
<description>The URL of the GSM8K dataset</description>
<value>https://huggingface.co/datasets/openai/gsm8k</value>
</variable>
</global_variables>
<events>
<event>
<name>fetch_dataset</name>
<input>
<key>problem_path</key>
<description>The URL of the dataset to download</description>
</input>
<task>Download the GSM8K dataset and save it locally</task>
<outputs>
<output>
<key>dataset_content</key>
<description>The downloaded math problems from the dataset</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<agent>
<name>Data Fetcher Agent</name>
<model>gpt-4o-2024-08-06</model>
</agent>
</event>
<event>
<name>solve_with_gpt4</name>
<input>
<key>dataset_content</key>
<description>The math problems to solve</description>
</input>
<task>Solve the math problems using GPT-4 model</task>
<outputs>
<output>
<key>gpt4_solutions</key>
<description>Solutions generated by GPT-4</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>fetch_dataset</event>
</listen>
<agent>
<name>Math Solver Agent</name>
<model>gpt-4-1106-preview</model>
</agent>
</event>
<event>
<name>solve_with_claude</name>
<input>
<key>dataset_content</key>
<description>The math problems to solve</description>
</input>
<task>Solve the math problems using Claude model</task>
<outputs>
<output>
<key>claude_solutions</key>
<description>Solutions generated by Claude</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>fetch_dataset</event>
</listen>
<agent>
<name>Math Solver Agent</name>
<model>claude-3-opus-20240229</model>
</agent>
</event>
<event>
<name>solve_with_mixtral</name>
<input>
<key>dataset_content</key>
<description>The math problems to solve</description>
</input>
<task>Solve the math problems using Mixtral model</task>
<outputs>
<output>
<key>mixtral_solutions</key>
<description>Solutions generated by Mixtral</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>fetch_dataset</event>
</listen>
<agent>
<name>Math Solver Agent</name>
<model>mixtral-8x7b-instruct</model>
</agent>
</event>
<event>
<name>aggregate_results</name>
<input>
<key>model_solutions</key>
<description>Solutions from different models to be aggregated</description>
</input>
<task>Aggregate solutions from all models using majority voting</task>
<outputs>
<output>
<key>solution_results</key>
<description>Final aggregated solutions with majority voting</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>solve_with_gpt4</event>
<event>solve_with_claude</event>
<event>solve_with_mixtral</event>
</listen>
<agent>
<name>Result Aggregator Agent</name>
<model>gpt-4o-2024-08-06</model>
</agent>
</event>
</events>
</workflow>

View file

@ -0,0 +1,167 @@
<workflow>
<system_input>
<key>dataset_url</key>
<description>The URL of the math problem dataset on huggingface.</description>
</system_input>
<system_output>
<key>final_result</key>
<description>The final solution of the math problem after majority voting.</description>
</system_output>
<agents>
<agent category="new">
<name>Data Loader Agent</name>
<description>This agent is responsible for downloading and processing the dataset.</description>
<tools>
<tool>download_file</tool>
<tool>analyze_data</tool>
<tool>load_one_instance</tool>
</tools>
</agent>
<agent category="new">
<name>Math Solver Agent</name>
<description>This agent is responsible for solving math problems using different language models.</description>
</agent>
<agent category="new">
<name>Result Aggregator Agent</name>
<description>This agent aggregates results from different solvers and performs majority voting.</description>
</agent>
</agents>
<global_variables>
<variable>
<key>dataset_path</key>
<description>Local path where the dataset will be stored</description>
<value>/workspace/data/math_dataset.json</value>
</variable>
</global_variables>
<events>
<event>
<name>download_data</name>
<input>
<key>dataset_url</key>
<description>The URL of the math problem dataset</description>
</input>
<task>Download the dataset from huggingface and analyze its structure</task>
<outputs>
<output>
<key>problem_instance</key>
<description>A single math problem instance from the dataset</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<agent>
<name>Data Loader Agent</name>
<model>gpt-4o-2024-08-06</model>
</agent>
</event>
<event>
<name>solve_with_gpt4</name>
<input>
<key>problem_instance</key>
<description>Math problem to solve</description>
</input>
<task>Solve the math problem using GPT-4</task>
<outputs>
<output>
<key>gpt4_solution</key>
<description>Solution from GPT-4 model</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>download_data</event>
</listen>
<agent>
<name>Math Solver Agent</name>
<model>gpt-4o-2024-08-06</model>
</agent>
</event>
<event>
<name>solve_with_claude</name>
<input>
<key>problem_instance</key>
<description>Math problem to solve</description>
</input>
<task>Solve the math problem using Claude</task>
<outputs>
<output>
<key>claude_solution</key>
<description>Solution from Claude model</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>download_data</event>
</listen>
<agent>
<name>Math Solver Agent</name>
<model>claude-3-5-sonnet-20241022</model>
</agent>
</event>
<event>
<name>solve_with_palm</name>
<input>
<key>problem_instance</key>
<description>Math problem to solve</description>
</input>
<task>Solve the math problem using PaLM</task>
<outputs>
<output>
<key>palm_solution</key>
<description>Solution from PaLM model</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>download_data</event>
</listen>
<agent>
<name>Math Solver Agent</name>
<model>palm-2-4-chat-20240125</model>
</agent>
</event>
<event>
<name>aggregate_results</name>
<input>
<key>solutions</key>
<description>Solutions from all models</description>
</input>
<task>Aggregate solutions using majority voting</task>
<outputs>
<output>
<key>final_result</key>
<description>Final solution after majority voting</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>solve_with_gpt4</event>
<event>solve_with_claude</event>
<event>solve_with_palm</event>
</listen>
<agent>
<name>Result Aggregator Agent</name>
<model>gpt-4o-2024-08-06</model>
</agent>
</event>
</events>
</workflow>

View file

@ -0,0 +1,158 @@
{
"name": "math_problem_solver_workflow",
"system_input": {
"key": "math_problem",
"description": "The math problem that needs to be solved."
},
"system_output": {
"key": "final_solution",
"description": "The final solution to the math problem determined through majority voting."
},
"global_variables": {},
"agents": [
{
"name": "Math Solver Agent",
"description": "This agent is specialized in solving math problems using appropriate mathematical methods.",
"category": "existing",
"tools": null
},
{
"name": "Result Aggregator Agent",
"description": "This agent is specialized in aggregating results from different models and determining the final answer through majority voting.",
"category": "existing",
"tools": null
}
],
"events": [
{
"name": "on_start",
"input": {
"key": "math_problem",
"description": "The math problem that needs to be solved."
},
"task": "Distribute the math problem to parallel solvers.",
"outputs": [
{
"key": "problem_ready",
"description": "Math problem ready for parallel processing.",
"condition": null,
"action": {
"type": "RESULT",
"value": null
}
}
],
"listen": null,
"agent": {
"name": "Math Solver Agent",
"model": "gpt-4o-2024-08-06"
}
},
{
"name": "solve_with_gpt4",
"input": {
"key": "problem_ready",
"description": "Math problem to be solved."
},
"task": "Solve the math problem using GPT-4 model.",
"outputs": [
{
"key": "gpt4_solution",
"description": "Solution from GPT-4 model.",
"condition": null,
"action": {
"type": "RESULT",
"value": null
}
}
],
"listen": [
"on_start"
],
"agent": {
"name": "Math Solver Agent",
"model": "gpt-4o-2024-08-06"
}
},
{
"name": "solve_with_claude",
"input": {
"key": "problem_ready",
"description": "Math problem to be solved."
},
"task": "Solve the math problem using Claude model.",
"outputs": [
{
"key": "claude_solution",
"description": "Solution from Claude model.",
"condition": null,
"action": {
"type": "RESULT",
"value": null
}
}
],
"listen": [
"on_start"
],
"agent": {
"name": "Math Solver Agent",
"model": "claude-3-5-sonnet-20241022"
}
},
{
"name": "solve_with_deepseek",
"input": {
"key": "problem_ready",
"description": "Math problem to be solved."
},
"task": "Solve the math problem using Deepseek model.",
"outputs": [
{
"key": "deepseek_solution",
"description": "Solution from Deepseek model.",
"condition": null,
"action": {
"type": "RESULT",
"value": null
}
}
],
"listen": [
"on_start"
],
"agent": {
"name": "Math Solver Agent",
"model": "deepseek/deepseek-chat"
}
},
{
"name": "aggregate_results",
"input": {
"key": "multiple_solutions",
"description": "Solutions from all three models."
},
"task": "Aggregate solutions from different models and determine final answer through majority voting.",
"outputs": [
{
"key": "final_solution",
"description": "Final solution determined through majority voting.",
"condition": null,
"action": {
"type": "RESULT",
"value": null
}
}
],
"listen": [
"solve_with_gpt4",
"solve_with_claude",
"solve_with_deepseek"
],
"agent": {
"name": "Result Aggregator Agent",
"model": "gpt-4o-2024-08-06"
}
}
]
}

View file

@ -0,0 +1,148 @@
<workflow>
<name>math_problem_solver_workflow</name>
<system_input>
<key>math_problem</key>
<description>The math problem that needs to be solved.</description>
</system_input>
<system_output>
<key>final_solution</key>
<description>The final solution to the math problem determined through majority voting.</description>
</system_output>
<agents>
<agent category="existing">
<name>Math Solver Agent</name>
<description>This agent is specialized in solving math problems using appropriate mathematical methods.</description>
</agent>
<agent category="existing">
<name>Result Aggregator Agent</name>
<description>This agent is specialized in aggregating results from different models and determining the final answer through majority voting.</description>
</agent>
</agents>
<events>
<event>
<name>on_start</name>
<input>
<key>math_problem</key>
<description>The math problem that needs to be solved.</description>
</input>
<task>Distribute the math problem to parallel solvers.</task>
<outputs>
<output>
<key>problem_ready</key>
<description>Math problem ready for parallel processing.</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<agent>
<name>Math Solver Agent</name>
<model>gpt-4o-2024-08-06</model>
</agent>
</event>
<event>
<name>solve_with_gpt4</name>
<input>
<key>problem_ready</key>
<description>Math problem to be solved.</description>
</input>
<task>Solve the math problem using GPT-4 model.</task>
<outputs>
<output>
<key>gpt4_solution</key>
<description>Solution from GPT-4 model.</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>on_start</event>
</listen>
<agent>
<name>Math Solver Agent</name>
<model>gpt-4o-2024-08-06</model>
</agent>
</event>
<event>
<name>solve_with_claude</name>
<input>
<key>problem_ready</key>
<description>Math problem to be solved.</description>
</input>
<task>Solve the math problem using Claude model.</task>
<outputs>
<output>
<key>claude_solution</key>
<description>Solution from Claude model.</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>on_start</event>
</listen>
<agent>
<name>Math Solver Agent</name>
<model>claude-3-5-sonnet-20241022</model>
</agent>
</event>
<event>
<name>solve_with_deepseek</name>
<input>
<key>problem_ready</key>
<description>Math problem to be solved.</description>
</input>
<task>Solve the math problem using Deepseek model.</task>
<outputs>
<output>
<key>deepseek_solution</key>
<description>Solution from Deepseek model.</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>on_start</event>
</listen>
<agent>
<name>Math Solver Agent</name>
<model>deepseek/deepseek-chat</model>
</agent>
</event>
<event>
<name>aggregate_results</name>
<input>
<key>multiple_solutions</key>
<description>Solutions from all three models.</description>
</input>
<task>Aggregate solutions from different models and determine final answer through majority voting.</task>
<outputs>
<output>
<key>final_solution</key>
<description>Final solution determined through majority voting.</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>solve_with_gpt4</event>
<event>solve_with_claude</event>
<event>solve_with_deepseek</event>
</listen>
<agent>
<name>Result Aggregator Agent</name>
<model>gpt-4o-2024-08-06</model>
</agent>
</event>
</events>
</workflow>

View file

@ -0,0 +1,119 @@
{
"name": "mining_conditions_workflow",
"system_input": {
"key": "math_problem",
"description": "The math problem that user wants to solve."
},
"system_output": {
"key": "solution",
"description": "The detailed solution of the math problem, including steps and final answer."
},
"global_variables": {},
"agents": [
{
"name": "Objective Extraction Agent",
"description": "This agent is specialized in analyzing math problems and extracting the main objective or question being asked.",
"category": "new",
"tools": null
},
{
"name": "Condition Extraction Agent",
"description": "This agent is specialized in identifying and extracting all relevant conditions, given values, and constraints from the math problem.",
"category": "new",
"tools": null
},
{
"name": "Math Solver Agent",
"description": "This agent is specialized in evaluating whether conditions are sufficient and solving math problems using appropriate mathematical methods.",
"category": "new",
"tools": null
}
],
"events": [
{
"name": "on_start",
"input": {
"key": "math_problem",
"description": "The original math problem text."
},
"task": "Extract the main objective or question from the math problem.",
"outputs": [
{
"key": "objective",
"description": "The clearly defined objective or question that needs to be solved.",
"condition": null,
"action": {
"type": "RESULT",
"value": null
}
}
],
"listen": null,
"agent": {
"name": "Objective Extraction Agent",
"model": "gpt-4o-2024-08-06"
}
},
{
"name": "extract_conditions",
"input": {
"key": "objective",
"description": "The objective of the math problem."
},
"task": "Extract all relevant conditions, given values, and constraints from the math problem.",
"outputs": [
{
"key": "conditions",
"description": "List of all identified conditions and constraints.",
"condition": null,
"action": {
"type": "RESULT",
"value": null
}
}
],
"listen": [
"on_start"
],
"agent": {
"name": "Condition Extraction Agent",
"model": "gpt-4o-2024-08-06"
}
},
{
"name": "evaluate_and_solve",
"input": {
"key": "conditions",
"description": "The extracted conditions and constraints."
},
"task": "Evaluate if conditions are sufficient and solve the math problem if possible.",
"outputs": [
{
"key": "insufficient_conditions",
"description": "Conditions are not sufficient to solve the problem.",
"condition": "If the current conditions are not enough to solve the problem.",
"action": {
"type": "GOTO",
"value": "extract_conditions"
}
},
{
"key": "solution",
"description": "Complete solution with steps and final answer.",
"condition": "If conditions are sufficient to solve the problem.",
"action": {
"type": "RESULT",
"value": null
}
}
],
"listen": [
"extract_conditions"
],
"agent": {
"name": "Math Solver Agent",
"model": "gpt-4o-2024-08-06"
}
}
]
}

View file

@ -0,0 +1,113 @@
<workflow>
<name>mining_conditions_workflow</name>
<system_input>
<key>math_problem</key>
<description>The math problem that user wants to solve.</description>
</system_input>
<system_output>
<key>solution</key>
<description>The detailed solution of the math problem, including steps and final answer.</description>
</system_output>
<agents>
<agent category="new">
<name>Objective Extraction Agent</name>
<description>This agent is specialized in analyzing math problems and extracting the main objective or question being asked.</description>
</agent>
<agent category="new">
<name>Condition Extraction Agent</name>
<description>This agent is specialized in identifying and extracting all relevant conditions, given values, and constraints from the math problem.</description>
</agent>
<agent category="new">
<name>Math Solver Agent</name>
<description>This agent is specialized in evaluating whether conditions are sufficient and solving math problems using appropriate mathematical methods.</description>
</agent>
</agents>
<events>
<event>
<name>on_start</name>
<input>
<key>math_problem</key>
<description>The original math problem text.</description>
</input>
<task>Extract the main objective or question from the math problem.</task>
<outputs>
<output>
<key>objective</key>
<description>The clearly defined objective or question that needs to be solved.</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<agent>
<name>Objective Extraction Agent</name>
<model>gpt-4o-2024-08-06</model>
</agent>
</event>
<event>
<name>extract_conditions</name>
<input>
<key>objective</key>
<description>The objective of the math problem.</description>
</input>
<task>Extract all relevant conditions, given values, and constraints from the math problem.</task>
<outputs>
<output>
<key>conditions</key>
<description>List of all identified conditions and constraints.</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>on_start</event>
</listen>
<agent>
<name>Condition Extraction Agent</name>
<model>gpt-4o-2024-08-06</model>
</agent>
</event>
<event>
<name>evaluate_and_solve</name>
<input>
<key>conditions</key>
<description>The extracted conditions and constraints.</description>
</input>
<task>Evaluate if conditions are sufficient and solve the math problem if possible.</task>
<outputs>
<output>
<key>insufficient_conditions</key>
<description>Conditions are not sufficient to solve the problem.</description>
<condition>If the current conditions are not enough to solve the problem.</condition>
<action>
<type>GOTO</type>
<value>extract_conditions</value>
</action>
</output>
<output>
<key>solution</key>
<description>Complete solution with steps and final answer.</description>
<condition>If conditions are sufficient to solve the problem.</condition>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>extract_conditions</event>
</listen>
<agent>
<name>Math Solver Agent</name>
<model>gpt-4o-2024-08-06</model>
</agent>
</event>
</events>
</workflow>

View file

@ -0,0 +1,144 @@
<workflow>
<system_input>
<key>user_topic</key>
<description>The user's topic that the user wants to write a wikipedia-like article about.</description>
</system_input>
<system_output>
<key>article</key>
<description>The article that satisfies the user's request.</description>
</system_output>
<agents>
<agent category="existing">
<name>Web Surfer Agent</name>
<description>This agent is used to search the web for the user's topic.</description>
</agent>
<agent category="new">
<name>Outline Agent</name>
<description>This agent is used to write an outline for the user's topic.</description>
</agent>
<agent category="new">
<name>Evaluator Agent</name>
<description>This agent is used to evaluate the outline of the user's topic.</description>
</agent>
<agent category="new">
<name>Article Writer Agent</name>
<description>This agent is used to write the article for the user's topic.</description>
</agent>
</agents>
<events>
<event>
<name>on_start</name>
<input>
<key>user_topic</key>
<description>The user's topic that the user wants to write a wikipedia-like article about.</description>
</input>
<task>
search the information about the topic and return the result.
</task>
<outputs>
<output>
<key>search_result</key>
<description>The search result of the user's topic.</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<agent>
<name>Web Surfer Agent</name>
<model>gpt-4o-2024-08-06</model>
</agent>
</event>
<event>
<name>on_outline</name>
<input>
<key>search_result</key>
<description>The search result of the user's topic.</description>
</input>
<task>
write an outline for the user's topic.
</task>
<outputs>
<output>
<key>outline</key>
<description>The outline of the user's topic.</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>on_start</event>
</listen>
<agent>
<name>Outline Agent</name>
<model>gpt-4o-2024-08-06</model>
</agent>
</event>
<event>
<name>on_evaluate</name>
<input>
<key>outline</key>
<description>The outline of the user's topic.</description>
</input>
<task>
evaluate the outline of the user's topic.
</task>
<outputs>
<output>
<key>positive_feedback</key>
<description>The positive feedback of the outline of the user's topic.</description>
<condition>
If the outline is good enough, give positive feedback.
</condition>
<action>
<type>RESULT</type>
</action>
</output>
<output>
<key>negative_feedback</key>
<description>The negative feedback of the outline of the user's topic.</description>
<condition>
If the outline is not good enough, give negative feedback.
</condition>
<action>
<type>ABORT</type>
</action>
</output>
</outputs>
<listen>
<event>on_outline</event>
</listen>
<agent>
<name>Evaluator Agent</name>
<model>gpt-4o-2024-08-06</model>
</agent>
</event>
<event>
<name>on_write</name>
<input>
<key>outline</key>
<description>The outline of user's topic.</description>
</input>
<task>
write the article for the user's topic.
</task>
<outputs>
<output>
<key>article</key>
<description>The article of the user's topic.</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>on_evaluate</event>
</listen>
<agent>
<name>Article Writer Agent</name>
<model>gpt-4o-2024-08-06</model>
</agent>
</event>
</events>
</workflow>

View file

@ -0,0 +1,578 @@
from metachain.registry import register_agent
from metachain.tools.meta.edit_agents import list_agents, create_agent, delete_agent, run_agent, read_agent
from metachain.tools.meta.edit_tools import list_tools, create_tool, delete_tool, run_tool
from metachain.tools.meta.edit_workflow import list_workflows
from metachain.tools.terminal_tools import execute_command
from metachain.types import Agent
from metachain.io_utils import read_file
from pydantic import BaseModel, Field
from typing import List
import json
@register_agent(name = "Workflow Former Agent", func_name="get_workflow_former_agent")
def get_workflow_former_agent(model: str) -> Agent:
    """
    Build the "Workflow Former Agent".

    This agent analyzes a user's natural-language request and emits a structured
    XML "creation form" describing a multi-agent workflow (system input/output,
    agents, events and their wiring) for the MetaChain framework.

    Args:
        model: Identifier of the LLM backing the agent
            (e.g. "claude-3-5-sonnet-20241022").

    Returns:
        Agent: a MetaChain Agent whose system instructions are generated
        dynamically from the currently registered tools, agents and workflows.
    """
    def instructions(context_variables):
        # Collect the names of already-registered workflows so the prompt can
        # instruct the agent to pick a new, non-clashing workflow name.
        workflow_list = list_workflows(context_variables)
        # list_workflows returns a JSON string mapping workflow names to info.
        workflow_list = json.loads(workflow_list)
        workflow_list = [workflow_name for workflow_name in workflow_list.keys()]
        workflow_list_str = ", ".join(workflow_list)
        # The prompt below is assembled from static raw strings plus one
        # f-string that injects the live tool/agent/workflow registries.
        return r"""\
You are an agent specialized in creating workflow forms for the MetaChain framework.
Your task is to analyze user requests and generate structured creation forms for workflows consisting of multiple agents.
KEY COMPONENTS OF THE FORM:
1. <workflow> - Root element containing the entire workflow definition
2. <name> - The name of the workflow. It should be a single word with '_' as the separator, and as unique as possible to describe the speciality of the workflow.
3. <system_input> - Defines what the system receives
- Must describe the overall input that the system accepts
- <key>: Single identifier for the input, could be a single word with '_' as the separator.
- <description>: Detailed explanation of input format
4. <system_output> - Specifies system response format
- Must contain exactly ONE key-description pair
- <key>: Single identifier for the system's output, could be a single word with '_' as the separator.
- <description>: Explanation of the output format
5. <agents> - Contains all agent definitions
- Each <agent> can be existing or new (specified by category attribute)
- name: Agent's identifier
- description: Agent's purpose and capabilities
- tools: (optional): Only required for new agents when specific tools are requested
* Only include when user explicitly requests certain tools
6. <global_variables> - Shared variables across agents in the workflow (optional)
- Used for constants or shared values accessible by all agents in EVERY event in the workflow
- Example:
```xml
<global_variables>
<variable>
<key>user_name</key>
<description>The name of the user</description>
<value>John Doe</value>
</variable>
</global_variables>
```
7. <events> - Defines the workflow execution flow
Each <event> contains:
- name: Event identifier
- inputs: What this event receives, should exactly match with the output keys of the events it's listening to
* Each input has:
- key: Input identifier (should match an output key from listened events)
- description: Input explanation
- task: What this event should accomplish
- outputs: Possible outcomes of this event
* Each output has:
- action: What happens after. Every action has a type and a optional value. Action is categorized into 3 types:
- RESULT: The event is successful, and the workflow will continue to the next event which is listening to this event. Value is the output of this event.
- ABORT: The event is not successful, and the workflow will abort. Value could be empty.
- GOTO: The event is not successful, and the workflow will wait for the next event. Value is the name of the event to go to. The event go to should NOT listen to this event.
- key: Output identifier (be a single word with '_' as the separator)
- description: Output explanation
- condition: when the output occurs, the action will be executed
* Can have single or multiple outputs:
- For single output (simple flow):
```xml
<outputs>
<output>
<key>result_key</key>
<description>Description of the result</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
```
- For multiple outputs (conditional flow):
```xml
<outputs>
<output>
<key>success_result</key>
<description>Output when condition A is met</description>
<condition>When condition A is true</condition>
<action>
<type>RESULT</type>
</action>
</output>
<output>
<key>should_repeat</key>
<description>Output when condition B is met</description>
<condition>When condition B is true</condition>
<action>
<type>GOTO</type>
<value>target_event</value>
</action>
</output>
<output>
<key>failure_result</key>
<description>Output when condition C is met</description>
<condition>When condition C is true</condition>
<action>
<type>ABORT</type>
</action>
</output>
</outputs>
```
- listen: Which events trigger this one.
- agent: Which agent handles this event. Every agent has the name of the agent, and the exact model of the agent (like `claude-3-5-sonnet-20241022` or others)
IMPORTANT RULES:
0. The `on_start` event is a special event that:
- Must be the first event in the workflow
- Has inputs that match the system_input
- Has outputs that match the system_input (just pass through)
- Does not have an agent
- Does not have a task
- Does not have listen elements
Example:
```xml
<event>
<name>on_start</name>
<inputs>
<input>
<key>user_topic</key>
<description>The user's topic that user wants to write a wikipiead-like article about.</description>
</input>
</inputs>
<outputs>
<output>
<key>user_topic</key>
<description>The user's topic that user wants to write a wikipiead-like article about.</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
</event>
```
1. For simple sequential flows:
- Use single output with RESULT type
- No condition is needed
- Next event in chain listening to this event will be triggered automatically
2. For conditional flows:
- Multiple outputs must each have a condition
- Conditions should be mutually exclusive
- Each output should specify appropriate action type
- `GOTO` action should have a value which is the name of the event to go to
3. Only include tools section when:
- Agent is new (category="new") AND
- User explicitly requests specific tools for the agent
4. Omit tools section when:
- Using existing agents (category="existing") OR
- Creating new agents without specific tool requirements
""" + \
f"""
Existing tools you can use is:
{list_tools(context_variables)}
Existing agents you can use is:
{list_agents(context_variables)}
The name of existing workflows: [{workflow_list_str}]. The name of the new workflow you are creating should be DIFFERENT from these names according to the speciality of the workflow.
""" + \
r"""
COMMON WORKFLOW PATTERNS:
1. If-Else Pattern (Conditional Branching):
```xml
<event>
<name>analyze_data</name>
<task>Analyze the data and determine next steps</task>
<outputs>
<output>
<key>positive_case</key>
<description>Handle positive case</description>
<condition>If data meets criteria A</condition>
<action>
<type>RESULT</type>
</action>
</output>
<output>
<key>negative_case</key>
<description>Handle the negative case</description>
<condition>If data does not meet criteria A</condition>
<action>
<type>ABORT</type>
</action>
</output>
</outputs>
</event>
```
2. Parallelization Pattern (Concurrent Execution):
```xml
<!-- Parent event -->
<event>
<name>initial_analysis</name>
<outputs>
<output>
<key>analysis_result</key>
<description>Initial analysis result</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
</event>
<!-- Multiple events listening to the same parent -->
<event>
<name>technical_analysis</name>
<listen>
<event>initial_analysis</event>
</listen>
<outputs>
<output>
<key>technical_result</key>
<description>Technical analysis result</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
</event>
<event>
<name>financial_analysis</name>
<listen>
<event>initial_analysis</event>
</listen>
<outputs>
<output>
<key>financial_result</key>
<description>Financial analysis result</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
</event>
<!-- Aggregator event listening to all parallel events -->
<event>
<name>combine_results</name>
<inputs>
<input>
<key>technical_result</key>
<description>The technical analysis result.</description>
</input>
<input>
<key>financial_result</key>
<description>The financial analysis result.</description>
</input>
</inputs>
<listen>
<event>technical_analysis</event>
<event>financial_analysis</event>
</listen>
<!-- This event will only execute when ALL listened events complete -->
</event>
```
3. Evaluator-Optimizer Pattern (Iterative Refinement):
```xml
<event>
<name>generate_content</name>
<outputs>
<output>
<key>content</key>
<description>Generated content</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
</event>
<event>
<name>evaluate_content</name>
<listen>
<event>generate_content</event>
</listen>
<task>Evaluate the quality of generated content</task>
<outputs>
<output>
<key>approved</key>
<description>Content meets quality standards</description>
<condition>If quality score >= threshold</condition>
<action>
<type>RESULT</type>
</action>
</output>
<output>
<key>needs_improvement</key>
<description>Content needs improvement</description>
<condition>If quality score < threshold</condition>
<action>
<type>GOTO</type>
<value>generate_content</value>
</action>
</output>
</outputs>
</event>
```
IMPORTANT NOTES ON PATTERNS:
0. The above patterns are incomplete which some mandatory elements are missing due to the limitation of context length. In real-world, you could refer to the logic of the patterns to create a complete and correct workflow.
1. If-Else Pattern:
- Use mutually exclusive conditions
- You can NOT place MORE THAN ONE OUTPUT with RESULT type
- Outputs determine which branch executes
2. Parallelization Pattern:
- Multiple events can listen to the same parent event
- Aggregator event must list ALL parallel events in its listen section
- All parallel events must complete before aggregator executes
- Model of agents in every parallel event could be different
3. Evaluator-Optimizer Pattern:
- Use GOTO action for iteration
- Include clear evaluation criteria in conditions
- Have both success and retry paths
- Consider adding maximum iteration limit in global_variables
""" + \
r"""
EXAMPLE:
User: I want to build a workflow that can help me to write a wikipiead-like article about the user's topic. It should:
1. Search the web for the user's topic.
2. Write an outline for the user's topic.
3. Evaluate the outline. If the outline is not good enough, repeat the outline step, otherwise, continue to write the article.
4. Write the article.
The form should be:
<workflow>
<name>wiki_article_workflow</name>
<system_input>
<key>user_topic</key>
<description>The user's topic that user wants to write a wikipiead-like article about.</description>
</system_input>
<system_output>
<key>article</key>
<description>The article that satisfies the user's request.</description>
</system_output>
<agents>
<agent category="existing">
<name>Web Surfer Agent</name>
<description>This agent is used to search the web for the user's topic.</description>
</agent>
<agent category="new">
<name>Outline Agent</name>
<description>This agent is used to write an outline for the user's topic.</description>
</agent>
<agent category="new">
<name>Evaluator Agent</name>
<description>This agent is used to evaluate the outline of the user's topic.</description>
</agent>
<agent category="new">
<name>Article Writer Agent</name>
<description>This agent is used to write the article for the user's topic.</description>
</agent>
</agents>
<events>
<event>
<name>on_start</name>
<inputs>
<input>
<key>user_topic</key>
<description>The user's topic that user wants to write a wikipiead-like article about.</description>
</input>
</inputs>
<outputs>
<output>
<key>user_topic</key>
<description>The user's topic that user wants to write a wikipiead-like article about.</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
</event>
<event>
<name>on_search</name>
<inputs>
<input>
<key>user_topic</key>
<description>The user's topic that user wants to write a wikipiead-like article about.</description>
</input>
</inputs>
<task>
search the information about the topic and return the result.
</task>
<outputs>
<output>
<key>search_result</key>
<description>The search result of the user's topic.</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>on_start</event>
</listen>
<agent>
<name>Web Surfer Agent</name>
<model>claude-3-5-sonnet-20241022</model>
</agent>
</event>
<event>
<name>on_outline</name>
<inputs>
<input>
<key>search_result</key>
<description>The search result of the user's topic.</description>
</input>
</inputs>
<task>
write an outline for the user's topic.
</task>
<outputs>
<output>
<key>outline</key>
<description>The outline of the user's topic.</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>on_start</event>
</listen>
<agent>
<name>Outline Agent</name>
<model>claude-3-5-sonnet-20241022</model>
</agent>
</event>
<event>
<name>on_evaluate</name>
<inputs>
<input>
<key>outline</key>
<description>The outline of the user's topic.</description>
</input>
</inputs>
<task>
evaluate the outline of the user's topic.
</task>
<outputs>
<output>
<key>positive_feedback</key>
<description>The positive feedback of the outline of the user's topic.</description>
<condition>
If the outline is good enough, give positive feedback.
</condition>
<action>
<type>RESULT</type>
</action>
</output>
<output>
<key>negative_feedback</key>
<description>The negative feedback of the outline of the user's topic.</description>
<condition>
If the outline is not good enough, give negative feedback.
</condition>
<action>
<type>GOTO</type>
<value>on_outline</value>
</action>
</output>
</outputs>
<listen>
<event>on_outline</event>
</listen>
<agent>
<name>Evaluator Agent</name>
<model>claude-3-5-sonnet-20241022</model>
</agent>
</event>
<event>
<name>on_write</name>
<inputs>
<input>
<key>outline</key>
<description>The outline of user's topic.</description>
</input>
</inputs>
<task>
write the article for the user's topic.
</task>
<outputs>
<output>
<key>article</key>
<description>The article of the user's topic.</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>on_evaluate</event>
</listen>
<agent>
<name>Article Writer Agent</name>
<model>claude-3-5-sonnet-20241022</model>
</agent>
</event>
</events>
</workflow>
GUIDELINES:
1. Each event should have clear inputs and outputs
2. Use conditions to handle different outcomes
3. Properly chain events using the listen element
4. Review steps should be included for quality control
5. Action types should be either RESULT or ABORT
Follow these examples and guidelines to create appropriate workflow forms based on user requirements.
"""
    # `instructions` is passed as a callable so the registries are re-read
    # each time the agent's system prompt is rendered.
    return Agent(
        name = "Workflow Former Agent",
        model = model,
        instructions = instructions,
    )
# Ad-hoc manual smoke test: build the agent and ask it to produce a workflow
# form for a math-solving pipeline. Requires a configured MetaChain LLM
# backend, so it only runs when this file is executed directly.
if __name__ == "__main__":
    from metachain import MetaChain
    agent = get_workflow_former_agent("claude-3-5-sonnet-20241022")
    client = MetaChain()
    # Alternative test prompt (parallel-solver variant), kept for reference:
    # task_yaml = """\
    # I want to create a workflow that can help me to solving the math problem.
    # The workflow should:
    # 2. Parallelize solving the math problem with the same `Math Solver Agent` using different language models (`gpt-4o-2024-08-06`, `claude-3-5-sonnet-20241022`, `deepseek/deepseek-chat`)
    # 3. Aggregate the results from the `Math Solver Agent` and return the final result using majority voting.
    # Please create the form of this workflow in the XML format.
    # """
    task_yaml = """\
I want to create a workflow that can help me to solving the math problem.
The workflow should:
1. The `Objective Extraction Agent` will extract the objective of the math problem.
2. The `Condition Extraction Agent` will extract the conditions of the math problem.
3. The `Math Solver Agent` will evaluate whether the conditions are enough to solve the math problem: if yes, solve the math problem; if no, return to the `Condition Extraction Agent` to extract more conditions.
Please create the form of this workflow in the XML format.
"""
    task_yaml = task_yaml + """\
Directly output the form in the XML format.
"""
    messages = [{"role": "user", "content": task_yaml}]
    response = client.run(agent, messages)
    # Print only the agent's final reply (the XML form).
    print(response.messages[-1]["content"])

View file

@ -0,0 +1,335 @@
from pydantic import BaseModel, Field, field_validator, ValidationInfo, model_validator
from typing import List, Dict, Optional, Literal, Union
import xml.etree.ElementTree as ET
import re
# Base models used to validate the parsed workflow form
class WorkflowFormParseError(Exception):
    """Raised when the workflow form text cannot be parsed at all."""

    def __init__(self, message):
        # Delegate storage/formatting of the message to the base Exception.
        super().__init__(message)
class WorkflowConstraintError(Exception):
    """Raised when a parsed workflow form violates a structural constraint.

    Use this exception (rather than WorkflowFormParseError) when the XML is
    well-formed but the resulting form breaks a workflow rule, e.g. a GOTO
    action without a target or an event listening to a later event.
    """

    def __init__(self, message):
        super().__init__(message)
class KeyDescription(BaseModel):
    """A named value slot: a key plus its human-readable description."""
    key: str
    description: str
class Tool(BaseModel):
    """A tool an agent may call, identified by name with a description."""
    name: str
    description: str
class Action(BaseModel):
    """Terminal action attached to an event output.

    RESULT finishes the workflow with a value, ABORT stops it, and GOTO
    jumps to the event named in ``value``.
    """
    type: Literal["RESULT", "ABORT", "GOTO"]
    value: Optional[str] = None

    @field_validator('value')
    def validate_goto_value(cls, v, info: ValidationInfo):
        """A GOTO action must carry a jump target; other types may omit it."""
        needs_target = info.data.get('type') == 'GOTO'
        if needs_target and not v:
            raise WorkflowConstraintError("GOTO action must have a value")
        return v
class Output(BaseModel):
    """One output branch of an event, with an optional firing condition."""
    key: str
    description: str
    condition: Optional[str] = None  # required only when the event has several outputs
    action: Action

    @field_validator('condition')
    def validate_condition(cls, v, info: ValidationInfo):
        """Require a condition when the event declares multiple outputs.

        NOTE(review): `_outputs_info` is not a declared field on this model,
        and pydantic v2 does not expose unknown constructor kwargs through
        ``info.data``, so this lookup presumably always returns {} and the
        check below may never fire — confirm against XMLParser.parse_output.
        """
        outputs_info = info.data.get('_outputs_info', {})
        if outputs_info.get('multiple_outputs', False) and not v:
            raise WorkflowConstraintError("Multiple outputs must each have a condition")
        return v
class Event(BaseModel):
    """A node in the workflow event graph.

    The special start event is named ``on_start``: it has no task, agent, or
    listen list, and its outputs must mirror its inputs.  Every other event
    must declare a task, an agent, and the upstream events it listens to.
    """
    name: str
    inputs: Optional[List[KeyDescription]] = None  # absent only on on_start
    task: Optional[str] = None  # absent only on on_start
    outputs: List[Output]
    listen: Optional[List[str]] = None  # names of upstream events
    agent: Optional[Dict[str, str]] = None  # {"name": ..., "model": ...}; absent only on on_start

    @field_validator('task')
    def validate_task(cls, v, info: ValidationInfo):
        """Non-start events must carry a task."""
        if info.data.get('name') != 'on_start' and not v:
            raise WorkflowConstraintError("Non-start events must have a task")
        return v

    @field_validator('agent')
    def validate_agent(cls, v, info: ValidationInfo):
        """Non-start events must carry an agent."""
        if info.data.get('name') != 'on_start' and not v:
            raise WorkflowConstraintError("Non-start events must have an agent")
        return v

    @field_validator('listen')
    def validate_listen(cls, v, info: ValidationInfo):
        """The on_start event cannot listen to anything."""
        if info.data.get('name') == 'on_start' and v:
            raise WorkflowConstraintError("Start event cannot have listen elements")
        return v

    @field_validator('name')
    def validate_start_event(cls, v, info: ValidationInfo):
        """Validate the start event's name.

        NOTE(review): `is_start_event` is not a declared field, and pydantic
        v2 ignores unknown constructor kwargs, so ``info.data`` likely never
        contains it and this check may be dead — confirm against
        XMLParser.parse_event, which passes it as a kwarg.
        """
        if info.data.get('is_start_event', False) and v != "on_start":
            raise WorkflowConstraintError("Start event must be named 'on_start'")
        return v

    @field_validator('outputs')
    def validate_start_event_outputs(cls, v, info: ValidationInfo):
        """The on_start event must echo its inputs as outputs, key for key."""
        if info.data.get('name') == 'on_start':
            inputs = info.data.get('inputs', [])
            if len(v) != len(inputs):
                raise WorkflowConstraintError("Start event outputs must match inputs")
            for output, input in zip(v, inputs):
                if output.key != input.key or output.description != input.description:
                    raise WorkflowConstraintError("Start event output must match input")
        return v

    @field_validator('outputs')
    def validate_outputs(cls, v):
        """At most one output may terminate the workflow with RESULT."""
        result_outputs = [out for out in v if out.action.type == "RESULT"]
        if len(result_outputs) > 1:
            raise WorkflowConstraintError("Cannot have more than one RESULT type output")
        return v

    @model_validator(mode='after')
    def validate_event_constraints(self) -> 'Event':
        """Cross-field checks that need the fully constructed model."""
        # The start event carries no inputs/listen, so nothing to check here.
        if self.name == "on_start":
            return self
        # Non-start events must declare their inputs ...
        if self.inputs is None:
            raise WorkflowConstraintError(f"Event '{self.name}': Non-start events must have inputs")
        # ... and the upstream events they listen to.
        if self.listen is None:
            raise WorkflowConstraintError(f"Event '{self.name}': Non-start events must have listen events")
        # Inputs are wired 1:1 to the outputs of the listened-to events.
        if len(self.inputs) != len(self.listen):
            raise WorkflowConstraintError(
                f"Event '{self.name}': Number of inputs ({len(self.inputs)}) must match number of listen events ({len(self.listen)})"
            )
        return self
class Agent(BaseModel):
    """An agent referenced by the workflow: reused ("existing") or newly built ("new")."""
    name: str
    description: str
    category: Literal["existing", "new"]
    tools: Optional[List[Tool]] = None

    @field_validator('tools')
    def validate_tools(cls, v, info: ValidationInfo):
        """Only newly created agents may define tools."""
        already_exists = info.data.get('category') == 'existing'
        if already_exists and v:
            raise WorkflowConstraintError("Existing agents should not have tools defined")
        return v
class WorkflowForm(BaseModel):
    """The complete parsed workflow: I/O contract, agents, and event graph."""
    name: str
    system_input: KeyDescription
    system_output: KeyDescription
    global_variables: Dict[str, str] = Field(default_factory=dict)
    agents: List[Agent]
    events: List[Event]

    @field_validator('events')
    def validate_events(cls, v):
        """Validate the event list as a whole."""
        # Exactly one entry point.
        start_events = [e for e in v if e.name == "on_start"]
        if len(start_events) != 1:
            raise WorkflowConstraintError("Must have exactly one 'on_start' event")
        # Every listened-to event must exist.
        event_names = {e.name for e in v}
        for event in v:
            if event.listen:
                for listened_event in event.listen:
                    if listened_event not in event_names:
                        raise WorkflowConstraintError(f"Event {event.name} listens to non-existent event {listened_event}")
        return v

    @model_validator(mode='after')
    def validate_event_order(self) -> 'WorkflowForm':
        """Validate listening order:
        1. An event may only listen to events defined before it.
        2. No cyclic dependencies.
        """
        # Map each event name to its position in declaration order.
        event_indices = {event.name: idx for idx, event in enumerate(self.events)}
        for idx, event in enumerate(self.events):
            if event.listen:
                for listened_event_name in event.listen:
                    # Existence check (also done in validate_events; kept for safety).
                    if listened_event_name not in event_indices:
                        raise WorkflowConstraintError(
                            f"Event '{event.name}': Referenced listen event '{listened_event_name}' not found"
                        )
                    # A forward reference would mean an undefined order or a cycle.
                    listened_idx = event_indices[listened_event_name]
                    if listened_idx >= idx:
                        raise WorkflowConstraintError(
                            f"Event '{event.name}' cannot listen to event '{listened_event_name}' "
                            f"because it appears later in the workflow or creates a cycle"
                        )
        return self
class XMLParser:
    """Static helpers that turn the <workflow> XML tree into pydantic models."""

    @staticmethod
    def parse_key_description(elem: ET.Element) -> KeyDescription:
        """Parse an element holding <key> and <description> children."""
        return KeyDescription(
            key=elem.find('key').text.strip(),
            description=elem.find('description').text.strip()
        )

    @staticmethod
    def parse_action(elem: ET.Element) -> Action:
        """Parse the <action> child of an <output> element; <value> is optional."""
        action_elem = elem.find('action')
        return Action(
            type=action_elem.find('type').text.strip(),
            value=action_elem.find('value').text.strip() if action_elem.find('value') is not None else None
        )

    @staticmethod
    def parse_output(elem: ET.Element, multiple_outputs: bool) -> Output:
        """Parse one <output>; `multiple_outputs` marks events with >1 output.

        NOTE(review): `_outputs_info` is not a declared field on Output, so
        pydantic v2 presumably drops this kwarg and Output.validate_condition
        never sees it — confirm.
        """
        return Output(
            key=elem.find('key').text.strip(),
            description=elem.find('description').text.strip(),
            condition=elem.find('condition').text.strip() if elem.find('condition') is not None else None,
            action=XMLParser.parse_action(elem),
            _outputs_info={'multiple_outputs': multiple_outputs}
        )

    @staticmethod
    def parse_event(elem: ET.Element, is_start: bool = False) -> Event:
        """Parse one <event> element.

        NOTE(review): the `is_start` argument is immediately recomputed from
        the event's name below, so the value callers pass is ignored.
        """
        name = elem.find('name').text.strip()
        is_start = name == 'on_start'
        outputs_elem = elem.find('outputs')
        multiple_outputs = len(outputs_elem.findall('output')) > 1
        # Start events never carry listen/agent/task, even if present in the XML.
        listen_elem = elem.find('listen')
        listen = [e.text.strip() for e in listen_elem.findall('event')] if listen_elem is not None and not is_start else None
        agent_elem = elem.find('agent')
        agent = {
            "name": agent_elem.find('name').text.strip(),
            "model": agent_elem.find('model').text.strip()
        } if agent_elem is not None and not is_start else None
        inputs_elem = elem.find('inputs')
        inputs = [XMLParser.parse_key_description(input_elem)
                  for input_elem in inputs_elem.findall('input')] if inputs_elem is not None else None
        task_elem = elem.find('task')
        task = task_elem.text.strip() if task_elem is not None and not is_start else None
        return Event(
            name=name,
            inputs=inputs,
            task=task,
            outputs=[XMLParser.parse_output(out, multiple_outputs)
                     for out in outputs_elem.findall('output')],
            listen=listen,
            agent=agent,
            # NOTE(review): `is_start_event` is not a declared Event field;
            # pydantic v2 presumably ignores this kwarg — confirm.
            is_start_event=is_start
        )

    @staticmethod
    def parse_agent(elem: ET.Element) -> Agent:
        """Parse one <agent category="..."> element, tools included."""
        tools_elem = elem.find('tools')
        tools = None
        if tools_elem is not None:
            tools = [Tool(
                name=tool.find('name').text.strip(),
                description=tool.find('description').text.strip()
            ) for tool in tools_elem.findall('tool')]
        return Agent(
            name=elem.find('name').text.strip(),
            description=elem.find('description').text.strip(),
            category=elem.get('category'),
            tools=tools
        )

    @classmethod
    def parse_xml(cls, xml_content: str) -> WorkflowForm:
        """Parse a full <workflow> document into a WorkflowForm."""
        root = ET.fromstring(xml_content)
        # The workflow name may be a root attribute or a <name> child element.
        workflow_name = root.get('name')
        if not workflow_name:
            # If name attribute doesn't exist, try to find name element
            name_elem = root.find('name')
            workflow_name = name_elem.text.strip() if name_elem is not None else "Unnamed Workflow"
        return WorkflowForm(
            name=workflow_name,
            system_input=cls.parse_key_description(root.find('system_input')),
            system_output=cls.parse_key_description(root.find('system_output')),
            global_variables={var.find('key').text.strip(): var.find('value').text.strip()
                              for var in root.find('global_variables').findall('variable')}
            if root.find('global_variables') is not None else {},
            agents=[cls.parse_agent(agent) for agent in root.findall('.//agents/agent')],
            events=[cls.parse_event(event, event.find('name').text.strip() == 'on_start')
                    for event in root.findall('.//events/event')]
        )
def extract_workflow_content(text):
    """Return the first <workflow>...</workflow> span found in *text*.

    Raises WorkflowFormParseError when no such span exists.
    """
    # re.DOTALL lets '.' span newlines, since the form is multi-line XML;
    # the non-greedy quantifier stops at the first closing tag.
    found = re.search(r'(<workflow>.*?</workflow>)', text, re.DOTALL)
    if not found:
        raise WorkflowFormParseError("The workflow XML form is not correct. The workflow XML form should be wrapped by <workflow>...</workflow> tags.")
    return found.group(1)
def parse_workflow_form(xml_content: str) -> Union[WorkflowForm, str]:
    """
    Parse a workflow form from text containing a <workflow> XML block.

    Args:
        xml_content: Text that should contain <workflow>...</workflow>.

    Returns:
        The parsed WorkflowForm on success.  On failure, returns a
        human-readable error string (intended to be fed back to the
        form-generating agent as a correction prompt) — it does NOT
        return None or raise.
    """
    try:
        workflow_content = extract_workflow_content(xml_content)
        return XMLParser.parse_xml(workflow_content)
    except WorkflowFormParseError as e:
        return f"The Error to extract workflow content: {e}"
    except WorkflowConstraintError as e:
        return f"The generated workflow form MUST meet all the constraints in the given instructions, but the constraints are not met: {e}"
    except ET.ParseError as e:
        return f"The Error parsing XML workflow form: {e}"
    except Exception as e:
        # Catch-all so the caller always gets a string diagnostic back.
        return f"Unexpected error: {e}"
# Usage example
if __name__ == "__main__":
    # Ad-hoc developer test: parse a sample form and dump it as JSON.
    # NOTE(review): paths below are hard-coded to one developer's machine.
    import json
    with open("/Users/tangjiabin/Documents/reasoning/metachain/metachain/agents/meta_agent/workflow_form/condition_mining.xml", 'r', encoding='utf-8') as f:
        xml_content = f.read()
    form = parse_workflow_form(xml_content)
    print(form)
    # NOTE(review): parse_workflow_form returns a str on failure, in which
    # case model_dump() below would raise AttributeError — confirm intended.
    with open("/Users/tangjiabin/Documents/reasoning/metachain/metachain/agents/meta_agent/workflow_form/condition_mining.json", 'w', encoding='utf-8') as f:
        json.dump(form.model_dump(), f, indent=4)
    workflow_form = form.model_dump()

View file

@ -0,0 +1,40 @@
from metachain.types import Agent
from metachain.tools import (
get_api_plugin_tools_doc, check_agent, check_tool
)
from metachain.registry import register_agent
@register_agent(name = "Plan Agent", func_name="get_plan_agent")
def get_plan_agent(model: str):
    """Build the Plan Agent: surveys existing agents/tools via check_agent /
    check_tool and produces a coding plan before handing off to the
    'Programming Triage Agent'.

    Args:
        model: LLM identifier passed through to the Agent.
    """
    def instructions(context_variables):
        # Prompt is rebuilt on every turn so it can embed the current working_dir.
        working_dir = context_variables.get("working_dir", None)
        return \
f"""You are a planner working on an agent project named 'metachain' which can generate a coding plan for a given user request.
I want to use existing project code to solve the task. You should use the tools `check_agent` and `check_tool` to carefully go through the existing code to find out whether you should develop a new agent or new tool.
After you have checked the existing code, you should give a detailed plan for developing agents to solve the task based on the existing code, and ask user to confirm or modify the plan.
Finally, after user confirms the plan, you should generate the final coding plan and output it, and transfer the conversation to the 'Programming Triage Agent' to use the plan to execute the task util you finish the task, otherwise I will lose a lot of money.
Follow the following rules to develop new tools:
1. If you want to create new tools, you should first create a new file in the `metachain/metachain/tools` directory, write the function, and then add the function to the `metachain/metachain/tools/__init__.py`. Note that when add new tools into `__init__.py`, you first read the file content and keep the original content, then add the new tools into the file.
2. The tool is python functions.
3. When developing a new tool, you should follow the coding style of the existing tools, which means you should write docstring for the function, and add some useful comments to explain the code.
4. Function should usually return a `str` (values will be attempted to be cast as a `str`).
5. If you need to develop a new tool through external API, you should use `get_api_plugin_tools_doc` tool to get the tool doc, such as websearch, news search, financial tools, etc, otherwise you should develop a new tool by yourself.
6. If you need to develop a new tool related to vector database, you should use the pre-built class `Memory` in `/{working_dir}/metachain/metachain/memory/rag_memory.py` to save and retrieve the data.
Follow the following instructions to develop new agents:
1. If you want to create new agents, you should first create a new file in the `metachain/metachain/agents` directory, write the function `get_xxx_agent(model: str)`, and then add the function to the `metachain/metachain/agents/__init__.py`. Note that when add new agents into `__init__.py`, you first read the file content and keep the original content, then add the new agents into the file.
Note that your plan should fit the given rules.
"""
    return Agent(
        name="Plan Agent",
        model=model,
        instructions=instructions,
        functions=[check_agent, check_tool],
        parallel_tool_calls = False
    )

View file

@ -0,0 +1,103 @@
from metachain.types import Agent
from metachain.tools import (
gen_code_tree_structure, execute_command, read_file, create_file, write_file, list_files, create_directory, run_python, code_rag, case_resolved, get_api_plugin_tools_doc
)
from metachain.util import make_message, make_tool_message
from metachain.registry import register_agent
def examples(context_variables):
    """Few-shot example conversation (a Flask demo) showing the Programming
    Agent's intended tool-use loop: create file -> run -> hit missing
    dependency -> install -> re-run -> report success.

    Returns a flat list of message dicts built via make_message /
    make_tool_message.
    """
    working_dir = context_variables.get("working_dir", None)
    examples_list = []
    examples_list.extend(make_message('user', "Create a list of numbers from 1 to 10, and display them in a web page at port 5000."))
    examples_list.extend(make_message('assistant', "I should first use create_file to write the python code into a file named 'app.py' for starting a web server"))
    examples_list.extend(make_tool_message(create_file, {'path': f"/{working_dir}/metachain/app.py",
    'content': """
from flask import Flask
app = Flask(__name__)
@app.route('/')
def index():
    numbers = list(range(1, 11))
    return str(numbers)
if __name__ == '__main__':
    app.run(port=5000)"""}, f"File created at: /{working_dir}/metachain/app.py"))
    examples_list.extend(make_message('assistant', 'I have created a Python file `app.py` that will display a list of numbers from 1 to 10 when you run it. Let me run the Python file for you using `run_python`'))
    examples_list.extend(make_tool_message(run_python, {'code_path': f"/{working_dir}/metachain/app.py"}, f"""
Traceback (most recent call last):
  File "/{working_dir}/metachain/app.py", line 2, in <module>
    from flask import Flask
ModuleNotFoundError: No module named 'flask'"""))
    examples_list.extend(make_message('assistant', "It seems that Flask is not installed. Let me install Flask for you using `execute_command` by the command: pip install flask"))
    examples_list.extend(make_tool_message(execute_command, {'command': 'pip install flask'}, """Defaulting to user installation because normal site-packages is not writeable
Collecting flask
  Using cached flask-3.0.3-py3-none-any.whl (101 kB)
Collecting blinker>=1.6.2
  Using cached blinker-1.7.0-py3-none-any.whl (13 kB)
Collecting Werkzeug>=3.0.0
  Using cached werkzeug-3.0.2-py3-none-any.whl (226 kB)
Collecting click>=8.1.3
  Using cached click-8.1.7-py3-none-any.whl (97 kB)
Collecting itsdangerous>=2.1.2
  Using cached itsdangerous-2.2.0-py3-none-any.whl (16 kB)
Requirement already satisfied: Jinja2>=3.1.2 in /home/openhands/.local/lib/python3.10/site-packages (from flask) (3.1.3)
Requirement already satisfied: MarkupSafe>=2.0 in /home/openhands/.local/lib/python3.10/site-packages (from Jinja2>=3.1.2->flask) (2.1.5)
Installing collected packages: Werkzeug, itsdangerous, click, blinker, flask
Successfully installed Werkzeug-3.0.2 blinker-1.7.0 click-8.1.7 flask-3.0.3 itsdangerous-2.2.0"""))
    examples_list.extend(make_message('assistant', "Now that Flask is installed, let me run the Python file again using `run_python`"))
    examples_list.extend(make_tool_message(run_python, {'code_path': f"/{working_dir}/metachain/app.py"}, """* Serving Flask app 'app'
 * Debug mode: off
WARNING: This is a development server. Do not use it in a production deployment. Use a production WSGI server instead.
 * Running on http://127.0.0.1:5000
Press CTRL+C to quit"""))
    examples_list.extend(make_message('assistant', """The server is running on port 5000 with PID 124. You can access the list of numbers by visiting http://127.0.0.1:5000. Your task is completed."""))
    return examples_list
@register_agent(name = "Programming Agent", func_name="get_programming_agent")
def get_programming_agent(model: str):
    """Build the Programming Agent that extends and runs the 'metachain'
    project itself (create/read/write files, run python, code search).

    Args:
        model: LLM identifier passed through to the Agent.
    """
    def instructions(context_variables):
        # Prompt is rebuilt per turn to embed working_dir and optional how-to guides.
        working_dir = context_variables.get("working_dir", None)
        # SECURITY(review): the prompt below embeds a hardcoded OpenAI API key.
        # Rotate this key and inject it via environment/config instead of source.
        ret_instructions = \
f"""You are a programmer working on an agent project named 'metachain' whose path is /{working_dir}/metachain.
Your can leverage your capabilities by using the specific functions listed below:
1. Creating project structures based on the user requirement using function `create_directory`.
2. Writing clean, efficient, and well-documented code using function `create_file` and `write_file`.
3. You must run python scripts using function `run_python` rather than using the `execute_command` function.
4. Exam the project to re-use the existing code snippets as much as possible, you may need to use
functions like `list_files`, `read_file` and `write_file`.
5. Writing the code into the file when creating new files, do not create empty files.
6. If you are required to code base on the specific directory, you can use function `code_rag` to search the relatd codes in the specific directory, and remember you could only search one thing (like a function name, a class name, a variable name, etc.) in the codebase at a time.
7. Before you write code into the existing files, you should first read the file content using function `read_file` and reserve the original content as much as possible.
8. Decide whether the task requires execution and debugging before moving to the next or not.
9. Generate the commands to run and test the current task, and the dependencies list for this task.
10. You only write Python scripts, don't write Jupiter notebooks which require interactive execution.
11. Note that every path you read, write, or search should be the absolute path (starting with '/').
Your task is using existing project to create agents to complete the user request.
If the existing tools or agents are not enough for your task, you should develop new tools or agents.
Follow the following routine:
1. If there is enough pre-built tools and agents, create a python script in the `/{working_dir}/metachain` folder to run the agent to complete the user request.
2. If you need to develop new tools, create a new tool in the `/{working_dir}/metachain/metachain/tools` folder.
3. If you need to develop new agents, create a new agent in the `/{working_dir}/metachain/metachain/agents` folder.
4. Create a python script in the `/{working_dir}/metachain` folder to run the new agent to complete the user request.
Note that if you need OPENAI_API_KEY, my key is: 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
"""
        # Optional extra guidance supplied by the caller is appended verbatim.
        how_to_guides = context_variables.get("how_to_guides", None)
        if how_to_guides:
            ret_instructions += \
f"""
If you want to develop new tools or agents, you should follow the following guides:
{how_to_guides}
"""
        return ret_instructions
    return Agent(
        name="Programming Agent",
        model=model,
        instructions=instructions,
        functions=[gen_code_tree_structure, execute_command, read_file, create_file, write_file, list_files, create_directory, run_python, code_rag, get_api_plugin_tools_doc],
        # examples=examples,
        tool_choice = "auto",
        parallel_tool_calls = False
    )

View file

@ -0,0 +1,415 @@
from metachain.types import Agent
from metachain.tools import (
gen_code_tree_structure, execute_command, read_file, create_file, write_file, list_files, create_directory, run_python, code_rag, case_resolved, get_api_plugin_tools_doc
)
from metachain.util import make_message, make_tool_message
from metachain.registry import register_agent
@register_agent(name = "Tool Creation Agent", func_name="get_tool_creation_agent")
def get_tool_creation_agent(model: str):
    """Build the Tool Creation Agent, which writes and tests new tool
    functions under metachain/tools, then hands back to the triage agent.

    Args:
        model: LLM identifier passed through to the Agent.
    """
    def tool_creation_instructions(context_variables):
        # Prompt is rebuilt per turn so it can embed the current working_dir.
        working_dir = context_variables.get("working_dir", None)
        # SECURITY(review): hardcoded OpenAI API key embedded in the prompt below.
        # Rotate it and supply it via environment configuration instead.
        return \
f"""You are working on an agent project named 'metachain' whose path is /{working_dir}/metachain.
Your task is to develop new tools in the `/{working_dir}/metachain/metachain/tools` folder.
Follow the following instructions to develop new tools:
1. If you want to create new tools, you should first create a new file in the `metachain/metachain/tools` directory, write the function, and then add the function to the `metachain/metachain/tools/__init__.py`. Note that when add new tools into `__init__.py`, you first read the file content and keep the original content, then add the new tools into the file.
2. The tool is python functions.
3. When developing a new tool, you should follow the coding style of the existing tools, which means you should write docstring for the function, and add some useful comments to explain the code.
4. Function should usually return a `str` (values will be attempted to be cast as a `str`).
5. If there is any error during the development process, you should use tools to debug the error and fix the error, and you should not transfer the conversation back to the 'Programming Triage Agent' util the error is fixed.
6. If you need to develop a new tool through external API, you should use `get_api_plugin_tools_doc` tool to get the tool doc, such as websearch, news search, financial tools, etc, otherwise you should develop a new tool by yourself.
7. If you need to develop a new tool related to vector database, you should use the pre-built class `Memory` in `/{working_dir}/metachain/metachain/memory/rag_memory.py` to save and retrieve the data.
8. You can add `if __name__ == "__main__":` at the end of the function file to make sure the function can be executed, and after testing all functions you should develop, using `transfer_back_to_programming_triage_agent` function to transfer the conversation back to the 'Programming Triage Agent', note that you should not transfer the conversation back to the 'Programming Triage Agent' util you finish the your task that is to develop all the tools and make sure they can be executed.
Note that if you need OPENAI_API_KEY, my key is: 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
"""
    return Agent(
        name="Tool Creation Agent",
        model=model,
        instructions=tool_creation_instructions,
        functions=[gen_code_tree_structure, execute_command, read_file, create_file, write_file, list_files, create_directory, run_python, code_rag, get_api_plugin_tools_doc],
        tool_choice = "auto",
        parallel_tool_calls = False
    )
@register_agent(name = "Agent Creation Agent", func_name="get_agent_creation_agent")
def get_agent_creation_agent(model: str):
    """Build the Agent Creation Agent, which writes new agent modules under
    metachain/agents (without running them) and hands back to triage.

    Args:
        model: LLM identifier passed through to the Agent.
    """
    def agent_creation_instructions(context_variables):
        # Prompt is rebuilt per turn so it can embed the current working_dir.
        # The trailing raw string appends a static markdown guide on the
        # Agent primitive; it is raw so the ```python fences and braces are
        # emitted literally.
        working_dir = context_variables.get("working_dir", None)
        return \
f"""You are working on an agent project named 'metachain' whose path is /{working_dir}/metachain.
Your task is to develop new agents in the `/{working_dir}/metachain/metachain/agents` folder.
Follow the following instructions to develop new agents:
1. If you want to create new agents, you should first create a new file in the `metachain/metachain/agents` directory, write the function `get_xxx_agent(model: str)`, and then add the function to the `metachain/metachain/agents/__init__.py`. Note that when add new agents into `__init__.py`, you first read the file content and keep the original content, then add the new agents into the file.
2. In this stage, you should not run the agent, you should only develop the agent.
3. You may need to develop more than one agent, and in this stage you should not concern the relationship between agents.
4. After developing a new agent, you should use `transfer_back_to_programming_triage_agent` function to transfer the conversation back to the 'Programming Triage Agent', note that you should not transfer the conversation back to the 'Programming Triage Agent' util you finish the your task that is to develop all the agents.
And there is a guide for you to follow:
"""+\
r"""An `Agent` simply encapsulates a set of `instructions` with a set of `functions` (plus some additional settings below), and has the capability to hand off execution to another `Agent`.
While it's tempting to personify an `Agent` as "someone who does X", it can also be used to represent a very specific workflow or step defined by a set of `instructions` and `functions` (e.g. a set of steps, a complex retrieval, single step of data transformation, etc). This allows `Agent`s to be composed into a network of "agents", "workflows", and "tasks", all represented by the same primitive.
### `Agent` Fields
| Field | Type | Description | Default |
| ---------------- | ------------------------ | ------------------------------------------------------------ | ---------------------------- |
| **name** | `str` | The name of the agent. | `"Agent"` |
| **model** | `str` | The model to be used by the agent. | `"gpt-4o"` |
| **instructions** | `str` or `func() -> str` | Instructions for the agent, can be a string or a callable returning a string. | `"You are a helpful agent."` |
| **functions** | `List` | A list of functions that the agent can call. | `[]` |
| **tool_choice** | `str` | The tool choice for the agent, if any. | `None` |
#### Instructions
`Agent` `instructions` are directly converted into the `system` prompt of a conversation (as the first message). Only the `instructions` of the active `Agent` will be present at any given time (e.g. if there is an `Agent` handoff, the `system` prompt will change, but the chat history will not.)
```python
agent = Agent(
   instructions="You are a helpful agent."
)
```
The `instructions` can either be a regular `str`, or a function that returns a `str`. The function can optionally receive a `context_variables` parameter, which will be populated by the `context_variables` passed into `client.run()`.
```python
def instructions(context_variables):
   user_name = context_variables["user_name"]
   return f"Help the user, {user_name}, do whatever they want."
agent = Agent(
   instructions=instructions
)
response = client.run(
   agent=agent,
   messages=[{"role":"user", "content": "Hi!"}],
   context_variables={"user_name":"John"}
)
print(response.messages[-1]["content"])
```
```
Hi John, how can I assist you today?
```
"""
    return Agent(
        name="Agent Creation Agent",
        model=model,
        instructions=agent_creation_instructions,
        functions=[gen_code_tree_structure, execute_command, read_file, create_file, write_file, list_files, create_directory, run_python, code_rag],
        tool_choice = "auto",
        parallel_tool_calls = False
    )
@register_agent(name = "Workflow Run Agent", func_name="get_workflow_run_agent")
def get_workflow_run_agent(model: str):
    """Build the Workflow Run Agent, which writes a run_xxx_workflow.py
    script that instantiates a Graph/FlowEngine and executes the workflow.

    Args:
        model: LLM identifier passed through to the Agent.
    """
    def workflow_run_instructions(context_variables):
        # Prompt is rebuilt per turn so it can embed the current working_dir.
        working_dir = context_variables.get("working_dir", None)
        # SECURITY(review): the example below embeds a hardcoded OpenAI API key.
        # Rotate it and read it from the environment instead of source control.
        #
        # NOTE(review): instructions 2-6 below live inside a RAW (non-f)
        # string, so `{working_dir}` there is emitted literally to the model
        # instead of being substituted — confirm this is intended.
        return \
f"""You are working on an agent project named 'metachain' whose path is /{working_dir}/metachain.
Your task is to run workflows to complete the user request.
Follow the following instructions to run workflows:
1. The workflow is a directed graph represented by a dictionary, with the format:
""" +\
r"""
{
    "type": "object",
    "properties": {
        "nodes": {
            "type": "array",
            "items": {
                "type": "object",
                "properties": {
                    "agent_name": {"type": "string"},
                    "agent_tools": {"type": "array", "items": {"type": "string"}},
                    "input": {"type": "string"},
                    "output": {"type": "string"},
                    "is_start": {"type": "boolean"},
                    "is_end": {"type": "boolean"}
                },
                "required": ["agent_name", "agent_tools", "input", "output", "is_start", "is_end"],
                "additionalProperties": False
            }
        },
        "edges": {
            "type": "array",
            "items": {
                "type": "object",
                "properties": {
                    "start": {"type": "string"},
                    "end": {"type": "string"},
                    "description": {"type": "string"}
                },
                "required": ["start", "end", "description"],
                "additionalProperties": False
            }
        }
    },
    "required": ["nodes", "edges"],
    "additionalProperties": False
}
2. First create a python script named `run_xxx_workflow.py` in the `/{working_dir}/metachain` directory, and the workflow graph should be instantiated by `Graph` class in `metachain/metachain/workflow/flowgraph.py`, using `Graph.from_dict()` method.
3. After instantiating the workflow graph, you should use `FlowEngine` class in `metachain/metachain/workflow/flowengine.py`, using `FlowEngine(g = g, model=model)` to instantiate the workflow engine.
4. Then you can use `engine.run_meta(query, context_variables = context_variables, debug = True)` to run the workflow
5. After running the workflow, you should tell the 'Programming Triage Agent' final running results and use `transfer_back_to_programming_triage_agent` function to transfer the conversation back to the 'Programming Triage Agent'.
6. If there is any error during the running process, you should use tools to debug the error and fix the error, and you should not transfer the conversation back to the 'Programming Triage Agent' util the error is fixed.
"""+\
r"""
There is an example to run a workflow based on the 'metachain' project:
```python
from metachain.workflow import Graph, FlowEngine
from metachain.types import Response
import os
os.environ["OPENAI_API_KEY"] = "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"
model = 'gpt-4o-2024-08-06'
workflow_dict = {
    "nodes": [
        {
            "agent_name": "user_request",
            "agent_tools": [],
            "input": "PDF file",
            "output": "PDF file",
            "is_start": True,
            "is_end": False
        },
        {
            "agent_name": "read_pdf_agent",
            "agent_tools": [
                "read_pdf"
            ],
            "input": "PDF file",
            "output": "Extracted text",
            "is_start": False,
            "is_end": False
        },
        {
            "agent_name": "chunk_text_agent",
            "agent_tools": [
                "chunk_text"
            ],
            "input": "Extracted text",
            "output": "Chunked text",
            "is_start": False,
            "is_end": False
        },
        {
            "agent_name": "vectordb_agent",
            "agent_tools": [
                "vectordb_save"
            ],
            "input": "Chunked text",
            "output": "Text saved to VectorDB",
            "is_start": False,
            "is_end": False
        },
        {
            "agent_name": "retrieve_vectordb_agent",
            "agent_tools": [
                "retrieve_vectordb"
            ],
            "input": "Text saved to VectorDB",
            "output": "Method section text",
            "is_start": False,
            "is_end": False
        },
        {
            "agent_name": "output",
            "agent_tools": [],
            "input": "Method section text",
            "output": "Description of Method section",
            "is_start": False,
            "is_end": True
        }
    ],
    "edges": [
        {
            "start": "user_request",
            "end": "read_pdf_agent",
            "description": "Send PDF to be read."
        },
        {
            "start": "read_pdf_agent",
            "end": "chunk_text_agent",
            "description": "Send extracted text for chunking."
        },
        {
            "start": "chunk_text_agent",
            "end": "vectordb_agent",
            "description": "Save chunked text to VectorDB."
        },
        {
            "start": "vectordb_agent",
            "end": "retrieve_vectordb_agent",
            "description": "Retrieve Method section."
        },
        {
            "start": "retrieve_vectordb_agent",
            "end": "output",
            "description": "Output of Method section text."
        }
    ]
}
g = Graph.from_dict(workflow_dict)
engine = FlowEngine(g = g, model=model)
query = 'I have a paper in the pdf format, and I want to know what the method section is about.'
context_variables = {}
response: Response = engine.run_meta(query, context_variables = context_variables, debug = True)
print(response.messages[-1]['content'])
```
"""
    return Agent(
        name="Workflow Run Agent",
        model=model,
        instructions=workflow_run_instructions,
        functions=[gen_code_tree_structure, execute_command, read_file, create_file, write_file, list_files, create_directory, run_python, code_rag],
        tool_choice = "auto",
        parallel_tool_calls = False
    )
@register_agent(name = "Agent Run Agent", func_name="get_agent_run_agent")
def get_agent_run_agent(model: str):
    """Build the 'Agent Run Agent'.

    The agent writes `run_xxx_agent.py` scripts inside the 'metachain' project and
    executes them with `run_python` to complete the user request, debugging any
    failures before transferring control back to the 'Programming Triage Agent'.

    Args:
        model: Name of the LLM that backs the agent.

    Returns:
        Agent: configured with coding/terminal tools, auto tool choice and
        per-call instructions that embed the current working directory.
    """
    def agent_run_instructions(context_variables):
        # Rebuilt on every call so the prompt always reflects the current working_dir.
        working_dir = context_variables.get("working_dir", None)
        # SECURITY FIX: a hard-coded OpenAI API key was previously embedded in this
        # prompt. It has been removed (the leaked key must be rotated/revoked) and the
        # agent is instructed to read the key from the environment instead.
        return \
f"""You are working on an agent project named 'metachain' whose path is /{working_dir}/metachain.
Your task is to run agents to complete the user request.
Follow the following instructions to run agents:
1. To complete the user request using 'metachain' project, you need to run the agent by creating a python file named `run_xxx_agent.py` in the 'metachain' directory, and use `run_python` function to run the agent.
2. If there is any error during the running process, you should use tools to debug the error and fix the error, and you should not transfer the conversation back to the 'Programming Triage Agent' util the error is fixed.
3. After running the agent, you should tell the 'Programming Triage Agent' final running results and use `transfer_back_to_programming_triage_agent` function to transfer the conversation back to the 'Programming Triage Agent', note that you should not transfer the conversation back to the 'Programming Triage Agent' util you finish the your task that is to run all the agents.
Note that if you need OPENAI_API_KEY, read it from the `OPENAI_API_KEY` environment variable (e.g. `os.environ["OPENAI_API_KEY"]`); NEVER hard-code API keys in code, files or prompts.
And there is a guide for you to follow:
"""+\
r"""
```python
from metachain import MetaChain
from metachain.agents import get_programming_agent
client = MetaChain()
programming_agent = get_programming_agent(model)
context_variables = {"key": value}
messages = [{"role": "user", "content": task_instructions}]
response = client.run(agent=programming_agent, messages=messages, context_variables=context_variables, debug=True)
```
### `client.run()`
MetaChain's `run()` function is analogous to the `chat.completions.create()` function in the Chat Completions API - it takes `messages` and returns `messages` and saves no state between calls. Importantly, however, it also handles Agent function execution, hand-offs, context variable references, and can take multiple turns before returning to the user.
At its core, MetaChain's `client.run()` implements the following loop:
1. Get a completion from the current Agent
2. Execute tool calls and append results
3. Switch Agent if necessary
4. Update context variables, if necessary
5. If no new function calls, return
#### Arguments
| Argument | Type | Description | Default |
| --------------------- | ------- | ------------------------------------------------------------ | -------------- |
| **agent** | `Agent` | The (initial) agent to be called. | (required) |
| **messages** | `List` | A list of message objects, identical to [Chat Completions `messages`](https://platform.openai.com/docs/api-reference/chat/create#chat-create-messages) | (required) |
| **context_variables** | `dict` | A dictionary of additional context variables, available to functions and Agent instructions | `{}` |
| **max_turns** | `int` | The maximum number of conversational turns allowed | `float("inf")` |
| **model_override** | `str` | An optional string to override the model being used by an Agent | `None` |
| **execute_tools** | `bool` | If `False`, interrupt execution and immediately returns `tool_calls` message when an Agent tries to call a function | `True` |
| **stream** | `bool` | If `True`, enables streaming responses | `False` |
| **debug** | `bool` | If `True`, enables debug logging | `False` |
Once `client.run()` is finished (after potentially multiple calls to agents and tools) it will return a `Response` containing all the relevant updated state. Specifically, the new `messages`, the last `Agent` to be called, and the most up-to-date `context_variables`. You can pass these values (plus new user messages) in to your next execution of `client.run()` to continue the interaction where it left off much like `chat.completions.create()`. (The `run_demo_loop` function implements an example of a full execution loop in `/MetaChain/repl/repl.py`.)
#### `Response` Fields
| Field | Type | Description |
| --------------------- | ------- | ------------------------------------------------------------ |
| **messages** | `List` | A list of message objects generated during the conversation. Very similar to [Chat Completions `messages`](https://platform.openai.com/docs/api-reference/chat/create#chat-create-messages), but with a `sender` field indicating which `Agent` the message originated from. |
| **agent** | `Agent` | The last agent to handle a message. |
| **context_variables** | `dict` | The same as the input variables, plus any changes. |
"""
    return Agent(
        name="Agent Run Agent",
        model=model,
        instructions=agent_run_instructions,
        functions=[gen_code_tree_structure, execute_command, read_file, create_file, write_file, list_files, create_directory, run_python, code_rag],
        tool_choice = "auto",
        parallel_tool_calls = False
    )
@register_agent(name = "Programming Triage Agent", func_name="get_programming_triage_agent")
def get_programming_triage_agent(model: str):
    """Build the 'Programming Triage Agent'.

    The triage agent routes the conversation between four sub-agents (tool creation,
    agent creation, workflow run, agent run); each sub-agent gets a
    `transfer_back_to_programming_triage_agent` tool appended so it can hand control
    back once its sub-task is finished.

    Args:
        model: Name of the LLM that backs the triage agent and all four sub-agents.

    Returns:
        Agent: the wired triage agent.
    """
    def programming_triage_instructions(context_variables):
        # Instructions embed the current working_dir and, when supplied via
        # context_variables, extra "how_to_guides" for tool/agent development.
        working_dir = context_variables.get("working_dir", None)
        ret_instructions = \
f"""You are a programmer working on an agent project named 'metachain' whose path is /{working_dir}/metachain.
Your overall task is using existing project to create agents or workflows to complete the user request.
If the existing tools or agents are not enough for your task, you should develop new tools or agents.
And you should determine which agent is best suited to handle the user's request, and transfer the conversation to that agent based on the following routine:
1. If you need to develop new tools, transfer the conversation to the 'Tool Creation Agent' to create a new tool in the `/{working_dir}/metachain/metachain/tools` folder using function `transfer_to_tool_creation_agent`.
2. If you need to develop new agents, transfer the conversation to the 'Agent Creation Agent' to create a new agent in the `/{working_dir}/metachain/metachain/agents` folder using function `transfer_to_agent_creation_agent`.
3. After there is enough pre-built tools and agents, transfer the conversation to the 'Agent Run Agent' or 'Workflow Run Agent' to create agents or workflows to complete the user request using function `transfer_to_agent_run_agent` or `transfer_to_workflow_run_agent`.
4. Note that if you should create both new tools and new agents, you should create the new tools first, and then create the new agents.
Note that if there are not enough pre-built tools, you should develop new tools first, and then develop new agents, and finally run the workflow or agent to complete the user request.
Once you receive the develop plan, you should not stop util you finish the task.
"""
        how_to_guides = context_variables.get("how_to_guides", None)
        if how_to_guides:
            ret_instructions += \
f"""
If you want to develop new tools or agents, you should follow the following guides:
{how_to_guides}
"""
        return ret_instructions
    # Sub-agents are instantiated once per triage agent; the transfer_* closures
    # below are exposed to the LLM as hand-off tools.
    tool_creation_agent = get_tool_creation_agent(model)
    agent_creation_agent = get_agent_creation_agent(model)
    workflow_run_agent = get_workflow_run_agent(model)
    agent_run_agent = get_agent_run_agent(model)
    def transfer_to_tool_creation_agent(input: str):
        """Hand the conversation to the 'Tool Creation Agent'."""
        return tool_creation_agent
    def transfer_to_agent_creation_agent(input: str):
        """Hand the conversation to the 'Agent Creation Agent'."""
        return agent_creation_agent
    def transfer_to_workflow_run_agent(input: str):
        """Hand the conversation to the 'Workflow Run Agent'."""
        return workflow_run_agent
    def transfer_to_agent_run_agent(input: str):
        """Hand the conversation to the 'Agent Run Agent'."""
        return agent_run_agent
    programming_triage_agent = Agent(
        name="Programming Triage Agent",
        model=model,
        instructions=programming_triage_instructions,
        functions=[transfer_to_tool_creation_agent, transfer_to_agent_creation_agent, transfer_to_workflow_run_agent, transfer_to_agent_run_agent],
        tool_choice = "auto",
        parallel_tool_calls = False
    )
    def transfer_back_to_programming_triage_agent():
        """Call this function if the existing agent has already finished the sub-task."""
        return programming_triage_agent
    # Give every sub-agent a way to return control to the triage agent.
    tool_creation_agent.functions.append(transfer_back_to_programming_triage_agent)
    agent_creation_agent.functions.append(transfer_back_to_programming_triage_agent)
    workflow_run_agent.functions.append(transfer_back_to_programming_triage_agent)
    agent_run_agent.functions.append(transfer_back_to_programming_triage_agent)
    return programming_triage_agent
if __name__ == "__main__":
    # NOTE(review): `agent_creation_instructions` is not defined at module scope in this
    # view — it appears to be a closure inside an agent factory, so this smoke test
    # would raise NameError. Confirm, and call it through its factory if so.
    print(agent_creation_instructions({"working_dir": "metachain"}))

View file

@ -0,0 +1,43 @@
from metachain.types import Agent
from metachain.registry import register_agent
from metachain.tools import open_local_file, page_up_markdown, page_down_markdown, find_on_page_ctrl_f, find_next, visual_question_answering
from metachain.tools.file_surfer_tool import with_env
from metachain.environment.markdown_browser import RequestsMarkdownBrowser
import time
from inspect import signature
from constant import LOCAL_ROOT, DOCKER_WORKPLACE_NAME
@register_agent(name = "File Surfer Agent", func_name="get_filesurfer_agent")
def get_filesurfer_agent(model: str = "gpt-4o", **kwargs):
    """Build the 'File Surfer Agent', which opens local files as markdown and
    navigates/searches them page by page.

    Args:
        model: Name of the backing LLM (defaults to "gpt-4o").
        **kwargs: Accepted for factory-signature uniformity; not used here.

    Returns:
        Agent: configured with file-browsing tools and required tool choice.
    """
    def handle_mm_func(tool_name, tool_args):
        # Follow-up message injected after a multimodal tool call so the model
        # answers the prepared question from the opened image.
        return f"After using tool `{tool_name}({tool_args})`, I have opened the image I want to see and prepared a question according to the image. Please answer the question based on the image."
    def instructions(context_variables):
        # The markdown-browser environment must be supplied by the caller at run time.
        file_env: RequestsMarkdownBrowser = context_variables.get("file_env", None)
        assert file_env is not None, "file_env is required"
        return \
f"""
You are a file surfer agent that can handle local files.
You can only access the files in the folder `{file_env.docker_workplace}` and when you want to open a file, you should use absolute path from root like `{file_env.docker_workplace}/...`.
Note that `open_local_file` can read a file as markdown text and ask questions about it. And `open_local_file` can handle the following file extensions: [".html", ".htm", ".xlsx", ".pptx", ".wav", ".mp3", ".flac", ".pdf", ".docx"], and all other types of text files.
But IT DOES NOT HANDLE IMAGES, you should use `visual_question_answering` to see the image.
If the converted markdown text has more than 1 page, you can use `page_up`, `page_down`, `find_on_page_ctrl_f`, `find_next` to navigate through the pages.
When you think you have completed the task the `System Triage Agent` asked you to do, you should use `transfer_back_to_triage_agent` to transfer the conversation back to the `System Triage Agent`. And you should not stop to try to solve the user's request by transferring to `System Triage Agent` only until the task is completed.
If you are unable to open the file, you can transfer the conversation back to the `System Triage Agent`, and let the `Coding Agent` try to solve the problem by coding.
"""
    tool_list = [open_local_file, page_up_markdown, page_down_markdown, find_on_page_ctrl_f, find_next, visual_question_answering]
    return Agent(
        name="File Surfer Agent",
        model=model,
        instructions=instructions,
        functions=tool_list,
        handle_mm_func=handle_mm_func,
        tool_choice = "required",
        parallel_tool_calls = False
    )

View file

@ -0,0 +1,98 @@
from metachain.types import Agent
from metachain.tools import (
gen_code_tree_structure, execute_command, read_file, create_file, write_file, list_files, create_directory, run_python, terminal_page_up, terminal_page_down, terminal_page_to
)
from metachain.util import make_message, make_tool_message
from metachain.registry import register_agent, register_plugin_agent
from constant import LOCAL_ROOT, DOCKER_WORKPLACE_NAME
from metachain.environment import DockerEnv, BrowserEnv, LocalEnv
from typing import Union
from inspect import signature
def examples(context_variables):
    """Few-shot example conversation for the Coding Agent.

    Demonstrates a full cycle: create a Flask app with `create_file`, hit a
    missing-dependency error when running it, install the dependency with
    `execute_command`, and re-run successfully. Returns a flat list of chat
    messages built with make_message / make_tool_message.
    """
    working_dir = context_variables.get("working_dir", None)
    examples_list = []
    # User request.
    examples_list.extend(make_message('user', "Create a list of numbers from 1 to 10, and display them in a web page at port 5000."))
    # Assistant plans to create the app file.
    examples_list.extend(make_message('assistant', "I should first use create_file to write the python code into a file named 'app.py' for starting a web server"))
    # Tool call: write the Flask app.
    examples_list.extend(make_tool_message(create_file, {'path': f"/{working_dir}/app.py",
                                                         'content': """
from flask import Flask
app = Flask(__name__)
@app.route('/')
def index():
    numbers = list(range(1, 11))
    return str(numbers)
if __name__ == '__main__':
    app.run(port=5000)"""}, f"File created at: /{working_dir}/app.py"))
    examples_list.extend(make_message('assistant', 'I have created a Python file `app.py` that will display a list of numbers from 1 to 10 when you run it. Let me run the Python file for you using `run_python`'))
    # First run fails: flask is not installed.
    examples_list.extend(make_tool_message(run_python, {'code_path': f"/{working_dir}/app.py"}, f"""
Traceback (most recent call last):
  File "/{working_dir}/app.py", line 2, in <module>
    from flask import Flask
ModuleNotFoundError: No module named 'flask'"""))
    examples_list.extend(make_message('assistant', "It seems that Flask is not installed. Let me install Flask for you using `execute_command` by the command: pip install flask"))
    # Tool call: install the missing dependency.
    examples_list.extend(make_tool_message(execute_command, {'command': 'pip install flask'}, """Defaulting to user installation because normal site-packages is not writeable
Collecting flask
  Using cached flask-3.0.3-py3-none-any.whl (101 kB)
Collecting blinker>=1.6.2
  Using cached blinker-1.7.0-py3-none-any.whl (13 kB)
Collecting Werkzeug>=3.0.0
  Using cached werkzeug-3.0.2-py3-none-any.whl (226 kB)
Collecting click>=8.1.3
  Using cached click-8.1.7-py3-none-any.whl (97 kB)
Collecting itsdangerous>=2.1.2
  Using cached itsdangerous-2.2.0-py3-none-any.whl (16 kB)
Requirement already satisfied: Jinja2>=3.1.2 in /home/openhands/.local/lib/python3.10/site-packages (from flask) (3.1.3)
Requirement already satisfied: MarkupSafe>=2.0 in /home/openhands/.local/lib/python3.10/site-packages (from Jinja2>=3.1.2->flask) (2.1.5)
Installing collected packages: Werkzeug, itsdangerous, click, blinker, flask
Successfully installed Werkzeug-3.0.2 blinker-1.7.0 click-8.1.7 flask-3.0.3 itsdangerous-2.2.0"""))
    examples_list.extend(make_message('assistant', "Now that Flask is installed, let me run the Python file again using `run_python`"))
    # Second run succeeds.
    examples_list.extend(make_tool_message(run_python, {'code_path': f"/{working_dir}/app.py"}, """* Serving Flask app 'app'
* Debug mode: off
WARNING: This is a development server. Do not use it in a production deployment. Use a production WSGI server instead.
* Running on http://127.0.0.1:5000
Press CTRL+C to quit"""))
    examples_list.extend(make_message('assistant', """The server is running on port 5000 with PID 124. You can access the list of numbers by visiting http://127.0.0.1:5000. Your task is completed."""))
    return examples_list
@register_agent(name= "Coding Agent", func_name="get_coding_agent")
@register_plugin_agent(name= "Coding Agent", func_name="get_coding_agent")
def get_coding_agent(model: str, **kwargs):
    """Build the 'Coding Agent', which solves tasks by writing and executing code
    inside the configured code environment.

    Args:
        model: Name of the backing LLM.
        **kwargs: Accepted for factory-signature uniformity; not used here.

    Returns:
        Agent: configured with file/terminal tools, the few-shot `examples`
        conversation, and required tool choice.
    """
    def instructions(context_variables):
        # Falls back to a LocalEnv when no code_env is supplied in context_variables.
        code_env: Union[DockerEnv, LocalEnv] = context_variables.get("code_env", LocalEnv())
        return f"""You are a helpful programming assistant that can write and execute code. You are working in the folder: `{code_env.docker_workplace}`, and you can only access the files in this folder.
Your can leverage your capabilities by using the specific functions listed below:
1. Creating project structures based on the user requirement using function `create_directory`.
2. Writing clean, efficient, and well-documented code using function `create_file` and `write_file`.
3. You must run python scripts using function `run_python` rather than using the `execute_command` function.
4. Exam the project to re-use the existing code snippets as much as possible, you may need to use
functions like `list_files`, `read_file` and `write_file`.
5. Writing the code into the file when creating new files, do not create empty files.
6. Before you write code into the existing files, you should first read the file content using function `read_file` and reserve the original content as much as possible.
7. Decide whether the task requires execution and debugging before moving to the next or not.
8. Generate the commands to run and test the current task, and the dependencies list for this task.
9. You only write Python scripts, don't write Jupiter notebooks which require interactive execution.
10. Note that every path you read, write, or search should be the absolute path (starting with '/').
11. If you should use programming other than Python, you should use the `write_file` function to write the code into a file, and then use the `execute_command` function to run the code.
12. If the terminal output is too long, you should use `terminal_page_up` to move the viewport up, `terminal_page_down` to move the viewport down, `terminal_page_to` to move the viewport to the specific page of terminal where the meaningful content is.
Note that you can use this agent to make complex computation, write a api request, and anything else that can be done by writing code.
When you think you have completed the task the `System Triage Agent` asked you to do, you should use `transfer_back_to_triage_agent` to transfer the conversation back to the `System Triage Agent`. And you should not stop to try to solve the user's request by transferring to `System Triage Agent` only until the task is completed.
[IMPORTANT] You can only complete the task by coding. Talk is cheap, show me the code with tools.
"""
    tool_list = [gen_code_tree_structure, execute_command, read_file, create_file, write_file, list_files, create_directory, run_python, terminal_page_up, terminal_page_down, terminal_page_to]
    return Agent(
        name="Coding Agent",
        model=model,
        instructions=instructions,
        functions=tool_list,
        examples=examples,
        tool_choice = "required",
        parallel_tool_calls = False
    )

View file

@ -0,0 +1,66 @@
from .filesurfer_agent import get_filesurfer_agent
from .programming_agent import get_coding_agent
from .websurfer_agent import get_websurfer_agent
from metachain.registry import register_agent
from metachain.types import Agent, Result
from metachain.tools.inner import case_resolved, case_not_resolved
@register_agent(name = "System Triage Agent", func_name="get_system_triage_agent")
def get_system_triage_agent(model: str, **kwargs):
    """
    This is the `System Triage Agent`, it can help the user to determine which agent is best suited to handle the user's request under the current context, and transfer the conversation to that agent.
    Args:
        model: The model to use for the agent.
        **kwargs: Additional keyword arguments, `file_env`, `web_env` and `code_env` are required.
    """
    # NOTE(review): **kwargs is not forwarded to the sub-agent factories below even
    # though the docstring says file_env/web_env/code_env are required — presumably the
    # environments arrive via context_variables at run time; confirm.
    filesurfer_agent = get_filesurfer_agent(model)
    websurfer_agent = get_websurfer_agent(model)
    coding_agent = get_coding_agent(model)
    instructions = \
f"""You are a helpful assistant that can help the user with their request.
Based on the state of solving user's task, your responsibility is to determine which agent is best suited to handle the user's request under the current context, and transfer the conversation to that agent. And you should not stop to try to solve the user's request by transferring to another agent only until the task is completed.
There are three agents you can transfer to:
1. use `transfer_to_filesurfer_agent` to transfer to {filesurfer_agent.name}, it can help you to open any type of local files and browse the content of them.
2. use `transfer_to_websurfer_agent` to transfer to {websurfer_agent.name}, it can help you to open any website and browse any content on it.
3. use `transfer_to_coding_agent` to transfer to {coding_agent.name}, it can help you to write code to solve the user's request, especially some complex tasks.
"""
    # case_resolved / case_not_resolved are only added when tool calls are mandatory,
    # so the agent always has a terminal action available.
    tool_choice = "required"
    tools = [case_resolved, case_not_resolved] if tool_choice == "required" else []
    system_triage_agent = Agent(
        name="System Triage Agent",
        model=model,
        instructions=instructions,
        functions=tools,
        tool_choice = tool_choice,
        parallel_tool_calls = False,
    )
    def transfer_to_filesurfer_agent(sub_task_description: str):
        """
        Args:
            sub_task_description: The detailed description of the sub-task that the `System Triage Agent` will ask the `File Surfer Agent` to do.
        """
        return Result(value=sub_task_description, agent=filesurfer_agent)
    def transfer_to_websurfer_agent(sub_task_description: str):
        """
        Args:
            sub_task_description: The detailed description of the sub-task that the `System Triage Agent` will ask the `Web Surfer Agent` to do.
        """
        return Result(value=sub_task_description, agent=websurfer_agent)
    def transfer_to_coding_agent(sub_task_description: str):
        """
        Args:
            sub_task_description: The detailed description of the sub-task that the `System Triage Agent` will ask the `Coding Agent` to do.
        """
        return Result(value=sub_task_description, agent=coding_agent)
    def transfer_back_to_triage_agent(task_status: str):
        """
        Args:
            task_status: The detailed description of the task status after a sub-agent has finished its sub-task. A sub-agent can use this tool to transfer the conversation back to the `System Triage Agent` only when it has finished its sub-task.
        """
        return Result(value=task_status, agent=system_triage_agent)
    # Map of sub-agent name -> hand-off tool, plus wiring so every sub-agent can
    # return control to the triage agent.
    system_triage_agent.agent_teams = {
        filesurfer_agent.name: transfer_to_filesurfer_agent,
        websurfer_agent.name: transfer_to_websurfer_agent,
        coding_agent.name: transfer_to_coding_agent
    }
    system_triage_agent.functions.extend([transfer_to_filesurfer_agent, transfer_to_websurfer_agent, transfer_to_coding_agent])
    filesurfer_agent.functions.append(transfer_back_to_triage_agent)
    websurfer_agent.functions.append(transfer_back_to_triage_agent)
    coding_agent.functions.append(transfer_back_to_triage_agent)
    return system_triage_agent

View file

@ -0,0 +1,37 @@
from metachain.types import Agent
from metachain.registry import register_agent
from metachain.tools import click, page_down, page_up, history_back, history_forward, web_search, input_text, sleep, visit_url, get_page_markdown
from metachain.tools.web_tools import with_env
from metachain.environment.browser_env import BrowserEnv
import time
from constant import DOCKER_WORKPLACE_NAME, LOCAL_ROOT
@register_agent(name = "Web Surfer Agent", func_name="get_websurfer_agent")
def get_websurfer_agent(model: str = "gpt-4o", **kwargs):
    """Build the 'Web Surfer Agent', which browses and interacts with web pages
    through the browser environment.

    Args:
        model: Name of the backing LLM (defaults to "gpt-4o").
        **kwargs: Accepted for factory-signature uniformity; not used here.

    Returns:
        Agent: configured with browser tools and required tool choice.
    """
    def handle_mm_func(tool_name, tool_args):
        # Follow-up message injected after an action so the model reasons over the
        # screenshot of the resulting page state.
        return f"After take last action `{tool_name}({tool_args})`, the image of current page is shown below. Please take next action based on the image, the current state of the page as well as previous actions and observations."
    def instructions(context_variables):
        # The browser environment must be supplied by the caller at run time.
        web_env: BrowserEnv = context_variables.get("web_env", None)
        assert web_env is not None, "web_env is required"
        return \
f"""Review the current state of the page and all other information to find the best possible next action to accomplish your goal. Your answer will be interpreted and executed by a program, make sure to follow the formatting instructions.
Note that if you want to analyze the YouTube video, Wikipedia page, or other pages that contain media content, or you just want to analyze the text content of the page in a more detailed way, you should use `get_page_markdown` tool to convert the page information to markdown text. And when browsing the web, if you have downloaded any files, the path of the downloaded files will be `{web_env.docker_workplace}/downloads`, and you CANNOT open the downloaded files directly, you should transfer back to the `System Triage Agent`, and let `System Triage Agent` to transfer to `File Surfer Agent` to open the downloaded files.
When you think you have completed the task the `System Triage Agent` asked you to do, you should use `transfer_back_to_triage_agent` to transfer the conversation back to the `System Triage Agent`. And you should not stop to try to solve the user's request by transferring to `System Triage Agent` only until the task is completed.
"""
    tool_list = [click, page_down, page_up, history_back, history_forward, web_search, input_text, sleep, visit_url, get_page_markdown]
    return Agent(
        name="Web Surfer Agent",
        model=model,
        instructions=instructions,
        functions=tool_list,
        handle_mm_func=handle_mm_func,
        tool_choice = "required",
        parallel_tool_calls = False
    )
# NOTE(review): orphaned module-level string expression below — it is a no-op at import
# time and reads like guidance that was meant to be part of the Web Surfer Agent's
# instructions (download handling). Confirm intent; consider merging it into the prompt.
"""
Note that when you need to download something, you should first know the url of the file, and then use the `visit_url` tool to download the file. For example, if you want to download paper from 'https://arxiv.org/abs/2310.13023', you should use `visit_url('url'='https://arxiv.org/pdf/2310.13023.pdf')`.
"""

View file

@ -0,0 +1,38 @@
from metachain.registry import register_agent
from metachain.tools.meta.edit_agents import list_agents, create_agent, delete_agent, run_agent
from metachain.tools.terminal_tools import execute_command
from metachain.types import Agent
from metachain.io_utils import read_file
@register_agent(name = "Agent Editor Agent", func_name="get_agent_editor_agent")
def get_agent_editor_agent(model: str) -> Agent:
    """
    The agent editor is an agent that can be used to edit the agents.

    Args:
        model: Name of the backing LLM.

    Returns:
        Agent: configured with agent-editing tools (list/create/delete/run) plus
        `execute_command` for installing missing dependencies.
    """
    def instructions(context_variables):
        # NOTE(review): read_file uses a relative path, so this only works when the
        # process CWD is the repository root — confirm or make the path absolute.
        return f"""\
You are an agent editor agent that can be used to edit the agents. You are working on a Agent framework named MetaChain, and your responsibility is to edit the agents in the MetaChain, so that the agents can be used to help the user with their request.
The existing agents are shown below:
{list_agents(context_variables)}
If you want to create a new agent, you should:
1. follow the format of the `get_dummy_agent` below:
```python
{read_file('metachain/agents/dummy_agent.py')}
```
2. you successfully create the agent only after you have successfully run the agent with the `run_agent` function to satisfy the user's request.
3. If you encounter any error while creating and running the agent, like dependency missing, you should use the `execute_command` function to install the dependency.
[IMPORTANT] The `register_plugin_agent` registry function is strictly required for a agent implementation to be recognized by the MetaChain framework.
"""
    tool_list = [list_agents, create_agent, delete_agent, run_agent, execute_command]
    return Agent(
        name="Agent Editor Agent",
        model=model,
        instructions=instructions,
        functions=tool_list,
        tool_choice = "required",
        parallel_tool_calls = False
    )

View file

@ -0,0 +1,40 @@
from metachain.types import Agent
from pydantic import BaseModel
from metachain.tools.meta.tool_retriever import get_api_plugin_tools_doc
from metachain.tools.meta.search_tools import search_trending_models_on_huggingface, get_hf_model_tools_doc
from metachain.tools.meta.edit_tools import list_tools
from typing import Union
from metachain.environment import DockerEnv, LocalEnv
# NOTE(review): unlike the sibling factories, this one carries no @register_agent
# decorator — confirm whether it is intentionally unregistered (it is only built
# internally by get_tool_agent).
def get_meta_plan_agent(model: str) -> Agent:
    """Build the 'Meta Plan Agent', which plans which existing tools to use and which
    new tools to create before handing the plan back to the 'Meta Agent'.

    Args:
        model: Name of the backing LLM.

    Returns:
        Agent: configured with tool-research functions and required tool choice.
    """
    def instructions(context_variables):
        # NOTE(review): code_env is resolved but never referenced in the prompt below —
        # presumably kept for parity with sibling factories; confirm before removing.
        code_env: Union[DockerEnv, LocalEnv] = context_variables.get("code_env", LocalEnv())
        instructions = f"""\
You are a helpful planner that can help `Tool Editor Agent` how to use MetaChain to solve the user's request.
Existing tools you already have:
{list_tools(context_variables)}
You should first fully understand the user's request, then analyze the existing tools and determine which tools are needed to solve the user's request, finally, you should transfer the conversation to the `Meta Agent` with the plan of using the tools.
If existing tools are not enough for your task, you should develop new tools.
1. [IMPORTANT] If you want to use third-party api, especially for some tasks related to Finance, Entertainment, eCommerce, Food, Travel, Sports, you MUST use the `get_api_plugin_tools_doc` tool to search information from existing api documents, it contains how to implement the api and API keys.
2. [IMPORTANT] If you want to use Hugging Face models, especially for some tasks related to vision, audio, video, you should use the `search_trending_models_on_huggingface` tool to search trending models related to the specific task on Hugging Face, and then use the `get_hf_model_tools_doc` tool to get the detailed information about the specific model.
3. [IMPORTANT] As for the tags ['image-text-to-text', 'visual-question-answering', 'video-text-to-text'] and ANY visual tasks, you should use `visual_question_answering` tool instead of Hugging Face models.
4. [IMPORTANT] You can not use `transfer_back_to_meta_agent_with_plans` util you have fully understood the user's request and have try your best to search information from exsiting resources if you want to create a new tool.
"""
        return instructions
    return Agent(
        name="Meta Plan Agent",
        model=model,
        instructions=instructions,
        functions=[get_api_plugin_tools_doc, search_trending_models_on_huggingface, get_hf_model_tools_doc],
        tool_choice = "required",
        parallel_tool_calls = False
    )

View file

@ -0,0 +1,100 @@
from metachain.registry import register_agent
from metachain.types import Agent, Result
from metachain.environment import DockerEnv, LocalEnv
from metachain.tools.meta.edit_tools import list_tools
from metachain.agents.tool_agent.tool_editor import get_tool_editor_agent
from typing import Union
from metachain.tools.inner import case_resolved, case_not_resolved
from pydantic import BaseModel
from metachain.util import function_to_json
from metachain.agents.tool_agent.meta_plan_agent import get_meta_plan_agent
class ToolDescription(BaseModel):
    """Structured description of one tool in a tool-development plan."""
    # Free-text summary of what the tool does.
    tool_functionalities: str
    # Whether the tool already exists in MetaChain (False => it must be developed).
    existing: bool
    # Documentation for the tool; per the planning contract it is required when
    # `existing` is False.
    tool_docs: str
class ToolPlan(BaseModel):
    """One step of a tool-development plan: a tool name plus its description."""
    # Name of the tool to use or develop.
    tool_name: str
    # Functionality/existence/docs metadata for the tool.
    tool_description: ToolDescription
@register_agent(name = "Tool Agent", func_name="get_tool_agent")
def get_tool_agent(model: str) -> Agent:
"""
The tool agent is an agent that can be used to create and run other tools.
"""
def instructions(context_variables):
code_env: Union[DockerEnv, LocalEnv] = context_variables.get("code_env", LocalEnv())
instructions = f"""\
You are a helpful assistant that can help the user with their request by creating and running tools in the Metachain agent framework. Your responsibility is to determine which sub-agent is best suited to handle the user's request under the current context, and transfer the conversation to that sub-agent. And you should not stop to try to solve the user's request by transferring to another sub-agent only until the task is completed.
Your sub-agents are:
1. `Meta Plan Agent`: This agent is used to plan how to use MetaChain to solve the user's request.
2. `Tool Editor Agent`: This agent is used to run and edit tools.
Existing tools you already have:
{list_tools(context_variables)}
You should first transfer the conversation to the `Meta Plan Agent` to plan how to use MetaChain to solve the user's request, and the plan should follow the following constraints:
1. If exising tools are enough for your task, you can directly use them to solve the user's request.
2. If exising tools are not enough for your task, `Meta Plan Agent` should search information from the resources and plan how to create new tools.
3. [IMPORTANT] As for the tags ['image-text-to-text', 'visual-question-answering', 'video-text-to-text'] and ANY visual tasks, you should use `visual_question_answering` tool instead of Hugging Face models.
"""
return instructions
tool_editor_agent: Agent = get_tool_editor_agent(model)
meta_plan_agent: Agent = get_meta_plan_agent(model)
def transfer_to_tool_editor_agent(sub_task: str):
"""
Args:
sub_task: The detailed description of the sub-task that the `Meta Agent` will ask the `Tool Editor Agent` to do.
"""
return tool_editor_agent
def transfer_to_meta_plan_agent(sub_task: str):
"""
Use this function when you want to plan how to use MetaChain to solve the user's request.
Args:
sub_task: The detailed description of the sub-task that the `Meta Agent` will ask the `Meta Plan Agent` to do.
"""
return meta_plan_agent
meta_agent = Agent(
name="Meta Agent",
model=model,
instructions=instructions,
functions=[transfer_to_meta_plan_agent, transfer_to_tool_editor_agent, case_resolved, case_not_resolved],
tool_choice = "required",
parallel_tool_calls = False
)
def transfer_back_to_meta_agent(task_status: str):
"""
Args:
task_status: The status of the task that the `Meta Agent` will ask the `Meta Agent` to do.
"""
return meta_agent
    def transfer_back_to_meta_agent_with_plans(tool_development_steps: list[ToolPlan]) -> Result:
        """
        Report the tool-development plan and hand control back to the `Meta Agent`.
        Use this function only after you have fully understood the user's request
        and have tried your best to search information from existing resources.

        Args:
            tool_development_steps: The steps of tool development. It is a list of
                dictionaries, each containing the name of a tool to use from the
                existing MetaChain or the name of a tool to develop. If the tool
                does not exist yet, the dictionary should contain the tool
                documentation.
        """
        # Summarize each planned tool as "name: functionalities [existing?]".
        tool_str = "\n".join([f"{tool['tool_name']}: {tool['tool_description']['tool_functionalities']} [{tool['tool_description']['existing']}]" for tool in tool_development_steps])
        ret_val = f"""\
Receiving user's request, I have the following plans to use MetaChain to solve the user's request:
As for using existing tools, I have the following plans:
{tool_str}
"""
        return Result(
            value=ret_val,
            agent=meta_agent
        )
    # Wire the return paths: each sub-agent gets a function that transfers
    # control back to the orchestrator when its sub-task is done.
    tool_editor_agent.functions.append(transfer_back_to_meta_agent)
    meta_plan_agent.functions.append(transfer_back_to_meta_agent_with_plans)
    return meta_agent

View file

@ -0,0 +1,143 @@
from metachain.registry import register_agent
from metachain.tools.meta.edit_tools import list_tools, create_tool, delete_tool, run_tool, get_metachain_path
from metachain.tools.meta.tool_retriever import get_api_plugin_tools_doc
from metachain.tools.meta.search_tools import search_trending_models_on_huggingface, get_hf_model_tools_doc
from metachain.types import Agent
from metachain.io_utils import read_file
from metachain.tools.terminal_tools import execute_command
def get_tool_editor_agent(model: str) -> Agent:
    """
    Build the Tool Editor Agent, which creates, edits, runs and deletes
    plugin tools in the MetaChain framework.

    Args:
        model: Name of the LLM backing the agent.

    Returns:
        An `Agent` configured with the tool-management functions; it must
        always answer with a tool call (tool_choice="required").
    """
    def instructions(context_variables):
        # The prompt embeds the live tool list and the dummy-tool template so
        # the model always sees the current state of the plugin registry.
        return f"""\
You are a tool editor agent responsible for managing plugin tools in the MetaChain framework. Your core responsibility is to edit, create, and manage plugin tools that can be used by other agents.
[PLUGIN TOOLS SYSTEM]
- Plugin tools are the building blocks of MetaChain
- All available plugin tools are as follows:
{list_tools(context_variables)}
- Plugin tools can ONLY be executed using `run_tool(tool_name, run_code)`
- NEVER try to import and run tools directly - always use `run_tool`
[AVAILABLE MANAGEMENT TOOLS]
1. list_tools():
- Lists all existing plugin tools
- Returns: tool name, arguments, docstring, implementation details
- Use this FIRST to check existing tools
2. create_tool(tool_name: str, tool_code: str):
- Creates new plugin tools
- Requires proper registration using @register_plugin_tool
3. run_tool(tool_name: str, run_code: str,):
- REQUIRED method to execute any plugin tool
- Format: run_tool("tool_name", "from metachain.tools import tool_name; print(tool_name(args))")
4. delete_tool(tool_name: str,):
- Removes existing plugin tools
- Use with caution
5. get_api_plugin_tools_doc:
- Required for third-party API integrations
- Must be used for Finance, Entertainment, etc.
6. execute_command:
- Handles system-level operations
- Use for dependency installation
[CRITICAL PRINCIPLES FOR PLUGIN TOOLS]
1. Tools MUST be abstract, modular, and reusable:
- Use generic function names (e.g., `download_media` instead of `download_youtube_video`)
- Break complex tasks into smaller, reusable components
- Avoid task-specific implementations
- Use parameters instead of hardcoded values
2. For ALL visual tasks (images, videos, visual analysis):
- MUST use the existing `visual_question_answering` plugin tool
- NO direct implementation of visual processing
- Chain `visual_question_answering` with other tools as needed
[WORKFLOW FOR PLUGIN TOOL MANAGEMENT]
1. Always start with `list_tools()` to check existing tools
2. For new plugin tools:
a. Design generic, reusable interface
b. Follow the template format:
```python
{read_file('metachain/tools/dummy_tool.py')}
```
c. Create using `create_tool`
d. Test using `run_tool`
e. Handle dependencies with `execute_command`
[IMPORTANT RULES]
- ALL tools must be registered with @register_plugin_tool
- ALL tools must have type hints
- Each tool does ONE thing well
- Create modular tools that can be combined
- ALWAYS use `run_tool` to execute plugin tools
- NEVER modify the `visual_question_answering` tool
[TOOL TESTING EXAMPLE]
Correct way to test a plugin tool:
```python
result = run_tool(
tool_name="your_tool",
run_code="from metachain.tools import your_tool; print(your_tool(param1='value1'))",
context_variables=context_variables
)
```
"""
    # Management functions exposed to the model as callable tools.
    tool_list = [list_tools, create_tool, run_tool, delete_tool, get_api_plugin_tools_doc, execute_command]
    return Agent(
        name="Tool Editor Agent",
        model=model,
        instructions=instructions,
        functions=tool_list,
        tool_choice = "required",
        parallel_tool_calls = False
    )
"""
5. [IMPORTANT] If you want to use Hugging Face models, especially for some tasks related to vision, audio, video, you should use the `search_trending_models_on_huggingface` tool to search trending models related to the specific task on Hugging Face, and then use the `get_hf_model_tools_doc` tool to get the detailed information about the specific model.
6. [IMPORTANT] As for the tags ['image-text-to-text', 'visual-question-answering', 'video-text-to-text'] and ANY visual tasks, you should use `visual_question_answering` tool instead of Hugging Face models.
"""
"""\
You are a tool editor agent that can be used to edit the tools. You are working on a Agent framework named MetaChain, and your responsibility is to edit the tools in the MetaChain, so that the tools can be used by the agents to help the user with their request.
The existing tools are shown below:
{list_tools(context_variables)}
If you want to create a new tool, you should:
1. follow the format of the `tool_dummy` below. Note that if the tool should be used with third-part api key, you should write the api key inside the definition of the tool:
```python
{read_file('metachain/tools/dummy_tool.py')}
```
2. you successfully create the tool only after you have successfully run the tool with the `run_tool` function, and an example of testing the tool is shown below.:
```python
from metachain.tools import tool_dummy
if __name__ == "__main__":
... # some pre-operations
print(run_tool(tool_name="tool_dummy", run_code="from metachain.tools import tool_dummy; print(tool_dummy(args1=args1, args2=args1, ...))"))
```
3. If you encounter any error while creating and running the tool, like dependency missing, you should use the `execute_command` function to install the dependency.
4. [IMPORTANT] If you want to use third-party api, especially for some tasks related to Finance, Entertainment, eCommerce, Food, Travel, Sports, you MUST use the `get_api_plugin_tools_doc` tool to search information from existing api documents, it contains how to implement the api and API keys.
[IMPORTANT] The `register_plugin_tool` registry function is strictly required for a tool implementation to be recognized by the MetaChain framework.
[IMPORTANT] The tool you create should be abstract, modular, and reusable. Specifically, the function name must be generic (e.g.,
`count_objects` instead of `count_apples`). The function must use parameters instead of hard-coded values. The
function body must be self-contained.
[IMPORTANT] Explicitly declare input and output data types using type hints.
[IMPORTANT] For ANY visual tasks related to image and video, you should use `visual_question_answering` tool.
"""

View file

@ -0,0 +1,28 @@
from metachain.types import Agent
from metachain.tools import (
get_api_plugin_tools_doc, check_tool
)
from metachain.registry import register_agent
@register_agent(name = "Tool Check Agent", func_name="get_tool_check_agent")
def get_tool_check_agent(model: str):
def instructions(context_variables):
return \
f"""You are a developer working on a project named 'metachain'.
You are given a user request and required to use existing project code to solve the task.
Your goal is to enrich the functionality of existing list of tools in the `tools` folder as much as possible, so that once the similar task occurs again, the agent can solve it directly by using the tools without developing new tools.
whether you should develop some new tools to integrate into the agent to directly solve the task.
If you use an external api, you should always develop a new tool, rather than using coding-related tools.
Answer 'Needed' or 'Not needed' first and then give your reason. ('Needed' means you should develop a new tool, 'Not needed' means you should not develop a new tool).
You can use `check_tool` tool to review the existing tools and check whether developing a new tool is needed.
"""
return Agent(
name="Tool Check Agent",
model=model,
instructions=instructions,
functions=[check_tool],
parallel_tool_calls = False
)
"""If you need to develop a new tool, you must use `get_tool_doc` tool to get the tool doc."""

View file

@ -0,0 +1,21 @@
from metachain.types import Agent
from metachain.tools import (
get_api_plugin_tools_doc
)
from metachain.util import make_message, make_tool_message
from metachain.registry import register_agent
@register_agent(name = "Tool Retriver Agent", func_name="get_tool_retriver_agent")
def get_tool_retriver_agent(model: str):
def instructions(context_variables):
return \
f"""
You are a tool retriver agent.
You are given a task instruction, and you need to retrieve the tool docs for the task using function `get_tool_doc`.
Note that if you want to complete the task, you may need to use more than one tool, so you should retrieve the tool docs for all the tools you may need. Finally, you should give a merged tool doc consisting of all the tool docs you retrieved, and the implementation code of each tool should be included in the tool doc.
"""
return Agent(
name="Tool Retriver Agent",
model=model,
instructions=instructions,
functions=[get_api_plugin_tools_doc],
)

63
metachain/cli.py Normal file
View file

@ -0,0 +1,63 @@
import click
import importlib
from metachain import MetaChain
from metachain.util import debug_print
import asyncio
# Root command group; sub-commands attach themselves via @cli.command().
@click.group()
def cli():
    """The command line interface for metachain"""
    pass
@cli.command()
@click.option('--model', default='gpt-4o-2024-08-06', help='the name of the model')
@click.option('--agent_func', default='get_dummy_agent', help='the function to get the agent')
@click.option('--query', default='...', help='the user query to the agent')
@click.argument('context_variables', nargs=-1)
def agent(model: str, agent_func: str, query: str, context_variables):
    """
    Run an agent with a given model, agent function, query, and context variables.

    Args:
        model (str): The name of the model.
        agent_func (str): The function to get the agent.
        query (str): The user query to the agent.
        context_variables (list): Trailing KEY=VALUE pairs passed to the agent
            as context; arguments without '=' are silently ignored.

    Usage:
        mc agent --model=gpt-4o-2024-08-06 --agent_func=get_weather_agent --query="What is the weather in Tokyo?" city=Tokyo unit=C timestamp=2024-01-01
    """
    # Parse trailing KEY=VALUE arguments into the shared context storage.
    context_storage = {}
    for arg in context_variables:
        if '=' in arg:
            key, value = arg.split('=', 1)
            context_storage[key] = value
    agent_module = importlib.import_module('metachain.agents')
    try:
        # Renamed local (was rebinding the `agent_func` parameter) for clarity.
        agent_factory = getattr(agent_module, agent_func)
    except AttributeError as e:
        # Typo fixed ("shoud" -> "should"); chain the cause for debuggability.
        raise ValueError(f'Agent function {agent_func} not found, you should check in the `metachain.agents` directory for the correct function name') from e
    agent = agent_factory(model)
    mc = MetaChain()
    messages = [
        {"role": "user", "content": query}
    ]
    response = mc.run(agent, messages, context_storage, debug=True)
    debug_print(True, response.messages[-1]['content'], title = f'Result of running {agent.name} agent', color = 'pink3')
    return response.messages[-1]['content']
@cli.command()
@click.option('--workflow_name', default=None, help='the name of the workflow')
@click.option('--system_input', default='...', help='the user query to the agent')
def workflow(workflow_name: str, system_input: str):
    """Synchronous CLI wrapper around the async workflow runner."""
    return asyncio.run(async_workflow(workflow_name, system_input))
async def async_workflow(workflow_name: str, system_input: str):
    """
    Asynchronous implementation of the `workflow` command: looks up
    `workflow_name` in `metachain.workflows`, awaits it with `system_input`,
    prints the result and returns it.
    """
    workflow_module = importlib.import_module(f'metachain.workflows')
    try:
        workflow_func = getattr(workflow_module, workflow_name)
    except AttributeError:
        raise ValueError(f'Workflow function {workflow_name} not found...')
    result = await workflow_func(system_input)  # await the async workflow to finish
    debug_print(True, result, title=f'Result of running {workflow_name} workflow', color='pink3')
    return result

620
metachain/core.py Normal file
View file

@ -0,0 +1,620 @@
# Standard library imports
import copy
import json
from collections import defaultdict
from typing import List, Callable, Union
from datetime import datetime
# Local imports
import os
from .util import function_to_json, debug_print, merge_chunk, pretty_print_messages
from .types import (
Agent,
AgentFunction,
Message,
ChatCompletionMessageToolCall,
Function,
Response,
Result,
)
from litellm import completion, acompletion
from pathlib import Path
from .logger import MetaChainLogger, LoggerManager
from httpx import RemoteProtocolError, ConnectError
from litellm.exceptions import APIError
from tenacity import (
retry,
stop_after_attempt,
wait_exponential,
retry_if_exception_type
)
from openai import AsyncOpenAI
import litellm
import inspect
from constant import MC_MODE, FN_CALL, API_BASE_URL, NOT_SUPPORT_SENDER, ADD_USER, NON_FN_CALL
from metachain.fn_call_converter import convert_tools_to_description, convert_non_fncall_messages_to_fncall_messages, SYSTEM_PROMPT_SUFFIX_TEMPLATE, convert_fn_messages_to_non_fn_messages, interleave_user_into_messages
from litellm.types.utils import Message as litellmMessage
# litellm.set_verbose=True
# client = AsyncOpenAI()
def should_retry_error(exception):
    """Return True when *exception* looks like a transient network/API
    failure that is worth retrying."""
    if MC_MODE is False:
        print(f"Caught exception: {type(exception).__name__} - {str(exception)}")
    # Known retryable exception classes.
    if isinstance(exception, (APIError, RemoteProtocolError, ConnectError)):
        return True
    # Otherwise fall back to substring matching on the error message.
    retryable_fragments = (
        "connection error",
        "server disconnected",
        "eof occurred",
        "timeout",
        "event loop is closed",  # event-loop teardown races
        "anthropicexception",    # Anthropic-related wrapper errors
    )
    message = str(exception).lower()
    return any(fragment in message for fragment in retryable_fragments)
# Reserved kwarg name used to inject shared state into tool functions;
# hidden from the model's view of the tool schemas.
__CTX_VARS_NAME__ = "context_variables"
# Reuse a process-wide logger when LoggerManager already configured one.
logger = LoggerManager.get_logger()
class MetaChain:
    """
    Agent-execution engine: drives the chat loop, dispatches tool calls and
    follows agent hand-offs until the task ends.
    """
    def __init__(self, log_path: Union[str, None, MetaChainLogger] = None):
        """
        Args:
            log_path: a path to write logs to, an already-configured
                `MetaChainLogger` instance, or None.
        """
        # Prefer the process-wide logger when LoggerManager configured one;
        # otherwise accept a ready logger or build one from the path.
        if logger:
            self.logger = logger
        elif isinstance(log_path, MetaChainLogger):
            self.logger = log_path
        else:
            self.logger = MetaChainLogger(log_path=log_path)
        # if self.logger.log_path is None: self.logger.info("[Warning] Not specific log path, so log will not be saved", "...", title="Log Path", color="light_cyan3")
        # else: self.logger.info("Log file is saved to", self.logger.log_path, "...", title="Log Path", color="light_cyan3")
    @retry(
        stop=stop_after_attempt(4),
        wait=wait_exponential(multiplier=1, min=4, max=60),
        retry=should_retry_error,
        before_sleep=lambda retry_state: print(f"Retrying... (attempt {retry_state.attempt_number})")
    )
    def get_chat_completion(
        self,
        agent: Agent,
        history: List,
        context_variables: dict,
        model_override: str,
        stream: bool,
        debug: bool,
    ) -> Message:
        """
        Build the prompt for `agent` and perform one LLM call, retrying on
        transient failures.

        Two modes (selected by the FN_CALL env flag):
          * native function calling via litellm `completion`;
          * non-function-calling: tools are described in the prompt text and
            the plain-text reply is converted back into tool-call messages.

        Args:
            agent: the active agent (instructions, functions, tool_choice).
            history: prior chat messages (list of dicts).
            context_variables: shared state, passed to callable instructions.
            model_override: model name taking precedence over agent.model.
            stream: whether to request a streaming response.
            debug: debug flag (not used directly here).

        Returns:
            The litellm completion response object.
        """
        # defaultdict(str) so templated instructions never KeyError on a
        # missing context variable.
        context_variables = defaultdict(str, context_variables)
        instructions = (
            agent.instructions(context_variables)
            if callable(agent.instructions)
            else agent.instructions
        )
        if agent.examples:
            # Few-shot examples are prepended to the conversation history.
            examples = agent.examples(context_variables) if callable(agent.examples) else agent.examples
            history = examples + history
        messages = [{"role": "system", "content": instructions}] + history
        tools = [function_to_json(f) for f in agent.functions]
        # hide context_variables from model
        for tool in tools:
            params = tool["function"]["parameters"]
            params["properties"].pop(__CTX_VARS_NAME__, None)
            if __CTX_VARS_NAME__ in params["required"]:
                params["required"].remove(__CTX_VARS_NAME__)
        if FN_CALL:
            create_model = model_override or agent.model
            assert litellm.supports_function_calling(model = create_model) == True, f"Model {create_model} does not support function calling, please set `FN_CALL=False` to use non-function calling mode"
            create_params = {
                "model": create_model,
                "messages": messages,
                "tools": tools or None,
                "tool_choice": agent.tool_choice,
                "stream": stream,
            }
            # Some providers reject the non-standard 'sender' key on messages.
            NO_SENDER_MODE = False
            for not_sender_model in NOT_SUPPORT_SENDER:
                if not_sender_model in create_model:
                    NO_SENDER_MODE = True
                    break
            if NO_SENDER_MODE:
                messages = create_params["messages"]
                for message in messages:
                    if 'sender' in message:
                        del message['sender']
                create_params["messages"] = messages
            # parallel_tool_calls is only passed for OpenAI GPT models.
            if tools and create_params['model'].startswith("gpt"):
                create_params["parallel_tool_calls"] = agent.parallel_tool_calls
            completion_response = completion(**create_params)
        else:
            create_model = model_override or agent.model
            assert agent.tool_choice == "required", f"Non-function calling mode MUST use tool_choice = 'required' rather than {agent.tool_choice}"
            # Describe the tools inline in the last message so a plain-text
            # model can "call" them.
            last_content = messages[-1]["content"]
            tools_description = convert_tools_to_description(tools)
            messages[-1]["content"] = last_content + "\n[IMPORTANT] You MUST use the tools provided to complete the task.\n" + SYSTEM_PROMPT_SUFFIX_TEMPLATE.format(description=tools_description)
            NO_SENDER_MODE = False
            for not_sender_model in NOT_SUPPORT_SENDER:
                if not_sender_model in create_model:
                    NO_SENDER_MODE = True
                    break
            if NO_SENDER_MODE:
                for message in messages:
                    if 'sender' in message:
                        del message['sender']
            if NON_FN_CALL:
                messages = convert_fn_messages_to_non_fn_messages(messages)
            if ADD_USER and messages[-1]["role"] != "user":
                # Some providers require alternating roles; interleave user turns.
                messages = interleave_user_into_messages(messages)
            create_params = {
                "model": create_model,
                "messages": messages,
                "stream": stream,
                "base_url": API_BASE_URL,
            }
            completion_response = completion(**create_params)
            # Convert the plain-text reply back into an OpenAI-style
            # tool-call message so downstream handling is uniform.
            last_message = [{"role": "assistant", "content": completion_response.choices[0].message.content}]
            converted_message = convert_non_fncall_messages_to_fncall_messages(last_message, tools)
            converted_tool_calls = [ChatCompletionMessageToolCall(**tool_call) for tool_call in converted_message[0]["tool_calls"]]
            completion_response.choices[0].message = litellmMessage(content = converted_message[0]["content"], role = "assistant", tool_calls = converted_tool_calls)
        return completion_response
def handle_function_result(self, result, debug) -> Result:
match result:
case Result() as result:
return result
case Agent() as agent:
return Result(
value=json.dumps({"assistant": agent.name}),
agent=agent,
)
case _:
try:
return Result(value=str(result))
except Exception as e:
error_message = f"Failed to cast response to string: {result}. Make sure agent functions return a string or Result object. Error: {str(e)}"
self.logger.info(error_message, title="Handle Function Result Error", color="red")
raise TypeError(error_message)
    def handle_tool_calls(
        self,
        tool_calls: List[ChatCompletionMessageToolCall],
        functions: List[AgentFunction],
        context_variables: dict,
        debug: bool,
        handle_mm_func: Callable[[], str] = None,
    ) -> Response:
        """
        Execute every tool call requested by the model and collect the results.

        Args:
            tool_calls: tool invocations returned by the LLM.
            functions: the active agent's callable functions.
            context_variables: shared state, injected into functions that
                declare a `context_variables` parameter.
            debug: debug flag forwarded to result handling.
            handle_mm_func: builds the text shown next to an image returned by
                a tool; required whenever a tool result carries an image.

        Returns:
            A partial `Response` holding the tool messages, updated context
            variables and (possibly) the next agent to hand off to.
        """
        function_map = {f.__name__: f for f in functions}
        partial_response = Response(
            messages=[], agent=None, context_variables={})
        for tool_call in tool_calls:
            name = tool_call.function.name
            # handle missing tool case, skip to next tool
            if name not in function_map:
                self.logger.info(f"Tool {name} not found in function map. You are recommended to use `run_tool` to run this tool.", title="Tool Call Error", color="red")
                partial_response.messages.append(
                    {
                        "role": "tool",
                        "tool_call_id": tool_call.id,
                        "name": name,
                        "content": f"Error: Tool {name} not found. You are recommended to use `run_tool` to run this tool.",
                    }
                )
                continue
            args = json.loads(tool_call.function.arguments)
            func = function_map[name]
            # pass context_variables to agent functions that declare the
            # reserved parameter (it is hidden from the model's schema)
            if __CTX_VARS_NAME__ in inspect.signature(func).parameters.keys():
                args[__CTX_VARS_NAME__] = context_variables
            raw_result = function_map[name](**args)
            result: Result = self.handle_function_result(raw_result, debug)
            partial_response.messages.append(
                {
                    "role": "tool",
                    "tool_call_id": tool_call.id,
                    "name": name,
                    "content": result.value,
                }
            )
            self.logger.pretty_print_messages(partial_response.messages[-1])
            if result.image:
                # An image result is relayed back as a multimodal user message;
                # handle_mm_func supplies the accompanying caption text.
                assert handle_mm_func, f"handle_mm_func is not provided, but an image is returned by tool call {name}({tool_call.function.arguments})"
                partial_response.messages.append(
                    {
                        "role": "user",
                        "content": [
                            {"type":"text", "text":handle_mm_func(name, tool_call.function.arguments)},
                            {
                                "type":"image_url",
                                "image_url":{
                                    "url":f"data:image/png;base64,{result.image}"
                                }
                            }
                        ]
                    }
                )
            partial_response.context_variables.update(result.context_variables)
            if result.agent:
                # The tool requested an agent hand-off; the caller switches.
                partial_response.agent = result.agent
        return partial_response
    def run_and_stream(
        self,
        agent: Agent,
        messages: List,
        context_variables: dict = {},
        model_override: str = None,
        debug: bool = False,
        max_turns: int = float("inf"),
        execute_tools: bool = True,
    ):
        """
        Streaming variant of `run`: yields raw delta chunks as they arrive,
        `{"delim": "start"/"end"}` markers around each assistant turn, and
        finally a `{"response": Response}` item with the accumulated result.
        """
        active_agent = agent
        context_variables = copy.deepcopy(context_variables)
        history = copy.deepcopy(messages)
        init_len = len(messages)
        while len(history) - init_len < max_turns:
            # Skeleton message the streamed deltas are merged into.
            message = {
                "content": "",
                "sender": agent.name,
                "role": "assistant",
                "function_call": None,
                "tool_calls": defaultdict(
                    lambda: {
                        "function": {"arguments": "", "name": ""},
                        "id": "",
                        "type": "",
                    }
                ),
            }
            # get completion with current history, agent
            completion = self.get_chat_completion(
                agent=active_agent,
                history=history,
                context_variables=context_variables,
                model_override=model_override,
                stream=True,
                debug=debug,
            )
            yield {"delim": "start"}
            for chunk in completion:
                delta = json.loads(chunk.choices[0].delta.json())
                if delta["role"] == "assistant":
                    delta["sender"] = active_agent.name
                yield delta
                delta.pop("role", None)
                delta.pop("sender", None)
                # Fold the streamed delta into the accumulated message.
                merge_chunk(message, delta)
            yield {"delim": "end"}
            message["tool_calls"] = list(
                message.get("tool_calls", {}).values())
            if not message["tool_calls"]:
                message["tool_calls"] = None
            debug_print(debug, "Received completion:", message)
            history.append(message)
            if not message["tool_calls"] or not execute_tools:
                debug_print(debug, "Ending turn.")
                break
            # convert tool_calls to objects
            tool_calls = []
            for tool_call in message["tool_calls"]:
                function = Function(
                    arguments=tool_call["function"]["arguments"],
                    name=tool_call["function"]["name"],
                )
                tool_call_object = ChatCompletionMessageToolCall(
                    id=tool_call["id"], function=function, type=tool_call["type"]
                )
                tool_calls.append(tool_call_object)
            # handle function calls, updating context_variables, and switching agents
            partial_response = self.handle_tool_calls(
                tool_calls, active_agent.functions, context_variables, debug
            )
            history.extend(partial_response.messages)
            context_variables.update(partial_response.context_variables)
            if partial_response.agent:
                active_agent = partial_response.agent
        yield {
            "response": Response(
                messages=history[init_len:],
                agent=active_agent,
                context_variables=context_variables,
            )
        }
    def run(
        self,
        agent: Agent,
        messages: List,
        context_variables: dict = {},
        model_override: str = None,
        stream: bool = False,
        debug: bool = True,
        max_turns: int = float("inf"),
        execute_tools: bool = True,
    ) -> Response:
        """
        Drive the agent loop to completion.

        Repeatedly queries the LLM, executes tool calls and follows agent
        hand-offs. Termination depends on the entry agent's tool_choice: a
        free-choice agent ends when it replies without tool calls while it is
        the active agent; a tool_choice="required" agent ends when it calls
        `case_resolved` or `case_not_resolved`.

        Args:
            agent: the entry agent.
            messages: initial chat history; the last entry is the task.
            context_variables: shared state passed to instructions and tools.
            model_override: model name overriding each agent's own model.
            stream: if True, delegates to `run_and_stream` and returns its
                generator instead of a `Response`.
            debug: debug flag forwarded to completion/tool handling.
            max_turns: maximum number of new history entries to produce.
            execute_tools: when False, stop instead of executing tool calls.

        Returns:
            A `Response` with the new messages, final agent and final context.
        """
        if stream:
            return self.run_and_stream(
                agent=agent,
                messages=messages,
                context_variables=context_variables,
                model_override=model_override,
                debug=debug,
                max_turns=max_turns,
                execute_tools=execute_tools,
            )
        active_agent = agent
        enter_agent = agent
        context_variables = copy.copy(context_variables)
        history = copy.deepcopy(messages)
        init_len = len(messages)
        self.logger.info("Receiveing the task:", history[-1]['content'], title="Receive Task", color="green")
        while len(history) - init_len < max_turns and active_agent:
            # get completion with current history, agent
            completion = self.get_chat_completion(
                agent=active_agent,
                history=history,
                context_variables=context_variables,
                model_override=model_override,
                stream=stream,
                debug=debug,
            )
            message: Message = completion.choices[0].message
            message.sender = active_agent.name
            self.logger.pretty_print_messages(message)
            history.append(
                json.loads(message.model_dump_json())
            ) # to avoid OpenAI types (?)
            # Termination rules depend on the ENTRY agent's tool_choice.
            if enter_agent.tool_choice != "required":
                if (not message.tool_calls and active_agent.name == enter_agent.name) or not execute_tools:
                    self.logger.info("Ending turn.", title="End Turn", color="red")
                    break
            else:
                if (message.tool_calls and message.tool_calls[0].function.name == "case_resolved") or not execute_tools:
                    # Execute the final case_resolved call, then stop.
                    self.logger.info("Ending turn with case resolved.", title="End Turn", color="red")
                    partial_response = self.handle_tool_calls(
                        message.tool_calls, active_agent.functions, context_variables, debug, handle_mm_func=active_agent.handle_mm_func
                    )
                    history.extend(partial_response.messages)
                    context_variables.update(partial_response.context_variables)
                    break
                elif (message.tool_calls and message.tool_calls[0].function.name == "case_not_resolved") or not execute_tools:
                    # Execute the final case_not_resolved call, then stop.
                    self.logger.info("Ending turn with case not resolved.", title="End Turn", color="red")
                    partial_response = self.handle_tool_calls(
                        message.tool_calls, active_agent.functions, context_variables, debug, handle_mm_func=active_agent.handle_mm_func
                    )
                    history.extend(partial_response.messages)
                    context_variables.update(partial_response.context_variables)
                    break
            # handle function calls, updating context_variables, and switching agents
            if message.tool_calls:
                partial_response = self.handle_tool_calls(
                    message.tool_calls, active_agent.functions, context_variables, debug, handle_mm_func=active_agent.handle_mm_func
                )
            else:
                partial_response = Response(messages=[message])
            history.extend(partial_response.messages)
            context_variables.update(partial_response.context_variables)
            if partial_response.agent:
                active_agent = partial_response.agent
        return Response(
            messages=history[init_len:],
            agent=active_agent,
            context_variables=context_variables,
        )
    @retry(
        stop=stop_after_attempt(4),
        wait=wait_exponential(multiplier=1, min=10, max=180),
        retry=should_retry_error,
        before_sleep=lambda retry_state: print(f"Retrying... (attempt {retry_state.attempt_number})")
    )
    async def get_chat_completion_async(
        self,
        agent: Agent,
        history: List,
        context_variables: dict,
        model_override: str,
        stream: bool,
        debug: bool,
    ) -> Message:
        """
        Async variant of `get_chat_completion`: identical prompt construction
        and FN_CALL / non-FN_CALL handling, but awaits litellm `acompletion`.
        See `get_chat_completion` for parameter semantics.
        """
        # defaultdict(str) so templated instructions never KeyError.
        context_variables = defaultdict(str, context_variables)
        instructions = (
            agent.instructions(context_variables)
            if callable(agent.instructions)
            else agent.instructions
        )
        if agent.examples:
            # Few-shot examples are prepended to the conversation history.
            examples = agent.examples(context_variables) if callable(agent.examples) else agent.examples
            history = examples + history
        messages = [{"role": "system", "content": instructions}] + history
        tools = [function_to_json(f) for f in agent.functions]
        # hide context_variables from model
        for tool in tools:
            params = tool["function"]["parameters"]
            params["properties"].pop(__CTX_VARS_NAME__, None)
            if __CTX_VARS_NAME__ in params["required"]:
                params["required"].remove(__CTX_VARS_NAME__)
        if FN_CALL:
            create_model = model_override or agent.model
            assert litellm.supports_function_calling(model = create_model) == True, f"Model {create_model} does not support function calling, please set `FN_CALL=False` to use non-function calling mode"
            create_params = {
                "model": create_model,
                "messages": messages,
                "tools": tools or None,
                "tool_choice": agent.tool_choice,
                "stream": stream,
            }
            # Some providers reject the non-standard 'sender' key on messages.
            NO_SENDER_MODE = False
            for not_sender_model in NOT_SUPPORT_SENDER:
                if not_sender_model in create_model:
                    NO_SENDER_MODE = True
                    break
            if NO_SENDER_MODE:
                messages = create_params["messages"]
                for message in messages:
                    if 'sender' in message:
                        del message['sender']
                create_params["messages"] = messages
            # parallel_tool_calls is only passed for OpenAI GPT models.
            if tools and create_params['model'].startswith("gpt"):
                create_params["parallel_tool_calls"] = agent.parallel_tool_calls
            completion_response = await acompletion(**create_params)
        else:
            create_model = model_override or agent.model
            assert agent.tool_choice == "required", f"Non-function calling mode MUST use tool_choice = 'required' rather than {agent.tool_choice}"
            # Describe the tools inline in the last message so a plain-text
            # model can "call" them.
            last_content = messages[-1]["content"]
            tools_description = convert_tools_to_description(tools)
            messages[-1]["content"] = last_content + "\n[IMPORTANT] You MUST use the tools provided to complete the task.\n" + SYSTEM_PROMPT_SUFFIX_TEMPLATE.format(description=tools_description)
            NO_SENDER_MODE = False
            for not_sender_model in NOT_SUPPORT_SENDER:
                if not_sender_model in create_model:
                    NO_SENDER_MODE = True
                    break
            if NO_SENDER_MODE:
                for message in messages:
                    if 'sender' in message:
                        del message['sender']
            create_params = {
                "model": create_model,
                "messages": messages,
                "stream": stream,
                "base_url": API_BASE_URL,
            }
            completion_response = await acompletion(**create_params)
            # Convert the plain-text reply back into a tool-call message.
            last_message = [{"role": "assistant", "content": completion_response.choices[0].message.content}]
            converted_message = convert_non_fncall_messages_to_fncall_messages(last_message, tools)
            converted_tool_calls = [ChatCompletionMessageToolCall(**tool_call) for tool_call in converted_message[0]["tool_calls"]]
            completion_response.choices[0].message = litellmMessage(content = converted_message[0]["content"], role = "assistant", tool_calls = converted_tool_calls)
        return completion_response
    async def run_async(
        self,
        agent: Agent,
        messages: List,
        context_variables: dict = {},
        model_override: str = None,
        stream: bool = False,
        debug: bool = True,
        max_turns: int = float("inf"),
        execute_tools: bool = True,
    ) -> Response:
        """
        Async variant of `run` (streaming is not supported): same agent loop
        and termination rules, awaiting `get_chat_completion_async` for each
        turn. See `run` for parameter semantics.
        """
        assert stream == False, "Async run does not support stream"
        active_agent = agent
        enter_agent = agent
        context_variables = copy.copy(context_variables)
        history = copy.deepcopy(messages)
        init_len = len(messages)
        self.logger.info("Receiveing the task:", history[-1]['content'], title="Receive Task", color="green")
        while len(history) - init_len < max_turns and active_agent:
            # get completion with current history, agent
            completion = await self.get_chat_completion_async(
                agent=active_agent,
                history=history,
                context_variables=context_variables,
                model_override=model_override,
                stream=stream,
                debug=debug,
            )
            message: Message = completion.choices[0].message
            message.sender = active_agent.name
            self.logger.pretty_print_messages(message)
            history.append(
                json.loads(message.model_dump_json())
            ) # to avoid OpenAI types (?)
            # Termination rules depend on the ENTRY agent's tool_choice.
            if enter_agent.tool_choice != "required":
                if (not message.tool_calls and active_agent.name == enter_agent.name) or not execute_tools:
                    self.logger.info("Ending turn.", title="End Turn", color="red")
                    break
            else:
                if (message.tool_calls and message.tool_calls[0].function.name == "case_resolved") or not execute_tools:
                    # Execute the final case_resolved call, then stop.
                    self.logger.info("Ending turn with case resolved.", title="End Turn", color="red")
                    partial_response = self.handle_tool_calls(
                        message.tool_calls, active_agent.functions, context_variables, debug, handle_mm_func=active_agent.handle_mm_func
                    )
                    history.extend(partial_response.messages)
                    context_variables.update(partial_response.context_variables)
                    break
                elif (message.tool_calls and message.tool_calls[0].function.name == "case_not_resolved") or not execute_tools:
                    # Execute the final case_not_resolved call, then stop.
                    self.logger.info("Ending turn with case not resolved.", title="End Turn", color="red")
                    partial_response = self.handle_tool_calls(
                        message.tool_calls, active_agent.functions, context_variables, debug, handle_mm_func=active_agent.handle_mm_func
                    )
                    history.extend(partial_response.messages)
                    context_variables.update(partial_response.context_variables)
                    break
            # handle function calls, updating context_variables, and switching agents
            if message.tool_calls:
                partial_response = self.handle_tool_calls(
                    message.tool_calls, active_agent.functions, context_variables, debug, handle_mm_func=active_agent.handle_mm_func
                )
            else:
                partial_response = Response(messages=[message])
            history.extend(partial_response.messages)
            context_variables.update(partial_response.context_variables)
            if partial_response.agent:
                active_agent = partial_response.agent
        return Response(
            messages=history[init_len:],
            agent=active_agent,
            context_variables=context_variables,
        )

View file

@ -0,0 +1,5 @@
from .docker_env import DockerEnv, DockerConfig
from .local_env import LocalEnv
from .browser_env import BrowserEnv, VIEWPORT
from .markdown_browser import RequestsMarkdownBrowser
from .utils import setup_metachain

View file

@ -0,0 +1,49 @@
import json
from pathlib import Path
import glob
# Directory containing this module; the cookie JSON exports live in ./cookie_json.
wd = Path(__file__).parent.resolve()
def load_cookies_from_json(json_path):
    """Parse a cookies JSON export file and return the list of cookie dicts."""
    return json.loads(Path(json_path).read_text())
# COOKIES_LIST = []
# COOKIES_LIST += load_cookies_from_json(wd / "cookie_json" / "orcid.org.cookies.json")
# COOKIES_LIST += load_cookies_from_json(wd / "cookie_json" / "www.researchgate.net.cookies.json")
# COOKIES_LIST += load_cookies_from_json(wd / "cookie_json" / "github.com.cookies.json")
# COOKIES_LIST += load_cookies_from_json(wd / "cookie_json" / "www.youtube.com.cookies.json")
# COOKIES_LIST += load_cookies_from_json(wd / "cookie_json" / "www.ncbi.nlm.nih.gov.cookies.json")
# COOKIES_LIST += load_cookies_from_json(wd / "cookie_json" / "archive.org.cookies.json")
def convert_cookies_to_python():
    """Merge every cookie JSON export under ``wd/cookie_json`` into one module.

    Reads all ``*.json`` files in the ``cookie_json`` directory, concatenates
    their cookie entries, writes them to ``cookies_data.py`` (next to this
    module) as a ``COOKIES_LIST`` literal, and returns the generated source.

    Returns:
        str: the Python source text that was written to ``cookies_data.py``.
    """
    json_dir = wd / "cookie_json"
    # Iterate the directory directly instead of re-joining glob() results:
    # the old code computed `wd / "cookie_json" / cookie_file` where
    # cookie_file was already an absolute path, which only worked because
    # pathlib discards the left operand when the right side is absolute.
    # sorted() makes the output order deterministic (glob order is not).
    all_cookies = []
    for json_path in sorted(json_dir.glob("*.json")):
        all_cookies.extend(load_cookies_from_json(json_path))
    # Emit a Python module defining COOKIES_LIST.
    output_path = wd / "cookies_data.py"
    output_str = "COOKIES_LIST = [\n"
    for cookie in all_cookies:
        output_str += f"    {repr(cookie)},\n"
    output_str += "]\n"
    with open(output_path, "w", encoding="utf-8") as f:
        f.write(output_str)
    return output_str
# Regenerate cookies_data.py from the JSON cookie exports when run directly.
if __name__ == "__main__":
    print(convert_cookies_to_python())

View file

@ -0,0 +1,649 @@
import atexit
import base64
import io
import json
import multiprocessing
import time
import uuid
import browsergym.core # noqa F401 (we register the openended task as a gym environment)
import gymnasium as gym
import html2text
import numpy as np
import tenacity
from browsergym.utils.obs import flatten_dom_to_str
from PIL import Image
from metachain.util import debug_print
from metachain.logger import LoggerManager
import inspect
import textwrap
from .shutdown_listener import should_continue, should_exit
from .tenacity_stop import stop_if_should_exit
from datetime import datetime
from pathlib import Path
from browsergym.core.action.functions import goto, page, get_elem_by_bid, demo_mode, tab_focus
import os
from typing import Dict, Union, cast, Literal
from playwright.sync_api import Page, Download
from metachain.io_utils import read_file
from metachain.environment.mdconvert import _get_page_markdown
from metachain.environment.browser_cookies import convert_cookies_to_python
from metachain.environment.cookies_data import COOKIES_LIST
# from constant import DOCKER_WORKPLACE_NAME, LOCAL_ROOT
from functools import update_wrapper
from inspect import signature
import types
import sys
import tempfile
# Default browser window size passed to browsergym.
VIEWPORT = {"width": 1280, "height": 720}
# Sentinel "actions" understood by the browser process during evaluation runs
# (see BrowserEnv.browser_process): fetch the task goal / accumulated rewards.
BROWSER_EVAL_GET_GOAL_ACTION = 'GET_EVAL_GOAL'
BROWSER_EVAL_GET_REWARDS_ACTION = 'GET_EVAL_REWARDS'
class BrowserInitException(Exception):
    """Raised when the browser environment process fails to start or respond."""

    def __init__(self, message='Failed to initialize browser environment'):
        super().__init__(message)
def _local_to_docker(local_path: str):
    """
    Convert a local path to a docker path

    local_path: the local path to convert, like `{local_workplace}/downloads/xxx`
    docker_path: the docker path to convert, like `{docker_workplace}/downloads/xxx`

    Examples:
    _local_to_docker('/Users/tangjiabin/Documents/reasoning/metachain/workplace_gaia_eval/downloads/xxx')
    """
    # NOTE(review): this appears to be a template — the None placeholders are
    # meant to be replaced with real paths before the source is injected into
    # the browser action namespace (see BrowserEnv.browser_process, which does
    # exactly that string replacement on its nested copy). Calling this
    # module-level copy directly raises (None membership test on a str).
    # TODO confirm there are no direct callers.
    local_workplace = None
    docker_workplace = None
    assert local_workplace in local_path, f"local_path must contain {local_workplace}"
    return local_path.replace(local_workplace, docker_workplace)
def _visit_page(url: str):
    """
    Visit a page, including downloading files based on the url

    Examples:
    _visit_page('https://archive.org/download/higpt_stage2/instruct_ds_dblp.tar.gz')
    """
    # NOTE(review): `page`, `goto`, `COOKIES_LIST`, `local_workplace` and
    # `_checkMeetChallenge` are free names here — this function's source is
    # injected into browsergym's action namespace (see
    # BrowserEnv.browser_process), which supplies them. It is not meant to be
    # called from this module directly.
    # def _local_to_docker(local_path: str):
    #     """
    #     Convert a local path to a docker path
    #     local_path: the local path to convert, like `{LOCAL_ROOT}/{DOCKER_WORKPLACE_NAME}/downloads/xxx`
    #     docker_path: the docker path to convert, like `/{DOCKER_WORKPLACE_NAME}/downloads/xxx`
    #     """
    #     assert LOCAL_ROOT in local_path, f"local_path must contain {LOCAL_ROOT}"
    #     return local_path.replace(LOCAL_ROOT, '')
    try:
        # Try to open the URL as an ordinary web page first.
        page.context.add_cookies(COOKIES_LIST)
        # goto(url)
        page.set_extra_http_headers({
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8",
            "Accept-Language": "en-US,en;q=0.9"
        })
        page.goto(url, timeout=6000)
        if page.get_by_text("Verify you are human by completing the action below.").count() > 0:
            _checkMeetChallenge()
        # Wait until the network goes idle so the page is fully loaded.
        page.wait_for_load_state("networkidle", timeout=3000)
        # page.wait_for_timeout(3000)
    except Exception as e_outer:
        # Handle the file-download case: Playwright aborts navigation for
        # direct downloads with net::ERR_ABORTED.
        if "net::ERR_ABORTED" in str(e_outer):
            import os
            import requests
            import base64
            downloads_folder = f"{local_workplace}/downloads"
            os.makedirs(downloads_folder, exist_ok=True)
            filename = os.path.basename(url)
            filepath = os.path.join(downloads_folder, filename)
            filepath = os.path.abspath(filepath)
            try:
                # Stream the file to disk with requests.
                response = requests.get(url, stream=True)
                response.raise_for_status()
                with open(filepath, 'wb') as f:
                    for chunk in response.iter_content(chunk_size=8192):
                        if chunk:
                            f.write(chunk)
                # Render a "download succeeded" page in the browser.
                # NOTE(review): the heading shows the literal '(unknown)'
                # instead of interpolating `filename` — looks like a lost
                # f-string placeholder; confirm intent before changing.
                message = f"""<body style="margin: 20px;">
<h1>Successfully downloaded '(unknown)' to local path:
<br><br>{_local_to_docker(filepath)}</h1></body>"""
                goto(
                    "data:text/html;base64," +
                    base64.b64encode(message.encode("utf-8")).decode("utf-8")
                )
                # Fire a pageshow event so observers treat this as a page load.
                page.evaluate("""
                    const event = new Event('pageshow', {
                        bubbles: true,
                        cancelable: false
                    });
                    window.dispatchEvent(event);
                """)
            except Exception as e:
                raise Exception(f"Download error: {str(e)}")
        else:
            raise e_outer
# def _click_id(bid: str, button: Literal["left", "middle", "right"] = "left"):
# """
# Clicks the mouse on the target with the given element bid.
# Examples:
# _click_id('12')
# _click_id('12', button='left')
# """
# from typing import Dict, Union, cast
# try:
# elem = get_elem_by_bid(page, bid, demo_mode != "off")
# box = cast(Dict[str, Union[int, float]], elem.bounding_box())
# # 如果既不是下载也不是新页面,在当前页面处理
# page.mouse.click(box["x"] + box["width"] / 2, box["y"] + box["height"] / 2, button=button)
# try:
# page.wait_for_load_state("networkidle", timeout=5000)
# except:
# pass
# return
# except Exception as e:
# raise Exception(f"Click error: {str(e)}")
def _click_id(bid: str, button: Literal["left", "middle", "right"] = "left"):
    """
    Clicks the mouse on the target with the given element bid.

    Examples:
    _click_id('12')
    _click_id('12', button='left')
    """
    # NOTE(review): like _visit_page, this source runs inside browsergym's
    # injected action namespace, which supplies the free names `page`, `goto`,
    # `get_elem_by_bid`, `demo_mode`, `COOKIES_LIST`, `local_workplace` and
    # `_local_to_docker`.
    # def _local_to_docker(local_path: str):
    #     """
    #     Convert a local path to a docker path
    #     local_path: the local path to convert, like `{LOCAL_ROOT}/{DOCKER_WORKPLACE_NAME}/downloads/xxx`
    #     docker_path: the docker path to convert, like `/{DOCKER_WORKPLACE_NAME}/downloads/xxx`
    #     """
    #     assert LOCAL_ROOT in local_path, f"local_path must contain {LOCAL_ROOT}"
    #     return local_path.replace(LOCAL_ROOT, '')
    from typing import Dict, Union, cast
    import time
    import base64
    import os
    # NOTE(review): private playwright module path; newer playwright releases
    # expose TimeoutError via playwright.sync_api — confirm the pinned version.
    from playwright._impl._api_types import TimeoutError as playwright_TimeoutError
    try:
        global page
        elem = get_elem_by_bid(page, bid, demo_mode != "off")
        box = cast(Dict[str, Union[int, float]], elem.bounding_box())
        # Remember the current URL so we can detect in-place navigation later.
        current_url = page.url
        page.context.add_cookies(COOKIES_LIST)
        # goto(url)
        page.set_extra_http_headers({
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8",
            "Accept-Language": "en-US,en;q=0.9"
        })
        # Click and wait for a possible file download.
        try:
            # NOTE(review): original comment said "increased to 30 s" but the
            # timeout is 5000 ms — confirm the intended value.
            with page.expect_download(timeout=5000) as download_info:
                page.mouse.click(box["x"] + box["width"] / 2, box["y"] + box["height"] / 2, button=button)
            download = download_info.value
            print(f"Downloading file: {download.suggested_filename}")
            # Make sure the downloads directory exists.
            download_path = f"{local_workplace}/downloads"
            os.makedirs(download_path, exist_ok=True)
            # Save the file.
            filepath = os.path.join(download_path, download.suggested_filename)
            filepath = os.path.abspath(filepath)
            download.save_as(filepath)
            # Render a "download succeeded" page in the browser.
            message = f"""<body style="margin: 20px;">
<h1>Successfully downloaded '{download.suggested_filename}' to local path:
<br><br>{_local_to_docker(filepath)}</h1></body>"""
            goto(
                "data:text/html;base64," +
                base64.b64encode(message.encode("utf-8")).decode("utf-8")
            )
            # Fire a pageshow event so observers treat this as a page load.
            page.evaluate("""
                const event = new Event('pageshow', {
                    bubbles: true,
                    cancelable: false
                });
                window.dispatchEvent(event);
            """)
            return
        except playwright_TimeoutError:
            # No download started within the timeout; fall through and treat
            # the click as ordinary navigation.
            # print("Download timeout, trying alternative approach...")
            # # On timeout, try fetching the direct PDF URL instead:
            # if "arxiv.org" in current_url:
            #     paper_id = current_url.split("/")[-1]
            #     pdf_url = f"https://arxiv.org/pdf/{paper_id}.pdf"
            #     _visit_page(pdf_url)
            #     return
            pass
        # Wait for a possible new tab or navigation.
        time.sleep(1)
        # Check whether a new tab was opened.
        pages_after = len(page.context.pages)
        if pages_after > 1:
            # Switch to the newest tab (rebinds the injected global `page`).
            page = page.context.pages[-1]
            page.bring_to_front()
        elif page.url != current_url:
            # The URL changed, so an in-place navigation happened.
            try:
                page.wait_for_load_state("networkidle", timeout=5000)
                if page.get_by_text("Verify you are human by completing the action below.").count() > 0:
                    _checkMeetChallenge()
                # Wait again until the page is fully loaded.
                page.wait_for_load_state("networkidle", timeout=3000)
            except:
                pass
        return
    except Exception as e:
        raise Exception(f"Click error: {str(e)}, {type(e)}")
def _checkMeetChallenge():
    """
    check if meet challenge

    Detects a Cloudflare-style human-verification page (Chinese or English
    wording) on the injected global `page` and tries up to 6 times to click
    through it; raises if the challenge is still present after all attempts.

    Examples:
    _checkMeetChallenge()
    """
    global page
    def tryToClickChallenge(this_page):
        # Best-effort single attempt to click through the challenge widget.
        try:
            # Try to locate and click the checkbox inside the challenge iframe.
            frame = this_page.frame_locator("iframe[title*='challenge']")
            if frame:
                checkbox = frame.locator("input[type='checkbox']")
                if checkbox.is_visible():
                    checkbox.click()
                    return True
            # Try clicking the verification prompt (Chinese and English).
            verify_texts = ["请完成以下操作,验证您是真人。", "Verify you are human by completing the action below."]
            for text in verify_texts:
                verify_button = this_page.get_by_text(text)
                if verify_button.is_visible():
                    verify_button.click()
                    return True
            # Last resort: click any visible challenge-styled button.
            challenge_buttons = this_page.locator("button[class*='challenge']")
            if challenge_buttons.count() > 0:
                challenge_buttons.first.click()
                return True
        except Exception as e:
            print(f"尝试点击验证失败: {str(e)}")
        return False
    check_count = 1
    max_attempts = 6
    while check_count <= max_attempts:
        # Look for the challenge-page markers (Chinese and English).
        if (page.get_by_text("请完成以下操作,验证您是真人。").count() == 0 and
            page.get_by_text("Verify you are human by completing the action below.").count() == 0):
            print("验证已完成")
            break
        print(f"检测到 Cloudflare 验证页面,尝试处理... (第 {check_count}/{max_attempts} 次)")
        # Try to handle the challenge.
        if tryToClickChallenge(page):
            print("已尝试点击验证按钮,等待响应...")
        # Wait for the challenge outcome.
        try:
            # Wait until the challenge stage disappears or new content shows up.
            page.wait_for_function("""
                () => !document.querySelector("div#challenge-stage") ||
                (!document.body.textContent.includes("请完成以下操作,验证您是真人。") &&
                !document.body.textContent.includes("Verify you are human by completing the action below."))
            """, timeout=20000)
        except:
            print("等待验证超时")
        # On the final attempt, fail hard if the challenge is still present.
        if check_count >= max_attempts:
            if (page.get_by_text("请完成以下操作,验证您是真人。").count() > 0 or
                page.get_by_text("Verify you are human by completing the action below.").count() > 0):
                raise Exception("cannot pass challenge, need to restart")
        check_count += 1
        page.wait_for_timeout(5000) # brief pause before checking again
class BrowserEnv:
    """Hosts a browsergym browser in a child process and proxies actions to it.

    The parent process keeps the ``agent_side`` end of a multiprocessing Pipe
    and sends ``(request_id, {'action': ...})`` tuples; the child process
    (``browser_process``) executes each action in the gym environment and
    replies with ``(request_id, observation)``.
    """

    def __init__(self, browsergym_eval_env: str | None = None, local_root: str | None = None, workplace_name: str | None = None):
        # Per-run log file under logs/res_<timestamp>/.
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        log_dir = Path(f"logs/res_{timestamp}")
        log_dir.mkdir(parents=True, exist_ok=True) # recursively create all necessary parent directories
        log_path = str(log_dir / "browser_env.log")
        self.log_path = log_path
        # self.logger = LoggerManager.get_logger()
        self.html_text_converter = self.get_html_text_converter()
        self.eval_mode = False
        self.eval_dir = ''
        # Local workplace directory and the corresponding path inside docker.
        # NOTE(review): os.path.join raises if local_root or workplace_name is
        # None despite the Optional annotations — confirm callers pass both.
        self.local_workplace = os.path.join(local_root, workplace_name)
        self.docker_workplace = f"/{workplace_name}"
        # EVAL only: browsergym_eval_env must be provided for evaluation
        self.browsergym_eval_env = browsergym_eval_env
        self.eval_mode = bool(browsergym_eval_env)
        # Initialize browser environment process
        multiprocessing.set_start_method('spawn', force=True)
        self.browser_side, self.agent_side = multiprocessing.Pipe()
        # tmp_env = gym.make(self.browsergym_eval_env,tags_to_mark='all') if self.eval_mode else gym.make('browsergym/openended',task_kwargs={'start_url': 'about:blank', 'goal': 'PLACEHOLDER_GOAL'},
        # wait_for_user_message=False,
        # headless=True,
        # disable_env_checker=True,
        # tags_to_mark='all'
        # )
        # obs, info = tmp_env.reset()
        # self.viewport = tmp_env.env.viewport if tmp_env.env.viewport else tmp_env.env.task.viewport
        # tmp_env.close()
        self.init_browser()
        atexit.register(self.close)

    def get_html_text_converter(self):
        """Build the html2text converter used to extract page text content."""
        html_text_converter = html2text.HTML2Text()
        # keep links, drop images (alt text is substituted for them)
        html_text_converter.ignore_links = False
        html_text_converter.ignore_images = True
        # use alt text for images
        html_text_converter.images_to_alt = True
        # disable auto text wrapping
        html_text_converter.body_width = 0
        return html_text_converter

    @tenacity.retry(
        wait=tenacity.wait_fixed(1),
        stop=tenacity.stop_after_attempt(5) | stop_if_should_exit(),
        retry=tenacity.retry_if_exception_type(BrowserInitException),
    )
    def init_browser(self):
        """Spawn the browser child process; retried (up to 5x, 1s apart) when
        BrowserInitException is raised, unless a shutdown was requested."""
        debug_print(True, "Starting browser env...", title = "Browser Env", log_path=self.log_path)
        # self.logger.info("Starting browser env...", title="Browser Env", color="green")
        try:
            self.process = multiprocessing.Process(target=self.browser_process)
            self.process.start()
        except Exception as e:
            debug_print(True, f'Failed to start browser process: {e}', title = "Browser Env", log_path=self.log_path)
            # self.logger.info(f'Failed to start browser process: {e}', title="Browser Env", color="red")
            raise
        if not self.check_alive():
            self.close()
            raise BrowserInitException('Failed to start browser environment.')

    def browser_process(self):
        """Child-process entry point: build the gym env and serve actions.

        Loops until a SHUTDOWN request arrives, should_continue() turns false,
        or the process is interrupted.
        """
        if self.eval_mode:
            assert self.browsergym_eval_env is not None
            debug_print(True, 'Initializing browser env for web browsing evaluation.', title = "Browser Env", log_path=self.log_path)
            # self.logger.info('Initializing browser env for web browsing evaluation.', title="Browser Env", color="green")
            if 'webarena' in self.browsergym_eval_env:
                import browsergym.webarena # noqa F401 register webarena tasks as gym environments
            elif 'miniwob' in self.browsergym_eval_env:
                import browsergym.miniwob # noqa F401 register miniwob tasks as gym environments
            else:
                raise ValueError(
                    f'Unsupported browsergym eval env: {self.browsergym_eval_env}'
                )
            env = gym.make(
                self.browsergym_eval_env,
                tags_to_mark='all',
            )
        else:
            from browsergym.core.action.highlevel import HighLevelActionSet
            def _local_to_docker(local_path: str):
                """
                Convert a local path to a docker path

                local_path: the local path to convert, like `{local_workplace}/downloads/xxx`
                docker_path: the docker path to convert, like `{docker_workplace}/downloads/xxx`

                Examples:
                _local_to_docker('/Users/tangjiabin/Documents/reasoning/metachain/workplace_gaia_eval/downloads/xxx')
                """
                # Placeholders below are substituted via string replacement.
                local_workplace = None
                docker_workplace = None
                assert local_workplace in local_path, f"local_path must contain {local_workplace}"
                return local_path.replace(local_workplace, docker_workplace)
            # Rewrite the template's None placeholders with the real paths.
            # NOTE(review): normalized_source is never used after this point —
            # the hand-written definition injected into python_includes below
            # supersedes it. Confirm whether this is dead code.
            source = inspect.getsource(_local_to_docker)
            normalized_source = textwrap.dedent(source)
            normalized_source = normalized_source.replace('local_workplace = None', f'local_workplace = {repr(self.local_workplace)}')
            normalized_source = normalized_source.replace('docker_workplace = None', f'docker_workplace = {repr(self.docker_workplace)}')
            action_set = HighLevelActionSet(subsets = ["chat", "infeas", "bid", "nav", "tab", "custom"], custom_actions = [_visit_page, _click_id, _get_page_markdown, _checkMeetChallenge])
            # action_set.python_includes = \
            # f"""
            # {repr(read_file('metachain/environment/markdown_browser/mdconvert.py'))}
            # """ + action_set.python_includes
            # Prepend COOKIES_LIST, a concrete _local_to_docker definition and
            # the local_workplace constant to the injected action preamble so
            # the custom actions above can resolve those free names.
            action_set.python_includes = f"""\
{convert_cookies_to_python()}
""" + action_set.python_includes
            action_set.python_includes = f"""\
def _local_to_docker(local_path: str):
    local_workplace = {repr(self.local_workplace)}
    docker_workplace = {repr(self.docker_workplace)}
    assert local_workplace in local_path
    return local_path.replace(local_workplace, docker_workplace)
""" + action_set.python_includes
            action_set.python_includes = f"local_workplace = {repr(self.local_workplace)}\n" + action_set.python_includes
            # action_set.python_includes = f"LOCAL_ROOT = {repr(LOCAL_ROOT)}\n" + action_set.python_includes
            # print(action_set.python_includes)
            action_mapping = action_set.to_python_code
            env = gym.make(
                'browsergym/openended',
                task_kwargs={'start_url': 'about:blank', 'goal': 'PLACEHOLDER_GOAL'},
                wait_for_user_message=False,
                headless=True,
                disable_env_checker=True,
                tags_to_mark='all',
                action_mapping = action_mapping
            )
        obs, info = env.reset()
        # self.viewport = env.env.viewport if env.env.viewport else env.env.task.viewport
        # print(f"Viewport: {self.viewport}")
        # (kept) earlier experiment: send viewport info through the pipe
        # EVAL ONLY: save the goal into file for evaluation
        self.eval_goal = None
        self.eval_rewards: list[float] = []
        if self.eval_mode:
            debug_print(True, f"Browsing goal: {obs['goal']}", title = "Browser Env", log_path=self.log_path)
            # self.logger.info(f"Browsing goal: {obs['goal']}", title="Browser Env", color="green")
            self.eval_goal = obs['goal']
        debug_print(True, 'Browser env started.', title = "Browser Env", log_path=self.log_path)
        # self.logger.info('Browser env started.', title="Browser Env", color="green")
        # Serve action requests until shutdown is requested.
        while should_continue():
            try:
                if self.browser_side.poll(timeout=0.01):
                    unique_request_id, action_data = self.browser_side.recv()
                    # shutdown the browser environment
                    if unique_request_id == 'SHUTDOWN':
                        debug_print(False, 'SHUTDOWN recv, shutting down browser env...', title = "Browser Env", log_path=self.log_path)
                        # self.logger.info('SHUTDOWN recv, shutting down browser env...', title="Browser Env", color="green")
                        env.close()
                        return
                    elif unique_request_id == 'IS_ALIVE':
                        self.browser_side.send(('ALIVE', None))
                        continue
                    # EVAL ONLY: Get evaluation info
                    if action_data['action'] == BROWSER_EVAL_GET_GOAL_ACTION:
                        self.browser_side.send(
                            (unique_request_id, {'text_content': self.eval_goal})
                        )
                        continue
                    elif action_data['action'] == BROWSER_EVAL_GET_REWARDS_ACTION:
                        self.browser_side.send(
                            (
                                unique_request_id,
                                {'text_content': json.dumps(self.eval_rewards)},
                            )
                        )
                        continue
                    action = action_data['action']
                    obs, reward, terminated, truncated, info = env.step(action)
                    # EVAL ONLY: Save the rewards into file for evaluation
                    if self.eval_mode:
                        self.eval_rewards.append(reward)
                    # add text content of the page
                    html_str = flatten_dom_to_str(obs['dom_object'])
                    obs['text_content'] = self.html_text_converter.handle(html_str)
                    # make observation serializable
                    obs['screenshot'] = self.image_to_png_base64_url(obs['screenshot'])
                    obs['active_page_index'] = obs['active_page_index'].item()
                    obs['elapsed_time'] = obs['elapsed_time'].item()
                    self.browser_side.send((unique_request_id, obs))
            except KeyboardInterrupt:
                debug_print(True, 'Browser env process interrupted by user.', title = "Browser Env", log_path=self.log_path)
                # self.logger.info('Browser env process interrupted by user.', title="Browser Env", color="green")
                try:
                    env.close()
                except Exception:
                    pass
                return

    def step(self, action_str: str, timeout: float = 30) -> dict:
        """Execute an action in the browser environment and return the observation.

        ``timeout`` applies to ordinary actions; ``_visit_page`` actions get a
        fixed 600-second budget instead (downloads can be slow). Raises
        TimeoutError on expiry or when a shutdown is requested.
        """
        unique_request_id = str(uuid.uuid4())
        self.agent_side.send((unique_request_id, {'action': action_str}))
        start_time = time.time()
        while True:
            if should_exit() or (time.time() - start_time > timeout and '_visit_page' not in action_str):
                raise TimeoutError('Browser environment took too long to respond.')
            if should_exit() or (time.time() - start_time > 600 and '_visit_page' in action_str):
                raise TimeoutError('Browser environment took too long to respond.')
            if self.agent_side.poll(timeout=0.01):
                response_id, obs = self.agent_side.recv()
                if response_id == unique_request_id:
                    return obs

    def check_alive(self, timeout: float = 60):
        """Ping the child process; return True if it answers ALIVE, else fall
        through (implicitly returning None)."""
        self.agent_side.send(('IS_ALIVE', None))
        if self.agent_side.poll(timeout=timeout):
            response_id, _ = self.agent_side.recv()
            if response_id == 'ALIVE':
                return True
        # NOTE(review): if poll() timed out, response_id is unbound here and
        # this debug_print raises NameError — confirm and guard if needed.
        debug_print(True, f'Browser env is not alive. Response ID: {response_id}', title = "Browser Env", log_path=self.log_path)
        # self.logger.info(f'Browser env is not alive. Response ID: {response_id}', title="Browser Env", color="red")

    def close(self):
        """Shut down the child process, escalating join -> terminate -> kill,
        then close both pipe ends. Safe to call when already dead."""
        if not self.process.is_alive():
            return
        try:
            self.agent_side.send(('SHUTDOWN', None))
            self.process.join(5) # Wait for the process to terminate
            if self.process.is_alive():
                debug_print(True, 'Browser process did not terminate, forcefully terminating...', title = "Browser Env", log_path=self.log_path)
                # self.logger.info('Browser process did not terminate, forcefully terminating...', title="Browser Env", color="red")
                self.process.terminate()
                self.process.join(5) # Wait for the process to terminate
                if self.process.is_alive():
                    self.process.kill()
                    self.process.join(5) # Wait for the process to terminate
            self.agent_side.close()
            self.browser_side.close()
        except Exception:
            debug_print(True, 'Encountered an error when closing browser env', exc_info=True, title = "Browser Env", log_path=self.log_path)
            # self.logger.info('Encountered an error when closing browser env', exc_info=True, title="Browser Env", color="red")

    @staticmethod
    def image_to_png_base64_url(
        image: np.ndarray | Image.Image, add_data_prefix: bool = False
    ):
        """Convert a numpy array to a base64 encoded png image url."""
        if isinstance(image, np.ndarray):
            image = Image.fromarray(image)
        # PNG could keep alpha, but downstream consumers expect RGB.
        if image.mode in ('RGBA', 'LA'):
            image = image.convert('RGB')
        buffered = io.BytesIO()
        image.save(buffered, format='PNG')
        image_base64 = base64.b64encode(buffered.getvalue()).decode()
        return (
            f'data:image/png;base64,{image_base64}'
            if add_data_prefix
            else f'{image_base64}'
        )

    @staticmethod
    def image_to_jpg_base64_url(
        image: np.ndarray | Image.Image, add_data_prefix: bool = False
    ):
        """Convert a numpy array to a base64 encoded jpeg image url."""
        if isinstance(image, np.ndarray):
            image = Image.fromarray(image)
        # JPEG has no alpha channel, so flatten to RGB first.
        if image.mode in ('RGBA', 'LA'):
            image = image.convert('RGB')
        buffered = io.BytesIO()
        image.save(buffered, format='JPEG')
        image_base64 = base64.b64encode(buffered.getvalue()).decode()
        return (
            f'data:image/jpeg;base64,{image_base64}'
            if add_data_prefix
            else f'{image_base64}'
        )
def source_to_function(source_code: str, func_name: str):
    """Compile ``source_code`` as a temporary module and return ``func_name``.

    The source is written to a real file on disk so that ``inspect.getsource``
    works on the returned function — the stated purpose of this helper. The
    temporary file is therefore removed at interpreter exit instead of
    immediately: the previous implementation unlinked it in a ``finally``
    block, which broke ``inspect.getsource`` on the returned function.

    Args:
        source_code: Python source defining ``func_name`` at module level.
        func_name: name of the function to extract from the compiled module.

    Returns:
        The function object defined by ``source_code``.

    Raises:
        AttributeError: if ``func_name`` is not defined by ``source_code``.
    """
    with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as f:
        f.write(source_code)
        temp_path = f.name
    # Defer cleanup so the backing file stays readable for inspect.getsource.
    atexit.register(lambda p=temp_path: os.path.exists(p) and os.unlink(p))
    # Import the temporary module from the file we just wrote.
    import importlib.util
    spec = importlib.util.spec_from_file_location("temp_module", temp_path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    # Fetch and return the requested function.
    return getattr(module, func_name)

View file

@ -0,0 +1,83 @@
[
{
"name": "donation-identifier",
"value": "8ed6af4cc08b88b68b36fffcb6dd7323",
"domain": ".archive.org",
"path": "/",
"expires": 1741773847.95608,
"httpOnly": false,
"secure": false
},
{
"name": "abtest-identifier",
"value": "ca9982a6c4240d53598f01665a3c6100",
"domain": ".archive.org",
"path": "/",
"expires": 1741773847.956153,
"httpOnly": false,
"secure": false
},
{
"name": "test-cookie",
"value": "1",
"domain": ".archive.org",
"path": "/",
"expires": 1734348067.326946,
"httpOnly": false,
"secure": false
},
{
"name": "g_state",
"value": "{\"i_l\":0}",
"domain": "archive.org",
"path": "/",
"expires": 1748690473,
"httpOnly": false,
"secure": false
},
{
"name": "logged-in-sig",
"value": "1764674476%201733138476%20Y3yQCmHjxUil%2FcGs%2FgYR6m%2FHA%2F%2FtAtShDsn25N2tNIzvkGr6EkwbEsYEwDTjZ6%2Bu4Iy65eDH5gZVrZayaRZzJEa6R91agNjLC1rmw%2F47W5OXyDVFN5kLX%2Ba2OxNOzEx6Ws%2BLVwFVr%2Bdnbzhdt1vqNTEpECwy14%2Fu4n9qXGANJ5IKEO7pfu4ONymTb0RWH%2B158Wphp0Gluy9bR1a3t3TSGM%2FyhBEa37FJ56ckJJDghwIVsANhhu%2FextDlCDLXDkPtxLrwdX%2FAlbBoNFIeQ5%2BzoJX21KKQVdJxVWzSRLb4LXyFQsvhkpL221qlJ%2FDQER53IrTAIkmxrDI4cfjumUnKTQ%3D%3D",
"domain": ".archive.org",
"path": "/",
"expires": 1764674476.838234,
"httpOnly": false,
"secure": false
},
{
"name": "logged-in-user",
"value": "jiabintang77%40gmail.com",
"domain": ".archive.org",
"path": "/",
"expires": 1764674476.838343,
"httpOnly": false,
"secure": false
},
{
"name": "PHPSESSID",
"value": "jteta3bg9mb3t8e6dkp7r6mcd4",
"domain": ".archive.org",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": false
},
{
"name": "donation",
"value": "x",
"domain": ".archive.org",
"path": "/",
"expires": 1736767334,
"httpOnly": false,
"secure": false
},
{
"name": "mp_94085d51c4102efbb82a71d85705cdcf_mixpanel",
"value": "%7B%22distinct_id%22%3A%20%22193871a38b622b1-030839df772cce-1e525636-1fa400-193871a38b71d9a%22%2C%22%24device_id%22%3A%20%22193871a38b622b1-030839df772cce-1e525636-1fa400-193871a38b71d9a%22%2C%22%24initial_referrer%22%3A%20%22%24direct%22%2C%22%24initial_referring_domain%22%3A%20%22%24direct%22%7D",
"domain": ".archive.org",
"path": "/",
"expires": 1764675133,
"httpOnly": false,
"secure": false
}
]

View file

@ -0,0 +1,48 @@
[
{
"name": "AMCV_4D6368F454EC41940A4C98A6%40AdobeOrg",
"value": "179643557%7CMCIDTS%7C20060%7CMCMID%7C09958998665032490705964449674769238820%7CMCAID%7CNONE%7CMCOPTOUT-1733149186s%7CNONE%7CvVersion%7C5.5.0",
"domain": ".hku.hk",
"path": "/",
"expires": 1767701986.053151,
"httpOnly": false,
"secure": false
},
{
"name": "ezproxy",
"value": "e1~OilZogbDH4iMWPK",
"domain": ".eproxy.lib.hku.hk",
"path": "/",
"expires": -1,
"httpOnly": true,
"secure": false
},
{
"name": "ezproxyl",
"value": "OilZogbDH4iMWPK",
"domain": ".eproxy.lib.hku.hk",
"path": "/",
"expires": -1,
"httpOnly": true,
"secure": false,
"sameSite": "Lax"
},
{
"name": "ezproxyn",
"value": "OilZogbDH4iMWPK",
"domain": ".eproxy.lib.hku.hk",
"path": "/",
"expires": -1,
"httpOnly": true,
"secure": true
},
{
"name": "mp_94085d51c4102efbb82a71d85705cdcf_mixpanel",
"value": "%7B%22distinct_id%22%3A%20%22193873d31db19b2-00bbd0bd5ad31-1e525636-1fa400-193873d31dc28b3%22%2C%22%24device_id%22%3A%20%22193873d31db19b2-00bbd0bd5ad31-1e525636-1fa400-193873d31dc28b3%22%2C%22%24initial_referrer%22%3A%20%22%24direct%22%2C%22%24initial_referring_domain%22%3A%20%22%24direct%22%7D",
"domain": ".lib.hku.hk",
"path": "/",
"expires": 1765083373,
"httpOnly": false,
"secure": false
}
]

View file

@ -0,0 +1,159 @@
[
{
"name": "_device_id",
"value": "49f9d6cfbd603c8509e73807be70a438",
"domain": "github.com",
"path": "/",
"expires": 1764674868.858374,
"httpOnly": true,
"secure": true,
"sameSite": "Lax"
},
{
"name": "MicrosoftApplicationsTelemetryDeviceId",
"value": "3966ee53-78ca-4fa3-95d7-85e299cecee4",
"domain": "github.com",
"path": "/",
"expires": 1763890136.033527,
"httpOnly": false,
"secure": true
},
{
"name": "_octo",
"value": "GH1.1.1313590405.1727940967",
"domain": ".github.com",
"path": "/",
"expires": 1759476967,
"httpOnly": false,
"secure": true,
"sameSite": "Lax"
},
{
"name": "GHCC",
"value": "Required:1-Analytics:1-SocialMedia:1-Advertising:1",
"domain": ".github.com",
"path": "/",
"expires": 1745563377,
"httpOnly": false,
"secure": true,
"sameSite": "Lax"
},
{
"name": "MSFPC",
"value": "GUID=3452f0b49fd14d349a6dbf8ddee26d60&HASH=3452&LV=202410&V=4&LU=1730011383391",
"domain": "github.com",
"path": "/",
"expires": 1761547383.513164,
"httpOnly": false,
"secure": true
},
{
"name": "logged_in",
"value": "yes",
"domain": ".github.com",
"path": "/",
"expires": 1762511337.053395,
"httpOnly": true,
"secure": true,
"sameSite": "Lax"
},
{
"name": "saved_user_sessions",
"value": "151511798%3A8an8gJwE3la35NvNIyacuRFRSHlup_9RBaQ5q4CThhvPV89o%7C152840453%3A2Quysh6Cns_a0IpeKcw-GAUZIt6ZndbJ7BoGdxx11qkZa9bi%7C151510669%3AMpYw2DQuFwt3NJiimm36OWLTQmoWFzVcSUbLuV8SBFRPqN8-%7C165454715%3AZSjwi4MUxVCr91r-m1ElvPL2L0DGDSoSo6uwV7pPpliml3js%7C148674909%3ALnLJclEDIxFjFcwX0eBlgOJzbDpsxKedtd6So7_EFs6HPtL7%7C56426168%3AmM073me2REE4yufnvYBKQ84KsG54oHKLBiTSs5tXYCoyQx4g",
"domain": "github.com",
"path": "/",
"expires": 1739599354.295483,
"httpOnly": true,
"secure": true,
"sameSite": "Lax"
},
{
"name": "user_session",
"value": "mM073me2REE4yufnvYBKQ84KsG54oHKLBiTSs5tXYCoyQx4g",
"domain": "github.com",
"path": "/",
"expires": 1734348468.858989,
"httpOnly": true,
"secure": true,
"sameSite": "Lax"
},
{
"name": "__Host-user_session_same_site",
"value": "mM073me2REE4yufnvYBKQ84KsG54oHKLBiTSs5tXYCoyQx4g",
"domain": "github.com",
"path": "/",
"expires": 1734348468.859144,
"httpOnly": true,
"secure": true,
"sameSite": "Strict"
},
{
"name": "dotcom_user",
"value": "tjb-tech",
"domain": ".github.com",
"path": "/",
"expires": 1763647073.257243,
"httpOnly": true,
"secure": true,
"sameSite": "Lax"
},
{
"name": "color_mode",
"value": "%7B%22color_mode%22%3A%22auto%22%2C%22light_theme%22%3A%7B%22name%22%3A%22light%22%2C%22color_mode%22%3A%22light%22%7D%2C%22dark_theme%22%3A%7B%22name%22%3A%22dark%22%2C%22color_mode%22%3A%22dark%22%7D%7D",
"domain": ".github.com",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": true,
"sameSite": "Lax"
},
{
"name": "cpu_bucket",
"value": "xlg",
"domain": ".github.com",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": true,
"sameSite": "Lax"
},
{
"name": "preferred_color_mode",
"value": "light",
"domain": ".github.com",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": true,
"sameSite": "Lax"
},
{
"name": "tz",
"value": "Asia%2FHong_Kong",
"domain": ".github.com",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": true,
"sameSite": "Lax"
},
{
"name": "mp_94085d51c4102efbb82a71d85705cdcf_mixpanel",
"value": "%7B%22distinct_id%22%3A%20%22cm28oqri501xjtwbjzt1prdzm%22%2C%22%24device_id%22%3A%20%221928eb980316cc-050dbe3db24bd2-16525637-16a7f0-1928eb980321bb8%22%2C%22%24search_engine%22%3A%20%22google%22%2C%22%24initial_referrer%22%3A%20%22https%3A%2F%2Fwww.google.com%2F%22%2C%22%24initial_referring_domain%22%3A%20%22www.google.com%22%2C%22%24user_id%22%3A%20%22cm28oqri501xjtwbjzt1prdzm%22%7D",
"domain": ".github.com",
"path": "/",
"expires": 1764674869,
"httpOnly": false,
"secure": false
},
{
"name": "_gh_sess",
"value": "oUZyg0XEvo5fm%2FC18yV17FMePsGYB4hM9R5q8AgiwOAjTritHx1Ux4jNGjnm7Jaxz99%2FOxD4agIy05dUdG6cnSxRP62NJE7bZxIWFV2W64ekLVCwz7ge2oaRcvVlN4HjVhw5dsl2czpD8Irn%2BZG0Dmw16tH9GZZ4yhaFW5%2Fshmte3DBYsndzLNn4rGje9B3P1IFYyz9sYx23j71xRb9wRjwoLHPYGf4Yp3vRKVAzTp3X6nrjvgr4XGU2N%2BGPH3OYDZQYCIPLckTIEmRg7a0dd2KvU2mfcm%2F%2B9N9%2FNNBFTbKvUhPwWM8kIRpv5WTzU%2FI5Y0qBv71gX2B7nNm%2FtIkWjbWUhgizf%2BpxOHAuhs89sRaicpc9NjasSUISwfxRCoH5evWqVXEifhqQvSU42iR4wkhnRHs%3D--za2vZwPq%2FBJxevj3--tEOzEYASRs0gepJUCIv8Mg%3D%3D",
"domain": "github.com",
"path": "/",
"expires": -1,
"httpOnly": true,
"secure": true,
"sameSite": "Lax"
}
]

View file

@ -0,0 +1,103 @@
[
{
"name": "hum_ieee_visitor",
"value": "3403d64f-1870-4601-9ff7-e5900074a6db",
"domain": ".ieeexplore-ieee-org.eproxy.lib.hku.hk",
"path": "/",
"expires": 1756280921.91082,
"httpOnly": false,
"secure": true
},
{
"name": "_zitok",
"value": "6273c58ab3f308a07a711718187500",
"domain": ".ieeexplore-ieee-org.eproxy.lib.hku.hk",
"path": "/",
"expires": 1761551757,
"httpOnly": false,
"secure": true,
"sameSite": "Strict"
},
{
"name": "AMCV_4D6368F454EC41940A4C98A6%40AdobeOrg",
"value": "179643557%7CMCIDTS%7C20060%7CMCMID%7C09958998665032490705964449674769238820%7CMCAID%7CNONE%7CMCOPTOUT-1733149186s%7CNONE%7CvVersion%7C5.5.0",
"domain": ".hku.hk",
"path": "/",
"expires": 1767701986.053151,
"httpOnly": false,
"secure": false
},
{
"name": "ezproxy",
"value": "e1~OilZogbDH4iMWPK",
"domain": ".eproxy.lib.hku.hk",
"path": "/",
"expires": -1,
"httpOnly": true,
"secure": false
},
{
"name": "ezproxyl",
"value": "OilZogbDH4iMWPK",
"domain": ".eproxy.lib.hku.hk",
"path": "/",
"expires": -1,
"httpOnly": true,
"secure": false,
"sameSite": "Lax"
},
{
"name": "ezproxyn",
"value": "OilZogbDH4iMWPK",
"domain": ".eproxy.lib.hku.hk",
"path": "/",
"expires": -1,
"httpOnly": true,
"secure": true
},
{
"name": "AMCVS_8E929CC25A1FB2B30A495C97%40AdobeOrg",
"value": "1",
"domain": ".ieeexplore-ieee-org.eproxy.lib.hku.hk",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": false
},
{
"name": "s_cc",
"value": "true",
"domain": ".ieeexplore-ieee-org.eproxy.lib.hku.hk",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": false
},
{
"name": "mp_94085d51c4102efbb82a71d85705cdcf_mixpanel",
"value": "%7B%22distinct_id%22%3A%20%22193873d31db19b2-00bbd0bd5ad31-1e525636-1fa400-193873d31dc28b3%22%2C%22%24device_id%22%3A%20%22193873d31db19b2-00bbd0bd5ad31-1e525636-1fa400-193873d31dc28b3%22%2C%22%24initial_referrer%22%3A%20%22%24direct%22%2C%22%24initial_referring_domain%22%3A%20%22%24direct%22%7D",
"domain": ".lib.hku.hk",
"path": "/",
"expires": 1765086053,
"httpOnly": false,
"secure": false
},
{
"name": "utag_main",
"value": "v_id:01939fa183070055958b6429f88c05075005506d00bd0$_sn:1$_se:2$_ss:0$_st:1733551853250$ses_id:1733549982472%3Bexp-session$_pn:2%3Bexp-session$vapi_domain:ieeexplore-ieee-org.eproxy.lib.hku.hk",
"domain": ".hku.hk",
"path": "/",
"expires": 1765086053,
"httpOnly": false,
"secure": false
},
{
"name": "AMCV_8E929CC25A1FB2B30A495C97%40AdobeOrg",
"value": "359503849%7CMCIDTS%7C20065%7CMCMID%7C53777252718039557930823884447397163100%7CMCAID%7CNONE%7CMCOPTOUT-1733557253s%7CNONE%7CvVersion%7C5.0.1",
"domain": ".ieeexplore-ieee-org.eproxy.lib.hku.hk",
"path": "/",
"expires": 1768110053.386389,
"httpOnly": false,
"secure": false
}
]

View file

@ -0,0 +1,67 @@
[
{
"name": "ezproxy",
"value": "e1~OilZogbDH4iMWPK",
"domain": ".eproxy.lib.hku.hk",
"path": "/",
"expires": -1,
"httpOnly": true,
"secure": false
},
{
"name": "ezproxyl",
"value": "OilZogbDH4iMWPK",
"domain": ".eproxy.lib.hku.hk",
"path": "/",
"expires": -1,
"httpOnly": true,
"secure": false,
"sameSite": "Lax"
},
{
"name": "ezproxyn",
"value": "OilZogbDH4iMWPK",
"domain": ".eproxy.lib.hku.hk",
"path": "/",
"expires": -1,
"httpOnly": true,
"secure": true
},
{
"name": "AMCVS_4D6368F454EC41940A4C98A6%40AdobeOrg",
"value": "1",
"domain": ".hku.hk",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": false
},
{
"name": "utag_main",
"value": "v_id:01939fa183070055958b6429f88c05075005506d00bd0$_sn:1$_se:13$_ss:0$_st:1733552707246$ses_id:1733549982472%3Bexp-session$_pn:9%3Bexp-session$vapi_domain:ieeexplore-ieee-org.eproxy.lib.hku.hk",
"domain": ".hku.hk",
"path": "/",
"expires": 1765086907,
"httpOnly": false,
"secure": false,
"sameSite": "Lax"
},
{
"name": "AMCV_4D6368F454EC41940A4C98A6%40AdobeOrg",
"value": "179643557%7CMCIDTS%7C20065%7CMCMID%7C09958998665032490705964449674769238820%7CMCAID%7CNONE%7CMCOPTOUT-1733558198s%7CNONE%7CvVersion%7C5.5.0",
"domain": ".hku.hk",
"path": "/",
"expires": 1768110998.70329,
"httpOnly": false,
"secure": false
},
{
"name": "mp_94085d51c4102efbb82a71d85705cdcf_mixpanel",
"value": "%7B%22distinct_id%22%3A%20%22193873d31db19b2-00bbd0bd5ad31-1e525636-1fa400-193873d31dc28b3%22%2C%22%24device_id%22%3A%20%22193873d31db19b2-00bbd0bd5ad31-1e525636-1fa400-193873d31dc28b3%22%2C%22%24initial_referrer%22%3A%20%22%24direct%22%2C%22%24initial_referring_domain%22%3A%20%22%24direct%22%7D",
"domain": ".lib.hku.hk",
"path": "/",
"expires": 1765087052,
"httpOnly": false,
"secure": false
}
]

View file

@ -0,0 +1,247 @@
[
{
"domain": "www.nature.com",
"secure": false,
"expirationDate": 1733745572000,
"hostOnly": true,
"name": "user.uuid.v2",
"httpOnly": false,
"session": false,
"storeId": null,
"value": "\"765b07e9-028b-45d1-8abd-baa7b6c88125\"",
"path": "/",
"sameSite": "Lax"
},
{
"domain": ".nature.com",
"secure": false,
"hostOnly": false,
"name": "Hm_lpvt_d38bce82bcb44717ccc29a90c4b781ea",
"httpOnly": false,
"session": true,
"storeId": null,
"value": "1733140842",
"path": "/",
"sameSite": "Lax"
},
{
"domain": ".nature.com",
"secure": false,
"expirationDate": 1764676842000,
"hostOnly": false,
"name": "ajs_anonymous_id",
"httpOnly": false,
"session": false,
"storeId": null,
"value": "7e4d00ab-3618-46a2-b0fb-c80b189a0584",
"path": "/",
"sameSite": "Lax"
},
{
"domain": ".nature.com",
"secure": false,
"expirationDate": 1764676842000,
"hostOnly": false,
"name": "ajs_user_id",
"httpOnly": false,
"session": false,
"storeId": null,
"value": "b1ae7862-b9d6-49c5-a7a5-ad96682ac6dc_SN",
"path": "/",
"sameSite": "Lax"
},
{
"domain": ".nature.com",
"secure": false,
"expirationDate": 1767700841000,
"hostOnly": false,
"name": "_ga_B3E4QL2TPR",
"httpOnly": false,
"session": false,
"storeId": null,
"value": "GS1.1.1733140776.1.1.1733140841.60.0.0",
"path": "/",
"sameSite": "Lax"
},
{
"domain": ".nature.com",
"secure": false,
"expirationDate": 1767700841000,
"hostOnly": false,
"name": "_ga_ERRNTNZ807",
"httpOnly": false,
"session": false,
"storeId": null,
"value": "GS1.1.1733140776.1.1.1733140841.60.0.467679787",
"path": "/",
"sameSite": "Lax"
},
{
"domain": ".nature.com",
"secure": false,
"expirationDate": 1767304843000,
"hostOnly": false,
"name": "cto_bundle",
"httpOnly": false,
"session": false,
"storeId": null,
"value": "5685XF9lQVd1dU4zd2xWRE1uZ3BpQTk3SUVXNkx2bGslMkZwTkZodjRWJTJCcGoyd0JWdiUyQjVlcGkwMVoyWHc4aGxKQkM2N3hyeGI4aFlIRzBZRDNTUTJFb1JYZVhPJTJGMUIlMkZka252a0RPZFdlbld4OU1jaUFrMHN6VDVaREYzSSUyRmFDMEtnb0FoaQ",
"path": "/",
"sameSite": "Lax"
},
{
"domain": ".nature.com",
"secure": true,
"expirationDate": 1766836842000,
"hostOnly": false,
"name": "__gpi",
"httpOnly": false,
"session": false,
"storeId": null,
"value": "UID=00000fa61060e41d:T=1733140842:RT=1733140842:S=ALNI_Mai2WWloG6liac6hEyJYOSjI3WtCg",
"path": "/",
"sameSite": "Lax"
},
{
"domain": ".nature.com",
"secure": false,
"expirationDate": 1766836841000,
"hostOnly": false,
"name": "_uetvid",
"httpOnly": false,
"session": false,
"storeId": null,
"value": "e6d7f220b0a411efaac753cc9ddac552",
"path": "/",
"sameSite": "Lax"
},
{
"domain": ".nature.com",
"secure": false,
"expirationDate": 1764676841000,
"hostOnly": false,
"name": "Hm_lvt_d38bce82bcb44717ccc29a90c4b781ea",
"httpOnly": false,
"session": false,
"storeId": null,
"value": "1733140777",
"path": "/",
"sameSite": "Lax"
},
{
"domain": ".nature.com",
"secure": true,
"expirationDate": 1748692774000,
"hostOnly": false,
"name": "__eoi",
"httpOnly": false,
"session": false,
"storeId": null,
"value": "ID=1ced890879e93934:T=1733140774:RT=1733140774:S=AA-AfjauQ5O9wXrdBjufrcsmQ-EM",
"path": "/",
"sameSite": "Lax"
},
{
"domain": ".nature.com",
"secure": true,
"expirationDate": 1766836842000,
"hostOnly": false,
"name": "__gads",
"httpOnly": false,
"session": false,
"storeId": null,
"value": "ID=edf25ef88638a1b3:T=1733140842:RT=1733140842:S=ALNI_MYUdW0s3LG6IOpCKgjBo4gbGPsI1Q",
"path": "/",
"sameSite": "Lax"
},
{
"domain": ".nature.com",
"secure": false,
"expirationDate": 1740916843000,
"hostOnly": false,
"name": "_fbp",
"httpOnly": false,
"session": false,
"storeId": null,
"value": "fb.1.1733140776577.688163329394303800",
"path": "/",
"sameSite": "Lax"
},
{
"domain": ".nature.com",
"secure": false,
"expirationDate": 1767700841000,
"hostOnly": false,
"name": "_ga",
"httpOnly": false,
"session": false,
"storeId": null,
"value": "GA1.1.2115119478.1733140776",
"path": "/",
"sameSite": "Lax"
},
{
"domain": ".nature.com",
"secure": false,
"expirationDate": 1733227241000,
"hostOnly": false,
"name": "_uetsid",
"httpOnly": false,
"session": false,
"storeId": null,
"value": "e6d7f280b0a411efaed4a5384bcc5d88",
"path": "/",
"sameSite": "Lax"
},
{
"domain": ".nature.com",
"secure": false,
"hostOnly": false,
"name": "HMACCOUNT",
"httpOnly": false,
"session": true,
"storeId": null,
"value": "7B6C1DFC72FE250C",
"path": "/",
"sameSite": "Lax"
},
{
"domain": ".nature.com",
"secure": true,
"expirationDate": 1767700841000,
"hostOnly": false,
"name": "permutive-id",
"httpOnly": false,
"session": false,
"storeId": null,
"value": "7cbbccaf-2079-4e6d-99fc-186a9db51c90",
"path": "/",
"sameSite": "Lax"
},
{
"domain": ".nature.com",
"secure": false,
"expirationDate": 1767700841000,
"hostOnly": false,
"name": "permutive-session",
"httpOnly": false,
"session": false,
"storeId": null,
"value": "%7B%22session_id%22%3A%221d3a9243-5c93-4975-ae30-63ca2047b7cf%22%2C%22last_updated%22%3A%222024-12-02T12%3A00%3A41.747Z%22%7D",
"path": "/",
"sameSite": "Lax"
},
{
"domain": ".nature.com",
"secure": false,
"expirationDate": 1764676775000,
"hostOnly": false,
"name": "sncc",
"httpOnly": false,
"session": false,
"storeId": null,
"value": "P%3D8%3AV%3D68.0.0%26C%3DC01%2CC02%2CC03%2CC04%26D%3Dtrue",
"path": "/",
"sameSite": "Lax"
}
]

View file

@ -0,0 +1,105 @@
[
{
"name": "OptanonAlertBoxClosed",
"value": "2024-06-06T05:28:24.993Z",
"domain": ".orcid.org",
"path": "/",
"expires": 1749187704,
"httpOnly": false,
"secure": false,
"sameSite": "Lax"
},
{
"name": "AWSELB",
"value": "CBD1D7FF1216388FA48838CBCA4774FD22800B8FB55A37124459E84B59F34FE231A4AA84F4ACD29C01160D60FB2ABE4D73D23EFBBE355236CF44A8FEE381C3844BD946CF3D",
"domain": "orcid.org",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": false
},
{
"name": "AWSELBCORS",
"value": "CBD1D7FF1216388FA48838CBCA4774FD22800B8FB55A37124459E84B59F34FE231A4AA84F4ACD29C01160D60FB2ABE4D73D23EFBBE355236CF44A8FEE381C3844BD946CF3D",
"domain": "orcid.org",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": true
},
{
"name": "orcidUserConnectionId",
"value": "-114606494029392851",
"domain": "orcid.org",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": true
},
{
"name": "XSRF-TOKEN",
"value": "b64bcd3a-f0f5-407b-9115-a1f5183f3997",
"domain": "orcid.org",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": true
},
{
"name": "JSESSIONID",
"value": "48DD20615AC49336A91F9A3A6F5B1483",
"domain": "orcid.org",
"path": "/",
"expires": -1,
"httpOnly": true,
"secure": true
},
{
"name": "locale_v3",
"value": "en",
"domain": "orcid.org",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": true
},
{
"name": "mp_94085d51c4102efbb82a71d85705cdcf_mixpanel",
"value": "%7B%22distinct_id%22%3A%20%22193871b8e67918-084bddcb7389ac-1e525636-1fa400-193871b8e682d76%22%2C%22%24device_id%22%3A%20%22193871b8e67918-084bddcb7389ac-1e525636-1fa400-193871b8e682d76%22%2C%22%24initial_referrer%22%3A%20%22%24direct%22%2C%22%24initial_referring_domain%22%3A%20%22%24direct%22%7D",
"domain": ".orcid.org",
"path": "/",
"expires": 1764674617,
"httpOnly": false,
"secure": false
},
{
"name": "OptanonConsent",
"value": "isGpcEnabled=0&datestamp=Mon+Dec+02+2024+19%3A23%3A37+GMT%2B0800+(%E9%A6%99%E6%B8%AF%E6%A0%87%E5%87%86%E6%97%B6%E9%97%B4)&version=202310.2.0&browserGpcFlag=0&isIABGlobal=false&hosts=&consentId=71ca593a-5b7c-4963-87cf-52c27440ac95&interactionCount=1&landingPath=NotLandingPage&groups=C0001%3A1%2CC0003%3A1%2CC0002%3A1%2CC0004%3A1&geolocation=HK%3B&AwaitingReconsent=false",
"domain": ".orcid.org",
"path": "/",
"expires": 1764674617,
"httpOnly": false,
"secure": false,
"sameSite": "Lax"
},
{
"name": "cookieName",
"value": "dont%20show%20message",
"domain": "orcid.org",
"path": "/",
"expires": 1764674620,
"httpOnly": false,
"secure": false,
"sameSite": "Lax"
},
{
"name": "closable-unique-name",
"value": "understood",
"domain": "orcid.org",
"path": "/",
"expires": 1764674620,
"httpOnly": false,
"secure": false,
"sameSite": "Lax"
}
]

View file

@ -0,0 +1,75 @@
[
{
"name": "user.uuid.v2",
"value": "\"f9248aca-ac13-40e6-8b45-eaeb5fe20825\"",
"domain": "www-nature-com.eproxy.lib.hku.hk",
"path": "/",
"expires": 1740916756.716508,
"httpOnly": false,
"secure": false
},
{
"name": "ezproxy",
"value": "e1~OilZogbDH4iMWPK",
"domain": ".eproxy.lib.hku.hk",
"path": "/",
"expires": -1,
"httpOnly": true,
"secure": false
},
{
"name": "ezproxyl",
"value": "OilZogbDH4iMWPK",
"domain": ".eproxy.lib.hku.hk",
"path": "/",
"expires": -1,
"httpOnly": true,
"secure": false,
"sameSite": "Lax"
},
{
"name": "ezproxyn",
"value": "OilZogbDH4iMWPK",
"domain": ".eproxy.lib.hku.hk",
"path": "/",
"expires": -1,
"httpOnly": true,
"secure": true
},
{
"name": "AMCVS_4D6368F454EC41940A4C98A6%40AdobeOrg",
"value": "1",
"domain": ".hku.hk",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": false
},
{
"name": "AMCV_4D6368F454EC41940A4C98A6%40AdobeOrg",
"value": "179643557%7CMCIDTS%7C20065%7CMCMID%7C09958998665032490705964449674769238820%7CMCAID%7CNONE%7CMCOPTOUT-1733558198s%7CNONE%7CvVersion%7C5.5.0",
"domain": ".hku.hk",
"path": "/",
"expires": 1768110998.70329,
"httpOnly": false,
"secure": false
},
{
"name": "utag_main",
"value": "v_id:01939fa183070055958b6429f88c05075005506d00bd0$_sn:1$_se:20$_ss:0$_st:1733553108768$ses_id:1733549982472%3Bexp-session$_pn:14%3Bexp-session$vapi_domain:ieeexplore-ieee-org.eproxy.lib.hku.hk",
"domain": ".hku.hk",
"path": "/",
"expires": 1765087308,
"httpOnly": false,
"secure": false
},
{
"name": "mp_94085d51c4102efbb82a71d85705cdcf_mixpanel",
"value": "%7B%22distinct_id%22%3A%20%22193873d31db19b2-00bbd0bd5ad31-1e525636-1fa400-193873d31dc28b3%22%2C%22%24device_id%22%3A%20%22193873d31db19b2-00bbd0bd5ad31-1e525636-1fa400-193873d31dc28b3%22%2C%22%24initial_referrer%22%3A%20%22%24direct%22%2C%22%24initial_referring_domain%22%3A%20%22%24direct%22%7D",
"domain": ".lib.hku.hk",
"path": "/",
"expires": 1765087436,
"httpOnly": false,
"secure": false
}
]

View file

@ -0,0 +1,138 @@
[
{
"name": "ezproxy",
"value": "e1~OilZogbDH4iMWPK",
"domain": ".eproxy.lib.hku.hk",
"path": "/",
"expires": -1,
"httpOnly": true,
"secure": false
},
{
"name": "ezproxyl",
"value": "OilZogbDH4iMWPK",
"domain": ".eproxy.lib.hku.hk",
"path": "/",
"expires": -1,
"httpOnly": true,
"secure": false,
"sameSite": "Lax"
},
{
"name": "ezproxyn",
"value": "OilZogbDH4iMWPK",
"domain": ".eproxy.lib.hku.hk",
"path": "/",
"expires": -1,
"httpOnly": true,
"secure": true
},
{
"name": "AMCVS_4D6368F454EC41940A4C98A6%40AdobeOrg",
"value": "1",
"domain": ".hku.hk",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": false
},
{
"name": "utag_main",
"value": "v_id:01939fa183070055958b6429f88c05075005506d00bd0$_sn:1$_se:20$_ss:0$_st:1733553108768$ses_id:1733549982472%3Bexp-session$_pn:14%3Bexp-session$vapi_domain:ieeexplore-ieee-org.eproxy.lib.hku.hk",
"domain": ".hku.hk",
"path": "/",
"expires": 1765087308,
"httpOnly": false,
"secure": false
},
{
"name": "AMCV_4D6368F454EC41940A4C98A6%40AdobeOrg",
"value": "179643557%7CMCIDTS%7C20065%7CMCMID%7C09958998665032490705964449674769238820%7CMCAID%7CNONE%7CMCOPTOUT-1733559088s%7CNONE%7CvVersion%7C5.5.0",
"domain": ".hku.hk",
"path": "/",
"expires": 1768111888.617908,
"httpOnly": false,
"secure": false
},
{
"name": "SID",
"value": "\"EUW1ED0CAFs37MFXuY5NakcbWc5Qu\"",
"domain": ".lib.hku.hk",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": false
},
{
"name": "CUSTOMER",
"value": "\"UNIVERSITY OF HONG KONG\"",
"domain": ".lib.hku.hk",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": false
},
{
"name": "E_GROUP_NAME",
"value": "\"University of Hong Kong\"",
"domain": ".lib.hku.hk",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": false
},
{
"name": "__cf_bm",
"value": "dU7HSmMJl6w4XDg.tZSoewkYsxb0bX7Barvg4RvulLw-1733551961-1.0.1.1-7Um2w5HRPO8C06bwjScmRD9BaTZkbArPDfX_e8urefWlKlH50nONZAxnxeL4VbDbHzBBcAY1OzwO5TyNuuCUfQ",
"domain": ".lib.hku.hk",
"path": "/",
"expires": 1733553761.117424,
"httpOnly": false,
"secure": false
},
{
"name": "AMCVS_242B6472541199F70A4C98A6%40AdobeOrg",
"value": "1",
"domain": ".hku.hk",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": false
},
{
"name": "adBlockEnabled",
"value": "blocked",
"domain": "www-science-org.eproxy.lib.hku.hk",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": false
},
{
"name": "cookiePolicy",
"value": "iaccept",
"domain": "www-science-org.eproxy.lib.hku.hk",
"path": "/",
"expires": 1733638777.524329,
"httpOnly": false,
"secure": true
},
{
"name": "AMCV_242B6472541199F70A4C98A6%40AdobeOrg",
"value": "179643557%7CMCIDTS%7C20065%7CMCMID%7C90810009207598809487163227219398447255%7CMCOPTOUT-1733559578s%7CNONE%7CvVersion%7C5.5.0",
"domain": ".hku.hk",
"path": "/",
"expires": 1768112378.032281,
"httpOnly": false,
"secure": false
},
{
"name": "mp_94085d51c4102efbb82a71d85705cdcf_mixpanel",
"value": "%7B%22distinct_id%22%3A%20%22193873d31db19b2-00bbd0bd5ad31-1e525636-1fa400-193873d31dc28b3%22%2C%22%24device_id%22%3A%20%22193873d31db19b2-00bbd0bd5ad31-1e525636-1fa400-193873d31dc28b3%22%2C%22%24initial_referrer%22%3A%20%22%24direct%22%2C%22%24initial_referring_domain%22%3A%20%22%24direct%22%2C%22%24search_engine%22%3A%20%22google%22%7D",
"domain": ".lib.hku.hk",
"path": "/",
"expires": 1765088378,
"httpOnly": false,
"secure": false
}
]

View file

@ -0,0 +1,103 @@
[
{
"name": "_sp_id.a65e",
"value": "a151b61b-0e26-493f-9885-ed0d9579e181.1712037732.1.1712037742..381bfab3-8c2a-4e54-8d4b-44a5c8c997ef..6db53b82-8b6d-471c-b7de-194adad46810.1712037732261.2",
"domain": "www.collinsdictionary.com",
"path": "/",
"expires": 1746597742.393476,
"httpOnly": false,
"secure": true
},
{
"name": "__cflb",
"value": "02DiuFwNDm462z9fWfJeB58usqeie1xoTDrYZciipwE2x",
"domain": "www.collinsdictionary.com",
"path": "/",
"expires": 1733223382.743499,
"httpOnly": true,
"secure": true
},
{
"name": "XSRF-TOKEN",
"value": "64a0f62d-dc8f-40cb-8aa2-66e3ad283ad4",
"domain": "www.collinsdictionary.com",
"path": "/",
"expires": -1,
"httpOnly": true,
"secure": true
},
{
"name": "dictcode",
"value": "english",
"domain": "www.collinsdictionary.com",
"path": "/",
"expires": 1763873478.639472,
"httpOnly": false,
"secure": false
},
{
"name": "searchPanelOpen",
"value": "true",
"domain": ".collinsdictionary.com",
"path": "/",
"expires": 1733803809.434554,
"httpOnly": false,
"secure": false
},
{
"name": "search",
"value": "hello",
"domain": "www.collinsdictionary.com",
"path": "/",
"expires": 1763871009.434808,
"httpOnly": false,
"secure": false
},
{
"name": "__cf_bm",
"value": "xGchgbvqtkoAYddlxWT4VgRmeTZ1qTVmI0hjpRvOj0w-1733201062-1.0.1.1-SDl6_cuGUlqEOSm4oDQpU5rJdha8wEbITIgLoxdY69GgWrSt5GO7nX47Vc2AihzcBY.yS6GZ9qXVfRKEttQyLw",
"domain": ".collinsdictionary.com",
"path": "/",
"expires": 1733202862.320396,
"httpOnly": true,
"secure": true
},
{
"name": "last_url",
"value": "https%3A%2F%2Fwww.collinsdictionary.com%2Fdictionary%2Fspanish-english%2Fcaminata",
"domain": "www.collinsdictionary.com",
"path": "/",
"expires": 1763873068.316249,
"httpOnly": false,
"secure": false
},
{
"name": "OptanonConsent",
"value": "isGpcEnabled=0&datestamp=Tue+Dec+03+2024+12%3A51%3A18+GMT%2B0800+(%E9%A6%99%E6%B8%AF%E6%A0%87%E5%87%86%E6%97%B6%E9%97%B4)&version=202409.2.0&browserGpcFlag=0&isIABGlobal=false&hosts=&landingPath=NotLandingPage&groups=C0001%3A1%2CC0002%3A1%2CC0003%3A1%2CC0004%3A1&AwaitingReconsent=false&geolocation=JP%3B27",
"domain": ".collinsdictionary.com",
"path": "/",
"expires": 1759121478,
"httpOnly": false,
"secure": false,
"sameSite": "Lax"
},
{
"name": "OptanonAlertBoxClosed",
"value": "2024-12-03T04:51:18.738Z",
"domain": ".collinsdictionary.com",
"path": "/",
"expires": 1759121478,
"httpOnly": false,
"secure": false,
"sameSite": "Lax"
},
{
"name": "mp_94085d51c4102efbb82a71d85705cdcf_mixpanel",
"value": "%7B%22distinct_id%22%3A%20%22193873a93591e4d-05e2471014e6fb-1e525636-1fa400-193873a935a287e%22%2C%22%24device_id%22%3A%20%22193873a93591e4d-05e2471014e6fb-1e525636-1fa400-193873a935a287e%22%2C%22%24initial_referrer%22%3A%20%22%24direct%22%2C%22%24initial_referring_domain%22%3A%20%22%24direct%22%7D",
"domain": ".collinsdictionary.com",
"path": "/",
"expires": 1764737478,
"httpOnly": false,
"secure": false
}
]

View file

@ -0,0 +1,146 @@
[
{
"name": "UUID",
"value": "3c4dd735-8d33-4fd0-a40f-83d399a0dc46",
"domain": "www.jstor.org",
"path": "/",
"expires": 1740190342.420181,
"httpOnly": false,
"secure": true
},
{
"name": "_pxvid",
"value": "2dd5c1cb-b670-11ee-9186-3dd546fa1c41",
"domain": "www.jstor.org",
"path": "/",
"expires": 1737166344,
"httpOnly": false,
"secure": false,
"sameSite": "Lax"
},
{
"name": "__zlcmid",
"value": "1O1n3oDgpvApbaQ",
"domain": ".jstor.org",
"path": "/",
"expires": 1764831447,
"httpOnly": false,
"secure": false,
"sameSite": "Lax"
},
{
"name": "csrftoken",
"value": "iyx0piwZPaxHwlEjMlBpPrxRasiSrbVv",
"domain": "www.jstor.org",
"path": "/",
"expires": 1764745045.418981,
"httpOnly": false,
"secure": true,
"sameSite": "Lax"
},
{
"name": "OptanonAlertBoxClosed",
"value": "2024-12-04T06:56:48.465Z",
"domain": ".jstor.org",
"path": "/",
"expires": 1764831408,
"httpOnly": false,
"secure": false,
"sameSite": "Lax"
},
{
"name": "AccessToken",
"value": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzZXNzaW9uSWQiOiIwMTkzOTA3NGY4MTg3OTQ1OTg4NzdiNWQzMWE4NDk3MSIsInV1aWQiOiIzYzRkZDczNS04ZDMzLTRmZDAtYTQwZi04M2QzOTlhMGRjNDYiLCJ1c2VyIjp7ImlkIjoiIiwibG9nZ2VkSW4iOmZhbHNlLCJhZG1pbiI6ZmFsc2V9LCJpYXQiOjE3MzMyOTU0MDksImV4cCI6MTczMzI5NTcwOX0.lIt08pG__dm_kZ3kJUYMw_bK0Ow2kAD8i2Jf8OQA0RM",
"domain": "www.jstor.org",
"path": "/",
"expires": 1733299009.610988,
"httpOnly": false,
"secure": true,
"sameSite": "Lax"
},
{
"name": "AccessSessionTimedSignature",
"value": "1b72fc2754973a3daa1baf1b169dfda5ed067ed4113573f1a1005ce5da900999",
"domain": "www.jstor.org",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": false
},
{
"name": "AccessSession",
"value": "H4sIAAAAAAAA_42U3W7bMAyF38XXUSFKlEXlLgv2f7Gi7S62oRgYSc68uXHg2B26ou8-KW4adxu6GoEhyYfi4Scqt8Uw1KGYF9pjCFYbQUFrgVWQglFWgnTQzrEMHstiVtTbpAVLJ2DsSfop1Hk17yDBaSetQYzKuqpy1q-YfbBKckWGkm47FWJFQNahcUTWrkzQwITOQhJ2e2HFu765ESFWPDT9nBQiWXIVeAmMUCkGUtJrU6UX5AQN9ylOSYUClJB4Icu5sXOJJ1YTgrJkPmfZ8KRMQ7mX7Z5WmVHV-2Led0OcFdfc7H1PQ9whJAW4Mm2dQ7jvu10xvy2WyyRfnKel5UUatTv-wV-VzfNPaf7uNI3OlofReda8vXizeL8o7tIuQ_9t4X2fdvpyW_Q325g_b3Z93Q993W4yx7aJ-fPlrLiqN_VV_Su-anh9MLx3CyVamaGMM5BSKq3LfAxxvxSjX1HJLGLJTqAMJNhqKYB1NICUzzvH3zuoHznwXQyjwWe3mXhmwoR7iM9v3Xt7L7r25y52p1x39WZdjBhHIHeXd7MJyFBf12HgZsKx-Hj-8qx4iiai1gpB6iNRQ45caj59JOqgghBABBmsQDBOMJUovFaByKtQAkyInsZu124-Jtd_Az3kwwmQdduumyhAOmegTHfMSJsOGYg0HUF8WOTC_6g_lcYPLTWemEWrFD54V-nmhtTNwliuBAZk4TywkBwlQVm5kswxyVlcj33wL2DNfQJNWtGk3m4ftK83H8hIMbXlEaCLJG2IXlTargR6awR7SjfOAzk2fiVZHk287tph-6QHNcm-zuoxeaLxffzL-s_zGONvRDIvs1UFAAA",
"domain": "www.jstor.org",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": false
},
{
"name": "AccessSessionSignature",
"value": "78ecce97f2a2de3ffb4af7c87424885165a11fe7d2e29bf960edff5c48167a35",
"domain": "www.jstor.org",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": false
},
{
"name": "_pxhd",
"value": "xbR2aQnYKWrHiFs3JKCUojuB1cOv3M4cNFTMhSdjqxqtIsdgsSi8ehBRY7OxVo9kz4W0VVvgcv6IyY0Ta0SJPA==:medf83pfHV213oGcCOKWmgsZc4Kr8q2rEC2GEWBrpMBibA5DYuL7eKs10ANKfVD8qmvJUfcosZrIkQ83XUVKtKmZa4Y6lK04fy46yN254wo=",
"domain": "www.jstor.org",
"path": "/",
"expires": 1764831425.214494,
"httpOnly": false,
"secure": false
},
{
"name": "pxcts",
"value": "f8fbc8a1-b20c-11ef-a65c-4947163db9b8",
"domain": "www.jstor.org",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": false,
"sameSite": "Lax"
},
{
"name": "_px2",
"value": "eyJ1IjoiZjhjZDk4ZjAtYjIwYy0xMWVmLWFkYzctZGQxMTkxM2QxNDlkIiwidiI6IjJkZDVjMWNiLWI2NzAtMTFlZS05MTg2LTNkZDU0NmZhMWM0MSIsInQiOjE3MzMyOTU3MjY4NjgsImgiOiIyMTFhMjMyMTRlZmQwOWE5OTNhZjlmODU2MDU1ZmI1N2U4MTcwY2RmNDNlZjM0MGFhYzg1Yzk2NzQ0NmVjOWI5In0=",
"domain": "www.jstor.org",
"path": "/",
"expires": 1733296026,
"httpOnly": false,
"secure": false,
"sameSite": "Lax"
},
{
"name": "ReferringRequestId",
"value": "fastly-default:258d2927284d8837614cc35365d11c1d",
"domain": "www.jstor.org",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": true,
"sameSite": "Lax"
},
{
"name": "mp_94085d51c4102efbb82a71d85705cdcf_mixpanel",
"value": "%7B%22distinct_id%22%3A%20%221939074fba7618-0aaf84ba8a423a-1e525636-16a7f0-1939074fba82966%22%2C%22%24device_id%22%3A%20%221939074fba7618-0aaf84ba8a423a-1e525636-16a7f0-1939074fba82966%22%2C%22%24search_engine%22%3A%20%22google%22%2C%22%24initial_referrer%22%3A%20%22https%3A%2F%2Fwww.google.com%2F%22%2C%22%24initial_referring_domain%22%3A%20%22www.google.com%22%7D",
"domain": ".jstor.org",
"path": "/",
"expires": 1764831445,
"httpOnly": false,
"secure": false
},
{
"name": "OptanonConsent",
"value": "isGpcEnabled=0&datestamp=Wed+Dec+04+2024+14%3A57%3A25+GMT%2B0800+(%E4%B8%AD%E5%9B%BD%E6%A0%87%E5%87%86%E6%97%B6%E9%97%B4)&version=202303.1.0&browserGpcFlag=0&isIABGlobal=false&hosts=&consentId=e6c4a174-c538-4f9f-8632-f5f8e9ff428d&interactionCount=2&landingPath=NotLandingPage&groups=C0001%3A1%2CC0002%3A1%2CC0005%3A1%2CC0004%3A1%2CC0003%3A1&AwaitingReconsent=false&geolocation=JP%3B27",
"domain": ".jstor.org",
"path": "/",
"expires": 1764831445,
"httpOnly": false,
"secure": false,
"sameSite": "Lax"
}
]

View file

@ -0,0 +1,111 @@
[
{
"name": "ncbi_sid",
"value": "015E11D6531E8483_1525SID",
"domain": ".nih.gov",
"path": "/",
"expires": 1764675079.027761,
"httpOnly": false,
"secure": false
},
{
"name": "pmc-frontend-csrftoken",
"value": "L3uvd1o5Uu2efxgCXWDzwxfDTl5QIFDR",
"domain": "www.ncbi.nlm.nih.gov",
"path": "/",
"expires": 1753769052.705813,
"httpOnly": false,
"secure": false,
"sameSite": "Lax"
},
{
"name": "ncbi-login-route",
"value": "google",
"domain": ".ncbi.nlm.nih.gov",
"path": "/",
"expires": 1741001395.405247,
"httpOnly": false,
"secure": false
},
{
"name": "PRESERVTARGET",
"value": "%2FtYTXpgzJne16bwfb4ZN2lGInyYoZNk58TVbSvhIR0njSJplCp65%2BiF2SZAktvmmznDxgJBJhBCH%0ANoo2je1cMk0RXykLSXa4UwW7u0%2B%2Fc1X7WzHdCi209NjSVDPLNfOmFzmtz50Uuh6EfD95OQ%2BYQ2B%2B%0Aq7BP3es9s8ArLlZd9XW7NS72Ulu8cigULF%2FZADnu%2FPZf8DmPLOXuV6xWf0fqcNlZXwWhiCjrPJiU%0AU594rDm20QBWFe5y0VjWXnJtzYm7uSPkWDQYJ8htbKyWwjn4aG0xcYfTBSBUTOi9A%2Bo1BnUPHLIi%0A8V9%2Fi7S2i2vLCCwVTCSGS0pctKKWZRmzEmP9NB4rA167%2FSMuyX6ezHZNUyztiKaga84g5monl5bT%0AjNlmWeBFQV90piriK2wjmey3mIoTu2eJyDi%2Bx%2FO7pwMTfeiU2WXZ5h3U4kRBxw%2FR6%2FrCMYtVrzXp%0A%2FexiuMJDHQmiDPowP8dxw97tgs353jnBRGe8jpoCPoPG2hywQnwXtxW8SjWp19yTypxVFl4KnD1e%0A5aoPyq%2F7tPDRPbW7UikYuihFvX0mD1TH7A0G9Bk%2B36y%2F7jL8oW7OArzEbESjcx2aVRL%2B3VqzX1Oc%0AZcFWXfVarYgckE8EeyNwFwhPDoASs2T4SVNAJAQ38A0bYzCAxc6mQLqADqesOuuveClDDgB8WITg%0A1QnE32rGsLz37nzAQ89V",
"domain": ".nih.gov",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": true
},
{
"name": "NIHSMPROFILE",
"value": "9i9xFyZxcZ3DeEBWJ1M%2B1ygJsb2LhWqfanAC3W20fjIpeXaMrRQ%2F9L3R6DUjYzq5%2FqUDVLhYywfn1%2BT0RJpzID8efN8zNczLDneXLM7waIbhTdfwbIh%2BCnmN0fucHtqYylLU1altZcOhSRTow47jYwyEUFsmJ6kz3T1%2BnZWx3Ol0zaFC8onzZCtl4YHbCxMJVbHYuMcGM4f4YxpiDefQvlDdwY1soBI8z9nvb%2BKMs1B3GgplTzyllWIbC1RHxGLvdlNaz8Zlzw6MU4B3piqrAiCWAvoMF3%2FSShIchSdP0utP%2BMROhcGaoWBU%2FKfkjjDc3lHCPfydE%2F895aasf6uvrL7uccokjb6HxdVs0FA%2FHxfBNJXURVRSpRl9%2BPOd9%2FOOlXQQqhBh1FyAZs6WIxDvLhegMvLITcLh7ahcahuJnoeImSla4b4kK0Ayy6736mJCa0hhXUzGjab4Yhht11PliHlAlh4wLEXj0Dp7X9pj7Ws1%2BdCx8QZyiTWrbauCQJtS1hNXn%2Blg4BoQ2sIq%2FxltuA%3D%3D",
"domain": ".nih.gov",
"path": "/",
"expires": -1,
"httpOnly": true,
"secure": true
},
{
"name": "NIHSMSESSION",
"value": "ZITCB6DlveT31D7iO+eVBnrLBxVxrCJGSz/5hVXNSYwvjuPpvd0O7dD6XqsFf6LKdJXktiX+hhtPWtsNok2mgiSvcpYTBHJxHVefyBt+KiLTVm12lBwYTkx9Gb9OxZNQsMS/Hvoj45/ShvSKut3d7c8e2cEhtjA7DWjHEYHj0tuk3fEoQ4t0UAWkhj6bFt5Vo5tm6dyqen/1EH2o6cBZrVSLbk67LctlbRyV4pc5099Il2lTMPo6LqtyVI1AC/bcSioge+LqDbpDiuP4NOF3EPj/yFSWvBz76/bqQ0Hu5oRGCC1zVPhSGJ1iukio91F6IfYr5vTtMrN00vSuxHRnxj0BYCEuGKtCeNDohuGAZvQVkjhc1aI53oWFu8UNHZvYA+Xo2lpLZUOT+2pkjk1Z/WpAPzE8L+O6mRjwaq8/2b3rUS8gq4Y/2vQYhqmNxeHOBsM01BhpJdRF3Urp3dnIIYakYeCgzC/3Kww+p8ImsBTjvXDO1ivVEjKB4AdnCsRdLpGPszS9xF7439aXXCWHDItM4Wl458T12QWeV+DXiiwzD/kis1QQBWibzsZOzo9KDM3oapKa8I2ouKXb797Z7s+eLR1+Z10lyWZuNVLLZK5ckFT5riayLYeT8+IjFYVER/nfDzm3KpgVPnep/k4DANpDgAOK78iuTv3sBndNMoKrXz2qCZtfi3/gLGZTKcOy90meluFZy9+iLyb+M01VBWuDp/v0a2jSdsJPVmgUQqz7hLVvtc4KpMfiDhfxXGMQnaieP9jREFK3NutAiUrkjS96WS3v5eLK80o/aG1j5IsAvxU/0lMnEri3Yz6Qw1f0ymS6giKiFIUBRUWGXcm5S1qCjwL5GiU71r3nOcaC8T9T1pVLf1R558WqH6Ha95aJVqN6CnEHo8TsZl25lb5tlJgbgb2OFvLSrbUZwuM3R5mA9zP7ciQBywxNm7xFO8sX8QQk0bRhrhgk458KE72Ci/8lhZmvpYy5aqbI4OtaLkuFuu3lX3c7/LsGt+iTFkO6eDSS4CFEnFqg3W5Glvs7WZkTasVI7L0mN0q8DCPXaIDFVPlXEA0shxZuB6Iz+mx4MshQHwY9fMRSWB7gOF5cHjHYUBLfHT/gOwl35rkoJfVf9ikpcgT88mJyk9KTQpVM+CZAGUFDbgHsRqA0jPE19sBum3cqaA6fzh9AnWXfOlAY5KNDdTB4yip4UakCXWsiXVng0GfQ7KvxAguC59L7iZyFjdsIDESi7ZozcPHOpFZleeAU3yFTvMGHmO3G3RFrxyIGCwgWehus3YCqQxZPSE6+yLjXeXTqhqgk0kxcV/MlOFgzMcAhgKEYJS045sLZsmohsIVLV0ONY4uqogSxd3YUzc0WImi1mYdNbzYwbX5tPngah4SK61Nia8Z6xjZuKfXnxNFEkNneezPoPy97Hvd+9wzI+DkU5sa844DzGxeSY/ySE3DTtpowf440r5rX",
"domain": ".nih.gov",
"path": "/",
"expires": -1,
"httpOnly": true,
"secure": true
},
{
"name": "MyNcbiSigninPreferences",
"value": "O2dvb2dsZSY%3D",
"domain": ".nih.gov",
"path": "/",
"expires": 1740915025.611341,
"httpOnly": false,
"secure": false
},
{
"name": "ncbi_prevPHID",
"value": "CE88342C74D8A32100000000003B0036",
"domain": ".ncbi.nlm.nih.gov",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": false
},
{
"name": "WebCubbyUser",
"value": "3GX25AI24DLUXL8LVDJFIVTH6LJRZBE1%3Blogged-in%3Dtrue%3Bmy-name%3Djiabintang77%2540gmail.com%3Bpersistent%3Dfalse%40015E11D6531E8483_1525SID",
"domain": ".nih.gov",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": false
},
{
"name": "WebEnv",
"value": "1D7wJH%40015E11D6531E8483_1525SID",
"domain": ".nlm.nih.gov",
"path": "/",
"expires": 1733167826.636953,
"httpOnly": true,
"secure": true
},
{
"name": "ncbi_pinger",
"value": "N4IgDgTgpgbg+mAFgSwCYgFwgAwEYCsAorrgCIBs+AzLoQBwAsdAnLgOxU1XPZt354AygElSIAL5A===",
"domain": ".ncbi.nlm.nih.gov",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": true
},
{
"name": "mp_94085d51c4102efbb82a71d85705cdcf_mixpanel",
"value": "%7B%22distinct_id%22%3A%20%22193872246ca871-06560f33a3902-1e525636-1fa400-193872246cb267c%22%2C%22%24device_id%22%3A%20%22193872246ca871-06560f33a3902-1e525636-1fa400-193872246cb267c%22%2C%22%24search_engine%22%3A%20%22google%22%2C%22%24initial_referrer%22%3A%20%22https%3A%2F%2Fwww.google.com%2F%22%2C%22%24initial_referring_domain%22%3A%20%22www.google.com%22%7D",
"domain": ".nih.gov",
"path": "/",
"expires": 1764675078,
"httpOnly": false,
"secure": false
}
]

View file

@ -0,0 +1,74 @@
[
{
"name": "MAID",
"value": "+O8mvi2rAtZrnJqF+2cRIQ==",
"domain": ".pnas.org",
"path": "/",
"expires": 1759078802.198648,
"httpOnly": true,
"secure": true
},
{
"name": "MACHINE_LAST_SEEN",
"value": "2024-12-02T09%3A00%3A01.960-08%3A00",
"domain": ".pnas.org",
"path": "/",
"expires": 1759078802.198711,
"httpOnly": true,
"secure": true
},
{
"name": "JSESSIONID",
"value": "CEDD494D14F0052C199B1D7AE667EF42",
"domain": ".pnas.org",
"path": "/",
"expires": -1,
"httpOnly": true,
"secure": true
},
{
"name": "__cf_bm",
"value": "YJQBFxCTLG1d3d9R0fVmwlmAgP9kqVl3zwf02v.COMQ-1733158802-1.0.1.1-tLccs1jD809lM7_9Bhy35sLQdM1TaakBEYvhdDEi1w9cWJS9IGjovTwKGdYQtse6_rWkJNYt._LsHQI2WCwDUQ",
"domain": ".pnas.org",
"path": "/",
"expires": 1733160603.504839,
"httpOnly": true,
"secure": true
},
{
"name": "cookiePolicy",
"value": "accept",
"domain": ".pnas.org",
"path": "/",
"expires": 1767718816.994233,
"httpOnly": true,
"secure": true
},
{
"name": "connect_auto_login",
"value": "true",
"domain": ".pnas.org",
"path": "/",
"expires": 1735750875.510643,
"httpOnly": true,
"secure": true
},
{
"name": "PLUID",
"value": "l8nplDdx7mN9Xh4lErbknypxfmo=",
"domain": ".pnas.org",
"path": "/",
"expires": 1759078875.9476,
"httpOnly": true,
"secure": true
},
{
"name": "mp_94085d51c4102efbb82a71d85705cdcf_mixpanel",
"value": "%7B%22distinct_id%22%3A%20%221938850d07a6d2-0446945abb35c6-1e525636-16a7f0-1938850d07c132a%22%2C%22%24device_id%22%3A%20%221938850d07a6d2-0446945abb35c6-1e525636-16a7f0-1938850d07c132a%22%2C%22%24initial_referrer%22%3A%20%22%24direct%22%2C%22%24initial_referring_domain%22%3A%20%22%24direct%22%7D",
"domain": ".pnas.org",
"path": "/",
"expires": 1764694876,
"httpOnly": false,
"secure": false
}
]

View file

@ -0,0 +1,122 @@
[
{
"name": "edgebucket",
"value": "lyxOSFtqXnsQEn0H9C",
"domain": ".reddit.com",
"path": "/",
"expires": 1738484736.570167,
"httpOnly": false,
"secure": true
},
{
"name": "loid",
"value": "000000000r0luy1m5t.2.1703924736813.Z0FBQUFBQmxqOVFBLVZ2UHJIRWswQW4zQnlJZGtYU2ZBS1dSQlpncW1hQ2o2TmVLMk12QkFYRlBEMFpaOGpqTndjcXhuQjhFS3hhc0dSMXRyZ1o4SUg1cTZvSTNHejk5NW5xdlRTRUtfeUdSU250alJhQTFDY3RSeDJrekdnWG90bk1CWmhhc2hlMWU",
"domain": ".reddit.com",
"path": "/",
"expires": 1763278221.514142,
"httpOnly": false,
"secure": true
},
{
"name": "csv",
"value": "2",
"domain": ".reddit.com",
"path": "/",
"expires": 1738484736.97362,
"httpOnly": false,
"secure": true
},
{
"name": "g_state",
"value": "{\"i_l\":0}",
"domain": "www.reddit.com",
"path": "/",
"expires": 1744270240,
"httpOnly": false,
"secure": false
},
{
"name": "pc",
"value": "nd",
"domain": ".reddit.com",
"path": "/",
"expires": 1760254304,
"httpOnly": false,
"secure": true
},
{
"name": "__stripe_mid",
"value": "104997eb-5535-4dd4-a71a-5a7f697b8a4650cc1f",
"domain": ".www.reddit.com",
"path": "/",
"expires": 1761300348,
"httpOnly": false,
"secure": true,
"sameSite": "Strict"
},
{
"name": "t2_r0luy1m5t_recentclicks3",
"value": "t3_o9s5iv%2Ct3_1ga952r%2Ct3_1eefr4x%2Ct3_1fs5q5b",
"domain": ".reddit.com",
"path": "/",
"expires": 1762676289,
"httpOnly": false,
"secure": false,
"sameSite": "Strict"
},
{
"name": "reddit_session",
"value": "eyJhbGciOiJSUzI1NiIsImtpZCI6IlNIQTI1NjpsVFdYNlFVUEloWktaRG1rR0pVd1gvdWNFK01BSjBYRE12RU1kNzVxTXQ4IiwidHlwIjoiSldUIn0.eyJzdWIiOiJ0Ml9yMGx1eTFtNXQiLCJleHAiOjE3NDg4MzgwNzkuNzc0MDEzLCJpYXQiOjE3MzMxOTk2NzkuNzc0MDEzLCJqdGkiOiJyT3l6V2hFUmtxNDA0b0YzX1FSSVR3R240Y0gzS0EiLCJjaWQiOiJjb29raWUiLCJsY2EiOjE3MDM5MjQ3MzY4MTMsInNjcCI6ImVKeUtqZ1VFQUFEX193RVZBTGsiLCJ2MSI6Ijc2MjE3NTUxMDk3OTY5LDIwMjQtMTAtMTJUMDc6MzA6NDEsYWU3Y2U5ZDdiMjU3OGQ3MWVmMTEwYjFiNTc2NTU2NmNmYzJkNDljNiIsImZsbyI6Mn0.NUhfjOfX7pWC5FFUfJvTw0Ts8b0ZICrmmg_Eh4_O6hvnqEH5UHVjjwtS7YNGyxTRv5k0AJTx-GW5CWTUJvhciPOYokV1iM4RirTbijGfqyvSlbl7YIARX8gUMrm6X2TmFvHmQHem4S-0YcrhvBakEXb2TAk0e4KLiPBS6jbEa5c4EoIkp8PjvFVkWZhY_FMge6SxAmPlx2Xksk2c_9s_rJ-UTZkyOWP5ighh7TmA3B_0ZWEPRme7yDBtV-AJ1UH533suaBukxD_-O3afm1AtjMiQAygZ4tl78T7unCopK1_c8PGiunpfLCMTdTTb07NnI2_fo-8AfARF-9O1MPdYFQ",
"domain": ".reddit.com",
"path": "/",
"expires": 1748838078.887127,
"httpOnly": true,
"secure": true
},
{
"name": "mp_94085d51c4102efbb82a71d85705cdcf_mixpanel",
"value": "%7B%22distinct_id%22%3A%20%22cm28oqri501xjtwbjzt1prdzm%22%2C%22%24device_id%22%3A%20%221929b3d4cbd13e0-01d450fe6d3301-16525637-16a7f0-1929b3d4cbe2033%22%2C%22%24search_engine%22%3A%20%22google%22%2C%22%24initial_referrer%22%3A%20%22https%3A%2F%2Fwww.google.com%2F%22%2C%22%24initial_referring_domain%22%3A%20%22www.google.com%22%2C%22%24user_id%22%3A%20%22cm28oqri501xjtwbjzt1prdzm%22%7D",
"domain": ".reddit.com",
"path": "/",
"expires": 1764735681,
"httpOnly": false,
"secure": false
},
{
"name": "reddit_chat_view",
"value": "closed",
"domain": "www.reddit.com",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": false
},
{
"name": "token_v2",
"value": "eyJhbGciOiJSUzI1NiIsImtpZCI6IlNIQTI1NjpzS3dsMnlsV0VtMjVmcXhwTU40cWY4MXE2OWFFdWFyMnpLMUdhVGxjdWNZIiwidHlwIjoiSldUIn0.eyJzdWIiOiJ1c2VyIiwiZXhwIjoxNzMzMjg2MDgxLjgwMjc1NywiaWF0IjoxNzMzMTk5NjgxLjgwMjc1NywianRpIjoiTkFrUGZVTVVwRGZ1SmFNbjZkV3dqbnB4U0REMEFnIiwiY2lkIjoiMFItV0FNaHVvby1NeVEiLCJsaWQiOiJ0Ml9yMGx1eTFtNXQiLCJhaWQiOiJ0Ml9yMGx1eTFtNXQiLCJsY2EiOjE3MDM5MjQ3MzY4MTMsInNjcCI6ImVKeGtrZEdPdERBSWhkLWwxejdCX3lwX05odHNjWWFzTFFhb2szbjdEVm9jazcwN2NMNGlIUDhuS0lxRkxFMnVCS0drS1dFRld0T1VOaUx2NTh5OU9aRUZTeUZUUjg0M3l3b2thVXBQVW1ONXB5bFJ3V1prTGxmYXNVS0RCNllwVlM2WjIwS1BTNXZRM0kxRnowNk1xbHhXSHRUWW8zSnBiR01LMnhQanpjWnFReXF1eTZsTVlGa29uOFdMZnZ5Ry10WS1mN2JmaEhZd3JLZ0tEX1RPdUZ4d1lfSERGSGJfbnByMGJGMndxTDNYZzlRLTEtTjI3Yk5tb2RtNV9WelB2emFTY1RtRzVpZll2N3QtQ1IxNDVIbVpVUWN3WWcwX3lyQWo2X0N2T29ES0JRV01KWWhQSTVBcmwyX19KZGl1VGY4YXR5ZC0tR2JFVFdfNHJSbW81eExFb1VfajZ6Y0FBUF9fWERfZTR3IiwicmNpZCI6Ill6STUzaXNuVVRQUm42M3NQbjRSNFBNbVdOcjE4SU1uUU93T2VfaHFuem8iLCJmbG8iOjJ9.F_24jXHdZDXCmMx4aubrjT94AtnYDzD7eg7SjV1Rwa6ymrvrXW8uZnIqgqVkHJio-mZW_JsxlSKzlIDMJ_lrWtgxFHhgGFWnWkS-raKhYrrQt3gwN-C5VPc3iF-1pVUaf0Jf0gX1aYyvdtRD48rRd8sjCoAwHcGiNH8B7abUPN8JJuQcAEH2GzYSc9Zarb0jANLyw7fGdTdWXfWjUXjy33alItwyhMVcgCIXlVf5wlayRBsRXS_ObpKiril2BuAgCrrVuOWDdflpi58FTA0pki4F0wTdcJfORP9yjZ_L7AJUXhXhswx5Lcf0kTU1hFy4RqFRRd95Q0xZg7Yj2uIC7w",
"domain": ".reddit.com",
"path": "/",
"expires": 1733286081.968748,
"httpOnly": true,
"secure": true
},
{
"name": "session_tracker",
"value": "hhjeikkilpgneqelrj.0.1733199684513.Z0FBQUFBQm5Ub2RFN25sX3lzSVRyLUFxSEtEcmozdW1BbGdaQUtmZUZCMVNteW1PSGhxRHNEdkhrR0lneUNCTUgzUlE2QkdUb2tRRHVyTnNNNlpnOV80TVZzd2hlSzFCRUtLLVZmREYwejhDSXIyX1Q4T04yMnlrSGJkMlVla2h5R1huYnFGOUFleGU",
"domain": ".reddit.com",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": true
},
{
"name": "csrf_token",
"value": "04b1407f1ebd23723b261a469d4dac84",
"domain": ".reddit.com",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": true,
"sameSite": "Strict"
}
]

View file

@ -0,0 +1,133 @@
[
{
"name": "did",
"value": "iXxy5Y0Lo7cY1c90Riq7yRkq4VdJdrz9F1T7N0fqKOFKoaNbUas5EeVE3Oo7jl4M",
"domain": ".www.researchgate.net",
"path": "/",
"expires": 1764671442.13171,
"httpOnly": true,
"secure": true
},
{
"name": "ptc",
"value": "RG1.8779880038783811042.1696569455",
"domain": ".www.researchgate.net",
"path": "/",
"expires": 1767695442.13179,
"httpOnly": true,
"secure": true
},
{
"name": "pl",
"value": "wbbzs5zjXc51gyzsE5huVpQxOu7nxEnyZDiQcl7KEpwjXmoGTD064RedCDxz696tPbB38xaV8xe1oIJFEAAanE6P4MKWta1rUAq6iCLxSGnCYhfeB9JhdfN5cz70vq1R",
"domain": ".www.researchgate.net",
"path": "/",
"expires": 1764671442.13175,
"httpOnly": true,
"secure": true
},
{
"name": "didomi_token",
"value": "eyJ1c2VyX2lkIjoiMTkxOTNiYWQtMDhhMS02Njg5LWE4NTUtMzAwN2VhYzA5MjAwIiwiY3JlYXRlZCI6IjIwMjQtMDgtMjdUMTI6MDY6MTkuNTMwWiIsInVwZGF0ZWQiOiIyMDI0LTA4LTI3VDEyOjA2OjI4LjQ4M1oiLCJ2ZW5kb3JzIjp7ImVuYWJsZWQiOlsiZ29vZ2xlIiwiYzpnb29nbGVhbmEtNFRYbkppZ1IiLCJjOnBvc3Rob2ctQmpKeEZkRjkiLCJjOmRpZG9taSJdfSwidmVuZG9yc19saSI6eyJlbmFibGVkIjpbImdvb2dsZSJdfSwidmVyc2lvbiI6Mn0=",
"domain": ".researchgate.net",
"path": "/",
"expires": 1756382788,
"httpOnly": false,
"secure": true,
"sameSite": "Lax"
},
{
"name": "euconsent-v2-didomi",
"value": "CQEAuYAQEAuYAAHABBENBDFgALHAAELAAAYgF5wAQF5gXnABAXmAAAAA.djgACFgAAAAA",
"domain": ".researchgate.net",
"path": "/",
"expires": 1756382788,
"httpOnly": false,
"secure": true,
"sameSite": "Lax"
},
{
"name": "dmd-tag",
"value": "29c64880-8f9e-11ef-aabb-5bb25e381cbc",
"domain": ".www.researchgate.net",
"path": "/",
"expires": 1767695450.916031,
"httpOnly": false,
"secure": true,
"sameSite": "Lax"
},
{
"name": "sid",
"value": "TCoR8Z544diaoyMwqLA8X9TS7HfkN6SpWX1ropwuTqxNP2j10bZlYc7YIVBKPyVcfem0NqbdChel00sIOkOeF5GtceLW00ubTrpe4dAD5xLJ81ocWo9svlf6J9gynIf6",
"domain": ".www.researchgate.net",
"path": "/",
"expires": -1,
"httpOnly": true,
"secure": true
},
{
"name": "cili",
"value": "_2_MjZhMmQ5N2FmMzgyNmJkYTJiZjc2ZjE0ZjRmMDUxYjMzYjJkZmQxYWY0Njg4Nzc4MTM4ZDE1MGVmNWRhYTc0Nl8yOTU3MzAzNjsw",
"domain": ".www.researchgate.net",
"path": "/",
"expires": -1,
"httpOnly": true,
"secure": true
},
{
"name": "cirgu",
"value": "_1_SaiBmdG2CNFFXfi8YLMIIkC%2BD5oxd2H0CqLqIhkm0V8y0ncsQEoe%2FGUZOhACuKUy5feYcA%3D%3D",
"domain": ".www.researchgate.net",
"path": "/",
"expires": 1764692368.131654,
"httpOnly": false,
"secure": true
},
{
"name": "_cfuvid",
"value": "GehzRPPGzCCv.nPAiw9L7tRQCAi.hQAAinF5RqUksz0-1733135442009-0.0.1.1-604800000",
"domain": ".researchgate.net",
"path": "/",
"expires": -1,
"httpOnly": true,
"secure": true
},
{
"name": "dmd-sid4",
"value": "{%22id%22:%227cce57f0-b098-11ef-924a-5d9117969fc2%22%2C%22timestamp%22:1733135444000%2C%22lastUpdate%22:1733135450000}",
"domain": ".www.researchgate.net",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": true,
"sameSite": "Lax"
},
{
"name": "mp_94085d51c4102efbb82a71d85705cdcf_mixpanel",
"value": "%7B%22distinct_id%22%3A%20%22cm28oqri501xjtwbjzt1prdzm%22%2C%22%24device_id%22%3A%20%22192aea49fbae19-0a0ceb4f539c6b-16525637-16a7f0-192aea49fbb210e%22%2C%22utm_source%22%3A%20%22researchgate%22%2C%22utm_medium%22%3A%20%22email%22%2C%22utm_campaign%22%3A%20%22re442%22%2C%22utm_content%22%3A%20%22re442_up_pb_hnsg_nnaas_p110%22%2C%22utm_term%22%3A%20%22re442_up_pb_hnsg_nnaas%22%2C%22%24initial_referrer%22%3A%20%22%24direct%22%2C%22%24initial_referring_domain%22%3A%20%22%24direct%22%2C%22%24user_id%22%3A%20%22cm28oqri501xjtwbjzt1prdzm%22%2C%22%24search_engine%22%3A%20%22google%22%7D",
"domain": ".researchgate.net",
"path": "/",
"expires": 1764671451,
"httpOnly": false,
"secure": false
},
{
"name": "__cf_bm",
"value": "X2xX99VpHc3Xm_TRnIRhN7Wp533PLOt.311xMWQYn28-1733136361-1.0.1.1-qskSGITd3dmHUV3UswCd8O6ygg3jdBOF9Wz1PxlPQq66VUx0blvFKst7fH33pTnkl2W1VBjBjlF8CgPVLesusQ",
"domain": ".researchgate.net",
"path": "/",
"expires": 1733138161.246979,
"httpOnly": true,
"secure": true
},
{
"name": "ph_phc_ma1XTQyee96N1GML6qUTgLQRiDifnRcE9STiHTZ0CfZ_posthog",
"value": "%7B%22distinct_id%22%3A%22AC%3A29573036%22%2C%22%24sesid%22%3A%5B1733137066914%2C%22019386ec-50e4-79f9-9b7b-d8ed5854cf9f%22%2C1733135454436%5D%2C%22%24epp%22%3Atrue%2C%22%24initial_person_info%22%3A%7B%22r%22%3A%22%24direct%22%2C%22u%22%3A%22https%3A%2F%2Fwww.researchgate.net%2Fprofile%2FJiabin_Tang4%2Fpublications%3FeditMode%3D1%26sorting%3DrecentlyAdded%26utm_medium%3Demail%26utm_source%3Dresearchgate%26utm_campaign%3Dre214%26loginT%3DeX2d52IqLj-iYd58KHUvU88w6Ub-0Rjh_XCM-6tyfVf3Goy3Bf0swonajNlIbQg6gax3uaL6ulhi_ik9eMs%26pli%3D1%26utm_term%3Dre214_x%26utm_content%3Dre214_x_p2%26cp%3Dre214_x_p2%26uid%3D2aJ4s09Uf8rvZLKbNnk9UiFVrlZcTRXt51G2%26ch%3Dreg%22%7D%7D",
"domain": ".researchgate.net",
"path": "/",
"expires": 1764673066,
"httpOnly": false,
"secure": true,
"sameSite": "Lax"
}
]

View file

@ -0,0 +1,173 @@
[
{
"name": "PREF",
"value": "f7=4100&tz=Asia.Hong_Kong&f4=4000000",
"domain": ".youtube.com",
"path": "/",
"expires": 1767698936.819909,
"httpOnly": false,
"secure": true
},
{
"name": "HSID",
"value": "AuvRBV-Q9GEReACoE",
"domain": ".youtube.com",
"path": "/",
"expires": 1766544331.11147,
"httpOnly": true,
"secure": false
},
{
"name": "SSID",
"value": "AHum7OsxEOAD-Ibp4",
"domain": ".youtube.com",
"path": "/",
"expires": 1766544331.111503,
"httpOnly": true,
"secure": true
},
{
"name": "APISID",
"value": "FRZgwlTWYfVE-B2B/A7FrDbUAZCyMOb6ue",
"domain": ".youtube.com",
"path": "/",
"expires": 1766544331.111539,
"httpOnly": false,
"secure": false
},
{
"name": "SAPISID",
"value": "mPwGiY6zyqe8IurQ/An35YGZHoL6Ged8z8",
"domain": ".youtube.com",
"path": "/",
"expires": 1766544331.111575,
"httpOnly": false,
"secure": true
},
{
"name": "__Secure-1PAPISID",
"value": "mPwGiY6zyqe8IurQ/An35YGZHoL6Ged8z8",
"domain": ".youtube.com",
"path": "/",
"expires": 1766544331.111613,
"httpOnly": false,
"secure": true
},
{
"name": "__Secure-3PAPISID",
"value": "mPwGiY6zyqe8IurQ/An35YGZHoL6Ged8z8",
"domain": ".youtube.com",
"path": "/",
"expires": 1766544331.111646,
"httpOnly": false,
"secure": true
},
{
"name": "LOGIN_INFO",
"value": "AFmmF2swRgIhALZXJQRg7B6iILvfx41A-mHr8rh7RMGV3cNkppAPlxxvAiEA38fh68Ct3o4p-ywc1zHhWZxrJ5Dpcd0AcsMp4RZONUs:QUQ3MjNmeTdGelpVWXZuN1RTeUMzQkYwNEZhVXY1emtGT1pycWFmWC1LU0txanZReHBLaDRxVHJEZGRyOV8wajFIajdyLWYwcE1rSFZfRVlBM3BNaXZSQlMtLVlLR3RmSURpQjhKRlJaU0xJcHQySmZVNUp6eWFFak9rbE4yWDg5WGdjSkM4QjJhcFRYZTAwVEF6a3RPUzhsSzV0R05YWkVB",
"domain": ".youtube.com",
"path": "/",
"expires": 1766493681.602721,
"httpOnly": true,
"secure": true
},
{
"name": "SID",
"value": "g.a000qQgZRYfZgm7ZwgtAwFbDQEBvZXpLRLlzTLW27vxVIiEvbgDsOzHaI33jRkuMY7qPd2ZFEAACgYKAUMSARYSFQHGX2Miunh4qpdUynkY2PXqPSvzGxoVAUF8yKqW-lS90SUYH-cprrTRIWDZ0076",
"domain": ".youtube.com",
"path": "/",
"expires": 1766544331.111199,
"httpOnly": false,
"secure": false
},
{
"name": "__Secure-1PSID",
"value": "g.a000qQgZRYfZgm7ZwgtAwFbDQEBvZXpLRLlzTLW27vxVIiEvbgDsSADU_B-86vJipiHTuQxp3gACgYKATgSARYSFQHGX2MinsKT7OE3L_H9SEzrOgndQRoVAUF8yKqLdTsMTC89NTlwps9-uRSu0076",
"domain": ".youtube.com",
"path": "/",
"expires": 1766544331.111393,
"httpOnly": true,
"secure": true
},
{
"name": "__Secure-3PSID",
"value": "g.a000qQgZRYfZgm7ZwgtAwFbDQEBvZXpLRLlzTLW27vxVIiEvbgDsvr3qlDZAuj7i33G332SpFwACgYKAekSARYSFQHGX2Mi9DYKrElENMuHNeaGctBCdBoVAUF8yKrmoWXREkhXzva2a6J3B2ps0076",
"domain": ".youtube.com",
"path": "/",
"expires": 1766544331.111431,
"httpOnly": true,
"secure": true
},
{
"name": "NID",
"value": "519=I7v3EQhK2PGhnXSOCZyz5QYVGbJ383LEPRVqShP2G15ip3zj5VjyDJWEAlJtS3ifC0qs-7cxlwk_vCTqntg_LWW_hfONxTqG6JVJ8JvpMEr2eM_Fqb9n8nVbc_YNrwSIUEorM5N5FUoZmW2u4Qksi_a0-ssHJWsOBEwdxpDONaY",
"domain": ".youtube.com",
"path": "/",
"expires": 1747984059.767459,
"httpOnly": true,
"secure": true
},
{
"name": "mp_94085d51c4102efbb82a71d85705cdcf_mixpanel",
"value": "%7B%22distinct_id%22%3A%20%22cm28oqri501xjtwbjzt1prdzm%22%2C%22%24device_id%22%3A%20%221928f27a2a80-04cb41970e6945-16525637-16a7f0-1928f27a2a920cc%22%2C%22%24initial_referrer%22%3A%20%22%24direct%22%2C%22%24initial_referring_domain%22%3A%20%22%24direct%22%2C%22%24user_id%22%3A%20%22cm28oqri501xjtwbjzt1prdzm%22%2C%22%24search_engine%22%3A%20%22google%22%2C%22utm_source%22%3A%20%22ythp%22%2C%22utm_medium%22%3A%20%22LeftNav%22%2C%22utm_campaign%22%3A%20%22ytgen%22%2C%22utm_content%22%3A%20%22txt%22%7D",
"domain": ".youtube.com",
"path": "/",
"expires": 1764674936,
"httpOnly": false,
"secure": false
},
{
"name": "ST-xuwub9",
"value": "session_logininfo=AFmmF2swRgIhALZXJQRg7B6iILvfx41A-mHr8rh7RMGV3cNkppAPlxxvAiEA38fh68Ct3o4p-ywc1zHhWZxrJ5Dpcd0AcsMp4RZONUs%3AQUQ3MjNmeTdGelpVWXZuN1RTeUMzQkYwNEZhVXY1emtGT1pycWFmWC1LU0txanZReHBLaDRxVHJEZGRyOV8wajFIajdyLWYwcE1rSFZfRVlBM3BNaXZSQlMtLVlLR3RmSURpQjhKRlJaU0xJcHQySmZVNUp6eWFFak9rbE4yWDg5WGdjSkM4QjJhcFRYZTAwVEF6a3RPUzhsSzV0R05YWkVB",
"domain": ".youtube.com",
"path": "/",
"expires": 1733138942,
"httpOnly": false,
"secure": false
},
{
"name": "__Secure-1PSIDTS",
"value": "sidts-CjIBQT4rXxBz2VTDVx5cMn6A_YgFHPgo9Z-eWATlXeINT58ZUInn2_vTeUb4czgvWV-j5BAA",
"domain": ".youtube.com",
"path": "/",
"expires": 1764674938.226552,
"httpOnly": true,
"secure": true
},
{
"name": "__Secure-3PSIDTS",
"value": "sidts-CjIBQT4rXxBz2VTDVx5cMn6A_YgFHPgo9Z-eWATlXeINT58ZUInn2_vTeUb4czgvWV-j5BAA",
"domain": ".youtube.com",
"path": "/",
"expires": 1764674938.226624,
"httpOnly": true,
"secure": true
},
{
"name": "SIDCC",
"value": "AKEyXzWXdf72zjmIboZNkzmg9VURwnmM1MpJVRgAxjRuMRib1b7iu5SkCZccexcK6jf2abrLEoQ",
"domain": ".youtube.com",
"path": "/",
"expires": 1764674940.01384,
"httpOnly": false,
"secure": false
},
{
"name": "__Secure-1PSIDCC",
"value": "AKEyXzXWzx9lRoJCEXrHvqZeWtAugc_tFou4ucmylPeSpc0nRX2EZ-t3QGTGqberRiTB3QIHjQ",
"domain": ".youtube.com",
"path": "/",
"expires": 1764674940.013891,
"httpOnly": true,
"secure": true
},
{
"name": "__Secure-3PSIDCC",
"value": "AKEyXzVyGt5J-awGqBrP5_hTnwTmCMsUu5oWISlljhXbP9P7vrGxlzOg05O_vwlgbuGOKRUQGYA",
"domain": ".youtube.com",
"path": "/",
"expires": 1764674940.013962,
"httpOnly": true,
"secure": true
}
]

View file

@ -0,0 +1,195 @@
COOKIES_LIST = [
{'name': 'edgebucket', 'value': 'lyxOSFtqXnsQEn0H9C', 'domain': '.reddit.com', 'path': '/', 'expires': 1738484736.570167, 'httpOnly': False, 'secure': True},
{'name': 'loid', 'value': '000000000r0luy1m5t.2.1703924736813.Z0FBQUFBQmxqOVFBLVZ2UHJIRWswQW4zQnlJZGtYU2ZBS1dSQlpncW1hQ2o2TmVLMk12QkFYRlBEMFpaOGpqTndjcXhuQjhFS3hhc0dSMXRyZ1o4SUg1cTZvSTNHejk5NW5xdlRTRUtfeUdSU250alJhQTFDY3RSeDJrekdnWG90bk1CWmhhc2hlMWU', 'domain': '.reddit.com', 'path': '/', 'expires': 1763278221.514142, 'httpOnly': False, 'secure': True},
{'name': 'csv', 'value': '2', 'domain': '.reddit.com', 'path': '/', 'expires': 1738484736.97362, 'httpOnly': False, 'secure': True},
{'name': 'g_state', 'value': '{"i_l":0}', 'domain': 'www.reddit.com', 'path': '/', 'expires': 1744270240, 'httpOnly': False, 'secure': False},
{'name': 'pc', 'value': 'nd', 'domain': '.reddit.com', 'path': '/', 'expires': 1760254304, 'httpOnly': False, 'secure': True},
{'name': '__stripe_mid', 'value': '104997eb-5535-4dd4-a71a-5a7f697b8a4650cc1f', 'domain': '.www.reddit.com', 'path': '/', 'expires': 1761300348, 'httpOnly': False, 'secure': True, 'sameSite': 'Strict'},
{'name': 't2_r0luy1m5t_recentclicks3', 'value': 't3_o9s5iv%2Ct3_1ga952r%2Ct3_1eefr4x%2Ct3_1fs5q5b', 'domain': '.reddit.com', 'path': '/', 'expires': 1762676289, 'httpOnly': False, 'secure': False, 'sameSite': 'Strict'},
{'name': 'reddit_session', 'value': 'eyJhbGciOiJSUzI1NiIsImtpZCI6IlNIQTI1NjpsVFdYNlFVUEloWktaRG1rR0pVd1gvdWNFK01BSjBYRE12RU1kNzVxTXQ4IiwidHlwIjoiSldUIn0.eyJzdWIiOiJ0Ml9yMGx1eTFtNXQiLCJleHAiOjE3NDg4MzgwNzkuNzc0MDEzLCJpYXQiOjE3MzMxOTk2NzkuNzc0MDEzLCJqdGkiOiJyT3l6V2hFUmtxNDA0b0YzX1FSSVR3R240Y0gzS0EiLCJjaWQiOiJjb29raWUiLCJsY2EiOjE3MDM5MjQ3MzY4MTMsInNjcCI6ImVKeUtqZ1VFQUFEX193RVZBTGsiLCJ2MSI6Ijc2MjE3NTUxMDk3OTY5LDIwMjQtMTAtMTJUMDc6MzA6NDEsYWU3Y2U5ZDdiMjU3OGQ3MWVmMTEwYjFiNTc2NTU2NmNmYzJkNDljNiIsImZsbyI6Mn0.NUhfjOfX7pWC5FFUfJvTw0Ts8b0ZICrmmg_Eh4_O6hvnqEH5UHVjjwtS7YNGyxTRv5k0AJTx-GW5CWTUJvhciPOYokV1iM4RirTbijGfqyvSlbl7YIARX8gUMrm6X2TmFvHmQHem4S-0YcrhvBakEXb2TAk0e4KLiPBS6jbEa5c4EoIkp8PjvFVkWZhY_FMge6SxAmPlx2Xksk2c_9s_rJ-UTZkyOWP5ighh7TmA3B_0ZWEPRme7yDBtV-AJ1UH533suaBukxD_-O3afm1AtjMiQAygZ4tl78T7unCopK1_c8PGiunpfLCMTdTTb07NnI2_fo-8AfARF-9O1MPdYFQ', 'domain': '.reddit.com', 'path': '/', 'expires': 1748838078.887127, 'httpOnly': True, 'secure': True},
{'name': 'mp_94085d51c4102efbb82a71d85705cdcf_mixpanel', 'value': '%7B%22distinct_id%22%3A%20%22cm28oqri501xjtwbjzt1prdzm%22%2C%22%24device_id%22%3A%20%221929b3d4cbd13e0-01d450fe6d3301-16525637-16a7f0-1929b3d4cbe2033%22%2C%22%24search_engine%22%3A%20%22google%22%2C%22%24initial_referrer%22%3A%20%22https%3A%2F%2Fwww.google.com%2F%22%2C%22%24initial_referring_domain%22%3A%20%22www.google.com%22%2C%22%24user_id%22%3A%20%22cm28oqri501xjtwbjzt1prdzm%22%7D', 'domain': '.reddit.com', 'path': '/', 'expires': 1764735681, 'httpOnly': False, 'secure': False},
{'name': 'reddit_chat_view', 'value': 'closed', 'domain': 'www.reddit.com', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': False},
{'name': 'token_v2', 'value': 'eyJhbGciOiJSUzI1NiIsImtpZCI6IlNIQTI1NjpzS3dsMnlsV0VtMjVmcXhwTU40cWY4MXE2OWFFdWFyMnpLMUdhVGxjdWNZIiwidHlwIjoiSldUIn0.eyJzdWIiOiJ1c2VyIiwiZXhwIjoxNzMzMjg2MDgxLjgwMjc1NywiaWF0IjoxNzMzMTk5NjgxLjgwMjc1NywianRpIjoiTkFrUGZVTVVwRGZ1SmFNbjZkV3dqbnB4U0REMEFnIiwiY2lkIjoiMFItV0FNaHVvby1NeVEiLCJsaWQiOiJ0Ml9yMGx1eTFtNXQiLCJhaWQiOiJ0Ml9yMGx1eTFtNXQiLCJsY2EiOjE3MDM5MjQ3MzY4MTMsInNjcCI6ImVKeGtrZEdPdERBSWhkLWwxejdCX3lwX05odHNjWWFzTFFhb2szbjdEVm9jazcwN2NMNGlIUDhuS0lxRkxFMnVCS0drS1dFRld0T1VOaUx2NTh5OU9aRUZTeUZUUjg0M3l3b2thVXBQVW1ONXB5bFJ3V1prTGxmYXNVS0RCNllwVlM2WjIwS1BTNXZRM0kxRnowNk1xbHhXSHRUWW8zSnBiR01LMnhQanpjWnFReXF1eTZsTVlGa29uOFdMZnZ5Ry10WS1mN2JmaEhZd3JLZ0tEX1RPdUZ4d1lfSERGSGJfbnByMGJGMndxTDNYZzlRLTEtTjI3Yk5tb2RtNV9WelB2emFTY1RtRzVpZll2N3QtQ1IxNDVIbVpVUWN3WWcwX3lyQWo2X0N2T29ES0JRV01KWWhQSTVBcmwyX19KZGl1VGY4YXR5ZC0tR2JFVFdfNHJSbW81eExFb1VfajZ6Y0FBUF9fWERfZTR3IiwicmNpZCI6Ill6STUzaXNuVVRQUm42M3NQbjRSNFBNbVdOcjE4SU1uUU93T2VfaHFuem8iLCJmbG8iOjJ9.F_24jXHdZDXCmMx4aubrjT94AtnYDzD7eg7SjV1Rwa6ymrvrXW8uZnIqgqVkHJio-mZW_JsxlSKzlIDMJ_lrWtgxFHhgGFWnWkS-raKhYrrQt3gwN-C5VPc3iF-1pVUaf0Jf0gX1aYyvdtRD48rRd8sjCoAwHcGiNH8B7abUPN8JJuQcAEH2GzYSc9Zarb0jANLyw7fGdTdWXfWjUXjy33alItwyhMVcgCIXlVf5wlayRBsRXS_ObpKiril2BuAgCrrVuOWDdflpi58FTA0pki4F0wTdcJfORP9yjZ_L7AJUXhXhswx5Lcf0kTU1hFy4RqFRRd95Q0xZg7Yj2uIC7w', 'domain': '.reddit.com', 'path': '/', 'expires': 1733286081.968748, 'httpOnly': True, 'secure': True},
{'name': 'session_tracker', 'value': 'hhjeikkilpgneqelrj.0.1733199684513.Z0FBQUFBQm5Ub2RFN25sX3lzSVRyLUFxSEtEcmozdW1BbGdaQUtmZUZCMVNteW1PSGhxRHNEdkhrR0lneUNCTUgzUlE2QkdUb2tRRHVyTnNNNlpnOV80TVZzd2hlSzFCRUtLLVZmREYwejhDSXIyX1Q4T04yMnlrSGJkMlVla2h5R1huYnFGOUFleGU', 'domain': '.reddit.com', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': True},
{'name': 'csrf_token', 'value': '04b1407f1ebd23723b261a469d4dac84', 'domain': '.reddit.com', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': True, 'sameSite': 'Strict'},
{'name': 'hum_ieee_visitor', 'value': '3403d64f-1870-4601-9ff7-e5900074a6db', 'domain': '.ieeexplore-ieee-org.eproxy.lib.hku.hk', 'path': '/', 'expires': 1756280921.91082, 'httpOnly': False, 'secure': True},
{'name': '_zitok', 'value': '6273c58ab3f308a07a711718187500', 'domain': '.ieeexplore-ieee-org.eproxy.lib.hku.hk', 'path': '/', 'expires': 1761551757, 'httpOnly': False, 'secure': True, 'sameSite': 'Strict'},
{'name': 'AMCV_4D6368F454EC41940A4C98A6%40AdobeOrg', 'value': '179643557%7CMCIDTS%7C20060%7CMCMID%7C09958998665032490705964449674769238820%7CMCAID%7CNONE%7CMCOPTOUT-1733149186s%7CNONE%7CvVersion%7C5.5.0', 'domain': '.hku.hk', 'path': '/', 'expires': 1767701986.053151, 'httpOnly': False, 'secure': False},
{'name': 'ezproxy', 'value': 'e1~OilZogbDH4iMWPK', 'domain': '.eproxy.lib.hku.hk', 'path': '/', 'expires': -1, 'httpOnly': True, 'secure': False},
{'name': 'ezproxyl', 'value': 'OilZogbDH4iMWPK', 'domain': '.eproxy.lib.hku.hk', 'path': '/', 'expires': -1, 'httpOnly': True, 'secure': False, 'sameSite': 'Lax'},
{'name': 'ezproxyn', 'value': 'OilZogbDH4iMWPK', 'domain': '.eproxy.lib.hku.hk', 'path': '/', 'expires': -1, 'httpOnly': True, 'secure': True},
{'name': 'AMCVS_8E929CC25A1FB2B30A495C97%40AdobeOrg', 'value': '1', 'domain': '.ieeexplore-ieee-org.eproxy.lib.hku.hk', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': False},
{'name': 's_cc', 'value': 'true', 'domain': '.ieeexplore-ieee-org.eproxy.lib.hku.hk', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': False},
{'name': 'mp_94085d51c4102efbb82a71d85705cdcf_mixpanel', 'value': '%7B%22distinct_id%22%3A%20%22193873d31db19b2-00bbd0bd5ad31-1e525636-1fa400-193873d31dc28b3%22%2C%22%24device_id%22%3A%20%22193873d31db19b2-00bbd0bd5ad31-1e525636-1fa400-193873d31dc28b3%22%2C%22%24initial_referrer%22%3A%20%22%24direct%22%2C%22%24initial_referring_domain%22%3A%20%22%24direct%22%7D', 'domain': '.lib.hku.hk', 'path': '/', 'expires': 1765086053, 'httpOnly': False, 'secure': False},
{'name': 'utag_main', 'value': 'v_id:01939fa183070055958b6429f88c05075005506d00bd0$_sn:1$_se:2$_ss:0$_st:1733551853250$ses_id:1733549982472%3Bexp-session$_pn:2%3Bexp-session$vapi_domain:ieeexplore-ieee-org.eproxy.lib.hku.hk', 'domain': '.hku.hk', 'path': '/', 'expires': 1765086053, 'httpOnly': False, 'secure': False},
{'name': 'AMCV_8E929CC25A1FB2B30A495C97%40AdobeOrg', 'value': '359503849%7CMCIDTS%7C20065%7CMCMID%7C53777252718039557930823884447397163100%7CMCAID%7CNONE%7CMCOPTOUT-1733557253s%7CNONE%7CvVersion%7C5.0.1', 'domain': '.ieeexplore-ieee-org.eproxy.lib.hku.hk', 'path': '/', 'expires': 1768110053.386389, 'httpOnly': False, 'secure': False},
{'name': 'did', 'value': 'iXxy5Y0Lo7cY1c90Riq7yRkq4VdJdrz9F1T7N0fqKOFKoaNbUas5EeVE3Oo7jl4M', 'domain': '.www.researchgate.net', 'path': '/', 'expires': 1764671442.13171, 'httpOnly': True, 'secure': True},
{'name': 'ptc', 'value': 'RG1.8779880038783811042.1696569455', 'domain': '.www.researchgate.net', 'path': '/', 'expires': 1767695442.13179, 'httpOnly': True, 'secure': True},
{'name': 'pl', 'value': 'wbbzs5zjXc51gyzsE5huVpQxOu7nxEnyZDiQcl7KEpwjXmoGTD064RedCDxz696tPbB38xaV8xe1oIJFEAAanE6P4MKWta1rUAq6iCLxSGnCYhfeB9JhdfN5cz70vq1R', 'domain': '.www.researchgate.net', 'path': '/', 'expires': 1764671442.13175, 'httpOnly': True, 'secure': True},
{'name': 'didomi_token', 'value': 'eyJ1c2VyX2lkIjoiMTkxOTNiYWQtMDhhMS02Njg5LWE4NTUtMzAwN2VhYzA5MjAwIiwiY3JlYXRlZCI6IjIwMjQtMDgtMjdUMTI6MDY6MTkuNTMwWiIsInVwZGF0ZWQiOiIyMDI0LTA4LTI3VDEyOjA2OjI4LjQ4M1oiLCJ2ZW5kb3JzIjp7ImVuYWJsZWQiOlsiZ29vZ2xlIiwiYzpnb29nbGVhbmEtNFRYbkppZ1IiLCJjOnBvc3Rob2ctQmpKeEZkRjkiLCJjOmRpZG9taSJdfSwidmVuZG9yc19saSI6eyJlbmFibGVkIjpbImdvb2dsZSJdfSwidmVyc2lvbiI6Mn0=', 'domain': '.researchgate.net', 'path': '/', 'expires': 1756382788, 'httpOnly': False, 'secure': True, 'sameSite': 'Lax'},
{'name': 'euconsent-v2-didomi', 'value': 'CQEAuYAQEAuYAAHABBENBDFgALHAAELAAAYgF5wAQF5gXnABAXmAAAAA.djgACFgAAAAA', 'domain': '.researchgate.net', 'path': '/', 'expires': 1756382788, 'httpOnly': False, 'secure': True, 'sameSite': 'Lax'},
{'name': 'dmd-tag', 'value': '29c64880-8f9e-11ef-aabb-5bb25e381cbc', 'domain': '.www.researchgate.net', 'path': '/', 'expires': 1767695450.916031, 'httpOnly': False, 'secure': True, 'sameSite': 'Lax'},
{'name': 'sid', 'value': 'TCoR8Z544diaoyMwqLA8X9TS7HfkN6SpWX1ropwuTqxNP2j10bZlYc7YIVBKPyVcfem0NqbdChel00sIOkOeF5GtceLW00ubTrpe4dAD5xLJ81ocWo9svlf6J9gynIf6', 'domain': '.www.researchgate.net', 'path': '/', 'expires': -1, 'httpOnly': True, 'secure': True},
{'name': 'cili', 'value': '_2_MjZhMmQ5N2FmMzgyNmJkYTJiZjc2ZjE0ZjRmMDUxYjMzYjJkZmQxYWY0Njg4Nzc4MTM4ZDE1MGVmNWRhYTc0Nl8yOTU3MzAzNjsw', 'domain': '.www.researchgate.net', 'path': '/', 'expires': -1, 'httpOnly': True, 'secure': True},
{'name': 'cirgu', 'value': '_1_SaiBmdG2CNFFXfi8YLMIIkC%2BD5oxd2H0CqLqIhkm0V8y0ncsQEoe%2FGUZOhACuKUy5feYcA%3D%3D', 'domain': '.www.researchgate.net', 'path': '/', 'expires': 1764692368.131654, 'httpOnly': False, 'secure': True},
{'name': '_cfuvid', 'value': 'GehzRPPGzCCv.nPAiw9L7tRQCAi.hQAAinF5RqUksz0-1733135442009-0.0.1.1-604800000', 'domain': '.researchgate.net', 'path': '/', 'expires': -1, 'httpOnly': True, 'secure': True},
{'name': 'dmd-sid4', 'value': '{%22id%22:%227cce57f0-b098-11ef-924a-5d9117969fc2%22%2C%22timestamp%22:1733135444000%2C%22lastUpdate%22:1733135450000}', 'domain': '.www.researchgate.net', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': True, 'sameSite': 'Lax'},
{'name': 'mp_94085d51c4102efbb82a71d85705cdcf_mixpanel', 'value': '%7B%22distinct_id%22%3A%20%22cm28oqri501xjtwbjzt1prdzm%22%2C%22%24device_id%22%3A%20%22192aea49fbae19-0a0ceb4f539c6b-16525637-16a7f0-192aea49fbb210e%22%2C%22utm_source%22%3A%20%22researchgate%22%2C%22utm_medium%22%3A%20%22email%22%2C%22utm_campaign%22%3A%20%22re442%22%2C%22utm_content%22%3A%20%22re442_up_pb_hnsg_nnaas_p110%22%2C%22utm_term%22%3A%20%22re442_up_pb_hnsg_nnaas%22%2C%22%24initial_referrer%22%3A%20%22%24direct%22%2C%22%24initial_referring_domain%22%3A%20%22%24direct%22%2C%22%24user_id%22%3A%20%22cm28oqri501xjtwbjzt1prdzm%22%2C%22%24search_engine%22%3A%20%22google%22%7D', 'domain': '.researchgate.net', 'path': '/', 'expires': 1764671451, 'httpOnly': False, 'secure': False},
{'name': '__cf_bm', 'value': 'X2xX99VpHc3Xm_TRnIRhN7Wp533PLOt.311xMWQYn28-1733136361-1.0.1.1-qskSGITd3dmHUV3UswCd8O6ygg3jdBOF9Wz1PxlPQq66VUx0blvFKst7fH33pTnkl2W1VBjBjlF8CgPVLesusQ', 'domain': '.researchgate.net', 'path': '/', 'expires': 1733138161.246979, 'httpOnly': True, 'secure': True},
{'name': 'ph_phc_ma1XTQyee96N1GML6qUTgLQRiDifnRcE9STiHTZ0CfZ_posthog', 'value': '%7B%22distinct_id%22%3A%22AC%3A29573036%22%2C%22%24sesid%22%3A%5B1733137066914%2C%22019386ec-50e4-79f9-9b7b-d8ed5854cf9f%22%2C1733135454436%5D%2C%22%24epp%22%3Atrue%2C%22%24initial_person_info%22%3A%7B%22r%22%3A%22%24direct%22%2C%22u%22%3A%22https%3A%2F%2Fwww.researchgate.net%2Fprofile%2FJiabin_Tang4%2Fpublications%3FeditMode%3D1%26sorting%3DrecentlyAdded%26utm_medium%3Demail%26utm_source%3Dresearchgate%26utm_campaign%3Dre214%26loginT%3DeX2d52IqLj-iYd58KHUvU88w6Ub-0Rjh_XCM-6tyfVf3Goy3Bf0swonajNlIbQg6gax3uaL6ulhi_ik9eMs%26pli%3D1%26utm_term%3Dre214_x%26utm_content%3Dre214_x_p2%26cp%3Dre214_x_p2%26uid%3D2aJ4s09Uf8rvZLKbNnk9UiFVrlZcTRXt51G2%26ch%3Dreg%22%7D%7D', 'domain': '.researchgate.net', 'path': '/', 'expires': 1764673066, 'httpOnly': False, 'secure': True, 'sameSite': 'Lax'},
{'name': 'user.uuid.v2', 'value': '"f9248aca-ac13-40e6-8b45-eaeb5fe20825"', 'domain': 'www-nature-com.eproxy.lib.hku.hk', 'path': '/', 'expires': 1740916756.716508, 'httpOnly': False, 'secure': False},
{'name': 'ezproxy', 'value': 'e1~OilZogbDH4iMWPK', 'domain': '.eproxy.lib.hku.hk', 'path': '/', 'expires': -1, 'httpOnly': True, 'secure': False},
{'name': 'ezproxyl', 'value': 'OilZogbDH4iMWPK', 'domain': '.eproxy.lib.hku.hk', 'path': '/', 'expires': -1, 'httpOnly': True, 'secure': False, 'sameSite': 'Lax'},
{'name': 'ezproxyn', 'value': 'OilZogbDH4iMWPK', 'domain': '.eproxy.lib.hku.hk', 'path': '/', 'expires': -1, 'httpOnly': True, 'secure': True},
{'name': 'AMCVS_4D6368F454EC41940A4C98A6%40AdobeOrg', 'value': '1', 'domain': '.hku.hk', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': False},
{'name': 'AMCV_4D6368F454EC41940A4C98A6%40AdobeOrg', 'value': '179643557%7CMCIDTS%7C20065%7CMCMID%7C09958998665032490705964449674769238820%7CMCAID%7CNONE%7CMCOPTOUT-1733558198s%7CNONE%7CvVersion%7C5.5.0', 'domain': '.hku.hk', 'path': '/', 'expires': 1768110998.70329, 'httpOnly': False, 'secure': False},
{'name': 'utag_main', 'value': 'v_id:01939fa183070055958b6429f88c05075005506d00bd0$_sn:1$_se:20$_ss:0$_st:1733553108768$ses_id:1733549982472%3Bexp-session$_pn:14%3Bexp-session$vapi_domain:ieeexplore-ieee-org.eproxy.lib.hku.hk', 'domain': '.hku.hk', 'path': '/', 'expires': 1765087308, 'httpOnly': False, 'secure': False},
{'name': 'mp_94085d51c4102efbb82a71d85705cdcf_mixpanel', 'value': '%7B%22distinct_id%22%3A%20%22193873d31db19b2-00bbd0bd5ad31-1e525636-1fa400-193873d31dc28b3%22%2C%22%24device_id%22%3A%20%22193873d31db19b2-00bbd0bd5ad31-1e525636-1fa400-193873d31dc28b3%22%2C%22%24initial_referrer%22%3A%20%22%24direct%22%2C%22%24initial_referring_domain%22%3A%20%22%24direct%22%7D', 'domain': '.lib.hku.hk', 'path': '/', 'expires': 1765087436, 'httpOnly': False, 'secure': False},
{'name': '_sp_id.a65e', 'value': 'a151b61b-0e26-493f-9885-ed0d9579e181.1712037732.1.1712037742..381bfab3-8c2a-4e54-8d4b-44a5c8c997ef..6db53b82-8b6d-471c-b7de-194adad46810.1712037732261.2', 'domain': 'www.collinsdictionary.com', 'path': '/', 'expires': 1746597742.393476, 'httpOnly': False, 'secure': True},
{'name': '__cflb', 'value': '02DiuFwNDm462z9fWfJeB58usqeie1xoTDrYZciipwE2x', 'domain': 'www.collinsdictionary.com', 'path': '/', 'expires': 1733223382.743499, 'httpOnly': True, 'secure': True},
{'name': 'XSRF-TOKEN', 'value': '64a0f62d-dc8f-40cb-8aa2-66e3ad283ad4', 'domain': 'www.collinsdictionary.com', 'path': '/', 'expires': -1, 'httpOnly': True, 'secure': True},
{'name': 'dictcode', 'value': 'english', 'domain': 'www.collinsdictionary.com', 'path': '/', 'expires': 1763873478.639472, 'httpOnly': False, 'secure': False},
{'name': 'searchPanelOpen', 'value': 'true', 'domain': '.collinsdictionary.com', 'path': '/', 'expires': 1733803809.434554, 'httpOnly': False, 'secure': False},
{'name': 'search', 'value': 'hello', 'domain': 'www.collinsdictionary.com', 'path': '/', 'expires': 1763871009.434808, 'httpOnly': False, 'secure': False},
{'name': '__cf_bm', 'value': 'xGchgbvqtkoAYddlxWT4VgRmeTZ1qTVmI0hjpRvOj0w-1733201062-1.0.1.1-SDl6_cuGUlqEOSm4oDQpU5rJdha8wEbITIgLoxdY69GgWrSt5GO7nX47Vc2AihzcBY.yS6GZ9qXVfRKEttQyLw', 'domain': '.collinsdictionary.com', 'path': '/', 'expires': 1733202862.320396, 'httpOnly': True, 'secure': True},
{'name': 'last_url', 'value': 'https%3A%2F%2Fwww.collinsdictionary.com%2Fdictionary%2Fspanish-english%2Fcaminata', 'domain': 'www.collinsdictionary.com', 'path': '/', 'expires': 1763873068.316249, 'httpOnly': False, 'secure': False},
{'name': 'OptanonConsent', 'value': 'isGpcEnabled=0&datestamp=Tue+Dec+03+2024+12%3A51%3A18+GMT%2B0800+(%E9%A6%99%E6%B8%AF%E6%A0%87%E5%87%86%E6%97%B6%E9%97%B4)&version=202409.2.0&browserGpcFlag=0&isIABGlobal=false&hosts=&landingPath=NotLandingPage&groups=C0001%3A1%2CC0002%3A1%2CC0003%3A1%2CC0004%3A1&AwaitingReconsent=false&geolocation=JP%3B27', 'domain': '.collinsdictionary.com', 'path': '/', 'expires': 1759121478, 'httpOnly': False, 'secure': False, 'sameSite': 'Lax'},
{'name': 'OptanonAlertBoxClosed', 'value': '2024-12-03T04:51:18.738Z', 'domain': '.collinsdictionary.com', 'path': '/', 'expires': 1759121478, 'httpOnly': False, 'secure': False, 'sameSite': 'Lax'},
{'name': 'mp_94085d51c4102efbb82a71d85705cdcf_mixpanel', 'value': '%7B%22distinct_id%22%3A%20%22193873a93591e4d-05e2471014e6fb-1e525636-1fa400-193873a935a287e%22%2C%22%24device_id%22%3A%20%22193873a93591e4d-05e2471014e6fb-1e525636-1fa400-193873a935a287e%22%2C%22%24initial_referrer%22%3A%20%22%24direct%22%2C%22%24initial_referring_domain%22%3A%20%22%24direct%22%7D', 'domain': '.collinsdictionary.com', 'path': '/', 'expires': 1764737478, 'httpOnly': False, 'secure': False},
{'name': 'UUID', 'value': '3c4dd735-8d33-4fd0-a40f-83d399a0dc46', 'domain': 'www.jstor.org', 'path': '/', 'expires': 1740190342.420181, 'httpOnly': False, 'secure': True},
{'name': '_pxvid', 'value': '2dd5c1cb-b670-11ee-9186-3dd546fa1c41', 'domain': 'www.jstor.org', 'path': '/', 'expires': 1737166344, 'httpOnly': False, 'secure': False, 'sameSite': 'Lax'},
{'name': '__zlcmid', 'value': '1O1n3oDgpvApbaQ', 'domain': '.jstor.org', 'path': '/', 'expires': 1764831447, 'httpOnly': False, 'secure': False, 'sameSite': 'Lax'},
{'name': 'csrftoken', 'value': 'iyx0piwZPaxHwlEjMlBpPrxRasiSrbVv', 'domain': 'www.jstor.org', 'path': '/', 'expires': 1764745045.418981, 'httpOnly': False, 'secure': True, 'sameSite': 'Lax'},
{'name': 'OptanonAlertBoxClosed', 'value': '2024-12-04T06:56:48.465Z', 'domain': '.jstor.org', 'path': '/', 'expires': 1764831408, 'httpOnly': False, 'secure': False, 'sameSite': 'Lax'},
{'name': 'AccessToken', 'value': 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzZXNzaW9uSWQiOiIwMTkzOTA3NGY4MTg3OTQ1OTg4NzdiNWQzMWE4NDk3MSIsInV1aWQiOiIzYzRkZDczNS04ZDMzLTRmZDAtYTQwZi04M2QzOTlhMGRjNDYiLCJ1c2VyIjp7ImlkIjoiIiwibG9nZ2VkSW4iOmZhbHNlLCJhZG1pbiI6ZmFsc2V9LCJpYXQiOjE3MzMyOTU0MDksImV4cCI6MTczMzI5NTcwOX0.lIt08pG__dm_kZ3kJUYMw_bK0Ow2kAD8i2Jf8OQA0RM', 'domain': 'www.jstor.org', 'path': '/', 'expires': 1733299009.610988, 'httpOnly': False, 'secure': True, 'sameSite': 'Lax'},
{'name': 'AccessSessionTimedSignature', 'value': '1b72fc2754973a3daa1baf1b169dfda5ed067ed4113573f1a1005ce5da900999', 'domain': 'www.jstor.org', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': False},
{'name': 'AccessSession', 'value': 'H4sIAAAAAAAA_42U3W7bMAyF38XXUSFKlEXlLgv2f7Gi7S62oRgYSc68uXHg2B26ou8-KW4adxu6GoEhyYfi4Scqt8Uw1KGYF9pjCFYbQUFrgVWQglFWgnTQzrEMHstiVtTbpAVLJ2DsSfop1Hk17yDBaSetQYzKuqpy1q-YfbBKckWGkm47FWJFQNahcUTWrkzQwITOQhJ2e2HFu765ESFWPDT9nBQiWXIVeAmMUCkGUtJrU6UX5AQN9ylOSYUClJB4Icu5sXOJJ1YTgrJkPmfZ8KRMQ7mX7Z5WmVHV-2Led0OcFdfc7H1PQ9whJAW4Mm2dQ7jvu10xvy2WyyRfnKel5UUatTv-wV-VzfNPaf7uNI3OlofReda8vXizeL8o7tIuQ_9t4X2fdvpyW_Q325g_b3Z93Q993W4yx7aJ-fPlrLiqN_VV_Su-anh9MLx3CyVamaGMM5BSKq3LfAxxvxSjX1HJLGLJTqAMJNhqKYB1NICUzzvH3zuoHznwXQyjwWe3mXhmwoR7iM9v3Xt7L7r25y52p1x39WZdjBhHIHeXd7MJyFBf12HgZsKx-Hj-8qx4iiai1gpB6iNRQ45caj59JOqgghBABBmsQDBOMJUovFaByKtQAkyInsZu124-Jtd_Az3kwwmQdduumyhAOmegTHfMSJsOGYg0HUF8WOTC_6g_lcYPLTWemEWrFD54V-nmhtTNwliuBAZk4TywkBwlQVm5kswxyVlcj33wL2DNfQJNWtGk3m4ftK83H8hIMbXlEaCLJG2IXlTargR6awR7SjfOAzk2fiVZHk287tph-6QHNcm-zuoxeaLxffzL-s_zGONvRDIvs1UFAAA', 'domain': 'www.jstor.org', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': False},
{'name': 'AccessSessionSignature', 'value': '78ecce97f2a2de3ffb4af7c87424885165a11fe7d2e29bf960edff5c48167a35', 'domain': 'www.jstor.org', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': False},
{'name': '_pxhd', 'value': 'xbR2aQnYKWrHiFs3JKCUojuB1cOv3M4cNFTMhSdjqxqtIsdgsSi8ehBRY7OxVo9kz4W0VVvgcv6IyY0Ta0SJPA==:medf83pfHV213oGcCOKWmgsZc4Kr8q2rEC2GEWBrpMBibA5DYuL7eKs10ANKfVD8qmvJUfcosZrIkQ83XUVKtKmZa4Y6lK04fy46yN254wo=', 'domain': 'www.jstor.org', 'path': '/', 'expires': 1764831425.214494, 'httpOnly': False, 'secure': False},
{'name': 'pxcts', 'value': 'f8fbc8a1-b20c-11ef-a65c-4947163db9b8', 'domain': 'www.jstor.org', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': False, 'sameSite': 'Lax'},
{'name': '_px2', 'value': 'eyJ1IjoiZjhjZDk4ZjAtYjIwYy0xMWVmLWFkYzctZGQxMTkxM2QxNDlkIiwidiI6IjJkZDVjMWNiLWI2NzAtMTFlZS05MTg2LTNkZDU0NmZhMWM0MSIsInQiOjE3MzMyOTU3MjY4NjgsImgiOiIyMTFhMjMyMTRlZmQwOWE5OTNhZjlmODU2MDU1ZmI1N2U4MTcwY2RmNDNlZjM0MGFhYzg1Yzk2NzQ0NmVjOWI5In0=', 'domain': 'www.jstor.org', 'path': '/', 'expires': 1733296026, 'httpOnly': False, 'secure': False, 'sameSite': 'Lax'},
{'name': 'ReferringRequestId', 'value': 'fastly-default:258d2927284d8837614cc35365d11c1d', 'domain': 'www.jstor.org', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': True, 'sameSite': 'Lax'},
{'name': 'mp_94085d51c4102efbb82a71d85705cdcf_mixpanel', 'value': '%7B%22distinct_id%22%3A%20%221939074fba7618-0aaf84ba8a423a-1e525636-16a7f0-1939074fba82966%22%2C%22%24device_id%22%3A%20%221939074fba7618-0aaf84ba8a423a-1e525636-16a7f0-1939074fba82966%22%2C%22%24search_engine%22%3A%20%22google%22%2C%22%24initial_referrer%22%3A%20%22https%3A%2F%2Fwww.google.com%2F%22%2C%22%24initial_referring_domain%22%3A%20%22www.google.com%22%7D', 'domain': '.jstor.org', 'path': '/', 'expires': 1764831445, 'httpOnly': False, 'secure': False},
{'name': 'OptanonConsent', 'value': 'isGpcEnabled=0&datestamp=Wed+Dec+04+2024+14%3A57%3A25+GMT%2B0800+(%E4%B8%AD%E5%9B%BD%E6%A0%87%E5%87%86%E6%97%B6%E9%97%B4)&version=202303.1.0&browserGpcFlag=0&isIABGlobal=false&hosts=&consentId=e6c4a174-c538-4f9f-8632-f5f8e9ff428d&interactionCount=2&landingPath=NotLandingPage&groups=C0001%3A1%2CC0002%3A1%2CC0005%3A1%2CC0004%3A1%2CC0003%3A1&AwaitingReconsent=false&geolocation=JP%3B27', 'domain': '.jstor.org', 'path': '/', 'expires': 1764831445, 'httpOnly': False, 'secure': False, 'sameSite': 'Lax'},
{'name': 'AMCV_4D6368F454EC41940A4C98A6%40AdobeOrg', 'value': '179643557%7CMCIDTS%7C20060%7CMCMID%7C09958998665032490705964449674769238820%7CMCAID%7CNONE%7CMCOPTOUT-1733149186s%7CNONE%7CvVersion%7C5.5.0', 'domain': '.hku.hk', 'path': '/', 'expires': 1767701986.053151, 'httpOnly': False, 'secure': False},
{'name': 'ezproxy', 'value': 'e1~OilZogbDH4iMWPK', 'domain': '.eproxy.lib.hku.hk', 'path': '/', 'expires': -1, 'httpOnly': True, 'secure': False},
{'name': 'ezproxyl', 'value': 'OilZogbDH4iMWPK', 'domain': '.eproxy.lib.hku.hk', 'path': '/', 'expires': -1, 'httpOnly': True, 'secure': False, 'sameSite': 'Lax'},
{'name': 'ezproxyn', 'value': 'OilZogbDH4iMWPK', 'domain': '.eproxy.lib.hku.hk', 'path': '/', 'expires': -1, 'httpOnly': True, 'secure': True},
{'name': 'mp_94085d51c4102efbb82a71d85705cdcf_mixpanel', 'value': '%7B%22distinct_id%22%3A%20%22193873d31db19b2-00bbd0bd5ad31-1e525636-1fa400-193873d31dc28b3%22%2C%22%24device_id%22%3A%20%22193873d31db19b2-00bbd0bd5ad31-1e525636-1fa400-193873d31dc28b3%22%2C%22%24initial_referrer%22%3A%20%22%24direct%22%2C%22%24initial_referring_domain%22%3A%20%22%24direct%22%7D', 'domain': '.lib.hku.hk', 'path': '/', 'expires': 1765083373, 'httpOnly': False, 'secure': False},
{'name': 'ezproxy', 'value': 'e1~OilZogbDH4iMWPK', 'domain': '.eproxy.lib.hku.hk', 'path': '/', 'expires': -1, 'httpOnly': True, 'secure': False},
{'name': 'ezproxyl', 'value': 'OilZogbDH4iMWPK', 'domain': '.eproxy.lib.hku.hk', 'path': '/', 'expires': -1, 'httpOnly': True, 'secure': False, 'sameSite': 'Lax'},
{'name': 'ezproxyn', 'value': 'OilZogbDH4iMWPK', 'domain': '.eproxy.lib.hku.hk', 'path': '/', 'expires': -1, 'httpOnly': True, 'secure': True},
{'name': 'AMCVS_4D6368F454EC41940A4C98A6%40AdobeOrg', 'value': '1', 'domain': '.hku.hk', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': False},
{'name': 'utag_main', 'value': 'v_id:01939fa183070055958b6429f88c05075005506d00bd0$_sn:1$_se:20$_ss:0$_st:1733553108768$ses_id:1733549982472%3Bexp-session$_pn:14%3Bexp-session$vapi_domain:ieeexplore-ieee-org.eproxy.lib.hku.hk', 'domain': '.hku.hk', 'path': '/', 'expires': 1765087308, 'httpOnly': False, 'secure': False},
{'name': 'AMCV_4D6368F454EC41940A4C98A6%40AdobeOrg', 'value': '179643557%7CMCIDTS%7C20065%7CMCMID%7C09958998665032490705964449674769238820%7CMCAID%7CNONE%7CMCOPTOUT-1733559088s%7CNONE%7CvVersion%7C5.5.0', 'domain': '.hku.hk', 'path': '/', 'expires': 1768111888.617908, 'httpOnly': False, 'secure': False},
{'name': 'SID', 'value': '"EUW1ED0CAFs37MFXuY5NakcbWc5Qu"', 'domain': '.lib.hku.hk', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': False},
{'name': 'CUSTOMER', 'value': '"UNIVERSITY OF HONG KONG"', 'domain': '.lib.hku.hk', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': False},
{'name': 'E_GROUP_NAME', 'value': '"University of Hong Kong"', 'domain': '.lib.hku.hk', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': False},
{'name': '__cf_bm', 'value': 'dU7HSmMJl6w4XDg.tZSoewkYsxb0bX7Barvg4RvulLw-1733551961-1.0.1.1-7Um2w5HRPO8C06bwjScmRD9BaTZkbArPDfX_e8urefWlKlH50nONZAxnxeL4VbDbHzBBcAY1OzwO5TyNuuCUfQ', 'domain': '.lib.hku.hk', 'path': '/', 'expires': 1733553761.117424, 'httpOnly': False, 'secure': False},
{'name': 'AMCVS_242B6472541199F70A4C98A6%40AdobeOrg', 'value': '1', 'domain': '.hku.hk', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': False},
{'name': 'adBlockEnabled', 'value': 'blocked', 'domain': 'www-science-org.eproxy.lib.hku.hk', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': False},
{'name': 'cookiePolicy', 'value': 'iaccept', 'domain': 'www-science-org.eproxy.lib.hku.hk', 'path': '/', 'expires': 1733638777.524329, 'httpOnly': False, 'secure': True},
{'name': 'AMCV_242B6472541199F70A4C98A6%40AdobeOrg', 'value': '179643557%7CMCIDTS%7C20065%7CMCMID%7C90810009207598809487163227219398447255%7CMCOPTOUT-1733559578s%7CNONE%7CvVersion%7C5.5.0', 'domain': '.hku.hk', 'path': '/', 'expires': 1768112378.032281, 'httpOnly': False, 'secure': False},
{'name': 'mp_94085d51c4102efbb82a71d85705cdcf_mixpanel', 'value': '%7B%22distinct_id%22%3A%20%22193873d31db19b2-00bbd0bd5ad31-1e525636-1fa400-193873d31dc28b3%22%2C%22%24device_id%22%3A%20%22193873d31db19b2-00bbd0bd5ad31-1e525636-1fa400-193873d31dc28b3%22%2C%22%24initial_referrer%22%3A%20%22%24direct%22%2C%22%24initial_referring_domain%22%3A%20%22%24direct%22%2C%22%24search_engine%22%3A%20%22google%22%7D', 'domain': '.lib.hku.hk', 'path': '/', 'expires': 1765088378, 'httpOnly': False, 'secure': False},
{'name': 'OptanonAlertBoxClosed', 'value': '2024-06-06T05:28:24.993Z', 'domain': '.orcid.org', 'path': '/', 'expires': 1749187704, 'httpOnly': False, 'secure': False, 'sameSite': 'Lax'},
{'name': 'AWSELB', 'value': 'CBD1D7FF1216388FA48838CBCA4774FD22800B8FB55A37124459E84B59F34FE231A4AA84F4ACD29C01160D60FB2ABE4D73D23EFBBE355236CF44A8FEE381C3844BD946CF3D', 'domain': 'orcid.org', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': False},
{'name': 'AWSELBCORS', 'value': 'CBD1D7FF1216388FA48838CBCA4774FD22800B8FB55A37124459E84B59F34FE231A4AA84F4ACD29C01160D60FB2ABE4D73D23EFBBE355236CF44A8FEE381C3844BD946CF3D', 'domain': 'orcid.org', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': True},
{'name': 'orcidUserConnectionId', 'value': '-114606494029392851', 'domain': 'orcid.org', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': True},
{'name': 'XSRF-TOKEN', 'value': 'b64bcd3a-f0f5-407b-9115-a1f5183f3997', 'domain': 'orcid.org', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': True},
{'name': 'JSESSIONID', 'value': '48DD20615AC49336A91F9A3A6F5B1483', 'domain': 'orcid.org', 'path': '/', 'expires': -1, 'httpOnly': True, 'secure': True},
{'name': 'locale_v3', 'value': 'en', 'domain': 'orcid.org', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': True},
{'name': 'mp_94085d51c4102efbb82a71d85705cdcf_mixpanel', 'value': '%7B%22distinct_id%22%3A%20%22193871b8e67918-084bddcb7389ac-1e525636-1fa400-193871b8e682d76%22%2C%22%24device_id%22%3A%20%22193871b8e67918-084bddcb7389ac-1e525636-1fa400-193871b8e682d76%22%2C%22%24initial_referrer%22%3A%20%22%24direct%22%2C%22%24initial_referring_domain%22%3A%20%22%24direct%22%7D', 'domain': '.orcid.org', 'path': '/', 'expires': 1764674617, 'httpOnly': False, 'secure': False},
{'name': 'OptanonConsent', 'value': 'isGpcEnabled=0&datestamp=Mon+Dec+02+2024+19%3A23%3A37+GMT%2B0800+(%E9%A6%99%E6%B8%AF%E6%A0%87%E5%87%86%E6%97%B6%E9%97%B4)&version=202310.2.0&browserGpcFlag=0&isIABGlobal=false&hosts=&consentId=71ca593a-5b7c-4963-87cf-52c27440ac95&interactionCount=1&landingPath=NotLandingPage&groups=C0001%3A1%2CC0003%3A1%2CC0002%3A1%2CC0004%3A1&geolocation=HK%3B&AwaitingReconsent=false', 'domain': '.orcid.org', 'path': '/', 'expires': 1764674617, 'httpOnly': False, 'secure': False, 'sameSite': 'Lax'},
{'name': 'cookieName', 'value': 'dont%20show%20message', 'domain': 'orcid.org', 'path': '/', 'expires': 1764674620, 'httpOnly': False, 'secure': False, 'sameSite': 'Lax'},
{'name': 'closable-unique-name', 'value': 'understood', 'domain': 'orcid.org', 'path': '/', 'expires': 1764674620, 'httpOnly': False, 'secure': False, 'sameSite': 'Lax'},
{'name': 'ezproxy', 'value': 'e1~OilZogbDH4iMWPK', 'domain': '.eproxy.lib.hku.hk', 'path': '/', 'expires': -1, 'httpOnly': True, 'secure': False},
{'name': 'ezproxyl', 'value': 'OilZogbDH4iMWPK', 'domain': '.eproxy.lib.hku.hk', 'path': '/', 'expires': -1, 'httpOnly': True, 'secure': False, 'sameSite': 'Lax'},
{'name': 'ezproxyn', 'value': 'OilZogbDH4iMWPK', 'domain': '.eproxy.lib.hku.hk', 'path': '/', 'expires': -1, 'httpOnly': True, 'secure': True},
{'name': 'AMCVS_4D6368F454EC41940A4C98A6%40AdobeOrg', 'value': '1', 'domain': '.hku.hk', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': False},
{'name': 'utag_main', 'value': 'v_id:01939fa183070055958b6429f88c05075005506d00bd0$_sn:1$_se:13$_ss:0$_st:1733552707246$ses_id:1733549982472%3Bexp-session$_pn:9%3Bexp-session$vapi_domain:ieeexplore-ieee-org.eproxy.lib.hku.hk', 'domain': '.hku.hk', 'path': '/', 'expires': 1765086907, 'httpOnly': False, 'secure': False, 'sameSite': 'Lax'},
{'name': 'AMCV_4D6368F454EC41940A4C98A6%40AdobeOrg', 'value': '179643557%7CMCIDTS%7C20065%7CMCMID%7C09958998665032490705964449674769238820%7CMCAID%7CNONE%7CMCOPTOUT-1733558198s%7CNONE%7CvVersion%7C5.5.0', 'domain': '.hku.hk', 'path': '/', 'expires': 1768110998.70329, 'httpOnly': False, 'secure': False},
{'name': 'mp_94085d51c4102efbb82a71d85705cdcf_mixpanel', 'value': '%7B%22distinct_id%22%3A%20%22193873d31db19b2-00bbd0bd5ad31-1e525636-1fa400-193873d31dc28b3%22%2C%22%24device_id%22%3A%20%22193873d31db19b2-00bbd0bd5ad31-1e525636-1fa400-193873d31dc28b3%22%2C%22%24initial_referrer%22%3A%20%22%24direct%22%2C%22%24initial_referring_domain%22%3A%20%22%24direct%22%7D', 'domain': '.lib.hku.hk', 'path': '/', 'expires': 1765087052, 'httpOnly': False, 'secure': False},
{'name': 'PREF', 'value': 'f7=4100&tz=Asia.Hong_Kong&f4=4000000', 'domain': '.youtube.com', 'path': '/', 'expires': 1767698936.819909, 'httpOnly': False, 'secure': True},
{'name': 'HSID', 'value': 'AuvRBV-Q9GEReACoE', 'domain': '.youtube.com', 'path': '/', 'expires': 1766544331.11147, 'httpOnly': True, 'secure': False},
{'name': 'SSID', 'value': 'AHum7OsxEOAD-Ibp4', 'domain': '.youtube.com', 'path': '/', 'expires': 1766544331.111503, 'httpOnly': True, 'secure': True},
{'name': 'APISID', 'value': 'FRZgwlTWYfVE-B2B/A7FrDbUAZCyMOb6ue', 'domain': '.youtube.com', 'path': '/', 'expires': 1766544331.111539, 'httpOnly': False, 'secure': False},
{'name': 'SAPISID', 'value': 'mPwGiY6zyqe8IurQ/An35YGZHoL6Ged8z8', 'domain': '.youtube.com', 'path': '/', 'expires': 1766544331.111575, 'httpOnly': False, 'secure': True},
{'name': '__Secure-1PAPISID', 'value': 'mPwGiY6zyqe8IurQ/An35YGZHoL6Ged8z8', 'domain': '.youtube.com', 'path': '/', 'expires': 1766544331.111613, 'httpOnly': False, 'secure': True},
{'name': '__Secure-3PAPISID', 'value': 'mPwGiY6zyqe8IurQ/An35YGZHoL6Ged8z8', 'domain': '.youtube.com', 'path': '/', 'expires': 1766544331.111646, 'httpOnly': False, 'secure': True},
{'name': 'LOGIN_INFO', 'value': 'AFmmF2swRgIhALZXJQRg7B6iILvfx41A-mHr8rh7RMGV3cNkppAPlxxvAiEA38fh68Ct3o4p-ywc1zHhWZxrJ5Dpcd0AcsMp4RZONUs:QUQ3MjNmeTdGelpVWXZuN1RTeUMzQkYwNEZhVXY1emtGT1pycWFmWC1LU0txanZReHBLaDRxVHJEZGRyOV8wajFIajdyLWYwcE1rSFZfRVlBM3BNaXZSQlMtLVlLR3RmSURpQjhKRlJaU0xJcHQySmZVNUp6eWFFak9rbE4yWDg5WGdjSkM4QjJhcFRYZTAwVEF6a3RPUzhsSzV0R05YWkVB', 'domain': '.youtube.com', 'path': '/', 'expires': 1766493681.602721, 'httpOnly': True, 'secure': True},
{'name': 'SID', 'value': 'g.a000qQgZRYfZgm7ZwgtAwFbDQEBvZXpLRLlzTLW27vxVIiEvbgDsOzHaI33jRkuMY7qPd2ZFEAACgYKAUMSARYSFQHGX2Miunh4qpdUynkY2PXqPSvzGxoVAUF8yKqW-lS90SUYH-cprrTRIWDZ0076', 'domain': '.youtube.com', 'path': '/', 'expires': 1766544331.111199, 'httpOnly': False, 'secure': False},
{'name': '__Secure-1PSID', 'value': 'g.a000qQgZRYfZgm7ZwgtAwFbDQEBvZXpLRLlzTLW27vxVIiEvbgDsSADU_B-86vJipiHTuQxp3gACgYKATgSARYSFQHGX2MinsKT7OE3L_H9SEzrOgndQRoVAUF8yKqLdTsMTC89NTlwps9-uRSu0076', 'domain': '.youtube.com', 'path': '/', 'expires': 1766544331.111393, 'httpOnly': True, 'secure': True},
{'name': '__Secure-3PSID', 'value': 'g.a000qQgZRYfZgm7ZwgtAwFbDQEBvZXpLRLlzTLW27vxVIiEvbgDsvr3qlDZAuj7i33G332SpFwACgYKAekSARYSFQHGX2Mi9DYKrElENMuHNeaGctBCdBoVAUF8yKrmoWXREkhXzva2a6J3B2ps0076', 'domain': '.youtube.com', 'path': '/', 'expires': 1766544331.111431, 'httpOnly': True, 'secure': True},
{'name': 'NID', 'value': '519=I7v3EQhK2PGhnXSOCZyz5QYVGbJ383LEPRVqShP2G15ip3zj5VjyDJWEAlJtS3ifC0qs-7cxlwk_vCTqntg_LWW_hfONxTqG6JVJ8JvpMEr2eM_Fqb9n8nVbc_YNrwSIUEorM5N5FUoZmW2u4Qksi_a0-ssHJWsOBEwdxpDONaY', 'domain': '.youtube.com', 'path': '/', 'expires': 1747984059.767459, 'httpOnly': True, 'secure': True},
{'name': 'mp_94085d51c4102efbb82a71d85705cdcf_mixpanel', 'value': '%7B%22distinct_id%22%3A%20%22cm28oqri501xjtwbjzt1prdzm%22%2C%22%24device_id%22%3A%20%221928f27a2a80-04cb41970e6945-16525637-16a7f0-1928f27a2a920cc%22%2C%22%24initial_referrer%22%3A%20%22%24direct%22%2C%22%24initial_referring_domain%22%3A%20%22%24direct%22%2C%22%24user_id%22%3A%20%22cm28oqri501xjtwbjzt1prdzm%22%2C%22%24search_engine%22%3A%20%22google%22%2C%22utm_source%22%3A%20%22ythp%22%2C%22utm_medium%22%3A%20%22LeftNav%22%2C%22utm_campaign%22%3A%20%22ytgen%22%2C%22utm_content%22%3A%20%22txt%22%7D', 'domain': '.youtube.com', 'path': '/', 'expires': 1764674936, 'httpOnly': False, 'secure': False},
{'name': 'ST-xuwub9', 'value': 'session_logininfo=AFmmF2swRgIhALZXJQRg7B6iILvfx41A-mHr8rh7RMGV3cNkppAPlxxvAiEA38fh68Ct3o4p-ywc1zHhWZxrJ5Dpcd0AcsMp4RZONUs%3AQUQ3MjNmeTdGelpVWXZuN1RTeUMzQkYwNEZhVXY1emtGT1pycWFmWC1LU0txanZReHBLaDRxVHJEZGRyOV8wajFIajdyLWYwcE1rSFZfRVlBM3BNaXZSQlMtLVlLR3RmSURpQjhKRlJaU0xJcHQySmZVNUp6eWFFak9rbE4yWDg5WGdjSkM4QjJhcFRYZTAwVEF6a3RPUzhsSzV0R05YWkVB', 'domain': '.youtube.com', 'path': '/', 'expires': 1733138942, 'httpOnly': False, 'secure': False},
{'name': '__Secure-1PSIDTS', 'value': 'sidts-CjIBQT4rXxBz2VTDVx5cMn6A_YgFHPgo9Z-eWATlXeINT58ZUInn2_vTeUb4czgvWV-j5BAA', 'domain': '.youtube.com', 'path': '/', 'expires': 1764674938.226552, 'httpOnly': True, 'secure': True},
{'name': '__Secure-3PSIDTS', 'value': 'sidts-CjIBQT4rXxBz2VTDVx5cMn6A_YgFHPgo9Z-eWATlXeINT58ZUInn2_vTeUb4czgvWV-j5BAA', 'domain': '.youtube.com', 'path': '/', 'expires': 1764674938.226624, 'httpOnly': True, 'secure': True},
{'name': 'SIDCC', 'value': 'AKEyXzWXdf72zjmIboZNkzmg9VURwnmM1MpJVRgAxjRuMRib1b7iu5SkCZccexcK6jf2abrLEoQ', 'domain': '.youtube.com', 'path': '/', 'expires': 1764674940.01384, 'httpOnly': False, 'secure': False},
{'name': '__Secure-1PSIDCC', 'value': 'AKEyXzXWzx9lRoJCEXrHvqZeWtAugc_tFou4ucmylPeSpc0nRX2EZ-t3QGTGqberRiTB3QIHjQ', 'domain': '.youtube.com', 'path': '/', 'expires': 1764674940.013891, 'httpOnly': True, 'secure': True},
{'name': '__Secure-3PSIDCC', 'value': 'AKEyXzVyGt5J-awGqBrP5_hTnwTmCMsUu5oWISlljhXbP9P7vrGxlzOg05O_vwlgbuGOKRUQGYA', 'domain': '.youtube.com', 'path': '/', 'expires': 1764674940.013962, 'httpOnly': True, 'secure': True},
{'name': 'MAID', 'value': '+O8mvi2rAtZrnJqF+2cRIQ==', 'domain': '.pnas.org', 'path': '/', 'expires': 1759078802.198648, 'httpOnly': True, 'secure': True},
{'name': 'MACHINE_LAST_SEEN', 'value': '2024-12-02T09%3A00%3A01.960-08%3A00', 'domain': '.pnas.org', 'path': '/', 'expires': 1759078802.198711, 'httpOnly': True, 'secure': True},
{'name': 'JSESSIONID', 'value': 'CEDD494D14F0052C199B1D7AE667EF42', 'domain': '.pnas.org', 'path': '/', 'expires': -1, 'httpOnly': True, 'secure': True},
{'name': '__cf_bm', 'value': 'YJQBFxCTLG1d3d9R0fVmwlmAgP9kqVl3zwf02v.COMQ-1733158802-1.0.1.1-tLccs1jD809lM7_9Bhy35sLQdM1TaakBEYvhdDEi1w9cWJS9IGjovTwKGdYQtse6_rWkJNYt._LsHQI2WCwDUQ', 'domain': '.pnas.org', 'path': '/', 'expires': 1733160603.504839, 'httpOnly': True, 'secure': True},
{'name': 'cookiePolicy', 'value': 'accept', 'domain': '.pnas.org', 'path': '/', 'expires': 1767718816.994233, 'httpOnly': True, 'secure': True},
{'name': 'connect_auto_login', 'value': 'true', 'domain': '.pnas.org', 'path': '/', 'expires': 1735750875.510643, 'httpOnly': True, 'secure': True},
{'name': 'PLUID', 'value': 'l8nplDdx7mN9Xh4lErbknypxfmo=', 'domain': '.pnas.org', 'path': '/', 'expires': 1759078875.9476, 'httpOnly': True, 'secure': True},
{'name': 'mp_94085d51c4102efbb82a71d85705cdcf_mixpanel', 'value': '%7B%22distinct_id%22%3A%20%221938850d07a6d2-0446945abb35c6-1e525636-16a7f0-1938850d07c132a%22%2C%22%24device_id%22%3A%20%221938850d07a6d2-0446945abb35c6-1e525636-16a7f0-1938850d07c132a%22%2C%22%24initial_referrer%22%3A%20%22%24direct%22%2C%22%24initial_referring_domain%22%3A%20%22%24direct%22%7D', 'domain': '.pnas.org', 'path': '/', 'expires': 1764694876, 'httpOnly': False, 'secure': False},
{'name': 'donation-identifier', 'value': '8ed6af4cc08b88b68b36fffcb6dd7323', 'domain': '.archive.org', 'path': '/', 'expires': 1741773847.95608, 'httpOnly': False, 'secure': False},
{'name': 'abtest-identifier', 'value': 'ca9982a6c4240d53598f01665a3c6100', 'domain': '.archive.org', 'path': '/', 'expires': 1741773847.956153, 'httpOnly': False, 'secure': False},
{'name': 'test-cookie', 'value': '1', 'domain': '.archive.org', 'path': '/', 'expires': 1734348067.326946, 'httpOnly': False, 'secure': False},
{'name': 'g_state', 'value': '{"i_l":0}', 'domain': 'archive.org', 'path': '/', 'expires': 1748690473, 'httpOnly': False, 'secure': False},
{'name': 'logged-in-sig', 'value': '1764674476%201733138476%20Y3yQCmHjxUil%2FcGs%2FgYR6m%2FHA%2F%2FtAtShDsn25N2tNIzvkGr6EkwbEsYEwDTjZ6%2Bu4Iy65eDH5gZVrZayaRZzJEa6R91agNjLC1rmw%2F47W5OXyDVFN5kLX%2Ba2OxNOzEx6Ws%2BLVwFVr%2Bdnbzhdt1vqNTEpECwy14%2Fu4n9qXGANJ5IKEO7pfu4ONymTb0RWH%2B158Wphp0Gluy9bR1a3t3TSGM%2FyhBEa37FJ56ckJJDghwIVsANhhu%2FextDlCDLXDkPtxLrwdX%2FAlbBoNFIeQ5%2BzoJX21KKQVdJxVWzSRLb4LXyFQsvhkpL221qlJ%2FDQER53IrTAIkmxrDI4cfjumUnKTQ%3D%3D', 'domain': '.archive.org', 'path': '/', 'expires': 1764674476.838234, 'httpOnly': False, 'secure': False},
{'name': 'logged-in-user', 'value': 'jiabintang77%40gmail.com', 'domain': '.archive.org', 'path': '/', 'expires': 1764674476.838343, 'httpOnly': False, 'secure': False},
{'name': 'PHPSESSID', 'value': 'jteta3bg9mb3t8e6dkp7r6mcd4', 'domain': '.archive.org', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': False},
{'name': 'donation', 'value': 'x', 'domain': '.archive.org', 'path': '/', 'expires': 1736767334, 'httpOnly': False, 'secure': False},
{'name': 'mp_94085d51c4102efbb82a71d85705cdcf_mixpanel', 'value': '%7B%22distinct_id%22%3A%20%22193871a38b622b1-030839df772cce-1e525636-1fa400-193871a38b71d9a%22%2C%22%24device_id%22%3A%20%22193871a38b622b1-030839df772cce-1e525636-1fa400-193871a38b71d9a%22%2C%22%24initial_referrer%22%3A%20%22%24direct%22%2C%22%24initial_referring_domain%22%3A%20%22%24direct%22%7D', 'domain': '.archive.org', 'path': '/', 'expires': 1764675133, 'httpOnly': False, 'secure': False},
{'name': 'ncbi_sid', 'value': '015E11D6531E8483_1525SID', 'domain': '.nih.gov', 'path': '/', 'expires': 1764675079.027761, 'httpOnly': False, 'secure': False},
{'name': 'pmc-frontend-csrftoken', 'value': 'L3uvd1o5Uu2efxgCXWDzwxfDTl5QIFDR', 'domain': 'www.ncbi.nlm.nih.gov', 'path': '/', 'expires': 1753769052.705813, 'httpOnly': False, 'secure': False, 'sameSite': 'Lax'},
{'name': 'ncbi-login-route', 'value': 'google', 'domain': '.ncbi.nlm.nih.gov', 'path': '/', 'expires': 1741001395.405247, 'httpOnly': False, 'secure': False},
{'name': 'PRESERVTARGET', 'value': '%2FtYTXpgzJne16bwfb4ZN2lGInyYoZNk58TVbSvhIR0njSJplCp65%2BiF2SZAktvmmznDxgJBJhBCH%0ANoo2je1cMk0RXykLSXa4UwW7u0%2B%2Fc1X7WzHdCi209NjSVDPLNfOmFzmtz50Uuh6EfD95OQ%2BYQ2B%2B%0Aq7BP3es9s8ArLlZd9XW7NS72Ulu8cigULF%2FZADnu%2FPZf8DmPLOXuV6xWf0fqcNlZXwWhiCjrPJiU%0AU594rDm20QBWFe5y0VjWXnJtzYm7uSPkWDQYJ8htbKyWwjn4aG0xcYfTBSBUTOi9A%2Bo1BnUPHLIi%0A8V9%2Fi7S2i2vLCCwVTCSGS0pctKKWZRmzEmP9NB4rA167%2FSMuyX6ezHZNUyztiKaga84g5monl5bT%0AjNlmWeBFQV90piriK2wjmey3mIoTu2eJyDi%2Bx%2FO7pwMTfeiU2WXZ5h3U4kRBxw%2FR6%2FrCMYtVrzXp%0A%2FexiuMJDHQmiDPowP8dxw97tgs353jnBRGe8jpoCPoPG2hywQnwXtxW8SjWp19yTypxVFl4KnD1e%0A5aoPyq%2F7tPDRPbW7UikYuihFvX0mD1TH7A0G9Bk%2B36y%2F7jL8oW7OArzEbESjcx2aVRL%2B3VqzX1Oc%0AZcFWXfVarYgckE8EeyNwFwhPDoASs2T4SVNAJAQ38A0bYzCAxc6mQLqADqesOuuveClDDgB8WITg%0A1QnE32rGsLz37nzAQ89V', 'domain': '.nih.gov', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': True},
{'name': 'NIHSMPROFILE', 'value': '9i9xFyZxcZ3DeEBWJ1M%2B1ygJsb2LhWqfanAC3W20fjIpeXaMrRQ%2F9L3R6DUjYzq5%2FqUDVLhYywfn1%2BT0RJpzID8efN8zNczLDneXLM7waIbhTdfwbIh%2BCnmN0fucHtqYylLU1altZcOhSRTow47jYwyEUFsmJ6kz3T1%2BnZWx3Ol0zaFC8onzZCtl4YHbCxMJVbHYuMcGM4f4YxpiDefQvlDdwY1soBI8z9nvb%2BKMs1B3GgplTzyllWIbC1RHxGLvdlNaz8Zlzw6MU4B3piqrAiCWAvoMF3%2FSShIchSdP0utP%2BMROhcGaoWBU%2FKfkjjDc3lHCPfydE%2F895aasf6uvrL7uccokjb6HxdVs0FA%2FHxfBNJXURVRSpRl9%2BPOd9%2FOOlXQQqhBh1FyAZs6WIxDvLhegMvLITcLh7ahcahuJnoeImSla4b4kK0Ayy6736mJCa0hhXUzGjab4Yhht11PliHlAlh4wLEXj0Dp7X9pj7Ws1%2BdCx8QZyiTWrbauCQJtS1hNXn%2Blg4BoQ2sIq%2FxltuA%3D%3D', 'domain': '.nih.gov', 'path': '/', 'expires': -1, 'httpOnly': True, 'secure': True},
{'name': 'NIHSMSESSION', 'value': 'ZITCB6DlveT31D7iO+eVBnrLBxVxrCJGSz/5hVXNSYwvjuPpvd0O7dD6XqsFf6LKdJXktiX+hhtPWtsNok2mgiSvcpYTBHJxHVefyBt+KiLTVm12lBwYTkx9Gb9OxZNQsMS/Hvoj45/ShvSKut3d7c8e2cEhtjA7DWjHEYHj0tuk3fEoQ4t0UAWkhj6bFt5Vo5tm6dyqen/1EH2o6cBZrVSLbk67LctlbRyV4pc5099Il2lTMPo6LqtyVI1AC/bcSioge+LqDbpDiuP4NOF3EPj/yFSWvBz76/bqQ0Hu5oRGCC1zVPhSGJ1iukio91F6IfYr5vTtMrN00vSuxHRnxj0BYCEuGKtCeNDohuGAZvQVkjhc1aI53oWFu8UNHZvYA+Xo2lpLZUOT+2pkjk1Z/WpAPzE8L+O6mRjwaq8/2b3rUS8gq4Y/2vQYhqmNxeHOBsM01BhpJdRF3Urp3dnIIYakYeCgzC/3Kww+p8ImsBTjvXDO1ivVEjKB4AdnCsRdLpGPszS9xF7439aXXCWHDItM4Wl458T12QWeV+DXiiwzD/kis1QQBWibzsZOzo9KDM3oapKa8I2ouKXb797Z7s+eLR1+Z10lyWZuNVLLZK5ckFT5riayLYeT8+IjFYVER/nfDzm3KpgVPnep/k4DANpDgAOK78iuTv3sBndNMoKrXz2qCZtfi3/gLGZTKcOy90meluFZy9+iLyb+M01VBWuDp/v0a2jSdsJPVmgUQqz7hLVvtc4KpMfiDhfxXGMQnaieP9jREFK3NutAiUrkjS96WS3v5eLK80o/aG1j5IsAvxU/0lMnEri3Yz6Qw1f0ymS6giKiFIUBRUWGXcm5S1qCjwL5GiU71r3nOcaC8T9T1pVLf1R558WqH6Ha95aJVqN6CnEHo8TsZl25lb5tlJgbgb2OFvLSrbUZwuM3R5mA9zP7ciQBywxNm7xFO8sX8QQk0bRhrhgk458KE72Ci/8lhZmvpYy5aqbI4OtaLkuFuu3lX3c7/LsGt+iTFkO6eDSS4CFEnFqg3W5Glvs7WZkTasVI7L0mN0q8DCPXaIDFVPlXEA0shxZuB6Iz+mx4MshQHwY9fMRSWB7gOF5cHjHYUBLfHT/gOwl35rkoJfVf9ikpcgT88mJyk9KTQpVM+CZAGUFDbgHsRqA0jPE19sBum3cqaA6fzh9AnWXfOlAY5KNDdTB4yip4UakCXWsiXVng0GfQ7KvxAguC59L7iZyFjdsIDESi7ZozcPHOpFZleeAU3yFTvMGHmO3G3RFrxyIGCwgWehus3YCqQxZPSE6+yLjXeXTqhqgk0kxcV/MlOFgzMcAhgKEYJS045sLZsmohsIVLV0ONY4uqogSxd3YUzc0WImi1mYdNbzYwbX5tPngah4SK61Nia8Z6xjZuKfXnxNFEkNneezPoPy97Hvd+9wzI+DkU5sa844DzGxeSY/ySE3DTtpowf440r5rX', 'domain': '.nih.gov', 'path': '/', 'expires': -1, 'httpOnly': True, 'secure': True},
{'name': 'MyNcbiSigninPreferences', 'value': 'O2dvb2dsZSY%3D', 'domain': '.nih.gov', 'path': '/', 'expires': 1740915025.611341, 'httpOnly': False, 'secure': False},
{'name': 'ncbi_prevPHID', 'value': 'CE88342C74D8A32100000000003B0036', 'domain': '.ncbi.nlm.nih.gov', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': False},
{'name': 'WebCubbyUser', 'value': '3GX25AI24DLUXL8LVDJFIVTH6LJRZBE1%3Blogged-in%3Dtrue%3Bmy-name%3Djiabintang77%2540gmail.com%3Bpersistent%3Dfalse%40015E11D6531E8483_1525SID', 'domain': '.nih.gov', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': False},
{'name': 'WebEnv', 'value': '1D7wJH%40015E11D6531E8483_1525SID', 'domain': '.nlm.nih.gov', 'path': '/', 'expires': 1733167826.636953, 'httpOnly': True, 'secure': True},
{'name': 'ncbi_pinger', 'value': 'N4IgDgTgpgbg+mAFgSwCYgFwgAwEYCsAorrgCIBs+AzLoQBwAsdAnLgOxU1XPZt354AygElSIAL5A===', 'domain': '.ncbi.nlm.nih.gov', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': True},
{'name': 'mp_94085d51c4102efbb82a71d85705cdcf_mixpanel', 'value': '%7B%22distinct_id%22%3A%20%22193872246ca871-06560f33a3902-1e525636-1fa400-193872246cb267c%22%2C%22%24device_id%22%3A%20%22193872246ca871-06560f33a3902-1e525636-1fa400-193872246cb267c%22%2C%22%24search_engine%22%3A%20%22google%22%2C%22%24initial_referrer%22%3A%20%22https%3A%2F%2Fwww.google.com%2F%22%2C%22%24initial_referring_domain%22%3A%20%22www.google.com%22%7D', 'domain': '.nih.gov', 'path': '/', 'expires': 1764675078, 'httpOnly': False, 'secure': False},
{'name': '_device_id', 'value': '49f9d6cfbd603c8509e73807be70a438', 'domain': 'github.com', 'path': '/', 'expires': 1764674868.858374, 'httpOnly': True, 'secure': True, 'sameSite': 'Lax'},
{'name': 'MicrosoftApplicationsTelemetryDeviceId', 'value': '3966ee53-78ca-4fa3-95d7-85e299cecee4', 'domain': 'github.com', 'path': '/', 'expires': 1763890136.033527, 'httpOnly': False, 'secure': True},
{'name': '_octo', 'value': 'GH1.1.1313590405.1727940967', 'domain': '.github.com', 'path': '/', 'expires': 1759476967, 'httpOnly': False, 'secure': True, 'sameSite': 'Lax'},
{'name': 'GHCC', 'value': 'Required:1-Analytics:1-SocialMedia:1-Advertising:1', 'domain': '.github.com', 'path': '/', 'expires': 1745563377, 'httpOnly': False, 'secure': True, 'sameSite': 'Lax'},
{'name': 'MSFPC', 'value': 'GUID=3452f0b49fd14d349a6dbf8ddee26d60&HASH=3452&LV=202410&V=4&LU=1730011383391', 'domain': 'github.com', 'path': '/', 'expires': 1761547383.513164, 'httpOnly': False, 'secure': True},
{'name': 'logged_in', 'value': 'yes', 'domain': '.github.com', 'path': '/', 'expires': 1762511337.053395, 'httpOnly': True, 'secure': True, 'sameSite': 'Lax'},
{'name': 'saved_user_sessions', 'value': '151511798%3A8an8gJwE3la35NvNIyacuRFRSHlup_9RBaQ5q4CThhvPV89o%7C152840453%3A2Quysh6Cns_a0IpeKcw-GAUZIt6ZndbJ7BoGdxx11qkZa9bi%7C151510669%3AMpYw2DQuFwt3NJiimm36OWLTQmoWFzVcSUbLuV8SBFRPqN8-%7C165454715%3AZSjwi4MUxVCr91r-m1ElvPL2L0DGDSoSo6uwV7pPpliml3js%7C148674909%3ALnLJclEDIxFjFcwX0eBlgOJzbDpsxKedtd6So7_EFs6HPtL7%7C56426168%3AmM073me2REE4yufnvYBKQ84KsG54oHKLBiTSs5tXYCoyQx4g', 'domain': 'github.com', 'path': '/', 'expires': 1739599354.295483, 'httpOnly': True, 'secure': True, 'sameSite': 'Lax'},
{'name': 'user_session', 'value': 'mM073me2REE4yufnvYBKQ84KsG54oHKLBiTSs5tXYCoyQx4g', 'domain': 'github.com', 'path': '/', 'expires': 1734348468.858989, 'httpOnly': True, 'secure': True, 'sameSite': 'Lax'},
{'name': '__Host-user_session_same_site', 'value': 'mM073me2REE4yufnvYBKQ84KsG54oHKLBiTSs5tXYCoyQx4g', 'domain': 'github.com', 'path': '/', 'expires': 1734348468.859144, 'httpOnly': True, 'secure': True, 'sameSite': 'Strict'},
{'name': 'dotcom_user', 'value': 'tjb-tech', 'domain': '.github.com', 'path': '/', 'expires': 1763647073.257243, 'httpOnly': True, 'secure': True, 'sameSite': 'Lax'},
{'name': 'color_mode', 'value': '%7B%22color_mode%22%3A%22auto%22%2C%22light_theme%22%3A%7B%22name%22%3A%22light%22%2C%22color_mode%22%3A%22light%22%7D%2C%22dark_theme%22%3A%7B%22name%22%3A%22dark%22%2C%22color_mode%22%3A%22dark%22%7D%7D', 'domain': '.github.com', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': True, 'sameSite': 'Lax'},
{'name': 'cpu_bucket', 'value': 'xlg', 'domain': '.github.com', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': True, 'sameSite': 'Lax'},
{'name': 'preferred_color_mode', 'value': 'light', 'domain': '.github.com', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': True, 'sameSite': 'Lax'},
{'name': 'tz', 'value': 'Asia%2FHong_Kong', 'domain': '.github.com', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': True, 'sameSite': 'Lax'},
{'name': 'mp_94085d51c4102efbb82a71d85705cdcf_mixpanel', 'value': '%7B%22distinct_id%22%3A%20%22cm28oqri501xjtwbjzt1prdzm%22%2C%22%24device_id%22%3A%20%221928eb980316cc-050dbe3db24bd2-16525637-16a7f0-1928eb980321bb8%22%2C%22%24search_engine%22%3A%20%22google%22%2C%22%24initial_referrer%22%3A%20%22https%3A%2F%2Fwww.google.com%2F%22%2C%22%24initial_referring_domain%22%3A%20%22www.google.com%22%2C%22%24user_id%22%3A%20%22cm28oqri501xjtwbjzt1prdzm%22%7D', 'domain': '.github.com', 'path': '/', 'expires': 1764674869, 'httpOnly': False, 'secure': False},
{'name': '_gh_sess', 'value': 'oUZyg0XEvo5fm%2FC18yV17FMePsGYB4hM9R5q8AgiwOAjTritHx1Ux4jNGjnm7Jaxz99%2FOxD4agIy05dUdG6cnSxRP62NJE7bZxIWFV2W64ekLVCwz7ge2oaRcvVlN4HjVhw5dsl2czpD8Irn%2BZG0Dmw16tH9GZZ4yhaFW5%2Fshmte3DBYsndzLNn4rGje9B3P1IFYyz9sYx23j71xRb9wRjwoLHPYGf4Yp3vRKVAzTp3X6nrjvgr4XGU2N%2BGPH3OYDZQYCIPLckTIEmRg7a0dd2KvU2mfcm%2F%2B9N9%2FNNBFTbKvUhPwWM8kIRpv5WTzU%2FI5Y0qBv71gX2B7nNm%2FtIkWjbWUhgizf%2BpxOHAuhs89sRaicpc9NjasSUISwfxRCoH5evWqVXEifhqQvSU42iR4wkhnRHs%3D--za2vZwPq%2FBJxevj3--tEOzEYASRs0gepJUCIv8Mg%3D%3D', 'domain': 'github.com', 'path': '/', 'expires': -1, 'httpOnly': True, 'secure': True, 'sameSite': 'Lax'},
{'domain': 'www.nature.com', 'secure': False, 'expirationDate': 1733745572000, 'hostOnly': True, 'name': 'user.uuid.v2', 'httpOnly': False, 'session': False, 'storeId': None, 'value': '"765b07e9-028b-45d1-8abd-baa7b6c88125"', 'path': '/', 'sameSite': 'Lax'},
{'domain': '.nature.com', 'secure': False, 'hostOnly': False, 'name': 'Hm_lpvt_d38bce82bcb44717ccc29a90c4b781ea', 'httpOnly': False, 'session': True, 'storeId': None, 'value': '1733140842', 'path': '/', 'sameSite': 'Lax'},
{'domain': '.nature.com', 'secure': False, 'expirationDate': 1764676842000, 'hostOnly': False, 'name': 'ajs_anonymous_id', 'httpOnly': False, 'session': False, 'storeId': None, 'value': '7e4d00ab-3618-46a2-b0fb-c80b189a0584', 'path': '/', 'sameSite': 'Lax'},
{'domain': '.nature.com', 'secure': False, 'expirationDate': 1764676842000, 'hostOnly': False, 'name': 'ajs_user_id', 'httpOnly': False, 'session': False, 'storeId': None, 'value': 'b1ae7862-b9d6-49c5-a7a5-ad96682ac6dc_SN', 'path': '/', 'sameSite': 'Lax'},
{'domain': '.nature.com', 'secure': False, 'expirationDate': 1767700841000, 'hostOnly': False, 'name': '_ga_B3E4QL2TPR', 'httpOnly': False, 'session': False, 'storeId': None, 'value': 'GS1.1.1733140776.1.1.1733140841.60.0.0', 'path': '/', 'sameSite': 'Lax'},
{'domain': '.nature.com', 'secure': False, 'expirationDate': 1767700841000, 'hostOnly': False, 'name': '_ga_ERRNTNZ807', 'httpOnly': False, 'session': False, 'storeId': None, 'value': 'GS1.1.1733140776.1.1.1733140841.60.0.467679787', 'path': '/', 'sameSite': 'Lax'},
{'domain': '.nature.com', 'secure': False, 'expirationDate': 1767304843000, 'hostOnly': False, 'name': 'cto_bundle', 'httpOnly': False, 'session': False, 'storeId': None, 'value': '5685XF9lQVd1dU4zd2xWRE1uZ3BpQTk3SUVXNkx2bGslMkZwTkZodjRWJTJCcGoyd0JWdiUyQjVlcGkwMVoyWHc4aGxKQkM2N3hyeGI4aFlIRzBZRDNTUTJFb1JYZVhPJTJGMUIlMkZka252a0RPZFdlbld4OU1jaUFrMHN6VDVaREYzSSUyRmFDMEtnb0FoaQ', 'path': '/', 'sameSite': 'Lax'},
{'domain': '.nature.com', 'secure': True, 'expirationDate': 1766836842000, 'hostOnly': False, 'name': '__gpi', 'httpOnly': False, 'session': False, 'storeId': None, 'value': 'UID=00000fa61060e41d:T=1733140842:RT=1733140842:S=ALNI_Mai2WWloG6liac6hEyJYOSjI3WtCg', 'path': '/', 'sameSite': 'Lax'},
{'domain': '.nature.com', 'secure': False, 'expirationDate': 1766836841000, 'hostOnly': False, 'name': '_uetvid', 'httpOnly': False, 'session': False, 'storeId': None, 'value': 'e6d7f220b0a411efaac753cc9ddac552', 'path': '/', 'sameSite': 'Lax'},
{'domain': '.nature.com', 'secure': False, 'expirationDate': 1764676841000, 'hostOnly': False, 'name': 'Hm_lvt_d38bce82bcb44717ccc29a90c4b781ea', 'httpOnly': False, 'session': False, 'storeId': None, 'value': '1733140777', 'path': '/', 'sameSite': 'Lax'},
{'domain': '.nature.com', 'secure': True, 'expirationDate': 1748692774000, 'hostOnly': False, 'name': '__eoi', 'httpOnly': False, 'session': False, 'storeId': None, 'value': 'ID=1ced890879e93934:T=1733140774:RT=1733140774:S=AA-AfjauQ5O9wXrdBjufrcsmQ-EM', 'path': '/', 'sameSite': 'Lax'},
{'domain': '.nature.com', 'secure': True, 'expirationDate': 1766836842000, 'hostOnly': False, 'name': '__gads', 'httpOnly': False, 'session': False, 'storeId': None, 'value': 'ID=edf25ef88638a1b3:T=1733140842:RT=1733140842:S=ALNI_MYUdW0s3LG6IOpCKgjBo4gbGPsI1Q', 'path': '/', 'sameSite': 'Lax'},
{'domain': '.nature.com', 'secure': False, 'expirationDate': 1740916843000, 'hostOnly': False, 'name': '_fbp', 'httpOnly': False, 'session': False, 'storeId': None, 'value': 'fb.1.1733140776577.688163329394303800', 'path': '/', 'sameSite': 'Lax'},
{'domain': '.nature.com', 'secure': False, 'expirationDate': 1767700841000, 'hostOnly': False, 'name': '_ga', 'httpOnly': False, 'session': False, 'storeId': None, 'value': 'GA1.1.2115119478.1733140776', 'path': '/', 'sameSite': 'Lax'},
{'domain': '.nature.com', 'secure': False, 'expirationDate': 1733227241000, 'hostOnly': False, 'name': '_uetsid', 'httpOnly': False, 'session': False, 'storeId': None, 'value': 'e6d7f280b0a411efaed4a5384bcc5d88', 'path': '/', 'sameSite': 'Lax'},
{'domain': '.nature.com', 'secure': False, 'hostOnly': False, 'name': 'HMACCOUNT', 'httpOnly': False, 'session': True, 'storeId': None, 'value': '7B6C1DFC72FE250C', 'path': '/', 'sameSite': 'Lax'},
{'domain': '.nature.com', 'secure': True, 'expirationDate': 1767700841000, 'hostOnly': False, 'name': 'permutive-id', 'httpOnly': False, 'session': False, 'storeId': None, 'value': '7cbbccaf-2079-4e6d-99fc-186a9db51c90', 'path': '/', 'sameSite': 'Lax'},
{'domain': '.nature.com', 'secure': False, 'expirationDate': 1767700841000, 'hostOnly': False, 'name': 'permutive-session', 'httpOnly': False, 'session': False, 'storeId': None, 'value': '%7B%22session_id%22%3A%221d3a9243-5c93-4975-ae30-63ca2047b7cf%22%2C%22last_updated%22%3A%222024-12-02T12%3A00%3A41.747Z%22%7D', 'path': '/', 'sameSite': 'Lax'},
{'domain': '.nature.com', 'secure': False, 'expirationDate': 1764676775000, 'hostOnly': False, 'name': 'sncc', 'httpOnly': False, 'session': False, 'storeId': None, 'value': 'P%3D8%3AV%3D68.0.0%26C%3DC01%2CC02%2CC03%2CC04%26D%3Dtrue', 'path': '/', 'sameSite': 'Lax'},
]

View file

@ -0,0 +1,95 @@
import os
import subprocess
from constant import GITHUB_AI_TOKEN, AI_USER, BASE_IMAGES
import time
from metachain.util import run_command_in_container
def init_container(workplace_name, container_name, test_pull_name = 'test_pull_1010', task_name = 'test_task', git_clone = False, setup_package = 'setup_package'):
    """Create (or reuse) a Docker container with a bind-mounted workplace dir.

    Steps:
      1. Ensure ./<workplace_name> exists on the host and holds tcp_server.py
         (unpacked from packages/<setup_package>.tar.gz on first use).
      2. Optionally clone the metachain repo (branch ``test_pull_name``) and
         switch to a task-specific branch ``<test_pull_name>_<task_name>``.
      3. Reuse an already-running container, restart a stopped one, or
         ``docker run`` a new one that serves commands via tcp_server.py on
         port 12345.

    Args:
        workplace_name: name of the host sub-directory mounted into the container.
        container_name: Docker container name to look up / create.
        test_pull_name: git branch to clone when ``git_clone`` is True.
        task_name: suffix used to build the per-task branch name.
        git_clone: when True, clone the metachain repository into the workplace.
        setup_package: tarball (under packages/) providing tcp_server.py.

    Raises:
        Exception: if cloning, branch switching, or container creation fails.
    """
    # get the current working directory's subfolder path
    workplace = os.path.join(os.getcwd(), workplace_name)
    # check if the container exists (running or stopped)
    container_check_command = ["docker", "ps", "-a", "--filter", f"name={container_name}", "--format", "{{.Names}}"]
    existing_container = subprocess.run(container_check_command, capture_output=True, text=True)
    os.makedirs(workplace, exist_ok=True)
    if not os.path.exists(os.path.join(workplace, 'tcp_server.py')):
        # First use: unpack the setup tarball that carries tcp_server.py.
        unzip_command = ["tar", "-xzvf", f"packages/{setup_package}.tar.gz", "-C", workplace]
        subprocess.run(unzip_command)
    if git_clone:
        if not os.path.exists(os.path.join(workplace, 'metachain')):
            # Use an argument list plus cwd= instead of a "cd … && …" shell
            # string: no shell quoting issues with paths/tokens, no shell=True.
            clone_command = ["git", "clone", "-b", test_pull_name,
                             f"https://{AI_USER}:{GITHUB_AI_TOKEN}@github.com/tjb-tech/metachain.git"]
            result = subprocess.run(clone_command, cwd=workplace)
            if result.returncode != 0:
                raise Exception("Failed to clone the repository. Please check your internet connection and try again.")
            # create a new task-specific branch
            repo_dir = os.path.join(workplace, 'metachain')
            new_branch_name = f"{test_pull_name}_{task_name}"
            result = subprocess.run(["git", "checkout", "-b", new_branch_name],
                                    cwd=repo_dir, capture_output=True, text=True)
            if result.returncode != 0:
                # `checkout -b` fails when the branch already exists; report and
                # fall back to a plain checkout of the existing branch.
                print(f"Failed to create and switch to new branch. Error: {result.stderr}")
                result = subprocess.run(["git", "checkout", new_branch_name],
                                        cwd=repo_dir, capture_output=True, text=True)
                if result.returncode != 0:
                    raise Exception(f"Failed to switch to new branch. Error: {result.stderr}")
                else:
                    print(f"Successfully switched to new branch: {new_branch_name}")
            else:
                print(f"Successfully created and switched to new branch: {new_branch_name}")
    if existing_container.stdout.strip() == container_name:
        # check if the container is running
        running_check_command = ["docker", "ps", "--filter", f"name={container_name}", "--format", "{{.Names}}"]
        running_container = subprocess.run(running_check_command, capture_output=True, text=True)
        if running_container.stdout.strip() == container_name:
            print(f"Container '{container_name}' is already running. Skipping creation.")
            return  # container is already running, skip creation
        else:
            # container exists but is not running, start it
            start_command = ["docker", "start", container_name]
            subprocess.run(start_command)
            print(f"Container '{container_name}' has been started.")
            return
    # if the container does not exist, create and start a new container
    docker_command = [
        "docker", "run", "-d", "--name", container_name, "--user", "root",
        "-v", f"{workplace}:/{workplace_name}",
        "-w", f"/{workplace_name}", "-p", "12345:12345", BASE_IMAGES,
        "/bin/bash", "-c",
        f"python3 /{workplace_name}/tcp_server.py --workplace {workplace_name}"
    ]
    # execute the docker command
    result = subprocess.run(docker_command, capture_output=True, text=True)
    if result.returncode != 0:
        raise Exception(f"Failed to start container: {result.stderr}")
    if wait_for_container_ready(container_name, timeout=60):
        print(f"Container '{container_name}' has been created and started.")
def wait_for_container_ready(container_name, timeout=30):
    """Poll until ``container_name`` is running and tcp_server.py is up inside it.

    Args:
        container_name: name of the Docker container to inspect.
        timeout: maximum number of seconds to keep polling.

    Returns:
        True once the container reports ``Running`` and ``tcp_server.py``
        appears in the container's ``ps aux`` output.

    Raises:
        TimeoutError: if the container is not ready within ``timeout`` seconds.
    """
    start_time = time.time()
    while time.time() - start_time < timeout:
        result = subprocess.run(
            ["docker", "inspect", "--format", "{{.State.Running}}", container_name],
            capture_output=True,
            text=True
        )
        if result.returncode == 0 and "true" in result.stdout.lower():
            # Additionally check that tcp_server.py is actually running inside
            # the container; any failure here just means "not ready yet".
            try:
                result = run_command_in_container('ps aux')
                if "tcp_server.py" in result['result']:
                    return True
            except Exception:
                pass
        time.sleep(1)
    raise TimeoutError(f"Container {container_name} failed to start within {timeout} seconds")

View file

@ -0,0 +1,275 @@
import os
import os.path as osp
import subprocess
from constant import BASE_IMAGES, AI_USER, GITHUB_AI_TOKEN
import time
import socket
import json
from pathlib import Path
import shutil
# Absolute directory containing this module; used to locate the bundled tcp_server.py.
wd = Path(__file__).parent.resolve()
from dataclasses import dataclass, field
from typing import Optional, Union, Dict
from functools import update_wrapper
from inspect import signature
@dataclass
class DockerConfig:
    """Configuration for a Docker-backed execution environment.

    Required fields identify the container, the shared workplace directory,
    the TCP port used to talk to tcp_server.py, and the conda install path
    inside the container; the remaining fields tune optional setup steps.
    """
    container_name: str
    workplace_name: str
    communication_port: int  # e.g. 12345; TCP port exposed for tcp_server.py
    conda_path: str  # e.g. /root/miniconda3; conda path inside the container
    test_pull_name: str = field(default='main')  # git branch to clone when git_clone is True
    task_name: Optional[str] = field(default=None)  # suffix for the per-task branch name
    git_clone: bool = field(default=False)  # clone the metachain repo into the workplace
    setup_package: Optional[str] = field(default=None)  # tarball under packages/ to unpack
    # default_factory so the cwd is captured when the config is INSTANTIATED,
    # not once at import time (field(default=os.getcwd()) would freeze the
    # value to whatever the cwd was when this module was first imported).
    local_root: str = field(default_factory=os.getcwd)
class DockerEnv:
def __init__(self, config: Union[DockerConfig, Dict]):
if isinstance(config, Dict):
config = DockerConfig(**config)
self.workplace_name = config.workplace_name
self.local_workplace = osp.join(config.local_root, config.workplace_name)
self.docker_workplace = f"/{config.workplace_name}"
self.container_name = config.container_name
self.test_pull_name = config.test_pull_name
self.task_name = config.task_name
self.git_clone = config.git_clone
self.setup_package = config.setup_package
self.communication_port = config.communication_port
self.conda_path = config.conda_path
    def init_container(self):
        """Create (or reuse) the Docker container backing this environment.

        Prepares the local workplace (tcp_server.py, optional setup tarball,
        optional git clone of metachain plus a task-specific branch), then
        either reuses a running container, restarts a stopped one, or runs a
        new container that serves commands via tcp_server.py on
        ``self.communication_port``.

        Raises:
            Exception: if cloning, copying .env, branch switching, or
                container creation fails.
            AssertionError: if tcp_server.py could not be copied into place.
        """
        # Look for an existing container (running or stopped) with our name.
        container_check_command = ["docker", "ps", "-a", "--filter", f"name={self.container_name}", "--format", "{{.Names}}"]
        existing_container = subprocess.run(container_check_command, capture_output=True, text=True)
        os.makedirs(self.local_workplace, exist_ok=True)
        # Ship the TCP command server into the shared workplace so the
        # container can launch it on startup.
        if not osp.exists(osp.join(self.local_workplace, 'tcp_server.py')):
            shutil.copy(osp.join(wd, 'tcp_server.py'), self.local_workplace)
        assert osp.exists(osp.join(self.local_workplace, 'tcp_server.py')), "Failed to copy tcp_server.py to the local workplace"
        if self.setup_package is not None:
            # Unpack the optional setup tarball into the workplace.
            unzip_command = ["tar", "-xzvf", f"packages/{self.setup_package}.tar.gz", "-C", self.local_workplace]
            subprocess.run(unzip_command)
        if self.git_clone:
            if not os.path.exists(os.path.join(self.local_workplace, 'metachain')):
                # Clone the requested branch; credentials are embedded in the URL.
                git_command = ["cd", self.local_workplace, "&&", "git", "clone", "-b", self.test_pull_name, f"https://{AI_USER}:{GITHUB_AI_TOKEN}@github.com/tjb-tech/metachain.git"]
                git_command = " ".join(git_command)
                result = subprocess.run(git_command, shell=True)
                if result.returncode != 0:
                    raise Exception(f"Failed to clone the repository. Please check your internet connection and try again.")
                # Make the host's .env available inside the cloned repo.
                copy_env_command = f"cp .env {self.local_workplace}/metachain"
                result = subprocess.run(copy_env_command, shell=True, capture_output=True, text=True)
                if result.returncode != 0:
                    raise Exception(f"Failed to copy .env file to the metachain directory. Error: {result.stderr}")
                # create a new branch
                new_branch_name = f"{self.test_pull_name}_{self.task_name}"
                create_branch_command = f"cd {self.local_workplace}/metachain && git checkout -b {new_branch_name}"
                result = subprocess.run(create_branch_command, shell=True, capture_output=True, text=True)
                if result.returncode != 0:
                    # `checkout -b` fails if the branch already exists; fall
                    # back to a plain checkout of the existing branch.
                    print(Exception(f"Failed to create and switch to new branch. Error: {result.stderr}"))
                    switch_branch_command = f"cd {self.local_workplace}/metachain && git checkout {new_branch_name}"
                    result = subprocess.run(switch_branch_command, shell=True, capture_output=True, text=True)
                    if result.returncode != 0:
                        raise Exception(f"Failed to switch to new branch. Error: {result.stderr}")
                    else:
                        print(f"Successfully switched to new branch: {new_branch_name}")
                else:
                    print(f"Successfully created and switched to new branch: {new_branch_name}")
        if existing_container.stdout.strip() == self.container_name:
            # check if the container is running
            running_check_command = ["docker", "ps", "--filter", f"name={self.container_name}", "--format", "{{.Names}}"]
            running_container = subprocess.run(running_check_command, capture_output=True, text=True)
            if running_container.stdout.strip() == self.container_name:
                print(f"Container '{self.container_name}' is already running. Skipping creation.")
                return # container is already running, skip creation
            else:
                # container exists but is not running, start it
                start_command = ["docker", "start", self.container_name]
                subprocess.run(start_command)
                print(f"Container '{self.container_name}' has been started.")
                return
        # if the container does not exist, create and start a new container
        docker_command = [
            "docker", "run", "-d", "--name", self.container_name, "--user", "root",
            "-v", f"{self.local_workplace}:{self.docker_workplace}",
            "-w", f"{self.docker_workplace}", "-p", f"{self.communication_port}:{self.communication_port}", BASE_IMAGES,
            "/bin/bash", "-c",
            f"python3 {self.docker_workplace}/tcp_server.py --workplace {self.workplace_name} --conda_path {self.conda_path} --port {self.communication_port}"
        ]
        # execute the docker command
        result = subprocess.run(docker_command, capture_output=True, text=True)
        if result.returncode != 0:
            raise Exception(f"Failed to start container: {result.stderr}")
        if self.wait_for_container_ready(timeout=60):
            print(f"Container '{self.container_name}' has been created and started.")
def wait_for_container_ready(self, timeout=30):
    """Poll until the container is running and its tcp_server is up.

    Checks `docker inspect` for the running state, then confirms a
    symmetric host->container port mapping exists and that
    ``tcp_server.py`` appears in the container's process list.
    On success, ``self.communication_port`` is updated to the actual
    mapped host port.

    Args:
        timeout: maximum number of seconds to wait.

    Returns:
        True once the container and its tcp_server are ready.

    Raises:
        TimeoutError: if the container is not ready within `timeout` seconds.
    """
    deadline = time.time() + timeout
    while time.time() < deadline:
        inspect_result = subprocess.run(
            ["docker", "inspect", "--format", "{{.State.Running}}", self.container_name],
            capture_output=True,
            text=True
        )
        if inspect_result.returncode == 0 and "true" in inspect_result.stdout.lower():
            # Container process is up; additionally check the tcp_server is running.
            try:
                port_info = check_container_ports(self.container_name)
                # Explicit condition instead of `assert`, which would be
                # stripped under `python -O` and let a bad mapping through.
                if port_info and port_info[0] == port_info[1]:
                    self.communication_port = port_info[0]
                    result = self.run_command('ps aux')
                    if "tcp_server.py" in result['result']:
                        return True
            except Exception:
                # Not ready yet (no port mapping / server not accepting
                # connections) -- keep polling until the deadline.
                pass
        time.sleep(1)
    raise TimeoutError(f"Container {self.container_name} failed to start within {timeout} seconds")
def stop_container(self):
    """Stop the docker container via `docker stop`."""
    proc = subprocess.run(
        ["docker", "stop", self.container_name],
        capture_output=True,
        text=True,
    )
    if proc.returncode != 0:
        raise Exception(f"Failed to stop container: {proc.stderr}")
def run_command(self, command, stream_callback=None):
    """
    communicate with docker container and execute command, support stream output

    The tcp_server inside the container replies with newline-delimited JSON
    records: zero or more ``{"type": "chunk", "data": ...}`` messages,
    followed by one ``{"type": "final", "status": ..., "result": ...}``.

    Args:
        command: the command to execute
        stream_callback: optional callback function, for handling stream output
            the function signature should be callback(text: str)
    Returns:
        dict: the complete JSON result returned by the docker container
    """
    hostname = 'localhost'
    port = self.communication_port
    buffer_size = 4096
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.connect((hostname, port))
        s.sendall(command.encode())
        # Buffer raw BYTES and split on b'\n' so that a multi-byte UTF-8
        # character straddling two recv() chunks cannot break decoding
        # (decoding each chunk independently raised UnicodeDecodeError).
        partial = b""
        while True:
            chunk = s.recv(buffer_size)
            if not chunk:
                break
            data = partial + chunk
            lines = data.split(b'\n')
            # Process all complete lines; the last element may be unfinished.
            for raw_line in lines[:-1]:
                if not raw_line:
                    continue
                line = raw_line.decode('utf-8')
                try:
                    response = json.loads(line)
                except json.JSONDecodeError:
                    print(f"Invalid JSON: {line}")
                    continue
                # .get() guards against records without a 'type' field.
                if response.get('type') == 'chunk':
                    # stream output: forward each chunk as it arrives
                    if stream_callback:
                        stream_callback(response['data'])
                elif response.get('type') == 'final':
                    # return the final result
                    return {
                        'status': response['status'],
                        'result': response['result']
                    }
            # Save the possibly unfinished trailing bytes for the next chunk.
            partial = lines[-1]
    # The connection closed without a final response.
    return {
        'status': -1,
        'result': 'Connection closed without final response'
    }
def with_env(env: "DockerEnv"):
    """Decorator factory that injects `env` into a tool function.

    The wrapped function is called with ``env`` pre-bound as a keyword
    argument, and its visible signature has the ``env`` parameter removed.
    If the function's docstring contains ``{docker_workplace}`` or
    ``{local_workplace}`` placeholders, they are filled in from `env`.
    """
    def decorator(func):
        def wrapped(*args, **kwargs):
            return func(env=env, *args, **kwargs)
        # Preserve the original function's metadata (__name__, __doc__, ...).
        update_wrapper(wrapped, func)
        # Remove the injected `env` parameter from the public signature.
        wrapped.__signature__ = signature(func).replace(
            parameters=[p for p in signature(func).parameters.values() if p.name != 'env']
        )
        if func.__doc__:
            try:
                # str.format ignores unused keyword arguments, so a single
                # call handles docstrings with either or both placeholders.
                # (The previous two-pass version clobbered the first
                # substitution when only one placeholder was present, and
                # raised KeyError -- losing all formatting -- when both were.)
                wrapped.__doc__ = func.__doc__.format(
                    docker_workplace=env.docker_workplace,
                    local_workplace=env.local_workplace,
                )
            except (KeyError, IndexError, ValueError):
                # Docstring contains other brace expressions; keep it as-is.
                wrapped.__doc__ = func.__doc__
        return wrapped
    return decorator
def check_container_ports(container_name: str):
    """
    check if the container has port mapping

    Returns:
        (host_port, container_port) as ints for the first mapped port,
        e.g. (12345, 12345) for '0.0.0.0:12345->12345/tcp';
        None if the container does not exist or has no port mapping.
    """
    # use docker ps to check the container and get the port information
    container_check_command = [
        "docker", "ps", "-a",
        "--filter", f"name={container_name}",
        "--format", "{{.Ports}}"
    ]
    result = subprocess.run(container_check_command, capture_output=True, text=True)
    ports_info = result.stdout.strip()
    if not ports_info:
        return None
    # only process the mapped ports
    for mapping in ports_info.split(','):
        mapping = mapping.strip()
        if '->' in mapping:
            # parse '0.0.0.0:12345->12345/tcp' to (12345, 12345)
            host_part, container_part = mapping.split('->')
            # rsplit on the LAST ':' so the IPv6 form ':::12345->12345/tcp'
            # parses correctly too (a plain split(':')[1] yielded '' there).
            host_port = host_part.rsplit(':', 1)[-1]
            container_port = container_part.split('/')[0]  # '12345' from '12345/tcp'
            return (int(host_port), int(container_port))
    return None
def check_container_exist(container_name: str):
    """Return True if a container with exactly this name exists (any state)."""
    container_check_command = [
        "docker", "ps", "-a",
        "--filter", f"name={container_name}",
        "--format", "{{.Names}}"
    ]
    result = subprocess.run(container_check_command, capture_output=True, text=True)
    # `--filter name=` matches substrings, so compare whole output lines to
    # avoid e.g. "foo" matching a container named "foobar".
    return container_name in result.stdout.strip().splitlines()
def check_container_running(container_name: str):
    """Return True if a container with exactly this name is currently running."""
    container_check_command = [
        "docker", "ps",
        "--filter", f"name={container_name}",
        "--format", "{{.Names}}"
    ]
    result = subprocess.run(container_check_command, capture_output=True, text=True)
    # `--filter name=` matches substrings, so compare whole output lines to
    # avoid e.g. "foo" matching a container named "foobar".
    return container_name in result.stdout.strip().splitlines()

View file

@ -0,0 +1,76 @@
import os
import shutil
import time
from seleniumbase.config import settings
from seleniumbase.fixtures import constants
# The "downloads_folder" is a folder for saving downloaded files.
# Works for downloads initiated by Chromium and Firefox WebDriver clicks.
# Browser type doesn't matter if using self.download_file(file_url)
# or self.save_file_as(file_url, new_file_name)
# The "downloads_folder" is cleaned out at the start of each pytest run,
# but there is an option to save existing files in "archived_files".
# Name of the downloads sub-folder, taken from seleniumbase's constants.
DOWNLOADS_DIR = constants.Files.DOWNLOADS_FOLDER
# Absolute path of the "examples" folder, relative to the current working dir.
abs_path = os.path.abspath("./examples")
# Full path where downloaded files are saved.
downloads_path = os.path.join(abs_path, DOWNLOADS_DIR)
def get_downloads_folder():
    """Return the absolute path of the downloads folder."""
    return downloads_path
def reset_downloads_folder():
    """Clears the downloads folder.
    If settings.ARCHIVE_EXISTING_DOWNLOADS is set to True, archives it."""
    def _trim(folder):
        # Drop a single trailing and a single leading slash, if present.
        if folder.endswith("/"):
            folder = folder[:-1]
        if folder.startswith("/"):
            folder = folder[1:]
        return folder
    downloads_dir = _trim(constants.Files.DOWNLOADS_FOLDER)
    archive_dir = _trim(constants.Files.ARCHIVED_DOWNLOADS_FOLDER)
    if len(downloads_dir) < 10 or len(archive_dir) < 10:
        return  # Prevent accidental deletions if constants are renamed
    archived_downloads_folder = os.path.join(os.getcwd(), archive_dir) + os.sep
    # Archive a non-empty downloads folder first.
    if os.path.exists(downloads_path) and os.listdir(downloads_path):
        reset_downloads_folder_assistant(archived_downloads_folder)
    # Remove any now-empty leftover folders (downloads first, then archive).
    for folder in (downloads_path, archived_downloads_folder):
        if os.path.exists(folder) and not os.listdir(folder):
            try:
                os.rmdir(folder)
            except OSError:
                pass
def reset_downloads_folder_assistant(archived_downloads_folder):
    """Move the current downloads folder into a timestamped archive sub-folder.

    The archived copy is deleted afterwards unless
    ``settings.ARCHIVE_EXISTING_DOWNLOADS`` is True.
    """
    if not os.path.exists(archived_downloads_folder):
        try:
            os.makedirs(archived_downloads_folder, exist_ok=True)
        except Exception:
            pass  # Should only be reachable during multi-threaded test runs
    stamp = int(time.time())
    new_archived_downloads_sub_folder = "%s/downloads_%s" % (
        archived_downloads_folder,
        stamp,
    )
    # Only archive when the downloads folder exists and is non-empty.
    if os.path.exists(downloads_path) and os.listdir(downloads_path):
        try:
            shutil.move(downloads_path, new_archived_downloads_sub_folder)
            os.makedirs(downloads_path, exist_ok=True)
        except Exception:
            pass
    if not settings.ARCHIVE_EXISTING_DOWNLOADS:
        try:
            shutil.rmtree(new_archived_downloads_sub_folder)
        except OSError:
            pass

View file

@ -0,0 +1,90 @@
import subprocess
import json
import os
from pathlib import Path
import platform
class LocalEnv:
    """Execution environment that runs commands directly on the local machine.

    Mirrors the docker-based environment's interface: ``docker_workplace``
    and ``local_workplace`` refer to the same local directory, and the
    path-conversion helpers are identity functions.
    """
    def __init__(self):
        # If running from inside the "metachain" checkout, use its parent
        # directory as the workplace.
        self.docker_workplace = os.getcwd()
        if self.docker_workplace.endswith("metachain"):
            self.docker_workplace = os.path.dirname(self.docker_workplace)
        self.local_workplace = self.docker_workplace
        # May be None if no conda installation could be located;
        # run_command() asserts on it before use.
        self.conda_sh = self._find_conda_sh()
    def _find_conda_sh(self):
        """
        Find conda.sh file location across different environments.

        Returns:
            The path to conda.sh as a str, or None if it cannot be found.
        """
        # 1. Try common locations based on OS
        possible_paths = []
        home = str(Path.home())
        if platform.system() == "Windows":
            possible_paths.extend([
                Path(home) / "Anaconda3" / "etc" / "profile.d" / "conda.sh",
                Path(home) / "miniconda3" / "etc" / "profile.d" / "conda.sh",
                Path(home) / "micromamba" / "etc" / "profile.d" / "conda.sh",
            ])
        else:  # Linux and MacOS
            possible_paths.extend([
                Path(home) / "anaconda3" / "etc" / "profile.d" / "conda.sh",
                Path(home) / "miniconda3" / "etc" / "profile.d" / "conda.sh",
                Path(home) / "micromamba" / "etc" / "profile.d" / "conda.sh",
                Path("/opt/conda/etc/profile.d/conda.sh"),  # Docker containers
                Path("/usr/local/conda/etc/profile.d/conda.sh"),
            ])
        # For Linux, also check root installations
        if platform.system() == "Linux":
            possible_paths.extend([
                Path("/opt/anaconda3/etc/profile.d/conda.sh"),
                Path("/opt/miniconda3/etc/profile.d/conda.sh"),
                Path("/opt/micromamba/etc/profile.d/conda.sh"),
            ])
        # Check all possible paths
        for path in possible_paths:
            if path.exists():
                return str(path)
        # 2. Try to find using `conda info --base`
        try:
            result = subprocess.run(['conda', 'info', '--base'],
                                    capture_output=True,
                                    text=True)
            if result.returncode == 0:
                base_path = result.stdout.strip()
                conda_sh = Path(base_path) / "etc" / "profile.d" / "conda.sh"
                if conda_sh.exists():
                    return str(conda_sh)
        except (OSError, subprocess.SubprocessError):
            # Narrowed from a bare `except:`: only swallow "conda binary not
            # installed / not runnable" failures, not e.g. KeyboardInterrupt.
            pass
        # 3. If all fails, return None and handle in run_command
        return None
    def run_command(self, command, stream_callback=None):
        """Run `command` in bash with the `browser` conda env activated.

        The process is started in the workplace directory and its combined
        stdout+stderr is collected until it exits. `stream_callback` is
        accepted for interface compatibility with the docker environment
        but is not used here.

        Returns:
            dict with "status" (process return code) and "result"
            (captured output text).

        NOTE(review): `command` is interpolated into a shell string, so it
        must come from trusted input.
        """
        assert self.conda_sh is not None, "Conda.sh not found"
        modified_command = f"/bin/bash -c 'source {self.conda_sh} && conda activate browser && cd {self.docker_workplace} && {command}'"
        process = subprocess.Popen(modified_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)
        output = ''
        while True:
            line = process.stdout.readline()
            if not line and process.poll() is not None:
                break
            output += line
        return {
            "status": process.poll(),
            "result": output
        }
    def _convert_local_to_docker(self, path):
        """Identity conversion: local and 'docker' paths are the same here."""
        return path
    def _convert_docker_to_local(self, path):
        """Identity conversion: local and 'docker' paths are the same here."""
        return path
# Quick manual smoke test: print the current user's home directory.
if __name__ == "__main__":
    print(str(Path.home()))

View file

@ -0,0 +1,22 @@
from .abstract_markdown_browser import AbstractMarkdownBrowser
from .markdown_search import AbstractMarkdownSearch, BingMarkdownSearch
# TODO: Fix mdconvert
from .mdconvert import ( # type: ignore
DocumentConverterResult,
FileConversionException,
MarkdownConverter,
UnsupportedFormatException,
)
from .requests_markdown_browser import RequestsMarkdownBrowser
__all__ = (
"AbstractMarkdownBrowser",
"RequestsMarkdownBrowser",
"AbstractMarkdownSearch",
"BingMarkdownSearch",
"MarkdownConverter",
"UnsupportedFormatException",
"FileConversionException",
"DocumentConverterResult",
)

View file

@ -0,0 +1,64 @@
from abc import ABC, abstractmethod
from typing import Union
class AbstractMarkdownBrowser(ABC):
    """
    An abstract class for a Markdown web browser.
    All MarkdownBrowers work by:
    (1) fetching a web page by URL (via requests, Selenium, Playwright, etc.)
    (2) converting the page's HTML or DOM to Markdown
    (3) operating on the Markdown
    Such browsers are simple, and suitable for read-only agentic use.
    They cannot be used to interact with complex web applications.
    """
    @abstractmethod
    def __init__(self) -> None:
        """Initialize the browser."""
        pass
    @property
    @abstractmethod
    def address(self) -> str:
        """The URI or local path of the currently loaded page."""
        pass
    @abstractmethod
    def set_address(self, uri_or_path: str) -> None:
        """Navigate to the given URI or local path."""
        pass
    @property
    @abstractmethod
    def viewport(self) -> str:
        """The Markdown text of the currently visible viewport."""
        pass
    @property
    @abstractmethod
    def page_content(self) -> str:
        """The full Markdown content of the current page."""
        pass
    @abstractmethod
    def page_down(self) -> None:
        """Move the viewport one page down."""
        pass
    @abstractmethod
    def page_up(self) -> None:
        """Move the viewport one page up."""
        pass
    @abstractmethod
    def visit_page(self, path_or_uri: str) -> str:
        """Visit the given path or URI; returns a str
        (presumably the resulting viewport text -- confirm in implementations)."""
        pass
    @abstractmethod
    def open_local_file(self, local_path: str) -> str:
        """Open a local file in the browser; returns a str
        (presumably the resulting viewport text -- confirm in implementations)."""
        pass
    @abstractmethod
    def find_on_page(self, query: str) -> Union[str, None]:
        """Search the current page for `query`; returns a str on a match,
        or None when nothing is found."""
        pass
    @abstractmethod
    def find_next(self) -> Union[str, None]:
        """Advance to the next match of the previous search; returns a str
        on a match, or None when there are no more matches."""
        pass

Some files were not shown because too many files have changed in this diff Show more