This commit is contained in:
tjb-tech 2025-02-08 22:42:46 +08:00
parent 84ef9823bb
commit 779ce23031
82 changed files with 1053 additions and 8608 deletions

11
.env.template Normal file
View file

@ -0,0 +1,11 @@
# Required Github Tokens
GITHUB_AI_TOKEN=
# Optional API Keys
OPENAI_API_KEY=
DEEPSEEK_API_KEY=
ANTHROPIC_API_KEY=
GEMINI_API_KEY=
HUGGINGFACE_API_KEY=
GROQ_API_KEY=
XAI_API_KEY=

6
.gitignore vendored
View file

@ -24,4 +24,8 @@ evaluation/**/data/
.env
terminal_tmp/*
terminal_tmp/*
!tool_docs.csv
.port*

BIN
assets/cookies/export.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 213 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 853 KiB

View file

@ -1,25 +0,0 @@
from metachain.types import Agent
from metachain.tools import (
get_api_plugin_tools_doc, check_agent
)
from metachain.registry import register_agent
@register_agent(name = "Agent Check Agent", func_name="get_agent_check_agent")
def get_agent_check_agent(model: str):
    """Build the 'Agent Check Agent'.

    The agent decides whether a new agent must be developed for a task, or
    whether an existing agent in the `agents` folder (together with suitable
    actions in the `actions` folder) is already sufficient. It answers
    'Needed' or 'Not needed' first, then gives its reason.

    NOTE(review): the prompt line "whether you should develop a new agent to
    solve the task." reads as a sentence fragment — a leading clause may have
    been lost upstream; confirm against the original prompt.

    Args:
        model: Name of the LLM backing the agent.

    Returns:
        A configured ``Agent`` instance.
    """
    def instructions(context_variables):
        # Prompt typos fixed: "of of" -> "of", "apprioriate" -> "appropriate",
        # "If you have already have an pre-built" -> "If you already have a pre-built".
        return \
f"""You are a developer working on a project named 'metachain'.
You are given a user request and required to use existing project code to solve the task.
Your goal is to enrich the functionality of existing list of agents in the `agents` folder as much as possible, so that once the similar task occurs again, the agent can solve it directly without developing new agents.
whether you should develop a new agent to solve the task.
If you already have a pre-built agent in the `agents` folder and suitable actions in the `actions` folder you could use with it, you should not develop a new agent.
Note that the key of agent is the appropriate `instructions` and `functions` using existing tools.
Answer 'Needed' or 'Not needed' first and then give your reason.
"""
    return Agent(
        name="Agent Check Agent",
        model=model,
        instructions=instructions,
        functions=[check_agent],
        parallel_tool_calls = False
    )

View file

@ -1,147 +0,0 @@
from metachain.types import Agent
from metachain.registry import register_agent
from browsergym.core.action.highlevel import HighLevelActionSet
from metachain.util import function_to_json
import gymnasium as gym
import browsergym.miniwob # register miniwob tasks as gym environments
import importlib
import json
from functools import wraps
from typing import Callable, Union
from metachain.environment.browser_env import BrowserEnv
import inspect
from metachain.types import Result
from browsergym.utils.obs import flatten_axtree_to_str
def get_error_prefix(last_browser_action: str) -> str:
    """Build the recovery hint prepended to an observation after a failed browser action."""
    return (
        "IMPORTANT! Last action is incorrect:\n"
        f"{last_browser_action}\n"
        "Think again with the current observation of the page.\n"
    )
def wrap_browser_action(action_func: Callable, env: BrowserEnv) -> Callable:
    """Wrap a browsergym action function so that invoking it steps ``env``.

    The wrapper serializes the call into an action string, executes it in the
    browser environment, and converts the resulting observation into a
    ``Result`` carrying the page URL, accessibility tree and screenshot (or
    an error report with a recovery hint on failure).

    Args:
        action_func: The raw browsergym action function. It is used only for
            its name, signature and docstring; it is never called directly.
        env: The browser environment that actually executes the action.

    Returns:
        The wrapped callable, exposing the original function's signature and
        docstring.
    """
    @wraps(action_func)
    def wrapper(*args, **kwargs) -> Union[Result, str]:
        error_prefix = ""
        try:
            # Serialize the call as "func(v1, v2, ...)" for the env.
            # NOTE(review): positional *args are ignored — only keyword
            # arguments reach the action string; confirm callers use kwargs.
            action_str = f"{action_func.__name__}({', '.join([f'{repr(v)}' for k, v in kwargs.items()])})"
            obs = env.step(action_str)
            obs_dict = dict(
                content=obs['text_content'],  # text content of the page
                url=obs.get('url', ''),  # URL of the page
                screenshot=obs.get('screenshot', None),  # base64-encoded screenshot, png
                open_pages_urls=obs.get('open_pages_urls', []),  # list of open pages
                active_page_index=obs.get(
                    'active_page_index', -1
                ),  # index of the active page
                dom_object=obs.get('dom_object', {}),  # DOM object
                axtree_object=obs.get('axtree_object', {}),  # accessibility tree object
                extra_element_properties=obs.get('extra_element_properties', {}),
                focused_element_bid=obs.get(
                    'focused_element_bid', None
                ),  # focused element bid
                last_browser_action=obs.get(
                    'last_action', ''
                ),  # last browser env action performed
                last_browser_action_error=obs.get('last_action_error', ''),
                error=True if obs.get('last_action_error', '') else False,  # error flag
            )
        except Exception as e:
            # Bug fix: this failure dict previously lacked the keys read
            # unconditionally below ('last_browser_action', 'url',
            # 'axtree_object', 'extra_element_properties'), so any env
            # exception raised a KeyError instead of producing an error
            # Result. Provide safe defaults for all of them.
            obs_dict = dict(
                content=str(e),
                url='',
                screenshot='',
                axtree_object={},
                extra_element_properties={},
                last_browser_action='',
                error=True,
                last_browser_action_error=str(e),
            )
        if obs_dict['error']:
            # Prepend a recovery hint so the agent can retry sensibly.
            error_prefix = get_error_prefix(obs_dict['last_browser_action'])
            # self.error_accumulator += 1
            # if self.error_accumulator > 5:
            #     return MessageAction('Too many errors encountered. Task failed.')
        cur_url = obs_dict['url']
        try:
            cur_axtree_txt = flatten_axtree_to_str(
                obs_dict['axtree_object'],
                extra_properties=obs_dict['extra_element_properties'],
                with_clickable=True,
                filter_visible_only=True,
            )
        except Exception as e:
            print(
                'Error when trying to process the accessibility tree: %s', e
            )
            return 'Error encountered when browsing.'
        ret_value = f"""\
{error_prefix}
# Current Page URL:
{cur_url}
# Current Accessibility Tree:
{cur_axtree_txt}
Here is an example with chain of thought of a valid action when clicking on a button:
"
In order to accomplish my goal I need to click on the button with bid 12
```click("12")```
"
""".strip()
        return Result(
            value=ret_value,
            image=obs_dict['screenshot'],
        )
    # Preserve the wrapped function's signature and docstring so schema
    # generation (e.g. function_to_json) still sees the original action.
    wrapper.__signature__ = inspect.signature(action_func)
    wrapper.__doc__ = action_func.__doc__
    return wrapper
@register_agent(name = "Browsing Agent", func_name="get_browsing_agent")
def get_browsing_agent(model: str):
    """Create the Browsing Agent.

    A fresh ``BrowserEnv`` is started and every high-level browsergym action
    in the "chat", "nav" and "bid" subsets is wrapped so that calling it
    steps that environment. The agent's instructions embed the current goal
    and a textual description of the action space.
    """
    env = BrowserEnv()
    action_set = HighLevelActionSet(
        subsets=["chat", "nav", "bid"],  # define a subset of the action space
        # subsets=["chat", "bid", "coord", "infeas"] # allow the agent to also use x,y coordinates
        strict=False,  # less strict on the parsing of the actions
        multiaction=False,  # does not enable the agent to take multiple actions at once
        demo_mode="off",  # add visual effects
    )
    action_module = importlib.import_module("browsergym.core.action.functions")
    wrapped_actions = [
        wrap_browser_action(getattr(action_module, action_name), env)
        for action_name in action_set.action_set.keys()
    ]

    def instructions(context_variables):
        goal = context_variables.get("goal", "")
        action_space = action_set.describe(with_long_description=False, with_examples=True)
        return \
f"""Review the current state of the page and all other information to find the best
possible next action to accomplish your goal. Your answer will be interpreted
and executed by a program, make sure to follow the formatting instructions.
# Goal:
{goal}
# Action Space
{action_space}
"""
    return Agent(
        name="Browsing Agent",
        model=model,
        instructions=instructions,
        functions=wrapped_actions
    )

View file

@ -1,12 +0,0 @@
from metachain import Agent
from metachain.registry import register_plugin_agent
@register_plugin_agent(name="Condition Extraction Agent", func_name = "get_condition_extraction_agent")
def get_condition_extraction_agent(model):
    """Return the Condition Extraction Agent backed by the given model."""
    description = """
    This agent is specialized in identifying and extracting all relevant conditions, given values, and constraints from the math problem.
    """
    return Agent(
        name="Condition Extraction Agent",
        description=description,
        model=model,
    )

View file

@ -1,12 +0,0 @@
from metachain import Agent
from metachain.registry import register_plugin_agent
@register_plugin_agent(name="Math Solver Agent", func_name = "get_math_solver_agent")
def get_math_solver_agent(model):
    """Return the Math Solver Agent backed by the given model."""
    description = """
    This agent is specialized in evaluating whether conditions are sufficient and solving math problems using appropriate mathematical methods.
    """
    return Agent(
        name="Math Solver Agent",
        description=description,
        model=model,
    )

View file

@ -1,12 +0,0 @@
from metachain import Agent
from metachain.registry import register_plugin_agent
@register_plugin_agent(name="Objective Extraction Agent", func_name = "get_objective_extraction_agent")
def get_objective_extraction_agent(model):
    """Return the Objective Extraction Agent backed by the given model."""
    description = """
    This agent is specialized in analyzing math problems and extracting the main objective or question being asked.
    """
    return Agent(
        name="Objective Extraction Agent",
        description=description,
        model=model,
    )

View file

@ -1,12 +0,0 @@
from metachain import Agent
from metachain.registry import register_plugin_agent
@register_plugin_agent(name="Result Aggregator Agent", func_name = "get_result_aggregator_agent")
def get_result_aggregator_agent(model):
    """Return the Result Aggregator Agent backed by the given model.

    Bug fix: the Agent was previously constructed with
    name="Math Solver Agent" (copy-paste from the solver module), which
    mismatched its registered name "Result Aggregator Agent".
    """
    instruction = """
    This agent is specialized in aggregating results from different models and determining the final answer through majority voting.
    """
    return Agent(
        name="Result Aggregator Agent",
        description=instruction,
        model=model,
    )

View file

@ -1,68 +0,0 @@
<agents>
<system_input>
The user request from the specific user about the product or service, mainly categorized into 2 types:
- Purchase a product or service
- Refund a product or service
</system_input>
<system_output>
<key>response</key>
<description>The response of the agent to the user's request.</description>
</system_output>
<global_variables>
<variable>
<key>user_name</key>
<description>The name of the user.</description>
<value>John Doe</value>
</variable>
</global_variables>
<agent>
<name>Personal Sales Agent</name>
<description>The personal sales agent is an agent that serves as a personal sales agent for a specific user.</description>
<instructions>You are a personal sales agent that can be used to help the user {user_name} with their request.</instructions>
<tools category="new">
<tool>
<name>recommend_product</name>
<description>Recommend a product to the user.</description>
</tool>
<tool>
<name>recommend_service</name>
<description>Recommend a service to the user.</description>
</tool>
<tool>
<name>conduct_sales</name>
<description>Conduct sales with the user.</description>
</tool>
</tools>
<agent_input>
<key>user_request</key>
<description>Request from the specific user for purchasing a product or service.</description>
</agent_input>
<agent_output>
<key>response</key>
<description>The response of the agent to the user's request.</description>
</agent_output>
</agent>
<agent>
<name>Personal Refunds Agent</name>
<description>The personal refunds agent is an agent that serves as a personal refunds agent for a specific user.</description>
<instructions>Help the user {user_name} with a refund. If the reason is that it was too expensive, offer the user a discount. If they insist, then process the refund.</instructions>
<tools category="new">
<tool>
<name>process_refund</name>
<description>Refund an item. Make sure you have the item_id of the form item_... Ask for user confirmation before processing the refund.</description>
</tool>
<tool>
<name>apply_discount</name>
<description>Apply a discount to the user's cart.</description>
</tool>
</tools>
<agent_input>
<key>user_request</key>
<description>Request from the specific user for refunding a product or service.</description>
</agent_input>
<agent_output>
<key>response</key>
<description>The response of the agent to the user's request.</description>
</agent_output>
</agent>
</agents>

View file

@ -1,85 +0,0 @@
<agents>
<system_input>
Two types of financial requests:
1. Managing private financial documents stored in the 'financial_docs' folder
2. Searching online financial information for specific company tickers
</system_input>
<system_output>
<key>financial_response</key>
<description>Comprehensive response containing either document analysis results or requested financial information.</description>
</system_output>
<agent>
<name>Financial Agent</name>
<description>A specialized agent that handles both private financial document management and online financial information retrieval.</description>
<instructions>You are a financial assistant with two primary responsibilities:
1. For private financial documents:
- Process and analyze documents in the 'financial_docs' folder
- Store document content for efficient retrieval
- Answer questions about stored financial documents
- Maintain document confidentiality and security
2. For online financial information:
- Retrieve accurate financial data for specified company tickers
- Format and present financial statements clearly
- Ensure data accuracy and proper citation
- Provide context for financial metrics when needed</instructions>
<tools category="existing">
<tool>
<name>save_raw_docs_to_vector_db</name>
<description>Process and store private financial documents into the vector database for efficient retrieval.</description>
</tool>
<tool>
<name>query_db</name>
<description>Search through stored financial documents to find relevant information.</description>
</tool>
<tool>
<name>modify_query</name>
<description>Refine search queries for better document retrieval results.</description>
</tool>
<tool>
<name>answer_query</name>
<description>Provide answers based on information found in stored documents.</description>
</tool>
<tool>
<name>can_answer</name>
<description>Verify if sufficient information exists in stored documents to answer a query.</description>
</tool>
<tool>
<name>get_historical_stock_price</name>
<description>Retrieve historical stock price data for specified tickers.</description>
</tool>
<tool>
<name>visualizer</name>
<description>Visualize financial data and documents when needed.</description>
</tool>
</tools>
<tools category="new">
<tool>
<name>get_balance_sheet</name>
<description>Retrieve balance sheet data for a specific company ticker over a given period.</description>
</tool>
<tool>
<name>get_cash_flow</name>
<description>Retrieve cash flow statement data for a specific company ticker over a given period.</description>
</tool>
<tool>
<name>get_income_statement</name>
<description>Retrieve income statement data for a specific company ticker over a given period.</description>
</tool>
</tools>
<agent_input>
<key>financial_request</key>
<description>User request for either document management or financial information retrieval, including:
- Document analysis requests for private financial documents
- Requests for specific financial statements with company ticker and time period</description>
</agent_input>
<agent_output>
<key>financial_response</key>
<description>Comprehensive response containing either:
- Analysis results from private financial documents
- Requested financial statements and data
- Relevant visualizations or summaries as needed</description>
</agent_output>
</agent>
</agents>

View file

@ -1,87 +0,0 @@
{
"system_input": "Two types of financial requests:\n 1. Managing and analyzing private financial documents stored in the `financial_docs` folder\n 2. Retrieving and analyzing public financial information for specific company tickers",
"system_output": {
"key": "financial_analysis",
"description": "The comprehensive financial analysis or response based on either private documents or public financial data."
},
"global_variables": {},
"agents": [
{
"name": "Private Financial Document Manager",
"description": "An agent specialized in managing and analyzing private financial documents stored locally.",
"instructions": "You are a financial document manager responsible for:\n1. Processing and organizing financial documents from the local `financial_docs` folder\n2. Storing document content in a searchable format using vector database\n3. Retrieving relevant financial information from stored documents\n4. Providing detailed analysis based on the stored financial documents\n\nAlways verify document processing success and maintain data confidentiality.",
"tools": {
"existing": [
{
"name": "save_raw_docs_to_vector_db",
"description": "Save the financial documents to the vector database for efficient retrieval and analysis."
},
{
"name": "query_db",
"description": "Search for specific financial information within stored documents."
},
{
"name": "modify_query",
"description": "Refine search queries to get more accurate financial information."
},
{
"name": "answer_query",
"description": "Provide detailed answers based on the financial documents."
},
{
"name": "can_answer",
"description": "Verify if sufficient information exists in stored documents to answer a query."
}
],
"new": []
},
"agent_input": {
"key": "document_request",
"description": "User's request related to private financial documents, including document processing, searching, or analysis needs."
},
"agent_output": {
"key": "financial_analysis",
"description": "Analysis, insights, or information retrieved from private financial documents."
}
},
{
"name": "Public Financial Data Analyst",
"description": "An agent specialized in retrieving and analyzing public financial information for specific company tickers.",
"instructions": "You are a financial data analyst responsible for:\n1. Retrieving public financial data including balance sheets, cash flow statements, and income statements\n2. Analyzing financial metrics and trends\n3. Providing detailed financial analysis based on public data\n4. Ensuring accuracy in financial data retrieval and calculations\n\nAlways verify data accuracy and provide clear sources for financial information.",
"tools": {
"existing": [
{
"name": "get_historical_stock_price",
"description": "Retrieve historical stock price data for analysis."
}
],
"new": [
{
"name": "get_balance_sheet",
"description": "Retrieve balance sheet data for a specific ticker over a given period."
},
{
"name": "get_cash_flow_statement",
"description": "Retrieve cash flow statement data for a specific ticker over a given period."
},
{
"name": "get_income_statement",
"description": "Retrieve income statement data for a specific ticker over a given period."
},
{
"name": "calculate_financial_metrics",
"description": "Calculate key financial metrics and ratios from the retrieved financial statements."
}
]
},
"agent_input": {
"key": "market_request",
"description": "User's request for public financial data, including specific ticker symbols and time periods for analysis."
},
"agent_output": {
"key": "financial_analysis",
"description": "Analysis and insights based on public financial data, including financial statements and calculated metrics."
}
}
]
}

View file

@ -1,98 +0,0 @@
<agents>
<system_input>
Two types of financial requests:
1. Managing and analyzing private financial documents stored in the `financial_docs` folder
2. Retrieving and analyzing public financial information for specific company tickers
</system_input>
<system_output>
<key>financial_analysis</key>
<description>The comprehensive financial analysis or response based on either private documents or public financial data.</description>
</system_output>
<agent>
<name>Private Financial Document Manager</name>
<description>An agent specialized in managing and analyzing private financial documents stored locally.</description>
<instructions>You are a financial document manager responsible for:
1. Processing and organizing financial documents from the local `financial_docs` folder
2. Storing document content in a searchable format using vector database
3. Retrieving relevant financial information from stored documents
4. Providing detailed analysis based on the stored financial documents
Always verify document processing success and maintain data confidentiality.</instructions>
<tools category="existing">
<tool>
<name>save_raw_docs_to_vector_db</name>
<description>Save the financial documents to the vector database for efficient retrieval and analysis.</description>
</tool>
<tool>
<name>query_db</name>
<description>Search for specific financial information within stored documents.</description>
</tool>
<tool>
<name>modify_query</name>
<description>Refine search queries to get more accurate financial information.</description>
</tool>
<tool>
<name>answer_query</name>
<description>Provide detailed answers based on the financial documents.</description>
</tool>
<tool>
<name>can_answer</name>
<description>Verify if sufficient information exists in stored documents to answer a query.</description>
</tool>
</tools>
<agent_input>
<key>document_request</key>
<description>User's request related to private financial documents, including document processing, searching, or analysis needs.</description>
</agent_input>
<agent_output>
<key>financial_analysis</key>
<description>Analysis, insights, or information retrieved from private financial documents.</description>
</agent_output>
</agent>
<agent>
<name>Public Financial Data Analyst</name>
<description>An agent specialized in retrieving and analyzing public financial information for specific company tickers.</description>
<instructions>You are a financial data analyst responsible for:
1. Retrieving public financial data including balance sheets, cash flow statements, and income statements
2. Analyzing financial metrics and trends
3. Providing detailed financial analysis based on public data
4. Ensuring accuracy in financial data retrieval and calculations
Always verify data accuracy and provide clear sources for financial information.</instructions>
<tools category="existing">
<tool>
<name>get_historical_stock_price</name>
<description>Retrieve historical stock price data for analysis.</description>
</tool>
</tools>
<tools category="new">
<tool>
<name>get_balance_sheet</name>
<description>Retrieve balance sheet data for a specific ticker over a given period.</description>
</tool>
<tool>
<name>get_cash_flow_statement</name>
<description>Retrieve cash flow statement data for a specific ticker over a given period.</description>
</tool>
<tool>
<name>get_income_statement</name>
<description>Retrieve income statement data for a specific ticker over a given period.</description>
</tool>
<tool>
<name>calculate_financial_metrics</name>
<description>Calculate key financial metrics and ratios from the retrieved financial statements.</description>
</tool>
</tools>
<agent_input>
<key>market_request</key>
<description>User's request for public financial data, including specific ticker symbols and time periods for analysis.</description>
</agent_input>
<agent_output>
<key>financial_analysis</key>
<description>Analysis and insights based on public financial data, including financial statements and calculated metrics.</description>
</agent_output>
</agent>
</agents>

View file

@ -1,54 +0,0 @@
<agents>
<system_input>
Questions from the user about the OpenAI products. The document of the OpenAI products is available at `/workspace/docs/openai_products/`.
</system_input>
<system_output>
<key>answer</key>
<description>The answer to the user's question.</description>
</system_output>
<agent>
<name>Helper Center Agent</name>
<description>The helper center agent is an agent that serves as a helper center agent for a specific user to answer the user's question about the OpenAI products.</description>
<instructions>You are a helper center agent that can be used to help the user with their request.</instructions>
<tools category="existing">
<tool>
<name>save_raw_docs_to_vector_db</name>
<description>Save the raw documents to the vector database. The documents could be:
- ANY text document with the extension of pdf, docx, txt, etc.
- A zip file containing multiple text documents
- a directory containing multiple text documents
All documents will be converted to raw text format and saved to the vector database in the chunks of 4096 tokens.</description>
</tool>
<tool>
<name>query_db</name>
<description>Query the vector database to find the answer to the user's question.</description>
</tool>
<tool>
<name>modify_query</name>
<description>Modify the user's question to a more specific question.</description>
</tool>
<tool>
<name>answer_query</name>
<description>Answer the user's question based on the answer from the vector database.</description>
</tool>
<tool>
<name>can_answer</name>
<description>Check if the user's question can be answered by the vector database.</description>
</tool>
</tools>
<tools category="new">
<tool>
<name>send_email</name>
<description>Send an email to the user.</description>
</tool>
</tools>
<agent_input>
<key>user_question</key>
<description>The question from the user about the OpenAI products.</description>
</agent_input>
<agent_output>
<key>answer</key>
<description>The answer to the user's question.</description>
</agent_output>
</agent>
</agents>

View file

@ -1,137 +0,0 @@
import xml.etree.ElementTree as ET
from typing import Dict, List, Optional
class AgentForm:
    """Parsed, dict-shaped representation of an agent-form XML document.

    Exposes ``system_input``, ``system_output``, ``global_variables`` and
    ``agents`` attributes populated from the XML, plus ``validate`` and
    ``to_dict`` helpers.
    """

    def __init__(self, xml_string: str):
        root = ET.fromstring(xml_string)

        # System-level input/output description.
        self.system_input = root.find('system_input').text.strip()
        out_node = root.find('system_output')
        self.system_output = {
            'key': out_node.find('key').text.strip(),
            'description': out_node.find('description').text.strip()
        }

        # Global variables are optional in the schema.
        self.global_variables = {}
        vars_node = root.find('global_variables')
        if vars_node is not None:
            for var_node in vars_node.findall('variable'):
                var_name = var_node.find('key').text.strip()
                self.global_variables[var_name] = {
                    'description': var_node.find('description').text.strip(),
                    'value': var_node.find('value').text.strip()
                }

        # One entry per <agent> element.
        self.agents = [self._parse_agent(node) for node in root.findall('agent')]

    @staticmethod
    def _parse_agent(node):
        """Convert a single <agent> element into its dict form."""
        agent = {
            'name': node.find('name').text.strip(),
            'description': node.find('description').text.strip(),
            'instructions': node.find('instructions').text.strip(),
            'tools': {'existing': [], 'new': []},
            'input': {
                'key': node.find('agent_input/key').text.strip(),
                'description': node.find('agent_input/description').text.strip()
            },
            'output': {
                'key': node.find('agent_output/key').text.strip(),
                'description': node.find('agent_output/description').text.strip()
            }
        }
        # Tools are grouped by their category attribute ("existing"/"new").
        for tools_node in node.findall('tools'):
            bucket = agent['tools'][tools_node.get('category')]
            for tool_node in tools_node.findall('tool'):
                bucket.append({
                    'name': tool_node.find('name').text.strip(),
                    'description': tool_node.find('description').text.strip()
                })
        return agent

    def validate(self) -> bool:
        """Check the form against the structural rules.

        Rules enforced here:
        1. For a single-agent system, the agent's output key must equal the
           system output key.
        2. Every agent's input/output must carry a non-empty key and
           description.

        Returns False on any violation or unexpected parse state.
        """
        try:
            if len(self.agents) == 1:
                if self.agents[0]['output']['key'] != self.system_output['key']:
                    return False
            for agent in self.agents:
                for slot in ('input', 'output'):
                    if not agent[slot].get('key') or not agent[slot].get('description'):
                        return False
            return True
        except Exception:
            return False

    def to_dict(self) -> Dict:
        """Return the whole form as a plain dictionary."""
        return {
            'system_input': self.system_input,
            'system_output': self.system_output,
            'global_variables': self.global_variables,
            'agents': self.agents
        }
# Usage example helper
def parse_agent_form(xml_path: str) -> Optional[Dict]:
    """Read and parse an agent-form XML file.

    Args:
        xml_path: Path of the XML file to read.

    Returns:
        The parsed form as a dict, or None when reading, parsing or
        validation fails (a diagnostic is printed in that case).
    """
    try:
        with open(xml_path, 'r', encoding='utf-8') as fh:
            content = fh.read()
        form = AgentForm(content)
        if form.validate():
            return form.to_dict()
        print("Error: Invalid agent form format")
        return None
    except ET.ParseError as e:
        print(f"Error parsing XML: {e}")
        return None
    except Exception as e:
        print(f"Unexpected error: {e}")
        return None
# Usage example
if __name__ == "__main__":
    import json
    # NOTE(review): hard-coded absolute path to a developer's machine —
    # replace with a CLI argument or a path relative to the repo before reuse.
    result = parse_agent_form("/Users/tangjiabin/Documents/reasoning/metachain/metachain/agents/meta_agent/agent_form/customer_service.xml")
    if result:
        print("Successfully parsed agent form:")
        print(json.dumps(result, indent=4))

View file

@ -1,118 +0,0 @@
from metachain.registry import register_agent
from metachain.types import Agent, Result
from metachain.environment import DockerEnv, LocalEnv
from metachain.tools.meta.edit_tools import list_tools
from metachain.tools.meta.edit_agents import list_agents
from metachain.agents.meta_agent.agent_editor import get_agent_editor_agent
from metachain.agents.meta_agent.tool_editor import get_tool_editor_agent
from typing import Union
from metachain.tools.inner import case_resolved, case_not_resolved
from pydantic import BaseModel
from metachain.util import function_to_json
from metachain.agents.meta_agent.meta_plan_agent import get_meta_plan_agent
class AgentDescription(BaseModel):
    """Plan entry describing an agent: the tool names it relies on and
    whether the agent already exists in MetaChain."""
    tools: list[str]
    existing: bool
class ToolDescription(BaseModel):
    """Plan entry describing a tool: its functionality summary, whether it
    already exists, and its documentation text."""
    tool_functionalities: str
    existing: bool
    tool_docs: str
class ToolPlan(BaseModel):
    """One tool-development step: the tool's name plus its description."""
    tool_name: str
    tool_description: ToolDescription
class AgentPlan(BaseModel):
    """One agent-development step: the agent's name plus its description."""
    agent_name: str
    agent_description: AgentDescription
@register_agent(name = "Meta Agent", func_name="get_meta_agent")
def get_meta_agent(model: str) -> str:
"""
The meta agent is an agent that can be used to create and run other agents.
"""
def instructions(context_variables):
code_env: Union[DockerEnv, LocalEnv] = context_variables.get("code_env", LocalEnv())
instructions = f"""\
You are a helpful assistant that can help the user with their request by creating and running agents in the Metachain agent framework. Your responsibility is to determine which agent is best suited to handle the user's request under the current context, and transfer the conversation to that agent. And you should not stop to try to solve the user's request by transferring to another agent only until the task is completed.
Existing tools you already have:
{list_tools(context_variables)}
Existing agents you already have:
{list_agents(context_variables)}
You should first transfer the conversation to the `Meta Plan Agent` to plan how to use MetaChain to solve the user's request, and the plan should follow the following constraints:
1. If exising agents are enough for your task, you can directly use them to solve the user's request.
2. If exising agents are not enough for your task but there are enough existing tools, you can transfer the conversation to the `Agent Editor Agent` to develop new agents by using the existing tools.
3. If exising agents and existing tools are not enough for your task, you should first transfer the conversation to the `Tool Editor Agent` to develop new tools, then transfer the conversation to the `Agent Editor Agent` to develop new agents by using the new tools.
"""
return instructions
tool_editor_agent: Agent = get_tool_editor_agent(model)
agent_editor_agent: Agent = get_agent_editor_agent(model)
meta_plan_agent: Agent = get_meta_plan_agent(model)
def transfer_to_tool_editor_agent(sub_task: str):
"""
Args:
sub_task: The detailed description of the sub-task that the `Meta Agent` will ask the `Tool Editor Agent` to do.
"""
return tool_editor_agent
def transfer_to_agent_editor_agent(sub_task: str):
"""
Args:
sub_task: The detailed description of the sub-task that the `Meta Agent` will ask the `Agent Editor Agent` to do.
"""
return agent_editor_agent
def transfer_to_meta_plan_agent(sub_task: str):
"""
Use this function when you want to plan how to use MetaChain to solve the user's request.
Args:
sub_task: The detailed description of the sub-task that the `Meta Agent` will ask the `Meta Plan Agent` to do.
"""
return meta_plan_agent
meta_agent = Agent(
name="Meta Agent",
model=model,
instructions=instructions,
functions=[transfer_to_meta_plan_agent, transfer_to_tool_editor_agent, transfer_to_agent_editor_agent, case_resolved, case_not_resolved],
tool_choice = "required",
parallel_tool_calls = False
)
def transfer_back_to_meta_agent(task_status: str):
"""
Args:
task_status: The status of the task that the `Meta Agent` will ask the `Meta Agent` to do.
"""
return meta_agent
def transfer_back_to_meta_agent_with_plans(tool_development_steps: list[ToolPlan]) -> Result:
    """
    This function is used to plan how to use MetaChain to solve the user's request. You can use this function only after you have fully understood the user's request and have tried your best to search information from existing resources.

    Args:
        tool_development_steps: The steps of tool development. It is a list of dictionaries, each dictionary contains the tools name you should use in the existing MetaChain or the tools name you should develop. If the tool is not existing, dictionaries should contain the tool documentation.
    """
    # Render each planned tool as "name: functionality [existing-flag]" for the
    # hand-off message shown to the `Meta Agent`.
    tool_str = "\n".join([f"{tool['tool_name']}: {tool['tool_description']['tool_functionalities']} [{tool['tool_description']['existing']}]" for tool in tool_development_steps])
    ret_val = f"""\
Receiving user's request, I have the following plans to use MetaChain to solve the user's request:
As for using existing tools, I have the following plans:
{tool_str}
"""
    # Result carries both the plan text and the agent to resume control.
    return Result(
        value=ret_val,
        agent=meta_agent
    )
# Wire the return paths: both editor agents can hand plain control back, while
# the planner returns together with its tool-development plan.
tool_editor_agent.functions.append(transfer_back_to_meta_agent)
agent_editor_agent.functions.append(transfer_back_to_meta_agent)
meta_plan_agent.functions.append(transfer_back_to_meta_agent_with_plans)
return meta_agent

View file

@ -1,38 +0,0 @@
from metachain.types import Agent
from pydantic import BaseModel
from metachain.tools.meta.tool_retriever import get_api_plugin_tools_doc
from metachain.tools.meta.search_tools import search_trending_models_on_huggingface, get_hf_model_tools_doc
from metachain.tools.meta.edit_tools import list_tools
from typing import Union
from metachain.environment import DockerEnv, LocalEnv
def get_meta_plan_agent(model: str) -> Agent:
    """
    Build the `Meta Plan Agent`: a planner that inspects the currently
    registered MetaChain tools and searches external resources (third-party API
    docs, Hugging Face models) before handing a tool-usage plan back to the
    `Meta Agent`.

    Args:
        model: Name of the LLM backing this agent.

    Returns:
        A configured `Agent` forced to call exactly one tool per turn
        (tool_choice="required", parallel_tool_calls=False).
    """
    def instructions(context_variables):
        # The prompt embeds the live tool list at call time, so it always
        # reflects tools created earlier in the session.
        # (A previous revision also fetched an unused `code_env` from
        # context_variables here; the dead lookup was removed.)
        return f"""\
You are a helpful planner that can help `Tool Editor Agent` how to use MetaChain to solve the user's request.
Existing tools you already have:
{list_tools(context_variables)}
You should first fully understand the user's request, then analyze the existing tools and determine which tools are needed to solve the user's request, finally, you should transfer the conversation to the `Meta Agent` with the plan of using the tools.
If existing tools are not enough for your task, you should develop new tools.
1. [IMPORTANT] If you want to use third-party api, especially for some tasks related to Finance, Entertainment, eCommerce, Food, Travel, Sports, you MUST use the `get_api_plugin_tools_doc` tool to search information from existing api documents, it contains how to implement the api and API keys.
2. [IMPORTANT] If you want to use Hugging Face models, especially for some tasks related to vision, audio, video, you should use the `search_trending_models_on_huggingface` tool to search trending models related to the specific task on Hugging Face, and then use the `get_hf_model_tools_doc` tool to get the detailed information about the specific model.
3. [IMPORTANT] You can not use `transfer_back_to_meta_agent_with_plans` util you have fully understood the user's request and have try your best to search information from exsiting resources if you want to create a new tool.
"""
    return Agent(
        name="Meta Plan Agent",
        model=model,
        instructions=instructions,
        functions=[get_api_plugin_tools_doc, search_trending_models_on_huggingface, get_hf_model_tools_doc],
        tool_choice = "required",
        parallel_tool_calls = False
    )

View file

@ -1,121 +0,0 @@
from metachain.registry import register_agent
from metachain.tools.meta.edit_tools import list_tools, create_tool, delete_tool, run_tool, get_metachain_path
from metachain.tools.meta.tool_retriever import get_api_plugin_tools_doc
from metachain.tools.meta.search_tools import search_trending_models_on_huggingface, get_hf_model_tools_doc
from metachain.types import Agent
from metachain.io_utils import read_file
from metachain.tools.terminal_tools import execute_command, terminal_page_down, terminal_page_up, terminal_page_to
@register_agent(name = "Tool Editor Agent", func_name="get_tool_editor_agent")
def get_tool_editor_agent(model: str) -> Agent:
    """
    Build the `Tool Editor Agent`, which creates, tests, and maintains plugin
    tools for the MetaChain framework.

    Args:
        model: Name of the LLM backing this agent.

    Returns:
        A configured `Agent` forced to call exactly one tool per turn.
    """
    def instructions(context_variables):
        # The prompt is assembled from an f-string (interpolating the live tool
        # list and the dummy-tool template) plus a raw-string suffix: the raw
        # part contains literal `{`/`}` in the testing template, which must not
        # be treated as f-string placeholders.
        return f"""\
You are a Tool Editor specialized in the MetaChain framework. Your role is to create, modify, and maintain tools that agents can use effectively.
CURRENT TOOLS:
{list_tools(context_variables)}
TOOL CREATION GUIDELINES:
1. STRUCTURE AND FORMATTING
- Follow the template structure below:
```python
{read_file('metachain/tools/dummy_tool.py')}
```
- MUST use @register_plugin_tool decorator
- Include clear docstrings with args and returns
- Handle errors gracefully
- If the tool should be used with third-part api key, you should write the api key inside the definition of the tool
- DO NOT direct return too long output (e.g., the raw content of the download file for `download_file_from_url`), instead, save the output to a file in the `workplace/outputs` directory
2. IMPLEMENTATION PRINCIPLES:
- Keep tools GENERIC and REUSABLE
- Avoid over-specific implementations
- Focus on single responsibility
- Ensure proper error handling
- Include input validation
- Return clear, structured outputs
""" + \
r"""
3. TESTING REQUIREMENTS:
- All tools MUST be tested before deployment
- Use this testing template:
```python
from metachain.tools import your_tool_name
if __name__ == "__main__":
# Setup test environment
test_args = {
"arg1": value1,
"arg2": value2
}
# Execute test
result = your_tool_name(**test_args)
print(f"Test result: {result}")
```
- if the output of the tool is too long, you should use the `terminal_page_down` or `terminal_page_up` or `terminal_page_to` function to move the terminal page to the specific page with the meaningful content.
4. DEPENDENCY MANAGEMENT:
- Use execute_command for installing dependencies
- Document all required packages
- Verify compatibility with MetaChain
CRITICAL RULES:
1. ALWAYS use @register_plugin_tool decorator
2. NEVER create overly specific tools
3. ALWAYS test before finalizing
4. ALWAYS handle errors gracefully
5. ALWAYS document clearly
BEST PRACTICES:
1. Keep tools modular and focused
2. Provide clear error messages
3. Include usage examples in docstrings
4. Follow Python PEP 8 style guide
5. Use type hints for better clarity
Remember: A good tool is reusable, reliable, and well-documented. Focus on creating tools that can be effectively used across different agents and scenarios.
"""
    # Tool-management helpers plus external search and terminal utilities.
    tool_list = [list_tools, create_tool, delete_tool, run_tool, get_api_plugin_tools_doc, search_trending_models_on_huggingface, get_hf_model_tools_doc, execute_command, terminal_page_down, terminal_page_up, terminal_page_to]
    return Agent(
        name="Tool Editor Agent",
        model=model,
        instructions=instructions,
        functions=tool_list,
        tool_choice = "required",
        parallel_tool_calls = False
    )
"""\
You are a tool editor agent that can be used to edit the tools. You are working on a Agent framework named MetaChain, and your responsibility is to edit the tools in the MetaChain, so that the tools can be used by the agents to help the user with their request.
The existing tools are shown below:
{list_tools(context_variables)}
If you want to create a new tool, you should:
1. follow the format of the `tool_dummy` below. Note that if the tool should be used with third-part api key, you should write the api key inside the definition of the tool:
```python
{read_file('metachain/tools/dummy_tool.py')}
```
2. you successfully create the tool only after you have successfully tested the tool with the `test_tool` function, and an example of testing the tool is shown below.:
```python
from metachain.tools import tool_dummy
if __name__ == "__main__":
... # some pre-operations
print(tool_dummy(args1=args1, args2=args1, ...))
```
3. If you encounter any error while creating and running the tool, like dependency missing, you should use the `execute_command` function to install the dependency.
[IMPORTANT] The `register_plugin_tool` registry function is strictly required for a tool implementation to be recognized by the MetaChain framework.
[IMPORTANT] Tools you create should be as general as possible, and you should not create too specific tools, so that the tools can be reused by other agents or other related tasks.
"""

View file

@ -1,53 +0,0 @@
<agents>
<agent>
<name>Personal Sales Agent</name>
<description>The personal sales agent is an agent that serves as a personal sales agent for a specific user.</description>
<instructions>You are a personal sales agent that can be used to help the user {user_name} with their request.</instructions>
<tools category="exsiting">
<tool>
<name>recommend_product</name>
<description>Recommend a product to the user.</description>
</tool>
<tool>
<name>recommend_service</name>
<description>Recommend a service to the user.</description>
</tool>
<tool>
<name>transfer_customer</name>
<description>Transfer a customer to another sales agent.</description>
</tool>
<tool>
<name>search_information</name>
<description>Search for information for the user.</description>
</tool>
</tools>
<tools category="new">
<tool>
<name>create_agent</name>
<description>Create a new agent.</description>
</tool>
</tools>
<global_variables>
<variable>
<key>user_name</key>
<description>The name of the user.</description>
<value>John Doe</value>
</variable>
<variable>
<key>user_email</key>
<description>The email of the user.</description>
<value>john.doe@example.com</value>
</variable>
</global_variables>
</agent>
</agents>
<orchestrate>
<transition>
<from>personal_sales_agent</from>
<to>agent_former_agent</to>
<condition>
<type>user_request</type>
<value>create_agent</value>
</condition>
</transition>
</orchestrate>

View file

@ -1,184 +0,0 @@
{
"name": "math_solver_chain_workflow",
"system_input": {
"key": "math_problem",
"description": "The math problem that needs to be solved."
},
"system_output": {
"key": "solution",
"description": "The complete solution to the math problem."
},
"global_variables": {},
"agents": [
{
"name": "Objective Extraction Agent",
"description": "This agent analyzes the math problem and extracts its main objective or goal.",
"category": "existing",
"tools": null
},
{
"name": "Condition Extraction Agent",
"description": "This agent identifies and extracts all relevant conditions and given information from the math problem.",
"category": "existing",
"tools": null
},
{
"name": "Condition Evaluator Agent",
"description": "This agent evaluates whether the extracted conditions are sufficient to solve the math problem.",
"category": "existing",
"tools": null
},
{
"name": "Math Solver Agent",
"description": "This agent solves mathematical problems using analytical and systematic approaches.",
"category": "existing",
"tools": null
}
],
"events": [
{
"name": "on_start",
"inputs": [
{
"key": "math_problem",
"description": "The math problem that needs to be solved."
}
],
"task": null,
"outputs": [
{
"key": "math_problem",
"description": "The math problem that needs to be solved.",
"condition": null,
"action": {
"type": "RESULT",
"value": null
}
}
],
"listen": null,
"agent": null
},
{
"name": "extract_objective",
"inputs": [
{
"key": "math_problem",
"description": "The math problem that needs to be solved."
}
],
"task": "Extract and clearly state the main objective of the math problem.",
"outputs": [
{
"key": "objective",
"description": "The main objective or question that needs to be answered.",
"condition": null,
"action": {
"type": "RESULT",
"value": null
}
}
],
"listen": [
"on_start"
],
"agent": {
"name": "Objective Extraction Agent",
"model": "claude-3-5-sonnet-20241022"
}
},
{
"name": "extract_conditions",
"inputs": [
{
"key": "objective",
"description": "The main objective or question that needs to be answered."
}
],
"task": "Extract all relevant conditions and given information from the math problem.",
"outputs": [
{
"key": "conditions",
"description": "The complete set of conditions and information extracted from the problem.",
"condition": null,
"action": {
"type": "RESULT",
"value": null
}
}
],
"listen": [
"extract_objective"
],
"agent": {
"name": "Condition Extraction Agent",
"model": "claude-3-5-sonnet-20241022"
}
},
{
"name": "evaluate_conditions",
"inputs": [
{
"key": "conditions",
"description": "The complete set of conditions and information extracted from the problem."
}
],
"task": "Evaluate if the extracted conditions are sufficient to solve the problem.",
"outputs": [
{
"key": "merged_conditions",
"description": "The merged and organized conditions ready for problem-solving.",
"condition": "When conditions are sufficient to solve the problem.",
"action": {
"type": "RESULT",
"value": null
}
},
{
"key": "insufficient_feedback",
"description": "Feedback on what additional information is needed.",
"condition": "When conditions are insufficient to solve the problem.",
"action": {
"type": "GOTO",
"value": "extract_conditions"
}
}
],
"listen": [
"extract_conditions"
],
"agent": {
"name": "Condition Evaluator Agent",
"model": "claude-3-5-sonnet-20241022"
}
},
{
"name": "solve_problem",
"inputs": [
{
"key": "merged_conditions",
"description": "The merged and organized conditions ready for problem-solving."
}
],
"task": "Solve the math problem using the complete set of conditions.",
"outputs": [
{
"key": "solution",
"description": "The complete solution to the math problem.",
"condition": null,
"action": {
"type": "RESULT",
"value": null
}
}
],
"listen": [
"evaluate_conditions"
],
"agent": {
"name": "Math Solver Agent",
"model": "claude-3-5-sonnet-20241022"
}
}
]
}

View file

@ -1,169 +0,0 @@
<workflow>
<name>math_solver_chain_workflow</name>
<system_input>
<key>math_problem</key>
<description>The math problem that needs to be solved.</description>
</system_input>
<system_output>
<key>solution</key>
<description>The complete solution to the math problem.</description>
</system_output>
<agents>
<agent category="existing">
<name>Objective Extraction Agent</name>
<description>This agent analyzes the math problem and extracts its main objective or goal.</description>
</agent>
<agent category="existing">
<name>Condition Extraction Agent</name>
<description>This agent identifies and extracts all relevant conditions and given information from the math problem.</description>
</agent>
<agent category="existing">
<name>Condition Evaluator Agent</name>
<description>This agent evaluates whether the extracted conditions are sufficient to solve the math problem.</description>
</agent>
<agent category="existing">
<name>Math Solver Agent</name>
<description>This agent solves mathematical problems using analytical and systematic approaches.</description>
</agent>
</agents>
<events>
<event>
<name>on_start</name>
<inputs>
<input>
<key>math_problem</key>
<description>The math problem that needs to be solved.</description>
</input>
</inputs>
<outputs>
<output>
<key>math_problem</key>
<description>The math problem that needs to be solved.</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
</event>
<event>
<name>extract_objective</name>
<inputs>
<input>
<key>math_problem</key>
<description>The math problem that needs to be solved.</description>
</input>
</inputs>
<task>Extract and clearly state the main objective of the math problem.</task>
<outputs>
<output>
<key>objective</key>
<description>The main objective or question that needs to be answered.</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>on_start</event>
</listen>
<agent>
<name>Objective Extraction Agent</name>
<model>claude-3-5-sonnet-20241022</model>
</agent>
</event>
<event>
<name>extract_conditions</name>
<inputs>
<input>
<key>objective</key>
<description>The main objective or question that needs to be answered.</description>
</input>
</inputs>
<task>Extract all relevant conditions and given information from the math problem.</task>
<outputs>
<output>
<key>conditions</key>
<description>The complete set of conditions and information extracted from the problem.</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>extract_objective</event>
</listen>
<agent>
<name>Condition Extraction Agent</name>
<model>claude-3-5-sonnet-20241022</model>
</agent>
</event>
<event>
<name>evaluate_conditions</name>
<inputs>
<input>
<key>conditions</key>
<description>The complete set of conditions and information extracted from the problem.</description>
</input>
</inputs>
<task>Evaluate if the extracted conditions are sufficient to solve the problem.</task>
<outputs>
<output>
<key>merged_conditions</key>
<description>The merged and organized conditions ready for problem-solving.</description>
<condition>When conditions are sufficient to solve the problem.</condition>
<action>
<type>RESULT</type>
</action>
</output>
<output>
<key>insufficient_feedback</key>
<description>Feedback on what additional information is needed.</description>
<condition>When conditions are insufficient to solve the problem.</condition>
<action>
<type>GOTO</type>
<value>extract_conditions</value>
</action>
</output>
</outputs>
<listen>
<event>extract_conditions</event>
</listen>
<agent>
<name>Condition Evaluator Agent</name>
<model>claude-3-5-sonnet-20241022</model>
</agent>
</event>
<event>
<name>solve_problem</name>
<inputs>
<input>
<key>merged_conditions</key>
<description>The merged and organized conditions ready for problem-solving.</description>
</input>
</inputs>
<task>Solve the math problem using the complete set of conditions.</task>
<outputs>
<output>
<key>solution</key>
<description>The complete solution to the math problem.</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>evaluate_conditions</event>
</listen>
<agent>
<name>Math Solver Agent</name>
<model>claude-3-5-sonnet-20241022</model>
</agent>
</event>
</events>
</workflow>

View file

@ -1,158 +0,0 @@
{
"name": "math_condition_mining_workflow",
"system_input": {
"key": "math_problem",
"description": "The mathematical problem that needs to be solved."
},
"system_output": {
"key": "solution",
"description": "The detailed solution to the mathematical problem."
},
"global_variables": {},
"agents": [
{
"name": "Objective Extraction Agent",
"description": "This agent is specialized in analyzing math problems and extracting the main objective or question being asked.",
"category": "existing",
"tools": null
},
{
"name": "Condition Extraction Agent",
"description": "This agent is specialized in identifying and extracting all relevant conditions, given values, and constraints from the math problem.",
"category": "existing",
"tools": null
},
{
"name": "Math Solver Agent",
"description": "This agent is specialized in evaluating whether conditions are sufficient and solving math problems using appropriate mathematical methods.",
"category": "existing",
"tools": null
}
],
"events": [
{
"name": "on_start",
"inputs": [
{
"key": "math_problem",
"description": "The mathematical problem that needs to be solved."
}
],
"task": null,
"outputs": [
{
"key": "math_problem",
"description": "The mathematical problem that needs to be solved.",
"condition": null,
"action": {
"type": "RESULT",
"value": null
}
}
],
"listen": null,
"agent": null
},
{
"name": "extract_objective",
"inputs": [
{
"key": "math_problem",
"description": "The mathematical problem that needs to be solved."
}
],
"task": "Analyze the math problem and extract the main objective or question being asked.",
"outputs": [
{
"key": "objective",
"description": "The main objective or question extracted from the math problem.",
"condition": null,
"action": {
"type": "RESULT",
"value": null
}
}
],
"listen": [
"on_start"
],
"agent": {
"name": "Objective Extraction Agent",
"model": "claude-3-5-sonnet-20241022"
}
},
{
"name": "extract_conditions",
"inputs": [
{
"key": "math_problem",
"description": "The mathematical problem that needs to be solved."
},
{
"key": "objective",
"description": "The main objective or question extracted from the math problem."
}
],
"task": "Extract all relevant conditions, given values, and constraints from the math problem.",
"outputs": [
{
"key": "conditions",
"description": "The extracted conditions, values, and constraints from the math problem.",
"condition": null,
"action": {
"type": "RESULT",
"value": null
}
}
],
"listen": [
"extract_objective"
],
"agent": {
"name": "Condition Extraction Agent",
"model": "claude-3-5-sonnet-20241022"
}
},
{
"name": "solve_problem",
"inputs": [
{
"key": "objective",
"description": "The main objective or question extracted from the math problem."
},
{
"key": "conditions",
"description": "The extracted conditions, values, and constraints from the math problem."
}
],
"task": "Evaluate whether conditions are sufficient and solve the math problem if possible.",
"outputs": [
{
"key": "solution",
"description": "The complete solution to the math problem.",
"condition": "When conditions are sufficient to solve the problem.",
"action": {
"type": "RESULT",
"value": null
}
},
{
"key": "insufficient_conditions",
"description": "Feedback about missing or unclear conditions.",
"condition": "When conditions are insufficient to solve the problem.",
"action": {
"type": "GOTO",
"value": "extract_conditions"
}
}
],
"listen": [
"extract_conditions"
],
"agent": {
"name": "Math Solver Agent",
"model": "claude-3-5-sonnet-20241022"
}
}
]
}

View file

@ -1,148 +0,0 @@
<workflow>
<name>math_problem_solver_workflow</name>
<system_input>
<key>math_problem</key>
<description>The mathematical problem that needs to be solved.</description>
</system_input>
<system_output>
<key>solution</key>
<description>The detailed solution to the mathematical problem.</description>
</system_output>
<agents>
<agent category="existing">
<name>Objective Extraction Agent</name>
<description>This agent is specialized in analyzing math problems and extracting the main objective or question being asked.</description>
</agent>
<agent category="existing">
<name>Condition Extraction Agent</name>
<description>This agent is specialized in identifying and extracting all relevant conditions, given values, and constraints from the math problem.</description>
</agent>
<agent category="existing">
<name>Math Solver Agent</name>
<description>This agent is specialized in evaluating whether conditions are sufficient and solving math problems using appropriate mathematical methods.</description>
</agent>
</agents>
<events>
<event>
<name>on_start</name>
<inputs>
<input>
<key>math_problem</key>
<description>The mathematical problem that needs to be solved.</description>
</input>
</inputs>
<outputs>
<output>
<key>math_problem</key>
<description>The mathematical problem that needs to be solved.</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
</event>
<event>
<name>extract_objective</name>
<inputs>
<input>
<key>math_problem</key>
<description>The mathematical problem that needs to be solved.</description>
</input>
</inputs>
<task>Analyze the math problem and extract the main objective or question being asked.</task>
<outputs>
<output>
<key>objective</key>
<description>The main objective or question extracted from the math problem.</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>on_start</event>
</listen>
<agent>
<name>Objective Extraction Agent</name>
<model>gpt-4o-2024-08-06</model>
</agent>
</event>
<event>
<name>extract_conditions</name>
<inputs>
<input>
<key>math_problem</key>
<description>The mathematical problem that needs to be solved.</description>
</input>
<input>
<key>objective</key>
<description>The main objective or question extracted from the math problem.</description>
</input>
</inputs>
<task>Extract all relevant conditions, given values, and constraints from the math problem.</task>
<outputs>
<output>
<key>conditions</key>
<description>The extracted conditions, values, and constraints from the math problem.</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>extract_objective</event>
</listen>
<agent>
<name>Condition Extraction Agent</name>
<model>gpt-4o-2024-08-06</model>
</agent>
</event>
<event>
<name>solve_problem</name>
<inputs>
<input>
<key>objective</key>
<description>The main objective or question extracted from the math problem.</description>
</input>
<input>
<key>conditions</key>
<description>The extracted conditions, values, and constraints from the math problem.</description>
</input>
</inputs>
<task>Evaluate whether conditions are sufficient and solve the math problem if possible.</task>
<outputs>
<output>
<key>solution</key>
<description>The complete solution to the math problem.</description>
<condition>When conditions are sufficient to solve the problem.</condition>
<action>
<type>RESULT</type>
</action>
</output>
<output>
<key>insufficient_conditions</key>
<description>Feedback about missing or unclear conditions.</description>
<condition>When conditions are insufficient to solve the problem.</condition>
<action>
<type>GOTO</type>
<value>extract_conditions</value>
</action>
</output>
</outputs>
<listen>
<event>extract_conditions</event>
</listen>
<agent>
<name>Math Solver Agent</name>
<model>gpt-4o-2024-08-06</model>
</agent>
</event>
</events>
</workflow>

View file

@ -1,173 +0,0 @@
{
"name": "math_problem_solver_workflow",
"system_input": {
"key": "math_problem",
"description": "The math problem that needs to be solved."
},
"system_output": {
"key": "final_solution",
"description": "The final solution to the math problem determined by majority voting."
},
"global_variables": {},
"agents": [
{
"name": "Math Solver Agent",
"description": "This agent is specialized in solving math problems using appropriate mathematical methods.",
"category": "existing",
"tools": null
},
{
"name": "Result Aggregator Agent",
"description": "This agent aggregates results from different models and determines the final answer through majority voting.",
"category": "existing",
"tools": null
}
],
"events": [
{
"name": "on_start",
"inputs": [
{
"key": "math_problem",
"description": "The math problem that needs to be solved."
}
],
"task": null,
"outputs": [
{
"key": "math_problem",
"description": "The math problem that needs to be solved.",
"condition": null,
"action": {
"type": "RESULT",
"value": null
}
}
],
"listen": null,
"agent": null
},
{
"name": "solve_with_gpt4",
"inputs": [
{
"key": "math_problem",
"description": "The math problem that needs to be solved."
}
],
"task": "Solve the math problem using GPT-4 model.",
"outputs": [
{
"key": "gpt4_solution",
"description": "The solution provided by GPT-4 model.",
"condition": null,
"action": {
"type": "RESULT",
"value": null
}
}
],
"listen": [
"on_start"
],
"agent": {
"name": "Math Solver Agent",
"model": "gpt-4o-2024-08-06"
}
},
{
"name": "solve_with_claude",
"inputs": [
{
"key": "math_problem",
"description": "The math problem that needs to be solved."
}
],
"task": "Solve the math problem using Claude model.",
"outputs": [
{
"key": "claude_solution",
"description": "The solution provided by Claude model.",
"condition": null,
"action": {
"type": "RESULT",
"value": null
}
}
],
"listen": [
"on_start"
],
"agent": {
"name": "Math Solver Agent",
"model": "claude-3-5-sonnet-20241022"
}
},
{
"name": "solve_with_deepseek",
"inputs": [
{
"key": "math_problem",
"description": "The math problem that needs to be solved."
}
],
"task": "Solve the math problem using Deepseek model.",
"outputs": [
{
"key": "deepseek_solution",
"description": "The solution provided by Deepseek model.",
"condition": null,
"action": {
"type": "RESULT",
"value": null
}
}
],
"listen": [
"on_start"
],
"agent": {
"name": "Math Solver Agent",
"model": "deepseek/deepseek-chat"
}
},
{
"name": "aggregate_results",
"inputs": [
{
"key": "gpt4_solution",
"description": "The solution provided by GPT-4 model."
},
{
"key": "claude_solution",
"description": "The solution provided by Claude model."
},
{
"key": "deepseek_solution",
"description": "The solution provided by Deepseek model."
}
],
"task": "Aggregate the solutions from different models and determine the final answer through majority voting.",
"outputs": [
{
"key": "final_solution",
"description": "The final solution determined by majority voting.",
"condition": null,
"action": {
"type": "RESULT",
"value": null
}
}
],
"listen": [
"solve_with_gpt4",
"solve_with_claude",
"solve_with_deepseek"
],
"agent": {
"name": "Result Aggregator Agent",
"model": "gpt-4o-2024-08-06"
}
}
]
}

View file

@ -1,161 +0,0 @@
<workflow>
<name>math_problem_solver_workflow</name>
<system_input>
<key>math_problem</key>
<description>The math problem that needs to be solved.</description>
</system_input>
<system_output>
<key>final_solution</key>
<description>The final solution to the math problem determined by majority voting.</description>
</system_output>
<agents>
<agent category="existing">
<name>Math Solver Agent</name>
<description>This agent is specialized in solving math problems using appropriate mathematical methods.</description>
</agent>
<agent category="existing">
<name>Result Aggregator Agent</name>
<description>This agent aggregates results from different models and determines the final answer through majority voting.</description>
</agent>
</agents>
<events>
<event>
<name>on_start</name>
<inputs>
<input>
<key>math_problem</key>
<description>The math problem that needs to be solved.</description>
</input>
</inputs>
<outputs>
<output>
<key>math_problem</key>
<description>The math problem that needs to be solved.</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
</event>
<event>
<name>solve_with_gpt4</name>
<inputs>
<input>
<key>math_problem</key>
<description>The math problem that needs to be solved.</description>
</input>
</inputs>
<task>Solve the math problem using GPT-4 model.</task>
<outputs>
<output>
<key>gpt4_solution</key>
<description>The solution provided by GPT-4 model.</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>on_start</event>
</listen>
<agent>
<name>Math Solver Agent</name>
<model>gpt-4o-2024-08-06</model>
</agent>
</event>
<event>
<name>solve_with_claude</name>
<inputs>
<input>
<key>math_problem</key>
<description>The math problem that needs to be solved.</description>
</input>
</inputs>
<task>Solve the math problem using Claude model.</task>
<outputs>
<output>
<key>claude_solution</key>
<description>The solution provided by Claude model.</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>on_start</event>
</listen>
<agent>
<name>Math Solver Agent</name>
<model>claude-3-5-sonnet-20241022</model>
</agent>
</event>
<event>
<name>solve_with_deepseek</name>
<inputs>
<input>
<key>math_problem</key>
<description>The math problem that needs to be solved.</description>
</input>
</inputs>
<task>Solve the math problem using Deepseek model.</task>
<outputs>
<output>
<key>deepseek_solution</key>
<description>The solution provided by Deepseek model.</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>on_start</event>
</listen>
<agent>
<name>Math Solver Agent</name>
<model>deepseek/deepseek-chat</model>
</agent>
</event>
<event>
<name>aggregate_results</name>
<inputs>
<input>
<key>gpt4_solution</key>
<description>The solution provided by GPT-4 model.</description>
</input>
<input>
<key>claude_solution</key>
<description>The solution provided by Claude model.</description>
</input>
<input>
<key>deepseek_solution</key>
<description>The solution provided by Deepseek model.</description>
</input>
</inputs>
<task>Aggregate the solutions from different models and determine the final answer through majority voting.</task>
<outputs>
<output>
<key>final_solution</key>
<description>The final solution determined by majority voting.</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>solve_with_gpt4</event>
<event>solve_with_claude</event>
<event>solve_with_deepseek</event>
</listen>
<agent>
<name>Result Aggregator Agent</name>
<model>gpt-4o-2024-08-06</model>
</agent>
</event>
</events>
</workflow>

View file

@ -1,159 +0,0 @@
<workflow>
<system_input>
<key>problem_path</key>
<description>The URL of the dataset containing math problems to solve: https://huggingface.co/datasets/openai/gsm8k</description>
</system_input>
<system_output>
<key>solution_results</key>
<description>The aggregated solution results with majority voting from multiple models</description>
</system_output>
<agents>
<agent category="new">
<name>Data Fetcher Agent</name>
<description>This agent is responsible for downloading and processing the dataset from HuggingFace</description>
</agent>
<agent category="new">
<name>Math Solver Agent</name>
<description>This agent is specialized in solving mathematical problems step by step</description>
</agent>
<agent category="new">
<name>Result Aggregator Agent</name>
<description>This agent aggregates solutions from different models and performs majority voting</description>
</agent>
</agents>
<global_variables>
<variable>
<key>dataset_url</key>
<description>The URL of the GSM8K dataset</description>
<value>https://huggingface.co/datasets/openai/gsm8k</value>
</variable>
</global_variables>
<events>
<event>
<name>fetch_dataset</name>
<input>
<key>problem_path</key>
<description>The URL of the dataset to download</description>
</input>
<task>Download the GSM8K dataset and save it locally</task>
<outputs>
<output>
<key>dataset_content</key>
<description>The downloaded math problems from the dataset</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<agent>
<name>Data Fetcher Agent</name>
<model>gpt-4o-2024-08-06</model>
</agent>
</event>
<event>
<name>solve_with_gpt4</name>
<input>
<key>dataset_content</key>
<description>The math problems to solve</description>
</input>
<task>Solve the math problems using GPT-4 model</task>
<outputs>
<output>
<key>gpt4_solutions</key>
<description>Solutions generated by GPT-4</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>fetch_dataset</event>
</listen>
<agent>
<name>Math Solver Agent</name>
<model>gpt-4-1106-preview</model>
</agent>
</event>
<event>
<name>solve_with_claude</name>
<input>
<key>dataset_content</key>
<description>The math problems to solve</description>
</input>
<task>Solve the math problems using Claude model</task>
<outputs>
<output>
<key>claude_solutions</key>
<description>Solutions generated by Claude</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>fetch_dataset</event>
</listen>
<agent>
<name>Math Solver Agent</name>
<model>claude-3-opus-20240229</model>
</agent>
</event>
<event>
<name>solve_with_mixtral</name>
<input>
<key>dataset_content</key>
<description>The math problems to solve</description>
</input>
<task>Solve the math problems using Mixtral model</task>
<outputs>
<output>
<key>mixtral_solutions</key>
<description>Solutions generated by Mixtral</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>fetch_dataset</event>
</listen>
<agent>
<name>Math Solver Agent</name>
<model>mixtral-8x7b-instruct</model>
</agent>
</event>
<event>
<name>aggregate_results</name>
<input>
<key>model_solutions</key>
<description>Solutions from different models to be aggregated</description>
</input>
<task>Aggregate solutions from all models using majority voting</task>
<outputs>
<output>
<key>solution_results</key>
<description>Final aggregated solutions with majority voting</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>solve_with_gpt4</event>
<event>solve_with_claude</event>
<event>solve_with_mixtral</event>
</listen>
<agent>
<name>Result Aggregator Agent</name>
<model>gpt-4o-2024-08-06</model>
</agent>
</event>
</events>
</workflow>

View file

@ -1,167 +0,0 @@
<workflow>
<system_input>
<key>dataset_url</key>
<description>The URL of the math problem dataset on huggingface.</description>
</system_input>
<system_output>
<key>final_result</key>
<description>The final solution of the math problem after majority voting.</description>
</system_output>
<agents>
<agent category="new">
<name>Data Loader Agent</name>
<description>This agent is responsible for downloading and processing the dataset.</description>
<tools>
<tool>download_file</tool>
<tool>analyze_data</tool>
<tool>load_one_instance</tool>
</tools>
</agent>
<agent category="new">
<name>Math Solver Agent</name>
<description>This agent is responsible for solving math problems using different language models.</description>
</agent>
<agent category="new">
<name>Result Aggregator Agent</name>
<description>This agent aggregates results from different solvers and performs majority voting.</description>
</agent>
</agents>
<global_variables>
<variable>
<key>dataset_path</key>
<description>Local path where the dataset will be stored</description>
<value>/workspace/data/math_dataset.json</value>
</variable>
</global_variables>
<events>
<event>
<name>download_data</name>
<input>
<key>dataset_url</key>
<description>The URL of the math problem dataset</description>
</input>
<task>Download the dataset from huggingface and analyze its structure</task>
<outputs>
<output>
<key>problem_instance</key>
<description>A single math problem instance from the dataset</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<agent>
<name>Data Loader Agent</name>
<model>gpt-4o-2024-08-06</model>
</agent>
</event>
<event>
<name>solve_with_gpt4</name>
<input>
<key>problem_instance</key>
<description>Math problem to solve</description>
</input>
<task>Solve the math problem using GPT-4</task>
<outputs>
<output>
<key>gpt4_solution</key>
<description>Solution from GPT-4 model</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>download_data</event>
</listen>
<agent>
<name>Math Solver Agent</name>
<model>gpt-4o-2024-08-06</model>
</agent>
</event>
<event>
<name>solve_with_claude</name>
<input>
<key>problem_instance</key>
<description>Math problem to solve</description>
</input>
<task>Solve the math problem using Claude</task>
<outputs>
<output>
<key>claude_solution</key>
<description>Solution from Claude model</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>download_data</event>
</listen>
<agent>
<name>Math Solver Agent</name>
<model>claude-3-5-sonnet-20241022</model>
</agent>
</event>
<event>
<name>solve_with_palm</name>
<input>
<key>problem_instance</key>
<description>Math problem to solve</description>
</input>
<task>Solve the math problem using PaLM</task>
<outputs>
<output>
<key>palm_solution</key>
<description>Solution from PaLM model</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>download_data</event>
</listen>
<agent>
<name>Math Solver Agent</name>
<model>palm-2-4-chat-20240125</model>
</agent>
</event>
<event>
<name>aggregate_results</name>
<input>
<key>solutions</key>
<description>Solutions from all models</description>
</input>
<task>Aggregate solutions using majority voting</task>
<outputs>
<output>
<key>final_result</key>
<description>Final solution after majority voting</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>solve_with_gpt4</event>
<event>solve_with_claude</event>
<event>solve_with_palm</event>
</listen>
<agent>
<name>Result Aggregator Agent</name>
<model>gpt-4o-2024-08-06</model>
</agent>
</event>
</events>
</workflow>

View file

@ -1,158 +0,0 @@
{
"name": "math_problem_solver_workflow",
"system_input": {
"key": "math_problem",
"description": "The math problem that needs to be solved."
},
"system_output": {
"key": "final_solution",
"description": "The final solution to the math problem determined through majority voting."
},
"global_variables": {},
"agents": [
{
"name": "Math Solver Agent",
"description": "This agent is specialized in solving math problems using appropriate mathematical methods.",
"category": "existing",
"tools": null
},
{
"name": "Result Aggregator Agent",
"description": "This agent is specialized in aggregating results from different models and determining the final answer through majority voting.",
"category": "existing",
"tools": null
}
],
"events": [
{
"name": "on_start",
"input": {
"key": "math_problem",
"description": "The math problem that needs to be solved."
},
"task": "Distribute the math problem to parallel solvers.",
"outputs": [
{
"key": "problem_ready",
"description": "Math problem ready for parallel processing.",
"condition": null,
"action": {
"type": "RESULT",
"value": null
}
}
],
"listen": null,
"agent": {
"name": "Math Solver Agent",
"model": "gpt-4o-2024-08-06"
}
},
{
"name": "solve_with_gpt4",
"input": {
"key": "problem_ready",
"description": "Math problem to be solved."
},
"task": "Solve the math problem using GPT-4 model.",
"outputs": [
{
"key": "gpt4_solution",
"description": "Solution from GPT-4 model.",
"condition": null,
"action": {
"type": "RESULT",
"value": null
}
}
],
"listen": [
"on_start"
],
"agent": {
"name": "Math Solver Agent",
"model": "gpt-4o-2024-08-06"
}
},
{
"name": "solve_with_claude",
"input": {
"key": "problem_ready",
"description": "Math problem to be solved."
},
"task": "Solve the math problem using Claude model.",
"outputs": [
{
"key": "claude_solution",
"description": "Solution from Claude model.",
"condition": null,
"action": {
"type": "RESULT",
"value": null
}
}
],
"listen": [
"on_start"
],
"agent": {
"name": "Math Solver Agent",
"model": "claude-3-5-sonnet-20241022"
}
},
{
"name": "solve_with_deepseek",
"input": {
"key": "problem_ready",
"description": "Math problem to be solved."
},
"task": "Solve the math problem using Deepseek model.",
"outputs": [
{
"key": "deepseek_solution",
"description": "Solution from Deepseek model.",
"condition": null,
"action": {
"type": "RESULT",
"value": null
}
}
],
"listen": [
"on_start"
],
"agent": {
"name": "Math Solver Agent",
"model": "deepseek/deepseek-chat"
}
},
{
"name": "aggregate_results",
"input": {
"key": "multiple_solutions",
"description": "Solutions from all three models."
},
"task": "Aggregate solutions from different models and determine final answer through majority voting.",
"outputs": [
{
"key": "final_solution",
"description": "Final solution determined through majority voting.",
"condition": null,
"action": {
"type": "RESULT",
"value": null
}
}
],
"listen": [
"solve_with_gpt4",
"solve_with_claude",
"solve_with_deepseek"
],
"agent": {
"name": "Result Aggregator Agent",
"model": "gpt-4o-2024-08-06"
}
}
]
}

View file

@ -1,148 +0,0 @@
<workflow>
<name>math_problem_solver_workflow</name>
<system_input>
<key>math_problem</key>
<description>The math problem that needs to be solved.</description>
</system_input>
<system_output>
<key>final_solution</key>
<description>The final solution to the math problem determined through majority voting.</description>
</system_output>
<agents>
<agent category="existing">
<name>Math Solver Agent</name>
<description>This agent is specialized in solving math problems using appropriate mathematical methods.</description>
</agent>
<agent category="existing">
<name>Result Aggregator Agent</name>
<description>This agent is specialized in aggregating results from different models and determining the final answer through majority voting.</description>
</agent>
</agents>
<events>
<event>
<name>on_start</name>
<input>
<key>math_problem</key>
<description>The math problem that needs to be solved.</description>
</input>
<task>Distribute the math problem to parallel solvers.</task>
<outputs>
<output>
<key>problem_ready</key>
<description>Math problem ready for parallel processing.</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<agent>
<name>Math Solver Agent</name>
<model>gpt-4o-2024-08-06</model>
</agent>
</event>
<event>
<name>solve_with_gpt4</name>
<input>
<key>problem_ready</key>
<description>Math problem to be solved.</description>
</input>
<task>Solve the math problem using GPT-4 model.</task>
<outputs>
<output>
<key>gpt4_solution</key>
<description>Solution from GPT-4 model.</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>on_start</event>
</listen>
<agent>
<name>Math Solver Agent</name>
<model>gpt-4o-2024-08-06</model>
</agent>
</event>
<event>
<name>solve_with_claude</name>
<input>
<key>problem_ready</key>
<description>Math problem to be solved.</description>
</input>
<task>Solve the math problem using Claude model.</task>
<outputs>
<output>
<key>claude_solution</key>
<description>Solution from Claude model.</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>on_start</event>
</listen>
<agent>
<name>Math Solver Agent</name>
<model>claude-3-5-sonnet-20241022</model>
</agent>
</event>
<event>
<name>solve_with_deepseek</name>
<input>
<key>problem_ready</key>
<description>Math problem to be solved.</description>
</input>
<task>Solve the math problem using Deepseek model.</task>
<outputs>
<output>
<key>deepseek_solution</key>
<description>Solution from Deepseek model.</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>on_start</event>
</listen>
<agent>
<name>Math Solver Agent</name>
<model>deepseek/deepseek-chat</model>
</agent>
</event>
<event>
<name>aggregate_results</name>
<input>
<key>multiple_solutions</key>
<description>Solutions from all three models.</description>
</input>
<task>Aggregate solutions from different models and determine final answer through majority voting.</task>
<outputs>
<output>
<key>final_solution</key>
<description>Final solution determined through majority voting.</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>solve_with_gpt4</event>
<event>solve_with_claude</event>
<event>solve_with_deepseek</event>
</listen>
<agent>
<name>Result Aggregator Agent</name>
<model>gpt-4o-2024-08-06</model>
</agent>
</event>
</events>
</workflow>

View file

@ -1,119 +0,0 @@
{
"name": "mining_conditions_workflow",
"system_input": {
"key": "math_problem",
"description": "The math problem that user wants to solve."
},
"system_output": {
"key": "solution",
"description": "The detailed solution of the math problem, including steps and final answer."
},
"global_variables": {},
"agents": [
{
"name": "Objective Extraction Agent",
"description": "This agent is specialized in analyzing math problems and extracting the main objective or question being asked.",
"category": "new",
"tools": null
},
{
"name": "Condition Extraction Agent",
"description": "This agent is specialized in identifying and extracting all relevant conditions, given values, and constraints from the math problem.",
"category": "new",
"tools": null
},
{
"name": "Math Solver Agent",
"description": "This agent is specialized in evaluating whether conditions are sufficient and solving math problems using appropriate mathematical methods.",
"category": "new",
"tools": null
}
],
"events": [
{
"name": "on_start",
"input": {
"key": "math_problem",
"description": "The original math problem text."
},
"task": "Extract the main objective or question from the math problem.",
"outputs": [
{
"key": "objective",
"description": "The clearly defined objective or question that needs to be solved.",
"condition": null,
"action": {
"type": "RESULT",
"value": null
}
}
],
"listen": null,
"agent": {
"name": "Objective Extraction Agent",
"model": "gpt-4o-2024-08-06"
}
},
{
"name": "extract_conditions",
"input": {
"key": "objective",
"description": "The objective of the math problem."
},
"task": "Extract all relevant conditions, given values, and constraints from the math problem.",
"outputs": [
{
"key": "conditions",
"description": "List of all identified conditions and constraints.",
"condition": null,
"action": {
"type": "RESULT",
"value": null
}
}
],
"listen": [
"on_start"
],
"agent": {
"name": "Condition Extraction Agent",
"model": "gpt-4o-2024-08-06"
}
},
{
"name": "evaluate_and_solve",
"input": {
"key": "conditions",
"description": "The extracted conditions and constraints."
},
"task": "Evaluate if conditions are sufficient and solve the math problem if possible.",
"outputs": [
{
"key": "insufficient_conditions",
"description": "Conditions are not sufficient to solve the problem.",
"condition": "If the current conditions are not enough to solve the problem.",
"action": {
"type": "GOTO",
"value": "extract_conditions"
}
},
{
"key": "solution",
"description": "Complete solution with steps and final answer.",
"condition": "If conditions are sufficient to solve the problem.",
"action": {
"type": "RESULT",
"value": null
}
}
],
"listen": [
"extract_conditions"
],
"agent": {
"name": "Math Solver Agent",
"model": "gpt-4o-2024-08-06"
}
}
]
}

View file

@ -1,113 +0,0 @@
<workflow>
<name>mining_conditions_workflow</name>
<system_input>
<key>math_problem</key>
<description>The math problem that user wants to solve.</description>
</system_input>
<system_output>
<key>solution</key>
<description>The detailed solution of the math problem, including steps and final answer.</description>
</system_output>
<agents>
<agent category="new">
<name>Objective Extraction Agent</name>
<description>This agent is specialized in analyzing math problems and extracting the main objective or question being asked.</description>
</agent>
<agent category="new">
<name>Condition Extraction Agent</name>
<description>This agent is specialized in identifying and extracting all relevant conditions, given values, and constraints from the math problem.</description>
</agent>
<agent category="new">
<name>Math Solver Agent</name>
<description>This agent is specialized in evaluating whether conditions are sufficient and solving math problems using appropriate mathematical methods.</description>
</agent>
</agents>
<events>
<event>
<name>on_start</name>
<input>
<key>math_problem</key>
<description>The original math problem text.</description>
</input>
<task>Extract the main objective or question from the math problem.</task>
<outputs>
<output>
<key>objective</key>
<description>The clearly defined objective or question that needs to be solved.</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<agent>
<name>Objective Extraction Agent</name>
<model>gpt-4o-2024-08-06</model>
</agent>
</event>
<event>
<name>extract_conditions</name>
<input>
<key>objective</key>
<description>The objective of the math problem.</description>
</input>
<task>Extract all relevant conditions, given values, and constraints from the math problem.</task>
<outputs>
<output>
<key>conditions</key>
<description>List of all identified conditions and constraints.</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>on_start</event>
</listen>
<agent>
<name>Condition Extraction Agent</name>
<model>gpt-4o-2024-08-06</model>
</agent>
</event>
<event>
<name>evaluate_and_solve</name>
<input>
<key>conditions</key>
<description>The extracted conditions and constraints.</description>
</input>
<task>Evaluate if conditions are sufficient and solve the math problem if possible.</task>
<outputs>
<output>
<key>insufficient_conditions</key>
<description>Conditions are not sufficient to solve the problem.</description>
<condition>If the current conditions are not enough to solve the problem.</condition>
<action>
<type>GOTO</type>
<value>extract_conditions</value>
</action>
</output>
<output>
<key>solution</key>
<description>Complete solution with steps and final answer.</description>
<condition>If conditions are sufficient to solve the problem.</condition>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>extract_conditions</event>
</listen>
<agent>
<name>Math Solver Agent</name>
<model>gpt-4o-2024-08-06</model>
</agent>
</event>
</events>
</workflow>

View file

@ -1,144 +0,0 @@
<workflow>
<system_input>
<key>user_topic</key>
<description>The topic that the user wants to write a Wikipedia-like article about.</description>
</system_input>
<system_output>
<key>article</key>
<description>The article that satisfies the user's request.</description>
</system_output>
<agents>
<agent category="existing">
<name>Web Surfer Agent</name>
<description>This agent is used to search the web for the user's topic.</description>
</agent>
<agent category="new">
<name>Outline Agent</name>
<description>This agent is used to write an outline for the user's topic.</description>
</agent>
<agent category="new">
<name>Evaluator Agent</name>
<description>This agent is used to evaluate the outline of the user's topic.</description>
</agent>
<agent category="new">
<name>Article Writer Agent</name>
<description>This agent is used to write the article for the user's topic.</description>
</agent>
</agents>
<events>
<event>
<name>on_start</name>
<input>
<key>user_topic</key>
<description>The topic that the user wants to write a Wikipedia-like article about.</description>
</input>
<task>
search the information about the topic and return the result.
</task>
<outputs>
<output>
<key>search_result</key>
<description>The search result of the user's topic.</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<agent>
<name>Web Surfer Agent</name>
<model>gpt-4o-2024-08-06</model>
</agent>
</event>
<event>
<name>on_outline</name>
<input>
<key>search_result</key>
<description>The search result of the user's topic.</description>
</input>
<task>
write an outline for the user's topic.
</task>
<outputs>
<output>
<key>outline</key>
<description>The outline of the user's topic.</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>on_start</event>
</listen>
<agent>
<name>Outline Agent</name>
<model>gpt-4o-2024-08-06</model>
</agent>
</event>
<event>
<name>on_evaluate</name>
<input>
<key>outline</key>
<description>The outline of the user's topic.</description>
</input>
<task>
evaluate the outline of the user's topic.
</task>
<outputs>
<output>
<key>positive_feedback</key>
<description>The positive feedback of the outline of the user's topic.</description>
<condition>
If the outline is good enough, give positive feedback.
</condition>
<action>
<type>RESULT</type>
</action>
</output>
<output>
<key>negative_feedback</key>
<description>The negative feedback of the outline of the user's topic.</description>
<condition>
If the outline is not good enough, give negative feedback.
</condition>
<action>
<type>ABORT</type>
</action>
</output>
</outputs>
<listen>
<event>on_outline</event>
</listen>
<agent>
<name>Evaluator Agent</name>
<model>gpt-4o-2024-08-06</model>
</agent>
</event>
<event>
<name>on_write</name>
<input>
<key>outline</key>
<description>The outline of user's topic.</description>
</input>
<task>
write the article for the user's topic.
</task>
<outputs>
<output>
<key>article</key>
<description>The article of the user's topic.</description>
<action>
<type>RESULT</type>
</action>
</output>
</outputs>
<listen>
<event>on_evaluate</event>
</listen>
<agent>
<name>Article Writer Agent</name>
<model>gpt-4o-2024-08-06</model>
</agent>
</event>
</events>
</workflow>

View file

@ -1,40 +0,0 @@
from metachain.types import Agent
from metachain.tools import (
get_api_plugin_tools_doc, check_agent, check_tool
)
from metachain.registry import register_agent
@register_agent(name = "Plan Agent", func_name="get_plan_agent")
def get_plan_agent(model: str):
    """Build the 'Plan Agent' for the metachain project.

    The agent inspects the existing code base via the `check_agent` and
    `check_tool` tools, drafts a coding plan for the user's request based on
    what already exists, asks the user to confirm or modify the plan, and then
    hands the confirmed plan over to the 'Programming Triage Agent'.

    Args:
        model: Name of the LLM that backs this agent.

    Returns:
        Agent: a configured Plan Agent (tool calls are sequential).
    """
    def instructions(context_variables):
        # `working_dir` is injected by the runtime and is interpolated into the
        # absolute path of the pre-built RAG memory class mentioned in rule 6.
        working_dir = context_variables.get("working_dir", None)
        # Fixed typo in the prompt: "util you finish" -> "until you finish".
        return \
f"""You are a planner working on an agent project named 'metachain' which can generate a coding plan for a given user request.
I want to use existing project code to solve the task. You should use the tools `check_agent` and `check_tool` to carefully go through the existing code to find out whether you should develop a new agent or new tool.
After you have checked the existing code, you should give a detailed plan for developing agents to solve the task based on the existing code, and ask user to confirm or modify the plan.
Finally, after user confirms the plan, you should generate the final coding plan and output it, and transfer the conversation to the 'Programming Triage Agent' to use the plan to execute the task until you finish the task, otherwise I will lose a lot of money.
Follow the following rules to develop new tools:
1. If you want to create new tools, you should first create a new file in the `metachain/metachain/tools` directory, write the function, and then add the function to the `metachain/metachain/tools/__init__.py`. Note that when add new tools into `__init__.py`, you first read the file content and keep the original content, then add the new tools into the file.
2. The tool is python functions.
3. When developing a new tool, you should follow the coding style of the existing tools, which means you should write docstring for the function, and add some useful comments to explain the code.
4. Function should usually return a `str` (values will be attempted to be cast as a `str`).
5. If you need to develop a new tool through external API, you should use `get_api_plugin_tools_doc` tool to get the tool doc, such as websearch, news search, financial tools, etc, otherwise you should develop a new tool by yourself.
6. If you need to develop a new tool related to vector database, you should use the pre-built class `Memory` in `/{working_dir}/metachain/metachain/memory/rag_memory.py` to save and retrieve the data.
Follow the following instructions to develop new agents:
1. If you want to create new agents, you should first create a new file in the `metachain/metachain/agents` directory, write the function `get_xxx_agent(model: str)`, and then add the function to the `metachain/metachain/agents/__init__.py`. Note that when add new agents into `__init__.py`, you first read the file content and keep the original content, then add the new agents into the file.
Note that your plan should fit the given rules.
"""
    return Agent(
        name="Plan Agent",
        model=model,
        instructions=instructions,
        functions=[check_agent, check_tool],
        # One tool call at a time keeps the check -> plan loop deterministic.
        parallel_tool_calls = False
    )

View file

@ -1,103 +0,0 @@
from metachain.types import Agent
from metachain.tools import (
gen_code_tree_structure, execute_command, read_file, create_file, write_file, list_files, create_directory, run_python, code_rag, case_resolved, get_api_plugin_tools_doc
)
from metachain.util import make_message, make_tool_message
from metachain.registry import register_agent
def examples(context_variables):
    """Return a few-shot example conversation for the Programming Agent.

    The transcript demonstrates the expected tool-use loop: create a file,
    run it, hit a missing-dependency error, install the dependency, and
    re-run until the task succeeds.

    Args:
        context_variables: Runtime context dict; only ``working_dir`` is read
            here and interpolated into the example file paths.

    Returns:
        list: Alternating user/assistant/tool messages built with
        ``make_message`` / ``make_tool_message``.
    """
    working_dir = context_variables.get("working_dir", None)
    examples_list = []
    # Step 1: the user's request.
    examples_list.extend(make_message('user', "Create a list of numbers from 1 to 10, and display them in a web page at port 5000."))
    # Step 2: the assistant decides to create a Flask app file.
    examples_list.extend(make_message('assistant', "I should first use create_file to write the python code into a file named 'app.py' for starting a web server"))
    examples_list.extend(make_tool_message(create_file, {'path': f"/{working_dir}/metachain/app.py",
                                                         'content': """
from flask import Flask
app = Flask(__name__)

@app.route('/')
def index():
    numbers = list(range(1, 11))
    return str(numbers)

if __name__ == '__main__':
    app.run(port=5000)"""}, f"File created at: /{working_dir}/metachain/app.py"))
    # Step 3: first run fails because Flask is not installed.
    examples_list.extend(make_message('assistant', 'I have created a Python file `app.py` that will display a list of numbers from 1 to 10 when you run it. Let me run the Python file for you using `run_python`'))
    examples_list.extend(make_tool_message(run_python, {'code_path': f"/{working_dir}/metachain/app.py"}, f"""
Traceback (most recent call last):
  File "/{working_dir}/metachain/app.py", line 2, in <module>
    from flask import Flask
ModuleNotFoundError: No module named 'flask'"""))
    # Step 4: recover from the error by installing the missing dependency.
    examples_list.extend(make_message('assistant', "It seems that Flask is not installed. Let me install Flask for you using `execute_command` by the command: pip install flask"))
    examples_list.extend(make_tool_message(execute_command, {'command': 'pip install flask'}, """Defaulting to user installation because normal site-packages is not writeable
Collecting flask
  Using cached flask-3.0.3-py3-none-any.whl (101 kB)
Collecting blinker>=1.6.2
  Using cached blinker-1.7.0-py3-none-any.whl (13 kB)
Collecting Werkzeug>=3.0.0
  Using cached werkzeug-3.0.2-py3-none-any.whl (226 kB)
Collecting click>=8.1.3
  Using cached click-8.1.7-py3-none-any.whl (97 kB)
Collecting itsdangerous>=2.1.2
  Using cached itsdangerous-2.2.0-py3-none-any.whl (16 kB)
Requirement already satisfied: Jinja2>=3.1.2 in /home/openhands/.local/lib/python3.10/site-packages (from flask) (3.1.3)
Requirement already satisfied: MarkupSafe>=2.0 in /home/openhands/.local/lib/python3.10/site-packages (from Jinja2>=3.1.2->flask) (2.1.5)
Installing collected packages: Werkzeug, itsdangerous, click, blinker, flask
Successfully installed Werkzeug-3.0.2 blinker-1.7.0 click-8.1.7 flask-3.0.3 itsdangerous-2.2.0"""))
    # Step 5: re-run after the install; the server starts successfully.
    examples_list.extend(make_message('assistant', "Now that Flask is installed, let me run the Python file again using `run_python`"))
    examples_list.extend(make_tool_message(run_python, {'code_path': f"/{working_dir}/metachain/app.py"}, """* Serving Flask app 'app'
 * Debug mode: off
WARNING: This is a development server. Do not use it in a production deployment. Use a production WSGI server instead.
 * Running on http://127.0.0.1:5000
Press CTRL+C to quit"""))
    # Step 6: final confirmation message closing the task.
    examples_list.extend(make_message('assistant', """The server is running on port 5000 with PID 124. You can access the list of numbers by visiting http://127.0.0.1:5000. Your task is completed."""))
    return examples_list
@register_agent(name = "Programming Agent", func_name="get_programming_agent")
def get_programming_agent(model: str):
    """Build the 'Programming Agent' for the metachain project.

    The agent uses the file/command tools to create, reuse, run, and debug
    code inside the project, optionally extended with "how to" guides passed
    through the context.

    Args:
        model: Name of the LLM that backs this agent.

    Returns:
        Agent: a configured Programming Agent with auto tool choice and
        sequential tool calls.
    """
    # Local import: only needed to resolve the API key when the prompt is built.
    import os

    def instructions(context_variables):
        # `working_dir` is injected by the runtime and anchors every absolute
        # path the agent is told to use.
        working_dir = context_variables.get("working_dir", None)
        # SECURITY FIX: the API key was previously hard-coded in this source
        # file. It is now read from the environment at prompt-build time
        # (see .env.template, which declares OPENAI_API_KEY). Never commit
        # secrets to source control.
        openai_api_key = os.environ.get("OPENAI_API_KEY", "")
        ret_instructions = \
f"""You are a programmer working on an agent project named 'metachain' whose path is /{working_dir}/metachain.
Your can leverage your capabilities by using the specific functions listed below:
1. Creating project structures based on the user requirement using function `create_directory`.
2. Writing clean, efficient, and well-documented code using function `create_file` and `write_file`.
3. You must run python scripts using function `run_python` rather than using the `execute_command` function.
4. Exam the project to re-use the existing code snippets as much as possible, you may need to use
functions like `list_files`, `read_file` and `write_file`.
5. Writing the code into the file when creating new files, do not create empty files.
6. If you are required to code base on the specific directory, you can use function `code_rag` to search the relatd codes in the specific directory, and remember you could only search one thing (like a function name, a class name, a variable name, etc.) in the codebase at a time.
7. Before you write code into the existing files, you should first read the file content using function `read_file` and reserve the original content as much as possible.
8. Decide whether the task requires execution and debugging before moving to the next or not.
9. Generate the commands to run and test the current task, and the dependencies list for this task.
10. You only write Python scripts, don't write Jupiter notebooks which require interactive execution.
11. Note that every path you read, write, or search should be the absolute path (starting with '/').
Your task is using existing project to create agents to complete the user request.
If the existing tools or agents are not enough for your task, you should develop new tools or agents.
Follow the following routine:
1. If there is enough pre-built tools and agents, create a python script in the `/{working_dir}/metachain` folder to run the agent to complete the user request.
2. If you need to develop new tools, create a new tool in the `/{working_dir}/metachain/metachain/tools` folder.
3. If you need to develop new agents, create a new agent in the `/{working_dir}/metachain/metachain/agents` folder.
4. Create a python script in the `/{working_dir}/metachain` folder to run the new agent to complete the user request.
Note that if you need OPENAI_API_KEY, my key is: {openai_api_key}
"""
        # Optional extension: the runtime may supply extra development guides.
        how_to_guides = context_variables.get("how_to_guides", None)
        if how_to_guides:
            ret_instructions += \
f"""
If you want to develop new tools or agents, you should follow the following guides:
{how_to_guides}
"""
        return ret_instructions
    return Agent(
        name="Programming Agent",
        model=model,
        instructions=instructions,
        functions=[gen_code_tree_structure, execute_command, read_file, create_file, write_file, list_files, create_directory, run_python, code_rag, get_api_plugin_tools_doc],
        # examples=examples,
        tool_choice = "auto",
        parallel_tool_calls = False
    )

View file

@ -1,415 +0,0 @@
from metachain.types import Agent
from metachain.tools import (
gen_code_tree_structure, execute_command, read_file, create_file, write_file, list_files, create_directory, run_python, code_rag, case_resolved, get_api_plugin_tools_doc
)
from metachain.util import make_message, make_tool_message
from metachain.registry import register_agent
@register_agent(name = "Tool Creation Agent", func_name="get_tool_creation_agent")
def get_tool_creation_agent(model: str):
def tool_creation_instructions(context_variables):
working_dir = context_variables.get("working_dir", None)
return \
f"""You are working on an agent project named 'metachain' whose path is /{working_dir}/metachain.
Your task is to develop new tools in the `/{working_dir}/metachain/metachain/tools` folder.
Follow the following instructions to develop new tools:
1. If you want to create new tools, you should first create a new file in the `metachain/metachain/tools` directory, write the function, and then add the function to the `metachain/metachain/tools/__init__.py`. Note that when add new tools into `__init__.py`, you first read the file content and keep the original content, then add the new tools into the file.
2. The tool is python functions.
3. When developing a new tool, you should follow the coding style of the existing tools, which means you should write docstring for the function, and add some useful comments to explain the code.
4. Function should usually return a `str` (values will be attempted to be cast as a `str`).
5. If there is any error during the development process, you should use tools to debug the error and fix the error, and you should not transfer the conversation back to the 'Programming Triage Agent' util the error is fixed.
6. If you need to develop a new tool through external API, you should use `get_api_plugin_tools_doc` tool to get the tool doc, such as websearch, news search, financial tools, etc, otherwise you should develop a new tool by yourself.
7. If you need to develop a new tool related to vector database, you should use the pre-built class `Memory` in `/{working_dir}/metachain/metachain/memory/rag_memory.py` to save and retrieve the data.
8. You can add `if __name__ == "__main__":` at the end of the function file to make sure the function can be executed, and after testing all functions you should develop, using `transfer_back_to_programming_triage_agent` function to transfer the conversation back to the 'Programming Triage Agent', note that you should not transfer the conversation back to the 'Programming Triage Agent' util you finish the your task that is to develop all the tools and make sure they can be executed.
Note that if you need OPENAI_API_KEY, my key is: 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
"""
return Agent(
name="Tool Creation Agent",
model=model,
instructions=tool_creation_instructions,
functions=[gen_code_tree_structure, execute_command, read_file, create_file, write_file, list_files, create_directory, run_python, code_rag, get_api_plugin_tools_doc],
tool_choice = "auto",
parallel_tool_calls = False
)
@register_agent(name = "Agent Creation Agent", func_name="get_agent_creation_agent")
def get_agent_creation_agent(model: str):
def agent_creation_instructions(context_variables):
working_dir = context_variables.get("working_dir", None)
return \
f"""You are working on an agent project named 'metachain' whose path is /{working_dir}/metachain.
Your task is to develop new agents in the `/{working_dir}/metachain/metachain/agents` folder.
Follow the following instructions to develop new agents:
1. If you want to create new agents, you should first create a new file in the `metachain/metachain/agents` directory, write the function `get_xxx_agent(model: str)`, and then add the function to the `metachain/metachain/agents/__init__.py`. Note that when add new agents into `__init__.py`, you first read the file content and keep the original content, then add the new agents into the file.
2. In this stage, you should not run the agent, you should only develop the agent.
3. You may need to develop more than one agent, and in this stage you should not concern the relationship between agents.
4. After developing a new agent, you should use `transfer_back_to_programming_triage_agent` function to transfer the conversation back to the 'Programming Triage Agent', note that you should not transfer the conversation back to the 'Programming Triage Agent' util you finish the your task that is to develop all the agents.
And there is a guide for you to follow:
"""+\
r"""An `Agent` simply encapsulates a set of `instructions` with a set of `functions` (plus some additional settings below), and has the capability to hand off execution to another `Agent`.
While it's tempting to personify an `Agent` as "someone who does X", it can also be used to represent a very specific workflow or step defined by a set of `instructions` and `functions` (e.g. a set of steps, a complex retrieval, single step of data transformation, etc). This allows `Agent`s to be composed into a network of "agents", "workflows", and "tasks", all represented by the same primitive.
### `Agent` Fields
| Field | Type | Description | Default |
| ---------------- | ------------------------ | ------------------------------------------------------------ | ---------------------------- |
| **name** | `str` | The name of the agent. | `"Agent"` |
| **model** | `str` | The model to be used by the agent. | `"gpt-4o"` |
| **instructions** | `str` or `func() -> str` | Instructions for the agent, can be a string or a callable returning a string. | `"You are a helpful agent."` |
| **functions** | `List` | A list of functions that the agent can call. | `[]` |
| **tool_choice** | `str` | The tool choice for the agent, if any. | `None` |
#### Instructions
`Agent` `instructions` are directly converted into the `system` prompt of a conversation (as the first message). Only the `instructions` of the active `Agent` will be present at any given time (e.g. if there is an `Agent` handoff, the `system` prompt will change, but the chat history will not.)
```python
agent = Agent(
instructions="You are a helpful agent."
)
```
The `instructions` can either be a regular `str`, or a function that returns a `str`. The function can optionally receive a `context_variables` parameter, which will be populated by the `context_variables` passed into `client.run()`.
```python
def instructions(context_variables):
user_name = context_variables["user_name"]
return f"Help the user, {user_name}, do whatever they want."
agent = Agent(
instructions=instructions
)
response = client.run(
agent=agent,
messages=[{"role":"user", "content": "Hi!"}],
context_variables={"user_name":"John"}
)
print(response.messages[-1]["content"])
```
```
Hi John, how can I assist you today?
```
"""
return Agent(
name="Agent Creation Agent",
model=model,
instructions=agent_creation_instructions,
functions=[gen_code_tree_structure, execute_command, read_file, create_file, write_file, list_files, create_directory, run_python, code_rag],
tool_choice = "auto",
parallel_tool_calls = False
)
@register_agent(name = "Workflow Run Agent", func_name="get_workflow_run_agent")
def get_workflow_run_agent(model: str):
def workflow_run_instructions(context_variables):
working_dir = context_variables.get("working_dir", None)
return \
f"""You are working on an agent project named 'metachain' whose path is /{working_dir}/metachain.
Your task is to run workflows to complete the user request.
Follow the following instructions to run workflows:
1. The workflow is a directed graph represented by a dictionary, with the format:
""" +\
r"""
{
"type": "object",
"properties": {
"nodes": {
"type": "array",
"items": {
"type": "object",
"properties": {
"agent_name": {"type": "string"},
"agent_tools": {"type": "array", "items": {"type": "string"}},
"input": {"type": "string"},
"output": {"type": "string"},
"is_start": {"type": "boolean"},
"is_end": {"type": "boolean"}
},
"required": ["agent_name", "agent_tools", "input", "output", "is_start", "is_end"],
"additionalProperties": False
}
},
"edges": {
"type": "array",
"items": {
"type": "object",
"properties": {
"start": {"type": "string"},
"end": {"type": "string"},
"description": {"type": "string"}
},
"required": ["start", "end", "description"],
"additionalProperties": False
}
}
},
"required": ["nodes", "edges"],
"additionalProperties": False
}
2. First create a python script named `run_xxx_workflow.py` in the `/{working_dir}/metachain` directory, and the workflow graph should be instantiated by `Graph` class in `metachain/metachain/workflow/flowgraph.py`, using `Graph.from_dict()` method.
3. After instantiating the workflow graph, you should use `FlowEngine` class in `metachain/metachain/workflow/flowengine.py`, using `FlowEngine(g = g, model=model)` to instantiate the workflow engine.
4. Then you can use `engine.run_meta(query, context_variables = context_variables, debug = True)` to run the workflow
5. After running the workflow, you should tell the 'Programming Triage Agent' final running results and use `transfer_back_to_programming_triage_agent` function to transfer the conversation back to the 'Programming Triage Agent'.
6. If there is any error during the running process, you should use tools to debug the error and fix the error, and you should not transfer the conversation back to the 'Programming Triage Agent' util the error is fixed.
"""+\
r"""
There is an example to run a workflow based on the 'metachain' project:
```python
from metachain.workflow import Graph, FlowEngine
from metachain.types import Response
import os
os.environ["OPENAI_API_KEY"] = "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"
model = 'gpt-4o-2024-08-06'
workflow_dict = {
"nodes": [
{
"agent_name": "user_request",
"agent_tools": [],
"input": "PDF file",
"output": "PDF file",
"is_start": True,
"is_end": False
},
{
"agent_name": "read_pdf_agent",
"agent_tools": [
"read_pdf"
],
"input": "PDF file",
"output": "Extracted text",
"is_start": False,
"is_end": False
},
{
"agent_name": "chunk_text_agent",
"agent_tools": [
"chunk_text"
],
"input": "Extracted text",
"output": "Chunked text",
"is_start": False,
"is_end": False
},
{
"agent_name": "vectordb_agent",
"agent_tools": [
"vectordb_save"
],
"input": "Chunked text",
"output": "Text saved to VectorDB",
"is_start": False,
"is_end": False
},
{
"agent_name": "retrieve_vectordb_agent",
"agent_tools": [
"retrieve_vectordb"
],
"input": "Text saved to VectorDB",
"output": "Method section text",
"is_start": False,
"is_end": False
},
{
"agent_name": "output",
"agent_tools": [],
"input": "Method section text",
"output": "Description of Method section",
"is_start": False,
"is_end": True
}
],
"edges": [
{
"start": "user_request",
"end": "read_pdf_agent",
"description": "Send PDF to be read."
},
{
"start": "read_pdf_agent",
"end": "chunk_text_agent",
"description": "Send extracted text for chunking."
},
{
"start": "chunk_text_agent",
"end": "vectordb_agent",
"description": "Save chunked text to VectorDB."
},
{
"start": "vectordb_agent",
"end": "retrieve_vectordb_agent",
"description": "Retrieve Method section."
},
{
"start": "retrieve_vectordb_agent",
"end": "output",
"description": "Output of Method section text."
}
]
}
g = Graph.from_dict(workflow_dict)
engine = FlowEngine(g = g, model=model)
query = 'I have a paper in the pdf format, and I want to know what the method section is about.'
context_variables = {}
response: Response = engine.run_meta(query, context_variables = context_variables, debug = True)
print(response.messages[-1]['content'])
```
"""
return Agent(
name="Workflow Run Agent",
model=model,
instructions=workflow_run_instructions,
functions=[gen_code_tree_structure, execute_command, read_file, create_file, write_file, list_files, create_directory, run_python, code_rag],
tool_choice = "auto",
parallel_tool_calls = False
)
@register_agent(name = "Agent Run Agent", func_name="get_agent_run_agent")
def get_agent_run_agent(model: str):
def agent_run_instructions(context_variables):
working_dir = context_variables.get("working_dir", None)
return \
f"""You are working on an agent project named 'metachain' whose path is /{working_dir}/metachain.
Your task is to run agents to complete the user request.
Follow the following instructions to run agents:
1. To complete the user request using 'metachain' project, you need to run the agent by creating a python file named `run_xxx_agent.py` in the 'metachain' directory, and use `run_python` function to run the agent.
2. If there is any error during the running process, you should use tools to debug the error and fix the error, and you should not transfer the conversation back to the 'Programming Triage Agent' util the error is fixed.
3. After running the agent, you should tell the 'Programming Triage Agent' final running results and use `transfer_back_to_programming_triage_agent` function to transfer the conversation back to the 'Programming Triage Agent', note that you should not transfer the conversation back to the 'Programming Triage Agent' util you finish the your task that is to run all the agents.
Note that if you need OPENAI_API_KEY, my key is: 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
And there is a guide for you to follow:
"""+\
r"""
```python
from metachain import MetaChain
from metachain.agents import get_programming_agent
client = MetaChain()
programming_agent = get_programming_agent(model)
context_variables = {"key": value}
messages = [{"role": "user", "content": task_instructions}]
response = client.run(agent=programming_agent, messages=messages, context_variables=context_variables, debug=True)
```
### `client.run()`
MetaChain's `run()` function is analogous to the `chat.completions.create()` function in the Chat Completions API - it takes `messages` and returns `messages` and saves no state between calls. Importantly, however, it also handles Agent function execution, hand-offs, context variable references, and can take multiple turns before returning to the user.
At its core, MetaChain's `client.run()` implements the following loop:
1. Get a completion from the current Agent
2. Execute tool calls and append results
3. Switch Agent if necessary
4. Update context variables, if necessary
5. If no new function calls, return
#### Arguments
| Argument | Type | Description | Default |
| --------------------- | ------- | ------------------------------------------------------------ | -------------- |
| **agent** | `Agent` | The (initial) agent to be called. | (required) |
| **messages** | `List` | A list of message objects, identical to [Chat Completions `messages`](https://platform.openai.com/docs/api-reference/chat/create#chat-create-messages) | (required) |
| **context_variables** | `dict` | A dictionary of additional context variables, available to functions and Agent instructions | `{}` |
| **max_turns** | `int` | The maximum number of conversational turns allowed | `float("inf")` |
| **model_override** | `str` | An optional string to override the model being used by an Agent | `None` |
| **execute_tools** | `bool` | If `False`, interrupt execution and immediately returns `tool_calls` message when an Agent tries to call a function | `True` |
| **stream** | `bool` | If `True`, enables streaming responses | `False` |
| **debug** | `bool` | If `True`, enables debug logging | `False` |
Once `client.run()` is finished (after potentially multiple calls to agents and tools) it will return a `Response` containing all the relevant updated state. Specifically, the new `messages`, the last `Agent` to be called, and the most up-to-date `context_variables`. You can pass these values (plus new user messages) in to your next execution of `client.run()` to continue the interaction where it left off much like `chat.completions.create()`. (The `run_demo_loop` function implements an example of a full execution loop in `/MetaChain/repl/repl.py`.)
#### `Response` Fields
| Field | Type | Description |
| --------------------- | ------- | ------------------------------------------------------------ |
| **messages** | `List` | A list of message objects generated during the conversation. Very similar to [Chat Completions `messages`](https://platform.openai.com/docs/api-reference/chat/create#chat-create-messages), but with a `sender` field indicating which `Agent` the message originated from. |
| **agent** | `Agent` | The last agent to handle a message. |
| **context_variables** | `dict` | The same as the input variables, plus any changes. |
"""
return Agent(
name="Agent Run Agent",
model=model,
instructions=agent_run_instructions,
functions=[gen_code_tree_structure, execute_command, read_file, create_file, write_file, list_files, create_directory, run_python, code_rag],
tool_choice = "auto",
parallel_tool_calls = False
)
@register_agent(name = "Programming Triage Agent", func_name="get_programming_triage_agent")
def get_programming_triage_agent(model: str):
def programming_triage_instructions(context_variables):
working_dir = context_variables.get("working_dir", None)
ret_instructions = \
f"""You are a programmer working on an agent project named 'metachain' whose path is /{working_dir}/metachain.
Your overall task is using existing project to create agents or workflows to complete the user request.
If the existing tools or agents are not enough for your task, you should develop new tools or agents.
And you should determine which agent is best suited to handle the user's request, and transfer the conversation to that agent based on the following routine:
1. If you need to develop new tools, transfer the conversation to the 'Tool Creation Agent' to create a new tool in the `/{working_dir}/metachain/metachain/tools` folder using function `transfer_to_tool_creation_agent`.
2. If you need to develop new agents, transfer the conversation to the 'Agent Creation Agent' to create a new agent in the `/{working_dir}/metachain/metachain/agents` folder using function `transfer_to_agent_creation_agent`.
3. After there is enough pre-built tools and agents, transfer the conversation to the 'Agent Run Agent' or 'Workflow Run Agent' to create agents or workflows to complete the user request using function `transfer_to_agent_run_agent` or `transfer_to_workflow_run_agent`.
4. Note that if you should create both new tools and new agents, you should create the new tools first, and then create the new agents.
Note that if there are not enough pre-built tools, you should develop new tools first, and then develop new agents, and finally run the workflow or agent to complete the user request.
Once you receive the develop plan, you should not stop util you finish the task.
"""
how_to_guides = context_variables.get("how_to_guides", None)
if how_to_guides:
ret_instructions += \
f"""
If you want to develop new tools or agents, you should follow the following guides:
{how_to_guides}
"""
return ret_instructions
tool_creation_agent = get_tool_creation_agent(model)
agent_creation_agent = get_agent_creation_agent(model)
workflow_run_agent = get_workflow_run_agent(model)
agent_run_agent = get_agent_run_agent(model)
def transfer_to_tool_creation_agent(input: str):
return tool_creation_agent
def transfer_to_agent_creation_agent(input: str):
return agent_creation_agent
def transfer_to_workflow_run_agent(input: str):
return workflow_run_agent
def transfer_to_agent_run_agent(input: str):
return agent_run_agent
programming_triage_agent = Agent(
name="Programming Triage Agent",
model=model,
instructions=programming_triage_instructions,
functions=[transfer_to_tool_creation_agent, transfer_to_agent_creation_agent, transfer_to_workflow_run_agent, transfer_to_agent_run_agent],
tool_choice = "auto",
parallel_tool_calls = False
)
def transfer_back_to_programming_triage_agent():
"""Call this function if the existing agent has already finished the sub-task."""
return programming_triage_agent
tool_creation_agent.functions.append(transfer_back_to_programming_triage_agent)
agent_creation_agent.functions.append(transfer_back_to_programming_triage_agent)
workflow_run_agent.functions.append(transfer_back_to_programming_triage_agent)
agent_run_agent.functions.append(transfer_back_to_programming_triage_agent)
return programming_triage_agent
if __name__ == "__main__":
print(agent_creation_instructions({"working_dir": "metachain"}))

View file

@ -1,38 +0,0 @@
from metachain.registry import register_agent
from metachain.tools.meta.edit_agents import list_agents, create_agent, delete_agent, run_agent
from metachain.tools.terminal_tools import execute_command
from metachain.types import Agent
from metachain.io_utils import read_file
@register_agent(name = "Agent Editor Agent", func_name="get_agent_editor_agent")
def get_agent_editor_agent(model: str) -> str:
"""
The agent editor is an agent that can be used to edit the agents.
"""
def instructions(context_variables):
return f"""\
You are an agent editor agent that can be used to edit the agents. You are working on a Agent framework named MetaChain, and your responsibility is to edit the agents in the MetaChain, so that the agents can be used to help the user with their request.
The existing agents are shown below:
{list_agents(context_variables)}
If you want to create a new agent, you should:
1. follow the format of the `get_dummy_agent` below:
```python
{read_file('metachain/agents/dummy_agent.py')}
```
2. you successfully create the agent only after you have successfully run the agent with the `run_agent` function to satisfy the user's request.
3. If you encounter any error while creating and running the agent, like dependency missing, you should use the `execute_command` function to install the dependency.
[IMPORTANT] The `register_plugin_agent` registry function is strictly required for a agent implementation to be recognized by the MetaChain framework.
"""
tool_list = [list_agents, create_agent, delete_agent, run_agent, execute_command]
return Agent(
name="Agent Editor Agent",
model=model,
instructions=instructions,
functions=tool_list,
tool_choice = "required",
parallel_tool_calls = False
)

View file

@ -1,40 +0,0 @@
from metachain.types import Agent
from pydantic import BaseModel
from metachain.tools.meta.tool_retriever import get_api_plugin_tools_doc
from metachain.tools.meta.search_tools import search_trending_models_on_huggingface, get_hf_model_tools_doc
from metachain.tools.meta.edit_tools import list_tools
from typing import Union
from metachain.environment import DockerEnv, LocalEnv
def get_meta_plan_agent(model: str) -> Agent:
    """Return the 'Meta Plan Agent' that plans tool usage for the Tool Editor Agent.

    Args:
        model: Identifier of the LLM backing the agent.

    Returns:
        Agent: the configured 'Meta Plan Agent'.
    """
    def instructions(context_variables):
        # CLEANUP: the original fetched `code_env` from context_variables (with a
        # LocalEnv() default) and never used it; the dead lookup was removed.
        # Local renamed from `instructions` to avoid shadowing this function.
        prompt = f"""\
You are a helpful planner that can help `Tool Editor Agent` how to use MetaChain to solve the user's request.
Existing tools you already have:
{list_tools(context_variables)}
You should first fully understand the user's request, then analyze the existing tools and determine which tools are needed to solve the user's request, finally, you should transfer the conversation to the `Meta Agent` with the plan of using the tools.
If existing tools are not enough for your task, you should develop new tools.
1. [IMPORTANT] If you want to use third-party api, especially for some tasks related to Finance, Entertainment, eCommerce, Food, Travel, Sports, you MUST use the `get_api_plugin_tools_doc` tool to search information from existing api documents, it contains how to implement the api and API keys.
2. [IMPORTANT] If you want to use Hugging Face models, especially for some tasks related to vision, audio, video, you should use the `search_trending_models_on_huggingface` tool to search trending models related to the specific task on Hugging Face, and then use the `get_hf_model_tools_doc` tool to get the detailed information about the specific model.
3. [IMPORTANT] As for the tags ['image-text-to-text', 'visual-question-answering', 'video-text-to-text'] and ANY visual tasks, you should use `visual_question_answering` tool instead of Hugging Face models.
4. [IMPORTANT] You can not use `transfer_back_to_meta_agent_with_plans` util you have fully understood the user's request and have try your best to search information from exsiting resources if you want to create a new tool.
"""
        return prompt
    return Agent(
        name="Meta Plan Agent",
        model=model,
        instructions=instructions,
        functions=[get_api_plugin_tools_doc, search_trending_models_on_huggingface, get_hf_model_tools_doc],
        tool_choice = "required",
        parallel_tool_calls = False
    )

View file

@ -1,100 +0,0 @@
from metachain.registry import register_agent
from metachain.types import Agent, Result
from metachain.environment import DockerEnv, LocalEnv
from metachain.tools.meta.edit_tools import list_tools
from metachain.agents.tool_agent.tool_editor import get_tool_editor_agent
from typing import Union
from metachain.tools.inner import case_resolved, case_not_resolved
from pydantic import BaseModel
from metachain.util import function_to_json
from metachain.agents.tool_agent.meta_plan_agent import get_meta_plan_agent
class ToolDescription(BaseModel):
    """Structured description of one tool referenced in a development plan."""
    # Free-text summary of what the tool does.
    tool_functionalities: str
    # True when the tool already exists in MetaChain; False when it must be created.
    existing: bool
    # Documentation for the tool (relevant when it does not yet exist).
    tool_docs: str
class ToolPlan(BaseModel):
    """One step of a tool-development plan: a tool name plus its description."""
    # Name of the tool to use or to develop.
    tool_name: str
    # Details about the tool (functionality, existence, docs).
    tool_description: ToolDescription
@register_agent(name = "Tool Agent", func_name="get_tool_agent")
def get_tool_agent(model: str) -> Agent:
"""
The tool agent is an agent that can be used to create and run other tools.
"""
def instructions(context_variables):
code_env: Union[DockerEnv, LocalEnv] = context_variables.get("code_env", LocalEnv())
instructions = f"""\
You are a helpful assistant that can help the user with their request by creating and running tools in the Metachain agent framework. Your responsibility is to determine which sub-agent is best suited to handle the user's request under the current context, and transfer the conversation to that sub-agent. And you should not stop to try to solve the user's request by transferring to another sub-agent only until the task is completed.
Your sub-agents are:
1. `Meta Plan Agent`: This agent is used to plan how to use MetaChain to solve the user's request.
2. `Tool Editor Agent`: This agent is used to run and edit tools.
Existing tools you already have:
{list_tools(context_variables)}
You should first transfer the conversation to the `Meta Plan Agent` to plan how to use MetaChain to solve the user's request, and the plan should follow the following constraints:
1. If exising tools are enough for your task, you can directly use them to solve the user's request.
2. If exising tools are not enough for your task, `Meta Plan Agent` should search information from the resources and plan how to create new tools.
3. [IMPORTANT] As for the tags ['image-text-to-text', 'visual-question-answering', 'video-text-to-text'] and ANY visual tasks, you should use `visual_question_answering` tool instead of Hugging Face models.
"""
return instructions
tool_editor_agent: Agent = get_tool_editor_agent(model)
meta_plan_agent: Agent = get_meta_plan_agent(model)
def transfer_to_tool_editor_agent(sub_task: str):
"""
Args:
sub_task: The detailed description of the sub-task that the `Meta Agent` will ask the `Tool Editor Agent` to do.
"""
return tool_editor_agent
def transfer_to_meta_plan_agent(sub_task: str):
"""
Use this function when you want to plan how to use MetaChain to solve the user's request.
Args:
sub_task: The detailed description of the sub-task that the `Meta Agent` will ask the `Meta Plan Agent` to do.
"""
return meta_plan_agent
meta_agent = Agent(
name="Meta Agent",
model=model,
instructions=instructions,
functions=[transfer_to_meta_plan_agent, transfer_to_tool_editor_agent, case_resolved, case_not_resolved],
tool_choice = "required",
parallel_tool_calls = False
)
def transfer_back_to_meta_agent(task_status: str):
"""
Args:
task_status: The status of the task that the `Meta Agent` will ask the `Meta Agent` to do.
"""
return meta_agent
def transfer_back_to_meta_agent_with_plans(tool_development_steps: list[ToolPlan]) -> str:
"""
This function is used to plan how to use MetaChain to solve the user's request. You can use this function only after you have fully understood the user's request and have try your best to search information from exsiting resources.
Args:
tool_development_steps: The steps of tool development. It is a list of dictionaries, each dictionary contains the tools name you should use in the exsiting MetaChain or the tools name you should develop. If the tool is not existing, dictionaries should contain the tool documentation.
"""
tool_str = "\n".join([f"{tool['tool_name']}: {tool['tool_description']['tool_functionalities']} [{tool['tool_description']['existing']}]" for tool in tool_development_steps])
ret_val = f"""\
Receiving user's request, I have the following plans to use MetaChain to solve the user's request:
As for using existing tools, I have the following plans:
{tool_str}
"""
return Result(
value=ret_val,
agent=meta_agent
)
tool_editor_agent.functions.append(transfer_back_to_meta_agent)
meta_plan_agent.functions.append(transfer_back_to_meta_agent_with_plans)
return meta_agent

View file

@ -1,143 +0,0 @@
from metachain.registry import register_agent
from metachain.tools.meta.edit_tools import list_tools, create_tool, delete_tool, run_tool, get_metachain_path
from metachain.tools.meta.tool_retriever import get_api_plugin_tools_doc
from metachain.tools.meta.search_tools import search_trending_models_on_huggingface, get_hf_model_tools_doc
from metachain.types import Agent
from metachain.io_utils import read_file
from metachain.tools.terminal_tools import execute_command
def get_tool_editor_agent(model: str) -> Agent:
    """
    Build the Tool Editor Agent, which creates, edits, tests, and deletes
    plugin tools in the MetaChain framework.

    Args:
        model: Name of the LLM that backs the agent.

    Returns:
        Agent: configured with the tool-management functions, with
        `tool_choice="required"` (the agent must call a tool every turn) and
        parallel tool calls disabled (tools run one at a time).
    """
    def instructions(context_variables):
        # The prompt embeds the live tool listing and the dummy-tool template
        # at call time, so the agent always sees the current plugin state.
        return f"""\
You are a tool editor agent responsible for managing plugin tools in the MetaChain framework. Your core responsibility is to edit, create, and manage plugin tools that can be used by other agents.
[PLUGIN TOOLS SYSTEM]
- Plugin tools are the building blocks of MetaChain
- All available plugin tools are as follows:
{list_tools(context_variables)}
- Plugin tools can ONLY be executed using `run_tool(tool_name, run_code)`
- NEVER try to import and run tools directly - always use `run_tool`
[AVAILABLE MANAGEMENT TOOLS]
1. list_tools():
- Lists all existing plugin tools
- Returns: tool name, arguments, docstring, implementation details
- Use this FIRST to check existing tools
2. create_tool(tool_name: str, tool_code: str):
- Creates new plugin tools
- Requires proper registration using @register_plugin_tool
3. run_tool(tool_name: str, run_code: str,):
- REQUIRED method to execute any plugin tool
- Format: run_tool("tool_name", "from metachain.tools import tool_name; print(tool_name(args))")
4. delete_tool(tool_name: str,):
- Removes existing plugin tools
- Use with caution
5. get_api_plugin_tools_doc:
- Required for third-party API integrations
- Must be used for Finance, Entertainment, etc.
6. execute_command:
- Handles system-level operations
- Use for dependency installation
[CRITICAL PRINCIPLES FOR PLUGIN TOOLS]
1. Tools MUST be abstract, modular, and reusable:
- Use generic function names (e.g., `download_media` instead of `download_youtube_video`)
- Break complex tasks into smaller, reusable components
- Avoid task-specific implementations
- Use parameters instead of hardcoded values
2. For ALL visual tasks (images, videos, visual analysis):
- MUST use the existing `visual_question_answering` plugin tool
- NO direct implementation of visual processing
- Chain `visual_question_answering` with other tools as needed
[WORKFLOW FOR PLUGIN TOOL MANAGEMENT]
1. Always start with `list_tools()` to check existing tools
2. For new plugin tools:
a. Design generic, reusable interface
b. Follow the template format:
```python
{read_file('metachain/tools/dummy_tool.py')}
```
c. Create using `create_tool`
d. Test using `run_tool`
e. Handle dependencies with `execute_command`
[IMPORTANT RULES]
- ALL tools must be registered with @register_plugin_tool
- ALL tools must have type hints
- Each tool does ONE thing well
- Create modular tools that can be combined
- ALWAYS use `run_tool` to execute plugin tools
- NEVER modify the `visual_question_answering` tool
[TOOL TESTING EXAMPLE]
Correct way to test a plugin tool:
```python
result = run_tool(
tool_name="your_tool",
run_code="from metachain.tools import your_tool; print(your_tool(param1='value1'))",
context_variables=context_variables
)
```
"""
    # Management functions exposed to the LLM as callable tools.
    tool_list = [list_tools, create_tool, run_tool, delete_tool, get_api_plugin_tools_doc, execute_command]
    return Agent(
        name="Tool Editor Agent",
        model=model,
        instructions=instructions,
        functions=tool_list,
        tool_choice = "required",
        parallel_tool_calls = False
    )
"""
5. [IMPORTANT] If you want to use Hugging Face models, especially for some tasks related to vision, audio, video, you should use the `search_trending_models_on_huggingface` tool to search trending models related to the specific task on Hugging Face, and then use the `get_hf_model_tools_doc` tool to get the detailed information about the specific model.
6. [IMPORTANT] As for the tags ['image-text-to-text', 'visual-question-answering', 'video-text-to-text'] and ANY visual tasks, you should use `visual_question_answering` tool instead of Hugging Face models.
"""
"""\
You are a tool editor agent that can be used to edit the tools. You are working on a Agent framework named MetaChain, and your responsibility is to edit the tools in the MetaChain, so that the tools can be used by the agents to help the user with their request.
The existing tools are shown below:
{list_tools(context_variables)}
If you want to create a new tool, you should:
1. follow the format of the `tool_dummy` below. Note that if the tool should be used with third-part api key, you should write the api key inside the definition of the tool:
```python
{read_file('metachain/tools/dummy_tool.py')}
```
2. you successfully create the tool only after you have successfully run the tool with the `run_tool` function, and an example of testing the tool is shown below.:
```python
from metachain.tools import tool_dummy
if __name__ == "__main__":
... # some pre-operations
print(run_tool(tool_name="tool_dummy", run_code="from metachain.tools import tool_dummy; print(tool_dummy(args1=args1, args2=args1, ...))"))
```
3. If you encounter any error while creating and running the tool, like dependency missing, you should use the `execute_command` function to install the dependency.
4. [IMPORTANT] If you want to use third-party api, especially for some tasks related to Finance, Entertainment, eCommerce, Food, Travel, Sports, you MUST use the `get_api_plugin_tools_doc` tool to search information from existing api documents, it contains how to implement the api and API keys.
[IMPORTANT] The `register_plugin_tool` registry function is strictly required for a tool implementation to be recognized by the MetaChain framework.
[IMPORTANT] The tool you create should be abstract, modular, and reusable. Specifically, the function name must be generic (e.g.,
`count_objects` instead of `count_apples`). The function must use parameters instead of hard-coded values. The
function body must be self-contained.
[IMPORTANT] Explicitly declare input and output data types using type hints.
[IMPORTANT] For ANY visual tasks related to image and video, you should use `visual_question_answering` tool.
"""

View file

@ -1,28 +0,0 @@
from metachain.types import Agent
from metachain.tools import (
get_api_plugin_tools_doc, check_tool
)
from metachain.registry import register_agent
@register_agent(name = "Tool Check Agent", func_name="get_tool_check_agent")
def get_tool_check_agent(model: str):
    """
    Build the Tool Check Agent, which reviews existing tools and decides
    whether a new tool must be developed for a given user request.

    Args:
        model: Name of the LLM that backs the agent.

    Returns:
        Agent: configured with the `check_tool` function and with parallel
        tool calls disabled (tools run one at a time).
    """
    def instructions(context_variables):
        # Static prompt; context_variables is accepted for interface
        # consistency with other agents but is not interpolated here.
        return \
f"""You are a developer working on a project named 'metachain'.
You are given a user request and required to use existing project code to solve the task.
Your goal is to enrich the functionality of existing list of tools in the `tools` folder as much as possible, so that once the similar task occurs again, the agent can solve it directly by using the tools without developing new tools.
You should decide whether you should develop some new tools to integrate into the agent to directly solve the task.
If you use an external api, you should always develop a new tool, rather than using coding-related tools.
Answer 'Needed' or 'Not needed' first and then give your reason. ('Needed' means you should develop a new tool, 'Not needed' means you should not develop a new tool).
You can use `check_tool` tool to review the existing tools and check whether developing a new tool is needed.
"""
    return Agent(
        name="Tool Check Agent",
        model=model,
        instructions=instructions,
        functions=[check_tool],
        parallel_tool_calls = False
    )
"""If you need to develop a new tool, you must use `get_tool_doc` tool to get the tool doc."""

View file

@ -8,13 +8,6 @@ def load_cookies_from_json(json_path):
cookies = json.load(f)
return cookies
# COOKIES_LIST = []
# COOKIES_LIST += load_cookies_from_json(wd / "cookie_json" / "orcid.org.cookies.json")
# COOKIES_LIST += load_cookies_from_json(wd / "cookie_json" / "www.researchgate.net.cookies.json")
# COOKIES_LIST += load_cookies_from_json(wd / "cookie_json" / "github.com.cookies.json")
# COOKIES_LIST += load_cookies_from_json(wd / "cookie_json" / "www.youtube.com.cookies.json")
# COOKIES_LIST += load_cookies_from_json(wd / "cookie_json" / "www.ncbi.nlm.nih.gov.cookies.json")
# COOKIES_LIST += load_cookies_from_json(wd / "cookie_json" / "archive.org.cookies.json")
def convert_cookies_to_python():
all_cookies = []

View file

@ -0,0 +1,36 @@
# How to obtain cookie json files
## What are cookies?
Cookies are small pieces of data stored by websites on users' computers, containing information like login status and preferences. They are essential for web automation as they allow automated browsers to maintain authenticated sessions, skip repeated logins, and simulate real user behavior across multiple page visits.
## How to organize them in this folder?
We recommend using the Google Chrome browser with the extension "Export cookie JSON file for Puppeteer", as shown in the following figure:
![extension](../../../assets/cookies/extension.png)
1. Go to a specific website and login.
2. Then use the extension to export the cookies, and save it as a json file in the `cookie_json` folder.
![export](../../../assets/cookies/export.png)
3. After you have exported all cookies, use the following command to convert them to python code:
```bash
cd path/to/MetaChain && python metachain/environment/browser_cookies.py
```
## Recommended websites
We recommend you to export the cookies from the following websites:
- [archive.org](https://archive.org)
- [github.com](https://github.com)
- [nature.com](https://nature.com)
- [orcid.org](https://orcid.org)
- [www.collinsdictionary.com](https://www.collinsdictionary.com)
- [www.jstor.org](https://www.jstor.org)
- [www.ncbi.nlm.nih.gov](https://www.ncbi.nlm.nih.gov)
- [www.pnas.org](https://www.pnas.org)
- [www.reddit.com](https://www.reddit.com)
- [www.researchgate.net](https://www.researchgate.net)
- [www.youtube.com](https://www.youtube.com)

View file

@ -1,83 +0,0 @@
[
{
"name": "donation-identifier",
"value": "8ed6af4cc08b88b68b36fffcb6dd7323",
"domain": ".archive.org",
"path": "/",
"expires": 1741773847.95608,
"httpOnly": false,
"secure": false
},
{
"name": "abtest-identifier",
"value": "ca9982a6c4240d53598f01665a3c6100",
"domain": ".archive.org",
"path": "/",
"expires": 1741773847.956153,
"httpOnly": false,
"secure": false
},
{
"name": "test-cookie",
"value": "1",
"domain": ".archive.org",
"path": "/",
"expires": 1734348067.326946,
"httpOnly": false,
"secure": false
},
{
"name": "g_state",
"value": "{\"i_l\":0}",
"domain": "archive.org",
"path": "/",
"expires": 1748690473,
"httpOnly": false,
"secure": false
},
{
"name": "logged-in-sig",
"value": "1764674476%201733138476%20Y3yQCmHjxUil%2FcGs%2FgYR6m%2FHA%2F%2FtAtShDsn25N2tNIzvkGr6EkwbEsYEwDTjZ6%2Bu4Iy65eDH5gZVrZayaRZzJEa6R91agNjLC1rmw%2F47W5OXyDVFN5kLX%2Ba2OxNOzEx6Ws%2BLVwFVr%2Bdnbzhdt1vqNTEpECwy14%2Fu4n9qXGANJ5IKEO7pfu4ONymTb0RWH%2B158Wphp0Gluy9bR1a3t3TSGM%2FyhBEa37FJ56ckJJDghwIVsANhhu%2FextDlCDLXDkPtxLrwdX%2FAlbBoNFIeQ5%2BzoJX21KKQVdJxVWzSRLb4LXyFQsvhkpL221qlJ%2FDQER53IrTAIkmxrDI4cfjumUnKTQ%3D%3D",
"domain": ".archive.org",
"path": "/",
"expires": 1764674476.838234,
"httpOnly": false,
"secure": false
},
{
"name": "logged-in-user",
"value": "jiabintang77%40gmail.com",
"domain": ".archive.org",
"path": "/",
"expires": 1764674476.838343,
"httpOnly": false,
"secure": false
},
{
"name": "PHPSESSID",
"value": "jteta3bg9mb3t8e6dkp7r6mcd4",
"domain": ".archive.org",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": false
},
{
"name": "donation",
"value": "x",
"domain": ".archive.org",
"path": "/",
"expires": 1736767334,
"httpOnly": false,
"secure": false
},
{
"name": "mp_94085d51c4102efbb82a71d85705cdcf_mixpanel",
"value": "%7B%22distinct_id%22%3A%20%22193871a38b622b1-030839df772cce-1e525636-1fa400-193871a38b71d9a%22%2C%22%24device_id%22%3A%20%22193871a38b622b1-030839df772cce-1e525636-1fa400-193871a38b71d9a%22%2C%22%24initial_referrer%22%3A%20%22%24direct%22%2C%22%24initial_referring_domain%22%3A%20%22%24direct%22%7D",
"domain": ".archive.org",
"path": "/",
"expires": 1764675133,
"httpOnly": false,
"secure": false
}
]

View file

@ -1,48 +0,0 @@
[
{
"name": "AMCV_4D6368F454EC41940A4C98A6%40AdobeOrg",
"value": "179643557%7CMCIDTS%7C20060%7CMCMID%7C09958998665032490705964449674769238820%7CMCAID%7CNONE%7CMCOPTOUT-1733149186s%7CNONE%7CvVersion%7C5.5.0",
"domain": ".hku.hk",
"path": "/",
"expires": 1767701986.053151,
"httpOnly": false,
"secure": false
},
{
"name": "ezproxy",
"value": "e1~OilZogbDH4iMWPK",
"domain": ".eproxy.lib.hku.hk",
"path": "/",
"expires": -1,
"httpOnly": true,
"secure": false
},
{
"name": "ezproxyl",
"value": "OilZogbDH4iMWPK",
"domain": ".eproxy.lib.hku.hk",
"path": "/",
"expires": -1,
"httpOnly": true,
"secure": false,
"sameSite": "Lax"
},
{
"name": "ezproxyn",
"value": "OilZogbDH4iMWPK",
"domain": ".eproxy.lib.hku.hk",
"path": "/",
"expires": -1,
"httpOnly": true,
"secure": true
},
{
"name": "mp_94085d51c4102efbb82a71d85705cdcf_mixpanel",
"value": "%7B%22distinct_id%22%3A%20%22193873d31db19b2-00bbd0bd5ad31-1e525636-1fa400-193873d31dc28b3%22%2C%22%24device_id%22%3A%20%22193873d31db19b2-00bbd0bd5ad31-1e525636-1fa400-193873d31dc28b3%22%2C%22%24initial_referrer%22%3A%20%22%24direct%22%2C%22%24initial_referring_domain%22%3A%20%22%24direct%22%7D",
"domain": ".lib.hku.hk",
"path": "/",
"expires": 1765083373,
"httpOnly": false,
"secure": false
}
]

View file

@ -1,159 +0,0 @@
[
{
"name": "_device_id",
"value": "49f9d6cfbd603c8509e73807be70a438",
"domain": "github.com",
"path": "/",
"expires": 1764674868.858374,
"httpOnly": true,
"secure": true,
"sameSite": "Lax"
},
{
"name": "MicrosoftApplicationsTelemetryDeviceId",
"value": "3966ee53-78ca-4fa3-95d7-85e299cecee4",
"domain": "github.com",
"path": "/",
"expires": 1763890136.033527,
"httpOnly": false,
"secure": true
},
{
"name": "_octo",
"value": "GH1.1.1313590405.1727940967",
"domain": ".github.com",
"path": "/",
"expires": 1759476967,
"httpOnly": false,
"secure": true,
"sameSite": "Lax"
},
{
"name": "GHCC",
"value": "Required:1-Analytics:1-SocialMedia:1-Advertising:1",
"domain": ".github.com",
"path": "/",
"expires": 1745563377,
"httpOnly": false,
"secure": true,
"sameSite": "Lax"
},
{
"name": "MSFPC",
"value": "GUID=3452f0b49fd14d349a6dbf8ddee26d60&HASH=3452&LV=202410&V=4&LU=1730011383391",
"domain": "github.com",
"path": "/",
"expires": 1761547383.513164,
"httpOnly": false,
"secure": true
},
{
"name": "logged_in",
"value": "yes",
"domain": ".github.com",
"path": "/",
"expires": 1762511337.053395,
"httpOnly": true,
"secure": true,
"sameSite": "Lax"
},
{
"name": "saved_user_sessions",
"value": "151511798%3A8an8gJwE3la35NvNIyacuRFRSHlup_9RBaQ5q4CThhvPV89o%7C152840453%3A2Quysh6Cns_a0IpeKcw-GAUZIt6ZndbJ7BoGdxx11qkZa9bi%7C151510669%3AMpYw2DQuFwt3NJiimm36OWLTQmoWFzVcSUbLuV8SBFRPqN8-%7C165454715%3AZSjwi4MUxVCr91r-m1ElvPL2L0DGDSoSo6uwV7pPpliml3js%7C148674909%3ALnLJclEDIxFjFcwX0eBlgOJzbDpsxKedtd6So7_EFs6HPtL7%7C56426168%3AmM073me2REE4yufnvYBKQ84KsG54oHKLBiTSs5tXYCoyQx4g",
"domain": "github.com",
"path": "/",
"expires": 1739599354.295483,
"httpOnly": true,
"secure": true,
"sameSite": "Lax"
},
{
"name": "user_session",
"value": "mM073me2REE4yufnvYBKQ84KsG54oHKLBiTSs5tXYCoyQx4g",
"domain": "github.com",
"path": "/",
"expires": 1734348468.858989,
"httpOnly": true,
"secure": true,
"sameSite": "Lax"
},
{
"name": "__Host-user_session_same_site",
"value": "mM073me2REE4yufnvYBKQ84KsG54oHKLBiTSs5tXYCoyQx4g",
"domain": "github.com",
"path": "/",
"expires": 1734348468.859144,
"httpOnly": true,
"secure": true,
"sameSite": "Strict"
},
{
"name": "dotcom_user",
"value": "tjb-tech",
"domain": ".github.com",
"path": "/",
"expires": 1763647073.257243,
"httpOnly": true,
"secure": true,
"sameSite": "Lax"
},
{
"name": "color_mode",
"value": "%7B%22color_mode%22%3A%22auto%22%2C%22light_theme%22%3A%7B%22name%22%3A%22light%22%2C%22color_mode%22%3A%22light%22%7D%2C%22dark_theme%22%3A%7B%22name%22%3A%22dark%22%2C%22color_mode%22%3A%22dark%22%7D%7D",
"domain": ".github.com",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": true,
"sameSite": "Lax"
},
{
"name": "cpu_bucket",
"value": "xlg",
"domain": ".github.com",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": true,
"sameSite": "Lax"
},
{
"name": "preferred_color_mode",
"value": "light",
"domain": ".github.com",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": true,
"sameSite": "Lax"
},
{
"name": "tz",
"value": "Asia%2FHong_Kong",
"domain": ".github.com",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": true,
"sameSite": "Lax"
},
{
"name": "mp_94085d51c4102efbb82a71d85705cdcf_mixpanel",
"value": "%7B%22distinct_id%22%3A%20%22cm28oqri501xjtwbjzt1prdzm%22%2C%22%24device_id%22%3A%20%221928eb980316cc-050dbe3db24bd2-16525637-16a7f0-1928eb980321bb8%22%2C%22%24search_engine%22%3A%20%22google%22%2C%22%24initial_referrer%22%3A%20%22https%3A%2F%2Fwww.google.com%2F%22%2C%22%24initial_referring_domain%22%3A%20%22www.google.com%22%2C%22%24user_id%22%3A%20%22cm28oqri501xjtwbjzt1prdzm%22%7D",
"domain": ".github.com",
"path": "/",
"expires": 1764674869,
"httpOnly": false,
"secure": false
},
{
"name": "_gh_sess",
"value": "oUZyg0XEvo5fm%2FC18yV17FMePsGYB4hM9R5q8AgiwOAjTritHx1Ux4jNGjnm7Jaxz99%2FOxD4agIy05dUdG6cnSxRP62NJE7bZxIWFV2W64ekLVCwz7ge2oaRcvVlN4HjVhw5dsl2czpD8Irn%2BZG0Dmw16tH9GZZ4yhaFW5%2Fshmte3DBYsndzLNn4rGje9B3P1IFYyz9sYx23j71xRb9wRjwoLHPYGf4Yp3vRKVAzTp3X6nrjvgr4XGU2N%2BGPH3OYDZQYCIPLckTIEmRg7a0dd2KvU2mfcm%2F%2B9N9%2FNNBFTbKvUhPwWM8kIRpv5WTzU%2FI5Y0qBv71gX2B7nNm%2FtIkWjbWUhgizf%2BpxOHAuhs89sRaicpc9NjasSUISwfxRCoH5evWqVXEifhqQvSU42iR4wkhnRHs%3D--za2vZwPq%2FBJxevj3--tEOzEYASRs0gepJUCIv8Mg%3D%3D",
"domain": "github.com",
"path": "/",
"expires": -1,
"httpOnly": true,
"secure": true,
"sameSite": "Lax"
}
]

View file

@ -1,103 +0,0 @@
[
{
"name": "hum_ieee_visitor",
"value": "3403d64f-1870-4601-9ff7-e5900074a6db",
"domain": ".ieeexplore-ieee-org.eproxy.lib.hku.hk",
"path": "/",
"expires": 1756280921.91082,
"httpOnly": false,
"secure": true
},
{
"name": "_zitok",
"value": "6273c58ab3f308a07a711718187500",
"domain": ".ieeexplore-ieee-org.eproxy.lib.hku.hk",
"path": "/",
"expires": 1761551757,
"httpOnly": false,
"secure": true,
"sameSite": "Strict"
},
{
"name": "AMCV_4D6368F454EC41940A4C98A6%40AdobeOrg",
"value": "179643557%7CMCIDTS%7C20060%7CMCMID%7C09958998665032490705964449674769238820%7CMCAID%7CNONE%7CMCOPTOUT-1733149186s%7CNONE%7CvVersion%7C5.5.0",
"domain": ".hku.hk",
"path": "/",
"expires": 1767701986.053151,
"httpOnly": false,
"secure": false
},
{
"name": "ezproxy",
"value": "e1~OilZogbDH4iMWPK",
"domain": ".eproxy.lib.hku.hk",
"path": "/",
"expires": -1,
"httpOnly": true,
"secure": false
},
{
"name": "ezproxyl",
"value": "OilZogbDH4iMWPK",
"domain": ".eproxy.lib.hku.hk",
"path": "/",
"expires": -1,
"httpOnly": true,
"secure": false,
"sameSite": "Lax"
},
{
"name": "ezproxyn",
"value": "OilZogbDH4iMWPK",
"domain": ".eproxy.lib.hku.hk",
"path": "/",
"expires": -1,
"httpOnly": true,
"secure": true
},
{
"name": "AMCVS_8E929CC25A1FB2B30A495C97%40AdobeOrg",
"value": "1",
"domain": ".ieeexplore-ieee-org.eproxy.lib.hku.hk",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": false
},
{
"name": "s_cc",
"value": "true",
"domain": ".ieeexplore-ieee-org.eproxy.lib.hku.hk",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": false
},
{
"name": "mp_94085d51c4102efbb82a71d85705cdcf_mixpanel",
"value": "%7B%22distinct_id%22%3A%20%22193873d31db19b2-00bbd0bd5ad31-1e525636-1fa400-193873d31dc28b3%22%2C%22%24device_id%22%3A%20%22193873d31db19b2-00bbd0bd5ad31-1e525636-1fa400-193873d31dc28b3%22%2C%22%24initial_referrer%22%3A%20%22%24direct%22%2C%22%24initial_referring_domain%22%3A%20%22%24direct%22%7D",
"domain": ".lib.hku.hk",
"path": "/",
"expires": 1765086053,
"httpOnly": false,
"secure": false
},
{
"name": "utag_main",
"value": "v_id:01939fa183070055958b6429f88c05075005506d00bd0$_sn:1$_se:2$_ss:0$_st:1733551853250$ses_id:1733549982472%3Bexp-session$_pn:2%3Bexp-session$vapi_domain:ieeexplore-ieee-org.eproxy.lib.hku.hk",
"domain": ".hku.hk",
"path": "/",
"expires": 1765086053,
"httpOnly": false,
"secure": false
},
{
"name": "AMCV_8E929CC25A1FB2B30A495C97%40AdobeOrg",
"value": "359503849%7CMCIDTS%7C20065%7CMCMID%7C53777252718039557930823884447397163100%7CMCAID%7CNONE%7CMCOPTOUT-1733557253s%7CNONE%7CvVersion%7C5.0.1",
"domain": ".ieeexplore-ieee-org.eproxy.lib.hku.hk",
"path": "/",
"expires": 1768110053.386389,
"httpOnly": false,
"secure": false
}
]

View file

@ -1,67 +0,0 @@
[
{
"name": "ezproxy",
"value": "e1~OilZogbDH4iMWPK",
"domain": ".eproxy.lib.hku.hk",
"path": "/",
"expires": -1,
"httpOnly": true,
"secure": false
},
{
"name": "ezproxyl",
"value": "OilZogbDH4iMWPK",
"domain": ".eproxy.lib.hku.hk",
"path": "/",
"expires": -1,
"httpOnly": true,
"secure": false,
"sameSite": "Lax"
},
{
"name": "ezproxyn",
"value": "OilZogbDH4iMWPK",
"domain": ".eproxy.lib.hku.hk",
"path": "/",
"expires": -1,
"httpOnly": true,
"secure": true
},
{
"name": "AMCVS_4D6368F454EC41940A4C98A6%40AdobeOrg",
"value": "1",
"domain": ".hku.hk",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": false
},
{
"name": "utag_main",
"value": "v_id:01939fa183070055958b6429f88c05075005506d00bd0$_sn:1$_se:13$_ss:0$_st:1733552707246$ses_id:1733549982472%3Bexp-session$_pn:9%3Bexp-session$vapi_domain:ieeexplore-ieee-org.eproxy.lib.hku.hk",
"domain": ".hku.hk",
"path": "/",
"expires": 1765086907,
"httpOnly": false,
"secure": false,
"sameSite": "Lax"
},
{
"name": "AMCV_4D6368F454EC41940A4C98A6%40AdobeOrg",
"value": "179643557%7CMCIDTS%7C20065%7CMCMID%7C09958998665032490705964449674769238820%7CMCAID%7CNONE%7CMCOPTOUT-1733558198s%7CNONE%7CvVersion%7C5.5.0",
"domain": ".hku.hk",
"path": "/",
"expires": 1768110998.70329,
"httpOnly": false,
"secure": false
},
{
"name": "mp_94085d51c4102efbb82a71d85705cdcf_mixpanel",
"value": "%7B%22distinct_id%22%3A%20%22193873d31db19b2-00bbd0bd5ad31-1e525636-1fa400-193873d31dc28b3%22%2C%22%24device_id%22%3A%20%22193873d31db19b2-00bbd0bd5ad31-1e525636-1fa400-193873d31dc28b3%22%2C%22%24initial_referrer%22%3A%20%22%24direct%22%2C%22%24initial_referring_domain%22%3A%20%22%24direct%22%7D",
"domain": ".lib.hku.hk",
"path": "/",
"expires": 1765087052,
"httpOnly": false,
"secure": false
}
]

View file

@ -1,247 +0,0 @@
[
{
"domain": "www.nature.com",
"secure": false,
"expirationDate": 1733745572000,
"hostOnly": true,
"name": "user.uuid.v2",
"httpOnly": false,
"session": false,
"storeId": null,
"value": "\"765b07e9-028b-45d1-8abd-baa7b6c88125\"",
"path": "/",
"sameSite": "Lax"
},
{
"domain": ".nature.com",
"secure": false,
"hostOnly": false,
"name": "Hm_lpvt_d38bce82bcb44717ccc29a90c4b781ea",
"httpOnly": false,
"session": true,
"storeId": null,
"value": "1733140842",
"path": "/",
"sameSite": "Lax"
},
{
"domain": ".nature.com",
"secure": false,
"expirationDate": 1764676842000,
"hostOnly": false,
"name": "ajs_anonymous_id",
"httpOnly": false,
"session": false,
"storeId": null,
"value": "7e4d00ab-3618-46a2-b0fb-c80b189a0584",
"path": "/",
"sameSite": "Lax"
},
{
"domain": ".nature.com",
"secure": false,
"expirationDate": 1764676842000,
"hostOnly": false,
"name": "ajs_user_id",
"httpOnly": false,
"session": false,
"storeId": null,
"value": "b1ae7862-b9d6-49c5-a7a5-ad96682ac6dc_SN",
"path": "/",
"sameSite": "Lax"
},
{
"domain": ".nature.com",
"secure": false,
"expirationDate": 1767700841000,
"hostOnly": false,
"name": "_ga_B3E4QL2TPR",
"httpOnly": false,
"session": false,
"storeId": null,
"value": "GS1.1.1733140776.1.1.1733140841.60.0.0",
"path": "/",
"sameSite": "Lax"
},
{
"domain": ".nature.com",
"secure": false,
"expirationDate": 1767700841000,
"hostOnly": false,
"name": "_ga_ERRNTNZ807",
"httpOnly": false,
"session": false,
"storeId": null,
"value": "GS1.1.1733140776.1.1.1733140841.60.0.467679787",
"path": "/",
"sameSite": "Lax"
},
{
"domain": ".nature.com",
"secure": false,
"expirationDate": 1767304843000,
"hostOnly": false,
"name": "cto_bundle",
"httpOnly": false,
"session": false,
"storeId": null,
"value": "5685XF9lQVd1dU4zd2xWRE1uZ3BpQTk3SUVXNkx2bGslMkZwTkZodjRWJTJCcGoyd0JWdiUyQjVlcGkwMVoyWHc4aGxKQkM2N3hyeGI4aFlIRzBZRDNTUTJFb1JYZVhPJTJGMUIlMkZka252a0RPZFdlbld4OU1jaUFrMHN6VDVaREYzSSUyRmFDMEtnb0FoaQ",
"path": "/",
"sameSite": "Lax"
},
{
"domain": ".nature.com",
"secure": true,
"expirationDate": 1766836842000,
"hostOnly": false,
"name": "__gpi",
"httpOnly": false,
"session": false,
"storeId": null,
"value": "UID=00000fa61060e41d:T=1733140842:RT=1733140842:S=ALNI_Mai2WWloG6liac6hEyJYOSjI3WtCg",
"path": "/",
"sameSite": "Lax"
},
{
"domain": ".nature.com",
"secure": false,
"expirationDate": 1766836841000,
"hostOnly": false,
"name": "_uetvid",
"httpOnly": false,
"session": false,
"storeId": null,
"value": "e6d7f220b0a411efaac753cc9ddac552",
"path": "/",
"sameSite": "Lax"
},
{
"domain": ".nature.com",
"secure": false,
"expirationDate": 1764676841000,
"hostOnly": false,
"name": "Hm_lvt_d38bce82bcb44717ccc29a90c4b781ea",
"httpOnly": false,
"session": false,
"storeId": null,
"value": "1733140777",
"path": "/",
"sameSite": "Lax"
},
{
"domain": ".nature.com",
"secure": true,
"expirationDate": 1748692774000,
"hostOnly": false,
"name": "__eoi",
"httpOnly": false,
"session": false,
"storeId": null,
"value": "ID=1ced890879e93934:T=1733140774:RT=1733140774:S=AA-AfjauQ5O9wXrdBjufrcsmQ-EM",
"path": "/",
"sameSite": "Lax"
},
{
"domain": ".nature.com",
"secure": true,
"expirationDate": 1766836842000,
"hostOnly": false,
"name": "__gads",
"httpOnly": false,
"session": false,
"storeId": null,
"value": "ID=edf25ef88638a1b3:T=1733140842:RT=1733140842:S=ALNI_MYUdW0s3LG6IOpCKgjBo4gbGPsI1Q",
"path": "/",
"sameSite": "Lax"
},
{
"domain": ".nature.com",
"secure": false,
"expirationDate": 1740916843000,
"hostOnly": false,
"name": "_fbp",
"httpOnly": false,
"session": false,
"storeId": null,
"value": "fb.1.1733140776577.688163329394303800",
"path": "/",
"sameSite": "Lax"
},
{
"domain": ".nature.com",
"secure": false,
"expirationDate": 1767700841000,
"hostOnly": false,
"name": "_ga",
"httpOnly": false,
"session": false,
"storeId": null,
"value": "GA1.1.2115119478.1733140776",
"path": "/",
"sameSite": "Lax"
},
{
"domain": ".nature.com",
"secure": false,
"expirationDate": 1733227241000,
"hostOnly": false,
"name": "_uetsid",
"httpOnly": false,
"session": false,
"storeId": null,
"value": "e6d7f280b0a411efaed4a5384bcc5d88",
"path": "/",
"sameSite": "Lax"
},
{
"domain": ".nature.com",
"secure": false,
"hostOnly": false,
"name": "HMACCOUNT",
"httpOnly": false,
"session": true,
"storeId": null,
"value": "7B6C1DFC72FE250C",
"path": "/",
"sameSite": "Lax"
},
{
"domain": ".nature.com",
"secure": true,
"expirationDate": 1767700841000,
"hostOnly": false,
"name": "permutive-id",
"httpOnly": false,
"session": false,
"storeId": null,
"value": "7cbbccaf-2079-4e6d-99fc-186a9db51c90",
"path": "/",
"sameSite": "Lax"
},
{
"domain": ".nature.com",
"secure": false,
"expirationDate": 1767700841000,
"hostOnly": false,
"name": "permutive-session",
"httpOnly": false,
"session": false,
"storeId": null,
"value": "%7B%22session_id%22%3A%221d3a9243-5c93-4975-ae30-63ca2047b7cf%22%2C%22last_updated%22%3A%222024-12-02T12%3A00%3A41.747Z%22%7D",
"path": "/",
"sameSite": "Lax"
},
{
"domain": ".nature.com",
"secure": false,
"expirationDate": 1764676775000,
"hostOnly": false,
"name": "sncc",
"httpOnly": false,
"session": false,
"storeId": null,
"value": "P%3D8%3AV%3D68.0.0%26C%3DC01%2CC02%2CC03%2CC04%26D%3Dtrue",
"path": "/",
"sameSite": "Lax"
}
]

View file

@ -1,105 +0,0 @@
[
{
"name": "OptanonAlertBoxClosed",
"value": "2024-06-06T05:28:24.993Z",
"domain": ".orcid.org",
"path": "/",
"expires": 1749187704,
"httpOnly": false,
"secure": false,
"sameSite": "Lax"
},
{
"name": "AWSELB",
"value": "CBD1D7FF1216388FA48838CBCA4774FD22800B8FB55A37124459E84B59F34FE231A4AA84F4ACD29C01160D60FB2ABE4D73D23EFBBE355236CF44A8FEE381C3844BD946CF3D",
"domain": "orcid.org",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": false
},
{
"name": "AWSELBCORS",
"value": "CBD1D7FF1216388FA48838CBCA4774FD22800B8FB55A37124459E84B59F34FE231A4AA84F4ACD29C01160D60FB2ABE4D73D23EFBBE355236CF44A8FEE381C3844BD946CF3D",
"domain": "orcid.org",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": true
},
{
"name": "orcidUserConnectionId",
"value": "-114606494029392851",
"domain": "orcid.org",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": true
},
{
"name": "XSRF-TOKEN",
"value": "b64bcd3a-f0f5-407b-9115-a1f5183f3997",
"domain": "orcid.org",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": true
},
{
"name": "JSESSIONID",
"value": "48DD20615AC49336A91F9A3A6F5B1483",
"domain": "orcid.org",
"path": "/",
"expires": -1,
"httpOnly": true,
"secure": true
},
{
"name": "locale_v3",
"value": "en",
"domain": "orcid.org",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": true
},
{
"name": "mp_94085d51c4102efbb82a71d85705cdcf_mixpanel",
"value": "%7B%22distinct_id%22%3A%20%22193871b8e67918-084bddcb7389ac-1e525636-1fa400-193871b8e682d76%22%2C%22%24device_id%22%3A%20%22193871b8e67918-084bddcb7389ac-1e525636-1fa400-193871b8e682d76%22%2C%22%24initial_referrer%22%3A%20%22%24direct%22%2C%22%24initial_referring_domain%22%3A%20%22%24direct%22%7D",
"domain": ".orcid.org",
"path": "/",
"expires": 1764674617,
"httpOnly": false,
"secure": false
},
{
"name": "OptanonConsent",
"value": "isGpcEnabled=0&datestamp=Mon+Dec+02+2024+19%3A23%3A37+GMT%2B0800+(%E9%A6%99%E6%B8%AF%E6%A0%87%E5%87%86%E6%97%B6%E9%97%B4)&version=202310.2.0&browserGpcFlag=0&isIABGlobal=false&hosts=&consentId=71ca593a-5b7c-4963-87cf-52c27440ac95&interactionCount=1&landingPath=NotLandingPage&groups=C0001%3A1%2CC0003%3A1%2CC0002%3A1%2CC0004%3A1&geolocation=HK%3B&AwaitingReconsent=false",
"domain": ".orcid.org",
"path": "/",
"expires": 1764674617,
"httpOnly": false,
"secure": false,
"sameSite": "Lax"
},
{
"name": "cookieName",
"value": "dont%20show%20message",
"domain": "orcid.org",
"path": "/",
"expires": 1764674620,
"httpOnly": false,
"secure": false,
"sameSite": "Lax"
},
{
"name": "closable-unique-name",
"value": "understood",
"domain": "orcid.org",
"path": "/",
"expires": 1764674620,
"httpOnly": false,
"secure": false,
"sameSite": "Lax"
}
]

View file

@ -1,75 +0,0 @@
[
{
"name": "user.uuid.v2",
"value": "\"f9248aca-ac13-40e6-8b45-eaeb5fe20825\"",
"domain": "www-nature-com.eproxy.lib.hku.hk",
"path": "/",
"expires": 1740916756.716508,
"httpOnly": false,
"secure": false
},
{
"name": "ezproxy",
"value": "e1~OilZogbDH4iMWPK",
"domain": ".eproxy.lib.hku.hk",
"path": "/",
"expires": -1,
"httpOnly": true,
"secure": false
},
{
"name": "ezproxyl",
"value": "OilZogbDH4iMWPK",
"domain": ".eproxy.lib.hku.hk",
"path": "/",
"expires": -1,
"httpOnly": true,
"secure": false,
"sameSite": "Lax"
},
{
"name": "ezproxyn",
"value": "OilZogbDH4iMWPK",
"domain": ".eproxy.lib.hku.hk",
"path": "/",
"expires": -1,
"httpOnly": true,
"secure": true
},
{
"name": "AMCVS_4D6368F454EC41940A4C98A6%40AdobeOrg",
"value": "1",
"domain": ".hku.hk",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": false
},
{
"name": "AMCV_4D6368F454EC41940A4C98A6%40AdobeOrg",
"value": "179643557%7CMCIDTS%7C20065%7CMCMID%7C09958998665032490705964449674769238820%7CMCAID%7CNONE%7CMCOPTOUT-1733558198s%7CNONE%7CvVersion%7C5.5.0",
"domain": ".hku.hk",
"path": "/",
"expires": 1768110998.70329,
"httpOnly": false,
"secure": false
},
{
"name": "utag_main",
"value": "v_id:01939fa183070055958b6429f88c05075005506d00bd0$_sn:1$_se:20$_ss:0$_st:1733553108768$ses_id:1733549982472%3Bexp-session$_pn:14%3Bexp-session$vapi_domain:ieeexplore-ieee-org.eproxy.lib.hku.hk",
"domain": ".hku.hk",
"path": "/",
"expires": 1765087308,
"httpOnly": false,
"secure": false
},
{
"name": "mp_94085d51c4102efbb82a71d85705cdcf_mixpanel",
"value": "%7B%22distinct_id%22%3A%20%22193873d31db19b2-00bbd0bd5ad31-1e525636-1fa400-193873d31dc28b3%22%2C%22%24device_id%22%3A%20%22193873d31db19b2-00bbd0bd5ad31-1e525636-1fa400-193873d31dc28b3%22%2C%22%24initial_referrer%22%3A%20%22%24direct%22%2C%22%24initial_referring_domain%22%3A%20%22%24direct%22%7D",
"domain": ".lib.hku.hk",
"path": "/",
"expires": 1765087436,
"httpOnly": false,
"secure": false
}
]

View file

@ -1,138 +0,0 @@
[
{
"name": "ezproxy",
"value": "e1~OilZogbDH4iMWPK",
"domain": ".eproxy.lib.hku.hk",
"path": "/",
"expires": -1,
"httpOnly": true,
"secure": false
},
{
"name": "ezproxyl",
"value": "OilZogbDH4iMWPK",
"domain": ".eproxy.lib.hku.hk",
"path": "/",
"expires": -1,
"httpOnly": true,
"secure": false,
"sameSite": "Lax"
},
{
"name": "ezproxyn",
"value": "OilZogbDH4iMWPK",
"domain": ".eproxy.lib.hku.hk",
"path": "/",
"expires": -1,
"httpOnly": true,
"secure": true
},
{
"name": "AMCVS_4D6368F454EC41940A4C98A6%40AdobeOrg",
"value": "1",
"domain": ".hku.hk",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": false
},
{
"name": "utag_main",
"value": "v_id:01939fa183070055958b6429f88c05075005506d00bd0$_sn:1$_se:20$_ss:0$_st:1733553108768$ses_id:1733549982472%3Bexp-session$_pn:14%3Bexp-session$vapi_domain:ieeexplore-ieee-org.eproxy.lib.hku.hk",
"domain": ".hku.hk",
"path": "/",
"expires": 1765087308,
"httpOnly": false,
"secure": false
},
{
"name": "AMCV_4D6368F454EC41940A4C98A6%40AdobeOrg",
"value": "179643557%7CMCIDTS%7C20065%7CMCMID%7C09958998665032490705964449674769238820%7CMCAID%7CNONE%7CMCOPTOUT-1733559088s%7CNONE%7CvVersion%7C5.5.0",
"domain": ".hku.hk",
"path": "/",
"expires": 1768111888.617908,
"httpOnly": false,
"secure": false
},
{
"name": "SID",
"value": "\"EUW1ED0CAFs37MFXuY5NakcbWc5Qu\"",
"domain": ".lib.hku.hk",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": false
},
{
"name": "CUSTOMER",
"value": "\"UNIVERSITY OF HONG KONG\"",
"domain": ".lib.hku.hk",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": false
},
{
"name": "E_GROUP_NAME",
"value": "\"University of Hong Kong\"",
"domain": ".lib.hku.hk",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": false
},
{
"name": "__cf_bm",
"value": "dU7HSmMJl6w4XDg.tZSoewkYsxb0bX7Barvg4RvulLw-1733551961-1.0.1.1-7Um2w5HRPO8C06bwjScmRD9BaTZkbArPDfX_e8urefWlKlH50nONZAxnxeL4VbDbHzBBcAY1OzwO5TyNuuCUfQ",
"domain": ".lib.hku.hk",
"path": "/",
"expires": 1733553761.117424,
"httpOnly": false,
"secure": false
},
{
"name": "AMCVS_242B6472541199F70A4C98A6%40AdobeOrg",
"value": "1",
"domain": ".hku.hk",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": false
},
{
"name": "adBlockEnabled",
"value": "blocked",
"domain": "www-science-org.eproxy.lib.hku.hk",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": false
},
{
"name": "cookiePolicy",
"value": "iaccept",
"domain": "www-science-org.eproxy.lib.hku.hk",
"path": "/",
"expires": 1733638777.524329,
"httpOnly": false,
"secure": true
},
{
"name": "AMCV_242B6472541199F70A4C98A6%40AdobeOrg",
"value": "179643557%7CMCIDTS%7C20065%7CMCMID%7C90810009207598809487163227219398447255%7CMCOPTOUT-1733559578s%7CNONE%7CvVersion%7C5.5.0",
"domain": ".hku.hk",
"path": "/",
"expires": 1768112378.032281,
"httpOnly": false,
"secure": false
},
{
"name": "mp_94085d51c4102efbb82a71d85705cdcf_mixpanel",
"value": "%7B%22distinct_id%22%3A%20%22193873d31db19b2-00bbd0bd5ad31-1e525636-1fa400-193873d31dc28b3%22%2C%22%24device_id%22%3A%20%22193873d31db19b2-00bbd0bd5ad31-1e525636-1fa400-193873d31dc28b3%22%2C%22%24initial_referrer%22%3A%20%22%24direct%22%2C%22%24initial_referring_domain%22%3A%20%22%24direct%22%2C%22%24search_engine%22%3A%20%22google%22%7D",
"domain": ".lib.hku.hk",
"path": "/",
"expires": 1765088378,
"httpOnly": false,
"secure": false
}
]

View file

@ -1,103 +0,0 @@
[
{
"name": "_sp_id.a65e",
"value": "a151b61b-0e26-493f-9885-ed0d9579e181.1712037732.1.1712037742..381bfab3-8c2a-4e54-8d4b-44a5c8c997ef..6db53b82-8b6d-471c-b7de-194adad46810.1712037732261.2",
"domain": "www.collinsdictionary.com",
"path": "/",
"expires": 1746597742.393476,
"httpOnly": false,
"secure": true
},
{
"name": "__cflb",
"value": "02DiuFwNDm462z9fWfJeB58usqeie1xoTDrYZciipwE2x",
"domain": "www.collinsdictionary.com",
"path": "/",
"expires": 1733223382.743499,
"httpOnly": true,
"secure": true
},
{
"name": "XSRF-TOKEN",
"value": "64a0f62d-dc8f-40cb-8aa2-66e3ad283ad4",
"domain": "www.collinsdictionary.com",
"path": "/",
"expires": -1,
"httpOnly": true,
"secure": true
},
{
"name": "dictcode",
"value": "english",
"domain": "www.collinsdictionary.com",
"path": "/",
"expires": 1763873478.639472,
"httpOnly": false,
"secure": false
},
{
"name": "searchPanelOpen",
"value": "true",
"domain": ".collinsdictionary.com",
"path": "/",
"expires": 1733803809.434554,
"httpOnly": false,
"secure": false
},
{
"name": "search",
"value": "hello",
"domain": "www.collinsdictionary.com",
"path": "/",
"expires": 1763871009.434808,
"httpOnly": false,
"secure": false
},
{
"name": "__cf_bm",
"value": "xGchgbvqtkoAYddlxWT4VgRmeTZ1qTVmI0hjpRvOj0w-1733201062-1.0.1.1-SDl6_cuGUlqEOSm4oDQpU5rJdha8wEbITIgLoxdY69GgWrSt5GO7nX47Vc2AihzcBY.yS6GZ9qXVfRKEttQyLw",
"domain": ".collinsdictionary.com",
"path": "/",
"expires": 1733202862.320396,
"httpOnly": true,
"secure": true
},
{
"name": "last_url",
"value": "https%3A%2F%2Fwww.collinsdictionary.com%2Fdictionary%2Fspanish-english%2Fcaminata",
"domain": "www.collinsdictionary.com",
"path": "/",
"expires": 1763873068.316249,
"httpOnly": false,
"secure": false
},
{
"name": "OptanonConsent",
"value": "isGpcEnabled=0&datestamp=Tue+Dec+03+2024+12%3A51%3A18+GMT%2B0800+(%E9%A6%99%E6%B8%AF%E6%A0%87%E5%87%86%E6%97%B6%E9%97%B4)&version=202409.2.0&browserGpcFlag=0&isIABGlobal=false&hosts=&landingPath=NotLandingPage&groups=C0001%3A1%2CC0002%3A1%2CC0003%3A1%2CC0004%3A1&AwaitingReconsent=false&geolocation=JP%3B27",
"domain": ".collinsdictionary.com",
"path": "/",
"expires": 1759121478,
"httpOnly": false,
"secure": false,
"sameSite": "Lax"
},
{
"name": "OptanonAlertBoxClosed",
"value": "2024-12-03T04:51:18.738Z",
"domain": ".collinsdictionary.com",
"path": "/",
"expires": 1759121478,
"httpOnly": false,
"secure": false,
"sameSite": "Lax"
},
{
"name": "mp_94085d51c4102efbb82a71d85705cdcf_mixpanel",
"value": "%7B%22distinct_id%22%3A%20%22193873a93591e4d-05e2471014e6fb-1e525636-1fa400-193873a935a287e%22%2C%22%24device_id%22%3A%20%22193873a93591e4d-05e2471014e6fb-1e525636-1fa400-193873a935a287e%22%2C%22%24initial_referrer%22%3A%20%22%24direct%22%2C%22%24initial_referring_domain%22%3A%20%22%24direct%22%7D",
"domain": ".collinsdictionary.com",
"path": "/",
"expires": 1764737478,
"httpOnly": false,
"secure": false
}
]

View file

@ -1,146 +0,0 @@
[
{
"name": "UUID",
"value": "3c4dd735-8d33-4fd0-a40f-83d399a0dc46",
"domain": "www.jstor.org",
"path": "/",
"expires": 1740190342.420181,
"httpOnly": false,
"secure": true
},
{
"name": "_pxvid",
"value": "2dd5c1cb-b670-11ee-9186-3dd546fa1c41",
"domain": "www.jstor.org",
"path": "/",
"expires": 1737166344,
"httpOnly": false,
"secure": false,
"sameSite": "Lax"
},
{
"name": "__zlcmid",
"value": "1O1n3oDgpvApbaQ",
"domain": ".jstor.org",
"path": "/",
"expires": 1764831447,
"httpOnly": false,
"secure": false,
"sameSite": "Lax"
},
{
"name": "csrftoken",
"value": "iyx0piwZPaxHwlEjMlBpPrxRasiSrbVv",
"domain": "www.jstor.org",
"path": "/",
"expires": 1764745045.418981,
"httpOnly": false,
"secure": true,
"sameSite": "Lax"
},
{
"name": "OptanonAlertBoxClosed",
"value": "2024-12-04T06:56:48.465Z",
"domain": ".jstor.org",
"path": "/",
"expires": 1764831408,
"httpOnly": false,
"secure": false,
"sameSite": "Lax"
},
{
"name": "AccessToken",
"value": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzZXNzaW9uSWQiOiIwMTkzOTA3NGY4MTg3OTQ1OTg4NzdiNWQzMWE4NDk3MSIsInV1aWQiOiIzYzRkZDczNS04ZDMzLTRmZDAtYTQwZi04M2QzOTlhMGRjNDYiLCJ1c2VyIjp7ImlkIjoiIiwibG9nZ2VkSW4iOmZhbHNlLCJhZG1pbiI6ZmFsc2V9LCJpYXQiOjE3MzMyOTU0MDksImV4cCI6MTczMzI5NTcwOX0.lIt08pG__dm_kZ3kJUYMw_bK0Ow2kAD8i2Jf8OQA0RM",
"domain": "www.jstor.org",
"path": "/",
"expires": 1733299009.610988,
"httpOnly": false,
"secure": true,
"sameSite": "Lax"
},
{
"name": "AccessSessionTimedSignature",
"value": "1b72fc2754973a3daa1baf1b169dfda5ed067ed4113573f1a1005ce5da900999",
"domain": "www.jstor.org",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": false
},
{
"name": "AccessSession",
"value": "H4sIAAAAAAAA_42U3W7bMAyF38XXUSFKlEXlLgv2f7Gi7S62oRgYSc68uXHg2B26ou8-KW4adxu6GoEhyYfi4Scqt8Uw1KGYF9pjCFYbQUFrgVWQglFWgnTQzrEMHstiVtTbpAVLJ2DsSfop1Hk17yDBaSetQYzKuqpy1q-YfbBKckWGkm47FWJFQNahcUTWrkzQwITOQhJ2e2HFu765ESFWPDT9nBQiWXIVeAmMUCkGUtJrU6UX5AQN9ylOSYUClJB4Icu5sXOJJ1YTgrJkPmfZ8KRMQ7mX7Z5WmVHV-2Led0OcFdfc7H1PQ9whJAW4Mm2dQ7jvu10xvy2WyyRfnKel5UUatTv-wV-VzfNPaf7uNI3OlofReda8vXizeL8o7tIuQ_9t4X2fdvpyW_Q325g_b3Z93Q993W4yx7aJ-fPlrLiqN_VV_Su-anh9MLx3CyVamaGMM5BSKq3LfAxxvxSjX1HJLGLJTqAMJNhqKYB1NICUzzvH3zuoHznwXQyjwWe3mXhmwoR7iM9v3Xt7L7r25y52p1x39WZdjBhHIHeXd7MJyFBf12HgZsKx-Hj-8qx4iiai1gpB6iNRQ45caj59JOqgghBABBmsQDBOMJUovFaByKtQAkyInsZu124-Jtd_Az3kwwmQdduumyhAOmegTHfMSJsOGYg0HUF8WOTC_6g_lcYPLTWemEWrFD54V-nmhtTNwliuBAZk4TywkBwlQVm5kswxyVlcj33wL2DNfQJNWtGk3m4ftK83H8hIMbXlEaCLJG2IXlTargR6awR7SjfOAzk2fiVZHk287tph-6QHNcm-zuoxeaLxffzL-s_zGONvRDIvs1UFAAA",
"domain": "www.jstor.org",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": false
},
{
"name": "AccessSessionSignature",
"value": "78ecce97f2a2de3ffb4af7c87424885165a11fe7d2e29bf960edff5c48167a35",
"domain": "www.jstor.org",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": false
},
{
"name": "_pxhd",
"value": "xbR2aQnYKWrHiFs3JKCUojuB1cOv3M4cNFTMhSdjqxqtIsdgsSi8ehBRY7OxVo9kz4W0VVvgcv6IyY0Ta0SJPA==:medf83pfHV213oGcCOKWmgsZc4Kr8q2rEC2GEWBrpMBibA5DYuL7eKs10ANKfVD8qmvJUfcosZrIkQ83XUVKtKmZa4Y6lK04fy46yN254wo=",
"domain": "www.jstor.org",
"path": "/",
"expires": 1764831425.214494,
"httpOnly": false,
"secure": false
},
{
"name": "pxcts",
"value": "f8fbc8a1-b20c-11ef-a65c-4947163db9b8",
"domain": "www.jstor.org",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": false,
"sameSite": "Lax"
},
{
"name": "_px2",
"value": "eyJ1IjoiZjhjZDk4ZjAtYjIwYy0xMWVmLWFkYzctZGQxMTkxM2QxNDlkIiwidiI6IjJkZDVjMWNiLWI2NzAtMTFlZS05MTg2LTNkZDU0NmZhMWM0MSIsInQiOjE3MzMyOTU3MjY4NjgsImgiOiIyMTFhMjMyMTRlZmQwOWE5OTNhZjlmODU2MDU1ZmI1N2U4MTcwY2RmNDNlZjM0MGFhYzg1Yzk2NzQ0NmVjOWI5In0=",
"domain": "www.jstor.org",
"path": "/",
"expires": 1733296026,
"httpOnly": false,
"secure": false,
"sameSite": "Lax"
},
{
"name": "ReferringRequestId",
"value": "fastly-default:258d2927284d8837614cc35365d11c1d",
"domain": "www.jstor.org",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": true,
"sameSite": "Lax"
},
{
"name": "mp_94085d51c4102efbb82a71d85705cdcf_mixpanel",
"value": "%7B%22distinct_id%22%3A%20%221939074fba7618-0aaf84ba8a423a-1e525636-16a7f0-1939074fba82966%22%2C%22%24device_id%22%3A%20%221939074fba7618-0aaf84ba8a423a-1e525636-16a7f0-1939074fba82966%22%2C%22%24search_engine%22%3A%20%22google%22%2C%22%24initial_referrer%22%3A%20%22https%3A%2F%2Fwww.google.com%2F%22%2C%22%24initial_referring_domain%22%3A%20%22www.google.com%22%7D",
"domain": ".jstor.org",
"path": "/",
"expires": 1764831445,
"httpOnly": false,
"secure": false
},
{
"name": "OptanonConsent",
"value": "isGpcEnabled=0&datestamp=Wed+Dec+04+2024+14%3A57%3A25+GMT%2B0800+(%E4%B8%AD%E5%9B%BD%E6%A0%87%E5%87%86%E6%97%B6%E9%97%B4)&version=202303.1.0&browserGpcFlag=0&isIABGlobal=false&hosts=&consentId=e6c4a174-c538-4f9f-8632-f5f8e9ff428d&interactionCount=2&landingPath=NotLandingPage&groups=C0001%3A1%2CC0002%3A1%2CC0005%3A1%2CC0004%3A1%2CC0003%3A1&AwaitingReconsent=false&geolocation=JP%3B27",
"domain": ".jstor.org",
"path": "/",
"expires": 1764831445,
"httpOnly": false,
"secure": false,
"sameSite": "Lax"
}
]

View file

@ -1,111 +0,0 @@
[
{
"name": "ncbi_sid",
"value": "015E11D6531E8483_1525SID",
"domain": ".nih.gov",
"path": "/",
"expires": 1764675079.027761,
"httpOnly": false,
"secure": false
},
{
"name": "pmc-frontend-csrftoken",
"value": "L3uvd1o5Uu2efxgCXWDzwxfDTl5QIFDR",
"domain": "www.ncbi.nlm.nih.gov",
"path": "/",
"expires": 1753769052.705813,
"httpOnly": false,
"secure": false,
"sameSite": "Lax"
},
{
"name": "ncbi-login-route",
"value": "google",
"domain": ".ncbi.nlm.nih.gov",
"path": "/",
"expires": 1741001395.405247,
"httpOnly": false,
"secure": false
},
{
"name": "PRESERVTARGET",
"value": "%2FtYTXpgzJne16bwfb4ZN2lGInyYoZNk58TVbSvhIR0njSJplCp65%2BiF2SZAktvmmznDxgJBJhBCH%0ANoo2je1cMk0RXykLSXa4UwW7u0%2B%2Fc1X7WzHdCi209NjSVDPLNfOmFzmtz50Uuh6EfD95OQ%2BYQ2B%2B%0Aq7BP3es9s8ArLlZd9XW7NS72Ulu8cigULF%2FZADnu%2FPZf8DmPLOXuV6xWf0fqcNlZXwWhiCjrPJiU%0AU594rDm20QBWFe5y0VjWXnJtzYm7uSPkWDQYJ8htbKyWwjn4aG0xcYfTBSBUTOi9A%2Bo1BnUPHLIi%0A8V9%2Fi7S2i2vLCCwVTCSGS0pctKKWZRmzEmP9NB4rA167%2FSMuyX6ezHZNUyztiKaga84g5monl5bT%0AjNlmWeBFQV90piriK2wjmey3mIoTu2eJyDi%2Bx%2FO7pwMTfeiU2WXZ5h3U4kRBxw%2FR6%2FrCMYtVrzXp%0A%2FexiuMJDHQmiDPowP8dxw97tgs353jnBRGe8jpoCPoPG2hywQnwXtxW8SjWp19yTypxVFl4KnD1e%0A5aoPyq%2F7tPDRPbW7UikYuihFvX0mD1TH7A0G9Bk%2B36y%2F7jL8oW7OArzEbESjcx2aVRL%2B3VqzX1Oc%0AZcFWXfVarYgckE8EeyNwFwhPDoASs2T4SVNAJAQ38A0bYzCAxc6mQLqADqesOuuveClDDgB8WITg%0A1QnE32rGsLz37nzAQ89V",
"domain": ".nih.gov",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": true
},
{
"name": "NIHSMPROFILE",
"value": "9i9xFyZxcZ3DeEBWJ1M%2B1ygJsb2LhWqfanAC3W20fjIpeXaMrRQ%2F9L3R6DUjYzq5%2FqUDVLhYywfn1%2BT0RJpzID8efN8zNczLDneXLM7waIbhTdfwbIh%2BCnmN0fucHtqYylLU1altZcOhSRTow47jYwyEUFsmJ6kz3T1%2BnZWx3Ol0zaFC8onzZCtl4YHbCxMJVbHYuMcGM4f4YxpiDefQvlDdwY1soBI8z9nvb%2BKMs1B3GgplTzyllWIbC1RHxGLvdlNaz8Zlzw6MU4B3piqrAiCWAvoMF3%2FSShIchSdP0utP%2BMROhcGaoWBU%2FKfkjjDc3lHCPfydE%2F895aasf6uvrL7uccokjb6HxdVs0FA%2FHxfBNJXURVRSpRl9%2BPOd9%2FOOlXQQqhBh1FyAZs6WIxDvLhegMvLITcLh7ahcahuJnoeImSla4b4kK0Ayy6736mJCa0hhXUzGjab4Yhht11PliHlAlh4wLEXj0Dp7X9pj7Ws1%2BdCx8QZyiTWrbauCQJtS1hNXn%2Blg4BoQ2sIq%2FxltuA%3D%3D",
"domain": ".nih.gov",
"path": "/",
"expires": -1,
"httpOnly": true,
"secure": true
},
{
"name": "NIHSMSESSION",
"value": "ZITCB6DlveT31D7iO+eVBnrLBxVxrCJGSz/5hVXNSYwvjuPpvd0O7dD6XqsFf6LKdJXktiX+hhtPWtsNok2mgiSvcpYTBHJxHVefyBt+KiLTVm12lBwYTkx9Gb9OxZNQsMS/Hvoj45/ShvSKut3d7c8e2cEhtjA7DWjHEYHj0tuk3fEoQ4t0UAWkhj6bFt5Vo5tm6dyqen/1EH2o6cBZrVSLbk67LctlbRyV4pc5099Il2lTMPo6LqtyVI1AC/bcSioge+LqDbpDiuP4NOF3EPj/yFSWvBz76/bqQ0Hu5oRGCC1zVPhSGJ1iukio91F6IfYr5vTtMrN00vSuxHRnxj0BYCEuGKtCeNDohuGAZvQVkjhc1aI53oWFu8UNHZvYA+Xo2lpLZUOT+2pkjk1Z/WpAPzE8L+O6mRjwaq8/2b3rUS8gq4Y/2vQYhqmNxeHOBsM01BhpJdRF3Urp3dnIIYakYeCgzC/3Kww+p8ImsBTjvXDO1ivVEjKB4AdnCsRdLpGPszS9xF7439aXXCWHDItM4Wl458T12QWeV+DXiiwzD/kis1QQBWibzsZOzo9KDM3oapKa8I2ouKXb797Z7s+eLR1+Z10lyWZuNVLLZK5ckFT5riayLYeT8+IjFYVER/nfDzm3KpgVPnep/k4DANpDgAOK78iuTv3sBndNMoKrXz2qCZtfi3/gLGZTKcOy90meluFZy9+iLyb+M01VBWuDp/v0a2jSdsJPVmgUQqz7hLVvtc4KpMfiDhfxXGMQnaieP9jREFK3NutAiUrkjS96WS3v5eLK80o/aG1j5IsAvxU/0lMnEri3Yz6Qw1f0ymS6giKiFIUBRUWGXcm5S1qCjwL5GiU71r3nOcaC8T9T1pVLf1R558WqH6Ha95aJVqN6CnEHo8TsZl25lb5tlJgbgb2OFvLSrbUZwuM3R5mA9zP7ciQBywxNm7xFO8sX8QQk0bRhrhgk458KE72Ci/8lhZmvpYy5aqbI4OtaLkuFuu3lX3c7/LsGt+iTFkO6eDSS4CFEnFqg3W5Glvs7WZkTasVI7L0mN0q8DCPXaIDFVPlXEA0shxZuB6Iz+mx4MshQHwY9fMRSWB7gOF5cHjHYUBLfHT/gOwl35rkoJfVf9ikpcgT88mJyk9KTQpVM+CZAGUFDbgHsRqA0jPE19sBum3cqaA6fzh9AnWXfOlAY5KNDdTB4yip4UakCXWsiXVng0GfQ7KvxAguC59L7iZyFjdsIDESi7ZozcPHOpFZleeAU3yFTvMGHmO3G3RFrxyIGCwgWehus3YCqQxZPSE6+yLjXeXTqhqgk0kxcV/MlOFgzMcAhgKEYJS045sLZsmohsIVLV0ONY4uqogSxd3YUzc0WImi1mYdNbzYwbX5tPngah4SK61Nia8Z6xjZuKfXnxNFEkNneezPoPy97Hvd+9wzI+DkU5sa844DzGxeSY/ySE3DTtpowf440r5rX",
"domain": ".nih.gov",
"path": "/",
"expires": -1,
"httpOnly": true,
"secure": true
},
{
"name": "MyNcbiSigninPreferences",
"value": "O2dvb2dsZSY%3D",
"domain": ".nih.gov",
"path": "/",
"expires": 1740915025.611341,
"httpOnly": false,
"secure": false
},
{
"name": "ncbi_prevPHID",
"value": "CE88342C74D8A32100000000003B0036",
"domain": ".ncbi.nlm.nih.gov",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": false
},
{
"name": "WebCubbyUser",
"value": "3GX25AI24DLUXL8LVDJFIVTH6LJRZBE1%3Blogged-in%3Dtrue%3Bmy-name%3Djiabintang77%2540gmail.com%3Bpersistent%3Dfalse%40015E11D6531E8483_1525SID",
"domain": ".nih.gov",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": false
},
{
"name": "WebEnv",
"value": "1D7wJH%40015E11D6531E8483_1525SID",
"domain": ".nlm.nih.gov",
"path": "/",
"expires": 1733167826.636953,
"httpOnly": true,
"secure": true
},
{
"name": "ncbi_pinger",
"value": "N4IgDgTgpgbg+mAFgSwCYgFwgAwEYCsAorrgCIBs+AzLoQBwAsdAnLgOxU1XPZt354AygElSIAL5A===",
"domain": ".ncbi.nlm.nih.gov",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": true
},
{
"name": "mp_94085d51c4102efbb82a71d85705cdcf_mixpanel",
"value": "%7B%22distinct_id%22%3A%20%22193872246ca871-06560f33a3902-1e525636-1fa400-193872246cb267c%22%2C%22%24device_id%22%3A%20%22193872246ca871-06560f33a3902-1e525636-1fa400-193872246cb267c%22%2C%22%24search_engine%22%3A%20%22google%22%2C%22%24initial_referrer%22%3A%20%22https%3A%2F%2Fwww.google.com%2F%22%2C%22%24initial_referring_domain%22%3A%20%22www.google.com%22%7D",
"domain": ".nih.gov",
"path": "/",
"expires": 1764675078,
"httpOnly": false,
"secure": false
}
]

View file

@ -1,74 +0,0 @@
[
{
"name": "MAID",
"value": "+O8mvi2rAtZrnJqF+2cRIQ==",
"domain": ".pnas.org",
"path": "/",
"expires": 1759078802.198648,
"httpOnly": true,
"secure": true
},
{
"name": "MACHINE_LAST_SEEN",
"value": "2024-12-02T09%3A00%3A01.960-08%3A00",
"domain": ".pnas.org",
"path": "/",
"expires": 1759078802.198711,
"httpOnly": true,
"secure": true
},
{
"name": "JSESSIONID",
"value": "CEDD494D14F0052C199B1D7AE667EF42",
"domain": ".pnas.org",
"path": "/",
"expires": -1,
"httpOnly": true,
"secure": true
},
{
"name": "__cf_bm",
"value": "YJQBFxCTLG1d3d9R0fVmwlmAgP9kqVl3zwf02v.COMQ-1733158802-1.0.1.1-tLccs1jD809lM7_9Bhy35sLQdM1TaakBEYvhdDEi1w9cWJS9IGjovTwKGdYQtse6_rWkJNYt._LsHQI2WCwDUQ",
"domain": ".pnas.org",
"path": "/",
"expires": 1733160603.504839,
"httpOnly": true,
"secure": true
},
{
"name": "cookiePolicy",
"value": "accept",
"domain": ".pnas.org",
"path": "/",
"expires": 1767718816.994233,
"httpOnly": true,
"secure": true
},
{
"name": "connect_auto_login",
"value": "true",
"domain": ".pnas.org",
"path": "/",
"expires": 1735750875.510643,
"httpOnly": true,
"secure": true
},
{
"name": "PLUID",
"value": "l8nplDdx7mN9Xh4lErbknypxfmo=",
"domain": ".pnas.org",
"path": "/",
"expires": 1759078875.9476,
"httpOnly": true,
"secure": true
},
{
"name": "mp_94085d51c4102efbb82a71d85705cdcf_mixpanel",
"value": "%7B%22distinct_id%22%3A%20%221938850d07a6d2-0446945abb35c6-1e525636-16a7f0-1938850d07c132a%22%2C%22%24device_id%22%3A%20%221938850d07a6d2-0446945abb35c6-1e525636-16a7f0-1938850d07c132a%22%2C%22%24initial_referrer%22%3A%20%22%24direct%22%2C%22%24initial_referring_domain%22%3A%20%22%24direct%22%7D",
"domain": ".pnas.org",
"path": "/",
"expires": 1764694876,
"httpOnly": false,
"secure": false
}
]

View file

@ -1,122 +0,0 @@
[
{
"name": "edgebucket",
"value": "lyxOSFtqXnsQEn0H9C",
"domain": ".reddit.com",
"path": "/",
"expires": 1738484736.570167,
"httpOnly": false,
"secure": true
},
{
"name": "loid",
"value": "000000000r0luy1m5t.2.1703924736813.Z0FBQUFBQmxqOVFBLVZ2UHJIRWswQW4zQnlJZGtYU2ZBS1dSQlpncW1hQ2o2TmVLMk12QkFYRlBEMFpaOGpqTndjcXhuQjhFS3hhc0dSMXRyZ1o4SUg1cTZvSTNHejk5NW5xdlRTRUtfeUdSU250alJhQTFDY3RSeDJrekdnWG90bk1CWmhhc2hlMWU",
"domain": ".reddit.com",
"path": "/",
"expires": 1763278221.514142,
"httpOnly": false,
"secure": true
},
{
"name": "csv",
"value": "2",
"domain": ".reddit.com",
"path": "/",
"expires": 1738484736.97362,
"httpOnly": false,
"secure": true
},
{
"name": "g_state",
"value": "{\"i_l\":0}",
"domain": "www.reddit.com",
"path": "/",
"expires": 1744270240,
"httpOnly": false,
"secure": false
},
{
"name": "pc",
"value": "nd",
"domain": ".reddit.com",
"path": "/",
"expires": 1760254304,
"httpOnly": false,
"secure": true
},
{
"name": "__stripe_mid",
"value": "104997eb-5535-4dd4-a71a-5a7f697b8a4650cc1f",
"domain": ".www.reddit.com",
"path": "/",
"expires": 1761300348,
"httpOnly": false,
"secure": true,
"sameSite": "Strict"
},
{
"name": "t2_r0luy1m5t_recentclicks3",
"value": "t3_o9s5iv%2Ct3_1ga952r%2Ct3_1eefr4x%2Ct3_1fs5q5b",
"domain": ".reddit.com",
"path": "/",
"expires": 1762676289,
"httpOnly": false,
"secure": false,
"sameSite": "Strict"
},
{
"name": "reddit_session",
"value": "eyJhbGciOiJSUzI1NiIsImtpZCI6IlNIQTI1NjpsVFdYNlFVUEloWktaRG1rR0pVd1gvdWNFK01BSjBYRE12RU1kNzVxTXQ4IiwidHlwIjoiSldUIn0.eyJzdWIiOiJ0Ml9yMGx1eTFtNXQiLCJleHAiOjE3NDg4MzgwNzkuNzc0MDEzLCJpYXQiOjE3MzMxOTk2NzkuNzc0MDEzLCJqdGkiOiJyT3l6V2hFUmtxNDA0b0YzX1FSSVR3R240Y0gzS0EiLCJjaWQiOiJjb29raWUiLCJsY2EiOjE3MDM5MjQ3MzY4MTMsInNjcCI6ImVKeUtqZ1VFQUFEX193RVZBTGsiLCJ2MSI6Ijc2MjE3NTUxMDk3OTY5LDIwMjQtMTAtMTJUMDc6MzA6NDEsYWU3Y2U5ZDdiMjU3OGQ3MWVmMTEwYjFiNTc2NTU2NmNmYzJkNDljNiIsImZsbyI6Mn0.NUhfjOfX7pWC5FFUfJvTw0Ts8b0ZICrmmg_Eh4_O6hvnqEH5UHVjjwtS7YNGyxTRv5k0AJTx-GW5CWTUJvhciPOYokV1iM4RirTbijGfqyvSlbl7YIARX8gUMrm6X2TmFvHmQHem4S-0YcrhvBakEXb2TAk0e4KLiPBS6jbEa5c4EoIkp8PjvFVkWZhY_FMge6SxAmPlx2Xksk2c_9s_rJ-UTZkyOWP5ighh7TmA3B_0ZWEPRme7yDBtV-AJ1UH533suaBukxD_-O3afm1AtjMiQAygZ4tl78T7unCopK1_c8PGiunpfLCMTdTTb07NnI2_fo-8AfARF-9O1MPdYFQ",
"domain": ".reddit.com",
"path": "/",
"expires": 1748838078.887127,
"httpOnly": true,
"secure": true
},
{
"name": "mp_94085d51c4102efbb82a71d85705cdcf_mixpanel",
"value": "%7B%22distinct_id%22%3A%20%22cm28oqri501xjtwbjzt1prdzm%22%2C%22%24device_id%22%3A%20%221929b3d4cbd13e0-01d450fe6d3301-16525637-16a7f0-1929b3d4cbe2033%22%2C%22%24search_engine%22%3A%20%22google%22%2C%22%24initial_referrer%22%3A%20%22https%3A%2F%2Fwww.google.com%2F%22%2C%22%24initial_referring_domain%22%3A%20%22www.google.com%22%2C%22%24user_id%22%3A%20%22cm28oqri501xjtwbjzt1prdzm%22%7D",
"domain": ".reddit.com",
"path": "/",
"expires": 1764735681,
"httpOnly": false,
"secure": false
},
{
"name": "reddit_chat_view",
"value": "closed",
"domain": "www.reddit.com",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": false
},
{
"name": "token_v2",
"value": "eyJhbGciOiJSUzI1NiIsImtpZCI6IlNIQTI1NjpzS3dsMnlsV0VtMjVmcXhwTU40cWY4MXE2OWFFdWFyMnpLMUdhVGxjdWNZIiwidHlwIjoiSldUIn0.eyJzdWIiOiJ1c2VyIiwiZXhwIjoxNzMzMjg2MDgxLjgwMjc1NywiaWF0IjoxNzMzMTk5NjgxLjgwMjc1NywianRpIjoiTkFrUGZVTVVwRGZ1SmFNbjZkV3dqbnB4U0REMEFnIiwiY2lkIjoiMFItV0FNaHVvby1NeVEiLCJsaWQiOiJ0Ml9yMGx1eTFtNXQiLCJhaWQiOiJ0Ml9yMGx1eTFtNXQiLCJsY2EiOjE3MDM5MjQ3MzY4MTMsInNjcCI6ImVKeGtrZEdPdERBSWhkLWwxejdCX3lwX05odHNjWWFzTFFhb2szbjdEVm9jazcwN2NMNGlIUDhuS0lxRkxFMnVCS0drS1dFRld0T1VOaUx2NTh5OU9aRUZTeUZUUjg0M3l3b2thVXBQVW1ONXB5bFJ3V1prTGxmYXNVS0RCNllwVlM2WjIwS1BTNXZRM0kxRnowNk1xbHhXSHRUWW8zSnBiR01LMnhQanpjWnFReXF1eTZsTVlGa29uOFdMZnZ5Ry10WS1mN2JmaEhZd3JLZ0tEX1RPdUZ4d1lfSERGSGJfbnByMGJGMndxTDNYZzlRLTEtTjI3Yk5tb2RtNV9WelB2emFTY1RtRzVpZll2N3QtQ1IxNDVIbVpVUWN3WWcwX3lyQWo2X0N2T29ES0JRV01KWWhQSTVBcmwyX19KZGl1VGY4YXR5ZC0tR2JFVFdfNHJSbW81eExFb1VfajZ6Y0FBUF9fWERfZTR3IiwicmNpZCI6Ill6STUzaXNuVVRQUm42M3NQbjRSNFBNbVdOcjE4SU1uUU93T2VfaHFuem8iLCJmbG8iOjJ9.F_24jXHdZDXCmMx4aubrjT94AtnYDzD7eg7SjV1Rwa6ymrvrXW8uZnIqgqVkHJio-mZW_JsxlSKzlIDMJ_lrWtgxFHhgGFWnWkS-raKhYrrQt3gwN-C5VPc3iF-1pVUaf0Jf0gX1aYyvdtRD48rRd8sjCoAwHcGiNH8B7abUPN8JJuQcAEH2GzYSc9Zarb0jANLyw7fGdTdWXfWjUXjy33alItwyhMVcgCIXlVf5wlayRBsRXS_ObpKiril2BuAgCrrVuOWDdflpi58FTA0pki4F0wTdcJfORP9yjZ_L7AJUXhXhswx5Lcf0kTU1hFy4RqFRRd95Q0xZg7Yj2uIC7w",
"domain": ".reddit.com",
"path": "/",
"expires": 1733286081.968748,
"httpOnly": true,
"secure": true
},
{
"name": "session_tracker",
"value": "hhjeikkilpgneqelrj.0.1733199684513.Z0FBQUFBQm5Ub2RFN25sX3lzSVRyLUFxSEtEcmozdW1BbGdaQUtmZUZCMVNteW1PSGhxRHNEdkhrR0lneUNCTUgzUlE2QkdUb2tRRHVyTnNNNlpnOV80TVZzd2hlSzFCRUtLLVZmREYwejhDSXIyX1Q4T04yMnlrSGJkMlVla2h5R1huYnFGOUFleGU",
"domain": ".reddit.com",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": true
},
{
"name": "csrf_token",
"value": "04b1407f1ebd23723b261a469d4dac84",
"domain": ".reddit.com",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": true,
"sameSite": "Strict"
}
]

View file

@ -1,133 +0,0 @@
[
{
"name": "did",
"value": "iXxy5Y0Lo7cY1c90Riq7yRkq4VdJdrz9F1T7N0fqKOFKoaNbUas5EeVE3Oo7jl4M",
"domain": ".www.researchgate.net",
"path": "/",
"expires": 1764671442.13171,
"httpOnly": true,
"secure": true
},
{
"name": "ptc",
"value": "RG1.8779880038783811042.1696569455",
"domain": ".www.researchgate.net",
"path": "/",
"expires": 1767695442.13179,
"httpOnly": true,
"secure": true
},
{
"name": "pl",
"value": "wbbzs5zjXc51gyzsE5huVpQxOu7nxEnyZDiQcl7KEpwjXmoGTD064RedCDxz696tPbB38xaV8xe1oIJFEAAanE6P4MKWta1rUAq6iCLxSGnCYhfeB9JhdfN5cz70vq1R",
"domain": ".www.researchgate.net",
"path": "/",
"expires": 1764671442.13175,
"httpOnly": true,
"secure": true
},
{
"name": "didomi_token",
"value": "eyJ1c2VyX2lkIjoiMTkxOTNiYWQtMDhhMS02Njg5LWE4NTUtMzAwN2VhYzA5MjAwIiwiY3JlYXRlZCI6IjIwMjQtMDgtMjdUMTI6MDY6MTkuNTMwWiIsInVwZGF0ZWQiOiIyMDI0LTA4LTI3VDEyOjA2OjI4LjQ4M1oiLCJ2ZW5kb3JzIjp7ImVuYWJsZWQiOlsiZ29vZ2xlIiwiYzpnb29nbGVhbmEtNFRYbkppZ1IiLCJjOnBvc3Rob2ctQmpKeEZkRjkiLCJjOmRpZG9taSJdfSwidmVuZG9yc19saSI6eyJlbmFibGVkIjpbImdvb2dsZSJdfSwidmVyc2lvbiI6Mn0=",
"domain": ".researchgate.net",
"path": "/",
"expires": 1756382788,
"httpOnly": false,
"secure": true,
"sameSite": "Lax"
},
{
"name": "euconsent-v2-didomi",
"value": "CQEAuYAQEAuYAAHABBENBDFgALHAAELAAAYgF5wAQF5gXnABAXmAAAAA.djgACFgAAAAA",
"domain": ".researchgate.net",
"path": "/",
"expires": 1756382788,
"httpOnly": false,
"secure": true,
"sameSite": "Lax"
},
{
"name": "dmd-tag",
"value": "29c64880-8f9e-11ef-aabb-5bb25e381cbc",
"domain": ".www.researchgate.net",
"path": "/",
"expires": 1767695450.916031,
"httpOnly": false,
"secure": true,
"sameSite": "Lax"
},
{
"name": "sid",
"value": "TCoR8Z544diaoyMwqLA8X9TS7HfkN6SpWX1ropwuTqxNP2j10bZlYc7YIVBKPyVcfem0NqbdChel00sIOkOeF5GtceLW00ubTrpe4dAD5xLJ81ocWo9svlf6J9gynIf6",
"domain": ".www.researchgate.net",
"path": "/",
"expires": -1,
"httpOnly": true,
"secure": true
},
{
"name": "cili",
"value": "_2_MjZhMmQ5N2FmMzgyNmJkYTJiZjc2ZjE0ZjRmMDUxYjMzYjJkZmQxYWY0Njg4Nzc4MTM4ZDE1MGVmNWRhYTc0Nl8yOTU3MzAzNjsw",
"domain": ".www.researchgate.net",
"path": "/",
"expires": -1,
"httpOnly": true,
"secure": true
},
{
"name": "cirgu",
"value": "_1_SaiBmdG2CNFFXfi8YLMIIkC%2BD5oxd2H0CqLqIhkm0V8y0ncsQEoe%2FGUZOhACuKUy5feYcA%3D%3D",
"domain": ".www.researchgate.net",
"path": "/",
"expires": 1764692368.131654,
"httpOnly": false,
"secure": true
},
{
"name": "_cfuvid",
"value": "GehzRPPGzCCv.nPAiw9L7tRQCAi.hQAAinF5RqUksz0-1733135442009-0.0.1.1-604800000",
"domain": ".researchgate.net",
"path": "/",
"expires": -1,
"httpOnly": true,
"secure": true
},
{
"name": "dmd-sid4",
"value": "{%22id%22:%227cce57f0-b098-11ef-924a-5d9117969fc2%22%2C%22timestamp%22:1733135444000%2C%22lastUpdate%22:1733135450000}",
"domain": ".www.researchgate.net",
"path": "/",
"expires": -1,
"httpOnly": false,
"secure": true,
"sameSite": "Lax"
},
{
"name": "mp_94085d51c4102efbb82a71d85705cdcf_mixpanel",
"value": "%7B%22distinct_id%22%3A%20%22cm28oqri501xjtwbjzt1prdzm%22%2C%22%24device_id%22%3A%20%22192aea49fbae19-0a0ceb4f539c6b-16525637-16a7f0-192aea49fbb210e%22%2C%22utm_source%22%3A%20%22researchgate%22%2C%22utm_medium%22%3A%20%22email%22%2C%22utm_campaign%22%3A%20%22re442%22%2C%22utm_content%22%3A%20%22re442_up_pb_hnsg_nnaas_p110%22%2C%22utm_term%22%3A%20%22re442_up_pb_hnsg_nnaas%22%2C%22%24initial_referrer%22%3A%20%22%24direct%22%2C%22%24initial_referring_domain%22%3A%20%22%24direct%22%2C%22%24user_id%22%3A%20%22cm28oqri501xjtwbjzt1prdzm%22%2C%22%24search_engine%22%3A%20%22google%22%7D",
"domain": ".researchgate.net",
"path": "/",
"expires": 1764671451,
"httpOnly": false,
"secure": false
},
{
"name": "__cf_bm",
"value": "X2xX99VpHc3Xm_TRnIRhN7Wp533PLOt.311xMWQYn28-1733136361-1.0.1.1-qskSGITd3dmHUV3UswCd8O6ygg3jdBOF9Wz1PxlPQq66VUx0blvFKst7fH33pTnkl2W1VBjBjlF8CgPVLesusQ",
"domain": ".researchgate.net",
"path": "/",
"expires": 1733138161.246979,
"httpOnly": true,
"secure": true
},
{
"name": "ph_phc_ma1XTQyee96N1GML6qUTgLQRiDifnRcE9STiHTZ0CfZ_posthog",
"value": "%7B%22distinct_id%22%3A%22AC%3A29573036%22%2C%22%24sesid%22%3A%5B1733137066914%2C%22019386ec-50e4-79f9-9b7b-d8ed5854cf9f%22%2C1733135454436%5D%2C%22%24epp%22%3Atrue%2C%22%24initial_person_info%22%3A%7B%22r%22%3A%22%24direct%22%2C%22u%22%3A%22https%3A%2F%2Fwww.researchgate.net%2Fprofile%2FJiabin_Tang4%2Fpublications%3FeditMode%3D1%26sorting%3DrecentlyAdded%26utm_medium%3Demail%26utm_source%3Dresearchgate%26utm_campaign%3Dre214%26loginT%3DeX2d52IqLj-iYd58KHUvU88w6Ub-0Rjh_XCM-6tyfVf3Goy3Bf0swonajNlIbQg6gax3uaL6ulhi_ik9eMs%26pli%3D1%26utm_term%3Dre214_x%26utm_content%3Dre214_x_p2%26cp%3Dre214_x_p2%26uid%3D2aJ4s09Uf8rvZLKbNnk9UiFVrlZcTRXt51G2%26ch%3Dreg%22%7D%7D",
"domain": ".researchgate.net",
"path": "/",
"expires": 1764673066,
"httpOnly": false,
"secure": true,
"sameSite": "Lax"
}
]

View file

@ -1,173 +0,0 @@
[
{
"name": "PREF",
"value": "f7=4100&tz=Asia.Hong_Kong&f4=4000000",
"domain": ".youtube.com",
"path": "/",
"expires": 1767698936.819909,
"httpOnly": false,
"secure": true
},
{
"name": "HSID",
"value": "AuvRBV-Q9GEReACoE",
"domain": ".youtube.com",
"path": "/",
"expires": 1766544331.11147,
"httpOnly": true,
"secure": false
},
{
"name": "SSID",
"value": "AHum7OsxEOAD-Ibp4",
"domain": ".youtube.com",
"path": "/",
"expires": 1766544331.111503,
"httpOnly": true,
"secure": true
},
{
"name": "APISID",
"value": "FRZgwlTWYfVE-B2B/A7FrDbUAZCyMOb6ue",
"domain": ".youtube.com",
"path": "/",
"expires": 1766544331.111539,
"httpOnly": false,
"secure": false
},
{
"name": "SAPISID",
"value": "mPwGiY6zyqe8IurQ/An35YGZHoL6Ged8z8",
"domain": ".youtube.com",
"path": "/",
"expires": 1766544331.111575,
"httpOnly": false,
"secure": true
},
{
"name": "__Secure-1PAPISID",
"value": "mPwGiY6zyqe8IurQ/An35YGZHoL6Ged8z8",
"domain": ".youtube.com",
"path": "/",
"expires": 1766544331.111613,
"httpOnly": false,
"secure": true
},
{
"name": "__Secure-3PAPISID",
"value": "mPwGiY6zyqe8IurQ/An35YGZHoL6Ged8z8",
"domain": ".youtube.com",
"path": "/",
"expires": 1766544331.111646,
"httpOnly": false,
"secure": true
},
{
"name": "LOGIN_INFO",
"value": "AFmmF2swRgIhALZXJQRg7B6iILvfx41A-mHr8rh7RMGV3cNkppAPlxxvAiEA38fh68Ct3o4p-ywc1zHhWZxrJ5Dpcd0AcsMp4RZONUs:QUQ3MjNmeTdGelpVWXZuN1RTeUMzQkYwNEZhVXY1emtGT1pycWFmWC1LU0txanZReHBLaDRxVHJEZGRyOV8wajFIajdyLWYwcE1rSFZfRVlBM3BNaXZSQlMtLVlLR3RmSURpQjhKRlJaU0xJcHQySmZVNUp6eWFFak9rbE4yWDg5WGdjSkM4QjJhcFRYZTAwVEF6a3RPUzhsSzV0R05YWkVB",
"domain": ".youtube.com",
"path": "/",
"expires": 1766493681.602721,
"httpOnly": true,
"secure": true
},
{
"name": "SID",
"value": "g.a000qQgZRYfZgm7ZwgtAwFbDQEBvZXpLRLlzTLW27vxVIiEvbgDsOzHaI33jRkuMY7qPd2ZFEAACgYKAUMSARYSFQHGX2Miunh4qpdUynkY2PXqPSvzGxoVAUF8yKqW-lS90SUYH-cprrTRIWDZ0076",
"domain": ".youtube.com",
"path": "/",
"expires": 1766544331.111199,
"httpOnly": false,
"secure": false
},
{
"name": "__Secure-1PSID",
"value": "g.a000qQgZRYfZgm7ZwgtAwFbDQEBvZXpLRLlzTLW27vxVIiEvbgDsSADU_B-86vJipiHTuQxp3gACgYKATgSARYSFQHGX2MinsKT7OE3L_H9SEzrOgndQRoVAUF8yKqLdTsMTC89NTlwps9-uRSu0076",
"domain": ".youtube.com",
"path": "/",
"expires": 1766544331.111393,
"httpOnly": true,
"secure": true
},
{
"name": "__Secure-3PSID",
"value": "g.a000qQgZRYfZgm7ZwgtAwFbDQEBvZXpLRLlzTLW27vxVIiEvbgDsvr3qlDZAuj7i33G332SpFwACgYKAekSARYSFQHGX2Mi9DYKrElENMuHNeaGctBCdBoVAUF8yKrmoWXREkhXzva2a6J3B2ps0076",
"domain": ".youtube.com",
"path": "/",
"expires": 1766544331.111431,
"httpOnly": true,
"secure": true
},
{
"name": "NID",
"value": "519=I7v3EQhK2PGhnXSOCZyz5QYVGbJ383LEPRVqShP2G15ip3zj5VjyDJWEAlJtS3ifC0qs-7cxlwk_vCTqntg_LWW_hfONxTqG6JVJ8JvpMEr2eM_Fqb9n8nVbc_YNrwSIUEorM5N5FUoZmW2u4Qksi_a0-ssHJWsOBEwdxpDONaY",
"domain": ".youtube.com",
"path": "/",
"expires": 1747984059.767459,
"httpOnly": true,
"secure": true
},
{
"name": "mp_94085d51c4102efbb82a71d85705cdcf_mixpanel",
"value": "%7B%22distinct_id%22%3A%20%22cm28oqri501xjtwbjzt1prdzm%22%2C%22%24device_id%22%3A%20%221928f27a2a80-04cb41970e6945-16525637-16a7f0-1928f27a2a920cc%22%2C%22%24initial_referrer%22%3A%20%22%24direct%22%2C%22%24initial_referring_domain%22%3A%20%22%24direct%22%2C%22%24user_id%22%3A%20%22cm28oqri501xjtwbjzt1prdzm%22%2C%22%24search_engine%22%3A%20%22google%22%2C%22utm_source%22%3A%20%22ythp%22%2C%22utm_medium%22%3A%20%22LeftNav%22%2C%22utm_campaign%22%3A%20%22ytgen%22%2C%22utm_content%22%3A%20%22txt%22%7D",
"domain": ".youtube.com",
"path": "/",
"expires": 1764674936,
"httpOnly": false,
"secure": false
},
{
"name": "ST-xuwub9",
"value": "session_logininfo=AFmmF2swRgIhALZXJQRg7B6iILvfx41A-mHr8rh7RMGV3cNkppAPlxxvAiEA38fh68Ct3o4p-ywc1zHhWZxrJ5Dpcd0AcsMp4RZONUs%3AQUQ3MjNmeTdGelpVWXZuN1RTeUMzQkYwNEZhVXY1emtGT1pycWFmWC1LU0txanZReHBLaDRxVHJEZGRyOV8wajFIajdyLWYwcE1rSFZfRVlBM3BNaXZSQlMtLVlLR3RmSURpQjhKRlJaU0xJcHQySmZVNUp6eWFFak9rbE4yWDg5WGdjSkM4QjJhcFRYZTAwVEF6a3RPUzhsSzV0R05YWkVB",
"domain": ".youtube.com",
"path": "/",
"expires": 1733138942,
"httpOnly": false,
"secure": false
},
{
"name": "__Secure-1PSIDTS",
"value": "sidts-CjIBQT4rXxBz2VTDVx5cMn6A_YgFHPgo9Z-eWATlXeINT58ZUInn2_vTeUb4czgvWV-j5BAA",
"domain": ".youtube.com",
"path": "/",
"expires": 1764674938.226552,
"httpOnly": true,
"secure": true
},
{
"name": "__Secure-3PSIDTS",
"value": "sidts-CjIBQT4rXxBz2VTDVx5cMn6A_YgFHPgo9Z-eWATlXeINT58ZUInn2_vTeUb4czgvWV-j5BAA",
"domain": ".youtube.com",
"path": "/",
"expires": 1764674938.226624,
"httpOnly": true,
"secure": true
},
{
"name": "SIDCC",
"value": "AKEyXzWXdf72zjmIboZNkzmg9VURwnmM1MpJVRgAxjRuMRib1b7iu5SkCZccexcK6jf2abrLEoQ",
"domain": ".youtube.com",
"path": "/",
"expires": 1764674940.01384,
"httpOnly": false,
"secure": false
},
{
"name": "__Secure-1PSIDCC",
"value": "AKEyXzXWzx9lRoJCEXrHvqZeWtAugc_tFou4ucmylPeSpc0nRX2EZ-t3QGTGqberRiTB3QIHjQ",
"domain": ".youtube.com",
"path": "/",
"expires": 1764674940.013891,
"httpOnly": true,
"secure": true
},
{
"name": "__Secure-3PSIDCC",
"value": "AKEyXzVyGt5J-awGqBrP5_hTnwTmCMsUu5oWISlljhXbP9P7vrGxlzOg05O_vwlgbuGOKRUQGYA",
"domain": ".youtube.com",
"path": "/",
"expires": 1764674940.013962,
"httpOnly": true,
"secure": true
}
]

View file

@ -1,195 +1,2 @@
COOKIES_LIST = [
{'name': 'edgebucket', 'value': 'lyxOSFtqXnsQEn0H9C', 'domain': '.reddit.com', 'path': '/', 'expires': 1738484736.570167, 'httpOnly': False, 'secure': True},
{'name': 'loid', 'value': '000000000r0luy1m5t.2.1703924736813.Z0FBQUFBQmxqOVFBLVZ2UHJIRWswQW4zQnlJZGtYU2ZBS1dSQlpncW1hQ2o2TmVLMk12QkFYRlBEMFpaOGpqTndjcXhuQjhFS3hhc0dSMXRyZ1o4SUg1cTZvSTNHejk5NW5xdlRTRUtfeUdSU250alJhQTFDY3RSeDJrekdnWG90bk1CWmhhc2hlMWU', 'domain': '.reddit.com', 'path': '/', 'expires': 1763278221.514142, 'httpOnly': False, 'secure': True},
{'name': 'csv', 'value': '2', 'domain': '.reddit.com', 'path': '/', 'expires': 1738484736.97362, 'httpOnly': False, 'secure': True},
{'name': 'g_state', 'value': '{"i_l":0}', 'domain': 'www.reddit.com', 'path': '/', 'expires': 1744270240, 'httpOnly': False, 'secure': False},
{'name': 'pc', 'value': 'nd', 'domain': '.reddit.com', 'path': '/', 'expires': 1760254304, 'httpOnly': False, 'secure': True},
{'name': '__stripe_mid', 'value': '104997eb-5535-4dd4-a71a-5a7f697b8a4650cc1f', 'domain': '.www.reddit.com', 'path': '/', 'expires': 1761300348, 'httpOnly': False, 'secure': True, 'sameSite': 'Strict'},
{'name': 't2_r0luy1m5t_recentclicks3', 'value': 't3_o9s5iv%2Ct3_1ga952r%2Ct3_1eefr4x%2Ct3_1fs5q5b', 'domain': '.reddit.com', 'path': '/', 'expires': 1762676289, 'httpOnly': False, 'secure': False, 'sameSite': 'Strict'},
{'name': 'reddit_session', 'value': 'eyJhbGciOiJSUzI1NiIsImtpZCI6IlNIQTI1NjpsVFdYNlFVUEloWktaRG1rR0pVd1gvdWNFK01BSjBYRE12RU1kNzVxTXQ4IiwidHlwIjoiSldUIn0.eyJzdWIiOiJ0Ml9yMGx1eTFtNXQiLCJleHAiOjE3NDg4MzgwNzkuNzc0MDEzLCJpYXQiOjE3MzMxOTk2NzkuNzc0MDEzLCJqdGkiOiJyT3l6V2hFUmtxNDA0b0YzX1FSSVR3R240Y0gzS0EiLCJjaWQiOiJjb29raWUiLCJsY2EiOjE3MDM5MjQ3MzY4MTMsInNjcCI6ImVKeUtqZ1VFQUFEX193RVZBTGsiLCJ2MSI6Ijc2MjE3NTUxMDk3OTY5LDIwMjQtMTAtMTJUMDc6MzA6NDEsYWU3Y2U5ZDdiMjU3OGQ3MWVmMTEwYjFiNTc2NTU2NmNmYzJkNDljNiIsImZsbyI6Mn0.NUhfjOfX7pWC5FFUfJvTw0Ts8b0ZICrmmg_Eh4_O6hvnqEH5UHVjjwtS7YNGyxTRv5k0AJTx-GW5CWTUJvhciPOYokV1iM4RirTbijGfqyvSlbl7YIARX8gUMrm6X2TmFvHmQHem4S-0YcrhvBakEXb2TAk0e4KLiPBS6jbEa5c4EoIkp8PjvFVkWZhY_FMge6SxAmPlx2Xksk2c_9s_rJ-UTZkyOWP5ighh7TmA3B_0ZWEPRme7yDBtV-AJ1UH533suaBukxD_-O3afm1AtjMiQAygZ4tl78T7unCopK1_c8PGiunpfLCMTdTTb07NnI2_fo-8AfARF-9O1MPdYFQ', 'domain': '.reddit.com', 'path': '/', 'expires': 1748838078.887127, 'httpOnly': True, 'secure': True},
{'name': 'mp_94085d51c4102efbb82a71d85705cdcf_mixpanel', 'value': '%7B%22distinct_id%22%3A%20%22cm28oqri501xjtwbjzt1prdzm%22%2C%22%24device_id%22%3A%20%221929b3d4cbd13e0-01d450fe6d3301-16525637-16a7f0-1929b3d4cbe2033%22%2C%22%24search_engine%22%3A%20%22google%22%2C%22%24initial_referrer%22%3A%20%22https%3A%2F%2Fwww.google.com%2F%22%2C%22%24initial_referring_domain%22%3A%20%22www.google.com%22%2C%22%24user_id%22%3A%20%22cm28oqri501xjtwbjzt1prdzm%22%7D', 'domain': '.reddit.com', 'path': '/', 'expires': 1764735681, 'httpOnly': False, 'secure': False},
{'name': 'reddit_chat_view', 'value': 'closed', 'domain': 'www.reddit.com', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': False},
{'name': 'token_v2', 'value': 'eyJhbGciOiJSUzI1NiIsImtpZCI6IlNIQTI1NjpzS3dsMnlsV0VtMjVmcXhwTU40cWY4MXE2OWFFdWFyMnpLMUdhVGxjdWNZIiwidHlwIjoiSldUIn0.eyJzdWIiOiJ1c2VyIiwiZXhwIjoxNzMzMjg2MDgxLjgwMjc1NywiaWF0IjoxNzMzMTk5NjgxLjgwMjc1NywianRpIjoiTkFrUGZVTVVwRGZ1SmFNbjZkV3dqbnB4U0REMEFnIiwiY2lkIjoiMFItV0FNaHVvby1NeVEiLCJsaWQiOiJ0Ml9yMGx1eTFtNXQiLCJhaWQiOiJ0Ml9yMGx1eTFtNXQiLCJsY2EiOjE3MDM5MjQ3MzY4MTMsInNjcCI6ImVKeGtrZEdPdERBSWhkLWwxejdCX3lwX05odHNjWWFzTFFhb2szbjdEVm9jazcwN2NMNGlIUDhuS0lxRkxFMnVCS0drS1dFRld0T1VOaUx2NTh5OU9aRUZTeUZUUjg0M3l3b2thVXBQVW1ONXB5bFJ3V1prTGxmYXNVS0RCNllwVlM2WjIwS1BTNXZRM0kxRnowNk1xbHhXSHRUWW8zSnBiR01LMnhQanpjWnFReXF1eTZsTVlGa29uOFdMZnZ5Ry10WS1mN2JmaEhZd3JLZ0tEX1RPdUZ4d1lfSERGSGJfbnByMGJGMndxTDNYZzlRLTEtTjI3Yk5tb2RtNV9WelB2emFTY1RtRzVpZll2N3QtQ1IxNDVIbVpVUWN3WWcwX3lyQWo2X0N2T29ES0JRV01KWWhQSTVBcmwyX19KZGl1VGY4YXR5ZC0tR2JFVFdfNHJSbW81eExFb1VfajZ6Y0FBUF9fWERfZTR3IiwicmNpZCI6Ill6STUzaXNuVVRQUm42M3NQbjRSNFBNbVdOcjE4SU1uUU93T2VfaHFuem8iLCJmbG8iOjJ9.F_24jXHdZDXCmMx4aubrjT94AtnYDzD7eg7SjV1Rwa6ymrvrXW8uZnIqgqVkHJio-mZW_JsxlSKzlIDMJ_lrWtgxFHhgGFWnWkS-raKhYrrQt3gwN-C5VPc3iF-1pVUaf0Jf0gX1aYyvdtRD48rRd8sjCoAwHcGiNH8B7abUPN8JJuQcAEH2GzYSc9Zarb0jANLyw7fGdTdWXfWjUXjy33alItwyhMVcgCIXlVf5wlayRBsRXS_ObpKiril2BuAgCrrVuOWDdflpi58FTA0pki4F0wTdcJfORP9yjZ_L7AJUXhXhswx5Lcf0kTU1hFy4RqFRRd95Q0xZg7Yj2uIC7w', 'domain': '.reddit.com', 'path': '/', 'expires': 1733286081.968748, 'httpOnly': True, 'secure': True},
{'name': 'session_tracker', 'value': 'hhjeikkilpgneqelrj.0.1733199684513.Z0FBQUFBQm5Ub2RFN25sX3lzSVRyLUFxSEtEcmozdW1BbGdaQUtmZUZCMVNteW1PSGhxRHNEdkhrR0lneUNCTUgzUlE2QkdUb2tRRHVyTnNNNlpnOV80TVZzd2hlSzFCRUtLLVZmREYwejhDSXIyX1Q4T04yMnlrSGJkMlVla2h5R1huYnFGOUFleGU', 'domain': '.reddit.com', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': True},
{'name': 'csrf_token', 'value': '04b1407f1ebd23723b261a469d4dac84', 'domain': '.reddit.com', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': True, 'sameSite': 'Strict'},
{'name': 'hum_ieee_visitor', 'value': '3403d64f-1870-4601-9ff7-e5900074a6db', 'domain': '.ieeexplore-ieee-org.eproxy.lib.hku.hk', 'path': '/', 'expires': 1756280921.91082, 'httpOnly': False, 'secure': True},
{'name': '_zitok', 'value': '6273c58ab3f308a07a711718187500', 'domain': '.ieeexplore-ieee-org.eproxy.lib.hku.hk', 'path': '/', 'expires': 1761551757, 'httpOnly': False, 'secure': True, 'sameSite': 'Strict'},
{'name': 'AMCV_4D6368F454EC41940A4C98A6%40AdobeOrg', 'value': '179643557%7CMCIDTS%7C20060%7CMCMID%7C09958998665032490705964449674769238820%7CMCAID%7CNONE%7CMCOPTOUT-1733149186s%7CNONE%7CvVersion%7C5.5.0', 'domain': '.hku.hk', 'path': '/', 'expires': 1767701986.053151, 'httpOnly': False, 'secure': False},
{'name': 'ezproxy', 'value': 'e1~OilZogbDH4iMWPK', 'domain': '.eproxy.lib.hku.hk', 'path': '/', 'expires': -1, 'httpOnly': True, 'secure': False},
{'name': 'ezproxyl', 'value': 'OilZogbDH4iMWPK', 'domain': '.eproxy.lib.hku.hk', 'path': '/', 'expires': -1, 'httpOnly': True, 'secure': False, 'sameSite': 'Lax'},
{'name': 'ezproxyn', 'value': 'OilZogbDH4iMWPK', 'domain': '.eproxy.lib.hku.hk', 'path': '/', 'expires': -1, 'httpOnly': True, 'secure': True},
{'name': 'AMCVS_8E929CC25A1FB2B30A495C97%40AdobeOrg', 'value': '1', 'domain': '.ieeexplore-ieee-org.eproxy.lib.hku.hk', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': False},
{'name': 's_cc', 'value': 'true', 'domain': '.ieeexplore-ieee-org.eproxy.lib.hku.hk', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': False},
{'name': 'mp_94085d51c4102efbb82a71d85705cdcf_mixpanel', 'value': '%7B%22distinct_id%22%3A%20%22193873d31db19b2-00bbd0bd5ad31-1e525636-1fa400-193873d31dc28b3%22%2C%22%24device_id%22%3A%20%22193873d31db19b2-00bbd0bd5ad31-1e525636-1fa400-193873d31dc28b3%22%2C%22%24initial_referrer%22%3A%20%22%24direct%22%2C%22%24initial_referring_domain%22%3A%20%22%24direct%22%7D', 'domain': '.lib.hku.hk', 'path': '/', 'expires': 1765086053, 'httpOnly': False, 'secure': False},
{'name': 'utag_main', 'value': 'v_id:01939fa183070055958b6429f88c05075005506d00bd0$_sn:1$_se:2$_ss:0$_st:1733551853250$ses_id:1733549982472%3Bexp-session$_pn:2%3Bexp-session$vapi_domain:ieeexplore-ieee-org.eproxy.lib.hku.hk', 'domain': '.hku.hk', 'path': '/', 'expires': 1765086053, 'httpOnly': False, 'secure': False},
{'name': 'AMCV_8E929CC25A1FB2B30A495C97%40AdobeOrg', 'value': '359503849%7CMCIDTS%7C20065%7CMCMID%7C53777252718039557930823884447397163100%7CMCAID%7CNONE%7CMCOPTOUT-1733557253s%7CNONE%7CvVersion%7C5.0.1', 'domain': '.ieeexplore-ieee-org.eproxy.lib.hku.hk', 'path': '/', 'expires': 1768110053.386389, 'httpOnly': False, 'secure': False},
{'name': 'did', 'value': 'iXxy5Y0Lo7cY1c90Riq7yRkq4VdJdrz9F1T7N0fqKOFKoaNbUas5EeVE3Oo7jl4M', 'domain': '.www.researchgate.net', 'path': '/', 'expires': 1764671442.13171, 'httpOnly': True, 'secure': True},
{'name': 'ptc', 'value': 'RG1.8779880038783811042.1696569455', 'domain': '.www.researchgate.net', 'path': '/', 'expires': 1767695442.13179, 'httpOnly': True, 'secure': True},
{'name': 'pl', 'value': 'wbbzs5zjXc51gyzsE5huVpQxOu7nxEnyZDiQcl7KEpwjXmoGTD064RedCDxz696tPbB38xaV8xe1oIJFEAAanE6P4MKWta1rUAq6iCLxSGnCYhfeB9JhdfN5cz70vq1R', 'domain': '.www.researchgate.net', 'path': '/', 'expires': 1764671442.13175, 'httpOnly': True, 'secure': True},
{'name': 'didomi_token', 'value': 'eyJ1c2VyX2lkIjoiMTkxOTNiYWQtMDhhMS02Njg5LWE4NTUtMzAwN2VhYzA5MjAwIiwiY3JlYXRlZCI6IjIwMjQtMDgtMjdUMTI6MDY6MTkuNTMwWiIsInVwZGF0ZWQiOiIyMDI0LTA4LTI3VDEyOjA2OjI4LjQ4M1oiLCJ2ZW5kb3JzIjp7ImVuYWJsZWQiOlsiZ29vZ2xlIiwiYzpnb29nbGVhbmEtNFRYbkppZ1IiLCJjOnBvc3Rob2ctQmpKeEZkRjkiLCJjOmRpZG9taSJdfSwidmVuZG9yc19saSI6eyJlbmFibGVkIjpbImdvb2dsZSJdfSwidmVyc2lvbiI6Mn0=', 'domain': '.researchgate.net', 'path': '/', 'expires': 1756382788, 'httpOnly': False, 'secure': True, 'sameSite': 'Lax'},
{'name': 'euconsent-v2-didomi', 'value': 'CQEAuYAQEAuYAAHABBENBDFgALHAAELAAAYgF5wAQF5gXnABAXmAAAAA.djgACFgAAAAA', 'domain': '.researchgate.net', 'path': '/', 'expires': 1756382788, 'httpOnly': False, 'secure': True, 'sameSite': 'Lax'},
{'name': 'dmd-tag', 'value': '29c64880-8f9e-11ef-aabb-5bb25e381cbc', 'domain': '.www.researchgate.net', 'path': '/', 'expires': 1767695450.916031, 'httpOnly': False, 'secure': True, 'sameSite': 'Lax'},
{'name': 'sid', 'value': 'TCoR8Z544diaoyMwqLA8X9TS7HfkN6SpWX1ropwuTqxNP2j10bZlYc7YIVBKPyVcfem0NqbdChel00sIOkOeF5GtceLW00ubTrpe4dAD5xLJ81ocWo9svlf6J9gynIf6', 'domain': '.www.researchgate.net', 'path': '/', 'expires': -1, 'httpOnly': True, 'secure': True},
{'name': 'cili', 'value': '_2_MjZhMmQ5N2FmMzgyNmJkYTJiZjc2ZjE0ZjRmMDUxYjMzYjJkZmQxYWY0Njg4Nzc4MTM4ZDE1MGVmNWRhYTc0Nl8yOTU3MzAzNjsw', 'domain': '.www.researchgate.net', 'path': '/', 'expires': -1, 'httpOnly': True, 'secure': True},
{'name': 'cirgu', 'value': '_1_SaiBmdG2CNFFXfi8YLMIIkC%2BD5oxd2H0CqLqIhkm0V8y0ncsQEoe%2FGUZOhACuKUy5feYcA%3D%3D', 'domain': '.www.researchgate.net', 'path': '/', 'expires': 1764692368.131654, 'httpOnly': False, 'secure': True},
{'name': '_cfuvid', 'value': 'GehzRPPGzCCv.nPAiw9L7tRQCAi.hQAAinF5RqUksz0-1733135442009-0.0.1.1-604800000', 'domain': '.researchgate.net', 'path': '/', 'expires': -1, 'httpOnly': True, 'secure': True},
{'name': 'dmd-sid4', 'value': '{%22id%22:%227cce57f0-b098-11ef-924a-5d9117969fc2%22%2C%22timestamp%22:1733135444000%2C%22lastUpdate%22:1733135450000}', 'domain': '.www.researchgate.net', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': True, 'sameSite': 'Lax'},
{'name': 'mp_94085d51c4102efbb82a71d85705cdcf_mixpanel', 'value': '%7B%22distinct_id%22%3A%20%22cm28oqri501xjtwbjzt1prdzm%22%2C%22%24device_id%22%3A%20%22192aea49fbae19-0a0ceb4f539c6b-16525637-16a7f0-192aea49fbb210e%22%2C%22utm_source%22%3A%20%22researchgate%22%2C%22utm_medium%22%3A%20%22email%22%2C%22utm_campaign%22%3A%20%22re442%22%2C%22utm_content%22%3A%20%22re442_up_pb_hnsg_nnaas_p110%22%2C%22utm_term%22%3A%20%22re442_up_pb_hnsg_nnaas%22%2C%22%24initial_referrer%22%3A%20%22%24direct%22%2C%22%24initial_referring_domain%22%3A%20%22%24direct%22%2C%22%24user_id%22%3A%20%22cm28oqri501xjtwbjzt1prdzm%22%2C%22%24search_engine%22%3A%20%22google%22%7D', 'domain': '.researchgate.net', 'path': '/', 'expires': 1764671451, 'httpOnly': False, 'secure': False},
{'name': '__cf_bm', 'value': 'X2xX99VpHc3Xm_TRnIRhN7Wp533PLOt.311xMWQYn28-1733136361-1.0.1.1-qskSGITd3dmHUV3UswCd8O6ygg3jdBOF9Wz1PxlPQq66VUx0blvFKst7fH33pTnkl2W1VBjBjlF8CgPVLesusQ', 'domain': '.researchgate.net', 'path': '/', 'expires': 1733138161.246979, 'httpOnly': True, 'secure': True},
{'name': 'ph_phc_ma1XTQyee96N1GML6qUTgLQRiDifnRcE9STiHTZ0CfZ_posthog', 'value': '%7B%22distinct_id%22%3A%22AC%3A29573036%22%2C%22%24sesid%22%3A%5B1733137066914%2C%22019386ec-50e4-79f9-9b7b-d8ed5854cf9f%22%2C1733135454436%5D%2C%22%24epp%22%3Atrue%2C%22%24initial_person_info%22%3A%7B%22r%22%3A%22%24direct%22%2C%22u%22%3A%22https%3A%2F%2Fwww.researchgate.net%2Fprofile%2FJiabin_Tang4%2Fpublications%3FeditMode%3D1%26sorting%3DrecentlyAdded%26utm_medium%3Demail%26utm_source%3Dresearchgate%26utm_campaign%3Dre214%26loginT%3DeX2d52IqLj-iYd58KHUvU88w6Ub-0Rjh_XCM-6tyfVf3Goy3Bf0swonajNlIbQg6gax3uaL6ulhi_ik9eMs%26pli%3D1%26utm_term%3Dre214_x%26utm_content%3Dre214_x_p2%26cp%3Dre214_x_p2%26uid%3D2aJ4s09Uf8rvZLKbNnk9UiFVrlZcTRXt51G2%26ch%3Dreg%22%7D%7D', 'domain': '.researchgate.net', 'path': '/', 'expires': 1764673066, 'httpOnly': False, 'secure': True, 'sameSite': 'Lax'},
{'name': 'user.uuid.v2', 'value': '"f9248aca-ac13-40e6-8b45-eaeb5fe20825"', 'domain': 'www-nature-com.eproxy.lib.hku.hk', 'path': '/', 'expires': 1740916756.716508, 'httpOnly': False, 'secure': False},
{'name': 'ezproxy', 'value': 'e1~OilZogbDH4iMWPK', 'domain': '.eproxy.lib.hku.hk', 'path': '/', 'expires': -1, 'httpOnly': True, 'secure': False},
{'name': 'ezproxyl', 'value': 'OilZogbDH4iMWPK', 'domain': '.eproxy.lib.hku.hk', 'path': '/', 'expires': -1, 'httpOnly': True, 'secure': False, 'sameSite': 'Lax'},
{'name': 'ezproxyn', 'value': 'OilZogbDH4iMWPK', 'domain': '.eproxy.lib.hku.hk', 'path': '/', 'expires': -1, 'httpOnly': True, 'secure': True},
{'name': 'AMCVS_4D6368F454EC41940A4C98A6%40AdobeOrg', 'value': '1', 'domain': '.hku.hk', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': False},
{'name': 'AMCV_4D6368F454EC41940A4C98A6%40AdobeOrg', 'value': '179643557%7CMCIDTS%7C20065%7CMCMID%7C09958998665032490705964449674769238820%7CMCAID%7CNONE%7CMCOPTOUT-1733558198s%7CNONE%7CvVersion%7C5.5.0', 'domain': '.hku.hk', 'path': '/', 'expires': 1768110998.70329, 'httpOnly': False, 'secure': False},
{'name': 'utag_main', 'value': 'v_id:01939fa183070055958b6429f88c05075005506d00bd0$_sn:1$_se:20$_ss:0$_st:1733553108768$ses_id:1733549982472%3Bexp-session$_pn:14%3Bexp-session$vapi_domain:ieeexplore-ieee-org.eproxy.lib.hku.hk', 'domain': '.hku.hk', 'path': '/', 'expires': 1765087308, 'httpOnly': False, 'secure': False},
{'name': 'mp_94085d51c4102efbb82a71d85705cdcf_mixpanel', 'value': '%7B%22distinct_id%22%3A%20%22193873d31db19b2-00bbd0bd5ad31-1e525636-1fa400-193873d31dc28b3%22%2C%22%24device_id%22%3A%20%22193873d31db19b2-00bbd0bd5ad31-1e525636-1fa400-193873d31dc28b3%22%2C%22%24initial_referrer%22%3A%20%22%24direct%22%2C%22%24initial_referring_domain%22%3A%20%22%24direct%22%7D', 'domain': '.lib.hku.hk', 'path': '/', 'expires': 1765087436, 'httpOnly': False, 'secure': False},
{'name': '_sp_id.a65e', 'value': 'a151b61b-0e26-493f-9885-ed0d9579e181.1712037732.1.1712037742..381bfab3-8c2a-4e54-8d4b-44a5c8c997ef..6db53b82-8b6d-471c-b7de-194adad46810.1712037732261.2', 'domain': 'www.collinsdictionary.com', 'path': '/', 'expires': 1746597742.393476, 'httpOnly': False, 'secure': True},
{'name': '__cflb', 'value': '02DiuFwNDm462z9fWfJeB58usqeie1xoTDrYZciipwE2x', 'domain': 'www.collinsdictionary.com', 'path': '/', 'expires': 1733223382.743499, 'httpOnly': True, 'secure': True},
{'name': 'XSRF-TOKEN', 'value': '64a0f62d-dc8f-40cb-8aa2-66e3ad283ad4', 'domain': 'www.collinsdictionary.com', 'path': '/', 'expires': -1, 'httpOnly': True, 'secure': True},
{'name': 'dictcode', 'value': 'english', 'domain': 'www.collinsdictionary.com', 'path': '/', 'expires': 1763873478.639472, 'httpOnly': False, 'secure': False},
{'name': 'searchPanelOpen', 'value': 'true', 'domain': '.collinsdictionary.com', 'path': '/', 'expires': 1733803809.434554, 'httpOnly': False, 'secure': False},
{'name': 'search', 'value': 'hello', 'domain': 'www.collinsdictionary.com', 'path': '/', 'expires': 1763871009.434808, 'httpOnly': False, 'secure': False},
{'name': '__cf_bm', 'value': 'xGchgbvqtkoAYddlxWT4VgRmeTZ1qTVmI0hjpRvOj0w-1733201062-1.0.1.1-SDl6_cuGUlqEOSm4oDQpU5rJdha8wEbITIgLoxdY69GgWrSt5GO7nX47Vc2AihzcBY.yS6GZ9qXVfRKEttQyLw', 'domain': '.collinsdictionary.com', 'path': '/', 'expires': 1733202862.320396, 'httpOnly': True, 'secure': True},
{'name': 'last_url', 'value': 'https%3A%2F%2Fwww.collinsdictionary.com%2Fdictionary%2Fspanish-english%2Fcaminata', 'domain': 'www.collinsdictionary.com', 'path': '/', 'expires': 1763873068.316249, 'httpOnly': False, 'secure': False},
{'name': 'OptanonConsent', 'value': 'isGpcEnabled=0&datestamp=Tue+Dec+03+2024+12%3A51%3A18+GMT%2B0800+(%E9%A6%99%E6%B8%AF%E6%A0%87%E5%87%86%E6%97%B6%E9%97%B4)&version=202409.2.0&browserGpcFlag=0&isIABGlobal=false&hosts=&landingPath=NotLandingPage&groups=C0001%3A1%2CC0002%3A1%2CC0003%3A1%2CC0004%3A1&AwaitingReconsent=false&geolocation=JP%3B27', 'domain': '.collinsdictionary.com', 'path': '/', 'expires': 1759121478, 'httpOnly': False, 'secure': False, 'sameSite': 'Lax'},
{'name': 'OptanonAlertBoxClosed', 'value': '2024-12-03T04:51:18.738Z', 'domain': '.collinsdictionary.com', 'path': '/', 'expires': 1759121478, 'httpOnly': False, 'secure': False, 'sameSite': 'Lax'},
{'name': 'mp_94085d51c4102efbb82a71d85705cdcf_mixpanel', 'value': '%7B%22distinct_id%22%3A%20%22193873a93591e4d-05e2471014e6fb-1e525636-1fa400-193873a935a287e%22%2C%22%24device_id%22%3A%20%22193873a93591e4d-05e2471014e6fb-1e525636-1fa400-193873a935a287e%22%2C%22%24initial_referrer%22%3A%20%22%24direct%22%2C%22%24initial_referring_domain%22%3A%20%22%24direct%22%7D', 'domain': '.collinsdictionary.com', 'path': '/', 'expires': 1764737478, 'httpOnly': False, 'secure': False},
{'name': 'UUID', 'value': '3c4dd735-8d33-4fd0-a40f-83d399a0dc46', 'domain': 'www.jstor.org', 'path': '/', 'expires': 1740190342.420181, 'httpOnly': False, 'secure': True},
{'name': '_pxvid', 'value': '2dd5c1cb-b670-11ee-9186-3dd546fa1c41', 'domain': 'www.jstor.org', 'path': '/', 'expires': 1737166344, 'httpOnly': False, 'secure': False, 'sameSite': 'Lax'},
{'name': '__zlcmid', 'value': '1O1n3oDgpvApbaQ', 'domain': '.jstor.org', 'path': '/', 'expires': 1764831447, 'httpOnly': False, 'secure': False, 'sameSite': 'Lax'},
{'name': 'csrftoken', 'value': 'iyx0piwZPaxHwlEjMlBpPrxRasiSrbVv', 'domain': 'www.jstor.org', 'path': '/', 'expires': 1764745045.418981, 'httpOnly': False, 'secure': True, 'sameSite': 'Lax'},
{'name': 'OptanonAlertBoxClosed', 'value': '2024-12-04T06:56:48.465Z', 'domain': '.jstor.org', 'path': '/', 'expires': 1764831408, 'httpOnly': False, 'secure': False, 'sameSite': 'Lax'},
{'name': 'AccessToken', 'value': 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzZXNzaW9uSWQiOiIwMTkzOTA3NGY4MTg3OTQ1OTg4NzdiNWQzMWE4NDk3MSIsInV1aWQiOiIzYzRkZDczNS04ZDMzLTRmZDAtYTQwZi04M2QzOTlhMGRjNDYiLCJ1c2VyIjp7ImlkIjoiIiwibG9nZ2VkSW4iOmZhbHNlLCJhZG1pbiI6ZmFsc2V9LCJpYXQiOjE3MzMyOTU0MDksImV4cCI6MTczMzI5NTcwOX0.lIt08pG__dm_kZ3kJUYMw_bK0Ow2kAD8i2Jf8OQA0RM', 'domain': 'www.jstor.org', 'path': '/', 'expires': 1733299009.610988, 'httpOnly': False, 'secure': True, 'sameSite': 'Lax'},
{'name': 'AccessSessionTimedSignature', 'value': '1b72fc2754973a3daa1baf1b169dfda5ed067ed4113573f1a1005ce5da900999', 'domain': 'www.jstor.org', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': False},
{'name': 'AccessSession', 'value': 'H4sIAAAAAAAA_42U3W7bMAyF38XXUSFKlEXlLgv2f7Gi7S62oRgYSc68uXHg2B26ou8-KW4adxu6GoEhyYfi4Scqt8Uw1KGYF9pjCFYbQUFrgVWQglFWgnTQzrEMHstiVtTbpAVLJ2DsSfop1Hk17yDBaSetQYzKuqpy1q-YfbBKckWGkm47FWJFQNahcUTWrkzQwITOQhJ2e2HFu765ESFWPDT9nBQiWXIVeAmMUCkGUtJrU6UX5AQN9ylOSYUClJB4Icu5sXOJJ1YTgrJkPmfZ8KRMQ7mX7Z5WmVHV-2Led0OcFdfc7H1PQ9whJAW4Mm2dQ7jvu10xvy2WyyRfnKel5UUatTv-wV-VzfNPaf7uNI3OlofReda8vXizeL8o7tIuQ_9t4X2fdvpyW_Q325g_b3Z93Q993W4yx7aJ-fPlrLiqN_VV_Su-anh9MLx3CyVamaGMM5BSKq3LfAxxvxSjX1HJLGLJTqAMJNhqKYB1NICUzzvH3zuoHznwXQyjwWe3mXhmwoR7iM9v3Xt7L7r25y52p1x39WZdjBhHIHeXd7MJyFBf12HgZsKx-Hj-8qx4iiai1gpB6iNRQ45caj59JOqgghBABBmsQDBOMJUovFaByKtQAkyInsZu124-Jtd_Az3kwwmQdduumyhAOmegTHfMSJsOGYg0HUF8WOTC_6g_lcYPLTWemEWrFD54V-nmhtTNwliuBAZk4TywkBwlQVm5kswxyVlcj33wL2DNfQJNWtGk3m4ftK83H8hIMbXlEaCLJG2IXlTargR6awR7SjfOAzk2fiVZHk287tph-6QHNcm-zuoxeaLxffzL-s_zGONvRDIvs1UFAAA', 'domain': 'www.jstor.org', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': False},
{'name': 'AccessSessionSignature', 'value': '78ecce97f2a2de3ffb4af7c87424885165a11fe7d2e29bf960edff5c48167a35', 'domain': 'www.jstor.org', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': False},
{'name': '_pxhd', 'value': 'xbR2aQnYKWrHiFs3JKCUojuB1cOv3M4cNFTMhSdjqxqtIsdgsSi8ehBRY7OxVo9kz4W0VVvgcv6IyY0Ta0SJPA==:medf83pfHV213oGcCOKWmgsZc4Kr8q2rEC2GEWBrpMBibA5DYuL7eKs10ANKfVD8qmvJUfcosZrIkQ83XUVKtKmZa4Y6lK04fy46yN254wo=', 'domain': 'www.jstor.org', 'path': '/', 'expires': 1764831425.214494, 'httpOnly': False, 'secure': False},
{'name': 'pxcts', 'value': 'f8fbc8a1-b20c-11ef-a65c-4947163db9b8', 'domain': 'www.jstor.org', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': False, 'sameSite': 'Lax'},
{'name': '_px2', 'value': 'eyJ1IjoiZjhjZDk4ZjAtYjIwYy0xMWVmLWFkYzctZGQxMTkxM2QxNDlkIiwidiI6IjJkZDVjMWNiLWI2NzAtMTFlZS05MTg2LTNkZDU0NmZhMWM0MSIsInQiOjE3MzMyOTU3MjY4NjgsImgiOiIyMTFhMjMyMTRlZmQwOWE5OTNhZjlmODU2MDU1ZmI1N2U4MTcwY2RmNDNlZjM0MGFhYzg1Yzk2NzQ0NmVjOWI5In0=', 'domain': 'www.jstor.org', 'path': '/', 'expires': 1733296026, 'httpOnly': False, 'secure': False, 'sameSite': 'Lax'},
{'name': 'ReferringRequestId', 'value': 'fastly-default:258d2927284d8837614cc35365d11c1d', 'domain': 'www.jstor.org', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': True, 'sameSite': 'Lax'},
{'name': 'mp_94085d51c4102efbb82a71d85705cdcf_mixpanel', 'value': '%7B%22distinct_id%22%3A%20%221939074fba7618-0aaf84ba8a423a-1e525636-16a7f0-1939074fba82966%22%2C%22%24device_id%22%3A%20%221939074fba7618-0aaf84ba8a423a-1e525636-16a7f0-1939074fba82966%22%2C%22%24search_engine%22%3A%20%22google%22%2C%22%24initial_referrer%22%3A%20%22https%3A%2F%2Fwww.google.com%2F%22%2C%22%24initial_referring_domain%22%3A%20%22www.google.com%22%7D', 'domain': '.jstor.org', 'path': '/', 'expires': 1764831445, 'httpOnly': False, 'secure': False},
{'name': 'OptanonConsent', 'value': 'isGpcEnabled=0&datestamp=Wed+Dec+04+2024+14%3A57%3A25+GMT%2B0800+(%E4%B8%AD%E5%9B%BD%E6%A0%87%E5%87%86%E6%97%B6%E9%97%B4)&version=202303.1.0&browserGpcFlag=0&isIABGlobal=false&hosts=&consentId=e6c4a174-c538-4f9f-8632-f5f8e9ff428d&interactionCount=2&landingPath=NotLandingPage&groups=C0001%3A1%2CC0002%3A1%2CC0005%3A1%2CC0004%3A1%2CC0003%3A1&AwaitingReconsent=false&geolocation=JP%3B27', 'domain': '.jstor.org', 'path': '/', 'expires': 1764831445, 'httpOnly': False, 'secure': False, 'sameSite': 'Lax'},
{'name': 'AMCV_4D6368F454EC41940A4C98A6%40AdobeOrg', 'value': '179643557%7CMCIDTS%7C20060%7CMCMID%7C09958998665032490705964449674769238820%7CMCAID%7CNONE%7CMCOPTOUT-1733149186s%7CNONE%7CvVersion%7C5.5.0', 'domain': '.hku.hk', 'path': '/', 'expires': 1767701986.053151, 'httpOnly': False, 'secure': False},
{'name': 'ezproxy', 'value': 'e1~OilZogbDH4iMWPK', 'domain': '.eproxy.lib.hku.hk', 'path': '/', 'expires': -1, 'httpOnly': True, 'secure': False},
{'name': 'ezproxyl', 'value': 'OilZogbDH4iMWPK', 'domain': '.eproxy.lib.hku.hk', 'path': '/', 'expires': -1, 'httpOnly': True, 'secure': False, 'sameSite': 'Lax'},
{'name': 'ezproxyn', 'value': 'OilZogbDH4iMWPK', 'domain': '.eproxy.lib.hku.hk', 'path': '/', 'expires': -1, 'httpOnly': True, 'secure': True},
{'name': 'mp_94085d51c4102efbb82a71d85705cdcf_mixpanel', 'value': '%7B%22distinct_id%22%3A%20%22193873d31db19b2-00bbd0bd5ad31-1e525636-1fa400-193873d31dc28b3%22%2C%22%24device_id%22%3A%20%22193873d31db19b2-00bbd0bd5ad31-1e525636-1fa400-193873d31dc28b3%22%2C%22%24initial_referrer%22%3A%20%22%24direct%22%2C%22%24initial_referring_domain%22%3A%20%22%24direct%22%7D', 'domain': '.lib.hku.hk', 'path': '/', 'expires': 1765083373, 'httpOnly': False, 'secure': False},
{'name': 'ezproxy', 'value': 'e1~OilZogbDH4iMWPK', 'domain': '.eproxy.lib.hku.hk', 'path': '/', 'expires': -1, 'httpOnly': True, 'secure': False},
{'name': 'ezproxyl', 'value': 'OilZogbDH4iMWPK', 'domain': '.eproxy.lib.hku.hk', 'path': '/', 'expires': -1, 'httpOnly': True, 'secure': False, 'sameSite': 'Lax'},
{'name': 'ezproxyn', 'value': 'OilZogbDH4iMWPK', 'domain': '.eproxy.lib.hku.hk', 'path': '/', 'expires': -1, 'httpOnly': True, 'secure': True},
{'name': 'AMCVS_4D6368F454EC41940A4C98A6%40AdobeOrg', 'value': '1', 'domain': '.hku.hk', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': False},
{'name': 'utag_main', 'value': 'v_id:01939fa183070055958b6429f88c05075005506d00bd0$_sn:1$_se:20$_ss:0$_st:1733553108768$ses_id:1733549982472%3Bexp-session$_pn:14%3Bexp-session$vapi_domain:ieeexplore-ieee-org.eproxy.lib.hku.hk', 'domain': '.hku.hk', 'path': '/', 'expires': 1765087308, 'httpOnly': False, 'secure': False},
{'name': 'AMCV_4D6368F454EC41940A4C98A6%40AdobeOrg', 'value': '179643557%7CMCIDTS%7C20065%7CMCMID%7C09958998665032490705964449674769238820%7CMCAID%7CNONE%7CMCOPTOUT-1733559088s%7CNONE%7CvVersion%7C5.5.0', 'domain': '.hku.hk', 'path': '/', 'expires': 1768111888.617908, 'httpOnly': False, 'secure': False},
{'name': 'SID', 'value': '"EUW1ED0CAFs37MFXuY5NakcbWc5Qu"', 'domain': '.lib.hku.hk', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': False},
{'name': 'CUSTOMER', 'value': '"UNIVERSITY OF HONG KONG"', 'domain': '.lib.hku.hk', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': False},
{'name': 'E_GROUP_NAME', 'value': '"University of Hong Kong"', 'domain': '.lib.hku.hk', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': False},
{'name': '__cf_bm', 'value': 'dU7HSmMJl6w4XDg.tZSoewkYsxb0bX7Barvg4RvulLw-1733551961-1.0.1.1-7Um2w5HRPO8C06bwjScmRD9BaTZkbArPDfX_e8urefWlKlH50nONZAxnxeL4VbDbHzBBcAY1OzwO5TyNuuCUfQ', 'domain': '.lib.hku.hk', 'path': '/', 'expires': 1733553761.117424, 'httpOnly': False, 'secure': False},
{'name': 'AMCVS_242B6472541199F70A4C98A6%40AdobeOrg', 'value': '1', 'domain': '.hku.hk', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': False},
{'name': 'adBlockEnabled', 'value': 'blocked', 'domain': 'www-science-org.eproxy.lib.hku.hk', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': False},
{'name': 'cookiePolicy', 'value': 'iaccept', 'domain': 'www-science-org.eproxy.lib.hku.hk', 'path': '/', 'expires': 1733638777.524329, 'httpOnly': False, 'secure': True},
{'name': 'AMCV_242B6472541199F70A4C98A6%40AdobeOrg', 'value': '179643557%7CMCIDTS%7C20065%7CMCMID%7C90810009207598809487163227219398447255%7CMCOPTOUT-1733559578s%7CNONE%7CvVersion%7C5.5.0', 'domain': '.hku.hk', 'path': '/', 'expires': 1768112378.032281, 'httpOnly': False, 'secure': False},
{'name': 'mp_94085d51c4102efbb82a71d85705cdcf_mixpanel', 'value': '%7B%22distinct_id%22%3A%20%22193873d31db19b2-00bbd0bd5ad31-1e525636-1fa400-193873d31dc28b3%22%2C%22%24device_id%22%3A%20%22193873d31db19b2-00bbd0bd5ad31-1e525636-1fa400-193873d31dc28b3%22%2C%22%24initial_referrer%22%3A%20%22%24direct%22%2C%22%24initial_referring_domain%22%3A%20%22%24direct%22%2C%22%24search_engine%22%3A%20%22google%22%7D', 'domain': '.lib.hku.hk', 'path': '/', 'expires': 1765088378, 'httpOnly': False, 'secure': False},
{'name': 'OptanonAlertBoxClosed', 'value': '2024-06-06T05:28:24.993Z', 'domain': '.orcid.org', 'path': '/', 'expires': 1749187704, 'httpOnly': False, 'secure': False, 'sameSite': 'Lax'},
{'name': 'AWSELB', 'value': 'CBD1D7FF1216388FA48838CBCA4774FD22800B8FB55A37124459E84B59F34FE231A4AA84F4ACD29C01160D60FB2ABE4D73D23EFBBE355236CF44A8FEE381C3844BD946CF3D', 'domain': 'orcid.org', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': False},
{'name': 'AWSELBCORS', 'value': 'CBD1D7FF1216388FA48838CBCA4774FD22800B8FB55A37124459E84B59F34FE231A4AA84F4ACD29C01160D60FB2ABE4D73D23EFBBE355236CF44A8FEE381C3844BD946CF3D', 'domain': 'orcid.org', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': True},
{'name': 'orcidUserConnectionId', 'value': '-114606494029392851', 'domain': 'orcid.org', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': True},
{'name': 'XSRF-TOKEN', 'value': 'b64bcd3a-f0f5-407b-9115-a1f5183f3997', 'domain': 'orcid.org', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': True},
{'name': 'JSESSIONID', 'value': '48DD20615AC49336A91F9A3A6F5B1483', 'domain': 'orcid.org', 'path': '/', 'expires': -1, 'httpOnly': True, 'secure': True},
{'name': 'locale_v3', 'value': 'en', 'domain': 'orcid.org', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': True},
{'name': 'mp_94085d51c4102efbb82a71d85705cdcf_mixpanel', 'value': '%7B%22distinct_id%22%3A%20%22193871b8e67918-084bddcb7389ac-1e525636-1fa400-193871b8e682d76%22%2C%22%24device_id%22%3A%20%22193871b8e67918-084bddcb7389ac-1e525636-1fa400-193871b8e682d76%22%2C%22%24initial_referrer%22%3A%20%22%24direct%22%2C%22%24initial_referring_domain%22%3A%20%22%24direct%22%7D', 'domain': '.orcid.org', 'path': '/', 'expires': 1764674617, 'httpOnly': False, 'secure': False},
{'name': 'OptanonConsent', 'value': 'isGpcEnabled=0&datestamp=Mon+Dec+02+2024+19%3A23%3A37+GMT%2B0800+(%E9%A6%99%E6%B8%AF%E6%A0%87%E5%87%86%E6%97%B6%E9%97%B4)&version=202310.2.0&browserGpcFlag=0&isIABGlobal=false&hosts=&consentId=71ca593a-5b7c-4963-87cf-52c27440ac95&interactionCount=1&landingPath=NotLandingPage&groups=C0001%3A1%2CC0003%3A1%2CC0002%3A1%2CC0004%3A1&geolocation=HK%3B&AwaitingReconsent=false', 'domain': '.orcid.org', 'path': '/', 'expires': 1764674617, 'httpOnly': False, 'secure': False, 'sameSite': 'Lax'},
{'name': 'cookieName', 'value': 'dont%20show%20message', 'domain': 'orcid.org', 'path': '/', 'expires': 1764674620, 'httpOnly': False, 'secure': False, 'sameSite': 'Lax'},
{'name': 'closable-unique-name', 'value': 'understood', 'domain': 'orcid.org', 'path': '/', 'expires': 1764674620, 'httpOnly': False, 'secure': False, 'sameSite': 'Lax'},
{'name': 'ezproxy', 'value': 'e1~OilZogbDH4iMWPK', 'domain': '.eproxy.lib.hku.hk', 'path': '/', 'expires': -1, 'httpOnly': True, 'secure': False},
{'name': 'ezproxyl', 'value': 'OilZogbDH4iMWPK', 'domain': '.eproxy.lib.hku.hk', 'path': '/', 'expires': -1, 'httpOnly': True, 'secure': False, 'sameSite': 'Lax'},
{'name': 'ezproxyn', 'value': 'OilZogbDH4iMWPK', 'domain': '.eproxy.lib.hku.hk', 'path': '/', 'expires': -1, 'httpOnly': True, 'secure': True},
{'name': 'AMCVS_4D6368F454EC41940A4C98A6%40AdobeOrg', 'value': '1', 'domain': '.hku.hk', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': False},
{'name': 'utag_main', 'value': 'v_id:01939fa183070055958b6429f88c05075005506d00bd0$_sn:1$_se:13$_ss:0$_st:1733552707246$ses_id:1733549982472%3Bexp-session$_pn:9%3Bexp-session$vapi_domain:ieeexplore-ieee-org.eproxy.lib.hku.hk', 'domain': '.hku.hk', 'path': '/', 'expires': 1765086907, 'httpOnly': False, 'secure': False, 'sameSite': 'Lax'},
{'name': 'AMCV_4D6368F454EC41940A4C98A6%40AdobeOrg', 'value': '179643557%7CMCIDTS%7C20065%7CMCMID%7C09958998665032490705964449674769238820%7CMCAID%7CNONE%7CMCOPTOUT-1733558198s%7CNONE%7CvVersion%7C5.5.0', 'domain': '.hku.hk', 'path': '/', 'expires': 1768110998.70329, 'httpOnly': False, 'secure': False},
{'name': 'mp_94085d51c4102efbb82a71d85705cdcf_mixpanel', 'value': '%7B%22distinct_id%22%3A%20%22193873d31db19b2-00bbd0bd5ad31-1e525636-1fa400-193873d31dc28b3%22%2C%22%24device_id%22%3A%20%22193873d31db19b2-00bbd0bd5ad31-1e525636-1fa400-193873d31dc28b3%22%2C%22%24initial_referrer%22%3A%20%22%24direct%22%2C%22%24initial_referring_domain%22%3A%20%22%24direct%22%7D', 'domain': '.lib.hku.hk', 'path': '/', 'expires': 1765087052, 'httpOnly': False, 'secure': False},
{'name': 'PREF', 'value': 'f7=4100&tz=Asia.Hong_Kong&f4=4000000', 'domain': '.youtube.com', 'path': '/', 'expires': 1767698936.819909, 'httpOnly': False, 'secure': True},
{'name': 'HSID', 'value': 'AuvRBV-Q9GEReACoE', 'domain': '.youtube.com', 'path': '/', 'expires': 1766544331.11147, 'httpOnly': True, 'secure': False},
{'name': 'SSID', 'value': 'AHum7OsxEOAD-Ibp4', 'domain': '.youtube.com', 'path': '/', 'expires': 1766544331.111503, 'httpOnly': True, 'secure': True},
{'name': 'APISID', 'value': 'FRZgwlTWYfVE-B2B/A7FrDbUAZCyMOb6ue', 'domain': '.youtube.com', 'path': '/', 'expires': 1766544331.111539, 'httpOnly': False, 'secure': False},
{'name': 'SAPISID', 'value': 'mPwGiY6zyqe8IurQ/An35YGZHoL6Ged8z8', 'domain': '.youtube.com', 'path': '/', 'expires': 1766544331.111575, 'httpOnly': False, 'secure': True},
{'name': '__Secure-1PAPISID', 'value': 'mPwGiY6zyqe8IurQ/An35YGZHoL6Ged8z8', 'domain': '.youtube.com', 'path': '/', 'expires': 1766544331.111613, 'httpOnly': False, 'secure': True},
{'name': '__Secure-3PAPISID', 'value': 'mPwGiY6zyqe8IurQ/An35YGZHoL6Ged8z8', 'domain': '.youtube.com', 'path': '/', 'expires': 1766544331.111646, 'httpOnly': False, 'secure': True},
{'name': 'LOGIN_INFO', 'value': 'AFmmF2swRgIhALZXJQRg7B6iILvfx41A-mHr8rh7RMGV3cNkppAPlxxvAiEA38fh68Ct3o4p-ywc1zHhWZxrJ5Dpcd0AcsMp4RZONUs:QUQ3MjNmeTdGelpVWXZuN1RTeUMzQkYwNEZhVXY1emtGT1pycWFmWC1LU0txanZReHBLaDRxVHJEZGRyOV8wajFIajdyLWYwcE1rSFZfRVlBM3BNaXZSQlMtLVlLR3RmSURpQjhKRlJaU0xJcHQySmZVNUp6eWFFak9rbE4yWDg5WGdjSkM4QjJhcFRYZTAwVEF6a3RPUzhsSzV0R05YWkVB', 'domain': '.youtube.com', 'path': '/', 'expires': 1766493681.602721, 'httpOnly': True, 'secure': True},
{'name': 'SID', 'value': 'g.a000qQgZRYfZgm7ZwgtAwFbDQEBvZXpLRLlzTLW27vxVIiEvbgDsOzHaI33jRkuMY7qPd2ZFEAACgYKAUMSARYSFQHGX2Miunh4qpdUynkY2PXqPSvzGxoVAUF8yKqW-lS90SUYH-cprrTRIWDZ0076', 'domain': '.youtube.com', 'path': '/', 'expires': 1766544331.111199, 'httpOnly': False, 'secure': False},
{'name': '__Secure-1PSID', 'value': 'g.a000qQgZRYfZgm7ZwgtAwFbDQEBvZXpLRLlzTLW27vxVIiEvbgDsSADU_B-86vJipiHTuQxp3gACgYKATgSARYSFQHGX2MinsKT7OE3L_H9SEzrOgndQRoVAUF8yKqLdTsMTC89NTlwps9-uRSu0076', 'domain': '.youtube.com', 'path': '/', 'expires': 1766544331.111393, 'httpOnly': True, 'secure': True},
{'name': '__Secure-3PSID', 'value': 'g.a000qQgZRYfZgm7ZwgtAwFbDQEBvZXpLRLlzTLW27vxVIiEvbgDsvr3qlDZAuj7i33G332SpFwACgYKAekSARYSFQHGX2Mi9DYKrElENMuHNeaGctBCdBoVAUF8yKrmoWXREkhXzva2a6J3B2ps0076', 'domain': '.youtube.com', 'path': '/', 'expires': 1766544331.111431, 'httpOnly': True, 'secure': True},
{'name': 'NID', 'value': '519=I7v3EQhK2PGhnXSOCZyz5QYVGbJ383LEPRVqShP2G15ip3zj5VjyDJWEAlJtS3ifC0qs-7cxlwk_vCTqntg_LWW_hfONxTqG6JVJ8JvpMEr2eM_Fqb9n8nVbc_YNrwSIUEorM5N5FUoZmW2u4Qksi_a0-ssHJWsOBEwdxpDONaY', 'domain': '.youtube.com', 'path': '/', 'expires': 1747984059.767459, 'httpOnly': True, 'secure': True},
{'name': 'mp_94085d51c4102efbb82a71d85705cdcf_mixpanel', 'value': '%7B%22distinct_id%22%3A%20%22cm28oqri501xjtwbjzt1prdzm%22%2C%22%24device_id%22%3A%20%221928f27a2a80-04cb41970e6945-16525637-16a7f0-1928f27a2a920cc%22%2C%22%24initial_referrer%22%3A%20%22%24direct%22%2C%22%24initial_referring_domain%22%3A%20%22%24direct%22%2C%22%24user_id%22%3A%20%22cm28oqri501xjtwbjzt1prdzm%22%2C%22%24search_engine%22%3A%20%22google%22%2C%22utm_source%22%3A%20%22ythp%22%2C%22utm_medium%22%3A%20%22LeftNav%22%2C%22utm_campaign%22%3A%20%22ytgen%22%2C%22utm_content%22%3A%20%22txt%22%7D', 'domain': '.youtube.com', 'path': '/', 'expires': 1764674936, 'httpOnly': False, 'secure': False},
{'name': 'ST-xuwub9', 'value': 'session_logininfo=AFmmF2swRgIhALZXJQRg7B6iILvfx41A-mHr8rh7RMGV3cNkppAPlxxvAiEA38fh68Ct3o4p-ywc1zHhWZxrJ5Dpcd0AcsMp4RZONUs%3AQUQ3MjNmeTdGelpVWXZuN1RTeUMzQkYwNEZhVXY1emtGT1pycWFmWC1LU0txanZReHBLaDRxVHJEZGRyOV8wajFIajdyLWYwcE1rSFZfRVlBM3BNaXZSQlMtLVlLR3RmSURpQjhKRlJaU0xJcHQySmZVNUp6eWFFak9rbE4yWDg5WGdjSkM4QjJhcFRYZTAwVEF6a3RPUzhsSzV0R05YWkVB', 'domain': '.youtube.com', 'path': '/', 'expires': 1733138942, 'httpOnly': False, 'secure': False},
{'name': '__Secure-1PSIDTS', 'value': 'sidts-CjIBQT4rXxBz2VTDVx5cMn6A_YgFHPgo9Z-eWATlXeINT58ZUInn2_vTeUb4czgvWV-j5BAA', 'domain': '.youtube.com', 'path': '/', 'expires': 1764674938.226552, 'httpOnly': True, 'secure': True},
{'name': '__Secure-3PSIDTS', 'value': 'sidts-CjIBQT4rXxBz2VTDVx5cMn6A_YgFHPgo9Z-eWATlXeINT58ZUInn2_vTeUb4czgvWV-j5BAA', 'domain': '.youtube.com', 'path': '/', 'expires': 1764674938.226624, 'httpOnly': True, 'secure': True},
{'name': 'SIDCC', 'value': 'AKEyXzWXdf72zjmIboZNkzmg9VURwnmM1MpJVRgAxjRuMRib1b7iu5SkCZccexcK6jf2abrLEoQ', 'domain': '.youtube.com', 'path': '/', 'expires': 1764674940.01384, 'httpOnly': False, 'secure': False},
{'name': '__Secure-1PSIDCC', 'value': 'AKEyXzXWzx9lRoJCEXrHvqZeWtAugc_tFou4ucmylPeSpc0nRX2EZ-t3QGTGqberRiTB3QIHjQ', 'domain': '.youtube.com', 'path': '/', 'expires': 1764674940.013891, 'httpOnly': True, 'secure': True},
{'name': '__Secure-3PSIDCC', 'value': 'AKEyXzVyGt5J-awGqBrP5_hTnwTmCMsUu5oWISlljhXbP9P7vrGxlzOg05O_vwlgbuGOKRUQGYA', 'domain': '.youtube.com', 'path': '/', 'expires': 1764674940.013962, 'httpOnly': True, 'secure': True},
{'name': 'MAID', 'value': '+O8mvi2rAtZrnJqF+2cRIQ==', 'domain': '.pnas.org', 'path': '/', 'expires': 1759078802.198648, 'httpOnly': True, 'secure': True},
{'name': 'MACHINE_LAST_SEEN', 'value': '2024-12-02T09%3A00%3A01.960-08%3A00', 'domain': '.pnas.org', 'path': '/', 'expires': 1759078802.198711, 'httpOnly': True, 'secure': True},
{'name': 'JSESSIONID', 'value': 'CEDD494D14F0052C199B1D7AE667EF42', 'domain': '.pnas.org', 'path': '/', 'expires': -1, 'httpOnly': True, 'secure': True},
{'name': '__cf_bm', 'value': 'YJQBFxCTLG1d3d9R0fVmwlmAgP9kqVl3zwf02v.COMQ-1733158802-1.0.1.1-tLccs1jD809lM7_9Bhy35sLQdM1TaakBEYvhdDEi1w9cWJS9IGjovTwKGdYQtse6_rWkJNYt._LsHQI2WCwDUQ', 'domain': '.pnas.org', 'path': '/', 'expires': 1733160603.504839, 'httpOnly': True, 'secure': True},
{'name': 'cookiePolicy', 'value': 'accept', 'domain': '.pnas.org', 'path': '/', 'expires': 1767718816.994233, 'httpOnly': True, 'secure': True},
{'name': 'connect_auto_login', 'value': 'true', 'domain': '.pnas.org', 'path': '/', 'expires': 1735750875.510643, 'httpOnly': True, 'secure': True},
{'name': 'PLUID', 'value': 'l8nplDdx7mN9Xh4lErbknypxfmo=', 'domain': '.pnas.org', 'path': '/', 'expires': 1759078875.9476, 'httpOnly': True, 'secure': True},
{'name': 'mp_94085d51c4102efbb82a71d85705cdcf_mixpanel', 'value': '%7B%22distinct_id%22%3A%20%221938850d07a6d2-0446945abb35c6-1e525636-16a7f0-1938850d07c132a%22%2C%22%24device_id%22%3A%20%221938850d07a6d2-0446945abb35c6-1e525636-16a7f0-1938850d07c132a%22%2C%22%24initial_referrer%22%3A%20%22%24direct%22%2C%22%24initial_referring_domain%22%3A%20%22%24direct%22%7D', 'domain': '.pnas.org', 'path': '/', 'expires': 1764694876, 'httpOnly': False, 'secure': False},
{'name': 'donation-identifier', 'value': '8ed6af4cc08b88b68b36fffcb6dd7323', 'domain': '.archive.org', 'path': '/', 'expires': 1741773847.95608, 'httpOnly': False, 'secure': False},
{'name': 'abtest-identifier', 'value': 'ca9982a6c4240d53598f01665a3c6100', 'domain': '.archive.org', 'path': '/', 'expires': 1741773847.956153, 'httpOnly': False, 'secure': False},
{'name': 'test-cookie', 'value': '1', 'domain': '.archive.org', 'path': '/', 'expires': 1734348067.326946, 'httpOnly': False, 'secure': False},
{'name': 'g_state', 'value': '{"i_l":0}', 'domain': 'archive.org', 'path': '/', 'expires': 1748690473, 'httpOnly': False, 'secure': False},
{'name': 'logged-in-sig', 'value': '1764674476%201733138476%20Y3yQCmHjxUil%2FcGs%2FgYR6m%2FHA%2F%2FtAtShDsn25N2tNIzvkGr6EkwbEsYEwDTjZ6%2Bu4Iy65eDH5gZVrZayaRZzJEa6R91agNjLC1rmw%2F47W5OXyDVFN5kLX%2Ba2OxNOzEx6Ws%2BLVwFVr%2Bdnbzhdt1vqNTEpECwy14%2Fu4n9qXGANJ5IKEO7pfu4ONymTb0RWH%2B158Wphp0Gluy9bR1a3t3TSGM%2FyhBEa37FJ56ckJJDghwIVsANhhu%2FextDlCDLXDkPtxLrwdX%2FAlbBoNFIeQ5%2BzoJX21KKQVdJxVWzSRLb4LXyFQsvhkpL221qlJ%2FDQER53IrTAIkmxrDI4cfjumUnKTQ%3D%3D', 'domain': '.archive.org', 'path': '/', 'expires': 1764674476.838234, 'httpOnly': False, 'secure': False},
{'name': 'logged-in-user', 'value': 'jiabintang77%40gmail.com', 'domain': '.archive.org', 'path': '/', 'expires': 1764674476.838343, 'httpOnly': False, 'secure': False},
{'name': 'PHPSESSID', 'value': 'jteta3bg9mb3t8e6dkp7r6mcd4', 'domain': '.archive.org', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': False},
{'name': 'donation', 'value': 'x', 'domain': '.archive.org', 'path': '/', 'expires': 1736767334, 'httpOnly': False, 'secure': False},
{'name': 'mp_94085d51c4102efbb82a71d85705cdcf_mixpanel', 'value': '%7B%22distinct_id%22%3A%20%22193871a38b622b1-030839df772cce-1e525636-1fa400-193871a38b71d9a%22%2C%22%24device_id%22%3A%20%22193871a38b622b1-030839df772cce-1e525636-1fa400-193871a38b71d9a%22%2C%22%24initial_referrer%22%3A%20%22%24direct%22%2C%22%24initial_referring_domain%22%3A%20%22%24direct%22%7D', 'domain': '.archive.org', 'path': '/', 'expires': 1764675133, 'httpOnly': False, 'secure': False},
{'name': 'ncbi_sid', 'value': '015E11D6531E8483_1525SID', 'domain': '.nih.gov', 'path': '/', 'expires': 1764675079.027761, 'httpOnly': False, 'secure': False},
{'name': 'pmc-frontend-csrftoken', 'value': 'L3uvd1o5Uu2efxgCXWDzwxfDTl5QIFDR', 'domain': 'www.ncbi.nlm.nih.gov', 'path': '/', 'expires': 1753769052.705813, 'httpOnly': False, 'secure': False, 'sameSite': 'Lax'},
{'name': 'ncbi-login-route', 'value': 'google', 'domain': '.ncbi.nlm.nih.gov', 'path': '/', 'expires': 1741001395.405247, 'httpOnly': False, 'secure': False},
{'name': 'PRESERVTARGET', 'value': '%2FtYTXpgzJne16bwfb4ZN2lGInyYoZNk58TVbSvhIR0njSJplCp65%2BiF2SZAktvmmznDxgJBJhBCH%0ANoo2je1cMk0RXykLSXa4UwW7u0%2B%2Fc1X7WzHdCi209NjSVDPLNfOmFzmtz50Uuh6EfD95OQ%2BYQ2B%2B%0Aq7BP3es9s8ArLlZd9XW7NS72Ulu8cigULF%2FZADnu%2FPZf8DmPLOXuV6xWf0fqcNlZXwWhiCjrPJiU%0AU594rDm20QBWFe5y0VjWXnJtzYm7uSPkWDQYJ8htbKyWwjn4aG0xcYfTBSBUTOi9A%2Bo1BnUPHLIi%0A8V9%2Fi7S2i2vLCCwVTCSGS0pctKKWZRmzEmP9NB4rA167%2FSMuyX6ezHZNUyztiKaga84g5monl5bT%0AjNlmWeBFQV90piriK2wjmey3mIoTu2eJyDi%2Bx%2FO7pwMTfeiU2WXZ5h3U4kRBxw%2FR6%2FrCMYtVrzXp%0A%2FexiuMJDHQmiDPowP8dxw97tgs353jnBRGe8jpoCPoPG2hywQnwXtxW8SjWp19yTypxVFl4KnD1e%0A5aoPyq%2F7tPDRPbW7UikYuihFvX0mD1TH7A0G9Bk%2B36y%2F7jL8oW7OArzEbESjcx2aVRL%2B3VqzX1Oc%0AZcFWXfVarYgckE8EeyNwFwhPDoASs2T4SVNAJAQ38A0bYzCAxc6mQLqADqesOuuveClDDgB8WITg%0A1QnE32rGsLz37nzAQ89V', 'domain': '.nih.gov', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': True},
{'name': 'NIHSMPROFILE', 'value': '9i9xFyZxcZ3DeEBWJ1M%2B1ygJsb2LhWqfanAC3W20fjIpeXaMrRQ%2F9L3R6DUjYzq5%2FqUDVLhYywfn1%2BT0RJpzID8efN8zNczLDneXLM7waIbhTdfwbIh%2BCnmN0fucHtqYylLU1altZcOhSRTow47jYwyEUFsmJ6kz3T1%2BnZWx3Ol0zaFC8onzZCtl4YHbCxMJVbHYuMcGM4f4YxpiDefQvlDdwY1soBI8z9nvb%2BKMs1B3GgplTzyllWIbC1RHxGLvdlNaz8Zlzw6MU4B3piqrAiCWAvoMF3%2FSShIchSdP0utP%2BMROhcGaoWBU%2FKfkjjDc3lHCPfydE%2F895aasf6uvrL7uccokjb6HxdVs0FA%2FHxfBNJXURVRSpRl9%2BPOd9%2FOOlXQQqhBh1FyAZs6WIxDvLhegMvLITcLh7ahcahuJnoeImSla4b4kK0Ayy6736mJCa0hhXUzGjab4Yhht11PliHlAlh4wLEXj0Dp7X9pj7Ws1%2BdCx8QZyiTWrbauCQJtS1hNXn%2Blg4BoQ2sIq%2FxltuA%3D%3D', 'domain': '.nih.gov', 'path': '/', 'expires': -1, 'httpOnly': True, 'secure': True},
{'name': 'NIHSMSESSION', 'value': 'ZITCB6DlveT31D7iO+eVBnrLBxVxrCJGSz/5hVXNSYwvjuPpvd0O7dD6XqsFf6LKdJXktiX+hhtPWtsNok2mgiSvcpYTBHJxHVefyBt+KiLTVm12lBwYTkx9Gb9OxZNQsMS/Hvoj45/ShvSKut3d7c8e2cEhtjA7DWjHEYHj0tuk3fEoQ4t0UAWkhj6bFt5Vo5tm6dyqen/1EH2o6cBZrVSLbk67LctlbRyV4pc5099Il2lTMPo6LqtyVI1AC/bcSioge+LqDbpDiuP4NOF3EPj/yFSWvBz76/bqQ0Hu5oRGCC1zVPhSGJ1iukio91F6IfYr5vTtMrN00vSuxHRnxj0BYCEuGKtCeNDohuGAZvQVkjhc1aI53oWFu8UNHZvYA+Xo2lpLZUOT+2pkjk1Z/WpAPzE8L+O6mRjwaq8/2b3rUS8gq4Y/2vQYhqmNxeHOBsM01BhpJdRF3Urp3dnIIYakYeCgzC/3Kww+p8ImsBTjvXDO1ivVEjKB4AdnCsRdLpGPszS9xF7439aXXCWHDItM4Wl458T12QWeV+DXiiwzD/kis1QQBWibzsZOzo9KDM3oapKa8I2ouKXb797Z7s+eLR1+Z10lyWZuNVLLZK5ckFT5riayLYeT8+IjFYVER/nfDzm3KpgVPnep/k4DANpDgAOK78iuTv3sBndNMoKrXz2qCZtfi3/gLGZTKcOy90meluFZy9+iLyb+M01VBWuDp/v0a2jSdsJPVmgUQqz7hLVvtc4KpMfiDhfxXGMQnaieP9jREFK3NutAiUrkjS96WS3v5eLK80o/aG1j5IsAvxU/0lMnEri3Yz6Qw1f0ymS6giKiFIUBRUWGXcm5S1qCjwL5GiU71r3nOcaC8T9T1pVLf1R558WqH6Ha95aJVqN6CnEHo8TsZl25lb5tlJgbgb2OFvLSrbUZwuM3R5mA9zP7ciQBywxNm7xFO8sX8QQk0bRhrhgk458KE72Ci/8lhZmvpYy5aqbI4OtaLkuFuu3lX3c7/LsGt+iTFkO6eDSS4CFEnFqg3W5Glvs7WZkTasVI7L0mN0q8DCPXaIDFVPlXEA0shxZuB6Iz+mx4MshQHwY9fMRSWB7gOF5cHjHYUBLfHT/gOwl35rkoJfVf9ikpcgT88mJyk9KTQpVM+CZAGUFDbgHsRqA0jPE19sBum3cqaA6fzh9AnWXfOlAY5KNDdTB4yip4UakCXWsiXVng0GfQ7KvxAguC59L7iZyFjdsIDESi7ZozcPHOpFZleeAU3yFTvMGHmO3G3RFrxyIGCwgWehus3YCqQxZPSE6+yLjXeXTqhqgk0kxcV/MlOFgzMcAhgKEYJS045sLZsmohsIVLV0ONY4uqogSxd3YUzc0WImi1mYdNbzYwbX5tPngah4SK61Nia8Z6xjZuKfXnxNFEkNneezPoPy97Hvd+9wzI+DkU5sa844DzGxeSY/ySE3DTtpowf440r5rX', 'domain': '.nih.gov', 'path': '/', 'expires': -1, 'httpOnly': True, 'secure': True},
{'name': 'MyNcbiSigninPreferences', 'value': 'O2dvb2dsZSY%3D', 'domain': '.nih.gov', 'path': '/', 'expires': 1740915025.611341, 'httpOnly': False, 'secure': False},
{'name': 'ncbi_prevPHID', 'value': 'CE88342C74D8A32100000000003B0036', 'domain': '.ncbi.nlm.nih.gov', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': False},
{'name': 'WebCubbyUser', 'value': '3GX25AI24DLUXL8LVDJFIVTH6LJRZBE1%3Blogged-in%3Dtrue%3Bmy-name%3Djiabintang77%2540gmail.com%3Bpersistent%3Dfalse%40015E11D6531E8483_1525SID', 'domain': '.nih.gov', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': False},
{'name': 'WebEnv', 'value': '1D7wJH%40015E11D6531E8483_1525SID', 'domain': '.nlm.nih.gov', 'path': '/', 'expires': 1733167826.636953, 'httpOnly': True, 'secure': True},
{'name': 'ncbi_pinger', 'value': 'N4IgDgTgpgbg+mAFgSwCYgFwgAwEYCsAorrgCIBs+AzLoQBwAsdAnLgOxU1XPZt354AygElSIAL5A===', 'domain': '.ncbi.nlm.nih.gov', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': True},
{'name': 'mp_94085d51c4102efbb82a71d85705cdcf_mixpanel', 'value': '%7B%22distinct_id%22%3A%20%22193872246ca871-06560f33a3902-1e525636-1fa400-193872246cb267c%22%2C%22%24device_id%22%3A%20%22193872246ca871-06560f33a3902-1e525636-1fa400-193872246cb267c%22%2C%22%24search_engine%22%3A%20%22google%22%2C%22%24initial_referrer%22%3A%20%22https%3A%2F%2Fwww.google.com%2F%22%2C%22%24initial_referring_domain%22%3A%20%22www.google.com%22%7D', 'domain': '.nih.gov', 'path': '/', 'expires': 1764675078, 'httpOnly': False, 'secure': False},
{'name': '_device_id', 'value': '49f9d6cfbd603c8509e73807be70a438', 'domain': 'github.com', 'path': '/', 'expires': 1764674868.858374, 'httpOnly': True, 'secure': True, 'sameSite': 'Lax'},
{'name': 'MicrosoftApplicationsTelemetryDeviceId', 'value': '3966ee53-78ca-4fa3-95d7-85e299cecee4', 'domain': 'github.com', 'path': '/', 'expires': 1763890136.033527, 'httpOnly': False, 'secure': True},
{'name': '_octo', 'value': 'GH1.1.1313590405.1727940967', 'domain': '.github.com', 'path': '/', 'expires': 1759476967, 'httpOnly': False, 'secure': True, 'sameSite': 'Lax'},
{'name': 'GHCC', 'value': 'Required:1-Analytics:1-SocialMedia:1-Advertising:1', 'domain': '.github.com', 'path': '/', 'expires': 1745563377, 'httpOnly': False, 'secure': True, 'sameSite': 'Lax'},
{'name': 'MSFPC', 'value': 'GUID=3452f0b49fd14d349a6dbf8ddee26d60&HASH=3452&LV=202410&V=4&LU=1730011383391', 'domain': 'github.com', 'path': '/', 'expires': 1761547383.513164, 'httpOnly': False, 'secure': True},
{'name': 'logged_in', 'value': 'yes', 'domain': '.github.com', 'path': '/', 'expires': 1762511337.053395, 'httpOnly': True, 'secure': True, 'sameSite': 'Lax'},
{'name': 'saved_user_sessions', 'value': '151511798%3A8an8gJwE3la35NvNIyacuRFRSHlup_9RBaQ5q4CThhvPV89o%7C152840453%3A2Quysh6Cns_a0IpeKcw-GAUZIt6ZndbJ7BoGdxx11qkZa9bi%7C151510669%3AMpYw2DQuFwt3NJiimm36OWLTQmoWFzVcSUbLuV8SBFRPqN8-%7C165454715%3AZSjwi4MUxVCr91r-m1ElvPL2L0DGDSoSo6uwV7pPpliml3js%7C148674909%3ALnLJclEDIxFjFcwX0eBlgOJzbDpsxKedtd6So7_EFs6HPtL7%7C56426168%3AmM073me2REE4yufnvYBKQ84KsG54oHKLBiTSs5tXYCoyQx4g', 'domain': 'github.com', 'path': '/', 'expires': 1739599354.295483, 'httpOnly': True, 'secure': True, 'sameSite': 'Lax'},
{'name': 'user_session', 'value': 'mM073me2REE4yufnvYBKQ84KsG54oHKLBiTSs5tXYCoyQx4g', 'domain': 'github.com', 'path': '/', 'expires': 1734348468.858989, 'httpOnly': True, 'secure': True, 'sameSite': 'Lax'},
{'name': '__Host-user_session_same_site', 'value': 'mM073me2REE4yufnvYBKQ84KsG54oHKLBiTSs5tXYCoyQx4g', 'domain': 'github.com', 'path': '/', 'expires': 1734348468.859144, 'httpOnly': True, 'secure': True, 'sameSite': 'Strict'},
{'name': 'dotcom_user', 'value': 'tjb-tech', 'domain': '.github.com', 'path': '/', 'expires': 1763647073.257243, 'httpOnly': True, 'secure': True, 'sameSite': 'Lax'},
{'name': 'color_mode', 'value': '%7B%22color_mode%22%3A%22auto%22%2C%22light_theme%22%3A%7B%22name%22%3A%22light%22%2C%22color_mode%22%3A%22light%22%7D%2C%22dark_theme%22%3A%7B%22name%22%3A%22dark%22%2C%22color_mode%22%3A%22dark%22%7D%7D', 'domain': '.github.com', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': True, 'sameSite': 'Lax'},
{'name': 'cpu_bucket', 'value': 'xlg', 'domain': '.github.com', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': True, 'sameSite': 'Lax'},
{'name': 'preferred_color_mode', 'value': 'light', 'domain': '.github.com', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': True, 'sameSite': 'Lax'},
{'name': 'tz', 'value': 'Asia%2FHong_Kong', 'domain': '.github.com', 'path': '/', 'expires': -1, 'httpOnly': False, 'secure': True, 'sameSite': 'Lax'},
{'name': 'mp_94085d51c4102efbb82a71d85705cdcf_mixpanel', 'value': '%7B%22distinct_id%22%3A%20%22cm28oqri501xjtwbjzt1prdzm%22%2C%22%24device_id%22%3A%20%221928eb980316cc-050dbe3db24bd2-16525637-16a7f0-1928eb980321bb8%22%2C%22%24search_engine%22%3A%20%22google%22%2C%22%24initial_referrer%22%3A%20%22https%3A%2F%2Fwww.google.com%2F%22%2C%22%24initial_referring_domain%22%3A%20%22www.google.com%22%2C%22%24user_id%22%3A%20%22cm28oqri501xjtwbjzt1prdzm%22%7D', 'domain': '.github.com', 'path': '/', 'expires': 1764674869, 'httpOnly': False, 'secure': False},
{'name': '_gh_sess', 'value': 'oUZyg0XEvo5fm%2FC18yV17FMePsGYB4hM9R5q8AgiwOAjTritHx1Ux4jNGjnm7Jaxz99%2FOxD4agIy05dUdG6cnSxRP62NJE7bZxIWFV2W64ekLVCwz7ge2oaRcvVlN4HjVhw5dsl2czpD8Irn%2BZG0Dmw16tH9GZZ4yhaFW5%2Fshmte3DBYsndzLNn4rGje9B3P1IFYyz9sYx23j71xRb9wRjwoLHPYGf4Yp3vRKVAzTp3X6nrjvgr4XGU2N%2BGPH3OYDZQYCIPLckTIEmRg7a0dd2KvU2mfcm%2F%2B9N9%2FNNBFTbKvUhPwWM8kIRpv5WTzU%2FI5Y0qBv71gX2B7nNm%2FtIkWjbWUhgizf%2BpxOHAuhs89sRaicpc9NjasSUISwfxRCoH5evWqVXEifhqQvSU42iR4wkhnRHs%3D--za2vZwPq%2FBJxevj3--tEOzEYASRs0gepJUCIv8Mg%3D%3D', 'domain': 'github.com', 'path': '/', 'expires': -1, 'httpOnly': True, 'secure': True, 'sameSite': 'Lax'},
{'domain': 'www.nature.com', 'secure': False, 'expirationDate': 1733745572000, 'hostOnly': True, 'name': 'user.uuid.v2', 'httpOnly': False, 'session': False, 'storeId': None, 'value': '"765b07e9-028b-45d1-8abd-baa7b6c88125"', 'path': '/', 'sameSite': 'Lax'},
{'domain': '.nature.com', 'secure': False, 'hostOnly': False, 'name': 'Hm_lpvt_d38bce82bcb44717ccc29a90c4b781ea', 'httpOnly': False, 'session': True, 'storeId': None, 'value': '1733140842', 'path': '/', 'sameSite': 'Lax'},
{'domain': '.nature.com', 'secure': False, 'expirationDate': 1764676842000, 'hostOnly': False, 'name': 'ajs_anonymous_id', 'httpOnly': False, 'session': False, 'storeId': None, 'value': '7e4d00ab-3618-46a2-b0fb-c80b189a0584', 'path': '/', 'sameSite': 'Lax'},
{'domain': '.nature.com', 'secure': False, 'expirationDate': 1764676842000, 'hostOnly': False, 'name': 'ajs_user_id', 'httpOnly': False, 'session': False, 'storeId': None, 'value': 'b1ae7862-b9d6-49c5-a7a5-ad96682ac6dc_SN', 'path': '/', 'sameSite': 'Lax'},
{'domain': '.nature.com', 'secure': False, 'expirationDate': 1767700841000, 'hostOnly': False, 'name': '_ga_B3E4QL2TPR', 'httpOnly': False, 'session': False, 'storeId': None, 'value': 'GS1.1.1733140776.1.1.1733140841.60.0.0', 'path': '/', 'sameSite': 'Lax'},
{'domain': '.nature.com', 'secure': False, 'expirationDate': 1767700841000, 'hostOnly': False, 'name': '_ga_ERRNTNZ807', 'httpOnly': False, 'session': False, 'storeId': None, 'value': 'GS1.1.1733140776.1.1.1733140841.60.0.467679787', 'path': '/', 'sameSite': 'Lax'},
{'domain': '.nature.com', 'secure': False, 'expirationDate': 1767304843000, 'hostOnly': False, 'name': 'cto_bundle', 'httpOnly': False, 'session': False, 'storeId': None, 'value': '5685XF9lQVd1dU4zd2xWRE1uZ3BpQTk3SUVXNkx2bGslMkZwTkZodjRWJTJCcGoyd0JWdiUyQjVlcGkwMVoyWHc4aGxKQkM2N3hyeGI4aFlIRzBZRDNTUTJFb1JYZVhPJTJGMUIlMkZka252a0RPZFdlbld4OU1jaUFrMHN6VDVaREYzSSUyRmFDMEtnb0FoaQ', 'path': '/', 'sameSite': 'Lax'},
{'domain': '.nature.com', 'secure': True, 'expirationDate': 1766836842000, 'hostOnly': False, 'name': '__gpi', 'httpOnly': False, 'session': False, 'storeId': None, 'value': 'UID=00000fa61060e41d:T=1733140842:RT=1733140842:S=ALNI_Mai2WWloG6liac6hEyJYOSjI3WtCg', 'path': '/', 'sameSite': 'Lax'},
{'domain': '.nature.com', 'secure': False, 'expirationDate': 1766836841000, 'hostOnly': False, 'name': '_uetvid', 'httpOnly': False, 'session': False, 'storeId': None, 'value': 'e6d7f220b0a411efaac753cc9ddac552', 'path': '/', 'sameSite': 'Lax'},
{'domain': '.nature.com', 'secure': False, 'expirationDate': 1764676841000, 'hostOnly': False, 'name': 'Hm_lvt_d38bce82bcb44717ccc29a90c4b781ea', 'httpOnly': False, 'session': False, 'storeId': None, 'value': '1733140777', 'path': '/', 'sameSite': 'Lax'},
{'domain': '.nature.com', 'secure': True, 'expirationDate': 1748692774000, 'hostOnly': False, 'name': '__eoi', 'httpOnly': False, 'session': False, 'storeId': None, 'value': 'ID=1ced890879e93934:T=1733140774:RT=1733140774:S=AA-AfjauQ5O9wXrdBjufrcsmQ-EM', 'path': '/', 'sameSite': 'Lax'},
{'domain': '.nature.com', 'secure': True, 'expirationDate': 1766836842000, 'hostOnly': False, 'name': '__gads', 'httpOnly': False, 'session': False, 'storeId': None, 'value': 'ID=edf25ef88638a1b3:T=1733140842:RT=1733140842:S=ALNI_MYUdW0s3LG6IOpCKgjBo4gbGPsI1Q', 'path': '/', 'sameSite': 'Lax'},
{'domain': '.nature.com', 'secure': False, 'expirationDate': 1740916843000, 'hostOnly': False, 'name': '_fbp', 'httpOnly': False, 'session': False, 'storeId': None, 'value': 'fb.1.1733140776577.688163329394303800', 'path': '/', 'sameSite': 'Lax'},
{'domain': '.nature.com', 'secure': False, 'expirationDate': 1767700841000, 'hostOnly': False, 'name': '_ga', 'httpOnly': False, 'session': False, 'storeId': None, 'value': 'GA1.1.2115119478.1733140776', 'path': '/', 'sameSite': 'Lax'},
{'domain': '.nature.com', 'secure': False, 'expirationDate': 1733227241000, 'hostOnly': False, 'name': '_uetsid', 'httpOnly': False, 'session': False, 'storeId': None, 'value': 'e6d7f280b0a411efaed4a5384bcc5d88', 'path': '/', 'sameSite': 'Lax'},
{'domain': '.nature.com', 'secure': False, 'hostOnly': False, 'name': 'HMACCOUNT', 'httpOnly': False, 'session': True, 'storeId': None, 'value': '7B6C1DFC72FE250C', 'path': '/', 'sameSite': 'Lax'},
{'domain': '.nature.com', 'secure': True, 'expirationDate': 1767700841000, 'hostOnly': False, 'name': 'permutive-id', 'httpOnly': False, 'session': False, 'storeId': None, 'value': '7cbbccaf-2079-4e6d-99fc-186a9db51c90', 'path': '/', 'sameSite': 'Lax'},
{'domain': '.nature.com', 'secure': False, 'expirationDate': 1767700841000, 'hostOnly': False, 'name': 'permutive-session', 'httpOnly': False, 'session': False, 'storeId': None, 'value': '%7B%22session_id%22%3A%221d3a9243-5c93-4975-ae30-63ca2047b7cf%22%2C%22last_updated%22%3A%222024-12-02T12%3A00%3A41.747Z%22%7D', 'path': '/', 'sameSite': 'Lax'},
{'domain': '.nature.com', 'secure': False, 'expirationDate': 1764676775000, 'hostOnly': False, 'name': 'sncc', 'httpOnly': False, 'session': False, 'storeId': None, 'value': 'P%3D8%3AV%3D68.0.0%26C%3DC01%2CC02%2CC03%2CC04%26D%3Dtrue', 'path': '/', 'sameSite': 'Lax'},
]

View file

@ -1,95 +0,0 @@
import os
import subprocess
from constant import GITHUB_AI_TOKEN, AI_USER, BASE_IMAGES
import time
from metachain.util import run_command_in_container
def init_container(workplace_name, container_name, test_pull_name = 'test_pull_1010', task_name = 'test_task', git_clone = False, setup_package = 'setup_package'):
    """Create (or reuse) a Docker container running the workplace TCP server.

    Performs three steps, in order:
      1. ensure the local workplace directory exists and contains the
         unpacked setup package (which provides ``tcp_server.py``);
      2. if ``git_clone`` is set, clone the ``metachain`` repository on
         branch ``test_pull_name`` and create/switch to a per-task branch;
      3. reuse an already-running container, restart a stopped one, or
         ``docker run`` a fresh container that launches ``tcp_server.py``
         with port 12345 published.

    Args:
        workplace_name: sub-folder of the current working directory that is
            bind-mounted into the container at ``/<workplace_name>``.
        container_name: name of the Docker container to look up or create.
        test_pull_name: branch cloned when ``git_clone`` is True.
        task_name: suffix appended to ``test_pull_name`` to form the new
            per-task branch name.
        git_clone: when True, run the clone + branch setup step.
        setup_package: basename of the ``packages/<name>.tar.gz`` tarball
            unpacked into the workplace when ``tcp_server.py`` is missing.

    Raises:
        Exception: if the clone, the branch switch, or ``docker run`` fails.
        TimeoutError: propagated from ``wait_for_container_ready`` when the
            container does not become ready within 60 seconds.
    """
    # get the current working directory's subfolder path
    workplace = os.path.join(os.getcwd(), workplace_name)
    # check if the container exists (queried up-front; result used further below)
    container_check_command = ["docker", "ps", "-a", "--filter", f"name={container_name}", "--format", "{{.Names}}"]
    existing_container = subprocess.run(container_check_command, capture_output=True, text=True)
    os.makedirs(workplace, exist_ok=True)
    # cp_command = ["cp", "tcp_server.py", workplace]
    # Unpack the setup tarball only once: tcp_server.py marks a prepared workplace.
    if not os.path.exists(os.path.join(workplace, 'tcp_server.py')):
        unzip_command = ["tar", "-xzvf", f"packages/{setup_package}.tar.gz", "-C", workplace]
        subprocess.run(unzip_command)
    if git_clone:
        if not os.path.exists(os.path.join(workplace, 'metachain')):
            # NOTE(review): the access token is embedded in the remote URL and
            # the command is run through a shell, so the token can leak into
            # process listings and shell history — consider a credential helper.
            git_command = ["cd", workplace, "&&", "git", "clone", "-b", test_pull_name, f"https://{AI_USER}:{GITHUB_AI_TOKEN}@github.com/tjb-tech/metachain.git"]
            git_command = " ".join(git_command)
            result = subprocess.run(git_command, shell=True)
            if result.returncode != 0:
                raise Exception(f"Failed to clone the repository. Please check your internet connection and try again.")
        # create a new branch
        new_branch_name = f"{test_pull_name}_{task_name}"
        create_branch_command = f"cd {workplace}/metachain && git checkout -b {new_branch_name}"
        result = subprocess.run(create_branch_command, shell=True, capture_output=True, text=True)
        if result.returncode != 0:
            # Branch creation can fail when the branch already exists; in that
            # case fall back to a plain checkout of the existing branch.
            print(Exception(f"Failed to create and switch to new branch. Error: {result.stderr}"))
            switch_branch_command = f"cd {workplace}/metachain && git checkout {new_branch_name}"
            result = subprocess.run(switch_branch_command, shell=True, capture_output=True, text=True)
            if result.returncode != 0:
                raise Exception(f"Failed to switch to new branch. Error: {result.stderr}")
            else:
                print(f"Successfully switched to new branch: {new_branch_name}")
        else:
            print(f"Successfully created and switched to new branch: {new_branch_name}")
    if existing_container.stdout.strip() == container_name:
        # check if the container is running
        running_check_command = ["docker", "ps", "--filter", f"name={container_name}", "--format", "{{.Names}}"]
        running_container = subprocess.run(running_check_command, capture_output=True, text=True)
        if running_container.stdout.strip() == container_name:
            print(f"Container '{container_name}' is already running. Skipping creation.")
            return  # container is already running, skip creation
        else:
            # container exists but is not running, start it
            start_command = ["docker", "start", container_name]
            subprocess.run(start_command)
            print(f"Container '{container_name}' has been started.")
            return
    # if the container does not exist, create and start a new container
    docker_command = [
        "docker", "run", "-d", "--name", container_name, "--user", "root",
        "-v", f"{workplace}:/{workplace_name}",
        "-w", f"/{workplace_name}", "-p", "12345:12345", BASE_IMAGES,
        "/bin/bash", "-c",
        f"python3 /{workplace_name}/tcp_server.py --workplace {workplace_name}"
    ]
    # execute the docker command
    result = subprocess.run(docker_command, capture_output=True, text=True)
    if result.returncode != 0:
        raise Exception(f"Failed to start container: {result.stderr}")
    if wait_for_container_ready(container_name, timeout=60):
        print(f"Container '{container_name}' has been created and started.")
def wait_for_container_ready(container_name, timeout=30):
    """Block until the container is running and ``tcp_server.py`` is up.

    Polls ``docker inspect`` once per second; once the container reports
    ``Running``, additionally runs ``ps aux`` inside it (via
    ``run_command_in_container``) to confirm the TCP server process exists.

    Args:
        container_name: name of the Docker container to poll.
        timeout: maximum number of seconds to wait.

    Returns:
        True once both the container and its tcp_server process are running.

    Raises:
        TimeoutError: if readiness is not reached within ``timeout`` seconds.
    """
    start_time = time.time()
    while time.time() - start_time < timeout:
        result = subprocess.run(
            ["docker", "inspect", "--format", "{{.State.Running}}", container_name],
            capture_output=True,
            text=True
        )
        if result.returncode == 0 and "true" in result.stdout.lower():
            # Additionally check whether tcp_server is running inside the container.
            try:
                result = run_command_in_container('ps aux')
                if "tcp_server.py" in result['result']:
                    return True
            except Exception:
                # Best effort: the in-container server may not accept
                # connections yet — keep polling until the timeout.
                pass
        time.sleep(1)
    raise TimeoutError(f"Container {container_name} failed to start within {timeout} seconds")

View file

@ -52,24 +52,25 @@ class DockerEnv:
unzip_command = ["tar", "-xzvf", f"packages/{self.setup_package}.tar.gz", "-C", self.local_workplace]
subprocess.run(unzip_command)
if self.git_clone:
if not os.path.exists(os.path.join(self.local_workplace, 'metachain')):
git_command = ["cd", self.local_workplace, "&&", "git", "clone", "-b", self.test_pull_name, f"https://{AI_USER}:{GITHUB_AI_TOKEN}@github.com/tjb-tech/metachain.git"]
if not os.path.exists(os.path.join(self.local_workplace, 'MetaChain')):
git_command = ["cd", self.local_workplace, "&&", "git", "clone", "-b", self.test_pull_name, f"https://{AI_USER}:{GITHUB_AI_TOKEN}@github.com/HKUDS/MetaChain.git"]
print(git_command)
git_command = " ".join(git_command)
result = subprocess.run(git_command, shell=True)
if result.returncode != 0:
raise Exception(f"Failed to clone the repository. Please check your internet connection and try again.")
copy_env_command = f"cp .env {self.local_workplace}/metachain"
raise Exception(f"Failed to clone the repository. The error is: {result.stdout}")
copy_env_command = f"cp .env {self.local_workplace}/MetaChain"
result = subprocess.run(copy_env_command, shell=True, capture_output=True, text=True)
if result.returncode != 0:
raise Exception(f"Failed to copy .env file to the metachain directory. Error: {result.stderr}")
raise Exception(f"Failed to copy .env file to the MetaChain directory. Error: {result.stderr}")
# create a new branch
new_branch_name = f"{self.test_pull_name}_{self.task_name}"
create_branch_command = f"cd {self.local_workplace}/metachain && git checkout -b {new_branch_name}"
create_branch_command = f"cd {self.local_workplace}/MetaChain && git checkout -b {new_branch_name}"
result = subprocess.run(create_branch_command, shell=True, capture_output=True, text=True)
if result.returncode != 0:
print(Exception(f"Failed to create and switch to new branch. Error: {result.stderr}"))
switch_branch_command = f"cd {self.local_workplace}/metachain && git checkout {new_branch_name}"
switch_branch_command = f"cd {self.local_workplace}/MetaChain && git checkout {new_branch_name}"
result = subprocess.run(switch_branch_command, shell=True, capture_output=True, text=True)
if result.returncode != 0:
raise Exception(f"Failed to switch to new branch. Error: {result.stderr}")

View file

@ -1,76 +0,0 @@
import os
import shutil
import time
from seleniumbase.config import settings
from seleniumbase.fixtures import constants
# The "downloads_folder" is a folder for saving downloaded files.
# Works for downloads initiated by Chromium and Firefox WebDriver clicks.
# Browser type doesn't matter if using self.download_file(file_url)
# or self.save_file_as(file_url, new_file_name)
# The "downloads_folder" is cleaned out at the start of each pytest run,
# but there is an option to save existing files in "archived_files".
DOWNLOADS_DIR = constants.Files.DOWNLOADS_FOLDER
abs_path = os.path.abspath("./examples")
downloads_path = os.path.join(abs_path, DOWNLOADS_DIR)
def get_downloads_folder():
    """Return the absolute path of the downloads folder under ./examples."""
    return os.path.join(abs_path, DOWNLOADS_DIR)
def _strip_outer_slash(path):
    """Drop at most one leading and one trailing "/" from *path*.

    Mirrors the original single-character trimming; ``str.strip("/")`` is
    deliberately avoided because it would also remove repeated slashes.
    """
    if path.endswith("/"):
        path = path[:-1]
    if path.startswith("/"):
        path = path[1:]
    return path


def reset_downloads_folder():
    """Clears the downloads folder.

    If settings.ARCHIVE_EXISTING_DOWNLOADS is set to True, archives it.
    Empty downloads/archive directories are removed afterwards.
    """
    downloads_dir = _strip_outer_slash(constants.Files.DOWNLOADS_FOLDER)
    archive_dir = _strip_outer_slash(constants.Files.ARCHIVED_DOWNLOADS_FOLDER)
    # Safety guard: if the constants were renamed to something suspiciously
    # short, bail out rather than risk deleting an unrelated directory.
    if len(downloads_dir) < 10 or len(archive_dir) < 10:
        return  # Prevent accidental deletions if constants are renamed
    archived_downloads_folder = os.path.join(os.getcwd(), archive_dir) + os.sep
    # Archive (or discard) any existing downloads before clearing.
    if os.path.exists(downloads_path) and os.listdir(downloads_path):
        reset_downloads_folder_assistant(archived_downloads_folder)
    # Remove the downloads folder itself if it is now empty.
    if os.path.exists(downloads_path) and not os.listdir(downloads_path):
        try:
            os.rmdir(downloads_path)
        except OSError:
            pass
    # Remove the archive folder too if nothing was archived into it.
    if os.path.exists(archived_downloads_folder) and not os.listdir(archived_downloads_folder):
        try:
            os.rmdir(archived_downloads_folder)
        except OSError:
            pass
def reset_downloads_folder_assistant(archived_downloads_folder):
    """Move the current downloads folder into a timestamped archive folder.

    The downloads folder is recreated empty afterwards. If
    ``settings.ARCHIVE_EXISTING_DOWNLOADS`` is False, the freshly archived
    copy is deleted again, so the net effect is simply clearing downloads.

    Args:
        archived_downloads_folder: destination directory (ends with os.sep)
            under which the timestamped sub-folder is created.
    """
    if not os.path.exists(archived_downloads_folder):
        try:
            os.makedirs(archived_downloads_folder, exist_ok=True)
        except Exception:
            pass  # Should only be reachable during multi-threaded test runs
    new_archived_downloads_sub_folder = (
        f"{archived_downloads_folder}/downloads_{int(time.time())}"
    )
    if os.path.exists(downloads_path) and os.listdir(downloads_path):
        try:
            # Move the whole folder aside, then recreate an empty one.
            shutil.move(downloads_path, new_archived_downloads_sub_folder)
            os.makedirs(downloads_path, exist_ok=True)
        except Exception:
            pass
    if not settings.ARCHIVE_EXISTING_DOWNLOADS:
        # Archiving is disabled: drop the copy we just made.
        try:
            shutil.rmtree(new_archived_downloads_sub_folder)
        except OSError:
            pass

View file

@ -1,568 +0,0 @@
import base64
import io
import logging
import pkgutil
import re
from typing import Literal
import numpy as np
import PIL.Image
import playwright.sync_api
from .constants import BROWSERGYM_ID_ATTRIBUTE as BID_ATTR
from .constants import BROWSERGYM_SETOFMARKS_ATTRIBUTE as SOM_ATTR
from .constants import BROWSERGYM_VISIBILITY_ATTRIBUTE as VIS_ATTR
MARK_FRAMES_MAX_TRIES = 3
logger = logging.getLogger(__name__)
class MarkingError(Exception):
pass
def _pre_extract(
    page: playwright.sync_api.Page,
    tags_to_mark: Literal["all", "standard_html"] = "standard_html",
    lenient: bool = False,
):
    """
    pre-extraction routine, marks dom elements (set bid and dynamic attributes like value and checked)

    Injects ``frame_mark_elements.js`` into the main frame and, recursively,
    into every reachable child frame. Each child frame's bid is read from
    its parent's frame element so the JS side can prefix nested bids.

    Args:
        page: the playwright page whose frames should be marked.
        tags_to_mark: passed through to the marking script; which element
            tags receive a bid.
        lenient: when True, child frames without a bid are skipped with a
            warning instead of raising.

    Raises:
        MarkingError: when a child frame has no bid and ``lenient`` is False.
    """
    js_frame_mark_elements = pkgutil.get_data(__name__, "javascript/frame_mark_elements.js").decode(
        "utf-8"
    )

    # we can't run this loop in JS due to Same-Origin Policy
    # (can't access the content of an iframe from another one)
    def mark_frames_recursive(frame, frame_bid: str):
        # bid format invariant: empty for the main frame, letters only otherwise
        assert frame_bid == "" or re.match(r"^[a-z][a-zA-Z]*$", frame_bid)
        logger.debug(f"Marking frame {repr(frame_bid)}")

        # mark all DOM elements in the frame (it will use the parent frame element's bid as a prefix)
        warning_msgs = frame.evaluate(
            js_frame_mark_elements,
            [frame_bid, BID_ATTR, tags_to_mark],
        )
        # print warning messages if any
        for msg in warning_msgs:
            logger.warning(msg)

        # recursively mark all descendant frames
        for child_frame in frame.child_frames:
            # deal with detached frames
            if child_frame.is_detached():
                continue
            # deal with weird frames (pdf viewer in <embed>) whose frame
            # element does not round-trip back to the same content frame
            child_frame_elem = child_frame.frame_element()
            if not child_frame_elem.content_frame() == child_frame:
                logger.warning(
                    f"Skipping frame '{child_frame.name}' for marking, seems problematic."
                )
                continue
            # deal with sandboxed frames with blocked script execution
            sandbox_attr = child_frame_elem.get_attribute("sandbox")
            if sandbox_attr is not None and "allow-scripts" not in sandbox_attr.split():
                continue
            # the bid was written onto the frame element by the parent's marking pass
            child_frame_bid = child_frame_elem.get_attribute(BID_ATTR)
            if child_frame_bid is None:
                if lenient:
                    logger.warning("Cannot mark a child frame without a bid. Skipping frame.")
                    continue
                else:
                    raise MarkingError("Cannot mark a child frame without a bid.")
            mark_frames_recursive(child_frame, frame_bid=child_frame_bid)

    # mark all frames recursively
    mark_frames_recursive(page.main_frame, frame_bid="")
def _post_extract(page: playwright.sync_api.Page):
    """Undo the element marking performed by ``_pre_extract`` on every frame.

    Frames that cannot be safely scripted (cross-origin-ish <embed> viewers,
    sandboxed frames without allow-scripts, frames without a bid) are skipped;
    frames detached mid-iteration are tolerated.
    """
    js_frame_unmark_elements = pkgutil.get_data(
        __name__, "javascript/frame_unmark_elements.js"
    ).decode("utf-8")

    # we can't run this loop in JS due to Same-Origin Policy
    # (can't access the content of an iframe from a another one)
    for frame in page.frames:
        try:
            if not frame == page.main_frame:
                # deal with weird frames (pdf viewer in <embed>)
                if not frame.frame_element().content_frame() == frame:
                    logger.warning(
                        f"Skipping frame '{frame.name}' for unmarking, seems problematic."
                    )
                    continue
                # deal with sandboxed frames with blocked script execution
                sandbox_attr = frame.frame_element().get_attribute("sandbox")
                if sandbox_attr is not None and "allow-scripts" not in sandbox_attr.split():
                    continue
                # deal with frames without a BID
                bid = frame.frame_element().get_attribute(BID_ATTR)
                if bid is None:
                    continue

            frame.evaluate(js_frame_unmark_elements)
        except playwright.sync_api.Error as e:
            # frames may detach at any moment; that is expected, not an error
            if any(msg in str(e) for msg in ("Frame was detached", "Frame has been detached")):
                pass
            else:
                raise e
def extract_screenshot(page: playwright.sync_api.Page):
    """
    Extracts the screenshot image of a Playwright page using Chrome DevTools Protocol.

    Args:
        page: the playwright page of which to extract the screenshot.

    Returns:
        A screenshot of the page, in the form of a 3D array (height, width, rgb).
    """
    session = page.context.new_cdp_session(page)
    reply = session.send(
        "Page.captureScreenshot",
        {
            "format": "png",
        },
    )
    session.detach()

    # decode the base64 payload into raw png bytes
    raw_png = base64.b64decode(reply["data"])
    with io.BytesIO(raw_png) as buffer:
        # load the png, force 3-channel RGB, and convert to a numpy array
        image = np.array(PIL.Image.open(buffer).convert(mode="RGB"))
    return image
# we could handle more data items here if needed
__BID_EXPR = r"([a-zA-Z0-9]+)"
__DATA_REGEXP = re.compile(r"^browsergym_id_" + __BID_EXPR + r"\s?" + r"(.*)")


def extract_data_items_from_aria(string: str, log_level: int = logging.NOTSET):
    """
    Utility function to extract temporary data stored in the ARIA attributes of a node
    """
    parsed = __DATA_REGEXP.fullmatch(string)
    if parsed is None:
        # not a browsergym-tagged value: log (optionally) and return it untouched
        logger.log(
            level=log_level,
            msg=f"Failed to extract BrowserGym data from ARIA string: {repr(string)}",
        )
        return [], string
    groups = parsed.groups()
    # all leading groups are data items, the final group is the original ARIA text
    return groups[:-1], groups[-1]
def extract_dom_snapshot(
    page: playwright.sync_api.Page,
    computed_styles=None,
    include_dom_rects: bool = True,
    include_paint_order: bool = True,
    temp_data_cleanup: bool = True,
):
    """
    Extracts the DOM snapshot of a Playwright page using Chrome DevTools Protocol.

    Args:
        page: the playwright page of which to extract the screenshot.
        computed_styles: whitelist of computed styles to return. ``None``
            (the default) is treated as an empty whitelist, preserving the
            previous behavior.
        include_dom_rects: whether to include DOM rectangles (offsetRects, clientRects, scrollRects) in the snapshot.
        include_paint_order: whether to include paint orders in the snapshot.
        temp_data_cleanup: whether to clean up the temporary data stored in the ARIA attributes.

    Returns:
        A document snapshot, including the full DOM tree of the root node (including iframes,
        template contents, and imported documents) in a flattened array, as well as layout
        and white-listed computed style information for the nodes. Shadow DOM in the returned
        DOM tree is flattened.
    """
    # Fix: the original used a mutable default argument (computed_styles=[]),
    # which is shared across calls; use a None sentinel instead.
    if computed_styles is None:
        computed_styles = []
    cdp = page.context.new_cdp_session(page)
    dom_snapshot = cdp.send(
        "DOMSnapshot.captureSnapshot",
        {
            "computedStyles": computed_styles,
            "includeDOMRects": include_dom_rects,
            "includePaintOrder": include_paint_order,
        },
    )
    cdp.detach()

    # if requested, remove temporary data stored in the ARIA attributes of each node
    if temp_data_cleanup:
        pop_bids_from_attribute(dom_snapshot, "aria-roledescription")
        pop_bids_from_attribute(dom_snapshot, "aria-description")

    return dom_snapshot
def pop_bids_from_attribute(dom_snapshot, attr: str):
    """Strip BrowserGym bid markers from every value of ``attr`` in a DOM snapshot.

    DOMSnapshot stores attribute names/values as indices into a shared
    ``strings`` table; this rewrites the affected strings in place and deletes
    the attribute entirely when the cleaned value becomes empty.

    Args:
        dom_snapshot: snapshot dict (mutated in place).
        attr: attribute name whose values should be cleaned (e.g. "aria-description").
    """
    try:
        target_attr_name_id = dom_snapshot["strings"].index(attr)
    except ValueError:
        target_attr_name_id = -1
    # run the cleanup only if the target attribute string is present
    if target_attr_name_id > -1:
        # several nodes may share the same string-table entry; clean each entry once
        processed_string_ids = set()
        for document in dom_snapshot["documents"]:
            for node_attributes in document["nodes"]["attributes"]:
                # attributes are a flat [name_id, value_id, name_id, value_id, ...] list
                # (removed a dead `i = 0` initialization; the for loop rebinds i)
                for i in range(0, len(node_attributes), 2):
                    attr_name_id = node_attributes[i]
                    attr_value_id = node_attributes[i + 1]
                    if attr_name_id == target_attr_name_id:
                        attr_value = dom_snapshot["strings"][attr_value_id]
                        # remove any data stored in the target attribute
                        if attr_value_id not in processed_string_ids:
                            _, new_attr_value = extract_data_items_from_aria(attr_value)
                            dom_snapshot["strings"][
                                attr_value_id
                            ] = new_attr_value  # update the string in the metadata
                            processed_string_ids.add(
                                attr_value_id
                            )  # mark string as processed (in case several nodes share the same target attribute string value)
                            attr_value = new_attr_value
                        # remove target attribute (name and value) if empty
                        if attr_value == "":
                            del node_attributes[i : i + 2]
                        # once target attribute is found, exit the search
                        break
def extract_dom_extra_properties(dom_snapshot):
    """Compute per-bid extra properties from a CDP DOM snapshot.

    Walks the (possibly nested, same-origin) iframe tree depth-first and, for
    every node carrying a BrowserGym bid attribute, collects visibility,
    bounding box (converted to absolute page coordinates), clickability and
    set-of-marks flag.

    Args:
        dom_snapshot: snapshot as returned by ``extract_dom_snapshot``.

    Returns:
        dict mapping bid -> {"visibility", "bbox", "clickable", "set_of_marks"}.
    """
    def to_string(idx):
        # -1 is the snapshot's "no string" sentinel
        if idx == -1:
            return None
        else:
            return dom_snapshot["strings"][idx]

    # pre-locate important string ids
    try:
        bid_string_id = dom_snapshot["strings"].index(BID_ATTR)
    except ValueError:
        bid_string_id = -1
    try:
        vis_string_id = dom_snapshot["strings"].index(VIS_ATTR)
    except ValueError:
        vis_string_id = -1
    try:
        som_string_id = dom_snapshot["strings"].index(SOM_ATTR)
    except ValueError:
        som_string_id = -1

    # build the iframe tree (DFS from the first frame)
    doc_properties = {
        0: {
            "parent": None,
        }
    }
    docs_to_process = [0]
    while docs_to_process:
        doc = docs_to_process.pop(-1)  # DFS

        # children are child documents (iframes) embedded in this document
        children = dom_snapshot["documents"][doc]["nodes"]["contentDocumentIndex"]
        for node, child_doc in zip(children["index"], children["value"]):
            doc_properties[child_doc] = {
                "parent": {
                    "doc": doc,  # parent frame index
                    "node": node,  # node index within the parent frame
                }
            }
            docs_to_process.append(child_doc)

        # recover the absolute x and y position of the frame node in the parent (if any)
        parent = doc_properties[doc]["parent"]
        if parent:
            parent_doc = parent["doc"]
            parent_node = parent["node"]
            try:
                node_layout_idx = dom_snapshot["documents"][parent_doc]["layout"][
                    "nodeIndex"
                ].index(parent_node)
            except ValueError:
                node_layout_idx = -1
            if node_layout_idx >= 0:
                node_bounds = dom_snapshot["documents"][parent_doc]["layout"]["bounds"][
                    node_layout_idx
                ]  # can be empty?
                # absolute position of parent + relative position of frame node within parent
                parent_node_abs_x = doc_properties[parent_doc]["abs_pos"]["x"] + node_bounds[0]
                parent_node_abs_y = doc_properties[parent_doc]["abs_pos"]["y"] + node_bounds[1]
            else:
                parent_node_abs_x = 0
                parent_node_abs_y = 0
        else:
            parent_node_abs_x = 0
            parent_node_abs_y = 0

        # get the frame's absolute position, by adding any scrolling offset if any
        doc_properties[doc]["abs_pos"] = {
            "x": parent_node_abs_x - dom_snapshot["documents"][doc]["scrollOffsetX"],
            "y": parent_node_abs_y - dom_snapshot["documents"][doc]["scrollOffsetY"],
        }

        document = dom_snapshot["documents"][doc]
        doc_properties[doc]["nodes"] = [
            {
                "bid": None,  # default value, to be filled (str)
                "visibility": None,  # default value, to be filled (float)
                "bbox": None,  # default value, to be filled (list)
                "clickable": False,  # default value, to be filled (bool)
                "set_of_marks": None,  # default value, to be filled (bool)
            }
            for _ in enumerate(document["nodes"]["parentIndex"])
        ]  # all nodes in document

        # extract clickable property
        for node_idx in document["nodes"]["isClickable"]["index"]:
            doc_properties[doc]["nodes"][node_idx]["clickable"] = True

        # extract bid and visibility properties (attribute-based)
        for node_idx, node_attrs in enumerate(document["nodes"]["attributes"]):
            i = 0
            # loop over all attributes
            for i in range(0, len(node_attrs), 2):
                name_string_id = node_attrs[i]
                value_string_id = node_attrs[i + 1]
                if name_string_id == bid_string_id:
                    doc_properties[doc]["nodes"][node_idx]["bid"] = to_string(value_string_id)
                if name_string_id == vis_string_id:
                    doc_properties[doc]["nodes"][node_idx]["visibility"] = float(
                        to_string(value_string_id)
                    )
                if name_string_id == som_string_id:
                    doc_properties[doc]["nodes"][node_idx]["set_of_marks"] = (
                        to_string(value_string_id) == "1"
                    )

        # extract bbox property (in absolute coordinates)
        for node_idx, bounds, client_rect in zip(
            document["layout"]["nodeIndex"],
            document["layout"]["bounds"],
            document["layout"]["clientRects"],
        ):
            # empty clientRect means element is not actually rendered
            if not client_rect:
                doc_properties[doc]["nodes"][node_idx]["bbox"] = None
            else:
                # bounds gives the relative position within the document
                doc_properties[doc]["nodes"][node_idx]["bbox"] = bounds.copy()
                # adjust for absolute document position
                doc_properties[doc]["nodes"][node_idx]["bbox"][0] += doc_properties[doc]["abs_pos"][
                    "x"
                ]
                doc_properties[doc]["nodes"][node_idx]["bbox"][1] += doc_properties[doc]["abs_pos"][
                    "y"
                ]

        # Note: other interesting fields
        # document["nodes"]["parentIndex"]  # parent node
        # document["nodes"]["nodeType"]
        # document["nodes"]["nodeName"]
        # document["nodes"]["nodeValue"]
        # document["nodes"]["textValue"]
        # document["nodes"]["inputValue"]
        # document["nodes"]["inputChecked"]
        # document["nodes"]["optionSelected"]
        # document["nodes"]["pseudoType"]
        # document["nodes"]["pseudoIdentifier"]
        # document["nodes"]["isClickable"]
        # document["textBoxes"]
        # document["layout"]["nodeIndex"]
        # document["layout"]["bounds"]
        # document["layout"]["offsetRects"]
        # document["layout"]["scrollRects"]
        # document["layout"]["clientRects"]
        # document["layout"]["paintOrders"]

    # collect the extra properties of all nodes with a browsergym_id attribute
    extra_properties = {}
    for doc in doc_properties.keys():
        for node in doc_properties[doc]["nodes"]:
            bid = node["bid"]
            if bid:
                if bid in extra_properties:
                    logger.warning(f"duplicate {BID_ATTR}={repr(bid)} attribute detected")
                extra_properties[bid] = {
                    extra_prop: node[extra_prop]
                    for extra_prop in ("visibility", "bbox", "clickable", "set_of_marks")
                }

    return extra_properties
def extract_all_frame_axtrees(page: playwright.sync_api.Page):
    """
    Extracts the AXTree of all frames (main document and iframes) of a Playwright page using Chrome DevTools Protocol.

    Args:
        page: the playwright page of which to extract the frame AXTrees.

    Returns:
        A dictionary of AXTrees (as returned by Chrome DevTools Protocol) indexed by frame IDs.
    """
    cdp = page.context.new_cdp_session(page)

    # extract the frame tree
    frame_tree = cdp.send(
        "Page.getFrameTree",
        {},
    )

    # extract all frame IDs into a list
    # (breadth-first-search through the frame tree)
    frame_ids = []
    root_frame = frame_tree["frameTree"]
    frames_to_process = [root_frame]
    while frames_to_process:
        frame = frames_to_process.pop()
        frames_to_process.extend(frame.get("childFrames", []))
        # extract the frame ID
        frame_id = frame["frame"]["id"]
        frame_ids.append(frame_id)

    # extract the AXTree of each frame
    frame_axtrees = {
        frame_id: cdp.send(
            "Accessibility.getFullAXTree",
            {"frameId": frame_id},
        )
        for frame_id in frame_ids
    }

    cdp.detach()

    # extract browsergym data from ARIA attributes
    for ax_tree in frame_axtrees.values():
        for node in ax_tree["nodes"]:
            data_items = []
            # look for data in the node's "roledescription" property
            if "properties" in node:
                for i, prop in enumerate(node["properties"]):
                    if prop["name"] == "roledescription":
                        data_items, new_value = extract_data_items_from_aria(prop["value"]["value"])
                        prop["value"]["value"] = new_value
                        # remove the "description" property if empty
                        if new_value == "":
                            del node["properties"][i]
                        break
            # look for data in the node's "description" (fallback plan)
            if "description" in node:
                data_items_bis, new_value = extract_data_items_from_aria(
                    node["description"]["value"]
                )
                node["description"]["value"] = new_value
                if new_value == "":
                    del node["description"]
                # the roledescription data takes precedence over the fallback
                if not data_items:
                    data_items = data_items_bis
            # add the extracted "browsergym" data to the AXTree
            if data_items:
                (browsergym_id,) = data_items
                node["browsergym_id"] = browsergym_id

    return frame_axtrees
def extract_merged_axtree(page: playwright.sync_api.Page):
    """
    Extracts the merged AXTree of a Playwright page (main document and iframes AXTrees merged) using Chrome DevTools Protocol.

    Args:
        page: the playwright page of which to extract the merged AXTree.

    Returns:
        A merged AXTree (same format as those returned by Chrome DevTools Protocol).
    """
    frame_axtrees = extract_all_frame_axtrees(page)

    cdp = page.context.new_cdp_session(page)

    # merge all AXTrees into one
    merged_axtree = {"nodes": []}
    for ax_tree in frame_axtrees.values():
        merged_axtree["nodes"].extend(ax_tree["nodes"])
        # connect each iframe node to the corresponding AXTree root node
        for node in ax_tree["nodes"]:
            if node["role"]["value"] == "Iframe":
                # resolve which frame this iframe node hosts via DOM.describeNode
                frame_id = (
                    cdp.send("DOM.describeNode", {"backendNodeId": node["backendDOMNodeId"]})
                    .get("node", {})
                    .get("frameId", None)
                )
                if not frame_id:
                    logger.warning(
                        f"AXTree merging: unable to recover frameId of node with backendDOMNodeId {repr(node['backendDOMNodeId'])}, skipping"
                    )
                # it seems Page.getFrameTree() from CDP omits certain Frames (empty frames?)
                # if a frame is not found in the extracted AXTrees, we just ignore it
                elif frame_id in frame_axtrees:
                    # root node should always be the first node in the AXTree
                    frame_root_node = frame_axtrees[frame_id]["nodes"][0]
                    assert frame_root_node["frameId"] == frame_id
                    node["childIds"].append(frame_root_node["nodeId"])
                else:
                    logger.warning(
                        f"AXTree merging: extracted AXTree does not contain frameId '{frame_id}', skipping"
                    )

    cdp.detach()

    return merged_axtree
def extract_focused_element_bid(page: playwright.sync_api.Page):
    """Return the bid of the element that currently holds focus.

    Dives through shadow DOMs (in the injected JS) and through iframes (in the
    Python loop) until the innermost focused element is reached; returns ""
    when no focused element is found.
    """
    # this JS code will dive through ShadowDOMs
    extract_focused_element_with_bid_script = """\
() => {
    // This recursive function traverses shadow DOMs
    function getActiveElement(root) {
        const active_element = root.activeElement;

        if (!active_element) {
            return null;
        }

        if (active_element.shadowRoot) {
            return getActiveElement(active_element.shadowRoot);
        } else {
            return active_element;
        }
    }
    return getActiveElement(document);
}"""
    # this playwright code will dive through iFrames
    frame = page
    focused_bid = ""
    while frame:
        # NOTE(review): BID_ATTR is passed as an evaluate argument but the JS
        # function above declares no parameter — it appears unused; confirm.
        focused_element = frame.evaluate_handle(
            extract_focused_element_with_bid_script, BID_ATTR
        ).as_element()
        if focused_element:
            # descend into the focused element's frame (if it is an iframe)
            frame = focused_element.content_frame()
            focused_bid = focused_element.get_attribute(BID_ATTR)
        else:
            frame = None

    return focused_bid

View file

@ -1,491 +0,0 @@
import base64
import io
import logging
import re
from typing import Dict, List, Optional
import numpy as np
import PIL.Image
logger = logging.getLogger(__name__)

# Attribute names used to tag DOM elements with BrowserGym metadata.
BID_ATTR = "browsergym_id"
VIS_ATTR = "browsergym_visibility"
SOM_ATTR = "browsergym_set_of_marks"
class MarkingError(Exception):
    """Raised when DOM elements cannot be marked with BrowserGym ids."""

    pass
def ensure_cdp_activated(browser):
    """Make sure the browser's CDP (Chrome DevTools Protocol) mode is active.

    If the browser has no live ``cdp`` handle yet, activate CDP mode on the
    current URL (falling back to "about:blank") and pause briefly so the
    activation can complete.
    """
    if not hasattr(browser, 'cdp') or browser.cdp is None:
        current_url = browser.get_current_url()
        browser.activate_cdp_mode(current_url if current_url else "about:blank")
        browser.sleep(1)  # wait for CDP mode activation to finish
def _pre_extract(browser):
    """Mark DOM elements (including same-origin iframes) with a browsergym_id.

    Injects a JS routine that assigns a unique ``browsergym_id`` attribute to
    every element so later extraction steps can reference elements stably.
    Cross-origin and script-blocked iframes are skipped inside the JS.

    Raises:
        MarkingError: if CDP evaluation fails or the marking reports failure.
    """
    try:
        ensure_cdp_activated(browser)
        # define and inject the marking function
        browser.cdp.evaluate("""
            window.markElements = function(frameBid='') {
                function markElementsInDocument(doc, bid_prefix='') {
                    const elements = doc.getElementsByTagName('*');
                    for (let element of elements) {
                        if (!element.hasAttribute('browsergym_id')) {
                            const bid = bid_prefix + element.tagName.toLowerCase() + '_' +
                                      Math.random().toString(36).substr(2, 9);
                            element.setAttribute('browsergym_id', bid);
                        }
                    }
                    // 递归处理所有iframe
                    const iframes = doc.getElementsByTagName('iframe');
                    for (let iframe of iframes) {
                        try {
                            const frameDoc = iframe.contentDocument;
                            if (frameDoc) {
                                const frameBid = iframe.getAttribute('browsergym_id') || '';
                                const sandbox = iframe.getAttribute('sandbox');
                                if (!sandbox || sandbox.includes('allow-scripts')) {
                                    markElementsInDocument(frameDoc, frameBid);
                                }
                            }
                        } catch (e) {
                            // 跨域iframe会抛出错误忽略即可
                            console.log('Cannot access iframe:', e);
                        }
                    }
                }
                // 从当前文档开始标记
                markElementsInDocument(document, frameBid);
                return true;
            };
        """)
        # run the marking routine
        success = browser.cdp.evaluate("window.markElements()")
        if not success:
            raise MarkingError("Failed to mark elements")
    except Exception as e:
        raise MarkingError(f"Error marking elements: {str(e)}")
def extract_dom_snapshot(browser):
    """Capture a DOMSnapshot-like structure of the current page via injected JS.

    Returns a dict with "documents" and a shared "strings" table, mirroring
    the shape of CDP's DOMSnapshot.captureSnapshot.  On any failure an empty
    snapshot ({"documents": [], "strings": []}) is returned instead of raising.
    """
    try:
        ensure_cdp_activated(browser)
        # define the snapshot function in the page
        browser.cdp.evaluate("""
            window.getDOMSnapshot = function() {
                const strings = new Map();
                let stringId = 0;
                function getStringId(str) {
                    if (str === null || str === undefined) return -1;
                    if (!strings.has(str)) {
                        strings.set(str, stringId++);
                    }
                    return strings.get(str);
                }
                function processDocument(doc) {
                    function processNode(node, parentIndex) {
                        const nodeData = {
                            nodeType: [],
                            nodeName: [],
                            nodeValue: [],
                            parentIndex: [],
                            attributes: [],
                            contentDocumentIndex: {
                                index: [],
                                value: []
                            }
                        };
                        nodeData.nodeType.push(node.nodeType);
                        nodeData.nodeName.push(getStringId(node.nodeName));
                        nodeData.nodeValue.push(getStringId(node.nodeValue));
                        nodeData.parentIndex.push(parentIndex);
                        const attrs = [];
                        if (node.attributes) {
                            for (let attr of node.attributes) {
                                attrs.push(getStringId(attr.name));
                                attrs.push(getStringId(attr.value));
                            }
                        }
                        nodeData.attributes.push(attrs);
                        if (node.nodeType === 1) { // Element node
                            const iframes = node.getElementsByTagName('iframe');
                            for (let i = 0; i < iframes.length; i++) {
                                try {
                                    const frameDoc = iframes[i].contentDocument;
                                    if (frameDoc) {
                                        nodeData.contentDocumentIndex.index.push(nodeData.nodeType.length - 1);
                                        nodeData.contentDocumentIndex.value.push(1); // Assuming single document for now
                                    }
                                } catch (e) {
                                    console.log('Cannot access iframe:', e);
                                }
                            }
                        }
                        for (let child of node.childNodes) {
                            const childData = processNode(child, nodeData.nodeType.length - 1);
                            nodeData.nodeType.push(...childData.nodeType);
                            nodeData.nodeName.push(...childData.nodeName);
                            nodeData.nodeValue.push(...childData.nodeValue);
                            nodeData.parentIndex.push(...childData.parentIndex);
                            nodeData.attributes.push(...childData.attributes);
                            nodeData.contentDocumentIndex.index.push(...childData.contentDocumentIndex.index);
                            nodeData.contentDocumentIndex.value.push(...childData.contentDocumentIndex.value);
                        }
                        return nodeData;
                    }
                    return processNode(doc.documentElement, -1);
                }
                const rootData = processDocument(document);
                const stringsArray = Array.from(strings.keys());
                return {
                    documents: [{
                        nodes: rootData
                    }],
                    strings: stringsArray
                };
            };
        """)
        # run the snapshot function
        dom_snapshot = browser.cdp.evaluate("window.getDOMSnapshot()")
        return dom_snapshot
    except Exception as e:
        logger.error(f"Error capturing DOM snapshot: {str(e)}")
        return {"documents": [], "strings": []}
def extract_dom_extra_properties(browser) -> Dict:
    """Collect meaningful extra properties for DOM elements tagged with a bid.

    For every visible, "meaningful" element carrying a ``browsergym_id``
    attribute, returns visibility, absolute bounding box, clickability,
    set-of-marks flag plus tag/type/role/text/aria-label info, keyed by bid.
    On any failure an empty dict is returned instead of raising.
    """
    try:
        ensure_cdp_activated(browser)
        browser.cdp.evaluate("""
            window.getExtraProperties = function() {
                const BID_ATTR = 'browsergym_id';
                const VIS_ATTR = 'browsergym_visibility';
                const SOM_ATTR = 'browsergym_set_of_marks';
                // 定义重要的标签和属性
                const IMPORTANT_TAGS = new Set([
                    'A', 'BUTTON', 'INPUT', 'SELECT', 'TEXTAREA', 'FORM',
                    'IMG', 'VIDEO', 'AUDIO', 'IFRAME', 'LABEL', 'H1', 'H2',
                    'H3', 'H4', 'H5', 'H6'
                ]);
                const IMPORTANT_ROLES = new Set([
                    'button', 'link', 'checkbox', 'radio', 'textbox', 'combobox',
                    'listbox', 'menu', 'menuitem', 'tab', 'tabpanel', 'tree',
                    'treeitem', 'dialog', 'alert', 'alertdialog', 'tooltip'
                ]);
                function isElementVisible(element) {
                    const style = window.getComputedStyle(element);
                    return style.display !== 'none' &&
                           style.visibility !== 'hidden' &&
                           style.opacity !== '0' &&
                           element.offsetWidth > 0 &&
                           element.offsetHeight > 0;
                }
                function isElementInteractive(element) {
                    // 检查是否可交互
                    return element.onclick !== null ||
                           element.onmousedown !== null ||
                           element.onmouseup !== null ||
                           element.onkeydown !== null ||
                           element.onkeyup !== null ||
                           element.onchange !== null ||
                           element.onfocus !== null ||
                           element.onblur !== null;
                }
                function isElementMeaningful(element) {
                    // 检查标签是否重要
                    if (IMPORTANT_TAGS.has(element.tagName)) return true;
                    // 检查角色是否重要
                    const role = element.getAttribute('role');
                    if (role && IMPORTANT_ROLES.has(role)) return true;
                    // 检查是否有重要的ARIA属性
                    if (element.hasAttribute('aria-label')) return true;
                    if (element.hasAttribute('aria-description')) return true;
                    // 检查是否可交互
                    if (isElementInteractive(element)) return true;
                    // 检查是否有有意义的文本内容
                    const text = element.textContent.trim();
                    if (text && text.length > 1 && !/^[\s\d.,]+$/.test(text)) return true;
                    // 检查是否有有意义的图片
                    if (element.tagName === 'IMG' && element.alt) return true;
                    return false;
                }
                function getDocumentProperties(doc, parentFrame = null) {
                    const properties = {};
                    const frameOffset = {
                        x: 0,
                        y: 0
                    };
                    if (parentFrame) {
                        const frameRect = parentFrame.getBoundingClientRect();
                        frameOffset.x = frameRect.x + window.pageXOffset;
                        frameOffset.y = frameRect.y + window.pageYOffset;
                    }
                    const elements = doc.querySelectorAll(`[${BID_ATTR}]`);
                    elements.forEach(element => {
                        // 只处理有意义的元素
                        if (!isElementMeaningful(element)) return;
                        // 只处理可见元素
                        if (!isElementVisible(element)) return;
                        const bid = element.getAttribute(BID_ATTR);
                        if (!bid) return;
                        let visibility = element.getAttribute(VIS_ATTR);
                        visibility = visibility ? parseFloat(visibility) : 1.0;
                        const rect = element.getBoundingClientRect();
                        const bbox = rect ? [
                            rect.x + window.pageXOffset + frameOffset.x,
                            rect.y + window.pageYOffset + frameOffset.y,
                            rect.width,
                            rect.height
                        ] : null;
                        // 更精确的可点击检测
                        const isClickable = (
                            element.tagName === 'BUTTON' ||
                            element.tagName === 'A' ||
                            (element.tagName === 'INPUT' &&
                             ['button', 'submit', 'reset', 'radio', 'checkbox'].includes(element.type)) ||
                            element.getAttribute('role') === 'button' ||
                            isElementInteractive(element) ||
                            window.getComputedStyle(element).cursor === 'pointer'
                        );
                        const setOfMarks = element.getAttribute(SOM_ATTR) === '1';
                        // 添加额外的有用信息
                        const extraInfo = {
                            tag: element.tagName.toLowerCase(),
                            type: element.type || null,
                            role: element.getAttribute('role') || null,
                            text: element.textContent.trim() || null,
                            ariaLabel: element.getAttribute('aria-label') || null
                        };
                        properties[bid] = {
                            visibility: visibility,
                            bbox: bbox,
                            clickable: isClickable,
                            set_of_marks: setOfMarks,
                            ...extraInfo
                        };
                    });
                    // 递归处理iframe
                    const iframes = doc.getElementsByTagName('iframe');
                    for (let iframe of iframes) {
                        try {
                            const frameDoc = iframe.contentDocument;
                            if (frameDoc) {
                                const frameProperties = getDocumentProperties(frameDoc, iframe);
                                Object.assign(properties, frameProperties);
                            }
                        } catch (e) {
                            console.log('Cannot access iframe:', e);
                        }
                    }
                    return properties;
                }
                return getDocumentProperties(document);
            };
        """)
        extra_properties = browser.cdp.evaluate("window.getExtraProperties()")
        return extra_properties
    except Exception as e:
        logger.error(f"Error extracting extra properties: {str(e)}")
        return {}
def extract_merged_axtree(browser):
    """Build a cleaned-up accessibility-tree-like structure from the live DOM.

    Walks the document, keeping only elements that look meaningful (important
    tags/roles, labelled, or interactive), and returns ``{"nodes": [...]}`` in
    a CDP-AXTree-like shape.  On any failure an empty tree ({"nodes": []}) is
    returned instead of raising.
    """
    try:
        ensure_cdp_activated(browser)
        browser.cdp.evaluate("""
            window.getAccessibilityTree = function() {
                let nodeId = 1;
                // 需要忽略的角色
                const IGNORED_ROLES = new Set([
                    'generic',
                    'presentation',
                    'none',
                    'ScrollBar',
                    'background'
                ]);
                // 需要保留的HTML标签
                const IMPORTANT_TAGS = new Set([
                    'a', 'button', 'input', 'select', 'textarea', 'header',
                    'nav', 'main', 'footer', 'form', 'table', 'iframe',
                    'h1', 'h2', 'h3', 'h4', 'h5', 'h6'
                ]);
                function getElementRole(element) {
                    // 优先使用aria角色
                    const ariaRole = element.getAttribute('role');
                    if (ariaRole) return ariaRole;
                    // 特殊元素的默认角色
                    const tagName = element.tagName.toLowerCase();
                    switch (tagName) {
                        case 'a': return 'link';
                        case 'button': return 'button';
                        case 'input':
                            const type = element.type;
                            if (type === 'checkbox') return 'checkbox';
                            if (type === 'radio') return 'radio';
                            if (type === 'submit') return 'button';
                            return 'textbox';
                        case 'select': return 'combobox';
                        case 'textarea': return 'textbox';
                        case 'img': return 'img';
                        case 'table': return 'table';
                        default: return tagName;
                    }
                }
                function getElementName(element) {
                    // 按优先级获取元素名称
                    return element.getAttribute('aria-label') ||
                           element.getAttribute('title') ||
                           element.getAttribute('alt') ||
                           element.getAttribute('name') ||
                           element.value ||
                           element.textContent.trim();
                }
                function shouldIncludeElement(element) {
                    const tagName = element.tagName.toLowerCase();
                    const role = getElementRole(element);
                    // 检查是否是重要标签
                    if (IMPORTANT_TAGS.has(tagName)) return true;
                    // 检查是否有重要属性
                    if (element.getAttribute('aria-label')) return true;
                    if (element.getAttribute('role')) return true;
                    if (element.onclick) return true;
                    // 检查是否可交互
                    const style = window.getComputedStyle(element);
                    if (style.cursor === 'pointer') return true;
                    // 忽略无用角色
                    if (IGNORED_ROLES.has(role)) return false;
                    // 忽略空文本节点
                    const text = element.textContent.trim();
                    if (!text) return false;
                    return true;
                }
                function processNode(element) {
                    if (!shouldIncludeElement(element)) return null;
                    const role = getElementRole(element);
                    const name = getElementName(element);
                    // 如果既没有有效的角色也没有名称则跳过
                    if ((!role || IGNORED_ROLES.has(role)) && !name) return null;
                    const node = {
                        nodeId: nodeId++,
                        role: { value: role },
                        name: { value: name },
                        properties: [],
                        childIds: [],
                        backendDOMNodeId: element.getAttribute('browsergym_id') || null,
                        frameId: element.ownerDocument?.defaultView?.frameElement?.getAttribute('browsergym_id') || null
                    };
                    // 收集重要的ARIA属性
                    for (let attr of element.attributes) {
                        if (attr.name.startsWith('aria-')) {
                            node.properties.push({
                                name: { value: attr.name },
                                value: { value: attr.value }
                            });
                        }
                    }
                    // 递归处理子元素
                    for (let child of element.children) {
                        const childNode = processNode(child);
                        if (childNode) {
                            node.childIds.push(childNode.nodeId);
                        }
                    }
                    return node;
                }
                const nodes = [];
                function traverse(element) {
                    const node = processNode(element);
                    if (node) {
                        nodes.push(node);
                        for (let child of element.children) {
                            traverse(child);
                        }
                    }
                }
                traverse(document.documentElement);
                return { nodes: nodes };
            };
        """)
        axtree = browser.cdp.evaluate("window.getAccessibilityTree()")
        return axtree
    except Exception as e:
        logger.error(f"Error getting accessibility tree: {str(e)}")
        return {"nodes": []}

View file

@ -7,7 +7,7 @@ def setup_metachain(workplace_name: str, env: DockerEnv):
if response['status'] == 0:
print("Metachain is already installed.")
return
cmd = f"cd /{workplace_name}/metachain && pip install -e ."
cmd = f"cd /{workplace_name}/MetaChain && pip install -e ."
response = env.run_command(cmd, print_stream)
if response['status'] == 0:
print("Metachain is installed.")

View file

@ -1,86 +0,0 @@
import importlib
import inspect
import os
from typing import Dict, Any, Union
# from metachain.util import run_command_in_container
from metachain.environment import DockerEnv, LocalEnv
from metachain.registry import register_tool
@register_tool("check_tool")
def check_tool(context_variables) -> str:
    """
    Extract tools from existing code.

    Runs a small Python script inside the code environment that imports
    ``metachain.tools`` and collects the source code and defining file of
    every function it exposes.

    Returns:
        A dictionary containing all function definitions {function name: {'source': function source code, 'file': function file path}}
    """
    # default to a local environment when the caller did not supply one
    env: Union[DockerEnv, LocalEnv] = context_variables.get("code_env", LocalEnv())
    python_script = \
"""import importlib
import inspect
import os
from typing import Dict, Any
def check_tool():
    module = importlib.import_module(f"metachain.tools")
    # obtain all function definitions
    functions = {}
    for name, obj in inspect.getmembers(module):
        if inspect.isfunction(obj):
            try:
                # get the source code of the function
                source = inspect.getsource(obj)
                # get the file path of the function definition
                file_path = inspect.getfile(obj)
                functions[name] = {
                    "source": source,
                    "file": file_path
                }
            except Exception as e:
                functions[name] = {
                    "source": f"Failed to get source code: {str(e)}",
                    "file": "Unknown"
                }
    return functions
print(check_tool())
"""
    # NOTE(review): the script is single-quoted on the shell command line; it
    # currently contains no single quotes, but adding one would break the
    # command — keep that in mind when editing the embedded script.
    exec_script = f"cd {env.docker_workplace}/metachain && python -c '{python_script.strip()}'"
    response = env.run_command(exec_script)
    if response["status"] == 0:
        return response["result"]
    else:
        return f"Failed to get tool definitions. Error: {response['result']}"
@register_tool("check_agent")
def check_agent(context_variables) -> str:
    """
    Extract agents from existing code.

    Lists the files in the project's plugin-agents directory and reads each
    one's contents from the code environment.

    Returns:
        A dictionary containing all agents definitions {agent name: {'source': agent source code, 'file': agent file path}}
    """
    env: Union[DockerEnv, LocalEnv] = context_variables.get("code_env", LocalEnv())

    # Enumerate every file in the agents directory.
    response = env.run_command(f"ls -1 {env.docker_workplace}/metachain/metachain/agents")
    if response["status"] != 0:
        return f"Failed to get agent definitions. Error: {response['result']}"
    agents_files = response["result"].split("\n")

    agents = {}
    print(agents_files)
    for filename in agents_files:
        # Skip package plumbing and empty entries from the listing.
        if filename in ["__init__.py", "", "__pycache__"]:
            continue
        response = env.run_command(f"cat {env.docker_workplace}/metachain/metachain/agents/{filename}")
        if response["status"] != 0:
            return f"Failed to get agent definitions. Error: {response['result']}"
        agents[filename.split(".")[0]] = {
            'source': response["result"],
            'file': f"{env.docker_workplace}/metachain/metachain/agents/{filename}",
        }
    return agents

View file

@ -1,415 +0,0 @@
from metachain.registry import registry
from metachain.environment import LocalEnv, DockerEnv
from typing import Union
from metachain.tools.terminal_tools import (
create_file,
create_directory,
execute_command,
run_python,
print_stream,
process_terminal_response
)
from metachain.registry import register_tool
import json
from metachain.tools.meta.edit_tools import get_metachain_path
from string import Formatter
from pydantic import BaseModel
import subprocess
import sys
@register_tool("list_agents")
def list_agents(context_variables):
    """
    List all plugin agents in the MetaChain.

    Runs a small Python snippet inside the code environment that prints the
    registry's plugin-agent info between START/END markers, then extracts the
    JSON payload from the command output.

    Returns:
        A list of information of all plugin agents including name, args, docstring, body, return_type, file_path.
    """
    env: Union[LocalEnv, DockerEnv] = context_variables.get("code_env", LocalEnv())
    try:
        path = get_metachain_path(env)
    except Exception as e:
        return "[ERROR] Failed to list agents. Error: " + str(e)
    python_code = '"from metachain.registry import registry; import json; print(\\"AGENT_LIST_START\\"); print(json.dumps(registry.display_plugin_agents_info, indent=4)); print(\\"AGENT_LIST_END\\")"'
    list_agents_cmd = f"cd {path} && DEFAULT_LOG=False python -c {python_code}"
    result = env.run_command(list_agents_cmd)
    if result['status'] != 0:
        return "[ERROR] Failed to list agents. Error: " + result['result']
    try:
        output = result['result']
        start_marker = "AGENT_LIST_START"
        end_marker = "AGENT_LIST_END"
        # Fix: check the raw find() results for -1 BEFORE adding the marker
        # length; the original added len(start_marker) first, so its -1 check
        # could never trigger and a missing start marker silently produced a
        # garbage slice instead of the error message.
        marker_idx = output.find(start_marker)
        end_idx = output.find(end_marker)
        if marker_idx == -1 or end_idx == -1:
            return "[ERROR] Failed to parse agent list: markers not found"
        start_idx = marker_idx + len(start_marker)
        json_str = output[start_idx:end_idx].strip()
        return json_str
    except Exception as e:
        return f"[ERROR] Failed to process output: {str(e)}"
@register_tool("create_agent_old")
def create_agent_old(agent_func: str, agent_code: str, context_variables):
    """
    Create a new plugin agent.

    Args:
        agent_func: The name of the function of getting the new agent.
        agent_code: The code of creating the new agent. (You should strictly follow the format of the template given to you to create the new agent.)

    Returns:
        A string representation of the result of the agent creation.
    """
    env: Union[LocalEnv, DockerEnv] = context_variables.get("code_env", LocalEnv())
    try:
        path = get_metachain_path(env)
    except Exception as e:
        # Fix: the original returned a copy-pasted "Failed to list agents"
        # message here, which misreports the failing operation.
        return "[ERROR] Failed to create agent. Error: " + str(e)
    agents_dir = path + "/metachain/agents"
    try:
        msg = create_file(agents_dir + "/" + agent_func + ".py", agent_code, context_variables)
        if msg.startswith("Error creating file:"):
            return "[ERROR] Failed to create agent. Error: " + msg
        return "[SUCCESS] Successfully created agent: " + agent_func + " in " + agents_dir + "/" + agent_func + ".py"
    except Exception as e:
        return "[ERROR] Failed to create agent. Error: " + str(e)
@register_tool("delete_agent")
def delete_agent(agent_name: str, context_variables):
"""
Delete a plugin agent.
Args:
agent_name: The name of the agent to be deleted.
Returns:
A string representation of the result of the agent deletion.
"""
env: Union[LocalEnv, DockerEnv] = context_variables.get("code_env", LocalEnv())
try:
agent_list = list_agents(context_variables)
if agent_list.startswith("[ERROR]"):
return "[ERROR] Failed to list agents. Error: " + agent_list
agent_dict = json.loads(agent_list)
if agent_name in agent_dict.keys():
agent_info = agent_dict[agent_name]
else:
return "[ERROR] The agent " + agent_name + " does not exist."
except Exception as e:
return "[ERROR] Before deleting a agent, you should list all agents first. But the following error occurred: " + str(e)
agent_path = agent_info['file_path']
try:
result = env.run_command(f"rm {agent_path}")
if result['status'] != 0:
return f"[ERROR] Failed to delete agent: `{agent_name}`. Error: " + result['result']
return f"[SUCCESS] Successfully deleted agent: `{agent_name}`."
except Exception as e:
return f"[ERROR] Failed to delete agent: `{agent_name}`. Error: " + str(e)
@register_tool("run_agent")
@process_terminal_response
def run_agent(agent_name: str, model: str, query: str, ctx_vars: dict, context_variables):
"""
Run a plugin agent.
Args:
agent_name: The name of the agent.
model: The model to be used for the agent.
query: The query to be used for the agent.
ctx_vars: The global context variables to be used for the agent.
Returns:
A string representation of the result of the agent run.
"""
env: Union[LocalEnv, DockerEnv] = context_variables.get("code_env", LocalEnv())
try:
path = get_metachain_path(env)
except Exception as e:
return "[ERROR] Failed to get the path of the MetaChain. Error: " + str(e)
try:
agent_list = list_agents(context_variables)
if agent_list.startswith("[ERROR]"):
return "[ERROR] Failed to list agents. Error: " + agent_list
agent_dict = json.loads(agent_list)
if agent_name in agent_dict.keys():
agent_info = agent_dict[agent_name]
agent_func = agent_info['func_name']
else:
return "[ERROR] The agent " + agent_name + " does not exist."
except Exception as e:
return "[ERROR] Before running a agent, you should list all agents first. But the following error occurred: " + str(e)
ctx_vars_str = ""
for key, value in ctx_vars.items():
ctx_vars_str += f"{key}={value} "
try:
run_cmd = f'cd {path} && DEFAULT_LOG=False mc agent --model={model} --agent_func={agent_func} --query="{query}" {ctx_vars_str}'
result = env.run_command(run_cmd, print_stream)
# if result['status'] != 0:
# return f"[ERROR] Failed to run agent: `{agent_func}`. Error: " + result['result']
# return f"[SUCCESS] Successfully run agent: `{agent_func}`. The result is: \n{result['result']}"
return result
except Exception as e:
return "[ERROR] Failed to run the agent. Error: " + str(e)
def has_format_keys(s):
    """Return True if *s* contains at least one str.format replacement field."""
    # Formatter.parse yields (literal, field_name, spec, conversion); a
    # non-None field_name marks a replacement field (named or positional).
    return any(field is not None for _, field, _, _ in Formatter().parse(s))
def extract_format_keys(s):
    """Return the distinct str.format field names of *s*, in first-seen order."""
    seen = []
    for _, field_name, _, _ in Formatter().parse(s):
        # field_name is None for literal-only segments; "" for positional {}.
        if field_name is not None and field_name not in seen:
            seen.append(field_name)
    return seen
@register_tool("create_agent")
def create_agent(agent_name: str, agent_description: str, agent_tools: list[str], agent_instructions: str, context_variables):
"""
Use this tool to create a new agent or modify an existing agent.
Args:
agent_name: The name of the agent.
agent_description: The description of the agent.
agent_tools: The tools of the agent. The tools MUST be included in the list of given tools.
agent_instructions: The system instructions of the agent, which tells the agent about the responsibility of the agent, the tools it can use and other important information. It could be a pure string or a string with the format of {global_keys}, where the global keys are the keys of the variables that are given to the agent.
Returns:
A string representation of the result of the agent creation or modification.
"""
tools_str = ""
code_env: Union[LocalEnv, DockerEnv] = context_variables.get("code_env", LocalEnv())
try:
path = get_metachain_path(code_env)
except Exception as e:
return "[ERROR] Failed to list agents. Error: " + str(e)
agents_dir = path + "/metachain/agents"
for tool in agent_tools:
tools_str += f"from metachain.tools import {tool}\n"
agent_func = f"get_{agent_name.lower().replace(' ', '_')}"
if has_format_keys(agent_instructions):
format_keys = extract_format_keys(agent_instructions)
format_keys_values = []
for fk in format_keys:
format_keys_values.append(f"{fk}=context_variables.get('{fk}', '')")
format_keys_values_str = ", ".join(format_keys_values)
instructions_str = f"""\
def instructions(context_variables):
return {repr(agent_instructions)}.format({format_keys_values_str})
"""
else:
instructions_str = f"""instructions = {repr(agent_instructions)}"""
tool_list = "[{}]".format(', '.join(f'{tool}' for tool in agent_tools))
create_codes = f"""\
from metachain.types import Agent
{tools_str}
from metachain.registry import register_plugin_agent
@register_plugin_agent(name="{agent_name}", func_name="{agent_func}")
def {agent_func}(model: str):
'''
{agent_description}
'''
{instructions_str}
return Agent(
name="{agent_name}",
model=model,
instructions=instructions,
tools={tool_list}
)
"""
# print(create_codes)
# with open(f"metachain/agents/{agent_name.lower().replace(' ', '_')}.py", "w", encoding="utf-8") as f:
# f.write(create_codes)
try:
msg = create_file(agents_dir + "/" + agent_name.lower().replace(' ', '_') + ".py", create_codes, context_variables)
if msg.startswith("Error creating file:"):
return "[ERROR] Failed to create agent. Error: " + msg
result = code_env.run_command('cd {} && python metachain/agents/{}.py'.format(path, agent_name.lower().replace(' ', '_')))
if result['status'] != 0:
return "[ERROR] Failed to create agent. Error: " + result['result']
return "Successfully created agent: " + agent_func + " in " + agents_dir + "/" + agent_name.lower().replace(' ', '_') + ".py"
except Exception as e:
return "[ERROR] Failed to create agent. Error: " + str(e)
class SubAgent(BaseModel):
    """Schema for one sub-agent entry passed to `create_orchestrator_agent`."""
    # Registered display name of the sub-agent.
    name: str
    # Name of the variable handed to the sub-agent as its input.
    agent_input: str
    # Name of the variable the sub-agent produces as its output.
    agent_output: str
@register_tool("create_orchestrator_agent")
def create_orchestrator_agent(agent_name: str, agent_description: str, sub_agents: list[SubAgent], agent_instructions: str, context_variables):
"""
Use this tool to create a orchestrator agent for the given sub-agents. You MUST use this tool when you need to create TWO or MORE agents and regard them as a whole to complete a task.
Args:
agent_name: The name of the orchestrator agent for the given sub-agents.
agent_description: The description of the orchestrator agent.
sub_agents: The list of sub-agents. Each sub-agent contains the name of the sub-agent, the input of the sub-agent and the output of the sub-agent.
agent_instructions: The system instructions of the orchestrator agent, which tells the agent about the responsibility of the agent (orchestrate the workflow of the given sub-agents), the given sub-agents and other important information. It could be a pure string or a string with the format of {global_keys}, where the global keys are the keys of the variables that are given to the agent.
Returns:
A string representation of the result of the agent creation or modification.
"""
code_env: Union[LocalEnv, DockerEnv] = context_variables.get("code_env", LocalEnv())
try:
path = get_metachain_path(code_env)
except Exception as e:
return "[ERROR] Failed to list agents. Error: " + str(e)
agents_dir = path + "/metachain/agents"
agent_list = list_agents(context_variables)
if agent_list.startswith("[ERROR]"):
return "Failed to list agents. Error: " + agent_list
agent_dict = json.loads(agent_list)
sub_agent_info = [agent_dict[sub_agent["name"]] for sub_agent in sub_agents]
import_agent_str = ""
for ainfo in sub_agent_info:
import_agent_str += f"""
from metachain.agents import {ainfo['func_name']}
"""
if has_format_keys(agent_instructions):
format_keys = extract_format_keys(agent_instructions)
format_keys_values = []
for fk in format_keys:
format_keys_values.append(f"{fk}=context_variables.get('{fk}', '')")
format_keys_values_str = ", ".join(format_keys_values)
instructions_str = f"""\
def instructions(context_variables):
return {repr(agent_instructions)}.format({format_keys_values_str})
"""
else:
instructions_str = f"""instructions = {repr(agent_instructions)}"""
orchestrator_agent_def = f"""
{agent_name.lower().replace(' ', '_')} = Agent(
name="{agent_name}",
model=model,
instructions=instructions,
)
"""
sub_agent_funcs = [ainfo['func_name'] for ainfo in sub_agent_info]
get_sub_agents = ""
transfer_sub_agent_func = ""
transfer_back_to_orchestrator_func = ""
transfer_funcs_str = []
for sub_agent_func, sub_agent in zip(sub_agent_funcs, sub_agents):
get_sub_agents += f"""
{sub_agent_func.replace('get_', '')}: Agent = {sub_agent_func}(model)
"""
transfer_sub_agent_func += f"""
def transfer_to_{sub_agent_func.replace('get_', '')}({sub_agent["agent_input"]}):
return Result(value = {sub_agent["agent_input"]}, agent = {sub_agent_func.replace('get_', '')})
"""
transfer_funcs_str.append(f"transfer_to_{sub_agent_func.replace('get_', '')}")
transfer_back_to_orchestrator_func += f"""
def transfer_back_to_{agent_name.lower().replace(' ', '_')}({sub_agent["agent_input"]}):
return Result(value = {sub_agent["agent_input"]}, agent = {agent_name.lower().replace(' ', '_')})
{sub_agent_func.replace('get_', '')}.functions.append(transfer_back_to_{agent_name.lower().replace(' ', '_')})
"""
agent_func = f"get_{agent_name.lower().replace(' ', '_')}"
create_codes = f"""\
from metachain.types import Agent
from metachain.registry import register_plugin_agent
from metachain.types import Result
@register_plugin_agent(name = "{agent_name}", func_name="{agent_func}")
def {agent_func}(model: str):
'''
{agent_description}
'''
{import_agent_str}
{instructions_str}
{orchestrator_agent_def}
{get_sub_agents}
{transfer_sub_agent_func}
{transfer_back_to_orchestrator_func}
{agent_name.lower().replace(' ', '_')}.functions = [{", ".join(transfer_funcs_str)}]
return {agent_name.lower().replace(' ', '_')}
"""
# print(create_codes)
# with open(f"metachain/agents/{agent_name.lower().replace(' ', '_')}.py", "w", encoding="utf-8") as f:
# f.write(create_codes)
try:
msg = create_file(agents_dir + "/" + agent_name.lower().replace(' ', '_') + ".py", create_codes, context_variables)
if msg.startswith("Error creating file:"):
return "[ERROR] Failed to create agent. Error: " + msg
result = code_env.run_command('cd {} && python metachain/agents/{}.py'.format(path, agent_name.lower().replace(' ', '_')))
if result['status'] != 0:
return "[ERROR] Failed to create agent. Error: " + result['result']
return "Successfully created agent: " + agent_func + " in " + agents_dir + "/" + agent_name.lower().replace(' ', '_') + ".py"
except Exception as e:
return "[ERROR] Failed to create agent. Error: " + str(e)
def read_agent(agent_name: str, context_variables: dict):
    """Return a human-readable summary of one registered plugin agent."""
    try:
        env: Union[LocalEnv, DockerEnv] = context_variables.get("code_env", LocalEnv())
        try:
            path = get_metachain_path(env)
        except Exception as e:
            return "[ERROR] Failed to get the path of the MetaChain. Error: " + str(e)
        listing = list_agents(context_variables)
        if listing.startswith("[ERROR]"):
            return "Failed to list agents. Error: " + listing
        known_agents = json.loads(listing)
        if agent_name not in known_agents.keys():
            return "[ERROR] The agent " + agent_name + " does not exist."
        agent_info = known_agents[agent_name]
        return f"""\
The information of the agent {agent_name} is:
{agent_info}
"""
    except Exception as e:
        return "[ERROR] Failed to read the agent. Error: " + str(e)
if __name__ == "__main__":
# print(list_agents({}))
from litellm import completion
from metachain.util import function_to_json
tools = [function_to_json(create_agent)]
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": """\
Create an Personalized RAG agent that can answer the question about the given document. There are some tools you can use:
- save_raw_docs_to_vector_db: Save the raw documents to the vector database. The documents could be:
- ANY text document with the extension of pdf, docx, txt, etcs.
- A zip file containing multiple text documents
- a directory containing multiple text documents
All documents will be converted to raw text format and saved to the vector database in the chunks of 4096 tokens.
- query_db: Retrieve information from the database. Use this function when you need to search for information in the database.
- modify_query: Modify the query based on what you know. Use this function when you need to modify the query to search for more relevant information.
- answer_query: Answer the user query based on the supporting documents.
- can_answer: Check if you have enough information to answer the user query.
- visual_question_answering: This tool is used to answer questions about attached images or videos.
There are some global variables you can use:
glbal_keys | global_vals
-----------|-----------
user_name | "Jiabin Tang"
user_email | "jiabin.tang@gmail.com"
[IMPORTANT] NOT ALL tools are required to be used. You can choose the tools that you think are necessary.
"""},
]
for tool in tools:
params = tool["function"]["parameters"]
params["properties"].pop("context_variables", None)
if "context_variables" in params["required"]:
params["required"].remove("context_variables")
# response = completion(
# model="claude-3-5-sonnet-20241022",
# messages=messages,
# tools=tools,
# tool_choice="auto", # auto is default, but we'll be explicit
# )
# print("\nLLM Response1:\n", response.choices[0].message.tool_calls)
# args = json.loads(response.choices[0].message.tool_calls[0].function.arguments)
# create_agent(args["agent_name"], args["agent_description"], args["agent_tools"], args["agent_instructions"], {})
# print(list_agents({}))
print(create_orchestrator_agent("Orchestrator Coding RAG Agent", "An Orchestrator Agent that orchestrates the workflow of the codig agent and the RAG agent.", [{"name": "Personalized RAG Agent", "agent_input": "doc_query", "agent_output": "queried_doc_content"}, {"name": "Coding Agent", "agent_input": "coding_query", "agent_output": "coding_result"}], "You are a helpful assistant.", {}))

View file

@ -1,207 +0,0 @@
from typing import Union
from metachain.environment import LocalEnv, DockerEnv
from metachain.tools.meta.edit_tools import get_metachain_path
from metachain.tools.meta.edit_agents import list_agents
import json
from metachain import MetaChain
from metachain.types import Response
CODE_PREFIX = """\
import asyncio
import json
import argparse
from openai import AsyncOpenAI
from openai.types.chat import ChatCompletionMessageToolCall
from metachain.flow import default_drive, EventInput, ReturnBehavior
from metachain.flow.dynamic import goto_events, abort_this
import re
from metachain import MetaChain
from metachain.types import Response
from metachain.registry import register_workflow
def extract_answer(response: str, key: str):
pattern = f"<{key}>(.*?)</{key}>"
matches = re.findall(pattern, response)
return matches[0] if len(matches) > 0 else None
"""
CODE_SUFFIX = r"""
def get_args():
parser = argparse.ArgumentParser(description="running_workflow")
parser.add_argument('--system_input', type=str, default=None)
args = parser.parse_args()
return args
if __name__ == "__main__":
args = get_args()
"""
CODE_MAIN = """
storage_results = dict({input_key} = args.system_input)
asyncio.run(
default_drive.invoke_event(
on_start,
global_ctx=storage_results,
)
)
system_output = storage_results.get({output_key}, None)
print("The system output is: " + system_output)
"""
EVENT_TEMPLATE_PREFIX = """\
@default_drive.{event_method}
async def {event_name}(event: EventInput, global_ctx):
inputs = {inputs}
input_dict = dict()
for inp in inputs:
input_dict[inp["key"]] = global_ctx.get(inp["key"], None)
messages = global_ctx.get('messages', [])
task = {task}
outputs = {output_list}
agent = {agent_func_name}({model})
"""
EVENT_TEMPLATE_FIX = r"""
input_str = []
for key, value in input_dict.items():
input_str.append(f"The {key.replace('_', ' ')} is {value}")
input_str = "\n".join(input_str) + "\n"
query = input_str + '.\nThe task is: ' + task + '.\n'
"""
# QUERY_TEMPLATE = """\
# query = input_str + '.\\nThe task is: ' + task + '.\\n'
# """
START_EVENT_CODE = """\
@default_drive.make_event
async def on_start(event: EventInput, global_ctx):
print("start the workflow:" + {workflow_name})
"""
IF_ELSE_SUFFIX = \
"""
You should follow the above instructions, and return the result in the following format:
"""
EVENT_TEMPLATE_SUFFIX = """\
messages.append({
"role": "user",
"content": query
})
client = MetaChain()
response: Response = await client.run_async(agent = agent, messages = messages, context_variables = global_ctx, debug = True)
result = response.messages[-1]["content"]
messages.extend(response.messages)
global_ctx["messages"] = messages
for output in outputs:
ans = extract_answer(result, output["key"])
if ans:
if output["action"]["type"] == "RESULT":
global_ctx[output["key"]] = ans
return ans
elif output["action"]["type"] == "ABORT":
return abort_this()
elif output["action"]["type"] == "GO_TO":
return goto_events([output["action"]["value"]])
elif len(outputs) == 1:
global_ctx[output["key"]] = result
return result
raise Exception("No valid answer found")
"""
def start_event_to_code(workflow_name: str) -> str:
    """Render the generated `on_start` event for the given workflow name."""
    # repr() quotes the name so it lands in the generated file as a literal.
    quoted_name = repr(workflow_name)
    return START_EVENT_CODE.format(workflow_name=quoted_name)
def single_event_to_code(event: dict, agent_info_dict: dict) -> str:
    """
    Convert a single workflow event description to generated Python code.

    An event contains:
        - name (str): the name of the event
        - inputs (list[dict]): the inputs to the event
        - task (str): the task to perform
        - outputs (list[dict]): the outputs of the event
        - listen (list[str]): names of the events this event listens to
        - agent (dict): the agent to run (name and model)

    Returns:
        The generated source text for this event.
    """
    # IDIOM FIX: compare against None with `is`, not `==`.
    # Events with no listen-list are entry events; otherwise they fire when
    # every event in the listen group has completed.
    if event["listen"] is None or len(event["listen"]) == 0:
        event_method = "make_event"
    else:
        event_method = "listen_group([{}])".format(", ".join(event["listen"]))
    inputs = event["inputs"]
    event_code = EVENT_TEMPLATE_PREFIX.format(
        event_method=event_method,
        event_name=event["name"],
        inputs=inputs,
        task=repr(event["task"]),
        output_list=event["outputs"],
        agent_mode_name=agent_info_dict[event["agent"]["name"]]["mode_name"],
        agent_func_name=agent_info_dict[event["agent"]["name"]]["func_name"],
        model=repr(event["agent"]["model"]),
    ) + EVENT_TEMPLATE_FIX
    if len(event["outputs"]) > 1:
        # Multiple possible outputs: instruct the agent to wrap its answer in
        # the tag matching whichever condition holds.
        condition_str = []
        for output in event["outputs"]:
            condition_str.append(f"If {output['condition']}, then encapsulate your final answer (answer ONLY) within <{output['key']}> and </{output['key']}>. ")
        query_suffix = "\n".join(condition_str)
        query_suffix = f"""
    query_suffix = {repr(IF_ELSE_SUFFIX)}
    query_suffix += {repr(query_suffix)}
    query += query_suffix
"""
        event_code += query_suffix + EVENT_TEMPLATE_SUFFIX
    else:
        event_code += EVENT_TEMPLATE_SUFFIX
    return event_code
def json_to_workflow_code(workflow_form: dict, context_variables: dict) -> str:
    """
    Compile a parsed workflow form into a standalone Python event script.

    Concatenates CODE_PREFIX, per-agent imports, the generated events,
    CODE_SUFFIX, and CODE_MAIN, then writes the result to disk.
    """
    system_input = workflow_form['system_input']
    system_output = workflow_form['system_output']
    code_env: Union[LocalEnv, DockerEnv] = context_variables.get("code_env", LocalEnv())
    try:
        path = get_metachain_path(code_env)
    except Exception as e:
        return "[ERROR] Failed to list agents. Error: " + str(e)
    agents_dir = path + "/metachain/agents"  # NOTE(review): computed but unused here — confirm.
    agent_list = list_agents(context_variables)
    if agent_list.startswith("[ERROR]"):
        return "Failed to list agents. Error: " + agent_list
    agent_dict = json.loads(agent_list)
    agent_info_dict = {}
    workflow_name = workflow_form["name"]
    # Map each agent named in the form to its registered factory function.
    for a in workflow_form["agents"]:
        agent_info_dict[a["name"]] = {"name": a["name"], "func_name": agent_dict[a["name"]]["func_name"], "mode_name": a["name"].replace(" ", "_").lower()}
    import_agent_str = ""
    for ainfo in agent_info_dict.values():
        import_agent_str += f"""
from metachain.agents import {ainfo['func_name']}
"""
    events = workflow_form["events"]
    events_code = CODE_PREFIX + import_agent_str
    for event in events:
        if event["name"] == "on_start":
            events_code += start_event_to_code(workflow_name)
        else:
            events_code += single_event_to_code(event, agent_info_dict)
    events_code += CODE_SUFFIX
    events_code += CODE_MAIN.format(input_key = system_input["key"], output_key = repr(system_output["key"]))
    # NOTE(review): output filename is hard-coded and the function returns
    # None despite the `-> str` annotation — confirm intended behavior.
    with open("math_majority_voting.py", "w") as f:
        f.write(events_code)
def create_workflow(workflow_form: dict, context_variables: dict) -> str:
    """Persist a workflow built from *workflow_form*. TODO: not yet implemented."""
    pass
def run_workflow(workflow_name: str, context_variables: dict) -> str:
    """Execute a previously created workflow by name. TODO: not yet implemented."""
    pass
if __name__ == "__main__":
workflow_form = json.load(open("metachain/agents/meta_agent/workflow_form/math_majority_voting.json", "r"))
json_to_workflow_code(workflow_form, {})

View file

@ -159,6 +159,7 @@ def process_terminal_response(func):
else:
res_output = f"[SUCCESS] {res_output}"
tmp_file = os.path.join(os.getcwd(), "terminal_tmp", "terminal_output_{}___{}.txt".format(datetime.now().strftime("%Y%m%d_%H%M%S"), func.__name__))
Path(tmp_file).parent.mkdir(parents=True, exist_ok=True)
with open(tmp_file, "w") as f:
f.write(res_output)
return open_local_terminal_output(tmp_file)

View file

@ -1,266 +0,0 @@
from constant import DOCKER_WORKPLACE_NAME
from metachain.io_utils import read_yaml_file, get_md5_hash_bytext, read_file
from metachain.workflow import Graph, FlowEngine, meta_agent
from metachain.environment.utils import setup_metachain
from metachain.types import Response
from metachain import MetaChain
from metachain.util import ask_text, single_select_menu, print_markdown, debug_print, UserCompleter
from prompt_toolkit import PromptSession
from prompt_toolkit.completion import Completer, Completion
from prompt_toolkit.formatted_text import HTML
from prompt_toolkit.styles import Style
from rich.progress import Progress, SpinnerColumn, TextColumn
import json
import argparse
from datetime import datetime
from metachain.agents.meta_agent import tool_editor, agent_editor
from metachain.tools.meta.edit_tools import list_tools
from metachain.tools.meta.edit_agents import list_agents
from loop_utils.font_page import MC_LOGO, version_table, NOTES, GOODBYE_LOGO
from rich.live import Live
from metachain.environment.docker_env import DockerEnv, DockerConfig, check_container_ports
from metachain.environment.browser_env import BrowserEnv
from metachain.environment.markdown_browser import RequestsMarkdownBrowser
from evaluation.utils import update_progress, check_port_available, run_evaluation, clean_msg
import os
import os.path as osp
from metachain.agents import get_system_triage_agent
from metachain.logger import LoggerManager, MetaChainLogger
from rich.console import Console
from rich.markdown import Markdown
from rich.table import Table
from rich.columns import Columns
from rich.text import Text
from rich.panel import Panel
from metachain.agents.meta_agent.agent_former import get_agent_former_agent
from metachain.agents.meta_agent.tool_editor import get_tool_editor_agent
from metachain.agents.meta_agent.agent_creator import get_agent_creator_agent
import re
from metachain.agents.meta_agent.form_complie import parse_agent_form
def get_args():
    """Parse the command-line options for the showcase runner."""
    parser = argparse.ArgumentParser(description="working@tjb-tech")
    # Plain string options and their defaults.
    string_options = {
        '--container_name': 'gpu_test',
        '--model': 'gpt-4o-2024-08-06',
        '--test_pull_name': 'test_pull_1010',
        '--task_cfg': 'benchmarks/case_study/finance_agent/case_finance_agent_single.yaml',
    }
    for flag, default in string_options.items():
        parser.add_argument(flag, type=str, default=default)
    parser.add_argument('--debug', action='store_true', default=False)
    parser.add_argument('--port', type=int, default=12350)
    parser.add_argument('--git_clone', action='store_true', default=False)
    return parser.parse_args()
def get_config(args):
    """
    Build the DockerConfig for this run, choosing the communication port.

    If the named container already exposes a port, reuse it; otherwise scan
    upward from args.port under a file lock so concurrent runs do not grab
    the same port.
    """
    container_name = args.container_name
    port_info = check_container_ports(container_name)
    port = args.port
    if port_info:
        # Container already running: reuse its existing port mapping.
        port = port_info[0]
    else:
        # Use a file lock to make the port allocation atomic across processes.
        import filelock
        lock_file = os.path.join(os.getcwd(), ".port_lock")
        lock = filelock.FileLock(lock_file)
        with lock:
            port = args.port
            while not check_port_available(port):
                port += 1
                # NOTE(review): this message prints the post-increment port and
                # "trying {port+1}" — the reported values look off by one; confirm.
                print(f'{port} is not available, trying {port+1}')
            # Immediately mark this port as taken so other runs skip it.
            with open(os.path.join(os.getcwd(), f".port_{port}"), 'w') as f:
                f.write(container_name)
    local_root = os.path.join(os.getcwd(), f"workspace_meta_showcase", f"showcase_{container_name}")
    os.makedirs(local_root, exist_ok=True)
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    docker_config = DockerConfig(
        workplace_name=DOCKER_WORKPLACE_NAME,
        container_name=container_name,
        communication_port=port,
        conda_path='/root/miniconda3',
        local_root=local_root,
        git_clone=args.git_clone,
        test_pull_name=args.test_pull_name,
        task_name = "meta_agent_" + timestamp
    )
    return docker_config
def create_environment(docker_config: DockerConfig):
    """
    Build the three runtime environments used by the showcase:
    1. the code environment (Docker container)
    2. the web environment (browser)
    3. the file environment (markdown browser)
    """
    code_env = DockerEnv(docker_config)
    code_env.init_container()
    web_env = BrowserEnv(
        browsergym_eval_env=None,
        local_root=docker_config.local_root,
        workplace_name=docker_config.workplace_name,
    )
    downloads_dir = os.path.join(docker_config.local_root, docker_config.workplace_name, "downloads")
    file_env = RequestsMarkdownBrowser(
        viewport_size=1024 * 5,
        local_root=docker_config.local_root,
        workplace_name=docker_config.workplace_name,
        downloads_folder=downloads_dir,
    )
    return code_env, web_env, file_env
def extract_agents_content(text):
    """Return the first '<agents>...</agents>' span of *text* (tags included), or None."""
    # re.DOTALL lets '.' match newlines so multi-line forms are captured.
    found = re.search(r'(<agents>.*?</agents>)', text, re.DOTALL)
    return found.group(1) if found else None
def main(args):
print('\033[s\033[?25l', end='') # Save cursor position and hide cursor
with Progress(
SpinnerColumn(),
TextColumn("[progress.description]{task.description}"),
transient=True # 这会让进度条完成后消失
) as progress:
task = progress.add_task("[cyan]Initializing...", total=None)
progress.update(task, description="[cyan]Initializing config...[/cyan]\n")
docker_config = get_config(args)
progress.update(task, description="[cyan]Setting up logger...[/cyan]\n")
log_path = osp.join("casestudy_results", 'logs', f'agent_{args.container_name}_{args.model}.log')
os.makedirs(osp.dirname(log_path), exist_ok=True)
LoggerManager.set_logger(MetaChainLogger(log_path = log_path))
progress.update(task, description="[cyan]Creating environment...[/cyan]\n")
code_env, web_env, file_env = create_environment(docker_config)
progress.update(task, description="[cyan]Setting up metachain...[/cyan]\n")
setup_metachain(workplace_name=docker_config.workplace_name, env=code_env)
context_variables = {"working_dir": docker_config.workplace_name, "code_env": code_env, "web_env": web_env, "file_env": file_env}
task_cfg = read_yaml_file(args.task_cfg)
# generate agent form
client = MetaChain(LoggerManager.get_logger())
agent_former = get_agent_former_agent(args.model)
messages = [
{"role": "user", "content": task_cfg["requirements"] + """
Directly output the form in the XML format without ANY other text.
"""}
]
response = client.run(agent_former, messages, context_variables)
output_xml_form = response.messages[-1]["content"]
messages.extend(response.messages)
MAX_RETRY = 3
for i in range(MAX_RETRY):
try:
output_xml_form = extract_agents_content(output_xml_form)
assert output_xml_form is not None, "No the XML form should be found in the output with the tag <agents>...</agents>."
agent_form = parse_agent_form(output_xml_form)
break
except Exception as e:
print(f"Error parsing XML to agent form: {e}. Retry {i+1}/{MAX_RETRY}")
messages.append({"role": "user", "content": f"Error parsing XML to agent form: {e}\nNote that there are some special restrictions for creating agent form, please try again."})
response = client.run(agent_former, messages, context_variables)
output_xml_form = response.messages[-1]["content"]
messages.extend(response.messages)
print(output_xml_form)
tool_editor_agent = get_tool_editor_agent(args.model)
def case_resolved(task_response: str, context_variables: dict):
"""
Use this tools when the desired tool is created and tested successfully. You can NOT use this tool if the tool is not created or tested successfully by running the tool.
Args:
task_response: the response of creating the tool which contains the completion status of the tool.
"""
return f"Case resolved. The desired tool is created and tested successfully. Details: {task_response}"
def case_not_resolved(task_response: str, context_variables: dict):
"""
Use this tools when you encounter irresistible errors after trying your best with multiple attempts for creating the desired tool. You can NOT use this tool before you have tried your best.
Args:
task_response: the reason why the tool is not created or tested successfully.
"""
return f"Case not resolved. The desired tool is not created or tested successfully. Details: {task_response}"
tool_editor_agent.functions.extend([case_resolved, case_not_resolved])
agents = agent_form.agents
for agent in agents:
if len(agent.tools.new) > 0:
new_tools = []
for idx, tool in enumerate(agent.tools.new):
new_tools.append(f"{idx+1}. Tool name: {tool.name}, Tool description: {tool.description}")
new_tools_str = "\n".join(new_tools)
messages.append({"role": "user", "content": f"""\
Your task is to create a list of new tools for me, the tools are:
{new_tools_str}
Please create these new tools for me, note that you can NOT stop util you have created all the tools and tested them using `run_tool` successfully.
If EVERY tool is created and tested successfully, you can stop and output "Case resolved". Otherwise, you should continue to create the tools. After you have tried your best, you can output "Case not resolved" and give the reason why the tool is not created or tested successfully.
[IMPORTANT] EVERY tool MUST be tested successfully by running the tool using `run_tool` before you stop.
"""})
response = client.run(tool_editor_agent, messages, context_variables)
content = response.messages[-1]["content"]
for i in range(MAX_RETRY):
if content.startswith("Case resolved"):
break
messages.append({"role": "user", "content": f"""\
Your task is to create a list of new tools for me, the tools are:
{new_tools_str}
Please create these new tools for me, note that you can NOT stop util you have created all the tools and tested them using `run_tool` successfully.
The last attempt failed with the following error: {content}, please try again to create the tools.
"""})
response = client.run(tool_editor_agent, messages, context_variables)
content = response.messages[-1]["content"]
if i == MAX_RETRY:
return f"The desired tool is not created or tested successfully with {MAX_RETRY} attempts."
# create agents:
agent_creator_agent = get_agent_creator_agent(args.model)
def case_resolved(task_response: str, context_variables: dict):
"""
Use this tools when the desired agent(s) is created and tested successfully. You can NOT use this tool if the agent(s) is not created or tested successfully by running the agent(s).
"""
return f"Case resolved. The desired agent(s) is created and tested successfully. : {task_response}"
def case_not_resolved(task_response: str, context_variables: dict):
"""
Use this tools when you encounter irresistible errors after trying your best with multiple attempts for creating the desired agent(s). You can NOT use this tool before you have tried your best.
"""
return f"Case not resolved. The desired agent(s) is not created or tested successfully. Details: {task_response}"
agent_creator_agent.functions.extend([case_resolved, case_not_resolved])
messages.append({"role": "user", "content": f"""\
The user's request to create agent(s) is: {task_cfg["requirements"]}
Given the completed agent form with XML format: {output_xml_form}
After previous attempts, you have created new tools that required by the desired agent(s).
Your task is to create the desired agent(s) for me, note that you may create ONE single agent or multiple agents connected by orchestrator agent.
After you have created the agent(s), you should test the agent(s) by running the agent(s) using `run_agent` tool to complete the user's task:
{task_cfg["task"]}
Note that you can NOT stop util you have created the agent(s) and tested it successfully.
"""})
response = client.run(agent_creator_agent, messages, context_variables)
content = response.messages[-1]["content"]
for i in range(MAX_RETRY):
if content.startswith("Case resolved"):
break
messages.append({"role": "user", "content": f"""\
The user's request to create agent(s) is: {task_cfg["requirements"]}
Given the completed agent form with XML format: {output_xml_form}
After previous attempts, you have created new tools that required by the desired agent(s).
Your task is to create the desired agent(s) for me, note that you may create ONE single agent or multiple agents connected by orchestrator agent.
After you have created the agent(s), you should test the agent(s) by running the agent(s) using `run_agent` tool to complete the user's task:
{task_cfg["task"]}
Note that you can NOT stop util you have created the agent(s) and tested it successfully.
The last attempt failed with the following error: {content}, please try again to create the desired agent(s).
"""})
response = client.run(agent_creator_agent, messages, context_variables)
content = response.messages[-1]["content"]
if i == MAX_RETRY:
return f"The desired agent(s) is not created or tested successfully with {MAX_RETRY} attempts."
if __name__ == "__main__":
    # Script entry point: parse CLI options, then run the meta-agent pipeline.
    args = get_args()
    main(args)

View file

@ -1,13 +0,0 @@
# Resolve the directory containing this script and run from there.
current_dir=$(dirname "$(readlink -f "$0")")
cd $current_dir
# Environment for the dockerized metachain showcase run.
export DOCKER_WORKPLACE_NAME=workplace
export EVAL_MODE=True
export DEBUG=True
export BASE_IMAGES=tjb-gaia-bookworm:v2
# Model used for completions (alternative GPT-4o line kept below, disabled).
export COMPLETION_MODEL=claude-3-5-sonnet-20241022
# export COMPLETION_MODEL=gpt-4o-2024-08-06
export MC_MODE=False
# Launch the meta-agent showcase inside the named container.
python metachain_meta_agent.py --container_name nl2agent_showcase --model ${COMPLETION_MODEL} --test_pull_name test_pull_0111 --debug --port 12350 --git_clone
# python /Users/tangjiabin/Documents/reasoning/metachain/test_gaia_tool.py

View file

@ -1,231 +0,0 @@
from constant import DOCKER_WORKPLACE_NAME
from metachain.io_utils import read_yaml_file, get_md5_hash_bytext, read_file
from metachain.workflow import Graph, FlowEngine, meta_agent
from metachain.environment.utils import setup_metachain
from metachain.types import Response
from metachain import MetaChain
from metachain.util import ask_text, single_select_menu, print_markdown, debug_print, UserCompleter
from prompt_toolkit import PromptSession
from prompt_toolkit.completion import Completer, Completion
from prompt_toolkit.formatted_text import HTML
from prompt_toolkit.styles import Style
from rich.progress import Progress, SpinnerColumn, TextColumn
import json
import argparse
from datetime import datetime
from metachain.agents.meta_agent import tool_editor, agent_editor
from metachain.tools.meta.edit_tools import list_tools
from metachain.tools.meta.edit_agents import list_agents
from loop_utils.font_page import MC_LOGO, version_table, NOTES, GOODBYE_LOGO
from rich.live import Live
from metachain.environment.docker_env import DockerEnv, DockerConfig, check_container_ports
from metachain.environment.browser_env import BrowserEnv
from metachain.environment.markdown_browser import RequestsMarkdownBrowser
from evaluation.utils import update_progress, check_port_available, run_evaluation, clean_msg
import os
import os.path as osp
from metachain.agents import get_system_triage_agent
from metachain.logger import LoggerManager, MetaChainLogger
from rich.console import Console
from rich.markdown import Markdown
from rich.table import Table
from rich.columns import Columns
from rich.text import Text
from rich.panel import Panel
from metachain.agents.meta_agent.workflow_former import get_workflow_former_agent
from metachain.agents.meta_agent.workflow_creator import get_workflow_creator_agent
import re
from metachain.agents.meta_agent.worklow_form_complie import parse_workflow_form, WorkflowForm
def get_args():
    """Assemble and parse the command-line options for this runner.

    Returns:
        argparse.Namespace with container_name, model, test_pull_name, debug,
        port, git_clone and task_cfg attributes.
    """
    # Declarative option table: (flag, add_argument keyword arguments),
    # registered in the same order the original parser declared them.
    spec = [
        ('--container_name', dict(type=str, default='gpu_test')),
        ('--model', dict(type=str, default='gpt-4o-2024-08-06')),
        ('--test_pull_name', dict(type=str, default='test_pull_1010')),
        ('--debug', dict(action='store_true', default=False)),
        ('--port', dict(type=int, default=12350)),
        ('--git_clone', dict(action='store_true', default=False)),
        ('--task_cfg', dict(type=str, default='benchmarks/case_study/math_workflow/majority_voting(paralizing).yaml')),
    ]
    parser = argparse.ArgumentParser(description="working@tjb-tech")
    for flag, options in spec:
        parser.add_argument(flag, **options)
    return parser.parse_args()
def get_config(args):
    """Build the DockerConfig for ``args.container_name``.

    If the container already publishes a port, reuse it; otherwise pick the
    first free port at or above ``args.port``. Port selection is serialized
    across concurrent runs with a file lock, and the chosen port is claimed by
    writing a ``.port_<port>`` marker file in the current working directory.

    Args:
        args: parsed CLI namespace (reads container_name, port, git_clone,
            test_pull_name).

    Returns:
        DockerConfig: populated configuration for the sandbox container.
    """
    container_name = args.container_name
    port_info = check_container_ports(container_name)
    port = args.port
    if port_info:
        # Container already exists: reuse its published port.
        port = port_info[0]
    else:
        # Use a file lock to make port allocation atomic across processes.
        import filelock
        lock_file = os.path.join(os.getcwd(), ".port_lock")
        lock = filelock.FileLock(lock_file)
        with lock:
            port = args.port
            while not check_port_available(port):
                # BUGFIX: report the port that actually failed. The message was
                # previously printed AFTER `port += 1`, so it named the next
                # candidate as "not available" instead of the one that was busy.
                print(f'{port} is not available, trying {port+1}')
                port += 1
            # Immediately mark this port as taken so concurrent runs skip it.
            with open(os.path.join(os.getcwd(), f".port_{port}"), 'w') as f:
                f.write(container_name)
    local_root = os.path.join(os.getcwd(), f"workspace_meta_showcase", f"showcase_{container_name}")
    os.makedirs(local_root, exist_ok=True)
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    docker_config = DockerConfig(
        workplace_name=DOCKER_WORKPLACE_NAME,
        container_name=container_name,
        communication_port=port,
        conda_path='/root/miniconda3',
        local_root=local_root,
        git_clone=args.git_clone,
        test_pull_name=args.test_pull_name,
        task_name = "meta_agent_" + timestamp
    )
    return docker_config
def create_environment(docker_config: DockerConfig):
    """
    Create the three sandboxed environments used by the agents:
    1. create the code environment (a Docker container for running code)
    2. create the web environment (a browser instance)
    3. create the file environment (a markdown-rendering file browser)

    Returns:
        (code_env, web_env, file_env) tuple of the three environment objects.
    """
    code_env = DockerEnv(docker_config)
    # Start (or attach to) the backing container before anything uses it.
    code_env.init_container()
    web_env = BrowserEnv(browsergym_eval_env = None, local_root=docker_config.local_root, workplace_name=docker_config.workplace_name)
    # Downloads land under <local_root>/<workplace_name>/downloads.
    file_env = RequestsMarkdownBrowser(viewport_size=1024 * 5, local_root=docker_config.local_root, workplace_name=docker_config.workplace_name, downloads_folder=os.path.join(docker_config.local_root, docker_config.workplace_name, "downloads"))
    return code_env, web_env, file_env
def main(args):
    """Set up the dockerized environments, then ask the workflow-former agent
    to produce a workflow form (XML) for the task described in ``args.task_cfg``.

    NOTE(review): the workflow-creation phase below is entirely commented out,
    so this function currently only generates and prints the XML form.
    """
    print('\033[s\033[?25l', end='')  # Save cursor position and hide cursor
    with Progress(
        SpinnerColumn(),
        TextColumn("[progress.description]{task.description}"),
        transient=True  # the progress bar disappears once it completes
    ) as progress:
        task = progress.add_task("[cyan]Initializing...", total=None)
        progress.update(task, description="[cyan]Initializing config...[/cyan]\n")
        docker_config = get_config(args)
        progress.update(task, description="[cyan]Setting up logger...[/cyan]\n")
        # NOTE(review): log_path is computed but None is passed to the logger —
        # confirm whether file logging was intentionally disabled here.
        log_path = osp.join("casestudy_results", 'logs', f'agent_{args.container_name}_{args.model}.log')
        LoggerManager.set_logger(MetaChainLogger(log_path = None))
        progress.update(task, description="[cyan]Creating environment...[/cyan]\n")
        code_env, web_env, file_env = create_environment(docker_config)
        progress.update(task, description="[cyan]Setting up metachain...[/cyan]\n")
        setup_metachain(workplace_name=docker_config.workplace_name, env=code_env)
    # Shared context handed to every agent run.
    context_variables = {"working_dir": docker_config.workplace_name, "code_env": code_env, "web_env": web_env, "file_env": file_env}
    task_cfg = read_yaml_file(args.task_cfg)
    # generate agent form
    client = MetaChain()
    workflow_former = get_workflow_former_agent(args.model)
    messages = [
        {"role": "user", "content": task_cfg["requirements"] + """
Directly output the form in the XML format without ANY other text.
"""}
    ]
    response = client.run(workflow_former, messages, context_variables)
    output_xml_form = response.messages[-1]["content"]
    messages.extend(response.messages)
    MAX_RETRY = 3
    # Retry parsing the XML form, feeding the parse error back to the agent on
    # each failure; `parse_workflow_form` returns a WorkflowForm on success or
    # an error string on failure.
    for i in range(MAX_RETRY):
        workflow_form = parse_workflow_form(output_xml_form)
        if isinstance(workflow_form, WorkflowForm):
            break
        elif isinstance(workflow_form, str):
            print(f"Error parsing XML to workflow form: {workflow_form}. Retry {i+1}/{MAX_RETRY}")
            messages.append({"role": "user", "content": f"Error parsing XML to workflow form, the error message is: {workflow_form}\nNote that there are some special restrictions for creating workflow form, please try again."})
            response = client.run(workflow_former, messages, context_variables)
            output_xml_form = response.messages[-1]["content"]
            messages.extend(response.messages)
        else:
            raise ValueError(f"Unexpected error: {workflow_form}")
    print(output_xml_form)
    # --- Disabled workflow-creation phase (kept for reference) ---
    # context_variables["workflow_form"] = workflow_form
    # agents = workflow_form.agents
    # new_agents = []
    # for agent in agents:
    #     if agent.category == "new":
    #         new_agents.append(agent)
    # print(new_agents)
    # if len(new_agents) != 0:
    #     new_agent_str = "AGENT CREATION INSTRUCTIONS:\nBefore you create the workflow, you need to create the following new agents in the workflow:\n"
    #     for agent in new_agents:
    #         new_agent_str += f"Agent name: {agent.name}\nAgent description: {agent.description}\n"
    #         new_agent_str += f"Agent tools: {agent.tools}\n" if agent.tools else "Agent tools: []\n"
    # else:
    #     new_agent_str = ""
    # workflow_creator_agent = get_workflow_creator_agent(args.model)
    # def case_resolved(task_response: str, context_variables: dict):
    #     """
    #     Use this tools when the desired workflow is created and tested successfully. You can NOT use this tool if the workflow is not created or tested successfully by running the workflow.
    #     """
    #     return f"Case resolved. The desired workflow is created and tested successfully. : {task_response}"
    # def case_not_resolved(task_response: str, context_variables: dict):
    #     """
    #     Use this tools when you encounter irresistible errors after trying your best with multiple attempts for creating the desired workflow. You can NOT use this tool before you have tried your best.
    #     """
    #     return f"Case not resolved. The desired workflow is not created or tested successfully. Details: {task_response}"
    # workflow_creator_agent.functions.extend([case_resolved, case_not_resolved])
    # messages.append({"role": "user", "content": f"""\
    # WORKFLOW CREATION INSTRUCTIONS:
    # The user's request to create workflow is: {task_cfg["requirements"]}
    # Given the completed workflow form with XML format: {output_xml_form}
    # TASK:
    # Your task is to create the workflow for me, and then test the workflow by running the workflow using `run_workflow` tool to complete the user's task:
    # {task_cfg["task"]}
    # {new_agent_str}
    # TERMINATION INSTRUCTIONS:
    # After you have created the workflow and tested it successfully, you can use the `case_resolved` tool to indicate the case is resolved, otherwise you should try your best to create the workflow. And ONLY after you have tried multiple times, you can use the `case_not_resolved` tool to indicate the case is not resolved and give the reason.
    # Remember: you can NOT stop util you have created the workflow and tested it successfully.
    # """})
    # response = client.run(workflow_creator_agent, messages, context_variables)
    # content = response.messages[-1]["content"]
    # for i in range(MAX_RETRY):
    #     if content.startswith("Case resolved"):
    #         break
    #     messages.append({"role": "user", "content": f"""\
    # WORKFLOW CREATION INSTRUCTIONS:
    # The user's request to create workflow is: {task_cfg["requirements"]}
    # Given the completed workflow form with XML format: {output_xml_form}
    # TASK:
    # Your task is to create the workflow for me, and then test the workflow by running the workflow using `run_workflow` tool to complete the user's task:
    # {task_cfg["task"]}
    # {new_agent_str}
    # TERMINATION INSTRUCTIONS:
    # After you have created the workflow and tested it successfully, you can use the `case_resolved` tool to indicate the case is resolved, otherwise you should try your best to create the workflow. And ONLY after you have tried multiple times, you can use the `case_not_resolved` tool to indicate the case is not resolved and give the reason.
    # Remember: you can NOT stop util you have created the workflow and tested it successfully.
    # FEEDBACK:
    # The last attempt failed with the following error: {content}, please try again to create the desired workflow.
    # """})
    #     response = client.run(workflow_creator_agent, messages, context_variables)
    #     content = response.messages[-1]["content"]
    # if i == MAX_RETRY:
    #     return f"The desired workflow is not created or tested successfully with {MAX_RETRY} attempts."
if __name__ == "__main__":
    # Script entry point: parse CLI options, then run the meta-workflow pipeline.
    args = get_args()
    main(args)

View file

@ -1,16 +0,0 @@
# Resolve the directory containing this script and run from there.
current_dir=$(dirname "$(readlink -f "$0")")
cd $current_dir
# Environment for the dockerized metachain showcase run.
export DOCKER_WORKPLACE_NAME=workplace
export EVAL_MODE=True
export DEBUG=True
export BASE_IMAGES=tjb-gaia-bookworm:v2
# Model used for completions (alternative GPT-4o line kept below, disabled).
export COMPLETION_MODEL=claude-3-5-sonnet-20241022
# export COMPLETION_MODEL=gpt-4o-2024-08-06
export MC_MODE=False
# Task config for the workflow showcase (alternate case study kept below).
task_cfg="benchmarks/case_study/math_workflow/majority_voting(paralizing).yaml"
# task_cfg="benchmarks/case_study/math_workflow/condition_mining(evaluator-optimizer).yaml"
# Launch the meta-workflow showcase inside the named container.
python metachain_meta_workflow.py --container_name nl2agent_showcase --model ${COMPLETION_MODEL} --test_pull_name test_pull_0111 --debug --port 12350 --git_clone --task_cfg ${task_cfg}
# python /Users/tangjiabin/Documents/reasoning/metachain/test_gaia_tool.py

View file

@ -32,7 +32,8 @@ from rich.columns import Columns
from rich.text import Text
from rich.panel import Panel
import re
from playground.cli.metachain_meta_agent import meta_agent
from playground.cli.metachain_meta_workflow import meta_workflow
def get_args():
parser = argparse.ArgumentParser(description="working@tjb-tech")
parser.add_argument('--container_name', type=str, default='gpu_test')
@ -148,63 +149,14 @@ def user_mode(model: str, context_variables: dict, debug: bool = True):
# attempt to parse model_answer
if model_answer_raw.startswith('Case resolved'):
model_answer = re.findall(r'<solution>(.*?)</solution>', model_answer_raw)
model_answer = re.findall(r'<solution>(.*?)</solution>', model_answer_raw, re.DOTALL)
if len(model_answer) == 0:
model_answer = model_answer_raw
else:
model_answer = model_answer[0]
else:
model_answer = model_answer_raw
console.print(f"[bold green][bold magenta]@{agent_name}[/bold magenta] has finished with the response:[/bold green] [bold blue]{model_answer}[/bold blue]")
agent = response.agent
pass
def agent_chain(model: str, context_variables: dict, debug: bool = True):
    """Interactive REPL that routes each user query to the currently selected
    agent; typing ``@Agent_Name`` in a query switches the active agent, and
    ``exit`` quits the loop.
    """
    from metachain.agents import get_plan_agent
    from metachain.agents.programming_triage_agent import get_programming_triage_agent, get_agent_run_agent, get_tool_creation_agent, get_agent_creation_agent
    programming_triage_agent = get_programming_triage_agent(model)
    agent_run_agent = get_agent_run_agent(model)
    tool_creation_agent = get_tool_creation_agent(model)
    agent_creation_agent = get_agent_creation_agent(model)
    def transfer_to_programming_triage_agent():
        # Handoff tool: lets the plan agent pass control to the triage agent.
        return programming_triage_agent
    plan_agent = get_plan_agent(model)
    plan_agent.functions.append(transfer_to_programming_triage_agent)
    messages = []
    agent = plan_agent
    # Registry keyed by agent name with spaces replaced by '_' so the names can
    # be used as @mention tokens in the prompt.
    agents = {plan_agent.name.replace(' ', '_'): plan_agent, programming_triage_agent.name.replace(' ', '_'): programming_triage_agent, agent_run_agent.name.replace(' ', '_'): agent_run_agent, tool_creation_agent.name.replace(' ', '_'): tool_creation_agent, agent_creation_agent.name.replace(' ', '_'): agent_creation_agent}
    # REPL loop
    style = Style.from_dict({
        'bottom-toolbar': 'bg:#333333 #ffffff',
    })
    # create the interactive prompt session (with @mention auto-completion)
    session = PromptSession(
        completer=UserCompleter(agents.keys()),
        complete_while_typing=True,
        style=style
    )
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    mc = MetaChain(timestamp)
    while True:
        # query = ask_text("Tell me what you want to do:")
        query = session.prompt(
            'Tell me what you want to do (type "exit" to quit): ',
            bottom_toolbar=HTML('<b>Prompt:</b> Enter <b>@</b> to mention Agents')
        )
        if query.strip().lower() == 'exit':
            debug_print(debug, 'Agent completed. See you next time! :waving_hand:', color='green')
            break
        words = query.split()
        # Echo the query word by word; an @mention switches the active agent.
        for word in words:
            if word.startswith('@') and word[1:] in agents.keys():
                print(f"[bold magenta]{word}[bold magenta]", end=' ')
                agent = agents[word.replace('@', '')]
            else:
                print(word, end=' ')
        messages.append({"role": "user", "content": query})
        response = mc.run(agent, messages, context_variables, debug=debug)
        messages.extend(response.messages)
console.print(f"[bold green][bold magenta]@{agent_name}[/bold magenta] has finished with the response:\n[/bold green] [bold blue]{model_answer}[/bold blue]")
agent = response.agent
def tool_to_table(tool_dict: dict):
@ -240,9 +192,7 @@ def update_guidance(context_variables):
console.print(Panel(logo_text, style="bold salmon1", expand=True))
console.print(version_table)
console.print(Panel(NOTES,title="Important Notes", expand=True))
def workflow_chain(model: str, debug: bool = True):
pass
def main(args):
print('\033[s\033[?25l', end='') # Save cursor position and hide cursor
with Progress(
@ -284,17 +234,16 @@ def main(args):
user_mode(args.model, context_variables, args.debug)
case 'agent editor':
clear_screen()
agent_chain(args.model, context_variables, args.debug)
meta_agent(args.model, context_variables, args.debug)
case 'workflow editor':
clear_screen()
workflow_chain(args.model, context_variables, args.debug)
meta_workflow(args.model, context_variables, args.debug)
case 'exit':
console = Console()
logo_text = Text(GOODBYE_LOGO, justify="center")
console.print(Panel(logo_text, style="bold salmon1", expand=True))
break
if __name__ == "__main__":
args = get_args()
main(args)

View file

@ -1,14 +1,15 @@
current_dir=$(dirname "$(readlink -f "$0")")
cd $current_dir
cd ../..
export DOCKER_WORKPLACE_NAME=workplace
export EVAL_MODE=True
export DEBUG=True
export BASE_IMAGES=tjbtech1/gaia-bookworm:v2
export COMPLETION_MODEL=claude-3-5-sonnet-20241022
# export COMPLETION_MODEL=gpt-4o-2024-08-06
export DEBUG=False
export DEBUG=True
export MC_MODE=True
export AI_USER=tjb-tech
python metachain_loop.py --container_name quick_start --model ${COMPLETION_MODEL} --test_pull_name test_pull_1225 --debug --port 12350 --git_clone
# python /Users/tangjiabin/Documents/reasoning/metachain/test_gaia_tool.py
python playground/cli/metachain_cli.py --container_name quick_start --model ${COMPLETION_MODEL} --test_pull_name mirror_branch_0207 --debug --port 12345 --git_clone

View file

@ -0,0 +1,255 @@
from metachain import MetaChain
from metachain.util import UserCompleter
from prompt_toolkit import PromptSession
from prompt_toolkit.formatted_text import HTML
from prompt_toolkit.styles import Style
from metachain.logger import LoggerManager, MetaChainLogger
from rich.console import Console
from rich.panel import Panel
from metachain.agents.meta_agent.agent_former import get_agent_former_agent
from metachain.agents.meta_agent.tool_editor import get_tool_editor_agent
from metachain.agents.meta_agent.agent_creator import get_agent_creator_agent
import re
from metachain.agents.meta_agent.form_complie import parse_agent_form
def extract_agents_content(text):
    """Return the first ``<agents>...</agents>`` span of *text*, or None.

    The pattern is non-greedy and DOTALL, so the span may contain newlines and
    only the first complete element is returned.
    """
    found = re.search(r'(<agents>.*?</agents>)', text, re.DOTALL)
    return found.group(1) if found else None
def agent_profiling(agent_former, client, messages, context_variables, requirements, debug):
    """Ask *agent_former* to turn *requirements* into an XML agent form.

    Parsing is retried up to MAX_RETRY times, feeding the parse error back to
    the agent after each failure. The shared *messages* history is mutated in
    place and also returned.

    Returns:
        (agent_form, output_xml_form, messages): the parsed form (None if every
        attempt failed), the last raw XML text, and the updated history.
    """
    messages.append({"role": "user", "content": requirements+ """
Directly output the form in the XML format without ANY other text.
"""})
    response = client.run(agent_former, messages, context_variables, debug=debug)
    output_xml_form = response.messages[-1]["content"]
    messages.extend(response.messages)
    agent_form = None
    MAX_RETRY = 3
    for i in range(MAX_RETRY):
        try:
            # Pull the <agents>...</agents> span out of the raw model output
            # before parsing it into an AgentForm.
            output_xml_form = extract_agents_content(output_xml_form)
            assert output_xml_form is not None, "No the XML form should be found in the output with the tag <agents>...</agents>."
            agent_form = parse_agent_form(output_xml_form)
            break
        except Exception as e:
            # Feed the parse error back to the agent and ask it to try again.
            print(f"Error parsing XML to agent form: {e}. Retry {i+1}/{MAX_RETRY}")
            messages.append({"role": "user", "content": f"Error parsing XML to agent form: {e}\nNote that there are some special restrictions for creating agent form, please try again."})
            response = client.run(agent_former, messages, context_variables, debug=debug)
            output_xml_form = response.messages[-1]["content"]
            messages.extend(response.messages)
    return agent_form, output_xml_form, messages
def tool_editing(tool_editor_agent, client, messages, context_variables, agent_form, output_xml_form, debug, suggestions = ""):
    """Drive *tool_editor_agent* to create and test every new tool required by
    the agent form.

    Makes one initial attempt plus up to MAX_RETRY feedback-driven retries;
    success is detected by a response starting with "Case resolved". The shared
    *messages* history is mutated in place and also returned.

    Returns:
        (content, messages): the final response text (success or failure
        summary) and the updated message history.
    """
    def case_resolved(task_response: str, context_variables: dict):
        """
        Use this tools when ALL desired tools are created and tested successfully. You can NOT use this tool if tools are not created or tested successfully by running the tools.
        Args:
            task_response: the response of creating the tool which contains the completion status of the tool.
        """
        return f"Case resolved. ALL desired tools are created and tested successfully. Details: {task_response}"
    def case_not_resolved(task_response: str, context_variables: dict):
        """
        Use this tools when you encounter irresistible errors after trying your best with multiple attempts for creating the desired tool. You can NOT use this tool before you have tried your best.
        Args:
            task_response: the reason why the tool is not created or tested successfully.
        """
        return f"Case not resolved. Some desired tools are not created or tested successfully. Details: {task_response}"
    tool_editor_agent.functions.extend([case_resolved, case_not_resolved])
    MAX_RETRY = 3
    if suggestions != "":
        suggestions = "[IMPORTANT] Here are some suggestions for creating the tools: " + suggestions
    # Collect the descriptions of every tool marked "new" across all agents.
    agents = agent_form.agents
    new_tools = []
    for agent in agents:
        if len(agent.tools.new) > 0:
            for idx, tool in enumerate(agent.tools.new):
                new_tools.append(f"{idx+1}. Tool name: {tool.name}, Tool description: {tool.description}")
    if len(new_tools) == 0:
        # Nothing to build — report immediate success.
        return "Case resolved. ALL desired tools are created and tested successfully.", messages
    new_tools_str = "\n".join(new_tools)
    messages.append({"role": "user", "content": f"""\
Your task is to create a list of new tools for me, the tools are:
{new_tools_str}
{suggestions}
Please create these new tools for me, note that you can NOT stop util you have created all the tools and tested them using `run_tool` successfully.
If ALL tools are created and tested successfully, you can stop and use `case_resolved` tool. Otherwise, you should continue to create the tools. After you have tried your best, you can use `case_not_resolved` tool to give the reason why the tool is not created or tested successfully.
[IMPORTANT] ALL tools MUST be tested successfully by running the tools using `run_tool` before you stop.
"""})
    response = client.run(tool_editor_agent, messages, context_variables, debug=debug)
    content = response.messages[-1]["content"]
    for _attempt in range(MAX_RETRY):
        if content.startswith("Case resolved"):
            return content, messages
        messages.append({"role": "user", "content": f"""\
Your task is to create a list of new tools for me, the tools are:
{new_tools_str}
Please create these new tools for me, note that you can NOT stop util you have created all the tools and tested them using `run_tool` successfully.
The last attempt failed with the following error: {content}, please try again to create the tools.
"""})
        response = client.run(tool_editor_agent, messages, context_variables, debug=debug)
        content = response.messages[-1]["content"]
    # BUGFIX: the original checked `if i == MAX_RETRY` after
    # `for i in range(MAX_RETRY)`, which can never be true, so on total failure
    # the function fell off the end and returned None (the caller then crashed
    # calling .startswith on it). It also never inspected the final retry's
    # response. Check the last response once more, then return the failure.
    if content.startswith("Case resolved"):
        return content, messages
    return f"{content}\nSome desired tools are not created or tested successfully with {MAX_RETRY} attempts.", messages
def agent_editing(agent_creator_agent, client, messages, context_variables, agent_form, output_xml_form, requirements, task, debug, suggestions = ""):
    """Drive *agent_creator_agent* to build and test the agent(s) described by
    the completed agent form.

    Makes one initial attempt plus up to MAX_RETRY feedback-driven retries;
    success is detected by a response starting with "Case resolved". The shared
    *messages* history is mutated in place and also returned.

    Returns:
        (content, messages): the final response text (success or failure
        summary) and the updated message history.
    """
    MAX_RETRY = 3
    if suggestions != "":
        suggestions = "[IMPORTANT] Here are some suggestions for creating the agent(s): " + suggestions
    def case_resolved(task_response: str, context_variables: dict):
        """
        Use this tools when the desired agent(s) is created and tested successfully. You can NOT use this tool if the agent(s) is not created or tested successfully by running the agent(s).
        """
        return f"Case resolved. The desired agent(s) is created and tested successfully. : {task_response}"
    def case_not_resolved(task_response: str, context_variables: dict):
        """
        Use this tools when you encounter irresistible errors after trying your best with multiple attempts for creating the desired agent(s). You can NOT use this tool before you have tried your best.
        """
        return f"Case not resolved. The desired agent(s) is not created or tested successfully. Details: {task_response}"
    agent_creator_agent.functions.extend([case_resolved, case_not_resolved])
    messages.append({"role": "user", "content": f"""\
The user's request to create agent(s) is: {requirements}
Given the completed agent form with XML format: {output_xml_form}
After previous attempts, you have created new tools that required by the desired agent(s).
Your task is to create the desired agent(s) for me, note that you may create ONE single agent or multiple agents connected by orchestrator agent.
After you have created the agent(s), you should test the agent(s) by running the agent(s) using `run_agent` tool to complete the user's task:
{task}
Note that you can NOT stop util you have created the agent(s) and tested it successfully.
{suggestions}
"""})
    response = client.run(agent_creator_agent, messages, context_variables, debug=debug)
    content = response.messages[-1]["content"]
    for _attempt in range(MAX_RETRY):
        if content.startswith("Case resolved"):
            return content, messages
        messages.append({"role": "user", "content": f"""\
The user's request to create agent(s) is: {requirements}
Given the completed agent form with XML format: {output_xml_form}
After previous attempts, you have created new tools that required by the desired agent(s).
Your task is to create the desired agent(s) for me, note that you may create ONE single agent or multiple agents connected by orchestrator agent.
After you have created the agent(s), you should test the agent(s) by running the agent(s) using `run_agent` tool to complete the user's task:
{task}
Note that you can NOT stop util you have created the agent(s) and tested it successfully.
The last attempt failed with the following error: {content}, please try again to create the desired agent(s).
{suggestions}
"""})
        response = client.run(agent_creator_agent, messages, context_variables, debug=debug)
        content = response.messages[-1]["content"]
    # BUGFIX: the original checked `if i == MAX_RETRY` after
    # `for i in range(MAX_RETRY)`, which can never be true, so on total failure
    # the function fell off the end and returned None (the caller then crashed
    # calling .startswith on it). It also never inspected the final retry's
    # response. Check the last response once more, then return the failure.
    if content.startswith("Case resolved"):
        return content, messages
    return f"{content}\nThe desired agent(s) is not created or tested successfully with {MAX_RETRY} attempts.", messages
def meta_agent(model: str, context_variables: dict, debug: bool = True):
    """Interactive "Agent Chain" REPL: profile requirements into an agent form,
    create the required tools, then create and test the agent(s).

    Typing ``@Agent_Name`` in a query switches the active stage; ``exit`` quits.
    NOTE(review): `agent_form`, `output_xml_form` and `requirements` are only
    bound by the 'Agent Former Agent' stage — @mentioning a later stage first
    would raise NameError. Confirm whether out-of-order entry should be guarded.
    """
    logger = LoggerManager.get_logger()
    # generate agent form
    agent_former = get_agent_former_agent(model)
    tool_editor_agent = get_tool_editor_agent(model)
    agent_creator_agent = get_agent_creator_agent(model)
    # enter agent
    agent = agent_former
    # Registry keyed by agent name with spaces replaced by '_' so the names can
    # be used as @mention tokens in the prompt.
    agents = {agent_former.name.replace(' ', '_'): agent_former, tool_editor_agent.name.replace(' ', '_'): tool_editor_agent, agent_creator_agent.name.replace(' ', '_'): agent_creator_agent}
    style = Style.from_dict({
        'bottom-toolbar': 'bg:#333333 #ffffff',
    })
    # create the interactive prompt session (with @mention auto-completion)
    session = PromptSession(
        completer=UserCompleter(agents.keys()),
        complete_while_typing=True,
        style=style
    )
    client = MetaChain(log_path=logger)
    console = Console()
    messages = []
    last_message = "Tell me what do you want to create with `Agent Chain`?"
    while True:
        query = session.prompt(
            f'{last_message} (type "exit" to quit, press "Enter" to continue): ',
            bottom_toolbar=HTML('<b>Prompt:</b> Enter <b>@</b> to mention Agents'),
        )
        if query.strip().lower() == 'exit':
            logo_text = "Agent Chain completed. See you next time! :waving_hand:"
            console.print(Panel(logo_text, style="bold salmon1", expand=True))
            break
        words = query.split()
        console.print(f"[bold green]Your request: {query}[/bold green]", end=" ")
        # An @mention in the query switches the active agent; other words are
        # left as part of the request text.
        for word in words:
            if word.startswith('@') and word[1:] in agents.keys():
                # print(f"[bold magenta]{word}[bold magenta]", end=' ')
                agent = agents[word.replace('@', '')]
            else:
                # print(word, end=' ')
                pass
        print()
        agent_name = agent.name
        console.print(f"[bold green][bold magenta]@{agent_name}[/bold magenta] will help you, be patient...[/bold green]")
        # Dispatch on the active stage of the agent-creation pipeline.
        match agent_name:
            case 'Agent Former Agent':
                if query == "":
                    console.print(f"[bold red]There MUST be a request to create the agent form.[/bold red]")
                    continue
                requirements = query
                agent_form, output_xml_form, messages = agent_profiling(agent_former, client, messages, context_variables, requirements, debug)
                if agent_form is None:
                    console.print(f"[bold red]The agent form is not created successfully, please modify your requirements again.[/bold red]")
                    last_message = "Tell me what do you want to create with `Agent Chain`?"
                    continue
                # Advance to the tool-creation stage.
                agent = tool_editor_agent
                console.print(f"[bold green]The agent form is created successfully. [/bold green]")
                last_message = "It is time to create the desired tools, do you have any suggestions for creating the tools?"
            case 'Tool Editor Agent':
                suggestions = query
                tool_response, messages = tool_editing(tool_editor_agent, client, messages, context_variables, agent_form, output_xml_form, debug, suggestions)
                if tool_response.startswith("Case not resolved"):
                    console.print(f"[bold red]Some desired tools are not created or tested successfully, please try again.[/bold red]")
                    agent = tool_editor_agent
                    last_message = "The tools are not created successfully, do you have any suggestions for creating the tools?"
                    continue
                elif tool_response.startswith("Case resolved"):
                    # Advance to the agent-creation stage.
                    agent = agent_creator_agent
                    console.print(f"[bold green]The tools are created successfully. [/bold green]")
                    last_message = "It is time to create the desired agent(s), do you have any suggestions for creating the agent(s)?"
                else:
                    raise ValueError(f"Unknown tool response: {tool_response}")
            case 'Agent Creator Agent':
                suggestions = query
                default_value='Come up with a task for the agent(s) to test your created agent(s), and use `run_agent` tool to test your created agent(s).' # default task used when the user just presses Enter
                task = session.prompt(
                    'It is time to create the desired agent(s), what task do you want to complete with the agent(s)? (Press Enter if none): ',
                )
                task = default_value if not task.strip() else task
                agent_response, messages = agent_editing(agent_creator_agent, client, messages, context_variables, agent_form, output_xml_form, requirements, task, debug, suggestions)
                if agent_response.startswith("Case not resolved"):
                    console.print(f"[bold red]The agent(s) is not created or tested successfully, please try again.[/bold red]")
                    agent = agent_creator_agent
                    last_message = "The agent(s) are not created successfully, do you have any suggestions for creating the agent(s)?"
                    continue
                else:
                    console.print(f"[bold green]The agent(s) are created successfully. [/bold green]")
                    last_message = "Tell me what do you want to create with `Agent Chain`?"

View file

@ -0,0 +1,194 @@
from metachain import MetaChain
from metachain.util import ask_text, single_select_menu, print_markdown, debug_print, UserCompleter
from prompt_toolkit import PromptSession
from prompt_toolkit.formatted_text import HTML
from prompt_toolkit.styles import Style
from metachain.logger import LoggerManager, MetaChainLogger
from rich.console import Console
from rich.panel import Panel
from metachain.agents.meta_agent.workflow_former import get_workflow_former_agent
from metachain.agents.meta_agent.workflow_creator import get_workflow_creator_agent
import re
from metachain.agents.meta_agent.worklow_form_complie import parse_workflow_form, WorkflowForm
def workflow_profiling(workflow_former, client, messages, context_variables, requirements, debug):
    """Ask the workflow-former agent to produce and parse the workflow form.

    Sends the user's ``requirements`` to ``workflow_former``, expecting a bare
    XML form in the reply, and parses it with ``parse_workflow_form``. When
    parsing fails the error text is fed back to the agent and the exchange is
    retried, up to MAX_RETRY attempts in total.

    Args:
        workflow_former: agent that fills in the workflow form.
        client: MetaChain client used to run the agent.
        messages: conversation history; mutated in place and also returned.
        context_variables: shared state forwarded to ``client.run``.
        requirements: the user's natural-language request.
        debug: forwarded to ``client.run`` for verbose logging.

    Returns:
        ``(workflow_form, output_xml_form, messages)`` where ``workflow_form``
        is a ``WorkflowForm`` on success or ``None`` when no attempt produced
        parseable XML (callers test for ``None``), ``output_xml_form`` is the
        last raw XML string from the agent, and ``messages`` is the history.

    Raises:
        ValueError: if ``parse_workflow_form`` returns an unexpected type.
    """
    messages.append({"role": "user", "content": requirements + """
Directly output the form in the XML format without ANY other text.
"""})
    response = client.run(workflow_former, messages, context_variables, debug=debug)
    output_xml_form = response.messages[-1]["content"]
    messages.extend(response.messages)
    MAX_RETRY = 3
    for i in range(MAX_RETRY):
        workflow_form = parse_workflow_form(output_xml_form)
        if isinstance(workflow_form, WorkflowForm):
            return workflow_form, output_xml_form, messages
        elif isinstance(workflow_form, str):
            # parse_workflow_form signals failure by returning the error text;
            # feed it back to the agent and ask for a corrected form.
            print(f"Error parsing XML to workflow form: {workflow_form}. Retry {i+1}/{MAX_RETRY}")
            messages.append({"role": "user", "content": f"Error parsing XML to workflow form, the error message is: {workflow_form}\nNote that there are some special restrictions for creating workflow form, please try again."})
            response = client.run(workflow_former, messages, context_variables, debug=debug)
            output_xml_form = response.messages[-1]["content"]
            messages.extend(response.messages)
        else:
            raise ValueError(f"Unexpected error: {workflow_form}")
    # All retries exhausted. Return None (not the raw error string) so the
    # caller's `if workflow_form is None` failure branch actually fires;
    # previously the error string leaked through and that check never matched.
    return None, output_xml_form, messages
def workflow_editing(workflow_creator_agent, client, messages, context_variables, workflow_form, output_xml_form, requirements, task, debug, suggestions = ""):
    """Drive the workflow-creator agent until the workflow is built and tested.

    Equips the agent with `case_resolved`/`case_not_resolved` termination
    tools, sends the creation prompt, and retries with failure feedback up to
    MAX_RETRY times when the reply does not start with "Case resolved".

    Args:
        workflow_creator_agent: agent that builds and tests the workflow; its
            ``functions`` list is extended in place with the termination tools.
        client: MetaChain client used to run the agent.
        messages: conversation history; mutated in place and also returned.
        context_variables: shared state forwarded to ``client.run``.
        workflow_form: parsed WorkflowForm; ``.agents`` is scanned for entries
            with ``category == "new"`` that must be created first.
        output_xml_form: the raw XML form, embedded in the prompt.
        requirements: the user's original request.
        task: concrete task used to test the created workflow.
        debug: forwarded to ``client.run``.
        suggestions: optional user hints appended to every prompt.

    Returns:
        ``(content, messages)``: the agent's final reply (starts with
        "Case resolved" on success, otherwise a failure description) and the
        conversation history.
    """
    MAX_RETRY = 3
    if suggestions != "":
        suggestions = "[IMPORTANT] Here are some suggestions for creating the workflow: " + suggestions
    # Agents the form marks as "new" must be created before the workflow.
    new_agents = [agent for agent in workflow_form.agents if agent.category == "new"]
    if new_agents:
        new_agent_str = "AGENT CREATION INSTRUCTIONS:\nBefore you create the workflow, you need to create the following new agents in the workflow:\n"
        for agent in new_agents:
            new_agent_str += f"Agent name: {agent.name}\nAgent description: {agent.description}\n"
            new_agent_str += f"Agent tools: {agent.tools}\n" if agent.tools else "Agent tools: []\n"
    else:
        new_agent_str = ""

    def case_resolved(task_response: str, context_variables: dict):
        """
        Use this tool when the desired workflow is created and tested successfully. You can NOT use this tool if the workflow is not created or tested successfully by running the workflow.
        """
        return f"Case resolved. The desired workflow is created and tested successfully. : {task_response}"

    def case_not_resolved(task_response: str, context_variables: dict):
        """
        Use this tool when you encounter irresistible errors after trying your best with multiple attempts for creating the desired workflow. You can NOT use this tool before you have tried your best.
        """
        return f"Case not resolved. The desired workflow is not created or tested successfully. Details: {task_response}"

    def build_prompt(feedback: str = "") -> str:
        # Single source of truth for the creation prompt; `feedback` carries
        # the previous failure message on retries (empty on the first try).
        return f"""\
WORKFLOW CREATION INSTRUCTIONS:
The user's request to create workflow is: {requirements}
Given the completed workflow form with XML format: {output_xml_form}
TASK:
Your task is to create the workflow for me, and then test the workflow by running the workflow using `run_workflow` tool to complete the user's task:
{task}
{new_agent_str}
TERMINATION INSTRUCTIONS:
After you have created the workflow and tested it successfully, you can use the `case_resolved` tool to indicate the case is resolved, otherwise you should try your best to create the workflow. And ONLY after you have tried multiple times, you can use the `case_not_resolved` tool to indicate the case is not resolved and give the reason.
Remember: you can NOT stop until you have created the workflow and tested it successfully.
{feedback}{suggestions}
"""

    workflow_creator_agent.functions.extend([case_resolved, case_not_resolved])
    messages.append({"role": "user", "content": build_prompt()})
    response = client.run(workflow_creator_agent, messages, context_variables, debug=debug)
    content = response.messages[-1]["content"]
    for i in range(MAX_RETRY):
        if content.startswith("Case resolved"):
            return content, messages
        feedback = f"FEEDBACK:\nThe last attempt failed with the following error: {content}, please try again to create the desired workflow.\n"
        messages.append({"role": "user", "content": build_prompt(feedback)})
        response = client.run(workflow_creator_agent, messages, context_variables, debug=debug)
        content = response.messages[-1]["content"]
    # BUG FIX: the original guarded the failure return with `if i == MAX_RETRY:`
    # inside `range(MAX_RETRY)`, which can never be true, so exhausting the
    # retries returned None and crashed the caller's `.startswith` check.
    # Also check the final retry's reply, which the original never examined.
    if content.startswith("Case resolved"):
        return content, messages
    return f"The desired workflow is not created or tested successfully with {MAX_RETRY} attempts.", messages
def meta_workflow(model: str, context_variables: dict, debug: bool = True):
    """Interactive REPL for building workflows with the meta-agent pair.

    Routes each user prompt to either the `Workflow Former Agent` (turns a
    natural-language request into an XML workflow form) or the
    `Workflow Creator Agent` (builds and tests the workflow from that form).
    The active agent can be switched by mentioning it with an `@` token.

    Args:
        model: LLM model name passed to both agent factories.
        context_variables: shared mutable state handed to every run; the
            parsed workflow form is stored under the "workflow_form" key.
        debug: forwarded to the profiling/editing helpers for verbose logs.
    """
    print('\033[s\033[?25l', end='')    # Save cursor position and hide cursor
    logger = LoggerManager.get_logger()
    workflow_former = get_workflow_former_agent(model)
    workflow_creator_agent = get_workflow_creator_agent(model)
    agent = workflow_former  # agent currently selected to handle the next prompt
    # Map "@"-mentionable names (spaces replaced by underscores) to agents.
    agents = {workflow_former.name.replace(' ', '_'): workflow_former, workflow_creator_agent.name.replace(' ', '_'): workflow_creator_agent}
    style = Style.from_dict({
        'bottom-toolbar': 'bg:#333333 #ffffff',
    })
    # Create the prompt session with @-mention completion for agent names.
    session = PromptSession(
        completer=UserCompleter(agents.keys()),
        complete_while_typing=True,
        style=style
    )
    client = MetaChain(log_path=logger)
    console = Console()
    messages = []
    last_message = "Tell me what do you want to create with `Workflow Chain`?"
    while True:
        query = session.prompt(
            f'{last_message} (type "exit" to quit, press "Enter" to continue): ',
            bottom_toolbar=HTML('<b>Prompt:</b> Enter <b>@</b> to mention Agents'),
        )
        if query.strip().lower() == 'exit':
            logo_text = "Workflow Chain completed. See you next time! :waving_hand:"
            console.print(Panel(logo_text, style="bold salmon1", expand=True))
            break
        words = query.split()
        console.print(f"[bold green]Your request: {query}[/bold green]", end=" ")
        # Any "@AgentName" token in the query switches the active agent.
        for word in words:
            if word.startswith('@') and word[1:] in agents.keys():
                # print(f"[bold magenta]{word}[bold magenta]", end=' ')
                agent = agents[word.replace('@', '')]
            else:
                # print(word, end=' ')
                pass
        print()
        agent_name = agent.name
        console.print(f"[bold green][bold magenta]@{agent_name}[/bold magenta] will help you, be patient...[/bold green]")
        match agent_name:
            case "Workflow Former Agent":
                if query == "":
                    console.print(f"[bold red]There MUST be a request to create the agent form.[/bold red]")
                    continue
                requirements = query
                workflow_form, output_xml_form, messages = workflow_profiling(workflow_former, client, messages, context_variables, requirements, debug)
                if workflow_form is None:
                    # NOTE(review): workflow_profiling returns the parse-error
                    # string on exhaustion rather than None — confirm this
                    # failure branch is actually reachable.
                    console.print(f"[bold red]The workflow form is not created successfully, please modify your requirements again.[/bold red]")
                    last_message = "Tell me what do you want to create with `Workflow Chain`?"
                    continue
                # Hand off to the creator agent for the next turn.
                agent = workflow_creator_agent
                context_variables["workflow_form"] = workflow_form
                console.print(f"[bold green]The workflow form is created successfully. [/bold green]")
                last_message = "It is time to create the desired workflow, do you have any suggestions for creating the workflow?"
            case "Workflow Creator Agent":
                # The free-text query is treated as optional creation hints.
                # NOTE(review): `workflow_form`, `output_xml_form` and
                # `requirements` are unbound if this branch runs before the
                # former agent has succeeded (e.g. user @-mentions the creator
                # first) — confirm the intended flow guards against this.
                suggestions = query
                default_value='Come up with a task for the workflow to test your created workflow, and use `run_workflow` tool to test your created workflow.'  # default test task used when the user just presses Enter
                task = session.prompt(
                    'It is time to create the desired workflow, what task do you want to complete with the workflow? (Press Enter if none): ',
                )
                task = default_value if not task.strip() else task
                agent_response, messages = workflow_editing(workflow_creator_agent, client, messages, context_variables, workflow_form, output_xml_form, requirements, task, debug, suggestions)
                if agent_response.startswith("Case not resolved"):
                    console.print(f"[bold red]The workflow is not created or tested successfully, please try again.[/bold red]")
                    agent = workflow_creator_agent
                else:
                    console.print(f"[bold green]The workflow is created successfully. [/bold green]")
                    last_message = "Tell me what do you want to create with `Workflow Chain` next?"

20
process_tool_docs.py Normal file
View file

@ -0,0 +1,20 @@
from pandas import read_csv
import json
from rich import print

# Placeholder string shipped in tool_docs.csv wherever a RapidAPI key belongs.
PLACEHOLDER = 'YOUR_RAPID_API_KEY'

# Load the tool documentation table and show which tools need a RapidAPI
# subscription before they can be used.
df = read_csv("tool_docs.csv")
rapidapi_tools = df[df['Platform'] == 'RapidAPI']['Tool_Name'].unique()
print("[bold blue]Current RapidAPI tools:[/bold blue]")
print(json.dumps(rapidapi_tools.tolist(), indent=4))
print("[bold red][IMPORTANT][/bold red] [bold yellow]If you want to use these tools, you should go to RapidAPI and subscribe to them. More convenient tool platforms such as Composio are under development.[/bold yellow]")

your_api_key = input("Please input your RapidAPI API key:")

# Substitute the user's key into every text column. Using `map` with a plain
# str.replace (instead of Series.str.replace) leaves non-string cells in
# object columns untouched; the `.str` accessor would turn them into NaN and
# corrupt the rewritten CSV. It also avoids the pandas-version-dependent
# default of the `regex` flag.
for column in df.columns:
    if df[column].dtype == 'object':
        df[column] = df[column].map(
            lambda value: value.replace(PLACEHOLDER, your_api_key) if isinstance(value, str) else value
        )

# NOTE(review): this writes the API key back into tool_docs.csv in plaintext —
# make sure the file stays out of version control.
df.to_csv('tool_docs.csv', index=False)
print("[bold green]Done![/bold green]")

View file

@ -64,6 +64,7 @@ install_requires =
youtube_transcript_api
moviepy
faster_whisper
sentence_transformers
python_requires = >=3.10

510
tool_docs.csv Normal file

File diff suppressed because one or more lines are too long