From bd1d11ce7ceb843735b763ba60d2a4d0cb5eb826 Mon Sep 17 00:00:00 2001 From: fuzi233 Date: Sat, 26 Jul 2025 16:27:21 +0800 Subject: [PATCH 1/5] new file: mas_arena/agents/mad.py --- mas_arena/agents/mad.py | 280 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 280 insertions(+) create mode 100644 mas_arena/agents/mad.py diff --git a/mas_arena/agents/mad.py b/mas_arena/agents/mad.py new file mode 100644 index 0000000..caa1f14 --- /dev/null +++ b/mas_arena/agents/mad.py @@ -0,0 +1,280 @@ +import time +import json +import os +import asyncio +from dotenv import load_dotenv +from dataclasses import dataclass +from typing import Dict, TypedDict, Any, List, Optional + +from pydantic import BaseModel +from langchain_openai import ChatOpenAI +from langchain_core.messages import SystemMessage, HumanMessage +from mas_arena.agents.base import AgentSystem, AgentSystemRegistry + +load_dotenv() + +@dataclass +class DebateAgent: + """Represents a debate participant""" + agent_id: str + name: str + model_name: str + temperature: float + memory_lst: List[Dict[str, str]] = None + + def __post_init__(self): + if self.memory_lst is None: + self.memory_lst = [] + self.llm = ChatOpenAI( + model=self.model_name, + temperature=self.temperature, + request_timeout=60, + max_retries=2 + ) + + def set_meta_prompt(self, meta_prompt: str): + """Set meta prompt""" + self.memory_lst.append({"role": "system", "content": meta_prompt}) + + def add_event(self, event: str): + """Add new event to memory""" + self.memory_lst.append({"role": "user", "content": event}) + + def add_memory(self, memory: str): + """Add generated response to memory""" + self.memory_lst.append({"role": "assistant", "content": memory}) + + def ask(self): + """Query and get response""" + from langchain_core.messages import AIMessage + + messages = [] + for msg in self.memory_lst: + if msg["role"] == "system": + messages.append(SystemMessage(content=msg["content"])) + elif msg["role"] == "user": + messages.append(HumanMessage(content=msg["content"])) + elif msg["role"] == "assistant": + messages.append(AIMessage(content=msg["content"])) + + response = self.llm.invoke(messages) + response.name = self.name + response.id = self.agent_id + return response + +@dataclass +class ResultExtractor: + """Result extractor""" + model_name: str + format_prompt: str = "" + + def __post_init__(self): + self.llm = ChatOpenAI( + model=self.model_name, + request_timeout=60, + max_retries=2 + ) + self.name = "result_extractor" + + def extract(self, agent_histories: List[List[Dict]], problem_text: str): + """Extract final answer from agent history""" + # Build extraction prompt + extract_prompt = f"""Based on the debate history, please extract the final answer to the following problem: + +Problem: {problem_text} + +{self.format_prompt} + +Please provide only the final answer.""" + + messages = [HumanMessage(content=extract_prompt)] + response = self.llm.invoke(messages) + return {"message": response} + +class MADAgent(AgentSystem): + """Multi-Agent Debate system""" + + def __init__(self, name: str = "mad", config: Dict[str, Any] = None): + super().__init__(name, config) + self.config = config or {} + self.num_players = self.config.get("num_players", 3) + self.max_round = self.config.get("max_round", 3) + self.model_name = self.config.get("model_name") or os.getenv("MODEL_NAME", "gpt-4o-mini") + self.temperature = self.config.get("temperature", 0) + + # Debate configuration + self.debate_config = { + "debate_topic": "", + "base_answer": "", + "debate_answer": "", + 
"player_meta_prompt": "You are a debater. Hello and welcome to the debate. It's not necessary to fully agree with each other's perspectives, as our objective is to find the correct answer.\nThe debate topic is stated as follows:\n##debate_topic##", + "moderator_meta_prompt": "You are a moderator. There will be two debaters involved in a debate. They will present their answers and discuss their perspectives on the following topic: \"##debate_topic##\"\nAt the end of each round, you will evaluate answers and decide which is correct.", + "affirmative_prompt": "##debate_topic##", + "negative_prompt": "##aff_ans##\n\nYou disagree with my answer. Provide your answer and reasons.", + "moderator_prompt": "Now the ##round## round of debate for both sides has ended.\n\nAffirmative side arguing:\n##aff_ans##\n\nNegative side arguing: ##neg_ans##\n\nYou, as the moderator, will evaluate both sides' answers and determine if there is a clear preference for an answer candidate. If so, please summarize your reasons for supporting affirmative/negative side and give the final answer that you think is correct, and the debate will conclude. If not, the debate will continue to the next round. Now please output your answer in json format, with the format as follows: {\"Whether there is a preference\": \"Yes or No\", \"Supported Side\": \"Affirmative or Negative\", \"Reason\": \"\", \"debate_answer\": \"\"}. Please strictly output in JSON format, do not output irrelevant content.", + "judge_prompt_last1": "Affirmative side arguing: ##aff_ans##\n\nNegative side arguing: ##neg_ans##\n\nNow, what answer candidates do we have? Present them without reasons.", + "judge_prompt_last2": "Therefore, ##debate_topic##\nPlease summarize your reasons and give the final answer that you think is correct. Now please output your answer in json format, with the format as follows: {\"Reason\": \"\", \"debate_answer\": \"\"}. Please strictly output in JSON format, do not output irrelevant content.", + "debate_prompt": "##oppo_ans##\n\nDo you agree with my perspective? Please provide your reasons and answer." 
+ } + + # Initialize components + agent_components = self._create_agents() + self.players = [w for w in agent_components["workers"] if isinstance(w, DebateAgent)] + extractors = [w for w in agent_components["workers"] if isinstance(w, ResultExtractor)] + if extractors: + self.extractor = extractors[0] + else: + self.extractor = ResultExtractor(self.model_name, self.format_prompt) + + def _create_agents(self) -> Dict[str, List]: + """Create debate participants and result extractor""" + name_list = ["Affirmative side", "Negative side", "Moderator"] + + players = [] + for i, name in enumerate(name_list): + agent = DebateAgent( + agent_id=f"agent_{i+1}", + name=name, + model_name=self.model_name, + temperature=self.temperature + ) + players.append(agent) + + # Create result extractor + extractor = ResultExtractor(self.model_name, self.format_prompt) + + return { + "workers": players + [extractor] + } + + def init_prompt(self, debate_topic: str): + """Initialize and replace placeholders in prompt templates""" + config = self.debate_config.copy() + for key in config: + if isinstance(config[key], str): + config[key] = config[key].replace("##debate_topic##", debate_topic) + return config + + def round_dct(self, num: int) -> str: + """Convert number to ordinal word""" + dct = { + 1: 'first', 2: 'second', 3: 'third', 4: 'fourth', 5: 'fifth', + 6: 'sixth', 7: 'seventh', 8: 'eighth', 9: 'ninth', 10: 'tenth' + } + return dct.get(num, str(num)) + + async def run_agent(self, problem: Dict[str, Any], **kwargs) -> Dict[str, Any]: + """Run debate process""" + problem_text = problem["problem"] + + # Store all LLM responses + all_messages = [] + + # Use format_prompt as debate topic + debate_topic = f"{problem_text}\n\n{self.format_prompt}" if self.format_prompt else problem_text + + # Initialize prompts + config = self.init_prompt(debate_topic) + + # Get participants + affirmative = self.players[0] + negative = self.players[1] + moderator = self.players[2] + + # Set meta prompts + affirmative.set_meta_prompt(config['player_meta_prompt']) + negative.set_meta_prompt(config['player_meta_prompt']) + moderator.set_meta_prompt(config['moderator_meta_prompt']) + + # First round debate + affirmative.add_event(config['affirmative_prompt']) + aff_response = affirmative.ask() + affirmative.add_memory(aff_response.content) + all_messages.append(aff_response) + aff_ans = aff_response.content + + negative.add_event(config['negative_prompt'].replace('##aff_ans##', aff_ans)) + neg_response = negative.ask() + negative.add_memory(neg_response.content) + all_messages.append(neg_response) + neg_ans = neg_response.content + + moderator.add_event(config['moderator_prompt'].replace('##aff_ans##', aff_ans).replace('##neg_ans##', neg_ans).replace('##round##', 'first')) + mod_response = moderator.ask() + moderator.add_memory(mod_response.content) + all_messages.append(mod_response) + + try: + mod_ans = json.loads(mod_response.content) + except: + mod_ans = {"debate_answer": "", "Whether there is a preference": "No"} + + # Multi-round debate + for round_num in range(2, self.max_round + 1): + if mod_ans.get("debate_answer", "") != "": + break + + affirmative.add_event(config['debate_prompt'].replace('##oppo_ans##', neg_ans)) + aff_response = affirmative.ask() + affirmative.add_memory(aff_response.content) + all_messages.append(aff_response) + aff_ans = aff_response.content + + negative.add_event(config['debate_prompt'].replace('##oppo_ans##', aff_ans)) + neg_response = negative.ask() + negative.add_memory(neg_response.content) + 
all_messages.append(neg_response) + neg_ans = neg_response.content + + moderator.add_event(config['moderator_prompt'].replace('##aff_ans##', aff_ans).replace('##neg_ans##', neg_ans).replace('##round##', self.round_dct(round_num))) + mod_response = moderator.ask() + moderator.add_memory(mod_response.content) + all_messages.append(mod_response) + + try: + mod_ans = json.loads(mod_response.content) + except: + mod_ans = {"debate_answer": "", "Whether there is a preference": "No"} + + # If no consensus reached, use judge + final_answer = mod_ans.get("debate_answer", "") + if not final_answer: + judge = DebateAgent( + agent_id="judge", + name="Judge", + model_name=self.model_name, + temperature=self.temperature + ) + judge.set_meta_prompt(config['moderator_meta_prompt']) + + # Get final answer candidates + judge.add_event(config['judge_prompt_last1'].replace('##aff_ans##', aff_ans).replace('##neg_ans##', neg_ans)) + judge_response1 = judge.ask() + judge.add_memory(judge_response1.content) + all_messages.append(judge_response1) + + # Select final answer + judge.add_event(config['judge_prompt_last2']) + judge_response2 = judge.ask() + judge.add_memory(judge_response2.content) + all_messages.append(judge_response2) + + try: + judge_ans = json.loads(judge_response2.content) + final_answer = judge_ans.get("debate_answer", judge_response2.content) + except: + final_answer = judge_response2.content + + return { + "messages": all_messages, + "final_answer": final_answer + } + +# Register agent system +AgentSystemRegistry.register( + "mad", + MADAgent, + num_players=3, + max_round=3, + temperature=0 +) \ No newline at end of file From 410f09e98e7cf429fc9d1c78cf0d38a695d5cdad Mon Sep 17 00:00:00 2001 From: fuzi233 Date: Mon, 28 Jul 2025 11:48:52 +0800 Subject: [PATCH 2/5] modified: mas_arena/agents/mad.py --- mas_arena/agents/mad.py | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/mas_arena/agents/mad.py b/mas_arena/agents/mad.py index caa1f14..91850a4 100644 --- a/mas_arena/agents/mad.py +++ b/mas_arena/agents/mad.py @@ -44,7 +44,7 @@ def add_memory(self, memory: str): """Add generated response to memory""" self.memory_lst.append({"role": "assistant", "content": memory}) - def ask(self): + async def ask(self): """Query and get response""" from langchain_core.messages import AIMessage @@ -57,7 +57,7 @@ def ask(self): elif msg["role"] == "assistant": messages.append(AIMessage(content=msg["content"])) - response = self.llm.invoke(messages) + response = await self.llm.ainvoke(messages) response.name = self.name response.id = self.agent_id return response @@ -76,7 +76,7 @@ def __post_init__(self): ) self.name = "result_extractor" - def extract(self, agent_histories: List[List[Dict]], problem_text: str): + async def extract(self, agent_histories: List[List[Dict]], problem_text: str): """Extract final answer from agent history""" # Build extraction prompt extract_prompt = f"""Based on the debate history, please extract the final answer to the following problem: @@ -88,7 +88,7 @@ def extract(self, agent_histories: List[List[Dict]], problem_text: str): Please provide only the final answer.""" messages = [HumanMessage(content=extract_prompt)] - response = self.llm.invoke(messages) + response = await self.llm.ainvoke(messages) return {"message": response} class MADAgent(AgentSystem): @@ -188,19 +188,19 @@ async def run_agent(self, problem: Dict[str, Any], **kwargs) -> Dict[str, Any]: # First round debate affirmative.add_event(config['affirmative_prompt']) - 
aff_response = affirmative.ask() + aff_response = await affirmative.ask() affirmative.add_memory(aff_response.content) all_messages.append(aff_response) aff_ans = aff_response.content negative.add_event(config['negative_prompt'].replace('##aff_ans##', aff_ans)) - neg_response = negative.ask() + neg_response = await negative.ask() negative.add_memory(neg_response.content) all_messages.append(neg_response) neg_ans = neg_response.content moderator.add_event(config['moderator_prompt'].replace('##aff_ans##', aff_ans).replace('##neg_ans##', neg_ans).replace('##round##', 'first')) - mod_response = moderator.ask() + mod_response = await moderator.ask() moderator.add_memory(mod_response.content) all_messages.append(mod_response) @@ -215,19 +215,19 @@ async def run_agent(self, problem: Dict[str, Any], **kwargs) -> Dict[str, Any]: break affirmative.add_event(config['debate_prompt'].replace('##oppo_ans##', neg_ans)) - aff_response = affirmative.ask() + aff_response = await affirmative.ask() affirmative.add_memory(aff_response.content) all_messages.append(aff_response) aff_ans = aff_response.content negative.add_event(config['debate_prompt'].replace('##oppo_ans##', aff_ans)) - neg_response = negative.ask() + neg_response = await negative.ask() negative.add_memory(neg_response.content) all_messages.append(neg_response) neg_ans = neg_response.content moderator.add_event(config['moderator_prompt'].replace('##aff_ans##', aff_ans).replace('##neg_ans##', neg_ans).replace('##round##', self.round_dct(round_num))) - mod_response = moderator.ask() + mod_response = await moderator.ask() moderator.add_memory(mod_response.content) all_messages.append(mod_response) @@ -236,7 +236,7 @@ async def run_agent(self, problem: Dict[str, Any], **kwargs) -> Dict[str, Any]: except: mod_ans = {"debate_answer": "", "Whether there is a preference": "No"} - # If no consensus reached, use judge + # If still no consensus, use judge final_answer = mod_ans.get("debate_answer", "") if not final_answer: judge = DebateAgent( @@ -249,13 +249,13 @@ async def run_agent(self, problem: Dict[str, Any], **kwargs) -> Dict[str, Any]: # Get final answer candidates judge.add_event(config['judge_prompt_last1'].replace('##aff_ans##', aff_ans).replace('##neg_ans##', neg_ans)) - judge_response1 = judge.ask() + judge_response1 = await judge.ask() judge.add_memory(judge_response1.content) all_messages.append(judge_response1) # Select final answer judge.add_event(config['judge_prompt_last2']) - judge_response2 = judge.ask() + judge_response2 = await judge.ask() judge.add_memory(judge_response2.content) all_messages.append(judge_response2) From 3837ad6dbbd8056ae2626a64ccdbff97dbfc9764 Mon Sep 17 00:00:00 2001 From: fuzi233 Date: Thu, 31 Jul 2025 20:03:39 +0800 Subject: [PATCH 3/5] modified: mas_arena/agents/mad.py --- mas_arena/agents/mad.py | 53 +++++------------------------------------ 1 file changed, 6 insertions(+), 47 deletions(-) diff --git a/mas_arena/agents/mad.py b/mas_arena/agents/mad.py index 91850a4..0ed6025 100644 --- a/mas_arena/agents/mad.py +++ b/mas_arena/agents/mad.py @@ -62,35 +62,6 @@ async def ask(self): response.id = self.agent_id return response -@dataclass -class ResultExtractor: - """Result extractor""" - model_name: str - format_prompt: str = "" - - def __post_init__(self): - self.llm = ChatOpenAI( - model=self.model_name, - request_timeout=60, - max_retries=2 - ) - self.name = "result_extractor" - - async def extract(self, agent_histories: List[List[Dict]], problem_text: str): - """Extract final answer from agent history""" - # 
Build extraction prompt - extract_prompt = f"""Based on the debate history, please extract the final answer to the following problem: - -Problem: {problem_text} - -{self.format_prompt} - -Please provide only the final answer.""" - - messages = [HumanMessage(content=extract_prompt)] - response = await self.llm.ainvoke(messages) - return {"message": response} - class MADAgent(AgentSystem): """Multi-Agent Debate system""" @@ -108,23 +79,18 @@ def __init__(self, name: str = "mad", config: Dict[str, Any] = None): "base_answer": "", "debate_answer": "", "player_meta_prompt": "You are a debater. Hello and welcome to the debate. It's not necessary to fully agree with each other's perspectives, as our objective is to find the correct answer.\nThe debate topic is stated as follows:\n##debate_topic##", - "moderator_meta_prompt": "You are a moderator. There will be two debaters involved in a debate. They will present their answers and discuss their perspectives on the following topic: \"##debate_topic##\"\nAt the end of each round, you will evaluate answers and decide which is correct.", + "moderator_meta_prompt": "You are a moderator. There will be two debaters involved in a debate. They will present their answers and discuss their perspectives on the following topic: \"##debate_topic##\"\nAt the end of each round, you will evaluate answers and decide which is correct.make sure the final answer in the format: {self.format_prompt}", "affirmative_prompt": "##debate_topic##", "negative_prompt": "##aff_ans##\n\nYou disagree with my answer. Provide your answer and reasons.", "moderator_prompt": "Now the ##round## round of debate for both sides has ended.\n\nAffirmative side arguing:\n##aff_ans##\n\nNegative side arguing: ##neg_ans##\n\nYou, as the moderator, will evaluate both sides' answers and determine if there is a clear preference for an answer candidate. If so, please summarize your reasons for supporting affirmative/negative side and give the final answer that you think is correct, and the debate will conclude. If not, the debate will continue to the next round. Now please output your answer in json format, with the format as follows: {\"Whether there is a preference\": \"Yes or No\", \"Supported Side\": \"Affirmative or Negative\", \"Reason\": \"\", \"debate_answer\": \"\"}. Please strictly output in JSON format, do not output irrelevant content.", "judge_prompt_last1": "Affirmative side arguing: ##aff_ans##\n\nNegative side arguing: ##neg_ans##\n\nNow, what answer candidates do we have? Present them without reasons.", - "judge_prompt_last2": "Therefore, ##debate_topic##\nPlease summarize your reasons and give the final answer that you think is correct. Now please output your answer in json format, with the format as follows: {\"Reason\": \"\", \"debate_answer\": \"\"}. Please strictly output in JSON format, do not output irrelevant content.", - "debate_prompt": "##oppo_ans##\n\nDo you agree with my perspective? Please provide your reasons and answer." + "judge_prompt_last2": "Therefore, ##debate_topic##\nPlease summarize your reasons and give the final answer that you think is correct. make sure the final answer in the format: {self.format_prompt}", + "debate_prompt": "##oppo_ans##\n\nDo you agree with my perspective? Please provide your reasons and answer. 
the debate_answer must be the final answer in the format: {self.format_prompt}" } # Initialize components agent_components = self._create_agents() self.players = [w for w in agent_components["workers"] if isinstance(w, DebateAgent)] - extractors = [w for w in agent_components["workers"] if isinstance(w, ResultExtractor)] - if extractors: - self.extractor = extractors[0] - else: - self.extractor = ResultExtractor(self.model_name, self.format_prompt) def _create_agents(self) -> Dict[str, List]: """Create debate participants and result extractor""" @@ -139,12 +105,9 @@ def _create_agents(self) -> Dict[str, List]: temperature=self.temperature ) players.append(agent) - - # Create result extractor - extractor = ResultExtractor(self.model_name, self.format_prompt) - + return { - "workers": players + [extractor] + "workers": players } def init_prompt(self, debate_topic: str): @@ -259,11 +222,7 @@ async def run_agent(self, problem: Dict[str, Any], **kwargs) -> Dict[str, Any]: judge.add_memory(judge_response2.content) all_messages.append(judge_response2) - try: - judge_ans = json.loads(judge_response2.content) - final_answer = judge_ans.get("debate_answer", judge_response2.content) - except: - final_answer = judge_response2.content + final_answer = judge_response2.content return { "messages": all_messages, From 1c283c65997831a7bf94c471fe3a1a6bb5697d56 Mon Sep 17 00:00:00 2001 From: fuzi233 Date: Tue, 5 Aug 2025 10:42:27 +0800 Subject: [PATCH 4/5] modified: mas_arena/agents/mad.py --- mas_arena/agents/mad.py | 33 ++++++++++++++++++++------------- 1 file changed, 20 insertions(+), 13 deletions(-) diff --git a/mas_arena/agents/mad.py b/mas_arena/agents/mad.py index 0ed6025..316d897 100644 --- a/mas_arena/agents/mad.py +++ b/mas_arena/agents/mad.py @@ -11,7 +11,7 @@ from langchain_core.messages import SystemMessage, HumanMessage from mas_arena.agents.base import AgentSystem, AgentSystemRegistry -load_dotenv() +load_dotenv(override=True) @dataclass class DebateAgent: @@ -75,18 +75,19 @@ def __init__(self, name: str = "mad", config: Dict[str, Any] = None): # Debate configuration self.debate_config = { - "debate_topic": "", - "base_answer": "", - "debate_answer": "", - "player_meta_prompt": "You are a debater. Hello and welcome to the debate. It's not necessary to fully agree with each other's perspectives, as our objective is to find the correct answer.\nThe debate topic is stated as follows:\n##debate_topic##", - "moderator_meta_prompt": "You are a moderator. There will be two debaters involved in a debate. They will present their answers and discuss their perspectives on the following topic: \"##debate_topic##\"\nAt the end of each round, you will evaluate answers and decide which is correct.make sure the final answer in the format: {self.format_prompt}", - "affirmative_prompt": "##debate_topic##", - "negative_prompt": "##aff_ans##\n\nYou disagree with my answer. Provide your answer and reasons.", - "moderator_prompt": "Now the ##round## round of debate for both sides has ended.\n\nAffirmative side arguing:\n##aff_ans##\n\nNegative side arguing: ##neg_ans##\n\nYou, as the moderator, will evaluate both sides' answers and determine if there is a clear preference for an answer candidate. If so, please summarize your reasons for supporting affirmative/negative side and give the final answer that you think is correct, and the debate will conclude. If not, the debate will continue to the next round. 
Now please output your answer in json format, with the format as follows: {\"Whether there is a preference\": \"Yes or No\", \"Supported Side\": \"Affirmative or Negative\", \"Reason\": \"\", \"debate_answer\": \"\"}. Please strictly output in JSON format, do not output irrelevant content.", - "judge_prompt_last1": "Affirmative side arguing: ##aff_ans##\n\nNegative side arguing: ##neg_ans##\n\nNow, what answer candidates do we have? Present them without reasons.", - "judge_prompt_last2": "Therefore, ##debate_topic##\nPlease summarize your reasons and give the final answer that you think is correct. make sure the final answer in the format: {self.format_prompt}", - "debate_prompt": "##oppo_ans##\n\nDo you agree with my perspective? Please provide your reasons and answer. the debate_answer must be the final answer in the format: {self.format_prompt}" - } + + "debate_topic": "", + "base_answer": "", + "debate_answer": "", + "player_meta_prompt": "You are a debater. Hello and welcome to the debate. It's not necessary to fully agree with each other's perspectives, as our objective is to find the correct answer.\nThe debate topic is stated as follows:\n##debate_topic##", + "moderator_meta_prompt": "You are a moderator overseeing a debate on the topic: \"##debate_topic##\". Your role is to evaluate arguments and determine the correct answer. **IMPORTANT: You must output your decision in a strict JSON format. The final answer within the JSON must EXACTLY follow the format: {format_prompt}. All backslashes in the answer must be escaped (e.g., use `\\\\` for a single backslash).**", + "affirmative_prompt": "##debate_topic##", + "negative_prompt": "##aff_ans##\n\nYou disagree with my answer. Provide your answer and reasons.", + "moderator_prompt": "Now the ##round## round of debate for both sides has ended.\n\nAffirmative side arguing:\n##aff_ans##\n\nNegative side arguing: ##neg_ans##\n\nYou, as the moderator, will evaluate both sides' answers. If a clear preference emerges, summarize your reasons, declare the supported side, and provide the final correct answer. If not, the debate continues. **Please output your decision strictly in JSON format as follows, with no additional text: {{\"Whether there is a preference\": \"Yes or No\", \"Supported Side\": \"Affirmative or Negative\", \"Reason\": \"Your reason here.\", \"debate_answer\": \"The final answer here, escaping backslashes.\"}}**", + "judge_prompt_last1": "Affirmative side arguing: ##aff_ans##\n\nNegative side arguing: ##neg_ans##\n\nNow, what answer candidates do we have? Present them without reasons.", + "judge_prompt_last2": "Therefore, ##debate_topic##\nPlease summarize your reasons and give the final answer that you think is correct. **IMPORTANT: You must output your decision in a strict JSON format as follows, with no additional text: {{\"Whether there is a preference\": \"Yes\", \"Supported Side\": \"Affirmative or Negative based on your judgement\", \"Reason\": \"Your reason here.\", \"debate_answer\": \"The final answer here, escaping backslashes.\"}}**", + "debate_prompt": "##oppo_ans##\n\nDo you agree with my perspective? Provide your reasons and your answer. 
**IMPORTANT: The debate_answer must EXACTLY follow the format: {format_prompt}.** The latex format requires **two backslashes to be output**" + } # Initialize components agent_components = self._create_agents() @@ -128,6 +129,7 @@ def round_dct(self, num: int) -> str: async def run_agent(self, problem: Dict[str, Any], **kwargs) -> Dict[str, Any]: """Run debate process""" + problem_text = problem["problem"] # Store all LLM responses @@ -144,6 +146,11 @@ async def run_agent(self, problem: Dict[str, Any], **kwargs) -> Dict[str, Any]: negative = self.players[1] moderator = self.players[2] + # Clear memory for each agent to prevent context overflow + affirmative.memory_lst.clear() + negative.memory_lst.clear() + moderator.memory_lst.clear() + # Set meta prompts affirmative.set_meta_prompt(config['player_meta_prompt']) negative.set_meta_prompt(config['player_meta_prompt']) From 78ac12f7b59de9fe9e316949176bb046d72e5740 Mon Sep 17 00:00:00 2001 From: jiaqi Date: Wed, 6 Aug 2025 20:41:30 +0800 Subject: [PATCH 5/5] remove useless packages --- mas_arena/agents/mad.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/mas_arena/agents/mad.py b/mas_arena/agents/mad.py index 316d897..30eb7fb 100644 --- a/mas_arena/agents/mad.py +++ b/mas_arena/agents/mad.py @@ -1,12 +1,9 @@ -import time import json import os -import asyncio from dotenv import load_dotenv from dataclasses import dataclass -from typing import Dict, TypedDict, Any, List, Optional +from typing import Dict, Any, List -from pydantic import BaseModel from langchain_openai import ChatOpenAI from langchain_core.messages import SystemMessage, HumanMessage from mas_arena.agents.base import AgentSystem, AgentSystemRegistry
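For reference, below is a minimal standalone sketch of driving the MADAgent from the final patch. It assumes only the constructor and run_agent() contract shown above, plus the format_prompt attribute that run_agent() reads (normally supplied by the AgentSystem base class / benchmark harness); the problem text, model name, and format string are illustrative placeholders, and OPENAI_API_KEY is expected to be set in the environment.

import asyncio

from mas_arena.agents.mad import MADAgent


async def main() -> None:
    # Same defaults that AgentSystemRegistry.register() supplies for "mad".
    agent = MADAgent(
        name="mad",
        config={
            "num_players": 3,
            "max_round": 3,
            "temperature": 0,
            "model_name": "gpt-4o-mini",  # otherwise falls back to the MODEL_NAME env var
        },
    )

    # run_agent() reads self.format_prompt; in the benchmark it is normally
    # provided by the AgentSystem base class / evaluator, so this assignment
    # only makes the standalone sketch self-contained.
    agent.format_prompt = "Give only the final numeric answer."

    # Illustrative problem; ChatOpenAI needs OPENAI_API_KEY in the environment.
    result = await agent.run_agent({"problem": "What is 17 * 23?"})

    print(len(result["messages"]), "LLM messages exchanged")
    print("final answer:", result["final_answer"])


if __name__ == "__main__":
    asyncio.run(main())

The closing AgentSystemRegistry.register("mad", MADAgent, num_players=3, max_round=3, temperature=0) call is what exposes the system to the rest of the framework, presumably letting the benchmark harness instantiate it by name with those defaults, so direct construction as above should only be needed for ad-hoc testing.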