Compare commits

...

20 Commits

| Author | SHA1 | Message | Date |
|--------|------|---------|------|
| xyz | 35c40d97d6 | feat: add deepseek-coder model support | 2026-01-09 10:41:07 +08:00 |
| xyz | f1b3f29be0 | Stop tracking cache files and sensitive config files in git | 2026-01-09 10:32:38 +08:00 |
|  | bc6ddab598 | Update README.md | 2026-01-09 10:29:28 +08:00 |
| xyz | b1bca4a876 | Update DeepSeek model list & improve the README | 2026-01-09 10:27:24 +08:00 |
| xyz | 667a6b2ba9 | Remove API key variable definition | 2026-01-09 10:22:03 +08:00 |
| xyz | 567a9c7cb0 | Remove hardcoded API key | 2026-01-09 10:14:25 +08:00 |
| xyz | 27ec6b3d85 | feat: add auto expert generation, decision scenario templates, and user feedback | 2026-01-09 09:25:02 +08:00 |
| yueye | 0de5aa038b | Add follow-up questions | 2026-01-07 16:03:41 +08:00 |
| yueye | 35771a9a60 | Fix | 2026-01-07 15:29:01 +08:00 |
| yueye | 758785e57b | Add background image feature | 2026-01-07 15:13:17 +08:00 |
| xyz | 02eea5bfb4 | feat: implement council v3 round-table mode | 2026-01-07 14:42:29 +08:00 |
| xyz | 5913d2dc47 | chore: Update compiled Python bytecode files. | 2026-01-07 14:05:17 +08:00 |
| xyz | 1b7d45fc34 | message | 2026-01-07 14:04:52 +08:00 |
| xyz | 8734b15be6 | feat: implement council v3 round-table mode | 2026-01-07 14:02:17 +08:00 |
| xyz | 2e04312a7e | feat: implement council v3 round-table mode | 2026-01-07 13:57:30 +08:00 |
| xyz | 84b01a07a2 | feat: implement council v3 round-table mode | 2026-01-07 13:56:37 +08:00 |
| xyz | e90d42ac7b | feat: implement council v3 round-table mode | 2026-01-07 13:47:38 +08:00 |
| xyz | da7ccd2d26 | feat: implement council v3 round-table mode | 2026-01-07 13:44:46 +08:00 |
| xyz | d26a7a36be | refactor: Reimplement research workflow with dynamic expert configuration, multi-round discussion, and Mermaid diagram synthesis. | 2026-01-07 13:38:51 +08:00 |
| xyz | 9dda930868 | feat: Implement Multi-Model Council mode with 3-expert workflow | 2026-01-07 12:59:56 +08:00 |
28 changed files with 2692 additions and 217 deletions

10
.gitignore vendored Normal file

@@ -0,0 +1,10 @@
# Python bytecode cache
__pycache__/
*.py[cod]
*$py.class
# Project-specific storage/cache folder
.storage/
# Environment variable file (usually contains sensitive information)
.env

1
.python-version Normal file

@@ -0,0 +1 @@
3.12

105
README.md

@@ -1 +1,104 @@
# multi-agent
# 🍎 Intelligent Decision Workshop (Multi-Agent Council V4)
An AI-driven multi-agent decision analysis system, powered by a multi-model council
## 👥 Team Members and Contributions
| Name | Student ID | Main Contribution (Specific Responsibilities) |
|------|------|---------------------|
| 徐睿敏 | 2411020228 | (Team lead) Core logic development, Multi-Agent orchestration, prompt engineering |
| 高玮 | 2411020226 | Front-end interface design (Streamlit UI), decision scenario module, slide deck |
| 马菁艺 | 2411020230 | Documentation, testing and bug fixes, user feedback module |
## ✨ Core Features
### 🧪 Multi-Model Council V4 (Council Mode)
- **Multi-round discussion**: Experts talk through multiple rounds like a real meeting, critiquing and building on each other's views
- **Dynamic expert setup**: Configure 2-5 experts and assign each one the model it is best suited to (see the configuration sketch below)
- **🪄 Auto expert generation**: The AI automatically recommends the most suitable expert roles for the topic
- **Final decision synthesis**: The last expert synthesizes all viewpoints, produces a plan, and draws a Mermaid roadmap
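As a reference, a minimal sketch of how a council run is wired up programmatically, based on the `ResearchConfig` / `ResearchManager` interfaces introduced in this change (the API key and model choices below are placeholders; the Streamlit UI performs these steps for you):

```python
from orchestrator.research_manager import ResearchConfig, ResearchManager

manager = ResearchManager(api_key="sk-...", provider="deepseek")  # placeholder key
cfg = ResearchConfig(
    topic="是否应该上线电商平台?",
    experts=[
        {"name": "市场渠道分析师", "model": "deepseek-chat"},
        {"name": "供应链顾问", "model": "deepseek-chat"},
        {"name": "综合规划师", "model": "deepseek-reasoner"},  # the last expert synthesizes
    ],
    language="Chinese",
)
manager.create_agents(cfg)
events = manager.collaborate(cfg.topic, cfg.context)  # streams the round-table discussion
```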
### 🎯 Built-in Decision Scenarios
The system ships with 4 typical decision scenarios, each preconfigured with professional starter questions:
| Scenario | Description |
|------|------|
| 🚀 New product launch review | Evaluate product feasibility, market potential, and the implementation plan |
| 💰 Investment approval | Analyze an investment project's ROI, risk, and strategic value |
| 🤝 Partner evaluation | Assess a partner's fit and the value of the collaboration |
| 📦 Supplier evaluation | Compare and analyze suppliers' overall capabilities |
### 🎭 Debate Workshop
Lets the AI play roles with different stances and use debate to help clarify the pros and cons of complex decisions
### 💬 User Feedback
A built-in user feedback system for collecting feature suggestions and usage experience
### 🌐 Multi-Provider Support
- **DeepSeek**: V3, R1, Coder
- **OpenAI**: GPT-4o, GPT-4o-mini
- **Anthropic**: Claude 3.5 Sonnet
- **Google**: Gemini 1.5/2.0
- **SiliconFlow / AIHubMix / DeepSeek**
---
## 🛠️ Installation
```bash
# Clone the project
git clone http://hblu.top:3000/Python2025-CourseDesign/wd666.git
cd wd666
# Initialize the uv project (first-time use only)
uv init
# Install dependencies
uv add streamlit openai anthropic python-dotenv
# Or sync existing dependencies
uv sync
```
## 🚀 Quick Start
```bash
cd wd666
uv run streamlit run app.py
```
### Usage Steps
1. **Configure the API**: Select a Provider in the sidebar and enter your API Key
2. **Pick a scenario**: Click a preset decision scenario or enter a custom topic
3. **Generate experts**: Click "🪄 根据主题自动生成专家" (auto-generate experts from the topic) or configure them manually
4. **Run the decision**: Watch the experts converse with each other and produce a combined plan
---
## 📁 Project Structure
```
multi_agent_workshop/
├── app.py                        # Streamlit main application
├── config.py                     # Configuration
├── agents/                       # Agent definitions
│   ├── agent_profiles.py         # Preset role profiles
│   ├── base_agent.py             # Base Agent class
│   └── research_agent.py         # Research Agent
├── orchestrator/                 # Orchestrators
│   ├── debate_manager.py         # Debate management
│   └── research_manager.py       # Council management
├── utils/
│   ├── llm_client.py             # LLM client wrapper
│   ├── storage.py                # Storage management
│   └── auto_agent_generator.py   # Auto expert generation
└── report/                       # Report generation
```
## Reflections
In today's work and study, decisions permeate many aspects of life, and when making them we often face problems such as personal emotion and experience exerting too much influence, large volumes of data going without effective analysis, and a lack of structured recommendations. This program gives users a systematic evaluation framework, clarifies decision priorities, and provides built-in evaluation dimensions and a weighting system. We also introduce a "virtual meeting" feature into the decision process, simulating the perspectives and suggestions of the different roles in a real decision meeting and helping users fully understand each stakeholder's position.
Using AI assistance during development undoubtedly lowered the barrier to writing code and sped it up considerably. We first sketched out the program's features, had the AI generate a skeleton, and then refined the functionality based on testing. This shifted our programming mindset from fussing over syntax details to focusing on business logic, and from thinking while coding to designing first and then optimizing. While building the decision system, the AI not only generated the core code quickly but also creatively suggested the "simulated meeting" feature, making the project richer and more well-rounded.
Of course, AI-assisted programming has its challenges and pitfalls. The biggest test is expressing requirements precisely: vague descriptions get vague answers. During design, because one requirement was not described completely, the AI removed a feature we still needed while optimizing the code, which exposed its limitations and reminded us to state requirements in more structured language to get solutions that better match what we want.
## 📝 License
[MIT License](LICENSE)

Binary file not shown.

Binary file not shown.

agents/base_agent.py

@@ -19,16 +19,18 @@ class AgentMessage:
class BaseAgent:
"""Agent base class"""
def __init__(self, agent_id: str, llm_client):
def __init__(self, agent_id: str, llm_client, language: str = "Chinese"):
"""
Initialize the Agent
Args:
agent_id: Agent identifier (e.g. 'ceo', 'cto')
llm_client: LLM client instance
language: Output language
"""
self.agent_id = agent_id
self.llm_client = llm_client
self.language = language
profile = get_agent_profile(agent_id)
if not profile:
@@ -38,10 +40,17 @@ class BaseAgent:
self.emoji = profile["emoji"]
self.perspective = profile["perspective"]
self.focus_areas = profile["focus_areas"]
self.system_prompt = profile["system_prompt"]
self.system_prompt = f"{profile['system_prompt']}\n\nIMPORTANT: You MUST output your response in {self.language}."
# Store conversation history
self.conversation_history = []
@property
def model_name(self) -> str:
"""Return the name of the model currently in use"""
if hasattr(self.llm_client, "model"):
return self.llm_client.model
return "Unknown Model"
def generate_response(
self,

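A minimal usage sketch of the new constructor signature, assuming an `LLMClient` built as in `orchestrator/research_manager.py` and a profile id such as `'ceo'` registered in `agents/agent_profiles.py` (the key is a placeholder):

```python
from agents.base_agent import BaseAgent
from utils.llm_client import LLMClient

client = LLMClient(provider="deepseek", api_key="sk-...", model="deepseek-chat")
agent = BaseAgent("ceo", client, language="English")

# The system prompt now ends with an instruction to answer in the chosen language,
# and model_name falls back to "Unknown Model" if the client has no .model attribute.
print(agent.model_name)  # -> "deepseek-chat"
```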
agents/research_agent.py

@@ -1,39 +1,56 @@
from typing import Generator, List, Dict
from typing import Generator
from utils.llm_client import LLMClient
import config
class ResearchAgent:
"""Agent dedicated to research mode"""
def __init__(self, role: str, llm_client: LLMClient):
def __init__(self, role: str, llm_client: LLMClient, name: str = None, language: str = "Chinese"):
self.role = role
self.llm_client = llm_client
self.role_config = config.RESEARCH_MODEL_ROLES.get(role, {})
self.name = self.role_config.get("name", role.capitalize())
self.name = name if name else self.role_config.get("name", role.capitalize())
self.language = language
@property
def model_name(self) -> str:
return self.llm_client.model
def _get_system_prompt(self, context: str = "") -> str:
if self.role == "planner":
return f"""You are a Senior Research Planner.
Your goal is to break down a complex user topic into a structured research plan.
You must create a clear, step-by-step plan that covers different angles of the topic.
Format your output as a Markdown list of steps.
base_prompt = ""
if self.role == "council_member":
base_prompt = f"""You are {self.name}, a member of the Multi-Model Decision Council.
Your goal is to participate in a round-table discussion to solve the user's problem.
Be conversational, insightful, and constructive.
Build upon others' ideas or respectfully disagree with valid reasoning.
Context: {context}"""
elif self.role == "expert_a":
base_prompt = f"""You are Expert A, a Senior Analyst.
You are participating in a round-table discussion.
Your goal is to analyze the topic and propose solutions.
Be conversational, direct, and responsive to other experts.
Do not write a full final report; focus on the current discussion turn.
Context: {context}"""
elif self.role == "researcher":
return f"""You are a Deep Researcher.
Your goal is to execute a specific research step and provide detailed, in-depth analysis.
Use your vast knowledge to provide specific facts, figures, and logical reasoning.
Do not be superficial. Go deep.
elif self.role == "expert_b":
base_prompt = f"""You are Expert B, a Critical Reviewer.
You are participating in a round-table discussion.
Your goal is to critique Expert A's points and offer alternative perspectives.
Be conversational and constructive. Challenge assumptions directly.
Context: {context}"""
elif self.role == "writer":
return f"""You are a Senior Report Writer.
Your goal is to synthesize multiple research findings into a cohesive, high-quality report.
The report should be well-structured, easy to read, and provide actionable insights.
Context: {context}"""
elif self.role == "expert_c":
base_prompt = f"""You are Expert C, a Senior Strategist and Visual Thinker.
Your goal is to synthesize the final output.
Combine the structural strength of Expert A with the critical insights of Expert B.
Produce a final, polished, comprehensive plan or report.
CRITICAL: You MUST include a Mermaid.js diagram (using ```mermaid code block) to visualize the timeline, process, or architecture."""
else:
return "You are a helpful assistant."
base_prompt = "You are a helpful assistant."
return f"{base_prompt}\n\nIMPORTANT: You MUST output your response in {self.language}."
def generate(self, prompt: str, context: str = "") -> Generator[str, None, None]:
"""Generate response stream"""

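A short sketch of exercising one of these role prompts, under the same placeholder-client assumption (Expert C is the role required to emit a Mermaid block):

```python
from agents.research_agent import ResearchAgent
from utils.llm_client import LLMClient

client = LLMClient(provider="deepseek", api_key="sk-...", model="deepseek-reasoner")
synthesizer = ResearchAgent("expert_c", client, language="English")

# generate() streams text chunks; collect them into one report string.
report = "".join(synthesizer.generate("Synthesize the discussion on launching an e-commerce platform."))
```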
1060
app.py

File diff suppressed because it is too large

config.py

@@ -9,35 +9,110 @@ load_dotenv()
# API configuration
ANTHROPIC_API_KEY = os.getenv("ANTHROPIC_API_KEY", "")
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")
AIHUBMIX_API_KEY = os.getenv("AIHUBMIX_API_KEY", "sk-yd8Tik0nFW5emKYcBdFc433b7c8b4dC182848f76819bBe73")
AIHUBMIX_API_KEY = os.getenv("AIHUBMIX_API_KEY", "")
DEEPSEEK_API_KEY = os.getenv("DEEPSEEK_API_KEY", "")
SILICONFLOW_API_KEY = os.getenv("SILICONFLOW_API_KEY", "")
# AIHubMix configuration
# LLM Providers Configuration
LLM_PROVIDERS = {
"AIHubMix": {
"base_url": "https://aihubmix.com/v1",
"api_key_var": "AIHUBMIX_API_KEY",
"default_model": "gpt-4o"
},
"DeepSeek": {
"base_url": "https://api.deepseek.com",
"api_key_var": "DEEPSEEK_API_KEY",
"default_model": "deepseek-chat"
},
"SiliconFlow": {
"base_url": "https://api.siliconflow.cn/v1",
"api_key_var": "SILICONFLOW_API_KEY",
"default_model": "deepseek-ai/DeepSeek-V3" # SiliconFlow often uses full path
},
"OpenAI": {
"base_url": "https://api.openai.com/v1",
"api_key_var": "OPENAI_API_KEY",
"default_model": "gpt-4o"
},
"Custom": {
"base_url": "http://localhost:8000/v1",
"api_key_var": "CUSTOM_API_KEY",
"default_model": "local-model"
}
}
# AIHubMix configuration (legacy; kept for backward compatibility if needed, but the main logic uses LLM_PROVIDERS)
AIHUBMIX_BASE_URL = "https://aihubmix.com/v1"
# Model configuration
DEFAULT_MODEL = "gpt-4o" # Model supported by AIHubMix
LLM_PROVIDER = "aihubmix" # Use AIHubMix by default
# List of supported models
AVAILABLE_MODELS = {
# OpenAI
"gpt-4o": "GPT-4o (OpenAI)",
"gpt-4o-mini": "GPT-4o Mini (OpenAI)",
"gpt-4-turbo": "GPT-4 Turbo (OpenAI)",
# Anthropic
"claude-3-5-sonnet-20241022": "Claude 3.5 Sonnet (Anthropic)",
"claude-3-opus-20240229": "Claude 3 Opus (Anthropic)",
"claude-3-haiku-20240307": "Claude 3 Haiku (Anthropic)",
# Google
"gemini-1.5-pro": "Gemini 1.5 Pro (Google)",
"gemini-1.5-flash": "Gemini 1.5 Flash (Google)",
"gemini-2.0-flash-exp": "Gemini 2.0 Flash Exp (Google)",
# DeepSeek (official API: https://api.deepseek.com)
"deepseek-chat": "DeepSeek V3 通用对话 (DeepSeek)",
"deepseek-reasoner": "DeepSeek R1 深度推理 (DeepSeek)",
"deepseek-coder": "DeepSeek Coder 代码模型 (DeepSeek)",
# Meta
"llama-3.3-70b-instruct": "Llama 3.3 70B (Meta)",
"llama-3.1-405b-instruct": "Llama 3.1 405B (Meta)",
# Alibaba
"qwen-2.5-72b-instruct": "Qwen 2.5 72B (Alibaba)",
"qwen-plus": "Qwen Plus (Alibaba)",
"qwen-turbo": "Qwen Turbo (Alibaba)",
# Mistral
"mistral-large-latest": "Mistral Large (Mistral)",
# Perplexity
"llama-3.1-sonar-huge-128k-online": "Sonar Huge Online (Perplexity)",
}
# Debate configuration
MAX_DEBATE_ROUNDS = 3 # Maximum number of debate rounds
MAX_AGENTS = 6 # Maximum number of participating Agents
# Supported output languages
SUPPORTED_LANGUAGES = ["Chinese", "English", "Japanese", "Spanish", "French", "German"]
# Generation configuration
MAX_OUTPUT_TOKENS = 300 # Limit single-reply length to keep output concise
# Model role configuration for research mode
RESEARCH_MODEL_ROLES = {
"planner": {
"name": "Planner",
"expert_a": {
"name": "Expert A (Analyst)",
"default_model": "gpt-4o",
"description": "负责拆解问题,制定研究计划"
"description": "负责初步分析,提出核心观点和方案"
},
"researcher": {
"name": "Researcher",
"expert_b": {
"name": "Expert B (Critique)",
"default_model": "gemini-1.5-pro",
"description": "负责执行具体的研究步骤,深度分析"
"description": "负责批判性分析,指出潜在问题和漏洞"
},
"writer": {
"name": "Writer",
"expert_c": {
"name": "Expert C (Synthesizer)",
"default_model": "claude-3-5-sonnet-20241022",
"description": "负责汇总信息,撰写最终报告"
"description": "负责综合各方观点,生成最终决策方案"
}
}
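A small sketch of one way the new `LLM_PROVIDERS` table can be consumed (the actual wiring lives in `app.py`, whose diff is suppressed above; the relevant environment variable must already be set):

```python
import os
import config
from utils.llm_client import LLMClient

provider_name = "DeepSeek"
p = config.LLM_PROVIDERS[provider_name]
client = LLMClient(
    provider=provider_name.lower(),           # matches the provider keys in llm_client.py
    api_key=os.getenv(p["api_key_var"], ""),
    base_url=p["base_url"],
    model=p["default_model"],
)
```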

6
main.py Normal file

@@ -0,0 +1,6 @@
def main():
print("Hello from multi-agent!")
if __name__ == "__main__":
main()

orchestrator/debate_manager.py

@@ -17,6 +17,8 @@ class DebateConfig:
context: str = ""
agent_ids: List[str] = None
max_rounds: int = 2
agent_clients: dict = None # Map[agent_id, LLMClient]
language: str = "Chinese"
@dataclass
@@ -58,7 +60,12 @@ class DebateManager:
# Create the participating Agents
for agent_id in debate_config.agent_ids:
agent = BaseAgent(agent_id, self.llm_client)
# Check if specific client is provided in config, else use default
client = self.llm_client
if hasattr(debate_config, 'agent_clients') and debate_config.agent_clients and agent_id in debate_config.agent_clients:
client = debate_config.agent_clients[agent_id]
agent = BaseAgent(agent_id, client, language=debate_config.language)
self.agents.append(agent)
def run_debate_stream(
@@ -106,6 +113,7 @@
"agent_id": agent.agent_id,
"agent_name": agent.name,
"emoji": agent.emoji,
"model_name": agent.model_name,
"round": round_num
}
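A hedged sketch of the per-agent model override added here, using only the `DebateConfig` fields visible in this hunk (agent ids assume profiles such as 'ceo' and 'cto' from `agents/agent_profiles.py`; keys are placeholders):

```python
from utils.llm_client import LLMClient

# Give each debater its own model; ids must match profiles in agent_profiles.py.
agent_clients = {
    "ceo": LLMClient(provider="deepseek", api_key="sk-...", model="deepseek-chat"),
    "cto": LLMClient(provider="deepseek", api_key="sk-...", model="deepseek-reasoner"),
}
# Passed as DebateConfig(..., agent_ids=["ceo", "cto"], agent_clients=agent_clients);
# agents without an entry fall back to the manager's default client.
```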

orchestrator/research_manager.py

@@ -8,44 +8,92 @@ import config
class ResearchConfig:
topic: str
context: str = ""
planner_model: str = "gpt-4o"
researcher_model: str = "gemini-1.5-pro"
writer_model: str = "claude-3-5-sonnet-20241022"
# Dynamic list of experts: [{"name": "Expert 1", "model": "gpt-4o", "role": "analyst"}, ...]
experts: List[Dict[str, str]] = None
language: str = "Chinese"
class ResearchManager:
"""Manages the Deep Research workflow"""
"""Manages the Multi-Model Council workflow"""
def __init__(self, api_key: str, base_url: str = None, provider: str = "aihubmix"):
self.api_key = api_key
self.base_url = base_url
self.provider = provider
self.agents = {}
self.agents = []
def _get_client(self, model: str) -> LLMClient:
return LLMClient(
provider=self.provider,
provider=self.provider, # Configured to respect provider or default to aihubmix logic inside client
api_key=self.api_key,
base_url=self.base_url,
model=model
)
def create_agents(self, config: ResearchConfig):
"""Initialize agents with specific models"""
self.agents["planner"] = ResearchAgent("planner", self._get_client(config.planner_model))
self.agents["researcher"] = ResearchAgent("researcher", self._get_client(config.researcher_model))
self.agents["writer"] = ResearchAgent("writer", self._get_client(config.writer_model))
"""Initialize agents with specific models from config"""
self.agents = []
if config.experts:
for idx, expert_conf in enumerate(config.experts):
role_type = "council_member"
agent = ResearchAgent(
role=role_type,
llm_client=self._get_client(expert_conf["model"]),
name=expert_conf.get("name", f"Expert {idx+1}"),
language=config.language
)
self.agents.append(agent)
def generate_plan(self, topic: str, context: str) -> Generator[str, None, None]:
"""Step 1: Generate Research Plan"""
prompt = f"Please create a comprehensive research plan for the topic: '{topic}'.\nBreak it down into 3-5 distinct, actionable steps."
yield from self.agents["planner"].generate(prompt, context)
def collaborate(self, topic: str, context: str, max_rounds: int = 3) -> Generator[Dict[str, str], None, None]:
"""
Execute the collaborative research process with multi-round discussion:
1. Conversation Loop (All Experts Round Robin)
2. Final Synthesis (Last Expert)
"""
conversation_history = []
discussion_context = f"Topic: '{topic}'\nBackground Context: {context}\n\n"
# Round-Robin Discussion
for round_num in range(1, max_rounds + 1):
for agent in self.agents:
yield {"type": "step_start", "step": f"Round {round_num}: {agent.name}", "agent": agent.name, "model": agent.model_name}
# Construct prompt
if round_num == 1 and not conversation_history:
prompt = f"You are {agent.name}. You are starting the discussion on '{topic}'. Provide your initial analysis and key points. Be conversational but substantive."
else:
prompt = f"You are {agent.name}. Review the discussion so far. Respond to previous points. Defend your views or refine them. Keep the discussion moving towards a solution.\n\nDiscussion History:\n{_format_history(conversation_history)}"
response = ""
for chunk in agent.generate(prompt, context=discussion_context):
response += chunk
yield {"type": "content", "content": chunk}
conversation_history.append({"agent": agent.name, "content": response})
yield {"type": "step_end", "output": response}
def execute_step(self, step: str, previous_findings: str) -> Generator[str, None, None]:
"""Step 2: Execute a single research step"""
prompt = f"Execute this research step: '{step}'.\nPrevious findings: {previous_findings}"
yield from self.agents["researcher"].generate(prompt)
# Final Synthesis by the LAST agent (or a specific designated one)
synthesizer = self.agents[-1]
yield {"type": "step_start", "step": f"Final Synthesis ({synthesizer.name})", "agent": synthesizer.name, "model": synthesizer.model_name}
prompt_syn = f"""Synthesize the entire discussion into a final comprehensive plan for '{topic}'.
Discussion History:
{_format_history(conversation_history)}
def generate_report(self, topic: str, all_findings: str) -> Generator[str, None, None]:
"""Step 3: Generate Final Report"""
prompt = f"Write a final comprehensive report on '{topic}' based on these findings:\n{all_findings}"
yield from self.agents["writer"].generate(prompt)
IMPORTANT:
1. Reconcile the different viewpoints from all experts.
2. Provide a concrete action plan.
3. You MUST include a Mermaid.js diagram (using ```mermaid code block) to visualize the roadmap or process."""
findings_syn = ""
for chunk in synthesizer.generate(prompt_syn, context=discussion_context):
findings_syn += chunk
yield {"type": "content", "content": chunk}
yield {"type": "step_end", "output": findings_syn}
def _format_history(history: List[Dict[str, str]]) -> str:
formatted = ""
for turn in history:
formatted += f"[{turn['agent']}]: {turn['content']}\n\n"
return formatted
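Putting the pieces together, a self-contained sketch of driving `collaborate()` and consuming its event stream (placeholder key; these are the same event shapes the Streamlit UI renders):

```python
from orchestrator.research_manager import ResearchConfig, ResearchManager

manager = ResearchManager(api_key="sk-...", provider="deepseek")   # placeholder key
cfg = ResearchConfig(topic="新产品发布评审", experts=[
    {"name": "产品经理", "model": "deepseek-chat"},
    {"name": "财务顾问", "model": "deepseek-reasoner"},
])
manager.create_agents(cfg)

for event in manager.collaborate(cfg.topic, cfg.context, max_rounds=2):
    if event["type"] == "step_start":
        print(f"\n--- {event['step']} ({event['model']}) ---")
    elif event["type"] == "content":
        print(event["content"], end="")        # stream tokens as they arrive
    elif event["type"] == "step_end":
        final_text = event["output"]           # full text of that expert's turn
```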

13
pyproject.toml Normal file

@@ -0,0 +1,13 @@
[project]
name = "multi-agent"
version = "0.1.0"
description = "Add your description here"
readme = "README.md"
requires-python = ">=3.12"
dependencies = [
"anthropic>=0.75.0",
"openai>=2.14.0",
"pydantic>=2.12.5",
"python-dotenv>=1.2.1",
"streamlit>=1.52.2",
]

utils/auto_agent_generator.py

@@ -0,0 +1,108 @@
"""
Auto Agent Generator - automatically generate expert configurations from a topic
Uses LLM to analyze the topic and suggest appropriate expert agents.
"""
import json
import re
from typing import List, Dict
from utils.llm_client import LLMClient
EXPERT_GENERATION_PROMPT = """You are an expert team composition advisor. Given a research/decision topic, you need to suggest the most appropriate team of experts to analyze it.
Instructions:
1. Analyze the topic carefully to understand its domain and key aspects
2. Generate {num_experts} distinct expert roles that would provide the most valuable perspectives
3. Each expert should have a unique focus area relevant to the topic
4. The LAST expert should always be a "Synthesizer" role who can integrate all perspectives
Output Format (MUST be valid JSON array):
[
{{"name": "Expert Name", "perspective": "Brief description of their viewpoint", "focus": "Key areas they analyze"}},
...
]
Examples of good expert names based on topic:
- For "Should we launch an e-commerce platform?": "市场渠道分析师", "电商运营专家", "供应链顾问", "数字化转型综合师"
- For "Career transition to AI field": "职业发展顾问", "AI行业专家", "技能评估分析师", "综合规划师"
IMPORTANT:
- Use {language} for all names and descriptions
- Make names specific to the topic, not generic like "Expert 1"
- The last expert MUST be a synthesizer/integrator type
Topic: {topic}
Generate exactly {num_experts} experts as a JSON array:"""
def generate_experts_for_topic(
topic: str,
num_experts: int,
llm_client: LLMClient,
language: str = "Chinese"
) -> List[Dict[str, str]]:
"""
Use LLM to generate appropriate expert configurations based on the topic.
Args:
topic: The research/decision topic
num_experts: Number of experts to generate (2-5)
llm_client: LLM client instance for API calls
language: Output language (Chinese/English)
Returns:
List of expert dicts: [{"name": "...", "perspective": "...", "focus": "..."}, ...]
"""
if not topic.strip():
return []
prompt = EXPERT_GENERATION_PROMPT.format(
topic=topic,
num_experts=num_experts,
language=language
)
try:
response = llm_client.chat(
system_prompt="You are a helpful assistant that generates JSON output only. No markdown, no explanation.",
user_prompt=prompt,
max_tokens=800
)
# Extract JSON from response (handle potential markdown wrapping)
json_match = re.search(r'\[[\s\S]*\]', response)
if json_match:
experts = json.loads(json_match.group())
# Validate structure
if isinstance(experts, list) and len(experts) >= 1:
validated = []
for exp in experts[:num_experts]:
if isinstance(exp, dict) and "name" in exp:
validated.append({
"name": exp.get("name", "Expert"),
"perspective": exp.get("perspective", ""),
"focus": exp.get("focus", "")
})
return validated
except (json.JSONDecodeError, Exception) as e:
print(f"[AutoAgentGenerator] Error parsing LLM response: {e}")
# Fallback: return generic experts
fallback = []
for i in range(num_experts):
if i == num_experts - 1:
fallback.append({"name": f"综合分析师", "perspective": "整合视角", "focus": "综合决策"})
else:
fallback.append({"name": f"专家 {i+1}", "perspective": "分析视角", "focus": "专业分析"})
return fallback
def get_default_model_for_expert(expert_index: int, total_experts: int, available_models: list) -> str:
"""
Assign a default model to an expert based on their position.
Spreads experts across available models for diversity.
"""
if not available_models:
return "gpt-4o"
return available_models[expert_index % len(available_models)]
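A usage sketch of the generator (client and key are placeholders; on any parsing failure the function falls back to the generic experts defined above):

```python
from utils.auto_agent_generator import generate_experts_for_topic
from utils.llm_client import LLMClient

client = LLMClient(provider="deepseek", api_key="sk-...", model="deepseek-chat")
experts = generate_experts_for_topic(
    topic="是否应该进入东南亚市场?",
    num_experts=4,
    llm_client=client,
    language="Chinese",
)
# -> [{"name": ..., "perspective": ..., "focus": ...}, ...]; the last entry is the synthesizer.
```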

utils/llm_client.py

@@ -5,6 +5,8 @@ from typing import Generator
import os
import config
class LLMClient:
"""Unified LLM API client"""
@@ -36,24 +38,25 @@
self.client = OpenAI(api_key=api_key)
self.model = model or "gpt-4o"
elif self.provider == "aihubmix":
# AIHubMix is compatible with the OpenAI API format
elif self.provider in ["aihubmix", "deepseek", "siliconflow", "custom"]:
# Providers with OpenAI-compatible interfaces
from openai import OpenAI
default_urls = {
"aihubmix": "https://aihubmix.com/v1",
"deepseek": "https://api.deepseek.com",
"siliconflow": "https://api.siliconflow.cn/v1",
"custom": "http://localhost:8000/v1"
}
final_base_url = base_url or default_urls.get(self.provider)
self.client = OpenAI(
api_key=api_key,
base_url=base_url or "https://aihubmix.com/v1"
base_url=final_base_url
)
self.model = model or "gpt-4o"
elif self.provider == "custom":
# Custom OpenAI-compatible interface (vLLM, Ollama, TGI, etc.)
from openai import OpenAI
self.client = OpenAI(
api_key=api_key or "not-needed",
base_url=base_url or "http://localhost:8000/v1"
)
self.model = model or "local-model"
else:
raise ValueError(f"不支持的 provider: {self.provider}")
@@ -61,7 +64,7 @@
self,
system_prompt: str,
user_prompt: str,
max_tokens: int = 1024
max_tokens: int = config.MAX_OUTPUT_TOKENS
) -> Generator[str, None, None]:
"""
Streaming chat

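A minimal sketch of the consolidated provider routing, assuming `base_url` still defaults to None so the `default_urls` fallback applies (the key is a placeholder):

```python
from utils.llm_client import LLMClient

# "siliconflow" resolves to https://api.siliconflow.cn/v1 via default_urls;
# passing base_url explicitly would override it.
client = LLMClient(
    provider="siliconflow",
    api_key="sk-...",
    model="deepseek-ai/DeepSeek-V3",
)
```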
184
utils/storage.py Normal file

@@ -0,0 +1,184 @@
"""
Storage Manager - Handle local persistence of configuration, history/reports, and assets.
"""
import os
import json
import time
from typing import List, Dict, Any
from pathlib import Path
# Constants
STORAGE_DIR = ".storage"
CONFIG_FILE = "config.json"
HISTORY_DIR = "history"
ASSETS_DIR = "assets"
class StorageManager:
def __init__(self):
self.root_dir = Path(STORAGE_DIR)
self.config_path = self.root_dir / CONFIG_FILE
self.history_dir = self.root_dir / HISTORY_DIR
self.assets_dir = self.root_dir / ASSETS_DIR
# Ensure directories exist
self.root_dir.mkdir(exist_ok=True)
self.history_dir.mkdir(exist_ok=True)
self.assets_dir.mkdir(exist_ok=True)
def save_config(self, config_data: Dict[str, Any]):
"""Save UI configuration to file"""
try:
with open(self.config_path, 'w', encoding='utf-8') as f:
json.dump(config_data, f, indent=2, ensure_ascii=False)
except Exception as e:
print(f"Error saving config: {e}")
def load_config(self) -> Dict[str, Any]:
"""Load UI configuration from file"""
if not self.config_path.exists():
return {}
try:
with open(self.config_path, 'r', encoding='utf-8') as f:
return json.load(f)
except Exception as e:
print(f"Error loading config: {e}")
return {}
def save_asset(self, uploaded_file) -> str:
"""Save an uploaded file (e.g., background image) into assets directory.
Args:
uploaded_file: a file-like object (Streamlit UploadedFile) or bytes-like
Returns:
The saved file path as string, or None on failure.
"""
try:
# Determine filename
if hasattr(uploaded_file, 'name'):
filename = uploaded_file.name
else:
filename = f"asset_{int(time.time())}"
# sanitize
safe_name = "".join([c for c in filename if c.isalnum() or c in (' ', '.', '_', '-')]).strip().replace(' ', '_')
dest = self.assets_dir / f"{int(time.time())}_{safe_name}"
# Write bytes
with open(dest, 'wb') as out:
# Streamlit UploadedFile has getbuffer()
if hasattr(uploaded_file, 'getbuffer'):
out.write(uploaded_file.getbuffer())
else:
# try reading
data = uploaded_file.read()
if isinstance(data, str):
data = data.encode('utf-8')
out.write(data)
return str(dest)
except Exception as e:
print(f"Error saving asset: {e}")
return None
def save_history(self, session_type: str, topic: str, content: str, metadata: Dict[str, Any] = None):
"""
Save a session report/history
Args:
session_type: 'council' or 'debate'
topic: The main topic
content: The full markdown report or content
metadata: Additional info (model used, date, etc)
"""
timestamp = int(time.time())
date_str = time.strftime("%Y-%m-%d %H:%M:%S")
# Create a safe filename
safe_topic = "".join([c for c in topic[:20] if c.isalnum() or c in (' ', '_', '-')]).strip().replace(' ', '_')
filename = f"{timestamp}_{session_type}_{safe_topic}.json"
data = {
"id": str(timestamp),
"timestamp": timestamp,
"date": date_str,
"type": session_type,
"topic": topic,
"content": content,
"metadata": metadata or {}
}
try:
with open(self.history_dir / filename, 'w', encoding='utf-8') as f:
json.dump(data, f, indent=2, ensure_ascii=False)
return True
except Exception as e:
print(f"Error saving history: {e}")
return False
def list_history(self) -> List[Dict[str, Any]]:
"""List all history items (metadata only)"""
items = []
if not self.history_dir.exists():
return []
for file in self.history_dir.glob("*.json"):
try:
with open(file, 'r', encoding='utf-8') as f:
data = json.load(f)
# Return summary info
items.append({
"id": data.get("id"),
"date": data.get("date"),
"type": data.get("type"),
"topic": data.get("topic"),
"filename": file.name
})
except Exception:
continue
# Sort by timestamp desc
return sorted(items, key=lambda x: x.get("date", ""), reverse=True)
def load_history_item(self, filename: str) -> Dict[str, Any]:
"""Load full content of a history item"""
path = self.history_dir / filename
if not path.exists():
return None
try:
with open(path, 'r', encoding='utf-8') as f:
return json.load(f)
except Exception:
return None
# ==================== Session Cache (Resume Functionality) ====================
def save_session_state(self, key: str, data: Dict[str, Any]):
"""Save temporary session state for recovery"""
try:
# We use a dedicated cache file per key
cache_file = self.root_dir / f"{key}_cache.json"
data["_timestamp"] = int(time.time())
with open(cache_file, 'w', encoding='utf-8') as f:
json.dump(data, f, indent=2, ensure_ascii=False)
except Exception as e:
print(f"Error saving session cache: {e}")
def load_session_state(self, key: str) -> Dict[str, Any]:
"""Load temporary session state"""
cache_file = self.root_dir / f"{key}_cache.json"
if not cache_file.exists():
return None
try:
with open(cache_file, 'r', encoding='utf-8') as f:
return json.load(f)
except Exception:
return None
def clear_session_state(self, key: str):
"""Clear temporary session state"""
cache_file = self.root_dir / f"{key}_cache.json"
if cache_file.exists():
try:
os.remove(cache_file)
except Exception:
pass
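A brief usage sketch of the persistence layer added here (everything lands under the `.storage/` directory, which the new `.gitignore` excludes):

```python
from utils.storage import StorageManager

store = StorageManager()
store.save_config({"provider": "DeepSeek", "language": "Chinese"})

store.save_history(
    session_type="council",
    topic="新产品发布评审",
    content="# Final report\n...",
    metadata={"experts": 3},
)
for item in store.list_history():   # newest first
    print(item["date"], item["type"], item["topic"])
```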

1126
uv.lock generated Normal file

File diff suppressed because it is too large