Compare commits

..

20 Commits

Author SHA1 Message Date
xyz
35c40d97d6 feat: add deepseek-coder model support 2026-01-09 10:41:07 +08:00
xyz
f1b3f29be0 Remove cache files and sensitive config files from git tracking 2026-01-09 10:32:38 +08:00
bc6ddab598 Update README.md 2026-01-09 10:29:28 +08:00
xyz
b1bca4a876 Update DeepSeek model list & improve README docs 2026-01-09 10:27:24 +08:00
xyz
667a6b2ba9 Remove API key variable definition 2026-01-09 10:22:03 +08:00
xyz
567a9c7cb0 Remove hardcoded API key 2026-01-09 10:14:25 +08:00
xyz
27ec6b3d85 feat: add smart expert generation, decision scenario templates, and user feedback 2026-01-09 09:25:02 +08:00
yueye
0de5aa038b Add follow-up questions 2026-01-07 16:03:41 +08:00
yueye
35771a9a60 Fix 2026-01-07 15:29:01 +08:00
yueye
758785e57b Add background image feature 2026-01-07 15:13:17 +08:00
xyz
02eea5bfb4 feat: implement council v3 round-table mode 2026-01-07 14:42:29 +08:00
xyz
5913d2dc47 chore: Update compiled Python bytecode files. 2026-01-07 14:05:17 +08:00
xyz
1b7d45fc34 message 2026-01-07 14:04:52 +08:00
xyz
8734b15be6 feat: implement council v3 round-table mode 2026-01-07 14:02:17 +08:00
xyz
2e04312a7e feat: implement council v3 round-table mode 2026-01-07 13:57:30 +08:00
xyz
84b01a07a2 feat: implement council v3 round-table mode 2026-01-07 13:56:37 +08:00
xyz
e90d42ac7b feat: implement council v3 round-table mode 2026-01-07 13:47:38 +08:00
xyz
da7ccd2d26 feat: implement council v3 round-table mode 2026-01-07 13:44:46 +08:00
xyz
d26a7a36be refactor: Reimplement research workflow with dynamic expert configuration, multi-round discussion, and Mermaid diagram synthesis. 2026-01-07 13:38:51 +08:00
xyz
9dda930868 feat: Implement Multi-Model Council mode with 3-expert workflow 2026-01-07 12:59:56 +08:00
28 changed files with 2589 additions and 216 deletions

10
.gitignore vendored Normal file
View File

@@ -0,0 +1,10 @@
# Python bytecode cache
__pycache__/
*.py[cod]
*$py.class

# Project-specific storage/cache folder
.storage/

# Environment variable file (usually contains sensitive information)
.env

1
.python-version Normal file
View File

@@ -0,0 +1 @@
3.12

View File

@@ -63,6 +63,7 @@ uv sync
 ## 🚀 Quick Start
 ```bash
+cd wd666
 uv run streamlit run app.py
 ```

Binary file not shown.

Binary file not shown.

View File

@@ -19,16 +19,18 @@ class AgentMessage:
 class BaseAgent:
     """Agent base class"""

-    def __init__(self, agent_id: str, llm_client):
+    def __init__(self, agent_id: str, llm_client, language: str = "Chinese"):
         """
         Initialize the agent

         Args:
             agent_id: agent identifier (e.g. 'ceo', 'cto')
             llm_client: LLM client instance
+            language: output language
         """
         self.agent_id = agent_id
         self.llm_client = llm_client
+        self.language = language

         profile = get_agent_profile(agent_id)
         if not profile:
@@ -38,11 +40,18 @@ class BaseAgent:
         self.emoji = profile["emoji"]
         self.perspective = profile["perspective"]
         self.focus_areas = profile["focus_areas"]
-        self.system_prompt = profile["system_prompt"]
+        self.system_prompt = f"{profile['system_prompt']}\n\nIMPORTANT: You MUST output your response in {self.language}."

         # store conversation history
         self.conversation_history = []

+    @property
+    def model_name(self) -> str:
+        """Name of the model currently in use"""
+        if hasattr(self.llm_client, "model"):
+            return self.llm_client.model
+        return "Unknown Model"
+
     def generate_response(
         self,
         topic: str,

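For context on how the two additions fit together, here is a minimal usage sketch (not part of the diff); the import paths and the 'ceo' profile id are assumptions, and the DeepSeek client is just an example:

```python
# Hypothetical usage of the updated BaseAgent (paths/ids assumed, not from this diff)
from agents.base_agent import BaseAgent       # assumed module path
from utils.llm_client import LLMClient

client = LLMClient(provider="deepseek", api_key="sk-...", model="deepseek-chat")
agent = BaseAgent("ceo", client, language="English")  # language directive is appended to the system prompt
print(agent.model_name)  # "deepseek-chat"; falls back to "Unknown Model" if the client has no .model
```
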
View File

@@ -1,39 +1,56 @@
-from typing import Generator, List, Dict
+from typing import Generator
 from utils.llm_client import LLMClient
 import config

 class ResearchAgent:
     """Research-mode agent"""

-    def __init__(self, role: str, llm_client: LLMClient):
+    def __init__(self, role: str, llm_client: LLMClient, name: str = None, language: str = "Chinese"):
         self.role = role
         self.llm_client = llm_client
         self.role_config = config.RESEARCH_MODEL_ROLES.get(role, {})
-        self.name = self.role_config.get("name", role.capitalize())
+        self.name = name if name else self.role_config.get("name", role.capitalize())
+        self.language = language
+
+    @property
+    def model_name(self) -> str:
+        return self.llm_client.model

     def _get_system_prompt(self, context: str = "") -> str:
-        if self.role == "planner":
-            return f"""You are a Senior Research Planner.
-Your goal is to break down a complex user topic into a structured research plan.
-You must create a clear, step-by-step plan that covers different angles of the topic.
-Format your output as a Markdown list of steps.
-Context: {context}"""
-        elif self.role == "researcher":
-            return f"""You are a Deep Researcher.
-Your goal is to execute a specific research step and provide detailed, in-depth analysis.
-Use your vast knowledge to provide specific facts, figures, and logical reasoning.
-Do not be superficial. Go deep.
-Context: {context}"""
-        elif self.role == "writer":
-            return f"""You are a Senior Report Writer.
-Your goal is to synthesize multiple research findings into a cohesive, high-quality report.
-The report should be well-structured, easy to read, and provide actionable insights.
-Context: {context}"""
+        base_prompt = ""
+        if self.role == "council_member":
+            base_prompt = f"""You are {self.name}, a member of the Multi-Model Decision Council.
+Your goal is to participate in a round-table discussion to solve the user's problem.
+Be conversational, insightful, and constructive.
+Build upon others' ideas or respectfully disagree with valid reasoning.
+Context: {context}"""
+        elif self.role == "expert_a":
+            base_prompt = f"""You are Expert A, a Senior Analyst.
+You are participating in a round-table discussion.
+Your goal is to analyze the topic and propose solutions.
+Be conversational, direct, and responsive to other experts.
+Do not write a full final report; focus on the current discussion turn.
+Context: {context}"""
+        elif self.role == "expert_b":
+            base_prompt = f"""You are Expert B, a Critical Reviewer.
+You are participating in a round-table discussion.
+Your goal is to critique Expert A's points and offer alternative perspectives.
+Be conversational and constructive. Challenge assumptions directly.
+Context: {context}"""
+        elif self.role == "expert_c":
+            base_prompt = f"""You are Expert C, a Senior Strategist and Visual Thinker.
+Your goal is to synthesize the final output.
+Combine the structural strength of Expert A with the critical insights of Expert B.
+Produce a final, polished, comprehensive plan or report.
+CRITICAL: You MUST include a Mermaid.js diagram (using ```mermaid code block) to visualize the timeline, process, or architecture."""
         else:
-            return "You are a helpful assistant."
+            base_prompt = "You are a helpful assistant."
+        return f"{base_prompt}\n\nIMPORTANT: You MUST output your response in {self.language}."

     def generate(self, prompt: str, context: str = "") -> Generator[str, None, None]:
         """Generate response stream"""

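A short sketch of how a `council_member` agent might be driven directly, assuming the module path and an already-configured `LLMClient` (neither is shown in this diff):

```python
# Hypothetical sketch; module path and provider settings are assumptions
from agents.research_agent import ResearchAgent   # assumed module path
from utils.llm_client import LLMClient

client = LLMClient(provider="aihubmix", api_key="sk-...", model="gpt-4o")
expert = ResearchAgent("council_member", client, name="Market Analyst", language="English")

# generate() streams chunks; the system prompt now enforces the output language
for chunk in expert.generate("Open the first discussion round on our pricing strategy", context="Round 1"):
    print(chunk, end="", flush=True)
```
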
1034
app.py

File diff suppressed because it is too large

View File

@@ -9,35 +9,110 @@ load_dotenv()
 # API configuration
 ANTHROPIC_API_KEY = os.getenv("ANTHROPIC_API_KEY", "")
 OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")
-AIHUBMIX_API_KEY = os.getenv("AIHUBMIX_API_KEY", "sk-yd8Tik0nFW5emKYcBdFc433b7c8b4dC182848f76819bBe73")
+AIHUBMIX_API_KEY = os.getenv("AIHUBMIX_API_KEY", "")
+DEEPSEEK_API_KEY = os.getenv("DEEPSEEK_API_KEY", "")
+SILICONFLOW_API_KEY = os.getenv("SILICONFLOW_API_KEY", "")

-# AIHubMix configuration
+# LLM Providers Configuration
+LLM_PROVIDERS = {
+    "AIHubMix": {
+        "base_url": "https://aihubmix.com/v1",
+        "api_key_var": "AIHUBMIX_API_KEY",
+        "default_model": "gpt-4o"
+    },
+    "DeepSeek": {
+        "base_url": "https://api.deepseek.com",
+        "api_key_var": "DEEPSEEK_API_KEY",
+        "default_model": "deepseek-chat"
+    },
+    "SiliconFlow": {
+        "base_url": "https://api.siliconflow.cn/v1",
+        "api_key_var": "SILICONFLOW_API_KEY",
+        "default_model": "deepseek-ai/DeepSeek-V3"  # SiliconFlow often uses full path
+    },
+    "OpenAI": {
+        "base_url": "https://api.openai.com/v1",
+        "api_key_var": "OPENAI_API_KEY",
+        "default_model": "gpt-4o"
+    },
+    "Custom": {
+        "base_url": "http://localhost:8000/v1",
+        "api_key_var": "CUSTOM_API_KEY",
+        "default_model": "local-model"
+    }
+}
+
+# AIHubMix configuration (legacy, kept for backward compatibility if needed; main logic will use LLM_PROVIDERS)
 AIHUBMIX_BASE_URL = "https://aihubmix.com/v1"

 # Model configuration
 DEFAULT_MODEL = "gpt-4o"  # model supported by AIHubMix
 LLM_PROVIDER = "aihubmix"  # use AIHubMix by default

+# Supported models
+AVAILABLE_MODELS = {
+    # OpenAI
+    "gpt-4o": "GPT-4o (OpenAI)",
+    "gpt-4o-mini": "GPT-4o Mini (OpenAI)",
+    "gpt-4-turbo": "GPT-4 Turbo (OpenAI)",
+    # Anthropic
+    "claude-3-5-sonnet-20241022": "Claude 3.5 Sonnet (Anthropic)",
+    "claude-3-opus-20240229": "Claude 3 Opus (Anthropic)",
+    "claude-3-haiku-20240307": "Claude 3 Haiku (Anthropic)",
+    # Google
+    "gemini-1.5-pro": "Gemini 1.5 Pro (Google)",
+    "gemini-1.5-flash": "Gemini 1.5 Flash (Google)",
+    "gemini-2.0-flash-exp": "Gemini 2.0 Flash Exp (Google)",
+    # DeepSeek (official API: https://api.deepseek.com)
+    "deepseek-chat": "DeepSeek V3 通用对话 (DeepSeek)",
+    "deepseek-reasoner": "DeepSeek R1 深度推理 (DeepSeek)",
+    "deepseek-coder": "DeepSeek Coder 代码模型 (DeepSeek)",
+    # Meta
+    "llama-3.3-70b-instruct": "Llama 3.3 70B (Meta)",
+    "llama-3.1-405b-instruct": "Llama 3.1 405B (Meta)",
+    # Alibaba
+    "qwen-2.5-72b-instruct": "Qwen 2.5 72B (Alibaba)",
+    "qwen-plus": "Qwen Plus (Alibaba)",
+    "qwen-turbo": "Qwen Turbo (Alibaba)",
+    # Mistral
+    "mistral-large-latest": "Mistral Large (Mistral)",
+    # Perplexity
+    "llama-3.1-sonar-huge-128k-online": "Sonar Huge Online (Perplexity)",
+}
+
 # Debate configuration
 MAX_DEBATE_ROUNDS = 3  # maximum number of debate rounds
 MAX_AGENTS = 6  # maximum number of participating agents

+# Supported output languages
+SUPPORTED_LANGUAGES = ["Chinese", "English", "Japanese", "Spanish", "French", "German"]
+
+# Generation settings
+MAX_OUTPUT_TOKENS = 300  # cap the length of a single reply to keep it concise
+
 # Research-mode model role configuration
 RESEARCH_MODEL_ROLES = {
-    "planner": {
-        "name": "Planner",
+    "expert_a": {
+        "name": "Expert A (Analyst)",
         "default_model": "gpt-4o",
-        "description": "负责拆解问题,制定研究计划"
+        "description": "负责初步分析,提出核心观点和方案"
     },
-    "researcher": {
-        "name": "Researcher",
+    "expert_b": {
+        "name": "Expert B (Critique)",
         "default_model": "gemini-1.5-pro",
-        "description": "负责执行具体的研究步骤,深度分析"
+        "description": "负责批判性分析,指出潜在问题和漏洞"
     },
-    "writer": {
-        "name": "Writer",
+    "expert_c": {
+        "name": "Expert C (Synthesizer)",
         "default_model": "claude-3-5-sonnet-20241022",
-        "description": "负责汇总信息,撰写最终报告"
+        "description": "负责综合各方观点,生成最终决策方案"
     }
 }

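The new `LLM_PROVIDERS` table maps a provider to its base URL, the environment variable holding its key, and a default model. A hedged sketch of how such an entry might be resolved into a client; this helper is illustrative and not part of the diff:

```python
# Hypothetical helper; not part of the diff
import os

import config
from utils.llm_client import LLMClient

def client_from_provider(provider_name: str, model: str = None) -> LLMClient:
    entry = config.LLM_PROVIDERS[provider_name]          # e.g. "DeepSeek"
    return LLMClient(
        provider=provider_name.lower(),                  # matches the provider strings handled in the LLM client
        api_key=os.getenv(entry["api_key_var"], ""),
        base_url=entry["base_url"],
        model=model or entry["default_model"],
    )

# client = client_from_provider("DeepSeek")  # deepseek-chat via https://api.deepseek.com
```
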
6
main.py Normal file
View File

@@ -0,0 +1,6 @@
def main():
    print("Hello from multi-agent!")


if __name__ == "__main__":
    main()

View File

@@ -17,6 +17,8 @@ class DebateConfig:
     context: str = ""
     agent_ids: List[str] = None
     max_rounds: int = 2
+    agent_clients: dict = None  # Map[agent_id, LLMClient]
+    language: str = "Chinese"

 @dataclass
@@ -58,7 +60,12 @@ class DebateManager:
         # create the participating agents
         for agent_id in debate_config.agent_ids:
-            agent = BaseAgent(agent_id, self.llm_client)
+            # Check if specific client is provided in config, else use default
+            client = self.llm_client
+            if hasattr(debate_config, 'agent_clients') and debate_config.agent_clients and agent_id in debate_config.agent_clients:
+                client = debate_config.agent_clients[agent_id]
+            agent = BaseAgent(agent_id, client, language=debate_config.language)
             self.agents.append(agent)

     def run_debate_stream(
@@ -106,6 +113,7 @@ class DebateManager:
                 "agent_id": agent.agent_id,
                 "agent_name": agent.name,
                 "emoji": agent.emoji,
+                "model_name": agent.model_name,
                 "round": round_num
             }

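The new `agent_clients` field lets individual debaters run on different models while the rest fall back to the manager's default client. A minimal sketch; `DebateConfig`'s full field list is not shown in this hunk, so the `topic` field and the module path are assumptions:

```python
# Hypothetical sketch; `topic` field and module path are assumed, not shown in the diff
from utils.llm_client import LLMClient
from utils.debate_manager import DebateConfig   # assumed module path

cfg = DebateConfig(
    topic="Should we open-source the core engine?",
    agent_ids=["ceo", "cto"],
    max_rounds=2,
    language="English",
    agent_clients={
        # only the CTO gets a dedicated client; everyone else uses the manager's default
        "cto": LLMClient(provider="deepseek", api_key="sk-...", model="deepseek-reasoner"),
    },
)
```
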
View File

@@ -8,44 +8,92 @@ import config
 class ResearchConfig:
     topic: str
     context: str = ""
-    planner_model: str = "gpt-4o"
-    researcher_model: str = "gemini-1.5-pro"
-    writer_model: str = "claude-3-5-sonnet-20241022"
+    # Dynamic list of experts: [{"name": "Expert 1", "model": "gpt-4o", "role": "analyst"}, ...]
+    experts: List[Dict[str, str]] = None
+    language: str = "Chinese"

 class ResearchManager:
-    """Manages the Deep Research workflow"""
+    """Manages the Multi-Model Council workflow"""

     def __init__(self, api_key: str, base_url: str = None, provider: str = "aihubmix"):
         self.api_key = api_key
         self.base_url = base_url
         self.provider = provider
-        self.agents = {}
+        self.agents = []

     def _get_client(self, model: str) -> LLMClient:
         return LLMClient(
-            provider=self.provider,
+            provider=self.provider,  # Configured to respect provider or default to aihubmix logic inside client
            api_key=self.api_key,
            base_url=self.base_url,
            model=model
        )

     def create_agents(self, config: ResearchConfig):
-        """Initialize agents with specific models"""
-        self.agents["planner"] = ResearchAgent("planner", self._get_client(config.planner_model))
-        self.agents["researcher"] = ResearchAgent("researcher", self._get_client(config.researcher_model))
-        self.agents["writer"] = ResearchAgent("writer", self._get_client(config.writer_model))
+        """Initialize agents with specific models from config"""
+        self.agents = []
+        if config.experts:
+            for idx, expert_conf in enumerate(config.experts):
+                role_type = "council_member"
+                agent = ResearchAgent(
+                    role=role_type,
+                    llm_client=self._get_client(expert_conf["model"]),
+                    name=expert_conf.get("name", f"Expert {idx+1}"),
+                    language=config.language
+                )
+                self.agents.append(agent)

-    def generate_plan(self, topic: str, context: str) -> Generator[str, None, None]:
-        """Step 1: Generate Research Plan"""
-        prompt = f"Please create a comprehensive research plan for the topic: '{topic}'.\nBreak it down into 3-5 distinct, actionable steps."
-        yield from self.agents["planner"].generate(prompt, context)
-
-    def execute_step(self, step: str, previous_findings: str) -> Generator[str, None, None]:
-        """Step 2: Execute a single research step"""
-        prompt = f"Execute this research step: '{step}'.\nPrevious findings: {previous_findings}"
-        yield from self.agents["researcher"].generate(prompt)
-
-    def generate_report(self, topic: str, all_findings: str) -> Generator[str, None, None]:
-        """Step 3: Generate Final Report"""
-        prompt = f"Write a final comprehensive report on '{topic}' based on these findings:\n{all_findings}"
-        yield from self.agents["writer"].generate(prompt)
+    def collaborate(self, topic: str, context: str, max_rounds: int = 3) -> Generator[Dict[str, str], None, None]:
+        """
+        Execute the collaborative research process with multi-round discussion:
+        1. Conversation Loop (All Experts Round Robin)
+        2. Final Synthesis (Last Expert)
+        """
+        conversation_history = []
+        discussion_context = f"Topic: '{topic}'\nBackground Context: {context}\n\n"
+
+        # Round-Robin Discussion
+        for round_num in range(1, max_rounds + 1):
+            for agent in self.agents:
+                yield {"type": "step_start", "step": f"Round {round_num}: {agent.name}", "agent": agent.name, "model": agent.model_name}
+
+                # Construct prompt
+                if round_num == 1 and not conversation_history:
+                    prompt = f"You are {agent.name}. You are starting the discussion on '{topic}'. Provide your initial analysis and key points. Be conversational but substantive."
+                else:
+                    prompt = f"You are {agent.name}. Review the discussion so far. Respond to previous points. Defend your views or refine them. Keep the discussion moving towards a solution.\n\nDiscussion History:\n{_format_history(conversation_history)}"
+
+                response = ""
+                for chunk in agent.generate(prompt, context=discussion_context):
+                    response += chunk
+                    yield {"type": "content", "content": chunk}
+
+                conversation_history.append({"agent": agent.name, "content": response})
+                yield {"type": "step_end", "output": response}
+
+        # Final Synthesis by the LAST agent (or a specific designated one)
+        synthesizer = self.agents[-1]
+        yield {"type": "step_start", "step": f"Final Synthesis ({synthesizer.name})", "agent": synthesizer.name, "model": synthesizer.model_name}
+
+        prompt_syn = f"""Synthesize the entire discussion into a final comprehensive plan for '{topic}'.
+
+Discussion History:
+{_format_history(conversation_history)}
+
+IMPORTANT:
+1. Reconcile the different viewpoints from all experts.
+2. Provide a concrete action plan.
+3. You MUST include a Mermaid.js diagram (using ```mermaid code block) to visualize the roadmap or process."""
+
+        findings_syn = ""
+        for chunk in synthesizer.generate(prompt_syn, context=discussion_context):
+            findings_syn += chunk
+            yield {"type": "content", "content": chunk}
+        yield {"type": "step_end", "output": findings_syn}
+
+
+def _format_history(history: List[Dict[str, str]]) -> str:
+    formatted = ""
+    for turn in history:
+        formatted += f"[{turn['agent']}]: {turn['content']}\n\n"
+    return formatted

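The `collaborate()` generator yields typed event dicts ("step_start", "content", "step_end"). A hedged driver sketch showing how a caller might consume that stream; only the module path is assumed, the fields and event keys come from the diff above:

```python
# Hypothetical driver for collaborate(); module path assumed
from utils.research_manager import ResearchConfig, ResearchManager  # assumed module path

manager = ResearchManager(api_key="sk-...", provider="aihubmix")
cfg = ResearchConfig(
    topic="Career transition to the AI field",
    experts=[
        {"name": "Career Coach", "model": "gpt-4o"},
        {"name": "AI Industry Expert", "model": "deepseek-chat"},
        {"name": "Synthesizer", "model": "claude-3-5-sonnet-20241022"},  # last expert does the final synthesis
    ],
    language="English",
)
manager.create_agents(cfg)

for event in manager.collaborate(cfg.topic, cfg.context, max_rounds=2):
    if event["type"] == "step_start":
        print(f"\n--- {event['step']} ({event['model']}) ---")
    elif event["type"] == "content":
        print(event["content"], end="", flush=True)
```
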
13
pyproject.toml Normal file
View File

@ -0,0 +1,13 @@
[project]
name = "multi-agent"
version = "0.1.0"
description = "Add your description here"
readme = "README.md"
requires-python = ">=3.12"
dependencies = [
"anthropic>=0.75.0",
"openai>=2.14.0",
"pydantic>=2.12.5",
"python-dotenv>=1.2.1",
"streamlit>=1.52.2",
]

View File

@@ -0,0 +1,108 @@
"""
Auto Agent Generator - automatically generate expert configurations from the topic
Uses LLM to analyze the topic and suggest appropriate expert agents.
"""
import json
import re
from typing import List, Dict
from utils.llm_client import LLMClient

EXPERT_GENERATION_PROMPT = """You are an expert team composition advisor. Given a research/decision topic, you need to suggest the most appropriate team of experts to analyze it.

Instructions:
1. Analyze the topic carefully to understand its domain and key aspects
2. Generate {num_experts} distinct expert roles that would provide the most valuable perspectives
3. Each expert should have a unique focus area relevant to the topic
4. The LAST expert should always be a "Synthesizer" role who can integrate all perspectives

Output Format (MUST be valid JSON array):
[
  {{"name": "Expert Name", "perspective": "Brief description of their viewpoint", "focus": "Key areas they analyze"}},
  ...
]

Examples of good expert names based on topic:
- For "Should we launch an e-commerce platform?": "市场渠道分析师", "电商运营专家", "供应链顾问", "数字化转型综合师"
- For "Career transition to AI field": "职业发展顾问", "AI行业专家", "技能评估分析师", "综合规划师"

IMPORTANT:
- Use {language} for all names and descriptions
- Make names specific to the topic, not generic like "Expert 1"
- The last expert MUST be a synthesizer/integrator type

Topic: {topic}

Generate exactly {num_experts} experts as a JSON array:"""


def generate_experts_for_topic(
    topic: str,
    num_experts: int,
    llm_client: LLMClient,
    language: str = "Chinese"
) -> List[Dict[str, str]]:
    """
    Use LLM to generate appropriate expert configurations based on the topic.

    Args:
        topic: The research/decision topic
        num_experts: Number of experts to generate (2-5)
        llm_client: LLM client instance for API calls
        language: Output language (Chinese/English)

    Returns:
        List of expert dicts: [{"name": "...", "perspective": "...", "focus": "..."}, ...]
    """
    if not topic.strip():
        return []

    prompt = EXPERT_GENERATION_PROMPT.format(
        topic=topic,
        num_experts=num_experts,
        language=language
    )

    try:
        response = llm_client.chat(
            system_prompt="You are a helpful assistant that generates JSON output only. No markdown, no explanation.",
            user_prompt=prompt,
            max_tokens=800
        )
        # Extract JSON from response (handle potential markdown wrapping)
        json_match = re.search(r'\[[\s\S]*\]', response)
        if json_match:
            experts = json.loads(json_match.group())
            # Validate structure
            if isinstance(experts, list) and len(experts) >= 1:
                validated = []
                for exp in experts[:num_experts]:
                    if isinstance(exp, dict) and "name" in exp:
                        validated.append({
                            "name": exp.get("name", "Expert"),
                            "perspective": exp.get("perspective", ""),
                            "focus": exp.get("focus", "")
                        })
                return validated
    except (json.JSONDecodeError, Exception) as e:
        print(f"[AutoAgentGenerator] Error parsing LLM response: {e}")

    # Fallback: return generic experts
    fallback = []
    for i in range(num_experts):
        if i == num_experts - 1:
            fallback.append({"name": f"综合分析师", "perspective": "整合视角", "focus": "综合决策"})
        else:
            fallback.append({"name": f"专家 {i+1}", "perspective": "分析视角", "focus": "专业分析"})
    return fallback


def get_default_model_for_expert(expert_index: int, total_experts: int, available_models: list) -> str:
    """
    Assign a default model to an expert based on their position.
    Spreads experts across available models for diversity.
    """
    if not available_models:
        return "gpt-4o"
    return available_models[expert_index % len(available_models)]

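A hedged usage sketch combining the two new helpers; the module path is an assumption, and `AVAILABLE_MODELS` comes from the config diff above:

```python
# Hypothetical usage; module path assumed
import config
from utils.llm_client import LLMClient
from utils.auto_agent_generator import generate_experts_for_topic, get_default_model_for_expert  # assumed path

client = LLMClient(provider="aihubmix", api_key="sk-...", model="gpt-4o")
experts = generate_experts_for_topic(
    "Should we launch an e-commerce platform?",
    num_experts=4, llm_client=client, language="English"
)
models = list(config.AVAILABLE_MODELS.keys())
for i, expert in enumerate(experts):
    expert["model"] = get_default_model_for_expert(i, len(experts), models)  # round-robin model assignment
    print(expert["name"], "->", expert["model"])
```
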
View File

@@ -5,6 +5,8 @@ from typing import Generator
 import os
+
+import config

 class LLMClient:
     """Unified LLM API client"""
@@ -36,24 +38,25 @@ class LLMClient:
             self.client = OpenAI(api_key=api_key)
             self.model = model or "gpt-4o"

-        elif self.provider == "aihubmix":
-            # AIHubMix is compatible with the OpenAI API format
+        elif self.provider in ["aihubmix", "deepseek", "siliconflow", "custom"]:
+            # providers with OpenAI-compatible APIs
             from openai import OpenAI
+            default_urls = {
+                "aihubmix": "https://aihubmix.com/v1",
+                "deepseek": "https://api.deepseek.com",
+                "siliconflow": "https://api.siliconflow.cn/v1",
+                "custom": "http://localhost:8000/v1"
+            }
+            final_base_url = base_url or default_urls.get(self.provider)
             self.client = OpenAI(
                 api_key=api_key,
-                base_url=base_url or "https://aihubmix.com/v1"
+                base_url=final_base_url
             )
             self.model = model or "gpt-4o"

-        elif self.provider == "custom":
-            # custom OpenAI-compatible endpoint (vLLM, Ollama, TGI, etc.)
-            from openai import OpenAI
-            self.client = OpenAI(
-                api_key=api_key or "not-needed",
-                base_url=base_url or "http://localhost:8000/v1"
-            )
-            self.model = model or "local-model"
-
         else:
             raise ValueError(f"不支持的 provider: {self.provider}")
@@ -61,7 +64,7 @@ class LLMClient:
         self,
         system_prompt: str,
         user_prompt: str,
-        max_tokens: int = 1024
+        max_tokens: int = config.MAX_OUTPUT_TOKENS
     ) -> Generator[str, None, None]:
         """
         Streaming chat

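With the consolidated branch, the four OpenAI-compatible providers share one code path and differ only in their default base URL. A small sketch (module path assumed; keys are placeholders), where omitting `base_url` falls back to the `default_urls` table above:

```python
# Hypothetical sketch of constructing clients for the newly supported providers
from utils.llm_client import LLMClient   # assumed module path

deepseek = LLMClient(provider="deepseek", api_key="sk-...", model="deepseek-chat")                # -> https://api.deepseek.com
silicon = LLMClient(provider="siliconflow", api_key="sk-...", model="deepseek-ai/DeepSeek-V3")    # -> https://api.siliconflow.cn/v1
local = LLMClient(provider="custom", api_key="not-needed", model="local-model")                   # -> http://localhost:8000/v1
```
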
184
utils/storage.py Normal file
View File

@@ -0,0 +1,184 @@
"""
Storage Manager - Handle local persistence of configuration, history/reports, and assets.
"""
import os
import json
import time
from typing import List, Dict, Any
from pathlib import Path

# Constants
STORAGE_DIR = ".storage"
CONFIG_FILE = "config.json"
HISTORY_DIR = "history"
ASSETS_DIR = "assets"


class StorageManager:
    def __init__(self):
        self.root_dir = Path(STORAGE_DIR)
        self.config_path = self.root_dir / CONFIG_FILE
        self.history_dir = self.root_dir / HISTORY_DIR
        self.assets_dir = self.root_dir / ASSETS_DIR
        # Ensure directories exist
        self.root_dir.mkdir(exist_ok=True)
        self.history_dir.mkdir(exist_ok=True)
        self.assets_dir.mkdir(exist_ok=True)

    def save_config(self, config_data: Dict[str, Any]):
        """Save UI configuration to file"""
        try:
            with open(self.config_path, 'w', encoding='utf-8') as f:
                json.dump(config_data, f, indent=2, ensure_ascii=False)
        except Exception as e:
            print(f"Error saving config: {e}")

    def load_config(self) -> Dict[str, Any]:
        """Load UI configuration from file"""
        if not self.config_path.exists():
            return {}
        try:
            with open(self.config_path, 'r', encoding='utf-8') as f:
                return json.load(f)
        except Exception as e:
            print(f"Error loading config: {e}")
            return {}

    def save_asset(self, uploaded_file) -> str:
        """Save an uploaded file (e.g., background image) into assets directory.

        Args:
            uploaded_file: a file-like object (Streamlit UploadedFile) or bytes-like

        Returns:
            The saved file path as string, or None on failure.
        """
        try:
            # Determine filename
            if hasattr(uploaded_file, 'name'):
                filename = uploaded_file.name
            else:
                filename = f"asset_{int(time.time())}"
            # sanitize
            safe_name = "".join([c for c in filename if c.isalnum() or c in (' ', '.', '_', '-')]).strip().replace(' ', '_')
            dest = self.assets_dir / f"{int(time.time())}_{safe_name}"
            # Write bytes
            with open(dest, 'wb') as out:
                # Streamlit UploadedFile has getbuffer()
                if hasattr(uploaded_file, 'getbuffer'):
                    out.write(uploaded_file.getbuffer())
                else:
                    # try reading
                    data = uploaded_file.read()
                    if isinstance(data, str):
                        data = data.encode('utf-8')
                    out.write(data)
            return str(dest)
        except Exception as e:
            print(f"Error saving asset: {e}")
            return None

    def save_history(self, session_type: str, topic: str, content: str, metadata: Dict[str, Any] = None):
        """
        Save a session report/history

        Args:
            session_type: 'council' or 'debate'
            topic: The main topic
            content: The full markdown report or content
            metadata: Additional info (model used, date, etc)
        """
        timestamp = int(time.time())
        date_str = time.strftime("%Y-%m-%d %H:%M:%S")
        # Create a safe filename
        safe_topic = "".join([c for c in topic[:20] if c.isalnum() or c in (' ', '_', '-')]).strip().replace(' ', '_')
        filename = f"{timestamp}_{session_type}_{safe_topic}.json"

        data = {
            "id": str(timestamp),
            "timestamp": timestamp,
            "date": date_str,
            "type": session_type,
            "topic": topic,
            "content": content,
            "metadata": metadata or {}
        }
        try:
            with open(self.history_dir / filename, 'w', encoding='utf-8') as f:
                json.dump(data, f, indent=2, ensure_ascii=False)
            return True
        except Exception as e:
            print(f"Error saving history: {e}")
            return False

    def list_history(self) -> List[Dict[str, Any]]:
        """List all history items (metadata only)"""
        items = []
        if not self.history_dir.exists():
            return []
        for file in self.history_dir.glob("*.json"):
            try:
                with open(file, 'r', encoding='utf-8') as f:
                    data = json.load(f)
                # Return summary info
                items.append({
                    "id": data.get("id"),
                    "date": data.get("date"),
                    "type": data.get("type"),
                    "topic": data.get("topic"),
                    "filename": file.name
                })
            except Exception:
                continue
        # Sort by timestamp desc
        return sorted(items, key=lambda x: x.get("date", ""), reverse=True)

    def load_history_item(self, filename: str) -> Dict[str, Any]:
        """Load full content of a history item"""
        path = self.history_dir / filename
        if not path.exists():
            return None
        try:
            with open(path, 'r', encoding='utf-8') as f:
                return json.load(f)
        except Exception:
            return None

    # ==================== Session Cache (Resume Functionality) ====================
    def save_session_state(self, key: str, data: Dict[str, Any]):
        """Save temporary session state for recovery"""
        try:
            # We use a dedicated cache file per key
            cache_file = self.root_dir / f"{key}_cache.json"
            data["_timestamp"] = int(time.time())
            with open(cache_file, 'w', encoding='utf-8') as f:
                json.dump(data, f, indent=2, ensure_ascii=False)
        except Exception as e:
            print(f"Error saving session cache: {e}")

    def load_session_state(self, key: str) -> Dict[str, Any]:
        """Load temporary session state"""
        cache_file = self.root_dir / f"{key}_cache.json"
        if not cache_file.exists():
            return None
        try:
            with open(cache_file, 'r', encoding='utf-8') as f:
                return json.load(f)
        except Exception:
            return None

    def clear_session_state(self, key: str):
        """Clear temporary session state"""
        cache_file = self.root_dir / f"{key}_cache.json"
        if cache_file.exists():
            try:
                os.remove(cache_file)
            except Exception:
                pass

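A short round trip through the new `StorageManager` (illustrative; topics and contents are placeholders):

```python
# Hypothetical usage of utils/storage.py
from utils.storage import StorageManager

store = StorageManager()   # creates .storage/, .storage/history/, .storage/assets/ if missing
store.save_config({"provider": "DeepSeek", "language": "English"})
store.save_history("council", "E-commerce platform launch", "# Final report\n...", metadata={"rounds": 3})

for item in store.list_history():          # metadata only, newest first
    print(item["date"], item["type"], item["topic"], item["filename"])

full = store.load_history_item(store.list_history()[0]["filename"])  # full saved report
```
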
1126
uv.lock generated Normal file

File diff suppressed because it is too large