from typing import Generator

import config
from utils.llm_client import LLMClient


class ResearchAgent:
    """Agent dedicated to research mode."""

    def __init__(self, role: str, llm_client: LLMClient):
        self.role = role
        self.llm_client = llm_client
        self.role_config = config.RESEARCH_MODEL_ROLES.get(role, {})
        self.name = self.role_config.get("name", role.capitalize())

    def _get_system_prompt(self, context: str = "") -> str:
        """Return the role-specific system prompt, embedding any shared context."""
        if self.role == "planner":
            return f"""You are a Senior Research Planner.
Your goal is to break down a complex user topic into a structured research plan.
You must create a clear, step-by-step plan that covers different angles of the topic.
Format your output as a Markdown list of steps.
Context: {context}"""
        elif self.role == "researcher":
            return f"""You are a Deep Researcher.
Your goal is to execute a specific research step and provide detailed, in-depth analysis.
Use your vast knowledge to provide specific facts, figures, and logical reasoning.
Do not be superficial. Go deep.
Context: {context}"""
        elif self.role == "writer":
            return f"""You are a Senior Report Writer.
Your goal is to synthesize multiple research findings into a cohesive, high-quality report.
The report should be well-structured, easy to read, and provide actionable insights.
Context: {context}"""
        else:
            return "You are a helpful assistant."

    def generate(self, prompt: str, context: str = "") -> Generator[str, None, None]:
        """Generate a response stream for the given prompt."""
        system_prompt = self._get_system_prompt(context)
        yield from self.llm_client.chat_stream(
            system_prompt=system_prompt,
            user_prompt=prompt
        )
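

# Minimal usage sketch, assuming LLMClient can be constructed with no arguments
# (its real constructor may require an API key or model name) and that
# config.RESEARCH_MODEL_ROLES defines a "planner" entry.
if __name__ == "__main__":
    client = LLMClient()  # assumed no-arg constructor; adjust to the real signature
    planner = ResearchAgent("planner", client)
    # Stream the planner's step-by-step research plan to stdout as it is generated.
    for chunk in planner.generate("The impact of large language models on education"):
        print(chunk, end="", flush=True)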