From cb97f7c49a2da5668e35e7905a612b075e349283 Mon Sep 17 00:00:00 2001
From: xyz
Date: Wed, 7 Jan 2026 11:02:05 +0800
Subject: [PATCH] Initial commit of Deep Research Mode

---
 .env.example                                              |  11 +
 Project_Design.md                                         | 156 +++++
 __pycache__/app.cpython-313.pyc                           | Bin 0 -> 21825 bytes
 __pycache__/config.cpython-313.pyc                        | Bin 0 -> 937 bytes
 agents/__init__.py                                        |  17 +
 agents/__pycache__/__init__.cpython-313.pyc               | Bin 0 -> 480 bytes
 agents/__pycache__/agent_profiles.cpython-313.pyc         | Bin 0 -> 7176 bytes
 agents/__pycache__/base_agent.cpython-313.pyc             | Bin 0 -> 5005 bytes
 agents/__pycache__/research_agent.cpython-313.pyc         | Bin 0 -> 2797 bytes
 agents/agent_profiles.py                                  | 195 ++++++
 agents/base_agent.py                                      | 131 +++++
 agents/research_agent.py                                  |  44 ++
 app.py                                                    | 556 ++++++++++++++++++
 config.py                                                 |  50 ++
 orchestrator/__init__.py                                  |   4 +
 orchestrator/__pycache__/__init__.cpython-313.pyc         | Bin 0 -> 344 bytes
 orchestrator/__pycache__/debate_manager.cpython-313.pyc   | Bin 0 -> 6029 bytes
 orchestrator/__pycache__/research_manager.cpython-313.pyc | Bin 0 -> 4072 bytes
 orchestrator/debate_manager.py                            | 160 +++++
 orchestrator/research_manager.py                          |  51 ++
 report/__init__.py                                        |   4 +
 report/__pycache__/__init__.cpython-313.pyc               | Bin 0 -> 287 bytes
 report/__pycache__/report_generator.cpython-313.pyc       | Bin 0 -> 4954 bytes
 report/report_generator.py                                | 143 +++++
 requirements.txt                                          |   7 +
 utils/__init__.py                                         |   4 +
 utils/__pycache__/__init__.cpython-313.pyc                | Bin 0 -> 271 bytes
 utils/__pycache__/llm_client.cpython-313.pyc              | Bin 0 -> 5294 bytes
 utils/llm_client.py                                       | 141 +++++
 29 files changed, 1674 insertions(+)
 create mode 100644 .env.example
 create mode 100644 Project_Design.md
 create mode 100644 __pycache__/app.cpython-313.pyc
 create mode 100644 __pycache__/config.cpython-313.pyc
 create mode 100644 agents/__init__.py
 create mode 100644 agents/__pycache__/__init__.cpython-313.pyc
 create mode 100644 agents/__pycache__/agent_profiles.cpython-313.pyc
 create mode 100644 agents/__pycache__/base_agent.cpython-313.pyc
 create mode 100644 agents/__pycache__/research_agent.cpython-313.pyc
 create mode 100644 agents/agent_profiles.py
 create mode 100644 agents/base_agent.py
 create mode 100644 agents/research_agent.py
 create mode 100644 app.py
 create mode 100644 config.py
 create mode 100644 orchestrator/__init__.py
 create mode 100644 orchestrator/__pycache__/__init__.cpython-313.pyc
 create mode 100644 orchestrator/__pycache__/debate_manager.cpython-313.pyc
 create mode 100644 orchestrator/__pycache__/research_manager.cpython-313.pyc
 create mode 100644 orchestrator/debate_manager.py
 create mode 100644 orchestrator/research_manager.py
 create mode 100644 report/__init__.py
 create mode 100644 report/__pycache__/__init__.cpython-313.pyc
 create mode 100644 report/__pycache__/report_generator.cpython-313.pyc
 create mode 100644 report/report_generator.py
 create mode 100644 requirements.txt
 create mode 100644 utils/__init__.py
 create mode 100644 utils/__pycache__/__init__.cpython-313.pyc
 create mode 100644 utils/__pycache__/llm_client.cpython-313.pyc
 create mode 100644 utils/llm_client.py

diff --git a/.env.example b/.env.example
new file mode 100644
index 0000000..c28f633
--- /dev/null
+++ b/.env.example
@@ -0,0 +1,11 @@
+# Copy this file to .env and fill in your API keys.
+# Keys are loaded from environment variables automatically; no need to enter them in the UI.
+
+# AIHubMix API key (used by default)
+AIHUBMIX_API_KEY=sk-your-api-key-here
+
+# Anthropic API key (optional)
+ANTHROPIC_API_KEY=your-anthropic-api-key-here
+
+# OpenAI API key (optional)
+OPENAI_API_KEY=your-openai-api-key-here
diff --git a/Project_Design.md b/Project_Design.md
new file mode 100644
index 0000000..212522a
--- /dev/null
+++ b/Project_Design.md
@@ -0,0 +1,156 @@
+# Multi-Agent Decision Workshop
+
+## 🎯 One-Sentence Description
+
+**Multi-Agent Decision Workshop** is an AI-assisted decision tool for **product managers, team leads, and founders**. It simulates several roles (CEO, CTO, CFO, user advocate, risk analyst, and more) debating a proposal from different perspectives, giving the user well-rounded decision insights.
+
+---
+
+## 👤 Target Users and Pain Points
+
+| User | Pain point |
+|---------|---------|
+| Product manager | Proposal reviews collapse into a single viewpoint and lose the balance between tech, cost, and UX |
+| Founder | Deciding alone lacks diverse feedback; easy to be blindly optimistic or overly conservative |
+| Team lead | Hard to let everyone speak in meetings; the loudest voice dominates the decision |
+| Student / individual | Major life decisions (career, investment) lack guidance from professional perspectives |
+
+---
+
+## 🔧 Core Features (MVP: the 3 must-haves)
+
+### 1. 📝 Decision Topic Input
+- User enters the question or proposal to decide on
+- Optional decision type (product proposal, business decision, tech selection, personal planning)
+- Optional upload of background material
+
+### 2. 🎭 Multi-Role Debate Simulation
+- The system assigns 4-6 agents with different perspectives
+- Each agent states its view from its role's standpoint
+- Agents can challenge and respond to each other (multi-round debate)
+
+### 3. 📊 Decision Report Generation
+- Aggregates supporting and opposing arguments from all sides
+- Distills the key decision points and risks
+- Suggests a decision framework and next actions
+
+---
+
+## 🎭 Built-in Agent Role Library
+
+| Role | Perspective | Focus |
+|------|---------|--------|
+| 🧑‍💼 CEO | Overall strategy | Vision, market opportunity, competitive landscape |
+| 👨‍💻 CTO | Technical feasibility | Difficulty, resource needs, tech debt |
+| 💰 CFO | Financial health | ROI, cost, cash flow, business model |
+| 👥 User advocate | User experience | User needs, pain points, usage scenarios |
+| ⚠️ Risk analyst | Risk control | Potential risks, failure modes, contingency plans |
+| 🚀 Growth hacker | Fast validation | MVP mindset, growth levers, data-driven decisions |
+| 🎨 Product designer | Product experience | Interaction design, user journey, differentiation |
+| 📈 Market analyst | Market insight | Market size, trends, competitor analysis |
+
+---
+
+## 🔄 User Interaction Flow
+
+```
+User opens the app
+  ↓
+[Choose decision type]   product proposal / business decision / tech selection / personal planning
+  ↓
+[Enter decision topic]   "Should we ship the AI assistant feature in Q2?"
+  ↓
+[Pick participating roles]   ☑️ CEO ☑️ CTO ☑️ CFO ☑️ user advocate (customizable)
+  ↓
+[Start debate] → watch the agents debate live (streaming output)
+  ↓
+[Generate report] → download the decision summary as PDF / Markdown
+```
+
+---
+
+## 🏗️ Technical Architecture
+
+```
+Frontend (Streamlit)
+    topic input area | debate view | decision report view
+        ↓
+Backend (Python)
+    agent manager | debate orchestrator | report generator
+        ↓
+LLM API layer
+    Claude API / OpenAI API / local models
+```
+
+---
+
+## 📁 Project Layout
+
+```
+multi_agent_workshop/
+├── app.py                    # Streamlit entry point
+├── config.py                 # Configuration (API keys, model settings)
+├── requirements.txt          # Dependencies
+│
+├── agents/                   # Agents
+│   ├── __init__.py
+│   ├── base_agent.py         # Agent base class
+│   ├── agent_factory.py      # Agent factory (creates the roles)
+│   └── agent_profiles.py     # Role definitions and prompt templates
+│
+├── orchestrator/             # Debate orchestration
+│   ├── __init__.py
+│   ├── debate_manager.py     # Debate flow management
+│   └── turn_strategy.py      # Speaking-order strategy
+│
+├── report/                   # Report generation
+│   ├── __init__.py
+│   ├── summarizer.py         # Opinion aggregation
+│   └── report_generator.py   # Report output
+│
+├── ui/                       # UI components
+│   ├── __init__.py
+│   ├── input_panel.py        # Input panel
+│   ├── debate_panel.py       # Debate view
+│   └── report_panel.py       # Report view
+│
+└── utils/                    # Utilities
+    ├── __init__.py
+    └── llm_client.py         # LLM API wrapper
+```
+
+---
+
+## ⏱️ Milestones
+
+| Phase | Goal | Estimated time |
+|------|------|---------|
+| Phase 1 | Single-agent Q&A (validate the API call) | 30 min |
+| Phase 2 | Multiple agents speaking in turn | 1 h |
+| Phase 3 | Interactive agent debate | 1.5 h |
+| Phase 4 | Decision report generation | 1 h |
+| Phase 5 | UI polish + export | 1 h |
+
+---
+
+## 🚀 Stretch Goals (Nice to Have)
+
+- [ ] Custom agent roles
+- [ ] Saved decision-session history
+- [ ] Decision tracking (verify outcomes later)
+- [ ] Team collaboration mode (multiple live participants)
+- [ ] Knowledge-base integration (decide on top of internal docs)
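Milestone Phase 1 above is just a smoke test of the API path. As a rough sketch (not part of this patch; it assumes the `LLMClient` wrapper and the constructor arguments that `app.py` uses later in this commit), Phase 1 can be as small as:

```python
# phase1_smoke_test.py: hypothetical Phase 1 check, not included in this patch.
# Assumes the LLMClient surface used throughout app.py and report_generator.py.
from utils.llm_client import LLMClient

client = LLMClient(
    provider="aihubmix",                # default provider from config.py
    api_key="sk-your-api-key-here",     # the real app loads this from .env
    base_url="https://aihubmix.com/v1",
    model="gpt-4o",
)

# One blocking round-trip proves the key, base URL, and model name are valid.
print(client.chat(
    system_prompt="You are a helpful assistant.",
    user_prompt="Reply with the single word: pong",
))
```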
diff --git a/__pycache__/app.cpython-313.pyc b/__pycache__/app.cpython-313.pyc
new file mode 100644
index 0000000..af80baf
(GIT binary patch omitted: compiled bytecode)
diff --git a/__pycache__/config.cpython-313.pyc b/__pycache__/config.cpython-313.pyc
new file mode 100644
(GIT binary patch omitted: compiled bytecode)
diff --git a/agents/__init__.py b/agents/__init__.py
new file mode 100644
index 0000000..646f748
--- /dev/null
+++ b/agents/__init__.py
@@ -0,0 +1,17 @@
+"""Agents package."""
+from agents.base_agent import BaseAgent, AgentMessage
+from agents.agent_profiles import (
+    AGENT_PROFILES,
+    get_agent_profile,
+    get_all_agents,
+    get_recommended_agents
+)
+
+__all__ = [
+    "BaseAgent",
+    "AgentMessage",
+    "AGENT_PROFILES",
+    "get_agent_profile",
+    "get_all_agents",
+    "get_recommended_agents"
+]
diff --git a/agents/__pycache__/__init__.cpython-313.pyc b/agents/__pycache__/__init__.cpython-313.pyc
new file mode 100644
(GIT binary patch omitted: compiled bytecode)
diff --git a/agents/__pycache__/agent_profiles.cpython-313.pyc b/agents/__pycache__/agent_profiles.cpython-313.pyc
new file mode 100644
(GIT binary patch omitted: compiled bytecode)
diff --git a/agents/__pycache__/base_agent.cpython-313.pyc b/agents/__pycache__/base_agent.cpython-313.pyc
new file mode 100644
(GIT binary patch omitted: compiled bytecode)
diff --git a/agents/__pycache__/research_agent.cpython-313.pyc b/agents/__pycache__/research_agent.cpython-313.pyc
new file mode 100644
(GIT binary patch omitted: compiled bytecode)
diff --git a/agents/agent_profiles.py b/agents/agent_profiles.py
new file mode 100644
--- /dev/null
+++ b/agents/agent_profiles.py
@@ -0,0 +1,195 @@
(The AGENT_PROFILES and RECOMMENDED_AGENTS definitions, roughly 190 lines of role
names, emoji, perspectives, focus areas, and system prompts, are garbled in this
copy of the patch; only the accessor functions below are recoverable.)
+"""Role definitions and prompt templates."""
+
+def get_agent_profile(agent_id: str) -> dict:
+    """Return the profile dict for the given agent id."""
+    return AGENT_PROFILES.get(agent_id, None)
+
+def get_all_agents() -> list:
+    """Return all available agents as a list of {id, name, emoji} dicts."""
+    return [
+        {"id": k, "name": v["name"], "emoji": v["emoji"]}
+        for k, v in AGENT_PROFILES.items()
+    ]
+
+def get_recommended_agents(decision_type: str) -> list:
+    """Return the recommended agent combination for a decision type."""
+    return RECOMMENDED_AGENTS.get(decision_type, list(AGENT_PROFILES.keys())[:5])
diff --git a/agents/base_agent.py b/agents/base_agent.py
new file mode 100644
index 0000000..148534e
--- /dev/null
+++ b/agents/base_agent.py
@@ -0,0 +1,131 @@
+"""
+Agent base class: defines the basic behavior of an agent.
+"""
+from dataclasses import dataclass
+from typing import Generator
+from agents.agent_profiles import get_agent_profile
+
+
+@dataclass
+class AgentMessage:
+    """A single agent utterance."""
+    agent_id: str
+    agent_name: str
+    emoji: str
+    content: str
+    round_num: int
+
+
+class BaseAgent:
+    """Agent base class."""
+
+    def __init__(self, agent_id: str, llm_client):
+        """
+        Initialize the agent.
+
+        Args:
+            agent_id: agent identifier (e.g. 'ceo', 'cto')
+            llm_client: LLM client instance
+        """
+        self.agent_id = agent_id
+        self.llm_client = llm_client
+
+        profile = get_agent_profile(agent_id)
+        if not profile:
+            raise ValueError(f"Unknown agent id: {agent_id}")
+
+        self.name = profile["name"]
+        self.emoji = profile["emoji"]
+        self.perspective = profile["perspective"]
+        self.focus_areas = profile["focus_areas"]
+        self.system_prompt = profile["system_prompt"]
+
+        # Conversation history for this agent
+        self.conversation_history = []
+
+    def generate_response(
+        self,
+        topic: str,
+        context: str = "",
+        previous_speeches: list = None,
+        round_num: int = 1
+    ) -> Generator[str, None, None]:
+        """
+        Generate this agent's speech (streaming).
+
+        Args:
+            topic: the topic under discussion
+            context: background information
+            previous_speeches: speeches by other agents so far
+            round_num: current round number
+
+        Yields:
+            str: streamed text chunks
+        """
+        # Build the conversation prompt
+        user_prompt = self._build_user_prompt(topic, context, previous_speeches, round_num)
+
+        # Call the LLM and stream the reply
+        full_response = ""
+        for chunk in self.llm_client.chat_stream(
+            system_prompt=self.system_prompt,
+            user_prompt=user_prompt
+        ):
+            full_response += chunk
+            yield chunk
+
+        # Save to history
+        self.conversation_history.append({
+            "round": round_num,
+            "content": full_response
+        })
+
+    def _build_user_prompt(
+        self,
+        topic: str,
+        context: str,
+        previous_speeches: list,
+        round_num: int
+    ) -> str:
+        """Build the user prompt."""
+
+        prompt_parts = [f"## Topic\n{topic}"]
+
+        if context:
+            prompt_parts.append(f"\n## Background\n{context}")
+
+        if previous_speeches and len(previous_speeches) > 0:
+            prompt_parts.append("\n## What others have said")
+            for speech in previous_speeches:
+                prompt_parts.append(
+                    f"\n**{speech['emoji']} {speech['name']}**:\n{speech['content']}"
+                )
+
+        if round_num == 1:
+            prompt_parts.append(
+                f"\n## Your task\n"
+                f"As {self.name}, give your view on this topic from your professional perspective ({self.perspective}).\n"
+                f"Focus on: {', '.join(self.focus_areas)}\n"
+                f"Give 2-3 core points, each argued in 1-2 sentences. Be concise and forceful."
+            )
+        else:
+            prompt_parts.append(
+                f"\n## Your task\n"
+                f"This is round {round_num}. Respond to the other participants:\n"
+                f"- Which points do you agree or disagree with, and why?\n"
+                f"- Is anything important being overlooked?\n"
+                f"- Has your position shifted?\n"
+                f"Stay concise and focus on the 1-2 points that matter most."
+            )
+
+        return "\n".join(prompt_parts)
+
+    def get_summary(self) -> str:
+        """Summarize all of this agent's speeches."""
+        if not self.conversation_history:
+            return "No speeches yet"
+
+        return "\n---\n".join([
+            f"Round {h['round']}: {h['content']}"
+            for h in self.conversation_history
+        ])
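Although the AGENT_PROFILES entries themselves are lost above, `BaseAgent.__init__` fixes their shape exactly: five keys per role. A hedged sketch of one entry and of driving an agent (the prompt wording is invented; the key names are the ones the constructor reads):

```python
# Shape of one AGENT_PROFILES entry, as required by BaseAgent.__init__.
# The prompt text here is illustrative, not the original.
EXAMPLE_PROFILE = {
    "cto": {
        "name": "CTO",
        "emoji": "👨‍💻",
        "perspective": "technical feasibility",
        "focus_areas": ["difficulty", "resource needs", "tech debt"],
        "system_prompt": "You are the CTO. Judge proposals by engineering cost and risk.",
    }
}

# Driving an agent once the profile is registered (llm_client is anything
# exposing chat_stream(system_prompt=..., user_prompt=...)):
#
#   agent = BaseAgent("cto", llm_client)
#   for chunk in agent.generate_response(topic="Ship the AI assistant in Q2?"):
#       print(chunk, end="", flush=True)
```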
diff --git a/agents/research_agent.py b/agents/research_agent.py
new file mode 100644
index 0000000..9248196
--- /dev/null
+++ b/agents/research_agent.py
@@ -0,0 +1,44 @@
+from typing import Generator, List, Dict
+from utils.llm_client import LLMClient
+import config
+
+class ResearchAgent:
+    """Agent roles for research mode."""
+
+    def __init__(self, role: str, llm_client: LLMClient):
+        self.role = role
+        self.llm_client = llm_client
+        self.role_config = config.RESEARCH_MODEL_ROLES.get(role, {})
+        self.name = self.role_config.get("name", role.capitalize())
+
+    def _get_system_prompt(self, context: str = "") -> str:
+        if self.role == "planner":
+            return f"""You are a Senior Research Planner.
+Your goal is to break down a complex user topic into a structured research plan.
+You must create a clear, step-by-step plan that covers different angles of the topic.
+Format your output as a Markdown list of steps.
+Context: {context}"""
+
+        elif self.role == "researcher":
+            return f"""You are a Deep Researcher.
+Your goal is to execute a specific research step and provide detailed, in-depth analysis.
+Use your vast knowledge to provide specific facts, figures, and logical reasoning.
+Do not be superficial. Go deep.
+Context: {context}"""
+
+        elif self.role == "writer":
+            return f"""You are a Senior Report Writer.
+Your goal is to synthesize multiple research findings into a cohesive, high-quality report.
+The report should be well-structured, easy to read, and provide actionable insights.
+Context: {context}"""
+
+        else:
+            return "You are a helpful assistant."
+
+    def generate(self, prompt: str, context: str = "") -> Generator[str, None, None]:
+        """Generate a streamed response."""
+        system_prompt = self._get_system_prompt(context)
+        yield from self.llm_client.chat_stream(
+            system_prompt=system_prompt,
+            user_prompt=prompt
+        )
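For the three research roles the wiring is the same as `ResearchManager` performs further down in this patch; a minimal standalone sketch (the constructor arguments mirror app.py, the key is a placeholder):

```python
from agents.research_agent import ResearchAgent
from utils.llm_client import LLMClient

# Build a planner the same way ResearchManager._get_client() does.
planner = ResearchAgent("planner", LLMClient(
    provider="aihubmix",
    api_key="sk-your-api-key-here",   # placeholder
    base_url="https://aihubmix.com/v1",
    model="gpt-4o",                   # default from config.RESEARCH_MODEL_ROLES
))

# generate() is a generator; stream the Markdown plan as it arrives.
for chunk in planner.generate("How do small language models get strong at math?"):
    print(chunk, end="", flush=True)
```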
diff --git a/app.py b/app.py
new file mode 100644
index 0000000..223000a
--- /dev/null
+++ b/app.py
@@ -0,0 +1,556 @@
+"""
+Multi-Agent Decision Workshop: main application.
+Helps users make better decisions through multi-role debate.
+"""
+import streamlit as st
+import os
+from dotenv import load_dotenv
+
+# Load environment variables
+load_dotenv()
+
+from agents import get_all_agents, get_recommended_agents, AGENT_PROFILES
+from orchestrator import DebateManager, DebateConfig
+from orchestrator.research_manager import ResearchManager, ResearchConfig
+from report import ReportGenerator
+from utils import LLMClient
+import config
+
+# ==================== Page config ====================
+st.set_page_config(
+    page_title="🎭 Multi-Agent Decision Workshop",
+    page_icon="🎭",
+    layout="wide",
+    initial_sidebar_state="expanded"
+)
+
+# ==================== Styles ====================
+st.markdown("""
+""", unsafe_allow_html=True)
+
+# ==================== Constants ====================
+# Read the API key from the environment (kept in the .env file)
+DEFAULT_API_KEY = os.getenv("AIHUBMIX_API_KEY", "")
+
+# Supported models
+AVAILABLE_MODELS = {
+    "gpt-4o": "GPT-4o (recommended)",
+    "gpt-4o-mini": "GPT-4o Mini (fast)",
+    "gpt-4-turbo": "GPT-4 Turbo",
+    "gpt-3.5-turbo": "GPT-3.5 Turbo (budget)",
+    "claude-3-5-sonnet-20241022": "Claude 3.5 Sonnet",
+    "claude-3-opus-20240229": "Claude 3 Opus",
+    "claude-3-haiku-20240307": "Claude 3 Haiku (fast)",
+    "deepseek-chat": "DeepSeek Chat",
+    "deepseek-coder": "DeepSeek Coder",
+    "gemini-1.5-pro": "Gemini 1.5 Pro",
+    "gemini-1.5-flash": "Gemini 1.5 Flash",
+    "qwen-turbo": "Qwen Turbo",
+    "qwen-plus": "Qwen Plus",
+    "glm-4": "Zhipu GLM-4",
+    "moonshot-v1-8k": "Moonshot",
+}
+
+# Decision types
+DECISION_TYPES = {
+    "product": "Product proposal",
+    "business": "Business decision",
+    "tech": "Tech selection",
+    "personal": "Personal planning"
+}
+
+# ==================== Session state ====================
+if "mode" not in st.session_state:
+    st.session_state.mode = "Deep Research"
+
+# Debate state
+if "debate_started" not in st.session_state:
+    st.session_state.debate_started = False
+if "debate_finished" not in st.session_state:
+    st.session_state.debate_finished = False
+if "speeches" not in st.session_state:
+    st.session_state.speeches = []
+if "report" not in st.session_state:
+    st.session_state.report = ""
+if "custom_agents" not in st.session_state:
+    st.session_state.custom_agents = {}
+
+# Research state
+if "research_plan" not in st.session_state:
+    st.session_state.research_plan = ""
+if "research_started" not in st.session_state:
+    st.session_state.research_started = False
+if "research_output" not in st.session_state:
+    st.session_state.research_output = ""  # Final report
+if "research_steps_output" not in st.session_state:
+    st.session_state.research_steps_output = []  # Per-step results
+
+
+# ==================== Sidebar: configuration ====================
+with st.sidebar:
+    st.header("⚙️ Settings")
+
+    # Global API key settings
+    with st.expander("🔑 API Key", expanded=True):
+        use_custom_key = st.checkbox("Use a custom API key")
+        if use_custom_key:
+            api_key = st.text_input(
+                "API Key",
+                type="password",
+                help="Leave empty to fall back to the key from the environment"
+            )
+            if not api_key:
+                # Fall back to the env key, as the help text promises
+                api_key = DEFAULT_API_KEY
+        else:
+            api_key = DEFAULT_API_KEY
+
+    st.divider()
+
+    # Mode selection
+    mode = st.radio(
+        "📊 Mode",
+        ["Deep Research", "Debate Workshop"],
+        index=0 if st.session_state.mode == "Deep Research" else 1
+    )
+    st.session_state.mode = mode
+
+    st.divider()
+
+    if mode == "Deep Research":
+        st.subheader("🧪 Research model configuration")
+
+        # Per-role model selection for the three research roles
+        roles_config = {}
+        for role_key, role_info in config.RESEARCH_MODEL_ROLES.items():
+            roles_config[role_key] = st.selectbox(
+                f"{role_info['name']} ({role_info['description']})",
+                options=list(AVAILABLE_MODELS.keys()),
+                index=list(AVAILABLE_MODELS.keys()).index(role_info['default_model']) if role_info['default_model'] in AVAILABLE_MODELS else 0,
+                key=f"model_{role_key}"
+            )
+
+    else:  # Debate Workshop
+        # Model selection
+        model = st.selectbox(
+            "🤖 Model",
+            options=list(AVAILABLE_MODELS.keys()),
+            format_func=lambda x: AVAILABLE_MODELS[x],
+            index=0,
+            help="Model used to run the debate"
+        )
+
+        # Debate settings
+        max_rounds = st.slider(
+            "🔄 Debate rounds",
+            min_value=1,
+            max_value=4,
+            value=2,
+            help="Every agent speaks once per round"
+        )
+
+        st.divider()
+
+        # ==================== Custom roles (debate only) ====================
+        st.subheader("✨ Custom roles")
+
+        with st.expander("➕ Add a role", expanded=False):
+            new_agent_name = st.text_input("Role name", placeholder="e.g. Legal counsel", key="new_agent_name")
+            new_agent_emoji = st.text_input("Role emoji", value="🎯", max_chars=2, key="new_agent_emoji")
+            new_agent_perspective = st.text_input("Perspective", placeholder="e.g. legal compliance", key="new_agent_perspective")
+            new_agent_focus = st.text_input("Focus areas (comma separated)", placeholder="e.g. compliance risk, contract terms", key="new_agent_focus")
+            new_agent_prompt = st.text_area("Role system prompt", placeholder="Describe how this role thinks...", height=100, key="new_agent_prompt")
+
+            if st.button("✅ Add role", use_container_width=True):
+                if new_agent_name and new_agent_prompt:
+                    agent_id = f"custom_{len(st.session_state.custom_agents)}"
+                    st.session_state.custom_agents[agent_id] = {
+                        "name": new_agent_name,
+                        "emoji": new_agent_emoji,
+                        "perspective": new_agent_perspective or "custom perspective",
+                        "focus_areas": [f.strip() for f in new_agent_focus.split(",") if f.strip()],
+                        "system_prompt": new_agent_prompt
+                    }
+                    st.success(f"Added role: {new_agent_emoji} {new_agent_name}")
+                    st.rerun()
+                else:
+                    st.warning("Please fill in at least the role name and prompt")
+
+        # Show the custom roles added so far
+        if st.session_state.custom_agents:
+            st.markdown("**Custom roles added:**")
+            for agent_id, agent_info in list(st.session_state.custom_agents.items()):
+                col1, col2 = st.columns([3, 1])
+                with col1:
+                    st.markdown(f"{agent_info['emoji']} {agent_info['name']}")
+                with col2:
+                    if st.button("🗑️", key=f"del_{agent_id}"):
+                        del st.session_state.custom_agents[agent_id]
+                        st.rerun()
+
+# ==================== Main UI ====================
+
+if mode == "Deep Research":
+    st.title("🧪 Deep Research Mode")
+    st.markdown("*Deep research mode: plan -> research -> report*")
+
+    # Input
+    research_topic = st.text_area("Research topic", placeholder="What do you want to research in depth?", height=100)
+    research_context = st.text_area("Additional background (optional)", placeholder="Any extra context...", height=80)
+
+    generate_plan_btn = st.button("📝 Generate research plan", type="primary", disabled=not research_topic)
+
+    if generate_plan_btn and research_topic:
+        st.session_state.research_started = False
+        st.session_state.research_output = ""
+        st.session_state.research_steps_output = []
+
+        manager = ResearchManager(api_key=api_key)
+        config_obj = ResearchConfig(
+            topic=research_topic,
+            context=research_context,
+            planner_model=roles_config['planner'],
+            researcher_model=roles_config['researcher'],
+            writer_model=roles_config['writer']
+        )
+        manager.create_agents(config_obj)
+
+        with st.spinner("Drafting the research plan..."):
+            plan_text = ""
+            for chunk in manager.generate_plan(research_topic, research_context):
+                plan_text += chunk
+            st.session_state.research_plan = plan_text
+
+    # Plan review and edit
+    if st.session_state.research_plan:
+        st.divider()
+        st.subheader("📋 Confirm the research plan")
+
+        edited_plan = st.text_area("Review and edit the plan (Markdown)", value=st.session_state.research_plan, height=300)
+        st.session_state.research_plan = edited_plan
+
+        start_research_btn = st.button("🚀 Start deep research", type="primary")
+
+        if start_research_btn:
+            st.session_state.research_started = True
+            st.session_state.research_steps_output = []  # Reset the steps
+
+            # Parse the plan into steps (simple heuristic: keep lines that start
+            # with a list marker; a worked example follows this mode block).
+            steps = [line.strip() for line in edited_plan.split('\n') if line.strip().startswith(('-', '*', '1.', '2.', '3.', '4.', '5.'))]
+            if not steps:
+                steps = [edited_plan]  # Fall back to the whole plan if it is not a list
+
+            manager = ResearchManager(api_key=api_key)
+            config_obj = ResearchConfig(
+                topic=research_topic,
+                context=research_context,
+                planner_model=roles_config['planner'],
+                researcher_model=roles_config['researcher'],
+                writer_model=roles_config['writer']
+            )
+            manager.create_agents(config_obj)
+
+            # Execute the steps
+            previous_findings = ""
+            st.divider()
+            st.subheader("🔍 Research in progress...")
+
+            step_progress = st.container()
+
+            for i, step in enumerate(steps):
+                with step_progress:
+                    with st.status(f"Researching: {step}", expanded=True):
+                        findings_text = ""
+                        placeholder = st.empty()
+                        for chunk in manager.execute_step(step, previous_findings):
+                            findings_text += chunk
+                            placeholder.markdown(findings_text)
+
+                        st.session_state.research_steps_output.append(f"### {step}\n{findings_text}")
+                        previous_findings += f"\n\nFinding for '{step}':\n{findings_text}"
+
+            # Final report
+            st.divider()
+            st.subheader("📄 Generating the final report...")
+            report_placeholder = st.empty()
+            final_report = ""
+            for chunk in manager.generate_report(research_topic, previous_findings):
+                final_report += chunk
+                report_placeholder.markdown(final_report)
+
+            st.session_state.research_output = final_report
+            st.success("✅ Research finished")
+
+    # Show the final report if available
+    if st.session_state.research_output:
+        st.divider()
+        st.subheader("📄 Final research report")
+        st.markdown(st.session_state.research_output)
+        st.download_button("📥 Download report", st.session_state.research_output, "research_report.md")
+
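+# A worked example of the plan-parsing heuristic used above (illustrative only):
+# an edited plan of
+#     ## Plan
+#     - Survey the current market size
+#     - Profile the top three competitors
+#     Some closing prose...
+# yields steps == ["- Survey the current market size",
+#                  "- Profile the top three competitors"]
+# (headers and prose lines are skipped), while a plan with no list lines at all
+# falls back to steps == [edited_plan].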
+elif mode == "Debate Workshop":
+    # ==================== Original debate UI ====================
+    st.title("🎭 Multi-Agent Decision Workshop")
+    st.markdown("*Let several AI roles debate from different perspectives to round out your decision*")
+
+    # ==================== Input area ====================
+    col1, col2 = st.columns([2, 1])
+
+    with col1:
+        st.subheader("📝 Decision topic")
+
+        # Decision type
+        decision_type = st.selectbox(
+            "Decision type",
+            options=list(DECISION_TYPES.keys()),
+            format_func=lambda x: DECISION_TYPES[x],
+            index=0
+        )
+
+        # Topic input
+        topic = st.text_area(
+            "Describe the decision you are facing",
+            placeholder="e.g. Should we ship the AI assistant feature in Q2?\n\nOr: should I accept this new job offer?",
+            height=120
+        )
+
+        # Background (optional). text_area returns "" when left empty, so
+        # `context` is always bound after this block.
+        with st.expander("➕ Add background (optional)"):
+            context = st.text_area(
+                "Background",
+                placeholder="More context, such as:\n- current situation\n- resources and constraints\n- relevant data and facts",
+                height=100
+            )
+
+    with col2:
+        st.subheader("🎭 Pick the participants")
+
+        # Recommended roles for this decision type
+        recommended = get_recommended_agents(decision_type)
+        all_agents = get_all_agents()
+
+        # Built-in roles
+        st.markdown("**Built-in roles:**")
+        selected_agents = []
+        for agent in all_agents:
+            is_recommended = agent["id"] in recommended
+            default_checked = is_recommended
+
+            if st.checkbox(
+                f"{agent['emoji']} {agent['name']}",
+                value=default_checked,
+                key=f"agent_{agent['id']}"
+            ):
+                selected_agents.append(agent["id"])
+
+        # Custom roles
+        if st.session_state.custom_agents:
+            st.markdown("**Custom roles:**")
+            for agent_id, agent_info in st.session_state.custom_agents.items():
+                if st.checkbox(
+                    f"{agent_info['emoji']} {agent_info['name']}",
+                    value=True,
+                    key=f"agent_{agent_id}"
+                ):
+                    selected_agents.append(agent_id)
+
+        # Participant count hint
+        if len(selected_agents) < 2:
+            st.warning("Pick at least 2 roles")
+        elif len(selected_agents) > 6:
+            st.warning("No more than 6 roles is recommended")
+        else:
+            st.info(f"{len(selected_agents)} roles selected")
+
+    # ==================== Debate controls ====================
+    st.divider()
+
+    col_btn1, col_btn2, col_btn3 = st.columns([1, 1, 2])
+
+    with col_btn1:
+        start_btn = st.button(
+            "🚀 Start debate",
+            disabled=(not topic or len(selected_agents) < 2 or not api_key),
+            type="primary",
+            use_container_width=True
+        )
+
+    with col_btn2:
+        reset_btn = st.button(
+            "🔄 Reset",
+            use_container_width=True
+        )
+
+    if reset_btn:
+        st.session_state.debate_started = False
+        st.session_state.debate_finished = False
+        st.session_state.speeches = []
+        st.session_state.report = ""
+        st.rerun()
+
+    # ==================== Debate view ====================
+    if start_btn and topic and len(selected_agents) >= 2:
+        st.session_state.debate_started = True
+        st.session_state.speeches = []
+
+        st.divider()
+        st.subheader("🎬 Debate in progress...")
+
+        # Temporarily register the custom roles in agent_profiles
+        from agents import agent_profiles
+        original_profiles = dict(agent_profiles.AGENT_PROFILES)
+        agent_profiles.AGENT_PROFILES.update(st.session_state.custom_agents)
+
+        try:
+            # Debate mode is currently pinned to the AIHubMix provider; the
+            # provider picker was dropped from the sidebar in this refactor.
+            llm_client = LLMClient(
+                provider="aihubmix",
+                api_key=api_key,
+                base_url="https://aihubmix.com/v1",
+                model=model
+            )
+            debate_manager = DebateManager(llm_client)
+
+            # Configure the debate
+            debate_config = DebateConfig(
+                topic=topic,
+                context=context,
+                agent_ids=selected_agents,
+                max_rounds=max_rounds
+            )
+            debate_manager.setup_debate(debate_config)
+
+            # Run the debate (streaming)
+            current_round = 0
+            speech_placeholder = None
+
+            for event in debate_manager.run_debate_stream():
+                if event["type"] == "round_start":
+                    current_round = event["round"]
+                    st.markdown(
+                        f'<div class="round-header">📢 Round {current_round}</div>',
+                        unsafe_allow_html=True
+                    )
+
+                elif event["type"] == "speech_start":
+                    st.markdown(f"**{event['emoji']} {event['agent_name']}**")
+                    speech_placeholder = st.empty()
+                    current_content = ""
+
+                elif event["type"] == "speech_chunk":
+                    current_content += event["chunk"]
+                    speech_placeholder.markdown(current_content)
+
+                elif event["type"] == "speech_end":
+                    st.session_state.speeches.append({
+                        "agent_id": event["agent_id"],
+                        "content": event["content"],
+                        "round": current_round
+                    })
+                    st.divider()
+
+                elif event["type"] == "debate_end":
+                    st.session_state.debate_finished = True
+                    st.success("✅ Debate finished! Generating the decision report...")
+
+            # Generate the report
+            if st.session_state.debate_finished:
+                report_generator = ReportGenerator(llm_client)
+                speeches = debate_manager.get_all_speeches()
+
+                st.subheader("📊 Decision report")
+                report_placeholder = st.empty()
+                report_content = ""
+
+                for chunk in report_generator.generate_report_stream(
+                    topic=topic,
+                    speeches=speeches,
+                    context=context
+                ):
+                    report_content += chunk
+                    report_placeholder.markdown(report_content)
+
+                st.session_state.report = report_content
+
+                # Download button
+                st.download_button(
+                    label="📥 Download report (Markdown)",
+                    data=report_content,
+                    file_name="decision_report.md",
+                    mime="text/markdown"
+                )
+
+        except Exception as e:
+            st.error(f"Error: {str(e)}")
+            import traceback
+            st.code(traceback.format_exc())
+            st.info("Check that your API key and model settings are correct")
+
+        finally:
+            # Restore the original role registry
+            agent_profiles.AGENT_PROFILES = original_profiles
+
+    # ==================== Previous report ====================
+    elif st.session_state.report and not start_btn:
+        st.divider()
+        st.subheader("📊 Last decision report")
+        st.markdown(st.session_state.report)
+
+        st.download_button(
+            label="📥 Download report (Markdown)",
+            data=st.session_state.report,
+            file_name="decision_report.md",
+            mime="text/markdown"
+        )
+
+# ==================== Footer ====================
+st.divider()
+col_footer1, col_footer2, col_footer3 = st.columns(3)
+with col_footer2:
+    st.markdown(
+        "<div style='text-align: center;'>"
+        "🎭 Multi-Agent Decision Workshop"
+        "</div>",
+        unsafe_allow_html=True
+    )
diff --git a/config.py b/config.py
new file mode 100644
index 0000000..abc657c
--- /dev/null
+++ b/config.py
@@ -0,0 +1,50 @@
+"""
+Configuration: API keys and model settings.
+"""
+import os
+from dotenv import load_dotenv
+
+load_dotenv()
+
+# API configuration (never hardcode real keys here; they belong in .env)
+ANTHROPIC_API_KEY = os.getenv("ANTHROPIC_API_KEY", "")
+OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")
+AIHUBMIX_API_KEY = os.getenv("AIHUBMIX_API_KEY", "")
+
+# AIHubMix settings
+AIHUBMIX_BASE_URL = "https://aihubmix.com/v1"
+
+# Model settings
+DEFAULT_MODEL = "gpt-4o"   # a model supported by AIHubMix
+LLM_PROVIDER = "aihubmix"  # default provider
+
+# Debate settings
+MAX_DEBATE_ROUNDS = 3  # maximum debate rounds
+MAX_AGENTS = 6         # maximum number of participating agents
+
+# Role-to-model mapping for research mode
+RESEARCH_MODEL_ROLES = {
+    "planner": {
+        "name": "Planner",
+        "default_model": "gpt-4o",
+        "description": "Breaks the problem down and drafts the research plan"
+    },
+    "researcher": {
+        "name": "Researcher",
+        "default_model": "gemini-1.5-pro",
+        "description": "Executes individual research steps with in-depth analysis"
+    },
+    "writer": {
+        "name": "Writer",
+        "default_model": "claude-3-5-sonnet-20241022",
+        "description": "Synthesizes the findings and writes the final report"
+    }
+}
+
+# Decision types
+DECISION_TYPES = {
+    "product": "Product proposal",
+    "business": "Business decision",
+    "tech": "Tech selection",
+    "personal": "Personal planning"
+}
diff --git a/orchestrator/__init__.py b/orchestrator/__init__.py
new file mode 100644
index 0000000..b087e84
--- /dev/null
+++ b/orchestrator/__init__.py
@@ -0,0 +1,4 @@
+"""Orchestrator package."""
+from orchestrator.debate_manager import DebateManager, DebateConfig, SpeechRecord
+
+__all__ = ["DebateManager", "DebateConfig", "SpeechRecord"]
diff --git a/orchestrator/__pycache__/__init__.cpython-313.pyc b/orchestrator/__pycache__/__init__.cpython-313.pyc
new file mode 100644
(GIT binary patch omitted: compiled bytecode)
diff --git a/orchestrator/__pycache__/debate_manager.cpython-313.pyc b/orchestrator/__pycache__/debate_manager.cpython-313.pyc
new file mode 100644
(GIT binary patch omitted: compiled bytecode)
diff --git a/orchestrator/__pycache__/research_manager.cpython-313.pyc b/orchestrator/__pycache__/research_manager.cpython-313.pyc
new file mode 100644
(GIT binary patch omitted: compiled bytecode)
diff --git a/orchestrator/debate_manager.py b/orchestrator/debate_manager.py
new file mode 100644
--- /dev/null
+++ b/orchestrator/debate_manager.py
@@ -0,0 +1,160 @@
(The opening lines of this file are garbled in this copy of the patch; the imports and
the two dataclasses below are reconstructed from how app.py, report_generator.py, and
the surviving method bodies use them.)
+"""Debate orchestration: manages the multi-round debate flow."""
+from dataclasses import dataclass
+from typing import Generator, List, Callable
+from agents.base_agent import BaseAgent
+
+
+@dataclass
+class SpeechRecord:
+    """One recorded speech."""
+    agent_id: str
+    agent_name: str
+    emoji: str
+    content: str
+    round_num: int
+
+
+@dataclass
+class DebateConfig:
+    """Debate configuration."""
+    topic: str
+    context: str
+    agent_ids: List[str]
+    max_rounds: int
+
+
+class DebateManager:
+    """Orchestrates the debate."""
+
+    def __init__(self, llm_client):
+        self.llm_client = llm_client
+        self.config = None
+        self.agents = []
+        self.speech_records = []
+        self.current_round = 0
+
+    def setup_debate(self, debate_config: DebateConfig) -> None:
+        """
+        Set up the debate.
+
+        Args:
+            debate_config: debate configuration
+        """
+        self.config = debate_config
+        self.agents = []
+        self.speech_records = []
+        self.current_round = 0
+
+        # Create the participating agents
+        for agent_id in debate_config.agent_ids:
+            agent = BaseAgent(agent_id, self.llm_client)
+            self.agents.append(agent)
+
+    def run_debate_stream(
+        self,
+        on_speech_start: Callable = None,
+        on_speech_chunk: Callable = None,
+        on_speech_end: Callable = None,
+        on_round_end: Callable = None
+    ) -> Generator[dict, None, None]:
+        """
+        Run the debate (streaming).
+
+        Args:
+            on_speech_start: speech-start callback
+            on_speech_chunk: speech-chunk callback
+            on_speech_end: speech-end callback
+            on_round_end: round-end callback
+
+        Yields:
+            dict: event records
+        """
+        for round_num in range(1, self.config.max_rounds + 1):
+            self.current_round = round_num
+
+            yield {
+                "type": "round_start",
+                "round": round_num,
+                "total_rounds": self.config.max_rounds
+            }
+
+            for agent in self.agents:
+                # Collect earlier speeches (excluding this agent's own)
+                previous_speeches = [
+                    {
+                        "name": r.agent_name,
+                        "emoji": r.emoji,
+                        "content": r.content
+                    }
+                    for r in self.speech_records
+                    if r.agent_id != agent.agent_id
+                ]
+
+                yield {
+                    "type": "speech_start",
+                    "agent_id": agent.agent_id,
+                    "agent_name": agent.name,
+                    "emoji": agent.emoji,
+                    "round": round_num
+                }
+
+                # Stream the speech
+                full_content = ""
+                for chunk in agent.generate_response(
+                    topic=self.config.topic,
+                    context=self.config.context,
+                    previous_speeches=previous_speeches,
+                    round_num=round_num
+                ):
+                    full_content += chunk
+                    yield {
+                        "type": "speech_chunk",
+                        "agent_id": agent.agent_id,
+                        "chunk": chunk
+                    }
+
+                # Record the speech
+                record = SpeechRecord(
+                    agent_id=agent.agent_id,
+                    agent_name=agent.name,
+                    emoji=agent.emoji,
+                    content=full_content,
+                    round_num=round_num
+                )
+                self.speech_records.append(record)
+
+                yield {
+                    "type": "speech_end",
+                    "agent_id": agent.agent_id,
+                    "content": full_content
+                }
+
+            yield {
+                "type": "round_end",
+                "round": round_num
+            }
+
+        yield {"type": "debate_end"}
+
+    def get_all_speeches(self) -> List[SpeechRecord]:
+        """Return all speech records."""
+        return self.speech_records
+
+    def get_speeches_by_round(self, round_num: int) -> List[SpeechRecord]:
+        """Return the speeches from a given round."""
+        return [r for r in self.speech_records if r.round_num == round_num]
+
+    def get_speeches_by_agent(self, agent_id: str) -> List[SpeechRecord]:
+        """Return all speeches by a given agent."""
+        return [r for r in self.speech_records if r.agent_id == agent_id]
diff --git a/orchestrator/research_manager.py b/orchestrator/research_manager.py
new file mode 100644
index 0000000..b25d888
--- /dev/null
+++ b/orchestrator/research_manager.py
@@ -0,0 +1,51 @@
+from typing import List, Dict, Generator
+from dataclasses import dataclass
+from agents.research_agent import ResearchAgent
+from utils.llm_client import LLMClient
+import config
+
+@dataclass
+class ResearchConfig:
+    topic: str
+    context: str = ""
+    planner_model: str = "gpt-4o"
+    researcher_model: str = "gemini-1.5-pro"
+    writer_model: str = "claude-3-5-sonnet-20241022"
+
+class ResearchManager:
+    """Manages the Deep Research workflow."""
+
+    def __init__(self, api_key: str, base_url: str = None, provider: str = "aihubmix"):
+        self.api_key = api_key
+        self.base_url = base_url
+        self.provider = provider
+        self.agents = {}
+
+    def _get_client(self, model: str) -> LLMClient:
+        return LLMClient(
+            provider=self.provider,
+            api_key=self.api_key,
+            base_url=self.base_url,
+            model=model
+        )
+
+    def create_agents(self, config: ResearchConfig):
+        """Initialize the three role agents with their configured models."""
+        self.agents["planner"] = ResearchAgent("planner", self._get_client(config.planner_model))
+        self.agents["researcher"] = ResearchAgent("researcher", self._get_client(config.researcher_model))
+        self.agents["writer"] = ResearchAgent("writer", self._get_client(config.writer_model))
+
+    def generate_plan(self, topic: str, context: str) -> Generator[str, None, None]:
+        """Step 1: generate the research plan."""
+        prompt = f"Please create a comprehensive research plan for the topic: '{topic}'.\nBreak it down into 3-5 distinct, actionable steps."
+        yield from self.agents["planner"].generate(prompt, context)
+
+    def execute_step(self, step: str, previous_findings: str) -> Generator[str, None, None]:
+        """Step 2: execute a single research step."""
+        prompt = f"Execute this research step: '{step}'.\nPrevious findings: {previous_findings}"
+        yield from self.agents["researcher"].generate(prompt)
+
+    def generate_report(self, topic: str, all_findings: str) -> Generator[str, None, None]:
+        """Step 3: generate the final report."""
+        prompt = f"Write a final comprehensive report on '{topic}' based on these findings:\n{all_findings}"
+        yield from self.agents["writer"].generate(prompt)
diff --git a/report/__init__.py b/report/__init__.py
new file mode 100644
index 0000000..3df5ce4
--- /dev/null
+++ b/report/__init__.py
@@ -0,0 +1,4 @@
+"""Report package."""
+from report.report_generator import ReportGenerator
+
+__all__ = ["ReportGenerator"]
diff --git a/report/__pycache__/__init__.cpython-313.pyc b/report/__pycache__/__init__.cpython-313.pyc
new file mode 100644
(GIT binary patch omitted: compiled bytecode)
diff --git a/report/__pycache__/report_generator.cpython-313.pyc b/report/__pycache__/report_generator.cpython-313.pyc
new file mode 100644
(GIT binary patch omitted: compiled bytecode)
diff --git a/report/__init__.py b/report/__init__.py
new file mode 100644
index 0000000..3df5ce4
--- /dev/null
+++ b/report/__init__.py
@@ -0,0 +1,4 @@
+"""Report module."""
+from report.report_generator import ReportGenerator
+
+__all__ = ["ReportGenerator"]
diff --git a/report/__pycache__/__init__.cpython-313.pyc b/report/__pycache__/__init__.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c4e47f3c807108be6304efa7eca49c24b8658207
Binary files /dev/null and b/report/__pycache__/__init__.cpython-313.pyc differ
diff --git a/report/__pycache__/report_generator.cpython-313.pyc b/report/__pycache__/report_generator.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d305a96833f6b109f892924e8bae9d36e962127c
Binary files /dev/null and b/report/__pycache__/report_generator.cpython-313.pyc differ
diff --git a/report/report_generator.py b/report/report_generator.py
new file mode 100644
index 0000000..ebb3762
--- /dev/null
+++ b/report/report_generator.py
@@ -0,0 +1,143 @@
+"""
+Report generator - summarizes the debate and produces a decision report.
+"""
+from typing import List
+from orchestrator.debate_manager import SpeechRecord
+from utils.llm_client import LLMClient
+
+
+class ReportGenerator:
+    """Decision report generator."""
+
+    def __init__(self, llm_client: LLMClient = None):
+        self.llm_client = llm_client or LLMClient()
+
+    def _build_prompts(self, topic: str, speeches_text: str, context: str):
+        """Build the system/user prompts shared by both report methods."""
+        system_prompt = """You are a professional decision analyst, skilled at synthesizing many viewpoints into a structured decision report.
+
+Your task is to turn the experts' discussion into a clear, actionable decision report.
+
+Format requirements:
+1. Use Markdown.
+2. Keep the structure clear and the key points prominent.
+3. Distill the core points; do not quote the transcript verbatim.
+4. Give a concrete recommendation and next steps."""
+
+        context_block = f"## Background\n{context}\n\n" if context else ""
+        user_prompt = f"""## Topic
+{topic}
+
+{context_block}## Expert discussion transcript
+{speeches_text}
+
+## Your task
+Produce a decision report with the following sections:
+
+### 📋 Issue Overview
+(Summarize the core question in 1-2 sentences)
+
+### ✅ Supporting Arguments
+(The main reasons in favor, attributed to their roles)
+
+### ❌ Objections and Risks
+(The objections and risk points, attributed to their roles)
+
+### 🔑 Key Decision Factors
+(3-5 factors that deserve the most weight)
+
+### 💡 Recommendation and Next Steps
+(A clear recommendation plus concrete action items)
+
+### ⚖️ Decision Framework
+(A simple framework or checklist to support the final decision)
+"""
+        return system_prompt, user_prompt
+
+    def _format_speeches(self, speeches: List[SpeechRecord]) -> str:
+        """Format the speech records, grouped by round."""
+        formatted = []
+        current_round = 0
+
+        for speech in speeches:
+            if speech.round_num != current_round:
+                current_round = speech.round_num
+                formatted.append(f"\n### Round {current_round}\n")
+
+            formatted.append(
+                f"**{speech.emoji} {speech.agent_name}**:\n{speech.content}\n"
+            )
+
+        return "\n".join(formatted)
+
+    def generate_report(
+        self,
+        topic: str,
+        speeches: List[SpeechRecord],
+        context: str = ""
+    ) -> str:
+        """
+        Generate the decision report.
+
+        Args:
+            topic: The issue under discussion.
+            speeches: All speech records.
+            context: Background information.
+
+        Returns:
+            str: The decision report in Markdown format.
+        """
+        speeches_text = self._format_speeches(speeches)
+        system_prompt, user_prompt = self._build_prompts(topic, speeches_text, context)
+        return self.llm_client.chat(
+            system_prompt=system_prompt,
+            user_prompt=user_prompt,
+            max_tokens=2048
+        )
+
+    def generate_report_stream(
+        self,
+        topic: str,
+        speeches: List[SpeechRecord],
+        context: str = ""
+    ):
+        """Generate the decision report as a stream."""
+        speeches_text = self._format_speeches(speeches)
+        system_prompt, user_prompt = self._build_prompts(topic, speeches_text, context)
+        yield from self.llm_client.chat_stream(
+            system_prompt=system_prompt,
+            user_prompt=user_prompt,
+            max_tokens=2048
+        )
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..37ec9f4
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,7 @@
+# Multi-Agent Decision Workshop Dependencies
+
+streamlit>=1.28.0
+anthropic>=0.18.0
+openai>=1.12.0
+python-dotenv>=1.0.0
+pydantic>=2.0.0
diff --git a/utils/__init__.py b/utils/__init__.py
new file mode 100644
index 0000000..e28839c
--- /dev/null
+++ b/utils/__init__.py
@@ -0,0 +1,4 @@
+"""Utils module."""
+from utils.llm_client import LLMClient
+
+__all__ = ["LLMClient"]
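A short, hypothetical example wiring debate records into `ReportGenerator`. The `SpeechRecord` keyword arguments mirror how `DebateManager` constructs records above; the API key name follows `.env.example`:

```python
# Hypothetical ReportGenerator usage with hand-built records.
import os
from orchestrator.debate_manager import SpeechRecord
from report import ReportGenerator
from utils.llm_client import LLMClient

client = LLMClient(api_key=os.environ["AIHUBMIX_API_KEY"])  # key name per .env.example
generator = ReportGenerator(client)

speeches = [
    SpeechRecord(agent_id="ceo", agent_name="CEO", emoji="🧑‍💼",
                 content="The market window argues for shipping in Q2.", round_num=1),
    SpeechRecord(agent_id="cfo", agent_name="CFO", emoji="💰",
                 content="Current runway only supports a limited beta.", round_num=1),
]

# Non-streaming: returns the full Markdown report.
report_md = generator.generate_report("Ship the AI assistant in Q2?", speeches)

# Streaming variant, e.g. to update a UI placeholder incrementally.
for chunk in generator.generate_report_stream("Ship the AI assistant in Q2?", speeches):
    print(chunk, end="", flush=True)
```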
diff --git a/utils/__pycache__/__init__.cpython-313.pyc b/utils/__pycache__/__init__.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f9386efc2526a062c9446ceded5fefebbe2d72d4
Binary files /dev/null and b/utils/__pycache__/__init__.cpython-313.pyc differ
diff --git a/utils/__pycache__/llm_client.cpython-313.pyc b/utils/__pycache__/llm_client.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..73d56022cd20834cacb697406466c9ee344b1ce5
Binary files /dev/null and b/utils/__pycache__/llm_client.cpython-313.pyc differ
diff --git a/utils/llm_client.py b/utils/llm_client.py
new file mode 100644
index 0000000..6495d0f
--- /dev/null
+++ b/utils/llm_client.py
@@ -0,0 +1,141 @@
+"""
+LLM client wrapper - a unified interface over Anthropic/OpenAI/AIHubMix.
+"""
+from typing import Generator
+
+
+class LLMClient:
+    """Unified LLM API client."""
+
+    def __init__(
+        self,
+        provider: str = None,
+        api_key: str = None,
+        base_url: str = None,
+        model: str = None
+    ):
+        """
+        Initialize the LLM client.
+
+        Args:
+            provider: 'anthropic', 'openai', 'aihubmix', or 'custom'.
+            api_key: API key.
+            base_url: Custom API endpoint (for aihubmix/custom).
+            model: Model name override.
+        """
+        self.provider = provider or "aihubmix"
+
+        if self.provider == "anthropic":
+            from anthropic import Anthropic
+            self.client = Anthropic(api_key=api_key)
+            self.model = model or "claude-3-5-sonnet-20241022"
+
+        elif self.provider == "openai":
+            from openai import OpenAI
+            self.client = OpenAI(api_key=api_key)
+            self.model = model or "gpt-4o"
+
+        elif self.provider == "aihubmix":
+            # AIHubMix is compatible with the OpenAI API format
+            from openai import OpenAI
+            self.client = OpenAI(
+                api_key=api_key,
+                base_url=base_url or "https://aihubmix.com/v1"
+            )
+            self.model = model or "gpt-4o"
+
+        elif self.provider == "custom":
+            # Custom OpenAI-compatible endpoint (vLLM, Ollama, TGI, etc.)
+            from openai import OpenAI
+            self.client = OpenAI(
+                api_key=api_key or "not-needed",
+                base_url=base_url or "http://localhost:8000/v1"
+            )
+            self.model = model or "local-model"
+
+        else:
+            raise ValueError(f"Unsupported provider: {self.provider}")
+
+    def chat_stream(
+        self,
+        system_prompt: str,
+        user_prompt: str,
+        max_tokens: int = 1024
+    ) -> Generator[str, None, None]:
+        """
+        Streaming chat.
+
+        Args:
+            system_prompt: System prompt.
+            user_prompt: User input.
+            max_tokens: Maximum number of output tokens.
+
+        Yields:
+            str: Streamed text fragments.
+        """
+        if self.provider == "anthropic":
+            yield from self._anthropic_stream(system_prompt, user_prompt, max_tokens)
+        else:
+            yield from self._openai_stream(system_prompt, user_prompt, max_tokens)
+
+    def _anthropic_stream(
+        self,
+        system_prompt: str,
+        user_prompt: str,
+        max_tokens: int
+    ) -> Generator[str, None, None]:
+        """Anthropic streaming call."""
+        with self.client.messages.stream(
+            model=self.model,
+            max_tokens=max_tokens,
+            system=system_prompt,
+            messages=[{"role": "user", "content": user_prompt}]
+        ) as stream:
+            for text in stream.text_stream:
+                yield text
+
+    def _openai_stream(
+        self,
+        system_prompt: str,
+        user_prompt: str,
+        max_tokens: int
+    ) -> Generator[str, None, None]:
+        """OpenAI-compatible streaming call (AIHubMix, vLLM, etc.)."""
+        try:
+            stream = self.client.chat.completions.create(
+                model=self.model,
+                max_tokens=max_tokens,
+                stream=True,
+                messages=[
+                    {"role": "system", "content": system_prompt},
+                    {"role": "user", "content": user_prompt}
+                ]
+            )
+            for chunk in stream:
+                # Defensively extract the content delta; some providers send
+                # keep-alive chunks with empty choices or a missing content field.
+                if chunk.choices and len(chunk.choices) > 0:
+                    delta = chunk.choices[0].delta
+                    if delta and hasattr(delta, 'content') and delta.content:
+                        yield delta.content
+        except Exception as e:
+            yield f"\n\n[Error: {str(e)}]"
+
+    def chat(
+        self,
+        system_prompt: str,
+        user_prompt: str,
+        max_tokens: int = 1024
+    ) -> str:
+        """
+        Non-streaming chat.
+
+        Args:
+            system_prompt: System prompt.
+            user_prompt: User input.
+            max_tokens: Maximum number of output tokens.
+
+        Returns:
+            str: The complete response text.
+        """
+        return "".join(self.chat_stream(system_prompt, user_prompt, max_tokens))
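Finally, a minimal smoke test of the `LLMClient` abstraction, using only defaults defined in the constructor above and the key name from `.env.example`:

```python
# Minimal LLMClient smoke test (provider defaults as defined above).
import os
from utils.llm_client import LLMClient

client = LLMClient(api_key=os.environ["AIHUBMIX_API_KEY"])  # defaults to provider="aihubmix"

# Streaming call
for chunk in client.chat_stream("You are terse.", "Say hello in five words.", max_tokens=64):
    print(chunk, end="", flush=True)

# Non-streaming call; chat() simply joins the stream
print(client.chat("You are terse.", "Name one risk of rushing an MVP."))

# Pointing at a local OpenAI-compatible server (vLLM, Ollama) instead:
local = LLMClient(provider="custom", base_url="http://localhost:8000/v1", model="local-model")
```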