From 27ec6b3d858fe380a37a2e9cdc9a6ab97e60a169 Mon Sep 17 00:00:00 2001 From: xyz <123456@gmail.com> Date: Fri, 9 Jan 2026 09:25:02 +0800 Subject: [PATCH] =?UTF-8?q?feat:=20=E6=B7=BB=E5=8A=A0=E6=99=BA=E8=83=BD?= =?UTF-8?q?=E4=B8=93=E5=AE=B6=E7=94=9F=E6=88=90=E3=80=81=E5=86=B3=E7=AD=96?= =?UTF-8?q?=E5=9C=BA=E6=99=AF=E6=A8=A1=E6=9D=BF=E5=92=8C=E7=94=A8=E6=88=B7?= =?UTF-8?q?=E5=8F=8D=E9=A6=88=E5=8A=9F=E8=83=BD?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .gitignore | 10 + .storage/config.json | 6 +- README.md | 109 ++-- __pycache__/app.cpython-313.pyc | Bin 29238 -> 41934 bytes __pycache__/config.cpython-313.pyc | Bin 3279 -> 3359 bytes app.py | 499 ++++++++++++++++++- config.py | 3 + utils/__pycache__/llm_client.cpython-313.pyc | Bin 5264 -> 5379 bytes utils/__pycache__/storage.cpython-313.pyc | Bin 6406 -> 10468 bytes utils/auto_agent_generator.py | 108 ++++ utils/llm_client.py | 4 +- utils/storage.py | 32 ++ 12 files changed, 693 insertions(+), 78 deletions(-) create mode 100644 utils/auto_agent_generator.py diff --git a/.gitignore b/.gitignore index e69de29..3ca7834 100644 --- a/.gitignore +++ b/.gitignore @@ -0,0 +1,10 @@ +# Python 字节码缓存 +__pycache__/ +*.py[cod] +*$py.class + +# 项目特定的存储/缓存文件夹 +.storage/ + +# 环境变量文件(通常包含敏感信息) +.env \ No newline at end of file diff --git a/.storage/config.json b/.storage/config.json index bb995f7..c77fe55 100644 --- a/.storage/config.json +++ b/.storage/config.json @@ -1,6 +1,6 @@ { - "provider": "DeepSeek", - "api_key": "sk-ca812c913baa474182f6d4e83e078302", - "base_url": "https://api.deepseek.com", + "provider": "AIHubMix", + "api_key": "sk-yd8Tik0nFW5emKYcBdFc433b7c8b4dC182848f76819bBe73", + "base_url": "https://aihubmix.com/v1", "language": "Chinese" } \ No newline at end of file diff --git a/README.md b/README.md index 6ab56ac..5ff5ea6 100644 --- a/README.md +++ b/README.md @@ -1,76 +1,91 @@ -# Multi-Agent Council & Debate Workshop (V4) +# 🍎 智能决策工作坊 (Multi-Agent Council V4) -一个极简而强大的多智能体(Multi-Agent)决策辅助系统。 -**V4 版本**将传统的 "线性研究" 进化为 **"多模型智囊团 (Council V4)"**,支持多轮对话讨论、动态专家组建、以及多 API 平台接入。 +AI驱动的多智能体决策分析系统 - 基于多模型智囊团 -## ✨ 核心功能 (V4 Update) +## ✨ 核心功能 -### 1. 🧪 Multi-Model Council V4 (多模型智囊团) -摒弃了单一的"规划-执行"模式,现在的系统是一个真正的**圆桌会议**: -* **多轮对话讨论**: 专家不再是各自为战,而是像真实会议一样进行多轮(Round-Robin)对话,互相批判、补充观点。 -* **动态专家组建**: 你可以自定义 **2-5 位** 不同的专家(如 CEO, CTO, 法务)。 -* **自定义模型分配**: 为每个专家指定最擅长的模型(例如:让 DeepSeek-Coder 担任技术专家,让 GPT-4o 担任产品专家)。 -* **最终决策合成**: 讨论结束后,最后一位专家(Synthesizer)会综合全场观点,生成最终决策方案,并绘制 **Mermaid 路线图**。 +### 🧪 Multi-Model Council V4 (智囊团模式) +- **多轮对话讨论**: 专家像真实会议一样进行多轮对话,互相批判、补充观点 +- **动态专家组建**: 自定义 2-5 位专家,为每位指定最擅长的模型 +- **🪄 智能专家生成**: AI 根据主题自动推荐最合适的专家角色 +- **最终决策合成**: 最后一位专家综合全场观点,生成方案并绘制 Mermaid 路线图 -### 2. 🎭 Debate Workshop (辩论工作坊) -经典的辩论模式,让 AI 扮演不同立场的角色(如正方、反方、评审),通过激烈的辩论帮助你厘清复杂决策的利弊。 +### 🎯 内置决策场景 +系统预置 4 大典型决策场景,每个场景都配置了专业的典型问题: -### 3. 
🌐 Multi-Provider Support (多平台支持) -不再局限于单一平台,系统原生支持多种 API 源,随心切换: -* **DeepSeek Official**: 直接连接 `api.deepseek.com` -* **SiliconFlow (硅基流动)**: 连接 `api.siliconflow.cn` -* **AIHubMix**: 聚合平台 -* **OpenAI / Custom**: 支持标准 OpenAI 接口或本地 vLLM/Ollama +| 场景 | 描述 | +|------|------| +| 🚀 新产品发布评审 | 评估产品可行性、市场潜力和实施计划 | +| 💰 投资审批决策 | 分析投资项目的 ROI、风险和战略价值 | +| 🤝 合作伙伴评估 | 评估合作伙伴的匹配度和合作价值 | +| 📦 供应商评估 | 对比分析供应商的综合能力 | + +### 🎭 Debate Workshop (辩论工作坊) +让 AI 扮演不同立场角色,通过辩论帮助厘清复杂决策的利弊 + +### 💬 用户反馈 +内置用户反馈系统,收集功能建议和使用体验 + +### 🌐 多平台支持 +- **DeepSeek**: V3, R1, Coder +- **OpenAI**: GPT-4o, GPT-4o-mini +- **Anthropic**: Claude 3.5 Sonnet +- **Google**: Gemini 1.5/2.0 +- **SiliconFlow / AIHubMix / Deepseek** --- ## 🛠️ 安装 ```bash -# 1. 克隆项目 +# 克隆项目 git clone https://github.com/HomoDeusss/multi-agent.git cd multi-agent -# 2. 安装依赖 -pip install -r requirements.txt +# 初始化 uv 项目(如首次使用) +uv init + +# 安装依赖 +uv add streamlit openai anthropic python-dotenv + +# 或者同步现有依赖 +uv sync ``` ## 🚀 快速开始 -### 1. 启动应用 ```bash -streamlit run app.py +uv run streamlit run app.py ``` -### 2. 配置 API (V4 新特性) -无需手动修改 `.env` 文件(可选),直接在 Web 界面侧边栏配置: -1. 在侧边栏选择 **"API Provider"** (例如 `DeepSeek` 或 `SiliconFlow`)。 -2. 输入对应的 **API Key**。 -3. 系统会自动配置好 Base URL。 +### 使用步骤 -### 3. 使用 Council V4 模式 -1. 选择 **"Deep Research" (现已升级为 Council V4)**。 -2. **设定专家**: 选择专家人数(例如 3 人),并为每位专家命名并指定模型。 - * *Tip: 建议最后一位专家选一个逻辑能力强的模型(如 Claude 3.5 Sonnet)作为决策者。* -3. **设定轮数**: 选择讨论轮数(建议 2-3 轮)。 -4. 输入议题,点击开始。观察专家们如何互相对话! - -### 4. 使用 Debate 模式 -1. 切换到 **"Debate Workshop"**。 -2. 输入议题(如“是否应该全职做独立开发?”)。 -3. 选择参与辩论的角色。 -4. 点击开始,观看唇枪舌战。 +1. **配置 API**: 在侧边栏选择 Provider 并输入 API Key +2. **选择场景**: 点击预置的决策场景或自定义主题 +3. **生成专家**: 点击 "🪄 根据主题自动生成专家" 或手动配置 +4. **开始决策**: 观察专家们如何互相对话,生成综合方案 --- -## 🤖 支持的模型 (V4 Expanded) +## 📁 项目结构 -系统内置了最新的模型配置,支持在界面直接选择: -* **DeepSeek**: V3 (`deepseek-chat`), R1 (`deepseek-reasoner`), Coder V2 -* **OpenAI**: GPT-4o, GPT-4o-mini -* **Anthropic**: Claude 3.5 Sonnet, Claude 3 Opus -* **Google**: Gemini 1.5 Pro/Flash -* **Meta/Alibaba**: Llama 3.3, Qwen 2.5 +``` +multi_agent_workshop/ +├── app.py # Streamlit 主应用 +├── config.py # 配置文件 +├── agents/ # Agent 定义 +│ ├── agent_profiles.py # 预设角色配置 +│ ├── base_agent.py # 基础 Agent 类 +│ └── research_agent.py # 研究型 Agent +├── orchestrator/ # 编排器 +│ ├── debate_manager.py # 辩论管理 +│ └── research_manager.py # 智囊团管理 +├── utils/ +│ ├── llm_client.py # LLM 客户端封装 +│ ├── storage.py # 存储管理 +│ └── auto_agent_generator.py # 智能专家生成 +└── report/ # 报告生成 +``` ## 📝 License [MIT License](LICENSE) \ No newline at end of file diff --git a/__pycache__/app.cpython-313.pyc b/__pycache__/app.cpython-313.pyc index 45a71fff965fe2ce692e0184c857012ade49a361..e5965ce0d15282cdd70e22c3af032f887b8ce9f8 100644 GIT binary patch literal 41934 zcmd6Q33wFOm2P#nmR75^xAv{IFKCw#T0o!%p0mF@uLNBh$O#Y#Y^nhKmzm7~)(*+!jN!14I0+F=Q{o)6NtJhJ;&Vpo3h$?*xy;51r^5LYhU+Vl<>WFeKd? z!=f~#IWQ#O2E$^qNOxdJxebQJ_^`x*A@$Z67L`SYgaQ4xO84?rNeOG+D7Q#=SjlNi zu@1>cfY~nIJAVTv9rj>JvC@vm#aeD!O*?OHlUAQZ4^v>w zX}afTI68yQbl9H47|K}C)N?bGvCx61h|a=#Edg)#+zf}=Wg82OMRbm_gw8E?XO_E` zflEqqjLRLAtXO2GE@og{kwZE9l^ItwJ#;f&5;vGt-+`VyY0f)J7PW;g=%F3`=%AAO zV3i}kU+>1M#Uv)tWn59R8ThgC7lF)|ARFTs^F78gm$E33J(7IKl!JRex|zGs%N4#9 zy)<7*7aGgyWw*!aO93{IE`o-vHm-IplTL?EF|MGCg_#CnrmpENaiVffX0wo)#UAR7bjggcALkX~+FLzmJkid9U7vBEV6J1ZpV zLu#(=i9_dz?zaU)Vik)(I_MTwi6Ii?xeel-I(@8p}OfFx>vxob5WRkX@>3-V0Hm! 
za@S)n5(@SR_6+t4_73*ZV<{3&XVUcDEXH)7b@MOwE=ER?$*r6RCr5JW%5x^?=cQ$) o>lIYq;;_lhPbtkwwJS0MTF3~*#de!7a(!i-e2&ML#RSL)0H#wZAOHXW diff --git a/app.py b/app.py index e424796..4bc1a62 100644 --- a/app.py +++ b/app.py @@ -17,6 +17,7 @@ from report import ReportGenerator from report import ReportGenerator from utils import LLMClient from utils.storage import StorageManager +from utils.auto_agent_generator import generate_experts_for_topic import config # ==================== 页面配置 ==================== @@ -30,37 +31,146 @@ st.set_page_config( # ==================== 样式 ==================== st.markdown(""" """, unsafe_allow_html=True) @@ -122,6 +232,8 @@ if "research_output" not in st.session_state: st.session_state.research_output = "" # Final report if "research_steps_output" not in st.session_state: st.session_state.research_steps_output = [] # List of step results +if "generated_experts" not in st.session_state: + st.session_state.generated_experts = None # Auto-generated expert configs # ==================== 侧边栏:配置 ==================== @@ -210,7 +322,7 @@ with st.sidebar: save_current_config() if not api_key: - st.warning("请配置 API Key 以继续") + st.warning("⚠️ 请配置 API Key 以启用 AI 功能 (仍可查看历史档案)") # Output Language Selection lang_options = config.SUPPORTED_LANGUAGES @@ -251,8 +363,8 @@ with st.sidebar: # 模式选择 mode = st.radio( "📊 选择模式", - ["Council V4 (Deep Research)", "Debate Workshop", "📜 History Archives"], - index=0 if st.session_state.mode == "Deep Research" else (1 if st.session_state.mode == "Debate Workshop" else 2) + ["Council V4 (Deep Research)", "Debate Workshop", "📜 History Archives", "💬 用户反馈"], + index=0 if st.session_state.mode == "Deep Research" else (1 if st.session_state.mode == "Debate Workshop" else (2 if st.session_state.mode == "History Archives" else 3)) ) # Map selection back to internal mode string @@ -260,8 +372,10 @@ with st.sidebar: st.session_state.mode = "Deep Research" elif mode == "Debate Workshop": st.session_state.mode = "Debate Workshop" - else: + elif mode == "📜 History Archives": st.session_state.mode = "History Archives" + else: + st.session_state.mode = "Feedback" st.divider() @@ -341,18 +455,167 @@ if st.session_state.get("bg_image_data_url"): # ==================== 主界面逻辑 ==================== if st.session_state.mode == "Deep Research": - st.title("🧪 Multi-Model Council V4") - st.markdown("*多模型智囊团:自定义 N 个专家进行多轮对话讨论,最后由最后一位专家决策*") + # ==================== 主标题区域 ==================== + st.markdown(""" +
+        🍎 智能决策工作坊
+        AI驱动的多智能体决策分析系统 - 基于多模型智囊团
+ """, unsafe_allow_html=True) + + # 状态指示器和语言选择 + col_status, col_lang = st.columns([2, 1]) + with col_status: + if api_key: + st.markdown(""" +
+                ✓ 已连接到服务器
+ """, unsafe_allow_html=True) + else: + st.warning("⚠️ 请在侧边栏配置 API Key") + + with col_lang: + st.markdown(f"**语言/Language:** {output_language}") + + st.divider() + + # ==================== 开始决策按钮 ==================== + st.markdown(""" +
+        🚀 开始决策
+        选择场景或自定义主题,开始多专家协作分析
+ """, unsafe_allow_html=True) + + st.divider() + + # ==================== 支持的决策场景 ==================== + st.markdown(""" +
+        📋 支持的决策场景
+        系统支持以下决策场景,每个场景都配置了专业的AI专家团队
+ """, unsafe_allow_html=True) + + # Decision scenario templates with typical questions + DECISION_SCENARIOS = { + "🚀 新产品发布评审": { + "topic": "新产品发布评审:评估产品功能完备性、市场准备度、发布时机和潜在风险", + "description": "评估新产品概念的可行性、市场潜力和实施计划", + "example": "我们计划在下个季度发布AI助手功能,需要评估技术准备度、市场时机和竞争态势", + "questions": [ + "这个产品的核心价值主张是什么?", + "目标用户群体是谁?需求是否真实存在?", + "技术实现难度如何?团队是否具备能力?", + "竞争对手有类似产品吗?我们的差异化在哪?" + ] + }, + "💰 投资审批决策": { + "topic": "投资审批决策:评估投资项目的财务回报、战略价值、风险因素和执行可行性", + "description": "分析投资项目的ROI、风险和战略价值", + "example": "公司考虑投资1000万用于数据中台建设,需要评估ROI、技术风险和业务价值", + "questions": [ + "预期投资回报率(ROI)是多少?", + "投资回收期需要多长时间?", + "主要风险因素有哪些?如何缓解?", + "是否有更优的替代方案?" + ] + }, + "🤝 合作伙伴评估": { + "topic": "合作伙伴评估:分析潜在合作方的能力、信誉、战略协同和合作风险", + "description": "评估潜在合作伙伴的匹配度和合作价值", + "example": "评估与XX公司建立战略合作的可行性,包括技术互补性、市场协同和风险", + "questions": [ + "合作方的核心能力是什么?", + "双方资源如何互补?", + "合作的战略协同效应有多大?", + "合作失败的风险和退出机制是什么?" + ] + }, + "📦 供应商评估": { + "topic": "供应商评估:评估供应商的质量、成本、交付能力、稳定性和合作风险", + "description": "对比分析供应商的综合能力", + "example": "评估更换核心零部件供应商的利弊,包括成本对比、质量风险和切换成本", + "questions": [ + "供应商的质量控制体系如何?", + "价格竞争力与行业均值对比?", + "交付能力和响应速度如何?", + "供应商的财务稳定性如何?" + ] + } + } + + # Display scenario cards with typical questions + for scenario_name, scenario_data in DECISION_SCENARIOS.items(): + st.markdown(f""" +
+            {scenario_name}
+            {scenario_data['description']}
+            典型问题:
+            {''.join([f'• {q} ' for q in scenario_data['questions']])}
+ """, unsafe_allow_html=True) + + if st.button(f"使用此场景", key=f"use_{scenario_name}", use_container_width=True): + st.session_state.selected_scenario = scenario_data + st.session_state.prefill_topic = scenario_data['topic'] + st.rerun() + + st.divider() + + # Get prefilled topic if available + prefill_topic = st.session_state.get("prefill_topic", "") + if st.session_state.get("selected_scenario"): + prefill_topic = prefill_topic or st.session_state.selected_scenario.get("topic", "") col1, col2 = st.columns([3, 1]) with col1: - research_topic = st.text_area("研究/决策主题", placeholder="请输入你想深入研究或决策的主题...", height=100) + research_topic = st.text_area("研究/决策主题", value=prefill_topic, placeholder="请输入你想深入研究或决策的主题...", height=100) with col2: max_rounds = st.number_input("讨论轮数", min_value=1, max_value=5, value=2, help="专家们进行对话的轮数") # Expert Configuration st.subheader("👥 专家配置") - num_experts = st.number_input("专家数量", min_value=2, max_value=5, value=3) + + # Auto-generate experts row + col_num, col_auto = st.columns([2, 3]) + with col_num: + num_experts = st.number_input("专家数量", min_value=2, max_value=5, value=3) + with col_auto: + st.write("") # Spacing + auto_gen_btn = st.button( + "🪄 根据主题自动生成专家", + disabled=(not research_topic or not api_key), + help="AI 将根据您的主题自动推荐合适的专家角色" + ) + + # Handle auto-generation + if auto_gen_btn and research_topic and api_key: + with st.spinner("🤖 AI 正在分析主题并生成专家配置..."): + try: + temp_client = LLMClient( + provider=provider_id, + api_key=api_key, + base_url=base_url, + model="gpt-4o-mini" # Use fast model for generation + ) + generated = generate_experts_for_topic( + topic=research_topic, + num_experts=num_experts, + llm_client=temp_client, + language=output_language + ) + st.session_state.generated_experts = generated + st.success(f"✅ 已生成 {len(generated)} 位专家配置!") + st.rerun() + except Exception as e: + st.error(f"生成失败: {e}") experts_config = [] cols = st.columns(num_experts) @@ -360,11 +623,20 @@ if st.session_state.mode == "Deep Research": for i in range(num_experts): with cols[i]: default_model_key = list(AVAILABLE_MODELS.keys())[i % len(AVAILABLE_MODELS)] - st.markdown(f"**Expert {i+1}**") - # Default names - default_name = f"Expert {i+1}" - if i == num_experts - 1: - default_name = f"Expert {i+1} (Synthesizer)" + + # Use generated expert name if available + if st.session_state.generated_experts and i < len(st.session_state.generated_experts): + gen_expert = st.session_state.generated_experts[i] + default_name = gen_expert.get("name", f"Expert {i+1}") + perspective = gen_expert.get("perspective", "") + st.markdown(f"**{default_name}**") + if perspective: + st.caption(f"_{perspective}_") + else: + default_name = f"Expert {i+1}" + if i == num_experts - 1: + default_name = f"Expert {i+1} (Synthesizer)" + st.markdown(f"**Expert {i+1}**") expert_name = st.text_input(f"名称 #{i+1}", value=default_name, key=f"expert_name_{i}") expert_model = st.selectbox(f"模型 #{i+1}", options=list(AVAILABLE_MODELS.keys()), index=list(AVAILABLE_MODELS.keys()).index(default_model_key), key=f"expert_model_{i}") @@ -376,12 +648,58 @@ if st.session_state.mode == "Deep Research": research_context = st.text_area("补充背景 (可选)", placeholder="任何额外的背景信息...", height=80) - start_research_btn = st.button("🚀 开始多模型协作", type="primary", disabled=not research_topic) + start_research_btn = st.button("🚀 开始多模型协作", type="primary", disabled=(not research_topic or not api_key)) + if not api_key: + st.info("💡 请先在侧边栏配置 API Key 才能开始任务") + + # ==================== 恢复会话逻辑 (Resume Logic) ==================== + # Try to load 
cached session + cached_session = st.session_state.storage.load_session_state("council_cache") + + # If we have a cached session, and we are NOT currently running one (research_started is False) + if cached_session and not st.session_state.research_started: + st.info(f"🔍 检测到上次未完成的会话: {cached_session.get('topic', 'Unknown Topic')}") + col_res1, col_res2 = st.columns([1, 4]) + with col_res1: + if st.button("🔄 恢复会话", type="primary"): + # Restore state + st.session_state.research_started = True + st.session_state.research_output = "" # Usually empty if unfinished + st.session_state.research_steps_output = cached_session.get("steps_output", []) + + # Restore inputs if possible (tricky with widgets, but we can set defaults or just rely on cache for display) + # For simplicity, we restore the viewing state. Continuing generation is harder without rebuilding the exact generator state. + # Currently, "Resume" means "Restore View". To continue adding to it would require skipping done steps in manager. + + st.rerun() + with col_res2: + if st.button("🗑️ 放弃", type="secondary"): + st.session_state.storage.clear_session_state("council_cache") + st.rerun() + + # ==================== 历史渲染区域 (Always visible if started) ==================== + if st.session_state.research_started and st.session_state.research_steps_output and not start_research_btn: + st.subheader("🗣️ 智囊团讨论历史") + for step in st.session_state.research_steps_output: + step_name = step.get('step', 'Unknown') + content = step.get('output', '') + role_type = "assistant" + + with st.chat_message(role_type, avatar="🤖"): + st.markdown(f"**{step_name}**") + st.markdown(content) + st.divider() + + # ==================== 执行区域 (Triggered by Button) ==================== if start_research_btn and research_topic: st.session_state.research_started = True st.session_state.research_output = "" st.session_state.research_steps_output = [] + + # Clear any old cache when starting fresh + st.session_state.storage.clear_session_state("council_cache") + # 使用全局页面背景(若已上传) research_bg_path = st.session_state.get("bg_image_path") if st.session_state.get("bg_image_data_url"): @@ -404,9 +722,7 @@ if st.session_state.mode == "Deep Research": ) manager.create_agents(config_obj) - st.divider() st.subheader("🗣️ 智囊团讨论中...") - chat_container = st.container() try: @@ -418,10 +734,11 @@ if st.session_state.mode == "Deep Research": # Create a chat message block with chat_container: - st.markdown(f"#### {current_step_name}") - st.caption(f"🤖 {current_agent} ({current_model})") - message_placeholder = st.empty() - current_content = "" + with st.chat_message("assistant", avatar="🤖"): + st.markdown(f"**{current_step_name}**") + st.caption(f"({current_model})") + message_placeholder = st.empty() + current_content = "" elif event["type"] == "content": current_content += event["content"] @@ -433,7 +750,18 @@ if st.session_state.mode == "Deep Research": "step": current_step_name, "output": event["output"] }) - st.divider() # Separator between turns + + # === AUTO-SAVE CACHE === + # Save current progress to session cache + cache_data = { + "topic": research_topic, + "context": research_context, + "steps_output": st.session_state.research_steps_output, + "experts_config": experts_config, + "max_rounds": max_rounds + } + st.session_state.storage.save_session_state("council_cache", cache_data) + # ======================= # The last step output is the final plan if st.session_state.research_steps_output: @@ -456,6 +784,10 @@ if st.session_state.mode == "Deep Research": content=final_plan, 
metadata=metadata ) + + # Clear session cache as we finished successfully + st.session_state.storage.clear_session_state("council_cache") + st.toast("✅ 记录已保存到历史档案") except Exception as e: @@ -624,6 +956,8 @@ elif st.session_state.mode == "Debate Workshop": type="primary", use_container_width=True ) + if not api_key: + st.caption("🔒 需配置 API Key") with col_btn2: reset_btn = st.button( @@ -863,6 +1197,117 @@ elif st.session_state.mode == "History Archives": file_name=f"{record['type']}_{record['id']}.md" ) +# ==================== 用户反馈页面 ==================== +elif st.session_state.mode == "Feedback": + st.title("💬 用户反馈") + st.markdown("*您的反馈帮助我们不断改进产品*") + + # Feedback form + st.subheader("📝 提交反馈") + + feedback_type = st.selectbox( + "反馈类型", + ["功能建议", "Bug 报告", "使用体验", "其他"], + help="选择您要反馈的类型" + ) + + # Rating + st.markdown("**整体满意度**") + rating = st.slider("", 1, 5, 4, format="%d ⭐") + rating_labels = {1: "😞 非常不满意", 2: "😕 不满意", 3: "😐 一般", 4: "😊 满意", 5: "🤩 非常满意"} + st.caption(rating_labels.get(rating, "")) + + # Feedback content + feedback_content = st.text_area( + "详细描述", + placeholder="请描述您的反馈内容...\n\n例如:\n- 您遇到了什么问题?\n- 您希望增加什么功能?\n- 您对哪些方面有改进建议?", + height=200 + ) + + # Feature requests for Council V4 + st.subheader("🎯 功能需求调研") + st.markdown("您最希望看到哪些新功能?(可多选)") + + feature_options = { + "more_scenarios": "📋 更多决策场景模板", + "export_pdf": "📄 导出 PDF 报告", + "voice_input": "🎤 语音输入支持", + "realtime_collab": "👥 多人实时协作", + "custom_prompts": "✏️ 自定义专家 Prompt", + "api_access": "🔌 API 接口支持", + "mobile_app": "📱 移动端应用" + } + + selected_features = [] + cols = st.columns(3) + for idx, (key, label) in enumerate(feature_options.items()): + with cols[idx % 3]: + if st.checkbox(label, key=f"feature_{key}"): + selected_features.append(key) + + # Contact info (optional) + st.subheader("📧 联系方式(可选)") + contact_email = st.text_input("邮箱", placeholder="your@email.com") + + # Submit button + st.divider() + if st.button("📤 提交反馈", type="primary", use_container_width=True): + if feedback_content.strip(): + # Save feedback + feedback_data = { + "type": feedback_type, + "rating": rating, + "content": feedback_content, + "features": selected_features, + "email": contact_email, + "timestamp": st.session_state.storage._get_timestamp() if hasattr(st.session_state.storage, '_get_timestamp') else "" + } + + # Save to storage + try: + import json + import os + feedback_dir = os.path.join(st.session_state.storage.base_dir, "feedback") + os.makedirs(feedback_dir, exist_ok=True) + + from datetime import datetime + filename = f"feedback_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json" + filepath = os.path.join(feedback_dir, filename) + + with open(filepath, 'w', encoding='utf-8') as f: + json.dump(feedback_data, f, ensure_ascii=False, indent=2) + + st.success("🎉 感谢您的反馈!我们会认真阅读并持续改进产品。") + st.balloons() + except Exception as e: + st.error(f"保存反馈时出错: {e}") + else: + st.warning("请填写反馈内容") + + # Show previous feedback summary + st.divider() + with st.expander("📊 我的反馈历史"): + try: + import os + import json + feedback_dir = os.path.join(st.session_state.storage.base_dir, "feedback") + if os.path.exists(feedback_dir): + files = sorted(os.listdir(feedback_dir), reverse=True)[:5] + if files: + for f in files: + filepath = os.path.join(feedback_dir, f) + with open(filepath, 'r', encoding='utf-8') as file: + data = json.load(file) + st.markdown(f"**{data.get('timestamp', 'Unknown')}** | {data.get('type', '')} | {'⭐' * data.get('rating', 0)}") + st.caption(data.get('content', '')[:100] + "...") + st.divider() + else: + st.info("暂无反馈记录") + else: 
+ st.info("暂无反馈记录") + except Exception: + st.info("暂无反馈记录") + # ==================== 底部信息 ==================== st.divider() col_footer1, col_footer2, col_footer3 = st.columns(3) diff --git a/config.py b/config.py index 1ee3715..6e6a67f 100644 --- a/config.py +++ b/config.py @@ -96,6 +96,9 @@ MAX_AGENTS = 6 # 最大参与 Agent 数量 # 支持的输出语言 SUPPORTED_LANGUAGES = ["Chinese", "English", "Japanese", "Spanish", "French", "German"] +# 生成配置 +MAX_OUTPUT_TOKENS = 300 # 限制单次回复长度,保持精简 + # 研究模式模型角色配置 RESEARCH_MODEL_ROLES = { "expert_a": { diff --git a/utils/__pycache__/llm_client.cpython-313.pyc b/utils/__pycache__/llm_client.cpython-313.pyc index 1a8cdb095e7f815348bb66312bdd69c6db599e35..ed60228af43482fc6086f003ac0cabee3f3d95a0 100644 GIT binary patch delta 505 zcmbQB*{sF;nU|M~0SNBO#AU9X$ScWcF;QJsCYUjp*_)+^MS?+zA(%yqA(+*a38adF zp@?;2L?Pp=iKnasV%UQOAcDxG31cv43~MkK5Oc?H2J^&#Sj@r9Cd{lrWhNZKyiyEV zELn_2-012iJ2HyK@SuyL>E;LNF5;DA2o^?D$qF=E0BSa0uwW5?IQ^t{UO+|>D9nodK!hxakOC67IBatBQ%ZAE?TP|{Tt*-+ rRs|9tm>C%vKQb{fvV3J@V3fVh!1)UQ5X@@I1d?T7C}NqIP{?>~ z;w9^PIfh{2V9prUU@jo$j$sexiQxdUn1h*3m|1~JO*n#ir5LhUvKWgv38>=(sVm|l zpo$-?iW_RL9S*{5FHN$P*}Zi>;)xATuw$h#ACT$}cVgdAZ03M96{&DUdjaO>TZlX-=wL tkw1{j2*kw-K;i>4BO~KSCMHIfuWSsAvbPyHzq7e9F$OYzVgOTM69C8tSV;f? diff --git a/utils/__pycache__/storage.cpython-313.pyc b/utils/__pycache__/storage.cpython-313.pyc index 00dbe789764fc0daf5f49e089e90099dcb2ea968..7af2bbaedf986e05c3897af968ca60690f0b57f1 100644 GIT binary patch delta 4714 zcmaJ_X>1$U5#Ecp#QRuX)XI_%+LBybM7MYEF?iAI|ib_D5?|%QZ-2a^he1kiIlB=7^r_VKwGG>0k`&_&MZY8 zKDq+Geecb@dGF24o0-*{JFh=uy-{9nrr@y)M^o1-CagRipV-{v7mcKxr#4Wcsge@S zZ49zd6tO0mfMqN^ab{0dqcD(?vl%kFok+(rBr7)x(R5sh%Ca=E*kx4p*viUE9qnZ( zTIr-8&*={9TN%+%NeyzuEc!&#VZCUEp9Owphk<(7(8}mwomQh*j$DH@v530E>u4IC z5v`h(a?u8Jb?fGGqFtj|MF-HFtF)&>-J<+Yv>_~%+dbFNH^b%*%7~oZ_ z2*-^krJiUSkbG}f?$8xXAFZ4*@k%rEmNRVCv;@#(p{NiwWK@2|)Y2}+ZtyC3rfQ{S z@YqY7tM9%NAE)S97j0H5*=JnlpeS}75+nN#^r_}$jV9P$y#waSZz#WKTkH#!{)OpVc$^T4WLwld(34YpQzb1ejeWn{a$Hj&MVnXq*JF zzZD?B5>4@x7N}749PGrL9BH9zyms))*Ep0>@=M2gpO!l2ts)b22BHL#wajw2Y zjEj1fm6`3746Xfj6qRY|vnY+`&2_BE?FPm;?4buV6k?xddg>|4LJ7c|X4`0G++3-g zF`JZ68TWRbs4tx%&4sAG`xt-io|RF~kDE(aDN!HdV2dpJQ(6~xl{OdBiH4F$hDz(# zDa=@+tSC%?E$aI`rP-@*P@@kHRB3Np*0?}D^*9Vx;9xFo2v?POLxwQAiuT+iA3P&u z=tEbz>k&;Q!D+(-c3>!R2oPxSuOwZlrNT8Oc_CeitpPM#)UtBQQl0DdKg`xr3A&C7 z3ngX)73D!+#xR(q_=uK}5lvkZB2cxkTG|*iCSgTPmzMS^f9j2uJf zmf}JpH6#gJq~>IEqtG9Xy_6)G5r}rF+%SZKfRIXOGg|1&%0fIvq!r>1LIVsB{zA1u7$Z09599ziLWK+5VA4LLvn1+dJm&A_Szuny`T2*J9ExA>dRE z15r7e%@Xnu&SgO*DyIyV*XIa2o)$1v-Eor2O5_pb;NY}z)tr)3X*nBB$0U`7T|-quvGH7!(anXhUpR5eW) zuhrClu=fi0vFWnuBg&Ro)mbJmtV=BI623^diubE%XjXPiAS!m zSN45;;PQcwo|xOxk>A`o?F(+-xdRgi&JWM~+Y0`+IsZf7xI32gRCSu$OML9Rz^ipo$cvk7mmsS7whgzzxpT1>;@oyY;cW&~O zz2XMNnE1sqhm?13nQ~0Ad1vF-Wlf7N-<>5LP;ScyL9?$1tzEmRS-Zb$qj7ep9 z2ScA%1UtL7vR^oz(D`Dc4solwtHba`s{z})&D}NZmpW@V&wk14x@)dJX*$i;lTMJ? 
z`@I_w^3aMWlZRnoxvTuEY|F-HvHu*x^9X(fbZ;UcYyjvc^~&yYe~tuk!08VLsAHI7_f%#0x%y=w=DVN5lGze*8Uh4f+<{4>Ez zk!X~98#^w=>e9VZFsXaPb~i>jBQa+Kn*Gprd@9R!DAS>^C*7te_&mNB0% z-&-Z+YNn50J@4Xay65UA?ne6Q^#>2=SGRK4CbT63T+s-qPnE-PsV%g&_-XQX&~GaV z@4K7HAaH3~kFn4yJw|9?|3)L|2kI5&FYXE+k~Q7JXo;E)uW*MsC2!KQxe#JR1`DPz zmOI)LVnrw(5{t$Lq~<|6lUB_Uyb!=PeCz?Y2zJyvR=qB1l%#8NAa@uE?tAs7t+Cr(H%L!> zIb>b2i)z+{XjygnW&mBrYESbRa9(JtPA#4d^;e$gTzVO})?}z8=cAVZfUR@~jFFHKqgOl~s zhOZnQGr>i>`<&^l>3qXf-OR?X?2q1JDQD-rqvKmg$DQjo{su*x+96Wr-9MUkOhce- zoiA%El(pSiVu5g5Zi6kn{^Q^dYL@Z^_X_N+&xjqt9BeksZe_9EtlfGqr(MgB$YRQOT$C45G?_d_#43b+jtaRBD+(iA>8}zWV785jQJ?3D& zVYZgVcD;t1%)u6Rb|(!OW6m;e`n~nGY1CsW>!+0CoRABxsb7;!l7X5RLy`7C2msq+ zU7K9B5j$41cSwqo|8FFSzAIKynT)LJi8PFt9lz;HpL1Z697WJ3tKW0bQC%@y;4W)y zxa(@&hvyTN1YhJ2G>64AZ>cU=s^=~Kg2lhYu;tHr7VS>$9&P=`zHv!MmDT1)qb;5dhH z5CLxkX+mg45E1$j@B)#S5k?SBBK!p54FJ^>(Sl1V7Qsq5)jtAddn9s+BA7O{P(;kw zLcp&;`ELMkQa>;{ldfsWtIYT|*_OQZI)19g(0L~1TNJ#C+rY&GHq{V`#51u- zg!~lxWV3RVZ{CCdz#zCW;zYnYs<`2|u(fgs)wpLk6CW9peoW2)0jmW0QRQ=fr+1;; RbIN>+8(?V94-}#{>3_EVOnv|W delta 1323 zcmZuwO>7%g5ZaUfS6e~m{dIJeLHVv-pssj zW`DbQdm+1%$)qG)7yf?DA0O#ui|qAYY1J%BPf3npN=|Y~CbJ|#rY+m4TbFttRPvVF zX=Lti>!lXxovON?I6PT2&QN} z!5-B^9~HNPd*{Byx8%?Ho%9%QDO3Et^nU(>VlspOu3Y3*^@2W%Ug0|bL_N-bRSzpa z8b|um+FOi`^3U`cb3a;P96_*_U|vXZEnG0hf6?dJ_!sGftREn!NuEm{e1W0~-MfLr zg)mL}8Pcd5f!l;xI+qY^1>(SSq_7b}s&x?dT5_H}$v;hg(|- zF4z%Rrfa8oX>4w7k;El}69f|kgK|hZh;R-p#I;+6vj72Dp?lr$w0AqZY}r|%_?<|o zk-zB)BaEQM|IAKr2?fQ$3-T0-o&kplhOfvBUU8hXj}>wnb;4-6kypn!f;B`cbX#6M z3c7wn7%hLp>$sRuMHd%_JY?`5@?fP2;)P6~DU{iKKPcQ&mdliD_`v7r_&mWG0xH7R z;D5BR!e2GdPsXk%$%_f4?M3)uH$oa5>gac7Wy?c3q2L*# zs9UpTp}gVU6bbOIdk}hd8e(>iqEV>rjo|XI>F^9Gb+_B~I!(9~d(fL>0)-w15P40k zmbxo6Kk_z1p_;_ZfR9zwhxBvo($JeIvzPkcPZSwj>HmCSp1r)64O<@;^H>>iPFo%H zYb7qF>qb}NME{-XSEf#~Hh+KWqXLd7&GH~(7A2ci?GgU&?1^%L^z*dCo+da( zP$i&ohIN9={IA)`Lqr~o5hQnkI1WEVxGg=96I%Ixo>xk<@*f3$t+Z}El#*I8<~+WE zQ<|vPn?a*q7jh873}2WlKTE>`6bEtyacG~Uf71!0vKchr-0)6A2MyHd@D%@eZXx$j Nbr~x@kO*V{e*g#97*+rP diff --git a/utils/auto_agent_generator.py b/utils/auto_agent_generator.py new file mode 100644 index 0000000..b9beaa6 --- /dev/null +++ b/utils/auto_agent_generator.py @@ -0,0 +1,108 @@ +""" +Auto Agent Generator - 根据主题自动生成专家配置 +Uses LLM to analyze the topic and suggest appropriate expert agents. +""" +import json +import re +from typing import List, Dict +from utils.llm_client import LLMClient + + +EXPERT_GENERATION_PROMPT = """You are an expert team composition advisor. Given a research/decision topic, you need to suggest the most appropriate team of experts to analyze it. + +Instructions: +1. Analyze the topic carefully to understand its domain and key aspects +2. Generate {num_experts} distinct expert roles that would provide the most valuable perspectives +3. Each expert should have a unique focus area relevant to the topic +4. The LAST expert should always be a "Synthesizer" role who can integrate all perspectives + +Output Format (MUST be valid JSON array): +[ + {{"name": "Expert Name", "perspective": "Brief description of their viewpoint", "focus": "Key areas they analyze"}}, + ... 
+] + +Examples of good expert names based on topic: +- For "Should we launch an e-commerce platform?": "市场渠道分析师", "电商运营专家", "供应链顾问", "数字化转型综合师" +- For "Career transition to AI field": "职业发展顾问", "AI行业专家", "技能评估分析师", "综合规划师" + +IMPORTANT: +- Use {language} for all names and descriptions +- Make names specific to the topic, not generic like "Expert 1" +- The last expert MUST be a synthesizer/integrator type + +Topic: {topic} + +Generate exactly {num_experts} experts as a JSON array:""" + + +def generate_experts_for_topic( + topic: str, + num_experts: int, + llm_client: LLMClient, + language: str = "Chinese" +) -> List[Dict[str, str]]: + """ + Use LLM to generate appropriate expert configurations based on the topic. + + Args: + topic: The research/decision topic + num_experts: Number of experts to generate (2-5) + llm_client: LLM client instance for API calls + language: Output language (Chinese/English) + + Returns: + List of expert dicts: [{"name": "...", "perspective": "...", "focus": "..."}, ...] + """ + if not topic.strip(): + return [] + + prompt = EXPERT_GENERATION_PROMPT.format( + topic=topic, + num_experts=num_experts, + language=language + ) + + try: + response = llm_client.chat( + system_prompt="You are a helpful assistant that generates JSON output only. No markdown, no explanation.", + user_prompt=prompt, + max_tokens=800 + ) + + # Extract JSON from response (handle potential markdown wrapping) + json_match = re.search(r'\[[\s\S]*\]', response) + if json_match: + experts = json.loads(json_match.group()) + # Validate structure + if isinstance(experts, list) and len(experts) >= 1: + validated = [] + for exp in experts[:num_experts]: + if isinstance(exp, dict) and "name" in exp: + validated.append({ + "name": exp.get("name", "Expert"), + "perspective": exp.get("perspective", ""), + "focus": exp.get("focus", "") + }) + return validated + except (json.JSONDecodeError, Exception) as e: + print(f"[AutoAgentGenerator] Error parsing LLM response: {e}") + + # Fallback: return generic experts + fallback = [] + for i in range(num_experts): + if i == num_experts - 1: + fallback.append({"name": f"综合分析师", "perspective": "整合视角", "focus": "综合决策"}) + else: + fallback.append({"name": f"专家 {i+1}", "perspective": "分析视角", "focus": "专业分析"}) + return fallback + + +def get_default_model_for_expert(expert_index: int, total_experts: int, available_models: list) -> str: + """ + Assign a default model to an expert based on their position. + Spreads experts across available models for diversity. 
+ """ + if not available_models: + return "gpt-4o" + return available_models[expert_index % len(available_models)] diff --git a/utils/llm_client.py b/utils/llm_client.py index f3881d8..0b11447 100644 --- a/utils/llm_client.py +++ b/utils/llm_client.py @@ -5,6 +5,8 @@ from typing import Generator import os +import config + class LLMClient: """LLM API 统一客户端""" @@ -62,7 +64,7 @@ class LLMClient: self, system_prompt: str, user_prompt: str, - max_tokens: int = 1024 + max_tokens: int = config.MAX_OUTPUT_TOKENS ) -> Generator[str, None, None]: """ 流式对话 diff --git a/utils/storage.py b/utils/storage.py index f827112..84e5dc0 100644 --- a/utils/storage.py +++ b/utils/storage.py @@ -150,3 +150,35 @@ class StorageManager: return json.load(f) except Exception: return None + + # ==================== Session Cache (Resume Functionality) ==================== + def save_session_state(self, key: str, data: Dict[str, Any]): + """Save temporary session state for recovery""" + try: + # We use a dedicated cache file per key + cache_file = self.root_dir / f"{key}_cache.json" + data["_timestamp"] = int(time.time()) + with open(cache_file, 'w', encoding='utf-8') as f: + json.dump(data, f, indent=2, ensure_ascii=False) + except Exception as e: + print(f"Error saving session cache: {e}") + + def load_session_state(self, key: str) -> Dict[str, Any]: + """Load temporary session state""" + cache_file = self.root_dir / f"{key}_cache.json" + if not cache_file.exists(): + return None + try: + with open(cache_file, 'r', encoding='utf-8') as f: + return json.load(f) + except Exception: + return None + + def clear_session_state(self, key: str): + """Clear temporary session state""" + cache_file = self.root_dir / f"{key}_cache.json" + if cache_file.exists(): + try: + os.remove(cache_file) + except Exception: + pass