增加追问 (Add follow-up questions)
parent 35771a9a60
commit 0de5aa038b
BIN  .storage/assets/1767771534_1200.jpeg  Normal file
Binary file not shown. Size: 355 KiB
@@ -1,6 +1,6 @@
 {
     "provider": "DeepSeek",
-    "api_key": "",
-    "base_url": "https://aihubmix.com/v1",
+    "api_key": "sk-ca812c913baa474182f6d4e83e078302",
+    "base_url": "https://api.deepseek.com",
     "language": "Chinese"
 }
16  .storage/history/1767772490_council_轻小说.json  Normal file
@@ -0,0 +1,16 @@
+{
+    "id": "1767772490",
+    "timestamp": 1767772490,
+    "date": "2026-01-07 15:54:50",
+    "type": "council",
+    "topic": "轻小说",
+    "content": "\n\n[错误: Error code: 401 - {'error': {'message': 'Invalid token: ca812c913baa474182f6d4e83e078302 (tid: 2026010707545042546382958168401)', 'type': 'Aihubmix_api_error'}}]",
+    "metadata": {
+        "rounds": 1,
+        "experts": [
+            "Expert 1",
+            "Expert 2"
+        ],
+        "language": "Chinese"
+    }
+}
17  .storage/history/1767772724_council_轻小说.json  Normal file
@@ -0,0 +1,17 @@
+{
+    "id": "1767772724",
+    "timestamp": 1767772724,
+    "date": "2026-01-07 15:58:44",
+    "type": "council",
+    "topic": "轻小说",
+    "content": "\n\n[错误: Error code: 400 - {'error': {'message': 'Model Not Exist', 'type': 'invalid_request_error', 'param': None, 'code': 'invalid_request_error'}}]",
+    "metadata": {
+        "rounds": 2,
+        "experts": [
+            "Expert 1",
+            "Expert 2",
+            "Expert 3 (Synthesizer)"
+        ],
+        "language": "Chinese"
+    }
+}
16  .storage/history/1767772840_council_轻小说.json  Normal file
@@ -0,0 +1,16 @@
+{
+    "id": "1767772840",
+    "timestamp": 1767772840,
+    "date": "2026-01-07 16:00:40",
+    "type": "council",
+    "topic": "轻小说",
+    "content": "好的。作为Expert 2,我将基于与Expert 1的讨论,整合我们双方的见解,形成一个关于构建可持续、有深度的“轻小说”生态系统的最终综合计划。\n\n### **关于“轻小说”与“玩乐关系”的综合分析与行动计划**\n\n经过讨论,我们(Expert 1 与 Expert 2)的核心共识在于:**“轻小说”的本质是一种以“玩乐关系”为核心驱动力的文化产品。** 这种关系超越了简单的阅读,构建了一个由 **“创作者-作品-读者社群-产业生态”** 共同参与的、动态的“叙事游乐场”。Expert 1 精准地指出了其“共玩”特性与边界模糊化,而我的补充则聚焦于这种关系背后的“玩家心态”与意义生产的“流动性”。\n\n我们共同认为,轻小说的挑战(如套路化、深度疑虑)与机遇(如强大的社群凝聚力、跨媒体潜力)皆根植于此。因此,我们的行动计划并非要否定或削弱这种“玩乐性”,而是旨在**引导、深化和结构化这种关系,使其朝着更健康、更具创造力与可持续性的方向发展。**\n\n以下是我们提出的三位一体具体行动计划:\n\n---\n\n### **行动计划:构建“共创式叙事游乐场”生态系统**\n\n本计划围绕三大核心参与方展开,由一个核心原则统御,旨在将松散的“玩乐关系”升级为有活力的“共创生态”。\n\n#### **核心原则:从“单向供给”到“有规则的共玩”**\n确立“作者拥有主叙事权,社群享有拓展权”的共识。官方作品提供坚实、自洽、富有弹性的“核心设定与初始剧情”(即主游戏),同时明确欢迎并预留空间给社群的二次创作与解读(即玩家自创模组)。这既保护了作品的原创引力,又激发了社群活力。\n\n#### **一、 对创作者:成为“世界架构师”与“游戏设计师”**\n1. **设计“开放世界”而非“线性赛道”**:\n * **行动**:在构思阶段,除了主线,需有意识地搭建具有扩展性的世界观底层规则(如魔力系统、社会结构)和留有“空白”的角色背景。这为读者的想象与二次创作提供了合法的“土壤”。\n * **目标**:将作品从封闭的故事,转变为可供探索的“世界”,",
+    "metadata": {
+        "rounds": 1,
+        "experts": [
+            "Expert 1",
+            "Expert 2"
+        ],
+        "language": "Chinese"
+    }
+}
134  app.py
@@ -91,8 +91,9 @@ if "saved_config" not in st.session_state:
 def save_current_config():
     cfg = {
         "provider": st.session_state.get("selected_provider", "AIHubMix"),
-        "api_key": st.session_state.get("api_key", ""),
-        "base_url": st.session_state.get("base_url", ""),
+        # read from widget keys to persist what user sees
+        "api_key": st.session_state.get("api_key_input", st.session_state.get("api_key", "")),
+        "base_url": st.session_state.get("base_url_input", st.session_state.get("base_url", "")),
         "language": st.session_state.get("output_language", "Chinese")
     }
     st.session_state.storage.save_config(cfg)
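The hunk above switches save_current_config to read the widget keys (api_key_input, base_url_input) first, so the persisted config matches whatever the user currently sees in the inputs rather than a stale copy. A minimal sketch of that fallback pattern, using a plain dict in place of st.session_state (build_config is a hypothetical name, not a repo function):

def build_config(session: dict) -> dict:
    # `session` stands in for st.session_state (illustration only)
    return {
        "provider": session.get("selected_provider", "AIHubMix"),
        # live widget value wins; the older session key is only a fallback
        "api_key": session.get("api_key_input", session.get("api_key", "")),
        "base_url": session.get("base_url_input", session.get("base_url", "")),
        "language": session.get("output_language", "Chinese"),
    }

# The widget value takes precedence when both keys are present:
assert build_config({"api_key": "old", "api_key_input": "new"})["api_key"] == "new"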
@@ -140,35 +141,65 @@ with st.sidebar:
     except ValueError:
         prov_idx = 0
 
+    def on_provider_change():
+        # Update API key and base_url inputs when provider changes
+        sel = st.session_state.get("selected_provider")
+        if not sel:
+            return
+        prov_cfg = config.LLM_PROVIDERS.get(sel, {})
+        saved_cfg = st.session_state.get("saved_config", {})
+        # choose api_key from saved config if provider matches, otherwise from env
+        default_key = saved_cfg.get("api_key") if saved_cfg.get("provider") == sel else os.getenv(prov_cfg.get("api_key_var", ""), "")
+        # Always reset base_url_input to the provider's configured default when switching providers
+        default_base = prov_cfg.get("base_url", "")
+        # Set widget states
+        st.session_state["api_key_input"] = default_key
+        st.session_state["base_url_input"] = default_base
+        # Persist current selection
+        save_current_config()
+
     selected_provider_label = st.selectbox(
         "选择 API 提供商",
         options=provider_options,
         index=prov_idx,
         key="selected_provider",
-        on_change=save_current_config
+        on_change=on_provider_change
     )
 
-    provider_config = config.LLM_PROVIDERS[selected_provider_label]
-    provider_id = selected_provider_label.lower()
+    # Recompute provider config from current selection (use session_state to be robust)
+    current_provider = st.session_state.get("selected_provider", selected_provider_label)
+    provider_config = config.LLM_PROVIDERS.get(current_provider, {})
+    provider_id = current_provider.lower()
 
     # API Key Input
-    # If saved key exists for this provider, use it. Otherwise env var.
-    default_key = saved.get("api_key") if saved.get("provider") == selected_provider_label else os.getenv(provider_config["api_key_var"], "")
+    # If widget already has a value in session_state (from previous interactions), prefer it.
+    default_key = (
+        st.session_state.get("api_key_input")
+        if st.session_state.get("api_key_input") is not None and st.session_state.get("api_key_input") != ""
+        else (saved.get("api_key") if saved.get("provider") == current_provider else os.getenv(provider_config.get("api_key_var", ""), ""))
+    )
 
     api_key = st.text_input(
-        f"{selected_provider_label} API Key",
+        f"{current_provider} API Key",
         type="password",
         value=default_key,
-        help=f"环境变量: {provider_config['api_key_var']}",
+        help=f"环境变量: {provider_config.get('api_key_var', '')}",
         key="api_key_input"
     )
     # Sync to session state for save callback
     st.session_state.api_key = api_key
 
     # Base URL
-    default_url = saved.get("base_url") if saved.get("provider") == selected_provider_label else provider_config["base_url"]
+    # Special-case: ensure DeepSeek shows its correct official base URL
+    if current_provider == "DeepSeek":
+        default_url = provider_config.get("base_url", "")
+    else:
+        default_url = (
+            st.session_state.get("base_url_input")
+            if st.session_state.get("base_url_input") is not None and st.session_state.get("base_url_input") != ""
+            else (saved.get("base_url") if saved.get("provider") == current_provider else provider_config.get("base_url", ""))
+        )
     base_url = st.text_input(
         "API Base URL",
         value=default_url,
         key="base_url_input"
     )
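The new on_provider_change callback works because of a Streamlit rule: a widget's displayed value can only be reprogrammed by writing its session_state key before the widget is instantiated, and on_change callbacks run at the start of the rerun, before the script body renders anything. A self-contained sketch of that pattern, with a hypothetical PROVIDERS dict and widget keys standing in for config.LLM_PROVIDERS and the repo's keys:

import streamlit as st

# Hypothetical stand-in for config.LLM_PROVIDERS (illustration only).
PROVIDERS = {
    "DeepSeek": {"base_url": "https://api.deepseek.com"},
    "AIHubMix": {"base_url": "https://aihubmix.com/v1"},
}

def on_provider_change():
    # Runs before the next rerun renders widgets, so writing another
    # widget's key here is legal and changes what it displays.
    sel = st.session_state["provider_select"]
    st.session_state["base_url_box"] = PROVIDERS[sel]["base_url"]

st.selectbox("Provider", list(PROVIDERS), key="provider_select",
             on_change=on_provider_change)
st.text_input("API Base URL", key="base_url_box")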
@@ -446,6 +477,47 @@ if st.session_state.mode == "Deep Research":
             st.markdown(step['output'])
             st.divider()
 
+    # 追问模式(Deep Research)
+    st.divider()
+    st.subheader("🔎 追问模式 — 深入提问")
+    followup_q = st.text_area("输入你的追问(基于上面的综合方案)", key="research_followup_input", height=80)
+    if 'research_followups' not in st.session_state:
+        st.session_state.research_followups = []
+
+    if st.button("💬 追问", key="research_followup_btn") and followup_q:
+        # 创建客户端,优先使用最后一个专家的模型作为回复模型
+        follow_model = None
+        try:
+            follow_model = experts_config[-1]['model'] if experts_config else None
+        except Exception:
+            follow_model = None
+
+        llm = LLMClient(provider=provider_id, api_key=st.session_state.get('api_key'), base_url=st.session_state.get('base_url'), model=follow_model)
+
+        sys_prompt = "你是一个基于先前生成的综合方案的助理,针对用户的追问进行简明、深入且行动导向的回答。"
+        user_prompt = f"已生成的综合方案:\n{st.session_state.research_output}\n\n用户追问:\n{followup_q}"
+
+        placeholder = st.empty()
+        reply = ""
+        try:
+            for chunk in llm.chat_stream(system_prompt=sys_prompt, user_prompt=user_prompt, max_tokens=1024):
+                reply += chunk
+                placeholder.markdown(reply)
+        except Exception as e:
+            placeholder.markdown(f"错误: {e}")
+
+        # 保存本次追问到 session(仅会话级)
+        st.session_state.research_followups.append({"q": followup_q, "a": reply})
+        st.success("追问已得到回复")
+
+    # 显示历史追问
+    if st.session_state.research_followups:
+        with st.expander("查看追问历史"):
+            for idx, qa in enumerate(st.session_state.research_followups[::-1]):
+                st.markdown(f"**Q{len(st.session_state.research_followups)-idx}:** {qa['q']}")
+                st.markdown(f"**A:** {qa['a']}")
+            st.divider()
+
 
 elif st.session_state.mode == "Debate Workshop":
     # ==================== 原始 Debate UI 逻辑 ====================
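The follow-up answer is streamed into a single st.empty() placeholder that is overwritten on every chunk, giving a typewriter effect without stacking duplicate elements. chat_stream(system_prompt=..., user_prompt=..., max_tokens=...) is this repo's own LLMClient method; the sketch below only mirrors its call shape with a stub generator so the rendering loop can be tried offline:

import time
import streamlit as st

def fake_chat_stream(system_prompt, user_prompt, max_tokens=1024):
    # Stub with the same call shape as LLMClient.chat_stream above
    # (the real client lives elsewhere in this repo).
    for token in ["流", "式", "输", "出", "示", "例"]:
        time.sleep(0.05)
        yield token

placeholder = st.empty()
reply = ""
for chunk in fake_chat_stream(system_prompt="...", user_prompt="..."):
    reply += chunk
    placeholder.markdown(reply)  # overwrite the same element each time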
@@ -692,6 +764,38 @@ elif st.session_state.mode == "Debate Workshop":
                     file_name="decision_report.md",
                     mime="text/markdown"
                 )
+
+        # 追问模式(Debate Workshop)
+        st.divider()
+        st.subheader("🔎 追问模式 — 基于报告的深入提问")
+        debate_followup_q = st.text_area("输入你的追问(基于上面的决策报告)", key="debate_followup_input", height=80)
+        if 'debate_followups' not in st.session_state:
+            st.session_state.debate_followups = []
+
+        if st.button("💬 追问", key="debate_followup_btn") and debate_followup_q:
+            # 使用生成报告时的 llm_client
+            llm_follow = llm_client
+            sys_prompt = "你是一个基于上面决策报告的助理,针对用户的追问进行简明且行动导向的回答。"
+            user_prompt = f"决策报告:\n{st.session_state.report}\n\n用户追问:\n{debate_followup_q}"
+
+            ph = st.empty()
+            reply = ""
+            try:
+                for chunk in llm_follow.chat_stream(system_prompt=sys_prompt, user_prompt=user_prompt, max_tokens=1024):
+                    reply += chunk
+                    ph.markdown(reply)
+            except Exception as e:
+                ph.markdown(f"错误: {e}")
+
+            st.session_state.debate_followups.append({"q": debate_followup_q, "a": reply})
+            st.success("追问已得到回复")
+
+        if st.session_state.debate_followups:
+            with st.expander("查看追问历史"):
+                for idx, qa in enumerate(st.session_state.debate_followups[::-1]):
+                    st.markdown(f"**Q{len(st.session_state.debate_followups)-idx}:** {qa['q']}")
+                    st.markdown(f"**A:** {qa['a']}")
+                st.divider()
 
     except Exception as e:
         st.error(f"发生错误: {str(e)}")
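Both branches keep follow-ups as session-level {"q", "a"} dicts and render them newest-first while preserving the original numbering: iterating the reversed list with enumerate makes len(followups) - idx the 1-based position in insertion order. A quick standalone check of that indexing:

followups = [{"q": "第一问"}, {"q": "第二问"}, {"q": "第三问"}]  # oldest to newest

for idx, qa in enumerate(followups[::-1]):        # newest first
    print(f"Q{len(followups) - idx}:", qa["q"])   # prints Q3, Q2, Q1 in order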