
Commit 3153e60

Authored by zxjwzn, pre-commit-ci[bot], and KomoriDev

✨ Support rolling back multi-turn conversations (支持回滚多轮对话) (#14)

Co-authored-by: Zaxpris <[email protected]>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: KomoriDev <[email protected]>
1 parent 1a084ae commit 3153e60

File tree

4 files changed: +183 −124 lines


nonebot_plugin_deepseek/__init__.py

Lines changed: 10 additions & 116 deletions
@@ -1,20 +1,14 @@
-from dataclasses import asdict
 from importlib.util import find_spec

-import httpx
 from nonebot import require
-from nonebot.adapters import Event
 from nonebot.params import Depends
-from nonebot.matcher import Matcher
-from nonebot.permission import User, SuperUser, Permission
+from nonebot.permission import SuperUser
 from nonebot.plugin import PluginMetadata, inherit_supported_adapters

 require("nonebot_plugin_waiter")
 require("nonebot_plugin_alconna")
 require("nonebot_plugin_localstore")
 from arclet.alconna import config as alc_config
-from nonebot_plugin_waiter import Waiter, prompt
-from nonebot_plugin_alconna.uniseg import UniMessage
 from nonebot_plugin_alconna.builtins.extensions.reply import ReplyMergeExtension
 from nonebot_plugin_alconna import (
     Args,
@@ -32,18 +26,16 @@

 if find_spec("nonebot_plugin_htmlrender"):
     require("nonebot_plugin_htmlrender")
-    from nonebot_plugin_htmlrender import md_to_pic
+    from nonebot_plugin_htmlrender import md_to_pic as md_to_pic

     is_to_pic = True
 else:
     is_to_pic = False

 from .apis import API
 from . import hook as hook
-from .function_call import registry
-from .exception import RequestException
+from .utils import DeepSeekHandler
 from .extension import CleanDocExtension
-from .utils import extract_content_and_think
 from .config import Config, config, model_config

 __plugin_meta__ = PluginMetadata(
@@ -167,114 +159,16 @@ async def _(

 @deepseek.handle()
 async def _(
-    event: Event,
-    matcher: Matcher,
     content: Match[tuple[str, ...]],
     model_name: Query[str] = Query("use-model.model"),
     context_option: Query[bool] = Query("with-context.value"),
-):
-    if not content.available:
-        resp = await prompt("你想对 DeepSeek 说什么呢?", timeout=60)
-        if resp is None:
-            await deepseek.finish("等待超时")
-        text = resp.extract_plain_text()
-        if text in ["结束", "取消", "done"]:
-            await deepseek.finish("已结束对话")
-        chat_content = text
-    else:
-        chat_content = " ".join(content.result)
-
+) -> None:
     if not model_name.available:
         model_name.result = model_config.default_model

-    message = [{"role": "user", "content": chat_content}]
-
-    try:
-        if not context_option.available:
-            completion = await API.chat(message, model=model_name.result)
-            result = completion.choices[0].message
-            if result.tool_calls:
-                message.append(asdict(result))
-                fc_result = await registry.execute_tool_call(result.tool_calls[0])
-                message.append(
-                    {
-                        "role": "tool",
-                        "tool_call_id": result.tool_calls[0].id,
-                        "content": fc_result,
-                    }
-                )
-                completion = await API.chat(message, model=model_name.result)
-                result = completion.choices[0].message
-
-            ds_content, ds_think = extract_content_and_think(result)
-
-            if is_to_pic:
-                output = (
-                    f"<blockquote><p> {ds_think} </p></blockquote>" + ds_content
-                    if ds_think and config.enable_send_thinking and ds_content
-                    else ds_content
-                )
-                unimsg = UniMessage.image(raw=await md_to_pic(output))  # type: ignore
-                if unimsg:
-                    await unimsg.finish()
-                await deepseek.finish(output)
-            else:
-                output = (
-                    ds_think + f"\n----\n{ds_content}"
-                    if ds_think and config.enable_send_thinking and ds_content
-                    else ds_content
-                )
-                await deepseek.finish(output)
-
-        def handler(event: Event):
-            text = event.get_plaintext().strip().lower()
-            if text in ["结束", "取消", "done"]:
-                return False
-            return text
-
-        permission = Permission(User.from_event(event, perm=matcher.permission))
-        waiter = Waiter(waits=["message"], handler=handler, matcher=deepseek, permission=permission)
-        waiter.future.set_result("")
-
-        async for resp in waiter(default=False):
-            if resp is False:
-                await deepseek.finish("已结束对话")
-
-            if resp and isinstance(resp, str):
-                message.append({"role": "user", "content": resp})
-
-                completion = await API.chat(message, model=model_name.result)
-                result = completion.choices[0].message
-                ds_content, ds_think = extract_content_and_think(result)
-
-                result.reasoning_content = None
-                message.append(asdict(result))
-
-                if result.tool_calls:
-                    fc_result = await registry.execute_tool_call(result.tool_calls[0])
-                    message.append(
-                        {
-                            "role": "tool",
-                            "tool_call_id": result.tool_calls[0].id,
-                            "content": fc_result,
-                        }
-                    )
-                    resp = ""
-                    waiter.future.set_result("")
-                    continue
-
-                output = (
-                    ds_think + f"\n----\n{ds_content}"
-                    if ds_think and config.enable_send_thinking and ds_content
-                    else ds_content
-                )
-
-                if not output:
-                    return
-
-                await deepseek.send(output)
-
-    except httpx.ReadTimeout:
-        await deepseek.finish("网络超时,再试试吧")
-    except RequestException as e:
-        await deepseek.finish(str(e))
+    model = config.get_model_config(model_name.result)
+    await DeepSeekHandler(
+        model=model,
+        is_to_pic=is_to_pic,
+        is_contextual=context_option.available,
+    ).handle(" ".join(content.result) if content.available else None)
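
Note: with the conversation loop moved into DeepSeekHandler (see the utils.py diff below), the command body now only falls back to the default model, resolves it via config.get_model_config, and delegates to handle(). The toy below is a standalone sketch of just that dispatch, reusing only behaviour visible in this commit; it is not the plugin code, and the chat keywords in the comments come from _waiter_handler further down.

import asyncio
from typing import Optional


class ToyHandler:
    """Standalone stand-in for DeepSeekHandler's dispatch (simplified, no NoneBot objects)."""

    def __init__(self, is_contextual: bool) -> None:
        self.is_contextual = is_contextual
        self.context: list[dict[str, str]] = []

    async def handle(self, content: Optional[str]) -> str:
        if content:  # text passed together with the command
            self.context.append({"role": "user", "content": content})
        if not self.is_contextual:
            return "single round: one completion, one reply"
        # contextual mode keeps looping; "结束"/"取消"/"done" stop it,
        # "回滚"/"rollback" drops the last round (see _waiter_handler / _handle_rollback below)
        return "multi-round loop with rollback support"


print(asyncio.run(ToyHandler(is_contextual=False).handle("你好")))
print(asyncio.run(ToyHandler(is_contextual=True).handle(None)))  # None -> the real handler prompts for input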

nonebot_plugin_deepseek/config.py

Lines changed: 2 additions & 0 deletions
@@ -140,6 +140,8 @@ class ScopedConfig(BaseModel):
     """Text to Image"""
     enable_send_thinking: bool = False
     """Whether to send model thinking chain"""
+    context_timeout: int = Field(default=50, gt=50)
+    """Multi-round conversation timeout"""

     def get_enable_models(self) -> list[str]:
         return [model.name for model in self.enable_models]
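
Note: the new context_timeout field caps how long the multi-round waiter blocks between user messages; it is consumed in utils.py below as the timeout of the waiter loop (presumably in seconds, as waiter timeouts usually are). A small standalone sketch of the pydantic constraint, with the real ScopedConfig abbreviated to this one field:

from pydantic import BaseModel, Field


class ScopedConfig(BaseModel):  # abbreviated sketch; the real model declares many more fields
    context_timeout: int = Field(default=50, gt=50)
    """Multi-round conversation timeout"""


print(ScopedConfig(context_timeout=120).context_timeout)  # accepted: 120 > 50
# ScopedConfig(context_timeout=30) would raise a ValidationError because of gt=50.
# With pydantic's default settings, the declared default of 50 is not itself re-checked
# against gt=50, since default values skip validation unless that is explicitly enabled.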

nonebot_plugin_deepseek/utils.py

Lines changed: 157 additions & 7 deletions
@@ -1,15 +1,165 @@
 import re
+from dataclasses import asdict
+from typing import Any, Union, Literal, Optional

+import httpx
+from nonebot.adapters import Event
+from nonebot.permission import User, Permission
+from nonebot_plugin_htmlrender import md_to_pic
+from nonebot_plugin_waiter import Waiter, prompt
+from nonebot_plugin_alconna.uniseg import UniMsg, UniMessage
+from nonebot.matcher import Matcher, current_event, current_matcher
+
+from .apis import API
 from .schemas import Message
+from .config import CustomModel, config
+from .function_call.registry import registry
+
+
+class DeepSeekHandler:
+    def __init__(
+        self,
+        model: CustomModel,
+        is_to_pic: bool,
+        is_contextual: bool,
+    ) -> None:
+        self.model: CustomModel = model
+        self.is_to_pic: bool = is_to_pic
+        self.is_contextual: bool = is_contextual
+
+        self.event: Event = current_event.get()
+        self.matcher: Matcher = current_matcher.get()
+        self.waiter: Waiter[Union[str, Literal[False]]] = self._setup_waiter()
+
+        self.context: list[dict[str, Any]] = []
+
+    async def handle(self, content: Optional[str]) -> None:
+        if content:
+            self.context.append({"role": "user", "content": content})
+
+        if not self.is_contextual:
+            await self._handle_single_conversion()
+        else:
+            await self._handle_multi_round_conversion()
+
+    async def _handle_single_conversion(self) -> None:
+        if message := await self._get_response_message():
+            await self._send_response(message)
+
+    async def _handle_multi_round_conversion(self) -> None:
+        async for resp in self.waiter(default=False, timeout=config.context_timeout):
+            await self._process_waiter_response(resp)
+
+            if resp == "rollback":
+                continue
+
+            message = await self._get_response_message()
+            if not message:
+                continue
+
+            await self._send_response(message)
+            self.context.append(asdict(message))
+
+            if await self._handle_tool_calls(message):
+                self.waiter.future.set_result("")
+                continue
+
+    def _setup_waiter(self) -> Waiter[Union[str, Literal[False]]]:
+        permission = Permission(User.from_event(self.event, perm=self.matcher.permission))
+        waiter = Waiter(waits=["message"], handler=self._waiter_handler, matcher=self.matcher, permission=permission)
+        waiter.future.set_result("")
+        return waiter
+
+    def _waiter_handler(self, msg: UniMsg) -> Union[str, Literal[False]]:
+        text = msg.extract_plain_text()
+        if text in ["结束", "取消", "done"]:
+            return False
+        if text in ["回滚", "rollback"]:
+            return "rollback"
+        return text
+
+    async def _process_waiter_response(self, resp: Union[bool, str]) -> None:
+        if resp == "" and not self.context:
+            _resp = await prompt("你想对 DeepSeek 说什么呢?", timeout=60)
+            if _resp is None:
+                await self.matcher.finish("等待超时")
+            resp = self._waiter_handler(UniMessage.generate_sync(message=_resp))
+
+        if resp is False:
+            await self.matcher.finish("已结束对话")
+        elif resp == "rollback":
+            await self._handle_rollback()
+        elif resp and isinstance(resp, str):
+            self.context.append({"role": "user", "content": resp})
+
+    async def _handle_rollback(self, steps: int = 1, by_error: bool = False) -> None:
+        rollback_per_step = 1 if by_error else 2
+        required_length = steps * rollback_per_step
+        rollback_position = -rollback_per_step * steps
+
+        if len(self.context) >= required_length:
+            self.context = self.context[:rollback_position]
+            action_desc = f"回滚 {steps} 条输入" if by_error else f"回滚 {steps} 轮对话"
+            status_msg = f"Oops! 连接异常,已自动{action_desc}。" if by_error else f"已{action_desc}。"
+
+            remaining_context = (
+                "空" if not self.context else f'{self.context[-1]["role"]}: {self.context[-1]["content"]}'
+            )
+
+            await self.matcher.send(f"{status_msg}当前上下文为:\n{remaining_context}\n" "user:(等待输入)")
+        elif by_error and len(self.context) > 0:
+            self.context.clear()
+            await self.matcher.send("Oops! 连接异常,请重新输入")
+        else:
+            await self.matcher.send("无法回滚,当前对话记录为空")
+
+    async def _handle_tool_calls(self, message: Message) -> bool:
+        if not message.tool_calls:
+            return False
+
+        try:
+            result = await registry.execute_tool_call(message.tool_calls[0])
+        except Exception:
+            self.context.pop()
+            return False
+
+        self.context.append({"role": "tool", "tool_call_id": message.tool_calls[0].id, "content": result})
+        return True
+
+    async def _get_response_message(self) -> Optional[Message]:
+        try:
+            completion = await API.chat(self.context, self.model.name)
+            return completion.choices[0].message
+        except (httpx.ReadTimeout, httpx.RequestError):
+            await self._handle_rollback(by_error=True)
+
+    def _extract_content_and_think(self, message: Message) -> tuple[str, str]:
+        thinking = message.reasoning_content
+
+        if not thinking:
+            think_blocks = re.findall(r"<think>(.*?)</think>", message.content or "", flags=re.DOTALL)
+            thinking = "\n".join([block.strip() for block in think_blocks if block.strip()])

+        content = re.sub(r"<think>.*?</think>", "", message.content or "", flags=re.DOTALL).strip()

-def extract_content_and_think(message: Message) -> tuple[str, str]:
-    thinking = message.reasoning_content
+        return content, thinking

-    if not thinking:
-        think_blocks = re.findall(r"<think>(.*?)</think>", message.content or "", flags=re.DOTALL)
-        thinking = "\n".join([block.strip() for block in think_blocks if block.strip()])
+    def _format_output(self, message: Message) -> str:
+        content, thinking = self._extract_content_and_think(message)

-    content = re.sub(r"<think>.*?</think>", "", message.content or "", flags=re.DOTALL).strip()
+        if config.enable_send_thinking and content and thinking:
+            return (
+                f"<blockquote><p>{thinking}</p></blockquote>{content}"
+                if self.is_to_pic
+                else f"{thinking}\n----\n{content}"
+            )
+        return content

-    return content, thinking
+    async def _send_response(self, message: Message) -> None:
+        output = self._format_output(message)
+        message.reasoning_content = None
+        if self.is_to_pic:
+            if unimsg := UniMessage.image(raw=await md_to_pic(output)):
+                await unimsg.send()
+        else:
+            await self.matcher.send(output)
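
Note: both the rollback keyword ("回滚" / "rollback") and connection errors funnel into _handle_rollback, which trims the shared context list: a normal rollback removes one user/assistant pair per step, while an error rollback removes only the dangling user message. A framework-free sketch of just that slicing arithmetic, on toy data:

def rollback(context: list[dict[str, str]], steps: int = 1, by_error: bool = False) -> list[dict[str, str]]:
    # mirrors _handle_rollback: 2 messages per step for a user rollback, 1 for an error rollback
    per_step = 1 if by_error else 2
    if len(context) >= steps * per_step:
        return context[: -per_step * steps]
    return context  # nothing (or not enough) to roll back


context = [
    {"role": "user", "content": "你好"},
    {"role": "assistant", "content": "你好!有什么可以帮你?"},
    {"role": "user", "content": "介绍一下 NoneBot"},
    {"role": "assistant", "content": "NoneBot 是一个跨平台的机器人框架……"},
]

print(len(rollback(context)))                 # 2: the last user/assistant round is dropped
print(len(rollback(context, by_error=True)))  # 3: only the last message is dropped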

tests/test_extract.py

Lines changed: 14 additions & 1 deletion
@@ -1,6 +1,19 @@
+import re
+
+
 def test_extract_content_and_think():
     from nonebot_plugin_deepseek.schemas import Message
-    from nonebot_plugin_deepseek.utils import extract_content_and_think
+
+    def extract_content_and_think(message: Message) -> tuple[str, str]:
+        thinking = message.reasoning_content
+
+        if not thinking:
+            think_blocks = re.findall(r"<think>(.*?)</think>", message.content or "", flags=re.DOTALL)
+            thinking = "\n".join([block.strip() for block in think_blocks if block.strip()])
+
+        content = re.sub(r"<think>.*?</think>", "", message.content or "", flags=re.DOTALL).strip()
+
+        return content, thinking

     # 没有 <think> 标签时
     message1 = Message(role="assistant", content="This is the response without think tags.", reasoning_content=None)
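
Note: because extract_content_and_think is now a private DeepSeekHandler method, the test inlines a copy of the old helper instead of importing it. Continuing inside the same test function (reusing the Message import and the inlined helper above), a hypothetical case with an embedded <think> block would look like the snippet below; the expected values follow directly from the two regexes.

    # with <think> tags: the reasoning is stripped into `thinking`, the reply stays in `content`
    message_with_think = Message(
        role="assistant",
        content="<think>用户想要一个简短的回答</think>这是正式回答。",
        reasoning_content=None,
    )
    content, thinking = extract_content_and_think(message_with_think)
    assert content == "这是正式回答。"
    assert thinking == "用户想要一个简短的回答"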
