fix: preserve isContextOnly context/target dividers in final prompt transcript rendering

Root cause: formatPromptMessageTranscript in prompt-builder.js ignored
isContextOnly, so the context-review and extraction-target sections
were flattened into a plain transcript even though the flag was set
correctly in the intermediate layers. Additionally, userPromptSections
(which contained the dividers) was only a fallback and never reached
the final prompt when block-based profiles had user blocks.
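
For illustration, a minimal sketch of the pre-fix flattening; the
function name comes from this commit, but the descriptor shape is an
assumption, not the actual code:

    // Hypothetical pre-fix descriptor (shape assumed).
    function getPromptMessageLikeDescriptor(message) {
      return {
        seq: message.seq,
        role: message.role,
        speaker: message.speaker,
        content: message.content,
        // isContextOnly was not copied here, so by the time
        // formatPromptMessageTranscript rendered the transcript,
        // context-review messages were indistinguishable from
        // extraction-target messages.
      };
    }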

Fix:
- getPromptMessageLikeDescriptor now preserves the isContextOnly flag
- formatPromptMessageTranscript now inserts the context/target section
  dividers whenever messages carry isContextOnly, ensuring the final
  LLM prompt always shows the distinction regardless of which
  rendering path (recentMessages, chatMessages, dialogueText) is
  used; a sketch of the fixed formatter follows after this list
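
A minimal sketch of the fixed formatter, not the actual
implementation: the divider strings and the "#seq [role|speaker]:
content" line format are taken from the regression tests below;
everything else is assumed.

    const CONTEXT_DIVIDER = "以下是上下文回顾(已提取过),仅供理解剧情";
    const TARGET_DIVIDER = "以下是本次需要提取记忆的新对话内容";

    function formatPromptMessageTranscript(messages) {
      const lines = [];
      let inContextSection = null; // which section is being rendered
      for (const msg of messages) {
        const isContext = msg.isContextOnly === true;
        if (isContext !== inContextSection) {
          // Emit a divider whenever the transcript crosses the boundary
          // between context-review and extraction-target messages.
          lines.push(isContext ? CONTEXT_DIVIDER : TARGET_DIVIDER);
          inContextSection = isContext;
        }
        lines.push(`#${msg.seq} [${msg.role}|${msg.speaker}]: ${msg.content}`);
      }
      return lines.join("\n");
    }

With the mixed fixture from the first regression test below (seq
10-11 context-only, seq 12-13 targets), this yields the context
divider, the reviewed rounds, the target divider, then the new
rounds, matching the ordering assertion in the test.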

Regression tests:
- prompt-builder-mixed-transcript: verifies that the recentMessages
  block content includes the context-review and extraction-target
  dividers
- extractor-phase3-layered-context: end-to-end test proving that the
  default extract profile plus the default structured mode produces
  final promptMessages with the context/target section dividers
Youzini-afk
2026-04-12 13:03:54 +08:00
parent 270ceba78f
commit 559312c1b6
3 changed files with 130 additions and 15 deletions


@@ -165,6 +165,81 @@ function collectAllPromptContent(captured) {
}
}
{
  const graph = createEmptyGraph();
  let captured = null;
  const restore = setTestOverrides({
    llm: {
      async callLLMForJSON(payload) {
        // Capture the payload so the assertions below can inspect the
        // final prompt that would be sent to the LLM.
        captured = payload;
        return { operations: [], cognitionUpdates: [], regionUpdates: {} };
      },
    },
  });
  try {
    const result = await extractMemories({
      graph,
      // Round 1 (seq 10-11) is context-only; round 2 (seq 12-13) is the
      // extraction target. Chinese fixtures: 玩家 = "player", 艾琳 = "Ailin",
      // "第N轮消息/回复" = "round N message/reply".
      messages: [
        {
          seq: 10,
          role: "user",
          content: "第一轮消息",
          name: "玩家",
          speaker: "玩家",
          isContextOnly: true,
        },
        {
          seq: 11,
          role: "assistant",
          content: "第一轮回复",
          name: "艾琳",
          speaker: "艾琳",
          isContextOnly: true,
        },
        {
          seq: 12,
          role: "user",
          content: "第二轮消息",
          name: "玩家",
          speaker: "玩家",
          isContextOnly: false,
        },
        {
          seq: 13,
          role: "assistant",
          content: "第二轮回复",
          name: "艾琳",
          speaker: "艾琳",
          isContextOnly: false,
        },
      ],
      startSeq: 12,
      endSeq: 13,
      schema: DEFAULT_NODE_SCHEMA,
      embeddingConfig: null,
      settings: { ...defaultSettings },
    });
    assert.equal(result.success, true);
    assert.ok(captured);
    const recentBlock = (Array.isArray(captured.promptMessages) ? captured.promptMessages : []).find(
      (m) => m.sourceKey === "recentMessages",
    );
    assert.ok(recentBlock, "recentMessages block should exist");
    const recentContent = String(recentBlock?.content || "");
    // Context-review divider ("the following is a context review (already
    // extracted), for plot understanding only").
    assert.match(recentContent, /以下是上下文回顾(已提取过),仅供理解剧情/);
    // Extraction-target divider ("the following is the new dialogue
    // content to extract memories from").
    assert.match(recentContent, /以下是本次需要提取记忆的新对话内容/);
    assert.ok(
      recentContent.indexOf("已提取过") < recentContent.indexOf("本次需要提取"),
      "context review should appear before extraction target section",
    );
  } finally {
    restore();
  }
}
// ── Test 2: extractRecentMessageCap limits messages ──
{
  const graph = createEmptyGraph();


@@ -117,6 +117,7 @@ const promptBuild = await buildTaskPrompt(settings, "extract", {
content: "继续说明",
name: "艾琳",
speaker: "艾琳",
isContextOnly: true,
},
{
seq: 42,
@@ -124,6 +125,7 @@ const promptBuild = await buildTaskPrompt(settings, "extract", {
content: "用户输入",
name: "玩家",
speaker: "玩家",
isContextOnly: false,
},
],
graphStats: "node_count=1",
@@ -134,6 +136,14 @@ const payload = buildTaskLlmPayload(promptBuild, "fallback-user");
const recentBlock = payload.promptMessages.find(
  (message) => message.sourceKey === "recentMessages",
);
// Context-review divider ("the following is a context review (already
// extracted), for plot understanding only").
assert.match(
  String(recentBlock?.content || ""),
  /以下是上下文回顾(已提取过),仅供理解剧情/,
);
// Extraction-target divider ("the following is the new dialogue content
// to extract memories from").
assert.match(
  String(recentBlock?.content || ""),
  /以下是本次需要提取记忆的新对话内容/,
);
// Sanitized fixture lines: "助手已净化" = "assistant sanitized",
// "用户已净化" = "user sanitized".
assert.match(String(recentBlock?.content || ""), /#41 \[assistant\|艾琳\]: 助手已净化/);
assert.match(String(recentBlock?.content || ""), /#42 \[user\|玩家\]: 用户已净化/);
assert.doesNotMatch(