From 9a5c83dbde18903b8a2ecea6156c940c254c0ac2 Mon Sep 17 00:00:00 2001
From: Youzini-afk <13153778771cx@gmail.com>
Date: Fri, 27 Mar 2026 03:03:50 +0800
Subject: [PATCH] fix: align prompt assembly with executed messages

---
 compressor.js            |  36 ++++++++---
 consolidator.js          |  37 ++++++++---
 extractor.js             |  76 ++++++++++++++--------
 llm.js                   |  83 +++++++++++++++++++++++-
 panel.js                 |  12 ++--
 prompt-builder.js        | 132 ++++++++++++++++++++++++++++++++++-----
 retriever.js             |  33 +++++++---
 tests/task-worldinfo.mjs |   7 ++-
 8 files changed, 341 insertions(+), 75 deletions(-)

diff --git a/compressor.js b/compressor.js
index 0029dcc..d28086d 100644
--- a/compressor.js
+++ b/compressor.js
@@ -11,7 +11,11 @@ import {
   getNode,
 } from "./graph.js";
 import { callLLMForJSON } from "./llm.js";
-import { buildTaskExecutionDebugContext, buildTaskPrompt } from "./prompt-builder.js";
+import {
+  buildTaskExecutionDebugContext,
+  buildTaskLlmPayload,
+  buildTaskPrompt,
+} from "./prompt-builder.js";
 import { getSTContextForPrompt } from "./st-context.js";
 import { applyTaskRegex } from "./task-regex.js";
 import { isDirectVectorConfig } from "./vector-index.js";
@@ -28,6 +32,21 @@ function createTaskLlmDebugContext(promptBuild, regexInput) {
     : null;
 }
 
+function resolveTaskPromptPayload(promptBuild, fallbackUserPrompt = "") {
+  if (typeof buildTaskLlmPayload === "function") {
+    return buildTaskLlmPayload(promptBuild, fallbackUserPrompt);
+  }
+
+  return {
+    systemPrompt: String(promptBuild?.systemPrompt || ""),
+    userPrompt: String(fallbackUserPrompt || ""),
+    promptMessages: [],
+    additionalMessages: Array.isArray(promptBuild?.privateTaskMessages)
+      ? promptBuild.privateTaskMessages
+      : [],
+  };
+}
+
 function throwIfAborted(signal) {
   if (signal?.aborted) {
     throw signal.reason instanceof Error ? signal.reason : createAbortError();
@@ -279,10 +298,14 @@
   );
 
   const userPrompt = `请压缩以下 ${nodes.length} 个 "${typeDef.label}" 节点:\n\n${nodeDescriptions}`;
+  const promptPayload = resolveTaskPromptPayload(
+    compressPromptBuild,
+    userPrompt,
+  );
 
   return await callLLMForJSON({
-    systemPrompt,
-    userPrompt,
+    systemPrompt: promptPayload.systemPrompt || systemPrompt,
+    userPrompt: promptPayload.userPrompt,
     maxRetries: 1,
     signal,
     taskType: "compress",
@@ -290,11 +313,8 @@
       compressPromptBuild,
       compressRegexInput,
     ),
-    additionalMessages:
-      compressPromptBuild.privateTaskMessages || [
-        ...(compressPromptBuild.customMessages || []),
-        ...(compressPromptBuild.additionalMessages || []),
-      ],
+    promptMessages: promptPayload.promptMessages,
+    additionalMessages: promptPayload.additionalMessages,
   });
 }
 
diff --git a/consolidator.js b/consolidator.js
index dc314e9..0c1444a 100644
--- a/consolidator.js
+++ b/consolidator.js
@@ -5,7 +5,11 @@
 import { embedBatch, searchSimilar } from "./embedding.js";
 import { addEdge, createEdge, getActiveNodes, getNode } from "./graph.js";
 import { callLLMForJSON } from "./llm.js";
-import { buildTaskExecutionDebugContext, buildTaskPrompt } from "./prompt-builder.js";
+import {
+  buildTaskExecutionDebugContext,
+  buildTaskLlmPayload,
+  buildTaskPrompt,
+} from "./prompt-builder.js";
 import { getSTContextForPrompt } from "./st-context.js";
 import { applyTaskRegex } from "./task-regex.js";
 import {
@@ -27,6 +31,21 @@ function createTaskLlmDebugContext(promptBuild, regexInput) {
     : null;
 }
 
+function resolveTaskPromptPayload(promptBuild, fallbackUserPrompt = "") {
+  if (typeof buildTaskLlmPayload === "function") {
+    return buildTaskLlmPayload(promptBuild, fallbackUserPrompt);
+  }
+
+  return {
+    systemPrompt: String(promptBuild?.systemPrompt || ""),
+    userPrompt: String(fallbackUserPrompt || ""),
+    promptMessages: [],
+    additionalMessages: Array.isArray(promptBuild?.privateTaskMessages)
+      ? promptBuild.privateTaskMessages
+      : [],
+  };
+}
+
 function isAbortError(error) {
   return error?.name === "AbortError";
 }
@@ -318,10 +337,15 @@
       consolidationRegexInput,
       "system",
     );
+    const promptPayload = resolveTaskPromptPayload(
+      consolidationPromptBuild,
+      userPrompt,
+    );
     try {
       decision = await callLLMForJSON({
-        systemPrompt: consolidationSystemPrompt,
-        userPrompt,
+        systemPrompt:
+          promptPayload.systemPrompt || consolidationSystemPrompt,
+        userPrompt: promptPayload.userPrompt,
         maxRetries: 1,
         signal,
         taskType: "consolidation",
@@ -329,11 +353,8 @@
           consolidationPromptBuild,
           consolidationRegexInput,
         ),
-        additionalMessages:
-          consolidationPromptBuild.privateTaskMessages || [
-            ...(consolidationPromptBuild.customMessages || []),
-            ...(consolidationPromptBuild.additionalMessages || []),
-          ],
+        promptMessages: promptPayload.promptMessages,
+        additionalMessages: promptPayload.additionalMessages,
       });
     } catch (e) {
       if (isAbortError(e)) throw e;
diff --git a/extractor.js b/extractor.js
index 22495fc..3c527ed 100644
--- a/extractor.js
+++ b/extractor.js
@@ -16,7 +16,11 @@ import {
 } from "./graph.js";
 import { callLLMForJSON } from "./llm.js";
 import { ensureEventTitle, getNodeDisplayName } from "./node-labels.js";
-import { buildTaskExecutionDebugContext, buildTaskPrompt } from "./prompt-builder.js";
+import {
+  buildTaskExecutionDebugContext,
+  buildTaskLlmPayload,
+  buildTaskPrompt,
+} from "./prompt-builder.js";
 import { RELATION_TYPES } from "./schema.js";
 import { applyTaskRegex } from "./task-regex.js";
 import { getSTContextForPrompt } from "./st-context.js";
@@ -34,6 +38,21 @@ function createTaskLlmDebugContext(promptBuild, regexInput) {
     : null;
 }
 
+function resolveTaskPromptPayload(promptBuild, fallbackUserPrompt = "") {
+  if (typeof buildTaskLlmPayload === "function") {
+    return buildTaskLlmPayload(promptBuild, fallbackUserPrompt);
+  }
+
+  return {
+    systemPrompt: String(promptBuild?.systemPrompt || ""),
+    userPrompt: String(fallbackUserPrompt || ""),
+    promptMessages: [],
+    additionalMessages: Array.isArray(promptBuild?.privateTaskMessages)
+      ? promptBuild.privateTaskMessages
+      : [],
+  };
+}
+
 function isAbortError(error) {
   return error?.name === "AbortError";
 }
@@ -153,20 +172,18 @@
     "",
     "请分析对话,按 JSON 格式输出操作列表。",
   ].join("\n");
+  const promptPayload = resolveTaskPromptPayload(promptBuild, userPrompt);
 
   // 调用 LLM
   const result = await callLLMForJSON({
-    systemPrompt,
-    userPrompt,
+    systemPrompt: promptPayload.systemPrompt || systemPrompt,
+    userPrompt: promptPayload.userPrompt,
     maxRetries: 2,
     signal,
     taskType: "extract",
    debugContext: createTaskLlmDebugContext(promptBuild, extractRegexInput),
-    additionalMessages:
-      promptBuild.privateTaskMessages || [
-        ...(promptBuild.customMessages || []),
-        ...(promptBuild.additionalMessages || []),
-      ],
+    promptMessages: promptPayload.promptMessages,
+    additionalMessages: promptPayload.additionalMessages,
   });
 
   throwIfAborted(signal);
@@ -667,9 +684,7 @@
     "system",
   );
 
-  const result = await callLLMForJSON({
-    systemPrompt: synopsisSystemPrompt,
-    userPrompt: [
+  const synopsisUserPrompt = [
     "## 事件时间线",
     eventSummaries,
     "",
@@ -678,7 +693,15 @@
     "",
     "## 活跃主线",
     threadSummary || "(无)",
-  ].join("\n"),
+  ].join("\n");
+  const synopsisPromptPayload = resolveTaskPromptPayload(
+    synopsisPromptBuild,
+    synopsisUserPrompt,
+  );
+
+  const result = await callLLMForJSON({
+    systemPrompt: synopsisPromptPayload.systemPrompt || synopsisSystemPrompt,
+    userPrompt: synopsisPromptPayload.userPrompt,
     maxRetries: 1,
     signal,
     taskType: "synopsis",
@@ -686,11 +709,8 @@
       synopsisPromptBuild,
       synopsisRegexInput,
     ),
-    additionalMessages:
-      synopsisPromptBuild.privateTaskMessages || [
-        ...(synopsisPromptBuild.customMessages || []),
-        ...(synopsisPromptBuild.additionalMessages || []),
-      ],
+    promptMessages: synopsisPromptPayload.promptMessages,
+    additionalMessages: synopsisPromptPayload.additionalMessages,
   });
 
   if (!result?.summary) return;
@@ -795,9 +815,7 @@
     "system",
   );
 
-  const result = await callLLMForJSON({
-    systemPrompt: reflectionSystemPrompt,
-    userPrompt: [
+  const reflectionUserPrompt = [
     "## 最近事件",
     eventSummary,
     "",
@@ -809,7 +827,16 @@
     "",
     "## 已知矛盾",
     contradictionSummary || "(无)",
-  ].join("\n"),
+  ].join("\n");
+  const reflectionPromptPayload = resolveTaskPromptPayload(
+    reflectionPromptBuild,
+    reflectionUserPrompt,
+  );
+
+  const result = await callLLMForJSON({
+    systemPrompt:
+      reflectionPromptPayload.systemPrompt || reflectionSystemPrompt,
+    userPrompt: reflectionPromptPayload.userPrompt,
     maxRetries: 1,
     signal,
     taskType: "reflection",
@@ -817,11 +844,8 @@
       reflectionPromptBuild,
       reflectionRegexInput,
     ),
-    additionalMessages:
-      reflectionPromptBuild.privateTaskMessages || [
-        ...(reflectionPromptBuild.customMessages || []),
-        ...(reflectionPromptBuild.additionalMessages || []),
-      ],
+    promptMessages: reflectionPromptPayload.promptMessages,
+    additionalMessages: reflectionPromptPayload.additionalMessages,
   });
 
   if (!result?.insight) return null;
diff --git a/llm.js b/llm.js
index 5e83ac1..fc271f3 100644
--- a/llm.js
+++ b/llm.js
@@ -430,15 +430,18 @@ function buildJsonAttemptMessages(
   attempt,
   reason = "",
   additionalMessages = [],
+  promptMessages = [],
 ) {
   const systemParts = [
-    systemPrompt,
     "输出要求补充:只输出一个紧凑的 JSON 对象。",
     "禁止 markdown 代码块、禁止解释、禁止前后缀、禁止省略号。",
     "如果需要重新生成,请直接从头输出完整 JSON,不要续写上一次内容。",
   ];
-  const userParts = [userPrompt];
+  const userParts = [];
+  if (String(userPrompt || "").trim()) {
+    userParts.push(String(userPrompt || "").trim());
+  }
   if (attempt > 0) {
     userParts.push(
       reason ? `上一次输出失败原因:${reason}` : "上一次输出未能被程序解析。",
     );
   }
@@ -450,8 +453,79 @@
     userParts.push("请直接输出紧凑 JSON 对象,不要包含任何额外文本。");
   }
 
+  const normalizedPromptMessages = Array.isArray(promptMessages)
+    ? promptMessages
+        .map((message) => {
+          if (!message || typeof message !== "object") return null;
+          const role = String(message.role || "").trim().toLowerCase();
+          const content = String(message.content || "").trim();
+          if (!["system", "user", "assistant"].includes(role) || !content) {
+            return null;
+          }
+          return { role, content };
+        })
+        .filter(Boolean)
+    : [];
+
+  const systemSupplement = [systemPrompt, ...systemParts]
+    .filter((part) => String(part || "").trim())
+    .join("\n\n")
+    .trim();
+  const userSupplement = userParts.join("\n\n").trim();
+
+  if (normalizedPromptMessages.length > 0) {
+    const messages = normalizedPromptMessages.map((message) => ({ ...message }));
+    const firstSystemIndex = messages.findIndex(
+      (message) => message.role === "system",
+    );
+
+    if (systemSupplement) {
+      if (firstSystemIndex >= 0) {
+        messages[firstSystemIndex] = {
+          ...messages[firstSystemIndex],
+          content: [
+            messages[firstSystemIndex].content,
+            systemSupplement,
+          ]
+            .filter((part) => String(part || "").trim())
+            .join("\n\n"),
+        };
+      } else {
+        messages.unshift({ role: "system", content: systemSupplement });
+      }
+    }
+
+    if (userSupplement) {
+      const hasFallbackUserPrompt = Boolean(String(userPrompt || "").trim());
+      const lastUserIndex = [...messages]
+        .reverse()
+        .findIndex((message) => message.role === "user");
+      const resolvedLastUserIndex =
+        lastUserIndex >= 0 ? messages.length - 1 - lastUserIndex : -1;
+
+      if (resolvedLastUserIndex >= 0 && !hasFallbackUserPrompt) {
+        messages[resolvedLastUserIndex] = {
+          ...messages[resolvedLastUserIndex],
+          content: [
+            messages[resolvedLastUserIndex].content,
+            userSupplement,
+          ]
+            .filter((part) => String(part || "").trim())
+            .join("\n\n"),
+        };
+      } else {
+        messages.push({ role: "user", content: userSupplement });
+      }
+    }
+
+    return messages;
+  }
+
   const messages = [];
-  const normalizedSystemPrompt = systemParts.join("\n\n").trim();
+  const normalizedSystemPrompt = [systemPrompt, ...systemParts]
+    .filter((part) => String(part || "").trim())
+    .join("\n\n")
+    .trim();
   if (normalizedSystemPrompt) {
     messages.push({ role: "system", content: normalizedSystemPrompt });
   }
@@ -796,6 +870,7 @@ export async function callLLMForJSON({
   taskType = "",
   requestSource = "",
   additionalMessages = [],
+  promptMessages = [],
   debugContext = null,
 } = {}) {
   const override = getLlmTestOverride("callLLMForJSON");
@@ -808,6 +883,7 @@
       taskType,
       requestSource,
       additionalMessages,
+      promptMessages,
       debugContext,
     });
   }
@@ -827,6 +903,7 @@
       attempt,
       lastFailureReason,
       additionalMessages,
+      promptMessages,
     );
     const response = await callDedicatedOpenAICompatible(messages, {
       signal,
diff --git a/panel.js b/panel.js
index 8d192c9..8903246 100644
--- a/panel.js
+++ b/panel.js
@@ -2352,7 +2352,7 @@ function _renderTaskDebugPromptCard(taskType, promptBuild) {