fix: align prompt assembly with executed messages

This commit is contained in:
Youzini-afk
2026-03-27 03:03:50 +08:00
parent bf0ab29476
commit 9a5c83dbde
8 changed files with 341 additions and 75 deletions

View File

@@ -11,7 +11,11 @@ import {
getNode,
} from "./graph.js";
import { callLLMForJSON } from "./llm.js";
import { buildTaskExecutionDebugContext, buildTaskPrompt } from "./prompt-builder.js";
import {
buildTaskExecutionDebugContext,
buildTaskLlmPayload,
buildTaskPrompt,
} from "./prompt-builder.js";
import { getSTContextForPrompt } from "./st-context.js";
import { applyTaskRegex } from "./task-regex.js";
import { isDirectVectorConfig } from "./vector-index.js";
@@ -28,6 +32,21 @@ function createTaskLlmDebugContext(promptBuild, regexInput) {
: null;
}
// Resolve the system/user prompt plus message arrays for a task LLM call.
// Prefers the shared buildTaskLlmPayload helper (imported from
// prompt-builder.js); the typeof guard keeps this working against older
// prompt-builder builds that do not export the helper yet.
function resolveTaskPromptPayload(promptBuild, fallbackUserPrompt = "") {
  if (typeof buildTaskLlmPayload === "function") {
    return buildTaskLlmPayload(promptBuild, fallbackUserPrompt);
  }
  // Fallback: mirror the payload shape of buildTaskLlmPayload, delivering
  // privateTaskMessages as additionalMessages and no ordered promptMessages.
  return {
    systemPrompt: String(promptBuild?.systemPrompt || ""),
    userPrompt: String(fallbackUserPrompt || ""),
    promptMessages: [],
    additionalMessages: Array.isArray(promptBuild?.privateTaskMessages)
      ? promptBuild.privateTaskMessages
      : [],
  };
}
function throwIfAborted(signal) {
if (signal?.aborted) {
throw signal.reason instanceof Error ? signal.reason : createAbortError();
@@ -279,10 +298,14 @@ async function summarizeBatch(
);
const userPrompt = `请压缩以下 ${nodes.length} 个 "${typeDef.label}" 节点:\n\n${nodeDescriptions}`;
const promptPayload = resolveTaskPromptPayload(
compressPromptBuild,
userPrompt,
);
return await callLLMForJSON({
systemPrompt,
userPrompt,
systemPrompt: promptPayload.systemPrompt || systemPrompt,
userPrompt: promptPayload.userPrompt,
maxRetries: 1,
signal,
taskType: "compress",
@@ -290,11 +313,8 @@ async function summarizeBatch(
compressPromptBuild,
compressRegexInput,
),
additionalMessages:
compressPromptBuild.privateTaskMessages || [
...(compressPromptBuild.customMessages || []),
...(compressPromptBuild.additionalMessages || []),
],
promptMessages: promptPayload.promptMessages,
additionalMessages: promptPayload.additionalMessages,
});
}

View File

@@ -5,7 +5,11 @@
import { embedBatch, searchSimilar } from "./embedding.js";
import { addEdge, createEdge, getActiveNodes, getNode } from "./graph.js";
import { callLLMForJSON } from "./llm.js";
import { buildTaskExecutionDebugContext, buildTaskPrompt } from "./prompt-builder.js";
import {
buildTaskExecutionDebugContext,
buildTaskLlmPayload,
buildTaskPrompt,
} from "./prompt-builder.js";
import { getSTContextForPrompt } from "./st-context.js";
import { applyTaskRegex } from "./task-regex.js";
import {
@@ -27,6 +31,21 @@ function createTaskLlmDebugContext(promptBuild, regexInput) {
: null;
}
// Resolve the system/user prompt plus message arrays for a task LLM call.
// Uses the shared buildTaskLlmPayload helper when available; the typeof
// guard tolerates prompt-builder builds that do not export it.
function resolveTaskPromptPayload(promptBuild, fallbackUserPrompt = "") {
  if (typeof buildTaskLlmPayload === "function") {
    return buildTaskLlmPayload(promptBuild, fallbackUserPrompt);
  }
  // Fallback payload: same shape as buildTaskLlmPayload's return value,
  // with privateTaskMessages carried over as additionalMessages.
  return {
    systemPrompt: String(promptBuild?.systemPrompt || ""),
    userPrompt: String(fallbackUserPrompt || ""),
    promptMessages: [],
    additionalMessages: Array.isArray(promptBuild?.privateTaskMessages)
      ? promptBuild.privateTaskMessages
      : [],
  };
}
// True when the given value is an abort error raised via AbortController
// (identified by its `name` property, per the DOM AbortError convention).
function isAbortError(error) {
  const errorName = error?.name;
  return errorName === "AbortError";
}
@@ -318,10 +337,15 @@ export async function consolidateMemories({
consolidationRegexInput,
"system",
);
const promptPayload = resolveTaskPromptPayload(
consolidationPromptBuild,
userPrompt,
);
try {
decision = await callLLMForJSON({
systemPrompt: consolidationSystemPrompt,
userPrompt,
systemPrompt:
promptPayload.systemPrompt || consolidationSystemPrompt,
userPrompt: promptPayload.userPrompt,
maxRetries: 1,
signal,
taskType: "consolidation",
@@ -329,11 +353,8 @@ export async function consolidateMemories({
consolidationPromptBuild,
consolidationRegexInput,
),
additionalMessages:
consolidationPromptBuild.privateTaskMessages || [
...(consolidationPromptBuild.customMessages || []),
...(consolidationPromptBuild.additionalMessages || []),
],
promptMessages: promptPayload.promptMessages,
additionalMessages: promptPayload.additionalMessages,
});
} catch (e) {
if (isAbortError(e)) throw e;

View File

@@ -16,7 +16,11 @@ import {
} from "./graph.js";
import { callLLMForJSON } from "./llm.js";
import { ensureEventTitle, getNodeDisplayName } from "./node-labels.js";
import { buildTaskExecutionDebugContext, buildTaskPrompt } from "./prompt-builder.js";
import {
buildTaskExecutionDebugContext,
buildTaskLlmPayload,
buildTaskPrompt,
} from "./prompt-builder.js";
import { RELATION_TYPES } from "./schema.js";
import { applyTaskRegex } from "./task-regex.js";
import { getSTContextForPrompt } from "./st-context.js";
@@ -34,6 +38,21 @@ function createTaskLlmDebugContext(promptBuild, regexInput) {
: null;
}
// Resolve the prompt payload (systemPrompt, userPrompt, promptMessages,
// additionalMessages) for a task LLM call, delegating to the shared
// buildTaskLlmPayload helper when it is exported by prompt-builder.js.
function resolveTaskPromptPayload(promptBuild, fallbackUserPrompt = "") {
  if (typeof buildTaskLlmPayload === "function") {
    return buildTaskLlmPayload(promptBuild, fallbackUserPrompt);
  }
  // Legacy fallback: no ordered promptMessages; privateTaskMessages are
  // forwarded as additionalMessages when present and array-shaped.
  return {
    systemPrompt: String(promptBuild?.systemPrompt || ""),
    userPrompt: String(fallbackUserPrompt || ""),
    promptMessages: [],
    additionalMessages: Array.isArray(promptBuild?.privateTaskMessages)
      ? promptBuild.privateTaskMessages
      : [],
  };
}
// Detect an AbortController cancellation by the error's `name` field.
function isAbortError(error) {
  if (error?.name === "AbortError") {
    return true;
  }
  return false;
}
@@ -153,20 +172,18 @@ export async function extractMemories({
"",
"请分析对话,按 JSON 格式输出操作列表。",
].join("\n");
const promptPayload = resolveTaskPromptPayload(promptBuild, userPrompt);
// 调用 LLM
const result = await callLLMForJSON({
systemPrompt,
userPrompt,
systemPrompt: promptPayload.systemPrompt || systemPrompt,
userPrompt: promptPayload.userPrompt,
maxRetries: 2,
signal,
taskType: "extract",
debugContext: createTaskLlmDebugContext(promptBuild, extractRegexInput),
additionalMessages:
promptBuild.privateTaskMessages || [
...(promptBuild.customMessages || []),
...(promptBuild.additionalMessages || []),
],
promptMessages: promptPayload.promptMessages,
additionalMessages: promptPayload.additionalMessages,
});
throwIfAborted(signal);
@@ -667,9 +684,7 @@ export async function generateSynopsis({
"system",
);
const result = await callLLMForJSON({
systemPrompt: synopsisSystemPrompt,
userPrompt: [
const synopsisUserPrompt = [
"## 事件时间线",
eventSummaries,
"",
@@ -678,7 +693,15 @@ export async function generateSynopsis({
"",
"## 活跃主线",
threadSummary || "(无)",
].join("\n"),
].join("\n");
const synopsisPromptPayload = resolveTaskPromptPayload(
synopsisPromptBuild,
synopsisUserPrompt,
);
const result = await callLLMForJSON({
systemPrompt: synopsisPromptPayload.systemPrompt || synopsisSystemPrompt,
userPrompt: synopsisPromptPayload.userPrompt,
maxRetries: 1,
signal,
taskType: "synopsis",
@@ -686,11 +709,8 @@ export async function generateSynopsis({
synopsisPromptBuild,
synopsisRegexInput,
),
additionalMessages:
synopsisPromptBuild.privateTaskMessages || [
...(synopsisPromptBuild.customMessages || []),
...(synopsisPromptBuild.additionalMessages || []),
],
promptMessages: synopsisPromptPayload.promptMessages,
additionalMessages: synopsisPromptPayload.additionalMessages,
});
if (!result?.summary) return;
@@ -795,9 +815,7 @@ export async function generateReflection({
"system",
);
const result = await callLLMForJSON({
systemPrompt: reflectionSystemPrompt,
userPrompt: [
const reflectionUserPrompt = [
"## 最近事件",
eventSummary,
"",
@@ -809,7 +827,16 @@ export async function generateReflection({
"",
"## 已知矛盾",
contradictionSummary || "(无)",
].join("\n"),
].join("\n");
const reflectionPromptPayload = resolveTaskPromptPayload(
reflectionPromptBuild,
reflectionUserPrompt,
);
const result = await callLLMForJSON({
systemPrompt:
reflectionPromptPayload.systemPrompt || reflectionSystemPrompt,
userPrompt: reflectionPromptPayload.userPrompt,
maxRetries: 1,
signal,
taskType: "reflection",
@@ -817,11 +844,8 @@ export async function generateReflection({
reflectionPromptBuild,
reflectionRegexInput,
),
additionalMessages:
reflectionPromptBuild.privateTaskMessages || [
...(reflectionPromptBuild.customMessages || []),
...(reflectionPromptBuild.additionalMessages || []),
],
promptMessages: reflectionPromptPayload.promptMessages,
additionalMessages: reflectionPromptPayload.additionalMessages,
});
if (!result?.insight) return null;

83
llm.js
View File

@@ -430,15 +430,18 @@ function buildJsonAttemptMessages(
attempt,
reason = "",
additionalMessages = [],
promptMessages = [],
) {
const systemParts = [
systemPrompt,
"输出要求补充:只输出一个紧凑的 JSON 对象。",
"禁止 markdown 代码块、禁止解释、禁止前后缀、禁止省略号。",
"如果需要重新生成,请直接从头输出完整 JSON不要续写上一次内容。",
];
const userParts = [userPrompt];
const userParts = [];
if (String(userPrompt || "").trim()) {
userParts.push(String(userPrompt || "").trim());
}
if (attempt > 0) {
userParts.push(
reason ? `上一次输出失败原因:${reason}` : "上一次输出未能被程序解析。",
@@ -450,8 +453,79 @@ function buildJsonAttemptMessages(
userParts.push("请直接输出紧凑 JSON 对象,不要包含任何额外文本。");
}
const normalizedPromptMessages = Array.isArray(promptMessages)
? promptMessages
.map((message) => {
if (!message || typeof message !== "object") return null;
const role = String(message.role || "").trim().toLowerCase();
const content = String(message.content || "").trim();
if (!["system", "user", "assistant"].includes(role) || !content) {
return null;
}
return { role, content };
})
.filter(Boolean)
: [];
const systemSupplement = [systemPrompt, ...systemParts]
.filter((part) => String(part || "").trim())
.join("\n\n")
.trim();
const userSupplement = userParts.join("\n\n").trim();
if (normalizedPromptMessages.length > 0) {
const messages = normalizedPromptMessages.map((message) => ({ ...message }));
const firstSystemIndex = messages.findIndex(
(message) => message.role === "system",
);
if (systemSupplement) {
if (firstSystemIndex >= 0) {
messages[firstSystemIndex] = {
...messages[firstSystemIndex],
content: [
messages[firstSystemIndex].content,
systemSupplement,
]
.filter((part) => String(part || "").trim())
.join("\n\n"),
};
} else {
messages.unshift({ role: "system", content: systemSupplement });
}
}
if (userSupplement) {
const hasFallbackUserPrompt = Boolean(String(userPrompt || "").trim());
const lastUserIndex = [...messages]
.reverse()
.findIndex((message) => message.role === "user");
const resolvedLastUserIndex =
lastUserIndex >= 0 ? messages.length - 1 - lastUserIndex : -1;
if (resolvedLastUserIndex >= 0 && !hasFallbackUserPrompt) {
messages[resolvedLastUserIndex] = {
...messages[resolvedLastUserIndex],
content: [
messages[resolvedLastUserIndex].content,
userSupplement,
]
.filter((part) => String(part || "").trim())
.join("\n\n"),
};
} else {
messages.push({ role: "user", content: userSupplement });
}
}
return messages;
}
const messages = [];
const normalizedSystemPrompt = systemParts.join("\n\n").trim();
const normalizedSystemPrompt = [systemPrompt, ...systemParts]
.filter((part) => String(part || "").trim())
.join("\n\n")
.trim();
if (normalizedSystemPrompt) {
messages.push({ role: "system", content: normalizedSystemPrompt });
}
@@ -796,6 +870,7 @@ export async function callLLMForJSON({
taskType = "",
requestSource = "",
additionalMessages = [],
promptMessages = [],
debugContext = null,
} = {}) {
const override = getLlmTestOverride("callLLMForJSON");
@@ -808,6 +883,7 @@ export async function callLLMForJSON({
taskType,
requestSource,
additionalMessages,
promptMessages,
debugContext,
});
}
@@ -827,6 +903,7 @@ export async function callLLMForJSON({
attempt,
lastFailureReason,
additionalMessages,
promptMessages,
);
const response = await callDedicatedOpenAICompatible(messages, {
signal,

View File

@@ -2352,7 +2352,7 @@ function _renderTaskDebugPromptCard(taskType, promptBuild) {
</div>
<div class="bme-debug-kv-item">
<span class="bme-debug-kv-key">私有消息</span>
<span class="bme-debug-kv-value">${_escHtml(String(promptBuild.debug?.privateTaskMessageCount ?? promptBuild.privateTaskMessages?.length ?? 0))}</span>
<span class="bme-debug-kv-value">${_escHtml(String(promptBuild.debug?.executionMessageCount ?? promptBuild.executionMessages?.length ?? promptBuild.privateTaskMessages?.length ?? 0))}</span>
</div>
<div class="bme-debug-kv-item">
<span class="bme-debug-kv-key">EJS 状态</span>
@@ -2368,11 +2368,11 @@ function _renderTaskDebugPromptCard(taskType, promptBuild) {
</div>
</div>
${_renderDebugDetails("实际投递路径", promptBuild.debug?.effectivePath || null)}
${_renderDebugDetails("渲染后的块", promptBuild.renderedBlocks)}
${_renderDebugDetails("注入计划(推导)", promptBuild.hostInjectionPlan || null)}
${_renderDebugDetails("世界书注入内容(当前实际仍走私有 prompt", promptBuild.hostInjections)}
${_renderDebugDetails("私有任务消息", promptBuild.privateTaskMessages)}
${_renderDebugDetails("系统提示词", promptBuild.systemPrompt || "")}
${_renderDebugDetails("渲染后的块(按配置顺序)", promptBuild.renderedBlocks)}
${_renderDebugDetails("实际执行消息序列", promptBuild.executionMessages || promptBuild.privateTaskMessages || null)}
${_renderDebugDetails("系统提示词(兼容视图", promptBuild.systemPrompt || "")}
${_renderDebugDetails("世界书桶内容(诊断)", promptBuild.hostInjections)}
${_renderDebugDetails("世界书块命中计划(诊断)", promptBuild.hostInjectionPlan || null)}
${_renderDebugDetails("世界书调试", promptBuild.worldInfo?.debug || promptBuild.worldInfoResolution?.debug || null)}
`;
}

View File

@@ -68,11 +68,13 @@ export function buildTaskExecutionDebugContext(
return {
promptAssembly: {
mode: "private-task-prompt",
mode: "ordered-private-messages",
hostInjectionPlanMode:
promptDebug.hostInjectionPlanMode || "diagnostic-plan-only",
privateTaskMessageCount: Number(
promptDebug.privateTaskMessageCount ??
promptDebug.executionMessageCount ??
promptBuild?.executionMessages?.length ??
promptDebug.privateTaskMessageCount ??
promptBuild?.privateTaskMessages?.length ??
0,
),
@@ -130,6 +132,22 @@ function normalizeInjectionMode(mode) {
return "append";
}
// Build one normalized execution message ({ role, content, ...extra }) for
// the ordered private-message pipeline. Returns null when the content is
// blank after trimming so callers can skip empty messages. The role is
// normalized via normalizeRole (defined elsewhere in this file); `extra`
// carries optional provenance fields such as source/blockId/sourceKey.
function createExecutionMessage(
  role,
  content,
  extra = {},
) {
  const trimmedContent = String(content || "").trim();
  if (!trimmedContent) {
    // Empty content produces no message at all rather than a blank turn.
    return null;
  }
  return {
    role: normalizeRole(role),
    content: trimmedContent,
    ...extra,
  };
}
function stringifyInterpolatedValue(value) {
if (value == null) return "";
if (typeof value === "string") return value;
@@ -277,7 +295,10 @@ function buildHostInjectionPlan(renderedBlocks = [], worldInfoResolution = {}) {
for (const block of renderedBlocks) {
if (!block?.content) continue;
if (block.delivery === "host.before") {
if (
block.type === "builtin" &&
String(block.sourceKey || "") === "worldInfoBefore"
) {
plan.before.push(
createHostInjectionPlanEntry(block, "before", {
entryNames: beforeEntryNames,
@@ -287,7 +308,10 @@ function buildHostInjectionPlan(renderedBlocks = [], worldInfoResolution = {}) {
continue;
}
if (block.delivery === "host.after") {
if (
block.type === "builtin" &&
String(block.sourceKey || "") === "worldInfoAfter"
) {
plan.after.push(
createHostInjectionPlanEntry(block, "after", {
entryNames: afterEntryNames,
@@ -314,21 +338,25 @@ function buildHostInjectionPlan(renderedBlocks = [], worldInfoResolution = {}) {
}
// Map a rendered prompt block to its delivery channel: blocks with a
// system role go to "private.system", all other roles to "private.message".
function resolveBlockDelivery(block = {}) {
  return normalizeRole(block.role) === "system"
    ? "private.system"
    : "private.message";
}
function getBlockDiagnosticInjectionPosition(block = {}) {
if (
block.type === "builtin" &&
String(block.sourceKey || "") === "worldInfoBefore"
) {
return "host.before";
return "before";
}
if (
block.type === "builtin" &&
String(block.sourceKey || "") === "worldInfoAfter"
) {
return "host.after";
return "after";
}
return normalizeRole(block.role) === "system"
? "private.system"
: "private.message";
return "";
}
function profileRequiresWorldInfo(profile) {
@@ -411,7 +439,11 @@ export async function buildTaskPrompt(settings = {}, taskType, context = {}) {
let systemPrompt = "";
const customMessages = [];
const executionMessages = [];
const renderedBlocks = [];
let userRoleBlockCount = 0;
let assistantRoleBlockCount = 0;
let systemRoleBlockCount = 0;
for (const block of blocks) {
if (!block || block.enabled === false) continue;
@@ -449,10 +481,24 @@ export async function buildTaskPrompt(settings = {}, taskType, context = {}) {
: block._orderIndex,
injectionMode: mode,
delivery: resolveBlockDelivery(block),
effectiveDelivery: role === "system" ? "private.system" : "private.message",
effectiveDelivery: resolveBlockDelivery(block),
diagnosticInjectionPosition: getBlockDiagnosticInjectionPosition(block),
});
const executionMessage = createExecutionMessage(role, content, {
source: "profile-block",
blockId: String(block.id || ""),
blockName: String(block.name || ""),
blockType: String(block.type || "custom"),
sourceKey: String(block.sourceKey || ""),
injectionMode: mode,
});
if (executionMessage) {
executionMessages.push(executionMessage);
}
if (role === "system") {
systemRoleBlockCount += 1;
if (!systemPrompt) {
systemPrompt = content;
} else if (mode === "prepend") {
@@ -463,6 +509,11 @@ export async function buildTaskPrompt(settings = {}, taskType, context = {}) {
continue;
}
if (role === "user") {
userRoleBlockCount += 1;
} else if (role === "assistant") {
assistantRoleBlockCount += 1;
}
if (mode === "prepend") {
customMessages.unshift({ role, content });
} else {
@@ -470,6 +521,19 @@ export async function buildTaskPrompt(settings = {}, taskType, context = {}) {
}
}
for (const message of worldInfoResolution.additionalMessages || []) {
const executionMessage = createExecutionMessage(
message.role,
message.content,
{
source: "worldInfo-atDepth",
},
);
if (executionMessage) {
executionMessages.push(executionMessage);
}
}
const privateTaskMessages = [
...customMessages,
...worldInfoResolution.additionalMessages,
@@ -487,6 +551,7 @@ export async function buildTaskPrompt(settings = {}, taskType, context = {}) {
systemPrompt,
messages: privateTaskMessages,
},
executionMessages,
privateTaskMessages,
renderedBlocks,
worldInfoResolution,
@@ -525,16 +590,19 @@ export async function buildTaskPrompt(settings = {}, taskType, context = {}) {
customMessageCount: customMessages.length,
additionalMessageCount: worldInfoResolution.additionalMessages.length,
privateTaskMessageCount: privateTaskMessages.length,
executionMessageCount: executionMessages.length,
userRoleBlockCount,
assistantRoleBlockCount,
systemRoleBlockCount,
effectiveDelivery: {
systemBlocks: "private.system",
customMessages: "private.message",
worldInfoBeforeAfter: "private.system (host injection plan is diagnostic only)",
worldInfoAtDepth: "private.message",
profileBlocks: "ordered-private-messages",
worldInfoBeforeAfter: "inline-in-ordered-messages",
worldInfoAtDepth: "appended-private-messages",
},
worldInfoCacheHit: Boolean(worldInfoResolution.debug?.cache?.hit),
ejsRuntimeStatus: worldInfoResolution.debug?.ejsRuntimeStatus || "",
effectivePath: {
promptAssembly: "private-task-prompt",
promptAssembly: "ordered-private-messages",
hostInjectionPlan: "diagnostic-plan-only",
ejs:
worldInfoResolution.debug?.ejsRuntimeStatus ||
@@ -555,6 +623,7 @@ export async function buildTaskPrompt(settings = {}, taskType, context = {}) {
profileName: profile?.name || "",
systemPrompt,
privateTaskMessages,
executionMessages,
renderedBlocks,
hostInjections: worldInfoResolution.injections,
hostInjectionPlan,
@@ -565,6 +634,39 @@ export async function buildTaskPrompt(settings = {}, taskType, context = {}) {
return result;
}
// Convert a buildTaskPrompt() result into the payload consumed by
// callLLMForJSON: re-validates each executionMessage through
// createExecutionMessage (dropping blank/invalid entries) and decides
// between the ordered-message path and the legacy private-message path.
export function buildTaskLlmPayload(promptBuild = null, fallbackUserPrompt = "") {
  const executionMessages = Array.isArray(promptBuild?.executionMessages)
    ? promptBuild.executionMessages
        .map((message) =>
          createExecutionMessage(message.role, message.content, {
            source: String(message.source || ""),
            blockId: String(message.blockId || ""),
            blockName: String(message.blockName || ""),
            blockType: String(message.blockType || ""),
            sourceKey: String(message.sourceKey || ""),
            injectionMode: String(message.injectionMode || ""),
          }),
        )
        .filter(Boolean)
    : [];
  // If the ordered messages already contain a user turn, suppress the
  // fallback user prompt so the same content is not delivered twice.
  const hasUserMessage = executionMessages.some(
    (message) => message.role === "user",
  );
  return {
    systemPrompt: String(promptBuild?.systemPrompt || ""),
    userPrompt: hasUserMessage ? "" : String(fallbackUserPrompt || ""),
    promptMessages: executionMessages,
    // Legacy path only: when no ordered messages exist, fall back to the
    // flat privateTaskMessages list (empty array otherwise).
    additionalMessages:
      executionMessages.length > 0
        ? []
        : Array.isArray(promptBuild?.privateTaskMessages)
          ? promptBuild.privateTaskMessages
          : [],
  };
}
export function interpolateVariables(template, context = {}) {
return String(template || "").replace(/\{\{\s*([\w.]+)\s*\}\}/g, (_, key) => {
return stringifyInterpolatedValue(getByPath(context, key));

View File

@@ -11,7 +11,11 @@ import {
getNodeEdges,
} from "./graph.js";
import { callLLMForJSON } from "./llm.js";
import { buildTaskExecutionDebugContext, buildTaskPrompt } from "./prompt-builder.js";
import {
buildTaskExecutionDebugContext,
buildTaskLlmPayload,
buildTaskPrompt,
} from "./prompt-builder.js";
import { applyTaskRegex } from "./task-regex.js";
import { getSTContextForPrompt } from "./st-context.js";
import { findSimilarNodesByText, validateVectorConfig } from "./vector-index.js";
@@ -28,6 +32,21 @@ function createTaskLlmDebugContext(promptBuild, regexInput) {
: null;
}
// Resolve the LLM prompt payload for this task module, preferring the
// shared buildTaskLlmPayload export from prompt-builder.js. The typeof
// check guards against builds where the helper is absent.
function resolveTaskPromptPayload(promptBuild, fallbackUserPrompt = "") {
  if (typeof buildTaskLlmPayload === "function") {
    return buildTaskLlmPayload(promptBuild, fallbackUserPrompt);
  }
  // Fallback with the same shape: plain system/user prompts, no ordered
  // promptMessages, privateTaskMessages passed through when array-shaped.
  return {
    systemPrompt: String(promptBuild?.systemPrompt || ""),
    userPrompt: String(fallbackUserPrompt || ""),
    promptMessages: [],
    additionalMessages: Array.isArray(promptBuild?.privateTaskMessages)
      ? promptBuild.privateTaskMessages
      : [],
  };
}
// Report whether `error` is an AbortError produced by signal cancellation.
function isAbortError(error) {
  return Boolean(error) && error.name === "AbortError";
}
@@ -463,10 +482,11 @@ async function llmRecall(
"",
"请选择最相关的节点并输出 JSON。",
].join("\n");
const promptPayload = resolveTaskPromptPayload(recallPromptBuild, userPrompt);
const result = await callLLMForJSON({
systemPrompt,
userPrompt,
systemPrompt: promptPayload.systemPrompt || systemPrompt,
userPrompt: promptPayload.userPrompt,
maxRetries: 1,
signal,
taskType: "recall",
@@ -474,11 +494,8 @@ async function llmRecall(
recallPromptBuild,
recallRegexInput,
),
additionalMessages:
recallPromptBuild.privateTaskMessages || [
...(recallPromptBuild.customMessages || []),
...(recallPromptBuild.additionalMessages || []),
],
promptMessages: promptPayload.promptMessages,
additionalMessages: promptPayload.additionalMessages,
});
if (result?.selected_ids && Array.isArray(result.selected_ids)) {

View File

@@ -335,9 +335,14 @@ try {
assert.equal(promptBuild.hostInjectionPlan.atDepth.length, 1);
assert.equal(promptBuild.hostInjectionPlan.atDepth[0].entryName, "深度注入");
assert.equal(typeof promptBuild.debug.worldInfoCacheHit, "boolean");
assert.equal(promptBuild.executionMessages.length, 4);
assert.deepEqual(
promptBuild.executionMessages.map((message) => message.role),
["system", "system", "user", "system"],
);
assert.deepEqual(
promptBuild.renderedBlocks.map((block) => block.delivery),
["host.before", "host.after", "private.message"],
["private.system", "private.system", "private.message"],
);
assert.equal(promptBuild.additionalMessages.length, 1);
assert.equal(promptBuild.additionalMessages[0].content, "这是一条 atDepth 消息。");