// ST-Bionic-Memory-Ecology/tests/extractor-phase3-layered-context.mjs
// Phase 3 layered-context tests for the memory extractor.
import assert from "node:assert/strict";
import {
installResolveHooks,
toDataModuleUrl,
} from "./helpers/register-hooks-compat.mjs";
// In-memory shim for SillyTavern's extensions.js module: exposes an empty
// extension_settings object and a getContext() that prefers the test-provided
// globalThis.__stBmeTestContext, falling back to a minimal default context.
const extensionsShimSource = `export const extension_settings = {};
export function getContext() {
 return globalThis.__stBmeTestContext || {
 chat: [],
 chatMetadata: {},
 extensionSettings: {},
 powerUserSettings: {},
 characters: {},
 characterId: null,
 name1: '玩家',
 name2: '艾琳',
 chatId: 'test-chat',
 };
}`;
// In-memory shim for SillyTavern's script.js module: inert request headers
// and a pass-through parameter substitution helper.
const scriptShimSource = `export function getRequestHeaders() {
 return {};
}
export function substituteParamsExtended(value) {
 return String(value ?? '');
}`;
// In-memory shim for SillyTavern's openai.js module. sendOpenAIRequest throws
// on purpose: these tests must route all LLM calls through the stub override.
const openAiShimSource = `export const chat_completion_sources = {};
export async function sendOpenAIRequest() {
 throw new Error('sendOpenAIRequest should not be called in p3 test');
}`;
// Route the core-module specifiers (at every relative depth the extension
// code may import them from) to the in-memory shim modules above. This must
// run before any project module is imported.
const shimRoutes = [
  {
    specifiers: [
      "../../../extensions.js",
      "../../../../extensions.js",
      "../../../../../extensions.js",
    ],
    url: toDataModuleUrl(extensionsShimSource),
  },
  {
    specifiers: ["../../../../script.js", "../../../../../script.js"],
    url: toDataModuleUrl(scriptShimSource),
  },
  {
    specifiers: ["../../../../openai.js", "../../../../../openai.js"],
    url: toDataModuleUrl(openAiShimSource),
  },
];
installResolveHooks(shimRoutes);
// Project modules are imported dynamically AFTER installResolveHooks so the
// shimmed specifiers are honored during resolution.
const { createEmptyGraph, addNode, createNode } = await import("../graph/graph.js");
const { DEFAULT_NODE_SCHEMA } = await import("../graph/schema.js");
const { extractMemories } = await import("../maintenance/extractor.js");
// Fixed: summary-state.js was previously imported twice for two separate
// bindings; one destructuring import covers both.
const { appendSummaryEntry, normalizeGraphSummaryState } = await import("../graph/summary-state.js");
const { applyBatchStoryTime } = await import("../graph/story-timeline.js");
const { defaultSettings } = await import("../runtime/settings-defaults.js");
/**
 * Install the per-test override bag read by the extension runtime
 * (e.g. a stub `llm` client) on globalThis.
 * @param {object} [overrides] - Overrides to expose; defaults to `{}`.
 * @returns {() => void} Cleanup callback that removes the override bag.
 */
function setTestOverrides(overrides = {}) {
  globalThis.__stBmeTestOverrides = overrides;
  const restore = () => {
    delete globalThis.__stBmeTestOverrides;
  };
  return restore;
}
// Baseline chat context consumed by the shimmed getContext() for every test
// in this file: empty chat, two named participants, fixed chat id.
const baselineContext = {
  chat: [],
  chatMetadata: {},
  extensionSettings: {},
  powerUserSettings: {},
  characters: {},
  characterId: null,
  name1: "玩家",
  name2: "艾琳",
  chatId: "test-chat",
};
globalThis.__stBmeTestContext = baselineContext;
// Three user/assistant rounds covering seq 10..15, generated from round
// labels: each round contributes one 玩家 message and one 艾琳 reply.
const roundLabels = ["第一轮", "第二轮", "第三轮"];
const baseMessages = roundLabels.flatMap((label, round) => [
  { seq: 10 + round * 2, role: "user", content: `${label}消息`, name: "玩家", speaker: "玩家" },
  { seq: 11 + round * 2, role: "assistant", content: `${label}回复`, name: "艾琳", speaker: "艾琳" },
]);
/**
 * Flatten every piece of prompt text in a captured LLM payload into one
 * newline-joined string, so assertions can match regardless of which
 * prompt slot (system, user, promptMessages, additionalMessages) holds
 * a given fragment.
 * @param {object} captured - Payload captured from callLLMForJSON.
 * @returns {string} All prompt text joined with "\n".
 */
function collectAllPromptContent(captured) {
  const contentsOf = (maybeList) =>
    (Array.isArray(maybeList) ? maybeList : []).map((msg) => String(msg.content || ""));
  const pieces = [
    String(captured.systemPrompt || ""),
    String(captured.userPrompt || ""),
    ...contentsOf(captured.promptMessages),
    ...contentsOf(captured.additionalMessages),
  ];
  return pieces.join("\n");
}
// ── Test 1: default settings — activeSummaries and storyTimeContext passed ──
// With default settings, the captured prompt must contain (a) the seeded
// summary text, (b) the batch story-time label, and (c) a promptMessages
// entry tagged sourceKey === "recentMessages" carrying the dialogue.
{
  const graph = createEmptyGraph();
  normalizeGraphSummaryState(graph);
  // Seed one level-1 summary over an earlier message range [5, 9].
  // (Fixed: the return value was previously bound to an unused `entry` const.)
  appendSummaryEntry(graph, {
    text: "最近的局面总结测试文本",
    messageRange: [5, 9],
    level: 1,
  });
  applyBatchStoryTime(graph, { label: "第二天清晨", tense: "ongoing" }, "extract");
  let captured = null;
  // Stub the LLM: capture the payload and return an empty-but-valid result.
  const restore = setTestOverrides({
    llm: {
      async callLLMForJSON(payload) {
        captured = payload;
        return { operations: [], cognitionUpdates: [], regionUpdates: {} };
      },
    },
  });
  try {
    const result = await extractMemories({
      graph,
      messages: baseMessages.slice(0, 2),
      startSeq: 10,
      endSeq: 11,
      schema: DEFAULT_NODE_SCHEMA,
      embeddingConfig: null,
      settings: { ...defaultSettings },
    });
    assert.equal(result.success, true);
    assert.ok(captured, "LLM should be called");
    const allContent = collectAllPromptContent(captured);
    // activeSummaries should be somewhere in prompt content
    assert.match(allContent, /最近的局面总结测试文本/, "active summaries text should appear in prompt");
    // storyTimeContext should be somewhere in prompt content
    assert.match(allContent, /第二天清晨/, "story time label should appear in prompt");
    // recentMessages block should contain the dialogue
    const recentBlock = (Array.isArray(captured.promptMessages) ? captured.promptMessages : []).find(
      (m) => m.sourceKey === "recentMessages",
    );
    assert.ok(recentBlock, "recentMessages block should exist");
    assert.match(String(recentBlock.content || ""), /第一轮/, "recentMessages should contain dialogue content");
  } finally {
    // Always drop the override so later tests install fresh stubs.
    restore();
  }
}
// ── Test 2: extractRecentMessageCap limits messages ──
// Feeds all six messages (seq 10..15) with extractRecentMessageCap = 2 and
// verifies the recentMessages block keeps only the newest round: 第三轮 text
// must be present, 第一轮 text must not.
{
const graph = createEmptyGraph();
let captured = null;
// Stub the LLM: capture the payload and return an empty-but-valid result.
const restore = setTestOverrides({
llm: {
async callLLMForJSON(payload) {
captured = payload;
return { operations: [], cognitionUpdates: [], regionUpdates: {} };
},
},
});
try {
const result = await extractMemories({
graph,
messages: baseMessages,
startSeq: 10,
endSeq: 15,
schema: DEFAULT_NODE_SCHEMA,
embeddingConfig: null,
settings: {
...defaultSettings,
extractRecentMessageCap: 2,
},
});
assert.equal(result.success, true);
assert.ok(captured);
// With cap=2, only the last 2 messages (seq 14, 15) should be in the recentMessages block
const recentBlock = (Array.isArray(captured.promptMessages) ? captured.promptMessages : []).find(
(m) => m.sourceKey === "recentMessages",
);
assert.ok(recentBlock, "recentMessages block should exist");
const recentContent = String(recentBlock.content || "");
assert.match(recentContent, /第三轮/, "capped messages should contain the last messages");
assert.doesNotMatch(recentContent, /第一轮/, "capped messages should not contain early messages");
} finally {
// Remove the stub so later tests install their own.
restore();
}
}
// ── Test 3: extractPromptStructuredMode = "structured" omits dialogueText ──
// In "structured" mode the recentMessages block must still exist, be
// non-empty, and contain the dialogue text in its structured form.
{
  const graph = createEmptyGraph();
  let captured = null;
  // Stub the LLM: capture the payload and return an empty-but-valid result.
  const restore = setTestOverrides({
    llm: {
      async callLLMForJSON(payload) {
        captured = payload;
        return { operations: [], cognitionUpdates: [], regionUpdates: {} };
      },
    },
  });
  try {
    const result = await extractMemories({
      graph,
      messages: baseMessages.slice(0, 2),
      startSeq: 10,
      endSeq: 11,
      schema: DEFAULT_NODE_SCHEMA,
      embeddingConfig: null,
      settings: {
        ...defaultSettings,
        extractPromptStructuredMode: "structured",
      },
    });
    assert.equal(result.success, true);
    assert.ok(captured);
    // In structured mode, recentMessages block should still have structured content
    const recentBlock = (Array.isArray(captured.promptMessages) ? captured.promptMessages : []).find(
      (m) => m.sourceKey === "recentMessages",
    );
    assert.ok(recentBlock, "recentMessages block should exist");
    const recentContent = String(recentBlock?.content || "");
    assert.ok(recentContent.length > 0, "recentMessages block should have content");
    // NOTE(review): the original code computed collectAllPromptContent(captured)
    // here without asserting on it (dead code, removed). If structured mode is
    // required to exclude the raw transcript from the rest of the prompt, add
    // an explicit assert.doesNotMatch on the full prompt content.
    assert.match(recentContent, /第一轮/, "structured messages should contain dialogue");
  } finally {
    // Remove the stub so later tests install their own.
    restore();
  }
}
// ── Test 4: extractPromptStructuredMode = "transcript" passes string ──
// In "transcript" mode the dialogue text must appear somewhere in the prompt
// and the recentMessages block must still be emitted.
{
const graph = createEmptyGraph();
let captured = null;
// Stub the LLM: capture the payload and return an empty-but-valid result.
const restore = setTestOverrides({
llm: {
async callLLMForJSON(payload) {
captured = payload;
return { operations: [], cognitionUpdates: [], regionUpdates: {} };
},
},
});
try {
const result = await extractMemories({
graph,
messages: baseMessages.slice(0, 2),
startSeq: 10,
endSeq: 11,
schema: DEFAULT_NODE_SCHEMA,
embeddingConfig: null,
settings: {
...defaultSettings,
extractPromptStructuredMode: "transcript",
},
});
assert.equal(result.success, true);
assert.ok(captured);
// In transcript mode, the content should still be present in some form
const allContent = collectAllPromptContent(captured);
assert.match(allContent, /第一轮/, "transcript mode should have dialogue content");
// recentMessages block should exist and have transcript content
const recentBlock = (Array.isArray(captured.promptMessages) ? captured.promptMessages : []).find(
(m) => m.sourceKey === "recentMessages",
);
assert.ok(recentBlock, "recentMessages block should exist in transcript mode");
} finally {
// Remove the stub so later tests install their own.
restore();
}
}
// ── Test 5: extractIncludeSummaries = false omits summaries ──
// Seeds a summary entry, then verifies its text never reaches any prompt
// slot when extractIncludeSummaries is disabled.
{
const graph = createEmptyGraph();
normalizeGraphSummaryState(graph);
// This summary must NOT show up anywhere in the captured prompt.
appendSummaryEntry(graph, {
text: "这条总结不应出现",
messageRange: [5, 9],
level: 1,
});
let captured = null;
// Stub the LLM: capture the payload and return an empty-but-valid result.
const restore = setTestOverrides({
llm: {
async callLLMForJSON(payload) {
captured = payload;
return { operations: [], cognitionUpdates: [], regionUpdates: {} };
},
},
});
try {
const result = await extractMemories({
graph,
messages: baseMessages.slice(0, 2),
startSeq: 10,
endSeq: 11,
schema: DEFAULT_NODE_SCHEMA,
embeddingConfig: null,
settings: {
...defaultSettings,
extractIncludeSummaries: false,
},
});
assert.equal(result.success, true);
assert.ok(captured);
const allContent = collectAllPromptContent(captured);
assert.doesNotMatch(allContent, /这条总结不应出现/, "summaries should be excluded when disabled");
} finally {
// Remove the stub so later tests install their own.
restore();
}
}
// ── Test 6: extractIncludeStoryTime = false omits story time ──
// Stamps a story-time label on the graph, then verifies the label never
// reaches any prompt slot when extractIncludeStoryTime is disabled.
{
const graph = createEmptyGraph();
// This label must NOT show up anywhere in the captured prompt.
applyBatchStoryTime(graph, { label: "隐藏的时间标签", tense: "ongoing" }, "extract");
let captured = null;
// Stub the LLM: capture the payload and return an empty-but-valid result.
const restore = setTestOverrides({
llm: {
async callLLMForJSON(payload) {
captured = payload;
return { operations: [], cognitionUpdates: [], regionUpdates: {} };
},
},
});
try {
const result = await extractMemories({
graph,
messages: baseMessages.slice(0, 2),
startSeq: 10,
endSeq: 11,
schema: DEFAULT_NODE_SCHEMA,
embeddingConfig: null,
settings: {
...defaultSettings,
extractIncludeStoryTime: false,
},
});
assert.equal(result.success, true);
assert.ok(captured);
const allContent = collectAllPromptContent(captured);
assert.doesNotMatch(allContent, /隐藏的时间标签/, "story time should be excluded when disabled");
} finally {
// Remove the stub so later tests install their own.
restore();
}
}
// ── Test 7: new settings exist in defaults ──
// The phase-3 layered-context settings must ship with exactly these defaults.
{
  const expectedDefaults = {
    extractRecentMessageCap: 0,
    extractPromptStructuredMode: "both",
    extractWorldbookMode: "active",
    extractIncludeStoryTime: true,
    extractIncludeSummaries: true,
  };
  for (const [key, value] of Object.entries(expectedDefaults)) {
    assert.equal(defaultSettings[key], value);
  }
}
console.log("extractor-phase3-layered-context tests passed");