{
_patchSettings({ recallEnableMultiIntent: checked });
});
+ bindCheckbox("bme-setting-recall-context-query-blend-enabled", (checked) => {
+ _patchSettings({ recallEnableContextQueryBlend: checked });
+ });
+ bindCheckbox("bme-setting-recall-lexical-boost-enabled", (checked) => {
+ _patchSettings({ recallEnableLexicalBoost: checked });
+ });
bindCheckbox("bme-setting-recall-temporal-links-enabled", (checked) => {
_patchSettings({ recallEnableTemporalLinks: checked });
});
@@ -1760,6 +1793,23 @@ function _bindConfigControls() {
8,
(value) => _patchSettings({ recallMultiIntentMaxSegments: value }),
);
+ bindFloat(
+ "bme-setting-recall-context-assistant-weight",
+ 0.2,
+ 0,
+ 1,
+ (value) => _patchSettings({ recallContextAssistantWeight: value }),
+ );
+ bindFloat(
+ "bme-setting-recall-context-previous-user-weight",
+ 0.1,
+ 0,
+ 1,
+ (value) => _patchSettings({ recallContextPreviousUserWeight: value }),
+ );
+ bindFloat("bme-setting-recall-lexical-weight", 0.18, 0, 1, (value) =>
+ _patchSettings({ recallLexicalWeight: value }),
+ );
bindFloat("bme-setting-recall-teleport-alpha", 0.15, 0, 1, (value) =>
_patchSettings({ recallTeleportAlpha: value }),
);
@@ -1843,6 +1893,13 @@ function _bindConfigControls() {
bindFloat("bme-setting-forget-threshold", 0.5, 0.1, 1, (value) =>
_patchSettings({ forgetThreshold: value }),
);
+ bindNumber(
+ "bme-setting-maintenance-auto-min-new-nodes",
+ 3,
+ 1,
+ 50,
+ (value) => _patchSettings({ maintenanceAutoMinNewNodes: value }),
+ );
bindNumber("bme-setting-sleep-every", 10, 1, 200, (value) =>
_patchSettings({ sleepEveryN: value }),
);
@@ -2903,6 +2960,7 @@ function _renderTaskDebugTab(state) {
const promptBuild = runtimeDebug?.taskPromptBuilds?.[state.taskType] || null;
const llmRequest = runtimeDebug?.taskLlmRequests?.[state.taskType] || null;
const recallInjection = runtimeDebug?.injections?.recall || null;
+ const maintenanceDebug = runtimeDebug?.maintenance || null;
const graphPersistence = runtimeDebug?.graphPersistence || null;
return `
@@ -2923,6 +2981,9 @@ function _renderTaskDebugTab(state) {
${_renderTaskDebugGraphPersistenceCard(graphPersistence)}
+
+ ${_renderTaskDebugMaintenanceCard(maintenanceDebug)}
+
${_renderTaskDebugPromptCard(state.taskType, promptBuild)}
@@ -2937,6 +2998,32 @@ function _renderTaskDebugTab(state) {
`;
}
+function _renderTaskDebugMaintenanceCard(maintenanceDebug) {
+ const lastAction = maintenanceDebug?.lastAction || null;
+ const lastUndoResult = maintenanceDebug?.lastUndoResult || null;
+
+ if (!lastAction && !lastUndoResult) {
+ return `
+
维护账本状态
+
当前还没有最近维护或撤销快照。
+ `;
+ }
+
+ return `
+
+
+
维护账本状态
+
+ 最近一次维护记录和最近一次撤销结果。
+
+
+
${_escHtml(lastAction?.action || lastUndoResult?.action || "maintenance")}
+
+ ${_renderDebugDetails("最近维护", lastAction)}
+ ${_renderDebugDetails("最近撤销", lastUndoResult)}
+ `;
+}
+
function _renderTaskDebugGraphPersistenceCard(graphPersistence) {
if (!graphPersistence) {
return `
diff --git a/retriever.js b/retriever.js
index 55a169c..e669b49 100644
--- a/retriever.js
+++ b/retriever.js
@@ -126,10 +126,15 @@ function createRetrievalMeta(enableLLMRecall) {
diffusionHits: 0,
scoredCandidates: 0,
segmentsUsed: [],
+ queryBlendActive: false,
+ queryBlendParts: [],
+ queryBlendWeights: {},
vectorMergedHits: 0,
seedCount: 0,
temporalSyntheticEdgeCount: 0,
teleportAlpha: 0,
+ lexicalBoostedNodes: 0,
+ lexicalTopHits: [],
cooccurrenceBoostedNodes: 0,
candidatePoolBeforeDpp: 0,
candidatePoolAfterDpp: 0,
@@ -159,6 +164,421 @@ function clampRange(value, fallback, min = 0, max = 1) {
return Math.max(min, Math.min(max, parsed));
}
/**
 * Collapse all whitespace in a value to single spaces and cap its length.
 * @param {unknown} value - Raw text; null/undefined become "".
 * @param {number} [maxLength=400] - Hard cap on the returned string length
 *   (treated as at least 1).
 * @returns {string} Trimmed, whitespace-collapsed text.
 */
function normalizeQueryText(value, maxLength = 400) {
  const collapsed = String(value ?? "")
    .replace(/\r\n/g, "\n")
    .replace(/\s+/g, " ")
    .trim();
  if (collapsed === "") return "";
  const limit = Math.max(1, maxLength);
  return collapsed.slice(0, limit);
}
+
/**
 * Build a short single-line preview of `text`, appending "..." when the
 * normalized text exceeds `maxLength`.
 * @param {unknown} text - Source text; normalized before measuring.
 * @param {number} [maxLength=120] - Maximum preview length before the "...".
 * @returns {string} Preview string, or "" when text normalizes to nothing.
 */
function createTextPreview(text, maxLength = 120) {
  // Normalize with a little slack so "exactly maxLength" can be told apart
  // from "longer than maxLength" after whitespace collapsing.
  const normalized = normalizeQueryText(text, maxLength + 4);
  if (!normalized) return "";
  if (normalized.length <= maxLength) return normalized;
  return `${normalized.slice(0, maxLength)}...`;
}
+
/**
 * Round a blend weight to three decimal places; non-numeric input becomes 0.
 * @param {unknown} value
 * @returns {number}
 */
function roundBlendWeight(value) {
  const numeric = Number(value) || 0;
  return Math.round(numeric * 1000) / 1000;
}
+
/**
 * Normalize a list of strings, dropping blanks and case-insensitive
 * duplicates while keeping first-seen order and casing.
 * @param {unknown[]} [values=[]]
 * @param {number} [maxLength=400] - Per-item normalization cap.
 * @returns {string[]}
 */
function uniqueStrings(values = [], maxLength = 400) {
  const seen = new Set();
  const out = [];

  for (const candidate of values) {
    const normalized = normalizeQueryText(candidate, maxLength);
    if (!normalized) continue;
    const dedupeKey = normalized.toLowerCase();
    if (seen.has(dedupeKey)) continue;
    seen.add(dedupeKey);
    out.push(normalized);
  }

  return out;
}
+
/**
 * Parse one recall-context line into a { role, text } pair.
 * Accepts "[user]: ..." / "[assistant]: ..." as well as plain "user:",
 * "assistant:", "用户:", "助手:", "ai:" prefixes (ASCII or full-width colon).
 * @param {unknown} [line=""]
 * @returns {{role: "user"|"assistant", text: string}|null} null when the line
 *   is empty, unlabeled, or has no text after the label.
 */
function parseRecallContextLine(line = "") {
  const trimmed = String(line ?? "").trim();
  if (!trimmed) return null;

  const bracketed = trimmed.match(/^\[(user|assistant)\]\s*:\s*([\s\S]*)$/i);
  if (bracketed) {
    const role = String(bracketed[1] || "").toLowerCase();
    const text = normalizeQueryText(bracketed[2] || "");
    if (!text) return null;
    return { role, text };
  }

  const labeled = trimmed.match(
    /^(user|assistant|用户|助手|ai)\s*[::]\s*([\s\S]*)$/i,
  );
  if (!labeled) return null;

  const token = String(labeled[1] || "").toLowerCase();
  const isAssistant =
    token === "assistant" || token === "助手" || token === "ai";
  const text = normalizeQueryText(labeled[2] || "");
  if (!text) return null;
  return { role: isAssistant ? "assistant" : "user", text };
}
+
/**
 * Build a weighted multi-part recall query from the current user message plus
 * recent conversational context (latest assistant reply and the most recent
 * user message that differs from the current one).
 *
 * @param {unknown} userMessage - Current user message text.
 * @param {unknown[]} [recentMessages=[]] - Raw context lines; parsed via
 *   parseRecallContextLine, unparseable lines are ignored.
 * @param {object} [options]
 * @param {boolean} [options.enabled=true] - When false, only the current
 *   message is used (weight 1) and `active` is always false.
 * @param {number} [options.assistantWeight=0.2] - Clamped to [0, 1].
 * @param {number} [options.previousUserWeight=0.1] - Clamped to [0, 1].
 * @param {number} [options.maxTextLength=400] - Per-part text cap.
 * @returns {{active: boolean, parts: Array<{kind: string, label: string,
 *   text: string, weight: number}>, currentText: string,
 *   assistantText: string, previousUserText: string, combinedText: string}}
 */
function buildContextQueryBlend(
  userMessage,
  recentMessages = [],
  {
    enabled = true,
    assistantWeight = 0.2,
    previousUserWeight = 0.1,
    maxTextLength = 400,
  } = {},
) {
  const currentText = normalizeQueryText(userMessage, maxTextLength);
  const normalizedAssistantWeight = clampRange(assistantWeight, 0.2, 0, 1);
  const normalizedPreviousUserWeight = clampRange(
    previousUserWeight,
    0.1,
    0,
    1,
  );
  // Whatever weight is left after the context parts goes to the current
  // message (never negative, even if the context weights sum above 1).
  const currentWeight = Math.max(
    0,
    1 - normalizedAssistantWeight - normalizedPreviousUserWeight,
  );

  // Scan from the newest message backwards for the most recent assistant
  // reply and the most recent user message differing from the current one.
  const currentKey = currentText.toLowerCase();
  let assistantText = "";
  let previousUserText = "";
  const parsedMessages = Array.isArray(recentMessages)
    ? recentMessages.map((line) => parseRecallContextLine(line)).filter(Boolean)
    : [];

  for (let index = parsedMessages.length - 1; index >= 0; index--) {
    const item = parsedMessages[index];
    const itemText = normalizeQueryText(item.text, maxTextLength);
    if (!assistantText && item.role === "assistant") {
      assistantText = itemText;
    }
    if (
      !previousUserText &&
      item.role === "user" &&
      itemText.toLowerCase() !== currentKey
    ) {
      previousUserText = itemText;
    }
    if (assistantText && previousUserText) break;
  }

  const rawParts = [
    {
      kind: "currentUser",
      label: "当前用户消息",
      text: currentText,
      weight: enabled ? currentWeight : 1,
    },
  ];

  if (enabled && assistantText) {
    rawParts.push({
      kind: "assistantContext",
      label: "最近 assistant 回复",
      text: assistantText,
      weight: normalizedAssistantWeight,
    });
  }

  if (enabled && previousUserText) {
    rawParts.push({
      kind: "previousUser",
      label: "上一条 user 消息",
      text: previousUserText,
      weight: normalizedPreviousUserWeight,
    });
  }

  // Drop empty parts and case-insensitive duplicates. First occurrence wins,
  // so the current-user part always survives when non-empty.
  const dedupedParts = [];
  const seen = new Set();
  for (const part of rawParts) {
    const text = normalizeQueryText(part.text, maxTextLength);
    const key = text.toLowerCase();
    if (!text || seen.has(key)) continue;
    seen.add(key);
    dedupedParts.push({ ...part, text });
  }

  if (dedupedParts.length === 0) {
    return {
      active: false,
      parts: [],
      currentText: "",
      assistantText: "",
      previousUserText: "",
      combinedText: "",
    };
  }

  // Renormalize surviving weights so they sum to 1 (uniform fallback when
  // every raw weight is zero).
  const totalWeight = dedupedParts.reduce(
    (sum, part) => sum + Math.max(0, Number(part.weight) || 0),
    0,
  );
  const normalizedParts = dedupedParts.map((part) => ({
    ...part,
    weight:
      totalWeight > 0
        ? roundBlendWeight(Math.max(0, Number(part.weight) || 0) / totalWeight)
        : roundBlendWeight(1 / dedupedParts.length),
  }));
  const combinedText =
    normalizedParts.length <= 1
      ? normalizedParts[0]?.text || ""
      : normalizedParts
          .map((part) => `${part.label}:\n${part.text}`)
          .join("\n\n");

  return {
    active: enabled && normalizedParts.length > 1,
    parts: normalizedParts,
    currentText: currentText || normalizedParts[0]?.text || "",
    assistantText,
    previousUserText,
    combinedText,
  };
}
+
/**
 * Expand a context query blend into concrete vector-search queries.
 * Only the current-user part is split into intent segments; context parts
 * are queried as-is. Each plan entry keeps its blend weight so vector scores
 * can be scaled per part.
 * @param {{parts?: Array<{kind: string, label: string, text: string,
 *   weight: number}>}} blendPlan - Output of buildContextQueryBlend.
 * @param {object} [options]
 * @param {boolean} [options.enableMultiIntent=true]
 * @param {number} [options.maxSegments=4]
 * @returns {{plan: Array<{kind: string, label: string, weight: number,
 *   queries: string[]}>, currentSegments: string[]}}
 */
function buildVectorQueryPlan(
  blendPlan,
  { enableMultiIntent = true, maxSegments = 4 } = {},
) {
  const plan = [];
  let currentSegments = [];

  for (const part of blendPlan?.parts || []) {
    let queries;
    if (part.kind === "currentUser" && enableMultiIntent) {
      // Segments identical to the full text are dropped; uniqueStrings also
      // removes case-insensitive duplicates.
      currentSegments = splitIntentSegments(part.text, { maxSegments });
      queries = uniqueStrings([
        part.text,
        ...currentSegments.filter((item) => item !== part.text),
      ]);
    } else {
      queries = uniqueStrings([part.text]);
    }

    plan.push({
      kind: part.kind,
      label: part.label,
      weight: part.weight,
      queries,
    });
  }

  return { plan, currentSegments };
}
+
/**
 * Derive lexical matching sources from the user message: the full normalized
 * message plus (optionally) its intent segments, deduplicated.
 * @param {unknown} userMessage
 * @param {object} [options]
 * @param {boolean} [options.enableMultiIntent=true]
 * @param {number} [options.maxSegments=4]
 * @returns {{sources: string[], segments: string[]}}
 */
function buildLexicalQuerySources(
  userMessage,
  { enableMultiIntent = true, maxSegments = 4 } = {},
) {
  const fullText = normalizeQueryText(userMessage, 400);
  let segments = [];
  if (enableMultiIntent) {
    segments = splitIntentSegments(fullText, { maxSegments });
  }
  return {
    sources: uniqueStrings([fullText, ...segments]),
    segments,
  };
}
+
/**
 * Lower-cased, whitespace-collapsed text (capped at 600 chars) for lexical
 * comparisons.
 * @param {unknown} [value=""]
 * @returns {string}
 */
function normalizeLexicalText(value = "") {
  const normalized = normalizeQueryText(value, 600);
  return normalized.toLowerCase();
}
+
/**
 * Tokenize text into lexical match units: alphanumeric runs and CJK runs of
 * length >= 2, plus overlapping CJK bigrams for runs longer than two chars.
 * The result is deduplicated in first-seen order.
 * @param {unknown} [text=""]
 * @returns {string[]}
 */
function buildLexicalUnits(text = "") {
  const normalized = normalizeLexicalText(text);
  if (!normalized) return [];

  const tokens = normalized.match(/[a-z0-9]+|[\u4e00-\u9fff]+/g) || [];
  const units = new Set();

  for (const token of tokens) {
    if (token.length >= 2) {
      units.add(token);
    }
    // CJK runs longer than two chars also index their bigrams so partial
    // phrase overlap still counts.
    if (/[\u4e00-\u9fff]/.test(token) && token.length > 2) {
      for (let start = 0; start < token.length - 1; start++) {
        units.add(token.slice(start, start + 2));
      }
    }
  }

  return [...units];
}
+
/**
 * Fraction of `sourceUnits` that also appear in `targetUnits`.
 * @param {string[]} [sourceUnits=[]]
 * @param {string[]} [targetUnits=[]]
 * @returns {number} In [0, 1]; 0 when either list is empty.
 */
function computeTokenOverlapScore(sourceUnits = [], targetUnits = []) {
  if (sourceUnits.length === 0 || targetUnits.length === 0) return 0;
  const targetSet = new Set(targetUnits);
  const overlap = sourceUnits.reduce(
    (count, unit) => (targetSet.has(unit) ? count + 1 : count),
    0,
  );
  return overlap / Math.max(1, sourceUnits.length);
}
+
/**
 * Score how well a node field matches any of the query sources.
 * Exact (case/whitespace-insensitive) equality scores `exact`, substring
 * containment in either direction scores `includes`, and token overlap scores
 * up to `overlap`; the best match across sources wins, capped at 1.
 * @param {unknown} fieldText
 * @param {string[]} [querySources=[]]
 * @param {{exact?: number, includes?: number, overlap?: number}} [weights]
 * @returns {number} In [0, 1].
 */
function scoreFieldMatch(
  fieldText,
  querySources = [],
  { exact = 1, includes = 0.9, overlap = 0.6 } = {},
) {
  const normalizedField = normalizeLexicalText(fieldText);
  if (!normalizedField) return 0;

  const fieldUnits = buildLexicalUnits(normalizedField);
  let best = 0;

  for (const sourceText of querySources) {
    const normalizedSource = normalizeLexicalText(sourceText);
    if (!normalizedSource) continue;

    if (normalizedSource === normalizedField) {
      best = Math.max(best, exact);
      continue;
    }

    // Containment only counts when the shorter side has at least 2 chars,
    // so single characters cannot trigger the includes bonus.
    const shorterLength = Math.min(
      normalizedSource.length,
      normalizedField.length,
    );
    const contained =
      normalizedSource.includes(normalizedField) ||
      normalizedField.includes(normalizedSource);
    if (shorterLength >= 2 && contained) {
      best = Math.max(best, includes);
    }

    const overlapScore = computeTokenOverlapScore(
      buildLexicalUnits(normalizedSource),
      fieldUnits,
    );
    best = Math.max(best, overlapScore * overlap);
  }

  return Math.min(1, best);
}
+
/**
 * Collect non-blank string values (including strings inside array fields)
 * from the named fields of a node.
 * @param {{fields?: Record<string, unknown>}} node
 * @param {string[]} [fieldNames=[]]
 * @returns {string[]} Trimmed values in field order.
 */
function collectNodeLexicalTexts(node, fieldNames = []) {
  const texts = [];
  const pushText = (candidate) => {
    if (typeof candidate === "string" && candidate.trim()) {
      texts.push(candidate.trim());
    }
  };

  for (const fieldName of fieldNames) {
    const value = node?.fields?.[fieldName];
    if (Array.isArray(value)) {
      value.forEach(pushText);
    } else {
      pushText(value);
    }
  }

  return texts;
}
+
/**
 * Combine field-level lexical match scores into a single [0, 1] node score.
 *
 * Primary fields (name/title) are matched with the strictest thresholds,
 * secondary narrative fields with softer ones, and a token-level pass over
 * all text combined catches scattered overlap. The final score is the best
 * of the individually damped channel scores and a weighted blend of all
 * three. NOTE(review): the channel constants (0.82 / 0.7 / 0.75 / 0.35 /
 * 0.2) look like recall-quality tuning values — confirm before changing.
 * @param {object} node - Graph node whose `fields` record is scanned.
 * @param {string[]} [querySources=[]] - Normalized lexical query strings.
 * @returns {number} 0 when node is missing, sources are empty, or nothing
 *   matches at all.
 */
function computeLexicalScore(node, querySources = []) {
  if (!node || !Array.isArray(querySources) || querySources.length === 0) {
    return 0;
  }

  const primaryTexts = collectNodeLexicalTexts(node, ["name", "title"]);
  const secondaryTexts = collectNodeLexicalTexts(node, [
    "summary",
    "insight",
    "state",
    "traits",
    "participants",
    "status",
  ]);
  const combinedText = [...primaryTexts, ...secondaryTexts].join(" ");

  // Best match over identity fields, strict thresholds.
  const primaryScore = primaryTexts.reduce(
    (best, value) =>
      Math.max(
        best,
        scoreFieldMatch(value, querySources, {
          exact: 1,
          includes: 0.92,
          overlap: 0.72,
        }),
      ),
    0,
  );
  // Best match over descriptive fields, softer thresholds.
  const secondaryScore = secondaryTexts.reduce(
    (best, value) =>
      Math.max(
        best,
        scoreFieldMatch(value, querySources, {
          exact: 0.82,
          includes: 0.68,
          overlap: 0.52,
        }),
      ),
    0,
  );
  // Token-level pass over everything at once, softest thresholds.
  const tokenScore = scoreFieldMatch(combinedText, querySources, {
    exact: 0.65,
    includes: 0.55,
    overlap: 0.45,
  });

  if (primaryScore <= 0 && secondaryScore <= 0 && tokenScore <= 0) {
    return 0;
  }

  // Final: best single (damped) channel vs a weighted blend, capped at 1.
  return Math.min(
    1,
    Math.max(
      primaryScore,
      secondaryScore * 0.82,
      tokenScore * 0.7,
      primaryScore * 0.75 + secondaryScore * 0.35 + tokenScore * 0.2,
    ),
  );
}
+
/**
 * Summarize the strongest lexical matches for debug display.
 * @param {Array<{nodeId: string, node?: object, lexicalScore?: number,
 *   finalScore?: number}>} [scoredNodes=[]]
 * @param {number} [maxCount=5] - Treated as at least 1.
 * @returns {Array<{nodeId: string, type: string, label: string,
 *   lexicalScore: number, finalScore: number}>} Sorted by lexical score
 *   descending, ties broken by final score descending.
 */
function buildLexicalTopHits(scoredNodes = [], maxCount = 5) {
  const toNumber = (value) => Number(value) || 0;
  const round3 = (value) => Math.round(toNumber(value) * 1000) / 1000;

  const ranked = scoredNodes
    .filter((item) => toNumber(item?.lexicalScore) > 0)
    .sort(
      (a, b) =>
        toNumber(b?.lexicalScore) - toNumber(a?.lexicalScore) ||
        toNumber(b?.finalScore) - toNumber(a?.finalScore),
    );

  return ranked.slice(0, Math.max(1, maxCount)).map((item) => ({
    nodeId: item.nodeId,
    type: item.node?.type || "",
    label:
      item.node?.fields?.name ||
      item.node?.fields?.title ||
      item.node?.fields?.summary ||
      item.nodeId,
    lexicalScore: round3(item.lexicalScore),
    finalScore: round3(item.finalScore),
  }));
}
+
/**
 * Multiply each vector result's score by a non-negative weight, returning
 * new objects (the input array and its items are not mutated).
 * @param {Array<{score?: number}>} [results=[]] - Non-array input yields [].
 * @param {number} [weight=1] - Negative or non-numeric weights become 0.
 * @returns {Array<object>}
 */
function scaleVectorResults(results = [], weight = 1) {
  if (!Array.isArray(results)) return [];
  const factor = Math.max(0, Number(weight) || 0);
  return results.map((item) => ({
    ...item,
    score: (Number(item?.score) || 0) * factor,
  }));
}
+
/**
* 三层混合检索管线
*
@@ -248,6 +668,21 @@ export async function retrieve({
10,
);
const residualTopK = clampPositiveInt(options.residualTopK, 5);
+ const enableContextQueryBlend = options.enableContextQueryBlend ?? true;
+ const contextAssistantWeight = clampRange(
+ options.contextAssistantWeight,
+ 0.2,
+ 0,
+ 1,
+ );
+ const contextPreviousUserWeight = clampRange(
+ options.contextPreviousUserWeight,
+ 0.1,
+ 0,
+ 1,
+ );
+ const enableLexicalBoost = options.enableLexicalBoost ?? true;
+ const lexicalWeight = clampRange(options.lexicalWeight, 0.18, 0, 10);
let activeNodes = getActiveNodes(graph).filter(
(node) =>
@@ -270,6 +705,29 @@ export async function retrieve({
);
const vectorValidation = validateVectorConfig(embeddingConfig);
const retrievalMeta = createRetrievalMeta(enableLLMRecall);
+ const contextQueryBlend = buildContextQueryBlend(userMessage, recentMessages, {
+ enabled: enableContextQueryBlend,
+ assistantWeight: contextAssistantWeight,
+ previousUserWeight: contextPreviousUserWeight,
+ });
+ retrievalMeta.queryBlendActive = contextQueryBlend.active;
+ retrievalMeta.queryBlendParts = (contextQueryBlend.parts || []).map((part) => ({
+ kind: part.kind,
+ label: part.label,
+ weight: part.weight,
+ text: createTextPreview(part.text),
+ length: part.text.length,
+ }));
+ retrievalMeta.queryBlendWeights = Object.fromEntries(
+ (contextQueryBlend.parts || []).map((part) => [part.kind, part.weight]),
+ );
+ const lexicalQuery = buildLexicalQuerySources(
+ contextQueryBlend.currentText || userMessage,
+ {
+ enableMultiIntent,
+ maxSegments: multiIntentMaxSegments,
+ },
+ );
console.log(
`[ST-BME] 检索开始: ${nodeCount} 个活跃节点${enableVisibility ? " (认知边界已启用)" : ""}`,
);
@@ -299,25 +757,25 @@ export async function retrieve({
const vectorStartedAt = nowMs();
if (enableVectorPrefilter && vectorValidation.valid) {
console.log("[ST-BME] 第1层: 向量预筛");
- const segments = enableMultiIntent
- ? splitIntentSegments(userMessage, {
- maxSegments: multiIntentMaxSegments,
- })
- : [];
- const queries = [userMessage, ...segments.filter((item) => item !== userMessage)];
+ const queryPlan = buildVectorQueryPlan(contextQueryBlend, {
+ enableMultiIntent,
+ maxSegments: multiIntentMaxSegments,
+ });
const groups = [];
- retrievalMeta.segmentsUsed = segments;
- for (const queryText of queries) {
- const results = await vectorPreFilter(
- graph,
- queryText,
- activeNodes,
- embeddingConfig,
- normalizedTopK,
- signal,
- );
- groups.push(results);
+ retrievalMeta.segmentsUsed = queryPlan.currentSegments;
+ for (const part of queryPlan.plan) {
+ for (const queryText of part.queries) {
+ const results = await vectorPreFilter(
+ graph,
+ queryText,
+ activeNodes,
+ embeddingConfig,
+ normalizedTopK,
+ signal,
+ );
+ groups.push(scaleVectorResults(results, part.weight || 1));
+ }
}
const merged = mergeVectorResults(
@@ -332,7 +790,12 @@ export async function retrieve({
}
retrievalMeta.timings.vector = roundMs(nowMs() - vectorStartedAt);
- exactEntityAnchors.push(...extractEntityAnchors(userMessage, activeNodes));
+ exactEntityAnchors.push(
+ ...extractEntityAnchors(
+ contextQueryBlend.currentText || userMessage,
+ activeNodes,
+ ),
+ );
supplementalAnchorNodeIds = collectSupplementalAnchorNodeIds(
graph,
vectorResults,
@@ -354,7 +817,7 @@ export async function retrieve({
residualBasisMaxNodes,
);
residualResult = await runResidualRecall({
- queryText: userMessage,
+ queryText: contextQueryBlend.combinedText || userMessage,
graph,
embeddingConfig,
basisNodes,
@@ -514,22 +977,39 @@ export async function retrieve({
for (const [nodeId, scores] of scoreMap) {
const node = getNode(graph, nodeId);
if (!node || node.archived) continue;
+ const lexicalScore = enableLexicalBoost
+ ? computeLexicalScore(node, lexicalQuery.sources)
+ : 0;
const finalScore = hybridScore(
{
graphScore: scores.graphScore,
vectorScore: scores.vectorScore,
+ lexicalScore,
importance: node.importance,
createdTime: node.createdTime,
},
- weights,
+ {
+ ...weights,
+ lexicalWeight: enableLexicalBoost ? lexicalWeight : 0,
+ },
);
- scoredNodes.push({ nodeId, node, finalScore, ...scores });
+ scoredNodes.push({
+ nodeId,
+ node,
+ finalScore,
+ lexicalScore,
+ ...scores,
+ });
}
scoredNodes.sort((a, b) => b.finalScore - a.finalScore);
retrievalMeta.scoredCandidates = scoredNodes.length;
+ retrievalMeta.lexicalBoostedNodes = scoredNodes.filter(
+ (item) => (Number(item.lexicalScore) || 0) > 0,
+ ).length;
+ retrievalMeta.lexicalTopHits = buildLexicalTopHits(scoredNodes);
retrievalMeta.timings.scoring = roundMs(nowMs() - scoringStartedAt);
let selectedNodeIds;
diff --git a/runtime-state.js b/runtime-state.js
index e017d34..4fc02ec 100644
--- a/runtime-state.js
+++ b/runtime-state.js
@@ -1,6 +1,7 @@
// ST-BME: 运行时状态与历史恢复辅助
const BATCH_JOURNAL_LIMIT = 96;
+const MAINTENANCE_JOURNAL_LIMIT = 20;
export const BATCH_JOURNAL_VERSION = 2;
export function buildVectorCollectionId(chatId) {
@@ -49,6 +50,10 @@ export function createDefaultBatchJournal() {
return [];
}
+export function createDefaultMaintenanceJournal() {
+ return [];
+}
+
export function normalizeGraphRuntimeState(graph, chatId = "") {
if (!graph || typeof graph !== "object") {
return graph;
@@ -165,6 +170,11 @@ export function normalizeGraphRuntimeState(graph, chatId = "") {
graph.batchJournal = Array.isArray(graph.batchJournal)
? graph.batchJournal.slice(-BATCH_JOURNAL_LIMIT)
: createDefaultBatchJournal();
+ graph.maintenanceJournal = Array.isArray(graph.maintenanceJournal)
+ ? graph.maintenanceJournal
+ .filter((entry) => entry && typeof entry === "object")
+ .slice(-MAINTENANCE_JOURNAL_LIMIT)
+ : createDefaultMaintenanceJournal();
graph.lastProcessedSeq = historyState.lastProcessedAssistantFloor;
return graph;
}
@@ -549,6 +559,212 @@ export function appendBatchJournal(graph, entry) {
}
}
/**
 * Diff two graph snapshots and build a maintenance journal entry that can
 * undo the change.
 *
 * The entry carries an `inversePatch` (what to restore/delete to return to
 * `snapshotBefore`) and a `postCheck` (snapshots of every touched node/edge
 * as they look in `snapshotAfter`), which undo validation uses to refuse
 * reverting state that later operations have modified.
 *
 * Relies on sibling helpers buildNodeMap / buildEdgeMap / cloneGraphSnapshot /
 * hasMeaningfulNodeChange / hasMeaningfulEdgeChange defined elsewhere in
 * this module.
 * @param {{nodes?: object[], edges?: object[]}} snapshotBefore
 * @param {{nodes?: object[], edges?: object[]}} snapshotAfter
 * @param {{action?: string, mode?: string, summary?: string}} [meta={}]
 * @returns {object|null} Journal entry, or null when the snapshots show no
 *   meaningful change (undo would be a no-op).
 */
export function createMaintenanceJournalEntry(
  snapshotBefore,
  snapshotAfter,
  meta = {},
) {
  const beforeNodes = buildNodeMap(snapshotBefore?.nodes || []);
  const afterNodes = buildNodeMap(snapshotAfter?.nodes || []);
  const beforeEdges = buildEdgeMap(snapshotBefore?.edges || []);
  const afterEdges = buildEdgeMap(snapshotAfter?.edges || []);

  const restoreNodes = [];
  const restoreEdges = [];
  const deleteNodeIds = [];
  const deleteEdgeIds = [];
  const postNodes = [];
  const postEdges = [];

  // Nodes present before: removed ones are restored as-is; meaningfully
  // changed ones get both a restore snapshot and a postCheck snapshot.
  for (const [nodeId, beforeNode] of beforeNodes.entries()) {
    const afterNode = afterNodes.get(nodeId);
    if (!afterNode) {
      restoreNodes.push(cloneGraphSnapshot(beforeNode));
      continue;
    }
    if (!hasMeaningfulNodeChange(beforeNode, afterNode)) continue;
    restoreNodes.push(cloneGraphSnapshot(beforeNode));
    postNodes.push(cloneGraphSnapshot(afterNode));
  }

  // Nodes created by the action: undo deletes them; postCheck verifies them.
  for (const [nodeId, afterNode] of afterNodes.entries()) {
    if (beforeNodes.has(nodeId)) continue;
    deleteNodeIds.push(nodeId);
    postNodes.push(cloneGraphSnapshot(afterNode));
  }

  // Same treatment for edges: restore removed/changed, delete created.
  for (const [edgeId, beforeEdge] of beforeEdges.entries()) {
    const afterEdge = afterEdges.get(edgeId);
    if (!afterEdge) {
      restoreEdges.push(cloneGraphSnapshot(beforeEdge));
      continue;
    }
    if (!hasMeaningfulEdgeChange(beforeEdge, afterEdge)) continue;
    restoreEdges.push(cloneGraphSnapshot(beforeEdge));
    postEdges.push(cloneGraphSnapshot(afterEdge));
  }

  for (const [edgeId, afterEdge] of afterEdges.entries()) {
    if (beforeEdges.has(edgeId)) continue;
    deleteEdgeIds.push(edgeId);
    postEdges.push(cloneGraphSnapshot(afterEdge));
  }

  // No meaningful difference: emit nothing rather than a no-op entry.
  if (
    restoreNodes.length === 0 &&
    restoreEdges.length === 0 &&
    deleteNodeIds.length === 0 &&
    deleteEdgeIds.length === 0
  ) {
    return null;
  }

  return {
    id: `maintenance-${Date.now()}-${Math.random().toString(36).slice(2, 8)}`,
    createdAt: Date.now(),
    action: String(meta.action || "unknown"),
    mode:
      meta.mode === "auto" || meta.mode === "manual" ? meta.mode : "manual",
    summary: String(meta.summary || ""),
    inversePatch: {
      restoreNodes,
      restoreEdges,
      deleteNodeIds,
      deleteEdgeIds,
    },
    postCheck: {
      nodes: postNodes,
      edges: postEdges,
    },
  };
}
+
/**
 * Append a maintenance journal entry to the graph, trimming the journal to
 * the most recent MAINTENANCE_JOURNAL_LIMIT entries.
 * @param {object} graph - Mutated in place; no-op when not an object
 *   (normalizeGraphRuntimeState leaves non-objects untouched, so pushing
 *   would otherwise throw a TypeError).
 * @param {object} entry - Journal entry; no-op when not an object.
 */
export function appendMaintenanceJournal(graph, entry) {
  if (!graph || typeof graph !== "object") return;
  if (!entry || typeof entry !== "object") return;
  // Ensure graph.maintenanceJournal exists and is normalized before pushing.
  normalizeGraphRuntimeState(graph, graph?.historyState?.chatId || "");
  graph.maintenanceJournal.push(entry);
  if (graph.maintenanceJournal.length > MAINTENANCE_JOURNAL_LIMIT) {
    graph.maintenanceJournal = graph.maintenanceJournal.slice(
      -MAINTENANCE_JOURNAL_LIMIT,
    );
  }
}
+
/**
 * Return the most recent maintenance journal entry, or null when the journal
 * is empty or missing.
 * @param {object} graph
 * @returns {object|null}
 */
export function getLatestMaintenanceJournalEntry(graph) {
  normalizeGraphRuntimeState(graph, graph?.historyState?.chatId || "");
  const journal = Array.isArray(graph?.maintenanceJournal)
    ? graph.maintenanceJournal
    : [];
  return journal.at(-1) ?? null;
}
+
/**
 * Check that every node / edge recorded in the entry's postCheck still looks
 * exactly the way the maintenance action left it, so the inverse patch can be
 * applied safely.
 *
 * Snapshots are compared via JSON.stringify, which is property-order
 * sensitive; snapshots and live objects come from the same clone helpers, so
 * ordering is stable in practice — TODO confirm if clone helpers change.
 * @param {object} graph
 * @param {object} entry - Journal entry carrying postCheck.{nodes,edges}.
 * @returns {{ok: boolean, reason: string}}
 */
function validateMaintenanceUndoState(graph, entry) {
  // Shared checker for node and edge snapshots (the two loops were
  // previously duplicated verbatim).
  const checkSnapshots = (snapshots, currentById, kindLabel) => {
    for (const snapshot of snapshots) {
      const current = currentById.get(snapshot?.id);
      if (!current) {
        return {
          ok: false,
          reason: `${kindLabel} ${snapshot?.id || "unknown"} 已被后续操作改写`,
        };
      }
      if (JSON.stringify(current) !== JSON.stringify(snapshot)) {
        return {
          ok: false,
          reason: `${kindLabel} ${snapshot?.id || "unknown"} 当前状态已变化,无法安全撤销`,
        };
      }
    }
    return null;
  };

  const nodeIssue = checkSnapshots(
    entry?.postCheck?.nodes || [],
    buildNodeMap(graph?.nodes || []),
    "节点",
  );
  if (nodeIssue) return nodeIssue;

  const edgeIssue = checkSnapshots(
    entry?.postCheck?.edges || [],
    buildEdgeMap(graph?.edges || []),
    "边",
  );
  if (edgeIssue) return edgeIssue;

  return { ok: true, reason: "" };
}
+
/**
 * Apply an inverse patch produced by createMaintenanceJournalEntry: delete
 * the nodes/edges the maintenance action created, then restore the
 * pre-action snapshots of everything it changed or removed.
 * @param {object} graph - Mutated in place.
 * @param {{restoreNodes?: object[], restoreEdges?: object[],
 *   deleteNodeIds?: string[], deleteEdgeIds?: string[]}} [inversePatch={}]
 * @returns {object} The same graph (unchanged when inputs are invalid).
 */
export function applyMaintenanceInversePatch(graph, inversePatch = {}) {
  if (!graph || !inversePatch || typeof inversePatch !== "object") {
    return graph;
  }

  normalizeGraphRuntimeState(graph, graph?.historyState?.chatId || "");

  const nodesToDelete = new Set(inversePatch.deleteNodeIds || []);
  const edgesToDelete = new Set(inversePatch.deleteEdgeIds || []);
  const nodeSnapshots = Array.isArray(inversePatch.restoreNodes)
    ? inversePatch.restoreNodes
    : [];
  const edgeSnapshots = Array.isArray(inversePatch.restoreEdges)
    ? inversePatch.restoreEdges
    : [];

  // Remove edges first: an edge goes away when it is explicitly deleted or
  // when either endpoint node is being deleted.
  graph.edges = (graph.edges || []).filter((edge) => {
    if (edgesToDelete.has(edge.id)) return false;
    if (nodesToDelete.has(edge.fromId)) return false;
    return !nodesToDelete.has(edge.toId);
  });
  graph.nodes = (graph.nodes || []).filter(
    (node) => !nodesToDelete.has(node.id),
  );

  // Restore pre-action snapshots (insert or overwrite by id).
  for (const snapshot of nodeSnapshots) {
    upsertById(graph.nodes, cloneGraphSnapshot(snapshot));
  }
  for (const snapshot of edgeSnapshots) {
    upsertById(graph.edges, cloneGraphSnapshot(snapshot));
  }

  sanitizeGraphReferences(graph);
  return graph;
}
+
/**
 * Undo the most recent maintenance journal entry, provided the graph still
 * matches the state that entry recorded.
 * @param {object} graph - Mutated in place on success.
 * @returns {{ok: boolean, reason: string, entry: object|null,
 *   remaining?: number}} On failure, `reason` explains why the undo was
 *   refused and the journal is left untouched.
 */
export function undoLatestMaintenance(graph) {
  normalizeGraphRuntimeState(graph, graph?.historyState?.chatId || "");

  const entry = getLatestMaintenanceJournalEntry(graph);
  if (!entry) {
    return { ok: false, reason: "当前没有可撤销的维护记录", entry: null };
  }

  const validation = validateMaintenanceUndoState(graph, entry);
  if (!validation.ok) {
    return { ok: false, reason: validation.reason, entry };
  }

  applyMaintenanceInversePatch(graph, entry.inversePatch || {});
  // Pop the entry we just reverted.
  graph.maintenanceJournal = graph.maintenanceJournal.slice(0, -1);

  return {
    ok: true,
    reason: "",
    entry,
    remaining: graph.maintenanceJournal.length,
  };
}
+
function upsertById(list, item) {
const index = list.findIndex((entry) => entry.id === item.id);
if (index >= 0) {
diff --git a/tests/default-settings.mjs b/tests/default-settings.mjs
index 6c38982..04e2a9d 100644
--- a/tests/default-settings.mjs
+++ b/tests/default-settings.mjs
@@ -46,6 +46,11 @@ assert.equal(defaultSettings.recallLlmCandidatePool, 30);
assert.equal(defaultSettings.recallLlmContextMessages, 4);
assert.equal(defaultSettings.recallEnableMultiIntent, true);
assert.equal(defaultSettings.recallMultiIntentMaxSegments, 4);
+assert.equal(defaultSettings.recallEnableContextQueryBlend, true);
+assert.equal(defaultSettings.recallContextAssistantWeight, 0.2);
+assert.equal(defaultSettings.recallContextPreviousUserWeight, 0.1);
+assert.equal(defaultSettings.recallEnableLexicalBoost, true);
+assert.equal(defaultSettings.recallLexicalWeight, 0.18);
assert.equal(defaultSettings.recallTeleportAlpha, 0.15);
assert.equal(defaultSettings.recallEnableTemporalLinks, true);
assert.equal(defaultSettings.recallTemporalLinkStrength, 0.2);
@@ -64,6 +69,7 @@ assert.equal(defaultSettings.recallResidualTopK, 5);
assert.equal(defaultSettings.injectDepth, 9999);
assert.equal(defaultSettings.enabled, true);
assert.equal(defaultSettings.enableReflection, true);
+assert.equal(defaultSettings.maintenanceAutoMinNewNodes, 3);
assert.equal(defaultSettings.embeddingTransportMode, "direct");
assert.equal(defaultSettings.taskProfilesVersion, 3);
assert.ok(defaultSettings.taskProfiles);
diff --git a/tests/maintenance-journal.mjs b/tests/maintenance-journal.mjs
new file mode 100644
index 0000000..ed01b85
--- /dev/null
+++ b/tests/maintenance-journal.mjs
@@ -0,0 +1,173 @@
+import assert from "node:assert/strict";
+
+import {
+ appendMaintenanceJournal,
+ createMaintenanceJournalEntry,
+ normalizeGraphRuntimeState,
+ undoLatestMaintenance,
+} from "../runtime-state.js";
+
/**
 * Deep-clone JSON-safe fixture data via a stringify/parse round trip.
 * @param {unknown} value
 * @returns {unknown}
 */
function clone(value) {
  const serialized = JSON.stringify(value);
  return JSON.parse(serialized);
}
+
/**
 * Build a minimal character-node fixture; `extra` overrides any default.
 * @param {string} id
 * @param {object} [extra={}]
 * @returns {object}
 */
function buildNode(id, extra = {}) {
  const defaults = {
    id,
    type: "character",
    archived: false,
    seq: 1,
    seqRange: [1, 1],
    importance: 5,
    fields: {},
    childIds: [],
    parentId: null,
    prevId: null,
    nextId: null,
  };
  return { ...defaults, ...extra };
}
+
/**
 * Build a minimal edge fixture; `extra` overrides any default.
 * @param {string} id
 * @param {string} fromId
 * @param {string} toId
 * @param {object} [extra={}]
 * @returns {object}
 */
function buildEdge(id, fromId, toId, extra = {}) {
  const defaults = {
    id,
    fromId,
    toId,
    relation: "related",
    strength: 1,
  };
  return { ...defaults, ...extra };
}
+
+{
+ const before = {
+ nodes: [buildNode("sleep-1")],
+ edges: [],
+ };
+ const after = clone(before);
+ after.nodes[0].archived = true;
+
+ const graph = normalizeGraphRuntimeState(clone(after), "chat-sleep");
+ const entry = createMaintenanceJournalEntry(before, after, {
+ action: "sleep",
+ mode: "manual",
+ summary: "手动遗忘:归档 1 个节点",
+ });
+
+ appendMaintenanceJournal(graph, entry);
+ const result = undoLatestMaintenance(graph);
+ assert.equal(result.ok, true);
+ assert.equal(graph.nodes[0].archived, false);
+ assert.equal(graph.maintenanceJournal.length, 0);
+}
+
+{
+ const before = {
+ nodes: [
+ buildNode("child-1"),
+ buildNode("child-2"),
+ buildNode("location-1", { type: "location", fields: { title: "大厅" } }),
+ ],
+ edges: [buildEdge("edge-old", "child-1", "location-1")],
+ };
+ const after = clone(before);
+ after.nodes[0].archived = true;
+ after.nodes[0].parentId = "parent-1";
+ after.nodes[1].archived = true;
+ after.nodes[1].parentId = "parent-1";
+ after.nodes.push(
+ buildNode("parent-1", {
+ level: 1,
+ fields: { summary: "压缩父节点" },
+ childIds: ["child-1", "child-2"],
+ }),
+ );
+ after.edges.push(buildEdge("edge-new", "parent-1", "location-1"));
+
+ const graph = normalizeGraphRuntimeState(clone(after), "chat-compress");
+ const entry = createMaintenanceJournalEntry(before, after, {
+ action: "compress",
+ mode: "manual",
+ summary: "手动压缩:新建 1,归档 2",
+ });
+
+ appendMaintenanceJournal(graph, entry);
+ const result = undoLatestMaintenance(graph);
+ assert.equal(result.ok, true);
+ assert.equal(graph.nodes.some((node) => node.id === "parent-1"), false);
+ assert.equal(
+ graph.edges.some((edge) => edge.id === "edge-new"),
+ false,
+ );
+ assert.equal(
+ graph.nodes.find((node) => node.id === "child-1")?.archived,
+ false,
+ );
+ assert.equal(
+ graph.nodes.find((node) => node.id === "child-2")?.archived,
+ false,
+ );
+}
+
+{
+ const before = {
+ nodes: [
+ buildNode("new-1", { fields: { summary: "新线索" } }),
+ buildNode("old-1", { fields: { summary: "旧描述" } }),
+ ],
+ edges: [],
+ };
+ const after = clone(before);
+ after.nodes[0].archived = true;
+ after.nodes[1].fields.summary = "被新信息修正后的旧描述";
+ after.edges.push(buildEdge("edge-merge", "new-1", "old-1"));
+
+ const graph = normalizeGraphRuntimeState(clone(after), "chat-consolidate");
+ const entry = createMaintenanceJournalEntry(before, after, {
+ action: "consolidate",
+ mode: "manual",
+ summary: "手动整合:合并 1,更新 1",
+ });
+
+ appendMaintenanceJournal(graph, entry);
+ const result = undoLatestMaintenance(graph);
+ assert.equal(result.ok, true);
+ assert.equal(
+ graph.nodes.find((node) => node.id === "new-1")?.archived,
+ false,
+ );
+ assert.equal(
+ graph.nodes.find((node) => node.id === "old-1")?.fields?.summary,
+ "旧描述",
+ );
+ assert.equal(
+ graph.edges.some((edge) => edge.id === "edge-merge"),
+ false,
+ );
+}
+
+{
+ const before = {
+ nodes: [buildNode("sleep-2")],
+ edges: [],
+ };
+ const after = clone(before);
+ after.nodes[0].archived = true;
+
+ const graph = normalizeGraphRuntimeState(clone(after), "chat-diverged");
+ const entry = createMaintenanceJournalEntry(before, after, {
+ action: "sleep",
+ mode: "manual",
+ summary: "手动遗忘:归档 1 个节点",
+ });
+
+ appendMaintenanceJournal(graph, entry);
+ graph.nodes[0].importance = 9;
+
+ const result = undoLatestMaintenance(graph);
+ assert.equal(result.ok, false);
+ assert.match(result.reason, /当前状态已变化|已被后续操作改写/);
+ assert.equal(graph.maintenanceJournal.length, 1);
+}
+
+console.log("maintenance-journal tests passed");
diff --git a/tests/retrieval-config.mjs b/tests/retrieval-config.mjs
index 39b1d7d..ebcd7da 100644
--- a/tests/retrieval-config.mjs
+++ b/tests/retrieval-config.mjs
@@ -145,8 +145,12 @@ const retrieve = await loadRetrieve({
async runResidualRecall() {
return { triggered: false, hits: [], skipReason: "residual-disabled-test" };
},
- hybridScore: ({ graphScore = 0, vectorScore = 0, importance = 0 }) =>
- graphScore + vectorScore + importance,
+ hybridScore: ({
+ graphScore = 0,
+ vectorScore = 0,
+ lexicalScore = 0,
+ importance = 0,
+ }) => graphScore + vectorScore + lexicalScore + importance,
reinforceAccessBatch() {},
validateVectorConfig() {
return { valid: true };
@@ -214,6 +218,32 @@ assert.equal(state.diffusionCalls.length, 0);
assert.equal(state.llmCalls.length, 0);
assert.deepEqual(Array.from(noStageResult.selectedNodeIds), ["rule-2", "rule-1"]);
+state.vectorCalls.length = 0;
+await retrieve({
+ graph,
+ userMessage: "他后来怎么做?",
+ recentMessages: [
+ "[assistant]: 他提到了规则二的限制",
+ "[user]: 我们先看规则一",
+ "[user]: 他后来怎么做?",
+ ],
+ embeddingConfig: {},
+ schema,
+ options: {
+ topK: 4,
+ maxRecallNodes: 2,
+ enableVectorPrefilter: true,
+ enableGraphDiffusion: false,
+ enableLLMRecall: false,
+ enableMultiIntent: false,
+ enableContextQueryBlend: true,
+ },
+});
+assert.deepEqual(
+ state.vectorCalls.map((item) => item.message),
+ ["他后来怎么做?", "他提到了规则二的限制", "我们先看规则一"],
+);
+
state.vectorCalls.length = 0;
state.diffusionCalls.length = 0;
state.llmCalls.length = 0;
@@ -235,7 +265,10 @@ const llmPoolResult = await retrieve({
llmCandidatePool: 2,
},
});
-assert.deepEqual(state.vectorCalls, [{ topK: 4, message: "请根据规则给出结论" }]);
+assert.deepEqual(state.vectorCalls, [
+ { topK: 4, message: "请根据规则给出结论" },
+ { topK: 4, message: "现在该怎么做?" },
+]);
assert.equal(state.diffusionCalls.length, 0);
assert.equal(state.llmCandidateCount, 2);
assert.deepEqual(Array.from(llmPoolResult.selectedNodeIds), ["rule-2", "rule-1"]);
@@ -366,4 +399,49 @@ const cappedResult = await retrieve({
});
assert.equal(cappedResult.selectedNodeIds.length, 1);
+const lexicalGraph = {
+ nodes: [
+ {
+ id: "char-1",
+ type: "character",
+ importance: 1,
+ createdTime: 1,
+ archived: false,
+ fields: { name: "Alice", summary: "常驻角色" },
+ seqRange: [1, 1],
+ },
+ {
+ id: "char-2",
+ type: "character",
+ importance: 1,
+ createdTime: 1,
+ archived: false,
+ fields: { name: "Bob", summary: "常驻角色" },
+ seqRange: [1, 1],
+ },
+ ],
+ edges: [],
+};
+const lexicalSchema = [{ id: "character", label: "角色", alwaysInject: false }];
+const lexicalResult = await retrieve({
+ graph: lexicalGraph,
+ userMessage: "Alice 现在怎么样了",
+ recentMessages: [],
+ embeddingConfig: {},
+ schema: lexicalSchema,
+ options: {
+ topK: 2,
+ maxRecallNodes: 1,
+ enableVectorPrefilter: false,
+ enableGraphDiffusion: false,
+ enableLLMRecall: false,
+ enableDiversitySampling: false,
+ enableLexicalBoost: true,
+ },
+});
+assert.deepEqual(Array.from(lexicalResult.selectedNodeIds), ["char-1"]);
+assert.equal(lexicalResult.meta.retrieval.queryBlendActive, false);
+assert.equal(lexicalResult.meta.retrieval.lexicalBoostedNodes, 1);
+assert.equal(lexicalResult.meta.retrieval.lexicalTopHits[0]?.nodeId, "char-1");
+
console.log("retrieval-config tests passed");
diff --git a/ui-actions-controller.js b/ui-actions-controller.js
index 1c7ccc7..cb6513b 100644
--- a/ui-actions-controller.js
+++ b/ui-actions-controller.js
@@ -101,6 +101,12 @@ export async function onManualCompressController(runtime) {
undefined,
runtime.getSettings(),
);
+ runtime.recordMaintenanceAction?.({
+ action: "compress",
+ beforeSnapshot,
+ mode: "manual",
+ summary: runtime.buildMaintenanceSummary?.("compress", result, "manual"),
+ });
await runtime.recordGraphMutation({
beforeSnapshot,
artifactTags: ["compression"],
@@ -386,6 +392,12 @@ export async function onManualSleepController(runtime) {
const beforeSnapshot = runtime.cloneGraphSnapshot(graph);
const result = runtime.sleepCycle(graph, runtime.getSettings());
+ runtime.recordMaintenanceAction?.({
+ action: "sleep",
+ beforeSnapshot,
+ mode: "manual",
+ summary: runtime.buildMaintenanceSummary?.("sleep", result, "manual"),
+ });
await runtime.recordGraphMutation({
beforeSnapshot,
artifactTags: ["sleep"],
@@ -439,6 +451,12 @@ export async function onManualEvolveController(runtime) {
conflictThreshold: settings.consolidationThreshold,
},
});
+ runtime.recordMaintenanceAction?.({
+ action: "consolidate",
+ beforeSnapshot,
+ mode: "manual",
+ summary: runtime.buildMaintenanceSummary?.("consolidate", result, "manual"),
+ });
await runtime.recordGraphMutation({
beforeSnapshot,
artifactTags: ["consolidation"],
@@ -447,3 +465,26 @@ export async function onManualEvolveController(runtime) {
`整合完成:合并 ${result.merged},跳过 ${result.skipped},保留 ${result.kept},进化 ${result.evolved},新链接 ${result.connections},回溯更新 ${result.updates}`,
);
}
+
+export async function onUndoLastMaintenanceController(runtime) {
+ const graph = runtime.getCurrentGraph();
+ if (!graph) return;
+ if (!runtime.ensureGraphMutationReady("撤销最近维护")) return;
+
+ const result = runtime.undoLastMaintenance?.();
+ if (!result?.ok) {
+ runtime.toastr.warning(result?.reason || "撤销最近维护失败");
+ return { handledToast: true };
+ }
+
+ runtime.markVectorStateDirty?.("撤销维护后需要重建向量索引");
+ runtime.saveGraphToChat?.({ reason: "maintenance-undo-complete" });
+ runtime.refreshPanelLiveState?.();
+ runtime.toastr.success(
+ `已撤销最近维护:${result.entry?.summary || result.entry?.action || "未知操作"}`,
+ );
+ return {
+ handledToast: true,
+ result,
+ };
+}