fix: restore luker llm priority order

This commit is contained in:
Youzini-afk
2026-04-16 01:04:56 +08:00
parent 66a168773f
commit 622f00620c
4 changed files with 117 additions and 26 deletions

View File

@@ -1965,17 +1965,7 @@ async function callDedicatedOpenAICompatible(
profileName: config.requestedLlmPresetName || "",
});
const settings = extension_settings[MODULE_NAME] || {};
const shouldPreferLukerHostRoute =
hostRouting.hostProfile === "luker" &&
(
config.llmConfigSource === "global" ||
(
String(config.llmConfigSource || "").startsWith("global-fallback-") &&
hostRouting.routeApplied === true
)
);
const hasDedicatedConfig =
hasDedicatedLLMConfig(config) && !shouldPreferLukerHostRoute;
const hasDedicatedConfig = hasDedicatedLLMConfig(config);
if (taskType && config.llmPresetFallbackReason) {
debugWarn(
`[ST-BME] 任务 ${taskType} 指定的 API 模板不可用,已回退当前 API: ` +
@@ -2052,7 +2042,10 @@ async function callDedicatedOpenAICompatible(
hostRequestApi: hostRouting.requestApi,
hostRouteApplied: hostRouting.routeApplied,
hostRouteReason: hostRouting.routeReason,
preferHostRoute: shouldPreferLukerHostRoute,
preferHostRoute:
!hasDedicatedConfig &&
hostRouting.hostProfile === "luker" &&
hostRouting.routeApplied === true,
apiSettingsOverride: hostRouting.apiSettingsOverride,
maxCompletionTokens,
...buildStreamDebugSnapshot(streamState),

View File

@@ -1152,7 +1152,7 @@ export async function extractMemories({
}
// 调用 LLM
const result = await callLLMForJSON({
const llmResult = await callLLMForJSON({
systemPrompt: llmSystemPrompt,
userPrompt: promptPayload.userPrompt,
maxRetries: 2,
@@ -1166,8 +1166,18 @@ export async function extractMemories({
promptMessages: promptPayload.promptMessages,
additionalMessages: promptPayloadAdditionalMessages,
onStreamProgress,
returnFailureDetails: true,
});
throwIfAborted(signal);
const llmFailure =
llmResult && typeof llmResult === "object" && "ok" in llmResult
? llmResult
: null;
const result = llmFailure
? llmFailure.ok
? llmFailure.data
: null
: llmResult;
const normalizedResult = normalizeExtractionResultPayload(result, schema);
const ownershipWarnings = [];
const extractionOwnerContext = deriveExtractionOwnerContext(
@@ -1199,11 +1209,23 @@ export async function extractMemories({
`[ST-BME] 提取 LLM 未返回有效操作 ` +
`[type=${diagType}]` +
(diagKeys ? ` [keys=${diagKeys}]` : "") +
(diagPreview ? ` [preview=${diagPreview}]` : ""),
(diagPreview ? ` [preview=${diagPreview}]` : "") +
(llmFailure?.ok === false && llmFailure?.errorType
? ` [failureType=${String(llmFailure.errorType)}]`
: "") +
(llmFailure?.ok === false && llmFailure?.failureReason
? ` [failureReason=${String(llmFailure.failureReason).slice(0, 200)}]`
: ""),
);
const failureReason =
llmFailure?.ok === false
? String(llmFailure.failureReason || "").trim()
: "";
return {
success: false,
error: "提取 LLM 未返回有效操作",
error: failureReason
? `提取 LLM 未返回有效操作: ${failureReason}`
: "提取 LLM 未返回有效操作",
newNodes: 0,
updatedNodes: 0,
newEdges: 0,

View File

@@ -56,6 +56,7 @@ const originalRequire = globalThis.require;
const originalExtensionSettings = globalThis.__lukerLlmRoutingExtensionSettings;
const originalSendOpenAIRequest = globalThis.__lukerLlmRoutingSendOpenAIRequest;
const originalLuker = globalThis.Luker;
const originalFetch = globalThis.fetch;
globalThis.__lukerLlmRoutingExtensionSettings = {
st_bme: {},
@@ -80,6 +81,8 @@ if (originalExtensionSettings === undefined) {
let capturedOptions = null;
let capturedMessages = null;
let sendOpenAIRequestCalls = 0;
let capturedFetchBody = null;
globalThis.Luker = {
getContext() {
@@ -112,6 +115,7 @@ globalThis.__lukerLlmRoutingSendOpenAIRequest = async (
signal,
options = {},
) => {
sendOpenAIRequestCalls += 1;
capturedOptions = { ...(options || {}) };
capturedMessages = Array.isArray(messages) ? [...messages] : messages;
return {
@@ -125,6 +129,28 @@ globalThis.__lukerLlmRoutingSendOpenAIRequest = async (
};
};
// Stub the global fetch so the dedicated-LLM code path makes no real network
// call during this test. The stub:
//  - parses the outgoing JSON request body into `capturedFetchBody` (declared
//    in the surrounding harness) so assertions can inspect which custom_url /
//    payload the code under test actually sent;
//  - replies with a minimal OpenAI-style chat-completion response whose
//    message content is an empty operations list, which the extractor treats
//    as a successful (if empty) result.
globalThis.fetch = async (_url, options = {}) => {
  capturedFetchBody = JSON.parse(String(options.body || "{}"));
  return new Response(
    JSON.stringify({
      choices: [
        {
          message: {
            content: '{"operations":[]}',
          },
          finish_reason: "stop",
        },
      ],
    }),
    {
      status: 200,
      headers: {
        "Content-Type": "application/json",
      },
    },
  );
};
extensionsApi.extension_settings.st_bme = {
taskProfiles: createDefaultTaskProfiles(),
};
@@ -151,6 +177,8 @@ try {
capturedOptions = null;
capturedMessages = null;
capturedFetchBody = null;
sendOpenAIRequestCalls = 0;
extensionsApi.extension_settings.st_bme = {
llmApiUrl: "https://stale-generic-config.invalid/v1",
llmApiKey: "sk-stale-generic",
@@ -167,15 +195,20 @@ try {
});
assert.deepEqual(routedResult, { operations: [] });
assert.deepEqual(capturedOptions?.apiSettingsOverride, {
chat_completion_source: "openai",
reverse_proxy: "https://example-luker-route.test/v1",
proxy_password: "sk-luker-route",
secret_id: "luker-secret-1",
});
assert.equal(
sendOpenAIRequestCalls,
0,
"存在可用的 BME 全局专用 LLM 配置时,不应退回宿主当前聊天路由",
);
assert.equal(
capturedFetchBody?.custom_url,
"https://stale-generic-config.invalid/v1",
);
capturedOptions = null;
capturedMessages = null;
capturedFetchBody = null;
sendOpenAIRequestCalls = 0;
const taskProfiles = createDefaultTaskProfiles();
taskProfiles.extract.profiles[0].generation.llm_preset = "luker-profile-alpha";
extensionsApi.extension_settings.st_bme = {
@@ -218,11 +251,15 @@ try {
});
assert.deepEqual(profileRoutedResult, { operations: [] });
assert.deepEqual(capturedOptions?.apiSettingsOverride, {
chat_completion_source: "openai",
reverse_proxy: "https://example-luker-profile.test/v1",
proxy_password: "sk-luker-profile",
});
assert.equal(
sendOpenAIRequestCalls,
0,
"存在可用的 BME 全局专用 LLM 配置时,不应因为 Luker profile 名而劫持到当前聊天路由",
);
assert.equal(
capturedFetchBody?.custom_url,
"https://stale-generic-config.invalid/v1",
);
} finally {
if (originalSendOpenAIRequest === undefined) {
delete globalThis.__lukerLlmRoutingSendOpenAIRequest;
@@ -230,6 +267,12 @@ try {
globalThis.__lukerLlmRoutingSendOpenAIRequest = originalSendOpenAIRequest;
}
if (originalFetch === undefined) {
delete globalThis.fetch;
} else {
globalThis.fetch = originalFetch;
}
if (originalLuker === undefined) {
delete globalThis.Luker;
} else {

View File

@@ -2565,6 +2565,38 @@ async function testExtractorNormalizesArrayPayloadAndPreservesScopeField() {
}
}
// Regression test: when callLLMForJSON reports a structured failure
// ({ ok: false, errorType, failureReason }), extractMemories must fail and
// surface the provider's failureReason text inside result.error.
async function testExtractorPropagatesLlmFailureReason() {
  const memoryGraph = createEmptyGraph();
  // Canned structured failure returned by the stubbed LLM call.
  const failurePayload = {
    ok: false,
    errorType: "provider-error",
    failureReason: "Invalid character name",
  };
  const restore = pushTestOverrides({
    llm: {
      callLLMForJSON: async () => failurePayload,
    },
  });
  try {
    const extraction = await extractMemories({
      graph: memoryGraph,
      messages: [{ seq: 9, role: "assistant", content: "测试 LLM 失败原因" }],
      startSeq: 9,
      endSeq: 9,
      schema,
      embeddingConfig: null,
      settings: {},
    });
    assert.equal(extraction.success, false);
    // The provider's reason must be propagated, not swallowed.
    assert.match(extraction.error, /Invalid character name/);
  } finally {
    restore();
  }
}
async function testConsolidatorMergeUpdatesSeqRange() {
const graph = createEmptyGraph();
const target = createNode({
@@ -7005,6 +7037,7 @@ await testCompressTypeAcceptsTopLevelFieldsResult();
await testExtractorFailsOnUnknownOperation();
await testExtractorNormalizesFlatCreateOperation();
await testExtractorNormalizesArrayPayloadAndPreservesScopeField();
await testExtractorPropagatesLlmFailureReason();
await testConsolidatorMergeUpdatesSeqRange();
await testConsolidatorMergeFallbackKeepsNodeWhenTargetMissing();
await testBatchJournalVectorDeltaCapturesRecoveryFields();