mirror of https://github.com/Youzini-afk/ST-Bionic-Memory-Ecology.git
synced 2026-05-15 22:30:38 +08:00

Fix dedicated LLM model list fetching

llm.js (152 lines changed)

@@ -486,9 +486,11 @@ function normalizeModelList(items = []) {
       id = item.trim();
       label = id;
     } else if (item && typeof item === "object") {
-      id = String(item.id || item.name || item.value || item.slug || "").trim();
+      id = String(
+        item.id || item.name || item.label || item.value || item.slug || "",
+      ).trim();
       label = String(
-        item.name || item.id || item.value || item.slug || "",
+        item.label || item.name || item.id || item.value || item.slug || "",
       ).trim();
     }

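The widened fallback chains mean an entry that only carries a `label` field now resolves both fields instead of producing an empty id. A minimal illustration (assuming, as the tests below assert, that normalizeModelList maps each entry to a { id, label } pair):

  normalizeModelList([
    "plain-model",               // string entry: id and label are the trimmed string
    { name: "nested-model-a" },  // id from name; label falls back through label -> name
    { label: "nested-model-b" }, // previously resolved to an empty id; now picked up via label
  ]);
  // => [
  //   { id: "plain-model", label: "plain-model" },
  //   { id: "nested-model-a", label: "nested-model-a" },
  //   { id: "nested-model-b", label: "nested-model-b" },
  // ]
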
@@ -500,6 +502,101 @@ function normalizeModelList(items = []) {
   return models;
 }

+function extractModelListPayload(payload = {}) {
+  if (Array.isArray(payload)) {
+    return payload;
+  }
+
+  if (!payload || typeof payload !== "object") {
+    return [];
+  }
+
+  if (Array.isArray(payload.models)) {
+    return payload.models;
+  }
+
+  if (Array.isArray(payload.data)) {
+    return payload.data;
+  }
+
+  if (payload.data && typeof payload.data === "object") {
+    if (Array.isArray(payload.data.models)) {
+      return payload.data.models;
+    }
+    if (Array.isArray(payload.data.data)) {
+      return payload.data.data;
+    }
+  }
+
+  return [];
+}
+
+function buildDedicatedAuthHeaderString(apiKey = "") {
+  const normalized = String(apiKey || "").trim();
+  return normalized ? `Authorization: Bearer ${normalized}` : "";
+}
+
+function buildDedicatedStatusRequestVariants(config = getMemoryLLMConfig()) {
+  const customVariant = {
+    mode: "custom",
+    body: {
+      chat_completion_source: chat_completion_sources.CUSTOM,
+      custom_url: config.apiUrl,
+      custom_include_headers: buildDedicatedAuthHeaderString(config.apiKey),
+      reverse_proxy: config.apiUrl,
+      proxy_password: "",
+    },
+  };
+
+  const legacyOpenAiVariant = {
+    mode: "openai-reverse-proxy",
+    body: {
+      chat_completion_source: chat_completion_sources.OPENAI,
+      reverse_proxy: config.apiUrl,
+      proxy_password: config.apiKey || "",
+    },
+  };
+
+  return [customVariant, legacyOpenAiVariant];
+}
+
+async function requestDedicatedStatusModels(
+  variant,
+  { timeoutMs = LLM_REQUEST_TIMEOUT_MS } = {},
+) {
+  const response = await fetchWithTimeout(
+    "/api/backends/chat-completions/status",
+    {
+      method: "POST",
+      headers: getRequestHeaders(),
+      body: JSON.stringify(variant.body),
+    },
+    timeoutMs,
+  );
+
+  const rawText = await response.text().catch(() => "");
+  let payload = {};
+  try {
+    payload = rawText ? JSON.parse(rawText) : {};
+  } catch {
+    payload = {};
+  }
+
+  if (!response.ok || payload?.error) {
+    throw new Error(
+      extractErrorMessageFromPayload(payload) ||
+        rawText ||
+        response.statusText ||
+        `HTTP ${response.status}`,
+    );
+  }
+
+  return {
+    payload,
+    models: normalizeModelList(extractModelListPayload(payload)),
+  };
+}
+
 function extractContentFromResponsePayload(payload) {
   if (typeof payload === "string") {
     return payload;
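
For reference, the payload shapes extractModelListPayload accepts all collapse to the same list (illustration only; the nested shape matches what the tests below feed in, and { data: { data: [...] } } is handled the same way):

  extractModelListPayload([{ id: "m" }]);                       // bare array
  extractModelListPayload({ models: [{ id: "m" }] });           // { models: [...] }
  extractModelListPayload({ data: [{ id: "m" }] });             // OpenAI-style { data: [...] }
  extractModelListPayload({ data: { models: [{ id: "m" }] } }); // nested { data: { models } }
  // each call returns [{ id: "m" }]
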
@@ -1508,37 +1605,32 @@ export async function fetchMemoryLLMModels() {
     };
   }

+  const variants = buildDedicatedStatusRequestVariants(config);
+  const errors = [];
+
   try {
-    const response = await fetch("/api/backends/chat-completions/status", {
-      method: "POST",
-      headers: getRequestHeaders(),
-      body: JSON.stringify({
-        chat_completion_source: chat_completion_sources.OPENAI,
-        reverse_proxy: config.apiUrl,
-        proxy_password: config.apiKey || "",
-      }),
-    });
-
-    const payload = await response.json().catch(() => ({}));
-    if (!response.ok) {
-      const message = payload?.error || payload?.message || response.statusText;
-      return {
-        success: false,
-        models: [],
-        error: message || `HTTP ${response.status}`,
-      };
-    }
+    for (const variant of variants) {
+      try {
+        const result = await requestDedicatedStatusModels(variant, {
+          timeoutMs: config.timeoutMs,
+        });
+        if (result.models.length > 0) {
+          return { success: true, models: result.models, error: "" };
+        }
+        errors.push(`${variant.mode}:empty`);
+      } catch (error) {
+        errors.push(`${variant.mode}:${String(error?.message || error)}`);
+      }
+    }

-    const models = normalizeModelList(payload?.data);
-    if (models.length === 0) {
-      return {
-        success: false,
-        models: [],
-        error: "未拉取到可用模型,请检查接口是否支持 /models",
-      };
-    }
-
-    return { success: true, models, error: "" };
+    return {
+      success: false,
+      models: [],
+      error:
+        errors.length > 0
+          ? `未拉取到可用模型。尝试结果: ${errors.join(" | ")}`
+          : "未拉取到可用模型,请检查接口是否支持模型列表接口",
+    };
   } catch (error) {
     return { success: false, models: [], error: String(error) };
   }
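
Net effect: fetchMemoryLLMModels now tries the CUSTOM chat-completion source first and falls back to the legacy OPENAI reverse-proxy body only when the first variant errors or returns an empty list. The Chinese error strings read roughly "no usable models were fetched; check that the endpoint supports a model-list route", with each variant's failure recorded as `mode:reason`. A usage sketch (assuming the exported signature stays as shown):

  const { success, models, error } = await fetchMemoryLLMModels();
  if (!success) {
    // error carries the per-variant trail, e.g. "custom:HTTP 400 | openai-reverse-proxy:empty"
    console.warn("model list fetch failed:", error);
  }
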

tests/llm-model-fetch.mjs (new file, 243 lines)

@@ -0,0 +1,243 @@
import assert from "node:assert/strict";
import { createRequire, registerHooks } from "node:module";

const extensionsShimSource = [
  "export const extension_settings = globalThis.__llmModelFetchExtensionSettings || {};",
  "export function getContext() {",
  "  return null;",
  "}",
].join("\n");
const scriptShimSource = [
  "export function getRequestHeaders() {",
  "  return { 'Content-Type': 'application/json' };",
  "}",
].join("\n");
const openAiShimSource = [
  "export const chat_completion_sources = { CUSTOM: 'custom', OPENAI: 'openai' };",
  "export async function sendOpenAIRequest(...args) {",
  "  if (typeof globalThis.__llmModelFetchSendOpenAIRequest === 'function') {",
  "    return await globalThis.__llmModelFetchSendOpenAIRequest(...args);",
  "  }",
  "  return { choices: [{ message: { content: '{}' } }] };",
  "}",
].join("\n");

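// Redirect llm.js's SillyTavern-relative imports to the inline shim sources
// above, served as data: URLs, so the module can be loaded outside the app.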
registerHooks({
  resolve(specifier, context, nextResolve) {
    if (
      specifier === "../../../extensions.js" ||
      specifier === "../../../../extensions.js"
    ) {
      return {
        shortCircuit: true,
        url: `data:text/javascript,${encodeURIComponent(extensionsShimSource)}`,
      };
    }
    if (specifier === "../../../../script.js") {
      return {
        shortCircuit: true,
        url: `data:text/javascript,${encodeURIComponent(scriptShimSource)}`,
      };
    }
    if (specifier === "../../../openai.js") {
      return {
        shortCircuit: true,
        url: `data:text/javascript,${encodeURIComponent(openAiShimSource)}`,
      };
    }
    return nextResolve(specifier, context);
  },
});

const require = createRequire(import.meta.url);
const originalRequire = globalThis.require;
const originalExtensionSettings = globalThis.__llmModelFetchExtensionSettings;
const originalSendOpenAIRequest = globalThis.__llmModelFetchSendOpenAIRequest;

globalThis.__llmModelFetchExtensionSettings = {
  st_bme: {},
};
globalThis.require = require;

const { createDefaultTaskProfiles } = await import("../prompt-profiles.js");
const llm = await import("../llm.js");
const extensionsApi = await import("../../../../extensions.js");

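// The shims capture these globals when llm.js is imported, so the originals
// can be restored as soon as the dynamic imports above have completed.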
if (originalRequire === undefined) {
  delete globalThis.require;
} else {
  globalThis.require = originalRequire;
}

if (originalExtensionSettings === undefined) {
  delete globalThis.__llmModelFetchExtensionSettings;
} else {
  globalThis.__llmModelFetchExtensionSettings = originalExtensionSettings;
}

if (originalSendOpenAIRequest === undefined) {
  delete globalThis.__llmModelFetchSendOpenAIRequest;
} else {
  globalThis.__llmModelFetchSendOpenAIRequest = originalSendOpenAIRequest;
}

function buildModelFetchSettings() {
  return {
    llmApiUrl: "https://example.com/v1",
    llmApiKey: "sk-model-secret",
    llmModel: "gpt-model-test",
    timeoutMs: 5678,
    taskProfilesVersion: 3,
    taskProfiles: createDefaultTaskProfiles(),
  };
}

async function withModelFetchSettings(run) {
  const previousSettings = JSON.parse(
    JSON.stringify(extensionsApi.extension_settings.st_bme || {}),
  );
  extensionsApi.extension_settings.st_bme = {
    ...previousSettings,
    ...buildModelFetchSettings(),
  };

  try {
    await run();
  } finally {
    extensionsApi.extension_settings.st_bme = previousSettings;
  }
}

async function testFetchMemoryModelsUsesCustomStatusFirst() {
  const originalFetch = globalThis.fetch;
  const seenBodies = [];

  globalThis.fetch = async (_url, options = {}) => {
    seenBodies.push(JSON.parse(String(options.body || "{}")));
    return new Response(
      JSON.stringify({
        models: [{ id: "gpt-4.1-mini" }, { id: "gpt-4.1" }],
      }),
      {
        status: 200,
        headers: {
          "Content-Type": "application/json",
        },
      },
    );
  };

  try {
    await withModelFetchSettings(async () => {
      const result = await llm.fetchMemoryLLMModels();
      assert.equal(result.success, true);
      assert.deepEqual(
        result.models.map((item) => item.id),
        ["gpt-4.1-mini", "gpt-4.1"],
      );
      assert.equal(seenBodies.length, 1);
      assert.equal(seenBodies[0].chat_completion_source, "custom");
      assert.equal(seenBodies[0].custom_url, "https://example.com/v1");
      assert.match(
        String(seenBodies[0].custom_include_headers || ""),
        /Authorization:\s+Bearer\s+sk-model-secret/,
      );
    });
  } finally {
    globalThis.fetch = originalFetch;
  }
}

async function testFetchMemoryModelsFallsBackToLegacyStatus() {
  const originalFetch = globalThis.fetch;
  const seenBodies = [];
  let fetchCount = 0;

  globalThis.fetch = async (_url, options = {}) => {
    fetchCount += 1;
    seenBodies.push(JSON.parse(String(options.body || "{}")));

    if (fetchCount === 1) {
      return new Response(
        JSON.stringify({
          error: {
            message: "custom source not supported",
          },
        }),
        {
          status: 400,
          headers: {
            "Content-Type": "application/json",
          },
        },
      );
    }

    return new Response(
      JSON.stringify({
        data: [{ id: "legacy-openai-model" }],
      }),
      {
        status: 200,
        headers: {
          "Content-Type": "application/json",
        },
      },
    );
  };

  try {
    await withModelFetchSettings(async () => {
      const result = await llm.fetchMemoryLLMModels();
      assert.equal(result.success, true);
      assert.deepEqual(result.models, [
        { id: "legacy-openai-model", label: "legacy-openai-model" },
      ]);
      assert.equal(fetchCount, 2);
      assert.equal(seenBodies[0].chat_completion_source, "custom");
      assert.equal(seenBodies[1].chat_completion_source, "openai");
      assert.equal(seenBodies[1].reverse_proxy, "https://example.com/v1");
      assert.equal(seenBodies[1].proxy_password, "sk-model-secret");
    });
  } finally {
    globalThis.fetch = originalFetch;
  }
}

async function testFetchMemoryModelsParsesNestedPayload() {
  const originalFetch = globalThis.fetch;

  globalThis.fetch = async () =>
    new Response(
      JSON.stringify({
        data: {
          models: [{ name: "nested-model-a" }, { label: "nested-model-b" }],
        },
      }),
      {
        status: 200,
        headers: {
          "Content-Type": "application/json",
        },
      },
    );

  try {
    await withModelFetchSettings(async () => {
      const result = await llm.fetchMemoryLLMModels();
      assert.equal(result.success, true);
      assert.deepEqual(
        result.models.map((item) => item.id),
        ["nested-model-a", "nested-model-b"],
      );
    });
  } finally {
    globalThis.fetch = originalFetch;
  }
}

await testFetchMemoryModelsUsesCustomStatusFirst();
await testFetchMemoryModelsFallsBackToLegacyStatus();
await testFetchMemoryModelsParsesNestedPayload();

console.log("llm-model-fetch tests passed");
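
The suite uses no test framework, so it can be run directly with Node (assuming a version recent enough to provide the synchronous registerHooks API in node:module):

  node tests/llm-model-fetch.mjs
  # prints: llm-model-fetch tests passed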