mirror of
https://github.com/Youzini-afk/ST-Bionic-Memory-Ecology.git
synced 2026-05-15 22:30:38 +08:00
refactor: extract ui action helpers into controller module
This commit is contained in:
108
index.js
108
index.js
@@ -75,6 +75,13 @@ import {
|
||||
createDefaultTaskProfiles,
|
||||
migrateLegacyTaskProfiles,
|
||||
} from "./prompt-profiles.js";
|
||||
import {
|
||||
onFetchEmbeddingModelsController,
|
||||
onFetchMemoryLLMModelsController,
|
||||
onTestEmbeddingController,
|
||||
onTestMemoryLLMController,
|
||||
onViewGraphController,
|
||||
} from "./ui-actions-controller.js";
|
||||
import {
|
||||
createNoticePanelActionController,
|
||||
initializePanelBridgeController,
|
||||
@@ -4474,24 +4481,11 @@ function onMessageReceived() {
|
||||
// ==================== UI 操作 ====================
|
||||
|
||||
/**
 * UI action: show the current graph's statistics.
 * Thin wrapper that delegates to onViewGraphController with the
 * module-level dependencies injected (keeps the controller testable).
 *
 * @returns {Promise<void>} resolves after the controller has shown its toast
 */
async function onViewGraph() {
  // Delegate entirely to the extracted controller; the inline
  // implementation now lives in ui-actions-controller.js.
  return await onViewGraphController({
    getCurrentGraph: () => currentGraph,
    getGraphStats,
    toastr,
  });
}
|
||||
|
||||
async function onRebuild() {
|
||||
@@ -4730,69 +4724,39 @@ async function onViewLastInjection() {
|
||||
}
|
||||
|
||||
/**
 * UI action: test Embedding API connectivity.
 * Delegates to onTestEmbeddingController, injecting the module-level
 * config/validation/connection helpers and the toastr notifier.
 *
 * @returns {Promise<void>} resolves after the test toast has been shown
 */
async function onTestEmbedding() {
  // Logic extracted to ui-actions-controller.js; no inline duplication here.
  return await onTestEmbeddingController({
    getCurrentChatId,
    getEmbeddingConfig,
    testVectorConnection,
    toastr,
    validateVectorConfig,
  });
}
|
||||
|
||||
/**
 * UI action: test memory-LLM connectivity.
 * Delegates to onTestMemoryLLMController with injected dependencies.
 *
 * @returns {Promise<void>} resolves after the result toast has been shown
 */
async function onTestMemoryLLM() {
  return await onTestMemoryLLMController({
    testLLMConnection,
    toastr,
  });
}
|
||||
|
||||
/**
 * UI action: fetch the memory-LLM model list.
 * Delegates to onFetchMemoryLLMModelsController; the controller returns
 * the fetch result object so callers can inspect success/models/error.
 *
 * @returns {Promise<object>} the fetch result from the controller
 */
async function onFetchMemoryLLMModels() {
  // Single return path — the previously duplicated inline body made the
  // delegation unreachable; only the controller call remains.
  return await onFetchMemoryLLMModelsController({
    fetchMemoryLLMModels,
    toastr,
  });
}
|
||||
|
||||
/**
 * UI action: fetch the Embedding model list for a given mode.
 * Delegates to onFetchEmbeddingModelsController, forwarding `mode`
 * ("direct" | "backend" | null for the configured default).
 *
 * @param {string|null} [mode=null] embedding mode override
 * @returns {Promise<object>} fetch result ({ success, models, error? })
 */
async function onFetchEmbeddingModels(mode = null) {
  // Single return path — the old inline body that preceded this call was
  // dead code left over from the extraction; only the delegation remains.
  return await onFetchEmbeddingModelsController(
    {
      fetchAvailableEmbeddingModels,
      getEmbeddingConfig,
      toastr,
      validateVectorConfig,
    },
    mode,
  );
}
|
||||
|
||||
async function onManualExtract() {
|
||||
|
||||
87
ui-actions-controller.js
Normal file
87
ui-actions-controller.js
Normal file
@@ -0,0 +1,87 @@
|
||||
/**
 * Show a toast summarizing the currently loaded graph's statistics.
 * Warns (and does nothing else) when no graph is loaded.
 *
 * @param {object} runtime injected deps: getCurrentGraph(), getGraphStats(graph), toastr
 * @returns {Promise<void>}
 */
export async function onViewGraphController(runtime) {
  const { getCurrentGraph, getGraphStats, toastr } = runtime;

  const loadedGraph = getCurrentGraph();
  if (!loadedGraph) {
    toastr.warning("当前没有加载的图谱");
    return;
  }

  const stats = getGraphStats(loadedGraph);

  // Render the per-type node counts as "type=count, ..." or a placeholder.
  const typeSummary =
    Object.entries(stats.typeCounts)
      .map(([type, count]) => `${type}=${count}`)
      .join(", ") || "(空)";

  const summaryLines = [
    `节点: ${stats.activeNodes} 活跃 / ${stats.archivedNodes} 归档`,
    `边: ${stats.totalEdges}`,
    `最后处理楼层: ${stats.lastProcessedSeq}`,
    `类型分布: ${typeSummary}`,
  ];

  toastr.info(summaryLines.join("\n"), "ST-BME 图谱状态", { timeOut: 10000 });
}
|
||||
|
||||
/**
 * Validate the embedding config and probe the Embedding API, reporting
 * the outcome via toasts. Returns early (warning only) on invalid config.
 *
 * @param {object} runtime injected deps: getEmbeddingConfig(),
 *   validateVectorConfig(config), testVectorConnection(config, chatId),
 *   getCurrentChatId(), toastr
 * @returns {Promise<void>}
 */
export async function onTestEmbeddingController(runtime) {
  const { toastr } = runtime;

  const config = runtime.getEmbeddingConfig();
  const check = runtime.validateVectorConfig(config);
  if (!check.valid) {
    toastr.warning(check.error);
    return;
  }

  toastr.info("正在测试 Embedding API 连通性...");
  const outcome = await runtime.testVectorConnection(
    config,
    runtime.getCurrentChatId(),
  );

  if (!outcome.success) {
    toastr.error(`连接失败: ${outcome.error}`);
    return;
  }
  toastr.success(`连接成功!向量维度: ${outcome.dimensions}`);
}
|
||||
|
||||
/**
 * Probe memory-LLM connectivity and report the outcome via toasts.
 *
 * @param {object} runtime injected deps: testLLMConnection(), toastr
 * @returns {Promise<void>}
 */
export async function onTestMemoryLLMController(runtime) {
  const { testLLMConnection, toastr } = runtime;

  toastr.info("正在测试记忆 LLM 连通性...");
  const outcome = await testLLMConnection();

  if (!outcome.success) {
    toastr.error(`连接失败: ${outcome.error}`);
    return;
  }
  toastr.success(`连接成功!模式: ${outcome.mode}`);
}
|
||||
|
||||
/**
 * Fetch the memory-LLM model list, toast the outcome, and return the
 * raw fetch result so callers can inspect it.
 *
 * @param {object} runtime injected deps: fetchMemoryLLMModels(), toastr
 * @returns {Promise<object>} the fetch result ({ success, models?, error? })
 */
export async function onFetchMemoryLLMModelsController(runtime) {
  const { fetchMemoryLLMModels, toastr } = runtime;

  toastr.info("正在拉取记忆 LLM 模型列表...");
  const outcome = await fetchMemoryLLMModels();

  if (!outcome.success) {
    toastr.error(`拉取失败: ${outcome.error}`);
    return outcome;
  }
  toastr.success(`已拉取 ${outcome.models.length} 个记忆 LLM 模型`);
  return outcome;
}
|
||||
|
||||
/**
 * Validate the embedding config for `mode`, fetch the available embedding
 * models, toast the outcome, and return the result object.
 *
 * @param {object} runtime injected deps: getEmbeddingConfig(mode),
 *   validateVectorConfig(config), fetchAvailableEmbeddingModels(config), toastr
 * @param {string|null} [mode=null] embedding mode ("direct" | "backend");
 *   falls back to config.mode, then "direct"
 * @returns {Promise<object>} fetch result ({ success, models, error? })
 */
export async function onFetchEmbeddingModelsController(runtime, mode = null) {
  const { toastr } = runtime;

  const config = runtime.getEmbeddingConfig(mode);
  // Truthy fallback chain kept deliberately (empty-string mode falls through).
  const effectiveMode = mode || config?.mode || "direct";

  const check = runtime.validateVectorConfig(config);
  if (!check.valid) {
    toastr.warning(check.error);
    return { success: false, models: [], error: check.error };
  }

  toastr.info("正在拉取 Embedding 模型列表...");
  const outcome = await runtime.fetchAvailableEmbeddingModels(config);

  if (!outcome.success) {
    toastr.error(`拉取失败: ${outcome.error}`);
    return outcome;
  }

  const modeLabel = effectiveMode === "backend" ? "后端" : "直连";
  toastr.success(
    `已拉取 ${outcome.models.length} 个${modeLabel} Embedding 模型`,
  );
  return outcome;
}
|
||||
Reference in New Issue
Block a user