Mirror of https://github.com/Youzini-afk/ST-Bionic-Memory-Ecology.git (synced 2026-05-15 22:30:38 +08:00)
feat: streaming text preview in the notice popup + progress callback chain + auto fade-out adjustments
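This commit threads a single onStreamProgress callback from the UI entry points (the extraction and recall status windows) down to the dedicated stream parser, so the notice popup can display text as it streams in. For orientation, a minimal sketch of the callback contract: the payload field names come from the diff below, while the consumer itself is hypothetical.

// Progress payload emitted once per decoded chunk by
// parseDedicatedStreamingResponse (field names as in the diff):
//   previewText   - accumulated preview of the streamed text
//   chunkCount    - number of chunks received so far
//   receivedChars - total characters received so far
// Hypothetical consumer: log a rolling tail of the stream.
const onStreamProgress = ({ previewText, chunkCount, receivedChars }) => {
  console.log(`chunk ${chunkCount}, ${receivedChars} chars: …${previewText.slice(-40)}`);
};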
@@ -89,6 +89,7 @@ export async function extractMemories({
   extractPrompt,
   signal = undefined,
   settings = {},
+  onStreamProgress = null,
 }) {
   throwIfAborted(signal);
   if (!messages || messages.length === 0) {
@@ -184,6 +185,7 @@ export async function extractMemories({
     debugContext: createTaskLlmDebugContext(promptBuild, extractRegexInput),
     promptMessages: promptPayload.promptMessages,
     additionalMessages: promptPayload.additionalMessages,
+    onStreamProgress,
   });
   throwIfAborted(signal);

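A hypothetical call site for the new parameter; only the parameter names are taken from the hunks above, while the argument values and the updateStatusLine helper are illustrative:

const controller = new AbortController();
const result = await extractMemories({
  messages: chatMessages,            // placeholder message list
  extractPrompt: undefined,          // fall back to the default prompt
  settings: {},
  signal: controller.signal,
  onStreamProgress: ({ previewText, receivedChars }) => {
    updateStatusLine(previewText, receivedChars); // hypothetical UI helper
  },
});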
index.js (27 changes)
@@ -416,11 +416,11 @@ function getStageNoticeTitle(stage) {
 function getStageNoticeDuration(level = "info") {
   switch (level) {
     case "error":
-      return 5600;
+      return 6000;
     case "warning":
-      return 4600;
+      return 5000;
     case "success":
-      return 2800;
+      return 3000;
     default:
       return 3200;
   }
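These durations feed the notice window's auto fade-out: errors now stay visible for 6 s, warnings 5 s, successes 3 s. A sketch of how such a duration is typically consumed (showStageNotice and the CSS class are assumptions; only getStageNoticeDuration appears in the diff):

// Hypothetical notice helper: schedules the fade-out using the
// level-dependent duration from getStageNoticeDuration.
function showStageNotice(element, level = "info") {
  element.classList.add("st-notice-visible");
  setTimeout(() => {
    element.classList.remove("st-notice-visible"); // CSS transition fades it out
  }, getStageNoticeDuration(level));
}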
@@ -2967,6 +2967,16 @@ async function executeExtractionBatch({
     extractPrompt: undefined,
     settings,
     signal,
+    onStreamProgress: ({ previewText, receivedChars }) => {
+      const preview = previewText?.length > 80
+        ? "…" + previewText.slice(-80)
+        : previewText || "";
+      setLastExtractionStatus(
+        "AI 生成中",
+        `${preview}\n已接收 ${receivedChars} 字符`,
+        "running",
+      );
+    },
   });

   if (!result.success) {
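The inline callback keeps only the tail of the streamed text so the status line stays short. The same logic as a standalone helper; the 80-character window and the "…" prefix come from the diff, the function wrapper is illustrative:

// Tail-truncate streamed text for display: at most the last 80
// characters, prefixed with an ellipsis when anything was dropped.
function tailPreview(previewText, limit = 80) {
  if (!previewText) return "";
  return previewText.length > limit
    ? "…" + previewText.slice(-limit)
    : previewText;
}

// tailPreview("a".repeat(100)).length === 81  // "…" + the last 80 chars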
@@ -3737,6 +3747,17 @@ async function runRecall(options = {}) {
     schema: getSchema(),
     signal: recallSignal,
     settings,
+    onStreamProgress: ({ previewText, receivedChars }) => {
+      const preview = previewText?.length > 80
+        ? "…" + previewText.slice(-80)
+        : previewText || "";
+      setLastRecallStatus(
+        "AI 生成中",
+        `${preview}\n已接收 ${receivedChars} 字符`,
+        "running",
+        { syncRuntime: true },
+      );
+    },
     options: {
       topK: settings.recallTopK,
       maxRecallNodes: settings.recallMaxNodes,
llm.js (17 changes)
@@ -836,7 +836,7 @@ function isAbortError(error) {

 async function parseDedicatedStreamingResponse(
   response,
-  { taskKey = "", streamState = null } = {},
+  { taskKey = "", streamState = null, onStreamProgress = null } = {},
 ) {
   const reader = response?.body?.getReader?.();
   if (!reader) {
@@ -938,6 +938,15 @@ async function parseDedicatedStreamingResponse(
       streamState.previewText,
       deltaText,
     );
+    if (typeof onStreamProgress === "function") {
+      try {
+        onStreamProgress({
+          previewText: streamState.previewText,
+          chunkCount: streamState.chunkCount,
+          receivedChars: streamState.receivedChars,
+        });
+      } catch {}
+    }
   }

   if (reasoningDelta) {
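The callback fires once per decoded chunk, and the empty catch ensures a faulty UI callback can never abort the stream. A condensed, self-contained sketch of such a loop; the guard block and the streamState field names match the diff, the rest of the parser is assumed:

// Minimal streaming read loop with per-chunk progress notification.
async function readStreamWithProgress(response, onStreamProgress = null) {
  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  const streamState = { previewText: "", chunkCount: 0, receivedChars: 0 };
  for (;;) {
    const { done, value } = await reader.read();
    if (done) break;
    const deltaText = decoder.decode(value, { stream: true });
    streamState.previewText += deltaText;
    streamState.chunkCount += 1;
    streamState.receivedChars += deltaText.length;
    if (typeof onStreamProgress === "function") {
      try {
        onStreamProgress({ ...streamState });
      } catch {} // never let a UI callback kill the read loop
    }
  }
  return streamState;
}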
@@ -1004,6 +1013,7 @@ async function executeDedicatedRequest(
     jsonMode = false,
     taskKey = "",
     streamState = null,
+    onStreamProgress = null,
   } = {},
 ) {
   const requestBody = cloneRuntimeDebugValue(body, {}) || {};
@@ -1064,6 +1074,7 @@ async function executeDedicatedRequest(
     return await parseDedicatedStreamingResponse(response, {
       taskKey,
       streamState,
+      onStreamProgress,
     });
   }

@@ -1079,6 +1090,7 @@ async function callDedicatedOpenAICompatible(
     maxCompletionTokens = null,
     taskType = "",
     requestSource = "",
+    onStreamProgress = null,
   } = {},
 ) {
   const privateRequestSource = resolvePrivateRequestSource(
@@ -1246,6 +1258,7 @@ async function callDedicatedOpenAICompatible(
       jsonMode,
       taskKey,
       streamState,
+      onStreamProgress,
     });
   } catch (error) {
     if (
@@ -1332,6 +1345,7 @@ export async function callLLMForJSON({
   additionalMessages = [],
   promptMessages = [],
   debugContext = null,
+  onStreamProgress = null,
 } = {}) {
   const override = getLlmTestOverride("callLLMForJSON");
   if (override) {
@@ -1370,6 +1384,7 @@ export async function callLLMForJSON({
     jsonMode: true,
     taskType,
     requestSource: privateRequestSource,
+    onStreamProgress,
     maxCompletionTokens:
       attempt === 0
         ? DEFAULT_JSON_COMPLETION_TOKENS

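The llm.js hunks above are pure plumbing: callLLMForJSON forwards onStreamProgress to callDedicatedOpenAICompatible, which forwards it to executeDedicatedRequest, which hands it to parseDedicatedStreamingResponse. A hypothetical two-layer reduction of that pattern:

// Each layer declares `onStreamProgress = null` in its options and
// forwards it unchanged; only the innermost layer ever invokes it.
async function outerLayer({ onStreamProgress = null } = {}) {
  return innerLayer({ onStreamProgress }); // pass-through, no wrapping
}
async function innerLayer({ onStreamProgress = null } = {}) {
  onStreamProgress?.({ previewText: "", chunkCount: 0, receivedChars: 0 });
}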
@@ -80,6 +80,7 @@ export async function retrieve({
   signal = undefined,
   options = {},
   settings = {},
+  onStreamProgress = null,
 }) {
   throwIfAborted(signal);
   const topK = options.topK ?? 20;
@@ -286,6 +287,7 @@ export async function retrieve({
     options.recallPrompt,
     settings,
     signal,
+    onStreamProgress,
   );
   selectedNodeIds = llmResult.selectedNodeIds;
   llmMeta = {
@@ -429,6 +431,7 @@ async function llmRecall(
   customPrompt,
   settings = {},
   signal,
+  onStreamProgress = null,
 ) {
   throwIfAborted(signal);
   const contextStr = recentMessages.join("\n---\n");
@@ -496,6 +499,7 @@ async function llmRecall(
     ),
     promptMessages: promptPayload.promptMessages,
     additionalMessages: promptPayload.additionalMessages,
+    onStreamProgress,
   });

   if (result?.selected_ids && Array.isArray(result.selected_ids)) {
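End to end, the recall path mirrors extraction: progress events raised inside llmRecall's LLM call surface unchanged at the retrieve caller. A hypothetical invocation; the parameter names and the topK default come from the diff, everything else is illustrative:

const recall = await retrieve({
  signal: undefined,
  options: { topK: 20 },   // 20 is the default shown above
  settings: {},
  onStreamProgress: ({ receivedChars }) => {
    console.log(`recall stream: ${receivedChars} chars so far`);
  },
});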