Merge pull request ChatGPTNextWeb#6204 from bestsanmao/ali_bytedance_reasoning_content

add 3 types of reasoning_content support (deepseek-r1 @OpenAI, @alibaba, @bytedance); parse <think></think> from SSE
Leizhenpeng authored Feb 13, 2025
2 parents 48cd4b1 + cf140d4 commit 12863f5
Showing 12 changed files with 318 additions and 294 deletions.
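
The change description above mentions parsing <think></think> tags out of the SSE stream. As a rough illustration of the idea only (this is not the code added by this commit; the helper and type names below, createThinkTagParser and ThinkChunk, are invented for the sketch), a streaming parser can track whether it is currently inside a think block and split each incoming delta into reasoning and answer pieces:

type ThinkChunk = { isThinking: boolean; content: string };

// Stateful parser: remembers across SSE deltas whether we are inside <think>...</think>.
function createThinkTagParser() {
  let insideThink = false;
  return function splitThinkTags(chunk: string): ThinkChunk[] {
    const pieces: ThinkChunk[] = [];
    let rest = chunk;
    while (rest.length > 0) {
      const tag = insideThink ? "</think>" : "<think>";
      const idx = rest.indexOf(tag);
      if (idx === -1) {
        // No tag boundary in this delta: everything belongs to the current mode.
        pieces.push({ isThinking: insideThink, content: rest });
        break;
      }
      if (idx > 0) {
        pieces.push({ isThinking: insideThink, content: rest.slice(0, idx) });
      }
      insideThink = !insideThink; // flip mode at every tag boundary
      rest = rest.slice(idx + tag.length);
    }
    return pieces;
  };
}

// Usage: route reasoning text and answer text differently as deltas arrive.
const parse = createThinkTagParser();
for (const delta of ["<think>weigh the option", "s</think>Final ans", "wer."]) {
  for (const piece of parse(delta)) {
    console.log(piece.isThinking ? "[reasoning]" : "[answer]", piece.content);
  }
}

A production parser would also need to buffer a tag that is split across two deltas; the sketch ignores that case.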
228 changes: 107 additions & 121 deletions app/client/platforms/alibaba.ts
@@ -1,12 +1,13 @@
"use client";
import { ApiPath, Alibaba, ALIBABA_BASE_URL } from "@/app/constant";
import {
ApiPath,
Alibaba,
ALIBABA_BASE_URL,
REQUEST_TIMEOUT_MS,
} from "@/app/constant";
import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
import {
useAccessStore,
useAppConfig,
useChatStore,
ChatMessageTool,
usePluginStore,
} from "@/app/store";
import { streamWithThink } from "@/app/utils/chat";
import {
ChatOptions,
getHeaders,
@@ -15,14 +16,12 @@ import {
SpeechOptions,
MultimodalContent,
} from "../api";
import Locale from "../../locales";
import {
EventStreamContentType,
fetchEventSource,
} from "@fortaine/fetch-event-source";
import { prettyObject } from "@/app/utils/format";
import { getClientConfig } from "@/app/config/client";
import { getMessageTextContent } from "@/app/utils";
import {
getMessageTextContent,
getMessageTextContentWithoutThinking,
getTimeoutMSByModel,
} from "@/app/utils";
import { fetch } from "@/app/utils/stream";

export interface OpenAIListModelResponse {
@@ -92,7 +91,10 @@ export class QwenApi implements LLMApi {
async chat(options: ChatOptions) {
const messages = options.messages.map((v) => ({
role: v.role,
content: getMessageTextContent(v),
content:
v.role === "assistant"
? getMessageTextContentWithoutThinking(v)
: getMessageTextContent(v),
}));

const modelConfig = {
@@ -122,134 +124,118 @@ options.onController?.(controller);
options.onController?.(controller);

try {
const headers = {
...getHeaders(),
"X-DashScope-SSE": shouldStream ? "enable" : "disable",
};

const chatPath = this.path(Alibaba.ChatPath);
const chatPayload = {
method: "POST",
body: JSON.stringify(requestPayload),
signal: controller.signal,
headers: {
...getHeaders(),
"X-DashScope-SSE": shouldStream ? "enable" : "disable",
},
headers: headers,
};

// make a fetch request
const requestTimeoutId = setTimeout(
() => controller.abort(),
REQUEST_TIMEOUT_MS,
getTimeoutMSByModel(options.config.model),
);

if (shouldStream) {
let responseText = "";
let remainText = "";
let finished = false;
let responseRes: Response;

// animate response to make it looks smooth
function animateResponseText() {
if (finished || controller.signal.aborted) {
responseText += remainText;
console.log("[Response Animation] finished");
if (responseText?.length === 0) {
options.onError?.(new Error("empty response from server"));
const [tools, funcs] = usePluginStore
.getState()
.getAsTools(
useChatStore.getState().currentSession().mask?.plugin || [],
);
return streamWithThink(
chatPath,
requestPayload,
headers,
tools as any,
funcs,
controller,
// parseSSE
(text: string, runTools: ChatMessageTool[]) => {
// console.log("parseSSE", text, runTools);
const json = JSON.parse(text);
const choices = json.output.choices as Array<{
message: {
content: string | null;
tool_calls: ChatMessageTool[];
reasoning_content: string | null;
};
}>;

if (!choices?.length) return { isThinking: false, content: "" };

const tool_calls = choices[0]?.message?.tool_calls;
if (tool_calls?.length > 0) {
const index = tool_calls[0]?.index;
const id = tool_calls[0]?.id;
const args = tool_calls[0]?.function?.arguments;
if (id) {
runTools.push({
id,
type: tool_calls[0]?.type,
function: {
name: tool_calls[0]?.function?.name as string,
arguments: args,
},
});
} else {
// @ts-ignore
runTools[index]["function"]["arguments"] += args;
}
}
return;
}

if (remainText.length > 0) {
const fetchCount = Math.max(1, Math.round(remainText.length / 60));
const fetchText = remainText.slice(0, fetchCount);
responseText += fetchText;
remainText = remainText.slice(fetchCount);
options.onUpdate?.(responseText, fetchText);
}

requestAnimationFrame(animateResponseText);
}

// start animaion
animateResponseText();

const finish = () => {
if (!finished) {
finished = true;
options.onFinish(responseText + remainText, responseRes);
}
};

controller.signal.onabort = finish;

fetchEventSource(chatPath, {
fetch: fetch as any,
...chatPayload,
async onopen(res) {
clearTimeout(requestTimeoutId);
const contentType = res.headers.get("content-type");
console.log(
"[Alibaba] request response content type: ",
contentType,
);
responseRes = res;

if (contentType?.startsWith("text/plain")) {
responseText = await res.clone().text();
return finish();
}
const reasoning = choices[0]?.message?.reasoning_content;
const content = choices[0]?.message?.content;

// Skip if both content and reasoning_content are empty or null
if (
!res.ok ||
!res.headers
.get("content-type")
?.startsWith(EventStreamContentType) ||
res.status !== 200
(!reasoning || reasoning.length === 0) &&
(!content || content.length === 0)
) {
const responseTexts = [responseText];
let extraInfo = await res.clone().text();
try {
const resJson = await res.clone().json();
extraInfo = prettyObject(resJson);
} catch {}

if (res.status === 401) {
responseTexts.push(Locale.Error.Unauthorized);
}

if (extraInfo) {
responseTexts.push(extraInfo);
}

responseText = responseTexts.join("\n\n");

return finish();
return {
isThinking: false,
content: "",
};
}
},
onmessage(msg) {
if (msg.data === "[DONE]" || finished) {
return finish();
}
const text = msg.data;
try {
const json = JSON.parse(text);
const choices = json.output.choices as Array<{
message: { content: string };
}>;
const delta = choices[0]?.message?.content;
if (delta) {
remainText += delta;
}
} catch (e) {
console.error("[Request] parse error", text, msg);

if (reasoning && reasoning.length > 0) {
return {
isThinking: true,
content: reasoning,
};
} else if (content && content.length > 0) {
return {
isThinking: false,
content: content,
};
}

return {
isThinking: false,
content: "",
};
},
onclose() {
finish();
},
onerror(e) {
options.onError?.(e);
throw e;
// processToolMessage, include tool_calls message and tool call results
(
requestPayload: RequestPayload,
toolCallMessage: any,
toolCallResult: any[],
) => {
requestPayload?.input?.messages?.splice(
requestPayload?.input?.messages?.length,
0,
toolCallMessage,
...toolCallResult,
);
},
openWhenHidden: true,
});
options,
);
} else {
const res = await fetch(chatPath, chatPayload);
clearTimeout(requestTimeoutId);
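
For reference, the new streaming path above passes streamWithThink a parseSSE callback, and the diff shows that callback returning objects of the following shape (inferred from the usage in this diff, not quoted from app/utils/chat.ts):

type ParseSSEResult = {
  isThinking: boolean; // true while the chunk carries reasoning_content
  content: string; // text to append to the reasoning block or to the answer
};

Returning isThinking: false with empty content lets the helper skip deltas that carry neither reasoning nor answer text.
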
11 changes: 3 additions & 8 deletions app/client/platforms/baidu.ts
@@ -1,10 +1,5 @@
"use client";
import {
ApiPath,
Baidu,
BAIDU_BASE_URL,
REQUEST_TIMEOUT_MS,
} from "@/app/constant";
import { ApiPath, Baidu, BAIDU_BASE_URL } from "@/app/constant";
import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
import { getAccessToken } from "@/app/utils/baidu";

@@ -23,7 +18,7 @@ import {
} from "@fortaine/fetch-event-source";
import { prettyObject } from "@/app/utils/format";
import { getClientConfig } from "@/app/config/client";
import { getMessageTextContent } from "@/app/utils";
import { getMessageTextContent, getTimeoutMSByModel } from "@/app/utils";
import { fetch } from "@/app/utils/stream";

export interface OpenAIListModelResponse {
@@ -155,7 +150,7 @@ export class ErnieApi implements LLMApi {
// make a fetch request
const requestTimeoutId = setTimeout(
() => controller.abort(),
REQUEST_TIMEOUT_MS,
getTimeoutMSByModel(options.config.model),
);

if (shouldStream) {
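
Both files also swap the fixed REQUEST_TIMEOUT_MS for getTimeoutMSByModel(options.config.model). The helper's body is not part of this excerpt; a plausible sketch, assuming reasoning-heavy models simply get a longer deadline, might look like:

// Hypothetical sketch: the real helper lives in app/utils and is not shown in this diff.
const REQUEST_TIMEOUT_MS = 60_000;
const REQUEST_TIMEOUT_MS_FOR_THINKING = 5 * 60_000;

function getTimeoutMSByModel(model: string): number {
  const m = model.toLowerCase();
  // Reasoning models stream a long thinking phase before the first answer token,
  // so they need a more generous abort timeout than ordinary chat models.
  if (m.includes("deepseek-r") || m.includes("-thinking") || m.startsWith("o1")) {
    return REQUEST_TIMEOUT_MS_FOR_THINKING;
  }
  return REQUEST_TIMEOUT_MS;
}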