diff --git a/src/app/api/chat/[provider]/agentRuntime.test.ts b/src/app/api/chat/[provider]/agentRuntime.test.ts
index c9391a18b57ab..715a4a2490dae 100644
--- a/src/app/api/chat/[provider]/agentRuntime.test.ts
+++ b/src/app/api/chat/[provider]/agentRuntime.test.ts
@@ -8,9 +8,11 @@ import { JWTPayload } from '@/const/auth';
 import { TraceNameMap } from '@/const/trace';
 import {
   ChatStreamPayload,
+  LobeAnthropicAI,
   LobeAzureOpenAI,
   LobeBedrockAI,
   LobeGoogleAI,
+  LobeMistralAI,
   LobeMoonshotAI,
   LobeOllamaAI,
   LobeOpenAI,
@@ -38,6 +40,8 @@ vi.mock('@/config/server', () => ({
     AWS_REGION: 'test-aws-region',
     OLLAMA_PROXY_URL: 'test-ollama-url',
     PERPLEXITY_API_KEY: 'test-perplexity-key',
+    ANTHROPIC_API_KEY: 'test-anthropic-key',
+    MISTRAL_API_KEY: 'test-mistral-key',
   })),
 }));
 
@@ -261,6 +265,54 @@ describe('AgentRuntime', () => {
     });
   });
+
+  describe('Anthropic AI provider', () => {
+    it('should initialize correctly', async () => {
+      const jwtPayload: JWTPayload = { apiKey: 'user-anthropic-key' };
+      const runtime = await AgentRuntime.initializeWithUserPayload(
+        ModelProvider.Anthropic,
+        jwtPayload,
+      );
+
+      // Assuming LobeAnthropicAI is the implementation class for the Anthropic provider
+      expect(runtime['_runtime']).toBeInstanceOf(LobeAnthropicAI);
+    });
+
+    it('should initialize correctly without apiKey', async () => {
+      const jwtPayload: JWTPayload = {};
+      const runtime = await AgentRuntime.initializeWithUserPayload(
+        ModelProvider.Anthropic,
+        jwtPayload,
+      );
+
+      // Assuming LobeAnthropicAI is the implementation class for the Anthropic provider
+      expect(runtime['_runtime']).toBeInstanceOf(LobeAnthropicAI);
+    });
+  });
+
+  describe('Mistral AI provider', () => {
+    it('should initialize correctly', async () => {
+      const jwtPayload: JWTPayload = { apiKey: 'user-mistral-key' };
+      const runtime = await AgentRuntime.initializeWithUserPayload(
+        ModelProvider.Mistral,
+        jwtPayload,
+      );
+
+      // Assuming LobeMistralAI is the implementation class for the Mistral provider
+      expect(runtime['_runtime']).toBeInstanceOf(LobeMistralAI);
+    });
+
+    it('should initialize correctly without apiKey', async () => {
+      const jwtPayload: JWTPayload = {};
+      const runtime = await AgentRuntime.initializeWithUserPayload(
+        ModelProvider.Mistral,
+        jwtPayload,
+      );
+
+      // Assuming LobeMistralAI is the implementation class for the Mistral provider
+      expect(runtime['_runtime']).toBeInstanceOf(LobeMistralAI);
+    });
+  });
+
   it('should handle unknown provider gracefully', async () => {
     const jwtPayload: JWTPayload = {};
     const runtime = await AgentRuntime.initializeWithUserPayload('unknown', jwtPayload);
diff --git a/src/app/api/errorResponse.test.ts b/src/app/api/errorResponse.test.ts
index 9e930f30a20bc..df005705cc2e9 100644
--- a/src/app/api/errorResponse.test.ts
+++ b/src/app/api/errorResponse.test.ts
@@ -99,6 +99,21 @@ describe('createErrorResponse', () => {
       const response = createErrorResponse(errorType);
       expect(response.status).toBe(479);
     });
+
+    // Test that the AnthropicBizError error type returns a 480 status code
+    it('returns a 480 status for AnthropicBizError error type', () => {
+      const errorType = AgentRuntimeErrorType.AnthropicBizError;
+      const response = createErrorResponse(errorType);
+      expect(response.status).toBe(480);
+    });
+
+    // Test that the MistralBizError error type returns a 481 status code
+    it('returns a 481 status for MistralBizError error type', () => {
+      const errorType = AgentRuntimeErrorType.MistralBizError;
+      const response = createErrorResponse(errorType);
+      expect(response.status).toBe(481);
+    });
+
   });
 
   // Test the case where the status code is outside the 200-599 range
diff --git a/src/libs/agent-runtime/anthropic/index.test.ts b/src/libs/agent-runtime/anthropic/index.test.ts
index 6c3323608480c..1d2667091491a 100644
--- a/src/libs/agent-runtime/anthropic/index.test.ts
+++ b/src/libs/agent-runtime/anthropic/index.test.ts
@@ -31,6 +31,7 @@ describe('LobeAnthropicAI', () => {
   });
 
   describe('chat', () => {
+
     it('should return a StreamingTextResponse on successful API call', async () => {
       const result = await instance.chat({
         messages: [{ content: 'Hello', role: 'user' }],
         model: 'claude-instant-1.2',
         temperature: 0,
       });
@@ -41,6 +42,7 @@
       // Assert
       expect(result).toBeInstanceOf(Response);
     });
+
     it('should handle text messages correctly', async () => {
       // Arrange
       const mockStream = new ReadableStream({
@@ -73,6 +75,7 @@
       })
       expect(result).toBeInstanceOf(Response);
     });
+
     it('should handle system prompt correctly', async () => {
       // Arrange
       const mockStream = new ReadableStream({
@@ -107,6 +110,81 @@
       })
       expect(result).toBeInstanceOf(Response);
     });
+
+    it('should call Anthropic API with supported options in streaming mode', async () => {
+      // Arrange
+      const mockStream = new ReadableStream({
+        start(controller) {
+          controller.enqueue('Hello, world!');
+          controller.close();
+        },
+      });
+      const mockResponse = Promise.resolve(mockStream);
+      (instance['client'].messages.create as Mock).mockResolvedValue(mockResponse);
+
+      // Act
+      const result = await instance.chat({
+        max_tokens: 2048,
+        messages: [
+          { content: 'Hello', role: 'user' },
+        ],
+        model: 'claude-instant-1.2',
+        temperature: 0.5,
+        top_p: 1,
+      });
+
+      // Assert
+      expect(instance['client'].messages.create).toHaveBeenCalledWith({
+        max_tokens: 2048,
+        messages: [
+          { content: 'Hello', role: 'user' },
+        ],
+        model: 'claude-instant-1.2',
+        stream: true,
+        temperature: 0.5,
+        top_p: 1,
+      })
+      expect(result).toBeInstanceOf(Response);
+    });
+
+    it('should call Anthropic API without unsupported options', async () => {
+      // Arrange
+      const mockStream = new ReadableStream({
+        start(controller) {
+          controller.enqueue('Hello, world!');
+          controller.close();
+        },
+      });
+      const mockResponse = Promise.resolve(mockStream);
+      (instance['client'].messages.create as Mock).mockResolvedValue(mockResponse);
+
+      // Act
+      const result = await instance.chat({
+        frequency_penalty: 0.5, // Unsupported option
+        max_tokens: 2048,
+        messages: [
+          { content: 'Hello', role: 'user' },
+        ],
+        model: 'claude-instant-1.2',
+        presence_penalty: 0.5,
+        temperature: 0.5,
+        top_p: 1,
+      });
+
+      // Assert
+      expect(instance['client'].messages.create).toHaveBeenCalledWith({
+        max_tokens: 2048,
+        messages: [
+          { content: 'Hello', role: 'user' },
+        ],
+        model: 'claude-instant-1.2',
+        stream: true,
+        temperature: 0.5,
+        top_p: 1,
+      })
+      expect(result).toBeInstanceOf(Response);
+    });
+
     it('should call debugStream in DEBUG mode', async () => {
       // Arrange
       const mockProdStream = new ReadableStream({
diff --git a/src/libs/agent-runtime/mistral/index.test.ts b/src/libs/agent-runtime/mistral/index.test.ts
index 9930d62db50de..6c89be006aed2 100644
--- a/src/libs/agent-runtime/mistral/index.test.ts
+++ b/src/libs/agent-runtime/mistral/index.test.ts
@@ -58,6 +58,64 @@ describe('LobeMistralAI', () => {
       expect(result).toBeInstanceOf(Response);
     });
 
+    it('should call Mistral API with supported options in streaming mode', async () => {
+      // Arrange
+      const mockStream = new ReadableStream();
+      const mockResponse = Promise.resolve(mockStream);
+
+      (instance['client'].chat.completions.create as Mock).mockResolvedValue(mockResponse);
+
+      // Act
+      const result = await instance.chat({
+        max_tokens: 1024,
+        messages: [{ content: 'Hello', role: 'user' }],
+        model: 'open-mistral-7b',
+        temperature: 0.7,
+        top_p: 1,
+      });
+
+      // Assert
+      expect(instance['client'].chat.completions.create).toHaveBeenCalledWith({
+        max_tokens: 1024,
+        messages: [{ content: 'Hello', role: 'user' }],
+        model: 'open-mistral-7b',
+        stream: true,
+        temperature: 0.7,
+        top_p: 1,
+      })
+      expect(result).toBeInstanceOf(Response);
+    });
+
+    it('should call Mistral API without unsupported options', async () => {
+      // Arrange
+      const mockStream = new ReadableStream();
+      const mockResponse = Promise.resolve(mockStream);
+
+      (instance['client'].chat.completions.create as Mock).mockResolvedValue(mockResponse);
+
+      // Act
+      const result = await instance.chat({
+        frequency_penalty: 0.5, // unsupported option
+        max_tokens: 1024,
+        messages: [{ content: 'Hello', role: 'user' }],
+        model: 'open-mistral-7b',
+        presence_penalty: 0.5, // unsupported option
+        temperature: 0.7,
+        top_p: 1,
+      });
+
+      // Assert
+      expect(instance['client'].chat.completions.create).toHaveBeenCalledWith({
+        max_tokens: 1024,
+        messages: [{ content: 'Hello', role: 'user' }],
+        model: 'open-mistral-7b',
+        stream: true,
+        temperature: 0.7,
+        top_p: 1,
+      })
+      expect(result).toBeInstanceOf(Response);
+    });
+
     describe('Error', () => {
       it('should return MistralBizError with an openai error response when OpenAI.APIError is thrown', async () => {
         // Arrange
diff --git a/src/services/_auth.test.ts b/src/services/_auth.test.ts
index 2c48cc265421a..351013ed003e2 100644
--- a/src/services/_auth.test.ts
+++ b/src/services/_auth.test.ts
@@ -11,6 +11,8 @@ import { getProviderAuthPayload } from './_auth';
 const mockZhiPuAPIKey = 'zhipu-api-key';
 const mockMoonshotAPIKey = 'moonshot-api-key';
 const mockGoogleAPIKey = 'google-api-key';
+const mockAnthropicAPIKey = 'anthropic-api-key';
+const mockMistralAPIKey = 'mistral-api-key';
 
 // mock the traditional zustand
 vi.mock('zustand/traditional');
@@ -43,6 +45,24 @@ describe('getProviderAuthPayload', () => {
     expect(payload).toEqual({ apiKey: mockMoonshotAPIKey });
   });
 
+  it('should return correct payload for Anthropic provider', () => {
+    act(() => {
+      setModelProviderConfig('anthropic', { apiKey: mockAnthropicAPIKey });
+    });
+
+    const payload = getProviderAuthPayload(ModelProvider.Anthropic);
+    expect(payload).toEqual({ apiKey: mockAnthropicAPIKey });
+  });
+
+  it('should return correct payload for Mistral provider', () => {
+    act(() => {
+      setModelProviderConfig('mistral', { apiKey: mockMistralAPIKey });
+    });
+
+    const payload = getProviderAuthPayload(ModelProvider.Mistral);
+    expect(payload).toEqual({ apiKey: mockMistralAPIKey });
+  });
+
   it('should return correct payload for Google provider', () => {
     act(() => {
       setModelProviderConfig('google', { apiKey: mockGoogleAPIKey });