✅ add missing tests for anthropic and mistral
danielglh committed Mar 6, 2024
1 parent de1a8c5 commit cd46b63
Showing 5 changed files with 223 additions and 0 deletions.
52 changes: 52 additions & 0 deletions src/app/api/chat/[provider]/agentRuntime.test.ts
@@ -8,9 +8,11 @@ import { JWTPayload } from '@/const/auth';
import { TraceNameMap } from '@/const/trace';
import {
ChatStreamPayload,
LobeAnthropicAI,
LobeAzureOpenAI,
LobeBedrockAI,
LobeGoogleAI,
LobeMistralAI,
LobeMoonshotAI,
LobeOllamaAI,
LobeOpenAI,
@@ -38,6 +40,8 @@ vi.mock('@/config/server', () => ({
AWS_REGION: 'test-aws-region',
OLLAMA_PROXY_URL: 'test-ollama-url',
PERPLEXITY_API_KEY: 'test-perplexity-key',
ANTHROPIC_API_KEY: 'test-anthropic-key',
MISTRAL_API_KEY: 'test-mistral-key',
})),
}));

@@ -261,6 +265,54 @@ describe('AgentRuntime', () => {
});
});

describe('Anthropic AI provider', () => {
it('should initialize correctly', async () => {
const jwtPayload: JWTPayload = { apiKey: 'user-anthropic-key' };
const runtime = await AgentRuntime.initializeWithUserPayload(
ModelProvider.Anthropic,
jwtPayload,
);

      // Assume LobeAnthropicAI is the implementation class for the Anthropic provider
expect(runtime['_runtime']).toBeInstanceOf(LobeAnthropicAI);
});

it('should initialize correctly without apiKey', async () => {
const jwtPayload: JWTPayload = {};
const runtime = await AgentRuntime.initializeWithUserPayload(
ModelProvider.Anthropic,
jwtPayload,
);

      // Assume LobeAnthropicAI is the implementation class for the Anthropic provider
expect(runtime['_runtime']).toBeInstanceOf(LobeAnthropicAI);
});
});

describe('Mistral AI provider', () => {
it('should initialize correctly', async () => {
const jwtPayload: JWTPayload = { apiKey: 'user-mistral-key' };
const runtime = await AgentRuntime.initializeWithUserPayload(
ModelProvider.Mistral,
jwtPayload,
);

      // Assume LobeMistralAI is the implementation class for the Mistral provider
expect(runtime['_runtime']).toBeInstanceOf(LobeMistralAI);
});

it('should initialize correctly without apiKey', async () => {
const jwtPayload: JWTPayload = {};
const runtime = await AgentRuntime.initializeWithUserPayload(
ModelProvider.Mistral,
jwtPayload,
);

      // Assume LobeMistralAI is the implementation class for the Mistral provider
expect(runtime['_runtime']).toBeInstanceOf(LobeMistralAI);
});
});

it('should handle unknown provider gracefully', async () => {
const jwtPayload: JWTPayload = {};
const runtime = await AgentRuntime.initializeWithUserPayload('unknown', jwtPayload);
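For context, here is a minimal sketch of the provider routing these two suites exercise. The class names match the imports in the diff above, but the constructor signatures and the env-key fallback are assumptions about the real AgentRuntime.initializeWithUserPayload, not a copy of it:

// Hypothetical sketch of the routing under test; the real implementation
// in src/libs/agent-runtime may differ in shape and option handling.
interface JWTPayload {
  apiKey?: string;
}

class LobeAnthropicAI {
  constructor(readonly apiKey: string) {}
}

class LobeMistralAI {
  constructor(readonly apiKey: string) {}
}

// Server-side keys, as stubbed via vi.mock('@/config/server') above.
const serverConfig = {
  ANTHROPIC_API_KEY: 'test-anthropic-key',
  MISTRAL_API_KEY: 'test-mistral-key',
};

function initializeWithUserPayload(provider: string, payload: JWTPayload) {
  switch (provider) {
    case 'anthropic':
      // A user-supplied key wins; otherwise fall back to the server env key,
      // which is why the "without apiKey" case still yields a LobeAnthropicAI.
      return new LobeAnthropicAI(payload.apiKey ?? serverConfig.ANTHROPIC_API_KEY);
    case 'mistral':
      return new LobeMistralAI(payload.apiKey ?? serverConfig.MISTRAL_API_KEY);
    default:
      throw new Error(`unknown provider: ${provider}`);
  }
}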
15 changes: 15 additions & 0 deletions src/app/api/errorResponse.test.ts
@@ -99,6 +99,21 @@ describe('createErrorResponse', () => {
const response = createErrorResponse(errorType);
expect(response.status).toBe(479);
});

  // Test that the AnthropicBizError error type returns a 480 status code
it('returns a 480 status for AnthropicBizError error type', () => {
const errorType = AgentRuntimeErrorType.AnthropicBizError;
const response = createErrorResponse(errorType);
expect(response.status).toBe(480);
});

  // Test that the MistralBizError error type returns a 481 status code
it('returns a 481 status for MistralBizError error type', () => {
const errorType = AgentRuntimeErrorType.MistralBizError;
const response = createErrorResponse(errorType);
expect(response.status).toBe(481);
});

});

// Test the case where the status code is outside the 200-599 range
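The two new assertions pin down the provider-specific status codes: 480 for Anthropic, 481 for Mistral. A hedged sketch of the mapping they imply; only those two codes are confirmed by the tests, and the surrounding structure is illustrative:

// Hypothetical sketch of the errorType → HTTP status mapping under test.
const AgentRuntimeErrorType = {
  AnthropicBizError: 'AnthropicBizError',
  MistralBizError: 'MistralBizError',
} as const;

type ErrorType = keyof typeof AgentRuntimeErrorType;

const statusMap: Record<ErrorType, number> = {
  AnthropicBizError: 480, // asserted above
  MistralBizError: 481, // asserted above
};

function createErrorResponse(errorType: ErrorType, body?: object): Response {
  return new Response(JSON.stringify({ errorType, ...body }), {
    status: statusMap[errorType],
  });
}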
78 changes: 78 additions & 0 deletions src/libs/agent-runtime/anthropic/index.test.ts
@@ -31,6 +31,7 @@ describe('LobeAnthropicAI', () => {
});

describe('chat', () => {

it('should return a StreamingTextResponse on successful API call', async () => {
const result = await instance.chat({
messages: [{ content: 'Hello', role: 'user' }],
Expand All @@ -41,6 +42,7 @@ describe('LobeAnthropicAI', () => {
// Assert
expect(result).toBeInstanceOf(Response);
});

it('should handle text messages correctly', async () => {
// Arrange
const mockStream = new ReadableStream({
@@ -73,6 +75,7 @@
      });
expect(result).toBeInstanceOf(Response);
});

it('should handle system prompt correctly', async () => {
// Arrange
const mockStream = new ReadableStream({
@@ -107,6 +110,81 @@
      });
expect(result).toBeInstanceOf(Response);
});

    it('should call Anthropic API with supported options in streaming mode', async () => {
// Arrange
const mockStream = new ReadableStream({
start(controller) {
controller.enqueue('Hello, world!');
controller.close();
},
});
const mockResponse = Promise.resolve(mockStream);
(instance['client'].messages.create as Mock).mockResolvedValue(mockResponse);

// Act
const result = await instance.chat({
max_tokens: 2048,
messages: [
{ content: 'Hello', role: 'user' },
],
model: 'claude-instant-1.2',
temperature: 0.5,
top_p: 1,
});

// Assert
expect(instance['client'].messages.create).toHaveBeenCalledWith({
max_tokens: 2048,
messages: [
{ content: 'Hello', role: 'user' },
],
model: 'claude-instant-1.2',
stream: true,
temperature: 0.5,
top_p: 1,
      });
expect(result).toBeInstanceOf(Response);
});

    it('should call Anthropic API without unsupported options', async () => {
// Arrange
const mockStream = new ReadableStream({
start(controller) {
controller.enqueue('Hello, world!');
controller.close();
},
});
const mockResponse = Promise.resolve(mockStream);
(instance['client'].messages.create as Mock).mockResolvedValue(mockResponse);

// Act
const result = await instance.chat({
frequency_penalty: 0.5, // Unsupported option
max_tokens: 2048,
messages: [
{ content: 'Hello', role: 'user' },
],
model: 'claude-instant-1.2',
        presence_penalty: 0.5, // Unsupported option
temperature: 0.5,
top_p: 1,
});

// Assert
expect(instance['client'].messages.create).toHaveBeenCalledWith({
max_tokens: 2048,
messages: [
{ content: 'Hello', role: 'user' },
],
model: 'claude-instant-1.2',
stream: true,
temperature: 0.5,
top_p: 1,
      });
expect(result).toBeInstanceOf(Response);
});

it('should call debugStream in DEBUG mode', async () => {
// Arrange
const mockProdStream = new ReadableStream({
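The two new option tests hinge on the request being rebuilt rather than spread through: frequency_penalty and presence_penalty never reach messages.create, while stream: true is always injected. A minimal sketch of that shape, assuming an allow-list approach (the real LobeAnthropicAI also maps system prompts and wraps the streaming response):

// Hypothetical sketch: forward only the options Anthropic supports,
// matching the toHaveBeenCalledWith assertions above.
interface ChatStreamPayload {
  frequency_penalty?: number; // unsupported by Anthropic; dropped
  max_tokens?: number;
  messages: { content: string; role: string }[];
  model: string;
  presence_penalty?: number; // unsupported by Anthropic; dropped
  temperature?: number;
  top_p?: number;
}

type MessagesClient = {
  messages: { create: (params: object) => Promise<ReadableStream> };
};

async function chat(client: MessagesClient, payload: ChatStreamPayload): Promise<Response> {
  const { max_tokens, messages, model, temperature, top_p } = payload;

  // Destructuring drops the unsupported penalties; streaming is forced on.
  const stream = await client.messages.create({
    max_tokens,
    messages,
    model,
    stream: true,
    temperature,
    top_p,
  });

  return new Response(stream);
}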
58 changes: 58 additions & 0 deletions src/libs/agent-runtime/mistral/index.test.ts
@@ -58,6 +58,64 @@ describe('LobeMistralAI', () => {
expect(result).toBeInstanceOf(Response);
});

it('should call Mistral API with supported options in streaming mode', async () => {
// Arrange
const mockStream = new ReadableStream();
const mockResponse = Promise.resolve(mockStream);

(instance['client'].chat.completions.create as Mock).mockResolvedValue(mockResponse);

// Act
const result = await instance.chat({
max_tokens: 1024,
messages: [{ content: 'Hello', role: 'user' }],
model: 'open-mistral-7b',
temperature: 0.7,
top_p: 1,
});

// Assert
expect(instance['client'].chat.completions.create).toHaveBeenCalledWith({
max_tokens: 1024,
messages: [{ content: 'Hello', role: 'user' }],
model: 'open-mistral-7b',
stream: true,
temperature: 0.7,
top_p: 1,
      });
expect(result).toBeInstanceOf(Response);
});

it('should call Mistral API without unsupported options', async () => {
// Arrange
const mockStream = new ReadableStream();
const mockResponse = Promise.resolve(mockStream);

(instance['client'].chat.completions.create as Mock).mockResolvedValue(mockResponse);

// Act
const result = await instance.chat({
frequency_penalty: 0.5, // unsupported option
max_tokens: 1024,
messages: [{ content: 'Hello', role: 'user' }],
model: 'open-mistral-7b',
presence_penalty: 0.5, // unsupported option
temperature: 0.7,
top_p: 1,
});

// Assert
expect(instance['client'].chat.completions.create).toHaveBeenCalledWith({
max_tokens: 1024,
messages: [{ content: 'Hello', role: 'user' }],
model: 'open-mistral-7b',
stream: true,
temperature: 0.7,
top_p: 1,
      });
expect(result).toBeInstanceOf(Response);
});

describe('Error', () => {
it('should return MistralBizError with an openai error response when OpenAI.APIError is thrown', async () => {
// Arrange
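The truncated Error block checks that an OpenAI.APIError surfaces as MistralBizError, consistent with the 481 status asserted in errorResponse.test.ts. A sketch of that catch path, assuming the Mistral runtime rides on the OpenAI-compatible client (the names mirror the tests; the control flow is an assumption):

import OpenAI from 'openai';

// Hypothetical sketch of the error rewrapping the 'Error' describe exercises.
async function chatWithErrorHandling(
  create: () => Promise<ReadableStream>,
): Promise<Response> {
  try {
    return new Response(await create());
  } catch (error) {
    if (error instanceof OpenAI.APIError) {
      // Re-surface the provider failure as MistralBizError with status 481.
      return new Response(
        JSON.stringify({ error: error.error, errorType: 'MistralBizError' }),
        { status: 481 },
      );
    }
    throw error;
  }
}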
20 changes: 20 additions & 0 deletions src/services/_auth.test.ts
@@ -11,6 +11,8 @@ import { getProviderAuthPayload } from './_auth';
const mockZhiPuAPIKey = 'zhipu-api-key';
const mockMoonshotAPIKey = 'moonshot-api-key';
const mockGoogleAPIKey = 'google-api-key';
const mockAnthropicAPIKey = 'anthropic-api-key';
const mockMistralAPIKey = 'mistral-api-key';

// mock the traditional zustand
vi.mock('zustand/traditional');
@@ -43,6 +45,24 @@ describe('getProviderAuthPayload', () => {
expect(payload).toEqual({ apiKey: mockMoonshotAPIKey });
});

it('should return correct payload for Anthropic provider', () => {
act(() => {
setModelProviderConfig('anthropic', { apiKey: mockAnthropicAPIKey });
});

const payload = getProviderAuthPayload(ModelProvider.Anthropic);
expect(payload).toEqual({ apiKey: mockAnthropicAPIKey });
});

it('should return correct payload for Mistral provider', () => {
act(() => {
setModelProviderConfig('mistral', { apiKey: mockMistralAPIKey });
});

const payload = getProviderAuthPayload(ModelProvider.Mistral);
expect(payload).toEqual({ apiKey: mockMistralAPIKey });
});

it('should return correct payload for Google provider', () => {
act(() => {
setModelProviderConfig('google', { apiKey: mockGoogleAPIKey });
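For reference, a sketch of the selector these cases cover: it reads the per-provider config from the settings store and returns only the auth fields. The store shape here is inferred from the setModelProviderConfig calls above and is an assumption, not the real zustand store:

// Hypothetical in-memory stand-in for the mocked settings store.
type ProviderConfig = { apiKey?: string };

const providerConfigs: Record<string, ProviderConfig> = {};

function setModelProviderConfig(provider: string, config: ProviderConfig) {
  providerConfigs[provider] = config;
}

function getProviderAuthPayload(provider: string): { apiKey?: string } {
  const config = providerConfigs[provider] ?? {};
  // Anthropic and Mistral need only an API key; providers like Azure or
  // Bedrock would add endpoint/region fields here.
  return { apiKey: config.apiKey };
}

// Usage mirroring the tests:
setModelProviderConfig('anthropic', { apiKey: 'anthropic-api-key' });
console.log(getProviderAuthPayload('anthropic')); // { apiKey: 'anthropic-api-key' }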
