✨ feat: Add Fireworks AI Model Provider (lobehub#3392)
* ✨ feat: Add Fireworks AI Model Provider

* πŸ’„ style: add FireworksAI self-models

* πŸ› fix: support function call

* Revert "πŸ› fix: support function call" (lobehub#48)

This reverts commit c8422f4.

* πŸ’„ style: update model info

* πŸ› fix: disable stream if contains tools

* πŸ‘· build: add ENV for Fireworks AI provider

* πŸ‘· build: update ENV

* ✨ feat: support proxy url

* πŸ› fix: fix typo

* Update providers.tsx

* ♻️ refactor: cleanup

* πŸ”¨ chore: remove proxyUrl for FireworksAI

* πŸ› fix: tools calling issue in Fireworks AI

* πŸ”¨ chore: remove work around method for tool calling

* πŸ”¨ chore: rollback changes

* πŸ› fix: fix CI error

* πŸ’„ style: update model list

* πŸ”¨ chore: fix rebase conflicts
hezhijie0327 authored Sep 10, 2024
1 parent fc85c20 commit fa0d84d
Showing 16 changed files with 468 additions and 4 deletions.
2 changes: 2 additions & 0 deletions Dockerfile
@@ -117,6 +117,8 @@ ENV \
BAICHUAN_API_KEY="" \
# DeepSeek
DEEPSEEK_API_KEY="" \
# Fireworks AI
FIREWORKSAI_API_KEY="" FIREWORKSAI_MODEL_LIST="" \
# Google
GOOGLE_API_KEY="" GOOGLE_PROXY_URL="" \
# Groq
2 changes: 2 additions & 0 deletions Dockerfile.database
@@ -149,6 +149,8 @@ ENV \
BAICHUAN_API_KEY="" \
# DeepSeek
DEEPSEEK_API_KEY="" \
# Fireworks AI
FIREWORKSAI_API_KEY="" FIREWORKSAI_MODEL_LIST="" \
# Google
GOOGLE_API_KEY="" GOOGLE_PROXY_URL="" \
# Groq
2 changes: 2 additions & 0 deletions src/app/(main)/settings/llm/ProviderList/providers.tsx
@@ -5,6 +5,7 @@ import {
AnthropicProviderCard,
BaichuanProviderCard,
DeepSeekProviderCard,
FireworksAIProviderCard,
GoogleProviderCard,
GroqProviderCard,
MinimaxProviderCard,
@@ -48,6 +49,7 @@ export const useProviderList = (): ProviderItem[] => {
OpenRouterProviderCard,
NovitaProviderCard,
TogetherAIProviderCard,
FireworksAIProviderCard,
QwenProviderCard,
DeepSeekProviderCard,
MinimaxProviderCard,
7 changes: 7 additions & 0 deletions src/app/api/chat/agentRuntime.ts
@@ -151,6 +151,13 @@ const getLlmOptionsFromPayload = (provider: string, payload: JWTPayload) => {

      return { apiKey };
    }
    case ModelProvider.FireworksAI: {
      const { FIREWORKSAI_API_KEY } = getLLMConfig();

      const apiKey = apiKeyManager.pick(payload?.apiKey || FIREWORKSAI_API_KEY);

      return { apiKey };
    }
    case ModelProvider.ZeroOne: {
      const { ZEROONE_API_KEY } = getLLMConfig();

8 changes: 8 additions & 0 deletions src/config/llm.ts
@@ -64,6 +64,10 @@ export const getLLMConfig = () => {
TOGETHERAI_API_KEY: z.string().optional(),
TOGETHERAI_MODEL_LIST: z.string().optional(),

ENABLED_FIREWORKSAI: z.boolean(),
FIREWORKSAI_API_KEY: z.string().optional(),
FIREWORKSAI_MODEL_LIST: z.string().optional(),

ENABLED_AWS_BEDROCK: z.boolean(),
AWS_BEDROCK_MODEL_LIST: z.string().optional(),
AWS_REGION: z.string().optional(),
@@ -152,6 +156,10 @@
TOGETHERAI_API_KEY: process.env.TOGETHERAI_API_KEY,
TOGETHERAI_MODEL_LIST: process.env.TOGETHERAI_MODEL_LIST,

ENABLED_FIREWORKSAI: !!process.env.FIREWORKSAI_API_KEY,
FIREWORKSAI_API_KEY: process.env.FIREWORKSAI_API_KEY,
FIREWORKSAI_MODEL_LIST: process.env.FIREWORKSAI_MODEL_LIST,

ENABLED_MOONSHOT: !!process.env.MOONSHOT_API_KEY,
MOONSHOT_API_KEY: process.env.MOONSHOT_API_KEY,
MOONSHOT_PROXY_URL: process.env.MOONSHOT_PROXY_URL,
143 changes: 143 additions & 0 deletions src/config/modelProviders/fireworksai.ts
@@ -0,0 +1,143 @@
import { ModelProviderCard } from '@/types/llm';

// ref https://fireworks.ai/models?show=Serverless
// ref https://fireworks.ai/pricing
const FireworksAI: ModelProviderCard = {
  chatModels: [
    {
      description: 'Fireworks latest and most performant function-calling model. Firefunction-v2 is based on Llama-3 and trained to excel at function-calling as well as chat and instruction-following. See blog post for more details https://fireworks.ai/blog/firefunction-v2-launch-post',
      displayName: 'Firefunction V2',
      enabled: true,
      functionCall: true,
      id: 'accounts/fireworks/models/firefunction-v2',
      tokens: 8192,
    },
    {
      description: 'Fireworks open-source function calling model.',
      displayName: 'Firefunction V1',
      functionCall: true,
      id: 'accounts/fireworks/models/firefunction-v1',
      tokens: 32_768,
    },
    {
      description: 'Vision-language model allowing both image and text as inputs (single image is recommended), trained on OSS model generated training data and open sourced on huggingface at fireworks-ai/FireLLaVA-13b',
      displayName: 'FireLLaVA-13B',
      enabled: true,
      functionCall: false,
      id: 'accounts/fireworks/models/firellava-13b',
      tokens: 4096,
      vision: true,
    },
    {
      displayName: 'Llama 3.1 8B Instruct',
      enabled: true,
      functionCall: false,
      id: 'accounts/fireworks/models/llama-v3p1-8b-instruct',
      tokens: 131_072,
    },
    {
      displayName: 'Llama 3.1 70B Instruct',
      enabled: true,
      functionCall: false,
      id: 'accounts/fireworks/models/llama-v3p1-70b-instruct',
      tokens: 131_072,
    },
    {
      displayName: 'Llama 3.1 405B Instruct',
      enabled: true,
      functionCall: false,
      id: 'accounts/fireworks/models/llama-v3p1-405b-instruct',
      tokens: 131_072,
    },
    {
      displayName: 'Llama 3 8B Instruct',
      functionCall: false,
      id: 'accounts/fireworks/models/llama-v3-8b-instruct',
      tokens: 8192,
    },
    {
      displayName: 'Llama 3 70B Instruct',
      functionCall: false,
      id: 'accounts/fireworks/models/llama-v3-70b-instruct',
      tokens: 8192,
    },
    {
      displayName: 'Llama 3 8B Instruct (HF version)',
      functionCall: false,
      id: 'accounts/fireworks/models/llama-v3-8b-instruct-hf',
      tokens: 8192,
    },
    {
      displayName: 'Llama 3 70B Instruct (HF version)',
      functionCall: false,
      id: 'accounts/fireworks/models/llama-v3-70b-instruct-hf',
      tokens: 8192,
    },
    {
      displayName: 'Gemma 2 9B Instruct',
      enabled: true,
      functionCall: false,
      id: 'accounts/fireworks/models/gemma2-9b-it',
      tokens: 8192,
    },
    {
      displayName: 'Mixtral MoE 8x7B Instruct',
      enabled: true,
      functionCall: false,
      id: 'accounts/fireworks/models/mixtral-8x7b-instruct',
      tokens: 32_768,
    },
    {
      displayName: 'Mixtral MoE 8x22B Instruct',
      enabled: true,
      functionCall: false,
      id: 'accounts/fireworks/models/mixtral-8x22b-instruct',
      tokens: 65_536,
    },
    {
      displayName: 'Mixtral MoE 8x7B Instruct (HF version)',
      functionCall: false,
      id: 'accounts/fireworks/models/mixtral-8x7b-instruct-hf',
      tokens: 32_768,
    },
    {
      displayName: 'Phi 3 Vision Instruct',
      enabled: true,
      functionCall: false,
      id: 'accounts/fireworks/models/phi-3-vision-128k-instruct',
      tokens: 8192,
      vision: true,
    },
    {
      displayName: 'Yi-Large',
      enabled: true,
      functionCall: false,
      id: 'accounts/yi-01-ai/models/yi-large',
      tokens: 32_768,
    },
    {
      displayName: 'StarCoder 7B',
      functionCall: false,
      id: 'accounts/fireworks/models/starcoder-7b',
      tokens: 8192,
    },
    {
      displayName: 'StarCoder 15.5B',
      functionCall: false,
      id: 'accounts/fireworks/models/starcoder-16b',
      tokens: 8192,
    },
    {
      displayName: 'MythoMax L2 13b',
      functionCall: false,
      id: 'accounts/fireworks/models/mythomax-l2-13b',
      tokens: 4096,
    },
  ],
  checkModel: 'accounts/fireworks/models/firefunction-v2',
  id: 'fireworksai',
  modelList: { showModelFetcher: true },
  name: 'Fireworks AI',
};

export default FireworksAI;
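
The FIREWORKSAI_MODEL_LIST variable added in the Dockerfiles and src/config/llm.ts can override this default card at deploy time. A minimal sketch, assuming the variable follows the same "-all / +model-id" add-and-remove syntax used by the other *_MODEL_LIST environment variables in this repository (the parsing code is not part of this diff):

// Hypothetical override, assuming the shared "-all,+model-id" MODEL_LIST syntax:
// hide the defaults and expose only two of the models defined above.
process.env.FIREWORKSAI_MODEL_LIST =
  '-all,+accounts/fireworks/models/firefunction-v2,+accounts/fireworks/models/llama-v3p1-70b-instruct';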
4 changes: 4 additions & 0 deletions src/config/modelProviders/index.ts
@@ -6,6 +6,7 @@ import AzureProvider from './azure';
import BaichuanProvider from './baichuan';
import BedrockProvider from './bedrock';
import DeepSeekProvider from './deepseek';
import FireworksAIProvider from './fireworksai';
import GoogleProvider from './google';
import GroqProvider from './groq';
import MinimaxProvider from './minimax';
@@ -40,6 +41,7 @@ export const LOBE_DEFAULT_MODEL_LIST: ChatModelCard[] = [
OllamaProvider.chatModels,
OpenRouterProvider.chatModels,
TogetherAIProvider.chatModels,
FireworksAIProvider.chatModels,
PerplexityProvider.chatModels,
AnthropicProvider.chatModels,
ZeroOneProvider.chatModels,
@@ -63,6 +65,7 @@ export const DEFAULT_MODEL_PROVIDER_LIST = [
GoogleProvider,
OpenRouterProvider,
TogetherAIProvider,
FireworksAIProvider,
BedrockProvider,
PerplexityProvider,
MinimaxProvider,
@@ -96,6 +99,7 @@ export { default as AzureProviderCard } from './azure';
export { default as BaichuanProviderCard } from './baichuan';
export { default as BedrockProviderCard } from './bedrock';
export { default as DeepSeekProviderCard } from './deepseek';
export { default as FireworksAIProviderCard } from './fireworksai';
export { default as GoogleProviderCard } from './google';
export { default as GroqProviderCard } from './groq';
export { default as MinimaxProviderCard } from './minimax';
5 changes: 5 additions & 0 deletions src/const/settings/llm.ts
@@ -4,6 +4,7 @@ import {
BaichuanProviderCard,
BedrockProviderCard,
DeepSeekProviderCard,
FireworksAIProviderCard,
GoogleProviderCard,
GroqProviderCard,
MinimaxProviderCard,
@@ -52,6 +53,10 @@ export const DEFAULT_LLM_CONFIG: UserModelProviderConfig = {
    enabled: false,
    enabledModels: filterEnabledModels(DeepSeekProviderCard),
  },
  fireworksai: {
    enabled: false,
    enabledModels: filterEnabledModels(FireworksAIProviderCard),
  },
  google: {
    enabled: false,
    enabledModels: filterEnabledModels(GoogleProviderCard),
7 changes: 7 additions & 0 deletions src/libs/agent-runtime/AgentRuntime.ts
@@ -9,6 +9,7 @@ import { LobeAzureOpenAI } from './azureOpenai';
import { LobeBaichuanAI } from './baichuan';
import { LobeBedrockAI, LobeBedrockAIParams } from './bedrock';
import { LobeDeepSeekAI } from './deepseek';
import { LobeFireworksAI } from './fireworksai';
import { LobeGoogleAI } from './google';
import { LobeGroq } from './groq';
import { LobeMinimaxAI } from './minimax';
@@ -121,6 +122,7 @@ class AgentRuntime {
baichuan: Partial<ClientOptions>;
bedrock: Partial<LobeBedrockAIParams>;
deepseek: Partial<ClientOptions>;
fireworksai: Partial<ClientOptions>;
google: { apiKey?: string; baseURL?: string };
groq: Partial<ClientOptions>;
minimax: Partial<ClientOptions>;
@@ -226,6 +228,11 @@
        break;
      }

      case ModelProvider.FireworksAI: {
        runtimeModel = new LobeFireworksAI(params.fireworksai);
        break;
      }

      case ModelProvider.ZeroOne: {
        runtimeModel = new LobeZeroOneAI(params.zeroone);
        break;
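
The LobeFireworksAI runtime imported above lives in src/libs/agent-runtime/fireworksai, one of the changed files not rendered in this excerpt. As a rough sketch only (not the project's actual implementation): Fireworks AI exposes an OpenAI-compatible Chat Completions endpoint at https://api.fireworks.ai/inference/v1, so the provider can be exercised with the standard OpenAI client, the key configured through FIREWORKSAI_API_KEY, and one of the model IDs defined in fireworksai.ts:

import OpenAI from 'openai';

// Sketch only: shows the OpenAI-compatible endpoint the new runtime targets,
// not the LobeFireworksAI class itself (that file is not shown in this diff).
const client = new OpenAI({
  apiKey: process.env.FIREWORKSAI_API_KEY,
  baseURL: 'https://api.fireworks.ai/inference/v1',
});

const completion = await client.chat.completions.create({
  messages: [{ content: 'Hello from LobeChat!', role: 'user' }],
  model: 'accounts/fireworks/models/firefunction-v2',
});

console.log(completion.choices[0]?.message.content);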