diff --git a/enjoy/package.json b/enjoy/package.json
index 356b051b..4859da25 100644
--- a/enjoy/package.json
+++ b/enjoy/package.json
@@ -87,7 +87,6 @@
     "@electron-forge/publisher-s3": "^7.4.0",
     "@hookform/resolvers": "^3.9.0",
     "@langchain/community": "^0.2.17",
-    "@langchain/google-genai": "^0.0.21",
     "@mozilla/readability": "^0.5.0",
     "@radix-ui/react-accordion": "^1.2.0",
     "@radix-ui/react-alert-dialog": "^1.1.1",
diff --git a/enjoy/src/i18n/en.json b/enjoy/src/i18n/en.json
index 51dfb415..2895cc3c 100644
--- a/enjoy/src/i18n/en.json
+++ b/enjoy/src/i18n/en.json
@@ -398,9 +398,12 @@
   "openaiConfigSaved": "OpenAI config saved",
   "openaiKeyRequired": "OpenAI key required",
   "baseUrl": "baseURL",
+  "customModels": "Custom models",
+  "customModelsDescription": "Customize your LLM models. Split by comma.",
   "model": "model",
   "key": "key",
   "leaveEmptyToUseDefault": "Leave empty to use default",
+  "openaiBaseUrlDescription": "Any endpoint that supports OpenAI style API.",
   "newConversation": "New conversation",
   "selectAiRole": "Select AI role",
   "chooseFromPresetGpts": "Choose from preset GPTs",
diff --git a/enjoy/src/i18n/zh-CN.json b/enjoy/src/i18n/zh-CN.json
index c4c9a630..93acb3a6 100644
--- a/enjoy/src/i18n/zh-CN.json
+++ b/enjoy/src/i18n/zh-CN.json
@@ -398,9 +398,12 @@
   "openaiConfigSaved": "OpenAI 配置已保存",
   "openaiKeyRequired": "未配置 OpenAI 密钥",
   "baseUrl": "接口地址",
+  "customModels": "自定义模型",
+  "customModelsDescription": "自定义模型,设置多个模型时用英文逗号隔开",
   "model": "模型",
   "key": "密钥",
   "leaveEmptyToUseDefault": "留空则使用默认值",
+  "openaiBaseUrlDescription": "支持所有兼容 OpenAI 风格的 API",
   "newConversation": "新对话",
   "selectAiRole": "选择 AI 角色",
   "chooseFromPresetGpts": "从预设的 GPTs 中选择",
diff --git a/enjoy/src/renderer/components/conversations/conversation-form/index.tsx b/enjoy/src/renderer/components/conversations/conversation-form/index.tsx
index cdbb2677..c3708d9a 100644
--- a/enjoy/src/renderer/components/conversations/conversation-form/index.tsx
+++ b/enjoy/src/renderer/components/conversations/conversation-form/index.tsx
@@ -59,9 +59,7 @@ export const ConversationForm = (props: {
 
   const conversationFormSchema = z.object({
     name: z.string().optional(),
-    engine: z
-      .enum(["enjoyai", "openai", "ollama", "googleGenerativeAi"])
-      .default("openai"),
+    engine: z.enum(["enjoyai", "openai", "ollama"]).default("openai"),
     configuration: z.object({
       type: z.enum(["gpt", "tts"]),
       model: z.string().optional(),
@@ -69,7 +67,7 @@ export const ConversationForm = (props: {
       roleDefinition: z.string().optional(),
       temperature: z.number().min(0).max(1).default(0.2),
       numberOfChoices: z.number().min(1).default(1),
-      maxTokens: z.number().min(-1).default(2000),
+      maxTokens: z.number().min(-1).default(2048),
       presencePenalty: z.number().min(-2).max(2).default(0),
       frequencyPenalty: z.number().min(-2).max(2).default(0),
       historyBufferSize: z.number().min(0).default(10),
@@ -102,6 +100,10 @@ export const ConversationForm = (props: {
       console.warn(`No ollama server found: ${e.message}`);
     }
 
+    if (openai.models) {
+      providers["openai"].models = openai.models.split(",");
+    }
+
     setGptProviders({ ...providers });
   };
 
diff --git a/enjoy/src/renderer/components/conversations/gpt-providers.tsx b/enjoy/src/renderer/components/conversations/gpt-providers.tsx
index f9b4536d..9010c0c2 100644
--- a/enjoy/src/renderer/components/conversations/gpt-providers.tsx
+++ b/enjoy/src/renderer/components/conversations/gpt-providers.tsx
@@ -53,18 +53,6 @@ export const GPT_PROVIDERS: { [key: string]: any } = {
       "tts",
     ],
   },
-  googleGenerativeAi: {
-    name: "Google Generative AI",
-    models: ["gemini-pro"],
-    configurable: [
-      "model",
-      "roleDefinition",
-      "temperature",
-      "maxTokens",
-      "historyBufferSize",
-      "tts",
-    ],
-  },
   ollama: {
     name: "Ollama",
     description: t("ensureYouHaveOllamaRunningLocallyAndHasAtLeastOneModel"),
diff --git a/enjoy/src/renderer/components/preferences/google-generative-ai-settings.tsx b/enjoy/src/renderer/components/preferences/google-generative-ai-settings.tsx
deleted file mode 100644
index b1a32636..00000000
--- a/enjoy/src/renderer/components/preferences/google-generative-ai-settings.tsx
+++ /dev/null
@@ -1,68 +0,0 @@
-import { t } from "i18next";
-import { Button, Input, Label, toast } from "@renderer/components/ui";
-import { AISettingsProviderContext } from "@renderer/context";
-import { useContext, useState, useRef, useEffect } from "react";
-
-export const GoogleGenerativeAiSettings = () => {
-  const { googleGenerativeAi, setGoogleGenerativeAi } = useContext(
-    AISettingsProviderContext
-  );
-  const [editing, setEditing] = useState(false);
-  const ref = useRef();
-
-  const handleSave = () => {
-    if (!ref.current) return;
-
-    setGoogleGenerativeAi({
-      key: ref.current.value,
-    });
-    setEditing(false);
-
-    toast.success(t("googleGenerativeAiKeySaved"));
-  };
-
-  useEffect(() => {
-    if (editing) {
-      ref.current?.focus();
-    }
-  }, [editing]);
-
-  return (
-    <div>
-      <div>Google Generative AI</div>
-      <div>
-        <Label>{t("key")}</Label>
-        <Input
-          ref={ref}
-          defaultValue={googleGenerativeAi?.key}
-          disabled={!editing}
-        />
-        {editing && (
-          <Button onClick={handleSave}>{t("save")}</Button>
-        )}
-      </div>
-      <Button onClick={() => setEditing(!editing)}>
-        {editing ? t("cancel") : t("edit")}
-      </Button>
-    </div>
-  );
-};
diff --git a/enjoy/src/renderer/components/preferences/index.ts b/enjoy/src/renderer/components/preferences/index.ts
index 6b2edeb9..beeabdf4 100644
--- a/enjoy/src/renderer/components/preferences/index.ts
+++ b/enjoy/src/renderer/components/preferences/index.ts
@@ -14,7 +14,6 @@
 export * from "./default-engine-settings";
 export * from "./openai-settings";
 export * from "./library-settings";
 export * from "./whisper-settings";
-export * from "./google-generative-ai-settings";
 export * from "./user-settings";
 export * from "./email-settings";
diff --git a/enjoy/src/renderer/components/preferences/openai-settings.tsx b/enjoy/src/renderer/components/preferences/openai-settings.tsx
index 8869cb42..66858b23 100644
--- a/enjoy/src/renderer/components/preferences/openai-settings.tsx
+++ b/enjoy/src/renderer/components/preferences/openai-settings.tsx
@@ -11,6 +11,7 @@ import {
   FormMessage,
   Input,
   toast,
+  FormDescription,
 } from "@renderer/components/ui";
 import { AISettingsProviderContext } from "@renderer/context";
 import { useContext, useState } from "react";
@@ -22,6 +23,7 @@ export const OpenaiSettings = () => {
   const openAiConfigSchema = z.object({
     key: z.string().optional(),
     baseUrl: z.string().optional(),
+    models: z.string().optional(),
   });
 
   const form = useForm<z.infer<typeof openAiConfigSchema>>({
@@ -29,6 +31,7 @@ export const OpenaiSettings = () => {
     values: {
       key: openai?.key,
       baseUrl: openai?.baseUrl,
+      models: openai?.models,
     },
   });
 
@@ -82,6 +85,32 @@ export const OpenaiSettings = () => {
                   onChange={field.onChange}
                 />
+                <FormDescription>
+                  {t("openaiBaseUrlDescription")}
+                </FormDescription>
                 <FormMessage />
               </FormItem>
             )}
           />
+          <FormField
+            control={form.control}
+            name="models"
+            render={({ field }) => (
+              <FormItem>
+                <FormLabel>{t("customModels")}:</FormLabel>
+                <Input
+                  placeholder={t("leaveEmptyToUseDefault")}
+                  defaultValue={field.value}
+                  onChange={field.onChange}
+                />
+                <FormDescription>
+                  {t("customModelsDescription")}
+                </FormDescription>
+                <FormMessage />
+              </FormItem>
+            )}
+          />
diff --git a/enjoy/src/renderer/components/preferences/preferences.tsx b/enjoy/src/renderer/components/preferences/preferences.tsx
index b323fd8c..6c793967 100644
--- a/enjoy/src/renderer/components/preferences/preferences.tsx
+++ b/enjoy/src/renderer/components/preferences/preferences.tsx
@@ -12,7 +12,6 @@ import {
   WhisperSettings,
   OpenaiSettings,
   ProxySettings,
-  GoogleGenerativeAiSettings,
   ResetSettings,
   ResetAllSettings,
   NativeLanguageSettings,
@@ -42,8 +41,6 @@ export const Preferences = () => {
-            <GoogleGenerativeAiSettings />
-
           ),
         },
diff --git a/enjoy/src/renderer/context/ai-settings-provider.tsx b/enjoy/src/renderer/context/ai-settings-provider.tsx
index aaf25276..97889d31 100644
--- a/enjoy/src/renderer/context/ai-settings-provider.tsx
+++ b/enjoy/src/renderer/context/ai-settings-provider.tsx
@@ -8,8 +8,6 @@ type AISettingsProviderState = {
   refreshWhisperConfig?: () => void;
   openai?: LlmProviderType;
   setOpenai?: (config: LlmProviderType) => void;
-  googleGenerativeAi?: LlmProviderType;
-  setGoogleGenerativeAi?: (config: LlmProviderType) => void;
   setGptEngine?: (engine: GptEngineSettingType) => void;
   currentEngine?: GptEngineSettingType;
 };
@@ -31,8 +29,6 @@ export const AISettingsProvider = ({
     },
   });
   const [openai, setOpenai] = useState<LlmProviderType>(null);
-  const [googleGenerativeAi, setGoogleGenerativeAi] =
-    useState<LlmProviderType>(null);
   const [whisperConfig, setWhisperConfig] = useState(null);
   const { EnjoyApp, libraryPath, user, apiUrl } = useContext(
     AppSettingsProviderContext
   );
@@ -73,15 +69,6 @@ export const AISettingsProvider = ({
       setOpenai(Object.assign({ name: "openai" }, _openai));
     }
 
-    const _googleGenerativeAi = await EnjoyApp.settings.getLlm(
-      "googleGenerativeAi"
-    );
-    if (_googleGenerativeAi) {
-      setGoogleGenerativeAi(
-        Object.assign({ name: "googleGenerativeAi" }, _googleGenerativeAi)
-      );
-    }
-
     const _defaultEngine = await EnjoyApp.settings.getDefaultEngine();
     const _gptEngine = await EnjoyApp.settings.getGptEngine();
     if (_gptEngine) {
@@ -131,11 +118,8 @@ export const AISettingsProvider = ({
       case "openai":
         setOpenai(Object.assign({ name: "openai" }, _config));
         break;
-      case "googleGenerativeAi":
-        setGoogleGenerativeAi(
-          Object.assign({ name: "googleGenerativeAi" }, _config)
-        );
-        break;
+      default:
+        throw new Error("Unsupported LLM provider");
     }
   };
 
@@ -152,6 +136,7 @@ export const AISettingsProvider = ({
           ? Object.assign(gptEngine, {
               key: openai.key,
               baseUrl: openai.baseUrl,
+              models: openai.models,
             })
           : Object.assign(gptEngine, {
               key: user?.accessToken,
               baseUrl: `${apiUrl}/api/ai`,
             }),
         openai,
         setOpenai: (config: LlmProviderType) => handleSetLlm("openai", config),
-        googleGenerativeAi,
-        setGoogleGenerativeAi: (config: LlmProviderType) =>
-          handleSetLlm("googleGenerativeAi", config),
         whisperConfig,
         refreshWhisperConfig,
         setWhisperModel,
diff --git a/enjoy/src/renderer/hooks/use-conversation.tsx b/enjoy/src/renderer/hooks/use-conversation.tsx
index 8b58dac8..b396a99e 100644
--- a/enjoy/src/renderer/hooks/use-conversation.tsx
+++ b/enjoy/src/renderer/hooks/use-conversation.tsx
@@ -7,7 +7,6 @@ import { ChatMessageHistory, BufferMemory } from "langchain/memory/index";
 import { ConversationChain } from "langchain/chains";
 import { ChatOpenAI } from "@langchain/openai";
 import { ChatOllama } from "@langchain/community/chat_models/ollama";
-import { ChatGoogleGenerativeAI } from "@langchain/google-genai";
 import {
   ChatPromptTemplate,
   MessagesPlaceholder,
 } from "@langchain/core/prompts";
@@ -22,7 +21,7 @@ export const useConversation = () => {
   const { EnjoyApp, webApi, user, apiUrl, learningLanguage } = useContext(
     AppSettingsProviderContext
   );
-  const { openai, googleGenerativeAi, currentEngine } = useContext(
+  const { openai, currentEngine } = useContext(
     AISettingsProviderContext
   );
 
@@ -76,17 +75,6 @@ export const useConversation = () => {
         presencePenalty,
         maxRetries: 2,
       });
-    } else if (conversation.engine === "googleGenerativeAi") {
-      if (!googleGenerativeAi)
-        throw new Error("Google Generative AI API key is required");
-
-      return new ChatGoogleGenerativeAI({
-        apiKey: googleGenerativeAi.key,
-        modelName: model,
-        temperature: temperature,
-        maxOutputTokens: maxTokens,
-        maxRetries: 2,
-      });
     }
   };
 
diff --git a/enjoy/src/types/conversation.d.ts b/enjoy/src/types/conversation.d.ts
index 51563843..ad09de79 100644
--- a/enjoy/src/types/conversation.d.ts
+++ b/enjoy/src/types/conversation.d.ts
@@ -1,7 +1,7 @@
 type ConversationType = {
   id: string;
   type: "gpt" | "tts";
-  engine: "enjoyai" | "openai" | "ollama" | "googleGenerativeAi";
+  engine: "enjoyai" | "openai" | "ollama";
   name: string;
   configuration: { [key: string]: any };
   model: string;
diff --git a/enjoy/src/types/index.d.ts b/enjoy/src/types/index.d.ts
index 060937ae..9d5e5fb8 100644
--- a/enjoy/src/types/index.d.ts
+++ b/enjoy/src/types/index.d.ts
@@ -5,13 +5,14 @@ declare const MAIN_WINDOW_VITE_DEV_SERVER_URL: string;
 declare const MAIN_WINDOW_VITE_NAME: string;
 declare module "compromise-paragraphs";
 
-type SupportedLlmProviderType = "enjoyai" | "openai" | "googleGenerativeAi";
+type SupportedLlmProviderType = "enjoyai" | "openai";
 
 type LlmProviderType = {
-  name?: "enjoyai" | "openai" | "googleGenerativeAi";
+  name?: "enjoyai" | "openai";
   key?: string;
   model?: string;
   baseUrl?: string;
+  models?: string;
 };
 
 type DownloadStateType = {
diff --git a/yarn.lock b/yarn.lock
index db0e9962..d2fec293 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -3302,13 +3302,6 @@ __metadata:
   languageName: node
   linkType: hard
 
-"@google/generative-ai@npm:^0.7.0":
-  version: 0.7.1
-  resolution: "@google/generative-ai@npm:0.7.1"
-  checksum: 10c0/0632dc794fb6186a533381e99cba4d839d0032bdded42458a9c95d17e1c9c45494c8574ba50ec8a97da92e2213518ad811f882c55bbe18c70f660bf7796c4376
-  languageName: node
-  linkType: hard
-
 "@grpc/grpc-js@npm:^1.7.1":
   version: 1.10.9
   resolution: "@grpc/grpc-js@npm:1.10.9"
@@ -4004,17 +3997,6 @@ __metadata:
   languageName: node
   linkType: hard
 
-"@langchain/google-genai@npm:^0.0.21":
-  version: 0.0.21
-  resolution: "@langchain/google-genai@npm:0.0.21"
-  dependencies:
-    "@google/generative-ai": "npm:^0.7.0"
-    "@langchain/core": "npm:>=0.2.9 <0.3.0"
-    zod-to-json-schema: "npm:^3.22.4"
-  checksum: 10c0/d9331b2bc3c919e2e4d1c7406986094fc38242471d6e7b6de1807179f9bcff3fcbd7fa360756782be63641451dd649d290ecf958b048e9a9f41ebea3b78311ce
-  languageName: node
-  linkType: hard
-
 "@langchain/openai@npm:>=0.1.0 <0.3.0":
   version: 0.2.0
   resolution: "@langchain/openai@npm:0.2.0"
@@ -14307,7 +14289,6 @@
     "@electron/fuses": "npm:^1.8.0"
     "@hookform/resolvers": "npm:^3.9.0"
     "@langchain/community": "npm:^0.2.17"
-    "@langchain/google-genai": "npm:^0.0.21"
     "@mozilla/readability": "npm:^0.5.0"
     "@playwright/test": "npm:^1.45.2"
     "@radix-ui/react-accordion": "npm:^1.2.0"
@@ -27861,7 +27842,7 @@
   languageName: node
   linkType: hard
 
-"zod-to-json-schema@npm:^3.22.3, zod-to-json-schema@npm:^3.22.4, zod-to-json-schema@npm:^3.22.5":
+"zod-to-json-schema@npm:^3.22.3, zod-to-json-schema@npm:^3.22.5":
   version: 3.23.0
   resolution: "zod-to-json-schema@npm:3.23.0"
   peerDependencies: