Refactor components code (#538)

* remove deprecated code

* refactor code

* refactor components code

* fix renderer tests
an-lee
2024-04-19 10:46:04 +08:00
committed by GitHub
parent 5f3ee54bb5
commit e4f5bdcfb9
30 changed files with 509 additions and 1147 deletions

View File

@@ -113,45 +113,12 @@ class ConversationsHandler {
});
}
private async ask(
event: IpcMainEvent,
id: string,
params: {
messageId: string;
content?: string;
file?: string;
blob?: {
type: string;
arrayBuffer: ArrayBuffer;
};
}
) {
const conversation = await Conversation.findOne({
where: { id },
});
if (!conversation) {
event.sender.send("on-notification", {
type: "error",
message: t("models.conversation.notFound"),
});
return;
}
return conversation.ask(params).catch((err) => {
event.sender.send("on-notification", {
type: "error",
message: err.message,
});
});
}
register() {
ipcMain.handle("conversations-find-all", this.findAll);
ipcMain.handle("conversations-find-one", this.findOne);
ipcMain.handle("conversations-create", this.create);
ipcMain.handle("conversations-update", this.update);
ipcMain.handle("conversations-destroy", this.destroy);
ipcMain.handle("conversations-ask", this.ask);
}
}
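The removed ask handler followed the same ipcMain.handle / ipcRenderer.invoke round trip as the handlers that stay registered above. A minimal sketch of that pattern, with a hypothetical "example-echo" channel; the main/preload/renderer split is marked by comments:

// main.ts (main process)
import { ipcMain } from "electron";

ipcMain.handle("example-echo", async (_event, text: string) => {
  return text.toUpperCase();
});

// preload.ts
import { contextBridge, ipcRenderer } from "electron";

contextBridge.exposeInMainWorld("__EXAMPLE__", {
  echo: (text: string): Promise<string> => ipcRenderer.invoke("example-echo", text),
});

// renderer: await window.__EXAMPLE__.echo("hi") resolves to "HI"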

View File

@@ -1,4 +1,3 @@
import { app } from "electron";
import {
AfterCreate,
AfterDestroy,
@@ -13,28 +12,8 @@ import {
AllowNull,
} from "sequelize-typescript";
import { Message, Speech } from "@main/db/models";
import { ChatMessageHistory, BufferMemory } from "langchain/memory";
import { ConversationChain } from "langchain/chains";
import { ChatOpenAI } from "@langchain/openai";
import { ChatOllama } from "@langchain/community/chat_models/ollama";
import { ChatGoogleGenerativeAI } from "@langchain/google-genai";
import {
ChatPromptTemplate,
MessagesPlaceholder,
} from "@langchain/core/prompts";
import { type Generation } from "langchain/dist/schema";
import settings from "@main/settings";
import db from "@main/db";
import mainWindow from "@main/window";
import { t } from "i18next";
import log from "@main/logger";
import fs from "fs-extra";
import path from "path";
import Ffmpeg from "@main/ffmpeg";
import whisper from "@main/whisper";
import { hashFile } from "@main/utils";
import { WEB_API_URL } from "@/constants";
import proxyAgent from "@main/proxy-agent";
const logger = log.scope("db/models/conversation");
@Table({
@@ -68,7 +47,7 @@ export class Conversation extends Model<Conversation> {
} & { [key: string]: any };
@Column(DataType.VIRTUAL)
get type(): 'gpt' | 'tts' {
get type(): "gpt" | "tts" {
return this.getDataValue("configuration").type || "gpt";
}
@@ -117,263 +96,4 @@ export class Conversation extends Model<Conversation> {
record: conversation.toJSON(),
});
}
// convert messages to chat history
async chatHistory() {
const chatMessageHistory = new ChatMessageHistory();
let limit = this.configuration.historyBufferSize;
if (!limit || limit < 0) {
limit = 0;
}
const _messages = await Message.findAll({
where: { conversationId: this.id },
order: [["createdAt", "DESC"]],
limit,
});
logger.debug(_messages);
_messages
.sort((a, b) => a.createdAt - b.createdAt)
.forEach((message) => {
if (message.role === "user") {
chatMessageHistory.addUserMessage(message.content);
} else if (message.role === "assistant") {
chatMessageHistory.addAIChatMessage(message.content);
}
});
return chatMessageHistory;
}
// choose llm based on engine
llm() {
const { httpAgent, fetch } = proxyAgent();
if (this.engine === "enjoyai") {
return new ChatOpenAI(
{
openAIApiKey: settings.getSync("user.accessToken") as string,
modelName: this.model,
configuration: {
baseURL: `${process.env.WEB_API_URL || WEB_API_URL}/api/ai`,
},
temperature: this.configuration.temperature,
n: this.configuration.numberOfChoices,
maxTokens: this.configuration.maxTokens,
frequencyPenalty: this.configuration.frequencyPenalty,
presencePenalty: this.configuration.presencePenalty,
},
{
httpAgent,
// @ts-ignore
fetch,
}
);
} else if (this.engine === "openai") {
const key = settings.getSync("openai.key") as string;
if (!key) {
throw new Error(t("openaiKeyRequired"));
}
return new ChatOpenAI(
{
openAIApiKey: key,
modelName: this.model,
configuration: {
baseURL: this.configuration.baseUrl,
},
temperature: this.configuration.temperature,
n: this.configuration.numberOfChoices,
maxTokens: this.configuration.maxTokens,
frequencyPenalty: this.configuration.frequencyPenalty,
presencePenalty: this.configuration.presencePenalty,
},
{
httpAgent,
// @ts-ignore
fetch,
}
);
} else if (this.engine === "googleGenerativeAi") {
const key = settings.getSync("googleGenerativeAi.key") as string;
if (!key) {
throw new Error(t("googleGenerativeAiKeyRequired"));
}
return new ChatGoogleGenerativeAI({
apiKey: key,
modelName: this.model,
temperature: this.configuration.temperature,
maxOutputTokens: this.configuration.maxTokens,
});
} else if (this.engine == "ollama") {
return new ChatOllama({
baseUrl: this.configuration.baseUrl,
model: this.model,
temperature: this.configuration.temperature,
frequencyPenalty: this.configuration.frequencyPenalty,
presencePenalty: this.configuration.presencePenalty,
});
}
}
// choose memory based on conversation scenario
async memory() {
const chatHistory = await this.chatHistory();
return new BufferMemory({
chatHistory,
memoryKey: "history",
returnMessages: true,
});
}
chatPrompt() {
return ChatPromptTemplate.fromMessages([
["system", this.roleDefinition],
new MessagesPlaceholder("history"),
["human", "{input}"],
]);
}
async chain() {
return new ConversationChain({
llm: this.llm(),
memory: await this.memory(),
prompt: this.chatPrompt(),
verbose: app.isPackaged ? false : true,
});
}
async ask(params: {
messageId?: string;
content?: string;
file?: string;
blob?: {
type: string;
arrayBuffer: ArrayBuffer;
};
}) {
let { content, file, blob, messageId } = params;
if (!content && !blob) {
throw new Error(t("models.conversation.contentRequired"));
}
let md5 = "";
let extname = ".wav";
if (file) {
extname = path.extname(file);
md5 = await hashFile(file, { algo: "md5" });
fs.copySync(
file,
path.join(settings.userDataPath(), "speeches", `${md5}${extname}`)
);
} else if (blob) {
const filename = `${Date.now()}${extname}`;
const format = blob.type.split("/")[1];
const tempfile = path.join(
settings.cachePath(),
`${Date.now()}.${format}`
);
await fs.outputFile(tempfile, Buffer.from(blob.arrayBuffer));
const wavFile = path.join(settings.userDataPath(), "speeches", filename);
const ffmpeg = new Ffmpeg();
await ffmpeg.convertToWav(tempfile, wavFile);
md5 = await hashFile(wavFile, { algo: "md5" });
fs.renameSync(
wavFile,
path.join(path.dirname(wavFile), `${md5}${extname}`)
);
const previousMessage = await Message.findOne({
where: { conversationId: this.id },
order: [["createdAt", "DESC"]],
});
let prompt = "";
if (previousMessage?.content) {
prompt = previousMessage.content.replace(/"/g, '\\"');
}
const { transcription } = await whisper.transcribe(wavFile, {
force: true,
extra: [`--prompt "${prompt}"`],
});
content = transcription
.map((t: TranscriptionResultSegmentType) => t.text)
.join(" ")
.trim();
logger.debug("transcription", transcription);
}
const chain = await this.chain();
let response: Generation[] = [];
const result = await chain.call({ input: content }, [
{
handleLLMEnd: async (output) => {
response = output.generations[0];
},
},
]);
logger.debug("LLM result:", result);
if (!response) {
throw new Error(t("models.conversation.failedToGenerateResponse"));
}
const transaction = await db.connection.transaction();
await Message.create(
{
id: messageId,
conversationId: this.id,
role: "user",
content,
},
{
include: [Conversation],
transaction,
}
);
const replies = await Promise.all(
response.map(async (generation) => {
if (!generation?.text) {
throw new Error(t("models.conversation.failedToGenerateResponse"));
}
return await Message.create(
{
conversationId: this.id,
role: "assistant",
content: generation.text,
},
{
include: [Conversation],
transaction,
}
);
})
);
if (md5) {
await Speech.create(
{
sourceId: messageId,
sourceType: "message",
text: content,
md5,
extname,
configuration: {
engine: "Human",
},
},
{
include: [Message],
transaction,
}
);
}
await transaction.commit();
return replies.map((reply) => reply.toJSON());
}
}
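The removed llm(), memory(), chatPrompt(), and chain() methods composed a LangChain ConversationChain. A condensed sketch of that composition, using only the APIs the deleted code imported (langchain v0.1-era packages); the OpenAI engine stands in for the other providers:

import { ChatOpenAI } from "@langchain/openai";
import { ConversationChain } from "langchain/chains";
import { BufferMemory, ChatMessageHistory } from "langchain/memory";
import {
  ChatPromptTemplate,
  MessagesPlaceholder,
} from "@langchain/core/prompts";

async function buildChain(roleDefinition: string, openAIApiKey: string) {
  // prompt = system role definition + replayed history + the new user input
  const prompt = ChatPromptTemplate.fromMessages([
    ["system", roleDefinition],
    new MessagesPlaceholder("history"),
    ["human", "{input}"],
  ]);
  // memoryKey must match the MessagesPlaceholder name above
  const memory = new BufferMemory({
    chatHistory: new ChatMessageHistory(),
    memoryKey: "history",
    returnMessages: true,
  });
  const llm = new ChatOpenAI({ openAIApiKey, modelName: "gpt-3.5-turbo" });
  return new ConversationChain({ llm, memory, prompt });
}

// const chain = await buildChain("You are a tutor.", key);
// const result = await chain.call({ input: "Hello" });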

View File

@@ -317,20 +317,6 @@ contextBridge.exposeInMainWorld("__ENJOY_APP__", {
destroy: (id: string) => {
return ipcRenderer.invoke("conversations-destroy", id);
},
ask: (
id: string,
params: {
messageId?: string;
content?: string;
file?: string;
blob?: {
type: string;
arrayBuffer: ArrayBuffer;
};
}
) => {
return ipcRenderer.invoke("conversations-ask", id, params);
},
},
messages: {
findAll: (params: { where?: any; offset?: number; limit?: number }) => {

View File

@@ -58,7 +58,8 @@ export const AudibleBooksSegment = () => {
EnjoyApp.providers.audible
.bestsellers()
.then(({ books }) => {
.then((res) => {
const { books = [] } = res || {};
const filteredBooks =
books?.filter((book) => book.language === "English") || [];
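The fix above guards against an undefined response before destructuring. The same pattern in isolation (BookType comes from the app's global types; the response shape is an assumption based on the surrounding code):

type BestsellersResponse = { books?: BookType[] } | null | undefined;

function pickEnglishBooks(res: BestsellersResponse): BookType[] {
  // default to [] when the response or its books field is missing
  const { books = [] } = res || {};
  return books.filter((book) => book.language === "English");
}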

View File

@@ -1,53 +0,0 @@
import { t } from "i18next";
import { useContext } from "react";
import { Button, Input, Label } from "@renderer/components/ui";
import { AppSettingsProviderContext } from "@renderer/context";
export const ChooseLibraryPathInput = () => {
const { libraryPath, setLibraryPath, EnjoyApp } = useContext(
AppSettingsProviderContext
);
const handleChooseLibraryPath = async () => {
const filePaths = await EnjoyApp.dialog.showOpenDialog({
properties: ["openDirectory"],
});
if (filePaths) {
EnjoyApp.settings.setLibrary(filePaths[0]);
setLibraryPath(await EnjoyApp.settings.getLibrary());
}
};
const openLibraryPath = async () => {
if (libraryPath) {
await EnjoyApp.shell.openPath(libraryPath);
}
};
return (
<div className="grid gap-1.5 w-full max-w-sm">
<Label htmlFor="library-path">{t("libraryPath")}</Label>
<div className="flex items-center space-x-2">
<Input
id="library-path"
value={libraryPath}
disabled
className="cursor-pointer!"
/>
<div className="flex items-center space-x-2">
<Button
variant="secondary"
onClick={openLibraryPath}
className="min-w-max"
>
{t("open")}
</Button>
<Button onClick={handleChooseLibraryPath} className="min-w-max">
{t("select")}
</Button>
</div>
</div>
</div>
);
};

View File

@@ -28,15 +28,15 @@ import {
SelectContent,
SelectItem,
Textarea,
toast,
} from "@renderer/components/ui";
import { useState, useEffect, useContext } from "react";
import {
AppSettingsProviderContext,
AISettingsProviderContext,
} from "@renderer/context";
import { LoaderIcon, Share2Icon } from "lucide-react";
import { LoaderIcon } from "lucide-react";
import { useNavigate } from "react-router-dom";
import { GPT_PROVIDERS, TTS_PROVIDERS, GPTShareButton } from "@renderer/components";
const conversationFormSchema = z.object({
name: z.string().optional(),
@@ -73,7 +73,7 @@ export const ConversationForm = (props: {
}) => {
const { conversation, onFinish } = props;
const [submitting, setSubmitting] = useState<boolean>(false);
const [providers, setProviders] = useState<any>(LLM_PROVIDERS);
const [providers, setProviders] = useState<any>(GPT_PROVIDERS);
const { EnjoyApp } = useContext(AppSettingsProviderContext);
const { openai } = useContext(AISettingsProviderContext);
const navigate = useNavigate();
@@ -126,20 +126,20 @@ export const ConversationForm = (props: {
// @ts-ignore
values: conversation?.id
  ? {
      name: conversation.name,
      engine: conversation.engine,
      configuration: {
        type: conversation.configuration.type || "gpt",
        ...conversation.configuration,
      },
    }
  : {
      name: defaultConfig.name,
      engine: defaultConfig.engine,
      configuration: {
        ...defaultConfig.configuration,
      },
    },
});
const onSubmit = async (data: z.infer<typeof conversationFormSchema>) => {
@@ -149,7 +149,7 @@ export const ConversationForm = (props: {
Object.keys(configuration).forEach((key) => {
if (key === "type") return;
if (!LLM_PROVIDERS[engine]?.configurable.includes(key)) {
if (!GPT_PROVIDERS[engine]?.configurable.includes(key)) {
// @ts-ignore
delete configuration[key];
}
@@ -161,12 +161,12 @@ export const ConversationForm = (props: {
// use default base url if not set
if (!configuration.baseUrl) {
configuration.baseUrl = LLM_PROVIDERS[engine]?.baseUrl;
configuration.baseUrl = GPT_PROVIDERS[engine]?.baseUrl;
}
// use default base url if not set
if (!configuration.tts.baseUrl) {
configuration.tts.baseUrl = LLM_PROVIDERS[engine]?.baseUrl;
configuration.tts.baseUrl = GPT_PROVIDERS[engine]?.baseUrl;
}
if (conversation?.id) {
@@ -275,7 +275,7 @@ export const ConversationForm = (props: {
<SelectContent>
{Object.keys(providers)
.filter((key) =>
LLM_PROVIDERS[key].types.includes(
GPT_PROVIDERS[key].types.includes(
form.watch("configuration.type")
)
)
@@ -343,163 +343,163 @@ export const ConversationForm = (props: {
)}
/>
{LLM_PROVIDERS[form.watch("engine")]?.configurable.includes(
{GPT_PROVIDERS[form.watch("engine")]?.configurable.includes(
"temperature"
) && (
<FormField
control={form.control}
name="configuration.temperature"
render={({ field }) => (
<FormItem>
<FormLabel>
{t("models.conversation.temperature")}
</FormLabel>
<Input
type="number"
min="0"
max="1.0"
step="0.1"
value={field.value}
onChange={(event) => {
field.onChange(
event.target.value
? parseFloat(event.target.value)
: 0.0
);
}}
/>
<FormDescription>
{t("models.conversation.temperatureDescription")}
</FormDescription>
<FormMessage />
</FormItem>
)}
/>
)}
{LLM_PROVIDERS[form.watch("engine")]?.configurable.includes(
{GPT_PROVIDERS[form.watch("engine")]?.configurable.includes(
"maxTokens"
) && (
<FormField
control={form.control}
name="configuration.maxTokens"
render={({ field }) => (
<FormItem>
<FormLabel>
{t("models.conversation.maxTokens")}
</FormLabel>
<Input
type="number"
min="0"
value={field.value}
onChange={(event) => {
if (!event.target.value) return;
field.onChange(parseInt(event.target.value));
}}
/>
<FormDescription>
{t("models.conversation.maxTokensDescription")}
</FormDescription>
<FormMessage />
</FormItem>
)}
/>
)}
{LLM_PROVIDERS[form.watch("engine")]?.configurable.includes(
{GPT_PROVIDERS[form.watch("engine")]?.configurable.includes(
"presencePenalty"
) && (
<FormField
control={form.control}
name="configuration.presencePenalty"
render={({ field }) => (
<FormItem>
<FormLabel>
{t("models.conversation.presencePenalty")}
</FormLabel>
<Input
type="number"
min="-2"
step="0.1"
max="2"
value={field.value}
onChange={(event) => {
if (!event.target.value) return;
field.onChange(parseInt(event.target.value));
}}
/>
<FormDescription>
{t("models.conversation.presencePenaltyDescription")}
</FormDescription>
<FormMessage />
</FormItem>
)}
/>
)}
{LLM_PROVIDERS[form.watch("engine")]?.configurable.includes(
{GPT_PROVIDERS[form.watch("engine")]?.configurable.includes(
"frequencyPenalty"
) && (
<FormField
control={form.control}
name="configuration.frequencyPenalty"
render={({ field }) => (
<FormItem>
<FormLabel>
{t("models.conversation.frequencyPenalty")}
</FormLabel>
<Input
type="number"
min="-2"
step="0.1"
max="2"
value={field.value}
onChange={(event) => {
if (!event.target.value) return;
field.onChange(parseInt(event.target.value));
}}
/>
<FormDescription>
{t("models.conversation.frequencyPenaltyDescription")}
</FormDescription>
<FormMessage />
</FormItem>
)}
/>
)}
{LLM_PROVIDERS[form.watch("engine")]?.configurable.includes(
{GPT_PROVIDERS[form.watch("engine")]?.configurable.includes(
"numberOfChoices"
) && (
<FormField
control={form.control}
name="configuration.numberOfChoices"
render={({ field }) => (
<FormItem>
<FormLabel>
{t("models.conversation.numberOfChoices")}
</FormLabel>
<Input
type="number"
min="1"
step="1.0"
value={field.value}
onChange={(event) => {
field.onChange(
event.target.value
? parseInt(event.target.value)
: 1.0
);
}}
/>
<FormDescription>
{t("models.conversation.numberOfChoicesDescription")}
</FormDescription>
<FormMessage />
</FormItem>
)}
/>
)}
<FormField
control={form.control}
@@ -531,28 +531,28 @@ export const ConversationForm = (props: {
)}
/>
{LLM_PROVIDERS[form.watch("engine")]?.configurable.includes(
{GPT_PROVIDERS[form.watch("engine")]?.configurable.includes(
"baseUrl"
) && (
<FormField
control={form.control}
name="configuration.baseUrl"
render={({ field }) => (
<FormItem>
<FormLabel>
{t("models.conversation.baseUrl")}
</FormLabel>
<Input
{...field}
placeholder={t(
"models.conversation.baseUrlDescription"
)}
/>
<FormMessage />
</FormItem>
)}
/>
)}
</>
)}
@@ -588,95 +588,95 @@ export const ConversationForm = (props: {
{TTS_PROVIDERS[
form.watch("configuration.tts.engine")
]?.configurable.includes("model") && (
<FormField
control={form.control}
name="configuration.tts.model"
render={({ field }) => (
<FormItem>
<FormLabel>{t("models.conversation.ttsModel")}</FormLabel>
<Select
onValueChange={field.onChange}
defaultValue={field.value}
value={field.value}
>
<FormControl>
<SelectTrigger>
<SelectValue placeholder={t("selectTtsModel")} />
</SelectTrigger>
</FormControl>
<SelectContent>
{(
TTS_PROVIDERS[form.watch("configuration.tts.engine")]
?.models || []
).map((model: string) => (
<SelectItem key={model} value={model}>
{model}
</SelectItem>
))}
</SelectContent>
</Select>
<FormMessage />
</FormItem>
)}
/>
)}
{TTS_PROVIDERS[
form.watch("configuration.tts.engine")
]?.configurable.includes("voice") && (
<FormField
control={form.control}
name="configuration.tts.voice"
render={({ field }) => (
<FormItem>
<FormLabel>{t("models.conversation.ttsVoice")}</FormLabel>
<Select
onValueChange={field.onChange}
defaultValue={field.value}
value={field.value}
>
<FormControl>
<SelectTrigger>
<SelectValue placeholder={t("selectTtsVoice")} />
</SelectTrigger>
</FormControl>
<SelectContent>
{(
TTS_PROVIDERS[form.watch("configuration.tts.engine")]
?.voices || []
).map((voice: string) => (
<SelectItem key={voice} value={voice}>
<span className="capitalize">{voice}</span>
</SelectItem>
))}
</SelectContent>
</Select>
<FormMessage />
</FormItem>
)}
/>
)}
{TTS_PROVIDERS[
form.watch("configuration.tts.engine")
]?.configurable.includes("baseUrl") && (
<FormField
control={form.control}
name="configuration.tts.baseUrl"
render={({ field }) => (
<FormItem>
<FormLabel>{t("models.conversation.ttsBaseUrl")}</FormLabel>
<Input
{...field}
placeholder={t(
"models.conversation.ttsBaseUrlDescription"
)}
/>
<FormMessage />
</FormItem>
)}
/>
)}
</div>
</ScrollArea>
@@ -730,196 +730,3 @@ export const ConversationForm = (props: {
</Form>
);
};
export const LLM_PROVIDERS: { [key: string]: any } = {
enjoyai: {
name: "EnjoyAI",
models: [
"gpt-3.5-turbo-0125",
"gpt-3.5-turbo",
"gpt-3.5-turbo-1106",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-instruct",
"gpt-4-turbo",
"gpt-4-turbo-2024-04-09",
"gpt-4-0125-preview",
"gpt-4-turbo-preview",
"gpt-4-1106-preview",
"gpt-4-vision-preview",
"gpt-4",
"gpt-4-32k",
"gpt-4-0613",
"gpt-4-32k-0613",
],
configurable: [
"model",
"roleDefinition",
"temperature",
"numberOfChoices",
"maxTokens",
"frequencyPenalty",
"presencePenalty",
"historyBufferSize",
"tts",
],
types: ["gpt", "tts"],
},
openai: {
name: "OpenAI",
description: t("youNeedToSetupApiKeyBeforeUsingOpenAI"),
models: [
"gpt-3.5-turbo-0125",
"gpt-3.5-turbo",
"gpt-3.5-turbo-1106",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-instruct",
"gpt-4-turbo",
"gpt-4-turbo-2024-04-09",
"gpt-4-0125-preview",
"gpt-4-turbo-preview",
"gpt-4-1106-preview",
"gpt-4-vision-preview",
"gpt-4",
"gpt-4-32k",
"gpt-4-0613",
"gpt-4-32k-0613",
],
configurable: [
"model",
"baseUrl",
"roleDefinition",
"temperature",
"numberOfChoices",
"maxTokens",
"frequencyPenalty",
"presencePenalty",
"historyBufferSize",
"tts",
],
types: ["gpt", "tts"],
},
googleGenerativeAi: {
name: "Google Generative AI",
models: ["gemini-pro"],
configurable: [
"model",
"roleDefinition",
"temperature",
"maxTokens",
"historyBufferSize",
"tts",
],
types: ["gpt"],
},
ollama: {
name: "Ollama",
description: t("ensureYouHaveOllamaRunningLocallyAndHasAtLeastOneModel"),
baseUrl: "http://localhost:11434",
models: [],
configurable: [
"model",
"baseUrl",
"roleDefinition",
"temperature",
"maxTokens",
"historyBufferSize",
"frequencyPenalty",
"presencePenalty",
"tts",
],
types: ["gpt"],
},
};
export const TTS_PROVIDERS: { [key: string]: any } = {
enjoyai: {
name: "EnjoyAI",
models: ["tts-1", "tts-1-hd"],
voices: ["alloy", "echo", "fable", "onyx", "nova", "shimmer"],
configurable: ["voice"],
},
openai: {
name: "OpenAI",
description: t("youNeedToSetupApiKeyBeforeUsingOpenAI"),
models: ["tts-1", "tts-1-hd"],
voices: ["alloy", "echo", "fable", "onyx", "nova", "shimmer"],
configurable: ["model", "voice", "baseUrl"],
},
};
const GPTShareButton = (props: {
conversation: Partial<ConversationType>;
}) => {
const { conversation } = props;
const { webApi } = useContext(AppSettingsProviderContext);
const navigate = useNavigate();
const handleShare = () => {
const { configuration } = conversation;
delete configuration.baseUrl
delete configuration?.tts?.baseUrl
if (!configuration.roleDefinition) {
toast.error('shareFailed');
return;
}
webApi
.createPost({
metadata: {
type: "gpt",
content: {
name: conversation.name,
engine: conversation.engine,
configuration,
},
},
})
.then(() => {
toast.success(t("sharedSuccessfully"), {
description: t("sharedGpt"),
action: {
label: t("view"),
onClick: () => {
navigate("/community");
},
},
actionButtonStyle: {
backgroundColor: "var(--primary)",
},
});
})
.catch((err) => {
toast.error(t("shareFailed"), { description: err.message });
});
}
if (!conversation.id) return null;
if (conversation.type !== "gpt") return null;
return (
<AlertDialog>
<AlertDialogTrigger asChild>
<Button variant="link" size="icon" className="rounded-full p-0 w-6 h-6">
<Share2Icon className="w-4 h-4 text-muted-foreground" />
</Button>
</AlertDialogTrigger>
<AlertDialogContent>
<AlertDialogHeader>
<AlertDialogTitle>{t("shareGpt")}</AlertDialogTitle>
<AlertDialogDescription>
{t("areYouSureToShareThisGptToCommunity")}
</AlertDialogDescription>
</AlertDialogHeader>
<AlertDialogFooter>
<AlertDialogCancel>{t("cancel")}</AlertDialogCancel>
<AlertDialogAction asChild>
<Button variant="default" onClick={handleShare}>
{t("share")}
</Button>
</AlertDialogAction>
</AlertDialogFooter>
</AlertDialogContent>
</AlertDialog>
);
}

View File

@@ -0,0 +1,101 @@
import { t } from "i18next";
export const GPT_PROVIDERS: { [key: string]: any } = {
enjoyai: {
name: "EnjoyAI",
models: [
"gpt-3.5-turbo-0125",
"gpt-3.5-turbo",
"gpt-3.5-turbo-1106",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-instruct",
"gpt-4-turbo",
"gpt-4-turbo-2024-04-09",
"gpt-4-0125-preview",
"gpt-4-turbo-preview",
"gpt-4-1106-preview",
"gpt-4-vision-preview",
"gpt-4",
"gpt-4-32k",
"gpt-4-0613",
"gpt-4-32k-0613",
],
configurable: [
"model",
"roleDefinition",
"temperature",
"numberOfChoices",
"maxTokens",
"frequencyPenalty",
"presencePenalty",
"historyBufferSize",
"tts",
],
types: ["gpt", "tts"],
},
openai: {
name: "OpenAI",
description: t("youNeedToSetupApiKeyBeforeUsingOpenAI"),
models: [
"gpt-3.5-turbo-0125",
"gpt-3.5-turbo",
"gpt-3.5-turbo-1106",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-instruct",
"gpt-4-turbo",
"gpt-4-turbo-2024-04-09",
"gpt-4-0125-preview",
"gpt-4-turbo-preview",
"gpt-4-1106-preview",
"gpt-4-vision-preview",
"gpt-4",
"gpt-4-32k",
"gpt-4-0613",
"gpt-4-32k-0613",
],
configurable: [
"model",
"baseUrl",
"roleDefinition",
"temperature",
"numberOfChoices",
"maxTokens",
"frequencyPenalty",
"presencePenalty",
"historyBufferSize",
"tts",
],
types: ["gpt", "tts"],
},
googleGenerativeAi: {
name: "Google Generative AI",
models: ["gemini-pro"],
configurable: [
"model",
"roleDefinition",
"temperature",
"maxTokens",
"historyBufferSize",
"tts",
],
types: ["gpt"],
},
ollama: {
name: "Ollama",
description: t("ensureYouHaveOllamaRunningLocallyAndHasAtLeastOneModel"),
baseUrl: "http://localhost:11434",
models: [],
configurable: [
"model",
"baseUrl",
"roleDefinition",
"temperature",
"maxTokens",
"historyBufferSize",
"frequencyPenalty",
"presencePenalty",
"tts",
],
types: ["gpt"],
},
};
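Call sites consult this registry instead of hard-coding provider capabilities. A sketch of the two lookups ConversationForm performs (the helper names here are illustrative):

import { GPT_PROVIDERS } from "@renderer/components";

// render a field only when the engine supports it
const showField = (engine: string, field: string): boolean =>
  GPT_PROVIDERS[engine]?.configurable.includes(field) ?? false;

// fall back to the provider's default base URL when none is configured
const resolveBaseUrl = (engine: string, configured?: string): string | undefined =>
  configured || GPT_PROVIDERS[engine]?.baseUrl;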

View File

@@ -0,0 +1,95 @@
import { AppSettingsProviderContext } from "@renderer/context";
import { t } from "i18next";
import { useContext } from "react";
import { useNavigate } from "react-router-dom";
import {
AlertDialog,
AlertDialogAction,
AlertDialogCancel,
AlertDialogContent,
AlertDialogDescription,
AlertDialogFooter,
AlertDialogHeader,
AlertDialogTitle,
AlertDialogTrigger,
Button,
toast
} from "@renderer/components/ui";
import { Share2Icon } from "lucide-react";
export const GPTShareButton = (props: {
conversation: Partial<ConversationType>;
}) => {
const { conversation } = props;
const { webApi } = useContext(AppSettingsProviderContext);
const navigate = useNavigate();
const handleShare = () => {
const { configuration } = conversation;
delete configuration.baseUrl;
delete configuration?.tts?.baseUrl;
if (!configuration.roleDefinition) {
toast.error("shareFailed");
return;
}
webApi
.createPost({
metadata: {
type: "gpt",
content: {
name: conversation.name,
engine: conversation.engine,
configuration,
},
},
})
.then(() => {
toast.success(t("sharedSuccessfully"), {
description: t("sharedGpt"),
action: {
label: t("view"),
onClick: () => {
navigate("/community");
},
},
actionButtonStyle: {
backgroundColor: "var(--primary)",
},
});
})
.catch((err) => {
toast.error(t("shareFailed"), { description: err.message });
});
};
if (!conversation.id) return null;
if (conversation.type !== "gpt") return null;
return (
<AlertDialog>
<AlertDialogTrigger asChild>
<Button variant="link" size="icon" className="rounded-full p-0 w-6 h-6">
<Share2Icon className="w-4 h-4 text-muted-foreground" />
</Button>
</AlertDialogTrigger>
<AlertDialogContent>
<AlertDialogHeader>
<AlertDialogTitle>{t("shareGpt")}</AlertDialogTitle>
<AlertDialogDescription>
{t("areYouSureToShareThisGptToCommunity")}
</AlertDialogDescription>
</AlertDialogHeader>
<AlertDialogFooter>
<AlertDialogCancel>{t("cancel")}</AlertDialogCancel>
<AlertDialogAction asChild>
<Button variant="default" onClick={handleShare}>
{t("share")}
</Button>
</AlertDialogAction>
</AlertDialogFooter>
</AlertDialogContent>
</AlertDialog>
);
};
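A usage sketch; the surrounding header markup is hypothetical, and the button renders nothing until the conversation is saved and of type "gpt":

import { GPTShareButton } from "@renderer/components";

const ConversationHeader = (props: { conversation: Partial<ConversationType> }) => (
  <div className="flex items-center space-x-2">
    <span>{props.conversation.name}</span>
    <GPTShareButton conversation={props.conversation} />
  </div>
);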

View File

@@ -1,5 +1,8 @@
export * from "./conversation-form";
export * from "./conversation-shortcuts";
export * from "./speech-form";
export * from "./speech-player";
export * from "./gpt-providers";
export * from "./gpt-share-button";
export * from "./tts-providers";

View File

@@ -1,97 +0,0 @@
import { useState, useContext } from "react";
import { RecordButton, SpeechPlayer } from "@renderer/components";
import {
Button,
Textarea,
Dialog,
DialogContent,
} from "@renderer/components/ui";
import { AppSettingsProviderContext } from "@renderer/context";
import { LoaderIcon } from "lucide-react";
import { t } from "i18next";
export const SpeechForm = (props: {
lastMessage?: MessageType;
onSubmit: (content: string, file: string) => void;
}) => {
const { lastMessage, onSubmit } = props;
const { EnjoyApp } = useContext(AppSettingsProviderContext);
const [transcribing, setTranscribing] = useState(false);
const [editting, setEditting] = useState(false);
const [content, setContent] = useState("");
const [file, setFile] = useState("");
const handleCancel = () => {
setEditting(false);
setContent("");
setFile("");
};
const handleSubmit = () => {
if (!content) return;
onSubmit(content, file);
handleCancel();
};
return (
<>
<RecordButton
disabled={false}
onRecordEnd={async (blob, _duration) => {
setTranscribing(true);
setEditting(true);
EnjoyApp.whisper
.transcribe(
{
type: blob.type.split(";")[0],
arrayBuffer: await blob.arrayBuffer(),
},
lastMessage?.content
)
.then(({ content, file }) => {
setContent(content);
setFile(file);
})
.finally(() => {
setTranscribing(false);
});
}}
/>
<Dialog
open={editting}
onOpenChange={(value) => {
setEditting(value);
}}
>
<DialogContent>
{transcribing ? (
<div className="flex items-center justify-center p-6">
<LoaderIcon className="w-6 h-6 animate-spin" />
</div>
) : (
<div className="">
<div className="my-4">
<Textarea
className="w-full h-36"
value={content}
onChange={(e) => setContent(e.target.value)}
/>
</div>
{file && (
<div className="mb-4">
<SpeechPlayer speech={{ playSource: "enjoy://" + file }} />
</div>
)}
<div className="flex items-center justify-end space-x-2">
<Button variant="secondary" onClick={handleCancel}>
{t("cancel")}
</Button>
<Button onClick={handleSubmit}>{t("send")}</Button>
</div>
</div>
)}
</DialogContent>
</Dialog>
</>
);
};
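The removed form handed recorded audio to EnjoyApp.whisper.transcribe as a plain { type, arrayBuffer } payload. That conversion in isolation (field names taken from the code above):

async function blobToTranscribePayload(blob: Blob) {
  return {
    // strip codec parameters, e.g. "audio/webm;codecs=opus" -> "audio/webm"
    type: blob.type.split(";")[0],
    arrayBuffer: await blob.arrayBuffer(),
  };
}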

View File

@@ -0,0 +1,17 @@
import { t } from "i18next";
export const TTS_PROVIDERS: { [key: string]: any } = {
enjoyai: {
name: "EnjoyAI",
models: ["tts-1", "tts-1-hd"],
voices: ["alloy", "echo", "fable", "onyx", "nova", "shimmer"],
configurable: ["voice"],
},
openai: {
name: "OpenAI",
description: t("youNeedToSetupApiKeyBeforeUsingOpenAI"),
models: ["tts-1", "tts-1-hd"],
voices: ["alloy", "echo", "fable", "onyx", "nova", "shimmer"],
configurable: ["model", "voice", "baseUrl"],
},
};

View File

@@ -13,24 +13,4 @@ export * from "./medias";
export * from "./posts";
export * from "./users";
export * from "./db-state";
export * from "./layout";
export * from "./sidebar";
export * from "./page-placeholder";
export * from "./user-card";
export * from "./login-form";
export * from "./choose-library-path-input";
export * from "./whisper-model-options";
export * from "./reset-all-button";
export * from "./loader-spin";
export * from "./no-records-found";
export * from "./selection-menu";
export * from "./lookup-result";
export * from "./record-button";
export * from "./widgets";

View File

@@ -18,7 +18,10 @@ export * from "./balance-settings";
export * from "./reset-settings";
export * from "./reset-all-settings";
export * from "./reset-all-button";
export * from "./theme-settings";
export * from "./proxy-settings";
export * from "./whisper-model-options";

View File

@@ -18,7 +18,7 @@ import {
SelectValue,
SelectContent,
} from "@renderer/components/ui";
import { LLM_PROVIDERS } from "@renderer/components";
import { GPT_PROVIDERS } from "@renderer/components";
import { AISettingsProviderContext } from "@renderer/context";
import { useContext, useState } from "react";
@@ -28,7 +28,7 @@ export const OpenaiSettings = () => {
const openAiConfigSchema = z.object({
key: z.string().optional(),
model: z.enum(LLM_PROVIDERS.openai.models),
model: z.enum(GPT_PROVIDERS.openai.models),
baseUrl: z.string().optional(),
});
@@ -93,7 +93,7 @@ export const OpenaiSettings = () => {
</SelectTrigger>
</FormControl>
<SelectContent>
{(LLM_PROVIDERS.openai.models || []).map(
{(GPT_PROVIDERS.openai.models || []).map(
(option: string) => (
<SelectItem key={option} value={option}>
{option}
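zod's z.enum expects a non-empty string tuple, while GPT_PROVIDERS is typed loosely as any. A sketch of the cast that makes the schema above type-check against a properly typed registry (the schema shape is copied from this file):

import { z } from "zod";
import { GPT_PROVIDERS } from "@renderer/components";

const models = GPT_PROVIDERS.openai.models as [string, ...string[]];

const openAiConfigSchema = z.object({
  key: z.string().optional(),
  model: z.enum(models),
  baseUrl: z.string().optional(),
});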

View File

@@ -10,7 +10,7 @@ import {
AlertDialogTrigger,
} from "@renderer/components/ui";
import { useContext } from "react";
import { AppSettingsProviderContext } from "../context";
import { AppSettingsProviderContext } from "../../context";
import { t } from "i18next";
export const ResetAllButton = (props: { children: React.ReactNode }) => {

View File

@@ -1,173 +0,0 @@
import { t } from "i18next";
import { MicIcon, LockIcon } from "lucide-react";
import { useState, useEffect, useRef, useContext } from "react";
import { AppSettingsProviderContext } from "@renderer/context";
import RecordPlugin from "wavesurfer.js/dist/plugins/record";
import WaveSurfer from "wavesurfer.js";
import { cn } from "@renderer/lib/utils";
import { RadialProgress, toast } from "@renderer/components/ui";
import { useHotkeys } from "react-hotkeys-hook";
import { useTranscribe } from "@renderer/hooks";
export const RecordButton = (props: {
className?: string;
disabled: boolean;
onRecordBegin?: () => void;
onRecordEnd: (blob: Blob, duration: number) => void;
}) => {
const { className, disabled, onRecordBegin, onRecordEnd } = props;
const [isRecording, setIsRecording] = useState<boolean>(false);
const [duration, setDuration] = useState<number>(0);
const { EnjoyApp } = useContext(AppSettingsProviderContext);
const [access, setAccess] = useState<boolean>(false);
const askForMediaAccess = () => {
EnjoyApp.system.preferences.mediaAccess("microphone").then((access) => {
if (access) {
setAccess(true);
} else {
setAccess(false);
toast.warning(t("noMicrophoneAccess"));
}
});
};
useHotkeys(["command+alt+r", "control+alt+r"], () => {
if (disabled) return;
setIsRecording((isRecording) => !isRecording);
});
useEffect(() => {
if (!isRecording) return;
onRecordBegin?.();
setDuration(0);
const interval = setInterval(() => {
setDuration((duration) => {
if (duration >= 300) {
setIsRecording(false);
}
return duration + 1;
});
}, 1000);
return () => {
clearInterval(interval);
};
}, [isRecording]);
useEffect(() => {
askForMediaAccess();
}, []);
return (
<div
className={cn(
`shadow-lg w-full aspect-square rounded-full text-primary-foreground flex items-center justify-center relative ${
isRecording
? "bg-red-500 w-24 h-24"
: " bg-primary opacity-80 w-20 h-20"
}
${disabled ? "cursor-not-allowed" : "hover:opacity-100 cursor-pointer"}
`,
className
)}
onClick={() => {
if (disabled) return;
if (access) {
setIsRecording((isRecording) => !isRecording);
} else {
askForMediaAccess();
}
}}
>
{isRecording ? (
<div className="w-full my-auto text-center">
<span className="font-mono text-xl font-bold">{duration}</span>
<RecordButtonPopover
onRecordEnd={(blob, duration) => {
if (duration > 1000) {
onRecordEnd(blob, duration);
} else {
toast.warning(t("recordTooShort"));
}
}}
/>
<RadialProgress
className="w-24 h-24 absolute top-0 left-0"
progress={100 - Math.floor((duration / 300) * 100)}
ringClassName="text-white"
circleClassName="text-red-500"
thickness={6}
label=" "
/>
</div>
) : (
<div className="flex items-center justify-center space-x-4 h-10">
<MicIcon className="w-10 h-10" />
{!access && (
<LockIcon className="w-4 h-4 absolute right-3 bottom-4" />
)}
</div>
)}
</div>
);
};
const RecordButtonPopover = (props: {
onRecordEnd: (blob: Blob, duration: number) => void;
}) => {
const containerRef = useRef<HTMLDivElement>();
const { transcode } = useTranscribe();
useEffect(() => {
if (!containerRef.current) return;
const ws = WaveSurfer.create({
container: containerRef.current,
waveColor: "#fff",
height: 16,
autoCenter: false,
normalize: true,
});
const record = ws.registerPlugin(RecordPlugin.create());
let startAt = 0;
record.on("record-start", () => {
startAt = Date.now();
});
record.on("record-end", async (blob: Blob) => {
const duration = Date.now() - startAt;
try {
const output = await transcode(blob);
props.onRecordEnd(output, duration);
} catch (e) {
console.error(e);
toast.error(t("failedToSaveRecording"));
}
});
RecordPlugin.getAvailableAudioDevices()
.then((devices) => devices.find((d) => d.kind === "audioinput"))
.then((device) => {
if (device) {
record.startRecording({ deviceId: device.deviceId });
} else {
toast.error(t("cannotFindMicrophone"));
}
});
return () => {
record.stopRecording();
ws.destroy();
};
}, []);
return (
<div className="hidden">
<div ref={containerRef}></div>
</div>
);
};
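The recording lifecycle the removed component wrapped, reduced to the wavesurfer.js RecordPlugin calls it used (UI and toasts omitted; returns a cleanup function, mirroring the effect teardown above):

import WaveSurfer from "wavesurfer.js";
import RecordPlugin from "wavesurfer.js/dist/plugins/record";

function record(container: HTMLElement, onEnd: (blob: Blob, durationMs: number) => void) {
  const ws = WaveSurfer.create({ container, waveColor: "#fff", height: 16 });
  const recorder = ws.registerPlugin(RecordPlugin.create());
  let startAt = 0;
  recorder.on("record-start", () => (startAt = Date.now()));
  recorder.on("record-end", (blob: Blob) => onEnd(blob, Date.now() - startAt));
  RecordPlugin.getAvailableAudioDevices().then((devices) => {
    const mic = devices.find((d) => d.kind === "audioinput");
    if (mic) recorder.startRecording({ deviceId: mic.deviceId });
  });
  return () => {
    recorder.stopRecording();
    ws.destroy();
  };
}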

View File

@@ -1 +1,2 @@
export * from './users-rankings';
export * from './user-card';

View File

@@ -0,0 +1,13 @@
export * from "./db-state";
export * from "./layout";
export * from "./sidebar";
export * from "./page-placeholder";
export * from "./login-form";
export * from "./loader-spin";
export * from "./no-records-found";
export * from "./selection-menu";
export * from "./lookup-result";

View File

@@ -96,6 +96,7 @@ export const Sidebar = () => {
tooltip={t("sidebar.aiAssistant")}
active={activeTab.startsWith("/conversations")}
Icon={BotIcon}
testid="sidebar-conversations"
/>
<SidebarItem
@@ -224,8 +225,9 @@ const SidebarItem = (props: {
tooltip: string;
active: boolean;
Icon: LucideIcon;
testid?: string;
}) => {
const { href, label, tooltip, active, Icon } = props;
const { href, label, tooltip, active, Icon, testid } = props;
return (
<Link
@@ -233,6 +235,7 @@ const SidebarItem = (props: {
data-tooltip-id="global-tooltip"
data-tooltip-content={tooltip}
data-tooltip-place="right"
data-testid={testid}
className="block px-1 xl:px-2"
>
<Button
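The new testid hook presumably supports the "fix renderer tests" change in this commit. A hypothetical spec targeting it, assuming a Playwright-style runner (the test body and fixtures are illustrative, not taken from the repo):

import { test, expect } from "@playwright/test";

test("opens the AI assistant from the sidebar", async ({ page }) => {
  // getByTestId matches the data-testid attribute added above
  await page.getByTestId("sidebar-conversations").click();
  await expect(page).toHaveURL(/\/conversations/);
});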

View File

@@ -181,18 +181,6 @@ type EnjoyAppType = {
create: (params: any) => Promise<ConversationType>;
update: (id: string, params: any) => Promise<ConversationType>;
destroy: (id: string) => Promise<void>;
ask: (
id: string,
params: {
messageId?: string;
content?: string;
file?: string;
blob?: {
type: string;
arrayBuffer: ArrayBuffer;
};
}
) => Promise<MessageType[]>;
};
messages: {
findAll: (params: any) => Promise<MessageType[]>;