Feat: add Enjoy AI as option (#206)

* add enjoyAI as option

* use enjoyai config

* may call enjoyai

* may set default ai engine

* refactor setting context

* refactor preferences

* add warning when openai key not provided

* tweak locale

* update duration for audio/video

* add balance settings

* may select ai role when create conversation

* may forward message from conversation

* tweak ui

* refactor transcribe method

* refactor ai commands to hooks

* fix webapi

* tweak playback rate options

* add playMode, next & prev, ref: #124

* upgrade deps

* may skip whisper model download

* audios/videos default order by updatedAt
This commit is contained in:
an-lee
2024-01-31 00:04:59 +08:00
committed by GitHub
parent 58dcd1523e
commit 00cbc8403b
56 changed files with 1590 additions and 858 deletions

View File

@@ -39,35 +39,35 @@
"@types/intl-tel-input": "^18.1.4",
"@types/lodash": "^4.14.202",
"@types/mark.js": "^8.11.12",
"@types/node": "^20.11.5",
"@types/node": "^20.11.10",
"@types/react": "^18.2.48",
"@types/react-dom": "^18.2.18",
"@types/validator": "^13.11.8",
"@types/wavesurfer.js": "^6.0.12",
"@typescript-eslint/eslint-plugin": "^6.19.0",
"@typescript-eslint/parser": "^6.19.0",
"@typescript-eslint/eslint-plugin": "^6.20.0",
"@typescript-eslint/parser": "^6.20.0",
"@vitejs/plugin-react": "^4.2.1",
"autoprefixer": "^10.4.17",
"electron": "^28.1.4",
"electron": "^28.2.0",
"eslint": "^8.56.0",
"eslint-import-resolver-typescript": "^3.6.1",
"eslint-plugin-import": "^2.29.1",
"flora-colossus": "^2.0.0",
"octokit": "^3.1.2",
"tailwind-merge": "^2.2.0",
"tailwind-merge": "^2.2.1",
"tailwindcss": "^3.4.1",
"tailwindcss-animate": "^1.0.7",
"ts-node": "^10.9.2",
"tslib": "^2.6.2",
"typescript": "^5.3.3",
"vite-plugin-static-copy": "^1.0.0",
"vite-plugin-static-copy": "^1.0.1",
"zx": "^7.2.3"
},
"dependencies": {
"@ffmpeg/ffmpeg": "^0.12.10",
"@ffmpeg/util": "^0.12.1",
"@hookform/resolvers": "^3.3.4",
"@langchain/google-genai": "^0.0.7",
"@langchain/google-genai": "^0.0.8",
"@mozilla/readability": "^0.5.0",
"@radix-ui/react-accordion": "^1.1.2",
"@radix-ui/react-alert-dialog": "^1.0.5",
@@ -92,46 +92,46 @@
"@radix-ui/react-toggle": "^1.0.3",
"@radix-ui/react-tooltip": "^1.0.7",
"@uidotdev/usehooks": "^2.4.1",
"@vidstack/react": "^1.9.8",
"@vidstack/react": "^1.10.2",
"adm-zip": "^0.5.10",
"autosize": "^6.0.1",
"axios": "^1.6.5",
"axios": "^1.6.7",
"camelcase": "^8.0.0",
"camelcase-keys": "^9.1.3",
"cheerio": "^1.0.0-rc.12",
"class-variance-authority": "^0.7.0",
"clsx": "^2.1.0",
"command-exists": "^1.2.9",
"compromise": "^14.11.1",
"compromise": "^14.11.2",
"compromise-paragraphs": "^0.1.0",
"compromise-stats": "^0.1.0",
"dayjs": "^1.11.10",
"decamelize": "^6.0.0",
"decamelize-keys": "^2.0.1",
"electron-log": "^5.0.4",
"electron-log": "^5.1.0",
"electron-settings": "^4.0.2",
"electron-squirrel-startup": "^1.0.0",
"fluent-ffmpeg": "^2.1.2",
"fs-extra": "^11.2.0",
"html-to-text": "^9.0.5",
"i18next": "^23.7.18",
"intl-tel-input": "^19.2.12",
"i18next": "^23.8.1",
"intl-tel-input": "^19.2.15",
"js-md5": "^0.8.3",
"langchain": "^0.1.5",
"langchain": "^0.1.10",
"lodash": "^4.17.21",
"lucide-react": "^0.314.0",
"lucide-react": "^0.319.0",
"mark.js": "^8.11.1",
"microsoft-cognitiveservices-speech-sdk": "^1.34.0",
"next-themes": "^0.2.1",
"openai": "^4.24.7",
"openai": "^4.26.0",
"pitchfinder": "^2.3.2",
"postcss": "^8.4.33",
"react": "^18.2.0",
"react-activity-calendar": "^2.2.6",
"react-activity-calendar": "^2.2.7",
"react-dom": "^18.2.0",
"react-hook-form": "^7.49.3",
"react-hotkeys-hook": "^4.4.4",
"react-i18next": "^14.0.0",
"react-i18next": "^14.0.1",
"react-markdown": "^9.0.1",
"react-router-dom": "^6.21.3",
"react-tooltip": "^5.26.0",
@@ -139,11 +139,11 @@
"rimraf": "^5.0.5",
"sequelize": "^6.35.2",
"sequelize-typescript": "^2.1.6",
"sonner": "^1.3.1",
"sonner": "^1.4.0",
"sqlite3": "^5.1.7",
"tailwind-scrollbar-hide": "^1.1.7",
"umzug": "^3.5.1",
"wavesurfer.js": "^7.6.5",
"wavesurfer.js": "^7.7.1",
"zod": "^3.22.4"
}
}

View File

@@ -13,8 +13,9 @@ export class Client {
baseUrl: string;
accessToken?: string;
logger?: any;
locale?: "en" | "zh-CN";
}) {
const { baseUrl, accessToken, logger } = options;
const { baseUrl, accessToken, logger, locale = "en" } = options;
this.baseUrl = baseUrl;
this.logger = logger || console;
@@ -27,6 +28,7 @@ export class Client {
});
this.api.interceptors.request.use((config) => {
config.headers.Authorization = `Bearer ${accessToken}`;
config.headers['Accept-Language'] = locale;
this.logger.debug(
config.method.toUpperCase(),

View File

@@ -66,9 +66,11 @@
"name": "Name",
"engine": "AI engine",
"baseUrl": "Request endpoint",
"baseUrlDescription": "BaseURL, leave it blank if you don't have one",
"configuration": "Configuration",
"model": "AI model",
"roleDefinition": "Role definition",
"roleDefinitionPlaceholder": "Describe the AI role",
"temperature": "Temperature",
"temperatureDescription": "The higher the temperature, the more creative the result",
"maxTokens": "Max tokens",
@@ -85,6 +87,7 @@
"ttsModel": "TTS model",
"ttsVoice": "TTS voice",
"ttsBaseUrl": "TTS base URL",
"ttsBaseUrlDescription": "BaseURL for TTS, leave it blank if you don't have one",
"notFound": "Conversation not found",
"contentRequired": "Content required",
"failedToGenerateResponse": "Failed to generate response, please retry"
@@ -147,8 +150,11 @@
"yesterday": "yesterday",
"play": "play",
"pause": "pause",
"loop": "loop",
"stopLoop": "stop loop",
"playSingleSegment": "play single segment",
"playAllSegments": "play all segments",
"playInLoop": "play in loop",
"playNextSegment": "play next segment",
"playPreviousSegment": "play previous segment",
"playbackSpeed": "playback speed",
"zoomIn": "zoom in",
"zoomOut": "zoom out",
@@ -161,6 +167,7 @@
"detail": "detail",
"remove": "remove",
"share": "share",
"forward": "forward",
"loadMore": "Load more",
"databaseError": "Failed to connect to database {{url}}",
"somethingWentWrong": "Something went wrong",
@@ -218,7 +225,7 @@
"welcomeBack": "Welcome back! {{name}}",
"download": "Download",
"downloading": "Downloading {{file}}",
"chooseAIModelDependingOnYourHardware": "Choose AI Model depending on your hardware",
"chooseAIModelDependingOnYourHardware": "Choose AI Model depending on your hardware.",
"areYouSureToDownload": "Are you sure to download {{name}}?",
"yourModelsWillBeDownloadedTo": "Your models will be downloaded to {{path}}",
"logout": "Logout",
@@ -291,17 +298,23 @@
"noRecordingActivities": "no recording activities",
"basicSettingsShort": "Basic",
"basicSettings": "Basic settings",
"accountSettingsShort": "Account",
"accountSettings": "Account settings",
"advancedSettingsShort": "Advanced",
"advancedSettings": "Advanced settings",
"advanced": "Advanced",
"language": "Language",
"balance": "Balance",
"deposit": "Deposit",
"notAvailableYet": "Not available yet",
"whisperModel": "Whisper Model",
"sttAiService": "STT AI service",
"local": "Local",
"localSpeechToTextDescription": "Use local whisper model to transcribe.",
"localSpeechToTextDescription": "Use local whisper model to transcribe. It is free.",
"azureAi": "Azure AI",
"azureSpeechToTextDescription": "Use Azure AI Speech to transcribe.",
"azureSpeechToTextDescription": "Use Azure AI Speech to transcribe. It is a paid service.",
"cloudflareAi": "Cloudflare AI",
"cloudflareSpeechToTextDescription": "Use Cloudflare AI Worker to transcribe.",
"cloudflareSpeechToTextDescription": "Use Cloudflare AI Worker to transcribe. It is in beta and free for now.",
"checkingWhisper": "Checking whisper status",
"pleaseDownloadWhisperModelFirst": "Please download whisper model first",
"whisperIsWorkingGood": "Whisper is working good",
@@ -310,6 +323,9 @@
"whisperModelIsWorkingGood": "Whisper model is working good",
"whisperModelIsNotWorking": "Whisper model is not working",
"relaunchIsNeededAfterChanged": "Relaunch is needed after changed",
"defaultAiEngine": "Default AI engine",
"openAiEngineTips": "Use OpenAI with your own key as default AI engine.",
"enjoyAiEngineTips": "Use EnjoyAI as default AI engine. It is a paid service.",
"openaiKeySaved": "OpenAI key saved",
"openaiConfigSaved": "OpenAI config saved",
"openaiKeyRequired": "OpenAI key required",
@@ -318,6 +334,8 @@
"key": "key",
"leaveEmptyToUseDefault": "Leave empty to use default",
"newConversation": "New conversation",
"selectAiRole": "Select AI role",
"custom": "Custom",
"startConversation": "Start conversation",
"editConversation": "Edit conversation",
"deleteConversation": "Delete conversation",
@@ -417,5 +435,11 @@
"removeSharing": "Remove sharing",
"areYouSureToRemoveThisSharing": "Are you sure to remove this sharing?",
"removeSharingSuccessfully": "Remove sharing successfully",
"removeSharingFailed": "Remove sharing failed"
"removeSharingFailed": "Remove sharing failed",
"generatingIpa": "Generating IPA",
"generatedIpaSuccessfully": "Generated IPA successfully",
"generatingIpaFailed": "Generating IPA failed",
"translating": "Translating",
"translatedSuccessfully": "Translated successfully",
"translationFailed": "Translation failed"
}

View File

@@ -66,9 +66,11 @@
"name": "对话标题",
"engine": "AI 引擎",
"baseUrl": "接口地址",
"baseUrlDescription": "接口地址,留空则使用默认值",
"configuration": "AI 配置",
"model": "AI 模型",
"roleDefinition": "角色定义",
"roleDefinitionPlaceholder": "描述 AI 扮演的角色",
"temperature": "随机性 (temperature)",
"temperatureDescription": "值越高,生成的文本越具创造性,反之则越稳定",
"maxTokens": "单次回复限制",
@@ -84,7 +86,8 @@
"ttsEngine": "TTS 引擎",
"ttsModel": "TTS 模型",
"ttsVoice": "TTS 声音",
"ttsBaseUrl": "TTS 请求地址",
"ttsBaseUrl": "TTS 接口地址",
"ttsBaseUrlDescription": "TTS 接口地址,留空则使用默认值",
"notFound": "未找到对话",
"contentRequired": "对话内容不能为空",
"failedToGenerateResponse": "生成失败,请重试"
@@ -147,8 +150,11 @@
"yesterday": "昨天",
"play": "播放",
"pause": "暂停",
"loop": "循环",
"stopLoop": "停止循环",
"playSingleSegment": "播放单句",
"playAllSegments": "播放所有",
"playInLoop": "单句循环",
"playNextSegment": "播放下一句",
"playPreviousSegment": "播放上一句",
"playbackSpeed": "播放速度",
"zoomIn": "放大",
"zoomOut": "缩小",
@@ -161,6 +167,7 @@
"detail": "详情",
"remove": "删除",
"share": "分享",
"forward": "转发",
"loadMore": "加载更多",
"databaseError": "数据库错误 {{url}}",
"somethingWentWrong": "出错了",
@@ -218,7 +225,7 @@
"welcomeBack": "欢迎回来, {{name}}",
"download": "下载",
"downloading": "正在下载 {{file}}",
"chooseAIModelDependingOnYourHardware": "根据您的硬件选择合适的 AI 模型",
"chooseAIModelDependingOnYourHardware": "根据您的硬件选择合适的 AI 模型, 以便语音转文本服务正常工作",
"areYouSureToDownload": "您确定要下载 {{name}} 吗?",
"yourModelsWillBeDownloadedTo": "您的模型将下载到目录 {{path}}",
"logout": "退出登录",
@@ -291,16 +298,22 @@
"noRecordingActivities": "没有练习活动",
"basicSettingsShort": "基本设置",
"basicSettings": "基本设置",
"accountSettingsShort": "账户设置",
"accountSettings": "账户设置",
"advancedSettingsShort": "高级设置",
"advancedSettings": "高级设置",
"language": "语言",
"balance": "余额",
"deposit": "充值",
"notAvailableYet": "暂未开放",
"whisperModel": "Whisper 模型",
"sttAiService": "语音转文本服务",
"local": "本地",
"localSpeechToTextDescription": "使用本地 whisper 模型进行语音转文本",
"localSpeechToTextDescription": "使用本地 whisper 模型进行语音转文本,不会产生费用",
"azureAi": "Azure AI",
"azureSpeechToTextDescription": "使用 Azure AI Speech 进行语音转文本",
"azureSpeechToTextDescription": "使用 Azure AI Speech 进行语音转文本,收费服务",
"cloudflareAi": "Cloudflare AI",
"cloudflareSpeechToTextDescription": "使用 Cloudflare AI 进行语音转文本",
"cloudflareSpeechToTextDescription": "使用 Cloudflare AI 进行语音转文本,目前免费",
"checkingWhisper": "正在检查 Whisper",
"pleaseDownloadWhisperModelFirst": "请先下载 Whisper 模型",
"whisperIsWorkingGood": "Whisper 正常工作",
@@ -309,14 +322,19 @@
"whisperModelIsWorkingGood": "Whisper 模型正常工作",
"whisperModelIsNotWorking": "Whisper 模型无法正常工作,请尝试更换模型后重试,或联系开发者",
"relaunchIsNeededAfterChanged": "更改后需要重新启动",
"defaultAiEngine": "默认 AI 引擎",
"openAiEngineTips": "使用 OpenAI 作为默认 AI 引擎,需要配置 API 密钥。",
"enjoyAiEngineTips": "使用 EnjoyAI 作为默认 AI 引擎,收费服务。",
"openaiKeySaved": "OpenAI 密钥已保存",
"openaiConfigSaved": "OpenAI 配置已保存",
"openaiKeyRequired": "未提供 OpenAI 密钥",
"openaiKeyRequired": "未配置 OpenAI 密钥",
"baseUrl": "接口地址",
"model": "模型",
"key": "密钥",
"leaveEmptyToUseDefault": "留空则使用默认值",
"newConversation": "新对话",
"selectAiRole": "选择 AI 角色",
"custom": "自定义",
"startConversation": "开始对话",
"editConversation": "编辑对话",
"deleteConversation": "删除对话",
@@ -416,5 +434,11 @@
"removeSharing": "取消分享",
"areYouSureToRemoveThisSharing": "您确定要取消分享吗?",
"removeSharingSuccessfully": "取消分享成功",
"removeSharingFailed": "取消分享失败"
"removeSharingFailed": "取消分享失败",
"generatingIpa": "正在生成音标",
"generatedIpaSuccessfully": "音标生成成功",
"generatingIpaFailed": "音标生成失败",
"translating": "正在翻译",
"translatedSuccessfully": "翻译成功",
"translationFailed": "翻译失败"
}

View File

@@ -14,7 +14,7 @@ class AudiosHandler {
options: FindOptions<Attributes<Audio>>
) {
return Audio.findAll({
order: [["createdAt", "DESC"]],
order: [["updatedAt", "DESC"]],
include: [
{
association: "transcription",
@@ -66,39 +66,6 @@ class AudiosHandler {
});
}
private async transcribe(event: IpcMainEvent, id: string) {
const audio = await Audio.findOne({
where: {
id,
},
});
if (!audio) {
event.sender.send("on-notification", {
type: "error",
message: t("models.audio.notFound"),
});
}
const timeout = setTimeout(() => {
event.sender.send("on-notification", {
type: "warning",
message: t("stillTranscribing"),
});
}, 1000 * 10);
audio
.transcribe()
.catch((err) => {
event.sender.send("on-notification", {
type: "error",
message: err.message,
});
})
.finally(() => {
clearTimeout(timeout);
});
}
private async create(
event: IpcMainEvent,
uri: string,
@@ -148,7 +115,7 @@ class AudiosHandler {
id: string,
params: Attributes<Audio>
) {
const { name, description, transcription } = params;
const { name, description, metadata } = params;
return Audio.findOne({
where: { id },
@@ -157,7 +124,7 @@ class AudiosHandler {
if (!audio) {
throw new Error(t("models.audio.notFound"));
}
audio.update({ name, description, transcription });
audio.update({ name, description, metadata });
})
.catch((err) => {
event.sender.send("on-notification", {
@@ -208,7 +175,6 @@ class AudiosHandler {
register() {
ipcMain.handle("audios-find-all", this.findAll);
ipcMain.handle("audios-find-one", this.findOne);
ipcMain.handle("audios-transcribe", this.transcribe);
ipcMain.handle("audios-create", this.create);
ipcMain.handle("audios-update", this.update);
ipcMain.handle("audios-destroy", this.destroy);

View File

@@ -86,7 +86,7 @@ class TranscriptionsHandler {
throw new Error("models.transcription.notFound");
}
const timeout = setTimeout(() => {
const interval = setInterval(() => {
event.sender.send("on-notification", {
type: "warning",
message: t("stillTranscribing"),
@@ -102,7 +102,7 @@ class TranscriptionsHandler {
});
})
.finally(() => {
clearTimeout(timeout);
clearInterval(interval);
});
})
.catch((err) => {

View File

@@ -14,7 +14,7 @@ class VideosHandler {
options: FindOptions<Attributes<Video>>
) {
return Video.findAll({
order: [["createdAt", "DESC"]],
order: [["updatedAt", "DESC"]],
include: [
{
association: "transcription",
@@ -66,39 +66,6 @@ class VideosHandler {
});
}
private async transcribe(event: IpcMainEvent, id: string) {
const video = await Video.findOne({
where: {
id,
},
});
if (!video) {
event.sender.send("on-notification", {
type: "error",
message: t("models.video.notFound"),
});
}
const timeout = setTimeout(() => {
event.sender.send("on-notification", {
type: "warning",
message: t("stillTranscribing"),
});
}, 1000 * 10);
video
.transcribe()
.catch((err) => {
event.sender.send("on-notification", {
type: "error",
message: err.message,
});
})
.finally(() => {
clearTimeout(timeout);
});
}
private async create(
event: IpcMainEvent,
uri: string,
@@ -149,7 +116,7 @@ class VideosHandler {
id: string,
params: Attributes<Video>
) {
const { name, description, transcription } = params;
const { name, description, metadata } = params;
return Video.findOne({
where: { id },
@@ -158,7 +125,7 @@ class VideosHandler {
if (!video) {
throw new Error(t("models.video.notFound"));
}
video.update({ name, description, transcription });
video.update({ name, description, metadata });
})
.catch((err) => {
event.sender.send("on-notification", {
@@ -209,7 +176,6 @@ class VideosHandler {
register() {
ipcMain.handle("videos-find-all", this.findAll);
ipcMain.handle("videos-find-one", this.findOne);
ipcMain.handle("videos-transcribe", this.transcribe);
ipcMain.handle("videos-create", this.create);
ipcMain.handle("videos-update", this.update);
ipcMain.handle("videos-destroy", this.destroy);

View File

@@ -34,12 +34,6 @@ const SIZE_LIMIT = 1024 * 1024 * 50; // 50MB
const logger = log.scope("db/models/audio");
const webApi = new Client({
baseUrl: process.env.WEB_API_URL || WEB_API_URL,
accessToken: settings.getSync("user.accessToken") as string,
logger: log.scope("api/client"),
});
@Table({
modelName: "Audio",
tableName: "audios",
@@ -119,7 +113,7 @@ export class Audio extends Model<Audio> {
@Column(DataType.VIRTUAL)
get transcribed(): boolean {
return this.transcription?.state === "finished";
return Boolean(this.transcription?.result);
}
@Column(DataType.VIRTUAL)
@@ -131,6 +125,11 @@ export class Audio extends Model<Audio> {
)}`;
}
@Column(DataType.VIRTUAL)
get duration(): number {
return this.getDataValue("metadata").duration;
}
get extname(): string {
return (
this.getDataValue("metadata").extname ||
@@ -167,9 +166,13 @@ export class Audio extends Model<Audio> {
}
async sync() {
if (!this.isUploaded) {
this.upload();
}
if (this.isSynced) return;
const webApi = new Client({
baseUrl: process.env.WEB_API_URL || WEB_API_URL,
accessToken: settings.getSync("user.accessToken") as string,
logger: log.scope("api/client"),
});
return webApi.syncAudio(this.toJSON()).then(() => {
this.update({ syncedAt: new Date() });
@@ -212,6 +215,7 @@ export class Audio extends Model<Audio> {
@AfterUpdate
static notifyForUpdate(audio: Audio) {
this.notify(audio, "update");
audio.sync().catch(() => {});
}
@AfterDestroy

View File

@@ -30,6 +30,7 @@ import path from "path";
import Ffmpeg from "@main/ffmpeg";
import whisper from "@main/whisper";
import { hashFile } from "@/utils";
import { WEB_API_URL } from "@/constants";
const logger = log.scope("db/models/conversation");
@Table({
@@ -136,7 +137,22 @@ export class Conversation extends Model<Conversation> {
// choose llm based on engine
llm() {
if (this.engine == "openai") {
if (this.engine === "enjoyai") {
return new ChatOpenAI({
modelName: this.model,
configuration: {
baseURL: `${process.env.WEB_API_URL || WEB_API_URL}/api/ai`,
defaultHeaders: {
Authorization: `Bearer ${settings.getSync("user.accessToken")}`,
},
},
temperature: this.configuration.temperature,
n: this.configuration.numberOfChoices,
maxTokens: this.configuration.maxTokens,
frequencyPenalty: this.configuration.frequencyPenalty,
presencePenalty: this.configuration.presencePenalty,
});
} else if (this.engine === "openai") {
const key = settings.getSync("openai.key") as string;
if (!key) {
throw new Error(t("openaiKeyRequired"));

View File

@@ -19,12 +19,6 @@ import { WEB_API_URL } from "@/constants";
import settings from "@main/settings";
import log from "electron-log/main";
const webApi = new Client({
baseUrl: process.env.WEB_API_URL || WEB_API_URL,
accessToken: settings.getSync("user.accessToken") as string,
logger: log.scope("api/client"),
});
@Table({
modelName: "PronunciationAssessment",
tableName: "pronunciation_assessments",
@@ -40,7 +34,7 @@ const webApi = new Client({
},
}))
export class PronunciationAssessment extends Model<PronunciationAssessment> {
@IsUUID('all')
@IsUUID("all")
@Default(DataType.UUIDV4)
@Column({ primaryKey: true, type: DataType.UUID })
id: string;
@@ -100,6 +94,12 @@ export class PronunciationAssessment extends Model<PronunciationAssessment> {
}
async sync() {
const webApi = new Client({
baseUrl: process.env.WEB_API_URL || WEB_API_URL,
accessToken: settings.getSync("user.accessToken") as string,
logger: log.scope("api/client"),
});
return webApi.syncPronunciationAssessment(this.toJSON()).then(() => {
this.update({ syncedAt: new Date() });
});

View File

@@ -29,12 +29,6 @@ import camelcaseKeys from "camelcase-keys";
const logger = log.scope("db/models/recording");
const webApi = new Client({
baseUrl: process.env.WEB_API_URL || WEB_API_URL,
accessToken: settings.getSync("user.accessToken") as string,
logger: log.scope("api/client"),
});
@Table({
modelName: "Recording",
tableName: "recordings",
@@ -144,6 +138,12 @@ export class Recording extends Model<Recording> {
await this.upload();
}
const webApi = new Client({
baseUrl: process.env.WEB_API_URL || WEB_API_URL,
accessToken: settings.getSync("user.accessToken") as string,
logger: log.scope("api/client"),
});
return webApi.syncRecording(this.toJSON()).then(() => {
this.update({ syncedAt: new Date() });
});
@@ -158,6 +158,12 @@ export class Recording extends Model<Recording> {
return assessment;
}
const webApi = new Client({
baseUrl: process.env.WEB_API_URL || WEB_API_URL,
accessToken: settings.getSync("user.accessToken") as string,
logger: log.scope("api/client"),
});
const { token, region } = await webApi.generateSpeechToken();
const sdk = new AzureSpeechSdk(token, region);

View File

@@ -23,6 +23,7 @@ import { t } from "i18next";
import { hashFile } from "@/utils";
import { Audio, Message } from "@main/db/models";
import log from "electron-log/main";
import { WEB_API_URL } from "@/constants";
const logger = log.scope("db/models/speech");
@Table({
@@ -170,26 +171,34 @@ export class Speech extends Model<Speech> {
const filename = `${Date.now()}${extname}`;
const filePath = path.join(settings.userDataPath(), "speeches", filename);
if (engine === "openai") {
const key = settings.getSync("openai.key") as string;
if (!key) {
let openaiConfig = {};
if (engine === "enjoyai") {
openaiConfig = {
baseURL: `${process.env.WEB_API_URL || WEB_API_URL}/api/ai`,
defaultHeaders: {
Authorization: `Bearer ${settings.getSync("user.accessToken")}`,
},
};
} else if (engine === "openai") {
const defaultConfig = settings.getSync("openai") as LlmProviderType;
if (!defaultConfig.key) {
throw new Error(t("openaiKeyRequired"));
}
const openai = new OpenAI({
apiKey: key,
baseURL: baseUrl,
});
logger.debug("baseURL", openai.baseURL);
const file = await openai.audio.speech.create({
input: text,
model,
voice,
});
const buffer = Buffer.from(await file.arrayBuffer());
await fs.outputFile(filePath, buffer);
openaiConfig = {
apiKey: defaultConfig.key,
baseURL: baseUrl || defaultConfig.baseUrl,
};
}
const openai = new OpenAI(openaiConfig);
const file = await openai.audio.speech.create({
input: text,
model,
voice,
});
const buffer = Buffer.from(await file.arrayBuffer());
await fs.outputFile(filePath, buffer);
const md5 = await hashFile(filePath, { algo: "md5" });
fs.renameSync(

View File

@@ -24,11 +24,6 @@ import path from "path";
import fs from "fs-extra";
const logger = log.scope("db/models/transcription");
const webApi = new Client({
baseUrl: process.env.WEB_API_URL || WEB_API_URL,
accessToken: settings.getSync("user.accessToken") as string,
logger: log.scope("api/client"),
});
@Table({
modelName: "Transcription",
@@ -82,6 +77,11 @@ export class Transcription extends Model<Transcription> {
async sync() {
if (this.getDataValue("state") !== "finished") return;
const webApi = new Client({
baseUrl: process.env.WEB_API_URL || WEB_API_URL,
accessToken: settings.getSync("user.accessToken") as string,
logger: log.scope("api/client"),
});
return webApi.syncTranscription(this.toJSON()).then(() => {
this.update({ syncedAt: new Date() });
});

View File

@@ -34,12 +34,6 @@ const SIZE_LIMIT = 1024 * 1024 * 100; // 100MB
const logger = log.scope("db/models/video");
const webApi = new Client({
baseUrl: process.env.WEB_API_URL || WEB_API_URL,
accessToken: settings.getSync("user.accessToken") as string,
logger: log.scope("api/client"),
});
@Table({
modelName: "Video",
tableName: "videos",
@@ -131,6 +125,11 @@ export class Video extends Model<Video> {
)}`;
}
@Column(DataType.VIRTUAL)
get duration(): number {
return this.getDataValue("metadata").duration;
}
get extname(): string {
return (
this.getDataValue("metadata").extname ||
@@ -189,9 +188,13 @@ export class Video extends Model<Video> {
}
async sync() {
if (!this.isUploaded) {
this.upload();
}
if (this.isSynced) return;
const webApi = new Client({
baseUrl: process.env.WEB_API_URL || WEB_API_URL,
accessToken: settings.getSync("user.accessToken") as string,
logger: log.scope("api/client"),
});
return webApi.syncVideo(this.toJSON()).then(() => {
this.update({ syncedAt: new Date() });
@@ -235,6 +238,7 @@ export class Video extends Model<Video> {
@AfterUpdate
static notifyForUpdate(video: Video) {
this.notify(video, "update");
video.sync().catch(() => {});
}
@AfterDestroy

View File

@@ -167,6 +167,14 @@ export default {
ipcMain.handle("settings-switch-language", (_event, language) => {
switchLanguage(language);
});
ipcMain.handle("settings-get-default-engine", (_event) => {
return settings.getSync("defaultEngine");
});
ipcMain.handle("settings-set-default-engine", (_event, engine) => {
return settings.setSync("defaultEngine", engine);
});
},
cachePath,
libraryPath,

View File

@@ -19,12 +19,6 @@ import { sortedUniqBy, take } from "lodash";
const logger = log.scope("whisper");
const webApi = new Client({
baseUrl: process.env.WEB_API_URL || WEB_API_URL,
accessToken: settings.getSync("user.accessToken") as string,
logger: log.scope("api/client"),
});
const MAGIC_TOKENS = ["Mrs.", "Ms.", "Mr.", "Dr.", "Prof.", "St."];
const END_OF_WORD_REGEX = /[^\.!,\?][\.!\?]/g;
class Whipser {
@@ -200,6 +194,11 @@ class Whipser {
}
async transcribeFromAzure(file: string): Promise<Partial<WhisperOutputType>> {
const webApi = new Client({
baseUrl: process.env.WEB_API_URL || WEB_API_URL,
accessToken: settings.getSync("user.accessToken") as string,
logger: log.scope("api/client"),
});
const { token, region } = await webApi.generateSpeechToken();
const sdk = new AzureSpeechSdk(token, region);

View File

@@ -130,6 +130,12 @@ contextBridge.exposeInMainWorld("__ENJOY_APP__", {
getUserDataPath: () => {
return ipcRenderer.invoke("settings-get-user-data-path");
},
getDefaultEngine: () => {
return ipcRenderer.invoke("settings-get-default-engine");
},
setDefaultEngine: (engine: "enjoyai" | "openai") => {
return ipcRenderer.invoke("settings-set-default-engine", engine);
},
getLlm: (provider: string) => {
return ipcRenderer.invoke("settings-get-llm", provider);
},
@@ -183,9 +189,6 @@ contextBridge.exposeInMainWorld("__ENJOY_APP__", {
destroy: (id: string) => {
return ipcRenderer.invoke("audios-destroy", id);
},
transcribe: (id: string) => {
return ipcRenderer.invoke("audios-transcribe", id);
},
upload: (id: string) => {
return ipcRenderer.invoke("audios-upload", id);
},
@@ -209,9 +212,6 @@ contextBridge.exposeInMainWorld("__ENJOY_APP__", {
destroy: (id: string) => {
return ipcRenderer.invoke("videos-destroy", id);
},
transcribe: (id: string) => {
return ipcRenderer.invoke("videos-transcribe", id);
},
upload: (id: string) => {
return ipcRenderer.invoke("videos-upload", id);
},

View File

@@ -44,7 +44,7 @@ export const AudioDetail = (props: { id?: string; md5?: string }) => {
const [currentSegmentIndex, setCurrentSegmentIndex] = useState<number>(0);
const [zoomRatio, setZoomRatio] = useState<number>(1.0);
const [isPlaying, setIsPlaying] = useState(false);
const [isLooping, setIsLooping] = useState(false);
const [playMode, setPlayMode] = useState<"loop" | "single" | "all">("all");
const [playBackRate, setPlaybackRate] = useState<number>(1);
const [displayInlineCaption, setDisplayInlineCaption] =
useState<boolean>(true);
@@ -148,13 +148,23 @@ export const AudioDetail = (props: { id?: string; md5?: string }) => {
setZoomRatio={setZoomRatio}
isPlaying={isPlaying}
setIsPlaying={setIsPlaying}
isLooping={isLooping}
setIsLooping={setIsLooping}
playMode={playMode}
setPlayMode={setPlayMode}
playBackRate={playBackRate}
setPlaybackRate={setPlaybackRate}
displayInlineCaption={displayInlineCaption}
setDisplayInlineCaption={setDisplayInlineCaption}
onShare={() => setSharing(true)}
onDecoded={({ duration, sampleRate }) => {
if (audio.duration) return;
EnjoyApp.audios.update(audio.id, {
metadata: Object.assign({}, audio.metadata, {
duration,
sampleRate,
}),
});
}}
/>
<ScrollArea className={`flex-1 relative bg-muted`}>
@@ -182,7 +192,7 @@ export const AudioDetail = (props: { id?: string; md5?: string }) => {
const segment = transcription?.result?.[index];
if (!segment) return;
if (isLooping && isPlaying) setIsPlaying(false);
if (playMode === "loop" && isPlaying) setIsPlaying(false);
setSeek({
seekTo: segment.offsets.from / 1000,
timestamp: Date.now(),

View File

@@ -34,6 +34,7 @@ import {
import { LayoutGridIcon, LayoutListIcon } from "lucide-react";
import { audiosReducer } from "@renderer/reducers";
import { useNavigate } from "react-router-dom";
import { useTranscribe } from "@renderer/hooks";
export const AudiosComponent = () => {
const [audios, dispatchAudios] = useReducer(audiosReducer, []);
@@ -43,6 +44,7 @@ export const AudiosComponent = () => {
const [transcribing, setTranscribing] = useState<Partial<AudioType> | null>(
null
);
const { transcribe } = useTranscribe();
const { addDblistener, removeDbListener } = useContext(DbProviderContext);
const { EnjoyApp } = useContext(AppSettingsProviderContext);
@@ -237,7 +239,7 @@ export const AudiosComponent = () => {
<AlertDialogTitle>{t("transcribe")}</AlertDialogTitle>
<AlertDialogDescription>
<p className="break-all">
{t("transcribeResourceConfirmation", {
{t("transcribeAudioConfirmation", {
name: transcribing?.name || "",
})}
</p>
@@ -249,8 +251,14 @@ export const AudiosComponent = () => {
className="bg-destructive"
onClick={async () => {
if (!transcribing) return;
await EnjoyApp.audios.transcribe(transcribing.id);
setTranscribing(null);
transcribe({
mediaId: transcribing.id,
mediaSrc: transcribing.src,
mediaType: "Audio",
}).finally(() => {
setTranscribing(null);
});
}}
>
{t("transcribe")}

View File

@@ -76,9 +76,7 @@ export const AudiosTable = (props: {
</TooltipProvider>
</TableCell>
<TableCell>
{audio.metadata?.format?.duration
? secondsToTimestamp(audio.metadata.format.duration)
: "-"}
{audio.duration ? secondsToTimestamp(audio.duration) : "-"}
</TableCell>
<TableCell>{audio.recordingsCount}</TableCell>
<TableCell>

View File

@@ -39,7 +39,9 @@ import { useNavigate } from "react-router-dom";
const conversationFormSchema = z.object({
name: z.string().optional(),
engine: z.enum(["openai", "ollama", "googleGenerativeAi"]).default("openai"),
engine: z
.enum(["enjoyai", "openai", "ollama", "googleGenerativeAi"])
.default("openai"),
configuration: z
.object({
model: z.string().nonempty(),
@@ -53,7 +55,7 @@ const conversationFormSchema = z.object({
historyBufferSize: z.number().min(0).default(10),
tts: z
.object({
engine: z.enum(["openai"]).default("openai"),
engine: z.enum(["openai", "enjoyai"]).default("openai"),
model: z.string().default("tts-1"),
voice: z.string().optional(),
baseUrl: z.string().optional(),
@@ -98,12 +100,13 @@ export const ConversationForm = (props: {
refreshProviders();
}, []);
const defaultConfig = conversationDefaultConfiguration;
const defaultConfig = conversation || {};
if (defaultConfig.engine === "openai" && openai) {
defaultConfig.configuration.model = openai.model;
defaultConfig.configuration.baseUrl = openai.baseUrl;
}
if (defaultConfig.configuration.tts.engine === "openai" && openai) {
if (defaultConfig.configuration?.tts?.engine === "openai" && openai) {
defaultConfig.configuration.tts.baseUrl = openai.baseUrl;
}
@@ -267,7 +270,13 @@ export const ConversationForm = (props: {
<FormLabel>
{t("models.conversation.roleDefinition")}
</FormLabel>
<Textarea className="h-64" {...field} />
<Textarea
placeholder={t(
"models.conversation.roleDefinitionPlaceholder"
)}
className="h-64"
{...field}
/>
<FormMessage />
</FormItem>
)}
@@ -468,7 +477,7 @@ export const ConversationForm = (props: {
<FormLabel>{t("models.conversation.baseUrl")}</FormLabel>
<Input {...field} />
<FormDescription>
{t("models.conversation.baseUrl")}
{t("models.conversation.baseUrlDescription")}
</FormDescription>
<FormMessage />
</FormItem>
@@ -505,84 +514,96 @@ export const ConversationForm = (props: {
)}
/>
<FormField
control={form.control}
name="configuration.tts.model"
render={({ field }) => (
<FormItem>
<FormLabel>{t("models.conversation.ttsModel")}</FormLabel>
<Select
onValueChange={field.onChange}
defaultValue={field.value}
value={field.value}
>
<FormControl>
<SelectTrigger>
<SelectValue placeholder={t("selectTtsModel")} />
</SelectTrigger>
</FormControl>
<SelectContent>
{(
TTS_PROVIDERS[form.watch("configuration.tts.engine")]
?.models || []
).map((model: string) => (
<SelectItem key={model} value={model}>
{model}
</SelectItem>
))}
</SelectContent>
</Select>
<FormMessage />
</FormItem>
)}
/>
{TTS_PROVIDERS[
form.watch("configuration.tts.engine")
]?.configurable.includes("model") && (
<FormField
control={form.control}
name="configuration.tts.model"
render={({ field }) => (
<FormItem>
<FormLabel>{t("models.conversation.ttsModel")}</FormLabel>
<Select
onValueChange={field.onChange}
defaultValue={field.value}
value={field.value}
>
<FormControl>
<SelectTrigger>
<SelectValue placeholder={t("selectTtsModel")} />
</SelectTrigger>
</FormControl>
<SelectContent>
{(
TTS_PROVIDERS[form.watch("configuration.tts.engine")]
?.models || []
).map((model: string) => (
<SelectItem key={model} value={model}>
{model}
</SelectItem>
))}
</SelectContent>
</Select>
<FormMessage />
</FormItem>
)}
/>
)}
<FormField
control={form.control}
name="configuration.tts.voice"
render={({ field }) => (
<FormItem>
<FormLabel>{t("models.conversation.ttsVoice")}</FormLabel>
<Select
onValueChange={field.onChange}
defaultValue={field.value}
value={field.value}
>
<FormControl>
<SelectTrigger>
<SelectValue placeholder={t("selectTtsVoice")} />
</SelectTrigger>
</FormControl>
<SelectContent>
{(
TTS_PROVIDERS[form.watch("configuration.tts.engine")]
?.voices || []
).map((voice: string) => (
<SelectItem key={voice} value={voice}>
<span className="capitalize">{voice}</span>
</SelectItem>
))}
</SelectContent>
</Select>
<FormMessage />
</FormItem>
)}
/>
{TTS_PROVIDERS[
form.watch("configuration.tts.engine")
]?.configurable.includes("voice") && (
<FormField
control={form.control}
name="configuration.tts.voice"
render={({ field }) => (
<FormItem>
<FormLabel>{t("models.conversation.ttsVoice")}</FormLabel>
<Select
onValueChange={field.onChange}
defaultValue={field.value}
value={field.value}
>
<FormControl>
<SelectTrigger>
<SelectValue placeholder={t("selectTtsVoice")} />
</SelectTrigger>
</FormControl>
<SelectContent>
{(
TTS_PROVIDERS[form.watch("configuration.tts.engine")]
?.voices || []
).map((voice: string) => (
<SelectItem key={voice} value={voice}>
<span className="capitalize">{voice}</span>
</SelectItem>
))}
</SelectContent>
</Select>
<FormMessage />
</FormItem>
)}
/>
)}
<FormField
control={form.control}
name="configuration.tts.baseUrl"
render={({ field }) => (
<FormItem>
<FormLabel>{t("models.conversation.ttsBaseUrl")}</FormLabel>
<Input {...field} />
<FormDescription>
{t("models.conversation.baseUrl")}
</FormDescription>
<FormMessage />
</FormItem>
)}
/>
{TTS_PROVIDERS[
form.watch("configuration.tts.engine")
]?.configurable.includes("baseUrl") && (
<FormField
control={form.control}
name="configuration.tts.baseUrl"
render={({ field }) => (
<FormItem>
<FormLabel>{t("models.conversation.ttsBaseUrl")}</FormLabel>
<Input {...field} />
<FormDescription>
{t("models.conversation.ttsBaseUrlDescription")}
</FormDescription>
<FormMessage />
</FormItem>
)}
/>
)}
</div>
</ScrollArea>
@@ -637,6 +658,32 @@ export const ConversationForm = (props: {
};
export const LLM_PROVIDERS: { [key: string]: any } = {
enjoyai: {
name: "EnjoyAI",
models: [
"gpt-3.5-turbo-1106",
"gpt-3.5-turbo",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-instruct",
"gpt-4-1106-preview",
"gpt-4-vision-preview",
"gpt-4",
"gpt-4-32k",
"gpt-4-0613",
"gpt-4-32k-0613",
],
configurable: [
"model",
"roleDefinition",
"temperature",
"numberOfChoices",
"maxTokens",
"frequencyPenalty",
"presencePenalty",
"historyBufferSize",
"tts",
],
},
openai: {
name: "OpenAI",
description: t("youNeedToSetupApiKeyBeforeUsingOpenAI"),
@@ -697,38 +744,17 @@ export const LLM_PROVIDERS: { [key: string]: any } = {
};
export const TTS_PROVIDERS: { [key: string]: any } = {
enjoyai: {
name: "EnjoyAI",
models: ["tts-1"],
voices: ["alloy", "echo", "fable", "onyx", "nova", "shimmer"],
configurable: ["voice"],
},
openai: {
name: "OpenAI",
description: t("youNeedToSetupApiKeyBeforeUsingOpenAI"),
models: ["tts-1"],
voices: ["alloy", "echo", "fable", "onyx", "nova", "shimmer"],
},
};
const conversationDefaultConfiguration = {
name: "英语教练",
engine: "openai",
configuration: {
model: "gpt-4-1106-preview",
baseUrl: "",
roleDefinition: `你是我的英语教练。
请将我的话改写成英文。
不需要逐字翻译。
请分析清楚我的内容,而后用英文重新逻辑清晰地组织它。
请使用地道的美式英语,纽约腔调。
请尽量使用日常词汇,尽量优先使用短语动词或者习惯用语。
每个句子最长不应该超过 20 个单词。`,
temperature: 0.2,
numberOfChoices: 1,
maxTokens: 2048,
presencePenalty: 0,
frequencyPenalty: 0,
historyBufferSize: 0,
tts: {
baseUrl: "",
engine: "openai",
model: "tts-1",
voice: "alloy",
},
configurable: ["model", "voice", "baseUrl"],
},
};

View File

@@ -0,0 +1,104 @@
import { useContext, useEffect, useState } from "react";
import { AppSettingsProviderContext } from "@renderer/context";
import { Button, ScrollArea } from "@renderer/components/ui";
import { LoaderSpin } from "@renderer/components";
import { MessageCircleIcon, LoaderIcon } from "lucide-react";
import { useNavigate } from "react-router-dom";
import { t } from "i18next";
// Paginated picker of recent conversations. Clicking an entry navigates to
// that conversation with `prompt` carried along as the `text` query param
// (used e.g. to forward a message into another conversation).
export const ConversationsList = (props: {
// text to pre-fill in the target conversation's input (sent via `?text=`)
prompt: string;
// conversation ids to hide from the list (e.g. the current conversation)
excludedIds?: string[];
}) => {
const { EnjoyApp } = useContext(AppSettingsProviderContext);
const { prompt, excludedIds = [] } = props;
const [conversations, setConversations] = useState<ConversationType[]>([]);
const [loading, setLoading] = useState<boolean>(false);
// pagination cursor; -1 is the sentinel for "no more pages to fetch"
const [offset, setOffset] = useState<number>(0);
const navigate = useNavigate();
// Fetch the next page (5 items, most recently updated first) and append it.
// NOTE(review): `offset` / `conversations` are read from the render closure,
// so overlapping calls could use stale values — presumably acceptable for a
// button-driven "load more"; confirm before wiring this to scroll events.
const fetchConversations = () => {
if (offset === -1) return;
const limit = 5;
setLoading(true);
EnjoyApp.conversations
.findAll({
order: [["updatedAt", "DESC"]],
limit,
offset,
})
.then((_conversations) => {
// empty page: mark the list as exhausted, keep current items untouched
if (_conversations.length === 0) {
setOffset(-1);
return;
}
// a short page means this was the last one
if (_conversations.length < limit) {
setOffset(-1);
} else {
setOffset(offset + _conversations.length);
}
// first page replaces state; subsequent pages are appended
if (offset === 0) {
setConversations(_conversations);
} else {
setConversations([...conversations, ..._conversations]);
}
})
.finally(() => {
setLoading(false);
});
};
// initial page load on mount
useEffect(() => {
fetchConversations();
}, []);
// NOTE(review): while "load more" is in flight this replaces the whole list
// with a spinner (already-loaded items disappear); confirm that is the
// intended UX.
if (loading) {
return <LoaderSpin />;
}
return (
<ScrollArea>
{conversations
.filter((c) => !excludedIds.includes(c.id))
.map((conversation) => {
return (
<div
key={conversation.id}
onClick={() => {
navigate(`/conversations/${conversation.id}?text=${prompt}`);
}}
className="bg-background text-primary rounded-full w-full mb-2 py-2 px-4 hover:bg-primary hover:text-white cursor-pointer flex items-center border"
style={{
// derive a stable accent color from the conversation's uuid
// NOTE(review): String.prototype.substr is deprecated —
// slice(0, 6) is the modern equivalent
borderLeftColor: `#${conversation.id
.replaceAll("-", "")
.substr(0, 6)}`,
borderLeftWidth: 3,
}}
>
<div className="">
<MessageCircleIcon className="mr-2" />
</div>
{/* NOTE(review): "truncated" is not a Tailwind utility — likely meant "truncate"; verify */}
<div className="flex-1 truncated">{conversation.name}</div>
</div>
);
})}
{offset > -1 && (
<div className="flex justify-center">
<Button
variant="ghost"
onClick={() => fetchConversations()}
disabled={loading || offset === -1}
className="px-4 py-2"
>
{t("loadMore")}
{loading && <LoaderIcon className="w-4 h-4 animate-spin ml-2" />}
</Button>
</div>
)}
</ScrollArea>
);
};

View File

@@ -1,8 +1,9 @@
import { useContext, useEffect, useState } from "react";
import { AppSettingsProviderContext } from "@renderer/context";
import { ScrollArea, toast } from "@renderer/components/ui";
import { Button, ScrollArea, toast } from "@renderer/components/ui";
import { LoaderSpin } from "@renderer/components";
import { MessageCircleIcon } from "lucide-react";
import { MessageCircleIcon, LoaderIcon } from "lucide-react";
import { t } from "i18next";
export const ConversationsShortcut = (props: {
prompt: string;
@@ -12,6 +13,41 @@ export const ConversationsShortcut = (props: {
const { prompt, onReply } = props;
const [conversations, setConversations] = useState<ConversationType[]>([]);
const [loading, setLoading] = useState<boolean>(false);
const [offset, setOffset] = useState<number>(0);
// Fetch the next page of conversations (5 per page, most recently updated
// first) and append it to local state. Mirrors the pagination logic in
// ConversationsList; `offset === -1` is the "no more pages" sentinel.
// NOTE(review): `offset` / `conversations` come from the render closure, so
// overlapping calls could read stale values — acceptable for a button-driven
// "load more", but confirm before reusing elsewhere.
const fetchConversations = () => {
if (offset === -1) return;
const limit = 5;
setLoading(true);
EnjoyApp.conversations
.findAll({
order: [["updatedAt", "DESC"]],
limit,
offset,
})
.then((_conversations) => {
// empty page: mark the list as exhausted
if (_conversations.length === 0) {
setOffset(-1);
return;
}
// a short page means this was the last one
if (_conversations.length < limit) {
setOffset(-1);
} else {
setOffset(offset + _conversations.length);
}
// first page replaces state; subsequent pages are appended
if (offset === 0) {
setConversations(_conversations);
} else {
setConversations([...conversations, ..._conversations]);
}
})
.finally(() => {
setLoading(false);
});
};
const ask = (conversation: ConversationType) => {
setLoading(true);
@@ -31,10 +67,7 @@ export const ConversationsShortcut = (props: {
};
useEffect(() => {
EnjoyApp.conversations.findAll({ limit: 10 }).then((conversations) => {
setConversations(conversations);
setLoading(false);
});
fetchConversations();
}, []);
if (loading) {
@@ -63,6 +96,20 @@ export const ConversationsShortcut = (props: {
</div>
);
})}
{offset > -1 && (
<div className="flex justify-center">
<Button
variant="ghost"
onClick={() => fetchConversations()}
disabled={loading || offset === -1}
className="px-4 py-2"
>
{t("loadMore")}
{loading && <LoaderIcon className="w-4 h-4 animate-spin ml-2" />}
</Button>
</div>
)}
</ScrollArea>
);
};

View File

@@ -1,5 +1,6 @@
export * from "./conversation-form";
export * from "./conversations-shortcut";
export * from "./conversations-list";
export * from "./speech-form";
export * from "./speech-player";

View File

@@ -1,14 +1,9 @@
import {
AppSettingsProviderContext,
AISettingsProviderContext,
} from "@renderer/context";
import { useState, useContext, useEffect } from "react";
import { useState, useEffect } from "react";
import { LoaderSpin, MeaningCard } from "@renderer/components";
import { Button } from "@renderer/components/ui";
import { t } from "i18next";
import { XCircleIcon } from "lucide-react";
import { toast } from "@renderer/components/ui";
import { lookupCommand } from "@commands";
import { useAiCommand } from "@renderer/hooks";
export const LookupResult = (props: {
word: string;
@@ -22,64 +17,26 @@ export const LookupResult = (props: {
const [loading, setLoading] = useState<boolean>(true);
if (!word) return null;
const { webApi } = useContext(AppSettingsProviderContext);
const { openai } = useContext(AISettingsProviderContext);
const { lookupWord } = useAiCommand();
const processLookup = async () => {
if (!word) return;
if (!loading) return;
setLoading(true);
const lookup = await webApi.lookup({
lookupWord({
word,
context,
sourceId,
sourceType,
});
if (lookup.meaning) {
setResult(lookup);
setLoading(false);
onResult && onResult(lookup.meaning);
} else {
if (!openai?.key) {
toast.error(t("openaiApiKeyRequired"));
return;
}
lookupCommand(
{
word,
context,
meaningOptions: lookup.meaningOptions,
},
{
key: openai.key,
modelName: openai.model,
baseUrl: openai.baseUrl,
}
)
.then((res) => {
if (res.context_translation?.trim()) {
webApi
.updateLookup(lookup.id, {
meaning: res,
sourceId,
sourceType,
})
.then((lookup) => {
setResult(lookup);
onResult && onResult(lookup.meaning);
});
}
})
.catch((err) => {
toast.error(`${t("lookupFailed")}: ${err.message}`);
})
.finally(() => {
setLoading(false);
});
}
})
.then((lookup) => {
setResult(lookup);
onResult && onResult(lookup.meaning);
})
.finally(() => {
setLoading(false);
});
};
useEffect(() => {

View File

@@ -1,4 +1,4 @@
import { useState, useEffect, useContext } from "react";
import { useState, useEffect } from "react";
import { cn } from "@renderer/lib/utils";
import {
Button,
@@ -19,13 +19,8 @@ import {
LoaderIcon,
SpeechIcon,
} from "lucide-react";
import { translateCommand, ipaCommand } from "@commands";
import {
AppSettingsProviderContext,
AISettingsProviderContext,
} from "@renderer/context";
import { t } from "i18next";
import { md5 } from "js-md5";
import { useAiCommand } from "@renderer/hooks";
export const MediaCaption = (props: {
mediaId: string;
@@ -62,8 +57,7 @@ export const MediaCaption = (props: {
const [ipaGenerating, setIpaGenerating] = useState<boolean>(false);
const [displayIpa, setDisplayIpa] = useState<boolean>(false);
const { EnjoyApp } = useContext(AppSettingsProviderContext);
const { openai } = useContext(AISettingsProviderContext);
const { translate, pronounce } = useAiCommand();
const toogleIPA = async () => {
if (ipaGenerating) return;
@@ -73,39 +67,28 @@ export const MediaCaption = (props: {
return;
}
const hash = md5.create();
hash.update(transcription.text);
const cacheKey = `ipa-${hash.hex()}`;
const cached = await EnjoyApp.cacheObjects.get(cacheKey);
if (cached) {
setIpa(cached);
return;
}
if (!openai?.key) {
toast.error(t("openaiApiKeyRequired"));
return;
}
setIpaGenerating(true);
ipaCommand(transcription.text, {
key: openai.key,
modelName: openai.model,
baseUrl: openai.baseUrl,
})
.then((result) => {
if (result?.words?.length > 0) {
setIpa(result.words);
EnjoyApp.cacheObjects.set(cacheKey, result.words);
setDisplayIpa(true);
}
})
.finally(() => {
setIpaGenerating(false);
});
toast.promise(
pronounce(transcription.text)
.then((words) => {
if (words?.length > 0) {
setIpa(words);
setDisplayIpa(true);
}
})
.finally(() => {
setIpaGenerating(false);
}),
{
loading: t("generatingIpa"),
success: t("generatedIpaSuccessfully"),
error: (err) => t("generatingIpaFailed", { error: err.message }),
position: "bottom-right",
}
);
};
const translate = async () => {
const toggleTranslation = async () => {
if (translating) return;
if (translation) {
@@ -113,36 +96,24 @@ export const MediaCaption = (props: {
return;
}
const hash = md5.create();
hash.update(transcription.text);
const cacheKey = `translate-${hash.hex()}`;
const cached = await EnjoyApp.cacheObjects.get(cacheKey);
if (cached) {
setTranslation(cached);
return;
}
if (!openai?.key) {
toast.error(t("openaiApiKeyRequired"));
return;
}
setTranslating(true);
translateCommand(transcription.text, {
key: openai.key,
modelName: openai.model,
baseUrl: openai.baseUrl,
})
.then((result) => {
if (result) {
setTranslation(result);
EnjoyApp.cacheObjects.set(cacheKey, result);
setDisplayTranslation(true);
}
})
.finally(() => {
setTranslating(false);
});
toast.promise(
translate(transcription.text)
.then((result) => {
if (result) {
setTranslation(result);
setDisplayTranslation(true);
}
})
.finally(() => {
setTranslating(false);
}),
{
loading: t("translating"),
success: t("translatedSuccessfully"),
error: (err) => t("translationFailed", { error: err.message }),
position: "bottom-right",
}
);
};
useEffect(() => {
@@ -223,7 +194,7 @@ export const MediaCaption = (props: {
<DropdownMenuItem
className="cursor-pointer capitalize"
disabled={translating}
onClick={translate}
onClick={toggleTranslation}
>
{translating ? (
<LoaderIcon className="w-4 h-4 mr-2 animate-spin" />

View File

@@ -17,21 +17,26 @@ import {
GalleryHorizontalIcon,
SpellCheckIcon,
Share2Icon,
ListRestartIcon,
SkipForwardIcon,
SkipBackIcon,
} from "lucide-react";
import { t } from "i18next";
import { type WaveSurferOptions } from "wavesurfer.js";
import { Tooltip } from "react-tooltip";
const PLAYBACK_RATE_OPTIONS = [0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75];
const PLAYBACK_RATE_OPTIONS = [0.8, 0.9, 1.0];
const ZOOM_RATIO_OPTIONS = [0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0];
const MIN_ZOOM_RATIO = 0.25;
const MAX_ZOOM_RATIO = 5.0;
const MAX_ZOOM_RATIO = 2.0;
export const MediaPlayerControls = (props: {
isPlaying: boolean;
isLooping: boolean;
playMode?: "loop" | "single" | "all";
setPlayMode?: (mode: "loop" | "single" | "all") => void;
onPlayOrPause: () => void;
onPause?: () => void;
onLoop?: () => void;
onNext?: () => void;
onPrev?: () => void;
onRecord?: () => void;
playbackRate: number;
setPlaybackRate: (rate: number) => void;
@@ -51,9 +56,11 @@ export const MediaPlayerControls = (props: {
}) => {
const {
isPlaying,
isLooping,
playMode,
setPlayMode,
onPlayOrPause,
onLoop,
onNext,
onPrev,
playbackRate,
setPlaybackRate,
fitZoomRatio,
@@ -73,8 +80,23 @@ export const MediaPlayerControls = (props: {
return (
<div className="w-full flex items-center justify-center space-x-1 relative">
{
onPrev && (
<Button
variant="ghost"
onClick={onPrev}
data-tooltip-id="media-player-controls-tooltip"
data-tooltip-content={t("playPreviousSegment")}
className="aspect-square p-0 h-10"
>
<SkipBackIcon className="w-6 h-6" />
</Button>
)
}
{isPlaying ? (
<Button
variant="secondary"
onClick={onPlayOrPause}
data-tooltip-id="media-player-controls-tooltip"
data-tooltip-content={t("pause")}
@@ -93,31 +115,59 @@ export const MediaPlayerControls = (props: {
<PlayIcon className="w-6 h-6" />
</Button>
)}
{isLooping ? (
{
onNext && (
<Button
variant="ghost"
onClick={onNext}
data-tooltip-id="media-player-controls-tooltip"
data-tooltip-content={t("playNextSegment")}
className="aspect-square p-0 h-10"
>
<SkipForwardIcon className="w-6 h-6" />
</Button>
)
}
{playMode === "single" && (
<Button
onClick={onLoop}
variant="ghost"
onClick={() => setPlayMode("loop")}
data-tooltip-id="media-player-controls-tooltip"
data-tooltip-content={t("stopLoop")}
data-tooltip-content={t("playSingleSegment")}
className="aspect-square p-0 h-10"
>
<RepeatIcon className="w-6 h-6" />
</Button>
)}
{playMode === "loop" && (
<Button
variant="secondary"
onClick={() => setPlayMode("all")}
data-tooltip-id="media-player-controls-tooltip"
data-tooltip-content={t("playInLoop")}
className="aspect-square p-0 h-10"
>
<Repeat1Icon className="w-6 h-6" />
</Button>
) : (
)}
{playMode === "all" && (
<Button
variant="ghost"
onClick={onLoop}
onClick={() => setPlayMode("single")}
data-tooltip-id="media-player-controls-tooltip"
data-tooltip-content={t("loop")}
data-tooltip-content={t("playAllSegments")}
className="aspect-square p-0 h-10"
>
<RepeatIcon className="w-6 h-6" />
<ListRestartIcon className="w-6 h-6" />
</Button>
)}
<Popover>
<PopoverTrigger asChild>
<Button
variant={`${playbackRate == 1.0 ? "ghost" : "default"}`}
variant={`${playbackRate == 1.0 ? "ghost" : "secondary"}`}
data-tooltip-id="media-player-controls-tooltip"
data-tooltip-content={t("playbackSpeed")}
className="relative aspect-square p-0 h-10"
@@ -145,11 +195,7 @@ export const MediaPlayerControls = (props: {
setPlaybackRate(rate);
}}
>
{[0.5, 1.0, 1.5].includes(rate) || rate === playbackRate ? (
<span className="">{rate}</span>
) : (
<div className="h-2 w-[1px] bg-black/50"></div>
)}
<span className="">{rate}</span>
</div>
))}
</div>
@@ -157,13 +203,13 @@ export const MediaPlayerControls = (props: {
</Popover>
<Button
variant={`${zoomRatio > 1.0 ? "default" : "ghost"}`}
variant={`${zoomRatio > 1.0 ? "secondary" : "ghost"}`}
data-tooltip-id="media-player-controls-tooltip"
data-tooltip-content={t("zoomIn")}
className="relative aspect-square p-0 h-10"
onClick={() => {
if (zoomRatio < MAX_ZOOM_RATIO) {
const nextZoomRatio = PLAYBACK_RATE_OPTIONS.find(
const nextZoomRatio = ZOOM_RATIO_OPTIONS.find(
(rate) => rate > zoomRatio
);
setZoomRatio(nextZoomRatio || MAX_ZOOM_RATIO);
@@ -174,13 +220,13 @@ export const MediaPlayerControls = (props: {
</Button>
<Button
variant={`${zoomRatio < 1.0 ? "default" : "ghost"}`}
variant={`${zoomRatio < 1.0 ? "secondary" : "ghost"}`}
data-tooltip-id="media-player-controls-tooltip"
data-tooltip-content={t("zoomOut")}
className="relative aspect-square p-0 h-10"
onClick={() => {
if (zoomRatio > MIN_ZOOM_RATIO) {
const nextZoomRatio = PLAYBACK_RATE_OPTIONS.reverse().find(
const nextZoomRatio = ZOOM_RATIO_OPTIONS.reverse().find(
(rate) => rate < zoomRatio
);
setZoomRatio(nextZoomRatio || MIN_ZOOM_RATIO);
@@ -191,7 +237,7 @@ export const MediaPlayerControls = (props: {
</Button>
<Button
variant={`${zoomRatio === fitZoomRatio ? "default" : "ghost"}`}
variant={`${zoomRatio === fitZoomRatio ? "secondary" : "ghost"}`}
data-tooltip-id="media-player-controls-tooltip"
data-tooltip-content={t("zoomToFit")}
className="relative aspect-square p-0 h-10"
@@ -207,7 +253,7 @@ export const MediaPlayerControls = (props: {
</Button>
<Button
variant={`${wavesurferOptions?.autoCenter ? "default" : "ghost"}`}
variant={`${wavesurferOptions?.autoCenter ? "secondary" : "ghost"}`}
data-tooltip-id="media-player-controls-tooltip"
data-tooltip-content={t("autoCenter")}
className="relative aspect-square p-0 h-10"
@@ -221,7 +267,7 @@ export const MediaPlayerControls = (props: {
</Button>
<Button
variant={`${displayInlineCaption ? "default" : "ghost"}`}
variant={`${displayInlineCaption ? "secondary" : "ghost"}`}
data-tooltip-id="media-player-controls-tooltip"
data-tooltip-content={t("inlineCaption")}
className="relative aspect-square p-0 h-10"
@@ -234,7 +280,7 @@ export const MediaPlayerControls = (props: {
{setRecordButtonVisible && (
<Button
variant={`${recordButtonVisible ? "default" : "ghost"}`}
variant={`${recordButtonVisible ? "secondary" : "ghost"}`}
data-tooltip-id="media-player-controls-tooltip"
data-tooltip-content={t("record")}
className="relative aspect-square p-0 h-10"

View File

@@ -54,13 +54,14 @@ export const MediaPlayer = (props: {
setZoomRatio: (value: number) => void;
isPlaying: boolean;
setIsPlaying: (value: boolean) => void;
isLooping: boolean;
setIsLooping: (value: boolean) => void;
playMode?: "loop" | "single" | "all";
setPlayMode?: (value: "loop" | "single" | "all") => void;
playBackRate: number;
setPlaybackRate: (value: number) => void;
displayInlineCaption?: boolean;
setDisplayInlineCaption?: (value: boolean) => void;
onShare?: () => void;
onDecoded?: (data: { duration: number; sampleRate: number }) => void;
}) => {
const { EnjoyApp } = useContext(AppSettingsProviderContext);
const {
@@ -83,13 +84,14 @@ export const MediaPlayer = (props: {
setZoomRatio,
isPlaying,
setIsPlaying,
isLooping,
setIsLooping,
playMode,
setPlayMode,
playBackRate,
setPlaybackRate,
displayInlineCaption,
setDisplayInlineCaption,
onShare,
onDecoded,
} = props;
if (!mediaUrl) return;
@@ -141,7 +143,7 @@ export const MediaPlayer = (props: {
const findCurrentSegment = (time: number) => {
if (!transcription) return;
if (isPlaying && isLooping) return;
if (isPlaying && playMode === "loop") return;
time = Math.round(time * 1000);
const index = transcriptionResult.findIndex(
@@ -336,6 +338,11 @@ export const MediaPlayer = (props: {
};
EnjoyApp.waveforms.save(mediaMd5, _waveform);
setWaveForm(_waveform);
onDecoded &&
onDecoded({
duration,
sampleRate,
});
}),
wavesurfer.on("ready", () => {
setInitialized(true);
@@ -376,7 +383,7 @@ export const MediaPlayer = (props: {
const subscriptions = [
wavesurfer.on("finish", () => {
if (!isLooping) return;
if (playMode !== "loop") return;
regions?.getRegions()[0]?.play();
}),
@@ -426,8 +433,11 @@ export const MediaPlayer = (props: {
renderPitchContour(region);
}),
regions.on("region-out", (region: Region) => {
if (isPlaying && isLooping) {
if (isPlaying && playMode === "loop") {
region.play();
} else if (isPlaying && playMode === "single") {
wavesurfer.pause();
wavesurfer.seekTo(region.start / wavesurfer.getDuration());
} else {
resetTranscription();
}
@@ -437,7 +447,7 @@ export const MediaPlayer = (props: {
return () => {
subscriptions.forEach((unsub) => unsub());
};
}, [regions, isPlaying, isLooping, currentSegmentIndex, transcriptionDirty]);
}, [regions, isPlaying, playMode, currentSegmentIndex, transcriptionDirty]);
useEffect(() => {
if (!wavesurfer) return;
@@ -481,6 +491,11 @@ export const MediaPlayer = (props: {
useEffect(() => {
EnjoyApp.waveforms.find(mediaMd5).then((waveform) => {
setWaveForm(waveform);
onDecoded &&
onDecoded({
duration: waveform.duration,
sampleRate: waveform.sampleRate,
});
});
}, []);
@@ -518,10 +533,24 @@ export const MediaPlayer = (props: {
<MediaPlayerControls
isPlaying={isPlaying}
onPlayOrPause={onPlayClick}
isLooping={isLooping}
onLoop={() => {
setIsLooping(!isLooping);
onNext={() => {
if (!transcription) return;
const segment = transcription?.result?.[currentSegmentIndex + 1];
if (!segment) return;
wavesurfer.seekTo(segment.offsets.from / 1000 / wavesurfer.getDuration());
}}
onPrev={() => {
if (!transcription) return;
const segment = transcription?.result?.[currentSegmentIndex - 1];
if (!segment) return;
wavesurfer.seekTo(segment.offsets.from / 1000 / wavesurfer.getDuration());
}}
playMode={playMode}
setPlayMode={setPlayMode}
playbackRate={playBackRate}
setPlaybackRate={handlePlaybackRateChange}
zoomRatio={zoomRatio}

View File

@@ -21,7 +21,7 @@ import {
DbProviderContext,
AppSettingsProviderContext,
} from "@renderer/context";
import { fetchFile } from "@ffmpeg/util";
import { useTranscribe } from "@renderer/hooks";
export const MediaTranscription = (props: {
transcription: TranscriptionType;
@@ -33,7 +33,7 @@ export const MediaTranscription = (props: {
onSelectSegment?: (index: number) => void;
}) => {
const { addDblistener, removeDbListener } = useContext(DbProviderContext);
const { EnjoyApp, ffmpeg } = useContext(AppSettingsProviderContext);
const { EnjoyApp } = useContext(AppSettingsProviderContext);
const {
transcription,
mediaId,
@@ -44,60 +44,23 @@ export const MediaTranscription = (props: {
onSelectSegment,
} = props;
const containerRef = React.createRef<HTMLDivElement>();
const [transcoding, setTranscoding] = useState<boolean>(false);
const [transcribing, setTranscribing] = useState<boolean>(false);
const { transcribe } = useTranscribe();
const [recordingStats, setRecordingStats] =
useState<SegementRecordingStatsType>([]);
const generate = async () => {
const data = await transcode();
let blob;
if (data) {
blob = {
type: data.type.split(";")[0],
arrayBuffer: await data.arrayBuffer(),
};
}
if (transcribing) return;
EnjoyApp.transcriptions.process(
{
targetId: mediaId,
targetType: mediaType,
},
{
blob,
}
);
};
const transcode = async () => {
if (!ffmpeg?.loaded) return;
if (transcoding) return;
try {
setTranscoding(true);
const uri = new URL(mediaUrl);
const input = uri.pathname.split("/").pop();
const output = input.replace(/\.[^/.]+$/, ".wav");
await ffmpeg.writeFile(input, await fetchFile(mediaUrl));
await ffmpeg.exec([
"-i",
input,
"-ar",
"16000",
"-ac",
"1",
"-c:a",
"pcm_s16le",
output,
]);
const data = await ffmpeg.readFile(output);
setTranscoding(false);
return new Blob([data], { type: "audio/wav" });
} catch (e) {
setTranscoding(false);
toast.error(t("transcodeError"));
}
setTranscribing(true);
transcribe({
mediaId,
mediaType,
mediaSrc: mediaUrl,
}).finally(() => {
setTranscribing(false);
});
};
const fetchSegmentStats = async () => {
@@ -141,7 +104,7 @@ export const MediaTranscription = (props: {
<div className="w-full h-full flex flex-col">
<div className="mb-4 flex items-cener justify-between">
<div className="flex items-center space-x-2">
{transcoding || transcription.state === "processing" ? (
{transcribing || transcription.state === "processing" ? (
<PingPoint colorClassName="bg-yellow-500" />
) : transcription.state === "finished" ? (
<CheckCircleIcon className="text-green-500 w-4 h-4" />
@@ -153,10 +116,10 @@ export const MediaTranscription = (props: {
<AlertDialog>
<AlertDialogTrigger asChild>
<Button
disabled={transcoding || transcription.state === "processing"}
disabled={transcribing || transcription.state === "processing"}
className="capitalize"
>
{(transcoding || transcription.state === "processing") && (
{(transcribing || transcription.state === "processing") && (
<LoaderIcon className="animate-spin w-4 mr-2" />
)}
{transcription.result ? t("regenerate") : t("transcribe")}

View File

@@ -2,12 +2,21 @@ import {
Avatar,
AvatarImage,
AvatarFallback,
Dialog,
DialogTrigger,
DialogContent,
DialogHeader,
DialogTitle,
Sheet,
SheetContent,
SheetHeader,
SheetClose,
} from "@renderer/components/ui";
import { SpeechPlayer, AudioDetail } from "@renderer/components";
import {
SpeechPlayer,
AudioDetail,
ConversationsList,
} from "@renderer/components";
import { useState, useEffect, useContext } from "react";
import {
LoaderIcon,
@@ -16,6 +25,7 @@ import {
SpeechIcon,
MicIcon,
ChevronDownIcon,
ForwardIcon,
} from "lucide-react";
import { useCopyToClipboard } from "@uidotdev/usehooks";
import { t } from "i18next";
@@ -155,6 +165,27 @@ export const AssistantMessageComponent = (props: {
/>
))}
<Dialog>
<DialogTrigger>
<ForwardIcon
data-tooltip-id="global-tooltip"
data-tooltip-content={t("forward")}
className="w-3 h-3 cursor-pointer"
/>
</DialogTrigger>
<DialogContent>
<DialogHeader>
<DialogTitle>{t("forward")}</DialogTitle>
</DialogHeader>
<div className="">
<ConversationsList
prompt={message.content}
excludedIds={[message.conversationId]}
/>
</div>
</DialogContent>
</Dialog>
{Boolean(speech) &&
(resourcing ? (
<LoaderIcon

View File

@@ -12,6 +12,11 @@ import {
AvatarImage,
AvatarFallback,
Button,
Dialog,
DialogTrigger,
DialogContent,
DialogHeader,
DialogTitle,
DropdownMenu,
DropdownMenuTrigger,
DropdownMenuContent,
@@ -19,7 +24,7 @@ import {
DropdownMenuSeparator,
toast,
} from "@renderer/components/ui";
import { SpeechPlayer } from "@renderer/components";
import { SpeechPlayer, ConversationsList } from "@renderer/components";
import { useContext, useState } from "react";
import { AppSettingsProviderContext } from "@renderer/context";
import {
@@ -29,6 +34,7 @@ import {
CopyIcon,
CheckIcon,
Share2Icon,
ForwardIcon,
} from "lucide-react";
import { useCopyToClipboard } from "@uidotdev/usehooks";
import { t } from "i18next";
@@ -126,6 +132,27 @@ export const UserMessageComponent = (props: {
/>
)}
<Dialog>
<DialogTrigger>
<ForwardIcon
data-tooltip-id="global-tooltip"
data-tooltip-content={t("forward")}
className="w-3 h-3 cursor-pointer"
/>
</DialogTrigger>
<DialogContent>
<DialogHeader>
<DialogTitle>{t("forward")}</DialogTitle>
</DialogHeader>
<div className="">
<ConversationsList
prompt={message.content}
excludedIds={[message.conversationId]}
/>
</div>
</DialogContent>
</Dialog>
{message.createdAt && (
<AlertDialog>
<AlertDialogTrigger asChild>

View File

@@ -0,0 +1,34 @@
import { t } from "i18next";
import { AppSettingsProviderContext } from "@renderer/context";
import { useContext, useState, useEffect } from "react";
import { Button } from "@renderer/components/ui";
// Preferences panel row showing the user's account balance with a
// (currently disabled) deposit button.
export const BalanceSettings = () => {
const { webApi } = useContext(AppSettingsProviderContext);
const [balance, setBalance] = useState<number>(0);
// Load the balance once on mount.
// NOTE(review): no .catch — a failed me() call rejects silently; confirm
// webApi surfaces errors elsewhere.
useEffect(() => {
webApi.me().then((user) => {
setBalance(user.balance);
});
}, []);
// NOTE(review): the falsy check also hides a legitimate balance of exactly
// 0 (and the row while the fetch is pending) — confirm that is intended.
if (!balance) return null;
return (
<div className="flex items-start justify-between py-4">
<div className="">
<div className="mb-2">{t("balance")}</div>
<div className="text-sm text-muted-foreground mb-2">${balance}</div>
</div>
{/* deposit is not implemented yet; button is rendered non-interactive */}
<Button
data-tooltip-id="preferences-tooltip"
data-tooltip-content={t("notAvailableYet")}
variant="secondary"
size="sm"
className="cursor-not-allowed"
>
{t("deposit")}
</Button>
</div>
);
};

View File

@@ -0,0 +1,51 @@
import { t } from "i18next";
import {
Select,
SelectTrigger,
SelectContent,
SelectItem,
SelectValue,
toast,
} from "@renderer/components/ui";
import { AISettingsProviderContext } from "@renderer/context";
import { useContext } from "react";
// Preferences panel row for choosing the default AI engine (EnjoyAI or
// OpenAI). Warns the user when OpenAI is selected without an API key set.
export const DefaultEngineSettings = () => {
const { defaultEngine, setDefaultEngine, openai } = useContext(
AISettingsProviderContext
);
return (
<div className="flex items-start justify-between py-4">
<div className="">
<div className="flex items-center mb-2">
<span>{t("defaultAiEngine")}</span>
</div>
{/* per-engine hint text for the currently selected engine */}
<div className="text-sm text-muted-foreground">
{defaultEngine === "openai" && t("openAiEngineTips")}
{defaultEngine === "enjoyai" && t("enjoyAiEngineTips")}
</div>
</div>
<div className="flex items-center space-x-2">
<Select
value={defaultEngine}
onValueChange={(value) => {
// persist the choice first, then nudge the user if OpenAI was
// picked without an API key configured
setDefaultEngine(value);
// NOTE(review): assumes `openai` is always defined in this
// context; `openai?.key` would be safer — confirm the provider
// guarantees a value.
if (value === "openai" && !openai.key) {
toast.warning(t("openaiKeyRequired"));
}
}}
>
<SelectTrigger className="min-w-fit">
<SelectValue placeholder={t("defaultAiEngine")}></SelectValue>
</SelectTrigger>
<SelectContent>
<SelectItem value="enjoyai">EnjoyAI</SelectItem>
<SelectItem value="openai">OpenAI</SelectItem>
</SelectContent>
</Select>
</div>
</div>
);
};

View File

@@ -2,13 +2,16 @@ export * from "./preferences";
export * from "./about";
export * from "./hotkeys";
export * from "./default-engine-settings";
export * from "./openai-settings";
export * from "./user-settings";
export * from "./language-settings";
export * from "./library-settings";
export * from "./ffmpeg-settings";
export * from "./whisper-settings";
export * from "./google-generative-ai-settings";
export * from "./user-settings";
export * from "./balance-settings";
export * from "./reset-settings";
export * from "./reset-all-settings";

View File

@@ -2,8 +2,10 @@ import { t } from "i18next";
import { Button, ScrollArea, Separator } from "@renderer/components/ui";
import {
About,
DefaultEngineSettings,
Hotkeys,
UserSettings,
BalanceSettings,
LanguageSettings,
LibrarySettings,
WhisperSettings,
@@ -15,6 +17,7 @@ import {
} from "@renderer/components";
import { useState, useContext } from "react";
import { AppSettingsProviderContext } from "@renderer/context";
import { Tooltip } from "react-tooltip";
export const Preferences = () => {
const { ffmpegConfig } = useContext(AppSettingsProviderContext);
@@ -28,24 +31,22 @@ export const Preferences = () => {
<div className="font-semibold mb-4 capitilized">
{t("basicSettings")}
</div>
<UserSettings />
<Separator />
<LanguageSettings />
<Separator />
<LibrarySettings />
<Separator />
<WhisperSettings />
<Separator />
<DefaultEngineSettings />
<Separator />
<OpenaiSettings />
<Separator />
<GoogleGenerativeAiSettings />
<Separator />
{ffmpegConfig.ready && (
<>
<FfmpegSettings />
<Separator />
</>
)}
<OpenaiSettings />
<Separator />
<GoogleGenerativeAiSettings />
<Separator />
</div>
),
},
@@ -64,6 +65,23 @@ export const Preferences = () => {
</>
),
},
{
value: "account",
label: t("accountSettingsShort"),
component: () => (
<div className="">
<div className="font-semibold mb-4 capitilized">
{t("accountSettings")}
</div>
<UserSettings />
<Separator />
<BalanceSettings />
<Separator />
<LanguageSettings />
<Separator />
</div>
),
},
{
value: "hotkeys",
label: t("hotkeys"),
@@ -79,29 +97,32 @@ export const Preferences = () => {
const [activeTab, setActiveTab] = useState<string>("basic");
return (
<div className="grid grid-cols-5 overflow-hidden h-full">
<ScrollArea className="h-full col-span-1 bg-muted/50 p-4">
<div className="py-2 text-muted-foreground mb-4">
{t("sidebar.preferences")}
</div>
<>
<div className="grid grid-cols-5 overflow-hidden h-full">
<ScrollArea className="h-full col-span-1 bg-muted/50 p-4">
<div className="py-2 text-muted-foreground mb-4">
{t("sidebar.preferences")}
</div>
{TABS.map((tab) => (
<Button
key={tab.value}
variant={activeTab === tab.value ? "default" : "ghost"}
size="sm"
className={`capitilized w-full justify-start mb-2 ${
activeTab === tab.value ? "" : "hover:bg-muted"
}`}
onClick={() => setActiveTab(tab.value)}
>
<span className="text-sm">{tab.label}</span>
</Button>
))}
</ScrollArea>
<ScrollArea className="h-full col-span-4 py-6 px-10">
{TABS.find((tab) => tab.value === activeTab)?.component()}
</ScrollArea>
</div>
{TABS.map((tab) => (
<Button
key={tab.value}
variant={activeTab === tab.value ? "default" : "ghost"}
size="sm"
className={`capitilized w-full justify-start mb-2 ${
activeTab === tab.value ? "" : "hover:bg-muted"
}`}
onClick={() => setActiveTab(tab.value)}
>
<span className="text-sm">{tab.label}</span>
</Button>
))}
</ScrollArea>
<ScrollArea className="h-full col-span-4 py-6 px-10">
{TABS.find((tab) => tab.value === activeTab)?.component()}
</ScrollArea>
</div>
<Tooltip id="preferences-tooltip" />
</>
);
};

View File

@@ -15,13 +15,18 @@ import {
SelectValue,
} from "@renderer/components/ui";
import { WhisperModelOptions } from "@renderer/components";
import { AppSettingsProviderContext } from "@renderer/context";
import {
AppSettingsProviderContext,
AISettingsProviderContext,
} from "@renderer/context";
import { useContext, useEffect, useState } from "react";
import { InfoIcon, AlertCircleIcon } from "lucide-react";
export const WhisperSettings = () => {
const { whisperConfig, refreshWhisperConfig, EnjoyApp, setWhisperService } =
useContext(AppSettingsProviderContext);
const { whisperConfig, refreshWhisperConfig, setWhisperService } = useContext(
AISettingsProviderContext
);
const { EnjoyApp } = useContext(AppSettingsProviderContext);
const [stderr, setStderr] = useState("");
useEffect(() => {

View File

@@ -46,7 +46,7 @@ export const VideoDetail = (props: { id?: string; md5?: string }) => {
useState<boolean>(false);
const [zoomRatio, setZoomRatio] = useState<number>(1.0);
const [isPlaying, setIsPlaying] = useState(false);
const [isLooping, setIsLooping] = useState(false);
const [playMode, setPlayMode] = useState<"loop" | "single" | "all">("all");
const [playBackRate, setPlaybackRate] = useState<number>(1);
const [displayInlineCaption, setDisplayInlineCaption] =
useState<boolean>(true);
@@ -155,13 +155,23 @@ export const VideoDetail = (props: { id?: string; md5?: string }) => {
setZoomRatio={setZoomRatio}
isPlaying={isPlaying}
setIsPlaying={setIsPlaying}
isLooping={isLooping}
setIsLooping={setIsLooping}
playMode={playMode}
setPlayMode={setPlayMode}
playBackRate={playBackRate}
setPlaybackRate={setPlaybackRate}
displayInlineCaption={displayInlineCaption}
setDisplayInlineCaption={setDisplayInlineCaption}
onShare={() => setSharing(true)}
onDecoded={({ duration, sampleRate }) => {
if (video.duration) return;
EnjoyApp.videos.update(video.id, {
metadata: Object.assign({}, video.metadata, {
duration,
sampleRate,
}),
});
}}
/>
<ScrollArea
@@ -193,7 +203,9 @@ export const VideoDetail = (props: { id?: string; md5?: string }) => {
const segment = transcription?.result?.[index];
if (!segment) return;
if (isLooping && isPlaying) setIsPlaying(false);
if (playMode === "loop" && isPlaying) {
setIsPlaying(false);
}
setSeek({
seekTo: segment.offsets.from / 1000,
timestamp: Date.now(),

View File

@@ -34,6 +34,7 @@ import {
import { LayoutGridIcon, LayoutListIcon } from "lucide-react";
import { videosReducer } from "@renderer/reducers";
import { useNavigate } from "react-router-dom";
import { useTranscribe } from "@renderer/hooks";
export const VideosComponent = () => {
const [videos, dispatchVideos] = useReducer(videosReducer, []);
@@ -43,6 +44,7 @@ export const VideosComponent = () => {
const [transcribing, setTranscribing] = useState<Partial<VideoType> | null>(
null
);
const { transcribe } = useTranscribe();
const { addDblistener, removeDbListener } = useContext(DbProviderContext);
const { EnjoyApp } = useContext(AppSettingsProviderContext);
@@ -249,8 +251,14 @@ export const VideosComponent = () => {
className="bg-destructive"
onClick={async () => {
if (!transcribing) return;
await EnjoyApp.videos.transcribe(transcribing.id);
setTranscribing(null);
transcribe({
mediaId: transcribing.id,
mediaSrc: transcribing.src,
mediaType: "Video",
}).finally(() => {
setTranscribing(null);
});
}}
>
{t("transcribe")}

View File

@@ -35,9 +35,7 @@ export const VideosTable = (props: {
<Table>
<TableHeader>
<TableRow>
<TableHead className="capitalize">
{t("models.video.name")}
</TableHead>
<TableHead className="capitalize">{t("models.video.name")}</TableHead>
<TableHead className="capitalize">
{t("models.video.duration")}
</TableHead>
@@ -78,9 +76,7 @@ export const VideosTable = (props: {
</TooltipProvider>
</TableCell>
<TableCell>
{video.metadata?.format?.duration
? secondsToTimestamp(video.metadata.format.duration)
: "-"}
{video.duration ? secondsToTimestamp(video.duration) : "-"}
</TableCell>
<TableCell>{video.recordingsCount}</TableCell>
<TableCell>

View File

@@ -22,7 +22,10 @@ import { t } from "i18next";
import { InfoIcon, CheckCircle, DownloadIcon, XCircleIcon } from "lucide-react";
import { WHISPER_MODELS_OPTIONS } from "@/constants";
import { useState, useContext, useEffect } from "react";
import { AppSettingsProviderContext } from "@renderer/context";
import {
AppSettingsProviderContext,
AISettingsProviderContext,
} from "@renderer/context";
type ModelType = {
type: string;
@@ -34,8 +37,9 @@ type ModelType = {
};
export const WhisperModelOptionsPanel = () => {
const { whisperConfig, refreshWhisperConfig, EnjoyApp } = useContext(
AppSettingsProviderContext
const { EnjoyApp } = useContext(AppSettingsProviderContext);
const { whisperConfig, refreshWhisperConfig } = useContext(
AISettingsProviderContext
);
useEffect(() => {
@@ -46,7 +50,7 @@ export const WhisperModelOptionsPanel = () => {
<>
<Card className="w-full max-w-md">
<CardHeader>
<CardTitle>{t("sttAiModel")}</CardTitle>
<CardTitle>{t("whisperModel")}</CardTitle>
<CardDescription>
{t("chooseAIModelDependingOnYourHardware")}
</CardDescription>
@@ -83,8 +87,9 @@ export const WhisperModelOptionsPanel = () => {
export const WhisperModelOptions = () => {
const [selectingModel, setSelectingModel] = useState<ModelType | null>(null);
const [availableModels, setAvailableModels] = useState<ModelType[]>([]);
const { whisperConfig, setWhisperModel, EnjoyApp } = useContext(
AppSettingsProviderContext
const { EnjoyApp } = useContext(AppSettingsProviderContext);
const { whisperConfig, setWhisperModel } = useContext(
AISettingsProviderContext
);
useEffect(() => {

View File

@@ -2,10 +2,17 @@ import { createContext, useEffect, useState, useContext } from "react";
import { AppSettingsProviderContext } from "@renderer/context";
type AISettingsProviderState = {
setWhisperModel?: (name: string) => Promise<void>;
setWhisperService?: (name: string) => Promise<void>;
whisperConfig?: WhisperConfigType;
refreshWhisperConfig?: () => void;
openai?: LlmProviderType;
setOpenai?: (config: LlmProviderType) => void;
googleGenerativeAi?: LlmProviderType;
setGoogleGenerativeAi?: (config: LlmProviderType) => void;
defaultEngine?: string;
setDefaultEngine?: (engine: string) => void;
currentEngine?: LlmProviderType;
};
const initialState: AISettingsProviderState = {};
@@ -18,23 +25,70 @@ export const AISettingsProvider = ({
}: {
children: React.ReactNode;
}) => {
const [defaultEngine, setDefaultEngine] = useState<string>(null);
const [openai, setOpenai] = useState<LlmProviderType>(null);
const [googleGenerativeAi, setGoogleGenerativeAi] =
useState<LlmProviderType>(null);
const { EnjoyApp } = useContext(AppSettingsProviderContext);
const [whisperConfig, setWhisperConfig] = useState<WhisperConfigType>(null);
const { EnjoyApp, apiUrl, user, libraryPath } = useContext(
AppSettingsProviderContext
);
useEffect(() => {
fetchSettings();
refreshWhisperConfig();
}, []);
useEffect(() => {
refreshWhisperConfig();
}, [libraryPath]);
const refreshWhisperConfig = async () => {
const config = await EnjoyApp.whisper.config();
setWhisperConfig(config);
};
const setWhisperModel = async (name: string) => {
return EnjoyApp.whisper.setModel(name).then((config) => {
if (!config) return;
setWhisperConfig(config);
});
};
const setWhisperService = async (name: WhisperConfigType["service"]) => {
return EnjoyApp.whisper.setService(name).then((config) => {
if (!config) return;
setWhisperConfig(config);
});
};
const fetchSettings = async () => {
const _openai = await EnjoyApp.settings.getLlm("openai");
if (_openai) setOpenai(_openai);
if (_openai) {
setOpenai(Object.assign({ name: "openai" }, _openai));
}
const _googleGenerativeAi = await EnjoyApp.settings.getLlm(
"googleGenerativeAi"
);
if (_googleGenerativeAi) setGoogleGenerativeAi(_googleGenerativeAi);
if (_googleGenerativeAi) {
setGoogleGenerativeAi(
Object.assign({ name: "googleGenerativeAi" }, _googleGenerativeAi)
);
}
const _defaultEngine = await EnjoyApp.settings.getDefaultEngine();
if (_defaultEngine) {
setDefaultEngine(_defaultEngine);
} else if (_openai.key) {
EnjoyApp.settings.setDefaultEngine("openai").then(() => {
setDefaultEngine("openai");
});
} else {
EnjoyApp.settings.setDefaultEngine("enjoyai").then(() => {
setDefaultEngine("enjoyai");
});
}
};
const handleSetLlm = async (
@@ -46,10 +100,12 @@ export const AISettingsProvider = ({
switch (name) {
case "openai":
setOpenai(_config);
setOpenai(Object.assign({ name: "openai" }, _config));
break;
case "googleGenerativeAi":
setGoogleGenerativeAi(_config);
setGoogleGenerativeAi(
Object.assign({ name: "googleGenerativeAi" }, _config)
);
break;
}
};
@@ -57,11 +113,29 @@ export const AISettingsProvider = ({
return (
<AISettingsProviderContext.Provider
value={{
defaultEngine,
setDefaultEngine: (engine: "openai" | "enjoyai") => {
EnjoyApp.settings.setDefaultEngine(engine).then(() => {
setDefaultEngine(engine);
});
},
currentEngine: {
openai: openai,
enjoyai: {
name: "enjoyai" as LlmProviderType["name"],
key: user?.accessToken,
baseUrl: `${apiUrl}/api/ai`,
},
}[defaultEngine],
openai,
setOpenai: (config: LlmProviderType) => handleSetLlm("openai", config),
googleGenerativeAi,
setGoogleGenerativeAi: (config: LlmProviderType) =>
handleSetLlm("googleGenerativeAi", config),
whisperConfig,
refreshWhisperConfig,
setWhisperModel,
setWhisperService,
}}
>
{children}

View File

@@ -8,6 +8,7 @@ import { toBlobURL } from "@ffmpeg/util";
type AppSettingsProviderState = {
webApi: Client;
apiUrl?: string;
user: UserType | null;
initialized: boolean;
version?: string;
@@ -15,12 +16,8 @@ type AppSettingsProviderState = {
login?: (user: UserType) => void;
logout?: () => void;
setLibraryPath?: (path: string) => Promise<void>;
setWhisperModel?: (name: string) => Promise<void>;
setWhisperService?: (name: string) => Promise<void>;
ffmpegConfig?: FfmpegConfigType;
ffmpeg?: FFmpeg;
whisperConfig?: WhisperConfigType;
refreshWhisperConfig?: () => void;
setFfmegConfig?: (config: FfmpegConfigType) => void;
EnjoyApp?: EnjoyAppType;
language?: "en" | "zh-CN";
@@ -47,10 +44,9 @@ export const AppSettingsProvider = ({
const [webApi, setWebApi] = useState<Client>(null);
const [user, setUser] = useState<UserType | null>(null);
const [libraryPath, setLibraryPath] = useState("");
const [whisperConfig, setWhisperConfig] = useState<WhisperConfigType>(null);
const [ffmpegConfig, setFfmegConfig] = useState<FfmpegConfigType>(null);
const [language, setLanguage] = useState<"en" | "zh-CN">();
const [ffmpeg, setFfmpeg] = useState<FFmpeg>(null);
const [language, setLanguage] = useState<"en" | "zh-CN">();
const EnjoyApp = window.__ENJOY_APP__;
const ffmpegRef = useRef(new FFmpeg());
@@ -60,15 +56,10 @@ export const AppSettingsProvider = ({
fetchUser();
fetchLibraryPath();
fetchFfmpegConfig();
refreshWhisperConfig();
fetchLanguage();
loadFfmpegWASM();
}, []);
useEffect(() => {
refreshWhisperConfig();
}, [libraryPath]);
useEffect(() => {
validate();
}, [user, libraryPath]);
@@ -80,9 +71,10 @@ export const AppSettingsProvider = ({
new Client({
baseUrl: apiUrl,
accessToken: user?.accessToken,
locale: language,
})
);
}, [user, apiUrl]);
}, [user, apiUrl, language]);
const loadFfmpegWASM = async () => {
const baseURL = "https://unpkg.com/@ffmpeg/core-mt@0.12.6/dist/esm";
@@ -133,11 +125,6 @@ export const AppSettingsProvider = ({
setFfmegConfig(config);
};
const refreshWhisperConfig = async () => {
const config = await EnjoyApp.whisper.config();
setWhisperConfig(config);
};
const fetchVersion = async () => {
const version = EnjoyApp.app.version;
setVersion(version);
@@ -184,20 +171,6 @@ export const AppSettingsProvider = ({
setLibraryPath(dir);
};
const setWhisperModel = async (name: string) => {
return EnjoyApp.whisper.setModel(name).then((config) => {
if (!config) return;
setWhisperConfig(config);
});
};
const setWhisperService = async (name: WhisperConfigType["service"]) => {
return EnjoyApp.whisper.setService(name).then((config) => {
if (!config) return;
setWhisperConfig(config);
});
};
const validate = async () => {
setInitialized(Boolean(user && libraryPath));
};
@@ -210,17 +183,14 @@ export const AppSettingsProvider = ({
EnjoyApp,
version,
webApi,
apiUrl,
user,
login,
logout,
libraryPath,
setLibraryPath: setLibraryPathHandler,
setWhisperModel,
setWhisperService,
ffmpegConfig,
ffmpeg,
whisperConfig,
refreshWhisperConfig,
setFfmegConfig,
initialized,
}}

View File

@@ -0,0 +1,2 @@
export * from './useTranscode';
export * from './useAiCommand';

View File

@@ -0,0 +1,133 @@
import {
AppSettingsProviderContext,
AISettingsProviderContext,
} from "@renderer/context";
import { useContext } from "react";
import {
lookupCommand,
extractStoryCommand,
translateCommand,
ipaCommand,
} from "@commands";
import { toast } from "@renderer/components/ui";
import { t } from "i18next";
import { md5 } from "js-md5";
/**
 * Hook bundling the AI-backed commands (lookup, story extraction,
 * translation, IPA pronunciation) against the currently selected engine.
 *
 * NOTE(review): `currentEngine` is undefined until AISettingsProvider has
 * resolved the default engine — callers should only invoke these commands
 * after settings have loaded. TODO confirm all call sites guard for this.
 */
export const useAiCommand = () => {
  const { EnjoyApp, webApi } = useContext(AppSettingsProviderContext);
  const { currentEngine } = useContext(AISettingsProviderContext);

  /**
   * Look up a word in its context.
   *
   * Asks the web API first; if the lookup already has a meaning it is
   * returned as-is. Otherwise the AI engine resolves the meaning and the
   * result is persisted via `webApi.updateLookup`.
   *
   * The AI promise chain is returned so callers awaiting `lookupWord` see
   * the full round-trip. (Previously the chain was not returned, so the
   * promise resolved to `undefined` before the AI call completed and the
   * updated lookup was never propagated.)
   */
  const lookupWord = async (params: {
    word: string;
    context: string;
    sourceId?: string;
    sourceType?: string;
  }) => {
    const { word, context, sourceId, sourceType } = params;

    const lookup = await webApi.lookup({
      word,
      context,
      sourceId,
      sourceType,
    });
    if (lookup.meaning) {
      return lookup;
    }

    return lookupCommand(
      {
        word,
        context,
        meaningOptions: lookup.meaningOptions,
      },
      {
        key: currentEngine.key,
        modelName: currentEngine.model,
        baseUrl: currentEngine.baseUrl,
      }
    )
      .then((res) => {
        // Only persist results that contain a usable translation.
        if (res.context_translation?.trim()) {
          return webApi.updateLookup(lookup.id, {
            meaning: res,
            sourceId,
            sourceType,
          });
        }
      })
      .catch((err) => {
        // Surface the failure to the user; the promise then resolves to
        // undefined rather than rejecting (best-effort semantics).
        toast.error(`${t("lookupFailed")}: ${err.message}`);
      });
  };

  /**
   * Extract vocabulary (words and idioms) from a story with the AI engine,
   * then register the results with the web API.
   */
  const extractStory = async (story: StoryType) => {
    return extractStoryCommand(story.content, {
      key: currentEngine.key,
      modelName: currentEngine.model,
      baseUrl: currentEngine.baseUrl,
    }).then((res) => {
      const { words = [], idioms = [] } = res;

      return webApi.extractVocabularyFromStory(story.id, {
        words,
        idioms,
      });
    });
  };

  /**
   * Translate text with the AI engine. Results are cached by MD5 of the
   * input so repeated translations of the same text hit the cache.
   */
  const translate = async (text: string): Promise<string> => {
    const hash = md5.create();
    hash.update(text);
    const cacheKey = `translate-${hash.hex()}`;
    const cached = await EnjoyApp.cacheObjects.get(cacheKey);

    if (cached) return cached;

    return translateCommand(text, {
      key: currentEngine.key,
      modelName: currentEngine.model,
      baseUrl: currentEngine.baseUrl,
    }).then((res) => {
      EnjoyApp.cacheObjects.set(cacheKey, res);
      return res;
    });
  };

  /**
   * Get per-word IPA transcriptions for `text`, cached by MD5 of the input
   * like `translate`. Only non-empty results are cached.
   */
  const pronounce = async (
    text: string
  ): Promise<
    {
      word?: string;
      ipa?: string;
    }[]
  > => {
    const hash = md5.create();
    hash.update(text);
    const cacheKey = `ipa-${hash.hex()}`;
    const cached = await EnjoyApp.cacheObjects.get(cacheKey);

    if (cached) return cached;

    return ipaCommand(text, {
      key: currentEngine.key,
      modelName: currentEngine.model,
      baseUrl: currentEngine.baseUrl,
    }).then((result) => {
      if (result?.words?.length > 0) {
        EnjoyApp.cacheObjects.set(cacheKey, result.words);
      }
      return result.words;
    });
  };

  return {
    lookupWord,
    extractStory,
    translate,
    pronounce,
  };
};

View File

@@ -0,0 +1,58 @@
import { AppSettingsProviderContext } from "@renderer/context";
import { useContext } from "react";
import { toast } from "@renderer/components/ui";
import { t } from "i18next";
import { fetchFile } from "@ffmpeg/util";
/**
 * Hook for preparing media and sending it to the main process for
 * transcription.
 */
export const useTranscribe = () => {
  const { EnjoyApp, ffmpeg } = useContext(AppSettingsProviderContext);

  // Convert a media file to WAV with the in-renderer ffmpeg WASM instance.
  // Default options produce mono 16kHz PCM (the format whisper expects —
  // TODO confirm against the main-process consumer). Returns undefined when
  // ffmpeg is not loaded or conversion fails (an error toast is shown).
  const transcode = async (src: string, options?: string[]) => {
    if (!ffmpeg?.loaded) return;

    const args = options || ["-ar", "16000", "-ac", "1", "-c:a", "pcm_s16le"];

    try {
      const inputName = new URL(src).pathname.split("/").pop();
      const outputName = inputName.replace(/\.[^/.]+$/, ".wav");

      await ffmpeg.writeFile(inputName, await fetchFile(src));
      await ffmpeg.exec(["-i", inputName, ...args, outputName]);

      const converted = await ffmpeg.readFile(outputName);
      return new Blob([converted], { type: "audio/wav" });
    } catch (e) {
      toast.error(t("transcodeError"));
    }
  };

  // Transcode the media (when possible) and hand it to the main process for
  // transcription. When transcoding is unavailable, no blob is sent.
  const transcribe = async (params: {
    mediaSrc: string;
    mediaId: string;
    mediaType: "Audio" | "Video";
  }) => {
    const { mediaSrc, mediaId, mediaType } = params;

    const wav = await transcode(mediaSrc);
    const blob = wav
      ? {
          // Strip any codec suffix from the MIME type (e.g. "audio/wav;...").
          type: wav.type.split(";")[0],
          arrayBuffer: await wav.arrayBuffer(),
        }
      : undefined;

    return EnjoyApp.transcriptions.process(
      {
        targetId: mediaId,
        targetType: mediaType,
      },
      {
        blob,
      }
    );
  };

  return {
    transcode,
    transcribe,
  };
};

View File

@@ -10,7 +10,7 @@ import {
} from "@renderer/components/ui";
import { MessageComponent, ConversationForm } from "@renderer/components";
import { SendIcon, BotIcon, LoaderIcon, SettingsIcon } from "lucide-react";
import { Link, useParams } from "react-router-dom";
import { Link, useParams, useSearchParams } from "react-router-dom";
import { t } from "i18next";
import {
DbProviderContext,
@@ -22,11 +22,14 @@ import autosize from "autosize";
export default () => {
const { id } = useParams<{ id: string }>();
const [searchParams, _] = useSearchParams();
const [editting, setEditting] = useState<boolean>(false);
const [conversation, setConversation] = useState<ConversationType>();
const { addDblistener, removeDbListener } = useContext(DbProviderContext);
const { EnjoyApp } = useContext(AppSettingsProviderContext);
const [content, setConent] = useState<string>("");
const [content, setContent] = useState<string>(
searchParams.get("text") || ""
);
const [submitting, setSubmitting] = useState<boolean>(false);
const [messages, dispatchMessages] = useReducer(messagesReducer, []);
@@ -50,7 +53,7 @@ export default () => {
EnjoyApp.messages
.findAll({
where: {
conversationId: id,
conversationId: conversation.id,
},
offset,
limit,
@@ -67,7 +70,11 @@ export default () => {
setOffest(offset + _messages.length);
}
dispatchMessages({ type: "append", records: _messages });
if (offset === 0) {
dispatchMessages({ type: "set", records: _messages });
} else {
dispatchMessages({ type: "append", records: _messages });
}
scrollToMessage(_messages[0]);
})
.finally(() => {
@@ -125,7 +132,7 @@ export default () => {
})
.finally(() => {
setSubmitting(false);
setConent("");
setContent("");
clearTimeout(timeout);
});
};
@@ -162,8 +169,9 @@ export default () => {
};
useEffect(() => {
setOffest(0);
setContent(searchParams.get("text") || "");
fetchConversation();
fetchMessages();
addDblistener(onMessagesUpdate);
return () => {
@@ -171,6 +179,10 @@ export default () => {
};
}, [id]);
useEffect(() => {
fetchMessages();
}, [conversation]);
useEffect(() => {
if (!inputRef.current) return;
@@ -189,7 +201,7 @@ export default () => {
inputRef.current?.removeEventListener("keypress", () => {});
autosize.destroy(inputRef.current);
};
}, [inputRef.current]);
}, [id, inputRef.current]);
if (!conversation) {
return (
@@ -277,7 +289,7 @@ export default () => {
ref={inputRef}
disabled={submitting}
value={content}
onChange={(e) => setConent(e.target.value)}
onChange={(e) => setContent(e.target.value)}
placeholder={t("pressEnterToSend")}
className="px-0 py-0 shadow-none border-none focus-visible:outline-0 focus-visible:ring-0 border-none bg-muted focus:bg-background min-h-[1.25rem] max-h-[3.5rem] !overflow-x-hidden"
/>

View File

@@ -1,25 +1,32 @@
import { t } from "i18next";
import {
Button,
Dialog,
DialogContent,
DialogHeader,
DialogTitle,
DialogTrigger,
Sheet,
SheetContent,
SheetTrigger,
} from "@renderer/components/ui";
import { ConversationForm } from "@renderer/components";
import { useState, useEffect, useContext, useReducer } from "react";
import { ChevronLeftIcon, LoaderIcon, MessageCircleIcon } from "lucide-react";
import { ChevronLeftIcon, MessageCircleIcon } from "lucide-react";
import { Link, useNavigate } from "react-router-dom";
import {
DbProviderContext,
AppSettingsProviderContext,
AISettingsProviderContext,
} from "@renderer/context";
import { conversationsReducer } from "@renderer/reducers";
import dayjs from "dayjs";
export default () => {
const [editting, setEditting] = useState<boolean>(false);
const [creating, setCreating] = useState<boolean>(false);
const [preset, setPreset] = useState<any>({});
const { addDblistener, removeDbListener } = useContext(DbProviderContext);
const { EnjoyApp } = useContext(AppSettingsProviderContext);
const { currentEngine } = useContext(AISettingsProviderContext);
const [conversations, dispatchConversations] = useReducer(
conversationsReducer,
[]
@@ -53,6 +60,57 @@ export default () => {
}
};
const PRESETS = [
{
name: "英语教练",
engine: currentEngine.name,
configuration: {
model: "gpt-4-1106-preview",
baseUrl: "",
roleDefinition: `你是我的英语教练。
请将我的话改写成英文。
不需要逐字翻译。
请分析清楚我的内容,而后用英文重新逻辑清晰地组织它。
请使用地道的美式英语,纽约腔调。
请尽量使用日常词汇,尽量优先使用短语动词或者习惯用语。
每个句子最长不应该超过 20 个单词。`,
temperature: 0.2,
numberOfChoices: 1,
maxTokens: 2048,
presencePenalty: 0,
frequencyPenalty: 0,
historyBufferSize: 0,
tts: {
baseUrl: "",
engine: currentEngine.name,
model: "tts-1",
voice: "alloy",
},
},
},
{
name: t("custom"),
engine: currentEngine.name,
configuration: {
model: "gpt-4-1106-preview",
baseUrl: "",
roleDefinition: "",
temperature: 0.2,
numberOfChoices: 1,
maxTokens: 2048,
presencePenalty: 0,
frequencyPenalty: 0,
historyBufferSize: 0,
tts: {
baseUrl: "",
engine: currentEngine.name,
model: "tts-1",
voice: "alloy",
},
},
},
];
return (
<div className="h-full px-4 py-6 lg:px-8 bg-muted flex flex-col">
<div className="w-full max-w-screen-md mx-auto flex-1">
@@ -64,19 +122,47 @@ export default () => {
</div>
<div className="my-6 flex justify-center">
<Sheet open={editting} onOpenChange={(value) => setEditting(value)}>
<SheetTrigger asChild>
<Button className="h-12 rounded-lg w-96" disabled={editting}>
{editting && <LoaderIcon className="animate-spin mr-2" />}
<Dialog>
<DialogTrigger asChild>
<Button className="h-12 rounded-lg w-96">
{t("newConversation")}
</Button>
</SheetTrigger>
</DialogTrigger>
<DialogContent>
<DialogHeader>
<DialogTitle>{t("selectAiRole")}</DialogTitle>
</DialogHeader>
<div className="grid grid-cols-2 gap-4">
{PRESETS.map((preset) => (
<DialogTrigger
key={preset.name}
className="p-4 border hover:shadow rounded-lg cursor-pointer space-y-2"
onClick={() => {
setPreset(preset);
setCreating(true);
}}
>
<div className="capitalize text-center line-clamp-1">
{preset.name}
</div>
{preset.configuration.roleDefinition && (
<div className="line-clamp-3 text-sm text-foreground/70">
{preset.configuration.roleDefinition}
</div>
)}
</DialogTrigger>
))}
</div>
</DialogContent>
</Dialog>
<Sheet open={creating} onOpenChange={(value) => setCreating(value)}>
<SheetContent className="p-0">
<div className="h-screen">
<ConversationForm
conversation={{}}
onFinish={() => setEditting(false)}
conversation={preset}
onFinish={() => setCreating(false)}
/>
</div>
</SheetContent>

View File

@@ -7,16 +7,20 @@ import {
ChooseLibraryPathInput,
WhisperModelOptionsPanel,
} from "@renderer/components";
import { AppSettingsProviderContext } from "@renderer/context";
import {
AppSettingsProviderContext,
AISettingsProviderContext,
} from "@renderer/context";
import { CheckCircle2Icon } from "lucide-react";
export default () => {
const [currentStep, setCurrentStep] = useState<number>(1);
const [currentStepValid, setCurrentStepValid] = useState<boolean>(false);
const { user, libraryPath, whisperConfig, initialized } = useContext(
const { user, libraryPath, initialized } = useContext(
AppSettingsProviderContext
);
const { whisperConfig } = useContext(AISettingsProviderContext);
const totalSteps = 4;
useEffect(() => {
@@ -32,7 +36,7 @@ export default () => {
setCurrentStepValid(!!libraryPath);
break;
case 3:
setCurrentStepValid(Boolean(whisperConfig.model));
setCurrentStepValid(true);
break;
case 4:
setCurrentStepValid(initialized);

View File

@@ -11,9 +11,8 @@ import { useState, useContext, useEffect } from "react";
import { useParams } from "react-router-dom";
import {
AppSettingsProviderContext,
AISettingsProviderContext,
} from "@renderer/context";
import { extractStoryCommand, lookupCommand } from "@/commands";
import { useAiCommand } from "@renderer/hooks";
import nlp from "compromise";
import paragraphs from "compromise-paragraphs";
nlp.plugin(paragraphs);
@@ -21,7 +20,6 @@ nlp.plugin(paragraphs);
export default () => {
const { id } = useParams<{ id: string }>();
const { webApi } = useContext(AppSettingsProviderContext);
const { openai } = useContext(AISettingsProviderContext);
const [loading, setLoading] = useState<boolean>(true);
const [story, setStory] = useState<StoryType>();
const [meanings, setMeanings] = useState<MeaningType[]>([]);
@@ -34,6 +32,7 @@ export default () => {
const [vocabularyVisible, setVocabularyVisible] = useState<boolean>(false);
const [lookingUpInBatch, setLookupInBatch] = useState<boolean>(false);
const [lookingUp, setLookingUp] = useState<boolean>(false);
const { lookupWord, extractStory } = useAiCommand();
const fetchStory = async () => {
webApi
@@ -72,35 +71,13 @@ export default () => {
if (story?.extracted && (words.length > 0 || idioms.length > 0)) return;
toast.promise(
async () => {
if (words.length === 0 && idioms.length === 0) {
if (!openai?.key) {
toast.error(t("openaiKeyRequired"));
return;
}
const res = await extractStoryCommand(story.content, {
key: openai.key,
modelName: openai.model,
baseUrl: openai.baseUrl,
});
words = res.words || [];
idioms = res.idioms || [];
}
webApi
.extractVocabularyFromStory(id, {
words,
idioms,
})
.then(() => {
fetchStory();
})
.finally(() => {
setScanning(false);
});
},
extractStory(story)
.then(() => {
fetchStory();
})
.finally(() => {
setScanning(false);
}),
{
loading: t("extracting"),
success: t("extractedSuccessfully"),
@@ -191,43 +168,16 @@ export default () => {
const processLookup = async (pendingLookup: Partial<LookupType>) => {
if (lookingUp) return;
const { meaningOptions = [] } = await webApi.lookup({
word: pendingLookup.word,
context: pendingLookup.context,
sourceId: story.id,
sourceType: "Story",
});
if (!openai?.key) {
toast.error(t("openaiApiKeyRequired"));
return;
}
setLookingUp(true);
toast.promise(
lookupCommand(
{
word: pendingLookup.word,
context: pendingLookup.context,
meaningOptions,
},
{
key: openai.key,
modelName: openai.model,
baseUrl: openai.baseUrl,
}
)
.then((res) => {
if (res.context_translation?.trim()) {
webApi
.updateLookup(pendingLookup.id, {
meaning: res,
sourceId: story.id,
sourceType: "Story",
})
.then(() => {
fetchMeanings();
});
}
lookupWord({
word: pendingLookup.word,
context: pendingLookup.context,
sourceId: story.id,
sourceType: "Story",
})
.then(() => {
fetchMeanings();
})
.finally(() => {
setLookingUp(false);

View File

@@ -7,6 +7,7 @@ type AudioType = {
coverUrl?: string;
md5: string;
metadata?: Ffmpeg.FfprobeData;
duration?: number;
transcribed?: boolean;
transcribing?: boolean;
recordingsCount?: number;

View File

@@ -1,6 +1,6 @@
type ConversationType = {
id: string;
engine: "openai" | "ollama" | "googleGenerativeAi";
engine: "enjoyai" | "openai" | "ollama" | "googleGenerativeAi";
name: string;
configuration: { [key: string]: any };
model: string;

View File

@@ -84,6 +84,8 @@ type EnjoyAppType = {
getUser: () => Promise<UserType>;
setUser: (user: UserType) => Promise<void>;
getUserDataPath: () => Promise<string>;
getDefaultEngine: () => Promise<string>;
setDefaultEngine: (engine: "enjoyai" | "openai") => Promise<void>;
getLlm: (provider: SupportedLlmProviderType) => Promise<LlmProviderType>;
setLlm: (
provider: SupportedLlmProviderType,
@@ -111,7 +113,6 @@ type EnjoyAppType = {
create: (uri: string, params?: object) => Promise<AudioType>;
update: (id: string, params: object) => Promise<AudioType | undefined>;
destroy: (id: string) => Promise<undefined>;
transcribe: (id: string) => Promise<void>;
upload: (id: string) => Promise<void>;
};
videos: {
@@ -120,7 +121,6 @@ type EnjoyAppType = {
create: (uri: string, params?: any) => Promise<VideoType>;
update: (id: string, params: any) => Promise<VideoType | undefined>;
destroy: (id: string) => Promise<undefined>;
transcribe: (id: string) => Promise<void>;
upload: (id: string) => Promise<void>;
};
recordings: {

View File

@@ -5,9 +5,10 @@ declare const MAIN_WINDOW_VITE_DEV_SERVER_URL: string;
declare const MAIN_WINDOW_VITE_NAME: string;
declare module "compromise-paragraphs";
type SupportedLlmProviderType = "openai" | "googleGenerativeAi";
type SupportedLlmProviderType = "enjoyai" | "openai" | "googleGenerativeAi";
type LlmProviderType = {
name?: "enjoyai" | "openai" | "googleGenerativeAi";
key?: string;
model?: string;
baseUrl?: string;

View File

@@ -1,6 +1,7 @@
type UserType = {
id: string;
name: string;
balance?: number;
avatarUrl?: string;
accessToken?: string;
recordingsCount?: number;

View File

@@ -8,6 +8,7 @@ type VideoType = {
md5: string;
src?: string;
metadata?: Ffmpeg.FfprobeData;
duration?: number;
transcribed: boolean;
transcribing: boolean;
recordingsCount?: number;

354
yarn.lock
View File

@@ -260,7 +260,7 @@ __metadata:
languageName: node
linkType: hard
"@babel/runtime@npm:^7.13.10, @babel/runtime@npm:^7.22.5, @babel/runtime@npm:^7.23.2, @babel/runtime@npm:^7.23.5":
"@babel/runtime@npm:^7.13.10, @babel/runtime@npm:^7.22.5, @babel/runtime@npm:^7.23.2":
version: 7.23.8
resolution: "@babel/runtime@npm:7.23.8"
dependencies:
@@ -269,6 +269,15 @@ __metadata:
languageName: node
linkType: hard
"@babel/runtime@npm:^7.23.7":
version: 7.23.9
resolution: "@babel/runtime@npm:7.23.9"
dependencies:
regenerator-runtime: "npm:^0.14.0"
checksum: e71205fdd7082b2656512cc98e647d9ea7e222e4fe5c36e9e5adc026446fcc3ba7b3cdff8b0b694a0b78bb85db83e7b1e3d4c56ef90726682b74f13249cf952d
languageName: node
linkType: hard
"@babel/template@npm:^7.22.15":
version: 7.22.15
resolution: "@babel/template@npm:7.22.15"
@@ -1204,7 +1213,7 @@ __metadata:
languageName: node
linkType: hard
"@google/generative-ai@npm:^0.1.0":
"@google/generative-ai@npm:^0.1.3":
version: 0.1.3
resolution: "@google/generative-ai@npm:0.1.3"
checksum: ce1e3e4440d31965eae8fcdbb0120c9b47f8503e95b14b361d8b1528cb66f96d940f47ad31eb1baeb7eda72d3394c05bc6fe25de2df8e7bfea1a8e35fa869e40
@@ -1311,9 +1320,9 @@ __metadata:
languageName: node
linkType: hard
"@langchain/community@npm:~0.0.17":
version: 0.0.18
resolution: "@langchain/community@npm:0.0.18"
"@langchain/community@npm:~0.0.20":
version: 0.0.22
resolution: "@langchain/community@npm:0.0.22"
dependencies:
"@langchain/core": "npm:~0.1.16"
"@langchain/openai": "npm:~0.0.10"
@@ -1323,6 +1332,7 @@ __metadata:
zod: "npm:^3.22.3"
peerDependencies:
"@aws-crypto/sha256-js": ^5.0.0
"@aws-sdk/client-bedrock-agent-runtime": ^3.485.0
"@aws-sdk/client-bedrock-runtime": ^3.422.0
"@aws-sdk/client-dynamodb": ^3.310.0
"@aws-sdk/client-kendra": ^3.352.0
@@ -1330,9 +1340,10 @@ __metadata:
"@aws-sdk/client-sagemaker-runtime": ^3.310.0
"@aws-sdk/client-sfn": ^3.310.0
"@aws-sdk/credential-provider-node": ^3.388.0
"@azure/search-documents": ^12.0.0
"@clickhouse/client": ^0.2.5
"@cloudflare/ai": ^1.0.12
"@datastax/astra-db-ts": 0.1.2
"@datastax/astra-db-ts": ^0.1.4
"@elastic/elasticsearch": ^8.4.0
"@getmetal/metal-sdk": "*"
"@getzep/zep-js": ^0.9.0
@@ -1404,6 +1415,8 @@ __metadata:
peerDependenciesMeta:
"@aws-crypto/sha256-js":
optional: true
"@aws-sdk/client-bedrock-agent-runtime":
optional: true
"@aws-sdk/client-bedrock-runtime":
optional: true
"@aws-sdk/client-dynamodb":
@@ -1418,6 +1431,8 @@ __metadata:
optional: true
"@aws-sdk/credential-provider-node":
optional: true
"@azure/search-documents":
optional: true
"@clickhouse/client":
optional: true
"@cloudflare/ai":
@@ -1560,7 +1575,7 @@ __metadata:
optional: true
ws:
optional: true
checksum: e0b66e085fd7af64ef2df0ba3b4a91e6f580266c4ebd752f1f3604f12dde50e7bfcec21d86fdc9f6f1f7242e3c8243c0497345590f0c694bb8c0c7aefdfae17c
checksum: 955c45855872ab2f4664d93bce97e266ad78ed1d180deda05fa58c42ec4915dd03fc349b3fc4f46192c7f3033744c32d386834d5053a97145abb1a5dde258d12
languageName: node
linkType: hard
@@ -1601,13 +1616,13 @@ __metadata:
languageName: node
linkType: hard
"@langchain/google-genai@npm:^0.0.7":
version: 0.0.7
resolution: "@langchain/google-genai@npm:0.0.7"
"@langchain/google-genai@npm:^0.0.8":
version: 0.0.8
resolution: "@langchain/google-genai@npm:0.0.8"
dependencies:
"@google/generative-ai": "npm:^0.1.0"
"@google/generative-ai": "npm:^0.1.3"
"@langchain/core": "npm:~0.1.5"
checksum: 04d82e93e1a4d2be73d67657c88ca0c930a79aafb9823ddb56346814ccfb044e7b51e5b38eebd523c3d25722c2b7568cedb54e0ae254b9d45d5cd4dd585f6ac4
checksum: d908f33c21ed0bef904630ac1813d75935dee493d1d05a083a6f87ff5afb31fa1de052c34f4d7f7a3d6f34aa317639282c7b997b82f11bae9c774e8de5eb3e13
languageName: node
linkType: hard
@@ -3681,7 +3696,7 @@ __metadata:
languageName: node
linkType: hard
"@types/node@npm:*, @types/node@npm:^20.11.5":
"@types/node@npm:*":
version: 20.11.5
resolution: "@types/node@npm:20.11.5"
dependencies:
@@ -3699,6 +3714,15 @@ __metadata:
languageName: node
linkType: hard
"@types/node@npm:^20.11.10":
version: 20.11.10
resolution: "@types/node@npm:20.11.10"
dependencies:
undici-types: "npm:~5.26.4"
checksum: aced8595c2786d4e60471772659add1e2e0d1f2b73d119820b2e1815426d6e52c6a77f1c7fca8ea46490c36f7959cc46b0dc609fa2e80b7fd24f9a7d696c2210
languageName: node
linkType: hard
"@types/prop-types@npm:*":
version: 15.7.11
resolution: "@types/prop-types@npm:15.7.11"
@@ -3837,15 +3861,15 @@ __metadata:
languageName: node
linkType: hard
"@typescript-eslint/eslint-plugin@npm:^6.19.0":
version: 6.19.0
resolution: "@typescript-eslint/eslint-plugin@npm:6.19.0"
"@typescript-eslint/eslint-plugin@npm:^6.20.0":
version: 6.20.0
resolution: "@typescript-eslint/eslint-plugin@npm:6.20.0"
dependencies:
"@eslint-community/regexpp": "npm:^4.5.1"
"@typescript-eslint/scope-manager": "npm:6.19.0"
"@typescript-eslint/type-utils": "npm:6.19.0"
"@typescript-eslint/utils": "npm:6.19.0"
"@typescript-eslint/visitor-keys": "npm:6.19.0"
"@typescript-eslint/scope-manager": "npm:6.20.0"
"@typescript-eslint/type-utils": "npm:6.20.0"
"@typescript-eslint/utils": "npm:6.20.0"
"@typescript-eslint/visitor-keys": "npm:6.20.0"
debug: "npm:^4.3.4"
graphemer: "npm:^1.4.0"
ignore: "npm:^5.2.4"
@@ -3858,44 +3882,44 @@ __metadata:
peerDependenciesMeta:
typescript:
optional: true
checksum: ab1a5ace6663b0c6d2418e321328fa28aa4bdc4b5fae257addec01346fb3a9c2d3a2960ade0f7114e6974c513a28632c9e8e602333cc0fab3135c445babdef59
checksum: 5020faac39be476de056342f58f2bf68bb788f230e2fa4a2e27ceab8a5187dc450beba7333b0aa741a43aeaff45a117558132953f9390b5eca4c2cc004fde716
languageName: node
linkType: hard
"@typescript-eslint/parser@npm:^6.19.0":
version: 6.19.0
resolution: "@typescript-eslint/parser@npm:6.19.0"
"@typescript-eslint/parser@npm:^6.20.0":
version: 6.20.0
resolution: "@typescript-eslint/parser@npm:6.20.0"
dependencies:
"@typescript-eslint/scope-manager": "npm:6.19.0"
"@typescript-eslint/types": "npm:6.19.0"
"@typescript-eslint/typescript-estree": "npm:6.19.0"
"@typescript-eslint/visitor-keys": "npm:6.19.0"
"@typescript-eslint/scope-manager": "npm:6.20.0"
"@typescript-eslint/types": "npm:6.20.0"
"@typescript-eslint/typescript-estree": "npm:6.20.0"
"@typescript-eslint/visitor-keys": "npm:6.20.0"
debug: "npm:^4.3.4"
peerDependencies:
eslint: ^7.0.0 || ^8.0.0
peerDependenciesMeta:
typescript:
optional: true
checksum: d547bfb1aaed112cfc0f9f0be8506a280952ba3b61be42b749352139361bd94e4a47fa043d819e19c6a498cacbd8bb36a46e3628c436a7e2009e7ac27afc8861
checksum: d84ad5e2282b1096c80dedb903c83ecc31eaf7be1aafcb14c18d9ec2d4a319f2fd1e5a9038b944d9f42c36c1c57add5e4292d4026ca7d3d5441d41286700d402
languageName: node
linkType: hard
"@typescript-eslint/scope-manager@npm:6.19.0":
version: 6.19.0
resolution: "@typescript-eslint/scope-manager@npm:6.19.0"
"@typescript-eslint/scope-manager@npm:6.20.0":
version: 6.20.0
resolution: "@typescript-eslint/scope-manager@npm:6.20.0"
dependencies:
"@typescript-eslint/types": "npm:6.19.0"
"@typescript-eslint/visitor-keys": "npm:6.19.0"
checksum: 1ec7b9dedca7975f0aa4543c1c382f7d6131411bd443a5f9b96f137acb6adb450888ed13c95f6d26546b682b2e0579ce8a1c883fdbe2255dc0b61052193b8243
"@typescript-eslint/types": "npm:6.20.0"
"@typescript-eslint/visitor-keys": "npm:6.20.0"
checksum: f6768ed2dcd2d1771d55ed567ff392a6569ffd683a26500067509dd41769f8838c43686460fe7337144f324fd063df33f5d5646d44e5df4998ceffb3ad1fb790
languageName: node
linkType: hard
"@typescript-eslint/type-utils@npm:6.19.0":
version: 6.19.0
resolution: "@typescript-eslint/type-utils@npm:6.19.0"
"@typescript-eslint/type-utils@npm:6.20.0":
version: 6.20.0
resolution: "@typescript-eslint/type-utils@npm:6.20.0"
dependencies:
"@typescript-eslint/typescript-estree": "npm:6.19.0"
"@typescript-eslint/utils": "npm:6.19.0"
"@typescript-eslint/typescript-estree": "npm:6.20.0"
"@typescript-eslint/utils": "npm:6.20.0"
debug: "npm:^4.3.4"
ts-api-utils: "npm:^1.0.1"
peerDependencies:
@@ -3903,23 +3927,23 @@ __metadata:
peerDependenciesMeta:
typescript:
optional: true
checksum: 5b146b985481e587122026c703ac9f537ad7e90eee1dca814971bca0d7e4a5d4ff9861fb4bf749014c28c6a4fbb4a01a4527355961315eb9501f3569f8e8dd38
checksum: 8f622fbb14268f1d00b2948f995b570f0ef82be02c12be41d90385290a56ea0dbd34d855d6a5aff100b57f3bdd300ff0c300f16c78f12d6064f7ae6e34fd71bf
languageName: node
linkType: hard
"@typescript-eslint/types@npm:6.19.0":
version: 6.19.0
resolution: "@typescript-eslint/types@npm:6.19.0"
checksum: 6f81860a3c14df55232c2e6dec21fb166867b9f30b3c3369b325aef5ee1c7e41e827c0504654daa49c8ff1a3a9ca9d9bfe76786882b6212a7c1b58991a9c80b9
"@typescript-eslint/types@npm:6.20.0":
version: 6.20.0
resolution: "@typescript-eslint/types@npm:6.20.0"
checksum: 37589003b0e06f83c1945e3748e91af85918cfd997766894642a08e6f355f611cfe11df4e7632dda96e3a9b3441406283fe834ab0906cf81ea97fd43ca2aebe3
languageName: node
linkType: hard
"@typescript-eslint/typescript-estree@npm:6.19.0":
version: 6.19.0
resolution: "@typescript-eslint/typescript-estree@npm:6.19.0"
"@typescript-eslint/typescript-estree@npm:6.20.0":
version: 6.20.0
resolution: "@typescript-eslint/typescript-estree@npm:6.20.0"
dependencies:
"@typescript-eslint/types": "npm:6.19.0"
"@typescript-eslint/visitor-keys": "npm:6.19.0"
"@typescript-eslint/types": "npm:6.20.0"
"@typescript-eslint/visitor-keys": "npm:6.20.0"
debug: "npm:^4.3.4"
globby: "npm:^11.1.0"
is-glob: "npm:^4.0.3"
@@ -3929,34 +3953,34 @@ __metadata:
peerDependenciesMeta:
typescript:
optional: true
checksum: 5b365f009e43c7beafdbb7d8ecad78ee1087b0a4338cd9ec695eed514b7b4c1089e56239761139ddae629ec0ce8d428840c6ebfeea3618d2efe00c84f8794da5
checksum: 551f13445a303882d9fc0fbe14ef8507eb8414253fd87a5f13d2e324b5280b626421a238b8ec038e628bc80128dc06c057757f668738e82e64d5b39a9083c27d
languageName: node
linkType: hard
"@typescript-eslint/utils@npm:6.19.0":
version: 6.19.0
resolution: "@typescript-eslint/utils@npm:6.19.0"
"@typescript-eslint/utils@npm:6.20.0":
version: 6.20.0
resolution: "@typescript-eslint/utils@npm:6.20.0"
dependencies:
"@eslint-community/eslint-utils": "npm:^4.4.0"
"@types/json-schema": "npm:^7.0.12"
"@types/semver": "npm:^7.5.0"
"@typescript-eslint/scope-manager": "npm:6.19.0"
"@typescript-eslint/types": "npm:6.19.0"
"@typescript-eslint/typescript-estree": "npm:6.19.0"
"@typescript-eslint/scope-manager": "npm:6.20.0"
"@typescript-eslint/types": "npm:6.20.0"
"@typescript-eslint/typescript-estree": "npm:6.20.0"
semver: "npm:^7.5.4"
peerDependencies:
eslint: ^7.0.0 || ^8.0.0
checksum: 343ff4cd4f7e102df8c46b41254d017a33d95df76455531fda679fdb92aebb9c111df8ee9ab54972e73c1e8fad9dd7e421001233f0aee8115384462b0821852e
checksum: 0a8ede3d80a365b52ae96d88e4a9f6e6abf3569c6b60ff9f42ff900cd843ae7c5493cd95f8f2029d90bb0acbf31030980206af98e581d760d6d41e0f80e9fb86
languageName: node
linkType: hard
"@typescript-eslint/visitor-keys@npm:6.19.0":
version: 6.19.0
resolution: "@typescript-eslint/visitor-keys@npm:6.19.0"
"@typescript-eslint/visitor-keys@npm:6.20.0":
version: 6.20.0
resolution: "@typescript-eslint/visitor-keys@npm:6.20.0"
dependencies:
"@typescript-eslint/types": "npm:6.19.0"
"@typescript-eslint/types": "npm:6.20.0"
eslint-visitor-keys: "npm:^3.4.1"
checksum: bb34e922e018aadf34866995ea5949d6623f184cc4f6470ab05767dd208ffabb003b7dc3872199714574b7f10afe89d49c6f89a4e8d086edea82be73e189f1bb
checksum: 852d938f2e5d57200cf62733b42e73a369f797b097d17e8fd3fffd0f7315c3b9e1863eed60bb8d57d6535a3b7f1980f645f96ec6d513950f182bfa8107b33fab
languageName: node
linkType: hard
@@ -3977,15 +4001,15 @@ __metadata:
languageName: node
linkType: hard
"@vidstack/react@npm:^1.9.8":
version: 1.9.8
resolution: "@vidstack/react@npm:1.9.8"
"@vidstack/react@npm:^1.10.2":
version: 1.10.2
resolution: "@vidstack/react@npm:1.10.2"
dependencies:
media-captions: "npm:^1.0.1"
peerDependencies:
"@types/react": ^18.0.0
react: ^18.0.0
checksum: ea9ba70b31074c0f29c5fdbcf4d79da9a9a812f8bad9722f5eff6c05e3fde1c8c547cdf31cdcce62afa24f3ee24a94081e2b016608c5a0edf241405b52692c69
checksum: 45481d02fd8dfe3069cf89ae5c3ed78b5f62874c57800e7b19e3b692d883b5e54e7fc0b1660958cb40096e403f1646c6e32089cd009af8560de8caddcaa7aa2a
languageName: node
linkType: hard
@@ -4434,14 +4458,14 @@ __metadata:
languageName: node
linkType: hard
"axios@npm:^1.6.5":
version: 1.6.5
resolution: "axios@npm:1.6.5"
"axios@npm:^1.6.7":
version: 1.6.7
resolution: "axios@npm:1.6.7"
dependencies:
follow-redirects: "npm:^1.15.4"
form-data: "npm:^4.0.0"
proxy-from-env: "npm:^1.1.0"
checksum: aeb9acf87590d8aa67946072ced38e01ca71f5dfe043782c0ccea667e5dd5c45830c08afac9be3d7c894f09684b8ab2a458f497d197b73621233bcf202d9d468
checksum: 131bf8e62eee48ca4bd84e6101f211961bf6a21a33b95e5dfb3983d5a2fe50d9fffde0b57668d7ce6f65063d3dc10f2212cbcb554f75cfca99da1c73b210358d
languageName: node
linkType: hard
@@ -5206,14 +5230,14 @@ __metadata:
languageName: node
linkType: hard
"compromise@npm:^14.11.1":
version: 14.11.1
resolution: "compromise@npm:14.11.1"
"compromise@npm:^14.11.2":
version: 14.11.2
resolution: "compromise@npm:14.11.2"
dependencies:
efrt: "npm:2.7.0"
grad-school: "npm:0.0.5"
suffix-thumb: "npm:5.0.2"
checksum: a20bff3ab6b953f14811a57a2b1169fc530664f6acd17318a67ab167cf959cbf09d221688875cdc0a3a4bda0a0e9c3debd31e6b4dedfca4f8f7404a7c46e31cc
checksum: b27846a30aeb17eb060cbc7a3c6edec6e9e3d3af2ae2d59823be7e13b9961e5e45d0d82241aa74fa12dd9837f2ae5c6713d1415efb358724d36f7a2fc492efe2
languageName: node
linkType: hard
@@ -5792,10 +5816,10 @@ __metadata:
languageName: node
linkType: hard
"electron-log@npm:^5.0.4":
version: 5.0.4
resolution: "electron-log@npm:5.0.4"
checksum: 7e8bb234d01f9d2ac615f84f054e84fa5f159e7d13a1844c670d3146f3e893269be935d0ecbe26a68e21a399414c88a4e260274fcb63bf59aeed41733dd02fa0
"electron-log@npm:^5.1.0":
version: 5.1.0
resolution: "electron-log@npm:5.1.0"
checksum: 7a9bcab5a1f516641364711c0f23d440a00962c8f087f18125c89ddcab1da06181de59359e8023ac0824a59d7001a2faf08a61bbd68609ca53feb85815376bc9
languageName: node
linkType: hard
@@ -5844,16 +5868,16 @@ __metadata:
languageName: node
linkType: hard
"electron@npm:^28.1.4":
version: 28.1.4
resolution: "electron@npm:28.1.4"
"electron@npm:^28.2.0":
version: 28.2.0
resolution: "electron@npm:28.2.0"
dependencies:
"@electron/get": "npm:^2.0.0"
"@types/node": "npm:^18.11.18"
extract-zip: "npm:^2.0.1"
bin:
electron: cli.js
checksum: 06cea12cb9d26534f483a338ff6f259650a020dd2604df8d9d9b76b6e2b0a0e366571f9491a1d03eff66ec9ff92247e74422cb6f1c3603a76da90c5e99d5e2e4
checksum: 6c4dbf84ce00556b80ac5a6da5d7b8e784d1c91ea3538221b86cedec154455f1bc3e1120adc9484f857f5334d5cbca050c8455cdd058538f0741239f576bc284
languageName: node
linkType: hard
@@ -5928,7 +5952,7 @@ __metadata:
"@ffmpeg/ffmpeg": "npm:^0.12.10"
"@ffmpeg/util": "npm:^0.12.1"
"@hookform/resolvers": "npm:^3.3.4"
"@langchain/google-genai": "npm:^0.0.7"
"@langchain/google-genai": "npm:^0.0.8"
"@mozilla/readability": "npm:^0.5.0"
"@radix-ui/react-accordion": "npm:^1.1.2"
"@radix-ui/react-alert-dialog": "npm:^1.0.5"
@@ -5961,34 +5985,34 @@ __metadata:
"@types/intl-tel-input": "npm:^18.1.4"
"@types/lodash": "npm:^4.14.202"
"@types/mark.js": "npm:^8.11.12"
"@types/node": "npm:^20.11.5"
"@types/node": "npm:^20.11.10"
"@types/react": "npm:^18.2.48"
"@types/react-dom": "npm:^18.2.18"
"@types/validator": "npm:^13.11.8"
"@types/wavesurfer.js": "npm:^6.0.12"
"@typescript-eslint/eslint-plugin": "npm:^6.19.0"
"@typescript-eslint/parser": "npm:^6.19.0"
"@typescript-eslint/eslint-plugin": "npm:^6.20.0"
"@typescript-eslint/parser": "npm:^6.20.0"
"@uidotdev/usehooks": "npm:^2.4.1"
"@vidstack/react": "npm:^1.9.8"
"@vidstack/react": "npm:^1.10.2"
"@vitejs/plugin-react": "npm:^4.2.1"
adm-zip: "npm:^0.5.10"
autoprefixer: "npm:^10.4.17"
autosize: "npm:^6.0.1"
axios: "npm:^1.6.5"
axios: "npm:^1.6.7"
camelcase: "npm:^8.0.0"
camelcase-keys: "npm:^9.1.3"
cheerio: "npm:^1.0.0-rc.12"
class-variance-authority: "npm:^0.7.0"
clsx: "npm:^2.1.0"
command-exists: "npm:^1.2.9"
compromise: "npm:^14.11.1"
compromise: "npm:^14.11.2"
compromise-paragraphs: "npm:^0.1.0"
compromise-stats: "npm:^0.1.0"
dayjs: "npm:^1.11.10"
decamelize: "npm:^6.0.0"
decamelize-keys: "npm:^2.0.1"
electron: "npm:^28.1.4"
electron-log: "npm:^5.0.4"
electron: "npm:^28.2.0"
electron-log: "npm:^5.1.0"
electron-settings: "npm:^4.0.2"
electron-squirrel-startup: "npm:^1.0.0"
eslint: "npm:^8.56.0"
@@ -5998,25 +6022,25 @@ __metadata:
fluent-ffmpeg: "npm:^2.1.2"
fs-extra: "npm:^11.2.0"
html-to-text: "npm:^9.0.5"
i18next: "npm:^23.7.18"
intl-tel-input: "npm:^19.2.12"
i18next: "npm:^23.8.1"
intl-tel-input: "npm:^19.2.15"
js-md5: "npm:^0.8.3"
langchain: "npm:^0.1.5"
langchain: "npm:^0.1.10"
lodash: "npm:^4.17.21"
lucide-react: "npm:^0.314.0"
lucide-react: "npm:^0.319.0"
mark.js: "npm:^8.11.1"
microsoft-cognitiveservices-speech-sdk: "npm:^1.34.0"
next-themes: "npm:^0.2.1"
octokit: "npm:^3.1.2"
openai: "npm:^4.24.7"
openai: "npm:^4.26.0"
pitchfinder: "npm:^2.3.2"
postcss: "npm:^8.4.33"
react: "npm:^18.2.0"
react-activity-calendar: "npm:^2.2.6"
react-activity-calendar: "npm:^2.2.7"
react-dom: "npm:^18.2.0"
react-hook-form: "npm:^7.49.3"
react-hotkeys-hook: "npm:^4.4.4"
react-i18next: "npm:^14.0.0"
react-i18next: "npm:^14.0.1"
react-markdown: "npm:^9.0.1"
react-router-dom: "npm:^6.21.3"
react-tooltip: "npm:^5.26.0"
@@ -6024,9 +6048,9 @@ __metadata:
rimraf: "npm:^5.0.5"
sequelize: "npm:^6.35.2"
sequelize-typescript: "npm:^2.1.6"
sonner: "npm:^1.3.1"
sonner: "npm:^1.4.0"
sqlite3: "npm:^5.1.7"
tailwind-merge: "npm:^2.2.0"
tailwind-merge: "npm:^2.2.1"
tailwind-scrollbar-hide: "npm:^1.1.7"
tailwindcss: "npm:^3.4.1"
tailwindcss-animate: "npm:^1.0.7"
@@ -6034,8 +6058,8 @@ __metadata:
tslib: "npm:^2.6.2"
typescript: "npm:^5.3.3"
umzug: "npm:^3.5.1"
vite-plugin-static-copy: "npm:^1.0.0"
wavesurfer.js: "npm:^7.6.5"
vite-plugin-static-copy: "npm:^1.0.1"
wavesurfer.js: "npm:^7.7.1"
zod: "npm:^3.22.4"
zx: "npm:^7.2.3"
languageName: unknown
@@ -7727,12 +7751,12 @@ __metadata:
languageName: node
linkType: hard
"i18next@npm:^23.7.18":
version: 23.7.18
resolution: "i18next@npm:23.7.18"
"i18next@npm:^23.8.1":
version: 23.8.1
resolution: "i18next@npm:23.8.1"
dependencies:
"@babel/runtime": "npm:^7.23.2"
checksum: 7ff20eaaf6b5b6ff31c0bde2907149caecd7d63c6106e06afc9dd23b543b281733c50a45c6f19627338a3212426976fd7dfdfeece305ffbb7d3d4dcefe69c97c
checksum: f74ca2ead03e71bc8ca353c89e842b75042a52b52dcc5dfc14092469dcbc5d61f220fef536dfa95520de42baad0caa86c7a8abc1313a057ece9ba08ad8598bbf
languageName: node
linkType: hard
@@ -7855,15 +7879,15 @@ __metadata:
languageName: node
linkType: hard
"intl-tel-input@npm:^19.2.12":
version: 19.2.12
resolution: "intl-tel-input@npm:19.2.12"
"intl-tel-input@npm:^19.2.15":
version: 19.2.15
resolution: "intl-tel-input@npm:19.2.15"
dependencies:
esbuild: "npm:^0.19.11"
prop-types: "npm:^15.8.1"
react: "npm:^18.2.0"
react-dom: "npm:^18.2.0"
checksum: 4bed202296b69c2107a1f899156e0a7799c702d300b75808780abc5b6da05c6e69e22bcdf71584c11531e03078d5bc7dd14c114d5850c331a01d3d90351ea191
checksum: d9c6b3ada3751c417be34539a5d8d853100af6c6da1261c043667300d9f6a59751871648b7bddcf58c098719a36b104a21dad6a09453d15f64362b30061c0fcf
languageName: node
linkType: hard
@@ -8423,12 +8447,12 @@ __metadata:
languageName: node
linkType: hard
"langchain@npm:^0.1.5":
version: 0.1.5
resolution: "langchain@npm:0.1.5"
"langchain@npm:^0.1.10":
version: 0.1.10
resolution: "langchain@npm:0.1.10"
dependencies:
"@anthropic-ai/sdk": "npm:^0.9.1"
"@langchain/community": "npm:~0.0.17"
"@langchain/community": "npm:~0.0.20"
"@langchain/core": "npm:~0.1.16"
"@langchain/openai": "npm:~0.0.12"
binary-extensions: "npm:^2.2.0"
@@ -8437,7 +8461,7 @@ __metadata:
js-yaml: "npm:^4.1.0"
jsonpointer: "npm:^5.0.1"
langchainhub: "npm:~0.0.6"
langsmith: "npm:~0.0.48"
langsmith: "npm:~0.0.59"
ml-distance: "npm:^4.0.0"
openapi-types: "npm:^12.1.3"
p-retry: "npm:4"
@@ -8457,7 +8481,7 @@ __metadata:
"@google-ai/generativelanguage": ^0.2.1
"@google-cloud/storage": ^6.10.1
"@notionhq/client": ^2.2.10
"@pinecone-database/pinecone": ^1.1.0
"@pinecone-database/pinecone": "*"
"@supabase/supabase-js": ^2.10.0
"@vercel/kv": ^0.2.3
"@xata.io/client": ^0.28.0
@@ -8602,7 +8626,7 @@ __metadata:
optional: true
youtubei.js:
optional: true
checksum: a1cc0b725033efea19b7d97f77f6974bd49929cab38358c83538e1712fabb3beaadc43435c87ee16e158b797d08582ead4a1d4deefea40cbeb629618ffc1d8bd
checksum: a5aed3ce3340b7de7e236bbbf99eab667ddfac791cf1929e92ad6b80af851d7e5e0572eab4b31ea9cd94ddb4214b25dfce2900189ca86f712888889a36347733
languageName: node
linkType: hard
@@ -8628,6 +8652,21 @@ __metadata:
languageName: node
linkType: hard
"langsmith@npm:~0.0.59":
version: 0.0.63
resolution: "langsmith@npm:0.0.63"
dependencies:
"@types/uuid": "npm:^9.0.1"
commander: "npm:^10.0.1"
p-queue: "npm:^6.6.2"
p-retry: "npm:4"
uuid: "npm:^9.0.0"
bin:
langsmith: dist/cli/main.cjs
checksum: a19561430155f7a2668c9e0a8472484dcb9a440fafb9bcdb38535094c3854d19f5b11e50e3ea37853f1287283a88833b7f0134fca3bf8fd1816d0e3e7ba019c7
languageName: node
linkType: hard
"leac@npm:^0.6.0":
version: 0.6.0
resolution: "leac@npm:0.6.0"
@@ -8930,12 +8969,12 @@ __metadata:
languageName: node
linkType: hard
"lucide-react@npm:^0.314.0":
version: 0.314.0
resolution: "lucide-react@npm:0.314.0"
"lucide-react@npm:^0.319.0":
version: 0.319.0
resolution: "lucide-react@npm:0.319.0"
peerDependencies:
react: ^16.5.1 || ^17.0.0 || ^18.0.0
checksum: b7994d4a11167ca590e9ee2be1d8c9d4e22f860a4d40ff8a24cbb0b4b71f7b60c834c9ffeca72a746d21be9f47ce69d560ba0b0975ee51a35cd0d7e7b4a54365
checksum: 86fa2325c3f3633a4b04c68f2b384b65788c5bdde13fe21c12ed3e82708b81481325f4bc4df2ca7a55a952924b00fc49050b90c2c1f216d7e8aac497b8933038
languageName: node
linkType: hard
@@ -10218,7 +10257,7 @@ __metadata:
languageName: node
linkType: hard
"openai@npm:^4.24.2, openai@npm:^4.24.7":
"openai@npm:^4.24.2":
version: 4.24.7
resolution: "openai@npm:4.24.7"
dependencies:
@@ -10237,6 +10276,25 @@ __metadata:
languageName: node
linkType: hard
"openai@npm:^4.26.0":
version: 4.26.0
resolution: "openai@npm:4.26.0"
dependencies:
"@types/node": "npm:^18.11.18"
"@types/node-fetch": "npm:^2.6.4"
abort-controller: "npm:^3.0.0"
agentkeepalive: "npm:^4.2.1"
digest-fetch: "npm:^1.3.0"
form-data-encoder: "npm:1.7.2"
formdata-node: "npm:^4.3.2"
node-fetch: "npm:^2.6.7"
web-streams-polyfill: "npm:^3.2.1"
bin:
openai: bin/cli
checksum: e67f004a7a0b6c33256a2a12c086bd884b33c9431349570ad68bcd046a75917f734ccbd65b0a3ccc9a341f344dfe128bf8f5b2e258e262c1796ee83741cdcf0d
languageName: node
linkType: hard
"openapi-types@npm:^12.1.3":
version: 12.1.3
resolution: "openapi-types@npm:12.1.3"
@@ -10948,9 +11006,9 @@ __metadata:
languageName: node
linkType: hard
"react-activity-calendar@npm:^2.2.6":
version: 2.2.6
resolution: "react-activity-calendar@npm:2.2.6"
"react-activity-calendar@npm:^2.2.7":
version: 2.2.7
resolution: "react-activity-calendar@npm:2.2.7"
dependencies:
"@types/chroma-js": "npm:^2.4.3"
chroma-js: "npm:^2.4.2"
@@ -10958,7 +11016,7 @@ __metadata:
peerDependencies:
react: ^17.0.0 || ^18.0.0
react-dom: ^17.0.0 || ^18.0.0
checksum: ac7ffede800c42a4985a7db98ddd54e5b1bfd0057fd6ad2474bba29a62ab52d83352c42ebfe1496d6b8958398d9036bf62558cf7f82ea786d26d30c27841c91c
checksum: 2714e2a22f46b30436b050e370a0e3ffd27c36aaf5b0c07afefebfa0e4520e16cf994e86c6fb5eab6cf621941a3effc90c290325d18f55963f3e27643fd532ca
languageName: node
linkType: hard
@@ -10993,9 +11051,9 @@ __metadata:
languageName: node
linkType: hard
"react-i18next@npm:^14.0.0":
version: 14.0.0
resolution: "react-i18next@npm:14.0.0"
"react-i18next@npm:^14.0.1":
version: 14.0.1
resolution: "react-i18next@npm:14.0.1"
dependencies:
"@babel/runtime": "npm:^7.22.5"
html-parse-stringify: "npm:^3.0.1"
@@ -11007,7 +11065,7 @@ __metadata:
optional: true
react-native:
optional: true
checksum: 923990e941cdc9ca73bb34fca8a408f850c6ce7ca490661d83c808c5a91f33997882d40e0d2da64664c91a064e12002164c8dd843d75542a7e7e261a3d4198d8
checksum: 9104c51c5d185e6d1b8ad71714b9ef490286f87b8833c7f67949da34f46f65ebcc994f6f3d087ffe180b7f811ba367c64e5e500c8e442fedbdf54c193bad0257
languageName: node
linkType: hard
@@ -11872,13 +11930,13 @@ __metadata:
languageName: node
linkType: hard
"sonner@npm:^1.3.1":
version: 1.3.1
resolution: "sonner@npm:1.3.1"
"sonner@npm:^1.4.0":
version: 1.4.0
resolution: "sonner@npm:1.4.0"
peerDependencies:
react: ^18.0.0
react-dom: ^18.0.0
checksum: 324d2b2edb8bf15a38239f67e533bbf9f38425adf432c9542e7a9610b7451ec5d4082d2e8ddb564096f63f44ba7ec4d2e137f8b0af1786a0becac346fae2b28a
checksum: 2e55de2de03dd0d103d36d18e36a891b16e42102ca789a2eea37635172d7035dea749e95ff0e36cc04f1c43c9fa72637a17a702ef184926bf1c33ed17fba18bd
languageName: node
linkType: hard
@@ -12245,12 +12303,12 @@ __metadata:
languageName: node
linkType: hard
"tailwind-merge@npm:^2.2.0":
version: 2.2.0
resolution: "tailwind-merge@npm:2.2.0"
"tailwind-merge@npm:^2.2.1":
version: 2.2.1
resolution: "tailwind-merge@npm:2.2.1"
dependencies:
"@babel/runtime": "npm:^7.23.5"
checksum: 41476d686af101c770401f4db509ffea9ec13bf99fd18185903dd86df2b71b3f512d9373b0f2785cb9e9abf093f39373327a29e9b3dde643b9daf7062f77c4ba
"@babel/runtime": "npm:^7.23.7"
checksum: 14ab965ec897e9377484b7593f7a700dde09b8035b762ad42652622a3ed1f202b203f48c0f235c0b1b38e9390470d94458f6f9010d33a5a18d71b15f38b986a6
languageName: node
linkType: hard
@@ -13033,9 +13091,9 @@ __metadata:
languageName: node
linkType: hard
"vite-plugin-static-copy@npm:^1.0.0":
version: 1.0.0
resolution: "vite-plugin-static-copy@npm:1.0.0"
"vite-plugin-static-copy@npm:^1.0.1":
version: 1.0.1
resolution: "vite-plugin-static-copy@npm:1.0.1"
dependencies:
chokidar: "npm:^3.5.3"
fast-glob: "npm:^3.2.11"
@@ -13043,7 +13101,7 @@ __metadata:
picocolors: "npm:^1.0.0"
peerDependencies:
vite: ^5.0.0
checksum: f3eef54b6d7f791e3b8869789cdae88b6e33822db2fba98de1f6a80e28b261fb1a74d99e9663d5e7d0a2337e5727e35d95261dffb3e3c447cfe9bc4854918df4
checksum: fbec6188dd118799e26c1107b71189c6e452856500ad72ced0a9f6b0008acb6e06496467faad74f633cfd8268f0a89e3b1f726f0606f9883716bbabb6faac2df
languageName: node
linkType: hard
@@ -13094,10 +13152,10 @@ __metadata:
languageName: node
linkType: hard
"wavesurfer.js@npm:^7.6.5":
version: 7.6.5
resolution: "wavesurfer.js@npm:7.6.5"
checksum: 023324d2c06d45a26664e1034f39b0d87ab693a182958be331a1064038c09ec6b1d239be2dd4dd19a3ba9d9a1ea2897fe4b93a0bb055fbe293d756c3d5fad87a
"wavesurfer.js@npm:^7.7.1":
version: 7.7.1
resolution: "wavesurfer.js@npm:7.7.1"
checksum: e80e25a9e6e24d80d76b514b752517fb6258995709406d5a93b1b4c1b8533bbe86919569528c61231407040b86a5efe15e8e0b35c598fc1301f26813a9ad7886
languageName: node
linkType: hard