Feat: use ffmpeg wasm (#177)

* add ffmpeg wasm

* refactor whisper config

* refactor ffmpeg config

* remove deprecated code

* refactor model download

* decouple whisper & ffmpeg when transcribing

* do not always toast not found error

* ffmpeg works in renderer

* transcode in renderer & transcribe in main

* improve transcode

* refactor

* remove ffmpeg check

* fix whisper config check

* update whisper check

* enable SharedArrayBuffer

* remove ffmpeg setting

* fix UI

* handle ffmpeg not ready

* allow creating an issue when the whisper check fails

* upgrade deps

* hide ffmpeg setting if not ready

* hide record button when not in region
This commit is contained in:
an-lee
2024-01-23 14:39:16 +08:00
committed by GitHub
parent 7c9170170d
commit 3a98bd9f40
50 changed files with 1414 additions and 1199 deletions

View File

@@ -63,6 +63,8 @@
"zx": "^7.2.3"
},
"dependencies": {
"@ffmpeg/ffmpeg": "^0.12.10",
"@ffmpeg/util": "^0.12.1",
"@hookform/resolvers": "^3.3.4",
"@langchain/google-genai": "^0.0.7",
"@mozilla/readability": "^0.5.0",
@@ -111,11 +113,11 @@
"fluent-ffmpeg": "^2.1.2",
"fs-extra": "^11.2.0",
"html-to-text": "^9.0.5",
"i18next": "^23.7.16",
"i18next": "^23.7.18",
"js-md5": "^0.8.3",
"langchain": "^0.1.4",
"langchain": "^0.1.5",
"lodash": "^4.17.21",
"lucide-react": "^0.312.0",
"lucide-react": "^0.314.0",
"mark.js": "^8.11.1",
"microsoft-cognitiveservices-speech-sdk": "^1.34.0",
"next-themes": "^0.2.1",
@@ -130,7 +132,7 @@
"react-i18next": "^14.0.0",
"react-markdown": "^9.0.1",
"react-router-dom": "^6.21.3",
"react-tooltip": "^5.25.2",
"react-tooltip": "^5.26.0",
"reflect-metadata": "^0.2.1",
"rimraf": "^5.0.5",
"sequelize": "^6.35.2",
@@ -139,7 +141,7 @@
"sqlite3": "^5.1.7",
"tailwind-scrollbar-hide": "^1.1.7",
"umzug": "^3.5.1",
"wavesurfer.js": "^7.6.4",
"wavesurfer.js": "^7.6.5",
"zod": "^3.22.4"
}
}

View File

@@ -4,6 +4,8 @@ export const LIBRARY_PATH_SUFFIX = "EnjoyLibrary";
export const STORAGE_WORKER_ENDPOINT = "https://enjoy-storage.baizhiheizi.com";
export const WEB_API_URL = "https://enjoy-web.fly.dev";
export const REPO_URL = "https://github.com/xiaolai/everyone-can-use-english"
// https://huggingface.co/ggerganov/whisper.cpp/tree/main
export const WHISPER_MODELS_OPTIONS = [
{

View File

@@ -171,6 +171,8 @@
"error": "Error",
"errors": "Errors",
"cancel": "Cancel",
"cancelled": "Cancelled",
"canceling": "Canceling",
"confirm": "Confirm",
"continue": "continue",
"save": "Save",
@@ -295,6 +297,9 @@
"checkingWhisper": "Checking whisper status",
"whisperIsWorkingGood": "Whisper is working good",
"whisperIsNotWorking": "Whisper is not working",
"checkingWhisperModel": "Checking whisper model",
"whisperModelIsWorkingGood": "Whisper model is working good",
"whisperModelIsNotWorking": "Whisper model is not working",
"relaunchIsNeededAfterChanged": "Relaunch is needed after changed",
"openaiKeySaved": "OpenAI key saved",
"openaiConfigSaved": "OpenAI config saved",

View File

@@ -171,6 +171,8 @@
"error": "错误",
"errors": "错误",
"cancel": "取消",
"cancelled": "已取消",
"canceling": "正在取消",
"confirm": "确认",
"continue": "继续",
"save": "保存",
@@ -294,6 +296,9 @@
"checkingWhisper": "正在检查 Whisper",
"whisperIsWorkingGood": "Whisper 正常工作",
"whisperIsNotWorking": "Whisper 无法正常工作,请尝试更换模型后重试,或联系开发者",
"checkingWhisperModel": "正在检查 Whisper 模型",
"whisperModelIsWorkingGood": "Whisper 模型正常工作",
"whisperModelIsNotWorking": "Whisper 模型无法正常工作,请尝试更换模型后重试,或联系开发者",
"relaunchIsNeededAfterChanged": "更改后需要重新启动",
"openaiKeySaved": "OpenAI 密钥已保存",
"openaiConfigSaved": "OpenAI 配置已保存",

View File

@@ -14,6 +14,8 @@ log.errorHandler.startCatching();
// Fix "getRandomValues() not supported"
global.crypto = crypto;
app.commandLine.appendSwitch('enable-features','SharedArrayBuffer')
// Handle creating/removing shortcuts on Windows when installing/uninstalling.
if (require("electron-squirrel-startup")) {
app.quit();

View File

@@ -49,9 +49,8 @@ class AudiosHandler {
},
})
.then((audio) => {
if (!audio) {
throw new Error(t("models.audio.notFound"));
}
if (!audio) return;
if (!audio.isSynced) {
audio.sync().catch(() => {});
}

View File

@@ -30,27 +30,6 @@ class TranscriptionsHandler {
},
});
if (transcription.state === "pending") {
const timeout = setTimeout(() => {
event.sender.send("on-notification", {
type: "warning",
message: t("stillTranscribing"),
});
}, 1000 * 10);
transcription
.process()
.catch((err) => {
event.sender.send("on-notification", {
type: "error",
message: err.message,
});
})
.finally(() => {
clearTimeout(timeout);
});
}
return transcription.toJSON();
} catch (err) {
event.sender.send("on-notification", {
@@ -87,8 +66,16 @@ class TranscriptionsHandler {
private async process(
event: IpcMainEvent,
where: WhereOptions<Attributes<Transcription>>
where: WhereOptions<Attributes<Transcription>>,
options?: {
force?: boolean;
blob: {
type: string;
arrayBuffer: ArrayBuffer;
};
}
) {
const { force = true, blob } = options || {};
return Transcription.findOne({
where: {
...where,
@@ -107,7 +94,7 @@ class TranscriptionsHandler {
}, 1000 * 10);
transcription
.process({ force: true })
.process({ force, wavFileBlob: blob })
.catch((err) => {
event.sender.send("on-notification", {
type: "error",

View File

@@ -49,9 +49,8 @@ class VideosHandler {
},
})
.then((video) => {
if (!video) {
throw new Error(t("models.video.notFound"));
}
if (!video) return;
if (!video.isSynced) {
video.sync().catch(() => {});
}

View File

@@ -178,6 +178,8 @@ export class Audio extends Model<Audio> {
@BeforeCreate
static async setupDefaultAttributes(audio: Audio) {
if (!settings.ffmpegConfig().ready) return;
try {
const ffmpeg = new Ffmpeg();
const fileMetadata = await ffmpeg.generateMetadata(audio.filePath);
@@ -189,9 +191,11 @@ export class Audio extends Model<Audio> {
@AfterCreate
static transcribeAsync(audio: Audio) {
setTimeout(() => {
audio.transcribe();
}, 500);
if (settings.ffmpegConfig().ready) {
setTimeout(() => {
audio.transcribe();
}, 500);
}
}
@AfterCreate

View File

@@ -19,6 +19,9 @@ import log from "electron-log/main";
import { Client } from "@/api";
import { WEB_API_URL, PROCESS_TIMEOUT } from "@/constants";
import settings from "@main/settings";
import Ffmpeg from "@main/ffmpeg";
import path from "path";
import fs from "fs-extra";
const logger = log.scope("db/models/transcription");
const webApi = new Client({
@@ -85,9 +88,15 @@ export class Transcription extends Model<Transcription> {
}
// STT using whisper
async process(options: { force?: boolean } = {}) {
async process(
options: {
force?: boolean;
wavFileBlob?: { type: string; arrayBuffer: ArrayBuffer };
} = {}
) {
if (this.getDataValue("state") === "processing") return;
const { force = false } = options;
const { force = false, wavFileBlob } = options;
logger.info(`[${this.getDataValue("id")}]`, "Start to transcribe.");
@@ -103,11 +112,43 @@ export class Transcription extends Model<Transcription> {
throw new Error("No file path.");
}
let wavFile: string = filePath;
const tmpDir = settings.cachePath();
const outputFile = path.join(
tmpDir,
path.basename(filePath, path.extname(filePath)) + ".wav"
);
if (wavFileBlob) {
const format = wavFileBlob.type.split("/")[1];
if (format !== "wav") {
throw new Error("Only wav format is supported");
}
await fs.outputFile(outputFile, Buffer.from(wavFileBlob.arrayBuffer));
wavFile = outputFile;
} else if (settings.ffmpegConfig().ready) {
const ffmpeg = new Ffmpeg();
try {
wavFile = await ffmpeg.prepareForWhisper(
filePath,
path.join(
tmpDir,
path.basename(filePath, path.extname(filePath)) + ".wav"
)
);
} catch (err) {
logger.error("ffmpeg error", err);
}
}
try {
await this.update({
state: "processing",
});
const { model, transcription } = await whisper.transcribe(filePath, {
const { model, transcription } = await whisper.transcribe(wavFile, {
force,
extra: [
"--split-on-word",

View File

@@ -150,6 +150,7 @@ export class Video extends Model<Video> {
// generate cover and upload
async generateCover() {
if (this.coverUrl) return;
if (!settings.ffmpegConfig().ready) return;
const ffmpeg = new Ffmpeg();
const coverFile = await ffmpeg.generateCover(
@@ -199,6 +200,8 @@ export class Video extends Model<Video> {
@BeforeCreate
static async setupDefaultAttributes(video: Video) {
if (!settings.ffmpegConfig().ready) return;
try {
const ffmpeg = new Ffmpeg();
const fileMetadata = await ffmpeg.generateMetadata(video.filePath);
@@ -210,9 +213,11 @@ export class Video extends Model<Video> {
@AfterCreate
static transcribeAsync(video: Video) {
setTimeout(() => {
video.transcribe();
}, 500);
if (settings.ffmpegConfig().ready) {
setTimeout(() => {
video.transcribe();
}, 500);
}
}
@AfterCreate

View File

@@ -2,7 +2,9 @@ import { ipcMain, app } from "electron";
import path from "path";
import fs from "fs";
import mainWin from "@main/window";
import log from "electron-log/main";
const logger = log.scope("downloader");
class Downloader {
public tasks: Electron.DownloadItem[];
@@ -55,9 +57,11 @@ class Downloader {
if (state === "completed") {
resolve(item.getSavePath());
} else {
fs.rmSync(item.getSavePath(), {
force: true,
});
if (fs.lstatSync(item.getSavePath()).isFile()) {
fs.rmSync(item.getSavePath(), {
force: true,
});
}
resolve(undefined);
}
});
@@ -66,10 +70,14 @@ class Downloader {
}
cancel(filename: string) {
const task = this.tasks.find((t) => t.getFilename() === filename);
if (task && task.getState() === "progressing") {
task.cancel();
}
logger.debug("dashboard", this.dashboard());
this.tasks
.filter(
(t) => t.getFilename() === filename && t.getState() === "progressing"
)
.forEach((t) => {
t.cancel();
});
}
cancelAll() {
@@ -93,7 +101,14 @@ class Downloader {
}
registerIpcHandlers() {
ipcMain.handle("download-start", (event, url, savePath) => {
this.download(url, {
webContents: event.sender,
savePath,
});
});
ipcMain.handle("download-cancel", (_event, filename) => {
logger.debug("download-cancel", filename);
this.cancel(filename);
});
ipcMain.handle("download-cancel-all", () => {

View File

@@ -316,6 +316,15 @@ export class FfmpegDownloader {
}
registerIpcHandlers() {
ipcMain.handle("ffmpeg-config", async (_event) => {
return settings.ffmpegConfig();
});
ipcMain.handle("ffmpeg-set-config", async (_event, config) => {
settings.setSync("ffmpeg.ffmpegPath", config.ffmpegPath);
settings.setSync("ffmpeg.ffprobePath", config.ffrobePath);
});
ipcMain.handle("ffmpeg-download", async (event) => {
try {
return await this.download(event.sender);

View File

@@ -57,32 +57,14 @@ const dbPath = () => {
return path.join(userDataPath(), dbName);
};
const whisperModelsPath = () => {
const dir = path.join(libraryPath(), "whisper", "models");
fs.ensureDirSync(dir);
return dir;
};
const whisperModelPath = () => {
return path.join(
whisperModelsPath(),
settings.getSync("whisper.model") as string
);
};
const llamaModelsPath = () => {
const dir = path.join(libraryPath(), "llama", "models");
fs.ensureDirSync(dir);
return dir;
};
const llamaModelPath = () => {
return path.join(
llamaModelsPath(),
settings.getSync("llama.model") as string
);
const whisperConfig = (): WhisperConfigType => {
return {
availableModels: settings.getSync(
"whisper.availableModels"
) as WhisperConfigType["availableModels"],
modelsPath: settings.getSync("whisper.modelsPath") as string,
model: settings.getSync("whisper.model") as string,
};
};
const userDataPath = () => {
@@ -149,18 +131,6 @@ export default {
settings.setSync("whisper.model", model);
});
ipcMain.handle("settings-get-whisper-models-path", (_event) => {
return whisperModelsPath();
});
ipcMain.handle("settings-set-llama-model", (_event, model) => {
settings.setSync("whisper.model", model);
});
ipcMain.handle("settings-get-llama-models-path", (_event) => {
return llamaModelsPath();
});
ipcMain.handle("settings-get-user-data-path", (_event) => {
return userDataPath();
});
@@ -173,15 +143,6 @@ export default {
return settings.setSync(provider, config);
});
ipcMain.handle("settings-get-ffmpeg-config", (_event) => {
return ffmpegConfig();
});
ipcMain.handle("settings-set-ffmpeg-config", (_event, config) => {
settings.setSync("ffmpeg.ffmpegPath", config.ffmpegPath);
settings.setSync("ffmpeg.ffprobePath", config.ffrobePath);
});
ipcMain.handle("settings-get-language", (_event) => {
return language();
});
@@ -192,12 +153,9 @@ export default {
},
cachePath,
libraryPath,
whisperModelsPath,
whisperModelPath,
llamaModelsPath,
llamaModelPath,
userDataPath,
dbPath,
whisperConfig,
ffmpegConfig,
language,
switchLanguage,

View File

@@ -2,9 +2,6 @@ import { ipcMain } from "electron";
import settings from "@main/settings";
import path from "path";
import { WHISPER_MODELS_OPTIONS, PROCESS_TIMEOUT } from "@/constants";
import { readdir } from "fs/promises";
import downloader from "@main/downloader";
import Ffmpeg from "@main/ffmpeg";
import { exec } from "child_process";
import fs from "fs-extra";
import log from "electron-log/main";
@@ -14,18 +11,84 @@ const MAGIC_TOKENS = ["Mrs.", "Ms.", "Mr.", "Dr.", "Prof.", "St."];
const END_OF_WORD_REGEX = /[^\.!,\?][\.!\?]/g;
class Whipser {
private binMain = path.join(__dirname, "lib", "whisper", "main");
public config: WhisperConfigType;
constructor() {}
constructor(config?: WhisperConfigType) {
this.config = config;
this.initialize();
}
currentModel() {
return (this.config.availableModels || []).find(
(m) => m.name === this.config.model
)?.savePath;
}
async initialize() {
const dir = path.join(settings.libraryPath(), "whisper", "models");
const files = fs.readdirSync(dir);
const models = [];
for (const file of files) {
const model = WHISPER_MODELS_OPTIONS.find((m) => m.name == file);
if (!model) continue;
models.push({
...model,
savePath: path.join(dir, file),
});
}
settings.setSync("whisper.availableModels", models);
settings.setSync("whisper.modelsPath", dir);
this.config = settings.whisperConfig();
return new Promise((resolve, reject) => {
exec(
`"${this.binMain}" --help`,
{
timeout: PROCESS_TIMEOUT,
},
(error, stdout, stderr) => {
if (error) {
logger.error("error", error);
}
if (stderr) {
logger.debug("stderr", stderr);
}
if (stdout) {
logger.debug("stdout", stdout);
}
const std = (stdout || stderr).toString()?.trim();
if (std.startsWith("usage:")) {
resolve(true);
} else {
reject(
error || new Error("Whisper check failed: unknown error").message
);
}
}
);
});
}
async check() {
await this.initialize();
if (!this.currentModel()) {
throw new Error("No model selected");
}
const sampleFile = path.join(__dirname, "samples", "jfk.wav");
const tmpDir = settings.cachePath();
const outputFile = path.join(tmpDir, "jfk.json");
return new Promise((resolve, reject) => {
fs.rmSync(outputFile, { force: true });
return new Promise((resolve, _reject) => {
const commands = [
`"${this.binMain}"`,
`--file "${sampleFile}"`,
`--model "${settings.whisperModelPath()}"`,
`--model "${this.currentModel()}"`,
"--output-json",
`--output-file "${path.join(tmpDir, "jfk")}"`,
];
@@ -48,15 +111,10 @@ class Whipser {
logger.debug(stdout);
}
if (fs.existsSync(outputFile)) {
resolve(true);
} else {
reject(
error ||
new Error(stderr || "Whisper check failed: unknown error")
.message
);
}
resolve({
success: fs.existsSync(outputFile),
log: `${error?.message || ""}\n${stderr}\n${stdout}`,
});
}
);
});
@@ -64,35 +122,43 @@ class Whipser {
async transcribeBlob(
blob: { type: string; arrayBuffer: ArrayBuffer },
prompt?: string
) {
const filename = `${Date.now()}.wav`;
options?: {
prompt?: string;
group?: boolean;
}
): Promise<
TranscriptionSegmentType[] | TranscriptionResultSegmentGroupType[]
> {
const { prompt, group = false } = options || {};
const format = blob.type.split("/")[1];
if (format !== "wav") {
throw new Error("Only wav format is supported");
}
const tempfile = path.join(settings.cachePath(), `${Date.now()}.${format}`);
await fs.outputFile(tempfile, Buffer.from(blob.arrayBuffer));
const wavFile = path.join(settings.cachePath(), filename);
const ffmpeg = new Ffmpeg();
await ffmpeg.convertToWav(tempfile, wavFile);
const extra = [];
if (prompt) {
extra.push(`--prompt "${prompt.replace(/"/g, '\\"')}"`);
}
const { transcription } = await this.transcribe(wavFile, {
const { transcription } = await this.transcribe(tempfile, {
force: true,
extra,
});
const content = transcription
.map((t: TranscriptionSegmentType) => t.text)
.join(" ")
.trim();
return {
file: wavFile,
content,
};
if (group) {
return this.groupTranscription(transcription);
} else {
return transcription;
}
}
/* Ensure the file is in wav format
* and 16kHz sample rate
*/
async transcribe(
file: string,
options: {
@@ -111,16 +177,10 @@ class Whipser {
return fs.readJson(outputFile);
}
const ffmpeg = new Ffmpeg();
const waveFile = await ffmpeg.prepareForWhisper(
file,
path.join(tmpDir, filename + ".wav")
);
const command = [
`"${this.binMain}"`,
`--file "${waveFile}"`,
`--model "${settings.whisperModelPath()}"`,
`--file "${file}"`,
`--model "${this.currentModel()}"`,
"--output-json",
`--output-file "${path.join(tmpDir, filename)}"`,
...extra,
@@ -159,7 +219,9 @@ class Whipser {
});
}
groupTranscription(transcription: TranscriptionSegmentType[]) {
groupTranscription(
transcription: TranscriptionSegmentType[]
): TranscriptionResultSegmentGroupType[] {
const generateGroup = (group?: TranscriptionSegmentType[]) => {
if (!group || group.length === 0) return;
@@ -209,52 +271,38 @@ class Whipser {
}
registerIpcHandlers() {
ipcMain.handle("whisper-available-models", async (event) => {
const models: string[] = [];
ipcMain.handle("whisper-config", async () => {
try {
const files = await readdir(settings.whisperModelsPath());
for (const file of files) {
if (WHISPER_MODELS_OPTIONS.find((m) => m.name == file)) {
models.push(file);
}
}
} catch (err) {
event.sender.send("on-notification", {
type: "error",
message: err.message,
});
await this.initialize();
return Object.assign({}, this.config, { ready: true });
} catch (_err) {
return Object.assign({}, this.config, { ready: false });
}
return models;
});
ipcMain.handle("whisper-download-model", (event, name) => {
const model = WHISPER_MODELS_OPTIONS.find((m) => m.name === name);
if (!model) {
event.sender.send("on-notification", {
type: "error",
message: `Model ${name} not supported`,
});
return;
}
ipcMain.handle("whisper-set-model", async (event, model) => {
const originalModel = settings.getSync("whisper.model");
settings.setSync("whisper.model", model);
this.config = settings.whisperConfig();
downloader.download(model.url, {
webContents: event.sender,
savePath: path.join(settings.whisperModelsPath(), model.name),
});
return this.check()
.then(() => {
return Object.assign({}, this.config, { ready: true });
})
.catch((err) => {
settings.setSync("whisper.model", originalModel);
event.sender.send("on-notification", {
type: "error",
message: err.message,
});
});
});
ipcMain.handle("whisper-check", async (event) => {
return this.check().catch((err) => {
event.sender.send("on-notification", {
type: "error",
message: err.message,
});
});
return await this.check();
});
ipcMain.handle("whisper-transcribe", async (event, blob, prompt) => {
ipcMain.handle("whisper-transcribe-blob", async (event, blob, prompt) => {
try {
return await this.transcribeBlob(blob, prompt);
} catch (err) {

View File

@@ -16,7 +16,7 @@ import whisper from "@main/whisper";
import fs from "fs-extra";
import "@main/i18n";
import log from "electron-log/main";
import { WEB_API_URL } from "@/constants";
import { WEB_API_URL, REPO_URL } from "@/constants";
import { AudibleProvider, TedProvider } from "@main/providers";
import { FfmpegDownloader } from "@main/ffmpeg";
import { Waveform } from "./waveform";
@@ -266,6 +266,31 @@ main.init = () => {
mainWindow.webContents.openDevTools();
});
ipcMain.handle("app-create-issue", (_event, title, log) => {
const body = `**Version**
${app.getVersion()}
**Platform**
${process.platform} ${process.arch} ${process.getSystemVersion()}
**Log**
\`\`\`
${log}
\`\`\`
`;
const params = {
title,
body,
};
shell.openExternal(
`${REPO_URL}/issues/new?${new URLSearchParams(params).toString()}`
);
});
ipcMain.handle(
"system-preferences-media-access",
async (_event, mediaType: "microphone" | "camera") => {

View File

@@ -29,6 +29,9 @@ contextBridge.exposeInMainWorld("__ENJOY_APP__", {
openDevTools: () => {
ipcRenderer.invoke("app-open-dev-tools");
},
createIssue: (title: string, body: string) => {
return ipcRenderer.invoke("app-create-issue", title, body);
},
version,
},
system: {
@@ -123,24 +126,6 @@ contextBridge.exposeInMainWorld("__ENJOY_APP__", {
setUser: (user: UserType) => {
return ipcRenderer.invoke("settings-set-user", user);
},
getWhisperModel: () => {
return ipcRenderer.invoke("settings-get-whisper-model");
},
setWhisperModel: (model: string) => {
return ipcRenderer.invoke("settings-set-whisper-model", model);
},
getWhisperModelsPath: () => {
return ipcRenderer.invoke("settings-get-whisper-models-path");
},
getLlamaModel: () => {
return ipcRenderer.invoke("settings-get-llama-model");
},
setLlamaModel: (model: string) => {
return ipcRenderer.invoke("settings-set-llama-model", model);
},
getLlamaModelsPath: () => {
return ipcRenderer.invoke("settings-get-llama-models-path");
},
getUserDataPath: () => {
return ipcRenderer.invoke("settings-get-user-data-path");
},
@@ -150,12 +135,6 @@ contextBridge.exposeInMainWorld("__ENJOY_APP__", {
setLlm: (provider: string, config: LlmProviderType) => {
return ipcRenderer.invoke("settings-set-llm", provider, config);
},
getFfmpegConfig: () => {
return ipcRenderer.invoke("settings-get-ffmpeg-config");
},
setFfmpegConfig: (config: FfmpegConfigType) => {
return ipcRenderer.invoke("settings-set-ffmpeg-config", config);
},
getLanguage: (language: string) => {
return ipcRenderer.invoke("settings-get-language", language);
},
@@ -340,23 +319,29 @@ contextBridge.exposeInMainWorld("__ENJOY_APP__", {
},
},
whisper: {
availableModels: () => {
return ipcRenderer.invoke("whisper-available-models");
config: () => {
return ipcRenderer.invoke("whisper-config");
},
downloadModel: (name: string) => {
return ipcRenderer.invoke("whisper-download-model", name);
setModel: (model: string) => {
return ipcRenderer.invoke("whisper-set-model", model);
},
check: () => {
return ipcRenderer.invoke("whisper-check");
},
transcribe: (
transcribeBlob: (
blob: { type: string; arrayBuffer: ArrayBuffer },
prompt?: string
) => {
return ipcRenderer.invoke("whisper-transcribe", blob, prompt);
return ipcRenderer.invoke("whisper-transcribe-blob", blob, prompt);
},
},
ffmpeg: {
config: () => {
return ipcRenderer.invoke("ffmpeg-config");
},
setConfig: (config: FfmpegConfigType) => {
return ipcRenderer.invoke("ffmpeg-set-config", config);
},
download: () => {
return ipcRenderer.invoke("ffmpeg-download");
},
@@ -371,6 +356,9 @@ contextBridge.exposeInMainWorld("__ENJOY_APP__", {
onState: (
callback: (event: IpcRendererEvent, state: DownloadStateType) => void
) => ipcRenderer.on("download-on-state", callback),
start: (url: string, savePath?: string) => {
ipcRenderer.invoke("download-start", url, savePath);
},
cancel: (filename: string) => {
ipcRenderer.invoke("download-cancel", filename);
},
@@ -402,8 +390,8 @@ contextBridge.exposeInMainWorld("__ENJOY_APP__", {
findOrCreate: (params: any) => {
return ipcRenderer.invoke("transcriptions-find-or-create", params);
},
process: (params: any) => {
return ipcRenderer.invoke("transcriptions-process", params);
process: (params: any, options: any) => {
return ipcRenderer.invoke("transcriptions-process", params, options);
},
update: (id: string, params: any) => {
return ipcRenderer.invoke("transcriptions-update", id, params);

View File

@@ -53,7 +53,7 @@ function App() {
<AISettingsProvider>
<DbProvider>
<RouterProvider router={router} />
<Toaster richColors closeButton position="top-center" />
<Toaster richColors position="top-center" />
<Tooltip id="global-tooltip" />
</DbProvider>
</AISettingsProvider>

View File

@@ -117,6 +117,7 @@ export const AudibleBooksSegment = () => {
<div className="flex items-center mb-4 bg-muted rounded-lg">
<div className="aspect-square h-28 overflow-hidden rounded-l-lg">
<img
crossOrigin="anonymous"
src={selectedBook?.cover}
alt={selectedBook?.title}
className="w-full h-full object-cover"
@@ -183,6 +184,7 @@ const AudioBookCard = (props: {
<div onClick={onClick} className="w-36 cursor-pointer">
<div className="aspect-square border rounded-lg overflow-hidden">
<img
crossOrigin="anonymous"
src={book.cover}
alt={book.title}
className="hover:scale-105 object-cover w-full h-full"

View File

@@ -19,6 +19,7 @@ export const AudioCard = (props: {
>
<img
src={audio.coverUrl ? audio.coverUrl : "./assets/sound-waves.png"}
crossOrigin="anonymous"
className="hover:scale-105 object-cover w-full h-full"
/>
)

View File

@@ -88,9 +88,11 @@ export const AudioDetail = (props: { id?: string; md5?: string }) => {
useEffect(() => {
const where = id ? { id } : { md5 };
EnjoyApp.audios.findOne(where).then((audio) => {
if (!audio) return;
setAudio(audio);
if (audio) {
setAudio(audio);
} else {
toast.error(t("models.audio.notFound"));
}
});
}, [id, md5]);
@@ -171,6 +173,7 @@ export const AudioDetail = (props: { id?: string; md5?: string }) => {
mediaId={audio.id}
mediaType="Audio"
mediaName={audio.name}
mediaUrl={audio.src}
transcription={transcription}
currentSegmentIndex={currentSegmentIndex}
onSelectSegment={(index) => {

View File

@@ -147,7 +147,6 @@ export const MediaPlayer = (props: {
const index = transcriptionResult.findIndex(
(t) => time >= t.offsets.from && time < t.offsets.to
);
if (index === -1) return;
setCurrentSegmentIndex(index);
};

View File

@@ -12,6 +12,7 @@ import {
ScrollArea,
Button,
PingPoint,
toast,
} from "@renderer/components/ui";
import React, { useEffect, useContext, useState } from "react";
import { t } from "i18next";
@@ -20,35 +21,83 @@ import {
DbProviderContext,
AppSettingsProviderContext,
} from "@renderer/context";
import { fetchFile } from "@ffmpeg/util";
export const MediaTranscription = (props: {
transcription: TranscriptionType;
mediaId: string;
mediaType: "Audio" | "Video";
mediaName?: string;
mediaUrl: string;
currentSegmentIndex?: number;
onSelectSegment?: (index: number) => void;
}) => {
const { addDblistener, removeDbListener } = useContext(DbProviderContext);
const { EnjoyApp } = useContext(AppSettingsProviderContext);
const { EnjoyApp, ffmpeg } = useContext(AppSettingsProviderContext);
const {
transcription,
mediaId,
mediaType,
mediaName,
mediaUrl,
currentSegmentIndex,
onSelectSegment,
} = props;
const containerRef = React.createRef<HTMLDivElement>();
const [transcoding, setTranscoding] = useState<boolean>(false);
const [recordingStats, setRecordingStats] =
useState<SegementRecordingStatsType>([]);
const regenerate = async () => {
EnjoyApp.transcriptions.process({
targetId: mediaId,
targetType: mediaType,
});
const generate = async () => {
const data = await transcode();
let blob;
if (data) {
blob = {
type: data.type.split(";")[0],
arrayBuffer: await data.arrayBuffer(),
};
}
EnjoyApp.transcriptions.process(
{
targetId: mediaId,
targetType: mediaType,
},
{
blob,
}
);
};
const transcode = async () => {
if (!ffmpeg?.loaded) return;
if (transcoding) return;
try {
setTranscoding(true);
const uri = new URL(mediaUrl);
const input = uri.pathname.split("/").pop();
const output = input.replace(/\.[^/.]+$/, ".wav");
await ffmpeg.writeFile(input, await fetchFile(mediaUrl));
await ffmpeg.exec([
"-i",
input,
"-ar",
"16000",
"-ac",
"1",
"-c:a",
"pcm_s16le",
output,
]);
const data = await ffmpeg.readFile(output);
setTranscoding(false);
return new Blob([data], { type: "audio/wav" });
} catch (e) {
setTranscoding(false);
toast.error(t("transcodeError"));
}
};
const fetchSegmentStats = async () => {
@@ -77,6 +126,12 @@ export const MediaTranscription = (props: {
} as ScrollIntoViewOptions);
}, [currentSegmentIndex, transcription]);
useEffect(() => {
if (transcription?.state !== "pending") return;
generate();
}, [transcription]);
if (!transcription)
return (
<div className="p-4 w-full">
@@ -88,7 +143,7 @@ export const MediaTranscription = (props: {
<div className="w-full h-full flex flex-col">
<div className="mb-4 flex items-cener justify-between">
<div className="flex items-center space-x-2">
{transcription.state === "processing" ? (
{transcoding || transcription.state === "processing" ? (
<PingPoint colorClassName="bg-yellow-500" />
) : transcription.state === "finished" ? (
<CheckCircleIcon className="text-green-500 w-4 h-4" />
@@ -100,10 +155,10 @@ export const MediaTranscription = (props: {
<AlertDialog>
<AlertDialogTrigger asChild>
<Button
disabled={transcription.state === "processing"}
disabled={transcoding || transcription.state === "processing"}
className="capitalize"
>
{transcription.state === "processing" && (
{(transcoding || transcription.state === "processing") && (
<LoaderIcon className="animate-spin w-4 mr-2" />
)}
{transcription.result ? t("regenerate") : t("transcribe")}
@@ -120,9 +175,7 @@ export const MediaTranscription = (props: {
</AlertDialogHeader>
<AlertDialogFooter>
<AlertDialogCancel>{t("cancel")}</AlertDialogCancel>
<AlertDialogAction
onClick={regenerate}
>
<AlertDialogAction onClick={generate}>
{t("transcribe")}
</AlertDialogAction>
</AlertDialogFooter>

View File

@@ -1,72 +0,0 @@
import { t } from "i18next";
import { Button, Separator } from "@renderer/components/ui";
import { ResetAllButton } from "@renderer/components";
import { InfoIcon } from "lucide-react";
export const AdvancedSettings = () => {
return (
<>
<div className="font-semibold mb-4 capitilized">
{t("advancedSettings")}
</div>
<div className="flex items-start justify-between py-4">
<div className="">
<div className="mb-2">{t("resetSettings")}</div>
<div className="text-sm text-muted-foreground mb-2">
{t("logoutAndRemoveAllPersonalSettings")}
</div>
</div>
<div className="">
<div className="mb-2 flex justify-end">
<ResetAllButton>
<Button
variant="secondary"
className="text-destructive"
size="sm"
>
{t("resetSettings")}
</Button>
</ResetAllButton>
</div>
<div className="text-xs text-muted-foreground">
<InfoIcon className="mr-1 w-3 h-3 inline" />
<span>{t("relaunchIsNeededAfterChanged")}</span>
</div>
</div>
</div>
<Separator />
<div className="flex items-start justify-between py-4">
<div className="">
<div className="mb-2">{t("resetAll")}</div>
<div className="text-sm text-muted-foreground mb-2">
{t("logoutAndRemoveAllPersonalData")}
</div>
</div>
<div className="">
<div className="mb-2 flex justify-end">
<ResetAllButton>
<Button
variant="secondary"
className="text-destructive"
size="sm"
>
{t("resetAll")}
</Button>
</ResetAllButton>
</div>
<div className="text-xs text-muted-foreground">
<InfoIcon className="mr-1 w-3 h-3 inline" />
<span>{t("relaunchIsNeededAfterChanged")}</span>
</div>
</div>
</div>
<Separator />
</>
);
};

View File

@@ -1,564 +0,0 @@
import * as z from "zod";
import { useForm } from "react-hook-form";
import { zodResolver } from "@hookform/resolvers/zod";
import { t } from "i18next";
import {
AlertDialog,
AlertDialogAction,
AlertDialogCancel,
AlertDialogContent,
AlertDialogDescription,
AlertDialogFooter,
AlertDialogHeader,
AlertDialogTitle,
AlertDialogTrigger,
Avatar,
AvatarImage,
AvatarFallback,
Button,
Dialog,
DialogTrigger,
DialogContent,
DialogHeader,
DialogDescription,
DialogFooter,
FormField,
Form,
FormItem,
FormLabel,
FormControl,
FormMessage,
Input,
Label,
Separator,
toast,
Select,
SelectTrigger,
SelectItem,
SelectValue,
SelectContent,
} from "@renderer/components/ui";
import { WhisperModelOptions, LLM_PROVIDERS } from "@renderer/components";
import {
AppSettingsProviderContext,
AISettingsProviderContext,
} from "@renderer/context";
import { useContext, useState, useRef, useEffect } from "react";
import { redirect } from "react-router-dom";
import { InfoIcon, EditIcon } from "lucide-react";
export const BasicSettings = () => {
return (
<div className="">
<div className="font-semibold mb-4 capitilized">{t("basicSettings")}</div>
<UserSettings />
<Separator />
<LanguageSettings />
<Separator />
<LibraryPathSettings />
<Separator />
<FfmpegSettings />
<Separator />
<WhisperSettings />
<Separator />
<OpenaiSettings />
<Separator />
<GoogleGenerativeAiSettings />
<Separator />
</div>
);
};
export const UserSettings = () => {
const { user, logout } = useContext(AppSettingsProviderContext);
if (!user) return null;
return (
<div className="flex items-start justify-between py-4">
<div className="">
<div className="flex items-center space-x-2">
<Avatar>
<AvatarImage src={user.avatarUrl} />
<AvatarFallback className="text-xl">
{user.name[0].toUpperCase()}
</AvatarFallback>
</Avatar>
<div className="">
<div className="text-sm font-semibold">{user.name}</div>
<div className="text-xs text-muted-foreground">{user.id}</div>
</div>
</div>
</div>
<AlertDialog>
<AlertDialogTrigger asChild>
<Button variant="secondary" className="text-destructive" size="sm">
{t("logout")}
</Button>
</AlertDialogTrigger>
<AlertDialogContent>
<AlertDialogHeader>
<AlertDialogTitle>{t("logout")}</AlertDialogTitle>
</AlertDialogHeader>
<AlertDialogDescription>
{t("logoutConfirmation")}
</AlertDialogDescription>
<AlertDialogFooter>
<AlertDialogCancel>{t("cancel")}</AlertDialogCancel>
<AlertDialogAction
className="bg-destructive hover:bg-destructive-hover"
onClick={() => {
logout();
redirect("/");
}}
>
{t("logout")}
</AlertDialogAction>
</AlertDialogFooter>
</AlertDialogContent>
</AlertDialog>
</div>
);
};
export const LanguageSettings = () => {
const { language, switchLanguage } = useContext(AppSettingsProviderContext);
return (
<div className="flex items-start justify-between py-4">
<div className="">
<div className="mb-2">{t("language")}</div>
<div className="text-sm text-muted-foreground mb-2">
{language === "en" ? "English" : "简体中文"}
</div>
</div>
<div className="">
<div className="flex items-center justify-end space-x-2 mb-2">
<Select
value={language}
onValueChange={(value: "en" | "zh-CN") => {
switchLanguage(value);
}}
>
<SelectTrigger className="text-xs">
<SelectValue>
{language === "en" ? "English" : "简体中文"}
</SelectValue>
</SelectTrigger>
<SelectContent>
<SelectItem className="text-xs" value="en">
English
</SelectItem>
<SelectItem className="text-xs" value="zh-CN">
</SelectItem>
</SelectContent>
</Select>
</div>
</div>
</div>
);
};
const LibraryPathSettings = () => {
const { libraryPath, EnjoyApp } = useContext(AppSettingsProviderContext);
const handleChooseLibraryPath = async () => {
const filePaths = await EnjoyApp.dialog.showOpenDialog({
properties: ["openDirectory"],
});
if (filePaths) {
EnjoyApp.settings.setLibrary(filePaths[0]);
const _library = await EnjoyApp.settings.getLibrary();
if (_library !== libraryPath) {
EnjoyApp.app.relaunch();
}
}
};
const openLibraryPath = async () => {
if (libraryPath) {
await EnjoyApp.shell.openPath(libraryPath);
}
};
return (
<div className="flex items-start justify-between py-4">
<div className="">
<div className="mb-2">{t("libraryPath")}</div>
<div className="text-sm text-muted-foreground mb-2">{libraryPath}</div>
</div>
<div className="">
<div className="flex items-center justify-end space-x-2 mb-2">
<Button variant="secondary" size="sm" onClick={openLibraryPath}>
{t("open")}
</Button>
<Button
variant="secondary"
size="sm"
onClick={handleChooseLibraryPath}
>
{t("edit")}
</Button>
</div>
<div className="text-xs text-muted-foreground">
<InfoIcon className="mr-1 w-3 h-3 inline" />
<span>{t("relaunchIsNeededAfterChanged")}</span>
</div>
</div>
</div>
);
};
const FfmpegSettings = () => {
const { EnjoyApp, setFfmegConfig, ffmpegConfig } = useContext(
AppSettingsProviderContext
);
const [editing, setEditing] = useState(false);
const refreshFfmpegConfig = async () => {
EnjoyApp.settings.getFfmpegConfig().then((config) => {
setFfmegConfig(config);
});
};
const handleChooseFfmpeg = async () => {
const filePaths = await EnjoyApp.dialog.showOpenDialog({
properties: ["openFile"],
});
const path = filePaths?.[0];
if (!path) return;
if (path.includes("ffmpeg")) {
EnjoyApp.settings.setFfmpegConfig({
...ffmpegConfig,
ffmpegPath: path,
});
refreshFfmpegConfig();
} else if (path.includes("ffprobe")) {
EnjoyApp.settings.setFfmpegConfig({
...ffmpegConfig,
ffprobePath: path,
});
refreshFfmpegConfig();
} else {
toast.error(t("invalidFfmpegPath"));
}
};
return (
<>
<div className="flex items-start justify-between py-4">
<div className="">
<div className="mb-2">FFmpeg</div>
<div className="flex items-center space-x-4">
<span className=" text-sm text-muted-foreground">
<b>ffmpeg</b>: {ffmpegConfig?.ffmpegPath || ""}
</span>
{editing && (
<Button onClick={handleChooseFfmpeg} variant="ghost" size="icon">
<EditIcon className="w-4 h-4 text-muted-foreground" />
</Button>
)}
</div>
<div className="flex items-center space-x-4">
<span className=" text-sm text-muted-foreground">
<b>ffprobe</b>: {ffmpegConfig?.ffprobePath || ""}
</span>
{editing && (
<Button onClick={handleChooseFfmpeg} variant="ghost" size="icon">
<EditIcon className="w-4 h-4 text-muted-foreground" />
</Button>
)}
</div>
</div>
<div className="">
<div className="flex items-center justify-end space-x-2 mb-2">
<Button
variant="secondary"
size="sm"
onClick={() => {
EnjoyApp.ffmpeg
.discover()
.then(({ ffmpegPath, ffprobePath }) => {
if (ffmpegPath && ffprobePath) {
toast.success(
t("ffmpegFoundAt", {
path: ffmpegPath + ", " + ffprobePath,
})
);
} else {
toast.warning(t("ffmpegNotFound"));
}
refreshFfmpegConfig();
});
}}
>
{t("scan")}
</Button>
<Button
variant={editing ? "outline" : "secondary"}
size="sm"
onClick={() => setEditing(!editing)}
>
{editing ? t("cancel") : t("edit")}
</Button>
</div>
</div>
</div>
</>
);
};
const WhisperSettings = () => {
const { whisperModel, whisperModelsPath, EnjoyApp } = useContext(
AppSettingsProviderContext
);
const handleCheck = async () => {
toast.promise(EnjoyApp.whisper.check(), {
loading: t("checkingWhisper"),
success: t("whisperIsWorkingGood"),
error: t("whisperIsNotWorking"),
});
};
return (
<div className="flex items-start justify-between py-4">
<div className="">
<div className="mb-2">{t("sttAiModel")}</div>
<div className="text-sm text-muted-foreground">{whisperModel}</div>
</div>
<div className="flex items-center space-x-2">
<Button onClick={handleCheck} variant="secondary" size="sm">
{t("check")}
</Button>
<Dialog>
<DialogTrigger asChild>
<Button variant="secondary" size="sm">
{t("edit")}
</Button>
</DialogTrigger>
<DialogContent>
<DialogHeader>{t("sttAiModel")}</DialogHeader>
<DialogDescription>
{t("chooseAIModelDependingOnYourHardware")}
</DialogDescription>
<WhisperModelOptions />
<DialogFooter>
<div className="text-xs opacity-70 flex items-start">
<InfoIcon className="mr-1.5 w-4 h-4" />
<span className="flex-1">
{t("yourModelsWillBeDownloadedTo", {
path: whisperModelsPath,
})}
</span>
</div>
</DialogFooter>
</DialogContent>
</Dialog>
</div>
</div>
);
};
const OpenaiSettings = () => {
const { openai, setOpenai } = useContext(AISettingsProviderContext);
const [editing, setEditing] = useState(false);
const openAiConfigSchema = z.object({
key: z.string().optional(),
model: z.enum(LLM_PROVIDERS.openai.models),
baseUrl: z.string().optional(),
});
const form = useForm<z.infer<typeof openAiConfigSchema>>({
resolver: zodResolver(openAiConfigSchema),
values: {
key: openai?.key,
model: openai?.model,
baseUrl: openai?.baseUrl,
},
});
const onSubmit = async (data: z.infer<typeof openAiConfigSchema>) => {
setOpenai({
...data,
});
setEditing(false);
toast.success(t("openaiConfigSaved"));
};
return (
<Form {...form}>
<form onSubmit={form.handleSubmit(onSubmit)}>
<div className="flex items-start justify-between py-4">
<div className="">
<div className="mb-2">Open AI</div>
<div className="text-sm text-muted-foreground space-y-1">
<FormField
control={form.control}
name="key"
render={({ field }) => (
<FormItem>
<div className="flex items-center space-x-2">
<FormLabel>{t("key")}:</FormLabel>
<Input
disabled={!editing}
type="password"
value={field.value}
onChange={field.onChange}
/>
</div>
<FormMessage />
</FormItem>
)}
/>
<FormField
control={form.control}
name="model"
render={({ field }) => (
<FormItem>
<div className="flex items-center space-x-2">
<FormLabel>{t("model")}:</FormLabel>
<Select
disabled={!editing}
onValueChange={field.onChange}
value={field.value}
>
<FormControl>
<SelectTrigger>
<SelectValue placeholder={t("selectAiModel")} />
</SelectTrigger>
</FormControl>
<SelectContent>
{(LLM_PROVIDERS.openai.models || []).map(
(option: string) => (
<SelectItem key={option} value={option}>
{option}
</SelectItem>
)
)}
</SelectContent>
</Select>
</div>
<FormMessage />
</FormItem>
)}
/>
<FormField
control={form.control}
name="baseUrl"
render={({ field }) => (
<FormItem>
<div className="flex items-center space-x-2">
<FormLabel>{t("baseUrl")}:</FormLabel>
<Input
disabled={!editing}
placeholder={t("leaveEmptyToUseDefault")}
defaultValue=""
value={field.value}
onChange={field.onChange}
/>
</div>
<FormMessage />
</FormItem>
)}
/>
</div>
</div>
<div className="flex items-center space-x-2">
<Button
variant={editing ? "outline" : "secondary"}
size="sm"
type="reset"
onClick={(event) => {
event.preventDefault();
form.reset();
setEditing(!editing);
}}
>
{editing ? t("cancel") : t("edit")}
</Button>
<Button className={editing ? "" : "hidden"} size="sm" type="submit">
{t("save")}
</Button>
</div>
</div>
</form>
</Form>
);
};
const GoogleGenerativeAiSettings = () => {
const { googleGenerativeAi, setGoogleGenerativeAi } = useContext(
AISettingsProviderContext
);
const [editing, setEditing] = useState(false);
const ref = useRef<HTMLInputElement>();
const handleSave = () => {
if (!ref.current) return;
setGoogleGenerativeAi({
key: ref.current.value,
});
setEditing(false);
toast.success(t("googleGenerativeAiKeySaved"));
};
useEffect(() => {
if (editing) {
ref.current?.focus();
}
}, [editing]);
return (
<div className="flex items-start justify-between py-4">
<div className="">
<div className="mb-2">Google Generative AI</div>
<div className="text-sm text-muted-foreground">
<div className="flex items-center space-x-4">
<Label>{t("key")}:</Label>
<Input
ref={ref}
type="password"
defaultValue={googleGenerativeAi?.key}
placeholder="*********"
disabled={!editing}
className="focus-visible:outline-0 focus-visible:ring-0 shadow-none"
/>
{editing && (
<Button
size="sm"
className="min-w-max text-md"
onClick={handleSave}
>
{t("save")}
</Button>
)}
</div>
</div>
</div>
<div className="">
<Button
variant={editing ? "outline" : "secondary"}
size="sm"
onClick={() => setEditing(!editing)}
>
{editing ? t("cancel") : t("edit")}
</Button>
</div>
</div>
);
};

View File

@@ -0,0 +1,122 @@
import { t } from "i18next";
import { Button, toast } from "@renderer/components/ui";
import { AppSettingsProviderContext } from "@renderer/context";
import { useContext, useState } from "react";
import { EditIcon } from "lucide-react";
/**
 * Settings row for the external FFmpeg binaries.
 *
 * Shows whether the ffmpeg command is working, exposes the configured
 * ffmpeg/ffprobe paths in edit mode, lets the user pick the binaries
 * manually, and offers a "scan" action that asks the main process to
 * auto-discover them.
 */
export const FfmpegSettings = () => {
  const { EnjoyApp, setFfmegConfig, ffmpegConfig } = useContext(
    AppSettingsProviderContext
  );
  // Whether the path rows (with their edit buttons) are shown.
  const [editing, setEditing] = useState(false);

  // Re-read the ffmpeg config from the main process and push it into context.
  const refreshFfmpegConfig = async () => {
    EnjoyApp.ffmpeg.config().then((config) => {
      setFfmegConfig(config);
    });
  };

  // Let the user pick a binary file; route it to the ffmpeg or ffprobe slot
  // based on the file name, or reject anything that matches neither.
  const handleChooseFfmpeg = async () => {
    const filePaths = await EnjoyApp.dialog.showOpenDialog({
      properties: ["openFile"],
    });
    const path = filePaths?.[0];
    if (!path) return;

    if (path.includes("ffmpeg")) {
      EnjoyApp.ffmpeg.setConfig({
        ...ffmpegConfig,
        ffmpegPath: path,
      });
      refreshFfmpegConfig();
    } else if (path.includes("ffprobe")) {
      EnjoyApp.ffmpeg.setConfig({
        ...ffmpegConfig,
        ffprobePath: path,
      });
      refreshFfmpegConfig();
    } else {
      toast.error(t("invalidFfmpegPath"));
    }
  };

  return (
    <>
      <div className="flex items-start justify-between py-4">
        <div className="">
          <div className="mb-2">FFmpeg</div>
          {editing ? (
            <>
              <div className="flex items-center space-x-4">
                <span className=" text-sm text-muted-foreground">
                  <b>ffmpeg</b>: {ffmpegConfig?.ffmpegPath || ""}
                </span>
                <Button
                  onClick={handleChooseFfmpeg}
                  variant="ghost"
                  size="icon"
                >
                  <EditIcon className="w-4 h-4 text-muted-foreground" />
                </Button>
              </div>
              <div className="flex items-center space-x-4">
                <span className=" text-sm text-muted-foreground">
                  <b>ffprobe</b>: {ffmpegConfig?.ffprobePath || ""}
                </span>
                <Button
                  onClick={handleChooseFfmpeg}
                  variant="ghost"
                  size="icon"
                >
                  <EditIcon className="w-4 h-4 text-muted-foreground" />
                </Button>
              </div>
            </>
          ) : (
            <div className="text-xs text-muted-foreground">
              {/* fix: optional-chain like the path reads above — ffmpegConfig
                  may be undefined before the first config() round-trip */}
              {ffmpegConfig?.ready ? (
                <span>{t("ffmpegCommandIsWorking")}</span>
              ) : (
                <span>{t("ffmpegCommandIsNotWorking")}</span>
              )}
            </div>
          )}
        </div>
        <div className="">
          <div className="flex items-center justify-end space-x-2 mb-2">
            <Button
              variant="secondary"
              size="sm"
              onClick={() => {
                EnjoyApp.ffmpeg
                  .discover()
                  .then(({ ffmpegPath, ffprobePath }) => {
                    if (ffmpegPath && ffprobePath) {
                      toast.success(
                        t("ffmpegFoundAt", {
                          path: ffmpegPath + ", " + ffprobePath,
                        })
                      );
                    } else {
                      toast.warning(t("ffmpegNotFound"));
                    }
                    refreshFfmpegConfig();
                  });
              }}
            >
              {t("scan")}
            </Button>
            <Button
              variant={editing ? "outline" : "secondary"}
              size="sm"
              onClick={() => setEditing(!editing)}
            >
              {editing ? t("cancel") : t("edit")}
            </Button>
          </div>
        </div>
      </div>
    </>
  );
};

View File

@@ -0,0 +1,68 @@
import { t } from "i18next";
import { Button, Input, Label, toast } from "@renderer/components/ui";
import { AISettingsProviderContext } from "@renderer/context";
import { useContext, useState, useRef, useEffect } from "react";
/**
 * Settings row for the Google Generative AI API key.
 *
 * The key lives in AISettingsProviderContext; the input is uncontrolled
 * (read via ref) and only persisted when the user clicks save.
 */
export const GoogleGenerativeAiSettings = () => {
  const { googleGenerativeAi, setGoogleGenerativeAi } = useContext(
    AISettingsProviderContext
  );
  // Whether the key input is currently editable.
  const [editing, setEditing] = useState(false);
  const ref = useRef<HTMLInputElement>();
  // Persist the key from the uncontrolled input, leave edit mode, notify.
  const handleSave = () => {
    if (!ref.current) return;
    setGoogleGenerativeAi({
      key: ref.current.value,
    });
    setEditing(false);
    toast.success(t("googleGenerativeAiKeySaved"));
  };
  // Focus the input as soon as editing is enabled.
  useEffect(() => {
    if (editing) {
      ref.current?.focus();
    }
  }, [editing]);
  return (
    <div className="flex items-start justify-between py-4">
      <div className="">
        <div className="mb-2">Google Generative AI</div>
        <div className="text-sm text-muted-foreground">
          <div className="flex items-center space-x-4">
            <Label className="min-w-max">{t("key")}:</Label>
            <Input
              ref={ref}
              type="password"
              defaultValue={googleGenerativeAi?.key}
              placeholder="*********"
              disabled={!editing}
              className="focus-visible:outline-0 focus-visible:ring-0 shadow-none"
            />
            {editing && (
              <Button
                size="sm"
                className="min-w-max text-md"
                onClick={handleSave}
              >
                {t("save")}
              </Button>
            )}
          </div>
        </div>
      </div>
      <div className="">
        <Button
          variant={editing ? "outline" : "secondary"}
          size="sm"
          onClick={() => setEditing(!editing)}
        >
          {editing ? t("cancel") : t("edit")}
        </Button>
      </div>
    </div>
  );
};

View File

@@ -1,5 +1,14 @@
export * from './preferences';
export * from './basic-settings';
export * from './advanced-settings';
export * from './about';
export * from './hotkeys';
export * from "./preferences";
export * from "./about";
export * from "./hotkeys";
export * from "./openai-settings";
export * from "./user-settings";
export * from "./language-settings";
export * from "./library-settings";
export * from "./ffmpeg-settings";
export * from "./whisper-settings";
export * from "./google-generative-ai-settings";
export * from "./reset-settings";
export * from "./reset-all-settings";

View File

@@ -0,0 +1,52 @@
import { t } from "i18next";
import {
Select,
SelectTrigger,
SelectItem,
SelectValue,
SelectContent,
} from "@renderer/components/ui";
import {
AppSettingsProviderContext,
} from "@renderer/context";
import { useContext } from "react";
/**
 * Settings row for the UI language.
 *
 * Shows the current language and a dropdown to switch between English
 * and Simplified Chinese via AppSettingsProviderContext.switchLanguage.
 */
export const LanguageSettings = () => {
  const { language, switchLanguage } = useContext(AppSettingsProviderContext);
  return (
    <div className="flex items-start justify-between py-4">
      <div className="">
        <div className="mb-2">{t("language")}</div>
        <div className="text-sm text-muted-foreground mb-2">
          {language === "en" ? "English" : "简体中文"}
        </div>
      </div>
      <div className="">
        <div className="flex items-center justify-end space-x-2 mb-2">
          <Select
            value={language}
            onValueChange={(value: "en" | "zh-CN") => {
              switchLanguage(value);
            }}
          >
            <SelectTrigger className="text-xs">
              <SelectValue>
                {language === "en" ? "English" : "简体中文"}
              </SelectValue>
            </SelectTrigger>
            <SelectContent>
              <SelectItem className="text-xs" value="en">
                English
              </SelectItem>
              <SelectItem className="text-xs" value="zh-CN">
                {/* fix: the zh-CN option previously rendered with no label */}
                简体中文
              </SelectItem>
            </SelectContent>
          </Select>
        </div>
      </div>
    </div>
  );
};

View File

@@ -0,0 +1,61 @@
import { t } from "i18next";
import {
Button,
} from "@renderer/components/ui";
import {
AppSettingsProviderContext,
} from "@renderer/context";
import { useContext } from "react";
import { InfoIcon } from "lucide-react";
/**
 * Settings row for the library directory.
 *
 * Shows the current path, lets the user open it in the OS file manager,
 * or pick a new directory; a relaunch is triggered when the persisted
 * library path actually changes.
 */
export const LibrarySettings = () => {
  const { libraryPath, EnjoyApp } = useContext(AppSettingsProviderContext);
  const handleChooseLibraryPath = async () => {
    const filePaths = await EnjoyApp.dialog.showOpenDialog({
      properties: ["openDirectory"],
    });
    // fix: `if (filePaths)` was truthy even for an empty array (dialog
    // canceled), persisting `undefined` as the library path. Guard the
    // first entry instead, matching the pattern used in FfmpegSettings.
    const path = filePaths?.[0];
    if (!path) return;

    EnjoyApp.settings.setLibrary(path);
    const _library = await EnjoyApp.settings.getLibrary();
    if (_library !== libraryPath) {
      // Relaunch so the main process picks up the new library location.
      EnjoyApp.app.relaunch();
    }
  };
  // Reveal the library directory in the system file manager.
  const openLibraryPath = async () => {
    if (libraryPath) {
      await EnjoyApp.shell.openPath(libraryPath);
    }
  };
  return (
    <div className="flex items-start justify-between py-4">
      <div className="">
        <div className="mb-2">{t("libraryPath")}</div>
        <div className="text-sm text-muted-foreground mb-2">{libraryPath}</div>
      </div>
      <div className="">
        <div className="flex items-center justify-end space-x-2 mb-2">
          <Button variant="secondary" size="sm" onClick={openLibraryPath}>
            {t("open")}
          </Button>
          <Button
            variant="secondary"
            size="sm"
            onClick={handleChooseLibraryPath}
          >
            {t("edit")}
          </Button>
        </div>
        <div className="text-xs text-muted-foreground">
          <InfoIcon className="mr-1 w-3 h-3 inline" />
          <span>{t("relaunchIsNeededAfterChanged")}</span>
        </div>
      </div>
    </div>
  );
};

View File

@@ -0,0 +1,152 @@
import * as z from "zod";
import { useForm } from "react-hook-form";
import { zodResolver } from "@hookform/resolvers/zod";
import { t } from "i18next";
import {
Button,
FormField,
Form,
FormItem,
FormLabel,
FormControl,
FormMessage,
Input,
toast,
Select,
SelectTrigger,
SelectItem,
SelectValue,
SelectContent,
} from "@renderer/components/ui";
import { LLM_PROVIDERS } from "@renderer/components";
import { AISettingsProviderContext } from "@renderer/context";
import { useContext, useState } from "react";
/**
 * Settings form for the OpenAI provider (API key, model, base URL).
 *
 * Backed by react-hook-form with a zod schema; values are seeded from
 * AISettingsProviderContext and written back on submit.
 */
export const OpenaiSettings = () => {
  const { openai, setOpenai } = useContext(AISettingsProviderContext);
  // Whether the form fields are editable; toggled by the edit/cancel button.
  const [editing, setEditing] = useState(false);
  // Validation schema: model must be one of the configured OpenAI models.
  const openAiConfigSchema = z.object({
    key: z.string().optional(),
    model: z.enum(LLM_PROVIDERS.openai.models),
    baseUrl: z.string().optional(),
  });
  // `values` (not defaultValues) keeps the form in sync when context changes.
  const form = useForm<z.infer<typeof openAiConfigSchema>>({
    resolver: zodResolver(openAiConfigSchema),
    values: {
      key: openai?.key,
      model: openai?.model,
      baseUrl: openai?.baseUrl,
    },
  });
  // Persist validated values into context, exit edit mode, notify.
  const onSubmit = async (data: z.infer<typeof openAiConfigSchema>) => {
    setOpenai({
      ...data,
    });
    setEditing(false);
    toast.success(t("openaiConfigSaved"));
  };
  return (
    <Form {...form}>
      <form onSubmit={form.handleSubmit(onSubmit)}>
        <div className="flex items-start justify-between py-4">
          <div className="">
            <div className="mb-2">Open AI</div>
            <div className="text-sm text-muted-foreground space-y-1">
              <FormField
                control={form.control}
                name="key"
                render={({ field }) => (
                  <FormItem>
                    <div className="flex items-center space-x-2">
                      <FormLabel className="min-w-max">{t("key")}:</FormLabel>
                      <Input
                        disabled={!editing}
                        type="password"
                        value={field.value}
                        onChange={field.onChange}
                      />
                    </div>
                    <FormMessage />
                  </FormItem>
                )}
              />
              <FormField
                control={form.control}
                name="model"
                render={({ field }) => (
                  <FormItem>
                    <div className="flex items-center space-x-2">
                      <FormLabel className="min-w-max">{t("model")}:</FormLabel>
                      <Select
                        disabled={!editing}
                        onValueChange={field.onChange}
                        value={field.value}
                      >
                        <FormControl>
                          <SelectTrigger>
                            <SelectValue placeholder={t("selectAiModel")} />
                          </SelectTrigger>
                        </FormControl>
                        <SelectContent>
                          {(LLM_PROVIDERS.openai.models || []).map(
                            (option: string) => (
                              <SelectItem key={option} value={option}>
                                {option}
                              </SelectItem>
                            )
                          )}
                        </SelectContent>
                      </Select>
                    </div>
                    <FormMessage />
                  </FormItem>
                )}
              />
              <FormField
                control={form.control}
                name="baseUrl"
                render={({ field }) => (
                  <FormItem>
                    <div className="flex items-center space-x-2">
                      <FormLabel className="min-w-max">{t("baseUrl")}:</FormLabel>
                      <Input
                        disabled={!editing}
                        placeholder={t("leaveEmptyToUseDefault")}
                        defaultValue=""
                        value={field.value}
                        onChange={field.onChange}
                      />
                    </div>
                    <FormMessage />
                  </FormItem>
                )}
              />
            </div>
          </div>
          <div className="flex items-center space-x-2">
            <Button
              variant={editing ? "outline" : "secondary"}
              size="sm"
              type="reset"
              onClick={(event) => {
                event.preventDefault();
                form.reset();
                setEditing(!editing);
              }}
            >
              {editing ? t("cancel") : t("edit")}
            </Button>
            <Button className={editing ? "" : "hidden"} size="sm" type="submit">
              {t("save")}
            </Button>
          </div>
        </div>
      </form>
    </Form>
  );
};

View File

@@ -1,24 +1,68 @@
import { t } from "i18next";
import { Button, ScrollArea } from "@renderer/components/ui";
import { Button, ScrollArea, Separator } from "@renderer/components/ui";
import {
BasicSettings,
AdvancedSettings,
About,
Hotkeys,
UserSettings,
LanguageSettings,
LibrarySettings,
WhisperSettings,
FfmpegSettings,
OpenaiSettings,
GoogleGenerativeAiSettings,
ResetSettings,
ResetAllSettings,
} from "@renderer/components";
import { useState } from "react";
import { useState, useContext } from "react";
import { AppSettingsProviderContext } from "@renderer/context";
export const Preferences = () => {
const { ffmpegConfig } = useContext(AppSettingsProviderContext);
const TABS = [
{
value: "basic",
label: t("basicSettingsShort"),
component: () => <BasicSettings />,
component: () => (
<div className="">
<div className="font-semibold mb-4 capitilized">
{t("basicSettings")}
</div>
<UserSettings />
<Separator />
<LanguageSettings />
<Separator />
<LibrarySettings />
<Separator />
<WhisperSettings />
<Separator />
{ffmpegConfig.ready && (
<>
<FfmpegSettings />
<Separator />
</>
)}
<OpenaiSettings />
<Separator />
<GoogleGenerativeAiSettings />
<Separator />
</div>
),
},
{
value: "advanced",
label: t("advancedSettingsShort"),
component: () => <AdvancedSettings />,
component: () => (
<>
<div className="font-semibold mb-4 capitilized">
{t("advancedSettings")}
</div>
<ResetSettings />
<Separator />
<ResetAllSettings />
<Separator />
</>
),
},
{
value: "hotkeys",

View File

@@ -0,0 +1,31 @@
import { t } from "i18next";
import { Button } from "@renderer/components/ui";
import { ResetAllButton } from "@renderer/components";
import { InfoIcon } from "lucide-react";
/**
 * Settings row for the "reset all" action: logs the user out and removes
 * all personal data. The confirmation flow lives inside ResetAllButton;
 * this component only renders the row and its trigger.
 */
export const ResetAllSettings = () => {
  return (
    <div className="flex items-start justify-between py-4">
      <div className="">
        <div className="mb-2">{t("resetAll")}</div>
        <div className="text-sm text-muted-foreground mb-2">
          {t("logoutAndRemoveAllPersonalData")}
        </div>
      </div>
      <div className="">
        <div className="mb-2 flex justify-end">
          <ResetAllButton>
            <Button variant="secondary" className="text-destructive" size="sm">
              {t("resetAll")}
            </Button>
          </ResetAllButton>
        </div>
        <div className="text-xs text-muted-foreground">
          <InfoIcon className="mr-1 w-3 h-3 inline" />
          <span>{t("relaunchIsNeededAfterChanged")}</span>
        </div>
      </div>
    </div>
  );
};

View File

@@ -0,0 +1,31 @@
import { t } from "i18next";
import { Button } from "@renderer/components/ui";
import { ResetAllButton } from "@renderer/components";
import { InfoIcon } from "lucide-react";
/**
 * Settings row for resetting personal settings (labels say settings only,
 * not all data).
 *
 * NOTE(review): this wraps the trigger in ResetAllButton — the same
 * component as the "reset all" row. Confirm a settings-only reset is
 * intended to share that full-reset flow.
 */
export const ResetSettings = () => {
  return (
    <div className="flex items-start justify-between py-4">
      <div className="">
        <div className="mb-2">{t("resetSettings")}</div>
        <div className="text-sm text-muted-foreground mb-2">
          {t("logoutAndRemoveAllPersonalSettings")}
        </div>
      </div>
      <div className="">
        <div className="mb-2 flex justify-end">
          <ResetAllButton>
            <Button variant="secondary" className="text-destructive" size="sm">
              {t("resetSettings")}
            </Button>
          </ResetAllButton>
        </div>
        <div className="text-xs text-muted-foreground">
          <InfoIcon className="mr-1 w-3 h-3 inline" />
          <span>{t("relaunchIsNeededAfterChanged")}</span>
        </div>
      </div>
    </div>
  );
};

View File

@@ -0,0 +1,72 @@
import { t } from "i18next";
import {
AlertDialog,
AlertDialogAction,
AlertDialogCancel,
AlertDialogContent,
AlertDialogDescription,
AlertDialogFooter,
AlertDialogHeader,
AlertDialogTitle,
AlertDialogTrigger,
Avatar,
AvatarImage,
AvatarFallback,
Button,
} from "@renderer/components/ui";
import { AppSettingsProviderContext } from "@renderer/context";
import { useContext } from "react";
import { redirect } from "react-router-dom";
/**
 * Settings row showing the signed-in user's avatar/name/id with a
 * confirm-dialog-guarded logout action. Renders nothing when no user
 * is signed in.
 */
export const UserSettings = () => {
  const { user, logout } = useContext(AppSettingsProviderContext);
  if (!user) return null;
  return (
    <div className="flex items-start justify-between py-4">
      <div className="">
        <div className="flex items-center space-x-2">
          <Avatar>
            <AvatarImage crossOrigin="anonymous" src={user.avatarUrl} />
            <AvatarFallback className="text-xl">
              {user.name[0].toUpperCase()}
            </AvatarFallback>
          </Avatar>
          <div className="">
            <div className="text-sm font-semibold">{user.name}</div>
            <div className="text-xs text-muted-foreground">{user.id}</div>
          </div>
        </div>
      </div>
      <AlertDialog>
        <AlertDialogTrigger asChild>
          <Button variant="secondary" className="text-destructive" size="sm">
            {t("logout")}
          </Button>
        </AlertDialogTrigger>
        <AlertDialogContent>
          <AlertDialogHeader>
            <AlertDialogTitle>{t("logout")}</AlertDialogTitle>
          </AlertDialogHeader>
          <AlertDialogDescription>
            {t("logoutConfirmation")}
          </AlertDialogDescription>
          <AlertDialogFooter>
            <AlertDialogCancel>{t("cancel")}</AlertDialogCancel>
            <AlertDialogAction
              className="bg-destructive hover:bg-destructive-hover"
              onClick={() => {
                logout();
                redirect("/");
              }}
            >
              {t("logout")}
            </AlertDialogAction>
          </AlertDialogFooter>
        </AlertDialogContent>
      </AlertDialog>
    </div>
  );
};

View File

@@ -0,0 +1,111 @@
import { t } from "i18next";
import {
Button,
Dialog,
DialogTrigger,
DialogContent,
DialogHeader,
DialogDescription,
DialogFooter,
toast,
} from "@renderer/components/ui";
import { WhisperModelOptions } from "@renderer/components";
import { AppSettingsProviderContext } from "@renderer/context";
import { useContext, useEffect, useState } from "react";
import { InfoIcon, AlertCircleIcon } from "lucide-react";
/**
 * Settings row for the Whisper speech-to-text model.
 *
 * Shows the configured model, a "check" action that verifies whisper works
 * in the main process, and a dialog for choosing a different model. When a
 * check fails, the captured stderr is kept so the user can file an issue.
 */
export const WhisperSettings = () => {
  const { whisperConfig, refreshWhisperConfig, EnjoyApp } = useContext(
    AppSettingsProviderContext
  );
  // stderr log from the last failed check; empty means the last check passed.
  const [stderr, setStderr] = useState("");
  // Refresh the whisper config once on mount.
  useEffect(() => {
    refreshWhisperConfig();
  }, []);
  // Run the whisper self-check behind a loading/success/error toast.
  // On failure, stash the log so the warning icon (issue reporter) appears.
  const handleCheck = async () => {
    toast.promise(
      async () => {
        const { success, log } = await EnjoyApp.whisper.check();
        if (success) {
          setStderr("");
          return Promise.resolve();
        } else {
          setStderr(log);
          return Promise.reject();
        }
      },
      {
        loading: t("checkingWhisper"),
        success: t("whisperIsWorkingGood"),
        error: t("whisperIsNotWorking"),
      }
    );
  };
  return (
    <div className="flex items-start justify-between py-4">
      <div className="">
        <div className="flex items-center mb-2">
          <span>{t("sttAiModel")}</span>
          {stderr && (
            <Button
              variant="ghost"
              size="icon"
              onClick={() => {
                EnjoyApp.app.createIssue("Whisper is not working", stderr);
              }}
            >
              <AlertCircleIcon className="w-4 h-4 text-yellow-500" />
            </Button>
          )}
        </div>
        <div className="text-sm text-muted-foreground">
          {whisperConfig.model}
        </div>
      </div>
      <div className="flex items-center space-x-2">
        <Button onClick={handleCheck} variant="secondary" size="sm">
          {t("check")}
        </Button>
        <Dialog>
          <DialogTrigger asChild>
            <Button variant="secondary" size="sm">
              {t("edit")}
            </Button>
          </DialogTrigger>
          <DialogContent>
            <DialogHeader>{t("sttAiModel")}</DialogHeader>
            <DialogDescription>
              {t("chooseAIModelDependingOnYourHardware")}
            </DialogDescription>
            <WhisperModelOptions />
            <DialogFooter>
              <div className="text-xs flex items-start space-x-2">
                <InfoIcon className="mr-1.5 w-4 h-4" />
                <span className="flex-1 opacity-70">
                  {t("yourModelsWillBeDownloadedTo", {
                    path: whisperConfig.modelsPath,
                  })}
                </span>
                <Button
                  onClick={() => {
                    EnjoyApp.shell.openPath(whisperConfig.modelsPath);
                  }}
                  variant="default"
                  size="sm"
                >
                  {t("open")}
                </Button>
              </div>
            </DialogFooter>
          </DialogContent>
        </Dialog>
      </div>
    </div>
  );
};

View File

@@ -169,10 +169,12 @@ export const RecordingsList = (props: {
</div>
<div className="z-50 bottom-16 left-1/2 w-0 h-0 absolute flex items-center justify-center">
<RecordButton
disabled={referenceId == undefined || !Boolean(referenceText)}
onRecordEnd={createRecording}
/>
{referenceId !== undefined && Boolean(referenceText) && (
<RecordButton
disabled={referenceId == undefined || !Boolean(referenceText)}
onRecordEnd={createRecording}
/>
)}
</div>
</div>

View File

@@ -14,6 +14,7 @@ export const StoryCard = (props: { story: StoryType; className?: string }) => {
<div className="border rounded-lg overflow-hidden cursor-pointer">
<div className="aspect-[16/9] overflow-hidden">
<img
crossOrigin="anonymous"
src={story.metadata.image}
className="w-full h-full object-cover hover:scale-105"
/>

View File

@@ -1 +0,0 @@
export * from './transcription';

View File

@@ -1,186 +0,0 @@
import {
AlertDialog,
AlertDialogTrigger,
AlertDialogFooter,
AlertDialogHeader,
AlertDialogContent,
AlertDialogTitle,
AlertDialogDescription,
AlertDialogCancel,
AlertDialogAction,
Skeleton,
ScrollArea,
Button,
PingPoint,
} from "@renderer/components/ui";
import React, { useEffect, useContext, useState } from "react";
import { t } from "i18next";
import { LoaderIcon, CheckCircleIcon, MicIcon } from "lucide-react";
import {
DbProviderContext,
AppSettingsProviderContext,
} from "@renderer/context";
/**
 * Transcription panel for an Audio/Video target.
 *
 * Loads (or creates) the target's transcription, shows per-segment rows with
 * a mic icon on segments that have recordings, scrolls the active segment
 * into view, and offers a confirm-guarded (re)transcribe action.
 */
export const Transcription = (props: {
  targetType: "Audio" | "Video";
  targetId: string;
  targetName?: string;
  currentSegmentIndex?: number;
  onSelectSegment?: (index: number) => void;
}) => {
  const { addDblistener, removeDbListener } = useContext(DbProviderContext);
  const { EnjoyApp } = useContext(AppSettingsProviderContext);
  const {
    targetType,
    targetId,
    targetName,
    currentSegmentIndex,
    onSelectSegment,
  } = props;
  const containerRef = React.createRef<HTMLDivElement>();
  const [transcription, setTranscription] = useState<TranscriptionType>(null);
  // Per-segment recording stats; used to mark segments that have recordings.
  const [recordingStats, setRecordingStats] =
    useState<SegementRecordingStatsType>([]);
  // Kick off (re)processing in the main process; state updates arrive via DB events.
  const regenerate = async () => {
    EnjoyApp.transcriptions.process({ targetId, targetType });
  };
  const fetchSegmentStats = async () => {
    if (!targetId) return;
    EnjoyApp.recordings.groupBySegment(targetId, targetType).then((stats) => {
      setRecordingStats(stats);
    });
  };
  // Refetch stats on any DB change while mounted; clean up the listener.
  useEffect(() => {
    addDblistener(fetchSegmentStats);
    fetchSegmentStats();
    return () => {
      removeDbListener(fetchSegmentStats);
    };
  }, [targetType, targetId]);
  // Keep the currently playing segment centered in the scroll area.
  useEffect(() => {
    containerRef.current
      ?.querySelector(`#segment-${currentSegmentIndex}`)
      ?.scrollIntoView({
        block: "center",
        inline: "center",
      } as ScrollIntoViewOptions);
  }, [currentSegmentIndex, transcription]);
  // Load or lazily create the transcription record for this target.
  useEffect(() => {
    EnjoyApp.transcriptions
      .findOrCreate({
        targetId,
        targetType,
      })
      .then((transcription) => {
        setTranscription(transcription);
      });
  }, [targetId, targetType]);
  if (!transcription)
    return (
      <div className="p-4 w-full">
        <TranscriptionPlaceholder />
      </div>
    );
  // NOTE(review): inside the map below, the callback parameter `t` shadows
  // i18next's `t` — the `{t("...")}` calls above are unaffected, but renaming
  // the segment variable would be clearer.
  // NOTE(review): the `transcription ? ... : <TranscriptionPlaceholder />`
  // ternary below is dead — the early return above already handles null.
  return (
    <div className="w-full h-full flex flex-col">
      <div className="mb-4 flex items-cener justify-between">
        <div className="flex items-center space-x-2">
          {transcription.state === "processing" ? (
            <PingPoint colorClassName="bg-yellow-500" />
          ) : transcription.state === "finished" ? (
            <CheckCircleIcon className="text-green-500 w-4 h-4" />
          ) : (
            <PingPoint colorClassName="bg-mute" />
          )}
          <span className="">{t("transcription")}</span>
        </div>
        <AlertDialog>
          <AlertDialogTrigger asChild>
            <Button
              disabled={transcription.state === "processing"}
              className="capitalize"
            >
              {transcription.state === "processing" && (
                <LoaderIcon className="animate-spin w-4 mr-2" />
              )}
              {transcription.result ? t("regenerate") : t("transcribe")}
            </Button>
          </AlertDialogTrigger>
          <AlertDialogContent>
            <AlertDialogHeader>
              <AlertDialogTitle>{t("transcribe")}</AlertDialogTitle>
              <AlertDialogDescription>
                {t("transcribeAudioConfirmation", {
                  name: targetName,
                })}
              </AlertDialogDescription>
            </AlertDialogHeader>
            <AlertDialogFooter>
              <AlertDialogCancel>{t("cancel")}</AlertDialogCancel>
              <AlertDialogAction
                className="bg-destructive"
                onClick={regenerate}
              >
                {t("transcribe")}
              </AlertDialogAction>
            </AlertDialogFooter>
          </AlertDialogContent>
        </AlertDialog>
      </div>
      {transcription ? (
        <ScrollArea ref={containerRef} className="flex-1">
          {transcription.result.map((t, index) => (
            <div
              key={index}
              id={`segment-${index}`}
              className={`py-1 px-2 mb-2 cursor-pointer hover:bg-yellow-400/25 ${
                currentSegmentIndex === index ? "bg-yellow-400/25" : ""
              }`}
              onClick={() => {
                onSelectSegment?.(index);
              }}
            >
              <div className="flex items-center justify-between">
                <span className="text-xs opacity-50">#{index + 1}</span>
                <div className="flex items-center space-x-2">
                  {(recordingStats || []).findIndex(
                    (s) => s.referenceId === index
                  ) !== -1 && <MicIcon className="w-3 h-3 text-sky-500" />}
                  <span className="text-xs opacity-50">
                    {t.timestamps.from.split(",")[0]}
                  </span>
                </div>
              </div>
              <p className="">{t.text}</p>
            </div>
          ))}
        </ScrollArea>
      ) : (
        <TranscriptionPlaceholder />
      )}
    </div>
  );
};
// Loading placeholder shown in place of the transcription list:
// five full-width skeleton rows followed by one shorter trailing row.
export const TranscriptionPlaceholder = () => {
  const rows = [0, 1, 2, 3, 4];
  return (
    <div className="p-4">
      {rows.map((row) => (
        <Skeleton key={row} className="h-4 w-full mb-4" />
      ))}
      <Skeleton className="h-4 w-3/5" />
    </div>
  );
};

View File

@@ -144,6 +144,7 @@ export const TedTalksSegment = () => {
<div className="aspect-square h-28 overflow-hidden rounded-l-lg">
<img
src={selectedTalk?.primaryImageSet[0].url}
crossOrigin="anonymous"
alt={selectedTalk?.title}
className="w-full h-full object-cover"
/>
@@ -182,7 +183,6 @@ export const TedTalksSegment = () => {
disabled={submitting}
>
{submittingType === DownloadType.audio && (
<LoaderIcon className="w-4 h-4 animate-spin mr-2" />
)}
{t("downloadAudio")}
@@ -216,6 +216,7 @@ const TedTalkCard = (props: { talk: TedTalkType; onClick?: () => void }) => {
<div onClick={onClick} className="w-56 cursor-pointer">
<div className="aspect-[4/3] border rounded-lg overflow-hidden relative">
<img
crossOrigin="anonymous"
src={talk.primaryImageSet[0].url}
alt={talk.title}
className="hover:scale-105 object-cover w-full h-full"

View File

@@ -2,7 +2,10 @@ import { Link } from "react-router-dom";
import { cn } from "@renderer/lib/utils";
import { VideoIcon } from "lucide-react";
export const VideoCard = (props: { video: Partial<VideoType>; className?: string }) => {
export const VideoCard = (props: {
video: Partial<VideoType>;
className?: string;
}) => {
const { video, className } = props;
return (
@@ -19,6 +22,7 @@ export const VideoCard = (props: { video: Partial<VideoType>; className?: string
<VideoIcon className="w-12 h-12" />
<img
src={video.coverUrl}
crossOrigin="anonymous"
className="absolute top-0 left-0 hover:scale-105 object-cover w-full h-full bg-cover bg-center"
/>
</div>

View File

@@ -94,9 +94,11 @@ export const VideoDetail = (props: { id?: string; md5?: string }) => {
useEffect(() => {
const where = id ? { id } : { md5 };
EnjoyApp.videos.findOne(where).then((video) => {
if (!video) return;
setVideo(video);
if (video) {
setVideo(video);
} else {
toast.error(t("models.video.notFound"));
}
});
}, [id, md5]);
@@ -181,6 +183,7 @@ export const VideoDetail = (props: { id?: string; md5?: string }) => {
<MediaTranscription
mediaId={video.id}
mediaType="Video"
mediaUrl={video.src}
mediaName={video.name}
transcription={transcription}
currentSegmentIndex={currentSegmentIndex}

View File

@@ -7,6 +7,7 @@ import {
AlertDialogContent,
AlertDialogAction,
AlertDialogCancel,
Button,
Card,
CardHeader,
CardTitle,
@@ -18,7 +19,7 @@ import {
Progress,
} from "@renderer/components/ui";
import { t } from "i18next";
import { InfoIcon, CheckCircle, DownloadIcon } from "lucide-react";
import { InfoIcon, CheckCircle, DownloadIcon, XCircleIcon } from "lucide-react";
import { WHISPER_MODELS_OPTIONS } from "@/constants";
import { useState, useContext, useEffect } from "react";
import { AppSettingsProviderContext } from "@renderer/context";
@@ -33,7 +34,13 @@ type ModelType = {
};
export const WhisperModelOptionsPanel = () => {
const { whisperModelsPath } = useContext(AppSettingsProviderContext);
const { whisperConfig, refreshWhisperConfig, EnjoyApp } = useContext(
AppSettingsProviderContext
);
useEffect(() => {
refreshWhisperConfig();
}, []);
return (
<>
@@ -50,13 +57,22 @@ export const WhisperModelOptionsPanel = () => {
</CardContent>
<CardFooter>
<div className="text-xs opacity-70 flex items-start">
<div className="text-xs flex items-start space-x-2">
<InfoIcon className="mr-1.5 w-4 h-4" />
<span className="flex-1">
<span className="flex-1 opacity-70">
{t("yourModelsWillBeDownloadedTo", {
path: whisperModelsPath,
path: whisperConfig.modelsPath,
})}
</span>
<Button
onClick={() => {
EnjoyApp.shell.openPath(whisperConfig.modelsPath);
}}
variant="default"
size="sm"
>
{t("open")}
</Button>
</div>
</CardFooter>
</Card>
@@ -67,20 +83,28 @@ export const WhisperModelOptionsPanel = () => {
export const WhisperModelOptions = () => {
const [selectingModel, setSelectingModel] = useState<ModelType | null>(null);
const [availableModels, setAvailableModels] = useState<ModelType[]>([]);
const { whisperModelsPath, whisperModel, setWhisperModel, EnjoyApp } =
useContext(AppSettingsProviderContext);
const { whisperConfig, setWhisperModel, EnjoyApp } = useContext(
AppSettingsProviderContext
);
useEffect(() => {
updateAvailableModels();
}, []);
return EnjoyApp.download.removeAllListeners();
}, [whisperModelsPath]);
useEffect(() => {
listenToDownloadState();
return () => {
EnjoyApp.download.removeAllListeners();
};
}, [selectingModel]);
const updateAvailableModels = async () => {
const models = await EnjoyApp.whisper.availableModels();
const models = whisperConfig.availableModels;
const options: ModelType[] = WHISPER_MODELS_OPTIONS;
options.forEach((o) => {
o.downloaded = models.findIndex((m) => m === o.name) > -1;
o.downloaded = models.findIndex((m) => m.name === o.name) > -1;
});
setAvailableModels(options);
};
@@ -88,8 +112,11 @@ export const WhisperModelOptions = () => {
const downloadModel = async () => {
if (!selectingModel) return;
EnjoyApp.whisper.downloadModel(selectingModel.name);
listenToDownloadState();
const model = WHISPER_MODELS_OPTIONS.find(
(m) => m.name === selectingModel.name
);
EnjoyApp.download.start(model.url, whisperConfig.modelsPath);
setSelectingModel(null);
};
@@ -97,13 +124,17 @@ export const WhisperModelOptions = () => {
const listenToDownloadState = () => {
EnjoyApp.download.onState((_event, state) => {
const model = availableModels.find((m) => m.name === state.name);
if (!model) return;
if (model) {
model.downloadState = state;
}
if (state.state === "completed") {
model.downloaded = true;
setWhisperModel(model.name);
EnjoyApp.download.removeAllListeners();
} else if (state.state === "cancelled") {
model.downloaded = false;
model.downloadState = null;
}
setAvailableModels([...availableModels]);
@@ -115,39 +146,45 @@ export const WhisperModelOptions = () => {
<ScrollArea className="max-h-96">
{availableModels.map((option) => {
return (
<div
key={option.name}
className={`cursor-pointer hover:bg-secondary px-4 py-2 rounded ${
whisperModel === option.name ? "bg-secondary" : ""
}`}
onClick={() => {
if (option.downloaded) {
setWhisperModel(option.name);
} else if (option.downloadState) {
toast.warning(t("downloading", { file: option.name }));
} else {
setSelectingModel(option);
}
}}
>
<div className="flex justify-between">
<div className="font-semibold">{option.type}</div>
{option.downloaded ? (
<CheckCircle
className={`w-4 ${
whisperModel === option.name ? "text-green-500" : ""
}`}
/>
) : (
<DownloadIcon className="w-4 opacity-70" />
)}
</div>
<div className="text-sm opacity-70 flex justify-between">
<span>{option.name}</span>
<span>~{option.size}</span>
<>
<div
key={option.name}
className={`cursor-pointer hover:bg-secondary px-4 py-2 rounded ${
whisperConfig.model === option.name ? "bg-secondary" : ""
}`}
onClick={() => {
if (option.downloaded) {
toast.promise(setWhisperModel(option.name), {
loading: t("checkingWhisperModel"),
success: t("whisperModelIsWorkingGood"),
error: t("whisperModelIsNotWorking"),
});
} else if (!option.downloadState) {
setSelectingModel(option);
}
}}
>
<div className="flex justify-between">
<div className="font-semibold">{option.type}</div>
{option.downloaded ? (
<CheckCircle
className={`w-4 ${
whisperConfig.model === option.name
? "text-green-500"
: ""
}`}
/>
) : (
<DownloadIcon className="w-4 opacity-70" />
)}
</div>
<div className="text-sm opacity-70 flex justify-between">
<span>{option.name}</span>
<span>~{option.size}</span>
</div>
</div>
{!option.downloaded && option.downloadState && (
<div className="py-2">
<div className="flex items-center space-x-2 py-2 px-4">
<Progress
className="h-1"
value={
@@ -156,9 +193,25 @@ export const WhisperModelOptions = () => {
100
}
/>
<Button
onClick={() => {
toast.promise(
EnjoyApp.download.cancel(option.downloadState.name),
{
loading: t("cancelling"),
success: t("cancelled"),
}
);
}}
className=""
variant="ghost"
size="icon"
>
<XCircleIcon className="w-4 h-4" />
</Button>
</div>
)}
</div>
</>
);
})}
</ScrollArea>

View File

@@ -1,7 +1,10 @@
import { createContext, useEffect, useState } from "react";
import { createContext, useEffect, useState, useRef } from "react";
import { toast } from "@renderer/components/ui";
import { WEB_API_URL } from "@/constants";
import { Client } from "@/api";
import i18n from "@renderer/i18n";
import { FFmpeg } from "@ffmpeg/ffmpeg";
import { toBlobURL } from "@ffmpeg/util";
type AppSettingsProviderState = {
webApi: Client;
@@ -9,13 +12,14 @@ type AppSettingsProviderState = {
initialized: boolean;
version?: string;
libraryPath?: string;
whisperModelsPath?: string;
whisperModel?: string;
login?: (user: UserType) => void;
logout?: () => void;
setLibraryPath?: (path: string) => Promise<void>;
setWhisperModel?: (name: string) => void;
setWhisperModel?: (name: string) => Promise<void>;
ffmpegConfig?: FfmpegConfigType;
ffmpeg?: FFmpeg;
whisperConfig?: WhisperConfigType;
refreshWhisperConfig?: () => void;
setFfmegConfig?: (config: FfmpegConfigType) => void;
EnjoyApp?: EnjoyAppType;
language?: "en" | "zh-CN";
@@ -42,28 +46,31 @@ export const AppSettingsProvider = ({
const [webApi, setWebApi] = useState<Client>(null);
const [user, setUser] = useState<UserType | null>(null);
const [libraryPath, setLibraryPath] = useState("");
const [whisperModelsPath, setWhisperModelsPath] = useState<string>("");
const [whisperModel, setWhisperModel] = useState<string>(null);
const [whisperConfig, setWhisperConfig] = useState<WhisperConfigType>(null);
const [ffmpegConfig, setFfmegConfig] = useState<FfmpegConfigType>(null);
const [language, setLanguage] = useState<"en" | "zh-CN">();
const [ffmpeg, setFfmpeg] = useState<FFmpeg>(null);
const EnjoyApp = window.__ENJOY_APP__;
const ffmpegRef = useRef(new FFmpeg());
useEffect(() => {
fetchVersion();
fetchUser();
fetchLibraryPath();
fetchModel();
fetchFfmpegConfig();
refreshWhisperConfig();
fetchLanguage();
loadFfmpegWASM();
}, []);
useEffect(() => {
updatePaths();
refreshWhisperConfig();
}, [libraryPath]);
useEffect(() => {
validate();
}, [user, libraryPath, whisperModel, ffmpegConfig]);
}, [user, libraryPath]);
useEffect(() => {
if (!apiUrl) return;
@@ -76,6 +83,37 @@ export const AppSettingsProvider = ({
);
}, [user, apiUrl]);
const loadFfmpegWASM = async () => {
const baseURL = "https://unpkg.com/@ffmpeg/core-mt@0.12.6/dist/esm";
ffmpegRef.current.on("log", ({ message }) => {
console.log(message);
});
const coreURL = await toBlobURL(
`${baseURL}/ffmpeg-core.js`,
"text/javascript"
);
const wasmURL = await toBlobURL(
`${baseURL}/ffmpeg-core.wasm`,
"application/wasm"
);
const workerURL = await toBlobURL(
`${baseURL}/ffmpeg-core.worker.js`,
"text/javascript"
);
try {
await ffmpegRef.current.load({
coreURL,
wasmURL,
workerURL,
});
setFfmpeg(ffmpegRef.current);
} catch (err) {
toast.error(err.message);
}
};
const fetchLanguage = async () => {
const language = await EnjoyApp.settings.getLanguage();
setLanguage(language as "en" | "zh-CN");
@@ -90,10 +128,15 @@ export const AppSettingsProvider = ({
};
const fetchFfmpegConfig = async () => {
const config = await EnjoyApp.settings.getFfmpegConfig();
const config = await EnjoyApp.ffmpeg.config();
setFfmegConfig(config);
};
const refreshWhisperConfig = async () => {
const config = await EnjoyApp.whisper.config();
setWhisperConfig(config);
};
const fetchVersion = async () => {
const version = EnjoyApp.app.version;
setVersion(version);
@@ -140,29 +183,15 @@ export const AppSettingsProvider = ({
setLibraryPath(dir);
};
const updatePaths = async () => {
const _path = await EnjoyApp.settings.getWhisperModelsPath();
setWhisperModelsPath(_path);
};
const fetchModel = async () => {
const whisperModel = await EnjoyApp.settings.getWhisperModel();
setWhisperModel(whisperModel);
};
const fetchApiUrl = async () => {
return apiUrl;
};
const setModelHandler = async (name: string) => {
await EnjoyApp.settings.setWhisperModel(name);
setWhisperModel(name);
const setWhisperModel = async (name: string) => {
return EnjoyApp.whisper.setModel(name).then((config) => {
if (!config) return;
setWhisperConfig(config);
});
};
const validate = async () => {
setInitialized(
!!(user && libraryPath && whisperModel && ffmpegConfig?.ready)
);
setInitialized(Boolean(user && libraryPath));
};
return (
@@ -178,10 +207,11 @@ export const AppSettingsProvider = ({
logout,
libraryPath,
setLibraryPath: setLibraryPathHandler,
whisperModelsPath,
whisperModel,
setWhisperModel: setModelHandler,
setWhisperModel,
ffmpegConfig,
ffmpeg,
whisperConfig,
refreshWhisperConfig,
setFfmegConfig,
initialized,
}}

View File

@@ -6,7 +6,6 @@ import {
LoginForm,
ChooseLibraryPathInput,
WhisperModelOptionsPanel,
FfmpegCheck,
} from "@renderer/components";
import { AppSettingsProviderContext } from "@renderer/context";
import { CheckCircle2Icon } from "lucide-react";
@@ -15,13 +14,14 @@ export default () => {
const [currentStep, setCurrentStep] = useState<number>(1);
const [currentStepValid, setCurrentStepValid] = useState<boolean>(false);
const { user, libraryPath, whisperModel, ffmpegConfig, initialized } =
useContext(AppSettingsProviderContext);
const totalSteps = 5;
const { user, libraryPath, whisperConfig, initialized } = useContext(
AppSettingsProviderContext
);
const totalSteps = 4;
useEffect(() => {
validateCurrentStep();
}, [currentStep, user, whisperModel, ffmpegConfig]);
}, [currentStep, user, whisperConfig]);
const validateCurrentStep = async () => {
switch (currentStep) {
@@ -32,12 +32,9 @@ export default () => {
setCurrentStepValid(!!libraryPath);
break;
case 3:
setCurrentStepValid(!!whisperModel);
setCurrentStepValid(true);
break;
case 4:
setCurrentStepValid(ffmpegConfig?.ready);
break;
case 5:
setCurrentStepValid(initialized);
break;
default:
@@ -71,10 +68,6 @@ export default () => {
subtitle: t("chooseAIModelToDownload"),
},
4: {
title: t("ffmpegCheck"),
subtitle: t("checkIfFfmpegIsInstalled"),
},
5: {
title: t("finish"),
subtitle: t("youAreReadyToGo"),
},
@@ -95,8 +88,7 @@ export default () => {
{currentStep == 1 && <LoginForm />}
{currentStep == 2 && <ChooseLibraryPathInput />}
{currentStep == 3 && <WhisperModelOptionsPanel />}
{currentStep == 4 && <FfmpegCheck />}
{currentStep == 5 && (
{currentStep == 4 && (
<div className="flex justify-center items-center">
<CheckCircle2Icon className="text-green-500 w-24 h-24" />
</div>

View File

@@ -8,6 +8,7 @@ type EnjoyAppType = {
apiUrl: () => Promise<string>;
quit: () => Promise<void>;
openDevTools: () => Promise<void>;
createIssue: (title: string, body: string) => Promise<void>;
version: string;
};
system: {
@@ -71,17 +72,12 @@ type EnjoyAppType = {
setLibrary: (library: string) => Promise<void>;
getUser: () => Promise<UserType>;
setUser: (user: UserType) => Promise<void>;
getWhisperModel: () => Promise<string>;
setWhisperModel: (model: string) => Promise<void>;
getWhisperModelsPath: () => Promise<string>;
getUserDataPath: () => Promise<string>;
getLlm: (provider: SupportedLlmProviderType) => Promise<LlmProviderType>;
setLlm: (
provider: SupportedLlmProviderType,
LlmProviderType
) => Promise<void>;
getFfmpegConfig: () => Promise<FfmpegConfigType>;
setFfmpegConfig: (config: FfmpegConfigType) => Promise<void>;
getLanguage: () => Promise<string>;
switchLanguage: (language: string) => Promise<void>;
};
@@ -176,15 +172,17 @@ type EnjoyAppType = {
createSpeech: (id: string, configuration?: any) => Promise<SpeechType>;
};
whisper: {
availableModels: () => Promise<string[]>;
downloadModel: (name: string) => Promise<any>;
check: () => Promise<boolean>;
transcribe: (
config: () => Promise<WhisperConfigType>;
check: () => Promise<{ success: boolean; log: string }>;
setModel: (model: string) => Promise<WhisperConfigType>;
transcribeBlob: (
blob: { type: string; arrayBuffer: ArrayBuffer },
prompt?: string
) => Promise<{ file: string; content: string }>;
};
ffmpeg: {
config: () => Promise<FfmpegConfigType>;
setConfig: (config: FfmpegConfigType) => Promise<FfmpegConfigType>;
download: () => Promise<FfmpegConfigType>;
check: () => Promise<boolean>;
discover: () => Promise<{
@@ -195,7 +193,8 @@ type EnjoyAppType = {
};
download: {
onState: (callback: (event, state) => void) => void;
cancel: (filename: string) => void;
start: (url: string, savePath?: string) => void;
cancel: (filename: string) => Promise<void>;
cancelAll: () => void;
dashboard: () => Promise<DownloadStateType[]>;
removeAllListeners: () => void;
@@ -208,7 +207,7 @@ type EnjoyAppType = {
};
transcriptions: {
findOrCreate: (params: any) => Promise<TranscriptionType>;
process: (params: any) => Promise<void>;
process: (params: any, options: any) => Promise<void>;
update: (id: string, params: any) => Promise<void>;
};
waveforms: {

View File

@@ -25,6 +25,19 @@ type NotificationType = {
message: string;
};
type WhisperConfigType = {
availableModels: {
type: string;
name: string;
size: string;
url: string;
savePath: string;
}[];
modelsPath: string;
model: string;
ready?: boolean;
};
type WhisperOutputType = {
model: {
audio: {

View File

@@ -23,4 +23,14 @@ export default defineConfig({
"@commands": path.resolve(__dirname, "./src/commands"),
},
},
optimizeDeps: {
exclude: ["@ffmpeg/ffmpeg", "@ffmpeg/util"],
},
server: {
headers: {
"Cross-Origin-Resource-Policy": "cross-origin",
"Cross-Origin-Opener-Policy": "same-origin",
"Cross-Origin-Embedder-Policy": "require-corp",
},
},
});

View File

@@ -975,6 +975,29 @@ __metadata:
languageName: node
linkType: hard
"@ffmpeg/ffmpeg@npm:^0.12.10":
version: 0.12.10
resolution: "@ffmpeg/ffmpeg@npm:0.12.10"
dependencies:
"@ffmpeg/types": "npm:^0.12.2"
checksum: 224185b24b4fe9d3d1d6d17e741205793f74b29cce04435d6c25e5d17b7e12b608e876a64ad33d3e92114d4f0079f4e2e4d809632b354ad1a5f775f5bdb1b8e6
languageName: node
linkType: hard
"@ffmpeg/types@npm:^0.12.2":
version: 0.12.2
resolution: "@ffmpeg/types@npm:0.12.2"
checksum: 5c3f250c7ed828a3f66073504e3e92ee9f7cd73ddf3bbdf777263710beb6e757da3d7178b2bae29bef6d602abbc21b561bcc659455420d3aed1b30a535ca1d0a
languageName: node
linkType: hard
"@ffmpeg/util@npm:^0.12.1":
version: 0.12.1
resolution: "@ffmpeg/util@npm:0.12.1"
checksum: 943cc8b886cdfd3c448d3618acc3fe02abda46472c11aefb9e90a4a814962142632e90ab475374357fddb6ba8176757dbbe434147e0cd1a8fcd5133f95bee0a6
languageName: node
linkType: hard
"@floating-ui/core@npm:^1.5.3":
version: 1.5.3
resolution: "@floating-ui/core@npm:1.5.3"
@@ -5732,6 +5755,8 @@ __metadata:
"@electron-forge/plugin-auto-unpack-natives": "npm:^7.2.0"
"@electron-forge/plugin-vite": "npm:^7.2.0"
"@electron-forge/publisher-github": "npm:^7.2.0"
"@ffmpeg/ffmpeg": "npm:^0.12.10"
"@ffmpeg/util": "npm:^0.12.1"
"@hookform/resolvers": "npm:^3.3.4"
"@langchain/google-genai": "npm:^0.0.7"
"@mozilla/readability": "npm:^0.5.0"
@@ -5802,11 +5827,11 @@ __metadata:
fluent-ffmpeg: "npm:^2.1.2"
fs-extra: "npm:^11.2.0"
html-to-text: "npm:^9.0.5"
i18next: "npm:^23.7.16"
i18next: "npm:^23.7.18"
js-md5: "npm:^0.8.3"
langchain: "npm:^0.1.4"
langchain: "npm:^0.1.5"
lodash: "npm:^4.17.21"
lucide-react: "npm:^0.312.0"
lucide-react: "npm:^0.314.0"
mark.js: "npm:^8.11.1"
microsoft-cognitiveservices-speech-sdk: "npm:^1.34.0"
next-themes: "npm:^0.2.1"
@@ -5822,7 +5847,7 @@ __metadata:
react-i18next: "npm:^14.0.0"
react-markdown: "npm:^9.0.1"
react-router-dom: "npm:^6.21.3"
react-tooltip: "npm:^5.25.2"
react-tooltip: "npm:^5.26.0"
reflect-metadata: "npm:^0.2.1"
rimraf: "npm:^5.0.5"
sequelize: "npm:^6.35.2"
@@ -5838,7 +5863,7 @@ __metadata:
typescript: "npm:^5.3.3"
umzug: "npm:^3.5.1"
vite-plugin-static-copy: "npm:^1.0.0"
wavesurfer.js: "npm:^7.6.4"
wavesurfer.js: "npm:^7.6.5"
zod: "npm:^3.22.4"
zx: "npm:^7.2.3"
languageName: unknown
@@ -7450,12 +7475,12 @@ __metadata:
languageName: node
linkType: hard
"i18next@npm:^23.7.16":
version: 23.7.16
resolution: "i18next@npm:23.7.16"
"i18next@npm:^23.7.18":
version: 23.7.18
resolution: "i18next@npm:23.7.18"
dependencies:
"@babel/runtime": "npm:^7.23.2"
checksum: 8e18d56970a468e0c3f38cf105d2eb22c2b275b596ecd3ac7193fe6e73a57705f3415b510ea761b702163278b975bcab6c15187f254d350fb0501baf66927168
checksum: 7ff20eaaf6b5b6ff31c0bde2907149caecd7d63c6106e06afc9dd23b543b281733c50a45c6f19627338a3212426976fd7dfdfeece305ffbb7d3d4dcefe69c97c
languageName: node
linkType: hard
@@ -8134,9 +8159,9 @@ __metadata:
languageName: node
linkType: hard
"langchain@npm:^0.1.4":
version: 0.1.4
resolution: "langchain@npm:0.1.4"
"langchain@npm:^0.1.5":
version: 0.1.5
resolution: "langchain@npm:0.1.5"
dependencies:
"@anthropic-ai/sdk": "npm:^0.9.1"
"@langchain/community": "npm:~0.0.17"
@@ -8313,7 +8338,7 @@ __metadata:
optional: true
youtubei.js:
optional: true
checksum: cee2e20268a718e58acbc11b98def9d0badd2f215749bfbed26a8e89c257f6904ee775156b711c075c8aa202bd8be2995c1b166bef54a77d1ae81e7d1012dcde
checksum: a1cc0b725033efea19b7d97f77f6974bd49929cab38358c83538e1712fabb3beaadc43435c87ee16e158b797d08582ead4a1d4deefea40cbeb629618ffc1d8bd
languageName: node
linkType: hard
@@ -8641,12 +8666,12 @@ __metadata:
languageName: node
linkType: hard
"lucide-react@npm:^0.312.0":
version: 0.312.0
resolution: "lucide-react@npm:0.312.0"
"lucide-react@npm:^0.314.0":
version: 0.314.0
resolution: "lucide-react@npm:0.314.0"
peerDependencies:
react: ^16.5.1 || ^17.0.0 || ^18.0.0
checksum: 491b3403f51a1740c8cb0a558c4151c28eb175fc386099dbf9eaa63a314bb941ecb5cf6434b233ef6708e2af51ebaf8362987cc59958584990c1d1b5f34d4237
checksum: b7994d4a11167ca590e9ee2be1d8c9d4e22f860a4d40ff8a24cbb0b4b71f7b60c834c9ffeca72a746d21be9f47ce69d560ba0b0975ee51a35cd0d7e7b4a54365
languageName: node
linkType: hard
@@ -10815,16 +10840,16 @@ __metadata:
languageName: node
linkType: hard
"react-tooltip@npm:^5.25.2":
version: 5.25.2
resolution: "react-tooltip@npm:5.25.2"
"react-tooltip@npm:^5.26.0":
version: 5.26.0
resolution: "react-tooltip@npm:5.26.0"
dependencies:
"@floating-ui/dom": "npm:^1.0.0"
classnames: "npm:^2.3.0"
peerDependencies:
react: ">=16.14.0"
react-dom: ">=16.14.0"
checksum: 11eadec3590fdad0ae3e3d575e65418415aa2780fa117842721dc0d05b2d03b63e99f19514637b21e8e2a8fdc2f647af149f9e050a6d0b4b4e809f1b401cbaa6
checksum: 7bae47df14958be169f969e7d5e56d6ad9f294acedd6905c58af44d1e2c6123b4debb373f94f68c25a9d92256668970803e2ebcf3f9e7243786d2923866a43b9
languageName: node
linkType: hard
@@ -12787,10 +12812,10 @@ __metadata:
languageName: node
linkType: hard
"wavesurfer.js@npm:^7.6.4":
version: 7.6.4
resolution: "wavesurfer.js@npm:7.6.4"
checksum: d10ba699a451ddcba81aa5059b080eaa56376dc393f747438d1f3d8913cecaceca7c8bf75f622aad2d59c347c0ec9bb1dbb50247dcc00a57cf56709a02aaf9cb
"wavesurfer.js@npm:^7.6.5":
version: 7.6.5
resolution: "wavesurfer.js@npm:7.6.5"
checksum: 023324d2c06d45a26664e1034f39b0d87ab693a182958be331a1064038c09ec6b1d239be2dd4dd19a3ba9d9a1ea2897fe4b93a0bb055fbe293d756c3d5fad87a
languageName: node
linkType: hard