Improve some default settings & error messages (#656)
* use azure as default stt engine for new user
* refactor recording saving
* tune recording trimming
* fix recording err msg
@@ -60,7 +60,8 @@
       "duration": "Duration",
       "durationTooShort": "Duration too short",
       "failedToSave": "Failed to save recording",
-      "notFound": "Recording not found"
+      "notFound": "Recording not found",
+      "cannotDetectAnySound": "Cannot detect any sound"
     },
     "conversation": {
       "name": "Name",

@@ -60,7 +60,8 @@
       "duration": "时长",
       "durationTooShort": "录音时长太短",
       "failedToSave": "保存录音失败",
-      "notFound": "未找到录音"
+      "notFound": "未找到录音",
+      "cannotDetectAnySound": "未检测到任何声音"
     },
     "conversation": {
       "name": "对话标题",

@@ -112,7 +112,7 @@ class RecordingsHandler {
   }

   private async create(
-    event: IpcMainEvent,
+    _event: IpcMainEvent,
     options: Attributes<Recording> & {
       blob: {
         type: string;

@@ -122,28 +122,20 @@ class RecordingsHandler {
   ) {
     const { targetId, targetType, referenceId, referenceText, duration } =
       options;
-    return Recording.createFromBlob(options.blob, {
+    const recording = await Recording.createFromBlob(options.blob, {
       targetId,
       targetType,
       referenceId,
       referenceText,
       duration,
-    })
-      .then((recording) => {
-        if (!recording) {
-          throw new Error(t("models.recording.failedToSave"));
-        }
-        return recording.toJSON();
-      })
-      .catch((err) => {
-        event.sender.send("on-notification", {
-          type: "error",
-          message: err.message,
-        });
-      });
+    });
+    if (!recording) {
+      throw new Error(t("models.recording.failedToSave"));
+    }
+    return recording.toJSON();
   }

-  private async destroy(event: IpcMainEvent, id: string) {
+  private async destroy(_event: IpcMainEvent, id: string) {
     const recording = await Recording.findOne({
       where: {
         id,

@@ -151,17 +143,10 @@ class RecordingsHandler {
     });

     if (!recording) {
-      event.sender.send("on-notification", {
-        type: "error",
-        message: t("models.recording.notFound"),
-      });
+      throw new Error(t("models.recording.notFound"));
     }
-    return recording.destroy().catch((err) => {
-      event.sender.send("on-notification", {
-        type: "error",
-        message: err.message,
-      });
-    });
+
+    await recording.destroy();
   }

   private async upload(_event: IpcMainEvent, id: string) {

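With this refactor, create and destroy no longer push an "on-notification" event to the sender when something fails; they simply throw, and the renderer is expected to catch the rejected promise itself (as MediaRecordings does further down with toast.error). The sketch below illustrates why a plain throw is enough. It assumes the preload bridge forwards these calls through ipcMain.handle / ipcRenderer.invoke; the "recordings-destroy" channel name and the findRecordingById stand-in are illustrative only, not the project's actual wiring.

// Main process, hypothetical wiring. An Error thrown inside an ipcMain.handle()
// handler rejects the matching ipcRenderer.invoke() promise in the renderer
// (Electron prefixes the message with "Error invoking remote method ..." unless
// the bridge unwraps it), so the UI can simply .catch() and show a toast.
import { ipcMain } from "electron";

type RecordingLike = { destroy: () => Promise<void> };
// Stand-in for Recording.findOne({ where: { id } }) in the real handler.
declare function findRecordingById(id: string): Promise<RecordingLike | null>;

ipcMain.handle("recordings-destroy", async (_event, id: string) => {
  const recording = await findRecordingById(id);
  if (!recording) {
    // The real handler throws t("models.recording.notFound").
    throw new Error("Recording not found");
  }
  await recording.destroy();
});
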
@@ -27,6 +27,7 @@ import { WEB_API_URL } from "@/constants";
 import { AzureSpeechSdk } from "@main/azure-speech-sdk";
 import echogarden from "@main/echogarden";
 import camelcaseKeys from "camelcase-keys";
+import { t } from "i18next";

 const logger = log.scope("db/models/recording");

@@ -311,25 +312,19 @@ export class Recording extends Model<Recording> {
       Buffer.from(blob.arrayBuffer)
     );

-    // denoise audio
-    // const { denoisedAudio } = await echogarden.denoise(
-    //   rawAudio,
-    //   {}
-    // );
-
     // trim audio
     let trimmedSamples = echogarden.trimAudioStart(
       rawAudio.audioChannels[0],
       0,
-      -30
+      -35
     );
-    trimmedSamples = echogarden.trimAudioEnd(trimmedSamples, 0, -30);
+    trimmedSamples = echogarden.trimAudioEnd(trimmedSamples, 0, -35);
     rawAudio.audioChannels[0] = trimmedSamples;

     duration = Math.round(echogarden.getRawAudioDuration(rawAudio) * 1000);

     if (duration === 0) {
-      throw new Error("Failed to get duration of the recording");
+      throw new Error(t("models.recording.cannotDetectAnySound"));
     }

     // save recording to file

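Two behavior changes sit in this hunk: the trimming threshold moves from -30 to -35, and a take that trims down to nothing now fails with the translated "Cannot detect any sound" message instead of a generic duration error. The rough sketch below walks the silent-take path. It assumes the final argument to trimAudioStart/trimAudioEnd is a silence threshold in dB, that the middle argument is the number of silent samples to keep, and that the raw audio object is shaped like { audioChannels, sampleRate }; all three are inferred from how this code calls the @main/echogarden wrapper, not verified against the library's documentation.

import echogarden from "@main/echogarden";

// A one-second, completely silent mono take at 44.1 kHz.
const sampleRate = 44100;
const rawAudio = {
  audioChannels: [new Float32Array(sampleRate)], // all zeros
  sampleRate,
};

// Trim leading and trailing audio below the -35 dB threshold, keeping 0 silent samples.
let samples = echogarden.trimAudioStart(rawAudio.audioChannels[0], 0, -35);
samples = echogarden.trimAudioEnd(samples, 0, -35);
rawAudio.audioChannels[0] = samples;

// Everything is trimmed away, so the duration rounds to 0 ms and createFromBlob
// throws t("models.recording.cannotDetectAnySound").
const duration = Math.round(echogarden.getRawAudioDuration(rawAudio) * 1000);
console.log(duration); // 0

Lowering the threshold to -35 dB means edge samples have to be quieter to count as silence, so slightly more of a quiet take is preserved than before.
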
@@ -68,8 +68,8 @@ const whisperConfig = (): WhisperConfigType => {
   ) as WhisperConfigType["service"];

   if (!service) {
-    settings.setSync("whisper.service", "local");
-    service = "local";
+    settings.setSync("whisper.service", "azure");
+    service = "azure";
   }

   return {

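The practical effect here is that a fresh install with no stored whisper.service value now defaults to the Azure speech-to-text service instead of the local Whisper model, and the default is written back so later reads are stable. A paraphrased sketch of that read-then-seed pattern follows; it assumes the settings module is electron-settings and that the value is read with getSync just above this hunk (the read and the import are outside the shown lines).

import settings from "electron-settings";

// Read the configured STT service; if nothing has been saved yet (a new user),
// persist "azure" as the default so subsequent launches read a stable value.
let service = settings.getSync("whisper.service");
if (!service) {
  settings.setSync("whisper.service", "azure");
  service = "azure";
}
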
@@ -51,7 +51,7 @@ export const MediaRecorder = () => {
       transcription?.result?.timeline?.[currentSegmentIndex];
     if (!currentSegment) return;

-    return EnjoyApp.recordings.create({
+    await EnjoyApp.recordings.create({
       targetId: media.id,
       targetType: media.mediaType,
       blob: {

@@ -14,6 +14,7 @@ import {
   DropdownMenuTrigger,
   DropdownMenuContent,
   ScrollArea,
+  toast,
 } from "@renderer/components/ui";
 import {
   AppSettingsProviderContext,

@@ -48,7 +49,9 @@ export const MediaRecordings = () => {
   const handleDelete = () => {
     if (!selectedRecording) return;

-    EnjoyApp.recordings.destroy(selectedRecording.id);
+    EnjoyApp.recordings.destroy(selectedRecording.id).catch((err) => {
+      toast.error(err.message);
+    });
   };

   useEffect(() => {
