diff --git a/enjoy/src/i18n/en.json b/enjoy/src/i18n/en.json
index 07ecae5c..55a00b71 100644
--- a/enjoy/src/i18n/en.json
+++ b/enjoy/src/i18n/en.json
@@ -61,7 +61,7 @@
       "durationTooShort": "Duration too short",
       "failedToSave": "Failed to save recording",
       "notFound": "Recording not found",
-      "cannotDetectAnySound": "Cannot detect any sound"
+      "cannotDetectAnySound": "Cannot detect any sound in the recording. Please check your microphone."
     },
     "conversation": {
       "name": "Name",
@@ -737,5 +737,6 @@
     "waveforms": "Contains all waveforms decoded from audio/videos. They're for caching. It's save to delete them.",
     "logs": "Contains some logs helpful for debugging.",
     "cache": "Contains cached files. They will be cleaned up automatically."
-  }
+  },
+  "recordingIsTooLongToAssess": "Recording is too long to assess. The maximum duration is 60 seconds."
 }
diff --git a/enjoy/src/i18n/zh-CN.json b/enjoy/src/i18n/zh-CN.json
index 7cda8e26..c86bdfa5 100644
--- a/enjoy/src/i18n/zh-CN.json
+++ b/enjoy/src/i18n/zh-CN.json
@@ -61,7 +61,7 @@
       "durationTooShort": "录音时长太短",
       "failedToSave": "保存录音失败",
       "notFound": "未找到录音",
-      "cannotDetectAnySound": "未检测到任何声音"
+      "cannotDetectAnySound": "录音中未检测到任何声音,请检查您的录音设备是否正常后重试"
     },
     "conversation": {
       "name": "对话标题",
@@ -737,5 +737,6 @@
     "waveforms": "波形文件,用于显示音频波形。用作缓存,可以删除",
     "logs": "日志文件,帮助开发者调试问题",
     "cache": "缓存文件,会自动清理。"
-  }
+  },
+  "recordingIsTooLongToAssess": "录音时长过长,无法评估。最长支持 1 分钟。"
 }
diff --git a/enjoy/src/main/db/models/recording.ts b/enjoy/src/main/db/models/recording.ts
index 31812289..59d37a52 100644
--- a/enjoy/src/main/db/models/recording.ts
+++ b/enjoy/src/main/db/models/recording.ts
@@ -254,10 +254,9 @@ export class Recording extends Model {
   ) {
     const { targetId, targetType, referenceId, referenceText, language } =
       params;
-    let { duration } = params;
 
     if (blob.arrayBuffer.byteLength === 0) {
-      throw new Error("Empty recording");
+      throw new Error(t("models.recording.cannotDetectAnySound"));
     }
 
     let rawAudio = await echogarden.ensureRawAudio(
@@ -270,10 +269,12 @@
       0,
       -50
     );
-    trimmedSamples = echogarden.trimAudioEnd(trimmedSamples, 0, -50);
+    trimmedSamples = echogarden.trimAudioEnd(trimmedSamples, 0, -100);
     rawAudio.audioChannels[0] = trimmedSamples;
 
-    duration = Math.round(echogarden.getRawAudioDuration(rawAudio) * 1000);
+    const duration = Math.round(
+      echogarden.getRawAudioDuration(rawAudio) * 1000
+    );
 
     if (duration === 0) {
       throw new Error(t("models.recording.cannotDetectAnySound"));
diff --git a/enjoy/src/renderer/components/medias/media-current-recording.tsx b/enjoy/src/renderer/components/medias/media-current-recording.tsx
index 67574e29..a5732ec5 100644
--- a/enjoy/src/renderer/components/medias/media-current-recording.tsx
+++ b/enjoy/src/renderer/components/medias/media-current-recording.tsx
@@ -693,57 +693,27 @@ export const MediaCurrentRecording = () => {
 };
 
 export const MediaRecordButton = () => {
-  const {
-    media,
-    recordingBlob,
-    isRecording,
-    startRecording,
-    stopRecording,
-    recordingTime,
-    transcription,
-    currentSegmentIndex,
-  } = useContext(MediaPlayerProviderContext);
+  const { media, isRecording, startRecording, stopRecording, recordingTime } =
+    useContext(MediaPlayerProviderContext);
   const { EnjoyApp } = useContext(AppSettingsProviderContext);
+  const [access, setAccess] = useState(true);
   const [active, setActive] = useState(false);
 
   const ref = useRef(null);
 
-  const createRecording = async (blob: Blob) => {
-    const currentSegment =
-      transcription?.result?.timeline?.[currentSegmentIndex];
-    if (!currentSegment) return;
-
-    EnjoyApp.recordings
-      .create({
-        targetId: media.id,
-        targetType: media.mediaType,
-        blob: {
-          type: recordingBlob.type.split(";")[0],
-          arrayBuffer: await blob.arrayBuffer(),
-        },
-        referenceId: currentSegmentIndex,
-        referenceText: currentSegment.text,
-      })
-      .then(() =>
-        toast.success(t("recordingSaved"), { position: "bottom-right" })
-      )
-      .catch((err) =>
-        toast.error(t("failedToSaveRecording" + " : " + err.message))
-      );
+  const askForMediaAccess = () => {
+    EnjoyApp.system.preferences.mediaAccess("microphone").then((access) => {
+      if (access) {
+        setAccess(true);
+      } else {
+        setAccess(false);
+        toast.warning(t("noMicrophoneAccess"));
+      }
+    });
   };
 
-  /*
-   * Save recording
-   * when recording is stopped
-   * And only when record button is active
-   */
   useEffect(() => {
-    if (!media) return;
-    if (!transcription) return;
-    if (!active) return;
-    if (!recordingBlob) return;
-
-    createRecording(recordingBlob);
-  }, [recordingBlob, media, transcription, active]);
+    askForMediaAccess();
+  }, [media]);
 
   useEffect(() => {
     if (!active) return;
@@ -769,6 +739,7 @@
+        )}
+
+      event.preventDefault()}
+        className="max-w-screen-md xl:max-w-screen-lg h-5/6 flex flex-col p-0"
+      >
+        {t("readThrough")}
+
+
+
+          {media.name}
+
+          {open &&
+            transcription.result.timeline.map(
+              (sentence: TimelineEntry, index: number) => (
+
+                  #{index + 1}
+
+              )
+            )}
+
+
+          {open && }
+
+
+          {open && }
+
+
+
+      );
+};
+
+const TranscriptionRecordingsList = () => {
   const [deleting, setDeleting] = useState(null);
   const { EnjoyApp } = useContext(AppSettingsProviderContext);
-  const { media, transcription } = useContext(MediaPlayerProviderContext);
+  const { media } = useContext(MediaPlayerProviderContext);
   const [assessing, setAssessing] = useState();
 
   const handleDelete = () => {
@@ -106,72 +165,36 @@ export const MediaTranscriptionReadButton = (props: {
   } = useRecordings(media, -1);
 
   return (
-    <>
-
-      {props.children ? (
-        props.children
-      ) : (
-
-      )}
-
-      event.preventDefault()}
-        className="max-w-screen-md xl:max-w-screen-lg h-5/6 flex flex-col p-0"
+ {recordings.map((recording) => ( +
- {t("readThrough")} - -
-

{media.name}

- {open && - transcription.result.timeline.map( - (sentence: TimelineEntry, index: number) => ( -
- - #{index + 1} - - -
- ) - )} -
-
- {recordings.map((recording) => ( -
-
- - {formatDateTime(recording.createdAt)} - - - - - +
+ + {formatDateTime(recording.createdAt)} + + + + + - - handleDownload(recording)} - > - - {t("download")} - - setAssessing(recording)} - > - + handleDownload(recording)} + > + + {t("download")} + + setAssessing(recording)} + > + - {t("pronunciationAssessment")} - - setDeleting(recording)} - > - - {t("delete")} - - - -
- - - - -
- ))} - {hasMoreRecordings && ( -
- -
- )} -
- -
- {open && fetchRecordings(0)} />} + /> + {t("pronunciationAssessment")} + + setDeleting(recording)} + > + + {t("delete")} + + +
- -
+ + + ))} + {hasMoreRecordings && ( +
+ +
+      )}
+
+      {
+        if (!open) setAssessing(undefined);
+      }}
+    >
+
+          {assessing && }
+
-
-      {
-        if (!open) setAssessing(undefined);
-      }}
-    >
-
-          {assessing && }
-
-
   );
 };
 
-const RecorderButton = (props: { onRecorded: () => void }) => {
-  const { onRecorded } = props;
   const {
-    media,
-    recordingBlob,
     isRecording,
     isPaused,
     togglePauseResume,
     startRecording,
     stopRecording,
-    transcription,
-    currentSegmentIndex,
     mediaRecorder,
     recordingTime,
   } = useContext(MediaPlayerProviderContext);
   const { EnjoyApp } = useContext(AppSettingsProviderContext);
   const [access, setAccess] = useState(false);
 
-  const createRecording = async (blob: Blob) => {
-    const currentSegment =
-      transcription?.result?.timeline?.[currentSegmentIndex];
-    if (!currentSegment) return;
-
-    EnjoyApp.recordings
-      .create({
-        targetId: media.id,
-        targetType: media.mediaType,
-        blob: {
-          type: recordingBlob.type.split(";")[0],
-          arrayBuffer: await blob.arrayBuffer(),
-        },
-        referenceId: -1,
-        referenceText: transcription.result.timeline
-          .map((s: TimelineEntry) => s.text)
-          .join("\n"),
-      })
-      .then(() =>
-        toast.success(t("recordingSaved"), { position: "bottom-right" })
-      )
-      .catch((err) =>
-        toast.error(t("failedToSaveRecording" + " : " + err.message))
-      );
-  };
-
   const askForMediaAccess = () => {
     EnjoyApp.system.preferences.mediaAccess("microphone").then((access) => {
       if (access) {
@@ -334,20 +313,6 @@ const RecorderButton = (props: { onRecorded: () => void }) => {
     askForMediaAccess();
   }, []);
 
-  useEffect(() => {
-    if (!media) return;
-    if (!transcription) return;
-    if (!recordingBlob) return;
-
-    createRecording(recordingBlob);
-  }, [recordingBlob, media, transcription]);
-
-  useEffect(() => {
-    if (recordingTime >= TEN_MINUTES) {
-      onRecorded();
-    }
-  }, [recordingTime]);
-
   if (isRecording) {
     return (
       <div
diff --git a/enjoy/src/renderer/components/misc/wavesurfer-player.tsx b/enjoy/src/renderer/components/misc/wavesurfer-player.tsx
index b2d7578f..3c950c46 100644
--- a/enjoy/src/renderer/components/misc/wavesurfer-player.tsx
+++ b/enjoy/src/renderer/components/misc/wavesurfer-player.tsx
@@ -16,7 +16,6 @@ export const WavesurferPlayer = (props: {
   id: string;
   src: string;
   height?: number;
-  currentTime?: number;
   setCurrentTime?: (currentTime: number) => void;
   onError?: (error: Error) => void;
   wavesurferOptions?: any;
@@ -28,7 +27,7 @@
     src,
     height = 80,
     onError,
-    setCurrentTime,
+    setCurrentTime: onSetCurrentTime,
     wavesurferOptions,
     pitchContourOptions,
     className = "",
@@ -42,6 +41,7 @@
   });
   const [duration, setDuration] = useState(0);
   const [error, setError] = useState(null);
+  const [currentTime, setCurrentTime] = useState(0);
 
   const onPlayClick = useCallback(() => {
     if (!wavesurfer) return;
@@ -93,7 +93,8 @@
       setIsPlaying(false);
     }),
     wavesurfer.on("timeupdate", (time: number) => {
-      setCurrentTime && setCurrentTime(time);
+      setCurrentTime(time);
+      onSetCurrentTime && onSetCurrentTime(time);
     }),
     wavesurfer.on("ready", () => {
       setDuration(wavesurfer.getDuration());
@@ -161,7 +162,7 @@
-          {secondsToTimestamp(duration)}
+          {secondsToTimestamp(currentTime)} / {secondsToTimestamp(duration)}
diff --git a/enjoy/src/renderer/components/pronunciation-assessments/pronunciation-assessment-word-result.tsx b/enjoy/src/renderer/components/pronunciation-assessments/pronunciation-assessment-word-result.tsx
index d64faa26..025b5928 100644
--- a/enjoy/src/renderer/components/pronunciation-assessments/pronunciation-assessment-word-result.tsx
+++ b/enjoy/src/renderer/components/pronunciation-assessments/pronunciation-assessment-word-result.tsx
@@ -84,6 +84,11 @@ export const PronunciationAssessmentWordResult = (props: {
     if (!audio.current) {
       audio.current = new Audio();
     }
+
+    return () => {
+      audio.current?.pause();
+      audio.current = null;
+    };
   }, []);
 
   return (
diff --git a/enjoy/src/renderer/components/recordings/recording-detail.tsx b/enjoy/src/renderer/components/recordings/recording-detail.tsx
index ef2fb332..741e9d2f 100644
--- a/enjoy/src/renderer/components/recordings/recording-detail.tsx
+++ b/enjoy/src/renderer/components/recordings/recording-detail.tsx
@@ -8,6 +8,7 @@
 import { useState, useContext, useEffect } from "react";
 import { AppSettingsProviderContext } from "@renderer/context";
 import { Tooltip } from "react-tooltip";
 import { usePronunciationAssessments } from "@renderer/hooks";
+import { t } from "i18next";
 
 export const RecordingDetail = (props: {
   recording: RecordingType;
@@ -32,6 +33,10 @@
     if (assessing) return;
     if (result) return;
 
+    if (recording.duration > 60 * 1000) {
+      toast.error(t("recordingIsTooLongToAssess"));
+      return;
+    }
     setAssessing(true);
     createAssessment({
       recording,
diff --git a/enjoy/src/renderer/context/media-player-provider.tsx b/enjoy/src/renderer/context/media-player-provider.tsx
index 175c6a31..c908988f 100644
--- a/enjoy/src/renderer/context/media-player-provider.tsx
+++ b/enjoy/src/renderer/context/media-player-provider.tsx
@@ -17,6 +17,10 @@
 import { toast } from "@renderer/components/ui";
 import { Tooltip } from "react-tooltip";
 import { debounce } from "lodash";
 import { useAudioRecorder } from "react-audio-voice-recorder";
+import { t } from "i18next";
+
+const ONE_MINUTE = 60;
+const TEN_MINUTES = 10 * ONE_MINUTE;
 
 type MediaPlayerContextType = {
   layout: {
@@ -84,6 +88,8 @@
   recordingBlob: Blob;
   isRecording: boolean;
   isPaused: boolean;
+  recordingType: string;
+  setRecordingType: (type: string) => void;
   recordingTime: number;
   mediaRecorder: MediaRecorder;
   currentRecording: RecordingType;
@@ -171,6 +177,7 @@
   const [zoomRatio, setZoomRatio] = useState(1.0);
   const [currentRecording, setCurrentRecording] = useState(null);
+  const [recordingType, setRecordingType] = useState("segment");
 
   const [transcriptionDraft, setTranscriptionDraft] = useState();
 
@@ -454,6 +461,44 @@
   const deboundeCalculateHeight = debounce(calculateHeight, 100);
 
+  const createRecording = async (blob: Blob) => {
+    if (!blob) return;
+    if (!media) return;
+    if (!transcription?.result?.timeline) return;
+
+    let referenceId = -1;
+    let referenceText = transcription.result.timeline
+      .map((s: TimelineEntry) => s.text)
+      .join("\n");
+
+    if (recordingType === "segment") {
+      const currentSegment =
+        transcription?.result?.timeline?.[currentSegmentIndex];
+      if (!currentSegment) return;
+
+      referenceId = currentSegmentIndex;
+      referenceText = currentSegment.text;
+    }
+
+    EnjoyApp.recordings
+      .create({
+        targetId: media.id,
+        targetType: media.mediaType,
+        blob: {
+          type: recordingBlob.type.split(";")[0],
+          arrayBuffer: await blob.arrayBuffer(),
+        },
+        referenceId,
+        referenceText,
+      })
+      .then(() =>
+        toast.success(t("recordingSaved"), { position: "bottom-right" })
+      )
+      .catch((err) =>
+        toast.error(t("failedToSaveRecording") + " : " + err.message)
+      );
+  };
+
   /*
    * When wavesurfer is decoded,
    * set up event listeners for wavesurfer
@@ -607,6 +652,24 @@
     };
   }, []);
 
+  /**
+   * Create a recording whenever recordingBlob is updated.
+   */
+  useEffect(() => {
+    createRecording(recordingBlob);
+  }, [recordingBlob]);
+
+  /**
+   * Automatically stop the recording when the time limit is reached.
+   */
+  useEffect(() => {
+    if (recordingType === "segment" && recordingTime >= ONE_MINUTE) {
+      stopRecording();
+    } else if (recordingTime >= TEN_MINUTES) {
+      stopRecording();
+    }
+  }, [recordingTime, recordingType]);
+
   return (
     <>
 
   useEffect(() => {
     preparePresets();
-  }, []);
+  }, [currentEngine]);
 
   return (
     <div