Improve recorder (#1001)
* update recording notification
* refactor recording button
* fix undefined
* improve wavesurfer player
@@ -61,7 +61,7 @@
     "durationTooShort": "Duration too short",
     "failedToSave": "Failed to save recording",
     "notFound": "Recording not found",
-    "cannotDetectAnySound": "Cannot detect any sound"
+    "cannotDetectAnySound": "Cannot detect any sound in the recording, please check your microphone"
   },
   "conversation": {
     "name": "Name",
@@ -737,5 +737,6 @@
     "waveforms": "Contains all waveforms decoded from audio/videos. They're for caching. It's safe to delete them.",
     "logs": "Contains some logs helpful for debugging.",
     "cache": "Contains cached files. They will be cleaned up automatically."
   }
 },
+  "recordingIsTooLongToAssess": "Recording is too long to assess. The maximum duration is 60 seconds."
 }
@@ -61,7 +61,7 @@
     "durationTooShort": "录音时长太短",
     "failedToSave": "保存录音失败",
     "notFound": "未找到录音",
-    "cannotDetectAnySound": "未检测到任何声音"
+    "cannotDetectAnySound": "录音中未检测到任何声音,请检查您的录音设备是否正常后重试"
   },
   "conversation": {
     "name": "对话标题",
@@ -737,5 +737,6 @@
     "waveforms": "波形文件,用于显示音频波形。用作缓存,可以删除",
     "logs": "日志文件,帮助开发者调试问题",
     "cache": "缓存文件,会自动清理。"
   }
 },
+  "recordingIsTooLongToAssess": "录音时长过长,无法评估。最长支持 1 分钟。"
 }
@@ -254,10 +254,9 @@ export class Recording extends Model<Recording> {
   ) {
     const { targetId, targetType, referenceId, referenceText, language } =
       params;
-    let { duration } = params;
 
     if (blob.arrayBuffer.byteLength === 0) {
-      throw new Error("Empty recording");
+      throw new Error(t("models.recording.cannotDetectAnySound"));
     }
 
     let rawAudio = await echogarden.ensureRawAudio(
@@ -270,10 +269,12 @@ export class Recording extends Model<Recording> {
       0,
       -50
     );
-    trimmedSamples = echogarden.trimAudioEnd(trimmedSamples, 0, -50);
+    trimmedSamples = echogarden.trimAudioEnd(trimmedSamples, 0, -100);
     rawAudio.audioChannels[0] = trimmedSamples;
 
-    duration = Math.round(echogarden.getRawAudioDuration(rawAudio) * 1000);
+    const duration = Math.round(
+      echogarden.getRawAudioDuration(rawAudio) * 1000
+    );
 
     if (duration === 0) {
       throw new Error(t("models.recording.cannotDetectAnySound"));
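The model change above raises the end-trim silence threshold from -50 dB to -100 dB and recomputes the stored duration from the trimmed audio, so an all-silence take now fails with the localized cannotDetectAnySound message instead of saving a zero-length recording. A minimal sketch of that flow, using only the echogarden calls that appear verbatim in the diff (trimAudioEnd, getRawAudioDuration); the validateTrimmedDuration helper name is hypothetical:

    import * as echogarden from "echogarden";
    import { t } from "i18next";

    // Sketch only: mirrors the Recording model's updated logic, not the actual export.
    function validateTrimmedDuration(rawAudio: any): number {
      // Drop trailing samples quieter than -100 dB (tightened from -50 dB by this commit).
      rawAudio.audioChannels[0] = echogarden.trimAudioEnd(
        rawAudio.audioChannels[0],
        0,
        -100
      );

      // Duration is derived from the trimmed audio and stored in milliseconds.
      const duration = Math.round(echogarden.getRawAudioDuration(rawAudio) * 1000);
      if (duration === 0) {
        // Nothing but silence survived trimming.
        throw new Error(t("models.recording.cannotDetectAnySound"));
      }
      return duration;
    }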
@@ -693,57 +693,27 @@ export const MediaCurrentRecording = () => {
 };
 
 export const MediaRecordButton = () => {
-  const {
-    media,
-    recordingBlob,
-    isRecording,
-    startRecording,
-    stopRecording,
-    recordingTime,
-    transcription,
-    currentSegmentIndex,
-  } = useContext(MediaPlayerProviderContext);
+  const { media, isRecording, startRecording, stopRecording, recordingTime } =
+    useContext(MediaPlayerProviderContext);
   const { EnjoyApp } = useContext(AppSettingsProviderContext);
   const [access, setAccess] = useState(true);
   const [active, setActive] = useState(false);
   const ref = useRef(null);
 
-  const createRecording = async (blob: Blob) => {
-    const currentSegment =
-      transcription?.result?.timeline?.[currentSegmentIndex];
-    if (!currentSegment) return;
-
-    EnjoyApp.recordings
-      .create({
-        targetId: media.id,
-        targetType: media.mediaType,
-        blob: {
-          type: recordingBlob.type.split(";")[0],
-          arrayBuffer: await blob.arrayBuffer(),
-        },
-        referenceId: currentSegmentIndex,
-        referenceText: currentSegment.text,
-      })
-      .then(() =>
-        toast.success(t("recordingSaved"), { position: "bottom-right" })
-      )
-      .catch((err) =>
-        toast.error(t("failedToSaveRecording" + " : " + err.message))
-      );
-  };
-
   const askForMediaAccess = () => {
     EnjoyApp.system.preferences.mediaAccess("microphone").then((access) => {
       if (access) {
         setAccess(true);
       } else {
         setAccess(false);
         toast.warning(t("noMicrophoneAccess"));
       }
     });
   };
 
-  /*
-   * Save recording
-   * when recording is stopped
-   * And only when record button is active
-   */
   useEffect(() => {
     if (!media) return;
-    if (!transcription) return;
-    if (!active) return;
-    if (!recordingBlob) return;
-
-    createRecording(recordingBlob);
-  }, [recordingBlob, media, transcription, active]);
+    askForMediaAccess();
+  }, [media]);
 
   useEffect(() => {
     if (!active) return;
@@ -769,6 +739,7 @@ export const MediaRecordButton = () => {
       <Button
         ref={ref}
         variant="ghost"
+        disabled={!access}
         onClick={() => {
           if (isRecording) {
             stopRecording();
@@ -44,22 +44,81 @@ import {
 } from "lucide-react";
 import { useRecordings } from "@renderer/hooks";
 import { formatDateTime } from "@renderer/lib/utils";
 import { MediaPlayer, MediaProvider } from "@vidstack/react";
 import {
   DefaultAudioLayout,
   defaultLayoutIcons,
 } from "@vidstack/react/player/layouts/default";
-import { Caption, RecordingDetail } from "@renderer/components";
+import {
+  Caption,
+  RecordingDetail,
+  WavesurferPlayer,
+} from "@renderer/components";
 import { LiveAudioVisualizer } from "react-audio-visualize";
 
-const TEN_MINUTES = 60 * 10;
 export const MediaTranscriptionReadButton = (props: {
   children: React.ReactNode;
 }) => {
   const [open, setOpen] = useState(false);
   const { media, transcription, setRecordingType } = useContext(
     MediaPlayerProviderContext
   );
 
+  useEffect(() => {
+    if (open) {
+      setRecordingType("transcription");
+    } else {
+      setRecordingType("segment");
+    }
+  }, [open]);
+
+  return (
+    <>
+      <Dialog open={open} onOpenChange={setOpen}>
+        <DialogTrigger asChild>
+          {props.children ? (
+            props.children
+          ) : (
+            <Button variant="outline" size="sm" className="hidden lg:block">
+              {t("readThrough")}
+            </Button>
+          )}
+        </DialogTrigger>
+        <DialogContent
+          onPointerDownOutside={(event) => event.preventDefault()}
+          className="max-w-screen-md xl:max-w-screen-lg h-5/6 flex flex-col p-0"
+        >
+          <DialogTitle className="hidden">{t("readThrough")}</DialogTitle>
+          <ScrollArea className="flex-1 px-6 pt-4">
+            <div className="select-text mx-auto w-full max-w-prose">
+              <h3 className="font-bold text-xl my-4">{media.name}</h3>
+              {open &&
+                transcription.result.timeline.map(
+                  (sentence: TimelineEntry, index: number) => (
+                    <div key={index} className="flex flex-start space-x-2 mb-4">
+                      <span className="text-sm text-muted-foreground min-w-max leading-8">
+                        #{index + 1}
+                      </span>
+                      <Caption
+                        caption={sentence}
+                        currentSegmentIndex={index}
+                        displayIpa={true}
+                        displayNotes={false}
+                      />
+                    </div>
+                  )
+                )}
+            </div>
+            <div className="mt-12">
+              {open && <TranscriptionRecordingsList />}
+            </div>
+          </ScrollArea>
+          <div className="h-16 border-t">{open && <RecorderButton />}</div>
+        </DialogContent>
+      </Dialog>
+    </>
+  );
+};
+
+const TranscriptionRecordingsList = () => {
   const [deleting, setDeleting] = useState<RecordingType>(null);
   const { EnjoyApp } = useContext(AppSettingsProviderContext);
-  const { media, transcription } = useContext(MediaPlayerProviderContext);
+  const { media } = useContext(MediaPlayerProviderContext);
   const [assessing, setAssessing] = useState<RecordingType>();
 
   const handleDelete = () => {
@@ -106,72 +165,36 @@ export const MediaTranscriptionReadButton = (props: {
   } = useRecordings(media, -1);
 
   return (
-    <>
-      <Dialog open={open} onOpenChange={setOpen}>
-        <DialogTrigger asChild>
-          {props.children ? (
-            props.children
-          ) : (
-            <Button variant="outline" size="sm" className="hidden lg:block">
-              {t("readThrough")}
-            </Button>
-          )}
-        </DialogTrigger>
-        <DialogContent
-          onPointerDownOutside={(event) => event.preventDefault()}
-          className="max-w-screen-md xl:max-w-screen-lg h-5/6 flex flex-col p-0"
-        >
-          <DialogTitle className="hidden">{t("readThrough")}</DialogTitle>
-          <ScrollArea className="flex-1 px-6 pt-4">
-            <div className="select-text mx-auto w-full max-w-prose">
-              <h3 className="font-bold text-xl my-4">{media.name}</h3>
-              {open &&
-                transcription.result.timeline.map(
-                  (sentence: TimelineEntry, index: number) => (
-                    <div key={index} className="flex flex-start space-x-2 mb-4">
-                      <span className="text-sm text-muted-foreground min-w-max leading-8">
-                        #{index + 1}
-                      </span>
-                      <Caption
-                        caption={sentence}
-                        currentSegmentIndex={index}
-                        displayIpa={true}
-                        displayNotes={false}
-                      />
-                    </div>
-                  )
-                )}
-            </div>
-            <div className="mt-12">
-              {recordings.map((recording) => (
-                <div
-                  key={recording.id}
-                  className="mx-auto w-full max-w-prose px-4 mb-4"
-                  id={recording.id}
-                >
-                  <div className="flex items-center justify-end space-x-2 mb-2">
-                    <span className="text-sm text-muted-foreground">
-                      {formatDateTime(recording.createdAt)}
-                    </span>
-                    <DropdownMenu>
-                      <DropdownMenuTrigger>
-                        <MoreHorizontalIcon className="w-4 h-4" />
-                      </DropdownMenuTrigger>
-
-                      <DropdownMenuContent>
-                        <DropdownMenuItem
-                          className="cursor-pointer"
-                          onClick={() => handleDownload(recording)}
-                        >
-                          <DownloadIcon className="w-4 h-4 mr-2" />
-                          <span>{t("download")}</span>
-                        </DropdownMenuItem>
-                        <DropdownMenuItem
-                          className="cursor-pointer"
-                          onClick={() => setAssessing(recording)}
-                        >
-                          <GaugeCircleIcon
-                            className={`w-4 h-4 mr-2
+    <div>
+      {recordings.map((recording) => (
+        <div
+          key={recording.id}
+          className="mx-auto w-full max-w-prose px-4 mb-4"
+          id={recording.id}
+        >
+          <div className="flex items-center justify-end space-x-2 mb-2">
+            <span className="text-sm text-muted-foreground">
+              {formatDateTime(recording.createdAt)}
+            </span>
+            <DropdownMenu>
+              <DropdownMenuTrigger>
+                <MoreHorizontalIcon className="w-4 h-4" />
+              </DropdownMenuTrigger>
+
+              <DropdownMenuContent>
+                <DropdownMenuItem
+                  className="cursor-pointer"
+                  onClick={() => handleDownload(recording)}
+                >
+                  <DownloadIcon className="w-4 h-4 mr-2" />
+                  <span>{t("download")}</span>
+                </DropdownMenuItem>
+                <DropdownMenuItem
+                  className="cursor-pointer"
+                  onClick={() => setAssessing(recording)}
+                >
+                  <GaugeCircleIcon
+                    className={`w-4 h-4 mr-2
                     ${
                       recording.pronunciationAssessment
                         ? recording.pronunciationAssessment
@@ -184,48 +207,57 @@ export const MediaTranscriptionReadButton = (props: {
                         : ""
                     }
                   `}
-                          />
-                          <span>{t("pronunciationAssessment")}</span>
-                        </DropdownMenuItem>
-                        <DropdownMenuItem
-                          className="text-destructive cursor-pointer"
-                          onClick={() => setDeleting(recording)}
-                        >
-                          <Trash2Icon className="w-4 h-4 mr-2" />
-                          <span>{t("delete")}</span>
-                        </DropdownMenuItem>
-                      </DropdownMenuContent>
-                    </DropdownMenu>
-                  </div>
-                  <MediaPlayer
-                    duration={recording.duration / 1000}
-                    src={recording.src}
-                  >
-                    <MediaProvider />
-                    <DefaultAudioLayout icons={defaultLayoutIcons} />
-                  </MediaPlayer>
-                </div>
-              ))}
-              {hasMoreRecordings && (
-                <div className="flex items-center justify-center">
-                  <Button
-                    variant="secondary"
-                    onClick={() => fetchRecordings(recordings.length)}
-                  >
-                    {loadingRecordings && (
-                      <LoaderIcon className="w-4 h-4 animate-spin" />
-                    )}
-                    <span>{t("loadMore")}</span>
-                  </Button>
-                </div>
-              )}
-            </div>
-          </ScrollArea>
-          <div className="h-16 border-t">
-            {open && <RecorderButton onRecorded={() => fetchRecordings(0)} />}
-          </div>
-        </DialogContent>
-      </Dialog>
+                  />
+                  <span>{t("pronunciationAssessment")}</span>
+                </DropdownMenuItem>
+                <DropdownMenuItem
+                  className="text-destructive cursor-pointer"
+                  onClick={() => setDeleting(recording)}
+                >
+                  <Trash2Icon className="w-4 h-4 mr-2" />
+                  <span>{t("delete")}</span>
+                </DropdownMenuItem>
+              </DropdownMenuContent>
+            </DropdownMenu>
+          </div>
+          <WavesurferPlayer id={recording.id} src={recording.src} />
+        </div>
+      ))}
+      {hasMoreRecordings && (
+        <div className="flex items-center justify-center">
+          <Button
+            variant="secondary"
+            onClick={() => fetchRecordings(recordings.length)}
+          >
+            {loadingRecordings && (
+              <LoaderIcon className="w-4 h-4 animate-spin" />
+            )}
+            <span>{t("loadMore")}</span>
+          </Button>
+        </div>
+      )}
+
+      <Sheet
+        open={Boolean(assessing)}
+        onOpenChange={(open) => {
+          if (!open) setAssessing(undefined);
+        }}
+      >
+        <SheetContent
+          aria-describedby={undefined}
+          side="bottom"
+          className="rounded-t-2xl shadow-lg max-h-screen overflow-y-scroll"
+          displayClose={false}
+        >
+          <SheetHeader className="flex items-center justify-center -mt-4 mb-2">
+            <SheetClose>
+              <ChevronDownIcon />
+            </SheetClose>
+          </SheetHeader>
+
+          {assessing && <RecordingDetail recording={assessing} />}
+        </SheetContent>
+      </Sheet>
 
       <AlertDialog
         open={!!deleting}
@@ -249,76 +281,23 @@ export const MediaTranscriptionReadButton = (props: {
         </AlertDialogFooter>
       </AlertDialogContent>
     </AlertDialog>
 
-      <Sheet
-        open={Boolean(assessing)}
-        onOpenChange={(open) => {
-          if (!open) setAssessing(undefined);
-        }}
-      >
-        <SheetContent
-          aria-describedby={undefined}
-          side="bottom"
-          className="rounded-t-2xl shadow-lg max-h-screen overflow-y-scroll"
-          displayClose={false}
-        >
-          <SheetHeader className="flex items-center justify-center -mt-4 mb-2">
-            <SheetClose>
-              <ChevronDownIcon />
-            </SheetClose>
-          </SheetHeader>
-
-          {assessing && <RecordingDetail recording={assessing} />}
-        </SheetContent>
-      </Sheet>
-    </>
+    </div>
   );
 };
 
-const RecorderButton = (props: { onRecorded: () => void }) => {
-  const { onRecorded } = props;
+const RecorderButton = () => {
   const {
     media,
     recordingBlob,
     isRecording,
     isPaused,
     togglePauseResume,
     startRecording,
     stopRecording,
     transcription,
     currentSegmentIndex,
     mediaRecorder,
     recordingTime,
   } = useContext(MediaPlayerProviderContext);
   const { EnjoyApp } = useContext(AppSettingsProviderContext);
   const [access, setAccess] = useState<boolean>(false);
 
-  const createRecording = async (blob: Blob) => {
-    const currentSegment =
-      transcription?.result?.timeline?.[currentSegmentIndex];
-    if (!currentSegment) return;
-
-    EnjoyApp.recordings
-      .create({
-        targetId: media.id,
-        targetType: media.mediaType,
-        blob: {
-          type: recordingBlob.type.split(";")[0],
-          arrayBuffer: await blob.arrayBuffer(),
-        },
-        referenceId: -1,
-        referenceText: transcription.result.timeline
-          .map((s: TimelineEntry) => s.text)
-          .join("\n"),
-      })
-      .then(() =>
-        toast.success(t("recordingSaved"), { position: "bottom-right" })
-      )
-      .catch((err) =>
-        toast.error(t("failedToSaveRecording" + " : " + err.message))
-      );
-  };
-
   const askForMediaAccess = () => {
     EnjoyApp.system.preferences.mediaAccess("microphone").then((access) => {
       if (access) {
@@ -334,20 +313,6 @@ const RecorderButton = (props: { onRecorded: () => void }) => {
     askForMediaAccess();
   }, []);
 
-  useEffect(() => {
-    if (!media) return;
-    if (!transcription) return;
-    if (!recordingBlob) return;
-
-    createRecording(recordingBlob);
-  }, [recordingBlob, media, transcription]);
-
-  useEffect(() => {
-    if (recordingTime >= TEN_MINUTES) {
-      onRecorded();
-    }
-  }, [recordingTime]);
-
   if (isRecording) {
     return (
       <div className="h-16 flex items-center justify-center px-6">
@@ -16,7 +16,6 @@ export const WavesurferPlayer = (props: {
   id: string;
   src: string;
   height?: number;
-  currentTime?: number;
   setCurrentTime?: (currentTime: number) => void;
   onError?: (error: Error) => void;
   wavesurferOptions?: any;
@@ -28,7 +27,7 @@ export const WavesurferPlayer = (props: {
   src,
   height = 80,
   onError,
-  setCurrentTime,
+  setCurrentTime: onSetCurrentTime,
   wavesurferOptions,
   pitchContourOptions,
   className = "",
@@ -42,6 +41,7 @@ export const WavesurferPlayer = (props: {
   });
   const [duration, setDuration] = useState<number>(0);
   const [error, setError] = useState<string>(null);
+  const [currentTime, setCurrentTime] = useState<number>(0);
 
   const onPlayClick = useCallback(() => {
     if (!wavesurfer) return;
@@ -93,7 +93,8 @@ export const WavesurferPlayer = (props: {
         setIsPlaying(false);
       }),
       wavesurfer.on("timeupdate", (time: number) => {
-        setCurrentTime && setCurrentTime(time);
+        setCurrentTime(time);
+        onSetCurrentTime && onSetCurrentTime(time);
       }),
       wavesurfer.on("ready", () => {
         setDuration(wavesurfer.getDuration());
@@ -161,7 +162,7 @@ export const WavesurferPlayer = (props: {
     <div className="w-full max-w-screen-lg">
       <div className="flex justify-end">
         <span className="text-xs text-muted-foreground">
-          {secondsToTimestamp(duration)}
+          {secondsToTimestamp(currentTime)} / {secondsToTimestamp(duration)}
         </span>
       </div>
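With currentTime now held as internal state, the setCurrentTime prop becomes a pure change-notification callback (rebound to onSetCurrentTime inside the component) rather than the source of truth. A hedged usage sketch; the recording variable and the log handler are illustrative, not from the diff:

    <WavesurferPlayer
      id={recording.id}
      src={recording.src}
      setCurrentTime={(time) => console.log("playhead at", time)} // optional callback
    />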
@@ -84,6 +84,11 @@ export const PronunciationAssessmentWordResult = (props: {
     if (!audio.current) {
       audio.current = new Audio();
     }
+
+    return () => {
+      audio.current?.pause();
+      delete audio.current;
+    };
   }, []);
 
   return (
@@ -8,6 +8,7 @@ import { useState, useContext, useEffect } from "react";
 import { AppSettingsProviderContext } from "@renderer/context";
 import { Tooltip } from "react-tooltip";
 import { usePronunciationAssessments } from "@renderer/hooks";
+import { t } from "i18next";
 
 export const RecordingDetail = (props: {
   recording: RecordingType;
@@ -32,6 +33,10 @@ export const RecordingDetail = (props: {
     if (assessing) return;
     if (result) return;
 
+    if (recording.duration > 60 * 1000) {
+      toast.error(t("recordingIsTooLongToAssess"));
+      return;
+    }
     setAssessing(true);
     createAssessment({
       recording,
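Note the units: the Recording model above stores duration in milliseconds (Math.round(getRawAudioDuration(rawAudio) * 1000)), so the 60 * 1000 guard here matches the 60-second limit announced by the new recordingIsTooLongToAssess string.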
@@ -17,6 +17,10 @@ import { toast } from "@renderer/components/ui";
 import { Tooltip } from "react-tooltip";
 import { debounce } from "lodash";
 import { useAudioRecorder } from "react-audio-voice-recorder";
+import { t } from "i18next";
+
+const ONE_MINUTE = 60;
+const TEN_MINUTES = 10 * ONE_MINUTE;
 
 type MediaPlayerContextType = {
   layout: {
@@ -84,6 +88,8 @@ type MediaPlayerContextType = {
   recordingBlob: Blob;
   isRecording: boolean;
   isPaused: boolean;
+  recordingType: string;
+  setRecordingType: (type: string) => void;
   recordingTime: number;
   mediaRecorder: MediaRecorder;
   currentRecording: RecordingType;
@@ -171,6 +177,7 @@ export const MediaPlayerProvider = ({
   const [zoomRatio, setZoomRatio] = useState<number>(1.0);
 
   const [currentRecording, setCurrentRecording] = useState<RecordingType>(null);
+  const [recordingType, setRecordingType] = useState<string>("segment");
 
   const [transcriptionDraft, setTranscriptionDraft] =
     useState<TranscriptionType["result"]>();
@@ -454,6 +461,44 @@ export const MediaPlayerProvider = ({
 
   const deboundeCalculateHeight = debounce(calculateHeight, 100);
 
+  const createRecording = async (blob: Blob) => {
+    if (!blob) return;
+    if (!media) return;
+    if (!transcription?.result?.timeline) return;
+
+    let referenceId = -1;
+    let referenceText = transcription.result.timeline
+      .map((s: TimelineEntry) => s.text)
+      .join("\n");
+
+    if (recordingType === "segment") {
+      const currentSegment =
+        transcription?.result?.timeline?.[currentSegmentIndex];
+      if (!currentSegment) return;
+
+      referenceId = currentSegmentIndex;
+      referenceText = currentSegment.text;
+    }
+
+    EnjoyApp.recordings
+      .create({
+        targetId: media.id,
+        targetType: media.mediaType,
+        blob: {
+          type: recordingBlob.type.split(";")[0],
+          arrayBuffer: await blob.arrayBuffer(),
+        },
+        referenceId,
+        referenceText,
+      })
+      .then(() =>
+        toast.success(t("recordingSaved"), { position: "bottom-right" })
+      )
+      .catch((err) =>
+        toast.error(t("failedToSaveRecording") + " : " + err.message)
+      );
+  };
+
   /*
    * When wavesurfer is decoded,
    * set up event listeners for wavesurfer
@@ -607,6 +652,24 @@ export const MediaPlayerProvider = ({
     };
   }, []);
 
+  /**
+   * create recording when recordingBlob is updated
+   */
+  useEffect(() => {
+    createRecording(recordingBlob);
+  }, [recordingBlob]);
+
+  /**
+   * auto stop recording when recording time is over
+   */
+  useEffect(() => {
+    if (recordingType === "segment" && recordingTime >= ONE_MINUTE) {
+      stopRecording();
+    } else if (recordingTime >= TEN_MINUTES) {
+      stopRecording();
+    }
+  }, [recordingTime, recordingType]);
+
   return (
     <>
       <MediaPlayerProviderContext.Provider
@@ -648,6 +711,8 @@ export const MediaPlayerProvider = ({
         recordingBlob,
         isRecording,
         isPaused,
+        recordingType,
+        setRecordingType,
         recordingTime,
         mediaRecorder,
         currentRecording,
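The recordingType/setRecordingType pair exposed here drives the two auto-stop budgets above: "segment" takes stop at ONE_MINUTE, anything else (e.g. "transcription" read-throughs) at TEN_MINUTES. A minimal consumer sketch, assuming MediaPlayerProviderContext is exported from @renderer/context alongside AppSettingsProviderContext; the RecordingModeSwitch component is hypothetical:

    import { useContext } from "react";
    import { MediaPlayerProviderContext } from "@renderer/context";

    function RecordingModeSwitch() {
      const { recordingType, setRecordingType } = useContext(
        MediaPlayerProviderContext
      );
      // Read-through mode lifts the auto-stop cap from ONE_MINUTE to TEN_MINUTES.
      return (
        <button onClick={() => setRecordingType("transcription")}>
          current mode: {recordingType}
        </button>
      );
    }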
@@ -14,7 +14,11 @@ export function secondsToTimestamp(seconds: number) {
   const m = Math.floor((seconds % 3600) / 60).toString();
   const s = Math.floor((seconds % 3600) % 60).toString();
 
-  return `${h.padStart(2, "0")}:${m.padStart(2, "0")}:${s.padStart(2, "0")}`;
+  if (h === "0") {
+    return `${m.padStart(2, "0")}:${s.padStart(2, "0")}`;
+  } else {
+    return `${h.padStart(2, "0")}:${m.padStart(2, "0")}:${s.padStart(2, "0")}`;
+  }
 }
 
 export function humanizeDuration(
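Worked examples of the new branch: timestamps under an hour now drop the leading hour field.

    secondsToTimestamp(90);   // "01:30" (previously "00:01:30")
    secondsToTimestamp(3700); // "01:01:40" (unchanged)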
@@ -16,7 +16,6 @@ import {
 import {
   ConversationCard,
   ConversationForm,
-  LoaderSpin,
 } from "@renderer/components";
 import { useState, useEffect, useContext, useReducer } from "react";
 import { ChevronLeftIcon, LoaderIcon } from "lucide-react";
@@ -212,7 +211,7 @@ export default () => {
 
   useEffect(() => {
     preparePresets();
-  }, []);
+  }, [currentEngine]);
 
   return (
     <div className="h-full px-4 py-6 lg:px-8 flex flex-col">