Unverified Commit e51985a2 authored by boojack's avatar boojack Committed by GitHub

feat(memo-editor): add compact live waveform recorder panel (#5817)

parent 61c96384
...@@ -4,47 +4,55 @@ import { formatAudioTime } from "@/components/MemoMetadata/Attachment/attachment ...@@ -4,47 +4,55 @@ import { formatAudioTime } from "@/components/MemoMetadata/Attachment/attachment
import { Button } from "@/components/ui/button"; import { Button } from "@/components/ui/button";
import { cn } from "@/lib/utils"; import { cn } from "@/lib/utils";
import { useTranslate } from "@/utils/i18n"; import { useTranslate } from "@/utils/i18n";
import { useAudioWaveform } from "../hooks/useAudioWaveform";
import type { AudioRecorderPanelProps } from "../types/components"; import type { AudioRecorderPanelProps } from "../types/components";
import { VoiceWaveform } from "./VoiceWaveform";
export const AudioRecorderPanel: FC<AudioRecorderPanelProps> = ({ audioRecorder, onStop, onCancel }) => { export const AudioRecorderPanel: FC<AudioRecorderPanelProps> = ({ audioRecorder, mediaStream, onStop, onCancel }) => {
const t = useTranslate(); const t = useTranslate();
const { status, elapsedSeconds } = audioRecorder; const { status, elapsedSeconds } = audioRecorder;
const isRequestingPermission = status === "requesting_permission"; const isRequestingPermission = status === "requesting_permission";
const isRecording = status === "recording";
const waveformLevels = useAudioWaveform(mediaStream, isRecording && mediaStream !== null);
const srStatusText = isRequestingPermission ? t("editor.audio-recorder.requesting-permission") : t("editor.audio-recorder.recording");
return ( return (
<div className="w-full rounded-lg border border-border/60 bg-muted/20 px-2.5 py-2"> <div
<div className="flex items-center gap-2"> className={cn(
<div className="min-w-0 flex flex-1 gap-2"> "flex w-full items-center justify-between gap-2 rounded-lg border border-border bg-muted/30 px-2.5 py-1.5",
<div className="truncate text-sm font-medium text-foreground"> "dark:bg-muted/20",
{isRequestingPermission ? t("editor.audio-recorder.requesting-permission") : t("editor.audio-recorder.recording")} )}
</div> >
<div <div className="flex min-w-0 flex-1 items-center gap-2">
className={cn( {isRequestingPermission ? <LoaderCircleIcon className="size-3.5 shrink-0 animate-spin text-muted-foreground" aria-hidden /> : null}
"inline-flex shrink-0 items-center gap-1.5 rounded-full px-2 py-0.5 text-xs font-medium", <span className="sr-only">{srStatusText}</span>
isRequestingPermission <VoiceWaveform levels={waveformLevels} className="max-w-[200px] overflow-hidden" />
? "border border-border/60 bg-background text-muted-foreground" <span className="shrink-0 font-mono text-xs tabular-nums text-muted-foreground">{formatAudioTime(elapsedSeconds)}</span>
: "border border-destructive/20 bg-destructive/[0.08] text-destructive", </div>
)}
>
{isRequestingPermission ? (
<LoaderCircleIcon className="size-3 animate-spin" />
) : (
<span className="size-2 rounded-full bg-destructive" />
)}
{formatAudioTime(elapsedSeconds)}
</div>
</div>
<div className="ml-auto flex shrink-0 items-center gap-1"> <div className="flex shrink-0 items-center gap-1 border-l border-border/60 pl-2">
<Button variant="ghost" size="sm" onClick={onCancel} aria-label={t("common.cancel")}> <Button
<XIcon className="size-4" /> type="button"
</Button> variant="ghost"
<Button size="sm" className="gap-1.5" onClick={onStop} disabled={isRequestingPermission}> size="icon"
<span className="size-2.5 rounded-[2px] bg-current" aria-hidden="true" /> className="size-7 shrink-0 rounded-full text-muted-foreground hover:bg-accent hover:text-foreground"
{t("editor.audio-recorder.stop")} onClick={onCancel}
</Button> aria-label={t("common.cancel")}
</div> >
<XIcon className="size-3.25" />
</Button>
<Button
type="button"
variant="destructive"
size="icon"
className="size-7 shrink-0 rounded-full shadow-xs"
onClick={onStop}
disabled={isRequestingPermission}
aria-label={t("editor.audio-recorder.stop")}
>
<span className="size-[7px] rounded-[1.5px] bg-destructive-foreground" aria-hidden />
</Button>
</div> </div>
</div> </div>
); );
......
import type { FC } from "react";
import { cn } from "@/lib/utils";
/** Max half-height of each bar (px); bars are centered vertically. */
const MAX_BAR_PX = 11;
const MIN_BAR_PX = 2;
type VoiceWaveformProps = {
levels: number[];
className?: string;
};
/**
* Tight-packed vertical bars (rounded caps): fixed bar width + minimal gap — no `flex-1` columns
* so bars stay visually dense like compact voice-memo waveforms.
*/
export const VoiceWaveform: FC<VoiceWaveformProps> = ({ levels, className }) => {
return (
<div className={cn("flex h-5 w-max max-w-full shrink-0 items-center gap-px", className)} aria-hidden>
{levels.map((level, i) => {
const h = Math.max(MIN_BAR_PX, level * MAX_BAR_PX);
const centerDistance = Math.abs(i - (levels.length - 1) / 2) / (levels.length / 2);
const opacity = 0.35 + (1 - centerDistance) * 0.35;
return (
<span
key={`bar-${i}`}
className="w-[2px] shrink-0 rounded-full bg-muted-foreground transition-[height,opacity] duration-75 ease-out"
style={{ height: `${h}px`, opacity }}
/>
);
})}
</div>
);
};
// Custom hooks for MemoEditor (internal use only) // Custom hooks for MemoEditor (internal use only)
export { useAudioRecorder } from "./useAudioRecorder"; export { useAudioRecorder } from "./useAudioRecorder";
export { useAudioWaveform } from "./useAudioWaveform";
export { useAutoSave } from "./useAutoSave"; export { useAutoSave } from "./useAutoSave";
export { useBlobUrls } from "./useBlobUrls"; export { useBlobUrls } from "./useBlobUrls";
export { useDragAndDrop } from "./useDragAndDrop"; export { useDragAndDrop } from "./useDragAndDrop";
......
import { useEffect, useRef } from "react"; import { useEffect, useRef, useState } from "react";
import type { LocalFile } from "../types/attachment"; import type { LocalFile } from "../types/attachment";
import { useBlobUrls } from "./useBlobUrls"; import { useBlobUrls } from "./useBlobUrls";
...@@ -50,6 +50,7 @@ function createRecordedFile(blob: Blob, mimeType: string): File { ...@@ -50,6 +50,7 @@ function createRecordedFile(blob: Blob, mimeType: string): File {
export const useAudioRecorder = (actions: AudioRecorderActions) => { export const useAudioRecorder = (actions: AudioRecorderActions) => {
const mediaRecorderRef = useRef<MediaRecorder | null>(null); const mediaRecorderRef = useRef<MediaRecorder | null>(null);
const mediaStreamRef = useRef<MediaStream | null>(null); const mediaStreamRef = useRef<MediaStream | null>(null);
const [recordingStream, setRecordingStream] = useState<MediaStream | null>(null);
const chunksRef = useRef<Blob[]>([]); const chunksRef = useRef<Blob[]>([]);
const startedAtRef = useRef<number | null>(null); const startedAtRef = useRef<number | null>(null);
const elapsedTimerRef = useRef<number | null>(null); const elapsedTimerRef = useRef<number | null>(null);
...@@ -67,6 +68,7 @@ export const useAudioRecorder = (actions: AudioRecorderActions) => { ...@@ -67,6 +68,7 @@ export const useAudioRecorder = (actions: AudioRecorderActions) => {
const cleanupStream = () => { const cleanupStream = () => {
mediaStreamRef.current?.getTracks().forEach((track) => track.stop()); mediaStreamRef.current?.getTracks().forEach((track) => track.stop());
mediaStreamRef.current = null; mediaStreamRef.current = null;
setRecordingStream(null);
}; };
const resetRecorderRefs = () => { const resetRecorderRefs = () => {
...@@ -130,6 +132,7 @@ export const useAudioRecorder = (actions: AudioRecorderActions) => { ...@@ -130,6 +132,7 @@ export const useAudioRecorder = (actions: AudioRecorderActions) => {
recorderMimeTypeRef.current = mimeType; recorderMimeTypeRef.current = mimeType;
mediaStreamRef.current = stream; mediaStreamRef.current = stream;
setRecordingStream(stream);
mediaRecorderRef.current = mediaRecorder; mediaRecorderRef.current = mediaRecorder;
chunksRef.current = []; chunksRef.current = [];
...@@ -221,5 +224,6 @@ export const useAudioRecorder = (actions: AudioRecorderActions) => { ...@@ -221,5 +224,6 @@ export const useAudioRecorder = (actions: AudioRecorderActions) => {
startRecording, startRecording,
stopRecording, stopRecording,
resetRecording, resetRecording,
recordingStream,
}; };
}; };
import { useEffect, useState } from "react";
// Number of bars rendered in the waveform.
const BAR_COUNT = 40;
// Baseline level shown when there is no signal, so bars stay visible.
const IDLE_LEVEL = 0.08;
// Minimum interval between state emissions (~30 fps throttle).
const UPDATE_MS = 33;
// How strongly bar heights taper toward the edges (0 = flat, 1 = full taper).
const CENTER_EMPHASIS = 0.35;
// Shaping parameters that boost quiet input into a visible range.
const BOOST_OFFSET = 0.04;
const BOOST_CURVE = 0.55;
const BOOST_GAIN = 2.8;

/** Returns a fresh array of BAR_COUNT bars, all at the idle level. */
const createIdleLevels = (): number[] => new Array<number>(BAR_COUNT).fill(IDLE_LEVEL);

/** Restricts a value to the [0, 1] range. */
const clamp01 = (value: number): number => (value < 0 ? 0 : value > 1 ? 1 : value);
/**
 * Collapses raw byte frequency data into BAR_COUNT normalized levels (0–1):
 * bucket-average the spectrum, boost quiet signals into a visible range,
 * mirror for left/right symmetry, taper toward the edges, then lightly
 * smooth neighbouring bars.
 */
function computeLevels(dataArray: Uint8Array, bufferLength: number): number[] {
  // 1) Average each frequency bucket into one boosted sample.
  const sampled: number[] = [];
  for (let i = 0; i < BAR_COUNT; i++) {
    const start = Math.floor((i / BAR_COUNT) * bufferLength);
    const end = Math.floor(((i + 1) / BAR_COUNT) * bufferLength);
    let total = 0;
    for (let j = start; j < end; j++) {
      total += dataArray[j];
    }
    // Guard against empty buckets when bufferLength < BAR_COUNT.
    const width = Math.max(1, end - start);
    const average = total / (255 * width);
    const boosted = Math.min(1, (average + BOOST_OFFSET) ** BOOST_CURVE * BOOST_GAIN);
    sampled.push(Math.max(IDLE_LEVEL, boosted));
  }

  // 2) Mirror each bar with its opposite and taper heights toward the edges.
  const mirrored = sampled.map((value, i) => {
    const opposite = sampled[BAR_COUNT - 1 - i];
    const blended = (value + opposite) / 2;
    const centerDistance = Math.abs(i - (BAR_COUNT - 1) / 2) / (BAR_COUNT / 2);
    const envelope = 1 - centerDistance * CENTER_EMPHASIS;
    return Math.max(IDLE_LEVEL, clamp01(blended * envelope));
  });

  // 3) Weighted neighbour smoothing. This pass is sequential and in-place on
  // purpose: each bar reads the already-smoothed value of its left neighbour.
  for (let i = 1; i < BAR_COUNT - 1; i++) {
    mirrored[i] = Math.max(IDLE_LEVEL, (mirrored[i - 1] + mirrored[i] * 2 + mirrored[i + 1]) / 4);
  }
  return mirrored;
}
/**
 * Derives normalized bar levels (0–1) from a microphone MediaStream for live waveform UI.
 *
 * While `enabled` is true and a stream is present, the stream is routed
 * through an AnalyserNode and levels are republished at most once every
 * UPDATE_MS. Otherwise the idle baseline is shown. The audio graph and the
 * animation frame loop are fully torn down on cleanup.
 */
export function useAudioWaveform(stream: MediaStream | null, enabled: boolean): number[] {
  const [levels, setLevels] = useState<number[]>(createIdleLevels);

  useEffect(() => {
    // No live input: reset to the idle baseline and skip building the graph.
    if (!stream || !enabled) {
      setLevels(createIdleLevels());
      return;
    }

    const context = new AudioContext();
    const micSource = context.createMediaStreamSource(stream);
    const analyserNode = context.createAnalyser();
    analyserNode.fftSize = 512;
    analyserNode.minDecibels = -72;
    analyserNode.maxDecibels = -8;
    analyserNode.smoothingTimeConstant = 0.72;
    micSource.connect(analyserNode);

    const binCount = analyserNode.frequencyBinCount;
    const frequencyData = new Uint8Array(binCount);

    let frameId = 0;
    let lastEmitAt = 0;
    let isFirstFrame = true;

    const tick = (now: number) => {
      analyserNode.getByteFrequencyData(frequencyData);
      // Emit immediately on the first frame, then throttle to UPDATE_MS.
      if (isFirstFrame || now - lastEmitAt >= UPDATE_MS) {
        isFirstFrame = false;
        lastEmitAt = now;
        setLevels(computeLevels(frequencyData, binCount));
      }
      frameId = requestAnimationFrame(tick);
    };

    const begin = async () => {
      // Resume first: contexts created outside a user gesture may start suspended.
      await context.resume();
      frameId = requestAnimationFrame(tick);
    };
    void begin();

    return () => {
      cancelAnimationFrame(frameId);
      micSource.disconnect();
      analyserNode.disconnect();
      void context.close();
    };
  }, [stream, enabled]);

  return levels;
}
...@@ -206,6 +206,7 @@ const MemoEditorImpl: React.FC<MemoEditorProps> = ({ ...@@ -206,6 +206,7 @@ const MemoEditorImpl: React.FC<MemoEditorProps> = ({
{isAudioRecorderOpen && (state.audioRecorder.status === "recording" || state.audioRecorder.status === "requesting_permission") && ( {isAudioRecorderOpen && (state.audioRecorder.status === "recording" || state.audioRecorder.status === "requesting_permission") && (
<AudioRecorderPanel <AudioRecorderPanel
audioRecorder={state.audioRecorder} audioRecorder={state.audioRecorder}
mediaStream={audioRecorder.recordingStream}
onStop={audioRecorder.stopRecording} onStop={audioRecorder.stopRecording}
onCancel={handleCancelAudioRecording} onCancel={handleCancelAudioRecording}
/> />
......
...@@ -32,6 +32,8 @@ export interface EditorMetadataProps { ...@@ -32,6 +32,8 @@ export interface EditorMetadataProps {
export interface AudioRecorderPanelProps { export interface AudioRecorderPanelProps {
audioRecorder: EditorState["audioRecorder"]; audioRecorder: EditorState["audioRecorder"];
/** Active mic stream while recording; used for live waveform visualization. */
mediaStream: MediaStream | null;
onStop: () => void; onStop: () => void;
onCancel: () => void; onCancel: () => void;
} }
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment