Unverified Commit e51985a2 authored by boojack's avatar boojack Committed by GitHub

feat(memo-editor): add compact live waveform recorder panel (#5817)

parent 61c96384
......@@ -4,48 +4,56 @@ import { formatAudioTime } from "@/components/MemoMetadata/Attachment/attachment
import { Button } from "@/components/ui/button";
import { cn } from "@/lib/utils";
import { useTranslate } from "@/utils/i18n";
import { useAudioWaveform } from "../hooks/useAudioWaveform";
import type { AudioRecorderPanelProps } from "../types/components";
import { VoiceWaveform } from "./VoiceWaveform";
export const AudioRecorderPanel: FC<AudioRecorderPanelProps> = ({ audioRecorder, onStop, onCancel }) => {
export const AudioRecorderPanel: FC<AudioRecorderPanelProps> = ({ audioRecorder, mediaStream, onStop, onCancel }) => {
const t = useTranslate();
const { status, elapsedSeconds } = audioRecorder;
const isRequestingPermission = status === "requesting_permission";
const isRecording = status === "recording";
const waveformLevels = useAudioWaveform(mediaStream, isRecording && mediaStream !== null);
const srStatusText = isRequestingPermission ? t("editor.audio-recorder.requesting-permission") : t("editor.audio-recorder.recording");
return (
<div className="w-full rounded-lg border border-border/60 bg-muted/20 px-2.5 py-2">
<div className="flex items-center gap-2">
<div className="min-w-0 flex flex-1 gap-2">
<div className="truncate text-sm font-medium text-foreground">
{isRequestingPermission ? t("editor.audio-recorder.requesting-permission") : t("editor.audio-recorder.recording")}
</div>
<div
className={cn(
"inline-flex shrink-0 items-center gap-1.5 rounded-full px-2 py-0.5 text-xs font-medium",
isRequestingPermission
? "border border-border/60 bg-background text-muted-foreground"
: "border border-destructive/20 bg-destructive/[0.08] text-destructive",
"flex w-full items-center justify-between gap-2 rounded-lg border border-border bg-muted/30 px-2.5 py-1.5",
"dark:bg-muted/20",
)}
>
{isRequestingPermission ? (
<LoaderCircleIcon className="size-3 animate-spin" />
) : (
<span className="size-2 rounded-full bg-destructive" />
)}
{formatAudioTime(elapsedSeconds)}
</div>
<div className="flex min-w-0 flex-1 items-center gap-2">
{isRequestingPermission ? <LoaderCircleIcon className="size-3.5 shrink-0 animate-spin text-muted-foreground" aria-hidden /> : null}
<span className="sr-only">{srStatusText}</span>
<VoiceWaveform levels={waveformLevels} className="max-w-[200px] overflow-hidden" />
<span className="shrink-0 font-mono text-xs tabular-nums text-muted-foreground">{formatAudioTime(elapsedSeconds)}</span>
</div>
<div className="ml-auto flex shrink-0 items-center gap-1">
<Button variant="ghost" size="sm" onClick={onCancel} aria-label={t("common.cancel")}>
<XIcon className="size-4" />
<div className="flex shrink-0 items-center gap-1 border-l border-border/60 pl-2">
<Button
type="button"
variant="ghost"
size="icon"
className="size-7 shrink-0 rounded-full text-muted-foreground hover:bg-accent hover:text-foreground"
onClick={onCancel}
aria-label={t("common.cancel")}
>
<XIcon className="size-3.25" />
</Button>
<Button size="sm" className="gap-1.5" onClick={onStop} disabled={isRequestingPermission}>
<span className="size-2.5 rounded-[2px] bg-current" aria-hidden="true" />
{t("editor.audio-recorder.stop")}
<Button
type="button"
variant="destructive"
size="icon"
className="size-7 shrink-0 rounded-full shadow-xs"
onClick={onStop}
disabled={isRequestingPermission}
aria-label={t("editor.audio-recorder.stop")}
>
<span className="size-[7px] rounded-[1.5px] bg-destructive-foreground" aria-hidden />
</Button>
</div>
</div>
</div>
);
};
import type { FC } from "react";
import { cn } from "@/lib/utils";
/** Max half-height of each bar (px); bars are centered vertically. */
const MAX_BAR_PX = 11;
const MIN_BAR_PX = 2;
type VoiceWaveformProps = {
levels: number[];
className?: string;
};
/**
* Tight-packed vertical bars (rounded caps): fixed bar width + minimal gap — no `flex-1` columns
* so bars stay visually dense like compact voice-memo waveforms.
*/
export const VoiceWaveform: FC<VoiceWaveformProps> = ({ levels, className }) => {
return (
<div className={cn("flex h-5 w-max max-w-full shrink-0 items-center gap-px", className)} aria-hidden>
{levels.map((level, i) => {
const h = Math.max(MIN_BAR_PX, level * MAX_BAR_PX);
const centerDistance = Math.abs(i - (levels.length - 1) / 2) / (levels.length / 2);
const opacity = 0.35 + (1 - centerDistance) * 0.35;
return (
<span
key={`bar-${i}`}
className="w-[2px] shrink-0 rounded-full bg-muted-foreground transition-[height,opacity] duration-75 ease-out"
style={{ height: `${h}px`, opacity }}
/>
);
})}
</div>
);
};
// Custom hooks for MemoEditor (internal use only)
// Barrel file: re-exports the editor's hooks so consumers import from a single path.
export { useAudioRecorder } from "./useAudioRecorder"; // mic capture / recording lifecycle
export { useAudioWaveform } from "./useAudioWaveform"; // live waveform levels from a MediaStream
export { useAutoSave } from "./useAutoSave";
export { useBlobUrls } from "./useBlobUrls";
export { useDragAndDrop } from "./useDragAndDrop";
......
import { useEffect, useRef } from "react";
import { useEffect, useRef, useState } from "react";
import type { LocalFile } from "../types/attachment";
import { useBlobUrls } from "./useBlobUrls";
......@@ -50,6 +50,7 @@ function createRecordedFile(blob: Blob, mimeType: string): File {
export const useAudioRecorder = (actions: AudioRecorderActions) => {
const mediaRecorderRef = useRef<MediaRecorder | null>(null);
const mediaStreamRef = useRef<MediaStream | null>(null);
const [recordingStream, setRecordingStream] = useState<MediaStream | null>(null);
const chunksRef = useRef<Blob[]>([]);
const startedAtRef = useRef<number | null>(null);
const elapsedTimerRef = useRef<number | null>(null);
......@@ -67,6 +68,7 @@ export const useAudioRecorder = (actions: AudioRecorderActions) => {
// Stops every track on the captured mic stream, then clears both the ref and
// the state copy (the state copy is what the live-waveform UI observes).
const cleanupStream = () => {
  mediaStreamRef.current?.getTracks().forEach((track) => track.stop());
  mediaStreamRef.current = null;
  setRecordingStream(null);
};
const resetRecorderRefs = () => {
......@@ -130,6 +132,7 @@ export const useAudioRecorder = (actions: AudioRecorderActions) => {
recorderMimeTypeRef.current = mimeType;
mediaStreamRef.current = stream;
setRecordingStream(stream);
mediaRecorderRef.current = mediaRecorder;
chunksRef.current = [];
......@@ -221,5 +224,6 @@ export const useAudioRecorder = (actions: AudioRecorderActions) => {
startRecording,
stopRecording,
resetRecording,
recordingStream,
};
};
import { useEffect, useState } from "react";
/** Number of bars rendered in the waveform strip. */
const BAR_COUNT = 40;
/** Baseline level used when there is no (or negligible) signal. */
const IDLE_LEVEL = 0.08;
/** Minimum interval between level emissions (~30 fps). */
const UPDATE_MS = 33;
/** Strength of the center-weighted fade applied toward the edges. */
const CENTER_EMPHASIS = 0.35;
/** Offset, exponent, and gain that lift quiet input into a visible range. */
const BOOST_OFFSET = 0.04;
const BOOST_CURVE = 0.55;
const BOOST_GAIN = 2.8;

/** A flat array of BAR_COUNT idle-baseline levels. */
const createIdleLevels = (): number[] => new Array<number>(BAR_COUNT).fill(IDLE_LEVEL);

/** Clamps a number into the [0, 1] range. */
const clamp01 = (value: number): number => Math.min(1, Math.max(0, value));

/**
 * Converts raw byte-frequency data into BAR_COUNT normalized (0–1) bar levels:
 * bucket-average the spectrum, boost quiet amplitudes, mirror each bar with
 * its opposite, apply a center-weighted envelope, then lightly smooth.
 */
function computeLevels(dataArray: Uint8Array, bufferLength: number): number[] {
  // 1) Average the spectrum into BAR_COUNT buckets and boost low amplitudes.
  const bucketed: number[] = [];
  for (let bar = 0; bar < BAR_COUNT; bar++) {
    const from = Math.floor((bar / BAR_COUNT) * bufferLength);
    const to = Math.floor(((bar + 1) / BAR_COUNT) * bufferLength);
    let total = 0;
    for (let bin = from; bin < to; bin++) {
      total += dataArray[bin];
    }
    const width = Math.max(1, to - from);
    const mean = total / (255 * width);
    const shaped = Math.min(1, (mean + BOOST_OFFSET) ** BOOST_CURVE * BOOST_GAIN);
    bucketed.push(Math.max(IDLE_LEVEL, shaped));
  }

  // 2) Mirror each bar with its opposite and fade toward the edges.
  const midIndex = (BAR_COUNT - 1) / 2;
  const result: number[] = [];
  for (let bar = 0; bar < BAR_COUNT; bar++) {
    const opposite = BAR_COUNT - 1 - bar;
    const paired = (bucketed[bar] + bucketed[opposite]) / 2;
    const distance = Math.abs(bar - midIndex) / (BAR_COUNT / 2);
    const envelope = 1 - distance * CENTER_EMPHASIS;
    result.push(Math.max(IDLE_LEVEL, clamp01(paired * envelope)));
  }

  // 3) In-place 1-2-1 smoothing over interior bars (endpoints untouched;
  // the left neighbor read here is already smoothed, matching original order).
  for (let bar = 1; bar < BAR_COUNT - 1; bar++) {
    result[bar] = Math.max(IDLE_LEVEL, (result[bar - 1] + result[bar] * 2 + result[bar + 1]) / 4);
  }
  return result;
}
/**
 * Derives normalized bar levels (0–1) from a microphone MediaStream for the
 * live waveform UI. While disabled (or without a stream) it reports the idle
 * baseline; otherwise it samples an AnalyserNode on an animation-frame loop,
 * throttled to roughly one emission every UPDATE_MS milliseconds.
 */
export function useAudioWaveform(stream: MediaStream | null, enabled: boolean): number[] {
  const [levels, setLevels] = useState<number[]>(createIdleLevels);

  useEffect(() => {
    // No active capture: show the flat idle baseline and skip audio setup.
    if (!stream || !enabled) {
      setLevels(createIdleLevels());
      return;
    }

    const audioContext = new AudioContext();
    const micSource = audioContext.createMediaStreamSource(stream);
    const analyser = audioContext.createAnalyser();
    analyser.fftSize = 512;
    analyser.minDecibels = -72;
    analyser.maxDecibels = -8;
    analyser.smoothingTimeConstant = 0.72;
    micSource.connect(analyser);

    const binCount = analyser.frequencyBinCount;
    const frequencyData = new Uint8Array(binCount);

    let frameHandle = 0;
    let lastEmitAt = 0;
    let isFirstFrame = true;

    const sampleFrame = (timestamp: number) => {
      analyser.getByteFrequencyData(frequencyData);
      // Emit immediately on the first frame, then throttle to UPDATE_MS.
      if (isFirstFrame || timestamp - lastEmitAt >= UPDATE_MS) {
        isFirstFrame = false;
        lastEmitAt = timestamp;
        setLevels(computeLevels(frequencyData, binCount));
      }
      frameHandle = requestAnimationFrame(sampleFrame);
    };

    // The context may start suspended (autoplay policy); resume before sampling.
    const begin = async () => {
      await audioContext.resume();
      frameHandle = requestAnimationFrame(sampleFrame);
    };
    void begin();

    return () => {
      cancelAnimationFrame(frameHandle);
      micSource.disconnect();
      analyser.disconnect();
      void audioContext.close();
    };
  }, [stream, enabled]);

  return levels;
}
......@@ -206,6 +206,7 @@ const MemoEditorImpl: React.FC<MemoEditorProps> = ({
{isAudioRecorderOpen && (state.audioRecorder.status === "recording" || state.audioRecorder.status === "requesting_permission") && (
<AudioRecorderPanel
audioRecorder={state.audioRecorder}
mediaStream={audioRecorder.recordingStream}
onStop={audioRecorder.stopRecording}
onCancel={handleCancelAudioRecording}
/>
......
......@@ -32,6 +32,8 @@ export interface EditorMetadataProps {
/** Props for the compact recorder panel shown while audio is being captured. */
export interface AudioRecorderPanelProps {
  /** Recorder slice of the editor state (exposes `status` and `elapsedSeconds`). */
  audioRecorder: EditorState["audioRecorder"];
  /** Active mic stream while recording; used for live waveform visualization. */
  mediaStream: MediaStream | null;
  /** Called when the user presses stop to end the recording. */
  onStop: () => void;
  /** Called when the user discards the in-progress recording. */
  onCancel: () => void;
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment