diff --git a/.llm/research/mixer-routing-gap.md b/.llm/research/mixer-routing-gap.md new file mode 100644 index 00000000..17a5e103 --- /dev/null +++ b/.llm/research/mixer-routing-gap.md @@ -0,0 +1,114 @@ +# Mixer & Signal Routing — Gap Analysis vs openDAW + +**Date:** 2026-03-27 +**Theme:** Mixer & Signal Routing is the largest depth gap (~35% complete vs pro DAW) +**Reference:** openDAW `packages/studio/core/src/Mixer.ts`, `lib-dsp/src/graph.ts` + +## Executive Summary + +ACE-Step-DAW has a functional mixer with volume/pan/EQ/compressor/reverb per channel, 2 send slots, and basic group tracks. However, it lacks the **routing depth** that defines a professional mixer: pre/post fader sends, group bus processing, topological routing validation, send automation, and dynamic slot limits. + +openDAW implements a two-layer solo system (UI + audio thread), topological sort with loop detection, pre/post aux sends with ramped gains, and a full graph-based routing model. These patterns are well-documented and adaptable to our React + Zustand + Tone.js stack. 
+ +## Current State (ACE-Step-DAW) + +### What Works +- Channel strip: volume (0-1), pan (-1 to +1), 3-band fixed EQ, compressor, convolver reverb +- Sends: `Send { returnTrackId, amount }` — 2 slots max, post-fader only (implicit) +- Return tracks: `ReturnTrack { id, name, effects[], volume, pan }` +- Group tracks: `isGroup` flag, `parentTrackId` for hierarchy, mute/solo propagation to children +- Solo logic: flat — if any track soloed, all others with `soloActive=true` get muted +- Audio routing: child tracks re-routed to group's `inputGain` via `rerouteOutput()` + +### What's Missing (Priority Order) + +| # | Gap | Impact | Effort | +|---|-----|--------|--------| +| 1 | Pre/post fader sends | Blocks headphone mix, parallel compression | M | +| 2 | Group bus processing (effects on groups) | Blocks drum bus compression, submix FX | M | +| 3 | Dynamic send/insert slots | Blocks complex routing (3+ aux buses) | S | +| 4 | Routing validation (topo sort) | Risk of feedback loops, undefined behavior | M | +| 5 | Send automation | Can't automate reverb sends over time | S | +| 6 | Virtual solo (routing-aware) | Solo'd group doesn't bring input tracks | S | +| 7 | Signal flow visualization | Users can't see routing topology | L | +| 8 | Return track metering | Can't monitor what feeds a return | S | +| 9 | Sidechain routing UI | Param exists but no picker | S | +| 10 | Mid/Side processing | Advanced mixing technique unavailable | L | + +## openDAW Reference Patterns + +### 1. Pre/Post Sends +```typescript +// openDAW: AudioSendRouting enum +enum AudioSendRouting { Pre, Post } + +// AuxSendProcessor applies gain + pan, taps pre or post fader +// Pre: tapped before channel volume node +// Post: tapped after channel volume node (default) +``` + +**Our adaptation:** Add `preFader?: boolean` to `Send` interface. In `TrackNode`, create a send tap point before `volumeGain` (pre) or after `volumeGain` (post). Wire send gain nodes accordingly. + +### 2. 
Virtual Solo (Routing-Aware) +```typescript +// openDAW: Two-pass algorithm +// Pass 1: Trace upstream from solo'd channels → mark as virtualSolo +// Pass 2: Trace downstream from solo'd channels → mark bus outputs as virtualSolo +// Result: Solo a group → its children stay audible +``` + +**Our adaptation:** Replace flat `soloActive` boolean with graph traversal: +```typescript +function computeAudibleTracks(tracks: Track[]): Set { + const soloed = tracks.filter(t => t.soloed); + if (soloed.length === 0) return new Set(tracks.filter(t => !t.muted).map(t => t.id)); + + const audible = new Set(soloed.map(t => t.id)); + // Upstream: if group soloed, add all children + for (const t of soloed) { + if (t.isGroup) addChildren(t.id, tracks, audible); + } + // Downstream: if child soloed, add parent group chain + for (const t of soloed) { + let parent = t.parentTrackId; + while (parent) { + audible.add(parent); + parent = tracks.find(p => p.id === parent)?.parentTrackId; + } + } + return audible; +} +``` + +### 3. Topological Sort for Routing +```typescript +// openDAW: TopologicalSort class +// - Builds successor map from edges +// - Computes transitive closure (iterative fixed-point) +// - DFS with loop detection +// - Output: sorted order + hasLoops flag +``` + +**Our adaptation:** Before connecting Web Audio nodes, build a directed graph of track→return→master routing and sort topologically. Reject routing changes that create cycles. + +### 4. Group Bus Processing +openDAW treats groups as full audio units with their own effect chain, sends, and volume/pan. Our groups are organizational only — audio routes through `inputGain` but bypasses effects. + +**Our adaptation:** When `isGroup`, the TrackNode should have its own effect chain that processes the summed input of all children before routing to master/parent. + +## Proposed Issue Breakdown + +1. **feat: add pre/post fader toggle to sends** — Extend Send interface, wire tap points in TrackNode +2. 
**feat: enable effects processing on group tracks** — Allow effect chain on group TrackNodes +3. **feat: remove hardcoded send/insert slot limits** — Dynamic arrays instead of MAX_SEND_SLOTS=2 +4. **feat: add routing validation with topological sort** — Prevent feedback loops in send routing +5. **feat: add send amount to automation parameters** — Extend AutomationParameter for sends + +## Files to Modify + +- `src/types/project.ts` — Send interface, Track interface +- `src/engine/TrackNode.ts` — Pre/post tap points, group effect chain +- `src/engine/AudioEngine.ts` — Solo algorithm, routing validation +- `src/store/projectStore.ts` — Store actions for new send fields, group effects +- `src/components/mixer/MixerPanel.tsx` — UI for pre/post toggle, dynamic slots +- `src/utils/effectAutomation.ts` — Send automation parameter descriptors diff --git a/src/components/mixer/LevelMeter.tsx b/src/components/mixer/LevelMeter.tsx index d6cf2f8b..bd2c33b2 100644 --- a/src/components/mixer/LevelMeter.tsx +++ b/src/components/mixer/LevelMeter.tsx @@ -21,9 +21,10 @@ function getMeterColor(level: number): string { interface LevelMeterProps { trackId?: string; masterStage?: 'input' | 'output'; + returnTrackId?: string; } -export function LevelMeter({ trackId, masterStage }: LevelMeterProps) { +export function LevelMeter({ trackId, masterStage, returnTrackId }: LevelMeterProps) { const rafRef = useRef(0); const peakLevelRef = useRef(0); const peakHoldFramesRef = useRef(0); @@ -37,9 +38,11 @@ export function LevelMeter({ trackId, masterStage }: LevelMeterProps) { const tick = () => { const meter = masterStage ? engine.getMasterMeter(masterStage) - : trackId - ? engine.getTrackMeter(trackId) - : { level: 0, clipped: false }; + : returnTrackId + ? engine.getReturnTrackMeter(returnTrackId) + : trackId + ? 
engine.getTrackMeter(trackId) + : { level: 0, clipped: false }; const nextLevel = meter.level; setLevel(nextLevel); setClipped((wasClipped) => wasClipped || meter.clipped); @@ -59,19 +62,25 @@ export function LevelMeter({ trackId, masterStage }: LevelMeterProps) { rafRef.current = requestAnimationFrame(tick); return () => cancelAnimationFrame(rafRef.current); - }, [trackId, masterStage]); + }, [trackId, masterStage, returnTrackId]); const label = masterStage ? `Master ${masterStage} level meter` - : `Mixer level meter for ${trackId}`; + : returnTrackId + ? `Return track level meter for ${returnTrackId}` + : `Mixer level meter for ${trackId}`; const clipResetLabel = masterStage ? `Reset clip indicator for master ${masterStage}` - : `Reset clip indicator for ${trackId}`; + : returnTrackId + ? `Reset clip indicator for return ${returnTrackId}` + : `Reset clip indicator for ${trackId}`; const resetClip = () => { const engine = getAudioEngine(); if (masterStage) { engine.resetMasterClip(masterStage); + } else if (returnTrackId) { + engine.resetReturnTrackClip(returnTrackId); } else if (trackId) { engine.resetTrackClip(trackId); } diff --git a/src/components/mixer/MixerPanel.tsx b/src/components/mixer/MixerPanel.tsx index af85ba78..8d0e97a4 100644 --- a/src/components/mixer/MixerPanel.tsx +++ b/src/components/mixer/MixerPanel.tsx @@ -184,24 +184,39 @@ function ChannelStrip({ track, faderHeight, returnTracks }: ChannelStripProps) { const rt = returnTracks[i]; const send = rt ? sends.find((s) => s.returnTrackId === rt.id) : undefined; const amount = send?.amount ?? 0; + const isPreFader = send?.preFader ?? false; return (
{rt ? ( <> - {rt.name} + {rt.name} + updateTrackSend(track.id, rt.id, parseFloat(e.target.value))} + onChange={(e) => updateTrackSend(track.id, rt.id, parseFloat(e.target.value), isPreFader)} aria-label={`Send ${track.displayName} to ${rt.name}`} - className="w-14 h-3 accent-blue-500" + className="w-10 h-3 accent-blue-500" disabled={isFrozen} /> @@ -271,6 +286,65 @@ interface MasterStripProps { faderHeight: number; } +interface ReturnTrackStripProps { + returnTrack: ReturnTrack; + faderHeight: number; +} + +function ReturnTrackStrip({ returnTrack, faderHeight }: ReturnTrackStripProps) { + const updateReturnTrack = useProjectStore((s) => s.updateReturnTrack); + + return ( +
+ {/* Return track label */} + + {returnTrack.name} + + + {/* Pan knob */} + updateReturnTrack(returnTrack.id, { pan: v })} + label="Pan" + size={28} + step={0.01} + /> + + {/* Effects indicator */} +
+ {returnTrack.effects.length > 0 + ? {returnTrack.effects.length} FX + : No FX + } +
+ + {/* Volume fader + meter */} +
+
+ + updateReturnTrack(returnTrack.id, { volume: v })} + aria-label={`${returnTrack.name} volume fader`} + accentColor="#2dd4bf" + width={12} + /> +
+ {volumeToDb(returnTrack.volume)} +
+
+ ); +} + function MasterStrip({ faderHeight }: MasterStripProps) { const project = useProjectStore((s) => s.project); const updateProject = useProjectStore((s) => s.updateProject); @@ -406,6 +480,14 @@ export function MixerPanel() { {[...project.tracks].sort((a, b) => a.order - b.order).map((track) => ( ))} + {returnTracks.length > 0 && ( + <> +
+ {returnTracks.map((rt) => ( + + ))} + + )}
diff --git a/src/engine/AudioEngine.ts b/src/engine/AudioEngine.ts index fdfeb040..d189a66b 100644 --- a/src/engine/AudioEngine.ts +++ b/src/engine/AudioEngine.ts @@ -1,12 +1,16 @@ import * as Tone from 'tone'; import { TrackNode } from './TrackNode'; +import { ReturnTrackNode } from './ReturnTrackNode'; import type { AudioWarpMarker, GainEnvelopePoint, MasteringState, + ReturnTrack, + Send, SequencerPattern, TempoEvent, TimeSignatureEvent, + Track, } from '../types/project'; import { ensureMasteringState } from '../utils/mastering'; import { applyClipFadeAutomation } from '../utils/clipFade'; @@ -84,6 +88,7 @@ export class AudioEngine { ctx: AudioContext; masterGain: GainNode; trackNodes: Map = new Map(); + returnTrackNodes: Map = new Map(); scheduledSources: ScheduledSource[] = []; private readonly masterInputGain: GainNode; private readonly masterDryGain: GainNode; @@ -1104,12 +1109,111 @@ export class AudioEngine { return this.ctx.decodeAudioData(arrayBuffer); } + // ----------------------------------------------------------------------- + // Return Track Nodes & Send Routing + // ----------------------------------------------------------------------- + + getOrCreateReturnTrackNode(returnTrackId: string): ReturnTrackNode { + let node = this.returnTrackNodes.get(returnTrackId); + if (!node) { + node = new ReturnTrackNode(this.ctx, this.masterInputGain); + this.returnTrackNodes.set(returnTrackId, node); + } + return node; + } + + removeReturnTrackNode(returnTrackId: string) { + const node = this.returnTrackNodes.get(returnTrackId); + if (node) { + node.disconnect(); + this.returnTrackNodes.delete(returnTrackId); + } + } + + getReturnTrackMeter(returnTrackId: string): { level: number; leftLevel: number; rightLevel: number; clipped: boolean } { + return this.returnTrackNodes.get(returnTrackId)?.getMeter() ?? 
{ level: 0, leftLevel: 0, rightLevel: 0, clipped: false }; + } + + resetReturnTrackClip(returnTrackId: string) { + this.returnTrackNodes.get(returnTrackId)?.resetClip(); + } + + /** + * Synchronize send routing between tracks and return tracks. + * Creates/updates ReturnTrackNodes, wires send gain nodes, and cleans up stale connections. + */ + syncSends(tracks: Track[], returnTracks: ReturnTrack[]) { + const returnTrackIds = new Set(returnTracks.map(rt => rt.id)); + + // 1. Create/update ReturnTrackNodes + for (const rt of returnTracks) { + const node = this.getOrCreateReturnTrackNode(rt.id); + node.volume = rt.volume; + node.pan = rt.pan; + } + + // 2. Remove ReturnTrackNodes that no longer exist in the data model + for (const [id] of this.returnTrackNodes) { + if (!returnTrackIds.has(id)) { + this.removeReturnTrackNode(id); + } + } + + // 3. Wire sends for each track + // Build a set of currently active send connections to detect stale ones + const activeSends = new Map>(); // trackId → Set + + for (const track of tracks) { + const trackNode = this.trackNodes.get(track.id); + if (!trackNode) continue; + + const sends = track.sends ?? []; + const activeReturnIds = new Set(); + + for (const send of sends) { + if (!returnTrackIds.has(send.returnTrackId)) continue; + if (send.amount <= 0) continue; + + const returnNode = this.returnTrackNodes.get(send.returnTrackId); + if (!returnNode) continue; + + activeReturnIds.add(send.returnTrackId); + trackNode.connectSend(send.returnTrackId, returnNode.inputGain, send.amount, send.preFader ?? false); + } + + activeSends.set(track.id, activeReturnIds); + } + + // 4. Disconnect sends that are no longer active + // (connectSend already disconnects existing before reconnecting, so this handles + // the case where a send was removed entirely from the track's sends array) + // TrackNode.connectSend calls disconnectSend first, so reconnections are safe. 
+ // We only need to handle sends that were removed from the array: + for (const track of tracks) { + const trackNode = this.trackNodes.get(track.id); + if (!trackNode) continue; + const active = activeSends.get(track.id) ?? new Set(); + // Note: TrackNode doesn't expose its sendGains keys, so we rely on + // connectSend's built-in disconnect-before-connect behavior. + // For sends with amount=0, explicitly disconnect: + for (const send of (track.sends ?? [])) { + if (send.amount <= 0 && !active.has(send.returnTrackId)) { + trackNode.disconnectSend(send.returnTrackId); + } + } + } + } + dispose() { this.stop(); for (const node of this.trackNodes.values()) { node.disconnect(); } this.trackNodes.clear(); + for (const node of this.returnTrackNodes.values()) { + node.disconnect(); + } + this.returnTrackNodes.clear(); this.ctx.close(); } } diff --git a/src/engine/ReturnTrackNode.ts b/src/engine/ReturnTrackNode.ts new file mode 100644 index 00000000..87ba0664 --- /dev/null +++ b/src/engine/ReturnTrackNode.ts @@ -0,0 +1,190 @@ +/** + * Audio channel strip for return (aux) tracks. + * + * Signal chain: + * inputGain → [effects splice] → volumeGain → panNode → analyserNode → destination + * + * Simpler than TrackNode: no EQ, compressor, or reverb (those come from + * EffectsEngine if the user adds insert effects to the return track). 
+ */ +export class ReturnTrackNode { + readonly inputGain: GainNode; + private readonly volumeGain: GainNode; + private readonly panNode: StereoPannerNode; + private readonly analyserNode: AnalyserNode; + private readonly analyserData: Uint8Array; + private readonly analyserTimeDomainData: Float32Array; + private readonly splitter: ChannelSplitterNode; + private readonly analyserLeft: AnalyserNode; + private readonly analyserRight: AnalyserNode; + private readonly analyserLeftData: Uint8Array; + private readonly analyserRightData: Uint8Array; + private readonly analyserLeftTimeDomain: Float32Array; + private readonly analyserRightTimeDomain: Float32Array; + + private _volume = 1; + private _muted = false; + private _clipped = false; + private _effectsInput: AudioNode | null = null; + private _effectsOutput: AudioNode | null = null; + + /** Fade duration in seconds to avoid audio clicks. */ + static readonly MUTE_FADE_SEC = 0.005; + private static readonly CLIP_THRESHOLD = 0.995; + + constructor(private ctx: AudioContext, destination: AudioNode) { + this.inputGain = ctx.createGain(); + this.volumeGain = ctx.createGain(); + this.panNode = ctx.createStereoPanner(); + this.analyserNode = ctx.createAnalyser(); + this.analyserNode.fftSize = 2048; + this.analyserNode.smoothingTimeConstant = 0.75; + this.analyserData = new Uint8Array(this.analyserNode.frequencyBinCount); + this.analyserTimeDomainData = new Float32Array(this.analyserNode.fftSize); + + // Stereo metering + this.splitter = ctx.createChannelSplitter(2); + this.analyserLeft = ctx.createAnalyser(); + this.analyserLeft.fftSize = 2048; + this.analyserLeft.smoothingTimeConstant = 0.75; + this.analyserRight = ctx.createAnalyser(); + this.analyserRight.fftSize = 2048; + this.analyserRight.smoothingTimeConstant = 0.75; + this.analyserLeftData = new Uint8Array(this.analyserLeft.frequencyBinCount); + this.analyserRightData = new Uint8Array(this.analyserRight.frequencyBinCount); + this.analyserLeftTimeDomain = new 
Float32Array(this.analyserLeft.fftSize); + this.analyserRightTimeDomain = new Float32Array(this.analyserRight.fftSize); + + // Wire: inputGain → volumeGain → panNode → analyserNode → destination + this.inputGain.connect(this.volumeGain); + this.volumeGain.connect(this.panNode); + this.panNode.connect(this.analyserNode); + this.analyserNode.connect(destination); + + // Stereo metering tap + this.panNode.connect(this.splitter); + this.splitter.connect(this.analyserLeft, 0); + this.splitter.connect(this.analyserRight, 1); + } + + // ----------------------------------------------------------------------- + // Volume / Mute + // ----------------------------------------------------------------------- + + get volume() { return this._volume; } + set volume(v: number) { this._volume = v; this._applyGain(); } + + get muted() { return this._muted; } + set muted(v: boolean) { this._muted = v; this._applyGain(); } + + private _applyGain() { + const target = this._muted ? 0 : this._volume; + const now = this.ctx.currentTime; + this.volumeGain.gain.cancelScheduledValues(now); + this.volumeGain.gain.setValueAtTime(this.volumeGain.gain.value, now); + this.volumeGain.gain.linearRampToValueAtTime(target, now + ReturnTrackNode.MUTE_FADE_SEC); + } + + // ----------------------------------------------------------------------- + // Pan + // ----------------------------------------------------------------------- + + set pan(v: number) { + this.panNode.pan.value = Math.max(-1, Math.min(1, v)); + } + + // ----------------------------------------------------------------------- + // Effects splice + // ----------------------------------------------------------------------- + + /** + * Splice an external effects chain between inputGain and volumeGain. + * Pass null/null to remove effects and restore the direct path. 
+ */ + spliceEffects(input: AudioNode | null, output: AudioNode | null) { + try { this.inputGain.disconnect(this.volumeGain); } catch { /* noop */ } + + if (this._effectsOutput) { + try { this._effectsOutput.disconnect(this.volumeGain); } catch { /* noop */ } + } + + if (input && output) { + this.inputGain.connect(input); + output.connect(this.volumeGain); + } else { + this.inputGain.connect(this.volumeGain); + } + + this._effectsInput = input; + this._effectsOutput = output; + } + + // ----------------------------------------------------------------------- + // Metering + // ----------------------------------------------------------------------- + + getMeter(): { level: number; leftLevel: number; rightLevel: number; clipped: boolean } { + this.analyserNode.getByteFrequencyData(this.analyserData); + this.analyserNode.getFloatTimeDomainData(this.analyserTimeDomainData); + + let spectralPeak = 0; + for (let i = 0; i < this.analyserData.length; i++) { + if (this.analyserData[i] > spectralPeak) spectralPeak = this.analyserData[i]; + } + + let samplePeak = 0; + for (let i = 0; i < this.analyserTimeDomainData.length; i++) { + const abs = Math.abs(this.analyserTimeDomainData[i]); + if (abs > samplePeak) samplePeak = abs; + } + + if (samplePeak >= ReturnTrackNode.CLIP_THRESHOLD) this._clipped = true; + + const leftLevel = this._getChannelLevel(this.analyserLeft, this.analyserLeftData, this.analyserLeftTimeDomain); + const rightLevel = this._getChannelLevel(this.analyserRight, this.analyserRightData, this.analyserRightTimeDomain); + const level = Math.max(leftLevel, rightLevel); + + return { level: Math.max(0, Math.min(1, level)), leftLevel, rightLevel, clipped: this._clipped }; + } + + private _getChannelLevel( + analyser: AnalyserNode, + freqData: Uint8Array, + timeDomainData: Float32Array, + ): number { + analyser.getByteFrequencyData(freqData); + analyser.getFloatTimeDomainData(timeDomainData); + + let spectralPeak = 0; + for (let i = 0; i < freqData.length; i++) { + if 
(freqData[i] > spectralPeak) spectralPeak = freqData[i]; + } + + let samplePeak = 0; + for (let i = 0; i < timeDomainData.length; i++) { + const abs = Math.abs(timeDomainData[i]); + if (abs > samplePeak) samplePeak = abs; + } + + if (samplePeak >= ReturnTrackNode.CLIP_THRESHOLD) this._clipped = true; + + return Math.max(0, Math.min(1, Math.max(spectralPeak / 255, samplePeak))); + } + + resetClip() { this._clipped = false; } + + // ----------------------------------------------------------------------- + + disconnect() { + this.inputGain.disconnect(); + this.volumeGain.disconnect(); + this.panNode.disconnect(); + this.analyserNode.disconnect(); + this.splitter.disconnect(); + this.analyserLeft.disconnect(); + this.analyserRight.disconnect(); + if (this._effectsOutput) { + try { this._effectsOutput.disconnect(); } catch { /* noop */ } + } + } +} diff --git a/src/engine/TrackNode.ts b/src/engine/TrackNode.ts index 41ea7063..a737dd5d 100644 --- a/src/engine/TrackNode.ts +++ b/src/engine/TrackNode.ts @@ -42,6 +42,13 @@ export class TrackNode { private _clipped = false; private latencyCompNode: DelayNode | null = null; + /** + * Send gain nodes per return track ID. + * Each send has two gain nodes (pre and post fader); only one is non-zero + * at a time. This avoids reconnecting the audio graph on pre/post toggle. 
+ */ + private readonly sendGains = new Map(); + private static readonly CLIP_THRESHOLD = 0.995; constructor(private ctx: AudioContext, destination: AudioNode) { @@ -370,6 +377,7 @@ export class TrackNode { this.latencyCompNode.disconnect(this.volumeGain); this.compressor.connect(this.volumeGain); this.latencyCompNode = null; + this._reconnectPreFaderSends(); } return; } @@ -387,6 +395,7 @@ export class TrackNode { this.compressor.disconnect(this.volumeGain); this.compressor.connect(this.latencyCompNode); this.latencyCompNode.connect(this.volumeGain); + this._reconnectPreFaderSends(); } } @@ -401,7 +410,106 @@ export class TrackNode { this.analyserNode.connect(destination); } + // ----------------------------------------------------------------------- + // Sends (Pre/Post Fader) + // ----------------------------------------------------------------------- + + /** + * The node just before volumeGain — used as the pre-fader send tap point. + * If latency compensation is active, this is the delay node; otherwise the compressor. + */ + get preFaderOutput(): AudioNode { + return this.latencyCompNode ?? this.compressor; + } + + /** + * Connect a send to a return track's input. + * Creates two gain nodes (pre + post fader) and connects them. + * Only the active tap has non-zero gain; the other is silent. + */ + connectSend(returnTrackId: string, destination: AudioNode, amount: number, preFader: boolean) { + // Disconnect existing send if any + this.disconnectSend(returnTrackId); + + const preGain = this.ctx.createGain(); + const postGain = this.ctx.createGain(); + + // Set initial gains based on pre/post mode + preGain.gain.value = preFader ? amount : 0; + postGain.gain.value = preFader ? 
0 : amount; + + // Pre-fader: tap after compressor (or latency comp), before volumeGain + this.preFaderOutput.connect(preGain); + preGain.connect(destination); + + // Post-fader: tap after volumeGain + this.volumeGain.connect(postGain); + postGain.connect(destination); + + this.sendGains.set(returnTrackId, { pre: preGain, post: postGain }); + } + + /** + * Update send amount and/or pre/post mode with click-free gain ramp. + */ + updateSendAmount(returnTrackId: string, amount: number, preFader: boolean) { + const send = this.sendGains.get(returnTrackId); + if (!send) return; + + const now = this.ctx.currentTime; + const preTarget = preFader ? amount : 0; + const postTarget = preFader ? 0 : amount; + + send.pre.gain.cancelScheduledValues(now); + send.pre.gain.setValueAtTime(send.pre.gain.value, now); + send.pre.gain.linearRampToValueAtTime(preTarget, now + TrackNode.MUTE_FADE_SEC); + + send.post.gain.cancelScheduledValues(now); + send.post.gain.setValueAtTime(send.post.gain.value, now); + send.post.gain.linearRampToValueAtTime(postTarget, now + TrackNode.MUTE_FADE_SEC); + } + + /** + * Disconnect and remove a single send. + */ + disconnectSend(returnTrackId: string) { + const send = this.sendGains.get(returnTrackId); + if (!send) return; + try { send.pre.disconnect(); } catch { /* noop */ } + try { send.post.disconnect(); } catch { /* noop */ } + // Also disconnect the source connections to the gain nodes + try { this.preFaderOutput.disconnect(send.pre); } catch { /* noop */ } + try { this.volumeGain.disconnect(send.post); } catch { /* noop */ } + this.sendGains.delete(returnTrackId); + } + + /** + * Disconnect all sends (called during cleanup). + */ + disconnectAllSends() { + for (const [id] of this.sendGains) { + this.disconnectSend(id); + } + } + + /** + * Reconnect pre-fader sends after latency compensation node changes. + * Must be called when setLatencyCompensation adds/removes the delay node. 
+ */ + private _reconnectPreFaderSends() { + for (const [, send] of this.sendGains) { + // Disconnect old pre-fader source (could be compressor or old latencyCompNode) + try { this.compressor.disconnect(send.pre); } catch { /* noop */ } + if (this.latencyCompNode) { + try { this.latencyCompNode.disconnect(send.pre); } catch { /* noop */ } + } + // Reconnect to current pre-fader output + this.preFaderOutput.connect(send.pre); + } + } + disconnect() { + this.disconnectAllSends(); this.inputGain.disconnect(); this.panNode.disconnect(); this.eqLow.disconnect(); diff --git a/src/engine/__tests__/ReturnTrackNode.test.ts b/src/engine/__tests__/ReturnTrackNode.test.ts new file mode 100644 index 00000000..c9a1b55c --- /dev/null +++ b/src/engine/__tests__/ReturnTrackNode.test.ts @@ -0,0 +1,141 @@ +import { describe, it, expect, beforeEach, vi } from 'vitest'; +import { ReturnTrackNode } from '../ReturnTrackNode'; + +function makeAudioParam(initial = 0) { + let _value = initial; + const rampCalls: { value: number; endTime: number }[] = []; + return { + get value() { return _value; }, + set value(v: number) { _value = v; }, + linearRampToValueAtTime(value: number, endTime: number) { + rampCalls.push({ value, endTime }); + _value = value; + return this; + }, + setValueAtTime(value: number, _time: number) { + _value = value; + return this; + }, + cancelScheduledValues() { return this; }, + rampCalls, + }; +} + +function makeNode(overrides: Record = {}) { + return { + connect: vi.fn().mockReturnThis(), + disconnect: vi.fn(), + ...overrides, + }; +} + +function makeAudioContext(): AudioContext { + return { + get currentTime() { return 0; }, + sampleRate: 44100, + createGain() { return makeNode({ gain: makeAudioParam(1) }); }, + createStereoPanner() { return makeNode({ pan: makeAudioParam(0) }); }, + createAnalyser() { + return makeNode({ + fftSize: 2048, + smoothingTimeConstant: 0.6, + frequencyBinCount: 1024, + getByteFrequencyData: vi.fn(), + getFloatFrequencyData: vi.fn(), + 
getFloatTimeDomainData: vi.fn(), + }); + }, + createChannelSplitter() { return makeNode(); }, + } as unknown as AudioContext; +} + +describe('ReturnTrackNode', () => { + let ctx: AudioContext; + let destination: ReturnType; + let node: ReturnTrackNode; + + beforeEach(() => { + ctx = makeAudioContext(); + destination = makeNode(); + node = new ReturnTrackNode(ctx, destination as unknown as AudioNode); + }); + + it('connects signal chain: inputGain → volumeGain → panNode → analyser → destination', () => { + // inputGain connects to volumeGain + expect((node.inputGain as any).connect).toHaveBeenCalled(); + }); + + it('applies volume with 5ms click-free ramp', () => { + node.volume = 0.5; + // Volume gain should have been ramped + const volumeGain = (node as any).volumeGain; + const param = volumeGain.gain; + expect(param.rampCalls.length).toBeGreaterThan(0); + expect(param.rampCalls[param.rampCalls.length - 1].value).toBe(0.5); + }); + + it('mutes to 0 with ramp', () => { + node.volume = 0.8; + node.muted = true; + const param = (node as any).volumeGain.gain; + expect(param.rampCalls[param.rampCalls.length - 1].value).toBe(0); + }); + + it('unmutes restores volume', () => { + node.volume = 0.7; + node.muted = true; + node.muted = false; + const param = (node as any).volumeGain.gain; + expect(param.rampCalls[param.rampCalls.length - 1].value).toBe(0.7); + }); + + it('sets pan value clamped to [-1, 1]', () => { + node.pan = 2; + expect((node as any).panNode.pan.value).toBe(1); + node.pan = -5; + expect((node as any).panNode.pan.value).toBe(-1); + }); + + it('spliceEffects inserts chain between input and volume', () => { + const effectInput = makeNode(); + const effectOutput = makeNode(); + node.spliceEffects(effectInput as unknown as AudioNode, effectOutput as unknown as AudioNode); + // inputGain should connect to effectInput + expect((node.inputGain as any).connect).toHaveBeenCalledWith(effectInput); + // effectOutput should connect to volumeGain + 
expect(effectOutput.connect).toHaveBeenCalled(); + }); + + it('spliceEffects(null, null) restores direct path', () => { + const effectInput = makeNode(); + const effectOutput = makeNode(); + node.spliceEffects(effectInput as unknown as AudioNode, effectOutput as unknown as AudioNode); + node.spliceEffects(null, null); + // inputGain should reconnect directly to volumeGain + const connectCalls = (node.inputGain as any).connect.mock.calls; + const lastCall = connectCalls[connectCalls.length - 1]; + expect(lastCall[0]).toBe((node as any).volumeGain); + }); + + it('disconnect() disconnects all nodes', () => { + node.disconnect(); + expect((node.inputGain as any).disconnect).toHaveBeenCalled(); + expect((node as any).volumeGain.disconnect).toHaveBeenCalled(); + expect((node as any).panNode.disconnect).toHaveBeenCalled(); + expect((node as any).analyserNode.disconnect).toHaveBeenCalled(); + }); + + it('getMeter returns level data', () => { + const meter = node.getMeter(); + expect(meter).toHaveProperty('level'); + expect(meter).toHaveProperty('leftLevel'); + expect(meter).toHaveProperty('rightLevel'); + expect(meter).toHaveProperty('clipped'); + expect(meter.level).toBe(0); + }); + + it('resetClip clears clipped state', () => { + node.resetClip(); + expect(node.getMeter().clipped).toBe(false); + }); +}); diff --git a/src/engine/__tests__/TrackNode.sends.test.ts b/src/engine/__tests__/TrackNode.sends.test.ts new file mode 100644 index 00000000..bdaa05d2 --- /dev/null +++ b/src/engine/__tests__/TrackNode.sends.test.ts @@ -0,0 +1,193 @@ +import { describe, it, expect, beforeEach, vi } from 'vitest'; +import { TrackNode } from '../TrackNode'; + +function makeAudioParam(initial = 0) { + let _value = initial; + const rampCalls: { value: number; endTime: number }[] = []; + return { + get value() { return _value; }, + set value(v: number) { _value = v; }, + linearRampToValueAtTime(value: number, endTime: number) { + rampCalls.push({ value, endTime }); + _value = value; + return 
this; + }, + setValueAtTime(value: number, _time: number) { + _value = value; + return this; + }, + cancelScheduledValues() { return this; }, + rampCalls, + }; +} + +function makeNode(overrides: Record = {}) { + return { + connect: vi.fn().mockReturnThis(), + disconnect: vi.fn(), + ...overrides, + }; +} + +function makeAudioContext(): AudioContext { + return { + get currentTime() { return 0; }, + sampleRate: 44100, + createGain() { return makeNode({ gain: makeAudioParam(1) }); }, + createStereoPanner() { return makeNode({ pan: makeAudioParam(0) }); }, + createBiquadFilter() { + return makeNode({ + type: 'lowshelf', + frequency: makeAudioParam(1000), + Q: makeAudioParam(1), + gain: makeAudioParam(0), + }); + }, + createDynamicsCompressor() { + return makeNode({ + threshold: makeAudioParam(0), + ratio: makeAudioParam(1), + attack: makeAudioParam(0.003), + release: makeAudioParam(0.25), + knee: makeAudioParam(30), + }); + }, + createConvolver() { return makeNode({ buffer: null }); }, + createAnalyser() { + return makeNode({ + fftSize: 2048, + smoothingTimeConstant: 0.6, + frequencyBinCount: 1024, + getByteFrequencyData: vi.fn(), + getFloatFrequencyData: vi.fn(), + getFloatTimeDomainData: vi.fn(), + }); + }, + createChannelSplitter() { return makeNode(); }, + createBuffer(_channels: number, length: number, sampleRate: number) { + const data = new Float32Array(length); + return { getChannelData: () => data, sampleRate, length, numberOfChannels: _channels, duration: length / sampleRate }; + }, + createDelay() { return makeNode({ delayTime: makeAudioParam(0) }); }, + } as unknown as AudioContext; +} + +describe('TrackNode sends', () => { + let ctx: AudioContext; + let destination: ReturnType; + let node: TrackNode; + let returnInput: ReturnType; + + beforeEach(() => { + ctx = makeAudioContext(); + destination = makeNode(); + node = new TrackNode(ctx, destination as unknown as AudioNode); + returnInput = makeNode({ gain: makeAudioParam(1) }); + }); + + 
describe('connectSend', () => { + it('creates pre and post gain nodes connected to destination', () => { + node.connectSend('ret1', returnInput as unknown as AudioNode, 0.6, false); + // Post-fader: volumeGain should connect to a post gain node + const volumeGainConnects = (node.volumeGain as any).connect.mock.calls; + // volumeGain connects to: analyserNode, splitter, and now a post-fader send gain + expect(volumeGainConnects.length).toBeGreaterThanOrEqual(3); + }); + + it('post-fader send: pre gain = 0, post gain = amount', () => { + node.connectSend('ret1', returnInput as unknown as AudioNode, 0.7, false); + const sends = (node as any).sendGains; + const send = sends.get('ret1'); + expect(send).toBeDefined(); + expect(send.pre.gain.value).toBe(0); + expect(send.post.gain.value).toBe(0.7); + }); + + it('pre-fader send: pre gain = amount, post gain = 0', () => { + node.connectSend('ret1', returnInput as unknown as AudioNode, 0.5, true); + const send = (node as any).sendGains.get('ret1'); + expect(send.pre.gain.value).toBe(0.5); + expect(send.post.gain.value).toBe(0); + }); + + it('overwrites existing send on same returnTrackId', () => { + node.connectSend('ret1', returnInput as unknown as AudioNode, 0.3, false); + node.connectSend('ret1', returnInput as unknown as AudioNode, 0.9, true); + const send = (node as any).sendGains.get('ret1'); + expect(send.pre.gain.value).toBe(0.9); + expect(send.post.gain.value).toBe(0); + }); + }); + + describe('updateSendAmount', () => { + it('ramps gain values for click-free transition', () => { + node.connectSend('ret1', returnInput as unknown as AudioNode, 0.5, false); + node.updateSendAmount('ret1', 0.8, false); + const send = (node as any).sendGains.get('ret1'); + // Post gain should be ramped to 0.8 + expect(send.post.gain.rampCalls.length).toBeGreaterThan(0); + expect(send.post.gain.value).toBe(0.8); + }); + + it('switches from post to pre-fader', () => { + node.connectSend('ret1', returnInput as unknown as AudioNode, 0.6, 
false); + node.updateSendAmount('ret1', 0.6, true); + const send = (node as any).sendGains.get('ret1'); + // Pre should now have the amount, post should be 0 + expect(send.pre.gain.value).toBe(0.6); + expect(send.post.gain.value).toBe(0); + }); + + it('no-op for unknown returnTrackId', () => { + node.updateSendAmount('unknown', 0.5, false); + // Should not throw + }); + }); + + describe('disconnectSend', () => { + it('removes send gains and disconnects', () => { + node.connectSend('ret1', returnInput as unknown as AudioNode, 0.5, false); + node.disconnectSend('ret1'); + expect((node as any).sendGains.has('ret1')).toBe(false); + }); + + it('no-op for unknown returnTrackId', () => { + node.disconnectSend('unknown'); + // Should not throw + }); + }); + + describe('disconnectAllSends', () => { + it('removes all sends', () => { + node.connectSend('ret1', returnInput as unknown as AudioNode, 0.5, false); + node.connectSend('ret2', returnInput as unknown as AudioNode, 0.3, true); + node.disconnectAllSends(); + expect((node as any).sendGains.size).toBe(0); + }); + }); + + describe('disconnect cleans up sends', () => { + it('disconnect() also disconnects all sends', () => { + node.connectSend('ret1', returnInput as unknown as AudioNode, 0.5, false); + node.disconnect(); + expect((node as any).sendGains.size).toBe(0); + }); + }); + + describe('preFaderOutput', () => { + it('returns compressor by default', () => { + expect(node.preFaderOutput).toBe((node as any).compressor); + }); + + it('returns latencyCompNode when set', () => { + node.setLatencyCompensation(512, 44100); + expect(node.preFaderOutput).toBe((node as any).latencyCompNode); + }); + + it('returns compressor after removing latency compensation', () => { + node.setLatencyCompensation(512, 44100); + node.setLatencyCompensation(0, 44100); + expect(node.preFaderOutput).toBe((node as any).compressor); + }); + }); +}); diff --git a/src/hooks/__tests__/useTransport.strudel.test.ts 
b/src/hooks/__tests__/useTransport.strudel.test.ts index 8a46feca..40c45e88 100644 --- a/src/hooks/__tests__/useTransport.strudel.test.ts +++ b/src/hooks/__tests__/useTransport.strudel.test.ts @@ -11,6 +11,7 @@ const mocks = vi.hoisted(() => ({ setOnEndedCallback: vi.fn(), trackNodes: new Map(), updateSoloState: vi.fn(), + syncSends: vi.fn(), setPlaybackLatencyCompensation: vi.fn(), applyMastering: vi.fn(), masterVolume: 1, diff --git a/src/hooks/useTransport.ts b/src/hooks/useTransport.ts index 9eeecc20..81737b02 100644 --- a/src/hooks/useTransport.ts +++ b/src/hooks/useTransport.ts @@ -140,6 +140,7 @@ export function useTransport() { const { isPlaying, currentTime } = useTransportStore(); const isRecording = useTransportStore((s) => s.isRecording); const playbackTracks = useProjectStore((s) => s.project?.tracks); + const playbackReturnTracks = useProjectStore((s) => s.project?.returnTracks); const masterVolume = useProjectStore((s) => s.project?.masterVolume ?? 1.0); const playbackLatency = useProjectStore((s) => s.project?.playbackLatency); const mastering = useProjectStore((s) => s.project?.mastering); @@ -314,6 +315,9 @@ export function useTransport() { engine.updateSoloState(); + // Wire aux sends to return tracks + engine.syncSends(proj.tracks, proj.returnTracks ?? []); + let startFrom = fromTime ?? useTransportStore.getState().playStartTime; // When loop is enabled, use loop boundaries for playback range @@ -838,7 +842,12 @@ export function useTransport() { } } engine.updateSoloState(); - }, [isPlaying, masterVolume, mastering, playbackLatency, playbackTracks]); + + // Sync aux send routing (handles amount, pre/post, and return track params) + if (playbackTracks) { + engine.syncSends(playbackTracks, playbackReturnTracks ?? 
[]); + }, [isPlaying, masterVolume, mastering, playbackLatency, playbackTracks, playbackReturnTracks]); return { isPlaying, diff --git a/src/store/projectStore.ts b/src/store/projectStore.ts index 7100601b..eb438be0 100644 --- a/src/store/projectStore.ts +++ b/src/store/projectStore.ts @@ -797,7 +797,7 @@ export interface ProjectState { addReturnTrack: (name?: string) => ReturnTrack; removeReturnTrack: (returnTrackId: string) => void; updateReturnTrack: (returnTrackId: string, updates: Partial<Omit<ReturnTrack, 'id'>>) => void; - updateTrackSend: (trackId: string, returnTrackId: string, amount: number) => void; + updateTrackSend: (trackId: string, returnTrackId: string, amount: number, preFader?: boolean) => void; // Track grouping / folder tracks createGroupTrack: (name: string) => Track; @@ -6653,7 +6653,7 @@ export const useProjectStore = create<ProjectState>()( }); }, - updateTrackSend: (trackId, returnTrackId, amount) => { + updateTrackSend: (trackId, returnTrackId, amount, preFader?) => { const state = get(); if (!state.project) return; _pushHistory(state.project); @@ -6669,9 +6669,11 @@ // Remove the send if amount is 0 or negative if (existingIdx >= 0) sends.splice(existingIdx, 1); } else if (existingIdx >= 0) { - sends[existingIdx] = { ...sends[existingIdx], amount }; + const updated = { ...sends[existingIdx], amount }; + if (preFader !== undefined) updated.preFader = preFader; + sends[existingIdx] = updated; } else { - sends.push({ returnTrackId, amount }); + sends.push({ returnTrackId, amount, preFader: preFader ?? 
false }); } return { ...track, sends }; }), diff --git a/src/types/project.ts b/src/types/project.ts index a0fd69d0..3da7b5be 100644 --- a/src/types/project.ts +++ b/src/types/project.ts @@ -461,6 +461,7 @@ export interface SequencerPattern { export interface Send { returnTrackId: string; amount: number; // 0–1 + preFader?: boolean; // default false (post-fader) } export interface ReturnTrack { diff --git a/tests/unit/useTransportScrubLifecycle.test.tsx b/tests/unit/useTransportScrubLifecycle.test.tsx index a13ce399..807dd178 100644 --- a/tests/unit/useTransportScrubLifecycle.test.tsx +++ b/tests/unit/useTransportScrubLifecycle.test.tsx @@ -35,6 +35,7 @@ const engineMock = { })), setTrackGroupRouting: vi.fn(), updateSoloState: vi.fn(), + syncSends: vi.fn(), playing: false, masterVolume: 1, };