From 79fcaf6db73b5f187a03d1e130e828a85a492d9f Mon Sep 17 00:00:00 2001
From: Claude
Date: Sun, 12 Apr 2026 06:26:40 +0000
Subject: [PATCH 01/27] WIP: Add modular architecture types and audio decoder

Begin refactoring sendspin-js into a core library (protocol + decoding)
and a player wrapper (Web Audio playback). This commit adds:

- DecodedAudioChunk, SendspinCoreConfig, StreamHandler interfaces to types.ts
- SendspinDecoder class in audio-decoder.ts: standalone PCM/Opus/FLAC
  decoder that produces raw Float32Array[] samples without Web Audio
  playback concerns

https://claude.ai/code/session_018UYYEXUZVuQ2Z4Texa7W6m
---
 src/audio-decoder.ts | 610 +++++++++++++++++++++++++++++++++++++++++++
 src/types.ts         |  78 ++++++
 2 files changed, 688 insertions(+)
 create mode 100644 src/audio-decoder.ts

diff --git a/src/audio-decoder.ts b/src/audio-decoder.ts
new file mode 100644
index 0000000..2431ae4
--- /dev/null
+++ b/src/audio-decoder.ts
@@ -0,0 +1,610 @@
+/**
+ * Audio decoder pipeline for Sendspin protocol.
+ *
+ * Decodes compressed audio (PCM, Opus, FLAC) into raw Float32Array PCM samples.
+ * This module has no Web Audio playback concerns — it only produces decoded data.
+ */
+
+import type { StreamFormat, DecodedAudioChunk } from "./types";
+
+export class SendspinDecoder {
+  private onDecodedChunk: (chunk: DecodedAudioChunk) => void;
+  private currentGeneration: () => number;
+
+  // Native Opus decoder (WebCodecs API)
+  private webCodecsDecoder: AudioDecoder | null = null;
+  private webCodecsDecoderReady: Promise<void> | null = null;
+  private webCodecsFormat: StreamFormat | null = null;
+  private useNativeOpus: boolean = true;
+  private nativeDecoderQueue: Array<{
+    serverTimeUs: number;
+    generation: number;
+  }> = [];
+
+  // Fallback Opus decoder (opus-encdec library)
+  private opusDecoder: any = null;
+  private opusDecoderModule: any = null;
+  private opusDecoderReady: Promise<void> | null = null;
+
+  // FLAC decoding context (OfflineAudioContext, no playback needed)
+  private flacDecodingContext: OfflineAudioContext | null = null;
+  private flacDecodingContextSampleRate: number = 0;
+
+  constructor(
+    onDecodedChunk: (chunk: DecodedAudioChunk) => void,
+    currentGeneration: () => number,
+  ) {
+    this.onDecodedChunk = onDecodedChunk;
+    this.currentGeneration = currentGeneration;
+  }
+
+  /**
+   * Handle a binary audio message from the WebSocket.
+   * Parses the message, decodes the audio, and emits a DecodedAudioChunk.
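+   * Wire layout for an audio chunk (type 4): byte 0 = role/slot byte,
+   * bytes 1-8 = server timestamp in microseconds (big-endian int64),
+   * bytes 9+ = codec payload.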
+   */
+  async handleBinaryMessage(
+    data: ArrayBuffer,
+    format: StreamFormat,
+    generation: number,
+  ): Promise<void> {
+    // First byte contains role type and message slot
+    const firstByte = new Uint8Array(data)[0];
+
+    // Type 4 is audio chunk (Player role, slot 0)
+    if (firstByte === 4) {
+      // Next 8 bytes are server timestamp in microseconds (big-endian int64)
+      const timestampView = new DataView(data, 1, 8);
+      const serverTimeUs = Number(timestampView.getBigInt64(0, false));
+
+      // Rest is audio data
+      const audioData = data.slice(9);
+
+      // For Opus: use native decoder (non-blocking async path)
+      if (format.codec === "opus" && this.useNativeOpus) {
+        await this.initWebCodecsDecoder(format);
+
+        if (this.useNativeOpus && this.webCodecsDecoder) {
+          if (
+            this.queueToNativeOpusDecoder(audioData, serverTimeUs, generation)
+          ) {
+            return; // Async path - callback handles output
+          }
+          // Fall through to fallback on error
+        }
+      }
+
+      // Fallback decode path (PCM, FLAC, or Opus via opus-encdec)
+      try {
+        const decoded = await this.decode(audioData, format);
+
+        if (decoded && generation === this.currentGeneration()) {
+          this.onDecodedChunk({
+            samples: decoded.samples,
+            sampleRate: decoded.sampleRate,
+            serverTimeUs,
+            generation,
+          });
+        }
+      } catch (error) {
+        console.error("Sendspin: Failed to decode audio buffer:", error);
+      }
+    }
+  }
+
+  private async decode(
+    audioData: ArrayBuffer,
+    format: StreamFormat,
+  ): Promise<{ samples: Float32Array[]; sampleRate: number } | null> {
+    if (format.codec === "opus") {
+      return this.decodeOpusWithEncdec(audioData, format);
+    } else if (format.codec === "flac") {
+      return this.decodeFLAC(audioData, format);
+    } else if (format.codec === "pcm") {
+      return this.decodePCM(audioData, format);
+    }
+    return null;
+  }
+
+  // ========================================
+  // PCM Decoder
+  // ========================================
+
+  private decodePCM(
+    audioData: ArrayBuffer,
+    format: StreamFormat,
+  ): { samples: Float32Array[]; sampleRate: number } | null {
+    const bytesPerSample = (format.bit_depth || 16) / 8;
+    const dataView = new DataView(audioData);
+    const numSamples =
+      audioData.byteLength / (bytesPerSample * format.channels);
+
+    const samples: Float32Array[] = [];
+    for (let ch = 0; ch < format.channels; ch++) {
+      samples.push(new Float32Array(numSamples));
+    }
+
+    // Decode PCM data (interleaved format)
+    for (let channel = 0; channel < format.channels; channel++) {
+      const channelData = samples[channel];
+      for (let i = 0; i < numSamples; i++) {
+        const offset = (i * format.channels + channel) * bytesPerSample;
+        let sample = 0;
+
+        if (format.bit_depth === 16) {
+          sample = dataView.getInt16(offset, true) / 32768.0;
+        } else if (format.bit_depth === 24) {
+          const byte1 = dataView.getUint8(offset);
+          const byte2 = dataView.getUint8(offset + 1);
+          const byte3 = dataView.getUint8(offset + 2);
+          let int24 = (byte3 << 16) | (byte2 << 8) | byte1;
+          if (int24 & 0x800000) {
+            int24 |= 0xff000000;
+          }
+          sample = int24 / 8388608.0;
+        } else if (format.bit_depth === 32) {
+          sample = dataView.getInt32(offset, true) / 2147483648.0;
+        }
+
+        channelData[i] = sample;
+      }
+    }
+
+    return { samples, sampleRate: format.sample_rate };
+  }
+
+  // ========================================
+  // FLAC Decoder (uses OfflineAudioContext)
+  // ========================================
+
+  private getFlacDecodingContext(sampleRate: number): OfflineAudioContext {
+    if (
+      !this.flacDecodingContext ||
+      this.flacDecodingContextSampleRate !== sampleRate
+    ) {
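+      // The render length of 1 frame is a placeholder: this context is used
+      // only for decodeAudioData(), which ignores the context's render length
+      // (and its channel count).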
+      this.flacDecodingContext = new OfflineAudioContext(2, 1, sampleRate);
+      this.flacDecodingContextSampleRate = sampleRate;
+    }
+    return this.flacDecodingContext;
+  }
+
+  private async decodeFLAC(
+    audioData: ArrayBuffer,
+    format: StreamFormat,
+  ): Promise<{ samples: Float32Array[]; sampleRate: number } | null> {
+    try {
+      let dataToDecode = audioData;
+      if (format.codec_header) {
+        // Decode Base64 codec header and prepend to audio data
+        const headerBytes = Uint8Array.from(atob(format.codec_header), (c) =>
+          c.charCodeAt(0),
+        );
+        const combined = new Uint8Array(
+          headerBytes.length + audioData.byteLength,
+        );
+        combined.set(headerBytes, 0);
+        combined.set(new Uint8Array(audioData), headerBytes.length);
+        dataToDecode = combined.buffer;
+      }
+
+      const ctx = this.getFlacDecodingContext(format.sample_rate);
+      const audioBuffer = await ctx.decodeAudioData(dataToDecode);
+
+      // Extract Float32Array per channel from AudioBuffer
+      const samples: Float32Array[] = [];
+      for (let ch = 0; ch < audioBuffer.numberOfChannels; ch++) {
+        samples.push(new Float32Array(audioBuffer.getChannelData(ch)));
+      }
+
+      return { samples, sampleRate: audioBuffer.sampleRate };
+    } catch (error) {
+      console.error("Error decoding FLAC data:", error);
+      return null;
+    }
+  }
+
+  // ========================================
+  // Opus - Native WebCodecs Decoder
+  // ========================================
+
+  private async initWebCodecsDecoder(format: StreamFormat): Promise<void> {
+    const tryConfigureExistingDecoder = (): boolean => {
+      if (!this.webCodecsDecoder) return false;
+
+      const matchesFormat =
+        !!this.webCodecsFormat &&
+        this.webCodecsFormat.sample_rate === format.sample_rate &&
+        this.webCodecsFormat.channels === format.channels;
+
+      if (this.webCodecsDecoder.state === "configured" && matchesFormat) {
+        return true;
+      }
+
+      if (this.webCodecsDecoder.state === "closed") {
+        return false;
+      }
+
+      try {
+        this.webCodecsDecoder.configure({
+          codec: "opus",
+          sampleRate: format.sample_rate,
+          numberOfChannels: format.channels,
+        });
+        this.webCodecsFormat = format;
+        return true;
+      } catch {
+        return false;
+      }
+    };
+
+    if (tryConfigureExistingDecoder()) {
+      return;
+    }
+
+    if (this.webCodecsDecoderReady) {
+      await this.webCodecsDecoderReady;
+      if (tryConfigureExistingDecoder()) {
+        return;
+      }
+
+      try {
+        this.webCodecsDecoder?.close();
+      } catch {
+        // Ignore close errors; we'll recreate below.
+      }
+      this.webCodecsDecoder = null;
+      this.webCodecsDecoderReady = null;
+      this.webCodecsFormat = null;
+    }
+
+    if (this.webCodecsDecoderReady) {
+      await this.webCodecsDecoderReady;
+      return;
+    }
+
+    this.webCodecsDecoderReady = this.createWebCodecsDecoder(format);
+    await this.webCodecsDecoderReady;
+  }
+
+  private async createWebCodecsDecoder(format: StreamFormat): Promise<void> {
+    if (typeof AudioDecoder === "undefined") {
+      this.useNativeOpus = false;
+      return;
+    }
+
+    try {
+      const support = await AudioDecoder.isConfigSupported({
+        codec: "opus",
+        sampleRate: format.sample_rate,
+        numberOfChannels: format.channels,
+      });
+
+      if (!support.supported) {
+        console.log(
+          "[NativeOpus] WebCodecs Opus not supported, will use fallback",
+        );
+        this.useNativeOpus = false;
+        return;
+      }
+
+      this.webCodecsDecoder = new AudioDecoder({
+        output: (audioData: AudioData) => this.handleAudioData(audioData),
+        error: (error: Error) => {
+          console.error("[NativeOpus] WebCodecs decoder error:", error);
+        },
+      });
+
+      this.webCodecsDecoder.configure({
+        codec: "opus",
+        sampleRate: format.sample_rate,
+        numberOfChannels: format.channels,
+      });
+
+      this.webCodecsFormat = format;
+      console.log(
+        `[NativeOpus] Using WebCodecs AudioDecoder: ${format.sample_rate}Hz, ${format.channels}ch`,
+      );
+    } catch (error) {
+      console.warn(
+        "[NativeOpus] WebCodecs init failed, will use fallback:",
+        error,
+      );
+      this.useNativeOpus = false;
+    }
+  }
+
+  // Handle decoded audio data from native Opus decoder
+  private handleAudioData(audioData: AudioData): void {
+    try {
+      const outputTimestampUs = Number(audioData.timestamp);
+      const metadata = this.nativeDecoderQueue.shift();
+
+      if (!metadata) {
+        console.warn(
+          `[NativeOpus] Dropping frame with empty decode queue (out ts=${outputTimestampUs})`,
+        );
+        audioData.close();
+        return;
+      }
+
+      const { serverTimeUs, generation } = metadata;
+      if (generation !== this.currentGeneration()) {
+        console.warn(
+          `[NativeOpus] Dropping old-stream frame (ts=${serverTimeUs}, gen=${generation} != current=${this.currentGeneration()})`,
+        );
+        audioData.close();
+        return;
+      }
+
+      const channels = audioData.numberOfChannels;
+      const frames = audioData.numberOfFrames;
+      const fmt = audioData.format;
+
+      let interleaved: Float32Array;
+
+      if (fmt === "f32-planar") {
+        interleaved = new Float32Array(frames * channels);
+        for (let ch = 0; ch < channels; ch++) {
+          const channelData = new Float32Array(frames);
+          audioData.copyTo(channelData, { planeIndex: ch });
+          for (let i = 0; i < frames; i++) {
+            interleaved[i * channels + ch] = channelData[i];
+          }
+        }
+      } else if (fmt === "f32") {
+        interleaved = new Float32Array(frames * channels);
+        audioData.copyTo(interleaved, { planeIndex: 0 });
+      } else if (fmt === "s16-planar") {
+        interleaved = new Float32Array(frames * channels);
+        for (let ch = 0; ch < channels; ch++) {
+          const channelData = new Int16Array(frames);
+          audioData.copyTo(channelData, { planeIndex: ch });
+          for (let i = 0; i < frames; i++) {
+            interleaved[i * channels + ch] = channelData[i] / 32768.0;
+          }
+        }
+      } else if (fmt === "s16") {
+        const int16Data = new Int16Array(frames * channels);
+        audioData.copyTo(int16Data, { planeIndex: 0 });
+        interleaved = new Float32Array(frames * channels);
+        for (let i = 0; i < frames * channels; i++) {
+          interleaved[i] = int16Data[i] / 32768.0;
+        }
+      } else {
+        console.warn(`[NativeOpus] Unsupported AudioData format: ${fmt}`);
+        audioData.close();
+        return;
+      }
+
+      this.emitDeinterleavedChunk(
+        interleaved,
+        serverTimeUs,
+        channels,
+        generation,
+      );
+      audioData.close();
+    } catch (e) {
+      console.error("[NativeOpus] Error in output callback:", e);
+      audioData.close();
+    }
+  }
+
+  private emitDeinterleavedChunk(
+    interleaved: Float32Array,
+    serverTimeUs: number,
+    channels: number,
+    generation: number,
+  ): void {
+    if (!this.webCodecsFormat) return;
+
+    const numFrames = interleaved.length / channels;
+    const samples: Float32Array[] = [];
+
+    for (let ch = 0; ch < channels; ch++) {
+      const channelData = new Float32Array(numFrames);
+      for (let i = 0; i < numFrames; i++) {
+        channelData[i] = interleaved[i * channels + ch];
+      }
+      samples.push(channelData);
+    }
+
+    this.onDecodedChunk({
+      samples,
+      sampleRate: this.webCodecsFormat.sample_rate,
+      serverTimeUs,
+      generation,
+    });
+  }
+
+  private queueToNativeOpusDecoder(
+    audioData: ArrayBuffer,
+    serverTimeUs: number,
+    generation: number,
+  ): boolean {
+    if (
+      !this.webCodecsDecoder ||
+      this.webCodecsDecoder.state !== "configured"
+    ) {
+      return false;
+    }
+
+    try {
+      this.nativeDecoderQueue.push({
+        serverTimeUs,
+        generation,
+      });
+
+      const chunk = new EncodedAudioChunk({
+        type: "key",
+        timestamp: serverTimeUs,
+        data: audioData,
+      });
+
+      this.webCodecsDecoder.decode(chunk);
+      return true;
+    } catch (error) {
+      if (this.nativeDecoderQueue.length > 0) {
+        this.nativeDecoderQueue.pop();
+      }
+      console.error("[NativeOpus] WebCodecs queue error:", error);
+      return false;
+    }
+  }
+
+  // ========================================
+  // Opus - Fallback (opus-encdec library)
+  // ========================================
+
+  private resolveOpusDecoderModule(moduleExport: any): any {
+    const maybeDefault = moduleExport?.default;
+    const maybeCommonJs = moduleExport?.["module.exports"];
+    const resolved = maybeDefault ?? maybeCommonJs ?? moduleExport;
+
+    if (!resolved || typeof resolved !== "object") {
+      throw new Error("[Opus] Invalid libopus decoder module export");
+    }
+    return resolved;
+  }
+
+  private resolveOggOpusDecoderClass(wrapperExport: any): any {
+    const maybeDefault = wrapperExport?.default;
+    const maybeCommonJs = wrapperExport?.["module.exports"];
+    const wrapper = maybeDefault ?? maybeCommonJs ?? wrapperExport;
+    const resolved = wrapper?.OggOpusDecoder ?? wrapper;
+
+    if (typeof resolved !== "function") {
+      throw new Error("[Opus] OggOpusDecoder class export not found");
+    }
+    return resolved;
+  }
+
+  private async waitForOpusReady(target: {
+    isReady: boolean;
+    onready?: () => void;
+  }): Promise<void> {
+    if (target.isReady) return;
+
+    if (Object.isExtensible(target)) {
+      await new Promise<void>((resolve) => {
+        target.onready = () => resolve();
+      });
+      return;
+    }
+
+    while (!target.isReady) {
+      await new Promise<void>((resolve) => setTimeout(resolve, 20));
+    }
+  }
+
+  private async initOpusEncdecDecoder(format: StreamFormat): Promise<void> {
+    if (this.opusDecoderReady) {
+      await this.opusDecoderReady;
+      return;
+    }
+
+    this.opusDecoderReady = (async () => {
+      console.log("[Opus] Initializing decoder (opus-encdec)...");
+
+      const [DecoderModuleExport, DecoderWrapperExport] = await Promise.all([
+        import("opus-encdec/dist/libopus-decoder.js"),
+        import("opus-encdec/src/oggOpusDecoder.js"),
+      ]);
+
+      this.opusDecoderModule =
+        this.resolveOpusDecoderModule(DecoderModuleExport);
+
+      const OggOpusDecoderClass =
+        this.resolveOggOpusDecoderClass(DecoderWrapperExport);
+
+      await this.waitForOpusReady(this.opusDecoderModule);
+
+      this.opusDecoder = new OggOpusDecoderClass(
+        {
+          rawOpus: true,
+          decoderSampleRate: format.sample_rate,
+          outputBufferSampleRate: format.sample_rate,
+          numberOfChannels: format.channels,
+        },
+        this.opusDecoderModule,
+      );
+
+      await this.waitForOpusReady(this.opusDecoder);
+
+      console.log("[Opus] Decoder ready");
+    })();
+
+    await this.opusDecoderReady;
+  }
+
+  private async decodeOpusWithEncdec(
+    audioData: ArrayBuffer,
+    format: StreamFormat,
+  ): Promise<{ samples: Float32Array[]; sampleRate: number } | null> {
+    try {
+      await this.initOpusEncdecDecoder(format);
+
+      const uint8Array = new Uint8Array(audioData);
+      const decodedSamples: Float32Array[] = [];
+
+      this.opusDecoder.decodeRaw(uint8Array, (samples: Float32Array) => {
+        decodedSamples.push(new Float32Array(samples));
+      });
+
+      if (decodedSamples.length === 0) {
+        console.warn("[Opus] Fallback decoder produced no samples");
+        return null;
+      }
+
+      // Convert interleaved samples to per-channel arrays
+      const interleavedSamples = decodedSamples[0];
+      const numFrames = interleavedSamples.length / format.channels;
+      const samples: Float32Array[] = [];
+
+      for (let ch = 0; ch < format.channels; ch++) {
+        const channelData = new Float32Array(numFrames);
+        for (let i = 0; i < numFrames; i++) {
+          channelData[i] = interleavedSamples[i * format.channels + ch];
+        }
+        samples.push(channelData);
+      }
+
+      return { samples, sampleRate: format.sample_rate };
+    } catch (error) {
+      console.error("[Opus] Decode error:", error);
+      return null;
+    }
+  }
+
+  // ========================================
+  // Lifecycle
+  // ========================================
+
+  /** Clear decoder state (on stream change/clear). Drops in-flight async decodes. */
+  clearState(): void {
+    this.nativeDecoderQueue = [];
+    try {
+      this.webCodecsDecoder?.close();
+    } catch {
+      // Ignore close errors
+    }
+    this.webCodecsDecoder = null;
+    this.webCodecsDecoderReady = null;
+    this.webCodecsFormat = null;
+  }
+
+  /** Full cleanup (on disconnect). Releases all decoder resources.
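+   * Unlike clearState(), this also drops the opus-encdec fallback decoder,
+   * resets the FLAC decoding context, and re-enables native Opus so the next
+   * session can retry WebCodecs.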
*/ + close(): void { + this.clearState(); + + if (this.opusDecoder) { + this.opusDecoder = null; + this.opusDecoderModule = null; + this.opusDecoderReady = null; + } + + // Reset native Opus flag for next session + this.useNativeOpus = true; + + this.flacDecodingContext = null; + this.flacDecodingContextSampleRate = 0; + } +} diff --git a/src/types.ts b/src/types.ts index 3baf5d6..e7da4b7 100644 --- a/src/types.ts +++ b/src/types.ts @@ -385,6 +385,84 @@ export interface AudioBufferQueueItem { generation: number; } +/** + * A decoded audio chunk with raw PCM samples. + * Emitted by SendspinCore after decoding compressed audio. + * Consumed by SendspinPlayer for playback, or by visualization/analysis tools. + */ +export interface DecodedAudioChunk { + /** PCM sample data, one Float32Array per channel (values in -1.0 to 1.0) */ + samples: Float32Array[]; + /** Sample rate in Hz */ + sampleRate: number; + /** Server timestamp in microseconds */ + serverTimeUs: number; + /** Stream generation (incremented on each new stream) */ + generation: number; +} + +/** + * Configuration for SendspinCore (protocol + decoding, no playback). + */ +export interface SendspinCoreConfig { + /** Unique player identifier. Auto-generated if not provided. */ + playerId?: string; + + /** Base URL of the Sendspin server (e.g., "http://192.168.1.100:8095") */ + baseUrl: string; + + /** Human-readable name for this player. Auto-generated if not provided. */ + clientName?: string; + + /** + * Codecs to use for audio streaming, in priority order. + * Default: ["opus", "flac", "pcm"] + */ + codecs?: Codec[]; + + /** Buffer capacity in bytes. Defaults to 5MB. */ + bufferCapacity?: number; + + /** Static sync delay in milliseconds (0-5000). */ + syncDelay?: number; + + /** Use hardware/external volume control instead of software gain. */ + useHardwareVolume?: boolean; + + /** Callback when server sends volume/mute commands (hardware volume mode). */ + onVolumeCommand?: (volume: number, muted: boolean) => void; + + /** Callback when server sends a set_static_delay command. */ + onDelayCommand?: (delayMs: number) => void; + + /** Getter for external volume state (hardware volume mode). */ + getExternalVolume?: () => { volume: number; muted: boolean }; + + /** Callback when player state changes */ + onStateChange?: (state: { + isPlaying: boolean; + volume: number; + muted: boolean; + playerState: PlayerState; + serverState: ServerStatePayload; + groupState: GroupUpdatePayload; + }) => void; +} + +/** + * Interface for protocol handler to call into the audio subsystem. + * Implemented by SendspinCore as the bridge between protocol and audio. + */ +export interface StreamHandler { + handleBinaryMessage(data: ArrayBuffer): void; + handleStreamStart(format: StreamFormat, isFormatUpdate: boolean): void; + handleStreamClear(): void; + handleStreamEnd(): void; + handleVolumeUpdate(): void; + handleSyncDelayChange(delayMs: number): void; + getSyncDelayMs(): number; +} + /** * Storage interface for persisting SDK state. * Compatible with Web Storage API (localStorage/sessionStorage). From 645d62105e768583490217b06cb91a25d8618bf9 Mon Sep 17 00:00:00 2001 From: Claude Date: Sun, 12 Apr 2026 09:58:05 +0000 Subject: [PATCH 02/27] Refactor into modular core/player architecture MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Split the monolithic SDK into a composable architecture: - SendspinCore: Protocol + time sync + state + decoding layer. 
Produces raw PCM DecodedAudioChunk (Float32Array[] per channel) that visualization apps or conformance tests can consume directly. - SendspinPlayer: Wraps Core + AudioScheduler. Maintains the same public API so existing consumers are unaffected. - SendspinDecoder: Standalone audio decoder (PCM, Opus, FLAC) that produces Float32Array[] without any Web Audio playback dependency. Uses OfflineAudioContext for FLAC instead of a playback context. - AudioScheduler: Web Audio scheduling, sync correction, volume, and output routing — extracted from the former AudioProcessor. - StreamHandler interface: Decouples ProtocolHandler from audio implementation, enabling protocol-only testing. File changes: - New: core.ts, audio-decoder.ts, audio-scheduler.ts - Modified: types.ts (DecodedAudioChunk, SendspinCoreConfig, StreamHandler), protocol-handler.ts (uses StreamHandler), index.ts (re-exports new modules) - Deleted: audio-processor.ts (split into decoder + scheduler) https://claude.ai/code/session_018UYYEXUZVuQ2Z4Texa7W6m --- ...{audio-processor.ts => audio-scheduler.ts} | 828 ++---------------- src/core.ts | 321 +++++++ src/index.ts | 283 ++---- src/protocol-handler.ts | 47 +- 4 files changed, 490 insertions(+), 989 deletions(-) rename src/{audio-processor.ts => audio-scheduler.ts} (62%) create mode 100644 src/core.ts diff --git a/src/audio-processor.ts b/src/audio-scheduler.ts similarity index 62% rename from src/audio-processor.ts rename to src/audio-scheduler.ts index 9040783..ac74efa 100644 --- a/src/audio-processor.ts +++ b/src/audio-scheduler.ts @@ -1,18 +1,23 @@ +/** + * Audio scheduler for synchronized playback. + * + * Handles Web Audio API scheduling, sync correction, AudioContext management, + * volume control, and output routing. Receives pre-decoded audio chunks + * (DecodedAudioChunk) from SendspinCore and schedules them for playback. + */ + import type { AudioBufferQueueItem, - StreamFormat, AudioOutputMode, CorrectionMode, + DecodedAudioChunk, SendspinStorage, } from "./types"; import type { StateManager } from "./state-manager"; import type { SendspinTimeFilter } from "./time-filter"; // Sync correction constants -const SAMPLE_CORRECTION_FADE_LEN = 8; // samples to blend around correction points -// Blend budget across the whole fade window. -// We derive per-sample strength from fade length so longer fades become gentler. -// 1.0 means the whole fade applies roughly a full-strength blend in total. 
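+// Per-sample blend strength is derived from the fade length so the total
+// blend across the fade window sums to roughly SAMPLE_CORRECTION_TARGET_BLEND_SUM
+// (longer fades become proportionally gentler).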
+const SAMPLE_CORRECTION_FADE_LEN = 8; const SAMPLE_CORRECTION_TARGET_BLEND_SUM = 1.0; const SAMPLE_CORRECTION_FADE_STRENGTH = Math.min( 1, @@ -26,9 +31,9 @@ for (let f = 0; f < SAMPLE_CORRECTION_FADE_LEN; f++) { ((SAMPLE_CORRECTION_FADE_LEN - f) / (SAMPLE_CORRECTION_FADE_LEN + 1)) * SAMPLE_CORRECTION_FADE_STRENGTH; } -const OUTPUT_LATENCY_ALPHA = 0.01; // EMA smoothing factor for outputLatency -const SYNC_ERROR_ALPHA = 0.1; // EMA smoothing factor for sync error (filters jitter) -const OUTPUT_LATENCY_STORAGE_KEY = "sendspin-output-latency-us"; // LocalStorage key +const OUTPUT_LATENCY_ALPHA = 0.01; +const SYNC_ERROR_ALPHA = 0.1; +const OUTPUT_LATENCY_STORAGE_KEY = "sendspin-output-latency-us"; const OUTPUT_LATENCY_PERSIST_INTERVAL_MS = 10_000; const RECORRECTION_CHECK_INTERVAL_MS = 250; const RECORRECTION_TRIGGER_MS = 30; @@ -68,49 +73,48 @@ const OUTPUT_TIMESTAMP_PROMOTION_MIN_GOOD_SAMPLES = 6; const OUTPUT_TIMESTAMP_PROMOTION_MIN_SPAN_MS = 750; const OUTPUT_TIMESTAMP_MAX_CONSECUTIVE_BAD_SAMPLES = 2; -// Mode-specific sync correction thresholds const CORRECTION_THRESHOLDS: Record< CorrectionMode, { - resyncAboveMs: number; // ms - hard resync for extreme errors - rate2AboveMs: number; // ms - use 2% rate above this - rate1AboveMs: number; // ms - use 1% rate above this - samplesBelowMs: number; // ms - use sample manipulation below this - deadbandBelowMs: number; // ms - don't correct if error < this - enableRecorrectionMonitor: boolean; // Whether recorrection monitor should run in this mode - immediateDelayCutover: boolean; // Whether runtime static delay should trigger immediate cutover + resyncAboveMs: number; + rate2AboveMs: number; + rate1AboveMs: number; + samplesBelowMs: number; + deadbandBelowMs: number; + enableRecorrectionMonitor: boolean; + immediateDelayCutover: boolean; } > = { sync: { - resyncAboveMs: 200, // Hard resync for large errors - rate2AboveMs: 35, // Use 2% rate when error exceeds this - rate1AboveMs: 8, // Use 1% rate when error exceeds this - samplesBelowMs: 8, // Use sample insertion/deletion below this - deadbandBelowMs: 1, // Ignore corrections below this + resyncAboveMs: 200, + rate2AboveMs: 35, + rate1AboveMs: 8, + samplesBelowMs: 8, + deadbandBelowMs: 1, enableRecorrectionMonitor: true, immediateDelayCutover: true, }, quality: { - resyncAboveMs: 35, // Tighter resync threshold to avoid drifting too far - rate2AboveMs: Infinity, // Disabled - never use rate correction - rate1AboveMs: Infinity, // Disabled - never use rate correction - samplesBelowMs: 35, // Use sample insertion/deletion below this - deadbandBelowMs: 1, // Keep deadband tight for accurate sync + resyncAboveMs: 35, + rate2AboveMs: Infinity, + rate1AboveMs: Infinity, + samplesBelowMs: 35, + deadbandBelowMs: 1, enableRecorrectionMonitor: false, immediateDelayCutover: false, }, "quality-local": { - resyncAboveMs: 600, // Last resort only (prefer keeping uninterrupted playback even if out of sync) - rate2AboveMs: Infinity, // Disabled - never use rate correction - rate1AboveMs: Infinity, // Disabled - never use rate correction - samplesBelowMs: 0, // Disabled - never use sample corrections (prioritize smooth local playback) - deadbandBelowMs: 5, // Larger deadband to avoid frequent small adjustments + resyncAboveMs: 600, + rate2AboveMs: Infinity, + rate1AboveMs: Infinity, + samplesBelowMs: 0, + deadbandBelowMs: 5, enableRecorrectionMonitor: false, immediateDelayCutover: false, }, }; -export class AudioProcessor { +export class AudioScheduler { private audioContext: AudioContext | null = null; 
private gainNode: GainNode | null = null; private streamDestination: MediaStreamAudioDestinationNode | null = null; @@ -124,21 +128,18 @@ export class AudioProcessor { generation: number; }[] = []; - // Seamless playback tracking - private nextPlaybackTime: number = 0; // AudioContext time when audio should reach the output - private nextScheduleTime: number = 0; // AudioContext time for source.start() (delayed, for Web Audio) - private lastScheduledServerTime: number = 0; // Server timestamp of last scheduled chunk end + private nextPlaybackTime: number = 0; + private nextScheduleTime: number = 0; + private lastScheduledServerTime: number = 0; - // Sync tracking (for debugging/display) private currentSyncErrorMs: number = 0; - private smoothedSyncErrorMs: number = 0; // EMA-filtered sync error for corrections + private smoothedSyncErrorMs: number = 0; private resyncCount: number = 0; private currentPlaybackRate: number = 1.0; private currentCorrectionMethod: "none" | "samples" | "rate" | "resync" = "none"; private lastSamplesAdjusted: number = 0; - // Output latency smoothing (EMA to filter Chrome jitter) private lastRawOutputLatencyUs: number = 0; private smoothedOutputLatencyUs: number | null = null; private lastLatencyPersistAtMs: number | null = null; @@ -146,30 +147,13 @@ export class AudioProcessor { private timingEstimateAudioContextTimeSec: number | null = null; private timingEstimateAtMs: number | null = null; - // Correction mode private _correctionMode: CorrectionMode = "sync"; - // Periodic status logging private _lastStatusLogMs: number = 0; private _lastTimestampRejectReason: string | null = null; private _intervalResyncCount: number = 0; - // Native Opus decoder (uses WebCodecs API) - private webCodecsDecoder: AudioDecoder | null = null; - private webCodecsDecoderReady: Promise | null = null; - private webCodecsFormat: StreamFormat | null = null; - private useNativeOpus: boolean = true; // false when WebCodecs unavailable - - // Fallback Opus decoder (opus-encdec library) - private opusDecoder: any = null; - private opusDecoderModule: any = null; - private opusDecoderReady: Promise | null = null; - private useOutputLatencyCompensation: boolean = true; - private nativeDecoderQueue: Array<{ - serverTimeUs: number; - generation: number; - }> = []; private recorrectionInterval: ReturnType | null = null; private recorrectionBreachStartedAtMs: number | null = null; private lastRecorrectionAtMs: number = -Infinity; @@ -186,6 +170,9 @@ export class AudioProcessor { private outputTimestampBadSamples: number = 0; private outputTimestampGoodSinceMs: number | null = null; + private scheduleTimeout: ReturnType | null = null; + private queueProcessScheduled = false; + constructor( private stateManager: StateManager, private timeFilter: SendspinTimeFilter, @@ -203,8 +190,6 @@ export class AudioProcessor { this._correctionMode = correctionMode; this.useOutputLatencyCompensation = useOutputLatencyCompensation; this.syncDelayMs = this.sanitizeSyncDelayMs(this.syncDelayMs); - - // Load persisted output latency from storage this.loadPersistedLatency(); } @@ -215,7 +200,6 @@ export class AudioProcessor { return Math.max(0, Math.min(5000, Math.round(delayMs))); } - // Load persisted output latency from storage private loadPersistedLatency(): void { if (!this.storage) return; try { @@ -227,11 +211,10 @@ export class AudioProcessor { } } } catch { - // Storage may fail depending on the implementation, ignore errors + // ignore } } - // Persist output latency to storage private persistLatency(): 
void { if (!this.storage || this.smoothedOutputLatencyUs === null) return; try { @@ -240,16 +223,15 @@ export class AudioProcessor { this.smoothedOutputLatencyUs.toString(), ); } catch { - // Storage may fail depending on the implementation, ignore errors + // ignore } } - // Get current correction mode + get correctionMode(): CorrectionMode { return this._correctionMode; } - // Set correction mode at runtime setCorrectionMode(mode: CorrectionMode): void { this._correctionMode = mode; if (!this.modeUsesRecorrectionMonitor(mode)) { @@ -330,11 +312,9 @@ export class AudioProcessor { rawTimeSec: number, nowMs: number, ): number { - // Fallback: de-quantize `currentTime` using wall clock and slew toward the raw value. - // Key goal: avoid discrete ~10/20ms jumps in derived audio time. - const TIMING_MAX_SLEW_SEC = 0.002; // max correction per snapshot (2ms) - const TIMING_RESET_THRESHOLD_SEC = 0.5; // snap if mapping is clearly invalid - const TIMING_MAX_LEAD_SEC = 0.1; // don't run far ahead of raw time + const TIMING_MAX_SLEW_SEC = 0.002; + const TIMING_RESET_THRESHOLD_SEC = 0.5; + const TIMING_MAX_LEAD_SEC = 0.1; if (this.timingEstimateAudioContextTimeSec === null) { this.timingEstimateAudioContextTimeSec = rawTimeSec; @@ -355,7 +335,6 @@ export class AudioProcessor { -TIMING_MAX_SLEW_SEC, Math.min(TIMING_MAX_SLEW_SEC, errorSec), ); - // Keep monotonic and bounded vs raw time. const next = Math.max( this.timingEstimateAudioContextTimeSec, predicted + slew, @@ -417,9 +396,6 @@ export class AudioProcessor { try { const ts = getOutputTimestamp.call(this.audioContext); - // Sample performance.now() after getOutputTimestamp() so we validate the - // timestamp against a contemporaneous wall-clock reading instead of an - // earlier one taken before the browser produced the timestamp snapshot. const nowMs = performance.now(); const rawFreshnessMs = nowMs - ts.performanceTime; if (rawFreshnessMs < -OUTPUT_TIMESTAMP_FUTURE_TOLERANCE_MS) { @@ -551,8 +527,8 @@ export class AudioProcessor { } private getTimingSnapshot(): { - audioContextTimeSec: number; // derived; use for target-time math - audioContextRawTimeSec: number; // raw; use for comparisons (late drops/headroom) + audioContextTimeSec: number; + audioContextRawTimeSec: number; nowMs: number; nowUs: number; } { @@ -718,6 +694,7 @@ export class AudioProcessor { return true; } + private performGuardedCutover( reason: "recorrection" | "delay-change", options: { @@ -842,7 +819,6 @@ export class AudioProcessor { return this.syncDelayMs; } - // Update sync delay at runtime setSyncDelay(delayMs: number): void { const sanitizedDelayMs = this.sanitizeSyncDelayMs(delayMs); const oldDelayMs = this.syncDelayMs; @@ -872,7 +848,6 @@ export class AudioProcessor { }); } - // Get current sync info for debugging/display get syncInfo(): { clockDriftPercent: number; syncErrorMs: number; @@ -901,7 +876,6 @@ export class AudioProcessor { } this._lastStatusLogMs = nowMs; - // corr field let corr: string; switch (this.currentCorrectionMethod) { case "rate": @@ -914,14 +888,12 @@ export class AudioProcessor { corr = this.currentCorrectionMethod; } - // q field const queueDepth = this.audioBufferQueue.length + this.scheduledSources.length; const aheadSec = this.audioContext ? 
this.getScheduledAheadSec(this.audioContext.currentTime) : 0; - // clock field let clock: string; if (this.activeAudioClockSource === "timestamp") { clock = `timestamp(good:${this.outputTimestampGoodSamples})`; @@ -931,12 +903,10 @@ export class AudioProcessor { clock = "estimated"; } - // tf field const tf = this.timeFilter.is_synchronized ? `synced(err=${(this.timeFilter.error / 1000).toFixed(1)}ms,drift=${this.timeFilter.drift.toFixed(3)},n=${this.timeFilter.count})` : `pending(n=${this.timeFilter.count})`; - // lat field const latMs = this.smoothedOutputLatencyUs !== null ? Math.round(this.smoothedOutputLatencyUs / 1000) @@ -970,22 +940,18 @@ export class AudioProcessor { this.smoothedSyncErrorMs = 0; } - // Get raw output latency in microseconds (for Kalman filter input) getRawOutputLatencyUs(): number { if (!this.audioContext) return 0; const baseLatency = this.audioContext.baseLatency ?? 0; const outputLatency = this.audioContext.outputLatency ?? 0; - const rawUs = (baseLatency + outputLatency) * 1_000_000; // Convert seconds to microseconds + const rawUs = (baseLatency + outputLatency) * 1_000_000; this.lastRawOutputLatencyUs = rawUs; return rawUs; } - // Get smoothed output latency in microseconds (filters Chrome jitter) getSmoothedOutputLatencyUs(): number { const rawLatencyUs = this.getRawOutputLatencyUs(); - // Some browsers report 0 until playback is active; treat 0 as "unknown" - // and keep the last good estimate to avoid poisoning sync. if (rawLatencyUs <= 0 && this.smoothedOutputLatencyUs !== null) { return this.smoothedOutputLatencyUs; } @@ -1011,13 +977,10 @@ export class AudioProcessor { return this.smoothedOutputLatencyUs; } - // Reset latency smoother (call on stream change or audio context recreation) private resetLatencySmoother(): void { this.smoothedOutputLatencyUs = null; } - // Create a fresh copy of an AudioBuffer - // Some decoders produce buffers with boundary artifacts - copying fixes this private copyBuffer(buffer: AudioBuffer): AudioBuffer { if (!this.audioContext) return buffer; @@ -1034,9 +997,6 @@ export class AudioProcessor { return newBuffer; } - // Adjust buffer by inserting or deleting 1 sample using interpolation - // Insert: [A, B, ...] → [A, (A+B)/2, B, ...] (at start) - // Delete: [..., Y, Z] → [..., (Y+Z)/2] (at end) private adjustBufferSamples( buffer: AudioBuffer, samplesToAdjust: number, @@ -1051,7 +1011,6 @@ export class AudioProcessor { try { if (samplesToAdjust > 0) { - // Insert 1 sample at START: [A, B, ...] → [A, (A+B)/2, B, ...] const newBuffer = this.audioContext.createBuffer( channels, len + 1, @@ -1067,8 +1026,6 @@ export class AudioProcessor { newData[1] = insertedSample; newData.set(oldData.subarray(1), 2); - // After inserting one synthetic sample, gently pull the next few real samples toward it. - // This smooths the splice and avoids a hard step immediately after the insertion point. for (let f = 0; f < SAMPLE_CORRECTION_FADE_LEN; f++) { const pos = 2 + f; if (pos >= newData.length) break; @@ -1079,7 +1036,6 @@ export class AudioProcessor { return newBuffer; } else { - // Delete 1 sample at END: [..., Y, Z] → [..., (Y+Z)/2] const newBuffer = this.audioContext.createBuffer( channels, len - 1, @@ -1094,8 +1050,6 @@ export class AudioProcessor { const replacementSample = (oldData[len - 2] + oldData[len - 1]) / 2; newData[len - 2] = replacementSample; - // Before a deletion collapse, gently pull the preceding samples toward the replacement. - // This smooths entry into the new boundary formed by skipping one sample. 
for (let f = 0; f < SAMPLE_CORRECTION_FADE_LEN; f++) { const pos = len - 3 - f; if (pos < 0) break; @@ -1113,10 +1067,9 @@ export class AudioProcessor { } } - // Initialize AudioContext with platform-specific setup initAudioContext(): void { if (this.audioContext) { - return; // Already initialized + return; } if (this.outputMode === "media-element" && this.ownsAudioElement) { @@ -1125,8 +1078,6 @@ export class AudioProcessor { document.body.appendChild(this.audioElement); } - // Set audio session to "playback" so audio continues when iOS device is muted - // (iOS 17+, no-op on other platforms) if ((navigator as any).audioSession) { (navigator as any).audioSession.type = "playback"; } @@ -1139,7 +1090,6 @@ export class AudioProcessor { const audioElement = this.audioElement; if (this.outputMode === "direct") { - // Direct output to audioContext.destination (e.g., Cast receiver) this.gainNode.connect(this.audioContext.destination); } else { if (!audioElement) { @@ -1149,35 +1099,20 @@ export class AudioProcessor { } if (this.isAndroid && this.silentAudioSrc) { - // Android MediaSession workaround: Play almost-silent audio file - // Android browsers don't support MediaSession with MediaStream from Web Audio API - // Solution: Loop almost-silent audio to keep MediaSession active - // Real audio plays through Web Audio API → audioContext.destination this.gainNode.connect(this.audioContext.destination); - - // Use almost-silent audio file to trick Android into showing MediaSession audioElement.src = this.silentAudioSrc; audioElement.loop = true; - // CRITICAL: Do NOT mute - Android requires audible audio for MediaSession audioElement.muted = false; - // Set volume to 100% (the file itself is almost silent) audioElement.volume = 1.0; - // Start playing to activate MediaSession audioElement.play().catch((e) => { console.warn("Sendspin: Audio autoplay blocked:", e); }); } else { - // iOS/Desktop: Use MediaStream approach for background playback - // Create MediaStreamDestination to bridge Web Audio API to HTML5 audio element this.streamDestination = this.audioContext.createMediaStreamDestination(); this.gainNode.connect(this.streamDestination); - // Do NOT connect to audioContext.destination to avoid echo - - // Connect to HTML5 audio element for iOS background playback audioElement.srcObject = this.streamDestination.stream; audioElement.volume = 1.0; - // Start playing to activate MediaSession audioElement.play().catch((e) => { console.warn("Sendspin: Audio autoplay blocked:", e); }); @@ -1190,7 +1125,6 @@ export class AudioProcessor { } } - // Resume AudioContext if suspended (required for browser autoplay policies) async resumeAudioContext(): Promise { if (this.audioContext && this.audioContext.state === "suspended") { try { @@ -1216,18 +1150,13 @@ export class AudioProcessor { keptTailEndTimeSec: number; } { if (!this.audioContext) { - return { - requeuedCount: 0, - cutCount: 0, - keptTailEndTimeSec: 0, - }; + return { requeuedCount: 0, cutCount: 0, keptTailEndTimeSec: 0 }; } const stopTime = Math.max(cutoffTime, this.audioContext.currentTime); let requeued = 0; let cutCount = 0; let keptTailEndTimeSec = 0; this.scheduledSources = this.scheduledSources.filter((entry) => { - // Keep sources scheduled before stopTime to avoid cutting mid-buffer artifacts. 
if (entry.startTime < stopTime) { keptTailEndTimeSec = Math.max(keptTailEndTimeSec, entry.endTime); return true; @@ -1247,18 +1176,12 @@ export class AudioProcessor { cutCount++; return false; }); - return { - requeuedCount: requeued, - cutCount, - keptTailEndTimeSec, - }; + return { requeuedCount: requeued, cutCount, keptTailEndTimeSec }; } - // Update volume based on current state updateVolume(): void { if (!this.gainNode) return; - // Hardware volume mode: keep software gain at 1.0, external handles volume if (this.useHardwareVolume) { this.gainNode.gain.value = 1.0; return; @@ -1271,352 +1194,7 @@ export class AudioProcessor { } } - // Decode audio data based on codec - async decodeAudioData( - audioData: ArrayBuffer, - format: StreamFormat, - ): Promise { - if (!this.audioContext) return null; - - try { - if (format.codec === "opus") { - // Opus fallback path - native decoder uses async queueToNativeOpusDecoder - return await this.decodeOpusWithEncdec(audioData, format); - } else if (format.codec === "flac") { - // FLAC can be decoded by the browser's native decoder - // If codec_header is provided, prepend it to the audio data - let dataToEncode = audioData; - if (format.codec_header) { - // Decode Base64 codec header - const headerBytes = Uint8Array.from(atob(format.codec_header), (c) => - c.charCodeAt(0), - ); - // Concatenate header + audio data - const combined = new Uint8Array( - headerBytes.length + audioData.byteLength, - ); - combined.set(headerBytes, 0); - combined.set(new Uint8Array(audioData), headerBytes.length); - dataToEncode = combined.buffer; - } - return await this.audioContext.decodeAudioData(dataToEncode); - } else if (format.codec === "pcm") { - // PCM data needs manual decoding - return this.decodePCMData(audioData, format); - } - } catch (error) { - console.error("Error decoding audio data:", error); - } - - return null; - } - - // Initialize native Opus decoder - private async initWebCodecsDecoder(format: StreamFormat): Promise { - const tryConfigureExistingDecoder = (): boolean => { - if (!this.webCodecsDecoder) return false; - - const matchesFormat = - !!this.webCodecsFormat && - this.webCodecsFormat.sample_rate === format.sample_rate && - this.webCodecsFormat.channels === format.channels; - - if (this.webCodecsDecoder.state === "configured" && matchesFormat) { - return true; - } - - if (this.webCodecsDecoder.state === "closed") { - return false; - } - - try { - this.webCodecsDecoder.configure({ - codec: "opus", - sampleRate: format.sample_rate, - numberOfChannels: format.channels, - }); - this.webCodecsFormat = format; - return true; - } catch { - return false; - } - }; - - if (tryConfigureExistingDecoder()) { - return; - } - - if (this.webCodecsDecoderReady) { - await this.webCodecsDecoderReady; - if (tryConfigureExistingDecoder()) { - return; - } - - try { - this.webCodecsDecoder?.close(); - } catch { - // Ignore close errors; we'll recreate below. 
- } - this.webCodecsDecoder = null; - this.webCodecsDecoderReady = null; - this.webCodecsFormat = null; - } - - if (this.webCodecsDecoderReady) { - await this.webCodecsDecoderReady; - return; - } - - this.webCodecsDecoderReady = this.createWebCodecsDecoder(format); - await this.webCodecsDecoderReady; - } - - // Create and configure native Opus decoder (WebCodecs) - private async createWebCodecsDecoder(format: StreamFormat): Promise { - if (typeof AudioDecoder === "undefined") { - this.useNativeOpus = false; - return; - } - - try { - const support = await AudioDecoder.isConfigSupported({ - codec: "opus", - sampleRate: format.sample_rate, - numberOfChannels: format.channels, - }); - - if (!support.supported) { - console.log( - "[NativeOpus] WebCodecs Opus not supported, will use fallback", - ); - this.useNativeOpus = false; - return; - } - - this.webCodecsDecoder = new AudioDecoder({ - output: (audioData: AudioData) => this.handleAudioData(audioData), - error: (error: Error) => { - console.error("[NativeOpus] WebCodecs decoder error:", error); - }, - }); - - this.webCodecsDecoder.configure({ - codec: "opus", - sampleRate: format.sample_rate, - numberOfChannels: format.channels, - }); - - this.webCodecsFormat = format; - console.log( - `[NativeOpus] Using WebCodecs AudioDecoder: ${format.sample_rate}Hz, ${format.channels}ch`, - ); - } catch (error) { - console.warn( - "[NativeOpus] WebCodecs init failed, will use fallback:", - error, - ); - this.useNativeOpus = false; - } - } - - // Handle decoded audio data from native Opus decoder - private handleAudioData(audioData: AudioData): void { - try { - const outputTimestampUs = Number(audioData.timestamp); - const metadata = this.nativeDecoderQueue.shift(); - - if (!metadata) { - console.warn( - `[NativeOpus] Dropping frame with empty decode queue (out ts=${outputTimestampUs})`, - ); - audioData.close(); - return; - } - - const { serverTimeUs, generation } = metadata; - if (generation !== this.stateManager.streamGeneration) { - console.warn( - `[NativeOpus] Dropping old-stream frame (ts=${serverTimeUs}, gen=${generation} != current=${this.stateManager.streamGeneration})`, - ); - audioData.close(); - return; - } - - const channels = audioData.numberOfChannels; - const frames = audioData.numberOfFrames; - const fmt = audioData.format; - - let interleaved: Float32Array; - - if (fmt === "f32-planar") { - interleaved = new Float32Array(frames * channels); - for (let ch = 0; ch < channels; ch++) { - const channelData = new Float32Array(frames); - audioData.copyTo(channelData, { planeIndex: ch }); - for (let i = 0; i < frames; i++) { - interleaved[i * channels + ch] = channelData[i]; - } - } - } else if (fmt === "f32") { - interleaved = new Float32Array(frames * channels); - audioData.copyTo(interleaved, { planeIndex: 0 }); - } else if (fmt === "s16-planar") { - interleaved = new Float32Array(frames * channels); - for (let ch = 0; ch < channels; ch++) { - const channelData = new Int16Array(frames); - audioData.copyTo(channelData, { planeIndex: ch }); - for (let i = 0; i < frames; i++) { - interleaved[i * channels + ch] = channelData[i] / 32768.0; - } - } - } else if (fmt === "s16") { - const int16Data = new Int16Array(frames * channels); - audioData.copyTo(int16Data, { planeIndex: 0 }); - interleaved = new Float32Array(frames * channels); - for (let i = 0; i < frames * channels; i++) { - interleaved[i] = int16Data[i] / 32768.0; - } - } else { - console.warn(`[NativeOpus] Unsupported AudioData format: ${fmt}`); - audioData.close(); - return; - } - - 
this.handleNativeOpusOutput(interleaved, serverTimeUs, channels); - audioData.close(); - } catch (e) { - console.error("[NativeOpus] Error in output callback:", e); - audioData.close(); - } - } - - private resolveOpusDecoderModule(moduleExport: any): any { - const maybeDefault = moduleExport?.default; - const maybeCommonJs = moduleExport?.["module.exports"]; - const resolved = maybeDefault ?? maybeCommonJs ?? moduleExport; - - if (!resolved || typeof resolved !== "object") { - throw new Error("[Opus] Invalid libopus decoder module export"); - } - return resolved; - } - - private resolveOggOpusDecoderClass(wrapperExport: any): any { - const maybeDefault = wrapperExport?.default; - const maybeCommonJs = wrapperExport?.["module.exports"]; - const wrapper = maybeDefault ?? maybeCommonJs ?? wrapperExport; - const resolved = wrapper?.OggOpusDecoder ?? wrapper; - - if (typeof resolved !== "function") { - throw new Error("[Opus] OggOpusDecoder class export not found"); - } - return resolved; - } - - private async waitForOpusReady(target: { - isReady: boolean; - onready?: () => void; - }): Promise { - if (target.isReady) return; - - if (Object.isExtensible(target)) { - await new Promise((resolve) => { - target.onready = () => resolve(); - }); - return; - } - - while (!target.isReady) { - await new Promise((resolve) => setTimeout(resolve, 20)); - } - } - - // Initialize opus-encdec decoder (fallback when WebCodecs unavailable) - private async initOpusEncdecDecoder(format: StreamFormat): Promise { - if (this.opusDecoderReady) { - await this.opusDecoderReady; - return; - } - - this.opusDecoderReady = (async () => { - console.log("[Opus] Initializing decoder (opus-encdec)..."); - - // Dynamically import the pure JavaScript decoder (not WASM) to avoid bundling issues - const [DecoderModuleExport, DecoderWrapperExport] = await Promise.all([ - import("opus-encdec/dist/libopus-decoder.js"), - import("opus-encdec/src/oggOpusDecoder.js"), - ]); - - this.opusDecoderModule = - this.resolveOpusDecoderModule(DecoderModuleExport); - - const OggOpusDecoderClass = - this.resolveOggOpusDecoderClass(DecoderWrapperExport); - - // Wait for Module to be ready (async asm.js initialization) - await this.waitForOpusReady(this.opusDecoderModule); - - // Create decoder instance - this.opusDecoder = new OggOpusDecoderClass( - { - rawOpus: true, // We're decoding raw Opus packets, not Ogg containers - decoderSampleRate: format.sample_rate, - outputBufferSampleRate: format.sample_rate, - numberOfChannels: format.channels, - }, - this.opusDecoderModule, - ); - - // Wait for decoder to be ready if needed - await this.waitForOpusReady(this.opusDecoder); - - console.log("[Opus] Decoder ready"); - })(); - - await this.opusDecoderReady; - } - - // Handle native Opus decoder output - creates AudioBuffer and adds to queue - private handleNativeOpusOutput( - interleaved: Float32Array, - serverTimeUs: number, - channels: number, - ): void { - if (!this.audioContext || !this.webCodecsFormat) { - return; - } - - const numFrames = interleaved.length / channels; - const audioBuffer = this.audioContext.createBuffer( - channels, - numFrames, - this.webCodecsFormat.sample_rate, - ); - - // De-interleave samples into separate channels - for (let ch = 0; ch < channels; ch++) { - const channelData = audioBuffer.getChannelData(ch); - for (let i = 0; i < numFrames; i++) { - channelData[i] = interleaved[i * channels + ch]; - } - } - - // Add to queue directly - this.audioBufferQueue.push({ - buffer: audioBuffer, - serverTime: serverTimeUs, - 
generation: this.stateManager.streamGeneration, - }); - - this.scheduleQueueProcessing(); - } - - private scheduleTimeout: ReturnType | null = null; - private queueProcessScheduled = false; - // Schedule queue processing without starvation. - // Uses a short timeout to allow out-of-order async decodes (FLAC) to batch. - // TODO: Consider a "max-wait" watchdog if timer throttling/clamping causes excessive scheduling latency. private scheduleQueueProcessing(): void { if (this.queueProcessScheduled) { return; @@ -1649,231 +1227,42 @@ export class AudioProcessor { } } - // Queue Opus packet to native decoder for async decoding (non-blocking) - private queueToNativeOpusDecoder( - audioData: ArrayBuffer, - serverTimeUs: number, - generation: number, - ): boolean { - if ( - !this.webCodecsDecoder || - this.webCodecsDecoder.state !== "configured" - ) { - return false; - } - - try { - this.nativeDecoderQueue.push({ - serverTimeUs, - generation, - }); - - const chunk = new EncodedAudioChunk({ - type: "key", // Opus packets are self-contained - // Keep server time as timestamp for easier debugging/inspection. - timestamp: serverTimeUs, - data: audioData, - }); - - // Queue for async decoding (non-blocking) - this.webCodecsDecoder.decode(chunk); - return true; - } catch (error) { - if (this.nativeDecoderQueue.length > 0) { - this.nativeDecoderQueue.pop(); - } - console.error("[NativeOpus] WebCodecs queue error:", error); - return false; - } - } - - // Decode using opus-encdec library (fallback) - private async decodeOpusWithEncdec( - audioData: ArrayBuffer, - format: StreamFormat, - ): Promise { - if (!this.audioContext) { - return null; - } - - try { - // Initialize fallback decoder if needed - await this.initOpusEncdecDecoder(format); - - // Decode the raw Opus packet - const uint8Array = new Uint8Array(audioData); - const decodedSamples: Float32Array[] = []; - - this.opusDecoder.decodeRaw(uint8Array, (samples: Float32Array) => { - // Copy samples since they're from WASM heap - decodedSamples.push(new Float32Array(samples)); - }); - - if (decodedSamples.length === 0) { - console.warn("[Opus] Fallback decoder produced no samples"); - return null; - } - - // Convert interleaved samples to AudioBuffer - const interleavedSamples = decodedSamples[0]; - const numFrames = interleavedSamples.length / format.channels; - - const audioBuffer = this.audioContext.createBuffer( - format.channels, - numFrames, - format.sample_rate, - ); - - // De-interleave samples into separate channels - for (let ch = 0; ch < format.channels; ch++) { - const channelData = audioBuffer.getChannelData(ch); - for (let i = 0; i < numFrames; i++) { - channelData[i] = interleavedSamples[i * format.channels + ch]; - } - } - - return audioBuffer; - } catch (error) { - console.error("[Opus] Decode error:", error); - return null; - } - } - - // Decode PCM audio data - private decodePCMData( - audioData: ArrayBuffer, - format: StreamFormat, - ): AudioBuffer | null { - if (!this.audioContext) return null; - - const bytesPerSample = (format.bit_depth || 16) / 8; - const dataView = new DataView(audioData); - const numSamples = - audioData.byteLength / (bytesPerSample * format.channels); + /** Accept a decoded audio chunk and queue it for synchronized playback. 
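+   * Chunks from a stale stream generation are dropped; accepted chunks are
+   * copied into an AudioBuffer and queued for ordered scheduling.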
*/ + handleDecodedChunk(chunk: DecodedAudioChunk): void { + if (!this.audioContext || !this.gainNode) return; + if (chunk.generation !== this.stateManager.streamGeneration) return; + const numChannels = chunk.samples.length; + const numFrames = chunk.samples[0].length; const audioBuffer = this.audioContext.createBuffer( - format.channels, - numSamples, - format.sample_rate, + numChannels, + numFrames, + chunk.sampleRate, ); - - // Decode PCM data (interleaved format) - for (let channel = 0; channel < format.channels; channel++) { - const channelData = audioBuffer.getChannelData(channel); - for (let i = 0; i < numSamples; i++) { - const offset = (i * format.channels + channel) * bytesPerSample; - let sample = 0; - - if (format.bit_depth === 16) { - sample = dataView.getInt16(offset, true) / 32768.0; - } else if (format.bit_depth === 24) { - // 24-bit is stored in 3 bytes (little-endian) - const byte1 = dataView.getUint8(offset); - const byte2 = dataView.getUint8(offset + 1); - const byte3 = dataView.getUint8(offset + 2); - // Reconstruct as signed 24-bit value - let int24 = (byte3 << 16) | (byte2 << 8) | byte1; - // Sign extend if necessary - if (int24 & 0x800000) { - int24 |= 0xff000000; - } - sample = int24 / 8388608.0; - } else if (format.bit_depth === 32) { - sample = dataView.getInt32(offset, true) / 2147483648.0; - } - - channelData[i] = sample; - } - } - - return audioBuffer; - } - - // Handle binary audio message - async handleBinaryMessage(data: ArrayBuffer): Promise { - const format = this.stateManager.currentStreamFormat; - if (!format) { - console.warn("Sendspin: Received audio chunk but no stream format set"); - return; + for (let ch = 0; ch < numChannels; ch++) { + audioBuffer.getChannelData(ch).set(chunk.samples[ch]); } - if (!this.audioContext) { - console.warn("Sendspin: Received audio chunk but no audio context"); - return; - } - if (!this.gainNode) { - console.warn("Sendspin: Received audio chunk but no gain node"); - return; - } - - // Capture stream generation before async decode - const generation = this.stateManager.streamGeneration; - - // First byte contains role type and message slot - // Spec: bits 7-2 identify role type (6 bits), bits 1-0 identify message slot (2 bits) - const firstByte = new Uint8Array(data)[0]; - - // Type 4 is audio chunk (Player role, slot 0) - IDs 4-7 are player role - if (firstByte === 4) { - // Next 8 bytes are server timestamp in microseconds (big-endian int64) - const timestampView = new DataView(data, 1, 8); - // Read as big-endian int64 and convert to number - const serverTimeUs = Number(timestampView.getBigInt64(0, false)); - - // Rest is audio data - const audioData = data.slice(9); - // For Opus: use native decoder (non-blocking async path) - if (format.codec === "opus" && this.useNativeOpus) { - await this.initWebCodecsDecoder(format); - - if (this.useNativeOpus && this.webCodecsDecoder) { - if ( - this.queueToNativeOpusDecoder(audioData, serverTimeUs, generation) - ) { - return; // Async path - callback handles queue - } - // Fall through to fallback on error - } - } - - // Fallback decode path (PCM, FLAC, or Opus via opus-encdec) - const audioBuffer = await this.decodeAudioData(audioData, format); - - if (audioBuffer) { - // Check if stream generation changed during async decode - if (generation !== this.stateManager.streamGeneration) { - return; - } - - // Add to queue for ordered playback - this.audioBufferQueue.push({ - buffer: audioBuffer, - serverTime: serverTimeUs, - generation: generation, - }); + 
this.audioBufferQueue.push({ + buffer: audioBuffer, + serverTime: chunk.serverTimeUs, + generation: chunk.generation, + }); - this.scheduleQueueProcessing(); - } else { - console.error("Sendspin: Failed to decode audio buffer"); - } - } + this.scheduleQueueProcessing(); } - // Process the audio queue and schedule chunks in order processAudioQueue(): void { if (!this.audioContext || !this.gainNode) return; if (this.audioContext.state !== "running") return; - // Filter out any chunks from old streams (safety check) const currentGeneration = this.stateManager.streamGeneration; this.audioBufferQueue = this.audioBufferQueue.filter( (chunk) => chunk.generation === currentGeneration, ); - // Sort queue by server timestamp to ensure proper ordering this.audioBufferQueue.sort((a, b) => a.serverTime - b.serverTime); - // Don't schedule until time sync is ready if (!this.timeFilter.is_synchronized) { return; } @@ -1911,7 +1300,6 @@ export class AudioProcessor { } } - // Schedule chunks until we have enough future audio to survive short JS throttling. while (this.audioBufferQueue.length > 0) { const scheduledAheadSec = this.getScheduledAheadSec( audioContextRawTimeSec, @@ -1929,7 +1317,6 @@ export class AudioProcessor { let scheduleTime: number; let playbackRate: number; - // Always compute the drift-corrected target time const targetPlaybackTime = this.computeTargetPlaybackTime( chunk.serverTime, audioContextTime, @@ -1937,7 +1324,6 @@ export class AudioProcessor { outputLatencySec, ); - // First chunk or after a gap: calculate from server timestamp if (this.nextPlaybackTime === 0 || this.lastScheduledServerTime === 0) { this.armHardResyncStartupGrace(nowMs); playbackTime = targetPlaybackTime; @@ -1953,22 +1339,16 @@ export class AudioProcessor { playbackRate = 1.0; chunk.buffer = this.copyBuffer(chunk.buffer); } else { - // Subsequent chunks: schedule back-to-back for seamless playback - // Check if this chunk is contiguous with the last one const expectedServerTime = this.lastScheduledServerTime; const serverGapUs = chunk.serverTime - expectedServerTime; const serverGapSec = serverGapUs / 1_000_000; if (Math.abs(serverGapSec) < 0.1) { - // Chunk is contiguous (within 100ms) - // Calculate sync error (positive = behind target, negative = ahead) const syncErrorSec = this.nextPlaybackTime - targetPlaybackTime; const syncErrorMs = syncErrorSec * 1000; - // Apply EMA smoothing to filter jitter - use smoothed value for corrections const correctionErrorMs = this.applySyncErrorEma(syncErrorMs); - // Get thresholds for current correction mode const thresholds = CORRECTION_THRESHOLDS[this._correctionMode]; const canUseHardResync = this.canUseHardResync(nowMs); @@ -1976,7 +1356,6 @@ export class AudioProcessor { Math.abs(correctionErrorMs) > thresholds.resyncAboveMs && canUseHardResync ) { - // Tier 4: Hard resync if sync error exceeds threshold this.noteHardResync(nowMs); this.resyncCount++; this._intervalResyncCount++; @@ -1989,8 +1368,6 @@ export class AudioProcessor { this.lastSamplesAdjusted = 0; chunk.buffer = this.copyBuffer(chunk.buffer); } else if (Math.abs(correctionErrorMs) > thresholds.resyncAboveMs) { - // We cannot hard resync right now because startup grace or the - // cooldown is active, so use the strongest smooth correction instead. 
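+            // Startup grace or cooldown blocks a hard resync, so fall back to
+            // the strongest smooth correction (maximum playback-rate nudge).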
playbackTime = this.nextPlaybackTime; scheduleTime = this.nextScheduleTime; playbackRate = Number.isFinite(thresholds.rate2AboveMs) @@ -2003,7 +1380,6 @@ export class AudioProcessor { this.lastSamplesAdjusted = 0; chunk.buffer = this.copyBuffer(chunk.buffer); } else if (Math.abs(correctionErrorMs) < thresholds.deadbandBelowMs) { - // Tier 1: Within deadband - no correction needed playbackTime = this.nextPlaybackTime; scheduleTime = this.nextScheduleTime; playbackRate = 1.0; @@ -2011,7 +1387,6 @@ export class AudioProcessor { this.lastSamplesAdjusted = 0; chunk.buffer = this.copyBuffer(chunk.buffer); } else if (Math.abs(correctionErrorMs) <= thresholds.samplesBelowMs) { - // Tier 2: Small error - use single sample insertion/deletion playbackTime = this.nextPlaybackTime; scheduleTime = this.nextScheduleTime; playbackRate = 1.0; @@ -2023,7 +1398,6 @@ export class AudioProcessor { this.currentCorrectionMethod = "samples"; this.lastSamplesAdjusted = samplesToAdjust; } else { - // Tier 3: Medium error - use playback rate adjustment playbackTime = this.nextPlaybackTime; scheduleTime = this.nextScheduleTime; const absErrorMs = Math.abs(correctionErrorMs); @@ -2050,7 +1424,6 @@ export class AudioProcessor { chunk.buffer = this.copyBuffer(chunk.buffer); } } else { - // Gap detected in server timestamps - hard resync this.noteHardResync(nowMs); this.resyncCount++; this._intervalResyncCount++; @@ -2064,13 +1437,9 @@ export class AudioProcessor { } } - // Track current rate for debugging this.currentPlaybackRate = playbackRate; - // Drop only if we already missed the logical playback time. Missing the - // early-start window just means we apply less sync delay for this chunk. if (playbackTime < audioContextRawTimeSec) { - // Reset seamless tracking since we dropped a chunk this.nextPlaybackTime = 0; this.nextScheduleTime = 0; this.lastScheduledServerTime = 0; @@ -2086,12 +1455,10 @@ export class AudioProcessor { const source = this.audioContext.createBufferSource(); source.buffer = chunk.buffer; - source.playbackRate.value = playbackRate; // Apply rate correction + source.playbackRate.value = playbackRate; source.connect(this.gainNode); source.start(effectiveScheduleTime); - // Track for seamless scheduling of next chunk - // Account for actual duration with playback rate adjustment const actualDuration = chunk.buffer.duration / playbackRate; this.nextPlaybackTime = effectivePlaybackTime + actualDuration; this.nextScheduleTime = effectiveScheduleTime + actualDuration; @@ -2135,7 +1502,6 @@ export class AudioProcessor { ); } - // Start audio element playback (for MediaSession) startAudioElement(): void { if (this.outputMode === "media-element" && this.audioElement) { if (this.audioElement.paused) { @@ -2144,24 +1510,19 @@ export class AudioProcessor { }); } } - // No-op for direct mode } - // Stop audio element playback (for MediaSession) stopAudioElement(): void { if (this.outputMode === "media-element" && this.audioElement) { if (!this.audioElement.paused) { this.audioElement.pause(); } } - // No-op for direct mode } - // Clear all audio buffers and scheduled sources clearBuffers(): void { this.stopRecorrectionMonitor(); - // Stop all scheduled audio sources this.scheduledSources.forEach((entry) => { try { entry.source.stop(); @@ -2171,7 +1532,6 @@ export class AudioProcessor { }); this.scheduledSources = []; - // Clear buffers and reset scheduling state this.audioBufferQueue = []; if (this.scheduleTimeout !== null) { clearTimeout(this.scheduleTimeout); @@ -2179,23 +1539,8 @@ export class 
AudioProcessor { } this.queueProcessScheduled = false; - // Drop any pending native Opus decode outputs from the previous stream. - // We close and recreate the decoder on next use to ensure stale callbacks - // cannot be correlated with new-stream metadata. - this.nativeDecoderQueue = []; - try { - this.webCodecsDecoder?.close(); - } catch { - // Ignore close errors - } - this.webCodecsDecoder = null; - this.webCodecsDecoderReady = null; - this.webCodecsFormat = null; - - // Reset stream anchors this.stateManager.resetStreamAnchors(); - // Reset sync stats and timing sources this.resetScheduledPlaybackState(); this.resyncCount = 0; this.lastRawOutputLatencyUs = 0; @@ -2205,7 +1550,6 @@ export class AudioProcessor { this.resetOutputTimestampValidation(); } - // Cleanup and close AudioContext close(): void { this.clearBuffers(); @@ -2214,32 +1558,9 @@ export class AudioProcessor { this.audioContext = null; } - // Clean up native Opus decoder - if (this.webCodecsDecoder) { - try { - this.webCodecsDecoder.close(); - } catch (e) { - // Ignore if already closed - } - this.webCodecsDecoder = null; - this.webCodecsDecoderReady = null; - this.webCodecsFormat = null; - } - - // Clean up fallback Opus decoder - if (this.opusDecoder) { - this.opusDecoder = null; - this.opusDecoderModule = null; - this.opusDecoderReady = null; - } - - // Reset native Opus flag for next session - this.useNativeOpus = true; - this.gainNode = null; this.streamDestination = null; - // Always stop and clear the audio element on full disconnect/teardown. if (this.outputMode === "media-element" && this.audioElement) { this.audioElement.pause(); this.audioElement.srcObject = null; @@ -2254,7 +1575,6 @@ export class AudioProcessor { } } - // Get AudioContext for external use getAudioContext(): AudioContext | null { return this.audioContext; } diff --git a/src/core.ts b/src/core.ts new file mode 100644 index 0000000..1142b8d --- /dev/null +++ b/src/core.ts @@ -0,0 +1,321 @@ +/** + * SendspinCore: Protocol + decoding layer. + * + * Manages the WebSocket connection, Sendspin protocol, time synchronization, + * state management, and audio decoding. Emits decoded PCM audio chunks that + * can be consumed by SendspinPlayer for playback, or by visualization/analysis + * tools directly. 
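+ *
+ * Example (illustrative sketch; the URL and handler body are assumptions):
+ *
+ *   const core = new SendspinCore({ baseUrl: "http://192.168.1.100:8095" });
+ *   core.onAudioData = (chunk) => {
+ *     // chunk.samples: one Float32Array per channel at chunk.sampleRate
+ *   };
+ *   await core.connect();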
+ */ + +import { SendspinDecoder } from "./audio-decoder"; +import { ProtocolHandler } from "./protocol-handler"; +import { StateManager } from "./state-manager"; +import { WebSocketManager } from "./websocket-manager"; +import { SendspinTimeFilter } from "./time-filter"; +import type { + SendspinCoreConfig, + DecodedAudioChunk, + StreamFormat, + StreamHandler, + GoodbyeReason, + PlayerState, + ControllerCommand, + ControllerCommands, + ServerStatePayload, + GroupUpdatePayload, +} from "./types"; + +function generateRandomId(): string { + return Math.random().toString(36).substring(2, 6); +} + +export class SendspinCore implements StreamHandler { + private wsManager: WebSocketManager; + private protocolHandler: ProtocolHandler; + private stateManager: StateManager; + private timeFilter: SendspinTimeFilter; + private decoder: SendspinDecoder; + + private config: SendspinCoreConfig; + private wsUrl: string = ""; + private _syncDelayMs: number; + + // Stream events — consumers (e.g., SendspinPlayer) subscribe to these + private _onAudioData?: (chunk: DecodedAudioChunk) => void; + private _onStreamStart?: (format: StreamFormat, isFormatUpdate: boolean) => void; + private _onStreamClear?: () => void; + private _onStreamEnd?: () => void; + private _onVolumeUpdate?: () => void; + private _onSyncDelayChange?: (delayMs: number) => void; + + constructor(config: SendspinCoreConfig) { + const randomId = generateRandomId(); + const playerId = config.playerId ?? `sendspin-js-${randomId}`; + const clientName = config.clientName ?? `Sendspin JS Client (${randomId})`; + + this.config = { ...config, playerId, clientName }; + this._syncDelayMs = Math.max(0, Math.min(5000, Math.round(config.syncDelay ?? 0))); + + this.timeFilter = new SendspinTimeFilter(0, 1.1, 2.0, 1e-12); + this.stateManager = new StateManager(config.onStateChange); + + this.decoder = new SendspinDecoder( + (chunk) => this._onAudioData?.(chunk), + () => this.stateManager.streamGeneration, + ); + + this.wsManager = new WebSocketManager(); + + this.protocolHandler = new ProtocolHandler( + playerId, + this.wsManager, + this, // this class implements StreamHandler + this.stateManager, + this.timeFilter, + { + clientName, + codecs: config.codecs, + bufferCapacity: config.bufferCapacity, + useHardwareVolume: config.useHardwareVolume, + onVolumeCommand: config.onVolumeCommand, + onDelayCommand: config.onDelayCommand, + getExternalVolume: config.getExternalVolume, + }, + ); + } + + // ======================================== + // StreamHandler implementation + // (called by ProtocolHandler) + // ======================================== + + handleBinaryMessage(data: ArrayBuffer): void { + const format = this.stateManager.currentStreamFormat; + if (!format) { + console.warn("Sendspin: Received audio chunk but no stream format set"); + return; + } + const generation = this.stateManager.streamGeneration; + this.decoder.handleBinaryMessage(data, format, generation); + } + + handleStreamStart(format: StreamFormat, isFormatUpdate: boolean): void { + if (!isFormatUpdate) { + this.decoder.clearState(); + } + this._onStreamStart?.(format, isFormatUpdate); + } + + handleStreamClear(): void { + this.decoder.clearState(); + this._onStreamClear?.(); + } + + handleStreamEnd(): void { + this.decoder.clearState(); + this._onStreamEnd?.(); + } + + handleVolumeUpdate(): void { + this._onVolumeUpdate?.(); + } + + handleSyncDelayChange(delayMs: number): void { + this._syncDelayMs = Math.max(0, Math.min(5000, Math.round(delayMs))); + 
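+    // Forward the clamped value; SendspinPlayer wires this callback to the
+    // scheduler's setSyncDelay.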
this._onSyncDelayChange?.(this._syncDelayMs); + } + + getSyncDelayMs(): number { + return this._syncDelayMs; + } + + // ======================================== + // Event registration + // ======================================== + + set onAudioData(cb: ((chunk: DecodedAudioChunk) => void) | undefined) { + this._onAudioData = cb; + } + set onStreamStart(cb: ((format: StreamFormat, isFormatUpdate: boolean) => void) | undefined) { + this._onStreamStart = cb; + } + set onStreamClear(cb: (() => void) | undefined) { + this._onStreamClear = cb; + } + set onStreamEnd(cb: (() => void) | undefined) { + this._onStreamEnd = cb; + } + set onVolumeUpdate(cb: (() => void) | undefined) { + this._onVolumeUpdate = cb; + } + set onSyncDelayChange(cb: ((delayMs: number) => void) | undefined) { + this._onSyncDelayChange = cb; + } + + // ======================================== + // Connection + // ======================================== + + async connect(): Promise { + const url = new URL(this.config.baseUrl); + const wsProtocol = url.protocol === "https:" ? "wss:" : "ws:"; + this.wsUrl = `${wsProtocol}//${url.host}/sendspin`; + + await this.wsManager.connect( + this.wsUrl, + () => { + console.log("Sendspin: Using player_id:", this.config.playerId); + this.protocolHandler.sendClientHello(); + }, + (event: MessageEvent) => { + this.protocolHandler.handleMessage(event); + }, + (error: Event) => { + console.error("Sendspin: WebSocket error", error); + }, + () => { + this.protocolHandler.stopTimeSync(); + console.log("Sendspin: Connection closed"); + }, + ); + } + + disconnect(reason: GoodbyeReason = "shutdown"): void { + if (this.wsManager.isConnected()) { + this.protocolHandler.sendGoodbye(reason); + } + this.protocolHandler.stopTimeSync(); + this.stateManager.clearAllIntervals(); + this.wsManager.disconnect(); + this.decoder.close(); + this.timeFilter.reset(); + this.stateManager.reset(); + } + + // ======================================== + // Volume / Mute + // ======================================== + + setVolume(volume: number): void { + this.stateManager.volume = volume; + this._onVolumeUpdate?.(); + this.protocolHandler.sendStateUpdate(); + } + + setMuted(muted: boolean): void { + this.stateManager.muted = muted; + this._onVolumeUpdate?.(); + this.protocolHandler.sendStateUpdate(); + } + + // ======================================== + // Sync delay + // ======================================== + + setSyncDelay(delayMs: number): void { + this._syncDelayMs = Math.max(0, Math.min(5000, Math.round(delayMs))); + this._onSyncDelayChange?.(this._syncDelayMs); + this.protocolHandler.sendStateUpdate(); + } + + // ======================================== + // Controller commands + // ======================================== + + sendCommand( + command: T, + params: ControllerCommands[T], + ): void { + const supportedCommands = + this.stateManager.serverState.controller?.supported_commands; + if (supportedCommands && !supportedCommands.includes(command)) { + throw new Error( + `Command '${command}' is not supported by the server. 
` + + `Supported commands: ${supportedCommands.join(", ")}`, + ); + } + this.protocolHandler.sendCommand(command, params); + } + + // ======================================== + // State getters + // ======================================== + + get isPlaying(): boolean { + return this.stateManager.isPlaying; + } + + get volume(): number { + return this.stateManager.volume; + } + + get muted(): boolean { + return this.stateManager.muted; + } + + get playerState(): PlayerState { + return this.stateManager.playerState; + } + + get currentFormat(): StreamFormat | null { + return this.stateManager.currentStreamFormat; + } + + get isConnected(): boolean { + return this.wsManager.isConnected(); + } + + get timeSyncInfo(): { synced: boolean; offset: number; error: number } { + return { + synced: this.timeFilter.is_synchronized, + offset: Math.round(this.timeFilter.offset / 1000), + error: Math.round(this.timeFilter.error / 1000), + }; + } + + getCurrentServerTimeUs(): number { + return this.timeFilter.computeServerTime( + Math.floor(performance.now() * 1000), + ); + } + + get trackProgress(): { + positionMs: number; + durationMs: number; + playbackSpeed: number; + } | null { + const metadata = this.stateManager.serverState.metadata; + if (!metadata?.progress || metadata.timestamp === undefined) { + return null; + } + + const serverTimeUs = this.getCurrentServerTimeUs(); + const elapsedUs = serverTimeUs - metadata.timestamp; + const positionMs = + metadata.progress.track_progress + + (elapsedUs * metadata.progress.playback_speed) / 1_000_000; + + return { + positionMs: Math.max( + 0, + Math.min(positionMs, metadata.progress.track_duration), + ), + durationMs: metadata.progress.track_duration, + playbackSpeed: metadata.progress.playback_speed / 1000, + }; + } + + // ======================================== + // Internal accessors (for SendspinPlayer) + // ======================================== + + /** @internal */ + get _stateManager(): StateManager { + return this.stateManager; + } + + /** @internal */ + get _timeFilter(): SendspinTimeFilter { + return this.timeFilter; + } +} diff --git a/src/index.ts b/src/index.ts index bf0eb17..2549b08 100644 --- a/src/index.ts +++ b/src/index.ts @@ -1,8 +1,5 @@ -import { AudioProcessor } from "./audio-processor"; -import { ProtocolHandler } from "./protocol-handler"; -import { StateManager } from "./state-manager"; -import { WebSocketManager } from "./websocket-manager"; -import { SendspinTimeFilter } from "./time-filter"; +import { SendspinCore } from "./core"; +import { AudioScheduler } from "./audio-scheduler"; import { SILENT_AUDIO_SRC } from "./silent-audio.generated"; import type { SendspinPlayerConfig, @@ -62,35 +59,17 @@ function getDefaultSyncDelay(): number { return 200; } -function generateRandomId(): string { - return Math.random().toString(36).substring(2, 6); -} - export class SendspinPlayer { - private wsManager: WebSocketManager; - private audioProcessor: AudioProcessor; - private protocolHandler: ProtocolHandler; - private stateManager: StateManager; - private timeFilter: SendspinTimeFilter; - - private config: SendspinPlayerConfig; - private wsUrl: string = ""; + private core: SendspinCore; + private scheduler: AudioScheduler; private ownsAudioElement = false; constructor(config: SendspinPlayerConfig) { - // Apply defaults for playerId and clientName (share same random ID) - const randomId = generateRandomId(); - const playerId = config.playerId ?? `sendspin-js-${randomId}`; - const clientName = config.clientName ?? 
`Sendspin JS Client (${randomId})`; - // Auto-detect platform const isAndroid = detectIsAndroid(); const isMobile = detectIsMobile(); - // Determine output mode: - // - If audioElement provided, use media-element - // - If mobile (iOS/Android), default to media-element - // - Otherwise, use direct + // Determine output mode const outputMode = config.audioElement || isMobile ? "media-element" : "direct"; this.ownsAudioElement = @@ -102,126 +81,94 @@ export class SendspinPlayer { ); } - // Store config with resolved defaults - this.config = { - ...config, - playerId, - clientName, - }; - - // Initialize time filter (shared between audio processor and protocol handler) - this.timeFilter = new SendspinTimeFilter(0, 1.1, 2.0, 1e-12); - - // Initialize state manager with callback - this.stateManager = new StateManager(config.onStateChange); - - // Initialize audio processor + const syncDelay = config.syncDelay ?? getDefaultSyncDelay(); + + // Create core (protocol + decoding) + this.core = new SendspinCore({ + playerId: config.playerId, + baseUrl: config.baseUrl, + clientName: config.clientName, + codecs: config.codecs, + bufferCapacity: + config.bufferCapacity ?? + (outputMode === "media-element" ? 1024 * 1024 * 5 : 1024 * 1024 * 1.5), + syncDelay, + useHardwareVolume: config.useHardwareVolume, + onVolumeCommand: config.onVolumeCommand, + onDelayCommand: config.onDelayCommand, + getExternalVolume: config.getExternalVolume, + onStateChange: config.onStateChange, + }); + + // Create scheduler (Web Audio playback) let storage: SendspinStorage | null = null; if (config.storage !== undefined) { storage = config.storage; } else if (typeof localStorage !== "undefined") { storage = localStorage; } - this.audioProcessor = new AudioProcessor( - this.stateManager, - this.timeFilter, + + this.scheduler = new AudioScheduler( + this.core._stateManager, + this.core._timeFilter, outputMode, config.audioElement, isAndroid, this.ownsAudioElement, isAndroid ? SILENT_AUDIO_SRC : undefined, - config.syncDelay ?? getDefaultSyncDelay(), + syncDelay, config.useHardwareVolume ?? false, config.correctionMode ?? "sync", storage, config.useOutputLatencyCompensation ?? 
true, ); - // Initialize WebSocket manager - this.wsManager = new WebSocketManager(); - - // Initialize protocol handler - this.protocolHandler = new ProtocolHandler( - playerId, - this.wsManager, - this.audioProcessor, - this.stateManager, - this.timeFilter, - { - clientName, - codecs: config.codecs, - bufferCapacity: config.bufferCapacity, - useHardwareVolume: config.useHardwareVolume, - onVolumeCommand: config.onVolumeCommand, - onDelayCommand: config.onDelayCommand, - getExternalVolume: config.getExternalVolume, - useOutputLatencyCompensation: config.useOutputLatencyCompensation, - }, - ); + // Wire core events to scheduler + this.core.onAudioData = (chunk) => { + this.scheduler.handleDecodedChunk(chunk); + }; + + this.core.onStreamStart = (format, isFormatUpdate) => { + this.scheduler.initAudioContext(); + this.scheduler.resumeAudioContext(); + if (!isFormatUpdate) { + this.scheduler.clearBuffers(); + } + this.scheduler.startAudioElement(); + }; + + this.core.onStreamClear = () => { + this.scheduler.clearBuffers(); + }; + + this.core.onStreamEnd = () => { + this.scheduler.clearBuffers(); + this.scheduler.stopAudioElement(); + }; + + this.core.onVolumeUpdate = () => { + this.scheduler.updateVolume(); + }; + + this.core.onSyncDelayChange = (delayMs) => { + this.scheduler.setSyncDelay(delayMs); + }; } // Connect to Sendspin server async connect(): Promise { - // Build WebSocket URL - const url = new URL(this.config.baseUrl); - const wsProtocol = url.protocol === "https:" ? "wss:" : "ws:"; - this.wsUrl = `${wsProtocol}//${url.host}/sendspin`; - - // Connect to WebSocket - await this.wsManager.connect( - this.wsUrl, - // onOpen - () => { - console.log("Sendspin: Using player_id:", this.config.playerId); - this.protocolHandler.sendClientHello(); - }, - // onMessage - (event: MessageEvent) => { - this.protocolHandler.handleMessage(event); - }, - // onError - (error: Event) => { - console.error("Sendspin: WebSocket error", error); - }, - // onClose - () => { - this.protocolHandler.stopTimeSync(); - console.log("Sendspin: Connection closed"); - }, - ); + return this.core.connect(); } /** * Disconnect from Sendspin server * @param reason - Optional reason for disconnecting (default: 'shutdown') - * - 'another_server': Switching to a different Sendspin server - * - 'shutdown': Client is shutting down - * - 'restart': Client is restarting and will reconnect - * - 'user_request': User explicitly requested to disconnect */ disconnect(reason: GoodbyeReason = "shutdown"): void { - // Send goodbye message if connected - if (this.wsManager.isConnected()) { - this.protocolHandler.sendGoodbye(reason); - } - - // Stop time sync burst scheduler and in-flight timeout state - this.protocolHandler.stopTimeSync(); - - // Clear intervals - this.stateManager.clearAllIntervals(); - - // Disconnect WebSocket - this.wsManager.disconnect(); - - // Close audio processor - this.audioProcessor.close(); - - // Reset time filter - this.timeFilter.reset(); + this.core.disconnect(reason); - // Reset state - this.stateManager.reset(); + // Close scheduler + this.scheduler.close(); // Reset MediaSession playbackState (if available) if (typeof navigator !== "undefined" && navigator.mediaSession) { @@ -232,34 +179,24 @@ export class SendspinPlayer { // Set volume (0-100) setVolume(volume: number): void { - this.stateManager.volume = volume; - this.audioProcessor.updateVolume(); - this.protocolHandler.sendStateUpdate(); + this.core.setVolume(volume); } // Set muted state setMuted(muted: boolean): void { - 
this.stateManager.muted = muted; - this.audioProcessor.updateVolume(); - this.protocolHandler.sendStateUpdate(); + this.core.setMuted(muted); } - // Set static delay (in milliseconds, 0-5000). Positive values schedule playback earlier. + // Set static delay (in milliseconds, 0-5000) setSyncDelay(delayMs: number): void { - this.audioProcessor.setSyncDelay(delayMs); - this.protocolHandler.sendStateUpdate(); + this.core.setSyncDelay(delayMs); } /** * Set the sync correction mode at runtime. - * @param mode - The correction mode to use: - * - "sync": Multi-device sync, may use pitch-changing playback-rate adjustments for faster convergence. - * - "quality": No playback-rate changes; uses sample fixes and tighter resyncs, so expect fewer adjustments but occasional jumps. Starts out of sync until the clock converges. Not recommended for bad networks. - * - "quality-local": Avoids playback-rate changes; may drift vs. other players and only resyncs - * as a last resort. */ setCorrectionMode(mode: CorrectionMode): void { - this.audioProcessor.setCorrectionMode(mode); + this.scheduler.setCorrectionMode(mode); } // ======================================== @@ -268,87 +205,52 @@ export class SendspinPlayer { /** * Send a controller command to the server. - * Use this for playback control when the server manages the audio source. - * - * @throws Error if the command is not supported by the server - * - * @example - * // Simple commands (no parameters) - * player.sendCommand('play'); - * player.sendCommand('pause'); - * player.sendCommand('next'); - * player.sendCommand('previous'); - * player.sendCommand('stop'); - * player.sendCommand('shuffle'); - * player.sendCommand('unshuffle'); - * player.sendCommand('repeat_off'); - * player.sendCommand('repeat_one'); - * player.sendCommand('repeat_all'); - * player.sendCommand('switch'); - * - * // Commands with required parameters - * player.sendCommand('volume', { volume: 50 }); - * player.sendCommand('mute', { mute: true }); */ sendCommand( command: T, params: ControllerCommands[T], ): void { - const supportedCommands = - this.stateManager.serverState.controller?.supported_commands; - if (supportedCommands && !supportedCommands.includes(command)) { - throw new Error( - `Command '${command}' is not supported by the server. 
` + - `Supported commands: ${supportedCommands.join(", ")}`, - ); - } - this.protocolHandler.sendCommand(command, params); + this.core.sendCommand(command, params); } // Getters for reactive state get isPlaying(): boolean { - return this.stateManager.isPlaying; + return this.core.isPlaying; } get volume(): number { - return this.stateManager.volume; + return this.core.volume; } get muted(): boolean { - return this.stateManager.muted; + return this.core.muted; } get playerState(): PlayerState { - return this.stateManager.playerState; + return this.core.playerState; } get currentFormat(): StreamFormat | null { - return this.stateManager.currentStreamFormat; + return this.core.currentFormat; } get isConnected(): boolean { - return this.wsManager.isConnected(); + return this.core.isConnected; } // Get current correction mode get correctionMode(): CorrectionMode { - return this.audioProcessor.correctionMode; + return this.scheduler.correctionMode; } // Time sync info for debugging get timeSyncInfo(): { synced: boolean; offset: number; error: number } { - return { - synced: this.timeFilter.is_synchronized, - offset: Math.round(this.timeFilter.offset / 1000), // ms - error: Math.round(this.timeFilter.error / 1000), // ms - }; + return this.core.timeSyncInfo; } /** Get current server time in microseconds using synchronized clock */ getCurrentServerTimeUs(): number { - return this.timeFilter.computeServerTime( - Math.floor(performance.now() * 1000), - ); + return this.core.getCurrentServerTimeUs(); } /** Get current track progress with real-time position calculation */ @@ -357,27 +259,7 @@ export class SendspinPlayer { durationMs: number; playbackSpeed: number; } | null { - const metadata = this.stateManager.serverState.metadata; - if (!metadata?.progress || metadata.timestamp === undefined) { - return null; - } - - const serverTimeUs = this.getCurrentServerTimeUs(); - const elapsedUs = serverTimeUs - metadata.timestamp; - // playback_speed is multiplied by 1000 in protocol (1000 = normal speed) - const positionMs = - metadata.progress.track_progress + - (elapsedUs * metadata.progress.playback_speed) / 1_000_000; - - return { - positionMs: Math.max( - 0, - Math.min(positionMs, metadata.progress.track_duration), - ), - durationMs: metadata.progress.track_duration, - // Normalize to float (1.0 = normal speed) - playbackSpeed: metadata.progress.playback_speed / 1000, - }; + return this.core.trackProgress; } // Sync info for debugging/display @@ -391,13 +273,16 @@ export class SendspinPlayer { samplesAdjusted: number; correctionMode: CorrectionMode; } { - return this.audioProcessor.syncInfo; + return this.scheduler.syncInfo; } } // Re-export types for convenience export * from "./types"; export { SendspinTimeFilter } from "./time-filter"; +export { SendspinCore } from "./core"; +export { SendspinDecoder } from "./audio-decoder"; +export { AudioScheduler } from "./audio-scheduler"; // Export platform detection utilities export { detectIsAndroid, detectIsIOS, detectIsMobile, getDefaultSyncDelay }; diff --git a/src/protocol-handler.ts b/src/protocol-handler.ts index 26cb268..0691f44 100644 --- a/src/protocol-handler.ts +++ b/src/protocol-handler.ts @@ -20,7 +20,7 @@ import type { StreamStart, SupportedFormat, } from "./types"; -import type { AudioProcessor } from "./audio-processor"; +import type { StreamHandler } from "./types"; import type { StateManager } from "./state-manager"; import type { WebSocketManager } from "./websocket-manager"; @@ -67,7 +67,7 @@ export class ProtocolHandler { constructor( 
private playerId: string, private wsManager: WebSocketManager, - private audioProcessor: AudioProcessor, + private streamHandler: StreamHandler, private stateManager: StateManager, private timeFilter: SendspinTimeFilter, config: ProtocolHandlerConfig = {}, @@ -91,11 +91,11 @@ export class ProtocolHandler { this.handleServerMessage(message); } else if (event.data instanceof ArrayBuffer) { // Binary message (audio chunk) - this.audioProcessor.handleBinaryMessage(event.data); + this.streamHandler.handleBinaryMessage(event.data); } else if (event.data instanceof Blob) { // Convert Blob to ArrayBuffer event.data.arrayBuffer().then((buffer) => { - this.audioProcessor.handleBinaryMessage(buffer); + this.streamHandler.handleBinaryMessage(buffer); }); } } @@ -332,7 +332,6 @@ export class ProtocolHandler { this.sendNextTimeSyncBurstProbe(); } - // Handle stream start (also used for format updates per new spec) private handleStreamStart(message: StreamStart): void { const isFormatUpdate = this.stateManager.currentStreamFormat !== null; @@ -350,61 +349,37 @@ export class ProtocolHandler { `BitDepth=${this.stateManager.currentStreamFormat.bit_depth}bit`, ); - this.audioProcessor.initAudioContext(); - // Resume AudioContext if suspended (required for browser autoplay policies) - this.audioProcessor.resumeAudioContext(); - - if (!isFormatUpdate) { - // New stream: reset scheduling state and clear buffers - this.audioProcessor.clearBuffers(); - } - // Format update: don't clear buffers (per new spec) + this.streamHandler.handleStreamStart(this.stateManager.currentStreamFormat, isFormatUpdate); this.stateManager.isPlaying = true; - // Ensure audio element is playing for MediaSession - this.audioProcessor.startAudioElement(); - // Explicitly set playbackState for Android (if mediaSession available) if (typeof navigator !== "undefined" && navigator.mediaSession) { navigator.mediaSession.playbackState = "playing"; } } - // Handle stream clear (for seek operations) private handleStreamClear(message: StreamClear): void { const roles = message.payload.roles; - // If roles is undefined or includes 'player', clear player buffers if (!roles || roles.includes("player")) { console.log("Sendspin: Stream clear (seek)"); - this.audioProcessor.clearBuffers(); - // Note: Don't stop playing, don't clear format - just clear buffers + this.streamHandler.handleStreamClear(); } } - // Handle stream end private handleStreamEnd(message: StreamEnd): void { const roles = message.payload?.roles; - - // If roles is undefined or includes 'player', handle player stream end if (!roles || roles.includes("player")) { console.log("Sendspin: Stream ended"); - // Per spec: Stop playback and clear buffers - this.audioProcessor.clearBuffers(); + this.streamHandler.handleStreamEnd(); - // Clear format and reset state this.stateManager.currentStreamFormat = null; this.stateManager.isPlaying = false; - // Stop audio element (except on Android where silent loop continues) - this.audioProcessor.stopAudioElement(); - - // Explicitly set playbackState (if mediaSession available) if (typeof navigator !== "undefined" && navigator.mediaSession) { navigator.mediaSession.playbackState = "paused"; } - // Send state update to server this.sendStateUpdate(); } } @@ -419,7 +394,7 @@ export class ProtocolHandler { // Set volume command if (playerCommand.volume !== undefined) { this.stateManager.volume = playerCommand.volume; - this.audioProcessor.updateVolume(); + this.streamHandler.handleVolumeUpdate(); // Notify external handler for hardware volume if 
(this.useHardwareVolume && this.onVolumeCommand) { this.onVolumeCommand(playerCommand.volume, this.stateManager.muted); @@ -431,7 +406,7 @@ export class ProtocolHandler { // Mute/unmute command - uses boolean mute field if (playerCommand.mute !== undefined) { this.stateManager.muted = playerCommand.mute; - this.audioProcessor.updateVolume(); + this.streamHandler.handleVolumeUpdate(); // Notify external handler for hardware volume if (this.useHardwareVolume && this.onVolumeCommand) { this.onVolumeCommand(this.stateManager.volume, playerCommand.mute); @@ -443,7 +418,7 @@ export class ProtocolHandler { const delay = playerCommand.static_delay_ms; if (typeof delay === "number" && isFinite(delay)) { const clamped = Math.max(0, Math.min(5000, Math.round(delay))); - this.audioProcessor.setSyncDelay(clamped); + this.streamHandler.handleSyncDelayChange(clamped); this.onDelayCommand?.(clamped); } break; @@ -583,7 +558,7 @@ export class ProtocolHandler { muted = externalVol.muted; } - const syncDelayMs = this.audioProcessor.getSyncDelayMs(); + const syncDelayMs = this.streamHandler.getSyncDelayMs(); const staticDelayMs = Math.max(0, Math.min(5000, Math.round(syncDelayMs))); const message: ClientState = { From 1862e155d41782e141e20f7db96bc1692841a8c0 Mon Sep 17 00:00:00 2001 From: Claude Date: Sun, 12 Apr 2026 10:39:20 +0000 Subject: [PATCH 03/27] Support passing in an external WebSocket connection Add webSocket option to SendspinCoreConfig and SendspinPlayerConfig. When provided, the core adopts the existing socket via WebSocketManager.adopt() instead of creating one from baseUrl. Auto-reconnect is disabled for externally-managed sockets. This enables server-side proxying, conformance testing with mock sockets, and scenarios where the WebSocket is managed externally. https://claude.ai/code/session_018UYYEXUZVuQ2Z4Texa7W6m --- src/core.ts | 59 ++++++++++++++++++++++------------- src/index.ts | 1 + src/types.ts | 16 ++++++++++ src/websocket-manager.ts | 66 ++++++++++++++++++++++++++++++++++++++++ 4 files changed, 121 insertions(+), 21 deletions(-) diff --git a/src/core.ts b/src/core.ts index 1142b8d..55123dd 100644 --- a/src/core.ts +++ b/src/core.ts @@ -157,27 +157,44 @@ export class SendspinCore implements StreamHandler { // ======================================== async connect(): Promise { - const url = new URL(this.config.baseUrl); - const wsProtocol = url.protocol === "https:" ? 
"wss:" : "ws:"; - this.wsUrl = `${wsProtocol}//${url.host}/sendspin`; - - await this.wsManager.connect( - this.wsUrl, - () => { - console.log("Sendspin: Using player_id:", this.config.playerId); - this.protocolHandler.sendClientHello(); - }, - (event: MessageEvent) => { - this.protocolHandler.handleMessage(event); - }, - (error: Event) => { - console.error("Sendspin: WebSocket error", error); - }, - () => { - this.protocolHandler.stopTimeSync(); - console.log("Sendspin: Connection closed"); - }, - ); + const onOpen = () => { + console.log("Sendspin: Using player_id:", this.config.playerId); + this.protocolHandler.sendClientHello(); + }; + const onMessage = (event: MessageEvent) => { + this.protocolHandler.handleMessage(event); + }; + const onError = (error: Event) => { + console.error("Sendspin: WebSocket error", error); + }; + const onClose = () => { + this.protocolHandler.stopTimeSync(); + console.log("Sendspin: Connection closed"); + }; + + if (this.config.webSocket) { + // Adopt externally-managed WebSocket + this.wsManager.adopt( + this.config.webSocket, + onOpen, + onMessage, + onError, + onClose, + ); + } else { + // Create connection from baseUrl + const url = new URL(this.config.baseUrl); + const wsProtocol = url.protocol === "https:" ? "wss:" : "ws:"; + this.wsUrl = `${wsProtocol}//${url.host}/sendspin`; + + await this.wsManager.connect( + this.wsUrl, + onOpen, + onMessage, + onError, + onClose, + ); + } } disconnect(reason: GoodbyeReason = "shutdown"): void { diff --git a/src/index.ts b/src/index.ts index 2549b08..9bc1b4d 100644 --- a/src/index.ts +++ b/src/index.ts @@ -88,6 +88,7 @@ export class SendspinPlayer { playerId: config.playerId, baseUrl: config.baseUrl, clientName: config.clientName, + webSocket: config.webSocket, codecs: config.codecs, bufferCapacity: config.bufferCapacity ?? diff --git a/src/types.ts b/src/types.ts index e7da4b7..5981830 100644 --- a/src/types.ts +++ b/src/types.ts @@ -273,6 +273,14 @@ export interface SendspinPlayerConfig { /** Human-readable name for this player. Auto-generated if not provided. */ clientName?: string; + /** + * Pre-established WebSocket connection. + * When provided, the player adopts this socket instead of creating one from baseUrl. + * The socket must connect to the Sendspin /sendspin endpoint. + * Auto-reconnect is disabled for externally-managed sockets. + */ + webSocket?: WebSocket; + /** * HTMLAudioElement for media-element output mode. * Auto-created on mobile browsers if not provided. @@ -423,6 +431,14 @@ export interface SendspinCoreConfig { /** Buffer capacity in bytes. Defaults to 5MB. */ bufferCapacity?: number; + /** + * Pre-established WebSocket connection. + * When provided, the core adopts this socket instead of creating one from baseUrl. + * The socket must connect to the Sendspin /sendspin endpoint. + * Auto-reconnect is disabled for externally-managed sockets. + */ + webSocket?: WebSocket; + /** Static sync delay in milliseconds (0-5000). */ syncDelay?: number; diff --git a/src/websocket-manager.ts b/src/websocket-manager.ts index 449edc1..4d4e4d4 100644 --- a/src/websocket-manager.ts +++ b/src/websocket-manager.ts @@ -11,6 +11,72 @@ export class WebSocketManager { constructor() {} + /** + * Adopt an existing WebSocket connection. + * The caller is responsible for having already opened the socket. + * Reconnection is disabled for adopted sockets. 
+ */ + adopt( + ws: WebSocket, + onOpen?: () => void, + onMessage?: (event: MessageEvent) => void, + onError?: (error: Event) => void, + onClose?: () => void, + ): void { + // Store handlers + this.onOpenHandler = onOpen; + this.onMessageHandler = onMessage; + this.onErrorHandler = onError; + this.onCloseHandler = onClose; + + // Close any existing connection + if (this.ws) { + this.ws.close(); + this.ws = null; + } + + this.ws = ws; + this.ws.binaryType = "arraybuffer"; + // No auto-reconnect for externally-managed sockets + this.shouldReconnect = false; + + this.ws.onmessage = (event: MessageEvent) => { + if (this.onMessageHandler) { + this.onMessageHandler(event); + } + }; + + this.ws.onerror = (error: Event) => { + console.error("Sendspin: WebSocket error", error); + if (this.onErrorHandler) { + this.onErrorHandler(error); + } + }; + + this.ws.onclose = () => { + console.log("Sendspin: WebSocket disconnected"); + if (this.onCloseHandler) { + this.onCloseHandler(); + } + }; + + // If already open, fire onOpen immediately + if (ws.readyState === WebSocket.OPEN) { + console.log("Sendspin: Adopted open WebSocket"); + if (this.onOpenHandler) { + this.onOpenHandler(); + } + } else if (ws.readyState === WebSocket.CONNECTING) { + // Wait for it to open + this.ws.onopen = () => { + console.log("Sendspin: Adopted WebSocket connected"); + if (this.onOpenHandler) { + this.onOpenHandler(); + } + }; + } + } + // Connect to WebSocket server async connect( url: string, From 0cda6b0f167df8ad6dbe42a5f0a0e09010f5cf1b Mon Sep 17 00:00:00 2001 From: Claude Date: Sun, 12 Apr 2026 10:44:11 +0000 Subject: [PATCH 04/27] Make baseUrl optional when webSocket is provided baseUrl is no longer required if a pre-established WebSocket is passed in. A clear error is thrown at connect() if neither is provided. https://claude.ai/code/session_018UYYEXUZVuQ2Z4Texa7W6m --- src/core.ts | 5 +++++ src/types.ts | 14 ++++++++++---- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/src/core.ts b/src/core.ts index 55123dd..f77c845 100644 --- a/src/core.ts +++ b/src/core.ts @@ -183,6 +183,11 @@ export class SendspinCore implements StreamHandler { ); } else { // Create connection from baseUrl + if (!this.config.baseUrl) { + throw new Error( + "SendspinCore requires either baseUrl or webSocket to be provided.", + ); + } const url = new URL(this.config.baseUrl); const wsProtocol = url.protocol === "https:" ? "wss:" : "ws:"; this.wsUrl = `${wsProtocol}//${url.host}/sendspin`; diff --git a/src/types.ts b/src/types.ts index 5981830..e4ff599 100644 --- a/src/types.ts +++ b/src/types.ts @@ -267,8 +267,11 @@ export interface SendspinPlayerConfig { /** Unique player identifier. Auto-generated if not provided. */ playerId?: string; - /** Base URL of the Sendspin server (e.g., "http://192.168.1.100:8095") */ - baseUrl: string; + /** + * Base URL of the Sendspin server (e.g., "http://192.168.1.100:8095"). + * Required unless webSocket is provided. + */ + baseUrl?: string; /** Human-readable name for this player. Auto-generated if not provided. */ clientName?: string; @@ -416,8 +419,11 @@ export interface SendspinCoreConfig { /** Unique player identifier. Auto-generated if not provided. */ playerId?: string; - /** Base URL of the Sendspin server (e.g., "http://192.168.1.100:8095") */ - baseUrl: string; + /** + * Base URL of the Sendspin server (e.g., "http://192.168.1.100:8095"). + * Required unless webSocket is provided. + */ + baseUrl?: string; /** Human-readable name for this player. Auto-generated if not provided. 
*/ clientName?: string; From e57b175b010aee6608239f400b7d4e11ed7094fc Mon Sep 17 00:00:00 2001 From: Claude Date: Sun, 12 Apr 2026 10:55:05 +0000 Subject: [PATCH 05/27] Move source files into core/ and audio/ folders MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Organize modules into logical groups: - src/core/ — protocol, state, websocket, time sync - src/audio/ — decoder, scheduler All import paths updated. No logic changes. https://claude.ai/code/session_018UYYEXUZVuQ2Z4Texa7W6m --- src/{audio-decoder.ts => audio/decoder.ts} | 2 +- src/{audio-scheduler.ts => audio/scheduler.ts} | 6 +++--- src/{ => core}/core.ts | 4 ++-- src/{ => core}/protocol-handler.ts | 4 ++-- src/{ => core}/state-manager.ts | 2 +- src/{ => core}/time-filter.ts | 0 src/{ => core}/websocket-manager.ts | 0 src/index.ts | 12 ++++++------ 8 files changed, 15 insertions(+), 15 deletions(-) rename src/{audio-decoder.ts => audio/decoder.ts} (99%) rename src/{audio-scheduler.ts => audio/scheduler.ts} (99%) rename src/{ => core}/core.ts (99%) rename src/{ => core}/protocol-handler.ts (99%) rename src/{ => core}/state-manager.ts (99%) rename src/{ => core}/time-filter.ts (100%) rename src/{ => core}/websocket-manager.ts (100%) diff --git a/src/audio-decoder.ts b/src/audio/decoder.ts similarity index 99% rename from src/audio-decoder.ts rename to src/audio/decoder.ts index 2431ae4..820f672 100644 --- a/src/audio-decoder.ts +++ b/src/audio/decoder.ts @@ -5,7 +5,7 @@ * This module has no Web Audio playback concerns — it only produces decoded data. */ -import type { StreamFormat, DecodedAudioChunk } from "./types"; +import type { StreamFormat, DecodedAudioChunk } from "../types"; export class SendspinDecoder { private onDecodedChunk: (chunk: DecodedAudioChunk) => void; diff --git a/src/audio-scheduler.ts b/src/audio/scheduler.ts similarity index 99% rename from src/audio-scheduler.ts rename to src/audio/scheduler.ts index ac74efa..17e9b9b 100644 --- a/src/audio-scheduler.ts +++ b/src/audio/scheduler.ts @@ -12,9 +12,9 @@ import type { CorrectionMode, DecodedAudioChunk, SendspinStorage, -} from "./types"; -import type { StateManager } from "./state-manager"; -import type { SendspinTimeFilter } from "./time-filter"; +} from "../types"; +import type { StateManager } from "../core/state-manager"; +import type { SendspinTimeFilter } from "../core/time-filter"; // Sync correction constants const SAMPLE_CORRECTION_FADE_LEN = 8; diff --git a/src/core.ts b/src/core/core.ts similarity index 99% rename from src/core.ts rename to src/core/core.ts index f77c845..1984a1d 100644 --- a/src/core.ts +++ b/src/core/core.ts @@ -7,7 +7,7 @@ * tools directly. 
*/ -import { SendspinDecoder } from "./audio-decoder"; +import { SendspinDecoder } from "../audio/decoder"; import { ProtocolHandler } from "./protocol-handler"; import { StateManager } from "./state-manager"; import { WebSocketManager } from "./websocket-manager"; @@ -23,7 +23,7 @@ import type { ControllerCommands, ServerStatePayload, GroupUpdatePayload, -} from "./types"; +} from "../types"; function generateRandomId(): string { return Math.random().toString(36).substring(2, 6); diff --git a/src/protocol-handler.ts b/src/core/protocol-handler.ts similarity index 99% rename from src/protocol-handler.ts rename to src/core/protocol-handler.ts index 0691f44..03ec9d2 100644 --- a/src/protocol-handler.ts +++ b/src/core/protocol-handler.ts @@ -19,8 +19,8 @@ import type { StreamEnd, StreamStart, SupportedFormat, -} from "./types"; -import type { StreamHandler } from "./types"; +} from "../types"; +import type { StreamHandler } from "../types"; import type { StateManager } from "./state-manager"; import type { WebSocketManager } from "./websocket-manager"; diff --git a/src/state-manager.ts b/src/core/state-manager.ts similarity index 99% rename from src/state-manager.ts rename to src/core/state-manager.ts index 98c41ad..37c3214 100644 --- a/src/state-manager.ts +++ b/src/core/state-manager.ts @@ -3,7 +3,7 @@ import type { StreamFormat, ServerStatePayload, GroupUpdatePayload, -} from "./types"; +} from "../types"; /** * Apply a diff to an object, returning a new copy. diff --git a/src/time-filter.ts b/src/core/time-filter.ts similarity index 100% rename from src/time-filter.ts rename to src/core/time-filter.ts diff --git a/src/websocket-manager.ts b/src/core/websocket-manager.ts similarity index 100% rename from src/websocket-manager.ts rename to src/core/websocket-manager.ts diff --git a/src/index.ts b/src/index.ts index 9bc1b4d..54b297d 100644 --- a/src/index.ts +++ b/src/index.ts @@ -1,5 +1,5 @@ -import { SendspinCore } from "./core"; -import { AudioScheduler } from "./audio-scheduler"; +import { SendspinCore } from "./core/core"; +import { AudioScheduler } from "./audio/scheduler"; import { SILENT_AUDIO_SRC } from "./silent-audio.generated"; import type { SendspinPlayerConfig, @@ -280,10 +280,10 @@ export class SendspinPlayer { // Re-export types for convenience export * from "./types"; -export { SendspinTimeFilter } from "./time-filter"; -export { SendspinCore } from "./core"; -export { SendspinDecoder } from "./audio-decoder"; -export { AudioScheduler } from "./audio-scheduler"; +export { SendspinTimeFilter } from "./core/time-filter"; +export { SendspinCore } from "./core/core"; +export { SendspinDecoder } from "./audio/decoder"; +export { AudioScheduler } from "./audio/scheduler"; // Export platform detection utilities export { detectIsAndroid, detectIsIOS, detectIsMobile, getDefaultSyncDelay }; From acd47e63aca5161d9f1f2647de9db07f1359f4ee Mon Sep 17 00:00:00 2001 From: Claude Date: Sun, 12 Apr 2026 10:55:46 +0000 Subject: [PATCH 06/27] Fix send() typing and replace window.* with globalThis.* - WebSocketManager.send() now takes ClientMessage instead of any - All window.setTimeout/setInterval replaced with globalThis.* for non-browser environment compatibility (Node, Workers, SSR) - Timer handle types use ReturnType instead of number https://claude.ai/code/session_018UYYEXUZVuQ2Z4Texa7W6m --- src/core/protocol-handler.ts | 8 ++++---- src/core/state-manager.ts | 4 ++-- src/core/websocket-manager.ts | 7 ++++--- 3 files changed, 10 insertions(+), 9 deletions(-) diff --git 
a/src/core/protocol-handler.ts b/src/core/protocol-handler.ts index 03ec9d2..b563cfe 100644 --- a/src/core/protocol-handler.ts +++ b/src/core/protocol-handler.ts @@ -148,7 +148,7 @@ export class ProtocolHandler { this.scheduleNextTimeSyncBurstTick(); // Start periodic state updates - const stateInterval = window.setInterval( + const stateInterval = globalThis.setInterval( () => this.sendStateUpdate(), STATE_UPDATE_INTERVAL, ); @@ -159,7 +159,7 @@ export class ProtocolHandler { // Called after volume commands to prevent a pending periodic update // from sending stale hardware volume shortly after the command response. private restartStateUpdateInterval(): void { - const newInterval = window.setInterval( + const newInterval = globalThis.setInterval( () => this.sendStateUpdate(), STATE_UPDATE_INTERVAL, ); @@ -168,7 +168,7 @@ export class ProtocolHandler { // Schedule the next fixed 10s burst tick. private scheduleNextTimeSyncBurstTick(): void { - const timeSyncTimeout = window.setTimeout(() => { + const timeSyncTimeout = globalThis.setTimeout(() => { this.startTimeSyncBurstIfIdle(); this.scheduleNextTimeSyncBurstTick(); }, TIME_SYNC_BURST_INTERVAL_MS); @@ -209,7 +209,7 @@ export class ProtocolHandler { private armTimeSyncProbeTimeout(expectedClientTransmitted: number): void { this.clearTimeSyncProbeTimeout(); - this.timeSyncInFlightTimeout = window.setTimeout(() => { + this.timeSyncInFlightTimeout = globalThis.setTimeout(() => { this.handleTimeSyncProbeTimeout(expectedClientTransmitted); }, TIME_SYNC_REQUEST_TIMEOUT_MS); } diff --git a/src/core/state-manager.ts b/src/core/state-manager.ts index 37c3214..a02054d 100644 --- a/src/core/state-manager.ts +++ b/src/core/state-manager.ts @@ -56,8 +56,8 @@ export class StateManager { private _groupState: GroupUpdatePayload = {}; // Interval references for cleanup - private timeSyncInterval: number | null = null; - private stateUpdateInterval: number | null = null; + private timeSyncInterval: ReturnType | null = null; + private stateUpdateInterval: ReturnType | null = null; // Callback for state changes private onStateChangeCallback?: (state: { diff --git a/src/core/websocket-manager.ts b/src/core/websocket-manager.ts index 4d4e4d4..1d671b3 100644 --- a/src/core/websocket-manager.ts +++ b/src/core/websocket-manager.ts @@ -1,6 +1,7 @@ +import type { ClientMessage } from "../types"; export class WebSocketManager { private ws: WebSocket | null = null; - private reconnectTimeout: number | null = null; + private reconnectTimeout: ReturnType | null = null; private shouldReconnect: boolean = false; // Event handlers @@ -151,7 +152,7 @@ export class WebSocketManager { clearTimeout(this.reconnectTimeout); } - this.reconnectTimeout = window.setTimeout(() => { + this.reconnectTimeout = globalThis.setTimeout(() => { if (this.shouldReconnect) { console.log("Sendspin: Attempting to reconnect..."); this.connect( @@ -183,7 +184,7 @@ export class WebSocketManager { } // Send message to server (JSON) - send(message: any): void { + send(message: ClientMessage): void { if (this.ws && this.ws.readyState === WebSocket.OPEN) { this.ws.send(JSON.stringify(message)); } else { From 7b0ffa86eefca73dbfb2f3bd2fe81d7e82d70f6c Mon Sep 17 00:00:00 2001 From: Claude Date: Sun, 12 Apr 2026 11:01:44 +0000 Subject: [PATCH 07/27] Extract sub-modules from protocol-handler and scheduler Protocol handler extractions: - codec-support.ts: getBrowserSupportedCodecs + getSupportedFormats as standalone pure functions - time-sync-manager.ts: TimeSyncManager class with all NTP burst lifecycle 
logic (probes, timeouts, candidate selection) - protocol-handler.ts slimmed from 604 to 342 lines Audio scheduler sub-modules (created, integration next): - clock-source.ts: Clock source selection + output timestamp validation - recorrection-monitor.ts: Drift detection with transient filtering - output-latency-tracker.ts: EMA smoothing + persistence https://claude.ai/code/session_018UYYEXUZVuQ2Z4Texa7W6m --- src/audio/clock-source.ts | 332 ++++++++++++++++++++++++++++ src/audio/output-latency-tracker.ts | 100 +++++++++ src/audio/recorrection-monitor.ts | 172 ++++++++++++++ src/core/codec-support.ts | 77 +++++++ src/core/protocol-handler.ts | 286 +----------------------- src/core/time-sync-manager.ts | 216 ++++++++++++++++++ 6 files changed, 909 insertions(+), 274 deletions(-) create mode 100644 src/audio/clock-source.ts create mode 100644 src/audio/output-latency-tracker.ts create mode 100644 src/audio/recorrection-monitor.ts create mode 100644 src/core/codec-support.ts create mode 100644 src/core/time-sync-manager.ts diff --git a/src/audio/clock-source.ts b/src/audio/clock-source.ts new file mode 100644 index 0000000..153ceea --- /dev/null +++ b/src/audio/clock-source.ts @@ -0,0 +1,332 @@ +/** + * Audio clock source selection and output timestamp validation. + * + * Manages two clock sources for AudioContext time: + * - "estimated": De-quantized AudioContext.currentTime using wall-clock slew + * - "timestamp": AudioContext.getOutputTimestamp() with extensive validation + * + * Promotes to "timestamp" after enough good samples, demotes on failures. + */ + +type AudioClockSource = "estimated" | "timestamp" | "raw"; + +interface OutputTimestampSample { + contextTimeSec: number; + performanceTimeMs: number; + nowMs: number; + predictedAudioTimeSec: number; + rawAudioTimeSec: number; +} + +const OUTPUT_TIMESTAMP_MAX_FRESHNESS_MS = 250; +const OUTPUT_TIMESTAMP_MIN_SAMPLE_INTERVAL_MS = 40; +const OUTPUT_TIMESTAMP_SLOPE_MIN = 0.95; +const OUTPUT_TIMESTAMP_SLOPE_MAX = 1.05; +const OUTPUT_TIMESTAMP_MAX_DIVERGENCE_SEC = 0.25; +const OUTPUT_TIMESTAMP_MAX_DIVERGENCE_DELTA_SEC = 0.05; +const OUTPUT_TIMESTAMP_MAX_BACKWARD_SEC = 0.005; +const OUTPUT_TIMESTAMP_FUTURE_TOLERANCE_MS = 5; +const OUTPUT_TIMESTAMP_PROMOTION_MIN_GOOD_SAMPLES = 6; +const OUTPUT_TIMESTAMP_PROMOTION_MIN_SPAN_MS = 750; +const OUTPUT_TIMESTAMP_MAX_CONSECUTIVE_BAD_SAMPLES = 2; + +// Timing estimate constants +const TIMING_MAX_SLEW_SEC = 0.002; +const TIMING_RESET_THRESHOLD_SEC = 0.5; +const TIMING_MAX_LEAD_SEC = 0.1; + +export interface TimingSnapshot { + audioContextTimeSec: number; + audioContextRawTimeSec: number; + nowMs: number; + nowUs: number; +} + +export class ClockSource { + private activeSource: AudioClockSource = "estimated"; + private _pendingCutover = false; + private _lastRejectReason: string | null = null; + + // Output timestamp validation state + private lastSample: OutputTimestampSample | null = null; + private goodSamples: number = 0; + private badSamples: number = 0; + private goodSinceMs: number | null = null; + + // Estimated time state + private estimateAudioTimeSec: number | null = null; + private estimateAtMs: number | null = null; + + get active(): AudioClockSource { + return this.activeSource; + } + + get pendingCutover(): boolean { + return this._pendingCutover; + } + + set pendingCutover(value: boolean) { + this._pendingCutover = value; + } + + get lastRejectReason(): string | null { + return this._lastRejectReason; + } + + get timestampGoodSamples(): number { + return this.goodSamples; + } + + 
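+
+  /**
+   * Switch the active clock source. Returns true only when the switch is to
+   * "timestamp", i.e. a promotion that leaves a cutover pending.
+   */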
setActive(source: AudioClockSource): boolean { + if (this.activeSource === source) return false; + this.activeSource = source; + this._pendingCutover = source === "timestamp"; + return this._pendingCutover; + } + + reset(): void { + this.activeSource = "estimated"; + this._pendingCutover = false; + this.lastSample = null; + this.goodSamples = 0; + this._lastRejectReason = null; + this.badSamples = 0; + this.goodSinceMs = null; + this.estimateAudioTimeSec = null; + this.estimateAtMs = null; + } + + private demote(reason: string): void { + this.reset(); + this._lastRejectReason = reason; + } + + private rejectSample(reason: string, catastrophic = false): void { + this.lastSample = null; + this.goodSamples = 0; + this.goodSinceMs = null; + this._lastRejectReason = reason; + + if (this.activeSource !== "timestamp") { + this.badSamples = 0; + return; + } + + this.badSamples += 1; + if ( + catastrophic || + this.badSamples >= OUTPUT_TIMESTAMP_MAX_CONSECUTIVE_BAD_SAMPLES + ) { + this.demote(reason); + } + } + + + private getEstimatedTime(rawTimeSec: number, nowMs: number): number { + if (this.estimateAudioTimeSec === null) { + this.estimateAudioTimeSec = rawTimeSec; + this.estimateAtMs = nowMs; + } else if (this.estimateAtMs !== null) { + const wallDeltaSec = Math.max(0, (nowMs - this.estimateAtMs) / 1000); + const predicted = this.estimateAudioTimeSec + wallDeltaSec; + this.estimateAtMs = nowMs; + + const errorSec = rawTimeSec - predicted; + if (Math.abs(errorSec) > TIMING_RESET_THRESHOLD_SEC) { + this.estimateAudioTimeSec = rawTimeSec; + } else { + const slew = Math.max( + -TIMING_MAX_SLEW_SEC, + Math.min(TIMING_MAX_SLEW_SEC, errorSec), + ); + const next = Math.max(this.estimateAudioTimeSec, predicted + slew); + this.estimateAudioTimeSec = Math.min( + next, + rawTimeSec + TIMING_MAX_LEAD_SEC, + ); + } + } + + return this.estimateAudioTimeSec ?? 
rawTimeSec; + } + + private getTimestampDerivedTime( + rawTimeSec: number, + audioContext: AudioContext, + ): number | null { + const getOutputTimestamp = ( + audioContext as unknown as { + getOutputTimestamp?: () => { + contextTime: number; + performanceTime: number; + }; + } + ).getOutputTimestamp; + + if (typeof getOutputTimestamp !== "function") { + if (this.activeSource === "timestamp") { + this.demote("getOutputTimestamp unavailable"); + } + return null; + } + + try { + const ts = getOutputTimestamp.call(audioContext); + const nowMs = performance.now(); + const rawFreshnessMs = nowMs - ts.performanceTime; + if (rawFreshnessMs < -OUTPUT_TIMESTAMP_FUTURE_TOLERANCE_MS) { + this.rejectSample( + `performanceTime in future (${rawFreshnessMs.toFixed(1)}ms)`, + true, + ); + return null; + } + + const freshnessMs = Math.max(0, rawFreshnessMs); + const predictedAudioTimeSec = ts.contextTime + freshnessMs / 1000; + const sample: OutputTimestampSample = { + contextTimeSec: ts.contextTime, + performanceTimeMs: ts.performanceTime, + nowMs, + predictedAudioTimeSec, + rawAudioTimeSec: rawTimeSec, + }; + + if (freshnessMs > OUTPUT_TIMESTAMP_MAX_FRESHNESS_MS) { + this.rejectSample( + `stale timestamp (${freshnessMs.toFixed(1)}ms old)`, + true, + ); + return null; + } + + const divergenceSec = predictedAudioTimeSec - rawTimeSec; + if (Math.abs(divergenceSec) > OUTPUT_TIMESTAMP_MAX_DIVERGENCE_SEC) { + this.rejectSample( + `timestamp/raw divergence ${Math.abs(divergenceSec * 1000).toFixed(1)}ms`, + true, + ); + return null; + } + + const prev = this.lastSample; + if (prev) { + const perfDeltaMs = ts.performanceTime - prev.performanceTimeMs; + if (perfDeltaMs < 0) { + this.rejectSample( + `performanceTime moved backward (${perfDeltaMs.toFixed(1)}ms)`, + true, + ); + return null; + } + + if ( + predictedAudioTimeSec < + prev.predictedAudioTimeSec - OUTPUT_TIMESTAMP_MAX_BACKWARD_SEC + ) { + this.rejectSample( + `predicted audio time moved backward ${((prev.predictedAudioTimeSec - predictedAudioTimeSec) * 1000).toFixed(1)}ms`, + true, + ); + return null; + } + + const prevDivergenceSec = + prev.predictedAudioTimeSec - prev.rawAudioTimeSec; + if ( + Math.abs(divergenceSec - prevDivergenceSec) > + OUTPUT_TIMESTAMP_MAX_DIVERGENCE_DELTA_SEC + ) { + this.rejectSample( + `timestamp/raw divergence drift ${Math.abs((divergenceSec - prevDivergenceSec) * 1000).toFixed(1)}ms`, + ); + return null; + } + + if (perfDeltaMs >= OUTPUT_TIMESTAMP_MIN_SAMPLE_INTERVAL_MS) { + const perfDeltaSec = perfDeltaMs / 1000; + const contextSlope = + (ts.contextTime - prev.contextTimeSec) / perfDeltaSec; + const predictedSlope = + (predictedAudioTimeSec - prev.predictedAudioTimeSec) / perfDeltaSec; + + if ( + contextSlope < OUTPUT_TIMESTAMP_SLOPE_MIN || + contextSlope > OUTPUT_TIMESTAMP_SLOPE_MAX + ) { + this.rejectSample( + `context slope ${contextSlope.toFixed(3)} out of range`, + ); + return null; + } + if ( + predictedSlope < OUTPUT_TIMESTAMP_SLOPE_MIN || + predictedSlope > OUTPUT_TIMESTAMP_SLOPE_MAX + ) { + this.rejectSample( + `predicted slope ${predictedSlope.toFixed(3)} out of range`, + ); + return null; + } + } + } + + this.lastSample = sample; + this.badSamples = 0; + if (this.goodSinceMs === null) { + this.goodSinceMs = nowMs; + } + this.goodSamples += 1; + + if ( + this.activeSource !== "timestamp" && + this.goodSamples >= OUTPUT_TIMESTAMP_PROMOTION_MIN_GOOD_SAMPLES && + this.goodSinceMs !== null && + nowMs - this.goodSinceMs >= OUTPUT_TIMESTAMP_PROMOTION_MIN_SPAN_MS + ) { + this.setActive("timestamp"); + 
this._lastRejectReason = null; + } + + return predictedAudioTimeSec; + } catch (error) { + const reason = + error instanceof Error + ? `getOutputTimestamp failed: ${error.message}` + : `getOutputTimestamp failed: ${String(error)}`; + this.rejectSample(reason, true); + return null; + } + } + + /** Get a timing snapshot with both derived and raw AudioContext times. */ + getTimingSnapshot(audioContext: AudioContext | null): TimingSnapshot { + const nowMs = performance.now(); + const nowUs = nowMs * 1000; + if (!audioContext) { + return { audioContextTimeSec: 0, audioContextRawTimeSec: 0, nowMs, nowUs }; + } + + const rawTimeSec = audioContext.currentTime; + const estimatedTimeSec = this.getEstimatedTime(rawTimeSec, nowMs); + const timestampTimeSec = this.getTimestampDerivedTime( + rawTimeSec, + audioContext, + ); + + let derivedTimeSec = + this.activeSource === "timestamp" && timestampTimeSec !== null + ? timestampTimeSec + : estimatedTimeSec; + if (!Number.isFinite(derivedTimeSec)) { + derivedTimeSec = rawTimeSec; + } + + return { + audioContextTimeSec: derivedTimeSec, + audioContextRawTimeSec: rawTimeSec, + nowMs, + nowUs, + }; + } +} diff --git a/src/audio/output-latency-tracker.ts b/src/audio/output-latency-tracker.ts new file mode 100644 index 0000000..0d6a8c6 --- /dev/null +++ b/src/audio/output-latency-tracker.ts @@ -0,0 +1,100 @@ +/** + * Output latency tracker with EMA smoothing and persistence. + * + * Tracks AudioContext.baseLatency + outputLatency using exponential moving + * average to filter browser jitter (especially Chrome). Persists the smoothed + * value to storage for cross-session consistency. + */ + +import type { SendspinStorage } from "../types"; + +const OUTPUT_LATENCY_ALPHA = 0.01; +const OUTPUT_LATENCY_STORAGE_KEY = "sendspin-output-latency-us"; +const OUTPUT_LATENCY_PERSIST_INTERVAL_MS = 10_000; + +export class OutputLatencyTracker { + private lastRawOutputLatencyUs: number = 0; + private smoothedOutputLatencyUs: number | null = null; + private lastLatencyPersistAtMs: number | null = null; + + constructor(private storage: SendspinStorage | null) { + this.loadPersisted(); + } + + private loadPersisted(): void { + if (!this.storage) return; + try { + const stored = this.storage.getItem(OUTPUT_LATENCY_STORAGE_KEY); + if (stored) { + const latency = parseFloat(stored); + if (!isNaN(latency) && latency >= 0) { + this.smoothedOutputLatencyUs = latency; + } + } + } catch { + // ignore + } + } + + private persist(): void { + if (!this.storage || this.smoothedOutputLatencyUs === null) return; + try { + this.storage.setItem( + OUTPUT_LATENCY_STORAGE_KEY, + this.smoothedOutputLatencyUs.toString(), + ); + } catch { + // ignore + } + } + + /** Get raw output latency in microseconds from AudioContext. */ + getRawUs(audioContext: AudioContext | null): number { + if (!audioContext) return 0; + const baseLatency = audioContext.baseLatency ?? 0; + const outputLatency = audioContext.outputLatency ?? 0; + const rawUs = (baseLatency + outputLatency) * 1_000_000; + this.lastRawOutputLatencyUs = rawUs; + return rawUs; + } + + /** Get EMA-smoothed output latency in microseconds. 
*/
+  getSmoothedUs(audioContext: AudioContext | null): number {
+    const rawLatencyUs = this.getRawUs(audioContext);
+
+    if (rawLatencyUs <= 0 && this.smoothedOutputLatencyUs !== null) {
+      return this.smoothedOutputLatencyUs;
+    }
+
+    if (this.smoothedOutputLatencyUs === null) {
+      this.smoothedOutputLatencyUs = rawLatencyUs;
+    } else {
+      this.smoothedOutputLatencyUs =
+        OUTPUT_LATENCY_ALPHA * rawLatencyUs +
+        (1 - OUTPUT_LATENCY_ALPHA) * this.smoothedOutputLatencyUs;
+    }
+
+    const nowMs =
+      typeof performance !== "undefined" ? performance.now() : Date.now();
+    if (
+      this.lastLatencyPersistAtMs === null ||
+      nowMs - this.lastLatencyPersistAtMs >= OUTPUT_LATENCY_PERSIST_INTERVAL_MS
+    ) {
+      this.persist();
+      this.lastLatencyPersistAtMs = nowMs;
+    }
+
+    return this.smoothedOutputLatencyUs;
+  }
+
+  /** Get last raw reading in microseconds (for sync info display). */
+  getLastRawUs(): number {
+    return this.lastRawOutputLatencyUs;
+  }
+
+  /** Reset smoother (on stream change or audio context recreation). */
+  reset(): void {
+    this.smoothedOutputLatencyUs = null;
+    this.lastRawOutputLatencyUs = 0;
+  }
+}
diff --git a/src/audio/recorrection-monitor.ts b/src/audio/recorrection-monitor.ts
new file mode 100644
index 0000000..4d546ce
--- /dev/null
+++ b/src/audio/recorrection-monitor.ts
@@ -0,0 +1,172 @@
+/**
+ * Recorrection monitor for detecting sustained sync drift.
+ *
+ * Runs on a periodic interval and detects when sync error exceeds a threshold
+ * for long enough to warrant a hard resync. The monitor only detects — the
+ * actual cutover execution is delegated to the scheduler via callback.
+ */
+
+const RECORRECTION_CHECK_INTERVAL_MS = 250;
+const RECORRECTION_TRIGGER_MS = 30;
+const RECORRECTION_SUSTAIN_MS = 400;
+const RECORRECTION_COOLDOWN_MS = 1_500;
+const RECORRECTION_TRANSIENT_JUMP_MS = 25;
+const RECORRECTION_TRANSIENT_CONFIRM_WINDOW_MS =
+  RECORRECTION_CHECK_INTERVAL_MS * 4;
+const HARD_RESYNC_STARTUP_GRACE_MS = 1_000;
+const HARD_RESYNC_COOLDOWN_MS = 500;
+
+export class RecorrectionMonitor {
+  private interval: ReturnType<typeof setInterval> | null = null;
+  private breachStartedAtMs: number | null = null;
+  private lastRecorrectionAtMs: number = -Infinity;
+  private prevRawSyncErrorMs: number | null = null;
+  private pendingJumpSign: number | null = null;
+  private pendingJumpAtMs: number | null = null;
+  private _hardResyncGraceUntilMs: number | null = null;
+  private _lastHardResyncAtMs: number = -Infinity;
+  /** After a recorrection, scheduling must not start before this time.
*/ + minScheduleTimeSec: number | null = null; + + constructor(private onCheck: () => void) {} + + start(): void { + if (this.interval !== null) return; + this.interval = globalThis.setInterval( + () => this.onCheck(), + RECORRECTION_CHECK_INTERVAL_MS, + ); + } + + stop(): void { + if (this.interval !== null) { + clearInterval(this.interval); + this.interval = null; + } + this.resetCheckState(); + this.lastRecorrectionAtMs = -Infinity; + } + + clearBreachState(): void { + this.breachStartedAtMs = null; + this.pendingJumpSign = null; + this.pendingJumpAtMs = null; + } + + resetCheckState(): void { + this.clearBreachState(); + this.prevRawSyncErrorMs = null; + } + + armStartupGrace(nowMs: number, isTimestampClock: boolean): void { + if (isTimestampClock) { + this._hardResyncGraceUntilMs = null; + return; + } + if (this._hardResyncGraceUntilMs === null) { + this._hardResyncGraceUntilMs = nowMs + HARD_RESYNC_STARTUP_GRACE_MS; + } + } + + canUseHardResync(nowMs: number, isTimestampClock: boolean): boolean { + if (isTimestampClock) { + this._hardResyncGraceUntilMs = null; + } else if ( + this._hardResyncGraceUntilMs !== null && + nowMs < this._hardResyncGraceUntilMs + ) { + return false; + } + return nowMs - this._lastHardResyncAtMs >= HARD_RESYNC_COOLDOWN_MS; + } + + noteHardResync(nowMs: number): void { + this._lastHardResyncAtMs = nowMs; + } + + /** Mark a recorrection as having just happened (for cooldown). */ + markRecorrection(nowMs: number): void { + this.lastRecorrectionAtMs = nowMs; + } + + shouldIgnoreTransientJump( + rawSyncErrorMs: number, + nowMs: number, + ): boolean { + const prev = this.prevRawSyncErrorMs; + this.prevRawSyncErrorMs = rawSyncErrorMs; + + if (prev === null) { + this.pendingJumpSign = null; + this.pendingJumpAtMs = null; + return false; + } + + const jumpDeltaMs = rawSyncErrorMs - prev; + const jumpSign = Math.sign(rawSyncErrorMs); + const isJumpDetected = + Math.abs(jumpDeltaMs) >= RECORRECTION_TRANSIENT_JUMP_MS && jumpSign !== 0; + if (!isJumpDetected) { + this.pendingJumpSign = null; + this.pendingJumpAtMs = null; + return false; + } + + const isConfirmed = + this.pendingJumpSign === jumpSign && + this.pendingJumpAtMs !== null && + nowMs - this.pendingJumpAtMs <= RECORRECTION_TRANSIENT_CONFIRM_WINDOW_MS; + this.pendingJumpSign = jumpSign; + this.pendingJumpAtMs = nowMs; + if (isConfirmed) { + this.pendingJumpSign = null; + this.pendingJumpAtMs = null; + return false; + } + + return true; + } + + /** + * Evaluate whether a recorrection should fire given the current sync state. + * Returns true if the scheduler should perform a guarded cutover. + */ + shouldRecorrect( + smoothedAbsErrorMs: number, + rawSyncErrorMs: number, + nowMs: number, + ): boolean { + const isTransient = this.shouldIgnoreTransientJump(rawSyncErrorMs, nowMs); + + if (smoothedAbsErrorMs < RECORRECTION_TRIGGER_MS) { + this.clearBreachState(); + return false; + } + if (isTransient) { + this.clearBreachState(); + return false; + } + if (this.breachStartedAtMs === null) { + this.breachStartedAtMs = nowMs; + return false; + } + if (nowMs - this.breachStartedAtMs < RECORRECTION_SUSTAIN_MS) { + return false; + } + if (nowMs - this.lastRecorrectionAtMs < RECORRECTION_COOLDOWN_MS) { + return false; + } + + return true; + } + + /** Full reset (on disconnect or stream clear). 
*/
+  fullReset(): void {
+    this.stop();
+    this._hardResyncGraceUntilMs = null;
+    this._lastHardResyncAtMs = -Infinity;
+    this.minScheduleTimeSec = null;
+  }
+}
+
+export const RECORRECTION_CUTOVER_GUARD_SEC = 0.3;
diff --git a/src/core/codec-support.ts b/src/core/codec-support.ts
new file mode 100644
index 0000000..e096d13
--- /dev/null
+++ b/src/core/codec-support.ts
@@ -0,0 +1,77 @@
+import type { Codec, SupportedFormat } from "../types";
+
+/** Detect which audio codecs the current browser supports. */
+export function getBrowserSupportedCodecs(): Set<Codec> {
+  const userAgent =
+    typeof navigator !== "undefined" ? navigator.userAgent : "";
+  const isSafari = /^((?!chrome|android).)*safari/i.test(userAgent);
+  const isFirefox = /firefox/i.test(userAgent);
+
+  // Check if native Opus decoder is available (requires secure context)
+  const hasNativeOpus = typeof AudioDecoder !== "undefined";
+
+  if (!hasNativeOpus) {
+    if (typeof window !== "undefined" && !window.isSecureContext) {
+      console.warn(
+        "[Opus] Running in insecure context, falling back to FLAC/PCM",
+      );
+    } else {
+      console.warn(
+        "[Opus] Native decoder not available, falling back to FLAC/PCM",
+      );
+    }
+  }
+
+  if (isSafari) {
+    // Safari: No FLAC support
+    return new Set(["pcm", "opus"] as Codec[]);
+  }
+
+  if (isFirefox) {
+    // Firefox: Opus has audio glitches with both native and opus-encdec decoders
+    return new Set(["pcm", "flac"] as Codec[]);
+  }
+
+  if (hasNativeOpus) {
+    // Native Opus available (Chrome, Edge)
+    return new Set(["pcm", "opus", "flac"] as Codec[]);
+  }
+
+  // No WebCodecs AudioDecoder (insecure context or unsupported browser)
+  return new Set(["pcm", "flac"] as Codec[]);
+}
+
+/** Build supported format list from requested codecs, filtering by browser support. */
+export function getSupportedFormats(codecs: Codec[]): SupportedFormat[] {
+  const browserSupported = getBrowserSupportedCodecs();
+  const formats: SupportedFormat[] = [];
+
+  for (const codec of codecs) {
+    if (!browserSupported.has(codec)) {
+      continue;
+    }
+
+    if (codec === "opus") {
+      // Opus requires 48kHz
+      formats.push({
+        codec: "opus",
+        sample_rate: 48000,
+        channels: 2,
+        bit_depth: 16,
+      });
+    } else {
+      // PCM and FLAC support both sample rates
+      formats.push({ codec, sample_rate: 48000, channels: 2, bit_depth: 16 });
+      formats.push({ codec, sample_rate: 44100, channels: 2, bit_depth: 16 });
+    }
+  }
+
+  if (formats.length === 0) {
+    throw new Error(
+      `No supported codecs: requested [${codecs.join(", ")}], ` +
+        `browser supports [${[...browserSupported].join(", ")}]`,
+    );
+  }
+
+  return formats;
+}
diff --git a/src/core/protocol-handler.ts b/src/core/protocol-handler.ts
index b563cfe..8f2ad9f 100644
--- a/src/core/protocol-handler.ts
+++ b/src/core/protocol-handler.ts
@@ -4,7 +4,6 @@ import type {
   ClientGoodbye,
   ClientHello,
   ClientState,
-  ClientTime,
   Codec,
   ControllerCommand,
   ControllerCommands,
@@ -18,25 +17,15 @@ import type {
   StreamClear,
   StreamEnd,
   StreamStart,
-  SupportedFormat,
 } from "../types";
 import type { StreamHandler } from "../types";
 import type { StateManager } from "./state-manager";
 import type { WebSocketManager } from "./websocket-manager";
+import { TimeSyncManager } from "./time-sync-manager";
+import { getSupportedFormats } from "./codec-support";
 
 // Constants
 const STATE_UPDATE_INTERVAL = 5000; // 5 seconds
-const TIME_SYNC_BURST_SIZE = 8;
-const TIME_SYNC_BURST_INTERVAL_MS = 10000;
-const TIME_SYNC_REQUEST_TIMEOUT_MS = 2000;
-const TIME_SYNC_ROBUST_SELECTION_COUNT = 3;
-
-interface TimeSyncSample {
-
measurement: number; - maxError: number; - t4: number; - rttTerm: number; -} export interface ProtocolHandlerConfig { clientName?: string; @@ -58,11 +47,7 @@ export class ProtocolHandler { private onVolumeCommand?: (volume: number, muted: boolean) => void; private onDelayCommand?: (delayMs: number) => void; private getExternalVolume?: () => { volume: number; muted: boolean }; - private timeSyncBurstActive: boolean = false; - private timeSyncBurstSentCount: number = 0; - private timeSyncInFlightClientTransmitted: number | null = null; - private timeSyncInFlightTimeout: number | null = null; - private timeSyncBurstSamples: TimeSyncSample[] = []; + private timeSyncManager: TimeSyncManager; constructor( private playerId: string, @@ -81,6 +66,11 @@ export class ProtocolHandler { this.onVolumeCommand = config.onVolumeCommand; this.onDelayCommand = config.onDelayCommand; this.getExternalVolume = config.getExternalVolume; + this.timeSyncManager = new TimeSyncManager( + wsManager, + stateManager, + timeFilter, + ); } // Handle WebSocket messages @@ -108,7 +98,7 @@ export class ProtocolHandler { break; case "server/time": - this.handleServerTime(message); + this.timeSyncManager.handleServerTime(message as ServerTime); break; case "stream/start": @@ -143,9 +133,7 @@ export class ProtocolHandler { // Per spec: Send initial client/state immediately after server/hello this.sendStateUpdate(); // Start time synchronization with fixed bursts. - this.stopTimeSync(); - this.startTimeSyncBurstIfIdle(); - this.scheduleNextTimeSyncBurstTick(); + this.timeSyncManager.startAndSchedule(); // Start periodic state updates const stateInterval = globalThis.setInterval( @@ -166,170 +154,8 @@ export class ProtocolHandler { this.stateManager.setStateUpdateInterval(newInterval); } - // Schedule the next fixed 10s burst tick. 
- private scheduleNextTimeSyncBurstTick(): void { - const timeSyncTimeout = globalThis.setTimeout(() => { - this.startTimeSyncBurstIfIdle(); - this.scheduleNextTimeSyncBurstTick(); - }, TIME_SYNC_BURST_INTERVAL_MS); - this.stateManager.setTimeSyncInterval(timeSyncTimeout); - } - - private startTimeSyncBurstIfIdle(): void { - if (this.timeSyncBurstActive || !this.wsManager.isConnected()) { - return; - } - - this.timeSyncBurstActive = true; - this.timeSyncBurstSentCount = 0; - this.timeSyncBurstSamples = []; - this.timeSyncInFlightClientTransmitted = null; - this.sendNextTimeSyncBurstProbe(); - } - - private sendNextTimeSyncBurstProbe(): void { - if ( - !this.timeSyncBurstActive || - this.timeSyncInFlightClientTransmitted !== null || - !this.wsManager.isConnected() - ) { - return; - } - - if (this.timeSyncBurstSentCount >= TIME_SYNC_BURST_SIZE) { - this.finalizeTimeSyncBurst(); - return; - } - - const clientTransmitted = this.sendTimeSync(); - this.timeSyncBurstSentCount += 1; - this.timeSyncInFlightClientTransmitted = clientTransmitted; - this.armTimeSyncProbeTimeout(clientTransmitted); - } - - private armTimeSyncProbeTimeout(expectedClientTransmitted: number): void { - this.clearTimeSyncProbeTimeout(); - this.timeSyncInFlightTimeout = globalThis.setTimeout(() => { - this.handleTimeSyncProbeTimeout(expectedClientTransmitted); - }, TIME_SYNC_REQUEST_TIMEOUT_MS); - } - - private clearTimeSyncProbeTimeout(): void { - if (this.timeSyncInFlightTimeout !== null) { - clearTimeout(this.timeSyncInFlightTimeout); - this.timeSyncInFlightTimeout = null; - } - } - - private handleTimeSyncProbeTimeout(expectedClientTransmitted: number): void { - if ( - !this.timeSyncBurstActive || - this.timeSyncInFlightClientTransmitted !== expectedClientTransmitted - ) { - return; - } - - console.warn("Sendspin: Time sync probe timed out, aborting current burst"); - this.abortTimeSyncBurst(); - } - - private finalizeTimeSyncBurst(): void { - this.clearTimeSyncProbeTimeout(); - - const candidate = this.selectTimeSyncBurstCandidate(); - if (candidate) { - this.timeFilter.update( - candidate.measurement, - candidate.maxError, - candidate.t4, - ); - } - - this.timeSyncBurstActive = false; - this.timeSyncBurstSentCount = 0; - this.timeSyncInFlightClientTransmitted = null; - this.timeSyncBurstSamples = []; - } - - private selectTimeSyncBurstCandidate(): TimeSyncSample | null { - if (this.timeSyncBurstSamples.length === 0) { - return null; - } - - const topRttSamples = [...this.timeSyncBurstSamples] - .sort((a, b) => a.rttTerm - b.rttTerm) - .slice( - 0, - Math.min( - TIME_SYNC_ROBUST_SELECTION_COUNT, - this.timeSyncBurstSamples.length, - ), - ); - const sortedByMeasurement = [...topRttSamples].sort( - (a, b) => a.measurement - b.measurement, - ); - return sortedByMeasurement[Math.floor(sortedByMeasurement.length / 2)]; - } - - private abortTimeSyncBurst(): void { - this.clearTimeSyncProbeTimeout(); - this.timeSyncBurstActive = false; - this.timeSyncBurstSentCount = 0; - this.timeSyncInFlightClientTransmitted = null; - this.timeSyncBurstSamples = []; - } - stopTimeSync(): void { - this.stateManager.clearTimeSyncInterval(); - this.abortTimeSyncBurst(); - } - - // Handle server time synchronization - private handleServerTime(message: ServerTime): void { - if ( - !this.timeSyncBurstActive || - this.timeSyncInFlightClientTransmitted === null - ) { - return; - } - - // Per spec: client_transmitted (T1), server_received (T2), server_transmitted (T3) - const T1 = message.payload.client_transmitted; - if (T1 !== 
this.timeSyncInFlightClientTransmitted) {
-      console.warn(
-        "Sendspin: Ignoring out-of-order time response",
-        T1,
-        this.timeSyncInFlightClientTransmitted,
-      );
-      return;
-    }
-
-    const T4 = Math.floor(performance.now() * 1000); // client received time
-    const T2 = message.payload.server_received;
-    const T3 = message.payload.server_transmitted;
-
-    // NTP offset calculation: measurement = ((T2 - T1) + (T3 - T4)) / 2
-    const measurement = (T2 - T1 + (T3 - T4)) / 2;
-
-    // Max error (half of round-trip time): max_error = ((T4 - T1) - (T3 - T2)) / 2
-    const rttTerm = Math.max(0, T4 - T1 - (T3 - T2));
-    const maxError = Math.max(1000, rttTerm / 2);
-    this.timeSyncBurstSamples.push({
-      measurement,
-      maxError,
-      t4: T4,
-      rttTerm,
-    });
-
-    this.clearTimeSyncProbeTimeout();
-    this.timeSyncInFlightClientTransmitted = null;
-
-    if (this.timeSyncBurstSentCount >= TIME_SYNC_BURST_SIZE) {
-      this.finalizeTimeSyncBurst();
-      return;
-    }
-
-    this.sendNextTimeSyncBurstProbe();
+    this.timeSyncManager.stop();
   }
 
   private handleStreamStart(message: StreamStart): void {
@@ -449,7 +275,7 @@
           "Unknown",
       },
       "player@v1_support": {
-        supported_formats: this.getSupportedFormats(),
+        supported_formats: getSupportedFormats(this.codecs),
         buffer_capacity: this.bufferCapacity,
         supported_commands: ["volume", "mute"],
       },
@@ -458,94 +284,6 @@
     this.wsManager.send(hello);
   }
 
-  // Get supported codecs for the current browser
-  private getBrowserSupportedCodecs(): Set<Codec> {
-    const userAgent =
-      typeof navigator !== "undefined" ? navigator.userAgent : "";
-    const isSafari = /^((?!chrome|android).)*safari/i.test(userAgent);
-    const isFirefox = /firefox/i.test(userAgent);
-
-    // Check if native Opus decoder is available (requires secure context)
-    const hasNativeOpus = typeof AudioDecoder !== "undefined";
-
-    if (!hasNativeOpus) {
-      if (typeof window !== "undefined" && !window.isSecureContext) {
-        console.warn(
-          "[Opus] Running in insecure context, falling back to FLAC/PCM",
-        );
-      } else {
-        console.warn(
-          "[Opus] Native decoder not available, falling back to FLAC/PCM",
-        );
-      }
-    }
-
-    if (isSafari) {
-      // Safari: No FLAC support
-      return new Set(["pcm", "opus"] as Codec[]);
-    }
-
-    if (isFirefox) {
-      // Firefox: Opus has audio glitches with both native and opus-encdec decoders
-      return new Set(["pcm", "flac"] as Codec[]);
-    }
-
-    if (hasNativeOpus) {
-      // Native Opus available (Chrome, Edge)
-      return new Set(["pcm", "opus", "flac"] as Codec[]);
-    }
-
-    // No WebCodecs AudioDecoder (insecure context or unsupported browser)
-    return new Set(["pcm", "flac"] as Codec[]);
-  }
-
-  // Build supported formats from requested codecs, filtering out unsupported ones
-  private getSupportedFormats(): SupportedFormat[] {
-    const browserSupported = this.getBrowserSupportedCodecs();
-    const formats: SupportedFormat[] = [];
-
-    for (const codec of this.codecs) {
-      if (!browserSupported.has(codec)) {
-        continue;
-      }
-
-      if (codec === "opus") {
-        // Opus requires 48kHz
-        formats.push({
-          codec: "opus",
-          sample_rate: 48000,
-          channels: 2,
-          bit_depth: 16,
-        });
-      } else {
-        // PCM and FLAC support both sample rates
-        formats.push({ codec, sample_rate: 48000, channels: 2, bit_depth: 16 });
-        formats.push({ codec, sample_rate: 44100, channels: 2, bit_depth: 16 });
-      }
-    }
-
-    if (formats.length === 0) {
-      throw new Error(
-        `No supported codecs: requested [${this.codecs.join(", ")}], ` +
-          `browser supports [${[...browserSupported].join(", ")}]`,
-      );
-    }
-
-    return formats;
-  }
-
-
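The offset and error formulas above move unchanged into time-sync-manager.ts (see below). As a sanity check, consider a hypothetical probe; all values are in microseconds and purely illustrative, assuming the server clock runs 1_000_000 us ahead and each network leg takes 10_000 us:

// Hypothetical probe values, not taken from the patch.
const T1 = 0;         // client_transmitted
const T2 = 1_010_000; // server_received    = T1 + delay + offset
const T3 = 1_010_100; // server_transmitted = T2 + 100 us processing
const T4 = 20_100;    // client receive     = T3 - offset + delay

const measurement = (T2 - T1 + (T3 - T4)) / 2;    // 1_000_000: recovers the offset
const rttTerm = Math.max(0, T4 - T1 - (T3 - T2)); // 20_000: round trip minus processing
const maxError = Math.max(1000, rttTerm / 2);     // 10_000: one-way delay bound

With symmetric legs the estimator recovers the offset exactly; asymmetry lands inside maxError, which is why the burst selection keeps only the lowest-RTT samples before taking the median.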
-  // Send time synchronization message
-  sendTimeSync(clientTimeUs = Math.floor(performance.now() * 1000)): number {
-    const message: ClientTime = {
-      type: "client/time" as MessageType.CLIENT_TIME,
-      payload: {
-        client_transmitted: clientTimeUs,
-      },
-    };
-    this.wsManager.send(message);
-    return clientTimeUs;
-  }
-
   // Send state update
   // When skipHardwareRead is true, use stateManager values instead of reading from hardware.
   // This avoids race conditions when responding to volume commands.
diff --git a/src/core/time-sync-manager.ts b/src/core/time-sync-manager.ts
new file mode 100644
index 0000000..85de19b
--- /dev/null
+++ b/src/core/time-sync-manager.ts
@@ -0,0 +1,216 @@
+import type { SendspinTimeFilter } from "./time-filter";
+import type { StateManager } from "./state-manager";
+import type { WebSocketManager } from "./websocket-manager";
+import type { ClientTime, MessageType, ServerTime } from "../types";
+
+const TIME_SYNC_BURST_SIZE = 8;
+const TIME_SYNC_BURST_INTERVAL_MS = 10000;
+const TIME_SYNC_REQUEST_TIMEOUT_MS = 2000;
+const TIME_SYNC_ROBUST_SELECTION_COUNT = 3;
+
+interface TimeSyncSample {
+  measurement: number;
+  maxError: number;
+  t4: number;
+  rttTerm: number;
+}
+
+export class TimeSyncManager {
+  private timeSyncBurstActive = false;
+  private timeSyncBurstSentCount = 0;
+  private timeSyncInFlightClientTransmitted: number | null = null;
+  private timeSyncInFlightTimeout: ReturnType<typeof setTimeout> | null = null;
+  private timeSyncBurstSamples: TimeSyncSample[] = [];
+
+  constructor(
+    private wsManager: WebSocketManager,
+    private stateManager: StateManager,
+    private timeFilter: SendspinTimeFilter,
+  ) {}
+
+  // Start an initial burst and schedule recurring bursts.
+  startAndSchedule(): void {
+    this.stop();
+    this.startTimeSyncBurstIfIdle();
+    this.scheduleNextTimeSyncBurstTick();
+  }
+
+  // Schedule the next fixed 10s burst tick.
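+  // Ticks fire on a fixed cadence regardless of outcome: a new burst starts
+  // only if no burst is currently active, and a timed-out probe aborts the
+  // current burst rather than delaying the next tick.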
+ private scheduleNextTimeSyncBurstTick(): void { + const timeSyncTimeout = globalThis.setTimeout(() => { + this.startTimeSyncBurstIfIdle(); + this.scheduleNextTimeSyncBurstTick(); + }, TIME_SYNC_BURST_INTERVAL_MS); + this.stateManager.setTimeSyncInterval(timeSyncTimeout); + } + + private startTimeSyncBurstIfIdle(): void { + if (this.timeSyncBurstActive || !this.wsManager.isConnected()) { + return; + } + + this.timeSyncBurstActive = true; + this.timeSyncBurstSentCount = 0; + this.timeSyncBurstSamples = []; + this.timeSyncInFlightClientTransmitted = null; + this.sendNextTimeSyncBurstProbe(); + } + + private sendNextTimeSyncBurstProbe(): void { + if ( + !this.timeSyncBurstActive || + this.timeSyncInFlightClientTransmitted !== null || + !this.wsManager.isConnected() + ) { + return; + } + + if (this.timeSyncBurstSentCount >= TIME_SYNC_BURST_SIZE) { + this.finalizeTimeSyncBurst(); + return; + } + + const clientTransmitted = this.sendTimeSync(); + this.timeSyncBurstSentCount += 1; + this.timeSyncInFlightClientTransmitted = clientTransmitted; + this.armTimeSyncProbeTimeout(clientTransmitted); + } + + private armTimeSyncProbeTimeout(expectedClientTransmitted: number): void { + this.clearTimeSyncProbeTimeout(); + this.timeSyncInFlightTimeout = globalThis.setTimeout(() => { + this.handleTimeSyncProbeTimeout(expectedClientTransmitted); + }, TIME_SYNC_REQUEST_TIMEOUT_MS); + } + + private clearTimeSyncProbeTimeout(): void { + if (this.timeSyncInFlightTimeout !== null) { + clearTimeout(this.timeSyncInFlightTimeout); + this.timeSyncInFlightTimeout = null; + } + } + + private handleTimeSyncProbeTimeout(expectedClientTransmitted: number): void { + if ( + !this.timeSyncBurstActive || + this.timeSyncInFlightClientTransmitted !== expectedClientTransmitted + ) { + return; + } + + console.warn("Sendspin: Time sync probe timed out, aborting current burst"); + this.abortTimeSyncBurst(); + } + + private finalizeTimeSyncBurst(): void { + this.clearTimeSyncProbeTimeout(); + + const candidate = this.selectTimeSyncBurstCandidate(); + if (candidate) { + this.timeFilter.update( + candidate.measurement, + candidate.maxError, + candidate.t4, + ); + } + + this.timeSyncBurstActive = false; + this.timeSyncBurstSentCount = 0; + this.timeSyncInFlightClientTransmitted = null; + this.timeSyncBurstSamples = []; + } + + private selectTimeSyncBurstCandidate(): TimeSyncSample | null { + if (this.timeSyncBurstSamples.length === 0) { + return null; + } + + const topRttSamples = [...this.timeSyncBurstSamples] + .sort((a, b) => a.rttTerm - b.rttTerm) + .slice( + 0, + Math.min( + TIME_SYNC_ROBUST_SELECTION_COUNT, + this.timeSyncBurstSamples.length, + ), + ); + const sortedByMeasurement = [...topRttSamples].sort( + (a, b) => a.measurement - b.measurement, + ); + return sortedByMeasurement[Math.floor(sortedByMeasurement.length / 2)]; + } + + private abortTimeSyncBurst(): void { + this.clearTimeSyncProbeTimeout(); + this.timeSyncBurstActive = false; + this.timeSyncBurstSentCount = 0; + this.timeSyncInFlightClientTransmitted = null; + this.timeSyncBurstSamples = []; + } + + // Stop all time sync activity (interval + in-flight burst). 
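+  // Idempotent: clears the scheduled tick, cancels any in-flight probe
+  // timeout, and discards partially collected burst samples.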
+ stop(): void { + this.stateManager.clearTimeSyncInterval(); + this.abortTimeSyncBurst(); + } + + // Handle server/time response + handleServerTime(message: ServerTime): void { + if ( + !this.timeSyncBurstActive || + this.timeSyncInFlightClientTransmitted === null + ) { + return; + } + + // Per spec: client_transmitted (T1), server_received (T2), server_transmitted (T3) + const T1 = message.payload.client_transmitted; + if (T1 !== this.timeSyncInFlightClientTransmitted) { + console.warn( + "Sendspin: Ignoring out-of-order time response", + T1, + this.timeSyncInFlightClientTransmitted, + ); + return; + } + + const T4 = Math.floor(performance.now() * 1000); // client received time + const T2 = message.payload.server_received; + const T3 = message.payload.server_transmitted; + + // NTP offset calculation: measurement = ((T2 - T1) + (T3 - T4)) / 2 + const measurement = (T2 - T1 + (T3 - T4)) / 2; + + // Max error (half of round-trip time): max_error = ((T4 - T1) - (T3 - T2)) / 2 + const rttTerm = Math.max(0, T4 - T1 - (T3 - T2)); + const maxError = Math.max(1000, rttTerm / 2); + this.timeSyncBurstSamples.push({ + measurement, + maxError, + t4: T4, + rttTerm, + }); + + this.clearTimeSyncProbeTimeout(); + this.timeSyncInFlightClientTransmitted = null; + + if (this.timeSyncBurstSentCount >= TIME_SYNC_BURST_SIZE) { + this.finalizeTimeSyncBurst(); + return; + } + + this.sendNextTimeSyncBurstProbe(); + } + + // Send time synchronization message + sendTimeSync(clientTimeUs = Math.floor(performance.now() * 1000)): number { + const message: ClientTime = { + type: "client/time" as MessageType.CLIENT_TIME, + payload: { + client_transmitted: clientTimeUs, + }, + }; + this.wsManager.send(message); + return clientTimeUs; + } +} From d18823c8abc6e653c41f655d7a7a2ce7dbc27038 Mon Sep 17 00:00:00 2001 From: Claude Date: Sun, 12 Apr 2026 11:09:36 +0000 Subject: [PATCH 08/27] Integrate scheduler sub-modules and add configurable thresholds MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Rewrite scheduler.ts (1581→688 lines) to delegate to: - ClockSource: output timestamp validation + clock selection - RecorrectionMonitor: drift detection with transient filtering - OutputLatencyTracker: EMA smoothing + persistence - Add CorrectionThresholds interface and correctionThresholds config option to SendspinPlayerConfig for per-mode threshold overrides - Default thresholds are deep-merged with user overrides https://claude.ai/code/session_018UYYEXUZVuQ2Z4Texa7W6m --- src/audio/scheduler.ts | 1299 +++++++--------------------------------- src/index.ts | 1 + src/types.ts | 33 + 3 files changed, 237 insertions(+), 1096 deletions(-) diff --git a/src/audio/scheduler.ts b/src/audio/scheduler.ts index 17e9b9b..392dce8 100644 --- a/src/audio/scheduler.ts +++ b/src/audio/scheduler.ts @@ -10,11 +10,18 @@ import type { AudioBufferQueueItem, AudioOutputMode, CorrectionMode, + CorrectionThresholds, DecodedAudioChunk, SendspinStorage, } from "../types"; import type { StateManager } from "../core/state-manager"; import type { SendspinTimeFilter } from "../core/time-filter"; +import { ClockSource } from "./clock-source"; +import { + RecorrectionMonitor, + RECORRECTION_CUTOVER_GUARD_SEC, +} from "./recorrection-monitor"; +import { OutputLatencyTracker } from "./output-latency-tracker"; // Sync correction constants const SAMPLE_CORRECTION_FADE_LEN = 8; @@ -31,60 +38,15 @@ for (let f = 0; f < SAMPLE_CORRECTION_FADE_LEN; f++) { ((SAMPLE_CORRECTION_FADE_LEN - f) / (SAMPLE_CORRECTION_FADE_LEN + 1)) * 
SAMPLE_CORRECTION_FADE_STRENGTH;
 }
 
-const OUTPUT_LATENCY_ALPHA = 0.01;
 const SYNC_ERROR_ALPHA = 0.1;
-const OUTPUT_LATENCY_STORAGE_KEY = "sendspin-output-latency-us";
-const OUTPUT_LATENCY_PERSIST_INTERVAL_MS = 10_000;
-const RECORRECTION_CHECK_INTERVAL_MS = 250;
-const RECORRECTION_TRIGGER_MS = 30;
-const RECORRECTION_SUSTAIN_MS = 400;
-const RECORRECTION_COOLDOWN_MS = 1_500;
-const RECORRECTION_CUTOVER_GUARD_SEC = 0.3;
-const RECORRECTION_TRANSIENT_JUMP_MS = 25;
-const RECORRECTION_TRANSIENT_CONFIRM_WINDOW_MS =
-  RECORRECTION_CHECK_INTERVAL_MS * 4;
-const HARD_RESYNC_STARTUP_GRACE_MS = 1_000;
-const HARD_RESYNC_COOLDOWN_MS = 500;
 const SCHEDULE_HEADROOM_SEC = 0.2;
 const SCHEDULE_HORIZON_PRECISE_SEC = 20;
 const SCHEDULE_HORIZON_GOOD_SEC = 8;
 const SCHEDULE_HORIZON_POOR_SEC = 4;
 const SCHEDULE_HORIZON_PRECISE_ERROR_MS = 2;
 const SCHEDULE_HORIZON_GOOD_ERROR_MS = 8;
 
-type AudioClockSource = "estimated" | "timestamp" | "raw";
-
-interface OutputTimestampSample {
-  contextTimeSec: number;
-  performanceTimeMs: number;
-  nowMs: number;
-  predictedAudioTimeSec: number;
-  rawAudioTimeSec: number;
-}
-
-const OUTPUT_TIMESTAMP_MAX_FRESHNESS_MS = 250;
-const OUTPUT_TIMESTAMP_MIN_SAMPLE_INTERVAL_MS = 40;
-const OUTPUT_TIMESTAMP_SLOPE_MIN = 0.95;
-const OUTPUT_TIMESTAMP_SLOPE_MAX = 1.05;
-const OUTPUT_TIMESTAMP_MAX_DIVERGENCE_SEC = 0.25;
-const OUTPUT_TIMESTAMP_MAX_DIVERGENCE_DELTA_SEC = 0.05;
-const OUTPUT_TIMESTAMP_MAX_BACKWARD_SEC = 0.005;
-const OUTPUT_TIMESTAMP_FUTURE_TOLERANCE_MS = 5;
-const OUTPUT_TIMESTAMP_PROMOTION_MIN_GOOD_SAMPLES = 6;
-const OUTPUT_TIMESTAMP_PROMOTION_MIN_SPAN_MS = 750;
-const OUTPUT_TIMESTAMP_MAX_CONSECUTIVE_BAD_SAMPLES = 2;
-
-const CORRECTION_THRESHOLDS: Record<
-  CorrectionMode,
-  {
-    resyncAboveMs: number;
-    rate2AboveMs: number;
-    rate1AboveMs: number;
-    samplesBelowMs: number;
-    deadbandBelowMs: number;
-    enableRecorrectionMonitor: boolean;
-    immediateDelayCutover: boolean;
-  }
-> = {
+const DEFAULT_CORRECTION_THRESHOLDS: Record<CorrectionMode, CorrectionThresholds> = {
   sync: {
     resyncAboveMs: 200,
     rate2AboveMs: 35,
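The merge that consumes these defaults appears in the constructor below. As a usage sketch (the SendspinPlayer entry point and config shape here are assumed for illustration; this patch only defines the correctionThresholds option and the per-mode merge):

import { SendspinPlayer } from "sendspin-js"; // hypothetical entry point

const player = new SendspinPlayer({
  correctionMode: "sync",
  // Partial<Record<CorrectionMode, Partial<CorrectionThresholds>>>:
  // only the fields you name are replaced; everything else falls back
  // to the per-mode values in DEFAULT_CORRECTION_THRESHOLDS.
  correctionThresholds: {
    sync: { resyncAboveMs: 300 },
  },
});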
@@ -136,43 +98,24 @@ export class AudioScheduler {
   private smoothedSyncErrorMs: number = 0;
   private resyncCount: number = 0;
   private currentPlaybackRate: number = 1.0;
-  private currentCorrectionMethod: "none" | "samples" | "rate" | "resync" =
-    "none";
+  private currentCorrectionMethod: "none" | "samples" | "rate" | "resync" = "none";
   private lastSamplesAdjusted: number = 0;
-  private lastRawOutputLatencyUs: number = 0;
-  private smoothedOutputLatencyUs: number | null = null;
-  private lastLatencyPersistAtMs: number | null = null;
-
-  private timingEstimateAudioContextTimeSec: number | null = null;
-  private timingEstimateAtMs: number | null = null;
-
   private _correctionMode: CorrectionMode = "sync";
+  private correctionThresholds: Record<CorrectionMode, CorrectionThresholds>;
   private _lastStatusLogMs: number = 0;
-  private _lastTimestampRejectReason: string | null = null;
   private _intervalResyncCount: number = 0;
-  private useOutputLatencyCompensation: boolean = true;
-  private recorrectionInterval: ReturnType<typeof setInterval> | null = null;
-  private recorrectionBreachStartedAtMs: number | null = null;
-  private lastRecorrectionAtMs: number = -Infinity;
-  private recorrectionMinScheduleTimeSec: number | null = null;
-  private recorrectionPrevRawSyncErrorMs: number | null = null;
-  private recorrectionPendingJumpSign: number | null = null;
-  private recorrectionPendingJumpAtMs: number | null = null;
-  private hardResyncGraceUntilMs: number | null = null;
-  private lastHardResyncAtMs: number = -Infinity;
-  private pendingClockSourceCutover = false;
-  private activeAudioClockSource: AudioClockSource = "estimated";
-  private outputTimestampLastSample: OutputTimestampSample | null = null;
-  private outputTimestampGoodSamples: number = 0;
-  private outputTimestampBadSamples: number = 0;
-  private outputTimestampGoodSinceMs: number | null = null;
-
+  private useOutputLatencyCompensation: boolean;
   private scheduleTimeout: ReturnType<typeof setTimeout> | null = null;
   private queueProcessScheduled = false;
 
+  // Sub-modules
+  private clockSource = new ClockSource();
+  private recorrectionMonitor: RecorrectionMonitor;
+  private latencyTracker: OutputLatencyTracker;
+
   constructor(
     private stateManager: StateManager,
     private timeFilter: SendspinTimeFilter,
@@ -184,47 +127,37 @@ export class AudioScheduler {
     private syncDelayMs: number = 0,
     private useHardwareVolume: boolean = false,
     correctionMode: CorrectionMode = "sync",
-    private storage: SendspinStorage | null = null,
+    storage: SendspinStorage | null = null,
     useOutputLatencyCompensation: boolean = true,
+    thresholdOverrides?: Partial<Record<CorrectionMode, Partial<CorrectionThresholds>>>,
   ) {
     this._correctionMode = correctionMode;
     this.useOutputLatencyCompensation = useOutputLatencyCompensation;
     this.syncDelayMs = this.sanitizeSyncDelayMs(this.syncDelayMs);
-    this.loadPersistedLatency();
-  }
-
-  private sanitizeSyncDelayMs(delayMs: number): number {
-    if (!isFinite(delayMs)) {
-      return 0;
-    }
-    return Math.max(0, Math.min(5000, Math.round(delayMs)));
-  }
 
-  private loadPersistedLatency(): void {
-    if (!this.storage) return;
-    try {
-      const stored = this.storage.getItem(OUTPUT_LATENCY_STORAGE_KEY);
-      if (stored) {
-        const latency = parseFloat(stored);
-        if (!isNaN(latency) && latency >= 0) {
-          this.smoothedOutputLatencyUs = latency;
+    // Merge user-provided threshold overrides with defaults
+    this.correctionThresholds = { ...DEFAULT_CORRECTION_THRESHOLDS };
+    if (thresholdOverrides) {
+      for (const mode of Object.keys(thresholdOverrides) as CorrectionMode[]) {
+        const overrides = thresholdOverrides[mode];
+        if (overrides) {
+          this.correctionThresholds[mode] = {
+            ...DEFAULT_CORRECTION_THRESHOLDS[mode],
+            ...overrides,
+          };
         }
       }
-    } catch {
-      // ignore
     }
+
+    this.latencyTracker = new OutputLatencyTracker(storage);
+    this.recorrectionMonitor = new RecorrectionMonitor(
+      () => this.checkRecorrection(),
+    );
   }
 
-  private persistLatency(): void {
-    if (!this.storage || this.smoothedOutputLatencyUs === null) return;
-    try {
-      this.storage.setItem(
-        OUTPUT_LATENCY_STORAGE_KEY,
-        this.smoothedOutputLatencyUs.toString(),
-      );
-    } catch {
-      // ignore
-    }
+  private sanitizeSyncDelayMs(delayMs: number): number {
+    if (!isFinite(delayMs)) return 0;
+    return Math.max(0, Math.min(5000, Math.round(delayMs)));
   }
 
@@ -234,347 +167,43 @@ export class AudioScheduler {
 
   setCorrectionMode(mode: CorrectionMode): void {
     this._correctionMode = mode;
-    if (!this.modeUsesRecorrectionMonitor(mode)) {
-      this.stopRecorrectionMonitor();
+    if (!this.correctionThresholds[mode].enableRecorrectionMonitor) {
+      this.recorrectionMonitor.stop();
     } else {
-      this.startRecorrectionMonitor();
+      this.recorrectionMonitor.start();
     }
   }
 
-  private modeUsesRecorrectionMonitor(mode: CorrectionMode): boolean {
-    return CORRECTION_THRESHOLDS[mode].enableRecorrectionMonitor;
-  }
-
   private get usesRecorrectionMonitor(): boolean {
-    return this.modeUsesRecorrectionMonitor(this._correctionMode);
+    return this.correctionThresholds[this._correctionMode].enableRecorrectionMonitor;
   }
 
   private get usesImmediateDelayCutover(): boolean {
-    return CORRECTION_THRESHOLDS[this._correctionMode].immediateDelayCutover;
+    return 
this.correctionThresholds[this._correctionMode].immediateDelayCutover; } private getTargetScheduledHorizonSec(): number { const errorMs = this.timeFilter.error / 1000; - if (errorMs < SCHEDULE_HORIZON_PRECISE_ERROR_MS) { - return SCHEDULE_HORIZON_PRECISE_SEC; - } - if (errorMs <= SCHEDULE_HORIZON_GOOD_ERROR_MS) { - return SCHEDULE_HORIZON_GOOD_SEC; - } + if (errorMs < SCHEDULE_HORIZON_PRECISE_ERROR_MS) return SCHEDULE_HORIZON_PRECISE_SEC; + if (errorMs <= SCHEDULE_HORIZON_GOOD_ERROR_MS) return SCHEDULE_HORIZON_GOOD_SEC; return SCHEDULE_HORIZON_POOR_SEC; } private getScheduledAheadSec(currentTimeSec: number): number { - let farthestScheduledSec = this.nextScheduleTime; + let farthest = this.nextScheduleTime; for (const entry of this.scheduledSources) { - if (entry.endTime > farthestScheduledSec) { - farthestScheduledSec = entry.endTime; - } - } - if (farthestScheduledSec <= 0) { - return 0; - } - return Math.max(0, farthestScheduledSec - currentTimeSec); - } - - private setActiveAudioClockSource(source: AudioClockSource): void { - if (this.activeAudioClockSource === source) { - return; - } - this.activeAudioClockSource = source; - this.pendingClockSourceCutover = source === "timestamp"; - if ( - this.pendingClockSourceCutover && - (this.scheduledSources.length > 0 || - this.nextPlaybackTime !== 0 || - this.lastScheduledServerTime !== 0) - ) { - this.scheduleQueueProcessing(); - } - } - - private resetOutputTimestampValidation(): void { - this.activeAudioClockSource = "estimated"; - this.pendingClockSourceCutover = false; - this.outputTimestampLastSample = null; - this.outputTimestampGoodSamples = 0; - this._lastTimestampRejectReason = null; - this.outputTimestampBadSamples = 0; - this.outputTimestampGoodSinceMs = null; - } - - private demoteOutputTimestampValidation(reason: string): void { - this.resetOutputTimestampValidation(); - this._lastTimestampRejectReason = reason; - } - - private getEstimatedAudioContextTimeSec( - rawTimeSec: number, - nowMs: number, - ): number { - const TIMING_MAX_SLEW_SEC = 0.002; - const TIMING_RESET_THRESHOLD_SEC = 0.5; - const TIMING_MAX_LEAD_SEC = 0.1; - - if (this.timingEstimateAudioContextTimeSec === null) { - this.timingEstimateAudioContextTimeSec = rawTimeSec; - this.timingEstimateAtMs = nowMs; - } else if (this.timingEstimateAtMs !== null) { - const wallDeltaSec = Math.max( - 0, - (nowMs - this.timingEstimateAtMs) / 1000, - ); - const predicted = this.timingEstimateAudioContextTimeSec + wallDeltaSec; - this.timingEstimateAtMs = nowMs; - - const errorSec = rawTimeSec - predicted; - if (Math.abs(errorSec) > TIMING_RESET_THRESHOLD_SEC) { - this.timingEstimateAudioContextTimeSec = rawTimeSec; - } else { - const slew = Math.max( - -TIMING_MAX_SLEW_SEC, - Math.min(TIMING_MAX_SLEW_SEC, errorSec), - ); - const next = Math.max( - this.timingEstimateAudioContextTimeSec, - predicted + slew, - ); - this.timingEstimateAudioContextTimeSec = Math.min( - next, - rawTimeSec + TIMING_MAX_LEAD_SEC, - ); - } - } - - return this.timingEstimateAudioContextTimeSec ?? 
rawTimeSec; - } - - private rejectOutputTimestampSample( - reason: string, - catastrophic: boolean = false, - ): void { - this.outputTimestampLastSample = null; - this.outputTimestampGoodSamples = 0; - this.outputTimestampGoodSinceMs = null; - this._lastTimestampRejectReason = reason; - - if (this.activeAudioClockSource !== "timestamp") { - this.outputTimestampBadSamples = 0; - return; - } - - this.outputTimestampBadSamples += 1; - if ( - catastrophic || - this.outputTimestampBadSamples >= - OUTPUT_TIMESTAMP_MAX_CONSECUTIVE_BAD_SAMPLES - ) { - this.demoteOutputTimestampValidation(reason); + if (entry.endTime > farthest) farthest = entry.endTime; } - } - - private getTimestampDerivedAudioTimeSec(rawTimeSec: number): number | null { - if (!this.audioContext) { - return null; - } - - const getOutputTimestamp = ( - this.audioContext as unknown as { - getOutputTimestamp?: () => { - contextTime: number; - performanceTime: number; - }; - } - ).getOutputTimestamp; - - if (typeof getOutputTimestamp !== "function") { - if (this.activeAudioClockSource === "timestamp") { - this.demoteOutputTimestampValidation("getOutputTimestamp unavailable"); - } - return null; - } - - try { - const ts = getOutputTimestamp.call(this.audioContext); - const nowMs = performance.now(); - const rawFreshnessMs = nowMs - ts.performanceTime; - if (rawFreshnessMs < -OUTPUT_TIMESTAMP_FUTURE_TOLERANCE_MS) { - this.rejectOutputTimestampSample( - `performanceTime in future (${rawFreshnessMs.toFixed(1)}ms)`, - true, - ); - return null; - } - - const freshnessMs = Math.max(0, rawFreshnessMs); - const predictedAudioTimeSec = ts.contextTime + freshnessMs / 1000; - const sample: OutputTimestampSample = { - contextTimeSec: ts.contextTime, - performanceTimeMs: ts.performanceTime, - nowMs, - predictedAudioTimeSec, - rawAudioTimeSec: rawTimeSec, - }; - - if (freshnessMs > OUTPUT_TIMESTAMP_MAX_FRESHNESS_MS) { - this.rejectOutputTimestampSample( - `stale timestamp (${freshnessMs.toFixed(1)}ms old)`, - true, - ); - return null; - } - - const divergenceSec = predictedAudioTimeSec - rawTimeSec; - if (Math.abs(divergenceSec) > OUTPUT_TIMESTAMP_MAX_DIVERGENCE_SEC) { - this.rejectOutputTimestampSample( - `timestamp/raw divergence ${Math.abs(divergenceSec * 1000).toFixed(1)}ms`, - true, - ); - return null; - } - - const lastSample = this.outputTimestampLastSample; - if (lastSample) { - const perfDeltaMs = ts.performanceTime - lastSample.performanceTimeMs; - if (perfDeltaMs < 0) { - this.rejectOutputTimestampSample( - `performanceTime moved backward (${perfDeltaMs.toFixed(1)}ms)`, - true, - ); - return null; - } - - if ( - predictedAudioTimeSec < - lastSample.predictedAudioTimeSec - OUTPUT_TIMESTAMP_MAX_BACKWARD_SEC - ) { - this.rejectOutputTimestampSample( - `predicted audio time moved backward ${((lastSample.predictedAudioTimeSec - predictedAudioTimeSec) * 1000).toFixed(1)}ms`, - true, - ); - return null; - } - - const lastDivergenceSec = - lastSample.predictedAudioTimeSec - lastSample.rawAudioTimeSec; - if ( - Math.abs(divergenceSec - lastDivergenceSec) > - OUTPUT_TIMESTAMP_MAX_DIVERGENCE_DELTA_SEC - ) { - this.rejectOutputTimestampSample( - `timestamp/raw divergence drift ${Math.abs((divergenceSec - lastDivergenceSec) * 1000).toFixed(1)}ms`, - ); - return null; - } - - if (perfDeltaMs >= OUTPUT_TIMESTAMP_MIN_SAMPLE_INTERVAL_MS) { - const perfDeltaSec = perfDeltaMs / 1000; - const contextSlope = - (ts.contextTime - lastSample.contextTimeSec) / perfDeltaSec; - const predictedSlope = - (predictedAudioTimeSec - lastSample.predictedAudioTimeSec) / - 
perfDeltaSec; - - if ( - contextSlope < OUTPUT_TIMESTAMP_SLOPE_MIN || - contextSlope > OUTPUT_TIMESTAMP_SLOPE_MAX - ) { - this.rejectOutputTimestampSample( - `context slope ${contextSlope.toFixed(3)} out of range`, - ); - return null; - } - if ( - predictedSlope < OUTPUT_TIMESTAMP_SLOPE_MIN || - predictedSlope > OUTPUT_TIMESTAMP_SLOPE_MAX - ) { - this.rejectOutputTimestampSample( - `predicted slope ${predictedSlope.toFixed(3)} out of range`, - ); - return null; - } - } - } - - this.outputTimestampLastSample = sample; - this.outputTimestampBadSamples = 0; - if (this.outputTimestampGoodSinceMs === null) { - this.outputTimestampGoodSinceMs = nowMs; - } - this.outputTimestampGoodSamples += 1; - - if ( - this.activeAudioClockSource !== "timestamp" && - this.outputTimestampGoodSamples >= - OUTPUT_TIMESTAMP_PROMOTION_MIN_GOOD_SAMPLES && - this.outputTimestampGoodSinceMs !== null && - nowMs - this.outputTimestampGoodSinceMs >= - OUTPUT_TIMESTAMP_PROMOTION_MIN_SPAN_MS - ) { - this.setActiveAudioClockSource("timestamp"); - this._lastTimestampRejectReason = null; - } - - return predictedAudioTimeSec; - } catch (error) { - const reason = - error instanceof Error - ? `getOutputTimestamp failed: ${error.message}` - : `getOutputTimestamp failed: ${String(error)}`; - this.rejectOutputTimestampSample(reason, true); - return null; - } - } - - private getTimingSnapshot(): { - audioContextTimeSec: number; - audioContextRawTimeSec: number; - nowMs: number; - nowUs: number; - } { - const nowMs = performance.now(); - const nowUs = nowMs * 1000; - if (!this.audioContext) { - return { - audioContextTimeSec: 0, - audioContextRawTimeSec: 0, - nowMs, - nowUs, - }; - } - - const rawTimeSec = this.audioContext.currentTime; - const estimatedTimeSec = this.getEstimatedAudioContextTimeSec( - rawTimeSec, - nowMs, - ); - const timestampTimeSec = this.getTimestampDerivedAudioTimeSec(rawTimeSec); - - let derivedTimeSec = - this.activeAudioClockSource === "timestamp" && timestampTimeSec !== null - ? timestampTimeSec - : estimatedTimeSec; - if (!Number.isFinite(derivedTimeSec)) { - derivedTimeSec = rawTimeSec; - } - - return { - audioContextTimeSec: derivedTimeSec, - audioContextRawTimeSec: rawTimeSec, - nowMs, - nowUs, - }; + return farthest <= 0 ? 
0 : Math.max(0, farthest - currentTimeSec); } private resetScheduledPlaybackState(_reason?: string): void { this.nextPlaybackTime = 0; this.nextScheduleTime = 0; this.lastScheduledServerTime = 0; - this.recorrectionMinScheduleTimeSec = null; - this.hardResyncGraceUntilMs = null; - this.lastHardResyncAtMs = -Infinity; - this.pendingClockSourceCutover = false; - this.resetRecorrectionCheckState(); + this.recorrectionMonitor.minScheduleTimeSec = null; + this.clockSource.pendingCutover = false; + this.recorrectionMonitor.resetCheckState(); this.resetSyncErrorEma(); this.currentSyncErrorMs = 0; this.currentPlaybackRate = 1.0; @@ -585,132 +214,24 @@ export class AudioScheduler { } private pruneExpiredScheduledSources(currentTimeSec: number): void { - if (this.scheduledSources.length === 0) { - return; - } - + if (this.scheduledSources.length === 0) return; this.scheduledSources = this.scheduledSources.filter( (entry) => entry.endTime > currentTimeSec, ); - if (this.scheduledSources.length === 0) { this.resetScheduledPlaybackState("no scheduled audio ahead"); } } - private startRecorrectionMonitor(): void { - if (this.recorrectionInterval !== null) { - return; - } - this.recorrectionInterval = globalThis.setInterval( - () => this.checkRecorrection(), - RECORRECTION_CHECK_INTERVAL_MS, - ); - } - - private stopRecorrectionMonitor(): void { - if (this.recorrectionInterval !== null) { - clearInterval(this.recorrectionInterval); - this.recorrectionInterval = null; - } - this.resetRecorrectionCheckState(); - this.lastRecorrectionAtMs = -Infinity; - } - - private clearRecorrectionBreachState(): void { - this.recorrectionBreachStartedAtMs = null; - this.recorrectionPendingJumpSign = null; - this.recorrectionPendingJumpAtMs = null; - } - - private resetRecorrectionCheckState(): void { - this.clearRecorrectionBreachState(); - this.recorrectionPrevRawSyncErrorMs = null; - } - - private armHardResyncStartupGrace(nowMs: number): void { - if (this.activeAudioClockSource === "timestamp") { - this.hardResyncGraceUntilMs = null; - return; - } - if (this.hardResyncGraceUntilMs === null) { - this.hardResyncGraceUntilMs = nowMs + HARD_RESYNC_STARTUP_GRACE_MS; - } - } - - private canUseHardResync(nowMs: number): boolean { - if (this.activeAudioClockSource === "timestamp") { - this.hardResyncGraceUntilMs = null; - } else if ( - this.hardResyncGraceUntilMs !== null && - nowMs < this.hardResyncGraceUntilMs - ) { - return false; - } - - return nowMs - this.lastHardResyncAtMs >= HARD_RESYNC_COOLDOWN_MS; - } - - private noteHardResync(nowMs: number): void { - this.lastHardResyncAtMs = nowMs; - } - - private shouldIgnoreTransientRecorrectionJump( - rawSyncErrorMs: number, - nowMs: number, - ): boolean { - const prevRawSyncErrorMs = this.recorrectionPrevRawSyncErrorMs; - this.recorrectionPrevRawSyncErrorMs = rawSyncErrorMs; - - if (prevRawSyncErrorMs === null) { - this.recorrectionPendingJumpSign = null; - this.recorrectionPendingJumpAtMs = null; - return false; - } - - const jumpDeltaMs = rawSyncErrorMs - prevRawSyncErrorMs; - const jumpSign = Math.sign(rawSyncErrorMs); - const isJumpDetected = - Math.abs(jumpDeltaMs) >= RECORRECTION_TRANSIENT_JUMP_MS && jumpSign !== 0; - if (!isJumpDetected) { - this.recorrectionPendingJumpSign = null; - this.recorrectionPendingJumpAtMs = null; - return false; - } - - const isConfirmed = - this.recorrectionPendingJumpSign === jumpSign && - this.recorrectionPendingJumpAtMs !== null && - nowMs - this.recorrectionPendingJumpAtMs <= - RECORRECTION_TRANSIENT_CONFIRM_WINDOW_MS; - 
this.recorrectionPendingJumpSign = jumpSign; - this.recorrectionPendingJumpAtMs = nowMs; - if (isConfirmed) { - this.recorrectionPendingJumpSign = null; - this.recorrectionPendingJumpAtMs = null; - return false; - } - - return true; - } - - private performGuardedCutover( - reason: "recorrection" | "delay-change", - options: { - incrementResyncCount?: boolean; - markCooldown?: boolean; - } = {}, + _reason: "recorrection" | "delay-change", + options: { incrementResyncCount?: boolean; markCooldown?: boolean } = {}, ): void { - if (!this.audioContext) { - return; - } - + if (!this.audioContext) return; const incrementResyncCount = options.incrementResyncCount ?? false; const markCooldown = options.markCooldown ?? true; const nowMs = performance.now(); - const cutoffTime = - this.audioContext.currentTime + RECORRECTION_CUTOVER_GUARD_SEC; + const cutoffTime = this.audioContext.currentTime + RECORRECTION_CUTOVER_GUARD_SEC; if (incrementResyncCount) { this.resyncCount++; this._intervalResyncCount++; @@ -720,149 +241,65 @@ export class AudioScheduler { this.lastSamplesAdjusted = 0; this.currentPlaybackRate = 1.0; const cutResult = this.cutScheduledSources(cutoffTime); - this.recorrectionMinScheduleTimeSec = Math.max( - cutoffTime, - cutResult.keptTailEndTimeSec, - ); + this.recorrectionMonitor.minScheduleTimeSec = Math.max(cutoffTime, cutResult.keptTailEndTimeSec); this.nextPlaybackTime = 0; this.nextScheduleTime = 0; this.lastScheduledServerTime = 0; - this.resetRecorrectionCheckState(); - if (markCooldown) { - this.lastRecorrectionAtMs = nowMs; - } - this.noteHardResync(nowMs); - + this.recorrectionMonitor.resetCheckState(); + if (markCooldown) this.recorrectionMonitor.markRecorrection(nowMs); + this.recorrectionMonitor.noteHardResync(nowMs); this.processAudioQueue(); } private checkRecorrection(): void { - if (!this.usesRecorrectionMonitor) { - this.resetRecorrectionCheckState(); - return; - } - if (!this.audioContext || this.audioContext.state !== "running") { - this.resetRecorrectionCheckState(); - return; - } - if ( - !this.stateManager.isPlaying || - this.nextPlaybackTime === 0 || - this.lastScheduledServerTime === 0 - ) { - this.resetRecorrectionCheckState(); - return; + if (!this.usesRecorrectionMonitor) { this.recorrectionMonitor.resetCheckState(); return; } + if (!this.audioContext || this.audioContext.state !== "running") { this.recorrectionMonitor.resetCheckState(); return; } + if (!this.stateManager.isPlaying || this.nextPlaybackTime === 0 || this.lastScheduledServerTime === 0) { + this.recorrectionMonitor.resetCheckState(); return; } - const { - audioContextTimeSec: audioContextTime, - audioContextRawTimeSec: audioContextRawTime, - nowMs, - nowUs, - } = this.getTimingSnapshot(); - this.pruneExpiredScheduledSources(audioContextRawTime); - const scheduledAheadSec = this.getScheduledAheadSec(audioContextRawTime); - if (scheduledAheadSec <= 0) { - this.resetRecorrectionCheckState(); - if (this.audioBufferQueue.length > 0) { - this.processAudioQueue(); - } + const { audioContextTimeSec, audioContextRawTimeSec, nowMs, nowUs } = this.clockSource.getTimingSnapshot(this.audioContext); + this.pruneExpiredScheduledSources(audioContextRawTimeSec); + if (this.getScheduledAheadSec(audioContextRawTimeSec) <= 0) { + this.recorrectionMonitor.resetCheckState(); + if (this.audioBufferQueue.length > 0) this.processAudioQueue(); return; } - const outputLatencySec = this.useOutputLatencyCompensation - ? 
this.getSmoothedOutputLatencyUs() / 1_000_000 - : 0; - const targetPlaybackTime = this.computeTargetPlaybackTime( - this.lastScheduledServerTime, - audioContextTime, - nowUs, - outputLatencySec, - ); + const outputLatencySec = this.useOutputLatencyCompensation ? this.latencyTracker.getSmoothedUs(this.audioContext) / 1_000_000 : 0; + const targetPlaybackTime = this.computeTargetPlaybackTime(this.lastScheduledServerTime, audioContextTimeSec, nowUs, outputLatencySec); const syncErrorMs = (this.nextPlaybackTime - targetPlaybackTime) * 1000; const smoothedSyncErrorMs = this.applySyncErrorEma(syncErrorMs); - const absErrorMs = Math.abs(smoothedSyncErrorMs); - const isTransientJump = this.shouldIgnoreTransientRecorrectionJump( - syncErrorMs, - nowMs, - ); - if (absErrorMs < RECORRECTION_TRIGGER_MS) { - this.clearRecorrectionBreachState(); - return; - } - if (isTransientJump) { - this.clearRecorrectionBreachState(); - return; - } - if (this.recorrectionBreachStartedAtMs === null) { - this.recorrectionBreachStartedAtMs = nowMs; - return; - } - if (nowMs - this.recorrectionBreachStartedAtMs < RECORRECTION_SUSTAIN_MS) { - return; - } - if (nowMs - this.lastRecorrectionAtMs < RECORRECTION_COOLDOWN_MS) { - return; - } - - this.applyRecorrectionCutover(); - } - private applyRecorrectionCutover(): void { - this.performGuardedCutover("recorrection", { - incrementResyncCount: true, - markCooldown: true, - }); + if (this.recorrectionMonitor.shouldRecorrect(Math.abs(smoothedSyncErrorMs), syncErrorMs, nowMs)) { + this.performGuardedCutover("recorrection", { incrementResyncCount: true, markCooldown: true }); + } } - getSyncDelayMs(): number { - return this.syncDelayMs; - } + getSyncDelayMs(): number { return this.syncDelayMs; } setSyncDelay(delayMs: number): void { - const sanitizedDelayMs = this.sanitizeSyncDelayMs(delayMs); - const oldDelayMs = this.syncDelayMs; - const deltaMs = sanitizedDelayMs - oldDelayMs; - this.syncDelayMs = sanitizedDelayMs; - - if (deltaMs === 0 || !this.usesImmediateDelayCutover) { - return; - } - if (!this.audioContext || this.audioContext.state !== "running") { - return; - } - if (!this.stateManager.isPlaying) { - return; - } - if ( - this.scheduledSources.length === 0 && - this.audioBufferQueue.length === 0 && - this.nextPlaybackTime === 0 - ) { - return; - } - - this.performGuardedCutover("delay-change", { - incrementResyncCount: false, - markCooldown: true, - }); + const sanitized = this.sanitizeSyncDelayMs(delayMs); + const delta = sanitized - this.syncDelayMs; + this.syncDelayMs = sanitized; + if (delta === 0 || !this.usesImmediateDelayCutover) return; + if (!this.audioContext || this.audioContext.state !== "running") return; + if (!this.stateManager.isPlaying) return; + if (this.scheduledSources.length === 0 && this.audioBufferQueue.length === 0 && this.nextPlaybackTime === 0) return; + this.performGuardedCutover("delay-change", { incrementResyncCount: false, markCooldown: true }); } get syncInfo(): { - clockDriftPercent: number; - syncErrorMs: number; - resyncCount: number; - outputLatencyMs: number; - playbackRate: number; + clockDriftPercent: number; syncErrorMs: number; resyncCount: number; + outputLatencyMs: number; playbackRate: number; correctionMethod: "none" | "samples" | "rate" | "resync"; - samplesAdjusted: number; - correctionMode: CorrectionMode; + samplesAdjusted: number; correctionMode: CorrectionMode; } { return { clockDriftPercent: this.timeFilter.drift * 100, syncErrorMs: this.currentSyncErrorMs, resyncCount: this.resyncCount, - outputLatencyMs: 
this.getRawOutputLatencyUs() / 1000, + outputLatencyMs: this.latencyTracker.getLastRawUs() / 1000, playbackRate: this.currentPlaybackRate, correctionMethod: this.currentCorrectionMethod, samplesAdjusted: this.lastSamplesAdjusted, @@ -871,34 +308,24 @@ export class AudioScheduler { } private emitStatusLog(nowMs: number): void { - if (this._lastStatusLogMs !== 0 && nowMs - this._lastStatusLogMs < 10_000) { - return; - } + if (this._lastStatusLogMs !== 0 && nowMs - this._lastStatusLogMs < 10_000) return; this._lastStatusLogMs = nowMs; let corr: string; switch (this.currentCorrectionMethod) { - case "rate": - corr = `rate@${this.currentPlaybackRate}`; - break; - case "samples": - corr = `samples:${this.lastSamplesAdjusted}`; - break; - default: - corr = this.currentCorrectionMethod; + case "rate": corr = `rate@${this.currentPlaybackRate}`; break; + case "samples": corr = `samples:${this.lastSamplesAdjusted}`; break; + default: corr = this.currentCorrectionMethod; } - const queueDepth = - this.audioBufferQueue.length + this.scheduledSources.length; - const aheadSec = this.audioContext - ? this.getScheduledAheadSec(this.audioContext.currentTime) - : 0; + const queueDepth = this.audioBufferQueue.length + this.scheduledSources.length; + const aheadSec = this.audioContext ? this.getScheduledAheadSec(this.audioContext.currentTime) : 0; let clock: string; - if (this.activeAudioClockSource === "timestamp") { - clock = `timestamp(good:${this.outputTimestampGoodSamples})`; - } else if (this._lastTimestampRejectReason) { - clock = `estimated(reject:"${this._lastTimestampRejectReason}")`; + if (this.clockSource.active === "timestamp") { + clock = `timestamp(good:${this.clockSource.timestampGoodSamples})`; + } else if (this.clockSource.lastRejectReason) { + clock = `estimated(reject:"${this.clockSource.lastRejectReason}")`; } else { clock = "estimated"; } @@ -907,125 +334,51 @@ export class AudioScheduler { ? `synced(err=${(this.timeFilter.error / 1000).toFixed(1)}ms,drift=${this.timeFilter.drift.toFixed(3)},n=${this.timeFilter.count})` : `pending(n=${this.timeFilter.count})`; - const latMs = - this.smoothedOutputLatencyUs !== null - ? Math.round(this.smoothedOutputLatencyUs / 1000) - : 0; + const latMs = this.latencyTracker.getSmoothedUs(this.audioContext) !== null + ? Math.round(this.latencyTracker.getSmoothedUs(this.audioContext) / 1000) : 0; console.log( `Sendspin: sync=${this.smoothedSyncErrorMs >= 0 ? "+" : ""}${this.smoothedSyncErrorMs.toFixed(1)}ms` + - ` corr=${corr}` + - ` q=${queueDepth}/${aheadSec.toFixed(1)}s` + - ` resyncs=${this._intervalResyncCount}` + - ` clock=${clock}` + - ` tf=${tf}` + - ` lat=${latMs}ms` + - ` mode=${this._correctionMode}` + - ` ctx=${this.audioContext?.state ?? "null"}` + - ` gen=${this.stateManager.streamGeneration}`, + ` corr=${corr} q=${queueDepth}/${aheadSec.toFixed(1)}s resyncs=${this._intervalResyncCount}` + + ` clock=${clock} tf=${tf} lat=${latMs}ms mode=${this._correctionMode}` + + ` ctx=${this.audioContext?.state ?? 
"null"} gen=${this.stateManager.streamGeneration}`, ); - this._intervalResyncCount = 0; } private applySyncErrorEma(inputMs: number): number { this.currentSyncErrorMs = inputMs; - this.smoothedSyncErrorMs = - SYNC_ERROR_ALPHA * inputMs + - (1 - SYNC_ERROR_ALPHA) * this.smoothedSyncErrorMs; + this.smoothedSyncErrorMs = SYNC_ERROR_ALPHA * inputMs + (1 - SYNC_ERROR_ALPHA) * this.smoothedSyncErrorMs; return this.smoothedSyncErrorMs; } - private resetSyncErrorEma(): void { - this.smoothedSyncErrorMs = 0; - } - - getRawOutputLatencyUs(): number { - if (!this.audioContext) return 0; - const baseLatency = this.audioContext.baseLatency ?? 0; - const outputLatency = this.audioContext.outputLatency ?? 0; - const rawUs = (baseLatency + outputLatency) * 1_000_000; - this.lastRawOutputLatencyUs = rawUs; - return rawUs; - } - - getSmoothedOutputLatencyUs(): number { - const rawLatencyUs = this.getRawOutputLatencyUs(); - - if (rawLatencyUs <= 0 && this.smoothedOutputLatencyUs !== null) { - return this.smoothedOutputLatencyUs; - } - - if (this.smoothedOutputLatencyUs === null) { - this.smoothedOutputLatencyUs = rawLatencyUs; - } else { - this.smoothedOutputLatencyUs = - OUTPUT_LATENCY_ALPHA * rawLatencyUs + - (1 - OUTPUT_LATENCY_ALPHA) * this.smoothedOutputLatencyUs; - } - - const nowMs = - typeof performance !== "undefined" ? performance.now() : Date.now(); - if ( - this.lastLatencyPersistAtMs === null || - nowMs - this.lastLatencyPersistAtMs >= OUTPUT_LATENCY_PERSIST_INTERVAL_MS - ) { - this.persistLatency(); - this.lastLatencyPersistAtMs = nowMs; - } + private resetSyncErrorEma(): void { this.smoothedSyncErrorMs = 0; } - return this.smoothedOutputLatencyUs; - } - - private resetLatencySmoother(): void { - this.smoothedOutputLatencyUs = null; - } private copyBuffer(buffer: AudioBuffer): AudioBuffer { if (!this.audioContext) return buffer; - - const newBuffer = this.audioContext.createBuffer( - buffer.numberOfChannels, - buffer.length, - buffer.sampleRate, - ); - + const newBuffer = this.audioContext.createBuffer(buffer.numberOfChannels, buffer.length, buffer.sampleRate); for (let ch = 0; ch < buffer.numberOfChannels; ch++) { newBuffer.getChannelData(ch).set(buffer.getChannelData(ch)); } - return newBuffer; } - private adjustBufferSamples( - buffer: AudioBuffer, - samplesToAdjust: number, - ): AudioBuffer { - if (!this.audioContext || samplesToAdjust === 0 || buffer.length < 2) { - return this.copyBuffer(buffer); - } - + private adjustBufferSamples(buffer: AudioBuffer, samplesToAdjust: number): AudioBuffer { + if (!this.audioContext || samplesToAdjust === 0 || buffer.length < 2) return this.copyBuffer(buffer); const channels = buffer.numberOfChannels; const len = buffer.length; const sampleRate = buffer.sampleRate; - try { if (samplesToAdjust > 0) { - const newBuffer = this.audioContext.createBuffer( - channels, - len + 1, - sampleRate, - ); - + const newBuffer = this.audioContext.createBuffer(channels, len + 1, sampleRate); for (let ch = 0; ch < channels; ch++) { const oldData = buffer.getChannelData(ch); const newData = newBuffer.getChannelData(ch); - newData[0] = oldData[0]; const insertedSample = (oldData[0] + oldData[1]) / 2; newData[1] = insertedSample; newData.set(oldData.subarray(1), 2); - for (let f = 0; f < SAMPLE_CORRECTION_FADE_LEN; f++) { const pos = 2 + f; if (pos >= newData.length) break; @@ -1033,32 +386,22 @@ export class AudioScheduler { newData[pos] = newData[pos] * (1 - alpha) + insertedSample * alpha; } } - return newBuffer; } else { - const newBuffer = this.audioContext.createBuffer( 
-      channels,
-      len - 1,
-      sampleRate,
-    );
-
+      const newBuffer = this.audioContext.createBuffer(channels, len - 1, sampleRate);
       for (let ch = 0; ch < channels; ch++) {
         const oldData = buffer.getChannelData(ch);
         const newData = newBuffer.getChannelData(ch);
-
         newData.set(oldData.subarray(0, len - 2));
         const replacementSample = (oldData[len - 2] + oldData[len - 1]) / 2;
         newData[len - 2] = replacementSample;
-
         for (let f = 0; f < SAMPLE_CORRECTION_FADE_LEN; f++) {
           const pos = len - 3 - f;
           if (pos < 0) break;
           const alpha = SAMPLE_CORRECTION_FADE_ALPHAS[f];
-          newData[pos] =
-            newData[pos] * (1 - alpha) + replacementSample * alpha;
+          newData[pos] = newData[pos] * (1 - alpha) + replacementSample * alpha;
         }
       }
-
       return newBuffer;
     }
   } catch (e) {
@@ -1068,391 +411,207 @@ export class AudioScheduler {
   }
 
   initAudioContext(): void {
-    if (this.audioContext) {
-      return;
-    }
-
+    if (this.audioContext) return;
     if (this.outputMode === "media-element" && this.ownsAudioElement) {
       this.audioElement = document.createElement("audio");
       this.audioElement.style.display = "none";
       document.body.appendChild(this.audioElement);
     }
-
-    if ((navigator as any).audioSession) {
-      (navigator as any).audioSession.type = "playback";
-    }
-
-    const streamSampleRate =
-      this.stateManager.currentStreamFormat?.sample_rate || 48000;
+    if ((navigator as any).audioSession) { (navigator as any).audioSession.type = "playback"; }
+    const streamSampleRate = this.stateManager.currentStreamFormat?.sample_rate || 48000;
     this.audioContext = new AudioContext({ sampleRate: streamSampleRate });
     this.gainNode = this.audioContext.createGain();
-
     const audioElement = this.audioElement;
-
     if (this.outputMode === "direct") {
       this.gainNode.connect(this.audioContext.destination);
     } else {
-      if (!audioElement) {
-        throw new Error(
-          "Media-element output requires an audio element to be available during initialization.",
-        );
-      }
-
+      if (!audioElement) throw new Error("Media-element output requires an audio element.");
       if (this.isAndroid && this.silentAudioSrc) {
         this.gainNode.connect(this.audioContext.destination);
         audioElement.src = this.silentAudioSrc;
         audioElement.loop = true;
         audioElement.muted = false;
         audioElement.volume = 1.0;
-        audioElement.play().catch((e) => {
-          console.warn("Sendspin: Audio autoplay blocked:", e);
-        });
+        audioElement.play().catch((e) => { console.warn("Sendspin: Audio autoplay blocked:", e); });
       } else {
-        this.streamDestination =
-          this.audioContext.createMediaStreamDestination();
+        this.streamDestination = this.audioContext.createMediaStreamDestination();
         this.gainNode.connect(this.streamDestination);
         audioElement.srcObject = this.streamDestination.stream;
         audioElement.volume = 1.0;
-        audioElement.play().catch((e) => {
-          console.warn("Sendspin: Audio autoplay blocked:", e);
-        });
+        audioElement.play().catch((e) => { console.warn("Sendspin: Audio autoplay blocked:", e); });
       }
     }
-
     this.updateVolume();
-    if (this.usesRecorrectionMonitor) {
-      this.startRecorrectionMonitor();
-    }
+    if (this.usesRecorrectionMonitor) this.recorrectionMonitor.start();
   }
 
   async resumeAudioContext(): Promise<void> {
     if (this.audioContext && this.audioContext.state === "suspended") {
-      try {
-        await this.audioContext.resume();
-        console.log("Sendspin: AudioContext resumed");
-      } catch (e) {
-        console.warn("Sendspin: Failed to resume AudioContext:", e);
-        return;
-      }
-
-      if (this.audioBufferQueue.length > 0) {
-        this.scheduleQueueProcessing();
-      }
-      if (this.usesRecorrectionMonitor) {
-        this.startRecorrectionMonitor();
-      }
+      try { await this.audioContext.resume(); console.log("Sendspin: AudioContext resumed"); }
+      catch (e) { console.warn("Sendspin: Failed to resume AudioContext:", e); return; }
+      if (this.audioBufferQueue.length > 0) this.scheduleQueueProcessing();
+      if (this.usesRecorrectionMonitor) this.recorrectionMonitor.start();
     }
   }
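Every hard resync funnels through the cut-and-requeue step below: sources that have not started playing by the stop time are stopped and their decoded buffers pushed back onto the queue, sources already audible are kept, and the latest end time among the kept entries is reported so the replacement timeline can be scheduled after it. A minimal standalone sketch of the same pattern, with simplified types and illustrative names rather than the library's actual API:

interface ScheduledEntry {
  source: AudioBufferSourceNode;
  buffer: AudioBuffer;
  startTime: number; // AudioContext time, seconds
  endTime: number;
}

// Stop and requeue everything that has not started by stopTime; keep the
// rest, and report how far the kept tail extends so new audio lands after it.
function cutAfter(
  scheduled: ScheduledEntry[],
  stopTime: number,
  requeue: (buffer: AudioBuffer) => void,
): { kept: ScheduledEntry[]; keptTailEndTimeSec: number } {
  const kept: ScheduledEntry[] = [];
  let keptTailEndTimeSec = 0;
  for (const entry of scheduled) {
    if (entry.startTime < stopTime) {
      kept.push(entry);
      keptTailEndTimeSec = Math.max(keptTailEndTimeSec, entry.endTime);
      continue;
    }
    try {
      entry.source.onended = null; // a deliberate cut must not fire onended
      entry.source.stop(stopTime);
    } catch {
      // source may already be stopped
    }
    requeue(entry.buffer); // decoded audio is not lost, only rescheduled
  }
  return { kept, keptTailEndTimeSec };
}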
-  private cutScheduledSources(cutoffTime: number): {
-    requeuedCount: number;
-    cutCount: number;
-    keptTailEndTimeSec: number;
-  } {
-    if (!this.audioContext) {
-      return { requeuedCount: 0, cutCount: 0, keptTailEndTimeSec: 0 };
-    }
+  private cutScheduledSources(cutoffTime: number): { requeuedCount: number; cutCount: number; keptTailEndTimeSec: number } {
+    if (!this.audioContext) return { requeuedCount: 0, cutCount: 0, keptTailEndTimeSec: 0 };
     const stopTime = Math.max(cutoffTime, this.audioContext.currentTime);
-    let requeued = 0;
-    let cutCount = 0;
-    let keptTailEndTimeSec = 0;
+    let requeued = 0, cutCount = 0, keptTailEndTimeSec = 0;
     this.scheduledSources = this.scheduledSources.filter((entry) => {
-      if (entry.startTime < stopTime) {
-        keptTailEndTimeSec = Math.max(keptTailEndTimeSec, entry.endTime);
-        return true;
-      }
-      try {
-        entry.source.onended = null;
-        entry.source.stop(stopTime);
-      } catch (e) {
-        // Ignore errors if source already stopped
-      }
-      this.audioBufferQueue.push({
-        buffer: entry.buffer,
-        serverTime: entry.serverTime,
-        generation: entry.generation,
-      });
-      requeued++;
-      cutCount++;
-      return false;
+      if (entry.startTime < stopTime) { keptTailEndTimeSec = Math.max(keptTailEndTimeSec, entry.endTime); return true; }
+      try { entry.source.onended = null; entry.source.stop(stopTime); } catch { /* ignore */ }
+      this.audioBufferQueue.push({ buffer: entry.buffer, serverTime: entry.serverTime, generation: entry.generation });
+      requeued++; cutCount++; return false;
     });
     return { requeuedCount: requeued, cutCount, keptTailEndTimeSec };
   }
 
   updateVolume(): void {
     if (!this.gainNode) return;
-
-    if (this.useHardwareVolume) {
-      this.gainNode.gain.value = 1.0;
-      return;
-    }
-
-    if (this.stateManager.muted) {
-      this.gainNode.gain.value = 0;
-    } else {
-      this.gainNode.gain.value = this.stateManager.volume / 100;
-    }
+    if (this.useHardwareVolume) { this.gainNode.gain.value = 1.0; return; }
+    this.gainNode.gain.value = this.stateManager.muted ? 
0 : this.stateManager.volume / 100; } - private scheduleQueueProcessing(): void { - if (this.queueProcessScheduled) { - return; - } + if (this.queueProcessScheduled) return; this.queueProcessScheduled = true; - if (typeof globalThis.setTimeout === "function") { this.scheduleTimeout = globalThis.setTimeout(() => { - this.scheduleTimeout = null; - this.queueProcessScheduled = false; - this.processAudioQueue(); + this.scheduleTimeout = null; this.queueProcessScheduled = false; this.processAudioQueue(); }, 15); return; } - - const run = () => { - this.queueProcessScheduled = false; - this.processAudioQueue(); - }; - - if ( - typeof (globalThis as unknown as { queueMicrotask?: unknown }) - .queueMicrotask === "function" - ) { - ( - globalThis as unknown as { queueMicrotask: (cb: () => void) => void } - ).queueMicrotask(run); - } else { - Promise.resolve().then(run); - } + const run = () => { this.queueProcessScheduled = false; this.processAudioQueue(); }; + if (typeof (globalThis as unknown as { queueMicrotask?: unknown }).queueMicrotask === "function") { + (globalThis as unknown as { queueMicrotask: (cb: () => void) => void }).queueMicrotask(run); + } else { Promise.resolve().then(run); } } - /** Accept a decoded audio chunk and queue it for synchronized playback. */ handleDecodedChunk(chunk: DecodedAudioChunk): void { if (!this.audioContext || !this.gainNode) return; if (chunk.generation !== this.stateManager.streamGeneration) return; - const numChannels = chunk.samples.length; const numFrames = chunk.samples[0].length; - const audioBuffer = this.audioContext.createBuffer( - numChannels, - numFrames, - chunk.sampleRate, - ); - for (let ch = 0; ch < numChannels; ch++) { - audioBuffer.getChannelData(ch).set(chunk.samples[ch]); - } - - this.audioBufferQueue.push({ - buffer: audioBuffer, - serverTime: chunk.serverTimeUs, - generation: chunk.generation, - }); - + const audioBuffer = this.audioContext.createBuffer(numChannels, numFrames, chunk.sampleRate); + for (let ch = 0; ch < numChannels; ch++) audioBuffer.getChannelData(ch).set(chunk.samples[ch]); + this.audioBufferQueue.push({ buffer: audioBuffer, serverTime: chunk.serverTimeUs, generation: chunk.generation }); this.scheduleQueueProcessing(); } + processAudioQueue(): void { if (!this.audioContext || !this.gainNode) return; if (this.audioContext.state !== "running") return; const currentGeneration = this.stateManager.streamGeneration; - this.audioBufferQueue = this.audioBufferQueue.filter( - (chunk) => chunk.generation === currentGeneration, - ); - + this.audioBufferQueue = this.audioBufferQueue.filter((chunk) => chunk.generation === currentGeneration); this.audioBufferQueue.sort((a, b) => a.serverTime - b.serverTime); + if (!this.timeFilter.is_synchronized) return; - if (!this.timeFilter.is_synchronized) { - return; - } - - const { - audioContextTimeSec: audioContextTime, - audioContextRawTimeSec, - nowMs, - nowUs, - } = this.getTimingSnapshot(); + const { audioContextTimeSec: audioContextTime, audioContextRawTimeSec, nowMs, nowUs } = this.clockSource.getTimingSnapshot(this.audioContext); this.pruneExpiredScheduledSources(audioContextRawTimeSec); - const outputLatencySec = this.useOutputLatencyCompensation - ? this.getSmoothedOutputLatencyUs() / 1_000_000 - : 0; + const outputLatencySec = this.useOutputLatencyCompensation ? 
this.latencyTracker.getSmoothedUs(this.audioContext) / 1_000_000 : 0; const syncDelaySec = this.syncDelayMs / 1000; const targetScheduledHorizonSec = this.getTargetScheduledHorizonSec(); - if (this.usesRecorrectionMonitor) { - this.startRecorrectionMonitor(); - } + if (this.usesRecorrectionMonitor) this.recorrectionMonitor.start(); - if (this.pendingClockSourceCutover) { - this.pendingClockSourceCutover = false; - if ( - this.scheduledSources.length > 0 || - this.nextPlaybackTime !== 0 || - this.lastScheduledServerTime !== 0 - ) { - this.performGuardedCutover("delay-change", { - incrementResyncCount: false, - markCooldown: false, - }); + if (this.clockSource.pendingCutover) { + this.clockSource.pendingCutover = false; + if (this.scheduledSources.length > 0 || this.nextPlaybackTime !== 0 || this.lastScheduledServerTime !== 0) { + this.performGuardedCutover("delay-change", { incrementResyncCount: false, markCooldown: false }); return; } } while (this.audioBufferQueue.length > 0) { - const scheduledAheadSec = this.getScheduledAheadSec( - audioContextRawTimeSec, - ); - if ( - this.nextPlaybackTime > 0 && - scheduledAheadSec >= targetScheduledHorizonSec - ) { - break; - } + const scheduledAheadSec = this.getScheduledAheadSec(audioContextRawTimeSec); + if (this.nextPlaybackTime > 0 && scheduledAheadSec >= targetScheduledHorizonSec) break; const chunk = this.audioBufferQueue.shift()!; - let playbackTime: number; let scheduleTime: number; let playbackRate: number; - const targetPlaybackTime = this.computeTargetPlaybackTime( - chunk.serverTime, - audioContextTime, - nowUs, - outputLatencySec, - ); + const targetPlaybackTime = this.computeTargetPlaybackTime(chunk.serverTime, audioContextTime, nowUs, outputLatencySec); + const isTimestamp = this.clockSource.active === "timestamp"; if (this.nextPlaybackTime === 0 || this.lastScheduledServerTime === 0) { - this.armHardResyncStartupGrace(nowMs); + this.recorrectionMonitor.armStartupGrace(nowMs, isTimestamp); playbackTime = targetPlaybackTime; scheduleTime = playbackTime - syncDelaySec; - if (this.recorrectionMinScheduleTimeSec !== null) { - scheduleTime = Math.max( - scheduleTime, - this.recorrectionMinScheduleTimeSec, - ); + if (this.recorrectionMonitor.minScheduleTimeSec !== null) { + scheduleTime = Math.max(scheduleTime, this.recorrectionMonitor.minScheduleTimeSec); playbackTime = scheduleTime + syncDelaySec; } - this.recorrectionMinScheduleTimeSec = null; + this.recorrectionMonitor.minScheduleTimeSec = null; playbackRate = 1.0; chunk.buffer = this.copyBuffer(chunk.buffer); } else { - const expectedServerTime = this.lastScheduledServerTime; - const serverGapUs = chunk.serverTime - expectedServerTime; + const serverGapUs = chunk.serverTime - this.lastScheduledServerTime; const serverGapSec = serverGapUs / 1_000_000; if (Math.abs(serverGapSec) < 0.1) { const syncErrorSec = this.nextPlaybackTime - targetPlaybackTime; const syncErrorMs = syncErrorSec * 1000; - const correctionErrorMs = this.applySyncErrorEma(syncErrorMs); + const thresholds = this.correctionThresholds[this._correctionMode]; + const canHardResync = this.recorrectionMonitor.canUseHardResync(nowMs, isTimestamp); - const thresholds = CORRECTION_THRESHOLDS[this._correctionMode]; - const canUseHardResync = this.canUseHardResync(nowMs); - - if ( - Math.abs(correctionErrorMs) > thresholds.resyncAboveMs && - canUseHardResync - ) { - this.noteHardResync(nowMs); - this.resyncCount++; - this._intervalResyncCount++; + if (Math.abs(correctionErrorMs) > thresholds.resyncAboveMs && canHardResync) { + 
this.recorrectionMonitor.noteHardResync(nowMs); + this.resyncCount++; this._intervalResyncCount++; this.resetSyncErrorEma(); this.cutScheduledSources(targetPlaybackTime - syncDelaySec); playbackTime = targetPlaybackTime; scheduleTime = playbackTime - syncDelaySec; playbackRate = 1.0; - this.currentCorrectionMethod = "resync"; - this.lastSamplesAdjusted = 0; + this.currentCorrectionMethod = "resync"; this.lastSamplesAdjusted = 0; chunk.buffer = this.copyBuffer(chunk.buffer); } else if (Math.abs(correctionErrorMs) > thresholds.resyncAboveMs) { - playbackTime = this.nextPlaybackTime; - scheduleTime = this.nextScheduleTime; - playbackRate = Number.isFinite(thresholds.rate2AboveMs) - ? correctionErrorMs > 0 - ? 1.02 - : 0.98 - : 1.0; - this.currentCorrectionMethod = - playbackRate === 1.0 ? "none" : "rate"; - this.lastSamplesAdjusted = 0; + playbackTime = this.nextPlaybackTime; scheduleTime = this.nextScheduleTime; + playbackRate = Number.isFinite(thresholds.rate2AboveMs) ? (correctionErrorMs > 0 ? 1.02 : 0.98) : 1.0; + this.currentCorrectionMethod = playbackRate === 1.0 ? "none" : "rate"; this.lastSamplesAdjusted = 0; chunk.buffer = this.copyBuffer(chunk.buffer); } else if (Math.abs(correctionErrorMs) < thresholds.deadbandBelowMs) { - playbackTime = this.nextPlaybackTime; - scheduleTime = this.nextScheduleTime; - playbackRate = 1.0; - this.currentCorrectionMethod = "none"; - this.lastSamplesAdjusted = 0; + playbackTime = this.nextPlaybackTime; scheduleTime = this.nextScheduleTime; + playbackRate = 1.0; this.currentCorrectionMethod = "none"; this.lastSamplesAdjusted = 0; chunk.buffer = this.copyBuffer(chunk.buffer); } else if (Math.abs(correctionErrorMs) <= thresholds.samplesBelowMs) { - playbackTime = this.nextPlaybackTime; - scheduleTime = this.nextScheduleTime; - playbackRate = 1.0; + playbackTime = this.nextPlaybackTime; scheduleTime = this.nextScheduleTime; playbackRate = 1.0; const samplesToAdjust = correctionErrorMs > 0 ? -1 : 1; - chunk.buffer = this.adjustBufferSamples( - chunk.buffer, - samplesToAdjust, - ); - this.currentCorrectionMethod = "samples"; - this.lastSamplesAdjusted = samplesToAdjust; + chunk.buffer = this.adjustBufferSamples(chunk.buffer, samplesToAdjust); + this.currentCorrectionMethod = "samples"; this.lastSamplesAdjusted = samplesToAdjust; } else { - playbackTime = this.nextPlaybackTime; - scheduleTime = this.nextScheduleTime; + playbackTime = this.nextPlaybackTime; scheduleTime = this.nextScheduleTime; const absErrorMs = Math.abs(correctionErrorMs); - if (correctionErrorMs > 0) { - playbackRate = - absErrorMs >= thresholds.rate2AboveMs - ? 1.02 - : absErrorMs >= thresholds.rate1AboveMs - ? 1.01 - : 1.0; + playbackRate = absErrorMs >= thresholds.rate2AboveMs ? 1.02 : absErrorMs >= thresholds.rate1AboveMs ? 1.01 : 1.0; } else { - playbackRate = - absErrorMs >= thresholds.rate2AboveMs - ? 0.98 - : absErrorMs >= thresholds.rate1AboveMs - ? 0.99 - : 1.0; + playbackRate = absErrorMs >= thresholds.rate2AboveMs ? 0.98 : absErrorMs >= thresholds.rate1AboveMs ? 0.99 : 1.0; } - - this.currentCorrectionMethod = - playbackRate === 1.0 ? "none" : "rate"; - this.lastSamplesAdjusted = 0; + this.currentCorrectionMethod = playbackRate === 1.0 ? 
"none" : "rate"; this.lastSamplesAdjusted = 0; chunk.buffer = this.copyBuffer(chunk.buffer); } } else { - this.noteHardResync(nowMs); - this.resyncCount++; - this._intervalResyncCount++; + this.recorrectionMonitor.noteHardResync(nowMs); + this.resyncCount++; this._intervalResyncCount++; this.cutScheduledSources(targetPlaybackTime - syncDelaySec); - playbackTime = targetPlaybackTime; - scheduleTime = playbackTime - syncDelaySec; - playbackRate = 1.0; - this.currentCorrectionMethod = "resync"; - this.lastSamplesAdjusted = 0; + playbackTime = targetPlaybackTime; scheduleTime = playbackTime - syncDelaySec; + playbackRate = 1.0; this.currentCorrectionMethod = "resync"; this.lastSamplesAdjusted = 0; chunk.buffer = this.copyBuffer(chunk.buffer); } } this.currentPlaybackRate = playbackRate; - if (playbackTime < audioContextRawTimeSec) { - this.nextPlaybackTime = 0; - this.nextScheduleTime = 0; - this.lastScheduledServerTime = 0; - continue; + this.nextPlaybackTime = 0; this.nextScheduleTime = 0; this.lastScheduledServerTime = 0; continue; } - const effectiveScheduleTime = Math.max( - scheduleTime, - audioContextRawTimeSec, - ); - const effectivePlaybackTime = - effectiveScheduleTime + (playbackTime - scheduleTime); - + const effectiveScheduleTime = Math.max(scheduleTime, audioContextRawTimeSec); + const effectivePlaybackTime = effectiveScheduleTime + (playbackTime - scheduleTime); const source = this.audioContext.createBufferSource(); source.buffer = chunk.buffer; source.playbackRate.value = playbackRate; @@ -1462,16 +621,11 @@ export class AudioScheduler { const actualDuration = chunk.buffer.duration / playbackRate; this.nextPlaybackTime = effectivePlaybackTime + actualDuration; this.nextScheduleTime = effectiveScheduleTime + actualDuration; - this.lastScheduledServerTime = - chunk.serverTime + chunk.buffer.duration * 1_000_000; + this.lastScheduledServerTime = chunk.serverTime + chunk.buffer.duration * 1_000_000; const scheduledEntry = { - source, - startTime: effectiveScheduleTime, - endTime: effectiveScheduleTime + actualDuration, - buffer: chunk.buffer, - serverTime: chunk.serverTime, - generation: chunk.generation, + source, startTime: effectiveScheduleTime, endTime: effectiveScheduleTime + actualDuration, + buffer: chunk.buffer, serverTime: chunk.serverTime, generation: chunk.generation, }; this.scheduledSources.push(scheduledEntry); source.onended = () => { @@ -1479,103 +633,56 @@ export class AudioScheduler { if (idx > -1) this.scheduledSources.splice(idx, 1); if (this.scheduledSources.length === 0) { this.resetScheduledPlaybackState("all scheduled audio ended"); - if (this.audioBufferQueue.length > 0) { - this.processAudioQueue(); - } + if (this.audioBufferQueue.length > 0) this.processAudioQueue(); } }; } this.emitStatusLog(nowMs); } - private computeTargetPlaybackTime( - serverTimeUs: number, - audioContextTime: number, - nowUs: number, - outputLatencySec: number, - ): number { + private computeTargetPlaybackTime(serverTimeUs: number, audioContextTime: number, nowUs: number, outputLatencySec: number): number { const chunkClientTimeUs = this.timeFilter.computeClientTime(serverTimeUs); - const deltaUs = chunkClientTimeUs - nowUs; - const deltaSec = deltaUs / 1_000_000; - return ( - audioContextTime + deltaSec + SCHEDULE_HEADROOM_SEC - outputLatencySec - ); + const deltaSec = (chunkClientTimeUs - nowUs) / 1_000_000; + return audioContextTime + deltaSec + SCHEDULE_HEADROOM_SEC - outputLatencySec; } startAudioElement(): void { - if (this.outputMode === "media-element" && this.audioElement) 
{ - if (this.audioElement.paused) { - this.audioElement.play().catch((e) => { - console.warn("Sendspin: Failed to start audio element:", e); - }); - } + if (this.outputMode === "media-element" && this.audioElement?.paused) { + this.audioElement.play().catch((e) => { console.warn("Sendspin: Failed to start audio element:", e); }); } } stopAudioElement(): void { - if (this.outputMode === "media-element" && this.audioElement) { - if (!this.audioElement.paused) { - this.audioElement.pause(); - } + if (this.outputMode === "media-element" && this.audioElement && !this.audioElement.paused) { + this.audioElement.pause(); } } clearBuffers(): void { - this.stopRecorrectionMonitor(); - - this.scheduledSources.forEach((entry) => { - try { - entry.source.stop(); - } catch (e) { - // Ignore errors if source already stopped - } - }); + this.recorrectionMonitor.stop(); + this.scheduledSources.forEach((entry) => { try { entry.source.stop(); } catch { /* ignore */ } }); this.scheduledSources = []; - this.audioBufferQueue = []; - if (this.scheduleTimeout !== null) { - clearTimeout(this.scheduleTimeout); - this.scheduleTimeout = null; - } + if (this.scheduleTimeout !== null) { clearTimeout(this.scheduleTimeout); this.scheduleTimeout = null; } this.queueProcessScheduled = false; - this.stateManager.resetStreamAnchors(); - this.resetScheduledPlaybackState(); this.resyncCount = 0; - this.lastRawOutputLatencyUs = 0; - this.resetLatencySmoother(); - this.timingEstimateAudioContextTimeSec = null; - this.timingEstimateAtMs = null; - this.resetOutputTimestampValidation(); + this.latencyTracker.reset(); + this.clockSource.reset(); } close(): void { this.clearBuffers(); - - if (this.audioContext) { - this.audioContext.close(); - this.audioContext = null; - } - + if (this.audioContext) { this.audioContext.close(); this.audioContext = null; } this.gainNode = null; this.streamDestination = null; - if (this.outputMode === "media-element" && this.audioElement) { - this.audioElement.pause(); - this.audioElement.srcObject = null; - this.audioElement.loop = false; - this.audioElement.removeAttribute("src"); - this.audioElement.load(); - - if (this.ownsAudioElement) { - this.audioElement.remove(); - this.audioElement = undefined; - } + this.audioElement.pause(); this.audioElement.srcObject = null; + this.audioElement.loop = false; this.audioElement.removeAttribute("src"); this.audioElement.load(); + if (this.ownsAudioElement) { this.audioElement.remove(); this.audioElement = undefined; } } } - getAudioContext(): AudioContext | null { - return this.audioContext; - } + getAudioContext(): AudioContext | null { return this.audioContext; } } diff --git a/src/index.ts b/src/index.ts index 54b297d..695d022 100644 --- a/src/index.ts +++ b/src/index.ts @@ -122,6 +122,7 @@ export class SendspinPlayer { config.correctionMode ?? "sync", storage, config.useOutputLatencyCompensation ?? true, + config.correctionThresholds, ); // Wire core events to scheduler diff --git a/src/types.ts b/src/types.ts index e4ff599..441af1e 100644 --- a/src/types.ts +++ b/src/types.ts @@ -256,6 +256,27 @@ export type Codec = "pcm" | "opus" | "flac"; */ export type CorrectionMode = "sync" | "quality" | "quality-local"; +/** + * Sync correction thresholds for a single correction mode. + * All values are in milliseconds unless noted. + */ +export interface CorrectionThresholds { + /** Hard resync when sync error exceeds this (ms) */ + resyncAboveMs: number; + /** Use ±2% playback rate when error exceeds this (ms). Infinity = disabled. 
 */
+  rate2AboveMs: number;
+  /** Use ±1% playback rate when error exceeds this (ms). Infinity = disabled. */
+  rate1AboveMs: number;
+  /** Use sample insertion/deletion when error is below this (ms). 0 = disabled. */
+  samplesBelowMs: number;
+  /** No correction when error is below this (ms) */
+  deadbandBelowMs: number;
+  /** Whether the recorrection monitor runs in this mode */
+  enableRecorrectionMonitor: boolean;
+  /** Whether runtime sync delay changes trigger immediate cutover */
+  immediateDelayCutover: boolean;
+}
+
 export interface SupportedFormat {
   codec: string;
   channels: number;
@@ -329,6 +350,18 @@ export interface SendspinPlayerConfig {
    */
   correctionMode?: CorrectionMode;
 
+  /**
+   * Override default correction thresholds per mode.
+   * Partially override any mode — unspecified fields keep their defaults.
+   *
+   * @example
+   * // Make "sync" mode tolerate more drift before hard resyncing
+   * correctionThresholds: { sync: { resyncAboveMs: 400 } }
+   */
+  correctionThresholds?: Partial<
+    Record<CorrectionMode, Partial<CorrectionThresholds>>
+  >;
+
   /**
    * Use browser's output latency API for automatic latency compensation.
    * When enabled, reads AudioContext.baseLatency and outputLatency to

From e427b8c8421f8c17a27205fe9ec42a4e91bedffe Mon Sep 17 00:00:00 2001
From: Maxim Raznatovski
Date: Wed, 15 Apr 2026 14:47:52 +0200
Subject: [PATCH 09/27] Format

---
 src/audio/clock-source.ts         |   8 +-
 src/audio/recorrection-monitor.ts |   5 +-
 src/audio/scheduler.ts            | 557 +++++++++++++++++++++++-------
 src/core/codec-support.ts         |   3 +-
 src/core/core.ts                  |  14 +-
 src/core/protocol-handler.ts      |   5 +-
 6 files changed, 452 insertions(+), 140 deletions(-)

diff --git a/src/audio/clock-source.ts b/src/audio/clock-source.ts
index e49c34c..b121879 100644
--- a/src/audio/clock-source.ts
+++ b/src/audio/clock-source.ts
@@ -131,7 +131,6 @@ export class ClockSource {
     }
   }
 
-
   private getEstimatedTime(rawTimeSec: number, nowMs: number): number {
     if (this.estimateAudioTimeSec === null) {
       this.estimateAudioTimeSec = rawTimeSec;
@@ -327,7 +326,12 @@
     const nowMs = performance.now();
     const nowUs = nowMs * 1000;
     if (!audioContext) {
-      return { audioContextTimeSec: 0, audioContextRawTimeSec: 0, nowMs, nowUs };
+      return {
+        audioContextTimeSec: 0,
+        audioContextRawTimeSec: 0,
+        nowMs,
+        nowUs,
+      };
     }
 
     const rawTimeSec = audioContext.currentTime;
diff --git a/src/audio/recorrection-monitor.ts b/src/audio/recorrection-monitor.ts
index 4d546ce..e128929 100644
--- a/src/audio/recorrection-monitor.ts
+++ b/src/audio/recorrection-monitor.ts
@@ -89,10 +89,7 @@ export class RecorrectionMonitor {
     this.lastRecorrectionAtMs = nowMs;
   }
 
-  shouldIgnoreTransientJump(
-    rawSyncErrorMs: number,
-    nowMs: number,
-  ): boolean {
+  shouldIgnoreTransientJump(rawSyncErrorMs: number, nowMs: number): boolean {
     const prev = this.prevRawSyncErrorMs;
     this.prevRawSyncErrorMs = rawSyncErrorMs;
 
diff --git a/src/audio/scheduler.ts b/src/audio/scheduler.ts
index 84a7fff..3df0bbf 100644
--- a/src/audio/scheduler.ts
+++ b/src/audio/scheduler.ts
@@ -50,7 +50,10 @@ const SCHEDULE_REFILL_THRESHOLD_FRACTION = 0.5;
 const SCHEDULE_REFILL_MIN_THRESHOLD_SEC = 0.1;
 const SCHEDULE_REFILL_MAX_THRESHOLD_SEC = 5;
 
-const DEFAULT_CORRECTION_THRESHOLDS: Record<CorrectionMode, CorrectionThresholds> = {
+const DEFAULT_CORRECTION_THRESHOLDS: Record<
+  CorrectionMode,
+  CorrectionThresholds
+> = {
   sync: {
     resyncAboveMs: 200,
     rate2AboveMs: 35,
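Read as a ladder over the absolute smoothed sync error, the thresholds above select one of four escalating corrections: nothing inside the deadband, single-sample insertion or removal in the samples band, a ±1% or ±2% playback-rate nudge in the rate bands, and a hard resync beyond resyncAboveMs. A sketch of that mapping, assuming the usual ordering deadband < samples < rate1 < rate2 < resync (the real decision logic lives inline in processAudioQueue):

type CorrectionAction =
  | { kind: "none" }
  | { kind: "samples"; adjust: 1 | -1 }
  | { kind: "rate"; rate: number }
  | { kind: "resync" };

function pickCorrection(
  errorMs: number, // positive: audio is scheduled late relative to target
  t: CorrectionThresholds,
): CorrectionAction {
  const abs = Math.abs(errorMs);
  if (abs > t.resyncAboveMs) return { kind: "resync" };
  if (abs < t.deadbandBelowMs) return { kind: "none" };
  // Dropping one sample (adjust = -1) catches up; inserting one falls back.
  if (abs <= t.samplesBelowMs)
    return { kind: "samples", adjust: errorMs > 0 ? -1 : 1 };
  if (abs >= t.rate2AboveMs)
    return { kind: "rate", rate: errorMs > 0 ? 1.02 : 0.98 };
  if (abs >= t.rate1AboveMs)
    return { kind: "rate", rate: errorMs > 0 ? 1.01 : 0.99 };
  return { kind: "none" };
}

With the "sync" defaults visible above, an error that settles around 50 ms is absorbed by a 2% rate change rather than an audible restart; only sustained error beyond 200 ms takes the hard-resync path.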
"none" | "samples" | "rate" | "resync" = "none"; + private currentCorrectionMethod: "none" | "samples" | "rate" | "resync" = + "none"; private lastSamplesAdjusted: number = 0; private _correctionMode: CorrectionMode = "sync"; @@ -135,7 +139,9 @@ export class AudioScheduler { correctionMode: CorrectionMode = "sync", storage: SendspinStorage | null = null, useOutputLatencyCompensation: boolean = true, - thresholdOverrides?: Partial>>, + thresholdOverrides?: Partial< + Record> + >, ) { this._correctionMode = correctionMode; this.useOutputLatencyCompensation = useOutputLatencyCompensation; @@ -159,8 +165,8 @@ export class AudioScheduler { if (this.isCastRuntime) { this.clockSource.disableTimestampPromotion(); } - this.recorrectionMonitor = new RecorrectionMonitor( - () => this.checkRecorrection(), + this.recorrectionMonitor = new RecorrectionMonitor(() => + this.checkRecorrection(), ); } @@ -169,7 +175,6 @@ export class AudioScheduler { return Math.max(0, Math.min(5000, Math.round(delayMs))); } - get correctionMode(): CorrectionMode { return this._correctionMode; } @@ -184,11 +189,13 @@ export class AudioScheduler { } private get usesRecorrectionMonitor(): boolean { - return this.correctionThresholds[this._correctionMode].enableRecorrectionMonitor; + return this.correctionThresholds[this._correctionMode] + .enableRecorrectionMonitor; } private get usesImmediateDelayCutover(): boolean { - return this.correctionThresholds[this._correctionMode].immediateDelayCutover; + return this.correctionThresholds[this._correctionMode] + .immediateDelayCutover; } private getTargetScheduledHorizonSec(): number { @@ -196,8 +203,10 @@ export class AudioScheduler { return CAST_SCHEDULE_HORIZON_SEC; } const errorMs = this.timeFilter.error / 1000; - if (errorMs < SCHEDULE_HORIZON_PRECISE_ERROR_MS) return SCHEDULE_HORIZON_PRECISE_SEC; - if (errorMs <= SCHEDULE_HORIZON_GOOD_ERROR_MS) return SCHEDULE_HORIZON_GOOD_SEC; + if (errorMs < SCHEDULE_HORIZON_PRECISE_ERROR_MS) + return SCHEDULE_HORIZON_PRECISE_SEC; + if (errorMs <= SCHEDULE_HORIZON_GOOD_ERROR_MS) + return SCHEDULE_HORIZON_GOOD_SEC; return SCHEDULE_HORIZON_POOR_SEC; } @@ -243,7 +252,8 @@ export class AudioScheduler { const incrementResyncCount = options.incrementResyncCount ?? false; const markCooldown = options.markCooldown ?? 
@@ -243,7 +252,8 @@
     const incrementResyncCount = options.incrementResyncCount ?? false;
     const markCooldown = options.markCooldown ?? true;
     const nowMs = performance.now();
-    const cutoffTime = this.audioContext.currentTime + RECORRECTION_CUTOVER_GUARD_SEC;
+    const cutoffTime =
+      this.audioContext.currentTime + RECORRECTION_CUTOVER_GUARD_SEC;
     if (incrementResyncCount) {
       this.resyncCount++;
       this._intervalResyncCount++;
@@ -253,7 +263,10 @@
     this.lastSamplesAdjusted = 0;
     this.currentPlaybackRate = 1.0;
     const cutResult = this.cutScheduledSources(cutoffTime);
-    this.recorrectionMonitor.minScheduleTimeSec = Math.max(cutoffTime, cutResult.keptTailEndTimeSec);
+    this.recorrectionMonitor.minScheduleTimeSec = Math.max(
+      cutoffTime,
+      cutResult.keptTailEndTimeSec,
+    );
     this.nextPlaybackTime = 0;
     this.nextScheduleTime = 0;
     this.lastScheduledServerTime = 0;
@@ -264,13 +277,25 @@
   }
 
   private checkRecorrection(): void {
-    if (!this.usesRecorrectionMonitor) { this.recorrectionMonitor.resetCheckState(); return; }
-    if (!this.audioContext || this.audioContext.state !== "running") { this.recorrectionMonitor.resetCheckState(); return; }
-    if (!this.stateManager.isPlaying || this.nextPlaybackTime === 0 || this.lastScheduledServerTime === 0) {
-      this.recorrectionMonitor.resetCheckState(); return;
+    if (!this.usesRecorrectionMonitor) {
+      this.recorrectionMonitor.resetCheckState();
+      return;
+    }
+    if (!this.audioContext || this.audioContext.state !== "running") {
+      this.recorrectionMonitor.resetCheckState();
+      return;
+    }
+    if (
+      !this.stateManager.isPlaying ||
+      this.nextPlaybackTime === 0 ||
+      this.lastScheduledServerTime === 0
+    ) {
+      this.recorrectionMonitor.resetCheckState();
+      return;
     }
-    const { audioContextTimeSec, audioContextRawTimeSec, nowMs, nowUs } = this.clockSource.getTimingSnapshot(this.audioContext);
+    const { audioContextTimeSec, audioContextRawTimeSec, nowMs, nowUs } =
+      this.clockSource.getTimingSnapshot(this.audioContext);
     this.pruneExpiredScheduledSources(audioContextRawTimeSec);
     if (this.getScheduledAheadSec(audioContextRawTimeSec) <= 0) {
       this.recorrectionMonitor.resetCheckState();
@@ -278,17 +303,35 @@
       return;
     }
 
-    const outputLatencySec = this.useOutputLatencyCompensation ? 
this.latencyTracker.getSmoothedUs(this.audioContext) / 1_000_000 + : 0; + const targetPlaybackTime = this.computeTargetPlaybackTime( + this.lastScheduledServerTime, + audioContextTimeSec, + nowUs, + outputLatencySec, + ); const syncErrorMs = (this.nextPlaybackTime - targetPlaybackTime) * 1000; const smoothedSyncErrorMs = this.applySyncErrorEma(syncErrorMs); - if (this.recorrectionMonitor.shouldRecorrect(Math.abs(smoothedSyncErrorMs), syncErrorMs, nowMs)) { - this.performGuardedCutover("recorrection", { incrementResyncCount: true, markCooldown: true }); + if ( + this.recorrectionMonitor.shouldRecorrect( + Math.abs(smoothedSyncErrorMs), + syncErrorMs, + nowMs, + ) + ) { + this.performGuardedCutover("recorrection", { + incrementResyncCount: true, + markCooldown: true, + }); } } - getSyncDelayMs(): number { return this.syncDelayMs; } + getSyncDelayMs(): number { + return this.syncDelayMs; + } setSyncDelay(delayMs: number): void { const sanitized = this.sanitizeSyncDelayMs(delayMs); @@ -297,15 +340,27 @@ export class AudioScheduler { if (delta === 0 || !this.usesImmediateDelayCutover) return; if (!this.audioContext || this.audioContext.state !== "running") return; if (!this.stateManager.isPlaying) return; - if (this.scheduledSources.length === 0 && this.audioBufferQueue.length === 0 && this.nextPlaybackTime === 0) return; - this.performGuardedCutover("delay-change", { incrementResyncCount: false, markCooldown: true }); + if ( + this.scheduledSources.length === 0 && + this.audioBufferQueue.length === 0 && + this.nextPlaybackTime === 0 + ) + return; + this.performGuardedCutover("delay-change", { + incrementResyncCount: false, + markCooldown: true, + }); } get syncInfo(): { - clockDriftPercent: number; syncErrorMs: number; resyncCount: number; - outputLatencyMs: number; playbackRate: number; + clockDriftPercent: number; + syncErrorMs: number; + resyncCount: number; + outputLatencyMs: number; + playbackRate: number; correctionMethod: "none" | "samples" | "rate" | "resync"; - samplesAdjusted: number; correctionMode: CorrectionMode; + samplesAdjusted: number; + correctionMode: CorrectionMode; } { return { clockDriftPercent: this.timeFilter.drift * 100, @@ -320,18 +375,27 @@ export class AudioScheduler { } private emitStatusLog(nowMs: number): void { - if (this._lastStatusLogMs !== 0 && nowMs - this._lastStatusLogMs < 10_000) return; + if (this._lastStatusLogMs !== 0 && nowMs - this._lastStatusLogMs < 10_000) + return; this._lastStatusLogMs = nowMs; let corr: string; switch (this.currentCorrectionMethod) { - case "rate": corr = `rate@${this.currentPlaybackRate}`; break; - case "samples": corr = `samples:${this.lastSamplesAdjusted}`; break; - default: corr = this.currentCorrectionMethod; + case "rate": + corr = `rate@${this.currentPlaybackRate}`; + break; + case "samples": + corr = `samples:${this.lastSamplesAdjusted}`; + break; + default: + corr = this.currentCorrectionMethod; } - const queueDepth = this.audioBufferQueue.length + this.scheduledSources.length; - const aheadSec = this.audioContext ? this.getScheduledAheadSec(this.audioContext.currentTime) : 0; + const queueDepth = + this.audioBufferQueue.length + this.scheduledSources.length; + const aheadSec = this.audioContext + ? this.getScheduledAheadSec(this.audioContext.currentTime) + : 0; let clock: string; if (this.clockSource.timestampPromotionDisabled) { @@ -348,8 +412,12 @@ export class AudioScheduler { ? 
`synced(err=${(this.timeFilter.error / 1000).toFixed(1)}ms,drift=${this.timeFilter.drift.toFixed(3)},n=${this.timeFilter.count})` : `pending(n=${this.timeFilter.count})`; - const latMs = this.latencyTracker.getSmoothedUs(this.audioContext) !== null - ? Math.round(this.latencyTracker.getSmoothedUs(this.audioContext) / 1000) : 0; + const latMs = + this.latencyTracker.getSmoothedUs(this.audioContext) !== null + ? Math.round( + this.latencyTracker.getSmoothedUs(this.audioContext) / 1000, + ) + : 0; console.log( `Sendspin: sync=${this.smoothedSyncErrorMs >= 0 ? "+" : ""}${this.smoothedSyncErrorMs.toFixed(1)}ms` + @@ -362,30 +430,45 @@ export class AudioScheduler { private applySyncErrorEma(inputMs: number): number { this.currentSyncErrorMs = inputMs; - this.smoothedSyncErrorMs = SYNC_ERROR_ALPHA * inputMs + (1 - SYNC_ERROR_ALPHA) * this.smoothedSyncErrorMs; + this.smoothedSyncErrorMs = + SYNC_ERROR_ALPHA * inputMs + + (1 - SYNC_ERROR_ALPHA) * this.smoothedSyncErrorMs; return this.smoothedSyncErrorMs; } - private resetSyncErrorEma(): void { this.smoothedSyncErrorMs = 0; } - + private resetSyncErrorEma(): void { + this.smoothedSyncErrorMs = 0; + } private copyBuffer(buffer: AudioBuffer): AudioBuffer { if (!this.audioContext) return buffer; - const newBuffer = this.audioContext.createBuffer(buffer.numberOfChannels, buffer.length, buffer.sampleRate); + const newBuffer = this.audioContext.createBuffer( + buffer.numberOfChannels, + buffer.length, + buffer.sampleRate, + ); for (let ch = 0; ch < buffer.numberOfChannels; ch++) { newBuffer.getChannelData(ch).set(buffer.getChannelData(ch)); } return newBuffer; } - private adjustBufferSamples(buffer: AudioBuffer, samplesToAdjust: number): AudioBuffer { - if (!this.audioContext || samplesToAdjust === 0 || buffer.length < 2) return this.copyBuffer(buffer); + private adjustBufferSamples( + buffer: AudioBuffer, + samplesToAdjust: number, + ): AudioBuffer { + if (!this.audioContext || samplesToAdjust === 0 || buffer.length < 2) + return this.copyBuffer(buffer); const channels = buffer.numberOfChannels; const len = buffer.length; const sampleRate = buffer.sampleRate; try { if (samplesToAdjust > 0) { - const newBuffer = this.audioContext.createBuffer(channels, len + 1, sampleRate); + const newBuffer = this.audioContext.createBuffer( + channels, + len + 1, + sampleRate, + ); for (let ch = 0; ch < channels; ch++) { const oldData = buffer.getChannelData(ch); const newData = newBuffer.getChannelData(ch); @@ -402,7 +485,11 @@ export class AudioScheduler { } return newBuffer; } else { - const newBuffer = this.audioContext.createBuffer(channels, len - 1, sampleRate); + const newBuffer = this.audioContext.createBuffer( + channels, + len - 1, + sampleRate, + ); for (let ch = 0; ch < channels; ch++) { const oldData = buffer.getChannelData(ch); const newData = newBuffer.getChannelData(ch); @@ -413,7 +500,8 @@ export class AudioScheduler { const pos = len - 3 - f; if (pos < 0) break; const alpha = SAMPLE_CORRECTION_FADE_ALPHAS[f]; - newData[pos] = newData[pos] * (1 - alpha) + replacementSample * alpha; + newData[pos] = + newData[pos] * (1 - alpha) + replacementSample * alpha; } } return newBuffer; @@ -431,28 +519,37 @@ export class AudioScheduler { this.audioElement.style.display = "none"; document.body.appendChild(this.audioElement); } - if ((navigator as any).audioSession) { (navigator as any).audioSession.type = "playback"; } - const streamSampleRate = this.stateManager.currentStreamFormat?.sample_rate || 48000; + if ((navigator as any).audioSession) { + (navigator as 
any).audioSession.type = "playback";
+    }
+    const streamSampleRate =
+      this.stateManager.currentStreamFormat?.sample_rate || 48000;
     this.audioContext = new AudioContext({ sampleRate: streamSampleRate });
     this.gainNode = this.audioContext.createGain();
     const audioElement = this.audioElement;
     if (this.outputMode === "direct") {
       this.gainNode.connect(this.audioContext.destination);
     } else {
-      if (!audioElement) throw new Error("Media-element output requires an audio element.");
+      if (!audioElement)
+        throw new Error("Media-element output requires an audio element.");
       if (this.isAndroid && this.silentAudioSrc) {
         this.gainNode.connect(this.audioContext.destination);
         audioElement.src = this.silentAudioSrc;
         audioElement.loop = true;
         audioElement.muted = false;
         audioElement.volume = 1.0;
-        audioElement.play().catch((e) => { console.warn("Sendspin: Audio autoplay blocked:", e); });
+        audioElement.play().catch((e) => {
+          console.warn("Sendspin: Audio autoplay blocked:", e);
+        });
       } else {
-        this.streamDestination = this.audioContext.createMediaStreamDestination();
+        this.streamDestination =
+          this.audioContext.createMediaStreamDestination();
         this.gainNode.connect(this.streamDestination);
         audioElement.srcObject = this.streamDestination.stream;
         audioElement.volume = 1.0;
-        audioElement.play().catch((e) => { console.warn("Sendspin: Audio autoplay blocked:", e); });
+        audioElement.play().catch((e) => {
+          console.warn("Sendspin: Audio autoplay blocked:", e);
+        });
       }
     }
     this.updateVolume();
@@ -461,30 +558,61 @@ export class AudioScheduler {
 
   async resumeAudioContext(): Promise<void> {
     if (this.audioContext && this.audioContext.state === "suspended") {
-      try { await this.audioContext.resume(); console.log("Sendspin: AudioContext resumed"); }
-      catch (e) { console.warn("Sendspin: Failed to resume AudioContext:", e); return; }
+      try {
+        await this.audioContext.resume();
+        console.log("Sendspin: AudioContext resumed");
+      } catch (e) {
+        console.warn("Sendspin: Failed to resume AudioContext:", e);
+        return;
+      }
       if (this.audioBufferQueue.length > 0) this.scheduleQueueProcessing();
       if (this.usesRecorrectionMonitor) this.recorrectionMonitor.start();
     }
   }
 
-  private cutScheduledSources(cutoffTime: number): { requeuedCount: number; cutCount: number; keptTailEndTimeSec: number } {
-    if (!this.audioContext) return { requeuedCount: 0, cutCount: 0, keptTailEndTimeSec: 0 };
+  private cutScheduledSources(cutoffTime: number): {
+    requeuedCount: number;
+    cutCount: number;
+    keptTailEndTimeSec: number;
+  } {
+    if (!this.audioContext)
+      return { requeuedCount: 0, cutCount: 0, keptTailEndTimeSec: 0 };
     const stopTime = Math.max(cutoffTime, this.audioContext.currentTime);
-    let requeued = 0, cutCount = 0, keptTailEndTimeSec = 0;
+    let requeued = 0,
+      cutCount = 0,
+      keptTailEndTimeSec = 0;
     this.scheduledSources = this.scheduledSources.filter((entry) => {
-      if (entry.startTime < stopTime) { keptTailEndTimeSec = Math.max(keptTailEndTimeSec, entry.endTime); return true; }
-      try { entry.source.onended = null; entry.source.stop(stopTime); } catch { /* ignore */ }
-      this.audioBufferQueue.push({ buffer: entry.buffer, serverTime: entry.serverTime, generation: entry.generation });
-      requeued++; cutCount++; return false;
+      if (entry.startTime < stopTime) {
+        keptTailEndTimeSec = Math.max(keptTailEndTimeSec, entry.endTime);
+        return true;
+      }
+      try {
+        entry.source.onended = null;
+        entry.source.stop(stopTime);
+      } catch {
+        /* ignore */
+      }
+      this.audioBufferQueue.push({
+        buffer: entry.buffer,
+        serverTime: entry.serverTime,
+        generation: entry.generation,
+      });
+      requeued++;
+      cutCount++;
+      return false;
     });
     return { requeuedCount: requeued, cutCount, keptTailEndTimeSec };
   }
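The refill path a little further below (measureBufferedPlaybackRunwaySec, getScheduledRefillThresholdSec, scheduleQueueRefill) avoids polling: it measures how many seconds of audio are scheduled plus queued, derives a wake-up threshold as half the target horizon clamped to [0.1 s, 5 s] (the SCHEDULE_REFILL_* constants shown earlier), and arms a single timer that fires just as the runway decays to that threshold. The arithmetic, condensed into one hypothetical helper:

// Mirrors the SCHEDULE_REFILL_* constants: wake when the scheduled runway
// is about to fall to half the target horizon, clamped into [0.1 s, 5 s].
function refillDelayMs(
  scheduledAheadSec: number,
  targetHorizonSec: number,
): number {
  const thresholdSec = Math.max(0.1, Math.min(5, targetHorizonSec * 0.5));
  return Math.max(0, (scheduledAheadSec - thresholdSec) * 1000); // 0 = refill now
}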
   updateVolume(): void {
     if (!this.gainNode) return;
-    if (this.useHardwareVolume) { this.gainNode.gain.value = 1.0; return; }
-    this.gainNode.gain.value = this.stateManager.muted ? 0 : this.stateManager.volume / 100;
+    if (this.useHardwareVolume) {
+      this.gainNode.gain.value = 1.0;
+      return;
+    }
+    this.gainNode.gain.value = this.stateManager.muted
+      ? 0
+      : this.stateManager.volume / 100;
   }
 
   measureBufferedPlaybackRunwaySec(): number {
@@ -493,38 +621,75 @@
     this.pruneExpiredScheduledSources(currentTimeSec);
     const scheduledAheadSec = this.getScheduledAheadSec(currentTimeSec);
     const queuedAheadSec = this.audioBufferQueue.reduce(
-      (totalSec, chunk) => totalSec + chunk.buffer.duration, 0,
+      (totalSec, chunk) => totalSec + chunk.buffer.duration,
+      0,
     );
     return Math.max(0, scheduledAheadSec + queuedAheadSec);
   }
 
   private cancelScheduledRefill(): void {
-    if (this.refillTimeout !== null) { clearTimeout(this.refillTimeout); this.refillTimeout = null; }
+    if (this.refillTimeout !== null) {
+      clearTimeout(this.refillTimeout);
+      this.refillTimeout = null;
+    }
   }
 
-  private getScheduledRefillThresholdSec(targetScheduledHorizonSec: number): number {
-    return Math.max(SCHEDULE_REFILL_MIN_THRESHOLD_SEC,
-      Math.min(SCHEDULE_REFILL_MAX_THRESHOLD_SEC, targetScheduledHorizonSec * SCHEDULE_REFILL_THRESHOLD_FRACTION));
+  private getScheduledRefillThresholdSec(
+    targetScheduledHorizonSec: number,
+  ): number {
+    return Math.max(
+      SCHEDULE_REFILL_MIN_THRESHOLD_SEC,
+      Math.min(
+        SCHEDULE_REFILL_MAX_THRESHOLD_SEC,
+        targetScheduledHorizonSec * SCHEDULE_REFILL_THRESHOLD_FRACTION,
+      ),
+    );
   }
 
   private scheduleQueueRefill(targetScheduledHorizonSec: number): void {
     this.cancelScheduledRefill();
-    if (!this.audioContext || this.audioContext.state !== "running" || !this.stateManager.isPlaying || this.audioBufferQueue.length === 0) return;
+    if (
+      !this.audioContext ||
+      this.audioContext.state !== "running" ||
+      !this.stateManager.isPlaying ||
+      this.audioBufferQueue.length === 0
+    )
+      return;
     const currentTimeSec = this.audioContext.currentTime;
     this.pruneExpiredScheduledSources(currentTimeSec);
     const scheduledAheadSec = this.getScheduledAheadSec(currentTimeSec);
     const refillThresholdSec = this.getScheduledRefillThresholdSec(
       targetScheduledHorizonSec,
     );
-    if (scheduledAheadSec <= refillThresholdSec) { this.scheduleQueueProcessing(); return; }
+    if (scheduledAheadSec <= refillThresholdSec) {
+      this.scheduleQueueProcessing();
+      return;
+    }
     const delayMs = (scheduledAheadSec - refillThresholdSec) * 1000;
     const runRefill = () => {
       this.refillTimeout = null;
-      if (!this.audioContext || this.audioContext.state !== "running" || !this.stateManager.isPlaying || this.audioBufferQueue.length === 0) return;
+      if (
+        !this.audioContext ||
+        this.audioContext.state !== "running" ||
+        !this.stateManager.isPlaying ||
+        this.audioBufferQueue.length === 0
+      )
+        return;
       this.scheduleQueueProcessing();
     };
-    if (typeof globalThis.setTimeout === "function") { this.refillTimeout = globalThis.setTimeout(runRefill, delayMs); return; }
+    if (typeof globalThis.setTimeout === "function") {
+      this.refillTimeout = globalThis.setTimeout(runRefill, delayMs);
+      return;
+    }
     this.refillTimeout = null;
-    if (typeof (globalThis as unknown as { queueMicrotask?: unknown }).queueMicrotask === 
"function") { - (globalThis as unknown as { queueMicrotask: (cb: () => void) => void }).queueMicrotask(runRefill); return; + if ( + typeof (globalThis as unknown as { queueMicrotask?: unknown }) + .queueMicrotask === "function" + ) { + ( + globalThis as unknown as { queueMicrotask: (cb: () => void) => void } + ).queueMicrotask(runRefill); + return; } void Promise.resolve().then(runRefill); } @@ -535,14 +700,26 @@ export class AudioScheduler { this.queueProcessScheduled = true; if (typeof globalThis.setTimeout === "function") { this.scheduleTimeout = globalThis.setTimeout(() => { - this.scheduleTimeout = null; this.queueProcessScheduled = false; this.processAudioQueue(); + this.scheduleTimeout = null; + this.queueProcessScheduled = false; + this.processAudioQueue(); }, 15); return; } - const run = () => { this.queueProcessScheduled = false; this.processAudioQueue(); }; - if (typeof (globalThis as unknown as { queueMicrotask?: unknown }).queueMicrotask === "function") { - (globalThis as unknown as { queueMicrotask: (cb: () => void) => void }).queueMicrotask(run); - } else { Promise.resolve().then(run); } + const run = () => { + this.queueProcessScheduled = false; + this.processAudioQueue(); + }; + if ( + typeof (globalThis as unknown as { queueMicrotask?: unknown }) + .queueMicrotask === "function" + ) { + ( + globalThis as unknown as { queueMicrotask: (cb: () => void) => void } + ).queueMicrotask(run); + } else { + Promise.resolve().then(run); + } } handleDecodedChunk(chunk: DecodedAudioChunk): void { @@ -550,27 +727,44 @@ export class AudioScheduler { if (chunk.generation !== this.stateManager.streamGeneration) return; const numChannels = chunk.samples.length; const numFrames = chunk.samples[0].length; - const audioBuffer = this.audioContext.createBuffer(numChannels, numFrames, chunk.sampleRate); - for (let ch = 0; ch < numChannels; ch++) audioBuffer.getChannelData(ch).set(chunk.samples[ch]); - this.audioBufferQueue.push({ buffer: audioBuffer, serverTime: chunk.serverTimeUs, generation: chunk.generation }); + const audioBuffer = this.audioContext.createBuffer( + numChannels, + numFrames, + chunk.sampleRate, + ); + for (let ch = 0; ch < numChannels; ch++) + audioBuffer.getChannelData(ch).set(chunk.samples[ch]); + this.audioBufferQueue.push({ + buffer: audioBuffer, + serverTime: chunk.serverTimeUs, + generation: chunk.generation, + }); this.scheduleQueueProcessing(); } - processAudioQueue(): void { this.cancelScheduledRefill(); if (!this.audioContext || !this.gainNode) return; if (this.audioContext.state !== "running") return; const currentGeneration = this.stateManager.streamGeneration; - this.audioBufferQueue = this.audioBufferQueue.filter((chunk) => chunk.generation === currentGeneration); + this.audioBufferQueue = this.audioBufferQueue.filter( + (chunk) => chunk.generation === currentGeneration, + ); this.audioBufferQueue.sort((a, b) => a.serverTime - b.serverTime); if (!this.timeFilter.is_synchronized) return; - const { audioContextTimeSec: audioContextTime, audioContextRawTimeSec, nowMs, nowUs } = this.clockSource.getTimingSnapshot(this.audioContext); + const { + audioContextTimeSec: audioContextTime, + audioContextRawTimeSec, + nowMs, + nowUs, + } = this.clockSource.getTimingSnapshot(this.audioContext); this.pruneExpiredScheduledSources(audioContextRawTimeSec); - const outputLatencySec = this.useOutputLatencyCompensation ? this.latencyTracker.getSmoothedUs(this.audioContext) / 1_000_000 : 0; + const outputLatencySec = this.useOutputLatencyCompensation + ? 
this.latencyTracker.getSmoothedUs(this.audioContext) / 1_000_000 + : 0; const syncDelaySec = this.syncDelayMs / 1000; const targetScheduledHorizonSec = this.getTargetScheduledHorizonSec(); @@ -578,22 +772,40 @@ export class AudioScheduler { if (this.clockSource.pendingCutover) { this.clockSource.pendingCutover = false; - if (this.scheduledSources.length > 0 || this.nextPlaybackTime !== 0 || this.lastScheduledServerTime !== 0) { - this.performGuardedCutover("delay-change", { incrementResyncCount: false, markCooldown: false }); + if ( + this.scheduledSources.length > 0 || + this.nextPlaybackTime !== 0 || + this.lastScheduledServerTime !== 0 + ) { + this.performGuardedCutover("delay-change", { + incrementResyncCount: false, + markCooldown: false, + }); return; } } while (this.audioBufferQueue.length > 0) { - const scheduledAheadSec = this.getScheduledAheadSec(audioContextRawTimeSec); - if (this.nextPlaybackTime > 0 && scheduledAheadSec >= targetScheduledHorizonSec) break; + const scheduledAheadSec = this.getScheduledAheadSec( + audioContextRawTimeSec, + ); + if ( + this.nextPlaybackTime > 0 && + scheduledAheadSec >= targetScheduledHorizonSec + ) + break; const chunk = this.audioBufferQueue.shift()!; let playbackTime: number; let scheduleTime: number; let playbackRate: number; - const targetPlaybackTime = this.computeTargetPlaybackTime(chunk.serverTime, audioContextTime, nowUs, outputLatencySec); + const targetPlaybackTime = this.computeTargetPlaybackTime( + chunk.serverTime, + audioContextTime, + nowUs, + outputLatencySec, + ); const isTimestamp = this.clockSource.active === "timestamp"; if (this.nextPlaybackTime === 0 || this.lastScheduledServerTime === 0) { @@ -601,7 +813,10 @@ export class AudioScheduler { playbackTime = targetPlaybackTime; scheduleTime = playbackTime - syncDelaySec; if (this.recorrectionMonitor.minScheduleTimeSec !== null) { - scheduleTime = Math.max(scheduleTime, this.recorrectionMonitor.minScheduleTimeSec); + scheduleTime = Math.max( + scheduleTime, + this.recorrectionMonitor.minScheduleTimeSec, + ); playbackTime = scheduleTime + syncDelaySec; } this.recorrectionMonitor.minScheduleTimeSec = null; @@ -616,63 +831,111 @@ export class AudioScheduler { const syncErrorMs = syncErrorSec * 1000; const correctionErrorMs = this.applySyncErrorEma(syncErrorMs); const thresholds = this.correctionThresholds[this._correctionMode]; - const canHardResync = this.recorrectionMonitor.canUseHardResync(nowMs, isTimestamp); - - if (Math.abs(correctionErrorMs) > thresholds.resyncAboveMs && canHardResync) { + const canHardResync = this.recorrectionMonitor.canUseHardResync( + nowMs, + isTimestamp, + ); + + if ( + Math.abs(correctionErrorMs) > thresholds.resyncAboveMs && + canHardResync + ) { this.recorrectionMonitor.noteHardResync(nowMs); - this.resyncCount++; this._intervalResyncCount++; + this.resyncCount++; + this._intervalResyncCount++; this.resetSyncErrorEma(); this.cutScheduledSources(targetPlaybackTime - syncDelaySec); playbackTime = targetPlaybackTime; scheduleTime = playbackTime - syncDelaySec; playbackRate = 1.0; - this.currentCorrectionMethod = "resync"; this.lastSamplesAdjusted = 0; + this.currentCorrectionMethod = "resync"; + this.lastSamplesAdjusted = 0; chunk.buffer = this.copyBuffer(chunk.buffer); } else if (Math.abs(correctionErrorMs) > thresholds.resyncAboveMs) { - playbackTime = this.nextPlaybackTime; scheduleTime = this.nextScheduleTime; - playbackRate = Number.isFinite(thresholds.rate2AboveMs) ? (correctionErrorMs > 0 ? 
1.02 : 0.98) : 1.0; - this.currentCorrectionMethod = playbackRate === 1.0 ? "none" : "rate"; this.lastSamplesAdjusted = 0; + playbackTime = this.nextPlaybackTime; + scheduleTime = this.nextScheduleTime; + playbackRate = Number.isFinite(thresholds.rate2AboveMs) + ? correctionErrorMs > 0 + ? 1.02 + : 0.98 + : 1.0; + this.currentCorrectionMethod = + playbackRate === 1.0 ? "none" : "rate"; + this.lastSamplesAdjusted = 0; chunk.buffer = this.copyBuffer(chunk.buffer); } else if (Math.abs(correctionErrorMs) < thresholds.deadbandBelowMs) { - playbackTime = this.nextPlaybackTime; scheduleTime = this.nextScheduleTime; - playbackRate = 1.0; this.currentCorrectionMethod = "none"; this.lastSamplesAdjusted = 0; + playbackTime = this.nextPlaybackTime; + scheduleTime = this.nextScheduleTime; + playbackRate = 1.0; + this.currentCorrectionMethod = "none"; + this.lastSamplesAdjusted = 0; chunk.buffer = this.copyBuffer(chunk.buffer); } else if (Math.abs(correctionErrorMs) <= thresholds.samplesBelowMs) { - playbackTime = this.nextPlaybackTime; scheduleTime = this.nextScheduleTime; playbackRate = 1.0; + playbackTime = this.nextPlaybackTime; + scheduleTime = this.nextScheduleTime; + playbackRate = 1.0; const samplesToAdjust = correctionErrorMs > 0 ? -1 : 1; - chunk.buffer = this.adjustBufferSamples(chunk.buffer, samplesToAdjust); - this.currentCorrectionMethod = "samples"; this.lastSamplesAdjusted = samplesToAdjust; + chunk.buffer = this.adjustBufferSamples( + chunk.buffer, + samplesToAdjust, + ); + this.currentCorrectionMethod = "samples"; + this.lastSamplesAdjusted = samplesToAdjust; } else { - playbackTime = this.nextPlaybackTime; scheduleTime = this.nextScheduleTime; + playbackTime = this.nextPlaybackTime; + scheduleTime = this.nextScheduleTime; const absErrorMs = Math.abs(correctionErrorMs); if (correctionErrorMs > 0) { - playbackRate = absErrorMs >= thresholds.rate2AboveMs ? 1.02 : absErrorMs >= thresholds.rate1AboveMs ? 1.01 : 1.0; + playbackRate = + absErrorMs >= thresholds.rate2AboveMs + ? 1.02 + : absErrorMs >= thresholds.rate1AboveMs + ? 1.01 + : 1.0; } else { - playbackRate = absErrorMs >= thresholds.rate2AboveMs ? 0.98 : absErrorMs >= thresholds.rate1AboveMs ? 0.99 : 1.0; + playbackRate = + absErrorMs >= thresholds.rate2AboveMs + ? 0.98 + : absErrorMs >= thresholds.rate1AboveMs + ? 0.99 + : 1.0; } - this.currentCorrectionMethod = playbackRate === 1.0 ? "none" : "rate"; this.lastSamplesAdjusted = 0; + this.currentCorrectionMethod = + playbackRate === 1.0 ? 
"none" : "rate"; + this.lastSamplesAdjusted = 0; chunk.buffer = this.copyBuffer(chunk.buffer); } } else { // Gap detected in server timestamps - hard resync (gated on cooldown) if (this.recorrectionMonitor.canUseHardResync(nowMs, isTimestamp)) { this.recorrectionMonitor.noteHardResync(nowMs); - this.resyncCount++; this._intervalResyncCount++; + this.resyncCount++; + this._intervalResyncCount++; this.cutScheduledSources(targetPlaybackTime - syncDelaySec); } - playbackTime = targetPlaybackTime; scheduleTime = playbackTime - syncDelaySec; - playbackRate = 1.0; this.currentCorrectionMethod = "resync"; this.lastSamplesAdjusted = 0; + playbackTime = targetPlaybackTime; + scheduleTime = playbackTime - syncDelaySec; + playbackRate = 1.0; + this.currentCorrectionMethod = "resync"; + this.lastSamplesAdjusted = 0; chunk.buffer = this.copyBuffer(chunk.buffer); } } this.currentPlaybackRate = playbackRate; if (playbackTime < audioContextRawTimeSec) { - this.nextPlaybackTime = 0; this.nextScheduleTime = 0; this.lastScheduledServerTime = 0; continue; + this.nextPlaybackTime = 0; + this.nextScheduleTime = 0; + this.lastScheduledServerTime = 0; + continue; } - const effectiveScheduleTime = Math.max(scheduleTime, audioContextRawTimeSec); - const effectivePlaybackTime = effectiveScheduleTime + (playbackTime - scheduleTime); + const effectiveScheduleTime = Math.max( + scheduleTime, + audioContextRawTimeSec, + ); + const effectivePlaybackTime = + effectiveScheduleTime + (playbackTime - scheduleTime); const source = this.audioContext.createBufferSource(); source.buffer = chunk.buffer; source.playbackRate.value = playbackRate; @@ -682,11 +945,16 @@ export class AudioScheduler { const actualDuration = chunk.buffer.duration / playbackRate; this.nextPlaybackTime = effectivePlaybackTime + actualDuration; this.nextScheduleTime = effectiveScheduleTime + actualDuration; - this.lastScheduledServerTime = chunk.serverTime + chunk.buffer.duration * 1_000_000; + this.lastScheduledServerTime = + chunk.serverTime + chunk.buffer.duration * 1_000_000; const scheduledEntry = { - source, startTime: effectiveScheduleTime, endTime: effectiveScheduleTime + actualDuration, - buffer: chunk.buffer, serverTime: chunk.serverTime, generation: chunk.generation, + source, + startTime: effectiveScheduleTime, + endTime: effectiveScheduleTime + actualDuration, + buffer: chunk.buffer, + serverTime: chunk.serverTime, + generation: chunk.generation, }; this.scheduledSources.push(scheduledEntry); source.onended = () => { @@ -702,20 +970,33 @@ export class AudioScheduler { this.emitStatusLog(nowMs); } - private computeTargetPlaybackTime(serverTimeUs: number, audioContextTime: number, nowUs: number, outputLatencySec: number): number { + private computeTargetPlaybackTime( + serverTimeUs: number, + audioContextTime: number, + nowUs: number, + outputLatencySec: number, + ): number { const chunkClientTimeUs = this.timeFilter.computeClientTime(serverTimeUs); const deltaSec = (chunkClientTimeUs - nowUs) / 1_000_000; - return audioContextTime + deltaSec + SCHEDULE_HEADROOM_SEC - outputLatencySec; + return ( + audioContextTime + deltaSec + SCHEDULE_HEADROOM_SEC - outputLatencySec + ); } startAudioElement(): void { if (this.outputMode === "media-element" && this.audioElement?.paused) { - this.audioElement.play().catch((e) => { console.warn("Sendspin: Failed to start audio element:", e); }); + this.audioElement.play().catch((e) => { + console.warn("Sendspin: Failed to start audio element:", e); + }); } } stopAudioElement(): void { - if (this.outputMode === 
"media-element" && this.audioElement && !this.audioElement.paused) { + if ( + this.outputMode === "media-element" && + this.audioElement && + !this.audioElement.paused + ) { this.audioElement.pause(); } } @@ -723,10 +1004,19 @@ export class AudioScheduler { clearBuffers(): void { this.recorrectionMonitor.stop(); this.cancelScheduledRefill(); - this.scheduledSources.forEach((entry) => { try { entry.source.stop(); } catch { /* ignore */ } }); + this.scheduledSources.forEach((entry) => { + try { + entry.source.stop(); + } catch { + /* ignore */ + } + }); this.scheduledSources = []; this.audioBufferQueue = []; - if (this.scheduleTimeout !== null) { clearTimeout(this.scheduleTimeout); this.scheduleTimeout = null; } + if (this.scheduleTimeout !== null) { + clearTimeout(this.scheduleTimeout); + this.scheduleTimeout = null; + } this.queueProcessScheduled = false; this.stateManager.resetStreamAnchors(); this.resetScheduledPlaybackState(); @@ -737,15 +1027,26 @@ export class AudioScheduler { close(): void { this.clearBuffers(); - if (this.audioContext) { this.audioContext.close(); this.audioContext = null; } + if (this.audioContext) { + this.audioContext.close(); + this.audioContext = null; + } this.gainNode = null; this.streamDestination = null; if (this.outputMode === "media-element" && this.audioElement) { - this.audioElement.pause(); this.audioElement.srcObject = null; - this.audioElement.loop = false; this.audioElement.removeAttribute("src"); this.audioElement.load(); - if (this.ownsAudioElement) { this.audioElement.remove(); this.audioElement = undefined; } + this.audioElement.pause(); + this.audioElement.srcObject = null; + this.audioElement.loop = false; + this.audioElement.removeAttribute("src"); + this.audioElement.load(); + if (this.ownsAudioElement) { + this.audioElement.remove(); + this.audioElement = undefined; + } } } - getAudioContext(): AudioContext | null { return this.audioContext; } + getAudioContext(): AudioContext | null { + return this.audioContext; + } } diff --git a/src/core/codec-support.ts b/src/core/codec-support.ts index e096d13..46bf25d 100644 --- a/src/core/codec-support.ts +++ b/src/core/codec-support.ts @@ -2,8 +2,7 @@ import type { Codec, SupportedFormat } from "../types"; /** Detect which audio codecs the current browser supports. */ export function getBrowserSupportedCodecs(): Set { - const userAgent = - typeof navigator !== "undefined" ? navigator.userAgent : ""; + const userAgent = typeof navigator !== "undefined" ? navigator.userAgent : ""; const isSafari = /^((?!chrome|android).)*safari/i.test(userAgent); const isFirefox = /firefox/i.test(userAgent); diff --git a/src/core/core.ts b/src/core/core.ts index 4548723..1085fae 100644 --- a/src/core/core.ts +++ b/src/core/core.ts @@ -42,7 +42,10 @@ export class SendspinCore implements StreamHandler { // Stream events — consumers (e.g., SendspinPlayer) subscribe to these private _onAudioData?: (chunk: DecodedAudioChunk) => void; - private _onStreamStart?: (format: StreamFormat, isFormatUpdate: boolean) => void; + private _onStreamStart?: ( + format: StreamFormat, + isFormatUpdate: boolean, + ) => void; private _onStreamClear?: () => void; private _onStreamEnd?: () => void; private _onVolumeUpdate?: () => void; @@ -56,7 +59,10 @@ export class SendspinCore implements StreamHandler { const clientName = config.clientName ?? `Sendspin JS Client (${randomId})`; this.config = { ...config, playerId, clientName }; - this._syncDelayMs = Math.max(0, Math.min(5000, Math.round(config.syncDelay ?? 
0))); + this._syncDelayMs = Math.max( + 0, + Math.min(5000, Math.round(config.syncDelay ?? 0)), + ); this.timeFilter = new SendspinTimeFilter(0, 1.1, 2.0, 1e-12); this.stateManager = new StateManager(config.onStateChange); @@ -138,7 +144,9 @@ export class SendspinCore implements StreamHandler { set onAudioData(cb: ((chunk: DecodedAudioChunk) => void) | undefined) { this._onAudioData = cb; } - set onStreamStart(cb: ((format: StreamFormat, isFormatUpdate: boolean) => void) | undefined) { + set onStreamStart( + cb: ((format: StreamFormat, isFormatUpdate: boolean) => void) | undefined, + ) { this._onStreamStart = cb; } set onStreamClear(cb: (() => void) | undefined) { diff --git a/src/core/protocol-handler.ts b/src/core/protocol-handler.ts index 8f2ad9f..68a0f22 100644 --- a/src/core/protocol-handler.ts +++ b/src/core/protocol-handler.ts @@ -175,7 +175,10 @@ export class ProtocolHandler { `BitDepth=${this.stateManager.currentStreamFormat.bit_depth}bit`, ); - this.streamHandler.handleStreamStart(this.stateManager.currentStreamFormat, isFormatUpdate); + this.streamHandler.handleStreamStart( + this.stateManager.currentStreamFormat, + isFormatUpdate, + ); this.stateManager.isPlaying = true; From b7a5a5a83a9c790ca873a0236c15441ac654dc74 Mon Sep 17 00:00:00 2001 From: Maxim Raznatovski Date: Mon, 20 Apr 2026 09:08:40 +0200 Subject: [PATCH 10/27] refactor: drop dead useOutputLatencyCompensation from ProtocolHandler MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Stored, never read — only the audio scheduler consumes the flag. --- src/core/protocol-handler.ts | 4 ---- 1 file changed, 4 deletions(-) diff --git a/src/core/protocol-handler.ts b/src/core/protocol-handler.ts index 68a0f22..f25f956 100644 --- a/src/core/protocol-handler.ts +++ b/src/core/protocol-handler.ts @@ -35,7 +35,6 @@ export interface ProtocolHandlerConfig { onVolumeCommand?: (volume: number, muted: boolean) => void; onDelayCommand?: (delayMs: number) => void; getExternalVolume?: () => { volume: number; muted: boolean }; - useOutputLatencyCompensation?: boolean; } export class ProtocolHandler { @@ -43,7 +42,6 @@ export class ProtocolHandler { private codecs: Codec[]; private bufferCapacity: number; private useHardwareVolume: boolean; - private useOutputLatencyCompensation: boolean; private onVolumeCommand?: (volume: number, muted: boolean) => void; private onDelayCommand?: (delayMs: number) => void; private getExternalVolume?: () => { volume: number; muted: boolean }; @@ -61,8 +59,6 @@ export class ProtocolHandler { this.codecs = config.codecs ?? ["opus", "flac", "pcm"]; this.bufferCapacity = config.bufferCapacity ?? 1024 * 1024 * 5; // 5MB default this.useHardwareVolume = config.useHardwareVolume ?? false; - this.useOutputLatencyCompensation = - config.useOutputLatencyCompensation ?? 
true; this.onVolumeCommand = config.onVolumeCommand; this.onDelayCommand = config.onDelayCommand; this.getExternalVolume = config.getExternalVolume; From 53f090559823dee96d0c437a1c38f3b01ad1542a Mon Sep 17 00:00:00 2001 From: Maxim Raznatovski Date: Mon, 20 Apr 2026 09:09:36 +0200 Subject: [PATCH 11/27] refactor: localize wsUrl in SendspinCore.connect --- src/core/core.ts | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/src/core/core.ts b/src/core/core.ts index 1085fae..c73e80d 100644 --- a/src/core/core.ts +++ b/src/core/core.ts @@ -37,7 +37,6 @@ export class SendspinCore implements StreamHandler { private decoder: SendspinDecoder; private config: SendspinCoreConfig; - private wsUrl: string = ""; private _syncDelayMs: number; // Stream events — consumers (e.g., SendspinPlayer) subscribe to these @@ -208,15 +207,9 @@ export class SendspinCore implements StreamHandler { } const url = new URL(this.config.baseUrl); const wsProtocol = url.protocol === "https:" ? "wss:" : "ws:"; - this.wsUrl = `${wsProtocol}//${url.host}/sendspin`; + const wsUrl = `${wsProtocol}//${url.host}/sendspin`; - await this.wsManager.connect( - this.wsUrl, - onOpen, - onMessage, - onError, - onClose, - ); + await this.wsManager.connect(wsUrl, onOpen, onMessage, onError, onClose); } } From bb62fb1411ac3954afaac146d3544684be7c6256 Mon Sep 17 00:00:00 2001 From: Maxim Raznatovski Date: Mon, 20 Apr 2026 09:10:16 +0200 Subject: [PATCH 12/27] fix: double getSmoothedUs call in `emitStatusLog` getSmoothedUs advances the EMA and triggers persistence as side effects. Refactor to capture it once instead. --- src/audio/scheduler.ts | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/src/audio/scheduler.ts b/src/audio/scheduler.ts index 3df0bbf..23a67dc 100644 --- a/src/audio/scheduler.ts +++ b/src/audio/scheduler.ts @@ -412,12 +412,8 @@ export class AudioScheduler { ? `synced(err=${(this.timeFilter.error / 1000).toFixed(1)}ms,drift=${this.timeFilter.drift.toFixed(3)},n=${this.timeFilter.count})` : `pending(n=${this.timeFilter.count})`; - const latMs = - this.latencyTracker.getSmoothedUs(this.audioContext) !== null - ? Math.round( - this.latencyTracker.getSmoothedUs(this.audioContext) / 1000, - ) - : 0; + const smoothedLatUs = this.latencyTracker.getSmoothedUs(this.audioContext); + const latMs = Math.round(smoothedLatUs / 1000); console.log( `Sendspin: sync=${this.smoothedSyncErrorMs >= 0 ? "+" : ""}${this.smoothedSyncErrorMs.toFixed(1)}ms` + From e3f288f21822083b0fe918e83ec0ba38ca0dac5a Mon Sep 17 00:00:00 2001 From: Maxim Raznatovski Date: Mon, 20 Apr 2026 09:10:35 +0200 Subject: [PATCH 13/27] fix: reset hard-resync cooldown across stream boundaries clearBuffers previously called recorrectionMonitor.stop(), which preserved _lastHardResyncAtMs and _hardResyncGraceUntilMs. Stale cooldown then blocked an early hard resync on the next stream. Use fullReset() to wipe these too. 
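
To make the distinction concrete, here is a minimal sketch of the two reset
paths. It is not the real class: only the two cooldown fields named above are
taken from the actual monitor, and the timer field is an assumption.

```typescript
// Sketch only: stop() vs fullReset() on a stripped-down monitor.
class RecorrectionMonitorSketch {
  private checkTimer: ReturnType<typeof setInterval> | null = null;
  private _lastHardResyncAtMs = -Infinity;
  private _hardResyncGraceUntilMs: number | null = null;

  // stop(): halts the periodic check but keeps the cooldown bookkeeping,
  // which is what a pause within the same stream wants.
  stop(): void {
    if (this.checkTimer !== null) {
      clearInterval(this.checkTimer);
      this.checkTimer = null;
    }
  }

  // fullReset(): additionally wipes the cooldown, so the next stream can
  // hard-resync immediately instead of waiting out a stale grace window.
  fullReset(): void {
    this.stop();
    this._lastHardResyncAtMs = -Infinity;
    this._hardResyncGraceUntilMs = null;
  }
}
```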
--- src/audio/scheduler.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/audio/scheduler.ts b/src/audio/scheduler.ts index 23a67dc..bcc81e9 100644 --- a/src/audio/scheduler.ts +++ b/src/audio/scheduler.ts @@ -998,7 +998,7 @@ export class AudioScheduler { } clearBuffers(): void { - this.recorrectionMonitor.stop(); + this.recorrectionMonitor.fullReset(); this.cancelScheduledRefill(); this.scheduledSources.forEach((entry) => { try { From f8502a02bdbe901bdf0c4fddb3ea1a4db264e7cf Mon Sep 17 00:00:00 2001 From: Maxim Raznatovski Date: Mon, 20 Apr 2026 09:11:48 +0200 Subject: [PATCH 14/27] refactor: extract shared clampSyncDelayMs helper Consolidates four duplicate clamp expressions across core, protocol-handler, and scheduler. Preserves the isFinite guard the scheduler already had. --- src/audio/scheduler.ts | 10 +++------- src/core/core.ts | 10 ++++------ src/core/protocol-handler.ts | 5 +++-- src/sync-delay.ts | 6 ++++++ 4 files changed, 16 insertions(+), 15 deletions(-) create mode 100644 src/sync-delay.ts diff --git a/src/audio/scheduler.ts b/src/audio/scheduler.ts index bcc81e9..fac23bd 100644 --- a/src/audio/scheduler.ts +++ b/src/audio/scheduler.ts @@ -22,6 +22,7 @@ import { RECORRECTION_CUTOVER_GUARD_SEC, } from "./recorrection-monitor"; import { OutputLatencyTracker } from "./output-latency-tracker"; +import { clampSyncDelayMs } from "../sync-delay"; // Sync correction constants const SAMPLE_CORRECTION_FADE_LEN = 8; @@ -145,7 +146,7 @@ export class AudioScheduler { ) { this._correctionMode = correctionMode; this.useOutputLatencyCompensation = useOutputLatencyCompensation; - this.syncDelayMs = this.sanitizeSyncDelayMs(this.syncDelayMs); + this.syncDelayMs = clampSyncDelayMs(this.syncDelayMs); // Merge user-provided threshold overrides with defaults this.correctionThresholds = { ...DEFAULT_CORRECTION_THRESHOLDS }; @@ -170,11 +171,6 @@ export class AudioScheduler { ); } - private sanitizeSyncDelayMs(delayMs: number): number { - if (!isFinite(delayMs)) return 0; - return Math.max(0, Math.min(5000, Math.round(delayMs))); - } - get correctionMode(): CorrectionMode { return this._correctionMode; } @@ -334,7 +330,7 @@ export class AudioScheduler { } setSyncDelay(delayMs: number): void { - const sanitized = this.sanitizeSyncDelayMs(delayMs); + const sanitized = clampSyncDelayMs(delayMs); const delta = sanitized - this.syncDelayMs; this.syncDelayMs = sanitized; if (delta === 0 || !this.usesImmediateDelayCutover) return; diff --git a/src/core/core.ts b/src/core/core.ts index c73e80d..e91b233 100644 --- a/src/core/core.ts +++ b/src/core/core.ts @@ -12,6 +12,7 @@ import { ProtocolHandler } from "./protocol-handler"; import { StateManager } from "./state-manager"; import { WebSocketManager } from "./websocket-manager"; import { SendspinTimeFilter } from "./time-filter"; +import { clampSyncDelayMs } from "../sync-delay"; import type { SendspinCoreConfig, DecodedAudioChunk, @@ -58,10 +59,7 @@ export class SendspinCore implements StreamHandler { const clientName = config.clientName ?? `Sendspin JS Client (${randomId})`; this.config = { ...config, playerId, clientName }; - this._syncDelayMs = Math.max( - 0, - Math.min(5000, Math.round(config.syncDelay ?? 0)), - ); + this._syncDelayMs = clampSyncDelayMs(config.syncDelay ?? 
0); this.timeFilter = new SendspinTimeFilter(0, 1.1, 2.0, 1e-12); this.stateManager = new StateManager(config.onStateChange); @@ -128,7 +126,7 @@ export class SendspinCore implements StreamHandler { } handleSyncDelayChange(delayMs: number): void { - this._syncDelayMs = Math.max(0, Math.min(5000, Math.round(delayMs))); + this._syncDelayMs = clampSyncDelayMs(delayMs); this._onSyncDelayChange?.(this._syncDelayMs); } @@ -246,7 +244,7 @@ export class SendspinCore implements StreamHandler { // ======================================== setSyncDelay(delayMs: number): void { - this._syncDelayMs = Math.max(0, Math.min(5000, Math.round(delayMs))); + this._syncDelayMs = clampSyncDelayMs(delayMs); this._onSyncDelayChange?.(this._syncDelayMs); this.protocolHandler.sendStateUpdate(); } diff --git a/src/core/protocol-handler.ts b/src/core/protocol-handler.ts index f25f956..e517949 100644 --- a/src/core/protocol-handler.ts +++ b/src/core/protocol-handler.ts @@ -23,6 +23,7 @@ import type { StateManager } from "./state-manager"; import type { WebSocketManager } from "./websocket-manager"; import { TimeSyncManager } from "./time-sync-manager"; import { getSupportedFormats } from "./codec-support"; +import { clampSyncDelayMs } from "../sync-delay"; // Constants const STATE_UPDATE_INTERVAL = 5000; // 5 seconds @@ -242,7 +243,7 @@ export class ProtocolHandler { case "set_static_delay": { const delay = playerCommand.static_delay_ms; if (typeof delay === "number" && isFinite(delay)) { - const clamped = Math.max(0, Math.min(5000, Math.round(delay))); + const clamped = clampSyncDelayMs(delay); this.streamHandler.handleSyncDelayChange(clamped); this.onDelayCommand?.(clamped); } @@ -296,7 +297,7 @@ export class ProtocolHandler { } const syncDelayMs = this.streamHandler.getSyncDelayMs(); - const staticDelayMs = Math.max(0, Math.min(5000, Math.round(syncDelayMs))); + const staticDelayMs = clampSyncDelayMs(syncDelayMs); const message: ClientState = { type: "client/state" as MessageType.CLIENT_STATE, diff --git a/src/sync-delay.ts b/src/sync-delay.ts new file mode 100644 index 0000000..f0a7c1b --- /dev/null +++ b/src/sync-delay.ts @@ -0,0 +1,6 @@ +export const SYNC_DELAY_MAX_MS = 5000; + +export function clampSyncDelayMs(delayMs: number): number { + if (!isFinite(delayMs)) return 0; + return Math.max(0, Math.min(SYNC_DELAY_MAX_MS, Math.round(delayMs))); +} From 1135d646352c98223099aa0efe952b86802cf336 Mon Sep 17 00:00:00 2001 From: Maxim Raznatovski Date: Mon, 20 Apr 2026 09:12:59 +0200 Subject: [PATCH 15/27] fix: restore audio-context-missing diagnostic in scheduler Old handleBinaryMessage logged when an audio chunk arrived before an AudioContext existed. The split decoder no longer owns a context, so the warn moves to scheduler.handleDecodedChunk where the nullability is checked. 
--- src/audio/scheduler.ts | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/src/audio/scheduler.ts b/src/audio/scheduler.ts
index fac23bd..cb76c98 100644
--- a/src/audio/scheduler.ts
+++ b/src/audio/scheduler.ts
@@ -715,7 +715,10 @@
 }
 handleDecodedChunk(chunk: DecodedAudioChunk): void {
- if (!this.audioContext || !this.gainNode) return;
+ if (!this.audioContext || !this.gainNode) {
+ console.warn("Sendspin: Received audio chunk but no audio context");
+ return;
+ }
 if (chunk.generation !== this.stateManager.streamGeneration) return;
 const numChannels = chunk.samples.length;
 const numFrames = chunk.samples[0].length;

From bfb9b09f4b74697404b2b0b7f617468cb53c94d6 Mon Sep 17 00:00:00 2001
From: Maxim Raznatovski
Date: Mon, 20 Apr 2026 09:13:38 +0200
Subject: [PATCH 16/27] fix: tighten WebSocketManager.adopt contract
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Three related fixes to the adopted-socket path:

- Throw synchronously when passed a CLOSING or CLOSED socket instead of
silently no-op'ing — the caller almost certainly wants to know.
- Null out the old socket's handlers before calling close() so its late
async onclose event cannot fire the newly-wired close handler.
- Return a Promise that resolves on open (or rejects on early close), so
SendspinCore.connect() can await a CONNECTING adopted socket before the
caller's next API call races the session.
---
 src/core/core.ts | 2 +-
 src/core/websocket-manager.ts | 60 +++++++++++++++++++++++++++--------
 2 files changed, 47 insertions(+), 15 deletions(-)

diff --git a/src/core/core.ts b/src/core/core.ts
index e91b233..3b9ca1a 100644
--- a/src/core/core.ts
+++ b/src/core/core.ts
@@ -189,7 +189,7 @@ export class SendspinCore implements StreamHandler {
 if (this.config.webSocket) {
 // Adopt externally-managed WebSocket
- this.wsManager.adopt(
+ await this.wsManager.adopt(
 this.config.webSocket,
 onOpen,
 onMessage,
diff --git a/src/core/websocket-manager.ts b/src/core/websocket-manager.ts
index 1d671b3..8e9d677 100644
--- a/src/core/websocket-manager.ts
+++ b/src/core/websocket-manager.ts
@@ -16,6 +16,9 @@ export class WebSocketManager {
 * Adopt an existing WebSocket connection.
 * The caller is responsible for having already opened the socket.
 * Reconnection is disabled for adopted sockets.
+ *
+ * Returns a Promise that resolves once the adopted socket is open. Throws
+ * synchronously if the socket is already CLOSING or CLOSED.
+ */
 adopt(
 ws: WebSocket,
@@ -23,16 +26,31 @@
 onMessage?: (event: MessageEvent) => void,
 onError?: (error: Event) => void,
 onClose?: () => void,
- ): void {
+ ): Promise<void> {
+ if (
+ ws.readyState !== WebSocket.OPEN &&
+ ws.readyState !== WebSocket.CONNECTING
+ ) {
+ throw new Error(
+ `Sendspin: Cannot adopt WebSocket in readyState ${ws.readyState} (must be OPEN or CONNECTING)`,
+ );
+ }
+
 // Store handlers
 this.onOpenHandler = onOpen;
 this.onMessageHandler = onMessage;
 this.onErrorHandler = onError;
 this.onCloseHandler = onClose;

- // Close any existing connection
+ // Detach handlers from any existing socket so its async close event
+ // cannot fire into the newly-adopted session.
if (this.ws) { - this.ws.close(); + const old = this.ws; + old.onopen = null; + old.onmessage = null; + old.onerror = null; + old.onclose = null; + old.close(); this.ws = null; } @@ -61,21 +79,35 @@ export class WebSocketManager { } }; - // If already open, fire onOpen immediately - if (ws.readyState === WebSocket.OPEN) { - console.log("Sendspin: Adopted open WebSocket"); - if (this.onOpenHandler) { - this.onOpenHandler(); - } - } else if (ws.readyState === WebSocket.CONNECTING) { - // Wait for it to open - this.ws.onopen = () => { - console.log("Sendspin: Adopted WebSocket connected"); + return new Promise((resolve, reject) => { + const fireOpen = () => { if (this.onOpenHandler) { this.onOpenHandler(); } + resolve(); }; - } + + if (ws.readyState === WebSocket.OPEN) { + console.log("Sendspin: Adopted open WebSocket"); + fireOpen(); + return; + } + + // CONNECTING: wait for open or early close. + const prevOnClose = this.ws!.onclose; + this.ws!.onopen = () => { + console.log("Sendspin: Adopted WebSocket connected"); + fireOpen(); + }; + this.ws!.onclose = (event: CloseEvent) => { + if (prevOnClose) { + prevOnClose.call(this.ws!, event); + } + reject( + new Error("Sendspin: Adopted WebSocket closed before opening"), + ); + }; + }); } // Connect to WebSocket server From 3487dc2a430ad3d496c0e26e0d1f4f171edc9f52 Mon Sep 17 00:00:00 2001 From: Maxim Raznatovski Date: Mon, 20 Apr 2026 09:14:30 +0200 Subject: [PATCH 17/27] fix: kick audio queue on clock-source promotion to timestamp Old setActiveAudioClockSource kicked the queue when promoting to timestamp mid-stream. The split ClockSource.setActive only flipped _pendingCutover, letting the next tick (up to 250ms later) pick up the cutover. Restore the immediate kick via an onPromotion callback wired to scheduleQueueProcessing. 
--- src/audio/clock-source.ts | 8 ++++++++ src/audio/scheduler.ts | 5 +++++ src/core/websocket-manager.ts | 4 +--- 3 files changed, 14 insertions(+), 3 deletions(-) diff --git a/src/audio/clock-source.ts b/src/audio/clock-source.ts index b121879..9399c7e 100644 --- a/src/audio/clock-source.ts +++ b/src/audio/clock-source.ts @@ -47,6 +47,7 @@ export class ClockSource { private _pendingCutover = false; private _lastRejectReason: string | null = null; private _timestampPromotionDisabled = false; + private _onPromotion?: () => void; // Output timestamp validation state private lastSample: OutputTimestampSample | null = null; @@ -91,9 +92,16 @@ export class ClockSource { if (this.activeSource === source) return false; this.activeSource = source; this._pendingCutover = source === "timestamp"; + if (this._pendingCutover) { + this._onPromotion?.(); + } return this._pendingCutover; } + onPromotion(cb: () => void): void { + this._onPromotion = cb; + } + reset(): void { this.activeSource = "estimated"; this._pendingCutover = false; diff --git a/src/audio/scheduler.ts b/src/audio/scheduler.ts index cb76c98..980a437 100644 --- a/src/audio/scheduler.ts +++ b/src/audio/scheduler.ts @@ -166,6 +166,11 @@ export class AudioScheduler { if (this.isCastRuntime) { this.clockSource.disableTimestampPromotion(); } + this.clockSource.onPromotion(() => { + if (this.audioBufferQueue.length > 0 || this.scheduledSources.length > 0) { + this.scheduleQueueProcessing(); + } + }); this.recorrectionMonitor = new RecorrectionMonitor(() => this.checkRecorrection(), ); diff --git a/src/core/websocket-manager.ts b/src/core/websocket-manager.ts index 8e9d677..34ecd3c 100644 --- a/src/core/websocket-manager.ts +++ b/src/core/websocket-manager.ts @@ -103,9 +103,7 @@ export class WebSocketManager { if (prevOnClose) { prevOnClose.call(this.ws!, event); } - reject( - new Error("Sendspin: Adopted WebSocket closed before opening"), - ); + reject(new Error("Sendspin: Adopted WebSocket closed before opening")); }; }); } From d6974553e5a84ef7fc1db767b597baac125a78bd Mon Sep 17 00:00:00 2001 From: Maxim Raznatovski Date: Mon, 20 Apr 2026 09:15:56 +0200 Subject: [PATCH 18/27] fix: clean up playback state on transport close via Core API - SendspinCore onClose now clears the periodic state-update interval so it doesn't spam "WebSocket not connected" warnings every 5s for standalone Core consumers who have no cleanup path of their own. - Add SendspinCore.resetPlaybackState() to reset isPlaying / currentStreamFormat without tearing down the connection. - SendspinPlayer uses the new method instead of reaching into core._stateManager directly. 
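
For standalone Core consumers, the intended wiring looks roughly like the
sketch below. The drain delay is a hypothetical placeholder, and the
onConnectionClose hook is assumed to follow the same setter pattern as the
other stream-event hooks.

```typescript
import { SendspinCore } from "@sendspin/sendspin-js";

const core = new SendspinCore({ baseUrl: "http://your-server:8095" });

core.onConnectionClose = () => {
  // Give already-decoded audio a moment to drain, then drop playback
  // state without touching the (already closed) transport.
  // The 500ms figure is illustrative, not an SDK constant.
  setTimeout(() => core.resetPlaybackState(), 500);
};

await core.connect();
```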
--- src/audio/scheduler.ts | 5 ++++- src/core/core.ts | 13 +++++++++++++ src/index.ts | 4 +--- 3 files changed, 18 insertions(+), 4 deletions(-) diff --git a/src/audio/scheduler.ts b/src/audio/scheduler.ts index 980a437..3d00f1c 100644 --- a/src/audio/scheduler.ts +++ b/src/audio/scheduler.ts @@ -167,7 +167,10 @@ export class AudioScheduler { this.clockSource.disableTimestampPromotion(); } this.clockSource.onPromotion(() => { - if (this.audioBufferQueue.length > 0 || this.scheduledSources.length > 0) { + if ( + this.audioBufferQueue.length > 0 || + this.scheduledSources.length > 0 + ) { this.scheduleQueueProcessing(); } }); diff --git a/src/core/core.ts b/src/core/core.ts index 3b9ca1a..becdc48 100644 --- a/src/core/core.ts +++ b/src/core/core.ts @@ -183,6 +183,9 @@ export class SendspinCore implements StreamHandler { }; const onClose = () => { this.protocolHandler.stopTimeSync(); + // Stop periodic state-update sends so they don't spam + // "WebSocket not connected" warnings after the transport is gone. + this.stateManager.clearStateUpdateInterval(); console.log("Sendspin: Connection closed"); this._onConnectionClose?.(); }; @@ -211,6 +214,16 @@ export class SendspinCore implements StreamHandler { } } + /** + * Reset playback-related state (isPlaying, currentStreamFormat) without + * tearing down the connection. Intended for transport-loss cleanup after + * any buffered audio has finished draining. + */ + resetPlaybackState(): void { + this.stateManager.isPlaying = false; + this.stateManager.currentStreamFormat = null; + } + disconnect(reason: GoodbyeReason = "shutdown"): void { if (this.wsManager.isConnected()) { this.protocolHandler.sendGoodbye(reason); diff --git a/src/index.ts b/src/index.ts index 458e13a..6db7143 100644 --- a/src/index.ts +++ b/src/index.ts @@ -179,7 +179,6 @@ export class SendspinPlayer { if (this.suppressDisconnectPlaybackReset) { return; } - this.core._stateManager.clearStateUpdateInterval(); this.scheduleDisconnectPlaybackReset(); }; } @@ -197,8 +196,7 @@ export class SendspinPlayer { return; } this.scheduler.clearBuffers(); - this.core._stateManager.currentStreamFormat = null; - this.core._stateManager.isPlaying = false; + this.core.resetPlaybackState(); this.scheduler.stopAudioElement(); if (typeof navigator !== "undefined" && navigator.mediaSession) { navigator.mediaSession.playbackState = "paused"; From 9c91363f0a3ec86d09efac8e9400eed8b0ca7466 Mon Sep 17 00:00:00 2001 From: Maxim Raznatovski Date: Mon, 20 Apr 2026 09:16:44 +0200 Subject: [PATCH 19/27] fix: derive FLAC OfflineAudioContext channel count from stream format Was hardcoded to 2 channels, which decoded mono FLAC into stereo and would silently drop channels beyond stereo. Use format.channels and rebuild the cached context when it changes. 
--- src/audio/decoder.ts | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/src/audio/decoder.ts b/src/audio/decoder.ts index 820f672..303e113 100644 --- a/src/audio/decoder.ts +++ b/src/audio/decoder.ts @@ -29,6 +29,7 @@ export class SendspinDecoder { // FLAC decoding context (OfflineAudioContext, no playback needed) private flacDecodingContext: OfflineAudioContext | null = null; private flacDecodingContextSampleRate: number = 0; + private flacDecodingContextChannels: number = 0; constructor( onDecodedChunk: (chunk: DecodedAudioChunk) => void, @@ -156,13 +157,22 @@ export class SendspinDecoder { // FLAC Decoder (uses OfflineAudioContext) // ======================================== - private getFlacDecodingContext(sampleRate: number): OfflineAudioContext { + private getFlacDecodingContext( + sampleRate: number, + channels: number, + ): OfflineAudioContext { if ( !this.flacDecodingContext || - this.flacDecodingContextSampleRate !== sampleRate + this.flacDecodingContextSampleRate !== sampleRate || + this.flacDecodingContextChannels !== channels ) { - this.flacDecodingContext = new OfflineAudioContext(2, 1, sampleRate); + this.flacDecodingContext = new OfflineAudioContext( + channels, + 1, + sampleRate, + ); this.flacDecodingContextSampleRate = sampleRate; + this.flacDecodingContextChannels = channels; } return this.flacDecodingContext; } @@ -186,7 +196,10 @@ export class SendspinDecoder { dataToEncode = combined.buffer; } - const ctx = this.getFlacDecodingContext(format.sample_rate); + const ctx = this.getFlacDecodingContext( + format.sample_rate, + format.channels, + ); const audioBuffer = await ctx.decodeAudioData(dataToEncode); // Extract Float32Array per channel from AudioBuffer @@ -606,5 +619,6 @@ export class SendspinDecoder { this.flacDecodingContext = null; this.flacDecodingContextSampleRate = 0; + this.flacDecodingContextChannels = 0; } } From 0f1455f5ab27e2467a28f17584a9b0638c6f23ad Mon Sep 17 00:00:00 2001 From: Maxim Raznatovski Date: Mon, 20 Apr 2026 09:17:23 +0200 Subject: [PATCH 20/27] fix: report live output latency in syncInfo syncInfo.outputLatencyMs was reading a cached value set by the last getSmoothedUs call, so it could lag up to a recorrection interval behind the AudioContext. Read baseLatency + outputLatency directly. The now-unused cache and accessor are removed. --- src/audio/output-latency-tracker.ts | 11 +---------- src/audio/scheduler.ts | 2 +- 2 files changed, 2 insertions(+), 11 deletions(-) diff --git a/src/audio/output-latency-tracker.ts b/src/audio/output-latency-tracker.ts index 0d6a8c6..9d8db24 100644 --- a/src/audio/output-latency-tracker.ts +++ b/src/audio/output-latency-tracker.ts @@ -13,7 +13,6 @@ const OUTPUT_LATENCY_STORAGE_KEY = "sendspin-output-latency-us"; const OUTPUT_LATENCY_PERSIST_INTERVAL_MS = 10_000; export class OutputLatencyTracker { - private lastRawOutputLatencyUs: number = 0; private smoothedOutputLatencyUs: number | null = null; private lastLatencyPersistAtMs: number | null = null; @@ -53,9 +52,7 @@ export class OutputLatencyTracker { if (!audioContext) return 0; const baseLatency = audioContext.baseLatency ?? 0; const outputLatency = audioContext.outputLatency ?? 0; - const rawUs = (baseLatency + outputLatency) * 1_000_000; - this.lastRawOutputLatencyUs = rawUs; - return rawUs; + return (baseLatency + outputLatency) * 1_000_000; } /** Get EMA-smoothed output latency in microseconds. 
*/ @@ -87,14 +84,8 @@ export class OutputLatencyTracker { return this.smoothedOutputLatencyUs; } - /** Get last raw reading in microseconds (for sync info display). */ - getLastRawUs(): number { - return this.lastRawOutputLatencyUs; - } - /** Reset smoother (on stream change or audio context recreation). */ reset(): void { this.smoothedOutputLatencyUs = null; - this.lastRawOutputLatencyUs = 0; } } diff --git a/src/audio/scheduler.ts b/src/audio/scheduler.ts index 3d00f1c..ae48870 100644 --- a/src/audio/scheduler.ts +++ b/src/audio/scheduler.ts @@ -370,7 +370,7 @@ export class AudioScheduler { clockDriftPercent: this.timeFilter.drift * 100, syncErrorMs: this.currentSyncErrorMs, resyncCount: this.resyncCount, - outputLatencyMs: this.latencyTracker.getLastRawUs() / 1000, + outputLatencyMs: this.latencyTracker.getRawUs(this.audioContext) / 1000, playbackRate: this.currentPlaybackRate, correctionMethod: this.currentCorrectionMethod, samplesAdjusted: this.lastSamplesAdjusted, From ad09e82a2fe76a76eea0d1edaee771213ca4833a Mon Sep 17 00:00:00 2001 From: Maxim Raznatovski Date: Mon, 20 Apr 2026 09:17:52 +0200 Subject: [PATCH 21/27] refactor: encapsulate RecorrectionMonitor.minScheduleTimeSec behind accessors --- src/audio/recorrection-monitor.ts | 16 ++++++++++++++-- src/audio/scheduler.ts | 17 +++++++---------- 2 files changed, 21 insertions(+), 12 deletions(-) diff --git a/src/audio/recorrection-monitor.ts b/src/audio/recorrection-monitor.ts index e128929..b691091 100644 --- a/src/audio/recorrection-monitor.ts +++ b/src/audio/recorrection-monitor.ts @@ -26,7 +26,19 @@ export class RecorrectionMonitor { private _hardResyncGraceUntilMs: number | null = null; private _lastHardResyncAtMs: number = -Infinity; /** After a recorrection, scheduling must not start before this time. 
*/ - minScheduleTimeSec: number | null = null; + private _minScheduleTimeSec: number | null = null; + + get minScheduleTimeSec(): number | null { + return this._minScheduleTimeSec; + } + + setMinScheduleTime(timeSec: number | null): void { + this._minScheduleTimeSec = timeSec; + } + + clearMinScheduleTime(): void { + this._minScheduleTimeSec = null; + } constructor(private onCheck: () => void) {} @@ -162,7 +174,7 @@ export class RecorrectionMonitor { this.stop(); this._hardResyncGraceUntilMs = null; this._lastHardResyncAtMs = -Infinity; - this.minScheduleTimeSec = null; + this._minScheduleTimeSec = null; } } diff --git a/src/audio/scheduler.ts b/src/audio/scheduler.ts index ae48870..9266817 100644 --- a/src/audio/scheduler.ts +++ b/src/audio/scheduler.ts @@ -226,7 +226,7 @@ export class AudioScheduler { this.nextPlaybackTime = 0; this.nextScheduleTime = 0; this.lastScheduledServerTime = 0; - this.recorrectionMonitor.minScheduleTimeSec = null; + this.recorrectionMonitor.clearMinScheduleTime(); this.clockSource.pendingCutover = false; this.recorrectionMonitor.resetCheckState(); this.resetSyncErrorEma(); @@ -267,9 +267,8 @@ export class AudioScheduler { this.lastSamplesAdjusted = 0; this.currentPlaybackRate = 1.0; const cutResult = this.cutScheduledSources(cutoffTime); - this.recorrectionMonitor.minScheduleTimeSec = Math.max( - cutoffTime, - cutResult.keptTailEndTimeSec, + this.recorrectionMonitor.setMinScheduleTime( + Math.max(cutoffTime, cutResult.keptTailEndTimeSec), ); this.nextPlaybackTime = 0; this.nextScheduleTime = 0; @@ -815,14 +814,12 @@ export class AudioScheduler { this.recorrectionMonitor.armStartupGrace(nowMs, isTimestamp); playbackTime = targetPlaybackTime; scheduleTime = playbackTime - syncDelaySec; - if (this.recorrectionMonitor.minScheduleTimeSec !== null) { - scheduleTime = Math.max( - scheduleTime, - this.recorrectionMonitor.minScheduleTimeSec, - ); + const minScheduleTimeSec = this.recorrectionMonitor.minScheduleTimeSec; + if (minScheduleTimeSec !== null) { + scheduleTime = Math.max(scheduleTime, minScheduleTimeSec); playbackTime = scheduleTime + syncDelaySec; } - this.recorrectionMonitor.minScheduleTimeSec = null; + this.recorrectionMonitor.clearMinScheduleTime(); playbackRate = 1.0; chunk.buffer = this.copyBuffer(chunk.buffer); } else { From bf78a0123dd4ac674a124fa04328bd8d6b431c2e Mon Sep 17 00:00:00 2001 From: Maxim Raznatovski Date: Mon, 20 Apr 2026 09:18:57 +0200 Subject: [PATCH 22/27] refactor: move internal plumbing types out of public types.ts AudioBufferQueueItem and StreamHandler are internal glue between SendspinCore and the audio scheduler / protocol handler. Exporting them via index.ts's export * shrinks future refactor freedom and leaks implementation detail. Move to src/internal-types.ts, which is not re-exported. 
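
A hypothetical barrel layout makes the boundary visible; module paths other
than internal-types.ts are illustrative, not the repo's exact export list.

```typescript
// index.ts (sketch): everything reachable via `export *` is public API.
export * from "./types"; // public: configs, messages, DecodedAudioChunk
export { SendspinCore } from "./core/core";
// Intentionally no `export * from "./internal-types"`: StreamHandler and
// AudioBufferQueueItem stay private glue between protocol and audio.
```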
--- src/audio/scheduler.ts | 2 +- src/core/core.ts | 2 +- src/core/protocol-handler.ts | 2 +- src/internal-types.ts | 26 ++++++++++++++++++++++++++ src/types.ts | 20 -------------------- 5 files changed, 29 insertions(+), 23 deletions(-) create mode 100644 src/internal-types.ts diff --git a/src/audio/scheduler.ts b/src/audio/scheduler.ts index 9266817..d14f81f 100644 --- a/src/audio/scheduler.ts +++ b/src/audio/scheduler.ts @@ -7,13 +7,13 @@ */ import type { - AudioBufferQueueItem, AudioOutputMode, CorrectionMode, CorrectionThresholds, DecodedAudioChunk, SendspinStorage, } from "../types"; +import type { AudioBufferQueueItem } from "../internal-types"; import type { StateManager } from "../core/state-manager"; import type { SendspinTimeFilter } from "../core/time-filter"; import { ClockSource } from "./clock-source"; diff --git a/src/core/core.ts b/src/core/core.ts index becdc48..f71e106 100644 --- a/src/core/core.ts +++ b/src/core/core.ts @@ -17,7 +17,6 @@ import type { SendspinCoreConfig, DecodedAudioChunk, StreamFormat, - StreamHandler, GoodbyeReason, PlayerState, ControllerCommand, @@ -25,6 +24,7 @@ import type { ServerStatePayload, GroupUpdatePayload, } from "../types"; +import type { StreamHandler } from "../internal-types"; function generateRandomId(): string { return Math.random().toString(36).substring(2, 6); diff --git a/src/core/protocol-handler.ts b/src/core/protocol-handler.ts index e517949..c5378b1 100644 --- a/src/core/protocol-handler.ts +++ b/src/core/protocol-handler.ts @@ -18,7 +18,7 @@ import type { StreamEnd, StreamStart, } from "../types"; -import type { StreamHandler } from "../types"; +import type { StreamHandler } from "../internal-types"; import type { StateManager } from "./state-manager"; import type { WebSocketManager } from "./websocket-manager"; import { TimeSyncManager } from "./time-sync-manager"; diff --git a/src/internal-types.ts b/src/internal-types.ts new file mode 100644 index 0000000..8a83a6e --- /dev/null +++ b/src/internal-types.ts @@ -0,0 +1,26 @@ +/** + * Internal plumbing types shared across SDK modules but not part of the + * public API surface. These are intentionally NOT re-exported from index.ts. + */ + +import type { StreamFormat } from "./types"; + +export interface AudioBufferQueueItem { + buffer: AudioBuffer; + serverTime: number; + generation: number; +} + +/** + * Interface for protocol handler to call into the audio subsystem. + * Implemented by SendspinCore as the bridge between protocol and audio. + */ +export interface StreamHandler { + handleBinaryMessage(data: ArrayBuffer): void; + handleStreamStart(format: StreamFormat, isFormatUpdate: boolean): void; + handleStreamClear(): void; + handleStreamEnd(): void; + handleVolumeUpdate(): void; + handleSyncDelayChange(delayMs: number): void; + getSyncDelayMs(): number; +} diff --git a/src/types.ts b/src/types.ts index 441af1e..a091a93 100644 --- a/src/types.ts +++ b/src/types.ts @@ -423,12 +423,6 @@ export interface SendspinPlayerConfig { storage?: SendspinStorage | null; } -export interface AudioBufferQueueItem { - buffer: AudioBuffer; - serverTime: number; - generation: number; -} - /** * A decoded audio chunk with raw PCM samples. * Emitted by SendspinCore after decoding compressed audio. @@ -504,20 +498,6 @@ export interface SendspinCoreConfig { }) => void; } -/** - * Interface for protocol handler to call into the audio subsystem. - * Implemented by SendspinCore as the bridge between protocol and audio. 
- */ -export interface StreamHandler { - handleBinaryMessage(data: ArrayBuffer): void; - handleStreamStart(format: StreamFormat, isFormatUpdate: boolean): void; - handleStreamClear(): void; - handleStreamEnd(): void; - handleVolumeUpdate(): void; - handleSyncDelayChange(delayMs: number): void; - getSyncDelayMs(): number; -} - /** * Storage interface for persisting SDK state. * Compatible with Web Storage API (localStorage/sessionStorage). From 02d2d2be1ebbf787b2b2a6eae31213a26eebd0b4 Mon Sep 17 00:00:00 2001 From: Maxim Raznatovski Date: Mon, 20 Apr 2026 09:20:15 +0200 Subject: [PATCH 23/27] refactor: deduplicate player and core config via extends The two config interfaces duplicated ~12 fields with parallel docstrings that had already started to drift. Make Player's config extend Core's, and promote the richer docstrings (codec notes, syncDelay range, hardware-volume semantics) to the Core base so both surfaces see them. --- src/types.ts | 138 ++++++++++++++++----------------------------------- 1 file changed, 42 insertions(+), 96 deletions(-) diff --git a/src/types.ts b/src/types.ts index a091a93..fe87bdc 100644 --- a/src/types.ts +++ b/src/types.ts @@ -284,59 +284,13 @@ export interface SupportedFormat { bit_depth: number; } -export interface SendspinPlayerConfig { - /** Unique player identifier. Auto-generated if not provided. */ - playerId?: string; - - /** - * Base URL of the Sendspin server (e.g., "http://192.168.1.100:8095"). - * Required unless webSocket is provided. - */ - baseUrl?: string; - - /** Human-readable name for this player. Auto-generated if not provided. */ - clientName?: string; - - /** - * Pre-established WebSocket connection. - * When provided, the player adopts this socket instead of creating one from baseUrl. - * The socket must connect to the Sendspin /sendspin endpoint. - * Auto-reconnect is disabled for externally-managed sockets. - */ - webSocket?: WebSocket; - +export interface SendspinPlayerConfig extends SendspinCoreConfig { /** * HTMLAudioElement for media-element output mode. * Auto-created on mobile browsers if not provided. */ audioElement?: HTMLAudioElement; - /** - * Codecs to use for audio streaming, in priority order. - * Unsupported codecs for the current browser are automatically filtered out: - * - Safari: No FLAC support - * - Firefox: No Opus (audio glitches with both native and opus-encdec decoders) - * - Browsers with WebCodecs (Chrome, Edge): All codecs - * - Browsers without WebCodecs (e.g., insecure context or older browsers): No Opus - * - * Default: ["opus", "flac", "pcm"] - */ - codecs?: Codec[]; - - /** - * Buffer capacity in bytes. Defaults to 5MB for media-element, 1.5MB for direct. - */ - bufferCapacity?: number; - - /** - * Static sync delay in milliseconds. - * Positive values make playback earlier to compensate for downstream device latency. - * Allowed range: 0-5000. - * Runtime update behavior depends on the active correction mode settings. - * Defaults to a browser/platform-specific heuristic value if not provided. 
- */ - syncDelay?: number; - /** * Sync correction mode: * - "sync" (default): Corrects out of sync playback using all methods and may use pitch-changing @@ -374,48 +328,6 @@ export interface SendspinPlayerConfig { */ useOutputLatencyCompensation?: boolean; - /** Callback when player state changes (local or from server) */ - onStateChange?: (state: { - isPlaying: boolean; - volume: number; - muted: boolean; - playerState: PlayerState; - /** Cached server state (merged from server/state messages) */ - serverState: ServerStatePayload; - /** Cached group state (merged from group/update messages) */ - groupState: GroupUpdatePayload; - }) => void; - - /** - * Use hardware/external volume control instead of software gain. - * When true, the internal gain node stays at 1.0 and volume commands - * are delegated to the onVolumeCommand callback. - * - * Default: false - */ - useHardwareVolume?: boolean; - - /** - * Callback when server sends volume/mute commands. - * Only called when useHardwareVolume is true. - * The app should apply the volume to hardware (e.g., Cast system volume). - */ - onVolumeCommand?: (volume: number, muted: boolean) => void; - - /** - * Callback when server sends a set_static_delay command. - * Called with the new static delay in milliseconds (0-5000). - */ - onDelayCommand?: (delayMs: number) => void; - - /** - * Getter for external volume state. - * Called periodically when reporting state to server if useHardwareVolume is true. - * Should return current hardware volume (0-100) and muted state. - * Not called immediately after volume commands to wait for hardware to apply the change. - */ - getExternalVolume?: () => { volume: number; muted: boolean }; - /** * Storage for persisting SDK state (e.g., cached output latency). * Defaults to localStorage. Pass null to disable persistence. @@ -457,11 +369,19 @@ export interface SendspinCoreConfig { /** * Codecs to use for audio streaming, in priority order. + * Unsupported codecs for the current browser are automatically filtered out: + * - Safari: No FLAC support + * - Firefox: No Opus (audio glitches with both native and opus-encdec decoders) + * - Browsers with WebCodecs (Chrome, Edge): All codecs + * - Browsers without WebCodecs (e.g., insecure context or older browsers): No Opus + * * Default: ["opus", "flac", "pcm"] */ codecs?: Codec[]; - /** Buffer capacity in bytes. Defaults to 5MB. */ + /** + * Buffer capacity in bytes. Defaults to 5MB for media-element, 1.5MB for direct. + */ bufferCapacity?: number; /** @@ -472,28 +392,54 @@ export interface SendspinCoreConfig { */ webSocket?: WebSocket; - /** Static sync delay in milliseconds (0-5000). */ + /** + * Static sync delay in milliseconds. + * Positive values make playback earlier to compensate for downstream device latency. + * Allowed range: 0-5000. + * Runtime update behavior depends on the active correction mode settings. + * Defaults to a browser/platform-specific heuristic value if not provided. + */ syncDelay?: number; - /** Use hardware/external volume control instead of software gain. */ + /** + * Use hardware/external volume control instead of software gain. + * When true, the internal gain node stays at 1.0 and volume commands + * are delegated to the onVolumeCommand callback. + * + * Default: false + */ useHardwareVolume?: boolean; - /** Callback when server sends volume/mute commands (hardware volume mode). */ + /** + * Callback when server sends volume/mute commands. + * Only called when useHardwareVolume is true. 
+ * The app should apply the volume to hardware (e.g., Cast system volume).
+ */
 onVolumeCommand?: (volume: number, muted: boolean) => void;

- /** Callback when server sends a set_static_delay command. */
+ /**
+ * Callback when server sends a set_static_delay command.
+ * Called with the new static delay in milliseconds (0-5000).
+ */
 onDelayCommand?: (delayMs: number) => void;

- /** Getter for external volume state (hardware volume mode). */
+ /**
+ * Getter for external volume state.
+ * Called periodically when reporting state to server if useHardwareVolume is true.
+ * Should return current hardware volume (0-100) and muted state.
+ * Not called immediately after volume commands to wait for hardware to apply the change.
+ */
 getExternalVolume?: () => { volume: number; muted: boolean };

- /** Callback when player state changes */
+ /** Callback when player state changes (local or from server). */
 onStateChange?: (state: {
 isPlaying: boolean;
 volume: number;
 muted: boolean;
 playerState: PlayerState;
+ /** Cached server state (merged from server/state messages) */
 serverState: ServerStatePayload;
+ /** Cached group state (merged from group/update messages) */
 groupState: GroupUpdatePayload;
 }) => void;
}

From 9f15ed2d616c9bfc713e415bec7c922929e66780 Mon Sep 17 00:00:00 2001
From: Maxim Raznatovski
Date: Mon, 20 Apr 2026 09:20:59 +0200
Subject: [PATCH 24/27] refactor: convert AudioScheduler constructor to options object

The 14 positional parameters were a maintainability trap: four adjacent
booleans (isAndroid, isCastRuntime, ownsAudioElement, useHardwareVolume)
meant a wrong-order call site could still typecheck. Take a named
AudioSchedulerOptions instead.
---
 src/audio/scheduler.ts | 68 ++++++++++++++++++++++++++++--------------
 src/index.ts | 24 +++++++++--------
 2 files changed, 58 insertions(+), 34 deletions(-)

diff --git a/src/audio/scheduler.ts b/src/audio/scheduler.ts
index d14f81f..0a27532 100644
--- a/src/audio/scheduler.ts
+++ b/src/audio/scheduler.ts
@@ -51,6 +51,25 @@ const SCHEDULE_REFILL_THRESHOLD_FRACTION = 0.5;
 const SCHEDULE_REFILL_MIN_THRESHOLD_SEC = 0.1;
 const SCHEDULE_REFILL_MAX_THRESHOLD_SEC = 5;

+export interface AudioSchedulerOptions {
+ stateManager: StateManager;
+ timeFilter: SendspinTimeFilter;
+ outputMode?: AudioOutputMode;
+ audioElement?: HTMLAudioElement;
+ isAndroid?: boolean;
+ isCastRuntime?: boolean;
+ ownsAudioElement?: boolean;
+ silentAudioSrc?: string;
+ syncDelayMs?: number;
+ useHardwareVolume?: boolean;
+ correctionMode?: CorrectionMode;
+ storage?: SendspinStorage | null;
+ useOutputLatencyCompensation?: boolean;
+ correctionThresholds?: Partial<
+ Record<CorrectionMode, Partial<CorrectionThresholds>>
+ >;
+}
+
 const DEFAULT_CORRECTION_THRESHOLDS: Record<
 CorrectionMode,
 CorrectionThresholds
@@ -126,30 +145,35 @@ export class AudioScheduler {
 private recorrectionMonitor: RecorrectionMonitor;
 private latencyTracker: OutputLatencyTracker;

- constructor(
- private stateManager: StateManager,
- private timeFilter: SendspinTimeFilter,
- private outputMode: AudioOutputMode = "direct",
- private audioElement?: HTMLAudioElement,
- private isAndroid: boolean = false,
- private isCastRuntime: boolean = false,
- private ownsAudioElement: boolean = false,
- private silentAudioSrc?: string,
- private syncDelayMs: number = 0,
- private useHardwareVolume: boolean = false,
- correctionMode: CorrectionMode = "sync",
- storage: SendspinStorage | null = null,
- useOutputLatencyCompensation: boolean = true,
- thresholdOverrides?: Partial<
- Record<CorrectionMode, Partial<CorrectionThresholds>>
- >,
- ) {
- this._correctionMode = correctionMode;
-
-    this.useOutputLatencyCompensation = useOutputLatencyCompensation;
-    this.syncDelayMs = clampSyncDelayMs(this.syncDelayMs);
+  private stateManager: StateManager;
+  private timeFilter: SendspinTimeFilter;
+  private outputMode: AudioOutputMode;
+  private audioElement?: HTMLAudioElement;
+  private isAndroid: boolean;
+  private isCastRuntime: boolean;
+  private ownsAudioElement: boolean;
+  private silentAudioSrc?: string;
+  private syncDelayMs: number;
+  private useHardwareVolume: boolean;
+
+  constructor(options: AudioSchedulerOptions) {
+    this.stateManager = options.stateManager;
+    this.timeFilter = options.timeFilter;
+    this.outputMode = options.outputMode ?? "direct";
+    this.audioElement = options.audioElement;
+    this.isAndroid = options.isAndroid ?? false;
+    this.isCastRuntime = options.isCastRuntime ?? false;
+    this.ownsAudioElement = options.ownsAudioElement ?? false;
+    this.silentAudioSrc = options.silentAudioSrc;
+    this.syncDelayMs = clampSyncDelayMs(options.syncDelayMs ?? 0);
+    this.useHardwareVolume = options.useHardwareVolume ?? false;
+    this._correctionMode = options.correctionMode ?? "sync";
+    this.useOutputLatencyCompensation =
+      options.useOutputLatencyCompensation ?? true;

     // Merge user-provided threshold overrides with defaults
     this.correctionThresholds = { ...DEFAULT_CORRECTION_THRESHOLDS };
+    const thresholdOverrides = options.correctionThresholds;
     if (thresholdOverrides) {
       for (const mode of Object.keys(thresholdOverrides) as CorrectionMode[]) {
         const overrides = thresholdOverrides[mode];
@@ -162,7 +186,7 @@
       }
     }

-    this.latencyTracker = new OutputLatencyTracker(storage);
+    this.latencyTracker = new OutputLatencyTracker(options.storage ?? null);
     if (this.isCastRuntime) {
       this.clockSource.disableTimestampPromotion();
     }
diff --git a/src/index.ts b/src/index.ts
index 6db7143..0298215 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -122,22 +122,22 @@ export class SendspinPlayer {
       storage = localStorage;
     }

-    this.scheduler = new AudioScheduler(
-      this.core._stateManager,
-      this.core._timeFilter,
+    this.scheduler = new AudioScheduler({
+      stateManager: this.core._stateManager,
+      timeFilter: this.core._timeFilter,
       outputMode,
-      config.audioElement,
+      audioElement: config.audioElement,
       isAndroid,
       isCastRuntime,
-      this.ownsAudioElement,
-      isAndroid ? SILENT_AUDIO_SRC : undefined,
-      syncDelay,
-      config.useHardwareVolume ?? false,
-      config.correctionMode ?? "sync",
+      ownsAudioElement: this.ownsAudioElement,
+      silentAudioSrc: isAndroid ? SILENT_AUDIO_SRC : undefined,
+      syncDelayMs: syncDelay,
+      useHardwareVolume: config.useHardwareVolume ?? false,
+      correctionMode: config.correctionMode ?? "sync",
       storage,
-      config.useOutputLatencyCompensation ?? true,
-      config.correctionThresholds,
-    );
+      useOutputLatencyCompensation: config.useOutputLatencyCompensation ?? true,
+      correctionThresholds: config.correctionThresholds,
+    });

     // Wire core events to scheduler
     this.core.onAudioData = (chunk) => {

From f7d18b856085aa6c103a4c2c163d9823fc7ead17 Mon Sep 17 00:00:00 2001
From: Maxim Raznatovski
Date: Mon, 20 Apr 2026 11:13:08 +0200
Subject: [PATCH 25/27] docs: document new SDK surfaces in README

Add examples for the adopted-WebSocket path, correctionThresholds
overrides, and using SendspinCore standalone for decoded-PCM consumers.
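For example, a visualizer-style consumer can wrap each DecodedAudioChunk in
an AudioBuffer (sketch only; the AudioContext/AnalyserNode plumbing below is
illustrative and not part of the SDK — only the SendspinCore surface matches
the README diff):

```typescript
import { SendspinCore } from '@sendspin/sendspin-js';

const core = new SendspinCore({ baseUrl: 'http://your-server:8095' });
const ctx = new AudioContext();
const analyser = ctx.createAnalyser();

core.onAudioData = (chunk) => {
  const frames = chunk.samples[0]?.length ?? 0;
  if (frames === 0) return;
  // Wrap the per-channel Float32Arrays in a Web Audio buffer for analysis.
  const buffer = ctx.createBuffer(chunk.samples.length, frames, chunk.sampleRate);
  chunk.samples.forEach((channel, i) => buffer.copyToChannel(channel, i));
  const src = ctx.createBufferSource();
  src.buffer = buffer;
  src.connect(analyser); // analysis only; AudioScheduler owns synced playback
  src.start();
};

await core.connect();
```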
---
 README.md | 59 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 59 insertions(+)

diff --git a/README.md b/README.md
index 2e5412d..fac2a3a 100644
--- a/README.md
+++ b/README.md
@@ -65,6 +65,65 @@ player.sendCommand('switch'); // Switch group
 player.disconnect('user_request');
 ```

+## Advanced configuration
+
+### Bring your own WebSocket
+
+Provide an already-open (or CONNECTING) `WebSocket` via `webSocket` to let the
+player adopt it instead of creating a new one. Useful when the connection is
+managed by a surrounding app framework. Auto-reconnect is disabled for adopted
+sockets.
+
+```typescript
+const ws = new WebSocket('ws://your-server:8095/sendspin');
+const player = new SendspinPlayer({
+  playerId: 'my-player',
+  clientName: 'My Player',
+  webSocket: ws,
+});
+await player.connect();
+```
+
+### Tuning correction thresholds
+
+Override the per-mode thresholds that control when/how the scheduler corrects
+drift. Unspecified fields keep their defaults.
+
+```typescript
+const player = new SendspinPlayer({
+  baseUrl: 'http://your-server:8095',
+  correctionMode: 'sync',
+  correctionThresholds: {
+    sync: {
+      resyncAboveMs: 400, // tolerate more drift before hard resync
+      deadbandBelowMs: 2, // ignore errors under 2ms
+    },
+  },
+});
+```
+
+### Core + scheduler as separate layers
+
+Apps that need the decoded PCM stream (e.g. visualizers) can use
+`SendspinCore` on its own and skip the playback layer. `SendspinCore` emits
+`DecodedAudioChunk` events; `AudioScheduler` is the Web Audio consumer that
+`SendspinPlayer` wires for you.
+
+```typescript
+import { SendspinCore } from '@sendspin/sendspin-js';
+
+const core = new SendspinCore({
+  baseUrl: 'http://your-server:8095',
+});
+
+core.onAudioData = (chunk) => {
+  // chunk.samples: Float32Array per channel
+  // chunk.sampleRate, chunk.serverTimeUs, chunk.generation
+};
+
+await core.connect();
+```
+
 ## Local development

 ```

From 9ac8e3d4962820ae29cb1ae9d20ab6d20b19fd87 Mon Sep 17 00:00:00 2001
From: Maxim Raznatovski
Date: Mon, 20 Apr 2026 11:13:19 +0200
Subject: [PATCH 26/27] feat(sample-player): expose correctionThresholds tuning

Add an Advanced section with resyncAboveMs / deadbandBelowMs inputs that
feed into correctionThresholds on connect, persisted in localStorage.
---
 index.html    | 31 +++++++++++++++++++++++++++++
 public/app.js | 55 +++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 86 insertions(+)

diff --git a/index.html b/index.html
index 65db424..02875a0 100644
--- a/index.html
+++ b/index.html
@@ -261,6 +261,37 @@
       <h2>Local Controls</h2>

       so it may drift vs. other devices.
+      <details class="advanced">
+        <summary>Advanced: correction thresholds</summary>
+        <p class="hint">
+          Override per-mode tuning for the active correction mode.
+          Applied on connect — reconnect to apply changes.
+        </p>
+        <div class="control-row">
+          <label for="resync-threshold">Resync above (ms)</label>
+          <input
+            type="number"
+            id="resync-threshold"
+            min="0"
+            step="1"
+          />
+          <span class="hint">
+            Hard resync when smoothed sync error exceeds this.
+          </span>
+        </div>
+        <div class="control-row">
+          <label for="deadband-threshold">Deadband below (ms)</label>
+          <input
+            type="number"
+            id="deadband-threshold"
+            min="0"
+            step="0.5"
+          />
+          <span class="hint">
+            Ignore errors smaller than this — no correction.
+          </span>
+        </div>
+      </details>

     </div>
   </div>
diff --git a/public/app.js b/public/app.js
index a9207cf..6be5100 100644
--- a/public/app.js
+++ b/public/app.js
@@ -24,6 +24,8 @@ const STORAGE_KEYS = {
   MUTED: "sendspin-muted",
   SYNC_DELAY: "sendspin-sync-delay",
   CORRECTION_MODE: "sendspin-correction-mode",
+  RESYNC_THRESHOLD: "sendspin-resync-threshold",
+  DEADBAND_THRESHOLD: "sendspin-deadband-threshold",
 };

 // DOM Elements
@@ -39,6 +41,8 @@ const muteIcon = document.getElementById("mute-icon");
 const syncDelayInput = document.getElementById("sync-delay");
 const applySyncDelayBtn = document.getElementById("apply-sync-delay");
 const correctionModeSelect = document.getElementById("correction-mode");
+const resyncThresholdInput = document.getElementById("resync-threshold");
+const deadbandThresholdInput = document.getElementById("deadband-threshold");
 const groupVolumeSlider = document.getElementById("group-volume-slider");
 const groupVolumeValue = document.getElementById("group-volume-value");
 const groupMuteBtn = document.getElementById("group-mute-btn");
@@ -426,6 +430,20 @@ function loadSettings() {
   if (savedCorrectionMode !== null) {
     correctionModeSelect.value = savedCorrectionMode;
   }
+
+  const savedResyncThreshold = localStorage.getItem(
+    STORAGE_KEYS.RESYNC_THRESHOLD,
+  );
+  if (savedResyncThreshold !== null && resyncThresholdInput) {
+    resyncThresholdInput.value = savedResyncThreshold;
+  }
+
+  const savedDeadbandThreshold = localStorage.getItem(
+    STORAGE_KEYS.DEADBAND_THRESHOLD,
+  );
+  if (savedDeadbandThreshold !== null && deadbandThresholdInput) {
+    deadbandThresholdInput.value = savedDeadbandThreshold;
+  }
 }

 /**
@@ -456,6 +474,28 @@ function sanitizeSyncDelay(delay) {
   return Math.max(0, Math.min(5000, Math.round(delay)));
 }

+/**
+ * Build a correctionThresholds override from the Advanced inputs.
+ * Returns undefined when no override is set for the current mode.
+ */
+function buildCorrectionThresholds(mode) {
+  const overrides = {};
+  const resyncRaw = resyncThresholdInput?.value;
+  const deadbandRaw = deadbandThresholdInput?.value;
+  const resync = resyncRaw ? parseFloat(resyncRaw) : NaN;
+  const deadband = deadbandRaw ? parseFloat(deadbandRaw) : NaN;
+  if (Number.isFinite(resync) && resync >= 0) {
+    overrides.resyncAboveMs = resync;
+  }
+  if (Number.isFinite(deadband) && deadband >= 0) {
+    overrides.deadbandBelowMs = deadband;
+  }
+  if (Object.keys(overrides).length === 0) {
+    return undefined;
+  }
+  return { [mode]: overrides };
+}
+
 /**
  * Save correction mode to localStorage
  */
@@ -518,12 +558,15 @@ async function connect() {
   const savedCorrectionMode =
     localStorage.getItem(STORAGE_KEYS.CORRECTION_MODE) || "sync";

+  const correctionThresholds = buildCorrectionThresholds(savedCorrectionMode);
+
   player = new SendspinPlayer({
     playerId: getPlayerId(),
     baseUrl: serverUrl,
     clientName: "Sendspin Sample Player",
     syncDelay: sanitizedSyncDelay,
     correctionMode: savedCorrectionMode,
+    correctionThresholds,
     onStateChange,
   });

@@ -684,6 +727,18 @@ function init() {
   muteBtn.addEventListener("click", toggleMute);
   applySyncDelayBtn.addEventListener("click", applySyncDelay);
   correctionModeSelect.addEventListener("change", applyCorrectionMode);
+  resyncThresholdInput?.addEventListener("change", () => {
+    localStorage.setItem(
+      STORAGE_KEYS.RESYNC_THRESHOLD,
+      resyncThresholdInput.value,
+    );
+  });
+  deadbandThresholdInput?.addEventListener("change", () => {
+    localStorage.setItem(
+      STORAGE_KEYS.DEADBAND_THRESHOLD,
+      deadbandThresholdInput.value,
+    );
+  });
   groupVolumeSlider.addEventListener("input", () => {
     player.sendCommand("volume", {
       volume: parseInt(groupVolumeSlider.value, 10),

From 42c8e28a28841ce3db785d17531ce4234c4a7d58 Mon Sep 17 00:00:00 2001
From: Maxim Raznatovski
Date: Mon, 20 Apr 2026 10:32:47 +0200
Subject: [PATCH 27/27] fix: clear hard-resync cooldown on mid-stream playback reset

---
 src/audio/recorrection-monitor.ts | 5 +++++
 src/audio/scheduler.ts            | 1 +
 2 files changed, 6 insertions(+)

diff --git a/src/audio/recorrection-monitor.ts b/src/audio/recorrection-monitor.ts
index b691091..4596139 100644
--- a/src/audio/recorrection-monitor.ts
+++ b/src/audio/recorrection-monitor.ts
@@ -70,6 +70,11 @@
     this.prevRawSyncErrorMs = null;
   }

+  clearHardResyncCooldown(): void {
+    this._hardResyncGraceUntilMs = null;
+    this._lastHardResyncAtMs = -Infinity;
+  }
+
   armStartupGrace(nowMs: number, isTimestampClock: boolean): void {
     if (isTimestampClock) {
       this._hardResyncGraceUntilMs = null;
diff --git a/src/audio/scheduler.ts b/src/audio/scheduler.ts
index 0a27532..253f279 100644
--- a/src/audio/scheduler.ts
+++ b/src/audio/scheduler.ts
@@ -251,6 +251,7 @@
     this.nextScheduleTime = 0;
     this.lastScheduledServerTime = 0;
     this.recorrectionMonitor.clearMinScheduleTime();
+    this.recorrectionMonitor.clearHardResyncCooldown();
     this.clockSource.pendingCutover = false;
     this.recorrectionMonitor.resetCheckState();
     this.resetSyncErrorEma();
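For context on the fix above, a condensed model of the cooldown state being
cleared (the class and its `canHardResync`/`recordHardResync` methods are an
illustrative sketch; only `clearHardResyncCooldown` mirrors the actual diff):

```typescript
// Standalone sketch of the hard-resync cooldown interaction.
class HardResyncCooldownSketch {
  private lastHardResyncAtMs = -Infinity;
  private graceUntilMs: number | null = null;

  // Illustrative guard: hard resyncs are suppressed inside the grace
  // window and rate-limited after a previous hard resync.
  canHardResync(nowMs: number, cooldownMs: number): boolean {
    if (this.graceUntilMs !== null && nowMs < this.graceUntilMs) return false;
    return nowMs - this.lastHardResyncAtMs >= cooldownMs;
  }

  recordHardResync(nowMs: number): void {
    this.lastHardResyncAtMs = nowMs;
  }

  // Mirrors RecorrectionMonitor.clearHardResyncCooldown(): after a
  // mid-stream playback reset the timeline starts over, so stale
  // cooldown state must not delay the first resync of the new stream.
  clearHardResyncCooldown(): void {
    this.graceUntilMs = null;
    this.lastHardResyncAtMs = -Infinity;
  }
}
```

Without the scheduler-side call, a hard resync landing just before a playback
reset could leave the monitor refusing corrections at the start of the new
timeline, which is exactly the drift window this patch closes.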