diff --git a/README.md b/README.md
index 2e5412d..fac2a3a 100644
--- a/README.md
+++ b/README.md
@@ -65,6 +65,65 @@ player.sendCommand('switch'); // Switch group
player.disconnect('user_request');
```
+## Advanced configuration
+
+### Bring your own WebSocket
+
+Provide an already-open (or CONNECTING) `WebSocket` via `webSocket` to let the
+player adopt it instead of creating a new one. Useful when the connection is
+managed by a surrounding app framework. Auto-reconnect is disabled for adopted
+sockets.
+
+```typescript
+const ws = new WebSocket('ws://your-server:8095/sendspin');
+const player = new SendspinPlayer({
+ playerId: 'my-player',
+ clientName: 'My Player',
+ webSocket: ws,
+});
+await player.connect();
+```
+
+### Tuning correction thresholds
+
+Override the per-mode thresholds that control when/how the scheduler corrects
+drift. Unspecified fields keep their defaults.
+
+```typescript
+const player = new SendspinPlayer({
+ baseUrl: 'http://your-server:8095',
+ correctionMode: 'sync',
+ correctionThresholds: {
+ sync: {
+ resyncAboveMs: 400, // tolerate more drift before hard resync
+ deadbandBelowMs: 2, // ignore errors under 2ms
+ },
+ },
+});
+```
+
+### Core + scheduler as separate layers
+
+Apps that need the decoded PCM stream (e.g. visualizers) can use
+`SendspinCore` on its own and skip the playback layer. `SendspinCore` invokes
+its `onAudioData` callback with `DecodedAudioChunk` objects; `AudioScheduler`
+is the Web Audio consumer that `SendspinPlayer` wires up for you.
+
+```typescript
+import { SendspinCore } from '@sendspin/sendspin-js';
+
+const core = new SendspinCore({
+ baseUrl: 'http://your-server:8095',
+});
+
+core.onAudioData = (chunk) => {
+ // chunk.samples: Float32Array per channel
+ // chunk.sampleRate, chunk.serverTimeUs, chunk.generation
+};
+
+await core.connect();
+```
+
## Local development
```
diff --git a/index.html b/index.html
index 65db424..02875a0 100644
--- a/index.html
+++ b/index.html
@@ -261,6 +261,37 @@
Local Controls
so it may drift vs. other devices.
+
+ Advanced: correction thresholds
+
+ Override per-mode tuning for the active correction mode.
+ Applied on connect — reconnect to apply changes.
+
+
+ Resync above (ms)
+
+ Hard resync when smoothed sync error exceeds this.
+
+
+ Deadband below (ms)
+
+ Ignore errors smaller than this — no correction.
+
+
diff --git a/public/app.js b/public/app.js
index a9207cf..6be5100 100644
--- a/public/app.js
+++ b/public/app.js
@@ -24,6 +24,8 @@ const STORAGE_KEYS = {
MUTED: "sendspin-muted",
SYNC_DELAY: "sendspin-sync-delay",
CORRECTION_MODE: "sendspin-correction-mode",
+ RESYNC_THRESHOLD: "sendspin-resync-threshold",
+ DEADBAND_THRESHOLD: "sendspin-deadband-threshold",
};
// DOM Elements
@@ -39,6 +41,8 @@ const muteIcon = document.getElementById("mute-icon");
const syncDelayInput = document.getElementById("sync-delay");
const applySyncDelayBtn = document.getElementById("apply-sync-delay");
const correctionModeSelect = document.getElementById("correction-mode");
+const resyncThresholdInput = document.getElementById("resync-threshold");
+const deadbandThresholdInput = document.getElementById("deadband-threshold");
const groupVolumeSlider = document.getElementById("group-volume-slider");
const groupVolumeValue = document.getElementById("group-volume-value");
const groupMuteBtn = document.getElementById("group-mute-btn");
@@ -426,6 +430,20 @@ function loadSettings() {
if (savedCorrectionMode !== null) {
correctionModeSelect.value = savedCorrectionMode;
}
+
+ const savedResyncThreshold = localStorage.getItem(
+ STORAGE_KEYS.RESYNC_THRESHOLD,
+ );
+ if (savedResyncThreshold !== null && resyncThresholdInput) {
+ resyncThresholdInput.value = savedResyncThreshold;
+ }
+
+ const savedDeadbandThreshold = localStorage.getItem(
+ STORAGE_KEYS.DEADBAND_THRESHOLD,
+ );
+ if (savedDeadbandThreshold !== null && deadbandThresholdInput) {
+ deadbandThresholdInput.value = savedDeadbandThreshold;
+ }
}
/**
@@ -456,6 +474,28 @@ function sanitizeSyncDelay(delay) {
return Math.max(0, Math.min(5000, Math.round(delay)));
}
+/**
+ * Build a correctionThresholds override (keyed by mode) from the Advanced inputs.
+ * Returns undefined when neither input holds a valid non-negative number.
+ */
+function buildCorrectionThresholds(mode) {
+ const overrides = {};
+ const resyncRaw = resyncThresholdInput?.value;
+ const deadbandRaw = deadbandThresholdInput?.value;
+ const resync = resyncRaw ? parseFloat(resyncRaw) : NaN;
+ const deadband = deadbandRaw ? parseFloat(deadbandRaw) : NaN;
+ if (Number.isFinite(resync) && resync >= 0) {
+ overrides.resyncAboveMs = resync;
+ }
+ if (Number.isFinite(deadband) && deadband >= 0) {
+ overrides.deadbandBelowMs = deadband;
+ }
+ if (Object.keys(overrides).length === 0) {
+ return undefined;
+ }
+ return { [mode]: overrides };
+}
+
/**
* Save correction mode to localStorage
*/
@@ -518,12 +558,15 @@ async function connect() {
const savedCorrectionMode =
localStorage.getItem(STORAGE_KEYS.CORRECTION_MODE) || "sync";
+ const correctionThresholds = buildCorrectionThresholds(savedCorrectionMode);
+
player = new SendspinPlayer({
playerId: getPlayerId(),
baseUrl: serverUrl,
clientName: "Sendspin Sample Player",
syncDelay: sanitizedSyncDelay,
correctionMode: savedCorrectionMode,
+ correctionThresholds,
onStateChange,
});
@@ -684,6 +727,18 @@ function init() {
muteBtn.addEventListener("click", toggleMute);
applySyncDelayBtn.addEventListener("click", applySyncDelay);
correctionModeSelect.addEventListener("change", applyCorrectionMode);
+ resyncThresholdInput?.addEventListener("change", () => {
+ localStorage.setItem(
+ STORAGE_KEYS.RESYNC_THRESHOLD,
+ resyncThresholdInput.value,
+ );
+ });
+ deadbandThresholdInput?.addEventListener("change", () => {
+ localStorage.setItem(
+ STORAGE_KEYS.DEADBAND_THRESHOLD,
+ deadbandThresholdInput.value,
+ );
+ });
groupVolumeSlider.addEventListener("input", () => {
player.sendCommand("volume", {
volume: parseInt(groupVolumeSlider.value, 10),
diff --git a/src/audio-processor.ts b/src/audio-processor.ts
deleted file mode 100644
index 8611336..0000000
--- a/src/audio-processor.ts
+++ /dev/null
@@ -1,2385 +0,0 @@
-import type {
- AudioBufferQueueItem,
- StreamFormat,
- AudioOutputMode,
- CorrectionMode,
- SendspinStorage,
-} from "./types";
-import type { StateManager } from "./state-manager";
-import type { SendspinTimeFilter } from "./time-filter";
-
-// Sync correction constants
-const SAMPLE_CORRECTION_FADE_LEN = 8; // samples to blend around correction points
-// Blend budget across the whole fade window.
-// We derive per-sample strength from fade length so longer fades become gentler.
-// 1.0 means the whole fade applies roughly a full-strength blend in total.
-const SAMPLE_CORRECTION_TARGET_BLEND_SUM = 1.0;
-const SAMPLE_CORRECTION_FADE_STRENGTH = Math.min(
- 1,
- (2 * SAMPLE_CORRECTION_TARGET_BLEND_SUM) / SAMPLE_CORRECTION_FADE_LEN,
-);
-const SAMPLE_CORRECTION_FADE_ALPHAS = new Float32Array(
- SAMPLE_CORRECTION_FADE_LEN,
-);
-for (let f = 0; f < SAMPLE_CORRECTION_FADE_LEN; f++) {
- SAMPLE_CORRECTION_FADE_ALPHAS[f] =
- ((SAMPLE_CORRECTION_FADE_LEN - f) / (SAMPLE_CORRECTION_FADE_LEN + 1)) *
- SAMPLE_CORRECTION_FADE_STRENGTH;
-}
-const OUTPUT_LATENCY_ALPHA = 0.01; // EMA smoothing factor for outputLatency
-const SYNC_ERROR_ALPHA = 0.1; // EMA smoothing factor for sync error (filters jitter)
-const OUTPUT_LATENCY_STORAGE_KEY = "sendspin-output-latency-us"; // LocalStorage key
-const OUTPUT_LATENCY_PERSIST_INTERVAL_MS = 10_000;
-const RECORRECTION_CHECK_INTERVAL_MS = 250;
-const RECORRECTION_TRIGGER_MS = 30;
-const RECORRECTION_SUSTAIN_MS = 400;
-const RECORRECTION_COOLDOWN_MS = 1_500;
-const RECORRECTION_CUTOVER_GUARD_SEC = 0.3;
-const RECORRECTION_TRANSIENT_JUMP_MS = 25;
-const RECORRECTION_TRANSIENT_CONFIRM_WINDOW_MS =
- RECORRECTION_CHECK_INTERVAL_MS * 4;
-const HARD_RESYNC_STARTUP_GRACE_MS = 1_000;
-const HARD_RESYNC_COOLDOWN_MS = 500;
-const SCHEDULE_HEADROOM_SEC = 0.2;
-const SCHEDULE_HORIZON_PRECISE_SEC = 20;
-const SCHEDULE_HORIZON_GOOD_SEC = 8;
-const SCHEDULE_HORIZON_POOR_SEC = 4;
-const CAST_SCHEDULE_HORIZON_SEC = 1.5;
-const SCHEDULE_HORIZON_PRECISE_ERROR_MS = 2;
-const SCHEDULE_HORIZON_GOOD_ERROR_MS = 8;
-const SCHEDULE_REFILL_THRESHOLD_FRACTION = 0.5;
-const SCHEDULE_REFILL_MIN_THRESHOLD_SEC = 0.1;
-const SCHEDULE_REFILL_MAX_THRESHOLD_SEC = 5;
-type AudioClockSource = "estimated" | "timestamp" | "raw";
-
-interface OutputTimestampSample {
- contextTimeSec: number;
- performanceTimeMs: number;
- nowMs: number;
- predictedAudioTimeSec: number;
- rawAudioTimeSec: number;
-}
-
-const OUTPUT_TIMESTAMP_MAX_FRESHNESS_MS = 250;
-const OUTPUT_TIMESTAMP_MIN_SAMPLE_INTERVAL_MS = 40;
-const OUTPUT_TIMESTAMP_SLOPE_MIN = 0.95;
-const OUTPUT_TIMESTAMP_SLOPE_MAX = 1.05;
-const OUTPUT_TIMESTAMP_MAX_DIVERGENCE_SEC = 0.25;
-const OUTPUT_TIMESTAMP_MAX_DIVERGENCE_DELTA_SEC = 0.05;
-const OUTPUT_TIMESTAMP_MAX_BACKWARD_SEC = 0.005;
-const OUTPUT_TIMESTAMP_FUTURE_TOLERANCE_MS = 5;
-const OUTPUT_TIMESTAMP_PROMOTION_MIN_GOOD_SAMPLES = 6;
-const OUTPUT_TIMESTAMP_PROMOTION_MIN_SPAN_MS = 750;
-const OUTPUT_TIMESTAMP_MAX_CONSECUTIVE_BAD_SAMPLES = 2;
-
-// Mode-specific sync correction thresholds
-const CORRECTION_THRESHOLDS: Record<
- CorrectionMode,
- {
- resyncAboveMs: number; // ms - hard resync for extreme errors
- rate2AboveMs: number; // ms - use 2% rate above this
- rate1AboveMs: number; // ms - use 1% rate above this
- samplesBelowMs: number; // ms - use sample manipulation below this
- deadbandBelowMs: number; // ms - don't correct if error < this
- enableRecorrectionMonitor: boolean; // Whether recorrection monitor should run in this mode
- immediateDelayCutover: boolean; // Whether runtime static delay should trigger immediate cutover
- }
-> = {
- sync: {
- resyncAboveMs: 200, // Hard resync for large errors
- rate2AboveMs: 35, // Use 2% rate when error exceeds this
- rate1AboveMs: 8, // Use 1% rate when error exceeds this
- samplesBelowMs: 8, // Use sample insertion/deletion below this
- deadbandBelowMs: 1, // Ignore corrections below this
- enableRecorrectionMonitor: true,
- immediateDelayCutover: true,
- },
- quality: {
- resyncAboveMs: 35, // Tighter resync threshold to avoid drifting too far
- rate2AboveMs: Infinity, // Disabled - never use rate correction
- rate1AboveMs: Infinity, // Disabled - never use rate correction
- samplesBelowMs: 35, // Use sample insertion/deletion below this
- deadbandBelowMs: 1, // Keep deadband tight for accurate sync
- enableRecorrectionMonitor: false,
- immediateDelayCutover: false,
- },
- "quality-local": {
- resyncAboveMs: 600, // Last resort only (prefer keeping uninterrupted playback even if out of sync)
- rate2AboveMs: Infinity, // Disabled - never use rate correction
- rate1AboveMs: Infinity, // Disabled - never use rate correction
- samplesBelowMs: 0, // Disabled - never use sample corrections (prioritize smooth local playback)
- deadbandBelowMs: 5, // Larger deadband to avoid frequent small adjustments
- enableRecorrectionMonitor: false,
- immediateDelayCutover: false,
- },
-};
-
-export class AudioProcessor {
- private audioContext: AudioContext | null = null;
- private gainNode: GainNode | null = null;
- private streamDestination: MediaStreamAudioDestinationNode | null = null;
- private audioBufferQueue: AudioBufferQueueItem[] = [];
- private scheduledSources: {
- source: AudioBufferSourceNode;
- startTime: number;
- endTime: number;
- buffer: AudioBuffer;
- serverTime: number;
- generation: number;
- }[] = [];
-
- // Seamless playback tracking
- private nextPlaybackTime: number = 0; // AudioContext time when audio should reach the output
- private nextScheduleTime: number = 0; // AudioContext time for source.start() (delayed, for Web Audio)
- private lastScheduledServerTime: number = 0; // Server timestamp of last scheduled chunk end
-
- // Sync tracking (for debugging/display)
- private currentSyncErrorMs: number = 0;
- private smoothedSyncErrorMs: number = 0; // EMA-filtered sync error for corrections
- private resyncCount: number = 0;
- private currentPlaybackRate: number = 1.0;
- private currentCorrectionMethod: "none" | "samples" | "rate" | "resync" =
- "none";
- private lastSamplesAdjusted: number = 0;
-
- // Output latency smoothing (EMA to filter Chrome jitter)
- private lastRawOutputLatencyUs: number = 0;
- private smoothedOutputLatencyUs: number | null = null;
- private lastLatencyPersistAtMs: number | null = null;
-
- private timingEstimateAudioContextTimeSec: number | null = null;
- private timingEstimateAtMs: number | null = null;
-
- // Correction mode
- private _correctionMode: CorrectionMode = "sync";
-
- // Periodic status logging
- private _lastStatusLogMs: number = 0;
- private _lastTimestampRejectReason: string | null = null;
- private _intervalResyncCount: number = 0;
-
- // Native Opus decoder (uses WebCodecs API)
- private webCodecsDecoder: AudioDecoder | null = null;
- private webCodecsDecoderReady: Promise | null = null;
- private webCodecsFormat: StreamFormat | null = null;
- private useNativeOpus: boolean = true; // false when WebCodecs unavailable
-
- // Fallback Opus decoder (opus-encdec library)
- private opusDecoder: any = null;
- private opusDecoderModule: any = null;
- private opusDecoderReady: Promise | null = null;
-
- private useOutputLatencyCompensation: boolean = true;
- private nativeDecoderQueue: Array<{
- serverTimeUs: number;
- generation: number;
- }> = [];
- private recorrectionInterval: ReturnType | null = null;
- private recorrectionBreachStartedAtMs: number | null = null;
- private lastRecorrectionAtMs: number = -Infinity;
- private recorrectionMinScheduleTimeSec: number | null = null;
- private recorrectionPrevRawSyncErrorMs: number | null = null;
- private recorrectionPendingJumpSign: number | null = null;
- private recorrectionPendingJumpAtMs: number | null = null;
- private hardResyncGraceUntilMs: number | null = null;
- private lastHardResyncAtMs: number = -Infinity;
- private pendingClockSourceCutover = false;
- private activeAudioClockSource: AudioClockSource = "estimated";
- private outputTimestampLastSample: OutputTimestampSample | null = null;
- private outputTimestampGoodSamples: number = 0;
- private outputTimestampBadSamples: number = 0;
- private outputTimestampGoodSinceMs: number | null = null;
-
- constructor(
- private stateManager: StateManager,
- private timeFilter: SendspinTimeFilter,
- private outputMode: AudioOutputMode = "direct",
- private audioElement?: HTMLAudioElement,
- private isAndroid: boolean = false,
- private isCastRuntime: boolean = false,
- private ownsAudioElement: boolean = false,
- private silentAudioSrc?: string,
- private syncDelayMs: number = 0,
- private useHardwareVolume: boolean = false,
- correctionMode: CorrectionMode = "sync",
- private storage: SendspinStorage | null = null,
- useOutputLatencyCompensation: boolean = true,
- ) {
- this._correctionMode = correctionMode;
- this.useOutputLatencyCompensation = useOutputLatencyCompensation;
- this.syncDelayMs = this.sanitizeSyncDelayMs(this.syncDelayMs);
-
- // Load persisted output latency from storage
- this.loadPersistedLatency();
- }
-
- private sanitizeSyncDelayMs(delayMs: number): number {
- if (!isFinite(delayMs)) {
- return 0;
- }
- return Math.max(0, Math.min(5000, Math.round(delayMs)));
- }
-
- // Load persisted output latency from storage
- private loadPersistedLatency(): void {
- if (!this.storage) return;
- try {
- const stored = this.storage.getItem(OUTPUT_LATENCY_STORAGE_KEY);
- if (stored) {
- const latency = parseFloat(stored);
- if (!isNaN(latency) && latency >= 0) {
- this.smoothedOutputLatencyUs = latency;
- }
- }
- } catch {
- // Storage may fail depending on the implementation, ignore errors
- }
- }
-
- // Persist output latency to storage
- private persistLatency(): void {
- if (!this.storage || this.smoothedOutputLatencyUs === null) return;
- try {
- this.storage.setItem(
- OUTPUT_LATENCY_STORAGE_KEY,
- this.smoothedOutputLatencyUs.toString(),
- );
- } catch {
- // Storage may fail depending on the implementation, ignore errors
- }
- }
-
- // Get current correction mode
- get correctionMode(): CorrectionMode {
- return this._correctionMode;
- }
-
- // Set correction mode at runtime
- setCorrectionMode(mode: CorrectionMode): void {
- this._correctionMode = mode;
- if (!this.modeUsesRecorrectionMonitor(mode)) {
- this.stopRecorrectionMonitor();
- } else {
- this.startRecorrectionMonitor();
- }
- }
-
- private modeUsesRecorrectionMonitor(mode: CorrectionMode): boolean {
- return CORRECTION_THRESHOLDS[mode].enableRecorrectionMonitor;
- }
-
- private get usesRecorrectionMonitor(): boolean {
- return this.modeUsesRecorrectionMonitor(this._correctionMode);
- }
-
- private get usesImmediateDelayCutover(): boolean {
- return CORRECTION_THRESHOLDS[this._correctionMode].immediateDelayCutover;
- }
-
- private getTargetScheduledHorizonSec(): number {
- if (this.isCastRuntime) {
- return CAST_SCHEDULE_HORIZON_SEC;
- }
- const errorMs = this.timeFilter.error / 1000;
- if (errorMs < SCHEDULE_HORIZON_PRECISE_ERROR_MS) {
- return SCHEDULE_HORIZON_PRECISE_SEC;
- }
- if (errorMs <= SCHEDULE_HORIZON_GOOD_ERROR_MS) {
- return SCHEDULE_HORIZON_GOOD_SEC;
- }
- return SCHEDULE_HORIZON_POOR_SEC;
- }
-
- private getScheduledAheadSec(currentTimeSec: number): number {
- let farthestScheduledSec = this.nextScheduleTime;
- for (const entry of this.scheduledSources) {
- if (entry.endTime > farthestScheduledSec) {
- farthestScheduledSec = entry.endTime;
- }
- }
- if (farthestScheduledSec <= 0) {
- return 0;
- }
- return Math.max(0, farthestScheduledSec - currentTimeSec);
- }
-
- private setActiveAudioClockSource(source: AudioClockSource): void {
- if (this.activeAudioClockSource === source) {
- return;
- }
- this.activeAudioClockSource = source;
- this.pendingClockSourceCutover = source === "timestamp";
- if (
- this.pendingClockSourceCutover &&
- (this.scheduledSources.length > 0 ||
- this.nextPlaybackTime !== 0 ||
- this.lastScheduledServerTime !== 0)
- ) {
- this.scheduleQueueProcessing();
- }
- }
-
- private resetOutputTimestampValidation(): void {
- this.activeAudioClockSource = "estimated";
- this.pendingClockSourceCutover = false;
- this.outputTimestampLastSample = null;
- this.outputTimestampGoodSamples = 0;
- this._lastTimestampRejectReason = null;
- this.outputTimestampBadSamples = 0;
- this.outputTimestampGoodSinceMs = null;
- }
-
- private demoteOutputTimestampValidation(reason: string): void {
- this.resetOutputTimestampValidation();
- this._lastTimestampRejectReason = reason;
- }
-
- private getEstimatedAudioContextTimeSec(
- rawTimeSec: number,
- nowMs: number,
- ): number {
- // Fallback: de-quantize `currentTime` using wall clock and slew toward the raw value.
- // Key goal: avoid discrete ~10/20ms jumps in derived audio time.
- const TIMING_MAX_SLEW_SEC = 0.002; // max correction per snapshot (2ms)
- const TIMING_RESET_THRESHOLD_SEC = 0.5; // snap if mapping is clearly invalid
- const TIMING_MAX_LEAD_SEC = 0.1; // don't run far ahead of raw time
-
- if (this.timingEstimateAudioContextTimeSec === null) {
- this.timingEstimateAudioContextTimeSec = rawTimeSec;
- this.timingEstimateAtMs = nowMs;
- } else if (this.timingEstimateAtMs !== null) {
- const wallDeltaSec = Math.max(
- 0,
- (nowMs - this.timingEstimateAtMs) / 1000,
- );
- const predicted = this.timingEstimateAudioContextTimeSec + wallDeltaSec;
- this.timingEstimateAtMs = nowMs;
-
- const errorSec = rawTimeSec - predicted;
- if (Math.abs(errorSec) > TIMING_RESET_THRESHOLD_SEC) {
- this.timingEstimateAudioContextTimeSec = rawTimeSec;
- } else {
- const slew = Math.max(
- -TIMING_MAX_SLEW_SEC,
- Math.min(TIMING_MAX_SLEW_SEC, errorSec),
- );
- // Keep monotonic and bounded vs raw time.
- const next = Math.max(
- this.timingEstimateAudioContextTimeSec,
- predicted + slew,
- );
- this.timingEstimateAudioContextTimeSec = Math.min(
- next,
- rawTimeSec + TIMING_MAX_LEAD_SEC,
- );
- }
- }
-
- return this.timingEstimateAudioContextTimeSec ?? rawTimeSec;
- }
-
- private rejectOutputTimestampSample(
- reason: string,
- catastrophic: boolean = false,
- ): void {
- this.outputTimestampLastSample = null;
- this.outputTimestampGoodSamples = 0;
- this.outputTimestampGoodSinceMs = null;
- this._lastTimestampRejectReason = reason;
-
- if (this.activeAudioClockSource !== "timestamp") {
- this.outputTimestampBadSamples = 0;
- return;
- }
-
- this.outputTimestampBadSamples += 1;
- if (
- catastrophic ||
- this.outputTimestampBadSamples >=
- OUTPUT_TIMESTAMP_MAX_CONSECUTIVE_BAD_SAMPLES
- ) {
- this.demoteOutputTimestampValidation(reason);
- }
- }
-
- private getTimestampDerivedAudioTimeSec(rawTimeSec: number): number | null {
- if (this.isCastRuntime) {
- if (
- this.activeAudioClockSource !== "estimated" ||
- this.outputTimestampLastSample !== null ||
- this.outputTimestampGoodSamples !== 0 ||
- this._lastTimestampRejectReason !== null
- ) {
- this.resetOutputTimestampValidation();
- }
- return null;
- }
-
- if (!this.audioContext) {
- return null;
- }
-
- const getOutputTimestamp = (
- this.audioContext as unknown as {
- getOutputTimestamp?: () => {
- contextTime: number;
- performanceTime: number;
- };
- }
- ).getOutputTimestamp;
-
- if (typeof getOutputTimestamp !== "function") {
- if (this.activeAudioClockSource === "timestamp") {
- this.demoteOutputTimestampValidation("getOutputTimestamp unavailable");
- }
- return null;
- }
-
- try {
- const ts = getOutputTimestamp.call(this.audioContext);
- // Sample performance.now() after getOutputTimestamp() so we validate the
- // timestamp against a contemporaneous wall-clock reading instead of an
- // earlier one taken before the browser produced the timestamp snapshot.
- const nowMs = performance.now();
- const rawFreshnessMs = nowMs - ts.performanceTime;
- if (rawFreshnessMs < -OUTPUT_TIMESTAMP_FUTURE_TOLERANCE_MS) {
- this.rejectOutputTimestampSample(
- `performanceTime in future (${rawFreshnessMs.toFixed(1)}ms)`,
- true,
- );
- return null;
- }
-
- const freshnessMs = Math.max(0, rawFreshnessMs);
- const predictedAudioTimeSec = ts.contextTime + freshnessMs / 1000;
- const sample: OutputTimestampSample = {
- contextTimeSec: ts.contextTime,
- performanceTimeMs: ts.performanceTime,
- nowMs,
- predictedAudioTimeSec,
- rawAudioTimeSec: rawTimeSec,
- };
-
- if (freshnessMs > OUTPUT_TIMESTAMP_MAX_FRESHNESS_MS) {
- this.rejectOutputTimestampSample(
- `stale timestamp (${freshnessMs.toFixed(1)}ms old)`,
- true,
- );
- return null;
- }
-
- const divergenceSec = predictedAudioTimeSec - rawTimeSec;
- if (Math.abs(divergenceSec) > OUTPUT_TIMESTAMP_MAX_DIVERGENCE_SEC) {
- this.rejectOutputTimestampSample(
- `timestamp/raw divergence ${Math.abs(divergenceSec * 1000).toFixed(1)}ms`,
- true,
- );
- return null;
- }
-
- const lastSample = this.outputTimestampLastSample;
- if (lastSample) {
- const perfDeltaMs = ts.performanceTime - lastSample.performanceTimeMs;
- if (perfDeltaMs < 0) {
- this.rejectOutputTimestampSample(
- `performanceTime moved backward (${perfDeltaMs.toFixed(1)}ms)`,
- true,
- );
- return null;
- }
-
- if (
- predictedAudioTimeSec <
- lastSample.predictedAudioTimeSec - OUTPUT_TIMESTAMP_MAX_BACKWARD_SEC
- ) {
- this.rejectOutputTimestampSample(
- `predicted audio time moved backward ${((lastSample.predictedAudioTimeSec - predictedAudioTimeSec) * 1000).toFixed(1)}ms`,
- true,
- );
- return null;
- }
-
- const lastDivergenceSec =
- lastSample.predictedAudioTimeSec - lastSample.rawAudioTimeSec;
- if (
- Math.abs(divergenceSec - lastDivergenceSec) >
- OUTPUT_TIMESTAMP_MAX_DIVERGENCE_DELTA_SEC
- ) {
- this.rejectOutputTimestampSample(
- `timestamp/raw divergence drift ${Math.abs((divergenceSec - lastDivergenceSec) * 1000).toFixed(1)}ms`,
- );
- return null;
- }
-
- if (perfDeltaMs >= OUTPUT_TIMESTAMP_MIN_SAMPLE_INTERVAL_MS) {
- const perfDeltaSec = perfDeltaMs / 1000;
- const contextSlope =
- (ts.contextTime - lastSample.contextTimeSec) / perfDeltaSec;
- const predictedSlope =
- (predictedAudioTimeSec - lastSample.predictedAudioTimeSec) /
- perfDeltaSec;
-
- if (
- contextSlope < OUTPUT_TIMESTAMP_SLOPE_MIN ||
- contextSlope > OUTPUT_TIMESTAMP_SLOPE_MAX
- ) {
- this.rejectOutputTimestampSample(
- `context slope ${contextSlope.toFixed(3)} out of range`,
- );
- return null;
- }
- if (
- predictedSlope < OUTPUT_TIMESTAMP_SLOPE_MIN ||
- predictedSlope > OUTPUT_TIMESTAMP_SLOPE_MAX
- ) {
- this.rejectOutputTimestampSample(
- `predicted slope ${predictedSlope.toFixed(3)} out of range`,
- );
- return null;
- }
- }
- }
-
- this.outputTimestampLastSample = sample;
- this.outputTimestampBadSamples = 0;
- if (this.outputTimestampGoodSinceMs === null) {
- this.outputTimestampGoodSinceMs = nowMs;
- }
- this.outputTimestampGoodSamples += 1;
-
- if (
- this.activeAudioClockSource !== "timestamp" &&
- this.outputTimestampGoodSamples >=
- OUTPUT_TIMESTAMP_PROMOTION_MIN_GOOD_SAMPLES &&
- this.outputTimestampGoodSinceMs !== null &&
- nowMs - this.outputTimestampGoodSinceMs >=
- OUTPUT_TIMESTAMP_PROMOTION_MIN_SPAN_MS
- ) {
- this.setActiveAudioClockSource("timestamp");
- this._lastTimestampRejectReason = null;
- }
-
- return predictedAudioTimeSec;
- } catch (error) {
- const reason =
- error instanceof Error
- ? `getOutputTimestamp failed: ${error.message}`
- : `getOutputTimestamp failed: ${String(error)}`;
- this.rejectOutputTimestampSample(reason, true);
- return null;
- }
- }
-
- private getTimingSnapshot(): {
- audioContextTimeSec: number; // derived; use for target-time math
- audioContextRawTimeSec: number; // raw; use for comparisons (late drops/headroom)
- nowMs: number;
- nowUs: number;
- } {
- const nowMs = performance.now();
- const nowUs = nowMs * 1000;
- if (!this.audioContext) {
- return {
- audioContextTimeSec: 0,
- audioContextRawTimeSec: 0,
- nowMs,
- nowUs,
- };
- }
-
- const rawTimeSec = this.audioContext.currentTime;
- const estimatedTimeSec = this.getEstimatedAudioContextTimeSec(
- rawTimeSec,
- nowMs,
- );
- const timestampTimeSec = this.getTimestampDerivedAudioTimeSec(rawTimeSec);
-
- let derivedTimeSec =
- this.activeAudioClockSource === "timestamp" && timestampTimeSec !== null
- ? timestampTimeSec
- : estimatedTimeSec;
- if (!Number.isFinite(derivedTimeSec)) {
- derivedTimeSec = rawTimeSec;
- }
-
- return {
- audioContextTimeSec: derivedTimeSec,
- audioContextRawTimeSec: rawTimeSec,
- nowMs,
- nowUs,
- };
- }
-
- private resetScheduledPlaybackState(_reason?: string): void {
- this.nextPlaybackTime = 0;
- this.nextScheduleTime = 0;
- this.lastScheduledServerTime = 0;
- this.recorrectionMinScheduleTimeSec = null;
- this.hardResyncGraceUntilMs = null;
- this.lastHardResyncAtMs = -Infinity;
- this.pendingClockSourceCutover = false;
- this.resetRecorrectionCheckState();
- this.resetSyncErrorEma();
- this.currentSyncErrorMs = 0;
- this.currentPlaybackRate = 1.0;
- this.currentCorrectionMethod = "none";
- this.lastSamplesAdjusted = 0;
- this._lastStatusLogMs = 0;
- this._intervalResyncCount = 0;
- }
-
- private pruneExpiredScheduledSources(currentTimeSec: number): void {
- if (this.scheduledSources.length === 0) {
- return;
- }
-
- this.scheduledSources = this.scheduledSources.filter(
- (entry) => entry.endTime > currentTimeSec,
- );
-
- if (this.scheduledSources.length === 0) {
- this.resetScheduledPlaybackState("no scheduled audio ahead");
- }
- }
-
- private startRecorrectionMonitor(): void {
- if (this.recorrectionInterval !== null) {
- return;
- }
- this.recorrectionInterval = globalThis.setInterval(
- () => this.checkRecorrection(),
- RECORRECTION_CHECK_INTERVAL_MS,
- );
- }
-
- private stopRecorrectionMonitor(): void {
- if (this.recorrectionInterval !== null) {
- clearInterval(this.recorrectionInterval);
- this.recorrectionInterval = null;
- }
- this.resetRecorrectionCheckState();
- this.lastRecorrectionAtMs = -Infinity;
- }
-
- private clearRecorrectionBreachState(): void {
- this.recorrectionBreachStartedAtMs = null;
- this.recorrectionPendingJumpSign = null;
- this.recorrectionPendingJumpAtMs = null;
- }
-
- private resetRecorrectionCheckState(): void {
- this.clearRecorrectionBreachState();
- this.recorrectionPrevRawSyncErrorMs = null;
- }
-
- private armHardResyncStartupGrace(nowMs: number): void {
- if (this.activeAudioClockSource === "timestamp") {
- this.hardResyncGraceUntilMs = null;
- return;
- }
- if (this.hardResyncGraceUntilMs === null) {
- this.hardResyncGraceUntilMs = nowMs + HARD_RESYNC_STARTUP_GRACE_MS;
- }
- }
-
- private canUseHardResync(nowMs: number): boolean {
- if (this.activeAudioClockSource === "timestamp") {
- this.hardResyncGraceUntilMs = null;
- } else if (
- this.hardResyncGraceUntilMs !== null &&
- nowMs < this.hardResyncGraceUntilMs
- ) {
- return false;
- }
-
- return nowMs - this.lastHardResyncAtMs >= HARD_RESYNC_COOLDOWN_MS;
- }
-
- private noteHardResync(nowMs: number): void {
- this.lastHardResyncAtMs = nowMs;
- }
-
- private shouldIgnoreTransientRecorrectionJump(
- rawSyncErrorMs: number,
- nowMs: number,
- ): boolean {
- const prevRawSyncErrorMs = this.recorrectionPrevRawSyncErrorMs;
- this.recorrectionPrevRawSyncErrorMs = rawSyncErrorMs;
-
- if (prevRawSyncErrorMs === null) {
- this.recorrectionPendingJumpSign = null;
- this.recorrectionPendingJumpAtMs = null;
- return false;
- }
-
- const jumpDeltaMs = rawSyncErrorMs - prevRawSyncErrorMs;
- const jumpSign = Math.sign(rawSyncErrorMs);
- const isJumpDetected =
- Math.abs(jumpDeltaMs) >= RECORRECTION_TRANSIENT_JUMP_MS && jumpSign !== 0;
- if (!isJumpDetected) {
- this.recorrectionPendingJumpSign = null;
- this.recorrectionPendingJumpAtMs = null;
- return false;
- }
-
- const isConfirmed =
- this.recorrectionPendingJumpSign === jumpSign &&
- this.recorrectionPendingJumpAtMs !== null &&
- nowMs - this.recorrectionPendingJumpAtMs <=
- RECORRECTION_TRANSIENT_CONFIRM_WINDOW_MS;
- this.recorrectionPendingJumpSign = jumpSign;
- this.recorrectionPendingJumpAtMs = nowMs;
- if (isConfirmed) {
- this.recorrectionPendingJumpSign = null;
- this.recorrectionPendingJumpAtMs = null;
- return false;
- }
-
- return true;
- }
-
- private performGuardedCutover(
- reason: "recorrection" | "delay-change",
- options: {
- incrementResyncCount?: boolean;
- markCooldown?: boolean;
- } = {},
- ): void {
- if (!this.audioContext) {
- return;
- }
-
- const incrementResyncCount = options.incrementResyncCount ?? false;
- const markCooldown = options.markCooldown ?? true;
- const nowMs = performance.now();
- const cutoffTime =
- this.audioContext.currentTime + RECORRECTION_CUTOVER_GUARD_SEC;
- if (incrementResyncCount) {
- this.resyncCount++;
- this._intervalResyncCount++;
- }
- this.resetSyncErrorEma();
- this.currentCorrectionMethod = "resync";
- this.lastSamplesAdjusted = 0;
- this.currentPlaybackRate = 1.0;
- const cutResult = this.cutScheduledSources(cutoffTime);
- this.recorrectionMinScheduleTimeSec = Math.max(
- cutoffTime,
- cutResult.keptTailEndTimeSec,
- );
- this.nextPlaybackTime = 0;
- this.nextScheduleTime = 0;
- this.lastScheduledServerTime = 0;
- this.resetRecorrectionCheckState();
- if (markCooldown) {
- this.lastRecorrectionAtMs = nowMs;
- }
- this.noteHardResync(nowMs);
-
- this.processAudioQueue();
- }
-
- private checkRecorrection(): void {
- if (!this.usesRecorrectionMonitor) {
- this.resetRecorrectionCheckState();
- return;
- }
- if (!this.audioContext || this.audioContext.state !== "running") {
- this.resetRecorrectionCheckState();
- return;
- }
- if (
- !this.stateManager.isPlaying ||
- this.nextPlaybackTime === 0 ||
- this.lastScheduledServerTime === 0
- ) {
- this.resetRecorrectionCheckState();
- return;
- }
-
- const {
- audioContextTimeSec: audioContextTime,
- audioContextRawTimeSec: audioContextRawTime,
- nowMs,
- nowUs,
- } = this.getTimingSnapshot();
- this.pruneExpiredScheduledSources(audioContextRawTime);
- const scheduledAheadSec = this.getScheduledAheadSec(audioContextRawTime);
- if (scheduledAheadSec <= 0) {
- this.resetRecorrectionCheckState();
- if (this.audioBufferQueue.length > 0) {
- this.processAudioQueue();
- }
- return;
- }
-
- const outputLatencySec = this.useOutputLatencyCompensation
- ? this.getSmoothedOutputLatencyUs() / 1_000_000
- : 0;
- const targetPlaybackTime = this.computeTargetPlaybackTime(
- this.lastScheduledServerTime,
- audioContextTime,
- nowUs,
- outputLatencySec,
- );
- const syncErrorMs = (this.nextPlaybackTime - targetPlaybackTime) * 1000;
- const smoothedSyncErrorMs = this.applySyncErrorEma(syncErrorMs);
- const absErrorMs = Math.abs(smoothedSyncErrorMs);
- const isTransientJump = this.shouldIgnoreTransientRecorrectionJump(
- syncErrorMs,
- nowMs,
- );
- if (absErrorMs < RECORRECTION_TRIGGER_MS) {
- this.clearRecorrectionBreachState();
- return;
- }
- if (isTransientJump) {
- this.clearRecorrectionBreachState();
- return;
- }
- if (this.recorrectionBreachStartedAtMs === null) {
- this.recorrectionBreachStartedAtMs = nowMs;
- return;
- }
- if (nowMs - this.recorrectionBreachStartedAtMs < RECORRECTION_SUSTAIN_MS) {
- return;
- }
- if (nowMs - this.lastRecorrectionAtMs < RECORRECTION_COOLDOWN_MS) {
- return;
- }
-
- this.applyRecorrectionCutover();
- }
-
- private applyRecorrectionCutover(): void {
- this.performGuardedCutover("recorrection", {
- incrementResyncCount: true,
- markCooldown: true,
- });
- }
-
- getSyncDelayMs(): number {
- return this.syncDelayMs;
- }
-
- // Refresh expired scheduled-source bookkeeping before measuring how much
- // queued and already-scheduled audio remains available for playback.
- measureBufferedPlaybackRunwaySec(): number {
- if (!this.audioContext) {
- return 0;
- }
-
- const currentTimeSec = this.audioContext.currentTime;
- this.pruneExpiredScheduledSources(currentTimeSec);
- const scheduledAheadSec = this.getScheduledAheadSec(currentTimeSec);
- const queuedAheadSec = this.audioBufferQueue.reduce(
- (totalSec, chunk) => totalSec + chunk.buffer.duration,
- 0,
- );
- return Math.max(0, scheduledAheadSec + queuedAheadSec);
- }
-
- // Update sync delay at runtime
- setSyncDelay(delayMs: number): void {
- const sanitizedDelayMs = this.sanitizeSyncDelayMs(delayMs);
- const oldDelayMs = this.syncDelayMs;
- const deltaMs = sanitizedDelayMs - oldDelayMs;
- this.syncDelayMs = sanitizedDelayMs;
-
- if (deltaMs === 0 || !this.usesImmediateDelayCutover) {
- return;
- }
- if (!this.audioContext || this.audioContext.state !== "running") {
- return;
- }
- if (!this.stateManager.isPlaying) {
- return;
- }
- if (
- this.scheduledSources.length === 0 &&
- this.audioBufferQueue.length === 0 &&
- this.nextPlaybackTime === 0
- ) {
- return;
- }
-
- this.performGuardedCutover("delay-change", {
- incrementResyncCount: false,
- markCooldown: true,
- });
- }
-
- // Get current sync info for debugging/display
- get syncInfo(): {
- clockDriftPercent: number;
- syncErrorMs: number;
- resyncCount: number;
- outputLatencyMs: number;
- playbackRate: number;
- correctionMethod: "none" | "samples" | "rate" | "resync";
- samplesAdjusted: number;
- correctionMode: CorrectionMode;
- } {
- return {
- clockDriftPercent: this.timeFilter.drift * 100,
- syncErrorMs: this.currentSyncErrorMs,
- resyncCount: this.resyncCount,
- outputLatencyMs: this.getRawOutputLatencyUs() / 1000,
- playbackRate: this.currentPlaybackRate,
- correctionMethod: this.currentCorrectionMethod,
- samplesAdjusted: this.lastSamplesAdjusted,
- correctionMode: this._correctionMode,
- };
- }
-
- private emitStatusLog(nowMs: number): void {
- if (this._lastStatusLogMs !== 0 && nowMs - this._lastStatusLogMs < 10_000) {
- return;
- }
- this._lastStatusLogMs = nowMs;
-
- // corr field
- let corr: string;
- switch (this.currentCorrectionMethod) {
- case "rate":
- corr = `rate@${this.currentPlaybackRate}`;
- break;
- case "samples":
- corr = `samples:${this.lastSamplesAdjusted}`;
- break;
- default:
- corr = this.currentCorrectionMethod;
- }
-
- // q field
- const queueDepth =
- this.audioBufferQueue.length + this.scheduledSources.length;
- const aheadSec = this.audioContext
- ? this.getScheduledAheadSec(this.audioContext.currentTime)
- : 0;
-
- // clock field
- let clock: string;
- if (this.isCastRuntime) {
- clock = "estimated(cast-disabled)";
- } else if (this.activeAudioClockSource === "timestamp") {
- clock = `timestamp(good:${this.outputTimestampGoodSamples})`;
- } else if (this._lastTimestampRejectReason) {
- clock = `estimated(reject:"${this._lastTimestampRejectReason}")`;
- } else {
- clock = "estimated";
- }
-
- // tf field
- const tf = this.timeFilter.is_synchronized
- ? `synced(err=${(this.timeFilter.error / 1000).toFixed(1)}ms,drift=${this.timeFilter.drift.toFixed(3)},n=${this.timeFilter.count})`
- : `pending(n=${this.timeFilter.count})`;
-
- // lat field
- const latMs =
- this.smoothedOutputLatencyUs !== null
- ? Math.round(this.smoothedOutputLatencyUs / 1000)
- : 0;
-
- console.log(
- `Sendspin: sync=${this.smoothedSyncErrorMs >= 0 ? "+" : ""}${this.smoothedSyncErrorMs.toFixed(1)}ms` +
- ` corr=${corr}` +
- ` q=${queueDepth}/${aheadSec.toFixed(1)}s` +
- ` resyncs=${this._intervalResyncCount}` +
- ` clock=${clock}` +
- ` tf=${tf}` +
- ` lat=${latMs}ms` +
- ` mode=${this._correctionMode}` +
- ` ctx=${this.audioContext?.state ?? "null"}` +
- ` gen=${this.stateManager.streamGeneration}`,
- );
-
- this._intervalResyncCount = 0;
- }
-
- private applySyncErrorEma(inputMs: number): number {
- this.currentSyncErrorMs = inputMs;
- this.smoothedSyncErrorMs =
- SYNC_ERROR_ALPHA * inputMs +
- (1 - SYNC_ERROR_ALPHA) * this.smoothedSyncErrorMs;
- return this.smoothedSyncErrorMs;
- }
-
- private resetSyncErrorEma(): void {
- this.smoothedSyncErrorMs = 0;
- }
-
- // Get raw output latency in microseconds (for Kalman filter input)
- getRawOutputLatencyUs(): number {
- if (!this.audioContext) return 0;
- const baseLatency = this.audioContext.baseLatency ?? 0;
- const outputLatency = this.audioContext.outputLatency ?? 0;
- const rawUs = (baseLatency + outputLatency) * 1_000_000; // Convert seconds to microseconds
- this.lastRawOutputLatencyUs = rawUs;
- return rawUs;
- }
-
- // Get smoothed output latency in microseconds (filters Chrome jitter)
- getSmoothedOutputLatencyUs(): number {
- const rawLatencyUs = this.getRawOutputLatencyUs();
-
- // Some browsers report 0 until playback is active; treat 0 as "unknown"
- // and keep the last good estimate to avoid poisoning sync.
- if (rawLatencyUs <= 0 && this.smoothedOutputLatencyUs !== null) {
- return this.smoothedOutputLatencyUs;
- }
-
- if (this.smoothedOutputLatencyUs === null) {
- this.smoothedOutputLatencyUs = rawLatencyUs;
- } else {
- this.smoothedOutputLatencyUs =
- OUTPUT_LATENCY_ALPHA * rawLatencyUs +
- (1 - OUTPUT_LATENCY_ALPHA) * this.smoothedOutputLatencyUs;
- }
-
- const nowMs =
- typeof performance !== "undefined" ? performance.now() : Date.now();
- if (
- this.lastLatencyPersistAtMs === null ||
- nowMs - this.lastLatencyPersistAtMs >= OUTPUT_LATENCY_PERSIST_INTERVAL_MS
- ) {
- this.persistLatency();
- this.lastLatencyPersistAtMs = nowMs;
- }
-
- return this.smoothedOutputLatencyUs;
- }
-
- // Reset latency smoother (call on stream change or audio context recreation)
- private resetLatencySmoother(): void {
- this.smoothedOutputLatencyUs = null;
- }
-
- // Create a fresh copy of an AudioBuffer
- // Some decoders produce buffers with boundary artifacts - copying fixes this
- private copyBuffer(buffer: AudioBuffer): AudioBuffer {
- if (!this.audioContext) return buffer;
-
- const newBuffer = this.audioContext.createBuffer(
- buffer.numberOfChannels,
- buffer.length,
- buffer.sampleRate,
- );
-
- for (let ch = 0; ch < buffer.numberOfChannels; ch++) {
- newBuffer.getChannelData(ch).set(buffer.getChannelData(ch));
- }
-
- return newBuffer;
- }
-
- // Adjust buffer by inserting or deleting 1 sample using interpolation
- // Insert: [A, B, ...] → [A, (A+B)/2, B, ...] (at start)
- // Delete: [..., Y, Z] → [..., (Y+Z)/2] (at end)
- private adjustBufferSamples(
- buffer: AudioBuffer,
- samplesToAdjust: number,
- ): AudioBuffer {
- if (!this.audioContext || samplesToAdjust === 0 || buffer.length < 2) {
- return this.copyBuffer(buffer);
- }
-
- const channels = buffer.numberOfChannels;
- const len = buffer.length;
- const sampleRate = buffer.sampleRate;
-
- try {
- if (samplesToAdjust > 0) {
- // Insert 1 sample at START: [A, B, ...] → [A, (A+B)/2, B, ...]
- const newBuffer = this.audioContext.createBuffer(
- channels,
- len + 1,
- sampleRate,
- );
-
- for (let ch = 0; ch < channels; ch++) {
- const oldData = buffer.getChannelData(ch);
- const newData = newBuffer.getChannelData(ch);
-
- newData[0] = oldData[0];
- const insertedSample = (oldData[0] + oldData[1]) / 2;
- newData[1] = insertedSample;
- newData.set(oldData.subarray(1), 2);
-
- // After inserting one synthetic sample, gently pull the next few real samples toward it.
- // This smooths the splice and avoids a hard step immediately after the insertion point.
- for (let f = 0; f < SAMPLE_CORRECTION_FADE_LEN; f++) {
- const pos = 2 + f;
- if (pos >= newData.length) break;
- const alpha = SAMPLE_CORRECTION_FADE_ALPHAS[f];
- newData[pos] = newData[pos] * (1 - alpha) + insertedSample * alpha;
- }
- }
-
- return newBuffer;
- } else {
- // Delete 1 sample at END: [..., Y, Z] → [..., (Y+Z)/2]
- const newBuffer = this.audioContext.createBuffer(
- channels,
- len - 1,
- sampleRate,
- );
-
- for (let ch = 0; ch < channels; ch++) {
- const oldData = buffer.getChannelData(ch);
- const newData = newBuffer.getChannelData(ch);
-
- newData.set(oldData.subarray(0, len - 2));
- const replacementSample = (oldData[len - 2] + oldData[len - 1]) / 2;
- newData[len - 2] = replacementSample;
-
- // Before a deletion collapse, gently pull the preceding samples toward the replacement.
- // This smooths entry into the new boundary formed by skipping one sample.
- for (let f = 0; f < SAMPLE_CORRECTION_FADE_LEN; f++) {
- const pos = len - 3 - f;
- if (pos < 0) break;
- const alpha = SAMPLE_CORRECTION_FADE_ALPHAS[f];
- newData[pos] =
- newData[pos] * (1 - alpha) + replacementSample * alpha;
- }
- }
-
- return newBuffer;
- }
- } catch (e) {
- console.error("Sendspin: adjustBufferSamples error:", e);
- return buffer;
- }
- }
-
- // Initialize AudioContext with platform-specific setup
- initAudioContext(): void {
- if (this.audioContext) {
- return; // Already initialized
- }
-
- if (this.outputMode === "media-element" && this.ownsAudioElement) {
- this.audioElement = document.createElement("audio");
- this.audioElement.style.display = "none";
- document.body.appendChild(this.audioElement);
- }
-
- // Set audio session to "playback" so audio continues when iOS device is muted
- // (iOS 17+, no-op on other platforms)
- if ((navigator as any).audioSession) {
- (navigator as any).audioSession.type = "playback";
- }
-
- const streamSampleRate =
- this.stateManager.currentStreamFormat?.sample_rate || 48000;
- this.audioContext = new AudioContext({ sampleRate: streamSampleRate });
- this.gainNode = this.audioContext.createGain();
-
- const audioElement = this.audioElement;
-
- if (this.outputMode === "direct") {
- // Direct output to audioContext.destination (e.g., Cast receiver)
- this.gainNode.connect(this.audioContext.destination);
- } else {
- if (!audioElement) {
- throw new Error(
- "Media-element output requires an audio element to be available during initialization.",
- );
- }
-
- if (this.isAndroid && this.silentAudioSrc) {
- // Android MediaSession workaround: Play almost-silent audio file
- // Android browsers don't support MediaSession with MediaStream from Web Audio API
- // Solution: Loop almost-silent audio to keep MediaSession active
- // Real audio plays through Web Audio API → audioContext.destination
- this.gainNode.connect(this.audioContext.destination);
-
- // Use almost-silent audio file to trick Android into showing MediaSession
- audioElement.src = this.silentAudioSrc;
- audioElement.loop = true;
- // CRITICAL: Do NOT mute - Android requires audible audio for MediaSession
- audioElement.muted = false;
- // Set volume to 100% (the file itself is almost silent)
- audioElement.volume = 1.0;
- // Start playing to activate MediaSession
- audioElement.play().catch((e) => {
- console.warn("Sendspin: Audio autoplay blocked:", e);
- });
- } else {
- // iOS/Desktop: Use MediaStream approach for background playback
- // Create MediaStreamDestination to bridge Web Audio API to HTML5 audio element
- this.streamDestination =
- this.audioContext.createMediaStreamDestination();
- this.gainNode.connect(this.streamDestination);
- // Do NOT connect to audioContext.destination to avoid echo
-
- // Connect to HTML5 audio element for iOS background playback
- audioElement.srcObject = this.streamDestination.stream;
- audioElement.volume = 1.0;
- // Start playing to activate MediaSession
- audioElement.play().catch((e) => {
- console.warn("Sendspin: Audio autoplay blocked:", e);
- });
- }
- }
-
- this.updateVolume();
- if (this.usesRecorrectionMonitor) {
- this.startRecorrectionMonitor();
- }
- }
-
- // Resume AudioContext if suspended (required for browser autoplay policies)
- async resumeAudioContext(): Promise<void> {
- if (this.audioContext && this.audioContext.state === "suspended") {
- try {
- await this.audioContext.resume();
- console.log("Sendspin: AudioContext resumed");
- } catch (e) {
- console.warn("Sendspin: Failed to resume AudioContext:", e);
- return;
- }
-
- if (this.audioBufferQueue.length > 0) {
- this.scheduleQueueProcessing();
- }
- if (this.usesRecorrectionMonitor) {
- this.startRecorrectionMonitor();
- }
- }
- }
-
- private cutScheduledSources(cutoffTime: number): {
- requeuedCount: number;
- cutCount: number;
- keptTailEndTimeSec: number;
- } {
- if (!this.audioContext) {
- return {
- requeuedCount: 0,
- cutCount: 0,
- keptTailEndTimeSec: 0,
- };
- }
- const stopTime = Math.max(cutoffTime, this.audioContext.currentTime);
- let requeued = 0;
- let cutCount = 0;
- let keptTailEndTimeSec = 0;
- this.scheduledSources = this.scheduledSources.filter((entry) => {
- // Keep sources scheduled before stopTime to avoid cutting mid-buffer artifacts.
- if (entry.startTime < stopTime) {
- keptTailEndTimeSec = Math.max(keptTailEndTimeSec, entry.endTime);
- return true;
- }
- try {
- entry.source.onended = null;
- entry.source.stop(stopTime);
- } catch (e) {
- // Ignore errors if source already stopped
- }
- this.audioBufferQueue.push({
- buffer: entry.buffer,
- serverTime: entry.serverTime,
- generation: entry.generation,
- });
- requeued++;
- cutCount++;
- return false;
- });
- return {
- requeuedCount: requeued,
- cutCount,
- keptTailEndTimeSec,
- };
- }
-
- // Update volume based on current state
- updateVolume(): void {
- if (!this.gainNode) return;
-
- // Hardware volume mode: keep software gain at 1.0, external handles volume
- if (this.useHardwareVolume) {
- this.gainNode.gain.value = 1.0;
- return;
- }
-
- if (this.stateManager.muted) {
- this.gainNode.gain.value = 0;
- } else {
- this.gainNode.gain.value = this.stateManager.volume / 100;
- }
- }
-
- // Decode audio data based on codec
- async decodeAudioData(
- audioData: ArrayBuffer,
- format: StreamFormat,
- ): Promise<AudioBuffer | null> {
- if (!this.audioContext) return null;
-
- try {
- if (format.codec === "opus") {
- // Opus fallback path - native decoder uses async queueToNativeOpusDecoder
- return await this.decodeOpusWithEncdec(audioData, format);
- } else if (format.codec === "flac") {
- // FLAC can be decoded by the browser's native decoder
- // If codec_header is provided, prepend it to the audio data
- let dataToEncode = audioData;
- if (format.codec_header) {
- // Decode Base64 codec header
- const headerBytes = Uint8Array.from(atob(format.codec_header), (c) =>
- c.charCodeAt(0),
- );
- // Concatenate header + audio data
- const combined = new Uint8Array(
- headerBytes.length + audioData.byteLength,
- );
- combined.set(headerBytes, 0);
- combined.set(new Uint8Array(audioData), headerBytes.length);
- dataToEncode = combined.buffer;
- }
- return await this.audioContext.decodeAudioData(dataToEncode);
- } else if (format.codec === "pcm") {
- // PCM data needs manual decoding
- return this.decodePCMData(audioData, format);
- }
- } catch (error) {
- console.error("Error decoding audio data:", error);
- }
-
- return null;
- }
-
- // Initialize native Opus decoder
- private async initWebCodecsDecoder(format: StreamFormat): Promise<void> {
- const tryConfigureExistingDecoder = (): boolean => {
- if (!this.webCodecsDecoder) return false;
-
- const matchesFormat =
- !!this.webCodecsFormat &&
- this.webCodecsFormat.sample_rate === format.sample_rate &&
- this.webCodecsFormat.channels === format.channels;
-
- if (this.webCodecsDecoder.state === "configured" && matchesFormat) {
- return true;
- }
-
- if (this.webCodecsDecoder.state === "closed") {
- return false;
- }
-
- try {
- this.webCodecsDecoder.configure({
- codec: "opus",
- sampleRate: format.sample_rate,
- numberOfChannels: format.channels,
- });
- this.webCodecsFormat = format;
- return true;
- } catch {
- return false;
- }
- };
-
- if (tryConfigureExistingDecoder()) {
- return;
- }
-
- if (this.webCodecsDecoderReady) {
- await this.webCodecsDecoderReady;
- if (tryConfigureExistingDecoder()) {
- return;
- }
-
- try {
- this.webCodecsDecoder?.close();
- } catch {
- // Ignore close errors; we'll recreate below.
- }
- this.webCodecsDecoder = null;
- this.webCodecsDecoderReady = null;
- this.webCodecsFormat = null;
- }
-
- if (this.webCodecsDecoderReady) {
- await this.webCodecsDecoderReady;
- return;
- }
-
- this.webCodecsDecoderReady = this.createWebCodecsDecoder(format);
- await this.webCodecsDecoderReady;
- }
-
- // Create and configure native Opus decoder (WebCodecs)
- private async createWebCodecsDecoder(format: StreamFormat): Promise<void> {
- if (typeof AudioDecoder === "undefined") {
- this.useNativeOpus = false;
- return;
- }
-
- try {
- const support = await AudioDecoder.isConfigSupported({
- codec: "opus",
- sampleRate: format.sample_rate,
- numberOfChannels: format.channels,
- });
-
- if (!support.supported) {
- console.log(
- "[NativeOpus] WebCodecs Opus not supported, will use fallback",
- );
- this.useNativeOpus = false;
- return;
- }
-
- this.webCodecsDecoder = new AudioDecoder({
- output: (audioData: AudioData) => this.handleAudioData(audioData),
- error: (error: Error) => {
- console.error("[NativeOpus] WebCodecs decoder error:", error);
- },
- });
-
- this.webCodecsDecoder.configure({
- codec: "opus",
- sampleRate: format.sample_rate,
- numberOfChannels: format.channels,
- });
-
- this.webCodecsFormat = format;
- console.log(
- `[NativeOpus] Using WebCodecs AudioDecoder: ${format.sample_rate}Hz, ${format.channels}ch`,
- );
- } catch (error) {
- console.warn(
- "[NativeOpus] WebCodecs init failed, will use fallback:",
- error,
- );
- this.useNativeOpus = false;
- }
- }
-
- // Handle decoded audio data from native Opus decoder
- private handleAudioData(audioData: AudioData): void {
- try {
- const outputTimestampUs = Number(audioData.timestamp);
- const metadata = this.nativeDecoderQueue.shift();
-
- if (!metadata) {
- console.warn(
- `[NativeOpus] Dropping frame with empty decode queue (out ts=${outputTimestampUs})`,
- );
- audioData.close();
- return;
- }
-
- const { serverTimeUs, generation } = metadata;
- if (generation !== this.stateManager.streamGeneration) {
- console.warn(
- `[NativeOpus] Dropping old-stream frame (ts=${serverTimeUs}, gen=${generation} != current=${this.stateManager.streamGeneration})`,
- );
- audioData.close();
- return;
- }
-
- const channels = audioData.numberOfChannels;
- const frames = audioData.numberOfFrames;
- const fmt = audioData.format;
-
- let interleaved: Float32Array;
-
- if (fmt === "f32-planar") {
- interleaved = new Float32Array(frames * channels);
- for (let ch = 0; ch < channels; ch++) {
- const channelData = new Float32Array(frames);
- audioData.copyTo(channelData, { planeIndex: ch });
- for (let i = 0; i < frames; i++) {
- interleaved[i * channels + ch] = channelData[i];
- }
- }
- } else if (fmt === "f32") {
- interleaved = new Float32Array(frames * channels);
- audioData.copyTo(interleaved, { planeIndex: 0 });
- } else if (fmt === "s16-planar") {
- interleaved = new Float32Array(frames * channels);
- for (let ch = 0; ch < channels; ch++) {
- const channelData = new Int16Array(frames);
- audioData.copyTo(channelData, { planeIndex: ch });
- for (let i = 0; i < frames; i++) {
- interleaved[i * channels + ch] = channelData[i] / 32768.0;
- }
- }
- } else if (fmt === "s16") {
- const int16Data = new Int16Array(frames * channels);
- audioData.copyTo(int16Data, { planeIndex: 0 });
- interleaved = new Float32Array(frames * channels);
- for (let i = 0; i < frames * channels; i++) {
- interleaved[i] = int16Data[i] / 32768.0;
- }
- } else {
- console.warn(`[NativeOpus] Unsupported AudioData format: ${fmt}`);
- audioData.close();
- return;
- }
-
- this.handleNativeOpusOutput(interleaved, serverTimeUs, channels);
- audioData.close();
- } catch (e) {
- console.error("[NativeOpus] Error in output callback:", e);
- audioData.close();
- }
- }
-
- private resolveOpusDecoderModule(moduleExport: any): any {
- const maybeDefault = moduleExport?.default;
- const maybeCommonJs = moduleExport?.["module.exports"];
- const resolved = maybeDefault ?? maybeCommonJs ?? moduleExport;
-
- if (!resolved || typeof resolved !== "object") {
- throw new Error("[Opus] Invalid libopus decoder module export");
- }
- return resolved;
- }
-
- private resolveOggOpusDecoderClass(wrapperExport: any): any {
- const maybeDefault = wrapperExport?.default;
- const maybeCommonJs = wrapperExport?.["module.exports"];
- const wrapper = maybeDefault ?? maybeCommonJs ?? wrapperExport;
- const resolved = wrapper?.OggOpusDecoder ?? wrapper;
-
- if (typeof resolved !== "function") {
- throw new Error("[Opus] OggOpusDecoder class export not found");
- }
- return resolved;
- }
-
- private async waitForOpusReady(target: {
- isReady: boolean;
- onready?: () => void;
- }): Promise<void> {
- if (target.isReady) return;
-
- if (Object.isExtensible(target)) {
- await new Promise<void>((resolve) => {
- target.onready = () => resolve();
- });
- return;
- }
-
- while (!target.isReady) {
- await new Promise((resolve) => setTimeout(resolve, 20));
- }
- }
-
- // Initialize opus-encdec decoder (fallback when WebCodecs unavailable)
- private async initOpusEncdecDecoder(format: StreamFormat): Promise<void> {
- if (this.opusDecoderReady) {
- await this.opusDecoderReady;
- return;
- }
-
- this.opusDecoderReady = (async () => {
- console.log("[Opus] Initializing decoder (opus-encdec)...");
-
- // Dynamically import the pure JavaScript decoder (not WASM) to avoid bundling issues
- const [DecoderModuleExport, DecoderWrapperExport] = await Promise.all([
- import("opus-encdec/dist/libopus-decoder.js"),
- import("opus-encdec/src/oggOpusDecoder.js"),
- ]);
-
- this.opusDecoderModule =
- this.resolveOpusDecoderModule(DecoderModuleExport);
-
- const OggOpusDecoderClass =
- this.resolveOggOpusDecoderClass(DecoderWrapperExport);
-
- // Wait for Module to be ready (async asm.js initialization)
- await this.waitForOpusReady(this.opusDecoderModule);
-
- // Create decoder instance
- this.opusDecoder = new OggOpusDecoderClass(
- {
- rawOpus: true, // We're decoding raw Opus packets, not Ogg containers
- decoderSampleRate: format.sample_rate,
- outputBufferSampleRate: format.sample_rate,
- numberOfChannels: format.channels,
- },
- this.opusDecoderModule,
- );
-
- // Wait for decoder to be ready if needed
- await this.waitForOpusReady(this.opusDecoder);
-
- console.log("[Opus] Decoder ready");
- })();
-
- await this.opusDecoderReady;
- }
-
- // Handle native Opus decoder output - creates AudioBuffer and adds to queue
- private handleNativeOpusOutput(
- interleaved: Float32Array,
- serverTimeUs: number,
- channels: number,
- ): void {
- if (!this.audioContext || !this.webCodecsFormat) {
- return;
- }
-
- const numFrames = interleaved.length / channels;
- const audioBuffer = this.audioContext.createBuffer(
- channels,
- numFrames,
- this.webCodecsFormat.sample_rate,
- );
-
- // De-interleave samples into separate channels
- for (let ch = 0; ch < channels; ch++) {
- const channelData = audioBuffer.getChannelData(ch);
- for (let i = 0; i < numFrames; i++) {
- channelData[i] = interleaved[i * channels + ch];
- }
- }
-
- // Add to queue directly
- this.audioBufferQueue.push({
- buffer: audioBuffer,
- serverTime: serverTimeUs,
- generation: this.stateManager.streamGeneration,
- });
-
- this.scheduleQueueProcessing();
- }
-
- private scheduleTimeout: ReturnType<typeof setTimeout> | null = null;
- private refillTimeout: ReturnType<typeof setTimeout> | null = null;
- private queueProcessScheduled = false;
-
- // Schedule queue processing without starvation.
- // Uses a short timeout to allow out-of-order async decodes (FLAC) to batch.
- // TODO: Consider a "max-wait" watchdog if timer throttling/clamping causes excessive scheduling latency.
- private scheduleQueueProcessing(): void {
- this.cancelScheduledRefill();
-
- if (this.queueProcessScheduled) {
- return;
- }
- this.queueProcessScheduled = true;
-
- if (typeof globalThis.setTimeout === "function") {
- this.scheduleTimeout = globalThis.setTimeout(() => {
- this.scheduleTimeout = null;
- this.queueProcessScheduled = false;
- this.processAudioQueue();
- }, 15);
- return;
- }
-
- const run = () => {
- this.queueProcessScheduled = false;
- this.processAudioQueue();
- };
-
- if (
- typeof (globalThis as unknown as { queueMicrotask?: unknown })
- .queueMicrotask === "function"
- ) {
- (
- globalThis as unknown as { queueMicrotask: (cb: () => void) => void }
- ).queueMicrotask(run);
- } else {
- Promise.resolve().then(run);
- }
- }
-
- private cancelScheduledRefill(): void {
- if (this.refillTimeout !== null) {
- clearTimeout(this.refillTimeout);
- this.refillTimeout = null;
- }
- }
-
- private getScheduledRefillThresholdSec(
- targetScheduledHorizonSec: number,
- ): number {
- return Math.max(
- SCHEDULE_REFILL_MIN_THRESHOLD_SEC,
- Math.min(
- SCHEDULE_REFILL_MAX_THRESHOLD_SEC,
- targetScheduledHorizonSec * SCHEDULE_REFILL_THRESHOLD_FRACTION,
- ),
- );
- }
-
- private scheduleQueueRefill(targetScheduledHorizonSec: number): void {
- this.cancelScheduledRefill();
-
- if (
- !this.audioContext ||
- this.audioContext.state !== "running" ||
- !this.stateManager.isPlaying ||
- this.audioBufferQueue.length === 0
- ) {
- return;
- }
-
- const currentTimeSec = this.audioContext.currentTime;
- this.pruneExpiredScheduledSources(currentTimeSec);
- const scheduledAheadSec = this.getScheduledAheadSec(currentTimeSec);
- const refillThresholdSec = this.getScheduledRefillThresholdSec(
- targetScheduledHorizonSec,
- );
-
- if (scheduledAheadSec <= refillThresholdSec) {
- this.scheduleQueueProcessing();
- return;
- }
-
- const runRefill = () => {
- this.refillTimeout = null;
- if (
- !this.audioContext ||
- this.audioContext.state !== "running" ||
- !this.stateManager.isPlaying ||
- this.audioBufferQueue.length === 0
- ) {
- return;
- }
- this.scheduleQueueProcessing();
- };
-
- const delayMs = (scheduledAheadSec - refillThresholdSec) * 1000;
- if (typeof globalThis.setTimeout === "function") {
- this.refillTimeout = globalThis.setTimeout(runRefill, delayMs);
- return;
- }
-
- this.refillTimeout = null;
- if (
- typeof (globalThis as unknown as { queueMicrotask?: unknown })
- .queueMicrotask === "function"
- ) {
- (
- globalThis as unknown as { queueMicrotask: (cb: () => void) => void }
- ).queueMicrotask(runRefill);
- return;
- }
-
- void Promise.resolve().then(runRefill);
- }
-
- // Queue Opus packet to native decoder for async decoding (non-blocking)
- private queueToNativeOpusDecoder(
- audioData: ArrayBuffer,
- serverTimeUs: number,
- generation: number,
- ): boolean {
- if (
- !this.webCodecsDecoder ||
- this.webCodecsDecoder.state !== "configured"
- ) {
- return false;
- }
-
- try {
- this.nativeDecoderQueue.push({
- serverTimeUs,
- generation,
- });
-
- const chunk = new EncodedAudioChunk({
- type: "key", // Opus packets are self-contained
- // Keep server time as timestamp for easier debugging/inspection.
- timestamp: serverTimeUs,
- data: audioData,
- });
-
- // Queue for async decoding (non-blocking)
- this.webCodecsDecoder.decode(chunk);
- return true;
- } catch (error) {
- if (this.nativeDecoderQueue.length > 0) {
- this.nativeDecoderQueue.pop();
- }
- console.error("[NativeOpus] WebCodecs queue error:", error);
- return false;
- }
- }
-
- // Decode using opus-encdec library (fallback)
- private async decodeOpusWithEncdec(
- audioData: ArrayBuffer,
- format: StreamFormat,
- ): Promise<AudioBuffer | null> {
- if (!this.audioContext) {
- return null;
- }
-
- try {
- // Initialize fallback decoder if needed
- await this.initOpusEncdecDecoder(format);
-
- // Decode the raw Opus packet
- const uint8Array = new Uint8Array(audioData);
- const decodedSamples: Float32Array[] = [];
-
- this.opusDecoder.decodeRaw(uint8Array, (samples: Float32Array) => {
- // Copy samples since they're from WASM heap
- decodedSamples.push(new Float32Array(samples));
- });
-
- if (decodedSamples.length === 0) {
- console.warn("[Opus] Fallback decoder produced no samples");
- return null;
- }
-
- // Convert interleaved samples to AudioBuffer
- const interleavedSamples = decodedSamples[0];
- const numFrames = interleavedSamples.length / format.channels;
-
- const audioBuffer = this.audioContext.createBuffer(
- format.channels,
- numFrames,
- format.sample_rate,
- );
-
- // De-interleave samples into separate channels
- for (let ch = 0; ch < format.channels; ch++) {
- const channelData = audioBuffer.getChannelData(ch);
- for (let i = 0; i < numFrames; i++) {
- channelData[i] = interleavedSamples[i * format.channels + ch];
- }
- }
-
- return audioBuffer;
- } catch (error) {
- console.error("[Opus] Decode error:", error);
- return null;
- }
- }
-
- // Decode PCM audio data
- private decodePCMData(
- audioData: ArrayBuffer,
- format: StreamFormat,
- ): AudioBuffer | null {
- if (!this.audioContext) return null;
-
- const bytesPerSample = (format.bit_depth || 16) / 8;
- const dataView = new DataView(audioData);
- const numSamples =
- audioData.byteLength / (bytesPerSample * format.channels);
-
- const audioBuffer = this.audioContext.createBuffer(
- format.channels,
- numSamples,
- format.sample_rate,
- );
-
- // Decode PCM data (interleaved format)
- for (let channel = 0; channel < format.channels; channel++) {
- const channelData = audioBuffer.getChannelData(channel);
- for (let i = 0; i < numSamples; i++) {
- const offset = (i * format.channels + channel) * bytesPerSample;
- let sample = 0;
-
- if (format.bit_depth === 16) {
- sample = dataView.getInt16(offset, true) / 32768.0;
- } else if (format.bit_depth === 24) {
- // 24-bit is stored in 3 bytes (little-endian)
- const byte1 = dataView.getUint8(offset);
- const byte2 = dataView.getUint8(offset + 1);
- const byte3 = dataView.getUint8(offset + 2);
- // Reconstruct as signed 24-bit value
- let int24 = (byte3 << 16) | (byte2 << 8) | byte1;
- // Sign extend if necessary
- if (int24 & 0x800000) {
- int24 |= 0xff000000;
- }
- sample = int24 / 8388608.0;
- } else if (format.bit_depth === 32) {
- sample = dataView.getInt32(offset, true) / 2147483648.0;
- }
-
- channelData[i] = sample;
- }
- }
-
- return audioBuffer;
- }
-
- // Handle binary audio message
- async handleBinaryMessage(data: ArrayBuffer): Promise<void> {
- const format = this.stateManager.currentStreamFormat;
- if (!format) {
- console.warn("Sendspin: Received audio chunk but no stream format set");
- return;
- }
- if (!this.audioContext) {
- console.warn("Sendspin: Received audio chunk but no audio context");
- return;
- }
- if (!this.gainNode) {
- console.warn("Sendspin: Received audio chunk but no gain node");
- return;
- }
-
- // Capture stream generation before async decode
- const generation = this.stateManager.streamGeneration;
-
- // First byte contains role type and message slot
- // Spec: bits 7-2 identify role type (6 bits), bits 1-0 identify message slot (2 bits)
- const firstByte = new Uint8Array(data)[0];
-
- // Type 4 is audio chunk (Player role, slot 0) - IDs 4-7 are player role
- if (firstByte === 4) {
- // Next 8 bytes are server timestamp in microseconds (big-endian int64)
- const timestampView = new DataView(data, 1, 8);
- // Read as big-endian int64 and convert to number
- const serverTimeUs = Number(timestampView.getBigInt64(0, false));
-
- // Rest is audio data
- const audioData = data.slice(9);
-
- // For Opus: use native decoder (non-blocking async path)
- if (format.codec === "opus" && this.useNativeOpus) {
- await this.initWebCodecsDecoder(format);
-
- if (this.useNativeOpus && this.webCodecsDecoder) {
- if (
- this.queueToNativeOpusDecoder(audioData, serverTimeUs, generation)
- ) {
- return; // Async path - callback handles queue
- }
- // Fall through to fallback on error
- }
- }
-
- // Fallback decode path (PCM, FLAC, or Opus via opus-encdec)
- const audioBuffer = await this.decodeAudioData(audioData, format);
-
- if (audioBuffer) {
- // Check if stream generation changed during async decode
- if (generation !== this.stateManager.streamGeneration) {
- return;
- }
-
- // Add to queue for ordered playback
- this.audioBufferQueue.push({
- buffer: audioBuffer,
- serverTime: serverTimeUs,
- generation: generation,
- });
-
- this.scheduleQueueProcessing();
- } else {
- console.error("Sendspin: Failed to decode audio buffer");
- }
- }
- }
-
- // Process the audio queue and schedule chunks in order
- processAudioQueue(): void {
- this.cancelScheduledRefill();
-
- if (!this.audioContext || !this.gainNode) return;
- if (this.audioContext.state !== "running") return;
-
- // Filter out any chunks from old streams (safety check)
- const currentGeneration = this.stateManager.streamGeneration;
- this.audioBufferQueue = this.audioBufferQueue.filter(
- (chunk) => chunk.generation === currentGeneration,
- );
-
- // Sort queue by server timestamp to ensure proper ordering
- this.audioBufferQueue.sort((a, b) => a.serverTime - b.serverTime);
-
- // Don't schedule until time sync is ready
- if (!this.timeFilter.is_synchronized) {
- return;
- }
-
- const {
- audioContextTimeSec: audioContextTime,
- audioContextRawTimeSec,
- nowMs,
- nowUs,
- } = this.getTimingSnapshot();
- this.pruneExpiredScheduledSources(audioContextRawTimeSec);
-
- const outputLatencySec = this.useOutputLatencyCompensation
- ? this.getSmoothedOutputLatencyUs() / 1_000_000
- : 0;
- const syncDelaySec = this.syncDelayMs / 1000;
- const targetScheduledHorizonSec = this.getTargetScheduledHorizonSec();
-
- if (this.usesRecorrectionMonitor) {
- this.startRecorrectionMonitor();
- }
-
- if (this.pendingClockSourceCutover) {
- this.pendingClockSourceCutover = false;
- if (
- this.scheduledSources.length > 0 ||
- this.nextPlaybackTime !== 0 ||
- this.lastScheduledServerTime !== 0
- ) {
- this.performGuardedCutover("delay-change", {
- incrementResyncCount: false,
- markCooldown: false,
- });
- return;
- }
- }
-
- // Schedule chunks until we have enough future audio to survive short JS throttling.
- while (this.audioBufferQueue.length > 0) {
- const scheduledAheadSec = this.getScheduledAheadSec(
- audioContextRawTimeSec,
- );
- if (
- this.nextPlaybackTime > 0 &&
- scheduledAheadSec >= targetScheduledHorizonSec
- ) {
- break;
- }
-
- const chunk = this.audioBufferQueue.shift()!;
-
- let playbackTime: number;
- let scheduleTime: number;
- let playbackRate: number;
-
- // Always compute the drift-corrected target time
- const targetPlaybackTime = this.computeTargetPlaybackTime(
- chunk.serverTime,
- audioContextTime,
- nowUs,
- outputLatencySec,
- );
-
- // First chunk or after a gap: calculate from server timestamp
- if (this.nextPlaybackTime === 0 || this.lastScheduledServerTime === 0) {
- this.armHardResyncStartupGrace(nowMs);
- playbackTime = targetPlaybackTime;
- scheduleTime = playbackTime - syncDelaySec;
- if (this.recorrectionMinScheduleTimeSec !== null) {
- scheduleTime = Math.max(
- scheduleTime,
- this.recorrectionMinScheduleTimeSec,
- );
- playbackTime = scheduleTime + syncDelaySec;
- }
- this.recorrectionMinScheduleTimeSec = null;
- playbackRate = 1.0;
- chunk.buffer = this.copyBuffer(chunk.buffer);
- } else {
- // Subsequent chunks: schedule back-to-back for seamless playback
- // Check if this chunk is contiguous with the last one
- const expectedServerTime = this.lastScheduledServerTime;
- const serverGapUs = chunk.serverTime - expectedServerTime;
- const serverGapSec = serverGapUs / 1_000_000;
-
- if (Math.abs(serverGapSec) < 0.1) {
- // Chunk is contiguous (within 100ms)
- // Calculate sync error (positive = behind target, negative = ahead)
- const syncErrorSec = this.nextPlaybackTime - targetPlaybackTime;
- const syncErrorMs = syncErrorSec * 1000;
-
- // Apply EMA smoothing to filter jitter - use smoothed value for corrections
- const correctionErrorMs = this.applySyncErrorEma(syncErrorMs);
-
- // Get thresholds for current correction mode
- const thresholds = CORRECTION_THRESHOLDS[this._correctionMode];
- const canUseHardResync = this.canUseHardResync(nowMs);
-
- if (
- Math.abs(correctionErrorMs) > thresholds.resyncAboveMs &&
- canUseHardResync
- ) {
- // Tier 4: Hard resync if sync error exceeds threshold
- this.noteHardResync(nowMs);
- this.resyncCount++;
- this._intervalResyncCount++;
- this.resetSyncErrorEma();
- this.cutScheduledSources(targetPlaybackTime - syncDelaySec);
- playbackTime = targetPlaybackTime;
- scheduleTime = playbackTime - syncDelaySec;
- playbackRate = 1.0;
- this.currentCorrectionMethod = "resync";
- this.lastSamplesAdjusted = 0;
- chunk.buffer = this.copyBuffer(chunk.buffer);
- } else if (Math.abs(correctionErrorMs) > thresholds.resyncAboveMs) {
- // We cannot hard resync right now because startup grace or the
- // cooldown is active, so use the strongest smooth correction instead.
- playbackTime = this.nextPlaybackTime;
- scheduleTime = this.nextScheduleTime;
- playbackRate = Number.isFinite(thresholds.rate2AboveMs)
- ? correctionErrorMs > 0
- ? 1.02
- : 0.98
- : 1.0;
- this.currentCorrectionMethod =
- playbackRate === 1.0 ? "none" : "rate";
- this.lastSamplesAdjusted = 0;
- chunk.buffer = this.copyBuffer(chunk.buffer);
- } else if (Math.abs(correctionErrorMs) < thresholds.deadbandBelowMs) {
- // Tier 1: Within deadband - no correction needed
- playbackTime = this.nextPlaybackTime;
- scheduleTime = this.nextScheduleTime;
- playbackRate = 1.0;
- this.currentCorrectionMethod = "none";
- this.lastSamplesAdjusted = 0;
- chunk.buffer = this.copyBuffer(chunk.buffer);
- } else if (Math.abs(correctionErrorMs) <= thresholds.samplesBelowMs) {
- // Tier 2: Small error - use single sample insertion/deletion
- playbackTime = this.nextPlaybackTime;
- scheduleTime = this.nextScheduleTime;
- playbackRate = 1.0;
- const samplesToAdjust = correctionErrorMs > 0 ? -1 : 1;
- chunk.buffer = this.adjustBufferSamples(
- chunk.buffer,
- samplesToAdjust,
- );
- this.currentCorrectionMethod = "samples";
- this.lastSamplesAdjusted = samplesToAdjust;
- } else {
- // Tier 3: Medium error - use playback rate adjustment
- playbackTime = this.nextPlaybackTime;
- scheduleTime = this.nextScheduleTime;
- const absErrorMs = Math.abs(correctionErrorMs);
-
- if (correctionErrorMs > 0) {
- playbackRate =
- absErrorMs >= thresholds.rate2AboveMs
- ? 1.02
- : absErrorMs >= thresholds.rate1AboveMs
- ? 1.01
- : 1.0;
- } else {
- playbackRate =
- absErrorMs >= thresholds.rate2AboveMs
- ? 0.98
- : absErrorMs >= thresholds.rate1AboveMs
- ? 0.99
- : 1.0;
- }
-
- this.currentCorrectionMethod =
- playbackRate === 1.0 ? "none" : "rate";
- this.lastSamplesAdjusted = 0;
- chunk.buffer = this.copyBuffer(chunk.buffer);
- }
- } else {
- // Gap detected in server timestamps - hard resync
- if (this.canUseHardResync(nowMs)) {
- this.noteHardResync(nowMs);
- this.resyncCount++;
- this._intervalResyncCount++;
- this.cutScheduledSources(targetPlaybackTime - syncDelaySec);
- }
- playbackTime = targetPlaybackTime;
- scheduleTime = playbackTime - syncDelaySec;
- playbackRate = 1.0;
- this.currentCorrectionMethod = "resync";
- this.lastSamplesAdjusted = 0;
- chunk.buffer = this.copyBuffer(chunk.buffer);
- }
- }
-
- // Track current rate for debugging
- this.currentPlaybackRate = playbackRate;
-
- // Drop only if we already missed the logical playback time. Missing the
- // early-start window just means we apply less sync delay for this chunk.
- if (playbackTime < audioContextRawTimeSec) {
- // Reset seamless tracking since we dropped a chunk
- this.nextPlaybackTime = 0;
- this.nextScheduleTime = 0;
- this.lastScheduledServerTime = 0;
- continue;
- }
-
- const effectiveScheduleTime = Math.max(
- scheduleTime,
- audioContextRawTimeSec,
- );
- const effectivePlaybackTime =
- effectiveScheduleTime + (playbackTime - scheduleTime);
-
- const source = this.audioContext.createBufferSource();
- source.buffer = chunk.buffer;
- source.playbackRate.value = playbackRate; // Apply rate correction
- source.connect(this.gainNode);
- source.start(effectiveScheduleTime);
-
- // Track for seamless scheduling of next chunk
- // Account for actual duration with playback rate adjustment
- const actualDuration = chunk.buffer.duration / playbackRate;
- this.nextPlaybackTime = effectivePlaybackTime + actualDuration;
- this.nextScheduleTime = effectiveScheduleTime + actualDuration;
- this.lastScheduledServerTime =
- chunk.serverTime + chunk.buffer.duration * 1_000_000;
-
- const scheduledEntry = {
- source,
- startTime: effectiveScheduleTime,
- endTime: effectiveScheduleTime + actualDuration,
- buffer: chunk.buffer,
- serverTime: chunk.serverTime,
- generation: chunk.generation,
- };
- this.scheduledSources.push(scheduledEntry);
- source.onended = () => {
- const idx = this.scheduledSources.indexOf(scheduledEntry);
- if (idx > -1) this.scheduledSources.splice(idx, 1);
- if (this.scheduledSources.length === 0) {
- this.resetScheduledPlaybackState("all scheduled audio ended");
- if (this.audioBufferQueue.length > 0) {
- this.processAudioQueue();
- }
- }
- };
- }
- this.scheduleQueueRefill(targetScheduledHorizonSec);
- this.emitStatusLog(nowMs);
- }
-
- private computeTargetPlaybackTime(
- serverTimeUs: number,
- audioContextTime: number,
- nowUs: number,
- outputLatencySec: number,
- ): number {
- const chunkClientTimeUs = this.timeFilter.computeClientTime(serverTimeUs);
- const deltaUs = chunkClientTimeUs - nowUs;
- const deltaSec = deltaUs / 1_000_000;
- return (
- audioContextTime + deltaSec + SCHEDULE_HEADROOM_SEC - outputLatencySec
- );
- }
-
- // Start audio element playback (for MediaSession)
- startAudioElement(): void {
- if (this.outputMode === "media-element" && this.audioElement) {
- if (this.audioElement.paused) {
- this.audioElement.play().catch((e) => {
- console.warn("Sendspin: Failed to start audio element:", e);
- });
- }
- }
- // No-op for direct mode
- }
-
- // Stop audio element playback (for MediaSession)
- stopAudioElement(): void {
- if (this.outputMode === "media-element" && this.audioElement) {
- if (!this.audioElement.paused) {
- this.audioElement.pause();
- }
- }
- // No-op for direct mode
- }
-
- // Clear all audio buffers and scheduled sources
- clearBuffers(): void {
- this.stopRecorrectionMonitor();
- this.cancelScheduledRefill();
-
- // Stop all scheduled audio sources
- this.scheduledSources.forEach((entry) => {
- try {
- entry.source.stop();
- } catch (e) {
- // Ignore errors if source already stopped
- }
- });
- this.scheduledSources = [];
-
- // Clear buffers and reset scheduling state
- this.audioBufferQueue = [];
- if (this.scheduleTimeout !== null) {
- clearTimeout(this.scheduleTimeout);
- this.scheduleTimeout = null;
- }
- this.queueProcessScheduled = false;
-
- // Drop any pending native Opus decode outputs from the previous stream.
- // We close and recreate the decoder on next use to ensure stale callbacks
- // cannot be correlated with new-stream metadata.
- this.nativeDecoderQueue = [];
- try {
- this.webCodecsDecoder?.close();
- } catch {
- // Ignore close errors
- }
- this.webCodecsDecoder = null;
- this.webCodecsDecoderReady = null;
- this.webCodecsFormat = null;
-
- // Reset stream anchors
- this.stateManager.resetStreamAnchors();
-
- // Reset sync stats and timing sources
- this.resetScheduledPlaybackState();
- this.resyncCount = 0;
- this.lastRawOutputLatencyUs = 0;
- this.resetLatencySmoother();
- this.timingEstimateAudioContextTimeSec = null;
- this.timingEstimateAtMs = null;
- this.resetOutputTimestampValidation();
- }
-
- // Cleanup and close AudioContext
- close(): void {
- this.clearBuffers();
-
- if (this.audioContext) {
- this.audioContext.close();
- this.audioContext = null;
- }
-
- // Clean up native Opus decoder
- if (this.webCodecsDecoder) {
- try {
- this.webCodecsDecoder.close();
- } catch (e) {
- // Ignore if already closed
- }
- this.webCodecsDecoder = null;
- this.webCodecsDecoderReady = null;
- this.webCodecsFormat = null;
- }
-
- // Clean up fallback Opus decoder
- if (this.opusDecoder) {
- this.opusDecoder = null;
- this.opusDecoderModule = null;
- this.opusDecoderReady = null;
- }
-
- // Reset native Opus flag for next session
- this.useNativeOpus = true;
-
- this.gainNode = null;
- this.streamDestination = null;
-
- // Always stop and clear the audio element on full disconnect/teardown.
- if (this.outputMode === "media-element" && this.audioElement) {
- this.audioElement.pause();
- this.audioElement.srcObject = null;
- this.audioElement.loop = false;
- this.audioElement.removeAttribute("src");
- this.audioElement.load();
-
- if (this.ownsAudioElement) {
- this.audioElement.remove();
- this.audioElement = undefined;
- }
- }
- }
-
- // Get AudioContext for external use
- getAudioContext(): AudioContext | null {
- return this.audioContext;
- }
-}
diff --git a/src/audio/clock-source.ts b/src/audio/clock-source.ts
new file mode 100644
index 0000000..9399c7e
--- /dev/null
+++ b/src/audio/clock-source.ts
@@ -0,0 +1,367 @@
+/**
+ * Audio clock source selection and output timestamp validation.
+ *
+ * Manages two clock sources for AudioContext time:
+ * - "estimated": De-quantized AudioContext.currentTime using wall-clock slew
+ * - "timestamp": AudioContext.getOutputTimestamp() with extensive validation
+ *
+ * Promotes to "timestamp" after enough good samples, demotes on failures.
+ */
+
+type AudioClockSource = "estimated" | "timestamp" | "raw";
+
+interface OutputTimestampSample {
+ contextTimeSec: number;
+ performanceTimeMs: number;
+ nowMs: number;
+ predictedAudioTimeSec: number;
+ rawAudioTimeSec: number;
+}
+
+const OUTPUT_TIMESTAMP_MAX_FRESHNESS_MS = 250;
+const OUTPUT_TIMESTAMP_MIN_SAMPLE_INTERVAL_MS = 40;
+const OUTPUT_TIMESTAMP_SLOPE_MIN = 0.95;
+const OUTPUT_TIMESTAMP_SLOPE_MAX = 1.05;
+const OUTPUT_TIMESTAMP_MAX_DIVERGENCE_SEC = 0.25;
+const OUTPUT_TIMESTAMP_MAX_DIVERGENCE_DELTA_SEC = 0.05;
+const OUTPUT_TIMESTAMP_MAX_BACKWARD_SEC = 0.005;
+const OUTPUT_TIMESTAMP_FUTURE_TOLERANCE_MS = 5;
+const OUTPUT_TIMESTAMP_PROMOTION_MIN_GOOD_SAMPLES = 6;
+const OUTPUT_TIMESTAMP_PROMOTION_MIN_SPAN_MS = 750;
+const OUTPUT_TIMESTAMP_MAX_CONSECUTIVE_BAD_SAMPLES = 2;
+
+// Timing estimate constants
+const TIMING_MAX_SLEW_SEC = 0.002;
+const TIMING_RESET_THRESHOLD_SEC = 0.5;
+const TIMING_MAX_LEAD_SEC = 0.1;
+
+export interface TimingSnapshot {
+ audioContextTimeSec: number;
+ audioContextRawTimeSec: number;
+ nowMs: number;
+ nowUs: number;
+}
+
+export class ClockSource {
+ private activeSource: AudioClockSource = "estimated";
+ private _pendingCutover = false;
+ private _lastRejectReason: string | null = null;
+ private _timestampPromotionDisabled = false;
+ private _onPromotion?: () => void;
+
+ // Output timestamp validation state
+ private lastSample: OutputTimestampSample | null = null;
+ private goodSamples: number = 0;
+ private badSamples: number = 0;
+ private goodSinceMs: number | null = null;
+
+ // Estimated time state
+ private estimateAudioTimeSec: number | null = null;
+ private estimateAtMs: number | null = null;
+
+ get active(): AudioClockSource {
+ return this.activeSource;
+ }
+
+ get pendingCutover(): boolean {
+ return this._pendingCutover;
+ }
+
+ set pendingCutover(value: boolean) {
+ this._pendingCutover = value;
+ }
+
+ get lastRejectReason(): string | null {
+ return this._lastRejectReason;
+ }
+
+ get timestampGoodSamples(): number {
+ return this.goodSamples;
+ }
+
+ get timestampPromotionDisabled(): boolean {
+ return this._timestampPromotionDisabled;
+ }
+
+ /** Disable timestamp promotion (e.g., on Cast receivers to avoid rate oscillations). */
+ disableTimestampPromotion(): void {
+ this._timestampPromotionDisabled = true;
+ }
+
+ setActive(source: AudioClockSource): boolean {
+ if (this.activeSource === source) return false;
+ this.activeSource = source;
+ this._pendingCutover = source === "timestamp";
+ if (this._pendingCutover) {
+ this._onPromotion?.();
+ }
+ return this._pendingCutover;
+ }
+
+ onPromotion(cb: () => void): void {
+ this._onPromotion = cb;
+ }
+
+ reset(): void {
+ this.activeSource = "estimated";
+ this._pendingCutover = false;
+ this.lastSample = null;
+ this.goodSamples = 0;
+ this._lastRejectReason = null;
+ this.badSamples = 0;
+ this.goodSinceMs = null;
+ this.estimateAudioTimeSec = null;
+ this.estimateAtMs = null;
+ }
+
+ private demote(reason: string): void {
+ this.reset();
+ this._lastRejectReason = reason;
+ }
+
+ private rejectSample(reason: string, catastrophic = false): void {
+ this.lastSample = null;
+ this.goodSamples = 0;
+ this.goodSinceMs = null;
+ this._lastRejectReason = reason;
+
+ if (this.activeSource !== "timestamp") {
+ this.badSamples = 0;
+ return;
+ }
+
+ this.badSamples += 1;
+ if (
+ catastrophic ||
+ this.badSamples >= OUTPUT_TIMESTAMP_MAX_CONSECUTIVE_BAD_SAMPLES
+ ) {
+ this.demote(reason);
+ }
+ }
+
+ private getEstimatedTime(rawTimeSec: number, nowMs: number): number {
+ if (this.estimateAudioTimeSec === null) {
+ this.estimateAudioTimeSec = rawTimeSec;
+ this.estimateAtMs = nowMs;
+ } else if (this.estimateAtMs !== null) {
+ const wallDeltaSec = Math.max(0, (nowMs - this.estimateAtMs) / 1000);
+ const predicted = this.estimateAudioTimeSec + wallDeltaSec;
+ this.estimateAtMs = nowMs;
+
+ const errorSec = rawTimeSec - predicted;
+ if (Math.abs(errorSec) > TIMING_RESET_THRESHOLD_SEC) {
+ this.estimateAudioTimeSec = rawTimeSec;
+ } else {
+ const slew = Math.max(
+ -TIMING_MAX_SLEW_SEC,
+ Math.min(TIMING_MAX_SLEW_SEC, errorSec),
+ );
+ const next = Math.max(this.estimateAudioTimeSec, predicted + slew);
+ this.estimateAudioTimeSec = Math.min(
+ next,
+ rawTimeSec + TIMING_MAX_LEAD_SEC,
+ );
+ }
+ }
+
+ return this.estimateAudioTimeSec ?? rawTimeSec;
+ }
+
+ private getTimestampDerivedTime(
+ rawTimeSec: number,
+ audioContext: AudioContext,
+ ): number | null {
+ // On Cast receivers, stay on the estimated clock to avoid rate oscillations.
+ if (this._timestampPromotionDisabled) {
+ if (
+ this.activeSource !== "estimated" ||
+ this.lastSample !== null ||
+ this.goodSamples !== 0 ||
+ this._lastRejectReason !== null
+ ) {
+ this.reset();
+ }
+ return null;
+ }
+
+ const getOutputTimestamp = (
+ audioContext as unknown as {
+ getOutputTimestamp?: () => {
+ contextTime: number;
+ performanceTime: number;
+ };
+ }
+ ).getOutputTimestamp;
+
+ if (typeof getOutputTimestamp !== "function") {
+ if (this.activeSource === "timestamp") {
+ this.demote("getOutputTimestamp unavailable");
+ }
+ return null;
+ }
+
+ try {
+ const ts = getOutputTimestamp.call(audioContext);
+ const nowMs = performance.now();
+ const rawFreshnessMs = nowMs - ts.performanceTime;
+ if (rawFreshnessMs < -OUTPUT_TIMESTAMP_FUTURE_TOLERANCE_MS) {
+ this.rejectSample(
+ `performanceTime in future (${rawFreshnessMs.toFixed(1)}ms)`,
+ true,
+ );
+ return null;
+ }
+
+ const freshnessMs = Math.max(0, rawFreshnessMs);
+ const predictedAudioTimeSec = ts.contextTime + freshnessMs / 1000;
+ const sample: OutputTimestampSample = {
+ contextTimeSec: ts.contextTime,
+ performanceTimeMs: ts.performanceTime,
+ nowMs,
+ predictedAudioTimeSec,
+ rawAudioTimeSec: rawTimeSec,
+ };
+
+ if (freshnessMs > OUTPUT_TIMESTAMP_MAX_FRESHNESS_MS) {
+ this.rejectSample(
+ `stale timestamp (${freshnessMs.toFixed(1)}ms old)`,
+ true,
+ );
+ return null;
+ }
+
+ const divergenceSec = predictedAudioTimeSec - rawTimeSec;
+ if (Math.abs(divergenceSec) > OUTPUT_TIMESTAMP_MAX_DIVERGENCE_SEC) {
+ this.rejectSample(
+ `timestamp/raw divergence ${Math.abs(divergenceSec * 1000).toFixed(1)}ms`,
+ true,
+ );
+ return null;
+ }
+
+ const prev = this.lastSample;
+ if (prev) {
+ const perfDeltaMs = ts.performanceTime - prev.performanceTimeMs;
+ if (perfDeltaMs < 0) {
+ this.rejectSample(
+ `performanceTime moved backward (${perfDeltaMs.toFixed(1)}ms)`,
+ true,
+ );
+ return null;
+ }
+
+ if (
+ predictedAudioTimeSec <
+ prev.predictedAudioTimeSec - OUTPUT_TIMESTAMP_MAX_BACKWARD_SEC
+ ) {
+ this.rejectSample(
+ `predicted audio time moved backward ${((prev.predictedAudioTimeSec - predictedAudioTimeSec) * 1000).toFixed(1)}ms`,
+ true,
+ );
+ return null;
+ }
+
+ const prevDivergenceSec =
+ prev.predictedAudioTimeSec - prev.rawAudioTimeSec;
+ if (
+ Math.abs(divergenceSec - prevDivergenceSec) >
+ OUTPUT_TIMESTAMP_MAX_DIVERGENCE_DELTA_SEC
+ ) {
+ this.rejectSample(
+ `timestamp/raw divergence drift ${Math.abs((divergenceSec - prevDivergenceSec) * 1000).toFixed(1)}ms`,
+ );
+ return null;
+ }
+
+ if (perfDeltaMs >= OUTPUT_TIMESTAMP_MIN_SAMPLE_INTERVAL_MS) {
+ const perfDeltaSec = perfDeltaMs / 1000;
+ const contextSlope =
+ (ts.contextTime - prev.contextTimeSec) / perfDeltaSec;
+ const predictedSlope =
+ (predictedAudioTimeSec - prev.predictedAudioTimeSec) / perfDeltaSec;
+
+ if (
+ contextSlope < OUTPUT_TIMESTAMP_SLOPE_MIN ||
+ contextSlope > OUTPUT_TIMESTAMP_SLOPE_MAX
+ ) {
+ this.rejectSample(
+ `context slope ${contextSlope.toFixed(3)} out of range`,
+ );
+ return null;
+ }
+ if (
+ predictedSlope < OUTPUT_TIMESTAMP_SLOPE_MIN ||
+ predictedSlope > OUTPUT_TIMESTAMP_SLOPE_MAX
+ ) {
+ this.rejectSample(
+ `predicted slope ${predictedSlope.toFixed(3)} out of range`,
+ );
+ return null;
+ }
+ }
+ }
+
+ this.lastSample = sample;
+ this.badSamples = 0;
+ if (this.goodSinceMs === null) {
+ this.goodSinceMs = nowMs;
+ }
+ this.goodSamples += 1;
+
+ if (
+ this.activeSource !== "timestamp" &&
+ this.goodSamples >= OUTPUT_TIMESTAMP_PROMOTION_MIN_GOOD_SAMPLES &&
+ this.goodSinceMs !== null &&
+ nowMs - this.goodSinceMs >= OUTPUT_TIMESTAMP_PROMOTION_MIN_SPAN_MS
+ ) {
+ this.setActive("timestamp");
+ this._lastRejectReason = null;
+ }
+
+ return predictedAudioTimeSec;
+ } catch (error) {
+ const reason =
+ error instanceof Error
+ ? `getOutputTimestamp failed: ${error.message}`
+ : `getOutputTimestamp failed: ${String(error)}`;
+ this.rejectSample(reason, true);
+ return null;
+ }
+ }
+
+ /** Get a timing snapshot with both derived and raw AudioContext times. */
+ getTimingSnapshot(audioContext: AudioContext | null): TimingSnapshot {
+ const nowMs = performance.now();
+ const nowUs = nowMs * 1000;
+ if (!audioContext) {
+ return {
+ audioContextTimeSec: 0,
+ audioContextRawTimeSec: 0,
+ nowMs,
+ nowUs,
+ };
+ }
+
+ const rawTimeSec = audioContext.currentTime;
+ const estimatedTimeSec = this.getEstimatedTime(rawTimeSec, nowMs);
+ const timestampTimeSec = this.getTimestampDerivedTime(
+ rawTimeSec,
+ audioContext,
+ );
+
+ let derivedTimeSec =
+ this.activeSource === "timestamp" && timestampTimeSec !== null
+ ? timestampTimeSec
+ : estimatedTimeSec;
+ if (!Number.isFinite(derivedTimeSec)) {
+ derivedTimeSec = rawTimeSec;
+ }
+
+ return {
+ audioContextTimeSec: derivedTimeSec,
+ audioContextRawTimeSec: rawTimeSec,
+ nowMs,
+ nowUs,
+ };
+ }
+}
diff --git a/src/audio/decoder.ts b/src/audio/decoder.ts
new file mode 100644
index 0000000..303e113
--- /dev/null
+++ b/src/audio/decoder.ts
@@ -0,0 +1,624 @@
+/**
+ * Audio decoder pipeline for Sendspin protocol.
+ *
+ * Decodes compressed audio (PCM, Opus, FLAC) into raw Float32Array PCM samples.
+ * This module has no Web Audio playback concerns — it only produces decoded data.
+ */
+
+import type { StreamFormat, DecodedAudioChunk } from "../types";
+
+export class SendspinDecoder {
+ private onDecodedChunk: (chunk: DecodedAudioChunk) => void;
+ private currentGeneration: () => number;
+
+ // Native Opus decoder (WebCodecs API)
+ private webCodecsDecoder: AudioDecoder | null = null;
+ private webCodecsDecoderReady: Promise<void> | null = null;
+ private webCodecsFormat: StreamFormat | null = null;
+ private useNativeOpus: boolean = true;
+ private nativeDecoderQueue: Array<{
+ serverTimeUs: number;
+ generation: number;
+ }> = [];
+
+ // Fallback Opus decoder (opus-encdec library)
+ private opusDecoder: any = null;
+ private opusDecoderModule: any = null;
+ private opusDecoderReady: Promise<void> | null = null;
+
+ // FLAC decoding context (OfflineAudioContext, no playback needed)
+ private flacDecodingContext: OfflineAudioContext | null = null;
+ private flacDecodingContextSampleRate: number = 0;
+ private flacDecodingContextChannels: number = 0;
+
+ constructor(
+ onDecodedChunk: (chunk: DecodedAudioChunk) => void,
+ currentGeneration: () => number,
+ ) {
+ this.onDecodedChunk = onDecodedChunk;
+ this.currentGeneration = currentGeneration;
+ }
+
+ /**
+ * Handle a binary audio message from the WebSocket.
+ * Parses the message, decodes the audio, and emits a DecodedAudioChunk.
+ */
+ async handleBinaryMessage(
+ data: ArrayBuffer,
+ format: StreamFormat,
+ generation: number,
+ ): Promise<void> {
+ // First byte contains role type and message slot
+ const firstByte = new Uint8Array(data)[0];
+
+ // Type 4 is audio chunk (Player role, slot 0)
+ if (firstByte === 4) {
+ // Next 8 bytes are server timestamp in microseconds (big-endian int64)
+ const timestampView = new DataView(data, 1, 8);
+ const serverTimeUs = Number(timestampView.getBigInt64(0, false));
+
+ // Rest is audio data
+ const audioData = data.slice(9);
+
+ // For Opus: use native decoder (non-blocking async path)
+ if (format.codec === "opus" && this.useNativeOpus) {
+ await this.initWebCodecsDecoder(format);
+
+ if (this.useNativeOpus && this.webCodecsDecoder) {
+ if (
+ this.queueToNativeOpusDecoder(audioData, serverTimeUs, generation)
+ ) {
+ return; // Async path - callback handles output
+ }
+ // Fall through to fallback on error
+ }
+ }
+
+ // Fallback decode path (PCM, FLAC, or Opus via opus-encdec)
+ try {
+ const decoded = await this.decode(audioData, format);
+
+ if (decoded && generation === this.currentGeneration()) {
+ this.onDecodedChunk({
+ samples: decoded.samples,
+ sampleRate: decoded.sampleRate,
+ serverTimeUs,
+ generation,
+ });
+ }
+ } catch (error) {
+ console.error("Sendspin: Failed to decode audio buffer:", error);
+ }
+ }
+ }
+
+ private async decode(
+ audioData: ArrayBuffer,
+ format: StreamFormat,
+ ): Promise<{ samples: Float32Array[]; sampleRate: number } | null> {
+ if (format.codec === "opus") {
+ return this.decodeOpusWithEncdec(audioData, format);
+ } else if (format.codec === "flac") {
+ return this.decodeFLAC(audioData, format);
+ } else if (format.codec === "pcm") {
+ return this.decodePCM(audioData, format);
+ }
+ return null;
+ }
+
+ // ========================================
+ // PCM Decoder
+ // ========================================
+
+ private decodePCM(
+ audioData: ArrayBuffer,
+ format: StreamFormat,
+ ): { samples: Float32Array[]; sampleRate: number } | null {
+ const bytesPerSample = (format.bit_depth || 16) / 8;
+ const dataView = new DataView(audioData);
+ const numSamples =
+ audioData.byteLength / (bytesPerSample * format.channels);
+
+ const samples: Float32Array[] = [];
+ for (let ch = 0; ch < format.channels; ch++) {
+ samples.push(new Float32Array(numSamples));
+ }
+
+ // Decode PCM data (interleaved format)
+ for (let channel = 0; channel < format.channels; channel++) {
+ const channelData = samples[channel];
+ for (let i = 0; i < numSamples; i++) {
+ const offset = (i * format.channels + channel) * bytesPerSample;
+ let sample = 0;
+
+ if (format.bit_depth === 16) {
+ sample = dataView.getInt16(offset, true) / 32768.0;
+ } else if (format.bit_depth === 24) {
+ const byte1 = dataView.getUint8(offset);
+ const byte2 = dataView.getUint8(offset + 1);
+ const byte3 = dataView.getUint8(offset + 2);
+ let int24 = (byte3 << 16) | (byte2 << 8) | byte1;
+ if (int24 & 0x800000) {
+ int24 |= 0xff000000;
+ }
+ sample = int24 / 8388608.0;
+ } else if (format.bit_depth === 32) {
+ sample = dataView.getInt32(offset, true) / 2147483648.0;
+ }
+
+ channelData[i] = sample;
+ }
+ }
+
+ return { samples, sampleRate: format.sample_rate };
+ }
+
+ // ========================================
+ // FLAC Decoder (uses OfflineAudioContext)
+ // ========================================
+
+ private getFlacDecodingContext(
+ sampleRate: number,
+ channels: number,
+ ): OfflineAudioContext {
+ if (
+ !this.flacDecodingContext ||
+ this.flacDecodingContextSampleRate !== sampleRate ||
+ this.flacDecodingContextChannels !== channels
+ ) {
+ this.flacDecodingContext = new OfflineAudioContext(
+ channels,
+ 1,
+ sampleRate,
+ );
+ this.flacDecodingContextSampleRate = sampleRate;
+ this.flacDecodingContextChannels = channels;
+ }
+ return this.flacDecodingContext;
+ }
+
+ private async decodeFLAC(
+ audioData: ArrayBuffer,
+ format: StreamFormat,
+ ): Promise<{ samples: Float32Array[]; sampleRate: number } | null> {
+ try {
+ let dataToEncode = audioData;
+ if (format.codec_header) {
+ // Decode Base64 codec header and prepend to audio data
+ const headerBytes = Uint8Array.from(atob(format.codec_header), (c) =>
+ c.charCodeAt(0),
+ );
+ const combined = new Uint8Array(
+ headerBytes.length + audioData.byteLength,
+ );
+ combined.set(headerBytes, 0);
+ combined.set(new Uint8Array(audioData), headerBytes.length);
+ dataToEncode = combined.buffer;
+ }
+
+ const ctx = this.getFlacDecodingContext(
+ format.sample_rate,
+ format.channels,
+ );
+ const audioBuffer = await ctx.decodeAudioData(dataToEncode);
+
+ // Extract Float32Array per channel from AudioBuffer
+ const samples: Float32Array[] = [];
+ for (let ch = 0; ch < audioBuffer.numberOfChannels; ch++) {
+ samples.push(new Float32Array(audioBuffer.getChannelData(ch)));
+ }
+
+ return { samples, sampleRate: audioBuffer.sampleRate };
+ } catch (error) {
+ console.error("Error decoding FLAC data:", error);
+ return null;
+ }
+ }
+
+ // ========================================
+ // Opus - Native WebCodecs Decoder
+ // ========================================
+
+ private async initWebCodecsDecoder(format: StreamFormat): Promise<void> {
+ const tryConfigureExistingDecoder = (): boolean => {
+ if (!this.webCodecsDecoder) return false;
+
+ const matchesFormat =
+ !!this.webCodecsFormat &&
+ this.webCodecsFormat.sample_rate === format.sample_rate &&
+ this.webCodecsFormat.channels === format.channels;
+
+ if (this.webCodecsDecoder.state === "configured" && matchesFormat) {
+ return true;
+ }
+
+ if (this.webCodecsDecoder.state === "closed") {
+ return false;
+ }
+
+ try {
+ this.webCodecsDecoder.configure({
+ codec: "opus",
+ sampleRate: format.sample_rate,
+ numberOfChannels: format.channels,
+ });
+ this.webCodecsFormat = format;
+ return true;
+ } catch {
+ return false;
+ }
+ };
+
+ if (tryConfigureExistingDecoder()) {
+ return;
+ }
+
+ if (this.webCodecsDecoderReady) {
+ await this.webCodecsDecoderReady;
+ if (tryConfigureExistingDecoder()) {
+ return;
+ }
+
+ try {
+ this.webCodecsDecoder?.close();
+ } catch {
+ // Ignore close errors; we'll recreate below.
+ }
+ this.webCodecsDecoder = null;
+ this.webCodecsDecoderReady = null;
+ this.webCodecsFormat = null;
+ }
+
+ if (this.webCodecsDecoderReady) {
+ await this.webCodecsDecoderReady;
+ return;
+ }
+
+ this.webCodecsDecoderReady = this.createWebCodecsDecoder(format);
+ await this.webCodecsDecoderReady;
+ }
+
+ private async createWebCodecsDecoder(format: StreamFormat): Promise<void> {
+ if (typeof AudioDecoder === "undefined") {
+ this.useNativeOpus = false;
+ return;
+ }
+
+ try {
+ const support = await AudioDecoder.isConfigSupported({
+ codec: "opus",
+ sampleRate: format.sample_rate,
+ numberOfChannels: format.channels,
+ });
+
+ if (!support.supported) {
+ console.log(
+ "[NativeOpus] WebCodecs Opus not supported, will use fallback",
+ );
+ this.useNativeOpus = false;
+ return;
+ }
+
+ this.webCodecsDecoder = new AudioDecoder({
+ output: (audioData: AudioData) => this.handleAudioData(audioData),
+ error: (error: Error) => {
+ console.error("[NativeOpus] WebCodecs decoder error:", error);
+ },
+ });
+
+ this.webCodecsDecoder.configure({
+ codec: "opus",
+ sampleRate: format.sample_rate,
+ numberOfChannels: format.channels,
+ });
+
+ this.webCodecsFormat = format;
+ console.log(
+ `[NativeOpus] Using WebCodecs AudioDecoder: ${format.sample_rate}Hz, ${format.channels}ch`,
+ );
+ } catch (error) {
+ console.warn(
+ "[NativeOpus] WebCodecs init failed, will use fallback:",
+ error,
+ );
+ this.useNativeOpus = false;
+ }
+ }
+
  /**
   * Output callback for the WebCodecs decoder.
   *
   * Pairs each decoded AudioData with the oldest queued metadata entry
   * (decode order is FIFO), drops frames from stale stream generations,
   * converts any of the four PCM layouts to interleaved f32, and forwards
   * the result. The AudioData is always closed before returning.
   */
  private handleAudioData(audioData: AudioData): void {
    try {
      const outputTimestampUs = Number(audioData.timestamp);
      const metadata = this.nativeDecoderQueue.shift();

      if (!metadata) {
        // Queue can be empty if state was cleared while a decode was in flight.
        console.warn(
          `[NativeOpus] Dropping frame with empty decode queue (out ts=${outputTimestampUs})`,
        );
        audioData.close();
        return;
      }

      const { serverTimeUs, generation } = metadata;
      if (generation !== this.currentGeneration()) {
        console.warn(
          `[NativeOpus] Dropping old-stream frame (ts=${serverTimeUs}, gen=${generation} != current=${this.currentGeneration()})`,
        );
        audioData.close();
        return;
      }

      const channels = audioData.numberOfChannels;
      const frames = audioData.numberOfFrames;
      const fmt = audioData.format;

      let interleaved: Float32Array;

      if (fmt === "f32-planar") {
        // One float plane per channel; interleave manually.
        interleaved = new Float32Array(frames * channels);
        for (let ch = 0; ch < channels; ch++) {
          const channelData = new Float32Array(frames);
          audioData.copyTo(channelData, { planeIndex: ch });
          for (let i = 0; i < frames; i++) {
            interleaved[i * channels + ch] = channelData[i];
          }
        }
      } else if (fmt === "f32") {
        // Already interleaved float; single plane copy.
        interleaved = new Float32Array(frames * channels);
        audioData.copyTo(interleaved, { planeIndex: 0 });
      } else if (fmt === "s16-planar") {
        // Planar 16-bit PCM; interleave and scale to [-1, 1).
        interleaved = new Float32Array(frames * channels);
        for (let ch = 0; ch < channels; ch++) {
          const channelData = new Int16Array(frames);
          audioData.copyTo(channelData, { planeIndex: ch });
          for (let i = 0; i < frames; i++) {
            interleaved[i * channels + ch] = channelData[i] / 32768.0;
          }
        }
      } else if (fmt === "s16") {
        // Interleaved 16-bit PCM; scale to [-1, 1).
        const int16Data = new Int16Array(frames * channels);
        audioData.copyTo(int16Data, { planeIndex: 0 });
        interleaved = new Float32Array(frames * channels);
        for (let i = 0; i < frames * channels; i++) {
          interleaved[i] = int16Data[i] / 32768.0;
        }
      } else {
        console.warn(`[NativeOpus] Unsupported AudioData format: ${fmt}`);
        audioData.close();
        return;
      }

      this.emitDeinterleavedChunk(
        interleaved,
        serverTimeUs,
        channels,
        generation,
      );
      audioData.close();
    } catch (e) {
      console.error("[NativeOpus] Error in output callback:", e);
      audioData.close();
    }
  }
+
+ private emitDeinterleavedChunk(
+ interleaved: Float32Array,
+ serverTimeUs: number,
+ channels: number,
+ generation: number,
+ ): void {
+ if (!this.webCodecsFormat) return;
+
+ const numFrames = interleaved.length / channels;
+ const samples: Float32Array[] = [];
+
+ for (let ch = 0; ch < channels; ch++) {
+ const channelData = new Float32Array(numFrames);
+ for (let i = 0; i < numFrames; i++) {
+ channelData[i] = interleaved[i * channels + ch];
+ }
+ samples.push(channelData);
+ }
+
+ this.onDecodedChunk({
+ samples,
+ sampleRate: this.webCodecsFormat.sample_rate,
+ serverTimeUs,
+ generation,
+ });
+ }
+
+ private queueToNativeOpusDecoder(
+ audioData: ArrayBuffer,
+ serverTimeUs: number,
+ generation: number,
+ ): boolean {
+ if (
+ !this.webCodecsDecoder ||
+ this.webCodecsDecoder.state !== "configured"
+ ) {
+ return false;
+ }
+
+ try {
+ this.nativeDecoderQueue.push({
+ serverTimeUs,
+ generation,
+ });
+
+ const chunk = new EncodedAudioChunk({
+ type: "key",
+ timestamp: serverTimeUs,
+ data: audioData,
+ });
+
+ this.webCodecsDecoder.decode(chunk);
+ return true;
+ } catch (error) {
+ if (this.nativeDecoderQueue.length > 0) {
+ this.nativeDecoderQueue.pop();
+ }
+ console.error("[NativeOpus] WebCodecs queue error:", error);
+ return false;
+ }
+ }
+
+ // ========================================
+ // Opus - Fallback (opus-encdec library)
+ // ========================================
+
+ private resolveOpusDecoderModule(moduleExport: any): any {
+ const maybeDefault = moduleExport?.default;
+ const maybeCommonJs = moduleExport?.["module.exports"];
+ const resolved = maybeDefault ?? maybeCommonJs ?? moduleExport;
+
+ if (!resolved || typeof resolved !== "object") {
+ throw new Error("[Opus] Invalid libopus decoder module export");
+ }
+ return resolved;
+ }
+
+ private resolveOggOpusDecoderClass(wrapperExport: any): any {
+ const maybeDefault = wrapperExport?.default;
+ const maybeCommonJs = wrapperExport?.["module.exports"];
+ const wrapper = maybeDefault ?? maybeCommonJs ?? wrapperExport;
+ const resolved = wrapper?.OggOpusDecoder ?? wrapper;
+
+ if (typeof resolved !== "function") {
+ throw new Error("[Opus] OggOpusDecoder class export not found");
+ }
+ return resolved;
+ }
+
+ private async waitForOpusReady(target: {
+ isReady: boolean;
+ onready?: () => void;
+ }): Promise {
+ if (target.isReady) return;
+
+ if (Object.isExtensible(target)) {
+ await new Promise((resolve) => {
+ target.onready = () => resolve();
+ });
+ return;
+ }
+
+ while (!target.isReady) {
+ await new Promise((resolve) => setTimeout(resolve, 20));
+ }
+ }
+
+ private async initOpusEncdecDecoder(format: StreamFormat): Promise {
+ if (this.opusDecoderReady) {
+ await this.opusDecoderReady;
+ return;
+ }
+
+ this.opusDecoderReady = (async () => {
+ console.log("[Opus] Initializing decoder (opus-encdec)...");
+
+ const [DecoderModuleExport, DecoderWrapperExport] = await Promise.all([
+ import("opus-encdec/dist/libopus-decoder.js"),
+ import("opus-encdec/src/oggOpusDecoder.js"),
+ ]);
+
+ this.opusDecoderModule =
+ this.resolveOpusDecoderModule(DecoderModuleExport);
+
+ const OggOpusDecoderClass =
+ this.resolveOggOpusDecoderClass(DecoderWrapperExport);
+
+ await this.waitForOpusReady(this.opusDecoderModule);
+
+ this.opusDecoder = new OggOpusDecoderClass(
+ {
+ rawOpus: true,
+ decoderSampleRate: format.sample_rate,
+ outputBufferSampleRate: format.sample_rate,
+ numberOfChannels: format.channels,
+ },
+ this.opusDecoderModule,
+ );
+
+ await this.waitForOpusReady(this.opusDecoder);
+
+ console.log("[Opus] Decoder ready");
+ })();
+
+ await this.opusDecoderReady;
+ }
+
+ private async decodeOpusWithEncdec(
+ audioData: ArrayBuffer,
+ format: StreamFormat,
+ ): Promise<{ samples: Float32Array[]; sampleRate: number } | null> {
+ try {
+ await this.initOpusEncdecDecoder(format);
+
+ const uint8Array = new Uint8Array(audioData);
+ const decodedSamples: Float32Array[] = [];
+
+ this.opusDecoder.decodeRaw(uint8Array, (samples: Float32Array) => {
+ decodedSamples.push(new Float32Array(samples));
+ });
+
+ if (decodedSamples.length === 0) {
+ console.warn("[Opus] Fallback decoder produced no samples");
+ return null;
+ }
+
+ // Convert interleaved samples to per-channel arrays
+ const interleavedSamples = decodedSamples[0];
+ const numFrames = interleavedSamples.length / format.channels;
+ const samples: Float32Array[] = [];
+
+ for (let ch = 0; ch < format.channels; ch++) {
+ const channelData = new Float32Array(numFrames);
+ for (let i = 0; i < numFrames; i++) {
+ channelData[i] = interleavedSamples[i * format.channels + ch];
+ }
+ samples.push(channelData);
+ }
+
+ return { samples, sampleRate: format.sample_rate };
+ } catch (error) {
+ console.error("[Opus] Decode error:", error);
+ return null;
+ }
+ }
+
+ // ========================================
+ // Lifecycle
+ // ========================================
+
+ /** Clear decoder state (on stream change/clear). Drops in-flight async decodes. */
+ clearState(): void {
+ this.nativeDecoderQueue = [];
+ try {
+ this.webCodecsDecoder?.close();
+ } catch {
+ // Ignore close errors
+ }
+ this.webCodecsDecoder = null;
+ this.webCodecsDecoderReady = null;
+ this.webCodecsFormat = null;
+ }
+
  /** Full cleanup (on disconnect). Releases all decoder resources. */
  close(): void {
    this.clearState();

    // Drop opus-encdec fallback references so the next session re-initializes.
    if (this.opusDecoder) {
      this.opusDecoder = null;
      this.opusDecoderModule = null;
      this.opusDecoderReady = null;
    }

    // Reset native Opus flag for next session
    this.useNativeOpus = true;

    // Release FLAC decoding context state.
    this.flacDecodingContext = null;
    this.flacDecodingContextSampleRate = 0;
    this.flacDecodingContextChannels = 0;
  }
+}
diff --git a/src/audio/output-latency-tracker.ts b/src/audio/output-latency-tracker.ts
new file mode 100644
index 0000000..9d8db24
--- /dev/null
+++ b/src/audio/output-latency-tracker.ts
@@ -0,0 +1,91 @@
+/**
+ * Output latency tracker with EMA smoothing and persistence.
+ *
+ * Tracks AudioContext.baseLatency + outputLatency using exponential moving
+ * average to filter browser jitter (especially Chrome). Persists the smoothed
+ * value to storage for cross-session consistency.
+ */
+
+import type { SendspinStorage } from "../types";
+
const OUTPUT_LATENCY_ALPHA = 0.01; // EMA weight; small value = heavy smoothing
const OUTPUT_LATENCY_STORAGE_KEY = "sendspin-output-latency-us";
const OUTPUT_LATENCY_PERSIST_INTERVAL_MS = 10_000; // min spacing of storage writes
+
+export class OutputLatencyTracker {
+ private smoothedOutputLatencyUs: number | null = null;
+ private lastLatencyPersistAtMs: number | null = null;
+
+ constructor(private storage: SendspinStorage | null) {
+ this.loadPersisted();
+ }
+
+ private loadPersisted(): void {
+ if (!this.storage) return;
+ try {
+ const stored = this.storage.getItem(OUTPUT_LATENCY_STORAGE_KEY);
+ if (stored) {
+ const latency = parseFloat(stored);
+ if (!isNaN(latency) && latency >= 0) {
+ this.smoothedOutputLatencyUs = latency;
+ }
+ }
+ } catch {
+ // ignore
+ }
+ }
+
+ private persist(): void {
+ if (!this.storage || this.smoothedOutputLatencyUs === null) return;
+ try {
+ this.storage.setItem(
+ OUTPUT_LATENCY_STORAGE_KEY,
+ this.smoothedOutputLatencyUs.toString(),
+ );
+ } catch {
+ // ignore
+ }
+ }
+
+ /** Get raw output latency in microseconds from AudioContext. */
+ getRawUs(audioContext: AudioContext | null): number {
+ if (!audioContext) return 0;
+ const baseLatency = audioContext.baseLatency ?? 0;
+ const outputLatency = audioContext.outputLatency ?? 0;
+ return (baseLatency + outputLatency) * 1_000_000;
+ }
+
+ /** Get EMA-smoothed output latency in microseconds. */
+ getSmoothedUs(audioContext: AudioContext | null): number {
+ const rawLatencyUs = this.getRawUs(audioContext);
+
+ if (rawLatencyUs <= 0 && this.smoothedOutputLatencyUs !== null) {
+ return this.smoothedOutputLatencyUs;
+ }
+
+ if (this.smoothedOutputLatencyUs === null) {
+ this.smoothedOutputLatencyUs = rawLatencyUs;
+ } else {
+ this.smoothedOutputLatencyUs =
+ OUTPUT_LATENCY_ALPHA * rawLatencyUs +
+ (1 - OUTPUT_LATENCY_ALPHA) * this.smoothedOutputLatencyUs;
+ }
+
+ const nowMs =
+ typeof performance !== "undefined" ? performance.now() : Date.now();
+ if (
+ this.lastLatencyPersistAtMs === null ||
+ nowMs - this.lastLatencyPersistAtMs >= OUTPUT_LATENCY_PERSIST_INTERVAL_MS
+ ) {
+ this.persist();
+ this.lastLatencyPersistAtMs = nowMs;
+ }
+
+ return this.smoothedOutputLatencyUs;
+ }
+
+ /** Reset smoother (on stream change or audio context recreation). */
+ reset(): void {
+ this.smoothedOutputLatencyUs = null;
+ }
+}
diff --git a/src/audio/recorrection-monitor.ts b/src/audio/recorrection-monitor.ts
new file mode 100644
index 0000000..4596139
--- /dev/null
+++ b/src/audio/recorrection-monitor.ts
@@ -0,0 +1,186 @@
+/**
+ * Recorrection monitor for detecting sustained sync drift.
+ *
+ * Runs on a periodic interval and detects when sync error exceeds a threshold
+ * for long enough to warrant a hard resync. The monitor only detects — the
+ * actual cutover execution is delegated to the scheduler via callback.
+ */
+
+const RECORRECTION_CHECK_INTERVAL_MS = 250;
+const RECORRECTION_TRIGGER_MS = 30;
+const RECORRECTION_SUSTAIN_MS = 400;
+const RECORRECTION_COOLDOWN_MS = 1_500;
+const RECORRECTION_TRANSIENT_JUMP_MS = 25;
+const RECORRECTION_TRANSIENT_CONFIRM_WINDOW_MS =
+ RECORRECTION_CHECK_INTERVAL_MS * 4;
+const HARD_RESYNC_STARTUP_GRACE_MS = 1_000;
+const HARD_RESYNC_COOLDOWN_MS = 500;
+
+export class RecorrectionMonitor {
+ private interval: ReturnType | null = null;
+ private breachStartedAtMs: number | null = null;
+ private lastRecorrectionAtMs: number = -Infinity;
+ private prevRawSyncErrorMs: number | null = null;
+ private pendingJumpSign: number | null = null;
+ private pendingJumpAtMs: number | null = null;
+ private _hardResyncGraceUntilMs: number | null = null;
+ private _lastHardResyncAtMs: number = -Infinity;
+ /** After a recorrection, scheduling must not start before this time. */
+ private _minScheduleTimeSec: number | null = null;
+
+ get minScheduleTimeSec(): number | null {
+ return this._minScheduleTimeSec;
+ }
+
+ setMinScheduleTime(timeSec: number | null): void {
+ this._minScheduleTimeSec = timeSec;
+ }
+
+ clearMinScheduleTime(): void {
+ this._minScheduleTimeSec = null;
+ }
+
+ constructor(private onCheck: () => void) {}
+
+ start(): void {
+ if (this.interval !== null) return;
+ this.interval = globalThis.setInterval(
+ () => this.onCheck(),
+ RECORRECTION_CHECK_INTERVAL_MS,
+ );
+ }
+
+ stop(): void {
+ if (this.interval !== null) {
+ clearInterval(this.interval);
+ this.interval = null;
+ }
+ this.resetCheckState();
+ this.lastRecorrectionAtMs = -Infinity;
+ }
+
+ clearBreachState(): void {
+ this.breachStartedAtMs = null;
+ this.pendingJumpSign = null;
+ this.pendingJumpAtMs = null;
+ }
+
+ resetCheckState(): void {
+ this.clearBreachState();
+ this.prevRawSyncErrorMs = null;
+ }
+
+ clearHardResyncCooldown(): void {
+ this._hardResyncGraceUntilMs = null;
+ this._lastHardResyncAtMs = -Infinity;
+ }
+
+ armStartupGrace(nowMs: number, isTimestampClock: boolean): void {
+ if (isTimestampClock) {
+ this._hardResyncGraceUntilMs = null;
+ return;
+ }
+ if (this._hardResyncGraceUntilMs === null) {
+ this._hardResyncGraceUntilMs = nowMs + HARD_RESYNC_STARTUP_GRACE_MS;
+ }
+ }
+
+ canUseHardResync(nowMs: number, isTimestampClock: boolean): boolean {
+ if (isTimestampClock) {
+ this._hardResyncGraceUntilMs = null;
+ } else if (
+ this._hardResyncGraceUntilMs !== null &&
+ nowMs < this._hardResyncGraceUntilMs
+ ) {
+ return false;
+ }
+ return nowMs - this._lastHardResyncAtMs >= HARD_RESYNC_COOLDOWN_MS;
+ }
+
+ noteHardResync(nowMs: number): void {
+ this._lastHardResyncAtMs = nowMs;
+ }
+
+ /** Mark a recorrection as having just happened (for cooldown). */
+ markRecorrection(nowMs: number): void {
+ this.lastRecorrectionAtMs = nowMs;
+ }
+
+ shouldIgnoreTransientJump(rawSyncErrorMs: number, nowMs: number): boolean {
+ const prev = this.prevRawSyncErrorMs;
+ this.prevRawSyncErrorMs = rawSyncErrorMs;
+
+ if (prev === null) {
+ this.pendingJumpSign = null;
+ this.pendingJumpAtMs = null;
+ return false;
+ }
+
+ const jumpDeltaMs = rawSyncErrorMs - prev;
+ const jumpSign = Math.sign(rawSyncErrorMs);
+ const isJumpDetected =
+ Math.abs(jumpDeltaMs) >= RECORRECTION_TRANSIENT_JUMP_MS && jumpSign !== 0;
+ if (!isJumpDetected) {
+ this.pendingJumpSign = null;
+ this.pendingJumpAtMs = null;
+ return false;
+ }
+
+ const isConfirmed =
+ this.pendingJumpSign === jumpSign &&
+ this.pendingJumpAtMs !== null &&
+ nowMs - this.pendingJumpAtMs <= RECORRECTION_TRANSIENT_CONFIRM_WINDOW_MS;
+ this.pendingJumpSign = jumpSign;
+ this.pendingJumpAtMs = nowMs;
+ if (isConfirmed) {
+ this.pendingJumpSign = null;
+ this.pendingJumpAtMs = null;
+ return false;
+ }
+
+ return true;
+ }
+
+ /**
+ * Evaluate whether a recorrection should fire given the current sync state.
+ * Returns true if the scheduler should perform a guarded cutover.
+ */
+ shouldRecorrect(
+ smoothedAbsErrorMs: number,
+ rawSyncErrorMs: number,
+ nowMs: number,
+ ): boolean {
+ const isTransient = this.shouldIgnoreTransientJump(rawSyncErrorMs, nowMs);
+
+ if (smoothedAbsErrorMs < RECORRECTION_TRIGGER_MS) {
+ this.clearBreachState();
+ return false;
+ }
+ if (isTransient) {
+ this.clearBreachState();
+ return false;
+ }
+ if (this.breachStartedAtMs === null) {
+ this.breachStartedAtMs = nowMs;
+ return false;
+ }
+ if (nowMs - this.breachStartedAtMs < RECORRECTION_SUSTAIN_MS) {
+ return false;
+ }
+ if (nowMs - this.lastRecorrectionAtMs < RECORRECTION_COOLDOWN_MS) {
+ return false;
+ }
+
+ return true;
+ }
+
+ /** Full reset (on disconnect or stream clear). */
+ fullReset(): void {
+ this.stop();
+ this._hardResyncGraceUntilMs = null;
+ this._lastHardResyncAtMs = -Infinity;
+ this._minScheduleTimeSec = null;
+ }
+}
+
+export const RECORRECTION_CUTOVER_GUARD_SEC = 0.3;
diff --git a/src/audio/scheduler.ts b/src/audio/scheduler.ts
new file mode 100644
index 0000000..253f279
--- /dev/null
+++ b/src/audio/scheduler.ts
@@ -0,0 +1,1077 @@
+/**
+ * Audio scheduler for synchronized playback.
+ *
+ * Handles Web Audio API scheduling, sync correction, AudioContext management,
+ * volume control, and output routing. Receives pre-decoded audio chunks
+ * (DecodedAudioChunk) from SendspinCore and schedules them for playback.
+ */
+
+import type {
+ AudioOutputMode,
+ CorrectionMode,
+ CorrectionThresholds,
+ DecodedAudioChunk,
+ SendspinStorage,
+} from "../types";
+import type { AudioBufferQueueItem } from "../internal-types";
+import type { StateManager } from "../core/state-manager";
+import type { SendspinTimeFilter } from "../core/time-filter";
+import { ClockSource } from "./clock-source";
+import {
+ RecorrectionMonitor,
+ RECORRECTION_CUTOVER_GUARD_SEC,
+} from "./recorrection-monitor";
+import { OutputLatencyTracker } from "./output-latency-tracker";
+import { clampSyncDelayMs } from "../sync-delay";
+
// Sync correction constants
const SAMPLE_CORRECTION_FADE_LEN = 8; // samples cross-faded around an insert/drop
const SAMPLE_CORRECTION_TARGET_BLEND_SUM = 1.0;
const SAMPLE_CORRECTION_FADE_STRENGTH = Math.min(
  1,
  (2 * SAMPLE_CORRECTION_TARGET_BLEND_SUM) / SAMPLE_CORRECTION_FADE_LEN,
);
// Precomputed, linearly decaying blend weights for the correction cross-fade.
const SAMPLE_CORRECTION_FADE_ALPHAS = new Float32Array(
  SAMPLE_CORRECTION_FADE_LEN,
);
for (let f = 0; f < SAMPLE_CORRECTION_FADE_LEN; f++) {
  SAMPLE_CORRECTION_FADE_ALPHAS[f] =
    ((SAMPLE_CORRECTION_FADE_LEN - f) / (SAMPLE_CORRECTION_FADE_LEN + 1)) *
    SAMPLE_CORRECTION_FADE_STRENGTH;
}
const SYNC_ERROR_ALPHA = 0.1; // EMA weight for sync-error smoothing
const SCHEDULE_HEADROOM_SEC = 0.2;
// Scheduling horizon scales with time-filter accuracy (see
// getTargetScheduledHorizonSec): tighter clock sync => longer horizon.
const SCHEDULE_HORIZON_PRECISE_SEC = 20;
const SCHEDULE_HORIZON_GOOD_SEC = 8;
const SCHEDULE_HORIZON_POOR_SEC = 4;
const CAST_SCHEDULE_HORIZON_SEC = 1.5; // fixed short horizon on Cast runtime
const SCHEDULE_HORIZON_PRECISE_ERROR_MS = 2;
const SCHEDULE_HORIZON_GOOD_ERROR_MS = 8;
const SCHEDULE_REFILL_THRESHOLD_FRACTION = 0.5;
const SCHEDULE_REFILL_MIN_THRESHOLD_SEC = 0.1;
const SCHEDULE_REFILL_MAX_THRESHOLD_SEC = 5;
+
+export interface AudioSchedulerOptions {
+ stateManager: StateManager;
+ timeFilter: SendspinTimeFilter;
+ outputMode?: AudioOutputMode;
+ audioElement?: HTMLAudioElement;
+ isAndroid?: boolean;
+ isCastRuntime?: boolean;
+ ownsAudioElement?: boolean;
+ silentAudioSrc?: string;
+ syncDelayMs?: number;
+ useHardwareVolume?: boolean;
+ correctionMode?: CorrectionMode;
+ storage?: SendspinStorage | null;
+ useOutputLatencyCompensation?: boolean;
+ correctionThresholds?: Partial<
+ Record>
+ >;
+}
+
const DEFAULT_CORRECTION_THRESHOLDS: Record<
  CorrectionMode,
  CorrectionThresholds
> = {
  // Tight tracking: sample tweaks under 8ms, rate nudges up to 35ms, hard
  // resync beyond 200ms, plus the background recorrection monitor.
  sync: {
    resyncAboveMs: 200,
    rate2AboveMs: 35,
    rate1AboveMs: 8,
    samplesBelowMs: 8,
    deadbandBelowMs: 1,
    enableRecorrectionMonitor: true,
    immediateDelayCutover: true,
  },
  // Quality-first: no rate corrections (Infinity disables them); sample
  // corrections up to 35ms, resync beyond that.
  quality: {
    resyncAboveMs: 35,
    rate2AboveMs: Infinity,
    rate1AboveMs: Infinity,
    samplesBelowMs: 35,
    deadbandBelowMs: 1,
    enableRecorrectionMonitor: false,
    immediateDelayCutover: false,
  },
  // Solo/local playback: widest deadband, no sample or rate corrections,
  // resync only for gross (600ms+) drift.
  "quality-local": {
    resyncAboveMs: 600,
    rate2AboveMs: Infinity,
    rate1AboveMs: Infinity,
    samplesBelowMs: 0,
    deadbandBelowMs: 5,
    enableRecorrectionMonitor: false,
    immediateDelayCutover: false,
  },
};
+
+export class AudioScheduler {
+ private audioContext: AudioContext | null = null;
+ private gainNode: GainNode | null = null;
+ private streamDestination: MediaStreamAudioDestinationNode | null = null;
+ private audioBufferQueue: AudioBufferQueueItem[] = [];
+ private scheduledSources: {
+ source: AudioBufferSourceNode;
+ startTime: number;
+ endTime: number;
+ buffer: AudioBuffer;
+ serverTime: number;
+ generation: number;
+ }[] = [];
+
+ private nextPlaybackTime: number = 0;
+ private nextScheduleTime: number = 0;
+ private lastScheduledServerTime: number = 0;
+
+ private currentSyncErrorMs: number = 0;
+ private smoothedSyncErrorMs: number = 0;
+ private resyncCount: number = 0;
+ private currentPlaybackRate: number = 1.0;
+ private currentCorrectionMethod: "none" | "samples" | "rate" | "resync" =
+ "none";
+ private lastSamplesAdjusted: number = 0;
+
+ private _correctionMode: CorrectionMode = "sync";
+ private correctionThresholds: Record;
+
+ private _lastStatusLogMs: number = 0;
+ private _intervalResyncCount: number = 0;
+
+ private useOutputLatencyCompensation: boolean;
+ private scheduleTimeout: ReturnType | null = null;
+ private refillTimeout: ReturnType | null = null;
+ private queueProcessScheduled = false;
+
+ // Sub-modules
+ private clockSource = new ClockSource();
+ private recorrectionMonitor: RecorrectionMonitor;
+ private latencyTracker: OutputLatencyTracker;
+
+ private stateManager: StateManager;
+ private timeFilter: SendspinTimeFilter;
+ private outputMode: AudioOutputMode;
+ private audioElement?: HTMLAudioElement;
+ private isAndroid: boolean;
+ private isCastRuntime: boolean;
+ private ownsAudioElement: boolean;
+ private silentAudioSrc?: string;
+ private syncDelayMs: number;
+ private useHardwareVolume: boolean;
+
  /** Wires options, merged thresholds, latency tracker, clock source, and monitor. */
  constructor(options: AudioSchedulerOptions) {
    this.stateManager = options.stateManager;
    this.timeFilter = options.timeFilter;
    this.outputMode = options.outputMode ?? "direct";
    this.audioElement = options.audioElement;
    this.isAndroid = options.isAndroid ?? false;
    this.isCastRuntime = options.isCastRuntime ?? false;
    this.ownsAudioElement = options.ownsAudioElement ?? false;
    this.silentAudioSrc = options.silentAudioSrc;
    this.syncDelayMs = clampSyncDelayMs(options.syncDelayMs ?? 0);
    this.useHardwareVolume = options.useHardwareVolume ?? false;
    this._correctionMode = options.correctionMode ?? "sync";
    this.useOutputLatencyCompensation =
      options.useOutputLatencyCompensation ?? true;

    // Merge user-provided threshold overrides with defaults
    this.correctionThresholds = { ...DEFAULT_CORRECTION_THRESHOLDS };
    const thresholdOverrides = options.correctionThresholds;
    if (thresholdOverrides) {
      for (const mode of Object.keys(thresholdOverrides) as CorrectionMode[]) {
        const overrides = thresholdOverrides[mode];
        if (overrides) {
          // Per-mode shallow merge: unspecified fields keep their defaults.
          this.correctionThresholds[mode] = {
            ...DEFAULT_CORRECTION_THRESHOLDS[mode],
            ...overrides,
          };
        }
      }
    }

    this.latencyTracker = new OutputLatencyTracker(options.storage ?? null);
    if (this.isCastRuntime) {
      this.clockSource.disableTimestampPromotion();
    }
    // When the clock source promotes, re-run scheduling for any queued audio.
    this.clockSource.onPromotion(() => {
      if (
        this.audioBufferQueue.length > 0 ||
        this.scheduledSources.length > 0
      ) {
        this.scheduleQueueProcessing();
      }
    });
    this.recorrectionMonitor = new RecorrectionMonitor(() =>
      this.checkRecorrection(),
    );
  }
+
+ get correctionMode(): CorrectionMode {
+ return this._correctionMode;
+ }
+
+ setCorrectionMode(mode: CorrectionMode): void {
+ this._correctionMode = mode;
+ if (!this.correctionThresholds[mode].enableRecorrectionMonitor) {
+ this.recorrectionMonitor.stop();
+ } else {
+ this.recorrectionMonitor.start();
+ }
+ }
+
+ private get usesRecorrectionMonitor(): boolean {
+ return this.correctionThresholds[this._correctionMode]
+ .enableRecorrectionMonitor;
+ }
+
+ private get usesImmediateDelayCutover(): boolean {
+ return this.correctionThresholds[this._correctionMode]
+ .immediateDelayCutover;
+ }
+
+ private getTargetScheduledHorizonSec(): number {
+ if (this.isCastRuntime) {
+ return CAST_SCHEDULE_HORIZON_SEC;
+ }
+ const errorMs = this.timeFilter.error / 1000;
+ if (errorMs < SCHEDULE_HORIZON_PRECISE_ERROR_MS)
+ return SCHEDULE_HORIZON_PRECISE_SEC;
+ if (errorMs <= SCHEDULE_HORIZON_GOOD_ERROR_MS)
+ return SCHEDULE_HORIZON_GOOD_SEC;
+ return SCHEDULE_HORIZON_POOR_SEC;
+ }
+
+ private getScheduledAheadSec(currentTimeSec: number): number {
+ let farthest = this.nextScheduleTime;
+ for (const entry of this.scheduledSources) {
+ if (entry.endTime > farthest) farthest = entry.endTime;
+ }
+ return farthest <= 0 ? 0 : Math.max(0, farthest - currentTimeSec);
+ }
+
  /**
   * Return the scheduler to its "nothing scheduled" baseline: playback
   * cursors, cutover/cooldown bookkeeping, and correction telemetry are all
   * cleared. `_reason` exists for call-site readability only.
   */
  private resetScheduledPlaybackState(_reason?: string): void {
    this.nextPlaybackTime = 0;
    this.nextScheduleTime = 0;
    this.lastScheduledServerTime = 0;
    this.recorrectionMonitor.clearMinScheduleTime();
    this.recorrectionMonitor.clearHardResyncCooldown();
    this.clockSource.pendingCutover = false;
    this.recorrectionMonitor.resetCheckState();
    this.resetSyncErrorEma();
    this.currentSyncErrorMs = 0;
    this.currentPlaybackRate = 1.0;
    this.currentCorrectionMethod = "none";
    this.lastSamplesAdjusted = 0;
    this._lastStatusLogMs = 0;
    this._intervalResyncCount = 0;
  }
+
+ private pruneExpiredScheduledSources(currentTimeSec: number): void {
+ if (this.scheduledSources.length === 0) return;
+ this.scheduledSources = this.scheduledSources.filter(
+ (entry) => entry.endTime > currentTimeSec,
+ );
+ if (this.scheduledSources.length === 0) {
+ this.resetScheduledPlaybackState("no scheduled audio ahead");
+ }
+ }
+
  /**
   * Cut already-scheduled audio shortly after "now" and force rescheduling.
   * Used for sustained-drift recorrections and immediate sync-delay changes.
   * RECORRECTION_CUTOVER_GUARD_SEC of lead time is left before the cut, and
   * new scheduling may not start before the kept tail ends.
   */
  private performGuardedCutover(
    _reason: "recorrection" | "delay-change",
    options: { incrementResyncCount?: boolean; markCooldown?: boolean } = {},
  ): void {
    if (!this.audioContext) return;
    const incrementResyncCount = options.incrementResyncCount ?? false;
    const markCooldown = options.markCooldown ?? true;
    const nowMs = performance.now();
    const cutoffTime =
      this.audioContext.currentTime + RECORRECTION_CUTOVER_GUARD_SEC;
    if (incrementResyncCount) {
      this.resyncCount++;
      this._intervalResyncCount++;
    }
    this.resetSyncErrorEma();
    this.currentCorrectionMethod = "resync";
    this.lastSamplesAdjusted = 0;
    this.currentPlaybackRate = 1.0;
    const cutResult = this.cutScheduledSources(cutoffTime);
    // Never schedule into (or before) the tail we allowed to keep playing.
    this.recorrectionMonitor.setMinScheduleTime(
      Math.max(cutoffTime, cutResult.keptTailEndTimeSec),
    );
    this.nextPlaybackTime = 0;
    this.nextScheduleTime = 0;
    this.lastScheduledServerTime = 0;
    this.recorrectionMonitor.resetCheckState();
    if (markCooldown) this.recorrectionMonitor.markRecorrection(nowMs);
    this.recorrectionMonitor.noteHardResync(nowMs);
    this.processAudioQueue();
  }
+
  /**
   * Periodic drift check driven by RecorrectionMonitor's interval. Computes
   * the smoothed sync error for the most recently scheduled audio and asks
   * the monitor whether a sustained breach warrants a guarded cutover.
   */
  private checkRecorrection(): void {
    // Mode may have changed while the interval was already running.
    if (!this.usesRecorrectionMonitor) {
      this.recorrectionMonitor.resetCheckState();
      return;
    }
    if (!this.audioContext || this.audioContext.state !== "running") {
      this.recorrectionMonitor.resetCheckState();
      return;
    }
    if (
      !this.stateManager.isPlaying ||
      this.nextPlaybackTime === 0 ||
      this.lastScheduledServerTime === 0
    ) {
      this.recorrectionMonitor.resetCheckState();
      return;
    }

    const { audioContextTimeSec, audioContextRawTimeSec, nowMs, nowUs } =
      this.clockSource.getTimingSnapshot(this.audioContext);
    this.pruneExpiredScheduledSources(audioContextRawTimeSec);
    if (this.getScheduledAheadSec(audioContextRawTimeSec) <= 0) {
      // Nothing scheduled ahead — nothing to correct; kick the queue instead.
      this.recorrectionMonitor.resetCheckState();
      if (this.audioBufferQueue.length > 0) this.processAudioQueue();
      return;
    }

    const outputLatencySec = this.useOutputLatencyCompensation
      ? this.latencyTracker.getSmoothedUs(this.audioContext) / 1_000_000
      : 0;
    const targetPlaybackTime = this.computeTargetPlaybackTime(
      this.lastScheduledServerTime,
      audioContextTimeSec,
      nowUs,
      outputLatencySec,
    );
    // Positive = the schedule cursor sits later than the target time.
    const syncErrorMs = (this.nextPlaybackTime - targetPlaybackTime) * 1000;
    const smoothedSyncErrorMs = this.applySyncErrorEma(syncErrorMs);

    if (
      this.recorrectionMonitor.shouldRecorrect(
        Math.abs(smoothedSyncErrorMs),
        syncErrorMs,
        nowMs,
      )
    ) {
      this.performGuardedCutover("recorrection", {
        incrementResyncCount: true,
        markCooldown: true,
      });
    }
  }
+
  getSyncDelayMs(): number {
    return this.syncDelayMs;
  }

  /**
   * Update the user sync-delay (clamped). In modes with immediateDelayCutover
   * an actively playing pipeline is cut over right away so the new delay
   * takes effect without draining already-buffered audio.
   */
  setSyncDelay(delayMs: number): void {
    const sanitized = clampSyncDelayMs(delayMs);
    const delta = sanitized - this.syncDelayMs;
    this.syncDelayMs = sanitized;
    // No-op change, or the mode defers adjustment to natural rescheduling.
    if (delta === 0 || !this.usesImmediateDelayCutover) return;
    if (!this.audioContext || this.audioContext.state !== "running") return;
    if (!this.stateManager.isPlaying) return;
    // Nothing queued or scheduled — nothing to cut.
    if (
      this.scheduledSources.length === 0 &&
      this.audioBufferQueue.length === 0 &&
      this.nextPlaybackTime === 0
    )
      return;
    this.performGuardedCutover("delay-change", {
      incrementResyncCount: false,
      markCooldown: true,
    });
  }
+
  /** Snapshot of sync/correction telemetry (for UIs and diagnostics). */
  get syncInfo(): {
    clockDriftPercent: number;
    syncErrorMs: number;
    resyncCount: number;
    outputLatencyMs: number;
    playbackRate: number;
    correctionMethod: "none" | "samples" | "rate" | "resync";
    samplesAdjusted: number;
    correctionMode: CorrectionMode;
  } {
    return {
      clockDriftPercent: this.timeFilter.drift * 100,
      syncErrorMs: this.currentSyncErrorMs,
      resyncCount: this.resyncCount,
      // Raw (unsmoothed) latency so the UI reflects live context values.
      outputLatencyMs: this.latencyTracker.getRawUs(this.audioContext) / 1000,
      playbackRate: this.currentPlaybackRate,
      correctionMethod: this.currentCorrectionMethod,
      samplesAdjusted: this.lastSamplesAdjusted,
      correctionMode: this._correctionMode,
    };
  }
+
  /**
   * Emit a one-line status log at most every 10s: smoothed sync error, the
   * active correction, queue depth/lookahead, resyncs since the last log,
   * clock-source and time-filter state, smoothed latency, and mode.
   */
  private emitStatusLog(nowMs: number): void {
    if (this._lastStatusLogMs !== 0 && nowMs - this._lastStatusLogMs < 10_000)
      return;
    this._lastStatusLogMs = nowMs;

    // Render the active correction method with its parameter.
    let corr: string;
    switch (this.currentCorrectionMethod) {
      case "rate":
        corr = `rate@${this.currentPlaybackRate}`;
        break;
      case "samples":
        corr = `samples:${this.lastSamplesAdjusted}`;
        break;
      default:
        corr = this.currentCorrectionMethod;
    }

    const queueDepth =
      this.audioBufferQueue.length + this.scheduledSources.length;
    const aheadSec = this.audioContext
      ? this.getScheduledAheadSec(this.audioContext.currentTime)
      : 0;

    // Summarize which clock the scheduler is following and why.
    let clock: string;
    if (this.clockSource.timestampPromotionDisabled) {
      clock = "estimated(cast-disabled)";
    } else if (this.clockSource.active === "timestamp") {
      clock = `timestamp(good:${this.clockSource.timestampGoodSamples})`;
    } else if (this.clockSource.lastRejectReason) {
      clock = `estimated(reject:"${this.clockSource.lastRejectReason}")`;
    } else {
      clock = "estimated";
    }

    const tf = this.timeFilter.is_synchronized
      ? `synced(err=${(this.timeFilter.error / 1000).toFixed(1)}ms,drift=${this.timeFilter.drift.toFixed(3)},n=${this.timeFilter.count})`
      : `pending(n=${this.timeFilter.count})`;

    const smoothedLatUs = this.latencyTracker.getSmoothedUs(this.audioContext);
    const latMs = Math.round(smoothedLatUs / 1000);

    console.log(
      `Sendspin: sync=${this.smoothedSyncErrorMs >= 0 ? "+" : ""}${this.smoothedSyncErrorMs.toFixed(1)}ms` +
        ` corr=${corr} q=${queueDepth}/${aheadSec.toFixed(1)}s resyncs=${this._intervalResyncCount}` +
        ` clock=${clock} tf=${tf} lat=${latMs}ms mode=${this._correctionMode}` +
        ` ctx=${this.audioContext?.state ?? "null"} gen=${this.stateManager.streamGeneration}`,
    );
    // The resync counter is per-log-interval; reset after reporting.
    this._intervalResyncCount = 0;
  }
+
+ private applySyncErrorEma(inputMs: number): number {
+ this.currentSyncErrorMs = inputMs;
+ this.smoothedSyncErrorMs =
+ SYNC_ERROR_ALPHA * inputMs +
+ (1 - SYNC_ERROR_ALPHA) * this.smoothedSyncErrorMs;
+ return this.smoothedSyncErrorMs;
+ }
+
+ private resetSyncErrorEma(): void {
+ this.smoothedSyncErrorMs = 0;
+ }
+
+ private copyBuffer(buffer: AudioBuffer): AudioBuffer {
+ if (!this.audioContext) return buffer;
+ const newBuffer = this.audioContext.createBuffer(
+ buffer.numberOfChannels,
+ buffer.length,
+ buffer.sampleRate,
+ );
+ for (let ch = 0; ch < buffer.numberOfChannels; ch++) {
+ newBuffer.getChannelData(ch).set(buffer.getChannelData(ch));
+ }
+ return newBuffer;
+ }
+
  /**
   * Stretch or shrink a buffer by exactly one sample for fine sync trims.
   *
   * samplesToAdjust > 0: inserts one interpolated sample near the start;
   * samplesToAdjust < 0: drops one sample at the end by merging the last two.
   * Only ONE sample is inserted/removed per call, regardless of the
   * magnitude of samplesToAdjust. A short cross-fade over
   * SAMPLE_CORRECTION_FADE_LEN samples masks the discontinuity.
   * Returns a plain copy when no adjustment is possible.
   */
  private adjustBufferSamples(
    buffer: AudioBuffer,
    samplesToAdjust: number,
  ): AudioBuffer {
    if (!this.audioContext || samplesToAdjust === 0 || buffer.length < 2)
      return this.copyBuffer(buffer);
    const channels = buffer.numberOfChannels;
    const len = buffer.length;
    const sampleRate = buffer.sampleRate;
    try {
      if (samplesToAdjust > 0) {
        // Insert: keep sample 0, insert the average of samples 0 and 1.
        const newBuffer = this.audioContext.createBuffer(
          channels,
          len + 1,
          sampleRate,
        );
        for (let ch = 0; ch < channels; ch++) {
          const oldData = buffer.getChannelData(ch);
          const newData = newBuffer.getChannelData(ch);
          newData[0] = oldData[0];
          const insertedSample = (oldData[0] + oldData[1]) / 2;
          newData[1] = insertedSample;
          newData.set(oldData.subarray(1), 2);
          // Blend the following samples toward the inserted value.
          for (let f = 0; f < SAMPLE_CORRECTION_FADE_LEN; f++) {
            const pos = 2 + f;
            if (pos >= newData.length) break;
            const alpha = SAMPLE_CORRECTION_FADE_ALPHAS[f];
            newData[pos] = newData[pos] * (1 - alpha) + insertedSample * alpha;
          }
        }
        return newBuffer;
      } else {
        // Drop: replace the last two samples with their average.
        const newBuffer = this.audioContext.createBuffer(
          channels,
          len - 1,
          sampleRate,
        );
        for (let ch = 0; ch < channels; ch++) {
          const oldData = buffer.getChannelData(ch);
          const newData = newBuffer.getChannelData(ch);
          newData.set(oldData.subarray(0, len - 2));
          const replacementSample = (oldData[len - 2] + oldData[len - 1]) / 2;
          newData[len - 2] = replacementSample;
          // Blend the preceding samples toward the replacement value.
          for (let f = 0; f < SAMPLE_CORRECTION_FADE_LEN; f++) {
            const pos = len - 3 - f;
            if (pos < 0) break;
            const alpha = SAMPLE_CORRECTION_FADE_ALPHAS[f];
            newData[pos] =
              newData[pos] * (1 - alpha) + replacementSample * alpha;
          }
        }
        return newBuffer;
      }
    } catch (e) {
      console.error("Sendspin: adjustBufferSamples error:", e);
      return buffer;
    }
  }
+
+  /**
+   * Lazily create the AudioContext and output graph. No-op if one exists.
+   *
+   * Output routing by `outputMode`:
+   * - "direct": gain node -> context destination.
+   * - "media-element": gain node -> MediaStreamDestination -> <audio>
+   *   element; except on Android with a silent-audio source, where output
+   *   stays on the context destination and the element just loops silent
+   *   audio (presumably to keep the media session alive — TODO confirm).
+   */
+  initAudioContext(): void {
+    if (this.audioContext) return;
+    // Create a hidden <audio> element only when we own it ourselves.
+    if (this.outputMode === "media-element" && this.ownsAudioElement) {
+      this.audioElement = document.createElement("audio");
+      this.audioElement.style.display = "none";
+      document.body.appendChild(this.audioElement);
+    }
+    // Non-standard Audio Session API hint (hence the `any` escape hatch).
+    if ((navigator as any).audioSession) {
+      (navigator as any).audioSession.type = "playback";
+    }
+    // Use the stream's sample rate for the context (default 48 kHz).
+    const streamSampleRate =
+      this.stateManager.currentStreamFormat?.sample_rate || 48000;
+    this.audioContext = new AudioContext({ sampleRate: streamSampleRate });
+    this.gainNode = this.audioContext.createGain();
+    const audioElement = this.audioElement;
+    if (this.outputMode === "direct") {
+      this.gainNode.connect(this.audioContext.destination);
+    } else {
+      if (!audioElement)
+        throw new Error("Media-element output requires an audio element.");
+      if (this.isAndroid && this.silentAudioSrc) {
+        this.gainNode.connect(this.audioContext.destination);
+        audioElement.src = this.silentAudioSrc;
+        audioElement.loop = true;
+        audioElement.muted = false;
+        audioElement.volume = 1.0;
+        audioElement.play().catch((e) => {
+          console.warn("Sendspin: Audio autoplay blocked:", e);
+        });
+      } else {
+        this.streamDestination =
+          this.audioContext.createMediaStreamDestination();
+        this.gainNode.connect(this.streamDestination);
+        audioElement.srcObject = this.streamDestination.stream;
+        audioElement.volume = 1.0;
+        audioElement.play().catch((e) => {
+          console.warn("Sendspin: Audio autoplay blocked:", e);
+        });
+      }
+    }
+    this.updateVolume();
+    if (this.usesRecorrectionMonitor) this.recorrectionMonitor.start();
+  }
+
+ async resumeAudioContext(): Promise {
+ if (this.audioContext && this.audioContext.state === "suspended") {
+ try {
+ await this.audioContext.resume();
+ console.log("Sendspin: AudioContext resumed");
+ } catch (e) {
+ console.warn("Sendspin: Failed to resume AudioContext:", e);
+ return;
+ }
+ if (this.audioBufferQueue.length > 0) this.scheduleQueueProcessing();
+ if (this.usesRecorrectionMonitor) this.recorrectionMonitor.start();
+ }
+ }
+
+  /**
+   * Stop every source scheduled to start at/after `cutoffTime` (clamped to
+   * the current context time) and push its buffer back onto the queue for
+   * rescheduling; sources starting before the cutoff keep playing. Returns
+   * the requeued/cut counts and the end time of the latest kept source.
+   */
+  private cutScheduledSources(cutoffTime: number): {
+    requeuedCount: number;
+    cutCount: number;
+    keptTailEndTimeSec: number;
+  } {
+    if (!this.audioContext)
+      return { requeuedCount: 0, cutCount: 0, keptTailEndTimeSec: 0 };
+    const stopTime = Math.max(cutoffTime, this.audioContext.currentTime);
+    let requeued = 0,
+      cutCount = 0,
+      keptTailEndTimeSec = 0;
+    this.scheduledSources = this.scheduledSources.filter((entry) => {
+      if (entry.startTime < stopTime) {
+        // Keep: already playing (or will start before the cutoff).
+        keptTailEndTimeSec = Math.max(keptTailEndTimeSec, entry.endTime);
+        return true;
+      }
+      try {
+        // Detach onended first so stopping doesn't trigger end-of-audio
+        // bookkeeping for a source we are deliberately cutting.
+        entry.source.onended = null;
+        entry.source.stop(stopTime);
+      } catch {
+        /* ignore */
+      }
+      this.audioBufferQueue.push({
+        buffer: entry.buffer,
+        serverTime: entry.serverTime,
+        generation: entry.generation,
+      });
+      requeued++;
+      cutCount++;
+      return false;
+    });
+    return { requeuedCount: requeued, cutCount, keptTailEndTimeSec };
+  }
+
+  /**
+   * Apply the state manager's volume/mute to the gain node. With hardware
+   * volume enabled the gain is pinned at 1.0 (volume handled elsewhere).
+   */
+  updateVolume(): void {
+    if (!this.gainNode) return;
+    if (this.useHardwareVolume) {
+      this.gainNode.gain.value = 1.0;
+      return;
+    }
+    this.gainNode.gain.value = this.stateManager.muted
+      ? 0
+      : this.stateManager.volume / 100;
+  }
+
+  /**
+   * Seconds of audio still ahead of "now": remaining scheduled playback
+   * plus the total duration of not-yet-scheduled queued chunks.
+   */
+  measureBufferedPlaybackRunwaySec(): number {
+    if (!this.audioContext) return 0;
+    const currentTimeSec = this.audioContext.currentTime;
+    this.pruneExpiredScheduledSources(currentTimeSec);
+    const scheduledAheadSec = this.getScheduledAheadSec(currentTimeSec);
+    const queuedAheadSec = this.audioBufferQueue.reduce(
+      (totalSec, chunk) => totalSec + chunk.buffer.duration,
+      0,
+    );
+    return Math.max(0, scheduledAheadSec + queuedAheadSec);
+  }
+
+  /** Cancel any pending one-shot refill timer armed by scheduleQueueRefill. */
+  private cancelScheduledRefill(): void {
+    if (this.refillTimeout !== null) {
+      clearTimeout(this.refillTimeout);
+      this.refillTimeout = null;
+    }
+  }
+
+  /**
+   * Refill threshold: a fixed fraction of the target scheduling horizon,
+   * clamped to [SCHEDULE_REFILL_MIN_THRESHOLD_SEC,
+   * SCHEDULE_REFILL_MAX_THRESHOLD_SEC].
+   */
+  private getScheduledRefillThresholdSec(
+    targetScheduledHorizonSec: number,
+  ): number {
+    return Math.max(
+      SCHEDULE_REFILL_MIN_THRESHOLD_SEC,
+      Math.min(
+        SCHEDULE_REFILL_MAX_THRESHOLD_SEC,
+        targetScheduledHorizonSec * SCHEDULE_REFILL_THRESHOLD_FRACTION,
+      ),
+    );
+  }
+
+  /**
+   * Arm a one-shot timer that re-runs queue processing shortly before the
+   * scheduled audio drains below the refill threshold. If already at/below
+   * the threshold, process immediately. Only runs while the context is
+   * running, playback is active, and chunks are queued; conditions are
+   * re-checked when the timer fires. Falls back to a microtask/promise when
+   * setTimeout is unavailable in the host environment.
+   */
+  private scheduleQueueRefill(targetScheduledHorizonSec: number): void {
+    this.cancelScheduledRefill();
+    if (
+      !this.audioContext ||
+      this.audioContext.state !== "running" ||
+      !this.stateManager.isPlaying ||
+      this.audioBufferQueue.length === 0
+    )
+      return;
+    const currentTimeSec = this.audioContext.currentTime;
+    this.pruneExpiredScheduledSources(currentTimeSec);
+    const scheduledAheadSec = this.getScheduledAheadSec(currentTimeSec);
+    const refillThresholdSec = this.getScheduledRefillThresholdSec(
+      targetScheduledHorizonSec,
+    );
+    if (scheduledAheadSec <= refillThresholdSec) {
+      this.scheduleQueueProcessing();
+      return;
+    }
+    // Wake up just as the scheduled runway reaches the threshold.
+    const delayMs = (scheduledAheadSec - refillThresholdSec) * 1000;
+    const runRefill = () => {
+      this.refillTimeout = null;
+      // Conditions may have changed while waiting; re-validate.
+      if (
+        !this.audioContext ||
+        this.audioContext.state !== "running" ||
+        !this.stateManager.isPlaying ||
+        this.audioBufferQueue.length === 0
+      )
+        return;
+      this.scheduleQueueProcessing();
+    };
+    if (typeof globalThis.setTimeout === "function") {
+      this.refillTimeout = globalThis.setTimeout(runRefill, delayMs);
+      return;
+    }
+    // No timers available: degrade to "as soon as possible".
+    this.refillTimeout = null;
+    if (
+      typeof (globalThis as unknown as { queueMicrotask?: unknown })
+        .queueMicrotask === "function"
+    ) {
+      (
+        globalThis as unknown as { queueMicrotask: (cb: () => void) => void }
+      ).queueMicrotask(runRefill);
+      return;
+    }
+    void Promise.resolve().then(runRefill);
+  }
+
+  /**
+   * Coalesce queue-processing requests: at most one pending run at a time,
+   * normally deferred ~15ms via setTimeout; falls back to a microtask (or
+   * resolved promise) where timers are unavailable.
+   */
+  private scheduleQueueProcessing(): void {
+    this.cancelScheduledRefill();
+    if (this.queueProcessScheduled) return;
+    this.queueProcessScheduled = true;
+    if (typeof globalThis.setTimeout === "function") {
+      this.scheduleTimeout = globalThis.setTimeout(() => {
+        this.scheduleTimeout = null;
+        this.queueProcessScheduled = false;
+        this.processAudioQueue();
+      }, 15);
+      return;
+    }
+    const run = () => {
+      this.queueProcessScheduled = false;
+      this.processAudioQueue();
+    };
+    if (
+      typeof (globalThis as unknown as { queueMicrotask?: unknown })
+        .queueMicrotask === "function"
+    ) {
+      (
+        globalThis as unknown as { queueMicrotask: (cb: () => void) => void }
+      ).queueMicrotask(run);
+    } else {
+      Promise.resolve().then(run);
+    }
+  }
+
+  /**
+   * Accept a decoded PCM chunk, copy its per-channel samples into an
+   * AudioBuffer, and queue it for scheduling. Chunks from a stale stream
+   * generation are dropped; requires an initialized audio graph.
+   */
+  handleDecodedChunk(chunk: DecodedAudioChunk): void {
+    if (!this.audioContext || !this.gainNode) {
+      console.warn("Sendspin: Received audio chunk but no audio context");
+      return;
+    }
+    if (chunk.generation !== this.stateManager.streamGeneration) return;
+    const numChannels = chunk.samples.length;
+    const numFrames = chunk.samples[0].length;
+    const audioBuffer = this.audioContext.createBuffer(
+      numChannels,
+      numFrames,
+      chunk.sampleRate,
+    );
+    for (let ch = 0; ch < numChannels; ch++)
+      audioBuffer.getChannelData(ch).set(chunk.samples[ch]);
+    this.audioBufferQueue.push({
+      buffer: audioBuffer,
+      serverTime: chunk.serverTimeUs,
+      generation: chunk.generation,
+    });
+    this.scheduleQueueProcessing();
+  }
+
+  /**
+   * Drain the decoded-audio queue into scheduled AudioBufferSourceNodes.
+   *
+   * Prunes stale generations, sorts by server timestamp, and for each chunk
+   * chooses a correction strategy from the smoothed sync error and the
+   * active mode's thresholds: hard resync, ±1-sample adjustment, playback-
+   * rate nudging, or none (deadband). Requires a running context and a
+   * synchronized time filter; re-arms the refill timer on exit.
+   */
+  processAudioQueue(): void {
+    this.cancelScheduledRefill();
+    if (!this.audioContext || !this.gainNode) return;
+    if (this.audioContext.state !== "running") return;
+
+    // Drop chunks from a previous stream generation, then restore order.
+    const currentGeneration = this.stateManager.streamGeneration;
+    this.audioBufferQueue = this.audioBufferQueue.filter(
+      (chunk) => chunk.generation === currentGeneration,
+    );
+    this.audioBufferQueue.sort((a, b) => a.serverTime - b.serverTime);
+    if (!this.timeFilter.is_synchronized) return;
+
+    const {
+      audioContextTimeSec: audioContextTime,
+      audioContextRawTimeSec,
+      nowMs,
+      nowUs,
+    } = this.clockSource.getTimingSnapshot(this.audioContext);
+    this.pruneExpiredScheduledSources(audioContextRawTimeSec);
+
+    const outputLatencySec = this.useOutputLatencyCompensation
+      ? this.latencyTracker.getSmoothedUs(this.audioContext) / 1_000_000
+      : 0;
+    const syncDelaySec = this.syncDelayMs / 1000;
+    const targetScheduledHorizonSec = this.getTargetScheduledHorizonSec();
+
+    if (this.usesRecorrectionMonitor) this.recorrectionMonitor.start();
+
+    // A clock cutover was requested: if anything is in flight, perform a
+    // guarded cutover instead of scheduling against the new timeline.
+    if (this.clockSource.pendingCutover) {
+      this.clockSource.pendingCutover = false;
+      if (
+        this.scheduledSources.length > 0 ||
+        this.nextPlaybackTime !== 0 ||
+        this.lastScheduledServerTime !== 0
+      ) {
+        this.performGuardedCutover("delay-change", {
+          incrementResyncCount: false,
+          markCooldown: false,
+        });
+        return;
+      }
+    }
+
+    // Schedule chunks until the target horizon is filled or queue is empty.
+    while (this.audioBufferQueue.length > 0) {
+      const scheduledAheadSec = this.getScheduledAheadSec(
+        audioContextRawTimeSec,
+      );
+      if (
+        this.nextPlaybackTime > 0 &&
+        scheduledAheadSec >= targetScheduledHorizonSec
+      )
+        break;
+
+      const chunk = this.audioBufferQueue.shift()!;
+      let playbackTime: number;
+      let scheduleTime: number;
+      let playbackRate: number;
+
+      const targetPlaybackTime = this.computeTargetPlaybackTime(
+        chunk.serverTime,
+        audioContextTime,
+        nowUs,
+        outputLatencySec,
+      );
+      const isTimestamp = this.clockSource.active === "timestamp";
+
+      if (this.nextPlaybackTime === 0 || this.lastScheduledServerTime === 0) {
+        // No anchor yet (fresh start or after a reset): schedule at the
+        // target time, honoring any monitor-imposed minimum schedule time.
+        this.recorrectionMonitor.armStartupGrace(nowMs, isTimestamp);
+        playbackTime = targetPlaybackTime;
+        scheduleTime = playbackTime - syncDelaySec;
+        const minScheduleTimeSec = this.recorrectionMonitor.minScheduleTimeSec;
+        if (minScheduleTimeSec !== null) {
+          scheduleTime = Math.max(scheduleTime, minScheduleTimeSec);
+          playbackTime = scheduleTime + syncDelaySec;
+        }
+        this.recorrectionMonitor.clearMinScheduleTime();
+        playbackRate = 1.0;
+        chunk.buffer = this.copyBuffer(chunk.buffer);
+      } else {
+        const serverGapUs = chunk.serverTime - this.lastScheduledServerTime;
+        const serverGapSec = serverGapUs / 1_000_000;
+
+        // Contiguous with the previous chunk (<100ms server-time gap).
+        if (Math.abs(serverGapSec) < 0.1) {
+          const syncErrorSec = this.nextPlaybackTime - targetPlaybackTime;
+          const syncErrorMs = syncErrorSec * 1000;
+          const correctionErrorMs = this.applySyncErrorEma(syncErrorMs);
+          const thresholds = this.correctionThresholds[this._correctionMode];
+          const canHardResync = this.recorrectionMonitor.canUseHardResync(
+            nowMs,
+            isTimestamp,
+          );
+
+          if (
+            Math.abs(correctionErrorMs) > thresholds.resyncAboveMs &&
+            canHardResync
+          ) {
+            // Error beyond the resync threshold and resync allowed: cut the
+            // scheduled tail and hard-resync at the target time.
+            this.recorrectionMonitor.noteHardResync(nowMs);
+            this.resyncCount++;
+            this._intervalResyncCount++;
+            this.resetSyncErrorEma();
+            this.cutScheduledSources(targetPlaybackTime - syncDelaySec);
+            playbackTime = targetPlaybackTime;
+            scheduleTime = playbackTime - syncDelaySec;
+            playbackRate = 1.0;
+            this.currentCorrectionMethod = "resync";
+            this.lastSamplesAdjusted = 0;
+            chunk.buffer = this.copyBuffer(chunk.buffer);
+          } else if (Math.abs(correctionErrorMs) > thresholds.resyncAboveMs) {
+            // Resync-sized error but hard resync is gated off: keep timing
+            // continuity and apply strong rate correction when this mode
+            // defines a finite rate2 threshold.
+            playbackTime = this.nextPlaybackTime;
+            scheduleTime = this.nextScheduleTime;
+            playbackRate = Number.isFinite(thresholds.rate2AboveMs)
+              ? correctionErrorMs > 0
+                ? 1.02
+                : 0.98
+              : 1.0;
+            this.currentCorrectionMethod =
+              playbackRate === 1.0 ? "none" : "rate";
+            this.lastSamplesAdjusted = 0;
+            chunk.buffer = this.copyBuffer(chunk.buffer);
+          } else if (Math.abs(correctionErrorMs) < thresholds.deadbandBelowMs) {
+            // Within the deadband: schedule seamlessly, no correction.
+            playbackTime = this.nextPlaybackTime;
+            scheduleTime = this.nextScheduleTime;
+            playbackRate = 1.0;
+            this.currentCorrectionMethod = "none";
+            this.lastSamplesAdjusted = 0;
+            chunk.buffer = this.copyBuffer(chunk.buffer);
+          } else if (Math.abs(correctionErrorMs) <= thresholds.samplesBelowMs) {
+            // Small error: drop/insert a single sample in this chunk.
+            playbackTime = this.nextPlaybackTime;
+            scheduleTime = this.nextScheduleTime;
+            playbackRate = 1.0;
+            const samplesToAdjust = correctionErrorMs > 0 ? -1 : 1;
+            chunk.buffer = this.adjustBufferSamples(
+              chunk.buffer,
+              samplesToAdjust,
+            );
+            this.currentCorrectionMethod = "samples";
+            this.lastSamplesAdjusted = samplesToAdjust;
+          } else {
+            // Medium error: tiered playback-rate correction (±1% / ±2%).
+            playbackTime = this.nextPlaybackTime;
+            scheduleTime = this.nextScheduleTime;
+            const absErrorMs = Math.abs(correctionErrorMs);
+            if (correctionErrorMs > 0) {
+              playbackRate =
+                absErrorMs >= thresholds.rate2AboveMs
+                  ? 1.02
+                  : absErrorMs >= thresholds.rate1AboveMs
+                    ? 1.01
+                    : 1.0;
+            } else {
+              playbackRate =
+                absErrorMs >= thresholds.rate2AboveMs
+                  ? 0.98
+                  : absErrorMs >= thresholds.rate1AboveMs
+                    ? 0.99
+                    : 1.0;
+            }
+            this.currentCorrectionMethod =
+              playbackRate === 1.0 ? "none" : "rate";
+            this.lastSamplesAdjusted = 0;
+            chunk.buffer = this.copyBuffer(chunk.buffer);
+          }
+        } else {
+          // Gap detected in server timestamps - hard resync (gated on cooldown)
+          if (this.recorrectionMonitor.canUseHardResync(nowMs, isTimestamp)) {
+            this.recorrectionMonitor.noteHardResync(nowMs);
+            this.resyncCount++;
+            this._intervalResyncCount++;
+            this.cutScheduledSources(targetPlaybackTime - syncDelaySec);
+          }
+          playbackTime = targetPlaybackTime;
+          scheduleTime = playbackTime - syncDelaySec;
+          playbackRate = 1.0;
+          this.currentCorrectionMethod = "resync";
+          this.lastSamplesAdjusted = 0;
+          chunk.buffer = this.copyBuffer(chunk.buffer);
+        }
+      }
+
+      this.currentPlaybackRate = playbackRate;
+      // Chunk would start in the past: drop it and force a re-anchor on the
+      // next chunk.
+      if (playbackTime < audioContextRawTimeSec) {
+        this.nextPlaybackTime = 0;
+        this.nextScheduleTime = 0;
+        this.lastScheduledServerTime = 0;
+        continue;
+      }
+
+      // Never schedule before "now"; preserve the schedule->playback offset.
+      const effectiveScheduleTime = Math.max(
+        scheduleTime,
+        audioContextRawTimeSec,
+      );
+      const effectivePlaybackTime =
+        effectiveScheduleTime + (playbackTime - scheduleTime);
+      const source = this.audioContext.createBufferSource();
+      source.buffer = chunk.buffer;
+      source.playbackRate.value = playbackRate;
+      source.connect(this.gainNode);
+      source.start(effectiveScheduleTime);
+
+      // Rate-corrected chunks play shorter/longer than their nominal length.
+      const actualDuration = chunk.buffer.duration / playbackRate;
+      this.nextPlaybackTime = effectivePlaybackTime + actualDuration;
+      this.nextScheduleTime = effectiveScheduleTime + actualDuration;
+      this.lastScheduledServerTime =
+        chunk.serverTime + chunk.buffer.duration * 1_000_000;
+
+      // Bookkeeping entry so cutovers and pruning can find this source.
+      const scheduledEntry = {
+        source,
+        startTime: effectiveScheduleTime,
+        endTime: effectiveScheduleTime + actualDuration,
+        buffer: chunk.buffer,
+        serverTime: chunk.serverTime,
+        generation: chunk.generation,
+      };
+      this.scheduledSources.push(scheduledEntry);
+      source.onended = () => {
+        const idx = this.scheduledSources.indexOf(scheduledEntry);
+        if (idx > -1) this.scheduledSources.splice(idx, 1);
+        if (this.scheduledSources.length === 0) {
+          this.resetScheduledPlaybackState("all scheduled audio ended");
+          if (this.audioBufferQueue.length > 0) this.processAudioQueue();
+        }
+      };
+    }
+    this.scheduleQueueRefill(targetScheduledHorizonSec);
+    this.emitStatusLog(nowMs);
+  }
+
+  /**
+   * Map a chunk's server timestamp onto the AudioContext timeline: convert
+   * to client time via the time filter, take the offset from `nowUs`, then
+   * add scheduling headroom and subtract the measured output latency.
+   */
+  private computeTargetPlaybackTime(
+    serverTimeUs: number,
+    audioContextTime: number,
+    nowUs: number,
+    outputLatencySec: number,
+  ): number {
+    const chunkClientTimeUs = this.timeFilter.computeClientTime(serverTimeUs);
+    const deltaSec = (chunkClientTimeUs - nowUs) / 1_000_000;
+    return (
+      audioContextTime + deltaSec + SCHEDULE_HEADROOM_SEC - outputLatencySec
+    );
+  }
+
+  /** Resume the backing <audio> element in media-element mode if paused. */
+  startAudioElement(): void {
+    if (this.outputMode === "media-element" && this.audioElement?.paused) {
+      this.audioElement.play().catch((e) => {
+        console.warn("Sendspin: Failed to start audio element:", e);
+      });
+    }
+  }
+
+  /** Pause the backing <audio> element in media-element mode if playing. */
+  stopAudioElement(): void {
+    if (
+      this.outputMode === "media-element" &&
+      this.audioElement &&
+      !this.audioElement.paused
+    ) {
+      this.audioElement.pause();
+    }
+  }
+
+  /**
+   * Stop and discard all scheduled sources and queued chunks, cancel pending
+   * timers, and reset sync/latency/clock state. The AudioContext itself is
+   * left intact (see close() for full teardown).
+   */
+  clearBuffers(): void {
+    this.recorrectionMonitor.fullReset();
+    this.cancelScheduledRefill();
+    this.scheduledSources.forEach((entry) => {
+      try {
+        entry.source.stop();
+      } catch {
+        /* ignore */
+      }
+    });
+    this.scheduledSources = [];
+    this.audioBufferQueue = [];
+    if (this.scheduleTimeout !== null) {
+      clearTimeout(this.scheduleTimeout);
+      this.scheduleTimeout = null;
+    }
+    this.queueProcessScheduled = false;
+    this.stateManager.resetStreamAnchors();
+    this.resetScheduledPlaybackState();
+    this.resyncCount = 0;
+    this.latencyTracker.reset();
+    this.clockSource.reset();
+  }
+
+  /**
+   * Full teardown: clear buffers, close the AudioContext, and detach the
+   * media element (removing it from the DOM when we created it ourselves).
+   */
+  close(): void {
+    this.clearBuffers();
+    if (this.audioContext) {
+      this.audioContext.close();
+      this.audioContext = null;
+    }
+    this.gainNode = null;
+    this.streamDestination = null;
+    if (this.outputMode === "media-element" && this.audioElement) {
+      this.audioElement.pause();
+      this.audioElement.srcObject = null;
+      this.audioElement.loop = false;
+      // Release any silent-audio src and force the element to reset.
+      this.audioElement.removeAttribute("src");
+      this.audioElement.load();
+      if (this.ownsAudioElement) {
+        this.audioElement.remove();
+        this.audioElement = undefined;
+      }
+    }
+  }
+
+  /** Underlying AudioContext, or null before initAudioContext()/after close(). */
+  getAudioContext(): AudioContext | null {
+    return this.audioContext;
+  }
+}
diff --git a/src/core/codec-support.ts b/src/core/codec-support.ts
new file mode 100644
index 0000000..46bf25d
--- /dev/null
+++ b/src/core/codec-support.ts
@@ -0,0 +1,76 @@
+import type { Codec, SupportedFormat } from "../types";
+
+/**
+ * Detect which audio codecs the current browser supports.
+ *
+ * Safari: PCM + Opus (no FLAC). Firefox: PCM + FLAC (Opus glitches).
+ * Otherwise: PCM + FLAC, plus Opus when the WebCodecs AudioDecoder is
+ * available (it requires a secure context).
+ */
+export function getBrowserSupportedCodecs(): Set<Codec> {
+  const userAgent = typeof navigator !== "undefined" ? navigator.userAgent : "";
+  const isSafari = /^((?!chrome|android).)*safari/i.test(userAgent);
+  const isFirefox = /firefox/i.test(userAgent);
+
+  // Check if native Opus decoder is available (requires secure context)
+  const hasNativeOpus = typeof AudioDecoder !== "undefined";
+
+  if (!hasNativeOpus) {
+    if (typeof window !== "undefined" && !window.isSecureContext) {
+      console.warn(
+        "[Opus] Running in insecure context, falling back to FLAC/PCM",
+      );
+    } else {
+      console.warn(
+        "[Opus] Native decoder not available, falling back to FLAC/PCM",
+      );
+    }
+  }
+
+  if (isSafari) {
+    // Safari: No FLAC support
+    return new Set<Codec>(["pcm", "opus"]);
+  }
+
+  if (isFirefox) {
+    // Firefox: Opus has audio glitches with both native and opus-encdec decoders
+    return new Set<Codec>(["pcm", "flac"]);
+  }
+
+  if (hasNativeOpus) {
+    // Native Opus available (Chrome, Edge)
+    return new Set<Codec>(["pcm", "opus", "flac"]);
+  }
+
+  // No WebCodecs AudioDecoder (insecure context or unsupported browser)
+  return new Set<Codec>(["pcm", "flac"]);
+}
+
+/**
+ * Build the supported-format list for the requested codecs, keeping only
+ * those the current browser can decode (see getBrowserSupportedCodecs).
+ * Opus is pinned to 48 kHz; PCM and FLAC are offered at 48 kHz and
+ * 44.1 kHz. All formats are stereo, 16-bit.
+ *
+ * @throws Error when none of the requested codecs is browser-supported.
+ */
+export function getSupportedFormats(codecs: Codec[]): SupportedFormat[] {
+  const browserSupported = getBrowserSupportedCodecs();
+  const formats: SupportedFormat[] = [];
+
+  for (const codec of codecs) {
+    if (!browserSupported.has(codec)) {
+      continue;
+    }
+
+    if (codec === "opus") {
+      // Opus requires 48kHz
+      formats.push({
+        codec: "opus",
+        sample_rate: 48000,
+        channels: 2,
+        bit_depth: 16,
+      });
+    } else {
+      // PCM and FLAC support both sample rates
+      formats.push({ codec, sample_rate: 48000, channels: 2, bit_depth: 16 });
+      formats.push({ codec, sample_rate: 44100, channels: 2, bit_depth: 16 });
+    }
+  }
+
+  if (formats.length === 0) {
+    throw new Error(
+      `No supported codecs: requested [${codecs.join(", ")}], ` +
+        `browser supports [${[...browserSupported].join(", ")}]`,
+    );
+  }
+
+  return formats;
+}
diff --git a/src/core/core.ts b/src/core/core.ts
new file mode 100644
index 0000000..f71e106
--- /dev/null
+++ b/src/core/core.ts
@@ -0,0 +1,365 @@
+/**
+ * SendspinCore: Protocol + decoding layer.
+ *
+ * Manages the WebSocket connection, Sendspin protocol, time synchronization,
+ * state management, and audio decoding. Emits decoded PCM audio chunks that
+ * can be consumed by SendspinPlayer for playback, or by visualization/analysis
+ * tools directly.
+ */
+
+import { SendspinDecoder } from "../audio/decoder";
+import { ProtocolHandler } from "./protocol-handler";
+import { StateManager } from "./state-manager";
+import { WebSocketManager } from "./websocket-manager";
+import { SendspinTimeFilter } from "./time-filter";
+import { clampSyncDelayMs } from "../sync-delay";
+import type {
+ SendspinCoreConfig,
+ DecodedAudioChunk,
+ StreamFormat,
+ GoodbyeReason,
+ PlayerState,
+ ControllerCommand,
+ ControllerCommands,
+ ServerStatePayload,
+ GroupUpdatePayload,
+} from "../types";
+import type { StreamHandler } from "../internal-types";
+
+/** Short random base-36 suffix (typically 4 chars) for default player ids. */
+function generateRandomId(): string {
+  return Math.random().toString(36).substring(2, 6);
+}
+
+export class SendspinCore implements StreamHandler {
+  private wsManager: WebSocketManager;
+  private protocolHandler: ProtocolHandler;
+  private stateManager: StateManager;
+  private timeFilter: SendspinTimeFilter;
+  private decoder: SendspinDecoder;
+
+  private config: SendspinCoreConfig;
+  private _syncDelayMs: number;
+
+  // Stream events — consumers (e.g., SendspinPlayer) subscribe to these
+  private _onAudioData?: (chunk: DecodedAudioChunk) => void;
+  private _onStreamStart?: (
+    format: StreamFormat,
+    isFormatUpdate: boolean,
+  ) => void;
+  private _onStreamClear?: () => void;
+  private _onStreamEnd?: () => void;
+  private _onVolumeUpdate?: () => void;
+  private _onSyncDelayChange?: (delayMs: number) => void;
+  private _onConnectionOpen?: () => void;
+  private _onConnectionClose?: () => void;
+
+  constructor(config: SendspinCoreConfig) {
+    // Default the player id/name to randomized values so multiple clients
+    // on one server don't collide.
+    const randomId = generateRandomId();
+    const playerId = config.playerId ?? `sendspin-js-${randomId}`;
+    const clientName = config.clientName ?? `Sendspin JS Client (${randomId})`;
+
+    this.config = { ...config, playerId, clientName };
+    this._syncDelayMs = clampSyncDelayMs(config.syncDelay ?? 0);
+
+    this.timeFilter = new SendspinTimeFilter(0, 1.1, 2.0, 1e-12);
+    this.stateManager = new StateManager(config.onStateChange);
+
+    this.decoder = new SendspinDecoder(
+      (chunk) => this._onAudioData?.(chunk),
+      () => this.stateManager.streamGeneration,
+    );
+
+    this.wsManager = new WebSocketManager();
+
+    this.protocolHandler = new ProtocolHandler(
+      playerId,
+      this.wsManager,
+      this, // this class implements StreamHandler
+      this.stateManager,
+      this.timeFilter,
+      {
+        clientName,
+        codecs: config.codecs,
+        bufferCapacity: config.bufferCapacity,
+        useHardwareVolume: config.useHardwareVolume,
+        onVolumeCommand: config.onVolumeCommand,
+        onDelayCommand: config.onDelayCommand,
+        getExternalVolume: config.getExternalVolume,
+      },
+    );
+  }
+
+  // ========================================
+  // StreamHandler implementation
+  // (called by ProtocolHandler)
+  // ========================================
+
+  handleBinaryMessage(data: ArrayBuffer): void {
+    const format = this.stateManager.currentStreamFormat;
+    if (!format) {
+      console.warn("Sendspin: Received audio chunk but no stream format set");
+      return;
+    }
+    const generation = this.stateManager.streamGeneration;
+    this.decoder.handleBinaryMessage(data, format, generation);
+  }
+
+  handleStreamStart(format: StreamFormat, isFormatUpdate: boolean): void {
+    // A brand-new stream discards decoder state; a format update keeps it.
+    if (!isFormatUpdate) {
+      this.decoder.clearState();
+    }
+    this._onStreamStart?.(format, isFormatUpdate);
+  }
+
+  handleStreamClear(): void {
+    this.decoder.clearState();
+    this._onStreamClear?.();
+  }
+
+  handleStreamEnd(): void {
+    this.decoder.clearState();
+    this._onStreamEnd?.();
+  }
+
+  handleVolumeUpdate(): void {
+    this._onVolumeUpdate?.();
+  }
+
+  handleSyncDelayChange(delayMs: number): void {
+    this._syncDelayMs = clampSyncDelayMs(delayMs);
+    this._onSyncDelayChange?.(this._syncDelayMs);
+  }
+
+  getSyncDelayMs(): number {
+    return this._syncDelayMs;
+  }
+
+  // ========================================
+  // Event registration
+  // ========================================
+
+  set onAudioData(cb: ((chunk: DecodedAudioChunk) => void) | undefined) {
+    this._onAudioData = cb;
+  }
+  set onStreamStart(
+    cb: ((format: StreamFormat, isFormatUpdate: boolean) => void) | undefined,
+  ) {
+    this._onStreamStart = cb;
+  }
+  set onStreamClear(cb: (() => void) | undefined) {
+    this._onStreamClear = cb;
+  }
+  set onStreamEnd(cb: (() => void) | undefined) {
+    this._onStreamEnd = cb;
+  }
+  set onVolumeUpdate(cb: (() => void) | undefined) {
+    this._onVolumeUpdate = cb;
+  }
+  set onSyncDelayChange(cb: ((delayMs: number) => void) | undefined) {
+    this._onSyncDelayChange = cb;
+  }
+  set onConnectionOpen(cb: (() => void) | undefined) {
+    this._onConnectionOpen = cb;
+  }
+  set onConnectionClose(cb: (() => void) | undefined) {
+    this._onConnectionClose = cb;
+  }
+
+  // ========================================
+  // Connection
+  // ========================================
+
+  /**
+   * Open the transport and start the protocol handshake. Adopts the
+   * externally-managed WebSocket when `config.webSocket` is provided,
+   * otherwise derives a ws(s)://host/sendspin URL from `config.baseUrl`.
+   * @throws Error when neither `webSocket` nor `baseUrl` is configured.
+   */
+  async connect(): Promise<void> {
+    const onOpen = () => {
+      this._onConnectionOpen?.();
+      console.log("Sendspin: Using player_id:", this.config.playerId);
+      this.protocolHandler.sendClientHello();
+    };
+    const onMessage = (event: MessageEvent) => {
+      this.protocolHandler.handleMessage(event);
+    };
+    const onError = (error: Event) => {
+      console.error("Sendspin: WebSocket error", error);
+    };
+    const onClose = () => {
+      this.protocolHandler.stopTimeSync();
+      // Stop periodic state-update sends so they don't spam
+      // "WebSocket not connected" warnings after the transport is gone.
+      this.stateManager.clearStateUpdateInterval();
+      console.log("Sendspin: Connection closed");
+      this._onConnectionClose?.();
+    };
+
+    if (this.config.webSocket) {
+      // Adopt externally-managed WebSocket
+      await this.wsManager.adopt(
+        this.config.webSocket,
+        onOpen,
+        onMessage,
+        onError,
+        onClose,
+      );
+    } else {
+      // Create connection from baseUrl
+      if (!this.config.baseUrl) {
+        throw new Error(
+          "SendspinCore requires either baseUrl or webSocket to be provided.",
+        );
+      }
+      const url = new URL(this.config.baseUrl);
+      const wsProtocol = url.protocol === "https:" ? "wss:" : "ws:";
+      const wsUrl = `${wsProtocol}//${url.host}/sendspin`;
+
+      await this.wsManager.connect(wsUrl, onOpen, onMessage, onError, onClose);
+    }
+  }
+
+  /**
+   * Reset playback-related state (isPlaying, currentStreamFormat) without
+   * tearing down the connection. Intended for transport-loss cleanup after
+   * any buffered audio has finished draining.
+   */
+  resetPlaybackState(): void {
+    this.stateManager.isPlaying = false;
+    this.stateManager.currentStreamFormat = null;
+  }
+
+  /** Send a goodbye (when connected) and tear down transport, decoder, state. */
+  disconnect(reason: GoodbyeReason = "shutdown"): void {
+    if (this.wsManager.isConnected()) {
+      this.protocolHandler.sendGoodbye(reason);
+    }
+    this.protocolHandler.stopTimeSync();
+    this.stateManager.clearAllIntervals();
+    this.wsManager.disconnect();
+    this.decoder.close();
+    this.timeFilter.reset();
+    this.stateManager.reset();
+  }
+
+  // ========================================
+  // Volume / Mute
+  // ========================================
+
+  setVolume(volume: number): void {
+    this.stateManager.volume = volume;
+    this._onVolumeUpdate?.();
+    this.protocolHandler.sendStateUpdate();
+  }
+
+  setMuted(muted: boolean): void {
+    this.stateManager.muted = muted;
+    this._onVolumeUpdate?.();
+    this.protocolHandler.sendStateUpdate();
+  }
+
+  // ========================================
+  // Sync delay
+  // ========================================
+
+  setSyncDelay(delayMs: number): void {
+    this._syncDelayMs = clampSyncDelayMs(delayMs);
+    this._onSyncDelayChange?.(this._syncDelayMs);
+    this.protocolHandler.sendStateUpdate();
+  }
+
+  // ========================================
+  // Controller commands
+  // ========================================
+
+  /**
+   * Send a controller command, validating it against the server's advertised
+   * `supported_commands` when known.
+   * @throws Error when the server reports the command as unsupported.
+   */
+  sendCommand<T extends ControllerCommand>(
+    command: T,
+    params: ControllerCommands[T],
+  ): void {
+    const supportedCommands =
+      this.stateManager.serverState.controller?.supported_commands;
+    if (supportedCommands && !supportedCommands.includes(command)) {
+      throw new Error(
+        `Command '${command}' is not supported by the server. ` +
+          `Supported commands: ${supportedCommands.join(", ")}`,
+      );
+    }
+    this.protocolHandler.sendCommand(command, params);
+  }
+
+  // ========================================
+  // State getters
+  // ========================================
+
+  get isPlaying(): boolean {
+    return this.stateManager.isPlaying;
+  }
+
+  get volume(): number {
+    return this.stateManager.volume;
+  }
+
+  get muted(): boolean {
+    return this.stateManager.muted;
+  }
+
+  get playerState(): PlayerState {
+    return this.stateManager.playerState;
+  }
+
+  get currentFormat(): StreamFormat | null {
+    return this.stateManager.currentStreamFormat;
+  }
+
+  get isConnected(): boolean {
+    return this.wsManager.isConnected();
+  }
+
+  /** Time-sync summary; offset/error are reported in milliseconds. */
+  get timeSyncInfo(): { synced: boolean; offset: number; error: number } {
+    return {
+      synced: this.timeFilter.is_synchronized,
+      offset: Math.round(this.timeFilter.offset / 1000),
+      error: Math.round(this.timeFilter.error / 1000),
+    };
+  }
+
+  getCurrentServerTimeUs(): number {
+    return this.timeFilter.computeServerTime(
+      Math.floor(performance.now() * 1000),
+    );
+  }
+
+  /**
+   * Current track progress extrapolated from the last metadata update and
+   * the synchronized server clock, or null when no progress is known.
+   */
+  get trackProgress(): {
+    positionMs: number;
+    durationMs: number;
+    playbackSpeed: number;
+  } | null {
+    const metadata = this.stateManager.serverState.metadata;
+    if (!metadata?.progress || metadata.timestamp === undefined) {
+      return null;
+    }
+
+    const serverTimeUs = this.getCurrentServerTimeUs();
+    const elapsedUs = serverTimeUs - metadata.timestamp;
+    const positionMs =
+      metadata.progress.track_progress +
+      (elapsedUs * metadata.progress.playback_speed) / 1_000_000;
+
+    return {
+      positionMs: Math.max(
+        0,
+        Math.min(positionMs, metadata.progress.track_duration),
+      ),
+      durationMs: metadata.progress.track_duration,
+      playbackSpeed: metadata.progress.playback_speed / 1000,
+    };
+  }
+
+  // ========================================
+  // Internal accessors (for SendspinPlayer)
+  // ========================================
+
+  /** @internal */
+  get _stateManager(): StateManager {
+    return this.stateManager;
+  }
+
+  /** @internal */
+  get _timeFilter(): SendspinTimeFilter {
+    return this.timeFilter;
+  }
+}
diff --git a/src/core/protocol-handler.ts b/src/core/protocol-handler.ts
new file mode 100644
index 0000000..c5378b1
--- /dev/null
+++ b/src/core/protocol-handler.ts
@@ -0,0 +1,342 @@
+import type { SendspinTimeFilter } from "./time-filter";
+import type {
+ ClientCommand,
+ ClientGoodbye,
+ ClientHello,
+ ClientState,
+ Codec,
+ ControllerCommand,
+ ControllerCommands,
+ GoodbyeReason,
+ GroupUpdate,
+ MessageType,
+ ServerCommand,
+ ServerMessage,
+ ServerState,
+ ServerTime,
+ StreamClear,
+ StreamEnd,
+ StreamStart,
+} from "../types";
+import type { StreamHandler } from "../internal-types";
+import type { StateManager } from "./state-manager";
+import type { WebSocketManager } from "./websocket-manager";
+import { TimeSyncManager } from "./time-sync-manager";
+import { getSupportedFormats } from "./codec-support";
+import { clampSyncDelayMs } from "../sync-delay";
+
// Constants
const STATE_UPDATE_INTERVAL = 5000; // cadence of periodic client/state updates, ms
+
/** Optional configuration for {@link ProtocolHandler}. */
export interface ProtocolHandlerConfig {
  /** Human-readable client name sent in client/hello (default: "Sendspin Player"). */
  clientName?: string;
  /** Codecs advertised to the server (default: opus, flac, pcm). */
  codecs?: Codec[];
  /** Advertised audio buffer capacity in bytes (default: 5MB). */
  bufferCapacity?: number;
  /** When true, volume/mute server commands are forwarded to onVolumeCommand. */
  useHardwareVolume?: boolean;
  /** Called with the commanded volume/mute when useHardwareVolume is enabled. */
  onVolumeCommand?: (volume: number, muted: boolean) => void;
  /** Called with the clamped delay after a set_static_delay command. */
  onDelayCommand?: (delayMs: number) => void;
  /** Reads current hardware volume/mute for periodic client/state updates. */
  getExternalVolume?: () => { volume: number; muted: boolean };
}
+
+export class ProtocolHandler {
+ private clientName: string;
+ private codecs: Codec[];
+ private bufferCapacity: number;
+ private useHardwareVolume: boolean;
+ private onVolumeCommand?: (volume: number, muted: boolean) => void;
+ private onDelayCommand?: (delayMs: number) => void;
+ private getExternalVolume?: () => { volume: number; muted: boolean };
+ private timeSyncManager: TimeSyncManager;
+
+ constructor(
+ private playerId: string,
+ private wsManager: WebSocketManager,
+ private streamHandler: StreamHandler,
+ private stateManager: StateManager,
+ private timeFilter: SendspinTimeFilter,
+ config: ProtocolHandlerConfig = {},
+ ) {
+ this.clientName = config.clientName ?? "Sendspin Player";
+ this.codecs = config.codecs ?? ["opus", "flac", "pcm"];
+ this.bufferCapacity = config.bufferCapacity ?? 1024 * 1024 * 5; // 5MB default
+ this.useHardwareVolume = config.useHardwareVolume ?? false;
+ this.onVolumeCommand = config.onVolumeCommand;
+ this.onDelayCommand = config.onDelayCommand;
+ this.getExternalVolume = config.getExternalVolume;
+ this.timeSyncManager = new TimeSyncManager(
+ wsManager,
+ stateManager,
+ timeFilter,
+ );
+ }
+
+ // Handle WebSocket messages
+ handleMessage(event: MessageEvent): void {
+ if (typeof event.data === "string") {
+ // JSON message
+ const message = JSON.parse(event.data) as ServerMessage;
+ this.handleServerMessage(message);
+ } else if (event.data instanceof ArrayBuffer) {
+ // Binary message (audio chunk)
+ this.streamHandler.handleBinaryMessage(event.data);
+ } else if (event.data instanceof Blob) {
+ // Convert Blob to ArrayBuffer
+ event.data.arrayBuffer().then((buffer) => {
+ this.streamHandler.handleBinaryMessage(buffer);
+ });
+ }
+ }
+
+ // Handle server messages
+ private handleServerMessage(message: ServerMessage): void {
+ switch (message.type) {
+ case "server/hello":
+ this.handleServerHello();
+ break;
+
+ case "server/time":
+ this.timeSyncManager.handleServerTime(message as ServerTime);
+ break;
+
+ case "stream/start":
+ this.handleStreamStart(message as StreamStart);
+ break;
+
+ case "stream/clear":
+ this.handleStreamClear(message as StreamClear);
+ break;
+
+ case "stream/end":
+ this.handleStreamEnd(message as StreamEnd);
+ break;
+
+ case "server/command":
+ this.handleServerCommand(message as ServerCommand);
+ break;
+
+ case "server/state":
+ this.stateManager.updateServerState((message as ServerState).payload);
+ break;
+
+ case "group/update":
+ this.stateManager.updateGroupState((message as GroupUpdate).payload);
+ break;
+ }
+ }
+
+ // Handle server hello
+ private handleServerHello(): void {
+ console.log("Sendspin: Connected to server");
+ // Per spec: Send initial client/state immediately after server/hello
+ this.sendStateUpdate();
+ // Start time synchronization with fixed bursts.
+ this.timeSyncManager.startAndSchedule();
+
+ // Start periodic state updates
+ const stateInterval = globalThis.setInterval(
+ () => this.sendStateUpdate(),
+ STATE_UPDATE_INTERVAL,
+ );
+ this.stateManager.setStateUpdateInterval(stateInterval);
+ }
+
+ // Restart the periodic state update interval.
+ // Called after volume commands to prevent a pending periodic update
+ // from sending stale hardware volume shortly after the command response.
+ private restartStateUpdateInterval(): void {
+ const newInterval = globalThis.setInterval(
+ () => this.sendStateUpdate(),
+ STATE_UPDATE_INTERVAL,
+ );
+ this.stateManager.setStateUpdateInterval(newInterval);
+ }
+
+ stopTimeSync(): void {
+ this.timeSyncManager.stop();
+ }
+
+ private handleStreamStart(message: StreamStart): void {
+ const isFormatUpdate = this.stateManager.currentStreamFormat !== null;
+
+ this.stateManager.currentStreamFormat = message.payload.player;
+ console.log(
+ isFormatUpdate
+ ? "Sendspin: Stream format updated"
+ : "Sendspin: Stream started",
+ this.stateManager.currentStreamFormat,
+ );
+ console.log(
+ `Sendspin: Codec=${this.stateManager.currentStreamFormat.codec.toUpperCase()}, ` +
+ `SampleRate=${this.stateManager.currentStreamFormat.sample_rate}Hz, ` +
+ `Channels=${this.stateManager.currentStreamFormat.channels}, ` +
+ `BitDepth=${this.stateManager.currentStreamFormat.bit_depth}bit`,
+ );
+
+ this.streamHandler.handleStreamStart(
+ this.stateManager.currentStreamFormat,
+ isFormatUpdate,
+ );
+
+ this.stateManager.isPlaying = true;
+
+ // Explicitly set playbackState for Android (if mediaSession available)
+ if (typeof navigator !== "undefined" && navigator.mediaSession) {
+ navigator.mediaSession.playbackState = "playing";
+ }
+ }
+
+ private handleStreamClear(message: StreamClear): void {
+ const roles = message.payload.roles;
+ if (!roles || roles.includes("player")) {
+ console.log("Sendspin: Stream clear (seek)");
+ this.streamHandler.handleStreamClear();
+ }
+ }
+
+ private handleStreamEnd(message: StreamEnd): void {
+ const roles = message.payload?.roles;
+ if (!roles || roles.includes("player")) {
+ console.log("Sendspin: Stream ended");
+ this.streamHandler.handleStreamEnd();
+
+ this.stateManager.currentStreamFormat = null;
+ this.stateManager.isPlaying = false;
+
+ if (typeof navigator !== "undefined" && navigator.mediaSession) {
+ navigator.mediaSession.playbackState = "paused";
+ }
+
+ this.sendStateUpdate();
+ }
+ }
+
+ // Handle server commands
+ private handleServerCommand(message: ServerCommand): void {
+ const playerCommand = message.payload.player;
+ if (!playerCommand) return;
+
+ switch (playerCommand.command) {
+ case "volume":
+ // Set volume command
+ if (playerCommand.volume !== undefined) {
+ this.stateManager.volume = playerCommand.volume;
+ this.streamHandler.handleVolumeUpdate();
+ // Notify external handler for hardware volume
+ if (this.useHardwareVolume && this.onVolumeCommand) {
+ this.onVolumeCommand(playerCommand.volume, this.stateManager.muted);
+ }
+ }
+ break;
+
+ case "mute":
+ // Mute/unmute command - uses boolean mute field
+ if (playerCommand.mute !== undefined) {
+ this.stateManager.muted = playerCommand.mute;
+ this.streamHandler.handleVolumeUpdate();
+ // Notify external handler for hardware volume
+ if (this.useHardwareVolume && this.onVolumeCommand) {
+ this.onVolumeCommand(this.stateManager.volume, playerCommand.mute);
+ }
+ }
+ break;
+
+ case "set_static_delay": {
+ const delay = playerCommand.static_delay_ms;
+ if (typeof delay === "number" && isFinite(delay)) {
+ const clamped = clampSyncDelayMs(delay);
+ this.streamHandler.handleSyncDelayChange(clamped);
+ this.onDelayCommand?.(clamped);
+ }
+ break;
+ }
+ }
+
+ // Reset periodic timer first, then send state with commanded values.
+ // Skip hardware read to avoid race where hardware hasn't applied the volume yet.
+ this.restartStateUpdateInterval();
+ this.sendStateUpdate(true);
+ }
+
+ // Send client hello with player identification
+ sendClientHello(): void {
+ const hello: ClientHello = {
+ type: "client/hello" as MessageType.CLIENT_HELLO,
+ payload: {
+ client_id: this.playerId,
+ name: this.clientName,
+ version: 1,
+ supported_roles: ["player@v1", "controller@v1", "metadata@v1"],
+ device_info: {
+ product_name: "Web Browser",
+ manufacturer:
+ (typeof navigator !== "undefined" && navigator.vendor) || "Unknown",
+ software_version:
+ (typeof navigator !== "undefined" && navigator.userAgent) ||
+ "Unknown",
+ },
+ "player@v1_support": {
+ supported_formats: getSupportedFormats(this.codecs),
+ buffer_capacity: this.bufferCapacity,
+ supported_commands: ["volume", "mute"],
+ },
+ },
+ };
+ this.wsManager.send(hello);
+ }
+
+ // Send state update
+ // When skipHardwareRead is true, use stateManager values instead of reading from hardware.
+ // This avoids race conditions when responding to volume commands.
+ sendStateUpdate(skipHardwareRead = false): void {
+ let volume = this.stateManager.volume;
+ let muted = this.stateManager.muted;
+ if (!skipHardwareRead && this.useHardwareVolume && this.getExternalVolume) {
+ const externalVol = this.getExternalVolume();
+ volume = externalVol.volume;
+ muted = externalVol.muted;
+ }
+
+ const syncDelayMs = this.streamHandler.getSyncDelayMs();
+ const staticDelayMs = clampSyncDelayMs(syncDelayMs);
+
+ const message: ClientState = {
+ type: "client/state" as MessageType.CLIENT_STATE,
+ payload: {
+ player: {
+ state: this.stateManager.playerState,
+ volume,
+ muted,
+ static_delay_ms: staticDelayMs,
+ supported_commands: ["set_static_delay"],
+ },
+ },
+ };
+ this.wsManager.send(message);
+ }
+
+ // Send goodbye message before disconnecting
+ sendGoodbye(reason: GoodbyeReason): void {
+ this.wsManager.send({
+ type: "client/goodbye" as MessageType.CLIENT_GOODBYE,
+ payload: {
+ reason,
+ },
+ } satisfies ClientGoodbye);
+ }
+
+ // Send controller command to server
+ sendCommand(
+ command: T,
+ params: ControllerCommands[T],
+ ): void {
+ this.wsManager.send({
+ type: "client/command" as MessageType.CLIENT_COMMAND,
+ payload: {
+ controller: {
+ command,
+ ...(params as object),
+ },
+ },
+ } satisfies ClientCommand);
+ }
+}
diff --git a/src/state-manager.ts b/src/core/state-manager.ts
similarity index 97%
rename from src/state-manager.ts
rename to src/core/state-manager.ts
index 98c41ad..a02054d 100644
--- a/src/state-manager.ts
+++ b/src/core/state-manager.ts
@@ -3,7 +3,7 @@ import type {
StreamFormat,
ServerStatePayload,
GroupUpdatePayload,
-} from "./types";
+} from "../types";
/**
* Apply a diff to an object, returning a new copy.
@@ -56,8 +56,8 @@ export class StateManager {
private _groupState: GroupUpdatePayload = {};
// Interval references for cleanup
- private timeSyncInterval: number | null = null;
- private stateUpdateInterval: number | null = null;
+ private timeSyncInterval: ReturnType | null = null;
+ private stateUpdateInterval: ReturnType | null = null;
// Callback for state changes
private onStateChangeCallback?: (state: {
diff --git a/src/time-filter.ts b/src/core/time-filter.ts
similarity index 100%
rename from src/time-filter.ts
rename to src/core/time-filter.ts
diff --git a/src/core/time-sync-manager.ts b/src/core/time-sync-manager.ts
new file mode 100644
index 0000000..85de19b
--- /dev/null
+++ b/src/core/time-sync-manager.ts
@@ -0,0 +1,216 @@
+import type { SendspinTimeFilter } from "./time-filter";
+import type { StateManager } from "./state-manager";
+import type { WebSocketManager } from "./websocket-manager";
+import type { ClientTime, MessageType, ServerTime } from "../types";
+
// Number of client/time probes sent back-to-back in one burst.
const TIME_SYNC_BURST_SIZE = 8;
// Fixed interval between bursts, ms.
const TIME_SYNC_BURST_INTERVAL_MS = 10000;
// Abort the current burst if a probe's server/time reply takes longer than this, ms.
const TIME_SYNC_REQUEST_TIMEOUT_MS = 2000;
// Keep this many lowest-RTT samples before taking the median offset.
const TIME_SYNC_ROBUST_SELECTION_COUNT = 3;
+
// One completed probe's measurements, all in microseconds.
interface TimeSyncSample {
  measurement: number; // NTP-style offset estimate: ((T2-T1)+(T3-T4))/2
  maxError: number; // half the round-trip term, floored at 1000µs
  t4: number; // client receive timestamp (performance.now()-based)
  rttTerm: number; // (T4-T1)-(T3-T2) clamped at 0; used for robust selection
}
+
+export class TimeSyncManager {
+ private timeSyncBurstActive = false;
+ private timeSyncBurstSentCount = 0;
+ private timeSyncInFlightClientTransmitted: number | null = null;
+ private timeSyncInFlightTimeout: ReturnType | null = null;
+ private timeSyncBurstSamples: TimeSyncSample[] = [];
+
+ constructor(
+ private wsManager: WebSocketManager,
+ private stateManager: StateManager,
+ private timeFilter: SendspinTimeFilter,
+ ) {}
+
+ // Start an initial burst and schedule recurring bursts.
+ startAndSchedule(): void {
+ this.stop();
+ this.startTimeSyncBurstIfIdle();
+ this.scheduleNextTimeSyncBurstTick();
+ }
+
+ // Schedule the next fixed 10s burst tick.
+ private scheduleNextTimeSyncBurstTick(): void {
+ const timeSyncTimeout = globalThis.setTimeout(() => {
+ this.startTimeSyncBurstIfIdle();
+ this.scheduleNextTimeSyncBurstTick();
+ }, TIME_SYNC_BURST_INTERVAL_MS);
+ this.stateManager.setTimeSyncInterval(timeSyncTimeout);
+ }
+
+ private startTimeSyncBurstIfIdle(): void {
+ if (this.timeSyncBurstActive || !this.wsManager.isConnected()) {
+ return;
+ }
+
+ this.timeSyncBurstActive = true;
+ this.timeSyncBurstSentCount = 0;
+ this.timeSyncBurstSamples = [];
+ this.timeSyncInFlightClientTransmitted = null;
+ this.sendNextTimeSyncBurstProbe();
+ }
+
+ private sendNextTimeSyncBurstProbe(): void {
+ if (
+ !this.timeSyncBurstActive ||
+ this.timeSyncInFlightClientTransmitted !== null ||
+ !this.wsManager.isConnected()
+ ) {
+ return;
+ }
+
+ if (this.timeSyncBurstSentCount >= TIME_SYNC_BURST_SIZE) {
+ this.finalizeTimeSyncBurst();
+ return;
+ }
+
+ const clientTransmitted = this.sendTimeSync();
+ this.timeSyncBurstSentCount += 1;
+ this.timeSyncInFlightClientTransmitted = clientTransmitted;
+ this.armTimeSyncProbeTimeout(clientTransmitted);
+ }
+
+ private armTimeSyncProbeTimeout(expectedClientTransmitted: number): void {
+ this.clearTimeSyncProbeTimeout();
+ this.timeSyncInFlightTimeout = globalThis.setTimeout(() => {
+ this.handleTimeSyncProbeTimeout(expectedClientTransmitted);
+ }, TIME_SYNC_REQUEST_TIMEOUT_MS);
+ }
+
+ private clearTimeSyncProbeTimeout(): void {
+ if (this.timeSyncInFlightTimeout !== null) {
+ clearTimeout(this.timeSyncInFlightTimeout);
+ this.timeSyncInFlightTimeout = null;
+ }
+ }
+
+ private handleTimeSyncProbeTimeout(expectedClientTransmitted: number): void {
+ if (
+ !this.timeSyncBurstActive ||
+ this.timeSyncInFlightClientTransmitted !== expectedClientTransmitted
+ ) {
+ return;
+ }
+
+ console.warn("Sendspin: Time sync probe timed out, aborting current burst");
+ this.abortTimeSyncBurst();
+ }
+
+ private finalizeTimeSyncBurst(): void {
+ this.clearTimeSyncProbeTimeout();
+
+ const candidate = this.selectTimeSyncBurstCandidate();
+ if (candidate) {
+ this.timeFilter.update(
+ candidate.measurement,
+ candidate.maxError,
+ candidate.t4,
+ );
+ }
+
+ this.timeSyncBurstActive = false;
+ this.timeSyncBurstSentCount = 0;
+ this.timeSyncInFlightClientTransmitted = null;
+ this.timeSyncBurstSamples = [];
+ }
+
+ private selectTimeSyncBurstCandidate(): TimeSyncSample | null {
+ if (this.timeSyncBurstSamples.length === 0) {
+ return null;
+ }
+
+ const topRttSamples = [...this.timeSyncBurstSamples]
+ .sort((a, b) => a.rttTerm - b.rttTerm)
+ .slice(
+ 0,
+ Math.min(
+ TIME_SYNC_ROBUST_SELECTION_COUNT,
+ this.timeSyncBurstSamples.length,
+ ),
+ );
+ const sortedByMeasurement = [...topRttSamples].sort(
+ (a, b) => a.measurement - b.measurement,
+ );
+ return sortedByMeasurement[Math.floor(sortedByMeasurement.length / 2)];
+ }
+
+ private abortTimeSyncBurst(): void {
+ this.clearTimeSyncProbeTimeout();
+ this.timeSyncBurstActive = false;
+ this.timeSyncBurstSentCount = 0;
+ this.timeSyncInFlightClientTransmitted = null;
+ this.timeSyncBurstSamples = [];
+ }
+
+ // Stop all time sync activity (interval + in-flight burst).
+ stop(): void {
+ this.stateManager.clearTimeSyncInterval();
+ this.abortTimeSyncBurst();
+ }
+
+ // Handle server/time response
+ handleServerTime(message: ServerTime): void {
+ if (
+ !this.timeSyncBurstActive ||
+ this.timeSyncInFlightClientTransmitted === null
+ ) {
+ return;
+ }
+
+ // Per spec: client_transmitted (T1), server_received (T2), server_transmitted (T3)
+ const T1 = message.payload.client_transmitted;
+ if (T1 !== this.timeSyncInFlightClientTransmitted) {
+ console.warn(
+ "Sendspin: Ignoring out-of-order time response",
+ T1,
+ this.timeSyncInFlightClientTransmitted,
+ );
+ return;
+ }
+
+ const T4 = Math.floor(performance.now() * 1000); // client received time
+ const T2 = message.payload.server_received;
+ const T3 = message.payload.server_transmitted;
+
+ // NTP offset calculation: measurement = ((T2 - T1) + (T3 - T4)) / 2
+ const measurement = (T2 - T1 + (T3 - T4)) / 2;
+
+ // Max error (half of round-trip time): max_error = ((T4 - T1) - (T3 - T2)) / 2
+ const rttTerm = Math.max(0, T4 - T1 - (T3 - T2));
+ const maxError = Math.max(1000, rttTerm / 2);
+ this.timeSyncBurstSamples.push({
+ measurement,
+ maxError,
+ t4: T4,
+ rttTerm,
+ });
+
+ this.clearTimeSyncProbeTimeout();
+ this.timeSyncInFlightClientTransmitted = null;
+
+ if (this.timeSyncBurstSentCount >= TIME_SYNC_BURST_SIZE) {
+ this.finalizeTimeSyncBurst();
+ return;
+ }
+
+ this.sendNextTimeSyncBurstProbe();
+ }
+
+ // Send time synchronization message
+ sendTimeSync(clientTimeUs = Math.floor(performance.now() * 1000)): number {
+ const message: ClientTime = {
+ type: "client/time" as MessageType.CLIENT_TIME,
+ payload: {
+ client_transmitted: clientTimeUs,
+ },
+ };
+ this.wsManager.send(message);
+ return clientTimeUs;
+ }
+}
diff --git a/src/websocket-manager.ts b/src/core/websocket-manager.ts
similarity index 54%
rename from src/websocket-manager.ts
rename to src/core/websocket-manager.ts
index 449edc1..34ecd3c 100644
--- a/src/websocket-manager.ts
+++ b/src/core/websocket-manager.ts
@@ -1,6 +1,7 @@
+import type { ClientMessage } from "../types";
export class WebSocketManager {
private ws: WebSocket | null = null;
- private reconnectTimeout: number | null = null;
+ private reconnectTimeout: ReturnType | null = null;
private shouldReconnect: boolean = false;
// Event handlers
@@ -11,6 +12,102 @@ export class WebSocketManager {
constructor() {}
+ /**
+ * Adopt an existing WebSocket connection.
+ * The caller is responsible for having already opened the socket.
+ * Reconnection is disabled for adopted sockets.
+ *
+ * Returns a Promise that resolves once the adopted socket is open. Throws
+ * synchronously if the socket is already CLOSING or CLOSED.
+ */
+ adopt(
+ ws: WebSocket,
+ onOpen?: () => void,
+ onMessage?: (event: MessageEvent) => void,
+ onError?: (error: Event) => void,
+ onClose?: () => void,
+ ): Promise {
+ if (
+ ws.readyState !== WebSocket.OPEN &&
+ ws.readyState !== WebSocket.CONNECTING
+ ) {
+ throw new Error(
+ `Sendspin: Cannot adopt WebSocket in readyState ${ws.readyState} (must be OPEN or CONNECTING)`,
+ );
+ }
+
+ // Store handlers
+ this.onOpenHandler = onOpen;
+ this.onMessageHandler = onMessage;
+ this.onErrorHandler = onError;
+ this.onCloseHandler = onClose;
+
+ // Detach handlers from any existing socket so its async close event
+ // cannot fire into the newly-adopted session.
+ if (this.ws) {
+ const old = this.ws;
+ old.onopen = null;
+ old.onmessage = null;
+ old.onerror = null;
+ old.onclose = null;
+ old.close();
+ this.ws = null;
+ }
+
+ this.ws = ws;
+ this.ws.binaryType = "arraybuffer";
+ // No auto-reconnect for externally-managed sockets
+ this.shouldReconnect = false;
+
+ this.ws.onmessage = (event: MessageEvent) => {
+ if (this.onMessageHandler) {
+ this.onMessageHandler(event);
+ }
+ };
+
+ this.ws.onerror = (error: Event) => {
+ console.error("Sendspin: WebSocket error", error);
+ if (this.onErrorHandler) {
+ this.onErrorHandler(error);
+ }
+ };
+
+ this.ws.onclose = () => {
+ console.log("Sendspin: WebSocket disconnected");
+ if (this.onCloseHandler) {
+ this.onCloseHandler();
+ }
+ };
+
+ return new Promise((resolve, reject) => {
+ const fireOpen = () => {
+ if (this.onOpenHandler) {
+ this.onOpenHandler();
+ }
+ resolve();
+ };
+
+ if (ws.readyState === WebSocket.OPEN) {
+ console.log("Sendspin: Adopted open WebSocket");
+ fireOpen();
+ return;
+ }
+
+ // CONNECTING: wait for open or early close.
+ const prevOnClose = this.ws!.onclose;
+ this.ws!.onopen = () => {
+ console.log("Sendspin: Adopted WebSocket connected");
+ fireOpen();
+ };
+ this.ws!.onclose = (event: CloseEvent) => {
+ if (prevOnClose) {
+ prevOnClose.call(this.ws!, event);
+ }
+ reject(new Error("Sendspin: Adopted WebSocket closed before opening"));
+ };
+ });
+ }
+
// Connect to WebSocket server
async connect(
url: string,
@@ -85,7 +182,7 @@ export class WebSocketManager {
clearTimeout(this.reconnectTimeout);
}
- this.reconnectTimeout = window.setTimeout(() => {
+ this.reconnectTimeout = globalThis.setTimeout(() => {
if (this.shouldReconnect) {
console.log("Sendspin: Attempting to reconnect...");
this.connect(
@@ -117,7 +214,7 @@ export class WebSocketManager {
}
// Send message to server (JSON)
- send(message: any): void {
+ send(message: ClientMessage): void {
if (this.ws && this.ws.readyState === WebSocket.OPEN) {
this.ws.send(JSON.stringify(message));
} else {
diff --git a/src/index.ts b/src/index.ts
index 8258943..0298215 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -1,8 +1,5 @@
-import { AudioProcessor } from "./audio-processor";
-import { ProtocolHandler } from "./protocol-handler";
-import { StateManager } from "./state-manager";
-import { WebSocketManager } from "./websocket-manager";
-import { SendspinTimeFilter } from "./time-filter";
+import { SendspinCore } from "./core/core";
+import { AudioScheduler } from "./audio/scheduler";
import { SILENT_AUDIO_SRC } from "./silent-audio.generated";
import type {
SendspinPlayerConfig,
@@ -67,43 +64,25 @@ function getDefaultSyncDelay(): number {
return 200;
}
-function generateRandomId(): string {
- return Math.random().toString(36).substring(2, 6);
-}
-
// Add a small cushion beyond the measured buffered runway so delayed timer
// delivery does not cut playback off just before the last scheduled audio ends.
const DISCONNECT_PLAYBACK_RESET_GRACE_MS = 250;
export class SendspinPlayer {
- private wsManager: WebSocketManager;
- private audioProcessor: AudioProcessor;
- private protocolHandler: ProtocolHandler;
- private stateManager: StateManager;
- private timeFilter: SendspinTimeFilter;
+ private core: SendspinCore;
+ private scheduler: AudioScheduler;
+ private ownsAudioElement = false;
private disconnectPlaybackResetTimeout: ReturnType | null =
null;
private suppressDisconnectPlaybackReset = false;
- private config: SendspinPlayerConfig;
- private wsUrl: string = "";
- private ownsAudioElement = false;
-
constructor(config: SendspinPlayerConfig) {
- // Apply defaults for playerId and clientName (share same random ID)
- const randomId = generateRandomId();
- const playerId = config.playerId ?? `sendspin-js-${randomId}`;
- const clientName = config.clientName ?? `Sendspin JS Client (${randomId})`;
-
// Auto-detect platform
const isAndroid = detectIsAndroid();
const isCastRuntime = detectIsCastRuntime();
const isMobile = detectIsMobile();
- // Determine output mode:
- // - If audioElement provided, use media-element
- // - If mobile (iOS/Android), default to media-element
- // - Otherwise, use direct
+ // Determine output mode
const outputMode =
config.audioElement || isMobile ? "media-element" : "direct";
this.ownsAudioElement =
@@ -115,63 +94,93 @@ export class SendspinPlayer {
);
}
- // Store config with resolved defaults
- this.config = {
- ...config,
- playerId,
- clientName,
- };
-
- // Initialize time filter (shared between audio processor and protocol handler)
- this.timeFilter = new SendspinTimeFilter(0, 1.1, 2.0, 1e-12);
-
- // Initialize state manager with callback
- this.stateManager = new StateManager(config.onStateChange);
-
- // Initialize audio processor
+ const syncDelay = config.syncDelay ?? getDefaultSyncDelay();
+
+ // Create core (protocol + decoding)
+ this.core = new SendspinCore({
+ playerId: config.playerId,
+ baseUrl: config.baseUrl,
+ clientName: config.clientName,
+ webSocket: config.webSocket,
+ codecs: config.codecs,
+ bufferCapacity:
+ config.bufferCapacity ??
+ (outputMode === "media-element" ? 1024 * 1024 * 5 : 1024 * 1024 * 1.5),
+ syncDelay,
+ useHardwareVolume: config.useHardwareVolume,
+ onVolumeCommand: config.onVolumeCommand,
+ onDelayCommand: config.onDelayCommand,
+ getExternalVolume: config.getExternalVolume,
+ onStateChange: config.onStateChange,
+ });
+
+ // Create scheduler (Web Audio playback)
let storage: SendspinStorage | null = null;
if (config.storage !== undefined) {
storage = config.storage;
} else if (typeof localStorage !== "undefined") {
storage = localStorage;
}
- this.audioProcessor = new AudioProcessor(
- this.stateManager,
- this.timeFilter,
+
+ this.scheduler = new AudioScheduler({
+ stateManager: this.core._stateManager,
+ timeFilter: this.core._timeFilter,
outputMode,
- config.audioElement,
+ audioElement: config.audioElement,
isAndroid,
isCastRuntime,
- this.ownsAudioElement,
- isAndroid ? SILENT_AUDIO_SRC : undefined,
- config.syncDelay ?? getDefaultSyncDelay(),
- config.useHardwareVolume ?? false,
- config.correctionMode ?? "sync",
+ ownsAudioElement: this.ownsAudioElement,
+ silentAudioSrc: isAndroid ? SILENT_AUDIO_SRC : undefined,
+ syncDelayMs: syncDelay,
+ useHardwareVolume: config.useHardwareVolume ?? false,
+ correctionMode: config.correctionMode ?? "sync",
storage,
- config.useOutputLatencyCompensation ?? true,
- );
+ useOutputLatencyCompensation: config.useOutputLatencyCompensation ?? true,
+ correctionThresholds: config.correctionThresholds,
+ });
- // Initialize WebSocket manager
- this.wsManager = new WebSocketManager();
-
- // Initialize protocol handler
- this.protocolHandler = new ProtocolHandler(
- playerId,
- this.wsManager,
- this.audioProcessor,
- this.stateManager,
- this.timeFilter,
- {
- clientName,
- codecs: config.codecs,
- bufferCapacity: config.bufferCapacity,
- useHardwareVolume: config.useHardwareVolume,
- onVolumeCommand: config.onVolumeCommand,
- onDelayCommand: config.onDelayCommand,
- getExternalVolume: config.getExternalVolume,
- useOutputLatencyCompensation: config.useOutputLatencyCompensation,
- },
- );
+ // Wire core events to scheduler
+ this.core.onAudioData = (chunk) => {
+ this.scheduler.handleDecodedChunk(chunk);
+ };
+
+ this.core.onStreamStart = (format, isFormatUpdate) => {
+ this.scheduler.initAudioContext();
+ this.scheduler.resumeAudioContext();
+ if (!isFormatUpdate) {
+ this.scheduler.clearBuffers();
+ }
+ this.scheduler.startAudioElement();
+ };
+
+ this.core.onStreamClear = () => {
+ this.scheduler.clearBuffers();
+ };
+
+ this.core.onStreamEnd = () => {
+ this.scheduler.clearBuffers();
+ this.scheduler.stopAudioElement();
+ };
+
+ this.core.onVolumeUpdate = () => {
+ this.scheduler.updateVolume();
+ };
+
+ this.core.onSyncDelayChange = (delayMs) => {
+ this.scheduler.setSyncDelay(delayMs);
+ };
+
+ // Wire connection lifecycle for disconnect playback deferral
+ this.core.onConnectionOpen = () => {
+ this.cancelPendingDisconnectPlaybackReset();
+ };
+
+ this.core.onConnectionClose = () => {
+ if (this.suppressDisconnectPlaybackReset) {
+ return;
+ }
+ this.scheduleDisconnectPlaybackReset();
+ };
}
private cancelPendingDisconnectPlaybackReset(): void {
@@ -183,13 +192,12 @@ export class SendspinPlayer {
private resetPlaybackStateAfterDisconnect(): void {
this.disconnectPlaybackResetTimeout = null;
- if (this.wsManager.isConnected()) {
+ if (this.core.isConnected) {
return;
}
- this.audioProcessor.clearBuffers();
- this.stateManager.currentStreamFormat = null;
- this.stateManager.isPlaying = false;
- this.audioProcessor.stopAudioElement();
+ this.scheduler.clearBuffers();
+ this.core.resetPlaybackState();
+ this.scheduler.stopAudioElement();
if (typeof navigator !== "undefined" && navigator.mediaSession) {
navigator.mediaSession.playbackState = "paused";
}
@@ -198,7 +206,7 @@ export class SendspinPlayer {
private scheduleDisconnectPlaybackReset(): void {
this.cancelPendingDisconnectPlaybackReset();
- const runwaySec = this.audioProcessor.measureBufferedPlaybackRunwaySec();
+ const runwaySec = this.scheduler.measureBufferedPlaybackRunwaySec();
if (runwaySec <= 0) {
this.resetPlaybackStateAfterDisconnect();
return;
@@ -215,76 +223,21 @@ export class SendspinPlayer {
// Connect to Sendspin server
async connect(): Promise {
this.suppressDisconnectPlaybackReset = false;
-
- // Build WebSocket URL
- const url = new URL(this.config.baseUrl);
- const wsProtocol = url.protocol === "https:" ? "wss:" : "ws:";
- this.wsUrl = `${wsProtocol}//${url.host}/sendspin`;
-
- // Connect to WebSocket
- await this.wsManager.connect(
- this.wsUrl,
- // onOpen
- () => {
- this.cancelPendingDisconnectPlaybackReset();
- console.log("Sendspin: Using player_id:", this.config.playerId);
- this.protocolHandler.sendClientHello();
- },
- // onMessage
- (event: MessageEvent) => {
- this.protocolHandler.handleMessage(event);
- },
- // onError
- (error: Event) => {
- console.error("Sendspin: WebSocket error", error);
- },
- // onClose
- () => {
- this.protocolHandler.stopTimeSync();
- console.log("Sendspin: Connection closed");
- if (this.suppressDisconnectPlaybackReset) {
- return;
- }
- this.stateManager.clearStateUpdateInterval();
- this.scheduleDisconnectPlaybackReset();
- },
- );
+ return this.core.connect();
}
/**
* Disconnect from Sendspin server
* @param reason - Optional reason for disconnecting (default: 'shutdown')
- * - 'another_server': Switching to a different Sendspin server
- * - 'shutdown': Client is shutting down
- * - 'restart': Client is restarting and will reconnect
- * - 'user_request': User explicitly requested to disconnect
*/
disconnect(reason: GoodbyeReason = "shutdown"): void {
this.cancelPendingDisconnectPlaybackReset();
this.suppressDisconnectPlaybackReset = true;
- // Send goodbye message if connected
- if (this.wsManager.isConnected()) {
- this.protocolHandler.sendGoodbye(reason);
- }
-
- // Stop time sync burst scheduler and in-flight timeout state
- this.protocolHandler.stopTimeSync();
-
- // Clear intervals
- this.stateManager.clearAllIntervals();
-
- // Disconnect WebSocket
- this.wsManager.disconnect();
-
- // Close audio processor
- this.audioProcessor.close();
+ this.core.disconnect(reason);
- // Reset time filter
- this.timeFilter.reset();
-
- // Reset state
- this.stateManager.reset();
+ // Close scheduler
+ this.scheduler.close();
// Reset MediaSession playbackState (if available)
if (typeof navigator !== "undefined" && navigator.mediaSession) {
@@ -295,34 +248,24 @@ export class SendspinPlayer {
// Set volume (0-100)
setVolume(volume: number): void {
- this.stateManager.volume = volume;
- this.audioProcessor.updateVolume();
- this.protocolHandler.sendStateUpdate();
+ this.core.setVolume(volume);
}
// Set muted state
setMuted(muted: boolean): void {
- this.stateManager.muted = muted;
- this.audioProcessor.updateVolume();
- this.protocolHandler.sendStateUpdate();
+ this.core.setMuted(muted);
}
- // Set static delay (in milliseconds, 0-5000). Positive values schedule playback earlier.
+ // Set static delay (in milliseconds, 0-5000)
setSyncDelay(delayMs: number): void {
- this.audioProcessor.setSyncDelay(delayMs);
- this.protocolHandler.sendStateUpdate();
+ this.core.setSyncDelay(delayMs);
}
/**
* Set the sync correction mode at runtime.
- * @param mode - The correction mode to use:
- * - "sync": Multi-device sync, may use pitch-changing playback-rate adjustments for faster convergence.
- * - "quality": No playback-rate changes; uses sample fixes and tighter resyncs, so expect fewer adjustments but occasional jumps. Starts out of sync until the clock converges. Not recommended for bad networks.
- * - "quality-local": Avoids playback-rate changes; may drift vs. other players and only resyncs
- * as a last resort.
*/
setCorrectionMode(mode: CorrectionMode): void {
- this.audioProcessor.setCorrectionMode(mode);
+ this.scheduler.setCorrectionMode(mode);
}
// ========================================
@@ -331,87 +274,52 @@ export class SendspinPlayer {
/**
* Send a controller command to the server.
- * Use this for playback control when the server manages the audio source.
- *
- * @throws Error if the command is not supported by the server
- *
- * @example
- * // Simple commands (no parameters)
- * player.sendCommand('play');
- * player.sendCommand('pause');
- * player.sendCommand('next');
- * player.sendCommand('previous');
- * player.sendCommand('stop');
- * player.sendCommand('shuffle');
- * player.sendCommand('unshuffle');
- * player.sendCommand('repeat_off');
- * player.sendCommand('repeat_one');
- * player.sendCommand('repeat_all');
- * player.sendCommand('switch');
- *
- * // Commands with required parameters
- * player.sendCommand('volume', { volume: 50 });
- * player.sendCommand('mute', { mute: true });
*/
sendCommand<T extends ControllerCommand>(
command: T,
params: ControllerCommands[T],
): void {
- const supportedCommands =
- this.stateManager.serverState.controller?.supported_commands;
- if (supportedCommands && !supportedCommands.includes(command)) {
- throw new Error(
- `Command '${command}' is not supported by the server. ` +
- `Supported commands: ${supportedCommands.join(", ")}`,
- );
- }
- this.protocolHandler.sendCommand(command, params);
+ this.core.sendCommand(command, params);
}
// Getters for reactive state
get isPlaying(): boolean {
- return this.stateManager.isPlaying;
+ return this.core.isPlaying;
}
get volume(): number {
- return this.stateManager.volume;
+ return this.core.volume;
}
get muted(): boolean {
- return this.stateManager.muted;
+ return this.core.muted;
}
get playerState(): PlayerState {
- return this.stateManager.playerState;
+ return this.core.playerState;
}
get currentFormat(): StreamFormat | null {
- return this.stateManager.currentStreamFormat;
+ return this.core.currentFormat;
}
get isConnected(): boolean {
- return this.wsManager.isConnected();
+ return this.core.isConnected;
}
// Get current correction mode
get correctionMode(): CorrectionMode {
- return this.audioProcessor.correctionMode;
+ return this.scheduler.correctionMode;
}
// Time sync info for debugging
get timeSyncInfo(): { synced: boolean; offset: number; error: number } {
- return {
- synced: this.timeFilter.is_synchronized,
- offset: Math.round(this.timeFilter.offset / 1000), // ms
- error: Math.round(this.timeFilter.error / 1000), // ms
- };
+ return this.core.timeSyncInfo;
}
/** Get current server time in microseconds using synchronized clock */
getCurrentServerTimeUs(): number {
- return this.timeFilter.computeServerTime(
- Math.floor(performance.now() * 1000),
- );
+ return this.core.getCurrentServerTimeUs();
}
/** Get current track progress with real-time position calculation */
@@ -420,27 +328,7 @@ export class SendspinPlayer {
durationMs: number;
playbackSpeed: number;
} | null {
- const metadata = this.stateManager.serverState.metadata;
- if (!metadata?.progress || metadata.timestamp === undefined) {
- return null;
- }
-
- const serverTimeUs = this.getCurrentServerTimeUs();
- const elapsedUs = serverTimeUs - metadata.timestamp;
- // playback_speed is multiplied by 1000 in protocol (1000 = normal speed)
- const positionMs =
- metadata.progress.track_progress +
- (elapsedUs * metadata.progress.playback_speed) / 1_000_000;
-
- return {
- positionMs: Math.max(
- 0,
- Math.min(positionMs, metadata.progress.track_duration),
- ),
- durationMs: metadata.progress.track_duration,
- // Normalize to float (1.0 = normal speed)
- playbackSpeed: metadata.progress.playback_speed / 1000,
- };
+ return this.core.trackProgress;
}
// Sync info for debugging/display
@@ -454,13 +342,16 @@ export class SendspinPlayer {
samplesAdjusted: number;
correctionMode: CorrectionMode;
} {
- return this.audioProcessor.syncInfo;
+ return this.scheduler.syncInfo;
}
}
// Re-export types for convenience
export * from "./types";
-export { SendspinTimeFilter } from "./time-filter";
+export { SendspinTimeFilter } from "./core/time-filter";
+export { SendspinCore } from "./core/core";
+export { SendspinDecoder } from "./audio/decoder";
+export { AudioScheduler } from "./audio/scheduler";
// Export platform detection utilities
export {
diff --git a/src/internal-types.ts b/src/internal-types.ts
new file mode 100644
index 0000000..8a83a6e
--- /dev/null
+++ b/src/internal-types.ts
@@ -0,0 +1,26 @@
+/**
+ * Internal plumbing types shared across SDK modules but not part of the
+ * public API surface. These are intentionally NOT re-exported from index.ts.
+ */
+
+import type { StreamFormat } from "./types";
+
+export interface AudioBufferQueueItem {
+ buffer: AudioBuffer;
+ serverTime: number;
+ generation: number;
+}
+
+/**
+ * Interface for protocol handler to call into the audio subsystem.
+ * Implemented by SendspinCore as the bridge between protocol and audio.
+ */
+export interface StreamHandler {
+ handleBinaryMessage(data: ArrayBuffer): void;
+ handleStreamStart(format: StreamFormat, isFormatUpdate: boolean): void;
+ handleStreamClear(): void;
+ handleStreamEnd(): void;
+ handleVolumeUpdate(): void;
+ handleSyncDelayChange(delayMs: number): void;
+ getSyncDelayMs(): number;
+}
diff --git a/src/protocol-handler.ts b/src/protocol-handler.ts
deleted file mode 100644
index 26cb268..0000000
--- a/src/protocol-handler.ts
+++ /dev/null
@@ -1,629 +0,0 @@
-import type { SendspinTimeFilter } from "./time-filter";
-import type {
- ClientCommand,
- ClientGoodbye,
- ClientHello,
- ClientState,
- ClientTime,
- Codec,
- ControllerCommand,
- ControllerCommands,
- GoodbyeReason,
- GroupUpdate,
- MessageType,
- ServerCommand,
- ServerMessage,
- ServerState,
- ServerTime,
- StreamClear,
- StreamEnd,
- StreamStart,
- SupportedFormat,
-} from "./types";
-import type { AudioProcessor } from "./audio-processor";
-import type { StateManager } from "./state-manager";
-import type { WebSocketManager } from "./websocket-manager";
-
-// Constants
-const STATE_UPDATE_INTERVAL = 5000; // 5 seconds
-const TIME_SYNC_BURST_SIZE = 8;
-const TIME_SYNC_BURST_INTERVAL_MS = 10000;
-const TIME_SYNC_REQUEST_TIMEOUT_MS = 2000;
-const TIME_SYNC_ROBUST_SELECTION_COUNT = 3;
-
-interface TimeSyncSample {
- measurement: number;
- maxError: number;
- t4: number;
- rttTerm: number;
-}
-
-export interface ProtocolHandlerConfig {
- clientName?: string;
- codecs?: Codec[];
- bufferCapacity?: number;
- useHardwareVolume?: boolean;
- onVolumeCommand?: (volume: number, muted: boolean) => void;
- onDelayCommand?: (delayMs: number) => void;
- getExternalVolume?: () => { volume: number; muted: boolean };
- useOutputLatencyCompensation?: boolean;
-}
-
-export class ProtocolHandler {
- private clientName: string;
- private codecs: Codec[];
- private bufferCapacity: number;
- private useHardwareVolume: boolean;
- private useOutputLatencyCompensation: boolean;
- private onVolumeCommand?: (volume: number, muted: boolean) => void;
- private onDelayCommand?: (delayMs: number) => void;
- private getExternalVolume?: () => { volume: number; muted: boolean };
- private timeSyncBurstActive: boolean = false;
- private timeSyncBurstSentCount: number = 0;
- private timeSyncInFlightClientTransmitted: number | null = null;
- private timeSyncInFlightTimeout: number | null = null;
- private timeSyncBurstSamples: TimeSyncSample[] = [];
-
- constructor(
- private playerId: string,
- private wsManager: WebSocketManager,
- private audioProcessor: AudioProcessor,
- private stateManager: StateManager,
- private timeFilter: SendspinTimeFilter,
- config: ProtocolHandlerConfig = {},
- ) {
- this.clientName = config.clientName ?? "Sendspin Player";
- this.codecs = config.codecs ?? ["opus", "flac", "pcm"];
- this.bufferCapacity = config.bufferCapacity ?? 1024 * 1024 * 5; // 5MB default
- this.useHardwareVolume = config.useHardwareVolume ?? false;
- this.useOutputLatencyCompensation =
- config.useOutputLatencyCompensation ?? true;
- this.onVolumeCommand = config.onVolumeCommand;
- this.onDelayCommand = config.onDelayCommand;
- this.getExternalVolume = config.getExternalVolume;
- }
-
- // Handle WebSocket messages
- handleMessage(event: MessageEvent): void {
- if (typeof event.data === "string") {
- // JSON message
- const message = JSON.parse(event.data) as ServerMessage;
- this.handleServerMessage(message);
- } else if (event.data instanceof ArrayBuffer) {
- // Binary message (audio chunk)
- this.audioProcessor.handleBinaryMessage(event.data);
- } else if (event.data instanceof Blob) {
- // Convert Blob to ArrayBuffer
- event.data.arrayBuffer().then((buffer) => {
- this.audioProcessor.handleBinaryMessage(buffer);
- });
- }
- }
-
- // Handle server messages
- private handleServerMessage(message: ServerMessage): void {
- switch (message.type) {
- case "server/hello":
- this.handleServerHello();
- break;
-
- case "server/time":
- this.handleServerTime(message);
- break;
-
- case "stream/start":
- this.handleStreamStart(message as StreamStart);
- break;
-
- case "stream/clear":
- this.handleStreamClear(message as StreamClear);
- break;
-
- case "stream/end":
- this.handleStreamEnd(message as StreamEnd);
- break;
-
- case "server/command":
- this.handleServerCommand(message as ServerCommand);
- break;
-
- case "server/state":
- this.stateManager.updateServerState((message as ServerState).payload);
- break;
-
- case "group/update":
- this.stateManager.updateGroupState((message as GroupUpdate).payload);
- break;
- }
- }
-
- // Handle server hello
- private handleServerHello(): void {
- console.log("Sendspin: Connected to server");
- // Per spec: Send initial client/state immediately after server/hello
- this.sendStateUpdate();
- // Start time synchronization with fixed bursts.
- this.stopTimeSync();
- this.startTimeSyncBurstIfIdle();
- this.scheduleNextTimeSyncBurstTick();
-
- // Start periodic state updates
- const stateInterval = window.setInterval(
- () => this.sendStateUpdate(),
- STATE_UPDATE_INTERVAL,
- );
- this.stateManager.setStateUpdateInterval(stateInterval);
- }
-
- // Restart the periodic state update interval.
- // Called after volume commands to prevent a pending periodic update
- // from sending stale hardware volume shortly after the command response.
- private restartStateUpdateInterval(): void {
- const newInterval = window.setInterval(
- () => this.sendStateUpdate(),
- STATE_UPDATE_INTERVAL,
- );
- this.stateManager.setStateUpdateInterval(newInterval);
- }
-
- // Schedule the next fixed 10s burst tick.
- private scheduleNextTimeSyncBurstTick(): void {
- const timeSyncTimeout = window.setTimeout(() => {
- this.startTimeSyncBurstIfIdle();
- this.scheduleNextTimeSyncBurstTick();
- }, TIME_SYNC_BURST_INTERVAL_MS);
- this.stateManager.setTimeSyncInterval(timeSyncTimeout);
- }
-
- private startTimeSyncBurstIfIdle(): void {
- if (this.timeSyncBurstActive || !this.wsManager.isConnected()) {
- return;
- }
-
- this.timeSyncBurstActive = true;
- this.timeSyncBurstSentCount = 0;
- this.timeSyncBurstSamples = [];
- this.timeSyncInFlightClientTransmitted = null;
- this.sendNextTimeSyncBurstProbe();
- }
-
- private sendNextTimeSyncBurstProbe(): void {
- if (
- !this.timeSyncBurstActive ||
- this.timeSyncInFlightClientTransmitted !== null ||
- !this.wsManager.isConnected()
- ) {
- return;
- }
-
- if (this.timeSyncBurstSentCount >= TIME_SYNC_BURST_SIZE) {
- this.finalizeTimeSyncBurst();
- return;
- }
-
- const clientTransmitted = this.sendTimeSync();
- this.timeSyncBurstSentCount += 1;
- this.timeSyncInFlightClientTransmitted = clientTransmitted;
- this.armTimeSyncProbeTimeout(clientTransmitted);
- }
-
- private armTimeSyncProbeTimeout(expectedClientTransmitted: number): void {
- this.clearTimeSyncProbeTimeout();
- this.timeSyncInFlightTimeout = window.setTimeout(() => {
- this.handleTimeSyncProbeTimeout(expectedClientTransmitted);
- }, TIME_SYNC_REQUEST_TIMEOUT_MS);
- }
-
- private clearTimeSyncProbeTimeout(): void {
- if (this.timeSyncInFlightTimeout !== null) {
- clearTimeout(this.timeSyncInFlightTimeout);
- this.timeSyncInFlightTimeout = null;
- }
- }
-
- private handleTimeSyncProbeTimeout(expectedClientTransmitted: number): void {
- if (
- !this.timeSyncBurstActive ||
- this.timeSyncInFlightClientTransmitted !== expectedClientTransmitted
- ) {
- return;
- }
-
- console.warn("Sendspin: Time sync probe timed out, aborting current burst");
- this.abortTimeSyncBurst();
- }
-
- private finalizeTimeSyncBurst(): void {
- this.clearTimeSyncProbeTimeout();
-
- const candidate = this.selectTimeSyncBurstCandidate();
- if (candidate) {
- this.timeFilter.update(
- candidate.measurement,
- candidate.maxError,
- candidate.t4,
- );
- }
-
- this.timeSyncBurstActive = false;
- this.timeSyncBurstSentCount = 0;
- this.timeSyncInFlightClientTransmitted = null;
- this.timeSyncBurstSamples = [];
- }
-
- private selectTimeSyncBurstCandidate(): TimeSyncSample | null {
- if (this.timeSyncBurstSamples.length === 0) {
- return null;
- }
-
- const topRttSamples = [...this.timeSyncBurstSamples]
- .sort((a, b) => a.rttTerm - b.rttTerm)
- .slice(
- 0,
- Math.min(
- TIME_SYNC_ROBUST_SELECTION_COUNT,
- this.timeSyncBurstSamples.length,
- ),
- );
- const sortedByMeasurement = [...topRttSamples].sort(
- (a, b) => a.measurement - b.measurement,
- );
- return sortedByMeasurement[Math.floor(sortedByMeasurement.length / 2)];
- }
-
- private abortTimeSyncBurst(): void {
- this.clearTimeSyncProbeTimeout();
- this.timeSyncBurstActive = false;
- this.timeSyncBurstSentCount = 0;
- this.timeSyncInFlightClientTransmitted = null;
- this.timeSyncBurstSamples = [];
- }
-
- stopTimeSync(): void {
- this.stateManager.clearTimeSyncInterval();
- this.abortTimeSyncBurst();
- }
-
- // Handle server time synchronization
- private handleServerTime(message: ServerTime): void {
- if (
- !this.timeSyncBurstActive ||
- this.timeSyncInFlightClientTransmitted === null
- ) {
- return;
- }
-
- // Per spec: client_transmitted (T1), server_received (T2), server_transmitted (T3)
- const T1 = message.payload.client_transmitted;
- if (T1 !== this.timeSyncInFlightClientTransmitted) {
- console.warn(
- "Sendspin: Ignoring out-of-order time response",
- T1,
- this.timeSyncInFlightClientTransmitted,
- );
- return;
- }
-
- const T4 = Math.floor(performance.now() * 1000); // client received time
- const T2 = message.payload.server_received;
- const T3 = message.payload.server_transmitted;
-
- // NTP offset calculation: measurement = ((T2 - T1) + (T3 - T4)) / 2
- const measurement = (T2 - T1 + (T3 - T4)) / 2;
-
- // Max error (half of round-trip time): max_error = ((T4 - T1) - (T3 - T2)) / 2
- const rttTerm = Math.max(0, T4 - T1 - (T3 - T2));
- const maxError = Math.max(1000, rttTerm / 2);
- this.timeSyncBurstSamples.push({
- measurement,
- maxError,
- t4: T4,
- rttTerm,
- });
-
- this.clearTimeSyncProbeTimeout();
- this.timeSyncInFlightClientTransmitted = null;
-
- if (this.timeSyncBurstSentCount >= TIME_SYNC_BURST_SIZE) {
- this.finalizeTimeSyncBurst();
- return;
- }
-
- this.sendNextTimeSyncBurstProbe();
- }
-
- // Handle stream start (also used for format updates per new spec)
- private handleStreamStart(message: StreamStart): void {
- const isFormatUpdate = this.stateManager.currentStreamFormat !== null;
-
- this.stateManager.currentStreamFormat = message.payload.player;
- console.log(
- isFormatUpdate
- ? "Sendspin: Stream format updated"
- : "Sendspin: Stream started",
- this.stateManager.currentStreamFormat,
- );
- console.log(
- `Sendspin: Codec=${this.stateManager.currentStreamFormat.codec.toUpperCase()}, ` +
- `SampleRate=${this.stateManager.currentStreamFormat.sample_rate}Hz, ` +
- `Channels=${this.stateManager.currentStreamFormat.channels}, ` +
- `BitDepth=${this.stateManager.currentStreamFormat.bit_depth}bit`,
- );
-
- this.audioProcessor.initAudioContext();
- // Resume AudioContext if suspended (required for browser autoplay policies)
- this.audioProcessor.resumeAudioContext();
-
- if (!isFormatUpdate) {
- // New stream: reset scheduling state and clear buffers
- this.audioProcessor.clearBuffers();
- }
- // Format update: don't clear buffers (per new spec)
-
- this.stateManager.isPlaying = true;
-
- // Ensure audio element is playing for MediaSession
- this.audioProcessor.startAudioElement();
-
- // Explicitly set playbackState for Android (if mediaSession available)
- if (typeof navigator !== "undefined" && navigator.mediaSession) {
- navigator.mediaSession.playbackState = "playing";
- }
- }
-
- // Handle stream clear (for seek operations)
- private handleStreamClear(message: StreamClear): void {
- const roles = message.payload.roles;
- // If roles is undefined or includes 'player', clear player buffers
- if (!roles || roles.includes("player")) {
- console.log("Sendspin: Stream clear (seek)");
- this.audioProcessor.clearBuffers();
- // Note: Don't stop playing, don't clear format - just clear buffers
- }
- }
-
- // Handle stream end
- private handleStreamEnd(message: StreamEnd): void {
- const roles = message.payload?.roles;
-
- // If roles is undefined or includes 'player', handle player stream end
- if (!roles || roles.includes("player")) {
- console.log("Sendspin: Stream ended");
- // Per spec: Stop playback and clear buffers
- this.audioProcessor.clearBuffers();
-
- // Clear format and reset state
- this.stateManager.currentStreamFormat = null;
- this.stateManager.isPlaying = false;
-
- // Stop audio element (except on Android where silent loop continues)
- this.audioProcessor.stopAudioElement();
-
- // Explicitly set playbackState (if mediaSession available)
- if (typeof navigator !== "undefined" && navigator.mediaSession) {
- navigator.mediaSession.playbackState = "paused";
- }
-
- // Send state update to server
- this.sendStateUpdate();
- }
- }
-
- // Handle server commands
- private handleServerCommand(message: ServerCommand): void {
- const playerCommand = message.payload.player;
- if (!playerCommand) return;
-
- switch (playerCommand.command) {
- case "volume":
- // Set volume command
- if (playerCommand.volume !== undefined) {
- this.stateManager.volume = playerCommand.volume;
- this.audioProcessor.updateVolume();
- // Notify external handler for hardware volume
- if (this.useHardwareVolume && this.onVolumeCommand) {
- this.onVolumeCommand(playerCommand.volume, this.stateManager.muted);
- }
- }
- break;
-
- case "mute":
- // Mute/unmute command - uses boolean mute field
- if (playerCommand.mute !== undefined) {
- this.stateManager.muted = playerCommand.mute;
- this.audioProcessor.updateVolume();
- // Notify external handler for hardware volume
- if (this.useHardwareVolume && this.onVolumeCommand) {
- this.onVolumeCommand(this.stateManager.volume, playerCommand.mute);
- }
- }
- break;
-
- case "set_static_delay": {
- const delay = playerCommand.static_delay_ms;
- if (typeof delay === "number" && isFinite(delay)) {
- const clamped = Math.max(0, Math.min(5000, Math.round(delay)));
- this.audioProcessor.setSyncDelay(clamped);
- this.onDelayCommand?.(clamped);
- }
- break;
- }
- }
-
- // Reset periodic timer first, then send state with commanded values.
- // Skip hardware read to avoid race where hardware hasn't applied the volume yet.
- this.restartStateUpdateInterval();
- this.sendStateUpdate(true);
- }
-
- // Send client hello with player identification
- sendClientHello(): void {
- const hello: ClientHello = {
- type: "client/hello" as MessageType.CLIENT_HELLO,
- payload: {
- client_id: this.playerId,
- name: this.clientName,
- version: 1,
- supported_roles: ["player@v1", "controller@v1", "metadata@v1"],
- device_info: {
- product_name: "Web Browser",
- manufacturer:
- (typeof navigator !== "undefined" && navigator.vendor) || "Unknown",
- software_version:
- (typeof navigator !== "undefined" && navigator.userAgent) ||
- "Unknown",
- },
- "player@v1_support": {
- supported_formats: this.getSupportedFormats(),
- buffer_capacity: this.bufferCapacity,
- supported_commands: ["volume", "mute"],
- },
- },
- };
- this.wsManager.send(hello);
- }
-
- // Get supported codecs for the current browser
- private getBrowserSupportedCodecs(): Set<Codec> {
- const userAgent =
- typeof navigator !== "undefined" ? navigator.userAgent : "";
- const isSafari = /^((?!chrome|android).)*safari/i.test(userAgent);
- const isFirefox = /firefox/i.test(userAgent);
-
- // Check if native Opus decoder is available (requires secure context)
- const hasNativeOpus = typeof AudioDecoder !== "undefined";
-
- if (!hasNativeOpus) {
- if (typeof window !== "undefined" && !window.isSecureContext) {
- console.warn(
- "[Opus] Running in insecure context, falling back to FLAC/PCM",
- );
- } else {
- console.warn(
- "[Opus] Native decoder not available, falling back to FLAC/PCM",
- );
- }
- }
-
- if (isSafari) {
- // Safari: No FLAC support
- return new Set(["pcm", "opus"] as Codec[]);
- }
-
- if (isFirefox) {
- // Firefox: Opus has audio glitches with both native and opus-encdec decoders
- return new Set(["pcm", "flac"] as Codec[]);
- }
-
- if (hasNativeOpus) {
- // Native Opus available (Chrome, Edge)
- return new Set(["pcm", "opus", "flac"] as Codec[]);
- }
-
- // No WebCodecs AudioDecoder (insecure context or unsupported browser)
- return new Set(["pcm", "flac"] as Codec[]);
- }
-
- // Build supported formats from requested codecs, filtering out unsupported ones
- private getSupportedFormats(): SupportedFormat[] {
- const browserSupported = this.getBrowserSupportedCodecs();
- const formats: SupportedFormat[] = [];
-
- for (const codec of this.codecs) {
- if (!browserSupported.has(codec)) {
- continue;
- }
-
- if (codec === "opus") {
- // Opus requires 48kHz
- formats.push({
- codec: "opus",
- sample_rate: 48000,
- channels: 2,
- bit_depth: 16,
- });
- } else {
- // PCM and FLAC support both sample rates
- formats.push({ codec, sample_rate: 48000, channels: 2, bit_depth: 16 });
- formats.push({ codec, sample_rate: 44100, channels: 2, bit_depth: 16 });
- }
- }
-
- if (formats.length === 0) {
- throw new Error(
- `No supported codecs: requested [${this.codecs.join(", ")}], ` +
- `browser supports [${[...browserSupported].join(", ")}]`,
- );
- }
-
- return formats;
- }
-
- // Send time synchronization message
- sendTimeSync(clientTimeUs = Math.floor(performance.now() * 1000)): number {
- const message: ClientTime = {
- type: "client/time" as MessageType.CLIENT_TIME,
- payload: {
- client_transmitted: clientTimeUs,
- },
- };
- this.wsManager.send(message);
- return clientTimeUs;
- }
-
- // Send state update
- // When skipHardwareRead is true, use stateManager values instead of reading from hardware.
- // This avoids race conditions when responding to volume commands.
- sendStateUpdate(skipHardwareRead = false): void {
- let volume = this.stateManager.volume;
- let muted = this.stateManager.muted;
- if (!skipHardwareRead && this.useHardwareVolume && this.getExternalVolume) {
- const externalVol = this.getExternalVolume();
- volume = externalVol.volume;
- muted = externalVol.muted;
- }
-
- const syncDelayMs = this.audioProcessor.getSyncDelayMs();
- const staticDelayMs = Math.max(0, Math.min(5000, Math.round(syncDelayMs)));
-
- const message: ClientState = {
- type: "client/state" as MessageType.CLIENT_STATE,
- payload: {
- player: {
- state: this.stateManager.playerState,
- volume,
- muted,
- static_delay_ms: staticDelayMs,
- supported_commands: ["set_static_delay"],
- },
- },
- };
- this.wsManager.send(message);
- }
-
- // Send goodbye message before disconnecting
- sendGoodbye(reason: GoodbyeReason): void {
- this.wsManager.send({
- type: "client/goodbye" as MessageType.CLIENT_GOODBYE,
- payload: {
- reason,
- },
- } satisfies ClientGoodbye);
- }
-
- // Send controller command to server
- sendCommand<T extends ControllerCommand>(
- command: T,
- params: ControllerCommands[T],
- ): void {
- this.wsManager.send({
- type: "client/command" as MessageType.CLIENT_COMMAND,
- payload: {
- controller: {
- command,
- ...(params as object),
- },
- },
- } satisfies ClientCommand);
- }
-}
diff --git a/src/sync-delay.ts b/src/sync-delay.ts
new file mode 100644
index 0000000..f0a7c1b
--- /dev/null
+++ b/src/sync-delay.ts
@@ -0,0 +1,6 @@
+export const SYNC_DELAY_MAX_MS = 5000;
+
+export function clampSyncDelayMs(delayMs: number): number {
+ if (!isFinite(delayMs)) return 0;
+ return Math.max(0, Math.min(SYNC_DELAY_MAX_MS, Math.round(delayMs)));
+}
diff --git a/src/types.ts b/src/types.ts
index 3baf5d6..fe87bdc 100644
--- a/src/types.ts
+++ b/src/types.ts
@@ -256,6 +256,27 @@ export type Codec = "pcm" | "opus" | "flac";
*/
export type CorrectionMode = "sync" | "quality" | "quality-local";
+/**
+ * Sync correction thresholds for a single correction mode.
+ * All values are in milliseconds unless noted.
+ */
+export interface CorrectionThresholds {
+ /** Hard resync when sync error exceeds this (ms) */
+ resyncAboveMs: number;
+ /** Use ±2% playback rate when error exceeds this (ms). Infinity = disabled. */
+ rate2AboveMs: number;
+ /** Use ±1% playback rate when error exceeds this (ms). Infinity = disabled. */
+ rate1AboveMs: number;
+ /** Use sample insertion/deletion when error is below this (ms). 0 = disabled. */
+ samplesBelowMs: number;
+ /** No correction when error is below this (ms) */
+ deadbandBelowMs: number;
+ /** Whether the recorrection monitor runs in this mode */
+ enableRecorrectionMonitor: boolean;
+ /** Whether runtime sync delay changes trigger immediate cutover */
+ immediateDelayCutover: boolean;
+}
+
export interface SupportedFormat {
codec: string;
channels: number;
@@ -263,22 +284,89 @@ export interface SupportedFormat {
bit_depth: number;
}
-export interface SendspinPlayerConfig {
+export interface SendspinPlayerConfig extends SendspinCoreConfig {
+ /**
+ * HTMLAudioElement for media-element output mode.
+ * Auto-created on mobile browsers if not provided.
+ */
+ audioElement?: HTMLAudioElement;
+
+ /**
+ * Sync correction mode:
+ * - "sync" (default): Corrects out of sync playback using all methods and may use pitch-changing
+ * playback-rate adjustments for faster convergence.
+ * Best for multi-device sync but may cause audible pitch shifts, especially just
+ * after starting of playback.
+ * - "quality": No playback-rate changes; uses sample fixes and tighter resyncs, so expect fewer adjustments but occasional jumps. Starts out of sync until the clock converges. Not recommended for bad networks.
+ * - "quality-local": Avoids playback-rate changes; may drift vs. other players and only resyncs
+ * as a last resort.
+ * Best for single-device playback where audio quality is priority.
+ */
+ correctionMode?: CorrectionMode;
+
+ /**
+ * Override default correction thresholds per mode.
+ * Partially override any mode — unspecified fields keep their defaults.
+ *
+ * @example
+ * // Make "sync" mode tolerate more drift before hard resyncing
+ * correctionThresholds: { sync: { resyncAboveMs: 400 } }
+ */
+ correctionThresholds?: Partial<
+   Record<CorrectionMode, Partial<CorrectionThresholds>>
+ >;
+
+ /**
+ * Use browser's output latency API for automatic latency compensation.
+ * When enabled, reads AudioContext.baseLatency and outputLatency to
+ * compensate for hardware delay (e.g., Bluetooth headphones).
+ *
+ * Note: API reliability varies by browser/platform. But generally works well,
+ * especially on modern mobile browsers.
+ *
+ * Default: true
+ */
+ useOutputLatencyCompensation?: boolean;
+
+ /**
+ * Storage for persisting SDK state (e.g., cached output latency).
+ * Defaults to localStorage. Pass null to disable persistence.
+ */
+ storage?: SendspinStorage | null;
+}
+
+/**
+ * A decoded audio chunk with raw PCM samples.
+ * Emitted by SendspinCore after decoding compressed audio.
+ * Consumed by SendspinPlayer for playback, or by visualization/analysis tools.
+ */
+export interface DecodedAudioChunk {
+ /** PCM sample data, one Float32Array per channel (values in -1.0 to 1.0) */
+ samples: Float32Array[];
+ /** Sample rate in Hz */
+ sampleRate: number;
+ /** Server timestamp in microseconds */
+ serverTimeUs: number;
+ /** Stream generation (incremented on each new stream) */
+ generation: number;
+}
+
+/**
+ * Configuration for SendspinCore (protocol + decoding, no playback).
+ */
+export interface SendspinCoreConfig {
/** Unique player identifier. Auto-generated if not provided. */
playerId?: string;
- /** Base URL of the Sendspin server (e.g., "http://192.168.1.100:8095") */
- baseUrl: string;
+ /**
+ * Base URL of the Sendspin server (e.g., "http://192.168.1.100:8095").
+ * Required unless webSocket is provided.
+ */
+ baseUrl?: string;
/** Human-readable name for this player. Auto-generated if not provided. */
clientName?: string;
- /**
- * HTMLAudioElement for media-element output mode.
- * Auto-created on mobile browsers if not provided.
- */
- audioElement?: HTMLAudioElement;
-
/**
* Codecs to use for audio streaming, in priority order.
* Unsupported codecs for the current browser are automatically filtered out:
@@ -296,6 +384,14 @@ export interface SendspinPlayerConfig {
*/
bufferCapacity?: number;
+ /**
+ * Pre-established WebSocket connection.
+ * When provided, the core adopts this socket instead of creating one from baseUrl.
+ * The socket must connect to the Sendspin /sendspin endpoint.
+ * Auto-reconnect is disabled for externally-managed sockets.
+ */
+ webSocket?: WebSocket;
+
/**
* Static sync delay in milliseconds.
* Positive values make playback earlier to compensate for downstream device latency.
@@ -305,43 +401,6 @@ export interface SendspinPlayerConfig {
*/
syncDelay?: number;
- /**
- * Sync correction mode:
- * - "sync" (default): Corrects out of sync playback using all methods and may use pitch-changing
- * playback-rate adjustments for faster convergence.
- * Best for multi-device sync but may cause audible pitch shifts, especially just
- * after starting of playback.
- * - "quality": No playback-rate changes; uses sample fixes and tighter resyncs, so expect fewer adjustments but occasional jumps. Starts out of sync until the clock converges. Not recommended for bad networks.
- * - "quality-local": Avoids playback-rate changes; may drift vs. other players and only resyncs
- * as a last resort.
- * Best for single-device playback where audio quality is priority.
- */
- correctionMode?: CorrectionMode;
-
- /**
- * Use browser's output latency API for automatic latency compensation.
- * When enabled, reads AudioContext.baseLatency and outputLatency to
- * compensate for hardware delay (e.g., Bluetooth headphones).
- *
- * Note: API reliability varies by browser/platform. But generally works well,
- * especially on modern mobile browsers.
- *
- * Default: true
- */
- useOutputLatencyCompensation?: boolean;
-
- /** Callback when player state changes (local or from server) */
- onStateChange?: (state: {
- isPlaying: boolean;
- volume: number;
- muted: boolean;
- playerState: PlayerState;
- /** Cached server state (merged from server/state messages) */
- serverState: ServerStatePayload;
- /** Cached group state (merged from group/update messages) */
- groupState: GroupUpdatePayload;
- }) => void;
-
/**
* Use hardware/external volume control instead of software gain.
* When true, the internal gain node stays at 1.0 and volume commands
@@ -372,17 +431,17 @@ export interface SendspinPlayerConfig {
*/
getExternalVolume?: () => { volume: number; muted: boolean };
- /**
- * Storage for persisting SDK state (e.g., cached output latency).
- * Defaults to localStorage. Pass null to disable persistence.
- */
- storage?: SendspinStorage | null;
-}
-
-export interface AudioBufferQueueItem {
- buffer: AudioBuffer;
- serverTime: number;
- generation: number;
+ /** Callback when player state changes (local or from server). */
+ onStateChange?: (state: {
+ isPlaying: boolean;
+ volume: number;
+ muted: boolean;
+ playerState: PlayerState;
+ /** Cached server state (merged from server/state messages) */
+ serverState: ServerStatePayload;
+ /** Cached group state (merged from group/update messages) */
+ groupState: GroupUpdatePayload;
+ }) => void;
}
/**