diff --git a/Project.toml b/Project.toml index 92e0228..efc9cf0 100644 --- a/Project.toml +++ b/Project.toml @@ -16,8 +16,10 @@ JuliaFormatter = "98e50ef6-434e-11e9-1051-2b60c6c9e899" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" Logging = "56ddb016-857b-54e1-b83d-db4d58db5568" NaNMath = "77ba4419-2d1f-58cd-9bb1-8ffee604a2e3" +Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" ProgressBars = "49802e3a-d2f1-5c88-81d8-b72133a6f568" Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" +RecipesBase = "3cdcf5f2-1ef4-517c-9805-6587b60abb01" ResumableFunctions = "c5292f4c-5179-55e1-98c5-05642aab7184" Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" StatsBase = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91" @@ -35,8 +37,10 @@ JuliaFormatter = "1.0.62" LinearAlgebra = "1.11.0" Logging = "1.11.0" NaNMath = "0.3, 1" +Plots = "1.40.17" ProgressBars = "1.4" Random = "1.11.0" +RecipesBase = "1.3.4" ResumableFunctions = "0.6" StatsBase = "0.33" julia = "1.11" diff --git a/src/Stingray.jl b/src/Stingray.jl index 533cecd..454b072 100644 --- a/src/Stingray.jl +++ b/src/Stingray.jl @@ -6,22 +6,7 @@ using ProgressBars: tqdm as show_progress using DocStringExtensions using LinearAlgebra using Random - -include("fourier.jl") -export positive_fft_bins -export poisson_level -export normalize_abs -export normalize_frac -export normalize_leahy_from_variance -export normalize_periodograms -export bias_term -export raw_coherence -export estimate_intrinsic_coherence -export error_on_averaged_cross_spectrum -export get_average_ctrate -export get_flux_iterable_from_segments -export avg_pds_from_events -export avg_cs_from_events +using RecipesBase include("events.jl") export FITSMetadata, @@ -62,6 +47,24 @@ export AbstractLightCurve, rebin include("utils.jl") +include("fourier.jl") +export positive_fft_bins +export poisson_level +export normalize_abs +export normalize_frac +export normalize_leahy_from_variance +export normalize_periodograms +export bias_term +export raw_coherence +export 
estimate_intrinsic_coherence +export error_on_averaged_cross_spectrum +export get_average_ctrate +export get_flux_iterable_from_segments +export avg_pds_from_events,avg_pds_from_iterable +export avg_cs_from_events,avg_cs_from_iterables,avg_cs_from_iterables_quick +export avg_pds_from_eventlist,avg_cs_from_eventlists,avg_pds_from_lightcurve,avg_cs_from_lightcurves +export get_norm_label,get_poisson_level,extract_gti + include("gti.jl") export load_gtis export get_total_gti_length @@ -75,6 +78,17 @@ export apply_gtis export fill_bad_time_intervals! export create_filtered_lightcurve export check_gtis -export split_by_gtis +export split_by_gtis,intersect_gtis + +include("crossspectrum.jl") +export CrossSpectrum, AveragedCrossSpectrum +export is_averaged, is_single, theoretical_noise_level, fill_errors! +export white_noise_level, noise_corrected_power, signal_to_noise_ratio +export detect_aliasing, coherence, phase_lag, time_lag, noise_properties +export significant_frequencies, get_noise_level, quality_metrics +export rebin, rebin_log, geometric_rebin, adaptive_rebin +export is_rebinned, effective_samples_per_bin,AbstractCrossSpectrum + +include("plotting/plots_recipes_crossspectrum.jl") end diff --git a/src/crossspectrum.jl b/src/crossspectrum.jl new file mode 100644 index 0000000..d0bca14 --- /dev/null +++ b/src/crossspectrum.jl @@ -0,0 +1,2294 @@ +""" + AbstractCrossSpectrum{T} + +Abstract type for cross-spectrum analysis structures. + +Type parameter `T` represents the numeric type used for calculations (typically `Float64`). +""" +abstract type AbstractCrossSpectrum{T} end + +""" + CrossSpectrum{T} <: AbstractCrossSpectrum{T} + +A structure containing cross power spectrum data and associated metadata. 
+ +# Type Parameters +- `T`: Numeric type for calculations (typically `Float64`) + +# Fields +$(TYPEDFIELDS) + +# Examples +```julia +# Create from event lists +cs = CrossSpectrum(ev1, ev2, 100.0, 0.1) + +# Create from light curves +cs = CrossSpectrum(lc1, lc2, 100.0) +``` +""" +mutable struct CrossSpectrum{T} <: AbstractCrossSpectrum{T} + "Frequencies in Hz" + freq::Vector{T} + "Complex cross power values" + power::Vector{Complex{T}} + "Power errors" + power_err::Union{Nothing,Vector{T}} + "Auto power spectrum of first light curve" + ps1::Vector{T} + "Auto power spectrum of second light curve" + ps2::Vector{T} + "Normalization type (leahy, frac, rms, abs, none)" + norm::String + "Power type (all, real, absolute)" + power_type::String + "Frequency resolution (Hz)" + df::T + "Total number of photons in first light curve" + nphots1::T + "Total number of photons in second light curve" + nphots2::T + "Number of segments (m=1: single spectrum, m>1: averaged spectrum)" + m::Int + "Number of frequencies" + n::Int + "Number of nearby averaged frequencies" + k::Union{Int,Vector{Int}} + "Original light curve metadata from first signal" + metadata1::Union{LightCurveMetadata,FITSMetadata} + "Original light curve metadata from second signal" + metadata2::Union{LightCurveMetadata,FITSMetadata} + "return full frequencies -ve and +ve if true else only +ve" + fullspec::Bool + "if set to false Independent channels else Reference band contains subject band" + channels_overlap::Bool + "Segment size used for averaging (nothing for single spectrum)" + segment_size::Union{Nothing,T} + "Mean rate of first light curve (nothing for single spectrum)" + mean_rate1::Union{Nothing,T} + "Mean rate of second light curve (nothing for single spectrum)" + mean_rate2::Union{Nothing,T} +end + +""" + is_averaged(cs::CrossSpectrum) -> Bool + +Check if a CrossSpectrum represents an averaged spectrum (multiple segments). 
+ +# Arguments +- `cs::CrossSpectrum`: The cross spectrum to check + +# Returns +- `Bool`: `true` if averaged (m > 1), `false` if single segment + +# Examples +```julia +cs_single = CrossSpectrum(lc1, lc2) +cs_averaged = AveragedCrossSpectrum(lc1, lc2, 100.0) + +is_averaged(cs_single) # false +is_averaged(cs_averaged) # true +``` +""" +is_averaged(cs::CrossSpectrum) = cs.m > 1 + +""" + is_single(cs::CrossSpectrum) -> Bool + +Check if a CrossSpectrum represents a single segment spectrum. + +# Arguments +- `cs::CrossSpectrum`: The cross spectrum to check + +# Returns +- `Bool`: `true` if single segment (m = 1), `false` if averaged + +# Examples +```julia +cs_single = CrossSpectrum(lc1, lc2) +is_single(cs_single) # true +``` +""" +is_single(cs::CrossSpectrum) = cs.m == 1 + +""" + CrossSpectrum(ev1::EventList, ev2::EventList, segment_size::Real, dt::Real; + norm::String="frac", use_common_mean::Bool=true, + fullspec::Bool=false, power_type::String="all") -> CrossSpectrum + +Create a single-segment cross spectrum from two event lists. 
+ +# Arguments +- `ev1::EventList`: First event list +- `ev2::EventList`: Second event list +- `segment_size::Real`: Size of the segment in time units +- `dt::Real`: Time bin size + +# Keywords +- `norm::String="frac"`: Normalization type ("frac", "leahy", "rms", "abs", "none") +- `use_common_mean::Bool=true`: Whether to use common mean for normalization +- `fullspec::Bool=false`: If true, include negative frequencies +- `power_type::String="all"`: Power type ("all", "real", "absolute") + +# Returns +- `CrossSpectrum`: Single-segment cross spectrum + +# Examples +```julia +# Basic usage +cs = CrossSpectrum(ev1, ev2, 100.0, 0.1) + +# With different normalization +cs = CrossSpectrum(ev1, ev2, 100.0, 0.1, norm="leahy") +``` +""" +function CrossSpectrum( + ev1::EventList, + ev2::EventList, + segment_size::Real, + dt::Real; + norm::String = "frac", + use_common_mean::Bool = true, + fullspec::Bool = false, + power_type::String = "all", +) + + validate_time_alignment(ev1.meta.headers, ev2.meta.headers) + + # Get GTIs and find intersection + gti1 = ev1.meta.gti + gti2 = ev2.meta.gti + cross_gti = intersect_gtis(gti1, gti2) + + if size(cross_gti, 1) == 0 + error("No overlapping GTIs between event lists!") + end + + # Calculate number of bins + n_bin = round(Int, segment_size / dt) + adjusted_dt = segment_size / n_bin + + # Create frequency array + freq = fftfreq(n_bin, 1 / adjusted_dt) + if !fullspec + fgt0 = positive_fft_bins(n_bin) + freq = freq[fgt0] + end + + # Find and process ONLY THE FIRST valid segment + cross_power = nothing + pds1_power = nothing + pds2_power = nothing + segment_photons1 = 0.0 + segment_photons2 = 0.0 + + for (s, e, idx0_1, idx1_1) in + generate_indices_of_segment_boundaries_unbinned(ev1.times, cross_gti, segment_size) + # Find corresponding indices in second event list + idx0_2 = searchsortedfirst(ev2.times, s) + idx1_2 = searchsortedfirst(ev2.times, e) + + if idx1_1 - idx0_1 < 2 ||
idx1_2 - idx0_2 < 2 + continue + end + + # Get event times for this segment + segment_times1 = @view ev1.times[idx0_1:idx1_1-1] + segment_times2 = @view ev2.times[idx0_2:idx1_2-1] + + # Create binned light curves + edges = range(s, stop = e, length = n_bin + 1) + lc1 = fit(Histogram, segment_times1, edges).weights + lc2 = fit(Histogram, segment_times2, edges).weights + + if length(lc1) != n_bin || length(lc2) != n_bin + continue + end + + # Calculate FFTs + ft1 = fft(Float64.(lc1)) + ft2 = fft(Float64.(lc2)) + + # Calculate cross spectrum and power spectra + cross_power = ft1 .* conj.(ft2) + pds1_power = abs2.(ft1) + pds2_power = abs2.(ft2) + + # Take only positive frequencies if needed + if !fullspec + cross_power = cross_power[fgt0] + pds1_power = pds1_power[fgt0] + pds2_power = pds2_power[fgt0] + end + + # Store photon counts + segment_photons1 = sum(lc1) + segment_photons2 = sum(lc2) + + # BREAK after first valid segment + break + end + + if isnothing(cross_power) + error("No valid segments found for cross spectrum") + end + + # Calculate mean rates for this single segment + mean_rate1 = segment_photons1 / segment_size + mean_rate2 = segment_photons2 / segment_size + + # Normalize the spectra + if norm != "none" + cross_power = normalize_periodograms( + cross_power, + adjusted_dt, + n_bin; + mean_flux = sqrt(segment_photons1 * segment_photons2) / n_bin, + n_ph = sqrt(segment_photons1 * segment_photons2), + norm = norm, + power_type = power_type, + ) + pds1_power = normalize_periodograms( + pds1_power, + adjusted_dt, + n_bin; + mean_flux = segment_photons1 / n_bin, + n_ph = segment_photons1, + norm = norm, + power_type = power_type, + ) + pds2_power = normalize_periodograms( + pds2_power, + adjusted_dt, + n_bin; + mean_flux = segment_photons2 / n_bin, + n_ph = segment_photons2, + norm = norm, + power_type = power_type, + ) + end + + return CrossSpectrum{Float64}( + freq, + cross_power, + nothing, # power_err + pds1_power, + pds2_power, + norm, + power_type, + 
freq[2] - freq[1], # df + segment_photons1, + segment_photons2, + 1, # m = 1 for SINGLE segment + n_bin, # n + 1, # k + ev1.meta, # metadata1 + ev2.meta, # metadata2 + fullspec, + false, # channels_overlap + Float64(segment_size), # segment_size + mean_rate1, # mean_rate1 + mean_rate2, # mean_rate2 + ) +end + +""" + CrossSpectrum(lc1::LightCurve, lc2::LightCurve, segment_size::Union{Nothing,Real}=nothing; + norm::String="frac", use_common_mean::Bool=true, + fullspec::Bool=false, power_type::String="all") -> CrossSpectrum + +Create a single-segment cross spectrum from two light curves. + +# Arguments +- `lc1::LightCurve`: First light curve +- `lc2::LightCurve`: Second light curve +- `segment_size::Union{Nothing,Real}=nothing`: Segment size (if nothing, uses minimum segment size) + +# Keywords +- `norm::String="frac"`: Normalization type +- `use_common_mean::Bool=true`: Whether to use common mean for normalization +- `fullspec::Bool=false`: If true, include negative frequencies +- `power_type::String="all"`: Power type + +# Returns +- `CrossSpectrum`: Single-segment cross spectrum + +# Examples +```julia +# Auto-determine segment size +cs = CrossSpectrum(lc1, lc2) + +# Specify segment size +cs = CrossSpectrum(lc1, lc2, 100.0) +``` +""" +function CrossSpectrum( + lc1::LightCurve, + lc2::LightCurve, + segment_size::Union{Nothing,Real} = nothing; + norm::String = "frac", + use_common_mean::Bool = true, + fullspec::Bool = false, + power_type::String = "all", +) + + gti1 = haskey(lc1.metadata.extra, "gti") ? lc1.metadata.extra["gti"] : nothing + gti2 = haskey(lc2.metadata.extra, "gti") ? lc2.metadata.extra["gti"] : nothing + cross_gti = + isnothing(gti1) ? gti2 : (isnothing(gti2) ? 
gti1 : intersect_gtis(gti1, gti2)) + + if isnothing(cross_gti) || size(cross_gti, 1) == 0 + error("No overlapping GTIs between light curves!") + end + + # Apply GTIs to get segments + lcs1 = apply_gtis(lc1, cross_gti) + lcs2 = apply_gtis(lc2, cross_gti) + minseg = min(length(lcs1), length(lcs2)) + + if minseg == 0 + error("No valid segments after GTI intersection!") + end + + # Get time bin size + dt_val = isa(lc1.dt, Vector) ? lc1.dt[1] : lc1.dt + + if isnothing(segment_size) + segment_size = minimum([lcs1[i].dt * length(lcs1[i].counts) for i = 1:minseg]) + end + + # Calculate number of bins + n_bin = round(Int, segment_size / dt_val) + adjusted_dt = segment_size / n_bin + + # Create frequency array + freq = fftfreq(n_bin, 1 / adjusted_dt) + if !fullspec + fgt0 = positive_fft_bins(n_bin) + freq = freq[fgt0] + end + + # Process ONLY THE FIRST valid segment + cross_power = nothing + pds1_power = nothing + pds2_power = nothing + segment_photons1 = 0.0 + segment_photons2 = 0.0 + + for i = 1:minseg + lc1_seg = lcs1[i] + lc2_seg = lcs2[i] + + # Handle different segment lengths by truncating to common length + min_len = min(length(lc1_seg.counts), length(lc2_seg.counts)) + + if min_len < 2 + continue + end + + # Use common length + counts1 = lc1_seg.counts[1:min_len] + counts2 = lc2_seg.counts[1:min_len] + + # Ensure we have the expected number of bins or adjust + if length(counts1) != n_bin || length(counts2) != n_bin + # If segment is longer than expected, truncate + if length(counts1) >= n_bin && length(counts2) >= n_bin + counts1 = counts1[1:n_bin] + counts2 = counts2[1:n_bin] + else + # If too short, skip this segment + continue + end + end + + # Calculate FFTs + ft1 = fft(Float64.(counts1)) + ft2 = fft(Float64.(counts2)) + + # Calculate cross spectrum and power spectra + cross_power = ft1 .* conj.(ft2) + pds1_power = abs2.(ft1) + pds2_power = abs2.(ft2) + + # Take only positive frequencies if needed + if !fullspec + cross_power = cross_power[fgt0] + pds1_power = 
pds1_power[fgt0] + pds2_power = pds2_power[fgt0] + end + + # Store photon counts + segment_photons1 = sum(counts1) + segment_photons2 = sum(counts2) + + # BREAK after first valid segment - this is the key difference! + break + end + + if isnothing(cross_power) + error("No valid segments found for cross spectrum") + end + + # Calculate mean rates for this single segment + mean_rate1 = segment_photons1 / segment_size + mean_rate2 = segment_photons2 / segment_size + + # Normalize the spectra + if norm != "none" + cross_power = normalize_periodograms( + cross_power, + adjusted_dt, + n_bin; + mean_flux = sqrt(segment_photons1 * segment_photons2) / n_bin, + n_ph = sqrt(segment_photons1 * segment_photons2), + norm = norm, + power_type = power_type, + ) + pds1_power = normalize_periodograms( + pds1_power, + adjusted_dt, + n_bin; + mean_flux = segment_photons1 / n_bin, + n_ph = segment_photons1, + norm = norm, + power_type = power_type, + ) + pds2_power = normalize_periodograms( + pds2_power, + adjusted_dt, + n_bin; + mean_flux = segment_photons2 / n_bin, + n_ph = segment_photons2, + norm = norm, + power_type = power_type, + ) + end + + return CrossSpectrum{Float64}( + freq, + cross_power, + nothing, # power_err + pds1_power, + pds2_power, + norm, + power_type, + freq[2] - freq[1], # df + segment_photons1, + segment_photons2, + 1, # m = 1 for SINGLE segment + n_bin, # n + 1, # k + lc1.metadata, # metadata1 + lc2.metadata, # metadata2 + fullspec, + false, # channels_overlap + Float64(segment_size), # segment_size + mean_rate1, # mean_rate1 + mean_rate2, # mean_rate2 + ) +end + +""" + AveragedCrossSpectrum(lc1::LightCurve{T}, lc2::LightCurve{T}, segment_size::Real; + norm::String="frac", use_common_mean::Bool=true, + fullspec::Bool=false, power_type::String="all", + fill_errors_on_creation::Bool=true) where T<:Real -> CrossSpectrum + +Create an averaged cross spectrum from two light curves by averaging multiple segments. 
+ +# Arguments +- `lc1::LightCurve{T}`: First light curve +- `lc2::LightCurve{T}`: Second light curve +- `segment_size::Real`: Size of each segment for averaging + +# Keywords +- `norm::String="frac"`: Normalization type +- `use_common_mean::Bool=true`: Whether to use common mean for normalization +- `fullspec::Bool=false`: If true, include negative frequencies +- `power_type::String="all"`: Power type +- `fill_errors_on_creation::Bool=true`: Whether to calculate errors immediately + +# Returns +- `CrossSpectrum`: Multi-segment averaged cross spectrum with m > 1 + +# Examples +```julia +# Create averaged cross spectrum +cs_avg = AveragedCrossSpectrum(lc1, lc2, 100.0) + +# Without error calculation +cs_avg = AveragedCrossSpectrum(lc1, lc2, 100.0, fill_errors_on_creation=false) +``` +""" +function AveragedCrossSpectrum( + lc1::LightCurve{T}, + lc2::LightCurve{T}, + segment_size::Real; + norm::String = "frac", + use_common_mean::Bool = true, + fullspec::Bool = false, + power_type::String = "all", + fill_errors_on_creation::Bool = true, +) where {T<:Real} + + if isnan(segment_size) + throw(ArgumentError("Segment size cannot be NaN")) + end + if isinf(segment_size) + throw(ArgumentError("Segment size cannot be Inf")) + end + if segment_size <= 0 + throw(ArgumentError("Segment size must be positive")) + end + + bin_size1 = lc1.metadata.bin_size + bin_size2 = lc2.metadata.bin_size + + if abs(bin_size1 - bin_size2) > 1e-10 + throw(ArgumentError("Light curves must have the same bin size")) + end + + bin_size = bin_size1 + n_bins_per_segment = round(Int, segment_size / bin_size) + + if n_bins_per_segment <= 1 + throw(ArgumentError("Segment size too small")) + end + + # Extract GTI information + gti1 = if hasfield(typeof(lc1.metadata), :gti) && !isnothing(lc1.metadata.gti) + lc1.metadata.gti + elseif haskey(lc1.metadata.extra, "gti") + lc1.metadata.extra["gti"] + elseif haskey(lc1.metadata.extra, "GTI") + lc1.metadata.extra["GTI"] + else + throw(ArgumentError("No GTI 
information found in first light curve metadata")) + end + + gti2 = if hasfield(typeof(lc2.metadata), :gti) && !isnothing(lc2.metadata.gti) + lc2.metadata.gti + elseif haskey(lc2.metadata.extra, "gti") + lc2.metadata.extra["gti"] + elseif haskey(lc2.metadata.extra, "GTI") + lc2.metadata.extra["GTI"] + else + throw(ArgumentError("No GTI information found in second light curve metadata")) + end + + cross_gti = intersect_gtis(gti1, gti2) + + if size(cross_gti, 1) == 0 + throw(ArgumentError("No overlapping GTIs between light curves")) + end + + # Generate segment boundaries + segment_generator1 = generate_indices_of_segment_boundaries_binned( + lc1.time, + cross_gti, + segment_size, + dt = bin_size, + ) + segment_generator2 = generate_indices_of_segment_boundaries_binned( + lc2.time, + cross_gti, + segment_size, + dt = bin_size, + ) + + # Create frequency array + freqs = fftfreq(n_bins_per_segment, 1 / bin_size) + if !fullspec + pos_freq_idx = positive_fft_bins(n_bins_per_segment; include_zero = false) + freqs = freqs[pos_freq_idx] + else + pos_freq_idx = 1:length(freqs) + end + df = freqs[2] - freqs[1] + + # Initialize accumulators + total_cross_power = zeros(Complex{T}, length(pos_freq_idx)) + total_power1 = zeros(T, length(pos_freq_idx)) + total_power2 = zeros(T, length(pos_freq_idx)) + total_counts1 = 0 + total_counts2 = 0 + n_segments_used = 0 + + segments1 = collect(segment_generator1) + segments2 = collect(segment_generator2) + + if length(segments1) != length(segments2) + throw(ArgumentError("Mismatch in number of segments between light curves")) + end + + # Process each segment + for i = 1:length(segments1) + start_time1, stop_time1, start_idx1, stop_idx1 = segments1[i] + start_time2, stop_time2, start_idx2, stop_idx2 = segments2[i] + + segment_length1 = stop_idx1 - start_idx1 + segment_length2 = stop_idx2 - start_idx2 + + if segment_length1 != n_bins_per_segment || segment_length2 != n_bins_per_segment + continue + end + + segment_counts1 = @view 
lc1.counts[start_idx1+1:stop_idx1] + segment_counts2 = @view lc2.counts[start_idx2+1:stop_idx2] + + segment_sum1 = sum(segment_counts1) + segment_sum2 = sum(segment_counts2) + + if segment_sum1 == 0 || segment_sum2 == 0 + continue + end + + # Calculate FFTs + ft1 = fft(segment_counts1) + ft2 = fft(segment_counts2) + + unnorm_cross_power = ft1 .* conj.(ft2) + unnorm_power1 = abs2.(ft1) + unnorm_power2 = abs2.(ft2) + + if !fullspec + unnorm_cross_power = unnorm_cross_power[pos_freq_idx] + unnorm_power1 = unnorm_power1[pos_freq_idx] + unnorm_power2 = unnorm_power2[pos_freq_idx] + end + + # Normalize + cross_power = normalize_periodograms( + unnorm_cross_power, + bin_size, + n_bins_per_segment; + mean_flux = sqrt(mean(segment_counts1) * mean(segment_counts2)), + n_ph = sqrt(segment_sum1 * segment_sum2), + norm = norm, + power_type = power_type, + ) + + power1 = normalize_periodograms( + unnorm_power1, + bin_size, + n_bins_per_segment; + mean_flux = mean(segment_counts1), + n_ph = segment_sum1, + norm = norm, + power_type = power_type, + ) + + power2 = normalize_periodograms( + unnorm_power2, + bin_size, + n_bins_per_segment; + mean_flux = mean(segment_counts2), + n_ph = segment_sum2, + norm = norm, + power_type = power_type, + ) + + # Accumulate + total_cross_power .+= cross_power + total_power1 .+= power1 + total_power2 .+= power2 + total_counts1 += segment_sum1 + total_counts2 += segment_sum2 + n_segments_used += 1 + end + + if n_segments_used == 0 + throw(ArgumentError("No valid segments found")) + end + + # Average + avg_cross_power = total_cross_power ./ n_segments_used + avg_power1 = total_power1 ./ n_segments_used + avg_power2 = total_power2 ./ n_segments_used + + # Calculate mean rates + mean_rate1 = total_counts1 / (n_segments_used * segment_size) + mean_rate2 = total_counts2 / (n_segments_used * segment_size) + + # Create object using unified CrossSpectrum struct + cs = CrossSpectrum{T}( + freqs, + avg_cross_power, + nothing, # Will be filled by fill_errors! 
if requested + avg_power1, + avg_power2, + norm, + power_type, + df, + T(total_counts1), + T(total_counts2), + n_segments_used, # m > 1 indicates averaged + length(freqs), + 1, + lc1.metadata, + lc2.metadata, + fullspec, + false, + T(segment_size), + mean_rate1, + mean_rate2, + ) + + # Fill proper errors if requested + if fill_errors_on_creation + fill_errors!(cs) + end + + return cs +end + +""" + AveragedCrossSpectrum(ev1::EventList, ev2::EventList, segment_size::Real, dt::Real; + norm::String="frac", use_common_mean::Bool=true, + fullspec::Bool=false, power_type::String="all", + fill_errors_on_creation::Bool=true) -> CrossSpectrum + +Create an averaged cross spectrum from two event lists by averaging multiple segments. + +# Arguments +- `ev1::EventList`: First event list +- `ev2::EventList`: Second event list +- `segment_size::Real`: Size of each segment for averaging +- `dt::Real`: Time bin size + +# Keywords +- `norm::String="frac"`: Normalization type +- `use_common_mean::Bool=true`: Whether to use common mean for normalization +- `fullspec::Bool=false`: If true, include negative frequencies +- `power_type::String="all"`: Power type +- `fill_errors_on_creation::Bool=true`: Whether to calculate errors immediately + +# Returns +- `CrossSpectrum`: Multi-segment averaged cross spectrum + +# Examples +```julia +# Create averaged cross spectrum from events +cs_avg = AveragedCrossSpectrum(ev1, ev2, 100.0, 0.1) +``` +""" + +function AveragedCrossSpectrum( + ev1::EventList, + ev2::EventList, + segment_size::Real, + dt::Real; + norm::String = "frac", + use_common_mean::Bool = true, + fullspec::Bool = false, + power_type::String = "all", + fill_errors_on_creation::Bool = true, +) + + if isnan(segment_size) + throw(ArgumentError("Segment size cannot be NaN")) + end + if isinf(segment_size) + throw(ArgumentError("Segment size cannot be Inf")) + end + if segment_size <= 0 + throw(ArgumentError("Segment size must be positive")) + end + if dt <= 0 + throw(ArgumentError("Bin 
size must be positive")) + end + if segment_size <= dt + throw(ArgumentError("Segment size must be larger than bin size")) + end + + # Validate time alignment + validate_time_alignment(ev1.meta.headers, ev2.meta.headers) + + # Get GTI intersection + gti1 = ev1.meta.gti + gti2 = ev2.meta.gti + cross_gti = intersect_gtis(gti1, gti2) + + if size(cross_gti, 1) == 0 + throw(ArgumentError("No overlapping GTIs between event lists")) + end + + n_bins_per_segment = round(Int, segment_size / dt) + + if n_bins_per_segment < 2 + throw(ArgumentError("Segment size too small relative to dt")) + end + + # Generate segments + segment_generator = + generate_indices_of_segment_boundaries_unbinned(ev1.times, cross_gti, segment_size) + + # Create frequency array + freqs = fftfreq(n_bins_per_segment, 1 / dt) + if !fullspec + pos_freq_idx = positive_fft_bins(n_bins_per_segment; include_zero = false) + freqs = freqs[pos_freq_idx] + else + pos_freq_idx = 1:length(freqs) + end + df = freqs[2] - freqs[1] + + # Initialize accumulators + total_cross_power = zeros(Complex{Float64}, length(pos_freq_idx)) + total_power1 = zeros(Float64, length(pos_freq_idx)) + total_power2 = zeros(Float64, length(pos_freq_idx)) + total_counts1 = 0.0 + total_counts2 = 0.0 + n_segments_used = 0 + + # Process segments + for (start_time, stop_time, start_idx1, stop_idx1) in segment_generator + start_idx2 = searchsortedfirst(ev2.times, start_time) + stop_idx2 = searchsortedfirst(ev2.times, stop_time) + + if stop_idx1 - start_idx1 < 2 || stop_idx2 - start_idx2 < 2 + continue + end + + segment_times1 = @view ev1.times[start_idx1:stop_idx1-1] + segment_times2 = @view ev2.times[start_idx2:stop_idx2-1] + + # Create time grid and bin events + time_grid = range(start_time, stop = stop_time, length = n_bins_per_segment + 1) + + counts1 = zeros(Int, n_bins_per_segment) + counts2 = zeros(Int, n_bins_per_segment) + + for event_time in segment_times1 + bin_idx = searchsortedfirst(time_grid, event_time) + if 1 <= bin_idx <= 
n_bins_per_segment + counts1[bin_idx] += 1 + end + end + + for event_time in segment_times2 + bin_idx = searchsortedfirst(time_grid, event_time) + if 1 <= bin_idx <= n_bins_per_segment + counts2[bin_idx] += 1 + end + end + + segment_sum1 = sum(counts1) + segment_sum2 = sum(counts2) + + if segment_sum1 == 0 || segment_sum2 == 0 + continue + end + + # Calculate FFTs + ft1 = fft(counts1) + ft2 = fft(counts2) + + unnorm_cross_power = ft1 .* conj.(ft2) + unnorm_power1 = abs2.(ft1) + unnorm_power2 = abs2.(ft2) + + if !fullspec + unnorm_cross_power = unnorm_cross_power[pos_freq_idx] + unnorm_power1 = unnorm_power1[pos_freq_idx] + unnorm_power2 = unnorm_power2[pos_freq_idx] + end + + # Normalize + cross_power = normalize_periodograms( + unnorm_cross_power, + dt, + n_bins_per_segment; + mean_flux = sqrt(mean(counts1) * mean(counts2)), + n_ph = sqrt(segment_sum1 * segment_sum2), + norm = norm, + power_type = power_type, + ) + + power1 = normalize_periodograms( + unnorm_power1, + dt, + n_bins_per_segment; + mean_flux = mean(counts1), + n_ph = segment_sum1, + norm = norm, + power_type = power_type, + ) + + power2 = normalize_periodograms( + unnorm_power2, + dt, + n_bins_per_segment; + mean_flux = mean(counts2), + n_ph = segment_sum2, + norm = norm, + power_type = power_type, + ) + + # Accumulate + total_cross_power .+= cross_power + total_power1 .+= power1 + total_power2 .+= power2 + total_counts1 += segment_sum1 + total_counts2 += segment_sum2 + n_segments_used += 1 + end + + if n_segments_used == 0 + throw(ArgumentError("No valid segments found")) + end + + # Average + avg_cross_power = total_cross_power ./ n_segments_used + avg_power1 = total_power1 ./ n_segments_used + avg_power2 = total_power2 ./ n_segments_used + + # Calculate mean rates + mean_rate1 = total_counts1 / (n_segments_used * segment_size) + mean_rate2 = total_counts2 / (n_segments_used * segment_size) + + # Create object using unified CrossSpectrum struct + cs = CrossSpectrum{Float64}( + freqs, + 
avg_cross_power, + nothing, # Will be filled by fill_errors! if requested + avg_power1, + avg_power2, + norm, + power_type, + df, + total_counts1, + total_counts2, + n_segments_used, # m > 1 indicates averaged + length(freqs), + 1, + ev1.meta, + ev2.meta, + fullspec, + false, + Float64(segment_size), + mean_rate1, + mean_rate2, + ) + + # Fill proper errors if requested + if fill_errors_on_creation + fill_errors!(cs) + end + + return cs +end +""" + theoretical_noise_level(cs::CrossSpectrum{T}) where T -> T + +Calculate theoretical noise level for cross spectrum based on Poisson statistics. + +For cross spectra, the noise level is calculated as the geometric mean of individual +power spectral noise levels. For averaged spectra, the noise is reduced by √m where +m is the number of segments averaged. + +# Arguments +- `cs::CrossSpectrum{T}`: The cross spectrum to analyze + +# Returns +- `T`: Theoretical cross spectrum noise level + +# Mathematical Details +- For single spectra: σ_cross = √(noise1 × noise2) +- For averaged spectra: σ_cross = √(noise1 × noise2) / √m +- Individual noise levels computed using Poisson statistics + +# Examples +```julia +cs = CrossSpectrum(lc1, lc2) +noise = theoretical_noise_level(cs) +println("Theoretical noise level: ", noise) +``` +""" +function theoretical_noise_level(cs::CrossSpectrum{T}) where {T} + + # For cross spectra, noise is related to individual power spectra + # The cross spectrum noise level should be approximately: + # σ_cross ≈ sqrt(P1 * P2) / sqrt(m) for signal-dominated regions + # σ_cross ≈ sqrt(noise1 * noise2) / sqrt(m) for noise-dominated regions + + # Individual Poisson noise levels (per segment) + if is_averaged(cs) + noise1 = poisson_level(cs.norm; meanrate = cs.mean_rate1, n_ph = cs.nphots1 / cs.m) + noise2 = poisson_level(cs.norm; meanrate = cs.mean_rate2, n_ph = cs.nphots2 / cs.m) + # For cross spectrum, theoretical noise is the geometric mean of individual noise levels + # divided by sqrt of number of segments 
averaged + cross_noise = sqrt(noise1 * noise2) / sqrt(cs.m) + else + noise1 = poisson_level(cs.norm; meanrate = cs.mean_rate1, n_ph = cs.nphots1) + noise2 = poisson_level(cs.norm; meanrate = cs.mean_rate2, n_ph = cs.nphots2) + cross_noise = sqrt(noise1 * noise2) + end + + return cross_noise +end + +""" + fill_errors!(cs::CrossSpectrum{T}) where T -> CrossSpectrum{T} + +Fill proper error estimates for cross spectrum power values. + +Computes error estimates based on theoretical noise levels and, for averaged spectra, +includes sample variance contributions where signal-to-noise ratio exceeds 1. + +# Arguments +- `cs::CrossSpectrum{T}`: The cross spectrum to fill errors for (modified in-place) + +# Returns +- `CrossSpectrum{T}`: The same cross spectrum with `power_err` field populated + +# Mathematical Details +- Base errors start with theoretical noise level +- For averaged spectra with S/N > 1: σ_total = √(σ_noise² + σ_sample²) +- Sample variance scales as signal/√m for averaged spectra + +# Examples +```julia +cs = AveragedCrossSpectrum(lc1, lc2, 100.0) +fill_errors!(cs) +# cs.power_err now contains error estimates +``` + +# Notes +This function modifies the input cross spectrum in-place by setting the `power_err` field. +Work is still in progress for optimal error estimation methods. 
+""" +function fill_errors!(cs::CrossSpectrum{T}) where {T} + + # Get theoretical noise level + noise_level = theoretical_noise_level(cs) + + # Initialize with theoretical noise level + errors = fill(noise_level, length(cs.power)) + + if is_averaged(cs) + # For averaged cross spectra, add sample variance contribution + signal_power = abs.(cs.power) + snr = signal_power ./ noise_level + + # Where S/N > 1, add sample variance term + high_snr_mask = snr .> 1.0 + if any(high_snr_mask) + # Sample variance scales as signal/sqrt(m) + sample_variance = signal_power[high_snr_mask] ./ sqrt(cs.m) + errors[high_snr_mask] = + sqrt.(errors[high_snr_mask] .^ 2 .+ sample_variance .^ 2) + end + else + # For single spectra, errors are primarily Poisson noise + # but we can add additional terms if needed + # errors already initialized with theoretical noise level + end + + cs.power_err = errors + return cs +end + +""" + white_noise_level(cs::CrossSpectrum{T}; high_freq_fraction::Real=0.2) where T -> T + +Estimate white noise level from high frequency portion of spectrum. + +Uses the highest frequency bins to empirically estimate the noise floor, assuming +that high frequencies are dominated by white noise rather than signal. 

# Arguments
- `cs::CrossSpectrum{T}`: The cross spectrum to analyze
- `high_freq_fraction::Real=0.2`: Fraction of highest frequencies used for the estimate

# Returns
- `T`: Estimated white noise level; falls back to `theoretical_noise_level(cs)`
  when fewer than 5 high-frequency bins are available

# Warnings
- Warns when too few high-frequency bins exist for a reliable estimate
- Warns when the empirical estimate exceeds 3× the theoretical level
"""
function white_noise_level(cs::CrossSpectrum{T}; high_freq_fraction::Real = 0.2) where {T}
    # Select the top `high_freq_fraction` of the frequency range.
    cutoff = maximum(cs.freq) * (1.0 - high_freq_fraction)
    sel = cs.freq .>= cutoff

    if sum(sel) < 5
        @warn "Too few high frequency points for reliable noise estimation"
        return theoretical_noise_level(cs)
    end

    # Median is robust against narrow features/outliers in the tail.
    est = median(abs.(cs.power[sel]))

    # Sanity check against the Poisson expectation.
    expected = theoretical_noise_level(cs)
    if est > 3.0 * expected
        @warn "Estimated noise level ($(est)) much higher than theoretical ($(expected))"
    end

    return est
end

"""
    noise_corrected_power(cs::CrossSpectrum{T}) where T -> Vector{T}

Return power spectrum with noise floor subtracted.

Subtracts the empirically estimated white noise level from the power spectrum
amplitude, ensuring non-negative results.

# Arguments
- `cs::CrossSpectrum{T}`: The cross spectrum to correct

# Returns
- `Vector{T}`: Noise-corrected power amplitudes, floored at 1% of the noise
  level so the result is never zero or negative
"""
function noise_corrected_power(cs::CrossSpectrum{T}) where {T}
    # Empirical floor from the high-frequency end of the spectrum.
    floor_level = white_noise_level(cs)
    amplitude = abs.(cs.power)
    # Keep a small positive floor so downstream log-scaling never sees <= 0.
    return max.(amplitude .- floor_level, 0.01 * floor_level)
end

"""
    signal_to_noise_ratio(cs::CrossSpectrum{T}) where T -> Vector{T}

Calculate signal-to-noise ratio for each frequency bin.

Computes the ratio of signal power to noise level using empirical noise
estimation from high frequencies.

# Arguments
- `cs::CrossSpectrum{T}`: The cross spectrum to analyze

# Returns
- `Vector{T}`: S/N per frequency bin, `|power(f)| / white_noise_level(cs)`
"""
function signal_to_noise_ratio(cs::CrossSpectrum{T}) where {T}
    # Empirical (high-frequency) noise estimate, not the theoretical one.
    return abs.(cs.power) ./ white_noise_level(cs)
end

"""
    detect_aliasing(cs::CrossSpectrum{T}) where T -> (Bool, String)

Detect potential aliasing in cross spectrum.

Analyzes the spectrum for signs of aliasing by comparing power levels
at high and low frequencies. Aliasing typically manifests as artificially
high power at frequencies near the Nyquist frequency.

# Arguments
- `cs::CrossSpectrum{T}`: The cross spectrum to analyze

# Returns
- `Tuple{Bool, String}`: `(aliasing_detected, diagnostic_message)`

# Detection Criteria
- Median power in the top 20% of frequencies > 5× median power in the bottom 20%
- AND that high-frequency power > 10× the theoretical noise level
- Requires at least 5 bins in both regions
"""
function detect_aliasing(cs::CrossSpectrum{T}) where {T}
    f_nyq = maximum(cs.freq)
    hi = cs.freq .> 0.8 * f_nyq
    lo = cs.freq .< 0.2 * f_nyq

    if sum(hi) < 5 || sum(lo) < 5
        return false, "Not enough frequency bins for aliasing detection"
    end

    # Medians are robust to isolated spikes in either band.
    p_hi = median(abs.(cs.power[hi]))
    p_lo = median(abs.(cs.power[lo]))
    noise = theoretical_noise_level(cs)

    suspected = (p_hi > 5.0 * p_lo) && (p_hi > 10.0 * noise)

    msg = suspected ?
        "Possible aliasing detected: high freq power = $(p_hi), low freq power = $(p_lo)" :
        "No obvious aliasing detected"

    return suspected, msg
end

"""
    coherence(cs::CrossSpectrum{T}) where T -> Vector{T}

Calculate coherence function for cross spectrum.

Coherence measures the linear correlation between two signals as a function
of frequency, ranging from 0 (no correlation) to 1 (perfect correlation).

# Arguments
- `cs::CrossSpectrum{T}`: The cross spectrum to analyze

# Returns
- `Vector{T}`: Coherence values per frequency bin, clamped to [0, 1]

# Mathematical Details
- γ²(f) = |P_xy(f)|² / (P_xx(f) × P_yy(f))
- For averaged spectra a noise-bias term of 1/m is subtracted before clamping
- Single spectra (m = 1) have coherence ≈ 1 by construction; the value is
  still computed for diagnostics
"""
function coherence(cs::CrossSpectrum{T}) where {T}
    # Raw magnitude-squared coherence for every bin; both the single and the
    # averaged case start from the same ratio.
    raw = [abs2(cs.power[i]) / (cs.ps1[i] * cs.ps2[i]) for i in eachindex(cs.freq)]

    if is_single(cs)
        # No ensemble averaging: only clamp away numerical round-off.
        return clamp.(raw, 0.0, 1.0)
    end

    # Averaged spectra: remove the 1/m noise bias, then clamp to [0, 1].
    bias = 1.0 / cs.m
    return clamp.(raw .- bias, 0.0, 1.0)
end

"""
    phase_lag(cs::CrossSpectrum) -> Vector

Calculate phase lag from cross spectrum.

# Returns
- `Vector`: `arg(P_xy(f))` in radians, range [-π, π]; positive means signal 2
  lags signal 1
"""
phase_lag(cs::CrossSpectrum) = angle.(cs.power)

"""
    time_lag(cs::CrossSpectrum) -> Vector

Calculate time lag from cross spectrum phase information.

Converts phase lag to time lag by dividing by 2πf. Positive values indicate
that the second signal lags behind the first signal.

# Arguments
- `cs::CrossSpectrum`: The cross spectrum to analyze

# Returns
- `Vector`: Time lag in seconds per frequency bin, `phase_lag(f) / (2π f)`

# Notes
- Diverges toward zero frequency; consider masking the lowest bins
"""
time_lag(cs::CrossSpectrum) = phase_lag(cs) ./ (2π .* cs.freq)

"""
    noise_properties(cs::CrossSpectrum{T}) where T -> Dict{String, Any}

Comprehensive noise analysis for cross spectrum.

This function analyzes various noise properties of the cross spectrum including
theoretical and empirical noise levels, signal-to-noise statistics, and detection rates.

# Arguments
- `cs::CrossSpectrum{T}`: The cross spectrum to analyze

# Returns
- `Dict{String, Any}`: Noise analysis results, including `"theoretical_noise"`,
  `"noise_level_1"`/`"noise_level_2"`, `"mean_power"`/`"std_power"`,
  `"mean_snr"`, `"significant_detections"` (bins with S/N > 3),
  `"total_frequencies"`, `"high_freq_power"`, `"noise_to_signal_ratio"`,
  `"mean_rate_1"`/`"mean_rate_2"`, `"is_averaged"`, `"segments_averaged"`,
  and `"averaging_improvement"` (√m)
"""
function noise_properties(cs::CrossSpectrum{T}) where {T}
    # Per-channel Poisson levels. For averaged spectra the photon totals cover
    # all segments, so divide by m to get a per-segment count.
    if is_averaged(cs)
        noise1 = poisson_level(cs.norm; meanrate = cs.mean_rate1, n_ph = cs.nphots1 / cs.m)
        noise2 = poisson_level(cs.norm; meanrate = cs.mean_rate2, n_ph = cs.nphots2 / cs.m)
    else
        noise1 = poisson_level(cs.norm; meanrate = cs.mean_rate1, n_ph = cs.nphots1)
        noise2 = poisson_level(cs.norm; meanrate = cs.mean_rate2, n_ph = cs.nphots2)
    end

    cross_noise = theoretical_noise_level(cs)

    # Power statistics
    mean_power = mean(abs.(cs.power))
    std_power = std(abs.(cs.power))

    # S/N analysis
    snr = signal_to_noise_ratio(cs)
    mean_snr = mean(snr)
    significant_bins = sum(snr .> 3.0)

    # High frequency noise estimate.
    # BUG FIX: `high_freq_idx` is a Bool mask with the same length as `freq`,
    # so the old guard `length(high_freq_idx) > 0` was always true; use
    # `any(...)` so an empty selection yields NaN instead of averaging an
    # empty slice.
    high_freq_idx = cs.freq .> (maximum(cs.freq) * 0.8)
    high_freq_power = any(high_freq_idx) ? mean(abs.(cs.power[high_freq_idx])) : NaN

    # Build properties dictionary
    properties = Dict(
        "theoretical_noise" => cross_noise,
        "noise_level_1" => noise1,
        "noise_level_2" => noise2,
        "mean_power" => mean_power,
        "std_power" => std_power,
        "mean_snr" => mean_snr,
        "significant_detections" => significant_bins,
        "total_frequencies" => length(cs.freq),
        "high_freq_power" => high_freq_power,
        "noise_to_signal_ratio" => cross_noise / mean_power,
        "mean_rate_1" => cs.mean_rate1,
        "mean_rate_2" => cs.mean_rate2,
        "is_averaged" => is_averaged(cs),
    )

    # Averaging-specific properties
    if is_averaged(cs)
        properties["segments_averaged"] = cs.m
        properties["averaging_improvement"] = sqrt(cs.m)
    else
        properties["segments_averaged"] = 1
        properties["averaging_improvement"] = 1.0
    end

    return properties
end

"""
    significant_frequencies(cs::CrossSpectrum{T}, threshold::Real=3.0) where T -> Vector{T}

Return frequencies where signal detection is significant.

Identifies frequency bins where the signal-to-noise ratio exceeds a specified
threshold, indicating statistically significant signal detection.

# Arguments
- `cs::CrossSpectrum{T}`: The cross spectrum to analyze
- `threshold::Real=3.0`: S/N threshold for significance (~99.7% confidence)

# Returns
- `Vector{T}`: Frequencies whose S/N exceeds `threshold`
"""
function significant_frequencies(cs::CrossSpectrum{T}, threshold::Real = 3.0) where {T}
    return cs.freq[signal_to_noise_ratio(cs) .> threshold]
end

"""
    get_noise_level(cs::CrossSpectrum{T}, method::Symbol=:theoretical) where T -> T

Get noise level using different estimation methods.

Provides flexible access to different noise level estimation approaches,
either theoretical (based on Poisson statistics) or empirical (from high frequencies).

# Arguments
- `cs::CrossSpectrum{T}`: The cross spectrum to analyze
- `method::Symbol=:theoretical`: `:theoretical` (Poisson statistics) or
  `:empirical` (high-frequency estimate)

# Returns
- `T`: Estimated noise level

# Throws
- `ArgumentError`: If an unknown method is specified
"""
function get_noise_level(cs::CrossSpectrum{T}, method::Symbol = :theoretical) where {T}
    method === :theoretical && return theoretical_noise_level(cs)
    method === :empirical && return white_noise_level(cs)
    throw(ArgumentError("Unknown method: $method. Use :theoretical or :empirical"))
end

"""
    quality_metrics(cs::CrossSpectrum{T}) where T -> Dict{String, Any}

Calculate quality metrics for cross spectrum analysis.

Computes various metrics to assess the quality and reliability of the cross
spectrum, including signal-to-noise statistics and dynamic range measures.

# Arguments
- `cs::CrossSpectrum{T}`: The cross spectrum to analyze

# Returns
- `Dict{String, Any}`: Quality metrics — `"mean_snr"`, `"max_snr"`,
  `"significant_fraction"` (bins with S/N > 3), `"noise_level"`,
  `"dynamic_range"` (max/min power), `"is_averaged"`, and for averaged
  spectra `"segments_averaged"` and `"expected_improvement"` (√m)
"""
function quality_metrics(cs::CrossSpectrum{T}) where {T}
    snr = signal_to_noise_ratio(cs)
    amp = abs.(cs.power)

    result = Dict(
        "mean_snr" => mean(snr),
        "max_snr" => maximum(snr),
        "significant_fraction" => sum(snr .> 3.0) / length(snr),
        "noise_level" => theoretical_noise_level(cs),
        "dynamic_range" => maximum(amp) / minimum(amp),
        "is_averaged" => is_averaged(cs),
    )

    if is_averaged(cs)
        result["segments_averaged"] = cs.m
        result["expected_improvement"] = sqrt(cs.m)
    end

    return result
end

"""
    rebin(cs::CrossSpectrum{T}, df_new::Real) where T -> CrossSpectrum{T}

Rebin cross spectrum to a new frequency resolution.

Combines adjacent frequency bins to achieve lower frequency resolution and
improved signal-to-noise ratio through increased effective averaging.

# Arguments
- `cs::CrossSpectrum{T}`: The cross spectrum to rebin
- `df_new::Real`: New frequency resolution (must be ≥ current resolution)

# Returns
- `CrossSpectrum{T}`: Rebinned cross spectrum

# Mathematical Details
- Rebin factor = round(df_new / current df); new values are bin averages
- Error propagation: σ_new = √(Σσ²) / n per new bin
- Trailing bins that do not fill a whole new bin are dropped
"""
function rebin(cs::CrossSpectrum{T}, df_new::Real) where {T}
    df_new < cs.df && error("New frequency resolution must be >= current resolution")

    factor = round(Int, df_new / cs.df)
    factor == 1 && return cs

    n_new = div(length(cs.freq), factor)
    n_new == 0 && error("Rebinning factor too large for available frequencies")

    freq_new = Vector{T}(undef, n_new)
    power_new = Vector{Complex{T}}(undef, n_new)
    ps1_new = Vector{T}(undef, n_new)
    ps2_new = Vector{T}(undef, n_new)
    power_err_new = isnothing(cs.power_err) ? nothing : Vector{T}(undef, n_new)

    for i = 1:n_new
        # Index range of original bins folded into new bin i.
        rng = ((i - 1) * factor + 1):min(i * factor, length(cs.freq))

        freq_new[i] = mean(cs.freq[rng])
        power_new[i] = mean(cs.power[rng])
        ps1_new[i] = mean(cs.ps1[rng])
        ps2_new[i] = mean(cs.ps2[rng])

        if !isnothing(cs.power_err)
            # Standard error of the mean of independent estimates.
            power_err_new[i] = sqrt(sum(cs.power_err[rng] .^ 2)) / length(rng)
        end
    end

    # Track how many original bins feed each new bin.
    k_new = (is_averaged(cs) && isa(cs.k, Int)) ? cs.k * factor : factor

    return CrossSpectrum{T}(
        freq_new,
        power_new,
        power_err_new,
        ps1_new,
        ps2_new,
        cs.norm,
        cs.power_type,
        df_new,
        cs.nphots1,
        cs.nphots2,
        cs.m,
        length(freq_new),
        k_new,
        cs.metadata1,
        cs.metadata2,
        cs.fullspec,
        cs.channels_overlap,
        cs.segment_size,
        cs.mean_rate1,
        cs.mean_rate2,
    )
end

"""
    rebin_log(cs::CrossSpectrum{T}; f::Real=0.01) where T -> CrossSpectrum{T}

Rebin cross spectrum using logarithmic frequency spacing.

Creates bins with logarithmically increasing widths, providing higher resolution
at low frequencies and lower resolution at high frequencies. This is particularly
useful for analyzing power-law spectra where features span multiple decades.

# Arguments
- `cs::CrossSpectrum{T}`: The cross spectrum to rebin
- `f::Real=0.01`: Fractional frequency resolution (Δf/f), strictly between 0 and 1

# Returns
- `CrossSpectrum{T}`: Logarithmically rebinned cross spectrum

# Mathematical Details
- Each bin spans [f_i, f_i(1+f)]; centers are geometric means √(f_low × f_high)
- Zero frequency, if present, is skipped
- An empty bin falls back to its single closest original frequency
"""
function rebin_log(cs::CrossSpectrum{T}; f::Real = 0.01) where {T}
    (f <= 0 || f >= 1) && error("Fractional frequency resolution f must be between 0 and 1")

    # Skip the DC bin if present.
    first_idx = cs.freq[1] == 0 ? 2 : 1
    first_idx >= length(cs.freq) &&
        error("Not enough frequency points for logarithmic rebinning")

    f_lo = cs.freq[first_idx]
    f_hi = cs.freq[end]

    n_bins = floor(Int, (log10(f_hi) - log10(f_lo)) / log10(1 + f)) + 1
    n_bins <= 1 && error("Not enough frequency range for logarithmic rebinning with f=$f")

    freq_new = Vector{T}(undef, n_bins)
    power_new = Vector{Complex{T}}(undef, n_bins)
    ps1_new = Vector{T}(undef, n_bins)
    ps2_new = Vector{T}(undef, n_bins)
    power_err_new = isnothing(cs.power_err) ? nothing : Vector{T}(undef, n_bins)
    k_new = Vector{Int}(undef, n_bins)

    lower = f_lo
    for i = 1:n_bins
        upper = lower * (1 + f)

        # Last bin is closed on the right so f_max is always included.
        sel = i == n_bins ? ((cs.freq .>= lower) .& (cs.freq .<= f_hi)) :
              ((cs.freq .>= lower) .& (cs.freq .< upper))
        idxs = findall(sel)

        if isempty(idxs)
            # No original bin falls inside: use the closest one.
            idxs = [argmin(abs.(cs.freq .- (lower + upper) / 2))]
        end

        freq_new[i] = sqrt(lower * upper)
        power_new[i] = mean(cs.power[idxs])
        ps1_new[i] = mean(cs.ps1[idxs])
        ps2_new[i] = mean(cs.ps2[idxs])

        # Track the number of original bins contributing to each new bin.
        k_new[i] = if is_averaged(cs)
            isa(cs.k, Int) ? cs.k * length(idxs) : sum(cs.k[idxs])
        else
            length(idxs)
        end

        if !isnothing(cs.power_err)
            power_err_new[i] = sqrt(sum(cs.power_err[idxs] .^ 2)) / length(idxs)
        end

        lower = upper
    end

    df_new = freq_new[2] - freq_new[1]

    return CrossSpectrum{T}(
        freq_new,
        power_new,
        power_err_new,
        ps1_new,
        ps2_new,
        cs.norm,
        cs.power_type,
        df_new,
        cs.nphots1,
        cs.nphots2,
        cs.m,
        length(freq_new),
        k_new,
        cs.metadata1,
        cs.metadata2,
        cs.fullspec,
        cs.channels_overlap,
        cs.segment_size,
        cs.mean_rate1,
        cs.mean_rate2,
    )
end

"""
    rebin(cs::CrossSpectrum{T}, rebin_factor::Int) where T -> CrossSpectrum{T}

Rebin cross spectrum by an integer factor.

Convenience method that combines adjacent frequency bins by the specified integer
factor, equivalent to calling `rebin(cs, cs.df * rebin_factor)`.

# Arguments
- `cs::CrossSpectrum{T}`: The cross spectrum to rebin
- `rebin_factor::Int`: Integer rebinning factor

# Returns
- `CrossSpectrum{T}`: Rebinned cross spectrum; the original is returned
  unchanged when `rebin_factor <= 1`
"""
function rebin(cs::CrossSpectrum{T}, rebin_factor::Int) where {T}
    rebin_factor <= 1 && return cs
    return rebin(cs, cs.df * rebin_factor)
end

"""
    geometric_rebin(cs::CrossSpectrum{T}, factor::Real) where T -> CrossSpectrum{T}

Rebin cross spectrum with geometrically increasing bin widths.

Creates frequency bins that grow in width by a constant multiplicative factor,
providing a compromise between linear and logarithmic rebinning. Each successive
bin is `factor` times wider than the previous one.

# Arguments
- `cs::CrossSpectrum{T}`: The cross spectrum to rebin
- `factor::Real`: Geometric growth factor for bin widths (must be > 1.0)

# Returns
- `CrossSpectrum{T}`: Geometrically rebinned cross spectrum

# Mathematical Details
- First bin width = original df; bin n has width df × factor^(n-1)
- Bin centers are arithmetic means of the bin edges
- Zero frequency, if present, is skipped; the final edge is clipped to f_max
"""
function geometric_rebin(cs::CrossSpectrum{T}, factor::Real) where {T}
    factor <= 1.0 && error("Geometric factor must be > 1.0")

    # Skip the DC bin if present.
    first_idx = cs.freq[1] == 0 ? 2 : 1
    first_idx >= length(cs.freq) &&
        error("Not enough frequency points for geometric rebinning")

    # Build edges whose spacing grows by `factor` each step.
    edges = [cs.freq[first_idx]]
    width = cs.df
    while edges[end] < cs.freq[end]
        next_edge = edges[end] + width
        if next_edge > cs.freq[end]
            push!(edges, cs.freq[end])
            break
        end
        push!(edges, next_edge)
        width *= factor
    end

    n_bins = length(edges) - 1
    n_bins <= 1 && error("Not enough frequency range for geometric rebinning")

    freq_new = Vector{T}(undef, n_bins)
    power_new = Vector{Complex{T}}(undef, n_bins)
    ps1_new = Vector{T}(undef, n_bins)
    ps2_new = Vector{T}(undef, n_bins)
    power_err_new = isnothing(cs.power_err) ? nothing : Vector{T}(undef, n_bins)
    k_new = Vector{Int}(undef, n_bins)

    for i = 1:n_bins
        # Last bin is closed on the right so f_max is always included.
        sel = i == n_bins ? ((cs.freq .>= edges[i]) .& (cs.freq .<= edges[i+1])) :
              ((cs.freq .>= edges[i]) .& (cs.freq .< edges[i+1]))
        idxs = findall(sel)

        if isempty(idxs)
            # No original bin falls inside: use the closest one.
            idxs = [argmin(abs.(cs.freq .- (edges[i] + edges[i+1]) / 2))]
        end

        freq_new[i] = (edges[i] + edges[i+1]) / 2
        power_new[i] = mean(cs.power[idxs])
        ps1_new[i] = mean(cs.ps1[idxs])
        ps2_new[i] = mean(cs.ps2[idxs])

        k_new[i] = if is_averaged(cs)
            isa(cs.k, Int) ? cs.k * length(idxs) : sum(cs.k[idxs])
        else
            length(idxs)
        end

        if !isnothing(cs.power_err)
            power_err_new[i] = sqrt(sum(cs.power_err[idxs] .^ 2)) / length(idxs)
        end
    end

    df_new = freq_new[2] - freq_new[1]

    return CrossSpectrum{T}(
        freq_new,
        power_new,
        power_err_new,
        ps1_new,
        ps2_new,
        cs.norm,
        cs.power_type,
        df_new,
        cs.nphots1,
        cs.nphots2,
        cs.m,
        length(freq_new),
        k_new,
        cs.metadata1,
        cs.metadata2,
        cs.fullspec,
        cs.channels_overlap,
        cs.segment_size,
        cs.mean_rate1,
        cs.mean_rate2,
    )
end

"""
    is_rebinned(cs::CrossSpectrum) -> Bool

Check whether a cross spectrum has been rebinned.

Determines if the cross spectrum has undergone any rebinning operation by
examining the `k` parameter, which tracks the number of original frequency
bins combined into each current bin.

# Arguments
- `cs::CrossSpectrum`: Cross spectrum to check

# Returns
- `Bool`: `true` if rebinned (k is a Vector, or a scalar > 1), `false` otherwise
"""
function is_rebinned(cs::CrossSpectrum)
    k = cs.k
    # Vector k ⇒ non-uniform rebinning; scalar k > 1 ⇒ uniform rebinning.
    return k isa Vector || (k isa Int && k > 1)
end

"""
    effective_samples_per_bin(cs::CrossSpectrum) -> Union{Int, Vector{Int}}

Calculate effective number of independent samples per frequency bin.

Computes the total number of independent measurements contributing to each
frequency bin, accounting for both segment averaging and frequency rebinning.
This is crucial for proper statistical analysis and error estimation.

# Arguments
- `cs::CrossSpectrum`: Cross spectrum to analyze

# Returns
- `Int` or `Vector{Int}`: Effective samples per bin — m × k for averaged
  spectra, k otherwise (scalar or per-bin vector depending on rebinning)
"""
function effective_samples_per_bin(cs::CrossSpectrum)
    # Non-averaged case: k already holds the per-bin count. (The previous
    # implementation had a dead Int/Vector branch that returned cs.k in both
    # arms; collapsed here.)
    is_averaged(cs) || return cs.k

    # Averaged case: scale by the number of averaged segments, elementwise
    # when k tracks non-uniform rebinning.
    return isa(cs.k, Int) ? cs.m * cs.k : cs.m .* cs.k
end

"""
    adaptive_rebin(cs::CrossSpectrum{T}, target_snr::Real=3.0, max_rebin_factor::Int=10) where T -> CrossSpectrum{T}

Automatically rebin cross spectrum to achieve target signal-to-noise ratio.

Iteratively increases the rebinning factor until the desired SNR is achieved
or the maximum rebinning factor is reached. This is useful for optimizing
the trade-off between frequency resolution and statistical significance.

# Arguments
- `cs::CrossSpectrum{T}`: Cross spectrum to adaptively rebin
- `target_snr::Real=3.0`: Target mean signal-to-noise ratio
- `max_rebin_factor::Int=10`: Maximum allowed rebinning factor

# Returns
- `CrossSpectrum{T}`: First rebinning whose mean S/N reaches `target_snr`,
  the original spectrum if it already does, or the maximally rebinned
  spectrum if none succeeds

# Algorithm
1. If mean S/N of `cs` ≥ `target_snr`, return `cs` unchanged
2. Otherwise try factors 2, 3, …, `max_rebin_factor` and return the first
   whose mean S/N meets the target
3. Fall back to `rebin(cs, max_rebin_factor)`
"""
function adaptive_rebin(
    cs::CrossSpectrum{T},
    target_snr::Real = 3.0,
    max_rebin_factor::Int = 10,
) where {T}
    # FIX: the documented contract accepts a spectrum once its *mean* S/N
    # reaches the target (matching the loop's criterion below); the previous
    # early return required *every* bin to exceed the target, so spectra the
    # loop would immediately accept were still rebinned.
    mean(signal_to_noise_ratio(cs)) >= target_snr && return cs

    for factor = 2:max_rebin_factor
        candidate = rebin(cs, factor)
        if mean(signal_to_noise_ratio(candidate)) >= target_snr
            return candidate
        end
    end

    # Target unreachable: return the most aggressively rebinned spectrum.
    return rebin(cs, max_rebin_factor)
end

"""
    Base.show(io::IO, ::MIME"text/plain", cs::CrossSpectrum)

Display cross spectrum information in a readable format.
Provides a comprehensive summary of the cross spectrum properties,
including frequency information, averaging details, and analysis parameters.
The display format differs based on whether the spectrum is averaged or single.

# Arguments
- `io::IO`: Output stream
- `::MIME"text/plain"`: MIME type for plain text display
- `cs::CrossSpectrum`: Cross spectrum to display

# Display Information

## For Averaged Spectra
- Type and element type
- Number of frequency bins
- Number of segments averaged
- Segment size and mean count rates
- Normalization method

## For Single Spectra
- Type and element type
- Number of frequency bins
- Normalization method

# Examples
```julia
cs = CrossSpectrum(lc1, lc2)
println(cs)
# Output:
# CrossSpectrum{Float64} (Single)
# Frequencies: 1024
# Normalization: leahy

cs_avg = average([cs1, cs2, cs3])
println(cs_avg)
# Output:
# CrossSpectrum{Float64} (Averaged)
# Frequencies: 1024
# Segments averaged: 3
# Segment size: 2048
# Mean rates: 150.0, 200.0
# Normalization: leahy
```

# Notes
- Automatically called by `println()` and REPL display
- Helps distinguish between single and averaged spectra
- Provides key information for analysis interpretation
"""
function Base.show(io::IO, ::MIME"text/plain", cs::CrossSpectrum)
    # Shared header; the label distinguishes averaged from single spectra.
    averaged = is_averaged(cs)
    kind = averaged ? "Averaged" : "Single"
    println(io, "CrossSpectrum{$(eltype(cs.freq))} ($kind)")
    println(io, " Frequencies: $(length(cs.freq))")
    if averaged
        # Averaging metadata only exists for multi-segment spectra.
        println(io, " Segments averaged: $(cs.m)")
        println(io, " Segment size: $(cs.segment_size)")
        println(io, " Mean rates: $(cs.mean_rate1), $(cs.mean_rate2)")
    end
    println(io, " Normalization: $(cs.norm)")
end
\ No newline at end of file
diff --git a/src/fourier.jl b/src/fourier.jl
index 0ede925..113f562 100644
--- a/src/fourier.jl
+++ b/src/fourier.jl
@@ -3,9 +3,12 @@ function
function positive_fft_bins(n_bin::Integer; include_zero::Bool = false)
    # Index 1 of an FFT output is the DC (zero-frequency) bin; skip it
    # unless the caller explicitly asks for it. The explicit ternary also
    # guarantees `minbin` is always defined.
    minbin = include_zero ? 1 : 2
    # (n_bin + 1) ÷ 2 is the last positive-frequency index for odd n_bin and
    # the last pre-Nyquist index for even n_bin. The previous even/odd split
    # computed exactly this value in both arms ((n+1)÷2 == n÷2 for even n),
    # so it collapses to a single expression.
    return minbin:((n_bin + 1) ÷ 2)
end

function poisson_level(norm::String; meanrate = nothing, n_ph = nothing, backrate::Real = 0.0)
    if norm == "abs"
        return 2.0 * meanrate
@@ -19,7 +22,79 @@ function poisson_level(norm::String; meanrate = nothing, n_ph = nothing, backrat
        throw(ArgumentError("Unknown value for norm: $norm"))
    end
end

"""
    get_norm_label(norm::Union{String,Symbol})

Get the appropriate ylabel text for different power spectrum normalizations.

# Arguments
- `norm`: Normalization type ("leahy", "frac", "rms", "abs", "none")

# Returns
- String: Appropriate label for the y-axis; unknown norms fall back to "Power"

# Examples
```julia
get_norm_label("frac")   # Returns "(rms/mean)² Hz⁻¹"
get_norm_label("leahy")  # Returns "Leahy Power"
```
"""
function get_norm_label(norm::Union{String,Symbol})
    norm_str = string(norm)
    labels = Dict(
        "leahy" => "Leahy Power",
        "frac" => "(rms/mean)² Hz⁻¹",
        "rms" => "rms² Hz⁻¹",
        "abs" => "Absolute Power",
        "none" => "Raw Power",
    )
    # Unknown normalizations get a generic axis label rather than an error.
    return get(labels, norm_str, "Power")
end

"""
    get_poisson_level(norm::String; meanrate=nothing, n_ph=nothing, backrate=0.0)

Calculate the Poisson noise level for a given normalization.
# Arguments
- `norm`: Normalization type
- `meanrate`: Mean count rate (optional)
- `n_ph`: Number of photons (optional, used to estimate meanrate if meanrate is nothing)
- `backrate`: Background rate (default: 0.0)

# Returns
- Float64: Poisson noise level for the given normalization
"""
function get_poisson_level(norm::String; meanrate::Union{Nothing,Real}=nothing,
                           n_ph::Union{Nothing,Real}=nothing, backrate::Real=0.0)
    # NOTE(review): this fallback passes a photon COUNT where a RATE is
    # expected, which is only correct for 1-second segments — TODO confirm
    # against the callers before relying on it for "abs"/"frac" norms.
    if isnothing(meanrate) && !isnothing(n_ph)
        meanrate = n_ph
    end
    return poisson_level(norm; meanrate=meanrate, n_ph=n_ph, backrate=backrate)
end

"""
    extract_gti(meta)

Extract Good Time Intervals (GTI) from various metadata types.

# Arguments
- `meta`: Metadata object with potential GTI information. Supported shapes:
  an object with a non-nothing `gti` field, a dictionary with a "gti" key,
  or an object with a dict-like `extra` field containing a "gti" key.

# Returns
- GTI data, or `nothing` if none of the supported locations holds one
"""
function extract_gti(meta)
    if hasfield(typeof(meta), :gti) && !isnothing(meta.gti)
        return meta.gti
    elseif isa(meta, AbstractDict) && haskey(meta, "gti")
        # Any dictionary type is accepted, not just `Dict`.
        return meta["gti"]
    elseif hasfield(typeof(meta), :extra) && isa(meta.extra, AbstractDict) &&
           haskey(meta.extra, "gti")
        # Guard on the container type: `haskey` on a non-dict `extra`
        # (e.g. `nothing`) would throw instead of falling through.
        return meta.extra["gti"]
    else
        return nothing
    end
end

function normalize_frac(unnorm_power::AbstractVector{<:Number}, dt::Real, n_bin::Integer,
                        mean_flux::Real; background_flux::Real=0.0)
    if background_flux > 0
@@ -333,7 +408,7 @@ function avg_pds_from_iterable(flux_iterable, dt::Real; norm::String="frac",
    results[!,"freq"] = freq
    results[!,"power"] = cross
    results[!,"unnorm_power"] = unnorm_cross
    # Fixed field-name typo: `setment_size` → `segment_size`.
    results = (; results , n = n_bin, m = n_ave, dt, norm, df = 1 / (dt * n_bin), nphots = n_ph, segment_size = dt * n_bin, mean = common_mean, variance = common_variance)
    results = DataFrame(results.results)
    return results
@@ -674,17 +749,115 @@ function avg_cs_from_iterables(
    return results
end

function avg_pds_from_eventlist(eventlist::EventList,
segment_size::Real, dt::Real; + norm::String="frac", use_common_mean::Bool=true, + silent::Bool=false, fluxes=nothing, errors=nothing) + # Extract times and GTI from EventList + times = eventlist.times + gti = eventlist.meta.gti + + # If no GTI available, create one from the full time range + if isnothing(gti) + gti = reshape([minimum(times), maximum(times)], 1, 2) + end + # Call the original function with extracted data + return avg_pds_from_events(times, gti, segment_size, dt; + norm=norm, use_common_mean=use_common_mean, + silent=silent, fluxes=fluxes, errors=errors) end -function avg_pds_from_events(times:: AbstractVector{<:Real}, gti::AbstractMatrix{<:Real}, +function avg_cs_from_eventlists(eventlist1::EventList, eventlist2::EventList, + segment_size::Real, dt::Real; norm::String="frac", + use_common_mean::Bool=true, fullspec::Bool=false, + silent::Bool=false, power_type::String="all", + fluxes1=nothing, fluxes2=nothing, errors1=nothing, + errors2=nothing, return_auxil=false) + # Extract times and GTI from EventLists + times1 = eventlist1.times + times2 = eventlist2.times + + # Use GTI from first eventlist, or create from time range if not available + gti = eventlist1.meta.gti + if isnothing(gti) + min_time = min(minimum(times1), minimum(times2)) + max_time = max(maximum(times1), maximum(times2)) + gti = reshape([min_time, max_time], 1, 2) + end + + # Call the original function with extracted data + return avg_cs_from_events(times1, times2, gti, segment_size, dt; + norm=norm, use_common_mean=use_common_mean, + fullspec=fullspec, silent=silent, + power_type=power_type, fluxes1=fluxes1, + fluxes2=fluxes2, errors1=errors1, + errors2=errors2, return_auxil=return_auxil) +end + +function avg_pds_from_lightcurve(lc::LightCurve, segment_size::Union{Real,Nothing}=nothing; + norm::String="frac", use_common_mean::Bool=true, + silent::Bool=false) + # Extract relevant data from LightCurve + times = lc.time + dt_lc = isa(lc.dt, Vector) ? 
lc.dt[1] : lc.dt # Use first dt if vector + counts = lc.counts + + # Use segment_size or entire light curve + if isnothing(segment_size) + segment_size = (maximum(times) - minimum(times)) + dt_lc + end + + # Create flux iterable from light curve data + flux_iterable = [counts] # Simple case: use entire light curve as one segment + + return avg_pds_from_iterable(flux_iterable, dt_lc; norm=norm, + use_common_mean=use_common_mean, silent=silent) +end + +function avg_cs_from_lightcurves(lc1::LightCurve, lc2::LightCurve, + segment_size::Union{Real,Nothing}=nothing; + norm::String="frac", use_common_mean::Bool=true, + fullspec::Bool=false, silent::Bool=false, + power_type::String="all", return_auxil=false) + # Extract relevant data from LightCurves + dt1 = isa(lc1.dt, Vector) ? lc1.dt[1] : lc1.dt + dt2 = isa(lc2.dt, Vector) ? lc2.dt[1] : lc2.dt + + # Ensure both light curves have the same time resolution + @assert dt1 ≈ dt2 "Light curves must have the same time resolution" + + counts1 = lc1.counts + counts2 = lc2.counts + + # Ensure both light curves have the same length + min_length = min(length(counts1), length(counts2)) + if length(counts1) != length(counts2) + @warn "Light curves have different lengths, truncating to shorter one" + counts1 = counts1[1:min_length] + counts2 = counts2[1:min_length] + end + + # Create flux iterables from light curve data + flux_iterable1 = [counts1] # Simple case: use entire light curves as one segment + flux_iterable2 = [counts2] + + return avg_cs_from_iterables(flux_iterable1, flux_iterable2, dt1; + norm=norm, use_common_mean=use_common_mean, + fullspec=fullspec, silent=silent, + power_type=power_type, return_auxil=return_auxil) +end + +# Original functions (kept for backward compatibility) +function avg_pds_from_events(times::AbstractVector{<:Real}, gti::AbstractMatrix{<:Real}, segment_size::Real, dt::Real; norm::String="frac", use_common_mean::Bool=true, silent::Bool=false, fluxes=nothing, errors=nothing) if isnothing(segment_size) 
- segment_size = max(gti) - min(gti) + segment_size = maximum(gti) - minimum(gti) end - n_bin = round(Int,segment_size / dt) + n_bin = round(Int, segment_size / dt) dt = segment_size / n_bin flux_iterable = get_flux_iterable_from_segments(times, gti, segment_size; @@ -703,14 +876,14 @@ function avg_pds_from_events(times:: AbstractVector{<:Real}, gti::AbstractMatrix end -function avg_cs_from_events(times1:: AbstractVector{<:Real}, times2:: AbstractVector{<:Real}, +function avg_cs_from_events(times1::AbstractVector{<:Real}, times2::AbstractVector{<:Real}, gti::AbstractMatrix{<:Real}, segment_size::Real, dt::Real; norm::String="frac", use_common_mean::Bool=true, fullspec::Bool=false, silent::Bool=false, power_type::String="all", fluxes1=nothing, fluxes2=nothing, errors1=nothing, errors2=nothing, return_auxil=false) if isnothing(segment_size) - segment_size = max(gti) - min(gti) + segment_size = maximum(gti) - minimum(gti) end n_bin = round(Int, segment_size / dt) # adjust dt @@ -723,8 +896,7 @@ function avg_cs_from_events(times1:: AbstractVector{<:Real}, times2:: AbstractVe times2, gti, segment_size; n_bin, fluxes=fluxes2, errors=errors2 ) - is_events = all(isnothing,(fluxes1, fluxes2, errors1, - errors2)) + is_events = all(isnothing, (fluxes1, fluxes2, errors1, errors2)) if (is_events && silent diff --git a/src/gti.jl b/src/gti.jl index 5d74538..d176b73 100644 --- a/src/gti.jl +++ b/src/gti.jl @@ -241,6 +241,11 @@ function operations_on_gtis(gti_list::AbstractVector{<:AbstractMatrix{T}}, return mapreduce(permutedims, vcat, final_gti, init=reshape(T[], 0, 2)) end +function intersect_gtis(gtis1::AbstractMatrix{<:Real}, gtis2::AbstractMatrix{<:Real}) + check_gtis(gtis1) + check_gtis(gtis2) + return operations_on_gtis([gtis1, gtis2], intersect) +end """ get_btis(gtis::AbstractMatrix{<:Real}) -> Matrix{<:Real} diff --git a/src/plotting/plots_recipes_crossspectrum.jl b/src/plotting/plots_recipes_crossspectrum.jl new file mode 100644 index 0000000..9ef72bc --- /dev/null +++ 
b/src/plotting/plots_recipes_crossspectrum.jl @@ -0,0 +1,1545 @@ +""" +# Usage +```julia +# Basic amplitude plot +plot(cs) +plot(cs, plot_type=:amplitude, xscale=:log10) + +plot(cs, plot_type=:phase_lag, show_errors=true, freq_range=(0.1, 10)) + +# Multiple plots +plot(cs, plot_type=:coherence, show_noise_level=true) +``` + +# Notes +- Returns (freq_plot, data_plot) for single series or Float64[], Float64[] for multi-series +- Uses @series for multiple plot components +- Handles both averaged and single cross spectra +""" +@recipe function f( + cs::CrossSpectrum{T}; + plot_type = :amplitude, + xscale = :log10, + yscale = :log10, + show_errors = false, + freq_range = nothing, + power_range = nothing, + linewidth = 2, + fontsize = 8, + color = :blue, + show_noise_level = false, +) where {T} + + grid --> true + minorgrid --> true + legend --> :topright + linewidth --> linewidth + markersize --> 2 + tickfontsize --> fontsize + guidefontsize --> fontsize + legendfontsize --> fontsize + + freq_mask = if !isnothing(freq_range) + (cs.freq .>= freq_range[1]) .& (cs.freq .<= freq_range[2]) + else + trues(length(cs.freq)) + end + + freq_plot = cs.freq[freq_mask] + + if plot_type == :amplitude + if is_averaged(cs) + title --> "Averaged Cross Spectrum Amplitude ($(cs.m) segments)" + else + title --> "Single Cross Spectrum Amplitude" + end + xlabel --> "Frequency (Hz)" + ylabel --> "Amplitude" + xaxis --> xscale + yaxis --> yscale + + amplitude_plot = abs.(cs.power[freq_mask]) + + if !isnothing(power_range) + ylims --> power_range + end + + if show_errors && !isnothing(cs.power_err) + seriestype --> :scatter + yerror --> cs.power_err[freq_mask] + color --> color + markershape --> :circle + alpha --> 0.7 + label --> "Cross Spectrum Amplitude" + else + seriestype --> :line + color --> color + alpha --> 0.8 + label --> "Cross Spectrum Amplitude" + end + + if show_noise_level + @series begin + seriestype --> :hline + y --> [theoretical_noise_level(cs)] + color --> :red + linestyle 
--> :dash + alpha --> 0.7 + linewidth --> 1 + label --> "Noise Level" + Float64[], Float64[] + end + end + + return freq_plot, amplitude_plot + + elseif plot_type == :power + if is_averaged(cs) + title --> "Averaged Cross Power Spectrum ($(cs.m) segments)" + else + title --> "Single Cross Power Spectrum" + end + xlabel --> "Frequency (Hz)" + ylabel --> "Power" + xaxis --> xscale + yaxis --> yscale + + power_plot = abs2.(cs.power[freq_mask]) + + if !isnothing(power_range) + ylims --> power_range + end + + if show_errors && !isnothing(cs.power_err) + seriestype --> :scatter + yerror --> (cs.power_err[freq_mask] .* 2 .* abs.(cs.power[freq_mask])) + color --> color + markershape --> :circle + label --> "Cross Power" + else + seriestype --> :line + color --> color + label --> "Cross Power" + end + + return freq_plot, power_plot + + elseif plot_type == :phase_lag + if is_averaged(cs) + title --> "Averaged Phase Lag Spectrum ($(cs.m) segments)" + else + title --> "Single Phase Lag Spectrum" + end + xlabel --> "Frequency (Hz)" + ylabel --> "Phase Lag (radians)" + xaxis --> xscale + yaxis --> :identity + + phase_plot = angle.(cs.power[freq_mask]) + + seriestype --> :line + color --> color + label --> "Phase Lag" + + return freq_plot, phase_plot + + elseif plot_type == :time_lag + if is_averaged(cs) + title --> "Averaged Time Lag Spectrum ($(cs.m) segments)" + else + title --> "Single Time Lag Spectrum" + end + xlabel --> "Frequency (Hz)" + ylabel --> "Time Lag (s)" + xaxis --> xscale + yaxis --> :identity + + time_lag_plot = angle.(cs.power[freq_mask]) ./ (2π .* freq_plot) + + seriestype --> :line + color --> color + label --> "Time Lag" + + return freq_plot, time_lag_plot + + elseif plot_type == :raw_coherence + if is_averaged(cs) + title --> "Averaged Raw Coherence Function ($(cs.m) segments)" + else + title --> "Single Raw Coherence Function" + end + xlabel --> "Frequency (Hz)" + ylabel --> "Raw Coherence" + xaxis --> xscale + yaxis --> :identity + ylims --> (0, 1.1) + + 
# Raw coherence calculation (biased) + raw_coherence_plot = + abs2.(cs.power[freq_mask]) ./ (cs.ps1[freq_mask] .* cs.ps2[freq_mask]) + + seriestype --> :line + color --> color + label --> "Raw Coherence" + + return freq_plot, raw_coherence_plot + + elseif plot_type == :coherence + if is_averaged(cs) + title --> "Averaged Coherence Function ($(cs.m) segments)" + else + title --> "Single Coherence Function" + end + xlabel --> "Frequency (Hz)" + ylabel --> "Coherence" + xaxis --> xscale + yaxis --> :identity + ylims --> (0, 1.1) + + # Use the bias-corrected coherence function + coherence_values = coherence(cs) + coherence_plot = coherence_values[freq_mask] + + seriestype --> :line + color --> color + label --> "Coherence" + + return freq_plot, coherence_plot + + elseif plot_type == :snr + if is_averaged(cs) + title --> "Averaged Signal-to-Noise Ratio ($(cs.m) segments)" + else + title --> "Single Signal-to-Noise Ratio" + end + xlabel --> "Frequency (Hz)" + ylabel --> "S/N Ratio" + xaxis --> :log10 + yaxis --> :identity + + # Use empirical noise level (consistent with signal_to_noise_ratio function) + snr_plot = signal_to_noise_ratio(cs) # This uses white_noise_level internally + + seriestype --> :line + color --> :purple + label --> "S/N Ratio (Empirical)" + + return cs.freq, snr_plot + + elseif plot_type == :cross_power_raw + title --> "Cross Power (Raw Complex Values)" + xlabel --> "Frequency (Hz)" + ylabel --> "Cross Power" + xaxis --> xscale + yaxis --> yscale + + power_raw = cs.power[freq_mask] + + seriestype --> :line + color --> color + label --> "Cross Power (Complex Modulus)" + + return freq_plot, abs.(power_raw) + + elseif plot_type == :real_imaginary + if is_averaged(cs) + title --> "Averaged Real and Imaginary Parts ($(cs.m) segments)" + else + title --> "Single Real and Imaginary Parts" + end + xlabel --> "Frequency (Hz)" + ylabel --> "Cross Spectrum" + xaxis --> xscale + yaxis --> :identity + + @series begin + seriestype --> :line + color --> :blue + 
linewidth --> linewidth + label --> "Real Part" + freq_plot, real.(cs.power[freq_mask]) + end + + @series begin + seriestype --> :line + color --> :red + linewidth --> linewidth + label --> "Imaginary Part" + freq_plot, imag.(cs.power[freq_mask]) + end + + return Float64[], Float64[] + + elseif plot_type == :pds_comparison + if is_averaged(cs) + title --> "Averaged Power Density Spectra ($(cs.m) segments)" + else + title --> "Single Power Density Spectra" + end + xlabel --> "Frequency (Hz)" + ylabel --> "Power" + xaxis --> xscale + yaxis --> yscale + + @series begin + seriestype --> :line + color --> :blue + linewidth --> linewidth + label --> "PDS 1" + freq_plot, cs.ps1[freq_mask] + end + + @series begin + seriestype --> :line + color --> :red + linewidth --> linewidth + label --> "PDS 2" + freq_plot, cs.ps2[freq_mask] + end + + return Float64[], Float64[] + + else + error( + "Unknown plot_type: $plot_type. Available: :amplitude, :power, :phase_lag, :time_lag, :coherence, :snr, :cross_power_raw, :real_imaginary, :pds_comparison", + ) + end +end + +""" +# Usage +```julia +plot(cs, Val(:significant_detections), threshold=5.0) +``` + +# Notes +- Highlights frequency bins with S/N > threshold +- Uses median of high frequencies for noise estimation +""" +@recipe function f( + cs::CrossSpectrum{T}, + ::Val{:significant_detections}; + threshold = 3.0, + use_empirical_noise = true, # New parameter for consistency +) where {T} + + if is_averaged(cs) + title --> + "Averaged Cross Spectrum - Significant Detections >$(threshold)σ ($(cs.m) segments)" + else + title --> "Single Cross Spectrum - Significant Detections >$(threshold)σ" + end + xlabel --> "Frequency (Hz)" + ylabel --> "Cross Spectrum Amplitude" + xaxis --> :log10 + yaxis --> :log10 + grid --> true + legend --> :topright + + # Use consistent noise level estimation + noise_level = if use_empirical_noise + white_noise_level(cs) # Same as signal_to_noise_ratio() uses + else + theoretical_noise_level(cs) # Original 
behavior + end + + snr_data = abs.(cs.power) ./ noise_level + significant_mask = snr_data .> threshold + + @series begin + seriestype --> :line + color --> :gray + alpha --> 0.5 + linewidth --> 1 + label --> "All frequencies" + cs.freq, abs.(cs.power) + end + + if sum(significant_mask) > 0 + @series begin + seriestype --> :scatter + color --> :red + markersize --> 3 + markershape --> :circle + label --> "Significant ($(sum(significant_mask)) bins)" + cs.freq[significant_mask], abs.(cs.power[significant_mask]) + end + end + + @series begin + seriestype --> :hline + y --> [noise_level] + color --> :black + linestyle --> :dash + alpha --> 0.7 + label --> use_empirical_noise ? "Empirical Noise Level" : "Theoretical Noise Level" + Float64[], Float64[] + end + + return Float64[], Float64[] +end +""" +# Usage +```julia +plot(cs, Val(:phase_lag_errors), freq_range=(0.1, 10)) +``` + +# Notes +- Error propagation: σ_phase = σ_amplitude / amplitude +- Automatically switches between scatter (with errors) and line plots +""" +@recipe function f( + cs::CrossSpectrum{T}, + ::Val{:phase_lag_errors}; + freq_range = nothing, +) where {T} + + if is_averaged(cs) + title --> "Averaged Phase Lag with Error Bars ($(cs.m) segments)" + else + title --> "Single Phase Lag with Error Bars" + end + xlabel --> "Frequency (Hz)" + ylabel --> "Phase Lag (radians)" + xaxis --> :log10 + yaxis --> :identity + grid --> true + + freq_mask = if !isnothing(freq_range) + (cs.freq .>= freq_range[1]) .& (cs.freq .<= freq_range[2]) + else + trues(length(cs.freq)) + end + + freq_plot = cs.freq[freq_mask] + phase_plot = angle.(cs.power[freq_mask]) + + if !isnothing(cs.power_err) + amplitude = abs.(cs.power[freq_mask]) + phase_err = cs.power_err[freq_mask] ./ amplitude + + seriestype --> :scatter + yerror --> phase_err + color --> :blue + markersize --> 3 + markershape --> :circle + label --> "Phase Lag with errors" + else + seriestype --> :line + color --> :blue + linewidth --> 2 + label --> "Phase Lag" + end 
+ + if !isnothing(freq_range) + xlims --> freq_range + end + + @series begin + seriestype --> :hline + y --> [0.0] + color --> :black + linestyle --> :dash + linewidth --> 1 + alpha --> 0.5 + label --> "" + Float64[], Float64[] + end + + return freq_plot, phase_plot +end +""" +# Usage +```julia +plot(cs, Val(:time_lag_errors), freq_range=(0.1, 10)) +``` + +# Notes +- Converts phase to time: τ = φ/(2πf) +- Shows error bars when available +""" +@recipe function f( + cs::CrossSpectrum{T}, + ::Val{:time_lag_errors}; + freq_range = nothing, +) where {T} + + if is_averaged(cs) + title --> "Averaged Time Lag with Error Bars ($(cs.m) segments)" + else + title --> "Single Time Lag with Error Bars" + end + xlabel --> "Frequency (Hz)" + ylabel --> "Time Lag (s)" + xaxis --> :log10 + yaxis --> :identity + grid --> true + + freq_mask = if !isnothing(freq_range) + (cs.freq .>= freq_range[1]) .& (cs.freq .<= freq_range[2]) + else + trues(length(cs.freq)) + end + + freq_plot = cs.freq[freq_mask] + time_lag_plot = angle.(cs.power[freq_mask]) ./ (2π .* freq_plot) + + if !isnothing(cs.power_err) + amplitude = abs.(cs.power[freq_mask]) + phase_err = cs.power_err[freq_mask] ./ amplitude + time_err = phase_err ./ (2π .* freq_plot) + + seriestype --> :scatter + yerror --> time_err + color --> :blue + markersize --> 3 + markershape --> :circle + label --> "Time Lag with errors" + else + seriestype --> :line + color --> :blue + linewidth --> 2 + label --> "Time Lag" + end + + if !isnothing(freq_range) + xlims --> freq_range + end + + @series begin + seriestype --> :hline + y --> [0.0] + color --> :black + linestyle --> :dash + linewidth --> 1 + alpha --> 0.5 + label --> "" + Float64[], Float64[] + end + + return freq_plot, time_lag_plot +end + +""" +# Usage +```julia +plot(cs, Val(:white_noise), high_freq_fraction=0.2) +``` + +# Notes +- Uses median of highest 20% frequencies as noise floor estimate +- Shows noise region and estimated level +""" +@recipe function f( + cs::CrossSpectrum{T}, 
+ ::Val{:white_noise}; + high_freq_fraction = 0.2, +) where {T} + + if is_averaged(cs) + title --> "Averaged Cross Spectrum - White Noise Estimation ($(cs.m) segments)" + else + title --> "Single Cross Spectrum - White Noise Estimation" + end + xlabel --> "Frequency (Hz)" + ylabel --> "Power" + xaxis --> :log10 + yaxis --> :log10 + grid --> true + legend --> :topright + + freq_cutoff = maximum(cs.freq) * (1.0 - high_freq_fraction) + high_freq_mask = cs.freq .>= freq_cutoff + white_noise = median(abs.(cs.power[high_freq_mask])) + + @series begin + seriestype --> :line + color --> :blue + linewidth --> 2 + label --> "Cross Spectrum" + cs.freq, abs.(cs.power) + end + + @series begin + seriestype --> :vline + x --> [freq_cutoff] + color --> :green + linestyle --> :dash + alpha --> 0.7 + label --> "Noise Region" + Float64[], Float64[] + end + + @series begin + seriestype --> :hline + y --> [white_noise] + color --> :red + linestyle --> :dash + linewidth --> 2 + label --> "White Noise Level" + Float64[], Float64[] + end + + return Float64[], Float64[] +end + +""" +# Usage +```julia +plot(cs, Val(:noise_diagnosis)) +``` + +# Notes +- Comprehensive 2x2 diagnostic panel with aliasing detection +- Shows cross spectrum, S/N ratio, power distribution, and frequency comparison +""" +@recipe function f(cs::CrossSpectrum{T}, ::Val{:noise_diagnosis}) where {T} + + layout --> (2, 2) + size --> (1000, 800) + + nyquist_freq = maximum(cs.freq) + high_freq_mask = cs.freq .> 0.8 * nyquist_freq + aliasing_detected = + length(high_freq_mask) > 0 && + median(abs.(cs.power[high_freq_mask])) > 5.0 * theoretical_noise_level(cs) + + @series begin + subplot --> 1 + seriestype --> :line + color --> :blue + linewidth --> 2 + alpha --> 0.8 + label --> "Cross Spectrum" + title --> "Noise Level Comparison" + xlabel --> "Frequency (Hz)" + ylabel --> "Power" + xaxis --> :log10 + yaxis --> :log10 + cs.freq, abs.(cs.power) + end + + theoretical_noise = theoretical_noise_level(cs) + high_freq_idx = 
cs.freq .> maximum(cs.freq) * 0.8 + empirical_noise = + length(high_freq_idx) > 0 ? median(abs.(cs.power[high_freq_idx])) : + theoretical_noise + + @series begin + subplot --> 1 + seriestype --> :hline + y --> [theoretical_noise] + color --> :red + linestyle --> :dash + linewidth --> 2 + alpha --> 0.7 + label --> "Theoretical Noise" + Float64[], Float64[] + end + + @series begin + subplot --> 1 + seriestype --> :hline + y --> [empirical_noise] + color --> :green + linestyle --> :dot + linewidth --> 2 + alpha --> 0.7 + label --> "Empirical Noise" + Float64[], Float64[] + end + + snr_data = abs.(cs.power) ./ theoretical_noise + @series begin + subplot --> 2 + seriestype --> :line + color --> :purple + linewidth --> 2 + label --> "S/N Ratio" + title --> "Signal-to-Noise vs Frequency" + xlabel --> "Frequency (Hz)" + ylabel --> "S/N" + xaxis --> :log10 + yaxis --> :log10 + cs.freq, max.(snr_data, 0.01) + end + + @series begin + subplot --> 3 + seriestype --> :histogram + bins --> 50 + color --> :orange + alpha --> 0.7 + label --> "" + title --> "Power Distribution" + xlabel --> "Log₁₀(Power)" + ylabel --> "Frequency" + log10.(abs.(cs.power)) + end + + high_freq_mask = cs.freq .> 0.8 * nyquist_freq + low_freq_mask = cs.freq .< 0.2 * nyquist_freq + + plot_color = aliasing_detected ? :red : :green + plot_title = aliasing_detected ? 
"⚠️ Aliasing Suspected" : "✓ No Obvious Aliasing" + + @series begin + subplot --> 4 + seriestype --> :scatter + color --> plot_color + alpha --> 0.6 + markersize --> 3 + label --> "High Freq (>0.8 Nyquist)" + title --> plot_title + xlabel --> "Frequency (Hz)" + ylabel --> "Power" + xaxis --> :log10 + yaxis --> :log10 + cs.freq[high_freq_mask], abs.(cs.power[high_freq_mask]) + end + + @series begin + subplot --> 4 + seriestype --> :scatter + color --> :blue + alpha --> 0.6 + markersize --> 3 + label --> "Low Freq (<0.2 Nyquist)" + cs.freq[low_freq_mask], abs.(cs.power[low_freq_mask]) + end + + return Float64[], Float64[] +end + +""" +# Usage +```julia +plot(cs, Val(:analysis)) +``` + +# Notes +- Comprehensive 2x3 analysis dashboard +- Shows amplitude, phase lag, time lag, coherence, real/imaginary parts, and input spectra +""" +@recipe function f(cs::CrossSpectrum{T}, ::Val{:analysis}) where {T} + layout --> (2, 3) + size --> (1200, 800) + + @series begin + subplot --> 1 + plot_type --> :amplitude + if is_averaged(cs) + title --> "Averaged Amplitude ($(cs.m) segments)" + else + title --> "Single Amplitude" + end + cs + end + + @series begin + subplot --> 2 + plot_type --> :phase_lag + if is_averaged(cs) + title --> "Averaged Phase Lag ($(cs.m) segments)" + else + title --> "Single Phase Lag" + end + cs + end + + @series begin + subplot --> 3 + plot_type --> :time_lag + if is_averaged(cs) + title --> "Averaged Time Lag ($(cs.m) segments)" + else + title --> "Single Time Lag" + end + cs + end + + @series begin + subplot --> 4 + plot_type --> :coherence + if is_averaged(cs) + title --> "Averaged Coherence ($(cs.m) segments)" + else + title --> "Single Coherence" + end + cs + end + + @series begin + subplot --> 5 + plot_type --> :real_imaginary + if is_averaged(cs) + title --> "Averaged Real & Imaginary ($(cs.m) segments)" + else + title --> "Single Real & Imaginary" + end + cs + end + + @series begin + subplot --> 6 + plot_type --> :pds_comparison + if is_averaged(cs) 
+ title --> "Averaged Input Spectra ($(cs.m) segments)" + else + title --> "Single Input Spectra" + end + cs + end + + return Float64[], Float64[] +end + +""" +# Usage +```julia +plot(cs, Val(:noise_analysis)) +``` + +# Notes +- Detailed 2x2 noise analysis with noise subtraction effects +- Shows original vs noise-corrected power +""" +@recipe function f(cs::CrossSpectrum{T}, ::Val{:noise_analysis}) where {T} + + layout --> (2, 2) + size --> (1000, 800) + + noise_level = theoretical_noise_level(cs) + snr_data = abs.(cs.power) ./ noise_level + noise_corrected = max.(abs.(cs.power) .- noise_level, 0.01 * noise_level) + + spectrum_info = is_averaged(cs) ? " ($(cs.m) segments)" : "" + + @series begin + subplot --> 1 + seriestype --> :line + color --> :blue + linewidth --> 2 + label --> "Cross Spectrum" + title --> "Cross Spectrum vs Noise Level$spectrum_info" + xlabel --> "Frequency (Hz)" + ylabel --> "Power" + xaxis --> :log10 + yaxis --> :log10 + cs.freq, abs.(cs.power) + end + + @series begin + subplot --> 1 + seriestype --> :hline + y --> [noise_level] + color --> :red + linestyle --> :dash + linewidth --> 2 + label --> "Theoretical Noise" + Float64[], Float64[] + end + + @series begin + subplot --> 2 + seriestype --> :line + color --> :green + linewidth --> 2 + label --> "S/N Ratio" + title --> "Signal-to-Noise Ratio$spectrum_info" + xlabel --> "Frequency (Hz)" + ylabel --> "S/N" + xaxis --> :log10 + cs.freq, snr_data + end + + @series begin + subplot --> 2 + seriestype --> :hline + y --> [3.0] + color --> :black + linestyle --> :dash + alpha --> 0.7 + label --> "3σ threshold" + Float64[], Float64[] + end + + @series begin + subplot --> 3 + seriestype --> :line + color --> :purple + linewidth --> 2 + label --> "Noise Subtracted" + title --> "Noise-Corrected Power$spectrum_info" + xlabel --> "Frequency (Hz)" + ylabel --> "Corrected Power" + xaxis --> :log10 + yaxis --> :log10 + cs.freq, noise_corrected + end + + mean_snr = mean(snr_data) + significant_bins = 
sum(snr_data .> 3.0) + + @series begin + subplot --> 4 + seriestype --> :bar + color --> [:blue, :red, :green] + label --> "" + title --> "Summary Stats$spectrum_info" + xlabel --> "Metric" + ylabel --> "Value" + [1, 2, 3], [mean_snr, noise_level, significant_bins] + end + + return Float64[], Float64[] +end + +""" +# Usage +```julia +plot(cs, Val(:coherence_confidence)) +``` + +# Notes +- Confidence levels: C_α = α^(1/(m-1)) where m is number of segments +- Only meaningful for averaged cross spectra (m > 1) +""" +@recipe function f(cs::CrossSpectrum{T}, ::Val{:coherence_confidence}) where {T} + + if is_averaged(cs) + title --> "Averaged Coherence with Confidence Levels ($(cs.m) segments)" + else + title --> "Single Coherence with Confidence Levels" + end + xlabel --> "Frequency (Hz)" + ylabel --> "Coherence" + xaxis --> :log10 + ylims --> (0, 1.1) + grid --> true + + coh_values = abs2.(cs.power) ./ (cs.ps1 .* cs.ps2) + + m_eff = is_averaged(cs) ? cs.m : 1 + confidence_95 = fill(0.95^(1 / (m_eff - 1)), length(cs.freq)) + confidence_99 = fill(0.99^(1 / (m_eff - 1)), length(cs.freq)) + + @series begin + seriestype --> :line + color --> :green + linewidth --> 2 + label --> "Coherence" + cs.freq, coh_values + end + + if !isnothing(cs.power_err) + coh_err = cs.power_err ./ abs.(cs.power) + @series begin + seriestype --> :scatter + yerror --> coh_err + color --> :green + alpha --> 0.7 + markersize --> 2 + label --> "" + cs.freq, coh_values + end + end + + @series begin + seriestype --> :line + color --> :gray + linestyle --> :dash + alpha --> 0.7 + label --> "95% confidence" + cs.freq, confidence_95 + end + + @series begin + seriestype --> :line + color --> :darkgray + linestyle --> :dot + alpha --> 0.7 + label --> "99% confidence" + cs.freq, confidence_99 + end + + return Float64[], Float64[] +end + +""" +# Usage +```julia +plot(cs, Val(:frequency_snr), rebin_factor=10) +``` + +# Notes +- Frequency-resolved S/N with optional rebinning for noise reduction +- Shows 3σ 
detection threshold +""" +@recipe function f(cs::CrossSpectrum{T}, ::Val{:frequency_snr}; rebin_factor = 10) where {T} + + if is_averaged(cs) + title --> "Averaged Frequency-Resolved Signal-to-Noise ($(cs.m) segments)" + else + title --> "Single Frequency-Resolved Signal-to-Noise" + end + xlabel --> "Frequency (Hz)" + ylabel --> "S/N Ratio" + xaxis --> :log10 + grid --> true + + n_bins = div(length(cs.freq), rebin_factor) + freq_rebinned = Vector{Float64}(undef, n_bins) + snr_rebinned = Vector{Float64}(undef, n_bins) + + noise_level = theoretical_noise_level(cs) + snr_full = abs.(cs.power) ./ noise_level + + for i = 1:n_bins + start_idx = (i - 1) * rebin_factor + 1 + end_idx = min(i * rebin_factor, length(cs.freq)) + + freq_rebinned[i] = mean(cs.freq[start_idx:end_idx]) + snr_rebinned[i] = mean(snr_full[start_idx:end_idx]) + end + + @series begin + seriestype --> :line + color --> :blue + linewidth --> 2 + marker --> :circle + markersize --> 3 + label --> "Rebinned S/N (factor $rebin_factor)" + freq_rebinned, snr_rebinned + end + + @series begin + seriestype --> :hline + y --> [1.0] + color --> :black + linestyle --> :solid + alpha --> 0.5 + label --> "Unity" + Float64[], Float64[] + end + + @series begin + seriestype --> :hline + y --> [3.0] + color --> :red + linestyle --> :dash + alpha --> 0.7 + label --> "3σ detection" + Float64[], Float64[] + end + + return Float64[], Float64[] +end +""" +# Usage +```julia +plot(cs, Val(:noise_comparison)) +``` + +# Notes +- Compares original cross spectrum with noise-subtracted version +- Shows theoretical noise level +""" +@recipe function f(cs::CrossSpectrum{T}, ::Val{:noise_comparison}) where {T} + + if is_averaged(cs) + title --> "Averaged Cross Spectrum: Original vs Noise-Corrected ($(cs.m) segments)" + else + title --> "Single Cross Spectrum: Original vs Noise-Corrected" + end + xlabel --> "Frequency (Hz)" + ylabel --> "Power" + xaxis --> :log10 + yaxis --> :log10 + grid --> true + legend --> :topright + + noise_level = 
theoretical_noise_level(cs) + noise_corrected = max.(abs.(cs.power) .- noise_level, 0.01 * noise_level) + + @series begin + seriestype --> :line + color --> :blue + alpha --> 0.7 + linewidth --> 2 + label --> "Original" + cs.freq, abs.(cs.power) + end + + @series begin + seriestype --> :line + color --> :red + linewidth --> 2 + label --> "Noise Subtracted" + cs.freq, noise_corrected + end + + @series begin + seriestype --> :hline + y --> [noise_level] + color --> :gray + linestyle --> :dash + alpha --> 0.8 + label --> "Noise Level" + Float64[], Float64[] + end + + return Float64[], Float64[] +end +""" +# Usage +```julia +plot([cs1, cs2, cs3], Val(:normalization_comparison), + norm_labels=["Leahy", "Fractional RMS", "Absolute RMS"]) +``` + +# Notes +- Compares different normalization schemes +- Layout: stacked plots for each normalization +""" +@recipe function f( + cs_array::Vector{CrossSpectrum{T}}, + ::Val{:normalization_comparison}; + norm_labels = ["Leahy", "Fractional RMS", "Absolute RMS"], +) where {T} + + layout --> (length(cs_array), 1) + size --> (800, 300 * length(cs_array)) + + for (i, cs) in enumerate(cs_array) + @series begin + subplot --> i + seriestype --> :line + color --> :black + linewidth --> 2 + label --> "" + title --> i <= length(norm_labels) ? norm_labels[i] : "Spectrum $i" + xlabel --> "Frequency (Hz)" + ylabel --> i == 1 ? "Leahy cross-power" : + (i == 2 ? 
"RMS cross-power" : "Absolute cross-power") + xaxis --> :identity + yaxis --> :log10 + cs.freq, abs.(cs.power) + end + end + + return Float64[], Float64[] +end +""" +# Usage +```julia +plot(cs, Val(:lag_frequency_errors), + freq_range=(0.1, 10), reference_lag=(frequency=1.0, expected_lag=0.1)) +``` + +# Notes +- Shows time lag vs frequency with error bars +- Optional reference lines for expected values +""" +@recipe function f( + cs::CrossSpectrum{T}, + ::Val{:lag_frequency_errors}; + rebin_factor = 1, + freq_range = nothing, + reference_lag = nothing, +) where {T} + + if is_averaged(cs) + title --> "Averaged Lag-Frequency Spectrum ($(cs.m) segments)" + else + title --> "Single Lag-Frequency Spectrum" + end + xlabel --> "Frequency (Hz)" + ylabel --> "Time Lag (s)" + xaxis --> :identity + grid --> true + + freq_mask = if !isnothing(freq_range) + (cs.freq .>= freq_range[1]) .& (cs.freq .<= freq_range[2]) + else + trues(length(cs.freq)) + end + + freq_plot = cs.freq[freq_mask] + lag_plot = angle.(cs.power[freq_mask]) ./ (2π .* freq_plot) + + if !isnothing(freq_range) + xlims --> freq_range + end + + @series begin + seriestype --> :hline + y --> [0.0] + color --> :black + linestyle --> :dash + linewidth --> 2 + label --> "" + Float64[], Float64[] + end + + if !isnothing(reference_lag) && haskey(reference_lag, :frequency) + @series begin + seriestype --> :vline + x --> [reference_lag[:frequency]] + color --> :gray + linestyle --> :dash + alpha --> 0.7 + label --> "" + Float64[], Float64[] + end + end + + if !isnothing(reference_lag) && haskey(reference_lag, :expected_lag) + @series begin + seriestype --> :line + color --> :green + linewidth --> 2 + label --> "Expected Time Lag" + freq_plot, fill(reference_lag[:expected_lag], length(freq_plot)) + end + end + + if !isnothing(cs.power_err) + amplitude = abs.(cs.power[freq_mask]) + phase_err = cs.power_err[freq_mask] ./ amplitude + lag_err = phase_err ./ (2π .* freq_plot) + + seriestype --> :scatter + yerror --> lag_err + 
color --> :blue + markersize --> 3 + markershape --> :circle + label --> "Measured Time Lag" + + return freq_plot, lag_plot + else + seriestype --> :line + color --> :blue + linewidth --> 2 + label --> "Time Lag" + + return freq_plot, lag_plot + end +end +""" +# Usage +```julia +plot(cs, Val(:phase_frequency_errors), + freq_range=(0.1, 10), reference_phase=(frequency=1.0, expected_phase=0.5)) +``` + +# Notes +- Shows phase lag vs frequency with error bars +- Optional reference lines for expected values +""" +@recipe function f( + cs::CrossSpectrum{T}, + ::Val{:phase_frequency_errors}; + freq_range = nothing, + reference_phase = nothing, +) where {T} + + if is_averaged(cs) + title --> "Averaged Phase-Frequency Spectrum ($(cs.m) segments)" + else + title --> "Single Phase-Frequency Spectrum" + end + xlabel --> "Frequency (Hz)" + ylabel --> "Phase Lag (rad)" + xaxis --> :identity + grid --> true + + freq_mask = if !isnothing(freq_range) + (cs.freq .>= freq_range[1]) .& (cs.freq .<= freq_range[2]) + else + trues(length(cs.freq)) + end + + freq_plot = cs.freq[freq_mask] + phase_plot = angle.(cs.power[freq_mask]) + + if !isnothing(freq_range) + xlims --> freq_range + end + + @series begin + seriestype --> :hline + y --> [0.0] + color --> :black + linestyle --> :dash + linewidth --> 2 + label --> "" + Float64[], Float64[] + end + + if !isnothing(reference_phase) && haskey(reference_phase, :frequency) + @series begin + seriestype --> :vline + x --> [reference_phase[:frequency]] + color --> :gray + linestyle --> :dash + alpha --> 0.7 + label --> "" + Float64[], Float64[] + end + end + + if !isnothing(reference_phase) && haskey(reference_phase, :expected_phase) + @series begin + seriestype --> :hline + y --> [reference_phase[:expected_phase]] + color --> :green + linewidth --> 2 + label --> "Expected Phase Lag" + Float64[], Float64[] + end + end + + if !isnothing(cs.power_err) + amplitude = abs.(cs.power[freq_mask]) + phase_err = cs.power_err[freq_mask] ./ amplitude + + 
seriestype --> :scatter + yerror --> phase_err + color --> :blue + markersize --> 3 + markershape --> :circle + label --> "Measured Phase Lag" + + return freq_plot, phase_plot + else + seriestype --> :line + color --> :blue + linewidth --> 2 + label --> "Phase Lag" + + return freq_plot, phase_plot + end +end + +""" +# Usage +```julia +plot(cs_array, energy_bands, Val(:lag_energy)) +``` + +# Notes +- Common in X-ray timing analysis +- Shows energy-dependent time lags +""" +@recipe function f( + cs_array::Vector{CrossSpectrum{T}}, + energy_bands::Vector{<:Real}, + ::Val{:lag_energy}, +) where {T} + + title --> "Lag-Energy Spectrum" + xlabel --> "Energy (keV)" + ylabel --> "Time Lag (s)" + grid --> true + + avg_lags = Float64[] + lag_errors = Float64[] + + for cs in cs_array + time_lags = angle.(cs.power) ./ (2π .* cs.freq) + avg_lag = mean(time_lags) + lag_error = std(time_lags) / sqrt(length(cs.freq)) + + push!(avg_lags, avg_lag) + push!(lag_errors, lag_error) + end + + seriestype --> :scatter + yerror --> lag_errors + color --> :purple + markersize --> 5 + markershape --> :circle + label --> "Energy-dependent Lag" + + return energy_bands[1:end-1], avg_lags +end +""" +# Usage +```julia +plot(cs_original, cs_rebinned, Val(:rebinning_comparison), rebin_type="Linear") +``` + +# Notes +- Demonstrates effect of rebinning on cross spectrum +- Shows original vs rebinned data +""" +@recipe function f( + cs_original::CrossSpectrum{T}, + cs_rebinned::CrossSpectrum{T}, + ::Val{:rebinning_comparison}; + rebin_type = "Linear", +) where {T} + + title --> "$rebin_type Rebinning Comparison" + xlabel --> "Frequency (Hz)" + ylabel --> "Cross Spectrum Amplitude" + xaxis --> :log10 + yaxis --> :log10 + grid --> true + legend --> :topright + + @series begin + seriestype --> :line + color --> :lightblue + alpha --> 0.6 + linewidth --> 1 + label --> "Original" + cs_original.freq, abs.(cs_original.power) + end + + @series begin + seriestype --> :line + color --> :darkblue + linewidth --> 2 
+ label --> "$rebin_type Rebinned" + cs_rebinned.freq, abs.(cs_rebinned.power) + end + + return Float64[], Float64[] +end +""" +# Usage +```julia +plot(lc1, lc2, colors=[:blue, :red], max_points=10000, time_range=(0, 100)) +``` + +# Notes +- Automatically downsamples for performance if n > max_points +- Shows point count in legend +""" +@recipe function f( + lc1::LightCurve, + lc2::LightCurve; + colors = [:blue, :red], + labels = ["Light Curve 1", "Light Curve 2"], + max_points = 10000, + time_range = nothing, +) + + title --> "Light Curves Comparison" + xlabel --> "Time (s)" + ylabel --> "Counts" + grid --> true + legend --> :topright + linewidth --> 2 + + # Downsample function + function quick_downsample(time_vec, count_vec, max_pts) + n = length(time_vec) + if n <= max_pts + return time_vec, count_vec + end + step = max(1, div(n, max_pts)) + return time_vec[1:step:end], count_vec[1:step:end] + end + + # Process data + t1, c1 = quick_downsample(lc1.time, lc1.counts, max_points) + t2, c2 = quick_downsample(lc2.time, lc2.counts, max_points) + + # Apply time filter + if !isnothing(time_range) + xlims --> time_range + mask1 = (t1 .>= time_range[1]) .& (t1 .<= time_range[2]) + mask2 = (t2 .>= time_range[1]) .& (t2 .<= time_range[2]) + t1, c1 = t1[mask1], c1[mask1] + t2, c2 = t2[mask2], c2[mask2] + end + + @series begin + seriestype --> :line + color --> colors[1] + label --> "$(labels[1]) ($(length(t1)) pts)" + t1, c1 + end + + @series begin + seriestype --> :line + color --> colors[2] + label --> "$(labels[2]) ($(length(t2)) pts)" + t2, c2 + end + + return Float64[], Float64[] +end +""" +# Usage +```julia +plot(cs, Val(:noise_timeline)) +``` + +# Notes +- Shows power and S/N vs frequency in 2x1 layout +- Useful for noise property visualization +""" +@recipe function f(cs::CrossSpectrum{T}, ::Val{:noise_timeline}) where {T} + props = noise_properties(cs) + + # Create a 2x1 layout + layout --> (2, 1) + + # Subplot 1: Power vs Frequency + @series begin + subplot := 1 + 
title := "Power Spectrum" + xlabel := "Frequency" + ylabel := "Power" + seriestype := :scatter + markersize := 2 + label := "Power" + cs.freq, abs.(cs.power) + end + + # Subplot 2: S/N vs Frequency + @series begin + subplot := 2 + title := "Signal-to-Noise Ratio" + xlabel := "Frequency" + ylabel := "S/N" + seriestype := :scatter + markersize := 2 + label := "S/N" + cs.freq, signal_to_noise_ratio(cs) + end +end +""" +# Usage +```julia +plot(cs, Val(:noise_properties)) +``` + +# Notes +- Bar chart showing key noise metrics +- Displays mean S/N, noise/signal ratio, and significant detections +""" +@recipe function f(cs::CrossSpectrum{T}, ::Val{:noise_properties}) where {T} + + title --> "Noise Analysis Summary" + xlabel --> "Property" + ylabel --> "Value" + grid --> true + + props = noise_properties(cs) + + # Select key properties to plot + prop_names = ["mean_snr", "noise_to_signal_ratio", "significant_detections"] + prop_values = [props[name] for name in prop_names] + prop_labels = ["Mean S/N", "Noise/Signal", "Significant Bins"] + + seriestype --> :bar + color --> [:blue, :red, :green] + label --> "" + + return 1:length(prop_values), prop_values +end \ No newline at end of file diff --git a/src/utils.jl b/src/utils.jl index 5c31f9e..9e6df45 100644 --- a/src/utils.jl +++ b/src/utils.jl @@ -95,3 +95,49 @@ function merge_overlapping_gtis(gtis::Matrix{Float64}) return merged[1:merged_count, :] end + + +function validate_time_alignment(header1::FITSIO.FITSHeader, header2::FITSIO.FITSHeader) + mjd_ref1, time_zero1, time_unit1, time_sys1, time_pixr1, time_del1 = extract_timing_keywords(header1) + mjd_ref2, time_zero2, time_unit2, time_sys2, time_pixr2, time_del2 = extract_timing_keywords(header2) + + if isnothing(mjd_ref1) || isnothing(mjd_ref2) + @warn "Missing MJDREF in one or both event lists - time alignment cannot be verified" + return + end + + mjd_tolerance = 1e-9 # ~0.1 milliseconds in days + if abs(mjd_ref1 - mjd_ref2) > mjd_tolerance + throw(ArgumentError("Event 
lists have different MJDREF values: $(mjd_ref1) vs $(mjd_ref2). Cross spectrum requires same time reference.")) + end + + if !isnothing(time_zero1) && !isnothing(time_zero2) + if abs(time_zero1 - time_zero2) > 1e-6 + @warn "Different TIMEZERO values: $(time_zero1) vs $(time_zero2) - this may affect timing alignment" + end + end + + if !isnothing(time_unit1) && !isnothing(time_unit2) + if abs(time_unit1 - time_unit2) > 1e-9 + throw(ArgumentError("Event lists have different TIMEUNIT values: $(time_unit1) vs $(time_unit2)")) + end + end + + if !isnothing(time_sys1) && !isnothing(time_sys2) + if time_sys1 != time_sys2 + @warn "Different TIMESYS values: '$(time_sys1)' vs '$(time_sys2)' - verify time system compatibility" + end + end + + if !isnothing(time_pixr1) && !isnothing(time_pixr2) + if abs(time_pixr1 - time_pixr2) > 1e-9 + @warn "Different TIMEPIXR values: $(time_pixr1) vs $(time_pixr2) - this affects time bin assignment" + end + end + + if !isnothing(time_del1) && !isnothing(time_del2) + if abs(time_del1 - time_del2) > 1e-9 + @warn "Different TIMEDEL values: $(time_del1) vs $(time_del2) - this affects timing resolution" + end + end +end \ No newline at end of file diff --git a/test/runtests.jl b/test/runtests.jl index 12ca46a..6b82fd2 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -4,8 +4,13 @@ using FFTW, Distributions, Statistics, StatsBase, HDF5, FITSIO using Logging ,LinearAlgebra using CFITSIO using Random +using Plots -include("test_fourier.jl") +@testset "Fourier" begin + include("test_fourier/test_fourier.jl") + include("test_fourier/test_coherence.jl") + include("test_fourier/test_norm.jl") +end @testset "GTI" begin include("test_gti.jl") end @@ -15,4 +20,11 @@ end @testset "lightcurve" begin include("test_lightcurve.jl") +end +@testset "crossspectrum" begin + include("test_crossspectrum.jl") +end + +@testset "recipes" begin + include("test_plotting/test_plots_recipes_crossspectrum.jl") end \ No newline at end of file diff --git 
a/test/test_crossspectrum.jl b/test/test_crossspectrum.jl new file mode 100644 index 0000000..8b54692 --- /dev/null +++ b/test/test_crossspectrum.jl @@ -0,0 +1,658 @@ +# Helper function to create mock CrossSpectrum for testing +function create_test_crossspectrum(freqs::Vector{Float64}, powers::Vector{Complex{Float64}}; + ps1::Union{Vector{Float64}, Nothing}=nothing, + ps2::Union{Vector{Float64}, Nothing}=nothing, + norm::String="frac", power_type::String="all", + nphots1::Float64=1000.0, nphots2::Float64=1000.0, + m::Int=1, k::Union{Int, Vector{Int}}=1, + segment_size::Float64=100.0, + mean_rate1::Float64=10.0, mean_rate2::Float64=10.0, + fullspec::Bool=false, channels_overlap::Bool=false) + + mock_metadata = LightCurveMetadata( + "test", "test", "test", 0.0, (0.0, segment_size), + length(freqs) > 1 ? freqs[2] - freqs[1] : 1.0, + Vector{Dict{String,Any}}(), Dict{String,Any}() + ) + + df = if length(freqs) > 1 + freqs[2] - freqs[1] + else + 1.0 + end + + ps1_data = ps1 !== nothing ? ps1 : abs2.(powers) + ps2_data = ps2 !== nothing ? 
ps2 : abs2.(powers) + + return CrossSpectrum{Float64}( + freqs, powers, nothing, ps1_data, ps2_data, norm, power_type, df, + nphots1, nphots2, m, length(freqs), k, mock_metadata, mock_metadata, + fullspec, channels_overlap, segment_size, mean_rate1, mean_rate2 + ) +end +# Helper function to create EventList for testing +function create_test_cseventlist(times::Vector{Float64}, energies::Union{Vector{Float64}, Nothing}=nothing) + mock_headers = FITSIO.FITSHeader(["MJDREF"], [50000.0], ["Modified Julian Date reference"]) + gti = reshape([minimum(times), maximum(times)], 1, 2) + + mock_metadata = FITSMetadata( + "test.fits", 1, "EVENTS", Dict{String, Vector}(), mock_headers, gti, + nothing, nothing, nothing, nothing, nothing, nothing, nothing + ) + + return EventList(times, energies, mock_metadata) +end + +# Helper function to create LightCurve for testing +function create_test_cslightcurve(times::Vector{Float64}, counts::Vector{Int}, dt::Float64=1.0) + tstart, tstop = minimum(times) - dt/2, maximum(times) + dt/2 + gti = reshape([tstart, tstop], 1, 2) + + metadata = LightCurveMetadata( + "test", "test", "test", 0.0, (tstart, tstop), dt, + Vector{Dict{String,Any}}(), Dict{String,Any}("gti" => gti) + ) + + return LightCurve(times, dt, counts, nothing, nothing, + EventProperty{Float64}[], metadata, :poisson) +end +# Test CrossSpectrum struct construction and helper functions +let + cs_type = CrossSpectrum{Float64} + @test cs_type <: AbstractCrossSpectrum{Float64} + + mock_cs_single = create_test_crossspectrum( + [1.0, 2.0, 3.0], [1.0+0im, 2.0+1im, 3.0-1im], m=1 + ) + + mock_cs_averaged = create_test_crossspectrum( + [1.0, 2.0, 3.0], [1.0+0im, 2.0+1im, 3.0-1im], m=5 + ) + + @test is_single(mock_cs_single) + @test !is_averaged(mock_cs_single) + @test !is_single(mock_cs_averaged) + @test is_averaged(mock_cs_averaged) +end + +# Test theoretical noise level calculation +let + cs_single = create_test_crossspectrum( + [1.0, 2.0], [1.0+0im, 2.0+1im], + nphots1=100.0, 
nphots2=200.0, m=1 + ) + + cs_averaged = create_test_crossspectrum( + [1.0, 2.0], [1.0+0im, 2.0+1im], + nphots1=500.0, nphots2=1000.0, m=5 + ) + + noise_single = theoretical_noise_level(cs_single) + noise_averaged = theoretical_noise_level(cs_averaged) + + @test noise_single > 0 + @test noise_averaged > 0 + @test noise_averaged < noise_single +end + +# Test fill_errors! function +let + cs = create_test_crossspectrum( + [1.0, 2.0, 3.0], [1.0+0im, 2.0+1im, 3.0-1im], m=5 + ) + + @test isnothing(cs.power_err) + fill_errors!(cs) + @test !isnothing(cs.power_err) + @test length(cs.power_err) == length(cs.power) + @test all(cs.power_err .>= 0) +end + +# Test white noise level estimation +let + freqs = collect(0.1:0.1:10.0) + powers = [complex(1.0, 0.5) for _ in freqs] + powers[1:20] .*= 3.0 + + cs = create_test_crossspectrum(freqs, powers) + + @test_logs (:warn, r"Estimated noise level .* much higher than theoretical") begin + noise_level = white_noise_level(cs) + @test noise_level > 0 + @test noise_level < mean(abs.(powers[1:20])) + end +end + +# Test noise corrected power +let + freqs = collect(0.1:0.1:5.0) + powers = [complex(2.0 + 0.5*randn(), 0.5*randn()) for _ in freqs] + + cs = create_test_crossspectrum(freqs, powers) + + @test_logs (:warn, r"Estimated noise level .* much higher than theoretical") begin + corrected = noise_corrected_power(cs) + @test length(corrected) == length(powers) + @test all(corrected .>= 0) + end +end + +# Test signal-to-noise ratio calculation +let + freqs = [1.0, 2.0, 3.0] + powers = [complex(5.0, 1.0), complex(2.0, 0.5), complex(10.0, 2.0)] + + cs = create_test_crossspectrum(freqs, powers) + + @test_logs (:warn, "Too few high frequency points for reliable noise estimation") begin + snr = signal_to_noise_ratio(cs) + @test length(snr) == 3 + @test all(snr .>= 0) + @test snr[3] > snr[2] + end +end + +# Test aliasing detection +let + freqs = collect(0.1:0.1:5.0) + powers = [complex(1.0, 0.1) for _ in freqs] + + cs_normal = 
create_test_crossspectrum(freqs, powers) + + # Test without expecting specific warning logs since detect_aliasing might not produce them + aliasing_detected, message = detect_aliasing(cs_normal) + @test !aliasing_detected + + powers_alias = copy(powers) + high_freq_indices = Int(round(0.8 * length(freqs))):length(freqs) + powers_alias[high_freq_indices] .*= 20.0 + + cs_alias = create_test_crossspectrum(freqs, powers_alias) + + # Test without expecting specific warning logs + aliasing_detected_2, message_2 = detect_aliasing(cs_alias) + @test aliasing_detected_2 +end + +# Test coherence calculation +let + freqs = [1.0, 2.0, 3.0] + cross_powers = [complex(2.0, 1.0), complex(1.5, 0.5), complex(3.0, 1.5)] + ps1 = [4.0, 2.25, 9.0] + ps2 = [1.0, 1.0, 1.0] + + cs = create_test_crossspectrum(freqs, cross_powers, ps1=ps1, ps2=ps2) + + coh = coherence(cs) + @test length(coh) == 3 + @test all(coh .>= 0.0) + @test all(coh .<= 1.0) + + cs_avg = create_test_crossspectrum(freqs, cross_powers, ps1=ps1, ps2=ps2, m=5) + + coh_avg = coherence(cs_avg) + @test length(coh_avg) == 3 + @test all(coh_avg .>= 0.0) +end + +# Test phase and time lag calculations +let + freqs = [1.0, 2.0, 4.0] + cross_powers = [complex(1.0, 1.0), complex(0.0, 2.0), complex(-1.0, 1.0)] + + cs = create_test_crossspectrum(freqs, cross_powers) + + phases = phase_lag(cs) + @test length(phases) == 3 + @test all(-π .<= phases .<= π) + @test phases[1] ≈ π/4 atol=1e-10 + @test phases[2] ≈ π/2 atol=1e-10 + + time_lags = time_lag(cs) + @test length(time_lags) == 3 + @test time_lags[1] ≈ phases[1]/(2π * freqs[1]) atol=1e-10 + @test time_lags[2] ≈ phases[2]/(2π * freqs[2]) atol=1e-10 +end + +# Test noise properties analysis +let + freqs = collect(0.5:0.5:10.0) + powers = [complex(1.0 + 0.1*randn(), 0.1*randn()) for _ in freqs] + + cs = create_test_crossspectrum(freqs, powers) + + @test_logs (:warn, r"Estimated noise level .* much higher than theoretical") match_mode=:any begin + props = noise_properties(cs) + + 
required_keys = ["theoretical_noise", "noise_level_1", "noise_level_2", + "mean_power", "std_power", "mean_snr", "significant_detections", + "total_frequencies", "high_freq_power", "noise_to_signal_ratio", + "mean_rate_1", "mean_rate_2", "is_averaged", "segments_averaged", + "averaging_improvement"] + + for key in required_keys + @test haskey(props, key) + end + + @test props["total_frequencies"] == length(freqs) + @test props["is_averaged"] == false + @test props["segments_averaged"] == 1 + @test props["averaging_improvement"] == 1.0 + end +end + +# Test significant frequencies detection +let + freqs = [0.5, 1.0, 1.5, 2.0, 2.5] + powers = [complex(0.1, 0.05), complex(5.0, 2.0), complex(0.2, 0.1), + complex(8.0, 3.0), complex(0.15, 0.08)] + + cs = create_test_crossspectrum(freqs, powers) + + @test_logs (:warn, "Too few high frequency points for reliable noise estimation") match_mode=:any begin + sig_freqs_3 = significant_frequencies(cs, 3.0) + sig_freqs_5 = significant_frequencies(cs, 5.0) + + @test length(sig_freqs_5) <= length(sig_freqs_3) + end +end + +# Test get_noise_level with different methods +let + freqs = collect(0.1:0.1:5.0) + powers = [complex(1.0, 0.1) for _ in freqs] + + cs = create_test_crossspectrum(freqs, powers) + + theoretical_noise = get_noise_level(cs, :theoretical) + @test theoretical_noise > 0 + + @test_logs (:warn, r"Estimated noise level .* much higher than theoretical") begin + empirical_noise = get_noise_level(cs, :empirical) + @test empirical_noise > 0 + end + + @test_throws ArgumentError get_noise_level(cs, :invalid_method) +end + +# Test quality metrics calculation +let + freqs = [1.0, 2.0, 3.0, 4.0] + powers = [complex(5.0, 1.0), complex(2.0, 0.5), complex(8.0, 2.0), complex(1.0, 0.2)] + + cs_single = create_test_crossspectrum(freqs, powers, m=1) + cs_averaged = create_test_crossspectrum(freqs, powers, m=10) + + @test_logs (:warn, "Too few high frequency points for reliable noise estimation") match_mode=:any begin + metrics_single = 
quality_metrics(cs_single) + metrics_averaged = quality_metrics(cs_averaged) + + required_keys = ["mean_snr", "max_snr", "significant_fraction", + "noise_level", "dynamic_range", "is_averaged"] + + for key in required_keys + @test haskey(metrics_single, key) + @test haskey(metrics_averaged, key) + end + + @test metrics_single["is_averaged"] == false + @test metrics_averaged["is_averaged"] == true + @test haskey(metrics_averaged, "segments_averaged") + @test haskey(metrics_averaged, "expected_improvement") + @test metrics_averaged["segments_averaged"] == 10 + @test metrics_averaged["expected_improvement"] ≈ sqrt(10) atol=1e-10 + end +end + +# Test rebin function with frequency resolution +let + freqs = collect(0.5:0.5:10.0) + powers = [complex(1.0 + 0.1*i, 0.1*i) for i in 1:length(freqs)] + + cs = create_test_crossspectrum(freqs, powers) + + rebinned = rebin(cs, 1.0) + @test rebinned.df ≈ 1.0 atol=1e-10 + @test length(rebinned.freq) < length(cs.freq) + + @test_throws ErrorException rebin(cs, 0.25) + + rebinned_int = rebin(cs, 2) + @test rebinned_int.df ≈ 1.0 atol=1e-10 + @test rebinned.k == 2 +end + +# Test logarithmic rebinning +let + freqs = collect(0.1:0.1:10.0) + powers = [complex(1.0, 0.1) for _ in freqs] + + cs = create_test_crossspectrum(freqs, powers, m=5) + + log_rebinned = rebin_log(cs; f=0.1) + @test length(log_rebinned.freq) < length(cs.freq) + @test all(log_rebinned.freq .> 0) + @test issorted(log_rebinned.freq) + + @test_throws ErrorException rebin_log(cs; f=0.0) + @test_throws ErrorException rebin_log(cs; f=1.0) +end + +# Test geometric rebinning +let + freqs = collect(0.1:0.1:5.0) + powers = [complex(1.0, 0.1) for _ in freqs] + + cs = create_test_crossspectrum(freqs, powers) + + geom_rebinned = geometric_rebin(cs, 1.5) + @test length(geom_rebinned.freq) < length(cs.freq) + @test all(geom_rebinned.freq .> 0) + @test issorted(geom_rebinned.freq) + + @test_throws ErrorException geometric_rebin(cs, 0.8) +end + +# Test rebinning helper functions +let + 
freqs = [1.0, 2.0, 3.0, 4.0] + powers = [complex(1.0, 0.1), complex(2.0, 0.2), complex(3.0, 0.3), complex(4.0, 0.4)] + + cs_single = create_test_crossspectrum(freqs, powers, m=1, k=3) + cs_averaged = create_test_crossspectrum(freqs, powers, m=5, k=[1, 2, 3, 4]) + + @test is_rebinned(cs_single) + @test is_rebinned(cs_averaged) + + samples_single = effective_samples_per_bin(cs_single) + samples_averaged = effective_samples_per_bin(cs_averaged) + + @test samples_single == 3 + @test samples_averaged == [5, 10, 15, 20] +end + +# Test adaptive rebinning +let + freqs = collect(0.5:0.5:10.0) + powers = [complex(1.0, 0.1) for _ in freqs] + cs = create_test_crossspectrum(freqs, powers) + + @test_logs (:warn, r"Estimated noise level .* much higher than theoretical") match_mode=:any begin + adaptive_rebinned = adaptive_rebin(cs, 5.0, 8) + @test length(adaptive_rebinned.freq) <= length(cs.freq) + @test length(adaptive_rebinned.freq) > 0 + + heavily_rebinned = adaptive_rebin(cs, 100.0, 3) + @test length(heavily_rebinned.freq) <= length(cs.freq) + + max_rebinned = adaptive_rebin(cs, 1000.0, 5) + @test length(max_rebinned.freq) < length(cs.freq) + + result1 = adaptive_rebin(cs, 1.0, 2) + result2 = adaptive_rebin(cs, 1.0, 2) + @test length(result1.freq) == length(result2.freq) + end + + minimal_freqs = [1.0, 2.0] + minimal_powers = [complex(1.0, 0.1), complex(1.0, 0.1)] + cs_minimal = create_test_crossspectrum(minimal_freqs, minimal_powers) + + @test_logs (:warn, r"Too few high frequency points") begin + minimal_result = adaptive_rebin(cs_minimal, 3.0, 2) + @test length(minimal_result.freq) <= length(cs_minimal.freq) + end +end + +# Test Base.show method +let + cs_single = create_test_crossspectrum([1.0, 2.0], [1.0+0im, 2.0+1im], m=1) + cs_averaged = create_test_crossspectrum([1.0, 2.0], [1.0+0im, 2.0+1im], m=5) + + io = IOBuffer() + show(io, MIME"text/plain"(), cs_single) + single_output = String(take!(io)) + @test occursin("CrossSpectrum", single_output) + @test 
occursin("Single", single_output) + + show(io, MIME"text/plain"(), cs_averaged) + averaged_output = String(take!(io)) + @test occursin("CrossSpectrum", averaged_output) + @test occursin("Averaged", averaged_output) +end + +# Test CrossSpectrum constructor from EventList +let + times = collect(0.0:0.1:100.0) + dt = 0.1 + + Random.seed!(42) + signal1 = 5.0 .* sin.(2π * 0.5 * times) .+ 10.0 + signal2 = 5.0 .* sin.(2π * 0.5 * times .+ π/6) .+ 10.0 + + noise1 = 2.0 .* randn(length(times)) + noise2 = 2.0 .* randn(length(times)) + + counts1 = max.(0, round.(Int, signal1 .+ noise1)) + counts2 = max.(0, round.(Int, signal2 .+ noise2)) + + event_times1 = Float64[] + event_times2 = Float64[] + + for i in 1:length(times) + for _ in 1:counts1[i] + push!(event_times1, times[i] + rand() * dt - dt/2) + end + for _ in 1:counts2[i] + push!(event_times2, times[i] + rand() * dt - dt/2) + end + end + + sort!(event_times1) + sort!(event_times2) + + ev1 = create_test_cseventlist(event_times1) + ev2 = create_test_cseventlist(event_times2) + + cs = CrossSpectrum(ev1, ev2, 50.0, dt) + + @test is_single(cs) + @test cs.m == 1 + @test cs.df ≈ 1/50.0 atol=1e-10 + @test cs.segment_size == 50.0 + @test cs.nphots1 > 0 + @test cs.nphots2 > 0 +end + +# Test AveragedCrossSpectrum from LightCurve +let + times = collect(0.0:0.1:1000.0) + dt = 0.1 + + Random.seed!(123) + f1, f2 = 0.5, 2.0 + + signal1 = 10.0 .* (sin.(2π * f1 * times) .+ sin.(2π * f2 * times)) + signal2 = 10.0 .* (sin.(2π * f1 * times .+ π/4) .+ sin.(2π * f2 * times .+ π/4)) + + noise_level = 2.0 + noise1 = noise_level .* randn(length(times)) + noise2 = noise_level .* randn(length(times)) + + base_level = 20.0 + counts1 = floor.(Int, (signal1 .+ noise1 .+ base_level)) + counts2 = floor.(Int, (signal2 .+ noise2 .+ base_level)) + + counts1 = max.(0, counts1) + counts2 = max.(0, counts2) + + tstart, tstop = minimum(times), maximum(times) + gti = [tstart tstop] + + metadata1 = LightCurveMetadata( + "", "", "", 0.0, (tstart, tstop), dt, + 
Vector{Dict{String,Any}}(), + Dict{String,Any}("gti" => gti) + ) + + metadata2 = LightCurveMetadata( + "", "", "", 0.0, (tstart, tstop), dt, + Vector{Dict{String,Any}}(), + Dict{String,Any}("gti" => gti) + ) + + lc1 = LightCurve(times, dt, counts1, nothing, nothing, EventProperty{Float64}[], metadata1, :poisson) + lc2 = LightCurve(times, dt, counts2, nothing, nothing, EventProperty{Float64}[], metadata2, :poisson) + + cs = CrossSpectrum(lc1, lc2, 100.0) + @test !is_averaged(cs) + @test is_single(cs) + @test cs.m == 1 + + @test cs.freq[1] ≈ cs.df atol=1e-10 + @test maximum(cs.freq) ≈ 1/(2*dt) atol=0.1 + + theoretical = theoretical_noise_level(cs) + @test theoretical > 0 + + acs = AveragedCrossSpectrum(lc1, lc2, 100.0) + @test is_averaged(acs) + @test !is_single(acs) + @test acs.m > 1 + + fill_errors!(acs) + @test !isnothing(acs.power_err) + @test all(acs.power_err .>= 0) + + coh = coherence(acs) + @test all(0 .<= coh .<= 1) + + freq_mask1 = findall(x -> abs(x - f1) < 3*acs.df, acs.freq) + freq_mask2 = findall(x -> abs(x - f2) < 3*acs.df, acs.freq) + + @test !isempty(freq_mask1) + @test !isempty(freq_mask2) + @test maximum(coh[freq_mask1]) > 0.2 + @test maximum(coh[freq_mask2]) > 0.2 + + phases = phase_lag(acs) + @test all(-π .<= phases .<= π) + + tlags = time_lag(acs) + @test length(tlags) == length(acs.freq) + + @test acs.df ≈ 1/100.0 atol=1e-10 + @test length(cs.freq) ≈ length(acs.freq) + @test cs.df ≈ acs.df atol=1e-10 +end +#all test +let + # Create time array and simulated data + times = collect(0.0:0.1:1000.0) + dt = 0.1 + + # Create synthetic signal with better control + f1, f2 = 0.5, 2.0 # Hz + + # Set random seed for reproducibility + Random.seed!(123) + + # Create base signals with phase relationship + signal1 = 10.0 .* (sin.(2π * f1 * times) .+ sin.(2π * f2 * times)) + signal2 = 10.0 .* (sin.(2π * f1 * times .+ π/4) .+ sin.(2π * f2 * times .+ π/4)) + + # Add controlled noise + noise_level = 2.0 + noise1 = noise_level .* randn(length(times)) + noise2 = 
noise_level .* randn(length(times)) + + # Ensure positive counts with good SNR + base_level = 20.0 + counts1 = floor.(Int, (signal1 .+ noise1 .+ base_level)) + counts2 = floor.(Int, (signal2 .+ noise2 .+ base_level)) + + # Ensure no negative counts + counts1 = max.(0, counts1) + counts2 = max.(0, counts2) + + # Create light curves with proper GTIs + tstart, tstop = minimum(times), maximum(times) + gti = [tstart tstop] + + metadata1 = LightCurveMetadata( + "", "", "", 0.0, + (tstart, tstop), + dt, + Vector{Dict{String,Any}}(), + Dict{String,Any}("gti" => gti) + ) + + metadata2 = LightCurveMetadata( + "", "", "", 0.0, + (tstart, tstop), + dt, + Vector{Dict{String,Any}}(), + Dict{String,Any}("gti" => gti) + ) + + lc1 = LightCurve(times, dt, counts1, nothing, nothing, EventProperty{Float64}[], metadata1, :poisson) + lc2 = LightCurve(times, dt, counts2, nothing, nothing, EventProperty{Float64}[], metadata2, :poisson) + + # Test CrossSpectrum + cs = CrossSpectrum(lc1, lc2, 100.0) + @test !is_averaged(cs) + @test is_single(cs) + @test cs.m == 1 + + # Test frequency properties + @test cs.freq[1] ≈ cs.df atol=1e-10 + @test maximum(cs.freq) ≈ 1/(2*dt) atol=0.1 + + # Test noise levels + theoretical = theoretical_noise_level(cs) + empirical = white_noise_level(cs) + @test theoretical > 0 + @test empirical > 0 + + # Test AveragedCrossSpectrum + acs = AveragedCrossSpectrum(lc1, lc2, 100.0) + @test is_averaged(acs) + @test !is_single(acs) + @test acs.m > 1 + + # Test error estimation + fill_errors!(acs) + @test !isnothing(acs.power_err) + @test all(acs.power_err .>= 0) + + # Test coherence + coh = coherence(acs) + @test all(0 .<= coh .<= 1) + + # Find frequency bins near input frequencies + freq_mask1 = findall(x -> abs(x - f1) < 3*acs.df, acs.freq) + freq_mask2 = findall(x -> abs(x - f2) < 3*acs.df, acs.freq) + + # Test for presence of signal (less stringent coherence test) + @test !isempty(freq_mask1) + @test !isempty(freq_mask2) + @test maximum(coh[freq_mask1]) > 0.2 + @test 
maximum(coh[freq_mask2]) > 0.2 + + # Test significant frequencies detection + sig_freqs = significant_frequencies(acs) + @test any(abs.(sig_freqs .- f1) .< 3*acs.df) + @test any(abs.(sig_freqs .- f2) .< 3*acs.df) + + # Test phase lags + phases = phase_lag(acs) + @test all(-π .<= phases .<= π) + + # Test time lags + tlags = time_lag(acs) + @test length(tlags) == length(acs.freq) + + # Basic property tests + @test acs.df ≈ 1/100.0 atol=1e-10 + @test length(cs.freq) ≈ length(acs.freq) + @test cs.df ≈ acs.df atol=1e-10 +end \ No newline at end of file diff --git a/test/test_fourier.jl b/test/test_fourier.jl deleted file mode 100644 index 38de843..0000000 --- a/test/test_fourier.jl +++ /dev/null @@ -1,425 +0,0 @@ -function compare_tables(table1, table2; rtol=0.001, discard = []) - - s_discard = Symbol.(discard) - test_result = true - - for key in propertynames(table1) - if key in s_discard - continue - end - oe, oc = getproperty(table1,key), getproperty(table1,key) - if oe isa Integer || oe isa String - if !(oe==oc) - test_result = false - break - end - elseif isnothing(oe) - if !(isnothing(oc)) - test_result = false - break - end - else - if !(≈(oe,oc,rtol=rtol)) - test_result = false - break - end - end - end - - - data_fields_table1 = filter(field -> !(typeof(field) == Symbol && startswith(string(field), "meta")), names(table1)) - data_fields_table2 = filter(field -> !(typeof(field) == Symbol && startswith(string(field), "meta")), names(table2)) - - - - for field in names(table1) - if field in discard - continue - end - oe, oc = table1[!,field], table2[!,field] - - if !(≈(oe,oc,rtol=rtol)) - test_result = false - break - end - end - @test test_result -end - -@testset "positive_fft_bins" begin - freq = fftfreq(11) - goodbins = positive_fft_bins(11) - @test filter(x -> x >0, freq) == freq[goodbins] -end - -@testset "test_coherence" begin - - fid = h5open(joinpath(@__DIR__ ,"data","sample_variable_lc.h5"),"r") - data = (fid["dataset1"][1:10000]) .* 1000.0 - close(fid) - 
- data1 = rand.(Poisson.(data)) - data2 = rand.(Poisson.(data)) - ft1 = fft(data1) - ft2 = fft(data2) - dt = 0.01 - N = length(data) - mean = Statistics.mean(data) - meanrate = mean / dt - freq = fftfreq(length(data), 1/dt) - good = 0 .< freq .< 0.1 - keepat!(ft1, good) - keepat!(ft2, good) - cross = normalize_periodograms( - ft1 .* conj(ft2), dt, N, mean_flux = mean, norm="abs", power_type="all") - pds1 = normalize_periodograms( - ft1 .* conj(ft1), dt, N, mean_flux = mean, norm="abs", power_type="real") - pds2 = normalize_periodograms( - ft2 .* conj(ft2), dt, N, mean_flux = mean, norm="abs", power_type="real") - - p1noise = poisson_level("abs",meanrate=meanrate) - p2noise = poisson_level("abs",meanrate=meanrate) - - @testset "test_intrinsic_coherence" begin - coh = estimate_intrinsic_coherence.( - cross, pds1, pds2, p1noise, p2noise, N) - @test all(x -> isapprox(x, 1; atol=0.001), coh) - end - - @testset "test_raw_high_coherence" begin - coh = raw_coherence.(cross, pds1, pds2, p1noise, p2noise, N) - @test all(x -> isapprox(x, 1; atol=0.001), coh) - end - - @testset "test_raw_low_coherence" begin - nbins = 2 - C, P1, P2 = @view(cross[1:nbins]), @view(pds1[1:nbins]), @view(pds2[1:nbins]) - bsq = bias_term.(P1, P2, p1noise, p2noise, N) - # must be lower than bsq! - low_coh_cross = @. 
rand(Normal(bsq^0.5 / 10, bsq^0.5 / 100)) + 0.0im - coh = raw_coherence.(low_coh_cross, P1, P2, p1noise, p2noise, N) - @test all(iszero,coh) - end - - @testset "test_raw_high_bias" begin - C = [12986.0 + 8694.0im] - P1 = [476156.0] - P2 = [482751.0] - P1noise = 495955 - P2noise = 494967 - - coh = raw_coherence.(C, P1, P2, P1noise, P2noise, 499; intrinsic_coherence=1) - coh_sngl = raw_coherence(C[1], P1[1], P2[1], P1noise, P2noise, 499; intrinsic_coherence=1) - @test coh==real(C .* conj(C))./ (P1 .* P2) - @test coh_sngl==real(C .* conj(C))[1] / (P1[1] * P2[1]) - end - -end - -@testset "test_fourier" begin - dt = 1 - len = 100 - ctrate = 10000 - N = len ÷ dt - dt = len / N - times = sort(rand(Uniform(0, len), len * ctrate)) - gti = [[0 len];;] - bins = LinRange(0, len, N + 1) - counts = fit(Histogram, times, bins).weights - errs = fill!(similar(counts),1) * sqrt(ctrate) - bin_times = (@view(bins[1:end-1]) + @view(bins[2:end])) / 2 - segment_size = 20.0 - times2 = sort(rand(Uniform(0, len), len * ctrate)) - counts2 = fit(Histogram, times2, bins).weights - errs2 = fill!(similar(counts2),1) * sqrt(ctrate) - - @test get_average_ctrate(times, gti, segment_size) == ctrate - @test get_average_ctrate(bin_times, gti, segment_size; counts=counts) == ctrate - - @testset "test_fts_from_segments_cts_and_events_are_equal" begin - N = round(Int, segment_size / dt) - fts_evts = collect(get_flux_iterable_from_segments(times, gti, segment_size, n_bin=N)) - fts_cts = collect(get_flux_iterable_from_segments( - bin_times, gti, segment_size, fluxes=counts)) - @test fts_evts == fts_cts - end - - @testset "test_error_on_averaged_cross_spectrum_low_nave" for common_ref in [true, false] - @test_logs (:warn,r"n_ave is below 30." 
- ) error_on_averaged_cross_spectrum([4 + 1.0im], [2], [4], 29, 2, 2; common_ref=common_ref) - end - - @testset "test_avg_pds_bad_input" begin - _times = rand(Uniform(0,1000),1) - out_ev = avg_pds_from_events(_times, gti, segment_size, dt,silent = true) - @test isnothing(out_ev) - end - - @testset "test_avg_cs_bad_input" for return_auxil in [true, false] - _times1 = rand(Uniform(0,1000),1) - _times2 = rand(Uniform(0,1000),1) - out_ev = avg_cs_from_events(_times1, _times2, gti, - segment_size, dt, silent = true, return_auxil=return_auxil) - @test isnothing(out_ev) - end - - @testset "test_avg_pds_use_common_mean_similar_stats" for norm in ["frac", "abs", "none", "leahy"] - out_comm = avg_pds_from_events( - times, - gti, - segment_size, - dt, - norm=norm, - use_common_mean=true, - silent=true, - fluxes=nothing, - ).power - out = avg_pds_from_events( - times, - gti, - segment_size, - dt, - norm=norm, - use_common_mean=false, - silent=true, - fluxes=nothing, - ).power - @test std(out_comm)≈std(out) rtol=0.1 - end - - @testset "test_avg_cs_use_common_mean_similar_stats" for - norm in ["frac", "abs", "none", "leahy"], - return_auxil in [true, false], fullspec in [true,false] - out_comm = avg_cs_from_events( - times, - times2, - gti, - segment_size, - dt, - norm=norm, - use_common_mean=true, - silent=true, - fullspec=fullspec, - return_auxil=return_auxil - ).power - out = avg_cs_from_events( - times, - times2, - gti, - segment_size, - dt, - norm=norm, - use_common_mean=false, - silent=true, - fullspec=fullspec, - return_auxil=return_auxil - ).power - @test std(out_comm)≈std(out) rtol=0.1 - end - - @testset "test_avg_pds_cts_and_events_are_equal" for use_common_mean in [true,false], norm in ["frac", "abs", "none", "leahy"] - out_ev = avg_pds_from_events( - times, - gti, - segment_size, - dt, - norm=norm, - use_common_mean=use_common_mean, - silent=true, - fluxes=nothing, - ) - out_ct = avg_pds_from_events( - bin_times, - gti, - segment_size, - dt, - norm=norm, - 
use_common_mean=use_common_mean, - silent=true, - fluxes=counts, - ) - compare_tables(out_ev, out_ct) - end - - @testset "test_avg_pds_cts_and_err_and_events_are_equal" for use_common_mean in [true,false], norm in ["frac", "abs", "none", "leahy"] - out_ev = avg_pds_from_events( - times, - gti, - segment_size, - dt, - norm=norm, - use_common_mean=use_common_mean, - silent=true, - fluxes=nothing, - ) - out_ct = avg_pds_from_events( - bin_times, - gti, - segment_size, - dt, - norm=norm, - use_common_mean=use_common_mean, - silent=true, - fluxes=counts, - errors=errs, - ) - # The variance is not _supposed_ to be equal, when we specify errors - if use_common_mean - compare_tables(out_ev, out_ct, rtol=0.01, discard=["variance"]) - else - compare_tables(out_ev, out_ct, rtol=0.1, discard=["variance"]) - end - end - - @testset "test_avg_cs_cts_and_events_are_equal" for use_common_mean in [true,false], norm in ["frac", "abs", "none", "leahy"] - out_ev = avg_cs_from_events( - times, - times2, - gti, - segment_size, - dt, - norm=norm, - use_common_mean=use_common_mean, - silent=true, - ) - out_ct = avg_cs_from_events( - bin_times, - bin_times, - gti, - segment_size, - dt, - norm=norm, - use_common_mean=use_common_mean, - silent=true, - fluxes1=counts, - fluxes2=counts2, - ) - if use_common_mean - compare_tables(out_ev, out_ct, rtol=0.01) - else - compare_tables(out_ev, out_ct, rtol=0.1) - end - end - - @testset "test_avg_cs_cts_and_err_and_events_are_equal" for use_common_mean in [true,false], norm in ["frac", "abs", "none", "leahy"] - out_ev = avg_cs_from_events( - times, - times2, - gti, - segment_size, - dt, - norm=norm, - use_common_mean=use_common_mean, - silent=true, - ) - out_ct = avg_cs_from_events( - bin_times, - bin_times, - gti, - segment_size, - dt, - norm=norm, - use_common_mean=use_common_mean, - silent=true, - fluxes1=counts, - fluxes2=counts2, - errors1=errs, - errors2=errs2, - ) - discard = [m for m in propertynames(out_ev) if m == :variance] - - if 
use_common_mean - compare_tables(out_ev, out_ct, rtol=0.01, discard=discard) - else - compare_tables(out_ev, out_ct, rtol=0.1, discard=discard) - end - end -end - -@testset "test_norm" begin - mean = var = 100000 - N = 1000000 - dt = 0.2 - meanrate = mean / dt - lc = rand(Poisson(mean),N) - pds = abs2.(fft(lc)) - freq = fftfreq(N, dt) - good = 2:(N÷2) - - pdsabs = normalize_abs(pds, dt, size(lc,1)) - pdsfrac = normalize_frac(pds, dt, size(lc,1), mean) - pois_abs = poisson_level("abs", meanrate=meanrate) - pois_frac = poisson_level("frac", meanrate=meanrate) - - @test Statistics.mean(keepat!(pdsabs,good))≈pois_abs rtol=0.02 - @test Statistics.mean(keepat!(pdsfrac,good))≈pois_frac rtol=0.02 - - mean = var = 100000.0 - N = 800000 - dt = 0.2 - df = 1 / (N * dt) - freq = fftfreq(N, dt) - good = freq .> 0 - meanrate = mean / dt - lc = rand(Poisson(mean),N) - nph = sum(lc) - pds = keepat!(abs2.(fft(lc)),good) - lc_bksub = lc .- mean - pds_bksub = keepat!(abs2.(fft(lc_bksub)),good) - lc_renorm = lc / mean - pds_renorm = keepat!(abs2.(fft(lc_renorm)),good) - lc_renorm_bksub = lc_renorm .- 1 - pds_renorm_bksub = keepat!(abs2.(fft(lc_renorm_bksub)),good) - - @testset "test_leahy_bksub_var_vs_standard" begin - leahyvar = normalize_leahy_from_variance(pds_bksub, Statistics.var(lc_bksub), N) - leahy = 2 .* pds ./ sum(lc) - ratio = Statistics.mean(leahyvar./leahy) - @test ratio≈1 rtol=0.01 - end - @testset "test_abs_bksub" begin - ratio = normalize_abs(pds_bksub, dt, N) ./ normalize_abs(pds, dt, N) - @test Statistics.mean(ratio)≈1 rtol=0.01 - end - @testset "test_frac_renorm_constant" begin - ratio = normalize_frac(pds_renorm, dt, N, 1) ./ normalize_frac(pds, dt, N, mean) - @test Statistics.mean(ratio)≈1 rtol=0.01 - end - @testset "test_frac_to_abs_ctratesq" begin - ratio = ( - normalize_frac(pds, dt, N, mean) - ./ normalize_abs(pds, dt, N) - .* meanrate .^ 2 - ) - @test Statistics.mean(ratio)≈1 rtol=0.01 - end - @testset "test_total_variance" begin - vdk_total_variance = sum((lc 
.- mean) .^ 2) - ratio = Statistics.mean(pds) / vdk_total_variance - @test Statistics.mean(ratio)≈1 rtol=0.01 - end - @testset "test_poisson_level_$(norm)" for norm in ["abs", "frac", "leahy","none"] - pdsnorm = normalize_periodograms(pds, dt, N; mean_flux=mean, n_ph=nph, norm=norm) - @test Statistics.mean(pdsnorm)≈poisson_level(norm; meanrate=meanrate, n_ph=nph) rtol=0.01 - end - - @testset "test_poisson_level_real_$(norm)" for norm in ["abs", "frac", "leahy","none"] - pdsnorm = normalize_periodograms(pds, dt, N; mean_flux=mean, n_ph=nph, norm=norm, power_type = "real") - @test Statistics.mean(pdsnorm)≈poisson_level(norm; meanrate=meanrate, n_ph=nph) rtol=0.01 - end - - @testset "test_poisson_level_absolute_$(norm)" for norm in ["abs", "frac", "leahy","none"] - pdsnorm = normalize_periodograms(pds, dt, N; mean_flux=mean, n_ph=nph, norm=norm, power_type = "abs") - @test Statistics.mean(pdsnorm)≈poisson_level(norm; meanrate=meanrate, n_ph=nph) rtol=0.01 - end - - @testset "test_normalize_with_variance" begin - pdsnorm = normalize_periodograms(pds, dt, N; mean_flux=mean, variance = var, norm="leahy") - @test Statistics.mean(pdsnorm)≈2 rtol=0.01 - end - - @testset "test_normalize_none" begin - pdsnorm = normalize_periodograms(pds, dt, N; mean_flux=mean, n_ph=nph, norm="none") - @test Statistics.mean(pdsnorm)≈Statistics.mean(pds) rtol=0.01 - end -end diff --git a/test/test_fourier/test_coherence.jl b/test/test_fourier/test_coherence.jl new file mode 100644 index 0000000..456b5aa --- /dev/null +++ b/test/test_fourier/test_coherence.jl @@ -0,0 +1,88 @@ +function test_data() + fid = h5open(joinpath(@__DIR__, "..", "data", "sample_variable_lc.h5"), "r") + data = (fid["dataset1"][1:10000]) .* 1000.0 + close(fid) + dt = 0.01 + N = length(data) + mean = Statistics.mean(data) + meanrate = mean / dt + freq = fftfreq(length(data), 1/dt) + good = 0 .< freq .< 0.1 + return (data=data, dt=dt, N=N, mean=mean, meanrate=meanrate, good=good) +end + +# Helper function to generate 
processed data for tests +function generate_test_data(data; same_poisson=true) + data1 = rand.(Poisson.(data.data)) + data2 = same_poisson ? data1 : rand.(Poisson.(data.data)) + + ft1 = fft(data1) + ft2 = fft(data2) + keepat!(ft1, data.good) + keepat!(ft2, data.good) + cross = normalize_periodograms( + ft1 .* conj(ft2), data.dt, data.N, + mean_flux = data.mean, norm="abs", power_type="all") + pds1 = normalize_periodograms( + ft1 .* conj(ft1), data.dt, data.N, + mean_flux = data.mean, norm="abs", power_type="real") + pds2 = normalize_periodograms( + ft2 .* conj(ft2), data.dt, data.N, + mean_flux = data.mean, norm="abs", power_type="real") + p1noise = poisson_level("abs", meanrate=data.meanrate) + p2noise = poisson_level("abs", meanrate=data.meanrate) + + return (cross=cross, pds1=pds1, pds2=pds2, p1noise=p1noise, p2noise=p2noise) +end +data = test_data() + +# test_coherence +let + test_data = generate_test_data(data, same_poisson=false) +end + +# test_intrinsic_coherence +let + test_data = generate_test_data(data, same_poisson=true) + + coh = estimate_intrinsic_coherence.( + test_data.cross, test_data.pds1, test_data.pds2, + test_data.p1noise, test_data.p2noise, data.N) + @test all(x -> isapprox(x, 1; atol=0.001), coh) +end + +# test_raw_high_coherence +let + test_data = generate_test_data(data, same_poisson=true) + + coh = raw_coherence.(test_data.cross, test_data.pds1, test_data.pds2, + test_data.p1noise, test_data.p2noise, data.N) + @test all(x -> isapprox(x, 1; atol=0.001), coh) +end + +# test_raw_low_coherence +let + test_data = generate_test_data(data, same_poisson=true) + + nbins = 2 + C, P1, P2 = @view(test_data.cross[1:nbins]), @view(test_data.pds1[1:nbins]), @view(test_data.pds2[1:nbins]) + bsq = bias_term.(P1, P2, test_data.p1noise, test_data.p2noise, data.N) + # must be lower than bsq! + low_coh_cross = @. 
rand(Normal(bsq^0.5 / 10, bsq^0.5 / 100)) + 0.0im + coh = raw_coherence.(low_coh_cross, P1, P2, test_data.p1noise, test_data.p2noise, data.N) + @test all(iszero, coh) +end + +# test_raw_high_bias (this one was already different, so keeping it separate) +let + C = [12986.0 + 8694.0im] + P1 = [476156.0] + P2 = [482751.0] + P1noise = 495955 + P2noise = 494967 + + coh = raw_coherence.(C, P1, P2, P1noise, P2noise, 499; intrinsic_coherence=1) + coh_sngl = raw_coherence(C[1], P1[1], P2[1], P1noise, P2noise, 499; intrinsic_coherence=1) + @test coh == real(C .* conj(C)) ./ (P1 .* P2) + @test coh_sngl == real(C .* conj(C))[1] / (P1[1] * P2[1]) +end \ No newline at end of file diff --git a/test/test_fourier/test_fourier.jl b/test/test_fourier/test_fourier.jl new file mode 100644 index 0000000..f14f5d4 --- /dev/null +++ b/test/test_fourier/test_fourier.jl @@ -0,0 +1,508 @@ +function compare_tables(table1, table2; rtol=0.001, discard=[]) + s_discard = Symbol.(discard) + test_result = true + + for key in propertynames(table1) + if key in s_discard + continue + end + oe, oc = getproperty(table1, key), getproperty(table2, key) + if oe isa Integer || oe isa String + if !(oe == oc) + test_result = false + break + end + elseif isnothing(oe) + if !(isnothing(oc)) + test_result = false + break + end + else + if !(≈(oe, oc, rtol=rtol)) + test_result = false + break + end + end + end + + data_fields_table1 = filter(field -> !(typeof(field) == Symbol && startswith(string(field), "meta")), names(table1)) + data_fields_table2 = filter(field -> !(typeof(field) == Symbol && startswith(string(field), "meta")), names(table2)) + + for field in names(table1) + if field in discard + continue + end + oe, oc = table1[!, field], table2[!, field] + + if !(≈(oe, oc, rtol=rtol)) + test_result = false + break + end + end + @test test_result +end + +const FOURIER_DT = 1 +const FOURIER_LEN = 100 +const FOURIER_CTRATE = 10000 +const FOURIER_SEGMENT_SIZE = 20.0 + +function setup_fourier_test_data(; 
dt=FOURIER_DT, len=FOURIER_LEN, ctrate=FOURIER_CTRATE, two_datasets=false) + N = len ÷ dt + dt = len / N + times = sort(rand(Uniform(0, len), len * ctrate)) + gti = Float64[0 len] + bins = LinRange(0, len, N + 1) + counts = fit(Histogram, times, bins).weights + errs = fill!(similar(counts), 1) * sqrt(ctrate) + bin_times = (@view(bins[1:end-1]) + @view(bins[2:end])) / 2 + + if two_datasets + times2 = sort(rand(Uniform(0, len), len * ctrate)) + counts2 = fit(Histogram, times2, bins).weights + errs2 = fill!(similar(counts2), 1) * sqrt(ctrate) + return (times=times, times2=times2, gti=gti, bins=bins, counts=counts, counts2=counts2, + errs=errs, errs2=errs2, bin_times=bin_times, dt=dt, N=N) + else + return (times=times, gti=gti, bins=bins, counts=counts, errs=errs, + bin_times=bin_times, dt=dt, N=N) + end +end + +function test_pds_common_mean(norm::String) + data = setup_fourier_test_data() + + out_comm = avg_pds_from_events( + data.times, data.gti, FOURIER_SEGMENT_SIZE, data.dt, + norm=norm, use_common_mean=true, silent=true, fluxes=nothing + ).power + out = avg_pds_from_events( + data.times, data.gti, FOURIER_SEGMENT_SIZE, data.dt, + norm=norm, use_common_mean=false, silent=true, fluxes=nothing + ).power + @test std(out_comm) ≈ std(out) rtol=0.1 +end + +function test_cs_common_mean(norm::String, return_auxil::Bool, fullspec::Bool) + data = setup_fourier_test_data(two_datasets=true) + + out_comm = avg_cs_from_events( + data.times, data.times2, data.gti, FOURIER_SEGMENT_SIZE, data.dt, + norm=norm, use_common_mean=true, silent=true, + fullspec=fullspec, return_auxil=return_auxil + ).power + out = avg_cs_from_events( + data.times, data.times2, data.gti, FOURIER_SEGMENT_SIZE, data.dt, + norm=norm, use_common_mean=false, silent=true, + fullspec=fullspec, return_auxil=return_auxil + ).power + @test std(out_comm) ≈ std(out) rtol=0.1 +end + +function test_events_vs_counts_pds(use_common_mean::Bool, norm::String, include_errors::Bool=false) + data = setup_fourier_test_data() + 
+ out_ev = avg_pds_from_events( + data.times, data.gti, FOURIER_SEGMENT_SIZE, data.dt, + norm=norm, use_common_mean=use_common_mean, silent=true, fluxes=nothing + ) + + if include_errors + out_ct = avg_pds_from_events( + data.bin_times, data.gti, FOURIER_SEGMENT_SIZE, data.dt, + norm=norm, use_common_mean=use_common_mean, silent=true, + fluxes=data.counts, errors=data.errs + ) + rtol = use_common_mean ? 0.01 : 0.1 + compare_tables(out_ev, out_ct, rtol=rtol, discard=["variance"]) + else + out_ct = avg_pds_from_events( + data.bin_times, data.gti, FOURIER_SEGMENT_SIZE, data.dt, + norm=norm, use_common_mean=use_common_mean, silent=true, fluxes=data.counts + ) + compare_tables(out_ev, out_ct) + end +end + +function test_events_vs_counts_cs(use_common_mean::Bool, norm::String, include_errors::Bool=false) + data = setup_fourier_test_data(two_datasets=true) + + out_ev = avg_cs_from_events( + data.times, data.times2, data.gti, FOURIER_SEGMENT_SIZE, data.dt, + norm=norm, use_common_mean=use_common_mean, silent=true + ) + + if include_errors + out_ct = avg_cs_from_events( + data.bin_times, data.bin_times, data.gti, FOURIER_SEGMENT_SIZE, data.dt, + norm=norm, use_common_mean=use_common_mean, silent=true, + fluxes1=data.counts, fluxes2=data.counts2, + errors1=data.errs, errors2=data.errs2 + ) + discard = [m for m in propertynames(out_ev) if m == :variance] + rtol = use_common_mean ? 0.01 : 0.1 + compare_tables(out_ev, out_ct, rtol=rtol, discard=discard) + else + out_ct = avg_cs_from_events( + data.bin_times, data.bin_times, data.gti, FOURIER_SEGMENT_SIZE, data.dt, + norm=norm, use_common_mean=use_common_mean, silent=true, + fluxes1=data.counts, fluxes2=data.counts2 + ) + rtol = use_common_mean ? 
0.01 : 0.1 + compare_tables(out_ev, out_ct, rtol=rtol) + end +end + +let + data = setup_fourier_test_data() + @test get_average_ctrate(data.times, data.gti, FOURIER_SEGMENT_SIZE) == FOURIER_CTRATE + @test get_average_ctrate(data.bin_times, data.gti, FOURIER_SEGMENT_SIZE; counts=data.counts) == FOURIER_CTRATE +end + +let + data = setup_fourier_test_data() + N = round(Int, FOURIER_SEGMENT_SIZE / data.dt) + fts_evts = collect(get_flux_iterable_from_segments(data.times, data.gti, FOURIER_SEGMENT_SIZE, n_bin=N)) + fts_cts = collect(get_flux_iterable_from_segments(data.bin_times, data.gti, FOURIER_SEGMENT_SIZE, fluxes=data.counts)) + @test fts_evts == fts_cts +end + +let + common_ref = true + @test_logs (:warn, r"n_ave is below 30.") error_on_averaged_cross_spectrum([4 + 1.0im], [2], [4], 29, 2, 2; common_ref=common_ref) +end + +let + common_ref = false + @test_logs (:warn, r"n_ave is below 30.") error_on_averaged_cross_spectrum([4 + 1.0im], [2], [4], 29, 2, 2; common_ref=common_ref) +end + +let + gti = Float64[0 FOURIER_LEN] + _times = rand(Uniform(0, 1000), 1) + out_ev = avg_pds_from_events(_times, gti, FOURIER_SEGMENT_SIZE, FOURIER_DT, silent=true) + @test isnothing(out_ev) +end + +let + gti = Float64[0 FOURIER_LEN] + return_auxil = true + _times1 = rand(Uniform(0, 1000), 1) + _times2 = rand(Uniform(0, 1000), 1) + out_ev = avg_cs_from_events(_times1, _times2, gti, FOURIER_SEGMENT_SIZE, FOURIER_DT, silent=true, return_auxil=return_auxil) + @test isnothing(out_ev) +end + +let + gti = Float64[0 FOURIER_LEN] + return_auxil = false + _times1 = rand(Uniform(0, 1000), 1) + _times2 = rand(Uniform(0, 1000), 1) + out_ev = avg_cs_from_events(_times1, _times2, gti, FOURIER_SEGMENT_SIZE, FOURIER_DT, silent=true, return_auxil=return_auxil) + @test isnothing(out_ev) +end + +let; test_pds_common_mean("frac"); end +let; test_pds_common_mean("abs"); end +let; test_pds_common_mean("none"); end +let; test_pds_common_mean("leahy"); end + +let; test_cs_common_mean("frac", true, true); end 
+let; test_cs_common_mean("frac", true, false); end +let; test_cs_common_mean("frac", false, true); end +let; test_cs_common_mean("frac", false, false); end + +let; test_events_vs_counts_pds(true, "frac"); end +let; test_events_vs_counts_pds(false, "frac"); end +let; test_events_vs_counts_pds(true, "frac", true); end + +let; test_events_vs_counts_cs(true, "frac"); end +let; test_events_vs_counts_cs(true, "frac", true); end + +# Test LightCurve with vector dt (should use first element) +let + data = setup_fourier_test_data() + eventlist = EventList(data.times, nothing, FITSMetadata( + "[test]", 1, nothing, Dict{String,Vector}(), + Dict{String,Any}(), data.gti, nothing,nothing,nothing,nothing,nothing,nothing,nothing + )) + + result_eventlist = avg_pds_from_eventlist(eventlist, FOURIER_SEGMENT_SIZE, data.dt, silent=true) + result_times = avg_pds_from_events(data.times, data.gti, FOURIER_SEGMENT_SIZE, data.dt, silent=true) + + @test result_eventlist.power ≈ result_times.power + @test result_eventlist.freq ≈ result_times.freq +end + +# Test EventList PDS function equivalence with different norms +let + data = setup_fourier_test_data() + eventlist = EventList(data.times, nothing, FITSMetadata( + "[test]", 1, nothing, Dict{String,Vector}(), + Dict{String,Any}(), data.gti, nothing,nothing,nothing,nothing,nothing,nothing,nothing + )) + + for norm in ["frac", "abs", "none", "leahy"] + result_eventlist = avg_pds_from_eventlist(eventlist, FOURIER_SEGMENT_SIZE, data.dt, norm=norm, silent=true) + result_times = avg_pds_from_events(data.times, data.gti, FOURIER_SEGMENT_SIZE, data.dt, norm=norm, silent=true) + + @test result_eventlist.power ≈ result_times.power + @test result_eventlist.freq ≈ result_times.freq + end +end + +# Test EventList PDS function with use_common_mean parameter +let + data = setup_fourier_test_data() + eventlist = EventList(data.times, nothing, FITSMetadata( + "[test]", 1, nothing, Dict{String,Vector}(), + Dict{String,Any}(), data.gti, 
nothing,nothing,nothing,nothing,nothing,nothing,nothing + )) + + for use_common_mean in [true, false] + result_eventlist = avg_pds_from_eventlist(eventlist, FOURIER_SEGMENT_SIZE, data.dt, use_common_mean=use_common_mean, silent=true) + result_times = avg_pds_from_events(data.times, data.gti, FOURIER_SEGMENT_SIZE, data.dt, use_common_mean=use_common_mean, silent=true) + + @test result_eventlist.power ≈ result_times.power + @test result_eventlist.freq ≈ result_times.freq + end +end + +# Test EventList cross spectrum function equivalence +let + data = setup_fourier_test_data(two_datasets=true) + eventlist1 = EventList(data.times, nothing, FITSMetadata( + "[test]", 1, nothing, Dict{String,Vector}(), + Dict{String,Any}(), data.gti, nothing,nothing,nothing,nothing,nothing,nothing,nothing + )) + eventlist2 = EventList(data.times2, nothing, FITSMetadata( + "[test]", 1, nothing, Dict{String,Vector}(), + Dict{String,Any}(), data.gti, nothing,nothing,nothing,nothing,nothing,nothing,nothing + )) + + result_eventlists = avg_cs_from_eventlists(eventlist1, eventlist2, FOURIER_SEGMENT_SIZE, data.dt, silent=true) + result_times = avg_cs_from_events(data.times, data.times2, data.gti, FOURIER_SEGMENT_SIZE, data.dt, silent=true) + + @test result_eventlists.power ≈ result_times.power + @test result_eventlists.freq ≈ result_times.freq +end + +# Test EventList cross spectrum function with different parameters +let + data = setup_fourier_test_data(two_datasets=true) + eventlist1 = EventList(data.times, nothing, FITSMetadata( + "[test]", 1, nothing, Dict{String,Vector}(), + Dict{String,Any}(), data.gti, nothing,nothing,nothing,nothing,nothing,nothing,nothing + )) + eventlist2 = EventList(data.times2, nothing, FITSMetadata( + "[test]", 1, nothing, Dict{String,Vector}(), + Dict{String,Any}(), data.gti, nothing,nothing,nothing,nothing,nothing,nothing,nothing + )) + + for norm in ["frac", "abs", "none", "leahy"] + for use_common_mean in [true, false] + for fullspec in [true, false] + for 
return_auxil in [true, false]
                    # Cross spectra computed from EventList objects must agree bin-for-bin
                    # with the raw-array entry point for every parameter combination.
                    result_eventlists = avg_cs_from_eventlists(
                        eventlist1, eventlist2, FOURIER_SEGMENT_SIZE, data.dt,
                        norm=norm, use_common_mean=use_common_mean,
                        fullspec=fullspec, return_auxil=return_auxil, silent=true
                    )
                    result_times = avg_cs_from_events(
                        data.times, data.times2, data.gti, FOURIER_SEGMENT_SIZE, data.dt,
                        norm=norm, use_common_mean=use_common_mean,
                        fullspec=fullspec, return_auxil=return_auxil, silent=true
                    )

                    @test result_eventlists.power ≈ result_times.power
                    @test result_eventlists.freq ≈ result_times.freq

                    if return_auxil
                        # Auxiliary per-channel PDS are only attached on request.
                        @test result_eventlists.pds1 ≈ result_times.pds1
                        @test result_eventlists.pds2 ≈ result_times.pds2
                    end
                end
            end
        end
    end
end

# Test LightCurve PDS function equivalence:
# avg_pds_from_lightcurve must match avg_pds_from_iterable on the same counts.
let
    dt = 1.0
    N = 100
    times = collect(0:dt:(N-1)*dt)
    counts = rand(Poisson(100), N)

    metadata = LightCurveMetadata(
        "TEST", "INST", "TEST_OBJ", 58000.0,
        (times[1], times[end]), dt,
        [Dict{String,Any}()], Dict{String,Any}()
    )

    lc = LightCurve(times, dt, counts, nothing, nothing,
        EventProperty{Float64}[], metadata, :poisson)
    flux_iterable = [counts]

    result_lightcurve = avg_pds_from_lightcurve(lc, silent=true)
    result_iterable = avg_pds_from_iterable(flux_iterable, dt, silent=true)

    @test result_lightcurve.power ≈ result_iterable.power
    @test result_lightcurve.freq ≈ result_iterable.freq
end

# Test LightCurve PDS function with different norms and mean conventions.
let
    dt = 1.0
    N = 100
    times = collect(0:dt:(N-1)*dt)
    counts = rand(Poisson(100), N)

    metadata = LightCurveMetadata(
        "TEST", "INST", "TEST_OBJ", 58000.0,
        (times[1], times[end]), dt,
        [Dict{String,Any}()], Dict{String,Any}()
    )

    lc = LightCurve(times, dt, counts, nothing, nothing,
        EventProperty{Float64}[], metadata, :poisson)
    flux_iterable = [counts]

    for norm in ["frac", "abs", "none", "leahy"]
        for use_common_mean in [true, false]
            result_lightcurve = avg_pds_from_lightcurve(lc, norm=norm,
                use_common_mean=use_common_mean, silent=true)
            result_iterable = avg_pds_from_iterable(flux_iterable, dt, norm=norm,
                use_common_mean=use_common_mean, silent=true)

            @test result_lightcurve.power ≈ result_iterable.power
            @test result_lightcurve.freq ≈ result_iterable.freq
        end
    end
end

# Test LightCurve cross spectrum function equivalence (default parameters).
let
    dt = 1.0
    N = 100
    times = collect(0:dt:(N-1)*dt)
    counts1 = rand(Poisson(100), N)
    counts2 = rand(Poisson(100), N)

    metadata = LightCurveMetadata(
        "TEST", "INST", "TEST_OBJ", 58000.0,
        (times[1], times[end]), dt,
        [Dict{String,Any}()], Dict{String,Any}()
    )

    lc1 = LightCurve(times, dt, counts1, nothing, nothing,
        EventProperty{Float64}[], metadata, :poisson)
    lc2 = LightCurve(times, dt, counts2, nothing, nothing,
        EventProperty{Float64}[], metadata, :poisson)

    flux_iterable1 = [counts1]
    flux_iterable2 = [counts2]

    result_lightcurves = avg_cs_from_lightcurves(lc1, lc2, silent=true)
    result_iterables = avg_cs_from_iterables(flux_iterable1, flux_iterable2, dt, silent=true)

    @test result_lightcurves.power ≈ result_iterables.power
    @test result_lightcurves.freq ≈ result_iterables.freq
end

# Test LightCurve cross spectrum function over the full parameter grid.
let
    dt = 1.0
    N = 100
    times = collect(0:dt:(N-1)*dt)
    counts1 = rand(Poisson(100), N)
    counts2 = rand(Poisson(100), N)

    metadata = LightCurveMetadata(
        "TEST", "INST", "TEST_OBJ", 58000.0,
        (times[1], times[end]), dt,
        [Dict{String,Any}()], Dict{String,Any}()
    )

    lc1 = LightCurve(times, dt, counts1, nothing, nothing,
        EventProperty{Float64}[], metadata, :poisson)
    lc2 = LightCurve(times, dt, counts2, nothing, nothing,
        EventProperty{Float64}[], metadata, :poisson)

    flux_iterable1 = [counts1]
    flux_iterable2 = [counts2]

    for norm in ["frac", "abs", "none", "leahy"]
        for use_common_mean in [true, false]
            for fullspec in [true, false]
                for return_auxil in [true, false]
                    result_lightcurves = avg_cs_from_lightcurves(
                        lc1, lc2, norm=norm, use_common_mean=use_common_mean,
                        fullspec=fullspec, return_auxil=return_auxil, silent=true
                    )
                    result_iterables = avg_cs_from_iterables(
                        flux_iterable1, flux_iterable2, dt,
                        norm=norm, use_common_mean=use_common_mean,
                        fullspec=fullspec, return_auxil=return_auxil, silent=true
                    )

                    @test result_lightcurves.power ≈ result_iterables.power
                    @test result_lightcurves.freq ≈ result_iterables.freq

                    if return_auxil
                        @test result_lightcurves.pds1 ≈ result_iterables.pds1
                        @test result_lightcurves.pds2 ≈ result_iterables.pds2
                    end
                end
            end
        end
    end
end

# Test EventList with no GTI: the PDS routine should fall back to a single GTI
# spanning [min(times), max(times)].
let
    data = setup_fourier_test_data()
    eventlist_no_gti = EventList(
        data.times,
        nothing,
        FITSMetadata(
            "[test]", 1, nothing, Dict{String,Vector}(),
            Dict{String,Any}(), # headers
            nothing,            # gti
            nothing,            # gti_source
            nothing,            # mjd_ref
            nothing,            # time_zero
            nothing,            # time_unit
            nothing,            # time_sys
            nothing,            # time_pixr
            nothing             # time_del
        )
    )

    # The default GTI the implementation is expected to synthesize.
    expected_gti = Float64[minimum(data.times) maximum(data.times)]

    result_eventlist = avg_pds_from_eventlist(eventlist_no_gti, FOURIER_SEGMENT_SIZE, data.dt, silent=true)
    result_times = avg_pds_from_events(data.times, expected_gti, FOURIER_SEGMENT_SIZE, data.dt, silent=true)

    @test result_eventlist.power ≈ result_times.power
    @test result_eventlist.freq ≈ result_times.freq
end

# Test LightCurve with a vector-valued dt: the first element should be used.
let
    dt_vec = [1.0, 1.0, 1.0]
    N = 100
    times = collect(0:dt_vec[1]:(N-1)*dt_vec[1])
    counts = rand(Poisson(100), N)
    metadata = LightCurveMetadata(
        "TEST", "INST", "TEST_OBJ", 58000.0,
        (times[1], times[end]), dt_vec[1],
        [Dict{String,Any}()], Dict{String,Any}()
    )
    lc_vec_dt = LightCurve(times, dt_vec, counts, nothing, nothing,
        EventProperty{Float64}[], metadata, :poisson)
    flux_iterable = [counts]
    result_lightcurve = avg_pds_from_lightcurve(lc_vec_dt, silent=true)
    result_iterable = avg_pds_from_iterable(flux_iterable, dt_vec[1], silent=true)
    @test result_lightcurve.power ≈ result_iterable.power
    @test result_lightcurve.freq ≈ result_iterable.freq
end

# --- test/test_fourier/test_norm.jl ---

# NOTE(review): the original chained declaration `const TEST_MEAN = TEST_VAR = 100000.0`
# only marks TEST_MEAN as const — TEST_VAR remained a non-const (untyped) global even
# though test_normalize_with_variance reads it. Declare both const explicitly.
const TEST_VAR = 100000.0
const TEST_MEAN = TEST_VAR
const TEST_N = 800000
const TEST_DT = 0.2

"""
    setup_test_data(; N=TEST_N, dt=TEST_DT, mean=TEST_MEAN)

Build a Poisson-distributed fake light curve and its raw (unnormalized)
periodogram restricted to positive frequencies. Returns a named tuple with
the frequency grid, the positive-frequency mask, the mean count rate,
the light curve, total photon count, the periodogram, and the inputs.
"""
function setup_test_data(; N=TEST_N, dt=TEST_DT, mean=TEST_MEAN)
    freq = fftfreq(N, dt)
    good = freq .> 0
    meanrate = mean / dt
    lc = rand(Poisson(mean), N)
    nph = sum(lc)
    # keepat! with a Bool mask retains only the positive-frequency bins.
    pds = keepat!(abs2.(fft(lc)), good)
    return (
        freq=freq, good=good, meanrate=meanrate,
        lc=lc, nph=nph, pds=pds, N=N, dt=dt, mean=mean
    )
end

"""
    test_poisson_level(norm, power_type="all")

Normalize a fresh fake periodogram with `norm`/`power_type` and check that its
mean matches the theoretical Poisson noise level for that normalization.
"""
function test_poisson_level(norm::String, power_type::String="all")
    data = setup_test_data()
    pdsnorm = normalize_periodograms(
        data.pds, data.dt, data.N;
        mean_flux=data.mean, n_ph=data.nph,
        norm=norm, power_type=power_type
    )
    expected = poisson_level(norm; meanrate=data.meanrate, n_ph=data.nph)
    @test Statistics.mean(pdsnorm) ≈ expected rtol=0.01
end

# test_positive_fft_bins: positive_fft_bins must select exactly the f > 0 bins.
let
    freq = fftfreq(11)
    goodbins = positive_fft_bins(11)
    @test filter(x -> x > 0, freq) == freq[goodbins]
end

# test_norm_setup_and_basic_tests: abs/frac normalizations vs Poisson levels.
let
    data = setup_test_data(N=1000000)
    pdsabs = normalize_abs(data.pds, data.dt, size(data.lc, 1))
    pdsfrac = normalize_frac(data.pds, data.dt, size(data.lc, 1), data.mean)
    pois_abs = poisson_level("abs", meanrate=data.meanrate)
    pois_frac = poisson_level("frac", meanrate=data.meanrate)

    @test Statistics.mean(pdsabs) ≈ pois_abs rtol=0.02
    @test Statistics.mean(pdsfrac) ≈ pois_frac rtol=0.02
end

# test_leahy_bksub_var_vs_standard: variance-based Leahy normalization of a
# background-subtracted light curve must match the standard 2P/Nph form.
let
    data = setup_test_data()
    lc_bksub = data.lc .- data.mean
    pds_bksub = keepat!(abs2.(fft(lc_bksub)), data.good)
    leahyvar = normalize_leahy_from_variance(pds_bksub, Statistics.var(lc_bksub), data.N)
    leahy = 2 .* data.pds ./ sum(data.lc)
    ratio = Statistics.mean(leahyvar ./ leahy)
    @test ratio ≈ 1 rtol=0.01
end

# test_abs_bksub: absolute-rms normalization is invariant under mean subtraction.
let
    data = setup_test_data()
    lc_bksub = data.lc .- data.mean
    pds_bksub = keepat!(abs2.(fft(lc_bksub)), data.good)
    ratio = normalize_abs(pds_bksub, data.dt, data.N) ./ normalize_abs(data.pds, data.dt, data.N)
    @test Statistics.mean(ratio) ≈ 1 rtol=0.01
end

# test_frac_renorm_constant: fractional-rms normalization is invariant under
# a constant rescaling of the light curve (with the mean rescaled accordingly).
let
    data = setup_test_data()
    lc_renorm = data.lc / data.mean
    pds_renorm = keepat!(abs2.(fft(lc_renorm)), data.good)
    ratio = normalize_frac(pds_renorm, data.dt, data.N, 1) ./ normalize_frac(data.pds, data.dt, data.N, data.mean)
    @test Statistics.mean(ratio) ≈ 1 rtol=0.01
end

# test_frac_to_abs_ctratesq: frac and abs norms differ by the squared count rate.
let
    data = setup_test_data()

    ratio = (
        normalize_frac(data.pds, data.dt, data.N, data.mean)
        ./ normalize_abs(data.pds, data.dt, data.N)
        .* data.meanrate .^ 2
    )
    @test Statistics.mean(ratio) ≈ 1 rtol=0.01
end

# test_total_variance: Parseval check — the mean unnormalized power over the
# positive frequencies equals the total variance of the light curve.
let
    data = setup_test_data()

    vdk_total_variance = sum((data.lc .- data.mean) .^ 2)
    ratio = Statistics.mean(data.pds) / vdk_total_variance
    # `ratio` is a scalar; compare it directly (the original wrapped it in a
    # redundant Statistics.mean call).
    @test ratio ≈ 1 rtol=0.01
end

# Poisson-level tests for every norm / power-type combination.
+@testset "Poisson Level Tests" begin + test_poisson_level("abs") + test_poisson_level("frac") + test_poisson_level("leahy") + test_poisson_level("none") + + # Real power type tests + test_poisson_level("abs", "real") + test_poisson_level("frac", "real") + test_poisson_level("leahy", "real") + test_poisson_level("none", "real") + + # Absolute power type tests + test_poisson_level("abs", "abs") + test_poisson_level("frac", "abs") + test_poisson_level("leahy", "abs") + test_poisson_level("none", "abs") +end + +# test_normalize_with_variance +let + data = setup_test_data() + pdsnorm = normalize_periodograms(data.pds, data.dt, data.N; mean_flux=data.mean, variance=TEST_VAR, norm="leahy") + @test Statistics.mean(pdsnorm) ≈ 2 rtol=0.01 +end + +# test_normalize_none +let + data = setup_test_data() + pdsnorm = normalize_periodograms(data.pds, data.dt, data.N; mean_flux=data.mean, n_ph=data.nph, norm="none") + @test Statistics.mean(pdsnorm) ≈ Statistics.mean(data.pds) rtol=0.01 +end \ No newline at end of file diff --git a/test/test_gti.jl b/test/test_gti.jl index 282809b..eb59079 100644 --- a/test/test_gti.jl +++ b/test/test_gti.jl @@ -7,7 +7,7 @@ function create_test_eventlist(times::Vector{Float64}, energies::Union{Vector{Fl "keV", # energy_units Dict{String,Vector}(), # extra_columns mock_headers, # headers - nothing, # gti + reshape([0.0, maximum(times)], 1, 2), # gti nothing, # gti_source nothing, # mjd_ref nothing, # time_zero @@ -26,7 +26,7 @@ function create_test_lightcurve(times::Vector{Float64}, counts::Vector{Int}, dt: (minimum(times)-dt/2, maximum(times)+dt/2), dt, Vector{Dict{String,Any}}(), - Dict{String,Any}() + Dict{String,Any}("gti" => reshape([minimum(times) - dt/2, maximum(times) + dt/2], 1, 2)) ) return LightCurve( diff --git a/test/test_plotting/test_plots_recipes_crossspectrum.jl b/test/test_plotting/test_plots_recipes_crossspectrum.jl new file mode 100644 index 0000000..72ee084 --- /dev/null +++ 
# --- test/test_plotting/test_plots_recipes_crossspectrum.jl ---

# Mock data creators -----------------------------------------------------------

"""
    create_crossspectrum(freqs, powers; kwargs...)

Build a `CrossSpectrum{Float64}` around the given frequency grid and complex
powers, with mock metadata. When `ps1`/`ps2` are not supplied, the per-channel
power spectra default to `abs2.(powers)`.
"""
function create_crossspectrum(freqs::Vector{Float64}, powers::Vector{Complex{Float64}};
                              ps1::Union{Vector{Float64}, Nothing}=nothing,
                              ps2::Union{Vector{Float64}, Nothing}=nothing,
                              norm::String="frac", power_type::String="all",
                              nphots1::Float64=1000.0, nphots2::Float64=1000.0,
                              m::Int=1, k::Union{Int, Vector{Int}}=1,
                              segment_size::Float64=100.0,
                              mean_rate1::Float64=10.0, mean_rate2::Float64=10.0,
                              fullspec::Bool=false, channels_overlap::Bool=false)
    # Frequency resolution; degenerate single-bin grids fall back to 1.0.
    df = length(freqs) > 1 ? freqs[2] - freqs[1] : 1.0

    mock_metadata = LightCurveMetadata(
        "test", "test", "test", 0.0, (0.0, segment_size), df,
        Vector{Dict{String,Any}}(), Dict{String,Any}()
    )

    ps1_data = ps1 === nothing ? abs2.(powers) : ps1
    ps2_data = ps2 === nothing ? abs2.(powers) : ps2

    return CrossSpectrum{Float64}(
        freqs, powers, nothing, ps1_data, ps2_data, norm, power_type, df,
        nphots1, nphots2, m, length(freqs), k, mock_metadata, mock_metadata,
        fullspec, channels_overlap, segment_size, mean_rate1, mean_rate2
    )
end

"""
    create_lightcurve_test(times, counts, dt=1.0)

Build a `LightCurve` with a single GTI spanning the padded time range,
for use in the plotting-recipe tests.
"""
function create_lightcurve_test(times::Vector{Float64}, counts::Vector{Int}, dt::Float64=1.0)
    tstart, tstop = minimum(times) - dt/2, maximum(times) + dt/2
    gti = reshape([tstart, tstop], 1, 2)

    metadata = LightCurveMetadata(
        "test", "test", "test", 0.0, (tstart, tstop), dt,
        Vector{Dict{String,Any}}(), Dict{String,Any}("gti" => gti)
    )

    return LightCurve(times, dt, counts, nothing, nothing,
        EventProperty{Float64}[], metadata, :poisson)
end

# Basic amplitude plot recipe: y = |C(f)|.
let
    freqs = collect(0.1:0.1:10.0)
    powers = complex.(randn(length(freqs)) .+ 1.0, randn(length(freqs)))
    cs = create_crossspectrum(freqs, powers)

    plt = plot(cs, plot_type=:amplitude)
    @test plt isa Plots.Plot
    @test length(plt.series_list) == 1

    # The recipe must hand back the untouched frequency grid and amplitudes.
    first_series = plt.series_list[1]
    expected_amplitude = abs.(powers)
    @test first_series.plotattributes[:x] ≈ freqs
    @test first_series.plotattributes[:y] ≈ expected_amplitude
end

# Power spectrum recipe: y = |C(f)|^2.
let
    freqs = collect(0.1:0.1:10.0)
    powers = complex.(randn(length(freqs)) .+ 1.0, randn(length(freqs)))
    cs = create_crossspectrum(freqs, powers)

    plt = plot(cs, plot_type=:power)
    first_series = plt.series_list[1]
    expected_power = abs2.(powers)
    @test first_series.plotattributes[:x] ≈ freqs
    @test first_series.plotattributes[:y] ≈ expected_power
end

# Phase lag recipe: y = arg(C(f)).
let
    freqs = collect(0.1:0.1:10.0)
    powers = complex.(randn(length(freqs)), randn(length(freqs)))
    cs = create_crossspectrum(freqs, powers)

    plt = plot(cs, plot_type=:phase_lag)
    first_series = plt.series_list[1]
    expected_phase = angle.(powers)
    @test first_series.plotattributes[:x] ≈ freqs
    @test first_series.plotattributes[:y] ≈ expected_phase
end

# Time lag recipe: phase converted to time, arg(C)/(2πf).
let
    freqs = collect(0.1:0.1:10.0)
    powers = complex.(randn(length(freqs)), randn(length(freqs)))
    cs = create_crossspectrum(freqs, powers)

    plt = plot(cs, plot_type=:time_lag)
    first_series = plt.series_list[1]
    expected_time_lag = angle.(powers) ./ (2π .* freqs)
    @test first_series.plotattributes[:x] ≈ freqs
    @test first_series.plotattributes[:y] ≈ expected_time_lag
end

# Coherence recipe: |C|^2 / (P1 * P2).
let
    freqs = collect(0.1:0.1:10.0)
    powers = complex.(randn(length(freqs)), randn(length(freqs)))
    ps1 = abs2.(powers) .+ 0.5
    ps2 = abs2.(powers) .+ 0.5
    cs = create_crossspectrum(freqs, powers, ps1=ps1, ps2=ps2)

    plt = plot(cs, plot_type=:coherence)
    first_series = plt.series_list[1]
    expected_coherence = abs2.(powers) ./ (ps1 .* ps2)
    @test first_series.plotattributes[:x] ≈ freqs
    @test first_series.plotattributes[:y] ≈ expected_coherence
end

# SNR recipe: must agree with signal_to_noise_ratio on the same object.
let
    freqs = collect(0.1:0.1:10.0)
    powers = complex.(randn(length(freqs)) .+ 2.0, randn(length(freqs)))
    cs = create_crossspectrum(freqs, powers)

    plt = plot(cs, plot_type=:snr)
    first_series = plt.series_list[1]
    expected_snr = signal_to_noise_ratio(cs)  # same function the recipe uses
    @test first_series.plotattributes[:x] ≈ cs.freq
    @test first_series.plotattributes[:y] ≈ expected_snr
end

# Real/imaginary components recipe: one series per component.
let
    freqs = collect(0.1:0.1:10.0)
    powers = complex.(randn(length(freqs)), randn(length(freqs)))
    cs = create_crossspectrum(freqs, powers)

    plt = plot(cs, plot_type=:real_imaginary)
    @test length(plt.series_list) >= 2

    # Scan the series for one matching real.(powers) and one matching imag.(powers).
    found_real = false
    found_imag = false

    for series in plt.series_list
        if haskey(series.plotattributes, :y)
            y_data = series.plotattributes[:y]
            if isa(y_data, Vector) && length(y_data) == length(powers)
                try
                    if y_data ≈ real.(powers)
                        found_real = true
                    elseif y_data ≈ imag.(powers)
                        found_imag = true
                    end
                catch
                    continue
                end
            end
        end
    end

    @test found_real && found_imag
end

# PDS comparison recipe: both input power spectra must appear as series.
let
    freqs = collect(0.1:0.1:10.0)
    powers = complex.(randn(length(freqs)), randn(length(freqs)))
    ps1 = abs2.(powers) .+ randn(length(freqs)) .* 0.1
    ps2 = abs2.(powers) .+ randn(length(freqs)) .* 0.2
    cs = create_crossspectrum(freqs, powers, ps1=ps1, ps2=ps2)

    plt = plot(cs, plot_type=:pds_comparison)
    @test length(plt.series_list) >= 2

    found_ps1 = false
    found_ps2 = false

    for series in plt.series_list
        if haskey(series.plotattributes, :y)
            y_data = series.plotattributes[:y]
            if isa(y_data, Vector) && length(y_data) == length(ps1)
                try
                    if y_data ≈ ps1
                        found_ps1 = true
                    elseif y_data ≈ ps2
                        found_ps2 = true
                    end
                catch
                    continue
                end
            end
        end
    end

    @test found_ps1 && found_ps2
end

# Significant detections specialized recipe.
let
    freqs = collect(0.1:0.1:10.0)
    powers = complex.(randn(length(freqs)) .+ 0.5, randn(length(freqs)))
    powers[5:7] .= complex.(randn(3) .+ 5.0, randn(3))  # inject high-SNR bins
    cs = create_crossspectrum(freqs, powers)

    plt = plot(cs, Val(:significant_detections), threshold=3.0)
    @test length(plt.series_list) >= 2  # main spectrum + significant points

    # The injected bins must exceed the threshold over the theoretical noise.
    noise_level = theoretical_noise_level(cs)
    snr_data = abs.(powers) ./ noise_level
    significant_mask = snr_data .> 3.0
    @test sum(significant_mask) > 0
end

# Phase lag errors recipe: error bars are power_err / |C|.
let
    freqs = collect(0.1:0.1:10.0)
    powers = complex.(randn(length(freqs)), randn(length(freqs)))
    cs = create_crossspectrum(freqs, powers)
    cs.power_err = abs.(powers) .* 0.1

    plt = plot(cs, Val(:phase_lag_errors))
    first_series = plt.series_list[1]
    @test haskey(first_series.plotattributes, :yerror)

    expected_phase_err = cs.power_err ./ abs.(powers)
    @test first_series.plotattributes[:yerror] ≈ expected_phase_err
end

# Time lag errors recipe: phase errors scaled by 1/(2πf).
let
    freqs = collect(0.5:0.1:5.0)
    powers = complex.(randn(length(freqs)), randn(length(freqs)))
    cs = create_crossspectrum(freqs, powers)
    cs.power_err = abs.(powers) .* 0.1

    plt = plot(cs, Val(:time_lag_errors))
    first_series = plt.series_list[1]
    phase_err = cs.power_err ./ abs.(powers)
    expected_time_err = phase_err ./ (2π .* freqs)
    @test first_series.plotattributes[:yerror] ≈ expected_time_err
end

# White noise estimation recipe.
let
    freqs = collect(0.1:0.1:10.0)
    powers = complex.(randn(length(freqs)) .+ 1.0, randn(length(freqs)))
    high_freq_idx = freqs .> 8.0
    powers[high_freq_idx] .= complex.(randn(sum(high_freq_idx)) .+ 0.5, randn(sum(high_freq_idx)))
    cs = create_crossspectrum(freqs, powers)

    plt = plot(cs, Val(:white_noise))
    @test length(plt.series_list) >= 3

    # The noise estimate comes from the top 20% of the frequency range.
    high_freq_cutoff = maximum(freqs) * 0.8
    high_freq_mask = freqs .>= high_freq_cutoff
    expected_noise = median(abs.(powers[high_freq_mask]))
    @test expected_noise > 0
end

# Frequency range filtering in recipes.
let
    freqs = collect(0.1:0.1:10.0)
    powers = complex.(randn(length(freqs)), randn(length(freqs)))
    cs = create_crossspectrum(freqs, powers)

    freq_range = (2.0, 5.0)
    plt = plot(cs, plot_type=:amplitude, freq_range=freq_range)
    first_series = plt.series_list[1]
    plotted_freqs = first_series.plotattributes[:x]
    @test all(plotted_freqs .>= freq_range[1])
    @test all(plotted_freqs .<= freq_range[2])
end

# Coherence confidence recipe for averaged spectra.
let
    freqs = collect(0.1:0.1:10.0)
    powers = complex.(randn(length(freqs)), randn(length(freqs)))
    ps1 = abs2.(powers) .+ randn(length(freqs)) .* 0.1
    ps2 = abs2.(powers) .+ randn(length(freqs)) .* 0.1
    m_segments = 10
    cs = create_crossspectrum(freqs, powers, ps1=ps1, ps2=ps2, m=m_segments)

    plt = plot(cs, Val(:coherence_confidence))
    @test length(plt.series_list) >= 3

    # Sanity-check the confidence-level formula the recipe is expected to use.
    confidence_95 = 0.95^(1/(m_segments-1))
    confidence_99 = 0.99^(1/(m_segments-1))
    @test confidence_95 < 1.0
    @test confidence_99 < 1.0
    @test confidence_99 > confidence_95
end

# Frequency rebinning recipe.
let
    freqs = collect(0.1:0.01:5.0)  # 491 points
    powers = complex.(randn(length(freqs)) .+ 1.5, randn(length(freqs)))
    cs = create_crossspectrum(freqs, powers)

    rebin_factor = 10
    plt = plot(cs, Val(:frequency_snr), rebin_factor=rebin_factor)
    first_series = plt.series_list[1]
    rebinned_points = length(first_series.plotattributes[:x])
    expected_points = div(length(freqs), rebin_factor)
    @test rebinned_points ≈ expected_points rtol=0.1
end

# Noise comparison recipe: original vs noise-corrected amplitudes.
let
    freqs = collect(0.1:0.1:10.0)
    powers = complex.(randn(length(freqs)) .+ 2.0, randn(length(freqs)))
    cs = create_crossspectrum(freqs, powers)

    plt = plot(cs, Val(:noise_comparison))
    @test length(plt.series_list) >= 2

    original_series = plt.series_list[1]
    corrected_series = plt.series_list[2]

    original_values = original_series.plotattributes[:y]
    corrected_values = corrected_series.plotattributes[:y]

    @test original_values ≈ abs.(powers)
    @test all(corrected_values .<= original_values)  # noise subtraction effect
end

# Light curve comparison recipe with downsampling.
let
    times1 = collect(0.0:0.01:1000.0)  # 100,001 points
    times2 = collect(0.0:0.01:1000.0)
    counts1 = rand(1:50, length(times1))
    counts2 = rand(1:40, length(times2))

    lc1 = create_lightcurve_test(times1, counts1, 0.01)
    lc2 = create_lightcurve_test(times2, counts2, 0.01)

    max_points = 5000
    plt = plot(lc1, lc2, max_points=max_points)
    @test plt isa Plots.Plot
    @test length(plt.series_list) >= 2

    # Collect the lengths of the actual data series (ignore decorations).
    data_series_lengths = Int[]
    for series in plt.series_list
        if haskey(series.plotattributes, :x) && haskey(series.plotattributes, :y)
            x_data = series.plotattributes[:x]
            y_data = series.plotattributes[:y]
            if isa(x_data, Vector) && isa(y_data, Vector) &&
               length(x_data) == length(y_data) && length(x_data) > 100
                push!(data_series_lengths, length(x_data))
            end
        end
    end

    @test length(data_series_lengths) >= 2
    for n_points in data_series_lengths
        @test n_points <= max_points * 1.1  # allow rounding tolerance
    end
    @test any(n_points -> n_points < length(times1) / 10, data_series_lengths)
end

# Multiple spectra normalization comparison recipe.
let
    freqs = collect(0.1:0.1:10.0)

    powers1 = complex.(randn(length(freqs)) .+ 1.0, randn(length(freqs)))
    powers2 = powers1 .* 2.0
    powers3 = powers1 .* 0.5

    cs1 = create_crossspectrum(freqs, powers1, norm="leahy")
    cs2 = create_crossspectrum(freqs, powers2, norm="frac")
    cs3 = create_crossspectrum(freqs, powers3, norm="rms")

    cs_array = [cs1, cs2, cs3]
    plt = plot(cs_array, Val(:normalization_comparison))
    @test length(plt.series_list) >= length(cs_array)

    # Every spectrum's amplitude curve must show up among the plotted series.
    found_count = 0
    for cs in cs_array
        expected_data = abs.(cs.power)
        for series in plt.series_list
            if haskey(series.plotattributes, :y)
                y_data = series.plotattributes[:y]
                if isa(y_data, Vector) && length(y_data) == length(expected_data)
                    try
                        if y_data ≈ expected_data
                            found_count += 1
                            break
                        end
                    catch
                        continue
                    end
                end
            end
        end
    end
    @test found_count == length(cs_array)
end

# Rebinning comparison recipe: fine vs coarse resolution in one plot.
let
    freqs_fine = collect(0.1:0.01:10.0)
    freqs_coarse = collect(0.1:0.1:10.0)

    base_spectrum = sin.(freqs_fine * 2π) .+ randn(length(freqs_fine)) .* 0.1
    powers_fine = complex.(base_spectrum, randn(length(freqs_fine)) .* 0.1)

    # Coarse spectrum: average the fine spectrum over a ±0.05 window per bin.
    coarse_spectrum = [mean(base_spectrum[max(1, round(Int, (f-0.05)/0.01)):min(end, round(Int, (f+0.05)/0.01))]) for f in freqs_coarse]
    powers_coarse = complex.(coarse_spectrum, randn(length(freqs_coarse)) .* 0.1)

    cs_fine = create_crossspectrum(freqs_fine, powers_fine)
    cs_coarse = create_crossspectrum(freqs_coarse, powers_coarse)

    plt = plot(cs_fine, cs_coarse, Val(:rebinning_comparison))
    @test length(plt.series_list) >= 2

    # Both resolutions must be present.
    series_lengths = [length(series.plotattributes[:x]) for series in plt.series_list]
    @test maximum(series_lengths) > minimum(series_lengths)
end

# Error handling for an invalid plot type.
let
    freqs = collect(0.1:0.1:10.0)
    powers = complex.(randn(length(freqs)), randn(length(freqs)))
    cs = create_crossspectrum(freqs, powers)

    @test_throws ErrorException plot(cs, plot_type=:invalid_type)
end