From 1f284cd3603bc130b8a12964528a2cf2f159417f Mon Sep 17 00:00:00 2001 From: Grzegorz Borkowski Date: Thu, 21 Mar 2024 22:46:00 +0100 Subject: [PATCH 1/5] simple time evolution implementation for synchrotron --- agnpy/spectra/spectra.py | 88 +++++++++++++++++++++++++++++++- agnpy/synchrotron/synchrotron.py | 11 +++- 2 files changed, 97 insertions(+), 2 deletions(-) diff --git a/agnpy/spectra/spectra.py b/agnpy/spectra/spectra.py index 6fb6f4d..c7e1224 100644 --- a/agnpy/spectra/spectra.py +++ b/agnpy/spectra/spectra.py @@ -1,9 +1,11 @@ # module containing the particle spectra import numpy as np import astropy.units as u -from astropy.constants import m_e, m_p +from astropy.units.quantity import Quantity +from astropy.constants import m_e, m_p, c from scipy.interpolate import CubicSpline import matplotlib.pyplot as plt +import math __all__ = [ @@ -250,6 +252,90 @@ def evaluate(gamma, k, p, gamma_min, gamma_max): def __call__(self, gamma): return self.evaluate(gamma, self.k, self.p, self.gamma_min, self.gamma_max) + def gamma_values(self): + return np.logspace(np.log10(self.gamma_min), np.log10(self.gamma_max), 200) + + def evaluate_time(self, time, energy_loss_function, intervals=1): + class Bin: + def __init__(self, n, gamma_start, gamma_end): + self.n = n + self.gamma_start = gamma_start + self.gamma_end = gamma_end + + gamma_values = self.gamma_values() + n_values = self.__call__(gamma_values) + bin_size_sqrt = math.sqrt(gamma_values[1]/gamma_values[0]) + original_bins = [] + + # fill bins with initial data + for index, gamma_value in enumerate(gamma_values): + n = n_values[index] + if index == 0: + gamma_start = gamma_value / bin_size_sqrt + else: + gamma_start = original_bins[index-1].gamma_end + original_bins.append(Bin(n, gamma_start, gamma_value * bin_size_sqrt)) + + unit_time_interval = time / intervals + + def gamma_recalculated_after_loss(gamma): + old_energy = (gamma * m_e * c ** 2).to("J") + energy_loss_per_time = energy_loss_function(gamma).to("J s-1") + energy_loss = (energy_loss_per_time * unit_time_interval).to("J") + new_energy = old_energy - energy_loss + if new_energy < 0: + raise ValueError( + "Energy loss formula returned value higher then original energy, for gamma " + "{:e}".format(gamma)) + new_gamma = (new_energy / (m_e * c ** 2)).value + return new_gamma + + # move and narrow down the bins according to energy loss function + translated_bins = original_bins.copy() + for r in range(intervals): + for index, bin in enumerate(translated_bins): + bin_integral = bin.n * (bin.gamma_end - bin.gamma_start) + if index == 0: + new_bin_start = gamma_recalculated_after_loss(bin.gamma_start) + else: + new_bin_start = translated_bins[index-1].gamma_end + new_bin_end = gamma_recalculated_after_loss(bin.gamma_end) + new_bin_width = new_bin_end - new_bin_start + if new_bin_width <= 0: + raise ValueError( + "Energy loss formula returned higher energy loss for bin-end than for bin-start;" + " use shorter time intervals to fix it") + new_n = bin_integral / (new_bin_width) + translated_bins[index] = (Bin(new_n, new_bin_start, new_bin_end)) + + # remap the translated bins onto the original gamma points + final_bins = [] + unit = u.Unit("cm-3") + for bin in original_bins: + final_bins.append(Bin(Quantity(0) * unit, bin.gamma_start, bin.gamma_end)) + for index, bin in enumerate(final_bins): + for translated_bin in translated_bins: + gamma_bin_min = bin.gamma_start + gamma_bin_max = bin.gamma_end + gamma_new_bin_min = translated_bin.gamma_start + gamma_new_bin_max = translated_bin.gamma_end 
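+                # add to this final bin the content of every translated bin that overlaps it;
+                # the branches below compute the overlap width when the translated bin extends
+                # past the left edge, past the right edge, or lies fully inside the final bin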
+ if gamma_new_bin_max > gamma_bin_min and gamma_new_bin_min < gamma_bin_max: + if gamma_new_bin_min < gamma_bin_min < gamma_new_bin_max: + overlap = gamma_new_bin_max - gamma_bin_min + elif gamma_new_bin_min < gamma_bin_max < gamma_new_bin_max: + overlap = gamma_bin_max - gamma_new_bin_min + elif gamma_bin_min <= gamma_new_bin_min and gamma_new_bin_max <= gamma_bin_max: + overlap = gamma_new_bin_max - gamma_new_bin_min + else: + raise AssertionError("Unexpected situation, bin has been widened instead of narrowed down") + n = overlap * translated_bin.n + final_bin = final_bins[index] + final_bin.n = final_bin.n + n + for bin in final_bins: + bin.n = bin.n / (bin.gamma_end - bin.gamma_start) + + + return gamma_values, np.array(list(map(lambda bin: bin.n.value, final_bins))) * unit + @staticmethod def evaluate_SSA_integrand(gamma, k, p, gamma_min, gamma_max): r"""Analytical integrand for the synchrotron self-absorption: diff --git a/agnpy/synchrotron/synchrotron.py b/agnpy/synchrotron/synchrotron.py index 9289030..fe21534 100644 --- a/agnpy/synchrotron/synchrotron.py +++ b/agnpy/synchrotron/synchrotron.py @@ -1,7 +1,7 @@ # module containing the synchrotron radiative process import numpy as np import astropy.units as u -from astropy.constants import e, h, c, m_e, sigma_T +from astropy.constants import e, h, c, m_e, sigma_T, mu0 from ..utils.math import axes_reshaper, gamma_e_to_integrate from ..utils.conversion import nu_to_epsilon_prime, B_to_cgs, lambda_c_e from ..radiative_process import RadiativeProcess @@ -90,6 +90,15 @@ def __init__(self, blob, ssa=False, integrator=np.trapz): self.ssa = ssa self.integrator = integrator + def electron_energy_loss_per_time(self, gamma): + # For synchrotron, energy loss dE/dt formula is: + # (4/3 * sigma_T * c * magn_energy_dens) * gamma^2 + # The part in brackets is fixed, so as an improvement we could calculate it once and cache, + # and later we could use the formula (fixed * gamma^2) + magn_energy_dens = (self.blob.B.to("T") ** 2) / (2 * mu0) + fixed = (4 / 3) * sigma_T * c * magn_energy_dens + return (fixed * gamma ** 2).to("erg/s") + @staticmethod def evaluate_tau_ssa( nu, From b11ed43ea96e9fe7e68486dcf6275f0df236f342 Mon Sep 17 00:00:00 2001 From: Grzegorz Borkowski Date: Sun, 14 Apr 2024 21:18:03 +0200 Subject: [PATCH 2/5] algorithm redesigned + test --- agnpy/spectra/spectra.py | 127 +++++++++------------------- agnpy/spectra/tests/test_spectra.py | 48 ++++++++++- agnpy/synchrotron/synchrotron.py | 7 +- 3 files changed, 93 insertions(+), 89 deletions(-) diff --git a/agnpy/spectra/spectra.py b/agnpy/spectra/spectra.py index c7e1224..5eba1d8 100644 --- a/agnpy/spectra/spectra.py +++ b/agnpy/spectra/spectra.py @@ -200,6 +200,42 @@ def plot(self, gamma=None, gamma_power=0, ax=None, **kwargs): return ax + def evaluate_time(self, time, energy_loss_function, subintervals_count=1): + unit_time_interval = time / subintervals_count + + def gamma_recalculated_after_loss(gamma): + old_energy = (gamma * m_e * c ** 2).to("J") + energy_loss_per_time = energy_loss_function(gamma).to("J s-1") + energy_loss = (energy_loss_per_time * unit_time_interval).to("J") + new_energy = old_energy - energy_loss + if np.any(new_energy < 0): + raise ValueError("Energy loss formula returned value higher then original energy") + new_gamma = (new_energy / (m_e * c ** 2)).value + return new_gamma + + gammas = self.gamma_values() + n_array = self.__call__(gammas) + + # for each gamma point create a narrow bin, calculate the energy loss for start and end of the bin, + # and scale up 
density by the bin narrowing factor + for r in range(subintervals_count): + bin_size_factor = 0.0001 + bins_width = gammas * bin_size_factor + bins_end_recalc = gamma_recalculated_after_loss(gammas + bins_width) + gammas = gamma_recalculated_after_loss(gammas) + narrowed_bins = bins_end_recalc - gammas + if np.any(narrowed_bins <= 0): + raise ValueError( + "Energy loss formula returned too big value - use shorter time intervals") + density_increase = bins_width / narrowed_bins + if np.any(density_increase < 1): + # not possible for simple scenarios because higher gammas should lose more energy, + # but maybe for more complex scenarios it could happen? then we should revise this assertion + raise AssertionError("Unexpected situation, bin has been widened instead of narrowed down") + n_array = n_array * density_increase + + return InterpolatedDistribution(gammas, n_array) + class PowerLaw(ParticleDistribution): r"""Class describing a power-law particle distribution. @@ -255,87 +291,6 @@ def __call__(self, gamma): def gamma_values(self): return np.logspace(np.log10(self.gamma_min), np.log10(self.gamma_max), 200) - def evaluate_time(self, time, energy_loss_function, intervals=1): - class Bin: - def __init__(self, n, gamma_start, gamma_end): - self.n = n - self.gamma_start = gamma_start - self.gamma_end = gamma_end - - gamma_values = self.gamma_values() - n_values = self.__call__(gamma_values) - bin_size_sqrt = math.sqrt(gamma_values[1]/gamma_values[0]) - original_bins = [] - - # fill bins with initial data - for index, gamma_value in enumerate(gamma_values): - n = n_values[index] - if index == 0: - gamma_start = gamma_value / bin_size_sqrt - else: - gamma_start = original_bins[index-1].gamma_end - original_bins.append(Bin(n, gamma_start, gamma_value * bin_size_sqrt)) - - unit_time_interval = time / intervals - - def gamma_recalculated_after_loss(gamma): - old_energy = (gamma * m_e * c ** 2).to("J") - energy_loss_per_time = energy_loss_function(gamma).to("J s-1") - energy_loss = (energy_loss_per_time * unit_time_interval).to("J") - new_energy = old_energy - energy_loss - if new_energy < 0: - raise ValueError( - "Energy loss formula returned value higher then original energy, for gamma " + "{:e}".format(gamma)) - new_gamma = (new_energy / (m_e * c ** 2)).value - return new_gamma - - # move and narrow down the bins according to energy loss function - translated_bins = original_bins.copy() - for r in range(intervals): - for index, bin in enumerate(translated_bins): - bin_integral = bin.n * (bin.gamma_end - bin.gamma_start) - if index == 0: - new_bin_start = gamma_recalculated_after_loss(bin.gamma_start) - else: - new_bin_start = translated_bins[index-1].gamma_end - new_bin_end = gamma_recalculated_after_loss(bin.gamma_end) - new_bin_width = new_bin_end - new_bin_start - if new_bin_width <= 0: - raise ValueError( - "Energy loss formula returned higher energy loss for bin-end than for bin-start;" - " use shorter time intervals to fix it") - new_n = bin_integral / (new_bin_width) - translated_bins[index] = (Bin(new_n, new_bin_start, new_bin_end)) - - # remap the translated bins onto the original gamma points - final_bins = [] - unit = u.Unit("cm-3") - for bin in original_bins: - final_bins.append(Bin(Quantity(0) * unit, bin.gamma_start, bin.gamma_end)) - for index, bin in enumerate(final_bins): - for translated_bin in translated_bins: - gamma_bin_min = bin.gamma_start - gamma_bin_max = bin.gamma_end - gamma_new_bin_min = translated_bin.gamma_start - gamma_new_bin_max = translated_bin.gamma_end - if 
gamma_new_bin_max > gamma_bin_min and gamma_new_bin_min < gamma_bin_max: - if gamma_new_bin_min < gamma_bin_min < gamma_new_bin_max: - overlap = gamma_new_bin_max - gamma_bin_min - elif gamma_new_bin_min < gamma_bin_max < gamma_new_bin_max: - overlap = gamma_bin_max - gamma_new_bin_min - elif gamma_bin_min <= gamma_new_bin_min and gamma_new_bin_max <= gamma_bin_max: - overlap = gamma_new_bin_max - gamma_new_bin_min - else: - raise AssertionError("Unexpected situation, bin has been widened instead of narrowed down") - n = overlap * translated_bin.n - final_bin = final_bins[index] - final_bin.n = final_bin.n + n - for bin in final_bins: - bin.n = bin.n / (bin.gamma_end - bin.gamma_start) - - - return gamma_values, np.array(list(map(lambda bin: bin.n.value, final_bins))) * unit - @staticmethod def evaluate_SSA_integrand(gamma, k, p, gamma_min, gamma_max): r"""Analytical integrand for the synchrotron self-absorption: @@ -815,7 +770,7 @@ class InterpolatedDistribution(ParticleDistribution): function to be used for integration, default is :class:`~numpy.trapz` """ - def __init__(self, gamma, n, norm=1, mass=m_e, integrator=np.trapz): + def __init__(self, gamma, n, norm=1, mass=m_e, integrator=np.trapz, gamma_min=None, gamma_max=None): super().__init__(mass, integrator, "InterpolatedDistribution") if n.unit != u.Unit("cm-3"): raise ValueError( @@ -827,9 +782,9 @@ def __init__(self, gamma, n, norm=1, mass=m_e, integrator=np.trapz): # scaling parameter self.norm = norm # perform the interpolation - self.log10_interp = self.log10_interpolation(gamma, n) + self.log10_interp = self.log10_interpolation(gamma, n, gamma_min, gamma_max) - def log10_interpolation(self, gamma, n): + def log10_interpolation(self, gamma, n, gamma_min=None, gamma_max=None): """Returns the function interpolating in log10 the particle spectrum as a function of the Lorentz factor. TODO: make possible to pass arguments to CubicSpline. @@ -843,8 +798,8 @@ def log10_interpolation(self, gamma, n): # min and max lorentz factor are now the first gamma values for which # the input distribution is not null - self.gamma_min = np.min(_gamma) - self.gamma_max = np.max(_gamma) + self.gamma_min = gamma_min if gamma_min else np.min(_gamma) + self.gamma_max = gamma_max if gamma_max else np.max(_gamma) return interpolator diff --git a/agnpy/spectra/tests/test_spectra.py b/agnpy/spectra/tests/test_spectra.py index 8c3332f..962f5f7 100644 --- a/agnpy/spectra/tests/test_spectra.py +++ b/agnpy/spectra/tests/test_spectra.py @@ -2,8 +2,11 @@ from pathlib import Path import numpy as np import astropy.units as u -from astropy.constants import m_e, m_p +from astropy.constants import m_e, m_p, c import pytest +from astropy.coordinates import Distance + +from agnpy import Blob, Synchrotron from agnpy.spectra import ( PowerLaw, BrokenPowerLaw, @@ -740,3 +743,46 @@ def test_interpolation_physical(self): assert u.allclose( n_e(gamma_init), 2 * n_init, atol=0 * u.Unit("cm-3"), rtol=1e-3 ) + + +class TestSpectraTimeEvolution: + + @pytest.mark.parametrize("p", [0.5, 1.2, 2.4]) + @pytest.mark.parametrize("time", [1, 60, 120] * u.s) + def test_compare_numerical_results_with_analytical_calculation(self, p, time): + """Test time evolution of spectral electron density for synchrotron energy losses. 
+ Use a simple power low spectrum for easy calculation of analytical results.""" + gamma_min = 1e2 + gamma_max = 1e7 + k = 0.1 * u.Unit("cm-3") + initial_n_e = PowerLaw(k, p, gamma_min=gamma_min, gamma_max=gamma_max, mass=m_e) + blob = Blob(1e16 * u.cm, Distance(1e27, unit=u.cm).z, 0, 10, 1 * u.G, n_e=initial_n_e) + synch = Synchrotron(blob) + evaluated_n_e = initial_n_e.evaluate_time(time, synch.electron_energy_loss_per_time, subintervals_count=50) + + def gamma_before(gamma_after_time, time): + """Reverse-time calculation of the gamma value before the synchrotron energy loss, + using formula -dE/dt ~ (E ** 2)""" + coef = synch._electron_energy_loss_formula_prefix() / (m_e * c ** 2) + return 1 / ((1 / gamma_after_time) - time * coef) + + def integral_analytical(gamma_min, gamma_max): + """Integral for the power-law distribution""" + return k * (gamma_max ** (1 - p) - gamma_min ** (1 - p)) / (1 - p) + + assert u.isclose( + gamma_before(evaluated_n_e.gamma_max, time), + gamma_max, + rtol=0.05 + ) + assert u.isclose( + evaluated_n_e.integrate(evaluated_n_e.gamma_min, evaluated_n_e.gamma_max), + integral_analytical(gamma_before(evaluated_n_e.gamma_min, time), gamma_before(evaluated_n_e.gamma_max, time)), + rtol=0.05 + ) + assert u.isclose( + # only the highest energy range where most losses occur + evaluated_n_e.integrate(evaluated_n_e.gamma_max / 10, evaluated_n_e.gamma_max), + integral_analytical(gamma_before(evaluated_n_e.gamma_max / 10, time), gamma_before(evaluated_n_e.gamma_max, time)), + rtol=0.05 + ) diff --git a/agnpy/synchrotron/synchrotron.py b/agnpy/synchrotron/synchrotron.py index fe21534..d40417c 100644 --- a/agnpy/synchrotron/synchrotron.py +++ b/agnpy/synchrotron/synchrotron.py @@ -95,10 +95,13 @@ def electron_energy_loss_per_time(self, gamma): # (4/3 * sigma_T * c * magn_energy_dens) * gamma^2 # The part in brackets is fixed, so as an improvement we could calculate it once and cache, # and later we could use the formula (fixed * gamma^2) - magn_energy_dens = (self.blob.B.to("T") ** 2) / (2 * mu0) - fixed = (4 / 3) * sigma_T * c * magn_energy_dens + fixed = self._electron_energy_loss_formula_prefix() return (fixed * gamma ** 2).to("erg/s") + def _electron_energy_loss_formula_prefix(self): + magn_energy_dens = (self.blob.B.to("T") ** 2) / (2 * mu0) + return (4 / 3) * sigma_T * c * magn_energy_dens + @staticmethod def evaluate_tau_ssa( nu, From 76ef9d0a43bc8f21f645737e662d2e4ced1470f3 Mon Sep 17 00:00:00 2001 From: Grzegorz Borkowski Date: Mon, 22 Apr 2024 22:10:49 +0200 Subject: [PATCH 3/5] fixes from PR comments --- agnpy/spectra/spectra.py | 31 ++++++++++++++++++++++------- agnpy/spectra/tests/test_spectra.py | 4 ++-- agnpy/synchrotron/synchrotron.py | 4 ++-- 3 files changed, 28 insertions(+), 11 deletions(-) diff --git a/agnpy/spectra/spectra.py b/agnpy/spectra/spectra.py index 5eba1d8..ad58373 100644 --- a/agnpy/spectra/spectra.py +++ b/agnpy/spectra/spectra.py @@ -1,11 +1,10 @@ # module containing the particle spectra import numpy as np import astropy.units as u -from astropy.units.quantity import Quantity +from agnpy.utils.conversion import mec2 from astropy.constants import m_e, m_p, c from scipy.interpolate import CubicSpline import matplotlib.pyplot as plt -import math __all__ = [ @@ -201,16 +200,34 @@ def plot(self, gamma=None, gamma_power=0, ax=None, **kwargs): return ax def evaluate_time(self, time, energy_loss_function, subintervals_count=1): + """Performs the time evaluation of energy change for this particle distribution. 
+ + Parameters + ---------- + time : `~astropy.units.Quantity` + total time for the calculation + energy_loss_function : function + thr function to be used for calculation of energy loss; + the function accepts the gamma value and produces "energy per time" quantity + subintervals_count : int + optional number defining how many equal-length subintervals the total time will be split into + + Returns + ------- + InterpolatedDistribution + a new distribution + """ unit_time_interval = time / subintervals_count def gamma_recalculated_after_loss(gamma): - old_energy = (gamma * m_e * c ** 2).to("J") - energy_loss_per_time = energy_loss_function(gamma).to("J s-1") - energy_loss = (energy_loss_per_time * unit_time_interval).to("J") + old_energy = (gamma * mec2).to("erg") + energy_loss_per_time = energy_loss_function(gamma).to("erg s-1") + energy_loss = (energy_loss_per_time * unit_time_interval).to("erg") new_energy = old_energy - energy_loss if np.any(new_energy < 0): - raise ValueError("Energy loss formula returned value higher then original energy") - new_gamma = (new_energy / (m_e * c ** 2)).value + raise ValueError( + "Energy loss formula returned value higher then original energy. Use the shorter time ranges.") + new_gamma = (new_energy / mec2).value return new_gamma gammas = self.gamma_values() diff --git a/agnpy/spectra/tests/test_spectra.py b/agnpy/spectra/tests/test_spectra.py index 962f5f7..0b20821 100644 --- a/agnpy/spectra/tests/test_spectra.py +++ b/agnpy/spectra/tests/test_spectra.py @@ -751,7 +751,7 @@ class TestSpectraTimeEvolution: @pytest.mark.parametrize("time", [1, 60, 120] * u.s) def test_compare_numerical_results_with_analytical_calculation(self, p, time): """Test time evolution of spectral electron density for synchrotron energy losses. 
- Use a simple power low spectrum for easy calculation of analytical results.""" + Use a simple power law spectrum for easy calculation of analytical results.""" gamma_min = 1e2 gamma_max = 1e7 k = 0.1 * u.Unit("cm-3") @@ -781,7 +781,7 @@ def integral_analytical(gamma_min, gamma_max): rtol=0.05 ) assert u.isclose( - # only the highest energy range where most losses occur + # synchrotron losses are highest at highest energy, so test the highest energy range, as the most affected evaluated_n_e.integrate(evaluated_n_e.gamma_max / 10, evaluated_n_e.gamma_max), integral_analytical(gamma_before(evaluated_n_e.gamma_max / 10, time), gamma_before(evaluated_n_e.gamma_max, time)), rtol=0.05 diff --git a/agnpy/synchrotron/synchrotron.py b/agnpy/synchrotron/synchrotron.py index d40417c..f9f8cd1 100644 --- a/agnpy/synchrotron/synchrotron.py +++ b/agnpy/synchrotron/synchrotron.py @@ -93,8 +93,8 @@ def __init__(self, blob, ssa=False, integrator=np.trapz): def electron_energy_loss_per_time(self, gamma): # For synchrotron, energy loss dE/dt formula is: # (4/3 * sigma_T * c * magn_energy_dens) * gamma^2 - # The part in brackets is fixed, so as an improvement we could calculate it once and cache, - # and later we could use the formula (fixed * gamma^2) + # In case of constant B, the part in brackets is fixed, so as an improvement we could calculate it once + # and cache, and later we could use the formula (fixed * gamma^2) fixed = self._electron_energy_loss_formula_prefix() return (fixed * gamma ** 2).to("erg/s") From 11f250482a4c32a1e989c8d5d084acf4c25d3844 Mon Sep 17 00:00:00 2001 From: Grzegorz Borkowski Date: Thu, 25 Apr 2024 20:18:18 +0200 Subject: [PATCH 4/5] fixes from PR comments --- agnpy/spectra/spectra.py | 25 +++++++++---------------- agnpy/synchrotron/synchrotron.py | 5 +++-- 2 files changed, 12 insertions(+), 18 deletions(-) diff --git a/agnpy/spectra/spectra.py b/agnpy/spectra/spectra.py index ad58373..8e2a9eb 100644 --- a/agnpy/spectra/spectra.py +++ b/agnpy/spectra/spectra.py @@ -199,7 +199,7 @@ def plot(self, gamma=None, gamma_power=0, ax=None, **kwargs): return ax - def evaluate_time(self, time, energy_loss_function, subintervals_count=1): + def evaluate_time(self, time, energy_loss_function, subintervals_count=10): """Performs the time evaluation of energy change for this particle distribution. Parameters @@ -226,11 +226,11 @@ def gamma_recalculated_after_loss(gamma): new_energy = old_energy - energy_loss if np.any(new_energy < 0): raise ValueError( - "Energy loss formula returned value higher then original energy. Use the shorter time ranges.") + "Energy loss formula returned value higher then original energy. Use shorter time ranges.") new_gamma = (new_energy / mec2).value return new_gamma - gammas = self.gamma_values() + gammas = np.logspace(np.log10(self.gamma_min), np.log10(self.gamma_max), 200) n_array = self.__call__(gammas) # for each gamma point create a narrow bin, calculate the energy loss for start and end of the bin, @@ -243,12 +243,8 @@ def gamma_recalculated_after_loss(gamma): narrowed_bins = bins_end_recalc - gammas if np.any(narrowed_bins <= 0): raise ValueError( - "Energy loss formula returned too big value - use shorter time intervals") + "Energy loss formula returned too big value. Use shorter time ranges.") density_increase = bins_width / narrowed_bins - if np.any(density_increase < 1): - # not possible for simple scenarios because higher gammas should lose more energy, - # but maybe for more complex scenarios it could happen? 
then we should revise this assertion - raise AssertionError("Unexpected situation, bin has been widened instead of narrowed down") n_array = n_array * density_increase return InterpolatedDistribution(gammas, n_array) @@ -305,9 +301,6 @@ def evaluate(gamma, k, p, gamma_min, gamma_max): def __call__(self, gamma): return self.evaluate(gamma, self.k, self.p, self.gamma_min, self.gamma_max) - def gamma_values(self): - return np.logspace(np.log10(self.gamma_min), np.log10(self.gamma_max), 200) - @staticmethod def evaluate_SSA_integrand(gamma, k, p, gamma_min, gamma_max): r"""Analytical integrand for the synchrotron self-absorption: @@ -787,7 +780,7 @@ class InterpolatedDistribution(ParticleDistribution): function to be used for integration, default is :class:`~numpy.trapz` """ - def __init__(self, gamma, n, norm=1, mass=m_e, integrator=np.trapz, gamma_min=None, gamma_max=None): + def __init__(self, gamma, n, norm=1, mass=m_e, integrator=np.trapz): super().__init__(mass, integrator, "InterpolatedDistribution") if n.unit != u.Unit("cm-3"): raise ValueError( @@ -799,9 +792,9 @@ def __init__(self, gamma, n, norm=1, mass=m_e, integrator=np.trapz, gamma_min=No # scaling parameter self.norm = norm # perform the interpolation - self.log10_interp = self.log10_interpolation(gamma, n, gamma_min, gamma_max) + self.log10_interp = self.log10_interpolation(gamma, n) - def log10_interpolation(self, gamma, n, gamma_min=None, gamma_max=None): + def log10_interpolation(self, gamma, n): """Returns the function interpolating in log10 the particle spectrum as a function of the Lorentz factor. TODO: make possible to pass arguments to CubicSpline. @@ -815,8 +808,8 @@ def log10_interpolation(self, gamma, n, gamma_min=None, gamma_max=None): # min and max lorentz factor are now the first gamma values for which # the input distribution is not null - self.gamma_min = gamma_min if gamma_min else np.min(_gamma) - self.gamma_max = gamma_max if gamma_max else np.max(_gamma) + self.gamma_min = np.min(_gamma) + self.gamma_max = np.max(_gamma) return interpolator diff --git a/agnpy/synchrotron/synchrotron.py b/agnpy/synchrotron/synchrotron.py index f9f8cd1..cccab94 100644 --- a/agnpy/synchrotron/synchrotron.py +++ b/agnpy/synchrotron/synchrotron.py @@ -96,11 +96,12 @@ def electron_energy_loss_per_time(self, gamma): # In case of constant B, the part in brackets is fixed, so as an improvement we could calculate it once # and cache, and later we could use the formula (fixed * gamma^2) fixed = self._electron_energy_loss_formula_prefix() - return (fixed * gamma ** 2).to("erg/s") + return fixed * gamma ** 2 def _electron_energy_loss_formula_prefix(self): + # using SI units here because of the ambiguity of the different CGS systems in terms of mu0 definition magn_energy_dens = (self.blob.B.to("T") ** 2) / (2 * mu0) - return (4 / 3) * sigma_T * c * magn_energy_dens + return ((4 / 3) * sigma_T * c * magn_energy_dens).to("erg/s") @staticmethod def evaluate_tau_ssa( From 32bd6af92a19f1218a0cc20d6feb1b8c9ff30d07 Mon Sep 17 00:00:00 2001 From: Grzegorz Borkowski Date: Thu, 25 Apr 2024 22:02:40 +0200 Subject: [PATCH 5/5] additional test --- agnpy/spectra/tests/test_spectra.py | 38 ++++++++++++++++++++++++++--- 1 file changed, 35 insertions(+), 3 deletions(-) diff --git a/agnpy/spectra/tests/test_spectra.py b/agnpy/spectra/tests/test_spectra.py index 0b20821..dc1f08d 100644 --- a/agnpy/spectra/tests/test_spectra.py +++ b/agnpy/spectra/tests/test_spectra.py @@ -748,17 +748,19 @@ def test_interpolation_physical(self): class 
TestSpectraTimeEvolution: @pytest.mark.parametrize("p", [0.5, 1.2, 2.4]) - @pytest.mark.parametrize("time", [1, 60, 120] * u.s) - def test_compare_numerical_results_with_analytical_calculation(self, p, time): + @pytest.mark.parametrize("time_and_steps", [(1, 10), (60, 60), (200, 100)]) + def test_compare_numerical_results_with_analytical_calculation(self, p, time_and_steps): """Test time evolution of spectral electron density for synchrotron energy losses. Use a simple power law spectrum for easy calculation of analytical results.""" + time = time_and_steps[0] * u.s + steps = time_and_steps[1] gamma_min = 1e2 gamma_max = 1e7 k = 0.1 * u.Unit("cm-3") initial_n_e = PowerLaw(k, p, gamma_min=gamma_min, gamma_max=gamma_max, mass=m_e) blob = Blob(1e16 * u.cm, Distance(1e27, unit=u.cm).z, 0, 10, 1 * u.G, n_e=initial_n_e) synch = Synchrotron(blob) - evaluated_n_e = initial_n_e.evaluate_time(time, synch.electron_energy_loss_per_time, subintervals_count=50) + evaluated_n_e = initial_n_e.evaluate_time(time, synch.electron_energy_loss_per_time, subintervals_count=steps) def gamma_before(gamma_after_time, time): """Reverse-time calculation of the gamma value before the synchrotron energy loss, @@ -786,3 +788,33 @@ def integral_analytical(gamma_min, gamma_max): integral_analytical(gamma_before(evaluated_n_e.gamma_max / 10, time), gamma_before(evaluated_n_e.gamma_max, time)), rtol=0.05 ) + + def test_compare_calculation_with_calculation_split_into_two(self): + initial_n_e = BrokenPowerLaw( + k=1e-8 * u.Unit("cm-3"), + p1=1.9, + p2=2.6, + gamma_b=1e4, + gamma_min=10, + gamma_max=1e6, + mass=m_e, + ) + blob = Blob(1e16 * u.cm, Distance(1e27, unit=u.cm).z, 0, 10, 1 * u.G, n_e=initial_n_e) + synch = Synchrotron(blob) + + # iterate over 60 s in 20 steps + eval_1 = initial_n_e.evaluate_time(60*u.s, synch.electron_energy_loss_per_time, subintervals_count=20) + # iterate first over 30 s, and then, starting with interpolated distribution, over the remaining 30 s, + # with slightly different number of subintervals + eval_2 = initial_n_e.evaluate_time(30 * u.s, synch.electron_energy_loss_per_time, subintervals_count=10)\ + .evaluate_time(30 * u.s, synch.electron_energy_loss_per_time, subintervals_count=8) + + gamma_min = eval_1.gamma_min + gamma_max = eval_1.gamma_max + gammas = np.logspace(np.log10(gamma_min), np.log10(gamma_max)) + assert u.allclose( + eval_1.evaluate(gammas, 1, gamma_min, gamma_max), + eval_2.evaluate(gammas, 1, gamma_min, gamma_max), + 0.001) + +
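Usage note (illustrative, not part of the patches): a minimal sketch of how the API introduced by this series fits together, mirroring the tests above. The PowerLaw parameters, blob geometry, 1 G field, 60 s evolution time and 20 subintervals are example values only.

    import numpy as np
    import astropy.units as u
    from astropy.constants import m_e
    from astropy.coordinates import Distance

    from agnpy import Blob, Synchrotron
    from agnpy.spectra import PowerLaw

    # initial electron spectrum and emission region (example values)
    n_e = PowerLaw(0.1 * u.Unit("cm-3"), 2.4, gamma_min=1e2, gamma_max=1e7, mass=m_e)
    blob = Blob(1e16 * u.cm, Distance(1e27, unit=u.cm).z, 0, 10, 1 * u.G, n_e=n_e)
    synch = Synchrotron(blob)

    # let the spectrum cool via synchrotron losses for 60 s, split into 20 sub-steps;
    # the result is an InterpolatedDistribution, so calls can be chained as in the last test
    n_e_evolved = n_e.evaluate_time(60 * u.s, synch.electron_energy_loss_per_time, subintervals_count=20)

    gammas = np.logspace(np.log10(n_e_evolved.gamma_min), np.log10(n_e_evolved.gamma_max), 50)
    print(n_e_evolved.evaluate(gammas, 1, n_e_evolved.gamma_min, n_e_evolved.gamma_max))

Because higher-gamma electrons lose energy faster (dE/dt is proportional to gamma^2), the evolved spectrum is depleted and steepened at its high-energy end, which is what the analytical cross-check in the tests exercises.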