Commit 03fe050
export downsample to its own function for external use
JosephJHowlett committed Dec 23, 2019
1 parent 3125eb1 commit 03fe050
Showing 1 changed file with 21 additions and 14 deletions.
35 changes: 21 additions & 14 deletions strax/processing/peak_building.py
@@ -99,6 +99,25 @@ def find_peaks(hits, adc_to_pe,
yield offset


@export
@numba.jit(nopython=True, nogil=True, cache=True)
def downsample(p, buffer, samples):
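    """Downsample the waveform in buffer into p['data'].

    Modifies p in place and returns it: if the waveform does not fit,
    p['length'] is reduced and p['dt'] is scaled up by the downsampling
    factor; otherwise the buffer is copied over unchanged.

    :param p: single peak, a record with 'length', 'data' and 'dt' fields
    :param buffer: waveform buffer; the first p['length'] samples are used
    :param samples: maximum number of samples that fit in p['data']
    """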
    downs_f = int(np.ceil(p['length'] / samples))
    if downs_f > 1:
        # Compute peak length after downsampling.
        # We floor rather than ceil here, potentially cutting off
        # some samples from the right edge of the peak.
        # If we were to ceil, the peak could grow larger and
        # overlap with a subsequent peak, crashing strax later.
        new_ns = p['length'] = int(np.floor(p['length'] / downs_f))
        p['data'][:new_ns] = \
            buffer[:new_ns * downs_f].reshape(-1, downs_f).sum(axis=1)
        p['dt'] *= downs_f
    else:
        p['data'][:p['length']] = buffer[:p['length']]
    return p


@export
@numba.jit(nopython=True, nogil=True, cache=True)
def sum_waveform(peaks, records, adc_to_pe, n_channels=248):
@@ -192,19 +211,7 @@ def sum_waveform(peaks, records, adc_to_pe, n_channels=248):

# Store the sum waveform
# Do we need to downsample the swv to store it?
downs_f = int(np.ceil(p_length / sum_wv_samples))
if downs_f > 1:
    # Compute peak length after downsampling.
    # We floor rather than ceil here, potentially cutting off
    # some samples from the right edge of the peak.
    # If we were to ceil, the peak could grow larger and
    # overlap with a subsequent peak, crashing strax later.
    new_ns = p['length'] = int(np.floor(p_length / downs_f))
    p['data'][:new_ns] = \
        swv_buffer[:new_ns * downs_f].reshape(-1, downs_f).sum(axis=1)
    p['dt'] *= downs_f
else:
    p['data'][:p_length] = swv_buffer[:p_length]
p = downsample(p, swv_buffer, sum_wv_samples)

# Store the saturation count and area per channel
p['n_saturated_channels'] = p['saturated_channel'].sum()
@@ -236,4 +243,4 @@ def find_peak_groups(peaks, gap_threshold,
gap_threshold=gap_threshold,
left_extension=left_extension, right_extension=right_extension,
min_channels=1, min_area=0)
return fake_peaks['time'], strax.endtime(fake_peaks)
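
Since downsample now carries the @export decorator, it can be called on its own outside of sum_waveform, which is the stated purpose of this commit. The sketch below illustrates such a call under a few assumptions: the stand-in peak dtype, the N_SAMPLES constant, and the dummy waveform are invented for illustration, and the function is assumed to be reachable as strax.downsample after this change (otherwise import it from strax.processing.peak_building).

import numpy as np
import strax  # assumes downsample is re-exported at package level via @export

# Stand-in peak dtype with only the fields downsample() touches;
# real strax peaks carry many more fields.
N_SAMPLES = 200  # capacity of the 'data' field, analogous to sum_wv_samples
peak_dtype = np.dtype([
    ('length', np.int32),             # number of valid waveform samples
    ('data', np.float32, N_SAMPLES),  # (possibly downsampled) waveform
    ('dt', np.int32),                 # sample width in ns
])

p = np.zeros(1, dtype=peak_dtype)[0]
p['length'] = 451                     # does not fit into 200 samples
p['dt'] = 10

# Full-resolution waveform for this peak
buffer = np.ones(451, dtype=np.float32)

p = strax.downsample(p, buffer, N_SAMPLES)

# downs_f = ceil(451 / 200) = 3, so the new length is floor(451 / 3) = 150
# and the final sample of the buffer is cut off the right edge, as the
# comment in the function warns.
print(p['length'], p['dt'])           # -> 150 30
print(p['data'][:3])                  # -> [3. 3. 3.]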
