
WIP

commit 0e465807b1dec41f6e7fe367ea377c3fe3c3b61f (1 parent: 1779513), committed by @hmmr
configure.in (13 changed lines)
@@ -18,11 +18,6 @@ AC_LANG([C++])
AC_CXX_HAVE_STL
AX_CXX_CHECK_FLAG([-std=c++0x],,,,[AC_MSG_ERROR( [g++ does not support -std=c++0x], 2)])
-AC_OPENMP
-if test x$OPENMP_CXXFLAGS != x; then
- OPENMP_LDADD="-lgomp"
-fi
-
AX_BOOST_BASE([1.41])
export PKG_CONFIG_PATH=/usr/local/lib/pkgconfig/:~/lib/pkgconfig:$PKG_CONFIG_PATH
@@ -44,9 +39,13 @@ PKG_CHECK_MODULES([AGHUI],
AC_CHECK_LIB(
fftw3_omp, fftw_init_threads,
- [],
+ [AC_DEFINE([HAVE_LIBFFTW3_OMP], [], [we have fftw3 omp-enabled])
+ AC_SUBST(fftw3_ldadd, ["-lfftw3_omp -lpthread"])
+ LIBFFTW3_LDADD="-lfftw3_omp -lpthread"
+ ],
[],
[-lpthread -lfftw3_omp -lfftw3])
+AC_SUBST(LIBFFTW3_LDADD, $LIBFFTW3_LDADD)
AC_PROG_LIBTOOL
@@ -82,5 +81,5 @@ AC_OUTPUT([
echo \
"
-${PACKAGE} configured to build using $main_fp_type as main floating-point type
+${PACKAGE} configured with $main_fp_type as main floating-point type
"
libsigfile.pc.in (2 changed lines)
@@ -9,6 +9,6 @@ Version: @VERSION@
URL: johnhommer.com/academic/code/aghermann
Requires:
Conflicts:
-Libs: -L${libdir} -lsigfile -lfftw3 @FFTW3_LDADD@
+Libs: -L${libdir} -lsigfile -lfftw3 @fftw3_ldadd@
Libs.private:
Cflags:
src/Makefile.am (2 changed lines)
@@ -57,4 +57,4 @@ aghermann_LDADD = \
libexstrom/libexstrom.la \
libsigfile/libsigfile.la \
$(AGHCORE_LIBS) $(AGHSIGFILE_LIBS) $(AGHICA_LIBS) $(AGHUI_LIBS) \
- $(OPENMP_LDADD) $(FFTW3_LDADD) $(GSL_LIBS)
+ $(OPENMP_LDADD) $(LIBFFTW3_LDADD) $(GSL_LIBS)
src/libagh/primaries.cc (19 changed lines)
@@ -175,7 +175,7 @@ agh::CExpDesign::enumerate_eeg_channels() const
for ( auto &F : E.sources ) {
auto hh = F.channel_list();
for ( auto &H : hh )
- if ( sigfile::SChannel::signal_type_is_fftable(H) )
+ if ( sigfile::SChannel::channel_is_fftable(H) )
recp.push_back( H);
}
recp.sort();
@@ -252,8 +252,8 @@ agh::CSubject::SEpisode::SEpisode( sigfile::CSource&& Fmc,
const sigfile::SFFTParamSet& fft_params)
{
// move it in place
- fprintf( stderr, "CSubject::SEpisode::SEpisode( file: \"%s\", type: %d, J: \"%s\", E: \"%s\", D: \"%s\")\n",
- Fmc.filename(), (int)Fmc.type(), Fmc.subject(), Fmc.episode(), Fmc.session());
+ // fprintf( stderr, "CSubject::SEpisode::SEpisode( file: \"%s\", type: %d, J: \"%s\", E: \"%s\", D: \"%s\")\n",
+ // Fmc.filename(), (int)Fmc.type(), Fmc.subject(), Fmc.episode(), Fmc.session());
sources.emplace_back( static_cast<sigfile::CSource&&>(Fmc));
auto& F = sources.back();
auto HH = F.channel_list();
@@ -291,11 +291,9 @@ int
agh::CSubject::SEpisodeSequence::add_one( sigfile::CSource&& Fmc, const sigfile::SFFTParamSet& fft_params,
float max_hours_apart)
{
- FAFA;
auto Ei = find( episodes.begin(), episodes.end(),
Fmc.episode());
- FAFA;
if ( Ei == episodes.end() ) {
// ensure the newly added episode is well-placed
for ( auto &E : episodes ) {
@@ -307,17 +305,14 @@ agh::CSubject::SEpisodeSequence::add_one( sigfile::CSource&& Fmc, const sigfile:
return AGH_EPSEQADD_OVERLAP;
}
// or is not too far off
- FAFA;
if ( episodes.size() > 0 &&
episodes.begin()->sources.size() > 0 &&
fabs( difftime( episodes.begin()->sources.begin()->start_time(), Fmc.start_time())) / 3600 > max_hours_apart )
return AGH_EPSEQADD_TOOFAR;
- FAFA;
fprintf( stderr, "CSubject::SEpisodeSequence::add_one( file: \"%s\", J: \"%s\", E: \"%s\", D: \"%s\")\n",
Fmc.filename(), Fmc.subject(), Fmc.episode(), Fmc.session());
episodes.emplace_back( static_cast<sigfile::CSource&&>(Fmc), fft_params);
- FAFA;
episodes.sort();
} else { // same as SEpisode() but done on an existing one
@@ -334,7 +329,6 @@ agh::CSubject::SEpisodeSequence::add_one( sigfile::CSource&& Fmc, const sigfile:
// no new episode added: don't sort
}
- FAFA;
// compute start_rel and end_rel
// do it for all episodes over again (necessary if the newly added episode becomes the new first)
SEpisode &e0 = episodes.front();
@@ -418,8 +412,8 @@ agh::CExpDesign::register_intree_source( sigfile::CSource&& F,
J = &*Ji;
// insert/update episode observing start/end times
- fprintf( stderr, "CExpDesign::register_intree_source( file: \"%s\", J: \"%s\", E: \"%s\", D: \"%s\")\n",
- F.filename(), F.subject(), F.episode(), F.session());
+ // fprintf( stderr, "CExpDesign::register_intree_source( file: \"%s\", J: \"%s\", E: \"%s\", D: \"%s\")\n",
+ // F.filename(), F.subject(), F.episode(), F.session());
switch ( J->measurements[F.session()].add_one(
(sigfile::CSource&&)F, fft_params) ) { // this will do it
case AGH_EPSEQADD_OVERLAP:
@@ -433,7 +427,6 @@ agh::CExpDesign::register_intree_source( sigfile::CSource&& F,
log_message( string(F.filename()) + " not added as it is too far removed from the rest\n");
return -1;
default:
- FAFA;
return 0;
}
// fprintf( stderr, "CExpDesign::register_intree_source(\"%s\"): ok\n", toparse());
@@ -530,7 +523,7 @@ edf_file_processor( const char *fname, const struct stat *st, int flag, struct F
++__cur_edf_file;
only_progress_fun( fname, __n_edf_files, __cur_edf_file);
try {
- sigfile::CSource f_tmp {fname, (int)__expdesign->fft_params.page_size};
+ sigfile::CSource f_tmp {fname, __expdesign->fft_params.page_size};
string st = f_tmp.explain_status();
if ( st.size() )
__expdesign->log_message( string (fname) + ": "+ st + '\n');
src/libsigfile/Makefile.am (2 changed lines)
@@ -14,6 +14,7 @@ libsigfile_la_SOURCES = \
source.cc \
source.hh \
edf.cc \
+ edf.ii \
edf.hh \
page.cc \
page.hh \
@@ -27,6 +28,7 @@ libsigfileinc_HEADERS = \
channel.hh \
source-base.hh \
source.hh \
+ edf.ii \
edf.hh \
page.hh \
psd.hh
src/libsigfile/channel.hh (5 changed lines)
@@ -76,6 +76,11 @@ struct SChannel
{
return signal_type == TType::eeg;
}
+ static bool channel_is_fftable( const string& H)
+ {
+ return signal_type_is_fftable(
+ signal_type_of_channel(H));
+ }
int compare( const char *a, const char *b) __attribute__ ((pure));
};
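
(A minimal usage sketch of the new helper. The channel label is illustrative; whether a given label classifies as an FFT-able type is decided by signal_type_of_channel, which is outside this diff.)

    #include "channel.hh"

    // channel_is_fftable(H) is shorthand for
    // signal_type_is_fftable( signal_type_of_channel( H)), per the hunk above.
    bool fftable = sigfile::SChannel::channel_is_fftable( "Fp1");  // presumably true for an EEG label
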
src/libsigfile/edf.cc (2 changed lines)
@@ -207,7 +207,6 @@ sigfile::CEDFFile::CEDFFile( const char *fname)
sigfile::CEDFFile::CEDFFile( CEDFFile&& rv)
: CSource_base ((CSource_base&&)rv)
{
- FAFA;
header = rv.header;
n_data_records = rv.n_data_records;
data_record_size = rv.data_record_size;
@@ -227,7 +226,6 @@ sigfile::CEDFFile::CEDFFile( CEDFFile&& rv)
_total_samples_per_record = rv._total_samples_per_record;
_mmapping = rv._mmapping;
- FAFA;
rv._mmapping = (void*)-1; // will prevent munmap in ~CEDFFile()
}
src/libsigfile/edf.hh (239 changed lines)
@@ -546,244 +546,7 @@ class CEDFFile
};
- // extern template sigfile::SChannel::TType sigfile::CEDFFile::signal_type<int>( int);
- // template sigfile::SChannel::TType sigfile::CEDFFile::signal_type( const char*);
-
-
-
-
-
-template <class A>
-valarray<TFloat>
-CEDFFile::get_region_original_( A h,
- size_t sa, size_t sz) const
-{
- valarray<TFloat> recp;
- if ( unlikely (_status & (TStatus::bad_header | TStatus::bad_version)) ) {
- fprintf( stderr, "CEDFFile::get_region_original(): broken source \"%s\"\n", filename());
- return recp;
- }
- if ( sa >= sz || sz > samplerate(h) * recording_time() ) {
- fprintf( stderr, "CEDFFile::get_region_original() for \"%s\": bad region (%zu, %zu)\n",
- filename(), sa, sz);
- return recp;
- }
-
- const SSignal& H = (*this)[h];
- size_t r0 = ( sa) / H.samples_per_record,
- r_cnt = (size_t) ceilf( (float)(sz-sa) / H.samples_per_record);
-
- int16_t* tmp;
- tmp = (int16_t*)malloc( r_cnt * H.samples_per_record * 2); // 2 is sizeof(sample) sensu edf
-
- while ( r_cnt-- )
- memcpy( &tmp[ r_cnt * H.samples_per_record ],
-
- (char*)_mmapping + _data_offset
- + (r0 + r_cnt) * _total_samples_per_record * 2 // full records before
- + H._at * 2, // offset to our samples
-
- H.samples_per_record * 2); // our precious ones
-
- recp.resize( sz - sa);
-
- // repackage for shipping
- size_t sa_off = sa - r0 * H.samples_per_record;
- for ( size_t s = 0; s < recp.size(); ++s )
- recp[s] = tmp[sa_off + s];
-
- // and scale
- recp *= H.scale;
-
- free( tmp);
-
- return recp;
-}
-
-
-
-template <class Th>
-valarray<TFloat>
-CEDFFile::get_region_filtered_( Th h,
- size_t smpla, size_t smplz) const
-{
- valarray<TFloat> recp =
- get_region_original( h, smpla, smplz);
- if ( recp.size() == 0 )
- return valarray<TFloat> (0);
- // and zeromean
- recp -= (recp.sum() / recp.size());
-
- const SSignal& H = (*this)[h];
-
- // artifacts
- size_t this_samplerate = H.samples_per_record / data_record_size;
- for ( auto &A : H.artifacts() ) {
- size_t run = A.second - A.first,
- window = min( run, this_samplerate),
- t;
- valarray<TFloat>
- W (run);
-
- if ( run > window ) {
- // construct a vector of multipliers using an INVERTED windowing function on the
- // first and last windows of the run
- size_t t0;
- for ( t = 0; t < window/2; ++t )
- W[t] = (1 - winf[(size_t)H.artifacts.dampen_window_type]( t, window));
- t0 = run-window; // start of the last window but one
- for ( t = window/2; t < window; ++t )
- W[t0 + t] = (1 - winf[(size_t)H.artifacts.dampen_window_type]( t, window));
- // AND, connect mid-first to mid-last windows (at lowest value of the window)
- TFloat minimum = winf[(size_t)H.artifacts.dampen_window_type]( window/2, window);
- W[ slice(window/2, run-window, 1) ] =
- (1. - minimum);
- } else // run is shorter than samplerate (1 sec)
- for ( t = 0; t < window; ++t )
- W[t] = (1 - winf[(size_t)H.artifacts.dampen_window_type]( t, window));
-
- // now gently apply the multiplier vector onto the artifacts
- recp[ slice(A.first, run, 1) ] *= (W * (TFloat)H.artifacts.factor);
- }
-
- // filters
- if ( H.filters.low_pass_cutoff > 0. && H.filters.high_pass_cutoff > 0. ) {
- auto tmp (exstrom::band_pass( recp, this_samplerate,
- H.filters.high_pass_cutoff, H.filters.low_pass_cutoff,
- H.filters.low_pass_order, true));
- recp = tmp;
- } else {
- if ( H.filters.low_pass_cutoff > 0. ) {
- auto tmp (exstrom::low_pass( recp, this_samplerate,
- H.filters.low_pass_cutoff, H.filters.low_pass_order, true));
- recp = tmp;
- }
- if ( H.filters.high_pass_cutoff > 0. ) {
- auto tmp (exstrom::high_pass( recp, this_samplerate,
- H.filters.high_pass_cutoff, H.filters.high_pass_order, true));
- recp = tmp;
- }
- }
-
- switch ( H.filters.notch_filter ) {
- case SFilterPack::TNotchFilter::at50Hz:
- recp = exstrom::band_stop( recp, this_samplerate,
- 48, 52, 1, true);
- break;
- case SFilterPack::TNotchFilter::at60Hz:
- recp = exstrom::band_stop( recp, this_samplerate,
- 58, 62, 1, true);
- break;
- case SFilterPack::TNotchFilter::none:
- break;
- }
-
- return recp;
-}
-
-
-
-
-
-template <class A>
-int
-CEDFFile::put_region_( A h,
- const valarray<TFloat>& src, size_t sa, size_t sz) const
-{
- if ( unlikely (_status & (TStatus::bad_header | TStatus::bad_version)) ) {
- fprintf( stderr, "CEDFFile::put_region(): broken source \"%s\"\n", filename());
- return -1;
- }
- if ( sa >= sz || sz > samplerate(h) * recording_time() ) {
- fprintf( stderr, "CEDFFile::get_region_original() for \"%s\": bad region (%zu, %zu)\n",
- filename(), sa, sz);
- return -2;
- }
-
- const SSignal& H = (*this)[h];
- size_t r0 = ( sa) / H.samples_per_record,
- r_cnt = (size_t) ceilf( (float)(sz-sa) / H.samples_per_record);
-
- valarray<TFloat> src_copy = src / H.scale;
- valarray<int16_t> tmp (r_cnt * H.samples_per_record); // 2 is sizeof(sample) sensu edf
- for ( size_t i = 0; i < (sz - sa); ++i )
- tmp[i] = src_copy[sa+i];
-
- size_t r;
- for ( r = 0; r < r_cnt - 1; ++r ) // minus one
- memcpy( (char*)_mmapping + _data_offset
- + (r0 + r) * _total_samples_per_record * 2 // full records before
- + H._at * 2, // offset to our samples
-
- &tmp[ r * H.samples_per_record ],
-
- H.samples_per_record * 2); // our precious ones
- // last record is underfull
- memcpy( (char*)_mmapping + _data_offset
- + (r0 + r) * _total_samples_per_record * 2
- + H._at * 2,
-
- &tmp[ r * H.samples_per_record ],
-
- (sz - r * H.samples_per_record) * 2);
-
- return 0;
-}
-
-
-
-template <class Th>
-int
-CEDFFile::put_signal_( Th h,
- const valarray<TFloat>& src) const
-{
- size_t src_expected_size = n_data_records * (*this)[h].samples_per_record;
- if ( src.size() > src_expected_size )
- fprintf( stderr,
- "put_signal: Source vector size (%zu) > n_samples in "
- "EDF channel (%zu): truncating source\n", src.size(), src_expected_size);
- else if ( src.size() < src_expected_size )
- fprintf( stderr,
- "put_signal: Source vector size (%zu) < n_samples in "
- "EDF channel (%zu): remainder possibly stale\n", src.size(), src_expected_size);
- return put_region(
- h, src, 0, min(src.size(), src_expected_size));
-}
-
-
-template <class Th>
-int
-CEDFFile::export_original_( Th h, const char *fname) const
-{
- valarray<TFloat> signal = get_signal_original( h);
- FILE *fd = fopen( fname, "w");
- if ( fd ) {
- for ( size_t i = 0; i < signal.size(); ++i )
- fprintf( fd, "%g\n", signal[i]);
- fclose( fd);
- return 0;
- } else
- return -1;
-}
-
-
-template <class Th>
-int
-CEDFFile::export_filtered_( Th h, const char *fname) const
-{
- valarray<TFloat> signal = get_signal_filtered( h);
- FILE *fd = fopen( fname, "w");
- if ( fd ) {
- for ( size_t i = 0; i < signal.size(); ++i )
- fprintf( fd, "%g\n", signal[i]);
- fclose( fd);
- return 0;
- } else
- return -1;
-}
-
-
-
+#include "edf.ii"
} // namespace sigfile
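
(The template bodies removed above are moved verbatim into the new edf.ii, which is then re-included at the end of the header so every translation unit still sees the definitions. A generic sketch of this header/.ii split, with illustrative names not taken from the project:)

    // widget.hh
    template <typename T>
    struct Widget {
            T frob( T) const;      // declared here ...
    };
    #include "widget.ii"           // ... defined there, but still visible in every TU

    // widget.ii
    template <typename T>
    T Widget<T>::frob( T x) const
    {
            return x;              // body kept out of the header proper for readability
    }
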
src/libsigfile/edf.ii (243 changed lines)
@@ -0,0 +1,243 @@
+// ;-*-C++-*-
+/*
+ * File name: libsigfile/edf.ii
+ * Project: Aghermann
+ * Author: Andrei Zavada <johnhommer@gmail.com>
+ * Initial version: 2011-11-21
+ *
+ * Purpose: CEDFFile (big) templated methods
+ *
+ * License: GPL
+ */
+
+
+template <class A>
+valarray<TFloat>
+CEDFFile::get_region_original_( A h,
+ size_t sa, size_t sz) const
+{
+ valarray<TFloat> recp;
+ if ( unlikely (_status & (TStatus::bad_header | TStatus::bad_version)) ) {
+ fprintf( stderr, "CEDFFile::get_region_original(): broken source \"%s\"\n", filename());
+ return recp;
+ }
+ if ( sa >= sz || sz > samplerate(h) * recording_time() ) {
+ fprintf( stderr, "CEDFFile::get_region_original() for \"%s\": bad region (%zu, %zu)\n",
+ filename(), sa, sz);
+ return recp;
+ }
+
+ const SSignal& H = (*this)[h];
+ size_t r0 = ( sa) / H.samples_per_record,
+ r_cnt = (size_t) ceilf( (float)(sz-sa) / H.samples_per_record);
+
+ int16_t* tmp;
+ tmp = (int16_t*)malloc( r_cnt * H.samples_per_record * 2); // 2 is sizeof(sample) sensu edf
+
+ while ( r_cnt-- )
+ memcpy( &tmp[ r_cnt * H.samples_per_record ],
+
+ (char*)_mmapping + _data_offset
+ + (r0 + r_cnt) * _total_samples_per_record * 2 // full records before
+ + H._at * 2, // offset to our samples
+
+ H.samples_per_record * 2); // our precious ones
+
+ recp.resize( sz - sa);
+
+ // repackage for shipping
+ size_t sa_off = sa - r0 * H.samples_per_record;
+ for ( size_t s = 0; s < recp.size(); ++s )
+ recp[s] = tmp[sa_off + s];
+
+ // and scale
+ recp *= H.scale;
+
+ free( tmp);
+
+ return recp;
+}
+
+
+
+template <class Th>
+valarray<TFloat>
+CEDFFile::get_region_filtered_( Th h,
+ size_t smpla, size_t smplz) const
+{
+ valarray<TFloat> recp =
+ get_region_original( h, smpla, smplz);
+ if ( recp.size() == 0 )
+ return valarray<TFloat> (0);
+ // and zeromean
+ recp -= (recp.sum() / recp.size());
+
+ const SSignal& H = (*this)[h];
+
+ // artifacts
+ size_t this_samplerate = H.samples_per_record / data_record_size;
+ for ( auto &A : H.artifacts() ) {
+ size_t run = A.second - A.first,
+ window = min( run, this_samplerate),
+ t;
+ valarray<TFloat>
+ W (run);
+
+ if ( run > window ) {
+ // construct a vector of multipliers using an INVERTED windowing function on the
+ // first and last windows of the run
+ size_t t0;
+ for ( t = 0; t < window/2; ++t )
+ W[t] = (1 - winf[(size_t)H.artifacts.dampen_window_type]( t, window));
+ t0 = run-window; // start of the last window but one
+ for ( t = window/2; t < window; ++t )
+ W[t0 + t] = (1 - winf[(size_t)H.artifacts.dampen_window_type]( t, window));
+ // AND, connect mid-first to mid-last windows (at lowest value of the window)
+ TFloat minimum = winf[(size_t)H.artifacts.dampen_window_type]( window/2, window);
+ W[ slice(window/2, run-window, 1) ] =
+ (1. - minimum);
+ } else // run is shorter than samplerate (1 sec)
+ for ( t = 0; t < window; ++t )
+ W[t] = (1 - winf[(size_t)H.artifacts.dampen_window_type]( t, window));
+
+ // now gently apply the multiplier vector onto the artifacts
+ recp[ slice(A.first, run, 1) ] *= (W * (TFloat)H.artifacts.factor);
+ }
+
+ // filters
+ if ( H.filters.low_pass_cutoff > 0. && H.filters.high_pass_cutoff > 0. ) {
+ auto tmp (exstrom::band_pass( recp, this_samplerate,
+ H.filters.high_pass_cutoff, H.filters.low_pass_cutoff,
+ H.filters.low_pass_order, true));
+ recp = tmp;
+ } else {
+ if ( H.filters.low_pass_cutoff > 0. ) {
+ auto tmp (exstrom::low_pass( recp, this_samplerate,
+ H.filters.low_pass_cutoff, H.filters.low_pass_order, true));
+ recp = tmp;
+ }
+ if ( H.filters.high_pass_cutoff > 0. ) {
+ auto tmp (exstrom::high_pass( recp, this_samplerate,
+ H.filters.high_pass_cutoff, H.filters.high_pass_order, true));
+ recp = tmp;
+ }
+ }
+
+ switch ( H.filters.notch_filter ) {
+ case SFilterPack::TNotchFilter::at50Hz:
+ recp = exstrom::band_stop( recp, this_samplerate,
+ 48, 52, 1, true);
+ break;
+ case SFilterPack::TNotchFilter::at60Hz:
+ recp = exstrom::band_stop( recp, this_samplerate,
+ 58, 62, 1, true);
+ break;
+ case SFilterPack::TNotchFilter::none:
+ break;
+ }
+
+ return recp;
+}
+
+
+
+
+
+template <class A>
+int
+CEDFFile::put_region_( A h,
+ const valarray<TFloat>& src, size_t sa, size_t sz) const
+{
+ if ( unlikely (_status & (TStatus::bad_header | TStatus::bad_version)) ) {
+ fprintf( stderr, "CEDFFile::put_region(): broken source \"%s\"\n", filename());
+ return -1;
+ }
+ if ( sa >= sz || sz > samplerate(h) * recording_time() ) {
+ fprintf( stderr, "CEDFFile::get_region_original() for \"%s\": bad region (%zu, %zu)\n",
+ filename(), sa, sz);
+ return -2;
+ }
+
+ const SSignal& H = (*this)[h];
+ size_t r0 = ( sa) / H.samples_per_record,
+ r_cnt = (size_t) ceilf( (float)(sz-sa) / H.samples_per_record);
+
+ valarray<TFloat> src_copy = src / H.scale;
+ valarray<int16_t> tmp (r_cnt * H.samples_per_record); // 2 is sizeof(sample) sensu edf
+ for ( size_t i = 0; i < (sz - sa); ++i )
+ tmp[i] = src_copy[sa+i];
+
+ size_t r;
+ for ( r = 0; r < r_cnt - 1; ++r ) // minus one
+ memcpy( (char*)_mmapping + _data_offset
+ + (r0 + r) * _total_samples_per_record * 2 // full records before
+ + H._at * 2, // offset to our samples
+
+ &tmp[ r * H.samples_per_record ],
+
+ H.samples_per_record * 2); // our precious ones
+ // last record is underfull
+ memcpy( (char*)_mmapping + _data_offset
+ + (r0 + r) * _total_samples_per_record * 2
+ + H._at * 2,
+
+ &tmp[ r * H.samples_per_record ],
+
+ (sz - r * H.samples_per_record) * 2);
+
+ return 0;
+}
+
+
+
+template <class Th>
+int
+CEDFFile::put_signal_( Th h,
+ const valarray<TFloat>& src) const
+{
+ size_t src_expected_size = n_data_records * (*this)[h].samples_per_record;
+ if ( src.size() > src_expected_size )
+ fprintf( stderr,
+ "put_signal: Source vector size (%zu) > n_samples in "
+ "EDF channel (%zu): truncating source\n", src.size(), src_expected_size);
+ else if ( src.size() < src_expected_size )
+ fprintf( stderr,
+ "put_signal: Source vector size (%zu) < n_samples in "
+ "EDF channel (%zu): remainder possibly stale\n", src.size(), src_expected_size);
+ return put_region(
+ h, src, 0, min(src.size(), src_expected_size));
+}
+
+
+template <class Th>
+int
+CEDFFile::export_original_( Th h, const char *fname) const
+{
+ valarray<TFloat> signal = get_signal_original( h);
+ FILE *fd = fopen( fname, "w");
+ if ( fd ) {
+ for ( size_t i = 0; i < signal.size(); ++i )
+ fprintf( fd, "%g\n", signal[i]);
+ fclose( fd);
+ return 0;
+ } else
+ return -1;
+}
+
+
+template <class Th>
+int
+CEDFFile::export_filtered_( Th h, const char *fname) const
+{
+ valarray<TFloat> signal = get_signal_filtered( h);
+ FILE *fd = fopen( fname, "w");
+ if ( fd ) {
+ for ( size_t i = 0; i < signal.size(); ++i )
+ fprintf( fd, "%g\n", signal[i]);
+ fclose( fd);
+ return 0;
+ } else
+ return -1;
+}
+
+
src/libsigfile/psd.cc (16 changed lines)
@@ -257,8 +257,8 @@ sigfile::CBinnedPower::obtain_power( const CSource& F, int sig_no,
bool force)
{
// check if we have it already
- size_t req_signature = F.artifacts( sig_no).dirty_signature();
- if ( _data.size() > 0 && (*this) == req_params
+ hash_t req_signature = F.artifacts( sig_no).dirty_signature();
+ if ( have_power() && (*this) == req_params
&& _signature == req_signature )
return 0;
@@ -305,7 +305,6 @@ sigfile::CBinnedPower::obtain_power( const CSource& F, int sig_no,
bool got_it = (_mirror_back( new_mirror_fname) == 0);
-// printf( "%s\n%s\n\n", old_mirror_fname, new_mirror_fname);
// remove previously saved power
if ( strcmp( old_mirror_fname, new_mirror_fname) )
if ( unlink( old_mirror_fname) )
@@ -314,7 +313,8 @@ sigfile::CBinnedPower::obtain_power( const CSource& F, int sig_no,
if ( got_it and not force )
return 0;
- // 0. get signal sample, truncate to n_pages
+ // 0. get signal sample; always use double not TFloat
+ // so that saved power is usable irrespective of what TFloat is today
valarray<double> S = to_vad( F.get_signal_filtered( sig_no));
// 1. dampen samples marked as artifacts
@@ -415,7 +415,7 @@ sigfile::CBinnedPower::_mirror_enable( const char *fname)
{
int fd, retval = 0;
if ( (fd = open( fname, O_RDWR | O_CREAT | O_TRUNC, 0644)) == -1 ||
- write( fd, &_data[0], _data.size() * sizeof(TFloat)) == -1 )
+ write( fd, &_data[0], _data.size() * sizeof(double)) == -1 )
retval = -1;
close( fd);
@@ -430,13 +430,11 @@ sigfile::CBinnedPower::_mirror_back( const char *fname)
try {
if ( (fd = open( fname, O_RDONLY)) == -1 )
throw -1;
- if ( read( fd, &_data[0], _data.size() * sizeof(TFloat))
- != (ssize_t)(_data.size() * sizeof(TFloat)) )
+ if ( read( fd, &_data[0], _data.size() * sizeof(double))
+ != (ssize_t)(_data.size() * sizeof(double)) )
throw -2;
-// fprintf( stderr, "CBinnedPower::_mirror_back(\"%s\") ok\n", fname);
return 0;
} catch (int ex) {
-// fprintf( stderr, "CBinnedPower::_mirror_back(\"%s\") failed\n", fname);
if ( fd != -1 ) {
close( fd);
if ( unlink( fname) )
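
(The mirror cache is now read and written as raw double regardless of how TFloat is compiled, so a cache saved by a float build stays readable by a double build. A minimal sketch of the write side, along the lines of _mirror_enable above; it assumes the cached vector is held as double and elides the unlink-on-failure handling:)

    #include <valarray>
    #include <fcntl.h>
    #include <unistd.h>

    static int save_power_cache( const char *fname, const std::valarray<double>& data)
    {
            int fd = open( fname, O_RDWR | O_CREAT | O_TRUNC, 0644);
            if ( fd == -1 )
                    return -1;
            ssize_t want = data.size() * sizeof(double);               // fixed-width on-disk format
            int rc = ( write( fd, &data[0], want) == want ) ? 0 : -1;
            close( fd);
            return rc;
    }
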
src/libsigfile/source-base.cc (3 changed lines)
@@ -60,7 +60,7 @@ sigfile::SArtifacts::clear_artifact( size_t aa, size_t az)
}
-size_t
+hash_t
sigfile::SArtifacts::dirty_signature() const
{
string sig ("a");
@@ -78,7 +78,6 @@ sigfile::CSource_base::CSource_base( CSource_base&& rv)
swap( _filename, rv._filename);
_status = rv._status;
no_save_extra_files = rv.no_save_extra_files;
- FAFA;
}
src/libsigfile/source-base.hh (2 changed lines)
@@ -105,7 +105,7 @@ struct SArtifacts {
void mark_artifact( size_t aa, size_t az);
void clear_artifact( size_t aa, size_t az);
- size_t dirty_signature() const;
+ hash_t dirty_signature() const;
};
src/libsigfile/source.cc (10 changed lines)
@@ -52,6 +52,7 @@ sigfile::CSource::CSource( const char* fname,
}
+
sigfile::CSource::CSource( CSource&& rv)
: CHypnogram (rv)
{
@@ -76,6 +77,15 @@ sigfile::CSource::CSource( CSource&& rv)
}
+sigfile::CSource::~CSource()
+{
+ if ( _obj ) {
+ if ( not _obj->no_save_extra_files ) // quirky, eh?
+ CHypnogram::save( make_fname_hypnogram());
+ delete _obj;
+ }
+}
+
sigfile::CSource::TType
src/libsigfile/source.hh (8 changed lines)
@@ -55,13 +55,7 @@ class CSource
// ctor
CSource( const char* fname, size_t pagesize);
CSource( CSource&& rv);
- ~CSource()
- {
- if ( not _obj->no_save_extra_files ) // quirky, eh?
- CHypnogram::save( make_fname_hypnogram());
- if ( _obj )
- delete _obj;
- }
+ ~CSource();
TType type() const
{
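
(The destructor moves out of line, see the source.cc hunk above, and now checks _obj before dereferencing it; the old inline version read _obj->no_save_extra_files first and only then tested _obj, which is unsafe for a moved-from CSource whose _obj has presumably been left null. A generic sketch of the moved-from-safe pattern, with illustrative names:)

    struct Impl { bool no_save = false; void save() {} };

    struct Holder {
            Impl *p = nullptr;
            Holder() : p (new Impl) {}
            Holder( Holder&& rv) : p (rv.p)  { rv.p = nullptr; }  // leave the source empty
            ~Holder()
            {
                    if ( p ) {               // guard first, then use
                            if ( !p->no_save )
                                    p->save();
                            delete p;
                    }
            }
    };
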
src/misc.hh (4 changed lines)
@@ -60,8 +60,8 @@ using namespace std;
-typedef size_t sid_type;
-typedef size_t hash_key;
+//typedef size_t sid_type;
+typedef unsigned long hash_t;
#define HASHKEY(s) (hash<std::string>()(s))
#define HASHKEY_ANY (hash<std::string>()("any"))
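
(hash_t replaces the bare size_t return type of dirty_signature(); the value itself still comes from hashing a serialized description of the artifact state, per the HASHKEY macro kept here. A sketch of that idea; the exact serialization built inside SArtifacts::dirty_signature() is not fully shown in this diff:)

    #include <string>
    #include <functional>

    typedef unsigned long hash_t;                       // as in misc.hh above
    #define HASHKEY(s) (std::hash<std::string>()(s))    // std::hash returns size_t, stored as hash_t

    // Hypothetical stand-in for SArtifacts::dirty_signature():
    hash_t signature_of( const std::string& serialized_artifact_state)
    {
            return HASHKEY( serialized_artifact_state);
    }
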
src/tools/Makefile.am (4 changed lines)
@@ -11,7 +11,7 @@ edfhed_SOURCES = \
edfhed.cc
edfhed_LDADD = \
../libsigfile/libsigfile.la \
- $(AGHSIGFILE_LIBS) $(FFTW3_LDADD) $(OPENMP_LDADD) $(GSL_LIBS)
+ $(AGHSIGFILE_LIBS) $(LIBFFTW3_LDADD) $(OPENMP_LDADD) $(GSL_LIBS)
edfhed_gtk_SOURCES = \
../libsigfile/libsigfile.la \
@@ -19,4 +19,4 @@ edfhed_gtk_SOURCES = \
../ui/misc.cc
edfhed_gtk_LDADD = \
../libsigfile/libsigfile.la \
- $(AGHSIGFILE_LIBS) $(FFTW3_LDADD) $(OPENMP_LDADD) $(GSL_LIBS) $(AGHUI_LIBS)
+ $(AGHSIGFILE_LIBS) $(LIBFFTW3_LDADD) $(OPENMP_LDADD) $(GSL_LIBS) $(AGHUI_LIBS)
View
32 src/ui/expdesign.cc
@@ -501,23 +501,21 @@ aghui::SExpDesignUI::populate_1()
for ( auto Gi = ED->groups.begin(); Gi != ED->groups.end(); ++Gi ) {
groups.emplace_back( Gi, *this); // precisely need the iterator, not object by reference
SGroupPresentation& Gp = groups.back();
- for_each( Gi->second.begin(), Gi->second.end(),
- [&] (agh::CSubject& j)
- {
- Gp.emplace_back( j, Gp);
- const SSubjectPresentation& J = Gp.back();
- if ( J.cscourse && j.have_session(*_AghDi) ) {
- auto& ee = j.measurements[*_AghDi].episodes;
- if ( not ee.empty() ) {
- if ( earliest_start == (time_t)-1 || earliest_start > ee.front().start_rel )
- earliest_start = ee.front().start_rel;
- if ( latest_end == (time_t)-1 || latest_end < ee.back().end_rel )
- latest_end = ee.back().end_rel;
- } else
- fprintf( stderr, "SExpDesignUI::populate_1(): session \"%s\", channel \"%s\" for subject \"%s\" is empty\n",
- AghD(), AghT(), j.name());
- }
- });
+ for ( auto &J : Gi->second ) {
+ Gp.emplace_back( J, Gp);
+ const SSubjectPresentation& j = Gp.back();
+ if ( j.cscourse && J.have_session(*_AghDi) ) {
+ auto& ee = J.measurements[*_AghDi].episodes;
+ if ( not ee.empty() ) {
+ if ( earliest_start == (time_t)-1 || earliest_start > ee.front().start_rel )
+ earliest_start = ee.front().start_rel;
+ if ( latest_end == (time_t)-1 || latest_end < ee.back().end_rel )
+ latest_end = ee.back().end_rel;
+ } else
+ fprintf( stderr, "SExpDesignUI::populate_1(): session \"%s\", channel \"%s\" for subject \"%s\" is empty\n",
+ AghD(), AghT(), J.name());
+ }
+ }
};
timeline_start = earliest_start;
