

This sub-module provides routines for computing features on univariate time series.

-Many functions are improved version of PyEEG [PYEEG]_ functions.
-Here is a comprehensive list of functions:
+Many functions are improved versions of PyEEG [PYEEG]_ functions. Be careful:
+some functions will give different results compared to PyEEG, as the maths have been changed to match the original definitions.
+Have a look at the documentation notes / source code to know more.
+
+Here is a list of the functions that were reimplemented:
+
+* Approximate entropy :func:`~pyrem.univariate.ap_entropy` [RIC00]_
+* Fisher information :func:`~pyrem.univariate.fisher_info` [PYEEG]_
+* Higuchi fractal dimension :func:`~pyrem.univariate.hfd` [HIG88]_
+* Hjorth parameters :func:`~pyrem.univariate.hjorth` [HJO70]_
+* Petrosian fractal dimension :func:`~pyrem.univariate.pfd` [PET95]_
+* Sample entropy :func:`~pyrem.univariate.samp_entropy` [RIC00]_
+* Singular value decomposition entropy :func:`~pyrem.univariate.svd_entropy` [PYEEG]_
+* Spectral entropy :func:`~pyrem.univariate.spectral_entropy` [PYEEG]_

-todo


.. [PET95] A. Petrosian, Kolmogorov complexity of finite sequences and recognition of different preictal EEG patterns, in ,
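
As a quick illustration of the API listed above, a minimal sketch computing a few of these features on a toy signal. The signatures of ``hfd(a, k_max)`` and ``spectral_entropy(a, sampling_freq, bands=None)`` appear in the diff below; calling ``pfd`` with only the series, and the choice of ``k_max=8``, are assumptions made for illustration::

    import numpy as np
    from pyrem.univariate import hfd, pfd, spectral_entropy

    # A toy univariate time series: 10 s of white noise sampled at 256 Hz.
    sampling_freq = 256.0
    a = np.random.normal(size=int(10 * sampling_freq))

    # hfd(a, k_max) and spectral_entropy(a, sampling_freq, bands=None) follow the
    # signatures visible in this diff; pfd(a) is assumed to take only the series.
    print("Petrosian FD:", pfd(a))
    print("Higuchi FD:", hfd(a, k_max=8))
    print("Spectral entropy:", spectral_entropy(a, sampling_freq))
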
@@ -455,66 +466,6 @@ def spectral_entropy(a, sampling_freq, bands=None):

    return - np.sum(power_per_band * np.log2(power_per_band))

-def dfa(X, Ave = None, L = None, sampling= 1):
-    """
-    WIP on this function. It is basicaly copied and pasted from [PYEEG]_, without verification of the maths or unittests.
-    """
-    X = np.array(X)
-    if Ave is None:
-        Ave = np.mean(X)
-    Y = np.cumsum(X)
-    Y -= Ave
-    if not L:
-        max_power = np.int(np.log2(len(X)))-4
-        L = X.size / 2 ** np.arange(4,max_power)
-    if len(L)<2:
-        raise Exception("Too few values for L. Time series too short?")
-    F = np.zeros(len(L)) # F(n) of different given box length n
-
-    for i,n in enumerate(L):
-        sampled = 0
-        for j in xrange(0,len(X) -n ,n):
-
-            if np.random.rand() < sampling:
-                F[i] += np.polyfit(np.arange(j,j+n), Y[j:j+n],1, full=True)[1]
-                sampled += 1
-        if sampled > 0:
-            F[i] /= float(sampled)
-
-    LF = np.array([(l,f) for l,f in zip(L,F) if l>0]).T
-
-    F = np.sqrt(LF[1])
-    Alpha = np.polyfit(np.log(LF[0]), np.log(F),1)[0]
-    return Alpha
-
-def hurst(signal):
-    """
-    **Experimental**/untested implementation taken from:
-    http://drtomstarke.com/index.php/calculation-of-the-hurst-exponent-to-test-for-trend-and-mean-reversion/
-
-    Use at your own risks.
-    """
-    tau = []; lagvec = []
-
-    # Step through the different lags
-    for lag in range(2,20):
-
-        # produce price difference with lag
-        pp = np.subtract(signal[lag:],signal[:-lag])
-
-        # Write the different lags into a vector
-        lagvec.append(lag)
-
-        # Calculate the variance of the difference vector
-        tau.append(np.std(pp))
-
-    # linear fit to double-log graph (gives power)
-    m = np.polyfit(np.log10(lagvec),np.log10(tau),1)
-
-    # calculate hurst
-    hurst = m[0]
-
-    return hurst


def hfd(a, k_max):
@@ -596,3 +547,66 @@ def hfd(a, k_max):



+def dfa(X, Ave=None, L=None, sampling=1):
+    """
+    WIP on this function. It is basically copied and pasted from [PYEEG]_, without verification of the maths or unittests.
+    """
+    X = np.array(X)
+    if Ave is None:
+        Ave = np.mean(X)
+    Y = np.cumsum(X)
+    Y -= Ave
+    if L is None:
+        max_power = int(np.log2(len(X))) - 4
+        L = X.size // 2 ** np.arange(4, max_power)
+    if len(L) < 2:
+        raise Exception("Too few values for L. Time series too short?")
+    F = np.zeros(len(L))  # F(n) of different given box length n
+
+    for i, n in enumerate(L):
+        sampled = 0
+        for j in range(0, len(X) - n, n):
+
+            if np.random.rand() < sampling:
+                F[i] += np.polyfit(np.arange(j, j + n), Y[j:j + n], 1, full=True)[1]
+                sampled += 1
+        if sampled > 0:
+            F[i] /= float(sampled)
+
+    LF = np.array([(l, f) for l, f in zip(L, F) if l > 0]).T
+
+    F = np.sqrt(LF[1])
+    Alpha = np.polyfit(np.log(LF[0]), np.log(F), 1)[0]
+    return Alpha
+
+def hurst(signal):
+    """
+    **Experimental**/untested implementation taken from:
+    http://drtomstarke.com/index.php/calculation-of-the-hurst-exponent-to-test-for-trend-and-mean-reversion/
+
+    Use at your own risk.
+    """
+    tau = []; lagvec = []
+
+    # Step through the different lags
+    for lag in range(2, 20):
+
+        # produce the lagged difference of the signal
+        pp = np.subtract(signal[lag:], signal[:-lag])
+
+        # Write the different lags into a vector
+        lagvec.append(lag)
+
+        # Calculate the standard deviation of the difference vector
+        tau.append(np.std(pp))
+
+    # linear fit to double-log graph (gives power)
+    m = np.polyfit(np.log10(lagvec), np.log10(tau), 1)
+
+    # the Hurst exponent is the slope of the fit
+    hurst = m[0]
+
+    return hurst
+
+
+
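
A minimal sketch of how the relocated ``dfa`` could be exercised, assuming the module is importable as ``pyrem.univariate``. The function's own docstring flags the maths as unverified, so the returned exponent should be sanity-checked against a generator with a known scaling exponent rather than read as a textbook DFA value::

    import numpy as np
    from pyrem.univariate import dfa

    np.random.seed(42)
    x = np.random.normal(size=4096)   # a toy series: white noise

    # Default box lengths are derived from the series length ...
    print("DFA exponent:", dfa(x))

    # ... or they can be supplied explicitly through the L argument.
    print("DFA exponent (explicit boxes):", dfa(x, L=[16, 32, 64, 128, 256]))
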
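And a similar sketch for ``hurst``: for a random-walk-like path, the spread of the lagged differences grows roughly like the square root of the lag, so the fitted slope should land near 0.5. Reading larger values as trending and smaller ones as mean-reverting behaviour follows the linked post, not an independent check, so it is worth validating on a known generator before relying on the output::

    import numpy as np
    from pyrem.univariate import hurst

    np.random.seed(0)

    # A random walk (cumulated white noise): std(signal[lag:] - signal[:-lag])
    # scales roughly as lag**0.5, so the log-log slope should be close to 0.5.
    walk = np.cumsum(np.random.normal(size=5000))
    print("Hurst exponent:", hurst(walk))
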