|
5 | 5 | elif which[0] == "numeric":
|
6 | 6 | from MLab import *
|
7 | 7 | elif which[0] == "scipy":
|
8 |
| - from scipy import * |
9 |
| - from scipy.basic.linalg import svd, eig |
10 |
| - try: |
11 |
| - from scipy.integrate import trapz |
12 |
| - from scipy.signal.signaltools import \ |
13 |
| - hanning, kaiser, blackman, bartlett, \ |
14 |
| - hamming |
15 |
| - from scipy.special import sinc |
16 |
| - except ImportError: |
17 |
| - pass |
18 |
| - |
19 |
def mean(m,axis=0):
    """mean(m,axis=0) returns the mean of m along the given dimension.

    If m is of integer type, returns a floating point answer.
    """
    data = asarray(m)
    # Dividing by a float forces a floating-point result for int input.
    length = float(data.shape[axis])
    return add.reduce(data, axis) / length
25 |
| - |
26 |
| - |
27 |
def std(m,axis=0):
    """std(m,axis=0) returns the standard deviation along the given
    dimension of m.  The result is unbiased with division by N-1.
    If m is of integer type returns a floating point answer.
    """
    data = asarray(m)
    count = float(data.shape[axis])
    # Per-slice means along `axis` (same computation as the module-level
    # mean()), kept as an array so we can reshape it below.
    avg = asarray(add.reduce(data, axis) / count)
    # Normalize a negative axis to its positive equivalent.
    ax = axis if axis >= 0 else len(data.shape) + axis
    # Re-insert the reduced dimension as length 1 so the mean broadcasts
    # against the original data when subtracting.
    avg.shape = avg.shape[:ax] + (1,) + avg.shape[ax:]
    deviations = data - avg
    # Unbiased estimate: divide the summed squares by N-1.
    return sqrt(add.reduce(deviations * deviations, ax) / (count - 1.0))
40 |
| - |
41 |
def cov(m,y=None, rowvar=0, bias=0):
    """Estimate the covariance matrix.

    If m is a vector, return the variance.  For matrices, with the
    default rowvar=0, each row is an observation and each column a
    variable, and the covariance matrix of the columns is returned;
    diag(cov(m)) is then the vector of per-column variances.

    cov(m) is the same as cov(m, m).

    Normalization is by (N-1) where N is the number of observations
    (unbiased estimate).  If bias is 1 then normalization is by N.

    If rowvar is nonzero, the roles are swapped: each row is a
    variable with observations in the columns.

    Raises ValueError if m and y do not have the same number of
    observations.
    """
    if y is None:
        # cov(m) is cov(m, m).
        y = m
    if rowvar:
        # Variables are in rows: flip so the code below can assume
        # observations-in-rows.
        m = transpose(m)
        y = transpose(y)
    # A 1xN matrix is really a vector of N observations.
    if (m.shape[0] == 1):
        m = transpose(m)
    if (y.shape[0] == 1):
        y = transpose(y)
    N = m.shape[0]
    if (y.shape[0] != N):
        raise ValueError("x and y must have the same number of observations.")
    # Center each column about its mean (same arithmetic as the
    # module-level mean(..., axis=0); y.shape[0] == N was checked above).
    m = m - add.reduce(m, 0) / float(N)
    y = y - add.reduce(y, 0) / float(N)
    fact = float(N) if bias else N - 1.0
    # (vars x obs) . (obs x vars) -> vars x vars; squeeze collapses the
    # 1x1 result of the vector case to a scalar.
    val = squeeze(dot(transpose(m), conjugate(y)) / fact)
    return val
80 |
| - |
81 |
def bartlett(M):
    """bartlett(M) returns the M-point Bartlett (triangular) window.

    For M == 1 the single point is 1.0 (degenerate window); the ramp
    formula would otherwise divide by M-1 == 0.  M <= 0 yields an
    empty array.
    """
    n = arange(0, M)
    if M == 1:
        # n is array([0]) here; n + 1.0 gives the one-point window [1.0].
        return n + 1.0
    # Rising ramp 2n/(M-1) up to the midpoint, falling ramp after it.
    return where(less_equal(n, (M-1)/2.0), 2.0*n/(M-1), 2.0-2.0*n/(M-1))
86 |
| - |
87 |
def hanning(M):
    """hanning(M) returns the M-point Hanning window.

    For M == 1 a single 1.0 is returned; the cosine formula would
    otherwise divide by M-1 == 0.  M <= 0 yields an empty array.
    """
    n = arange(0, M)
    if M == 1:
        # n is array([0]) here; n + 1.0 gives the one-point window [1.0].
        return n + 1.0
    return 0.5 - 0.5*cos(2.0*pi*n/(M-1))
92 |
| - |
93 |
def hamming(M):
    """hamming(M) returns the M-point Hamming window.

    For M == 1 a single 1.0 is returned; the cosine formula would
    otherwise divide by M-1 == 0.  M <= 0 yields an empty array.
    """
    n = arange(0, M)
    if M == 1:
        # n is array([0]) here; n + 1.0 gives the one-point window [1.0].
        return n + 1.0
    return 0.54 - 0.46*cos(2.0*pi*n/(M-1))
98 |
| - |
99 |
def sinc(x):
    """sinc(x) returns sin(pi*x)/(pi*x) at all points of array x.

    The singularity at x == 0 is dodged by substituting a tiny
    nonzero value there; for such a tiny argument the ratio
    evaluates to 1.0 in floating point.
    """
    safe = where(x == 0, 1.0e-20, x)
    z = pi * safe
    return sin(z) / z
104 |
| - |
105 |
def msort(a):
    """msort(a) returns a copy of a sorted along its first axis
    (axis 0), i.e. each column sorted independently."""
    return sort(a, 0)
| 8 | + from scipy.base.mlab import * |
107 | 9 | else:
|
108 | 10 | raise RuntimeError("invalid numerix selector")
|
109 | 11 |
|
110 |
| -if which[0] != "scipy": |
111 |
| - # for easy access to these functions w/o clobbering builtins; |
112 |
| - # scipy already has amin, amax |
113 |
| - amin = min |
114 |
| - amax = max |
| 12 | +amin = min |
| 13 | +amax = max |
0 commit comments