Skip to content

Commit 1b0a9a0

Browse files
committed Apr 4, 2024
Small improvements to Matrix and regression
matrix.py: defined the power operator for Matrix to perform an element-wise raise to power; added a method for the entry-wise p-norm. regression.py: improved the iteratively reweighted least-squares method.
1 parent 6b19fe0 commit 1b0a9a0

File tree

3 files changed

+39
-8
lines changed

3 files changed

+39
-8
lines changed
 

‎main.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -174,6 +174,6 @@
174174
# define the measurements
175175
y = Matrix([[0],[1],[2]])
176176

177-
theta = regression.irls(H, y, threshold = 1e-12)
177+
theta = regression.fit_irls(H, y, p=2, threshold = 1e-12)
178178

179179
print(theta)

‎mola/matrix.py

+26
Original file line numberDiff line numberDiff line change
@@ -87,6 +87,16 @@ def __iter__(self):
8787
else:
8888
return iter(self.data)
8989

90+
def __pow__(self, exponent):
    """Return a new matrix whose entries are this matrix's entries raised to *exponent* (element-wise power)."""
    height = self.get_height()
    width = self.get_width()

    powered = Matrix(height, width, 0)
    for row in range(height):
        for col in range(width):
            powered[row, col] = self.data[row][col] ** exponent
    return powered
99+
90100
def __abs__(self):
91101
"""
92102
Return the absolute value of a 1x1 matrix (i.e., a matrix with just one element).
@@ -980,6 +990,22 @@ def norm_Euclidean(self) -> float:
980990
for j in range(self.n_cols):
981991
norm = norm + pow(self.data[i][j],2)
982992
return math.sqrt(norm)
993+
994+
def norm_entrywise(self, p=2) -> float:
    """
    Return the entry-wise p-norm of the matrix: (sum over all entries of |a_ij|^p)^(1/p).

    Arguments:
    p -- float: the exponent of the norm (default 2, where the norm is the Frobenius norm)
    """
    n = self.get_height()
    m = self.get_width()
    s = 0

    for i in range(n):
        for j in range(m):
            # accumulate |a_ij|^p only; the 1/p root belongs to the whole sum,
            # not to each entry (the original applied it per entry, which for
            # p=2 yielded sum(|a_ij|) instead of the Frobenius norm)
            s += abs(self.data[i][j])**p
    return s**(1/p)
9831009

9841010
# doesn't seem to work
9851011
def get_dominant_eigenvector(self):

‎mola/regression.py

+12-7
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
from mola.matrix import Matrix
2-
from mola.utils import identity, ones, zeros, randoms, covmat, diag
2+
from mola.utils import identity, ones, zeros, randoms, diag
33
from copy import deepcopy
44

55
def linear_least_squares(H: Matrix, z: Matrix, W=None):
@@ -23,35 +23,40 @@ def linear_least_squares(H: Matrix, z: Matrix, W=None):
2323
th_tuple = (th.get(0,0), th.get(1,0))
2424
return th_tuple
2525

26-
def irls(H: Matrix, z: Matrix, threshold = 1e-6):
26+
def fit_irls(H: Matrix, z: Matrix, p: int = 2, threshold: float = 1e-6):
    """
    Return the iteratively reweighted least squares (IRLS) estimate of the parameters of a model defined by observation matrix H and dependent values z.

    Arguments:
    H -- Matrix: the observation matrix of the linear system of equations
    z -- Matrix: the observed or dependent values depicting the right side of the linear system of equations
    p -- float: norm exponential (default 2)
    threshold -- float: the maximum difference between two consecutive estimate sets to break from iteration
    """

    # initialize the estimate using ordinary least squares
    th = ((H.get_transpose())*H).get_inverse() * H.get_transpose() * z

    difference = float('inf')

    while difference > threshold:
        th_previous = th

        # IRLS weights are |residual|^(p-2); for p = 2 the weights are all 1
        # and each iteration reduces to ordinary least squares
        # NOTE(review): for p < 2 a zero residual makes |r|^(p-2) diverge --
        # a small floor (e.g. 1e-5) on the absolute residuals would guard
        # against this; confirm the Matrix API offers an element-wise max
        residuals = ((H*th-z).get_absolute_matrix())**(p-2)

        # formulate weighting matrix W as the diagonal of the residuals
        W = diag(residuals)

        # re-estimate the parameters using generalized least squares
        th = ((H.get_transpose())*W*H).get_inverse() * H.get_transpose() * W * z

        # convergence is measured by the entry-wise p-norm of the change
        # between consecutive estimates
        difference = (th-th_previous).norm_entrywise(p)

    th_tuple = tuple(th.get_column(0))
    return th_tuple

0 commit comments

Comments
 (0)
Please sign in to comment.