Skip to content

Commit

Permalink
Small improvements to Matrix and regression
Browse files Browse the repository at this point in the history
matrix.py:
- defined power operator for Matrix to perform elementwise raise to power
- added method for entry-wise p-norm

regression.py:
- improved iteratively reweighted LS method
  • Loading branch information
jerela committed Apr 4, 2024
1 parent 6b19fe0 commit 1b0a9a0
Show file tree
Hide file tree
Showing 3 changed files with 39 additions and 8 deletions.
2 changes: 1 addition & 1 deletion main.py
Original file line number Diff line number Diff line change
Expand Up @@ -174,6 +174,6 @@
# define the measurements
y = Matrix([[0],[1],[2]])

theta = regression.irls(H, y, threshold = 1e-12)
theta = regression.fit_irls(H, y, p=2, threshold = 1e-12)

print(theta)
26 changes: 26 additions & 0 deletions mola/matrix.py
Original file line number Diff line number Diff line change
Expand Up @@ -87,6 +87,16 @@ def __iter__(self):
else:
return iter(self.data)

def __pow__(self, exponent):
    """
    Return a new matrix whose every entry is the corresponding entry of this
    matrix raised to 'exponent' (element-wise power, not matrix power).

    Arguments:
    exponent -- the power each entry is raised to
    """
    rows = self.get_height()
    cols = self.get_width()
    result = Matrix(rows, cols, 0)
    for r in range(rows):
        for c in range(cols):
            result[r, c] = self.data[r][c] ** exponent
    return result

def __abs__(self):
"""
Return the absolute value of a 1x1 matrix (i.e., a matrix with just one element).
Expand Down Expand Up @@ -980,6 +990,22 @@ def norm_Euclidean(self) -> float:
for j in range(self.n_cols):
norm = norm + pow(self.data[i][j],2)
return math.sqrt(norm)

def norm_entrywise(self, p=2) -> float:
    """
    Return the entry-wise p-norm of the matrix: (sum over all entries of |a_ij|^p)^(1/p).

    Arguments:
    p -- float: the exponent of the norm (default 2, where the norm is the Frobenius norm)
    """
    n = self.get_height()
    m = self.get_width()
    s = 0

    for i in range(n):
        for j in range(m):
            # accumulate |a_ij|^p; the 1/p root is taken once, over the whole
            # sum — taking it per-term (as before) collapsed every p to sum(|a_ij|)
            s += abs(self.data[i][j]) ** p
    return s ** (1.0 / p)

# doesn't seem to work
def get_dominant_eigenvector(self):
Expand Down
19 changes: 12 additions & 7 deletions mola/regression.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
from mola.matrix import Matrix
from mola.utils import identity, ones, zeros, randoms, covmat, diag
from mola.utils import identity, ones, zeros, randoms, diag
from copy import deepcopy

def linear_least_squares(H: Matrix, z: Matrix, W=None):
Expand All @@ -23,35 +23,40 @@ def linear_least_squares(H: Matrix, z: Matrix, W=None):
th_tuple = (th.get(0,0), th.get(1,0))
return th_tuple

def fit_irls(H: Matrix, z: Matrix, p: int = 2, threshold: float = 1e-6):
    """
    Return the iteratively reweighted least squares (IRLS) estimate of the parameters of a model defined by observation matrix H and dependent values z.

    Arguments:
    H -- Matrix: the observation matrix of the linear system of equations
    z -- Matrix: the observed or dependent values depicting the right side of the linear system of equations
    p -- float: norm exponential (default 2)
    threshold -- float: the maximum difference between two consecutive estimate sets to break from iteration

    Returns a tuple of the estimated parameters (first column of the final estimate).
    """

    # estimate the parameters using ordinary least squares as the starting point
    th = ((H.get_transpose()) * H).get_inverse() * H.get_transpose() * z

    difference = float('inf')

    while difference > threshold:

        th_previous = th

        # weights are |residual|^(p-2); at the default p = 2 every weight is 1,
        # so the loop reproduces ordinary least squares
        # NOTE(review): a zero residual with p < 2 would divide by zero — confirm inputs
        residuals = ((H * th - z).get_absolute_matrix()) ** (p - 2)

        # formulate weighting matrix W as the diagonal of the residuals
        W = diag(residuals)

        # re-estimate the parameters using generalized least squares
        th = ((H.get_transpose()) * W * H).get_inverse() * H.get_transpose() * W * z

        # convergence is measured as the entry-wise p-norm of the change in estimates
        difference = (th - th_previous).norm_entrywise(p)

    th_tuple = tuple(th.get_column(0))
    return th_tuple
Expand Down

0 comments on commit 1b0a9a0

Please sign in to comment.