import numpy as np

# Horizontal Vector
hv = np.array([1, 2, 3])
print("Horizontal Vector:\n", hv)
# Vertical Vector
vv = np.array([1, 2, 3]).reshape(-1, 1)
print("\nVertical Vector:\n", vv)
# Matrix from list
A = np.array([[1, 2, 0],
              [0, 0, 0],
              [0, 0, 1]])
print("\nMatrix A:\n", A)
# Matrix using np.matrix (note: np.matrix is discouraged in favour of np.array)
B = np.matrix([[1, 2, 0],
               [0, 0, 0],
               [0, 0, 1]])
print("\nMatrix B (np.matrix):\n", B)
# Taking matrix input
s = input("Enter matrix rows separated by ';' e.g. 1 2 0; 0 0 0; 0 0 1\n> ")
rows = [r.strip() for r in s.split(';')]
A = np.array([list(map(float, r.split())) for r in rows])
print("\nMatrix A:\n", A)
print("\nTranspose of A:\n", A.T)
print("\nRank of A:", np.linalg.matrix_rank(A))
def minor(A, i, j):
    """Minor M_ij: determinant of A with row i and column j deleted."""
    M = np.delete(np.delete(A, i, axis=0), j, axis=1)
    return round(np.linalg.det(M))

def cofactor_matrix(A):
    """Cofactor matrix with entries C_ij = (-1)^(i+j) * M_ij."""
    C = np.zeros_like(A)
    for i in range(len(A)):
        for j in range(len(A)):
            C[i, j] = ((-1)**(i+j)) * minor(A, i, j)
    return C
# Matrix
A = np.array([[1, 2, 0],
              [0, 0, 0],
              [0, 0, 1]])
C = cofactor_matrix(A)
print("Matrix A:\n", A)
print("\nCofactor Matrix:\n", C)
print("\nAdjugate Matrix:\n", C.T)
A = np.array([[2, 1, -1],
              [-3, -1, 2],
              [-2, 1, 2]])
B = np.array([8, -11, -3])
# Solve Ax = B
x = np.linalg.solve(A, B)
print("Solution:\n", x)
a. Find the transpose of a vector (matrix).
# Define a vector and a matrix
vector = np.array([1+2j, 3+4j, 5+6j])
matrix = np.array([[1+1j, 2+2j], [3+3j, 4+4j]])
# Transpose of a vector
# Note: .T is a no-op on a 1-D array; reshape to (-1, 1) or (1, -1) for an explicit column/row
transpose_vector = vector.T
# Transpose of a matrix
transpose_matrix = matrix.T
print("Transpose of vector:", transpose_vector)
print("Transpose of matrix:\n", transpose_matrix)
Find a basis of the column space, null space, row space and left null space.
from scipy.linalg import null_space
# Define a matrix
A = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
# Basis of column space via QR decomposition (note: without column pivoting this
# assumes the leading rank(A) columns of A are linearly independent, as here)
Q, R = np.linalg.qr(A)
basis_column_space = Q[:, :np.linalg.matrix_rank(A)]
# Basis of null space
basis_null_space = null_space(A)
# Basis of row space (column space of A transpose)
Q_row, R_row = np.linalg.qr(A.T)
basis_row_space = Q_row[:, :np.linalg.matrix_rank(A)]
# Basis of left null space (null space of A transpose)
basis_left_null_space = null_space(A.T)
print("Basis of column space:\n", basis_column_space)
print("Basis of null space:\n", basis_null_space)
print("Basis of row space:\n", basis_row_space)
print("Basis of left null space:\n", basis_left_null_space)
Q.7 Check the linear dependence of vectors. Generate a linear combination of given vectors of R^n / matrices of the same size and find the transition matrix of the given matrix space.
from scipy.linalg import lstsq
# Define vectors in R^n (same size)
a = np.array([1, 2, 3])
b = np.array([2, 4, 6])
c = np.array([1, 0, 1])
# Create matrix with vectors as columns
M = np.column_stack((a, b, c))
# Check linear dependence by matrix rank
rank = np.linalg.matrix_rank(M)
linearly_dependent = rank < M.shape[1]
# Generate linear combination: find coefficients x such that x1*a + x2*b + x3*c = target
target = np.array([3, 6, 9])
x, residuals, rank_lstsq, s = lstsq(M, target)
# Transition matrix from standard basis to basis {a,b,c} if invertible
transition_matrix_std_to_basis = None
if np.linalg.matrix_rank(M) == M.shape[0]:
    transition_matrix_std_to_basis = np.linalg.inv(M)
print("Linearly dependent:", linearly_dependent)
print("Linear combination coefficients:", x)
print("Transition matrix (standard to basis):", transition_matrix_std_to_basis)
Apply the Gram-Schmidt orthogonalization process.
def gram_schmidt(V):
    """
    Perform Gram-Schmidt orthogonalization on matrix V whose columns are vectors.
    Returns an orthonormal basis as the columns of a matrix.
    """
    n, k = V.shape
    U = np.zeros((n, k))
    for i in range(k):
        vec = V[:, i]
        # Subtract the projection onto each previously computed orthonormal vector
        for j in range(i):
            proj = np.dot(U[:, j], V[:, i]) * U[:, j]
            vec = vec - proj
        norm = np.linalg.norm(vec)
        if norm < 1e-10:
            U[:, i] = 0  # vector is (numerically) dependent on the previous ones
        else:
            U[:, i] = vec / norm
    # Remove zero columns if any
    U = U[:, np.linalg.norm(U, axis=0) > 1e-10]
    return U
# Example: vectors as columns of a matrix
V = np.array([[1, 1, 1], [1, 0, 2], [1, 2, 3]]).T
orthonormal_basis = gram_schmidt(V)
print("Orthonormal basis:\n", orthonormal_basis)
Find the eigenvalues and eigenvectors, check diagonalizability, and verify the Cayley-Hamilton theorem.
# Define a matrix
A = np.array([[4, 1],
              [2, 3]])
# Compute eigenvalues and eigenvectors
values, vectors = np.linalg.eig(A)
# Check if matrix is diagonalizable (eigenvectors are linearly independent)
rank_vectors = np.linalg.matrix_rank(vectors)
diagonalizable = rank_vectors == A.shape[0]
# Construct diagonal matrix of eigenvalues
D = np.diag(values)
# Verify Cayley-Hamilton theorem:
# For 2x2 matrix, characteristic polynomial: p(A) = A^2 - trace(A)*A + det(A)*I
trace_A = np.trace(A)
det_A = np.linalg.det(A)
I = np.eye(A.shape[0])
A2 = np.linalg.matrix_power(A, 2)
cayley_hamilton_result = A2 - trace_A * A + det_A * I
# Check if result is approximately zero matrix
verify_cayley_hamilton = np.allclose(cayley_hamilton_result, np.zeros_like(A))
print("Eigenvalues:", values)
print("Is diagonalizable:", diagonalizable)
print("Cayley-Hamilton theorem verified:", verify_cayley_hamilton)
Encode and decode messages using nonsingular matrices, e.g. code "Linear Algebra is fun" and then decode it.
import string
def text_to_vector(text):
    """Map letters A..Z to numbers 1..26, dropping spaces."""
    text = text.upper().replace(' ', '')
    vector = [string.ascii_uppercase.index(c) + 1 for c in text]
    return np.array(vector)

def vector_to_text(vector):
    """Map numbers 1..26 back to letters A..Z."""
    text = ''.join([string.ascii_uppercase[int(round(num)) - 1] for num in vector])
    return text
# Nonsingular encoding matrix
encoding_matrix = np.array([[2, 3], [1, 4]])
# Message to encode
message = "Linear Algebra is fun"
# Convert message to numeric vector
numeric_vector = text_to_vector(message)
# Pad vector length to multiple of encoding matrix size
n = encoding_matrix.shape[0]
pad_length = (n - len(numeric_vector) % n) % n
numeric_vector = np.concatenate((numeric_vector, np.zeros(pad_length)))
# Split vector into blocks
blocks = numeric_vector.reshape(-1, n).T
# Encode each block
encoded_blocks = np.dot(encoding_matrix, blocks)
# Decode by multiplying with inverse matrix
decoding_matrix = np.linalg.inv(encoding_matrix)
decoded_blocks = np.dot(decoding_matrix, encoded_blocks)
# Flatten decoded vector and round
decoded_vector = np.rint(decoded_blocks.T).flatten()[:len(numeric_vector)-pad_length]
# Convert back to text
decoded_message = vector_to_text(decoded_vector)
print("Original message:", message)
print("Decoded message:", decoded_message)
import sympy as sp

# Define symbolic variables
x, y, z = sp.symbols('x y z')
# Define scalar field function
f = x**2 + y**2 + z**2
# Compute gradient vector (partial derivatives)
gradient_f = sp.Matrix([sp.diff(f, var) for var in (x, y, z)])
print("Gradient of the scalar field:")
print(gradient_f)
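# Numeric spot check (an added example): evaluate the gradient at (1, 2, 3);
# for f = x^2 + y^2 + z^2 this should give (2, 4, 6)
print("Gradient at (1, 2, 3):", gradient_f.subs({x: 1, y: 2, z: 3}).T)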
# Define symbolic variables
x, y, z = sp.symbols('x y z')
# Define a vector field F = [x*y, y*z, z*x]
F = sp.Matrix([x*y, y*z, z*x])
# Compute divergence (sum of partial derivatives of each component w.r.t. corresponding variable)
divergence_F = sum(sp.diff(F[i], var) for i, var in enumerate((x, y, z)))
print("Divergence of the vector field:")
print(divergence_F)
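# Cross-check with sympy.vector's built-in divergence (assumes a SymPy version
# providing CoordSys3D, i.e. SymPy >= 1.0)
from sympy.vector import CoordSys3D, divergence
N = CoordSys3D('N')
F_vec = N.x*N.y*N.i + N.y*N.z*N.j + N.z*N.x*N.k
print("Divergence via sympy.vector:", divergence(F_vec))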
# Define symbolic variables
x, y, z = sp.symbols('x y z')
# Define a vector field F = [y*z, z*x, x*y]
F = sp.Matrix([y*z, z*x, x*y])
# Compute curl: curl(F) = (dFz/dy - dFy/dz, dFx/dz - dFz/dx, dFy/dx - dFx/dy)
curl_F = sp.Matrix([
    sp.diff(F[2], y) - sp.diff(F[1], z),
    sp.diff(F[0], z) - sp.diff(F[2], x),
    sp.diff(F[1], x) - sp.diff(F[0], y)
])
print("Curl of the vector field:")
print(curl_F)
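# Identity check: div(curl F) = 0 for any smooth vector field
div_curl = sum(sp.diff(curl_F[i], var) for i, var in enumerate((x, y, z)))
print("div(curl F) =", sp.simplify(div_curl))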