diff --git a/linear_algebra/src/power_iteration.py b/linear_algebra/src/power_iteration.py
index 83c2ce48c3a0..643026c7b8ea 100644
--- a/linear_algebra/src/power_iteration.py
+++ b/linear_algebra/src/power_iteration.py
@@ -1,73 +1,75 @@
 import numpy as np
+from typing import Tuple, Union
+import numpy.typing as npt
 
 
 def power_iteration(
-    input_matrix: np.ndarray,
-    vector: np.ndarray,
+    input_matrix: npt.NDArray[Union[np.float64, np.complex128]],
+    vector: npt.NDArray[Union[np.float64, np.complex128]],
     error_tol: float = 1e-12,
     max_iterations: int = 100,
-) -> tuple[float, np.ndarray]:
+) -> Tuple[float, npt.NDArray[Union[np.float64, np.complex128]]]:
     """
     Power Iteration.
     Find the largest eigenvalue and corresponding eigenvector
     of matrix input_matrix given a random vector in the same space.
-    Will work so long as vector has component of largest eigenvector.
+    Works as long as vector has a component of the largest eigenvector.
     input_matrix must be either real or Hermitian.
 
-    Input
-    input_matrix: input matrix whose largest eigenvalue we will find.
-    Numpy array. np.shape(input_matrix) == (N,N).
-    vector: random initial vector in same space as matrix.
-    Numpy array. np.shape(vector) == (N,) or (N,1)
+    Input:
+    input_matrix: Input matrix whose largest eigenvalue we will find.
+    A square numpy array. Shape: (N, N).
+    vector: Random initial vector in the same space as the matrix.
+    Numpy array. Shape: (N,) or (N,1).
 
-    Output
-    largest_eigenvalue: largest eigenvalue of the matrix input_matrix.
-    Float. Scalar.
-    largest_eigenvector: eigenvector corresponding to largest_eigenvalue.
-    Numpy array. np.shape(largest_eigenvector) == (N,) or (N,1).
+    Output:
+    largest_eigenvalue: Largest eigenvalue of the matrix.
+    largest_eigenvector: Eigenvector corresponding to largest eigenvalue.
 
+    Example:
     >>> import numpy as np
     >>> input_matrix = np.array([
-    ...     [41, 4, 20],
-    ...     [ 4, 26, 30],
+    ...     [41, 4, 20],
+    ...     [4, 26, 30],
     ...     [20, 30, 50]
     ... ])
-    >>> vector = np.array([41,4,20])
-    >>> power_iteration(input_matrix,vector)
+    >>> vector = np.array([41, 4, 20])
+    >>> power_iteration(input_matrix, vector)
     (79.66086378788381, array([0.44472726, 0.46209842, 0.76725662]))
     """
 
-    # Ensure matrix is square.
-    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
+    N, M = np.shape(input_matrix)
+    assert N == M, "Input matrix must be square."
     # Ensure proper dimensionality.
-    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
+    assert N == np.shape(vector)[0], "Vector must be compatible with matrix dimensions."
     # Ensure inputs are either both complex or both real
-    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
+    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(
+        vector
+    ), "Both inputs must be either real or complex."
+
     is_complex = np.iscomplexobj(input_matrix)
     if is_complex:
-        # Ensure complex input_matrix is Hermitian
-        assert np.array_equal(input_matrix, input_matrix.conj().T)
-
-    # Set convergence to False. Will define convergence when we exceed max_iterations
-    # or when we have small changes from one iteration to next.
+        # Ensure complex input_matrix is Hermitian (A == A*)
+        assert np.array_equal(
+            input_matrix, input_matrix.conj().T
+        ), "Input matrix must be Hermitian if complex."
 
     convergence = False
-    lambda_previous = 0
+    lambda_previous = 0.0
     iterations = 0
-    error = 1e12
+    error = float("inf")
 
     while not convergence:
-        # Multiple matrix by the vector.
+        # Multiply matrix by the vector.
         w = np.dot(input_matrix, vector)
         # Normalize the resulting output vector.
         vector = w / np.linalg.norm(w)
-        # Find rayleigh quotient
-        # (faster than usual b/c we know vector is normalized already)
+        # Find the Rayleigh quotient (faster since we know vector is normalized).
         vector_h = vector.conj().T if is_complex else vector.T
         lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))
 
         # Check convergence.
-        error = np.abs(lambda_ - lambda_previous) / lambda_
+        error = np.abs(lambda_ - lambda_previous) / np.abs(lambda_)
         iterations += 1
 
         if error <= error_tol or iterations >= max_iterations:
@@ -75,6 +77,7 @@ def power_iteration(
 
         lambda_previous = lambda_
 
+    # Ensure lambda_ is real if the matrix is complex.
     if is_complex:
         lambda_ = np.real(lambda_)
 
@@ -83,7 +86,8 @@ def power_iteration(
 
 def test_power_iteration() -> None:
     """
-    >>> test_power_iteration() # self running tests
+    Test function for power_iteration.
+    Runs tests on real and complex matrices using the power_iteration function.
     """
     real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
     real_vector = np.array([41, 4, 20])
@@ -105,19 +109,12 @@ def test_power_iteration() -> None:
         eigen_value, eigen_vector = power_iteration(input_matrix, vector)
 
         # Numpy implementation.
-
-        # Get eigenvalues and eigenvectors using built-in numpy
-        # eigh (eigh used for symmetric or hermetian matrices).
         eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
-        # Last eigenvalue is the maximum one.
         eigen_value_max = eigen_values[-1]
-        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
         eigen_vector_max = eigen_vectors[:, -1]
 
-        # Check our implementation and numpy gives close answers.
+        # Check that our implementation and numpy's give close answers.
         assert np.abs(eigen_value - eigen_value_max) <= 1e-6
-        # Take absolute values element wise of each eigenvector.
-        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
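
For reference, a minimal usage sketch of the patched function, not part of the change itself: it assumes the module is importable under the path shown in the diff header, and the small complex matrix is only an illustrative Hermitian example exercising the new assert.

    import numpy as np
    from linear_algebra.src.power_iteration import power_iteration

    # Real symmetric case, matching the doctest in the diff.
    a = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    eigen_value, eigen_vector = power_iteration(a, np.array([41, 4, 20]))

    # Complex Hermitian case (h == h.conj().T), which the new assert enforces.
    h = np.array([[2, 1j], [-1j, 3]], dtype=np.complex128)
    eigen_value_c, eigen_vector_c = power_iteration(h, np.array([1, 0], dtype=np.complex128))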