在QR算法中无法获得适当的特征向量?

问题描述 投票:0回答:0

使用 QR 算法,我试图用标量 B

获得 N*N 大小矩阵的 A**B

N=2, B=5, A = [[1,2][3,4]]

我得到了正确的 Q、R 矩阵和特征值,但得到了奇怪的特征向量

实现的代码似乎是正确的,但不知道哪里错了

在理论计算中

特征值是

λ_1≈5.37228 λ_2≈-0.372281

特征向量应该是

v_1≈(0.457427, 1) v_2≈(-1.45743, 1)

但我得到了

N=2, B=5
A=[[1, 2], [3, 4]]
Q:[[-0.3162277660168378, -0.9486832980505135], [-0.9486832980505135, 0.3162277660168381]] * R:[[-3.162277660168378, -4.42718872423573], [7.771561172376096e-16, -0.6324555320336747]] = [[0.9999999999999983, 1.9999999999999978], [2.9999999999999987, 3.9999999999999987]]
Eigenvalues: [5.372281323269012, -0.3722813232690141]
Eigenvector matrix : [[0.4159735579192838, 0.9093767091321241], [-0.9093767091321243, 0.4159735579192842]]
Eigenvector matrix ivnerse: [[0.41597355791928425, -0.9093767091321242], [0.9093767091321244, 0.41597355791928387]]
Diagonal eigenvalue matrix (D): [[4475.007150826551, 0], [0, -0.007150826562162989]]
Reconstructed A: [[774.3224778196204, -1692.793486691768], [-1692.79348669177, 3700.677522180369]]

这似乎与舍入误差有关?

下面是完整代码

import sys
from math import sqrt, copysign

"""
N, B = map(int, sys.stdin.readline().strip().split())
A = [list(map(int, sys.stdin.readline().strip().split())) for _ in range(N)]
"""

N, B = 2, 5
A = [[1, 2], [3, 4]]

def dot_product(a, b): return sum(ai * bi for ai, bi in zip(a, b))

def norm(x): return sqrt(dot_product(x, x))

def matrix_multiply(A, B): return [[sum(a * b for a, b in zip(row, col)) for col in zip(*B)] for row in A]

def matrix_transpose(A): return [[A[j][i] for j in range(len(A))] for i in range(len(A[0]))]

def gram_schmidt(A):
    global N
    Q = [[0.0] * N for _ in range(N)]
    R = [[0.0] * N for _ in range(N)]
    for j in range(N):
        v = A[j]
        for i in range(j):
            R[i][j] = dot_product(Q[i], A[j])
            v = [v_i - R[i][j] * q_i for v_i, q_i in zip(v, Q[i])]
        R[j][j] = norm(v)
        Q[j] = [v_i / R[j][j] for v_i in v]
    return Q, R

def qr_decomposition(A):
    global N
    R = A.copy()
    Q = [[float(i == j) for j in range(N)] for i in range(N)]

    for k in range(N - 1):
        x = [row[k] for row in R[k:]]
        e = [0.0] * len(x)
        e[0] = copysign(norm(x), x[0])
        u = [xi + ei for xi, ei in zip(x, e)]
        if not norm(u): continue
        v = [xi / norm(u) for xi in u]
        Qk = [[float(i == j) - 2.0 * v[i] * v[j] for i in range(k, N)] for j in range(k, N)]
        Qk = [[1.0] * k + row for row in Qk]
        Q[k:] = [Qk[i] + [0.0] * k for i in range(N - k)]
        R = [[sum([Q[i][k] * A[k][j] for k in range(N)]) for j in range(N)] for i in range(N)]
    return Q, R

def qr_algorithm(A, max_iterations=1000, tolerance=1e-9):
    global N
    P = [[float(i == j) for j in range(N)] for i in range(N)]
    for _ in range(max_iterations):
        Q, R = qr_decomposition(A)
        A = matrix_multiply(R, Q)
        P = matrix_multiply(P, Q)
        if all(abs(A[i][j]) < tolerance for i in range(N) for j in range(N) if i != j): break
    eigenvalues = [A[i][i] for i in range(N)]
    eigenvectors = matrix_transpose(P)
    return eigenvalues, eigenvectors

def matrix_minor(A, i, j):
    return [row[:j] + row[j+1:] for row in (A[:i] + A[i+1:])]

def matrix_determinant(A):
    if len(A) == 2:
        return A[0][0]*A[1][1] - A[0][1]*A[1][0]
    determinant = 0
    for c in range(len(A)):
        determinant += ((-1)**c) * A[0][c] * matrix_determinant(matrix_minor(A, 0, c))
    return determinant

def matrix_inverse(A):
    determinant = matrix_determinant(A)
    if determinant == 0:
        return None
    if len(A) == 2:
        return [[A[1][1]/determinant, -1*A[0][1]/determinant],
                [-1*A[1][0]/determinant, A[0][0]/determinant]]
    cofactors = []
    for r in range(len(A)):
        cofactor_row = []
        for c in range(len(A)):
            minor = matrix_minor(A, r, c)
            cofactor = ((-1)**(r+c)) * matrix_determinant(minor)
            cofactor_row.append(cofactor)
        cofactors.append(cofactor_row)
    cofactors = transpose_matrix(cofactors)
    for r in range(len(cofactors)):
        for c in range(len(cofactors)):
            cofactors[r][c] = cofactors[r][c] / determinant
    return cofactors

def tr_pow(eigenvalues, B):
    global N
    tr = [[0 for _ in range(N)] for _ in range(N)]
    for i, e in enumerate(eigenvalues): tr[i][i] = e**B
    return tr

Q, R = qr_decomposition(A)
eigenvalues, eigenvectors = qr_algorithm(A)
D = tr_pow(eigenvalues, B)
eigenvectors_inv = matrix_inverse(eigenvectors)
ans = matrix_multiply(matrix_multiply(eigenvectors,D), eigenvectors_inv)

print(f'N={N}, B={B}')
print(f'A={A}')
print(f'Q:{Q} * R:{R} = {matrix_multiply(Q,R)}')
print("Eigenvalues:", eigenvalues)
print("Eigenvector matrix :", eigenvectors)
print("Eigenvector matrix ivnerse:", eigenvectors_inv)
print("Diagonal eigenvalue matrix (D):", D)
print("Reconstructed A:", ans)
python-3.x precision linear-algebra eigenvalue eigenvector
© www.soinside.com 2019 - 2024. All rights reserved.