-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathconjugate.py
More file actions
66 lines (49 loc) · 1.67 KB
/
conjugate.py
File metadata and controls
66 lines (49 loc) · 1.67 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
import numpy as np
def conjugate(X, A, S, maxiter, U, mean_data, tao):
    """
    Projected conjugate-gradient update of S for the factorization X ≈ A @ S.

    Minimizes 0.5 * ||X - A @ S||^2 over S; when the data matrix is "tall"
    (L > N) a volume term tao * det(Z)^2 acting through the simplex
    projection (C, B, U) is added to the gradient.

    Parameters
    ----------
    X : (L, N) ndarray
        Data matrix.
    A : (L, c) ndarray
        Basis / mixing matrix (cast to float64 internally).
    S : (c, N) ndarray
        Current coefficient estimate; an updated copy is returned.
    maxiter : int
        Number of conjugate-gradient iterations to run.
        (Bug fix: this argument was previously overwritten with 1.)
    U : ndarray
        Projection basis for the volume regularizer; U.T must be
        (c-1, N) for the products below — TODO confirm against callers.
    mean_data : ndarray
        Assumed (1, N) row vector; transposed and broadcast to (N, c)
        inside the regularizer — TODO confirm against callers.
    tao : float
        Regularization weight; only used when L > N.

    Returns
    -------
    S : (c, N) ndarray
        Updated coefficient matrix.
    """
    A = A.astype(np.float64)
    AtA = A.T @ A
    AtX = A.T @ X
    L, _ = X.shape
    c, N = S.shape
    # Volume-constrained mode only applies when the data is tall (L > N).
    cons = 1 if L > N else 0
    eps = np.finfo(float).eps  # hoisted: division-by-zero guard used below
    if cons == 1:
        # Constraint machinery is only needed (and only shape-valid) in
        # the constrained path, so build it here rather than unconditionally.
        mean_data = mean_data.T @ np.ones((1, c))  # (1, N) -> (N, c)
        C = np.vstack((np.ones((1, c)), np.zeros((c - 1, c))))
        B = np.vstack((np.zeros((1, c - 1)), np.eye(c - 1)))
        Z = C + B @ U.T @ (S.T - mean_data)
        ZD = np.linalg.pinv(Z) @ B @ U.T
        detz2 = np.linalg.det(Z) ** 2  # scalar; broadcasts over ZD
        # initial gradient with volume term
        gradp = AtA @ S - AtX + tao * detz2 * ZD
    else:
        # initial gradient, unconstrained least-squares term only
        gradp = AtA @ S - AtX
    # initial conjugate direction, plus a small gradient-descent warm start
    conjp = gradp
    S = S - 0.001 * gradp
    for _ in range(maxiter):  # renamed: `iter` shadowed the builtin
        if cons == 1:
            Z = C + B @ U.T @ (S.T - mean_data)
            ZD = np.linalg.pinv(Z) @ B @ U.T
            detz2 = np.linalg.det(Z) ** 2
            grad = AtA @ S - AtX + tao * detz2 * ZD
        else:
            grad = AtA @ S - AtX
        # Polak-Ribiere-style beta, guarded against a zero denominator.
        beta = np.sum(grad * (grad - gradp), axis=1) / np.maximum(
            np.sum(gradp**2, axis=1), eps
        ).reshape(1, -1)
        # new conjugate direction
        conj = -grad + np.multiply(beta.reshape(-1, 1), conjp)
        AAd = AtA @ conj
        # Exact line search along `conj` for the quadratic data term.
        alpha = np.sum(conj * (-grad), axis=0, keepdims=True) / np.maximum(
            np.sum(conj * AAd, axis=0, keepdims=True), eps
        )
        S = S + (conj * alpha)
        gradp = grad
        conjp = conj
    return S