The gradient and proximal updates in my PALM implementation seem to be wrong. I have implemented the algorithm given below, but I must have made a mistake somewhere.
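If I understand the method correctly, it should minimise the dictionary-learning objective 0.5*||D W - X||_F^2 + lamda*||W||_1 over D and W, where the prox steps project every atom (column of D) onto the unit ball, centre all atoms except the first, and soft-threshold the rows of W. That is at least what I am trying to implement.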
'''
Implementation of PALM - proximal alternating linearised minimisation.
'''
import numpy as np

def palm(X, m, niter, lamda):
    X = X.T                    # work with X as an l x n matrix
    l = X.shape[0]             # signal dimension
    n = X.shape[1]             # number of signals
    W = np.random.rand(m, n)   # sparse coefficients
    D = np.random.rand(l, m)   # dictionary of m atoms
    for i in range(niter):
        # --- Update dictionary D ---
        # Gradient step with step size 1/||W||_2^2 (inverse Lipschitz constant)
        tau_d = np.linalg.norm(W, 2) ** -2
        D = D - tau_d * np.matmul(np.matmul(D, W) - X, W.T)
        # Intended to remove the mean of every atom except the first
        # (note '*' is elementwise here, so this computes D[:, j] * (1 - 1/l))
        for j in range(1, m):
            D[:, j] = D[:, j] - (np.ones((l, 1)).T * D[:, j]) / l
        # Project every atom onto the unit ball
        for j in range(m):
            D[:, j] = D[:, j] / max(1, np.linalg.norm(D[:, j], 2))
        # --- Update coefficients W ---
        # Gradient step with step size 1/||D||_2^2
        tau_w = np.linalg.norm(D, 2) ** -2
        W = W - tau_w * np.matmul(D.T, np.matmul(D, W) - X)
        # Soft-threshold every row of W (prox of the l1 penalty)
        for j in range(m):
            W[j, :] = np.multiply(np.maximum(np.zeros(W[j, :].shape[0]), np.absolute(W[j, :]) - lamda), np.sign(W[j, :]))
    return D, W
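In case it matters, this is how I call it (the shapes are my own assumption: each row of X is one signal, since the function transposes X first):

import numpy as np

np.random.seed(0)                    # just for reproducibility
X = np.random.rand(100, 20)          # 100 signals of dimension 20
D, W = palm(X, m=10, niter=50, lamda=0.1)
print(D.shape, W.shape)              # gives (20, 10) and (10, 100)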
I believe the updates go wrong from the 2nd column of D and the 2nd row of W onwards. This is the version I am currently running:
import numpy as np

def palm(X, m, niter, lamda):
    X = X.T
    l = X.shape[0]
    n = X.shape[1]
    W = np.random.rand(m, n)
    D = np.random.rand(l, m)
    for i in range(niter):
        # --- Update dictionary D ---
        tau_d = np.linalg.norm(W, 2) ** -2
        D = D - tau_d * np.matmul(np.matmul(D, W) - X, W.T)
        for j in range(1, m):
            D[:, j] = D[:, j] - (np.ones((l, 1)).T * D[:, j]) / l
        for j in range(1, m):
            D[:, j] = D[:, j] - D[:, j] / max(1, np.linalg.norm(D[:, j], 2))
        # --- Update coefficients W ---
        tau_w = np.linalg.norm(D, 2) ** -2
        W = W - tau_w * np.matmul(D.T, np.matmul(D, W) - X)
        for j in range(1, m):
            W[j, :] = W[j, :] - np.multiply(np.maximum(np.zeros(W[j, :].shape[0]), np.absolute(W[j, :]) - lamda), np.sign(W[j, :]))
    return D, W
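To check my understanding of what the two prox steps should return, I wrote them as standalone helpers (a minimal sketch; the names soft_threshold and project_unit_ball are my own, not from any paper):

import numpy as np

def soft_threshold(v, t):
    # prox of t*||.||_1: shrink every entry towards zero by t
    return np.sign(v) * np.maximum(np.abs(v) - t, 0.0)

def project_unit_ball(d):
    # Euclidean projection onto the unit ball
    return d / max(1.0, np.linalg.norm(d, 2))

Both helpers return the prox result itself, whereas in my second version above I subtract that result from the current iterate (e.g. W[j, :] - soft_threshold(...)) instead of assigning it, and the projection/thresholding loops start at index 1 rather than 0. Is that where it goes wrong?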