Search code examples
pythonnumpydeep-learningneural-networkbackpropagation

Numpy dot function: operands could not be broadcast together in MLP


I was writing a MLP with python and I did this code:

class NeuralNetwork(object):
    def __init__(self):
        
        #parameters
        self.inputSize = 13
        self.hidden1Size = 13
        self.hidden2Size = 13
        self.outputSize = 13
        
    
        #weights
        self.W1 = np.random.randn(self.inputSize, self.hidden1Size) # weight matrix from input to hidden layer
        self.W2 = np.random.randn(self.hidden1Size, self.hidden2Size) # weight matrix from hidden to hidden2 layer
        self.W3 = np.random.randn(self.hidden2Size, self.outputSize) # weight matrix from hidden to output layer

    def feedForward(self, X):
        self.z = np.dot(X, self.W1)  #Zh1
        self.z2 = self.sigmoid(self.z)  #ah1
        self.z3 = np.dot(self.z2, self.W2) #Zh2
        self.z4 = self.sigmoid(self.z3)  #ah2
        self.z5 = np.dot(self.z3, self.W3) #zout
        output = self.sigmoid(self.z5) #a out
        print(output)
        return output
        
    def sigmoid(self, s, deriv=False):
        if (deriv == True):
            return s * (1 - s)
        return 1/(1 + np.exp(-s))
    
    def backward(self, X, y, output):
        #step1
        self.output_error = output - y # error in output
        
        #step2
        self.output_delta = self.output_error * self.sigmoid(self.z3).T
        self.W3 = self.W3 - self.output_delta
        
        #step3
        self.z2_error = self.output_delta.dot(self.output_delta)
        self.z2_delta = self.z2_error * self.sigmoid(self.z3)
        self.W2 = self.W2 - self.z2_delta
       
        
        #step4
        self.z1_error = self.output_delta.dot(self.W2.T)
        self.z1_delta = self.z2_error * self.sigmoid(self.z2, deriv=True)
        
         
        self.W1 = self.W1 - self.z1_delta
    
        
    def train(self, X, y):
        output = self.feedForward(X)
       
        self.backward(X, y, output)
        
        
        
def accuracy (predict , y_test):
    C = 0
    length = len(y_test)
    for i in length:
        if predict[i] == y_test[i]:
            C = C + 1
    return(C/length)

but I had this problem:


 ValueError                                Traceback (most recent
 call last) <ipython-input-8-69e28bd743d3> in <module>
       1 NN = NeuralNetwork()
       2 
 ----> 3 NN.train(X_train,y_train)
       4 
       5 predict = NN.feedforward(X_train)
 
 <ipython-input-7-55e55429732f> in train(self, X, y)
      54         output = self.feedForward(X)
      55 
 ---> 56         self.backward(X, y, output)
      57 
      58 
 
 <ipython-input-7-55e55429732f> in backward(self, X, y, output)
      31     def backward(self, X, y, output):
      32         #step1
 ---> 33         self.output_error = output - y # error in output
      34 
      35         #step2
 
 ValueError: operands could not be broadcast together with shapes
 (242,13) (242,) 

I know the problem is that numpy can not do the dot function because of the matrix problems, but I don't know how to fix this problem.


Solution

  • Please see the following example which is the same as your error:

    import numpy as np
    
    a = np.zeros((4, 5))
    b = np.zeros(4,)
    # c = a - b error!
    c = a - b[:, np.newaxis] # no error!
    

    So you can try

    self.backward(X, y, output[:, np.newaxis])

    doc. Pretty annoying.


    What this does it converts a 1d array (of shape (n,)) to a 2d array (of shape (n, 1)) then numpy is able to handle it.