python-3.x, numpy, tensorflow, machine-learning, artificial-intelligence

ValueError: matmul: Input operand 0 does not have enough dimensions (has 0, gufunc core with signature (n?,k),(k,m?)->(n?,m?) requires 1)


I'm new to programming AI and I don't know how to solve this problem. I was coding along with a YouTube tutorial, but the author doesn't get this error, and since I'm new I have no idea how to fix it, much less what it means. Please help!

The problem:

---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-58-2d579b4cf5ca> in <module>()
     16 
     17 
---> 18 train(neural_net, X, Y, l2_cost, 0.5)

<ipython-input-58-2d579b4cf5ca> in train(neural_net, X, Y, l2_cost, lr)
     11   #Forward pass - Pase adelantado
     12   for l, layer in enumerate(neural_net):
---> 13     z = out[-1][1] @ neural_net[l].W + neural_net[l].b
     14     a = neural_net[l].act_f[0](z)
     15     out.append((z, a))

ValueError: matmul: Input operand 0 does not have enough dimensions (has 0, gufunc core with signature (n?,k),(k,m?)->(n?,m?) requires 1)

I'm working in Google Colab and I really need help. My code:

import numpy as np
import scipy as cs
import matplotlib.pyplot as plt

from sklearn.datasets import make_circles

# Create the data set
n = 500  # number of samples to analyze
p = 2    # number of features per sample

X, Y = make_circles(n_samples = n, factor = 0.5, noise=0.05)

plt.scatter(X[Y == 0,0], X[Y == 0,1])
plt.scatter(X[Y == 1,0], X[Y == 1,1], c = 'salmon')
plt.show()

# Class for one layer of the network
class neural_layer():

  def __init__(self, n_conn, n_neur, act_f):

    self.act_f = act_f
    self.b = np.random.rand(1, n_neur) * 2 - 1
    self.W = np.random.rand(n_conn, n_neur) * 2 - 1

# Activation functions
sigm = (lambda x: 1 / (1 + np.e ** (-x)),  # sigmoid
       lambda x: x * (1 - x))              # its derivative, written in terms of the sigmoid output

relu = lambda x: np.maximum(0, x)

_x = np.linspace(-5, 5, 100)
plt.plot(_x, relu(_x))

l0 = neural_layer(p, 4, sigm)
l1 = neural_layer(4, 8, sigm)
# ...and so on

def create_nn(topology, act_f):

  nn = []  # container for each layer of the neural network

  for l, layer in enumerate(topology[:-1]):
    nn.append(neural_layer(topology[1], topology[l+1], act_f))
  return nn

topology = [p, 4, 8, 16, 8, 4, 1]  # network topology

neural_net = create_nn(topology, sigm)

l2_cost = (lambda Yp, Yr: np.mean((Yp - Yr) ** 2),  # mean squared error
           lambda Yp, Yr: (Yp - Yr))                 # its derivative

def train(neural_net, X, Y, l2_cost, lr=0.5):
  out = [(None, X)]
  # Forward pass
  for l, layer in enumerate(neural_net):
    z = out[-1][1] @ neural_net[l].W + neural_net[l].b
    a = neural_net[l].act_f[0](z)
    out.append((z, a))
train(neural_net, X, Y, l2_cost, 0.5)



Solution

  • You are trying to multiply two matrices whose shapes are incompatible:

    W shape = (4,4)

    out[-1][1] shape = (500,2)

    on this line

      z = out[-1][1] @ neural_net[l].W + neural_net[l].b
    

    Here is how to fix it:

    In create_nn, the first argument to neural_layer must use the loop index l, not the literal 1, so each layer's weight matrix matches the size of the previous layer.

    Change:

     nn.append(neural_layer(topology[1], topology[l+1], act_f))
    

    to

     nn.append(neural_layer(topology[l], topology[l+1], act_f))
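
    With that one-character change, each layer's W has shape (topology[l], topology[l+1]), so the output of every layer lines up with the input of the next and the @ products chain all the way down to the single output column.

    As a quick sanity check (a minimal sketch, assuming the corrected create_nn above; the name fixed_net and the shape printout are just for illustration, not part of the original code):

     fixed_net = create_nn(topology, sigm)

     a = X  # (500, 2)
     for l, layer in enumerate(fixed_net):
       print(f"layer {l}: W {layer.W.shape}, b {layer.b.shape}")
       a = layer.act_f[0](a @ layer.W + layer.b)

     print("output shape:", a.shape)  # should print (500, 1)

    If the W shapes print as (2, 4), (4, 8), (8, 16), (16, 8), (8, 4), (4, 1), the forward pass in train will run without the ValueError.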