
How to make tensorflow cifar10 tutorial read from numpy array?


I am trying to use the CIFAR10 tutorial to create my own training script. My dataset is stored in a MAT file that I convert to a Numpy array using h5py. In the tutorial, they read data using:
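For reference, the conversion I mention looks roughly like this; a v7.3 MAT file is an HDF5 container, so h5py can open it directly (the file name and dataset paths here are placeholders, not my real layout):

import h5py
import numpy as np

# v7.3 MAT files are HDF5 containers, so h5py reads them directly.
# 'imdb.mat' and the group/dataset names below are placeholders.
with h5py.File('imdb.mat', 'r') as f:
  images = np.asarray(f['images']['data'], dtype=np.float32)
  labels = np.asarray(f['images']['labels'], dtype=np.int32)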

reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)

whereas, in my case, I use:

images_placeholder = tf.placeholder(tf.float32, shape=shape)
labels_placeholder = tf.placeholder(tf.int32, shape=batch_size)

The problem is when I try to run the training using a MonitoredTrainingSession as they use in CIFAR10 example:

def train():
  with tf.Graph().as_default():
    global_step = tf.contrib.framework.get_or_create_global_step()

    with inputs.read_imdb(FLAGS.input_path) as imdb:
        sets = np.asarray(imdb['images']['set'], dtype=np.int32)
        data_set = inputs.DataSet(imdb, np.where(sets == 1)[0])
    images, labels = inputs.placeholder_inputs(data_set, batch_size=128)

    logits = model.vgg16(images)
    loss = model.loss(logits, labels)
    train_op = model.train(loss, global_step, data_set.num_examples)

    class _LoggerHook(tf.train.SessionRunHook):
        def begin(self):
            self._step = -1

        def before_run(self, run_context):
            self._step += 1
            self._start_time = time.time()
            return tf.train.SessionRunArgs(loss)

        def after_run(self, run_context, run_values):
            duration = time.time() - self._start_time
            loss_value = run_values.results
            if self._step % 10 == 0:
                num_examples_per_step = FLAGS.batch_size
                examples_per_sec = num_examples_per_step / duration
                sec_per_batch = float(duration)

                format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
                              'sec/batch)')
                print(format_str % (datetime.now(), self._step, loss_value,
                                    examples_per_sec, sec_per_batch))

    with tf.train.MonitoredTrainingSession(
            checkpoint_dir=FLAGS.train_dir,
            hooks=[tf.train.StopAtStepHook(last_step=FLAGS.max_steps),
                   tf.train.NanTensorHook(loss),
                   _LoggerHook()],
            config=tf.ConfigProto(
                log_device_placement=FLAGS.log_device_placement)) as mon_sess:
        while not mon_sess.should_stop():
            mon_sess.run(train_op)

where inputs.DataSet is based on the MNIST example. Some auxiliary functions:

def read_imdb(path):
  imdb = h5py.File(path)
  check_imdb(imdb)
  return imdb

def placeholder_inputs(data_set, batch_size):
  shape = (batch_size,) + data_set.images.shape[1:][::-1]
  images_placeholder = tf.placeholder(tf.float32, shape=shape)
  labels_placeholder = tf.placeholder(tf.int32, shape=batch_size)
  return images_placeholder, labels_placeholder
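
inputs.DataSet follows the MNIST example's interface; the parts that matter here are next_batch and num_examples. A simplified sketch (the real class is built from the h5py file and an index array, which I omit here and replace with plain NumPy arrays):

class DataSet(object):
  """Simplified in-memory dataset modeled on the MNIST tutorial's DataSet."""

  def __init__(self, images, labels):
    self.images = images
    self.labels = labels
    self._index = 0

  @property
  def num_examples(self):
    return self.images.shape[0]

  def next_batch(self, batch_size):
    # Wrap around at the end of an epoch (no shuffling, for brevity).
    if self._index + batch_size > self.num_examples:
      self._index = 0
    start, end = self._index, self._index + batch_size
    self._index = end
    return self.images[start:end], self.labels[start:end]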

When I try to run this, it obviously returns the error You must feed a value for placeholder tensor 'Placeholder', because I never feed the placeholders. I do have a function that creates the feed dict, but I don't know where I should pass it.

def fill_feed_dict(data_set, images, labels):
  images_feed, labels_feed = data_set.next_batch(images.get_shape()[0].value)
  feed_dict = {images: images_feed, labels: labels_feed}
  return feed_dict

Can anyone help?

Thanks, Daniel


Solution

  • You just need to pass the dict created by fill_feed_dict every time you call the run method:

    mon_sess.run(train_op, feed_dict=fill_feed_dict(data_set, images, labels))
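
    In the training loop from the question that looks like this (only the feed_dict argument is new):

    while not mon_sess.should_stop():
        mon_sess.run(train_op,
                     feed_dict=fill_feed_dict(data_set, images, labels))

    The same feed also covers the loss tensor requested by _LoggerHook, because the hook's fetches are executed in the same underlying session.run call.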