Tags: tensorflow, machine-learning, deep-learning, tensorflow-serving, tensorflow-estimator

How to Save a TensorFlow Model Using estimator.export_savedmodel()


How can I save a TensorFlow model using estimator.export_savedmodel()?

In particular, what should I put inside serving_input_receiver_fn()?

I have created a custom Estimator based on the VGGNet architecture. I am using my own images and applying some transformations to them (you can see these in _parse_function()).

I have read the documentation here, but I am not exactly sure what to write for my code (please see below). Ultimately I want to save the model and serve it with TensorFlow Serving.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
import numpy as np
from sklearn.model_selection import train_test_split
import os
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import scipy
from scipy import ndimage
import scipy.misc

tf.logging.set_verbosity(tf.logging.INFO)

def cnn_model_fn(features, labels, mode):
    """Model function for CNN."""
    # Input Layer
    input_layer = tf.reshape(features, [-1, 224, 224, 3])

    # Convolutional Layer #1
    conv1 = tf.layers.conv2d(
      inputs=input_layer,
      filters=64,
      kernel_size=[3, 3],
      padding="same",
      activation=tf.nn.relu)
    # Convolutional Layer #2
    conv2 = tf.layers.conv2d(
      inputs=conv1,
      filters=64,
      kernel_size=[3, 3],
      padding="same",
      activation=tf.nn.relu)
    # Pooling Layer #1
    pool1 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)

    # Convolutional Layer #3
    conv3 = tf.layers.conv2d(
      inputs=pool1,
      filters=128,
      kernel_size=[3, 3],
      padding="same",
      activation=tf.nn.relu)

    # Convolutional Layer #4
    conv4 = tf.layers.conv2d(
      inputs=conv3,
      filters=128,
      kernel_size=[3, 3],
      padding="same",
      activation=tf.nn.relu)
    pool2 = tf.layers.max_pooling2d(inputs=conv4, pool_size=[2, 2], strides=2)

    # Convolutional Layer #5
    conv5 = tf.layers.conv2d(
      inputs=pool2,
      filters=256,
      kernel_size=[3, 3],
      padding="same",
      activation=tf.nn.relu)

    # Convolutional Layer #6
    conv6 = tf.layers.conv2d(
      inputs=conv5,
      filters=256,
      kernel_size=[3, 3],
      padding="same",
      activation=tf.nn.relu)

    # Convolutional Layer #7
    conv7 = tf.layers.conv2d(
      inputs=conv6,
      filters=256,
      kernel_size=[3, 3],
      padding="same",
      activation=tf.nn.relu)

    pool3 = tf.layers.max_pooling2d(inputs=conv7, pool_size=[2, 2], strides=2)

    # Convolutional Layer #8
    conv8 = tf.layers.conv2d(
      inputs=pool3,
      filters=512,
      kernel_size=[3, 3],
      padding="same",
      activation=tf.nn.relu)

    # Convolutional Layer #9
    conv9 = tf.layers.conv2d(
      inputs=conv8,
      filters=512,
      kernel_size=[3, 3],
      padding="same",
      activation=tf.nn.relu)

    # Convolutional Layer #10
    conv10 = tf.layers.conv2d(
      inputs=conv9,
      filters=512,
      kernel_size=[3, 3],
      padding="same",
      activation=tf.nn.relu)
    pool4 = tf.layers.max_pooling2d(inputs=conv10, pool_size=[2, 2], strides=2)

    # Convolutional Layer #11
    conv11 = tf.layers.conv2d(
      inputs=pool4,
      filters=512,
      kernel_size=[3, 3],
      padding="same",
      activation=tf.nn.relu)

    # Convolutional Layer #12
    conv12 = tf.layers.conv2d(
      inputs=conv11,
      filters=512,
      kernel_size=[3, 3],
      padding="same",
      activation=tf.nn.relu)

    # Convolutional Layer #13
    conv13 = tf.layers.conv2d(
      inputs=conv12,
      filters=512,
      kernel_size=[3, 3],
      padding="same",
      activation=tf.nn.relu)

    pool5 = tf.layers.max_pooling2d(inputs=conv13, pool_size=[2, 2], strides=2)
    # Dense Layer
    pool5_flat = tf.reshape(pool5, [-1, 7 * 7 * 512])
    dense1 = tf.layers.dense(inputs=pool5_flat, units=4096, activation=tf.nn.relu)
    dense2 = tf.layers.dense(inputs=dense1, units=4096, activation=tf.nn.relu)
    dense3 = tf.layers.dense(inputs=dense2, units=1024, activation=tf.nn.relu)

    dropout = tf.layers.dropout(
      inputs=dense3, rate=0.001, training=mode == tf.estimator.ModeKeys.TRAIN)

    logits1 = tf.layers.dense(inputs=dropout, units=2)
    logits2 = tf.layers.dense(inputs=dropout, units=4)

    predictions = {
        "classes1": tf.argmax(input=logits1, axis=1),
        "classes2": tf.argmax(input=logits2, axis=1),
        "probabilities1": tf.nn.softmax(logits1, name="softmax_tensor_1"),
        "probabilities2": tf.nn.softmax(logits2, name="softmax_tensor_2")
    }

    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)

    # Calculate Loss (for both TRAIN and EVAL modes)
    loss1 = tf.losses.sparse_softmax_cross_entropy(labels=labels[:,0], logits=logits1)
    loss2 = tf.losses.sparse_softmax_cross_entropy(labels=labels[:,1], logits=logits2)
    loss = loss1 + loss2

    # Configure the Training Op (for TRAIN mode)
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
        train_op = optimizer.minimize(
            loss=loss,
            global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)

    # Add evaluation metrics (for EVAL mode)
    eval_metric_ops = {
        "accuracy1": tf.metrics.accuracy(
            labels=labels[:,0], predictions=predictions["classes1"]),
        "accuracy2": tf.metrics.accuracy(
            labels=labels[:,1], predictions=predictions["classes2"]),
        "precision1": tf.metrics.precision(labels=labels[:,0], predictions=predictions["classes1"]),
        "precision2": tf.metrics.precision(labels=labels[:,1], predictions=predictions["classes2"]),
        "recall1": tf.metrics.recall(labels=labels[:,0], predictions=predictions["classes1"]),
        "recall2": tf.metrics.recall(labels=labels[:,1], predictions=predictions["classes2"])
    }

    return tf.estimator.EstimatorSpec(
      mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)

def _parse_function(filename, label):
    image_string = tf.read_file(filename)
    image_decoded = tf.image.decode_image(image_string)
    image_typecasted = tf.cast(image_decoded, tf.float32)
    image_reshaped = tf.reshape(image_typecasted, [-1, 224, 224, 3])
    return image_reshaped, label

# Single-argument variant used at prediction time, when no label is available.
def _parse_function_predict(filename):
    image_string = tf.read_file(filename)
    image_decoded = tf.image.decode_image(image_string)
    image_typecasted = tf.cast(image_decoded, tf.float32)
    image_reshaped = tf.reshape(image_typecasted, [-1, 224, 224, 3])
    return image_reshaped

def stratified_train_test_split_():
    filenamelist = []
    labelslist = []
    DIRECTORY = 'path_to'

    for filename in os.listdir(DIRECTORY):
        fullfilename = DIRECTORY + filename
        if filename.endswith('.back.0.jpg'):
            #back image, original orientation
            filenamelist.append(fullfilename)
            temp = [0,0]
            labelslist.append(temp)

        elif filename.endswith('.back.90.jpg'):
            #back image, rotated clockwise 90
            filenamelist.append(fullfilename)
            temp = [0,1]
            labelslist.append(temp)

        elif filename.endswith('.back.180.jpg'):
            #back image, rotated clockwise 180
            filenamelist.append(fullfilename)
            temp = [0,2]
            labelslist.append(temp)

        elif filename.endswith('.back.270.jpg'):
            #back image, rotated clockwise 270
            filenamelist.append(fullfilename)
            temp = [0,3]
            labelslist.append(temp)

        elif filename.endswith('.front.0.jpg'):
            #front image, rotated clockwise 0
            filenamelist.append(fullfilename)
            temp = [1,0]
            labelslist.append(temp)

        elif filename.endswith('.front.90.jpg'):
            #front image, rotated clockwise 90
            filenamelist.append(fullfilename)
            temp = [1,1]
            labelslist.append(temp)

        elif filename.endswith('.front.180.jpg'):
            #front image, rotated clockwise 180
            filenamelist.append(fullfilename)
            temp = [1,2]
            labelslist.append(temp)

        elif filename.endswith('.front.270.jpg'):
            #front image, rotated clockwise 270
            filenamelist.append(fullfilename)
            temp = [1,3]
            labelslist.append(temp)

    X_train, X_test, y_train, y_test = train_test_split(filenamelist, labelslist, test_size=0.20, random_state=42, shuffle=True, stratify=labelslist)
    return X_train, X_test, y_train, y_test


def my_input_fn_train(X_train, y_train):
    filenames = tf.constant(X_train)
    labels = tf.constant(y_train)
    dataset = tf.data.Dataset.from_tensor_slices((filenames, labels))
    dataset = dataset.map(_parse_function)
    # # Shuffle, repeat, and batch the examples.
    dataset = dataset.shuffle(5000).repeat().batch(64)
    # # Build the Iterator, and return the read end of the pipeline.
    return dataset.make_one_shot_iterator().get_next()

def my_input_fn_test(X_test, y_test):
    filenames = tf.constant(X_test)
    labels = tf.constant(y_test)
    dataset = tf.data.Dataset.from_tensor_slices((filenames, labels))
    dataset = dataset.map(_parse_function)
    # # Shuffle, repeat, and batch the examples.
    dataset = dataset.shuffle(5000).repeat(1).batch(64)        
    # # Build the Iterator, and return the read end of the pipeline.
    return dataset.make_one_shot_iterator().get_next()

def my_input_fn_predict(filename):    
    filenames = tf.constant(filename)
    dataset = tf.data.Dataset.from_tensors((filenames))
    dataset = dataset.map(_parse_function_predict)
    return dataset.make_one_shot_iterator().get_next()

def main(unused_argv):

    # Create the Estimator
    mnist_classifier = tf.estimator.Estimator(
        model_fn=cnn_model_fn,
        model_dir="path_to_model_directory",
        config=tf.estimator.RunConfig(
            save_checkpoints_steps=None,
            save_checkpoints_secs=600,
            save_summary_steps=5))
    # Set up logging for predictions
    tensors_to_log_1 = {"probabilities1": "softmax_tensor_1"}
    tensors_to_log_2 = {"probabilities2": "softmax_tensor_2"}
    logging_hook_1 = tf.train.LoggingTensorHook(
        tensors=tensors_to_log_1, every_n_iter=100)
    logging_hook_2 = tf.train.LoggingTensorHook(
        tensors=tensors_to_log_2, every_n_iter=100)

    # Splitting the data into stratified train and test sets
    X_train, X_test, y_train, y_test = stratified_train_test_split_()

    #Removed the training, testing and prediction calls.

    # Code for exporting the model using export_savedmodel()
    def serving_input_receiver_fn():
        # ???? -- this is the part I am not sure about
        pass

    mnist_classifier.export_savedmodel(export_dir_base, serving_input_receiver_fn)

if __name__ == "__main__":
  tf.app.run()

Solution

  • This is the answer to the question above. I was able to solve it, so I am posting the answer for anyone who needs it.

    from __future__ import absolute_import
    from __future__ import division
    from __future__ import print_function
    
    import tensorflow as tf
    import numpy as np
    from sklearn.model_selection import train_test_split
    import os
    import matplotlib.pyplot as plt
    import matplotlib.image as mpimg
    import scipy
    from scipy import ndimage
    import scipy.misc
    
    tf.logging.set_verbosity(tf.logging.INFO)
    
    # Our application logic will be added here
    
    
    
    def cnn_model_fn(features, labels, mode):
        """Model function for CNN."""
        #tf.print("shape of features",features.shape)
        #print("type of feature ", type(features))
        # Added this for solving the issue: ServingInputReceiver wraps a single
        # (non-dict) features Tensor into a dict under the key 'feature', so it
        # must be unwrapped here at serving time (the training input_fn passes a
        # plain Tensor, hence the isinstance check).
        if isinstance(features, dict):
            features = features["feature"]
        features = tf.Print(features, [tf.shape(features), "shape of features before second reshape"], summarize=10)
        #tf.print("shape of labels ", labels.shape)
    
        # Input Layer
        input_layer = tf.reshape(features, [-1, 224, 224, 3])
        #print(type(input_layer))
        input_layer = tf.Print(input_layer, [tf.shape(input_layer), "shape of input_layer after second reshape"], summarize=10)
        #print("Successfully performed reshape operation")  
        # Convolutional Layer #1
        conv1 = tf.layers.conv2d(
          inputs=input_layer,
          filters=64,
          kernel_size=[3, 3],
          padding="same",
          activation=tf.nn.relu)
        # Convolutional Layer #2
        conv2 = tf.layers.conv2d(
          inputs=conv1,
          filters=64,
          kernel_size=[3, 3],
          padding="same",
          activation=tf.nn.relu)
        #print("output of convolutional layer 1 : ",type(conv1))
        # Pooling Layer #1
        pool1 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
    
        #print(" Successfully done with 1st layer and shape is : ", pool1.shape)
    
        # Convolutional Layer #3
        conv3 = tf.layers.conv2d(
          inputs=pool1,
          filters=128,
          kernel_size=[3, 3],
          padding="same",
          activation=tf.nn.relu)
    
        # Convolutional Layer #4
        conv4 = tf.layers.conv2d(
          inputs=conv3,
          filters=128,
          kernel_size=[3, 3],
          padding="same",
          activation=tf.nn.relu)
        pool2 = tf.layers.max_pooling2d(inputs=conv4, pool_size=[2, 2], strides=2)
    
        #print(" Successfully done with 2nd layer and shape is : ", pool2.shape)
    
        # Convolutional Layer #5
        conv5 = tf.layers.conv2d(
          inputs=pool2,
          filters=256,
          kernel_size=[3, 3],
          padding="same",
          activation=tf.nn.relu)
    
        # Convolutional Layer #6
        conv6 = tf.layers.conv2d(
          inputs=conv5,
          filters=256,
          kernel_size=[3, 3],
          padding="same",
          activation=tf.nn.relu)
    
        # Convolutional Layer #7
        conv7 = tf.layers.conv2d(
          inputs=conv6,
          filters=256,
          kernel_size=[3, 3],
          padding="same",
          activation=tf.nn.relu)
    
        pool3 = tf.layers.max_pooling2d(inputs=conv7, pool_size=[2, 2], strides=2)
    
        #print(" Successfully done with 3rd layer and shape is : ", pool3.shape)
    
        # Convolutional Layer #8
        conv8 = tf.layers.conv2d(
          inputs=pool3,
          filters=512,
          kernel_size=[3, 3],
          padding="same",
          activation=tf.nn.relu)
    
        # Convolutional Layer #9
        conv9 = tf.layers.conv2d(
          inputs=conv8,
          filters=512,
          kernel_size=[3, 3],
          padding="same",
          activation=tf.nn.relu)
    
        # Convolutional Layer #10
        conv10 = tf.layers.conv2d(
          inputs=conv9,
          filters=512,
          kernel_size=[3, 3],
          padding="same",
          activation=tf.nn.relu)
        pool4 = tf.layers.max_pooling2d(inputs=conv10, pool_size=[2, 2], strides=2)
    
        #print(" Successfully done with 4th layer and shape is : ", pool4.shape)
    
        # Convolutional Layer #11
        conv11 = tf.layers.conv2d(
          inputs=pool4,
          filters=512,
          kernel_size=[3, 3],
          padding="same",
          activation=tf.nn.relu)
    
        # Convolutional Layer #12
        conv12 = tf.layers.conv2d(
          inputs=conv11,
          filters=512,
          kernel_size=[3, 3],
          padding="same",
          activation=tf.nn.relu)
    
        # Convolutional Layer #13
        conv13 = tf.layers.conv2d(
          inputs=conv12,
          filters=512,
          kernel_size=[3, 3],
          padding="same",
          activation=tf.nn.relu)
    
    
        pool5 = tf.layers.max_pooling2d(inputs=conv13, pool_size=[2, 2], strides=2)
    
        #print(" Successfully done with 5th layer and shape is : ", pool5.shape)
    
        # Dense Layer
        pool5_flat = tf.reshape(pool5, [-1, 7 * 7 * 512])
        dense1 = tf.layers.dense(inputs=pool5_flat, units=4096, activation=tf.nn.relu)
        dense2 = tf.layers.dense(inputs=dense1, units=4096, activation=tf.nn.relu)
        dense3 = tf.layers.dense(inputs=dense2, units=1024, activation=tf.nn.relu)
        #print(dense3.shape)
        dropout = tf.layers.dropout(
          inputs=dense3, rate=0.001, training=mode == tf.estimator.ModeKeys.TRAIN)
        #print(" Completed all the layers ")
    
        # From here the network splits into two heads: one for front/back
        # classification (2 classes) and one for orientation (4 classes).
    
        logits1 = tf.layers.dense(inputs=dropout, units=2)
        logits2 = tf.layers.dense(inputs=dropout, units=4)
    
    
        #Modified this for solving the issue
        predictions1 = {
          # Generate predictions (for PREDICT and EVAL mode)
            "classes1": tf.argmax(input=logits1, axis=1),
            # Add `softmax_tensor` to the graph. It is used for PREDICT and by the
            # `logging_hook`.
            "probabilities1": tf.nn.softmax(logits1, name="softmax_tensor_1"),
        }
        #Modified this for solving the issue
        predictions2 = {
          # Generate predictions (for PREDICT and EVAL mode)
            "classes2": tf.argmax(input=logits2, axis=1),
            # Add `softmax_tensor` to the graph. It is used for PREDICT and by the
            # `logging_hook`.
            "probabilities2": tf.nn.softmax(logits2, name="softmax_tensor_2")
        }
        #Modified this for solving the issue
        if mode == tf.estimator.ModeKeys.PREDICT:
            return tf.estimator.EstimatorSpec(
                mode=mode,
                predictions=predictions1,
                export_outputs={
                    tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                        tf.estimator.export.ClassificationOutput(
                            classes=tf.as_string(predictions1['classes1'])),
                    'class2': tf.estimator.export.ClassificationOutput(
                        classes=tf.as_string(predictions2['classes2']))})
    
    
        # Calculate Loss (for both TRAIN and EVAL modes)
        loss1 = tf.losses.sparse_softmax_cross_entropy(labels=labels[:,0], logits=logits1)
        loss2 = tf.losses.sparse_softmax_cross_entropy(labels=labels[:,1], logits=logits2)
        loss1 = tf.Print(loss1, ["loss1 : ", loss1])
        loss2 = tf.Print(loss2, ["loss2 : ", loss2])
        loss = loss1 + loss2
        loss = tf.Print(loss, ["loss : ", loss])
        tf.summary.scalar('Loss1', loss1)
        tf.summary.scalar('Loss2', loss2)
        tf.summary.scalar('Loss', loss)
    
        #print("Loss of this step is : ",loss)
        # Configure the Training Op (for TRAIN mode)
        if mode == tf.estimator.ModeKeys.TRAIN:
            optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
            train_op = optimizer.minimize(
                loss=loss,
                global_step=tf.train.get_global_step())
            return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
        #print("completed training")
        # Add evaluation metrics (for EVAL mode)
        eval_metric_ops = {
            "accuracy1": tf.metrics.accuracy(
                labels=labels[:,0], predictions=predictions1["classes1"]),
            "accuracy2": tf.metrics.accuracy(
                labels=labels[:,1], predictions=predictions2["classes2"]),
            "precision1": tf.metrics.precision(labels=labels[:,0], predictions=predictions1["classes1"]),
            "precision2": tf.metrics.precision(labels=labels[:,1], predictions=predictions2["classes2"]),
            "recall1": tf.metrics.recall(labels=labels[:,0], predictions=predictions1["classes1"]),
            "recall2": tf.metrics.recall(labels=labels[:,1], predictions=predictions2["classes2"])
        }
    
        # tf.metrics.* return (value, update_op) pairs; tf.summary.scalar needs a
        # scalar tensor, so log the update op (which also yields the metric value).
        tf.summary.scalar('Accuracy_for_Front/Back', eval_metric_ops['accuracy1'][1])
        tf.summary.scalar('Accuracy_for_Orientation', eval_metric_ops['accuracy2'][1])
        tf.summary.scalar('Precision_for_Front/Back', eval_metric_ops['precision1'][1])
        tf.summary.scalar('Precision_for_Orientation', eval_metric_ops['precision2'][1])
        tf.summary.scalar('Recall_for_Front/Back', eval_metric_ops['recall1'][1])
        tf.summary.scalar('Recall_for_Orientation', eval_metric_ops['recall2'][1])
        #print("Accuracy of this step is : ", eval_metric_ops.get('accuracy1'))
        return tf.estimator.EstimatorSpec(
          mode=mode, loss=loss, eval_metric_ops=eval_metric_ops )
    
    def _parse_function(filename, label):
        filename = tf.Print(filename, [filename, " Names of files "])
        image_string = tf.read_file(filename)
        image_decoded = tf.image.decode_image(image_string)
        image_typecasted = tf.cast(image_decoded, tf.float32)
        #print("type of the image : ",type(image_typecasted))
        #image_resized = tf.image.resize_images(image_decoded, [28, 28])
        image_typecasted = tf.Print(image_typecasted, [tf.shape(image_typecasted), " shape of image_typecasted before first reshape"],summarize=10)
        image_reshaped = tf.reshape(image_typecasted, [-1, 224, 224, 3])
        image_reshaped = tf.Print(image_reshaped, [tf.shape(image_reshaped), " shape of image_reshaped after first reshape"], summarize=10)
        return image_reshaped, label
    
    # Single-argument variant used for prediction and serving, when no label is available.
    def _parse_function_predict(filename):
        image_string = tf.read_file(filename)
        image_decoded = tf.image.decode_image(image_string)
        image_typecasted = tf.cast(image_decoded, tf.float32)
        #print("type of the image : ",type(image_typecasted))
        #image_resized = tf.image.resize_images(image_decoded, [28, 28])
        image_reshaped = tf.reshape(image_typecasted, [-1, 224, 224, 3])
    
        return image_reshaped
    
    def stratified_train_test_split_():
        filenamelist = []
        labelslist = []
        DIRECTORY = 'path_to_directory'
    
        for filename in os.listdir(DIRECTORY):
            fullfilename = DIRECTORY + filename
            if filename.endswith('.back.0.jpg'):
                #back image, original orientation
                filenamelist.append(fullfilename)
                temp = [0,0]
                labelslist.append(temp)
    
            elif filename.endswith('.back.90.jpg'):
                #back image, rotated clockwise 90
                filenamelist.append(fullfilename)
                temp = [0,1]
                labelslist.append(temp)
    
            elif filename.endswith('.back.180.jpg'):
                #back image, rotated clockwise 180
                filenamelist.append(fullfilename)
                temp = [0,2]
                labelslist.append(temp)
    
            elif filename.endswith('.back.270.jpg'):
                #back image, rotated clockwise 270
                filenamelist.append(fullfilename)
                temp = [0,3]
                labelslist.append(temp)
    
            elif filename.endswith('.front.0.jpg'):
                #front image, rotated clockwise 0
                filenamelist.append(fullfilename)
                temp = [1,0]
                labelslist.append(temp)
    
            elif filename.endswith('.front.90.jpg'):
                #front image, rotated clockwise 90
                filenamelist.append(fullfilename)
                temp = [1,1]
                labelslist.append(temp)
    
            elif filename.endswith('.front.180.jpg'):
                #front image, rotated clockwise 180
                filenamelist.append(fullfilename)
                temp = [1,2]
                labelslist.append(temp)
    
            elif filename.endswith('.front.270.jpg'):
                #front image, rotated clockwise 270
                filenamelist.append(fullfilename)
                temp = [1,3]
                labelslist.append(temp)
    
    
        # Splitting the data into train and test sets using stratified sampling on the labels
        X_train, X_test, y_train, y_test = train_test_split(filenamelist, labelslist, test_size=0.20, random_state=42, shuffle=True, stratify=labelslist)
    
        #print("data is split into test and train set")
    
    
        return X_train, X_test, y_train, y_test
    
    
    def my_input_fn_train(X_train, y_train):
    
        filenames = tf.constant(X_train)
        labels = tf.constant(y_train)
        dataset = tf.data.Dataset.from_tensor_slices((filenames, labels))
        dataset = dataset.map(_parse_function)
        # # Shuffle, repeat, and batch the examples.
        dataset = dataset.shuffle(5000).repeat().batch(64)
    
        # # Build the Iterator, and return the read end of the pipeline.
        return dataset.make_one_shot_iterator().get_next()
    
    def my_input_fn_test(X_test, y_test):
    
        filenames = tf.constant(X_test)
        labels = tf.constant(y_test)
        dataset = tf.data.Dataset.from_tensor_slices((filenames, labels))
        dataset = dataset.map(_parse_function)
        # # Shuffle, repeat, and batch the examples.
        dataset = dataset.shuffle(5000).repeat(1).batch(64)
    
        # # Build the Iterator, and return the read end of the pipeline.
        return dataset.make_one_shot_iterator().get_next()
    
    def my_input_fn_predict(filename):
    
        filenames = tf.constant(filename)
        dataset = tf.data.Dataset.from_tensors((filenames))
        dataset = dataset.map(_parse_function_predict)
    
        return dataset.make_one_shot_iterator().get_next()
    
    
    def main(unused_argv):
    
        # Create the Estimator
        mnist_classifier = tf.estimator.Estimator(
            model_fn=cnn_model_fn, 
            model_dir="path_to_model_directory",
            config = tf.estimator.RunConfig( save_checkpoints_steps=None, save_checkpoints_secs=600, save_summary_steps=5))
    
        export_dir_base = 'path_to_folder'
        # Set up logging for predictions
        tensors_to_log_1 = {"probabilities1": "softmax_tensor_1"}
        tensors_to_log_2 = {"probabilities2": "softmax_tensor_2"}
        logging_hook_1 = tf.train.LoggingTensorHook(
            tensors=tensors_to_log_1, every_n_iter=100)
        logging_hook_2 = tf.train.LoggingTensorHook(
            tensors=tensors_to_log_2, every_n_iter=100)
        #print("Successfully created Estimator")
    
    
    
    
    
        #Added this for solving the issue
        def serving_input_receiver_fn():
            feature_spec = {'image/encoded': tf.FixedLenFeature(shape=[],
                                             dtype=tf.string)}
    
            serialized_tf_example = tf.placeholder(dtype=tf.string,
                                                 name='input_example_tensor')
            receiver_tensors = {'examples': serialized_tf_example}
    
            features = tf.parse_example(serialized_tf_example, feature_spec)
            jpegs = features['image/encoded']
            images = tf.map_fn(_parse_function_predict, jpegs, dtype=tf.float32)
    
            return tf.estimator.export.ServingInputReceiver(images, receiver_tensors)
    
        mnist_classifier.export_savedmodel(export_dir_base, serving_input_receiver_fn)
    
    
    
    if __name__ == "__main__":
      tf.app.run()
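
  • As a quick sanity check (a minimal sketch, not part of the original answer, assuming TensorFlow 1.x): once export_savedmodel() has written a timestamped SavedModel directory under export_dir_base, you can load it back and run a prediction against the default classification signature created from export_outputs. The timestamp '1536737000' and the image file 'some_image.front.0.jpg' below are hypothetical placeholders. The same serialized tf.Example payload is what a TensorFlow Serving client would send, since the receiver tensor in serving_input_receiver_fn() is the 'examples' placeholder of serialized protos.

    import tensorflow as tf

    export_dir = 'path_to_folder/1536737000'  # hypothetical timestamped export directory

    # Wrap the raw JPEG bytes in a serialized tf.Example, matching the
    # 'image/encoded' feature expected by serving_input_receiver_fn().
    with open('some_image.front.0.jpg', 'rb') as f:  # hypothetical input image
        encoded_jpeg = f.read()
    example = tf.train.Example(features=tf.train.Features(feature={
        'image/encoded': tf.train.Feature(
            bytes_list=tf.train.BytesList(value=[encoded_jpeg]))
    }))

    with tf.Session(graph=tf.Graph()) as sess:
        # Load the SavedModel with the 'serve' tag and look up the default
        # (classification) signature.
        meta_graph = tf.saved_model.loader.load(
            sess, [tf.saved_model.tag_constants.SERVING], export_dir)
        signature = meta_graph.signature_def[
            tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]

        input_name = list(signature.inputs.values())[0].name
        output_names = {key: info.name for key, info in signature.outputs.items()}

        # Feed a batch of one serialized Example and fetch the predicted classes/scores.
        results = sess.run(output_names,
                           feed_dict={input_name: [example.SerializeToString()]})
        print(results)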