python · tensorflow · endpoint

I have a problem with my endpoint for a saved AI model


I made an endpoint for my saved AI model. The model checks photos of an eye and tells me whether the eye has any of the diseases ['cataract', 'diabetic', 'glaucoma', 'normal']. My script is:

from PIL import Image
import numpy as np
import tensorflow.lite as tflite

model_path = 'efficientnetb3-EyeDisease-96.22.tflite'
interpreter = tflite.Interpreter(model_path)
interpreter.allocate_tensors()

image_path='1145_right.jpeg'
def preprocess_image(image_path):
    # Load and preprocess the image
    image = Image.open(image_path).convert('RGB')
    image = image.resize((224, 224))
    image_array = np.array(image) / 255.0
    image_array = np.expand_dims(image_array, axis=0)
    return image_array.astype(np.float32)

def perform_inference(image_array):
    # Perform inference
    input_tensor = interpreter.tensor(interpreter.get_input_details()[0]['index'])
    input_tensor()[0] = image_array
    interpreter.invoke()
    output_tensor = interpreter.tensor(interpreter.get_output_details()[0]['index'])
    predictions = output_tensor()
    return predictions.tolist()

if __name__ == '__main__':
    data = ['cataract', 'diabetic', 'glaucoma', 'normal'] 
    image_array = preprocess_image(image_path)
    predictions = perform_inference(image_array)
    result = predictions
    index = np.argmax(result)
    print('Inference Result:', data[index])

Every time I test my model, the result it returns is cataract:

ai model> python x.py
2024-01-18 14:35:50.276823: I tensorflow/core/util/port.cc:113] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable TF_ENABLE_ONEDNN_OPTS=0.
2024-01-18 14:35:52.465150: I tensorflow/core/util/port.cc:113] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable TF_ENABLE_ONEDNN_OPTS=0.
INFO: Created TensorFlow Lite XNNPACK delegate for CPU.
WARNING: Attempting to use a delegate that only supports static-sized tensors with a graph that has dynamic-sized tensors (tensor#299 is a dynamic-sized tensor).
Inference Result: cataract

The script runs fine, but the result is wrong. I need help getting it to classify my photos correctly.
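
One way to narrow this down (this diagnostic is not from the original post; it reuses the model and image names above) is to print the raw output vector for several different test images. If the scores barely change between clearly different eyes, the problem is usually in the preprocessing rather than in the serving code; for example, Keras EfficientNet models normally include their own rescaling and expect pixel values in 0–255, so the division by 255.0 is worth double-checking against how the model was trained and exported.

import numpy as np
import tensorflow.lite as tflite
from PIL import Image

interpreter = tflite.Interpreter('efficientnetb3-EyeDisease-96.22.tflite')
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

# Confirm what the converted model actually expects
print('expected input:', input_details[0]['shape'], input_details[0]['dtype'])

for path in ['1145_right.jpeg']:  # add more test images here
    image = Image.open(path).convert('RGB').resize((224, 224))
    batch = np.expand_dims(np.asarray(image, dtype=np.float32) / 255.0, axis=0)
    interpreter.set_tensor(input_details[0]['index'], batch)
    interpreter.invoke()
    print(path, interpreter.get_tensor(output_details[0]['index'])[0])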


Solution

  • The fixed code is below (a minimal endpoint sketch follows it):

    import torch
    import requests
    from torchvision import transforms
    from PIL import Image
    import io
    
    model_path = r"public\Aimodel\model_scripted.pt"  # raw string so the backslashes are not treated as escape sequences
    model = torch.jit.load(model_path)
    image_url = 'https://pbs.twimg.com/media/GEEQErLWAAA4GYJ?format=jpg&name=small'
    
    model.eval()
    
    # Download the image from the URL
    response = requests.get(image_url)
    image_data = response.content
    
    # Open the image using PIL
    image = Image.open(io.BytesIO(image_data)).convert("RGB")  # force 3 channels before the transform
    t = transforms.Compose(
        [
            transforms.Resize((256, 256)),
            transforms.ToTensor(),
        ]
    )
    image = t(image).unsqueeze(0)
    
    with torch.no_grad():  # inference only, no gradients needed
        pred_idx = model(image).argmax().item()
    
    classes = ["Cataract", "Diabetic", "Glaucoma", "Normal"]
    prediction = classes[pred_idx]
    print(f"Prediction: {prediction}")