When calling the openai.Image.create_edit function to request an edit of an existing picture with a mask, it seems to only generate the same image that I submit.
import os
import openai

class OpenAIConnect:
    def OpenAIConnectToSource():
        openai.organization = "###"
        openai.api_key = "#######"
from PIL import Image, ImageDraw
from Global.DaleAuth import OpenAIConnect
import openai

OpenAIConnect.OpenAIConnectToSource()  # connection to openai with organization and api_key

class UploadFormV2:
    def UploadAndRetrieve(self, imageInsert=None):
        image = Image.open('otters2.png')
        ConvertImage("image", imageInsert)
        transparentimagtest = Image.open('mask.png')
        ConvertImage("transparent", transparentimagtest)
        response = openai.Image.create_edit(
            image=open("image.png", 'rb'),
            mask=open("transparent.png", 'rb'),
            prompt="baby sea otter wearing a hat",
            n=2,
            size="1024x1024"
        )
        image_url = response['data']
        print(image_url)
        return image_url

# resizes and formats images for submission
def ConvertImage(imageName, imageData):
    image = imageData
    print(image.size)
    image = image.resize((1024, 1024), Image.ANTIALIAS)
    image = image.convert('RGBA')
    image.save(imageName + '.png')
These are the results returned (attached as a screenshot due to file size limitations):
This is not how it works. The mask needs to mark the area of the image that the AI may edit: the editable region must be fully transparent (alpha = 0), while the rest of the mask stays opaque. For example, if you want the otter to wear glasses, you need a mask with a transparent area around the otter's eyes; if you want the otter to wear a hat, the mask needs a transparent spot on top of the otter's head.
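As a rough illustration, here is a minimal sketch using Pillow; the file names and the box coordinates are made-up placeholders that you would replace with a region that actually covers the otter's head in your image:

from PIL import Image, ImageDraw

def make_edit_mask(source_path="image.png", mask_path="transparent.png",
                   box=(300, 0, 720, 360)):
    # Start from the resized source so the mask has the same 1024x1024 dimensions.
    mask = Image.open(source_path).convert("RGBA")
    draw = ImageDraw.Draw(mask)
    # Punch a fully transparent rectangle (alpha = 0) over the region the AI is
    # allowed to repaint -- the box here is a hypothetical area over the otter's head.
    draw.rectangle(box, fill=(0, 0, 0, 0))
    mask.save(mask_path)

Passing a mask built like this to openai.Image.create_edit means only the transparent rectangle can be repainted. Because your ConvertImage output is opaque everywhere, the API has nothing it is allowed to change, which is why you get back essentially the same image you submitted.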