Search code examples
python · tkinter · tkinter-entry · tkinter-layout · tkinter-button

How do I stream an OpenAI GPT-3.5 response and display it in a tkinter frame in Python?


I want to stream an OpenAI GPT-3.5 response and display it in a tkinter frame. The code below works, but the problem is that each streamed chunk is displayed on a new line. I want to join these streamed chunks correctly and display them in the chat frame as a complete sentence. I don't know how to do this correctly. Can someone help? Below is my code:

import tkinter as tk
from tkinter import ttk
from datetime import datetime
import openai
import json
import requests


history = []
# Create a function to use ChatGPT 3.5 turbo to answer a question based on the prompt
def get_answer_from_chatgpt(prompt, history):
    openai.api_key = "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
    print("Trying")

    messages = [
            {"role": "system", "content": "You are a helpful assistant."}
        ]

    for sender, message in history:
            messages.append({"role": sender, "content": message})

    try:
        stream = openai.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=messages,
            stream=True,
            
        )
        for chunk in stream:
            #answer = chunk.message.content.strip()
            if chunk.choices[0].delta.content is not None:
               chunk = chunk.choices[0].delta.content          
               append_to_chat_log("Gpt-3.5-turbo", chunk)
               #answer = response.choices[0].message.content.strip()
               history.append(("assistant", chunk))
               print(chunk)
        print("Streamig complete")        
    except Exception as e:
        print(e)
        return "Sorry, an error occurred while processing your request."

# Create a function to use OpenAI to answer a question based on the search results

def append_to_chat_log(sender, message):
    chat_log.config(state=tk.NORMAL)
    chat_log.insert(tk.END, f"{sender}:\n\n", ("sender",))
    chat_log.insert(tk.END, f"{message}\n\n\n")
    chat_log.tag_config("sender", font=('Arial', 12, 'bold'))
    chat_log.config(state=tk.DISABLED)
    chat_log.see(tk.END)


def send_message(event=None):
    global history
    message = message_entry.get(1.0, "end-1c") 
    message = message.strip()
    message_entry.delete(1.0, tk.END)
    message_entry.update()
    
    if not message:
        pass 
    else:
              
        append_to_chat_log("User", message)
        history.append(("user", message))
        if len(history) >4:
            history = history[-4:]
        print(message)
        response = get_answer_from_chatgpt(message, history)
        append_to_chat_log("Gpt", response)
        history.append(("assistant", response))

root = tk.Tk()

root.title("Chat")

# Maximize the window
root.attributes('-zoomed', True)

chat_frame = tk.Frame(root)
chat_frame.pack(expand=True, fill=tk.BOTH)

chat_log = tk.Text(chat_frame, state='disabled', wrap='word', width=70, height=30, font=('Arial', 12), highlightthickness=0, borderwidth=0)
chat_log.pack(side=tk.LEFT, padx=(500,0), pady=10)

message_entry = tk.Text(root, padx=17, insertbackground='white', width=70, height=1, spacing1=20, spacing3=20, font=('Open Sans', 14))
message_entry.pack(side=tk.LEFT, padx=(500, 0), pady=(0, 70))  # Adjust pady to move it slightly above the bottom
message_entry.insert(1.0, "Ask me anything...")
message_entry.mark_set("insert", "%d.%d" % (0,0))
message_entry.bind("<Return>", send_message)

root.mainloop()

Solution

  • If you want all the chunks to be logged as a single sentence, you need to modify append_to_chat_log() so that it does not insert newlines after each chunk:

    def append_to_chat_log(sender=None, message=None):
        chat_log.config(state="normal")
        if sender:
            chat_log.insert("end", f"{sender}:\n", "sender")
        if message:
            chat_log.insert("end", message)
        chat_log.config(state="disabled")
        chat_log.see("end")
        chat_log.update()
    

    Then modify get_answer_from_chatgpt() to accommodate the change:

    def get_answer_from_chatgpt(prompt, history):
        ...
        try:
            ...
            # log the sender only
            append_to_chat_log("Gpt-3.5-turbo")
            for chunk in stream:
                # log the chunk only
                append_to_chat_log(message=chunk)
            # if you want, append newlines after inserting all the chunks
            append_to_chat_log(message="\n\n\n")
            print("Streaming complete")
        except:
            ...
    

    Note that you will need to modify send_message() as well to accommodate the change to append_to_chat_log().

    Also you need to call chat_log.tag_config("sender", font=("Arial", 12, "bold")) after creating chat_log.