I am getting incomplete answers from meta-llama/Meta-Llama-3-8B-Instruct through the Hugging Face Inference API. What can I do to improve the completeness of the responses? Here is my code:
import requests
import os
from dotenv import load_dotenv
import streamlit as st
import PyPDF2 as pdf
import warnings
warnings.filterwarnings('ignore')
load_dotenv()
key = os.getenv('HF_TOKEN')
def input_pdf_text(uploaded_file):
    # Extract the text from every page of the uploaded PDF
    reader = pdf.PdfReader(uploaded_file)
    text = ""
    for page in reader.pages:
        text += str(page.extract_text())
    return text
input_prompt = """You are an skilled ATS (Applicant Tracking System) scanner with a deep understanding of Human Resources and ATS functionality, your task is to evaluate the resume against the provided job description. give me the percentage of match if the resume matches
the job description. First the output should come as percentage and then keywords missing and last final thoughts."""
# Streamlit inputs: job description text and the resume PDF
input_text = st.text_area("Job Description")
uploaded_file = st.file_uploader("Upload your resume (PDF)", type=["pdf"])

if uploaded_file is not None:
    pdf_content = input_pdf_text(uploaded_file)
    st.write("PDF Uploaded Successfully")

submit3 = st.button("Percentage match")
API_URL = "https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-8B-Instruct"
headers = {"Authorization": f"Bearer {key}"}

def query(payload):
    response = requests.post(API_URL, headers=headers, json=payload)
    return response.json()
if submit3:
    if uploaded_file is not None:
        input_data = f"""
        input prompt: {input_prompt}
        job description: {input_text}
        resume: {pdf_content}
        """
        response1 = query({"inputs": input_data})
        data = response1[0]["generated_text"]
        st.subheader("Analysis")
        st.write(data)
I need to improve the completeness and accuracy of the responses generated by the Llama 3 model.
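For reference, this is the direction I have been considering, based on the text-generation parameters documented for the Inference API. I am assuming that max_new_tokens is what limits the output length and that the prompt should be wrapped in the Llama 3 instruct chat template, but I am not sure either of these is the actual fix:

# Sketch of the payload change I am considering (not verified):
# 1) wrap everything in the Llama 3 instruct chat template, and
# 2) pass explicit generation parameters so the output is not cut short.
prompt = (
    "<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n"
    f"{input_prompt}\n\njob description: {input_text}\n\nresume: {pdf_content}"
    "<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n"
)

response1 = query({
    "inputs": prompt,
    "parameters": {
        "max_new_tokens": 1024,     # allow a longer completion; I assume the default is much smaller
        "return_full_text": False,  # return only the generated answer, not the echoed prompt
        "temperature": 0.2,
    },
})

Is this the right approach, or is there something else I should be doing to get complete answers from this model?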