Here's the backend server code:
// --- Express backend v1: proxies a single question string to the OpenAI chat API ---
require("dotenv").config();
const express = require("express");
const cors = require("cors");
const app = express();
const PORT = process.env.PORT || 5000;
const OpenAI = require("openai");
// OpenAI client; API key comes from the OPENAI_KEY env var.
const openai = new OpenAI({
apiKey: process.env.OPENAI_KEY,
});
// Allow only the deployed frontend and local dev origin.
app.use(
cors({
origin: ["https://xxxxx.vercel.app", "http://localhost:3000"],
})
);
// Body parsers. NOTE(review): express.json() only parses bodies sent with a
// matching Content-Type header; a request without "application/json" leaves
// req.body empty, making `question` undefined below — consistent with the
// 400 "'content' is a required property - 'messages.1'" error reported later.
app.use(express.json());
app.use(express.urlencoded({ extended: true }));
// Health-check route.
app.get("/", (req, res) => res.send("Hello World!"));
// Get answer from GPT model
// POST /generate — expects a JSON body of the shape { question: string }.
app.post("/generate", async (req, res) => {
const { question } = req.body;
try {
const response = await openai.chat.completions.create({
messages: [
{
role: "system",
content:
"xxxxxxxxxxxxxx",
},
// If `question` is undefined, this user message has no content.
{ role: "user", content: question },
],
model: "gpt-3.5-turbo",
temperature: 0.2
});
// First choice holds the assistant's reply text.
const answer = response.choices[0].message.content;
res.status(200).json({ answer: answer });
} catch (error) {
// Any OpenAI failure is logged server-side and reported as a generic 500.
console.error(error);
res.status(500).json({ message: "Error retrieving answer" });
}
});
app.listen(PORT, () => console.log(`Server up at PORT:${PORT}`));
Here's how I am trying to send a POST request in Flutter:
// Endpoint of the deployed Express API.
final url = Uri.parse('https://xxxxx-express-api.onrender.com/generate');
// Encode the prompt as the JSON body expected by POST /generate.
final body = jsonEncode({'question': prompt});
// NOTE(review): no Content-Type header is set here, so the server's
// express.json() middleware presumably never parses this body and
// req.body stays empty — the final version of this snippet adds
// headers: {"Content-Type": "application/json"} — confirm this is the fix.
final response = await http.post(url, body: body);
print(response.statusCode);
Here's the error in the backend server console: BadRequestError: 400 'content' is a required property - 'messages.1'
Here's the status code in Flutter: 500
What am I doing wrong?
EDIT
So I figured out that the prompt I was passing wasn't just a String, but a List&lt;Map&lt;String, dynamic&gt;&gt;, so I rewrote the backend code like this:
// --- Express backend v2: forwards the client-supplied messages array to OpenAI ---
require("dotenv").config();
const express = require("express");
const cors = require("cors");
const app = express();
const PORT = process.env.PORT || 5000;
const OpenAI = require("openai");
// OpenAI client; API key comes from the OPENAI_KEY env var.
const openai = new OpenAI({
apiKey: process.env.OPENAI_KEY,
});
// Allow only the deployed frontend and local dev origin.
app.use(
cors({
origin: ["https://xxxxx.vercel.app", "http://localhost:3000"],
})
);
// Body parsers. NOTE(review): as in v1, express.json() only runs when the
// request carries a JSON Content-Type; otherwise req.body is empty and
// `question` is undefined — consistent with the 400 "'messages' is a
// required property" error in the log below.
app.use(express.json());
app.use(express.urlencoded({ extended: true }));
// Health-check route.
app.get("/", (req, res) => res.send("Hello World!"));
// Get answer from GPT model
// POST /generate — now expects { question: [{role, content}, ...] } and
// passes the array straight through as the `messages` parameter.
app.post("/generate", async (req, res) => {
const { question } = req.body;
try {
const response = await openai.chat.completions.create({
// Unvalidated pass-through: whatever the client sent (or undefined).
messages: question,
model: "gpt-3.5-turbo",
temperature: 0.2
});
// First choice holds the assistant's reply text.
const answer = response.choices[0].message.content;
res.status(200).json({ answer: answer });
} catch (error) {
// Any OpenAI failure is logged server-side and reported as a generic 500.
console.error(error);
res.status(500).json({ message: "Error retrieving answer" });
}
});
app.listen(PORT, () => console.log(`Server up at PORT:${PORT}`));
Now, the log in backend server console is:
Oct 31 01:27:06 PM BadRequestError: 400 'messages' is a required property
Oct 31 01:27:06 PM at APIError.generate (/opt/render/project/src/node_modules/openai/error.js:43:20)
Oct 31 01:27:06 PM at OpenAI.makeStatusError (/opt/render/project/src/node_modules/openai/core.js:248:33)
Oct 31 01:27:06 PM at OpenAI.makeRequest (/opt/render/project/src/node_modules/openai/core.js:287:30)
Oct 31 01:27:06 PM at process.processTicksAndRejections (node:internal/process/task_queues:95:5)
Oct 31 01:27:06 PM at async /opt/render/project/src/index.js:23:22 {
Oct 31 01:27:06 PM status: 400,
Oct 31 01:27:06 PM headers: {
Oct 31 01:27:06 PM 'access-control-allow-origin': '*',
Oct 31 01:27:06 PM 'alt-svc': 'h3=":443"; ma=86400',
Oct 31 01:27:06 PM 'cf-cache-status': 'DYNAMIC',
Oct 31 01:27:06 PM 'cf-ray': 'xxx-SIN',
Oct 31 01:27:06 PM connection: 'keep-alive',
Oct 31 01:27:06 PM 'content-length': '146',
Oct 31 01:27:06 PM 'content-type': 'application/json',
Oct 31 01:27:06 PM date: 'Tue, 31 Oct 2023 07:57:06 GMT',
Oct 31 01:27:06 PM 'openai-organization': 'user-xxx',
Oct 31 01:27:06 PM 'openai-processing-ms': '4',
Oct 31 01:27:06 PM 'openai-version': '2020-10-01',
Oct 31 01:27:06 PM server: 'cloudflare',
Oct 31 01:27:06 PM 'strict-transport-security': 'max-age=15724800; includeSubDomains',
Oct 31 01:27:06 PM 'x-ratelimit-limit-requests': '3500',
Oct 31 01:27:06 PM 'x-ratelimit-limit-tokens': '90000',
Oct 31 01:27:06 PM 'x-ratelimit-remaining-requests': '3499',
Oct 31 01:27:06 PM 'x-ratelimit-remaining-tokens': '89983',
Oct 31 01:27:06 PM 'x-ratelimit-reset-requests': '17ms',
Oct 31 01:27:06 PM 'x-ratelimit-reset-tokens': '10ms',
Oct 31 01:27:06 PM 'x-request-id': 'xxx'
Oct 31 01:27:06 PM },
Oct 31 01:27:06 PM error: {
Oct 31 01:27:06 PM message: "'messages' is a required property",
Oct 31 01:27:06 PM type: 'invalid_request_error',
Oct 31 01:27:06 PM param: null,
Oct 31 01:27:06 PM code: null
Oct 31 01:27:06 PM },
Oct 31 01:27:06 PM code: null,
Oct 31 01:27:06 PM param: null,
Oct 31 01:27:06 PM type: 'invalid_request_error'
Oct 31 01:27:06 PM }
I don't understand what the problem with my request is.
I figured it out. Some changes needed to be made.
Here's backend server code:
// --- Express backend (final): proxies a chat-message array to the OpenAI chat API ---
require("dotenv").config();
const express = require("express");
const cors = require("cors");
const app = express();
const PORT = process.env.PORT || 5000;
const OpenAI = require("openai");
// OpenAI client; API key comes from the OPENAI_API_KEY env var.
const openai = new OpenAI({
apiKey: process.env.OPENAI_API_KEY,
});
app.use(cors());
app.use(express.json());
app.use(express.urlencoded({ extended: true }));
// Health-check route.
app.get("/", (req, res) => res.send("Hello World!"));
// Get answer from GPT model
// POST /generate — expects JSON body { question: [{role, content}, ...] }.
// Responds 200 {answer}, 400 on a malformed body, 500 on upstream failure.
app.post("/generate", async (req, res) => {
const question = req.body.question;
// Reject malformed requests up front with a descriptive 400 instead of
// forwarding them to OpenAI, which would fail with an opaque
// "'messages' is a required property" error surfaced as a 500.
if (!Array.isArray(question) || question.length === 0) {
return res
.status(400)
.json({ message: "'question' must be a non-empty array of chat messages" });
}
try {
const response = await openai.chat.completions.create({
messages: question,
model: "gpt-3.5-turbo",
temperature: 0.2,
});
// First choice holds the assistant's reply text.
const answer = response.choices[0].message.content;
res.status(200).json({ answer: answer });
} catch (error) {
// Log the full error server-side; return a generic message to the client.
console.error(error);
res.status(500).json({ message: "Error retrieving answer" });
}
});
app.listen(PORT, () => console.log(`Server up at PORT:${PORT}`));
Here's the flutter code:
// Endpoint of the deployed Express API.
final url = Uri.parse('https://xxxxx-express-api.onrender.com/generate');
// `prompt` is a List<Map<String, dynamic>> of chat messages (per the EDIT
// above), encoded into the { question: [...] } body the server expects.
final body = jsonEncode({'question': prompt});
// The explicit Content-Type header is what lets the server's express.json()
// middleware parse the body into req.body.
final response = await http.post(url, headers: {"Content-Type": "application/json"}, body: body);