
Server-side (Node + Express) code for the OpenAI API integration:

import express from 'express'
import * as dotenv from 'dotenv'
import cors from 'cors'
import {
	Configuration,
	OpenAIApi
} from 'openai'

dotenv.config()

const configuration = new Configuration({
	apiKey: process.env.OPENAI_API_KEY,
	baseOptions: {
		proxy: {
			host: '127.0.0.1', // On Windows, search for "proxy" in system settings to see your own proxy address and port; they are usually the same as these
			port: 7890
		}
	}
});

const openai = new OpenAIApi(configuration);

const app = express()
app.use(cors())
app.use(express.json())

app.get('/', async (req, res) => {
	res.status(200).send({
		message: 'Hello from MY-OPENAI!'
	})
})

app.post('/', async (req, res) => {
	// Set the response headers for chunked transfer
	res.set({
		'Transfer-Encoding': 'chunked',
		'Cache-Control': 'no-cache',
		Connection: 'keep-alive',
		'Access-Control-Allow-Credentials': 'true',
		'Access-Control-Allow-Methods': 'GET, POST, OPTIONS',
		'Access-Control-Allow-Headers': 'Content-Type',
	});
	try {
		const prompt = req.body.prompt;
		const params = {
			stream: true,
			messages: [{
				role: "user",
				content: `${prompt}`
			}],
			model: "gpt-3.5-turbo",
			logprobs: true,
			top_logprobs: 2,
		};
		const response = await openai.createChatCompletion(params, {
			// Ask the OpenAI SDK to return the raw response as a stream
			responseType: "stream",
		});
		// Relay each chunk of the OpenAI stream to the client as soon as it arrives
		response.data.on("data", (data) => {
			res.write(data);
		});
		// Close the HTTP response once the upstream stream ends
		response.data.on("end", () => res.end());
		// Previous non-streaming approach, which sent everything in one response:
		// res.status(200).send(response.data);
	} catch (error) {
		console.error(error)
		res.status(500).send(error?.message || 'Something went wrong');
	}
})

app.listen(5000, () => console.log('AI server started on http://localhost:5000'))
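
To verify that the server really streams before wiring up the front end, a small Node 18+ test client can POST a prompt and print each chunk as it arrives. This is a hypothetical sketch (the prompt text is a placeholder) and should be run as an ES module, like the server above:

// POST a prompt to the local server and print the chunked response as it streams in
const response = await fetch('http://localhost:5000/', {
	method: 'POST',
	headers: { 'Content-Type': 'application/json' },
	body: JSON.stringify({ prompt: 'Hello' })
});

// Read the response body chunk by chunk instead of waiting for it to finish
const reader = response.body.getReader();
const decoder = new TextDecoder();
while (true) {
	const { done, value } = await reader.read();
	if (done) break;
	process.stdout.write(decoder.decode(value, { stream: true }));
}

If the output appears piece by piece instead of all at once, the server side is streaming correctly.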

On the front end, set enableChunked: true in uni.request and listen for each chunk of the chunked transfer inside requestTask.onHeadersReceived.
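
A minimal front-end sketch follows. It is hypothetical: it uses the WeChat mini-program style requestTask.onChunkReceived listener for chunk payloads (the paragraph above listens in requestTask.onHeadersReceived, so use whichever event your platform and uni-app version actually fire), and TextDecoder may need to be replaced by your own UTF-8 decoder on runtimes that do not provide it:

// Start the request with chunked transfer enabled so the response is delivered piece by piece
const requestTask = uni.request({
	url: 'http://localhost:5000/',
	method: 'POST',
	data: { prompt: 'Hello' },
	enableChunked: true,
	success: () => {},
	fail: (err) => console.error(err)
});

// Each callback receives one transfer-encoding chunk; res.data is an ArrayBuffer,
// so decode it to text before appending it to the page
requestTask.onChunkReceived((res) => {
	const text = new TextDecoder('utf-8').decode(new Uint8Array(res.data));
	console.log('chunk:', text);
});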

After a full day of debugging, the chunked data kept arriving in one piece. The root cause turned out to be that the server was not consuming the stream returned by the OpenAI API in real time. Once responseType: "stream" was configured and response.data.on("data", (data) => { res.write(data); }); was added, requestTask.onHeadersReceived fired multiple times as expected and the streamed data was received chunk by chunk.
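
On the receiving side, whether in the test client above or in the uni-app page, each chunk is the raw server-sent-events text of the OpenAI stream: lines of the form data: {...} followed by a final data: [DONE], with the incremental text in choices[0].delta.content. A rough parser sketch (a chunk can end in the middle of a line, so production code should buffer incomplete lines across chunks):

// Pull the incremental text out of one chunk of OpenAI stream data
function extractContent(chunkText) {
	let output = '';
	for (const line of chunkText.split('\n')) {
		const trimmed = line.trim();
		if (!trimmed.startsWith('data:')) continue; // skip blank lines
		const payload = trimmed.slice(5).trim();
		if (payload === '[DONE]') continue;         // end-of-stream marker
		try {
			const parsed = JSON.parse(payload);
			output += parsed.choices?.[0]?.delta?.content || '';
		} catch (e) {
			// Incomplete JSON means the line was split across chunks; buffer it in real code
		}
	}
	return output;
}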
