OpenAI Chat Live Example
In this example, the createChatCompletion
generator delegates to the async iterable returned by openai.chat.completions.create,
and the client-side component uses the `using`
keyword to close the stream safely in case of an interruption, iterating over the messages with `for await...of`
syntax.
/src/modules/openai/OpenAiController.ts
import { type VovkRequest, post, prefix, HttpException, HttpStatus } from 'vovk';
import OpenAI from 'openai';
@prefix('openai')
export default class OpenAiController {
  // Lazily-created OpenAI client; see the getter below for why it is not
  // instantiated eagerly at module load.
  private static _openai: OpenAI;

  // to avoid compilation errors if OPENAI_API_KEY is not set
  private static get openai() {
    if (!this._openai) {
      this._openai = new OpenAI();
    }
    return this._openai;
  }

  /**
   * Streams a chat completion back to the caller as an async generator.
   * Rejects conversations that contain more than MAX_USER_MESSAGES
   * user-authored messages.
   */
  @post('chat', { cors: true, headers: { 'Access-Control-Allow-Origin': 'https://vovk.dev' } })
  static async *createChatCompletion(
    req: VovkRequest<{ messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[] }>
  ) {
    const { messages } = await req.json();
    const MAX_USER_MESSAGES = 5;

    // Count only messages authored by the user; system/assistant turns are free.
    const userMessageCount = messages.reduce(
      (count, { role }) => (role === 'user' ? count + 1 : count),
      0
    );
    if (userMessageCount > MAX_USER_MESSAGES) {
      throw new HttpException(
        HttpStatus.BAD_REQUEST,
        `You can only send ${MAX_USER_MESSAGES} messages at a time`
      );
    }

    // Delegate directly to the OpenAI streaming iterable; each chunk is
    // yielded to the HTTP response as it arrives.
    const completionStream = await this.openai.chat.completions.create({
      messages,
      model: 'gpt-3.5-turbo',
      stream: true,
    });
    yield* completionStream;
  }
}