OpenAI Chat Example

OpenAI Chat Live Example

In this example, the createChatCompletion generator delegates to the async iterable returned by the OpenAI client's chat.completions.create call, and the client-side component uses the `using` keyword to close the stream safely in case of an interruption, iterating over the messages with `for await...of` syntax.

import { type VovkRequest, post, prefix, HttpException, HttpStatus } from 'vovk';
import OpenAI from 'openai';
export default class OpenAiController {
  private static _openai: OpenAI;
  private static get openai() {
    // to avoid compilation errors if OPENAI_API_KEY is not set
    return (this._openai ??= new OpenAI());
  @post('chat', { cors: true, headers: { 'Access-Control-Allow-Origin': '' } })
  static async *createChatCompletion(
    req: VovkRequest<{ messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[] }>
  ) {
    const { messages } = await req.json();
    const LIMIT = 5;
    if (messages.filter(({ role }) => role === 'user').length > LIMIT) {
      throw new HttpException(HttpStatus.BAD_REQUEST, `You can only send ${LIMIT} messages at a time`);
    yield* await{
      model: 'gpt-3.5-turbo',
      stream: true,

Source code (opens in a new tab)