#!/usr/bin/env node
// IRC bot: responds to !chat (starts a fresh conversation) and !cont
// (continues the current one), streams the OpenAI chat completions API,
// and relays the reply to the channel line by line.

const irc = require('irc');
const axios = require('axios');
const config = require('./config.json');

/**
 * Conversation state: the message history sent to the API, the assistant
 * response currently being streamed, and the partial line not yet emitted.
 */
class Context {
  messages = [];        // [{ role, content }] history sent to the API
  currentResponse = ''; // full assistant reply accumulated so far
  currentLine = '';     // text received since the last emitted line
  inProgress = false;   // true while a streaming request is active

  addUserMessage(content) {
    this.messages.push({ role: 'user', content });
  }

  addAssistantMessage(content) {
    this.messages.push({ role: 'assistant', content });
  }

  appendCurrentResponse(text) {
    this.currentResponse += text;
    this.currentLine += text;
  }

  /**
   * Close out the line in progress and return it for emission.
   * BUG FIX: the original returned the ENTIRE accumulated response, which
   * made the bot re-send the whole transcript on every newline.
   */
  endLine() {
    const line = this.currentLine;
    this.currentLine = '';
    this.currentResponse += '\n';
    return line;
  }

  /**
   * Record the finished reply in the history, reset the streaming state,
   * and return any trailing text that has not been emitted yet.
   */
  finishCurrentResponse() {
    const tail = this.currentLine;
    this.addAssistantMessage(this.currentResponse);
    this.currentLine = '';
    this.currentResponse = '';
    this.inProgress = false;
    return tail;
  }

  // BUG FIX: the original tested `currentResponse !== ''`, which is empty
  // until the first token arrives, so the "mutex" never actually blocked
  // a second request started during that window. Use an explicit flag.
  isResponseInProgress() {
    return this.inProgress;
  }

  clear() {
    this.messages = [];
    this.currentResponse = '';
    this.currentLine = '';
  }
}

const context = new Context();

const client = new irc.Client(config.server, config.nick, {
  channels: config.channels,
});

// Listen for !chat / !cont commands and stream the reply back to the channel.
client.addListener('message', (from, to, message) => {
  const isChatCmd = message.startsWith('!chat');
  const isContCmd = message.startsWith('!cont');
  if (!isChatCmd && !isContCmd) return;
  if (context.isResponseInProgress()) return; // one request at a time
  if (isChatCmd) context.clear();            // !chat starts a new conversation

  const query = message.slice(6); // strip "!chat " / "!cont " prefix
  chatgpt(query, (line) => {
    // Skip blank lines — IRC has no use for empty PRIVMSGs.
    if (line !== '') client.say(to, line);
  });
});

/**
 * Stream a chat completion for `query` via server-sent events, invoking
 * `callback(line)` for each completed line of the response.
 *
 * NOTE(review): this parser assumes each SSE `data:` payload arrives within
 * a single stream chunk; a JSON object split across chunks will fail to
 * parse and be skipped (logged, not fatal) — same limitation as the
 * original, flagged rather than fixed here.
 */
async function chatgpt(query, callback) {
  // Primitive mutex: refuse to start a second concurrent request.
  if (context.isResponseInProgress()) return;
  context.inProgress = true;
  context.addUserMessage(query);

  const apiUrl = 'https://api.openai.com/v1/chat/completions';
  let response;
  try {
    response = await axios.post(
      apiUrl,
      {
        // BUG FIX: was `[context.messages]` — an array nested inside an
        // array, which the API rejects. Send the history array directly.
        messages: context.messages,
        model: 'gpt-3.5-turbo',
        stream: true,
      },
      {
        headers: {
          Authorization: `Bearer ${config.openaiApiKey}`,
          'Content-Type': 'application/json',
        },
        responseType: 'stream',
      }
    );
  } catch (err) {
    // BUG FIX: the original had no handler here, so any network/API
    // failure became an unhandled promise rejection and left the bot
    // wedged with a user message in the history but no reply.
    context.inProgress = false;
    console.log(err);
    callback(`API request failed: ${err.message}`);
    return;
  }

  response.data.on('data', (event) => {
    const parts = event.toString().split('\n');
    for (const part of parts) {
      console.log(part);
      if (part === 'data: [DONE]') {
        callback(context.finishCurrentResponse());
      } else if (part.startsWith('data: ')) {
        const jsonString = part.slice(part.indexOf('{'), part.lastIndexOf('}') + 1);
        try {
          const json = JSON.parse(jsonString);
          const chunk = json.choices[0].delta.content;
          // First delta usually carries only `role`; treat a missing
          // content field as a line break, as the original did.
          if (!chunk) {
            callback(context.endLine());
            continue;
          }
          // Emit one callback per completed line. split() leaves empty
          // strings for leading/trailing newlines, so the loop handles
          // them naturally — the original's hasStartNewline/hasEndNewline
          // flags double-flushed and are dropped.
          const lines = chunk.split(/\r?\n/);
          for (let i = 0; i < lines.length - 1; i++) {
            context.appendCurrentResponse(lines[i]);
            callback(context.endLine());
          }
          context.appendCurrentResponse(lines[lines.length - 1]);
          // Flush overly long lines so they fit in an IRC message.
          // BUG FIX: the original referenced an undefined `line` here,
          // throwing a ReferenceError that the catch below swallowed.
          if (context.currentLine.length > 400) {
            callback(context.endLine());
          }
        } catch (e) {
          console.log(e);
          console.log(part);
        }
      }
    }
  });

  // Release the mutex if the stream dies before [DONE] arrives.
  response.data.on('error', (err) => {
    context.inProgress = false;
    console.log(err);
  });
}