From 4f4f90d7ad845bbb35f46a82fc4355bf82de123c Mon Sep 17 00:00:00 2001
From: alex
Date: Tue, 19 Nov 2024 19:58:28 +0100
Subject: [PATCH] changed for prod

---
 src/index.ts                  | 240 ++++++++--------
 src/public/index.html         | 498 +++++++++++++++++-----------------
 src/voiceEngine assemblyAI.ts | 465 ++++++++++++++++---------------
 3 files changed, 622 insertions(+), 581 deletions(-)

diff --git a/src/index.ts b/src/index.ts
index e868926..5943a87 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -1,146 +1,174 @@
-import express from 'express';
-import path from 'path';
-import { Request, Response } from 'express';
-import http from 'http';
-import { Server } from 'socket.io';
-import dotenv from 'dotenv';
-import { RealtimeClient, RealtimeUtils } from '@openai/realtime-api-beta';
-import { voiceEngineSocketUser } from './voiceEngine';
+import express from "express";
+import path from "path";
+import { Request, Response } from "express";
+import http from "http";
+import { Server } from "socket.io";
+import dotenv from "dotenv";
+import { RealtimeClient, RealtimeUtils } from "@openai/realtime-api-beta";
+import { voiceEngineSocketUser } from "./voiceEngine";
 
 dotenv.config();
 
 const app = express();
-const PORT = 50269;
-const publicFolder = path.join(__dirname, 'public');
+
+if (!process.env.PORT) {
+  console.error("Please set the PORT environment variable");
+  process.exit(1);
+}
+
+const PORT = parseInt(process.env.PORT, 10);
+
+const publicFolder = path.join(__dirname, "public");
 
 app.use(express.static(publicFolder));
 
-app.get('/', (req: Request, res: Response) => {
-  res.sendFile(path.join(publicFolder, 'index.html'));
+app.get("/", (req: Request, res: Response) => {
+  res.sendFile(path.join(publicFolder, "index.html"));
 });
 
 const server = http.createServer(app);
 const io = new Server(server);
 
 interface ConversationItem {
-  [key: string]: any;
+  [key: string]: any;
 }
 
-io.on('connection', (socket) => {
-  console.log('A user connected');
+io.on("connection", (socket) => {
+  console.log("A user connected");
 
-  voiceEngineSocketUser(socket);
+  voiceEngineSocketUser(socket);
 
-  let gptClient: null | RealtimeClient = null;
+  let gptClient: null | RealtimeClient = null;
 
-  socket.on('start', async (data) => {
-    gptClient = new RealtimeClient({ apiKey: process.env.OPENAI_API_KEY });
+  socket.on("start", async (data) => {
+    gptClient = new RealtimeClient({ apiKey: process.env.OPENAI_API_KEY });
 
-    (async () => {
-      try {
-        await gptClient.connect();
+    (async () => {
+      try {
+        await gptClient.connect();
 
-        console.log('Connected to OpenAI Realtime API');
+        console.log("Connected to OpenAI Realtime API");
 
-        socket.on('voice-data', async (audioBuffer) => {
-          try {
-            console.log('Voice data received');
+        socket.on("voice-data", async (audioBuffer) => {
+          try {
+            console.log("Voice data received");
 
-            // Send user audio, must be Int16Array or ArrayBuffer
-            // Default audio format is pcm16 with sample rate of 24,000 Hz
+            // Send user audio, must be Int16Array or ArrayBuffer
+            // Default audio format is pcm16 with sample rate of 24,000 Hz
 
-            if (audioBuffer instanceof Float32Array) {
-              console.log('Received audio data from the input worklet:');
+            if (audioBuffer instanceof Float32Array) {
+              console.log("Received audio data from the input worklet:");
 
-              if (gptClient) gptClient.appendInputAudio(RealtimeUtils.floatTo16BitPCM(audioBuffer));
-            } else if (audioBuffer instanceof Buffer) {
-              console.log('Received audio data as Buffer:');
+              if (gptClient)
+                gptClient.appendInputAudio(
+                  RealtimeUtils.floatTo16BitPCM(audioBuffer)
+                );
+            } else if (audioBuffer instanceof Buffer) {
+              console.log("Received audio data as Buffer:");
 
-              // Convert Buffer to ArrayBuffer
-              const arrayBuffer = audioBuffer.buffer.slice(audioBuffer.byteOffset, audioBuffer.byteOffset + audioBuffer.byteLength);
+              // Convert Buffer to ArrayBuffer
+              const arrayBuffer = audioBuffer.buffer.slice(
+                audioBuffer.byteOffset,
+                audioBuffer.byteOffset + audioBuffer.byteLength
+              );
 
-              // Convert ArrayBuffer to Float32Array
-              const float32Array = new Float32Array(arrayBuffer);
+              // Convert ArrayBuffer to Float32Array
+              const float32Array = new Float32Array(arrayBuffer);
 
-              if (gptClient) gptClient.appendInputAudio(RealtimeUtils.floatTo16BitPCM(float32Array));
-            } else {
-              console.error('Invalid data type received in worklet');
+              if (gptClient)
+                gptClient.appendInputAudio(
+                  RealtimeUtils.floatTo16BitPCM(float32Array)
+                );
+            } else {
+              console.error("Invalid data type received in worklet");
 
-              // log the data type and return
-              console.log('Data type:', typeof audioBuffer, audioBuffer);
-              return;
-            }
-          } catch (error) {
-            console.error('Error with OpenAI Realtime API:', error);
-          }
-        });
-
-        gptClient.on('conversation.updated', (event: ConversationItem) => {
-          const { item, delta } = event;
-          if (item.content) {
-            socket.emit('openai-response', item.content);
-          }
-          console.log('Playing audio response...', delta);
-          if (delta && delta.audio) {
-            socket.emit('openai-audio', delta.audio);
-          }
-          console.log('Conversation updated:', event);
-        });
-
-        gptClient.on('conversation.item.completed', (event: ConversationItem) => {
-          const { item } = event;
-          console.log('Conversation item completed:', item);
-
-          if (item.type === 'message' && item.role === 'assistant' && item.formatted && item.formatted.audio) {
-            console.log('Playing audio response...');
-            //socket.emit('openai-audio', item.formatted.audio);
-          } else {
-            console.log('No audio content in this item.');
-          }
-        });
-      } catch (error) {
-        console.error('Error connecting to OpenAI Realtime API:', error);
+              // log the data type and return
+              console.log("Data type:", typeof audioBuffer, audioBuffer);
+              return;
             }
-    })();
-
-    socket.on('disconnect', () => {
-      console.log('A user disconnected');
-
-      if (gptClient) gptClient.disconnect();
+          } catch (error) {
+            console.error("Error with OpenAI Realtime API:", error);
+          }
         });
 
-    socket.on('end', () => {
-      console.log('A user ended the conversation');
-      if (gptClient) gptClient.disconnect();
+        gptClient.on("conversation.updated", (event: ConversationItem) => {
+          const { item, delta } = event;
+          if (item.content) {
+            socket.emit("openai-response", item.content);
+          }
+          console.log("Playing audio response...", delta);
+          if (delta && delta.audio) {
+            socket.emit("openai-audio", delta.audio);
+          }
+          console.log("Conversation updated:", event);
         });
 
-    gptClient.updateSession({
-      instructions: `Du bist beim Kundensupport von Jannex und möchtest eine Erinnerung für ein Termin nachfragen.
-Bitte spreche mit einer ruhigen Stimme.`,
-    });
-    gptClient.updateSession({ voice: 'ballad' });
-    gptClient.updateSession({
-      turn_detection: { type: 'server_vad', threshold: 0.6, prefix_padding_ms: 300, silence_duration_ms: 500 },
-      input_audio_transcription: { model: 'whisper-1' },
-      input_audio_format: 'pcm16',
-      output_audio_format: 'pcm16',
-      max_response_output_tokens: 1500,
-      modalities: ['audio', 'text'],
-    });
+        gptClient.on(
+          "conversation.item.completed",
+          (event: ConversationItem) => {
+            const { item } = event;
+            console.log("Conversation item completed:", item);
 
-    gptClient.on('conversation.updated', (event: ConversationItem) => {
-      const { item, delta } = event;
-      if (gptClient) {
-        const items = gptClient.conversation.getItems();
+            if (
+              item.type === "message" &&
+              item.role === "assistant" &&
+              item.formatted &&
+              item.formatted.audio
+            ) {
+              console.log("Playing audio response...");
+              //socket.emit('openai-audio', item.formatted.audio);
+            } else {
+              console.log("No audio content in this item.");
             }
-    });
+          }
+        );
+      } catch (error) {
+        console.error("Error connecting to OpenAI Realtime API:", error);
+      }
+    })();
 
-    //gptClient.sendUserMessageContent([{ type: 'input_text', text: `Wie geht es dir?` }]);
-    gptClient.createResponse();
+    socket.on("disconnect", () => {
+      console.log("A user disconnected");
+
+      if (gptClient) gptClient.disconnect();
     });
+
+    socket.on("end", () => {
+      console.log("A user ended the conversation");
+      if (gptClient) gptClient.disconnect();
+    });
+
+    gptClient.updateSession({
+      instructions: `Du bist beim Kundensupport von Jannex und möchtest eine Erinnerung für ein Termin nachfragen.
+Bitte spreche mit einer ruhigen Stimme.`,
+    });
+    gptClient.updateSession({ voice: "ballad" });
+    gptClient.updateSession({
+      turn_detection: {
+        type: "server_vad",
+        threshold: 0.6,
+        prefix_padding_ms: 300,
+        silence_duration_ms: 500,
+      },
+      input_audio_transcription: { model: "whisper-1" },
+      input_audio_format: "pcm16",
+      output_audio_format: "pcm16",
+      max_response_output_tokens: 1500,
+      modalities: ["audio", "text"],
+    });
+
+    gptClient.on("conversation.updated", (event: ConversationItem) => {
+      const { item, delta } = event;
+      if (gptClient) {
+        const items = gptClient.conversation.getItems();
+      }
+      // Handle the updated conversation items
+    });
+
+    //gptClient.sendUserMessageContent([{ type: 'input_text', text: `Wie geht es dir?` }]);
+    gptClient.createResponse();
+  });
 });
 
-server.listen(PORT, '127.0.0.1', () => {
-  console.log(`Server läuft unter http://localhost:${PORT}`);
+server.listen(PORT, "127.0.0.1", () => {
+  console.log(`Server läuft unter http://localhost:${PORT}`);
 });
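Both the old and the new handler funnel microphone samples through RealtimeUtils.floatTo16BitPCM before calling appendInputAudio, because the Realtime API's default input format is 16-bit PCM at 24 kHz. The patch relies on the helper shipped with @openai/realtime-api-beta; purely as an illustrative sketch (not the library's actual implementation), converting Web Audio floats in the -1..1 range to little-endian PCM16 looks roughly like this:

// Rough sketch of a Float32 -> PCM16 conversion; illustrative only.
function floatTo16BitPCMSketch(float32: Float32Array): ArrayBuffer {
  const out = new ArrayBuffer(float32.length * 2);
  const view = new DataView(out);
  for (let i = 0; i < float32.length; i++) {
    const s = Math.max(-1, Math.min(1, float32[i])); // clamp to [-1, 1]
    view.setInt16(i * 2, s < 0 ? s * 0x8000 : s * 0x7fff, true); // scale to int16, little-endian
  }
  return out;
}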
diff --git a/src/public/index.html b/src/public/index.html
index f6ec391..8e48a79 100644
--- a/src/public/index.html
+++ b/src/public/index.html
@@ -1,276 +1,272 @@
 [The markup in this hunk did not survive extraction. Recoverable page text:
 title "Voice Call with a voice bot", heading "Voice Call with a Voice Bot",
 a "Status: Idle" line, and "Input:" / "Output:" sections showing "---". The
 old file ended without a trailing newline. The recoverable tail of the new
 inline script is:]
+        // Clear the audio queue fade out
+        const fadeOut = setInterval(() => {
+          if (audioQueue.length > 0) {
+            audioQueue = audioQueue.slice(0, audioQueue.length - 1);
+          } else {
+            clearInterval(fadeOut);
+          }
+        }, 100);
+      });
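index.html is the browser side of the "start" / "voice-data" / "openai-audio" events handled in src/index.ts. Since the page's script did not survive, the following is only a hypothetical minimal client that would satisfy that contract — it is not the original file. It assumes socket.io-client is available and that Float32Array chunks are accepted on "voice-data":

// Hypothetical client sketch (not from the patch): capture mic audio at 24 kHz
// and stream Float32Array chunks to the server.
import { io } from "socket.io-client";

const socket = io();

async function startCall(): Promise<void> {
  socket.emit("start", {});

  const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
  const ctx = new AudioContext({ sampleRate: 24000 }); // server expects pcm16 @ 24 kHz
  const source = ctx.createMediaStreamSource(stream);

  // ScriptProcessorNode keeps the sketch short; an AudioWorklet would be the
  // production-grade choice (the server log mentions an "input worklet").
  const processor = ctx.createScriptProcessor(4096, 1, 1);
  processor.onaudioprocess = (e) => {
    // Copy the samples: the underlying buffer is reused by the audio thread.
    socket.emit("voice-data", new Float32Array(e.inputBuffer.getChannelData(0)));
  };
  source.connect(processor);
  processor.connect(ctx.destination);

  socket.on("openai-audio", (chunk: ArrayBuffer) => {
    // Queue and play back the PCM16 chunk here (playback code omitted).
  });
}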
diff --git a/src/voiceEngine assemblyAI.ts b/src/voiceEngine assemblyAI.ts
index cf82d7b..142380a 100644
--- a/src/voiceEngine assemblyAI.ts
+++ b/src/voiceEngine assemblyAI.ts
@@ -1,59 +1,61 @@
-import { Socket } from 'socket.io';
-import { DefaultEventsMap } from 'socket.io/dist/typed-events';
+import { Socket } from "socket.io";
+import { DefaultEventsMap } from "socket.io/dist/typed-events";
 
-import { createClient, LiveTranscriptionEvents } from '@deepgram/sdk';
+import { createClient, LiveTranscriptionEvents } from "@deepgram/sdk";
 
-import dotenv from 'dotenv';
-import { RealtimeUtils } from '@openai/realtime-api-beta';
-import { ElevenLabsClient } from 'elevenlabs';
-import { OptimizeStreamingLatency } from 'elevenlabs/api';
-import { Writable } from 'stream';
-import Cartesia from '@cartesia/cartesia-js';
-import WS from 'ws';
-import OpenAI from 'openai';
-import { ChatCompletionMessageParam } from 'openai/resources';
+import dotenv from "dotenv";
+import { RealtimeUtils } from "@openai/realtime-api-beta";
+import { ElevenLabsClient } from "elevenlabs";
+import { OptimizeStreamingLatency } from "elevenlabs/api";
+import { Writable } from "stream";
+import Cartesia from "@cartesia/cartesia-js";
+import WS from "ws";
+import OpenAI from "openai";
+import { ChatCompletionMessageParam } from "openai/resources";
 
-import elevenlabs_wss from './elevenlabs_wss';
-import fs from 'fs';
+import elevenlabs_wss from "./elevenlabs_wss";
+import fs from "fs";
 
 dotenv.config();
 
-import { AssemblyAI, RealtimeTranscript } from 'assemblyai';
+import { AssemblyAI, RealtimeTranscript } from "assemblyai";
 
 const assemblyAI = new AssemblyAI({
-  apiKey: process.env.ASSEMBLYAI_API_KEY || '',
+  apiKey: process.env.ASSEMBLYAI_API_KEY || "",
 });
 
 const cartesia = new Cartesia({
-  apiKey: process.env.CARTESIA_API_KEY,
+  apiKey: process.env.CARTESIA_API_KEY,
 });
 
 const openai = new OpenAI({
-  apiKey: process.env.OpenAI_API_KEY,
+  apiKey: process.env.OpenAI_API_KEY,
 });
 
 // @ts-ignore
 global.WebSocket = WS;
 
-export function voiceEngineSocketUser(socket: Socket) {
-  let transcript = '';
-  let currentSpeachTime = 0;
-  let currentSpeach: {
-    charStartTimesMs: number[];
-    chars: string[];
-  }[] = [];
+export function voiceEngineSocketUser(
+  socket: Socket
+) {
+  let transcript = "";
+  let currentSpeachTime = 0;
+  let currentSpeach: {
+    charStartTimesMs: number[];
+    chars: string[];
+  }[] = [];
 
-  function logTranscript(prefix: string, text: string) {
-    transcript += prefix + ':\t' + text + '\n';
-  }
+  function logTranscript(prefix: string, text: string) {
+    transcript += prefix + ":\t" + text + "\n";
+  }
 
-  console.log('A user connected');
+  console.log("A user connected");
 
-  socket.on('bot-start', async () => {
-    let userMessages: ChatCompletionMessageParam[] = [
-      {
-        role: 'system',
-        content: `Telefonvertriebler bei Sentrovo
+  socket.on("bot-start", async () => {
+    let userMessages: ChatCompletionMessageParam[] = [
+      {
+        role: "system",
+        content: `Telefonvertriebler bei Sentrovo
 
 Rolle
 Du bist Kevin von Sentrovo, einer KI-Agentur, die spezialisierte KI-Dienstleistungen anbietet. Deine Hauptaufgabe besteht darin, potenzielle Kunden durch Outbound-Anrufe zu erreichen und ein Kennenlerngespräch für ein KI-Audit zu vereinbaren.
 Du kennst die typischen Vorteile von KI-Lösungen für Unternehmen und kannst anschaulich erklären, wie ein Audit dabei hilft, Potenziale zu identifizieren.
 
 Heutiges Datum: {{now}}
@@ -122,187 +124,199 @@
 Bestätigung und Abschluss
 „Super, ich habe den Termin am [Datum] um [Uhrzeit] für uns eingetragen. Ich freue mich auf das Gespräch und bedanke mich für Ihre Zeit. Bis dann!“
 Taktik: Positiv und locker abschließen, um den Termin als einfach und unverbindlich zu präsentieren.
 
 Hinweis: Der Ton bleibt freundlich und entspannt, um Vertrauen aufzubauen und die Hemmschwelle für ein Kennenlerngespräch zu senken.`,
-      },
-    ];
-
-    const vars: { [key: string]: string } = {
-      Vorname: 'Max',
-      Nachname: 'Mustermann',
-      Unternehmen: 'Musterfirma',
-      Position: 'Geschäftsführer',
-      now: new Date().toLocaleDateString(),
-    };
-
-    for (const message of userMessages) {
-      if (message.content) {
-        for (const key in vars) {
-          if (message.content && message.role === 'system') {
-            if (typeof message.content === 'string') {
-              message.content = message.content.replace(new RegExp(`{{${key}}}`, 'g'), vars[key as keyof typeof vars]);
-            }
-          }
-        }
-      }
-    }
-
-    console.log('Bot started');
-
-    async function speakText(text: string) {
-      console.log('Generated message:', text);
-
-      const time = new Date().getTime();
-      let lastTime = 0;
-      let firstMessage = true;
-      const labs11 = await elevenlabs_wss.connect(
-        {
-          voice_id: 'N2lVS1w4EtoT3dr4eOWO',
-          model_id: 'eleven_turbo_v2_5',
-
-          optimize_streaming_latency: 4,
-          output_format: 'pcm_24000',
-          language_code: 'de',
-          //sync_alignment: true,
-          inactivity_timeout: 20,
-        },
-        (data, randomUUID) => {
-          if (!data.audio) return;
-
-          const audioBuffer = Buffer.from(data.audio, 'base64');
-
-          const audioBufferArray = new Uint8Array(audioBuffer);
-
-          socket.emit('openai-audio', audioBufferArray, randomUUID);
-          console.log('Received audio data from Eleven Labs');
-
-          if (data.normalizedAlignment) {
-            if (firstMessage) {
-              firstMessage = false;
-              currentSpeachTime = new Date().getTime();
-              currentSpeach = [];
-            }
-
-            currentSpeach.push(data.normalizedAlignment);
-          }
-        }
-      );
-
-      const ws11 = labs11.socket;
-
-      socket.emit('openai-audio-start', labs11.randomUUID);
-
-      console.log('Connected to Eleven Labs. Took', new Date().getTime() - time, 'ms');
-
-      elevenlabs_wss.generate(ws11, {
-        flush: true,
-        text: text + ' ',
-
-        voice_settings: {
-          style: 0.5,
-          use_speaker_boost: true,
-          stability: 0.5,
-          similarity_boost: 0.5,
-        },
-      });
-      elevenlabs_wss.generate(ws11, {
-        //flush: true,
-        text: '',
-      });
-    }
-
-    async function generateVoiceMessage() {
-      const output = await openai.beta.chat.completions.parse({
-        model: 'gpt-4o',
-        temperature: 0.8,
-        //max_completion_tokens: 100,
-        messages: [...userMessages],
-      });
-
-      const text = output.choices[0].message.content;
-
-      if (!text) return;
-
-      speakText(text);
-    }
-
-    function addMessageToUser(message: ChatCompletionMessageParam) {
-      userMessages.push(message);
-    }
-
-    function addLastMessageToChat() {
-      let oldText = '';
-      let lastMs = 0;
-
-      const speakOffset = new Date().getTime() - currentSpeachTime;
-
-      let inrerrupt = false;
-
-      for (const alignment of currentSpeach) {
-        let index = 0;
-        for (const char of alignment.chars) {
-          const ms = alignment.charStartTimesMs[index];
-
-          if (lastMs + ms < speakOffset) {
-            oldText += char;
-          } else {
-            inrerrupt = true;
-            break;
-          }
-
-          index++;
-        }
-
-        lastMs = alignment.charStartTimesMs[alignment.charStartTimesMs.length - 1];
-      }
-
-      if (inrerrupt) {
-        oldText += ' ... **ABGEBROCHEN**';
-      }
-
-      if (oldText) {
-        addMessageToUser({
-          role: 'assistant',
-          content: oldText,
-        });
-      }
-
-      logTranscript('AI', oldText);
-    }
-
-    speakText('Hallo hier ist Kevin von Sentrovo, spreche ich hier mit Herr Mustermann?');
-
-    const transcriber = assemblyAI.realtime.transcriber({
-      sampleRate: 16_000,
-      encoding: 'pcm_s16le',
-    });
-
-    transcriber.on('open', ({ sessionId }) => {
-      console.log(`Session opened with ID: ${sessionId}`);
-    });
-
-    transcriber.on('error', (error: Error) => {
-      console.error('Error:', error);
-    });
-
-    transcriber.on('close', (code: number, reason: string) => console.log('Session closed:', code, reason));
-
-    transcriber.on('transcript', (transcript: RealtimeTranscript) => {
-      if (!transcript.text) {
-        return;
-      }
-
-      if (transcript.message_type === 'PartialTranscript') {
-        console.log('Partial:', transcript.text);
-      } else {
-        console.log('Final:', transcript.text);
-      }
-    });
-
-    console.log('Connecting to real-time transcript service');
-    await transcriber.connect();
-
-    console.log('Starting recording');
-
-    /*
+      },
+    ];
+
+    const vars: { [key: string]: string } = {
+      Vorname: "Max",
+      Nachname: "Mustermann",
+      Unternehmen: "Musterfirma",
+      Position: "Geschäftsführer",
+      now: new Date().toLocaleDateString(),
+    };
+
+    for (const message of userMessages) {
+      if (message.content) {
+        for (const key in vars) {
+          if (message.content && message.role === "system") {
+            if (typeof message.content === "string") {
+              message.content = message.content.replace(
+                new RegExp(`{{${key}}}`, "g"),
+                vars[key as keyof typeof vars]
+              );
+            }
+          }
+        }
+      }
+    }
+
+    console.log("Bot started");
+
+    async function speakText(text: string) {
+      console.log("Generated message:", text);
+
+      const time = new Date().getTime();
+      let lastTime = 0;
+      let firstMessage = true;
+      const labs11 = await elevenlabs_wss.connect(
+        {
+          voice_id: "N2lVS1w4EtoT3dr4eOWO",
+          model_id: "eleven_turbo_v2_5",
+
+          optimize_streaming_latency: 4,
+          output_format: "pcm_24000",
+          language_code: "de",
+          //sync_alignment: true,
+          inactivity_timeout: 20,
+        },
+        (data, randomUUID) => {
+          if (!data.audio) return;
+
+          const audioBuffer = Buffer.from(data.audio, "base64");
+
+          const audioBufferArray = new Uint8Array(audioBuffer);
+
+          socket.emit("openai-audio", audioBufferArray, randomUUID);
+          console.log("Received audio data from Eleven Labs");
+
+          if (data.normalizedAlignment) {
+            if (firstMessage) {
+              firstMessage = false;
+              currentSpeachTime = new Date().getTime();
+              currentSpeach = [];
+            }
+
+            currentSpeach.push(data.normalizedAlignment);
+          }
+        }
+      );
+
+      const ws11 = labs11.socket;
+
+      socket.emit("openai-audio-start", labs11.randomUUID);
+
+      console.log(
+        "Connected to Eleven Labs. Took",
+        new Date().getTime() - time,
+        "ms"
+      );
+
+      elevenlabs_wss.generate(ws11, {
+        flush: true,
+        text: text + " ",
+
+        voice_settings: {
+          style: 0.5,
+          use_speaker_boost: true,
+          stability: 0.5,
+          similarity_boost: 0.5,
+        },
+      });
+      elevenlabs_wss.generate(ws11, {
+        //flush: true,
+        text: "",
+      });
+    }
+
+    async function generateVoiceMessage() {
+      const output = await openai.beta.chat.completions.parse({
+        model: "gpt-4o",
+        temperature: 0.8,
+        //max_completion_tokens: 100,
+        messages: [...userMessages],
+      });
+
+      const text = output.choices[0].message.content;
+
+      if (!text) return;
+
+      speakText(text);
+    }
+
+    function addMessageToUser(message: ChatCompletionMessageParam) {
+      userMessages.push(message);
+    }
+
+    function addLastMessageToChat() {
+      let oldText = "";
+      let lastMs = 0;
+
+      const speakOffset = new Date().getTime() - currentSpeachTime;
+
+      let inrerrupt = false;
+
+      for (const alignment of currentSpeach) {
+        let index = 0;
+        for (const char of alignment.chars) {
+          const ms = alignment.charStartTimesMs[index];
+
+          if (lastMs + ms < speakOffset) {
+            oldText += char;
+          } else {
+            inrerrupt = true;
+            break;
+          }
+
+          index++;
+        }
+
+        lastMs =
+          alignment.charStartTimesMs[alignment.charStartTimesMs.length - 1];
+      }
+
+      if (inrerrupt) {
+        oldText += " ... **ABGEBROCHEN**";
+      }
+
+      if (oldText) {
+        addMessageToUser({
+          role: "assistant",
+          content: oldText,
+        });
+      }
+
+      logTranscript("AI", oldText);
+    }
+
+    speakText(
+      "Hallo hier ist Kevin von Sentrovo, spreche ich hier mit Herr Mustermann?"
+    );
+
+    const transcriber = assemblyAI.realtime.transcriber({
+      sampleRate: 16_000,
+      encoding: "pcm_s16le",
+    });
+
+    transcriber.on("open", ({ sessionId }) => {
+      console.log(`Session opened with ID: ${sessionId}`);
+    });
+
+    transcriber.on("error", (error: Error) => {
+      console.error("Error:", error);
+    });
+
+    transcriber.on("close", (code: number, reason: string) =>
+      console.log("Session closed:", code, reason)
+    );
+
+    transcriber.on("transcript", (transcript: RealtimeTranscript) => {
+      if (!transcript.text) {
+        return;
+      }
+
+      if (transcript.message_type === "PartialTranscript") {
+        console.log("Partial:", transcript.text);
+      } else {
+        console.log("Final:", transcript.text);
+      }
+    });
+
+    console.log("Connecting to real-time transcript service");
+    await transcriber.connect();
+
+    console.log("Starting recording");
+
+    /*
 { //data.speech_final
 if (data.is_final) {
@@ -336,47 +350,50 @@ Hinweis: Der Ton bleibt freundlich und entspannt, um Vertrauen aufzubauen und di
       //console.log('ts\t', data);
     }
   */
 
-  // STEP 4: Fetch the audio stream and send it to the live transcription connection
-
-  socket.on('bot-voice-data', (audioBuffer: any) => {
-    // Convert Buffer to ArrayBuffer
-    const arrayBuffer = audioBuffer.buffer.slice(audioBuffer.byteOffset, audioBuffer.byteOffset + audioBuffer.byteLength);
-
-    // Convert ArrayBuffer to Float32Array
-    const float32Array = new Float32Array(arrayBuffer);
-
-    //console.log('Received audio data from User:', data);
-    //dgConnection.send(RealtimeUtils.floatTo16BitPCM(float32Array));
-    //transcriber.stream()
-
-    const audioBufferArray = RealtimeUtils.floatTo16BitPCM(float32Array);
-
-    transcriber.sendAudio(audioBufferArray);
-  });
-
-  function stopConversation() {
-    console.log('Ending conversation');
-    addLastMessageToChat();
-
-    //dgConnection.disconnect();
-    socket.emit('bot-stopSpeaking');
-
-    // write the transcript to the file
-    const uuid = new Date().getTime();
-
-    // create folder
-    fs.mkdirSync(`transcripts/${uuid}`, { recursive: true });
-
-    fs.writeFileSync(`transcripts/${uuid}/transcript.txt`, transcript);
-  }
-
-  socket.on('bot-end', () => {
-    stopConversation();
-  });
-
-  socket.on('disconnect', () => {
-    stopConversation();
-    console.log('A user disconnected');
-  });
+    // STEP 4: Fetch the audio stream and send it to the live transcription connection
+
+    socket.on("bot-voice-data", (audioBuffer: any) => {
+      // Convert Buffer to ArrayBuffer
+      const arrayBuffer = audioBuffer.buffer.slice(
+        audioBuffer.byteOffset,
+        audioBuffer.byteOffset + audioBuffer.byteLength
+      );
+
+      // Convert ArrayBuffer to Float32Array
+      const float32Array = new Float32Array(arrayBuffer);
+
+      //console.log('Received audio data from User:', data);
+      //dgConnection.send(RealtimeUtils.floatTo16BitPCM(float32Array));
+      //transcriber.stream()
+
+      const audioBufferArray = RealtimeUtils.floatTo16BitPCM(float32Array);
+
+      transcriber.sendAudio(audioBufferArray);
+    });
+
+    function stopConversation() {
+      console.log("Ending conversation");
+      addLastMessageToChat();
+
+      //dgConnection.disconnect();
+      socket.emit("bot-stopSpeaking");
+
+      // write the transcript to the file
+      const uuid = new Date().getTime();
+
+      // create folder
+      fs.mkdirSync(`transcripts/${uuid}`, { recursive: true });
+
+      fs.writeFileSync(`transcripts/${uuid}/transcript.txt`, transcript);
+    }
+
+    socket.on("bot-end", () => {
+      stopConversation();
+    });
+
+    socket.on("disconnect", () => {
+      stopConversation();
+      console.log("A user disconnected");
+    });
+  });
 }
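The interruption handling in addLastMessageToChat works by comparing how long audio has actually been playing (now minus currentSpeachTime) against the per-character start times that Eleven Labs reports in normalizedAlignment; whatever was spoken before the cut-off is kept as the assistant's message and the remainder is marked **ABGEBROCHEN**. A condensed sketch of that bookkeeping (illustrative, names not from the patch; the patch's own version offsets each frame by the previous frame's last timestamp rather than a running total):

// Given alignment frames and the milliseconds of audio actually played,
// return the text that was audibly spoken and whether playback was cut short.
interface AlignmentFrame {
  charStartTimesMs: number[];
  chars: string[];
}

function spokenPrefix(
  frames: AlignmentFrame[],
  playedMs: number
): { text: string; interrupted: boolean } {
  let text = "";
  let offsetMs = 0; // start time of the current frame within the utterance
  for (const frame of frames) {
    for (let i = 0; i < frame.chars.length; i++) {
      if (offsetMs + frame.charStartTimesMs[i] >= playedMs) {
        return { text, interrupted: true };
      }
      text += frame.chars[i];
    }
    offsetMs += frame.charStartTimesMs[frame.charStartTimesMs.length - 1] ?? 0;
  }
  return { text, interrupted: false };
}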