import { Configuration, OpenAIApi } from 'openai';

enum GPTCallType {
  SUMMARY = 'summary',
  COMPLETION = 'completion',
  EDIT = 'edit',
}

type GPTCallOpts = {
  model: string;
  maxTokens: number;
  temp: number;
  prompt: string;
};

const callTypeMap: { [type: string]: GPTCallOpts } = {
  summary: { model: 'text-davinci-003', maxTokens: 256, temp: 0.5, prompt: 'Summarize this text in simpler terms: ' },
  edit: { model: 'text-davinci-003', maxTokens: 256, temp: 0.5, prompt: 'Reword this: ' },
  completion: { model: 'text-davinci-003', maxTokens: 256, temp: 0.5, prompt: '' },
};

const configuration = new Configuration({
  apiKey: process.env.OPENAI_KEY,
});
const openai = new OpenAIApi(configuration);

/**
 * Translates a natural-language slide customization request into a JSON string with the
 * fields [title, presentation_transition, presentation_movement, presentation_effect,
 * config_zoom, presentation_effectDirection].
 *
 * @param inputText Natural-language description of the desired slide customization
 * @returns The model's JSON response, or an error message
 */
const gptTrailSlideCustomization = async (inputText: string) => {
  let prompt =
    'We are adding customization to a slide in a presentation. Given a natural language input, translate it into a json with the required fields: [title, presentation_transition, presentation_movement, presentation_effect, config_zoom, presentation_effectDirection]. ';
  prompt +=
    'title is the title/name of the slide. presentation_transition is a number in milliseconds for how long it should take to transition to a slide. presentation_movement is how the slide is moved onscreen. Its only possible values are: [none, center, zoom, pan, jump]. If the input contains zoom, make sure to set presentation_movement to zoom. presentation_effect is an effect applied to the slide when we transition to it. Its only possible values are: [None, Fade in, Flip, Rotate, Bounce, Roll]. presentation_effectDirection is what direction the slide comes in from. Its only possible values are: [Enter from left, Enter from right, Enter from bottom, Enter from Top, Enter from center]. config_zoom is a number from 0 to 1.0 indicating the percentage we should be zooming into the slide. ';
  prompt +=
    'If the input does not contain info for a specific key, set its value to null. Please only return the json with these keys and their values.';

  try {
    const response = await openai.createChatCompletion({
      model: 'gpt-3.5-turbo',
      messages: [
        { role: 'system', content: prompt },
        { role: 'user', content: inputText },
      ],
      temperature: 0.1,
      max_tokens: 1000,
    });
    return response.data.choices[0].message?.content;
  } catch (err) {
    console.log(err);
    return 'Error connecting with API.';
  }
};

/**
 * Calls the OpenAI completion API with the model, prompt prefix, and sampling options
 * configured for the given call type.
 *
 * @param inputText Text to process
 * @param callType Kind of request to make (summary, completion, or edit)
 * @returns AI output text, or an error message
 */
const gptAPICall = async (inputText: string, callType: GPTCallType) => {
  if (callType === GPTCallType.SUMMARY) inputText += '.';
  const opts: GPTCallOpts = callTypeMap[callType];
  try {
    const response = await openai.createCompletion({
      model: opts.model,
      max_tokens: opts.maxTokens,
      temperature: opts.temp,
      prompt: `${opts.prompt}${inputText}`,
    });
    return response.data.choices[0].text;
  } catch (err) {
    console.log(err);
    return 'Error connecting with API.';
  }
};

/**
 * Generates one or more images from a text prompt.
 *
 * @param prompt Description of the image(s) to generate
 * @param n Number of images to generate (defaults to 1)
 * @returns An array of image URLs, or undefined on failure
 */
const gptImageCall = async (prompt: string, n?: number) => {
  try {
    const response = await openai.createImage({
      prompt: prompt,
      n: n ?? 1,
      size: '1024x1024',
    });
    return response.data.data.map(data => data.url);
    // return response.data.data[0].url;
  } catch (err) {
    console.error(err);
    return;
  }
};

export { gptAPICall, gptImageCall, gptTrailSlideCustomization, GPTCallType };
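
// Example usage (illustrative sketch only, not part of the module's exports): assumes
// this module is imported from an async context and that OPENAI_KEY is set in the
// environment before the file is loaded. The input strings below are hypothetical.
//
//   const summary = await gptAPICall('A long paragraph to simplify', GPTCallType.SUMMARY);
//   const urls = await gptImageCall('A watercolor painting of a fox', 2);
//   const slideJson = await gptTrailSlideCustomization('Zoom in halfway and fade in from the left');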