/**
* Module for solving requests using the OpenAI GPT-3.5-turbo chat completion API.
* @module solve
*/
import { ChatCompletionRequestMessage, Configuration, CreateChatCompletionRequest, OpenAIApi } from 'openai'
export const openai = new OpenAIApi(new Configuration({
  apiKey: process.env.OPENAI_API_KEY
}))
/**
* Type definition for the input request to be passed to the solve function.
* It can be either a string or an array of ChatCompletionRequestMessage objects.
*/
export type SolveRequest = string | Array<ChatCompletionRequestMessage>
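// Example (illustrative only): both request shapes are accepted.
// const asPrompt: SolveRequest = 'Summarize the following text.'
// const asMessages: SolveRequest = [
//   { role: 'system', content: 'You are a helpful assistant.' },
//   { role: 'user', content: 'Summarize the following text.' }
// ]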
/**
* Type definition for the options that can be passed to the solve function.
* It includes optional parameters for model configuration, exponential backoff and verbosity.
* @property {number} [initial_delay=4000] - Exponential backoff initial delay.
* @property {number} [max_retries=4] - Exponential backoff max retries.
* @property {number} [delay_exponential=2] - Exponential backoff delay exponential.
* @property {boolean} [verbose=false] - Verbose configuration.
*/
export type SolveRequestOptions = Partial<Omit<CreateChatCompletionRequest, 'model' | 'messages'>> & {
  initial_delay?: number;
  max_retries?: number;
  delay_exponential?: number;
  verbose?: boolean;
}
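// Example (illustrative only): backoff settings sit alongside any other chat
// completion parameters, which are forwarded to the API unchanged.
// const options: SolveRequestOptions = {
//   initial_delay: 1000,
//   max_retries: 2,
//   temperature: 0.5,
//   verbose: true
// }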
/**
* Interface for the response returned by the solve function.
* It includes the status code and the data (string) generated by GPT-3.5-turbo.
* @property {number} status - Response status
* @property {string} data - Response data
*/
export interface SolveResponse {
  status: number;
  data: string;
}
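// Note: on success `data` holds the raw model output; on failure it holds a
// JSON string such as '{"error": "OpenAI API Error", "text": "..." }'.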
/**
* Formats the input request into an array of ChatCompletionRequestMessage objects.
* @param {SolveRequest} request - The input request to be formatted.
* @returns {Array<ChatCompletionRequestMessage>} - Array of ChatCompletionRequestMessage objects.
*/
function formatRequest(request: SolveRequest): Array<ChatCompletionRequestMessage> {
  const messages: Array<ChatCompletionRequestMessage> = []
  if (Array.isArray(request)) {
    messages.push(...request)
  } else if (typeof request === 'string') {
    messages.push({
      role: 'system',
      content: request
    });
  } else {
    throw {
      status: 0,
      data: '{"error": "Invalid request format", "text": "void" }'
    }
  }
  return messages;
}
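// Example (illustrative only): a plain string becomes a single system message.
// formatRequest('Say hello.') // [{ role: 'system', content: 'Say hello.' }]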
/**
* Function for formatting errors returned by the OpenAI API.
* @function
* @param {number} status - The HTTP status code of the error.
* @param {string} error - The error name or code.
* @param {string} text - The error message.
* @returns {SolveResponse} - Formatted error object with status code and error message.
*/
function formatError(status: number, error: string, text: string): SolveResponse {
  return {
    status,
    data: `{"error": "${error}", "text": "${text}" }`
  };
}
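// Example (illustrative only):
// formatError(429, 'Too Many Requests', 'rate limited')
// // { status: 429, data: '{"error": "Too Many Requests", "text": "rate limited" }' }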
/**
* Function for making a request to the OpenAI GPT-3.5-turbo API to complete text.
* @function
* @async
* @param {Omit<CreateChatCompletionRequest, 'model'>} request - The input request to be processed by GPT-3.5-turbo. It should be an object that conforms to the CreateChatCompletionRequest interface, with the 'model' property excluded.
* @returns {Promise<string>} - The generated text content from the API response, or an empty string if the response does not contain any content.
*/
async function callGPT(request: Omit<CreateChatCompletionRequest, 'model'>) {
  return (await openai.createChatCompletion({
    model: 'gpt-3.5-turbo',
    temperature: 0,
    top_p: 1,
    frequency_penalty: 0,
    presence_penalty: 0,
    max_tokens: 2000,
    n: 1,
    ...request
  })).data.choices[0]!.message!.content || ''
}
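// Note: because `...request` is spread last, any property supplied by the
// caller (e.g. temperature or max_tokens) overrides the defaults above.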
const MAX_RETRIES = 4
const INITIAL_DELAY = 4000
const DELAY_EXPONENTIAL = 2
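// With these defaults, a rate-limited request is retried after delays of
// 4000ms, 8000ms, 16000ms and 32000ms: the delay is multiplied by
// DELAY_EXPONENTIAL after every 429 response, up to MAX_RETRIES retries.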
/**
* Function for solving a request using the OpenAI GPT-3.5-turbo API.
* @function
* @async
* @param {SolveRequest} request - The input request to be processed by GPT-3.5-turbo. It can be either a string or an array of ChatCompletionRequestMessage objects.
* @param {SolveRequestOptions} [options] - Optional parameters for model configuration, exponential backoff and verbosity.
* @returns {Promise<SolveResponse>} - Promise that resolves to a SolveResponse object containing the status code and the data (string) generated by GPT-3.5-turbo.
*/
export async function solve(request: SolveRequest, options?: SolveRequestOptions): Promise<SolveResponse> {
  const {
    max_retries = MAX_RETRIES,
    initial_delay = INITIAL_DELAY,
    delay_exponential = DELAY_EXPONENTIAL,
    verbose = false,
    ...apiOptions
  } = options || {}
  let delay = initial_delay
  let messages: Array<ChatCompletionRequestMessage>
  try {
    messages = formatRequest(request)
  } catch (error: any) {
    return error as SolveResponse
  }
  if (verbose) console.log('Sending to GPT-3.5-turbo', messages, apiOptions)
  let retries = 0
  while (retries <= max_retries) {
    try {
      const response = await callGPT({
        messages,
        ...apiOptions
      })
      if (verbose) console.log('GPT-3.5-turbo response: ', response)
      return {
        status: 200,
        data: response
      };
    } catch (error: any) {
      if (error.response && error.response.status === 429) {
        await new Promise(resolve => setTimeout(resolve, delay));
        delay *= delay_exponential;
        retries++;
        if (verbose) console.log('Retrying...', retries)
      } else if (error.response) {
        const err = formatError(
          error.response.status,
          'OpenAI API Error',
          `${error.response.statusText}: ${error.response.data.error.message}`
        )
        if (verbose) console.log(err)
        return err
      } else {
        // Non-HTTP failures (e.g. network errors) carry no response object.
        const err = formatError(0, 'OpenAI API Error', String(error.message ?? error))
        if (verbose) console.log(err)
        return err
      }
    }
  }
  const err = formatError(
    429,
    'MAX RETRIES REACHED',
    `Exponential backoff failed with initial delay: ${initial_delay}, max retries: ${max_retries} and delay exponential: ${delay_exponential}`
  )
  if (verbose) console.log(err)
  return err
}
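// Example usage (a minimal sketch; it assumes OPENAI_API_KEY is set and that
// this module is importable as './solve'):
//
// import { solve } from './solve'
//
// const res = await solve('Translate "hello" to French.', { max_tokens: 50 })
// if (res.status === 200) {
//   console.log(res.data)               // model output
// } else {
//   console.error(JSON.parse(res.data)) // { error, text }
// }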