diff --git a/LICENSE b/LICENSE
index d2b786f..017dcee 100644
--- a/LICENSE
+++ b/LICENSE
@@ -629,7 +629,7 @@ to attach them to the start of each source file to most effectively
 state the exclusion of warranty; and each file should have at least
 the "copyright" line and a pointer to where the full notice is found.
 
-    ChatGPT Discord Bot interacts with OpenAIs GPT-3.5 turbo model to generate text
+    ChatGPT Discord Bot interacts with OpenAIs Chat Completion API to generate text
     Copyright (C) 2023  Zelda_Fan
 
     This program is free software: you can redistribute it and/or modify
diff --git a/README.md b/README.md
index 8db727f..9655e21 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
 # ChatGPT-Discord-Bot
 
-A basic Discord bot to generate chat completions using OpenAIs GPT-3.5 turbo model.
+A basic Discord bot to generate chat completions using OpenAIs Chat Completion API.
 
 **DISCLAIMER:** THIS REPOSITORY IS IN NO WAY ASSOCIATED TO OPENAI
 OFFERING THIS CODE IN FORM OF A PUBLIC DISCORD BOT WHICH CAN BE INVITED BY EVERYBODY IS NOT SUPPORTED.
@@ -26,7 +26,7 @@ The bot has the following features:
 - logging to detect tos-breaking prompts
 - ability to blacklist
 
-If you want some inspiration on system instructions for the GPT-3.5 model you can view [my repository](https://github.com/ZeldaFan0225/ChatGPT-Discord-Bot-System-Instructions) for it.
+If you want some inspiration on system instructions for the GPT model you can view [my repository](https://github.com/ZeldaFan0225/ChatGPT-Discord-Bot-System-Instructions) for it.
 
 ## Version Requirements
 
diff --git a/changelog.md b/changelog.md
index a796787..afbe331 100644
--- a/changelog.md
+++ b/changelog.md
@@ -1,5 +1,11 @@
 # Changelog
 
+## V1.4.0
+
+- change wording to remove GPT 3.5
+- add ability to easily switch to GPT-4
+- change how money spent by users is calculated
+
 ## V1.3.0
 
 - sql command for easy database access
diff --git a/config.md b/config.md
index 93e27e4..a0d3a39 100644
--- a/config.md
+++ b/config.md
@@ -9,6 +9,7 @@ To see an example look at our [template.config.json](https://github.com/ZeldaFan
     "staff_roles": The roles which your staff have. This will bypass filters and cooldowns (ARRAY OF ROLE IDS),
     "staff_users": The staff users who don't have any of the staff roles. This will bypass filters and cooldowns (ARRAY OF USER IDS),
     "blacklist_roles": Blacklist users based on their roles. Staff have full bypass (ARRAY OF ROLE IDS),
+    "default_model": The default model to use. Model must support chat completion (STRING) *8,
     "staff_can_bypass_feature_restrictions": When set to true staff won't be restricted by features turned off (BOOLEAN) *4,
     "dev": Whether this is a development instance or not (BOOLEAN) *3,
     "global_user_cooldown": The time until a user can send a new request in milliseconds (NUMBER),
@@ -42,7 +43,14 @@ To see an example look at our [template.config.json](https://github.com/ZeldaFan
         "user_leaderboard": Whether this feature is enabled or not (BOOLEAN) *4
     },
     "leaderboard_amount_users": How many users to display on the leaderboard (NUMBER),
-    "englishify_system_instruction": The system instruction to translate a message (STRING) *5
+    "englishify_system_instruction": The system instruction to translate a message (STRING) *5,
+    "context_action_instruction": The system instruction for the context action (STRING),
+    "costs": {
+        "MODEL NAME": {
+            "prompt": The cost for prompt tokens,
+            "completion": The cost for completion tokens
+        }
+    }
 }
 ```
 
@@ -51,4 +59,6 @@
 `*3` Developer mode will enable logging and will also show the generations ID in the embed in Discord
 `*4` This option changes how the command is created.
 `*5` It is not recommended to change this option.
-`*6` It is not recommended to change this option.
\ No newline at end of file
+`*6` Configuration of `context_action_instruction` is advised.
+`*7` Prices for models to save money spent by users. [Read more about pricing](https://openai.com/pricing)
+`*8` See API Documentation for compatibility. [Read more](https://platform.openai.com/docs/models/model-endpoint-compatibility)
\ No newline at end of file
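
Note on the new `costs` block: it is keyed by model name, and — matching the division by 1000 in `src/classes/client.ts` further down — each `prompt`/`completion` value is treated as a price per 1,000 tokens. A minimal sketch of that calculation; the token counts are made-up illustration values, not real API output:

```ts
// Sketch of how a costs entry appears to be applied; prices are per 1,000 tokens.
const costs: Record<string, {prompt: number, completion: number}> = {
    "gpt-3.5-turbo": {prompt: 0.002, completion: 0.002}
}

const usage = {prompt_tokens: 1200, completion_tokens: 300} // hypothetical usage numbers
const price = costs["gpt-3.5-turbo"]

const dollars =
    (price?.prompt || 0) * (usage.prompt_tokens / 1000) +
    (price?.completion || 0) * (usage.completion_tokens / 1000)

console.log(dollars) // 0.0024 + 0.0006 = 0.003
```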
diff --git a/package-lock.json b/package-lock.json
index eb027a9..412c3cb 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -1,6 +1,6 @@
 {
   "name": "chatgpt_discord_bot",
-  "version": "1.3.0",
+  "version": "1.4.0",
   "lockfileVersion": 2,
   "requires": true,
   "packages": {
diff --git a/package.json b/package.json
index 9f67d02..d5582dd 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
 {
   "name": "chatgpt_discord_bot",
-  "version": "1.3.0",
+  "version": "1.4.0",
   "description": "",
   "main": "dist/index.js",
   "scripts": {
diff --git a/src/classes/client.ts b/src/classes/client.ts
index 029e73e..e18d1c1 100644
--- a/src/classes/client.ts
+++ b/src/classes/client.ts
@@ -91,11 +91,14 @@ export class ChatGPTBotClient extends Client {
     }
 
     async requestChatCompletion(messages: {role: string, content: string}[], user_id: string, database: Pool, override_options?: {
-        temperature?: number
+        temperature?: number,
+        model?: string
     }) {
+        const model = override_options?.model || this.config.default_model || "gpt-3.5-turbo"
+
         const openai_req = Centra(`https://api.openai.com/v1/chat/completions`, "POST")
         .body({
-            model: "gpt-3.5-turbo",
+            model,
             messages,
             temperature: override_options?.temperature ?? this.config.generation_parameters?.temperature,
             top_p: this.config.generation_parameters?.top_p,
@@ -123,15 +126,22 @@
 
         if(!data?.id) throw new Error("Unable to generate response")
 
-        await this.recordSpentTokens(user_id, data.usage.total_tokens ?? 0, database)
+        await this.recordSpentTokens(user_id, {prompt: data.usage.prompt_tokens, completion: data.usage.completion_tokens}, model, database)
 
         return data
     }
 
-    async recordSpentTokens(user_id: string, tokens: number, database: Pool) {
+    async recordSpentTokens(user_id: string, tokens: {prompt: number, completion: number}, model: string, database: Pool) {
         if(!this.config.features?.user_stats) return false;
-        const res = await database.query("UPDATE user_data SET tokens = user_data.tokens + $2 WHERE user_id=$1 RETURNING *", [user_id, tokens]).catch(console.error)
+        let cost = 0
+
+        if(this.config.costs?.[model]) {
+            cost += (this.config.costs?.[model]?.prompt || 0) * (tokens.prompt / 1000)
+            cost += (this.config.costs?.[model]?.completion || 0) * (tokens.completion / 1000)
+        }
+
+        const res = await database.query("UPDATE user_data SET tokens=user_data.tokens+$2, cost=user_data.cost+$3 WHERE user_id=$1 RETURNING *", [user_id, (tokens.completion + tokens.prompt), cost]).catch(console.error)
         return !!res?.rowCount
    }
 }
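
With `model` now exposed through `override_options`, a caller can switch a single request to GPT-4 while everything else keeps using `default_model`. A hypothetical call site, not part of this diff — the import path and the wrapper function are assumptions:

```ts
// Hypothetical usage sketch; omitting "model" falls back to config.default_model,
// and finally to "gpt-3.5-turbo".
import { Pool } from "pg"
import { ChatGPTBotClient } from "./classes/client" // assumed path

async function askGpt4(client: ChatGPTBotClient, user_id: string, database: Pool, prompt: string) {
    const data = await client.requestChatCompletion(
        [
            {role: "system", content: "You are a helpful assistant."},
            {role: "user", content: prompt}
        ],
        user_id,
        database,
        {model: "gpt-4", temperature: 0.7}
    )
    return data.choices[0]?.message.content?.trim()
}
```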
diff --git a/src/commands/chat_single.ts b/src/commands/chat_single.ts
index 0a6955b..e45f697 100644
--- a/src/commands/chat_single.ts
+++ b/src/commands/chat_single.ts
@@ -120,6 +120,8 @@
             payload.components = components
         }
 
+        data.object
+
         if(description.length < 4000) {
             const embed = new EmbedBuilder({
                 author: {
@@ -128,12 +130,12 @@
                 },
                 description,
                 color: Colors.Green,
-                footer: {text: "This text has been generated by OpenAIs GPT-3.5 Model"}
+                footer: {text: `This text has been generated by OpenAIs Chat Completion API (${data.model})`}
             })
 
             payload.embeds = [embed]
         } else {
-            const attachment = new AttachmentBuilder(Buffer.from(`${ctx.interaction.user.tag}:\n${message}\n\nChatGPT (${system_instruction_name}):\n${data.choices[0]?.message.content?.trim() ?? "Hi there"}\n\nThis response has been generated using OpenAIs GPT-3.5 model`), {name: `${data.id}.txt`})
+            const attachment = new AttachmentBuilder(Buffer.from(`${ctx.interaction.user.tag}:\n${message}\n\nChatGPT (${system_instruction_name}):\n${data.choices[0]?.message.content?.trim() ?? "Hi there"}\n\nThis response has been generated using OpenAIs Chat Completion API`), {name: `${data.id}.txt`})
             payload.content = "Result attached below"
             payload.files = [attachment]
         }
diff --git a/src/commands/chat_thread.ts b/src/commands/chat_thread.ts
index c3ef6a4..43ac6f7 100644
--- a/src/commands/chat_thread.ts
+++ b/src/commands/chat_thread.ts
@@ -66,7 +66,7 @@
             },
             description: ai_data.choices[0]?.message.content?.trim(),
             color: Colors.Blue,
-            footer: {text: "This text has been generated by OpenAIs GPT-3.5 Model"}
+            footer: {text: `This text has been generated by OpenAIs Chat Completion API (${ai_data.model})`}
         })
     ]
 
@@ -75,7 +75,7 @@
     if((embedLength(embeds[0].toJSON()) + embedLength(embeds[1].toJSON())) <= 6000) {
         payload = {embeds}
     } else {
-        const attachment = new AttachmentBuilder(Buffer.from(`${ctx.interaction.user.tag}:\n${message}\n\nChatGPT:\n${ai_data.choices[0]?.message.content?.trim() ?? "Hi there"}\n\nThis response has been generated using OpenAIs GPT-3.5 model`), {name: `${ai_data.id}.txt`})
+        const attachment = new AttachmentBuilder(Buffer.from(`${ctx.interaction.user.tag}:\n${message}\n\nChatGPT:\n${ai_data.choices[0]?.message.content?.trim() ?? "Hi there"}\n\nThis response has been generated using OpenAIs Chat Completion API`), {name: `${ai_data.id}.txt`})
         payload = {
             content: "Result attached below",
             files: [attachment]
@@ -166,12 +166,12 @@ ${system_instruction ?? "NONE"}`,
             },
             description,
             color: Colors.Green,
-            footer: {text: "This text has been generated by OpenAIs GPT-3.5 Model"}
+            footer: {text: `This text has been generated by OpenAIs Chat Completion API (${data.model})`}
         })
 
         payload = {embeds: [embed]}
     } else {
-        const attachment = new AttachmentBuilder(Buffer.from(`${ctx.interaction.user.tag}:\n${message}\n\nChatGPT:\n${data.choices[0]?.message.content?.trim() ?? "Hi there"}\n\nThis response has been generated using OpenAIs GPT-3.5 model`), {name: `${data.id}.txt`})
+        const attachment = new AttachmentBuilder(Buffer.from(`${ctx.interaction.user.tag}:\n${message}\n\nChatGPT:\n${data.choices[0]?.message.content?.trim() ?? "Hi there"}\n\nThis response has been generated using OpenAIs Chat Completion API`), {name: `${data.id}.txt`})
         payload = {
             content: "Unable to start thread.\nResult attached below",
             files: [attachment]
@@ -204,7 +204,7 @@ ${system_instruction ?? "NONE"}`,
             },
             description: data.choices[0]?.message.content?.trim(),
             color: Colors.Blue,
-            footer: {text: "This text has been generated by OpenAIs GPT-3.5 Model"}
+            footer: {text: `This text has been generated by OpenAIs Chat Completion API (${data.model})`}
         }),
         new EmbedBuilder({
             description: !!db_save?.rowCount ? `To create a response to ChatGPTs response use ${await ctx.client.getSlashCommandTag("chat thread")}` : "Unable to save chat for followup",
"Hi there"}\n\nThis response has been generated using OpenAIs Chat Completion API`), {name: `${ai_data.id}.txt`}) payload = { content: "Result attached below", files: [attachment] @@ -166,12 +166,12 @@ ${system_instruction ?? "NONE"}`, }, description, color: Colors.Green, - footer: {text: "This text has been generated by OpenAIs GPT-3.5 Model"} + footer: {text: `This text has been generated by OpenAIs Chat Completion API (${data.model})`} }) payload = {embeds: [embed]} } else { - const attachment = new AttachmentBuilder(Buffer.from(`${ctx.interaction.user.tag}:\n${message}\n\nChatGPT:\n${data.choices[0]?.message.content?.trim() ?? "Hi there"}\n\nThis response has been generated using OpenAIs GPT-3.5 model`), {name: `${data.id}.txt`}) + const attachment = new AttachmentBuilder(Buffer.from(`${ctx.interaction.user.tag}:\n${message}\n\nChatGPT:\n${data.choices[0]?.message.content?.trim() ?? "Hi there"}\n\nThis response has been generated using OpenAIs Chat Completion API`), {name: `${data.id}.txt`}) payload = { content: "Unable to start thread.\nResult attached below", files: [attachment] @@ -204,7 +204,7 @@ ${system_instruction ?? "NONE"}`, }, description: data.choices[0]?.message.content?.trim(), color: Colors.Blue, - footer: {text: "This text has been generated by OpenAIs GPT-3.5 Model"} + footer: {text: `This text has been generated by OpenAIs Chat Completion API (${data.model})`} }), new EmbedBuilder({ description: !!db_save?.rowCount ? `To create a response to ChatGPTs response use ${await ctx.client.getSlashCommandTag("chat thread")}` : "Unable to save chat for followup", diff --git a/src/commands/info.ts b/src/commands/info.ts index ba7a0ec..a3f1bc8 100644 --- a/src/commands/info.ts +++ b/src/commands/info.ts @@ -21,7 +21,7 @@ export default class extends Command { const embed = new EmbedBuilder({ title: "Info", color: Colors.Blue, - description: `This bot acts as an interface with the OpenAI GPT-3.5 turbo model.\nThis bot is open source and can be viewed on [GitHub](https://github.com/ZeldaFan0225/ChatGPT-Discord-Bot).\n**There is no guarantee that this instance of the bot is unmodified**\n\nCurrent configuration:\n**Logging** ${ctx.client.config.logs?.enabled ? "Enabled" : "Disabled"}` + description: `This bot acts as an interface with the OpenAI Chat Completion API.\nThis bot is open source and can be viewed on [GitHub](https://github.com/ZeldaFan0225/ChatGPT-Discord-Bot).\n**There is no guarantee that this instance of the bot is unmodified**\n\nCurrent configuration:\n**Logging** ${ctx.client.config.logs?.enabled ? 
"Enabled" : "Disabled"}` }) return ctx.interaction.reply({ diff --git a/src/commands/leaderboard.ts b/src/commands/leaderboard.ts index 5b8d928..fe68957 100644 --- a/src/commands/leaderboard.ts +++ b/src/commands/leaderboard.ts @@ -28,7 +28,7 @@ export default class extends Command { await ctx.interaction.deferReply() const leaders_query = await ctx.database.query(`SELECT * FROM user_data WHERE user_id != '0' ORDER BY tokens DESC LIMIT ${ctx.client.config.leaderboard_amount_users || 10}`).catch(console.error) const own_query = await ctx.database.query("SELECT * FROM user_data WHERE user_id=$1", [ctx.interaction.user.id]).catch(console.error) - const total = await ctx.database.query("SELECT SUM(tokens) as total FROM user_data").catch(console.error) + const total = await ctx.database.query("SELECT SUM(tokens) as tokens, SUM(cost) as cost FROM user_data").catch(console.error) if(!leaders_query?.rowCount || !own_query?.rowCount) return ctx.error({error: "Unable to generate leaderboard", codeblock: true}) @@ -37,12 +37,12 @@ export default class extends Command { const lines = await Promise.all(leaders.map(async (l, i) => { const user = await ctx.client.users.fetch(l.user_id).catch(console.error) - return `${i == (ctx.client.config.leaderboard_amount_users || 10) ? "...\n" : ""}${i == 0 ? "👑" : ""}**${user?.tag ?? "Unknown User#0001"}** \`${l.tokens}\` Tokens (about \`${Math.round(l.tokens/10 * 0.002)/100}$\`)` + return `${i == (ctx.client.config.leaderboard_amount_users || 10) ? "...\n" : ""}${i == 0 ? "👑" : ""}**${user?.tag ?? "Unknown User#0001"}** \`${l.tokens}\` Tokens (about \`${Math.round(l.cost * 100)/100}$\`)` })) const embed = new EmbedBuilder({ title: "Spent tokens leaderboard", - description: `${lines.join("\n")}\n\n**Total Tokens** \`${total?.rows?.[0].total ?? 0}\` (about \`${Math.round(Number(total?.rows?.[0].total ?? 0)/10 * 0.002)/100}$\`)\nAll prices are based on estimations, no guarantees that they are right.`.slice(0, 4000), + description: `${lines.join("\n")}\n\n**Total Tokens** \`${total?.rows?.[0].tokens ?? 0}\` (about \`${Math.round((total?.rows?.[0].cost ?? 0) * 100)/100}$\`)\nAll prices are based on estimations, no guarantees that they are right.`.slice(0, 4000), color: Colors.Green }) diff --git a/src/components/regenerate.ts b/src/components/regenerate.ts index 6f5035b..a21faa4 100644 --- a/src/components/regenerate.ts +++ b/src/components/regenerate.ts @@ -52,12 +52,12 @@ export default class extends Component { }, description, color: Colors.Green, - footer: {text: "This text has been generated by OpenAIs GPT-3.5 Model"} + footer: {text: `This text has been generated by OpenAIs Chat Completion API (${data.model})`} }) payload.embeds = [embed] } else { - const attachment = new AttachmentBuilder(Buffer.from(`${ctx.interaction.user.tag}:\n${message}\n\nChatGPT (${system_instruction_name}):\n${data.choices[0]?.message.content?.trim() ?? "Hi there"}\n\nThis response has been generated using OpenAIs GPT-3.5 model`), {name: `${data.id}.txt`}) + const attachment = new AttachmentBuilder(Buffer.from(`${ctx.interaction.user.tag}:\n${message}\n\nChatGPT (${system_instruction_name}):\n${data.choices[0]?.message.content?.trim() ?? 
"Hi there"}\n\nThis response has been generated using OpenAIs Chat Completion API`), {name: `${data.id}.txt`}) payload.content = "Result attached below" payload.files = [attachment] } diff --git a/src/contexts/context_action.ts b/src/contexts/context_action.ts index ad42a58..cce6cff 100644 --- a/src/contexts/context_action.ts +++ b/src/contexts/context_action.ts @@ -75,12 +75,12 @@ export default class extends Context { }, description, color: Colors.Green, - footer: {text: `Completion with OpenAIs GPT-3.5 model requested by ${ctx.interaction.user.tag}`, icon_url: ctx.interaction.user.displayAvatarURL()} + footer: {text: `Completion with OpenAIs Chat Completion API requested by ${ctx.interaction.user.tag}`, icon_url: ctx.interaction.user.displayAvatarURL()} }) payload.embeds = [embed] } else { - const attachment = new AttachmentBuilder(Buffer.from(`${ctx.interaction.targetMessage.author.tag}:\n${ctx.interaction.targetMessage.content}\n\nChatGPT:\n${data.choices[0]?.message.content?.trim() ?? "Hi there"}\n\nThis response has been generated using OpenAIs GPT-3.5 model.\nThe completion has been requested by ${ctx.interaction.user.tag}`), {name: `${data.id}.txt`}) + const attachment = new AttachmentBuilder(Buffer.from(`${ctx.interaction.targetMessage.author.tag}:\n${ctx.interaction.targetMessage.content}\n\nChatGPT:\n${data.choices[0]?.message.content?.trim() ?? "Hi there"}\n\nThis response has been generated using OpenAIs Chat Completion API.\nThe completion has been requested by ${ctx.interaction.user.tag}`), {name: `${data.id}.txt`}) payload.content = "Result attached below" payload.files = [attachment] } diff --git a/src/contexts/englishify.ts b/src/contexts/englishify.ts index 3c7cf04..a4e0e16 100644 --- a/src/contexts/englishify.ts +++ b/src/contexts/englishify.ts @@ -72,12 +72,12 @@ export default class extends Context { }, description, color: Colors.Green, - footer: {text: `Translation with OpenAIs GPT-3.5 model requested by ${ctx.interaction.user.tag}`, icon_url: ctx.interaction.user.displayAvatarURL()} + footer: {text: `Translation with OpenAIs Chat Completion API requested by ${ctx.interaction.user.tag}`, icon_url: ctx.interaction.user.displayAvatarURL()} }) payload.embeds = [embed] } else { - const attachment = new AttachmentBuilder(Buffer.from(`${ctx.interaction.targetMessage.author.tag}:\n${ctx.interaction.targetMessage.content}\n\nChatGPT:\n${data.choices[0]?.message.content?.trim() ?? "Hi there"}\n\nThis response has been generated using OpenAIs GPT-3.5 model.\nThe translation has been requested by ${ctx.interaction.user.tag}`), {name: `${data.id}.txt`}) + const attachment = new AttachmentBuilder(Buffer.from(`${ctx.interaction.targetMessage.author.tag}:\n${ctx.interaction.targetMessage.content}\n\nChatGPT:\n${data.choices[0]?.message.content?.trim() ?? 
"Hi there"}\n\nThis response has been generated using OpenAIs Chat Completion API.\nThe translation has been requested by ${ctx.interaction.user.tag}`), {name: `${data.id}.txt`}) payload.content = "Result attached below" payload.files = [attachment] } diff --git a/src/index.ts b/src/index.ts index 7e6252d..a398b7d 100644 --- a/src/index.ts +++ b/src/index.ts @@ -38,9 +38,9 @@ if(client.config.logs?.enabled) { client.on("ready", async () => { await connection.connect().then(async () => { - //console.log(await connection.query("ALTER TABLE user_data ADD COLUMN")) + //console.log(await connection.query("SELECT * FROM user_data")) - await connection.query("CREATE TABLE IF NOT EXISTS user_data (index SERIAL, user_id VARCHAR(100) PRIMARY KEY, consent bool DEFAULT true, tokens int NOT NULL DEFAULT 0, blacklisted bool DEFAULT false)") + await connection.query("CREATE TABLE IF NOT EXISTS user_data (index SERIAL, user_id VARCHAR(100) PRIMARY KEY, consent bool DEFAULT true, tokens int NOT NULL DEFAULT 0, cost double precision default 0, blacklisted bool DEFAULT false)") await connection.query("CREATE TABLE IF NOT EXISTS chats (index SERIAL, id VARCHAR(100) PRIMARY KEY, user_id VARCHAR(100) NOT NULL, messages JSON[] DEFAULT '{}', created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP)") console.log("Tables created") diff --git a/src/types.ts b/src/types.ts index f3d18ce..7850c9d 100644 --- a/src/types.ts +++ b/src/types.ts @@ -77,6 +77,7 @@ export interface OpenAIChatCompletionResponse { id: string, object: string, created: number, + model: string, choices: { index: number, message: { @@ -118,6 +119,7 @@ export interface Config { staff_roles?: string[], staff_users?: string[], blacklist_roles?: string[], + default_model?: string, staff_can_bypass_feature_restrictions?: boolean, dev?: boolean, global_user_cooldown?: number, @@ -155,5 +157,9 @@ export interface Config { }, leaderboard_amount_users?: number, englishify_system_instruction?: string, - context_action_instruction?: string + context_action_instruction?: string, + costs?: Record } \ No newline at end of file diff --git a/template.config.json b/template.config.json index c19ca9a..c1bbd35 100644 --- a/template.config.json +++ b/template.config.json @@ -11,6 +11,7 @@ "blacklist_roles": [ ], + "default_model": "gpt-3.5-turbo", "staff_can_bypass_feature_restrictions": true, "dev": true, "global_user_cooldown": 60000, @@ -50,5 +51,19 @@ }, "leaderboard_amount_users": 10, "englishify_system_instruction": "You are a translation tool. You translate into good english.", - "context_action_instruction": "" + "context_action_instruction": "", + "costs": { + "gpt-3.5-turbo": { + "prompt": 0.002, + "completion": 0.002 + }, + "gpt-4": { + "prompt": 0.03, + "completion": 0.06 + }, + "gpt-4-32k": { + "prompt": 0.06, + "completion": 0.12 + } + } } \ No newline at end of file diff --git a/update_instructions.md b/update_instructions.md index 27b5c2c..e7d19c7 100644 --- a/update_instructions.md +++ b/update_instructions.md @@ -1,5 +1,12 @@ # Update Instructions +# 1.3.0 => 1.4.0 + +Run following queries on your postgres database **in the correct order**: + +- `ALTER TABLE user_data ADD COLUMN cost double precision default 0` +- `UPDATE user_data SET cost=user_data.tokens/1000*0.002` + # 1.2.0 => 1.3.0 No special actions are required to perform this update.