Skip to content

Commit

Permalink
ChatGPT support added!
Browse files Browse the repository at this point in the history
- Added new mode `chat_completion`.
- Chat completion works only within output panel.
- Chat completion stores chat history, both questions and answers (it's the way the ChatGPT API works).
- Added command to clear chat history.
- Chat messages are inserted via the input panel.
- Additional setting for chat model added `chat_model`.
- `multimarkdown` setting dropped as deprecated.
- `output_panel` setting dropped as deprecated.
- Global code refactoring.
  • Loading branch information
yaroslavyaroslav committed Mar 31, 2023
1 parent b89df64 commit a09bda2
Show file tree
Hide file tree
Showing 6 changed files with 350 additions and 199 deletions.
20 changes: 17 additions & 3 deletions Default.sublime-commands
Original file line number Diff line number Diff line change
@@ -1,20 +1,34 @@
[
{
"caption": "OpenAI Complete",
"caption": "OpenAI: Complete",
"command": "openai",
"args": {
"mode": "completion"
}
},
{
"caption": "OpenAI Insert",
"caption": "OpenAI: New Message",
"command": "openai",
"args": {
"mode": "chat_completion"
}
},
{
"caption": "OpenAI: Reset Chat History",
"command": "openai",
"args": {
"mode": "reset_char_history"
}
},
{
"caption": "OpenAI: Insert",
"command": "openai",
"args": {
"mode": "insertion"
}
},
{
"caption": "OpenAI Edit",
"caption": "OpenAI: Edit",
"command": "openai",
"args": {
"mode": "edition"
Expand Down
47 changes: 47 additions & 0 deletions cacher.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,47 @@
import sublime
import os
from . import jl_utility as jl


class Cacher():
    """Persists the plugin's chat history as a JSON Lines file inside
    Sublime Text's cache directory ("OpenAI completion/chat_history.jl")."""

    def __init__(self) -> None:
        cache_dir = sublime.cache_path()
        plugin_cache_dir = os.path.join(cache_dir, "OpenAI completion")
        if not os.path.exists(plugin_cache_dir):
            os.makedirs(plugin_cache_dir)

        # Create the file path to store the data
        self.cache_file = os.path.join(plugin_cache_dir, "chat_history.jl")

    def read_all(self):
        """Return the whole chat history as a list of dicts, oldest first."""
        return list(jl.reader(self.cache_file))

    def append_to_cache(self, cache_lines):
        """Append every message in *cache_lines* to the history file.

        BUG FIX: previously only ``cache_lines[0]`` was written, silently
        dropping the rest of the batch (and raising IndexError on an empty
        list); now all entries are persisted.
        """
        writer = jl.writer(self.cache_file)
        next(writer)  # prime the coroutine so it reaches its first `yield`
        for line in cache_lines:
            writer.send(line)
        writer.close()  # raises GeneratorExit inside -> closes the file handle

    def drop_first(self, number=4):
        """Remove the oldest *number* lines from the history file.

        The default of 4 presumably trims the two oldest question/answer
        pairs — TODO confirm against the caller.
        """
        # Read all lines from the JSON Lines file
        with open(self.cache_file, "r") as file:
            lines = file.readlines()

        # Remove the specified number of lines from the beginning
        lines = lines[number:]

        # Write the remaining lines back to the cache file
        with open(self.cache_file, "w") as file:
            file.writelines(lines)

    def drop_all(self):
        """Erase the entire chat history."""
        with open(self.cache_file, "w") as file:
            pass  # Truncate the file by opening it in 'w' mode and doing nothing
38 changes: 38 additions & 0 deletions jl_utility.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
import json
from typing import Iterator, Generator


def reader(fname: str) -> Iterator[dict]:
    """Lazily yield one parsed JSON object per line of *fname*."""
    with open(fname) as source:
        for raw_line in source:
            yield json.loads(raw_line.strip())


def writer(fname: str, mode: str = 'a') -> Generator[None, dict, None]:
    """Coroutine-style JSON Lines writer.

    Prime once with ``next()``, then ``send()`` each dict to append it as a
    single JSON-encoded line to *fname*. Close the generator to release the
    underlying file handle.
    """
    with open(fname, mode) as sink:
        while True:
            record = yield
            sink.write(json.dumps(record, ensure_ascii=False) + "\n")


# if __name__ == "__main__":
# # Read employees from employees.jl
# reader = jl_reader("employees.jl")

# # Create a new JSON Lines writer for output.jl
# writer = jl_writer("output.jl")
# next(writer)

# for employee in reader:
# id = employee["id"]
# name = employee["name"]
# dept = employee["department"]
# print(f"#{id} - {name} ({dept})")

# # Write the employee data to output.jl
# writer.send(employee)

# # Close the writer
# writer.close()
16 changes: 6 additions & 10 deletions openAI.sublime-settings
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,12 @@
// Does not affect editing mode.
"model": "text-davinci-003",

// The model which will generate the completion.
// Some models are suitable for natural language tasks, others specialize in code.
// Learn more at https://beta.openai.com/docs/models
// Does not affect editing mode.
"chat_model": "gpt-3.5-turbo",

// Controls randomness: Lowering results in less random completions.
// As the temperature approaches zero, the model will become deterministic and repetitive.
"temperature": 0.7,
Expand Down Expand Up @@ -36,16 +42,6 @@
// Your openAI token
"token": "",

// Ask the AI to format its answers with multimarkdown markup.
// By "ask", I mean it: it will literally add "format the answer with multimarkdown markup" to the question.
// Affects only `completion` command.
"multimarkdown": false,

// Manages where to print the output of the completion command:
// false — print into the editor
// true — print into separate output panel (named "OpenAI")
"output_panel": false,

// Minimum amount of characters selected to perform completion.
// Does not affect the chat_completion mode, which always runs in the output panel.
"minimum_selection_length": 20
Expand Down
206 changes: 20 additions & 186 deletions openai.py
Original file line number Diff line number Diff line change
@@ -1,184 +1,15 @@
import sublime, sublime_plugin
import functools
import http.client
import threading
import json
from .cacher import Cacher
import logging


class OpenAIWorker(threading.Thread):
    """Background thread that sends one OpenAI API request and writes the
    response back into the editor or the "OpenAI" output panel.

    One instance handles exactly one request; the thread exits after run().
    """

    def __init__(self, edit, region, text, view, mode, command):
        self.edit = edit
        self.region = region
        self.text = text  # the selected text (or, in panel mode, the question)
        self.view = view
        self.mode = mode  # 'completion' | 'insertion' | 'edition'
        self.command = command # optional
        self.settings = sublime.load_settings("openAI.sublime-settings")
        super(OpenAIWorker, self).__init__()

    def prompt_completion(self, completion):
        """Insert *completion* into the view or panel according to self.mode."""
        # Escape '$' so insert_snippet does not treat it as a snippet field.
        completion = completion.replace("$", "\$")
        if self.mode == 'insertion':
            # Locate the configured placeholder text within the view.
            result = self.view.find(self.settings.get('placeholder'), 0, 1)
            if result:
                self.view.sel().clear()
                self.view.sel().add(result)
                # Replace the placeholder with the specified replacement text
                self.view.run_command("insert_snippet", {"contents": completion})
            return

        if self.mode == 'completion':
            if self.settings.get('output_panel'):
                window = sublime.active_window()

                # Reuse the existing "OpenAI" panel when present, else create it.
                output_view = window.find_output_panel("OpenAI") if window.find_output_panel("OpenAI") != None else window.create_output_panel("OpenAI")
                output_view.run_command('append', {'characters': f'## {self.text}'})
                output_view.run_command('append', {'characters': '\n\n'})
                output_view.run_command('append', {'characters': completion})
                output_view.run_command('append', {'characters': '\n============\n\n'})
                window.run_command("show_panel", {"panel": "output.OpenAI"})
            else:
                # Collapse the selection to its end so the completion is
                # appended after the selected region instead of replacing it.
                region = self.view.sel()[0]
                if region.a <= region.b:
                    region.a = region.b
                else:
                    region.b = region.a

                self.view.sel().clear()
                self.view.sel().add(region)
                # Replace the placeholder with the specified replacement text
                self.view.run_command("insert_snippet", {"contents": completion})
            return

        if self.mode == 'edition': # it's just replacing all given text for now.
            region = self.view.sel()[0]
            self.view.run_command("insert_snippet", {"contents": completion})
            return

    def exec_net_request(self, connect: http.client.HTTPSConnection):
        """Read the pending HTTP response and forward the completion text.

        NOTE(review): `data_decoded` and `status` are referenced in the
        except handlers but are unbound if getresponse()/read() itself
        raises — that path would surface a NameError instead of the
        intended error message.
        """
        try:
            res = connect.getresponse()
            data = res.read()
            status = res.status
            data_decoded = data.decode('utf-8')
            connect.close()
            response = json.loads(data_decoded)
            print(data_decoded)
            # NOTE(review): this is the chat-completions response shape;
            # /v1/completions and /v1/edits put the text under
            # choices[0]['text'], which would raise KeyError here — confirm.
            completion = response['choices'][0]['message']['content']
            completion = completion.strip() # Remove leading and trailing spaces
            self.prompt_completion(completion)
        except KeyError:
            sublime.error_message("Exception\n" + "The OpenAI response could not be decoded. There could be a problem on their side. Please look in the console for additional error info.")
            logging.exception("Exception: " + str(data_decoded))
            return
        except Exception as ex:
            sublime.error_message(f"Server Error: {str(status)}\n{ex}")
            return

    def complete(self):
        """POST self.text as a chat-completion request."""
        conn = http.client.HTTPSConnection("api.openai.com")
        payload = {
            # Todo add uniq name for each output panel (e.g. each window)
            "messages": [{"role": "system", "content": "You are a code assistant."}, {"role": "user", "content": self.text}],
            # NOTE(review): model is hard-coded; the "model"/"chat_model"
            # settings are ignored here — confirm this is intentional.
            "model": "gpt-4",
            "temperature": self.settings.get("temperature"),
            "max_tokens": self.settings.get("max_tokens"),
            "top_p": self.settings.get("top_p"),
        }
        json_payload = json.dumps(payload)

        token = self.settings.get('token')

        headers = {
            'Content-Type': "application/json",
            'Authorization': 'Bearer {}'.format(token),
            'cache-control': "no-cache",
        }
        conn.request("POST", "/v1/chat/completions", json_payload, headers)
        self.exec_net_request(connect=conn)

    def insert(self):
        """POST a completion request with prompt/suffix split on the
        configured placeholder (fill-in-the-middle insertion)."""
        conn = http.client.HTTPSConnection("api.openai.com")
        parts = self.text.split(self.settings.get('placeholder'))
        try:
            # Exactly one placeholder must be present to define prompt/suffix.
            if not len(parts) == 2:
                raise AssertionError("There is no placeholder '" + self.settings.get('placeholder') + "' within the selected text. There should be exactly one.")
        except Exception as ex:
            sublime.error_message("Exception\n" + str(ex))
            logging.exception("Exception: " + str(ex))
            return

        payload = {
            "model": self.settings.get("model"),
            "prompt": parts[0],
            "suffix": parts[1],
            "temperature": self.settings.get("temperature"),
            "max_tokens": self.settings.get("max_tokens"),
            "top_p": self.settings.get("top_p"),
            "frequency_penalty": self.settings.get("frequency_penalty"),
            "presence_penalty": self.settings.get("presence_penalty")
        }
        json_payload = json.dumps(payload)

        token = self.settings.get('token')

        headers = {
            'Content-Type': "application/json",
            'Authorization': 'Bearer {}'.format(token),
            'cache-control': "no-cache",
        }
        conn.request("POST", "/v1/completions", json_payload, headers)
        self.exec_net_request(connect=conn)

    def edit_f(self):
        """POST an edit request: apply self.command (the instruction) to
        self.text (the selection)."""
        conn = http.client.HTTPSConnection("api.openai.com")
        payload = {
            "model": "code-davinci-edit-001", # could be text-davinci-edit-001
            "input": self.text,
            "instruction": self.command,
            "temperature": self.settings.get("temperature"),
            "top_p": self.settings.get("top_p"),
        }
        json_payload = json.dumps(payload)

        token = self.settings.get('token')

        headers = {
            'Content-Type': "application/json",
            'Authorization': 'Bearer {}'.format(token),
            'cache-control': "no-cache",
        }
        conn.request("POST", "/v1/edits", json_payload, headers)
        self.exec_net_request(connect=conn)

    def run(self):
        """Thread entry point: validate the token, then dispatch by mode."""
        try:
            # if (self.settings.get("max_tokens") + len(self.text)) > 4000:
            #     raise AssertionError("OpenAI accepts max. 4000 tokens, so the selected text and the max_tokens setting must be lower than 4000.")
            if not self.settings.has("token"):
                raise AssertionError("No token provided, you have to set the OpenAI token into the settings to make things work.")
            token = self.settings.get('token')
            # A trivially short token can't be valid; reuse the same message.
            if len(token) < 10:
                raise AssertionError("No token provided, you have to set the OpenAI token into the settings to make things work.")
        except Exception as ex:
            sublime.error_message("Exception\n" + str(ex))
            logging.exception("Exception: " + str(ex))
            return

        if self.mode == 'insertion': self.insert()
        if self.mode == 'edition': self.edit_f()
        if self.mode == 'completion':
            if self.settings.get('output_panel'):
                # Panel mode: the typed question replaces the selection text.
                self.text = self.command
            if self.settings.get('multimarkdown'):
                self.text += ' format the answer with multimarkdown markup'
            self.complete()
from .openai_worker import OpenAIWorker


class Openai(sublime_plugin.TextCommand):
def on_input(self, edit, region, text, view, mode, input):
worker_thread = OpenAIWorker(edit, region, text, view, mode=mode, command=input)
from .openai_worker import OpenAIWorker # https://stackoverflow.com/a/52927102

worker_thread = OpenAIWorker(region, text, view, mode=mode, command=input)
worker_thread.start()

"""
Expand All @@ -188,7 +19,7 @@ def on_input(self, edit, region, text, view, mode, input):
"""
def run(self, edit, **kwargs):
settings = sublime.load_settings("openAI.sublime-settings")
mode = kwargs.get('mode', 'completion')
mode = kwargs.get('mode', 'chat_completion')

# get selected text
region = ''
Expand All @@ -198,12 +29,10 @@ def run(self, edit, **kwargs):
text = self.view.substr(region)


# Checking that the user selected some text
try:
if region.__len__() < settings.get("minimum_selection_length"):
if mode == 'completion':
if not settings.get('output_panel'):
raise AssertionError("Not enough text selected to complete the request, please expand the selection.")
else:
if mode != 'chat_completion' and mode != 'reset_char_history':
raise AssertionError("Not enough text selected to complete the request, please expand the selection.")
except Exception as ex:
sublime.error_message("Exception\n" + str(ex))
Expand All @@ -212,14 +41,19 @@ def run(self, edit, **kwargs):

if mode == 'edition':
sublime.active_window().show_input_panel("Request: ", "Comment the given code line by line", functools.partial(self.on_input, edit, region, text, self.view, mode), None, None)

elif mode == 'insertion':
worker_thread = OpenAIWorker(edit, region, text, self.view, mode, "")
worker_thread.start()
else: # mode == `completion`
if settings.get('output_panel'):
sublime.active_window().show_input_panel("Question: ", "", functools.partial(self.on_input, edit, region, text, self.view, mode), None, None)
else:
worker_thread = OpenAIWorker(edit, region, text, self.view, mode, "")
worker_thread.start()
elif mode == 'completion': # mode == `completion`
worker_thread = OpenAIWorker(edit, region, text, self.view, mode, "")
worker_thread.start()
elif mode == 'reset_char_history':
Cacher().drop_all()
output_panel = sublime.active_window().find_output_panel("OpenAI Chat")
output_panel.set_read_only(False)
region = sublime.Region(0, output_panel.size())
output_panel.erase(edit, region)
output_panel.set_read_only(True)
else: # mode 'chat_completion', always in panel
sublime.active_window().show_input_panel("Question: ", "", functools.partial(self.on_input, edit, "region", "text", self.view, mode), None, None)

Loading

0 comments on commit a09bda2

Please sign in to comment.