author    | Schark <jordan@schark.online> | 2023-12-01 23:11:46 -0800
committer | Schark <jordan@schark.online> | 2023-12-01 23:11:46 -0800
commit    | f580735aaafdc8e0c9128955fcb93c039d10b8b9 (patch)
tree      | 604b2179c88bb419ec0421ae2c4a76c67518ae28
download  | cli-gpt-f580735aaafdc8e0c9128955fcb93c039d10b8b9.tar.gz, cli-gpt-f580735aaafdc8e0c9128955fcb93c039d10b8b9.zip
Initial work
-rw-r--r-- | .gitignore       |   4
-rw-r--r-- | README.md        |   3
-rw-r--r-- | chat.py          |  77
-rw-r--r-- | help.py          | 155
-rw-r--r-- | requirements.txt |   3
-rw-r--r-- | style.py         |  28

6 files changed, 270 insertions(+), 0 deletions(-)
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..59fe332
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,4 @@
+.venv/
+__pycache__/
+*.json
+token
diff --git a/README.md b/README.md
new file mode 100644
--- /dev/null
+++ b/README.md
@@ -0,0 +1,3 @@
+# CLI GPT
+
+A basic CLI program that replicates ChatGPT capabilities in the terminal for anyone with an OpenAI API key. An easy way to pay only for the GPT-4 usage you actually consume, as opposed to the monthly flat rate for ChatGPT Plus.
diff --git a/chat.py b/chat.py
new file mode 100644
--- /dev/null
+++ b/chat.py
@@ -0,0 +1,77 @@
+import queue
+import sys
+import threading
+
+from openai import OpenAI
+
+from help import HelpCommands, start_chat
+from style import StyleLog as styler
+
+# Read in token from "token" file
+# TODO: env variable in future?
+with open("token", "r") as file:
+    token = file.read().strip()
+client = OpenAI(api_key=token)
+
+def text_call(api_call_queue, messages, model):
+    # Chat models take the full message history.
+    response = client.chat.completions.create(
+        model=model,
+        messages=messages
+    )
+    api_call_queue.put(response.choices[0].message.content)
+
+def image_call(api_call_queue, messages, model):
+    # Image models use the images endpoint; send only the latest prompt.
+    response = client.images.generate(
+        model=model,
+        prompt=messages[-1]["content"]
+    )
+    api_call_queue.put(response.data[0].url)
+
+def main():
+
+    if len(sys.argv) > 1:
+        model = sys.argv[1]
+    else:
+        model = "gpt-3.5-turbo"
+
+    helper = HelpCommands(model)
+    messages = start_chat(model)
+
+    while True:
+        # TODO: Format output nicer :)
+        user_input = input("\nInput: ")
+
+        status, messages, model = helper.command(user_input, messages, model)
+        if status == 1:
+            break
+        elif status == 2:
+            continue
+
+        # Run the API call on a worker thread so the ellipsis animation can play while waiting.
+        api_call_done = threading.Event()
+        api_call_queue = queue.Queue()
+
+        if model in ("dall-e-2", "dall-e-3"):
+            response_thread = threading.Thread(target=image_call, args=(api_call_queue, messages, model))
+        else:
+            response_thread = threading.Thread(target=text_call, args=(api_call_queue, messages, model))
+        response_thread.start()
+
+        ellipsis_thread = threading.Thread(target=styler.show_ellipsis, args=(api_call_done,))
+        ellipsis_thread.start()
+
+        response_thread.join()
+        api_call_done.set()
+        ellipsis_thread.join()
+
+        ai_response = api_call_queue.get()
+        messages.append({"role": "assistant", "content": ai_response})
+        print(f"\nAI: {ai_response}\n")
+
+    # TODO: Add some form of token check, so as not to overflow the context window
+
+
+if __name__ == "__main__":
+    main()
diff --git a/help.py b/help.py
new file mode 100644
--- /dev/null
+++ b/help.py
@@ -0,0 +1,155 @@
+import json
+import subprocess
+from pathlib import Path
+
+import tiktoken
+
+# TODO: I don't love the way this file is structured from an extensibility perspective.
+# Find a way to maybe link the 'options' dict with the command list?
+
+def start_chat(model: str):
+    print("System: Welcome to cli-gpt. You may type your questions, or seek additional functionality via '/help'.")
+    print(f"System: Currently using model '{model}'.")
+    return [{"role": "system", "content": "You are a helpful assistant."}]
+
+def get_token_count(messages: list, model: str):
+    total_tokens: int = 0
+    encoding = tiktoken.encoding_for_model(model)
+    for message in messages:
+        if message.get("role") == "system":
+            continue
+        text = message.get("content", "")
+        message_tokens = len(encoding.encode(text))
+        total_tokens += message_tokens
+    return total_tokens
+
+class HelpCommands:
+
+    options: dict = {
+        "/exit": "Closes the chat.",
+        "/context": "Pass a URL whose curled contents are added to the chat history as context.",
+        "/help": "Display this list of available commands.",
+        "/load": "Load in a previous chat's JSON file.",
+        "/save": "Saves messages to a specified JSON file and closes the chat.",
+        "/clear": "Clears all messages and tokens from the chatlog, restarting the chat.",
+        "/model": "Change the model being used.",
+        "/info": "Print model information and the cli-gpt version.",
+    }
+
+    text_models = [
+        "gpt-3.5-turbo",
+        "gpt-4",
+        "gpt-4-32k",
+        "gpt-4-0613",
+        "gpt-4-32k-0613",
+        "gpt-4-1106-preview",
+        "gpt-4-vision-preview",
+    ]
+
+    image_models = [
+        "dall-e-2",
+        "dall-e-3",
+    ]
+
+    model_type = None
+
+    def __init__(self, model: str):
+        if model in self.text_models:
+            self.model_type = "text"
+        elif model in self.image_models:
+            self.model_type = "image"
+        else:
+            raise ValueError(f"System: Model '{model}' is not available. Please start again and re-specify the model version, or leave it blank.")
+
+    def _list_models(self):
+        print("\nSystem: Below is a list of available models. View up-to-date model information in the OpenAI API documentation.")
+        print("\n - Text Models")
+        for list_model in self.text_models:
+            print(f"   - {list_model}")
+        print("\n - Image Models")
+        for list_model in self.image_models:
+            print(f"   - {list_model}")
+
+    def command(self, user_input: str, messages: list, model: str) -> tuple[int, list, str]:
+        user_input_lower = user_input.lower()
+        if user_input_lower in self.options:
+
+            if user_input_lower == "/exit":
+                return 1, [None], ""
+
+            if user_input_lower == "/context":
+                print("\nSystem: Please provide the URL you would like to curl.")
+                url = input("URL: ")
+                curl_output = subprocess.check_output(f"curl {url}", shell=True).decode("utf-8").strip()
+                messages.append({"role": "user", "content": f"I would like to provide the following context from a curl command to '{url}' to this chat: {curl_output}"})
+                return 2, messages, model
+
+            if user_input_lower == "/help":
+                print("\nSystem: Below is a list of available commands.\n")
+                for key, description in self.options.items():
+                    print(f" - {key}: {description}")
+                return 2, messages, model
+
+            if user_input_lower == "/load":
+                print("\nSystem: Please specify the filepath you would like to load in from, or '/cancel'.")
+                path = input("Path: ")
+                if path != "/cancel":
+                    with open(Path(path), "r") as file:
+                        messages = json.load(file)
+                    print(f"System: Successfully read in from {path}. Continuing chat.")
+                return 2, messages, model
+
+            if user_input_lower == "/save":
+                status, destination = self._save(messages)
+                if status == 1:
+                    return 2, messages, model
+                print(f"System: Successfully saved to {destination}. Closing chat.")
+                return 1, [None], ""
+
+            if user_input_lower == "/clear":
+                print("\nSystem: Clearing messages and restarting log.\n")
+                messages = start_chat(model)
+                return 2, messages, model
+
+            if user_input_lower == "/info":
+                if self.model_type == "text":  # tiktoken has no encodings for image models
+                    print(f"\nSystem: This chatlog has used {get_token_count(messages, model)} tokens for model version '{model}'.")
+                print("System: Currently using cli-gpt version 0.0.1.")
+                return 2, messages, model
+
+            if user_input_lower == "/model":
+                self._list_models()
+                print("\nSystem: Change the model version below, use '/list' to reprint available models, or '/cancel' to return to chat.")
+                new_model = input("\nModel: ")
+                while new_model not in self.text_models and new_model not in self.image_models:
+                    if new_model == "/list":
+                        self._list_models()
+                    elif new_model == "/cancel":
+                        return 2, messages, model
+                    else:
+                        print(f"System: '{new_model}' is not an accepted model. Use '/list' to output available models.")
+                    new_model = input("\nModel: ")
+
+                # 'image' models and 'text' models behave differently; switching
+                # between the two requires clearing the current message log.
+                new_type = "text" if new_model in self.text_models else "image"
+                if new_type != self.model_type:
+                    print("\nSystem: Switching between 'text' and 'image' models requires clearing the current message log. Would you like to save before switching models?")
+                    user_save = input("Save? (y,N): ")
+                    if user_save.lower() == "y":
+                        self._save(messages)
+                    messages = start_chat(new_model)
+                self.model_type = new_type
+                return 2, messages, new_model
+
+        messages.append({"role": "user", "content": user_input})
+        return 0, messages, model
+
+    def _save(self, messages):
+        print("\nSystem: Please specify the filepath you would like to save to as a *.json file, or '/cancel'.")
+        path = input("Path: ")
+        if path == "/cancel":
+            return 1, None
+        with open(Path(path), "w") as file:
+            json.dump(messages, file)
+        return 0, path
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..ec104a3
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,3 @@
+openai
+tiktoken
+prompt_toolkit
diff --git a/style.py b/style.py
new file mode 100644
--- /dev/null
+++ b/style.py
@@ -0,0 +1,28 @@
+import sys
+import time
+
+from prompt_toolkit import prompt
+from prompt_toolkit.styles import Style
+
+class StyleLog:
+
+    # TODO: Style user/assistant prompts with prompt_toolkit; stub for now.
+    @staticmethod
+    def prompt(role: str):
+        pass
+
+    @staticmethod
+    def show_ellipsis(api_call_done):
+        # Print a dot once per second until the API call signals completion, then clear the line.
+        loop = True
+        while loop:
+            for i in range(0, 4):
+                if api_call_done.is_set():
+                    loop = False
+                    sys.stdout.write('\r' + ' ' * 4 + '\r')
+                    break
+                time.sleep(1)
+                sys.stdout.write('.')
+                sys.stdout.flush()
+            sys.stdout.write('\r' + ' ' * 4 + '\r')
+            sys.stdout.flush()
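A note on the concurrency pattern chat.py uses: the API request runs on a worker thread that delivers its result through a queue.Queue, while a threading.Event tells the ellipsis animation when to stop. Below is a minimal, self-contained sketch of that handshake; slow_call is a hypothetical stand-in for the OpenAI request and is not part of this commit.

import queue
import sys
import threading
import time

def slow_call(result_queue):
    # Hypothetical stand-in for the network request: block, then hand back a result.
    time.sleep(3)
    result_queue.put("response text")

def spinner(done):
    # Print dots until the caller sets 'done', then clear the line.
    while not done.is_set():
        sys.stdout.write('.')
        sys.stdout.flush()
        time.sleep(1)
    sys.stdout.write('\r' + ' ' * 4 + '\r')
    sys.stdout.flush()

done = threading.Event()
results = queue.Queue()
worker = threading.Thread(target=slow_call, args=(results,))
anim = threading.Thread(target=spinner, args=(done,))
worker.start()
anim.start()
worker.join()   # block until the result has been queued
done.set()      # signal the spinner to stop
anim.join()
print(results.get())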