summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--chat.py12
-rw-r--r--help.py71
-rw-r--r--requirements.txt1
-rw-r--r--style.py42
4 files changed, 82 insertions, 44 deletions
diff --git a/chat.py b/chat.py
index be2d77d..1221d8e 100644
--- a/chat.py
+++ b/chat.py
@@ -1,8 +1,10 @@
import queue
import sys
+import os
import threading
import time
+import curses
from openai import OpenAI
from help import HelpCommands, start_chat
@@ -38,13 +40,13 @@ def main():
helper = HelpCommands(model)
styler = StyleLog()
- messages = start_chat(model)
+ messages = start_chat(model, styler)
while True:
- # TODO: Format output nicer :)
- user_input = input("\nInput: ")
- status, messages, model = helper.command(user_input, messages, model)
+ user_input = styler.prompt("user", "")
+
+ status, messages, model = helper.command(user_input, messages, model, styler)
if status == 1:
break
elif status == 2:
@@ -69,7 +71,7 @@ def main():
ai_response = api_call_queue.get()
messages.append({"role": "assistant", "content": ai_response})
- print(f"\nAI: {ai_response}\n")
+ styler.prompt("assistant", f"{ai_response}\n")
# TODO: Add some form of token check, as to not overflow
diff --git a/help.py b/help.py
index cd7eb4b..530e2eb 100644
--- a/help.py
+++ b/help.py
@@ -1,6 +1,7 @@
import json
import re
import subprocess
+import sys
from pathlib import Path
import tiktoken
@@ -8,9 +9,11 @@ import tiktoken
# TODO: I don't love the way this file is structured from an extensibility perspective.
# Find a way to maybe like the 'options' dict with the command list?
-def start_chat(model: str):
- print("System: Welcome to cli-gpt. You may type your questions, or seek additional functionality via '/help'.")
- print(f"System: Currently using model '{model}'.")
+def start_chat(model: str, styler):
+ sys.stdout.write('\033[2J\033[H')
+ styler.prompt("none", "")
+ styler.prompt("system", "Welcome to cli-gpt. You may type your questions, or seek additional functionality via '/help'.")
+ styler.prompt("system", f"Currently using model '{model}'.")
return [{"role": "system", "content": "You are a helpful assistant."}]
def get_token_count(messages: list, model: str):
@@ -65,70 +68,70 @@ class HelpCommands:
raise TypeError(f"System: Model '{model}' is not available. Please start again and re-specify model version, or leave blank.")
- def command(self, user_input: str, messages: list, model: str) -> (int, list, str):
+ def command(self, user_input: str, messages: list, model: str, styler) -> (int, list, str):
if user_input.lower() in list(self.options.keys()):
user_input_lower = user_input.lower()
- if user_input_lower == "/exit":
+ if "/exit" in user_input_lower:
return 1, [None], ""
- if user_input_lower == "/context":
- print("\nSystem: Please provide the URL you would like to curl.")
+ if "/context" in user_input_lower:
+ styler.prompt("system", "Please provide the URL you would like to curl.")
url = input("URL: ")
curl_output = subprocess.check_output(f"curl {url}", shell=True).decode('utf-8').strip()
return [None, f"I would like to provide the following context from a curl command to '{url} to this chat: {curl_output}"]
- if user_input_lower == "/help":
- print(f"\nSystem: Below is a list of available commands.\n")
+ if "/help" in user_input_lower:
+ styler.prompt("system", "Below is a list of available commands.")
for key in list(self.options.keys()):
- print(f" - {key}: {self.options[key]}")
+ styler.prompt("none", f" - {key}: {self.options[key]}")
return 2, messages, model
- if user_input_lower == "/load":
- print("\nSystem: Please specify the filepath you would like to load in from, or '/cancel'.")
+ if "/load" in user_input_lower:
+ styler.prompt("system", "Please specify the filepath you would like to load in from, or '/cancel'.")
path = input("Path: ")
if path != "/cancel":
with open(Path(path), "r") as file:
messages = json.load(file)
- print(f"System: Successfully read in from {path}. Continuing chat.")
+ styler.prompt("system", f"Successfully read in from {path}. Continuing chat.")
return 2, messages, model
- if user_input_lower == "/save":
+ if "/save" in user_input_lower:
status, destination = self._save(messages)
if status == 1:
return 2, messages, model
- print(f"System: Successfully saved to {destination}. Closing chat.")
+ styler.prompt("system", f"Successfully saved to {destination}. Closing chat.")
return 1, [None], ""
- if user_input_lower == "/clear":
- print(f"\nSystem: Clearing messages and restarting log.\n\n")
+ if "/clear" in user_input_lower:
+ styler.prompt("system", "Clearing messages and restarting log.\n\n")
messages = start_chat(model, styler)
return 2, messages, model
- if user_input_lower == "/info":
- print(f"\nSystem: This chatlog has used {get_token_count(messages, model)} token for model version '{model}'.")
- print("System: Currently using cli-gpt version 0.0.1.")
+ if "/info" in user_input_lower:
+ styler.prompt("system", f"This chatlog has used {get_token_count(messages, model)} tokens for model version '{model}'.")
+ styler.prompt("system", "Currently using cli-gpt version 0.0.1.")
return 2, messages, model
- if user_input_lower == "/model":
- print("\nSystem: Below is a list of available models. View up-to-date model information in the OpenAI API documentation.")
- print("\n - Text Models")
+ if "/model" in user_input_lower:
+ styler.prompt("system", "Below is a list of available models. View up-to-date model information in the OpenAI API documentation.")
+ styler.prompt("none", "\n - Text Models")
for list_model in self.text_models:
- print(f" - {list_model}")
- print("\n - Image Models")
+ styler.prompt("none", f" - {list_model}")
+ styler.prompt("none", "\n - Image Models")
for list_model in self.image_models:
- print(f" - {list_model}")
- print("\nSystem: Change model version below, use '/list' to reprint available models, or '/cancel' to return to chat.")
+ styler.prompt("none", f" - {list_model}")
+ styler.prompt("system", "Change model version below, use '/list' to reprint available models, or '/cancel' to return to chat.")
new_model = input("\nModel: ")
while new_model not in self.text_models and new_model not in self.image_models:
if new_model == "/list":
- print("\nSystem: Below is a list of available models. View up-to-date model information in the OpenAI API documentation.")
- print("\n - Text Models")
+ styler.prompt("system", "Below is a list of available models. View up-to-date model information in the OpenAI API documentation.")
+ styler.prompt("none", "\n - Text Models")
for list_model in self.text_models:
- print(f" - {list_model}")
- print("\n - Image Models")
+ styler.prompt("none", f" - {list_model}")
+ styler.prompt("none", "\n - Image Models")
for list_model in self.image_models:
- print(f" - {list_model}")
+ styler.prompt("none", f" - {list_model}")
elif new_model == "/cancel":
return 2, messages, model
else:
@@ -137,13 +140,13 @@ class HelpCommands:
# "image" models and "text" models behave different, handle switching
if (self.model_type == "text" and new_model in self.image_models) or (self.model_type == "image" and new_model in self.text_models):
- print("\nSystem: Switching between 'text' and 'image' models requires clearing the current message log. Would you like to save before switching models?")
+ styler.prompt("system", "Switching between 'text' and 'image' models requires clearing the current message log. Would you like to save before switching models?")
user_save = input("Save? (y,N): ")
if user_save.lower() == "y":
self._save(messages)
return 2, messages, new_model
- if user_input_lower == "/write":
+ if "/write" in user_input_lower:
pattern = r'```(.*?)```'
code_blocks = re.findall(pattern, messages[-1]['content'], re.DOTALL)
print(f"\nSystem: Found {len(code_blocks)} code examples.")
diff --git a/requirements.txt b/requirements.txt
index ec104a3..cdf3001 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,3 +1,4 @@
openai
tiktoken
prompt_toolkit
+pygments
diff --git a/style.py b/style.py
index 65fecf1..ad616d8 100644
--- a/style.py
+++ b/style.py
@@ -1,14 +1,47 @@
import sys
import time
-from prompt_toolkit import prompt
+from prompt_toolkit import PromptSession, print_formatted_text, prompt
+from prompt_toolkit.formatted_text import HTML
+from prompt_toolkit.lexers import PygmentsLexer
from prompt_toolkit.styles import Style
+from pygments.lexers import PythonLexer
+from pygments.styles.native import NativeStyle
+from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
+from prompt_toolkit.history import FileHistory
+from prompt_toolkit.completion import WordCompleter
class StyleLog:
- def prompt(role: str):
+ styler = None
+
+ style = Style.from_dict({
+ 'input': 'bg:#000000 #00ff00',
+ 'assistant': 'bg:#000000 #7777ff',
+ 'system': 'bg:#000000 #ff00ff',
+ })
+
+ def __init__(self):
+ self.styler = PromptSession(lexer=PygmentsLexer(PythonLexer), auto_suggest=AutoSuggestFromHistory(), history=FileHistory('history.txt'))
+
+ def prompt(self, role: str, message: str):
+ if role == 'assistant':
+ print_formatted_text(HTML("<assistant>Assistant: </assistant>%s") % (message, ), style = self.style)
+ elif role == 'user':
+ user_input = prompt(
+ [
+ ('class:input', "\nInput: "),
+ ('', '')
+ ],
+ style = self.style
+ )
+ return user_input
+ elif role == 'system':
+ print_formatted_text(HTML('<system>System:</system> %s') % (message, ), style = self.style)
+ elif role == 'none':
+ print_formatted_text(HTML('%s') % (message, ), style = self.style)
return
-
+
def show_ellipsis(self, api_call_done):
loop = True
while loop:
@@ -21,5 +54,4 @@ class StyleLog:
sys.stdout.write('.')
sys.stdout.flush()
sys.stdout.write('\r' + ' ' * 4 + '\r')
- sys.stdout.flush()
-
+ sys.stdout.flush()