author    Schark <jordan@schark.online>  2023-12-05 14:33:42 -0800
committer Schark <jordan@schark.online>  2023-12-05 14:33:42 -0800
commit    90e2e98f420b7b213ff16c1376a2905f10addb5f (patch)
tree      e352b92ed9ec39fd7a83166aeba8178c196d1f00 /src/chat.py
parent    45cfb6e15597676f3f9be4c76a6ddc78076e2748 (diff)
Moving API key location to 'key', file restructure
Diffstat (limited to 'src/chat.py')
-rw-r--r--  src/chat.py  82
1 file changed, 82 insertions, 0 deletions
diff --git a/src/chat.py b/src/chat.py
new file mode 100644
index 0000000..cfa9819
--- /dev/null
+++ b/src/chat.py
@@ -0,0 +1,82 @@
+import queue
+import sys
+import threading
+
+from openai import OpenAI
+
+from help import HelpCommands, start_chat
+from style import StyleLog
+
+# Read the API key from the "key" file
+# TODO: read from an environment variable in the future?
+with open("key", "r") as file:
+    token = file.readline().strip()
+client = OpenAI(api_key=token)
+
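+# Worker thread: request a chat completion and hand the reply back via the queue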
+def text_call(api_call_queue, messages, model):
+ response = client.chat.completions.create(
+ model=model,
+ messages=messages
+ )
+ api_call_queue.put(response.choices[0].message.content)
+
+def image_call(api_call_queue, messages, model):
+    # Image models go through the images endpoint, not chat completions;
+    # the latest user message is used as the generation prompt
+    response = client.images.generate(
+        model=model,
+        prompt=messages[-1]["content"],
+        n=1
+    )
+    api_call_queue.put(response.data[0].url)
+
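+# Interactive loop: read input, run helper commands, then call the API on a worker thread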
+def main():
+
+    model = sys.argv[1] if len(sys.argv) > 1 else "gpt-3.5-turbo"
+
+ helper = HelpCommands(model)
+ styler = StyleLog()
+ messages = start_chat(model, styler)
+
+ while True:
+
+ user_input = styler.prompt("user", "")
+
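+        # Helper commands: status 1 exits the loop, status 2 skips the API call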
+ status, messages, model = helper.command(user_input, messages, model, styler)
+ if status == 1:
+ break
+ elif status == 2:
+ continue
+
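+        # Event + queue coordinate the API worker and the ellipsis spinner threads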
+        api_call_done = threading.Event()
+ api_call_queue = queue.Queue()
+
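+        # DALL-E models hit the images endpoint; everything else is a chat completion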
+        if model in ("dall-e-2", "dall-e-3"):
+            response_thread = threading.Thread(target=image_call, args=(api_call_queue, messages, model))
+        else:
+            response_thread = threading.Thread(target=text_call, args=(api_call_queue, messages, model))
+ response_thread.start()
+
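+        # Animate a "..." typing indicator until the API call finishes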
+ ellipsis_thread = threading.Thread(target=styler.show_ellipsis, args=(api_call_done,))
+ ellipsis_thread.start()
+
+ response_thread.join()
+ api_call_done.set()
+ ellipsis_thread.join()
+
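+        # Append the reply so the next request carries the full conversation history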
+ ai_response = api_call_queue.get()
+ messages.append({"role": "assistant", "content": ai_response})
+ styler.prompt("assistant", f"{ai_response}\n")
+
+        # TODO: add a token count check so the context window doesn't overflow
+
+
+if __name__ == "__main__":
+ main()