diff --git a/orchestrator.py b/orchestrator.py
index 08d378f..911a108 100644
--- a/orchestrator.py
+++ b/orchestrator.py
@@ -82,11 +82,11 @@ def handle_intro(self, llm_response):
         # Do this all as one piece, at least for now
         out = ''
         for chunk in llm_response:
-            if len(chunk["choices"]) == 0:
+            if len(chunk.choices) == 0:
                 continue
-            if "content" in chunk["choices"][0]["delta"]:
-                if chunk["choices"][0]["delta"]["content"] != {}: #streaming a content chunk
-                    next_chunk = chunk["choices"][0]["delta"]["content"]
+            if chunk.choices[0].delta.content:
+                if chunk.choices[0].delta.content != {}: #streaming a content chunk
+                    next_chunk = chunk.choices[0].delta.content
                     out += next_chunk
 
         return self.ai_tts_service.run_tts(out)
@@ -95,11 +95,11 @@ def handle_llm_response(self, llm_response):
         full_response = ''
         prompt_started = False
         for chunk in llm_response:
-            if len(chunk["choices"]) == 0:
+            if len(chunk.choices) == 0:
                 continue
-            if "content" in chunk["choices"][0]["delta"]:
-                if chunk["choices"][0]["delta"]["content"] != {}: #streaming a content chunk
-                    next_chunk = chunk["choices"][0]["delta"]["content"]
+            if chunk.choices[0].delta.content:
+                if chunk.choices[0].delta.content != {}: #streaming a content chunk
+                    next_chunk = chunk.choices[0].delta.content
                     out += next_chunk
                     full_response += next_chunk
 
diff --git a/services/azure_ai_service.py b/services/azure_ai_service.py
index c0e5eb5..0cfbd9f 100644
--- a/services/azure_ai_service.py
+++ b/services/azure_ai_service.py
@@ -1,6 +1,10 @@
 import json
 import io
-import openai
+import os
+from openai import AzureOpenAI
+
+client = AzureOpenAI(api_version='2023-06-01-preview', api_key=os.getenv("AZURE_CHATGPT_KEY"), azure_endpoint=os.getenv("AZURE_CHATGPT_ENDPOINT"))
+dalle_client = AzureOpenAI(api_version='2023-06-01-preview', api_key=os.getenv('AZURE_DALLE_KEY'), azure_endpoint=os.getenv('AZURE_DALLE_ENDPOINT'))
 import os
 import requests
 
@@ -49,27 +53,15 @@ def run_llm(self, messages, stream = True):
         messages_for_log = json.dumps(messages)
         self.logger.error(f"==== generating chat via azure openai: {messages_for_log}")
-        response = openai.ChatCompletion.create(
-            api_type = 'azure',
-            api_version = '2023-06-01-preview',
-            api_key = os.getenv("AZURE_CHATGPT_KEY"),
-            api_base = os.getenv("AZURE_CHATGPT_ENDPOINT"),
-            deployment_id=os.getenv("AZURE_CHATGPT_DEPLOYMENT_ID"),
-            stream=stream,
-            messages=messages
-        )
+        response = client.chat.completions.create(model=os.getenv("AZURE_CHATGPT_DEPLOYMENT_ID"),
+                                                  stream=stream,
+                                                  messages=messages)
         return response
 
     def run_image_gen(self, sentence):
         self.logger.info("generating azure image", sentence)
-        image = openai.Image.create(
-            api_type = 'azure',
-            api_version = '2023-06-01-preview',
-            api_key = os.getenv('AZURE_DALLE_KEY'),
-            api_base = os.getenv('AZURE_DALLE_ENDPOINT'),
-            deployment_id = os.getenv("AZURE_DALLE_DEPLOYMENT_ID"),
-            prompt=f'{sentence} in the style of {self.image_style}',
-            n=1,
-            size=f"1024x1024",
-        )
+        image = dalle_client.images.generate(model=os.getenv("AZURE_DALLE_DEPLOYMENT_ID"),
+                                             prompt=f'{sentence} in the style of {self.image_style}',
+                                             n=1,
+                                             size="1024x1024")
-        url = image["data"][0]["url"]
+        url = image.data[0].url
         response = requests.get(url)
diff --git a/services/open_ai_service.py b/services/open_ai_service.py
index 9621534..e5cdad9 100644
--- a/services/open_ai_service.py
+++ b/services/open_ai_service.py
@@ -2,7 +2,10 @@
 import requests
 from PIL import Image
 import io
-import openai
+import os
+from openai import OpenAI
+
+client = OpenAI(api_key=os.getenv("OPEN_AI_KEY"))
 import os
 import time
 import json
@@ -18,14 +21,11 @@ def run_llm(self, messages, stream = True):
         model = os.getenv("OPEN_AI_MODEL")
         if not model:
             model = "gpt-4"
-        response = openai.ChatCompletion.create(
-            api_type = 'openai',
-            api_version = '2020-11-07',
-            api_base = "https://api.openai.com/v1",
-            api_key = os.getenv("OPEN_AI_KEY"),
-            model=model,
-            stream=stream,
-            messages=messages
+
+        response = client.chat.completions.create(
+            messages=messages,
+            model=model,
+            stream=stream
         )
 
         return response
@@ -34,15 +34,9 @@ def run_image_gen(self, sentence):
 
         self.logger.info("🖌️ generating openai image async for ", sentence)
         start = time.time()
-        image = openai.Image.create(
-            api_type = 'openai',
-            api_version = '2020-11-07',
-            api_base = "https://api.openai.com/v1",
-            api_key = os.getenv("OPEN_AI_KEY"),
-            prompt=f'{sentence} in the style of {self.image_style}',
-            n=1,
-            size=f"1024x1024",
-        )
+        image = client.images.generate(prompt=f'{sentence} in the style of {self.image_style}',
+                                       n=1,
+                                       size="1024x1024")
-        image_url = image["data"][0]["url"]
-        self.logger.info("🖌️ generated image from url", image["data"][0]["url"])
+        image_url = image.data[0].url
+        self.logger.info("🖌️ generated image from url", image_url)
         response = requests.get(image_url)