Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
16 changes: 8 additions & 8 deletions orchestrator.py
Original file line number Diff line number Diff line change
Expand Up @@ -82,11 +82,11 @@ def handle_intro(self, llm_response):
# Do this all as one piece, at least for now
out = ''
for chunk in llm_response:
if len(chunk["choices"]) == 0:
if len(chunk.choices):
continue
if "content" in chunk["choices"][0]["delta"]:
if chunk["choices"][0]["delta"]["content"] != {}: #streaming a content chunk
next_chunk = chunk["choices"][0]["delta"]["content"]
if chunk.choices[0].delta.content:
if chunk.choices[0].delta.content != {}: #streaming a content chunk
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is more a note for us to follow up on as I realize it is not in scope of this PR (so more for @chadbailey59 and @Moishe maybe!), but the repeated list and property access here is redundant - I suggest, in a follow-up PR, accessing the required elements once for reuse (here and below). e.g.:

choices = chunk.choices
if not choices:
    continue
delta = choices[0].delta
if delta.content:
    # ... etc

(Hastily written just to give an idea of what I mean)

next_chunk = chunk.choices[0].delta.content
out += next_chunk
return self.ai_tts_service.run_tts(out)

Expand All @@ -95,11 +95,11 @@ def handle_llm_response(self, llm_response):
full_response = ''
prompt_started = False
for chunk in llm_response:
if len(chunk["choices"]) == 0:
if len(chunk.choices) == 0:
continue
if "content" in chunk["choices"][0]["delta"]:
if chunk["choices"][0]["delta"]["content"] != {}: #streaming a content chunk
next_chunk = chunk["choices"][0]["delta"]["content"]
if chunk.choices[0].delta.content:
if chunk.choices[0].delta.content != {}: #streaming a content chunk
next_chunk = chunk.choices[0].delta.content
out += next_chunk
full_response += next_chunk

Expand Down
38 changes: 18 additions & 20 deletions services/azure_ai_service.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,8 @@
import json
import io
import openai
from openai import OpenAI

client = OpenAI()
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This appears to only be used from AzureAIService; if so I'd suggest making it a class variable.

import os
import requests

Expand Down Expand Up @@ -49,30 +51,26 @@ def run_llm(self, messages, stream = True):
messages_for_log = json.dumps(messages)
self.logger.error(f"==== generating chat via azure openai: {messages_for_log}")

response = openai.ChatCompletion.create(
api_type = 'azure',
api_version = '2023-06-01-preview',
api_key = os.getenv("AZURE_CHATGPT_KEY"),
api_base = os.getenv("AZURE_CHATGPT_ENDPOINT"),
deployment_id=os.getenv("AZURE_CHATGPT_DEPLOYMENT_ID"),
stream=stream,
messages=messages
)
response = client.chat.completions.create(api_type = 'azure',
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Suggest just cleaning up the indentation here for readability (and below)

api_version = '2023-06-01-preview',
api_key = os.getenv("AZURE_CHATGPT_KEY"),
api_base = os.getenv("AZURE_CHATGPT_ENDPOINT"),
deployment_id=os.getenv("AZURE_CHATGPT_DEPLOYMENT_ID"),
stream=stream,
messages=messages)
return response

def run_image_gen(self, sentence):
self.logger.info("generating azure image", sentence)

image = openai.Image.create(
api_type = 'azure',
api_version = '2023-06-01-preview',
api_key = os.getenv('AZURE_DALLE_KEY'),
api_base = os.getenv('AZURE_DALLE_ENDPOINT'),
deployment_id = os.getenv("AZURE_DALLE_DEPLOYMENT_ID"),
prompt=f'{sentence} in the style of {self.image_style}',
n=1,
size=f"1024x1024",
)
image = client.images.generate(api_type = 'azure',
api_version = '2023-06-01-preview',
api_key = os.getenv('AZURE_DALLE_KEY'),
api_base = os.getenv('AZURE_DALLE_ENDPOINT'),
deployment_id = os.getenv("AZURE_DALLE_DEPLOYMENT_ID"),
prompt=f'{sentence} in the style of {self.image_style}',
n=1,
size=f"1024x1024")

url = image["data"][0]["url"]
response = requests.get(url)
Expand Down
34 changes: 16 additions & 18 deletions services/open_ai_service.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,10 @@
import requests
from PIL import Image
import io
import openai
import os
from openai import OpenAI

client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This appears to only be used from OpenAIService; if so, I'd suggest making it a class variable. It also looks like our earlier reference to the key was using the variable name OPEN_AI_KEY - was this meant to be that?

import os
import time
import json
Expand All @@ -18,14 +21,11 @@ def run_llm(self, messages, stream = True):
model = os.getenv("OPEN_AI_MODEL")
if not model:
model = "gpt-4"
response = openai.ChatCompletion.create(
api_type = 'openai',
api_version = '2020-11-07',
api_base = "https://api.openai.com/v1",
api_key = os.getenv("OPEN_AI_KEY"),
model=model,
stream=stream,
messages=messages

response = client.chat.completions.create(
messages=messages,
model="gpt-4",
stream=stream
)

return response
Expand All @@ -34,15 +34,13 @@ def run_image_gen(self, sentence):
self.logger.info("🖌️ generating openai image async for ", sentence)
start = time.time()

image = openai.Image.create(
api_type = 'openai',
api_version = '2020-11-07',
api_base = "https://api.openai.com/v1",
api_key = os.getenv("OPEN_AI_KEY"),
prompt=f'{sentence} in the style of {self.image_style}',
n=1,
size=f"1024x1024",
)
image = client.images.generate(api_type = 'openai',
api_version = '2020-11-07',
api_base = "https://api.openai.com/v1",
api_key = os.getenv("OPEN_AI_KEY"),
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

If we specify the API key when constructing the client, I'd presume this might no longer be needed. (let's clean up the indentation as well!)

prompt=f'{sentence} in the style of {self.image_style}',
n=1,
size=f"1024x1024")
image_url = image["data"][0]["url"]
self.logger.info("🖌️ generated image from url", image["data"][0]["url"])
response = requests.get(image_url)
Expand Down