Migrating to OpenAI functions #69

Draft
wants to merge 2 commits into base: main
280 changes: 280 additions & 0 deletions server/src/core/llm/gpt_functions.py
@@ -0,0 +1,280 @@
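# Tool schemas for the OpenAI Chat Completions function-calling API.
# Note: JSON Schema "required" only accepts direct property names, so
# nested requirements (e.g. the url inside args) are declared on the
# nested object schema rather than as dotted paths.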
GPT_FUNCTIONS = [
{
"type": "function",
"function": {
"name": "browse_website",
"description": "Function to browse a website",
"parameters": {
"type": "object",
"properties": {
"thoughts": {
"type": "object",
"description": "The thoughts of the bot",
"properties": {
"reasoning": {
"type": "string",
"description": "The reasoning of the bot. Use future tense e.g. 'I will do this'"
},
"speak": {
"type": "string",
"description": "Friendly thoughts summary to say to the user. Use future tense e.g. 'I will do this'. Important: Make sure to always translate your reply to the user's language."
}
}
},
"args": {
"type": "object",
"description": "The arguments for the browse website",
"properties": {
"url": {
"type": "string",
"description": "The url of the website"
}
},
"required": [
"url"
]
},
"request_render": {
"type": "object",
"description": "The instruction of how to render the request fields",
"properties": {
"field_type": {
"type": "string",
"description": "The type of the field",
"enum": [
"input",
"checkbox",
"select",
"password"
]
},
"field_options": {
"type": "array",
"description": "The options for the field",
"items": {
"type": "string"
}
}
}
}
},
"required": [
"args",
"args.url"
]
}
}
},
{
"type": "function",
"function": {
"name": "js_func",
"description": "Execute a javascript function",
"parameters": {
"type": "object",
"properties": {
"thoughts": {
"type": "object",
"description": "The thoughts of the bot",
"properties": {
"reasoning": {
"type": "string",
"description": "The reasoning of the bot. Use future tense e.g. 'I will do this'"
},
"speak": {
"type": "string",
"description": "Friendly thoughts summary to say to the user. Use future tense e.g. 'I will do this'. Important: Make sure to always translate your reply to the user's language."
}
}
},
"name": {
"type": "string",
"description": "The name of the function"
},
"code": {
"type": "string",
"description": "The code of the function"
},
"param": {
"type": "object",
"description": "The parameters of the function"
}
},
"required": [
"name",
"code",
"param"
]
}
}
},
{
"type": "function",
"function": {
"name": "api_call",
"description": "Function to call REST APIs",
"parameters": {
"type": "object",
"properties": {
"thoughts": {
"type": "object",
"description": "The thoughts of the bot",
"properties": {
"reasoning": {
"type": "string",
"description": "The reasoning of the bot. Use future tense e.g. 'I will do this'"
},
"speak": {
"type": "string",
"description": "Friendly thoughts summary to say to the user. Use future tense e.g. 'I will do this'. Important: Make sure to always translate your reply to the user's language."
}
}
},
"args": {
"type": "object",
"description": "The arguments for the http request",
"properties": {
"url": {
"type": "string",
"description": "The url of the http request"
},
"method": {
"type": "string",
"description": "The http method of the http request"
},
"data_request": {
"type": "object",
"description": "The data of the http request"
},
"headers": {
"type": "object",
"description": "The headers of the http request"
}
},
"required": [
"url",
"method",
"data_request",
"headers"
]
},
"request_render": {
"type": "object",
"description": "The instruction of how to render the request fields",
"properties": {
"field_type": {
"type": "string",
"description": "The type of the field",
"enum": [
"input",
"checkbox",
"select",
"password"
]
},
"field_options": {
"type": "array",
"description": "The options for the field",
"items": {
"type": "string"
}
}
}
},
"response_render": {
"type": "object",
"description": "The instruction of how to render the response",
"properties": {
"render_type": {
"type": "string",
"description": "The type of the render",
"enum": [
"list",
"chart"
]
},
"fields": {
"type": "array",
"description": "The fields to render",
"items": {
"type": "string"
}
}
}
}
},
"required": [
"args.url",
"args.method",
"args.data_request",
"args.headers"
"request_render"
]
}
}
},
{
"type": "function",
"function": {
"name": "ask_question",
"description": "Function to ask a question to the user",
"parameters": {
"type": "object",
"properties": {
"question": {
"type": "string",
"description": "The question to ask"
}
},
"required": [
"question"
]
}
}
},
{
"type": "function",
"function": {
"name": "send_email",
"parameters": {
"type": "object",
"properties": {
"thoughts": {
"type": "object",
"description": "The thoughts of the bot",
"properties": {
"reasoning": {
"type": "string",
"description": "The reasoning of the bot. Use future tense e.g. 'I will do this'"
},
"speak": {
"type": "string",
"description": "Friendly thoughts summary to say to the user. Use future tense e.g. 'I will do this'. Important: Make sure to always translate your reply to the user's language."
}
}
},
"args": {
"type": "object",
"description": "The thoughts of the bot",
"properties": {
"name": {
"type": "string",
"description": "The name of the sender"
},
"email": {
"type": "string",
"description": "The email of the sender"
},
"subject": {
"type": "string",
"description": "The subject of the email"
},
"body": {
"type": "string",
"description": "The body of the email"
}
},
"required": [
"name",
"email",
"subject",
"body"
]
}
},
"required": [
"name",
"email",
"subject",
"body"
]
}
}
}
]
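These schemas follow the tools format of the OpenAI Chat Completions API, so when the model invokes one of them the call arrives under message.tool_calls rather than message.content. A minimal sketch of decoding such a call (the helper parse_tool_call and the example shape are illustrative, not part of this PR):

import json

def parse_tool_call(response: dict):
    """Return (function_name, arguments) if the model chose a tool, else None."""
    message = response["choices"][0]["message"]
    tool_calls = message.get("tool_calls")
    if not tool_calls:
        return None  # plain text answer; read message["content"] instead
    call = tool_calls[0]["function"]
    # "arguments" arrives as a JSON-encoded string and must be decoded.
    return call["name"], json.loads(call["arguments"])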
14 changes: 8 additions & 6 deletions server/src/core/llm/llm_service.py
@@ -5,6 +5,7 @@

from core.app.app_dao import App
from core.common.utils import filter_content
from core.llm.gpt_functions import GPT_FUNCTIONS
from core.llm.openapi_client import OpenAI
from core.llm.prompt_handler import build_prompt_answer_questions, build_prompt_command, MessageCompletion, prompt_text_form, get_prompt_objs_from_history, prompt_pick_content
from core.llm.openai_stream import OpenAIStream
@@ -29,9 +30,9 @@ def __init__(self, gpt3: OpenAI, gpt4: OpenAI, completion_stream: OpenAIStream):
self.openai_api_gpt3 = gpt3
self.openai_api_gpt4 = gpt4

def _gpt3(self, prompts, temperature=0.1) -> LLMResponse:
def _gpt3(self, prompts, functions=None, temperature=0.1) -> LLMResponse:
print("gpt3 prompt", prompts)
response = self.openai_api_gpt3.get_chat_completions(prompts, temperature=temperature)
response = self.openai_api_gpt3.get_chat_completions(prompts, functions=functions, temperature=temperature)
print("gpt3 response successfuly")
usage = response["usage"]
usage["model"] = response["model"]
@@ -40,9 +41,9 @@ def _gpt3(self, prompts, temperature=0.1) -> LLMResponse:
message=response["choices"][0]["message"]["content"]
)

def _gpt4(self, prompts: List[dict], temperature=0.1) -> LLMResponse:
def _gpt4(self, prompts: List[dict], functions=None, temperature=0.1) -> LLMResponse:
print("gpt4 prompt", prompts)
response = self.openai_api_gpt4.get_chat_completions(prompts, temperature=temperature)
response = self.openai_api_gpt4.get_chat_completions(prompts, functions=functions, temperature=temperature)
usage = response["usage"]
usage["model"] = response["model"]
return LLMResponse(
@@ -55,7 +56,7 @@ def get_completions_stream(self, prompts: List[dict], model, temperature):

def get_task_command(self, history: List[MessageCompletion], app: App) -> LLMResponse:
prompts = build_prompt_command(history)
return self._gpt3(prompts, app.app_temperature)
return self._gpt3(prompts, functions=GPT_FUNCTIONS, temperature=app.app_temperature)

def get_question_answer(self, user_input: str, app: App, history: List[MessageCompletion]) -> LLMResponse:
prompts, usages = self.get_question_prompts(app, history, user_input)
@@ -93,6 +94,7 @@ def call_ai_function(self, function, args, description) -> str:

def embed_text(self, text: str) -> List[list]:
return self.openai_api_gpt3.create_openai_embeddings([text])

def audio_to_text(self, audio: str) -> str:
return self.openai_api_gpt3.transcriptions(audio)

@@ -111,7 +113,7 @@ def get_keywords(self, text: str) -> LLMResponse:

def llm_service_factory(app_key_gpt3: str, app_key_gpt4: str) -> LLMService:
return LLMService(
OpenAI(app_key_gpt3, "gpt-3.5-turbo-0613"),
OpenAI(app_key_gpt3, "gpt-3.5-turbo-1106"),
OpenAI(app_key_gpt4, "gpt-4"),
OpenAIStream(app_key_gpt4),
)
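One caveat with wiring GPT_FUNCTIONS into get_task_command: when the model elects to call a tool, response["choices"][0]["message"]["content"] comes back as None, so _gpt3 as written would hand LLMResponse a None message. A minimal guard, sketched under the assumption that downstream code expects a string (the JSON fallback is illustrative, not something this PR specifies):

import json

message = response["choices"][0]["message"]
content = message.get("content")
if content is None and message.get("tool_calls"):
    # Surface the tool call as text so LLMResponse.message stays a string;
    # callers can json.loads() it back into a name/arguments pair.
    content = json.dumps(message["tool_calls"][0]["function"])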
7 changes: 5 additions & 2 deletions server/src/core/llm/openapi_client.py
@@ -27,8 +27,9 @@ def __init__(self, api_key, model="gpt-3"):
max_retries=OPENAI_MAX_RETRIES,
errors=(OpenAIRateLimitError, OpenAIError),
)
def get_chat_completions(self, messages: List[dict], max_tokens=1000, temperature=0.1):
def get_chat_completions(self, messages: List[dict], functions=None, max_tokens=1000, temperature=0.1):
"""
:param functions:
:param messages:
:param max_tokens:
:param temperature:
@@ -41,8 +42,10 @@ def get_chat_completions(self, messages: List[dict], max_tokens=1000, temperatur
"max_tokens": max_tokens,
"temperature": temperature,
"top_p": 1,
"stream": False,
"stream": False
}
if functions:
completion_options["tools"] = functions

response = requests.post(
f"{self.base_url}/chat/completions",
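Note that the parameter is still named functions but is sent as the newer tools field, which is the current Chat Completions convention. For reference, the resulting request body looks roughly like this (tool_choice is optional and defaults to "auto"; this PR does not set it):

completion_options = {
    "model": "gpt-3.5-turbo-1106",
    "messages": [{"role": "user", "content": "Open https://example.com"}],
    "max_tokens": 1000,
    "temperature": 0.1,
    "top_p": 1,
    "stream": False,
    "tools": GPT_FUNCTIONS,  # schemas from gpt_functions.py
    # "tool_choice": {"type": "function", "function": {"name": "browse_website"}},  # force a specific tool
}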