From a17aa07e769ddc033cf0f2057835a6d4a9814a70 Mon Sep 17 00:00:00 2001 From: Alexsandro Souza Date: Sat, 9 Dec 2023 16:53:52 +0000 Subject: [PATCH 1/2] Created the 5 functions --- server/src/core/llm/gpt_functions.py | 280 ++++++++++++++++++++++++++ server/src/core/llm/llm_service.py | 8 +- server/src/core/llm/openapi_client.py | 7 +- server/src/core/llm/prompt_handler.py | 91 +-------- 4 files changed, 299 insertions(+), 87 deletions(-) create mode 100644 server/src/core/llm/gpt_functions.py diff --git a/server/src/core/llm/gpt_functions.py b/server/src/core/llm/gpt_functions.py new file mode 100644 index 0000000..99bac5c --- /dev/null +++ b/server/src/core/llm/gpt_functions.py @@ -0,0 +1,280 @@ +GPT_FUNCTIONS = [ + { + "type": "function", + "function": { + "name": "browse_website", + "description": "Function to browse a website", + "parameters": { + "type": "object", + "properties": { + "thoughts": { + "type": "object", + "description": "The thoughts of the bot", + "properties": { + "reasoning": { + "type": "string", + "description": "The reasoning of the bot. Use future tense e.g. 'I will do this'" + }, + "speak": { + "type": "string", + "description": "Friendly thoughts summary to say to the user. Use future tense e.g. 'I will do this'. Important: Make sure to always translate your reply to the user's language." 
+ } + } + }, + "args": { + "type": "object", + "description": "The arguments for the browse website", + "properties": { + "url": { + "type": "string", + "description": "The url of the website" + } + } + }, + "request_render": { + "type": "object", + "description": "The instruction of how to render the request fields", + "properties": { + "field_type": { + "type": "string", + "description": "The type of the field", + "enum": [ + "input", + "checkbox", + "select", + "password" + ] + }, + "field_options": { + "type": "array", + "description": "The options for the field", + "items": { + "type": "string" + } + } + } + } + }, + "required": [ + "args", + "args.url" + ] + } + } + }, + { + "type": "function", + "function": { + "name": "js_func", + "description": "Execute a javascript function", + "parameters": { + "type": "object", + "properties": { + "thoughts": { + "type": "object", + "description": "The thoughts of the bot", + "properties": { + "reasoning": { + "type": "string", + "description": "The reasoning of the bot. Use future tense e.g. 'I will do this'" + }, + "speak": { + "type": "string", + "description": "Friendly thoughts summary to say to the user. Use future tense e.g. 'I will do this'. Important: Make sure to always translate your reply to the user's language." + } + } + }, + "name": { + "type": "string", + "description": "The name of the function" + }, + "code": { + "type": "string", + "description": "The code of the function" + }, + "param": { + "type": "object", + "description": "The parameters of the function" + } + }, + "required": [ + "name", + "code", + "param" + ] + } + } + }, + { + "type": "function", + "function": { + "name": "api_call", + "description": "Function to call REST APIs", + "parameters": { + "type": "object", + "properties": { + "thoughts": { + "type": "object", + "description": "The thoughts of the bot", + "properties": { + "reasoning": { + "type": "string", + "description": "The reasoning of the bot. Use future tense e.g. 
'I will do this'"
+                            },
+                            "speak": {
+                                "type": "string",
+                                "description": "Friendly thoughts summary to say to the user. Use future tense e.g. 'I will do this'. Important: Make sure to always translate your reply to the user's language."
+                            }
+                        }
+                    },
+                    "args": {
+                        "type": "object",
+                        "description": "The arguments for the http request",
+                        "properties": {
+                            "url": {
+                                "type": "string",
+                                "description": "The url of the http request"
+                            },
+                            "method": {
+                                "type": "string",
+                                "description": "The http method of the http request"
+                            },
+                            "data_request": {
+                                "type": "object",
+                                "description": "The data of the http request"
+                            },
+                            "headers": {
+                                "type": "object",
+                                "description": "The headers of the http request"
+                            }
+                        }
+                    },
+                    "request_render": {
+                        "type": "object",
+                        "description": "The instruction of how to render the request fields",
+                        "properties": {
+                            "field_type": {
+                                "type": "string",
+                                "description": "The type of the field",
+                                "enum": [
+                                    "input",
+                                    "checkbox",
+                                    "select",
+                                    "password"
+                                ]
+                            },
+                            "field_options": {
+                                "type": "array",
+                                "description": "The options for the field",
+                                "items": {
+                                    "type": "string"
+                                }
+                            }
+                        }
+                    },
+                    "response_render": {
+                        "type": "object",
+                        "description": "The instruction of how to render the response",
+                        "properties": {
+                            "render_type": {
+                                "type": "string",
+                                "description": "The type of the render",
+                                "enum": [
+                                    "list",
+                                    "chart"
+                                ]
+                            },
+                            "fields": {
+                                "type": "array",
+                                "description": "The fields to render",
+                                "items": {
+                                    "type": "string"
+                                }
+                            }
+                        }
+                    }
+                },
+                "required": [
+                    "args.url",
+                    "args.method",
+                    "args.data_request",
+                    "args.headers",
+                    "request_render"
+                ]
+            }
+        }
+    },
+    {
+        "type": "function",
+        "function": {
+            "name": "ask_question",
+            "description": "Function to ask a question to the user",
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    "question": {
+                        "type": "string",
+                        "description": "The question to ask"
+                    }
+                },
+                "required": [
+                    "question"
+                ]
+            }
+        }
+    },
+    {
+        "type": "function",
+        "function": {
+
"name": "send_email", + "parameters": { + "type": "object", + "properties": { + "thoughts": { + "type": "object", + "description": "The thoughts of the bot", + "properties": { + "reasoning": { + "type": "string", + "description": "The reasoning of the bot. Use future tense e.g. 'I will do this'" + }, + "speak": { + "type": "string", + "description": "Friendly thoughts summary to say to the user. Use future tense e.g. 'I will do this'. Important: Make sure to always translate your reply to the user's language." + } + } + }, + "args": { + "type": "object", + "description": "The thoughts of the bot", + "properties": { + "name": { + "type": "string", + "description": "The name of the sender" + }, + "email": { + "type": "string", + "description": "The email of the sender" + }, + "subject": { + "type": "string", + "description": "The subject of the email" + }, + "body": { + "type": "string", + "description": "The body of the email" + } + } + } + }, + "required": [ + "name", + "email", + "subject", + "body" + ] + }, + "description": "Function to send an email" + } + } +] diff --git a/server/src/core/llm/llm_service.py b/server/src/core/llm/llm_service.py index 456210f..eece360 100644 --- a/server/src/core/llm/llm_service.py +++ b/server/src/core/llm/llm_service.py @@ -5,6 +5,7 @@ from core.app.app_dao import App from core.common.utils import filter_content +from core.llm.gpt_functions import GPT_FUNCTIONS from core.llm.openapi_client import OpenAI from core.llm.prompt_handler import build_prompt_answer_questions, build_prompt_command, MessageCompletion, prompt_text_form, get_prompt_objs_from_history, prompt_pick_content from core.llm.openai_stream import OpenAIStream @@ -29,9 +30,9 @@ def __init__(self, gpt3: OpenAI, gpt4: OpenAI, completion_stream: OpenAIStream): self.openai_api_gpt3 = gpt3 self.openai_api_gpt4 = gpt4 - def _gpt3(self, prompts, temperature=0.1) -> LLMResponse: + def _gpt3(self, prompts, functions=None, temperature=0.1) -> LLMResponse: print("gpt3 
prompt", prompts) - response = self.openai_api_gpt3.get_chat_completions(prompts, temperature=temperature) + response = self.openai_api_gpt3.get_chat_completions(prompts, functions=functions, temperature=temperature) print("gpt3 response successfuly") usage = response["usage"] usage["model"] = response["model"] @@ -55,7 +56,7 @@ def get_completions_stream(self, prompts: List[dict], model, temperature): def get_task_command(self, history: List[MessageCompletion], app: App) -> LLMResponse: prompts = build_prompt_command(history) - return self._gpt3(prompts, app.app_temperature) + return self._gpt3(prompts, functions=GPT_FUNCTIONS, temperature=app.app_temperature) def get_question_answer(self, user_input: str, app: App, history: List[MessageCompletion]) -> LLMResponse: prompts, usages = self.get_question_prompts(app, history, user_input) @@ -93,6 +94,7 @@ def call_ai_function(self, function, args, description) -> str: def embed_text(self, text: str) -> List[list]: return self.openai_api_gpt3.create_openai_embeddings([text]) + def audio_to_text(self, audio: str) -> str: return self.openai_api_gpt3.transcriptions(audio) diff --git a/server/src/core/llm/openapi_client.py b/server/src/core/llm/openapi_client.py index a60ac84..9b8f351 100644 --- a/server/src/core/llm/openapi_client.py +++ b/server/src/core/llm/openapi_client.py @@ -27,8 +27,9 @@ def __init__(self, api_key, model="gpt-3"): max_retries=OPENAI_MAX_RETRIES, errors=(OpenAIRateLimitError, OpenAIError), ) - def get_chat_completions(self, messages: List[dict], max_tokens=1000, temperature=0.1): + def get_chat_completions(self, messages: List[dict], functions=None, max_tokens=1000, temperature=0.1): """ + :param functions: :param messages: :param max_tokens: :param temperature: @@ -41,8 +42,10 @@ def get_chat_completions(self, messages: List[dict], max_tokens=1000, temperatur "max_tokens": max_tokens, "temperature": temperature, "top_p": 1, - "stream": False, + "stream": False } + if functions: + 
completion_options["tools"] = functions response = requests.post( f"{self.base_url}/chat/completions", diff --git a/server/src/core/llm/prompt_handler.py b/server/src/core/llm/prompt_handler.py index 67cdbfb..151ead0 100644 --- a/server/src/core/llm/prompt_handler.py +++ b/server/src/core/llm/prompt_handler.py @@ -7,74 +7,6 @@ from core.app.app_dao import App -response_format = { - "thoughts": { - "reasoning": "reasoning. Use future tense e.g. 'I will do this'", - "speak": "Friendly thoughts summary to say to user. Use future tense e.g. 'I will do this'. Important: Make sure to always translate your reply to the user's language.", - "criticism": "constructive self-criticism" - }, - "command": { - "name": "api_call|browse_website|send_email|chat_question", - "args": {"arg name": "value"}, - "request_render": { - "field_name_1": { - "field_type": "select", - "field_options": ["SP", "RJ"] - }, - "field_name_2": { - "field_type": "password", - "field_options": [] - } - }, - "response_render": { - "render_type": "list", - "fields": ["total"] - } - }, -} -formatted_response_format = json.dumps(response_format, indent=4) -response_format_instructions = f"RESPONSE FORMAT INSTRUCTIONS\n----------------------------\n\n" \ - f"When responding to me, please output a response in one of five formats:\n\n" \ - f"**Option 1:**\n" \ - f"Use this if the command is a API call\n" \ - f'Markdown code snippet formatted in the following schema:\n\n' \ - f'```json\n command: {{\n ' \ - f'"name": "api_call" \\ The command will be an api call\n' \ - f'"args": {{"url": "", "method": "","data_request": "","headers": ""}} \\ The arguments for the api call\n' \ - f'"request_render": {{"":{{"field_type": "", field_options: ""}} }} \\ Instruction of how to render the request fields\n' \ - f'"response_render": {{"render_type": "", fields: ""}} \\ Instruction of how to render the response\n' \ - f'}}\n```' \ - f'\n\n**Option 2:**\n' \ - f'Use this if the command is a browse website\n' \ - 
f'Markdown code snippet formatted in the following schema:\n\n' \ - f'```json\n command:{{\n ' \ - f'"name": "browse_website" \\ The command will be a browse website\n' \ - f'"args": "url": "" \\ The arguments for the browse website\n' \ - f'"request_render": "field_type": "", field_options: "" \\ Instruction of how to render the request fields\n' \ - f'}}\n```' \ - f'\n\n**Option 3:**\n' \ - f'Use this if the command is a send email\n' \ - f'Markdown code snippet formatted in the following schema:\n\n' \ - f'```json\n command:{{\n ' \ - f'"name": "send_email" \\ The command will be a send email\n' \ - f'"args": {{"name": "","email": "", "subject": "", "body": ""}} \\ The arguments for the send email\n' \ - f'}}\n```' \ - f'\n\n**Option 4:**\n' \ - f'Use this command if you want to ask a question to the user\n' \ - f'Markdown code snippet formatted in the following schema:\n\n' \ - f'```json\n command:{{\n ' \ - f'"name": "chat_question" \\ The command will be a chat question\n' \ - f'}}\n ```' \ - f'\n\n**Option 5:**\n' \ - f'Use this if the command is a javascript function\n' \ - f'Markdown code snippet formatted in the following schema:\n\n' \ - f'```json\n command:{{\n ' \ - f'"name": "js_func" \\ The command will be a Javascript function\n' \ - f'"function": {{"name": "", "code": "", "param": ""}} \\ The javascript function details\n' \ - f'}}\n ```' \ - f'Notice: All the options will be along with the ``` thoughts:{{ }}```' - - class MessageRole(Enum): SYSTEM = "system" USER = "user" @@ -120,24 +52,20 @@ def get_msg_cycle(doc_context: str, sanitized_query: str) -> List[MessageDict]: ), MessageDict( role=MessageRole.USER, - content=f"Execute the task using only the provided documentation above." 
-        ),
-        MessageDict(
-            role=MessageRole.USER,
-            content=response_format_instructions
-        ),
-        MessageDict(
-            role=MessageRole.USER,
-            content='Determine which command to use, and respond using the format specified above'
+            content=f"Help me to execute the task using only the provided documentation"
         ),
         MessageDict(
             role=MessageRole.USER,
-            content=f"Under no circumstances should your response deviate from the following JSON FORMAT: \n{formatted_response_format} \n"
+            content=f"Please use one of the provided functions as the response:\n"
+                    f"Option 1: Use the api_call function if the command is a API call\n\n"
+                    f"Option 2: Use the send_email function if the command is a send email\n\n"
+                    f"Option 3: Use the browse_website function if the command is to browse website\n\n"
+                    f"Option 4: Use the ask_question function if you want to ask a question to the user\n\n"
+                    f"Option 5: Use the js_func function if the command is a javascript function"
         ),
         MessageDict(
             role=MessageRole.USER,
-            content=f'Here is the user\'s input (remember to respond with a markdown code snippet of a json blob with a single action, and NOTHING else):\n\n'
-                    f'User input: {sanitized_query}'
+            content=f'Here is the user\'s input :\n\n {sanitized_query}'
         ),
     ]
@@ -152,8 +80,7 @@ def build_prompt_command(history: List[MessageCompletion]) -> List[MessageDict]:
         if message.role == MessageRole.USER and message.context == "":
             prompts.append(MessageDict(
                 role=MessageRole.USER,
-                content=f'Here is the user\'s input (remember to respond with a markdown code snippet of a json blob with a single action, and NOTHING else):\n\n'
-                        f'{message.query}'
+                content=f'Here is the user\'s input {message.query}'
             ))
             continue
         if message.role == MessageRole.USER and message.context != "":

From 8ab69feaa9cfbac69204c357986ff9b82b025669 Mon Sep 17 00:00:00 2001
From: Alexsandro Souza
Date: Thu, 28 Dec 2023 12:28:05 +0100
Subject: [PATCH 2/2] functions

---
 server/src/core/llm/llm_service.py | 6 +++---
 1 file changed, 3 insertions(+),
3 deletions(-) diff --git a/server/src/core/llm/llm_service.py b/server/src/core/llm/llm_service.py index eece360..3e254ab 100644 --- a/server/src/core/llm/llm_service.py +++ b/server/src/core/llm/llm_service.py @@ -41,9 +41,9 @@ def _gpt3(self, prompts, functions=None, temperature=0.1) -> LLMResponse: message=response["choices"][0]["message"]["content"] ) - def _gpt4(self, prompts: List[dict], temperature=0.1) -> LLMResponse: + def _gpt4(self, prompts: List[dict],functions=None, temperature=0.1) -> LLMResponse: print("gpt4 prompt", prompts) - response = self.openai_api_gpt4.get_chat_completions(prompts, temperature=temperature) + response = self.openai_api_gpt4.get_chat_completions(prompts, functions=functions, temperature=temperature) usage = response["usage"] usage["model"] = response["model"] return LLMResponse( @@ -113,7 +113,7 @@ def get_keywords(self, text: str) -> LLMResponse: def llm_service_factory(app_key_gpt3: str, app_key_gpt4: str) -> LLMService: return LLMService( - OpenAI(app_key_gpt3, "gpt-3.5-turbo-0613"), + OpenAI(app_key_gpt3, "gpt-3.5-turbo-1106"), OpenAI(app_key_gpt4, "gpt-4"), OpenAIStream(app_key_gpt4), )