From b8679fbf586d5c11c9ef45d96ec9b8c47be5619f Mon Sep 17 00:00:00 2001 From: MalikMAlna Date: Wed, 12 Apr 2023 13:23:38 -0400 Subject: [PATCH 1/3] Lowering temperature to reduce AI hallucinations --- babyagi.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/babyagi.py b/babyagi.py index c57637fb..6b422608 100755 --- a/babyagi.py +++ b/babyagi.py @@ -134,7 +134,7 @@ def get_ada_embedding(text): def openai_call( prompt: str, model: str = OPENAI_API_MODEL, - temperature: float = 0.5, + temperature: float = 0.0, max_tokens: int = 100, ): while True: @@ -233,7 +233,7 @@ def execution_agent(objective: str, task: str) -> str: You are an AI who performs one task based on the following objective: {objective}\n. Take into account these previously completed tasks: {context}\n. Your task: {task}\nResponse:""" - return openai_call(prompt, temperature=0.7, max_tokens=2000) + return openai_call(prompt, temperature=0.0, max_tokens=2000) def context_agent(query: str, n: int): From e144b9d766a4df7cbfd2a0538ae37e3f0ebbe955 Mon Sep 17 00:00:00 2001 From: MalikMAlna Date: Wed, 12 Apr 2023 13:43:04 -0400 Subject: [PATCH 2/3] Changing n parameter to top_results_num to be more readable --- babyagi.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/babyagi.py b/babyagi.py index 6b422608..960cb2e3 100755 --- a/babyagi.py +++ b/babyagi.py @@ -226,7 +226,7 @@ def execution_agent(objective: str, task: str) -> str: """ - context = context_agent(query=objective, n=5) + context = context_agent(query=objective, top_results_num=5) # print("\n*******RELEVANT CONTEXT******\n") # print(context) prompt = f""" @@ -236,20 +236,20 @@ def execution_agent(objective: str, task: str) -> str: return openai_call(prompt, temperature=0.0, max_tokens=2000) -def context_agent(query: str, n: int): +def context_agent(query: str, top_results_num: int): """ Retrieves context for a given query from an index of tasks. 
Args: query (str): The query or objective for retrieving context. - n (int): The number of top results to retrieve. + top_results_num (int): The number of top results to retrieve. Returns: list: A list of tasks as context for the given query, sorted by relevance. """ query_embedding = get_ada_embedding(query) - results = index.query(query_embedding, top_k=n, include_metadata=True, namespace=OBJECTIVE) + results = index.query(query_embedding, top_k=top_results_num, include_metadata=True, namespace=OBJECTIVE) # print("***** RESULTS *****") # print(results) sorted_results = sorted(results.matches, key=lambda x: x.score, reverse=True) From 53e9aaf7d766b851ce31f90d7172f76a0fc6f0cf Mon Sep 17 00:00:00 2001 From: MalikMAlna Date: Wed, 12 Apr 2023 23:05:29 -0400 Subject: [PATCH 3/3] Adding OpenAI temperature as an adjustable .env value --- .env.example | 1 + babyagi.py | 7 +++++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/.env.example b/.env.example index 6e2fe868..fb2b8db5 100644 --- a/.env.example +++ b/.env.example @@ -7,6 +7,7 @@ # API CONFIG OPENAI_API_KEY= OPENAI_API_MODEL=gpt-3.5-turbo # alternatively, gpt-4, text-davinci-003, etc +OPENAI_TEMPERATURE=0.0 PINECONE_API_KEY= PINECONE_ENVIRONMENT=us-east1-gcp diff --git a/babyagi.py b/babyagi.py index 960cb2e3..7b4f7c49 100755 --- a/babyagi.py +++ b/babyagi.py @@ -45,6 +45,9 @@ OBJECTIVE = os.getenv("OBJECTIVE", "") INITIAL_TASK = os.getenv("INITIAL_TASK", os.getenv("FIRST_TASK", "")) +# Model configuration +OPENAI_TEMPERATURE = float(os.getenv("OPENAI_TEMPERATURE", 0.0)) + # Extensions support begin @@ -134,7 +137,7 @@ def get_ada_embedding(text): def openai_call( prompt: str, model: str = OPENAI_API_MODEL, - temperature: float = 0.0, + temperature: float = OPENAI_TEMPERATURE, max_tokens: int = 100, ): while True: @@ -233,7 +236,7 @@ def execution_agent(objective: str, task: str) -> str: You are an AI who performs one task based on the following objective: {objective}\n. 
Take into account these previously completed tasks: {context}\n. Your task: {task}\nResponse:""" - return openai_call(prompt, temperature=0.0, max_tokens=2000) + return openai_call(prompt, max_tokens=2000) def context_agent(query: str, top_results_num: int):