diff --git a/.env.example b/.env.example
index 6dd3956e..037f46bd 100644
--- a/.env.example
+++ b/.env.example
@@ -7,6 +7,7 @@
 # API CONFIG
 OPENAI_API_KEY=
 OPENAI_API_MODEL=gpt-3.5-turbo # alternatively, gpt-4, text-davinci-003, etc
+OPENAI_TEMPERATURE=0.0
 PINECONE_API_KEY=
 PINECONE_ENVIRONMENT=us-east1-gcp
diff --git a/babyagi.py b/babyagi.py
index c57637fb..7b4f7c49 100755
--- a/babyagi.py
+++ b/babyagi.py
@@ -45,6 +45,9 @@
 OBJECTIVE = os.getenv("OBJECTIVE", "")
 INITIAL_TASK = os.getenv("INITIAL_TASK", os.getenv("FIRST_TASK", ""))
 
+# Model configuration
+OPENAI_TEMPERATURE = float(os.getenv("OPENAI_TEMPERATURE", 0.0))
+
 # Extensions support begin
@@ -134,7 +137,7 @@ def get_ada_embedding(text):
 def openai_call(
     prompt: str,
     model: str = OPENAI_API_MODEL,
-    temperature: float = 0.5,
+    temperature: float = OPENAI_TEMPERATURE,
     max_tokens: int = 100,
 ):
     while True:
@@ -226,30 +229,30 @@ def execution_agent(objective: str, task: str) -> str:
     """
 
-    context = context_agent(query=objective, n=5)
+    context = context_agent(query=objective, top_results_num=5)
     # print("\n*******RELEVANT CONTEXT******\n")
     # print(context)
     prompt = f"""
     You are an AI who performs one task based on the following objective: {objective}\n.
     Take into account these previously completed tasks: {context}\n.
     Your task: {task}\nResponse:"""
-    return openai_call(prompt, temperature=0.7, max_tokens=2000)
+    return openai_call(prompt, max_tokens=2000)
 
 
-def context_agent(query: str, n: int):
+def context_agent(query: str, top_results_num: int):
     """
     Retrieves context for a given query from an index of tasks.
 
     Args:
         query (str): The query or objective for retrieving context.
-        n (int): The number of top results to retrieve.
+        top_results_num (int): The number of top results to retrieve.
 
     Returns:
         list: A list of tasks as context for the given query, sorted by relevance.
     """
     query_embedding = get_ada_embedding(query)
-    results = index.query(query_embedding, top_k=n, include_metadata=True, namespace=OBJECTIVE)
+    results = index.query(query_embedding, top_k=top_results_num, include_metadata=True, namespace=OBJECTIVE)
     # print("***** RESULTS *****")
     # print(results)
     sorted_results = sorted(results.matches, key=lambda x: x.score, reverse=True)
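
A minimal sketch (not part of the patch) of the configuration path these changes set up, with openai_call stubbed out since the real function loops and calls the OpenAI API:

    import os

    # Environment-driven default introduced by the patch; falls back to 0.0.
    OPENAI_TEMPERATURE = float(os.getenv("OPENAI_TEMPERATURE", 0.0))

    def openai_call(prompt: str, temperature: float = OPENAI_TEMPERATURE, max_tokens: int = 100):
        # Stub for illustration only.
        return f"[call with temperature={temperature}, max_tokens={max_tokens}]"

    # execution_agent no longer overrides temperature=0.7, so the value from
    # OPENAI_TEMPERATURE (or 0.0) now applies to every call:
    print(openai_call("Your task: ...", max_tokens=2000))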