Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add DeepSeek API, Mock Human API, and Environment Loader #1

Open
wants to merge 7 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 11 additions & 0 deletions .github/dependabot.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
# To get started with Dependabot version updates, you'll need to specify which
# package ecosystems to update and where the package manifests are located.
# Please see the documentation for all configuration options:
# https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file

version: 2
updates:
  - package-ecosystem: "pip" # See documentation for possible values
    # BUG FIX: `directory` must be the folder that CONTAINS the manifest,
    # not the manifest file itself. requirements.txt lives at the repo root.
    directory: "/" # Location of package manifests
    schedule:
      interval: "weekly"
96 changes: 95 additions & 1 deletion pathfinder/api.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,13 +5,15 @@

import backoff
import regex
from dotenv import load_dotenv

from ._find import Find
from ._gen import Gen
from ._select import Select
from .backend import PathFinder
from .model import Model
from .trie import MarisaTrie, Trie
from typing import List, Dict, Any


def can_be_int(s):
Expand Down Expand Up @@ -176,13 +178,105 @@ def run(self, lm, r, name, is_gen, save_stop_text):
raise Exception(f"Cant find {r} in {lm.text_to_consume}")
return res

class DeepSeekAPI(ModelAPI):
    """Chat-completion backend for DeepSeek via its OpenAI-compatible endpoint.

    Reads DEEPSEEK_API_KEY from the environment (a local .env file is loaded
    first) and talks to https://api.deepseek.com using the OpenAI SDK client.
    """

    def __init__(self, model_name, seed):
        super().__init__(model_name, seed)
        import os

        from dotenv import load_dotenv
        from openai import OpenAI

        load_dotenv()  # Load the .env file so DEEPSEEK_API_KEY can live there.
        api_key = os.getenv("DEEPSEEK_API_KEY")
        if not api_key:
            # Fail fast with a clear message instead of an opaque auth error
            # on the first request (the SDK would otherwise fall back to
            # OPENAI_API_KEY, silently using the wrong credential).
            raise ValueError(
                "DEEPSEEK_API_KEY is not set; add it to your environment or .env file"
            )

        self.client = OpenAI(api_key=api_key,
                             base_url="https://api.deepseek.com")

    def request_api(self, chat, temperature, top_p, max_tokens):
        """Send `chat` to DeepSeek and return the assistant message text.

        Retries with exponential backoff on rate-limit errors. `chat` is a
        list of {"role": ..., "content": ...} dicts as used by the OpenAI SDK.
        (Parameter renamed from the `tmeperature` typo; the shared ModelAPI
        contract calls this positionally, matching HumanAPI.request_api.)
        """
        import openai

        @backoff.on_exception(backoff.expo, openai.RateLimitError)
        def completions_with_backoff(**kwargs):
            return self.client.chat.completions.create(**kwargs)

        out = completions_with_backoff(
            model=self.model_name,
            messages=chat,
            temperature=temperature,
            top_p=top_p,
            seed=self.seed,
            max_tokens=max_tokens,
        )
        # BUG FIX: the original log line said "OpenAI"; this is the DeepSeek backend.
        logging.info(f"DeepSeek system_fingerprint: {out.system_fingerprint}")
        return out.choices[0].message.content

class HumanAPI(ModelAPI):
    """Mock model backend: a human operator types each response at the console.

    Mirrors the request_api interface of the real API backends so it can be
    swapped in for interactive debugging. Every reply is also appended to
    ``self.conversation_history`` along with the sampling parameters it was
    requested with.
    """

    def __init__(self, model_name, seed):
        super().__init__(model_name, seed)
        # Running log of every human-entered reply (with its parameters).
        self.conversation_history = []
        logging.info(f"Initialized Human-in-loop API with model_name: {model_name}, seed: {seed}")

    def request_api(self, chat: List[Dict[str, str]], temperature: float, top_p: float, max_tokens: int) -> str:
        """
        Simulates an API request by getting input from a human operator.
        Maintains similar interface to DeepSeekAPI.
        """
        banner = "=" * 50

        # Show the request context so the operator knows what to answer.
        print("\n" + banner)
        print(f"Model: {self.model_name}")
        print(f"Parameters: temperature={temperature}, top_p={top_p}, max_tokens={max_tokens}")
        print(banner + "\n")

        # Replay the full conversation so far for human review.
        for turn in chat:
            speaker = turn["role"].upper()
            print(f"{speaker}: {turn['content']}\n")

        # Wrap the read in backoff so an interrupted operator gets retried
        # with exponential delay, mirroring the real API backends.
        @backoff.on_exception(backoff.expo, (KeyboardInterrupt, EOFError))
        def read_multiline_reply():
            print("\nEnter your response (press Enter twice to finish):")
            collected = []
            while True:
                try:
                    entry = input()
                except (KeyboardInterrupt, EOFError) as err:
                    print("\nInput interrupted. Press Ctrl+C again to exit or continue typing.")
                    raise err
                if entry == "":
                    break
                collected.append(entry)
            return "\n".join(collected)

        response = read_multiline_reply()

        # Log the interaction similar to any OpenAI API.
        logging.info(f"Human-in-loop response received. Length: {len(response)} chars")

        # Record the reply together with the parameters it was produced under.
        self.conversation_history.append(
            {
                "role": "human-operator",
                "content": response,
                "parameters": {
                    "temperature": temperature,
                    "top_p": top_p,
                    "max_tokens": max_tokens,
                },
            }
        )

        return response

class OpenAIAPI(ModelAPI):
def __init__(self, model_name, seed):
super().__init__(model_name, seed)
from openai import OpenAI
from dotenv import load_dotenv
import os

load_dotenv() # Load the .env file
api_key = os.getenv("OPENAI_API_KEY") # Get the OpenAI API key

self.client = OpenAI()
self.client = OpenAI(api_key=api_key)

def request_api(self, chat, tmeperature, top_p, max_tokens):
import openai
Expand Down
12 changes: 10 additions & 2 deletions pathfinder/loader.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
from accelerate import infer_auto_device_map
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer

from .api import AnthropicAPI, AzureOpenAIAPI, MistralAPI, OpenAIAPI, OpenRouter
from .api import AnthropicAPI, AzureOpenAIAPI, MistralAPI, OpenAIAPI, OpenRouter, DeepSeekAPI, HumanAPI
from .chat import (
ChatML,
Cohere,
Expand Down Expand Up @@ -31,6 +31,10 @@ def get_api_model(name, seed):
elif "openrouter" in name.lower():
name = name.replace("openrouter-", "")
return OpenRouter(name, seed)
elif "deepseek" in name.lower():
return DeepSeekAPI(name, seed)
elif "human" in name.lower():
return HumanAPI(name, seed)
else:
raise ValueError(f"Unknown model name {name}")

Expand Down Expand Up @@ -127,8 +131,12 @@ def get_model(
)

if backend_name == "transformers":
import os
from dotenv import load_dotenv
load_dotenv()
access_token = os.getenv("HUGGINGFACE_HUB_TOKEN")
model_config = AutoConfig.from_pretrained(
name, trust_remote_code=trust_remote_code
name, trust_remote_code=trust_remote_code, use_auth_token=access_token,
)

model = AutoModelForCausalLM.from_pretrained(
Expand Down