Commit 8ac4a8b

gustavocidornelas authored and whoseoyster committed
Remove LLM runners and their dependencies
1 parent 4336587 commit 8ac4a8b

File tree

4 files changed: +1 -400 lines changed


openlayer/model_runners/ll_model_runners.py

Lines changed: 0 additions & 331 deletions
@@ -9,13 +9,9 @@
 from abc import ABC, abstractmethod
 from typing import Any, Dict, Generator, List, Optional, Tuple, Union

-import anthropic
-import cohere
 import openai
 import pandas as pd
 import pybars
-import requests
-from google import generativeai
 from tqdm import tqdm

 from .. import constants
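With this hunk, openai is the only LLM SDK the module still imports; anthropic, cohere, google.generativeai, and requests leave with the runners they served. For anyone who depended on the removed runners, a hedged sketch of the equivalent direct call follows each deleted class below.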
@@ -288,156 +284,6 @@ def run_and_yield_progress(


 # -------------------------- Concrete model runners -------------------------- #
-class AnthropicModelRunner(LLModelRunner):
-    """Wraps Anthropic's models."""
-
-    # Last update: 2023-12-19
-    COST_PER_TOKEN = {
-        "claude-2": {
-            "input": 8e-6,
-            "output": 24e-6,
-        },
-        "claude-instant": {
-            "input": 0.8e-6,
-            "output": 2.4e-6,
-        },
-    }
-
-    def __init__(
-        self,
-        logger: Optional[logging.Logger] = None,
-        **kwargs,
-    ):
-        super().__init__(logger, **kwargs)
-        if kwargs.get("anthropic_api_key") is None:
-            raise openlayer_exceptions.OpenlayerMissingLlmApiKey(
-                "Please pass your Anthropic API key as the "
-                "keyword argument 'anthropic_api_key'"
-            )
-
-        self.anthropic_api_key = kwargs["anthropic_api_key"]
-        self._initialize_llm()
-
-    def _initialize_llm(self):
-        """Initializes Anthropic's model."""
-        self.anthropic_client = anthropic.Anthropic(
-            api_key=self.anthropic_api_key,
-        )
-        if self.model_config.get("model") is None:
-            warnings.warn("No model specified. Defaulting to model 'claude-2'.")
-        if self.model_config.get("model_parameters") is None:
-            warnings.warn("No model parameters specified. Using default parameters.")
-            self.model_config["model_parameters"] = {"max_tokens_to_sample": 200}
-        elif "max_tokens_to_sample" not in self.model_config.get("model_parameters"):
-            warnings.warn(
-                "max_tokens_to_sample not specified. Using default max_tokens_to_sample of 200.",
-            )
-            self.model_config["model_parameters"]["max_tokens_to_sample"] = 200
-
-    def _get_llm_input(self, injected_prompt: List[Dict[str, str]]) -> str:
-        """Prepares the input for Anthropic's model."""
-        llm_input = ""
-        for message in injected_prompt:
-            if message["role"] == "assistant":
-                llm_input += f"{anthropic.AI_PROMPT} {message['content']} "
-            elif message["role"] in ("user", "system"):
-                llm_input += f"{anthropic.HUMAN_PROMPT} {message['content']} "
-            else:
-                raise ValueError(
-                    "Message role must be either 'assistant', 'user', or 'system' "
-                    f"for Anthropic LLMs. Got: {message['role']}"
-                )
-        llm_input += f"{anthropic.AI_PROMPT}"
-        return llm_input
-
-    def _make_request(self, llm_input: str) -> Dict[str, Any]:
-        """Makes the request to Anthropic's model for a given input."""
-        return self.anthropic_client.completions.create(
-            model=self.model_config.get("model", "claude-2"),
-            prompt=llm_input,
-            **self.model_config.get("model_parameters", {}),
-        )
-
-    def _get_output(self, response: Dict[str, Any]) -> str:
-        """Gets the output from the response."""
-        return response["completion"]
-
-    def _get_cost_estimate(self, response: Dict[str, Any]) -> float:
-        """Estimates the cost from the response."""
-        return 0
-
-
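For reference, the removed Anthropic runner boils down to one direct call. A minimal sketch, assuming an anthropic SDK of the same vintage (late 2023) that still exposes the legacy completions endpoint and the HUMAN_PROMPT/AI_PROMPT constants; the key and question are placeholders.

import anthropic

# Build the prompt the way _get_llm_input did: Human/Assistant turns,
# closed with an Assistant marker for the model to complete.
client = anthropic.Anthropic(api_key="YOUR_ANTHROPIC_API_KEY")  # placeholder key
prompt = (
    f"{anthropic.HUMAN_PROMPT} What is the capital of France?"
    f"{anthropic.AI_PROMPT}"
)
completion = client.completions.create(
    model="claude-2",
    prompt=prompt,
    max_tokens_to_sample=200,  # the default the runner fell back to
)
print(completion.completion)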
-class CohereGenerateModelRunner(LLModelRunner):
-    """Wraps Cohere's Generate model."""
-
-    def __init__(
-        self,
-        logger: Optional[logging.Logger] = None,
-        **kwargs,
-    ):
-        super().__init__(logger, **kwargs)
-        if kwargs.get("cohere_api_key") is None:
-            raise openlayer_exceptions.OpenlayerMissingLlmApiKey(
-                "Please pass your Cohere API key as the "
-                "keyword argument 'cohere_api_key'"
-            )
-
-        self.cohere_api_key = kwargs["cohere_api_key"]
-        self._initialize_llm()
-
-    def _initialize_llm(self):
-        """Initializes Cohere's Generate model."""
-        # Check if the API key is valid -- Cohere's own validation is shallow
-        try:
-            # pylint: disable=unexpected-keyword-arg
-            self.cohere_client = cohere.Client(
-                api_key=self.cohere_api_key, check_api_key=True
-            )
-        except Exception as e:
-            raise openlayer_exceptions.OpenlayerInvalidLlmApiKey(
-                "Please pass a valid Cohere API key as the "
-                f"keyword argument 'cohere_api_key' \n Error message: {e}"
-            ) from e
-        if self.model_config.get("model") is None:
-            warnings.warn("No model specified. Defaulting to model 'command'.")
-        if self.model_config.get("model_parameters") is None:
-            warnings.warn("No model parameters specified. Using default parameters.")
-
-    def _get_llm_input(self, injected_prompt: List[Dict[str, str]]) -> str:
-        """Prepares the input for Cohere's Generate model."""
-        llm_input = ""
-        for message in injected_prompt:
-            if message["role"] == "system":
-                llm_input += f"S: {message['content']} \n"
-            elif message["role"] == "assistant":
-                llm_input += f"A: {message['content']} \n"
-            elif message["role"] == "user":
-                llm_input += f"U: {message['content']} \n"
-            else:
-                raise ValueError(
-                    "Message role must be either 'system', 'assistant' or 'user'. "
-                    f"Got: {message['role']}"
-                )
-        llm_input += "A:"
-        return llm_input
-
-    def _make_request(self, llm_input: str) -> Dict[str, Any]:
-        """Makes the request to Cohere's Generate model for a given input."""
-        return self.cohere_client.generate(
-            model=self.model_config.get("model", "command"),
-            prompt=llm_input,
-            **self.model_config.get("model_parameters", {}),
-        )
-
-    def _get_output(self, response: Dict[str, Any]) -> str:
-        """Gets the output from the response."""
-        return response[0].text
-
-    def _get_cost_estimate(self, response: Dict[str, Any]) -> float:
-        """Estimates the cost from the response."""
-        return 0
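The Cohere path similarly reduces to one SDK call. A minimal sketch, assuming a v4-era cohere SDK whose Client exposes generate(); the key and question are placeholders.

import cohere

client = cohere.Client("YOUR_COHERE_API_KEY")  # placeholder key
# The runner flattened chat messages into "S:/U:/A:" lines ending in "A:".
response = client.generate(
    model="command",
    prompt="U: What is the capital of France? \nA:",
)
print(response[0].text)  # generations index like a list, matching _get_output above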


 class OpenAIChatCompletionRunner(LLModelRunner):
@@ -507,180 +353,3 @@ def _get_cost_estimate(self, response: Dict[str, Any]) -> None:
             num_input_tokens * constants.OPENAI_COST_PER_TOKEN[model]["input"]
             + num_output_tokens * constants.OPENAI_COST_PER_TOKEN[model]["output"]
         )
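The three context lines above are the OpenAI cost estimate that survives this commit: input tokens times the per-token input rate, plus output tokens times the per-token output rate. A worked example with hypothetical rates (the real ones live in constants.OPENAI_COST_PER_TOKEN):

# Hypothetical per-token rates, for illustration only.
cost_per_token = {"input": 1e-6, "output": 2e-6}
num_input_tokens, num_output_tokens = 1_000, 500
cost = (
    num_input_tokens * cost_per_token["input"]
    + num_output_tokens * cost_per_token["output"]
)
print(f"${cost:.4f}")  # $0.0020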
-
-
-class GoogleGenAIModelRunner(LLModelRunner):
-    """Wraps Google's Gen AI models."""
-
-    def __init__(
-        self,
-        logger: Optional[logging.Logger] = None,
-        **kwargs,
-    ):
-        super().__init__(logger, **kwargs)
-        if kwargs.get("google_api_key") is None:
-            raise openlayer_exceptions.OpenlayerMissingLlmApiKey(
-                "Please pass your Google API key generated with "
-                "https://makersuite.google.com/ as the keyword argument"
-                " 'google_api_key'"
-            )
-        self.google_api_key = kwargs["google_api_key"]
-
-        self._initialize_llm()
-
-        self.cost: List[float] = []
-
-    def _initialize_llm(self):
-        """Initializes Google's generative model."""
-        if self.model_config.get("model") is None:
-            warnings.warn("No model specified. Defaulting to model 'gemini-pro'.")
-        if self.model_config.get("model_parameters") is None:
-            warnings.warn("No model parameters specified. Using default parameters.")
-        # Check if the API key is valid
-        try:
-            generativeai.configure(api_key=self.google_api_key)
-            self.model = generativeai.GenerativeModel(
-                self.model_config.get("model", "gemini-pro")
-            )
-        except Exception as e:
-            raise openlayer_exceptions.OpenlayerInvalidLlmApiKey(
-                "Please pass your Google API key generated with "
-                "https://makersuite.google.com/ as the keyword argument"
-                f" 'google_api_key' \n Error message: {e}"
-            ) from e
-
-    def _get_llm_input(self, injected_prompt: List[Dict[str, str]]) -> str:
-        """Prepares the input for Google's model."""
-        llm_input = ""
-        for message in injected_prompt:
-            if message["role"] == "system":
-                llm_input += f"S: {message['content']} \n"
-            elif message["role"] == "assistant":
-                llm_input += f"A: {message['content']} \n"
-            elif message["role"] == "user":
-                llm_input += f"U: {message['content']} \n"
-            else:
-                raise ValueError(
-                    "Message role must be either 'system', 'assistant' or 'user'. "
-                    f"Got: {message['role']}"
-                )
-        llm_input += "A:"
-        return llm_input
-
-    def _make_request(self, llm_input: str) -> Dict[str, Any]:
-        """Makes the request to Google's model for a given input."""
-        response = self.model.generate_content(
-            contents=llm_input,
-            **self.model_config.get("model_parameters", {}),
-        )
-        return response
-
-    def _get_output(self, response: Dict[str, Any]) -> str:
-        """Gets the output from the response."""
-        return response.text
-
-    def _get_cost_estimate(self, response: Dict[str, Any]) -> float:
-        """Estimates the cost from the response."""
-        return 0
-
-
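The removed Google runner maps onto the google-generativeai SDK directly. A minimal sketch, assuming the late-2023 SDK; the key and question are placeholders.

from google import generativeai

generativeai.configure(api_key="YOUR_GOOGLE_API_KEY")  # placeholder key
model = generativeai.GenerativeModel("gemini-pro")  # the runner's default model
response = model.generate_content("What is the capital of France?")
print(response.text)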
-class SelfHostedLLModelRunner(LLModelRunner):
-    """Wraps a self-hosted LLM."""
-
-    def __init__(
-        self,
-        logger: Optional[logging.Logger] = None,
-        **kwargs,
-    ):
-        super().__init__(logger, **kwargs)
-        if kwargs.get("url") is None:
-            raise ValueError(
-                "URL must be provided. Please pass it as the keyword argument 'url'"
-            )
-        if kwargs.get("api_key") is None:
-            raise ValueError(
-                "API key must be provided for self-hosted LLMs. "
-                "Please pass it as the keyword argument 'api_key'"
-            )
-        if kwargs.get("input_key") is None:
-            raise ValueError(
-                "Input key must be provided for self-hosted LLMs. "
-                "Please pass it as the keyword argument 'input_key'"
-            )
-        if kwargs.get("output_key") is None:
-            raise ValueError(
-                "Output key must be provided for self-hosted LLMs. "
-                "Please pass it as the keyword argument 'output_key'"
-            )
-
-        self.url = kwargs["url"]
-        self.api_key = kwargs["api_key"]
-        self.input_key = kwargs["input_key"]
-        self.output_key = kwargs["output_key"]
-        self._initialize_llm()
-
-    def _initialize_llm(self):
-        """Initializes the self-hosted LLM."""
-        # Check that the URL is reachable
-        try:
-            requests.get(self.url, timeout=constants.REQUESTS_TIMEOUT)
-        except Exception as e:
-            raise ValueError(
-                "URL is invalid. Please pass a valid URL as the "
-                f"keyword argument 'url' \n Error message: {e}"
-            ) from e
-
-    def _get_llm_input(self, injected_prompt: List[Dict[str, str]]) -> str:
-        """Prepares the input for the self-hosted LLM."""
-        llm_input = ""
-        for message in injected_prompt:
-            if message["role"] == "system":
-                llm_input += f"S: {message['content']} \n"
-            elif message["role"] == "assistant":
-                llm_input += f"A: {message['content']} \n"
-            elif message["role"] == "user":
-                llm_input += f"U: {message['content']} \n"
-            else:
-                raise ValueError(
-                    "Message role must be either 'system', 'assistant' or 'user'. "
-                    f"Got: {message['role']}"
-                )
-        llm_input += "A:"
-        return llm_input
-
-    def _make_request(self, llm_input: str) -> Dict[str, Any]:
-        """Makes the request to the self-hosted LLM for a given input."""
-        headers = {
-            "Authorization": f"Bearer {self.api_key}",
-            "Content-Type": "application/json",
-        }
-        data = {self.input_key: llm_input}
-        response = requests.post(
-            self.url, headers=headers, json=data, timeout=constants.REQUESTS_TIMEOUT
-        )
-        if response.status_code == 200:
-            response_data = response.json()[0]
-            return response_data
-        else:
-            raise ValueError(f"Request failed with status code {response.status_code}")
-
-    def _get_output(self, response: Dict[str, Any]) -> str:
-        """Gets the output from the response."""
-        return response[self.output_key]
-
-    def _get_cost_estimate(self, response: Dict[str, Any]) -> float:
-        """Estimates the cost from the response."""
-        return 0
-
-
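The self-hosted runner was provider-agnostic: POST a flattened prompt to a URL with a Bearer token, then read one field out of the JSON reply. A minimal sketch; the endpoint, token, and input/output field names are placeholders chosen by whoever hosts the model.

import requests

URL = "https://example.com/generate"  # placeholder endpoint
payload = {"inputs": "U: What is the capital of France? \nA:"}
response = requests.post(
    URL,
    headers={
        "Authorization": "Bearer YOUR_API_KEY",  # placeholder token
        "Content-Type": "application/json",
    },
    json=payload,
    timeout=30,
)
response.raise_for_status()
print(response.json()[0]["generated_text"])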
-class HuggingFaceModelRunner(SelfHostedLLModelRunner):
-    """Wraps LLMs hosted in HuggingFace."""
-
-    def __init__(self, url, api_key):
-        super().__init__(
-            url=url, api_key=api_key, input_key="inputs", output_key="generated_text"
-        )
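The HuggingFace runner was only a preset over the self-hosted one, fixing input_key="inputs" and output_key="generated_text", which matches the payload shape of the HuggingFace Inference API for text generation. The same call made directly (model URL and token are placeholders):

import requests

response = requests.post(
    "https://api-inference.huggingface.co/models/gpt2",  # placeholder model URL
    headers={"Authorization": "Bearer YOUR_HF_TOKEN"},  # placeholder token
    json={"inputs": "What is the capital of France?"},
    timeout=30,
)
print(response.json()[0]["generated_text"])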

0 commit comments
