Implemented OpenRouter support #916

Open · wants to merge 4 commits into main
6 changes: 6 additions & 0 deletions Gemfile.lock
@@ -261,6 +261,11 @@ GEM
      racc (~> 1.4)
    nokogiri (1.18.2-x86_64-linux-musl)
      racc (~> 1.4)
    open_router (0.3.3)
      activesupport (>= 6.0)
      dotenv (>= 2)
      faraday (>= 1)
      faraday-multipart (>= 1)
    os (1.1.4)
    paco (0.2.3)
    parallel (1.25.1)
@@ -475,6 +480,7 @@ DEPENDENCIES
  milvus (~> 0.10.3)
  mistral-ai
  nokogiri (~> 1.13)
  open_router (>= 0.3.3)
  pdf-reader (~> 2.0)
  pg (~> 1.5)
  pgvector (~> 0.2.1)
1 change: 1 addition & 0 deletions langchain.gemspec
@@ -76,4 +76,5 @@ Gem::Specification.new do |spec|
  spec.add_development_dependency "weaviate-ruby", "~> 0.9.2"
  spec.add_development_dependency "wikipedia-client", "~> 1.17.0"
  spec.add_development_dependency "power_point_pptx", "~> 0.1.0"
  spec.add_development_dependency "open_router", ">= 0.3.3"
end
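
For an application consuming this feature from its own Gemfile (rather than through this repo's development dependencies), the equivalent entry would be the one-liner below; the version constraint mirrors the gemspec change above.

gem "open_router", ">= 0.3.3"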
2 changes: 2 additions & 0 deletions lib/langchain/assistant/llm/adapter.rb
@@ -18,6 +18,8 @@ def self.build(llm)
    LLM::Adapters::Ollama.new
  elsif llm.is_a?(Langchain::LLM::OpenAI)
    LLM::Adapters::OpenAI.new
  elsif llm.is_a?(Langchain::LLM::OpenRouter)
    LLM::Adapters::OpenAI.new # OpenRouter follows OpenAI's API format
  else
    raise ArgumentError, "Unsupported LLM type: #{llm.class}"
  end
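
Because the adapter above routes Langchain::LLM::OpenRouter through the existing OpenAI adapter, an OpenRouter-backed Assistant should behave like an OpenAI-backed one. A minimal sketch, assuming langchainrb's existing Assistant interface (instructions: and add_message_and_run come from that API, not from this diff; exact names may differ by version):

llm = Langchain::LLM::OpenRouter.new(api_key: ENV["OPENROUTER_API_KEY"])

assistant = Langchain::Assistant.new(
  llm: llm,
  instructions: "You are a helpful assistant."
)

# The OpenAI adapter formats the messages; OpenRouter serves the completion.
assistant.add_message_and_run(content: "What model are you running on?")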
66 changes: 66 additions & 0 deletions lib/langchain/llm/open_router.rb
@@ -0,0 +1,66 @@
# frozen_string_literal: true

module Langchain::LLM
  # LLM interface for the OpenRouter API: https://openrouter.ai/docs
  #
  # Gem requirements:
  #     gem "open_router"
  #
  # Usage:
  #     llm = Langchain::LLM::OpenRouter.new(
  #       api_key: ENV["OPENROUTER_API_KEY"],
  #       default_options: {}
  #     )
  class OpenRouter < Base
    DEFAULTS = {
      temperature: 0.0,
      chat_model: "openrouter/auto",
      embedding_model: "openrouter/auto"
    }.freeze

    attr_reader :defaults

    def initialize(api_key:, default_options: {})
      depends_on "open_router"

      @client = ::OpenRouter::Client.new(access_token: api_key)
      @defaults = DEFAULTS.merge(default_options)

      chat_parameters.update(
        model: {default: @defaults[:chat_model]},
        temperature: {default: @defaults[:temperature]},
        providers: {default: []},
        transforms: {default: []},
        extras: {default: {}}
      )
    end

    def chat(params = {})
      parameters = chat_parameters.to_params(params)
      messages = parameters.delete(:messages)

      # Ensure default values for providers, transforms, extras
      parameters[:providers] ||= []
      parameters[:transforms] ||= []
      parameters[:extras] ||= {}

      response = client.complete(
        messages,
        model: parameters[:model],
        providers: parameters[:providers],
        transforms: parameters[:transforms],
        extras: parameters[:extras]
      )

      Langchain::LLM::OpenRouterResponse.new(response)
    end

    def embed(text:, model: nil)
      raise NotImplementedError, "OpenRouter does not support embeddings yet"
    end

    def models
      client.models
    end
  end
end
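
Putting the new class to work end to end, a hedged usage sketch (the model name is one example from OpenRouter's catalog, matching the specs further down; OPENROUTER_API_KEY is an assumed env var):

llm = Langchain::LLM::OpenRouter.new(
  api_key: ENV["OPENROUTER_API_KEY"],
  default_options: {chat_model: "mistralai/mixtral-8x7b-instruct"}
)

response = llm.chat(messages: [{role: "user", content: "Hello!"}])
response.chat_completion # => assistant reply as a String
llm.models               # => models available through OpenRouter

Note that chat_parameters defaults providers, transforms, and extras to empty collections, so a bare chat(messages:) call is enough.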
47 changes: 47 additions & 0 deletions lib/langchain/llm/response/open_router_response.rb
@@ -0,0 +1,47 @@
# frozen_string_literal: true

module Langchain::LLM
  class OpenRouterResponse < BaseResponse
    def model
      raw_response["model"]
    end

    def chat_completion
      chat_completions.dig(0, "message", "content")
    end

    def chat_completions
      raw_response["choices"]
    end

    def tool_calls
      chat_completions.dig(0, "message", "tool_calls") || []
    end

    def role
      raw_response.dig("choices", 0, "message", "role")
    end

    def embedding
      raw_response.dig("data", 0, "embedding")
    end

    def prompt_tokens
      raw_response.dig("usage", "prompt_tokens")
    end

    def total_tokens
      raw_response.dig("usage", "total_tokens")
    end

    def completion_tokens
      raw_response.dig("usage", "completion_tokens")
    end

    def created_at
      Time.at(raw_response["created"]) if raw_response["created"]
    end
  end
end
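
The wrapper can be exercised directly against a hash shaped like OpenRouter's chat completion payload; this sketch mirrors the fixture the spec file below uses:

raw = {
  "model" => "mistralai/mixtral-8x7b-instruct",
  "created" => 1710807304,
  "choices" => [{"message" => {"role" => "assistant", "content" => "Hi!"}}],
  "usage" => {"prompt_tokens" => 14, "completion_tokens" => 47, "total_tokens" => 61}
}

response = Langchain::LLM::OpenRouterResponse.new(raw)
response.chat_completion # => "Hi!"
response.total_tokens    # => 61
response.created_at      # => Time.at(1710807304)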
76 changes: 76 additions & 0 deletions spec/langchain/llm/open_router_spec.rb
@@ -0,0 +1,76 @@
# frozen_string_literal: true

require "open_router"

RSpec.describe Langchain::LLM::OpenRouter do
  let(:subject) { described_class.new(api_key: "123") }
  let(:mock_client) { instance_double(OpenRouter::Client) }

  before do
    allow(OpenRouter::Client).to receive(:new).and_return(mock_client)
  end

  describe "#initialize" do
    context "when default_options are passed" do
      let(:default_options) { {temperature: 0.7, chat_model: "mistralai/mixtral-8x7b-instruct"} }
      subject { described_class.new(api_key: "123", default_options: default_options) }

      it "sets the default options" do
        expect(subject.defaults[:temperature]).to eq(0.7)
        expect(subject.defaults[:chat_model]).to eq("mistralai/mixtral-8x7b-instruct")
      end

      it "passes them to subsequent chat() calls" do
        allow(mock_client).to receive(:complete)
        subject.chat(messages: [{role: "user", content: "Hello!"}])
        expect(subject.client).to have_received(:complete).with(
          [{role: "user", content: "Hello!"}],
          model: "mistralai/mixtral-8x7b-instruct",
          providers: [],
          transforms: [],
          extras: {}
        )
      end
    end
  end

  describe "#chat" do
    before do
      allow(mock_client).to receive(:complete)
    end

    it "calls the client with the requested parameters" do
      params = {
        messages: [{role: "user", content: "Hello!"}],
        temperature: 0.7,
        providers: ["anthropic", "openai"],
        transforms: ["fix-grammar"],
        extras: {max_tokens: 100}
      }

      subject.chat(params)

      expect(mock_client).to have_received(:complete).with(
        params[:messages],
        model: subject.defaults[:chat_model],
        providers: ["anthropic", "openai"],
        transforms: ["fix-grammar"],
        extras: {max_tokens: 100}
      )
    end
  end

  describe "#embed" do
    it "raises NotImplementedError" do
      expect { subject.embed(text: "test") }.to raise_error(NotImplementedError)
    end
  end

  describe "#models" do
    it "calls the client models method" do
      allow(mock_client).to receive(:models)
      subject.models
      expect(mock_client).to have_received(:models)
    end
  end
end
69 changes: 69 additions & 0 deletions spec/langchain/llm/response/open_router_response_spec.rb
@@ -0,0 +1,69 @@
# frozen_string_literal: true

RSpec.describe Langchain::LLM::OpenRouterResponse do
  let(:raw_chat_completions_response) {
    {
      "id" => "gen-xyz123",
      "object" => "chat.completion",
      "created" => 1710807304,
      "model" => "mistralai/mixtral-8x7b-instruct",
      "choices" => [{
        "index" => 0,
        "message" => {
          "role" => "assistant",
          "content" => "Hello! How can I help you today?"
        },
        "finish_reason" => "stop"
      }],
      "usage" => {
        "prompt_tokens" => 14,
        "total_tokens" => 61,
        "completion_tokens" => 47
      }
    }
  }

  subject { described_class.new(raw_chat_completions_response) }

  describe "#chat_completion" do
    it "returns chat completion" do
      expect(subject.chat_completion).to eq("Hello! How can I help you today?")
    end
  end

  describe "#role" do
    it "returns role" do
      expect(subject.role).to eq("assistant")
    end
  end

  describe "#model" do
    it "returns model" do
      expect(subject.model).to eq("mistralai/mixtral-8x7b-instruct")
    end
  end

  describe "#created_at" do
    it "returns created_at" do
      expect(subject.created_at).to eq(Time.at(1710807304))
    end
  end

  describe "#prompt_tokens" do
    it "returns prompt_tokens" do
      expect(subject.prompt_tokens).to eq(14)
    end
  end

  describe "#completion_tokens" do
    it "returns completion_tokens" do
      expect(subject.completion_tokens).to eq(47)
    end
  end

  describe "#total_tokens" do
    it "returns total_tokens" do
      expect(subject.total_tokens).to eq(61)
    end
  end
end