Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
44 changes: 44 additions & 0 deletions bin/lua/infra/AI/proxy.lua
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ local http = require("infra.HTTP.HTTP")
local json = require("infra.HTTP.json")
local log = require("framework.logger")
local config = require("interface.config")
local inspect = require('framework.inspect')

local proxy = {}

Expand All @@ -23,6 +24,7 @@ local PRESET = {

-- helpers --------------------------------------------------------------
local API_URL = "http://127.0.0.1:8000/v1/chat/completions"
local API_MODELS_URL = "http://127.0.0.1:8000/v1/models"
local API_KEY = config.PROXY_API_KEY
Comment on lines 26 to 28
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Honestly, this is a perfect thing to add to the advanced settings in MCM — the base URL (API_URL in code). Maybe in the future, including the API key.


local function build_body(messages, opts)
Expand Down Expand Up @@ -84,6 +86,44 @@ local function send(messages, cb, opts)
end)
end

--- Fetch the list of available models from the proxy's /v1/models endpoint.
-- Blocks the caller for up to ~5 seconds while polling for the async HTTP
-- response (MCM needs the list synchronously when building its options page).
-- @treturn table Array of {id, id} pairs suitable for an MCM "list" content
--   field; empty table on error or timeout.
local function get_model_list()
    local headers = {
        ["Content-Type"] = "application/json",
        ["Authorization"] = "Bearer " .. API_KEY,
    }

    -- GET request; the HTTP layer still expects a body table, so pass an empty one.
    local requestId = http.send_request(API_MODELS_URL, "GET", headers, {})

    -- Busy-wait until a response or an error arrives, or the deadline passes.
    -- 'err' instead of 'error' so we don't shadow Lua's built-in error().
    -- NOTE(review): os.clock() is CPU time, not wall time — TODO confirm this
    -- behaves as a wall-clock timeout on the target runtime.
    local response, err
    local deadline = os.clock() + 5
    log.info("waiting for models response")
    while not response and not err and os.clock() < deadline do
        response, err = http.check_response(requestId)
    end

    local resultPairs = {}
    if response and response.data then
        log.info("models list array result " .. inspect(response.data))
        for _, model in ipairs(response.data) do
            -- MCM list entries are {value, label}; use the model id for both.
            resultPairs[#resultPairs + 1] = { model.id, model.id }
        end
    elseif err then
        log.error("models error " .. inspect(err))
    else
        log.info("models request timed out")
    end
    log.info("models final table " .. inspect(resultPairs))
    return resultPairs
end

Comment on lines +89 to +126
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This reminds me I need to add model-list sanitization by default. I keep forgetting.
This returns roughly 200+ models with Gemini and NVIDIA combined.

-- public shortcuts -----------------------------------------------------
function proxy.generate_dialogue(msgs, cb)
return send(msgs, cb, PRESET.creative)
Expand All @@ -97,4 +137,8 @@ function proxy.summarize_story(msgs, cb)
return send(msgs, cb, {model=MODEL.fast, temperature=0.2})
end

--- Expose the private model-list fetcher on the module table.
-- Direct assignment forwards all arguments and return values unchanged,
-- so this is behaviorally identical to a wrapper function.
proxy.get_model_list = get_model_list

return proxy
15 changes: 11 additions & 4 deletions gamedata/scripts/talker_mcm.script
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@



local language = require("infra.language")
local proxy = require("infra.AI.proxy")


function on_mcm_load()
options = {
Expand Down Expand Up @@ -29,9 +28,17 @@ function on_mcm_load()
{"0", "open-ai"},
{"1", "open-router-ai"},
{"2", "local-deepseek"},
{"3", "proxy"},
{"3", "proxy"}
}
},
{
id = "custom_ai_model2",
type = "list",
val = 0,
def = "",
no_str = true,
content = proxy.get_model_list()
},
{
id = "custom_ai_model",
type = "input",
Expand Down