15 changes: 14 additions & 1 deletion README.md
@@ -311,6 +311,7 @@ Cluster configurations

* `skey`: `_M.xxxxx`. `xxxxx` is the `skey` (service key) of this cluster.
* `enable`: Enable or disable heartbeats to servers. Default is `true`.
* `dns`: Enable DNS resolution of server host names. Default is `false`.
* `typ`: Cluster type, must be one of `general`, `redis`, `mysql`, `http`. Default is `general`.
* `general`: Heartbeat by TCP `sock:connect`.
* `redis`: Heartbeat by redis `PING`. [lua-resty-redis](https://github.com/openresty/lua-resty-redis) module is required.
@@ -334,6 +335,7 @@ Cluster configurations
* `weight`: Sets the weight of the server. Default is `1`.
* `max_fails`: Sets the number of unsuccessful attempts to communicate with the server that should happen in the duration set by the `fail_timeout` parameter. By default, the number of unsuccessful attempts is set to `0`, which disables the accounting of attempts. What is considered an unsuccessful attempt is defined by `http_opts.statuses` if `typ="http"`, or by a `nil`/`false` value returned by [checkups.ready_ok](#ready_ok). This option is only available in round-robin mode.
* `fail_timeout`: Sets the time during which the specified number of unsuccessful attempts to communicate with the server must happen for the server to be considered unavailable, and also the period for which the server is then considered unavailable. By default, the parameter is set to `10` seconds. This option is only available in round-robin mode.
* `dns`: Enable DNS resolution of this server's host name. Defaults to the cluster-level `dns` setting (see the example configuration after this list).

* `upstream`: Name of Nginx upstream blocks. Checkups will extract servers from Nginx conf's upstream blocks in [prepare_checker](#prepare_checker). [lua-upstream-nginx-module](https://github.com/openresty/lua-upstream-nginx-module) module is required.
* `upstream_only_backup`: If set to `true`, checkups will only extract backup servers from Nginx upstream blocks.
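
Below is a sketch of a cluster entry in `config.lua` that combines the `dns` flags with the options documented above; the host names and the `api` skey are illustrative.

```
-- Illustrative only: a config.lua entry using the cluster- and server-level dns flags.
local _M = {}

_M.api = {
    typ = "general",   -- heartbeat by TCP sock:connect
    enable = true,     -- heartbeats to servers
    dns = true,        -- cluster-level default: resolve server host names

    cluster = {
        {   -- level 1
            servers = {
                { host = "backend-1.example.com", port = 8080, weight = 2 },
                -- literal IP: override the cluster default and skip resolution
                { host = "10.0.0.12", port = 8080, weight = 1, dns = false },
            },
        },
    },
}

return _M
```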
@@ -377,12 +379,23 @@ Copy upstreams from `config.lua` to shdict, extract servers from Nginx upstream
prepare_checker
---------------

**syntax:** *prepare_checker(config)*
**syntax:** *prepare_checker(dns_config_getter)*

**phase:** *init_worker_by_lua*

Copy configurations from `config.lua` to worker checkups, extract servers from Nginx upstream blocks and do some basic initialization.

```
default_config_getter = {
    nameservers = {"8.8.8.8", {"8.8.4.4", 53} },
    retrans = 5,     -- 5 retransmissions on receive timeout
    timeout = 2000,  -- 2 sec
    interval = 30,   -- timer interval
}
```

The type of `dns_config_getter` can be either a `function` or a `table`. Default is the `default_config_getter` shown above.
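
For illustration, the getter could be supplied in either form; the resolver addresses below are examples and the field names follow `default_config_getter` above.

```
-- Table form: a static resolver configuration.
local dns_conf = {
    nameservers = { "10.0.0.2", { "10.0.0.3", 53 } },
    retrans = 3,      -- retransmissions on receive timeout
    timeout = 1000,   -- 1 sec
    interval = 60,    -- resolve timer interval
}

-- Function form: presumably called by checkups to obtain the same table,
-- which lets the values be computed at call time.
local function dns_conf_getter()
    return dns_conf
end
```

Either value is then passed in `init_worker_by_lua*` as shown in the syntax line above.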


create_checker
--------------
43 changes: 38 additions & 5 deletions lib/resty/checkups/api.lua
@@ -6,6 +6,7 @@ local heartbeat = require "resty.checkups.heartbeat"
local dyconfig = require "resty.checkups.dyconfig"
local base = require "resty.checkups.base"
local try = require "resty.checkups.try"
local dns = require "resty.checkups.dns"
local subsystem = require "resty.subsystem"

local str_format = string.format
@@ -170,7 +171,17 @@ function _M.get_ups_timeout(skey)
end


function _M.create_checker()
function _M.get_ups(skey)
    if not skey then
        return
    end

    local ups = base.upstream.checkups[skey]
    return ups
end
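
A sketch of how the new `get_ups` accessor might be used; the `"api"` skey and the `typ` field are illustrative, and the returned table is simply whatever checkups holds under that key.

```
-- Sketch only: read the upstream table stored for a service key.
local checkups = require "resty.checkups.api"

local ups = checkups.get_ups("api")
if ups then
    ngx.log(ngx.INFO, "cluster type: ", ups.typ or "general")
else
    ngx.log(ngx.WARN, "no upstream configured for skey 'api'")
end
```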


function _M.create_checker(dns_config_getter)
    local phase = get_phase()
    if phase ~= "init_worker" then
        error("create_checker must be called in init_worker phase")
@@ -189,7 +200,7 @@
    if base.upstream.ups_status_sync_enable and not base.ups_status_timer_created then
        local ok, err = ngx.timer.at(0, base.ups_status_checker)
        if not ok then
            log(WARN, "failed to create ups_status_checker: ", err)
            log(ERR, "failed to create ups_status_checker: ", err)
            return
        end
        base.ups_status_timer_created = true
@@ -202,10 +213,11 @@
        return
    end

    dns.create_timer(dns_config_getter)
    -- only worker 0 will create heartbeat timer
    local ok, err = ngx.timer.at(0, heartbeat.active_checkup)
    if not ok then
        log(WARN, "failed to create timer: ", err)
        log(ERR, "failed to create timer: ", err)
        return
    end
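
A sketch of the worker initialization with the new optional `dns_config_getter` argument to `create_checker`; the `config` module name and resolver values are illustrative, and `prepare_checker(config)` follows the library's existing usage.

```
-- Sketch only: init_worker wiring with the optional dns_config_getter.
init_worker_by_lua_block {
    local config   = require "config"              -- illustrative config module
    local checkups = require "resty.checkups.api"

    checkups.prepare_checker(config)
    checkups.create_checker({
        nameservers = { "10.0.0.2" },  -- example resolver
        retrans = 5,
        timeout = 2000,
        interval = 30,
    })
}
```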

@@ -233,14 +245,25 @@ local function gen_upstream(skey, upstream)
            return nil, "cluster invalid"
        end
    else
        -- only servers
        local dyupstream, err = dyconfig.do_get_upstream(skey)
        if err then
            return nil, err
        end

        dyupstream = dyupstream or {}
        dyupstream.cluster = upstream
        if upstream.servers then
            -- store config
            for k, v in pairs(upstream) do
                if k ~= "servers" then
                    dyupstream[k] = v
                else
                    dyupstream.cluster = { { servers = v } }
                end
            end
        else
            -- only cluster
            dyupstream.cluster = upstream
        end
        ups = dyupstream
    end

@@ -321,4 +344,14 @@ function _M.delete_upstream(skey)
end


function _M.try_register(name, module)
    return try.register(name, module)
end


function _M.try_unregister(name)
    return try.unregister(name)
end
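
A sketch of the new registration hooks; `"my_hash"` and the module are hypothetical, and the interface a registered module must implement is defined by `resty.checkups.try`, not by this diff.

```
-- Sketch only: register a custom module under a name, unregister it later.
local checkups  = require "resty.checkups.api"
local my_module = require "my.project.balancer"   -- hypothetical module

checkups.try_register("my_hash", my_module)

-- e.g. during reconfiguration:
checkups.try_unregister("my_hash")
```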


return _M
1 change: 1 addition & 0 deletions lib/resty/checkups/base.lua
@@ -307,6 +307,7 @@ function _M.extract_servers_from_upstream(skey, cls)
            weight = srv.weight,
            max_fails = srv.max_fails,
            fail_timeout = srv.fail_timeout,
            down = srv.down,
        })
    end
end
41 changes: 29 additions & 12 deletions lib/resty/checkups/consistent_hash.lua
@@ -4,6 +4,8 @@ local floor = math.floor
local str_byte = string.byte
local tab_sort = table.sort
local tab_insert = table.insert
local ipairs = ipairs
local type = type

local _M = { _VERSION = "0.11" }

@@ -22,7 +24,7 @@ local function hash_string(str)
end


local function init_consistent_hash_state(servers)
local function init_state(servers)
    local weight_sum = 0
    for _, srv in ipairs(servers) do
        weight_sum = weight_sum + (srv.weight or 1)
@@ -62,37 +64,52 @@ local function binary_search(circle, key)
end


function _M.next_consistent_hash_server(servers, peer_cb, hash_key)
    local is_tab = require "resty.checkups.base".is_tab
    servers.chash = is_tab(servers.chash) and servers.chash
        or init_consistent_hash_state(servers)
local function next_server(servers, peer_cb, opts)
    servers.chash = type(servers.chash) == "table" and servers.chash
        or init_state(servers)

    local chash = servers.chash
    if chash.members == 1 then
        if peer_cb(1, servers[1]) then
            return servers[1]
            return 1, servers[1]
        end

        return nil, "consistent hash: no servers available"
        return nil, nil, nil, "consistent hash: no servers available"
    end

    local circle = chash.circle
    local st = binary_search(circle, hash_string(hash_key))
    local st = binary_search(circle, hash_string(opts.hash_key))
    local size = #circle
    local ed = st + size - 1
    for i = st, ed do -- TODO: algorithm O(n)
        local idx = circle[(i - 1) % size + 1][2]
        if peer_cb(idx, servers[idx]) then
            return servers[idx]
            return idx, servers[idx]
        end
    end

    return nil, "consistent hash: no servers available"
    return nil, nil, nil, "consistent hash: no servers available"
end


function _M.free_consitent_hash_server(srv, failed)
    return
local function gen_opts(ups, opts, skey)
    local key
    local mode = ups.mode
    if mode == "hash" then
        key = opts.hash_key or ngx.var.uri
    elseif mode == "url_hash" then
        key = ngx.var.uri
    elseif mode == "ip_hash" then
        key = ngx.var.remote_addr
    elseif mode == "header_hash" then
        key = ngx.var.http_x_hash_key or ngx.var.uri
    end
    return { hash_key = key }
end

function _M.ipairsrvs(servers, peer_cb, ups, opts, skey)
    local mopts = gen_opts(ups, opts, skey)
    return function() return next_server(servers, peer_cb, mopts) end
end
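
For reference, a sketch of how a cluster might select its hash key via the `mode` values read by `gen_opts` above; the placement of `mode` on the cluster table is inferred from `ups.mode`, and the host names are illustrative.

```
-- Illustrative only: hash-key selection via mode (see gen_opts above).
-- Lives in config.lua, where _M is the config table.
_M.cache = {
    typ = "general",
    mode = "ip_hash",   -- key = ngx.var.remote_addr
    -- "url_hash"       -- key = ngx.var.uri
    -- "header_hash"    -- key = ngx.var.http_x_hash_key or ngx.var.uri
    -- "hash"           -- key = opts.hash_key or ngx.var.uri

    cluster = {
        {
            servers = {
                { host = "cache-1.example.com", port = 6379 },
                { host = "cache-2.example.com", port = 6379 },
            },
        },
    },
}
```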

