33 changes: 22 additions & 11 deletions fw/cfg.c
@@ -1826,27 +1826,38 @@ tfw_cfg_set_str(TfwCfgSpec *cs, TfwCfgEntry *e)
}

int
tfw_cfg_set_mem(TfwCfgSpec *cs, TfwCfgEntry *e)
tfw_cfg_set_mem_val(TfwCfgSpec *cs, TfwCfgEntry *e,
unsigned int val_num, unsigned long *dst)
{
long *dest_long;
unsigned long val;
unsigned int n = val_num;
TfwCfgSpecMem *cse;

BUG_ON(!cs->dest);

if (tfw_cfg_check_single_val(e))
return -EINVAL;

/* Check value restrictions if we have any in the spec extension. */
cse = cs->spec_ext;
if (cse) {
if (tfw_cfg_mem_check_multiple_of(e->vals[0], cse->multiple_of)
|| tfw_cfg_mem_check_range(e->vals[0], cse->range.min,
if (tfw_cfg_mem_check_multiple_of(e->vals[n], cse->multiple_of)
|| tfw_cfg_mem_check_range(e->vals[n], cse->range.min,
cse->range.max))
return -EINVAL;
}

val = memparse(e->vals[0], NULL);
*dst = memparse(e->vals[n], NULL);

return 0;
}

int
tfw_cfg_set_mem(TfwCfgSpec *cs, TfwCfgEntry *e)
{
long *dest_long;
unsigned long val;
int r;

BUG_ON(!cs->dest);

r = tfw_cfg_set_mem_val(cs, e, 0, &val);
if (r)
return r;

dest_long = cs->dest;
*dest_long = val;
2 changes: 2 additions & 0 deletions fw/cfg.h
@@ -489,6 +489,8 @@ int tfw_cfg_set_int(TfwCfgSpec *spec, TfwCfgEntry *parsed_entry);
int tfw_cfg_set_long(TfwCfgSpec *spec, TfwCfgEntry *parsed_entry);
int tfw_cfg_set_str(TfwCfgSpec *spec, TfwCfgEntry *parsed_entry);
int tfw_cfg_set_mem(TfwCfgSpec *spec, TfwCfgEntry *parsed_entry);
int tfw_cfg_set_mem_val(TfwCfgSpec *spec, TfwCfgEntry *parsed_entry,
unsigned int val_num, unsigned long *dst);
int tfw_cfg_handle_children(TfwCfgSpec *self, TfwCfgEntry *parsed_entry);
void tfw_cfg_cleanup_children(TfwCfgSpec *cs);

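A minimal usage sketch (not part of this diff) for the new tfw_cfg_set_mem_val() helper: unlike tfw_cfg_set_mem(), it hands the parsed byte count back through a caller-supplied pointer instead of writing through cs->dest, so a custom handler can post-process the value first. The handler name and the PAGE_ALIGN() step below are illustrative assumptions only.

/* Hypothetical custom handler built on top of tfw_cfg_set_mem_val(). */
static int
my_set_cache_size(TfwCfgSpec *cs, TfwCfgEntry *e)
{
	unsigned long bytes;
	int r;

	/*
	 * Parses e->vals[0] with memparse() (so "64k", "2M", "1G" work)
	 * and applies the optional TfwCfgSpecMem range/multiple_of
	 * restrictions from cs->spec_ext, if one is attached.
	 */
	r = tfw_cfg_set_mem_val(cs, e, 0, &bytes);
	if (r)
		return r;

	/* Illustrative post-processing before storing the value. */
	*(unsigned long *)cs->dest = PAGE_ALIGN(bytes);

	return 0;
}

With this split, tfw_cfg_set_mem() itself becomes a thin wrapper (shown above) that forwards value index 0 and stores the result through cs->dest.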
154 changes: 110 additions & 44 deletions fw/client.c
@@ -71,18 +71,13 @@ static TDB *client_db;
static atomic_t shutdown_pending = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(shutdown_wq);

static struct kmem_cache *cli_mem_cache;
static struct kmem_cache *tfw_cli_mem_cache;
static struct {
TfwClientMem *mem;
struct list_head free_list;
TfwClientMem *free_list;
unsigned int size;
unsigned int order;
} cli_mem_pool = {
.mem = NULL,
.free_list = LIST_HEAD_INIT(cli_mem_pool.free_list),
.size = 0,
.order = 0,
};
} cli_mem_pool;

static inline bool
tfw_cli_mem_belongs_to_pool(TfwClientMem *cli_mem)
@@ -97,9 +92,13 @@ __cli_mem_release(TfwClientMem *cli_mem)
percpu_ref_exit(&cli_mem->refcnt);
free_percpu(cli_mem->mem);
if (!tfw_cli_mem_belongs_to_pool(cli_mem))
kmem_cache_free(cli_mem_cache, cli_mem);
kmem_cache_free(tfw_cli_mem_cache, cli_mem);
}

/*
* Reset counters, reinit refcnt and put `cli_mem` back to the pool.
* Should be called under `ga_lock` to protect `cli_mem_pool.free_list`.
*/
static inline void
tfw_cli_mem_pool_free(TfwClientMem *cli_mem)
{
@@ -110,26 +109,60 @@ tfw_cli_mem_pool_free(TfwClientMem *cli_mem)
for_each_online_cpu(cpu)
*per_cpu_ptr(cli_mem->mem, cpu) = 0;
percpu_ref_reinit(&cli_mem->refcnt);
list_add_tail(&cli_mem->in_free_list, &cli_mem_pool.free_list);
cli_mem->next_free = cli_mem_pool.free_list;
cli_mem_pool.free_list = cli_mem;
}

/*
* Workqueue handler for asynchronous cli_mem destruction.
*
* This function initiates final teardown of a TfwClientMem object:
* - percpu_ref_kill() marks the refcount as dead, preventing any new
* users from acquiring references.
* - percpu_ref_put() drops the caller’s reference, which may trigger
* final release via cli_mem_release() once all outstanding users
* are gone.
*/
static void
tfw_cli_mem_kill_work_fn(struct work_struct *work)
{
TfwClientMem *cli_mem = container_of(work, TfwClientMem, kill_work);

percpu_ref_kill(&cli_mem->refcnt);
percpu_ref_put(&cli_mem->refcnt);
}
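
/*
 * Minimal sketch (not part of this diff): a plausible trigger site for
 * the handler above. The caller name is hypothetical; it only shows
 * that a release path can hand the object to the system workqueue and
 * let tfw_cli_mem_kill_work_fn() do the kill/put in process context.
 * Valid only once INIT_WORK() has run for this object (see
 * tfw_cli_mem_pool_alloc() and tfw_cli_mem_alloc_from_cache()).
 */
static void
tfw_cli_mem_kill_async(TfwClientMem *cli_mem)
{
	schedule_work(&cli_mem->kill_work);
}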

/*
* Get `TfwClientMem` object from pool if present.
* The object was already initialized, either during pool creation or
* when it was released back to the pool.
*/
static inline TfwClientMem *
tfw_cli_mem_pool_alloc(void)
{
TfwClientMem *cli_mem;

assert_spin_locked(&client_db->ga_lock);

cli_mem = list_first_entry_or_null(&cli_mem_pool.free_list,
TfwClientMem, in_free_list);
if (!cli_mem)
if (!cli_mem_pool.free_list)
return NULL;

list_del_init(&cli_mem->in_free_list);
cli_mem = cli_mem_pool.free_list;
cli_mem_pool.free_list = cli_mem->next_free;
/*
* INIT_WORK() may be called only after the object has been unlinked
* from `free_list` via its `next_free` pointer, because `next_free`
* and `kill_work` belong to the same union.
*/
INIT_WORK(&cli_mem->kill_work, tfw_cli_mem_kill_work_fn);

return cli_mem;
}

/*
* Final release of cli_mem: verify refcnt/memory are zero and either
* return to pool or free it. Signals shutdown completion if needed.
*/
static void
cli_mem_release(struct percpu_ref *ref)
{
@@ -138,6 +171,7 @@ cli_mem_release(struct percpu_ref *ref)
spin_lock_bh(&client_db->ga_lock);

WARN_ON_ONCE(!percpu_ref_is_zero(ref));
WARN_ON_ONCE(tfw_client_mem(cli_mem));
if (tfw_cli_mem_belongs_to_pool(cli_mem))
tfw_cli_mem_pool_free(cli_mem);
else
@@ -149,15 +183,6 @@ cli_mem_release(struct percpu_ref *ref)
wake_up(&shutdown_wq);
}

static void
tfw_cli_mem_kill_work_fn(struct work_struct *work)
{
TfwClientMem *cli_mem = container_of(work, TfwClientMem, kill_work);

percpu_ref_kill(&cli_mem->refcnt);
percpu_ref_put(&cli_mem->refcnt);
}

static inline int
tfw_cli_mem_init(TfwClientMem *cli_mem, gfp_t flags)
{
@@ -172,39 +197,51 @@ tfw_cli_mem_init(TfwClientMem *cli_mem, gfp_t flags)
if (unlikely(r))
goto free_per_cpu_mem;

INIT_LIST_HEAD(&cli_mem->in_free_list);
INIT_WORK(&cli_mem->kill_work, tfw_cli_mem_kill_work_fn);

return 0;

free_per_cpu_mem:
free_percpu(cli_mem->mem);
cli_mem->mem = NULL;

return r;
}

static inline void
tfw_cli_mem_pool_exit(void)
{
TfwClientMem *curr, *tmp;
TfwClientMem *tmp, *curr = cli_mem_pool.free_list;

list_for_each_entry_safe(curr, tmp, &cli_mem_pool.free_list,
in_free_list)
{
list_del_init(&curr->in_free_list);
__cli_mem_release(curr);
while (curr) {
tmp = curr;
curr = tmp->next_free;
__cli_mem_release(tmp);
}

free_pages((unsigned long)cli_mem_pool.mem, cli_mem_pool.order);
cli_mem_pool.mem = NULL;
bzero_fast(&cli_mem_pool, sizeof(cli_mem_pool));
}

/*
* Initialize cli_mem pool.
*
* Allocates a contiguous block of TfwClientMem objects and initializes each
* element, then builds a free list for fast allocation.
*
* Steps:
* - Validate pool size from configuration.
* - Compute allocation order and clamp it to MAX_PAGE_ORDER.
* - Allocate zeroed pages for the entire pool.
* - Initialize each TfwClientMem (per-cpu counters + refcnt + work).
* - Link all objects into a singly-linked free list.
*
* The resulting pool provides fast `TfwClientMem` allocations at run time.
*/
static inline int
tfw_cli_mem_pool_init(void)
{
TfwClientMem *block;
unsigned int order;
int i, r;
TfwClientMem *block, *tail = NULL;
unsigned int i, order;
int r;

if (WARN_ON_ONCE(!client_cfg.lru_size))
return -EINVAL;
@@ -219,12 +256,31 @@ tfw_cli_mem_pool_init(void)
if (unlikely(!cli_mem_pool.mem))
return -ENOMEM;

/*
* Initialize pool in forward order and build free_list as
* 0 -> 1 -> ... -> N-1.
*
* This preserves the natural memory layout of the preallocated array,
* which is important because tfw_cli_mem_belongs_to_pool() relies on
* the pool being a contiguous range [mem, mem + size).
*
* Using tail insertion avoids reversing the order (which would happen
* with head insertion) and keeps allocation predictable and
* cache-friendly.
*/
block = cli_mem_pool.mem;
for (i = 0; i < client_cfg.lru_size; i++) {
r = tfw_cli_mem_init(&block[i], GFP_KERNEL);
if (unlikely(r))
return r;
list_add(&block[i].in_free_list, &cli_mem_pool.free_list);

if (!cli_mem_pool.free_list)
cli_mem_pool.free_list = &block[i];
else
tail->next_free = &block[i];

block[i].next_free = NULL;
tail = &block[i];
cli_mem_pool.size++;
}

Expand Down Expand Up @@ -350,26 +406,36 @@ tfw_client_addr_eq(TdbRec *rec, void *data)
return true;
}

/*
* Allocate cli_mem from slab cache and fully initialize it.
* Used as a fallback when pool allocation is exhausted.
*/
static inline TfwClientMem *
tfw_cli_mem_alloc_from_cache(void)
{
TfwClientMem *cli_mem;

cli_mem = kmem_cache_alloc(cli_mem_cache, GFP_ATOMIC);
cli_mem = kmem_cache_alloc(tfw_cli_mem_cache, GFP_ATOMIC);
if (unlikely(!cli_mem))
return NULL;

if (unlikely(tfw_cli_mem_init(cli_mem, GFP_ATOMIC)))
goto free_cli_mem;

INIT_WORK(&cli_mem->kill_work, tfw_cli_mem_kill_work_fn);
return cli_mem;

free_cli_mem:
kmem_cache_free(cli_mem_cache, cli_mem);
kmem_cache_free(tfw_cli_mem_cache, cli_mem);

return NULL;
}

/*
* Allocate cli_mem:
* - Try the fast pool first, then fall back to the slab cache.
* - On success, take an extra refcnt reference before returning.
*/
static inline TfwClientMem *
tfw_cli_mem_alloc(void)
{
@@ -589,10 +655,10 @@ TfwMod tfw_client_mod = {
int __init
tfw_client_init(void)
{
cli_mem_cache = kmem_cache_create("cli_mem_cache",
sizeof(TfwClientMem),
0, 0, NULL);
if (!cli_mem_cache)
tfw_cli_mem_cache = kmem_cache_create("tfw_cli_mem_cache",
sizeof(TfwClientMem),
0, 0, NULL);
if (!tfw_cli_mem_cache)
return -ENOMEM;
tfw_mod_register(&tfw_client_mod);

@@ -602,6 +668,6 @@
void
tfw_client_exit(void)
{
kmem_cache_destroy(cli_mem_cache);
kmem_cache_destroy(tfw_cli_mem_cache);
tfw_mod_unregister(&tfw_client_mod);
}
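
The body of tfw_cli_mem_alloc() is collapsed in this view; per its comment it tries the pool first, falls back to the slab cache, and takes an extra reference on success. The sketch below is only an assumption-laden illustration of that pattern (the locking is guessed from the assert in tfw_cli_mem_pool_alloc()), not the code from this PR.

/* Illustrative sketch only -- not the actual tfw_cli_mem_alloc(). */
static TfwClientMem *
example_cli_mem_alloc(void)
{
	TfwClientMem *cli_mem;

	/* Pool access must happen under client_db->ga_lock. */
	spin_lock_bh(&client_db->ga_lock);
	cli_mem = tfw_cli_mem_pool_alloc();
	spin_unlock_bh(&client_db->ga_lock);

	if (!cli_mem)
		cli_mem = tfw_cli_mem_alloc_from_cache();
	if (unlikely(!cli_mem))
		return NULL;

	/* Extra reference for the new user of the accounting object. */
	percpu_ref_get(&cli_mem->refcnt);

	return cli_mem;
}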
21 changes: 17 additions & 4 deletions fw/client.h
@@ -24,11 +24,24 @@
#include "http_limits.h"
#include "connection.h"

/*
* Client memory accounting structure for Tempesta FW.
*
* @kill_work - Workqueue item used for asynchronous structure
* cleanup/destruction;
* @next_free - Pointer to the next free object in the freelist;
* @refcnt - Per-CPU reference counter. Provides scalable and
* thread-safe reference tracking on SMP systems with
* minimal contention;
* @mem - Per-CPU memory accounting storage.
*/
typedef struct tfw_client_mem_t {
union {
struct work_struct kill_work;
struct tfw_client_mem_t *next_free;
};
struct percpu_ref refcnt;
struct work_struct kill_work;
long __percpu *mem;
struct list_head in_free_list;
} TfwClientMem;

/**
@@ -82,13 +95,13 @@ tfw_client_mem_put(TfwClientMem *cli_mem)
}

static inline long
tfw_client_mem(TfwClient *cli)
tfw_client_mem(TfwClientMem *cli_mem)
{
long mem = 0;
int cpu;

for_each_online_cpu(cpu)
mem += *(per_cpu_ptr(cli->cli_mem->mem, cpu));
mem += *(per_cpu_ptr(cli_mem->mem, cpu));

return mem;
}
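
/*
 * Hypothetical usage sketch (not part of this diff): with the new
 * signature, tfw_client_mem() can be called by code that only holds
 * the accounting object. The helper name and the limit argument are
 * assumptions for illustration.
 */
static inline bool
tfw_client_mem_over_limit(TfwClientMem *cli_mem, long limit)
{
	return tfw_client_mem(cli_mem) > limit;
}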