Skip to content

Commit 73299ba

Browse files
mrpre authored and Kernel Patches Daemon committed
bpf, sockmap: Affinitize workqueue to a specific CPU
Introduce a sk_psock_schedule_delayed_work() wrapper function, which calls schedule_delayed_work_on() to specify the CPU for running the workqueue if the BPF program has set the redirect CPU using bpf_sk_skb_set_redirect_cpu(). Otherwise, it falls back to the original logic. Signed-off-by: Jiayuan Chen <[email protected]>
1 parent 9c6cf6b commit 73299ba

File tree

2 files changed

+17
-4
lines changed

2 files changed

+17
-4
lines changed

include/linux/skmsg.h

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -396,6 +396,18 @@ static inline void sk_psock_report_error(struct sk_psock *psock, int err)
396396
sk_error_report(sk);
397397
}
398398

399+
static inline void sk_psock_schedule_delayed_work(struct sk_psock *psock,
400+
int delay)
401+
{
402+
s32 redir_cpu = psock->redir_cpu;
403+
404+
if (redir_cpu != BPF_SK_REDIR_CPU_UNSET)
405+
schedule_delayed_work_on(redir_cpu, &psock->work,
406+
delay);
407+
else
408+
schedule_delayed_work(&psock->work, delay);
409+
}
410+
399411
struct sk_psock *sk_psock_init(struct sock *sk, int node);
400412
void sk_psock_stop(struct sk_psock *psock);
401413

net/core/skmsg.c

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -689,7 +689,7 @@ static void sk_psock_backlog(struct work_struct *work)
689689
* other work that might be here.
690690
*/
691691
if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
692-
schedule_delayed_work(&psock->work, 1);
692+
sk_psock_schedule_delayed_work(psock, 1);
693693
goto end;
694694
}
695695
/* Hard errors break pipe and stop xmit. */
@@ -940,6 +940,7 @@ static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb)
940940
sock_drop(from->sk, skb);
941941
return -EIO;
942942
}
943+
psock_other->redir_cpu = from->redir_cpu;
943944
spin_lock_bh(&psock_other->ingress_lock);
944945
if (!sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
945946
spin_unlock_bh(&psock_other->ingress_lock);
@@ -949,7 +950,7 @@ static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb)
949950
}
950951

951952
skb_queue_tail(&psock_other->ingress_skb, skb);
952-
schedule_delayed_work(&psock_other->work, 0);
953+
sk_psock_schedule_delayed_work(psock_other, 0);
953954
spin_unlock_bh(&psock_other->ingress_lock);
954955
return 0;
955956
}
@@ -1027,7 +1028,7 @@ static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
10271028
spin_lock_bh(&psock->ingress_lock);
10281029
if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
10291030
skb_queue_tail(&psock->ingress_skb, skb);
1030-
schedule_delayed_work(&psock->work, 0);
1031+
sk_psock_schedule_delayed_work(psock, 0);
10311032
err = 0;
10321033
}
10331034
spin_unlock_bh(&psock->ingress_lock);
@@ -1059,7 +1060,7 @@ static void sk_psock_write_space(struct sock *sk)
10591060
psock = sk_psock(sk);
10601061
if (likely(psock)) {
10611062
if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
1062-
schedule_delayed_work(&psock->work, 0);
1063+
sk_psock_schedule_delayed_work(psock, 0);
10631064
write_space = psock->saved_write_space;
10641065
}
10651066
rcu_read_unlock();

0 commit comments

Comments (0)