
Conversation

@kernel-patches-daemon-bpf

Pull request for series with
subject: Add tnum_scast helper
version: 1
url: https://patchwork.kernel.org/project/netdevbpf/list/?series=1027352

@kernel-patches-daemon-bpf
Author

Upstream branch: 590699d
series: https://patchwork.kernel.org/project/netdevbpf/list/?series=1027352
version: 1

@kernel-patches-review-bot

commit 6fff854903f923b125ac374c03b50382d27b87ec
Author: Dimitar Kanaliev <[email protected]>

bpf: Introduce tnum_scast as a tnum native sign extension helper

This patch introduces a new helper function, tnum_scast(), which
sign-extends a tnum from a smaller integer size to the full 64-bit
BPF register width using arithmetic shift operations.
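
For intuition, a hypothetical illustration of the intended semantics
(the values below are mine, not from the patch):

    /* Sign-extend the known 8-bit constant 0x80: with s = 64 - 1*8 = 56,
     * ((s64)0x80 << 56) >> 56 == 0xFFFFFFFFFFFFFF80, and the mask of a
     * fully known value stays 0.
     */
    struct tnum t = TNUM(0x80, 0);     /* known constant 0x80 */
    struct tnum s = tnum_scast(t, 1);  /* value 0xFFFFFFFFFFFFFF80, mask 0 */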

> diff --git a/include/linux/tnum.h b/include/linux/tnum.h
> index c52b862da..ed18ee114 100644
> --- a/include/linux/tnum.h
> +++ b/include/linux/tnum.h
> @@ -63,6 +63,9 @@ struct tnum tnum_union(struct tnum t1, struct tnum t2);
>  /* Return @a with all but the lowest @size bytes cleared */
>  struct tnum tnum_cast(struct tnum a, u8 size);
>
> +/* Return @a sign-extended from @size bytes */
> +struct tnum tnum_scast(struct tnum a, u8 size);
> +
>  /* Returns true if @a is a known constant */
>  static inline bool tnum_is_const(struct tnum a)
>  {
> diff --git a/kernel/bpf/tnum.c b/kernel/bpf/tnum.c
> index f8e70e9c3..eabcec2eb 100644
> --- a/kernel/bpf/tnum.c
> +++ b/kernel/bpf/tnum.c
> @@ -199,6 +199,19 @@ struct tnum tnum_cast(struct tnum a, u8 size)
>  	return a;
>  }
>
> +struct tnum tnum_scast(struct tnum a, u8 size)
> +{
> +	u8 s = 64 - size * 8;
> +	u64 value, mask;
> +
> +	if (size >= 8)
> +		return a;
> +
> +	value = ((s64)a.value << s) >> s;
> +	mask = ((s64)a.mask << s) >> s;
                                    ^^

Can this invoke undefined behavior when size is 0? When size==0,
s becomes 64, and shifting a 64-bit value by 64 bits is undefined
behavior according to the C standard. The guard only checks size >= 8,
allowing size==0 to reach the shift operations.

The similar function tnum_cast() handles size==0 by producing a zero
value (since (1ULL << 0) - 1 == 0), which is well-defined. Should
tnum_scast() also handle size==0 explicitly, perhaps by adding
"size == 0 ||" to the guard condition?

> +	return TNUM(value, mask);
> +}
> +
>  bool tnum_is_aligned(struct tnum a, u64 size)
>  {
>  	if (!size)



AI reviewed your patch. Please fix the bug, or reply by email explaining why it's not a bug.
See: https://github.com/kernel-patches/vmtest/blob/master/ci/claude/README.md

In-Reply-To-Subject: bpf: Introduce tnum_scast as a tnum native sign extension helper
CI run summary: https://github.com/kernel-patches/bpf/actions/runs/19670538430

@kernel-patches-review-bot

commit f5a86f7996231c8c1386ef9a179279bf03b89c54
Author: Dimitar Kanaliev <[email protected]>

bpf: verifier: Simplify register sign extension with tnum_scast

This patch refactors the verifier's sign-extension logic for narrow
register values to use the new tnum_scast() helper, delegating the
bounds calculation to tnum_scast() plus the existing bounds
synchronization logic.

> diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> index 766695491..c9a6bf85b 100644
> --- a/kernel/bpf/verifier.c
> +++ b/kernel/bpf/verifier.c

[ ... ]

> @@ -6876,147 +6876,57 @@ static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
>  	reg_bounds_sync(reg);
>  }
>
> -static void set_sext64_default_val(struct bpf_reg_state *reg, int size)
> -{

[ ... ]

> -}
> -
>  static void coerce_reg_to_size_sx(struct bpf_reg_state *reg, int size)
>  {

[ ... ]

>  }
>
> -static void set_sext32_default_val(struct bpf_reg_state *reg, int size)
> -{

[ ... ]

> -}
> -
>  static void coerce_subreg_to_size_sx(struct bpf_reg_state *reg, int size)
>  {
> -	s32 init_s32_max, init_s32_min, s32_max, s32_min, u32_val;
> -	u32 top_smax_value, top_smin_value;
> -	u32 num_bits = size * 8;
> -
> -	if (tnum_is_const(reg->var_off)) {
> -		u32_val = reg->var_off.value;
> -		if (size == 1)
> -			reg->var_off = tnum_const((s8)u32_val);
> -		else
> -			reg->var_off = tnum_const((s16)u32_val);
> +	s32 smin_value, smax_value;
>
> -		u32_val = reg->var_off.value;
> -		reg->s32_min_value = reg->s32_max_value = u32_val;
> -		reg->u32_min_value = reg->u32_max_value = u32_val;
> +	if (size >= 4)
>  		return;
> -	}
>
> -	top_smax_value = ((u32)reg->s32_max_value >> num_bits) << num_bits;
> -	top_smin_value = ((u32)reg->s32_min_value >> num_bits) << num_bits;
> +	reg->var_off = tnum_subreg(tnum_scast(reg->var_off, size));
>
> -	if (top_smax_value != top_smin_value)
> -		goto out;
> +	smin_value = -(1 << (size * 8 - 1));
> +	smax_value = (1 << (size * 8 - 1)) - 1;
>
> -	/* find the s32_min and s32_min after sign extension */
> -	if (size == 1) {
> -		init_s32_max = (s8)reg->s32_max_value;
> -		init_s32_min = (s8)reg->s32_min_value;
> -	} else {
> -		/* size == 2 */
> -		init_s32_max = (s16)reg->s32_max_value;
> -		init_s32_min = (s16)reg->s32_min_value;
> -	}
> -	s32_max = max(init_s32_max, init_s32_min);
> -	s32_min = min(init_s32_max, init_s32_min);
> -
> -	if ((s32_min >= 0) == (s32_max >= 0)) {
> -		reg->s32_min_value = s32_min;
> -		reg->s32_max_value = s32_max;
> -		reg->u32_min_value = (u32)s32_min;
> -		reg->u32_max_value = (u32)s32_max;
> -		reg->var_off = tnum_subreg(tnum_range(s32_min, s32_max));
> -		return;
> -	}
> +	reg->s32_min_value = smin_value;
> +	reg->s32_max_value = smax_value;
>
> -out:
> -	set_sext32_default_val(reg, size);
> +	reg->u32_min_value = 0;
> +	reg->u32_max_value = U32_MAX;
> +
> +	__update_reg32_bounds(reg);
> +
> +	reg->umin_value = reg->u32_min_value;
> +	reg->umax_value = reg->u32_max_value;
> +
> +	reg->smin_value = reg->umin_value;
> +	reg->smax_value = reg->umax_value;
                        ^^^^^^^^
Can reg->smin_value and reg->smax_value be correctly set from unsigned
values here? For sign-extended negative values, the 64-bit unsigned
bounds (umin/umax) will be positive after zero-extension, but the 64-bit
signed bounds (smin/smax) should remain negative.

Consider an 8-bit value -1 (0xFF) that gets sign-extended:
- After sign extension: s32_min_value = s32_max_value = -1 (0xFFFFFFFF)
- After __update_reg32_bounds: u32_min_value = u32_max_value = 0xFFFFFFFF
- After zero-extension to 64-bit: umin_value = umax_value = 0x00000000FFFFFFFF
- Then: smin_value = 0x00000000FFFFFFFF (interpreted as +4294967295)

But the correct 64-bit signed value should be -1 (0xFFFFFFFFFFFFFFFF),
not +4294967295.

The pattern in __reg_assign_32_into_64() at verifier.c:2771-2772 shows
the correct approach for propagating signed 32-bit bounds to 64-bit:

    reg->smin_value = reg->s32_min_value;
    reg->smax_value = reg->s32_max_value;

This uses implicit sign-extension from s32 to s64. Should
coerce_subreg_to_size_sx() follow the same pattern?
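
A hedged sketch of that pattern applied here, assuming the rest of the
new tail stays as in the patch:

    /* Implicit s32 -> s64 conversion sign-extends, so negative 32-bit
     * bounds stay negative in the 64-bit signed bounds, unlike copying
     * the zero-extended unsigned bounds.
     */
    reg->smin_value = reg->s32_min_value;
    reg->smax_value = reg->s32_max_value;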

>  }
>
>  static bool bpf_map_is_rdonly(const struct bpf_map *map)



AI reviewed your patch. Please fix the bug, or reply by email explaining why it's not a bug.
See: https://github.com/kernel-patches/vmtest/blob/master/ci/claude/README.md

In-Reply-To-Subject: bpf: verifier: Simplify register sign extension with tnum_scast
CI run summary: https://github.com/kernel-patches/bpf/actions/runs/19670538430

@kernel-patches-daemon-bpf
Author

Forwarding comment 3575602262 via email
In-Reply-To: [email protected]
Patch: https://patchwork.kernel.org/project/netdevbpf/patch/[email protected]/

@kernel-patches-daemon-bpf
Author

Forwarding comment 3575618805 via email
In-Reply-To: [email protected]
Patch: https://patchwork.kernel.org/project/netdevbpf/patch/[email protected]/

@kernel-patches-daemon-bpf
Author

Upstream branch: f2cb066
series: https://patchwork.kernel.org/project/netdevbpf/list/?series=1027352
version: 1

@kernel-patches-daemon-bpf
Author

Upstream branch: 8c868a3
series: https://patchwork.kernel.org/project/netdevbpf/list/?series=1027352
version: 1

@kernel-patches-daemon-bpf
Author

Upstream branch: 5262cb2
series: https://patchwork.kernel.org/project/netdevbpf/list/?series=1027352
version: 1

@kernel-patches-daemon-bpf
Author

Upstream branch: 688b745
series: https://patchwork.kernel.org/project/netdevbpf/list/?series=1027352
version: 1

@kernel-patches-daemon-bpf
Author

Upstream branch: bd5bdd2
series: https://patchwork.kernel.org/project/netdevbpf/list/?series=1027352
version: 1

@kernel-patches-daemon-bpf
Author

Upstream branch: 34235a3
series: https://patchwork.kernel.org/project/netdevbpf/list/?series=1027352
version: 1

@kernel-patches-daemon-bpf (bot) force-pushed the bpf-next_base branch 5 times, most recently from 162c0b3 to 305a67f on December 16, 2025 18:53
@kernel-patches-daemon-bpf (bot) force-pushed the bpf-next_base branch 7 times, most recently from e8b906b to 302988a on December 23, 2025 19:40
@kernel-patches-daemon-bpf (bot) force-pushed the bpf-next_base branch 15 times, most recently from 22bf7db to d1dcae1 on January 6, 2026 05:09
@kernel-patches-daemon-bpf (bot) force-pushed the bpf-next_base branch 3 times, most recently from e7b5368 to 1e194b1 on January 7, 2026 05:14