bpf, verifier: Improve precision of BPF_ADD and BPF_SUB #5502

Open · wants to merge 2 commits into base: bpf-next_base
76 changes: 56 additions & 20 deletions kernel/bpf/verifier.c
@@ -14605,14 +14605,25 @@ static void scalar32_min_max_add(struct bpf_reg_state *dst_reg,
s32 *dst_smax = &dst_reg->s32_max_value;
u32 *dst_umin = &dst_reg->u32_min_value;
u32 *dst_umax = &dst_reg->u32_max_value;
u32 umin_val = src_reg->u32_min_value;
u32 umax_val = src_reg->u32_max_value;
bool min_overflow, max_overflow;

if (check_add_overflow(*dst_smin, src_reg->s32_min_value, dst_smin) ||
check_add_overflow(*dst_smax, src_reg->s32_max_value, dst_smax)) {
*dst_smin = S32_MIN;
*dst_smax = S32_MAX;
}
if (check_add_overflow(*dst_umin, src_reg->u32_min_value, dst_umin) ||
check_add_overflow(*dst_umax, src_reg->u32_max_value, dst_umax)) {

/* If either all additions overflow or no additions overflow, then
* it is okay to set: dst_umin = dst_umin + src_umin, dst_umax =
* dst_umax + src_umax. Otherwise (some additions overflow), set
* the output bounds to unbounded.
*/
min_overflow = check_add_overflow(*dst_umin, umin_val, dst_umin);
max_overflow = check_add_overflow(*dst_umax, umax_val, dst_umax);

if (!min_overflow && max_overflow) {
*dst_umin = 0;
*dst_umax = U32_MAX;
}
@@ -14625,14 +14636,25 @@ static void scalar_min_max_add(struct bpf_reg_state *dst_reg,
s64 *dst_smax = &dst_reg->smax_value;
u64 *dst_umin = &dst_reg->umin_value;
u64 *dst_umax = &dst_reg->umax_value;
u64 umin_val = src_reg->umin_value;
u64 umax_val = src_reg->umax_value;
bool min_overflow, max_overflow;

if (check_add_overflow(*dst_smin, src_reg->smin_value, dst_smin) ||
check_add_overflow(*dst_smax, src_reg->smax_value, dst_smax)) {
*dst_smin = S64_MIN;
*dst_smax = S64_MAX;
}
if (check_add_overflow(*dst_umin, src_reg->umin_value, dst_umin) ||
check_add_overflow(*dst_umax, src_reg->umax_value, dst_umax)) {

/* If either all additions overflow or no additions overflow, then
* it is okay to set: dst_umin = dst_umin + src_umin, dst_umax =
* dst_umax + src_umax. Otherwise (some additions overflow), set
* the output bounds to unbounded.
*/
min_overflow = check_add_overflow(*dst_umin, umin_val, dst_umin);
max_overflow = check_add_overflow(*dst_umax, umax_val, dst_umax);

if (!min_overflow && max_overflow) {
*dst_umin = 0;
*dst_umax = U64_MAX;
}
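
To make the bounds reasoning in the comments above concrete, here is a minimal user-space sketch (not part of the patch): __builtin_add_overflow stands in for the kernel's check_add_overflow helper, and add_u64_bounds is an illustrative name. Only the mixed outcome, where the upper sum wraps but the lower sum does not, forces the bounds to be widened.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch of the unsigned-add bounds rule; __builtin_add_overflow plays the
 * role of the kernel's check_add_overflow.
 */
static void add_u64_bounds(uint64_t dst_umin, uint64_t dst_umax,
			   uint64_t src_umin, uint64_t src_umax,
			   uint64_t *out_umin, uint64_t *out_umax)
{
	bool min_overflow, max_overflow;

	min_overflow = __builtin_add_overflow(dst_umin, src_umin, out_umin);
	max_overflow = __builtin_add_overflow(dst_umax, src_umax, out_umax);

	/* Only the mixed case (max wraps, min does not) loses all information. */
	if (!min_overflow && max_overflow) {
		*out_umin = 0;
		*out_umax = UINT64_MAX;
	}
}

int main(void)
{
	uint64_t umin, umax;

	/* Every possible sum wraps: bounds stay tight, [2, 0xfffffffffffffffe]. */
	add_u64_bounds(0x8000000000000001ULL, UINT64_MAX,
		       0x8000000000000001ULL, UINT64_MAX, &umin, &umax);
	printf("all overflow:     [%#llx, %#llx]\n",
	       (unsigned long long)umin, (unsigned long long)umax);

	/* Only the maximum sum wraps: the result must become [0, U64_MAX]. */
	add_u64_bounds(2, UINT64_MAX, 2, UINT64_MAX, &umin, &umax);
	printf("partial overflow: [%#llx, %#llx]\n",
	       (unsigned long long)umin, (unsigned long long)umax);

	return 0;
}

The second call mirrors the add64_partial_overflow selftest below: both operands lie in [2, U64_MAX], only the maximum sum wraps, so the result has to be treated as unbounded.
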
@@ -14643,23 +14665,30 @@ static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg,
{
s32 *dst_smin = &dst_reg->s32_min_value;
s32 *dst_smax = &dst_reg->s32_max_value;
u32 *dst_umin = &dst_reg->u32_min_value;
u32 *dst_umax = &dst_reg->u32_max_value;
u32 umin_val = src_reg->u32_min_value;
u32 umax_val = src_reg->u32_max_value;
bool min_underflow, max_underflow;

if (check_sub_overflow(*dst_smin, src_reg->s32_max_value, dst_smin) ||
check_sub_overflow(*dst_smax, src_reg->s32_min_value, dst_smax)) {
/* Overflow possible, we know nothing */
*dst_smin = S32_MIN;
*dst_smax = S32_MAX;
}
if (dst_reg->u32_min_value < umax_val) {
/* Overflow possible, we know nothing */
dst_reg->u32_min_value = 0;
dst_reg->u32_max_value = U32_MAX;
} else {
/* Cannot overflow (as long as bounds are consistent) */
dst_reg->u32_min_value -= umax_val;
dst_reg->u32_max_value -= umin_val;

/* If either all subtractions underflow or no subtractions
* underflow, it is okay to set: dst_umin = dst_umin - src_umax,
* dst_umax = dst_umax - src_umin. Otherwise (some subtractions
* underflow), set the output bounds to unbounded.
*/
min_underflow = check_sub_overflow(*dst_umin, umax_val, dst_umin);
max_underflow = check_sub_overflow(*dst_umax, umin_val, dst_umax);

if (min_underflow && !max_underflow) {
*dst_umin = 0;
*dst_umax = U32_MAX;
}
}

@@ -14668,23 +14697,30 @@ static void scalar_min_max_sub(struct bpf_reg_state *dst_reg,
{
s64 *dst_smin = &dst_reg->smin_value;
s64 *dst_smax = &dst_reg->smax_value;
u64 *dst_umin = &dst_reg->umin_value;
u64 *dst_umax = &dst_reg->umax_value;
u64 umin_val = src_reg->umin_value;
u64 umax_val = src_reg->umax_value;
bool min_underflow, max_underflow;

if (check_sub_overflow(*dst_smin, src_reg->smax_value, dst_smin) ||
check_sub_overflow(*dst_smax, src_reg->smin_value, dst_smax)) {
/* Overflow possible, we know nothing */
*dst_smin = S64_MIN;
*dst_smax = S64_MAX;
}
if (dst_reg->umin_value < umax_val) {
/* Overflow possible, we know nothing */
dst_reg->umin_value = 0;
dst_reg->umax_value = U64_MAX;
} else {
/* Cannot overflow (as long as bounds are consistent) */
dst_reg->umin_value -= umax_val;
dst_reg->umax_value -= umin_val;

/* If either all subtractions underflow or no subtractions
* underflow, it is okay to set: dst_umin = dst_umin - src_umax,
* dst_umax = dst_umax - src_umin. Otherwise (some subtractions
* underflow), set the output bounds to unbounded.
*/
min_underflow = check_sub_overflow(*dst_umin, umax_val, dst_umin);
max_underflow = check_sub_overflow(*dst_umax, umin_val, dst_umax);

if (min_underflow && !max_underflow) {
*dst_umin = 0;
*dst_umax = U64_MAX;
}
}
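
The same idea applies to subtraction. Below is a companion user-space sketch (again illustrative only, not the kernel code) with __builtin_sub_overflow standing in for check_sub_overflow; the two calls mirror the sub64_full_overflow and sub64_partial_overflow selftests added in this series.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch of the unsigned-subtract bounds rule; __builtin_sub_overflow plays
 * the role of the kernel's check_sub_overflow.
 */
static void sub_u64_bounds(uint64_t dst_umin, uint64_t dst_umax,
			   uint64_t src_umin, uint64_t src_umax,
			   uint64_t *out_umin, uint64_t *out_umax)
{
	bool min_underflow, max_underflow;

	/* Smallest result: dst_umin - src_umax; largest: dst_umax - src_umin. */
	min_underflow = __builtin_sub_overflow(dst_umin, src_umax, out_umin);
	max_underflow = __builtin_sub_overflow(dst_umax, src_umin, out_umax);

	/* Only the mixed case (min wraps, max does not) loses all information. */
	if (min_underflow && !max_underflow) {
		*out_umin = 0;
		*out_umax = UINT64_MAX;
	}
}

int main(void)
{
	uint64_t umin, umax;

	/* dst = 0, src in [0x8000000000000000, U64_MAX]: every difference
	 * wraps, so the bounds [1, 0x8000000000000000] remain exact.
	 */
	sub_u64_bounds(0, 0, 0x8000000000000000ULL, UINT64_MAX, &umin, &umax);
	printf("all underflow:     [%#llx, %#llx]\n",
	       (unsigned long long)umin, (unsigned long long)umax);

	/* dst unbounded, src = 1: only the minimum wraps, result is unknown. */
	sub_u64_bounds(0, UINT64_MAX, 1, 1, &umin, &umax);
	printf("partial underflow: [%#llx, %#llx]\n",
	       (unsigned long long)umin, (unsigned long long)umax);

	return 0;
}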

161 changes: 161 additions & 0 deletions tools/testing/selftests/bpf/progs/verifier_bounds.c
@@ -1371,4 +1371,165 @@ __naked void mult_sign_ovf(void)
__imm(bpf_skb_store_bytes)
: __clobber_all);
}

SEC("socket")
__description("64-bit addition, all outcomes overflow")
__success __log_level(2)
__msg("5: (0f) r3 += r3 {{.*}} R3_w=scalar(umin=0x4000000000000000,umax=0xfffffffffffffffe)")
__retval(0)
__naked void add64_full_overflow(void)
{
asm volatile (
"r4 = 0;"
"r4 = -r4;"
"r3 = 0xa000000000000000 ll;"
"r3 |= r4;"
"r3 += r3;"
"r0 = 0;"
"exit"
:
:
: __clobber_all);
}

SEC("socket")
__description("64-bit addition, partial overflow, result in unbounded reg")
__success __log_level(2)
__msg("4: (0f) r3 += r3 {{.*}} R3_w=scalar()")
__retval(0)
__naked void add64_partial_overflow(void)
{
asm volatile (
"r4 = 0;"
"r4 = -r4;"
"r3 = 2;"
"r3 |= r4;"
"r3 += r3;"
"r0 = 0;"
"exit"
:
:
: __clobber_all);
}

SEC("socket")
__description("32-bit addition overflow, all outcomes overflow")
__success __log_level(2)
__msg("4: (0c) w3 += w3 {{.*}} R3_w=scalar(smin=umin=umin32=0x40000000,smax=umax=umax32=0xfffffffe,var_off=(0x0; 0xffffffff))")
__retval(0)
__naked void add32_full_overflow(void)
{
asm volatile (
"w4 = 0;"
"w4 = -w4;"
"w3 = 0xa0000000;"
"w3 |= w4;"
"w3 += w3;"
"r0 = 0;"
"exit"
:
:
: __clobber_all);
}

SEC("socket")
__description("32-bit addition, partial overflow, result in unbounded u32 bounds")
__success __log_level(2)
__msg("4: (0c) w3 += w3 {{.*}} R3_w=scalar(smin=0,smax=umax=0xffffffff,var_off=(0x0; 0xffffffff))")
__retval(0)
__naked void add32_partial_overflow(void)
{
asm volatile (
"w4 = 0;"
"w4 = -w4;"
"w3 = 2;"
"w3 |= w4;"
"w3 += w3;"
"r0 = 0;"
"exit"
:
:
: __clobber_all);
}

SEC("socket")
__description("64-bit subtraction, all outcomes underflow")
__success __log_level(2)
__msg("6: (1f) r3 -= r1 {{.*}} R3_w=scalar(umin=1,umax=0x8000000000000000)")
__retval(0)
__naked void sub64_full_overflow(void)
{
asm volatile (
"r1 = 0;"
"r1 = -r1;"
"r2 = 0x8000000000000000 ll;"
"r1 |= r2;"
"r3 = 0;"
"r3 -= r1;"
"r0 = 0;"
"exit"
:
:
: __clobber_all);
}

SEC("socket")
__description("64-bit subtration, partial overflow, result in unbounded reg")
__success __log_level(2)
__msg("3: (1f) r3 -= r2 {{.*}} R3_w=scalar()")
__retval(0)
__naked void sub64_partial_overflow(void)
{
asm volatile (
"r3 = 0;"
"r3 = -r3;"
"r2 = 1;"
"r3 -= r2;"
"r0 = 0;"
"exit"
:
:
: __clobber_all);
}

SEC("socket")
__description("32-bit subtraction overflow, all outcomes underflow")
__success __log_level(2)
__msg("5: (1c) w3 -= w1 {{.*}} R3_w=scalar(smin=umin=umin32=1,smax=umax=umax32=0x80000000,var_off=(0x0; 0xffffffff))")
__retval(0)
__naked void sub32_full_overflow(void)
{
asm volatile (
"w1 = 0;"
"w1 = -w1;"
"w2 = 0x80000000;"
"w1 |= w2;"
"w3 = 0;"
"w3 -= w1;"
"r0 = 0;"
"exit"
:
:
: __clobber_all);
}

SEC("socket")
__description("32-bit subtration, partial overflow, result in unbounded u32 bounds")
__success __log_level(2)
__msg("3: (1c) w3 -= w2 {{.*}} R3_w=scalar(smin=0,smax=umax=0xffffffff,var_off=(0x0; 0xffffffff))")
__retval(0)
__naked void sub32_partial_overflow(void)
{
asm volatile (
"w3 = 0;"
"w3 = -w3;"
"w2 = 1;"
"w3 -= w2;"
"r0 = 0;"
"exit"
:
:
: __clobber_all);
}

char _license[] SEC("license") = "GPL";