
commit 6715df8d5d24655b9fd368e904028112b54c7de1 upstream.

This commit updates the following functions to allow reads from
uninitialized stack locations when the env->allow_uninit_stack option
is enabled:

- check_stack_read_fixed_off()
- check_stack_range_initialized(), called from:
  - check_stack_read_var_off()
  - check_helper_mem_access()

This change allows relaxing the logic in stacksafe() to treat
STACK_MISC and STACK_INVALID in the same way and makes the following
stack slot configurations equivalent:

  | Cached state     | Current state    |
  | stack slot       | stack slot       |
  |------------------+------------------|
  | STACK_INVALID or | STACK_INVALID or |
  | STACK_MISC       | STACK_SPILL   or |
  |                  | STACK_MISC    or |
  |                  | STACK_ZERO    or |
  |                  | STACK_DYNPTR     |

This leads to significant verification speed gains (see below).

The idea was suggested by Andrii Nakryiko [1] and the initial patch was
created by Alexei Starovoitov [2].

Currently env->allow_uninit_stack is enabled for programs loaded by
users with CAP_PERFMON or CAP_SYS_ADMIN capabilities.

A number of test cases from verifier/*.c were expecting uninitialized
stack access to be an error. These test cases were updated to execute
in unprivileged mode (thus preserving the tests).

The test progs/test_global_func10.c expected an "invalid indirect read
from stack" error message caused by an access to an uninitialized
memory region. This error is no longer possible in privileged mode.
The test is updated to provoke an "invalid indirect access to stack"
error instead, caused by an access to an invalid stack address (such
an error is not verified by the progs/test_global_func*.c series of
tests).

The following tests had to be removed because they can't be made
unprivileged:

- verifier/sock.c:
  - "sk_storage_get(map, skb->sk, &stack_value, 1): partially init
    stack_value"
    BPF_PROG_TYPE_SCHED_CLS programs are not executed in unprivileged
    mode.

- verifier/var_off.c:
  - "indirect variable-offset stack access, max_off+size >
    max_initialized"
  - "indirect variable-offset stack access, uninitialized"
    These tests verify that access to uninitialized stack values is
    detected when the stack offset is not a constant. However, variable
    stack access is prohibited in unprivileged mode, thus these tests
    are no longer valid.

* * *

Here is a veristat log comparing this patch with current master on a
set of selftest binaries listed in
tools/testing/selftests/bpf/veristat.cfg and cilium BPF binaries
(see [3]):

$ ./veristat -e file,prog,states -C -f 'states_pct<-30' master.log current.log
File                        Program                     States (A)  States (B)    States (DIFF)
--------------------------  --------------------------  ----------  ----------  ----------------
bpf_host.o                  tail_handle_ipv6_from_host         349         244    -105 (-30.09%)
bpf_host.o                  tail_handle_nat_fwd_ipv4          1320         895    -425 (-32.20%)
bpf_lxc.o                   tail_handle_nat_fwd_ipv4          1320         895    -425 (-32.20%)
bpf_sock.o                  cil_sock4_connect                   70          48     -22 (-31.43%)
bpf_sock.o                  cil_sock4_sendmsg                   68          46     -22 (-32.35%)
bpf_xdp.o                   tail_handle_nat_fwd_ipv4          1554         803    -751 (-48.33%)
bpf_xdp.o                   tail_lb_ipv4                      6457        2473   -3984 (-61.70%)
bpf_xdp.o                   tail_lb_ipv6                      7249        3908   -3341 (-46.09%)
pyperf600_bpf_loop.bpf.o    on_event                           287         145    -142 (-49.48%)
strobemeta.bpf.o            on_event                         15915        4772  -11143 (-70.02%)
strobemeta_nounroll2.bpf.o  on_event                         17087        3820  -13267 (-77.64%)
xdp_synproxy_kern.bpf.o     syncookie_tc                     21271        6635  -14636 (-68.81%)
xdp_synproxy_kern.bpf.o     syncookie_xdp                    23122        6024  -17098 (-73.95%)
--------------------------  --------------------------  ----------  ----------  ----------------

Note: I limited the selection to states_pct<-30%.
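To make the equivalence table above concrete, here is a minimal sketch
of the relaxed slot comparison (illustrative pseudologic only, not the
actual stacksafe() code; the enum and helper names are stand-ins):

    enum stack_slot_type {
            STACK_INVALID,
            STACK_SPILL,
            STACK_MISC,
            STACK_ZERO,
            STACK_DYNPTR,
    };

    /* Return 1 if a stack slot of the cached (already verified) state
     * is compatible with the corresponding slot of the current state.
     */
    static int slot_is_safe(enum stack_slot_type cached,
                            enum stack_slot_type cur,
                            int allow_uninit_stack)
    {
            /* With allow_uninit_stack, STACK_INVALID and STACK_MISC in
             * the cached state accept any contents in the current one:
             * SPILL, MISC, ZERO, DYNPTR or INVALID. */
            if (allow_uninit_stack &&
                (cached == STACK_INVALID || cached == STACK_MISC))
                    return 1;
            return cached == cur;
    }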
Inspection of differences in pyperf600_bpf_loop behavior shows that the
following patch for the test removes almost all differences:

--- a/tools/testing/selftests/bpf/progs/pyperf.h
+++ b/tools/testing/selftests/bpf/progs/pyperf.h
@@ -266,8 +266,8 @@ int __on_event(struct bpf_raw_tracepoint_args *ctx)
 	}

 	if (event->pthread_match || !pidData->use_tls) {
-		void* frame_ptr;
-		FrameData frame;
+		void* frame_ptr = 0;
+		FrameData frame = {};
 		Symbol sym = {};
 		int cur_cpu = bpf_get_smp_processor_id();

W/o this patch the difference comes from the following pattern
(for different variables):

    static bool get_frame_data(... FrameData *frame ...)
    {
        ...
        bpf_probe_read_user(&frame->f_code, ...);
        if (!frame->f_code)
            return false;
        ...
        bpf_probe_read_user(&frame->co_name, ...);
        if (frame->co_name)
            ...;
    }

    int __on_event(struct bpf_raw_tracepoint_args *ctx)
    {
        FrameData frame;
        ...
        get_frame_data(... &frame ...) // indirectly via a bpf_loop & callback
        ...
    }

    SEC("raw_tracepoint/kfree_skb")
    int on_event(struct bpf_raw_tracepoint_args* ctx)
    {
        ...
        ret |= __on_event(ctx);
        ret |= __on_event(ctx);
        ...
    }

With regard to the value `frame->co_name` the following is important:

- Because of the conditional `if (!frame->f_code)` each call to
  __on_event() produces two states, one with `frame->co_name` marked
  as STACK_MISC, another with it as is (and marked STACK_INVALID on a
  first call).

- The call to bpf_probe_read_user() does not mark the stack slots
  corresponding to `&frame->co_name` as REG_LIVE_WRITTEN, but it does
  mark these slots as STACK_MISC. This happens because of the following
  loop in check_helper_call():

    for (i = 0; i < meta.access_size; i++) {
        err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
                               BPF_WRITE, -1, false);
        if (err)
            return err;
    }

  Note the size of the write: it is a one-byte write for each byte
  touched by the helper. A BPF_B write does not lead to a write mark
  for the target stack slot.

- This means that w/o this patch, when the second __on_event() call is
  verified, `if (frame->co_name)` will propagate read marks first to a
  stack slot with STACK_MISC marks and second to a stack slot with
  STACK_INVALID marks, and these states would be considered different.

[1] https://lore.kernel.org/bpf/CAEf4BzY3e+ZuC6HUa8dCiUovQRg2SzEk7M-dSkqNZyn=xEmnPA@mail.gmail.com/
[2] https://lore.kernel.org/bpf/CAADnVQKs2i1iuZ5SUGuJtxWVfGYR9kDgYKhq3rNV+kBLQCu7rA@mail.gmail.com/
[3] git@github.com:anakryiko/cilium.git

Suggested-by: Andrii Nakryiko <andrii@kernel.org>
Co-developed-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/r/20230219200427.606541-2-eddyz87@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Maxim Mikityanskiy <maxim@isovalent.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
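To make the pattern above concrete, the following is a minimal
self-contained sketch (hypothetical, not part of this patch; the
struct, program name, and section are made up) that triggers the same
STACK_MISC vs STACK_INVALID state split:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct frame_data {
            void *f_code;
            void *co_name;
    };

    SEC("raw_tracepoint/kfree_skb")
    int on_event_sketch(struct bpf_raw_tracepoint_args *ctx)
    {
            struct frame_data frame; /* deliberately uninitialized */

            /* From the verifier's point of view the helper fills
             * &frame.f_code with one-byte BPF_B writes, marking the
             * slots STACK_MISC but not REG_LIVE_WRITTEN. */
            bpf_probe_read_user(&frame.f_code, sizeof(frame.f_code),
                                (void *)ctx->args[0]);
            if (!frame.f_code)
                    return 0; /* frame.co_name stays STACK_INVALID here */

            bpf_probe_read_user(&frame.co_name, sizeof(frame.co_name),
                                (void *)ctx->args[0]);
            /* Reading co_name propagates read marks; with this patch
             * the STACK_MISC and STACK_INVALID variants converge in
             * stacksafe(). */
            return frame.co_name ? 1 : 0;
    }

    char _license[] SEC("license") = "GPL";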
{
	"check valid spill/fill",
	.insns = {
	/* spill R1(ctx) into stack */
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
	/* fill it back into R2 */
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
	/* should be able to access R0 = *(R2 + 8) */
	/* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "R0 leaks addr",
	.result = ACCEPT,
	.result_unpriv = REJECT,
	.retval = POINTER_VALUE,
},
{
	"check valid spill/fill, skb mark",
	.insns = {
	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
		    offsetof(struct __sk_buff, mark)),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.result_unpriv = ACCEPT,
},
{
	"check valid spill/fill, ptr to mem",
	.insns = {
	/* reserve 8 byte ringbuf memory */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_MOV64_IMM(BPF_REG_2, 8),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve),
	/* store a pointer to the reserved memory in R6 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	/* check whether the reservation was successful */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
	/* spill R6(mem) into the stack */
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
	/* fill it back in R7 */
	BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_10, -8),
	/* should be able to access *(R7) = 0 */
	BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 0),
	/* submit the reserved ringbuf memory */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_submit),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_ringbuf = { 1 },
	.result = ACCEPT,
	.result_unpriv = ACCEPT,
},
{
	"check corrupted spill/fill",
	.insns = {
	/* spill R1(ctx) into stack */
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
	/* mess up with R1 pointer on stack */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
	/* fill back into R0 is fine for priv.
	 * R0 now becomes SCALAR_VALUE.
	 */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
	/* Load from R0 should fail. */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "attempt to corrupt spilled",
	.errstr = "R0 invalid mem access 'inv",
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"check corrupted spill/fill, LSB",
	.insns = {
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
	BPF_ST_MEM(BPF_H, BPF_REG_10, -8, 0xcafe),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "attempt to corrupt spilled",
	.result_unpriv = REJECT,
	.result = ACCEPT,
	.retval = POINTER_VALUE,
},
{
	"check corrupted spill/fill, MSB",
	.insns = {
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
	BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0x12345678),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "attempt to corrupt spilled",
	.result_unpriv = REJECT,
	.result = ACCEPT,
	.retval = POINTER_VALUE,
},
{
	"Spill and refill a u32 const scalar. Offset to skb->data",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	/* r4 = 20 */
	BPF_MOV32_IMM(BPF_REG_4, 20),
	/* *(u32 *)(r10 -8) = r4 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
	/* r4 = *(u32 *)(r10 -8) */
	BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -8),
	/* r0 = r2 */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=20 */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
	/* if (r0 > r3) R0=pkt,off=20 R2=pkt R3=pkt_end R4=20 */
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
	/* r0 = *(u32 *)r2 R0=pkt,off=20,r=20 R2=pkt,r=20 R3=pkt_end R4=20 */
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"Spill a u32 const, refill from another half of the uninit u32 from the stack",
	.insns = {
	/* r4 = 20 */
	BPF_MOV32_IMM(BPF_REG_4, 20),
	/* *(u32 *)(r10 -8) = r4 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
	/* r4 = *(u32 *)(r10 -4) fp-8=????rrrr */
	BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -4),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result_unpriv = REJECT,
	.errstr_unpriv = "invalid read from stack off -4+0 size 4",
	/* in privileged mode reads from uninitialized stack locations are permitted */
	.result = ACCEPT,
},
{
	"Spill a u32 const scalar. Refill as u16. Offset to skb->data",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	/* r4 = 20 */
	BPF_MOV32_IMM(BPF_REG_4, 20),
	/* *(u32 *)(r10 -8) = r4 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
	/* r4 = *(u16 *)(r10 -8) */
	BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -8),
	/* r0 = r2 */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
	/* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
	/* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid access to packet",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"Spill u32 const scalars. Refill as u64. Offset to skb->data",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	/* r6 = 0 */
	BPF_MOV32_IMM(BPF_REG_6, 0),
	/* r7 = 20 */
	BPF_MOV32_IMM(BPF_REG_7, 20),
	/* *(u32 *)(r10 -4) = r6 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_6, -4),
	/* *(u32 *)(r10 -8) = r7 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, -8),
	/* r4 = *(u16 *)(r10 -8) */
	BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -8),
	/* r0 = r2 */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
	/* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
	/* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid access to packet",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"Spill a u32 const scalar. Refill as u16 from fp-6. Offset to skb->data",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	/* r4 = 20 */
	BPF_MOV32_IMM(BPF_REG_4, 20),
	/* *(u32 *)(r10 -8) = r4 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
	/* r4 = *(u16 *)(r10 -6) */
	BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -6),
	/* r0 = r2 */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
	/* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
	/* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid access to packet",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"Spill and refill a u32 const scalar at non 8byte aligned stack addr. Offset to skb->data",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	/* r4 = 20 */
	BPF_MOV32_IMM(BPF_REG_4, 20),
	/* *(u32 *)(r10 -8) = r4 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
	/* *(u32 *)(r10 -4) = r4 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -4),
	/* r4 = *(u32 *)(r10 -4) */
	BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -4),
	/* r0 = r2 */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=U32_MAX */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
	/* if (r0 > r3) R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4=umax=U32_MAX */
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
	/* r0 = *(u32 *)r2 R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4=umax=U32_MAX */
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid access to packet",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"Spill and refill a umax=40 bounded scalar. Offset to skb->data",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1,
		    offsetof(struct __sk_buff, tstamp)),
	BPF_JMP_IMM(BPF_JLE, BPF_REG_4, 40, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* *(u32 *)(r10 -8) = r4 R4=umax=40 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
	/* r4 = *(u32 *)(r10 -8) */
	BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -8),
	/* r2 += r4 R2=pkt R4=umax=40 */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_4),
	/* r0 = r2 R2=pkt,umax=40 R4=umax=40 */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	/* r2 += 20 R0=pkt,umax=40 R2=pkt,umax=40 */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 20),
	/* if (r2 > r3) R0=pkt,umax=40 R2=pkt,off=20,umax=40 */
	BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_3, 1),
	/* r0 = *(u32 *)r0 R0=pkt,r=20,umax=40 R2=pkt,off=20,r=20,umax=40 */
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"Spill a u32 scalar at fp-4 and then at fp-8",
	.insns = {
	/* r4 = 4321 */
	BPF_MOV32_IMM(BPF_REG_4, 4321),
	/* *(u32 *)(r10 -4) = r4 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -4),
	/* *(u32 *)(r10 -8) = r4 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
	/* r4 = *(u64 *)(r10 -8) */
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},