From a371f374654913102a3f189113c4507714c45942 Mon Sep 17 00:00:00 2001 From: Jordan Rife Date: Thu, 13 Nov 2025 20:52:48 -0800 Subject: [PATCH 1/7] bpf: Set up update_prog scaffolding for bpf_tracing_link_lops The type and expected_attach_type of the new program must match the original program and the new program must be compatible with the attachment target. Use a global mutex, trace_link_mutex, to synchronize parallel update operations similar to other link types (sock_map, xdp, etc.) that use a global mutex. Contention should be low, so this should be OK. Subsequent patches fill in the program update logic for freplace/fentry/fmod_ret/fexit links. Signed-off-by: Jordan Rife --- include/linux/bpf.h | 11 +++++++ kernel/bpf/syscall.c | 68 +++++++++++++++++++++++++++++++++++++++++ kernel/bpf/trampoline.c | 29 ++++++++++++++++++ 3 files changed, 108 insertions(+) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 09d5dc541d1ce..23fcbcd26aa44 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1420,6 +1420,9 @@ static inline int bpf_dynptr_check_off_len(const struct bpf_dynptr_kern *ptr, u6 int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr, struct bpf_prog *tgt_prog); +int bpf_trampoline_update_prog(struct bpf_tramp_link *link, + struct bpf_prog *new_prog, + struct bpf_trampoline *tr); int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr, struct bpf_prog *tgt_prog); @@ -1509,6 +1512,14 @@ static inline int bpf_trampoline_link_prog(struct bpf_tramp_link *link, { return -ENOTSUPP; } + +static inline int bpf_trampoline_update_prog(struct bpf_tramp_link *link, + struct bpf_prog *new_prog, + struct bpf_trampoline *tr) +{ + return -ENOTSUPP; +} + static inline int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr, struct bpf_prog *tgt_prog) diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index f62d61b6730a2..b0da7c428a659 100644 --- 
a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -63,6 +63,8 @@ static DEFINE_IDR(map_idr); static DEFINE_SPINLOCK(map_idr_lock); static DEFINE_IDR(link_idr); static DEFINE_SPINLOCK(link_idr_lock); +/* Synchronizes access to prog between link update operations. */ +static DEFINE_MUTEX(trace_link_mutex); int sysctl_unprivileged_bpf_disabled __read_mostly = IS_BUILTIN(CONFIG_BPF_UNPRIV_DEFAULT_OFF) ? 2 : 0; @@ -3562,11 +3564,77 @@ static int bpf_tracing_link_fill_link_info(const struct bpf_link *link, return 0; } +static int bpf_tracing_link_update_prog(struct bpf_link *link, + struct bpf_prog *new_prog, + struct bpf_prog *old_prog) +{ + struct bpf_tracing_link *tr_link = + container_of(link, struct bpf_tracing_link, link.link); + struct bpf_attach_target_info tgt_info = {0}; + int err = 0; + u32 btf_id; + + mutex_lock(&trace_link_mutex); + + if (old_prog && link->prog != old_prog) { + err = -EPERM; + goto out; + } + old_prog = link->prog; + if (old_prog->type != new_prog->type || + old_prog->expected_attach_type != new_prog->expected_attach_type) { + err = -EINVAL; + goto out; + } + + mutex_lock(&new_prog->aux->dst_mutex); + + if (!new_prog->aux->dst_trampoline || + new_prog->aux->dst_trampoline->key != tr_link->trampoline->key) { + bpf_trampoline_unpack_key(tr_link->trampoline->key, NULL, + &btf_id); + /* If there is no saved target, or the target associated with + * this link is different from the destination specified at + * load time, we need to check for compatibility. + */ + err = bpf_check_attach_target(NULL, new_prog, tr_link->tgt_prog, + btf_id, &tgt_info); + if (err) + goto out_unlock; + } + + err = bpf_trampoline_update_prog(&tr_link->link, new_prog, + tr_link->trampoline); + if (err) + goto out_unlock; + + /* Clear the trampoline, mod, and target prog from new_prog->aux to make + * sure the original attach destination is not kept alive after a + * program is (re-)attached to another target. 
+ */ + if (new_prog->aux->dst_prog) + bpf_prog_put(new_prog->aux->dst_prog); + bpf_trampoline_put(new_prog->aux->dst_trampoline); + module_put(new_prog->aux->mod); + + new_prog->aux->dst_prog = NULL; + new_prog->aux->dst_trampoline = NULL; + new_prog->aux->mod = tgt_info.tgt_mod; + tgt_info.tgt_mod = NULL; /* Make module_put() below do nothing. */ +out_unlock: + mutex_unlock(&new_prog->aux->dst_mutex); +out: + mutex_unlock(&trace_link_mutex); + module_put(tgt_info.tgt_mod); + return err; +} + static const struct bpf_link_ops bpf_tracing_link_lops = { .release = bpf_tracing_link_release, .dealloc = bpf_tracing_link_dealloc, .show_fdinfo = bpf_tracing_link_show_fdinfo, .fill_link_info = bpf_tracing_link_fill_link_info, + .update_prog = bpf_tracing_link_update_prog, }; static int bpf_tracing_prog_attach(struct bpf_prog *prog, diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c index 5949095e51c3d..010bcba0db65a 100644 --- a/kernel/bpf/trampoline.c +++ b/kernel/bpf/trampoline.c @@ -610,6 +610,35 @@ int bpf_trampoline_link_prog(struct bpf_tramp_link *link, return err; } +static int __bpf_trampoline_update_prog(struct bpf_tramp_link *link, + struct bpf_prog *new_prog, + struct bpf_trampoline *tr) +{ + return -ENOTSUPP; +} + +int bpf_trampoline_update_prog(struct bpf_tramp_link *link, + struct bpf_prog *new_prog, + struct bpf_trampoline *tr) +{ + struct bpf_prog *old_prog; + int err; + + mutex_lock(&tr->mutex); + err = __bpf_trampoline_update_prog(link, new_prog, tr); + if (!err) { + /* If a program update was successful, switch the program + * in the link before releasing tr->mutex; otherwise, another + * operation could come along and update the trampoline with + * the link still pointing at the old program. 
+ */ + old_prog = xchg(&link->link.prog, new_prog); + bpf_prog_put(old_prog); + } + mutex_unlock(&tr->mutex); + return err; +} + static int __bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr, struct bpf_prog *tgt_prog) From 7df78d3978c38422d031341290e582e4199880e1 Mon Sep 17 00:00:00 2001 From: Jordan Rife Date: Thu, 13 Nov 2025 19:03:28 -0800 Subject: [PATCH 2/7] bpf: Enable BPF_LINK_UPDATE for freplace links Implement program update logic for freplace links. Signed-off-by: Jordan Rife --- kernel/bpf/trampoline.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c index 010bcba0db65a..0b6a5433dd424 100644 --- a/kernel/bpf/trampoline.c +++ b/kernel/bpf/trampoline.c @@ -614,6 +614,21 @@ static int __bpf_trampoline_update_prog(struct bpf_tramp_link *link, struct bpf_prog *new_prog, struct bpf_trampoline *tr) { + enum bpf_tramp_prog_type kind; + int err = 0; + + kind = bpf_attach_type_to_tramp(link->link.prog); + if (kind == BPF_TRAMP_REPLACE) { + WARN_ON_ONCE(!tr->extension_prog); + err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP, + tr->extension_prog->bpf_func, + new_prog->bpf_func); + if (err) + return err; + tr->extension_prog = new_prog; + return 0; + } + return -ENOTSUPP; } From 1f366c19e20c6142148d9f3abe4b331b7958f9a5 Mon Sep 17 00:00:00 2001 From: Jordan Rife Date: Wed, 12 Nov 2025 15:32:32 -0800 Subject: [PATCH 3/7] bpf: Enable BPF_LINK_UPDATE for fentry/fexit/fmod_ret links Implement program update for other link types supported by bpf_tracing_link_lops (fentry, fexit, fmod_ret, ...). 
I wanted to make the implementation generic so that no architecture-specific support would be required, but couldn't think of a good way to do this: * I considered updating link.prog before calling bpf_trampoline_update and reverting it back to the old prog if the update fails, but this could create inconsistencies with concurrent operations that read links where they see the uncommitted program associated with the link. * I considered making a deep copy of the link whose program is being updated and putting a pointer to that copy into tlinks when calling bpf_trampoline_get_progs where the copy references the new program instead of the current program. This would avoid updating the original link.prog before the update was committed; however, this seemed slightly hacky and I wasn't sure if this was better than just making the architecture-specific layer aware of the intent to update one of the link programs. This patch sets up the scaffolding for trampoline program updates while subsequent patches enable this for various architectures. For now, only x86, arm64, and s390 are implemented since that's what I could test in CI. Add update_link and update_prog to bpf_tramp_links. When these are set, arch_bpf_trampoline_size() and arch_prepare_bpf_trampoline() use update_prog in place of update_link->link.prog when calculating the trampoline size and constructing a new trampoline image. link.prog is only updated after the trampoline update is successfully committed. If the current architecture does not support program updates (i.e. bpf_trampoline_supports_update_prog() is not implemented) then the BPF_LINK_UPDATE operation will return -ENOTSUPP. 
Signed-off-by: Jordan Rife --- include/linux/bpf.h | 10 ++++++++++ kernel/bpf/trampoline.c | 33 +++++++++++++++++++++++++++++---- 2 files changed, 39 insertions(+), 4 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 23fcbcd26aa44..7daf40cbdd9b7 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1215,6 +1215,8 @@ enum { struct bpf_tramp_links { struct bpf_tramp_link *links[BPF_MAX_TRAMP_LINKS]; + struct bpf_tramp_link *update_link; + struct bpf_prog *update_prog; int nr_links; }; @@ -1245,6 +1247,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i const struct btf_func_model *m, u32 flags, struct bpf_tramp_links *tlinks, void *func_addr); +bool bpf_trampoline_supports_update_prog(void); void *arch_alloc_bpf_trampoline(unsigned int size); void arch_free_bpf_trampoline(void *image, unsigned int size); int __must_check arch_protect_bpf_trampoline(void *image, unsigned int size); @@ -1840,6 +1843,13 @@ struct bpf_tramp_link { u64 cookie; }; +static inline struct bpf_prog * +bpf_tramp_links_prog(struct bpf_tramp_links *tl, int i) +{ + return tl->links[i] == tl->update_link ? 
tl->update_prog : + tl->links[i]->link.prog; +} + struct bpf_shim_tramp_link { struct bpf_tramp_link link; struct bpf_trampoline *trampoline; diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c index 0b6a5433dd424..0c0373b768163 100644 --- a/kernel/bpf/trampoline.c +++ b/kernel/bpf/trampoline.c @@ -230,7 +230,10 @@ static int register_fentry(struct bpf_trampoline *tr, void *new_addr) } static struct bpf_tramp_links * -bpf_trampoline_get_progs(const struct bpf_trampoline *tr, int *total, bool *ip_arg) +bpf_trampoline_get_progs(const struct bpf_trampoline *tr, + struct bpf_tramp_link *update_link, + struct bpf_prog *update_prog, + int *total, bool *ip_arg) { struct bpf_tramp_link *link; struct bpf_tramp_links *tlinks; @@ -250,6 +253,11 @@ bpf_trampoline_get_progs(const struct bpf_trampoline *tr, int *total, bool *ip_a hlist_for_each_entry(link, &tr->progs_hlist[kind], tramp_hlist) { *ip_arg |= link->link.prog->call_get_func_ip; *links++ = link; + if (link == update_link) { + *ip_arg |= update_prog->call_get_func_ip; + tlinks[kind].update_link = update_link; + tlinks[kind].update_prog = update_prog; + } } } return tlinks; @@ -395,7 +403,10 @@ static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key, int size) return ERR_PTR(err); } -static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mutex) +static int __bpf_trampoline_update(struct bpf_trampoline *tr, + struct bpf_tramp_link *update_link, + struct bpf_prog *update_prog, + bool lock_direct_mutex) { struct bpf_tramp_image *im; struct bpf_tramp_links *tlinks; @@ -403,7 +414,11 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mut bool ip_arg = false; int err, total, size; - tlinks = bpf_trampoline_get_progs(tr, &total, &ip_arg); + if (update_link && !bpf_trampoline_supports_update_prog()) + return -ENOTSUPP; + + tlinks = bpf_trampoline_get_progs(tr, update_link, update_prog, + &total, &ip_arg); if (IS_ERR(tlinks)) return PTR_ERR(tlinks); @@ -506,6 
+521,11 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mut goto out; } +static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mutex) +{ + return __bpf_trampoline_update(tr, NULL, NULL, lock_direct_mutex); +} + static enum bpf_tramp_prog_type bpf_attach_type_to_tramp(struct bpf_prog *prog) { switch (prog->expected_attach_type) { @@ -629,7 +649,7 @@ static int __bpf_trampoline_update_prog(struct bpf_tramp_link *link, return 0; } - return -ENOTSUPP; + return __bpf_trampoline_update(tr, link, new_prog, true); } int bpf_trampoline_update_prog(struct bpf_tramp_link *link, @@ -1139,6 +1159,11 @@ arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image return -ENOTSUPP; } +bool __weak bpf_trampoline_supports_update_prog(void) +{ + return false; +} + void * __weak arch_alloc_bpf_trampoline(unsigned int size) { void *image; From aba78db4f165650f9e4d466c30dcedd14a47c544 Mon Sep 17 00:00:00 2001 From: Jordan Rife Date: Wed, 12 Nov 2025 15:35:30 -0800 Subject: [PATCH 4/7] bpf, x86: Make program update work for trampoline ops Use update_prog in place of current link prog when link matches update_link. 
Signed-off-by: Jordan Rife --- arch/x86/net/bpf_jit_comp.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 36a0d4db9f686..2b3ea20b39b9a 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -2956,15 +2956,13 @@ static void restore_regs(const struct btf_func_model *m, u8 **prog, } static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog, - struct bpf_tramp_link *l, int stack_size, + struct bpf_prog *p, u64 cookie, int stack_size, int run_ctx_off, bool save_ret, void *image, void *rw_image) { u8 *prog = *pprog; u8 *jmp_insn; int ctx_cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie); - struct bpf_prog *p = l->link.prog; - u64 cookie = l->cookie; /* mov rdi, cookie */ emit_mov_imm64(&prog, BPF_REG_1, (long) cookie >> 32, (u32) (long) cookie); @@ -3079,7 +3077,8 @@ static int invoke_bpf(const struct btf_func_model *m, u8 **pprog, u8 *prog = *pprog; for (i = 0; i < tl->nr_links; i++) { - if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size, + if (invoke_bpf_prog(m, &prog, bpf_tramp_links_prog(tl, i), + tl->links[i]->cookie, stack_size, run_ctx_off, save_ret, image, rw_image)) return -EINVAL; } @@ -3101,8 +3100,9 @@ static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog, emit_mov_imm32(&prog, false, BPF_REG_0, 0); emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8); for (i = 0; i < tl->nr_links; i++) { - if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size, run_ctx_off, true, - image, rw_image)) + if (invoke_bpf_prog(m, &prog, bpf_tramp_links_prog(tl, i), + tl->links[i]->cookie, stack_size, + run_ctx_off, true, image, rw_image)) return -EINVAL; /* mod_ret prog stored return value into [rbp - 8]. 
Emit: @@ -3486,6 +3486,11 @@ int arch_protect_bpf_trampoline(void *image, unsigned int size) return 0; } +bool bpf_trampoline_supports_update_prog(void) +{ + return true; +} + int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end, const struct btf_func_model *m, u32 flags, struct bpf_tramp_links *tlinks, From 02492c6e53e26d24399a4c35929dc8972a378986 Mon Sep 17 00:00:00 2001 From: Jordan Rife Date: Wed, 12 Nov 2025 15:55:25 -0800 Subject: [PATCH 5/7] bpf, s390: Make program update work for trampoline ops Use update_prog in place of current link prog when link matches update_link. Signed-off-by: Jordan Rife --- arch/s390/net/bpf_jit_comp.c | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index cf461d76e9da3..bf70302d10093 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -2508,20 +2508,19 @@ static void load_imm64(struct bpf_jit *jit, int dst_reg, u64 val) static int invoke_bpf_prog(struct bpf_tramp_jit *tjit, const struct btf_func_model *m, - struct bpf_tramp_link *tlink, bool save_ret) + struct bpf_prog *p, u64 cookie, bool save_ret) { struct bpf_jit *jit = &tjit->common; int cookie_off = tjit->run_ctx_off + offsetof(struct bpf_tramp_run_ctx, bpf_cookie); - struct bpf_prog *p = tlink->link.prog; int patch; /* - * run_ctx.cookie = tlink->cookie; + * run_ctx.cookie = cookie; */ - /* %r0 = tlink->cookie */ - load_imm64(jit, REG_W0, tlink->cookie); + /* %r0 = cookie */ + load_imm64(jit, REG_W0, cookie); /* stg %r0,cookie_off(%r15) */ EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W0, REG_0, REG_15, cookie_off); @@ -2768,7 +2767,8 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, } for (i = 0; i < fentry->nr_links; i++) - if (invoke_bpf_prog(tjit, m, fentry->links[i], + if (invoke_bpf_prog(tjit, m, bpf_tramp_links_prog(fentry, i), + fentry->links[i]->cookie, flags & 
BPF_TRAMP_F_RET_FENTRY_RET)) return -EINVAL; @@ -2782,7 +2782,9 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, 0xf000 | tjit->retval_off); for (i = 0; i < fmod_ret->nr_links; i++) { - if (invoke_bpf_prog(tjit, m, fmod_ret->links[i], true)) + if (invoke_bpf_prog(tjit, m, + bpf_tramp_links_prog(fmod_ret, i), + fmod_ret->links[i]->cookie, true)) return -EINVAL; /* @@ -2850,7 +2852,8 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, /* do_fexit: */ tjit->do_fexit = jit->prg; for (i = 0; i < fexit->nr_links; i++) - if (invoke_bpf_prog(tjit, m, fexit->links[i], false)) + if (invoke_bpf_prog(tjit, m, bpf_tramp_links_prog(fexit, i), + fexit->links[i]->cookie, false)) return -EINVAL; if (flags & BPF_TRAMP_F_CALL_ORIG) { @@ -2946,6 +2949,11 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, return ret < 0 ? ret : tjit.common.prg; } +bool bpf_trampoline_supports_update_prog(void) +{ + return true; +} + bool bpf_jit_supports_subprog_tailcalls(void) { return true; From 9c2d435f12fec4c2e9dbe4c64d13b90b14ff402a Mon Sep 17 00:00:00 2001 From: Jordan Rife Date: Wed, 12 Nov 2025 16:03:41 -0800 Subject: [PATCH 6/7] bpf, arm64: Make program update work for trampoline ops Use update_prog in place of current link prog when link matches update_link. 
Signed-off-by: Jordan Rife --- arch/arm64/net/bpf_jit_comp.c | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index 0c9a50a1e73e7..1725d4cebdf2e 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -2284,24 +2284,23 @@ bool bpf_jit_supports_subprog_tailcalls(void) return true; } -static void invoke_bpf_prog(struct jit_ctx *ctx, struct bpf_tramp_link *l, +static void invoke_bpf_prog(struct jit_ctx *ctx, struct bpf_prog *p, u64 cookie, int bargs_off, int retval_off, int run_ctx_off, bool save_ret) { __le32 *branch; u64 enter_prog; u64 exit_prog; - struct bpf_prog *p = l->link.prog; int cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie); enter_prog = (u64)bpf_trampoline_enter(p); exit_prog = (u64)bpf_trampoline_exit(p); - if (l->cookie == 0) { + if (cookie == 0) { /* if cookie is zero, one instruction is enough to store it */ emit(A64_STR64I(A64_ZR, A64_SP, run_ctx_off + cookie_off), ctx); } else { - emit_a64_mov_i64(A64_R(10), l->cookie, ctx); + emit_a64_mov_i64(A64_R(10), cookie, ctx); emit(A64_STR64I(A64_R(10), A64_SP, run_ctx_off + cookie_off), ctx); } @@ -2362,7 +2361,8 @@ static void invoke_bpf_mod_ret(struct jit_ctx *ctx, struct bpf_tramp_links *tl, */ emit(A64_STR64I(A64_ZR, A64_SP, retval_off), ctx); for (i = 0; i < tl->nr_links; i++) { - invoke_bpf_prog(ctx, tl->links[i], bargs_off, retval_off, + invoke_bpf_prog(ctx, bpf_tramp_links_prog(tl, i), + tl->links[i]->cookie, bargs_off, retval_off, run_ctx_off, true); /* if (*(u64 *)(sp + retval_off) != 0) * goto do_fexit; @@ -2656,8 +2656,9 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im, } for (i = 0; i < fentry->nr_links; i++) - invoke_bpf_prog(ctx, fentry->links[i], bargs_off, - retval_off, run_ctx_off, + invoke_bpf_prog(ctx, bpf_tramp_links_prog(fentry, i), + fentry->links[i]->cookie, bargs_off, retval_off, + run_ctx_off, flags & 
BPF_TRAMP_F_RET_FENTRY_RET); if (fmod_ret->nr_links) { @@ -2691,7 +2692,8 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im, } for (i = 0; i < fexit->nr_links; i++) - invoke_bpf_prog(ctx, fexit->links[i], bargs_off, retval_off, + invoke_bpf_prog(ctx, bpf_tramp_links_prog(fexit, i), + fexit->links[i]->cookie, bargs_off, retval_off, run_ctx_off, false); if (flags & BPF_TRAMP_F_CALL_ORIG) { @@ -2829,6 +2831,11 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *ro_image, return ret; } +bool bpf_trampoline_supports_update_prog(void) +{ + return true; +} + static bool is_long_jump(void *ip, void *target) { long offset; From 32491b33f6a73e213ee6020fb10d6ea5d08981ce Mon Sep 17 00:00:00 2001 From: Jordan Rife Date: Mon, 3 Nov 2025 16:39:42 -0800 Subject: [PATCH 7/7] selftests/bpf: Test BPF_LINK_UPDATE behavior for tracing links Exercise a series of edge cases and happy path scenarios across a gamut of link types (fentry, fmod_ret, fexit, freplace) to test BPF_LINK_UPDATE behavior for tracing links. This test swaps fentry/fmod_ret/fexit programs in-place and makes sure that the sequence and attach cookie are as expected based on the link positions. 
Signed-off-by: Jordan Rife --- .../bpf/prog_tests/prog_update_tracing.c | 460 ++++++++++++++++++ .../selftests/bpf/progs/prog_update_tracing.c | 133 +++++ 2 files changed, 593 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/prog_update_tracing.c create mode 100644 tools/testing/selftests/bpf/progs/prog_update_tracing.c diff --git a/tools/testing/selftests/bpf/prog_tests/prog_update_tracing.c b/tools/testing/selftests/bpf/prog_tests/prog_update_tracing.c new file mode 100644 index 0000000000000..0b1e0d780c822 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/prog_update_tracing.c @@ -0,0 +1,460 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <test_progs.h> +#include <network_helpers.h> +#include "prog_update_tracing.skel.h" +#include "test_pkt_access.skel.h" + +static bool assert_program_order(struct prog_update_tracing *skel_trace, + struct test_pkt_access *skel_pkt, + const struct prog_update_tracing__bss *expected) +{ + LIBBPF_OPTS(bpf_test_run_opts, topts_trace); + LIBBPF_OPTS(bpf_test_run_opts, topts_skb, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); + int prog_fd; + + prog_fd = bpf_program__fd(skel_trace->progs.fmod_ret1); + if (!ASSERT_OK(bpf_prog_test_run_opts(prog_fd, &topts_trace), + "bpf_prog_test_run trace")) + return false; + + if (!ASSERT_EQ(skel_trace->bss->fentry1_seq, + expected->fentry1_seq, "fentry1_seq")) + return false; + if (!ASSERT_EQ(skel_trace->bss->fentry1_cookie, + expected->fentry1_cookie, "fentry1_cookie")) + return false; + if (!ASSERT_EQ(skel_trace->bss->fentry2_seq, + expected->fentry2_seq, "fentry2_seq")) + return false; + if (!ASSERT_EQ(skel_trace->bss->fentry2_cookie, + expected->fentry2_cookie, "fentry2_cookie")) + return false; + if (!ASSERT_EQ(skel_trace->bss->fmod_ret1_seq, + expected->fmod_ret1_seq, "fmod_ret1_seq")) + return false; + if (!ASSERT_EQ(skel_trace->bss->fmod_ret1_cookie, + expected->fmod_ret1_cookie, "fmod_ret1_cookie")) + return false; + if 
(!ASSERT_EQ(skel_trace->bss->fmod_ret2_seq, + expected->fmod_ret2_seq, "fmod_ret2_seq")) + return false; + if (!ASSERT_EQ(skel_trace->bss->fmod_ret2_cookie, + expected->fmod_ret2_cookie, "fmod_ret2_cookie")) + return false; + if (!ASSERT_EQ(skel_trace->bss->fexit1_seq, + expected->fexit1_seq, "fexit1_seq")) + return false; + if (!ASSERT_EQ(skel_trace->bss->fexit1_cookie, + expected->fexit1_cookie, "fexit1_cookie")) + return false; + if (!ASSERT_EQ(skel_trace->bss->fexit2_seq, + expected->fexit2_seq, "fexit2_seq")) + return false; + if (!ASSERT_EQ(skel_trace->bss->fexit2_cookie, + expected->fexit2_cookie, "fexit2_cookie")) + return false; + if (!ASSERT_EQ(skel_trace->bss->sequence, expected->sequence, + "sequence")) + return false; + if (!ASSERT_EQ(skel_trace->bss->sequence_bpf, 0, "sequence_bpf")) + return false; + + prog_fd = bpf_program__fd(skel_pkt->progs.test_pkt_access); + if (!ASSERT_OK(bpf_prog_test_run_opts(prog_fd, &topts_skb), + "bpf_prog_test_run skb")) + return false; + + if (!ASSERT_EQ(skel_trace->bss->sequence, expected->sequence, + "sequence")) + return false; + if (!ASSERT_EQ(skel_trace->bss->fentry1_bpf_seq, + expected->fentry1_bpf_seq, "fentry1_bpf_seq")) + return false; + if (!ASSERT_EQ(skel_trace->bss->fentry1_bpf_cookie, + expected->fentry1_bpf_cookie, "fentry1_bpf_cookie")) + return false; + if (!ASSERT_EQ(skel_trace->bss->fentry2_bpf_seq, + expected->fentry2_bpf_seq, "fentry2_bpf_seq")) + return false; + if (!ASSERT_EQ(skel_trace->bss->fentry2_bpf_cookie, + expected->fentry2_bpf_cookie, "fentry2_bpf_cookie")) + return false; + if (!ASSERT_EQ(skel_trace->bss->freplace1_bpf_seq, + expected->freplace1_bpf_seq, "freplace1_bpf_seq")) + return false; + if (!ASSERT_EQ(skel_trace->bss->freplace2_bpf_seq, + expected->freplace2_bpf_seq, "freplace2_bpf_seq")) + return false; + if (!ASSERT_EQ(skel_trace->bss->fexit1_bpf_seq, + expected->fexit1_bpf_seq, "fexit1_bpf_seq")) + return false; + if (!ASSERT_EQ(skel_trace->bss->fexit1_bpf_cookie, + 
expected->fexit1_bpf_cookie, "fexit1_bpf_cookie")) + return false; + if (!ASSERT_EQ(skel_trace->bss->fexit2_bpf_seq, + expected->fexit2_bpf_seq, "fexit2_bpf_seq")) + return false; + if (!ASSERT_EQ(skel_trace->bss->fexit2_bpf_cookie, + expected->fexit2_bpf_cookie, "fexit2_bpf_cookie")) + return false; + if (!ASSERT_EQ(skel_trace->bss->sequence_bpf, expected->sequence_bpf, + "sequence_bpf")) + return false; + + memset(skel_trace->bss, 0, sizeof(*skel_trace->bss)); + + return true; +} + +void test_prog_update_tracing(void) +{ + const struct prog_update_tracing__bss expected_inverted = { + .fentry1_seq = 2, + .fentry1_cookie = 102, + .fentry2_seq = 1, + .fentry2_cookie = 101, + .fmod_ret1_seq = 4, + .fmod_ret1_cookie = 104, + .fmod_ret2_seq = 3, + .fmod_ret2_cookie = 103, + .fexit1_seq = 6, + .fexit1_cookie = 106, + .fexit2_seq = 5, + .fexit2_cookie = 105, + .sequence = 6, + .fentry1_bpf_seq = 2, + .fentry1_bpf_cookie = 202, + .fentry2_bpf_seq = 1, + .fentry2_bpf_cookie = 201, + .freplace1_bpf_seq = 3, + .freplace2_bpf_seq = 0, + .fexit1_bpf_seq = 5, + .fexit1_bpf_cookie = 205, + .fexit2_bpf_seq = 4, + .fexit2_bpf_cookie = 204, + .sequence_bpf = 5, + }; + const struct prog_update_tracing__bss expected_normal = { + .fentry1_seq = 1, + .fentry1_cookie = 101, + .fentry2_seq = 2, + .fentry2_cookie = 102, + .fmod_ret1_seq = 3, + .fmod_ret1_cookie = 103, + .fmod_ret2_seq = 4, + .fmod_ret2_cookie = 104, + .fexit1_seq = 5, + .fexit1_cookie = 105, + .fexit2_seq = 6, + .fexit2_cookie = 106, + .sequence = 6, + .fentry1_bpf_seq = 1, + .fentry1_bpf_cookie = 201, + .fentry2_bpf_seq = 2, + .fentry2_bpf_cookie = 202, + .freplace1_bpf_seq = 3, + .freplace2_bpf_seq = 0, + .fexit1_bpf_seq = 4, + .fexit1_bpf_cookie = 204, + .fexit2_bpf_seq = 5, + .fexit2_bpf_cookie = 205, + .sequence_bpf = 5, + }; + LIBBPF_OPTS(bpf_link_update_opts, update_opts); + LIBBPF_OPTS(bpf_trace_opts, trace_opts); + struct prog_update_tracing *skel_trace1 = NULL; + struct prog_update_tracing *skel_trace2 = NULL; + 
struct test_pkt_access *skel_pkt1 = NULL; + struct test_pkt_access *skel_pkt2 = NULL; + int link_fd, prog_fd, err; + struct bpf_link *link; + + skel_trace1 = prog_update_tracing__open(); + if (!ASSERT_OK_PTR(skel_trace1, "skel_trace1")) + goto cleanup; + skel_pkt1 = test_pkt_access__open_and_load(); + if (!ASSERT_OK_PTR(skel_pkt1, "skel_pkt1")) + goto cleanup; + prog_fd = bpf_program__fd(skel_pkt1->progs.test_pkt_access); + bpf_program__set_attach_target(skel_trace1->progs.fentry1_bpf, prog_fd, + NULL); + bpf_program__set_attach_target(skel_trace1->progs.fentry2_bpf, prog_fd, + NULL); + bpf_program__set_attach_target(skel_trace1->progs.fexit1_bpf, prog_fd, + NULL); + bpf_program__set_attach_target(skel_trace1->progs.fexit2_bpf, prog_fd, + NULL); + bpf_program__set_attach_target(skel_trace1->progs.freplace1_bpf, + prog_fd, "test_pkt_access_subprog3"); + bpf_program__set_attach_target(skel_trace1->progs.freplace2_bpf, + prog_fd, "test_pkt_access_subprog3"); + bpf_program__set_attach_target(skel_trace1->progs.freplace3_bpf, + prog_fd, "get_skb_ifindex"); + err = prog_update_tracing__load(skel_trace1); + if (!ASSERT_OK(err, "skel_trace1 load")) + goto cleanup; + trace_opts.cookie = expected_normal.fentry2_cookie; + link = bpf_program__attach_trace_opts(skel_trace1->progs.fentry2, + &trace_opts); + if (!ASSERT_OK_PTR(link, "attach fentry2")) + goto cleanup; + skel_trace1->links.fentry2 = link; + trace_opts.cookie = expected_normal.fentry1_cookie; + link = bpf_program__attach_trace_opts(skel_trace1->progs.fentry1, + &trace_opts); + if (!ASSERT_OK_PTR(link, "attach fentry1")) + goto cleanup; + skel_trace1->links.fentry1 = link; + trace_opts.cookie = expected_normal.fmod_ret2_cookie; + link = bpf_program__attach_trace_opts(skel_trace1->progs.fmod_ret2, + &trace_opts); + if (!ASSERT_OK_PTR(link, "attach fmod_ret2")) + goto cleanup; + skel_trace1->links.fmod_ret2 = link; + trace_opts.cookie = expected_normal.fmod_ret1_cookie; + link = 
bpf_program__attach_trace_opts(skel_trace1->progs.fmod_ret1, + &trace_opts); + if (!ASSERT_OK_PTR(link, "attach fmod_ret1")) + goto cleanup; + skel_trace1->links.fmod_ret1 = link; + trace_opts.cookie = expected_normal.fexit2_cookie; + link = bpf_program__attach_trace_opts(skel_trace1->progs.fexit2, + &trace_opts); + if (!ASSERT_OK_PTR(link, "attach fexit2")) + goto cleanup; + skel_trace1->links.fexit2 = link; + trace_opts.cookie = expected_normal.fexit1_cookie; + link = bpf_program__attach_trace_opts(skel_trace1->progs.fexit1, + &trace_opts); + if (!ASSERT_OK_PTR(link, "attach fexit1")) + goto cleanup; + skel_trace1->links.fexit1 = link; + trace_opts.cookie = expected_normal.fentry2_bpf_cookie; + link = bpf_program__attach_trace_opts(skel_trace1->progs.fentry2_bpf, + &trace_opts); + if (!ASSERT_OK_PTR(link, "attach fentry2_bpf")) + goto cleanup; + skel_trace1->links.fentry2_bpf = link; + trace_opts.cookie = expected_normal.fentry1_bpf_cookie; + link = bpf_program__attach_trace_opts(skel_trace1->progs.fentry1_bpf, + &trace_opts); + if (!ASSERT_OK_PTR(link, "attach fentry1_bpf")) + goto cleanup; + skel_trace1->links.fentry1_bpf = link; + trace_opts.cookie = expected_normal.fexit2_bpf_cookie; + link = bpf_program__attach_trace_opts(skel_trace1->progs.fexit2_bpf, + &trace_opts); + if (!ASSERT_OK_PTR(link, "attach fexit2_bpf")) + goto cleanup; + skel_trace1->links.fexit2_bpf = link; + trace_opts.cookie = expected_normal.fexit1_bpf_cookie; + link = bpf_program__attach_trace_opts(skel_trace1->progs.fexit1_bpf, + &trace_opts); + if (!ASSERT_OK_PTR(link, "attach fexit1_bpf")) + goto cleanup; + skel_trace1->links.fexit1_bpf = link; + link = bpf_program__attach_trace(skel_trace1->progs.freplace1_bpf); + if (!ASSERT_OK_PTR(link, "attach freplace1_bpf")) + goto cleanup; + skel_trace1->links.freplace1_bpf = link; + if (!assert_program_order(skel_trace1, skel_pkt1, &expected_normal)) + goto cleanup; + + /* Program update should fail if expected_attach_type is a mismatch. 
*/ + err = bpf_link__update_program(skel_trace1->links.fentry1, + skel_trace1->progs.fexit1); + if (!ASSERT_EQ(err, -EINVAL, + "fentry1 update wrong expected_attach_type")) + goto cleanup; + if (!assert_program_order(skel_trace1, skel_pkt1, &expected_normal)) + goto cleanup; + + /* Program update should fail if type is a mismatch. */ + err = bpf_link__update_program(skel_trace1->links.fentry1, + skel_pkt1->progs.test_pkt_access); + if (!ASSERT_EQ(err, -EINVAL, "fentry1 update wrong type")) + goto cleanup; + if (!assert_program_order(skel_trace1, skel_pkt1, &expected_normal)) + goto cleanup; + + /* Program update should fail if program is incompatible */ + err = bpf_link__update_program(skel_trace1->links.freplace1_bpf, + skel_trace1->progs.freplace3_bpf); + if (!ASSERT_EQ(err, -EINVAL, "freplace1_bpf update incompatible")) + goto cleanup; + if (!assert_program_order(skel_trace1, skel_pkt1, &expected_normal)) + goto cleanup; + + /* Update with BPF_F_REPLACE should fail if old_prog_fd does not match + * current link program. + */ + prog_fd = bpf_program__fd(skel_trace1->progs.fentry2); + link_fd = bpf_link__fd(skel_trace1->links.fentry1); + update_opts.old_prog_fd = bpf_program__fd(skel_trace1->progs.fentry2); + update_opts.flags = BPF_F_REPLACE; + err = bpf_link_update(link_fd, prog_fd, &update_opts); + if (!ASSERT_EQ(err, -EPERM, "BPF_F_REPLACE EPERM")) + goto cleanup; + if (!assert_program_order(skel_trace1, skel_pkt1, &expected_normal)) + goto cleanup; + + /* Do an in-place swap of program order using link updates. + * + * Update with BPF_F_REPLACE should succeed if old_prog_fd matches + * current link program. + * Update should succeed if the new program's dst_trampoline has been + * cleared by a previous attach operation. 
+ */ + update_opts.old_prog_fd = bpf_program__fd(skel_trace1->progs.fentry1); + err = bpf_link_update(link_fd, prog_fd, &update_opts); + if (!ASSERT_OK(err, "BPF_F_REPLACE")) + goto cleanup; + err = bpf_link__update_program(skel_trace1->links.fentry2, + skel_trace1->progs.fentry1); + if (!ASSERT_OK(err, "fentry2 update")) + goto cleanup; + err = bpf_link__update_program(skel_trace1->links.fmod_ret1, + skel_trace1->progs.fmod_ret2); + if (!ASSERT_OK(err, "fmod_ret1 update")) + goto cleanup; + err = bpf_link__update_program(skel_trace1->links.fmod_ret2, + skel_trace1->progs.fmod_ret1); + if (!ASSERT_OK(err, "fmod_ret2 update")) + goto cleanup; + err = bpf_link__update_program(skel_trace1->links.fexit1, + skel_trace1->progs.fexit2); + if (!ASSERT_OK(err, "fexit1 update")) + goto cleanup; + err = bpf_link__update_program(skel_trace1->links.fexit2, + skel_trace1->progs.fexit1); + if (!ASSERT_OK(err, "fexit2 update")) + goto cleanup; + err = bpf_link__update_program(skel_trace1->links.fentry1_bpf, + skel_trace1->progs.fentry2_bpf); + if (!ASSERT_OK(err, "fentry1_bpf update")) + goto cleanup; + err = bpf_link__update_program(skel_trace1->links.fentry2_bpf, + skel_trace1->progs.fentry1_bpf); + if (!ASSERT_OK(err, "fentry2_bpf update")) + goto cleanup; + err = bpf_link__update_program(skel_trace1->links.fexit1_bpf, + skel_trace1->progs.fexit2_bpf); + if (!ASSERT_OK(err, "fexit1_bpf update")) + goto cleanup; + err = bpf_link__update_program(skel_trace1->links.fexit2_bpf, + skel_trace1->progs.fexit1_bpf); + if (!ASSERT_OK(err, "fexit2_bpf update")) + goto cleanup; + /* Only one freplace program can be attached at a time, so instead of + * swapping the order as with fentry/fmod_ret/fexit just shuffle + * freplace1_bpf and freplace_bpf2. 
+ */ + err = bpf_link__destroy(skel_trace1->links.freplace1_bpf); + if (!ASSERT_OK(err, "freplace1_bpf destroy")) + goto cleanup; + skel_trace1->links.freplace1_bpf = NULL; + link = bpf_program__attach_trace(skel_trace1->progs.freplace2_bpf); + if (!ASSERT_OK_PTR(link, "attach freplace2_bpf")) + goto cleanup; + skel_trace1->links.freplace2_bpf = link; + err = bpf_link__update_program(skel_trace1->links.freplace2_bpf, + skel_trace1->progs.freplace1_bpf); + if (!ASSERT_OK(err, "freplace2_bpf update")) + goto cleanup; + if (!assert_program_order(skel_trace1, skel_pkt1, &expected_inverted)) + goto cleanup; + + /* Update should work when the new program's dst_trampoline points to + * another trampoline or the same trampoline. + */ + skel_trace2 = prog_update_tracing__open(); + if (!ASSERT_OK_PTR(skel_trace2, "skel_trace2")) + goto cleanup; + skel_pkt2 = test_pkt_access__open_and_load(); + if (!ASSERT_OK_PTR(skel_pkt2, "skel_pkt2")) + goto cleanup; + prog_fd = bpf_program__fd(skel_pkt2->progs.test_pkt_access); + bpf_program__set_attach_target(skel_trace2->progs.fentry1_bpf, prog_fd, + NULL); + bpf_program__set_attach_target(skel_trace2->progs.fentry2_bpf, prog_fd, + NULL); + bpf_program__set_attach_target(skel_trace2->progs.fexit1_bpf, prog_fd, + NULL); + bpf_program__set_attach_target(skel_trace2->progs.fexit2_bpf, prog_fd, + NULL); + bpf_program__set_attach_target(skel_trace2->progs.freplace1_bpf, + prog_fd, "test_pkt_access_subprog3"); + bpf_program__set_attach_target(skel_trace2->progs.freplace2_bpf, + prog_fd, "test_pkt_access_subprog3"); + bpf_program__set_attach_target(skel_trace2->progs.freplace3_bpf, + prog_fd, "get_skb_ifindex"); + err = prog_update_tracing__load(skel_trace2); + if (!ASSERT_OK(err, "skel_trace2 load")) + goto cleanup; + err = bpf_link__update_program(skel_trace1->links.fentry1, + skel_trace2->progs.fentry1); + if (!ASSERT_OK(err, "fentry1 update")) + goto cleanup; + err = bpf_link__update_program(skel_trace1->links.fentry2, + 
skel_trace2->progs.fentry2); + if (!ASSERT_OK(err, "fentry2 update")) + goto cleanup; + err = bpf_link__update_program(skel_trace1->links.fmod_ret1, + skel_trace2->progs.fmod_ret1); + if (!ASSERT_OK(err, "fmod_ret1 update")) + goto cleanup; + err = bpf_link__update_program(skel_trace1->links.fmod_ret2, + skel_trace2->progs.fmod_ret2); + if (!ASSERT_OK(err, "fmod_ret2 update")) + goto cleanup; + err = bpf_link__update_program(skel_trace1->links.fexit1, + skel_trace2->progs.fexit1); + if (!ASSERT_OK(err, "fexit1 update")) + goto cleanup; + err = bpf_link__update_program(skel_trace1->links.fexit2, + skel_trace2->progs.fexit2); + if (!ASSERT_OK(err, "fexit2 update")) + goto cleanup; + err = bpf_link__update_program(skel_trace1->links.fentry1_bpf, + skel_trace2->progs.fentry1_bpf); + if (!ASSERT_OK(err, "fentry1_bpf update")) + goto cleanup; + err = bpf_link__update_program(skel_trace1->links.fentry2_bpf, + skel_trace2->progs.fentry2_bpf); + if (!ASSERT_OK(err, "fentry2_bpf update")) + goto cleanup; + err = bpf_link__update_program(skel_trace1->links.freplace2_bpf, + skel_trace2->progs.freplace1_bpf); + if (!ASSERT_OK(err, "freplace2_bpf update")) + goto cleanup; + err = bpf_link__update_program(skel_trace1->links.fexit1_bpf, + skel_trace2->progs.fexit1_bpf); + if (!ASSERT_OK(err, "fexit1_bpf update")) + goto cleanup; + err = bpf_link__update_program(skel_trace1->links.fexit2_bpf, + skel_trace2->progs.fexit2_bpf); + if (!ASSERT_OK(err, "fexit2_bpf update")) + goto cleanup; + if (!assert_program_order(skel_trace2, skel_pkt1, &expected_normal)) + goto cleanup; + + /* Update should work when the new program is the same as the current + * program. 
+ */ + err = bpf_link__update_program(skel_trace1->links.fentry1, + skel_trace2->progs.fentry1); + if (!ASSERT_OK(err, "fentry1 update")) + goto cleanup; + if (!assert_program_order(skel_trace2, skel_pkt1, &expected_normal)) + goto cleanup; +cleanup: + prog_update_tracing__destroy(skel_trace1); + prog_update_tracing__destroy(skel_trace2); + test_pkt_access__destroy(skel_pkt1); + test_pkt_access__destroy(skel_pkt2); +} diff --git a/tools/testing/selftests/bpf/progs/prog_update_tracing.c b/tools/testing/selftests/bpf/progs/prog_update_tracing.c new file mode 100644 index 0000000000000..bdf314e284259 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/prog_update_tracing.c @@ -0,0 +1,133 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include "vmlinux.h" +#include +#include + +__u64 sequence = 0; + +__u64 fentry1_seq = 0; +__u64 fentry1_cookie = 0; +SEC("fentry/bpf_modify_return_test") +int BPF_PROG(fentry1, int a, __u64 b) +{ + fentry1_seq = ++sequence; + fentry1_cookie = bpf_get_attach_cookie(ctx); + return 0; +} + +__u64 fentry2_seq = 0; +__u64 fentry2_cookie = 0; +SEC("fentry/bpf_modify_return_test") +int BPF_PROG(fentry2, int a, __u64 b) +{ + fentry2_seq = ++sequence; + fentry2_cookie = bpf_get_attach_cookie(ctx); + return 0; +} + +__u64 fmod_ret1_seq = 0; +__u64 fmod_ret1_cookie = 0; +SEC("fmod_ret/bpf_modify_return_test") +int BPF_PROG(fmod_ret1, int a, int *b, int ret) +{ + fmod_ret1_seq = ++sequence; + fmod_ret1_cookie = bpf_get_attach_cookie(ctx); + return ret; +} + +__u64 fmod_ret2_seq = 0; +__u64 fmod_ret2_cookie = 0; +SEC("fmod_ret/bpf_modify_return_test") +int BPF_PROG(fmod_ret2, int a, int *b, int ret) +{ + fmod_ret2_seq = ++sequence; + fmod_ret2_cookie = bpf_get_attach_cookie(ctx); + return ret; +} + +__u64 fexit1_seq = 0; +__u64 fexit1_cookie = 0; +SEC("fexit/bpf_modify_return_test") +int BPF_PROG(fexit1, int a, __u64 b, int ret) +{ + fexit1_seq = ++sequence; + fexit1_cookie = bpf_get_attach_cookie(ctx); + return 0; +} + +__u64 fexit2_seq = 0; +__u64 
fexit2_cookie = 0; +SEC("fexit/bpf_modify_return_test") +int BPF_PROG(fexit2, int a, __u64 b, int ret) +{ + fexit2_seq = ++sequence; + fexit2_cookie = bpf_get_attach_cookie(ctx); + return 0; +} + +__u64 sequence_bpf = 0; + +__u64 fentry1_bpf_seq = 0; +__u64 fentry1_bpf_cookie = 0; +SEC("fentry/test_pkt_access") +int fentry1_bpf(struct __sk_buff *skb) +{ + fentry1_bpf_seq = ++sequence_bpf; + fentry1_bpf_cookie = bpf_get_attach_cookie(skb); + return 0; +} + +__u64 fentry2_bpf_seq = 0; +__u64 fentry2_bpf_cookie = 0; +SEC("fentry/test_pkt_access") +int fentry2_bpf(struct __sk_buff *skb) +{ + fentry2_bpf_seq = ++sequence_bpf; + fentry2_bpf_cookie = bpf_get_attach_cookie(skb); + return 0; +} + +__u64 freplace1_bpf_seq = 0; +SEC("freplace/test_pkt_access_subprog3") +int freplace1_bpf(int val, struct __sk_buff *skb) +{ + freplace1_bpf_seq = ++sequence_bpf; + return 0; +} + +__u64 freplace2_bpf_seq = 0; +SEC("freplace/test_pkt_access_subprog3") +int freplace2_bpf(int val, struct __sk_buff *skb) +{ + freplace2_bpf_seq = ++sequence_bpf; + return 0; +} + +SEC("freplace/get_skb_ifindex") +int freplace3_bpf(int val, struct __sk_buff *skb, int var) +{ + return 0; +} + +__u64 fexit1_bpf_seq = 0; +__u64 fexit1_bpf_cookie = 0; +SEC("fexit/test_pkt_access") +int fexit1_bpf(struct __sk_buff *skb, int ret) +{ + fexit1_bpf_seq = ++sequence_bpf; + fexit1_bpf_cookie = bpf_get_attach_cookie(skb); + return 0; +} + +__u64 fexit2_bpf_seq = 0; +__u64 fexit2_bpf_cookie = 0; +SEC("fexit/test_pkt_access") +int fexit2_bpf(struct __sk_buff *skb, int ret) +{ + fexit2_bpf_seq = ++sequence_bpf; + fexit2_bpf_cookie = bpf_get_attach_cookie(skb); + return 0; +} + +char _license[] SEC("license") = "GPL";