27 changes: 26 additions & 1 deletion include/linux/bpf.h
@@ -31,6 +31,7 @@
#include <linux/static_call.h>
#include <linux/memcontrol.h>
#include <linux/cfi.h>
+#include <linux/unaligned.h>
#include <asm/rqspinlock.h>

struct bpf_verifier_env;
@@ -1729,6 +1730,8 @@ struct bpf_prog_aux {
struct bpf_stream stream[2];
};

+#define BPF_NR_CONTEXTS 4 /* normal, softirq, hardirq, NMI */

struct bpf_prog {
u16 pages; /* Number of allocated pages */
u16 jited:1, /* Is our filter JIT'ed? */
@@ -1755,7 +1758,7 @@ struct bpf_prog {
u8 tag[BPF_TAG_SIZE];
};
struct bpf_prog_stats __percpu *stats;
-int __percpu *active;
+u8 __percpu *active; /* u8[BPF_NR_CONTEXTS] for recursion protection */
unsigned int (*bpf_func)(const void *ctx,
const struct bpf_insn *insn);
struct bpf_prog_aux *aux; /* Auxiliary fields */
@@ -1985,6 +1988,28 @@ struct bpf_struct_ops_common_value {
enum bpf_struct_ops_state state;
};

+static inline bool bpf_prog_get_recursion_context(struct bpf_prog *prog)
+{
+	u8 rctx = interrupt_context_level();
+	u8 *active = this_cpu_ptr(prog->active);
+
+	active[rctx]++;
+	barrier();
+	if (get_unaligned_le32(active) != BIT(rctx * 8))
+		return false;
+
+	return true;
+}
+
+static inline void bpf_prog_put_recursion_context(struct bpf_prog *prog)
+{
+	u8 rctx = interrupt_context_level();
+	u8 *active = this_cpu_ptr(prog->active);
+
+	barrier();
+	active[rctx]--;
+}

#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
/* This macro helps developer to register a struct_ops type and generate
* type information correctly. Developers should use this macro to register
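The core of the change is the packed-counter check in bpf_prog_get_recursion_context() above: each context level returned by interrupt_context_level() (0 task, 1 softirq, 2 hardirq, 3 NMI, matching the BPF_NR_CONTEXTS comment) owns one byte of the 4-byte per-CPU array, and a single get_unaligned_le32() load confirms that the caller's own byte is exactly 1 while every other byte is 0. A minimal userspace model of that check, with load_le32() standing in for get_unaligned_le32() and assuming a little-endian host (names here are illustrative, not kernel API):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define NR_CONTEXTS 4			/* normal, softirq, hardirq, NMI */

static uint8_t active[NR_CONTEXTS];	/* models one CPU's prog->active[] */

static uint32_t load_le32(const uint8_t *p)
{
	uint32_t v;

	memcpy(&v, p, sizeof(v));	/* model assumes a little-endian host */
	return v;
}

static bool get_recursion_context(unsigned int rctx)
{
	active[rctx]++;
	/* Allowed only if our byte is 1 and the other three bytes are 0. */
	return load_le32(active) == (UINT32_C(1) << (rctx * 8));
}

static void put_recursion_context(unsigned int rctx)
{
	active[rctx]--;
}

int main(void)
{
	assert(get_recursion_context(0));	/* first entry in task context */
	assert(!get_recursion_context(0));	/* same-level re-entry is rejected */
	assert(!get_recursion_context(2));	/* any other non-zero byte also fails the equality */
	put_recursion_context(2);
	put_recursion_context(0);		/* every get, failed or not, is paired with a put */
	put_recursion_context(0);
	assert(get_recursion_context(0));	/* all counters back at zero: allowed again */
	put_recursion_context(0);
	return 0;
}

The callers below (trampoline.c and bpf_trace.c) follow the same pairing rule: the put runs even when the get failed, because the byte was already incremented.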
3 changes: 2 additions & 1 deletion kernel/bpf/core.c
@@ -112,7 +112,8 @@ struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flag
vfree(fp);
return NULL;
}
-fp->active = alloc_percpu_gfp(int, bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags));
+fp->active = __alloc_percpu_gfp(sizeof(u8[BPF_NR_CONTEXTS]), 8,
+	bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags));
if (!fp->active) {
vfree(fp);
kfree(aux);
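For reference, alloc_percpu_gfp(type, gfp) is a wrapper around __alloc_percpu_gfp(sizeof(type), __alignof__(type), gfp), so the per-CPU object stays 4 bytes; only the element type (int to u8[BPF_NR_CONTEXTS]) and the explicit 8-byte alignment change, the latter keeping the four counters inside one aligned word for the combined 32-bit read. A sketch of the before/after, using a hypothetical helper name purely for illustration:

#include <linux/bpf.h>
#include <linux/build_bug.h>
#include <linux/percpu.h>

/* Hypothetical helper, for illustration only: contrasts the old and new
 * per-CPU allocations for prog->active. */
static __always_unused void recursion_counters_alloc_sketch(gfp_t gfp)
{
	int __percpu *old_style;
	u8 __percpu *new_style;

	/* Old: a single per-CPU int recursion counter. */
	old_style = alloc_percpu_gfp(int, gfp);

	/* New: four per-CPU u8 counters, one per context level, 8-byte
	 * aligned so the combined 32-bit read in
	 * bpf_prog_get_recursion_context() sits in one aligned word. */
	new_style = __alloc_percpu_gfp(sizeof(u8[BPF_NR_CONTEXTS]), 8, gfp);

	/* Same object size either way. */
	BUILD_BUG_ON(sizeof(u8[BPF_NR_CONTEXTS]) != sizeof(int));

	free_percpu(old_style);
	free_percpu(new_style);
}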
8 changes: 4 additions & 4 deletions kernel/bpf/trampoline.c
@@ -903,7 +903,7 @@ static u64 notrace __bpf_prog_enter_recur(struct bpf_prog *prog, struct bpf_tram

run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);

-if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
+if (unlikely(!bpf_prog_get_recursion_context(prog))) {
bpf_prog_inc_misses_counter(prog);
if (prog->aux->recursion_detected)
prog->aux->recursion_detected(prog);
@@ -947,7 +947,7 @@ static void notrace __bpf_prog_exit_recur(struct bpf_prog *prog, u64 start,
bpf_reset_run_ctx(run_ctx->saved_run_ctx);

update_prog_stats(prog, start);
-this_cpu_dec(*(prog->active));
+bpf_prog_put_recursion_context(prog);
rcu_read_unlock_migrate();
}

@@ -983,7 +983,7 @@ u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog,

run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);

-if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
+if (unlikely(!bpf_prog_get_recursion_context(prog))) {
bpf_prog_inc_misses_counter(prog);
if (prog->aux->recursion_detected)
prog->aux->recursion_detected(prog);
@@ -998,7 +998,7 @@ void notrace __bpf_prog_exit_sleepable_recur(struct bpf_prog *prog, u64 start,
bpf_reset_run_ctx(run_ctx->saved_run_ctx);

update_prog_stats(prog, start);
-this_cpu_dec(*(prog->active));
+bpf_prog_put_recursion_context(prog);
migrate_enable();
rcu_read_unlock_trace();
}
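Condensed, the calling convention the trampoline paths above (and the tracepoint path below) now follow is: try the get, skip the program and count a miss when it fails, and always issue the put on the way out. A hypothetical run_prog_once() helper sketching just that shape, with the run-context and RCU bookkeeping of the real callers elided:

#include <linux/bpf.h>
#include <linux/filter.h>

/* Hypothetical helper, not part of the patch: shows only the get/put pairing. */
static __always_unused u32 run_prog_once(struct bpf_prog *prog, const void *ctx)
{
	u32 ret = 0;

	migrate_disable();			/* stay on one CPU's counters */
	if (likely(bpf_prog_get_recursion_context(prog)))
		ret = bpf_prog_run(prog, ctx);
	else
		bpf_prog_inc_misses_counter(prog);
	bpf_prog_put_recursion_context(prog);	/* balances the increment either way */
	migrate_enable();
	return ret;
}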
4 changes: 2 additions & 2 deletions kernel/trace/bpf_trace.c
@@ -2063,7 +2063,7 @@ void __bpf_trace_run(struct bpf_raw_tp_link *link, u64 *args)
struct bpf_trace_run_ctx run_ctx;

cant_sleep();
-if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
+if (unlikely(!bpf_prog_get_recursion_context(prog))) {
bpf_prog_inc_misses_counter(prog);
goto out;
}
@@ -2077,7 +2077,7 @@ void __bpf_trace_run(struct bpf_raw_tp_link *link, u64 *args)

bpf_reset_run_ctx(old_run_ctx);
out:
-this_cpu_dec(*(prog->active));
+bpf_prog_put_recursion_context(prog);
}

#define UNPACK(...) __VA_ARGS__
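A failed get in any of the paths above ends up in bpf_prog_inc_misses_counter(), and those misses are exported to userspace as the recursion_misses field of struct bpf_prog_info. A small libbpf-based check (a sketch; assumes a libbpf recent enough to provide bpf_prog_get_info_by_fd(), and print_recursion_misses() is a hypothetical name):

#include <bpf/bpf.h>
#include <stdio.h>

/* Print how often the kernel rejected a nested run of this program. */
static void print_recursion_misses(int prog_fd)
{
	struct bpf_prog_info info = {};
	__u32 len = sizeof(info);

	if (bpf_prog_get_info_by_fd(prog_fd, &info, &len))
		return;
	printf("recursion_misses: %llu\n",
	       (unsigned long long)info.recursion_misses);
}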