bpf: Use u64_stats_t in struct bpf_prog_stats

[ Upstream commit 61a0abaee2092eee69e44fe60336aa2f5b578938 ]

Commit 316580b69d ("u64_stats: provide u64_stats_t type")
fixed possible load/store tearing on 64bit arches.

For instance, the following C code

stats->nsecs += sched_clock() - start;

could legitimately be implemented like this by a compiler,
confusing concurrent readers a lot:

stats->nsecs += sched_clock();
// arbitrary delay
stats->nsecs -= start;
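
For reference, a rough sketch of the 64-bit flavour of the u64_stats_t
helpers in <linux/u64_stats_sync.h> around this time (exact definitions
vary by kernel version, and 32-bit builds lean on the syncp seqcount
instead):

/* Sketch only: on 64-bit, u64_stats_t wraps a local64_t, so every update
 * or read is a single whole-word operation the compiler cannot split.
 */
typedef struct {
	local64_t	v;	/* from <asm/local64.h> */
} u64_stats_t;

static inline u64 u64_stats_read(const u64_stats_t *p)
{
	return local64_read(&p->v);
}

static inline void u64_stats_add(u64_stats_t *p, unsigned long val)
{
	local64_add(val, &p->v);
}

static inline void u64_stats_inc(u64_stats_t *p)
{
	local64_inc(&p->v);
}

Either way, u64_stats_add()/u64_stats_inc() apply the counter update as
one unit, so the split shown above can no longer happen.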

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20211026214133.3114279-4-eric.dumazet@gmail.com
Signed-off-by: Sasha Levin <sashal@kernel.org>

@@ -554,9 +554,9 @@ struct bpf_binary_header {
 };
 
 struct bpf_prog_stats {
-	u64 cnt;
-	u64 nsecs;
-	u64 misses;
+	u64_stats_t cnt;
+	u64_stats_t nsecs;
+	u64_stats_t misses;
 	struct u64_stats_sync syncp;
 } __aligned(2 * sizeof(u64));
 
@@ -618,8 +618,8 @@ static __always_inline u32 __bpf_prog_run(const struct bpf_prog *prog,
 		ret = dfunc(ctx, prog->insnsi, prog->bpf_func);
 		stats = this_cpu_ptr(prog->stats);
 		flags = u64_stats_update_begin_irqsave(&stats->syncp);
-		stats->cnt++;
-		stats->nsecs += sched_clock() - start;
+		u64_stats_inc(&stats->cnt);
+		u64_stats_add(&stats->nsecs, sched_clock() - start);
 		u64_stats_update_end_irqrestore(&stats->syncp, flags);
 	} else {
 		ret = dfunc(ctx, prog->insnsi, prog->bpf_func);

@@ -1824,8 +1824,14 @@ static int bpf_prog_release(struct inode *inode, struct file *filp)
 	return 0;
 }
 
+struct bpf_prog_kstats {
+	u64 nsecs;
+	u64 cnt;
+	u64 misses;
+};
+
 static void bpf_prog_get_stats(const struct bpf_prog *prog,
-			       struct bpf_prog_stats *stats)
+			       struct bpf_prog_kstats *stats)
 {
 	u64 nsecs = 0, cnt = 0, misses = 0;
 	int cpu;
@@ -1838,9 +1844,9 @@ static void bpf_prog_get_stats(const struct bpf_prog *prog,
 		st = per_cpu_ptr(prog->stats, cpu);
 		do {
 			start = u64_stats_fetch_begin_irq(&st->syncp);
-			tnsecs = st->nsecs;
-			tcnt = st->cnt;
-			tmisses = st->misses;
+			tnsecs = u64_stats_read(&st->nsecs);
+			tcnt = u64_stats_read(&st->cnt);
+			tmisses = u64_stats_read(&st->misses);
 		} while (u64_stats_fetch_retry_irq(&st->syncp, start));
 		nsecs += tnsecs;
 		cnt += tcnt;
@@ -1856,7 +1862,7 @@ static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
 {
 	const struct bpf_prog *prog = filp->private_data;
 	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
-	struct bpf_prog_stats stats;
+	struct bpf_prog_kstats stats;
 
 	bpf_prog_get_stats(prog, &stats);
 	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
@@ -3595,7 +3601,7 @@ static int bpf_prog_get_info_by_fd(struct file *file,
 	struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
 	struct bpf_prog_info info;
 	u32 info_len = attr->info.info_len;
-	struct bpf_prog_stats stats;
+	struct bpf_prog_kstats stats;
 	char __user *uinsns;
 	u32 ulen;
 	int err;

@@ -544,7 +544,7 @@ static void notrace inc_misses_counter(struct bpf_prog *prog)
 
 	stats = this_cpu_ptr(prog->stats);
 	u64_stats_update_begin(&stats->syncp);
-	stats->misses++;
+	u64_stats_inc(&stats->misses);
 	u64_stats_update_end(&stats->syncp);
 }
 
@@ -589,8 +589,8 @@ static void notrace update_prog_stats(struct bpf_prog *prog,
 
 		stats = this_cpu_ptr(prog->stats);
 		flags = u64_stats_update_begin_irqsave(&stats->syncp);
-		stats->cnt++;
-		stats->nsecs += sched_clock() - start;
+		u64_stats_inc(&stats->cnt);
+		u64_stats_add(&stats->nsecs, sched_clock() - start);
 		u64_stats_update_end_irqrestore(&stats->syncp, flags);
 	}
 }