xsk: Restructure/inline XSKMAP lookup/redirect/flush
In this commit the XSKMAP entry lookup function used by the XDP
redirect code is moved from the xskmap.c file to the xdp_sock.h header,
so the lookup can be inlined from, e.g., the bpf_xdp_redirect_map()
function.

Further, __xsk_map_redirect() and __xsk_map_flush() are moved to xsk.c,
which lets the compiler inline the xsk_rcv() and xsk_flush() functions.

Finally, all the XDP socket declarations are moved from linux/bpf.h to
net/xdp_sock.h, where most of the other XDP socket functions already
reside.

This yields a ~2% performance boost for the xdpsock "rx_drop" scenario.

Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20191101110346.15004-4-bjorn.topel@gmail.com
commit d817991cc7
parent e65650f291
committed by Daniel Borkmann
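
For context, a minimal sketch of the fast path this commit optimizes: an XDP program steering packets into AF_XDP sockets through a BPF_MAP_TYPE_XSKMAP. The bpf_redirect_map() helper is what ultimately reaches the now-inlined __xsk_map_lookup_elem()/__xsk_map_redirect(). This program is not part of the commit; the map name, sizing, and include paths are illustrative.

/* Illustrative XDP program (not from this commit): one AF_XDP socket
 * per RX queue, selected by queue index. bpf_redirect_map() on an
 * XSKMAP is the caller of the inlined XSKMAP lookup.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>	/* assumed libbpf include path */

struct {
	__uint(type, BPF_MAP_TYPE_XSKMAP);
	__uint(max_entries, 64);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} xsks_map SEC(".maps");	/* map name is an assumption */

SEC("xdp")
int xsk_redirect_prog(struct xdp_md *ctx)
{
	/* Frames on queues with no bound socket fail the redirect. */
	return bpf_redirect_map(&xsks_map, ctx->rx_queue_index, 0);
}

char _license[] SEC("license") = "GPL";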
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1009,31 +1009,6 @@ static inline int sock_map_get_from_fd(const union bpf_attr *attr,
 }
 #endif
 
-#if defined(CONFIG_XDP_SOCKETS)
-struct xdp_sock;
-struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map, u32 key);
-int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
-		       struct xdp_sock *xs);
-void __xsk_map_flush(struct bpf_map *map);
-#else
-struct xdp_sock;
-static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
-						     u32 key)
-{
-	return NULL;
-}
-
-static inline int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
-				     struct xdp_sock *xs)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline void __xsk_map_flush(struct bpf_map *map)
-{
-}
-#endif
-
 #if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
 void bpf_sk_reuseport_detach(struct sock *sk);
 int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -69,7 +69,14 @@ struct xdp_umem {
 /* Nodes are linked in the struct xdp_sock map_list field, and used to
  * track which maps a certain socket reside in.
  */
-struct xsk_map;
+struct xsk_map {
+	struct bpf_map map;
+	struct list_head __percpu *flush_list;
+	spinlock_t lock; /* Synchronize map updates */
+	struct xdp_sock *xsk_map[];
+};
+
 struct xsk_map_node {
 	struct list_head node;
 	struct xsk_map *map;
@@ -109,8 +116,6 @@ struct xdp_sock {
 struct xdp_buff;
 #ifdef CONFIG_XDP_SOCKETS
 int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
-int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
-void xsk_flush(struct xdp_sock *xs);
 bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs);
 /* Used from netdev driver */
 bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt);
@@ -134,6 +139,22 @@ void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
 			     struct xdp_sock **map_entry);
 int xsk_map_inc(struct xsk_map *map);
 void xsk_map_put(struct xsk_map *map);
+int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
+		       struct xdp_sock *xs);
+void __xsk_map_flush(struct bpf_map *map);
+
+static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
+						     u32 key)
+{
+	struct xsk_map *m = container_of(map, struct xsk_map, map);
+	struct xdp_sock *xs;
+
+	if (key >= map->max_entries)
+		return NULL;
+
+	xs = READ_ONCE(m->xsk_map[key]);
+	return xs;
+}
+
 static inline u64 xsk_umem_extract_addr(u64 addr)
 {
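
The READ_ONCE() in the inlined lookup above pairs with the publish the XSKMAP update path performs when userspace inserts an AF_XDP socket. A hedged userspace sketch of that insertion, assuming libbpf's bpf_map_update_elem() and xsk_socket__fd() with illustrative names; it is not part of this commit:

/* Userspace sketch (assumed names, not from this commit): install an
 * AF_XDP socket fd at its RX queue's XSKMAP slot, the same slot the
 * inlined __xsk_map_lookup_elem() reads with READ_ONCE().
 */
#include <bpf/bpf.h>
#include <bpf/xsk.h>	/* libbpf AF_XDP helpers of this era */

static int attach_xsk(int xsks_map_fd, struct xsk_socket *xsk,
		      __u32 rx_queue_id)
{
	int sock_fd = xsk_socket__fd(xsk);

	return bpf_map_update_elem(xsks_map_fd, &rx_queue_id, &sock_fd, 0);
}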
@@ -224,15 +245,6 @@ static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
 	return -ENOTSUPP;
 }
 
-static inline int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
-{
-	return -ENOTSUPP;
-}
-
-static inline void xsk_flush(struct xdp_sock *xs)
-{
-}
-
 static inline bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
 {
 	return false;
@@ -357,6 +369,21 @@ static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 handle,
 	return 0;
 }
 
+static inline int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
+				     struct xdp_sock *xs)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline void __xsk_map_flush(struct bpf_map *map)
+{
+}
+
+static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
+						     u32 key)
+{
+	return NULL;
+}
 #endif /* CONFIG_XDP_SOCKETS */
 
 #endif /* _LINUX_XDP_SOCK_H */
--- a/net/xdp/xskmap.c
+++ b/net/xdp/xskmap.c
@@ -9,13 +9,6 @@
 #include <linux/slab.h>
 #include <linux/sched.h>
 
-struct xsk_map {
-	struct bpf_map map;
-	struct list_head __percpu *flush_list;
-	spinlock_t lock; /* Synchronize map updates */
-	struct xdp_sock *xsk_map[];
-};
-
 int xsk_map_inc(struct xsk_map *map)
 {
 	struct bpf_map *m = &map->map;
@@ -151,18 +144,6 @@ static int xsk_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
 	return 0;
 }
 
-struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map, u32 key)
-{
-	struct xsk_map *m = container_of(map, struct xsk_map, map);
-	struct xdp_sock *xs;
-
-	if (key >= map->max_entries)
-		return NULL;
-
-	xs = READ_ONCE(m->xsk_map[key]);
-	return xs;
-}
-
 static u32 xsk_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
 {
 	const int ret = BPF_REG_0, mp = BPF_REG_1, index = BPF_REG_2;
@@ -179,35 +160,6 @@ static u32 xsk_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
 	return insn - insn_buf;
 }
 
-int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
-		       struct xdp_sock *xs)
-{
-	struct xsk_map *m = container_of(map, struct xsk_map, map);
-	struct list_head *flush_list = this_cpu_ptr(m->flush_list);
-	int err;
-
-	err = xsk_rcv(xs, xdp);
-	if (err)
-		return err;
-
-	if (!xs->flush_node.prev)
-		list_add(&xs->flush_node, flush_list);
-
-	return 0;
-}
-
-void __xsk_map_flush(struct bpf_map *map)
-{
-	struct xsk_map *m = container_of(map, struct xsk_map, map);
-	struct list_head *flush_list = this_cpu_ptr(m->flush_list);
-	struct xdp_sock *xs, *tmp;
-
-	list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
-		xsk_flush(xs);
-		__list_del_clearprev(&xs->flush_node);
-	}
-}
-
 static void *xsk_map_lookup_elem(struct bpf_map *map, void *key)
 {
 	WARN_ON_ONCE(!rcu_read_lock_held());
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -196,7 +196,7 @@ static bool xsk_is_bound(struct xdp_sock *xs)
 	return false;
 }
 
-int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
+static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
 {
 	u32 len;
 
@@ -212,7 +212,7 @@ int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
 		__xsk_rcv_zc(xs, xdp, len) : __xsk_rcv(xs, xdp, len);
 }
 
-void xsk_flush(struct xdp_sock *xs)
+static void xsk_flush(struct xdp_sock *xs)
 {
 	xskq_produce_flush_desc(xs->rx);
 	xs->sk.sk_data_ready(&xs->sk);
@@ -264,6 +264,35 @@ out_unlock:
 	return err;
 }
 
+int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
+		       struct xdp_sock *xs)
+{
+	struct xsk_map *m = container_of(map, struct xsk_map, map);
+	struct list_head *flush_list = this_cpu_ptr(m->flush_list);
+	int err;
+
+	err = xsk_rcv(xs, xdp);
+	if (err)
+		return err;
+
+	if (!xs->flush_node.prev)
+		list_add(&xs->flush_node, flush_list);
+
+	return 0;
+}
+
+void __xsk_map_flush(struct bpf_map *map)
+{
+	struct xsk_map *m = container_of(map, struct xsk_map, map);
+	struct list_head *flush_list = this_cpu_ptr(m->flush_list);
+	struct xdp_sock *xs, *tmp;
+
+	list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
+		xsk_flush(xs);
+		__list_del_clearprev(&xs->flush_node);
+	}
+}
+
 void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
 {
 	xskq_produce_flush_addr_n(umem->cq, nb_entries);
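
To see why redirect and flush are separate entry points: __xsk_map_redirect() runs once per packet from xdp_do_redirect() inside a driver's NAPI poll, while __xsk_map_flush() runs once per poll from xdp_do_flush_map(), so sk_data_ready() wakes each socket once per batch rather than once per packet. A schematic of that driver pattern, with hypothetical ring helpers (my_rx_ring, my_rx_next_frame) standing in for real driver code:

/* Schematic driver RX poll (hypothetical helpers, not from this commit):
 * every XDP_REDIRECT verdict may land in __xsk_map_redirect(), and the
 * single xdp_do_flush_map() at the end drives __xsk_map_flush().
 */
#include <linux/filter.h>	/* bpf_prog_run_xdp(), xdp_do_redirect() */
#include <linux/netdevice.h>

static int napi_poll_sketch(struct net_device *dev, struct bpf_prog *prog,
			    struct my_rx_ring *rxq, int budget)
{
	int done = 0;

	while (done < budget) {
		struct xdp_buff xdp;

		if (!my_rx_next_frame(rxq, &xdp))	/* hypothetical */
			break;

		if (bpf_prog_run_xdp(prog, &xdp) == XDP_REDIRECT)
			xdp_do_redirect(dev, &xdp, prog); /* per packet */
		done++;
	}

	xdp_do_flush_map();	/* once per poll -> __xsk_map_flush() */
	return done;
}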