/* SPDX-License-Identifier: GPL-2.0-only */
/* include/net/xdp.h
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 */
#ifndef __LINUX_NET_XDP_H__
#define __LINUX_NET_XDP_H__

#include <linux/bitfield.h>
#include <linux/filter.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h> /* skb_shared_info */

#include <net/page_pool/types.h>

/**
 * DOC: XDP RX-queue information
 *
 * The XDP RX-queue info (xdp_rxq_info) is associated with the driver
 * level RX-ring queues.  It is information that is specific to how
 * the driver has configured a given RX-ring queue.
 *
 * Each xdp_buff frame received in the driver carries a (pointer)
 * reference to this xdp_rxq_info structure.  This provides the XDP
 * data-path read-access to RX-info for both kernel and bpf-side
 * (limited subset).
 *
 * For now, direct access is only safe while running in NAPI/softirq
 * context.  Contents are read-mostly and must not be updated during
 * driver NAPI/softirq poll.
 *
 * The driver usage API is a register and unregister API.
 *
 * The struct is not directly tied to the XDP prog.  A new XDP prog
 * can be attached as long as it doesn't change the underlying
 * RX-ring.  If the RX-ring does change significantly, the NIC driver
 * naturally needs to stop the RX-ring before purging and reallocating
 * memory.  In that process the driver MUST call unregister (which
 * also applies for driver shutdown and unload).  The register API is
 * also mandatory during RX-ring setup.
 */

/* Memory-allocator model a driver registers for its RX-ring.  Stored as a
 * u32 in xdp_mem_info::type so the on-the-wire size is fixed.
 */
enum xdp_mem_type {
	MEM_TYPE_PAGE_SHARED = 0, /* Split-page refcnt based model */
	MEM_TYPE_PAGE_ORDER0,     /* Orig XDP full page model */
	MEM_TYPE_PAGE_POOL,       /* memory backed by a page_pool */
	MEM_TYPE_XSK_BUFF_POOL,   /* memory backed by an XSK buff pool */
	MEM_TYPE_MAX,
};

/* XDP flags for ndo_xdp_xmit */
#define XDP_XMIT_FLUSH		(1U << 0)	/* doorbell signal consumer */
#define XDP_XMIT_FLAGS_MASK	XDP_XMIT_FLUSH

/* Identifies a registered memory model for an RX-queue.
 * @type: enum xdp_mem_type value (kept as u32 for a known size)
 * @id: presumably the id assigned at mem-model registration — see
 *      xdp_reg_mem_model()/xdp_rxq_info_reg_mem_model()
 */
struct xdp_mem_info {
	u32 type; /* enum xdp_mem_type, but known size type */
	u32 id;
};

struct page_pool;

/* Per RX-queue information registered by the driver; every xdp_buff
 * received on the queue carries a pointer back to this structure (see
 * the DOC section above).  Read-mostly on the fast path.
 */
struct xdp_rxq_info {
	struct net_device *dev;
	u32 queue_index;
	u32 reg_state;
	struct xdp_mem_info mem;
	u32 frag_size;
} ____cacheline_aligned; /* perf critical, avoid false-sharing */

/* TX counterpart of xdp_rxq_info; only carries the egress device */
struct xdp_txq_info {
	struct net_device *dev;
};

enum xdp_buff_flags {
	XDP_FLAGS_HAS_FRAGS		= BIT(0), /* non-linear xdp buff */
	XDP_FLAGS_FRAGS_PF_MEMALLOC	= BIT(1), /* xdp paged memory is under
						   * pressure
						   */
};

/* In-driver representation of an XDP packet buffer.
 * Linear-area layout: data_hard_start <= data < data_end, with reserved
 * tailroom up to data_hard_start + frame_sz (see xdp_data_hard_end()).
 * data_meta normally sits at or below data; data_meta == data + 1 marks
 * metadata as unsupported (see xdp_set_data_meta_invalid()).
 */
struct xdp_buff {
	void *data;
	void *data_end;
	void *data_meta;
	void *data_hard_start;
	struct xdp_rxq_info *rxq;
	struct xdp_txq_info *txq;
	u32 frame_sz; /* frame size to deduce data_hard_end/reserved tailroom*/
	u32 flags; /* supported values defined in xdp_buff_flags */
};

/* Report whether @xdp is non-linear, i.e. carries paged fragments */
static __always_inline bool xdp_buff_has_frags(const struct xdp_buff *xdp)
{
	return (xdp->flags & XDP_FLAGS_HAS_FRAGS) != 0;
}

/* Mark @xdp as non-linear (fragments present in the shared info area) */
static __always_inline void xdp_buff_set_frags_flag(struct xdp_buff *xdp)
{
	xdp->flags = xdp->flags | XDP_FLAGS_HAS_FRAGS;
}

/* Mark @xdp as linear again (no fragments attached) */
static __always_inline void xdp_buff_clear_frags_flag(struct xdp_buff *xdp)
{
	xdp->flags = xdp->flags & ~XDP_FLAGS_HAS_FRAGS;
}

/* Report whether any attached frag came from pfmemalloc (emergency) memory */
static __always_inline bool
xdp_buff_is_frag_pfmemalloc(const struct xdp_buff *xdp)
{
	return (xdp->flags & XDP_FLAGS_FRAGS_PF_MEMALLOC) != 0;
}

/* Record that at least one attached frag is pfmemalloc-backed */
static __always_inline void xdp_buff_set_frag_pfmemalloc(struct xdp_buff *xdp)
{
	xdp->flags = xdp->flags | XDP_FLAGS_FRAGS_PF_MEMALLOC;
}

/* One-time init of the fields of @xdp that are constant for the whole
 * NAPI cycle: backing RX-queue info and frame size.  Flags start clear.
 */
static __always_inline void
xdp_init_buff(struct xdp_buff *xdp, u32 frame_sz, struct xdp_rxq_info *rxq)
{
	xdp->rxq = rxq;
	xdp->frame_sz = frame_sz;
	xdp->flags = 0;
}

/* Per-packet setup of the linear-area pointers of @xdp.
 * @hard_start: start of the buffer (headroom included)
 * @headroom: bytes reserved before the packet data
 * @data_len: length of the packet data
 * @meta_valid: whether metadata may be placed in front of data; if not,
 *              data_meta is set one byte past data to mark it invalid
 *              (same convention as xdp_set_data_meta_invalid())
 */
static __always_inline void
xdp_prepare_buff(struct xdp_buff *xdp, unsigned char *hard_start,
		 int headroom, int data_len, const bool meta_valid)
{
	unsigned char *pkt = hard_start + headroom;

	xdp->data_hard_start = hard_start;
	xdp->data = pkt;
	xdp->data_end = pkt + data_len;
	xdp->data_meta = meta_valid ? pkt : pkt + 1;
}

/* Reserve memory area at end-of data area.
 *
 * This macro reserves tailroom in the XDP buffer by limiting the
 * XDP/BPF data access to data_hard_end.  Notice same area (and size)
 * is used for XDP_PASS, when constructing the SKB via build_skb().
 *
 * The reserved tail area holds the struct skb_shared_info used for
 * frag accounting (see xdp_get_shared_info_from_buff()).
 */
#define xdp_data_hard_end(xdp)				\
	((xdp)->data_hard_start + (xdp)->frame_sz -	\
	 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

/* The skb_shared_info lives in the reserved tailroom of the buffer */
static inline struct skb_shared_info *
xdp_get_shared_info_from_buff(const struct xdp_buff *xdp)
{
	void *tail = xdp_data_hard_end(xdp);

	return (struct skb_shared_info *)tail;
}

/* Total packet length of @xdp: linear part plus, for non-linear buffs,
 * the accumulated size of all attached frags.
 */
static __always_inline unsigned int
xdp_get_buff_len(const struct xdp_buff *xdp)
{
	unsigned int total = xdp->data_end - xdp->data;

	if (unlikely(xdp_buff_has_frags(xdp))) {
		const struct skb_shared_info *sinfo;

		sinfo = xdp_get_shared_info_from_buff(xdp);
		total += sinfo->xdp_frags_size;
	}

	return total;
}

void xdp_return_frag(netmem_ref netmem, const struct xdp_buff *xdp);

/**
 * __xdp_buff_add_frag - attach frag to &xdp_buff
 * @xdp: XDP buffer to attach the frag to
 * @netmem: network memory containing the frag
 * @offset: offset at which the frag starts
 * @size: size of the frag
 * @truesize: total memory size occupied by the frag
 * @try_coalesce: whether to try coalescing the frags (not valid for XSk)
 *
 * Attach frag to the XDP buffer. If it currently has no frags attached,
 * initialize the related fields, otherwise check that the frag number
 * didn't reach the limit of ``MAX_SKB_FRAGS``. If possible, try coalescing
 * the frag with the previous one.
 * The function doesn't check/update the pfmemalloc bit. Please use the
 * non-underscored wrapper in drivers.
 *
 * Return: true on success, false if there's no space for the frag in
 * the shared info struct.
 */
static inline bool __xdp_buff_add_frag(struct xdp_buff *xdp, netmem_ref netmem,
				       u32 offset, u32 size, u32 truesize,
				       bool try_coalesce)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
	skb_frag_t *prev;
	u32 nr_frags;

	if (!xdp_buff_has_frags(xdp)) {
		/* First frag: mark the buff non-linear and reset the
		 * frag accounting before filling slot 0.
		 */
		xdp_buff_set_frags_flag(xdp);

		nr_frags = 0;
		sinfo->xdp_frags_size = 0;
		sinfo->xdp_frags_truesize = 0;

		goto fill;
	}

	nr_frags = sinfo->nr_frags;
	prev = &sinfo->frags[nr_frags - 1];

	if (try_coalesce && netmem == skb_frag_netmem(prev) &&
	    offset == skb_frag_off(prev) + skb_frag_size(prev)) {
		/* New frag is contiguous with the previous one in the same
		 * netmem: extend the previous frag instead of using a slot.
		 */
		skb_frag_size_add(prev, size);
		/* Guaranteed to only decrement the refcount */
		xdp_return_frag(netmem, xdp);
	} else if (unlikely(nr_frags == MAX_SKB_FRAGS)) {
		/* All MAX_SKB_FRAGS slots in use and no coalescing possible */
		return false;
	} else {
fill:
		__skb_fill_netmem_desc_noacc(sinfo, nr_frags++, netmem,
					     offset, size);
	}

	/* Common accounting for all three success paths above */
	sinfo->nr_frags = nr_frags;
	sinfo->xdp_frags_size += size;
	sinfo->xdp_frags_truesize += truesize;

	return true;
}

/**
 * xdp_buff_add_frag - attach frag to &xdp_buff
 * @xdp: XDP buffer to attach the frag to
 * @netmem: network memory containing the frag
 * @offset: offset at which the frag starts
 * @size: size of the frag
 * @truesize: total memory size occupied by the frag
 *
 * Version of __xdp_buff_add_frag() which takes care of the pfmemalloc bit.
 *
 * Return: true on success, false if there's no space for the frag in
 * the shared info struct.
 */
/* Wrapper around __xdp_buff_add_frag() that additionally propagates the
 * pfmemalloc property of @netmem into the buff flags.  Coalescing is
 * always attempted.  Returns false if the frag could not be attached.
 */
static inline bool xdp_buff_add_frag(struct xdp_buff *xdp, netmem_ref netmem,
				     u32 offset, u32 size, u32 truesize)
{
	bool added;

	added = __xdp_buff_add_frag(xdp, netmem, offset, size, truesize, true);
	if (!added)
		return false;

	if (unlikely(netmem_is_pfmemalloc(netmem)))
		xdp_buff_set_frag_pfmemalloc(xdp);

	return true;
}

/* Detached representation of an XDP packet, stored in the headroom of
 * the packet buffer itself (see xdp_convert_buff_to_frame()), so it can
 * outlive the NAPI cycle and travel to remote CPUs.
 */
struct xdp_frame {
	void *data;
	u32 len;
	u32 headroom; /* headroom remaining after the xdp_frame itself */
	u32 metasize; /* uses lower 8-bits */
	/* Lifetime of xdp_rxq_info is limited to NAPI/enqueue time,
	 * while mem_type is valid on remote CPU.
	 */
	enum xdp_mem_type mem_type:32;
	struct net_device *dev_rx; /* used by cpumap */
	u32 frame_sz;
	u32 flags; /* supported values defined in xdp_buff_flags */
};

/* Report whether @frame is non-linear, i.e. carries paged fragments */
static __always_inline bool xdp_frame_has_frags(const struct xdp_frame *frame)
{
	return (frame->flags & XDP_FLAGS_HAS_FRAGS) != 0;
}

/* Report whether any frag of @frame is pfmemalloc (emergency) memory */
static __always_inline bool
xdp_frame_is_frag_pfmemalloc(const struct xdp_frame *frame)
{
	return (frame->flags & XDP_FLAGS_FRAGS_PF_MEMALLOC) != 0;
}

#define XDP_BULK_QUEUE_SIZE	16
/* Small staging queue used to batch netmem returns; flushed with
 * xdp_flush_frame_bulk() once @count reaches the queue size or at the
 * end of processing.
 */
struct xdp_frame_bulk {
	int count; /* number of valid entries in q[] */
	netmem_ref q[XDP_BULK_QUEUE_SIZE];
};

/* Start with an empty bulk queue; q[] contents are don't-care */
static __always_inline void xdp_frame_bulk_init(struct xdp_frame_bulk *bq)
{
	bq->count = 0;
}

/* Recover the skb_shared_info of a detached frame.  The frame struct sits
 * at the top of the buffer, so hard_start is reconstructed from data,
 * headroom and sizeof(*frame); the shared info then lives in the reserved
 * tailroom, mirroring xdp_data_hard_end().
 */
static inline struct skb_shared_info *
xdp_get_shared_info_from_frame(const struct xdp_frame *frame)
{
	void *hard_start = frame->data - frame->headroom - sizeof(*frame);
	void *tail = hard_start + frame->frame_sz -
		     SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	return (struct skb_shared_info *)tail;
}

/* Per-invocation counters for cpumap XDP program runs */
struct xdp_cpumap_stats {
	unsigned int redirect;
	unsigned int pass;
	unsigned int drop;
};

/* Clear the kernel pointers held in @frame so a stale frame cannot leak
 * or dereference them.
 */
static inline void xdp_scrub_frame(struct xdp_frame *frame)
{
	frame->dev_rx = NULL;
	frame->data = NULL;
}

/* Account XDP frag data on an skb being built from an XDP buff/frame:
 * set the frag count, grow len/data_len/truesize accordingly and
 * propagate the pfmemalloc property.
 */
static inline void
xdp_update_skb_shared_info(struct sk_buff *skb, u8 nr_frags,
			   unsigned int size, unsigned int truesize,
			   bool pfmemalloc)
{
	struct skb_shared_info *sinfo = skb_shinfo(skb);

	sinfo->nr_frags = nr_frags;
	/* ``destructor_arg`` shares storage with ``xdp_frags_{,true}size``;
	 * clear it now that those fields are no longer needed.
	 */
	sinfo->destructor_arg = NULL;

	skb->truesize += truesize;
	skb->len += size;
	skb->data_len += size;
	skb->pfmemalloc |= pfmemalloc;
}

/* Avoids inlining WARN macro in fast-path */
void xdp_warn(const char *msg, const char *func, const int line);
/* Convenience wrapper that captures the call site's function and line */
#define XDP_WARN(msg) xdp_warn(msg, __func__, __LINE__)

struct sk_buff *xdp_build_skb_from_buff(const struct xdp_buff *xdp);
struct sk_buff *xdp_build_skb_from_zc(struct xdp_buff *xdp);
struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp);
struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
					   struct sk_buff *skb,
					   struct net_device *dev);
struct sk_buff *xdp_build_skb_from_frame(struct xdp_frame *xdpf,
					 struct net_device *dev);
int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp);
struct xdp_frame *xdpf_clone(struct xdp_frame *xdpf);

/* Rebuild an xdp_buff view onto the packet described by @frame.
 * Inverse of xdp_update_frame_from_buff() for the pointer fields; note
 * that rxq/txq are NOT set here and must be filled by the caller.
 */
static inline
void xdp_convert_frame_to_buff(const struct xdp_frame *frame,
			       struct xdp_buff *xdp)
{
	/* The xdp_frame itself occupies the very top of the headroom */
	xdp->data_hard_start = frame->data - frame->headroom - sizeof(*frame);
	xdp->frame_sz = frame->frame_sz;
	xdp->data = frame->data;
	xdp->data_meta = frame->data - frame->metasize;
	xdp->data_end = frame->data + frame->len;
	xdp->flags = frame->flags;
}

/* Fill @xdp_frame from @xdp.  Returns 0 on success, -ENOSPC when the
 * buff lacks the headroom needed to store the xdp_frame or the driver
 * failed to reserve tailroom for skb_shared_info.  mem_type is NOT set
 * here — see xdp_convert_buff_to_frame().
 */
static inline
int xdp_update_frame_from_buff(const struct xdp_buff *xdp,
			       struct xdp_frame *xdp_frame)
{
	int metasize, headroom;

	/* Assure headroom is available for storing info */
	headroom = xdp->data - xdp->data_hard_start;
	metasize = xdp->data - xdp->data_meta;
	metasize = metasize > 0 ? metasize : 0; /* negative means "no meta" */
	if (unlikely((headroom - metasize) < sizeof(*xdp_frame)))
		return -ENOSPC;

	/* Catch if driver didn't reserve tailroom for skb_shared_info */
	if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) {
		XDP_WARN("Driver BUG: missing reserved tailroom");
		return -ENOSPC;
	}

	xdp_frame->data = xdp->data;
	xdp_frame->len  = xdp->data_end - xdp->data;
	/* headroom as seen by the frame excludes the frame struct itself */
	xdp_frame->headroom = headroom - sizeof(*xdp_frame);
	xdp_frame->metasize = metasize;
	xdp_frame->frame_sz = xdp->frame_sz;
	xdp_frame->flags = xdp->flags;

	return 0;
}

/* Convert xdp_buff to xdp_frame.
 *
 * The frame metadata is stored in the packet's own headroom, so no
 * allocation takes place (except for the XSK zero-copy case, which is
 * delegated to xdp_convert_zc_to_xdp_frame()).  Returns NULL when the
 * buff lacks the required head/tailroom.
 */
static inline
struct xdp_frame *xdp_convert_buff_to_frame(struct xdp_buff *xdp)
{
	struct xdp_frame *xdp_frame;

	if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL)
		return xdp_convert_zc_to_xdp_frame(xdp);

	/* Store info in top of packet */
	xdp_frame = xdp->data_hard_start;
	if (unlikely(xdp_update_frame_from_buff(xdp, xdp_frame) < 0))
		return NULL;

	/* rxq only valid until napi_schedule ends, convert to xdp_mem_type */
	xdp_frame->mem_type = xdp->rxq->mem.type;

	return xdp_frame;
}

void __xdp_return(netmem_ref netmem, enum xdp_mem_type mem_type,
		  bool napi_direct, struct xdp_buff *xdp);
void xdp_return_frame(struct xdp_frame *xdpf);
void xdp_return_frame_rx_napi(struct xdp_frame *xdpf);
void xdp_return_buff(struct xdp_buff *xdp);
void xdp_return_frame_bulk(struct xdp_frame *xdpf,
			   struct xdp_frame_bulk *bq);

/* Hand all queued netmem entries back to the page_pool in one call and
 * reset the queue.  A no-op on an empty queue.
 */
static inline void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq)
{
	if (likely(bq->count))
		page_pool_put_netmem_bulk(bq->q, bq->count);

	bq->count = 0;
}

/* Total packet length of @xdpf: linear part plus, for non-linear frames,
 * the accumulated size of all frags (mirrors xdp_get_buff_len()).
 */
static __always_inline unsigned int
xdp_get_frame_len(const struct xdp_frame *xdpf)
{
	unsigned int total = xdpf->len;

	if (unlikely(xdp_frame_has_frags(xdpf))) {
		const struct skb_shared_info *sinfo;

		sinfo = xdp_get_shared_info_from_frame(xdpf);
		total += sinfo->xdp_frags_size;
	}

	return total;
}

int __xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
		       struct net_device *dev, u32 queue_index,
		       unsigned int napi_id, u32 frag_size);
/* Register RX-queue info with frag_size 0; drivers supporting frags
 * should call __xdp_rxq_info_reg() directly with a real frag_size.
 */
static inline int
xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
		 struct net_device *dev, u32 queue_index,
		 unsigned int napi_id)
{
	return __xdp_rxq_info_reg(xdp_rxq, dev, queue_index, napi_id, 0);
}

void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq);
void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq);
bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq);
int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
			       enum xdp_mem_type type, void *allocator);
void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq);
int xdp_reg_mem_model(struct xdp_mem_info *mem,
		      enum xdp_mem_type type, void *allocator);
void xdp_unreg_mem_model(struct xdp_mem_info *mem);
int xdp_reg_page_pool(struct page_pool *pool);
void xdp_unreg_page_pool(const struct page_pool *pool);
void xdp_rxq_info_attach_page_pool(struct xdp_rxq_info *xdp_rxq,
				   const struct page_pool *pool);

/**
 * xdp_rxq_info_attach_mem_model - attach registered mem info to RxQ info
 * @xdp_rxq: XDP RxQ info to attach the memory info to
 * @mem: already registered memory info
 *
 * If the driver registers its memory providers manually, it must use this
 * function instead of xdp_rxq_info_reg_mem_model().
 */
static inline void
xdp_rxq_info_attach_mem_model(struct xdp_rxq_info *xdp_rxq,
			      const struct xdp_mem_info *mem)
{
	/* Plain struct copy; *mem must stay registered while attached */
	xdp_rxq->mem = *mem;
}

/**
 * xdp_rxq_info_detach_mem_model - detach registered mem info from RxQ info
 * @xdp_rxq: XDP RxQ info to detach the memory info from
 *
 * If the driver registers its memory providers manually and then attaches it
 * via xdp_rxq_info_attach_mem_model(), it must call this function before
 * xdp_rxq_info_unreg().
 */
static inline void xdp_rxq_info_detach_mem_model(struct xdp_rxq_info *xdp_rxq)
{
	/* Reset to all-zero mem info (type MEM_TYPE_PAGE_SHARED, id 0) */
	xdp_rxq->mem = (struct xdp_mem_info){ };
}

/* Drivers not supporting XDP metadata can use this helper, which
 * rejects any room expansion for metadata as a result.
 */
static __always_inline void
xdp_set_data_meta_invalid(struct xdp_buff *xdp)
{
	/* data + 1 is the sentinel tested by xdp_data_meta_unsupported() */
	xdp->data_meta = xdp->data + 1;
}

/* True when the driver marked metadata unsupported via
 * xdp_set_data_meta_invalid() (data_meta past data is the sentinel).
 */
static __always_inline bool
xdp_data_meta_unsupported(const struct xdp_buff *xdp)
{
	return unlikely(xdp->data_meta > xdp->data);
}

static inline bool xdp_metalen_invalid(unsigned long metalen)
{
	unsigned long meta_max;

	meta_max = type_max(typeof_member(struct skb_shared_info, meta_len));
	BUILD_BUG_ON(!__builtin_constant_p(meta_max));

	return !IS_ALIGNED(metalen, sizeof(u32)) || metalen > meta_max;
}

/* Tracks the currently attached XDP program and its attach flags;
 * updated via xdp_attachment_setup().
 */
struct xdp_attachment_info {
	struct bpf_prog *prog;
	u32 flags;
};

struct netdev_bpf;
void xdp_attachment_setup(struct xdp_attachment_info *info,
			  struct netdev_bpf *bpf);

/* devmap bulking reuses the generic XDP bulk-queue depth */
#define DEV_MAP_BULK_SIZE XDP_BULK_QUEUE_SIZE

/* Define the relationship between xdp-rx-metadata kfunc and
 * various other entities:
 * - xdp_rx_metadata enum
 * - netdev netlink enum (Documentation/netlink/specs/netdev.yaml)
 * - kfunc name
 * - xdp_metadata_ops field
 */
#define XDP_METADATA_KFUNC_xxx	\
	XDP_METADATA_KFUNC(XDP_METADATA_KFUNC_RX_TIMESTAMP, \
			   NETDEV_XDP_RX_METADATA_TIMESTAMP, \
			   bpf_xdp_metadata_rx_timestamp, \
			   xmo_rx_timestamp) \
	XDP_METADATA_KFUNC(XDP_METADATA_KFUNC_RX_HASH, \
			   NETDEV_XDP_RX_METADATA_HASH, \
			   bpf_xdp_metadata_rx_hash, \
			   xmo_rx_hash) \
	XDP_METADATA_KFUNC(XDP_METADATA_KFUNC_RX_VLAN_TAG, \
			   NETDEV_XDP_RX_METADATA_VLAN_TAG, \
			   bpf_xdp_metadata_rx_vlan_tag, \
			   xmo_rx_vlan_tag) \

/* X-macro expansion: one enumerator per metadata kfunc listed above */
enum xdp_rx_metadata {
#define XDP_METADATA_KFUNC(name, _, __, ___) name,
XDP_METADATA_KFUNC_xxx
#undef XDP_METADATA_KFUNC
MAX_XDP_METADATA_KFUNC,
};

/* RSS hash type reported via the bpf_xdp_metadata_rx_hash() kfunc:
 * individual L3/L4 bits first, then the combined values drivers map
 * their HW RSS types to.
 */
enum xdp_rss_hash_type {
	/* First part: Individual bits for L3/L4 types */
	XDP_RSS_L3_IPV4		= BIT(0),
	XDP_RSS_L3_IPV6		= BIT(1),

	/* The fixed (L3) IPv4 and IPv6 headers can both be followed by
	 * variable/dynamic headers, IPv4 called Options and IPv6 called
	 * Extension Headers. HW RSS type can contain this info.
	 */
	XDP_RSS_L3_DYNHDR	= BIT(2),

	/* When RSS hash covers L4 then drivers MUST set XDP_RSS_L4 bit in
	 * addition to the protocol specific bit.  This ease interaction with
	 * SKBs and avoids reserving a fixed mask for future L4 protocol bits.
	 */
	XDP_RSS_L4		= BIT(3), /* L4 based hash, proto can be unknown */
	XDP_RSS_L4_TCP		= BIT(4),
	XDP_RSS_L4_UDP		= BIT(5),
	XDP_RSS_L4_SCTP		= BIT(6),
	XDP_RSS_L4_IPSEC	= BIT(7), /* L4 based hash include IPSEC SPI */
	XDP_RSS_L4_ICMP		= BIT(8),

	/* Second part: RSS hash type combinations used for driver HW mapping */
	XDP_RSS_TYPE_NONE            = 0,
	XDP_RSS_TYPE_L2              = XDP_RSS_TYPE_NONE,

	XDP_RSS_TYPE_L3_IPV4         = XDP_RSS_L3_IPV4,
	XDP_RSS_TYPE_L3_IPV6         = XDP_RSS_L3_IPV6,
	XDP_RSS_TYPE_L3_IPV4_OPT     = XDP_RSS_L3_IPV4 | XDP_RSS_L3_DYNHDR,
	XDP_RSS_TYPE_L3_IPV6_EX      = XDP_RSS_L3_IPV6 | XDP_RSS_L3_DYNHDR,

	XDP_RSS_TYPE_L4_ANY          = XDP_RSS_L4,
	XDP_RSS_TYPE_L4_IPV4_TCP     = XDP_RSS_L3_IPV4 | XDP_RSS_L4 | XDP_RSS_L4_TCP,
	XDP_RSS_TYPE_L4_IPV4_UDP     = XDP_RSS_L3_IPV4 | XDP_RSS_L4 | XDP_RSS_L4_UDP,
	XDP_RSS_TYPE_L4_IPV4_SCTP    = XDP_RSS_L3_IPV4 | XDP_RSS_L4 | XDP_RSS_L4_SCTP,
	XDP_RSS_TYPE_L4_IPV4_IPSEC   = XDP_RSS_L3_IPV4 | XDP_RSS_L4 | XDP_RSS_L4_IPSEC,
	XDP_RSS_TYPE_L4_IPV4_ICMP    = XDP_RSS_L3_IPV4 | XDP_RSS_L4 | XDP_RSS_L4_ICMP,

	XDP_RSS_TYPE_L4_IPV6_TCP     = XDP_RSS_L3_IPV6 | XDP_RSS_L4 | XDP_RSS_L4_TCP,
	XDP_RSS_TYPE_L4_IPV6_UDP     = XDP_RSS_L3_IPV6 | XDP_RSS_L4 | XDP_RSS_L4_UDP,
	XDP_RSS_TYPE_L4_IPV6_SCTP    = XDP_RSS_L3_IPV6 | XDP_RSS_L4 | XDP_RSS_L4_SCTP,
	XDP_RSS_TYPE_L4_IPV6_IPSEC   = XDP_RSS_L3_IPV6 | XDP_RSS_L4 | XDP_RSS_L4_IPSEC,
	XDP_RSS_TYPE_L4_IPV6_ICMP    = XDP_RSS_L3_IPV6 | XDP_RSS_L4 | XDP_RSS_L4_ICMP,

	XDP_RSS_TYPE_L4_IPV6_TCP_EX  = XDP_RSS_TYPE_L4_IPV6_TCP  | XDP_RSS_L3_DYNHDR,
	XDP_RSS_TYPE_L4_IPV6_UDP_EX  = XDP_RSS_TYPE_L4_IPV6_UDP  | XDP_RSS_L3_DYNHDR,
	XDP_RSS_TYPE_L4_IPV6_SCTP_EX = XDP_RSS_TYPE_L4_IPV6_SCTP | XDP_RSS_L3_DYNHDR,
};

/* Driver callbacks backing the XDP RX-metadata kfuncs; one field per
 * entry in XDP_METADATA_KFUNC_xxx above.
 */
struct xdp_metadata_ops {
	int	(*xmo_rx_timestamp)(const struct xdp_md *ctx, u64 *timestamp);
	int	(*xmo_rx_hash)(const struct xdp_md *ctx, u32 *hash,
			       enum xdp_rss_hash_type *rss_type);
	int	(*xmo_rx_vlan_tag)(const struct xdp_md *ctx, __be16 *vlan_proto,
				   u16 *vlan_tci);
};

#ifdef CONFIG_NET
u32 bpf_xdp_metadata_kfunc_id(int id);
bool bpf_dev_bound_kfunc_id(u32 btf_id);
void xdp_set_features_flag(struct net_device *dev, xdp_features_t val);
void xdp_features_set_redirect_target(struct net_device *dev, bool support_sg);
void xdp_features_clear_redirect_target(struct net_device *dev);
#else
/* !CONFIG_NET stubs: no-ops / neutral return values */
static inline u32 bpf_xdp_metadata_kfunc_id(int id) { return 0; }
static inline bool bpf_dev_bound_kfunc_id(u32 btf_id) { return false; }

static inline void
xdp_set_features_flag(struct net_device *dev, xdp_features_t val)
{
}

static inline void
xdp_features_set_redirect_target(struct net_device *dev, bool support_sg)
{
}

static inline void
xdp_features_clear_redirect_target(struct net_device *dev)
{
}
#endif

/* Drop all advertised XDP features for @dev */
static inline void xdp_clear_features_flag(struct net_device *dev)
{
	xdp_set_features_flag(dev, 0);
}

/* Run the XDP @prog on @xdp and return its verdict.  On bonding slaves,
 * an XDP_TX verdict may be rewritten by xdp_master_redirect() when the
 * master-redirect static branch is enabled.
 */
static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
					    struct xdp_buff *xdp)
{
	u32 verdict;

	/* Driver XDP hooks are invoked within a single NAPI poll cycle and thus
	 * under local_bh_disable(), which provides the needed RCU protection
	 * for accessing map entries.
	 */
	verdict = __bpf_prog_run(prog, xdp, BPF_DISPATCHER_FUNC(xdp));

	if (static_branch_unlikely(&bpf_master_redirect_enabled_key) &&
	    verdict == XDP_TX && netif_is_bond_slave(xdp->rxq->dev))
		verdict = xdp_master_redirect(xdp);

	return verdict;
}
#endif /* __LINUX_NET_XDP_H__ */
