
/include/linux/skbuff.h

https://github.com/airy09/android_kernel_sony_apq8064
C Header | 2564 lines | 1412 code | 298 blank | 854 comment | 112 complexity


   1/*
   2 *	Definitions for the 'struct sk_buff' memory handlers.
   3 *
   4 *	Authors:
   5 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
   6 *		Florian La Roche, <rzsfl@rz.uni-sb.de>
   7 *
   8 *	This program is free software; you can redistribute it and/or
   9 *	modify it under the terms of the GNU General Public License
  10 *	as published by the Free Software Foundation; either version
  11 *	2 of the License, or (at your option) any later version.
  12 */
  13
  14#ifndef _LINUX_SKBUFF_H
  15#define _LINUX_SKBUFF_H
  16
  17#include <linux/kernel.h>
  18#include <linux/kmemcheck.h>
  19#include <linux/compiler.h>
  20#include <linux/time.h>
  21#include <linux/bug.h>
  22#include <linux/cache.h>
  23
  24#include <linux/atomic.h>
  25#include <asm/types.h>
  26#include <linux/spinlock.h>
  27#include <linux/net.h>
  28#include <linux/textsearch.h>
  29#include <net/checksum.h>
  30#include <linux/rcupdate.h>
  31#include <linux/dmaengine.h>
  32#include <linux/hrtimer.h>
  33#include <linux/dma-mapping.h>
  34#include <linux/netdev_features.h>
  35
  36/* Don't change this without changing skb_csum_unnecessary! */
  37#define CHECKSUM_NONE 0
  38#define CHECKSUM_UNNECESSARY 1
  39#define CHECKSUM_COMPLETE 2
  40#define CHECKSUM_PARTIAL 3
  41
  42#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
  43				 ~(SMP_CACHE_BYTES - 1))
  44#define SKB_WITH_OVERHEAD(X)	\
  45	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
  46#define SKB_MAX_ORDER(X, ORDER) \
  47	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
  48#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
  49#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))
  50
  51/* return minimum truesize of one skb containing X bytes of data */
  52#define SKB_TRUESIZE(X) ((X) +						\
  53			 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
  54			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
  55
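/*
 * Usage sketch (illustrative, not part of the original header): what the
 * macros above add up to.  The helper name is hypothetical, and in a real
 * build it would have to sit below the struct definitions further down,
 * since SKB_TRUESIZE() expands to sizeof() on them.
 */
static inline unsigned int example_min_truesize(unsigned int payload)
{
	/* payload bytes plus the cache-aligned sk_buff and skb_shared_info
	 * overhead; this is the least that should be charged against a
	 * socket's receive buffer for such a packet. */
	return SKB_TRUESIZE(payload);
}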
  56/* A. Checksumming of received packets by device.
  57 *
  58 *	NONE: device failed to checksum this packet.
  59 *		skb->csum is undefined.
  60 *
   61 *	UNNECESSARY: device parsed the packet and claims to have verified
   62 *		the checksum. skb->csum is undefined.
   63 *	      This is a poor option, but unfortunately many vendors do it,
   64 *	      apparently with the secret goal of selling you a new device
   65 *	      once you add a new protocol (e.g. IPv6) to your host. 8)
  66 *
  67 *	COMPLETE: the most generic way. Device supplied checksum of _all_
  68 *	    the packet as seen by netif_rx in skb->csum.
  69 *	    NOTE: Even if device supports only some protocols, but
  70 *	    is able to produce some skb->csum, it MUST use COMPLETE,
  71 *	    not UNNECESSARY.
  72 *
  73 *	PARTIAL: identical to the case for output below.  This may occur
  74 *	    on a packet received directly from another Linux OS, e.g.,
  75 *	    a virtualised Linux kernel on the same host.  The packet can
  76 *	    be treated in the same way as UNNECESSARY except that on
  77 *	    output (i.e., forwarding) the checksum must be filled in
  78 *	    by the OS or the hardware.
  79 *
  80 * B. Checksumming on output.
  81 *
  82 *	NONE: skb is checksummed by protocol or csum is not required.
  83 *
  84 *	PARTIAL: device is required to csum packet as seen by hard_start_xmit
  85 *	from skb->csum_start to the end and to record the checksum
  86 *	at skb->csum_start + skb->csum_offset.
  87 *
  88 *	Device must show its capabilities in dev->features, set
  89 *	at device setup time.
   90 *	NETIF_F_HW_CSUM	- a clever device, able to checksum
   91 *			  everything.
   92 *	NETIF_F_IP_CSUM - a dumb device, able to csum only
   93 *			  TCP/UDP over IPv4. Sigh. Vendors prefer this
   94 *			  way for unknown reasons. Though, see the comment
   95 *			  above about CHECKSUM_UNNECESSARY. 8)
   96 *	NETIF_F_IPV6_CSUM - about as dumb as the previous one, but for IPv6.
  97 *
  98 *	UNNECESSARY: device will do per protocol specific csum. Protocol drivers
  99 *	that do not want net to perform the checksum calculation should use
 100 *	this flag in their outgoing skbs.
 101 *	NETIF_F_FCOE_CRC  this indicates the device can do FCoE FC CRC
 102 *			  offload. Correspondingly, the FCoE protocol driver
 103 *			  stack should use CHECKSUM_UNNECESSARY.
 104 *
 105 *	Any questions? No questions, good. 		--ANK
 106 */
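/*
 * Usage sketch (illustrative, not part of the original header): how a
 * receive path might label checksum state according to the rules above.
 * The function and parameter names are hypothetical, and in a real build
 * this would have to follow the struct sk_buff definition further down.
 */
static inline void example_rx_mark_csum(struct sk_buff *skb,
					bool hw_verified, __wsum hw_csum)
{
	if (hw_verified) {
		/* Hardware validated the checksum for this protocol. */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else if (hw_csum) {
		/* Hardware summed the whole packet; the stack verifies. */
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = hw_csum;
	} else {
		/* No help from hardware. */
		skb->ip_summed = CHECKSUM_NONE;
	}
}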
 107
 108struct net_device;
 109struct scatterlist;
 110struct pipe_inode_info;
 111
 112#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 113struct nf_conntrack {
 114	atomic_t use;
 115};
 116#endif
 117
 118#ifdef CONFIG_BRIDGE_NETFILTER
 119struct nf_bridge_info {
 120	atomic_t use;
 121	struct net_device *physindev;
 122	struct net_device *physoutdev;
 123	unsigned int mask;
 124	unsigned long data[32 / sizeof(unsigned long)];
 125};
 126#endif
 127
 128struct sk_buff_head {
 129	/* These two members must be first. */
 130	struct sk_buff	*next;
 131	struct sk_buff	*prev;
 132
 133	__u32		qlen;
 134	spinlock_t	lock;
 135};
 136
 137struct sk_buff;
 138
  139/* To allow a 64K frame to be packed as a single skb without a frag_list,
  140 * we require 64K/PAGE_SIZE pages plus 1 additional page to allow for
 141 * buffers which do not start on a page boundary.
 142 *
 143 * Since GRO uses frags we allocate at least 16 regardless of page
 144 * size.
 145 */
 146#if (65536/PAGE_SIZE + 1) < 16
 147#define MAX_SKB_FRAGS 16UL
 148#else
 149#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
 150#endif
 151
 152typedef struct skb_frag_struct skb_frag_t;
 153
 154struct skb_frag_struct {
 155	struct {
 156		struct page *p;
 157	} page;
 158#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
 159	__u32 page_offset;
 160	__u32 size;
 161#else
 162	__u16 page_offset;
 163	__u16 size;
 164#endif
 165};
 166
 167static inline unsigned int skb_frag_size(const skb_frag_t *frag)
 168{
 169	return frag->size;
 170}
 171
 172static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
 173{
 174	frag->size = size;
 175}
 176
 177static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
 178{
 179	frag->size += delta;
 180}
 181
 182static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
 183{
 184	frag->size -= delta;
 185}
 186
 187#define HAVE_HW_TIME_STAMP
 188
 189/**
 190 * struct skb_shared_hwtstamps - hardware time stamps
 191 * @hwtstamp:	hardware time stamp transformed into duration
 192 *		since arbitrary point in time
 193 * @syststamp:	hwtstamp transformed to system time base
 194 *
 195 * Software time stamps generated by ktime_get_real() are stored in
 196 * skb->tstamp. The relation between the different kinds of time
 197 * stamps is as follows:
 198 *
 199 * syststamp and tstamp can be compared against each other in
 200 * arbitrary combinations.  The accuracy of a
 201 * syststamp/tstamp/"syststamp from other device" comparison is
 202 * limited by the accuracy of the transformation into system time
 203 * base. This depends on the device driver and its underlying
 204 * hardware.
 205 *
 206 * hwtstamps can only be compared against other hwtstamps from
 207 * the same device.
 208 *
 209 * This structure is attached to packets as part of the
 210 * &skb_shared_info. Use skb_hwtstamps() to get a pointer.
 211 */
 212struct skb_shared_hwtstamps {
 213	ktime_t	hwtstamp;
 214	ktime_t	syststamp;
 215};
 216
 217/* Definitions for tx_flags in struct skb_shared_info */
 218enum {
 219	/* generate hardware time stamp */
 220	SKBTX_HW_TSTAMP = 1 << 0,
 221
 222	/* generate software time stamp */
 223	SKBTX_SW_TSTAMP = 1 << 1,
 224
 225	/* device driver is going to provide hardware time stamp */
 226	SKBTX_IN_PROGRESS = 1 << 2,
 227
 228	/* ensure the originating sk reference is available on driver level */
 229	SKBTX_DRV_NEEDS_SK_REF = 1 << 3,
 230
 231	/* device driver supports TX zero-copy buffers */
 232	SKBTX_DEV_ZEROCOPY = 1 << 4,
 233
 234	/* generate wifi status information (where possible) */
 235	SKBTX_WIFI_STATUS = 1 << 5,
 236};
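/*
 * Usage sketch (illustrative, not part of the original header): requesting
 * a hardware transmit timestamp and reading it back on completion.  The
 * function names are hypothetical; in a real build this would have to
 * follow the skb_shinfo()/skb_hwtstamps() helpers defined further down.
 */
static inline void example_request_hw_tstamp(struct sk_buff *skb)
{
	/* Ask the driver to timestamp this frame in hardware. */
	skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP;
}

static inline ktime_t example_read_hw_tstamp(struct sk_buff *skb)
{
	/* Only meaningful once the driver has filled it in; comparable
	 * only against hwtstamps from the same device. */
	return skb_hwtstamps(skb)->hwtstamp;
}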
 237
 238/*
  239 * The callback notifies userspace to release buffers when skb DMA is done
  240 * in the lower device; the skb's last reference should be 0 when it is called.
 241 * The ctx field is used to track device context.
 242 * The desc field is used to track userspace buffer index.
 243 */
 244struct ubuf_info {
 245	void (*callback)(struct ubuf_info *);
 246	void *ctx;
 247	unsigned long desc;
 248};
 249
 250/* This data is invariant across clones and lives at
 251 * the end of the header data, ie. at skb->end.
 252 */
 253struct skb_shared_info {
 254	unsigned char	nr_frags;
 255	__u8		tx_flags;
 256	unsigned short	gso_size;
 257	/* Warning: this field is not always filled in (UFO)! */
 258	unsigned short	gso_segs;
 259	unsigned short  gso_type;
 260	struct sk_buff	*frag_list;
 261	struct skb_shared_hwtstamps hwtstamps;
 262	__be32          ip6_frag_id;
 263
 264	/*
 265	 * Warning : all fields before dataref are cleared in __alloc_skb()
 266	 */
 267	atomic_t	dataref;
 268
 269	/* Intermediate layers must ensure that destructor_arg
 270	 * remains valid until skb destructor */
 271	void *		destructor_arg;
 272
 273	/* must be last field, see pskb_expand_head() */
 274	skb_frag_t	frags[MAX_SKB_FRAGS];
 275};
 276
 277/* We divide dataref into two halves.  The higher 16 bits hold references
 278 * to the payload part of skb->data.  The lower 16 bits hold references to
 279 * the entire skb->data.  A clone of a headerless skb holds the length of
 280 * the header in skb->hdr_len.
 281 *
 282 * All users must obey the rule that the skb->data reference count must be
 283 * greater than or equal to the payload reference count.
 284 *
 285 * Holding a reference to the payload part means that the user does not
 286 * care about modifications to the header part of skb->data.
 287 */
 288#define SKB_DATAREF_SHIFT 16
 289#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
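/*
 * Usage sketch (illustrative, not part of the original header): splitting
 * dataref into the two halves described above.  The helper name is
 * hypothetical and depends on skb_shinfo(), defined further down.
 */
static inline void example_read_dataref(const struct sk_buff *skb,
					int *payload_refs, int *total_refs)
{
	int dataref = atomic_read(&skb_shinfo(skb)->dataref);

	*payload_refs = dataref >> SKB_DATAREF_SHIFT;	/* upper 16 bits */
	*total_refs   = dataref & SKB_DATAREF_MASK;	/* lower 16 bits */
}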
 290
 291
 292enum {
 293	SKB_FCLONE_UNAVAILABLE,
 294	SKB_FCLONE_ORIG,
 295	SKB_FCLONE_CLONE,
 296};
 297
 298enum {
 299	SKB_GSO_TCPV4 = 1 << 0,
 300	SKB_GSO_UDP = 1 << 1,
 301
 302	/* This indicates the skb is from an untrusted source. */
 303	SKB_GSO_DODGY = 1 << 2,
 304
 305	/* This indicates the tcp segment has CWR set. */
 306	SKB_GSO_TCP_ECN = 1 << 3,
 307
 308	SKB_GSO_TCPV6 = 1 << 4,
 309
 310	SKB_GSO_FCOE = 1 << 5,
 311};
 312
 313#if BITS_PER_LONG > 32
 314#define NET_SKBUFF_DATA_USES_OFFSET 1
 315#endif
 316
 317#ifdef NET_SKBUFF_DATA_USES_OFFSET
 318typedef unsigned int sk_buff_data_t;
 319#else
 320typedef unsigned char *sk_buff_data_t;
 321#endif
 322
 323#if defined(CONFIG_NF_DEFRAG_IPV4) || defined(CONFIG_NF_DEFRAG_IPV4_MODULE) || \
 324    defined(CONFIG_NF_DEFRAG_IPV6) || defined(CONFIG_NF_DEFRAG_IPV6_MODULE)
 325#define NET_SKBUFF_NF_DEFRAG_NEEDED 1
 326#endif
 327
 328/** 
 329 *	struct sk_buff - socket buffer
 330 *	@next: Next buffer in list
 331 *	@prev: Previous buffer in list
 332 *	@tstamp: Time we arrived
 333 *	@sk: Socket we are owned by
 334 *	@dev: Device we arrived on/are leaving by
 335 *	@cb: Control buffer. Free for use by every layer. Put private vars here
 336 *	@_skb_refdst: destination entry (with norefcount bit)
 337 *	@sp: the security path, used for xfrm
 338 *	@len: Length of actual data
 339 *	@data_len: Data length
 340 *	@mac_len: Length of link layer header
 341 *	@hdr_len: writable header length of cloned skb
 342 *	@csum: Checksum (must include start/offset pair)
 343 *	@csum_start: Offset from skb->head where checksumming should start
 344 *	@csum_offset: Offset from csum_start where checksum should be stored
 345 *	@priority: Packet queueing priority
 346 *	@local_df: allow local fragmentation
 347 *	@cloned: Head may be cloned (check refcnt to be sure)
 348 *	@ip_summed: Driver fed us an IP checksum
 349 *	@nohdr: Payload reference only, must not modify header
 350 *	@nfctinfo: Relationship of this skb to the connection
 351 *	@pkt_type: Packet class
 352 *	@fclone: skbuff clone status
 353 *	@ipvs_property: skbuff is owned by ipvs
 354 *	@peeked: this packet has been seen already, so stats have been
 355 *		done for it, don't do them again
 356 *	@nf_trace: netfilter packet trace flag
 357 *	@protocol: Packet protocol from driver
 358 *	@destructor: Destruct function
 359 *	@nfct: Associated connection, if any
 360 *	@nfct_reasm: netfilter conntrack re-assembly pointer
 361 *	@nf_bridge: Saved data about a bridged frame - see br_netfilter.c
 362 *	@skb_iif: ifindex of device we arrived on
 363 *	@tc_index: Traffic control index
 364 *	@tc_verd: traffic control verdict
 365 *	@rxhash: the packet hash computed on receive
 366 *	@queue_mapping: Queue mapping for multiqueue devices
 367 *	@ndisc_nodetype: router type (from link layer)
 368 *	@ooo_okay: allow the mapping of a socket to a queue to be changed
 369 *	@l4_rxhash: indicate rxhash is a canonical 4-tuple hash over transport
 370 *		ports.
 371 *	@wifi_acked_valid: wifi_acked was set
 372 *	@wifi_acked: whether frame was acked on wifi or not
 373 *	@no_fcs:  Request NIC to treat last 4 bytes as Ethernet FCS
 374 *	@dma_cookie: a cookie to one of several possible DMA operations
 375 *		done by skb DMA functions
 376 *	@secmark: security marking
 377 *	@mark: Generic packet mark
 378 *	@dropcount: total number of sk_receive_queue overflows
 379 *	@vlan_tci: vlan tag control information
 380 *	@transport_header: Transport layer header
 381 *	@network_header: Network layer header
 382 *	@mac_header: Link layer header
 383 *	@tail: Tail pointer
 384 *	@end: End pointer
 385 *	@head: Head of buffer
 386 *	@data: Data head pointer
 387 *	@truesize: Buffer size
 388 *	@users: User count - see {datagram,tcp}.c
 389 */
 390
 391struct sk_buff {
 392	/* These two members must be first. */
 393	struct sk_buff		*next;
 394	struct sk_buff		*prev;
 395
 396	ktime_t			tstamp;
 397
 398	struct sock		*sk;
 399	struct net_device	*dev;
 400
 401	/*
 402	 * This is the control buffer. It is free to use for every
 403	 * layer. Please put your private variables there. If you
 404	 * want to keep them across layers you have to do a skb_clone()
 405	 * first. This is owned by whoever has the skb queued ATM.
 406	 */
 407	char			cb[48] __aligned(8);
 408
 409	unsigned long		_skb_refdst;
 410#ifdef CONFIG_XFRM
 411	struct	sec_path	*sp;
 412#endif
 413	unsigned int		len,
 414				data_len;
 415	__u16			mac_len,
 416				hdr_len;
 417	union {
 418		__wsum		csum;
 419		struct {
 420			__u16	csum_start;
 421			__u16	csum_offset;
 422		};
 423	};
 424	__u32			priority;
 425	kmemcheck_bitfield_begin(flags1);
 426	__u8			local_df:1,
 427				cloned:1,
 428				ip_summed:2,
 429				nohdr:1,
 430				nfctinfo:3;
 431	__u8			pkt_type:3,
 432				fclone:2,
 433				ipvs_property:1,
 434				peeked:1,
 435				nf_trace:1;
 436	kmemcheck_bitfield_end(flags1);
 437	__be16			protocol;
 438
 439	void			(*destructor)(struct sk_buff *skb);
 440#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 441	struct nf_conntrack	*nfct;
 442#endif
 443#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
 444	struct sk_buff		*nfct_reasm;
 445#endif
 446#ifdef CONFIG_BRIDGE_NETFILTER
 447	struct nf_bridge_info	*nf_bridge;
 448#endif
 449
 450	int			skb_iif;
 451
 452	__u32			rxhash;
 453
 454	__u16			vlan_tci;
 455
 456#ifdef CONFIG_NET_SCHED
 457	__u16			tc_index;	/* traffic control index */
 458#ifdef CONFIG_NET_CLS_ACT
 459	__u16			tc_verd;	/* traffic control verdict */
 460#endif
 461#endif
 462
 463	__u16			queue_mapping;
 464	kmemcheck_bitfield_begin(flags2);
 465#ifdef CONFIG_IPV6_NDISC_NODETYPE
 466	__u8			ndisc_nodetype:2;
 467#endif
 468	__u8			ooo_okay:1;
 469	__u8			l4_rxhash:1;
 470	__u8			wifi_acked_valid:1;
 471	__u8			wifi_acked:1;
 472	__u8			no_fcs:1;
 473	/* 9/11 bit hole (depending on ndisc_nodetype presence) */
 474	kmemcheck_bitfield_end(flags2);
 475
 476#ifdef CONFIG_NET_DMA
 477	dma_cookie_t		dma_cookie;
 478#endif
 479#ifdef CONFIG_NETWORK_SECMARK
 480	__u32			secmark;
 481#endif
 482	union {
 483		__u32		mark;
 484		__u32		dropcount;
 485		__u32		avail_size;
 486	};
 487
 488	sk_buff_data_t		transport_header;
 489	sk_buff_data_t		network_header;
 490	sk_buff_data_t		mac_header;
 491	/* These elements must be at the end, see alloc_skb() for details.  */
 492	sk_buff_data_t		tail;
 493	sk_buff_data_t		end;
 494	unsigned char		*head,
 495				*data;
 496	unsigned int		truesize;
 497	atomic_t		users;
 498};
 499
 500#ifdef __KERNEL__
 501/*
 502 *	Handling routines are only of interest to the kernel
 503 */
 504#include <linux/slab.h>
 505
 506
 507/*
 508 * skb might have a dst pointer attached, refcounted or not.
 509 * _skb_refdst low order bit is set if refcount was _not_ taken
 510 */
 511#define SKB_DST_NOREF	1UL
 512#define SKB_DST_PTRMASK	~(SKB_DST_NOREF)
 513
 514/**
 515 * skb_dst - returns skb dst_entry
 516 * @skb: buffer
 517 *
 518 * Returns skb dst_entry, regardless of reference taken or not.
 519 */
 520static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
 521{
  522	/* If refdst was not refcounted, check that we are still inside an
 523	 * rcu_read_lock section
 524	 */
 525	WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
 526		!rcu_read_lock_held() &&
 527		!rcu_read_lock_bh_held());
 528	return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
 529}
 530
 531/**
 532 * skb_dst_set - sets skb dst
 533 * @skb: buffer
 534 * @dst: dst entry
 535 *
 536 * Sets skb dst, assuming a reference was taken on dst and should
 537 * be released by skb_dst_drop()
 538 */
 539static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
 540{
 541	skb->_skb_refdst = (unsigned long)dst;
 542}
 543
 544extern void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst);
 545
 546/**
 547 * skb_dst_is_noref - Test if skb dst isn't refcounted
 548 * @skb: buffer
 549 */
 550static inline bool skb_dst_is_noref(const struct sk_buff *skb)
 551{
 552	return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
 553}
 554
 555static inline struct rtable *skb_rtable(const struct sk_buff *skb)
 556{
 557	return (struct rtable *)skb_dst(skb);
 558}
 559
 560extern void kfree_skb(struct sk_buff *skb);
 561extern void consume_skb(struct sk_buff *skb);
 562extern void	       __kfree_skb(struct sk_buff *skb);
 563extern struct sk_buff *__alloc_skb(unsigned int size,
 564				   gfp_t priority, int fclone, int node);
 565extern struct sk_buff *build_skb(void *data);
 566static inline struct sk_buff *alloc_skb(unsigned int size,
 567					gfp_t priority)
 568{
 569	return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
 570}
 571
 572static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
 573					       gfp_t priority)
 574{
 575	return __alloc_skb(size, priority, 1, NUMA_NO_NODE);
 576}
 577
 578extern void skb_recycle(struct sk_buff *skb);
 579extern bool skb_recycle_check(struct sk_buff *skb, int skb_size);
 580
 581extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
 582extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
 583extern struct sk_buff *skb_clone(struct sk_buff *skb,
 584				 gfp_t priority);
 585extern struct sk_buff *skb_copy(const struct sk_buff *skb,
 586				gfp_t priority);
 587extern struct sk_buff *__pskb_copy(struct sk_buff *skb,
 588				 int headroom, gfp_t gfp_mask);
 589
 590extern int	       pskb_expand_head(struct sk_buff *skb,
 591					int nhead, int ntail,
 592					gfp_t gfp_mask);
 593extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
 594					    unsigned int headroom);
 595extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
 596				       int newheadroom, int newtailroom,
 597				       gfp_t priority);
 598extern int	       skb_to_sgvec(struct sk_buff *skb,
 599				    struct scatterlist *sg, int offset,
 600				    int len);
 601extern int	       skb_cow_data(struct sk_buff *skb, int tailbits,
 602				    struct sk_buff **trailer);
 603extern int	       skb_pad(struct sk_buff *skb, int pad);
 604#define dev_kfree_skb(a)	consume_skb(a)
 605
 606extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
 607			int getfrag(void *from, char *to, int offset,
 608			int len,int odd, struct sk_buff *skb),
 609			void *from, int length);
 610
 611struct skb_seq_state {
 612	__u32		lower_offset;
 613	__u32		upper_offset;
 614	__u32		frag_idx;
 615	__u32		stepped_offset;
 616	struct sk_buff	*root_skb;
 617	struct sk_buff	*cur_skb;
 618	__u8		*frag_data;
 619};
 620
 621extern void	      skb_prepare_seq_read(struct sk_buff *skb,
 622					   unsigned int from, unsigned int to,
 623					   struct skb_seq_state *st);
 624extern unsigned int   skb_seq_read(unsigned int consumed, const u8 **data,
 625				   struct skb_seq_state *st);
 626extern void	      skb_abort_seq_read(struct skb_seq_state *st);
 627
 628extern unsigned int   skb_find_text(struct sk_buff *skb, unsigned int from,
 629				    unsigned int to, struct ts_config *config,
 630				    struct ts_state *state);
 631
 632extern void __skb_get_rxhash(struct sk_buff *skb);
 633static inline __u32 skb_get_rxhash(struct sk_buff *skb)
 634{
 635	if (!skb->rxhash)
 636		__skb_get_rxhash(skb);
 637
 638	return skb->rxhash;
 639}
 640
 641#ifdef NET_SKBUFF_DATA_USES_OFFSET
 642static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
 643{
 644	return skb->head + skb->end;
 645}
 646#else
 647static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
 648{
 649	return skb->end;
 650}
 651#endif
 652
 653/* Internal */
 654#define skb_shinfo(SKB)	((struct skb_shared_info *)(skb_end_pointer(SKB)))
 655
 656static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
 657{
 658	return &skb_shinfo(skb)->hwtstamps;
 659}
 660
 661/**
 662 *	skb_queue_empty - check if a queue is empty
 663 *	@list: queue head
 664 *
 665 *	Returns true if the queue is empty, false otherwise.
 666 */
 667static inline int skb_queue_empty(const struct sk_buff_head *list)
 668{
 669	return list->next == (struct sk_buff *)list;
 670}
 671
 672/**
 673 *	skb_queue_is_last - check if skb is the last entry in the queue
 674 *	@list: queue head
 675 *	@skb: buffer
 676 *
 677 *	Returns true if @skb is the last buffer on the list.
 678 */
 679static inline bool skb_queue_is_last(const struct sk_buff_head *list,
 680				     const struct sk_buff *skb)
 681{
 682	return skb->next == (struct sk_buff *)list;
 683}
 684
 685/**
 686 *	skb_queue_is_first - check if skb is the first entry in the queue
 687 *	@list: queue head
 688 *	@skb: buffer
 689 *
 690 *	Returns true if @skb is the first buffer on the list.
 691 */
 692static inline bool skb_queue_is_first(const struct sk_buff_head *list,
 693				      const struct sk_buff *skb)
 694{
 695	return skb->prev == (struct sk_buff *)list;
 696}
 697
 698/**
 699 *	skb_queue_next - return the next packet in the queue
 700 *	@list: queue head
 701 *	@skb: current buffer
 702 *
 703 *	Return the next packet in @list after @skb.  It is only valid to
 704 *	call this if skb_queue_is_last() evaluates to false.
 705 */
 706static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
 707					     const struct sk_buff *skb)
 708{
 709	/* This BUG_ON may seem severe, but if we just return then we
 710	 * are going to dereference garbage.
 711	 */
 712	BUG_ON(skb_queue_is_last(list, skb));
 713	return skb->next;
 714}
 715
 716/**
 717 *	skb_queue_prev - return the prev packet in the queue
 718 *	@list: queue head
 719 *	@skb: current buffer
 720 *
 721 *	Return the prev packet in @list before @skb.  It is only valid to
 722 *	call this if skb_queue_is_first() evaluates to false.
 723 */
 724static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
 725					     const struct sk_buff *skb)
 726{
 727	/* This BUG_ON may seem severe, but if we just return then we
 728	 * are going to dereference garbage.
 729	 */
 730	BUG_ON(skb_queue_is_first(list, skb));
 731	return skb->prev;
 732}
 733
 734/**
 735 *	skb_get - reference buffer
 736 *	@skb: buffer to reference
 737 *
 738 *	Makes another reference to a socket buffer and returns a pointer
 739 *	to the buffer.
 740 */
 741static inline struct sk_buff *skb_get(struct sk_buff *skb)
 742{
 743	atomic_inc(&skb->users);
 744	return skb;
 745}
 746
 747/*
  749 * If users == 1, we are the only owner and can avoid redundant
 749 * atomic change.
 750 */
 751
 752/**
 753 *	skb_cloned - is the buffer a clone
 754 *	@skb: buffer to check
 755 *
 756 *	Returns true if the buffer was generated with skb_clone() and is
 757 *	one of multiple shared copies of the buffer. Cloned buffers are
 758 *	shared data so must not be written to under normal circumstances.
 759 */
 760static inline int skb_cloned(const struct sk_buff *skb)
 761{
 762	return skb->cloned &&
 763	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
 764}
 765
 766/**
 767 *	skb_header_cloned - is the header a clone
 768 *	@skb: buffer to check
 769 *
 770 *	Returns true if modifying the header part of the buffer requires
 771 *	the data to be copied.
 772 */
 773static inline int skb_header_cloned(const struct sk_buff *skb)
 774{
 775	int dataref;
 776
 777	if (!skb->cloned)
 778		return 0;
 779
 780	dataref = atomic_read(&skb_shinfo(skb)->dataref);
 781	dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
 782	return dataref != 1;
 783}
 784
 785/**
 786 *	skb_header_release - release reference to header
 787 *	@skb: buffer to operate on
 788 *
 789 *	Drop a reference to the header part of the buffer.  This is done
 790 *	by acquiring a payload reference.  You must not read from the header
 791 *	part of skb->data after this.
 792 */
 793static inline void skb_header_release(struct sk_buff *skb)
 794{
 795	BUG_ON(skb->nohdr);
 796	skb->nohdr = 1;
 797	atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
 798}
 799
 800/**
 801 *	skb_shared - is the buffer shared
 802 *	@skb: buffer to check
 803 *
 804 *	Returns true if more than one person has a reference to this
 805 *	buffer.
 806 */
 807static inline int skb_shared(const struct sk_buff *skb)
 808{
 809	return atomic_read(&skb->users) != 1;
 810}
 811
 812/**
 813 *	skb_share_check - check if buffer is shared and if so clone it
 814 *	@skb: buffer to check
 815 *	@pri: priority for memory allocation
 816 *
 817 *	If the buffer is shared the buffer is cloned and the old copy
 818 *	drops a reference. A new clone with a single reference is returned.
 819 *	If the buffer is not shared the original buffer is returned. When
  820 *	called from interrupt context or with spinlocks held, @pri must
  821 *	be %GFP_ATOMIC.
 822 *
 823 *	NULL is returned on a memory allocation failure.
 824 */
 825static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
 826					      gfp_t pri)
 827{
 828	might_sleep_if(pri & __GFP_WAIT);
 829	if (skb_shared(skb)) {
 830		struct sk_buff *nskb = skb_clone(skb, pri);
 831		kfree_skb(skb);
 832		skb = nskb;
 833	}
 834	return skb;
 835}
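/*
 * Usage sketch (illustrative, not part of the original header): the usual
 * pattern in a receive handler that may modify skb metadata.  The function
 * name is hypothetical.
 */
static inline struct sk_buff *example_rx_prepare(struct sk_buff *skb)
{
	/* If somebody else also holds this skb, switch to a private clone;
	 * on allocation failure the original has already been released. */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return NULL;

	/* safe to update skb metadata (e.g. skb->dev, skb->pkt_type) now */
	return skb;
}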
 836
 837/*
 838 *	Copy shared buffers into a new sk_buff. We effectively do COW on
  839 *	packets to handle cases where we have a local reader as well as a
  840 *	forwarding path, and a couple of other messy ones. The usual case is
  841 *	tcpdumping a packet that is being forwarded.
 842 */
 843
 844/**
 845 *	skb_unshare - make a copy of a shared buffer
 846 *	@skb: buffer to check
 847 *	@pri: priority for memory allocation
 848 *
 849 *	If the socket buffer is a clone then this function creates a new
 850 *	copy of the data, drops a reference count on the old copy and returns
 851 *	the new copy with the reference count at 1. If the buffer is not a clone
 852 *	the original buffer is returned. When called with a spinlock held or
  853 *	from interrupt context, @pri must be %GFP_ATOMIC.
 854 *
 855 *	%NULL is returned on a memory allocation failure.
 856 */
 857static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
 858					  gfp_t pri)
 859{
 860	might_sleep_if(pri & __GFP_WAIT);
 861	if (skb_cloned(skb)) {
 862		struct sk_buff *nskb = skb_copy(skb, pri);
 863		kfree_skb(skb);	/* Free our shared copy */
 864		skb = nskb;
 865	}
 866	return skb;
 867}
 868
 869/**
 870 *	skb_peek - peek at the head of an &sk_buff_head
 871 *	@list_: list to peek at
 872 *
 873 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 874 *	be careful with this one. A peek leaves the buffer on the
 875 *	list and someone else may run off with it. You must hold
 876 *	the appropriate locks or have a private queue to do this.
 877 *
 878 *	Returns %NULL for an empty list or a pointer to the head element.
 879 *	The reference count is not incremented and the reference is therefore
 880 *	volatile. Use with caution.
 881 */
 882static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
 883{
 884	struct sk_buff *list = ((const struct sk_buff *)list_)->next;
 885	if (list == (struct sk_buff *)list_)
 886		list = NULL;
 887	return list;
 888}
 889
 890/**
 891 *	skb_peek_next - peek skb following the given one from a queue
 892 *	@skb: skb to start from
 893 *	@list_: list to peek at
 894 *
 895 *	Returns %NULL when the end of the list is met or a pointer to the
 896 *	next element. The reference count is not incremented and the
 897 *	reference is therefore volatile. Use with caution.
 898 */
 899static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
 900		const struct sk_buff_head *list_)
 901{
 902	struct sk_buff *next = skb->next;
 903	if (next == (struct sk_buff *)list_)
 904		next = NULL;
 905	return next;
 906}
 907
 908/**
 909 *	skb_peek_tail - peek at the tail of an &sk_buff_head
 910 *	@list_: list to peek at
 911 *
 912 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 913 *	be careful with this one. A peek leaves the buffer on the
 914 *	list and someone else may run off with it. You must hold
 915 *	the appropriate locks or have a private queue to do this.
 916 *
 917 *	Returns %NULL for an empty list or a pointer to the tail element.
 918 *	The reference count is not incremented and the reference is therefore
 919 *	volatile. Use with caution.
 920 */
 921static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
 922{
 923	struct sk_buff *list = ((const struct sk_buff *)list_)->prev;
 924	if (list == (struct sk_buff *)list_)
 925		list = NULL;
 926	return list;
 927}
 928
 929/**
 930 *	skb_queue_len	- get queue length
 931 *	@list_: list to measure
 932 *
 933 *	Return the length of an &sk_buff queue.
 934 */
 935static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
 936{
 937	return list_->qlen;
 938}
 939
 940/**
 941 *	__skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
 942 *	@list: queue to initialize
 943 *
 944 *	This initializes only the list and queue length aspects of
  945 *	an sk_buff_head object.  This allows initializing the list
 946 *	aspects of an sk_buff_head without reinitializing things like
 947 *	the spinlock.  It can also be used for on-stack sk_buff_head
 948 *	objects where the spinlock is known to not be used.
 949 */
 950static inline void __skb_queue_head_init(struct sk_buff_head *list)
 951{
 952	list->prev = list->next = (struct sk_buff *)list;
 953	list->qlen = 0;
 954}
 955
 956/*
 957 * This function creates a split out lock class for each invocation;
 958 * this is needed for now since a whole lot of users of the skb-queue
 959 * infrastructure in drivers have different locking usage (in hardirq)
 960 * than the networking core (in softirq only). In the long run either the
  961 * network layer or the drivers will need annotations to consolidate the
 962 * main types of usage into 3 classes.
 963 */
 964static inline void skb_queue_head_init(struct sk_buff_head *list)
 965{
 966	spin_lock_init(&list->lock);
 967	__skb_queue_head_init(list);
 968}
 969
 970static inline void skb_queue_head_init_class(struct sk_buff_head *list,
 971		struct lock_class_key *class)
 972{
 973	skb_queue_head_init(list);
 974	lockdep_set_class(&list->lock, class);
 975}
 976
 977/*
 978 *	Insert an sk_buff on a list.
 979 *
 980 *	The "__skb_xxxx()" functions are the non-atomic ones that
 981 *	can only be called with interrupts disabled.
 982 */
 983extern void        skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
 984static inline void __skb_insert(struct sk_buff *newsk,
 985				struct sk_buff *prev, struct sk_buff *next,
 986				struct sk_buff_head *list)
 987{
 988	newsk->next = next;
 989	newsk->prev = prev;
 990	next->prev  = prev->next = newsk;
 991	list->qlen++;
 992}
 993
 994static inline void __skb_queue_splice(const struct sk_buff_head *list,
 995				      struct sk_buff *prev,
 996				      struct sk_buff *next)
 997{
 998	struct sk_buff *first = list->next;
 999	struct sk_buff *last = list->prev;
1000
1001	first->prev = prev;
1002	prev->next = first;
1003
1004	last->next = next;
1005	next->prev = last;
1006}
1007
1008/**
1009 *	skb_queue_splice - join two skb lists, this is designed for stacks
1010 *	@list: the new list to add
1011 *	@head: the place to add it in the first list
1012 */
1013static inline void skb_queue_splice(const struct sk_buff_head *list,
1014				    struct sk_buff_head *head)
1015{
1016	if (!skb_queue_empty(list)) {
1017		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
1018		head->qlen += list->qlen;
1019	}
1020}
1021
1022/**
1023 *	skb_queue_splice_init - join two skb lists and reinitialise the emptied list
1024 *	@list: the new list to add
1025 *	@head: the place to add it in the first list
1026 *
1027 *	The list at @list is reinitialised
1028 */
1029static inline void skb_queue_splice_init(struct sk_buff_head *list,
1030					 struct sk_buff_head *head)
1031{
1032	if (!skb_queue_empty(list)) {
1033		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
1034		head->qlen += list->qlen;
1035		__skb_queue_head_init(list);
1036	}
1037}
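/*
 * Usage sketch (illustrative, not part of the original header): splice a
 * locked queue onto a private on-stack list in O(1), then work on it
 * without the lock held.  The function name is hypothetical, and
 * __skb_dequeue() used here is declared further down in this file.
 */
static inline void example_drain_queue(struct sk_buff_head *queue)
{
	struct sk_buff_head tmp;
	struct sk_buff *skb;
	unsigned long flags;

	__skb_queue_head_init(&tmp);

	spin_lock_irqsave(&queue->lock, flags);
	skb_queue_splice_init(queue, &tmp);
	spin_unlock_irqrestore(&queue->lock, flags);

	while ((skb = __skb_dequeue(&tmp)) != NULL)
		kfree_skb(skb);	/* a real user would process each skb here */
}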
1038
1039/**
1040 *	skb_queue_splice_tail - join two skb lists, each list being a queue
1041 *	@list: the new list to add
1042 *	@head: the place to add it in the first list
1043 */
1044static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
1045					 struct sk_buff_head *head)
1046{
1047	if (!skb_queue_empty(list)) {
1048		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
1049		head->qlen += list->qlen;
1050	}
1051}
1052
1053/**
1054 *	skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
1055 *	@list: the new list to add
1056 *	@head: the place to add it in the first list
1057 *
1058 *	Each of the lists is a queue.
1059 *	The list at @list is reinitialised
1060 */
1061static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
1062					      struct sk_buff_head *head)
1063{
1064	if (!skb_queue_empty(list)) {
1065		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
1066		head->qlen += list->qlen;
1067		__skb_queue_head_init(list);
1068	}
1069}
1070
1071/**
 1072 *	__skb_queue_after - queue a buffer after the given buffer
1073 *	@list: list to use
1074 *	@prev: place after this buffer
1075 *	@newsk: buffer to queue
1076 *
 1077 *	Queue a buffer in the middle of a list. This function takes no locks
1078 *	and you must therefore hold required locks before calling it.
1079 *
1080 *	A buffer cannot be placed on two lists at the same time.
1081 */
1082static inline void __skb_queue_after(struct sk_buff_head *list,
1083				     struct sk_buff *prev,
1084				     struct sk_buff *newsk)
1085{
1086	__skb_insert(newsk, prev, prev->next, list);
1087}
1088
1089extern void skb_append(struct sk_buff *old, struct sk_buff *newsk,
1090		       struct sk_buff_head *list);
1091
1092static inline void __skb_queue_before(struct sk_buff_head *list,
1093				      struct sk_buff *next,
1094				      struct sk_buff *newsk)
1095{
1096	__skb_insert(newsk, next->prev, next, list);
1097}
1098
1099/**
1100 *	__skb_queue_head - queue a buffer at the list head
1101 *	@list: list to use
1102 *	@newsk: buffer to queue
1103 *
1104 *	Queue a buffer at the start of a list. This function takes no locks
1105 *	and you must therefore hold required locks before calling it.
1106 *
1107 *	A buffer cannot be placed on two lists at the same time.
1108 */
1109extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
1110static inline void __skb_queue_head(struct sk_buff_head *list,
1111				    struct sk_buff *newsk)
1112{
1113	__skb_queue_after(list, (struct sk_buff *)list, newsk);
1114}
1115
1116/**
1117 *	__skb_queue_tail - queue a buffer at the list tail
1118 *	@list: list to use
1119 *	@newsk: buffer to queue
1120 *
1121 *	Queue a buffer at the end of a list. This function takes no locks
1122 *	and you must therefore hold required locks before calling it.
1123 *
1124 *	A buffer cannot be placed on two lists at the same time.
1125 */
1126extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
1127static inline void __skb_queue_tail(struct sk_buff_head *list,
1128				   struct sk_buff *newsk)
1129{
1130	__skb_queue_before(list, (struct sk_buff *)list, newsk);
1131}
1132
1133/*
1134 * remove sk_buff from list. _Must_ be called atomically, and with
 1135 * the list known.
1136 */
1137extern void	   skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
1138static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
1139{
1140	struct sk_buff *next, *prev;
1141
1142	list->qlen--;
1143	next	   = skb->next;
1144	prev	   = skb->prev;
1145	skb->next  = skb->prev = NULL;
1146	next->prev = prev;
1147	prev->next = next;
1148}
1149
1150/**
1151 *	__skb_dequeue - remove from the head of the queue
1152 *	@list: list to dequeue from
1153 *
1154 *	Remove the head of the list. This function does not take any locks
1155 *	so must be used with appropriate locks held only. The head item is
1156 *	returned or %NULL if the list is empty.
1157 */
1158extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
1159static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
1160{
1161	struct sk_buff *skb = skb_peek(list);
1162	if (skb)
1163		__skb_unlink(skb, list);
1164	return skb;
1165}
1166
1167/**
1168 *	__skb_dequeue_tail - remove from the tail of the queue
1169 *	@list: list to dequeue from
1170 *
1171 *	Remove the tail of the list. This function does not take any locks
1172 *	so must be used with appropriate locks held only. The tail item is
1173 *	returned or %NULL if the list is empty.
1174 */
1175extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
1176static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
1177{
1178	struct sk_buff *skb = skb_peek_tail(list);
1179	if (skb)
1180		__skb_unlink(skb, list);
1181	return skb;
1182}
1183
1184
1185static inline bool skb_is_nonlinear(const struct sk_buff *skb)
1186{
1187	return skb->data_len;
1188}
1189
1190static inline unsigned int skb_headlen(const struct sk_buff *skb)
1191{
1192	return skb->len - skb->data_len;
1193}
1194
1195static inline int skb_pagelen(const struct sk_buff *skb)
1196{
1197	int i, len = 0;
1198
1199	for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
1200		len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
1201	return len + skb_headlen(skb);
1202}
1203
1204/**
1205 * __skb_fill_page_desc - initialise a paged fragment in an skb
1206 * @skb: buffer containing fragment to be initialised
1207 * @i: paged fragment index to initialise
1208 * @page: the page to use for this fragment
 1209 * @off: the offset to the data within @page
1210 * @size: the length of the data
1211 *
 1212 * Initialises the @i'th fragment of @skb to point to @size bytes at
1213 * offset @off within @page.
1214 *
1215 * Does not take any additional reference on the fragment.
1216 */
1217static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
1218					struct page *page, int off, int size)
1219{
1220	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1221
1222	frag->page.p		  = page;
1223	frag->page_offset	  = off;
1224	skb_frag_size_set(frag, size);
1225}
1226
1227/**
1228 * skb_fill_page_desc - initialise a paged fragment in an skb
1229 * @skb: buffer containing fragment to be initialised
1230 * @i: paged fragment index to initialise
1231 * @page: the page to use for this fragment
 1232 * @off: the offset to the data within @page
1233 * @size: the length of the data
1234 *
1235 * As per __skb_fill_page_desc() -- initialises the @i'th fragment of
 1236 * @skb to point to @size bytes at offset @off within @page. In
1237 * addition updates @skb such that @i is the last fragment.
1238 *
1239 * Does not take any additional reference on the fragment.
1240 */
1241static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
1242				      struct page *page, int off, int size)
1243{
1244	__skb_fill_page_desc(skb, i, page, off, size);
1245	skb_shinfo(skb)->nr_frags = i + 1;
1246}
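/*
 * Usage sketch (illustrative, not part of the original header): attach a
 * page fragment to an skb and keep len/data_len/truesize consistent by
 * hand; this mirrors what skb_add_rx_frag(), declared just below, does.
 * The function name is hypothetical and the caller is assumed to have
 * taken a reference on @page for the skb to own.
 */
static inline void example_attach_page(struct sk_buff *skb,
				       struct page *page, unsigned int size)
{
	int i = skb_shinfo(skb)->nr_frags;

	skb_fill_page_desc(skb, i, page, 0, size);

	skb->len      += size;
	skb->data_len += size;
	skb->truesize += PAGE_SIZE;
}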
1247
1248extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
1249			    int off, int size, unsigned int truesize);
1250
1251#define SKB_PAGE_ASSERT(skb) 	BUG_ON(skb_shinfo(skb)->nr_frags)
1252#define SKB_FRAG_ASSERT(skb) 	BUG_ON(skb_has_frag_list(skb))
1253#define SKB_LINEAR_ASSERT(skb)  BUG_ON(skb_is_nonlinear(skb))
1254
1255#ifdef NET_SKBUFF_DATA_USES_OFFSET
1256static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
1257{
1258	return skb->head + skb->tail;
1259}
1260
1261static inline void skb_reset_tail_pointer(struct sk_buff *skb)
1262{
1263	skb->tail = skb->data - skb->head;
1264}
1265
1266static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
1267{
1268	skb_reset_tail_pointer(skb);
1269	skb->tail += offset;
1270}
1271#else /* NET_SKBUFF_DATA_USES_OFFSET */
1272static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
1273{
1274	return skb->tail;
1275}
1276
1277static inline void skb_reset_tail_pointer(struct sk_buff *skb)
1278{
1279	skb->tail = skb->data;
1280}
1281
1282static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
1283{
1284	skb->tail = skb->data + offset;
1285}
1286
1287#endif /* NET_SKBUFF_DATA_USES_OFFSET */
1288
1289/*
1290 *	Add data to an sk_buff
1291 */
1292extern unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
1293static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
1294{
1295	unsigned char *tmp = skb_tail_pointer(skb);
1296	SKB_LINEAR_ASSERT(skb);
1297	skb->tail += len;
1298	skb->len  += len;
1299	return tmp;
1300}
1301
1302extern unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
1303static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
1304{
1305	skb->data -= len;
1306	skb->len  += len;
1307	return skb->data;
1308}
1309
1310extern unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
1311static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
1312{
1313	skb->len -= len;
1314	BUG_ON(skb->len < skb->data_len);
1315	return skb->data += len;
1316}
1317
1318static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int len)
1319{
1320	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
1321}
1322
1323extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
1324
1325static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
1326{
1327	if (len > skb_headlen(skb) &&
1328	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
1329		return NULL;
1330	skb->len -= len;
1331	return skb->data += len;
1332}
1333
1334static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
1335{
1336	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
1337}
1338
1339static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
1340{
1341	if (likely(len <= skb_headlen(skb)))
1342		return 1;
1343	if (unlikely(len > skb->len))
1344		return 0;
1345	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
1346}
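/*
 * Usage sketch (illustrative, not part of the original header): make sure
 * the first @hdrlen bytes are linear before dereferencing them.  The
 * function name is hypothetical.
 */
static inline void *example_get_header(struct sk_buff *skb,
				       unsigned int hdrlen)
{
	/* May reallocate the head; any previously cached skb->data
	 * pointer must be re-read after this call. */
	if (!pskb_may_pull(skb, hdrlen))
		return NULL;

	return skb->data;
}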
1347
1348/**
1349 *	skb_headroom - bytes at buffer head
1350 *	@skb: buffer to check
1351 *
1352 *	Return the number of bytes of free space at the head of an &sk_buff.
1353 */
1354static inline unsigned int skb_headroom(const struct sk_buff *skb)
1355{
1356	return skb->data - skb->head;
1357}
1358
1359/**
1360 *	skb_tailroom - bytes at buffer end
1361 *	@skb: buffer to check
1362 *
1363 *	Return the number of bytes of free space at the tail of an sk_buff
1364 */
1365static inline int skb_tailroom(const struct sk_buff *skb)
1366{
1367	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
1368}
1369
1370/**
1371 *	skb_availroom - bytes at buffer end
1372 *	@skb: buffer to check
1373 *
1374 *	Return the number of bytes of free space at the tail of an sk_buff
1375 *	allocated by sk_stream_alloc()
1376 */
1377static inline int skb_availroom(const struct sk_buff *skb)
1378{
1379	return skb_is_nonlinear(skb) ? 0 : skb->avail_size - skb->len;
1380}
1381
1382/**
1383 *	skb_reserve - adjust headroom
1384 *	@skb: buffer to alter
1385 *	@len: bytes to move
1386 *
1387 *	Increase the headroom of an empty &sk_buff by reducing the tail
1388 *	room. This is only allowed for an empty buffer.
1389 */
1390static inline void skb_reserve(struct sk_buff *skb, int len)
1391{
1392	skb->data += len;
1393	skb->tail += len;
1394}
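/*
 * Usage sketch (illustrative, not part of the original header): the usual
 * transmit-side pattern - reserve headroom first, then extend the tail
 * with skb_put() and later prepend headers with skb_push().  The function
 * name is hypothetical.
 */
static inline struct sk_buff *example_build_frame(unsigned int payload_len,
						  unsigned int hdr_room)
{
	struct sk_buff *skb = alloc_skb(hdr_room + payload_len, GFP_ATOMIC);

	if (!skb)
		return NULL;

	skb_reserve(skb, hdr_room);	/* headroom for later skb_push()    */
	skb_put(skb, payload_len);	/* tail space for the caller's data */

	/* protocol code would now fill the payload and skb_push() headers */
	return skb;
}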
1395
1396static inline void skb_reset_mac_len(struct sk_buff *skb)
1397{
1398	skb->mac_len = skb->network_header - skb->mac_header;
1399}
1400
1401#ifdef NET_SKBUFF_DATA_USES_OFFSET
1402static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
1403{
1404	return skb->head + skb->transport_header;
1405}
1406
1407static inline void skb_reset_transport_header(struct sk_buff *skb)
1408{
1409	skb->transport_header = skb->data - skb->head;
1410}
1411
1412static inline void skb_set_transport_header(struct sk_buff *skb,
1413					    const int offset)
1414{
1415	skb_reset_transport_header(skb);
1416	skb->transport_header += offset;
1417}
1418
1419static inline unsigned char *skb_network_header(const struct sk_buff *skb)
1420{
1421	return skb->head + skb->network_header;
1422}
1423
1424static inline void skb_reset_network_header(struct sk_buff *skb)
1425{
1426	skb->network_header = skb->data - skb->head;
1427}
1428
1429static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
1430{
1431	skb_reset_network_header(skb);
1432	skb->network_header += offset;
1433}
1434
1435static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
1436{
1437	return skb->head + skb->mac_header;
1438}
1439
1440static inline int skb_mac_header_was_set(const struct sk_buff *skb)
1441{
1442	return skb->mac_header != ~0U;
1443}
1444
1445static inline void skb_reset_mac_header(struct sk_buff *skb)
1446{
1447	skb->mac_header = skb->data - skb->head;
1448}
1449
1450static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
1451{
1452	skb_reset_mac_header(skb);
1453	skb->mac_header += offset;
1454}
1455
1456#else /* NET_SKBUFF_DATA_USES_OFFSET */
1457
1458static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
1459{
1460	return skb->transport_header;
1461}
1462
1463static inline void skb_reset_transport_header(struct sk_buff *skb)
1464{
1465	skb->transport_header = skb->data;
1466}
1467
1468static inline void skb_set_transport_header(struct sk_buff *skb,
1469					    const int offset)
1470{
1471	skb->transport_header = skb->data + offset;
1472}
1473
1474static inline unsigned char *skb_network_header(const struct sk_buff *skb)
1475{
1476	return skb->network_header;
1477}
1478
1479static inline void skb_reset_network_header(struct sk_buff *skb)
1480{
1481	skb->network_header = skb->data;
1482}
1483
1484static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
1485{
1486	skb->network_header = skb->data + offset;
1487}
1488
1489static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
1490{
1491	return skb->mac_header;
1492}
1493
1494static inline int skb_mac_header_was_set(const struct sk_buff *skb)
1495{
1496	return skb->mac_header != NULL;
1497}
1498
1499static inline void skb_reset_mac_header(struct sk_buff *skb)
1500{
1501	skb->mac_header = skb->data;
1502}
1503
1504static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
1505{
1506	skb->mac_header = skb->data + offset;
1507}
1508#endif /* NET_SKBUFF_DATA_USES_OFFSET */
1509
1510static inline void skb_mac_header_rebuild(struct sk_buff *skb)
1511{
1512	if (skb_mac_header_was_set(skb)) {
1513		const unsigned char *old_mac = skb_mac_header(skb);
1514
1515		skb_set_mac_header(skb, -skb->mac_len);
1516		memmove(skb_mac_header(skb), old_mac, skb->mac_len);
1517	}
1518}
1519
1520static inline int skb_checksum_start_offset(const struct sk_buff *skb)
1521{
1522	return skb->csum_start - skb_headroom(skb);
1523}
1524
1525static inline int skb_transport_offset(const struct sk_buff *skb)
1526{
1527	return skb_transport_header(skb) - skb->data;
1528}
1529
1530static inline u32 skb_network_header_len(const struct sk_buff *skb)
1531{
1532	return skb->transport_header - skb->network_header;
1533}
1534
1535static inline int skb_network_offset(const struct sk_buff *skb)
1536{
1537	return skb_network_header(skb) - skb->data;
1538}
1539
1540static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
1541{
1542	return pskb_may_pull(skb, skb_network_offset(skb) + len);
1543}
1544
1545/*
1546 * CPUs often take a performance hit when accessing unaligned memory
 1547 * locations. The actual performance hit varies; it can be small if the
1548 * hardware handles it or large if we have to take an exception and fix it
1549 * in software.
1550 *
1551 * Since an ethernet header is 14 bytes network drivers often end up with
1552 * the IP header at an unaligned offset. The IP header can be aligned by
1553 * shifting the start of the packet by 2 bytes. Drivers should do this
1554 * with:
1555 *
1556 * skb_reserve(skb, NET_IP_ALIGN);
1557 *
1558 * The downside to this alignment of the IP header is that the DMA is now
1559 * unaligned. On some architectures the cost of an unaligned DMA is high
1560 * and this cost outweighs the gains made by aligning the IP header.
1561 *
1562 * Since this trade off varies between architectures, we allow NET_IP_ALIGN
1563 * to be overridden.
1564 */
1565#ifndef NET_IP_ALIGN
1566#define NET_IP_ALIGN	2
1567#endif
1568
1569/*
1570 * The networking layer reserves some headroom in skb data (via
1571 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
1572 * the header has to grow. In the default case, if the header has to grow
1573 * 32 bytes or less we avoid the reallocation.
1574 *
1575 * Unfortunately this headroom changes the DMA alignment of the resulting
1576 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
1577 * on some architectures. An architecture can override this value,
1578 * perhaps setting it to a cacheline in size (since that will maintain
1579 * cacheline alignment of the DMA). It must be a power of 2.
1580 *
1581 * Various parts of the networking layer expect at least 32 bytes of
1582 * headroom, you should not reduce this.
1583 *
1584 * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS)
1585 * to reduce average number of cache lines per packet.
1586 * get_rps_cpus() for example only access one 64 bytes aligned block :
1587 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
1588 */
1589#ifndef NET_SKB_PAD
1590#define NET_SKB_PAD	max(32, L1_CACHE_BYTES)
1591#endif
1592
1593extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
1594
1595static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
1596{
1597	if (unlikely(skb_is_nonlinear(skb))) {
1598		WARN_ON(1);
1599		return;
1600	}
1601	skb->len = len;
1602	skb_set_tail_pointer(skb, len);
1603}
1604
1605extern void skb_trim(struct sk_buff *skb, unsigned int len);
1606
1607static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
1608{
1609	if (skb->data_len)
1610		return ___pskb_trim(skb, len);
1611	__skb_trim(skb, len);
1612	return 0;
1613}
1614
1615static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
1616{
1617	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
1618}
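/*
 * Usage sketch (illustrative, not part of the original header): drop
 * trailing padding once the true frame length is known, e.g. from a
 * length field in a protocol header.  The function name is hypothetical.
 */
static inline int example_trim_padding(struct sk_buff *skb,
				       unsigned int real_len)
{
	/* No-op if the skb is already short enough; can fail only when a
	 * cloned, paged skb needs its head reallocated. */
	return pskb_trim(skb, real_len);
}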
1619
1620/**
1621 *	pskb_trim_unique - remove end from a paged unique (not cloned) buffer
1622 *	@skb: buffer to alter
1623 *	@len: new length
1624 *
1625 *	This is identical to pskb_trim except that the caller knows that
1626 *	the skb is not cloned so we should never get an error due to out-
1627 *	of-memory.
1628 */
1629static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
1630{
1631	int err = pskb_trim(skb, len);
1632	BUG_ON(err);
1633}
1634
1635/**
1636 *	skb_orphan - orphan a buffer
1637 *	@skb: buffer to orphan
1638 *
1639 *	If a buffer currently has an owner then we call the owner's
1640 *	destructor function and make the @skb unowned. The buffer continues
1641 *	to exist but is no longer charged to its former owner.
1642 */
1643static inline void skb_orphan(struct sk_buff *skb)
1644{
1645	if (skb->destructor)
1646		skb->destructor(skb);
1647	skb->destructor = NULL;
1648	skb->sk		= NULL;
1649}
1650
1651/**
1652 *	__skb_queue_purge - empty a list
1653 *	@list: list to empty
1654 *
1655 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
1656 *	the list and one reference dropped. This function does not take the
1657 *	list lock and the caller must hold the relevant locks to use it.
1658 */
1659extern void skb_queue_purge(struct sk_buff_head *list);
1660static inline void __skb_queue_purge(struct sk_buff_head *list)
1661{
1662	struct sk_buff *skb;
1663	while ((skb = __skb_dequeue(list)) != NULL)
1664		kfree_skb(skb);
1665}
1666
1667/**
1668 *	__dev_alloc_skb - allocate an skbuff for receiving
1669 *	@length: length to allocate
1670 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
1671 *
1672 *	Allocate a new &sk_buff and assign it a usage count of one. The
1673 *	buffer has unspecified headroom built in. Users should allocate
1674 *	the headroom they think they need without accounting for the
1675 *	built in space. The built in space is used for optimisations.
1676 *
1677 *	%NULL is returned if there is no free memory.
1678 */
1679static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
1680					      gfp_t gfp_mask)
1681{
1682	struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
1683	if (likely(skb))
1684		skb_reserve(skb, NET_SKB_PAD);
1685	return skb;
1686}
1687
1688extern struct sk_buff *dev_alloc_skb(unsigned int length);
1689
1690extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
1691		unsigned int length, gfp_t gfp_mask);
1692
1693/**
1694 *	netdev_alloc_skb - allocate an skbuff for rx on a specific device
1695 *	@dev: network device to receive on
1696 *	@length: length to allocate
1697 *
1698 *	Allocate a new &sk_buff and assign it a usage count of one. The
1699 *	buffer has unspecified headroom built in. Users should allocate
1700 *	the headroom they think they need without accounting for the
1701 *	built in space. The built in space is used for optimisations.
1702 *
1703 *	%NULL is returned if there is no free memory. Although this function
1704 *	allocates memory it can be called from an interrupt.
1705 */
1706static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
1707		unsigned int length)
1708{
1709	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
1710}
1711
1712static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
1713		unsigned int length, gfp_t gfp)
1714{
1715	struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);
1716
1717	if (NET_IP_ALIGN && skb)
1718		skb_reserve(skb, NET_IP_ALIGN);
1719	return skb;
1720}
1721
1722static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
1723		unsigned int length)
1724{
1725	return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
1726}
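/*
 * Usage sketch (illustrative, not part of the original header): typical rx
 * buffer handling in a driver - the helpers above already add NET_SKB_PAD
 * headroom and the NET_IP_ALIGN shift.  The function name is hypothetical.
 */
static inline struct sk_buff *example_rx_alloc(struct net_device *dev,
					       unsigned int frame_len)
{
	struct sk_buff *skb = netdev_alloc_skb_ip_align(dev, frame_len);

	if (!skb)
		return NULL;

	/* the device would DMA the received frame into skb->data, then: */
	skb_put(skb, frame_len);	/* account for the received bytes */
	return skb;
}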
1727
1728/**
 1729 * skb_frag_page - retrieve the page referred to by a paged fragment
1730 * @frag: the paged fragment
1731 *
1732 * Returns the &struct page associated with @frag.
1733 */
1734static inline struct page *skb_frag_page(const skb_frag_t *frag)
1735{
1736	return frag->page.p;
1737}
1738
1739/**
 1740 * __skb_frag_ref - take an additional reference on a paged fragment.
1741 * @frag: the paged fragment
1742 *
1743 * Takes an additional reference on the paged fragment @frag.
1744 */
1745static inline void __skb_frag_ref(skb_frag_t *frag)
1746{
1747	get_page(skb_frag_page(frag));
1748}
1749
1750/**
 1751 * skb_frag_ref - take an additional reference on a paged fragment of an skb.
1752 * @skb: the buffer
1753 * @f: the fragment offset.
1754 *
1755 * Takes an additional reference on the @f'th paged fragment of @skb.
1756 */
1757static inline void skb_frag_ref(struct sk_buff *skb, int f)
1758{
1759	__skb_frag_ref(&skb_shinfo(skb)->frags[f]);
1760}
1761
1762/**
1763 * __skb_frag_unref - release a reference on a paged fragment.
1764 * @frag: the paged fragment
1765 *
1766 * Releases a referenc…

(Large files are truncated; see the repository link above for the full file.)