
/drivers/net/wireless/bcmdhd/include/linuxver.h

https://bitbucket.org/slukk/jb-tsm-kernel-4.2
/*
 * Linux-specific abstractions to gain some independence from linux kernel versions.
 * Pave over some 2.2 versus 2.4 versus 2.6 kernel differences.
 *
 * Copyright (C) 1999-2011, Broadcom Corporation
 *
 *         Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 *      As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module.  An independent module is a module which is not
 * derived from this software.  The special exception does not apply to any
 * modifications of the software.
 *
 *      Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 * $Id: linuxver.h 312264 2012-02-02 00:49:43Z $
 */


#ifndef _linuxver_h_
#define _linuxver_h_

#include <linux/version.h>
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
#include <linux/config.h>
#else
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 33))
#include <generated/autoconf.h>
#else
#include <linux/autoconf.h>
#endif
#endif
#include <linux/module.h>

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0))

#ifdef __UNDEF_NO_VERSION__
#undef __NO_VERSION__
#else
#define __NO_VERSION__
#endif
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
#define module_param(_name_, _type_, _perm_)	MODULE_PARM(_name_, "i")
#define module_param_string(_name_, _string_, _size_, _perm_) \
		MODULE_PARM(_string_, "c" __MODULE_STRING(_size_))
#endif
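/*
 * Usage sketch (illustrative only; the parameter name is hypothetical and not
 * taken from this file): with the mapping above, a declaration such as
 *
 *     static int dhd_msg_level = 0;
 *     module_param(dhd_msg_level, int, 0);
 *
 * still builds on pre-2.5 kernels, where it collapses to
 * MODULE_PARM(dhd_msg_level, "i"); the _type_ and _perm_ arguments are simply
 * ignored by the compatibility macro.
 */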


#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 9))
#include <linux/malloc.h>
#else
#include <linux/slab.h>
#endif

#include <linux/types.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
#include <linux/semaphore.h>
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28))
#undef IP_TOS
#endif
#include <asm/io.h>

#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41))
#include <linux/workqueue.h>
#else
#include <linux/tqueue.h>
#ifndef work_struct
#define work_struct tq_struct
#endif
#ifndef INIT_WORK
#define INIT_WORK(_work, _func, _data) INIT_TQUEUE((_work), (_func), (_data))
#endif
#ifndef schedule_work
#define schedule_work(_work) schedule_task((_work))
#endif
#ifndef flush_scheduled_work
#define flush_scheduled_work() flush_scheduled_tasks()
#endif
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
#define	MY_INIT_WORK(_work, _func)	INIT_WORK(_work, _func)
#else
#define	MY_INIT_WORK(_work, _func)	INIT_WORK(_work, _func, _work)
typedef void (*work_func_t)(void *work);
#endif
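/*
 * Illustrative usage (the handler and field names are hypothetical): a work
 * item written against the post-2.6.19 prototype,
 *
 *     static void wl_dpc_work(struct work_struct *work);
 *     ...
 *     MY_INIT_WORK(&wl->dpc_work, wl_dpc_work);
 *
 * can be initialised on either side of the 2.6.19 API change; on older kernels
 * the work item itself is passed back as the handler's argument, which is why
 * _work appears twice in the three-argument INIT_WORK() variant above.
 */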

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))

#ifndef IRQ_NONE
typedef void irqreturn_t;
#define IRQ_NONE
#define IRQ_HANDLED
#define IRQ_RETVAL(x)
#endif
#else
typedef irqreturn_t (*FN_ISR)(int irq, void *dev_id, struct pt_regs *ptregs);
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
#define IRQF_SHARED	SA_SHIRQ
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
#ifdef	CONFIG_NET_RADIO
#define	CONFIG_WIRELESS_EXT
#endif
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67)
#define MOD_INC_USE_COUNT
#define MOD_DEC_USE_COUNT
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
#include <linux/sched.h>
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
#include <net/lib80211.h>
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
#include <linux/ieee80211.h>
#else
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
#include <net/ieee80211.h>
#endif
#endif


#ifndef __exit
#define __exit
#endif
#ifndef __devexit
#define __devexit
#endif
#ifndef __devinit
#define __devinit	__init
#endif
#ifndef __devinitdata
#define __devinitdata
#endif
#ifndef __devexit_p
#define __devexit_p(x)	x
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0))

#define pci_get_drvdata(dev)		(dev)->sysdata
#define pci_set_drvdata(dev, value)	(dev)->sysdata = (value)



struct pci_device_id {
	unsigned int vendor, device;
	unsigned int subvendor, subdevice;
	unsigned int class, class_mask;
	unsigned long driver_data;
};

struct pci_driver {
	struct list_head node;
	char *name;
	const struct pci_device_id *id_table;
	int (*probe)(struct pci_dev *dev,
	             const struct pci_device_id *id);
	void (*remove)(struct pci_dev *dev);
	void (*suspend)(struct pci_dev *dev);
	void (*resume)(struct pci_dev *dev);
};

#define MODULE_DEVICE_TABLE(type, name)
#define PCI_ANY_ID (~0)


#define pci_module_init pci_register_driver
extern int pci_register_driver(struct pci_driver *drv);
extern void pci_unregister_driver(struct pci_driver *drv);

#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18))
#define pci_module_init pci_register_driver
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18))
#ifdef MODULE
#define module_init(x) int init_module(void) { return x(); }
#define module_exit(x) void cleanup_module(void) { x(); }
#else
#define module_init(x)	__initcall(x);
#define module_exit(x)	__exitcall(x);
#endif
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)
#define WL_USE_NETDEV_OPS
#else
#undef WL_USE_NETDEV_OPS
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)) && defined(CONFIG_RFKILL_INPUT)
#define WL_CONFIG_RFKILL_INPUT
#else
#undef WL_CONFIG_RFKILL_INPUT
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 48))
#define list_for_each(pos, head) \
	for (pos = (head)->next; pos != (head); pos = pos->next)
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 13))
#define pci_resource_start(dev, bar)	((dev)->base_address[(bar)])
#elif (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 44))
#define pci_resource_start(dev, bar)	((dev)->resource[(bar)].start)
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 23))
#define pci_enable_device(dev) do { } while (0)
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 14))
#define net_device device
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 42))



#ifndef PCI_DMA_TODEVICE
#define	PCI_DMA_TODEVICE	1
#define	PCI_DMA_FROMDEVICE	2
#endif

typedef u32 dma_addr_t;

/* Smallest page order whose allocation covers 'size' bytes. */
static inline int get_order(unsigned long size)
{
	int order;

	size = (size-1) >> (PAGE_SHIFT-1);
	order = -1;
	do {
		size >>= 1;
		order++;
	} while (size);
	return order;
}

/* Pre-2.3.42 fallback: back a "consistent" DMA buffer with ordinary page
 * allocations and report its bus address via virt_to_bus(). */
static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
                                         dma_addr_t *dma_handle)
{
	void *ret;
	int gfp = GFP_ATOMIC | GFP_DMA;

	ret = (void *)__get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_bus(ret);
	}
	return ret;
}
static inline void pci_free_consistent(struct pci_dev *hwdev, size_t size,
                                       void *vaddr, dma_addr_t dma_handle)
{
	free_pages((unsigned long)vaddr, get_order(size));
}
#define pci_map_single(cookie, address, size, dir)	virt_to_bus(address)
#define pci_unmap_single(cookie, address, size, dir)

#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 43))

#define dev_kfree_skb_any(a)		dev_kfree_skb(a)
#define netif_down(dev)			do { (dev)->start = 0; } while (0)


#ifndef _COMPAT_NETDEVICE_H



#define dev_kfree_skb_irq(a)	dev_kfree_skb(a)
#define netif_wake_queue(dev) \
		do { clear_bit(0, &(dev)->tbusy); mark_bh(NET_BH); } while (0)
#define netif_stop_queue(dev)	set_bit(0, &(dev)->tbusy)

static inline void netif_start_queue(struct net_device *dev)
{
	dev->tbusy = 0;
	dev->interrupt = 0;
	dev->start = 1;
}

#define netif_queue_stopped(dev)	(dev)->tbusy
#define netif_running(dev)		(dev)->start

#endif

#define netif_device_attach(dev)	netif_start_queue(dev)
#define netif_device_detach(dev)	netif_stop_queue(dev)


#define tasklet_struct				tq_struct
static inline void tasklet_schedule(struct tasklet_struct *tasklet)
{
	queue_task(tasklet, &tq_immediate);
	mark_bh(IMMEDIATE_BH);
}

static inline void tasklet_init(struct tasklet_struct *tasklet,
                                void (*func)(unsigned long),
                                unsigned long data)
{
	tasklet->next = NULL;
	tasklet->sync = 0;
	tasklet->routine = (void (*)(void *))func;
	tasklet->data = (void *)data;
}
#define tasklet_kill(tasklet)	{ do {} while (0); }


#define del_timer_sync(timer) del_timer(timer)

#else

#define netif_down(dev)

#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3))


#define PREPARE_TQUEUE(_tq, _routine, _data)			\
	do {							\
		(_tq)->routine = _routine;			\
		(_tq)->data = _data;				\
	} while (0)


#define INIT_TQUEUE(_tq, _routine, _data)			\
	do {							\
		INIT_LIST_HEAD(&(_tq)->list);			\
		(_tq)->sync = 0;				\
		PREPARE_TQUEUE((_tq), (_routine), (_data));	\
	} while (0)

#endif


#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 9)
#define	PCI_SAVE_STATE(a, b)	pci_save_state(a)
#define	PCI_RESTORE_STATE(a, b)	pci_restore_state(a)
#else
#define	PCI_SAVE_STATE(a, b)	pci_save_state(a, b)
#define	PCI_RESTORE_STATE(a, b)	pci_restore_state(a, b)
#endif
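/*
 * Illustrative call site (the variable names are made up for this sketch): the
 * wrappers above let suspend/resume code ignore which pci_save_state()
 * prototype the running kernel provides:
 *
 *     u32 pci_cfg[16];                  // only consulted on kernels <= 2.6.9
 *     PCI_SAVE_STATE(pdev, pci_cfg);
 *     ...
 *     PCI_RESTORE_STATE(pdev, pci_cfg);
 *
 * On kernels newer than 2.6.9 the buffer argument is dropped, since the saved
 * state lives inside struct pci_dev itself.
 */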

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 6))
static inline int
pci_save_state(struct pci_dev *dev, u32 *buffer)
{
	int i;
	if (buffer) {
		for (i = 0; i < 16; i++)
			pci_read_config_dword(dev, i * 4, &buffer[i]);
	}
	return 0;
}

static inline int
pci_restore_state(struct pci_dev *dev, u32 *buffer)
{
	int i;

	if (buffer) {
		for (i = 0; i < 16; i++)
			pci_write_config_dword(dev, i * 4, buffer[i]);
	}
	else {
		for (i = 0; i < 6; i++)
			pci_write_config_dword(dev,
			                       PCI_BASE_ADDRESS_0 + (i * 4),
			                       pci_resource_start(dev, i));
		pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
	}
	return 0;
}
#endif


#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 19))
#define read_c0_count() read_32bit_cp0_register(CP0_COUNT)
#endif


#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
#ifndef SET_MODULE_OWNER
#define SET_MODULE_OWNER(dev)		do {} while (0)
#define OLD_MOD_INC_USE_COUNT		MOD_INC_USE_COUNT
#define OLD_MOD_DEC_USE_COUNT		MOD_DEC_USE_COUNT
#else
#define OLD_MOD_INC_USE_COUNT		do {} while (0)
#define OLD_MOD_DEC_USE_COUNT		do {} while (0)
#endif
#else
#ifndef SET_MODULE_OWNER
#define SET_MODULE_OWNER(dev)		do {} while (0)
#endif
#ifndef MOD_INC_USE_COUNT
#define MOD_INC_USE_COUNT			do {} while (0)
#endif
#ifndef MOD_DEC_USE_COUNT
#define MOD_DEC_USE_COUNT			do {} while (0)
#endif
#define OLD_MOD_INC_USE_COUNT		MOD_INC_USE_COUNT
#define OLD_MOD_DEC_USE_COUNT		MOD_DEC_USE_COUNT
#endif

#ifndef SET_NETDEV_DEV
#define SET_NETDEV_DEV(net, pdev)	do {} while (0)
#endif

#ifndef HAVE_FREE_NETDEV
#define free_netdev(dev)		kfree(dev)
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))

#define af_packet_priv			data
#endif


#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
#define DRV_SUSPEND_STATE_TYPE pm_message_t
#else
#define DRV_SUSPEND_STATE_TYPE uint32
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
#define CHECKSUM_HW	CHECKSUM_PARTIAL
#endif

/* Control block for a driver-owned kernel thread; see PROC_START/PROC_STOP below. */
typedef struct {
	void	*parent;		/* opaque pointer back to the owning context */
	struct	task_struct *p_task;
	long	thr_pid;		/* pid returned by kernel_thread() */
	int	prio;			/* thread priority */
	struct	semaphore sema;		/* wakes the thread for work or a stop request */
	bool	terminated;		/* set by PROC_STOP() to ask the thread to exit */
	struct	completion completed;	/* signalled by the thread on start-up and on exit */
} tsk_ctl_t;



#ifdef DHD_DEBUG
#define DBG_THR(x) printk x
#else
#define DBG_THR(x)
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
#define SMP_RD_BARRIER_DEPENDS(x) smp_read_barrier_depends(x)
#else
#define SMP_RD_BARRIER_DEPENDS(x) smp_rmb(x)
#endif


/* Spawn thread_func via kernel_thread() and wait until it signals 'completed'. */
#define PROC_START(thread_func, owner, tsk_ctl, flags) \
{ \
	sema_init(&((tsk_ctl)->sema), 0); \
	init_completion(&((tsk_ctl)->completed)); \
	(tsk_ctl)->parent = owner; \
	(tsk_ctl)->terminated = FALSE; \
	(tsk_ctl)->thr_pid = kernel_thread(thread_func, tsk_ctl, flags); \
	if ((tsk_ctl)->thr_pid > 0) \
		wait_for_completion(&((tsk_ctl)->completed)); \
	DBG_THR(("%s thr:%lx started\n", __FUNCTION__, (tsk_ctl)->thr_pid)); \
}

/* Ask the thread to exit, wake it, and wait for it to signal 'completed' again. */
#define PROC_STOP(tsk_ctl) \
{ \
	(tsk_ctl)->terminated = TRUE; \
	smp_wmb(); \
	up(&((tsk_ctl)->sema));	\
	wait_for_completion(&((tsk_ctl)->completed)); \
	DBG_THR(("%s thr:%lx terminated OK\n", __FUNCTION__, (tsk_ctl)->thr_pid)); \
	(tsk_ctl)->thr_pid = -1; \
}
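/*
 * Sketch of the thread body these macros expect (the function and variable
 * names are illustrative, not from this file):
 *
 *     static int example_thread(void *data)
 *     {
 *         tsk_ctl_t *tsk = (tsk_ctl_t *)data;
 *
 *         DAEMONIZE("example_thread");
 *         complete(&tsk->completed);              // unblocks PROC_START()
 *         while (down_interruptible(&tsk->sema) == 0) {
 *             SMP_RD_BARRIER_DEPENDS();
 *             if (tsk->terminated)
 *                 break;
 *             // ... handle one unit of work ...
 *         }
 *         complete_and_exit(&tsk->completed, 0);  // unblocks PROC_STOP()
 *     }
 *
 * PROC_STOP() sets 'terminated', posts the semaphore to wake the thread, and
 * then waits on 'completed' until the thread has exited.
 */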

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
#define DAEMONIZE(a) daemonize(a); \
	allow_signal(SIGKILL); \
	allow_signal(SIGTERM);
#else /* Linux 2.4 (w/o preemption patch) */
#define RAISE_RX_SOFTIRQ() \
	cpu_raise_softirq(smp_processor_id(), NET_RX_SOFTIRQ)
#define DAEMONIZE(a) daemonize(); \
	do { if (a) \
		strncpy(current->comm, a, MIN(sizeof(current->comm), (strlen(a) + 1))); \
	} while (0);
#endif /* LINUX_VERSION_CODE */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
#define BLOCKABLE()	(!in_atomic())
#else
#define BLOCKABLE()	(!in_interrupt())
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
#define KILL_PROC(nr, sig) \
{ \
	struct task_struct *tsk; \
	struct pid *pid; \
	pid = find_get_pid((pid_t)nr); \
	tsk = pid_task(pid, PIDTYPE_PID); \
	if (tsk) send_sig(sig, tsk, 1); \
}
#else
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
	KERNEL_VERSION(2, 6, 30))
#define KILL_PROC(pid, sig) \
{ \
	struct task_struct *tsk; \
	tsk = find_task_by_vpid(pid); \
	if (tsk) send_sig(sig, tsk, 1); \
}
#else
#define KILL_PROC(pid, sig) \
{ \
	kill_proc(pid, sig, 1); \
}
#endif
#endif
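/*
 * Typical (illustrative) use: signal a helper thread or user process whose pid
 * the driver has recorded, e.g. KILL_PROC(wl->event_pid, SIGTERM); the macro
 * resolves the pid with whichever lookup API the running kernel offers and
 * then calls send_sig()/kill_proc() on the result.
 */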

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
#include <linux/time.h>
#include <linux/wait.h>
#else
#include <linux/sched.h>

#define __wait_event_interruptible_timeout(wq, condition, ret)		\
do {									\
	wait_queue_t __wait;						\
	init_waitqueue_entry(&__wait, current);				\
									\
	add_wait_queue(&wq, &__wait);					\
	for (;;) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (condition)						\
			break;						\
		if (!signal_pending(current)) {				\
			ret = schedule_timeout(ret);			\
			if (!ret)					\
				break;					\
			continue;					\
		}							\
		ret = -ERESTARTSYS;					\
		break;							\
	}								\
	current->state = TASK_RUNNING;					\
	remove_wait_queue(&wq, &__wait);				\
} while (0)

#define wait_event_interruptible_timeout(wq, condition, timeout)	\
({									\
	long __ret = timeout;						\
	if (!(condition))						\
		__wait_event_interruptible_timeout(wq, condition, __ret); \
	__ret;								\
})

#endif
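/*
 * Illustrative use of the 2.4 backport above (the wait queue and flag are
 * hypothetical): callers get 2.6-style semantics on either kernel:
 *
 *     long left = wait_event_interruptible_timeout(wl->ioctl_wait,
 *                                                  wl->ioctl_done, HZ / 10);
 *     // left > 0: condition became true, remaining jiffies returned
 *     // left == 0: timed out
 *     // left == -ERESTARTSYS: interrupted by a signal
 */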

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
#define WL_DEV_IF(dev)          ((wl_if_t*)netdev_priv(dev))
#else
#define WL_DEV_IF(dev)          ((wl_if_t*)(dev)->priv)
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
#define WL_ISR(i, d, p)         wl_isr((i), (d))
#else
#define WL_ISR(i, d, p)         wl_isr((i), (d), (p))
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
#define netdev_priv(dev) dev->priv
#endif

#endif