
/arch/x86/include/asm/system.h

https://bitbucket.org/ndreys/linux-sunxi
#ifndef _ASM_X86_SYSTEM_H
#define _ASM_X86_SYSTEM_H

#include <asm/asm.h>
#include <asm/segment.h>
#include <asm/cpufeature.h>
#include <asm/cmpxchg.h>
#include <asm/nops.h>

#include <linux/kernel.h>
#include <linux/irqflags.h>

/* entries in ARCH_DLINFO: */
#if defined(CONFIG_IA32_EMULATION) || !defined(CONFIG_X86_64)
# define AT_VECTOR_SIZE_ARCH 2
#else /* else it's non-compat x86-64 */
# define AT_VECTOR_SIZE_ARCH 1
#endif

struct task_struct; /* one of the stranger aspects of C forward declarations */
struct task_struct *__switch_to(struct task_struct *prev,
				struct task_struct *next);
struct tss_struct;
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		      struct tss_struct *tss);
extern void show_regs_common(void);

#ifdef CONFIG_X86_32

#ifdef CONFIG_CC_STACKPROTECTOR
#define __switch_canary							\
	"movl %P[task_canary](%[next]), %%ebx\n\t"			\
	"movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
#define __switch_canary_oparam						\
	, [stack_canary] "=m" (stack_canary.canary)
#define __switch_canary_iparam						\
	, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
#else	/* CC_STACKPROTECTOR */
#define __switch_canary
#define __switch_canary_oparam
#define __switch_canary_iparam
#endif	/* CC_STACKPROTECTOR */

/*
 * Saving eflags is important. Not only does it switch IOPL between tasks,
 * it also protects other tasks from NT leaking in through sysenter etc.
 */
#define switch_to(prev, next, last)					\
do {									\
	/*								\
	 * Context-switching clobbers all registers, so we clobber	\
	 * them explicitly, via unused output variables.		\
	 * (EAX and EBP are not listed because EBP is saved/restored	\
	 * explicitly for wchan access and EAX is the return value of	\
	 * __switch_to())						\
	 */								\
	unsigned long ebx, ecx, edx, esi, edi;				\
									\
	asm volatile("pushfl\n\t"		/* save    flags */	\
		     "pushl %%ebp\n\t"		/* save    EBP   */	\
		     "movl %%esp,%[prev_sp]\n\t"	/* save    ESP   */ \
		     "movl %[next_sp],%%esp\n\t"	/* restore ESP   */ \
		     "movl $1f,%[prev_ip]\n\t"	/* save    EIP   */	\
		     "pushl %[next_ip]\n\t"	/* restore EIP   */	\
		     __switch_canary					\
		     "jmp __switch_to\n"	/* regparm call  */	\
		     "1:\t"						\
		     "popl %%ebp\n\t"		/* restore EBP   */	\
		     "popfl\n"			/* restore flags */	\
									\
		     /* output parameters */				\
		     : [prev_sp] "=m" (prev->thread.sp),		\
		       [prev_ip] "=m" (prev->thread.ip),		\
		       "=a" (last),					\
									\
		       /* clobbered output registers: */		\
		       "=b" (ebx), "=c" (ecx), "=d" (edx),		\
		       "=S" (esi), "=D" (edi)				\
		       							\
		       __switch_canary_oparam				\
									\
		       /* input parameters: */				\
		     : [next_sp]  "m" (next->thread.sp),		\
		       [next_ip]  "m" (next->thread.ip),		\
		       							\
		       /* regparm parameters for __switch_to(): */	\
		       [prev]     "a" (prev),				\
		       [next]     "d" (next)				\
									\
		       __switch_canary_iparam				\
									\
		     : /* reloaded segment registers */			\
			"memory");					\
} while (0)
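
/*
 * Illustrative sketch (not part of this header): how scheduler core code
 * might invoke the switch_to() macro above.  The wrapper name
 * my_context_switch() is hypothetical; in the kernel proper the macro is
 * used from the scheduler's context_switch() path.
 *
 * <programlisting>
 *	static struct task_struct *
 *	my_context_switch(struct task_struct *prev, struct task_struct *next)
 *	{
 *		struct task_struct *last;
 *
 *		switch_to(prev, next, last);
 *		// We only get here once something switches back to "prev";
 *		// "last" then names whichever task we switched away from.
 *		return last;
 *	}
 * </programlisting>
 */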

#else

/* frame pointer must be last for get_wchan */
#define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"

#define __EXTRA_CLOBBER  \
	, "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
	  "r12", "r13", "r14", "r15"

#ifdef CONFIG_CC_STACKPROTECTOR
#define __switch_canary							  \
	"movq %P[task_canary](%%rsi),%%r8\n\t"				  \
	"movq %%r8,"__percpu_arg([gs_canary])"\n\t"
#define __switch_canary_oparam						  \
	, [gs_canary] "=m" (irq_stack_union.stack_canary)
#define __switch_canary_iparam						  \
	, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
#else	/* CC_STACKPROTECTOR */
#define __switch_canary
#define __switch_canary_oparam
#define __switch_canary_iparam
#endif	/* CC_STACKPROTECTOR */

/* Save and restore flags to clear and handle the leaking NT flag */
#define switch_to(prev, next, last) \
	asm volatile(SAVE_CONTEXT					  \
	     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */	  \
	     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */	  \
	     "call __switch_to\n\t"					  \
	     "movq "__percpu_arg([current_task])",%%rsi\n\t"		  \
	     __switch_canary						  \
	     "movq %P[thread_info](%%rsi),%%r8\n\t"			  \
	     "movq %%rax,%%rdi\n\t" 					  \
	     "testl  %[_tif_fork],%P[ti_flags](%%r8)\n\t"		  \
	     "jnz   ret_from_fork\n\t"					  \
	     RESTORE_CONTEXT						  \
	     : "=a" (last)					  	  \
	       __switch_canary_oparam					  \
	     : [next] "S" (next), [prev] "D" (prev),			  \
	       [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
	       [ti_flags] "i" (offsetof(struct thread_info, flags)),	  \
	       [_tif_fork] "i" (_TIF_FORK),			  	  \
	       [thread_info] "i" (offsetof(struct task_struct, stack)),   \
	       [current_task] "m" (current_task)			  \
	       __switch_canary_iparam					  \
	     : "memory", "cc" __EXTRA_CLOBBER)
#endif

#ifdef __KERNEL__

extern void native_load_gs_index(unsigned);

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong..
 */
#define loadsegment(seg, value)						\
do {									\
	unsigned short __val = (value);					\
									\
	asm volatile("						\n"	\
		     "1:	movl %k0,%%" #seg "		\n"	\
									\
		     ".section .fixup,\"ax\"			\n"	\
		     "2:	xorl %k0,%k0			\n"	\
		     "		jmp 1b				\n"	\
		     ".previous					\n"	\
									\
		     _ASM_EXTABLE(1b, 2b)				\
									\
		     : "+r" (__val) : : "memory");			\
} while (0)

/*
 * Save a segment register away
 */
#define savesegment(seg, value)				\
	asm("mov %%" #seg ",%0":"=r" (value) : : "memory")
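
/*
 * Illustrative use (not part of this header): saving and restoring a
 * segment selector around a temporary change.  The variable names below
 * are hypothetical; note that a bad selector handed to loadsegment()
 * simply ends up loading the zero segment via the .fixup path above.
 *
 * <programlisting>
 *	unsigned short old_fs;
 *
 *	savesegment(fs, old_fs);	// remember the current %fs
 *	loadsegment(fs, 0);		// load the NULL selector
 *	// ... do whatever needed %fs cleared ...
 *	loadsegment(fs, old_fs);	// restore; a stale selector here
 *					// falls back to 0 via .fixup
 * </programlisting>
 */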

/*
 * x86_32 user gs accessors.
 */
#ifdef CONFIG_X86_32
#ifdef CONFIG_X86_32_LAZY_GS
#define get_user_gs(regs)	(u16)({unsigned long v; savesegment(gs, v); v;})
#define set_user_gs(regs, v)	loadsegment(gs, (unsigned long)(v))
#define task_user_gs(tsk)	((tsk)->thread.gs)
#define lazy_save_gs(v)		savesegment(gs, (v))
#define lazy_load_gs(v)		loadsegment(gs, (v))
#else	/* X86_32_LAZY_GS */
#define get_user_gs(regs)	(u16)((regs)->gs)
#define set_user_gs(regs, v)	do { (regs)->gs = (v); } while (0)
#define task_user_gs(tsk)	(task_pt_regs(tsk)->gs)
#define lazy_save_gs(v)		do { } while (0)
#define lazy_load_gs(v)		do { } while (0)
#endif	/* X86_32_LAZY_GS */
#endif	/* X86_32 */

static inline unsigned long get_limit(unsigned long segment)
{
	unsigned long __limit;
	asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
	return __limit + 1;
}

static inline void native_clts(void)
{
	asm volatile("clts");
}

/*
 * Volatile isn't enough to prevent the compiler from reordering the
 * read/write functions for the control registers and messing everything up.
 * A memory clobber would solve the problem, but would prevent reordering of
 * all loads/stores around it, which can hurt performance. The solution is to
 * use a variable and mimic reads and writes to it to enforce serialization.
 */
static unsigned long __force_order;

static inline unsigned long native_read_cr0(void)
{
	unsigned long val;
	asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr0(unsigned long val)
{
	asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr2(void)
{
	unsigned long val;
	asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr2(unsigned long val)
{
	asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr3(void)
{
	unsigned long val;
	asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr3(unsigned long val)
{
	asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr4(void)
{
	unsigned long val;
	asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
	return val;
}

static inline unsigned long native_read_cr4_safe(void)
{
	unsigned long val;
	/* This could fault if %cr4 does not exist. On x86_64, CR4 always
	 * exists, so it will never fault. */
#ifdef CONFIG_X86_32
	asm volatile("1: mov %%cr4, %0\n"
		     "2:\n"
		     _ASM_EXTABLE(1b, 2b)
		     : "=r" (val), "=m" (__force_order) : "0" (0));
#else
	val = native_read_cr4();
#endif
	return val;
}

static inline void native_write_cr4(unsigned long val)
{
	asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order));
}

#ifdef CONFIG_X86_64
static inline unsigned long native_read_cr8(void)
{
	unsigned long cr8;
	asm volatile("movq %%cr8,%0" : "=r" (cr8));
	return cr8;
}

static inline void native_write_cr8(unsigned long val)
{
	asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
}
#endif

static inline void native_wbinvd(void)
{
	asm volatile("wbinvd": : :"memory");
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else

static inline unsigned long read_cr0(void)
{
	return native_read_cr0();
}

static inline void write_cr0(unsigned long x)
{
	native_write_cr0(x);
}

static inline unsigned long read_cr2(void)
{
	return native_read_cr2();
}

static inline void write_cr2(unsigned long x)
{
	native_write_cr2(x);
}

static inline unsigned long read_cr3(void)
{
	return native_read_cr3();
}

static inline void write_cr3(unsigned long x)
{
	native_write_cr3(x);
}

static inline unsigned long read_cr4(void)
{
	return native_read_cr4();
}

static inline unsigned long read_cr4_safe(void)
{
	return native_read_cr4_safe();
}

static inline void write_cr4(unsigned long x)
{
	native_write_cr4(x);
}

static inline void wbinvd(void)
{
	native_wbinvd();
}

#ifdef CONFIG_X86_64

static inline unsigned long read_cr8(void)
{
	return native_read_cr8();
}

static inline void write_cr8(unsigned long x)
{
	native_write_cr8(x);
}

static inline void load_gs_index(unsigned selector)
{
	native_load_gs_index(selector);
}

#endif

/* Clear the 'TS' bit */
static inline void clts(void)
{
	native_clts();
}

#endif /* CONFIG_PARAVIRT */

#define stts() write_cr0(read_cr0() | X86_CR0_TS)
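
/*
 * Illustrative pairing (not part of this header): clts() drops CR0.TS so
 * FPU/SSE instructions no longer trap with #NM, and stts() sets TS again
 * to re-arm lazy FPU context switching.  This is only a sketch; the real
 * kernel_fpu_begin()/kernel_fpu_end() helpers also deal with preemption
 * and any previously saved FPU state.
 *
 * <programlisting>
 *	clts();			// allow FPU/SSE use without faulting
 *	// ... use SSE/FPU registers here ...
 *	stts();			// trap the next FPU use again
 * </programlisting>
 */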

#endif /* __KERNEL__ */

static inline void clflush(volatile void *__p)
{
	asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
}
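
/*
 * Illustrative sketch (not part of this header): flushing a whole buffer
 * one cache line at a time.  The names "vaddr" and "size" are
 * hypothetical, and the kernel's clflush_cache_range() helper does
 * essentially this; the surrounding mb() calls follow the same pattern,
 * since clflush itself is only weakly ordered.
 *
 * <programlisting>
 *	void *p;
 *
 *	mb();
 *	for (p = vaddr; p < vaddr + size; p += boot_cpu_data.x86_clflush_size)
 *		clflush(p);
 *	mb();
 * </programlisting>
 */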

#define nop() asm volatile ("nop")

void cpu_idle_wait(void);

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);

void stop_this_cpu(void *dummy);

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#ifdef CONFIG_X86_32
/*
 * Some non-Intel clones support out of order store. wmb() ceases to be a
 * nop for these.
 */
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define mb() 	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")
#define wmb()	asm volatile("sfence" ::: "memory")
#endif
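
/*
 * Illustrative sketch (not part of this header): the classic reason the
 * comment above insists on barriers even on UP -- ordering descriptor
 * writes against the store that hands the descriptor to a device.  The
 * structure and flag names below are hypothetical.
 *
 * <programlisting>
 *	desc->addr   = dma_addr;
 *	desc->len    = len;
 *	wmb();				// descriptor fields visible first
 *	desc->status = MYDEV_DESC_OWNED_BY_HW;	// then hand it to the device
 * </programlisting>
 */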

/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is
 * rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 **/

#define read_barrier_depends()	do { } while (0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#ifdef CONFIG_X86_PPRO_FENCE
# define smp_rmb()	rmb()
#else
# define smp_rmb()	barrier()
#endif
#ifdef CONFIG_X86_OOSTORE
# define smp_wmb() 	wmb()
#else
# define smp_wmb()	barrier()
#endif
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif
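
/*
 * Illustrative pairing (not part of this header): publishing an
 * initialised object through a shared pointer.  The writer's smp_wmb()
 * pairs with the reader's smp_read_barrier_depends(); all names below
 * are hypothetical.
 *
 * <programlisting>
 *	CPU 0 (writer)			CPU 1 (reader)
 *
 *	obj->val = 42;			p = global_ptr;
 *	smp_wmb();			smp_read_barrier_depends();
 *	global_ptr = obj;		if (p)
 *						do_something(p->val);
 * </programlisting>
 */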

/*
 * Stop RDTSC speculation. This is needed when you need to use RDTSC
 * (or get_cycles or vread that possibly accesses the TSC) in a defined
 * code region.
 *
 * (Could use a three-way alternative for this if there were one.)
 */
static __always_inline void rdtsc_barrier(void)
{
	alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
	alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
}
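
/*
 * Illustrative use (not part of this header): bracketing a TSC read so
 * that neither earlier nor later instructions are speculated across it,
 * in the spirit of the vread/get_cycles users mentioned above.  The
 * variable names are hypothetical.
 *
 * <programlisting>
 *	cycles_t t0, t1;
 *
 *	rdtsc_barrier();
 *	t0 = get_cycles();
 *	rdtsc_barrier();
 *	// ... the code being timed ...
 *	rdtsc_barrier();
 *	t1 = get_cycles();
 *	rdtsc_barrier();
 * </programlisting>
 */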

/*
 * We handle most unaligned accesses in hardware.  On the other hand
 * unaligned DMA can be quite expensive on some Nehalem processors.
 *
 * Based on this we disable the IP header alignment in network drivers.
 */
#define NET_IP_ALIGN	0
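
/*
 * Illustrative use (not part of this header): a network driver reserving
 * NET_IP_ALIGN bytes of headroom on its receive buffers, which is how the
 * policy above actually takes effect.  "dev" and "len" are hypothetical.
 *
 * <programlisting>
 *	struct sk_buff *skb;
 *
 *	skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
 *	if (skb)
 *		skb_reserve(skb, NET_IP_ALIGN);
 * </programlisting>
 */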
#endif /* _ASM_X86_SYSTEM_H */