PageRenderTime 37ms CodeModel.GetById 12ms app.highlight 17ms RepoModel.GetById 1ms app.codeStats 1ms

/arch/x86/kernel/irq_64.c

https://gitlab.com/Team-OSE-old/SimpleKernel
C | 110 lines | 73 code | 20 blank | 17 comment | 10 complexity | a202d1d9273fd63e5cf2e6486fd7d9b9 MD5 | raw file
  1/*
  2 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
  3 *
  4 * This file contains the lowest level x86_64-specific interrupt
  5 * entry and irq statistics code. All the remaining irq logic is
  6 * done by the generic kernel/irq/ code and in the
  7 * x86_64-specific irq controller code. (e.g. i8259.c and
  8 * io_apic.c.)
  9 */
 10
 11#include <linux/kernel_stat.h>
 12#include <linux/interrupt.h>
 13#include <linux/seq_file.h>
 14#include <linux/module.h>
 15#include <linux/delay.h>
 16#include <linux/ftrace.h>
 17#include <linux/uaccess.h>
 18#include <linux/smp.h>
 19#include <asm/io_apic.h>
 20#include <asm/idle.h>
 21#include <asm/apic.h>
 22
/* Per-CPU interrupt statistics (hardirq/softirq counters); cacheline-aligned
 * to avoid false sharing between CPUs. Exported for modules. */
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

/* Per-CPU pointer to the pt_regs of the interrupt currently being handled. */
DEFINE_PER_CPU(struct pt_regs *, irq_regs);
EXPORT_PER_CPU_SYMBOL(irq_regs);

/* Sysctl knob: when non-zero, panic instead of just warning when
 * stack_overflow_check() below detects a low-stack condition. */
int sysctl_panic_on_stackoverflow;
 30
 31/*
 32 * Probabilistic stack overflow check:
 33 *
 34 * Only check the stack in process context, because everything else
 35 * runs on the big interrupt stacks. Checking reliably is too expensive,
 36 * so we just check from interrupts.
 37 */
/*
 * Warn (or panic, per sysctl) if regs->sp lies outside every stack region
 * that is legitimate for kernel execution on this CPU. Compiled out unless
 * CONFIG_DEBUG_STACKOVERFLOW is set.
 */
static inline void stack_overflow_check(struct pt_regs *regs)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
	/* Bytes of headroom required above the low end of each stack. */
#define STACK_TOP_MARGIN	128
	struct orig_ist *oist;
	u64 irq_stack_top, irq_stack_bottom;
	u64 estack_top, estack_bottom;
	u64 curbase = (u64)task_stack_page(current);

	/* Interrupts from user mode arrive on a fresh kernel stack. */
	if (user_mode_vm(regs))
		return;

	/*
	 * Check 1: sp within the current task's stack page, with room for
	 * thread_info + a pt_regs frame + the safety margin at the low end.
	 */
	if (regs->sp >= curbase + sizeof(struct thread_info) +
				  sizeof(struct pt_regs) + STACK_TOP_MARGIN &&
	    regs->sp <= curbase + THREAD_SIZE)
		return;

	/*
	 * Check 2: sp within this CPU's dedicated hardirq stack.
	 * "top" is the low address (stacks grow down), hence the margin
	 * is added there; irq_stack_ptr is the initial (high) end.
	 */
	irq_stack_top = (u64)__get_cpu_var(irq_stack_union.irq_stack) +
			STACK_TOP_MARGIN;
	irq_stack_bottom = (u64)__get_cpu_var(irq_stack_ptr);
	if (regs->sp >= irq_stack_top && regs->sp <= irq_stack_bottom)
		return;

	/*
	 * Check 3: sp within the per-CPU IST exception stacks, treated as
	 * one contiguous range from the lowest (ist[0] minus its size) to
	 * the highest (ist[N_EXCEPTION_STACKS - 1]).
	 * NOTE(review): this assumes the IST stacks are allocated
	 * contiguously in ascending order — holds for the usual per-cpu
	 * layout, but it is a probabilistic check by design (see header
	 * comment above this function).
	 */
	oist = &__get_cpu_var(orig_ist);
	estack_top = (u64)oist->ist[0] - EXCEPTION_STKSZ + STACK_TOP_MARGIN;
	estack_bottom = (u64)oist->ist[N_EXCEPTION_STACKS - 1];
	if (regs->sp >= estack_top && regs->sp <= estack_bottom)
		return;

	/* sp matched no known stack: report once, with all ranges checked. */
	WARN_ONCE(1, "do_IRQ(): %s has overflown the kernel stack (cur:%Lx,sp:%lx,irq stk top-bottom:%Lx-%Lx,exception stk top-bottom:%Lx-%Lx)\n",
		current->comm, curbase, regs->sp,
		irq_stack_top, irq_stack_bottom,
		estack_top, estack_bottom);

	if (sysctl_panic_on_stackoverflow)
		panic("low stack detected by irq handler - check messages\n");
#endif
}
 76
 77bool handle_irq(unsigned irq, struct pt_regs *regs)
 78{
 79	struct irq_desc *desc;
 80
 81	stack_overflow_check(regs);
 82
 83	desc = irq_to_desc(irq);
 84	if (unlikely(!desc))
 85		return false;
 86
 87	generic_handle_irq_desc(irq, desc);
 88	return true;
 89}
 90
 91
 92extern void call_softirq(void);
 93
 94asmlinkage void do_softirq(void)
 95{
 96	__u32 pending;
 97	unsigned long flags;
 98
 99	if (in_interrupt())
100		return;
101
102	local_irq_save(flags);
103	pending = local_softirq_pending();
104	/* Switch to interrupt stack */
105	if (pending) {
106		call_softirq();
107		WARN_ON_ONCE(softirq_count());
108	}
109	local_irq_restore(flags);
110}