PageRenderTime 58ms CodeModel.GetById 2ms app.highlight 48ms RepoModel.GetById 1ms app.codeStats 0ms

/arch/alpha/kernel/traps.c

http://github.com/mirrors/linux
C | 994 lines | 759 code | 101 blank | 134 comment | 86 complexity | a0c1ed4132ecf8f0725502b94dea5c89 MD5 | raw file
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * arch/alpha/kernel/traps.c
  4 *
  5 * (C) Copyright 1994 Linus Torvalds
  6 */
  7
  8/*
  9 * This file initializes the trap entry points
 10 */
 11
 12#include <linux/jiffies.h>
 13#include <linux/mm.h>
 14#include <linux/sched/signal.h>
 15#include <linux/sched/debug.h>
 16#include <linux/tty.h>
 17#include <linux/delay.h>
 18#include <linux/extable.h>
 19#include <linux/kallsyms.h>
 20#include <linux/ratelimit.h>
 21
 22#include <asm/gentrap.h>
 23#include <linux/uaccess.h>
 24#include <asm/unaligned.h>
 25#include <asm/sysinfo.h>
 26#include <asm/hwrpb.h>
 27#include <asm/mmu_context.h>
 28#include <asm/special_insns.h>
 29
 30#include "proto.h"
 31
 32/* Work-around for some SRMs which mishandle opDEC faults.  */
 33
 34static int opDEC_fix;
 35
static void
opDEC_check(void)
{
	/*
	 * Probe how SRM delivers the opDEC (illegal instruction) fault.
	 * The architecture requires the saved PC to point one past the
	 * faulting insn; buggy SRMs report the faulting insn itself.
	 * We trigger an opDEC on purpose and record in opDEC_fix the
	 * value (0 or 4) that do_entIF must add to regs->pc to
	 * compensate.
	 */
	__asm__ __volatile__ (
	/* Load the address of... */
	"	br	$16, 1f\n"
	/* A stub instruction fault handler.  Just add 4 to the
	   pc and continue.  */
	"	ldq	$16, 8($sp)\n"
	"	addq	$16, 4, $16\n"
	"	stq	$16, 8($sp)\n"
	"	call_pal %[rti]\n"
	/* Install the instruction fault handler.  */
	"1:	lda	$17, 3\n"
	"	call_pal %[wrent]\n"
	/* With that in place, the fault from the round-to-minf fp
	   insn will arrive either at the "lda 4" insn (bad) or one
	   past that (good).  This places the correct fixup in %0.  */
	"	lda %[fix], 0\n"
	"	cvttq/svm $f31,$f31\n"
	"	lda %[fix], 4"
	: [fix] "=r" (opDEC_fix)
	: [rti] "n" (PAL_rti), [wrent] "n" (PAL_wrent)
	: "$0", "$1", "$16", "$17", "$22", "$23", "$24", "$25");

	if (opDEC_fix)
		printk("opDEC fixup enabled.\n");
}
 64
void
dik_show_regs(struct pt_regs *regs, unsigned long *r9_15)
{
	/*
	 * Dump the integer register state of a dying task.  The
	 * callee-saved registers r9-r15 are not part of struct pt_regs;
	 * callers that have them saved pass a pointer via r9_15 (indexed
	 * by register number), otherwise NULL and they are skipped.
	 */
	printk("pc = [<%016lx>]  ra = [<%016lx>]  ps = %04lx    %s\n",
	       regs->pc, regs->r26, regs->ps, print_tainted());
	printk("pc is at %pSR\n", (void *)regs->pc);
	printk("ra is at %pSR\n", (void *)regs->r26);
	printk("v0 = %016lx  t0 = %016lx  t1 = %016lx\n",
	       regs->r0, regs->r1, regs->r2);
	printk("t2 = %016lx  t3 = %016lx  t4 = %016lx\n",
	       regs->r3, regs->r4, regs->r5);
	printk("t5 = %016lx  t6 = %016lx  t7 = %016lx\n",
	       regs->r6, regs->r7, regs->r8);

	if (r9_15) {
		printk("s0 = %016lx  s1 = %016lx  s2 = %016lx\n",
		       r9_15[9], r9_15[10], r9_15[11]);
		printk("s3 = %016lx  s4 = %016lx  s5 = %016lx\n",
		       r9_15[12], r9_15[13], r9_15[14]);
		printk("s6 = %016lx\n", r9_15[15]);
	}

	printk("a0 = %016lx  a1 = %016lx  a2 = %016lx\n",
	       regs->r16, regs->r17, regs->r18);
	printk("a3 = %016lx  a4 = %016lx  a5 = %016lx\n",
	       regs->r19, regs->r20, regs->r21);
	printk("t8 = %016lx  t9 = %016lx  t10= %016lx\n",
	       regs->r22, regs->r23, regs->r24);
	printk("t11= %016lx  pv = %016lx  at = %016lx\n",
	       regs->r25, regs->r27, regs->r28);
	/* The stack pointer is the first word past the saved pt_regs. */
	printk("gp = %016lx  sp = %p\n", regs->gp, regs+1);
#if 0
__halt();
#endif
}
100
101#if 0
102static char * ireg_name[] = {"v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
103			   "t7", "s0", "s1", "s2", "s3", "s4", "s5", "s6",
104			   "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
105			   "t10", "t11", "ra", "pv", "at", "gp", "sp", "zero"};
106#endif
107
static void
dik_show_code(unsigned int *pc)
{
	long i;

	/*
	 * Dump the instruction stream around the fault: six words before
	 * the pc, the word at the pc itself (bracketed with <...>), and
	 * one word after.  Stop early if the memory is inaccessible.
	 */
	printk("Code:");
	for (i = -6; i < 2; i++) {
		unsigned int insn;
		if (__get_user(insn, (unsigned int __user *)pc + i))
			break;
		printk("%c%08x%c", i ? ' ' : '<', insn, i ? ' ' : '>');
	}
	printk("\n");
}
122
/*
 * Print a crude backtrace: scan the kernel stack upward (stopping at
 * the 8KB stack-page boundary) and report every word that looks like a
 * kernel text address.
 *
 * Bug fix: the 40-entry cap was dead code because `i` was never
 * incremented, so the "..." truncation could never trigger; the loop
 * was bounded only by the stack page mask.  Count printed entries so
 * the cap actually works.
 */
static void
dik_show_trace(unsigned long *sp)
{
	long i = 0;
	printk("Trace:\n");
	while (0x1ff8 & (unsigned long) sp) {
		extern char _stext[], _etext[];
		unsigned long tmp = *sp;
		sp++;
		/* Skip words that do not point into kernel text. */
		if (tmp < (unsigned long) &_stext)
			continue;
		if (tmp >= (unsigned long) &_etext)
			continue;
		printk("[<%lx>] %pSR\n", tmp, (void *)tmp);
		if (i++ > 40) {
			printk(" ...");
			break;
		}
	}
	printk("\n");
}
144
145static int kstack_depth_to_print = 24;
146
147void show_stack(struct task_struct *task, unsigned long *sp)
148{
149	unsigned long *stack;
150	int i;
151
152	/*
153	 * debugging aid: "show_stack(NULL);" prints the
154	 * back trace for this cpu.
155	 */
156	if(sp==NULL)
157		sp=(unsigned long*)&sp;
158
159	stack = sp;
160	for(i=0; i < kstack_depth_to_print; i++) {
161		if (((long) stack & (THREAD_SIZE-1)) == 0)
162			break;
163		if ((i % 4) == 0) {
164			if (i)
165				pr_cont("\n");
166			printk("       ");
167		} else {
168			pr_cont(" ");
169		}
170		pr_cont("%016lx", *stack++);
171	}
172	pr_cont("\n");
173	dik_show_trace(sp);
174}
175
void
die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15)
{
	/*
	 * Dump state and kill the current task if a trap was taken in
	 * kernel mode.  PS bit 3 set means the trap came from user mode,
	 * in which case there is nothing for us to do here.
	 */
	if (regs->ps & 8)
		return;
#ifdef CONFIG_SMP
	printk("CPU %d ", hard_smp_processor_id());
#endif
	printk("%s(%d): %s %ld\n", current->comm, task_pid_nr(current), str, err);
	dik_show_regs(regs, r9_15);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	/* The saved pt_regs sit just below the rest of the kernel stack. */
	dik_show_trace((unsigned long *)(regs+1));
	dik_show_code((unsigned int *)regs->pc);

	/* If we fault again while dying, spin instead of recursing.  */
	if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) {
		printk("die_if_kernel recursion detected.\n");
		local_irq_enable();
		while (1);
	}
	do_exit(SIGSEGV);
}
197
#ifndef CONFIG_MATHEMU
/* No FP software-completion emulator built in.  Use function pointers
   with do-nothing stubs; they are exported (GPL) so a separately built
   emulator can presumably hook itself in — TODO confirm against the
   math-emu module.  */
static long dummy_emul(void) { return 0; }
long (*alpha_fp_emul_imprecise)(struct pt_regs *regs, unsigned long writemask)
  = (void *)dummy_emul;
EXPORT_SYMBOL_GPL(alpha_fp_emul_imprecise);
long (*alpha_fp_emul) (unsigned long pc)
  = (void *)dummy_emul;
EXPORT_SYMBOL_GPL(alpha_fp_emul);
#else
/* Emulator built in: direct declarations of the real routines.  */
long alpha_fp_emul_imprecise(struct pt_regs *regs, unsigned long writemask);
long alpha_fp_emul (unsigned long pc);
#endif
210
/*
 * Arithmetic fault entry point.  A zero return from the emulator means
 * the insn was fully handled; otherwise the emulator's return value is
 * used as the SIGFPE si_code (falling back to FPE_FLTINV).
 */
asmlinkage void
do_entArith(unsigned long summary, unsigned long write_mask,
	    struct pt_regs *regs)
{
	long si_code = FPE_FLTINV;

	if (summary & 1) {
		/* Software-completion summary bit is set, so try to
		   emulate the instruction.  If the processor supports
		   precise exceptions, we don't have to search.  */
		if (!amask(AMASK_PRECISE_TRAP))
			si_code = alpha_fp_emul(regs->pc - 4);
		else
			si_code = alpha_fp_emul_imprecise(regs, write_mask);
		if (si_code == 0)
			return;
	}
	/* In kernel mode this is fatal; in user mode raise SIGFPE.  */
	die_if_kernel("Arithmetic fault", regs, 0, NULL);

	send_sig_fault(SIGFPE, si_code, (void __user *) regs->pc, 0, current);
}
232
/*
 * Instruction fault entry point.  `type` selects the flavor:
 *   0 = breakpoint, 1 = bugcheck, 2 = gentrap, 3 = FEN (FP disabled),
 *   4 = opDEC (illegal instruction), 5 = illoc.
 */
asmlinkage void
do_entIF(unsigned long type, struct pt_regs *regs)
{
	int signo, code;

	/* Faults taken in kernel mode (PS interrupt level only).  */
	if ((regs->ps & ~IPL_MAX) == 0) {
		if (type == 1) {
			/* BUG() encodes file and line in the two insn
			   words following the trapping one.  */
			const unsigned int *data
			  = (const unsigned int *) regs->pc;
			printk("Kernel bug at %s:%d\n",
			       (const char *)(data[1] | (long)data[2] << 32), 
			       data[0]);
		}
#ifdef CONFIG_ALPHA_WTINT
		if (type == 4) {
			/* If CALL_PAL WTINT is totally unsupported by the
			   PALcode, e.g. MILO, "emulate" it by overwriting
			   the insn.  */
			unsigned int *pinsn
			  = (unsigned int *) regs->pc - 1;
			if (*pinsn == PAL_wtint) {
				*pinsn = 0x47e01400; /* mov 0,$0 */
				imb();
				regs->r0 = 0;
				return;
			}
		}
#endif /* ALPHA_WTINT */
		die_if_kernel((type == 1 ? "Kernel Bug" : "Instruction fault"),
			      regs, type, NULL);
	}

	/* User-mode faults from here on.  */
	switch (type) {
	      case 0: /* breakpoint */
		/* If ptrace planted this breakpoint, rewind the pc so
		   the original insn can be re-executed later.  */
		if (ptrace_cancel_bpt(current)) {
			regs->pc -= 4;	/* make pc point to former bpt */
		}

		send_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->pc, 0,
			       current);
		return;

	      case 1: /* bugcheck */
		send_sig_fault(SIGTRAP, TRAP_UNK, (void __user *) regs->pc, 0,
			       current);
		return;
		
	      case 2: /* gentrap */
		/* The gentrap code in r16 maps onto a signal/si_code
		   pair; anything unrecognized becomes SIGTRAP.  */
		switch ((long) regs->r16) {
		case GEN_INTOVF:
			signo = SIGFPE;
			code = FPE_INTOVF;
			break;
		case GEN_INTDIV:
			signo = SIGFPE;
			code = FPE_INTDIV;
			break;
		case GEN_FLTOVF:
			signo = SIGFPE;
			code = FPE_FLTOVF;
			break;
		case GEN_FLTDIV:
			signo = SIGFPE;
			code = FPE_FLTDIV;
			break;
		case GEN_FLTUND:
			signo = SIGFPE;
			code = FPE_FLTUND;
			break;
		case GEN_FLTINV:
			signo = SIGFPE;
			code = FPE_FLTINV;
			break;
		case GEN_FLTINE:
			signo = SIGFPE;
			code = FPE_FLTRES;
			break;
		case GEN_ROPRAND:
			signo = SIGFPE;
			code = FPE_FLTUNK;
			break;

		case GEN_DECOVF:
		case GEN_DECDIV:
		case GEN_DECINV:
		case GEN_ASSERTERR:
		case GEN_NULPTRERR:
		case GEN_STKOVF:
		case GEN_STRLENERR:
		case GEN_SUBSTRERR:
		case GEN_RANGERR:
		case GEN_SUBRNG:
		case GEN_SUBRNG1:
		case GEN_SUBRNG2:
		case GEN_SUBRNG3:
		case GEN_SUBRNG4:
		case GEN_SUBRNG5:
		case GEN_SUBRNG6:
		case GEN_SUBRNG7:
		default:
			signo = SIGTRAP;
			code = TRAP_UNK;
			break;
		}

		send_sig_fault(signo, code, (void __user *) regs->pc, regs->r16,
			       current);
		return;

	      case 4: /* opDEC */
		if (implver() == IMPLVER_EV4) {
			long si_code;

			/* The some versions of SRM do not handle
			   the opDEC properly - they return the PC of the
			   opDEC fault, not the instruction after as the
			   Alpha architecture requires.  Here we fix it up.
			   We do this by intentionally causing an opDEC
			   fault during the boot sequence and testing if
			   we get the correct PC.  If not, we set a flag
			   to correct it every time through.  */
			regs->pc += opDEC_fix; 
			
			/* EV4 does not implement anything except normal
			   rounding.  Everything else will come here as
			   an illegal instruction.  Emulate them.  */
			si_code = alpha_fp_emul(regs->pc - 4);
			if (si_code == 0)
				return;
			if (si_code > 0) {
				send_sig_fault(SIGFPE, si_code,
					       (void __user *) regs->pc, 0,
					       current);
				return;
			}
			/* si_code < 0: not an FP insn — fall through to
			   SIGILL below.  */
		}
		break;

	      case 3: /* FEN fault */
		/* Irritating users can call PAL_clrfen to disable the
		   FPU for the process.  The kernel will then trap in
		   do_switch_stack and undo_switch_stack when we try
		   to save and restore the FP registers.

		   Given that GCC by default generates code that uses the
		   FP registers, PAL_clrfen is not useful except for DoS
		   attacks.  So turn the bleeding FPU back on and be done
		   with it.  */
		current_thread_info()->pcb.flags |= 1;
		__reload_thread(&current_thread_info()->pcb);
		return;

	      case 5: /* illoc */
	      default: /* unexpected instruction-fault type */
		      ;
	}

	send_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)regs->pc, 0, current);
}
392
393/* There is an ifdef in the PALcode in MILO that enables a 
394   "kernel debugging entry point" as an unprivileged call_pal.
395
396   We don't want to have anything to do with it, but unfortunately
397   several versions of MILO included in distributions have it enabled,
398   and if we don't put something on the entry point we'll oops.  */
399
asmlinkage void
do_entDbg(struct pt_regs *regs)
{
	/* Entry point for MILO's unprivileged "kernel debugging"
	   call_pal (see the comment above).  Treat it as an illegal
	   instruction: fatal in kernel mode, SIGILL otherwise.  */
	die_if_kernel("Instruction fault", regs, 0, NULL);

	force_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)regs->pc, 0);
}
407
408
409/*
410 * entUna has a different register layout to be reasonably simple. It
411 * needs access to all the integer registers (the kernel doesn't use
412 * fp-regs), and it needs to have them in order for simpler access.
413 *
414 * Due to the non-standard register layout (and because we don't want
415 * to handle floating-point regs), user-mode unaligned accesses are
416 * handled separately by do_entUnaUser below.
417 *
418 * Oh, btw, we don't handle the "gp" register correctly, but if we fault
419 * on a gp-register unaligned load/store, something is _very_ wrong
420 * in the kernel anyway..
421 */
/* Register layout saved by the entUna entry code: all 32 integer
   registers in order, followed by the PAL-saved state.  */
struct allregs {
	unsigned long regs[32];
	unsigned long ps, pc, gp, a0, a1, a2;
};

/* Unaligned-access statistics: [0] is updated by do_entUna (kernel
   faults), [1] by do_entUnaUser (user faults).  */
struct unaligned_stat {
	unsigned long count, va, pc;
} unaligned[2];


/* Macro for exception fixup code to access integer registers.  a0-a2
   (r16-r18) live in the trailing fields of struct allregs rather than
   in regs[], hence the (r)+19 remapping (16+19 == 35 == index of a0
   when the struct is viewed as an array of longs).  */
#define una_reg(r)  (_regs[(r) >= 16 && (r) <= 18 ? (r)+19 : (r)])
434
435
/*
 * Handle an unaligned load/store taken in kernel mode.  The access is
 * emulated with ldq_u/stq_u + extract/insert/mask sequences; if the
 * emulation itself faults, forward to the exception-table fixup when
 * one exists, otherwise dump state and die.
 */
asmlinkage void
do_entUna(void * va, unsigned long opcode, unsigned long reg,
	  struct allregs *regs)
{
	long error, tmp1, tmp2, tmp3, tmp4;
	unsigned long pc = regs->pc - 4;	/* pc of the faulting insn */
	unsigned long *_regs = regs->regs;	/* accessed via una_reg() */
	const struct exception_table_entry *fixup;

	unaligned[0].count++;
	unaligned[0].va = (unsigned long) va;
	unaligned[0].pc = pc;

	/* We don't want to use the generic get/put unaligned macros as
	   we want to trap exceptions.  Only if we actually get an
	   exception will we decide whether we should have caught it.  */

	switch (opcode) {
	case 0x0c: /* ldwu */
		__asm__ __volatile__(
		"1:	ldq_u %1,0(%3)\n"
		"2:	ldq_u %2,1(%3)\n"
		"	extwl %1,%3,%1\n"
		"	extwh %2,%3,%2\n"
		"3:\n"
		EXC(1b,3b,%1,%0)
		EXC(2b,3b,%2,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto got_exception;
		una_reg(reg) = tmp1|tmp2;
		return;

	case 0x28: /* ldl */
		__asm__ __volatile__(
		"1:	ldq_u %1,0(%3)\n"
		"2:	ldq_u %2,3(%3)\n"
		"	extll %1,%3,%1\n"
		"	extlh %2,%3,%2\n"
		"3:\n"
		EXC(1b,3b,%1,%0)
		EXC(2b,3b,%2,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto got_exception;
		/* The (int) cast sign-extends the 32-bit result.  */
		una_reg(reg) = (int)(tmp1|tmp2);
		return;

	case 0x29: /* ldq */
		__asm__ __volatile__(
		"1:	ldq_u %1,0(%3)\n"
		"2:	ldq_u %2,7(%3)\n"
		"	extql %1,%3,%1\n"
		"	extqh %2,%3,%2\n"
		"3:\n"
		EXC(1b,3b,%1,%0)
		EXC(2b,3b,%2,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto got_exception;
		una_reg(reg) = tmp1|tmp2;
		return;

	/* Note that the store sequences do not indicate that they change
	   memory because it _should_ be affecting nothing in this context.
	   (Otherwise we have other, much larger, problems.)  */
	case 0x0d: /* stw */
		__asm__ __volatile__(
		"1:	ldq_u %2,1(%5)\n"
		"2:	ldq_u %1,0(%5)\n"
		"	inswh %6,%5,%4\n"
		"	inswl %6,%5,%3\n"
		"	mskwh %2,%5,%2\n"
		"	mskwl %1,%5,%1\n"
		"	or %2,%4,%2\n"
		"	or %1,%3,%1\n"
		"3:	stq_u %2,1(%5)\n"
		"4:	stq_u %1,0(%5)\n"
		"5:\n"
		EXC(1b,5b,%2,%0)
		EXC(2b,5b,%1,%0)
		EXC(3b,5b,$31,%0)
		EXC(4b,5b,$31,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
			  "=&r"(tmp3), "=&r"(tmp4)
			: "r"(va), "r"(una_reg(reg)), "0"(0));
		if (error)
			goto got_exception;
		return;

	case 0x2c: /* stl */
		__asm__ __volatile__(
		"1:	ldq_u %2,3(%5)\n"
		"2:	ldq_u %1,0(%5)\n"
		"	inslh %6,%5,%4\n"
		"	insll %6,%5,%3\n"
		"	msklh %2,%5,%2\n"
		"	mskll %1,%5,%1\n"
		"	or %2,%4,%2\n"
		"	or %1,%3,%1\n"
		"3:	stq_u %2,3(%5)\n"
		"4:	stq_u %1,0(%5)\n"
		"5:\n"
		EXC(1b,5b,%2,%0)
		EXC(2b,5b,%1,%0)
		EXC(3b,5b,$31,%0)
		EXC(4b,5b,$31,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
			  "=&r"(tmp3), "=&r"(tmp4)
			: "r"(va), "r"(una_reg(reg)), "0"(0));
		if (error)
			goto got_exception;
		return;

	case 0x2d: /* stq */
		__asm__ __volatile__(
		"1:	ldq_u %2,7(%5)\n"
		"2:	ldq_u %1,0(%5)\n"
		"	insqh %6,%5,%4\n"
		"	insql %6,%5,%3\n"
		"	mskqh %2,%5,%2\n"
		"	mskql %1,%5,%1\n"
		"	or %2,%4,%2\n"
		"	or %1,%3,%1\n"
		"3:	stq_u %2,7(%5)\n"
		"4:	stq_u %1,0(%5)\n"
		"5:\n"
		EXC(1b,5b,%2,%0)
		EXC(2b,5b,%1,%0)
		EXC(3b,5b,$31,%0)
		EXC(4b,5b,$31,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
			  "=&r"(tmp3), "=&r"(tmp4)
			: "r"(va), "r"(una_reg(reg)), "0"(0));
		if (error)
			goto got_exception;
		return;
	}

	/* Any other opcode should never arrive here from kernel code.  */
	printk("Bad unaligned kernel access at %016lx: %p %lx %lu\n",
		pc, va, opcode, reg);
	do_exit(SIGSEGV);

got_exception:
	/* Ok, we caught the exception, but we don't want it.  Is there
	   someone to pass it along to?  */
	if ((fixup = search_exception_tables(pc)) != 0) {
		unsigned long newpc;
		newpc = fixup_exception(una_reg, fixup, pc);

		printk("Forwarding unaligned exception at %lx (%lx)\n",
		       pc, newpc);

		regs->pc = newpc;
		return;
	}

	/*
	 * Yikes!  No one to forward the exception to.
	 * Since the registers are in a weird format, dump them ourselves.
 	 */

	printk("%s(%d): unhandled unaligned exception\n",
	       current->comm, task_pid_nr(current));

	printk("pc = [<%016lx>]  ra = [<%016lx>]  ps = %04lx\n",
	       pc, una_reg(26), regs->ps);
	printk("r0 = %016lx  r1 = %016lx  r2 = %016lx\n",
	       una_reg(0), una_reg(1), una_reg(2));
	printk("r3 = %016lx  r4 = %016lx  r5 = %016lx\n",
	       una_reg(3), una_reg(4), una_reg(5));
	printk("r6 = %016lx  r7 = %016lx  r8 = %016lx\n",
	       una_reg(6), una_reg(7), una_reg(8));
	printk("r9 = %016lx  r10= %016lx  r11= %016lx\n",
	       una_reg(9), una_reg(10), una_reg(11));
	printk("r12= %016lx  r13= %016lx  r14= %016lx\n",
	       una_reg(12), una_reg(13), una_reg(14));
	printk("r15= %016lx\n", una_reg(15));
	printk("r16= %016lx  r17= %016lx  r18= %016lx\n",
	       una_reg(16), una_reg(17), una_reg(18));
	printk("r19= %016lx  r20= %016lx  r21= %016lx\n",
	       una_reg(19), una_reg(20), una_reg(21));
	printk("r22= %016lx  r23= %016lx  r24= %016lx\n",
	       una_reg(22), una_reg(23), una_reg(24));
	printk("r25= %016lx  r27= %016lx  r28= %016lx\n",
	       una_reg(25), una_reg(27), una_reg(28));
	printk("gp = %016lx  sp = %p\n", regs->gp, regs+1);

	dik_show_code((unsigned int *)pc);
	dik_show_trace((unsigned long *)(regs+1));

	/* Guard against recursive death, as in die_if_kernel.  */
	if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) {
		printk("die_if_kernel recursion detected.\n");
		local_irq_enable();
		while (1);
	}
	do_exit(SIGSEGV);
}
637
638/*
639 * Convert an s-floating point value in memory format to the
640 * corresponding value in register format.  The exponent
641 * needs to be remapped to preserve non-finite values
642 * (infinities, not-a-numbers, denormals).
643 */
/*
 * Convert an s-floating point value in memory format to the
 * corresponding value in register (T) format.  The 8-bit memory
 * exponent is remapped onto the 11-bit register exponent so that
 * non-finite values (infinities, NaNs, zeros/denormals) keep their
 * meaning.
 */
static inline unsigned long
s_mem_to_reg (unsigned long s_mem)
{
	unsigned long frac  = s_mem & 0x7fffff;
	unsigned long sign  = (s_mem >> 31) & 0x1;
	unsigned long s_exp = (s_mem >> 23) & 0xff;	/* full 8-bit exponent */
	unsigned long t_exp;

	if (s_exp == 0xff)
		t_exp = 0x7ff;			/* Inf / NaN */
	else if (s_exp == 0x00)
		t_exp = 0x000;			/* zero / denormal */
	else if (s_exp & 0x80)
		t_exp = 0x400 | (s_exp & 0x7f);	/* high half of range */
	else
		t_exp = 0x380 | s_exp;		/* low half: rebias */

	return (sign << 63) | (t_exp << 52) | (frac << 29);
}
667
668/*
669 * Convert an s-floating point value in register format to the
670 * corresponding value in memory format.
671 */
/*
 * Convert an s-floating point value in register (T) format back to
 * memory format: the sign and exponent-msb pair (register bits 63:62)
 * land in memory bits 31:30, and register bits 58:29 supply the
 * remaining exponent and fraction bits.
 */
static inline unsigned long
s_reg_to_mem (unsigned long s_reg)
{
	unsigned long top = (s_reg >> 32) & 0xc0000000;	/* bits 63:62 -> 31:30 */
	unsigned long low = (s_reg >> 29) & 0x3fffffff;	/* bits 58:29 -> 29:0 */

	return top | low;
}
677
678/*
679 * Handle user-level unaligned fault.  Handling user-level unaligned
680 * faults is *extremely* slow and produces nasty messages.  A user
681 * program *should* fix unaligned faults ASAP.
682 *
683 * Notice that we have (almost) the regular kernel stack layout here,
684 * so finding the appropriate registers is a little more difficult
685 * than in the kernel case.
686 *
687 * Finally, we handle regular integer load/stores only.  In
688 * particular, load-linked/store-conditionally and floating point
689 * load/stores are not supported.  The former make no sense with
690 * unaligned faults (they are guaranteed to fail) and I don't think
691 * the latter will occur in any decent program.
692 *
693 * Sigh. We *do* have to handle some FP operations, because GCC will
694 * uses them as temporary storage for integer memory to memory copies.
695 * However, we need to deal with stt/ldt and sts/lds only.
696 */
697
/* Bitmask (indexed by opcode) of the integer load/store insns whose
   target register must be located in pt_regs before emulation.  */
#define OP_INT_MASK	( 1L << 0x28 | 1L << 0x2c   /* ldl stl */	\
			| 1L << 0x29 | 1L << 0x2d   /* ldq stq */	\
			| 1L << 0x0c | 1L << 0x0d   /* ldwu stw */	\
			| 1L << 0x0a | 1L << 0x0e ) /* ldbu stb */

/* Bitmask of all the store opcodes (integer and floating).  */
#define OP_WRITE_MASK	( 1L << 0x26 | 1L << 0x27   /* sts stt */	\
			| 1L << 0x2c | 1L << 0x2d   /* stl stq */	\
			| 1L << 0x0d | 1L << 0x0e ) /* stw stb */

/* Byte offset of member x within struct pt_regs.  */
#define R(x)	((size_t) &((struct pt_regs *)0)->x)

/* Byte offsets (relative to the pt_regs base) at which each integer
   register is saved for a user-mode unaligned fault.  */
static int unauser_reg_offsets[32] = {
	R(r0), R(r1), R(r2), R(r3), R(r4), R(r5), R(r6), R(r7), R(r8),
	/* r9 ... r15 are stored in front of regs.  */
	-56, -48, -40, -32, -24, -16, -8,
	R(r16), R(r17), R(r18),
	R(r19), R(r20), R(r21), R(r22), R(r23), R(r24), R(r25), R(r26),
	R(r27), R(r28), R(gp),
	/* r30 (usp) and r31 (zero) are handled specially, not via offset. */
	0, 0
};

#undef R
720
asmlinkage void
do_entUnaUser(void __user * va, unsigned long opcode,
	      unsigned long reg, struct pt_regs *regs)
{
	/* Rate-limit the nag message to 5 per 5 seconds.  */
	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);

	unsigned long tmp1, tmp2, tmp3, tmp4;
	/* reg_addr points at the register's save slot; fake_reg stands
	   in for registers with no slot (usp, the zero register, and
	   FP values staged for store).  */
	unsigned long fake_reg, *reg_addr = &fake_reg;
	int si_code;
	long error;

	/* Check the UAC bits to decide what the user wants us to do
	   with the unaligned access.  */

	if (!(current_thread_info()->status & TS_UAC_NOPRINT)) {
		if (__ratelimit(&ratelimit)) {
			printk("%s(%d): unaligned trap at %016lx: %p %lx %ld\n",
			       current->comm, task_pid_nr(current),
			       regs->pc - 4, va, opcode, reg);
		}
	}
	if ((current_thread_info()->status & TS_UAC_SIGBUS))
		goto give_sigbus;
	/* Not sure why you'd want to use this, but... */
	if ((current_thread_info()->status & TS_UAC_NOFIX))
		return;

	/* Don't bother reading ds in the access check since we already
	   know that this came from the user.  Also rely on the fact that
	   the page at TASK_SIZE is unmapped and so can't be touched anyway. */
	if ((unsigned long)va >= TASK_SIZE)
		goto give_sigsegv;

	++unaligned[1].count;
	unaligned[1].va = (unsigned long)va;
	unaligned[1].pc = regs->pc - 4;

	if ((1L << opcode) & OP_INT_MASK) {
		/* it's an integer load/store */
		if (reg < 30) {
			reg_addr = (unsigned long *)
			  ((char *)regs + unauser_reg_offsets[reg]);
		} else if (reg == 30) {
			/* usp in PAL regs */
			fake_reg = rdusp();
		} else {
			/* zero "register" */
			fake_reg = 0;
		}
	}

	/* We don't want to use the generic get/put unaligned macros as
	   we want to trap exceptions.  Only if we actually get an
	   exception will we decide whether we should have caught it.  */

	switch (opcode) {
	case 0x0c: /* ldwu */
		__asm__ __volatile__(
		"1:	ldq_u %1,0(%3)\n"
		"2:	ldq_u %2,1(%3)\n"
		"	extwl %1,%3,%1\n"
		"	extwh %2,%3,%2\n"
		"3:\n"
		EXC(1b,3b,%1,%0)
		EXC(2b,3b,%2,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto give_sigsegv;
		*reg_addr = tmp1|tmp2;
		break;

	case 0x22: /* lds */
		__asm__ __volatile__(
		"1:	ldq_u %1,0(%3)\n"
		"2:	ldq_u %2,3(%3)\n"
		"	extll %1,%3,%1\n"
		"	extlh %2,%3,%2\n"
		"3:\n"
		EXC(1b,3b,%1,%0)
		EXC(2b,3b,%2,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto give_sigsegv;
		alpha_write_fp_reg(reg, s_mem_to_reg((int)(tmp1|tmp2)));
		return;

	case 0x23: /* ldt */
		__asm__ __volatile__(
		"1:	ldq_u %1,0(%3)\n"
		"2:	ldq_u %2,7(%3)\n"
		"	extql %1,%3,%1\n"
		"	extqh %2,%3,%2\n"
		"3:\n"
		EXC(1b,3b,%1,%0)
		EXC(2b,3b,%2,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto give_sigsegv;
		alpha_write_fp_reg(reg, tmp1|tmp2);
		return;

	case 0x28: /* ldl */
		__asm__ __volatile__(
		"1:	ldq_u %1,0(%3)\n"
		"2:	ldq_u %2,3(%3)\n"
		"	extll %1,%3,%1\n"
		"	extlh %2,%3,%2\n"
		"3:\n"
		EXC(1b,3b,%1,%0)
		EXC(2b,3b,%2,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto give_sigsegv;
		/* The (int) cast sign-extends the 32-bit result.  */
		*reg_addr = (int)(tmp1|tmp2);
		break;

	case 0x29: /* ldq */
		__asm__ __volatile__(
		"1:	ldq_u %1,0(%3)\n"
		"2:	ldq_u %2,7(%3)\n"
		"	extql %1,%3,%1\n"
		"	extqh %2,%3,%2\n"
		"3:\n"
		EXC(1b,3b,%1,%0)
		EXC(2b,3b,%2,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto give_sigsegv;
		*reg_addr = tmp1|tmp2;
		break;

	/* Note that the store sequences do not indicate that they change
	   memory because it _should_ be affecting nothing in this context.
	   (Otherwise we have other, much larger, problems.)  */
	case 0x0d: /* stw */
		__asm__ __volatile__(
		"1:	ldq_u %2,1(%5)\n"
		"2:	ldq_u %1,0(%5)\n"
		"	inswh %6,%5,%4\n"
		"	inswl %6,%5,%3\n"
		"	mskwh %2,%5,%2\n"
		"	mskwl %1,%5,%1\n"
		"	or %2,%4,%2\n"
		"	or %1,%3,%1\n"
		"3:	stq_u %2,1(%5)\n"
		"4:	stq_u %1,0(%5)\n"
		"5:\n"
		EXC(1b,5b,%2,%0)
		EXC(2b,5b,%1,%0)
		EXC(3b,5b,$31,%0)
		EXC(4b,5b,$31,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
			  "=&r"(tmp3), "=&r"(tmp4)
			: "r"(va), "r"(*reg_addr), "0"(0));
		if (error)
			goto give_sigsegv;
		return;

	case 0x26: /* sts */
		/* Stage the FP value in fake_reg and share the stl path. */
		fake_reg = s_reg_to_mem(alpha_read_fp_reg(reg));
		/* FALLTHRU */

	case 0x2c: /* stl */
		__asm__ __volatile__(
		"1:	ldq_u %2,3(%5)\n"
		"2:	ldq_u %1,0(%5)\n"
		"	inslh %6,%5,%4\n"
		"	insll %6,%5,%3\n"
		"	msklh %2,%5,%2\n"
		"	mskll %1,%5,%1\n"
		"	or %2,%4,%2\n"
		"	or %1,%3,%1\n"
		"3:	stq_u %2,3(%5)\n"
		"4:	stq_u %1,0(%5)\n"
		"5:\n"
		EXC(1b,5b,%2,%0)
		EXC(2b,5b,%1,%0)
		EXC(3b,5b,$31,%0)
		EXC(4b,5b,$31,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
			  "=&r"(tmp3), "=&r"(tmp4)
			: "r"(va), "r"(*reg_addr), "0"(0));
		if (error)
			goto give_sigsegv;
		return;

	case 0x27: /* stt */
		/* Stage the FP value in fake_reg and share the stq path. */
		fake_reg = alpha_read_fp_reg(reg);
		/* FALLTHRU */

	case 0x2d: /* stq */
		__asm__ __volatile__(
		"1:	ldq_u %2,7(%5)\n"
		"2:	ldq_u %1,0(%5)\n"
		"	insqh %6,%5,%4\n"
		"	insql %6,%5,%3\n"
		"	mskqh %2,%5,%2\n"
		"	mskql %1,%5,%1\n"
		"	or %2,%4,%2\n"
		"	or %1,%3,%1\n"
		"3:	stq_u %2,7(%5)\n"
		"4:	stq_u %1,0(%5)\n"
		"5:\n"
		EXC(1b,5b,%2,%0)
		EXC(2b,5b,%1,%0)
		EXC(3b,5b,$31,%0)
		EXC(4b,5b,$31,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
			  "=&r"(tmp3), "=&r"(tmp4)
			: "r"(va), "r"(*reg_addr), "0"(0));
		if (error)
			goto give_sigsegv;
		return;

	default:
		/* What instruction were you trying to use, exactly?  */
		goto give_sigbus;
	}

	/* Only integer loads should get here; everyone else returns early. */
	if (reg == 30)
		wrusp(fake_reg);
	return;

give_sigsegv:
	regs->pc -= 4;  /* make pc point to faulting insn */

	/* We need to replicate some of the logic in mm/fault.c,
	   since we don't have access to the fault code in the
	   exception handling return path.  */
	if ((unsigned long)va >= TASK_SIZE)
		si_code = SEGV_ACCERR;
	else {
		struct mm_struct *mm = current->mm;
		down_read(&mm->mmap_sem);
		if (find_vma(mm, (unsigned long)va))
			si_code = SEGV_ACCERR;
		else
			si_code = SEGV_MAPERR;
		up_read(&mm->mmap_sem);
	}
	send_sig_fault(SIGSEGV, si_code, va, 0, current);
	return;

give_sigbus:
	regs->pc -= 4;
	send_sig_fault(SIGBUS, BUS_ADRALN, va, 0, current);
	return;
}
975
void
trap_init(void)
{
	/* Tell PAL-code what global pointer we want in the kernel.  */
	register unsigned long gptr __asm__("$29");
	wrkgp(gptr);

	/* Hack for Multia (UDB) and JENSEN: some of their SRMs have
	   a bug in the handling of the opDEC fault.  Fix it up if so.  */
	if (implver() == IMPLVER_EV4)
		opDEC_check();

	/* Register the exception entry points with the PALcode.  */
	wrent(entArith, 1);	/* arithmetic fault */
	wrent(entMM, 2);	/* memory-management fault */
	wrent(entIF, 3);	/* instruction fault */
	wrent(entUna, 4);	/* unaligned access */
	wrent(entSys, 5);	/* system call */
	wrent(entDbg, 6);	/* debug */
}