
/arch/sparc/kernel/smp_64.c

https://bitbucket.org/cresqo/cm7-p500-kernel
   1/* smp.c: Sparc64 SMP support.
   2 *
   3 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
   4 */
   5
   6#include <linux/module.h>
   7#include <linux/kernel.h>
   8#include <linux/sched.h>
   9#include <linux/mm.h>
  10#include <linux/pagemap.h>
  11#include <linux/threads.h>
  12#include <linux/smp.h>
  13#include <linux/interrupt.h>
  14#include <linux/kernel_stat.h>
  15#include <linux/delay.h>
  16#include <linux/init.h>
  17#include <linux/spinlock.h>
  18#include <linux/fs.h>
  19#include <linux/seq_file.h>
  20#include <linux/cache.h>
  21#include <linux/jiffies.h>
  22#include <linux/profile.h>
  23#include <linux/bootmem.h>
  24#include <linux/vmalloc.h>
  25#include <linux/ftrace.h>
  26#include <linux/cpu.h>
  27#include <linux/slab.h>
  28
  29#include <asm/head.h>
  30#include <asm/ptrace.h>
  31#include <asm/atomic.h>
  32#include <asm/tlbflush.h>
  33#include <asm/mmu_context.h>
  34#include <asm/cpudata.h>
  35#include <asm/hvtramp.h>
  36#include <asm/io.h>
  37#include <asm/timer.h>
  38
  39#include <asm/irq.h>
  40#include <asm/irq_regs.h>
  41#include <asm/page.h>
  42#include <asm/pgtable.h>
  43#include <asm/oplib.h>
  44#include <asm/uaccess.h>
  45#include <asm/starfire.h>
  46#include <asm/tlb.h>
  47#include <asm/sections.h>
  48#include <asm/prom.h>
  49#include <asm/mdesc.h>
  50#include <asm/ldc.h>
  51#include <asm/hypervisor.h>
  52
  53#include "cpumap.h"
  54
  55int sparc64_multi_core __read_mostly;
  56
  57DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
  58cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
  59	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };
  60
  61EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
  62EXPORT_SYMBOL(cpu_core_map);
  63
  64static cpumask_t smp_commenced_mask;
  65
  66void smp_info(struct seq_file *m)
  67{
  68	int i;
   69
  70	seq_printf(m, "State:\n");
  71	for_each_online_cpu(i)
  72		seq_printf(m, "CPU%d:\t\tonline\n", i);
  73}
  74
  75void smp_bogo(struct seq_file *m)
  76{
  77	int i;
   78
  79	for_each_online_cpu(i)
  80		seq_printf(m,
  81			   "Cpu%dClkTck\t: %016lx\n",
  82			   i, cpu_data(i).clock_tick);
  83}
  84
  85extern void setup_sparc64_timer(void);
  86
  87static volatile unsigned long callin_flag = 0;
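/* Boot handshake, roughly: smp_boot_one_cpu() clears callin_flag, kicks
 * the new cpu, then polls the flag for up to 50000 * udelay(100), i.e.
 * about five seconds.  smp_callin() on the new cpu sets the flag once it
 * is far enough along; the "volatile" is what keeps that polling loop
 * honest rather than any explicit barrier.
 */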
  88
  89void __cpuinit smp_callin(void)
  90{
  91	int cpuid = hard_smp_processor_id();
  92
  93	__local_per_cpu_offset = __per_cpu_offset(cpuid);
  94
  95	if (tlb_type == hypervisor)
  96		sun4v_ktsb_register();
  97
  98	__flush_tlb_all();
  99
 100	setup_sparc64_timer();
 101
 102	if (cheetah_pcache_forced_on)
 103		cheetah_enable_pcache();
 104
 105	local_irq_enable();
 106
 107	callin_flag = 1;
 108	__asm__ __volatile__("membar #Sync\n\t"
 109			     "flush  %%g6" : : : "memory");
 110
 111	/* Clear this or we will die instantly when we
 112	 * schedule back to this idler...
 113	 */
 114	current_thread_info()->new_child = 0;
 115
 116	/* Attach to the address space of init_task. */
 117	atomic_inc(&init_mm.mm_count);
 118	current->active_mm = &init_mm;
 119
 120	/* inform the notifiers about the new cpu */
 121	notify_cpu_starting(cpuid);
 122
 123	while (!cpu_isset(cpuid, smp_commenced_mask))
 124		rmb();
 125
 126	ipi_call_lock_irq();
 127	cpu_set(cpuid, cpu_online_map);
 128	ipi_call_unlock_irq();
 129
 130	/* idle thread is expected to have preempt disabled */
 131	preempt_disable();
 132}
 133
 134void cpu_panic(void)
 135{
 136	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
 137	panic("SMP bolixed\n");
 138}
 139
 140/* This tick register synchronization scheme is taken entirely from
 141 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
 142 *
 143 * The only change I've made is to rework it so that the master
  144 * initiates the synchronization instead of the slave. -DaveM
 145 */
 146
 147#define MASTER	0
 148#define SLAVE	(SMP_CACHE_BYTES/sizeof(unsigned long))
 149
 150#define NUM_ROUNDS	64	/* magic value */
 151#define NUM_ITERS	5	/* likewise */
 152
 153static DEFINE_SPINLOCK(itc_sync_lock);
 154static unsigned long go[SLAVE + 1];
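/* Editorial sketch of the handshake driven through go[]: go[MASTER] is
 * the doorbell the master spins on, go[SLAVE] carries the master's tick
 * value back, and SLAVE is sized so the two words sit on different cache
 * lines (no false sharing while both sides spin).  One iteration of
 * get_delta() looks roughly like:
 *
 *	slave (client)			master
 *	t0 = get_tick()
 *	go[MASTER] = 1		-->	spins until go[MASTER] != 0,
 *					then clears it and does
 *	tm = go[SLAVE]		<--	go[SLAVE] = get_tick()
 *	go[SLAVE] = 0
 *	t1 = get_tick()
 */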
 155
 156#define DEBUG_TICK_SYNC	0
 157
 158static inline long get_delta (long *rt, long *master)
 159{
 160	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
 161	unsigned long tcenter, t0, t1, tm;
 162	unsigned long i;
 163
 164	for (i = 0; i < NUM_ITERS; i++) {
 165		t0 = tick_ops->get_tick();
 166		go[MASTER] = 1;
 167		membar_safe("#StoreLoad");
 168		while (!(tm = go[SLAVE]))
 169			rmb();
 170		go[SLAVE] = 0;
 171		wmb();
 172		t1 = tick_ops->get_tick();
 173
 174		if (t1 - t0 < best_t1 - best_t0)
 175			best_t0 = t0, best_t1 = t1, best_tm = tm;
 176	}
 177
 178	*rt = best_t1 - best_t0;
 179	*master = best_tm - best_t0;
 180
 181	/* average best_t0 and best_t1 without overflow: */
 182	tcenter = (best_t0/2 + best_t1/2);
 183	if (best_t0 % 2 + best_t1 % 2 == 2)
 184		tcenter++;
 185	return tcenter - best_tm;
 186}
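/* A quick worked example of the overflow-safe midpoint above: with
 * best_t0 = 11 and best_t1 = 14, (11/2 + 14/2) = 5 + 7 = 12 and the
 * remainders sum to 1, so tcenter stays 12 (the true midpoint 12.5,
 * rounded down).  With best_t0 = 11 and best_t1 = 13 the remainders sum
 * to 2 and tcenter is bumped from 11 to the exact midpoint 12.
 * Computing (best_t0 + best_t1)/2 directly could wrap around for values
 * near ~0UL, which is why the halves are averaged instead.
 */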
 187
 188void smp_synchronize_tick_client(void)
 189{
 190	long i, delta, adj, adjust_latency = 0, done = 0;
 191	unsigned long flags, rt, master_time_stamp, bound;
 192#if DEBUG_TICK_SYNC
 193	struct {
 194		long rt;	/* roundtrip time */
 195		long master;	/* master's timestamp */
 196		long diff;	/* difference between midpoint and master's timestamp */
 197		long lat;	/* estimate of itc adjustment latency */
 198	} t[NUM_ROUNDS];
 199#endif
 200
 201	go[MASTER] = 1;
 202
 203	while (go[MASTER])
 204		rmb();
 205
 206	local_irq_save(flags);
 207	{
 208		for (i = 0; i < NUM_ROUNDS; i++) {
 209			delta = get_delta(&rt, &master_time_stamp);
 210			if (delta == 0) {
 211				done = 1;	/* let's lock on to this... */
 212				bound = rt;
 213			}
 214
 215			if (!done) {
 216				if (i > 0) {
 217					adjust_latency += -delta;
 218					adj = -delta + adjust_latency/4;
 219				} else
 220					adj = -delta;
 221
 222				tick_ops->add_tick(adj);
 223			}
 224#if DEBUG_TICK_SYNC
 225			t[i].rt = rt;
 226			t[i].master = master_time_stamp;
 227			t[i].diff = delta;
 228			t[i].lat = adjust_latency/4;
 229#endif
 230		}
 231	}
 232	local_irq_restore(flags);
 233
 234#if DEBUG_TICK_SYNC
 235	for (i = 0; i < NUM_ROUNDS; i++)
 236		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
 237		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
 238#endif
 239
 240	printk(KERN_INFO "CPU %d: synchronized TICK with master CPU "
 241	       "(last diff %ld cycles, maxerr %lu cycles)\n",
 242	       smp_processor_id(), delta, rt);
 243}
 244
 245static void smp_start_sync_tick_client(int cpu);
 246
 247static void smp_synchronize_one_tick(int cpu)
 248{
 249	unsigned long flags, i;
 250
 251	go[MASTER] = 0;
 252
 253	smp_start_sync_tick_client(cpu);
 254
 255	/* wait for client to be ready */
 256	while (!go[MASTER])
 257		rmb();
 258
  259	/* now let the client proceed into its loop */
 260	go[MASTER] = 0;
 261	membar_safe("#StoreLoad");
 262
 263	spin_lock_irqsave(&itc_sync_lock, flags);
 264	{
 265		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
 266			while (!go[MASTER])
 267				rmb();
 268			go[MASTER] = 0;
 269			wmb();
 270			go[SLAVE] = tick_ops->get_tick();
 271			membar_safe("#StoreLoad");
 272		}
 273	}
 274	spin_unlock_irqrestore(&itc_sync_lock, flags);
 275}
 276
 277#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
 278/* XXX Put this in some common place. XXX */
 279static unsigned long kimage_addr_to_ra(void *p)
 280{
 281	unsigned long val = (unsigned long) p;
 282
 283	return kern_base + (val - KERNBASE);
 284}
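/* For illustration (values are made up): with kern_base at real address
 * 0x20400000 and KERNBASE at virtual 0x400000, a kernel-image pointer
 * p == 0x412345 translates to 0x20400000 + (0x412345 - 0x400000), i.e.
 * real address 0x20412345.  The hypervisor wants real addresses, hence
 * this helper for every kimage pointer handed to sun4v calls.
 */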
 285
 286static void __cpuinit ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg, void **descrp)
 287{
 288	extern unsigned long sparc64_ttable_tl0;
 289	extern unsigned long kern_locked_tte_data;
 290	struct hvtramp_descr *hdesc;
 291	unsigned long trampoline_ra;
 292	struct trap_per_cpu *tb;
 293	u64 tte_vaddr, tte_data;
 294	unsigned long hv_err;
 295	int i;
 296
 297	hdesc = kzalloc(sizeof(*hdesc) +
 298			(sizeof(struct hvtramp_mapping) *
 299			 num_kernel_image_mappings - 1),
 300			GFP_KERNEL);
 301	if (!hdesc) {
 302		printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
 303		       "hvtramp_descr.\n");
 304		return;
 305	}
 306	*descrp = hdesc;
 307
 308	hdesc->cpu = cpu;
 309	hdesc->num_mappings = num_kernel_image_mappings;
 310
 311	tb = &trap_block[cpu];
 312
 313	hdesc->fault_info_va = (unsigned long) &tb->fault_info;
 314	hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);
 315
 316	hdesc->thread_reg = thread_reg;
 317
 318	tte_vaddr = (unsigned long) KERNBASE;
 319	tte_data = kern_locked_tte_data;
 320
 321	for (i = 0; i < hdesc->num_mappings; i++) {
 322		hdesc->maps[i].vaddr = tte_vaddr;
 323		hdesc->maps[i].tte   = tte_data;
 324		tte_vaddr += 0x400000;
 325		tte_data  += 0x400000;
 326	}
 327
 328	trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);
 329
 330	hv_err = sun4v_cpu_start(cpu, trampoline_ra,
 331				 kimage_addr_to_ra(&sparc64_ttable_tl0),
 332				 __pa(hdesc));
 333	if (hv_err)
 334		printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() "
 335		       "gives error %lu\n", hv_err);
 336}
 337#endif
 338
 339extern unsigned long sparc64_cpu_startup;
 340
  341/* The OBP cpu startup callback truncates the 3rd arg cookie to
  342 * 32 bits (I think), so to be safe we have it read the pointer
  343 * stored here instead; that way we work on >4GB machines. -DaveM
 344 */
 345static struct thread_info *cpu_new_thread = NULL;
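/* Illustratively: smp_boot_one_cpu() passes &cpu_new_thread (an address
 * inside the kernel image, comfortably below 4GB) as the OBP cookie, and
 * the startup trampoline then loads the full 64-bit thread_info pointer
 * from that location, sidestepping the truncation described above.
 */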
 346
 347static int __cpuinit smp_boot_one_cpu(unsigned int cpu)
 348{
 349	unsigned long entry =
 350		(unsigned long)(&sparc64_cpu_startup);
 351	unsigned long cookie =
 352		(unsigned long)(&cpu_new_thread);
 353	struct task_struct *p;
 354	void *descr = NULL;
 355	int timeout, ret;
 356
 357	p = fork_idle(cpu);
 358	if (IS_ERR(p))
 359		return PTR_ERR(p);
 360	callin_flag = 0;
 361	cpu_new_thread = task_thread_info(p);
 362
 363	if (tlb_type == hypervisor) {
 364#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
 365		if (ldom_domaining_enabled)
 366			ldom_startcpu_cpuid(cpu,
 367					    (unsigned long) cpu_new_thread,
 368					    &descr);
 369		else
 370#endif
 371			prom_startcpu_cpuid(cpu, entry, cookie);
 372	} else {
 373		struct device_node *dp = of_find_node_by_cpuid(cpu);
 374
 375		prom_startcpu(dp->phandle, entry, cookie);
 376	}
 377
 378	for (timeout = 0; timeout < 50000; timeout++) {
 379		if (callin_flag)
 380			break;
 381		udelay(100);
 382	}
 383
 384	if (callin_flag) {
 385		ret = 0;
 386	} else {
 387		printk("Processor %d is stuck.\n", cpu);
 388		ret = -ENODEV;
 389	}
 390	cpu_new_thread = NULL;
 391
 392	kfree(descr);
 393
 394	return ret;
 395}
 396
 397static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
 398{
 399	u64 result, target;
 400	int stuck, tmp;
 401
 402	if (this_is_starfire) {
 403		/* map to real upaid */
 404		cpu = (((cpu & 0x3c) << 1) |
 405			((cpu & 0x40) >> 4) |
 406			(cpu & 0x3));
 407	}
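	/* Worked example of the remap above (editor's illustration): for
	 * cpu 0x45, (0x45 & 0x3c) << 1 == 0x08, (0x45 & 0x40) >> 4 == 0x04
	 * and (0x45 & 0x3) == 0x01, so the UPA id is 0x0d -- the same bits,
	 * just shuffled into the order Starfire's interconnect expects.
	 */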
 408
 409	target = (cpu << 14) | 0x70;
 410again:
 411	/* Ok, this is the real Spitfire Errata #54.
 412	 * One must read back from a UDB internal register
 413	 * after writes to the UDB interrupt dispatch, but
 414	 * before the membar Sync for that write.
 415	 * So we use the high UDB control register (ASI 0x7f,
 416	 * ADDR 0x20) for the dummy read. -DaveM
 417	 */
 418	tmp = 0x40;
 419	__asm__ __volatile__(
 420	"wrpr	%1, %2, %%pstate\n\t"
 421	"stxa	%4, [%0] %3\n\t"
 422	"stxa	%5, [%0+%8] %3\n\t"
 423	"add	%0, %8, %0\n\t"
 424	"stxa	%6, [%0+%8] %3\n\t"
 425	"membar	#Sync\n\t"
 426	"stxa	%%g0, [%7] %3\n\t"
 427	"membar	#Sync\n\t"
 428	"mov	0x20, %%g1\n\t"
 429	"ldxa	[%%g1] 0x7f, %%g0\n\t"
 430	"membar	#Sync"
 431	: "=r" (tmp)
 432	: "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
 433	  "r" (data0), "r" (data1), "r" (data2), "r" (target),
 434	  "r" (0x10), "0" (tmp)
 435        : "g1");
 436
 437	/* NOTE: PSTATE_IE is still clear. */
 438	stuck = 100000;
 439	do {
 440		__asm__ __volatile__("ldxa [%%g0] %1, %0"
 441			: "=r" (result)
 442			: "i" (ASI_INTR_DISPATCH_STAT));
 443		if (result == 0) {
 444			__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
 445					     : : "r" (pstate));
 446			return;
 447		}
 448		stuck -= 1;
 449		if (stuck == 0)
 450			break;
 451	} while (result & 0x1);
 452	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
 453			     : : "r" (pstate));
 454	if (stuck == 0) {
 455		printk("CPU[%d]: mondo stuckage result[%016llx]\n",
 456		       smp_processor_id(), result);
 457	} else {
 458		udelay(2);
 459		goto again;
 460	}
 461}
 462
 463static void spitfire_xcall_deliver(struct trap_per_cpu *tb, int cnt)
 464{
 465	u64 *mondo, data0, data1, data2;
 466	u16 *cpu_list;
 467	u64 pstate;
 468	int i;
 469
 470	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
 471	cpu_list = __va(tb->cpu_list_pa);
 472	mondo = __va(tb->cpu_mondo_block_pa);
 473	data0 = mondo[0];
 474	data1 = mondo[1];
 475	data2 = mondo[2];
 476	for (i = 0; i < cnt; i++)
 477		spitfire_xcall_helper(data0, data1, data2, pstate, cpu_list[i]);
 478}
 479
  480/* Cheetah now allows sending the whole 64 bytes of data in the interrupt
  481 * packet, but we have no use for that.  However we do take advantage of
  482 * the new pipelining feature (i.e. dispatch to multiple cpus simultaneously).
 483 */
 484static void cheetah_xcall_deliver(struct trap_per_cpu *tb, int cnt)
 485{
 486	int nack_busy_id, is_jbus, need_more;
 487	u64 *mondo, pstate, ver, busy_mask;
 488	u16 *cpu_list;
 489
 490	cpu_list = __va(tb->cpu_list_pa);
 491	mondo = __va(tb->cpu_mondo_block_pa);
 492
 493	/* Unfortunately, someone at Sun had the brilliant idea to make the
 494	 * busy/nack fields hard-coded by ITID number for this Ultra-III
 495	 * derivative processor.
 496	 */
 497	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
 498	is_jbus = ((ver >> 32) == __JALAPENO_ID ||
 499		   (ver >> 32) == __SERRANO_ID);
 500
 501	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
 502
 503retry:
 504	need_more = 0;
 505	__asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
 506			     : : "r" (pstate), "i" (PSTATE_IE));
 507
 508	/* Setup the dispatch data registers. */
 509	__asm__ __volatile__("stxa	%0, [%3] %6\n\t"
 510			     "stxa	%1, [%4] %6\n\t"
 511			     "stxa	%2, [%5] %6\n\t"
 512			     "membar	#Sync\n\t"
 513			     : /* no outputs */
 514			     : "r" (mondo[0]), "r" (mondo[1]), "r" (mondo[2]),
 515			       "r" (0x40), "r" (0x50), "r" (0x60),
 516			       "i" (ASI_INTR_W));
 517
 518	nack_busy_id = 0;
 519	busy_mask = 0;
 520	{
 521		int i;
 522
 523		for (i = 0; i < cnt; i++) {
 524			u64 target, nr;
 525
 526			nr = cpu_list[i];
 527			if (nr == 0xffff)
 528				continue;
 529
 530			target = (nr << 14) | 0x70;
 531			if (is_jbus) {
 532				busy_mask |= (0x1UL << (nr * 2));
 533			} else {
 534				target |= (nack_busy_id << 24);
 535				busy_mask |= (0x1UL <<
 536					      (nack_busy_id * 2));
 537			}
 538			__asm__ __volatile__(
 539				"stxa	%%g0, [%0] %1\n\t"
 540				"membar	#Sync\n\t"
 541				: /* no outputs */
 542				: "r" (target), "i" (ASI_INTR_W));
 543			nack_busy_id++;
 544			if (nack_busy_id == 32) {
 545				need_more = 1;
 546				break;
 547			}
 548		}
 549	}
 550
 551	/* Now, poll for completion. */
 552	{
 553		u64 dispatch_stat, nack_mask;
 554		long stuck;
 555
 556		stuck = 100000 * nack_busy_id;
 557		nack_mask = busy_mask << 1;
 558		do {
 559			__asm__ __volatile__("ldxa	[%%g0] %1, %0"
 560					     : "=r" (dispatch_stat)
 561					     : "i" (ASI_INTR_DISPATCH_STAT));
 562			if (!(dispatch_stat & (busy_mask | nack_mask))) {
 563				__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
 564						     : : "r" (pstate));
 565				if (unlikely(need_more)) {
 566					int i, this_cnt = 0;
 567					for (i = 0; i < cnt; i++) {
 568						if (cpu_list[i] == 0xffff)
 569							continue;
 570						cpu_list[i] = 0xffff;
 571						this_cnt++;
 572						if (this_cnt == 32)
 573							break;
 574					}
 575					goto retry;
 576				}
 577				return;
 578			}
 579			if (!--stuck)
 580				break;
 581		} while (dispatch_stat & busy_mask);
 582
 583		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
 584				     : : "r" (pstate));
 585
 586		if (dispatch_stat & busy_mask) {
 587			/* Busy bits will not clear, continue instead
 588			 * of freezing up on this cpu.
 589			 */
 590			printk("CPU[%d]: mondo stuckage result[%016llx]\n",
 591			       smp_processor_id(), dispatch_stat);
 592		} else {
 593			int i, this_busy_nack = 0;
 594
 595			/* Delay some random time with interrupts enabled
 596			 * to prevent deadlock.
 597			 */
 598			udelay(2 * nack_busy_id);
 599
 600			/* Clear out the mask bits for cpus which did not
 601			 * NACK us.
 602			 */
 603			for (i = 0; i < cnt; i++) {
 604				u64 check_mask, nr;
 605
 606				nr = cpu_list[i];
 607				if (nr == 0xffff)
 608					continue;
 609
 610				if (is_jbus)
 611					check_mask = (0x2UL << (2*nr));
 612				else
 613					check_mask = (0x2UL <<
 614						      this_busy_nack);
 615				if ((dispatch_stat & check_mask) == 0)
 616					cpu_list[i] = 0xffff;
 617				this_busy_nack += 2;
 618				if (this_busy_nack == 64)
 619					break;
 620			}
 621
 622			goto retry;
 623		}
 624	}
 625}
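/* A note on the busy/nack bookkeeping above (editorial sketch): each
 * dispatched target owns two adjacent bits in the dispatch status
 * register -- busy at bit 2n, nack at bit 2n+1 -- where n is the ITID
 * (nack_busy_id, or the target cpu number on JBUS parts).  So with two
 * targets, busy_mask == 0b0101 and nack_mask == busy_mask << 1 ==
 * 0b1010; the poll exits once no busy bit is set, and any set nack bit
 * selects which cpus get retried.
 */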
 626
 627/* Multi-cpu list version.  */
 628static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
 629{
 630	int retries, this_cpu, prev_sent, i, saw_cpu_error;
 631	unsigned long status;
 632	u16 *cpu_list;
 633
 634	this_cpu = smp_processor_id();
 635
 636	cpu_list = __va(tb->cpu_list_pa);
 637
 638	saw_cpu_error = 0;
 639	retries = 0;
 640	prev_sent = 0;
 641	do {
 642		int forward_progress, n_sent;
 643
 644		status = sun4v_cpu_mondo_send(cnt,
 645					      tb->cpu_list_pa,
 646					      tb->cpu_mondo_block_pa);
 647
 648		/* HV_EOK means all cpus received the xcall, we're done.  */
 649		if (likely(status == HV_EOK))
 650			break;
 651
 652		/* First, see if we made any forward progress.
 653		 *
 654		 * The hypervisor indicates successful sends by setting
 655		 * cpu list entries to the value 0xffff.
 656		 */
 657		n_sent = 0;
 658		for (i = 0; i < cnt; i++) {
 659			if (likely(cpu_list[i] == 0xffff))
 660				n_sent++;
 661		}
 662
 663		forward_progress = 0;
 664		if (n_sent > prev_sent)
 665			forward_progress = 1;
 666
 667		prev_sent = n_sent;
 668
 669		/* If we get a HV_ECPUERROR, then one or more of the cpus
 670		 * in the list are in error state.  Use the cpu_state()
 671		 * hypervisor call to find out which cpus are in error state.
 672		 */
 673		if (unlikely(status == HV_ECPUERROR)) {
 674			for (i = 0; i < cnt; i++) {
 675				long err;
 676				u16 cpu;
 677
 678				cpu = cpu_list[i];
 679				if (cpu == 0xffff)
 680					continue;
 681
 682				err = sun4v_cpu_state(cpu);
 683				if (err == HV_CPU_STATE_ERROR) {
 684					saw_cpu_error = (cpu + 1);
 685					cpu_list[i] = 0xffff;
 686				}
 687			}
 688		} else if (unlikely(status != HV_EWOULDBLOCK))
 689			goto fatal_mondo_error;
 690
 691		/* Don't bother rewriting the CPU list, just leave the
 692		 * 0xffff and non-0xffff entries in there and the
 693		 * hypervisor will do the right thing.
 694		 *
 695		 * Only advance timeout state if we didn't make any
 696		 * forward progress.
 697		 */
 698		if (unlikely(!forward_progress)) {
 699			if (unlikely(++retries > 10000))
 700				goto fatal_mondo_timeout;
 701
 702			/* Delay a little bit to let other cpus catch up
 703			 * on their cpu mondo queue work.
 704			 */
 705			udelay(2 * cnt);
 706		}
 707	} while (1);
 708
 709	if (unlikely(saw_cpu_error))
 710		goto fatal_mondo_cpu_error;
 711
 712	return;
 713
 714fatal_mondo_cpu_error:
 715	printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
 716	       "(including %d) were in error state\n",
 717	       this_cpu, saw_cpu_error - 1);
 718	return;
 719
 720fatal_mondo_timeout:
 721	printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
  722	       "progress after %d retries.\n",
 723	       this_cpu, retries);
 724	goto dump_cpu_list_and_out;
 725
 726fatal_mondo_error:
 727	printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
 728	       this_cpu, status);
 729	printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
 730	       "mondo_block_pa(%lx)\n",
 731	       this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
 732
 733dump_cpu_list_and_out:
 734	printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
 735	for (i = 0; i < cnt; i++)
 736		printk("%u ", cpu_list[i]);
 737	printk("]\n");
 738}
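/* Editorial illustration of the retry protocol above: the hypervisor
 * rewrites each successfully-delivered entry in the cpu list to 0xffff.
 * So a list that starts as { 5, 9, 12 } and comes back from a partial
 * send as { 0xffff, 9, 0xffff } can simply be resubmitted as-is; only
 * cpu 9 is retried, and n_sent (the count of 0xffff entries) tells us
 * whether the retry made forward progress.
 */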
 739
 740static void (*xcall_deliver_impl)(struct trap_per_cpu *, int);
 741
 742static void xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask)
 743{
 744	struct trap_per_cpu *tb;
 745	int this_cpu, i, cnt;
 746	unsigned long flags;
 747	u16 *cpu_list;
 748	u64 *mondo;
 749
 750	/* We have to do this whole thing with interrupts fully disabled.
 751	 * Otherwise if we send an xcall from interrupt context it will
 752	 * corrupt both our mondo block and cpu list state.
 753	 *
 754	 * One consequence of this is that we cannot use timeout mechanisms
 755	 * that depend upon interrupts being delivered locally.  So, for
 756	 * example, we cannot sample jiffies and expect it to advance.
 757	 *
 758	 * Fortunately, udelay() uses %stick/%tick so we can use that.
 759	 */
 760	local_irq_save(flags);
 761
 762	this_cpu = smp_processor_id();
 763	tb = &trap_block[this_cpu];
 764
 765	mondo = __va(tb->cpu_mondo_block_pa);
 766	mondo[0] = data0;
 767	mondo[1] = data1;
 768	mondo[2] = data2;
 769	wmb();
 770
 771	cpu_list = __va(tb->cpu_list_pa);
 772
 773	/* Setup the initial cpu list.  */
 774	cnt = 0;
 775	for_each_cpu(i, mask) {
 776		if (i == this_cpu || !cpu_online(i))
 777			continue;
 778		cpu_list[cnt++] = i;
 779	}
 780
 781	if (cnt)
 782		xcall_deliver_impl(tb, cnt);
 783
 784	local_irq_restore(flags);
 785}
 786
  787/* Send cross call to all processors mentioned in MASK
 788 * except self.  Really, there are only two cases currently,
 789 * "&cpu_online_map" and "&mm->cpu_vm_mask".
 790 */
 791static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, const cpumask_t *mask)
 792{
 793	u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));
 794
 795	xcall_deliver(data0, data1, data2, mask);
 796}
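/* How data0 is packed (illustrative): the high 32 bits carry the MMU
 * context number and the low 32 bits the xcall handler's address.
 * E.g. ctx 0x2a with a handler at 0x00404890 gives
 * data0 == 0x0000002a00404890, and the receiving cpu's mondo vector can
 * recover both halves.
 */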
 797
 798/* Send cross call to all processors except self. */
 799static void smp_cross_call(unsigned long *func, u32 ctx, u64 data1, u64 data2)
 800{
 801	smp_cross_call_masked(func, ctx, data1, data2, &cpu_online_map);
 802}
 803
 804extern unsigned long xcall_sync_tick;
 805
 806static void smp_start_sync_tick_client(int cpu)
 807{
 808	xcall_deliver((u64) &xcall_sync_tick, 0, 0,
 809		      &cpumask_of_cpu(cpu));
 810}
 811
 812extern unsigned long xcall_call_function;
 813
 814void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 815{
 816	xcall_deliver((u64) &xcall_call_function, 0, 0, mask);
 817}
 818
 819extern unsigned long xcall_call_function_single;
 820
 821void arch_send_call_function_single_ipi(int cpu)
 822{
 823	xcall_deliver((u64) &xcall_call_function_single, 0, 0,
 824		      &cpumask_of_cpu(cpu));
 825}
 826
 827void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
 828{
 829	clear_softint(1 << irq);
 830	generic_smp_call_function_interrupt();
 831}
 832
 833void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs)
 834{
 835	clear_softint(1 << irq);
 836	generic_smp_call_function_single_interrupt();
 837}
 838
 839static void tsb_sync(void *info)
 840{
 841	struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
 842	struct mm_struct *mm = info;
 843
  844	/* It is not valid to test "current->active_mm == mm" here.
 845	 *
 846	 * The value of "current" is not changed atomically with
 847	 * switch_mm().  But that's OK, we just need to check the
 848	 * current cpu's trap block PGD physical address.
 849	 */
 850	if (tp->pgd_paddr == __pa(mm->pgd))
 851		tsb_context_switch(mm);
 852}
 853
 854void smp_tsb_sync(struct mm_struct *mm)
 855{
 856	smp_call_function_many(mm_cpumask(mm), tsb_sync, mm, 1);
 857}
 858
 859extern unsigned long xcall_flush_tlb_mm;
 860extern unsigned long xcall_flush_tlb_pending;
 861extern unsigned long xcall_flush_tlb_kernel_range;
 862extern unsigned long xcall_fetch_glob_regs;
 863extern unsigned long xcall_receive_signal;
 864extern unsigned long xcall_new_mmu_context_version;
 865#ifdef CONFIG_KGDB
 866extern unsigned long xcall_kgdb_capture;
 867#endif
 868
 869#ifdef DCACHE_ALIASING_POSSIBLE
 870extern unsigned long xcall_flush_dcache_page_cheetah;
 871#endif
 872extern unsigned long xcall_flush_dcache_page_spitfire;
 873
 874#ifdef CONFIG_DEBUG_DCFLUSH
 875extern atomic_t dcpage_flushes;
 876extern atomic_t dcpage_flushes_xcall;
 877#endif
 878
 879static inline void __local_flush_dcache_page(struct page *page)
 880{
 881#ifdef DCACHE_ALIASING_POSSIBLE
 882	__flush_dcache_page(page_address(page),
 883			    ((tlb_type == spitfire) &&
 884			     page_mapping(page) != NULL));
 885#else
 886	if (page_mapping(page) != NULL &&
 887	    tlb_type == spitfire)
 888		__flush_icache_page(__pa(page_address(page)));
 889#endif
 890}
 891
 892void smp_flush_dcache_page_impl(struct page *page, int cpu)
 893{
 894	int this_cpu;
 895
 896	if (tlb_type == hypervisor)
 897		return;
 898
 899#ifdef CONFIG_DEBUG_DCFLUSH
 900	atomic_inc(&dcpage_flushes);
 901#endif
 902
 903	this_cpu = get_cpu();
 904
 905	if (cpu == this_cpu) {
 906		__local_flush_dcache_page(page);
 907	} else if (cpu_online(cpu)) {
 908		void *pg_addr = page_address(page);
 909		u64 data0 = 0;
 910
 911		if (tlb_type == spitfire) {
 912			data0 = ((u64)&xcall_flush_dcache_page_spitfire);
 913			if (page_mapping(page) != NULL)
 914				data0 |= ((u64)1 << 32);
 915		} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
 916#ifdef DCACHE_ALIASING_POSSIBLE
 917			data0 =	((u64)&xcall_flush_dcache_page_cheetah);
 918#endif
 919		}
 920		if (data0) {
 921			xcall_deliver(data0, __pa(pg_addr),
 922				      (u64) pg_addr, &cpumask_of_cpu(cpu));
 923#ifdef CONFIG_DEBUG_DCFLUSH
 924			atomic_inc(&dcpage_flushes_xcall);
 925#endif
 926		}
 927	}
 928
 929	put_cpu();
 930}
 931
 932void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
 933{
 934	void *pg_addr;
 935	int this_cpu;
 936	u64 data0;
 937
 938	if (tlb_type == hypervisor)
 939		return;
 940
 941	this_cpu = get_cpu();
 942
 943#ifdef CONFIG_DEBUG_DCFLUSH
 944	atomic_inc(&dcpage_flushes);
 945#endif
 946	data0 = 0;
 947	pg_addr = page_address(page);
 948	if (tlb_type == spitfire) {
 949		data0 = ((u64)&xcall_flush_dcache_page_spitfire);
 950		if (page_mapping(page) != NULL)
 951			data0 |= ((u64)1 << 32);
 952	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
 953#ifdef DCACHE_ALIASING_POSSIBLE
 954		data0 = ((u64)&xcall_flush_dcache_page_cheetah);
 955#endif
 956	}
 957	if (data0) {
 958		xcall_deliver(data0, __pa(pg_addr),
 959			      (u64) pg_addr, &cpu_online_map);
 960#ifdef CONFIG_DEBUG_DCFLUSH
 961		atomic_inc(&dcpage_flushes_xcall);
 962#endif
 963	}
 964	__local_flush_dcache_page(page);
 965
 966	put_cpu();
 967}
 968
 969void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
 970{
 971	struct mm_struct *mm;
 972	unsigned long flags;
 973
 974	clear_softint(1 << irq);
 975
 976	/* See if we need to allocate a new TLB context because
 977	 * the version of the one we are using is now out of date.
 978	 */
 979	mm = current->active_mm;
 980	if (unlikely(!mm || (mm == &init_mm)))
 981		return;
 982
 983	spin_lock_irqsave(&mm->context.lock, flags);
 984
 985	if (unlikely(!CTX_VALID(mm->context)))
 986		get_new_mmu_context(mm);
 987
 988	spin_unlock_irqrestore(&mm->context.lock, flags);
 989
 990	load_secondary_context(mm);
 991	__flush_tlb_mm(CTX_HWBITS(mm->context),
 992		       SECONDARY_CONTEXT);
 993}
 994
 995void smp_new_mmu_context_version(void)
 996{
 997	smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
 998}
 999
1000#ifdef CONFIG_KGDB
1001void kgdb_roundup_cpus(unsigned long flags)
1002{
1003	smp_cross_call(&xcall_kgdb_capture, 0, 0, 0);
1004}
1005#endif
1006
1007void smp_fetch_global_regs(void)
1008{
1009	smp_cross_call(&xcall_fetch_glob_regs, 0, 0, 0);
1010}
1011
1012/* We know that the window frames of the user have been flushed
 1013 * to the stack before we get here, because all of our callers
1014 * are flush_tlb_*() routines, and these run after flush_cache_*()
1015 * which performs the flushw.
1016 *
1017 * The SMP TLB coherency scheme we use works as follows:
1018 *
1019 * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
1020 *    space has (potentially) executed on, this is the heuristic
1021 *    we use to avoid doing cross calls.
1022 *
 1023 *    Also, for flushing from kswapd and for clones, we use
 1024 *    cpu_vm_mask as the list of cpus on which to run the TLB flush.
1025 *
1026 * 2) TLB context numbers are shared globally across all processors
1027 *    in the system, this allows us to play several games to avoid
1028 *    cross calls.
1029 *
1030 *    One invariant is that when a cpu switches to a process, and
 1031 *    that process's tsk->active_mm->cpu_vm_mask does not have the
1032 *    current cpu's bit set, that tlb context is flushed locally.
1033 *
 1034 *    If the address space is non-shared (i.e. mm_users == 1) we avoid
1035 *    cross calls when we want to flush the currently running process's
1036 *    tlb state.  This is done by clearing all cpu bits except the current
1037 *    processor's in current->mm->cpu_vm_mask and performing the
1038 *    flush locally only.  This will force any subsequent cpus which run
1039 *    this task to flush the context from the local tlb if the process
1040 *    migrates to another cpu (again).
1041 *
1042 * 3) For shared address spaces (threads) and swapping we bite the
1043 *    bullet for most cases and perform the cross call (but only to
1044 *    the cpus listed in cpu_vm_mask).
1045 *
1046 *    The performance gain from "optimizing" away the cross call for threads is
1047 *    questionable (in theory the big win for threads is the massive sharing of
1048 *    address space state across processors).
1049 */
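/* A concrete walkthrough of rule 2 above (editorial example): a
 * single-threaded task runs on cpu 0, so smp_flush_tlb_mm() below just
 * narrows mm->cpu_vm_mask to { 0 } and flushes locally -- no cross
 * call.  If the task later migrates to cpu 1, cpu 1's bit is not set in
 * cpu_vm_mask, so the context is flushed locally there before use,
 * preserving the invariant without involving cpu 0 at all.
 */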
1050
1051/* This currently is only used by the hugetlb arch pre-fault
1052 * hook on UltraSPARC-III+ and later when changing the pagesize
1053 * bits of the context register for an address space.
1054 */
1055void smp_flush_tlb_mm(struct mm_struct *mm)
1056{
1057	u32 ctx = CTX_HWBITS(mm->context);
1058	int cpu = get_cpu();
1059
1060	if (atomic_read(&mm->mm_users) == 1) {
1061		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
1062		goto local_flush_and_out;
1063	}
1064
1065	smp_cross_call_masked(&xcall_flush_tlb_mm,
1066			      ctx, 0, 0,
1067			      mm_cpumask(mm));
1068
1069local_flush_and_out:
1070	__flush_tlb_mm(ctx, SECONDARY_CONTEXT);
1071
1072	put_cpu();
1073}
1074
1075void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
1076{
1077	u32 ctx = CTX_HWBITS(mm->context);
1078	int cpu = get_cpu();
1079
1080	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
1081		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
1082	else
1083		smp_cross_call_masked(&xcall_flush_tlb_pending,
1084				      ctx, nr, (unsigned long) vaddrs,
1085				      mm_cpumask(mm));
1086
1087	__flush_tlb_pending(ctx, nr, vaddrs);
1088
1089	put_cpu();
1090}
1091
1092void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
1093{
1094	start &= PAGE_MASK;
1095	end    = PAGE_ALIGN(end);
1096	if (start != end) {
1097		smp_cross_call(&xcall_flush_tlb_kernel_range,
1098			       0, start, end);
1099
1100		__flush_tlb_kernel_range(start, end);
1101	}
1102}
1103
1104/* CPU capture. */
1105/* #define CAPTURE_DEBUG */
1106extern unsigned long xcall_capture;
1107
1108static atomic_t smp_capture_depth = ATOMIC_INIT(0);
1109static atomic_t smp_capture_registry = ATOMIC_INIT(0);
1110static unsigned long penguins_are_doing_time;
1111
1112void smp_capture(void)
1113{
1114	int result = atomic_add_ret(1, &smp_capture_depth);
1115
1116	if (result == 1) {
1117		int ncpus = num_online_cpus();
1118
1119#ifdef CAPTURE_DEBUG
1120		printk("CPU[%d]: Sending penguins to jail...",
1121		       smp_processor_id());
1122#endif
1123		penguins_are_doing_time = 1;
1124		atomic_inc(&smp_capture_registry);
1125		smp_cross_call(&xcall_capture, 0, 0, 0);
1126		while (atomic_read(&smp_capture_registry) != ncpus)
1127			rmb();
1128#ifdef CAPTURE_DEBUG
1129		printk("done\n");
1130#endif
1131	}
1132}
1133
1134void smp_release(void)
1135{
1136	if (atomic_dec_and_test(&smp_capture_depth)) {
1137#ifdef CAPTURE_DEBUG
1138		printk("CPU[%d]: Giving pardon to "
1139		       "imprisoned penguins\n",
1140		       smp_processor_id());
1141#endif
1142		penguins_are_doing_time = 0;
1143		membar_safe("#StoreLoad");
1144		atomic_dec(&smp_capture_registry);
1145	}
1146}
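/* Capture nesting, illustrated (editorial note): smp_capture() counts
 * depth, so only the first of nested captures (depth 0 -> 1) sends the
 * xcall_capture cross call and spins until every online cpu has checked
 * into smp_capture_registry.  A matching smp_release() at each level
 * decrements the depth, and only the final one (depth 1 -> 0) lets the
 * captured cpus out of smp_penguin_jailcell().
 */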
1147
1148/* Imprisoned penguins run with %pil == PIL_NORMAL_MAX, but PSTATE_IE
1149 * set, so they can service tlb flush xcalls...
1150 */
1151extern void prom_world(int);
1152
1153void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs)
1154{
1155	clear_softint(1 << irq);
1156
1157	preempt_disable();
1158
1159	__asm__ __volatile__("flushw");
1160	prom_world(1);
1161	atomic_inc(&smp_capture_registry);
1162	membar_safe("#StoreLoad");
1163	while (penguins_are_doing_time)
1164		rmb();
1165	atomic_dec(&smp_capture_registry);
1166	prom_world(0);
1167
1168	preempt_enable();
1169}
1170
1171/* /proc/profile writes can call this, don't __init it please. */
1172int setup_profiling_timer(unsigned int multiplier)
1173{
1174	return -EINVAL;
1175}
1176
1177void __init smp_prepare_cpus(unsigned int max_cpus)
1178{
1179}
1180
1181void __devinit smp_prepare_boot_cpu(void)
1182{
1183}
1184
1185void __init smp_setup_processor_id(void)
1186{
1187	if (tlb_type == spitfire)
1188		xcall_deliver_impl = spitfire_xcall_deliver;
1189	else if (tlb_type == cheetah || tlb_type == cheetah_plus)
1190		xcall_deliver_impl = cheetah_xcall_deliver;
1191	else
1192		xcall_deliver_impl = hypervisor_xcall_deliver;
1193}
1194
1195void __devinit smp_fill_in_sib_core_maps(void)
1196{
1197	unsigned int i;
1198
1199	for_each_present_cpu(i) {
1200		unsigned int j;
1201
1202		cpus_clear(cpu_core_map[i]);
1203		if (cpu_data(i).core_id == 0) {
1204			cpu_set(i, cpu_core_map[i]);
1205			continue;
1206		}
1207
1208		for_each_present_cpu(j) {
1209			if (cpu_data(i).core_id ==
1210			    cpu_data(j).core_id)
1211				cpu_set(j, cpu_core_map[i]);
1212		}
1213	}
1214
1215	for_each_present_cpu(i) {
1216		unsigned int j;
1217
1218		cpus_clear(per_cpu(cpu_sibling_map, i));
1219		if (cpu_data(i).proc_id == -1) {
1220			cpu_set(i, per_cpu(cpu_sibling_map, i));
1221			continue;
1222		}
1223
1224		for_each_present_cpu(j) {
1225			if (cpu_data(i).proc_id ==
1226			    cpu_data(j).proc_id)
1227				cpu_set(j, per_cpu(cpu_sibling_map, i));
1228		}
1229	}
1230}
1231
1232int __cpuinit __cpu_up(unsigned int cpu)
1233{
1234	int ret = smp_boot_one_cpu(cpu);
1235
1236	if (!ret) {
1237		cpu_set(cpu, smp_commenced_mask);
1238		while (!cpu_isset(cpu, cpu_online_map))
1239			mb();
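		/* Editor's note: the unbounded spin above means the
		 * check below can no longer fail; it reads like a
		 * leftover from a variant that timed out instead of
		 * spinning forever.
		 */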
1240		if (!cpu_isset(cpu, cpu_online_map)) {
1241			ret = -ENODEV;
1242		} else {
1243			/* On SUN4V, writes to %tick and %stick are
1244			 * not allowed.
1245			 */
1246			if (tlb_type != hypervisor)
1247				smp_synchronize_one_tick(cpu);
1248		}
1249	}
1250	return ret;
1251}
1252
1253#ifdef CONFIG_HOTPLUG_CPU
1254void cpu_play_dead(void)
1255{
1256	int cpu = smp_processor_id();
1257	unsigned long pstate;
1258
1259	idle_task_exit();
1260
1261	if (tlb_type == hypervisor) {
1262		struct trap_per_cpu *tb = &trap_block[cpu];
1263
1264		sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO,
1265				tb->cpu_mondo_pa, 0);
1266		sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO,
1267				tb->dev_mondo_pa, 0);
1268		sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR,
1269				tb->resum_mondo_pa, 0);
1270		sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR,
1271				tb->nonresum_mondo_pa, 0);
1272	}
1273
1274	cpu_clear(cpu, smp_commenced_mask);
1275	membar_safe("#Sync");
1276
1277	local_irq_disable();
1278
1279	__asm__ __volatile__(
1280		"rdpr	%%pstate, %0\n\t"
1281		"wrpr	%0, %1, %%pstate"
1282		: "=r" (pstate)
1283		: "i" (PSTATE_IE));
1284
1285	while (1)
1286		barrier();
1287}
1288
1289int __cpu_disable(void)
1290{
1291	int cpu = smp_processor_id();
1292	cpuinfo_sparc *c;
1293	int i;
1294
1295	for_each_cpu_mask(i, cpu_core_map[cpu])
1296		cpu_clear(cpu, cpu_core_map[i]);
1297	cpus_clear(cpu_core_map[cpu]);
1298
1299	for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu))
1300		cpu_clear(cpu, per_cpu(cpu_sibling_map, i));
1301	cpus_clear(per_cpu(cpu_sibling_map, cpu));
1302
1303	c = &cpu_data(cpu);
1304
1305	c->core_id = 0;
1306	c->proc_id = -1;
1307
1308	smp_wmb();
1309
1310	/* Make sure no interrupts point to this cpu.  */
1311	fixup_irqs();
1312
1313	local_irq_enable();
1314	mdelay(1);
1315	local_irq_disable();
1316
1317	ipi_call_lock();
1318	cpu_clear(cpu, cpu_online_map);
1319	ipi_call_unlock();
1320
1321	cpu_map_rebuild();
1322
1323	return 0;
1324}
1325
1326void __cpu_die(unsigned int cpu)
1327{
1328	int i;
1329
1330	for (i = 0; i < 100; i++) {
1331		smp_rmb();
1332		if (!cpu_isset(cpu, smp_commenced_mask))
1333			break;
1334		msleep(100);
1335	}
1336	if (cpu_isset(cpu, smp_commenced_mask)) {
1337		printk(KERN_ERR "CPU %u didn't die...\n", cpu);
1338	} else {
1339#if defined(CONFIG_SUN_LDOMS)
1340		unsigned long hv_err;
1341		int limit = 100;
1342
1343		do {
1344			hv_err = sun4v_cpu_stop(cpu);
1345			if (hv_err == HV_EOK) {
1346				cpu_clear(cpu, cpu_present_map);
1347				break;
1348			}
1349		} while (--limit > 0);
1350		if (limit <= 0) {
1351			printk(KERN_ERR "sun4v_cpu_stop() fails err=%lu\n",
1352			       hv_err);
1353		}
1354#endif
1355	}
1356}
1357#endif
1358
1359void __init smp_cpus_done(unsigned int max_cpus)
1360{
1361}
1362
1363void smp_send_reschedule(int cpu)
1364{
1365	xcall_deliver((u64) &xcall_receive_signal, 0, 0,
1366		      &cpumask_of_cpu(cpu));
1367}
1368
1369void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
1370{
1371	clear_softint(1 << irq);
1372}
1373
1374/* This is a nop because we capture all other cpus
1375 * anyways when making the PROM active.
1376 */
1377void smp_send_stop(void)
1378{
1379}
1380
1381/**
1382 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
1383 * @cpu: cpu to allocate for
1384 * @size: size allocation in bytes
1385 * @align: alignment
1386 *
1387 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
1388 * does the right thing for NUMA regardless of the current
1389 * configuration.
1390 *
1391 * RETURNS:
1392 * Pointer to the allocated area on success, NULL on failure.
1393 */
1394static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size,
1395					size_t align)
1396{
1397	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
1398#ifdef CONFIG_NEED_MULTIPLE_NODES
1399	int node = cpu_to_node(cpu);
1400	void *ptr;
1401
1402	if (!node_online(node) || !NODE_DATA(node)) {
1403		ptr = __alloc_bootmem(size, align, goal);
1404		pr_info("cpu %d has no node %d or node-local memory\n",
1405			cpu, node);
1406		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
1407			 cpu, size, __pa(ptr));
1408	} else {
1409		ptr = __alloc_bootmem_node(NODE_DATA(node),
1410					   size, align, goal);
1411		pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
1412			 "%016lx\n", cpu, size, node, __pa(ptr));
1413	}
1414	return ptr;
1415#else
1416	return __alloc_bootmem(size, align, goal);
1417#endif
1418}
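/* Editorial note: on !CONFIG_NEED_MULTIPLE_NODES kernels the function
 * above collapses to a plain __alloc_bootmem(size, align, goal) with
 * goal == __pa(MAX_DMA_ADDRESS), i.e. percpu areas are steered above
 * the DMA region regardless of node awareness.
 */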
1419
1420static void __init pcpu_free_bootmem(void *ptr, size_t size)
1421{
1422	free_bootmem(__pa(ptr), size);
1423}
1424
1425static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
1426{
1427	if (cpu_to_node(from) == cpu_to_node(to))
1428		return LOCAL_DISTANCE;
1429	else
1430		return REMOTE_DISTANCE;
1431}
1432
1433static void __init pcpu_populate_pte(unsigned long addr)
1434{
1435	pgd_t *pgd = pgd_offset_k(addr);
1436	pud_t *pud;
1437	pmd_t *pmd;
1438
1439	pud = pud_offset(pgd, addr);
1440	if (pud_none(*pud)) {
1441		pmd_t *new;
1442
1443		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1444		pud_populate(&init_mm, pud, new);
1445	}
1446
1447	pmd = pmd_offset(pud, addr);
1448	if (!pmd_present(*pmd)) {
1449		pte_t *new;
1450
1451		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1452		pmd_populate_kernel(&init_mm, pmd, new);
1453	}
1454}
1455
1456void __init setup_per_cpu_areas(void)
1457{
1458	unsigned long delta;
1459	unsigned int cpu;
1460	int rc = -EINVAL;
1461
1462	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
1463		rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
1464					    PERCPU_DYNAMIC_RESERVE, 4 << 20,
1465					    pcpu_cpu_distance,
1466					    pcpu_alloc_bootmem,
1467					    pcpu_free_bootmem);
1468		if (rc)
1469			pr_warning("PERCPU: %s allocator failed (%d), "
1470				   "falling back to page size\n",
1471				   pcpu_fc_names[pcpu_chosen_fc], rc);
1472	}
1473	if (rc < 0)
1474		rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
1475					   pcpu_alloc_bootmem,
1476					   pcpu_free_bootmem,
1477					   pcpu_populate_pte);
1478	if (rc < 0)
1479		panic("cannot initialize percpu area (err=%d)", rc);
1480
1481	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
1482	for_each_possible_cpu(cpu)
1483		__per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
1484
1485	/* Setup %g5 for the boot cpu.  */
1486	__local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
1487
1488	of_fill_in_cpu_data();
1489	if (tlb_type == hypervisor)
1490		mdesc_fill_in_cpu_data(cpu_all_mask);
1491}