PageRenderTime 320ms CodeModel.GetById 11ms app.highlight 161ms RepoModel.GetById 51ms app.codeStats 1ms

/drivers/char/ipmi/ipmi_si_intf.c

https://bitbucket.org/ndreys/linux-sunxi
C | 3587 lines | 2596 code | 516 blank | 475 comment | 442 complexity | b3c08cb63e79106bd6485557688e6098 MD5 | raw file
Possible License(s): GPL-2.0, LGPL-2.0, AGPL-1.0

Large files are truncated, but you can click here to view the full file

   1/*
   2 * ipmi_si.c
   3 *
   4 * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
   5 * BT).
   6 *
   7 * Author: MontaVista Software, Inc.
   8 *         Corey Minyard <minyard@mvista.com>
   9 *         source@mvista.com
  10 *
  11 * Copyright 2002 MontaVista Software Inc.
  12 * Copyright 2006 IBM Corp., Christian Krafft <krafft@de.ibm.com>
  13 *
  14 *  This program is free software; you can redistribute it and/or modify it
  15 *  under the terms of the GNU General Public License as published by the
  16 *  Free Software Foundation; either version 2 of the License, or (at your
  17 *  option) any later version.
  18 *
  19 *
  20 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
  21 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
  22 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  23 *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  24 *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
  25 *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
  26 *  OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  27 *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
  28 *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
  29 *  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  30 *
  31 *  You should have received a copy of the GNU General Public License along
  32 *  with this program; if not, write to the Free Software Foundation, Inc.,
  33 *  675 Mass Ave, Cambridge, MA 02139, USA.
  34 */
  35
  36/*
  37 * This file holds the "policy" for the interface to the SMI state
  38 * machine.  It does the configuration, handles timers and interrupts,
  39 * and drives the real SMI state machine.
  40 */
  41
  42#include <linux/module.h>
  43#include <linux/moduleparam.h>
  44#include <asm/system.h>
  45#include <linux/sched.h>
  46#include <linux/seq_file.h>
  47#include <linux/timer.h>
  48#include <linux/errno.h>
  49#include <linux/spinlock.h>
  50#include <linux/slab.h>
  51#include <linux/delay.h>
  52#include <linux/list.h>
  53#include <linux/pci.h>
  54#include <linux/ioport.h>
  55#include <linux/notifier.h>
  56#include <linux/mutex.h>
  57#include <linux/kthread.h>
  58#include <asm/irq.h>
  59#include <linux/interrupt.h>
  60#include <linux/rcupdate.h>
  61#include <linux/ipmi.h>
  62#include <linux/ipmi_smi.h>
  63#include <asm/io.h>
  64#include "ipmi_si_sm.h"
  65#include <linux/init.h>
  66#include <linux/dmi.h>
  67#include <linux/string.h>
  68#include <linux/ctype.h>
  69#include <linux/pnp.h>
  70#include <linux/of_device.h>
  71#include <linux/of_platform.h>
  72#include <linux/of_address.h>
  73#include <linux/of_irq.h>
  74
/* Log-message prefix for this driver. */
#define PFX "ipmi_si: "

/* Measure times between events in the driver. */
#undef DEBUG_TIMING

/* Call every 10 ms. */
#define SI_TIMEOUT_TIME_USEC	10000
#define SI_USEC_PER_JIFFY	(1000000/HZ)
#define SI_TIMEOUT_JIFFIES	(SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
#define SI_SHORT_TIMEOUT_USEC  250 /* .25ms when the SM request a
				      short timeout */

/*
 * States of the driver's message state machine; advanced by
 * handle_transaction_done() as responses come back from the BMC.
 */
enum si_intf_state {
	SI_NORMAL,
	SI_GETTING_FLAGS,
	SI_GETTING_EVENTS,
	SI_CLEARING_FLAGS,
	SI_CLEARING_FLAGS_THEN_SET_IRQ,
	SI_GETTING_MESSAGES,
	SI_ENABLE_INTERRUPTS1,
	SI_ENABLE_INTERRUPTS2,
	SI_DISABLE_INTERRUPTS1,
	SI_DISABLE_INTERRUPTS2
	/* FIXME - add watchdog stuff. */
};

/* Some BT-specific defines we need here. */
#define IPMI_BT_INTMASK_REG		2
#define IPMI_BT_INTMASK_CLEAR_IRQ_BIT	2
#define IPMI_BT_INTMASK_ENABLE_IRQ_BIT	1

/* The hardware interface variants this driver can drive. */
enum si_type {
    SI_KCS, SI_SMIC, SI_BT
};
/* Printable names, indexed by enum si_type. */
static char *si_to_str[] = { "kcs", "smic", "bt" };

/* Printable names, indexed by enum ipmi_addr_src (entry 0 unused). */
static char *ipmi_addr_src_to_str[] = { NULL, "hotmod", "hardcoded", "SPMI",
					"ACPI", "SMBIOS", "PCI",
					"device-tree", "default" };

#define DEVICE_NAME "ipmi_si"

static struct platform_driver ipmi_driver;
 118
 119/*
 120 * Indexes into stats[] in smi_info below.
 121 */
/*
 * Indexes into stats[] in smi_info below.  Read/written only through
 * the smi_inc_stat()/smi_get_stat() macros.
 */
enum si_stat_indexes {
	/*
	 * Number of times the driver requested a timer while an operation
	 * was in progress.
	 */
	SI_STAT_short_timeouts = 0,

	/*
	 * Number of times the driver requested a timer while nothing was in
	 * progress.
	 */
	SI_STAT_long_timeouts,

	/* Number of times the interface was idle while being polled. */
	SI_STAT_idles,

	/* Number of interrupts the driver handled. */
	SI_STAT_interrupts,

	/* Number of times the driver got an ATTN from the hardware. */
	SI_STAT_attentions,

	/* Number of times the driver requested flags from the hardware. */
	SI_STAT_flag_fetches,

	/* Number of times the hardware didn't follow the state machine. */
	SI_STAT_hosed_count,

	/* Number of completed messages. */
	SI_STAT_complete_transactions,

	/* Number of IPMI events received from the hardware. */
	SI_STAT_events,

	/* Number of watchdog pretimeouts. */
	SI_STAT_watchdog_pretimeouts,

	/* Number of asynchronous messages received. */
	SI_STAT_incoming_messages,


	/* This *must* remain last, add new values above this. */
	SI_NUM_STATS
};
 166
/*
 * Per-interface state.  One of these exists for each registered
 * system interface (KCS, SMIC or BT).
 */
struct smi_info {
	int                    intf_num;
	ipmi_smi_t             intf;
	struct si_sm_data      *si_sm;
	struct si_sm_handlers  *handlers;
	enum si_type           si_type;
	spinlock_t             si_lock;   /* protects the state machine */
	spinlock_t             msg_lock;  /* protects the xmit queues below */
	struct list_head       xmit_msgs;
	struct list_head       hp_xmit_msgs;  /* high priority, sent first */
	struct ipmi_smi_msg    *curr_msg;
	enum si_intf_state     si_state;

	/*
	 * Used to handle the various types of I/O that can occur with
	 * IPMI
	 */
	struct si_sm_io io;
	int (*io_setup)(struct smi_info *info);
	void (*io_cleanup)(struct smi_info *info);
	int (*irq_setup)(struct smi_info *info);
	void (*irq_cleanup)(struct smi_info *info);
	unsigned int io_size;
	enum ipmi_addr_src addr_source; /* ACPI, PCI, SMBIOS, hardcode, etc. */
	void (*addr_source_cleanup)(struct smi_info *info);
	void *addr_source_data;

	/*
	 * Per-OEM handler, called from handle_flags().  Returns 1
	 * when handle_flags() needs to be re-run or 0 indicating it
	 * set si_state itself.
	 */
	int (*oem_data_avail_handler)(struct smi_info *smi_info);

	/*
	 * Flags from the last GET_MSG_FLAGS command, used when an ATTN
	 * is set to hold the flags until we are done handling everything
	 * from the flags.
	 */
#define RECEIVE_MSG_AVAIL	0x01
#define EVENT_MSG_BUFFER_FULL	0x02
#define WDT_PRE_TIMEOUT_INT	0x08
#define OEM0_DATA_AVAIL     0x20
#define OEM1_DATA_AVAIL     0x40
#define OEM2_DATA_AVAIL     0x80
#define OEM_DATA_AVAIL      (OEM0_DATA_AVAIL | \
			     OEM1_DATA_AVAIL | \
			     OEM2_DATA_AVAIL)
	unsigned char       msg_flags;

	/* Does the BMC have an event buffer? */
	char		    has_event_buffer;

	/*
	 * If set to true, this will request events the next time the
	 * state machine is idle.
	 */
	atomic_t            req_events;

	/*
	 * If true, run the state machine to completion on every send
	 * call.  Generally used after a panic to make sure stuff goes
	 * out.
	 */
	int                 run_to_completion;

	/* The I/O port of an SI interface. */
	int                 port;

	/*
	 * The space between start addresses of the two ports.  For
	 * instance, if the first port is 0xca2 and the spacing is 4, then
	 * the second port is 0xca6.
	 */
	unsigned int        spacing;

	/* zero if no irq; */
	int                 irq;

	/* The timer for this si. */
	struct timer_list   si_timer;

	/* The time (in jiffies) the last timeout occurred at. */
	unsigned long       last_timeout_jiffies;

	/* Used to gracefully stop the timer without race conditions. */
	atomic_t            stop_operation;

	/*
	 * The driver will disable interrupts when it gets into a
	 * situation where it cannot handle messages due to lack of
	 * memory.  Once that situation clears up, it will re-enable
	 * interrupts.
	 */
	int interrupt_disabled;

	/* From the get device id response... */
	struct ipmi_device_id device_id;

	/* Driver model stuff. */
	struct device *dev;
	struct platform_device *pdev;

	/*
	 * True if we allocated the device, false if it came from
	 * someplace else (like PCI).
	 */
	int dev_registered;

	/* Slave address, could be reported from DMI. */
	unsigned char slave_addr;

	/* Counters and things for the proc filesystem. */
	atomic_t stats[SI_NUM_STATS];

	/* kipmid polling thread, NULL if not running. */
	struct task_struct *thread;

	struct list_head link;
	union ipmi_smi_info_union addr_info;
};
 287
/* Bump / read a statistics counter by its short name (see si_stat_indexes). */
#define smi_inc_stat(smi, stat) \
	atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
#define smi_get_stat(smi, stat) \
	((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))

/* Maximum number of interfaces configurable via module parameters. */
#define SI_MAX_PARMS 4

static int force_kipmid[SI_MAX_PARMS];
static int num_force_kipmid;
#ifdef CONFIG_PCI
static int pci_registered;
#endif
#ifdef CONFIG_ACPI
static int pnp_registered;
#endif

static unsigned int kipmid_max_busy_us[SI_MAX_PARMS];
static int num_max_busy_us;

static int unload_when_empty = 1;

/* Forward declarations. */
static int add_smi(struct smi_info *smi);
static int try_smi_init(struct smi_info *smi);
static void cleanup_one_si(struct smi_info *to_clean);
static void cleanup_ipmi_si(void);
 313
static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
/* Register for a callback just before each transaction is started. */
static int register_xaction_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&xaction_notifier_list, nb);
}
 319
 320static void deliver_recv_msg(struct smi_info *smi_info,
 321			     struct ipmi_smi_msg *msg)
 322{
 323	/* Deliver the message to the upper layer with the lock
 324	   released. */
 325
 326	if (smi_info->run_to_completion) {
 327		ipmi_smi_msg_received(smi_info->intf, msg);
 328	} else {
 329		spin_unlock(&(smi_info->si_lock));
 330		ipmi_smi_msg_received(smi_info->intf, msg);
 331		spin_lock(&(smi_info->si_lock));
 332	}
 333}
 334
 335static void return_hosed_msg(struct smi_info *smi_info, int cCode)
 336{
 337	struct ipmi_smi_msg *msg = smi_info->curr_msg;
 338
 339	if (cCode < 0 || cCode > IPMI_ERR_UNSPECIFIED)
 340		cCode = IPMI_ERR_UNSPECIFIED;
 341	/* else use it as is */
 342
 343	/* Make it a response */
 344	msg->rsp[0] = msg->data[0] | 4;
 345	msg->rsp[1] = msg->data[1];
 346	msg->rsp[2] = cCode;
 347	msg->rsp_size = 3;
 348
 349	smi_info->curr_msg = NULL;
 350	deliver_recv_msg(smi_info, msg);
 351}
 352
 353static enum si_sm_result start_next_msg(struct smi_info *smi_info)
 354{
 355	int              rv;
 356	struct list_head *entry = NULL;
 357#ifdef DEBUG_TIMING
 358	struct timeval t;
 359#endif
 360
 361	/*
 362	 * No need to save flags, we aleady have interrupts off and we
 363	 * already hold the SMI lock.
 364	 */
 365	if (!smi_info->run_to_completion)
 366		spin_lock(&(smi_info->msg_lock));
 367
 368	/* Pick the high priority queue first. */
 369	if (!list_empty(&(smi_info->hp_xmit_msgs))) {
 370		entry = smi_info->hp_xmit_msgs.next;
 371	} else if (!list_empty(&(smi_info->xmit_msgs))) {
 372		entry = smi_info->xmit_msgs.next;
 373	}
 374
 375	if (!entry) {
 376		smi_info->curr_msg = NULL;
 377		rv = SI_SM_IDLE;
 378	} else {
 379		int err;
 380
 381		list_del(entry);
 382		smi_info->curr_msg = list_entry(entry,
 383						struct ipmi_smi_msg,
 384						link);
 385#ifdef DEBUG_TIMING
 386		do_gettimeofday(&t);
 387		printk(KERN_DEBUG "**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
 388#endif
 389		err = atomic_notifier_call_chain(&xaction_notifier_list,
 390				0, smi_info);
 391		if (err & NOTIFY_STOP_MASK) {
 392			rv = SI_SM_CALL_WITHOUT_DELAY;
 393			goto out;
 394		}
 395		err = smi_info->handlers->start_transaction(
 396			smi_info->si_sm,
 397			smi_info->curr_msg->data,
 398			smi_info->curr_msg->data_size);
 399		if (err)
 400			return_hosed_msg(smi_info, err);
 401
 402		rv = SI_SM_CALL_WITHOUT_DELAY;
 403	}
 404 out:
 405	if (!smi_info->run_to_completion)
 406		spin_unlock(&(smi_info->msg_lock));
 407
 408	return rv;
 409}
 410
 411static void start_enable_irq(struct smi_info *smi_info)
 412{
 413	unsigned char msg[2];
 414
 415	/*
 416	 * If we are enabling interrupts, we have to tell the
 417	 * BMC to use them.
 418	 */
 419	msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
 420	msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
 421
 422	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
 423	smi_info->si_state = SI_ENABLE_INTERRUPTS1;
 424}
 425
 426static void start_disable_irq(struct smi_info *smi_info)
 427{
 428	unsigned char msg[2];
 429
 430	msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
 431	msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
 432
 433	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
 434	smi_info->si_state = SI_DISABLE_INTERRUPTS1;
 435}
 436
 437static void start_clear_flags(struct smi_info *smi_info)
 438{
 439	unsigned char msg[3];
 440
 441	/* Make sure the watchdog pre-timeout flag is not set at startup. */
 442	msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
 443	msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
 444	msg[2] = WDT_PRE_TIMEOUT_INT;
 445
 446	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
 447	smi_info->si_state = SI_CLEARING_FLAGS;
 448}
 449
 450/*
 451 * When we have a situtaion where we run out of memory and cannot
 452 * allocate messages, we just leave them in the BMC and run the system
 453 * polled until we can allocate some memory.  Once we have some
 454 * memory, we will re-enable the interrupt.
 455 */
 456static inline void disable_si_irq(struct smi_info *smi_info)
 457{
 458	if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
 459		start_disable_irq(smi_info);
 460		smi_info->interrupt_disabled = 1;
 461		if (!atomic_read(&smi_info->stop_operation))
 462			mod_timer(&smi_info->si_timer,
 463				  jiffies + SI_TIMEOUT_JIFFIES);
 464	}
 465}
 466
 467static inline void enable_si_irq(struct smi_info *smi_info)
 468{
 469	if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
 470		start_enable_irq(smi_info);
 471		smi_info->interrupt_disabled = 0;
 472	}
 473}
 474
/*
 * Act on the flags fetched with GET_MSG_FLAGS, in priority order:
 * watchdog pre-timeout, then a pending receive message, then pending
 * events, then OEM-specific data.  Called with si_lock held; it may
 * temporarily drop the lock (watchdog path) or leave si_state changed.
 */
static void handle_flags(struct smi_info *smi_info)
{
 retry:
	if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
		/* Watchdog pre-timeout */
		smi_inc_stat(smi_info, watchdog_pretimeouts);

		start_clear_flags(smi_info);
		smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
		/* The pretimeout up-call must run without si_lock held. */
		spin_unlock(&(smi_info->si_lock));
		ipmi_smi_watchdog_pretimeout(smi_info->intf);
		spin_lock(&(smi_info->si_lock));
	} else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
		/* Messages available. */
		smi_info->curr_msg = ipmi_alloc_smi_msg();
		if (!smi_info->curr_msg) {
			/* Out of memory: fall back to polled mode for now. */
			disable_si_irq(smi_info);
			smi_info->si_state = SI_NORMAL;
			return;
		}
		enable_si_irq(smi_info);

		smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
		smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
		smi_info->curr_msg->data_size = 2;

		smi_info->handlers->start_transaction(
			smi_info->si_sm,
			smi_info->curr_msg->data,
			smi_info->curr_msg->data_size);
		smi_info->si_state = SI_GETTING_MESSAGES;
	} else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
		/* Events available. */
		smi_info->curr_msg = ipmi_alloc_smi_msg();
		if (!smi_info->curr_msg) {
			/* Out of memory: fall back to polled mode for now. */
			disable_si_irq(smi_info);
			smi_info->si_state = SI_NORMAL;
			return;
		}
		enable_si_irq(smi_info);

		smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
		smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
		smi_info->curr_msg->data_size = 2;

		smi_info->handlers->start_transaction(
			smi_info->si_sm,
			smi_info->curr_msg->data,
			smi_info->curr_msg->data_size);
		smi_info->si_state = SI_GETTING_EVENTS;
	} else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
		   smi_info->oem_data_avail_handler) {
		/* Handler returns 1 when the flags must be re-examined. */
		if (smi_info->oem_data_avail_handler(smi_info))
			goto retry;
	} else
		smi_info->si_state = SI_NORMAL;
}
 532
/*
 * A transaction finished in the low-level state machine; collect the
 * result and advance the driver state machine (si_state) accordingly.
 * Called with si_lock held; deliver_recv_msg()/handle_flags() may drop
 * the lock temporarily.
 */
static void handle_transaction_done(struct smi_info *smi_info)
{
	struct ipmi_smi_msg *msg;
#ifdef DEBUG_TIMING
	struct timeval t;

	do_gettimeofday(&t);
	printk(KERN_DEBUG "**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
	switch (smi_info->si_state) {
	case SI_NORMAL:
		if (!smi_info->curr_msg)
			break;

		smi_info->curr_msg->rsp_size
			= smi_info->handlers->get_result(
				smi_info->si_sm,
				smi_info->curr_msg->rsp,
				IPMI_MAX_MSG_LENGTH);

		/*
		 * Do this here because deliver_recv_msg() releases the
		 * lock, and a new message can be put in during the
		 * time the lock is released.
		 */
		msg = smi_info->curr_msg;
		smi_info->curr_msg = NULL;
		deliver_recv_msg(smi_info, msg);
		break;

	case SI_GETTING_FLAGS:
	{
		unsigned char msg[4];
		unsigned int  len;

		/* We got the flags from the SMI, now handle them. */
		len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
		if (msg[2] != 0) {
			/* Error fetching flags, just give up for now. */
			smi_info->si_state = SI_NORMAL;
		} else if (len < 4) {
			/*
			 * Hmm, no flags.  That's technically illegal, but
			 * don't use uninitialized data.
			 */
			smi_info->si_state = SI_NORMAL;
		} else {
			smi_info->msg_flags = msg[3];
			handle_flags(smi_info);
		}
		break;
	}

	case SI_CLEARING_FLAGS:
	case SI_CLEARING_FLAGS_THEN_SET_IRQ:
	{
		unsigned char msg[3];

		/* We cleared the flags. */
		smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
		if (msg[2] != 0) {
			/* Error clearing flags */
			dev_warn(smi_info->dev,
				 "Error clearing flags: %2.2x\n", msg[2]);
		}
		if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
			start_enable_irq(smi_info);
		else
			smi_info->si_state = SI_NORMAL;
		break;
	}

	case SI_GETTING_EVENTS:
	{
		smi_info->curr_msg->rsp_size
			= smi_info->handlers->get_result(
				smi_info->si_sm,
				smi_info->curr_msg->rsp,
				IPMI_MAX_MSG_LENGTH);

		/*
		 * Do this here because deliver_recv_msg() releases the
		 * lock, and a new message can be put in during the
		 * time the lock is released.
		 */
		msg = smi_info->curr_msg;
		smi_info->curr_msg = NULL;
		if (msg->rsp[2] != 0) {
			/* Error getting event, probably done. */
			msg->done(msg);

			/* Take off the event flag. */
			smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
			handle_flags(smi_info);
		} else {
			smi_inc_stat(smi_info, events);

			/*
			 * Do this before we deliver the message
			 * because delivering the message releases the
			 * lock and something else can mess with the
			 * state.
			 */
			handle_flags(smi_info);

			deliver_recv_msg(smi_info, msg);
		}
		break;
	}

	case SI_GETTING_MESSAGES:
	{
		smi_info->curr_msg->rsp_size
			= smi_info->handlers->get_result(
				smi_info->si_sm,
				smi_info->curr_msg->rsp,
				IPMI_MAX_MSG_LENGTH);

		/*
		 * Do this here because deliver_recv_msg() releases the
		 * lock, and a new message can be put in during the
		 * time the lock is released.
		 */
		msg = smi_info->curr_msg;
		smi_info->curr_msg = NULL;
		if (msg->rsp[2] != 0) {
			/* Error getting message, probably done. */
			msg->done(msg);

			/* Take off the msg flag. */
			smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
			handle_flags(smi_info);
		} else {
			smi_inc_stat(smi_info, incoming_messages);

			/*
			 * Do this before we deliver the message
			 * because delivering the message releases the
			 * lock and something else can mess with the
			 * state.
			 */
			handle_flags(smi_info);

			deliver_recv_msg(smi_info, msg);
		}
		break;
	}

	case SI_ENABLE_INTERRUPTS1:
	{
		unsigned char msg[4];

		/* We got the flags from the SMI, now handle them. */
		smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
		if (msg[2] != 0) {
			dev_warn(smi_info->dev, "Could not enable interrupts"
				 ", failed get, using polled mode.\n");
			smi_info->si_state = SI_NORMAL;
		} else {
			/* Set the interrupt bits on top of the current ones. */
			msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
			msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
			msg[2] = (msg[3] |
				  IPMI_BMC_RCV_MSG_INTR |
				  IPMI_BMC_EVT_MSG_INTR);
			smi_info->handlers->start_transaction(
				smi_info->si_sm, msg, 3);
			smi_info->si_state = SI_ENABLE_INTERRUPTS2;
		}
		break;
	}

	case SI_ENABLE_INTERRUPTS2:
	{
		unsigned char msg[4];

		/* We got the flags from the SMI, now handle them. */
		smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
		if (msg[2] != 0)
			dev_warn(smi_info->dev, "Could not enable interrupts"
				 ", failed set, using polled mode.\n");
		else
			smi_info->interrupt_disabled = 0;
		smi_info->si_state = SI_NORMAL;
		break;
	}

	case SI_DISABLE_INTERRUPTS1:
	{
		unsigned char msg[4];

		/* We got the flags from the SMI, now handle them. */
		smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
		if (msg[2] != 0) {
			dev_warn(smi_info->dev, "Could not disable interrupts"
				 ", failed get.\n");
			smi_info->si_state = SI_NORMAL;
		} else {
			/* Clear the interrupt bits, keep the others. */
			msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
			msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
			msg[2] = (msg[3] &
				  ~(IPMI_BMC_RCV_MSG_INTR |
				    IPMI_BMC_EVT_MSG_INTR));
			smi_info->handlers->start_transaction(
				smi_info->si_sm, msg, 3);
			smi_info->si_state = SI_DISABLE_INTERRUPTS2;
		}
		break;
	}

	case SI_DISABLE_INTERRUPTS2:
	{
		unsigned char msg[4];

		/* We got the flags from the SMI, now handle them. */
		smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
		if (msg[2] != 0) {
			dev_warn(smi_info->dev, "Could not disable interrupts"
				 ", failed set.\n");
		}
		smi_info->si_state = SI_NORMAL;
		break;
	}
	}
}
 757
/*
 * Called on timeouts and events.  Timeouts should pass the elapsed
 * time (in usec), interrupts should pass in zero.  Must be called with
 * si_lock held and interrupts disabled.  Returns the state-machine
 * result so the caller can decide how soon to poll again.
 */
static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
					   int time)
{
	enum si_sm_result si_sm_result;

 restart:
	/*
	 * There used to be a loop here that waited a little while
	 * (around 25us) before giving up.  That turned out to be
	 * pointless, the minimum delays I was seeing were in the 300us
	 * range, which is far too long to wait in an interrupt.  So
	 * we just run until the state machine tells us something
	 * happened or it needs a delay.
	 */
	si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
	time = 0;
	while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
		si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);

	if (si_sm_result == SI_SM_TRANSACTION_COMPLETE) {
		smi_inc_stat(smi_info, complete_transactions);

		handle_transaction_done(smi_info);
		si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
	} else if (si_sm_result == SI_SM_HOSED) {
		smi_inc_stat(smi_info, hosed_count);

		/*
		 * Do this before return_hosed_msg, because that
		 * releases the lock.
		 */
		smi_info->si_state = SI_NORMAL;
		if (smi_info->curr_msg != NULL) {
			/*
			 * If we were handling a user message, format
			 * a response to send to the upper layer to
			 * tell it about the error.
			 */
			return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
		}
		si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
	}

	/*
	 * We prefer handling attn over new messages.  But don't do
	 * this if there is not yet an upper layer to handle anything.
	 */
	if (likely(smi_info->intf) && si_sm_result == SI_SM_ATTN) {
		unsigned char msg[2];

		smi_inc_stat(smi_info, attentions);

		/*
		 * Got an attn, send down a get message flags to see
		 * what's causing it.  It would be better to handle
		 * this in the upper layer, but due to the way
		 * interrupts work with the SMI, that's not really
		 * possible.
		 */
		msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
		msg[1] = IPMI_GET_MSG_FLAGS_CMD;

		smi_info->handlers->start_transaction(
			smi_info->si_sm, msg, 2);
		smi_info->si_state = SI_GETTING_FLAGS;
		goto restart;
	}

	/* If we are currently idle, try to start the next message. */
	if (si_sm_result == SI_SM_IDLE) {
		smi_inc_stat(smi_info, idles);

		si_sm_result = start_next_msg(smi_info);
		if (si_sm_result != SI_SM_IDLE)
			goto restart;
	}

	if ((si_sm_result == SI_SM_IDLE)
	    && (atomic_read(&smi_info->req_events))) {
		/*
		 * We are idle and the upper layer requested that I fetch
		 * events, so do so.
		 */
		atomic_set(&smi_info->req_events, 0);

		smi_info->curr_msg = ipmi_alloc_smi_msg();
		if (!smi_info->curr_msg)
			goto out;

		smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
		smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
		smi_info->curr_msg->data_size = 2;

		smi_info->handlers->start_transaction(
			smi_info->si_sm,
			smi_info->curr_msg->data,
			smi_info->curr_msg->data_size);
		smi_info->si_state = SI_GETTING_EVENTS;
		goto restart;
	}
 out:
	return si_sm_result;
}
 866
 867static void sender(void                *send_info,
 868		   struct ipmi_smi_msg *msg,
 869		   int                 priority)
 870{
 871	struct smi_info   *smi_info = send_info;
 872	enum si_sm_result result;
 873	unsigned long     flags;
 874#ifdef DEBUG_TIMING
 875	struct timeval    t;
 876#endif
 877
 878	if (atomic_read(&smi_info->stop_operation)) {
 879		msg->rsp[0] = msg->data[0] | 4;
 880		msg->rsp[1] = msg->data[1];
 881		msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
 882		msg->rsp_size = 3;
 883		deliver_recv_msg(smi_info, msg);
 884		return;
 885	}
 886
 887#ifdef DEBUG_TIMING
 888	do_gettimeofday(&t);
 889	printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
 890#endif
 891
 892	/*
 893	 * last_timeout_jiffies is updated here to avoid
 894	 * smi_timeout() handler passing very large time_diff
 895	 * value to smi_event_handler() that causes
 896	 * the send command to abort.
 897	 */
 898	smi_info->last_timeout_jiffies = jiffies;
 899
 900	mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
 901
 902	if (smi_info->thread)
 903		wake_up_process(smi_info->thread);
 904
 905	if (smi_info->run_to_completion) {
 906		/*
 907		 * If we are running to completion, then throw it in
 908		 * the list and run transactions until everything is
 909		 * clear.  Priority doesn't matter here.
 910		 */
 911
 912		/*
 913		 * Run to completion means we are single-threaded, no
 914		 * need for locks.
 915		 */
 916		list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
 917
 918		result = smi_event_handler(smi_info, 0);
 919		while (result != SI_SM_IDLE) {
 920			udelay(SI_SHORT_TIMEOUT_USEC);
 921			result = smi_event_handler(smi_info,
 922						   SI_SHORT_TIMEOUT_USEC);
 923		}
 924		return;
 925	}
 926
 927	spin_lock_irqsave(&smi_info->msg_lock, flags);
 928	if (priority > 0)
 929		list_add_tail(&msg->link, &smi_info->hp_xmit_msgs);
 930	else
 931		list_add_tail(&msg->link, &smi_info->xmit_msgs);
 932	spin_unlock_irqrestore(&smi_info->msg_lock, flags);
 933
 934	spin_lock_irqsave(&smi_info->si_lock, flags);
 935	if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL)
 936		start_next_msg(smi_info);
 937	spin_unlock_irqrestore(&smi_info->si_lock, flags);
 938}
 939
 940static void set_run_to_completion(void *send_info, int i_run_to_completion)
 941{
 942	struct smi_info   *smi_info = send_info;
 943	enum si_sm_result result;
 944
 945	smi_info->run_to_completion = i_run_to_completion;
 946	if (i_run_to_completion) {
 947		result = smi_event_handler(smi_info, 0);
 948		while (result != SI_SM_IDLE) {
 949			udelay(SI_SHORT_TIMEOUT_USEC);
 950			result = smi_event_handler(smi_info,
 951						   SI_SHORT_TIMEOUT_USEC);
 952		}
 953	}
 954}
 955
 956/*
 957 * Use -1 in the nsec value of the busy waiting timespec to tell that
 958 * we are spinning in kipmid looking for something and not delaying
 959 * between checks
 960 */
 961static inline void ipmi_si_set_not_busy(struct timespec *ts)
 962{
 963	ts->tv_nsec = -1;
 964}
 965static inline int ipmi_si_is_busy(struct timespec *ts)
 966{
 967	return ts->tv_nsec != -1;
 968}
 969
 970static int ipmi_thread_busy_wait(enum si_sm_result smi_result,
 971				 const struct smi_info *smi_info,
 972				 struct timespec *busy_until)
 973{
 974	unsigned int max_busy_us = 0;
 975
 976	if (smi_info->intf_num < num_max_busy_us)
 977		max_busy_us = kipmid_max_busy_us[smi_info->intf_num];
 978	if (max_busy_us == 0 || smi_result != SI_SM_CALL_WITH_DELAY)
 979		ipmi_si_set_not_busy(busy_until);
 980	else if (!ipmi_si_is_busy(busy_until)) {
 981		getnstimeofday(busy_until);
 982		timespec_add_ns(busy_until, max_busy_us*NSEC_PER_USEC);
 983	} else {
 984		struct timespec now;
 985		getnstimeofday(&now);
 986		if (unlikely(timespec_compare(&now, busy_until) > 0)) {
 987			ipmi_si_set_not_busy(busy_until);
 988			return 0;
 989		}
 990	}
 991	return 1;
 992}
 993
 994
 995/*
 996 * A busy-waiting loop for speeding up IPMI operation.
 997 *
 998 * Lousy hardware makes this hard.  This is only enabled for systems
 999 * that are not BT and do not have interrupts.  It starts spinning
1000 * when an operation is complete or until max_busy tells it to stop
1001 * (if that is enabled).  See the paragraph on kimid_max_busy_us in
1002 * Documentation/IPMI.txt for details.
1003 */
static int ipmi_thread(void *data)
{
	struct smi_info *smi_info = data;
	unsigned long flags;
	enum si_sm_result smi_result;
	struct timespec busy_until;

	ipmi_si_set_not_busy(&busy_until);
	set_user_nice(current, 19);	/* lowest priority: this thread only polls */
	while (!kthread_should_stop()) {
		int busy_wait;

		/* Run one step of the state machine under the SI lock. */
		spin_lock_irqsave(&(smi_info->si_lock), flags);
		smi_result = smi_event_handler(smi_info, 0);
		spin_unlock_irqrestore(&(smi_info->si_lock), flags);
		busy_wait = ipmi_thread_busy_wait(smi_result, smi_info,
						  &busy_until);
		if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
			; /* do nothing */
		else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait)
			schedule();	/* stay runnable; just yield the CPU */
		else if (smi_result == SI_SM_IDLE)
			schedule_timeout_interruptible(100);
		else
			schedule_timeout_interruptible(1);
	}
	return 0;
}
1032
1033
/*
 * Polling hook for the IPMI core: advance the state machine once,
 * crediting it 10 usecs of elapsed time.
 */
static void poll(void *send_info)
{
	struct smi_info *smi_info = send_info;
	unsigned long flags;

	/*
	 * Make sure there is some delay in the poll loop so we can
	 * drive time forward and timeout things.
	 */
	udelay(10);
	spin_lock_irqsave(&smi_info->si_lock, flags);
	smi_event_handler(smi_info, 10);
	spin_unlock_irqrestore(&smi_info->si_lock, flags);
}
1048
/*
 * Flag that BMC events should be fetched; the req_events flag is
 * consumed by the state-machine driving code elsewhere in this file.
 */
static void request_events(void *send_info)
{
	struct smi_info *smi_info = send_info;

	/* Pointless once shutdown has started or without an event buffer. */
	if (atomic_read(&smi_info->stop_operation) ||
				!smi_info->has_event_buffer)
		return;

	atomic_set(&smi_info->req_events, 1);
}
1059
1060static int initialized;
1061
/*
 * Timer callback that drives the interface.  Runs the state machine,
 * then rearms itself: a long period when interrupts are working (the
 * timer is then only a safety net), one jiffy when the machine asked
 * for a short delay, and not at all once the machine is idle.
 */
static void smi_timeout(unsigned long data)
{
	struct smi_info   *smi_info = (struct smi_info *) data;
	enum si_sm_result smi_result;
	unsigned long     flags;
	unsigned long     jiffies_now;
	long              time_diff;
	long		  timeout;
#ifdef DEBUG_TIMING
	struct timeval    t;
#endif

	spin_lock_irqsave(&(smi_info->si_lock), flags);
#ifdef DEBUG_TIMING
	do_gettimeofday(&t);
	/* NOTE(review): tv_sec/tv_usec are longs; %d should be %ld here. */
	printk(KERN_DEBUG "**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
	jiffies_now = jiffies;
	/* Microseconds elapsed since the last pass, for the state machine. */
	time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
		     * SI_USEC_PER_JIFFY);
	smi_result = smi_event_handler(smi_info, time_diff);

	spin_unlock_irqrestore(&(smi_info->si_lock), flags);

	smi_info->last_timeout_jiffies = jiffies_now;

	if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
		/* Running with interrupts, only do long timeouts. */
		timeout = jiffies + SI_TIMEOUT_JIFFIES;
		smi_inc_stat(smi_info, long_timeouts);
		goto do_mod_timer;
	}

	/*
	 * If the state machine asks for a short delay, then shorten
	 * the timer timeout.
	 */
	if (smi_result == SI_SM_CALL_WITH_DELAY) {
		smi_inc_stat(smi_info, short_timeouts);
		timeout = jiffies + 1;
	} else {
		smi_inc_stat(smi_info, long_timeouts);
		timeout = jiffies + SI_TIMEOUT_JIFFIES;
	}

 do_mod_timer:
	if (smi_result != SI_SM_IDLE)
		mod_timer(&(smi_info->si_timer), timeout);
}
1111
/*
 * Generic interrupt handler: bump the stat and run the state machine
 * once under the SI lock.
 */
static irqreturn_t si_irq_handler(int irq, void *data)
{
	struct smi_info *smi_info = data;
	unsigned long   flags;
#ifdef DEBUG_TIMING
	struct timeval  t;
#endif

	spin_lock_irqsave(&(smi_info->si_lock), flags);

	smi_inc_stat(smi_info, interrupts);

#ifdef DEBUG_TIMING
	do_gettimeofday(&t);
	/* NOTE(review): tv_sec/tv_usec are longs; %d should be %ld here. */
	printk(KERN_DEBUG "**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
	smi_event_handler(smi_info, 0);
	spin_unlock_irqrestore(&(smi_info->si_lock), flags);
	return IRQ_HANDLED;
}
1132
/* BT-specific IRQ wrapper: ack the interrupt in the hardware first. */
static irqreturn_t si_bt_irq_handler(int irq, void *data)
{
	struct smi_info *smi_info = data;
	/* We need to clear the IRQ flag for the BT interface. */
	smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
			     IPMI_BT_INTMASK_CLEAR_IRQ_BIT
			     | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
	return si_irq_handler(irq, data);
}
1142
/*
 * Called by the IPMI core when the upper-layer interface is registered.
 * Claims the IRQ, starts the timer that drives the state machine and,
 * when forced via force_kipmid or needed (non-BT with no IRQ), spawns
 * the kipmid polling thread.  Always returns 0; a failed thread start
 * just falls back to timer-driven operation.
 */
static int smi_start_processing(void       *send_info,
				ipmi_smi_t intf)
{
	struct smi_info *new_smi = send_info;
	int             enable = 0;

	new_smi->intf = intf;

	/* Try to claim any interrupts. */
	if (new_smi->irq_setup)
		new_smi->irq_setup(new_smi);

	/* Set up the timer that drives the interface. */
	setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
	new_smi->last_timeout_jiffies = jiffies;
	mod_timer(&new_smi->si_timer, jiffies + SI_TIMEOUT_JIFFIES);

	/*
	 * Check if the user forcefully enabled the daemon.
	 */
	if (new_smi->intf_num < num_force_kipmid)
		enable = force_kipmid[new_smi->intf_num];
	/*
	 * The BT interface is efficient enough to not need a thread,
	 * and there is no need for a thread if we have interrupts.
	 */
	else if ((new_smi->si_type != SI_BT) && (!new_smi->irq))
		enable = 1;

	if (enable) {
		new_smi->thread = kthread_run(ipmi_thread, new_smi,
					      "kipmi%d", new_smi->intf_num);
		if (IS_ERR(new_smi->thread)) {
			dev_notice(new_smi->dev, "Could not start"
				   " kernel thread due to error %ld, only using"
				   " timers to drive the interface\n",
				   PTR_ERR(new_smi->thread));
			new_smi->thread = NULL;
		}
	}

	return 0;
}
1186
/*
 * Report address-source information to the IPMI core.  Takes a
 * reference on the device; NOTE(review): presumably the caller drops
 * it when done with *data -- confirm in the core.
 */
static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
{
	struct smi_info *smi = send_info;

	data->addr_src = smi->addr_source;
	data->dev = smi->dev;
	data->addr_info = smi->addr_info;
	get_device(smi->dev);

	return 0;
}
1198
/*
 * Maintenance-mode hook: when the mode is switched off, cancel any
 * outstanding request to fetch events.
 */
static void set_maintenance_mode(void *send_info, int enable)
{
	struct smi_info   *smi_info = send_info;

	if (!enable)
		atomic_set(&smi_info->req_events, 0);
}
1206
/* Callbacks this driver registers with the IPMI message-handling core. */
static struct ipmi_smi_handlers handlers = {
	.owner                  = THIS_MODULE,
	.start_processing       = smi_start_processing,
	.get_smi_info		= get_smi_info,
	.sender			= sender,
	.request_events		= request_events,
	.set_maintenance_mode   = set_maintenance_mode,
	.set_run_to_completion  = set_run_to_completion,
	.poll			= poll,
};
1217
1218/*
1219 * There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
1220 * a default IO port, and 1 ACPI/SPMI address.  That sets SI_MAX_DRIVERS.
1221 */
1222
static LIST_HEAD(smi_infos);		/* every interface we know about */
static DEFINE_MUTEX(smi_infos_lock);	/* protects smi_infos */
static int smi_num; /* Used to sequence the SMIs */

#define DEFAULT_REGSPACING	1
#define DEFAULT_REGSIZE		1

/* Module-parameter storage: one slot per possible interface (SI_MAX_PARMS). */
static int           si_trydefaults = 1;
static char          *si_type[SI_MAX_PARMS];
#define MAX_SI_TYPE_STR 30
static char          si_type_str[MAX_SI_TYPE_STR];
static unsigned long addrs[SI_MAX_PARMS];
static unsigned int num_addrs;
static unsigned int  ports[SI_MAX_PARMS];
static unsigned int num_ports;
static int           irqs[SI_MAX_PARMS];
static unsigned int num_irqs;
static int           regspacings[SI_MAX_PARMS];
static unsigned int num_regspacings;
static int           regsizes[SI_MAX_PARMS];
static unsigned int num_regsizes;
static int           regshifts[SI_MAX_PARMS];
static unsigned int num_regshifts;
static int slave_addrs[SI_MAX_PARMS]; /* Leaving 0 chooses the default value */
static unsigned int num_slave_addrs;

#define IPMI_IO_ADDR_SPACE  0
#define IPMI_MEM_ADDR_SPACE 1
static char *addr_space_to_str[] = { "i/o", "mem" };
1252
/* "hotmod" is write-only (0200): writes are parsed by hotmod_handler(). */
static int hotmod_handler(const char *val, struct kernel_param *kp);

module_param_call(hotmod, hotmod_handler, NULL, NULL, 0200);
MODULE_PARM_DESC(hotmod, "Add and remove interfaces.  See"
		 " Documentation/IPMI.txt in the kernel sources for the"
		 " gory details.");
1259
/*
 * Discovery/addressing module parameters; each array is indexed by
 * interface number.
 * NOTE(review): si_trydefaults is declared int but exposed as bool --
 * accepted by moduleparam here, but newer kernels require a real bool.
 */
module_param_named(trydefaults, si_trydefaults, bool, 0);
MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the"
		 " default scan of the KCS and SMIC interface at the standard"
		 " address");
module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
MODULE_PARM_DESC(type, "Defines the type of each interface, each"
		 " interface separated by commas.  The types are 'kcs',"
		 " 'smic', and 'bt'.  For example si_type=kcs,bt will set"
		 " the first interface to kcs and the second to bt");
module_param_array(addrs, ulong, &num_addrs, 0);
MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the"
		 " addresses separated by commas.  Only use if an interface"
		 " is in memory.  Otherwise, set it to zero or leave"
		 " it blank.");
module_param_array(ports, uint, &num_ports, 0);
MODULE_PARM_DESC(ports, "Sets the port address of each interface, the"
		 " addresses separated by commas.  Only use if an interface"
		 " is a port.  Otherwise, set it to zero or leave"
		 " it blank.");
module_param_array(irqs, int, &num_irqs, 0);
MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the"
		 " addresses separated by commas.  Only use if an interface"
		 " has an interrupt.  Otherwise, set it to zero or leave"
		 " it blank.");
module_param_array(regspacings, int, &num_regspacings, 0);
MODULE_PARM_DESC(regspacings, "The number of bytes between the start address"
		 " and each successive register used by the interface.  For"
		 " instance, if the start address is 0xca2 and the spacing"
		 " is 2, then the second address is at 0xca4.  Defaults"
		 " to 1.");
1290module_param_array(regsizes, int, &num_regsizes, 0);
1291MODULE_PARM_DESC(regsizes, "The size of the specific IPMI register in bytes."
1292		 " This should generally be 1, 2, 4, or 8 for an 8-bit,"
1293		 " 16-bit, 32-bit, or 64-bit register.  Use this if you"
1294		 " the 8-bit IPMI register has to be read from a larger"
1295		 " register.");
1296module_param_array(regshifts, int, &num_regshifts, 0);
1297MODULE_PARM_DESC(regshifts, "The amount to shift the data read from the."
1298		 " IPMI register, in bits.  For instance, if the data"
1299		 " is read from a 32-bit word and the IPMI data is in"
1300		 " bit 8-15, then the shift would be 8");
/* Behavioral module parameters (slave address, kipmid control, unload). */
module_param_array(slave_addrs, int, &num_slave_addrs, 0);
MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for"
		 " the controller.  Normally this is 0x20, but can be"
		 " overridden by this parm.  This is an array indexed"
		 " by interface number.");
module_param_array(force_kipmid, int, &num_force_kipmid, 0);
MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or"
		 " disabled(0).  Normally the IPMI driver auto-detects"
		 " this, but the value may be overridden by this parm.");
module_param(unload_when_empty, int, 0);
MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are"
		 " specified or found, default is 1.  Setting to 0"
		 " is useful for hot add of devices using hotmod.");
module_param_array(kipmid_max_busy_us, uint, &num_max_busy_us, 0644);
MODULE_PARM_DESC(kipmid_max_busy_us,
		 "Max time (in microseconds) to busy-wait for IPMI data before"
		 " sleeping. 0 (default) means to wait forever. Set to 100-500"
		 " if kipmid is using up a lot of CPU time.");
1319
1320
/* Undo std_irq_setup(): mask the BT interrupt (if BT) and free the line. */
static void std_irq_cleanup(struct smi_info *info)
{
	if (info->si_type == SI_BT)
		/* Disable the interrupt in the BT interface. */
		info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 0);
	free_irq(info->irq, info);
}
1328
/*
 * Claim info->irq as a shared interrupt.  On failure the interface
 * falls back to polled operation (info->irq is cleared); on success
 * info->irq_cleanup is set so teardown releases the line.  Returns
 * the request_irq() result.
 * NOTE(review): IRQF_DISABLED was deprecated upstream; kept for this
 * kernel's API.
 */
static int std_irq_setup(struct smi_info *info)
{
	int rv;

	if (!info->irq)
		return 0;

	if (info->si_type == SI_BT) {
		rv = request_irq(info->irq,
				 si_bt_irq_handler,
				 IRQF_SHARED | IRQF_DISABLED,
				 DEVICE_NAME,
				 info);
		if (!rv)
			/* Enable the interrupt in the BT interface. */
			info->io.outputb(&info->io, IPMI_BT_INTMASK_REG,
					 IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
	} else
		rv = request_irq(info->irq,
				 si_irq_handler,
				 IRQF_SHARED | IRQF_DISABLED,
				 DEVICE_NAME,
				 info);
	if (rv) {
		dev_warn(info->dev, "%s unable to claim interrupt %d,"
			 " running polled\n",
			 DEVICE_NAME, info->irq);
		info->irq = 0;
	} else {
		info->irq_cleanup = std_irq_cleanup;
		dev_info(info->dev, "Using irq %d\n", info->irq);
	}

	return rv;
}
1364
/* I/O-port read for regsize == 1: one byte per register. */
static unsigned char port_inb(struct si_sm_io *io, unsigned int offset)
{
	unsigned int addr = io->addr_data;

	return inb(addr + (offset * io->regspacing));
}
1371
/* I/O-port write for regsize == 1. */
static void port_outb(struct si_sm_io *io, unsigned int offset,
		      unsigned char b)
{
	unsigned int addr = io->addr_data;

	outb(b, addr + (offset * io->regspacing));
}
1379
/* I/O-port read for regsize == 2: the 8-bit register sits at regshift. */
static unsigned char port_inw(struct si_sm_io *io, unsigned int offset)
{
	unsigned int addr = io->addr_data;

	return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
}
1386
/* I/O-port write for regsize == 2: place the byte at regshift. */
static void port_outw(struct si_sm_io *io, unsigned int offset,
		      unsigned char b)
{
	unsigned int addr = io->addr_data;

	outw(b << io->regshift, addr + (offset * io->regspacing));
}
1394
/* I/O-port read for regsize == 4. */
static unsigned char port_inl(struct si_sm_io *io, unsigned int offset)
{
	unsigned int addr = io->addr_data;

	return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
}
1401
/* I/O-port write for regsize == 4. */
static void port_outl(struct si_sm_io *io, unsigned int offset,
		      unsigned char b)
{
	unsigned int addr = io->addr_data;

	outl(b << io->regshift, addr+(offset * io->regspacing));
}
1409
/* Release each per-register I/O region claimed by port_setup(). */
static void port_cleanup(struct smi_info *info)
{
	unsigned int addr = info->io.addr_data;
	int          idx;

	if (addr) {
		for (idx = 0; idx < info->io_size; idx++)
			release_region(addr + idx * info->io.regspacing,
				       info->io.regsize);
	}
}
1421
/*
 * Claim the I/O ports for this interface and select width-specific
 * accessors.  Returns 0 on success, -ENODEV for a missing address,
 * -EINVAL for an unsupported register size, or -EIO when a region
 * cannot be claimed (all earlier claims are rolled back).
 */
static int port_setup(struct smi_info *info)
{
	unsigned int addr = info->io.addr_data;
	int          idx;

	if (!addr)
		return -ENODEV;

	info->io_cleanup = port_cleanup;

	/*
	 * Figure out the actual inb/inw/inl/etc routine to use based
	 * upon the register size.
	 */
	switch (info->io.regsize) {
	case 1:
		info->io.inputb = port_inb;
		info->io.outputb = port_outb;
		break;
	case 2:
		info->io.inputb = port_inw;
		info->io.outputb = port_outw;
		break;
	case 4:
		info->io.inputb = port_inl;
		info->io.outputb = port_outl;
		break;
	default:
		dev_warn(info->dev, "Invalid register size: %d\n",
			 info->io.regsize);
		return -EINVAL;
	}

	/*
	 * Some BIOSes reserve disjoint I/O regions in their ACPI
	 * tables.  This causes problems when trying to register the
	 * entire I/O region.  Therefore we must register each I/O
	 * port separately.
	 */
	for (idx = 0; idx < info->io_size; idx++) {
		if (request_region(addr + idx * info->io.regspacing,
				   info->io.regsize, DEVICE_NAME) == NULL) {
			/* Undo allocations */
			while (idx--) {
				release_region(addr + idx * info->io.regspacing,
					       info->io.regsize);
			}
			return -EIO;
		}
	}
	return 0;
}
1474
/* Memory-mapped read for regsize == 1. */
static unsigned char intf_mem_inb(struct si_sm_io *io, unsigned int offset)
{
	return readb((io->addr)+(offset * io->regspacing));
}
1479
/* Memory-mapped write for regsize == 1. */
static void intf_mem_outb(struct si_sm_io *io, unsigned int offset,
		     unsigned char b)
{
	writeb(b, (io->addr)+(offset * io->regspacing));
}
1485
/* Memory-mapped read for regsize == 2: the 8-bit register sits at regshift. */
static unsigned char intf_mem_inw(struct si_sm_io *io, unsigned int offset)
{
	return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift)
		& 0xff;
}
1491
1492static void intf_mem_outw(struct si_sm_io *io, unsigned int offset,
1493		     unsigned char b)
1494{
1495	writeb(b << io->regshift, (io->addr)+(offset * io->regspacing));
1496}
1497
/* Memory-mapped read for regsize == 4. */
static unsigned char intf_mem_inl(struct si_sm_io *io, unsigned int offset)
{
	return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift)
		& 0xff;
}
1503
/* Memory-mapped write for regsize == 4. */
static void intf_mem_outl(struct si_sm_io *io, unsigned int offset,
		     unsigned char b)
{
	writel(b << io->regshift, (io->addr)+(offset * io->regspacing));
}
1509
#ifdef readq
/* Memory-mapped read for regsize == 8 (only where the arch has readq). */
static unsigned char mem_inq(struct si_sm_io *io, unsigned int offset)
{
	return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift)
		& 0xff;
}

/* Memory-mapped write for regsize == 8. */
static void mem_outq(struct si_sm_io *io, unsigned int offset,
		     unsigned char b)
{
	writeq(b << io->regshift, (io->addr)+(offset * io->regspacing));
}
#endif
1523
/*
 * Unmap and release the memory region claimed by mem_setup().  The
 * mapsize computation must match the one in mem_setup() exactly.
 */
static void mem_cleanup(struct smi_info *info)
{
	unsigned long addr = info->io.addr_data;
	int           mapsize;

	if (info->io.addr) {
		iounmap(info->io.addr);

		mapsize = ((info->io_size * info->io.regspacing)
			   - (info->io.regspacing - info->io.regsize));

		release_mem_region(addr, mapsize);
	}
}
1538
/*
 * Claim and ioremap the memory-mapped registers and select the
 * width-specific accessors.  Returns 0 on success, -ENODEV for a
 * missing address, -EINVAL for an unsupported register size, or
 * -EIO when the region cannot be claimed or mapped.
 */
static int mem_setup(struct smi_info *info)
{
	unsigned long addr = info->io.addr_data;
	int           mapsize;

	if (!addr)
		return -ENODEV;

	info->io_cleanup = mem_cleanup;

	/*
	 * Figure out the actual readb/readw/readl/etc routine to use based
	 * upon the register size.
	 */
	switch (info->io.regsize) {
	case 1:
		info->io.inputb = intf_mem_inb;
		info->io.outputb = intf_mem_outb;
		break;
	case 2:
		info->io.inputb = intf_mem_inw;
		info->io.outputb = intf_mem_outw;
		break;
	case 4:
		info->io.inputb = intf_mem_inl;
		info->io.outputb = intf_mem_outl;
		break;
#ifdef readq
	case 8:
		info->io.inputb = mem_inq;
		info->io.outputb = mem_outq;
		break;
#endif
	default:
		dev_warn(info->dev, "Invalid register size: %d\n",
			 info->io.regsize);
		return -EINVAL;
	}

	/*
	 * Calculate the total amount of memory to claim.  This is an
	 * unusual looking calculation, but it avoids claiming any
	 * more memory than it has to.  It will claim everything
	 * between the first address to the end of the last full
	 * register.
	 */
	mapsize = ((info->io_size * info->io.regspacing)
		   - (info->io.regspacing - info->io.regsize));

	if (request_mem_region(addr, mapsize, DEVICE_NAME) == NULL)
		return -EIO;

	info->io.addr = ioremap(addr, mapsize);
	if (info->io.addr == NULL) {
		release_mem_region(addr, mapsize);
		return -EIO;
	}
	return 0;
}
1598
1599/*
1600 * Parms come in as <op1>[:op2[:op3...]].  ops are:
1601 *   add|remove,kcs|bt|smic,mem|i/o,<address>[,<opt1>[,<opt2>[,...]]]
1602 * Options are:
1603 *   rsp=<regspacing>
1604 *   rsi=<regsize>
1605 *   rsh=<regshift>
1606 *   irq=<irq>
1607 *   ipmb=<ipmb addr>
1608 */
/* Parsed forms of the hotmod parameter fields. */
enum hotmod_op { HM_ADD, HM_REMOVE };
struct hotmod_vals {
	char *name;
	int  val;
};
/* NULL-terminated name -> value tables scanned by parse_str(). */
static struct hotmod_vals hotmod_ops[] = {
	{ "add",	HM_ADD },
	{ "remove",	HM_REMOVE },
	{ NULL }
};
static struct hotmod_vals hotmod_si[] = {
	{ "kcs",	SI_KCS },
	{ "smic",	SI_SMIC },
	{ "bt",		SI_BT },
	{ NULL }
};
static struct hotmod_vals hotmod_as[] = {
	{ "mem",	IPMI_MEM_ADDR_SPACE },
	{ "i/o",	IPMI_IO_ADDR_SPACE },
	{ NULL }
};
1630
1631static int parse_str(struct hotmod_vals *v, int *val, char *name, char **curr)
1632{
1633	char *s;
1634	int  i;
1635
1636	s = strchr(*curr, ',');
1637	if (!s) {
1638		printk(KERN_WARNING PFX "No hotmod %s given.\n", name);
1639		return -EINVAL;
1640	}
1641	*s = '\0';
1642	s++;
1643	for (i = 0; hotmod_ops[i].name; i++) {
1644		if (strcmp(*curr, v[i].name) == 0) {
1645			*val = v[i].val;
1646			*curr = s;
1647			return 0;
1648		}
1649	}
1650
1651	printk(KERN_WARNING PFX "Invalid hotmod %s '%s'\n", name, *curr);
1652	return -EINVAL;
1653}
1654
/*
 * If curr names this integer option, parse its value into *val.
 * Returns 1 when the option was consumed, 0 when the name does not
 * match, and -EINVAL (after logging) for a missing or malformed value.
 */
static int check_hotmod_int_op(const char *curr, const char *option,
			       const char *name, int *val)
{
	char *n;

	if (strcmp(curr, name) == 0) {
		if (!option) {
			printk(KERN_WARNING PFX
			       "No option given for '%s'\n",
			       curr);
			return -EINVAL;
		}
		*val = simple_strtoul(option, &n, 0);
		if ((*n != '\0') || (*option == '\0')) {
			printk(KERN_WARNING PFX
			       "Bad option given for '%s'\n",
			       curr);
			return -EINVAL;
		}
		return 1;
	}
	return 0;
}
1678
/* Allocate a zeroed smi_info with its spinlocks initialized. */
static struct smi_info *smi_info_alloc(void)
{
	struct smi_info *info = kzalloc(sizeof(*info), GFP_KERNEL);

	if (info) {
		spin_lock_init(&info->si_lock);
		spin_lock_init(&info->msg_lock);
	}
	return info;
}
1689
1690static int hotmod_handler(const char *val, struct kernel_param *kp)
1691{
1692	char *str = kstrdup(val, GFP_KERNEL);
1693	int  rv;
1694	char *next, *curr, *s, *n, *o;
1695	enum hotmod_op op;
1696	enum si_type si_type;
1697	int  addr_space;
1698	unsigned long addr;
1699	int regspacing;
1700	int regsize;
1701	int regshift;
1702	int irq;
1703	int ipmb;
1704	int ival;
1705	int len;
1706	struct smi_info *info;
1707
1708	if (!str)
1709		return -ENOMEM;
1710
1711	/* Kill any trailing spaces, as we can get a "\n" from echo. */
1712	len = strlen(str);
1713	ival = len - 1;
1714	while ((ival >= 0) && isspace(str[ival])) {
1715		str[ival] = '\0';
1716		ival--;
1717	}
1718
1719	for (curr = str; curr; curr = next) {
1720		regspacing = 1;
1721		regsize = 1;
1722		regshift = 0;
1723		irq = 0;
1724		ipmb = 0; /* Choose the default if not specified */
1725
1726		next = strchr(curr, ':');
1727		if (next) {
1728			*next = '\0';
1729			next++;
1730		}
1731
1732		rv = parse_str(hotmod_ops, &ival, "operation", &curr);
1733		if (rv)
1734			break;
1735		op = ival;
1736
1737		rv = parse_str(hotmod_si, &ival, "interface type", &curr);
1738		if (rv)
1739			break;
1740		si_type = ival;
1741
1742		rv = parse_str(hotmod_as, &addr_space, "address space", &curr);
1743		if (rv)
1744			break;
1745
1746		s = strchr(curr, ',');
1747		if (s) {
1748			*s = '\0';
1749			s++;
1750		}
1751		addr = simple_strtoul(curr, &n, 0);
1752		if ((*n != '\0') || (*curr == '\0')) {
1753			printk(KERN_WARNING PFX "Invalid hotmod address"
1754			       " '%s'\n", curr);
1755			break;
1756		}
1757
1758		while (s) {
1759			curr = s;
1760			s = strchr(curr, ',');
1761			if (s) {
1762				*s = '\0';
1763				s++;
1764			}
1765			o = strchr(curr, '=');
1766			if (o) {
1767				*o = '\0';
1768				o++;
1769			}
1770			rv = check_hotmod_int_op(curr, o, "rsp", &regspacing);
1771			if (rv < 0)
1772				goto out;
1773			else if (rv)
1774				continue;
1775			rv = check_hotmod_int_op(curr, o, "rsi", &regsize);
1776			if (rv < 0)
1777				goto out;
1778			else if (rv)
1779				continue;
1780			rv = check_hotmod_int_op(curr, o, "rsh", &regshift);
1781			if (rv < 0)
1782				goto out;
1783			else if (rv)
1784				continue;
1785			rv = check_hotmod_int_op(curr, o, "irq", &irq);
1786			if (rv < 0)
1787				goto out;
1788			else if (rv)
1789				continue;
1790			rv = check_hotmod_int_op(curr, o, "ipmb", &ipmb);
1791			if (rv < 0)
1792				goto out;
1793			else if (rv)
1794				continue;
1795
1796			rv = -EINVAL;
1797			printk(KERN_WARNING PFX
1798			       "Invalid hotmod option '%s'\n",
1799			       curr);
1800			goto out;
1801		}
1802
1803		if (op == HM_ADD) {
1804			info = smi_info_alloc();
1805			if (!info) {
1806				rv = -ENOMEM;
1807				goto out;
1808			}
1809
1810			info->addr_source = SI_HOTMOD;
1811			info->si_type = si_type;
1812			info->io.addr_data = addr;
1813			info->io.addr_type = addr_space;
1814			if (addr_space == IPMI_MEM_ADDR_SPACE)
1815				info->io_setup = mem_setup;
1816			else
1817				info->io_setup = port_setup;
1818
1819			info->io.addr = NULL;
1820			info->io.regspacing = regspacing;
1821			if (!info->io.regspacing)
1822				info->io.regspacing = DEFAULT_REGSPACING;
1823			info->io.regsize = regsize;
1824			if (!info->io.regsize)
1825				info->io.regsize = DEFAULT_REGSPACING;
1826			info->io.regshift = regshift;
1827			info->irq = irq;
1828			if (info->irq)
1829				info->irq_setup = std_irq_setup;
1830			info->slave_addr = ipmb;
1831
1832			if (!add_smi(info)) {
1833				if (try_smi_init(info))
1834					cleanup_one_si(info);
1835			} else {
1836				kfree(info);
1837			}
1838		} else {
1839			/* remove */
1840			struct smi_info *e, *tmp_e;
1841
1842			mutex_lock(&smi_infos_lock);
1843			list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
1844				if (e->io.addr_type != addr_space)
1845					continue;
1846				if (e->si_type != si_type)
1847					continue;
1848				if (e->io.addr_data == addr)
1849					cleanup_one_si(e);
1850			}
1851			mutex_unlock(&smi_infos_lock);
1852		}
1853	}
1854	rv = len;
1855 out:
1856	kfree(str);
1857	return rv;
1858}
1859
1860static int __devinit hardcode_find_bmc(void)
1861{
1862	int ret = -ENODEV;
1863	int             i;
1864	struct smi_info *info;
1865
1866	for (i = 0; i < SI_MAX_PARMS; i++) {
1867		if (!ports[i] && !addrs[i])
1868			continue;
1869
1870		info = smi_info_alloc();
1871		if (!info)
1872			return -ENOMEM;
1873
1874		info->addr_source = SI_HARDCODED;
1875		printk(KERN_INFO PFX "probing via hardcoded address\n");
1876
1877		if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
1878			info->si_type = SI_KCS;
1879		} else if (strcmp(si_type[i], "smic") == 0) {
1880			info->si_type = SI_SMIC;
1881		} else if (strcmp(si_type[i], "bt") == 0) {
1882			info->si_type = SI_BT;
1883		} else {
1884			printk(KERN_WARNING PFX "Interface type specified "
1885			       "for interface %d, was invalid: %s\n",
1886			       i, si_type[i]);
1887			kfree(info);
1888			continue;
1889		}
1890
1891		if (ports[i]) {
1892			/* An I/O port */
1893			info->io_setup = port_setup;
1894			info->io.addr_data = ports[i];
1895			info->io.addr_type = IPMI_IO_ADDR_SPACE;
1896		} else if (addrs[i]) {
1897			/* A memory port */
1898			info->io_setup = mem_setup;
1899			info->io.addr_data = addrs[i];
1900			info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1901		} e

Large files files are truncated, but you can click here to view the full file