PageRenderTime 112ms CodeModel.GetById 15ms app.highlight 80ms RepoModel.GetById 1ms app.codeStats 1ms

/drivers/scsi/dpt_i2o.c

http://github.com/mirrors/linux
C | 3556 lines | 2743 code | 436 blank | 377 comment | 547 complexity | 9379e7d43a810c3873574c928413e5c7 MD5 | raw file

Large files are truncated, but you can click here to view the full file

   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/***************************************************************************
   3                          dpti.c  -  description
   4                             -------------------
   5    begin                : Thu Sep 7 2000
   6    copyright            : (C) 2000 by Adaptec
   7
   8			   July 30, 2001 First version being submitted
   9			   for inclusion in the kernel.  V2.4
  10
  11    See Documentation/scsi/dpti.rst for history, notes, license info
  12    and credits
  13 ***************************************************************************/
  14
  15/***************************************************************************
  16 *                                                                         *
  17 *                                                                         *
  18 ***************************************************************************/
  19/***************************************************************************
  20 * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp>
  21 - Support 2.6 kernel and DMA-mapping
  22 - ioctl fix for raid tools
  23 - use schedule_timeout in long long loop
  24 **************************************************************************/
  25
  26/*#define DEBUG 1 */
  27/*#define UARTDELAY 1 */
  28
  29#include <linux/module.h>
  30
  31MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
  32MODULE_DESCRIPTION("Adaptec I2O RAID Driver");
  33
  34////////////////////////////////////////////////////////////////
  35
  36#include <linux/ioctl.h>	/* For SCSI-Passthrough */
  37#include <linux/uaccess.h>
  38
  39#include <linux/stat.h>
  40#include <linux/slab.h>		/* for kmalloc() */
  41#include <linux/pci.h>		/* for PCI support */
  42#include <linux/proc_fs.h>
  43#include <linux/blkdev.h>
  44#include <linux/delay.h>	/* for udelay */
  45#include <linux/interrupt.h>
  46#include <linux/kernel.h>	/* for printk */
  47#include <linux/sched.h>
  48#include <linux/reboot.h>
  49#include <linux/spinlock.h>
  50#include <linux/dma-mapping.h>
  51
  52#include <linux/timer.h>
  53#include <linux/string.h>
  54#include <linux/ioport.h>
  55#include <linux/mutex.h>
  56
  57#include <asm/processor.h>	/* for boot_cpu_data */
  58#include <asm/pgtable.h>
  59#include <asm/io.h>		/* for virt_to_bus, etc. */
  60
  61#include <scsi/scsi.h>
  62#include <scsi/scsi_cmnd.h>
  63#include <scsi/scsi_device.h>
  64#include <scsi/scsi_host.h>
  65#include <scsi/scsi_tcq.h>
  66
  67#include "dpt/dptsig.h"
  68#include "dpti.h"
  69
  70/*============================================================================
  71 * Create a binary signature - this is read by dptsig
  72 * Needed for our management apps
  73 *============================================================================
  74 */
  75static DEFINE_MUTEX(adpt_mutex);
/* Binary driver signature consumed by DPT/Adaptec management tools (dptsig).
 * Initializers are positional per dpt_sig_S (see dpt/dptsig.h): signature
 * bytes, version, processor family/type, file type, OEM/OS/capability/device
 * flags, driver version numbers, build date and description string. */
static dpt_sig_S DPTI_sig = {
	{'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
#ifdef __i386__
	PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
#elif defined(__ia64__)
	PROC_INTEL, PROC_IA64,
#elif defined(__sparc__)
	PROC_ULTRASPARC, PROC_ULTRASPARC,
#elif defined(__alpha__)
	PROC_ALPHA, PROC_ALPHA,
#else
	(-1),(-1),	/* unknown processor family/type */
#endif
	 FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
	ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
	DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
};
  93
  94
  95
  96
  97/*============================================================================
  98 * Globals
  99 *============================================================================
 100 */
 101
 102static DEFINE_MUTEX(adpt_configuration_lock);
 103
 104static struct i2o_sys_tbl *sys_tbl;
 105static dma_addr_t sys_tbl_pa;
 106static int sys_tbl_ind;
 107static int sys_tbl_len;
 108
 109static adpt_hba* hba_chain = NULL;
 110static int hba_count = 0;
 111
 112static struct class *adpt_sysfs_class;
 113
 114static long adpt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
 115#ifdef CONFIG_COMPAT
 116static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
 117#endif
 118
/* File operations for the /dev/dpti<N> management nodes: ioctl-only
 * interface (no read/write), with a compat path for 32-bit userspace. */
static const struct file_operations adpt_fops = {
	.unlocked_ioctl	= adpt_unlocked_ioctl,
	.open		= adpt_open,
	.release	= adpt_close,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_adpt_ioctl,
#endif
	.llseek		= noop_llseek,
};
 128
 129/* Structures and definitions for synchronous message posting.
 130 * See adpt_i2o_post_wait() for description
 131 * */
struct adpt_i2o_post_wait_data
{
	int status;	/* result of the synchronous message (see adpt_i2o_post_wait()) */
	u32 id;		/* identifies this entry (allocated from adpt_post_wait_id) */
	adpt_wait_queue_head_t *wq;	/* wait queue the posting thread sleeps on */
	struct adpt_i2o_post_wait_data *next;	/* next entry in adpt_post_wait_queue */
};
 139
 140static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
 141static u32 adpt_post_wait_id = 0;
 142static DEFINE_SPINLOCK(adpt_post_wait_lock);
 143
 144
 145/*============================================================================
 146 * 				Functions
 147 *============================================================================
 148 */
 149
 150static inline int dpt_dma64(adpt_hba *pHba)
 151{
 152	return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
 153}
 154
/* Upper 32 bits of a DMA address, for building 64-bit SG entries. */
static inline u32 dma_high(dma_addr_t addr)
{
	return upper_32_bits(addr);
}
 159
/* Lower 32 bits of a DMA address (plain truncation). */
static inline u32 dma_low(dma_addr_t addr)
{
	return (u32)addr;
}
 164
 165static u8 adpt_read_blink_led(adpt_hba* host)
 166{
 167	if (host->FwDebugBLEDflag_P) {
 168		if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
 169			return readb(host->FwDebugBLEDvalue_P);
 170		}
 171	}
 172	return 0;
 173}
 174
 175/*============================================================================
 176 * Scsi host template interface functions
 177 *============================================================================
 178 */
 179
#ifdef MODULE
/* PCI IDs handled by this driver; exported below via MODULE_DEVICE_TABLE()
 * so modprobe/hotplug can match the module to the hardware.  Only needed
 * when built as a module (MODULE_DEVICE_TABLE expands to nothing otherwise,
 * which is why the table itself is guarded too). */
static struct pci_device_id dptids[] = {
	{ PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ 0, }
};
#endif

MODULE_DEVICE_TABLE(pci,dptids);
 189
/*
 * Probe for all Adaptec/DPT I2O RAID controllers and walk each IOP
 * through the I2O bring-up sequence (activate -> system table ->
 * online -> LCT), attach a Scsi_Host per controller and register the
 * character-device control node.  Any HBA that fails a stage is
 * deleted and the rest continue.  Returns the number of HBAs left
 * operational.
 */
static int adpt_detect(struct scsi_host_template* sht)
{
	struct pci_dev *pDev = NULL;
	adpt_hba *pHba;
	adpt_hba *next;

	PINFO("Detecting Adaptec I2O RAID controllers...\n");

        /* search for all Adaptec I2O RAID cards */
	while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
		if(pDev->device == PCI_DPT_DEVICE_ID ||
		   pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
			if(adpt_install_hba(sht, pDev) ){
				PERROR("Could not Init an I2O RAID device\n");
				PERROR("Will not try to detect others.\n");
				return hba_count-1;
			}
			/* hold a reference for the lifetime of the HBA */
			pci_dev_get(pDev);
		}
	}

	/* In INIT state, Activate IOPs */
	for (pHba = hba_chain; pHba; pHba = next) {
		/* delete_hba() unlinks pHba, so save the successor first */
		next = pHba->next;
		// Activate does get status , init outbound, and get hrt
		if (adpt_i2o_activate_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
		}
	}


	/* Active IOPs in HOLD state */

rebuild_sys_tab:
	if (hba_chain == NULL)
		return 0;

	/*
	 * If build_sys_table fails, we kill everything and bail
	 * as we can't init the IOPs w/o a system table
	 */
	if (adpt_i2o_build_sys_table() < 0) {
		adpt_i2o_sys_shutdown();
		return 0;
	}

	PDEBUG("HBA's in HOLD state\n");

	/* If IOP don't get online, we need to rebuild the System table */
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (adpt_i2o_online_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
			goto rebuild_sys_tab;
		}
	}

	/* Active IOPs now in OPERATIONAL state */
	PDEBUG("HBA's in OPERATIONAL state\n");

	printk("dpti: If you have a lot of devices this could take a few minutes.\n");
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
		if (adpt_i2o_lct_get(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}

		if (adpt_i2o_parse_lct(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		adpt_inquiry(pHba);
	}

	/* the sysfs class is optional; failure only disables device_create() below */
	adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
	if (IS_ERR(adpt_sysfs_class)) {
		printk(KERN_WARNING"dpti: unable to create dpt_i2o class\n");
		adpt_sysfs_class = NULL;
	}

	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		if (adpt_scsi_host_alloc(pHba, sht) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		pHba->initialized = TRUE;
		/* HBA is fully up; allow adpt_queue_lck() to accept commands */
		pHba->state &= ~DPTI_STATE_RESET;
		if (adpt_sysfs_class) {
			struct device *dev = device_create(adpt_sysfs_class,
				NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL,
				"dpti%d", pHba->unit);
			if (IS_ERR(dev)) {
				printk(KERN_WARNING"dpti%d: unable to "
					"create device in dpt_i2o class\n",
					pHba->unit);
			}
		}
	}

	// Register our control device node
	// nodes will need to be created in /dev to access this
	// the nodes can not be created from within the driver
	if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
		adpt_i2o_sys_shutdown();
		return 0;
	}
	return hba_count;
}
 300
 301
 302static void adpt_release(adpt_hba *pHba)
 303{
 304	struct Scsi_Host *shost = pHba->host;
 305
 306	scsi_remove_host(shost);
 307//	adpt_i2o_quiesce_hba(pHba);
 308	adpt_i2o_delete_hba(pHba);
 309	scsi_host_put(shost);
 310}
 311
 312
/*
 * Issue a standard SCSI INQUIRY to the adapter itself (ADAPTER_TID)
 * via a DPT-private I2O SCSI_EXEC message, and build pHba->detail
 * ("Vendor: Adaptec  Model: ... FW: ...") from the response.
 * Best-effort: on failure a generic detail string is substituted.
 */
static void adpt_inquiry(adpt_hba* pHba)
{
	u32 msg[17];
	u32 *mptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	u32 len;
	u32 reqlen;
	u8* buf;
	dma_addr_t addr;
	u8  scb[16];
	s32 rcode;

	memset(msg, 0, sizeof(msg));
	buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
	if(!buf){
		printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
		return;
	}
	memset((void*)buf, 0, 36);

	len = 36;		/* standard INQUIRY allocation length */
	direction = 0x00000000;
	scsidir  =0x40000000;	// DATA IN  (iop<--dev)

	/* message length depends on whether we need a 64-bit SG element */
	if (dpt_dma64(pHba))
		reqlen = 17;		// SINGLE SGE, 64 bit
	else
		reqlen = 14;		// SINGLE SGE, 32 bit
	/* Stick the headers on */
	msg[0] = reqlen<<16 | SGL_OFFSET_12;
	msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
	msg[2] = 0;
	msg[3]  = 0;
	// Adaptec/DPT Private stuff
	msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
	msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/;
	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
	msg[6] = scsidir|0x20a00000| 6 /* cmd len*/;

	mptr=msg+7;

	memset(scb, 0, sizeof(scb));
	// Write SCSI command into the message - always 16 byte block
	scb[0] = INQUIRY;
	scb[1] = 0;
	scb[2] = 0;
	scb[3] = 0;
	scb[4] = 36;	/* allocation length, matches len above */
	scb[5] = 0;
	// Don't care about the rest of scb

	memcpy(mptr, scb, sizeof(scb));
	mptr+=4;
	lenptr=mptr++;		/* Remember me - fill in when we know */

	/* Now fill in the SGList and command */
	*lenptr = len;
	if (dpt_dma64(pHba)) {
		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
		*mptr++ = 1 << PAGE_SHIFT;
		*mptr++ = 0xD0000000|direction|len;	/* last SGE, end of buffer */
		*mptr++ = dma_low(addr);
		*mptr++ = dma_high(addr);
	} else {
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = addr;
	}

	// Send it on it's way
	rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
	if (rcode != 0) {
		sprintf(pHba->detail, "Adaptec I2O RAID");
		printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
		/* NOTE(review): on -ETIME/-EINTR the buffer is deliberately
		 * NOT freed -- presumably because the firmware may still DMA
		 * into it after we gave up waiting; confirm before changing. */
		if (rcode != -ETIME && rcode != -EINTR)
			dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	} else {
		/* compose fixed-offset "Vendor/Model/FW" string from the
		 * INQUIRY response (model at buf[16..31], rev at buf[32..35]) */
		memset(pHba->detail, 0, sizeof(pHba->detail));
		memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
		memcpy(&(pHba->detail[16]), " Model: ", 8);
		memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
		memcpy(&(pHba->detail[40]), " FW: ", 4);
		memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
		pHba->detail[48] = '\0';	/* precautionary */
		dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	}
	adpt_i2o_status_get(pHba);
	return ;
}
 406
 407
 408static int adpt_slave_configure(struct scsi_device * device)
 409{
 410	struct Scsi_Host *host = device->host;
 411	adpt_hba* pHba;
 412
 413	pHba = (adpt_hba *) host->hostdata[0];
 414
 415	if (host->can_queue && device->tagged_supported) {
 416		scsi_change_queue_depth(device,
 417				host->can_queue - 1);
 418	}
 419	return 0;
 420}
 421
/*
 * Queue one SCSI command to the firmware (invoked through DEF_SCSI_QCMD,
 * i.e. with the host lock held).  Resolves the per-device adpt_device on
 * first use, defers commands while the HBA or device is resetting, and
 * hands off to adpt_scsi_to_i2o() for message construction/posting.
 */
static int adpt_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
{
	adpt_hba* pHba = NULL;
	struct adpt_device* pDev = NULL;	/* dpt per device information */

	cmd->scsi_done = done;
	/*
	 * SCSI REQUEST_SENSE commands will be executed automatically by the
	 * Host Adapter for any errors, so they should not be executed
	 * explicitly unless the Sense Data is zero indicating that no error
	 * occurred.
	 */

	if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
		cmd->result = (DID_OK << 16);
		cmd->scsi_done(cmd);
		return 0;
	}

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	if (!pHba) {
		return FAILED;
	}

	rmb();
	/* while the HBA is resetting, push the command back to the midlayer */
	if ((pHba->state) & DPTI_STATE_RESET)
		return SCSI_MLQUEUE_HOST_BUSY;

	// TODO if the cmd->device if offline then I may need to issue a bus rescan
	// followed by a get_lct to see if the device is there anymore
	if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
		/*
		 * First command request for this device.  Set up a pointer
		 * to the device structure.  This should be a TEST_UNIT_READY
		 * command from scan_scsis_single.
		 */
		if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun)) == NULL) {
			// TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response
			// with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
			cmd->result = (DID_NO_CONNECT << 16);
			cmd->scsi_done(cmd);
			return 0;
		}
		cmd->device->hostdata = pDev;
	}
	pDev->pScsi_dev = cmd->device;

	/*
	 * If we are being called from when the device is being reset,
	 * delay processing of the command until later.
	 */
	if (pDev->state & DPTI_DEV_RESET ) {
		return FAILED;
	}
	return adpt_scsi_to_i2o(pHba, cmd, pDev);
}
 478
/* Generate adpt_queue(): the lock-taking queuecommand wrapper around adpt_queue_lck(). */
static DEF_SCSI_QCMD(adpt_queue)
 480
/*
 * Provide a default BIOS disk geometry (heads/sectors/cylinders) based
 * on capacity, for fdisk-style partitioning tools.
 */
static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
		sector_t capacity, int geom[])
{
	int heads=-1;
	int sectors=-1;
	int cylinders=-1;

	// *** First lets set the default geometry ****

	// If the capacity is less than 0x2000
	if (capacity < 0x2000 ) {	// floppy
		heads = 18;
		sectors = 2;
	}
	// else if between 0x2000 and 0x20000
	else if (capacity < 0x20000) {
		heads = 64;
		sectors = 32;
	}
	// else if between 0x20000 and 0x40000
	else if (capacity < 0x40000) {
		heads = 65;
		sectors = 63;
	}
	// else if between 0x40000 and 0x80000
	else if (capacity < 0x80000) {
		heads = 128;
		sectors = 63;
	}
	// else if greater than 0x80000
	else {
		heads = 255;
		sectors = 63;
	}
	/* NOTE(review): sector_div() divides capacity in place and returns
	 * the REMAINDER, so cylinders receives the remainder rather than
	 * the quotient.  Long-standing behavior here -- confirm against
	 * upstream before changing. */
	cylinders = sector_div(capacity, heads * sectors);

	// Special case if CDROM
	if(sdev->type == 5) {  // CDROM
		heads = 252;
		sectors = 63;
		cylinders = 1111;
	}

	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;

	PDEBUG("adpt_bios_param: exit\n");
	return 0;
}
 531
 532
 533static const char *adpt_info(struct Scsi_Host *host)
 534{
 535	adpt_hba* pHba;
 536
 537	pHba = (adpt_hba *) host->hostdata[0];
 538	return (char *) (pHba->detail);
 539}
 540
/*
 * /proc/scsi show handler: print driver version, HBA detail, fifo/sg
 * sizes and every known device (per channel/id, chained per lun) with
 * its online state.
 */
static int adpt_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	struct adpt_device* d;
	int id;
	int chan;
	adpt_hba* pHba;
	int unit;

	// Find HBA (host bus adapter) we are looking for
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->host == host) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return 0;
	}
	host = pHba->host;

	seq_printf(m, "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
	seq_printf(m, "%s\n", pHba->detail);
	seq_printf(m, "SCSI Host=scsi%d  Control Node=/dev/%s  irq=%d\n", 
			pHba->host->host_no, pHba->name, host->irq);
	seq_printf(m, "\tpost fifo size  = %d\n\treply fifo size = %d\n\tsg table size   = %d\n\n",
			host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);

	seq_puts(m, "Devices:\n");
	for(chan = 0; chan < MAX_CHANNEL; chan++) {
		for(id = 0; id < MAX_ID; id++) {
			d = pHba->channel[chan].device[id];
			/* each (chan,id) slot heads a list of luns via next_lun */
			while(d) {
				seq_printf(m,"\t%-24.24s", d->pScsi_dev->vendor);
				seq_printf(m," Rev: %-8.8s\n", d->pScsi_dev->rev);

				unit = d->pI2o_dev->lct_data.tid;
				seq_printf(m, "\tTID=%d, (Channel=%d, Target=%d, Lun=%llu)  (%s)\n\n",
					       unit, (int)d->scsi_channel, (int)d->scsi_id, d->scsi_lun,
					       scsi_device_online(d->pScsi_dev)? "online":"offline"); 
				d = d->next_lun;
			}
		}
	}
	return 0;
}
 587
 588/*
 589 *	Turn a pointer to ioctl reply data into an u32 'context'
 590 */
static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
{
#if BITS_PER_LONG == 32
	/* a pointer fits into an u32 directly on 32-bit kernels */
	return (u32)(unsigned long)reply;
#else
	ulong flags = 0;
	u32 nr, i;

	/* 64-bit: stash the pointer in a per-HBA table (under host_lock)
	 * and hand out its index as the context */
	spin_lock_irqsave(pHba->host->host_lock, flags);
	nr = ARRAY_SIZE(pHba->ioctl_reply_context);
	for (i = 0; i < nr; i++) {
		if (pHba->ioctl_reply_context[i] == NULL) {
			pHba->ioctl_reply_context[i] = reply;
			break;
		}
	}
	spin_unlock_irqrestore(pHba->host->host_lock, flags);
	if (i >= nr) {
		/* table exhausted; callers treat (u32)-1 as failure */
		printk(KERN_WARNING"%s: Too many outstanding "
				"ioctl commands\n", pHba->name);
		return (u32)-1;
	}

	return i;
#endif
}
 617
 618/*
 619 *	Go from an u32 'context' to a pointer to ioctl reply data.
 620 */
static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
{
#if BITS_PER_LONG == 32
	return (void *)(unsigned long)context;
#else
	/* consume the table slot claimed by adpt_ioctl_to_context().
	 * NOTE(review): unlike the allocation side this does not take
	 * host_lock -- presumably callers already hold it; confirm. */
	void *p = pHba->ioctl_reply_context[context];
	pHba->ioctl_reply_context[context] = NULL;

	return p;
#endif
}
 632
 633/*===========================================================================
 634 * Error Handling routines
 635 *===========================================================================
 636 */
 637
/*
 * SCSI EH abort handler: ask the firmware to abort the outstanding
 * command, identified by its block-layer tag.  Returns SUCCESS/FAILED.
 */
static int adpt_abort(struct scsi_cmnd * cmd)
{
	adpt_hba* pHba = NULL;	/* host bus adapter structure */
	struct adpt_device* dptdevice;	/* dpt per device information */
	u32 msg[5];
	int rcode;

	pHba = (adpt_hba*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to Abort\n",pHba->name);
	if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
		printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
		return FAILED;
	}

	memset(msg, 0, sizeof(msg));
	msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
	msg[2] = 0;
	msg[3]= 0;
	/* Add 1 to avoid firmware treating it as invalid command */
	msg[4] = cmd->request->tag + 1;
	/* post synchronously under host_lock, waiting as long as it takes */
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Abort failed.\n",pHba->name);
		return FAILED;
	} 
	printk(KERN_INFO"%s: Abort complete.\n",pHba->name);
	return SUCCESS;
}
 675
 676
#define I2O_DEVICE_RESET 0x27
// This is the same for BLK and SCSI devices
// NOTE this is wrong in the i2o.h definitions
// This is not currently supported by our adapter but we issue it anyway
/*
 * SCSI EH device-reset handler: send an I2O DEVICE_RESET to the target
 * TID.  DPTI_DEV_RESET is set on the device for the duration so
 * adpt_queue_lck() defers new commands.  Returns SUCCESS/FAILED.
 */
static int adpt_device_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;
	int old_state;
	struct adpt_device* d = cmd->device->hostdata;

	pHba = (void*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
	if (!d) {
		printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
		return FAILED;
	}
	memset(msg, 0, sizeof(msg));
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
	msg[2] = 0;
	msg[3] = 0;

	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	/* mark the device as resetting while we wait, then restore */
	old_state = d->state;
	d->state |= DPTI_DEV_RESET;
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	d->state = old_state;
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
		return SUCCESS;
	}
}
 721
 722
#define I2O_HBA_BUS_RESET 0x87
// This version of bus reset is called by the eh_error handler
/*
 * SCSI EH bus-reset handler: send an I2O HBA_BUS_RESET to the TID of
 * the channel the failing command belongs to.  Returns SUCCESS/FAILED.
 */
static int adpt_bus_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	memset(msg, 0, sizeof(msg));
	printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid );
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
	msg[2] = 0;
	msg[3] = 0;
	/* post synchronously under host_lock, waiting as long as it takes */
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
		return SUCCESS;
	}
}
 751
 752// This version of reset is called by the eh_error_handler
 753static int __adpt_reset(struct scsi_cmnd* cmd)
 754{
 755	adpt_hba* pHba;
 756	int rcode;
 757	char name[32];
 758
 759	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
 760	strncpy(name, pHba->name, sizeof(name));
 761	printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n", name, cmd->device->channel, pHba->channel[cmd->device->channel].tid);
 762	rcode =  adpt_hba_reset(pHba);
 763	if(rcode == 0){
 764		printk(KERN_WARNING"%s: HBA reset complete\n", name);
 765		return SUCCESS;
 766	} else {
 767		printk(KERN_WARNING"%s: HBA reset failed (%x)\n", name, rcode);
 768		return FAILED;
 769	}
 770}
 771
 772static int adpt_reset(struct scsi_cmnd* cmd)
 773{
 774	int rc;
 775
 776	spin_lock_irq(cmd->device->host->host_lock);
 777	rc = __adpt_reset(cmd);
 778	spin_unlock_irq(cmd->device->host->host_lock);
 779
 780	return rc;
 781}
 782
// This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset
/*
 * Re-run the full IOP bring-up sequence: activate -> system table ->
 * online -> LCT fetch/reparse.  DPTI_STATE_RESET is held across the
 * sequence so adpt_queue_lck() returns SCSI_MLQUEUE_HOST_BUSY in the
 * meantime.  On any failure the HBA is deleted and the error returned;
 * on success all outstanding commands are completed with DID_RESET.
 */
static int adpt_hba_reset(adpt_hba* pHba)
{
	int rcode;

	pHba->state |= DPTI_STATE_RESET;

	// Activate does get status , init outbound, and get hrt
	if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
		printk(KERN_ERR "%s: Could not activate\n", pHba->name);
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_build_sys_table()) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in HOLD state\n",pHba->name);

	if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in OPERATIONAL state\n",pHba->name);

	if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	/* reset finished; allow command queueing again */
	pHba->state &= ~DPTI_STATE_RESET;

	scsi_host_complete_all_commands(pHba->host, DID_RESET);
	return 0;	/* return success */
}
 823
 824/*===========================================================================
 825 * 
 826 *===========================================================================
 827 */
 828
 829
 830static void adpt_i2o_sys_shutdown(void)
 831{
 832	adpt_hba *pHba, *pNext;
 833	struct adpt_i2o_post_wait_data *p1, *old;
 834
 835	printk(KERN_INFO "Shutting down Adaptec I2O controllers.\n");
 836	printk(KERN_INFO "   This could take a few minutes if there are many devices attached\n");
 837	/* Delete all IOPs from the controller chain */
 838	/* They should have already been released by the
 839	 * scsi-core
 840	 */
 841	for (pHba = hba_chain; pHba; pHba = pNext) {
 842		pNext = pHba->next;
 843		adpt_i2o_delete_hba(pHba);
 844	}
 845
 846	/* Remove any timedout entries from the wait queue.  */
 847//	spin_lock_irqsave(&adpt_post_wait_lock, flags);
 848	/* Nothing should be outstanding at this point so just
 849	 * free them 
 850	 */
 851	for(p1 = adpt_post_wait_queue; p1;) {
 852		old = p1;
 853		p1 = p1->next;
 854		kfree(old);
 855	}
 856//	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
 857	adpt_post_wait_queue = NULL;
 858
 859	printk(KERN_INFO "Adaptec I2O controllers down.\n");
 860}
 861
 862static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
 863{
 864
 865	adpt_hba* pHba = NULL;
 866	adpt_hba* p = NULL;
 867	ulong base_addr0_phys = 0;
 868	ulong base_addr1_phys = 0;
 869	u32 hba_map0_area_size = 0;
 870	u32 hba_map1_area_size = 0;
 871	void __iomem *base_addr_virt = NULL;
 872	void __iomem *msg_addr_virt = NULL;
 873	int dma64 = 0;
 874
 875	int raptorFlag = FALSE;
 876
 877	if(pci_enable_device(pDev)) {
 878		return -EINVAL;
 879	}
 880
 881	if (pci_request_regions(pDev, "dpt_i2o")) {
 882		PERROR("dpti: adpt_config_hba: pci request region failed\n");
 883		return -EINVAL;
 884	}
 885
 886	pci_set_master(pDev);
 887
 888	/*
 889	 *	See if we should enable dma64 mode.
 890	 */
 891	if (sizeof(dma_addr_t) > 4 &&
 892	    dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32) &&
 893	    dma_set_mask(&pDev->dev, DMA_BIT_MASK(64)) == 0)
 894		dma64 = 1;
 895
 896	if (!dma64 && dma_set_mask(&pDev->dev, DMA_BIT_MASK(32)) != 0)
 897		return -EINVAL;
 898
 899	/* adapter only supports message blocks below 4GB */
 900	dma_set_coherent_mask(&pDev->dev, DMA_BIT_MASK(32));
 901
 902	base_addr0_phys = pci_resource_start(pDev,0);
 903	hba_map0_area_size = pci_resource_len(pDev,0);
 904
 905	// Check if standard PCI card or single BAR Raptor
 906	if(pDev->device == PCI_DPT_DEVICE_ID){
 907		if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){
 908			// Raptor card with this device id needs 4M
 909			hba_map0_area_size = 0x400000;
 910		} else { // Not Raptor - it is a PCI card
 911			if(hba_map0_area_size > 0x100000 ){ 
 912				hba_map0_area_size = 0x100000;
 913			}
 914		}
 915	} else {// Raptor split BAR config
 916		// Use BAR1 in this configuration
 917		base_addr1_phys = pci_resource_start(pDev,1);
 918		hba_map1_area_size = pci_resource_len(pDev,1);
 919		raptorFlag = TRUE;
 920	}
 921
 922#if BITS_PER_LONG == 64
 923	/*
 924	 *	The original Adaptec 64 bit driver has this comment here:
 925	 *	"x86_64 machines need more optimal mappings"
 926	 *
 927	 *	I assume some HBAs report ridiculously large mappings
 928	 *	and we need to limit them on platforms with IOMMUs.
 929	 */
 930	if (raptorFlag == TRUE) {
 931		if (hba_map0_area_size > 128)
 932			hba_map0_area_size = 128;
 933		if (hba_map1_area_size > 524288)
 934			hba_map1_area_size = 524288;
 935	} else {
 936		if (hba_map0_area_size > 524288)
 937			hba_map0_area_size = 524288;
 938	}
 939#endif
 940
 941	base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
 942	if (!base_addr_virt) {
 943		pci_release_regions(pDev);
 944		PERROR("dpti: adpt_config_hba: io remap failed\n");
 945		return -EINVAL;
 946	}
 947
 948        if(raptorFlag == TRUE) {
 949		msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
 950		if (!msg_addr_virt) {
 951			PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
 952			iounmap(base_addr_virt);
 953			pci_release_regions(pDev);
 954			return -EINVAL;
 955		}
 956	} else {
 957		msg_addr_virt = base_addr_virt;
 958	}
 959	
 960	// Allocate and zero the data structure
 961	pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL);
 962	if (!pHba) {
 963		if (msg_addr_virt != base_addr_virt)
 964			iounmap(msg_addr_virt);
 965		iounmap(base_addr_virt);
 966		pci_release_regions(pDev);
 967		return -ENOMEM;
 968	}
 969
 970	mutex_lock(&adpt_configuration_lock);
 971
 972	if(hba_chain != NULL){
 973		for(p = hba_chain; p->next; p = p->next);
 974		p->next = pHba;
 975	} else {
 976		hba_chain = pHba;
 977	}
 978	pHba->next = NULL;
 979	pHba->unit = hba_count;
 980	sprintf(pHba->name, "dpti%d", hba_count);
 981	hba_count++;
 982	
 983	mutex_unlock(&adpt_configuration_lock);
 984
 985	pHba->pDev = pDev;
 986	pHba->base_addr_phys = base_addr0_phys;
 987
 988	// Set up the Virtual Base Address of the I2O Device
 989	pHba->base_addr_virt = base_addr_virt;
 990	pHba->msg_addr_virt = msg_addr_virt;
 991	pHba->irq_mask = base_addr_virt+0x30;
 992	pHba->post_port = base_addr_virt+0x40;
 993	pHba->reply_port = base_addr_virt+0x44;
 994
 995	pHba->hrt = NULL;
 996	pHba->lct = NULL;
 997	pHba->lct_size = 0;
 998	pHba->status_block = NULL;
 999	pHba->post_count = 0;
1000	pHba->state = DPTI_STATE_RESET;
1001	pHba->pDev = pDev;
1002	pHba->devices = NULL;
1003	pHba->dma64 = dma64;
1004
1005	// Initializing the spinlocks
1006	spin_lock_init(&pHba->state_lock);
1007	spin_lock_init(&adpt_post_wait_lock);
1008
1009	if(raptorFlag == 0){
1010		printk(KERN_INFO "Adaptec I2O RAID controller"
1011				 " %d at %p size=%x irq=%d%s\n", 
1012			hba_count-1, base_addr_virt,
1013			hba_map0_area_size, pDev->irq,
1014			dma64 ? " (64-bit DMA)" : "");
1015	} else {
1016		printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n",
1017			hba_count-1, pDev->irq,
1018			dma64 ? " (64-bit DMA)" : "");
1019		printk(KERN_INFO"     BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
1020		printk(KERN_INFO"     BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
1021	}
1022
1023	if (request_irq (pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
1024		printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
1025		adpt_i2o_delete_hba(pHba);
1026		return -EINVAL;
1027	}
1028
1029	return 0;
1030}
1031
1032
/*
 * adpt_i2o_delete_hba - tear down one controller instance.
 *
 * Unlinks pHba from the global hba_chain, releases its IRQ and MMIO
 * mappings, frees every DMA-coherent firmware buffer (HRT, LCT, status
 * block, reply pool) plus the i2o_device list and per-channel LUN
 * chains, and finally frees the hba structure itself.  When the last
 * adapter goes away the char-dev major and the sysfs class are
 * released as well.  pHba must not be touched after this returns.
 */
static void adpt_i2o_delete_hba(adpt_hba* pHba)
{
	adpt_hba* p1;
	adpt_hba* p2;
	struct i2o_device* d;
	struct i2o_device* next;
	int i;
	int j;
	struct adpt_device* pDev;
	struct adpt_device* pNext;


	mutex_lock(&adpt_configuration_lock);
	/* The IRQ was registered against the host; only free it if a host
	   was ever attached. */
	if(pHba->host){
		free_irq(pHba->host->irq, pHba);
	}
	/* Unlink this adapter from the singly linked hba_chain. */
	p2 = NULL;
	for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
		if(p1 == pHba) {
			if(p2) {
				p2->next = p1->next;
			} else {
				hba_chain = p1->next;
			}
			break;
		}
	}

	hba_count--;
	mutex_unlock(&adpt_configuration_lock);

	/* Release MMIO windows; BAR1 is unmapped only when it was mapped
	   separately from BAR0. */
	iounmap(pHba->base_addr_virt);
	pci_release_regions(pHba->pDev);
	if(pHba->msg_addr_virt != pHba->base_addr_virt){
		iounmap(pHba->msg_addr_virt);
	}
	if(pHba->FwDebugBuffer_P)
	   	iounmap(pHba->FwDebugBuffer_P);
	/* Free the DMA-coherent firmware tables, sized exactly as at
	   allocation time (HRT entry_len is in 32-bit words, hence << 2). */
	if(pHba->hrt) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
			pHba->hrt, pHba->hrt_pa);
	}
	if(pHba->lct) {
		dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
			pHba->lct, pHba->lct_pa);
	}
	if(pHba->status_block) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
			pHba->status_block, pHba->status_block_pa);
	}
	if(pHba->reply_pool) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
			pHba->reply_pool, pHba->reply_pool_pa);
	}

	/* Free the flat i2o_device list ... */
	for(d = pHba->devices; d ; d = next){
		next = d->next;
		kfree(d);
	}
	/* ... and every per-LUN adpt_device chain in the channel tables. */
	for(i = 0 ; i < pHba->top_scsi_channel ; i++){
		for(j = 0; j < MAX_ID; j++){
			if(pHba->channel[i].device[j] != NULL){
				for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
					pNext = pDev->next_lun;
					kfree(pDev);
				}
			}
		}
	}
	pci_dev_put(pHba->pDev);
	if (adpt_sysfs_class)
		device_destroy(adpt_sysfs_class,
				MKDEV(DPTI_I2O_MAJOR, pHba->unit));
	kfree(pHba);

	/* Last adapter gone: give back the driver-wide char dev and class. */
	if(hba_count <= 0){
		unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
		if (adpt_sysfs_class) {
			class_destroy(adpt_sysfs_class);
			adpt_sysfs_class = NULL;
		}
	}
}
1118
1119static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u64 lun)
1120{
1121	struct adpt_device* d;
1122
1123	if(chan < 0 || chan >= MAX_CHANNEL)
1124		return NULL;
1125	
1126	d = pHba->channel[chan].device[id];
1127	if(!d || d->tid == 0) {
1128		return NULL;
1129	}
1130
1131	/* If it is the only lun at that address then this should match*/
1132	if(d->scsi_lun == lun){
1133		return d;
1134	}
1135
1136	/* else we need to look through all the luns */
1137	for(d=d->next_lun ; d ; d = d->next_lun){
1138		if(d->scsi_lun == lun){
1139			return d;
1140		}
1141	}
1142	return NULL;
1143}
1144
1145
/*
 * adpt_i2o_post_wait - post a message frame to the IOP and sleep until
 * the matching reply arrives or the timeout expires.
 *
 * A 15-bit id is stored in msg[2] (with bit 31 set) so that
 * adpt_i2o_post_wait_complete() can locate this waiter.  timeout is in
 * seconds; 0 means wait forever.  Returns 0 on success, the post error
 * if posting failed, -ETIME if the reply never came in time, or
 * -EOPNOTSUPP when the IOP reported the function as unsupported.
 */
static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
{
	// I used my own version of the WAIT_QUEUE_HEAD
	// to handle some version differences
	// When embedded in the kernel this could go back to the vanilla one
	ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
	int status = 0;
	ulong flags = 0;
	struct adpt_i2o_post_wait_data *p1, *p2;
	struct adpt_i2o_post_wait_data *wait_data =
		kmalloc(sizeof(struct adpt_i2o_post_wait_data), GFP_ATOMIC);
	DECLARE_WAITQUEUE(wait, current);

	if (!wait_data)
		return -ENOMEM;

	/*
	 * The spin locking is needed to keep anyone from playing
	 * with the queue pointers and id while we do the same
	 */
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
       // TODO we need a MORE unique way of getting ids
       // to support async LCT get
	wait_data->next = adpt_post_wait_queue;
	adpt_post_wait_queue = wait_data;
	adpt_post_wait_id++;
	adpt_post_wait_id &= 0x7fff;	// ids wrap at 15 bits
	wait_data->id =  adpt_post_wait_id;
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	wait_data->wq = &adpt_wq_i2o_post;
	wait_data->status = -ETIMEDOUT;

	add_wait_queue(&adpt_wq_i2o_post, &wait);

	/* Bit 31 marks the context as a post-wait transaction. */
	msg[2] |= 0x80000000 | ((u32)wait_data->id);
	timeout *= HZ;
	if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
		set_current_state(TASK_INTERRUPTIBLE);
		/* Drop the host lock while sleeping so the ISR can run. */
		if(pHba->host)
			spin_unlock_irq(pHba->host->host_lock);
		if (!timeout)
			schedule();
		else{
			timeout = schedule_timeout(timeout);
			if (timeout == 0) {
				// I/O issued, but cannot get result in
				// specified time. Freeing resources is
				// dangerous.
				status = -ETIME;
			}
		}
		if(pHba->host)
			spin_lock_irq(pHba->host->host_lock);
	}
	remove_wait_queue(&adpt_wq_i2o_post, &wait);

	if(status == -ETIMEDOUT){
		/* Posting itself timed out: the frame never reached the IOP. */
		printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
		// We will have to free the wait_data memory during shutdown
		return status;
	}

	/* Remove the entry from the queue.  */
	p2 = NULL;
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
		if(p1 == wait_data) {
			if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
				status = -EOPNOTSUPP;
			}
			if(p2) {
				p2->next = p1->next;
			} else {
				adpt_post_wait_queue = p1->next;
			}
			break;
		}
	}
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	kfree(wait_data);

	return status;
}
1231
1232
/*
 * adpt_i2o_post_this - claim an inbound message frame and post it.
 *
 * Polls the IOP's post port for up to 30 seconds for a free frame
 * offset, copies the prebuilt message (len bytes) into the frame via
 * MMIO, then writes the offset back to the post port to hand the frame
 * to the IOP.  Returns 0 on success, -ETIMEDOUT when no frame became
 * available.  Sleeps while polling — not callable from atomic context.
 */
static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
{

	u32 m = EMPTY_QUEUE;
	u32 __iomem *msg;
	ulong timeout = jiffies + 30*HZ;
	do {
		rmb();		/* re-read the port, don't reuse a stale value */
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m == EMPTY_QUEUE);
		
	/* m is the frame's byte offset within the message window. */
	msg = pHba->msg_addr_virt + m;
	memcpy_toio(msg, data, len);
	wmb();		/* frame contents must be visible before posting */

	//post message
	writel(m, pHba->post_port);
	wmb();

	return 0;
}
1262
1263
1264static void adpt_i2o_post_wait_complete(u32 context, int status)
1265{
1266	struct adpt_i2o_post_wait_data *p1 = NULL;
1267	/*
1268	 * We need to search through the adpt_post_wait
1269	 * queue to see if the given message is still
1270	 * outstanding.  If not, it means that the IOP
1271	 * took longer to respond to the message than we
1272	 * had allowed and timer has already expired.
1273	 * Not much we can do about that except log
1274	 * it for debug purposes, increase timeout, and recompile
1275	 *
1276	 * Lock needed to keep anyone from moving queue pointers
1277	 * around while we're looking through them.
1278	 */
1279
1280	context &= 0x7fff;
1281
1282	spin_lock(&adpt_post_wait_lock);
1283	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
1284		if(p1->id == context) {
1285			p1->status = status;
1286			spin_unlock(&adpt_post_wait_lock);
1287			wake_up_interruptible(p1->wq);
1288			return;
1289		}
1290	}
1291	spin_unlock(&adpt_post_wait_lock);
1292        // If this happens we lose commands that probably really completed
1293	printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context);
1294	printk(KERN_DEBUG"      Tasks in wait queue:\n");
1295	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
1296		printk(KERN_DEBUG"           %d\n",p1->id);
1297	}
1298	return;
1299}
1300
/*
 * adpt_i2o_reset_hba - issue an ExecIopReset to the controller.
 *
 * Quiesces an already-initialized IOP first, claims a message frame,
 * sends I2O_CMD_ADAPTER_RESET with a DMA-coherent 4-byte status buffer,
 * then polls that buffer (and the post port) until the reset completes
 * or times out.  Returns 0 on success, -ETIMEDOUT or -ENOMEM on
 * failure.  On timeout the status buffer is deliberately leaked because
 * the firmware may still write to it.
 */
static s32 adpt_i2o_reset_hba(adpt_hba* pHba)			
{
	u32 msg[8];
	u8* status;
	dma_addr_t addr;
	u32 m = EMPTY_QUEUE ;
	ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);

	if(pHba->initialized  == FALSE) {	// First time reset should be quick
		timeout = jiffies + (25*HZ);
	} else {
		adpt_i2o_quiesce_hba(pHba);
	}

	/* Claim an inbound message frame from the post port. */
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"Timeout waiting for message!\n");
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (m == EMPTY_QUEUE);

	/* The IOP DMAs its reset status into this 4-byte buffer. */
	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
	if(status == NULL) {
		adpt_send_nop(pHba, m);	/* give the claimed frame back */
		printk(KERN_ERR"IOP reset failed - no free memory.\n");
		return -ENOMEM;
	}
	memset(status,0,4);

	msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2]=0;
	msg[3]=0;
	msg[4]=0;
	msg[5]=0;
	msg[6]=dma_low(addr);
	msg[7]=dma_high(addr);

	memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
	wmb();
	writel(m, pHba->post_port);
	wmb();

	/* Wait for the IOP to write a non-zero reset status. */
	while(*status == 0){
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
			/* We lose 4 bytes of "status" here, but we cannot
			   free these because controller may awake and corrupt
			   those bytes at any time */
			/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
			return -ETIMEDOUT;
		}
		rmb();
		schedule_timeout_uninterruptible(1);
	}

	if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
		PDEBUG("%s: Reset in progress...\n", pHba->name);
		// Here we wait for message frame to become available
		// indicated that reset has finished
		do {
			rmb();
			m = readl(pHba->post_port);
			if (m != EMPTY_QUEUE) {
				break;
			}
			if(time_after(jiffies,timeout)){
				printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
				/* We lose 4 bytes of "status" here, but we
				   cannot free these because controller may
				   awake and corrupt those bytes at any time */
				/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
				return -ETIMEDOUT;
			}
			schedule_timeout_uninterruptible(1);
		} while (m == EMPTY_QUEUE);
		// Flush the offset
		adpt_send_nop(pHba, m);
	}
	/* Refresh the status block and report whether the reset stuck. */
	adpt_i2o_status_get(pHba);
	if(*status == 0x02 ||
			pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
		printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
				pHba->name);
	} else {
		PDEBUG("%s: Reset completed.\n", pHba->name);
	}

	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
#ifdef UARTDELAY
	// This delay is to allow someone attached to the card through the debug UART to 
	// set up the dump levels that they want before the rest of the initialization sequence
	adpt_delay(20000);
#endif
	return 0;
}
1403
1404
1405static int adpt_i2o_parse_lct(adpt_hba* pHba)
1406{
1407	int i;
1408	int max;
1409	int tid;
1410	struct i2o_device *d;
1411	i2o_lct *lct = pHba->lct;
1412	u8 bus_no = 0;
1413	s16 scsi_id;
1414	u64 scsi_lun;
1415	u32 buf[10]; // larger than 7, or 8 ...
1416	struct adpt_device* pDev; 
1417	
1418	if (lct == NULL) {
1419		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
1420		return -1;
1421	}
1422	
1423	max = lct->table_size;	
1424	max -= 3;
1425	max /= 9;
1426
1427	for(i=0;i<max;i++) {
1428		if( lct->lct_entry[i].user_tid != 0xfff){
1429			/*
1430			 * If we have hidden devices, we need to inform the upper layers about
1431			 * the possible maximum id reference to handle device access when
1432			 * an array is disassembled. This code has no other purpose but to
1433			 * allow us future access to devices that are currently hidden
1434			 * behind arrays, hotspares or have not been configured (JBOD mode).
1435			 */
1436			if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
1437			    lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
1438			    lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
1439			    	continue;
1440			}
1441			tid = lct->lct_entry[i].tid;
1442			// I2O_DPT_DEVICE_INFO_GROUP_NO;
1443			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
1444				continue;
1445			}
1446			bus_no = buf[0]>>16;
1447			scsi_id = buf[1];
1448			scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
1449			if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
1450				printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
1451				continue;
1452			}
1453			if (scsi_id >= MAX_ID){
1454				printk(KERN_WARNING"%s: SCSI ID %d out of range \n", pHba->name, bus_no);
1455				continue;
1456			}
1457			if(bus_no > pHba->top_scsi_channel){
1458				pHba->top_scsi_channel = bus_no;
1459			}
1460			if(scsi_id > pHba->top_scsi_id){
1461				pHba->top_scsi_id = scsi_id;
1462			}
1463			if(scsi_lun > pHba->top_scsi_lun){
1464				pHba->top_scsi_lun = scsi_lun;
1465			}
1466			continue;
1467		}
1468		d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
1469		if(d==NULL)
1470		{
1471			printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
1472			return -ENOMEM;
1473		}
1474		
1475		d->controller = pHba;
1476		d->next = NULL;
1477
1478		memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
1479
1480		d->flags = 0;
1481		tid = d->lct_data.tid;
1482		adpt_i2o_report_hba_unit(pHba, d);
1483		adpt_i2o_install_device(pHba, d);
1484	}
1485	bus_no = 0;
1486	for(d = pHba->devices; d ; d = d->next) {
1487		if(d->lct_data.class_id  == I2O_CLASS_BUS_ADAPTER_PORT ||
1488		   d->lct_data.class_id  == I2O_CLASS_FIBRE_CHANNEL_PORT){
1489			tid = d->lct_data.tid;
1490			// TODO get the bus_no from hrt-but for now they are in order
1491			//bus_no = 
1492			if(bus_no > pHba->top_scsi_channel){
1493				pHba->top_scsi_channel = bus_no;
1494			}
1495			pHba->channel[bus_no].type = d->lct_data.class_id;
1496			pHba->channel[bus_no].tid = tid;
1497			if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
1498			{
1499				pHba->channel[bus_no].scsi_id = buf[1];
1500				PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
1501			}
1502			// TODO remove - this is just until we get from hrt
1503			bus_no++;
1504			if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
1505				printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
1506				break;
1507			}
1508		}
1509	}
1510
1511	// Setup adpt_device table
1512	for(d = pHba->devices; d ; d = d->next) {
1513		if(d->lct_data.class_id  == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
1514		   d->lct_data.class_id  == I2O_CLASS_SCSI_PERIPHERAL ||
1515		   d->lct_data.class_id  == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
1516
1517			tid = d->lct_data.tid;
1518			scsi_id = -1;
1519			// I2O_DPT_DEVICE_INFO_GROUP_NO;
1520			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
1521				bus_no = buf[0]>>16;
1522				scsi_id = buf[1];
1523				scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
1524				if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
1525					continue;
1526				}
1527				if (scsi_id >= MAX_ID) {
1528					continue;
1529				}
1530				if( pHba->channel[bus_no].device[scsi_id] == NULL){
1531					pDev =  kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
1532					if(pDev == NULL) {
1533						return -ENOMEM;
1534					}
1535					pHba->channel[bus_no].device[scsi_id] = pDev;
1536				} else {
1537					for( pDev = pHba->channel[bus_no].device[scsi_id];	
1538							pDev->next_lun; pDev = pDev->next_lun){
1539					}
1540					pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
1541					if(pDev->next_lun == NULL) {
1542						return -ENOMEM;
1543					}
1544					pDev = pDev->next_lun;
1545				}
1546				pDev->tid = tid;
1547				pDev->scsi_channel = bus_no;
1548				pDev->scsi_id = scsi_id;
1549				pDev->scsi_lun = scsi_lun;
1550				pDev->pI2o_dev = d;
1551				d->owner = pDev;
1552				pDev->type = (buf[0])&0xff;
1553				pDev->flags = (buf[0]>>8)&0xff;
1554				if(scsi_id > pHba->top_scsi_id){
1555					pHba->top_scsi_id = scsi_id;
1556				}
1557				if(scsi_lun > pHba->top_scsi_lun){
1558					pHba->top_scsi_lun = scsi_lun;
1559				}
1560			}
1561			if(scsi_id == -1){
1562				printk(KERN_WARNING"Could not find SCSI ID for %s\n",
1563						d->lct_data.identity_tag);
1564			}
1565		}
1566	}
1567	return 0;
1568}
1569
1570
1571/*
1572 *	Each I2O controller has a chain of devices on it - these match
1573 *	the useful parts of the LCT of the board.
1574 */
1575 
1576static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
1577{
1578	mutex_lock(&adpt_configuration_lock);
1579	d->controller=pHba;
1580	d->owner=NULL;
1581	d->next=pHba->devices;
1582	d->prev=NULL;
1583	if (pHba->devices != NULL){
1584		pHba->devices->prev=d;
1585	}
1586	pHba->devices=d;
1587	*d->dev_name = 0;
1588
1589	mutex_unlock(&adpt_configuration_lock);
1590	return 0;
1591}
1592
1593static int adpt_open(struct inode *inode, struct file *file)
1594{
1595	int minor;
1596	adpt_hba* pHba;
1597
1598	mutex_lock(&adpt_mutex);
1599	//TODO check for root access
1600	//
1601	minor = iminor(inode);
1602	if (minor >= hba_count) {
1603		mutex_unlock(&adpt_mutex);
1604		return -ENXIO;
1605	}
1606	mutex_lock(&adpt_configuration_lock);
1607	for (pHba = hba_chain; pHba; pHba = pHba->next) {
1608		if (pHba->unit == minor) {
1609			break;	/* found adapter */
1610		}
1611	}
1612	if (pHba == NULL) {
1613		mutex_unlock(&adpt_configuration_lock);
1614		mutex_unlock(&adpt_mutex);
1615		return -ENXIO;
1616	}
1617
1618//	if(pHba->in_use){
1619	//	mutex_unlock(&adpt_configuration_lock);
1620//		return -EBUSY;
1621//	}
1622
1623	pHba->in_use = 1;
1624	mutex_unlock(&adpt_configuration_lock);
1625	mutex_unlock(&adpt_mutex);
1626
1627	return 0;
1628}
1629
1630static int adpt_close(struct inode *inode, struct file *file)
1631{
1632	int minor;
1633	adpt_hba* pHba;
1634
1635	minor = iminor(inode);
1636	if (minor >= hba_count) {
1637		return -ENXIO;
1638	}
1639	mutex_lock(&adpt_configuration_lock);
1640	for (pHba = hba_chain; pHba; pHba = pHba->next) {
1641		if (pHba->unit == minor) {
1642			break;	/* found adapter */
1643		}
1644	}
1645	mutex_unlock(&adpt_configuration_lock);
1646	if (pHba == NULL) {
1647		return -ENXIO;
1648	}
1649
1650	pHba->in_use = 0;
1651
1652	return 0;
1653}
1654
1655
/*
 * adpt_i2o_passthru - execute a user-space I2O message (ioctl path).
 *
 * Copies the caller's message frame in, substitutes an ioctl context,
 * bounce-buffers every simple SG element through DMA-coherent memory,
 * posts the message and waits for the reply, then copies outbound SG
 * data and the reply frame back to user space.
 *
 * NOTE(review): only 32-bit simple SG elements are handled ("TODO add
 * 64 bit API" below); bounce buffers are assumed to land below 4GB.
 * Returns 0 on success or a negative errno.
 */
static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
{
	u32 msg[MAX_MESSAGE_SIZE];
	u32* reply = NULL;
	u32 size = 0;
	u32 reply_size = 0;
	u32 __user *user_msg = arg;
	u32 __user * user_reply = NULL;
	void **sg_list = NULL;
	u32 sg_offset = 0;
	u32 sg_count = 0;
	int sg_index = 0;
	u32 i = 0;
	u32 rcode = 0;
	void *p = NULL;
	dma_addr_t addr;
	ulong flags = 0;

	memset(&msg, 0, MAX_MESSAGE_SIZE*4);
	// get user msg size in u32s 
	if(get_user(size, &user_msg[0])){
		return -EFAULT;
	}
	size = size>>16;

	/* The user's reply frame immediately follows the message. */
	user_reply = &user_msg[size];
	if(size > MAX_MESSAGE_SIZE){
		return -EFAULT;
	}
	size *= 4; // Convert to bytes

	/* Copy in the user's I2O command */
	if(copy_from_user(msg, user_msg, size)) {
		return -EFAULT;
	}
	// NOTE(review): get_user() result is ignored; on fault reply_size
	// stays 0 and no reply is copied back - confirm this is intended.
	get_user(reply_size, &user_reply[0]);
	reply_size = reply_size>>16;
	if(reply_size > REPLY_FRAME_SIZE){
		reply_size = REPLY_FRAME_SIZE;
	}
	reply_size *= 4;
	reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
	if(reply == NULL) {
		printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
		return -ENOMEM;
	}
	/* SG offset comes from the message header (in 32-bit words). */
	sg_offset = (msg[0]>>4)&0xf;
	msg[2] = 0x40000000; // IOCTL context
	msg[3] = adpt_ioctl_to_context(pHba, reply);
	if (msg[3] == (u32)-1) {
		rcode = -EBUSY;
		goto free;
	}

	/* One bounce-buffer slot per possible SG element. */
	sg_list = kcalloc(pHba->sg_tablesize, sizeof(*sg_list), GFP_KERNEL);
	if (!sg_list) {
		rcode = -ENOMEM;
		goto free;
	}
	if(sg_offset) {
		// TODO add 64 bit API
		struct sg_simple_element *sg =  (struct sg_simple_element*) (msg+sg_offset);
		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
		if (sg_count > pHba->sg_tablesize){
			printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
			rcode = -EINVAL;
			goto free;
		}

		for(i = 0; i < sg_count; i++) {
			int sg_size;

			if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
				printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i,  sg[i].flag_count);
				rcode = -EINVAL;
				goto cleanup;
			}
			sg_size = sg[i].flag_count & 0xffffff;
			/* Allocate memory for the transfer */
			p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
			if(!p) {
				printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
						pHba->name,sg_size,i,sg_count);
				rcode = -ENOMEM;
				goto cleanup;
			}
			sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
			/* Copy in the user's SG buffer if necessary */
			if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
				// sg_simple_element API is 32 bit
				if (copy_from_user(p,(void __user *)(ulong)sg[i].addr_bus, sg_size)) {
					printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
					rcode = -EFAULT;
					goto cleanup;
				}
			}
			/* sg_simple_element API is 32 bit, but addr < 4GB */
			sg[i].addr_bus = addr;
		}
	}

	do {
		/*
		 * Stop any new commands from enterring the
		 * controller while processing the ioctl
		 */
		if (pHba->host) {
			scsi_block_requests(pHba->host);
			spin_lock_irqsave(pHba->host->host_lock, flags);
		}
		rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
		if (rcode != 0)
			printk("adpt_i2o_passthru: post wait failed %d %p\n",
					rcode, reply);
		if (pHba->host) {
			spin_unlock_irqrestore(pHba->host->host_lock, flags);
			scsi_unblock_requests(pHba->host);
		}
	} while (rcode == -ETIMEDOUT);

	if(rcode){
		goto cleanup;
	}

	if(sg_offset) {
	/* Copy back the Scatter Gather buffers back to user space */
		u32 j;
		// TODO add 64 bit API
		struct sg_simple_element* sg;
		int sg_size;

		// re-acquire the original message to handle correctly the sg copy operation
		memset(&msg, 0, MAX_MESSAGE_SIZE*4); 
		// get user msg size in u32s 
		if(get_user(size, &user_msg[0])){
			rcode = -EFAULT; 
			goto cleanup; 
		}
		size = size>>16;
		size *= 4;
		// NOTE(review): here size (bytes) is compared against
		// MAX_MESSAGE_SIZE (u32 words) - stricter than the first
		// check above, but safe for the msg[] buffer; confirm.
		if (size > MAX_MESSAGE_SIZE) {
			rcode = -EINVAL;
			goto cleanup;
		}
		/* Copy in the user's I2O command */
		if (copy_from_user (msg, user_msg, size)) {
			rcode = -EFAULT;
			goto cleanup;
		}
		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);

		// TODO add 64 bit API
		sg 	 = (struct sg_simple_element*)(msg + sg_offset);
		for (j = 0; j < sg_count; j++) {
			/* Copy out the SG list to user's buffer if necessary */
			if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
				sg_size = sg[j].flag_count & 0xffffff; 
				// sg_simple_element API is 32 bit
				if (copy_to_user((void __user *)(ulong)sg[j].addr_bus,sg_list[j], sg_size)) {
					printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
					rcode = -EFAULT;
					goto cleanup;
				}
			}
		}
	} 

	/* Copy back the reply to user space */
	if (reply_size) {
		// we wrote our own values for context - now restore the user supplied ones
		if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
			printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
			rcode = -EFAULT;
		}
		if(copy_to_user(user_reply, reply, reply_size)) {
			printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
			rcode = -EFAULT;
		}
	}


cleanup:
	/* Free the bounce buffers unless the hardware may still own them
	   (timeout/interrupt leave the controller writing into them). */
	if (rcode != -ETIME && rcode != -EINTR) {
		struct sg_simple_element *sg =
				(struct sg_simple_element*) (msg +sg_offset);
		while(sg_index) {
			if(sg_list[--sg_index]) {
				dma_free_coherent(&pHba->pDev->dev,
					sg[sg_index].flag_count & 0xffffff,
					sg_list[sg_index],
					sg[sg_index].addr_bus);
			}
		}
	}

free:
	kfree(sg_list);
	kfree(reply);
	return rcode;
}
1856
#if defined __ia64__ 
/* Fill in the processor type for the management-utility sysinfo call. */
static void adpt_ia64_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_IA64;
}
#endif
1866
#if defined __sparc__ 
/* Fill in the processor type for the management-utility sysinfo call. */
static void adpt_sparc_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_ULTRASPARC;
}
#endif
#if defined __alpha__ 
/* Fill in the processor type for the management-utility sysinfo call. */
static void adpt_alpha_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_ALPHA;
}
#endif
1885
1886#if defined __i386__
1887
1888#include <uapi/asm/vm86.h>
1889
1890static void adpt_i386_info(sysInfo_S* si)
1891{
1892	// This is all the info we need for now
1893	// We will add more info as our new
1894	// managmenent utility requires it
1895	switch (boot_cpu_data.x86) {
1896	case CPU_386:
1897		si->processorType = PROC_386;
1898		break;
1899	case CPU_486:
1900		si->processorType = PRO

Large files files are truncated, but you can click here to view the full file