/drivers/scsi/dpt_i2o.c

http://github.com/mirrors/linux · C · 3556 lines

(Listing truncated: only the beginning of the file is shown below.)

// SPDX-License-Identifier: GPL-2.0-or-later
/***************************************************************************
                          dpti.c  -  description
                             -------------------
    begin                : Thu Sep 7 2000
    copyright            : (C) 2000 by Adaptec

			   July 30, 2001 First version being submitted
			   for inclusion in the kernel.  V2.4

    See Documentation/scsi/dpti.rst for history, notes, license info
    and credits
 ***************************************************************************/

/***************************************************************************
 *                                                                         *
 *                                                                         *
 ***************************************************************************/

/***************************************************************************
 * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp>
 - Support 2.6 kernel and DMA-mapping
 - ioctl fix for raid tools
 - use schedule_timeout in long long loop
 **************************************************************************/

/*#define DEBUG 1 */
/*#define UARTDELAY 1 */
#include <linux/module.h>

MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
MODULE_DESCRIPTION("Adaptec I2O RAID Driver");

////////////////////////////////////////////////////////////////

#include <linux/ioctl.h>	/* For SCSI-Passthrough */
#include <linux/uaccess.h>

#include <linux/stat.h>
#include <linux/slab.h>		/* for kmalloc() */
#include <linux/pci.h>		/* for PCI support */
#include <linux/proc_fs.h>
#include <linux/blkdev.h>
#include <linux/delay.h>	/* for udelay */
#include <linux/interrupt.h>
#include <linux/kernel.h>	/* for printk */
#include <linux/sched.h>
#include <linux/reboot.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>

#include <linux/timer.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/mutex.h>

#include <asm/processor.h>	/* for boot_cpu_data */
#include <asm/pgtable.h>
#include <asm/io.h>		/* for virt_to_bus, etc. */

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include "dpt/dptsig.h"
#include "dpti.h"
/*============================================================================
 * Create a binary signature - this is read by dptsig
 * Needed for our management apps
 *============================================================================
 */
static DEFINE_MUTEX(adpt_mutex);
static dpt_sig_S DPTI_sig = {
	{'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
#ifdef __i386__
	PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
#elif defined(__ia64__)
	PROC_INTEL, PROC_IA64,
#elif defined(__sparc__)
	PROC_ULTRASPARC, PROC_ULTRASPARC,
#elif defined(__alpha__)
	PROC_ALPHA, PROC_ALPHA,
#else
	(-1),(-1),
#endif
	FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
	ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
	DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
};
/*============================================================================
 * Globals
 *============================================================================
 */

static DEFINE_MUTEX(adpt_configuration_lock);

static struct i2o_sys_tbl *sys_tbl;
static dma_addr_t sys_tbl_pa;
static int sys_tbl_ind;
static int sys_tbl_len;

static adpt_hba* hba_chain = NULL;
static int hba_count = 0;

static struct class *adpt_sysfs_class;

static long adpt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
#endif

static const struct file_operations adpt_fops = {
	.unlocked_ioctl	= adpt_unlocked_ioctl,
	.open		= adpt_open,
	.release	= adpt_close,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_adpt_ioctl,
#endif
	.llseek		= noop_llseek,
};

/* Structures and definitions for synchronous message posting.
 * See adpt_i2o_post_wait() for description
 * */
struct adpt_i2o_post_wait_data
{
	int status;
	u32 id;
	adpt_wait_queue_head_t *wq;
	struct adpt_i2o_post_wait_data *next;
};

static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
static u32 adpt_post_wait_id = 0;
static DEFINE_SPINLOCK(adpt_post_wait_lock);

/*============================================================================
 * 				Functions
 *============================================================================
 */
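
/*
 * Small DMA-address helpers: dpt_dma64() reports whether this HBA was set up
 * for 64-bit DMA, and dma_high()/dma_low() split a dma_addr_t into the two
 * 32-bit words used when building 64-bit SG elements in message frames.
 */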
static inline int dpt_dma64(adpt_hba *pHba)
{
	return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
}

static inline u32 dma_high(dma_addr_t addr)
{
	return upper_32_bits(addr);
}

static inline u32 dma_low(dma_addr_t addr)
{
	return (u32)addr;
}

static u8 adpt_read_blink_led(adpt_hba* host)
{
	if (host->FwDebugBLEDflag_P) {
		if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
			return readb(host->FwDebugBLEDvalue_P);
		}
	}
	return 0;
}
/*============================================================================
 * Scsi host template interface functions
 *============================================================================
 */

#ifdef MODULE
static struct pci_device_id dptids[] = {
	{ PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ 0, }
};
#endif

MODULE_DEVICE_TABLE(pci,dptids);

static int adpt_detect(struct scsi_host_template* sht)
{
	struct pci_dev *pDev = NULL;
	adpt_hba *pHba;
	adpt_hba *next;

	PINFO("Detecting Adaptec I2O RAID controllers...\n");

	/* search for all Adaptec I2O RAID cards */
	while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
		if(pDev->device == PCI_DPT_DEVICE_ID ||
		   pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
			if(adpt_install_hba(sht, pDev) ){
				PERROR("Could not Init an I2O RAID device\n");
				PERROR("Will not try to detect others.\n");
				return hba_count-1;
			}
			pci_dev_get(pDev);
		}
	}

	/* In INIT state, Activate IOPs */
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		// Activate does get status , init outbound, and get hrt
		if (adpt_i2o_activate_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
		}
	}

	/* Active IOPs in HOLD state */
rebuild_sys_tab:
	if (hba_chain == NULL)
		return 0;

	/*
	 * If build_sys_table fails, we kill everything and bail
	 * as we can't init the IOPs w/o a system table
	 */
	if (adpt_i2o_build_sys_table() < 0) {
		adpt_i2o_sys_shutdown();
		return 0;
	}

	PDEBUG("HBA's in HOLD state\n");

	/* If an IOP doesn't get online, we need to rebuild the System table */
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (adpt_i2o_online_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
			goto rebuild_sys_tab;
		}
	}

	/* Active IOPs now in OPERATIONAL state */
	PDEBUG("HBA's in OPERATIONAL state\n");

	printk("dpti: If you have a lot of devices this could take a few minutes.\n");
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
		if (adpt_i2o_lct_get(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}

		if (adpt_i2o_parse_lct(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		adpt_inquiry(pHba);
	}

	adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
	if (IS_ERR(adpt_sysfs_class)) {
		printk(KERN_WARNING"dpti: unable to create dpt_i2o class\n");
		adpt_sysfs_class = NULL;
	}

	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		if (adpt_scsi_host_alloc(pHba, sht) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		pHba->initialized = TRUE;
		pHba->state &= ~DPTI_STATE_RESET;
		if (adpt_sysfs_class) {
			struct device *dev = device_create(adpt_sysfs_class,
				NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL,
				"dpti%d", pHba->unit);
			if (IS_ERR(dev)) {
				printk(KERN_WARNING"dpti%d: unable to "
					"create device in dpt_i2o class\n",
					pHba->unit);
			}
		}
	}

	// Register our control device node
	// nodes will need to be created in /dev to access this
	// the nodes can not be created from within the driver
	if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
		adpt_i2o_sys_shutdown();
		return 0;
	}
	return hba_count;
}
static void adpt_release(adpt_hba *pHba)
{
	struct Scsi_Host *shost = pHba->host;

	scsi_remove_host(shost);
//	adpt_i2o_quiesce_hba(pHba);
	adpt_i2o_delete_hba(pHba);
	scsi_host_put(shost);
}
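
/*
 * Send a SCSI INQUIRY to the adapter itself (ADAPTER_TID with the "interpret"
 * flag set) and use the returned data to fill pHba->detail with the vendor,
 * model and firmware strings reported by the controller.
 */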
static void adpt_inquiry(adpt_hba* pHba)
{
	u32 msg[17];
	u32 *mptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	u32 len;
	u32 reqlen;
	u8* buf;
	dma_addr_t addr;
	u8 scb[16];
	s32 rcode;

	memset(msg, 0, sizeof(msg));
	buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
	if(!buf){
		printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
		return;
	}
	memset((void*)buf, 0, 36);

	len = 36;
	direction = 0x00000000;
	scsidir =0x40000000;	// DATA IN  (iop<--dev)

	if (dpt_dma64(pHba))
		reqlen = 17;		// SINGLE SGE, 64 bit
	else
		reqlen = 14;		// SINGLE SGE, 32 bit
	/* Stick the headers on */
	msg[0] = reqlen<<16 | SGL_OFFSET_12;
	msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
	msg[2] = 0;
	msg[3] = 0;
	// Adaptec/DPT Private stuff
	msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
	msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/;
	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
	msg[6] = scsidir|0x20a00000| 6 /* cmd len*/;

	mptr=msg+7;

	memset(scb, 0, sizeof(scb));
	// Write SCSI command into the message - always 16 byte block
	scb[0] = INQUIRY;
	scb[1] = 0;
	scb[2] = 0;
	scb[3] = 0;
	scb[4] = 36;
	scb[5] = 0;
	// Don't care about the rest of scb

	memcpy(mptr, scb, sizeof(scb));
	mptr+=4;
	lenptr=mptr++;		/* Remember me - fill in when we know */

	/* Now fill in the SGList and command */
	*lenptr = len;
	if (dpt_dma64(pHba)) {
		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
		*mptr++ = 1 << PAGE_SHIFT;
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = dma_low(addr);
		*mptr++ = dma_high(addr);
	} else {
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = addr;
	}

	// Send it on its way
	rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
	if (rcode != 0) {
		sprintf(pHba->detail, "Adaptec I2O RAID");
		printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
		if (rcode != -ETIME && rcode != -EINTR)
			dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	} else {
		memset(pHba->detail, 0, sizeof(pHba->detail));
		memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
		memcpy(&(pHba->detail[16]), " Model: ", 8);
		memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
		memcpy(&(pHba->detail[40]), " FW: ", 4);
		memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
		pHba->detail[48] = '\0';	/* precautionary */
		dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	}
	adpt_i2o_status_get(pHba);
	return ;
}
static int adpt_slave_configure(struct scsi_device * device)
{
	struct Scsi_Host *host = device->host;
	adpt_hba* pHba;

	pHba = (adpt_hba *) host->hostdata[0];

	if (host->can_queue && device->tagged_supported) {
		scsi_change_queue_depth(device,
				host->can_queue - 1);
	}
	return 0;
}

static int adpt_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
{
	adpt_hba* pHba = NULL;
	struct adpt_device* pDev = NULL;	/* dpt per device information */

	cmd->scsi_done = done;
	/*
	 * SCSI REQUEST_SENSE commands will be executed automatically by the
	 * Host Adapter for any errors, so they should not be executed
	 * explicitly unless the Sense Data is zero indicating that no error
	 * occurred.
	 */
	if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
		cmd->result = (DID_OK << 16);
		cmd->scsi_done(cmd);
		return 0;
	}

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	if (!pHba) {
		return FAILED;
	}

	rmb();
	if ((pHba->state) & DPTI_STATE_RESET)
		return SCSI_MLQUEUE_HOST_BUSY;

	// TODO if the cmd->device is offline then I may need to issue a bus rescan
	// followed by a get_lct to see if the device is there anymore
	if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
		/*
		 * First command request for this device.  Set up a pointer
		 * to the device structure.  This should be a TEST_UNIT_READY
		 * command from scan_scsis_single.
		 */
		if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun)) == NULL) {
			// TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response
			// with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
			cmd->result = (DID_NO_CONNECT << 16);
			cmd->scsi_done(cmd);
			return 0;
		}
		cmd->device->hostdata = pDev;
	}
	pDev->pScsi_dev = cmd->device;

	/*
	 * If we are being called from when the device is being reset,
	 * delay processing of the command until later.
	 */
	if (pDev->state & DPTI_DEV_RESET ) {
		return FAILED;
	}
	return adpt_scsi_to_i2o(pHba, cmd, pDev);
}

static DEF_SCSI_QCMD(adpt_queue)
static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
		sector_t capacity, int geom[])
{
	int heads=-1;
	int sectors=-1;
	int cylinders=-1;

	// *** First lets set the default geometry ****

	// If the capacity is less than 0x2000
	if (capacity < 0x2000 ) {	// floppy
		heads = 18;
		sectors = 2;
	}
	// else if between 0x2000 and 0x20000
	else if (capacity < 0x20000) {
		heads = 64;
		sectors = 32;
	}
	// else if between 0x20000 and 0x40000
	else if (capacity < 0x40000) {
		heads = 65;
		sectors = 63;
	}
	// else if between 0x40000 and 0x80000
	else if (capacity < 0x80000) {
		heads = 128;
		sectors = 63;
	}
	// else if greater than 0x80000
	else {
		heads = 255;
		sectors = 63;
	}
	cylinders = sector_div(capacity, heads * sectors);

	// Special case if CDROM
	if(sdev->type == 5) {	// CDROM
		heads = 252;
		sectors = 63;
		cylinders = 1111;
	}

	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;

	PDEBUG("adpt_bios_param: exit\n");
	return 0;
}
static const char *adpt_info(struct Scsi_Host *host)
{
	adpt_hba* pHba;

	pHba = (adpt_hba *) host->hostdata[0];
	return (char *) (pHba->detail);
}

static int adpt_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	struct adpt_device* d;
	int id;
	int chan;
	adpt_hba* pHba;
	int unit;

	// Find HBA (host bus adapter) we are looking for
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->host == host) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return 0;
	}
	host = pHba->host;

	seq_printf(m, "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
	seq_printf(m, "%s\n", pHba->detail);
	seq_printf(m, "SCSI Host=scsi%d Control Node=/dev/%s irq=%d\n",
			pHba->host->host_no, pHba->name, host->irq);
	seq_printf(m, "\tpost fifo size = %d\n\treply fifo size = %d\n\tsg table size = %d\n\n",
			host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);

	seq_puts(m, "Devices:\n");
	for(chan = 0; chan < MAX_CHANNEL; chan++) {
		for(id = 0; id < MAX_ID; id++) {
			d = pHba->channel[chan].device[id];
			while(d) {
				seq_printf(m,"\t%-24.24s", d->pScsi_dev->vendor);
				seq_printf(m," Rev: %-8.8s\n", d->pScsi_dev->rev);

				unit = d->pI2o_dev->lct_data.tid;
				seq_printf(m, "\tTID=%d, (Channel=%d, Target=%d, Lun=%llu) (%s)\n\n",
					unit, (int)d->scsi_channel, (int)d->scsi_id, d->scsi_lun,
					scsi_device_online(d->pScsi_dev)? "online":"offline");
				d = d->next_lun;
			}
		}
	}
	return 0;
}

/*
 *	Turn a pointer to ioctl reply data into a u32 'context'
 */
static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
{
#if BITS_PER_LONG == 32
	return (u32)(unsigned long)reply;
#else
	ulong flags = 0;
	u32 nr, i;

	spin_lock_irqsave(pHba->host->host_lock, flags);
	nr = ARRAY_SIZE(pHba->ioctl_reply_context);
	for (i = 0; i < nr; i++) {
		if (pHba->ioctl_reply_context[i] == NULL) {
			pHba->ioctl_reply_context[i] = reply;
			break;
		}
	}
	spin_unlock_irqrestore(pHba->host->host_lock, flags);
	if (i >= nr) {
		printk(KERN_WARNING"%s: Too many outstanding "
				"ioctl commands\n", pHba->name);
		return (u32)-1;
	}

	return i;
#endif
}

/*
 *	Go from a u32 'context' to a pointer to ioctl reply data.
 */
static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
{
#if BITS_PER_LONG == 32
	return (void *)(unsigned long)context;
#else
	void *p = pHba->ioctl_reply_context[context];
	pHba->ioctl_reply_context[context] = NULL;

	return p;
#endif
}
/*===========================================================================
 * Error Handling routines
 *===========================================================================
 */

static int adpt_abort(struct scsi_cmnd * cmd)
{
	adpt_hba* pHba = NULL;	/* host bus adapter structure */
	struct adpt_device* dptdevice;	/* dpt per device information */
	u32 msg[5];
	int rcode;

	pHba = (adpt_hba*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to Abort\n",pHba->name);
	if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
		printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
		return FAILED;
	}

	memset(msg, 0, sizeof(msg));
	msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
	msg[2] = 0;
	msg[3]= 0;
	/* Add 1 to avoid firmware treating it as invalid command */
	msg[4] = cmd->request->tag + 1;
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Abort failed.\n",pHba->name);
		return FAILED;
	}
	printk(KERN_INFO"%s: Abort complete.\n",pHba->name);
	return SUCCESS;
}


#define I2O_DEVICE_RESET 0x27
// This is the same for BLK and SCSI devices
// NOTE this is wrong in the i2o.h definitions
// This is not currently supported by our adapter but we issue it anyway
static int adpt_device_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;
	int old_state;
	struct adpt_device* d = cmd->device->hostdata;

	pHba = (void*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
	if (!d) {
		printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
		return FAILED;
	}
	memset(msg, 0, sizeof(msg));
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
	msg[2] = 0;
	msg[3] = 0;

	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	old_state = d->state;
	d->state |= DPTI_DEV_RESET;
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	d->state = old_state;
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
		return SUCCESS;
	}
}


#define I2O_HBA_BUS_RESET 0x87
// This version of bus reset is called by the eh_error handler
static int adpt_bus_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	memset(msg, 0, sizeof(msg));
	printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid );
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
	msg[2] = 0;
	msg[3] = 0;
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
		return SUCCESS;
	}
}

// This version of reset is called by the eh_error_handler
static int __adpt_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	int rcode;
	char name[32];

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	strncpy(name, pHba->name, sizeof(name));
	printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n", name, cmd->device->channel, pHba->channel[cmd->device->channel].tid);
	rcode = adpt_hba_reset(pHba);
	if(rcode == 0){
		printk(KERN_WARNING"%s: HBA reset complete\n", name);
		return SUCCESS;
	} else {
		printk(KERN_WARNING"%s: HBA reset failed (%x)\n", name, rcode);
		return FAILED;
	}
}

static int adpt_reset(struct scsi_cmnd* cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __adpt_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}
// This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset
static int adpt_hba_reset(adpt_hba* pHba)
{
	int rcode;

	pHba->state |= DPTI_STATE_RESET;

	// Activate does get status , init outbound, and get hrt
	if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
		printk(KERN_ERR "%s: Could not activate\n", pHba->name);
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_build_sys_table()) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in HOLD state\n",pHba->name);

	if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in OPERATIONAL state\n",pHba->name);

	if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	pHba->state &= ~DPTI_STATE_RESET;

	scsi_host_complete_all_commands(pHba->host, DID_RESET);
	return 0;	/* return success */
}

/*===========================================================================
 *
 *===========================================================================
 */

static void adpt_i2o_sys_shutdown(void)
{
	adpt_hba *pHba, *pNext;
	struct adpt_i2o_post_wait_data *p1, *old;

	printk(KERN_INFO "Shutting down Adaptec I2O controllers.\n");
	printk(KERN_INFO " This could take a few minutes if there are many devices attached\n");
	/* Delete all IOPs from the controller chain */
	/* They should have already been released by the
	 * scsi-core
	 */
	for (pHba = hba_chain; pHba; pHba = pNext) {
		pNext = pHba->next;
		adpt_i2o_delete_hba(pHba);
	}

	/* Remove any timed-out entries from the wait queue. */
//	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	/* Nothing should be outstanding at this point so just
	 * free them
	 */
	for(p1 = adpt_post_wait_queue; p1;) {
		old = p1;
		p1 = p1->next;
		kfree(old);
	}
//	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
	adpt_post_wait_queue = NULL;

	printk(KERN_INFO "Adaptec I2O controllers down.\n");
}
static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
{
	adpt_hba* pHba = NULL;
	adpt_hba* p = NULL;
	ulong base_addr0_phys = 0;
	ulong base_addr1_phys = 0;
	u32 hba_map0_area_size = 0;
	u32 hba_map1_area_size = 0;
	void __iomem *base_addr_virt = NULL;
	void __iomem *msg_addr_virt = NULL;
	int dma64 = 0;

	int raptorFlag = FALSE;

	if(pci_enable_device(pDev)) {
		return -EINVAL;
	}

	if (pci_request_regions(pDev, "dpt_i2o")) {
		PERROR("dpti: adpt_config_hba: pci request region failed\n");
		return -EINVAL;
	}

	pci_set_master(pDev);

	/*
	 *	See if we should enable dma64 mode.
	 */
	if (sizeof(dma_addr_t) > 4 &&
	    dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32) &&
	    dma_set_mask(&pDev->dev, DMA_BIT_MASK(64)) == 0)
		dma64 = 1;

	if (!dma64 && dma_set_mask(&pDev->dev, DMA_BIT_MASK(32)) != 0)
		return -EINVAL;

	/* adapter only supports message blocks below 4GB */
	dma_set_coherent_mask(&pDev->dev, DMA_BIT_MASK(32));

	base_addr0_phys = pci_resource_start(pDev,0);
	hba_map0_area_size = pci_resource_len(pDev,0);

	// Check if standard PCI card or single BAR Raptor
	if(pDev->device == PCI_DPT_DEVICE_ID){
		if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){
			// Raptor card with this device id needs 4M
			hba_map0_area_size = 0x400000;
		} else { // Not Raptor - it is a PCI card
			if(hba_map0_area_size > 0x100000 ){
				hba_map0_area_size = 0x100000;
			}
		}
	} else {// Raptor split BAR config
		// Use BAR1 in this configuration
		base_addr1_phys = pci_resource_start(pDev,1);
		hba_map1_area_size = pci_resource_len(pDev,1);
		raptorFlag = TRUE;
	}

#if BITS_PER_LONG == 64
	/*
	 *	The original Adaptec 64 bit driver has this comment here:
	 *	"x86_64 machines need more optimal mappings"
	 *
	 *	I assume some HBAs report ridiculously large mappings
	 *	and we need to limit them on platforms with IOMMUs.
	 */
	if (raptorFlag == TRUE) {
		if (hba_map0_area_size > 128)
			hba_map0_area_size = 128;
		if (hba_map1_area_size > 524288)
			hba_map1_area_size = 524288;
	} else {
		if (hba_map0_area_size > 524288)
			hba_map0_area_size = 524288;
	}
#endif

	base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
	if (!base_addr_virt) {
		pci_release_regions(pDev);
		PERROR("dpti: adpt_config_hba: io remap failed\n");
		return -EINVAL;
	}

	if(raptorFlag == TRUE) {
		msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
		if (!msg_addr_virt) {
			PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
			iounmap(base_addr_virt);
			pci_release_regions(pDev);
			return -EINVAL;
		}
	} else {
		msg_addr_virt = base_addr_virt;
	}

	// Allocate and zero the data structure
	pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL);
	if (!pHba) {
		if (msg_addr_virt != base_addr_virt)
			iounmap(msg_addr_virt);
		iounmap(base_addr_virt);
		pci_release_regions(pDev);
		return -ENOMEM;
	}

	mutex_lock(&adpt_configuration_lock);

	if(hba_chain != NULL){
		for(p = hba_chain; p->next; p = p->next);
		p->next = pHba;
	} else {
		hba_chain = pHba;
	}
	pHba->next = NULL;
	pHba->unit = hba_count;
	sprintf(pHba->name, "dpti%d", hba_count);
	hba_count++;

	mutex_unlock(&adpt_configuration_lock);

	pHba->pDev = pDev;
	pHba->base_addr_phys = base_addr0_phys;

	// Set up the Virtual Base Address of the I2O Device
	pHba->base_addr_virt = base_addr_virt;
	pHba->msg_addr_virt = msg_addr_virt;
	pHba->irq_mask = base_addr_virt+0x30;
	pHba->post_port = base_addr_virt+0x40;
	pHba->reply_port = base_addr_virt+0x44;

	pHba->hrt = NULL;
	pHba->lct = NULL;
	pHba->lct_size = 0;
	pHba->status_block = NULL;
	pHba->post_count = 0;
	pHba->state = DPTI_STATE_RESET;
	pHba->pDev = pDev;
	pHba->devices = NULL;
	pHba->dma64 = dma64;

	// Initializing the spinlocks
	spin_lock_init(&pHba->state_lock);
	spin_lock_init(&adpt_post_wait_lock);

	if(raptorFlag == 0){
		printk(KERN_INFO "Adaptec I2O RAID controller"
				 " %d at %p size=%x irq=%d%s\n",
			hba_count-1, base_addr_virt,
			hba_map0_area_size, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
	} else {
		printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n",
			hba_count-1, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
		printk(KERN_INFO" BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
		printk(KERN_INFO" BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
	}

	if (request_irq (pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
		printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
		adpt_i2o_delete_hba(pHba);
		return -EINVAL;
	}

	return 0;
}
static void adpt_i2o_delete_hba(adpt_hba* pHba)
{
	adpt_hba* p1;
	adpt_hba* p2;
	struct i2o_device* d;
	struct i2o_device* next;
	int i;
	int j;
	struct adpt_device* pDev;
	struct adpt_device* pNext;

	mutex_lock(&adpt_configuration_lock);
	if(pHba->host){
		free_irq(pHba->host->irq, pHba);
	}
	p2 = NULL;
	for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
		if(p1 == pHba) {
			if(p2) {
				p2->next = p1->next;
			} else {
				hba_chain = p1->next;
			}
			break;
		}
	}

	hba_count--;
	mutex_unlock(&adpt_configuration_lock);

	iounmap(pHba->base_addr_virt);
	pci_release_regions(pHba->pDev);
	if(pHba->msg_addr_virt != pHba->base_addr_virt){
		iounmap(pHba->msg_addr_virt);
	}
	if(pHba->FwDebugBuffer_P)
		iounmap(pHba->FwDebugBuffer_P);
	if(pHba->hrt) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
			pHba->hrt, pHba->hrt_pa);
	}
	if(pHba->lct) {
		dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
			pHba->lct, pHba->lct_pa);
	}
	if(pHba->status_block) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
			pHba->status_block, pHba->status_block_pa);
	}
	if(pHba->reply_pool) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
			pHba->reply_pool, pHba->reply_pool_pa);
	}

	for(d = pHba->devices; d ; d = next){
		next = d->next;
		kfree(d);
	}
	for(i = 0 ; i < pHba->top_scsi_channel ; i++){
		for(j = 0; j < MAX_ID; j++){
			if(pHba->channel[i].device[j] != NULL){
				for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
					pNext = pDev->next_lun;
					kfree(pDev);
				}
			}
		}
	}
	pci_dev_put(pHba->pDev);
	if (adpt_sysfs_class)
		device_destroy(adpt_sysfs_class,
				MKDEV(DPTI_I2O_MAJOR, pHba->unit));
	kfree(pHba);

	if(hba_count <= 0){
		unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
		if (adpt_sysfs_class) {
			class_destroy(adpt_sysfs_class);
			adpt_sysfs_class = NULL;
		}
	}
}

static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u64 lun)
{
	struct adpt_device* d;

	if(chan < 0 || chan >= MAX_CHANNEL)
		return NULL;

	d = pHba->channel[chan].device[id];
	if(!d || d->tid == 0) {
		return NULL;
	}

	/* If it is the only lun at that address then this should match*/
	if(d->scsi_lun == lun){
		return d;
	}

	/* else we need to look through all the luns */
	for(d=d->next_lun ; d ; d = d->next_lun){
		if(d->scsi_lun == lun){
			return d;
		}
	}
	return NULL;
}
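
/*
 * Post an I2O message and sleep until the matching reply arrives or the
 * timeout expires.  A wait entry with a 15-bit id is chained onto
 * adpt_post_wait_queue and the id is placed in msg[2]; the reply path
 * (adpt_i2o_post_wait_complete) uses that id to record the status and
 * wake us up.
 */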
static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
{
	// I used my own version of the WAIT_QUEUE_HEAD
	// to handle some version differences
	// When embedded in the kernel this could go back to the vanilla one
	ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
	int status = 0;
	ulong flags = 0;
	struct adpt_i2o_post_wait_data *p1, *p2;
	struct adpt_i2o_post_wait_data *wait_data =
		kmalloc(sizeof(struct adpt_i2o_post_wait_data), GFP_ATOMIC);
	DECLARE_WAITQUEUE(wait, current);

	if (!wait_data)
		return -ENOMEM;

	/*
	 * The spin locking is needed to keep anyone from playing
	 * with the queue pointers and id while we do the same
	 */
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	// TODO we need a MORE unique way of getting ids
	// to support async LCT get
	wait_data->next = adpt_post_wait_queue;
	adpt_post_wait_queue = wait_data;
	adpt_post_wait_id++;
	adpt_post_wait_id &= 0x7fff;
	wait_data->id = adpt_post_wait_id;
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	wait_data->wq = &adpt_wq_i2o_post;
	wait_data->status = -ETIMEDOUT;

	add_wait_queue(&adpt_wq_i2o_post, &wait);

	msg[2] |= 0x80000000 | ((u32)wait_data->id);
	timeout *= HZ;
	if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
		set_current_state(TASK_INTERRUPTIBLE);
		if(pHba->host)
			spin_unlock_irq(pHba->host->host_lock);
		if (!timeout)
			schedule();
		else{
			timeout = schedule_timeout(timeout);
			if (timeout == 0) {
				// I/O issued, but cannot get result in
				// specified time. Freeing resources is
				// dangerous.
				status = -ETIME;
			}
		}
		if(pHba->host)
			spin_lock_irq(pHba->host->host_lock);
	}
	remove_wait_queue(&adpt_wq_i2o_post, &wait);

	if(status == -ETIMEDOUT){
		printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
		// We will have to free the wait_data memory during shutdown
		return status;
	}

	/* Remove the entry from the queue. */
	p2 = NULL;
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
		if(p1 == wait_data) {
			if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
				status = -EOPNOTSUPP;
			}
			if(p2) {
				p2->next = p1->next;
			} else {
				adpt_post_wait_queue = p1->next;
			}
			break;
		}
	}
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	kfree(wait_data);

	return status;
}

static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
{
	u32 m = EMPTY_QUEUE;
	u32 __iomem *msg;
	ulong timeout = jiffies + 30*HZ;

	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m == EMPTY_QUEUE);

	msg = pHba->msg_addr_virt + m;
	memcpy_toio(msg, data, len);
	wmb();

	//post message
	writel(m, pHba->post_port);
	wmb();

	return 0;
}

static void adpt_i2o_post_wait_complete(u32 context, int status)
{
	struct adpt_i2o_post_wait_data *p1 = NULL;
	/*
	 * We need to search through the adpt_post_wait
	 * queue to see if the given message is still
	 * outstanding.  If not, it means that the IOP
	 * took longer to respond to the message than we
	 * had allowed and timer has already expired.
	 * Not much we can do about that except log
	 * it for debug purposes, increase timeout, and recompile
	 *
	 * Lock needed to keep anyone from moving queue pointers
	 * around while we're looking through them.
	 */

	context &= 0x7fff;

	spin_lock(&adpt_post_wait_lock);
	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		if(p1->id == context) {
			p1->status = status;
			spin_unlock(&adpt_post_wait_lock);
			wake_up_interruptible(p1->wq);
			return;
		}
	}
	spin_unlock(&adpt_post_wait_lock);
	// If this happens we lose commands that probably really completed
	printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context);
	printk(KERN_DEBUG" Tasks in wait queue:\n");
	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		printk(KERN_DEBUG" %d\n",p1->id);
	}
	return;
}
static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
{
	u32 msg[8];
	u8* status;
	dma_addr_t addr;
	u32 m = EMPTY_QUEUE ;
	ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);

	if(pHba->initialized == FALSE) {	// First time reset should be quick
		timeout = jiffies + (25*HZ);
	} else {
		adpt_i2o_quiesce_hba(pHba);
	}

	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"Timeout waiting for message!\n");
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (m == EMPTY_QUEUE);

	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
	if(status == NULL) {
		adpt_send_nop(pHba, m);
		printk(KERN_ERR"IOP reset failed - no free memory.\n");
		return -ENOMEM;
	}
	memset(status,0,4);

	msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2]=0;
	msg[3]=0;
	msg[4]=0;
	msg[5]=0;
	msg[6]=dma_low(addr);
	msg[7]=dma_high(addr);

	memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
	wmb();
	writel(m, pHba->post_port);
	wmb();

	while(*status == 0){
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
			/* We lose 4 bytes of "status" here, but we cannot
			   free these because controller may awake and corrupt
			   those bytes at any time */
			/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
			return -ETIMEDOUT;
		}
		rmb();
		schedule_timeout_uninterruptible(1);
	}

	if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
		PDEBUG("%s: Reset in progress...\n", pHba->name);
		// Here we wait for message frame to become available
		// indicating that the reset has finished
		do {
			rmb();
			m = readl(pHba->post_port);
			if (m != EMPTY_QUEUE) {
				break;
			}
			if(time_after(jiffies,timeout)){
				printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
				/* We lose 4 bytes of "status" here, but we
				   cannot free these because controller may
				   awake and corrupt those bytes at any time */
				/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
				return -ETIMEDOUT;
			}
			schedule_timeout_uninterruptible(1);
		} while (m == EMPTY_QUEUE);
		// Flush the offset
		adpt_send_nop(pHba, m);
	}
	adpt_i2o_status_get(pHba);
	if(*status == 0x02 ||
			pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
		printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
				pHba->name);
	} else {
		PDEBUG("%s: Reset completed.\n", pHba->name);
	}

	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
#ifdef UARTDELAY
	// This delay is to allow someone attached to the card through the debug UART to
	// set up the dump levels that they want before the rest of the initialization sequence
	adpt_delay(20000);
#endif
	return 0;
}
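
/*
 * Walk the logical configuration table (LCT): note the highest bus/id/lun
 * seen for hidden devices, record the bus adapter ports as channels, and
 * build the per-channel adpt_device tables for the visible devices.
 */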
static int adpt_i2o_parse_lct(adpt_hba* pHba)
{
	int i;
	int max;
	int tid;
	struct i2o_device *d;
	i2o_lct *lct = pHba->lct;
	u8 bus_no = 0;
	s16 scsi_id;
	u64 scsi_lun;
	u32 buf[10]; // larger than 7, or 8 ...
	struct adpt_device* pDev;

	if (lct == NULL) {
		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
		return -1;
	}

	max = lct->table_size;
	max -= 3;
	max /= 9;

	for(i=0;i<max;i++) {
		if( lct->lct_entry[i].user_tid != 0xfff){
			/*
			 * If we have hidden devices, we need to inform the upper layers about
			 * the possible maximum id reference to handle device access when
			 * an array is disassembled. This code has no other purpose but to
			 * allow us future access to devices that are currently hidden
			 * behind arrays, hotspares or have not been configured (JBOD mode).
			 */
			if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
			    lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
			    lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
				continue;
			}
			tid = lct->lct_entry[i].tid;
			// I2O_DPT_DEVICE_INFO_GROUP_NO;
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
				continue;
			}
			bus_no = buf[0]>>16;
			scsi_id = buf[1];
			scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
			if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
				printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
				continue;
			}
			if (scsi_id >= MAX_ID){
				printk(KERN_WARNING"%s: SCSI ID %d out of range \n", pHba->name, bus_no);
				continue;
			}
			if(bus_no > pHba->top_scsi_channel){
				pHba->top_scsi_channel = bus_no;
			}
			if(scsi_id > pHba->top_scsi_id){
				pHba->top_scsi_id = scsi_id;
			}
			if(scsi_lun > pHba->top_scsi_lun){
				pHba->top_scsi_lun = scsi_lun;
			}
			continue;
		}
		d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
		if(d==NULL)
		{
			printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
			return -ENOMEM;
		}

		d->controller = pHba;
		d->next = NULL;

		memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));

		d->flags = 0;
		tid = d->lct_data.tid;
		adpt_i2o_report_hba_unit(pHba, d);
		adpt_i2o_install_device(pHba, d);
	}
	bus_no = 0;
	for(d = pHba->devices; d ; d = d->next) {
		if(d->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT ||
		   d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PORT){
			tid = d->lct_data.tid;
			// TODO get the bus_no from hrt-but for now they are in order
			//bus_no =
			if(bus_no > pHba->top_scsi_channel){
				pHba->top_scsi_channel = bus_no;
			}
			pHba->channel[bus_no].type = d->lct_data.class_id;
			pHba->channel[bus_no].tid = tid;
			if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
			{
				pHba->channel[bus_no].scsi_id = buf[1];
				PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
			}
			// TODO remove - this is just until we get from hrt
			bus_no++;
			if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
				printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
				break;
			}
		}
	}

	// Setup adpt_device table
	for(d = pHba->devices; d ; d = d->next) {
		if(d->lct_data.class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
		   d->lct_data.class_id == I2O_CLASS_SCSI_PERIPHERAL ||
		   d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
			tid = d->lct_data.tid;
			scsi_id = -1;
			// I2O_DPT_DEVICE_INFO_GROUP_NO;
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
				bus_no = buf[0]>>16;
				scsi_id = buf[1];
				scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
				if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
					continue;
				}
				if (scsi_id >= MAX_ID) {
					continue;
				}
				if( pHba->channel[bus_no].device[scsi_id] == NULL){
					pDev = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev == NULL) {
						return -ENOMEM;
					}
					pHba->channel[bus_no].device[scsi_id] = pDev;
				} else {
					for( pDev = pHba->channel[bus_no].device[scsi_id];
							pDev->next_lun; pDev = pDev->next_lun){
					}
					pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev->next_lun == NULL) {
						return -ENOMEM;
					}
					pDev = pDev->next_lun;
				}
				pDev->tid = tid;
				pDev->scsi_channel = bus_no;
				pDev->scsi_id = scsi_id;
				pDev->scsi_lun = scsi_lun;
				pDev->pI2o_dev = d;
				d->owner = pDev;
				pDev->type = (buf[0])&0xff;
				pDev->flags = (buf[0]>>8)&0xff;
				if(scsi_id > pHba->top_scsi_id){
					pHba->top_scsi_id = scsi_id;
				}
				if(scsi_lun > pHba->top_scsi_lun){
					pHba->top_scsi_lun = scsi_lun;
				}
			}
			if(scsi_id == -1){
				printk(KERN_WARNING"Could not find SCSI ID for %s\n",
						d->lct_data.identity_tag);
			}
		}
	}
	return 0;
}


/*
 *	Each I2O controller has a chain of devices on it - these match
 *	the useful parts of the LCT of the board.
 */
static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
{
	mutex_lock(&adpt_configuration_lock);
	d->controller=pHba;
	d->owner=NULL;
	d->next=pHba->devices;
	d->prev=NULL;
	if (pHba->devices != NULL){
		pHba->devices->prev=d;
	}
	pHba->devices=d;
	*d->dev_name = 0;

	mutex_unlock(&adpt_configuration_lock);
	return 0;
}
static int adpt_open(struct inode *inode, struct file *file)
{
	int minor;
	adpt_hba* pHba;

	mutex_lock(&adpt_mutex);
	//TODO check for root access
	//
	minor = iminor(inode);
	if (minor >= hba_count) {
		mutex_unlock(&adpt_mutex);
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	if (pHba == NULL) {
		mutex_unlock(&adpt_configuration_lock);
		mutex_unlock(&adpt_mutex);
		return -ENXIO;
	}

//	if(pHba->in_use){
//		mutex_unlock(&adpt_configuration_lock);
//		return -EBUSY;
//	}

	pHba->in_use = 1;
	mutex_unlock(&adpt_configuration_lock);
	mutex_unlock(&adpt_mutex);

	return 0;
}

static int adpt_close(struct inode *inode, struct file *file)
{
	int minor;
	adpt_hba* pHba;

	minor = iminor(inode);
	if (minor >= hba_count) {
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return -ENXIO;
	}

	pHba->in_use = 0;

	return 0;
}
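
/*
 * Handle the I2O pass-through ioctl: copy the caller's message frame in,
 * remap any scatter-gather elements onto kernel DMA buffers, post the
 * message and wait, then copy the SG data and the reply frame back to
 * user space.
 */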
static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
{
	u32 msg[MAX_MESSAGE_SIZE];
	u32* reply = NULL;
	u32 size = 0;
	u32 reply_size = 0;
	u32 __user *user_msg = arg;
	u32 __user * user_reply = NULL;
	void **sg_list = NULL;
	u32 sg_offset = 0;
	u32 sg_count = 0;
	int sg_index = 0;
	u32 i = 0;
	u32 rcode = 0;
	void *p = NULL;
	dma_addr_t addr;
	ulong flags = 0;

	memset(&msg, 0, MAX_MESSAGE_SIZE*4);
	// get user msg size in u32s
	if(get_user(size, &user_msg[0])){
		return -EFAULT;
	}
	size = size>>16;

	user_reply = &user_msg[size];
	if(size > MAX_MESSAGE_SIZE){
		return -EFAULT;
	}
	size *= 4; // Convert to bytes

	/* Copy in the user's I2O command */
	if(copy_from_user(msg, user_msg, size)) {
		return -EFAULT;
	}
	get_user(reply_size, &user_reply[0]);
	reply_size = reply_size>>16;
	if(reply_size > REPLY_FRAME_SIZE){
		reply_size = REPLY_FRAME_SIZE;
	}
	reply_size *= 4;
	reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
	if(reply == NULL) {
		printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
		return -ENOMEM;
	}
	sg_offset = (msg[0]>>4)&0xf;
	msg[2] = 0x40000000; // IOCTL context
	msg[3] = adpt_ioctl_to_context(pHba, reply);
	if (msg[3] == (u32)-1) {
		rcode = -EBUSY;
		goto free;
	}

	sg_list = kcalloc(pHba->sg_tablesize, sizeof(*sg_list), GFP_KERNEL);
	if (!sg_list) {
		rcode = -ENOMEM;
		goto free;
	}
	if(sg_offset) {
		// TODO add 64 bit API
		struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset);
		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
		if (sg_count > pHba->sg_tablesize){
			printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
			rcode = -EINVAL;
			goto free;
		}

		for(i = 0; i < sg_count; i++) {
			int sg_size;

			if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
				printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i, sg[i].flag_count);
				rcode = -EINVAL;
				goto cleanup;
			}
			sg_size = sg[i].flag_count & 0xffffff;
			/* Allocate memory for the transfer */
			p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
			if(!p) {
				printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
						pHba->name,sg_size,i,sg_count);
				rcode = -ENOMEM;
				goto cleanup;
			}
			sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
			/* Copy in the user's SG buffer if necessary */
			if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
				// sg_simple_element API is 32 bit
				if (copy_from_user(p,(void __user *)(ulong)sg[i].addr_bus, sg_size)) {
					printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
					rcode = -EFAULT;
					goto cleanup;
				}
			}
			/* sg_simple_element API is 32 bit, but addr < 4GB */
			sg[i].addr_bus = addr;
		}
	}

	do {
		/*
		 * Stop any new commands from entering the
		 * controller while processing the ioctl
		 */
		if (pHba->host) {
			scsi_block_requests(pHba->host);
			spin_lock_irqsave(pHba->host->host_lock, flags);
		}
		rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
		if (rcode != 0)
			printk("adpt_i2o_passthru: post wait failed %d %p\n",
					rcode, reply);
		if (pHba->host) {
			spin_unlock_irqrestore(pHba->host->host_lock, flags);
			scsi_unblock_requests(pHba->host);
		}
	} while (rcode == -ETIMEDOUT);

	if(rcode){
		goto cleanup;
	}

	if(sg_offset) {
		/* Copy back the Scatter Gather buffers back to user space */
		u32 j;
		// TODO add 64 bit API
		struct sg_simple_element* sg;
		int sg_size;

		// re-acquire the original message to handle correctly the sg copy operation
		memset(&msg, 0, MAX_MESSAGE_SIZE*4);
		// get user msg size in u32s
		if(get_user(size, &user_msg[0])){
			rcode = -EFAULT;
			goto cleanup;
		}
		size = size>>16;
		size *= 4;
		if (size > MAX_MESSAGE_SIZE) {
			rcode = -EINVAL;
			goto cleanup;
		}
		/* Copy in the user's I2O command */
		if (copy_from_user (msg, user_msg, size)) {
			rcode = -EFAULT;
			goto cleanup;
		}
		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);

		// TODO add 64 bit API
		sg = (struct sg_simple_element*)(msg + sg_offset);
		for (j = 0; j < sg_count; j++) {
			/* Copy out the SG list to user's buffer if necessary */
			if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
				sg_size = sg[j].flag_count & 0xffffff;
				// sg_simple_element API is 32 bit
				if (copy_to_user((void __user *)(ulong)sg[j].addr_bus,sg_list[j], sg_size)) {
					printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
					rcode = -EFAULT;
					goto cleanup;
				}
			}
		}
	}

	/* Copy back the reply to user space */
	if (reply_size) {
		// we wrote our own values for context - now restore the user supplied ones
		if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
			printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
			rcode = -EFAULT;
		}
		if(copy_to_user(user_reply, reply, reply_size)) {
			printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
			rcode = -EFAULT;
		}
	}


cleanup:
	if (rcode != -ETIME && rcode != -EINTR) {
		struct sg_simple_element *sg =
				(struct sg_simple_element*) (msg +sg_offset);
		while(sg_index) {
			if(sg_list[--sg_index]) {
				dma_free_coherent(&pHba->pDev->dev,
					sg[sg_index].flag_count & 0xffffff,
					sg_list[sg_index],
					sg[sg_index].addr_bus);
			}
		}
	}

free:
	kfree(sg_list);
	kfree(reply);
	return rcode;
}
#if defined __ia64__
static void adpt_ia64_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_IA64;
}
#endif

#if defined __sparc__
static void adpt_sparc_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_ULTRASPARC;
}
#endif

#if defined __alpha__
static void adpt_alpha_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_ALPHA;
}
#endif

#if defined __i386__

#include <uapi/asm/vm86.h>

static void adpt_i386_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	switch (boot_cpu_data.x86) {
	case CPU_386:
		si->processorType = PROC_386;
		break;
	case CPU_486:
		si->processorType = PROC_486;