PageRenderTime 64ms CodeModel.GetById 28ms RepoModel.GetById 1ms app.codeStats 0ms

/arch/arm/mach-rpc/ecard.c

https://github.com/tekkamanninja/linux
C | 1146 lines | 818 code | 190 blank | 138 comment | 147 complexity | e0a6a99095162dc6440484603eb8a378 MD5 | raw file
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * linux/arch/arm/kernel/ecard.c
  4. *
  5. * Copyright 1995-2001 Russell King
  6. *
  7. * Find all installed expansion cards, and handle interrupts from them.
  8. *
  9. * Created from information from Acorns RiscOS3 PRMs
  10. *
  11. * 08-Dec-1996 RMK Added code for the 9'th expansion card - the ether
  12. * podule slot.
  13. * 06-May-1997 RMK Added blacklist for cards whose loader doesn't work.
  14. * 12-Sep-1997 RMK Created new handling of interrupt enables/disables
  15. * - cards can now register their own routine to control
  16. * interrupts (recommended).
  17. * 29-Sep-1997 RMK Expansion card interrupt hardware not being re-enabled
  18. * on reset from Linux. (Caused cards not to respond
  19. * under RiscOS without hard reset).
  20. * 15-Feb-1998 RMK Added DMA support
  21. * 12-Sep-1998 RMK Added EASI support
  22. * 10-Jan-1999 RMK Run loaders in a simulated RISC OS environment.
  23. * 17-Apr-1999 RMK Support for EASI Type C cycles.
  24. */
  25. #define ECARD_C
  26. #include <linux/module.h>
  27. #include <linux/kernel.h>
  28. #include <linux/types.h>
  29. #include <linux/sched.h>
  30. #include <linux/sched/mm.h>
  31. #include <linux/interrupt.h>
  32. #include <linux/completion.h>
  33. #include <linux/reboot.h>
  34. #include <linux/mm.h>
  35. #include <linux/slab.h>
  36. #include <linux/proc_fs.h>
  37. #include <linux/seq_file.h>
  38. #include <linux/device.h>
  39. #include <linux/init.h>
  40. #include <linux/mutex.h>
  41. #include <linux/kthread.h>
  42. #include <linux/irq.h>
  43. #include <linux/io.h>
  44. #include <asm/dma.h>
  45. #include <asm/ecard.h>
  46. #include <mach/hardware.h>
  47. #include <asm/irq.h>
  48. #include <asm/mmu_context.h>
  49. #include <asm/mach/irq.h>
  50. #include <asm/tlbflush.h>
  51. #include "ecard.h"
/*
 * A request handed to the expansion card daemon (kecardd): the operation
 * to run (fn), the card to run it against, and where to put the result.
 * The submitter blocks on 'complete' until the daemon has finished.
 */
struct ecard_request {
	void (*fn)(struct ecard_request *);	/* operation, run in kecardd context */
	ecard_t *ec;				/* target expansion card */
	unsigned int address;			/* ROM offset to read from */
	unsigned int length;			/* number of bytes to read */
	unsigned int use_loader;		/* non-zero: read via the card's loader */
	void *buffer;				/* destination buffer for read data */
	struct completion *complete;		/* signalled when the request is done */
};
/*
 * Quirk table entry for a card needing special handling: a hard-coded
 * description (type) for cards whose ROM lacks one, and/or an init
 * fixup run at probe time.
 */
struct expcard_quirklist {
	unsigned short manufacturer;	/* ECId manufacturer code to match */
	unsigned short product;		/* ECId product code to match */
	const char *type;		/* optional fixed description string */
	void (*init)(ecard_t *ec);	/* optional probe-time fixup */
};
static ecard_t *cards;				/* singly-linked list of all probed cards */
static ecard_t *slot_to_expcard[MAX_ECARDS];	/* fast slot number -> card lookup */
static unsigned int ectcr;			/* shadow of the EASI timing control register */

static void atomwide_3p_quirk(ecard_t *ec);

/* List of descriptions of cards which don't have an extended
 * identification, or chunk directories containing a description.
 */
static struct expcard_quirklist quirklist[] __initdata = {
	{ MANU_ACORN, PROD_ACORN_ETHER1, "Acorn Ether1" },
	{ MANU_ATOMWIDE, PROD_ATOMWIDE_3PSERIAL, NULL, atomwide_3p_quirk },
};

/* Assembly trampolines that run a card's RISC OS loader code in the
 * simulated environment set up by kecardd.
 */
asmlinkage extern int
ecard_loader_reset(unsigned long base, loader_t loader);
asmlinkage extern int
ecard_loader_read(int off, unsigned long base, loader_t loader);
  82. static inline unsigned short ecard_getu16(unsigned char *v)
  83. {
  84. return v[0] | v[1] << 8;
  85. }
  86. static inline signed long ecard_gets24(unsigned char *v)
  87. {
  88. return v[0] | v[1] << 8 | v[2] << 16 | ((v[2] & 0x80) ? 0xff000000 : 0);
  89. }
  90. static inline ecard_t *slot_to_ecard(unsigned int slot)
  91. {
  92. return slot < MAX_ECARDS ? slot_to_expcard[slot] : NULL;
  93. }
  94. /* ===================== Expansion card daemon ======================== */
  95. /*
  96. * Since the loader programs on the expansion cards need to be run
  97. * in a specific environment, create a separate task with this
  98. * environment up, and pass requests to this task as and when we
  99. * need to.
  100. *
  101. * This should allow 99% of loaders to be called from Linux.
  102. *
  103. * From a security standpoint, we trust the card vendors. This
  104. * may be a misplaced trust.
  105. */
  106. static void ecard_task_reset(struct ecard_request *req)
  107. {
  108. struct expansion_card *ec = req->ec;
  109. struct resource *res;
  110. res = ec->slot_no == 8
  111. ? &ec->resource[ECARD_RES_MEMC]
  112. : ec->easi
  113. ? &ec->resource[ECARD_RES_EASI]
  114. : &ec->resource[ECARD_RES_IOCSYNC];
  115. ecard_loader_reset(res->start, ec->loader);
  116. }
/*
 * Read req->length bytes of a card's ROM, starting at ROM offset
 * req->address, into req->buffer.  Runs in kecardd context only.
 * Slot 8 (the network slot) has a paged ROM with a hardware
 * auto-incrementing index; other slots are read either directly
 * (one ROM byte per bus word) or through the card's loader code.
 */
static void ecard_task_readbytes(struct ecard_request *req)
{
	struct expansion_card *ec = req->ec;
	unsigned char *buf = req->buffer;
	unsigned int len = req->length;
	unsigned int off = req->address;

	if (ec->slot_no == 8) {
		void __iomem *base = (void __iomem *)
				ec->resource[ECARD_RES_MEMC].start;

		/*
		 * The card maintains an index which increments the address
		 * into a 4096-byte page on each access.  We need to keep
		 * track of the counter.
		 */
		static unsigned int index;
		unsigned int page;

		/* word address of the page select for this 4K page */
		page = (off >> 12) * 4;
		if (page > 256 * 4)	/* beyond the 256-page ROM window */
			return;

		off &= 4095;

		/*
		 * If we are reading offset 0, or our current index is
		 * greater than the offset, reset the hardware index counter.
		 */
		if (off == 0 || index > off) {
			writeb(0, base);
			index = 0;
		}

		/*
		 * Increment the hardware index counter until we get to the
		 * required offset.  The read bytes are discarded.
		 */
		while (index < off) {
			readb(base + page);
			index += 1;
		}

		while (len--) {
			*buf++ = readb(base + page);
			index += 1;
		}
	} else {
		unsigned long base = (ec->easi
			? &ec->resource[ECARD_RES_EASI]
			: &ec->resource[ECARD_RES_IOCSYNC])->start;
		void __iomem *pbase = (void __iomem *)base;

		if (!req->use_loader || !ec->loader) {
			/* direct read: the ROM appears one byte per word */
			off *= 4;
			while (len--) {
				*buf++ = readb(pbase + off);
				off += 4;
			}
		} else {
			while(len--) {
				/*
				 * The following is required by some
				 * expansion card loader programs.
				 */
				*(unsigned long *)0x108 = 0;
				*buf++ = ecard_loader_read(off++, base,
							   ec->loader);
			}
		}
	}
}
/* Request handshake with kecardd: the single outstanding request is
 * published through ecard_req (serialised by ecard_mutex) and the
 * daemon is woken via ecard_wait.
 */
static DECLARE_WAIT_QUEUE_HEAD(ecard_wait);
static struct ecard_request *ecard_req;
static DEFINE_MUTEX(ecard_mutex);
/*
 * Set up the expansion card daemon's page tables.
 * Copies the kernel's IO and EASI page-directory entries into kecardd's
 * mm at the virtual addresses where loader code expects the hardware,
 * then flushes any stale TLB entries for those ranges.
 */
static void ecard_init_pgtables(struct mm_struct *mm)
{
	struct vm_area_struct vma = TLB_FLUSH_VMA(mm, VM_EXEC);

	/* We want to set up the page tables for the following mapping:
	 *  Virtual	Physical
	 *  0x03000000	0x03000000
	 *  0x03010000	unmapped
	 *  0x03210000	0x03210000
	 *  0x03400000	unmapped
	 *  0x08000000	0x08000000
	 *  0x10000000	unmapped
	 *
	 * FIXME: we don't follow this 100% yet.
	 */
	pgd_t *src_pgd, *dst_pgd;

	src_pgd = pgd_offset(mm, (unsigned long)IO_BASE);
	dst_pgd = pgd_offset(mm, IO_START);

	memcpy(dst_pgd, src_pgd, sizeof(pgd_t) * (IO_SIZE / PGDIR_SIZE));

	src_pgd = pgd_offset(mm, (unsigned long)EASI_BASE);
	dst_pgd = pgd_offset(mm, EASI_START);

	memcpy(dst_pgd, src_pgd, sizeof(pgd_t) * (EASI_SIZE / PGDIR_SIZE));

	flush_tlb_range(&vma, IO_START, IO_START + IO_SIZE);
	flush_tlb_range(&vma, EASI_START, EASI_START + EASI_SIZE);
}
/*
 * Give the current task (kecardd) its own mm carrying the loader-visible
 * mappings.  Ordering matters here: the new mm is installed and
 * activated before the reference on the old active_mm is dropped.
 * Returns 0 on success, -ENOMEM if the mm cannot be allocated.
 */
static int ecard_init_mm(void)
{
	struct mm_struct * mm = mm_alloc();
	struct mm_struct *active_mm = current->active_mm;

	if (!mm)
		return -ENOMEM;

	current->mm = mm;
	current->active_mm = mm;
	activate_mm(active_mm, mm);
	mmdrop(active_mm);
	ecard_init_pgtables(mm);
	return 0;
}
/*
 * Main loop of the kecardd kernel thread: waits for a request to appear
 * in ecard_req, runs it, and signals the submitter.  Never returns.
 */
static int
ecard_task(void * unused)
{
	/*
	 * Allocate a mm.  We're not a lazy-TLB kernel task since we need
	 * to set page table entries where the user space would be.  Note
	 * that this also creates the page tables.  Failure is not an
	 * option here.
	 */
	if (ecard_init_mm())
		panic("kecardd: unable to alloc mm\n");

	while (1) {
		struct ecard_request *req;

		wait_event_interruptible(ecard_wait, ecard_req != NULL);

		/* atomically take ownership of the pending request */
		req = xchg(&ecard_req, NULL);
		if (req != NULL) {
			req->fn(req);
			complete(req->complete);
		}
	}
}
/*
 * Wake the expansion card daemon to action our request.
 *
 * FIXME: The test here is not sufficient to detect if the
 * kcardd is running.
 */
static void ecard_call(struct ecard_request *req)
{
	DECLARE_COMPLETION_ONSTACK(completion);

	req->complete = &completion;

	/* ecard_mutex serialises use of the single ecard_req slot */
	mutex_lock(&ecard_mutex);
	ecard_req = req;
	wake_up(&ecard_wait);

	/*
	 * Now wait for kecardd to run.
	 */
	wait_for_completion(&completion);
	mutex_unlock(&ecard_mutex);
}
  264. /* ======================= Mid-level card control ===================== */
  265. static void
  266. ecard_readbytes(void *addr, ecard_t *ec, int off, int len, int useld)
  267. {
  268. struct ecard_request req;
  269. req.fn = ecard_task_readbytes;
  270. req.ec = ec;
  271. req.address = off;
  272. req.length = len;
  273. req.use_loader = useld;
  274. req.buffer = addr;
  275. ecard_call(&req);
  276. }
/*
 * Find chunk number 'num' with ID 'id' in the card's chunk directory
 * and copy its header/description into *cd.  If the plain directory
 * runs out and the card has loader code, the scan is retried through
 * the loader (which is fetched and cached in ec->loader on the way).
 * Returns 1 on success, 0 if the chunk was not found.
 */
int ecard_readchunk(struct in_chunk_dir *cd, ecard_t *ec, int id, int num)
{
	struct ex_chunk_dir excd;
	int index = 16;		/* directory starts after the 16-byte ECId */
	int useld = 0;		/* 0: direct reads; 1: read via the loader */

	if (!ec->cid.cd)
		return 0;	/* card has no chunk directory */

	while(1) {
		ecard_readbytes(&excd, ec, index, 8, useld);
		index += 8;
		if (c_id(&excd) == 0) {			/* end of directory */
			if (!useld && ec->loader) {
				/* restart the scan, this time via the loader */
				useld = 1;
				index = 0;
				continue;
			}
			return 0;
		}
		if (c_id(&excd) == 0xf0) {		/* link */
			index = c_start(&excd);
			continue;
		}
		if (c_id(&excd) == 0x80) {		/* loader */
			if (!ec->loader) {
				ec->loader = kmalloc(c_len(&excd),
						     GFP_KERNEL);
				if (ec->loader)
					ecard_readbytes(ec->loader, ec,
							(int)c_start(&excd),
							c_len(&excd), useld);
				else
					return 0;
			}
			continue;
		}
		if (c_id(&excd) == id && num-- == 0)
			break;
	}

	if (c_id(&excd) & 0x80) {
		switch (c_id(&excd) & 0x70) {
		case 0x70:	/* string chunk: fetch the body as well */
			ecard_readbytes((unsigned char *)excd.d.string, ec,
					(int)c_start(&excd), c_len(&excd),
					useld);
			break;
		case 0x00:
			break;
		}
	}
	cd->start_offset = c_start(&excd);
	memcpy(cd->d.string, excd.d.string, 256);
	return 1;
}
/* ======================= Interrupt control ============================ */
/* Default card operations, used when a driver supplies none.  Pending
 * state is derived from the status byte/mask given in the card's ECId.
 */
static void ecard_def_irq_enable(ecard_t *ec, int irqnr)
{
}

static void ecard_def_irq_disable(ecard_t *ec, int irqnr)
{
}

/* A card without an irqmask is always treated as having an IRQ pending. */
static int ecard_def_irq_pending(ecard_t *ec)
{
	return !ec->irqmask || readb(ec->irqaddr) & ec->irqmask;
}

/* FIQs cannot be routed via the default ops - reaching these is a bug. */
static void ecard_def_fiq_enable(ecard_t *ec, int fiqnr)
{
	panic("ecard_def_fiq_enable called - impossible");
}

static void ecard_def_fiq_disable(ecard_t *ec, int fiqnr)
{
	panic("ecard_def_fiq_disable called - impossible");
}

static int ecard_def_fiq_pending(ecard_t *ec)
{
	return !ec->fiqmask || readb(ec->fiqaddr) & ec->fiqmask;
}
  353. static expansioncard_ops_t ecard_default_ops = {
  354. ecard_def_irq_enable,
  355. ecard_def_irq_disable,
  356. ecard_def_irq_pending,
  357. ecard_def_fiq_enable,
  358. ecard_def_fiq_disable,
  359. ecard_def_fiq_pending
  360. };
/*
 * Enable and disable interrupts from expansion cards.
 * (interrupts are disabled for these functions).
 *
 * They are not meant to be called directly, but via enable/disable_irq.
 */
static void ecard_irq_unmask(struct irq_data *d)
{
	ecard_t *ec = irq_data_get_irq_chip_data(d);

	if (ec) {
		if (!ec->ops)
			ec->ops = &ecard_default_ops;

		/* only a claimed card may have its interrupt enabled */
		if (ec->claimed && ec->ops->irqenable)
			ec->ops->irqenable(ec, d->irq);
		else
			printk(KERN_ERR "ecard: rejecting request to "
				"enable IRQs for %d\n", d->irq);
	}
}
  380. static void ecard_irq_mask(struct irq_data *d)
  381. {
  382. ecard_t *ec = irq_data_get_irq_chip_data(d);
  383. if (ec) {
  384. if (!ec->ops)
  385. ec->ops = &ecard_default_ops;
  386. if (ec->ops && ec->ops->irqdisable)
  387. ec->ops->irqdisable(ec, d->irq);
  388. }
  389. }
/* Interrupt controller for per-card IRQs.  Ack is implemented as mask:
 * expansion cards have no explicit acknowledge cycle.
 */
static struct irq_chip ecard_chip = {
	.name		= "ECARD",
	.irq_ack	= ecard_irq_mask,
	.irq_mask	= ecard_irq_mask,
	.irq_unmask	= ecard_irq_unmask,
};
/* Enable FIQs from the card in the given slot.  The card must be
 * claimed and its ops must provide a fiqenable callback.
 */
void ecard_enablefiq(unsigned int fiqnr)
{
	ecard_t *ec = slot_to_ecard(fiqnr);

	if (ec) {
		if (!ec->ops)
			ec->ops = &ecard_default_ops;

		if (ec->claimed && ec->ops->fiqenable)
			ec->ops->fiqenable(ec, fiqnr);
		else
			printk(KERN_ERR "ecard: rejecting request to "
				"enable FIQs for %d\n", fiqnr);
	}
}
/* Disable FIQs from the card in the given slot, if its ops support it. */
void ecard_disablefiq(unsigned int fiqnr)
{
	ecard_t *ec = slot_to_ecard(fiqnr);

	if (ec) {
		if (!ec->ops)
			ec->ops = &ecard_default_ops;

		if (ec->ops->fiqdisable)
			ec->ops->fiqdisable(ec, fiqnr);
	}
}
/* Dump each card's claim and IRQ-pending state to the console, as a
 * diagnostic aid when an unrecognised/stuck interrupt is detected.
 */
static void ecard_dump_irq_state(void)
{
	ecard_t *ec;

	printk("Expansion card IRQ state:\n");

	for (ec = cards; ec; ec = ec->next) {
		const char *claimed;

		if (ec->slot_no == 8)	/* network slot is not demuxed here */
			continue;

		claimed = ec->claimed ? "" : "not ";

		if (ec->ops && ec->ops->irqpending &&
		    ec->ops != &ecard_default_ops)
			printk(" %d: %sclaimed irq %spending\n",
			       ec->slot_no, claimed,
			       ec->ops->irqpending(ec) ? "" : "not ");
		else
			printk(" %d: %sclaimed irqaddr %p, mask = %02X, status = %02X\n",
			       ec->slot_no, claimed,
			       ec->irqaddr, ec->irqmask, readb(ec->irqaddr));
	}
}
/* Called by the demux handler when no card admitted to an interrupt:
 * warn (rate-limited), and disable the backplane interrupt entirely if
 * it appears to be wedged.
 */
static void ecard_check_lockup(struct irq_desc *desc)
{
	static unsigned long last;	/* jiffies at last unrecognised IRQ */
	static int lockup;		/* unrecognised IRQs within one jiffy */

	/*
	 * If the timer interrupt has not run since the last million
	 * unrecognised expansion card interrupts, then there is
	 * something seriously wrong.  Disable the expansion card
	 * interrupts so at least we can continue.
	 *
	 * Maybe we ought to start a timer to re-enable them some time
	 * later?
	 */
	if (last == jiffies) {
		lockup += 1;
		if (lockup > 1000000) {
			printk(KERN_ERR "\nInterrupt lockup detected - "
			       "disabling all expansion card interrupts\n");

			desc->irq_data.chip->irq_mask(&desc->irq_data);
			ecard_dump_irq_state();
		}
	} else
		lockup = 0;

	/*
	 * If we did not recognise the source of this interrupt,
	 * warn the user, but don't flood the user with these messages.
	 */
	if (!last || time_after(jiffies, last + 5*HZ)) {
		last = jiffies;
		printk(KERN_WARNING "Unrecognised interrupt from backplane\n");
		ecard_dump_irq_state();
	}
}
/*
 * Demultiplex the shared backplane interrupt: with the parent masked,
 * poll each claimed card's pending status and hand pending ones to
 * their per-card IRQ.  If nobody claims the interrupt, run the lockup
 * check.
 */
static void ecard_irq_handler(struct irq_desc *desc)
{
	ecard_t *ec;
	int called = 0;

	desc->irq_data.chip->irq_mask(&desc->irq_data);
	for (ec = cards; ec; ec = ec->next) {
		int pending;

		if (!ec->claimed || !ec->irq || ec->slot_no == 8)
			continue;

		if (ec->ops && ec->ops->irqpending)
			pending = ec->ops->irqpending(ec);
		else
			pending = ecard_default_ops.irqpending(ec);

		if (pending) {
			generic_handle_irq(ec->irq);
			called ++;
		}
	}
	desc->irq_data.chip->irq_unmask(&desc->irq_data);

	if (called == 0)
		ecard_check_lockup(desc);
}
/*
 * Compute the bus address of a card's slot for the requested cycle type
 * and speed, updating the EASI timing control shadow (ectcr) and, where
 * available, the IOMD ECTCR register.  Returns NULL when the slot has
 * no address space of that type.
 */
static void __iomem *__ecard_address(ecard_t *ec, card_type_t type, card_speed_t speed)
{
	void __iomem *address = NULL;
	int slot = ec->slot_no;

	if (ec->slot_no == 8)
		return ECARD_MEMC8_BASE;	/* network slot: fixed address */

	ectcr &= ~(1 << slot);		/* default this slot to normal timing */

	switch (type) {
	case ECARD_MEMC:
		if (slot < 4)		/* only slots 0-3 have MEMC space */
			address = ECARD_MEMC_BASE + (slot << 14);
		break;

	case ECARD_IOC:
		if (slot < 4)
			address = ECARD_IOC_BASE + (slot << 14);
		else
			address = ECARD_IOC4_BASE + ((slot - 4) << 14);
		if (address)
			address += speed << 19;	/* speed selects the IOC cycle */
		break;

	case ECARD_EASI:
		address = ECARD_EASI_BASE + (slot << 24);
		if (speed == ECARD_FAST)
			ectcr |= 1 << slot;	/* fast EASI timing for this slot */
		break;

	default:
		break;
	}

#ifdef IOMD_ECTCR
	iomd_writeb(ectcr, IOMD_ECTCR);
#endif
	return address;
}
/* Emit one /proc line describing a card.  The description string from
 * the card's ROM is fetched once and cached in ec->card_desc.
 */
static int ecard_prints(struct seq_file *m, ecard_t *ec)
{
	seq_printf(m, " %d: %s ", ec->slot_no, ec->easi ? "EASI" : " ");

	if (ec->cid.id == 0) {
		struct in_chunk_dir incd;

		seq_printf(m, "[%04X:%04X] ",
			ec->cid.manufacturer, ec->cid.product);

		/* chunk 0xf5 holds the card's description string */
		if (!ec->card_desc && ec->cid.cd &&
		    ecard_readchunk(&incd, ec, 0xf5, 0)) {
			ec->card_desc = kmalloc(strlen(incd.d.string)+1, GFP_KERNEL);

			if (ec->card_desc)
				strcpy((char *)ec->card_desc, incd.d.string);
		}

		seq_printf(m, "%s\n", ec->card_desc ? ec->card_desc : "*unknown*");
	} else
		seq_printf(m, "Simple card %d\n", ec->cid.id);

	return 0;
}
  545. static int ecard_devices_proc_show(struct seq_file *m, void *v)
  546. {
  547. ecard_t *ec = cards;
  548. while (ec) {
  549. ecard_prints(m, ec);
  550. ec = ec->next;
  551. }
  552. return 0;
  553. }
  554. static struct proc_dir_entry *proc_bus_ecard_dir = NULL;
  555. static void ecard_proc_init(void)
  556. {
  557. proc_bus_ecard_dir = proc_mkdir("bus/ecard", NULL);
  558. proc_create_single("devices", 0, proc_bus_ecard_dir,
  559. ecard_devices_proc_show);
  560. }
/* Fill in resource 'nr' of a card as the memory range [st, st+sz-1],
 * named after the card's device.
 */
#define ec_set_resource(ec,nr,st,sz)				\
	do {							\
		(ec)->resource[nr].name = dev_name(&ec->dev);	\
		(ec)->resource[nr].start = st;			\
		(ec)->resource[nr].end = (st) + (sz) - 1;	\
		(ec)->resource[nr].flags = IORESOURCE_MEM;	\
	} while (0)
/* Release any claimed resources of a card and free the structure. */
static void __init ecard_free_card(struct expansion_card *ec)
{
	int i;

	/* flags is zeroed in ecard_alloc_card() when the claim failed */
	for (i = 0; i < ECARD_NUM_RESOURCES; i++)
		if (ec->resource[i].flags)
			release_resource(&ec->resource[i]);

	kfree(ec);
}
/*
 * Allocate and initialise a card structure for the given slot and
 * addressing type, setting up its device fields and memory resources.
 * Resources that cannot be claimed are zeroed rather than failing the
 * whole allocation.  Returns the card or an ERR_PTR on allocation
 * failure.
 */
static struct expansion_card *__init ecard_alloc_card(int type, int slot)
{
	struct expansion_card *ec;
	unsigned long base;
	int i;

	ec = kzalloc(sizeof(ecard_t), GFP_KERNEL);
	if (!ec) {
		ec = ERR_PTR(-ENOMEM);
		goto nomem;
	}

	ec->slot_no = slot;
	ec->easi = type == ECARD_EASI;
	ec->irq = 0;
	ec->fiq = 0;
	ec->dma = NO_DMA;
	ec->ops = &ecard_default_ops;

	dev_set_name(&ec->dev, "ecard%d", slot);
	ec->dev.parent = NULL;
	ec->dev.bus = &ecard_bus_type;
	ec->dev.dma_mask = &ec->dma_mask;
	ec->dma_mask = (u64)0xffffffff;
	ec->dev.coherent_dma_mask = ec->dma_mask;

	if (slot < 4) {
		/* slots 0-3 have MEMC space as well as IOC space */
		ec_set_resource(ec, ECARD_RES_MEMC,
				PODSLOT_MEMC_BASE + (slot << 14),
				PODSLOT_MEMC_SIZE);
		base = PODSLOT_IOC0_BASE + (slot << 14);
	} else
		base = PODSLOT_IOC4_BASE + ((slot - 4) << 14);

#ifdef CONFIG_ARCH_RPC
	if (slot < 8) {
		ec_set_resource(ec, ECARD_RES_EASI,
				PODSLOT_EASI_BASE + (slot << 24),
				PODSLOT_EASI_SIZE);
	}

	if (slot == 8) {
		/* the network slot has only its fixed MEMC window */
		ec_set_resource(ec, ECARD_RES_MEMC, NETSLOT_BASE, NETSLOT_SIZE);
	} else
#endif
		/* one IOC resource per speed, SLOW through SYNC */
		for (i = 0; i <= ECARD_RES_IOCSYNC - ECARD_RES_IOCSLOW; i++)
			ec_set_resource(ec, i + ECARD_RES_IOCSLOW,
					base + (i << 19), PODSLOT_IOC_SIZE);

	for (i = 0; i < ECARD_NUM_RESOURCES; i++) {
		if (ec->resource[i].flags &&
		    request_resource(&iomem_resource, &ec->resource[i])) {
			dev_err(&ec->dev, "resource(s) not available\n");
			/* zero the resource so ecard_free_card() skips it */
			ec->resource[i].end -= ec->resource[i].start;
			ec->resource[i].start = 0;
			ec->resource[i].flags = 0;
		}
	}

 nomem:
	return ec;
}
/* sysfs attributes exposed for each expansion card device. */
static ssize_t irq_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct expansion_card *ec = ECARD_DEV(dev);
	return sprintf(buf, "%u\n", ec->irq);
}
static DEVICE_ATTR_RO(irq);

static ssize_t dma_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct expansion_card *ec = ECARD_DEV(dev);
	return sprintf(buf, "%u\n", ec->dma);
}
static DEVICE_ATTR_RO(dma);

static ssize_t resource_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct expansion_card *ec = ECARD_DEV(dev);
	char *str = buf;
	int i;

	/* one "start end flags" line per resource */
	for (i = 0; i < ECARD_NUM_RESOURCES; i++)
		str += sprintf(str, "%08x %08x %08lx\n",
				ec->resource[i].start,
				ec->resource[i].end,
				ec->resource[i].flags);

	return str - buf;
}
static DEVICE_ATTR_RO(resource);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct expansion_card *ec = ECARD_DEV(dev);
	return sprintf(buf, "%u\n", ec->cid.manufacturer);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t device_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct expansion_card *ec = ECARD_DEV(dev);
	return sprintf(buf, "%u\n", ec->cid.product);
}
static DEVICE_ATTR_RO(device);

static ssize_t type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct expansion_card *ec = ECARD_DEV(dev);
	return sprintf(buf, "%s\n", ec->easi ? "EASI" : "IOC");
}
static DEVICE_ATTR_RO(type);

static struct attribute *ecard_dev_attrs[] = {
	&dev_attr_device.attr,
	&dev_attr_dma.attr,
	&dev_attr_irq.attr,
	&dev_attr_resource.attr,
	&dev_attr_type.attr,
	&dev_attr_vendor.attr,
	NULL,
};
ATTRIBUTE_GROUPS(ecard_dev);
/*
 * Claim all of a card's memory regions on behalf of its driver.
 * On failure, regions already claimed are released again.
 * Returns 0 on success, -EBUSY if any region is unavailable.
 */
int ecard_request_resources(struct expansion_card *ec)
{
	int i, err = 0;

	for (i = 0; i < ECARD_NUM_RESOURCES; i++) {
		if (ecard_resource_end(ec, i) &&
		    !request_mem_region(ecard_resource_start(ec, i),
					ecard_resource_len(ec, i),
					ec->dev.driver->name)) {
			err = -EBUSY;
			break;
		}
	}

	if (err) {
		/* roll back the regions claimed before the failure */
		while (i--)
			if (ecard_resource_end(ec, i))
				release_mem_region(ecard_resource_start(ec, i),
						   ecard_resource_len(ec, i));
	}
	return err;
}
EXPORT_SYMBOL(ecard_request_resources);
  704. void ecard_release_resources(struct expansion_card *ec)
  705. {
  706. int i;
  707. for (i = 0; i < ECARD_NUM_RESOURCES; i++)
  708. if (ecard_resource_end(ec, i))
  709. release_mem_region(ecard_resource_start(ec, i),
  710. ecard_resource_len(ec, i));
  711. }
  712. EXPORT_SYMBOL(ecard_release_resources);
/*
 * Install a driver's interrupt ops and private data on a card.  The
 * data is written before the ops (with a barrier between), so that once
 * the new ops become visible their irq_data is already valid.
 */
void ecard_setirq(struct expansion_card *ec, const struct expansion_card_ops *ops, void *irq_data)
{
	ec->irq_data = irq_data;
	barrier();
	ec->ops = ops;
}
EXPORT_SYMBOL(ecard_setirq);
/*
 * Device-managed ioremap of (part of) a card resource: maps from
 * 'offset' into resource 'res', capped at 'maxsize' bytes when non-zero.
 * The mapping is released automatically with the device.  Returns NULL
 * if 'offset' lies beyond the resource.
 */
void __iomem *ecardm_iomap(struct expansion_card *ec, unsigned int res,
			   unsigned long offset, unsigned long maxsize)
{
	unsigned long start = ecard_resource_start(ec, res);
	unsigned long end = ecard_resource_end(ec, res);

	if (offset > (end - start))
		return NULL;

	start += offset;
	if (maxsize && end - start > maxsize)
		end = start + maxsize;

	return devm_ioremap(&ec->dev, start, end - start);
}
EXPORT_SYMBOL(ecardm_iomap);
/* Probe-time quirk for the Atomwide 3-port serial card: write zero to
 * the interrupt-enable register of each port so the card starts quiet.
 */
static void atomwide_3p_quirk(ecard_t *ec)
{
	void __iomem *addr = __ecard_address(ec, ECARD_IOC, ECARD_SYNC);
	unsigned int i;

	/* Disable interrupts on each port */
	for (i = 0x2000; i <= 0x2800; i += 0x0400)
		writeb(0, addr + i + 4);
}
/*
 * Probe for an expansion card.
 *
 * If bit 1 of the first byte of the card is set, then the
 * card does not exist.
 *
 * Reads and decodes the card's ECId, applies quirks, hooks up its
 * interrupt, links it into the global card list and registers it with
 * the driver core.  Returns 0 on success, -ENODEV if no card is
 * present, or a negative errno on failure.
 */
static int __init ecard_probe(int slot, unsigned irq, card_type_t type)
{
	ecard_t **ecp;
	ecard_t *ec;
	struct ex_ecid cid;
	void __iomem *addr;
	int i, rc;

	ec = ecard_alloc_card(type, slot);
	if (IS_ERR(ec)) {
		rc = PTR_ERR(ec);
		goto nomem;
	}

	rc = -ENODEV;
	if ((addr = __ecard_address(ec, type, ECARD_SYNC)) == NULL)
		goto nodev;

	/* a present card must read r_zero back as zero */
	cid.r_zero = 1;
	ecard_readbytes(&cid, ec, 0, 16, 0);
	if (cid.r_zero)
		goto nodev;

	/* decode the raw ECId into the card structure */
	ec->cid.id = cid.r_id;
	ec->cid.cd = cid.r_cd;
	ec->cid.is = cid.r_is;
	ec->cid.w = cid.r_w;
	ec->cid.manufacturer = ecard_getu16(cid.r_manu);
	ec->cid.product = ecard_getu16(cid.r_prod);
	ec->cid.country = cid.r_country;
	ec->cid.irqmask = cid.r_irqmask;
	ec->cid.irqoff = ecard_gets24(cid.r_irqoff);
	ec->cid.fiqmask = cid.r_fiqmask;
	ec->cid.fiqoff = ecard_gets24(cid.r_fiqoff);
	ec->fiqaddr =
	ec->irqaddr = addr;

	if (ec->cid.is) {
		/* extended ECId: card supplies its own offsets and masks */
		ec->irqmask = ec->cid.irqmask;
		ec->irqaddr += ec->cid.irqoff;
		ec->fiqmask = ec->cid.fiqmask;
		ec->fiqaddr += ec->cid.fiqoff;
	} else {
		ec->irqmask = 1;
		ec->fiqmask = 4;
	}

	/* apply per-card quirks: fixed descriptions and init fixups */
	for (i = 0; i < ARRAY_SIZE(quirklist); i++)
		if (quirklist[i].manufacturer == ec->cid.manufacturer &&
		    quirklist[i].product == ec->cid.product) {
			if (quirklist[i].type)
				ec->card_desc = quirklist[i].type;
			if (quirklist[i].init)
				quirklist[i].init(ec);
			break;
		}

	ec->irq = irq;

	/*
	 * hook the interrupt handlers
	 */
	if (slot < 8) {
		irq_set_chip_and_handler(ec->irq, &ecard_chip,
					 handle_level_irq);
		irq_set_chip_data(ec->irq, ec);
		irq_clear_status_flags(ec->irq, IRQ_NOREQUEST);
	}

#ifdef CONFIG_ARCH_RPC
	/* On RiscPC, only first two slots have DMA capability */
	if (slot < 2)
		ec->dma = 2 + slot;
#endif

	/* append to the global list and publish the slot mapping */
	for (ecp = &cards; *ecp; ecp = &(*ecp)->next);
	*ecp = ec;
	slot_to_expcard[slot] = ec;

	rc = device_register(&ec->dev);
	if (rc)
		goto nodev;

	return 0;

 nodev:
	ecard_free_card(ec);
 nomem:
	return rc;
}
/*
 * Initialise the expansion card system.
 * Locate all hardware - interrupt management and
 * actual cards.
 */
static int __init ecard_init(void)
{
	struct task_struct *task;
	int slot, irqbase;

	/* one IRQ descriptor per podule slot 0-7 */
	irqbase = irq_alloc_descs(-1, 0, 8, -1);
	if (irqbase < 0)
		return irqbase;

	/* start kecardd first: probing reads card ROMs through it */
	task = kthread_run(ecard_task, NULL, "kecardd");
	if (IS_ERR(task)) {
		printk(KERN_ERR "Ecard: unable to create kernel thread: %ld\n",
		       PTR_ERR(task));
		irq_free_descs(irqbase, 8);
		return PTR_ERR(task);
	}

	printk("Probing expansion cards\n");

	for (slot = 0; slot < 8; slot ++) {
		/* try EASI addressing first, fall back to IOC */
		if (ecard_probe(slot, irqbase + slot, ECARD_EASI) == -ENODEV)
			ecard_probe(slot, irqbase + slot, ECARD_IOC);
	}

	/* the network slot (8) uses the fixed IRQ 11 */
	ecard_probe(8, 11, ECARD_IOC);

	irq_set_chained_handler(IRQ_EXPANSIONCARD, ecard_irq_handler);

	ecard_proc_init();

	return 0;
}

subsys_initcall(ecard_init);
  854. /*
  855. * ECARD "bus"
  856. */
  857. static const struct ecard_id *
  858. ecard_match_device(const struct ecard_id *ids, struct expansion_card *ec)
  859. {
  860. int i;
  861. for (i = 0; ids[i].manufacturer != 65535; i++)
  862. if (ec->cid.manufacturer == ids[i].manufacturer &&
  863. ec->cid.product == ids[i].product)
  864. return ids + i;
  865. return NULL;
  866. }
/* Bus probe callback: mark the card claimed before calling the driver's
 * probe (so its interrupt may be enabled during probe), and unclaim it
 * again if probe fails.
 */
static int ecard_drv_probe(struct device *dev)
{
	struct expansion_card *ec = ECARD_DEV(dev);
	struct ecard_driver *drv = ECARD_DRV(dev->driver);
	const struct ecard_id *id;
	int ret;

	id = ecard_match_device(drv->id_table, ec);

	ec->claimed = 1;
	ret = drv->probe(ec, id);
	if (ret)
		ec->claimed = 0;
	return ret;
}
/* Bus remove callback: detach the driver, then restore the default ops
 * so the card falls back to safe interrupt handling.
 */
static void ecard_drv_remove(struct device *dev)
{
	struct expansion_card *ec = ECARD_DEV(dev);
	struct ecard_driver *drv = ECARD_DRV(dev->driver);

	drv->remove(ec);
	ec->claimed = 0;

	/*
	 * Restore the default operations.  We ensure that the
	 * ops are set before we change the data.
	 */
	ec->ops = &ecard_default_ops;
	barrier();
	ec->irq_data = NULL;
}
/*
 * Before rebooting, we must make sure that the expansion card is in a
 * sensible state, so it can be re-detected.  This means that the first
 * page of the ROM must be visible.  We call the expansion cards reset
 * handler, if any.
 */
static void ecard_drv_shutdown(struct device *dev)
{
	struct expansion_card *ec = ECARD_DEV(dev);
	struct ecard_driver *drv = ECARD_DRV(dev->driver);
	struct ecard_request req;

	if (dev->driver) {
		if (drv->shutdown)
			drv->shutdown(ec);
		ec->claimed = 0;
	}

	/*
	 * If this card has a loader, call the reset handler.
	 */
	if (ec->loader) {
		req.fn = ecard_task_reset;
		req.ec = ec;
		ecard_call(&req);
	}
}
/* Register/unregister an expansion card driver with the ecard bus. */
int ecard_register_driver(struct ecard_driver *drv)
{
	drv->drv.bus = &ecard_bus_type;

	return driver_register(&drv->drv);
}

void ecard_remove_driver(struct ecard_driver *drv)
{
	driver_unregister(&drv->drv);
}
  928. static int ecard_match(struct device *_dev, struct device_driver *_drv)
  929. {
  930. struct expansion_card *ec = ECARD_DEV(_dev);
  931. struct ecard_driver *drv = ECARD_DRV(_drv);
  932. int ret;
  933. if (drv->id_table) {
  934. ret = ecard_match_device(drv->id_table, ec) != NULL;
  935. } else {
  936. ret = ec->cid.id == drv->id;
  937. }
  938. return ret;
  939. }
/* The "ecard" bus: binds probed expansion cards to their drivers. */
struct bus_type ecard_bus_type = {
	.name		= "ecard",
	.dev_groups	= ecard_dev_groups,
	.match		= ecard_match,
	.probe		= ecard_drv_probe,
	.remove		= ecard_drv_remove,
	.shutdown	= ecard_drv_shutdown,
};
/* Register the bus at postcore time, before any card driver initcall. */
static int ecard_bus_init(void)
{
	return bus_register(&ecard_bus_type);
}

postcore_initcall(ecard_bus_init);

EXPORT_SYMBOL(ecard_readchunk);
EXPORT_SYMBOL(ecard_register_driver);
EXPORT_SYMBOL(ecard_remove_driver);
EXPORT_SYMBOL(ecard_bus_type);