/drivers/tty/serial/jsm/jsm_tty.c

// SPDX-License-Identifier: GPL-2.0+
/************************************************************************
 * Copyright 2003 Digi International (www.digi.com)
 *
 * Copyright (C) 2004 IBM Corporation. All rights reserved.
 *
 * Contact Information:
 * Scott H Kilau <Scott_Kilau@digi.com>
 * Ananda Venkatarman <mansarov@us.ibm.com>
 * Modifications:
 * 01/19/06: changed jsm_input routine to use the dynamically allocated
 *    tty_buffer changes. Contributors: Scott Kilau and Ananda V.
 ***********************************************************************/
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_reg.h>
#include <linux/delay.h>	/* For udelay */
#include <linux/pci.h>
#include <linux/slab.h>

#include "jsm.h"

static DECLARE_BITMAP(linemap, MAXLINES);

static void jsm_carrier(struct jsm_channel *ch);

static inline int jsm_get_mstat(struct jsm_channel *ch)
{
	unsigned char mstat;
	int result;

	jsm_dbg(IOCTL, &ch->ch_bd->pci_dev, "start\n");

	mstat = (ch->ch_mostat | ch->ch_mistat);

	result = 0;

	if (mstat & UART_MCR_DTR)
		result |= TIOCM_DTR;
	if (mstat & UART_MCR_RTS)
		result |= TIOCM_RTS;
	if (mstat & UART_MSR_CTS)
		result |= TIOCM_CTS;
	if (mstat & UART_MSR_DSR)
		result |= TIOCM_DSR;
	if (mstat & UART_MSR_RI)
		result |= TIOCM_RI;
	if (mstat & UART_MSR_DCD)
		result |= TIOCM_CD;

	jsm_dbg(IOCTL, &ch->ch_bd->pci_dev, "finish\n");
	return result;
}
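
/*
 * The transmitter is always reported as empty: the driver drains its own
 * write queue through the board ops (see jsm_tty_write()), so the serial
 * core is simply told the hardware has nothing left to send.
 */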
static unsigned int jsm_tty_tx_empty(struct uart_port *port)
{
	return TIOCSER_TEMT;
}

/*
 * Return modem signals to ld.
 */
static unsigned int jsm_tty_get_mctrl(struct uart_port *port)
{
	int result;
	struct jsm_channel *channel =
		container_of(port, struct jsm_channel, uart_port);

	jsm_dbg(IOCTL, &channel->ch_bd->pci_dev, "start\n");

	result = jsm_get_mstat(channel);

	if (result < 0)
		return -ENXIO;

	jsm_dbg(IOCTL, &channel->ch_bd->pci_dev, "finish\n");

	return result;
}

/*
 * jsm_set_modem_info()
 *
 * Set modem signals, called by ld.
 */
static void jsm_tty_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	struct jsm_channel *channel =
		container_of(port, struct jsm_channel, uart_port);

	jsm_dbg(IOCTL, &channel->ch_bd->pci_dev, "start\n");

	if (mctrl & TIOCM_RTS)
		channel->ch_mostat |= UART_MCR_RTS;
	else
		channel->ch_mostat &= ~UART_MCR_RTS;

	if (mctrl & TIOCM_DTR)
		channel->ch_mostat |= UART_MCR_DTR;
	else
		channel->ch_mostat &= ~UART_MCR_DTR;

	channel->ch_bd->bd_ops->assert_modem_signals(channel);

	jsm_dbg(IOCTL, &channel->ch_bd->pci_dev, "finish\n");
	udelay(10);
}

/*
 * jsm_tty_write()
 *
 * Take data from the user or kernel and send it out to the FEP.
 * In here exists all the Transparent Print magic as well.
 */
static void jsm_tty_write(struct uart_port *port)
{
	struct jsm_channel *channel;

	channel = container_of(port, struct jsm_channel, uart_port);
	channel->ch_bd->bd_ops->copy_data_from_queue_to_uart(channel);
}

static void jsm_tty_start_tx(struct uart_port *port)
{
	struct jsm_channel *channel =
		container_of(port, struct jsm_channel, uart_port);

	jsm_dbg(IOCTL, &channel->ch_bd->pci_dev, "start\n");

	channel->ch_flags &= ~(CH_STOP);
	jsm_tty_write(port);

	jsm_dbg(IOCTL, &channel->ch_bd->pci_dev, "finish\n");
}

static void jsm_tty_stop_tx(struct uart_port *port)
{
	struct jsm_channel *channel =
		container_of(port, struct jsm_channel, uart_port);

	jsm_dbg(IOCTL, &channel->ch_bd->pci_dev, "start\n");

	channel->ch_flags |= (CH_STOP);

	jsm_dbg(IOCTL, &channel->ch_bd->pci_dev, "finish\n");
}
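
/*
 * Software flow control helper: when the line discipline asks us to send
 * an XON/XOFF character, forward the request to the board ops so the
 * start or stop character configured in termios (VSTART/VSTOP) goes out.
 */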
static void jsm_tty_send_xchar(struct uart_port *port, char ch)
{
	unsigned long lock_flags;
	struct jsm_channel *channel =
		container_of(port, struct jsm_channel, uart_port);
	struct ktermios *termios;

	spin_lock_irqsave(&port->lock, lock_flags);
	termios = &port->state->port.tty->termios;
	if (ch == termios->c_cc[VSTART])
		channel->ch_bd->bd_ops->send_start_character(channel);

	if (ch == termios->c_cc[VSTOP])
		channel->ch_bd->bd_ops->send_stop_character(channel);
	spin_unlock_irqrestore(&port->lock, lock_flags);
}

static void jsm_tty_stop_rx(struct uart_port *port)
{
	struct jsm_channel *channel =
		container_of(port, struct jsm_channel, uart_port);

	channel->ch_bd->bd_ops->disable_receiver(channel);
}

static void jsm_tty_break(struct uart_port *port, int break_state)
{
	unsigned long lock_flags;
	struct jsm_channel *channel =
		container_of(port, struct jsm_channel, uart_port);

	spin_lock_irqsave(&port->lock, lock_flags);
	if (break_state == -1)
		channel->ch_bd->bd_ops->send_break(channel);
	else
		channel->ch_bd->bd_ops->clear_break(channel);

	spin_unlock_irqrestore(&port->lock, lock_flags);
}
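
/*
 * startup: lazily allocate the per-channel read and error queues with
 * GFP_KERNEL (may sleep, so no locks held), reset the ring indices,
 * snapshot the current termios into the channel and bring the UART up
 * through the board ops.
 */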
static int jsm_tty_open(struct uart_port *port)
{
	struct jsm_board *brd;
	struct jsm_channel *channel =
		container_of(port, struct jsm_channel, uart_port);
	struct ktermios *termios;

	/* Get board pointer from our array of majors we have allocated */
	brd = channel->ch_bd;

	/*
	 * Allocate channel buffers for read/write/error.
	 * Set flag, so we don't get trounced on.
	 */
	channel->ch_flags |= (CH_OPENING);

	/* Drop locks, as malloc with GFP_KERNEL can sleep */

	if (!channel->ch_rqueue) {
		channel->ch_rqueue = kzalloc(RQUEUESIZE, GFP_KERNEL);
		if (!channel->ch_rqueue) {
			jsm_dbg(INIT, &channel->ch_bd->pci_dev,
				"unable to allocate read queue buf\n");
			return -ENOMEM;
		}
	}
	if (!channel->ch_equeue) {
		channel->ch_equeue = kzalloc(EQUEUESIZE, GFP_KERNEL);
		if (!channel->ch_equeue) {
			jsm_dbg(INIT, &channel->ch_bd->pci_dev,
				"unable to allocate error queue buf\n");
			return -ENOMEM;
		}
	}

	channel->ch_flags &= ~(CH_OPENING);
	/*
	 * Initialize if neither terminal is open.
	 */
	jsm_dbg(OPEN, &channel->ch_bd->pci_dev,
		"jsm_open: initializing channel in open...\n");

	/*
	 * Flush input queues.
	 */
	channel->ch_r_head = channel->ch_r_tail = 0;
	channel->ch_e_head = channel->ch_e_tail = 0;

	brd->bd_ops->flush_uart_write(channel);
	brd->bd_ops->flush_uart_read(channel);

	channel->ch_flags = 0;
	channel->ch_cached_lsr = 0;
	channel->ch_stops_sent = 0;

	termios = &port->state->port.tty->termios;
	channel->ch_c_cflag	= termios->c_cflag;
	channel->ch_c_iflag	= termios->c_iflag;
	channel->ch_c_oflag	= termios->c_oflag;
	channel->ch_c_lflag	= termios->c_lflag;
	channel->ch_startc	= termios->c_cc[VSTART];
	channel->ch_stopc	= termios->c_cc[VSTOP];

	/* Tell UART to init itself */
	brd->bd_ops->uart_init(channel);

	/*
	 * Run param in case we changed anything
	 */
	brd->bd_ops->param(channel);

	jsm_carrier(channel);

	channel->ch_open_count++;

	jsm_dbg(OPEN, &channel->ch_bd->pci_dev, "finish\n");
	return 0;
}

static void jsm_tty_close(struct uart_port *port)
{
	struct jsm_board *bd;
	struct jsm_channel *channel =
		container_of(port, struct jsm_channel, uart_port);

	jsm_dbg(CLOSE, &channel->ch_bd->pci_dev, "start\n");

	bd = channel->ch_bd;

	channel->ch_flags &= ~(CH_STOPI);

	channel->ch_open_count--;

	/*
	 * If we have HUPCL set, lower DTR and RTS
	 */
	if (channel->ch_c_cflag & HUPCL) {
		jsm_dbg(CLOSE, &channel->ch_bd->pci_dev,
			"Close. HUPCL set, dropping DTR/RTS\n");

		/* Drop RTS/DTR */
		channel->ch_mostat &= ~(UART_MCR_DTR | UART_MCR_RTS);
		bd->bd_ops->assert_modem_signals(channel);
	}

	/* Turn off UART interrupts for this port */
	channel->ch_bd->bd_ops->uart_off(channel);

	jsm_dbg(CLOSE, &channel->ch_bd->pci_dev, "finish\n");
}

static void jsm_tty_set_termios(struct uart_port *port,
				struct ktermios *termios,
				struct ktermios *old_termios)
{
	unsigned long lock_flags;
	struct jsm_channel *channel =
		container_of(port, struct jsm_channel, uart_port);

	spin_lock_irqsave(&port->lock, lock_flags);
	channel->ch_c_cflag	= termios->c_cflag;
	channel->ch_c_iflag	= termios->c_iflag;
	channel->ch_c_oflag	= termios->c_oflag;
	channel->ch_c_lflag	= termios->c_lflag;
	channel->ch_startc	= termios->c_cc[VSTART];
	channel->ch_stopc	= termios->c_cc[VSTOP];

	channel->ch_bd->bd_ops->param(channel);
	jsm_carrier(channel);
	spin_unlock_irqrestore(&port->lock, lock_flags);
}

static const char *jsm_tty_type(struct uart_port *port)
{
	return "jsm";
}

static void jsm_tty_release_port(struct uart_port *port)
{
}

static int jsm_tty_request_port(struct uart_port *port)
{
	return 0;
}

static void jsm_config_port(struct uart_port *port, int flags)
{
	port->type = PORT_JSM;
}

static const struct uart_ops jsm_ops = {
	.tx_empty	= jsm_tty_tx_empty,
	.set_mctrl	= jsm_tty_set_mctrl,
	.get_mctrl	= jsm_tty_get_mctrl,
	.stop_tx	= jsm_tty_stop_tx,
	.start_tx	= jsm_tty_start_tx,
	.send_xchar	= jsm_tty_send_xchar,
	.stop_rx	= jsm_tty_stop_rx,
	.break_ctl	= jsm_tty_break,
	.startup	= jsm_tty_open,
	.shutdown	= jsm_tty_close,
	.set_termios	= jsm_tty_set_termios,
	.type		= jsm_tty_type,
	.release_port	= jsm_tty_release_port,
	.request_port	= jsm_tty_request_port,
	.config_port	= jsm_config_port,
};
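
/*
 * These ops are hooked into each channel's uart_port in
 * jsm_uart_port_init() below and registered with the serial core via
 * uart_add_one_port().
 */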

/*
 * jsm_tty_init()
 *
 * Init the tty subsystem.  Called once per board after board has been
 * downloaded and init'ed.
 */
int jsm_tty_init(struct jsm_board *brd)
{
	int i;
	void __iomem *vaddr;
	struct jsm_channel *ch;

	if (!brd)
		return -ENXIO;

	jsm_dbg(INIT, &brd->pci_dev, "start\n");

	/*
	 * Initialize board structure elements.
	 */

	brd->nasync = brd->maxports;

	/*
	 * Allocate channel memory that might not have been allocated
	 * when the driver was first loaded.
	 */
	for (i = 0; i < brd->nasync; i++) {
		if (!brd->channels[i]) {

			/*
			 * Okay to malloc with GFP_KERNEL, we are not at
			 * interrupt context, and there are no locks held.
			 */
			brd->channels[i] = kzalloc(sizeof(struct jsm_channel),
						   GFP_KERNEL);
			if (!brd->channels[i]) {
				jsm_dbg(CORE, &brd->pci_dev,
					"%s:%d Unable to allocate memory for channel struct\n",
					__FILE__, __LINE__);
			}
		}
	}

	ch = brd->channels[0];
	vaddr = brd->re_map_membase;

	/* Set up channel variables */
	for (i = 0; i < brd->nasync; i++, ch = brd->channels[i]) {

		if (!brd->channels[i])
			continue;

		spin_lock_init(&ch->ch_lock);

		if (brd->bd_uart_offset == 0x200)
			ch->ch_neo_uart = vaddr + (brd->bd_uart_offset * i);
		else
			ch->ch_cls_uart = vaddr + (brd->bd_uart_offset * i);

		ch->ch_bd = brd;
		ch->ch_portnum = i;

		/* .25 second delay */
		ch->ch_close_delay = 250;

		init_waitqueue_head(&ch->ch_flags_wait);
	}

	jsm_dbg(INIT, &brd->pci_dev, "finish\n");
	return 0;
}
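
/*
 * Register one uart_port per channel with the serial core. Line numbers
 * are handed out from the global 'linemap' bitmap (first free bit) and
 * given back in jsm_remove_uart_port().
 */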
int jsm_uart_port_init(struct jsm_board *brd)
{
	int i, rc;
	unsigned int line;

	if (!brd)
		return -ENXIO;

	jsm_dbg(INIT, &brd->pci_dev, "start\n");

	/*
	 * Initialize board structure elements.
	 */

	brd->nasync = brd->maxports;

	/* Set up channel variables */
	for (i = 0; i < brd->nasync; i++) {

		if (!brd->channels[i])
			continue;

		brd->channels[i]->uart_port.irq = brd->irq;
		brd->channels[i]->uart_port.uartclk = 14745600;
		brd->channels[i]->uart_port.type = PORT_JSM;
		brd->channels[i]->uart_port.iotype = UPIO_MEM;
		brd->channels[i]->uart_port.membase = brd->re_map_membase;
		brd->channels[i]->uart_port.fifosize = 16;
		brd->channels[i]->uart_port.ops = &jsm_ops;
		line = find_first_zero_bit(linemap, MAXLINES);
		if (line >= MAXLINES) {
			printk(KERN_INFO "jsm: linemap is full, added device failed\n");
			continue;
		} else
			set_bit(line, linemap);
		brd->channels[i]->uart_port.line = line;
		rc = uart_add_one_port(&jsm_uart_driver,
				       &brd->channels[i]->uart_port);
		if (rc) {
			printk(KERN_INFO "jsm: Port %d failed. Aborting...\n", i);
			return rc;
		} else
			printk(KERN_INFO "jsm: Port %d added\n", i);
	}

	jsm_dbg(INIT, &brd->pci_dev, "finish\n");
	return 0;
}

int jsm_remove_uart_port(struct jsm_board *brd)
{
	int i;
	struct jsm_channel *ch;

	if (!brd)
		return -ENXIO;

	jsm_dbg(INIT, &brd->pci_dev, "start\n");

	/*
	 * Initialize board structure elements.
	 */

	brd->nasync = brd->maxports;

	/* Set up channel variables */
	for (i = 0; i < brd->nasync; i++) {

		if (!brd->channels[i])
			continue;

		ch = brd->channels[i];

		clear_bit(ch->uart_port.line, linemap);
		uart_remove_one_port(&jsm_uart_driver, &brd->channels[i]->uart_port);
	}

	jsm_dbg(INIT, &brd->pci_dev, "finish\n");
	return 0;
}
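
/*
 * jsm_input()
 *
 * Push received data from the channel's ring buffer up to the tty layer.
 * ch_rqueue holds the data bytes and ch_equeue the matching LSR error
 * flags; the head/tail indices wrap via RQUEUEMASK. Data is dropped if
 * the port is closed or CREAD is off, and left queued while throttled.
 */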
void jsm_input(struct jsm_channel *ch)
{
	struct jsm_board *bd;
	struct tty_struct *tp;
	struct tty_port *port;
	u32 rmask;
	u16 head;
	u16 tail;
	int data_len;
	unsigned long lock_flags;
	int len = 0;
	int s = 0;
	int i = 0;

	jsm_dbg(READ, &ch->ch_bd->pci_dev, "start\n");

	port = &ch->uart_port.state->port;
	tp = port->tty;

	bd = ch->ch_bd;
	if (!bd)
		return;

	spin_lock_irqsave(&ch->ch_lock, lock_flags);

	/*
	 * Figure the number of characters in the buffer.
	 * Exit immediately if none.
	 */
	rmask = RQUEUEMASK;

	head = ch->ch_r_head & rmask;
	tail = ch->ch_r_tail & rmask;

	data_len = (head - tail) & rmask;
	if (data_len == 0) {
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
		return;
	}

	jsm_dbg(READ, &ch->ch_bd->pci_dev, "start\n");

	/*
	 * If the device is not open, or CREAD is off, flush
	 * input data and return immediately.
	 */
	if (!tp || !C_CREAD(tp)) {

		jsm_dbg(READ, &ch->ch_bd->pci_dev,
			"input. dropping %d bytes on port %d...\n",
			data_len, ch->ch_portnum);
		ch->ch_r_head = tail;

		/* Force queue flow control to be released, if needed */
		jsm_check_queue_flow_control(ch);

		spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
		return;
	}

	/*
	 * If we are throttled, simply don't read any data.
	 */
	if (ch->ch_flags & CH_STOPI) {
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
		jsm_dbg(READ, &ch->ch_bd->pci_dev,
			"Port %d throttled, not reading any data. head: %x tail: %x\n",
			ch->ch_portnum, head, tail);
		return;
	}

	jsm_dbg(READ, &ch->ch_bd->pci_dev, "start 2\n");

	len = tty_buffer_request_room(port, data_len);

	/*
	 * len now contains the most amount of data we can copy,
	 * bounded either by the flip buffer size or the amount
	 * of data the card actually has pending...
	 */
	while (len) {
		s = ((head >= tail) ? head : RQUEUESIZE) - tail;
		s = min(s, len);

		if (s <= 0)
			break;

		/*
		 * If conditions are such that ld needs to see all
		 * UART errors, we will have to walk each character
		 * and error byte and send them to the buffer one at
		 * a time.
		 */
		if (I_PARMRK(tp) || I_BRKINT(tp) || I_INPCK(tp)) {
			for (i = 0; i < s; i++) {
				/*
				 * Give the Linux ld the flags in the
				 * format it likes.
				 */
				if (*(ch->ch_equeue + tail + i) & UART_LSR_BI)
					tty_insert_flip_char(port, *(ch->ch_rqueue + tail + i), TTY_BREAK);
				else if (*(ch->ch_equeue + tail + i) & UART_LSR_PE)
					tty_insert_flip_char(port, *(ch->ch_rqueue + tail + i), TTY_PARITY);
				else if (*(ch->ch_equeue + tail + i) & UART_LSR_FE)
					tty_insert_flip_char(port, *(ch->ch_rqueue + tail + i), TTY_FRAME);
				else
					tty_insert_flip_char(port, *(ch->ch_rqueue + tail + i), TTY_NORMAL);
			}
		} else {
			tty_insert_flip_string(port, ch->ch_rqueue + tail, s);
		}
		tail += s;
		len -= s;
		/* Flip queue if needed */
		tail &= rmask;
	}

	ch->ch_r_tail = tail & rmask;
	ch->ch_e_tail = tail & rmask;
	jsm_check_queue_flow_control(ch);
	spin_unlock_irqrestore(&ch->ch_lock, lock_flags);

	/* Tell the tty layer its okay to "eat" the data now */
	tty_flip_buffer_push(port);

	jsm_dbg(IOCTL, &ch->ch_bd->pci_dev, "finish\n");
}
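
/*
 * jsm_carrier()
 *
 * Track carrier state: "virtual" carrier follows CLOCAL, "physical"
 * carrier follows the DCD modem input. Rising edges wake anyone blocked
 * waiting for carrier in open(); the cached CH_FCAR/CH_CD flags are then
 * updated to match.
 */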
static void jsm_carrier(struct jsm_channel *ch)
{
	struct jsm_board *bd;

	int virt_carrier = 0;
	int phys_carrier = 0;

	jsm_dbg(CARR, &ch->ch_bd->pci_dev, "start\n");

	bd = ch->ch_bd;
	if (!bd)
		return;

	if (ch->ch_mistat & UART_MSR_DCD) {
		jsm_dbg(CARR, &ch->ch_bd->pci_dev, "mistat: %x D_CD: %x\n",
			ch->ch_mistat, ch->ch_mistat & UART_MSR_DCD);
		phys_carrier = 1;
	}

	if (ch->ch_c_cflag & CLOCAL)
		virt_carrier = 1;

	jsm_dbg(CARR, &ch->ch_bd->pci_dev, "DCD: physical: %d virt: %d\n",
		phys_carrier, virt_carrier);

	/*
	 * Test for a VIRTUAL carrier transition to HIGH.
	 */
	if (((ch->ch_flags & CH_FCAR) == 0) && (virt_carrier == 1)) {

		/*
		 * When carrier rises, wake any threads waiting
		 * for carrier in the open routine.
		 */
		jsm_dbg(CARR, &ch->ch_bd->pci_dev, "carrier: virt DCD rose\n");

		if (waitqueue_active(&(ch->ch_flags_wait)))
			wake_up_interruptible(&ch->ch_flags_wait);
	}

	/*
	 * Test for a PHYSICAL carrier transition to HIGH.
	 */
	if (((ch->ch_flags & CH_CD) == 0) && (phys_carrier == 1)) {

		/*
		 * When carrier rises, wake any threads waiting
		 * for carrier in the open routine.
		 */
		jsm_dbg(CARR, &ch->ch_bd->pci_dev,
			"carrier: physical DCD rose\n");

		if (waitqueue_active(&(ch->ch_flags_wait)))
			wake_up_interruptible(&ch->ch_flags_wait);
	}

	/*
	 * Test for a PHYSICAL transition to low, so long as we aren't
	 * currently ignoring physical transitions (which is what "virtual
	 * carrier" indicates).
	 *
	 * The transition of the virtual carrier to low really doesn't
	 * matter... it really only means "ignore carrier state", not
	 * "make pretend that carrier is there".
	 */
	if ((virt_carrier == 0) && ((ch->ch_flags & CH_CD) != 0) &&
	    (phys_carrier == 0)) {
		/*
		 * When carrier drops:
		 *
		 * Drop carrier on all open units.
		 *
		 * Flush queues, waking up any task waiting in the
		 * line discipline.
		 *
		 * Send a hangup to the control terminal.
		 *
		 * Enable all select calls.
		 */
		if (waitqueue_active(&(ch->ch_flags_wait)))
			wake_up_interruptible(&ch->ch_flags_wait);
	}

	/*
	 * Make sure that our cached values reflect the current reality.
	 */
	if (virt_carrier == 1)
		ch->ch_flags |= CH_FCAR;
	else
		ch->ch_flags &= ~CH_FCAR;

	if (phys_carrier == 1)
		ch->ch_flags |= CH_CD;
	else
		ch->ch_flags &= ~CH_CD;
}
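
/*
 * Queue flow-control thresholds: input is throttled (receiver disabled
 * or a stop character sent) once fewer than 256 bytes of ring space
 * remain, and released again once more than half of the queue is free.
 */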
void jsm_check_queue_flow_control(struct jsm_channel *ch)
{
	struct board_ops *bd_ops = ch->ch_bd->bd_ops;
	int qleft;

	/* Store how much space we have left in the queue */
	qleft = ch->ch_r_tail - ch->ch_r_head - 1;
	if (qleft < 0)
		qleft += RQUEUEMASK + 1;

	/*
	 * Check to see if we should enforce flow control on our queue because
	 * the ld (or user) isn't reading data out of our queue fast enuf.
	 *
	 * NOTE: This is done based on what the current flow control of the
	 * port is set for.
	 *
	 * 1) HWFLOW (RTS) - Turn off the UART's Receive interrupt.
	 *	This will cause the UART's FIFO to back up, and force
	 *	the RTS signal to be dropped.
	 * 2) SWFLOW (IXOFF) - Keep trying to send a stop character to
	 *	the other side, in hopes it will stop sending data to us.
	 * 3) NONE - Nothing we can do.  We will simply drop any extra data
	 *	that gets sent into us when the queue fills up.
	 */
	if (qleft < 256) {
		/* HWFLOW */
		if (ch->ch_c_cflag & CRTSCTS) {
			if (!(ch->ch_flags & CH_RECEIVER_OFF)) {
				bd_ops->disable_receiver(ch);
				ch->ch_flags |= (CH_RECEIVER_OFF);
				jsm_dbg(READ, &ch->ch_bd->pci_dev,
					"Internal queue hit hilevel mark (%d)! Turning off interrupts\n",
					qleft);
			}
		}
		/* SWFLOW */
		else if (ch->ch_c_iflag & IXOFF) {
			if (ch->ch_stops_sent <= MAX_STOPS_SENT) {
				bd_ops->send_stop_character(ch);
				ch->ch_stops_sent++;
				jsm_dbg(READ, &ch->ch_bd->pci_dev,
					"Sending stop char! Times sent: %x\n",
					ch->ch_stops_sent);
			}
		}
	}

	/*
	 * Check to see if we should unenforce flow control because
	 * ld (or user) finally read enuf data out of our queue.
	 *
	 * NOTE: This is done based on what the current flow control of the
	 * port is set for.
	 *
	 * 1) HWFLOW (RTS) - Turn back on the UART's Receive interrupt.
	 *	This will cause the UART's FIFO to raise RTS back up,
	 *	which will allow the other side to start sending data again.
	 * 2) SWFLOW (IXOFF) - Send a start character to
	 *	the other side, so it will start sending data to us again.
	 * 3) NONE - Do nothing. Since we didn't do anything to turn off the
	 *	other side, we don't need to do anything now.
	 */
	if (qleft > (RQUEUESIZE / 2)) {
		/* HWFLOW */
		if (ch->ch_c_cflag & CRTSCTS) {
			if (ch->ch_flags & CH_RECEIVER_OFF) {
				bd_ops->enable_receiver(ch);
				ch->ch_flags &= ~(CH_RECEIVER_OFF);
				jsm_dbg(READ, &ch->ch_bd->pci_dev,
					"Internal queue hit lowlevel mark (%d)! Turning on interrupts\n",
					qleft);
			}
		}
		/* SWFLOW */
		else if (ch->ch_c_iflag & IXOFF && ch->ch_stops_sent) {
			ch->ch_stops_sent = 0;
			bd_ops->send_start_character(ch);
			jsm_dbg(READ, &ch->ch_bd->pci_dev, "Sending start char!\n");
		}
	}
}