
/drivers/isdn/i4l/isdn_ppp.c

http://github.com/mirrors/linux
  1. /* $Id: isdn_ppp.c,v 1.1.2.3 2004/02/10 01:07:13 keil Exp $
  2. *
  3. * Linux ISDN subsystem, functions for synchronous PPP (linklevel).
  4. *
  5. * Copyright 1995,96 by Michael Hipp (Michael.Hipp@student.uni-tuebingen.de)
  6. *
  7. * This software may be used and distributed according to the terms
  8. * of the GNU General Public License, incorporated herein by reference.
  9. *
  10. */
  11. #include <linux/isdn.h>
  12. #include <linux/poll.h>
  13. #include <linux/ppp-comp.h>
  14. #include <linux/slab.h>
  15. #ifdef CONFIG_IPPP_FILTER
  16. #include <linux/filter.h>
  17. #endif
  18. #include "isdn_common.h"
  19. #include "isdn_ppp.h"
  20. #include "isdn_net.h"
  21. #ifndef PPP_IPX
  22. #define PPP_IPX 0x002b
  23. #endif
  24. /* Prototypes */
  25. static int isdn_ppp_fill_rq(unsigned char *buf, int len, int proto, int slot);
  26. static int isdn_ppp_closewait(int slot);
  27. static void isdn_ppp_push_higher(isdn_net_dev *net_dev, isdn_net_local *lp,
  28. struct sk_buff *skb, int proto);
  29. static int isdn_ppp_if_get_unit(char *namebuf);
  30. static int isdn_ppp_set_compressor(struct ippp_struct *is, struct isdn_ppp_comp_data *);
  31. static struct sk_buff *isdn_ppp_decompress(struct sk_buff *,
  32. struct ippp_struct *, struct ippp_struct *, int *proto);
  33. static void isdn_ppp_receive_ccp(isdn_net_dev *net_dev, isdn_net_local *lp,
  34. struct sk_buff *skb, int proto);
  35. static struct sk_buff *isdn_ppp_compress(struct sk_buff *skb_in, int *proto,
  36. struct ippp_struct *is, struct ippp_struct *master, int type);
  37. static void isdn_ppp_send_ccp(isdn_net_dev *net_dev, isdn_net_local *lp,
  38. struct sk_buff *skb);
  39. /* New CCP stuff */
  40. static void isdn_ppp_ccp_kickup(struct ippp_struct *is);
  41. static void isdn_ppp_ccp_xmit_reset(struct ippp_struct *is, int proto,
  42. unsigned char code, unsigned char id,
  43. unsigned char *data, int len);
  44. static struct ippp_ccp_reset *isdn_ppp_ccp_reset_alloc(struct ippp_struct *is);
  45. static void isdn_ppp_ccp_reset_free(struct ippp_struct *is);
  46. static void isdn_ppp_ccp_reset_free_state(struct ippp_struct *is,
  47. unsigned char id);
  48. static void isdn_ppp_ccp_timer_callback(unsigned long closure);
  49. static struct ippp_ccp_reset_state *isdn_ppp_ccp_reset_alloc_state(struct ippp_struct *is,
  50. unsigned char id);
  51. static void isdn_ppp_ccp_reset_trans(struct ippp_struct *is,
  52. struct isdn_ppp_resetparams *rp);
  53. static void isdn_ppp_ccp_reset_ack_rcvd(struct ippp_struct *is,
  54. unsigned char id);
  55. #ifdef CONFIG_ISDN_MPP
  56. static ippp_bundle *isdn_ppp_bundle_arr = NULL;
  57. static int isdn_ppp_mp_bundle_array_init(void);
  58. static int isdn_ppp_mp_init(isdn_net_local *lp, ippp_bundle *add_to);
  59. static void isdn_ppp_mp_receive(isdn_net_dev *net_dev, isdn_net_local *lp,
  60. struct sk_buff *skb);
  61. static void isdn_ppp_mp_cleanup(isdn_net_local *lp);
  62. static int isdn_ppp_bundle(struct ippp_struct *, int unit);
  63. #endif /* CONFIG_ISDN_MPP */
  64. char *isdn_ppp_revision = "$Revision: 1.1.2.3 $";
  65. static struct ippp_struct *ippp_table[ISDN_MAX_CHANNELS];
  66. static struct isdn_ppp_compressor *ipc_head = NULL;
  67. /*
  68. * frame log (debug)
  69. */
  70. static void
  71. isdn_ppp_frame_log(char *info, char *data, int len, int maxlen, int unit, int slot)
  72. {
  73. int cnt,
  74. j,
  75. i;
  76. char buf[80];
  77. if (len < maxlen)
  78. maxlen = len;
  79. for (i = 0, cnt = 0; cnt < maxlen; i++) {
  80. for (j = 0; j < 16 && cnt < maxlen; j++, cnt++)
  81. sprintf(buf + j * 3, "%02x ", (unsigned char)data[cnt]);
  82. printk(KERN_DEBUG "[%d/%d].%s[%d]: %s\n", unit, slot, info, i, buf);
  83. }
  84. }
  85. /*
  86. * unbind isdn_net_local <=> ippp-device
  87. * note: it can happen that we hang up/free the master before the slaves;
  88. * in this case we bind another lp to the master device
  89. */
  90. int
  91. isdn_ppp_free(isdn_net_local *lp)
  92. {
  93. struct ippp_struct *is;
  94. if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) {
  95. printk(KERN_ERR "%s: ppp_slot(%d) out of range\n",
  96. __func__, lp->ppp_slot);
  97. return 0;
  98. }
  99. #ifdef CONFIG_ISDN_MPP
  100. spin_lock(&lp->netdev->pb->lock);
  101. #endif
  102. isdn_net_rm_from_bundle(lp);
  103. #ifdef CONFIG_ISDN_MPP
  104. if (lp->netdev->pb->ref_ct == 1) /* last link in queue? */
  105. isdn_ppp_mp_cleanup(lp);
  106. lp->netdev->pb->ref_ct--;
  107. spin_unlock(&lp->netdev->pb->lock);
  108. #endif /* CONFIG_ISDN_MPP */
  109. if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) {
  110. printk(KERN_ERR "%s: ppp_slot(%d) now invalid\n",
  111. __func__, lp->ppp_slot);
  112. return 0;
  113. }
  114. is = ippp_table[lp->ppp_slot];
  115. if ((is->state & IPPP_CONNECT))
  116. isdn_ppp_closewait(lp->ppp_slot); /* force wakeup on ippp device */
  117. else if (is->state & IPPP_ASSIGNED)
  118. is->state = IPPP_OPEN; /* fallback to 'OPEN but not ASSIGNED' state */
  119. if (is->debug & 0x1)
  120. printk(KERN_DEBUG "isdn_ppp_free %d %lx %lx\n", lp->ppp_slot, (long) lp, (long) is->lp);
  121. is->lp = NULL; /* link is down .. set lp to NULL */
  122. lp->ppp_slot = -1; /* is this OK ?? */
  123. return 0;
  124. }
  125. /*
  126. * bind isdn_net_local <=> ippp-device
  127. *
  128. * This function is always called while holding dev->lock, so
  129. * no additional lock is needed
  130. */
  131. int
  132. isdn_ppp_bind(isdn_net_local *lp)
  133. {
  134. int i;
  135. int unit = 0;
  136. struct ippp_struct *is;
  137. int retval;
  138. if (lp->pppbind < 0) { /* device bound to an ippp device? */
  139. isdn_net_dev *net_dev = dev->netdev;
  140. char exclusive[ISDN_MAX_CHANNELS]; /* exclusive flags */
  141. memset(exclusive, 0, ISDN_MAX_CHANNELS);
  142. while (net_dev) { /* step through net devices to find exclusive minors */
  143. isdn_net_local *lp = net_dev->local;
  144. if (lp->pppbind >= 0)
  145. exclusive[lp->pppbind] = 1;
  146. net_dev = net_dev->next;
  147. }
  148. /*
  149. * search a free device / slot
  150. */
  151. for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
  152. if (ippp_table[i]->state == IPPP_OPEN && !exclusive[ippp_table[i]->minor]) { /* OPEN, but not connected! */
  153. break;
  154. }
  155. }
  156. } else {
  157. for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
  158. if (ippp_table[i]->minor == lp->pppbind &&
  159. (ippp_table[i]->state & IPPP_OPEN) == IPPP_OPEN)
  160. break;
  161. }
  162. }
  163. if (i >= ISDN_MAX_CHANNELS) {
  164. printk(KERN_WARNING "isdn_ppp_bind: Can't find a (free) connection to the ipppd daemon.\n");
  165. retval = -1;
  166. goto out;
  167. }
  168. /* get unit number from interface name .. ugly! */
  169. unit = isdn_ppp_if_get_unit(lp->netdev->dev->name);
  170. if (unit < 0) {
  171. printk(KERN_ERR "isdn_ppp_bind: illegal interface name %s.\n",
  172. lp->netdev->dev->name);
  173. retval = -1;
  174. goto out;
  175. }
  176. lp->ppp_slot = i;
  177. is = ippp_table[i];
  178. is->lp = lp;
  179. is->unit = unit;
  180. is->state = IPPP_OPEN | IPPP_ASSIGNED; /* assigned to a netdevice but not connected */
  181. #ifdef CONFIG_ISDN_MPP
  182. retval = isdn_ppp_mp_init(lp, NULL);
  183. if (retval < 0)
  184. goto out;
  185. #endif /* CONFIG_ISDN_MPP */
  186. retval = lp->ppp_slot;
  187. out:
  188. return retval;
  189. }
  190. /*
  191. * kick the ipppd on the device
  192. * (wakes up daemon after B-channel connect)
  193. */
  194. void
  195. isdn_ppp_wakeup_daemon(isdn_net_local *lp)
  196. {
  197. if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) {
  198. printk(KERN_ERR "%s: ppp_slot(%d) out of range\n",
  199. __func__, lp->ppp_slot);
  200. return;
  201. }
  202. ippp_table[lp->ppp_slot]->state = IPPP_OPEN | IPPP_CONNECT | IPPP_NOBLOCK;
  203. wake_up_interruptible(&ippp_table[lp->ppp_slot]->wq);
  204. }
  205. /*
  206. * there was a hangup on the netdevice
  207. * force wakeup of the ippp device
  208. * go into 'device waits for release' state
  209. */
  210. static int
  211. isdn_ppp_closewait(int slot)
  212. {
  213. struct ippp_struct *is;
  214. if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
  215. printk(KERN_ERR "%s: slot(%d) out of range\n",
  216. __func__, slot);
  217. return 0;
  218. }
  219. is = ippp_table[slot];
  220. if (is->state)
  221. wake_up_interruptible(&is->wq);
  222. is->state = IPPP_CLOSEWAIT;
  223. return 1;
  224. }
  225. /*
  226. * isdn_ppp_get_slot .. find a free (unused) ippp slot
  227. */
  228. static int
  229. isdn_ppp_get_slot(void)
  230. {
  231. int i;
  232. for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
  233. if (!ippp_table[i]->state)
  234. return i;
  235. }
  236. return -1;
  237. }
  238. /*
  239. * isdn_ppp_open
  240. */
  241. int
  242. isdn_ppp_open(int min, struct file *file)
  243. {
  244. int slot;
  245. struct ippp_struct *is;
  246. if (min < 0 || min >= ISDN_MAX_CHANNELS)
  247. return -ENODEV;
  248. slot = isdn_ppp_get_slot();
  249. if (slot < 0) {
  250. return -EBUSY;
  251. }
  252. is = file->private_data = ippp_table[slot];
  253. printk(KERN_DEBUG "ippp, open, slot: %d, minor: %d, state: %04x\n",
  254. slot, min, is->state);
  255. /* compression stuff */
  256. is->link_compressor = is->compressor = NULL;
  257. is->link_decompressor = is->decompressor = NULL;
  258. is->link_comp_stat = is->comp_stat = NULL;
  259. is->link_decomp_stat = is->decomp_stat = NULL;
  260. is->compflags = 0;
  261. is->reset = isdn_ppp_ccp_reset_alloc(is);
  262. if (!is->reset)
  263. return -ENOMEM;
  264. is->lp = NULL;
  265. is->mp_seqno = 0; /* MP sequence number */
  266. is->pppcfg = 0; /* ppp configuration */
  267. is->mpppcfg = 0; /* mppp configuration */
  268. is->last_link_seqno = -1; /* MP: maybe set to Bundle-MIN, when joining a bundle ?? */
  269. is->unit = -1; /* set, when we have our interface */
  270. is->mru = 1524; /* MRU, default 1524 */
  271. is->maxcid = 16; /* VJ: maxcid */
  272. is->tk = current;
  273. init_waitqueue_head(&is->wq);
  274. is->first = is->rq + NUM_RCV_BUFFS - 1; /* receive queue */
  275. is->last = is->rq;
  276. is->minor = min;
  277. #ifdef CONFIG_ISDN_PPP_VJ
  278. /*
  279. * VJ header compression init
  280. */
  281. is->slcomp = slhc_init(16, 16); /* not necessary for the 2nd link in a bundle */
  282. if (IS_ERR(is->slcomp)) {
  283. isdn_ppp_ccp_reset_free(is);
  284. return PTR_ERR(is->slcomp);
  285. }
  286. #endif
  287. #ifdef CONFIG_IPPP_FILTER
  288. is->pass_filter = NULL;
  289. is->active_filter = NULL;
  290. #endif
  291. is->state = IPPP_OPEN;
  292. return 0;
  293. }
  294. /*
  295. * release ippp device
  296. */
  297. void
  298. isdn_ppp_release(int min, struct file *file)
  299. {
  300. int i;
  301. struct ippp_struct *is;
  302. if (min < 0 || min >= ISDN_MAX_CHANNELS)
  303. return;
  304. is = file->private_data;
  305. if (!is) {
  306. printk(KERN_ERR "%s: no file->private_data\n", __func__);
  307. return;
  308. }
  309. if (is->debug & 0x1)
  310. printk(KERN_DEBUG "ippp: release, minor: %d %lx\n", min, (long) is->lp);
  311. if (is->lp) { /* a lp address says: this link is still up */
  312. isdn_net_dev *p = is->lp->netdev;
  313. if (!p) {
  314. printk(KERN_ERR "%s: no lp->netdev\n", __func__);
  315. return;
  316. }
  317. is->state &= ~IPPP_CONNECT; /* -> effect: no call of wakeup */
  318. /*
  319. * isdn_net_hangup() calls isdn_ppp_free()
  320. * isdn_ppp_free() sets is->lp to NULL and lp->ppp_slot to -1
  321. * removing the IPPP_CONNECT flag prevents isdn_ppp_wakeup_daemon() from being called
  322. */
  323. isdn_net_hangup(p->dev);
  324. }
  325. for (i = 0; i < NUM_RCV_BUFFS; i++) {
  326. kfree(is->rq[i].buf);
  327. is->rq[i].buf = NULL;
  328. }
  329. is->first = is->rq + NUM_RCV_BUFFS - 1; /* receive queue */
  330. is->last = is->rq;
  331. #ifdef CONFIG_ISDN_PPP_VJ
  332. /* TODO: if this was the previous master: link the slcomp to the new master */
  333. slhc_free(is->slcomp);
  334. is->slcomp = NULL;
  335. #endif
  336. #ifdef CONFIG_IPPP_FILTER
  337. if (is->pass_filter) {
  338. bpf_prog_destroy(is->pass_filter);
  339. is->pass_filter = NULL;
  340. }
  341. if (is->active_filter) {
  342. bpf_prog_destroy(is->active_filter);
  343. is->active_filter = NULL;
  344. }
  345. #endif
  346. /* TODO: if this was the previous master: link the stuff to the new master */
  347. if (is->comp_stat)
  348. is->compressor->free(is->comp_stat);
  349. if (is->link_comp_stat)
  350. is->link_compressor->free(is->link_comp_stat);
  351. if (is->link_decomp_stat)
  352. is->link_decompressor->free(is->link_decomp_stat);
  353. if (is->decomp_stat)
  354. is->decompressor->free(is->decomp_stat);
  355. is->compressor = is->link_compressor = NULL;
  356. is->decompressor = is->link_decompressor = NULL;
  357. is->comp_stat = is->link_comp_stat = NULL;
  358. is->decomp_stat = is->link_decomp_stat = NULL;
  359. /* Clean up if necessary */
  360. if (is->reset)
  361. isdn_ppp_ccp_reset_free(is);
  362. /* this slot is ready for new connections */
  363. is->state = 0;
  364. }
  365. /*
  366. * get_arg .. ioctl helper
  367. */
  368. static int
  369. get_arg(void __user *b, void *val, int len)
  370. {
  371. if (len <= 0)
  372. len = sizeof(void *);
  373. if (copy_from_user(val, b, len))
  374. return -EFAULT;
  375. return 0;
  376. }
  377. /*
  378. * set arg .. ioctl helper
  379. */
  380. static int
  381. set_arg(void __user *b, void *val, int len)
  382. {
  383. if (len <= 0)
  384. len = sizeof(void *);
  385. if (copy_to_user(b, val, len))
  386. return -EFAULT;
  387. return 0;
  388. }
  389. #ifdef CONFIG_IPPP_FILTER
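/* Copy a BPF filter program from user space: returns the number of
 * sock_filter instructions on success, 0 for an empty program, or a
 * negative errno; *p receives a kernel copy of the filter code. */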
  390. static int get_filter(void __user *arg, struct sock_filter **p)
  391. {
  392. struct sock_fprog uprog;
  393. struct sock_filter *code = NULL;
  394. int len;
  395. if (copy_from_user(&uprog, arg, sizeof(uprog)))
  396. return -EFAULT;
  397. if (!uprog.len) {
  398. *p = NULL;
  399. return 0;
  400. }
  401. /* uprog.len is unsigned short, so no overflow here */
  402. len = uprog.len * sizeof(struct sock_filter);
  403. code = memdup_user(uprog.filter, len);
  404. if (IS_ERR(code))
  405. return PTR_ERR(code);
  406. *p = code;
  407. return uprog.len;
  408. }
  409. #endif /* CONFIG_IPPP_FILTER */
  410. /*
  411. * ippp device ioctl
  412. */
  413. int
  414. isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
  415. {
  416. unsigned long val;
  417. int r, i, j;
  418. struct ippp_struct *is;
  419. isdn_net_local *lp;
  420. struct isdn_ppp_comp_data data;
  421. void __user *argp = (void __user *)arg;
  422. is = file->private_data;
  423. lp = is->lp;
  424. if (is->debug & 0x1)
  425. printk(KERN_DEBUG "isdn_ppp_ioctl: minor: %d cmd: %x state: %x\n", min, cmd, is->state);
  426. if (!(is->state & IPPP_OPEN))
  427. return -EINVAL;
  428. switch (cmd) {
  429. case PPPIOCBUNDLE:
  430. #ifdef CONFIG_ISDN_MPP
  431. if (!(is->state & IPPP_CONNECT))
  432. return -EINVAL;
  433. if ((r = get_arg(argp, &val, sizeof(val))))
  434. return r;
  435. printk(KERN_DEBUG "iPPP-bundle: minor: %d, slave unit: %d, master unit: %d\n",
  436. (int) min, (int) is->unit, (int) val);
  437. return isdn_ppp_bundle(is, val);
  438. #else
  439. return -1;
  440. #endif
  441. break;
  442. case PPPIOCGUNIT: /* get ppp/isdn unit number */
  443. if ((r = set_arg(argp, &is->unit, sizeof(is->unit))))
  444. return r;
  445. break;
  446. case PPPIOCGIFNAME:
  447. if (!lp)
  448. return -EINVAL;
  449. if ((r = set_arg(argp, lp->netdev->dev->name,
  450. strlen(lp->netdev->dev->name))))
  451. return r;
  452. break;
  453. case PPPIOCGMPFLAGS: /* get configuration flags */
  454. if ((r = set_arg(argp, &is->mpppcfg, sizeof(is->mpppcfg))))
  455. return r;
  456. break;
  457. case PPPIOCSMPFLAGS: /* set configuration flags */
  458. if ((r = get_arg(argp, &val, sizeof(val))))
  459. return r;
  460. is->mpppcfg = val;
  461. break;
  462. case PPPIOCGFLAGS: /* get configuration flags */
  463. if ((r = set_arg(argp, &is->pppcfg, sizeof(is->pppcfg))))
  464. return r;
  465. break;
  466. case PPPIOCSFLAGS: /* set configuration flags */
  467. if ((r = get_arg(argp, &val, sizeof(val)))) {
  468. return r;
  469. }
  470. if (val & SC_ENABLE_IP && !(is->pppcfg & SC_ENABLE_IP) && (is->state & IPPP_CONNECT)) {
  471. if (lp) {
  472. /* OK .. we are ready to send buffers */
  473. is->pppcfg = val; /* isdn_ppp_xmit test for SC_ENABLE_IP !!! */
  474. netif_wake_queue(lp->netdev->dev);
  475. break;
  476. }
  477. }
  478. is->pppcfg = val;
  479. break;
  480. case PPPIOCGIDLE: /* get idle time information */
  481. if (lp) {
  482. struct ppp_idle pidle;
  483. pidle.xmit_idle = pidle.recv_idle = lp->huptimer;
  484. if ((r = set_arg(argp, &pidle, sizeof(struct ppp_idle))))
  485. return r;
  486. }
  487. break;
  488. case PPPIOCSMRU: /* set receive unit size for PPP */
  489. if ((r = get_arg(argp, &val, sizeof(val))))
  490. return r;
  491. is->mru = val;
  492. break;
  493. case PPPIOCSMPMRU:
  494. break;
  495. case PPPIOCSMPMTU:
  496. break;
  497. case PPPIOCSMAXCID: /* set the maximum compression slot id */
  498. if ((r = get_arg(argp, &val, sizeof(val))))
  499. return r;
  500. val++;
  501. if (is->maxcid != val) {
  502. #ifdef CONFIG_ISDN_PPP_VJ
  503. struct slcompress *sltmp;
  504. #endif
  505. if (is->debug & 0x1)
  506. printk(KERN_DEBUG "ippp, ioctl: changed MAXCID to %ld\n", val);
  507. is->maxcid = val;
  508. #ifdef CONFIG_ISDN_PPP_VJ
  509. sltmp = slhc_init(16, val);
  510. if (IS_ERR(sltmp))
  511. return PTR_ERR(sltmp);
  512. if (is->slcomp)
  513. slhc_free(is->slcomp);
  514. is->slcomp = sltmp;
  515. #endif
  516. }
  517. break;
  518. case PPPIOCGDEBUG:
  519. if ((r = set_arg(argp, &is->debug, sizeof(is->debug))))
  520. return r;
  521. break;
  522. case PPPIOCSDEBUG:
  523. if ((r = get_arg(argp, &val, sizeof(val))))
  524. return r;
  525. is->debug = val;
  526. break;
  527. case PPPIOCGCOMPRESSORS:
  528. {
  529. unsigned long protos[8] = {0,};
  530. struct isdn_ppp_compressor *ipc = ipc_head;
  531. while (ipc) {
  532. j = ipc->num / (sizeof(long) * 8);
  533. i = ipc->num % (sizeof(long) * 8);
  534. if (j < 8)
  535. protos[j] |= (1UL << i);
  536. ipc = ipc->next;
  537. }
  538. if ((r = set_arg(argp, protos, 8 * sizeof(long))))
  539. return r;
  540. }
  541. break;
  542. case PPPIOCSCOMPRESSOR:
  543. if ((r = get_arg(argp, &data, sizeof(struct isdn_ppp_comp_data))))
  544. return r;
  545. return isdn_ppp_set_compressor(is, &data);
  546. case PPPIOCGCALLINFO:
  547. {
  548. struct pppcallinfo pci;
  549. memset((char *)&pci, 0, sizeof(struct pppcallinfo));
  550. if (lp)
  551. {
  552. strncpy(pci.local_num, lp->msn, 63);
  553. if (lp->dial) {
  554. strncpy(pci.remote_num, lp->dial->num, 63);
  555. }
  556. pci.charge_units = lp->charge;
  557. if (lp->outgoing)
  558. pci.calltype = CALLTYPE_OUTGOING;
  559. else
  560. pci.calltype = CALLTYPE_INCOMING;
  561. if (lp->flags & ISDN_NET_CALLBACK)
  562. pci.calltype |= CALLTYPE_CALLBACK;
  563. }
  564. return set_arg(argp, &pci, sizeof(struct pppcallinfo));
  565. }
  566. #ifdef CONFIG_IPPP_FILTER
  567. case PPPIOCSPASS:
  568. {
  569. struct sock_fprog_kern fprog;
  570. struct sock_filter *code;
  571. int err, len = get_filter(argp, &code);
  572. if (len < 0)
  573. return len;
  574. fprog.len = len;
  575. fprog.filter = code;
  576. if (is->pass_filter) {
  577. bpf_prog_destroy(is->pass_filter);
  578. is->pass_filter = NULL;
  579. }
  580. if (fprog.filter != NULL)
  581. err = bpf_prog_create(&is->pass_filter, &fprog);
  582. else
  583. err = 0;
  584. kfree(code);
  585. return err;
  586. }
  587. case PPPIOCSACTIVE:
  588. {
  589. struct sock_fprog_kern fprog;
  590. struct sock_filter *code;
  591. int err, len = get_filter(argp, &code);
  592. if (len < 0)
  593. return len;
  594. fprog.len = len;
  595. fprog.filter = code;
  596. if (is->active_filter) {
  597. bpf_prog_destroy(is->active_filter);
  598. is->active_filter = NULL;
  599. }
  600. if (fprog.filter != NULL)
  601. err = bpf_prog_create(&is->active_filter, &fprog);
  602. else
  603. err = 0;
  604. kfree(code);
  605. return err;
  606. }
  607. #endif /* CONFIG_IPPP_FILTER */
  608. default:
  609. break;
  610. }
  611. return 0;
  612. }
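/*
 * poll() handler for the ippp character device: the device is always
 * writable; it becomes readable as soon as the receive queue holds data
 * or the IPPP_NOBLOCK flag is set.
 */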
  613. unsigned int
  614. isdn_ppp_poll(struct file *file, poll_table *wait)
  615. {
  616. u_int mask;
  617. struct ippp_buf_queue *bf, *bl;
  618. u_long flags;
  619. struct ippp_struct *is;
  620. is = file->private_data;
  621. if (is->debug & 0x2)
  622. printk(KERN_DEBUG "isdn_ppp_poll: minor: %d\n",
  623. iminor(file_inode(file)));
  624. /* just registers wait_queue hook. This doesn't really wait. */
  625. poll_wait(file, &is->wq, wait);
  626. if (!(is->state & IPPP_OPEN)) {
  627. if (is->state == IPPP_CLOSEWAIT)
  628. return POLLHUP;
  629. printk(KERN_DEBUG "isdn_ppp: device not open\n");
  630. return POLLERR;
  631. }
  632. /* we're always ready to send .. */
  633. mask = POLLOUT | POLLWRNORM;
  634. spin_lock_irqsave(&is->buflock, flags);
  635. bl = is->last;
  636. bf = is->first;
  637. /*
  638. * if IPPP_NOBLOCK is set we return even if we have nothing to read
  639. */
  640. if (bf->next != bl || (is->state & IPPP_NOBLOCK)) {
  641. is->state &= ~IPPP_NOBLOCK;
  642. mask |= POLLIN | POLLRDNORM;
  643. }
  644. spin_unlock_irqrestore(&is->buflock, flags);
  645. return mask;
  646. }
  647. /*
  648. * fill up isdn_ppp_read() queue ..
  649. */
  650. static int
  651. isdn_ppp_fill_rq(unsigned char *buf, int len, int proto, int slot)
  652. {
  653. struct ippp_buf_queue *bf, *bl;
  654. u_long flags;
  655. u_char *nbuf;
  656. struct ippp_struct *is;
  657. if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
  658. printk(KERN_WARNING "ippp: illegal slot(%d).\n", slot);
  659. return 0;
  660. }
  661. is = ippp_table[slot];
  662. if (!(is->state & IPPP_CONNECT)) {
  663. printk(KERN_DEBUG "ippp: device not activated.\n");
  664. return 0;
  665. }
  666. nbuf = kmalloc(len + 4, GFP_ATOMIC);
  667. if (!nbuf) {
  668. printk(KERN_WARNING "ippp: Can't alloc buf\n");
  669. return 0;
  670. }
  671. nbuf[0] = PPP_ALLSTATIONS;
  672. nbuf[1] = PPP_UI;
  673. nbuf[2] = proto >> 8;
  674. nbuf[3] = proto & 0xff;
  675. memcpy(nbuf + 4, buf, len);
  676. spin_lock_irqsave(&is->buflock, flags);
  677. bf = is->first;
  678. bl = is->last;
  679. if (bf == bl) {
  680. printk(KERN_WARNING "ippp: Queue is full; discarding first buffer\n");
  681. bf = bf->next;
  682. kfree(bf->buf);
  683. is->first = bf;
  684. }
  685. bl->buf = (char *) nbuf;
  686. bl->len = len + 4;
  687. is->last = bl->next;
  688. spin_unlock_irqrestore(&is->buflock, flags);
  689. wake_up_interruptible(&is->wq);
  690. return len;
  691. }
  692. /*
  693. * read() .. non-blocking: ipppd calls it only after select()
  694. * reports that there is data
  695. */
  696. int
  697. isdn_ppp_read(int min, struct file *file, char __user *buf, int count)
  698. {
  699. struct ippp_struct *is;
  700. struct ippp_buf_queue *b;
  701. u_long flags;
  702. u_char *save_buf;
  703. is = file->private_data;
  704. if (!(is->state & IPPP_OPEN))
  705. return 0;
  706. if (!access_ok(VERIFY_WRITE, buf, count))
  707. return -EFAULT;
  708. spin_lock_irqsave(&is->buflock, flags);
  709. b = is->first->next;
  710. save_buf = b->buf;
  711. if (!save_buf) {
  712. spin_unlock_irqrestore(&is->buflock, flags);
  713. return -EAGAIN;
  714. }
  715. if (b->len < count)
  716. count = b->len;
  717. b->buf = NULL;
  718. is->first = b;
  719. spin_unlock_irqrestore(&is->buflock, flags);
  720. if (copy_to_user(buf, save_buf, count))
  721. count = -EFAULT;
  722. kfree(save_buf);
  723. return count;
  724. }
  725. /*
  726. * ipppd wants to write a packet to the card .. non-blocking
  727. */
  728. int
  729. isdn_ppp_write(int min, struct file *file, const char __user *buf, int count)
  730. {
  731. isdn_net_local *lp;
  732. struct ippp_struct *is;
  733. int proto;
  734. unsigned char protobuf[4];
  735. is = file->private_data;
  736. if (!(is->state & IPPP_CONNECT))
  737. return 0;
  738. lp = is->lp;
  739. /* -> push it directly to the lowlevel interface */
  740. if (!lp)
  741. printk(KERN_DEBUG "isdn_ppp_write: lp == NULL\n");
  742. else {
  743. /*
  744. * Don't reset huptimer for
  745. * LCP packets. (Echo requests).
  746. */
  747. if (copy_from_user(protobuf, buf, 4))
  748. return -EFAULT;
  749. proto = PPP_PROTOCOL(protobuf);
  750. if (proto != PPP_LCP)
  751. lp->huptimer = 0;
  752. if (lp->isdn_device < 0 || lp->isdn_channel < 0)
  753. return 0;
  754. if ((dev->drv[lp->isdn_device]->flags & DRV_FLAG_RUNNING) &&
  755. lp->dialstate == 0 &&
  756. (lp->flags & ISDN_NET_CONNECTED)) {
  757. unsigned short hl;
  758. struct sk_buff *skb;
  759. /*
  760. * we need to reserve enough space in front of
  761. * sk_buff. The old call to dev_alloc_skb only reserved
  762. * 16 bytes; now we reserve whatever headroom the driver wants
  763. */
  764. hl = dev->drv[lp->isdn_device]->interface->hl_hdrlen;
  765. skb = alloc_skb(hl + count, GFP_ATOMIC);
  766. if (!skb) {
  767. printk(KERN_WARNING "isdn_ppp_write: out of memory!\n");
  768. return count;
  769. }
  770. skb_reserve(skb, hl);
  771. if (copy_from_user(skb_put(skb, count), buf, count))
  772. {
  773. kfree_skb(skb);
  774. return -EFAULT;
  775. }
  776. if (is->debug & 0x40) {
  777. printk(KERN_DEBUG "ppp xmit: len %d\n", (int) skb->len);
  778. isdn_ppp_frame_log("xmit", skb->data, skb->len, 32, is->unit, lp->ppp_slot);
  779. }
  780. isdn_ppp_send_ccp(lp->netdev, lp, skb); /* keeps CCP/compression states in sync */
  781. isdn_net_write_super(lp, skb);
  782. }
  783. }
  784. return count;
  785. }
  786. /*
  787. * init memory, structures etc.
  788. */
  789. int
  790. isdn_ppp_init(void)
  791. {
  792. int i,
  793. j;
  794. #ifdef CONFIG_ISDN_MPP
  795. if (isdn_ppp_mp_bundle_array_init() < 0)
  796. return -ENOMEM;
  797. #endif /* CONFIG_ISDN_MPP */
  798. for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
  799. if (!(ippp_table[i] = kzalloc(sizeof(struct ippp_struct), GFP_KERNEL))) {
  800. printk(KERN_WARNING "isdn_ppp_init: Could not alloc ippp_table\n");
  801. for (j = 0; j < i; j++)
  802. kfree(ippp_table[j]);
  803. return -1;
  804. }
  805. spin_lock_init(&ippp_table[i]->buflock);
  806. ippp_table[i]->state = 0;
  807. ippp_table[i]->first = ippp_table[i]->rq + NUM_RCV_BUFFS - 1;
  808. ippp_table[i]->last = ippp_table[i]->rq;
  809. for (j = 0; j < NUM_RCV_BUFFS; j++) {
  810. ippp_table[i]->rq[j].buf = NULL;
  811. ippp_table[i]->rq[j].last = ippp_table[i]->rq +
  812. (NUM_RCV_BUFFS + j - 1) % NUM_RCV_BUFFS;
  813. ippp_table[i]->rq[j].next = ippp_table[i]->rq + (j + 1) % NUM_RCV_BUFFS;
  814. }
  815. }
  816. return 0;
  817. }
  818. void
  819. isdn_ppp_cleanup(void)
  820. {
  821. int i;
  822. for (i = 0; i < ISDN_MAX_CHANNELS; i++)
  823. kfree(ippp_table[i]);
  824. #ifdef CONFIG_ISDN_MPP
  825. kfree(isdn_ppp_bundle_arr);
  826. #endif /* CONFIG_ISDN_MPP */
  827. }
  828. /*
  829. * check for address/control field and skip if allowed
  830. * retval != 0 -> discard packet silently
  831. */
  832. static int isdn_ppp_skip_ac(struct ippp_struct *is, struct sk_buff *skb)
  833. {
  834. if (skb->len < 1)
  835. return -1;
  836. if (skb->data[0] == 0xff) {
  837. if (skb->len < 2)
  838. return -1;
  839. if (skb->data[1] != 0x03)
  840. return -1;
  841. // skip address/control (AC) field
  842. skb_pull(skb, 2);
  843. } else {
  844. if (is->pppcfg & SC_REJ_COMP_AC)
  845. // if AC compression was not negotiated, but used, discard packet
  846. return -1;
  847. }
  848. return 0;
  849. }
  850. /*
  851. * get the PPP protocol header and pull skb
  852. * retval < 0 -> discard packet silently
  853. */
  854. static int isdn_ppp_strip_proto(struct sk_buff *skb)
  855. {
  856. int proto;
  857. if (skb->len < 1)
  858. return -1;
  859. if (skb->data[0] & 0x1) {
  860. // protocol field is compressed
  861. proto = skb->data[0];
  862. skb_pull(skb, 1);
  863. } else {
  864. if (skb->len < 2)
  865. return -1;
  866. proto = ((int) skb->data[0] << 8) + skb->data[1];
  867. skb_pull(skb, 2);
  868. }
  869. return proto;
  870. }
  871. /*
  872. * handler for incoming packets on a syncPPP interface
  873. */
  874. void isdn_ppp_receive(isdn_net_dev *net_dev, isdn_net_local *lp, struct sk_buff *skb)
  875. {
  876. struct ippp_struct *is;
  877. int slot;
  878. int proto;
  879. BUG_ON(net_dev->local->master); // we're called with the master device always
  880. slot = lp->ppp_slot;
  881. if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
  882. printk(KERN_ERR "isdn_ppp_receive: lp->ppp_slot(%d)\n",
  883. lp->ppp_slot);
  884. kfree_skb(skb);
  885. return;
  886. }
  887. is = ippp_table[slot];
  888. if (is->debug & 0x4) {
  889. printk(KERN_DEBUG "ippp_receive: is:%08lx lp:%08lx slot:%d unit:%d len:%d\n",
  890. (long)is, (long)lp, lp->ppp_slot, is->unit, (int)skb->len);
  891. isdn_ppp_frame_log("receive", skb->data, skb->len, 32, is->unit, lp->ppp_slot);
  892. }
  893. if (isdn_ppp_skip_ac(is, skb) < 0) {
  894. kfree_skb(skb);
  895. return;
  896. }
  897. proto = isdn_ppp_strip_proto(skb);
  898. if (proto < 0) {
  899. kfree_skb(skb);
  900. return;
  901. }
  902. #ifdef CONFIG_ISDN_MPP
  903. if (is->compflags & SC_LINK_DECOMP_ON) {
  904. skb = isdn_ppp_decompress(skb, is, NULL, &proto);
  905. if (!skb) // decompression error
  906. return;
  907. }
  908. if (!(is->mpppcfg & SC_REJ_MP_PROT)) { // we agreed to receive MPPP
  909. if (proto == PPP_MP) {
  910. isdn_ppp_mp_receive(net_dev, lp, skb);
  911. return;
  912. }
  913. }
  914. #endif
  915. isdn_ppp_push_higher(net_dev, lp, skb, proto);
  916. }
  917. /*
  918. * we receive a reassembled frame, MPPP has been taken care of before.
  919. * address/control and protocol have been stripped from the skb
  920. * note: net_dev has to be master net_dev
  921. */
  922. static void
  923. isdn_ppp_push_higher(isdn_net_dev *net_dev, isdn_net_local *lp, struct sk_buff *skb, int proto)
  924. {
  925. struct net_device *dev = net_dev->dev;
  926. struct ippp_struct *is, *mis;
  927. isdn_net_local *mlp = NULL;
  928. int slot;
  929. slot = lp->ppp_slot;
  930. if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
  931. printk(KERN_ERR "isdn_ppp_push_higher: lp->ppp_slot(%d)\n",
  932. lp->ppp_slot);
  933. goto drop_packet;
  934. }
  935. is = ippp_table[slot];
  936. if (lp->master) { // FIXME?
  937. mlp = ISDN_MASTER_PRIV(lp);
  938. slot = mlp->ppp_slot;
  939. if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
  940. printk(KERN_ERR "isdn_ppp_push_higher: master->ppp_slot(%d)\n",
  941. lp->ppp_slot);
  942. goto drop_packet;
  943. }
  944. }
  945. mis = ippp_table[slot];
  946. if (is->debug & 0x10) {
  947. printk(KERN_DEBUG "push, skb %d %04x\n", (int) skb->len, proto);
  948. isdn_ppp_frame_log("rpush", skb->data, skb->len, 32, is->unit, lp->ppp_slot);
  949. }
  950. if (mis->compflags & SC_DECOMP_ON) {
  951. skb = isdn_ppp_decompress(skb, is, mis, &proto);
  952. if (!skb) // decompression error
  953. return;
  954. }
  955. switch (proto) {
  956. case PPP_IPX: /* untested */
  957. if (is->debug & 0x20)
  958. printk(KERN_DEBUG "isdn_ppp: IPX\n");
  959. skb->protocol = htons(ETH_P_IPX);
  960. break;
  961. case PPP_IP:
  962. if (is->debug & 0x20)
  963. printk(KERN_DEBUG "isdn_ppp: IP\n");
  964. skb->protocol = htons(ETH_P_IP);
  965. break;
  966. case PPP_COMP:
  967. case PPP_COMPFRAG:
  968. printk(KERN_INFO "isdn_ppp: unexpected compressed frame dropped\n");
  969. goto drop_packet;
  970. #ifdef CONFIG_ISDN_PPP_VJ
  971. case PPP_VJC_UNCOMP:
  972. if (is->debug & 0x20)
  973. printk(KERN_DEBUG "isdn_ppp: VJC_UNCOMP\n");
  974. if (net_dev->local->ppp_slot < 0) {
  975. printk(KERN_ERR "%s: net_dev->local->ppp_slot(%d) out of range\n",
  976. __func__, net_dev->local->ppp_slot);
  977. goto drop_packet;
  978. }
  979. if (slhc_remember(ippp_table[net_dev->local->ppp_slot]->slcomp, skb->data, skb->len) <= 0) {
  980. printk(KERN_WARNING "isdn_ppp: received illegal VJC_UNCOMP frame!\n");
  981. goto drop_packet;
  982. }
  983. skb->protocol = htons(ETH_P_IP);
  984. break;
  985. case PPP_VJC_COMP:
  986. if (is->debug & 0x20)
  987. printk(KERN_DEBUG "isdn_ppp: VJC_COMP\n");
  988. {
  989. struct sk_buff *skb_old = skb;
  990. int pkt_len;
  991. skb = dev_alloc_skb(skb_old->len + 128);
  992. if (!skb) {
  993. printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name);
  994. skb = skb_old;
  995. goto drop_packet;
  996. }
  997. skb_put(skb, skb_old->len + 128);
  998. skb_copy_from_linear_data(skb_old, skb->data,
  999. skb_old->len);
  1000. if (net_dev->local->ppp_slot < 0) {
  1001. printk(KERN_ERR "%s: net_dev->local->ppp_slot(%d) out of range\n",
  1002. __func__, net_dev->local->ppp_slot);
  1003. goto drop_packet;
  1004. }
  1005. pkt_len = slhc_uncompress(ippp_table[net_dev->local->ppp_slot]->slcomp,
  1006. skb->data, skb_old->len);
  1007. kfree_skb(skb_old);
  1008. if (pkt_len < 0)
  1009. goto drop_packet;
  1010. skb_trim(skb, pkt_len);
  1011. skb->protocol = htons(ETH_P_IP);
  1012. }
  1013. break;
  1014. #endif
  1015. case PPP_CCP:
  1016. case PPP_CCPFRAG:
  1017. isdn_ppp_receive_ccp(net_dev, lp, skb, proto);
  1018. /* Don't pop ResetReq/Ack frames up to the daemon any
  1019. longer - the job is done already */
  1020. if (skb->data[0] == CCP_RESETREQ ||
  1021. skb->data[0] == CCP_RESETACK)
  1022. break;
  1023. /* fall through */
  1024. default:
  1025. isdn_ppp_fill_rq(skb->data, skb->len, proto, lp->ppp_slot); /* push data to pppd device */
  1026. kfree_skb(skb);
  1027. return;
  1028. }
  1029. #ifdef CONFIG_IPPP_FILTER
  1030. /* check if the packet passes the pass and active filters
  1031. * the filter instructions are constructed assuming
  1032. * a four-byte PPP header on each packet (which is still present) */
  1033. skb_push(skb, 4);
  1034. {
  1035. u_int16_t *p = (u_int16_t *) skb->data;
  1036. *p = 0; /* indicate inbound */
  1037. }
  1038. if (is->pass_filter
  1039. && BPF_PROG_RUN(is->pass_filter, skb) == 0) {
  1040. if (is->debug & 0x2)
  1041. printk(KERN_DEBUG "IPPP: inbound frame filtered.\n");
  1042. kfree_skb(skb);
  1043. return;
  1044. }
  1045. if (!(is->active_filter
  1046. && BPF_PROG_RUN(is->active_filter, skb) == 0)) {
  1047. if (is->debug & 0x2)
  1048. printk(KERN_DEBUG "IPPP: link-active filter: resetting huptimer.\n");
  1049. lp->huptimer = 0;
  1050. if (mlp)
  1051. mlp->huptimer = 0;
  1052. }
  1053. skb_pull(skb, 4);
  1054. #else /* CONFIG_IPPP_FILTER */
  1055. lp->huptimer = 0;
  1056. if (mlp)
  1057. mlp->huptimer = 0;
  1058. #endif /* CONFIG_IPPP_FILTER */
  1059. skb->dev = dev;
  1060. skb_reset_mac_header(skb);
  1061. netif_rx(skb);
  1062. /* net_dev->local->stats.rx_packets++; done in isdn_net.c */
  1063. return;
  1064. drop_packet:
  1065. net_dev->local->stats.rx_dropped++;
  1066. kfree_skb(skb);
  1067. }
  1068. /*
  1069. * isdn_ppp_skb_push ..
  1070. * checks whether we have enough space at the beginning of the skb
  1071. * and allocs a new SKB if necessary
  1072. */
  1073. static unsigned char *isdn_ppp_skb_push(struct sk_buff **skb_p, int len)
  1074. {
  1075. struct sk_buff *skb = *skb_p;
  1076. if (skb_headroom(skb) < len) {
  1077. struct sk_buff *nskb = skb_realloc_headroom(skb, len);
  1078. if (!nskb) {
  1079. printk(KERN_ERR "isdn_ppp_skb_push: can't realloc headroom!\n");
  1080. dev_kfree_skb(skb);
  1081. return NULL;
  1082. }
  1083. printk(KERN_DEBUG "isdn_ppp_skb_push:under %d %d\n", skb_headroom(skb), len);
  1084. dev_kfree_skb(skb);
  1085. *skb_p = nskb;
  1086. return skb_push(nskb, len);
  1087. }
  1088. return skb_push(skb, len);
  1089. }
  1090. /*
  1091. * send ppp frame .. we expect a PIDCOMPressable proto --
  1092. * (here: currently always PPP_IP,PPP_VJC_COMP,PPP_VJC_UNCOMP)
  1093. *
  1094. * VJ compression may change skb pointer!!! .. requeue with old
  1095. * skb isn't allowed!!
  1096. */
  1097. int
  1098. isdn_ppp_xmit(struct sk_buff *skb, struct net_device *netdev)
  1099. {
  1100. isdn_net_local *lp, *mlp;
  1101. isdn_net_dev *nd;
  1102. unsigned int proto = PPP_IP; /* 0x21 */
  1103. struct ippp_struct *ipt, *ipts;
  1104. int slot, retval = NETDEV_TX_OK;
  1105. mlp = netdev_priv(netdev);
  1106. nd = mlp->netdev; /* get master lp */
  1107. slot = mlp->ppp_slot;
  1108. if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
  1109. printk(KERN_ERR "isdn_ppp_xmit: lp->ppp_slot(%d)\n",
  1110. mlp->ppp_slot);
  1111. kfree_skb(skb);
  1112. goto out;
  1113. }
  1114. ipts = ippp_table[slot];
  1115. if (!(ipts->pppcfg & SC_ENABLE_IP)) { /* PPP connected ? */
  1116. if (ipts->debug & 0x1)
  1117. printk(KERN_INFO "%s: IP frame delayed.\n", netdev->name);
  1118. retval = NETDEV_TX_BUSY;
  1119. goto out;
  1120. }
  1121. switch (ntohs(skb->protocol)) {
  1122. case ETH_P_IP:
  1123. proto = PPP_IP;
  1124. break;
  1125. case ETH_P_IPX:
  1126. proto = PPP_IPX; /* untested */
  1127. break;
  1128. default:
  1129. printk(KERN_ERR "isdn_ppp: skipped unsupported protocol: %#x.\n",
  1130. skb->protocol);
  1131. dev_kfree_skb(skb);
  1132. goto out;
  1133. }
  1134. lp = isdn_net_get_locked_lp(nd);
  1135. if (!lp) {
  1136. printk(KERN_WARNING "%s: all channels busy - requeuing!\n", netdev->name);
  1137. retval = NETDEV_TX_BUSY;
  1138. goto out;
  1139. }
  1140. /* we have our lp locked from now on */
  1141. slot = lp->ppp_slot;
  1142. if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
  1143. printk(KERN_ERR "isdn_ppp_xmit: lp->ppp_slot(%d)\n",
  1144. lp->ppp_slot);
  1145. kfree_skb(skb);
  1146. goto unlock;
  1147. }
  1148. ipt = ippp_table[slot];
  1149. /*
  1150. * after this line .. requeueing in the device queue is no longer allowed!!!
  1151. */
  1152. /* Pull off the fake header we stuck on earlier to keep
  1153. * the fragmentation code happy.
  1154. */
  1155. skb_pull(skb, IPPP_MAX_HEADER);
  1156. #ifdef CONFIG_IPPP_FILTER
  1157. /* check if we should pass this packet
  1158. * the filter instructions are constructed assuming
  1159. * a four-byte PPP header on each packet */
  1160. *skb_push(skb, 4) = 1; /* indicate outbound */
  1161. {
  1162. __be16 *p = (__be16 *)skb->data;
  1163. p++;
  1164. *p = htons(proto);
  1165. }
  1166. if (ipt->pass_filter
  1167. && BPF_PROG_RUN(ipt->pass_filter, skb) == 0) {
  1168. if (ipt->debug & 0x4)
  1169. printk(KERN_DEBUG "IPPP: outbound frame filtered.\n");
  1170. kfree_skb(skb);
  1171. goto unlock;
  1172. }
  1173. if (!(ipt->active_filter
  1174. && BPF_PROG_RUN(ipt->active_filter, skb) == 0)) {
  1175. if (ipt->debug & 0x4)
  1176. printk(KERN_DEBUG "IPPP: link-active filter: resetting huptimer.\n");
  1177. lp->huptimer = 0;
  1178. }
  1179. skb_pull(skb, 4);
  1180. #else /* CONFIG_IPPP_FILTER */
  1181. lp->huptimer = 0;
  1182. #endif /* CONFIG_IPPP_FILTER */
  1183. if (ipt->debug & 0x4)
  1184. printk(KERN_DEBUG "xmit skb, len %d\n", (int) skb->len);
  1185. if (ipts->debug & 0x40)
  1186. isdn_ppp_frame_log("xmit0", skb->data, skb->len, 32, ipts->unit, lp->ppp_slot);
  1187. #ifdef CONFIG_ISDN_PPP_VJ
  1188. if (proto == PPP_IP && ipts->pppcfg & SC_COMP_TCP) { /* ipts here? probably yes, but check this again */
  1189. struct sk_buff *new_skb;
  1190. unsigned short hl;
  1191. /*
  1192. * we need to reserve enough space in front of
  1193. * sk_buff. The old call to dev_alloc_skb only reserved
  1194. * 16 bytes; now we reserve whatever headroom the driver wants.
  1195. */
  1196. hl = dev->drv[lp->isdn_device]->interface->hl_hdrlen + IPPP_MAX_HEADER;
  1197. /*
  1198. * Note: hl might still be insufficient because the method
  1199. * above does not account for a possible MPPP slave channel
  1200. * which had larger HL header space requirements than the
  1201. * master.
  1202. */
  1203. new_skb = alloc_skb(hl + skb->len, GFP_ATOMIC);
  1204. if (new_skb) {
  1205. u_char *buf;
  1206. int pktlen;
  1207. skb_reserve(new_skb, hl);
  1208. new_skb->dev = skb->dev;
  1209. skb_put(new_skb, skb->len);
  1210. buf = skb->data;
  1211. pktlen = slhc_compress(ipts->slcomp, skb->data, skb->len, new_skb->data,
  1212. &buf, !(ipts->pppcfg & SC_NO_TCP_CCID));
  1213. if (buf != skb->data) {
  1214. if (new_skb->data != buf)
  1215. printk(KERN_ERR "isdn_ppp: FATAL error after slhc_compress!!\n");
  1216. dev_kfree_skb(skb);
  1217. skb = new_skb;
  1218. } else {
  1219. dev_kfree_skb(new_skb);
  1220. }
  1221. skb_trim(skb, pktlen);
  1222. if (skb->data[0] & SL_TYPE_COMPRESSED_TCP) { /* cslip? style -> PPP */
  1223. proto = PPP_VJC_COMP;
  1224. skb->data[0] ^= SL_TYPE_COMPRESSED_TCP;
  1225. } else {
  1226. if (skb->data[0] >= SL_TYPE_UNCOMPRESSED_TCP)
  1227. proto = PPP_VJC_UNCOMP;
  1228. skb->data[0] = (skb->data[0] & 0x0f) | 0x40;
  1229. }
  1230. }
  1231. }
  1232. #endif
  1233. /*
  1234. * normal (single link) or bundle compression
  1235. */
  1236. if (ipts->compflags & SC_COMP_ON) {
  1237. /* We send compressed only if both down- and upstream
  1238. compression is negotiated, that means, CCP is up */
  1239. if (ipts->compflags & SC_DECOMP_ON) {
  1240. skb = isdn_ppp_compress(skb, &proto, ipt, ipts, 0);
  1241. } else {
  1242. printk(KERN_DEBUG "isdn_ppp: CCP not yet up - sending as-is\n");
  1243. }
  1244. }
  1245. if (ipt->debug & 0x24)
  1246. printk(KERN_DEBUG "xmit2 skb, len %d, proto %04x\n", (int) skb->len, proto);
  1247. #ifdef CONFIG_ISDN_MPP
  1248. if (ipt->mpppcfg & SC_MP_PROT) {
  1249. /* we get mp_seqno from static isdn_net_local */
  1250. long mp_seqno = ipts->mp_seqno;
  1251. ipts->mp_seqno++;
  1252. if (ipt->mpppcfg & SC_OUT_SHORT_SEQ) {
  1253. unsigned char *data = isdn_ppp_skb_push(&skb, 3);
  1254. if (!data)
  1255. goto unlock;
  1256. mp_seqno &= 0xfff;
  1257. data[0] = MP_BEGIN_FRAG | MP_END_FRAG | ((mp_seqno >> 8) & 0xf); /* (B)egin & (E)ndbit .. */
  1258. data[1] = mp_seqno & 0xff;
  1259. data[2] = proto; /* PID compression */
  1260. } else {
  1261. unsigned char *data = isdn_ppp_skb_push(&skb, 5);
  1262. if (!data)
  1263. goto unlock;
  1264. data[0] = MP_BEGIN_FRAG | MP_END_FRAG; /* (B)egin & (E)ndbit .. */
  1265. data[1] = (mp_seqno >> 16) & 0xff; /* sequence number: 24bit */
  1266. data[2] = (mp_seqno >> 8) & 0xff;
  1267. data[3] = (mp_seqno >> 0) & 0xff;
  1268. data[4] = proto; /* PID compression */
  1269. }
  1270. proto = PPP_MP; /* MP Protocol, 0x003d */
  1271. }
  1272. #endif
  1273. /*
  1274. * 'link in bundle' compression ...
  1275. */
  1276. if (ipt->compflags & SC_LINK_COMP_ON)
  1277. skb = isdn_ppp_compress(skb, &proto, ipt, ipts, 1);
  1278. if ((ipt->pppcfg & SC_COMP_PROT) && (proto <= 0xff)) {
  1279. unsigned char *data = isdn_ppp_skb_push(&skb, 1);
  1280. if (!data)
  1281. goto unlock;
  1282. data[0] = proto & 0xff;
  1283. }
  1284. else {
  1285. unsigned char *data = isdn_ppp_skb_push(&skb, 2);
  1286. if (!data)
  1287. goto unlock;
  1288. data[0] = (proto >> 8) & 0xff;
  1289. data[1] = proto & 0xff;
  1290. }
  1291. if (!(ipt->pppcfg & SC_COMP_AC)) {
  1292. unsigned char *data = isdn_ppp_skb_push(&skb, 2);
  1293. if (!data)
  1294. goto unlock;
  1295. data[0] = 0xff; /* All Stations */
  1296. data[1] = 0x03; /* Unnumbered information */
  1297. }
  1298. /* tx-stats are now updated via BSENT-callback */
  1299. if (ipts->debug & 0x40) {
  1300. printk(KERN_DEBUG "skb xmit: len: %d\n", (int) skb->len);
  1301. isdn_ppp_frame_log("xmit", skb->data, skb->len, 32, ipt->unit, lp->ppp_slot);
  1302. }
  1303. isdn_net_writebuf_skb(lp, skb);
  1304. unlock:
  1305. spin_unlock_bh(&lp->xmit_lock);
  1306. out:
  1307. return retval;
  1308. }
  1309. #ifdef CONFIG_IPPP_FILTER
  1310. /*
  1311. * check if this packet may trigger auto-dial.
  1312. */
  1313. int isdn_ppp_autodial_filter(struct sk_buff *skb, isdn_net_local *lp)
  1314. {
  1315. struct ippp_struct *is = ippp_table[lp->ppp_slot];
  1316. u_int16_t proto;
  1317. int drop = 0;
  1318. switch (ntohs(skb->protocol)) {
  1319. case ETH_P_IP:
  1320. proto = PPP_IP;
  1321. break;
  1322. case ETH_P_IPX:
  1323. proto = PPP_IPX;
  1324. break;
  1325. default:
  1326. printk(KERN_ERR "isdn_ppp_autodial_filter: unsupported protocol 0x%x.\n",
  1327. skb->protocol);
  1328. return 1;
  1329. }
  1330. /* the filter instructions are constructed assuming
  1331. * a four-byte PPP header on each packet. we have to
  1332. * temporarily remove part of the fake header stuck on
  1333. * earlier.
  1334. */
  1335. *skb_pull(skb, IPPP_MAX_HEADER - 4) = 1; /* indicate outbound */
  1336. {
  1337. __be16 *p = (__be16 *)skb->data;
  1338. p++;
  1339. *p = htons(proto);
  1340. }
  1341. drop |= is->pass_filter
  1342. && BPF_PROG_RUN(is->pass_filter, skb) == 0;
  1343. drop |= is->active_filter
  1344. && BPF_PROG_RUN(is->active_filter, skb) == 0;
  1345. skb_push(skb, IPPP_MAX_HEADER - 4);
  1346. return drop;
  1347. }
  1348. #endif
  1349. #ifdef CONFIG_ISDN_MPP
  1350. /* this is _not_ the rfc1990 header, but something we convert both short and long
  1351. * headers to for convenience's sake:
  1352. * byte 0 is flags as in rfc1990
  1353. * bytes 1...4 hold the 24-bit sequence number converted to host byte order
  1354. */
  1355. #define MP_HEADER_LEN 5
  1356. #define MP_LONGSEQ_MASK 0x00ffffff
  1357. #define MP_SHORTSEQ_MASK 0x00000fff
  1358. #define MP_LONGSEQ_MAX MP_LONGSEQ_MASK
  1359. #define MP_SHORTSEQ_MAX MP_SHORTSEQ_MASK
  1360. #define MP_LONGSEQ_MAXBIT ((MP_LONGSEQ_MASK + 1) >> 1)
  1361. #define MP_SHORTSEQ_MAXBIT ((MP_SHORTSEQ_MASK + 1) >> 1)
  1362. /* sequence-wrap safe comparisons (for long sequence)*/
  1363. #define MP_LT(a, b) ((a - b) & MP_LONGSEQ_MAXBIT)
  1364. #define MP_LE(a, b) !((b - a) & MP_LONGSEQ_MAXBIT)
  1365. #define MP_GT(a, b) ((b - a) & MP_LONGSEQ_MAXBIT)
  1366. #define MP_GE(a, b) !((a - b) & MP_LONGSEQ_MAXBIT)
  1367. #define MP_SEQ(f) ((*(u32 *)(f->data + 1)))
  1368. #define MP_FLAGS(f) (f->data[0])
  1369. static int isdn_ppp_mp_bundle_array_init(void)
  1370. {
  1371. int i;
  1372. int sz = ISDN_MAX_CHANNELS * sizeof(ippp_bundle);
  1373. if ((isdn_ppp_bundle_arr = kzalloc(sz, GFP_KERNEL)) == NULL)
  1374. return -ENOMEM;
  1375. for (i = 0; i < ISDN_MAX_CHANNELS; i++)
  1376. spin_lock_init(&isdn_ppp_bundle_arr[i].lock);
  1377. return 0;
  1378. }
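/* pick the first unused bundle (ref_ct <= 0) from the static array, or NULL */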
  1379. static ippp_bundle *isdn_ppp_mp_bundle_alloc(void)
  1380. {
  1381. int i;
  1382. for (i = 0; i < ISDN_MAX_CHANNELS; i++)
  1383. if (isdn_ppp_bundle_arr[i].ref_ct <= 0)
  1384. return (isdn_ppp_bundle_arr + i);
  1385. return NULL;
  1386. }
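/* attach lp either to an existing bundle (add_to) or, for the first link,
 * to a freshly allocated bundle with reset sequence state */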
  1387. static int isdn_ppp_mp_init(isdn_net_local *lp, ippp_bundle *add_to)
  1388. {
  1389. struct ippp_struct *is;
  1390. if (lp->ppp_slot < 0) {
  1391. printk(KERN_ERR "%s: lp->ppp_slot(%d) out of range\n",
  1392. __func__, lp->ppp_slot);
  1393. return (-EINVAL);
  1394. }
  1395. is = ippp_table[lp->ppp_slot];
  1396. if (add_to) {
  1397. if (lp->netdev->pb)
  1398. lp->netdev->pb->ref_ct--;
  1399. lp->netdev->pb = add_to;
  1400. } else { /* first link in a bundle */
  1401. is->mp_seqno = 0;
  1402. if ((lp->netdev->pb = isdn_ppp_mp_bundle_alloc()) == NULL)
  1403. return -ENOMEM;
  1404. lp->next = lp->last = lp; /* nobody else in a queue */
  1405. lp->netdev->pb->frags = NULL;
  1406. lp->netdev->pb->frames = 0;
  1407. lp->netdev->pb->seq = UINT_MAX;
  1408. }
  1409. lp->netdev->pb->ref_ct++;
  1410. is->last_link_seqno = 0;
  1411. return 0;
  1412. }
  1413. static u32 isdn_ppp_mp_get_seq(int short_seq,
  1414. struct sk_buff *skb, u32 last_seq);
  1415. static struct sk_buff *isdn_ppp_mp_discard(ippp_bundle *mp,
  1416. struct sk_buff *from, struct sk_buff *to);
  1417. static void isdn_ppp_mp_reassembly(isdn_net_dev *net_dev, isdn_net_local *lp,
  1418. struct sk_buff *from, struct sk_buff *to);
  1419. static void isdn_ppp_mp_free_skb(ippp_bundle *mp, struct sk_buff *skb);
  1420. static void isdn_ppp_mp_print_recv_pkt(int slot, struct sk_buff *skb);
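/*
 * MP receive path for a single fragment: convert its sequence number,
 * insert it into the bundle's ordered fragment list, reassemble and
 * dispatch any complete sequences, and discard fragments that can no
 * longer be completed (below the minimum sequence seen on all links).
 */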
  1421. static void isdn_ppp_mp_receive(isdn_net_dev *net_dev, isdn_net_local *lp,
  1422. struct sk_buff *skb)
  1423. {
  1424. struct ippp_struct *is;
  1425. isdn_net_local *lpq;
  1426. ippp_bundle *mp;
  1427. isdn_mppp_stats *stats;
  1428. struct sk_buff *newfrag, *frag, *start, *nextf;
  1429. u32 newseq, minseq, thisseq;
  1430. unsigned long flags;
  1431. int slot;
  1432. spin_lock_irqsave(&net_dev->pb->lock, flags);
  1433. mp = net_dev->pb;
  1434. stats = &mp->stats;
  1435. slot = lp->ppp_slot;
  1436. if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
  1437. printk(KERN_ERR "%s: lp->ppp_slot(%d)\n",
  1438. __func__, lp->ppp_slot);
  1439. stats->frame_drops++;
  1440. dev_kfree_skb(skb);
  1441. spin_unlock_irqrestore(&mp->lock, flags);
  1442. return;
  1443. }
  1444. is = ippp_table[slot];
  1445. if (++mp->frames > stats->max_queue_len)
  1446. stats->max_queue_len = mp->frames;
  1447. if (is->debug & 0x8)
  1448. isdn_ppp_mp_print_recv_pkt(lp->ppp_slot, skb);
  1449. newseq = isdn_ppp_mp_get_seq(is->mpppcfg & SC_IN_SHORT_SEQ,
  1450. skb, is->last_link_seqno);
  1451. /* if this packet seq # is less than last already processed one,
  1452. * toss it right away, but check for sequence start case first
  1453. */
  1454. if (mp->seq > MP_LONGSEQ_MAX && (newseq & MP_LONGSEQ_MAXBIT)) {
  1455. mp->seq = newseq; /* the first packet: required for
  1456. * rfc1990 non-compliant clients --
  1457. * prevents constant packet toss */
  1458. } else if (MP_LT(newseq, mp->seq)) {
  1459. stats->frame_drops++;
  1460. isdn_ppp_mp_free_skb(mp, skb);
  1461. spin_unlock_irqrestore(&mp->lock, flags);
  1462. return;
  1463. }
  1464. /* find the minimum received sequence number over all links */
  1465. is->last_link_seqno = minseq = newseq;
  1466. for (lpq = net_dev->queue;;) {
  1467. slot = lpq->ppp_slot;
  1468. if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
  1469. printk(KERN_ERR "%s: lpq->ppp_slot(%d)\n",
  1470. __func__, lpq->ppp_slot);
  1471. } else {
  1472. u32 lls = ippp_table[slot]->last_link_seqno;
  1473. if (MP_LT(lls, minseq))
  1474. minseq = lls;
  1475. }
  1476. if ((lpq = lpq->next) == net_dev->queue)
  1477. break;
  1478. }
  1479. if (MP_LT(minseq, mp->seq))
  1480. minseq = mp->seq; /* can't go beyond already processed
  1481. * packets */
  1482. newfrag = skb;
  1483. /* if this new fragment is before the first one, then enqueue it now. */
  1484. if ((frag = mp->frags) == NULL || MP_LT(newseq, MP_SEQ(frag))) {
  1485. newfrag->next = frag;
  1486. mp->frags = frag = newfrag;
  1487. newfrag = NULL;
  1488. }
  1489. start = MP_FLAGS(frag) & MP_BEGIN_FRAG &&
  1490. MP_SEQ(frag) == mp->seq ? frag : NULL;
  1491. /*
  1492. * main fragment traversing loop
  1493. *
  1494. * try to accomplish several tasks:
  1495. * - insert new fragment into the proper sequence slot (once that's done
  1496. * newfrag will be set to NULL)
  1497. * - reassemble any complete fragment sequence (non-null 'start'
  1498. * indicates there is a contiguous sequence present)
  1499. * - discard any incomplete sequences that are below minseq -- due
  1500. * to the fact that the sender always increments the sequence number, if there
  1501. * is an incomplete sequence below minseq, no new fragments would
  1502. * come to complete such a sequence and it should be discarded
  1503. *
  1504. * loop completes when we accomplished the following tasks:
  1505. * - new fragment is inserted in the proper sequence ('newfrag' is
  1506. * set to NULL)
  1507. * - we hit a gap in the sequence, so no reassembly/processing is
  1508. * possible ('start' would be set to NULL)
  1509. *
  1510. * algorithm for this code is derived from code in the book
  1511. * 'PPP Design And Debugging' by James Carlson (Addison-Wesley)
  1512. */
  1513. while (start != NULL || newfrag != NULL) {
  1514. thisseq = MP_SEQ(frag);
  1515. nextf = frag->next;
  1516. /* drop any duplicate fragments */
  1517. if (newfrag != NULL && thisseq == newseq) {
  1518. isdn_ppp_mp_free_skb(mp, newfrag);
  1519. newfrag = NULL;
  1520. }
  1521. /* insert new fragment before next element if possible. */
  1522. if (newfrag != NULL && (nextf == NULL ||
  1523. MP_LT(newseq, MP_SEQ(nextf)))) {
  1524. newfrag->next = nextf;
  1525. frag->next = nextf = newfrag;
  1526. newfrag = NULL;
  1527. }
  1528. if (start != NULL) {
  1529. /* check for misplaced start */
  1530. if (start != frag && (MP_FLAGS(frag) & MP_BEGIN_FRAG)) {
  1531. printk(KERN_WARNING"isdn_mppp(seq %d): new "
  1532. "BEGIN flag with no prior END", thisseq);
  1533. stats->seqerrs++;
  1534. stats->frame_drops++;
  1535. start = isdn_ppp_mp_discard(mp, start, frag);
  1536. nextf = frag->next;
  1537. }
  1538. } else if (MP_LE(thisseq, minseq)) {
  1539. if (MP_FLAGS(frag) & MP_BEGIN_FRAG)
  1540. start = frag;
  1541. else {
  1542. if (MP_FLAGS(frag) & MP_END_FRAG)
  1543. stats->frame_drops++;
  1544. if (mp->frags == frag)
  1545. mp->frags = nextf;
  1546. isdn_ppp_mp_free_skb(mp, frag);
  1547. frag = nextf;
  1548. continue;
  1549. }
  1550. }
  1551. /* if start is non-null and we have end fragment, then
  1552. * we have full reassembly sequence -- reassemble
  1553. * and process packet now
  1554. */
  1555. if (start != NULL && (MP_FLAGS(frag) & MP_END_FRAG)) {
  1556. minseq = mp->seq = (thisseq + 1) & MP_LONGSEQ_MASK;
  1557. /* Reassemble the packet then dispatch it */
  1558. isdn_ppp_mp_reassembly(net_dev, lp, start, nextf);
  1559. start = NULL;
  1560. frag = NULL;
  1561. mp->frags = nextf;
  1562. }
  1563. /* check if we need to update the start pointer: if we just
  1564. * reassembled the packet and sequence is contiguous
  1565. * then next fragment should be the start of new reassembly
  1566. * if sequence is contiguous, but we haven't reassembled yet,
  1567. * keep going.
  1568. * if sequence is not contiguous, either clear everything
  1569. * below low watermark and set start to the next frag or
  1570. * clear start ptr.
  1571. */
  1572. if (nextf != NULL &&
  1573. ((thisseq + 1) & MP_LONGSEQ_MASK) == MP_SEQ(nextf)) {
  1574. /* if we just reassembled and the next one is here,
  1575. * then start another reassembly. */
  1576. if (frag == NULL) {
  1577. if (MP_FLAGS(nextf) & MP_BEGIN_FRAG)
  1578. start = nextf;
  1579. else
  1580. {
  1581. printk(KERN_WARNING"isdn_mppp(seq %d):"
  1582. " END flag with no following "
  1583. "BEGIN", thisseq);
  1584. stats->seqerrs++;
  1585. }
  1586. }
  1587. } else {
  1588. if (nextf != NULL && frag != NULL &&
  1589. MP_LT(thisseq, minseq)) {
  1590. /* we've got a break in the sequence
  1591. * and we are not at the end yet
  1592. * and we did not just reassemble
  1593. * (if we had, there wouldn't be anything before)
  1594. * and we are below the low watermark,
  1595. * discard all the frames below the low watermark
  1596. * and start over */
  1597. stats->frame_drops++;
  1598. mp->frags = isdn_ppp_mp_discard(mp, start, nextf);
  1599. }
  1600. /* break in the sequence, no reassembly */
  1601. start = NULL;
  1602. }
  1603. frag = nextf;
  1604. } /* while -- main loop */
  1605. if (mp->frags == NULL)
  1606. mp->frags = frag;
  1607. /* rather straightforward way to deal with (not very) possible
  1608. * queue overflow */
  1609. if (mp->frames > MP_MAX_QUEUE_LEN) {
  1610. stats->overflows++;
  1611. while (mp->frames > MP_MAX_QUEUE_LEN) {
  1612. frag = mp->frags->next;
  1613. isdn_ppp_mp_free_skb(mp, mp->frags);
  1614. mp->frags = frag;
  1615. }
  1616. }
  1617. spin_unlock_irqrestore(&mp->lock, flags);
  1618. }
  1619. static void isdn_ppp_mp_cleanup(isdn_net_local *lp)
  1620. {
  1621. struct sk_buff *frag = lp->netdev->pb->frags;
  1622. struct sk_buff *nextfrag;
  1623. while (frag) {
  1624. nextfrag = frag->next;
  1625. isdn_ppp_mp_free_skb(lp->netdev->pb, frag);
  1626. frag = nextfrag;
  1627. }
  1628. lp->netdev->pb->frags = NULL;
  1629. }
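/* extract the MP sequence number (12-bit short or 24-bit long format) and
 * rewrite the fragment header into the internal 5-byte form: flags byte
 * followed by the sequence number in host byte order */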
  1630. static u32 isdn_ppp_mp_get_seq(int short_seq,
  1631. struct sk_buff *skb, u32 last_seq)
  1632. {
  1633. u32 seq;
  1634. int flags = skb->data[0] & (MP_BEGIN_FRAG | MP_END_FRAG);
  1635. if (!short_seq)
  1636. {
  1637. seq = ntohl(*(__be32 *)skb->data) & MP_LONGSEQ_MASK;
  1638. skb_push(skb, 1);
  1639. }
  1640. else
  1641. {
  1642. /* convert 12-bit short seq number to 24-bit long one
  1643. */
  1644. seq = ntohs(*(__be16 *)skb->data) & MP_SHORTSEQ_MASK;
1645. /* check for sequence wrap */
  1646. if (!(seq & MP_SHORTSEQ_MAXBIT) &&
  1647. (last_seq & MP_SHORTSEQ_MAXBIT) &&
  1648. (unsigned long)last_seq <= MP_LONGSEQ_MAX)
  1649. seq |= (last_seq + MP_SHORTSEQ_MAX + 1) &
  1650. (~MP_SHORTSEQ_MASK & MP_LONGSEQ_MASK);
  1651. else
  1652. seq |= last_seq & (~MP_SHORTSEQ_MASK & MP_LONGSEQ_MASK);
1653. skb_push(skb, 3); /* put converted sequence back in skb */
  1654. }
1655. *(u32 *)(skb->data + 1) = seq; /* put sequence back in _host_ byte
  1656. * order */
  1657. skb->data[0] = flags; /* restore flags */
  1658. return seq;
  1659. }
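/*
 * Free the fragments from 'from' up to, but not including, 'to' and
 * return 'to' (or NULL if 'from' was already NULL).
 */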
  1660. struct sk_buff *isdn_ppp_mp_discard(ippp_bundle *mp,
  1661. struct sk_buff *from, struct sk_buff *to)
  1662. {
  1663. if (from)
  1664. while (from != to) {
  1665. struct sk_buff *next = from->next;
  1666. isdn_ppp_mp_free_skb(mp, from);
  1667. from = next;
  1668. }
  1669. return from;
  1670. }
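/*
 * Glue the fragment chain [from, to) back together into one PPP frame:
 * strip the MP headers, copy the payloads into a freshly allocated skb
 * (the skb is reused in place for the common single-fragment case) and
 * hand the result to isdn_ppp_push_higher() once the protocol field
 * has been removed.
 */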
  1671. void isdn_ppp_mp_reassembly(isdn_net_dev *net_dev, isdn_net_local *lp,
  1672. struct sk_buff *from, struct sk_buff *to)
  1673. {
  1674. ippp_bundle *mp = net_dev->pb;
  1675. int proto;
  1676. struct sk_buff *skb;
  1677. unsigned int tot_len;
  1678. if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) {
  1679. printk(KERN_ERR "%s: lp->ppp_slot(%d) out of range\n",
  1680. __func__, lp->ppp_slot);
  1681. return;
  1682. }
  1683. if (MP_FLAGS(from) == (MP_BEGIN_FRAG | MP_END_FRAG)) {
  1684. if (ippp_table[lp->ppp_slot]->debug & 0x40)
  1685. printk(KERN_DEBUG "isdn_mppp: reassembly: frame %d, "
  1686. "len %d\n", MP_SEQ(from), from->len);
  1687. skb = from;
  1688. skb_pull(skb, MP_HEADER_LEN);
  1689. mp->frames--;
  1690. } else {
  1691. struct sk_buff *frag;
  1692. int n;
  1693. for (tot_len = n = 0, frag = from; frag != to; frag = frag->next, n++)
  1694. tot_len += frag->len - MP_HEADER_LEN;
  1695. if (ippp_table[lp->ppp_slot]->debug & 0x40)
  1696. printk(KERN_DEBUG"isdn_mppp: reassembling frames %d "
  1697. "to %d, len %d\n", MP_SEQ(from),
  1698. (MP_SEQ(from) + n - 1) & MP_LONGSEQ_MASK, tot_len);
  1699. if ((skb = dev_alloc_skb(tot_len)) == NULL) {
  1700. printk(KERN_ERR "isdn_mppp: cannot allocate sk buff "
  1701. "of size %d\n", tot_len);
  1702. isdn_ppp_mp_discard(mp, from, to);
  1703. return;
  1704. }
  1705. while (from != to) {
  1706. unsigned int len = from->len - MP_HEADER_LEN;
  1707. skb_copy_from_linear_data_offset(from, MP_HEADER_LEN,
  1708. skb_put(skb, len),
  1709. len);
  1710. frag = from->next;
  1711. isdn_ppp_mp_free_skb(mp, from);
  1712. from = frag;
  1713. }
  1714. }
  1715. proto = isdn_ppp_strip_proto(skb);
  1716. isdn_ppp_push_higher(net_dev, lp, skb, proto);
  1717. }
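/* Release one queued fragment and keep the bundle's frame count in sync. */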
  1718. static void isdn_ppp_mp_free_skb(ippp_bundle *mp, struct sk_buff *skb)
  1719. {
  1720. dev_kfree_skb(skb);
  1721. mp->frames--;
  1722. }
  1723. static void isdn_ppp_mp_print_recv_pkt(int slot, struct sk_buff *skb)
  1724. {
  1725. printk(KERN_DEBUG "mp_recv: %d/%d -> %02x %02x %02x %02x %02x %02x\n",
  1726. slot, (int) skb->len,
  1727. (int) skb->data[0], (int) skb->data[1], (int) skb->data[2],
  1728. (int) skb->data[3], (int) skb->data[4], (int) skb->data[5]);
  1729. }
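/*
 * Attach the link behind 'is' to the bundle of interface ippp<unit>
 * (requested by the daemon via ioctl): the new channel inherits the
 * master's unit number and the relevant PPP/MP configuration bits, and
 * its MP state is initialized on the shared bundle.
 */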
  1730. static int
  1731. isdn_ppp_bundle(struct ippp_struct *is, int unit)
  1732. {
  1733. char ifn[IFNAMSIZ + 1];
  1734. isdn_net_dev *p;
  1735. isdn_net_local *lp, *nlp;
  1736. int rc;
  1737. unsigned long flags;
  1738. sprintf(ifn, "ippp%d", unit);
  1739. p = isdn_net_findif(ifn);
  1740. if (!p) {
  1741. printk(KERN_ERR "ippp_bundle: cannot find %s\n", ifn);
  1742. return -EINVAL;
  1743. }
  1744. spin_lock_irqsave(&p->pb->lock, flags);
  1745. nlp = is->lp;
  1746. lp = p->queue;
  1747. if (nlp->ppp_slot < 0 || nlp->ppp_slot >= ISDN_MAX_CHANNELS ||
  1748. lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) {
  1749. printk(KERN_ERR "ippp_bundle: binding to invalid slot %d\n",
  1750. nlp->ppp_slot < 0 || nlp->ppp_slot >= ISDN_MAX_CHANNELS ?
  1751. nlp->ppp_slot : lp->ppp_slot);
  1752. rc = -EINVAL;
  1753. goto out;
  1754. }
  1755. isdn_net_add_to_bundle(p, nlp);
  1756. ippp_table[nlp->ppp_slot]->unit = ippp_table[lp->ppp_slot]->unit;
  1757. /* maybe also SC_CCP stuff */
  1758. ippp_table[nlp->ppp_slot]->pppcfg |= ippp_table[lp->ppp_slot]->pppcfg &
  1759. (SC_ENABLE_IP | SC_NO_TCP_CCID | SC_REJ_COMP_TCP);
  1760. ippp_table[nlp->ppp_slot]->mpppcfg |= ippp_table[lp->ppp_slot]->mpppcfg &
  1761. (SC_MP_PROT | SC_REJ_MP_PROT | SC_OUT_SHORT_SEQ | SC_IN_SHORT_SEQ);
  1762. rc = isdn_ppp_mp_init(nlp, p->pb);
  1763. out:
  1764. spin_unlock_irqrestore(&p->pb->lock, flags);
  1765. return rc;
  1766. }
  1767. #endif /* CONFIG_ISDN_MPP */
  1768. /*
  1769. * network device ioctl handlers
  1770. */
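/*
 * SIOCGPPPSTATS: copy the interface's packet counters (and, with
 * CONFIG_ISDN_PPP_VJ, the VJ compression statistics) into a struct
 * ppp_stats in user space.  A rough userland sketch (names illustrative,
 * error handling omitted):
 *
 *	struct ifreq ifr;
 *	struct ppp_stats st;
 *	strcpy(ifr.ifr_name, "ippp0");
 *	ifr.ifr_data = (char *)&st;
 *	ioctl(sock_fd, SIOCGPPPSTATS, &ifr);
 */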
  1771. static int
  1772. isdn_ppp_dev_ioctl_stats(int slot, struct ifreq *ifr, struct net_device *dev)
  1773. {
  1774. struct ppp_stats __user *res = ifr->ifr_data;
  1775. struct ppp_stats t;
  1776. isdn_net_local *lp = netdev_priv(dev);
  1777. if (!access_ok(VERIFY_WRITE, res, sizeof(struct ppp_stats)))
  1778. return -EFAULT;
  1779. /* build a temporary stat struct and copy it to user space */
  1780. memset(&t, 0, sizeof(struct ppp_stats));
  1781. if (dev->flags & IFF_UP) {
  1782. t.p.ppp_ipackets = lp->stats.rx_packets;
  1783. t.p.ppp_ibytes = lp->stats.rx_bytes;
  1784. t.p.ppp_ierrors = lp->stats.rx_errors;
  1785. t.p.ppp_opackets = lp->stats.tx_packets;
  1786. t.p.ppp_obytes = lp->stats.tx_bytes;
  1787. t.p.ppp_oerrors = lp->stats.tx_errors;
  1788. #ifdef CONFIG_ISDN_PPP_VJ
  1789. if (slot >= 0 && ippp_table[slot]->slcomp) {
  1790. struct slcompress *slcomp = ippp_table[slot]->slcomp;
  1791. t.vj.vjs_packets = slcomp->sls_o_compressed + slcomp->sls_o_uncompressed;
  1792. t.vj.vjs_compressed = slcomp->sls_o_compressed;
  1793. t.vj.vjs_searches = slcomp->sls_o_searches;
  1794. t.vj.vjs_misses = slcomp->sls_o_misses;
  1795. t.vj.vjs_errorin = slcomp->sls_i_error;
  1796. t.vj.vjs_tossed = slcomp->sls_i_tossed;
  1797. t.vj.vjs_uncompressedin = slcomp->sls_i_uncompressed;
  1798. t.vj.vjs_compressedin = slcomp->sls_i_compressed;
  1799. }
  1800. #endif
  1801. }
  1802. if (copy_to_user(res, &t, sizeof(struct ppp_stats)))
  1803. return -EFAULT;
  1804. return 0;
  1805. }
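/*
 * Network device ioctls for syncPPP interfaces: report the emulated
 * pppd version string (SIOCGPPPVER) or the link statistics
 * (SIOCGPPPSTATS).  Only valid while the device uses SYNCPPP
 * encapsulation.
 */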
  1806. int
  1807. isdn_ppp_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
  1808. {
  1809. int error = 0;
  1810. int len;
  1811. isdn_net_local *lp = netdev_priv(dev);
  1812. if (lp->p_encap != ISDN_NET_ENCAP_SYNCPPP)
  1813. return -EINVAL;
  1814. switch (cmd) {
  1815. #define PPP_VERSION "2.3.7"
  1816. case SIOCGPPPVER:
  1817. len = strlen(PPP_VERSION) + 1;
  1818. if (copy_to_user(ifr->ifr_data, PPP_VERSION, len))
  1819. error = -EFAULT;
  1820. break;
  1821. case SIOCGPPPSTATS:
  1822. error = isdn_ppp_dev_ioctl_stats(lp->ppp_slot, ifr, dev);
  1823. break;
  1824. default:
  1825. error = -EINVAL;
  1826. break;
  1827. }
  1828. return error;
  1829. }
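/*
 * Extract the unit number from an interface name of the form "ippp<n>",
 * e.g. "ippp0" -> 0, "ippp23" -> 23; returns -1 if the name does not
 * match that pattern.
 */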
  1830. static int
  1831. isdn_ppp_if_get_unit(char *name)
  1832. {
  1833. int len,
  1834. i,
  1835. unit = 0,
  1836. deci;
  1837. len = strlen(name);
  1838. if (strncmp("ippp", name, 4) || len > 8)
  1839. return -1;
  1840. for (i = 0, deci = 1; i < len; i++, deci *= 10) {
  1841. char a = name[len - i - 1];
  1842. if (a >= '0' && a <= '9')
  1843. unit += (a - '0') * deci;
  1844. else
  1845. break;
  1846. }
  1847. if (!i || len - i != 4)
  1848. unit = -1;
  1849. return unit;
  1850. }
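/*
 * Bring up an additional channel for the bundle of the named interface:
 * walk the slave chain and place a dial request on the first slave that
 * is not yet connected.  Returns 0 on success, a small positive code
 * (not found / master not connected / no free slave) on failure, and -1
 * if MP support is not compiled in.
 */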
  1851. int
  1852. isdn_ppp_dial_slave(char *name)
  1853. {
  1854. #ifdef CONFIG_ISDN_MPP
  1855. isdn_net_dev *ndev;
  1856. isdn_net_local *lp;
  1857. struct net_device *sdev;
  1858. if (!(ndev = isdn_net_findif(name)))
  1859. return 1;
  1860. lp = ndev->local;
  1861. if (!(lp->flags & ISDN_NET_CONNECTED))
  1862. return 5;
  1863. sdev = lp->slave;
  1864. while (sdev) {
  1865. isdn_net_local *mlp = netdev_priv(sdev);
  1866. if (!(mlp->flags & ISDN_NET_CONNECTED))
  1867. break;
  1868. sdev = mlp->slave;
  1869. }
  1870. if (!sdev)
  1871. return 2;
  1872. isdn_net_dial_req(netdev_priv(sdev));
  1873. return 0;
  1874. #else
  1875. return -1;
  1876. #endif
  1877. }
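/*
 * Counterpart of isdn_ppp_dial_slave(): find the last connected link in
 * the slave chain of the named interface and hang it up.
 */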
  1878. int
  1879. isdn_ppp_hangup_slave(char *name)
  1880. {
  1881. #ifdef CONFIG_ISDN_MPP
  1882. isdn_net_dev *ndev;
  1883. isdn_net_local *lp;
  1884. struct net_device *sdev;
  1885. if (!(ndev = isdn_net_findif(name)))
  1886. return 1;
  1887. lp = ndev->local;
  1888. if (!(lp->flags & ISDN_NET_CONNECTED))
  1889. return 5;
  1890. sdev = lp->slave;
  1891. while (sdev) {
  1892. isdn_net_local *mlp = netdev_priv(sdev);
  1893. if (mlp->slave) { /* find last connected link in chain */
  1894. isdn_net_local *nlp = ISDN_SLAVE_PRIV(mlp);
  1895. if (!(nlp->flags & ISDN_NET_CONNECTED))
  1896. break;
  1897. } else if (mlp->flags & ISDN_NET_CONNECTED)
  1898. break;
  1899. sdev = mlp->slave;
  1900. }
  1901. if (!sdev)
  1902. return 2;
  1903. isdn_net_hangup(sdev);
  1904. return 0;
  1905. #else
  1906. return -1;
  1907. #endif
  1908. }
  1909. /*
  1910. * PPP compression stuff
  1911. */
  1912. /* Push an empty CCP Data Frame up to the daemon to wake it up and let it
  1913. generate a CCP Reset-Request or tear down CCP altogether */
  1914. static void isdn_ppp_ccp_kickup(struct ippp_struct *is)
  1915. {
  1916. isdn_ppp_fill_rq(NULL, 0, PPP_COMP, is->lp->ppp_slot);
  1917. }
  1918. /* In-kernel handling of CCP Reset-Request and Reset-Ack is necessary,
  1919. but absolutely nontrivial. The most abstruse problem we are facing is
  1920. that the generation, reception and all the handling of timeouts and
  1921. resends including proper request id management should be entirely left
  1922. to the (de)compressor, but indeed is not covered by the current API to
  1923. the (de)compressor. The API is a prototype version from PPP where only
  1924. some (de)compressors have yet been implemented and all of them are
1925. rather simple in their reset handling. Especially, there is only one
  1926. outstanding ResetAck at a time with all of them and ResetReq/-Acks do
  1927. not have parameters. For this very special case it was sufficient to
  1928. just return an error code from the decompressor and have a single
  1929. reset() entry to communicate all the necessary information between
  1930. the framework and the (de)compressor. Bad enough, LZS is different
  1931. (and any other compressor may be different, too). It has multiple
  1932. histories (eventually) and needs to Reset each of them independently
  1933. and thus uses multiple outstanding Acks and history numbers as an
  1934. additional parameter to Reqs/Acks.
  1935. All that makes it harder to port the reset state engine into the
  1936. kernel because it is not just the same simple one as in (i)pppd but
  1937. it must be able to pass additional parameters and have multiple out-
  1938. standing Acks. We are trying to achieve the impossible by handling
1939. reset transactions independently by their id. The id MUST change when
  1940. the data portion changes, thus any (de)compressor who uses more than
  1941. one resettable state must provide and recognize individual ids for
1942. each individual reset transaction. The framework itself differentiates
1943. them _only_ by id, because unlike the (de)compressor it attaches no
1944. further semantics to them.
1945. It looks like a major redesign of the interface would be nice,
  1946. but I don't have an idea how to do it better. */
  1947. /* Send a CCP Reset-Request or Reset-Ack directly from the kernel. This is
  1948. getting that lengthy because there is no simple "send-this-frame-out"
1949. function above but every wrapper does things a bit differently. Hope I
1950. guessed correctly in this hack... */
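/* The resulting frame looks like this on the wire (the leading
   address/control bytes are only present when AC compression is off):

   ff 03 <proto hi> <proto lo> <code> <id> <len hi> <len lo> <data ...>

   where <len> counts code, id, the length field itself and the data,
   i.e. 4 + len. */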
  1951. static void isdn_ppp_ccp_xmit_reset(struct ippp_struct *is, int proto,
  1952. unsigned char code, unsigned char id,
  1953. unsigned char *data, int len)
  1954. {
  1955. struct sk_buff *skb;
  1956. unsigned char *p;
  1957. int hl;
  1958. int cnt = 0;
  1959. isdn_net_local *lp = is->lp;
  1960. /* Alloc large enough skb */
  1961. hl = dev->drv[lp->isdn_device]->interface->hl_hdrlen;
  1962. skb = alloc_skb(len + hl + 16, GFP_ATOMIC);
  1963. if (!skb) {
  1964. printk(KERN_WARNING
  1965. "ippp: CCP cannot send reset - out of memory\n");
  1966. return;
  1967. }
  1968. skb_reserve(skb, hl);
  1969. /* We may need to stuff an address and control field first */
  1970. if (!(is->pppcfg & SC_COMP_AC)) {
  1971. p = skb_put(skb, 2);
  1972. *p++ = 0xff;
  1973. *p++ = 0x03;
  1974. }
  1975. /* Stuff proto, code, id and length */
  1976. p = skb_put(skb, 6);
  1977. *p++ = (proto >> 8);
  1978. *p++ = (proto & 0xff);
  1979. *p++ = code;
  1980. *p++ = id;
  1981. cnt = 4 + len;
  1982. *p++ = (cnt >> 8);
  1983. *p++ = (cnt & 0xff);
  1984. /* Now stuff remaining bytes */
  1985. if (len) {
  1986. p = skb_put(skb, len);
  1987. memcpy(p, data, len);
  1988. }
  1989. /* skb is now ready for xmit */
  1990. printk(KERN_DEBUG "Sending CCP Frame:\n");
  1991. isdn_ppp_frame_log("ccp-xmit", skb->data, skb->len, 32, is->unit, lp->ppp_slot);
  1992. isdn_net_write_super(lp, skb);
  1993. }
  1994. /* Allocate the reset state vector */
  1995. static struct ippp_ccp_reset *isdn_ppp_ccp_reset_alloc(struct ippp_struct *is)
  1996. {
  1997. struct ippp_ccp_reset *r;
  1998. r = kzalloc(sizeof(struct ippp_ccp_reset), GFP_KERNEL);
  1999. if (!r) {
  2000. printk(KERN_ERR "ippp_ccp: failed to allocate reset data"
  2001. " structure - no mem\n");
  2002. return NULL;
  2003. }
  2004. printk(KERN_DEBUG "ippp_ccp: allocated reset data structure %p\n", r);
  2005. is->reset = r;
  2006. return r;
  2007. }
  2008. /* Destroy the reset state vector. Kill all pending timers first. */
  2009. static void isdn_ppp_ccp_reset_free(struct ippp_struct *is)
  2010. {
  2011. unsigned int id;
  2012. printk(KERN_DEBUG "ippp_ccp: freeing reset data structure %p\n",
  2013. is->reset);
  2014. for (id = 0; id < 256; id++) {
  2015. if (is->reset->rs[id]) {
  2016. isdn_ppp_ccp_reset_free_state(is, (unsigned char)id);
  2017. }
  2018. }
  2019. kfree(is->reset);
  2020. is->reset = NULL;
  2021. }
  2022. /* Free a given state and clear everything up for later reallocation */
  2023. static void isdn_ppp_ccp_reset_free_state(struct ippp_struct *is,
  2024. unsigned char id)
  2025. {
  2026. struct ippp_ccp_reset_state *rs;
  2027. if (is->reset->rs[id]) {
  2028. printk(KERN_DEBUG "ippp_ccp: freeing state for id %d\n", id);
  2029. rs = is->reset->rs[id];
  2030. /* Make sure the kernel will not call back later */
  2031. if (rs->ta)
  2032. del_timer(&rs->timer);
  2033. is->reset->rs[id] = NULL;
  2034. kfree(rs);
  2035. } else {
  2036. printk(KERN_WARNING "ippp_ccp: id %d is not allocated\n", id);
  2037. }
  2038. }
  2039. /* The timer callback function which is called when a ResetReq has timed out,
  2040. aka has never been answered by a ResetAck */
  2041. static void isdn_ppp_ccp_timer_callback(unsigned long closure)
  2042. {
  2043. struct ippp_ccp_reset_state *rs =
  2044. (struct ippp_ccp_reset_state *)closure;
  2045. if (!rs) {
  2046. printk(KERN_ERR "ippp_ccp: timer cb with zero closure.\n");
  2047. return;
  2048. }
  2049. if (rs->ta && rs->state == CCPResetSentReq) {
  2050. /* We are correct here */
  2051. if (!rs->expra) {
  2052. /* Hmm, there is no Ack really expected. We can clean
  2053. up the state now, it will be reallocated if the
  2054. decompressor insists on another reset */
  2055. rs->ta = 0;
  2056. isdn_ppp_ccp_reset_free_state(rs->is, rs->id);
  2057. return;
  2058. }
  2059. printk(KERN_DEBUG "ippp_ccp: CCP Reset timed out for id %d\n",
  2060. rs->id);
  2061. /* Push it again */
  2062. isdn_ppp_ccp_xmit_reset(rs->is, PPP_CCP, CCP_RESETREQ, rs->id,
  2063. rs->data, rs->dlen);
  2064. /* Restart timer */
  2065. rs->timer.expires = jiffies + HZ * 5;
  2066. add_timer(&rs->timer);
  2067. } else {
  2068. printk(KERN_WARNING "ippp_ccp: timer cb in wrong state %d\n",
  2069. rs->state);
  2070. }
  2071. }
  2072. /* Allocate a new reset transaction state */
  2073. static struct ippp_ccp_reset_state *isdn_ppp_ccp_reset_alloc_state(struct ippp_struct *is,
  2074. unsigned char id)
  2075. {
  2076. struct ippp_ccp_reset_state *rs;
  2077. if (is->reset->rs[id]) {
  2078. printk(KERN_WARNING "ippp_ccp: old state exists for id %d\n",
  2079. id);
  2080. return NULL;
  2081. } else {
  2082. rs = kzalloc(sizeof(struct ippp_ccp_reset_state), GFP_KERNEL);
  2083. if (!rs)
  2084. return NULL;
  2085. rs->state = CCPResetIdle;
  2086. rs->is = is;
  2087. rs->id = id;
  2088. init_timer(&rs->timer);
  2089. rs->timer.data = (unsigned long)rs;
  2090. rs->timer.function = isdn_ppp_ccp_timer_callback;
  2091. is->reset->rs[id] = rs;
  2092. }
  2093. return rs;
  2094. }
  2095. /* A decompressor wants a reset with a set of parameters - do what is
  2096. necessary to fulfill it */
  2097. static void isdn_ppp_ccp_reset_trans(struct ippp_struct *is,
  2098. struct isdn_ppp_resetparams *rp)
  2099. {
  2100. struct ippp_ccp_reset_state *rs;
  2101. if (rp->valid) {
  2102. /* The decompressor defines parameters by itself */
  2103. if (rp->rsend) {
  2104. /* And he wants us to send a request */
  2105. if (!(rp->idval)) {
  2106. printk(KERN_ERR "ippp_ccp: decompressor must"
  2107. " specify reset id\n");
  2108. return;
  2109. }
  2110. if (is->reset->rs[rp->id]) {
  2111. /* There is already a transaction in existence
  2112. for this id. May be still waiting for a
  2113. Ack or may be wrong. */
  2114. rs = is->reset->rs[rp->id];
  2115. if (rs->state == CCPResetSentReq && rs->ta) {
  2116. printk(KERN_DEBUG "ippp_ccp: reset"
  2117. " trans still in progress"
  2118. " for id %d\n", rp->id);
  2119. } else {
  2120. printk(KERN_WARNING "ippp_ccp: reset"
  2121. " trans in wrong state %d for"
  2122. " id %d\n", rs->state, rp->id);
  2123. }
  2124. } else {
  2125. /* Ok, this is a new transaction */
  2126. printk(KERN_DEBUG "ippp_ccp: new trans for id"
  2127. " %d to be started\n", rp->id);
  2128. rs = isdn_ppp_ccp_reset_alloc_state(is, rp->id);
  2129. if (!rs) {
  2130. printk(KERN_ERR "ippp_ccp: out of mem"
  2131. " allocing ccp trans\n");
  2132. return;
  2133. }
  2134. rs->state = CCPResetSentReq;
  2135. rs->expra = rp->expra;
  2136. if (rp->dtval) {
  2137. rs->dlen = rp->dlen;
  2138. memcpy(rs->data, rp->data, rp->dlen);
  2139. }
  2140. /* HACK TODO - add link comp here */
  2141. isdn_ppp_ccp_xmit_reset(is, PPP_CCP,
  2142. CCP_RESETREQ, rs->id,
  2143. rs->data, rs->dlen);
  2144. /* Start the timer */
  2145. rs->timer.expires = jiffies + 5 * HZ;
  2146. add_timer(&rs->timer);
  2147. rs->ta = 1;
  2148. }
  2149. } else {
  2150. printk(KERN_DEBUG "ippp_ccp: no reset sent\n");
  2151. }
  2152. } else {
  2153. /* The reset params are invalid. The decompressor does not
  2154. care about them, so we just send the minimal requests
  2155. and increase ids only when an Ack is received for a
  2156. given id */
  2157. if (is->reset->rs[is->reset->lastid]) {
  2158. /* There is already a transaction in existence
  2159. for this id. May be still waiting for a
  2160. Ack or may be wrong. */
  2161. rs = is->reset->rs[is->reset->lastid];
  2162. if (rs->state == CCPResetSentReq && rs->ta) {
  2163. printk(KERN_DEBUG "ippp_ccp: reset"
  2164. " trans still in progress"
  2165. " for id %d\n", rp->id);
  2166. } else {
  2167. printk(KERN_WARNING "ippp_ccp: reset"
  2168. " trans in wrong state %d for"
  2169. " id %d\n", rs->state, rp->id);
  2170. }
  2171. } else {
  2172. printk(KERN_DEBUG "ippp_ccp: new trans for id"
  2173. " %d to be started\n", is->reset->lastid);
  2174. rs = isdn_ppp_ccp_reset_alloc_state(is,
  2175. is->reset->lastid);
  2176. if (!rs) {
  2177. printk(KERN_ERR "ippp_ccp: out of mem"
  2178. " allocing ccp trans\n");
  2179. return;
  2180. }
  2181. rs->state = CCPResetSentReq;
  2182. /* We always expect an Ack if the decompressor doesn't
  2183. know better */
  2184. rs->expra = 1;
  2185. rs->dlen = 0;
  2186. /* HACK TODO - add link comp here */
  2187. isdn_ppp_ccp_xmit_reset(is, PPP_CCP, CCP_RESETREQ,
  2188. rs->id, NULL, 0);
  2189. /* Start the timer */
  2190. rs->timer.expires = jiffies + 5 * HZ;
  2191. add_timer(&rs->timer);
  2192. rs->ta = 1;
  2193. }
  2194. }
  2195. }
  2196. /* An Ack was received for this id. This means we stop the timer and clean
2197. up the state prior to calling the decompressor's reset routine. */
  2198. static void isdn_ppp_ccp_reset_ack_rcvd(struct ippp_struct *is,
  2199. unsigned char id)
  2200. {
  2201. struct ippp_ccp_reset_state *rs = is->reset->rs[id];
  2202. if (rs) {
  2203. if (rs->ta && rs->state == CCPResetSentReq) {
  2204. /* Great, we are correct */
  2205. if (!rs->expra)
  2206. printk(KERN_DEBUG "ippp_ccp: ResetAck received"
  2207. " for id %d but not expected\n", id);
  2208. } else {
  2209. printk(KERN_INFO "ippp_ccp: ResetAck received out of"
  2210. "sync for id %d\n", id);
  2211. }
  2212. if (rs->ta) {
  2213. rs->ta = 0;
  2214. del_timer(&rs->timer);
  2215. }
  2216. isdn_ppp_ccp_reset_free_state(is, id);
  2217. } else {
  2218. printk(KERN_INFO "ippp_ccp: ResetAck received for unknown id"
  2219. " %d\n", id);
  2220. }
  2221. /* Make sure the simple reset stuff uses a new id next time */
  2222. is->reset->lastid++;
  2223. }
  2224. /*
  2225. * decompress packet
  2226. *
2227. * if master = 0, we're trying to uncompress a per-link compressed packet,
2228. * as opposed to a compressed reconstructed-from-MPPP packet.
  2229. * proto is updated to protocol field of uncompressed packet.
  2230. *
  2231. * retval: decompressed packet,
  2232. * same packet if uncompressed,
  2233. * NULL if decompression error
  2234. */
  2235. static struct sk_buff *isdn_ppp_decompress(struct sk_buff *skb, struct ippp_struct *is, struct ippp_struct *master,
  2236. int *proto)
  2237. {
  2238. void *stat = NULL;
  2239. struct isdn_ppp_compressor *ipc = NULL;
  2240. struct sk_buff *skb_out;
  2241. int len;
  2242. struct ippp_struct *ri;
  2243. struct isdn_ppp_resetparams rsparm;
  2244. unsigned char rsdata[IPPP_RESET_MAXDATABYTES];
  2245. if (!master) {
  2246. // per-link decompression
  2247. stat = is->link_decomp_stat;
  2248. ipc = is->link_decompressor;
  2249. ri = is;
  2250. } else {
  2251. stat = master->decomp_stat;
  2252. ipc = master->decompressor;
  2253. ri = master;
  2254. }
  2255. if (!ipc) {
  2256. // no decompressor -> we can't decompress.
  2257. printk(KERN_DEBUG "ippp: no decompressor defined!\n");
  2258. return skb;
  2259. }
2260. BUG_ON(!stat); // if we have a decompressor, stat has been set as well
  2261. if ((master && *proto == PPP_COMP) || (!master && *proto == PPP_COMPFRAG)) {
2262. // compressed packets are marked as such by their protocol type
  2263. // Set up reset params for the decompressor
  2264. memset(&rsparm, 0, sizeof(rsparm));
  2265. rsparm.data = rsdata;
  2266. rsparm.maxdlen = IPPP_RESET_MAXDATABYTES;
  2267. skb_out = dev_alloc_skb(is->mru + PPP_HDRLEN);
  2268. if (!skb_out) {
  2269. kfree_skb(skb);
  2270. printk(KERN_ERR "ippp: decomp memory allocation failure\n");
  2271. return NULL;
  2272. }
  2273. len = ipc->decompress(stat, skb, skb_out, &rsparm);
  2274. kfree_skb(skb);
  2275. if (len <= 0) {
  2276. switch (len) {
  2277. case DECOMP_ERROR:
  2278. printk(KERN_INFO "ippp: decomp wants reset %s params\n",
  2279. rsparm.valid ? "with" : "without");
  2280. isdn_ppp_ccp_reset_trans(ri, &rsparm);
  2281. break;
  2282. case DECOMP_FATALERROR:
  2283. ri->pppcfg |= SC_DC_FERROR;
  2284. /* Kick ipppd to recognize the error */
  2285. isdn_ppp_ccp_kickup(ri);
  2286. break;
  2287. }
  2288. kfree_skb(skb_out);
  2289. return NULL;
  2290. }
  2291. *proto = isdn_ppp_strip_proto(skb_out);
  2292. if (*proto < 0) {
  2293. kfree_skb(skb_out);
  2294. return NULL;
  2295. }
  2296. return skb_out;
  2297. } else {
  2298. // uncompressed packets are fed through the decompressor to
  2299. // update the decompressor state
  2300. ipc->incomp(stat, skb, *proto);
  2301. return skb;
  2302. }
  2303. }
  2304. /*
  2305. * compress a frame
  2306. * type=0: normal/bundle compression
  2307. * =1: link compression
  2308. * returns original skb if we haven't compressed the frame
  2309. * and a new skb pointer if we've done it
  2310. */
  2311. static struct sk_buff *isdn_ppp_compress(struct sk_buff *skb_in, int *proto,
  2312. struct ippp_struct *is, struct ippp_struct *master, int type)
  2313. {
  2314. int ret;
  2315. int new_proto;
  2316. struct isdn_ppp_compressor *compressor;
  2317. void *stat;
  2318. struct sk_buff *skb_out;
  2319. /* we do not compress control protocols */
  2320. if (*proto < 0 || *proto > 0x3fff) {
  2321. return skb_in;
  2322. }
  2323. if (type) { /* type=1 => Link compression */
  2324. return skb_in;
  2325. }
  2326. else {
  2327. if (!master) {
  2328. compressor = is->compressor;
  2329. stat = is->comp_stat;
  2330. }
  2331. else {
  2332. compressor = master->compressor;
  2333. stat = master->comp_stat;
  2334. }
  2335. new_proto = PPP_COMP;
  2336. }
  2337. if (!compressor) {
  2338. printk(KERN_ERR "isdn_ppp: No compressor set!\n");
  2339. return skb_in;
  2340. }
  2341. if (!stat) {
  2342. printk(KERN_ERR "isdn_ppp: Compressor not initialized?\n");
  2343. return skb_in;
  2344. }
  2345. /* Allow for at least 150 % expansion (for now) */
  2346. skb_out = alloc_skb(skb_in->len + skb_in->len / 2 + 32 +
  2347. skb_headroom(skb_in), GFP_ATOMIC);
  2348. if (!skb_out)
  2349. return skb_in;
  2350. skb_reserve(skb_out, skb_headroom(skb_in));
  2351. ret = (compressor->compress)(stat, skb_in, skb_out, *proto);
  2352. if (!ret) {
  2353. dev_kfree_skb(skb_out);
  2354. return skb_in;
  2355. }
  2356. dev_kfree_skb(skb_in);
  2357. *proto = new_proto;
  2358. return skb_out;
  2359. }
  2360. /*
  2361. * we received a CCP frame ..
  2362. * not a clean solution, but we MUST handle a few cases in the kernel
  2363. */
  2364. static void isdn_ppp_receive_ccp(isdn_net_dev *net_dev, isdn_net_local *lp,
  2365. struct sk_buff *skb, int proto)
  2366. {
  2367. struct ippp_struct *is;
  2368. struct ippp_struct *mis;
  2369. int len;
  2370. struct isdn_ppp_resetparams rsparm;
  2371. unsigned char rsdata[IPPP_RESET_MAXDATABYTES];
  2372. printk(KERN_DEBUG "Received CCP frame from peer slot(%d)\n",
  2373. lp->ppp_slot);
  2374. if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) {
  2375. printk(KERN_ERR "%s: lp->ppp_slot(%d) out of range\n",
  2376. __func__, lp->ppp_slot);
  2377. return;
  2378. }
  2379. is = ippp_table[lp->ppp_slot];
  2380. isdn_ppp_frame_log("ccp-rcv", skb->data, skb->len, 32, is->unit, lp->ppp_slot);
  2381. if (lp->master) {
  2382. int slot = ISDN_MASTER_PRIV(lp)->ppp_slot;
  2383. if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
  2384. printk(KERN_ERR "%s: slot(%d) out of range\n",
  2385. __func__, slot);
  2386. return;
  2387. }
  2388. mis = ippp_table[slot];
  2389. } else
  2390. mis = is;
  2391. switch (skb->data[0]) {
  2392. case CCP_CONFREQ:
  2393. if (is->debug & 0x10)
  2394. printk(KERN_DEBUG "Disable compression here!\n");
  2395. if (proto == PPP_CCP)
  2396. mis->compflags &= ~SC_COMP_ON;
  2397. else
  2398. is->compflags &= ~SC_LINK_COMP_ON;
  2399. break;
  2400. case CCP_TERMREQ:
  2401. case CCP_TERMACK:
  2402. if (is->debug & 0x10)
  2403. printk(KERN_DEBUG "Disable (de)compression here!\n");
  2404. if (proto == PPP_CCP)
  2405. mis->compflags &= ~(SC_DECOMP_ON | SC_COMP_ON);
  2406. else
  2407. is->compflags &= ~(SC_LINK_DECOMP_ON | SC_LINK_COMP_ON);
  2408. break;
  2409. case CCP_CONFACK:
2410. /* if we RECEIVE an acknowledge we enable the decompressor */
  2411. if (is->debug & 0x10)
  2412. printk(KERN_DEBUG "Enable decompression here!\n");
  2413. if (proto == PPP_CCP) {
  2414. if (!mis->decompressor)
  2415. break;
  2416. mis->compflags |= SC_DECOMP_ON;
  2417. } else {
  2418. if (!is->decompressor)
  2419. break;
  2420. is->compflags |= SC_LINK_DECOMP_ON;
  2421. }
  2422. break;
  2423. case CCP_RESETACK:
  2424. printk(KERN_DEBUG "Received ResetAck from peer\n");
  2425. len = (skb->data[2] << 8) | skb->data[3];
  2426. len -= 4;
  2427. if (proto == PPP_CCP) {
  2428. /* If a reset Ack was outstanding for this id, then
  2429. clean up the state engine */
  2430. isdn_ppp_ccp_reset_ack_rcvd(mis, skb->data[1]);
  2431. if (mis->decompressor && mis->decomp_stat)
  2432. mis->decompressor->
  2433. reset(mis->decomp_stat,
  2434. skb->data[0],
  2435. skb->data[1],
  2436. len ? &skb->data[4] : NULL,
  2437. len, NULL);
  2438. /* TODO: This is not easy to decide here */
  2439. mis->compflags &= ~SC_DECOMP_DISCARD;
  2440. }
  2441. else {
  2442. isdn_ppp_ccp_reset_ack_rcvd(is, skb->data[1]);
  2443. if (is->link_decompressor && is->link_decomp_stat)
  2444. is->link_decompressor->
  2445. reset(is->link_decomp_stat,
  2446. skb->data[0],
  2447. skb->data[1],
  2448. len ? &skb->data[4] : NULL,
  2449. len, NULL);
  2450. /* TODO: neither here */
  2451. is->compflags &= ~SC_LINK_DECOMP_DISCARD;
  2452. }
  2453. break;
  2454. case CCP_RESETREQ:
  2455. printk(KERN_DEBUG "Received ResetReq from peer\n");
  2456. /* Receiving a ResetReq means we must reset our compressor */
  2457. /* Set up reset params for the reset entry */
  2458. memset(&rsparm, 0, sizeof(rsparm));
  2459. rsparm.data = rsdata;
  2460. rsparm.maxdlen = IPPP_RESET_MAXDATABYTES;
  2461. /* Isolate data length */
  2462. len = (skb->data[2] << 8) | skb->data[3];
  2463. len -= 4;
  2464. if (proto == PPP_CCP) {
  2465. if (mis->compressor && mis->comp_stat)
  2466. mis->compressor->
  2467. reset(mis->comp_stat,
  2468. skb->data[0],
  2469. skb->data[1],
  2470. len ? &skb->data[4] : NULL,
  2471. len, &rsparm);
  2472. }
  2473. else {
  2474. if (is->link_compressor && is->link_comp_stat)
  2475. is->link_compressor->
  2476. reset(is->link_comp_stat,
  2477. skb->data[0],
  2478. skb->data[1],
  2479. len ? &skb->data[4] : NULL,
  2480. len, &rsparm);
  2481. }
  2482. /* Ack the Req as specified by rsparm */
  2483. if (rsparm.valid) {
  2484. /* Compressor reset handler decided how to answer */
  2485. if (rsparm.rsend) {
  2486. /* We should send a Frame */
  2487. isdn_ppp_ccp_xmit_reset(is, proto, CCP_RESETACK,
  2488. rsparm.idval ? rsparm.id
  2489. : skb->data[1],
  2490. rsparm.dtval ?
  2491. rsparm.data : NULL,
  2492. rsparm.dtval ?
  2493. rsparm.dlen : 0);
  2494. } else {
  2495. printk(KERN_DEBUG "ResetAck suppressed\n");
  2496. }
  2497. } else {
  2498. /* We answer with a straight reflected Ack */
  2499. isdn_ppp_ccp_xmit_reset(is, proto, CCP_RESETACK,
  2500. skb->data[1],
  2501. len ? &skb->data[4] : NULL,
  2502. len);
  2503. }
  2504. break;
  2505. }
  2506. }
  2507. /*
  2508. * Daemon sends a CCP frame ...
  2509. */
  2510. /* TODO: Clean this up with new Reset semantics */
  2511. /* I believe the CCP handling as-is is done wrong. Compressed frames
  2512. * should only be sent/received after CCP reaches UP state, which means
  2513. * both sides have sent CONF_ACK. Currently, we handle both directions
  2514. * independently, which means we may accept compressed frames too early
  2515. * (supposedly not a problem), but may also mean we send compressed frames
  2516. * too early, which may turn out to be a problem.
2517. * This part of the state machine should actually be handled by (i)pppd, but
  2518. * that's too big of a change now. --kai
  2519. */
  2520. /* Actually, we might turn this into an advantage: deal with the RFC in
2521. * the old tradition of being generous in what we accept, but being
2522. * strict in what we send. Thus we should just
  2523. * - accept compressed frames as soon as decompression is negotiated
  2524. * - send compressed frames only when decomp *and* comp are negotiated
  2525. * - drop rx compressed frames if we cannot decomp (instead of pushing them
  2526. * up to ipppd)
  2527. * and I tried to modify this file according to that. --abp
  2528. */
  2529. static void isdn_ppp_send_ccp(isdn_net_dev *net_dev, isdn_net_local *lp, struct sk_buff *skb)
  2530. {
  2531. struct ippp_struct *mis, *is;
  2532. int proto, slot = lp->ppp_slot;
  2533. unsigned char *data;
  2534. if (!skb || skb->len < 3)
  2535. return;
  2536. if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
  2537. printk(KERN_ERR "%s: lp->ppp_slot(%d) out of range\n",
  2538. __func__, slot);
  2539. return;
  2540. }
  2541. is = ippp_table[slot];
  2542. /* Daemon may send with or without address and control field comp */
  2543. data = skb->data;
  2544. if (!(is->pppcfg & SC_COMP_AC) && data[0] == 0xff && data[1] == 0x03) {
  2545. data += 2;
  2546. if (skb->len < 5)
  2547. return;
  2548. }
  2549. proto = ((int)data[0]<<8) + data[1];
  2550. if (proto != PPP_CCP && proto != PPP_CCPFRAG)
  2551. return;
  2552. printk(KERN_DEBUG "Received CCP frame from daemon:\n");
  2553. isdn_ppp_frame_log("ccp-xmit", skb->data, skb->len, 32, is->unit, lp->ppp_slot);
  2554. if (lp->master) {
  2555. slot = ISDN_MASTER_PRIV(lp)->ppp_slot;
  2556. if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
  2557. printk(KERN_ERR "%s: slot(%d) out of range\n",
  2558. __func__, slot);
  2559. return;
  2560. }
  2561. mis = ippp_table[slot];
  2562. } else
  2563. mis = is;
  2564. if (mis != is)
  2565. printk(KERN_DEBUG "isdn_ppp: Ouch! Master CCP sends on slave slot!\n");
  2566. switch (data[2]) {
  2567. case CCP_CONFREQ:
  2568. if (is->debug & 0x10)
  2569. printk(KERN_DEBUG "Disable decompression here!\n");
  2570. if (proto == PPP_CCP)
  2571. is->compflags &= ~SC_DECOMP_ON;
  2572. else
  2573. is->compflags &= ~SC_LINK_DECOMP_ON;
  2574. break;
  2575. case CCP_TERMREQ:
  2576. case CCP_TERMACK:
  2577. if (is->debug & 0x10)
  2578. printk(KERN_DEBUG "Disable (de)compression here!\n");
  2579. if (proto == PPP_CCP)
  2580. is->compflags &= ~(SC_DECOMP_ON | SC_COMP_ON);
  2581. else
  2582. is->compflags &= ~(SC_LINK_DECOMP_ON | SC_LINK_COMP_ON);
  2583. break;
  2584. case CCP_CONFACK:
2585. /* if we SEND an acknowledge we can/must enable the compressor */
  2586. if (is->debug & 0x10)
  2587. printk(KERN_DEBUG "Enable compression here!\n");
  2588. if (proto == PPP_CCP) {
  2589. if (!is->compressor)
  2590. break;
  2591. is->compflags |= SC_COMP_ON;
  2592. } else {
  2593. if (!is->compressor)
  2594. break;
  2595. is->compflags |= SC_LINK_COMP_ON;
  2596. }
  2597. break;
  2598. case CCP_RESETACK:
2599. /* If we send an ACK we should reset our compressor */
  2600. if (is->debug & 0x10)
  2601. printk(KERN_DEBUG "Reset decompression state here!\n");
  2602. printk(KERN_DEBUG "ResetAck from daemon passed by\n");
  2603. if (proto == PPP_CCP) {
  2604. /* link to master? */
  2605. if (is->compressor && is->comp_stat)
  2606. is->compressor->reset(is->comp_stat, 0, 0,
  2607. NULL, 0, NULL);
  2608. is->compflags &= ~SC_COMP_DISCARD;
  2609. }
  2610. else {
  2611. if (is->link_compressor && is->link_comp_stat)
  2612. is->link_compressor->reset(is->link_comp_stat,
  2613. 0, 0, NULL, 0, NULL);
  2614. is->compflags &= ~SC_LINK_COMP_DISCARD;
  2615. }
  2616. break;
  2617. case CCP_RESETREQ:
  2618. /* Just let it pass by */
  2619. printk(KERN_DEBUG "ResetReq from daemon passed by\n");
  2620. break;
  2621. }
  2622. }
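/*
 * (De)compressor modules announce themselves here; isdn_ppp_set_compressor()
 * walks this list to find the algorithm number requested by the daemon.
 */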
  2623. int isdn_ppp_register_compressor(struct isdn_ppp_compressor *ipc)
  2624. {
  2625. ipc->next = ipc_head;
  2626. ipc->prev = NULL;
  2627. if (ipc_head) {
  2628. ipc_head->prev = ipc;
  2629. }
  2630. ipc_head = ipc;
  2631. return 0;
  2632. }
  2633. int isdn_ppp_unregister_compressor(struct isdn_ppp_compressor *ipc)
  2634. {
  2635. if (ipc->prev)
  2636. ipc->prev->next = ipc->next;
  2637. else
  2638. ipc_head = ipc->next;
  2639. if (ipc->next)
  2640. ipc->next->prev = ipc->prev;
  2641. ipc->prev = ipc->next = NULL;
  2642. return 0;
  2643. }
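/*
 * Pick the (de)compressor with the requested algorithm number from the
 * registered list, allocate and initialize its state, and attach it to
 * the link or bundle slot selected by data->flags.  Decompressors are
 * refused while no reset state vector exists, because reset
 * transactions would need it.
 */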
  2644. static int isdn_ppp_set_compressor(struct ippp_struct *is, struct isdn_ppp_comp_data *data)
  2645. {
  2646. struct isdn_ppp_compressor *ipc = ipc_head;
  2647. int ret;
  2648. void *stat;
  2649. int num = data->num;
  2650. if (is->debug & 0x10)
  2651. printk(KERN_DEBUG "[%d] Set %s type %d\n", is->unit,
  2652. (data->flags & IPPP_COMP_FLAG_XMIT) ? "compressor" : "decompressor", num);
  2653. /* If is has no valid reset state vector, we cannot allocate a
  2654. decompressor. The decompressor would cause reset transactions
  2655. sooner or later, and they need that vector. */
  2656. if (!(data->flags & IPPP_COMP_FLAG_XMIT) && !is->reset) {
  2657. printk(KERN_ERR "ippp_ccp: no reset data structure - can't"
  2658. " allow decompression.\n");
  2659. return -ENOMEM;
  2660. }
  2661. while (ipc) {
  2662. if (ipc->num == num) {
  2663. stat = ipc->alloc(data);
  2664. if (stat) {
  2665. ret = ipc->init(stat, data, is->unit, 0);
  2666. if (!ret) {
  2667. printk(KERN_ERR "Can't init (de)compression!\n");
  2668. ipc->free(stat);
  2669. stat = NULL;
  2670. break;
  2671. }
  2672. }
  2673. else {
  2674. printk(KERN_ERR "Can't alloc (de)compression!\n");
  2675. break;
  2676. }
  2677. if (data->flags & IPPP_COMP_FLAG_XMIT) {
  2678. if (data->flags & IPPP_COMP_FLAG_LINK) {
  2679. if (is->link_comp_stat)
  2680. is->link_compressor->free(is->link_comp_stat);
  2681. is->link_comp_stat = stat;
  2682. is->link_compressor = ipc;
  2683. }
  2684. else {
  2685. if (is->comp_stat)
  2686. is->compressor->free(is->comp_stat);
  2687. is->comp_stat = stat;
  2688. is->compressor = ipc;
  2689. }
  2690. }
  2691. else {
  2692. if (data->flags & IPPP_COMP_FLAG_LINK) {
  2693. if (is->link_decomp_stat)
  2694. is->link_decompressor->free(is->link_decomp_stat);
  2695. is->link_decomp_stat = stat;
  2696. is->link_decompressor = ipc;
  2697. }
  2698. else {
  2699. if (is->decomp_stat)
  2700. is->decompressor->free(is->decomp_stat);
  2701. is->decomp_stat = stat;
  2702. is->decompressor = ipc;
  2703. }
  2704. }
  2705. return 0;
  2706. }
  2707. ipc = ipc->next;
  2708. }
  2709. return -EINVAL;
  2710. }