PageRenderTime 50ms CodeModel.GetById 24ms RepoModel.GetById 0ms app.codeStats 1ms

/drivers/staging/pohmelfs/trans.c

https://bitbucket.org/paulobrien/android_kernel_andypad
C | 706 lines | 515 code | 127 blank | 64 comment | 75 complexity | 33c04ce9a3be12c575f11afca192a750 MD5 | raw file
Possible License(s): AGPL-1.0, GPL-2.0, LGPL-2.0
  1. /*
  2. * 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
  3. * All rights reserved.
  4. *
  5. * This program is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU General Public License as published by
  7. * the Free Software Foundation; either version 2 of the License, or
  8. * (at your option) any later version.
  9. *
  10. * This program is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13. * GNU General Public License for more details.
  14. */
  15. #include <linux/module.h>
  16. #include <linux/crypto.h>
  17. #include <linux/fs.h>
  18. #include <linux/jhash.h>
  19. #include <linux/hash.h>
  20. #include <linux/ktime.h>
  21. #include <linux/mempool.h>
  22. #include <linux/mm.h>
  23. #include <linux/mount.h>
  24. #include <linux/pagemap.h>
  25. #include <linux/parser.h>
  26. #include <linux/poll.h>
  27. #include <linux/swap.h>
  28. #include <linux/slab.h>
  29. #include <linux/statfs.h>
  30. #include <linux/writeback.h>
  31. #include "netfs.h"
/*
 * Slab cache and mempool backing struct netfs_trans_dst allocations.
 * The mempool (created with 256 reserved elements in netfs_trans_init())
 * lets destination entries be allocated on the writeback path without
 * failing under memory pressure.
 */
static struct kmem_cache *netfs_trans_dst;
static mempool_t *netfs_trans_dst_pool;
  34. static void netfs_trans_init_static(struct netfs_trans *t, int num, int size)
  35. {
  36. t->page_num = num;
  37. t->total_size = size;
  38. atomic_set(&t->refcnt, 1);
  39. spin_lock_init(&t->dst_lock);
  40. INIT_LIST_HEAD(&t->dst_list);
  41. }
/*
 * Send every page attached to transaction @t over the socket of state @st.
 * Each page is preceded by its own NETFS_WRITE_PAGE command header.
 *
 * Two cursors run in parallel: @i walks the transaction's own page slot
 * array (which may contain NULL holes), while @ci indexes the array that
 * is actually transmitted — the crypto engine's page copies when t->eng
 * is set, the plain pages otherwise. @ci only advances on a successful
 * send of a non-NULL slot.
 *
 * Returns 0 when all @attached_pages pages were sent, a negative error
 * otherwise; on error the state is torn down via netfs_state_exit().
 */
static int netfs_trans_send_pages(struct netfs_trans *t, struct netfs_state *st)
{
	int err = 0;
	unsigned int i, attached_pages = t->attached_pages, ci;
	struct msghdr msg;
	/* Encrypted transactions transmit the engine's page copies. */
	struct page **pages = (t->eng)?t->eng->pages:t->pages;
	struct page *p;
	unsigned int size;

	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = MSG_WAITALL | MSG_MORE;

	ci = 0;
	for (i=0; i<t->page_num; ++i) {
		struct page *page = pages[ci];
		struct netfs_cmd cmd;
		struct iovec io;

		p = t->pages[i];

		/* Skip empty slots; @ci stays put so send/source arrays line up. */
		if (!p)
			continue;

		/* Number of payload bytes was stashed in page_private()
		 * by whoever attached the page — assumed byte count. */
		size = page_private(p);

		io.iov_base = &cmd;
		io.iov_len = sizeof(struct netfs_cmd);

		cmd.cmd = NETFS_WRITE_PAGE;
		cmd.ext = 0;
		cmd.id = 0;
		cmd.size = size;
		/* Page index -> byte offset in the file. */
		cmd.start = p->index;
		cmd.start <<= PAGE_CACHE_SHIFT;
		cmd.csize = 0;
		cmd.cpad = 0;
		cmd.iv = pohmelfs_gen_iv(t);

		/* Convert header to network byte order. */
		netfs_convert_cmd(&cmd);

		msg.msg_iov = &io;
		msg.msg_iovlen = 1;
		/* More data (the page itself) always follows the header. */
		msg.msg_flags = MSG_WAITALL | MSG_MORE;

		err = kernel_sendmsg(st->socket, &msg, (struct kvec *)msg.msg_iov, 1, sizeof(struct netfs_cmd));
		if (err <= 0) {
			printk("%s: %d/%d failed to send transaction header: t: %p, gen: %u, err: %d.\n",
					__func__, i, t->page_num, t, t->gen, err);
			/* A zero-byte send means the peer closed on us. */
			if (err == 0)
				err = -ECONNRESET;
			goto err_out;
		}

		/* Drop MSG_MORE for the last remaining page so the
		 * stack can flush the final segment immediately. */
		msg.msg_flags = MSG_WAITALL | (attached_pages == 1 ? 0 :
				MSG_MORE);

		err = kernel_sendpage(st->socket, page, 0, size, msg.msg_flags);
		if (err <= 0) {
			printk("%s: %d/%d failed to send transaction page: t: %p, gen: %u, size: %u, err: %d.\n",
					__func__, i, t->page_num, t, t->gen, size, err);
			if (err == 0)
				err = -ECONNRESET;
			goto err_out;
		}

		dprintk("%s: %d/%d sent t: %p, gen: %u, page: %p/%p, size: %u.\n",
			__func__, i, t->page_num, t, t->gen, page, p, size);

		err = 0;
		attached_pages--;
		if (!attached_pages)
			break;
		ci++;

		/* Skip over the in-loop error label on the success path. */
		continue;

err_out:
		printk("%s: t: %p, gen: %u, err: %d.\n", __func__, t, t->gen, err);
		/* Connection is in an unknown state — reset it. */
		netfs_state_exit(st);
		break;
	}

	return err;
}
/*
 * Transmit transaction @t to the server behind state @st: first the
 * contiguous iovec part (transaction command plus inline payload), then,
 * if pages are attached, each page via netfs_trans_send_pages().
 *
 * The whole send runs under the state's send lock so transactions are
 * not interleaved on the wire. Establishes the connection lazily if the
 * socket is not yet open. The result is also recorded in t->result.
 */
int netfs_trans_send(struct netfs_trans *t, struct netfs_state *st)
{
	int err;
	struct msghdr msg;

	/* An empty or absurdly large (>1GiB) iovec indicates a corrupted
	 * transaction — better to die loudly. */
	BUG_ON(!t->iovec.iov_len);
	BUG_ON(t->iovec.iov_len > 1024*1024*1024);

	netfs_state_lock_send(st);
	if (!st->socket) {
		/* Lazily (re)connect before the first send. */
		err = netfs_state_init(st);
		if (err)
			goto err_out_unlock_return;
	}

	msg.msg_iov = &t->iovec;
	msg.msg_iovlen = 1;
	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = MSG_WAITALL;

	/* Hint the stack that page data follows this header. */
	if (t->attached_pages)
		msg.msg_flags |= MSG_MORE;

	err = kernel_sendmsg(st->socket, &msg, (struct kvec *)msg.msg_iov, 1, t->iovec.iov_len);
	if (err <= 0) {
		printk("%s: failed to send contig transaction: t: %p, gen: %u, size: %zu, err: %d.\n",
				__func__, t, t->gen, t->iovec.iov_len, err);
		/* Zero bytes sent means the peer closed the connection. */
		if (err == 0)
			err = -ECONNRESET;
		goto err_out_unlock_return;
	}

	dprintk("%s: sent %s transaction: t: %p, gen: %u, size: %zu, page_num: %u.\n",
			__func__, (t->page_num)?"partial":"full",
			t, t->gen, t->iovec.iov_len, t->page_num);

	err = 0;
	if (t->attached_pages)
		err = netfs_trans_send_pages(t, st);

err_out_unlock_return:

	/* A failed page send (or anything else) may have flagged the
	 * state for reset — tear the connection down before unlocking. */
	if (st->need_reset)
		netfs_state_exit(st);
	netfs_state_unlock_send(st);

	dprintk("%s: t: %p, gen: %u, err: %d.\n",
		__func__, t, t->gen, err);

	t->result = err;
	return err;
}
  156. static inline int netfs_trans_cmp(unsigned int gen, unsigned int new)
  157. {
  158. if (gen < new)
  159. return 1;
  160. if (gen > new)
  161. return -1;
  162. return 0;
  163. }
  164. struct netfs_trans_dst *netfs_trans_search(struct netfs_state *st, unsigned int gen)
  165. {
  166. struct rb_root *root = &st->trans_root;
  167. struct rb_node *n = root->rb_node;
  168. struct netfs_trans_dst *tmp, *ret = NULL;
  169. struct netfs_trans *t;
  170. int cmp;
  171. while (n) {
  172. tmp = rb_entry(n, struct netfs_trans_dst, state_entry);
  173. t = tmp->trans;
  174. cmp = netfs_trans_cmp(t->gen, gen);
  175. if (cmp < 0)
  176. n = n->rb_left;
  177. else if (cmp > 0)
  178. n = n->rb_right;
  179. else {
  180. ret = tmp;
  181. break;
  182. }
  183. }
  184. return ret;
  185. }
/*
 * Insert destination entry @ndst into state @st's rb-tree, keyed by the
 * transaction generation number. On success the send timestamp is set
 * and 0 is returned; if an entry with the same generation already
 * exists, nothing is inserted and -EEXIST is returned.
 *
 * Caller must hold st->trans_lock.
 */
static int netfs_trans_insert(struct netfs_trans_dst *ndst, struct netfs_state *st)
{
	struct rb_root *root = &st->trans_root;
	struct rb_node **n = &root->rb_node, *parent = NULL;
	struct netfs_trans_dst *ret = NULL, *tmp;
	struct netfs_trans *t = NULL, *new = ndst->trans;
	int cmp;

	/* Standard rb-tree descent remembering the link to attach to. */
	while (*n) {
		parent = *n;

		tmp = rb_entry(parent, struct netfs_trans_dst, state_entry);
		t = tmp->trans;

		cmp = netfs_trans_cmp(t->gen, new->gen);
		if (cmp < 0)
			n = &parent->rb_left;
		else if (cmp > 0)
			n = &parent->rb_right;
		else {
			/* Duplicate generation — remember the clash. */
			ret = tmp;
			break;
		}
	}

	if (ret) {
		printk("%s: exist: old: gen: %u, flags: %x, send_time: %lu, "
			"new: gen: %u, flags: %x, send_time: %lu.\n",
			__func__, t->gen, t->flags, ret->send_time,
			new->gen, new->flags, ndst->send_time);
		return -EEXIST;
	}

	rb_link_node(&ndst->state_entry, parent, n);
	rb_insert_color(&ndst->state_entry, root);

	/* Timestamp used by the resend/timeout machinery. */
	ndst->send_time = jiffies;
	return 0;
}
  219. int netfs_trans_remove_nolock(struct netfs_trans_dst *dst, struct netfs_state *st)
  220. {
  221. if (dst && dst->state_entry.rb_parent_color) {
  222. rb_erase(&dst->state_entry, &st->trans_root);
  223. dst->state_entry.rb_parent_color = 0;
  224. return 1;
  225. }
  226. return 0;
  227. }
  228. static int netfs_trans_remove_state(struct netfs_trans_dst *dst)
  229. {
  230. int ret;
  231. struct netfs_state *st = dst->state;
  232. mutex_lock(&st->trans_lock);
  233. ret = netfs_trans_remove_nolock(dst, st);
  234. mutex_unlock(&st->trans_lock);
  235. return ret;
  236. }
/*
 * Create a new destination entry binding transaction @t to network state
 * @st and register it in the state's rb-tree.
 *
 * The transaction's reference counter is bumped for the entry; it is
 * dropped when either a reply is received or the async timeout task
 * gives up resending and drops the transaction.
 *
 * Returns 0 on success; -ENOMEM on allocation failure, or the insert
 * error (e.g. -EEXIST) — in the latter case the reference and the entry
 * are rolled back and t->result records the error.
 */
static int netfs_trans_push_dst(struct netfs_trans *t, struct netfs_state *st)
{
	struct netfs_trans_dst *dst;
	int err;

	dst = mempool_alloc(netfs_trans_dst_pool, GFP_KERNEL);
	if (!dst)
		return -ENOMEM;

	dst->retries = 0;
	dst->send_time = 0;
	dst->state = st;
	dst->trans = t;
	/* The destination entry owns a transaction reference. */
	netfs_trans_get(t);

	mutex_lock(&st->trans_lock);
	err = netfs_trans_insert(dst, st);
	mutex_unlock(&st->trans_lock);

	if (err)
		goto err_out_free;

	/* Only link into the transaction's own list once the state-tree
	 * insert has succeeded, so partial registrations never exist. */
	spin_lock(&t->dst_lock);
	list_add_tail(&dst->trans_entry, &t->dst_list);
	spin_unlock(&t->dst_lock);

	return 0;

err_out_free:
	t->result = err;
	/* Drop the reference taken above and return the entry to the pool. */
	netfs_trans_put(t);
	mempool_free(dst, netfs_trans_dst_pool);
	return err;
}
  270. static void netfs_trans_free_dst(struct netfs_trans_dst *dst)
  271. {
  272. netfs_trans_put(dst->trans);
  273. mempool_free(dst, netfs_trans_dst_pool);
  274. }
  275. static void netfs_trans_remove_dst(struct netfs_trans_dst *dst)
  276. {
  277. if (netfs_trans_remove_state(dst))
  278. netfs_trans_free_dst(dst);
  279. }
  280. /*
  281. * Drop destination transaction entry when we know it.
  282. */
  283. void netfs_trans_drop_dst(struct netfs_trans_dst *dst)
  284. {
  285. struct netfs_trans *t = dst->trans;
  286. spin_lock(&t->dst_lock);
  287. list_del_init(&dst->trans_entry);
  288. spin_unlock(&t->dst_lock);
  289. netfs_trans_remove_dst(dst);
  290. }
  291. /*
  292. * Drop destination transaction entry when we know it and when we
  293. * already removed dst from state tree.
  294. */
  295. void netfs_trans_drop_dst_nostate(struct netfs_trans_dst *dst)
  296. {
  297. struct netfs_trans *t = dst->trans;
  298. spin_lock(&t->dst_lock);
  299. list_del_init(&dst->trans_entry);
  300. spin_unlock(&t->dst_lock);
  301. netfs_trans_free_dst(dst);
  302. }
  303. /*
  304. * This drops destination transaction entry from appropriate network state
  305. * tree and drops related reference counter. It is possible that transaction
  306. * will be freed here if its reference counter hits zero.
  307. * Destination transaction entry will be freed.
  308. */
  309. void netfs_trans_drop_trans(struct netfs_trans *t, struct netfs_state *st)
  310. {
  311. struct netfs_trans_dst *dst, *tmp, *ret = NULL;
  312. spin_lock(&t->dst_lock);
  313. list_for_each_entry_safe(dst, tmp, &t->dst_list, trans_entry) {
  314. if (dst->state == st) {
  315. ret = dst;
  316. list_del(&dst->trans_entry);
  317. break;
  318. }
  319. }
  320. spin_unlock(&t->dst_lock);
  321. if (ret)
  322. netfs_trans_remove_dst(ret);
  323. }
  324. /*
  325. * This drops destination transaction entry from appropriate network state
  326. * tree and drops related reference counter. It is possible that transaction
  327. * will be freed here if its reference counter hits zero.
  328. * Destination transaction entry will be freed.
  329. */
  330. void netfs_trans_drop_last(struct netfs_trans *t, struct netfs_state *st)
  331. {
  332. struct netfs_trans_dst *dst, *tmp, *ret;
  333. spin_lock(&t->dst_lock);
  334. ret = list_entry(t->dst_list.prev, struct netfs_trans_dst, trans_entry);
  335. if (ret->state != st) {
  336. ret = NULL;
  337. list_for_each_entry_safe(dst, tmp, &t->dst_list, trans_entry) {
  338. if (dst->state == st) {
  339. ret = dst;
  340. list_del_init(&dst->trans_entry);
  341. break;
  342. }
  343. }
  344. } else {
  345. list_del(&ret->trans_entry);
  346. }
  347. spin_unlock(&t->dst_lock);
  348. if (ret)
  349. netfs_trans_remove_dst(ret);
  350. }
  351. static int netfs_trans_push(struct netfs_trans *t, struct netfs_state *st)
  352. {
  353. int err;
  354. err = netfs_trans_push_dst(t, st);
  355. if (err)
  356. return err;
  357. err = netfs_trans_send(t, st);
  358. if (err)
  359. goto err_out_free;
  360. if (t->flags & NETFS_TRANS_SINGLE_DST)
  361. pohmelfs_switch_active(st->psb);
  362. return 0;
  363. err_out_free:
  364. t->result = err;
  365. netfs_trans_drop_last(t, st);
  366. return err;
  367. }
/*
 * Push transaction @t to the appropriate server(s) of superblock @psb.
 *
 * Single-destination transactions (NETFS_TRANS_SINGLE_DST) go to the
 * first readable state — preferring the active state when its priority
 * is at least as high — and stop after the first success. All other
 * transactions are broadcast to every writable state.
 *
 * Returns 0 if at least the last attempted push succeeded, otherwise
 * the last error (-ENODEV when no eligible state exists); a failure is
 * also recorded in t->result.
 */
int netfs_trans_finish_send(struct netfs_trans *t, struct pohmelfs_sb *psb)
{
	struct pohmelfs_config *c;
	int err = -ENODEV;
	struct netfs_state *st;
#if 0
	dprintk("%s: t: %p, gen: %u, size: %u, page_num: %u, active: %p.\n",
		__func__, t, t->gen, t->iovec.iov_len, t->page_num, psb->active_state);
#endif
	mutex_lock(&psb->state_lock);
	list_for_each_entry(c, &psb->state_list, config_entry) {
		st = &c->state;

		/* Permission filter: reads may use any readable state,
		 * broadcasts only writable ones. */
		if (t->flags & NETFS_TRANS_SINGLE_DST) {
			if (!(st->ctl.perm & POHMELFS_IO_PERM_READ))
				continue;
		} else {
			if (!(st->ctl.perm & POHMELFS_IO_PERM_WRITE))
				continue;
		}

		/* Prefer the active state for single-destination sends
		 * when it is at least as high-priority as this one. */
		if (psb->active_state && (psb->active_state->state.ctl.prio >= st->ctl.prio) &&
				(t->flags & NETFS_TRANS_SINGLE_DST))
			st = &psb->active_state->state;

		err = netfs_trans_push(t, st);
		if (!err && (t->flags & NETFS_TRANS_SINGLE_DST))
			break;
	}

	mutex_unlock(&psb->state_lock);
#if 0
	dprintk("%s: fully sent t: %p, gen: %u, size: %u, page_num: %u, err: %d.\n",
		__func__, t, t->gen, t->iovec.iov_len, t->page_num, err);
#endif
	if (err)
		t->result = err;
	return err;
}
/*
 * Finalize transaction @t and hand it off for transmission: assign a
 * fresh generation number, fill in the leading NETFS_TRANS command
 * header (total size covers the inline payload, attached page data and
 * one per-page command header each), then run it through
 * pohmelfs_trans_crypt(), which encrypts (if configured) and sends it.
 *
 * Drops the caller's transaction reference before returning the
 * crypt/send result.
 */
int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb)
{
	int err;
	struct netfs_cmd *cmd = t->iovec.iov_base;

	t->gen = atomic_inc_return(&psb->trans_gen);

	/* Payload size excludes this header itself, but includes attached
	 * page bytes plus one netfs_cmd header per attached page. */
	cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
		t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
	cmd->cmd = NETFS_TRANS;
	cmd->start = t->gen;
	cmd->id = 0;

	if (psb->perform_crypto) {
		cmd->ext = psb->crypto_attached_size;
		cmd->csize = psb->crypto_attached_size;
	}

	dprintk("%s: t: %u, size: %u, iov_len: %zu, attached_size: %u, attached_pages: %u.\n",
			__func__, t->gen, cmd->size, t->iovec.iov_len,
			t->attached_size, t->attached_pages);
	err = pohmelfs_trans_crypt(t, psb);
	if (err) {
		t->result = err;
		/* Convert the header back so the debug print below shows
		 * host-order values. */
		netfs_convert_cmd(cmd);
		dprintk("%s: trans: %llu, crypto_attached_size: %u, attached_size: %u, attached_pages: %d, trans_size: %u, err: %d.\n",
				__func__, cmd->start, psb->crypto_attached_size, t->attached_size,
				t->attached_pages, cmd->size, err);
	}
	netfs_trans_put(t);
	return err;
}
/*
 * Resend transaction to remote server(s).
 * If new servers were added into superblock, we can try to send data
 * to them too.
 *
 * It is called under superblock's state_lock, so we can safely
 * dereference psb->state_list. Also, transaction's reference counter is
 * bumped, so it can not go away under us, thus we can safely access all
 * its members. State is locked.
 *
 * This function returns 0 if transaction was successfully sent to at
 * least one destination target.
 */
int netfs_trans_resend(struct netfs_trans *t, struct pohmelfs_sb *psb)
{
	struct netfs_trans_dst *dst;
	struct netfs_state *st;
	struct pohmelfs_config *c;
	int err, exist, error = -ENODEV;

	list_for_each_entry(c, &psb->state_list, config_entry) {
		st = &c->state;

		/* Has this transaction already been pushed to @st? */
		exist = 0;
		spin_lock(&t->dst_lock);
		list_for_each_entry(dst, &t->dst_list, trans_entry) {
			if (st == dst->state) {
				exist = 1;
				break;
			}
		}
		spin_unlock(&t->dst_lock);

		if (exist) {
			/* Known destination: only retransmit for broadcast
			 * transactions, or — for single-destination ones —
			 * when this is the last state in the list. */
			if (!(t->flags & NETFS_TRANS_SINGLE_DST) ||
					(c->config_entry.next == &psb->state_list)) {
				dprintk("%s: resending st: %p, t: %p, gen: %u.\n",
						__func__, st, t, t->gen);
				err = netfs_trans_send(t, st);
				if (!err)
					error = 0;
			}
			continue;
		}

		/* New destination (e.g. a freshly added server): push. */
		dprintk("%s: pushing/resending st: %p, t: %p, gen: %u.\n",
				__func__, st, t, t->gen);
		err = netfs_trans_push(t, st);
		if (err)
			continue;
		error = 0;
		if (t->flags & NETFS_TRANS_SINGLE_DST)
			break;
	}

	t->result = error;
	return error;
}
  482. void *netfs_trans_add(struct netfs_trans *t, unsigned int size)
  483. {
  484. struct iovec *io = &t->iovec;
  485. void *ptr;
  486. if (size > t->total_size) {
  487. ptr = ERR_PTR(-EINVAL);
  488. goto out;
  489. }
  490. if (io->iov_len + size > t->total_size) {
  491. dprintk("%s: too big size t: %p, gen: %u, iov_len: %zu, size: %u, total: %u.\n",
  492. __func__, t, t->gen, io->iov_len, size, t->total_size);
  493. ptr = ERR_PTR(-E2BIG);
  494. goto out;
  495. }
  496. ptr = io->iov_base + io->iov_len;
  497. io->iov_len += size;
  498. out:
  499. dprintk("%s: t: %p, gen: %u, size: %u, total: %zu.\n",
  500. __func__, t, t->gen, size, io->iov_len);
  501. return ptr;
  502. }
  503. void netfs_trans_free(struct netfs_trans *t)
  504. {
  505. if (t->eng)
  506. pohmelfs_crypto_thread_make_ready(t->eng->thread);
  507. kfree(t);
  508. }
/*
 * Allocate a transaction able to carry @size bytes of inline payload
 * (rounded up for crypto alignment) plus up to @nr attached pages.
 *
 * Everything lives in a single kzalloc'd buffer laid out as described
 * below; @nr is clamped so the whole thing stays within one page.
 * The iovec starts out reserving room for the transaction header (and
 * crypto header, when enabled). Returns the transaction or NULL on
 * allocation failure.
 */
struct netfs_trans *netfs_trans_alloc(struct pohmelfs_sb *psb, unsigned int size,
		unsigned int flags, unsigned int nr)
{
	struct netfs_trans *t;
	unsigned int num, cont, pad, size_no_trans;
	unsigned int crypto_added = 0;
	struct netfs_cmd *cmd;

	if (psb->perform_crypto)
		crypto_added = psb->crypto_attached_size;

	/*
	 * |sizeof(struct netfs_trans)|
	 * |sizeof(struct netfs_cmd)| - transaction header
	 * |size| - buffer with requested size
	 * |padding| - crypto padding, zero bytes
	 * |nr * sizeof(struct page *)| - array of page pointers
	 *
	 * Overall size should be less than PAGE_SIZE for guaranteed allocation.
	 */

	/* Round the payload up to the crypto block size; @pad is the
	 * number of zero padding bytes that rounding introduced. */
	cont = size;
	size = ALIGN(size, psb->crypto_align_size);
	pad = size - cont;

	/* Payload + two command headers (transaction + crypto) + crypto data. */
	size_no_trans = size + sizeof(struct netfs_cmd) * 2 + crypto_added;

	cont = sizeof(struct netfs_trans) + size_no_trans;

	/* Clamp @nr so the allocation fits in one page.
	 * NOTE(review): if @cont ever exceeds PAGE_SIZE this unsigned
	 * subtraction wraps and @num becomes huge — presumably callers
	 * keep @size small enough; verify against callers. */
	num = (PAGE_SIZE - cont)/sizeof(struct page *);
	if (nr > num)
		nr = num;

	t = kzalloc(cont + nr*sizeof(struct page *), GFP_NOIO);
	if (!t)
		goto err_out_exit;

	/* Inline buffer sits right after the struct; the page pointer
	 * array follows the inline buffer. */
	t->iovec.iov_base = (void *)(t + 1);
	t->pages = (struct page **)(t->iovec.iov_base + size_no_trans);

	/*
	 * Reserving space for transaction header.
	 */
	t->iovec.iov_len = sizeof(struct netfs_cmd) + crypto_added;

	netfs_trans_init_static(t, nr, size_no_trans);

	t->flags = flags;
	t->psb = psb;

	cmd = (struct netfs_cmd *)t->iovec.iov_base;

	cmd->size = size;
	cmd->cpad = pad;
	cmd->csize = crypto_added;

	dprintk("%s: t: %p, gen: %u, size: %u, padding: %u, align_size: %u, flags: %x, "
			"page_num: %u, base: %p, pages: %p.\n",
			__func__, t, t->gen, size, pad, psb->crypto_align_size, flags, nr,
			t->iovec.iov_base, t->pages);

	return t;

err_out_exit:
	return NULL;
}
  559. int netfs_trans_init(void)
  560. {
  561. int err = -ENOMEM;
  562. netfs_trans_dst = kmem_cache_create("netfs_trans_dst", sizeof(struct netfs_trans_dst),
  563. 0, 0, NULL);
  564. if (!netfs_trans_dst)
  565. goto err_out_exit;
  566. netfs_trans_dst_pool = mempool_create_slab_pool(256, netfs_trans_dst);
  567. if (!netfs_trans_dst_pool)
  568. goto err_out_free;
  569. return 0;
  570. err_out_free:
  571. kmem_cache_destroy(netfs_trans_dst);
  572. err_out_exit:
  573. return err;
  574. }
/*
 * Module teardown: destroy the mempool before the slab cache that
 * backs it — the reverse of the creation order in netfs_trans_init().
 */
void netfs_trans_exit(void)
{
	mempool_destroy(netfs_trans_dst_pool);
	kmem_cache_destroy(netfs_trans_dst);
}