
/fs/afs/flock.c

https://github.com/mstsirkin/linux
/* AFS file locking support
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include "internal.h"

#define AFS_LOCK_GRANTED	0
#define AFS_LOCK_PENDING	1

static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl);
static void afs_fl_release_private(struct file_lock *fl);

static struct workqueue_struct *afs_lock_manager;
static DEFINE_MUTEX(afs_lock_manager_mutex);

static const struct file_lock_operations afs_lock_ops = {
	.fl_copy_lock		= afs_fl_copy_lock,
	.fl_release_private	= afs_fl_release_private,
};

/*
 * initialise the lock manager thread if it isn't already running
 */
static int afs_init_lock_manager(void)
{
	int ret;

	ret = 0;
	if (!afs_lock_manager) {
		mutex_lock(&afs_lock_manager_mutex);
		if (!afs_lock_manager) {
			afs_lock_manager =
				create_singlethread_workqueue("kafs_lockd");
			if (!afs_lock_manager)
				ret = -ENOMEM;
		}
		mutex_unlock(&afs_lock_manager_mutex);
	}
	return ret;
}

/*
 * destroy the lock manager thread if it's running
 */
void __exit afs_kill_lock_manager(void)
{
	if (afs_lock_manager)
		destroy_workqueue(afs_lock_manager);
}

/*
 * if the callback is broken on this vnode, then the lock may now be available
 */
void afs_lock_may_be_available(struct afs_vnode *vnode)
{
	_enter("{%x:%u}", vnode->fid.vid, vnode->fid.vnode);

	queue_delayed_work(afs_lock_manager, &vnode->lock_work, 0);
}

/*
 * the lock will time out in 5 minutes unless we extend it, so schedule
 * extension in a bit less than that time
 */
static void afs_schedule_lock_extension(struct afs_vnode *vnode)
{
	queue_delayed_work(afs_lock_manager, &vnode->lock_work,
			   AFS_LOCKWAIT * HZ / 2);
}

/*
 * grant one or more locks (readlocks are allowed to jump the queue if the
 * first lock in the queue is itself a readlock)
 * - the caller must hold the vnode lock
 */
static void afs_grant_locks(struct afs_vnode *vnode, struct file_lock *fl)
{
	struct file_lock *p, *_p;

	list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks);
	if (fl->fl_type == F_RDLCK) {
		list_for_each_entry_safe(p, _p, &vnode->pending_locks,
					 fl_u.afs.link) {
			if (p->fl_type == F_RDLCK) {
				p->fl_u.afs.state = AFS_LOCK_GRANTED;
				list_move_tail(&p->fl_u.afs.link,
					       &vnode->granted_locks);
				wake_up(&p->fl_wait);
			}
		}
	}
}

/*
 * do work for a lock, including:
 * - probing for a lock we're waiting on but didn't get immediately
 * - extending a lock that's close to timing out
 */
void afs_lock_work(struct work_struct *work)
{
	struct afs_vnode *vnode =
		container_of(work, struct afs_vnode, lock_work.work);
	struct file_lock *fl;
	afs_lock_type_t type;
	struct key *key;
	int ret;

	_enter("{%x:%u}", vnode->fid.vid, vnode->fid.vnode);

	spin_lock(&vnode->lock);

	if (test_bit(AFS_VNODE_UNLOCKING, &vnode->flags)) {
		_debug("unlock");
		spin_unlock(&vnode->lock);

		/* attempt to release the server lock; if it fails, we just
		 * wait 5 minutes and it'll time out anyway */
		ret = afs_vnode_release_lock(vnode, vnode->unlock_key);
		if (ret < 0)
			printk(KERN_WARNING "AFS:"
			       " Failed to release lock on {%x:%x} error %d\n",
			       vnode->fid.vid, vnode->fid.vnode, ret);

		spin_lock(&vnode->lock);
		key_put(vnode->unlock_key);
		vnode->unlock_key = NULL;
		clear_bit(AFS_VNODE_UNLOCKING, &vnode->flags);
	}

	/* if we've got a lock, then it must be time to extend that lock as AFS
	 * locks time out after 5 minutes */
	if (!list_empty(&vnode->granted_locks)) {
		_debug("extend");

		if (test_and_set_bit(AFS_VNODE_LOCKING, &vnode->flags))
			BUG();
		fl = list_entry(vnode->granted_locks.next,
				struct file_lock, fl_u.afs.link);
		key = key_get(fl->fl_file->private_data);
		spin_unlock(&vnode->lock);

		ret = afs_vnode_extend_lock(vnode, key);
		clear_bit(AFS_VNODE_LOCKING, &vnode->flags);
		key_put(key);
		switch (ret) {
		case 0:
			afs_schedule_lock_extension(vnode);
			break;
		default:
			/* ummm... we failed to extend the lock - retry
			 * extension shortly */
			printk(KERN_WARNING "AFS:"
			       " Failed to extend lock on {%x:%x} error %d\n",
			       vnode->fid.vid, vnode->fid.vnode, ret);
			queue_delayed_work(afs_lock_manager, &vnode->lock_work,
					   HZ * 10);
			break;
		}
		_leave(" [extend]");
		return;
	}

	/* if we don't have a granted lock, then we must've been called back by
	 * the server, and so it might be possible to get a lock we're
	 * currently waiting for */
	if (!list_empty(&vnode->pending_locks)) {
		_debug("get");

		if (test_and_set_bit(AFS_VNODE_LOCKING, &vnode->flags))
			BUG();
		fl = list_entry(vnode->pending_locks.next,
				struct file_lock, fl_u.afs.link);
		key = key_get(fl->fl_file->private_data);
		type = (fl->fl_type == F_RDLCK) ?
			AFS_LOCK_READ : AFS_LOCK_WRITE;
		spin_unlock(&vnode->lock);

		ret = afs_vnode_set_lock(vnode, key, type);
		clear_bit(AFS_VNODE_LOCKING, &vnode->flags);
		switch (ret) {
		case -EWOULDBLOCK:
			_debug("blocked");
			break;
		case 0:
			_debug("acquired");
			if (type == AFS_LOCK_READ)
				set_bit(AFS_VNODE_READLOCKED, &vnode->flags);
			else
				set_bit(AFS_VNODE_WRITELOCKED, &vnode->flags);
			ret = AFS_LOCK_GRANTED;
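			/* deliberately fall through: handing the granted lock
			 * to the waiter shares the queue-update code used for
			 * the error cases below */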
		default:
			spin_lock(&vnode->lock);
			/* the pending lock may have been withdrawn due to a
			 * signal */
			if (list_entry(vnode->pending_locks.next,
				       struct file_lock, fl_u.afs.link) == fl) {
				fl->fl_u.afs.state = ret;
				if (ret == AFS_LOCK_GRANTED)
					afs_grant_locks(vnode, fl);
				else
					list_del_init(&fl->fl_u.afs.link);
				wake_up(&fl->fl_wait);
				spin_unlock(&vnode->lock);
			} else {
				_debug("withdrawn");
				clear_bit(AFS_VNODE_READLOCKED, &vnode->flags);
				clear_bit(AFS_VNODE_WRITELOCKED, &vnode->flags);
				spin_unlock(&vnode->lock);
				afs_vnode_release_lock(vnode, key);
				if (!list_empty(&vnode->pending_locks))
					afs_lock_may_be_available(vnode);
			}
			break;
		}
		key_put(key);
		_leave(" [pend]");
		return;
	}

	/* looks like the lock request was withdrawn on a signal */
	spin_unlock(&vnode->lock);
	_leave(" [no locks]");
}

/*
 * pass responsibility for the unlocking of a vnode on the server to the
 * manager thread, lest a pending signal in the calling thread interrupt
 * AF_RXRPC
 * - the caller must hold the vnode lock
 */
static void afs_defer_unlock(struct afs_vnode *vnode, struct key *key)
{
	cancel_delayed_work(&vnode->lock_work);
	if (!test_and_clear_bit(AFS_VNODE_READLOCKED, &vnode->flags) &&
	    !test_and_clear_bit(AFS_VNODE_WRITELOCKED, &vnode->flags))
		BUG();
	if (test_and_set_bit(AFS_VNODE_UNLOCKING, &vnode->flags))
		BUG();
	vnode->unlock_key = key_get(key);
	afs_lock_may_be_available(vnode);
}

/*
 * request a lock on a file on the server
 */
static int afs_do_setlk(struct file *file, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(file->f_mapping->host);
	afs_lock_type_t type;
	struct key *key = file->private_data;
	int ret;

	_enter("{%x:%u},%u", vnode->fid.vid, vnode->fid.vnode, fl->fl_type);

	/* only whole-file locks are supported */
	if (fl->fl_start != 0 || fl->fl_end != OFFSET_MAX)
		return -EINVAL;

	ret = afs_init_lock_manager();
	if (ret < 0)
		return ret;

	fl->fl_ops = &afs_lock_ops;
	INIT_LIST_HEAD(&fl->fl_u.afs.link);
	fl->fl_u.afs.state = AFS_LOCK_PENDING;

	type = (fl->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;

	lock_flocks();

	/* make sure we've got a callback on this file and that our view of the
	 * data version is up to date */
	ret = afs_vnode_fetch_status(vnode, NULL, key);
	if (ret < 0)
		goto error;
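
	/* if the server already reports the file as locked and the caller
	 * isn't prepared to wait, give up straight away */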
	if (vnode->status.lock_count != 0 && !(fl->fl_flags & FL_SLEEP)) {
		ret = -EAGAIN;
		goto error;
	}

	spin_lock(&vnode->lock);

	/* if we've already got a readlock on the server then we can instantly
	 * grant another readlock, irrespective of whether there are any
	 * pending writelocks */
	if (type == AFS_LOCK_READ &&
	    vnode->flags & (1 << AFS_VNODE_READLOCKED)) {
		_debug("instant readlock");
		ASSERTCMP(vnode->flags &
			  ((1 << AFS_VNODE_LOCKING) |
			   (1 << AFS_VNODE_WRITELOCKED)), ==, 0);
		ASSERT(!list_empty(&vnode->granted_locks));
		goto sharing_existing_lock;
	}

	/* if there's no-one else with a lock on this vnode, then we need to
	 * ask the server for a lock */
	if (list_empty(&vnode->pending_locks) &&
	    list_empty(&vnode->granted_locks)) {
		_debug("not locked");
		ASSERTCMP(vnode->flags &
			  ((1 << AFS_VNODE_LOCKING) |
			   (1 << AFS_VNODE_READLOCKED) |
			   (1 << AFS_VNODE_WRITELOCKED)), ==, 0);
		list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks);
		set_bit(AFS_VNODE_LOCKING, &vnode->flags);
		spin_unlock(&vnode->lock);

		ret = afs_vnode_set_lock(vnode, key, type);
		clear_bit(AFS_VNODE_LOCKING, &vnode->flags);
		switch (ret) {
		case 0:
			_debug("acquired");
			goto acquired_server_lock;
		case -EWOULDBLOCK:
			_debug("would block");
			spin_lock(&vnode->lock);
			ASSERT(list_empty(&vnode->granted_locks));
			ASSERTCMP(vnode->pending_locks.next, ==,
				  &fl->fl_u.afs.link);
			goto wait;
		default:
			spin_lock(&vnode->lock);
			list_del_init(&fl->fl_u.afs.link);
			spin_unlock(&vnode->lock);
			goto error;
		}
	}

	/* otherwise, we need to wait for a local lock to become available */
	_debug("wait local");
	list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks);
wait:
	if (!(fl->fl_flags & FL_SLEEP)) {
		_debug("noblock");
		ret = -EAGAIN;
		goto abort_attempt;
	}
	spin_unlock(&vnode->lock);

	/* now we need to sleep and wait for the lock manager thread to get the
	 * lock from the server */
	_debug("sleep");
	ret = wait_event_interruptible(fl->fl_wait,
				       fl->fl_u.afs.state <= AFS_LOCK_GRANTED);
	if (fl->fl_u.afs.state <= AFS_LOCK_GRANTED) {
		ret = fl->fl_u.afs.state;
		if (ret < 0)
			goto error;
		spin_lock(&vnode->lock);
		goto given_lock;
	}

	/* we were interrupted, but someone may still be in the throes of
	 * giving us the lock */
	_debug("intr");
	ASSERTCMP(ret, ==, -ERESTARTSYS);

	spin_lock(&vnode->lock);
	if (fl->fl_u.afs.state <= AFS_LOCK_GRANTED) {
		ret = fl->fl_u.afs.state;
		if (ret < 0) {
			spin_unlock(&vnode->lock);
			goto error;
		}
		goto given_lock;
	}

abort_attempt:
	/* we aren't going to get the lock, either because we're unwilling to
	 * wait, or because some signal happened */
	_debug("abort");
	if (list_empty(&vnode->granted_locks) &&
	    vnode->pending_locks.next == &fl->fl_u.afs.link) {
		if (vnode->pending_locks.prev != &fl->fl_u.afs.link) {
			/* kick the next pending lock into having a go */
			list_del_init(&fl->fl_u.afs.link);
			afs_lock_may_be_available(vnode);
		}
	} else {
		list_del_init(&fl->fl_u.afs.link);
	}
	spin_unlock(&vnode->lock);
	goto error;

acquired_server_lock:
	/* we've acquired a server lock, but it needs to be renewed after 5
	 * mins */
	spin_lock(&vnode->lock);
	afs_schedule_lock_extension(vnode);
	if (type == AFS_LOCK_READ)
		set_bit(AFS_VNODE_READLOCKED, &vnode->flags);
	else
		set_bit(AFS_VNODE_WRITELOCKED, &vnode->flags);
sharing_existing_lock:
	/* the lock has been granted as far as we're concerned... */
	fl->fl_u.afs.state = AFS_LOCK_GRANTED;
	list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks);
given_lock:
	/* ... but we do still need to get the VFS's blessing */
	ASSERT(!(vnode->flags & (1 << AFS_VNODE_LOCKING)));
	ASSERT((vnode->flags & ((1 << AFS_VNODE_READLOCKED) |
				(1 << AFS_VNODE_WRITELOCKED))) != 0);
	ret = posix_lock_file(file, fl, NULL);
	if (ret < 0)
		goto vfs_rejected_lock;
	spin_unlock(&vnode->lock);

	/* again, make sure we've got a callback on this file and, again, make
	 * sure that our view of the data version is up to date (we ignore
	 * errors incurred here and deal with the consequences elsewhere) */
	afs_vnode_fetch_status(vnode, NULL, key);

error:
	unlock_flocks();
	_leave(" = %d", ret);
	return ret;

vfs_rejected_lock:
	/* the VFS rejected the lock we just obtained, so we have to discard
	 * what we just got */
	_debug("vfs refused %d", ret);
	list_del_init(&fl->fl_u.afs.link);
	if (list_empty(&vnode->granted_locks))
		afs_defer_unlock(vnode, key);
	goto abort_attempt;
}

/*
 * unlock on a file on the server
 */
static int afs_do_unlk(struct file *file, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(file->f_mapping->host);
	struct key *key = file->private_data;
	int ret;

	_enter("{%x:%u},%u", vnode->fid.vid, vnode->fid.vnode, fl->fl_type);

	/* only whole-file unlocks are supported */
	if (fl->fl_start != 0 || fl->fl_end != OFFSET_MAX)
		return -EINVAL;

	fl->fl_ops = &afs_lock_ops;
	INIT_LIST_HEAD(&fl->fl_u.afs.link);
	fl->fl_u.afs.state = AFS_LOCK_PENDING;

	spin_lock(&vnode->lock);
	ret = posix_lock_file(file, fl, NULL);
	if (ret < 0) {
		spin_unlock(&vnode->lock);
		_leave(" = %d [vfs]", ret);
		return ret;
	}

	/* discard the server lock only if all granted locks are gone */
	if (list_empty(&vnode->granted_locks))
		afs_defer_unlock(vnode, key);
	spin_unlock(&vnode->lock);
	_leave(" = 0");
	return 0;
}

/*
 * return information about a lock we currently hold, if indeed we hold one
 */
static int afs_do_getlk(struct file *file, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(file->f_mapping->host);
	struct key *key = file->private_data;
	int ret, lock_count;

	_enter("");

	fl->fl_type = F_UNLCK;

	mutex_lock(&vnode->vfs_inode.i_mutex);

	/* check local lock records first */
	ret = 0;
	posix_test_lock(file, fl);
	if (fl->fl_type == F_UNLCK) {
		/* no local locks; consult the server */
		ret = afs_vnode_fetch_status(vnode, NULL, key);
		if (ret < 0)
			goto error;
		lock_count = vnode->status.lock_count;
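		/* a positive lock count is taken to mean read locks are held;
		 * any other non-zero value is treated as a write lock */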
		if (lock_count) {
			if (lock_count > 0)
				fl->fl_type = F_RDLCK;
			else
				fl->fl_type = F_WRLCK;
			fl->fl_start = 0;
			fl->fl_end = OFFSET_MAX;
		}
	}

error:
	mutex_unlock(&vnode->vfs_inode.i_mutex);
	_leave(" = %d [%hd]", ret, fl->fl_type);
	return ret;
}

/*
 * manage POSIX locks on a file
 */
int afs_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(file->f_dentry->d_inode);

	_enter("{%x:%u},%d,{t=%x,fl=%x,r=%Ld:%Ld}",
	       vnode->fid.vid, vnode->fid.vnode, cmd,
	       fl->fl_type, fl->fl_flags,
	       (long long) fl->fl_start, (long long) fl->fl_end);

	/* AFS doesn't support mandatory locks */
	if (__mandatory_lock(&vnode->vfs_inode) && fl->fl_type != F_UNLCK)
		return -ENOLCK;

	if (IS_GETLK(cmd))
		return afs_do_getlk(file, fl);
	if (fl->fl_type == F_UNLCK)
		return afs_do_unlk(file, fl);
	return afs_do_setlk(file, fl);
}

/*
 * manage FLOCK locks on a file
 */
int afs_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(file->f_dentry->d_inode);

	_enter("{%x:%u},%d,{t=%x,fl=%x}",
	       vnode->fid.vid, vnode->fid.vnode, cmd,
	       fl->fl_type, fl->fl_flags);

	/*
	 * No BSD flocks over NFS allowed.
	 * Note: we could try to fake a POSIX lock request here by
	 * using ((u32) filp | 0x80000000) or some such as the pid.
	 * Not sure whether that would be unique, though, or whether
	 * that would break in other places.
	 */
	if (!(fl->fl_flags & FL_FLOCK))
		return -ENOLCK;

	/* we're simulating flock() locks using posix locks on the server */
	fl->fl_owner = (fl_owner_t) file;
	fl->fl_start = 0;
	fl->fl_end = OFFSET_MAX;

	if (fl->fl_type == F_UNLCK)
		return afs_do_unlk(file, fl);
	return afs_do_setlk(file, fl);
}

/*
 * the POSIX lock management core VFS code copies the lock record and adds the
 * copy into its own list, so we need to add that copy to the vnode's lock
 * queue in the same place as the original (which will be deleted shortly
 * after)
 */
static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	_enter("");

	list_add(&new->fl_u.afs.link, &fl->fl_u.afs.link);
}

/*
 * need to remove this lock from the vnode queue when it's removed from the
 * VFS's list
 */
static void afs_fl_release_private(struct file_lock *fl)
{
	_enter("");

	list_del_init(&fl->fl_u.afs.link);
}