/kern_oII/fs/sysfs/file.c

http://omnia2droid.googlecode.com/ · C · 755 lines · 437 code · 116 blank · 202 comment · 65 complexity · 7b9ae0d223945808514b3a947a4250ab MD5 · raw file

  1. /*
  2. * fs/sysfs/file.c - sysfs regular (text) file implementation
  3. *
  4. * Copyright (c) 2001-3 Patrick Mochel
  5. * Copyright (c) 2007 SUSE Linux Products GmbH
  6. * Copyright (c) 2007 Tejun Heo <teheo@suse.de>
  7. *
  8. * This file is released under the GPLv2.
  9. *
  10. * Please see Documentation/filesystems/sysfs.txt for more information.
  11. */
  12. #include <linux/module.h>
  13. #include <linux/kobject.h>
  14. #include <linux/kallsyms.h>
  15. #include <linux/slab.h>
  16. #include <linux/fsnotify.h>
  17. #include <linux/namei.h>
  18. #include <linux/poll.h>
  19. #include <linux/list.h>
  20. #include <linux/mutex.h>
  21. #include <linux/limits.h>
  22. #include <asm/uaccess.h>
  23. #include "sysfs.h"
/* used in crash dumps to help with debugging */
static char last_sysfs_file[PATH_MAX];

/* Print the path of the most recently opened sysfs file; called from
 * panic/oops paths as a debugging aid (see arch crash-dump code). */
void sysfs_printk_last_file(void)
{
	printk(KERN_EMERG "last sysfs file: %s\n", last_sysfs_file);
}
/*
 * There's one sysfs_buffer for each open file and one
 * sysfs_open_dirent for each sysfs_dirent with one or more open
 * files.
 *
 * filp->private_data points to sysfs_buffer and
 * sysfs_dirent->s_attr.open points to sysfs_open_dirent. s_attr.open
 * is protected by sysfs_open_dirent_lock.
 */
static DEFINE_SPINLOCK(sysfs_open_dirent_lock);

struct sysfs_open_dirent {
	atomic_t		refcnt;		/* one ref per open sysfs_buffer */
	atomic_t		event;		/* bumped by sysfs_notify_dirent() */
	wait_queue_head_t	poll;		/* woken on notify; see sysfs_poll() */
	struct list_head	buffers;	/* goes through sysfs_buffer.list */
};
/* Per-open-file state, hung off file->private_data by sysfs_open_file(). */
struct sysfs_buffer {
	size_t			count;		/* bytes of valid data in @page */
	loff_t			pos;		/* NOTE(review): not referenced in this file — confirm before removing */
	char			*page;		/* one zeroed page holding the attribute text */
	struct sysfs_ops	*ops;		/* show/store ops from the kobject's ktype */
	struct mutex		mutex;		/* serializes read/write on this open file */
	int			needs_read_fill; /* refill @page on next read */
	int			event;		/* open_dirent event snapshot, for poll */
	struct list_head	list;		/* on sysfs_open_dirent.buffers */
};
  56. /**
  57. * fill_read_buffer - allocate and fill buffer from object.
  58. * @dentry: dentry pointer.
  59. * @buffer: data buffer for file.
  60. *
  61. * Allocate @buffer->page, if it hasn't been already, then call the
  62. * kobject's show() method to fill the buffer with this attribute's
  63. * data.
  64. * This is called only once, on the file's first read unless an error
  65. * is returned.
  66. */
  67. static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer)
  68. {
  69. struct sysfs_dirent *attr_sd = dentry->d_fsdata;
  70. struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
  71. struct sysfs_ops * ops = buffer->ops;
  72. int ret = 0;
  73. ssize_t count;
  74. if (!buffer->page)
  75. buffer->page = (char *) get_zeroed_page(GFP_KERNEL);
  76. if (!buffer->page)
  77. return -ENOMEM;
  78. /* need attr_sd for attr and ops, its parent for kobj */
  79. if (!sysfs_get_active_two(attr_sd))
  80. return -ENODEV;
  81. buffer->event = atomic_read(&attr_sd->s_attr.open->event);
  82. count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
  83. sysfs_put_active_two(attr_sd);
  84. /*
  85. * The code works fine with PAGE_SIZE return but it's likely to
  86. * indicate truncated result or overflow in normal use cases.
  87. */
  88. if (count >= (ssize_t)PAGE_SIZE) {
  89. print_symbol("fill_read_buffer: %s returned bad count\n",
  90. (unsigned long)ops->show);
  91. /* Try to struggle along */
  92. count = PAGE_SIZE - 1;
  93. }
  94. if (count >= 0) {
  95. buffer->needs_read_fill = 0;
  96. buffer->count = count;
  97. } else {
  98. ret = count;
  99. }
  100. return ret;
  101. }
  102. /**
  103. * sysfs_read_file - read an attribute.
  104. * @file: file pointer.
  105. * @buf: buffer to fill.
  106. * @count: number of bytes to read.
  107. * @ppos: starting offset in file.
  108. *
  109. * Userspace wants to read an attribute file. The attribute descriptor
  110. * is in the file's ->d_fsdata. The target object is in the directory's
  111. * ->d_fsdata.
  112. *
  113. * We call fill_read_buffer() to allocate and fill the buffer from the
  114. * object's show() method exactly once (if the read is happening from
  115. * the beginning of the file). That should fill the entire buffer with
  116. * all the data the object has to offer for that attribute.
  117. * We then call flush_read_buffer() to copy the buffer to userspace
  118. * in the increments specified.
  119. */
  120. static ssize_t
  121. sysfs_read_file(struct file *file, char __user *buf, size_t count, loff_t *ppos)
  122. {
  123. struct sysfs_buffer * buffer = file->private_data;
  124. ssize_t retval = 0;
  125. mutex_lock(&buffer->mutex);
  126. if (buffer->needs_read_fill || *ppos == 0) {
  127. retval = fill_read_buffer(file->f_path.dentry,buffer);
  128. if (retval)
  129. goto out;
  130. }
  131. pr_debug("%s: count = %zd, ppos = %lld, buf = %s\n",
  132. __func__, count, *ppos, buffer->page);
  133. retval = simple_read_from_buffer(buf, count, ppos, buffer->page,
  134. buffer->count);
  135. out:
  136. mutex_unlock(&buffer->mutex);
  137. return retval;
  138. }
  139. /**
  140. * fill_write_buffer - copy buffer from userspace.
  141. * @buffer: data buffer for file.
  142. * @buf: data from user.
  143. * @count: number of bytes in @userbuf.
  144. *
  145. * Allocate @buffer->page if it hasn't been already, then
  146. * copy the user-supplied buffer into it.
  147. */
  148. static int
  149. fill_write_buffer(struct sysfs_buffer * buffer, const char __user * buf, size_t count)
  150. {
  151. int error;
  152. if (!buffer->page)
  153. buffer->page = (char *)get_zeroed_page(GFP_KERNEL);
  154. if (!buffer->page)
  155. return -ENOMEM;
  156. if (count >= PAGE_SIZE)
  157. count = PAGE_SIZE - 1;
  158. error = copy_from_user(buffer->page,buf,count);
  159. buffer->needs_read_fill = 1;
  160. /* if buf is assumed to contain a string, terminate it by \0,
  161. so e.g. sscanf() can scan the string easily */
  162. buffer->page[count] = 0;
  163. return error ? -EFAULT : count;
  164. }
  165. /**
  166. * flush_write_buffer - push buffer to kobject.
  167. * @dentry: dentry to the attribute
  168. * @buffer: data buffer for file.
  169. * @count: number of bytes
  170. *
  171. * Get the correct pointers for the kobject and the attribute we're
  172. * dealing with, then call the store() method for the attribute,
  173. * passing the buffer that we acquired in fill_write_buffer().
  174. */
  175. static int
  176. flush_write_buffer(struct dentry * dentry, struct sysfs_buffer * buffer, size_t count)
  177. {
  178. struct sysfs_dirent *attr_sd = dentry->d_fsdata;
  179. struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
  180. struct sysfs_ops * ops = buffer->ops;
  181. int rc;
  182. /* need attr_sd for attr and ops, its parent for kobj */
  183. if (!sysfs_get_active_two(attr_sd))
  184. return -ENODEV;
  185. rc = ops->store(kobj, attr_sd->s_attr.attr, buffer->page, count);
  186. sysfs_put_active_two(attr_sd);
  187. return rc;
  188. }
  189. /**
  190. * sysfs_write_file - write an attribute.
  191. * @file: file pointer
  192. * @buf: data to write
  193. * @count: number of bytes
  194. * @ppos: starting offset
  195. *
  196. * Similar to sysfs_read_file(), though working in the opposite direction.
  197. * We allocate and fill the data from the user in fill_write_buffer(),
  198. * then push it to the kobject in flush_write_buffer().
  199. * There is no easy way for us to know if userspace is only doing a partial
  200. * write, so we don't support them. We expect the entire buffer to come
  201. * on the first write.
  202. * Hint: if you're writing a value, first read the file, modify only the
  203. * the value you're changing, then write entire buffer back.
  204. */
  205. static ssize_t
  206. sysfs_write_file(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
  207. {
  208. struct sysfs_buffer * buffer = file->private_data;
  209. ssize_t len;
  210. mutex_lock(&buffer->mutex);
  211. len = fill_write_buffer(buffer, buf, count);
  212. if (len > 0)
  213. len = flush_write_buffer(file->f_path.dentry, buffer, len);
  214. if (len > 0)
  215. *ppos += len;
  216. mutex_unlock(&buffer->mutex);
  217. return len;
  218. }
/**
 * sysfs_get_open_dirent - get or create sysfs_open_dirent
 * @sd: target sysfs_dirent
 * @buffer: sysfs_buffer for this instance of open
 *
 * If @sd->s_attr.open exists, increment its reference count;
 * otherwise, create one. @buffer is chained to the buffers
 * list.
 *
 * Uses the optimistic-allocation pattern: allocate outside the
 * spinlock, retry, and install only if nobody beat us to it.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
				 struct sysfs_buffer *buffer)
{
	struct sysfs_open_dirent *od, *new_od = NULL;

 retry:
	spin_lock(&sysfs_open_dirent_lock);

	/* install our preallocated od, if any, while holding the lock */
	if (!sd->s_attr.open && new_od) {
		sd->s_attr.open = new_od;
		new_od = NULL;
	}

	od = sd->s_attr.open;
	if (od) {
		atomic_inc(&od->refcnt);
		list_add_tail(&buffer->list, &od->buffers);
	}

	spin_unlock(&sysfs_open_dirent_lock);

	if (od) {
		/* another opener may have installed first; kfree(NULL) is a no-op */
		kfree(new_od);
		return 0;
	}

	/* not there, initialize a new one and retry */
	new_od = kmalloc(sizeof(*new_od), GFP_KERNEL);
	if (!new_od)
		return -ENOMEM;

	/* refcnt starts at 0: the atomic_inc above accounts for this opener */
	atomic_set(&new_od->refcnt, 0);
	atomic_set(&new_od->event, 1);
	init_waitqueue_head(&new_od->poll);
	INIT_LIST_HEAD(&new_od->buffers);
	goto retry;
}
/**
 * sysfs_put_open_dirent - put sysfs_open_dirent
 * @sd: target sysfs_dirent
 * @buffer: associated sysfs_buffer
 *
 * Put @sd->s_attr.open and unlink @buffer from the buffers list.
 * If reference count reaches zero, disassociate and free it.
 *
 * LOCKING:
 * None.
 */
static void sysfs_put_open_dirent(struct sysfs_dirent *sd,
				  struct sysfs_buffer *buffer)
{
	struct sysfs_open_dirent *od = sd->s_attr.open;

	spin_lock(&sysfs_open_dirent_lock);

	list_del(&buffer->list);
	if (atomic_dec_and_test(&od->refcnt))
		/* last user: detach now, free below outside the lock */
		sd->s_attr.open = NULL;
	else
		/* still referenced by other openers: don't free */
		od = NULL;

	spin_unlock(&sysfs_open_dirent_lock);

	/* kfree(NULL) is a no-op when we weren't the last reference */
	kfree(od);
}
/*
 * sysfs_open_file - open() handler for sysfs attribute files.
 *
 * Validates permissions against the ktype's sysfs_ops, allocates the
 * per-open sysfs_buffer and attaches it (and the shared open_dirent)
 * to the file.  Holds an active reference on the dirent for the
 * duration of the open to keep the attribute from being removed
 * underneath us.
 */
static int sysfs_open_file(struct inode *inode, struct file *file)
{
	struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
	struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
	struct sysfs_buffer *buffer;
	struct sysfs_ops *ops;
	int error = -EACCES;
	char *p;

	/* record the path for crash-dump reporting (best effort; d_path
	 * builds the string at the tail of the buffer, hence the memmove) */
	p = d_path(&file->f_path, last_sysfs_file, sizeof(last_sysfs_file));
	if (p)
		memmove(last_sysfs_file, p, strlen(p) + 1);

	/* need attr_sd for attr and ops, its parent for kobj */
	if (!sysfs_get_active_two(attr_sd))
		return -ENODEV;

	/* every kobject with an attribute needs a ktype assigned */
	if (kobj->ktype && kobj->ktype->sysfs_ops)
		ops = kobj->ktype->sysfs_ops;
	else {
		WARN(1, KERN_ERR "missing sysfs attribute operations for "
		     "kobject: %s\n", kobject_name(kobj));
		goto err_out;
	}

	/* File needs write support.
	 * The inode's perms must say it's ok,
	 * and we must have a store method.
	 */
	if (file->f_mode & FMODE_WRITE) {
		if (!(inode->i_mode & S_IWUGO) || !ops->store)
			goto err_out;
	}

	/* File needs read support.
	 * The inode's perms must say it's ok, and we there
	 * must be a show method for it.
	 */
	if (file->f_mode & FMODE_READ) {
		if (!(inode->i_mode & S_IRUGO) || !ops->show)
			goto err_out;
	}

	/* No error? Great, allocate a buffer for the file, and store it
	 * it in file->private_data for easy access.
	 */
	error = -ENOMEM;
	buffer = kzalloc(sizeof(struct sysfs_buffer), GFP_KERNEL);
	if (!buffer)
		goto err_out;

	mutex_init(&buffer->mutex);
	buffer->needs_read_fill = 1;	/* force fill on first read */
	buffer->ops = ops;
	file->private_data = buffer;

	/* make sure we have open dirent struct */
	error = sysfs_get_open_dirent(attr_sd, buffer);
	if (error)
		goto err_free;

	/* open succeeded, put active references */
	sysfs_put_active_two(attr_sd);
	return 0;

 err_free:
	kfree(buffer);
 err_out:
	sysfs_put_active_two(attr_sd);
	return error;
}
  350. static int sysfs_release(struct inode *inode, struct file *filp)
  351. {
  352. struct sysfs_dirent *sd = filp->f_path.dentry->d_fsdata;
  353. struct sysfs_buffer *buffer = filp->private_data;
  354. sysfs_put_open_dirent(sd, buffer);
  355. if (buffer->page)
  356. free_page((unsigned long)buffer->page);
  357. kfree(buffer);
  358. return 0;
  359. }
/* Sysfs attribute files are pollable. The idea is that you read
 * the content and then you use 'poll' or 'select' to wait for
 * the content to change. When the content changes (assuming the
 * manager for the kobject supports notification), poll will
 * return POLLERR|POLLPRI, and select will return the fd whether
 * it is waiting for read, write, or exceptions.
 * Once poll/select indicates that the value has changed, you
 * need to close and re-open the file, or seek to 0 and read again.
 * Reminder: this only works for attributes which actively support
 * it, and it is not possible to test an attribute from userspace
 * to see if it supports poll (Neither 'poll' nor 'select' return
 * an appropriate error code). When in doubt, set a suitable timeout value.
 */
static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
{
	struct sysfs_buffer *buffer = filp->private_data;
	struct sysfs_dirent *attr_sd = filp->f_path.dentry->d_fsdata;
	/* NOTE(review): od is read without sysfs_open_dirent_lock; it
	 * appears to stay valid because this open file holds a refcnt on
	 * it (taken in sysfs_open_file(), dropped in sysfs_release()) —
	 * confirm against the open_dirent lifecycle before relying on it */
	struct sysfs_open_dirent *od = attr_sd->s_attr.open;

	/* need parent for the kobj, grab both */
	if (!sysfs_get_active_two(attr_sd))
		goto trigger;

	poll_wait(filp, &od->poll, wait);

	sysfs_put_active_two(attr_sd);

	/* event counter moved since the last fill: content changed */
	if (buffer->event != atomic_read(&od->event))
		goto trigger;

	return DEFAULT_POLLMASK;

 trigger:
	/* force a refill on the next read so userspace sees fresh data */
	buffer->needs_read_fill = 1;
	return DEFAULT_POLLMASK|POLLERR|POLLPRI;
}
  390. void sysfs_notify_dirent(struct sysfs_dirent *sd)
  391. {
  392. struct sysfs_open_dirent *od;
  393. spin_lock(&sysfs_open_dirent_lock);
  394. od = sd->s_attr.open;
  395. if (od) {
  396. atomic_inc(&od->event);
  397. wake_up_interruptible(&od->poll);
  398. }
  399. spin_unlock(&sysfs_open_dirent_lock);
  400. }
  401. EXPORT_SYMBOL_GPL(sysfs_notify_dirent);
  402. void sysfs_notify(struct kobject *k, const char *dir, const char *attr)
  403. {
  404. struct sysfs_dirent *sd = k->sd;
  405. mutex_lock(&sysfs_mutex);
  406. if (sd && dir)
  407. sd = sysfs_find_dirent(sd, dir);
  408. if (sd && attr)
  409. sd = sysfs_find_dirent(sd, attr);
  410. if (sd)
  411. sysfs_notify_dirent(sd);
  412. mutex_unlock(&sysfs_mutex);
  413. }
  414. EXPORT_SYMBOL_GPL(sysfs_notify);
/* File operations backing every regular sysfs attribute file. */
const struct file_operations sysfs_file_operations = {
	.read		= sysfs_read_file,
	.write		= sysfs_write_file,
	.llseek		= generic_file_llseek,
	.open		= sysfs_open_file,
	.release	= sysfs_release,
	.poll		= sysfs_poll,
};
  423. int sysfs_add_file_mode(struct sysfs_dirent *dir_sd,
  424. const struct attribute *attr, int type, mode_t amode)
  425. {
  426. umode_t mode = (amode & S_IALLUGO) | S_IFREG;
  427. struct sysfs_addrm_cxt acxt;
  428. struct sysfs_dirent *sd;
  429. int rc;
  430. sd = sysfs_new_dirent(attr->name, mode, type);
  431. if (!sd)
  432. return -ENOMEM;
  433. sd->s_attr.attr = (void *)attr;
  434. sysfs_addrm_start(&acxt, dir_sd);
  435. rc = sysfs_add_one(&acxt, sd);
  436. sysfs_addrm_finish(&acxt);
  437. if (rc)
  438. sysfs_put(sd);
  439. return rc;
  440. }
/* Add @attr under @dir_sd using the mode stored in the attribute itself. */
int sysfs_add_file(struct sysfs_dirent *dir_sd, const struct attribute *attr,
		   int type)
{
	return sysfs_add_file_mode(dir_sd, attr, type, attr->mode);
}
/**
 * sysfs_create_file - create an attribute file for an object.
 * @kobj: object we're creating for.
 * @attr: attribute descriptor.
 */
int sysfs_create_file(struct kobject *kobj, const struct attribute *attr)
{
	/* a NULL kobj, missing directory dirent, or NULL attr is a caller bug */
	BUG_ON(!kobj || !kobj->sd || !attr);

	return sysfs_add_file(kobj->sd, attr, SYSFS_KOBJ_ATTR);
}
  456. /**
  457. * sysfs_add_file_to_group - add an attribute file to a pre-existing group.
  458. * @kobj: object we're acting for.
  459. * @attr: attribute descriptor.
  460. * @group: group name.
  461. */
  462. int sysfs_add_file_to_group(struct kobject *kobj,
  463. const struct attribute *attr, const char *group)
  464. {
  465. struct sysfs_dirent *dir_sd;
  466. int error;
  467. if (group)
  468. dir_sd = sysfs_get_dirent(kobj->sd, group);
  469. else
  470. dir_sd = sysfs_get(kobj->sd);
  471. if (!dir_sd)
  472. return -ENOENT;
  473. error = sysfs_add_file(dir_sd, attr, SYSFS_KOBJ_ATTR);
  474. sysfs_put(dir_sd);
  475. return error;
  476. }
  477. EXPORT_SYMBOL_GPL(sysfs_add_file_to_group);
/**
 * sysfs_chmod_file - update the modified mode value on an object attribute.
 * @kobj: object we're acting for.
 * @attr: attribute descriptor.
 * @mode: file permissions; only the S_IALLUGO bits are applied.
 *
 * Returns 0 on success, -ENOENT if the attribute doesn't exist, or the
 * error from dentry instantiation / sysfs_setattr().
 */
int sysfs_chmod_file(struct kobject *kobj, struct attribute *attr, mode_t mode)
{
	struct sysfs_dirent *victim_sd = NULL;
	struct dentry *victim = NULL;
	struct inode *inode;
	struct iattr newattrs;
	int rc;

	rc = -ENOENT;
	victim_sd = sysfs_get_dirent(kobj->sd, attr->name);
	if (!victim_sd)
		goto out;

	/* sysfs_get_dentry() must run under sysfs_rename_mutex */
	mutex_lock(&sysfs_rename_mutex);
	victim = sysfs_get_dentry(victim_sd);
	mutex_unlock(&sysfs_rename_mutex);
	if (IS_ERR(victim)) {
		rc = PTR_ERR(victim);
		victim = NULL;	/* so the dput() at 'out' is a no-op */
		goto out;
	}

	inode = victim->d_inode;

	mutex_lock(&inode->i_mutex);

	/* replace only the permission bits, preserve the file type bits */
	newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
	newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
	newattrs.ia_ctime = current_fs_time(inode->i_sb);
	rc = sysfs_setattr(victim, &newattrs);

	if (rc == 0) {
		fsnotify_change(victim, newattrs.ia_valid);
		/* keep the cached dirent mode in sync, under sysfs_mutex */
		mutex_lock(&sysfs_mutex);
		victim_sd->s_mode = newattrs.ia_mode;
		mutex_unlock(&sysfs_mutex);
	}

	mutex_unlock(&inode->i_mutex);
 out:
	dput(victim);		/* dput(NULL) is safe */
	sysfs_put(victim_sd);	/* sysfs_put(NULL) is safe */
	return rc;
}
EXPORT_SYMBOL_GPL(sysfs_chmod_file);
/**
 * sysfs_remove_file - remove an object attribute.
 * @kobj: object we're acting for.
 * @attr: attribute descriptor.
 *
 * Hash the attribute name and kill the victim.
 */
void sysfs_remove_file(struct kobject *kobj, const struct attribute *attr)
{
	sysfs_hash_and_remove(kobj->sd, attr->name);
}
  534. /**
  535. * sysfs_remove_file_from_group - remove an attribute file from a group.
  536. * @kobj: object we're acting for.
  537. * @attr: attribute descriptor.
  538. * @group: group name.
  539. */
  540. void sysfs_remove_file_from_group(struct kobject *kobj,
  541. const struct attribute *attr, const char *group)
  542. {
  543. struct sysfs_dirent *dir_sd;
  544. if (group)
  545. dir_sd = sysfs_get_dirent(kobj->sd, group);
  546. else
  547. dir_sd = sysfs_get(kobj->sd);
  548. if (dir_sd) {
  549. sysfs_hash_and_remove(dir_sd, attr->name);
  550. sysfs_put(dir_sd);
  551. }
  552. }
  553. EXPORT_SYMBOL_GPL(sysfs_remove_file_from_group);
/* Bookkeeping for one pending sysfs_schedule_callback() request. */
struct sysfs_schedule_callback_struct {
	struct list_head	workq_list;	/* on sysfs_workq while pending */
	struct kobject		*kobj;		/* pinned until func returns */
	void			(*func)(void *);
	void			*data;
	struct module		*owner;		/* module ref held while pending */
	struct work_struct	work;
};

/* lazily created on first sysfs_schedule_callback() call */
static struct workqueue_struct *sysfs_workqueue;
/* protects sysfs_workq */
static DEFINE_MUTEX(sysfs_workq_mutex);
/* all not-yet-completed callback requests */
static LIST_HEAD(sysfs_workq);
  565. static void sysfs_schedule_callback_work(struct work_struct *work)
  566. {
  567. struct sysfs_schedule_callback_struct *ss = container_of(work,
  568. struct sysfs_schedule_callback_struct, work);
  569. (ss->func)(ss->data);
  570. kobject_put(ss->kobj);
  571. module_put(ss->owner);
  572. mutex_lock(&sysfs_workq_mutex);
  573. list_del(&ss->workq_list);
  574. mutex_unlock(&sysfs_workq_mutex);
  575. kfree(ss);
  576. }
  577. /**
  578. * sysfs_schedule_callback - helper to schedule a callback for a kobject
  579. * @kobj: object we're acting for.
  580. * @func: callback function to invoke later.
  581. * @data: argument to pass to @func.
  582. * @owner: module owning the callback code
  583. *
  584. * sysfs attribute methods must not unregister themselves or their parent
  585. * kobject (which would amount to the same thing). Attempts to do so will
  586. * deadlock, since unregistration is mutually exclusive with driver
  587. * callbacks.
  588. *
  589. * Instead methods can call this routine, which will attempt to allocate
  590. * and schedule a workqueue request to call back @func with @data as its
  591. * argument in the workqueue's process context. @kobj will be pinned
  592. * until @func returns.
  593. *
  594. * Returns 0 if the request was submitted, -ENOMEM if storage could not
  595. * be allocated, -ENODEV if a reference to @owner isn't available,
  596. * -EAGAIN if a callback has already been scheduled for @kobj.
  597. */
  598. int sysfs_schedule_callback(struct kobject *kobj, void (*func)(void *),
  599. void *data, struct module *owner)
  600. {
  601. struct sysfs_schedule_callback_struct *ss, *tmp;
  602. if (!try_module_get(owner))
  603. return -ENODEV;
  604. mutex_lock(&sysfs_workq_mutex);
  605. list_for_each_entry_safe(ss, tmp, &sysfs_workq, workq_list)
  606. if (ss->kobj == kobj) {
  607. module_put(owner);
  608. mutex_unlock(&sysfs_workq_mutex);
  609. return -EAGAIN;
  610. }
  611. mutex_unlock(&sysfs_workq_mutex);
  612. if (sysfs_workqueue == NULL) {
  613. sysfs_workqueue = create_singlethread_workqueue("sysfsd");
  614. if (sysfs_workqueue == NULL) {
  615. module_put(owner);
  616. return -ENOMEM;
  617. }
  618. }
  619. ss = kmalloc(sizeof(*ss), GFP_KERNEL);
  620. if (!ss) {
  621. module_put(owner);
  622. return -ENOMEM;
  623. }
  624. kobject_get(kobj);
  625. ss->kobj = kobj;
  626. ss->func = func;
  627. ss->data = data;
  628. ss->owner = owner;
  629. INIT_WORK(&ss->work, sysfs_schedule_callback_work);
  630. INIT_LIST_HEAD(&ss->workq_list);
  631. mutex_lock(&sysfs_workq_mutex);
  632. list_add_tail(&ss->workq_list, &sysfs_workq);
  633. mutex_unlock(&sysfs_workq_mutex);
  634. queue_work(sysfs_workqueue, &ss->work);
  635. return 0;
  636. }
  637. EXPORT_SYMBOL_GPL(sysfs_schedule_callback);
/* core attribute create/remove helpers, used throughout the kernel */
EXPORT_SYMBOL_GPL(sysfs_create_file);
EXPORT_SYMBOL_GPL(sysfs_remove_file);