/drivers/staging/android/logger.c

https://bitbucket.org/wisechild/galaxy-nexus · C · 616 lines · 368 code · 96 blank · 152 comment · 62 complexity · 3d7863840a40448f24c395da531c9aa5 MD5 · raw file

  1. /*
  2. * drivers/misc/logger.c
  3. *
  4. * A Logging Subsystem
  5. *
  6. * Copyright (C) 2007-2008 Google, Inc.
  7. *
  8. * Robert Love <rlove@google.com>
  9. *
  10. * This software is licensed under the terms of the GNU General Public
  11. * License version 2, as published by the Free Software Foundation, and
  12. * may be copied, distributed, and modified under those terms.
  13. *
  14. * This program is distributed in the hope that it will be useful,
  15. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  17. * GNU General Public License for more details.
  18. */
  19. #include <linux/sched.h>
  20. #include <linux/module.h>
  21. #include <linux/fs.h>
  22. #include <linux/miscdevice.h>
  23. #include <linux/uaccess.h>
  24. #include <linux/poll.h>
  25. #include <linux/slab.h>
  26. #include <linux/time.h>
  27. #include "logger.h"
  28. #include <asm/ioctls.h>
/*
 * struct logger_log - represents a specific log, such as 'main' or 'radio'
 *
 * This structure lives from module insertion until module removal, so it does
 * not need additional reference counting. The structure is protected by the
 * mutex 'mutex'.
 */
struct logger_log {
	unsigned char *buffer;	/* the ring buffer itself */
	struct miscdevice misc;	/* misc device representing the log */
	wait_queue_head_t wq;	/* wait queue for readers */
	struct list_head readers; /* this log's readers */
	struct mutex mutex;	/* mutex protecting buffer */
	size_t w_off;		/* current write head offset */
	size_t head;		/* new readers start here */
	size_t size;		/* size of the log (a power of two; see logger_offset) */
};
/*
 * struct logger_reader - a logging device open for reading
 *
 * This object lives from open to release, so we don't need additional
 * reference counting. The structure is protected by log->mutex.
 */
struct logger_reader {
	struct logger_log *log;	/* associated log */
	struct list_head list;	/* entry in logger_log's list of readers */
	size_t r_off;		/* current read head offset into log->buffer */
};
/*
 * logger_offset - returns index 'n' into the log via (optimized) modulus
 *
 * NOTE: relies on a variable named 'log' being in scope at every call site,
 * and on log->size being a power of two so that the mask is a valid modulus
 * (see the DEFINE_LOGGER_DEVICE size requirement below).
 */
#define logger_offset(n) ((n) & (log->size - 1))
  59. /*
  60. * file_get_log - Given a file structure, return the associated log
  61. *
  62. * This isn't aesthetic. We have several goals:
  63. *
  64. * 1) Need to quickly obtain the associated log during an I/O operation
  65. * 2) Readers need to maintain state (logger_reader)
  66. * 3) Writers need to be very fast (open() should be a near no-op)
  67. *
  68. * In the reader case, we can trivially go file->logger_reader->logger_log.
  69. * For a writer, we don't want to maintain a logger_reader, so we just go
  70. * file->logger_log. Thus what file->private_data points at depends on whether
  71. * or not the file was opened for reading. This function hides that dirtiness.
  72. */
  73. static inline struct logger_log *file_get_log(struct file *file)
  74. {
  75. if (file->f_mode & FMODE_READ) {
  76. struct logger_reader *reader = file->private_data;
  77. return reader->log;
  78. } else
  79. return file->private_data;
  80. }
  81. /*
  82. * get_entry_len - Grabs the length of the payload of the next entry starting
  83. * from 'off'.
  84. *
  85. * Caller needs to hold log->mutex.
  86. */
  87. static __u32 get_entry_len(struct logger_log *log, size_t off)
  88. {
  89. __u16 val;
  90. switch (log->size - off) {
  91. case 1:
  92. memcpy(&val, log->buffer + off, 1);
  93. memcpy(((char *) &val) + 1, log->buffer, 1);
  94. break;
  95. default:
  96. memcpy(&val, log->buffer + off, 2);
  97. }
  98. return sizeof(struct logger_entry) + val;
  99. }
  100. /*
  101. * do_read_log_to_user - reads exactly 'count' bytes from 'log' into the
  102. * user-space buffer 'buf'. Returns 'count' on success.
  103. *
  104. * Caller must hold log->mutex.
  105. */
  106. static ssize_t do_read_log_to_user(struct logger_log *log,
  107. struct logger_reader *reader,
  108. char __user *buf,
  109. size_t count)
  110. {
  111. size_t len;
  112. /*
  113. * We read from the log in two disjoint operations. First, we read from
  114. * the current read head offset up to 'count' bytes or to the end of
  115. * the log, whichever comes first.
  116. */
  117. len = min(count, log->size - reader->r_off);
  118. if (copy_to_user(buf, log->buffer + reader->r_off, len))
  119. return -EFAULT;
  120. /*
  121. * Second, we read any remaining bytes, starting back at the head of
  122. * the log.
  123. */
  124. if (count != len)
  125. if (copy_to_user(buf + len, log->buffer, count - len))
  126. return -EFAULT;
  127. reader->r_off = logger_offset(reader->r_off + count);
  128. return count;
  129. }
/*
 * logger_read - our log's read() method
 *
 * Behavior:
 *
 * - O_NONBLOCK works
 * - If there are no log entries to read, blocks until log is written to
 * - Atomically reads exactly one log entry
 *
 * Optimal read size is LOGGER_ENTRY_MAX_LEN. Will set errno to EINVAL if read
 * buffer is insufficient to hold next entry.
 */
static ssize_t logger_read(struct file *file, char __user *buf,
			   size_t count, loff_t *pos)
{
	struct logger_reader *reader = file->private_data;
	struct logger_log *log = reader->log;
	ssize_t ret;
	DEFINE_WAIT(wait);

start:
	/*
	 * Classic prepare_to_wait/schedule loop: we register on the wait
	 * queue BEFORE testing the condition so that a writer's wake-up
	 * between the test and schedule() is not lost.
	 */
	while (1) {
		prepare_to_wait(&log->wq, &wait, TASK_INTERRUPTIBLE);

		/* empty iff the write head equals our read head */
		mutex_lock(&log->mutex);
		ret = (log->w_off == reader->r_off);
		mutex_unlock(&log->mutex);
		if (!ret)
			break;

		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		schedule();
	}

	finish_wait(&log->wq, &wait);
	if (ret)
		return ret;

	mutex_lock(&log->mutex);

	/* is there still something to read or did we race? */
	if (unlikely(log->w_off == reader->r_off)) {
		mutex_unlock(&log->mutex);
		goto start;
	}

	/* get the size of the next entry */
	ret = get_entry_len(log, reader->r_off);
	if (count < ret) {
		/* reads are all-or-nothing: a too-small buffer is an error */
		ret = -EINVAL;
		goto out;
	}

	/* get exactly one entry from the log */
	ret = do_read_log_to_user(log, reader, buf, ret);

out:
	mutex_unlock(&log->mutex);

	return ret;
}
  188. /*
  189. * get_next_entry - return the offset of the first valid entry at least 'len'
  190. * bytes after 'off'.
  191. *
  192. * Caller must hold log->mutex.
  193. */
  194. static size_t get_next_entry(struct logger_log *log, size_t off, size_t len)
  195. {
  196. size_t count = 0;
  197. do {
  198. size_t nr = get_entry_len(log, off);
  199. off = logger_offset(off + nr);
  200. count += nr;
  201. } while (count < len);
  202. return off;
  203. }
  204. /*
  205. * clock_interval - is a < c < b in mod-space? Put another way, does the line
  206. * from a to b cross c?
  207. */
  208. static inline int clock_interval(size_t a, size_t b, size_t c)
  209. {
  210. if (b < a) {
  211. if (a < c || b >= c)
  212. return 1;
  213. } else {
  214. if (a < c && b >= c)
  215. return 1;
  216. }
  217. return 0;
  218. }
/*
 * fix_up_readers - walk the list of all readers and "fix up" any who were
 * lapped by the writer; also do the same for the default "start head".
 * We do this by "pulling forward" the readers and start head to the first
 * entry after the new write head.
 *
 * The caller needs to hold log->mutex.
 */
static void fix_up_readers(struct logger_log *log, size_t len)
{
	size_t old = log->w_off;
	size_t new = logger_offset(old + len);
	struct logger_reader *reader;

	/* will the [old, new) write span clobber the default start point? */
	if (clock_interval(old, new, log->head))
		log->head = get_next_entry(log, log->head, len);

	/* likewise, advance any open reader whose position would be overwritten */
	list_for_each_entry(reader, &log->readers, list)
		if (clock_interval(old, new, reader->r_off))
			reader->r_off = get_next_entry(log, reader->r_off, len);
}
  238. /*
  239. * do_write_log - writes 'len' bytes from 'buf' to 'log'
  240. *
  241. * The caller needs to hold log->mutex.
  242. */
  243. static void do_write_log(struct logger_log *log, const void *buf, size_t count)
  244. {
  245. size_t len;
  246. len = min(count, log->size - log->w_off);
  247. memcpy(log->buffer + log->w_off, buf, len);
  248. if (count != len)
  249. memcpy(log->buffer, buf + len, count - len);
  250. log->w_off = logger_offset(log->w_off + count);
  251. }
/*
 * do_write_log_from_user - writes 'count' bytes from the user-space buffer
 * 'buf' to the log 'log', wrapping around the end of the ring buffer as
 * needed.
 *
 * The caller needs to hold log->mutex.
 *
 * Returns 'count' on success, negative error code on failure.
 */
static ssize_t do_write_log_from_user(struct logger_log *log,
				      const void __user *buf, size_t count)
{
	size_t len;

	/* first chunk: from the write head up to the end of the buffer */
	len = min(count, log->size - log->w_off);
	if (len && copy_from_user(log->buffer + log->w_off, buf, len))
		return -EFAULT;

	/* second chunk: whatever wraps around to the start of the buffer */
	if (count != len)
		if (copy_from_user(log->buffer, buf + len, count - len))
			/*
			 * Note: by not updating w_off here we abandon the
			 * portion that WAS copied just above. This is only
			 * safe if the caller rolls w_off back on failure so
			 * readers never see the partial entry —
			 * logger_aio_write does exactly that; verify any new
			 * caller does too.
			 */
			return -EFAULT;

	log->w_off = logger_offset(log->w_off + count);

	return count;
}
  273. /*
  274. * logger_aio_write - our write method, implementing support for write(),
  275. * writev(), and aio_write(). Writes are our fast path, and we try to optimize
  276. * them above all else.
  277. */
  278. ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov,
  279. unsigned long nr_segs, loff_t ppos)
  280. {
  281. struct logger_log *log = file_get_log(iocb->ki_filp);
  282. size_t orig = log->w_off;
  283. struct logger_entry header;
  284. struct timespec now;
  285. ssize_t ret = 0;
  286. now = current_kernel_time();
  287. header.pid = current->tgid;
  288. header.tid = current->pid;
  289. header.sec = now.tv_sec;
  290. header.nsec = now.tv_nsec;
  291. header.len = min_t(size_t, iocb->ki_left, LOGGER_ENTRY_MAX_PAYLOAD);
  292. /* null writes succeed, return zero */
  293. if (unlikely(!header.len))
  294. return 0;
  295. mutex_lock(&log->mutex);
  296. /*
  297. * Fix up any readers, pulling them forward to the first readable
  298. * entry after (what will be) the new write offset. We do this now
  299. * because if we partially fail, we can end up with clobbered log
  300. * entries that encroach on readable buffer.
  301. */
  302. fix_up_readers(log, sizeof(struct logger_entry) + header.len);
  303. do_write_log(log, &header, sizeof(struct logger_entry));
  304. while (nr_segs-- > 0) {
  305. size_t len;
  306. ssize_t nr;
  307. /* figure out how much of this vector we can keep */
  308. len = min_t(size_t, iov->iov_len, header.len - ret);
  309. /* write out this segment's payload */
  310. nr = do_write_log_from_user(log, iov->iov_base, len);
  311. if (unlikely(nr < 0)) {
  312. log->w_off = orig;
  313. mutex_unlock(&log->mutex);
  314. return nr;
  315. }
  316. iov++;
  317. ret += nr;
  318. }
  319. mutex_unlock(&log->mutex);
  320. /* wake up any blocked readers */
  321. wake_up_interruptible(&log->wq);
  322. return ret;
  323. }
  324. static struct logger_log *get_log_from_minor(int);
  325. /*
  326. * logger_open - the log's open() file operation
  327. *
  328. * Note how near a no-op this is in the write-only case. Keep it that way!
  329. */
  330. static int logger_open(struct inode *inode, struct file *file)
  331. {
  332. struct logger_log *log;
  333. int ret;
  334. ret = nonseekable_open(inode, file);
  335. if (ret)
  336. return ret;
  337. log = get_log_from_minor(MINOR(inode->i_rdev));
  338. if (!log)
  339. return -ENODEV;
  340. if (file->f_mode & FMODE_READ) {
  341. struct logger_reader *reader;
  342. reader = kmalloc(sizeof(struct logger_reader), GFP_KERNEL);
  343. if (!reader)
  344. return -ENOMEM;
  345. reader->log = log;
  346. INIT_LIST_HEAD(&reader->list);
  347. mutex_lock(&log->mutex);
  348. reader->r_off = log->head;
  349. list_add_tail(&reader->list, &log->readers);
  350. mutex_unlock(&log->mutex);
  351. file->private_data = reader;
  352. } else
  353. file->private_data = log;
  354. return 0;
  355. }
  356. /*
  357. * logger_release - the log's release file operation
  358. *
  359. * Note this is a total no-op in the write-only case. Keep it that way!
  360. */
  361. static int logger_release(struct inode *ignored, struct file *file)
  362. {
  363. if (file->f_mode & FMODE_READ) {
  364. struct logger_reader *reader = file->private_data;
  365. list_del(&reader->list);
  366. kfree(reader);
  367. }
  368. return 0;
  369. }
  370. /*
  371. * logger_poll - the log's poll file operation, for poll/select/epoll
  372. *
  373. * Note we always return POLLOUT, because you can always write() to the log.
  374. * Note also that, strictly speaking, a return value of POLLIN does not
  375. * guarantee that the log is readable without blocking, as there is a small
  376. * chance that the writer can lap the reader in the interim between poll()
  377. * returning and the read() request.
  378. */
  379. static unsigned int logger_poll(struct file *file, poll_table *wait)
  380. {
  381. struct logger_reader *reader;
  382. struct logger_log *log;
  383. unsigned int ret = POLLOUT | POLLWRNORM;
  384. if (!(file->f_mode & FMODE_READ))
  385. return ret;
  386. reader = file->private_data;
  387. log = reader->log;
  388. poll_wait(file, &log->wq, wait);
  389. mutex_lock(&log->mutex);
  390. if (log->w_off != reader->r_off)
  391. ret |= POLLIN | POLLRDNORM;
  392. mutex_unlock(&log->mutex);
  393. return ret;
  394. }
/*
 * logger_ioctl - ioctl handler for both native and compat paths.
 *
 * All commands operate under log->mutex. Unknown commands fall through the
 * switch and return the preset -ENOTTY.
 */
static long logger_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct logger_log *log = file_get_log(file);
	struct logger_reader *reader;
	long ret = -ENOTTY;

	mutex_lock(&log->mutex);

	switch (cmd) {
	case LOGGER_GET_LOG_BUF_SIZE:
		/* total capacity of the ring buffer */
		ret = log->size;
		break;
	case LOGGER_GET_LOG_LEN:
		/* number of bytes this reader has yet to consume */
		if (!(file->f_mode & FMODE_READ)) {
			ret = -EBADF;
			break;
		}
		reader = file->private_data;
		if (log->w_off >= reader->r_off)
			ret = log->w_off - reader->r_off;
		else
			/* write head has wrapped past the end of the buffer */
			ret = (log->size - reader->r_off) + log->w_off;
		break;
	case LOGGER_GET_NEXT_ENTRY_LEN:
		/* size of the next entry this reader would get; 0 if none */
		if (!(file->f_mode & FMODE_READ)) {
			ret = -EBADF;
			break;
		}
		reader = file->private_data;
		if (log->w_off != reader->r_off)
			ret = get_entry_len(log, reader->r_off);
		else
			ret = 0;
		break;
	case LOGGER_FLUSH_LOG:
		/* discard all unread data: pull every reader (and the
		 * default start head) up to the current write head */
		if (!(file->f_mode & FMODE_WRITE)) {
			ret = -EBADF;
			break;
		}
		list_for_each_entry(reader, &log->readers, list)
			reader->r_off = log->w_off;
		log->head = log->w_off;
		ret = 0;
		break;
	}

	mutex_unlock(&log->mutex);

	return ret;
}
/*
 * File operations shared by every logger device. Note there is no .write:
 * writes arrive through .aio_write, which also backs write() and writev().
 * The same ioctl handler serves native and compat callers.
 */
static const struct file_operations logger_fops = {
	.owner = THIS_MODULE,
	.read = logger_read,
	.aio_write = logger_aio_write,
	.poll = logger_poll,
	.unlocked_ioctl = logger_ioctl,
	.compat_ioctl = logger_ioctl,
	.open = logger_open,
	.release = logger_release,
};
  451. /*
  452. * Defines a log structure with name 'NAME' and a size of 'SIZE' bytes, which
  453. * must be a power of two, greater than LOGGER_ENTRY_MAX_LEN, and less than
  454. * LONG_MAX minus LOGGER_ENTRY_MAX_LEN.
  455. */
  456. #define DEFINE_LOGGER_DEVICE(VAR, NAME, SIZE) \
  457. static unsigned char _buf_ ## VAR[SIZE]; \
  458. static struct logger_log VAR = { \
  459. .buffer = _buf_ ## VAR, \
  460. .misc = { \
  461. .minor = MISC_DYNAMIC_MINOR, \
  462. .name = NAME, \
  463. .fops = &logger_fops, \
  464. .parent = NULL, \
  465. }, \
  466. .wq = __WAIT_QUEUE_HEAD_INITIALIZER(VAR .wq), \
  467. .readers = LIST_HEAD_INIT(VAR .readers), \
  468. .mutex = __MUTEX_INITIALIZER(VAR .mutex), \
  469. .w_off = 0, \
  470. .head = 0, \
  471. .size = SIZE, \
  472. };
  473. DEFINE_LOGGER_DEVICE(log_main, LOGGER_LOG_MAIN, 256*1024)
  474. DEFINE_LOGGER_DEVICE(log_events, LOGGER_LOG_EVENTS, 256*1024)
  475. DEFINE_LOGGER_DEVICE(log_radio, LOGGER_LOG_RADIO, 256*1024)
  476. DEFINE_LOGGER_DEVICE(log_system, LOGGER_LOG_SYSTEM, 256*1024)
  477. static struct logger_log *get_log_from_minor(int minor)
  478. {
  479. if (log_main.misc.minor == minor)
  480. return &log_main;
  481. if (log_events.misc.minor == minor)
  482. return &log_events;
  483. if (log_radio.misc.minor == minor)
  484. return &log_radio;
  485. if (log_system.misc.minor == minor)
  486. return &log_system;
  487. return NULL;
  488. }
  489. static int __init init_log(struct logger_log *log)
  490. {
  491. int ret;
  492. ret = misc_register(&log->misc);
  493. if (unlikely(ret)) {
  494. printk(KERN_ERR "logger: failed to register misc "
  495. "device for log '%s'!\n", log->misc.name);
  496. return ret;
  497. }
  498. printk(KERN_INFO "logger: created %luK log '%s'\n",
  499. (unsigned long) log->size >> 10, log->misc.name);
  500. return 0;
  501. }
  502. static int __init logger_init(void)
  503. {
  504. int ret;
  505. ret = init_log(&log_main);
  506. if (unlikely(ret))
  507. goto out;
  508. ret = init_log(&log_events);
  509. if (unlikely(ret))
  510. goto out;
  511. ret = init_log(&log_radio);
  512. if (unlikely(ret))
  513. goto out;
  514. ret = init_log(&log_system);
  515. if (unlikely(ret))
  516. goto out;
  517. out:
  518. return ret;
  519. }
  520. device_initcall(logger_init);