PageRenderTime 27ms CodeModel.GetById 18ms RepoModel.GetById 0ms app.codeStats 0ms

/drivers/staging/iio/accel/lis3l02dq_ring.c

https://bitbucket.org/GT-P6200/samsung-kernel-p6200
C | 514 lines | 379 code | 58 blank | 77 comment | 47 complexity | ba2de05258ae8ea7fd50f58eef015c1e MD5 | raw file
  1. #include <linux/interrupt.h>
  2. #include <linux/irq.h>
  3. #include <linux/gpio.h>
  4. #include <linux/workqueue.h>
  5. #include <linux/mutex.h>
  6. #include <linux/device.h>
  7. #include <linux/kernel.h>
  8. #include <linux/spi/spi.h>
  9. #include <linux/sysfs.h>
  10. #include <linux/list.h>
  11. #include <linux/slab.h>
  12. #include "../iio.h"
  13. #include "../sysfs.h"
  14. #include "../ring_sw.h"
  15. #include "accel.h"
  16. #include "../trigger.h"
  17. #include "lis3l02dq.h"
  18. /**
  19. * combine_8_to_16() utility function to munge to u8s into u16
  20. **/
  21. static inline u16 combine_8_to_16(u8 lower, u8 upper)
  22. {
  23. u16 _lower = lower;
  24. u16 _upper = upper;
  25. return _lower | (_upper << 8);
  26. }
  27. /**
  28. * lis3l02dq_scan_el_set_state() set whether a scan contains a given channel
  29. * @scan_el: associtate iio scan element attribute
  30. * @indio_dev: the device structure
  31. * @bool: desired state
  32. *
  33. * mlock already held when this is called.
  34. **/
  35. static int lis3l02dq_scan_el_set_state(struct iio_scan_el *scan_el,
  36. struct iio_dev *indio_dev,
  37. bool state)
  38. {
  39. u8 t, mask;
  40. int ret;
  41. ret = lis3l02dq_spi_read_reg_8(&indio_dev->dev,
  42. LIS3L02DQ_REG_CTRL_1_ADDR,
  43. &t);
  44. if (ret)
  45. goto error_ret;
  46. switch (scan_el->label) {
  47. case LIS3L02DQ_REG_OUT_X_L_ADDR:
  48. mask = LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE;
  49. break;
  50. case LIS3L02DQ_REG_OUT_Y_L_ADDR:
  51. mask = LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE;
  52. break;
  53. case LIS3L02DQ_REG_OUT_Z_L_ADDR:
  54. mask = LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE;
  55. break;
  56. default:
  57. ret = -EINVAL;
  58. goto error_ret;
  59. }
  60. if (!(mask & t) == state) {
  61. if (state)
  62. t |= mask;
  63. else
  64. t &= ~mask;
  65. ret = lis3l02dq_spi_write_reg_8(&indio_dev->dev,
  66. LIS3L02DQ_REG_CTRL_1_ADDR,
  67. &t);
  68. }
  69. error_ret:
  70. return ret;
  71. }
/* Scan elements for the three acceleration axes.  The address of each axis'
 * low data byte register is used as the element label, which is how
 * lis3l02dq_scan_el_set_state() maps an element back to its enable bit.
 */
static IIO_SCAN_EL_C(accel_x, 0, IIO_SIGNED(16),
		     LIS3L02DQ_REG_OUT_X_L_ADDR,
		     &lis3l02dq_scan_el_set_state);
static IIO_SCAN_EL_C(accel_y, 1, IIO_SIGNED(16),
		     LIS3L02DQ_REG_OUT_Y_L_ADDR,
		     &lis3l02dq_scan_el_set_state);
static IIO_SCAN_EL_C(accel_z, 2, IIO_SIGNED(16),
		     LIS3L02DQ_REG_OUT_Z_L_ADDR,
		     &lis3l02dq_scan_el_set_state);
/* Capture timestamp as scan index 3. */
static IIO_SCAN_EL_TIMESTAMP(3);

static struct attribute *lis3l02dq_scan_el_attrs[] = {
	&iio_scan_el_accel_x.dev_attr.attr,
	&iio_scan_el_accel_y.dev_attr.attr,
	&iio_scan_el_accel_z.dev_attr.attr,
	&iio_scan_el_timestamp.dev_attr.attr,
	NULL,
};

/* Exposed under the device's scan_elements sysfs directory. */
static struct attribute_group lis3l02dq_scan_el_group = {
	.attrs = lis3l02dq_scan_el_attrs,
	.name = "scan_elements",
};
/**
 * lis3l02dq_poll_func_th() top half interrupt handler called by trigger
 * @indio_dev: the iio device the trigger fired for
 * @time: timestamp of the trigger event
 **/
static void lis3l02dq_poll_func_th(struct iio_dev *indio_dev, s64 time)
{
	struct iio_sw_ring_helper_state *h
		= iio_dev_get_devdata(indio_dev);
	struct lis3l02dq_state *st = lis3l02dq_h_to_s(h);
	/* in this case we need to slightly extend the helper function */
	iio_sw_poll_func_th(indio_dev, time);
	/* Indicate that this interrupt is being handled */
	/* Technically this is trigger related, but without this
	 * handler running there is currently no way for the interrupt
	 * to clear.
	 */
	st->inter = 1;
}
  111. /**
  112. * lis3l02dq_data_rdy_trig_poll() the event handler for the data rdy trig
  113. **/
  114. static int lis3l02dq_data_rdy_trig_poll(struct iio_dev *indio_dev,
  115. int index,
  116. s64 timestamp,
  117. int no_test)
  118. {
  119. struct iio_sw_ring_helper_state *h
  120. = iio_dev_get_devdata(indio_dev);
  121. struct lis3l02dq_state *st = lis3l02dq_h_to_s(h);
  122. iio_trigger_poll(st->trig, timestamp);
  123. return IRQ_HANDLED;
  124. }
/* This is an event (rather than a trigger handler registration) as it is a
 * response to a physical interrupt line. */
IIO_EVENT_SH(data_rdy_trig, &lis3l02dq_data_rdy_trig_poll);
/**
 * lis3l02dq_read_accel_from_ring() individual acceleration read from ring
 * @dev: device associated with the iio_dev
 * @attr: device attribute being read; its address identifies the channel
 * @buf: sysfs output buffer
 *
 * Looks up the scan element whose label matches the attribute's address and
 * returns that channel's value from the most recent scan in the ring buffer.
 * Returns -EINVAL if the channel is unknown or not currently in the scan
 * mask, otherwise the number of characters written to @buf.
 **/
ssize_t lis3l02dq_read_accel_from_ring(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct iio_scan_el *el = NULL;
	int ret, len = 0, i = 0;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	struct iio_dev *dev_info = dev_get_drvdata(dev);
	s16 *data;

	/* Search the scan element attributes for the matching channel. */
	while (dev_info->scan_el_attrs->attrs[i]) {
		el = to_iio_scan_el((struct device_attribute *)
				    (dev_info->scan_el_attrs->attrs[i]));
		/* label is in fact the address */
		if (el->label == this_attr->address)
			break;
		i++;
	}
	if (!dev_info->scan_el_attrs->attrs[i]) {
		ret = -EINVAL;
		goto error_ret;
	}
	/* If this element is in the scan mask */
	ret = iio_scan_mask_query(dev_info, el->number);
	if (ret < 0)
		goto error_ret;
	if (ret) {
		/* Grab one complete scan worth of data from the ring. */
		data = kmalloc(dev_info->ring->access.get_bpd(dev_info->ring),
			       GFP_KERNEL);
		if (data == NULL)
			return -ENOMEM;
		ret = dev_info->ring->access.read_last(dev_info->ring,
						       (u8 *)data);
		if (ret)
			goto error_free_data;
	} else {
		ret = -EINVAL;
		goto error_ret;
	}
	/* Position of this channel's sample within the scan. */
	len = iio_scan_mask_count_to_right(dev_info, el->number);
	if (len < 0) {
		ret = len;
		goto error_free_data;
	}
	len = sprintf(buf, "ring %d\n", data[len]);
error_free_data:
	kfree(data);
error_ret:
	/* ret is 0 on the success path, so len (byte count) is returned. */
	return ret ? ret : len;
}
/* SPI transmit stream used by lis3l02dq_read_all(): a read command for each
 * data register followed by a pad byte (clocked while the device returns the
 * register contents).  Layout is 4 bytes per channel:
 * lo-cmd, 0, hi-cmd, 0. */
static const u8 read_all_tx_array[] = {
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_L_ADDR), 0,
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_H_ADDR), 0,
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Y_L_ADDR), 0,
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Y_H_ADDR), 0,
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Z_L_ADDR), 0,
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Z_H_ADDR), 0,
};
/**
 * lis3l02dq_read_all() Reads all channels currently selected
 * @st: device specific state
 * @rx_array: (dma capable) receive array, must be at least
 *	      4*number of channels; may be NULL if the read results
 *	      are not wanted (e.g. just clearing a pending event)
 **/
static int lis3l02dq_read_all(struct lis3l02dq_state *st, u8 *rx_array)
{
	struct spi_transfer *xfers;
	struct spi_message msg;
	int ret, i, j = 0;

	/* Two 2-byte transfers (low byte, high byte) per enabled channel. */
	xfers = kzalloc((st->help.indio_dev->scan_count) * 2
			* sizeof(*xfers), GFP_KERNEL);
	if (!xfers)
		return -ENOMEM;
	mutex_lock(&st->buf_lock);
	/* read_all_tx_array holds 4 bytes per channel, so /4 iterates once
	 * per possible channel; skip those not in the scan mask. */
	for (i = 0; i < ARRAY_SIZE(read_all_tx_array)/4; i++) {
		if (st->help.indio_dev->scan_mask & (1 << i)) {
			/* lower byte */
			xfers[j].tx_buf = st->tx + 2*j;
			st->tx[2*j] = read_all_tx_array[i*4];
			st->tx[2*j + 1] = 0;
			if (rx_array)
				xfers[j].rx_buf = rx_array + j*2;
			xfers[j].bits_per_word = 8;
			xfers[j].len = 2;
			xfers[j].cs_change = 1;
			j++;
			/* upper byte */
			xfers[j].tx_buf = st->tx + 2*j;
			st->tx[2*j] = read_all_tx_array[i*4 + 2];
			st->tx[2*j + 1] = 0;
			if (rx_array)
				xfers[j].rx_buf = rx_array + j*2;
			xfers[j].bits_per_word = 8;
			xfers[j].len = 2;
			xfers[j].cs_change = 1;
			j++;
		}
	}
	/* After these are transmitted, the rx_buff should have
	 * values in alternate bytes
	 */
	spi_message_init(&msg);
	/* NOTE(review): assumes scan_count equals the number of bits set in
	 * scan_mask, so exactly the xfers filled above are queued — verify
	 * if additional scan elements are ever added. */
	for (j = 0; j < st->help.indio_dev->scan_count * 2; j++)
		spi_message_add_tail(&xfers[j], &msg);
	ret = spi_sync(st->us, &msg);
	mutex_unlock(&st->buf_lock);
	kfree(xfers);
	return ret;
}
  238. static void lis3l02dq_trigger_bh_to_ring(struct work_struct *work_s)
  239. {
  240. struct iio_sw_ring_helper_state *h
  241. = container_of(work_s, struct iio_sw_ring_helper_state,
  242. work_trigger_to_ring);
  243. struct lis3l02dq_state *st = lis3l02dq_h_to_s(h);
  244. st->inter = 0;
  245. iio_sw_trigger_bh_to_ring(work_s);
  246. }
  247. static int lis3l02dq_get_ring_element(struct iio_sw_ring_helper_state *h,
  248. u8 *buf)
  249. {
  250. int ret, i;
  251. u8 *rx_array ;
  252. s16 *data = (s16 *)buf;
  253. rx_array = kzalloc(4 * (h->indio_dev->scan_count), GFP_KERNEL);
  254. if (rx_array == NULL)
  255. return -ENOMEM;
  256. ret = lis3l02dq_read_all(lis3l02dq_h_to_s(h), rx_array);
  257. if (ret < 0)
  258. return ret;
  259. for (i = 0; i < h->indio_dev->scan_count; i++)
  260. data[i] = combine_8_to_16(rx_array[i*4+1],
  261. rx_array[i*4+3]);
  262. kfree(rx_array);
  263. return i*sizeof(data[0]);
  264. }
/**
 * __lis3l02dq_write_data_ready_config() - enable/disable data ready generation
 * @dev: device structure of the iio_dev
 * @list: event handler list to attach/detach from the interrupt line
 * @state: true to enable data ready generation
 *
 * Reads CTRL_2, flips the data ready generation bit only if it needs to
 * change, and keeps the interrupt's event handler list in sync.
 * Caller responsible for locking as necessary.
 */
static int
__lis3l02dq_write_data_ready_config(struct device *dev,
				    struct iio_event_handler_list *list,
				    bool state)
{
	int ret;
	u8 valold;
	bool currentlyset;
	struct iio_dev *indio_dev = dev_get_drvdata(dev);

	/* Get the current event mask register */
	ret = lis3l02dq_spi_read_reg_8(dev,
				       LIS3L02DQ_REG_CTRL_2_ADDR,
				       &valold);
	if (ret)
		goto error_ret;
	/* Find out if data ready is already on */
	currentlyset
		= valold & LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;

	/* Disable requested */
	if (!state && currentlyset) {
		valold &= ~LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;
		/* The double write is to overcome a hardware bug? */
		ret = lis3l02dq_spi_write_reg_8(dev,
						LIS3L02DQ_REG_CTRL_2_ADDR,
						&valold);
		if (ret)
			goto error_ret;
		ret = lis3l02dq_spi_write_reg_8(dev,
						LIS3L02DQ_REG_CTRL_2_ADDR,
						&valold);
		if (ret)
			goto error_ret;
		/* Detach handler only after the hardware stops generating. */
		iio_remove_event_from_list(list,
					   &indio_dev->interrupts[0]
					   ->ev_list);
	/* Enable requested */
	} else if (state && !currentlyset) {
		/* if not set, enable requested */
		valold |= LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;
		/* Attach handler before the hardware starts generating. */
		iio_add_event_to_list(list, &indio_dev->interrupts[0]->ev_list);
		ret = lis3l02dq_spi_write_reg_8(dev,
						LIS3L02DQ_REG_CTRL_2_ADDR,
						&valold);
		if (ret)
			goto error_ret;
	}

	return 0;
error_ret:
	return ret;
}
  316. /**
  317. * lis3l02dq_data_rdy_trigger_set_state() set datardy interrupt state
  318. *
  319. * If disabling the interrupt also does a final read to ensure it is clear.
  320. * This is only important in some cases where the scan enable elements are
  321. * switched before the ring is reenabled.
  322. **/
  323. static int lis3l02dq_data_rdy_trigger_set_state(struct iio_trigger *trig,
  324. bool state)
  325. {
  326. struct lis3l02dq_state *st = trig->private_data;
  327. int ret = 0;
  328. u8 t;
  329. __lis3l02dq_write_data_ready_config(&st->help.indio_dev->dev,
  330. &iio_event_data_rdy_trig,
  331. state);
  332. if (state == false) {
  333. /* possible quirk with handler currently worked around
  334. by ensuring the work queue is empty */
  335. flush_scheduled_work();
  336. /* Clear any outstanding ready events */
  337. ret = lis3l02dq_read_all(st, NULL);
  338. }
  339. lis3l02dq_spi_read_reg_8(&st->help.indio_dev->dev,
  340. LIS3L02DQ_REG_WAKE_UP_SRC_ADDR,
  341. &t);
  342. return ret;
  343. }
/* Read-only "name" attribute exposed on the trigger's sysfs directory. */
static DEVICE_ATTR(name, S_IRUGO, iio_trigger_read_name, NULL);

static struct attribute *lis3l02dq_trigger_attrs[] = {
	&dev_attr_name.attr,
	NULL,
};

static const struct attribute_group lis3l02dq_trigger_attr_group = {
	.attrs = lis3l02dq_trigger_attrs,
};
/**
 * lis3l02dq_trig_try_reen() try reenabling irq for data rdy trigger
 * @trig: the datardy trigger
 *
 * As the trigger may occur on any data element being updated it is
 * really rather likely to occur during the read from the previous
 * trigger event. The only way to discover if this has occurred on
 * boards not supporting level interrupts is to take a look at the line.
 * If it is indicating another interrupt and we don't seem to have a
 * handler looking at it, then we need to notify the core that we need
 * to tell the triggering core to try reading all these again.
 **/
static int lis3l02dq_trig_try_reen(struct iio_trigger *trig)
{
	struct lis3l02dq_state *st = trig->private_data;
	enable_irq(st->us->irq);
	/* If gpio still high (or high again) */
	if (gpio_get_value(irq_to_gpio(st->us->irq)))
		if (st->inter == 0) {
			/* already interrupt handler dealing with it */
			disable_irq_nosync(st->us->irq);
			if (st->inter == 1) {
				/* interrupt handler snuck in between test
				 * and disable */
				enable_irq(st->us->irq);
				return 0;
			}
			return -EAGAIN;
		}
	/* irq reenabled so success! */
	return 0;
}
  384. int lis3l02dq_probe_trigger(struct iio_dev *indio_dev)
  385. {
  386. int ret;
  387. struct lis3l02dq_state *state = indio_dev->dev_data;
  388. state->trig = iio_allocate_trigger();
  389. if (!state->trig)
  390. return -ENOMEM;
  391. state->trig->name = kasprintf(GFP_KERNEL,
  392. "lis3l02dq-dev%d",
  393. indio_dev->id);
  394. if (!state->trig->name) {
  395. ret = -ENOMEM;
  396. goto error_free_trig;
  397. }
  398. state->trig->dev.parent = &state->us->dev;
  399. state->trig->owner = THIS_MODULE;
  400. state->trig->private_data = state;
  401. state->trig->set_trigger_state = &lis3l02dq_data_rdy_trigger_set_state;
  402. state->trig->try_reenable = &lis3l02dq_trig_try_reen;
  403. state->trig->control_attrs = &lis3l02dq_trigger_attr_group;
  404. ret = iio_trigger_register(state->trig);
  405. if (ret)
  406. goto error_free_trig_name;
  407. return 0;
  408. error_free_trig_name:
  409. kfree(state->trig->name);
  410. error_free_trig:
  411. iio_free_trigger(state->trig);
  412. return ret;
  413. }
  414. void lis3l02dq_remove_trigger(struct iio_dev *indio_dev)
  415. {
  416. struct lis3l02dq_state *state = indio_dev->dev_data;
  417. iio_trigger_unregister(state->trig);
  418. kfree(state->trig->name);
  419. iio_free_trigger(state->trig);
  420. }
/**
 * lis3l02dq_unconfigure_ring() - release resources from
 * lis3l02dq_configure_ring()
 * @indio_dev: device instance
 */
void lis3l02dq_unconfigure_ring(struct iio_dev *indio_dev)
{
	kfree(indio_dev->pollfunc);
	iio_sw_rb_free(indio_dev->ring);
}
  426. int lis3l02dq_configure_ring(struct iio_dev *indio_dev)
  427. {
  428. int ret;
  429. struct iio_sw_ring_helper_state *h = iio_dev_get_devdata(indio_dev);
  430. INIT_WORK(&h->work_trigger_to_ring, lis3l02dq_trigger_bh_to_ring);
  431. /* Set default scan mode */
  432. h->get_ring_element = &lis3l02dq_get_ring_element;
  433. iio_scan_mask_set(indio_dev, iio_scan_el_accel_x.number);
  434. iio_scan_mask_set(indio_dev, iio_scan_el_accel_y.number);
  435. iio_scan_mask_set(indio_dev, iio_scan_el_accel_z.number);
  436. indio_dev->scan_timestamp = true;
  437. indio_dev->scan_el_attrs = &lis3l02dq_scan_el_group;
  438. indio_dev->ring = iio_sw_rb_allocate(indio_dev);
  439. if (!indio_dev->ring)
  440. return -ENOMEM;
  441. /* Effectively select the ring buffer implementation */
  442. iio_ring_sw_register_funcs(&indio_dev->ring->access);
  443. indio_dev->ring->bpe = 2;
  444. indio_dev->ring->preenable = &iio_sw_ring_preenable;
  445. indio_dev->ring->postenable = &iio_triggered_ring_postenable;
  446. indio_dev->ring->predisable = &iio_triggered_ring_predisable;
  447. indio_dev->ring->owner = THIS_MODULE;
  448. ret = iio_alloc_pollfunc(indio_dev, NULL, &lis3l02dq_poll_func_th);
  449. if (ret)
  450. goto error_iio_sw_rb_free;;
  451. indio_dev->modes |= INDIO_RING_TRIGGERED;
  452. return 0;
  453. error_iio_sw_rb_free:
  454. iio_sw_rb_free(indio_dev->ring);
  455. return ret;
  456. }