
/drivers/staging/hv/ring_buffer.c

https://bitbucket.org/sola/android_board_pandaboard_kernel
/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>

#include "hyperv.h"
#include "hyperv_vmbus.h"

/* #defines */

/* Amount of space to write to */
#define BYTES_AVAIL_TO_WRITE(r, w, z) \
	(((w) >= (r)) ? ((z) - ((w) - (r))) : ((r) - (w)))
/*
 *
 * hv_get_ringbuffer_availbytes()
 *
 * Get the number of bytes available to read and to write
 * for the specified ring buffer
 */
static inline void
hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
			     u32 *read, u32 *write)
{
	u32 read_loc, write_loc;

	/* Capture the read/write indices before they change */
	read_loc = rbi->ring_buffer->read_index;
	write_loc = rbi->ring_buffer->write_index;

	*write = BYTES_AVAIL_TO_WRITE(read_loc, write_loc, rbi->ring_datasize);
	*read = rbi->ring_datasize - *write;
}
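
/*
 * Worked example of the arithmetic above (hypothetical values): with a
 * data area of z = 4096 bytes, read index r = 1000 and write index
 * w = 3000, we have w >= r, so bytes available to write is
 * 4096 - (3000 - 1000) = 2096 and bytes available to read is
 * 4096 - 2096 = 2000. Note that hv_ringbuffer_write() below never fills
 * the ring completely, so read_index == write_index always means empty.
 */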
/*
 * hv_get_next_write_location()
 *
 * Get the next write location for the specified ring buffer
 *
 */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->write_index;

	return next;
}

/*
 * hv_set_next_write_location()
 *
 * Set the next write location for the specified ring buffer
 *
 */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
			   u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}

/*
 * hv_get_next_read_location()
 *
 * Get the next read location for the specified ring buffer
 */
static inline u32
hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->read_index;

	return next;
}
/*
 * hv_get_next_readlocation_withoffset()
 *
 * Get the next read location + offset for the specified ring buffer.
 * This allows the caller to skip over 'offset' bytes (e.g. a packet
 * descriptor it has already examined) before copying out data.
 */
static inline u32
hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
				    u32 offset)
{
	u32 next = ring_info->ring_buffer->read_index;

	next += offset;
	next %= ring_info->ring_datasize;

	return next;
}
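
/*
 * For example (hypothetical values): with read_index 4000, an offset of
 * 200 and a 4096-byte data area, the returned location wraps around to
 * (4000 + 200) % 4096 = 104.
 */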
/*
 *
 * hv_set_next_read_location()
 *
 * Set the next read location for the specified ring buffer
 *
 */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
			  u32 next_read_location)
{
	ring_info->ring_buffer->read_index = next_read_location;
}

/*
 *
 * hv_get_ring_buffer()
 *
 * Get the start of the ring buffer
 */
static inline void *
hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
{
	return (void *)ring_info->ring_buffer->buffer;
}

/*
 *
 * hv_get_ring_buffersize()
 *
 * Get the size of the ring buffer
 */
static inline u32
hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}

/*
 *
 * hv_get_ring_bufferindices()
 *
 * Get the read and write indices as u64 of the specified ring buffer
 *
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}
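
/*
 * Note: only the write index is packed here, into the upper 32 bits of
 * the u64; the lower 32 bits (which would carry the read index) are left
 * zero. hv_ringbuffer_write() below appends this value to every packet
 * as the 'prev_indices' trailer.
 */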
/*
 *
 * hv_dump_ring_info()
 *
 * Dump out to console the ring buffer info
 *
 */
void hv_dump_ring_info(struct hv_ring_buffer_info *ring_info, char *prefix)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	hv_get_ringbuffer_availbytes(ring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);

	DPRINT(VMBUS,
	       DEBUG_RING_LVL,
	       "%s <<ringinfo %p buffer %p avail write %u "
	       "avail read %u read idx %u write idx %u>>",
	       prefix,
	       ring_info,
	       ring_info->ring_buffer->buffer,
	       bytes_avail_towrite,
	       bytes_avail_toread,
	       ring_info->ring_buffer->read_index,
	       ring_info->ring_buffer->write_index);
}
/*
 *
 * hv_copyfrom_ringbuffer()
 *
 * Helper routine to copy data from the ring buffer into a destination
 * buffer. Assumes there is enough room. Handles wrap-around of the
 * source (ring) side only!
 *
 */
static u32 hv_copyfrom_ringbuffer(
	struct hv_ring_buffer_info *ring_info,
	void *dest,
	u32 destlen,
	u32 start_read_offset)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
	u32 frag_len;

	/* wrap-around detected at the src */
	if (destlen > ring_buffer_size - start_read_offset) {
		frag_len = ring_buffer_size - start_read_offset;

		memcpy(dest, ring_buffer + start_read_offset, frag_len);
		memcpy(dest + frag_len, ring_buffer, destlen - frag_len);
	} else
		memcpy(dest, ring_buffer + start_read_offset, destlen);

	start_read_offset += destlen;
	start_read_offset %= ring_buffer_size;

	return start_read_offset;
}
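
/*
 * Wrap-around example (hypothetical values): in a 4096-byte data area,
 * reading 100 bytes starting at offset 4050 copies a first fragment of
 * 46 bytes from offsets 4050..4095, copies the remaining 54 bytes from
 * offset 0, and returns the new offset (4050 + 100) % 4096 = 54.
 */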
/*
 *
 * hv_copyto_ringbuffer()
 *
 * Helper routine to copy data from a source buffer into the ring buffer.
 * Assumes there is enough room. Handles wrap-around of the destination
 * (ring) side only!
 *
 */
static u32 hv_copyto_ringbuffer(
	struct hv_ring_buffer_info *ring_info,
	u32 start_write_offset,
	void *src,
	u32 srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
	u32 frag_len;

	/* wrap-around detected! */
	if (srclen > ring_buffer_size - start_write_offset) {
		frag_len = ring_buffer_size - start_write_offset;

		memcpy(ring_buffer + start_write_offset, src, frag_len);
		memcpy(ring_buffer, src + frag_len, srclen - frag_len);
	} else
		memcpy(ring_buffer + start_write_offset, src, srclen);

	start_write_offset += srclen;
	start_write_offset %= ring_buffer_size;

	return start_write_offset;
}
/*
 *
 * hv_ringbuffer_get_debuginfo()
 *
 * Get various debug metrics for the specified ring buffer
 *
 */
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
				 struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	if (ring_info->ring_buffer) {
		hv_get_ringbuffer_availbytes(ring_info,
					     &bytes_avail_toread,
					     &bytes_avail_towrite);

		debug_info->bytes_avail_toread = bytes_avail_toread;
		debug_info->bytes_avail_towrite = bytes_avail_towrite;
		debug_info->current_read_index =
			ring_info->ring_buffer->read_index;
		debug_info->current_write_index =
			ring_info->ring_buffer->write_index;
		debug_info->current_interrupt_mask =
			ring_info->ring_buffer->interrupt_mask;
	}
}
/*
 *
 * hv_get_ringbuffer_interrupt_mask()
 *
 * Get the interrupt mask for the specified ring buffer
 *
 */
u32 hv_get_ringbuffer_interrupt_mask(struct hv_ring_buffer_info *rbi)
{
	return rbi->ring_buffer->interrupt_mask;
}
/*
 *
 * hv_ringbuffer_init()
 *
 * Initialize the ring buffer
 *
 */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       void *buffer, u32 buflen)
{
	if (sizeof(struct hv_ring_buffer) != PAGE_SIZE)
		return -EINVAL;

	memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

	ring_info->ring_buffer = (struct hv_ring_buffer *)buffer;
	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	ring_info->ring_size = buflen;
	ring_info->ring_datasize = buflen - sizeof(struct hv_ring_buffer);

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}
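
/*
 * Illustrative usage sketch (not part of this file; the names 'rbi' and
 * 'pages' are hypothetical): a caller provides page-aligned memory whose
 * first page holds the struct hv_ring_buffer control region (the
 * PAGE_SIZE check above enforces this) and whose remainder becomes the
 * data area accounted in ring_datasize:
 *
 *	struct hv_ring_buffer_info rbi;
 *	void *pages = (void *)__get_free_pages(GFP_KERNEL, 1);
 *
 *	if (pages && hv_ringbuffer_init(&rbi, pages, 2 * PAGE_SIZE) == 0) {
 *		// ring ready: ring_datasize == 2 * PAGE_SIZE - PAGE_SIZE
 *	}
 */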
/*
 *
 * hv_ringbuffer_cleanup()
 *
 * Cleanup the ring buffer
 *
 */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
}
/*
 *
 * hv_ringbuffer_write()
 *
 * Write to the ring buffer
 *
 */
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
			struct scatterlist *sglist, u32 sgcount)
{
	int i = 0;
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 totalbytes_towrite = 0;

	struct scatterlist *sg;
	u32 next_write_location;
	u64 prev_indices = 0;
	unsigned long flags;

	for_each_sg(sglist, sg, sgcount, i)
		totalbytes_towrite += sg->length;

	totalbytes_towrite += sizeof(u64);

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(outring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);

	/*
	 * If there is only room for the packet, assume it is full.
	 * Otherwise, the next time around, we think the ring buffer
	 * is empty since the read index == write index.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -1;
	}

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	for_each_sg(sglist, sg, sgcount, i) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
							   next_write_location,
							   sg_virt(sg),
							   sg->length);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
						   next_write_location,
						   &prev_indices,
						   sizeof(u64));

	/* Make sure we flush all writes before updating the write index */
	mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	return 0;
}
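
/*
 * On-ring layout produced above: the scatterlist data is copied first,
 * immediately followed by the 8-byte 'prev_indices' trailer (the write
 * index packed by hv_get_ring_bufferindices()). The write index itself
 * is only published after the mb() barrier, so a concurrent reader never
 * observes a partially copied packet.
 */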
/*
 *
 * hv_ringbuffer_peek()
 *
 * Read without advancing the read index
 *
 */
int hv_ringbuffer_peek(struct hv_ring_buffer_info *inring_info,
		       void *buffer, u32 buflen)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
	unsigned long flags;

	spin_lock_irqsave(&inring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(inring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);

	/* Make sure there is something to read */
	if (bytes_avail_toread < buflen) {
		spin_unlock_irqrestore(&inring_info->ring_lock, flags);
		return -1;
	}

	/* Convert to byte offset */
	next_read_location = hv_get_next_read_location(inring_info);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    buffer,
						    buflen,
						    next_read_location);

	spin_unlock_irqrestore(&inring_info->ring_lock, flags);

	return 0;
}
/*
 *
 * hv_ringbuffer_read()
 *
 * Read and advance the read index
 *
 */
int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
		       u32 buflen, u32 offset)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
	u64 prev_indices = 0;
	unsigned long flags;

	if (buflen == 0)
		return -EINVAL;

	spin_lock_irqsave(&inring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(inring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);

	/* Make sure there is something to read */
	if (bytes_avail_toread < buflen) {
		spin_unlock_irqrestore(&inring_info->ring_lock, flags);
		return -1;
	}

	next_read_location =
		hv_get_next_readlocation_withoffset(inring_info, offset);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    buffer,
						    buflen,
						    next_read_location);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    &prev_indices,
						    sizeof(u64),
						    next_read_location);

	/*
	 * Make sure all reads are done before we update the read index
	 * since the writer may start writing to the read area once the
	 * read index is updated.
	 */
	mb();

	/* Update the read index */
	hv_set_next_read_location(inring_info, next_read_location);

	spin_unlock_irqrestore(&inring_info->ring_lock, flags);

	return 0;
}
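
/*
 * Typical consumption pattern (a sketch of how a caller might pair the
 * two entry points above; the exact framing depends on the channel's
 * packet descriptor format): first peek the fixed-size descriptor with
 * hv_ringbuffer_peek() without consuming it, size a buffer from it, then
 * consume the packet with hv_ringbuffer_read(), which also strips the
 * 8-byte 'prev_indices' trailer and may be given a nonzero 'offset' to
 * skip the descriptor when only the payload is wanted.
 */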