
/librpc/svc_clnt_common.c

https://bitbucket.org/androidarmv6/android_hardware_msm7k
#include <rpc/rpc.h>
#include <arpa/inet.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <debug.h>

/* Raw transport hooks into the RPC-router device, provided elsewhere. */
extern int r_open(const char *router);
extern void r_close(int handle);
extern int r_read(int handle, char *buf, uint32 size);
extern int r_write(int handle, const char *buf, uint32 size);
extern int r_control(int handle, const uint32 cmd, void *arg);
static void xdr_std_destroy(xdr_s_type *xdr)
{
    /* nothing to tear down for the standard transport */
}

static bool_t xdr_std_control(xdr_s_type *xdr, int request, void *info)
{
    return r_control(xdr->fd, request, info);
}

static bool_t xdr_std_msg_done(xdr_s_type *xdr)
{
    /* nothing to do */
    return TRUE;
}
/* Outgoing message control functions */

static bool_t xdr_std_msg_start(xdr_s_type *xdr,
                                rpc_msg_e_type rpc_msg_type)
{
    /* The xid does not matter under our set of assumptions: for a single
     * program/version channel, communication is synchronous. If several
     * processes attempt to call functions on a program, then the rpcrouter
     * driver will ensure that the calls are properly muxed, because the
     * processes will have separate PIDs, and the rpcrouter driver uses PIDs to
     * keep track of RPC transactions. For multiple threads in the same
     * process accessing the same program, we serialize access in clnt_call()
     * by locking a mutex around the RPC call. If threads in the same process
     * call into different programs, then there is no issue, again because of
     * the use of a mutex in clnt_call().
     *
     * NOTE: This comment assumes that the only way we talk to the RPC router
     *       from a client is by using clnt_call(), which is the case for all
     *       client code generated by rpcgen().
     *
     * NOTE: The RPC router driver will soon be able to open a separate device
     *       file for each program/version channel. This will allow for
     *       natural multiplexing among clients, as we won't have to rely on
     *       the mutex for the case where different programs are being called
     *       into by separate threads in the same process. When this happens,
     *       we'll need to optimize the RPC library to add a separate mutex for
     *       each program/version channel, which will require some sort of
     *       registry.
     */
    if (rpc_msg_type == RPC_MSG_CALL) xdr->xid++;

    /* We start writing into the outgoing-message buffer past the first
     * (RPC_OFFSET+2) words, because we need to write header information
     * before we send the message. The header information includes the
     * destination address and the pacmark header.
     */
    xdr->out_next = (RPC_OFFSET+2)*sizeof(uint32);

    /* We write the pacmark header when we send the message. */
    ((uint32 *)xdr->out_msg)[RPC_OFFSET] = htonl(xdr->xid);
    /* RPC call or reply? */
    ((uint32 *)xdr->out_msg)[RPC_OFFSET+1] = htonl(rpc_msg_type);

    return TRUE;
}
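/* Illustrative sketch (not part of this file): the comment above relies on
 * clnt_call() holding a mutex for the whole call/reply exchange. The shape of
 * that serialization, with hypothetical names and assuming <pthread.h>, would
 * be roughly the following.
 */
#if 0
static pthread_mutex_t rpc_lock = PTHREAD_MUTEX_INITIALIZER;

static bool_t serialized_rpc(xdr_s_type *xdr,
                             bool_t (*do_one_transaction)(xdr_s_type *))
{
    bool_t ok;
    /* Only one call/reply pair is in flight per process at a time, so the
     * xid bumped in xdr_std_msg_start() cannot be confused with another
     * outstanding transaction from the same PID. */
    pthread_mutex_lock(&rpc_lock);
    ok = do_one_transaction(xdr);
    pthread_mutex_unlock(&rpc_lock);
    return ok;
}
#endif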
static bool_t xdr_std_msg_abort(xdr_s_type *xdr)
{
    /* dummy */
    return TRUE;
}

/* Can be used to send both calls and replies. */
extern bool_t xdr_recv_reply_header(xdr_s_type *xdr, rpc_reply_header *reply);

static bool_t xdr_std_msg_send(xdr_s_type *xdr)
{
    int ret;

    /* Send the RPC packet. */
    ret = r_write(xdr->fd, (void *)xdr->out_msg, xdr->out_next);
    xdr->xdr_err = ret;
    if (ret != xdr->out_next)
        return FALSE;
    return TRUE;
}
static bool_t xdr_std_read(xdr_s_type *xdr)
{
    xdr->in_len = r_read(xdr->fd, (void *)xdr->in_msg, RPCROUTER_MSGSIZE_MAX);
    if (xdr->in_len < 0) return FALSE;
    if (xdr->in_len < (RPC_OFFSET+2)*4) {
        xdr->in_len = -1;
        return FALSE;
    }
    xdr->in_next = (RPC_OFFSET+2)*4;
    return TRUE;
}
/* Message data functions */

static bool_t xdr_std_send_uint32(xdr_s_type *xdr, const uint32 *value)
{
    if (xdr->out_next >= RPCROUTER_MSGSIZE_MAX - 3) return FALSE;
    *(uint32 *)(xdr->out_msg + xdr->out_next) = htonl(*value);
    xdr->out_next += 4;
    return TRUE;
}

static bool_t xdr_std_send_int8(xdr_s_type *xdr, const int8 *value)
{
    uint32 val = *value;
    return xdr_std_send_uint32(xdr, &val);
}

static bool_t xdr_std_send_uint8(xdr_s_type *xdr, const uint8 *value)
{
    uint32 val = *value;
    return xdr_std_send_uint32(xdr, &val);
}

static bool_t xdr_std_send_int16(xdr_s_type *xdr, const int16 *value)
{
    uint32 val = *value;
    return xdr_std_send_uint32(xdr, &val);
}

static bool_t xdr_std_send_uint16(xdr_s_type *xdr, const uint16 *value)
{
    uint32 val = *value;
    return xdr_std_send_uint32(xdr, &val);
}

static bool_t xdr_std_send_int32(xdr_s_type *xdr, const int32 *value)
{
    return xdr_std_send_uint32(xdr, (const uint32 *)value);
}

static bool_t xdr_std_send_bytes(xdr_s_type *xdr, const uint8 *buf,
                                 uint32 len)
{
    if (xdr->out_next + len > RPCROUTER_MSGSIZE_MAX) return FALSE;
    while (len--)
        xdr->out_msg[xdr->out_next++] = *buf++;
    /* Pad to a 4-byte boundary, as XDR requires. */
    while (xdr->out_next % 4)
        xdr->out_msg[xdr->out_next++] = 0;
    return TRUE;
}
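/* Illustrative sketch (not part of this file): XDR opaque data is padded to a
 * 4-byte boundary by xdr_std_send_bytes() above, so a caller marshalling a
 * counted byte array would typically send the length word first and then the
 * bytes. The helper name below is hypothetical.
 */
#if 0
static bool_t send_counted_bytes(xdr_s_type *xdr, const uint8 *buf, uint32 len)
{
    /* One 4-byte length word, then the payload zero-padded to a 4-byte
     * boundary: sending 5 bytes therefore consumes 4 + 8 bytes of out_msg. */
    return xdr_std_send_uint32(xdr, &len) &&
           xdr_std_send_bytes(xdr, buf, len);
}
#endif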
#if 0
#include <unwind.h>

typedef struct
{
    size_t count;
    intptr_t* addrs;
} stack_crawl_state_t;

static _Unwind_Reason_Code trace_function(_Unwind_Context *context, void *arg)
{
    stack_crawl_state_t* state = (stack_crawl_state_t*)arg;
    if (state->count) {
        intptr_t ip = (intptr_t)_Unwind_GetIP(context);
        if (ip) {
            state->addrs[0] = ip;
            state->addrs++;
            state->count--;
        }
    }
    return _URC_NO_REASON;
}

static inline
int get_backtrace(intptr_t* addrs, size_t max_entries)
{
    stack_crawl_state_t state;
    state.count = max_entries;
    state.addrs = (intptr_t*)addrs;
    _Unwind_Backtrace(trace_function, (void*)&state);
    return max_entries - state.count;
}
#endif
static bool_t xdr_std_recv_uint32(xdr_s_type *xdr, uint32 *value)
{
#if 0
    /* Debugging aid: dump the call stack on every 32-bit read. */
    intptr_t trace[20], *tr;
    int nc = get_backtrace(trace, 20);
    tr = trace;
    while (nc--)
        D("\t%02d: %p\n", nc, (void *)*tr++);
#endif
    if (xdr->in_next + 4 > xdr->in_len) { return FALSE; }
    if (value) *value = ntohl(*(uint32 *)(xdr->in_msg + xdr->in_next));
    xdr->in_next += 4;
    return TRUE;
}
/* Receive a 32-bit word and narrow it to the destination type. */
#define RECEIVE \
    uint32 val; \
    if (xdr_std_recv_uint32(xdr, &val)) { \
        *value = val; \
        return TRUE; \
    } \
    return FALSE

static bool_t xdr_std_recv_int8(xdr_s_type *xdr, int8 *value)
{
    RECEIVE;
}

static bool_t xdr_std_recv_uint8(xdr_s_type *xdr, uint8 *value)
{
    RECEIVE;
}

static bool_t xdr_std_recv_int16(xdr_s_type *xdr, int16 *value)
{
    RECEIVE;
}

static bool_t xdr_std_recv_uint16(xdr_s_type *xdr, uint16 *value)
{
    RECEIVE;
}

#undef RECEIVE

static bool_t xdr_std_recv_int32(xdr_s_type *xdr, int32 *value)
{
    return xdr_std_recv_uint32(xdr, (uint32 *)value);
}
static bool_t xdr_std_recv_bytes(xdr_s_type *xdr, uint8 *buf, uint32 len)
{
    if (xdr->in_next + (int)len > xdr->in_len) return FALSE;
    if (buf) memcpy(buf, &xdr->in_msg[xdr->in_next], len);
    xdr->in_next += len;
    /* Skip the XDR padding up to the next 4-byte boundary. */
    xdr->in_next = (xdr->in_next + 3) & ~3;
    return TRUE;
}
const xdr_ops_s_type xdr_std_xops = {
    xdr_std_destroy,
    xdr_std_control,
    xdr_std_read,
    xdr_std_msg_done,
    xdr_std_msg_start,
    xdr_std_msg_abort,
    xdr_std_msg_send,
    xdr_std_send_int8,
    xdr_std_send_uint8,
    xdr_std_send_int16,
    xdr_std_send_uint16,
    xdr_std_send_int32,
    xdr_std_send_uint32,
    xdr_std_send_bytes,
    xdr_std_recv_int8,
    xdr_std_recv_uint8,
    xdr_std_recv_int16,
    xdr_std_recv_uint16,
    xdr_std_recv_int32,
    xdr_std_recv_uint32,
    xdr_std_recv_bytes,
};
xdr_s_type *xdr_init_common(const char *router, int is_client)
{
    xdr_s_type *xdr = (xdr_s_type *)calloc(1, sizeof(xdr_s_type));

    xdr->xops = &xdr_std_xops;
    xdr->fd = r_open(router);
    if (xdr->fd < 0) {
        E("ERROR OPENING [%s]: %s\n", router, strerror(errno));
        free(xdr);
        return NULL;
    }
    xdr->is_client = is_client;
    D("OPENED [%s] fd %d\n", router, xdr->fd);
    return xdr;
}
xdr_s_type *xdr_clone(xdr_s_type *other)
{
    xdr_s_type *xdr = (xdr_s_type *)calloc(1, sizeof(xdr_s_type));

    xdr->xops = &xdr_std_xops;
    xdr->fd = dup(other->fd);
    if (xdr->fd < 0) {
        E("ERROR DUPLICATING FD %d: %s\n", other->fd, strerror(errno));
        free(xdr);
        return NULL;
    }
    /* Carry over the transaction state from the source handle. */
    xdr->xid = other->xid;
    xdr->x_prog = other->x_prog;
    xdr->x_vers = other->x_vers;
    xdr->is_client = other->is_client;
    D("CLONED fd %d --> %d\n", other->fd, xdr->fd);
    return xdr;
}
void xdr_destroy_common(xdr_s_type *xdr)
{
    D("CLOSING fd %d\n", xdr->fd);
    r_close(xdr->fd);
    free(xdr);
}
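/* Illustrative sketch (not part of this file): rough lifetime of a client
 * transport built on the primitives above. Real clients go through
 * clnt_call() and the xdr_std_xops table, and also marshal the ONC RPC call
 * header (program, version, procedure, credentials), which this sketch
 * omits; the router device path and debug output here are assumptions for
 * illustration only.
 */
#if 0
static void example_transaction(void)
{
    xdr_s_type *xdr = xdr_init_common("/dev/example-rpc-router", 1 /* client */);
    if (!xdr) return;

    uint32 arg = 42;
    if (xdr_std_msg_start(xdr, RPC_MSG_CALL) &&
        xdr_std_send_uint32(xdr, &arg) &&
        xdr_std_msg_send(xdr) &&
        xdr_std_read(xdr)) {
        uint32 result;
        if (xdr_std_recv_uint32(xdr, &result))
            D("reply word: %08x\n", (unsigned)result);
    }

    xdr_destroy_common(xdr);
}
#endif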