
/mordor/examples/iombench.cpp

http://github.com/mozy/mordor
Possible License(s): BSD-3-Clause
//
// Mordor IOManager benchmark app.
//
// Can act as both the client and the server.
//
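// Typical usage is to run one iombench process as the server and a second as
// the client against the same address; command-line parsing and the benchmark
// driver loop (rounds, client counts, iteration counts) live in NetBench
// (see netbench.h), not in this file.
//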
#include "mordor/predef.h"

#include "netbench.h"

#include <cstring>
#include <iostream>

// included explicitly for boost::bind, boost::function, memset and
// current_exception_diagnostic_information, which are used below and are
// likely otherwise only pulled in transitively
#include <boost/bind.hpp>
#include <boost/exception/diagnostic_information.hpp>
#include <boost/function.hpp>
#include <boost/scoped_array.hpp>

#include "mordor/atomic.h"
#include "mordor/config.h"
#include "mordor/fibersynchronization.h"
#include "mordor/iomanager.h"
#include "mordor/log.h"
#include "mordor/main.h"
#include "mordor/socket.h"

using namespace Mordor;

static ConfigVar<int>::ptr g_iomThreads = Config::lookup<int>(
    "iombench.threads", 1, "Number of threads used by the iomanager");

static Logger::ptr g_log = Log::lookup("mordor:iombench");
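// g_iomThreads is consumed once, in MORDOR_MAIN below, to size the IOManager's
// thread pool. Because main() calls Config::loadFromEnvironment() first, the
// value can be overridden from the environment at launch; the exact variable
// name follows Mordor's Config naming rules (assumption: "iombench.threads"
// would be spelled along the lines of IOMBENCH_THREADS, with dots becoming
// underscores).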
class IOMBenchServer : public NetBenchServer
{
public:
    IOMBenchServer(IOManager& iom)
        : m_iom(iom)
    {}

    void run(std::string& host,
             size_t perConnToRead,
             size_t perConnToWrite,
             boost::function<void()> done)
    {
        m_iom.schedule(boost::bind(&IOMBenchServer::server, this, host,
                                   perConnToRead, perConnToWrite));
        done();
    }

    void stop()
    {
        m_sock->cancelAccept();
    }

private:
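    // The server fiber: bind and listen on the requested address, then accept
    // connections forever; each accepted socket gets its own fiber running
    // handleConn(), which repeatedly reads a fixed-size request of
    // perConnToRead bytes and replies with perConnToWrite bytes until the
    // peer disconnects. stop() cancels the pending accept(), which ends the
    // accept loop.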
    void server(std::string host, size_t perConnToRead, size_t perConnToWrite)
    {
        m_data.reset(new char[perConnToWrite]);
        memset(m_data.get(), 'B', perConnToWrite);

        // figure out the host addr to use
        std::vector<Address::ptr> addrs = Address::lookup(host);
        MORDOR_ASSERT(!addrs.empty());

        // setup the server
        m_sock = addrs.front()->createSocket(m_iom, SOCK_STREAM);
        m_sock->setOption(SOL_SOCKET, SO_REUSEADDR, 1);
        m_sock->bind(addrs.front());
        m_sock->listen();

        // accept connections
        while (true) {
            Socket::ptr conn;
            try {
                conn = m_sock->accept();
            } catch (Exception&) {
                return;
            }
            m_iom.schedule(boost::bind(&IOMBenchServer::handleConn,
                                       this,
                                       conn,
                                       perConnToRead,
                                       perConnToWrite));
        }
    }
    void handleConn(Socket::ptr conn,
                    size_t perConnToRead, size_t perConnToWrite)
    {
        boost::scoped_array<char> rdata(new char[perConnToRead]);

        size_t n;
        while (true) {
            // read a complete fixed-size request
            n = 0;
            while (n < perConnToRead) {
                // receive() returns 0 when the peer closes the connection
                size_t bytes = conn->receive(&rdata[n], perConnToRead - n);
                if (bytes == 0) {
                    return;
                }
                n += bytes;
            }

            // send a complete fixed-size response
            n = 0;
            while (n < perConnToWrite) {
                n += conn->send(&m_data[n], perConnToWrite - n);
            }
        }
    }

private:
    IOManager& m_iom;
    Socket::ptr m_sock;
    boost::scoped_array<char> m_data;
};
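// A minimal sketch of driving IOMBenchServer by hand, outside the NetBench
// harness; the address, the 64-byte message sizes, the extra worker thread and
// the no-op completion callback are illustrative assumptions, not part of this
// benchmark:
//
//     void noop() {}
//
//     IOManager iom(2);                  // extra worker thread runs the server fiber
//     IOMBenchServer server(iom);
//     std::string host("localhost:9000");
//     server.run(host, 64, 64, &noop);   // serve 64-byte requests/responses
//     // ... point a client at localhost:9000, and later, to shut down:
//     server.stop();
//     iom.stop();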
class IOMBenchClient : public NetBenchClient
{
public:
    IOMBenchClient(IOManager& iom)
        : m_iom(iom),
          m_connectedCond(m_mutex),
          m_readyCond(m_mutex),
          m_doneCond(m_mutex),
          m_round(0),
          m_totalClients(0),
          m_stop(false)
    { }

    void init(std::string& host, size_t perConnToRead, size_t perConnToWrite,
              boost::function<void()> done)
    {
        m_perConnToRead = perConnToRead;
        m_perConnToWrite = perConnToWrite;

        // prep the common send buffer
        m_data.reset(new char[m_perConnToWrite]);
        memset(m_data.get(), 'A', m_perConnToWrite);

        // figure out the host addr to use
        std::vector<Address::ptr> addrs = Address::lookup(host);
        MORDOR_ASSERT(!addrs.empty());

        // save off the server addr
        m_addr = addrs.front();

        done();
    }
    void prepClientsForNextRound(size_t newClients,
                                 size_t newActive,
                                 size_t iters,
                                 boost::function<void()> done)
    {
        m_iters = iters;
        m_newClients = 0;
        m_newClientsNeeded = newClients;
        m_totalClients += newClients;

        MORDOR_LOG_DEBUG(g_log) << "prep "
            << "newClients " << newClients << " "
            << "newActive " << newActive << " "
            << "iters " << iters;

        for (size_t i = 0; i < newClients; i++) {
            m_iom.schedule(boost::bind(&IOMBenchClient::client,
                                       this, newActive > 0));
            if (newActive) {
                newActive--;
            }
        }

        // Wait for all new clients to get connected and waiting at the
        // top of their request loop
        FiberMutex::ScopedLock lock(m_mutex);
        while (m_newClients != m_newClientsNeeded) {
            m_connectedCond.wait();
        }
        lock.unlock();

        done();
    }
    // implementers are encouraged to actually tally numOps in the done
    // callback so that we can check to make sure that we did the work
    // that we expected to
    void startRound(boost::function<void(size_t numOps)> done)
    {
        m_clientsDone = 0;
        m_opsDone = 0;
        m_round++;
        MORDOR_LOG_DEBUG(g_log) << "round start " << m_round
            << " " << m_clientsDone << " " << m_totalClients;
        m_readyCond.broadcast();

        // Wait for all clients to finish
        FiberMutex::ScopedLock lock(m_mutex);
        while (m_clientsDone != m_totalClients) {
            MORDOR_LOG_DEBUG(g_log) << "round wait " << m_clientsDone
                << " " << m_totalClients;
            m_doneCond.wait();
        }
        lock.unlock();

        MORDOR_LOG_DEBUG(g_log) << "round done " << m_opsDone;
        done(m_opsDone);
    }

    void stop()
    {
        FiberMutex::ScopedLock lock(m_mutex);
        m_stop = true;
        m_readyCond.broadcast();
    }
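    // Per-client lifecycle: connect, count in via m_connectedCond, then loop:
    // wait for startRound() to bump m_round and broadcast m_readyCond, run
    // m_iters request/response pairs if this client is "active", bump
    // m_clientsDone, and let the last finisher signal m_doneCond so
    // startRound() can report the tally. stop() sets m_stop and wakes
    // everyone so the fibers can exit.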
    void client(bool active)
    {
        MORDOR_LOG_DEBUG(g_log) << "client start " << active;

        // due to other synchronization I don't think we need
        // to actually lock to store the round number, but meh
        FiberMutex::ScopedLock lock(m_mutex);
        int round = m_round;
        lock.unlock();

        Socket::ptr conn = m_addr->createSocket(m_iom, SOCK_STREAM);
        conn->connect(m_addr);

        lock.lock();
        if (++m_newClients == m_newClientsNeeded) {
            m_connectedCond.signal();
        }
        lock.unlock();

        while (true) {
            waitForNextRound(round);
            if (m_stop) {
                return;
            }

            if (active) {
                sendRequests(conn);
            }

            if (++m_clientsDone == m_totalClients) {
                m_doneCond.signal();
            }
        }
    }

    void waitForNextRound(int& round)
    {
        FiberMutex::ScopedLock lock(m_mutex);
        ++round;
        while (!m_stop && round != m_round) {
            m_readyCond.wait();
        }
    }
    void sendRequests(Socket::ptr conn)
    {
        for (size_t i = 0; i < m_iters; ++i) {
            // write the full request
            size_t n = 0;
            while (n < m_perConnToWrite) {
                n += conn->send(&m_data[n], m_perConnToWrite - n);
            }

            // read the full response
            boost::scoped_array<char> buf(new char[m_perConnToRead]);
            n = 0;
            while (n < m_perConnToRead) {
                n += conn->receive(&buf[n], m_perConnToRead - n);
                MORDOR_ASSERT(n != 0);
            }
            m_opsDone++;
        }
    }
private:
    IOManager& m_iom;
    Address::ptr m_addr;
    boost::scoped_array<char> m_data;

    FiberMutex m_mutex;
    FiberCondition m_connectedCond;
    FiberCondition m_readyCond;
    FiberCondition m_doneCond;

    int m_round;
    size_t m_totalClients;
    size_t m_newClients;
    size_t m_newClientsNeeded;
    size_t m_iters;
    size_t m_perConnToRead;
    size_t m_perConnToWrite;

    Atomic<size_t> m_clientsDone;
    Atomic<size_t> m_opsDone;

    bool m_stop;
};
MORDOR_MAIN(int argc, char *argv[])
{
    try {
        NetBench bench(argc, argv);

        Config::loadFromEnvironment();
        IOManager iom(g_iomThreads->val());

        IOMBenchServer server(iom);
        IOMBenchClient client(iom);

        bench.run(&server, &client);
        iom.stop();
        return 0;
    } catch (...) {
        std::cerr << "caught: "
                  << boost::current_exception_diagnostic_information() << "\n";
        throw;
    }
}