// Source: mordor/examples/iombench.cpp
// Project: http://github.com/mozy/mordor
//
// Mordor IOManager benchmark app.
//
// Can act as both the client and the server.
//
#include "mordor/predef.h"

#include "netbench.h"

#include <cstring>
#include <iostream>

#include <boost/scoped_array.hpp>

#include "mordor/atomic.h"
#include "mordor/config.h"
#include "mordor/fibersynchronization.h"
#include "mordor/iomanager.h"
#include "mordor/log.h"
#include "mordor/main.h"
#include "mordor/socket.h"
 23using namespace Mordor;
 24
 25static ConfigVar<int>::ptr g_iomThreads = Config::lookup<int>(
 26    "iombench.threads", 1, "Number of threads used by the iomanager");
 27
 28static Logger::ptr g_log = Log::lookup("mordor:iombench");
 29
 30class IOMBenchServer : public NetBenchServer
 31{
 32public:
 33    IOMBenchServer(IOManager& iom)
 34        : m_iom(iom)
 35    {}
 36
 37    void run(std::string& host,
 38             size_t perConnToRead,
 39             size_t perConnToWrite,
 40             boost::function<void()> done)
 41    {
 42        m_iom.schedule(boost::bind(&IOMBenchServer::server, this, host,
 43                                   perConnToRead, perConnToWrite));
 44        done();
 45    }
 46
 47    void stop()
 48    {
 49        m_sock->cancelAccept();
 50    }
 51
 52private:
 53    void server(std::string host, size_t perConnToRead, size_t perConnToWrite)
 54    {
 55        m_data.reset(new char[perConnToWrite]);
 56        memset(m_data.get(), 'B', perConnToWrite);
 57
 58        // figure out the host addr to use
 59        std::vector<Address::ptr> addrs = Address::lookup(host);
 60        MORDOR_ASSERT(!addrs.empty());
 61
 62        // setup the server
 63        m_sock = addrs.front()->createSocket(m_iom, SOCK_STREAM);
 64        m_sock->setOption(SOL_SOCKET, SO_REUSEADDR, 1);
 65        m_sock->bind(addrs.front());
 66        m_sock->listen();
 67
 68        // accept connections
 69        while (true) {
 70            Socket::ptr conn;
 71            try {
 72                conn = m_sock->accept();
 73            } catch (Exception&) {
 74                return;
 75            }
 76            m_iom.schedule(boost::bind(&IOMBenchServer::handleConn,
 77                                               this,
 78                                               conn,
 79                                               perConnToRead,
 80                                               perConnToWrite));
 81        }
 82    }
 83
 84    void handleConn(Socket::ptr conn,
 85                    size_t perConnToRead, size_t perConnToWrite)
 86    {
 87        boost::scoped_array<char> rdata(new char[perConnToRead]);
 88
 89        size_t n;
 90        while (true) {
 91            n = 0;
 92            while (n < perConnToRead) {
 93                n = conn->receive(&rdata[n], perConnToRead - n);
 94                if (n == 0) {
 95                    return;
 96                }
 97            }
 98
 99            n = 0;
100            while (n < perConnToWrite) {
101                n += conn->send(&m_data[n], perConnToWrite - n);
102            }
103        }
104    }
105
106private:
107    IOManager& m_iom;
108    Socket::ptr m_sock;
109    boost::scoped_array<char> m_data;
110};
111
112class IOMBenchClient : public NetBenchClient
113{
114public:
115    IOMBenchClient(IOManager& iom)
116        : m_iom(iom),
117          m_connectedCond(m_mutex),
118          m_readyCond(m_mutex),
119          m_doneCond(m_mutex),
120          m_round(0),
121          m_totalClients(0),
122          m_stop(false)
123    { }
124
125    void init(std::string& host, size_t perConnToRead, size_t perConnToWrite,
126              boost::function<void()> done)
127    {
128        m_perConnToRead = perConnToRead;
129        m_perConnToWrite = perConnToWrite;
130
131        // prep the common send buffer
132        m_data.reset(new char[m_perConnToWrite]);
133        memset(m_data.get(), 'A', m_perConnToWrite);
134
135        // figure out the host addr to use
136        std::vector<Address::ptr> addrs = Address::lookup(host);
137        MORDOR_ASSERT(!addrs.empty());
138
139        // save off the server addr
140        m_addr = addrs.front();
141
142        done();
143    }
144
145    void prepClientsForNextRound(size_t newClients,
146                                 size_t newActive,
147                                 size_t iters,
148                                 boost::function<void()> done)
149    {
150        m_iters = iters;
151        m_newClients = 0;
152        m_newClientsNeeded = newClients;
153        m_totalClients += newClients;
154
155        MORDOR_LOG_DEBUG(g_log) << "prep "
156            << "newClients " << newClients << " "
157            << "newActive " << newActive << " "
158            << "iters " << iters;
159
160        for (size_t i = 0; i < newClients; i++) {
161            m_iom.schedule(boost::bind(&IOMBenchClient::client,
162                                               this, newActive > 0));
163            if (newActive) {
164                newActive--;
165            }
166        }
167
168        // Wait for all new clients to get connected and waiting at the
169        // top of their request loop
170        FiberMutex::ScopedLock lock(m_mutex);
171        while (m_newClients != m_newClientsNeeded) {
172            m_connectedCond.wait();
173        }
174        lock.unlock();
175
176        done();
177    }
178
179    // implementers are encouraged to actually tally numOps in the done
180    // callback so that we can check to make sure that we did the work
181    // that we expected to
182    void startRound(boost::function<void(size_t numOps)> done)
183    {
184        m_clientsDone = 0;
185        m_opsDone = 0;
186        m_round++;
187        MORDOR_LOG_DEBUG(g_log) << "round start " << m_round
188            << " " << m_clientsDone << " " << m_totalClients;
189        m_readyCond.broadcast();
190
191        // Wait for all clients to finish
192        FiberMutex::ScopedLock lock(m_mutex);
193        while (m_clientsDone != m_totalClients) {
194            MORDOR_LOG_DEBUG(g_log) << "round wait " << m_clientsDone
195                << " " << m_totalClients;
196            m_doneCond.wait();
197        }
198        lock.unlock();
199
200        MORDOR_LOG_DEBUG(g_log) << "round done " << m_opsDone;
201        done(m_opsDone);
202    }
203
204    void stop()
205    {
206        FiberMutex::ScopedLock lock(m_mutex);
207        m_stop = true;
208        m_readyCond.broadcast();
209    }
210
211    void client(bool active)
212    {
213        MORDOR_LOG_DEBUG(g_log) << "client start " << active;
214
215        // due to other synchronization I don't think we need
216        // to actually lock to store the round number, but meh
217        FiberMutex::ScopedLock lock(m_mutex);
218        int round = m_round;
219        lock.unlock();
220
221        Socket::ptr conn = m_addr->createSocket(m_iom, SOCK_STREAM);
222        conn->connect(m_addr);
223
224        lock.lock();
225        if (++m_newClients == m_newClientsNeeded) {
226            m_connectedCond.signal();
227        }
228        lock.unlock();
229
230        while (true) {
231            waitForNextRound(round);
232
233            if (m_stop) {
234                return;
235            }
236
237            if (active) {
238                sendRequests(conn);
239            }
240
241            if (++m_clientsDone == m_totalClients) {
242                m_doneCond.signal();
243            }
244        }
245    }
246
247    void waitForNextRound(int& round)
248    {
249        FiberMutex::ScopedLock lock(m_mutex);
250        ++round;
251        while (!m_stop && round != m_round) {
252            m_readyCond.wait();
253        }
254    }
255
256    void sendRequests(Socket::ptr conn)
257    {
258        for (size_t i = 0; i < m_iters; ++i) {
259            size_t n = 0;
260            while (n < m_perConnToWrite) {
261                n = conn->send(&m_data[n], m_perConnToWrite - n);
262            }
263
264            boost::scoped_array<char> buf(new char[m_perConnToRead]);
265            n = 0;
266            while (n < m_perConnToRead) {
267                n += conn->receive(&buf[n], m_perConnToRead - n);
268                MORDOR_ASSERT(n != 0);
269            }
270
271            m_opsDone++;
272        }
273    }
274
275private:
276    IOManager& m_iom;
277    Address::ptr m_addr;
278
279    boost::scoped_array<char> m_data;
280
281    FiberMutex m_mutex;
282    FiberCondition m_connectedCond;
283    FiberCondition m_readyCond;
284    FiberCondition m_doneCond;
285
286    int m_round;
287    size_t m_totalClients;
288    size_t m_newClients;
289    size_t m_newClientsNeeded;
290
291    size_t m_iters;
292    size_t m_perConnToRead;
293    size_t m_perConnToWrite;
294
295    Atomic<size_t> m_clientsDone;
296    Atomic<size_t> m_opsDone;
297
298    bool m_stop;
299};
300
301MORDOR_MAIN(int argc, char *argv[])
302{
303    try {
304        NetBench bench(argc, argv);
305
306        Config::loadFromEnvironment();
307        IOManager iom(g_iomThreads->val());
308
309        IOMBenchServer server(iom);
310        IOMBenchClient client(iom);
311
312        bench.run(&server, &client);
313        iom.stop();
314        return 0;
315    } catch (...) {
316        std::cerr << "caught: "
317                  << boost::current_exception_diagnostic_information() << "\n";
318        throw;
319    }
320}