
/stacklesslib/replacements/socket.py

https://bitbucket.org/krisvale/stacklesslib
  1#
  2# Stackless compatible socket module.
  3#
  4# Author: Richard Tew <richard.m.tew@gmail.com>
  5#
  6# Feel free to email me with any questions, comments, or suggestions for
  7# improvement.
  8#
  9# Remaining work:
 10#
 11# = Test suite that verifies that emulated behaviour is correct.
 12#   = When closing the socket, pending senders are sent ECONNRESET.
 13#     This was obtained by opening a server socket, connecting a
 14#     client and then closing the server.  Then the client did a
 15#     send and got ECONNRESET.
 16# = Asyncore does not add that much to this module.  In fact, its
 17#   limitations and differences between implementations in different Python
 18#   versions just complicate things.
 19# = Select on Windows only handles 512 sockets at a time.  So if there
 20#   are more sockets than that, then they need to be separated and
 21#   batched around this limitation.
 22# = It should be possible to have this wrap different mechanisms of
 23#   asynchronous IO, from select to IO completion ports.
 24# = UDP support is mostly there due to the new hands-off approach, but
 25#   there are a few spots like handle_write and timeout handling, which need
 26#   to be dealt with.
 27#
 28# Python standard library socket unit test state:
 29#
 30# - 2.5: Bad.
 31# - 2.6: Excellent (two UDP failures).
 32# - 2.7: Excellent (two UDP failures).
 33#
 34# This module is otherwise known to generally work for 2.5, 2.6 and 2.7.
 35#
 36# Small parts of this code were contributed back with permission from an
 37# internal version of this module in use at CCP Games.
 38#
 39
 40from __future__ import absolute_import
 41import asyncore
 42from collections import deque
 43import gc
 44import logging
 45import select
 46import socket as stdsocket # We need the "socket" name for the function we export.
 47import sys
 48import time
 49import types
 50import weakref
 51
 52import stackless
 53from stacklesslib.util import send_throw
 54
 55log = logging.getLogger(__name__)
 56
 57# If you pump the scheduler, you may wish to prevent it from staying
 58# non-empty for prolonged periods of time.  If you do not pump the
 59# scheduler, you may instead wish to prevent calls to poll() from running too long.
 60# Doing so gives all managed sockets a fairer chance at being read from,
 61# rather than paying prolonged attention to sockets with more incoming data.
 62#
 63# These values govern how long a poll() call spends at a given attempt
 64# of reading the data present on a given socket.
 65#
 66VALUE_MAX_NONBLOCKINGREAD_SIZE = 1000000
 67VALUE_MAX_NONBLOCKINGREAD_CALLS = 100
 68
 69## Monkey-patching support.
 70
 71# We need this so that sockets are cleared out when they are no longer in use.
 72# In fact, it is essential to correct operation of this code.
 73asyncore.socket_map = weakref.WeakValueDictionary()
 74
 75try:
 76    from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, ECONNRESET, \
 77         ENOTCONN, ESHUTDOWN, EINTR, EISCONN, EBADF, ECONNABORTED, \
 78         ECONNREFUSED
 79except Exception:
 80    # Fallback on hard-coded PS3 constants.
 81    EALREADY = 37
 82    EINPROGRESS = 36
 83    EWOULDBLOCK = 35
 84    ECONNRESET = 54
 85    ENOTCONN = 57
 86    ESHUTDOWN = 58
 87    EINTR = 4
 88    EISCONN = 56
 89    EBADF = 9
 90    ECONNABORTED = 53
 91    ECONNREFUSED = 61
 92
 93# If we are to masquerade as the socket module, we need to provide the constants.
 94if "__all__" in stdsocket.__dict__:
 95    __all__ = stdsocket.__all__
 96    for k, v in stdsocket.__dict__.iteritems():
 97        if k in __all__:
 98            globals()[k] = v
 99        elif k == "EBADF":
100            globals()[k] = v
101else:
102    for k, v in stdsocket.__dict__.iteritems():
103        if k.upper() == k:
104            globals()[k] = v
105    error = stdsocket.error
106    timeout = stdsocket.timeout
107    # WARNING: this function blocks and is not thread safe.
108    # The only solution is to spawn a thread to handle all
109    # getaddrinfo requests.  Implementing a stackless DNS
110    # lookup service is only second best as getaddrinfo may
111    # use other methods.
112    getaddrinfo = stdsocket.getaddrinfo
113
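# The warning above suggests pushing getaddrinfo() calls onto a worker thread
# so that a blocking DNS lookup does not stall every tasklet on this thread.
# The helper below is only an illustrative sketch of that idea; it is not used
# by this module, its name is made up, and it assumes your Stackless build
# allows channel communication between threads (the result or exception is
# handed back to the calling tasklet over a one-shot channel).
def _threaded_getaddrinfo_sketch(*args, **kwargs):
    import threading

    channel = stackless.channel()

    def worker():
        # Runs on a plain OS thread; only this thread blocks on the lookup.
        try:
            result = stdsocket.getaddrinfo(*args, **kwargs)
        except Exception:
            send_throw(channel, *sys.exc_info())
        else:
            channel.send(result)

    threading.Thread(target=worker).start()
    # The calling tasklet blocks here, but other tasklets keep running.
    return channel.receive()
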
114# urllib2 apparently uses this directly.  We need to cater to that.
115if hasattr(stdsocket, "_fileobject"):
116    _fileobject = stdsocket._fileobject
117
118# Someone needs to invoke asyncore.poll() regularly to keep the socket
119# data moving.  The "ManageSockets" function here is a simple example
120# of such a function.  It is started by StartManager(), which uses the
121# global "managerRunning" to ensure that no more than one copy is
122# running.
123#
124# If you think you can do this better, register an alternative to
125# StartManager using stacklesssocket_manager().  Your function will be
126# called every time a new socket is created; it's your responsibility
127# to ensure it doesn't start multiple copies of itself unnecessarily.
128#
129
130# By Nike: Added poll_interval on install to have it configurable from outside.
131
132managerRunning = False
133poll_interval = 0.05
134
135def ManageSockets():
136    global managerRunning
137
138    try:
139        while len(asyncore.socket_map) and managerRunning:
140            # Check the sockets for activity.
141            #print "POLL"
142            asyncore.poll(poll_interval)
143            # Yield to give other tasklets a chance to be scheduled.
144            _schedule_func()
145    finally:
146        managerRunning = False
147
148def StartManager():
149    global managerRunning
150    if not managerRunning:
151        managerRunning = True
152        return stackless.tasklet(ManageSockets)()
153def StopManager():
154    global managerRunning
155    managerRunning = False
156
157def pump():
158    """poll the sockets without waiting"""
159    asyncore.poll(0)
160
161_schedule_func = stackless.schedule
162_manage_sockets_func = StartManager
163_sleep_func = None
164_timeout_func = None
165_channel_refs = weakref.WeakKeyDictionary()
166
167def make_channel():
168    c = stackless.channel()
169    _channel_refs[c] = None
170    return c
171
172def can_timeout():
173    return _sleep_func is not None or _timeout_func is not None
174
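# Timeout support is opt-in: can_timeout() only returns True once the embedding
# application has provided a sleep or timeout hook.  The sketch below is purely
# illustrative (the function name is made up); a real application would block on
# a channel woken by its frame timer, or use stacklesslib's own sleep, rather
# than polling time.time() in a loop.
def _example_cooperative_sleep(seconds):
    # Yield to other tasklets until the deadline passes.
    end_time = time.time() + seconds
    while time.time() < end_time:
        stackless.schedule()

# The embedding application would then enable socket timeouts with something like:
#
#   import stacklesslib.replacements.socket as stacklesssocket
#   stacklesssocket._sleep_func = _example_cooperative_sleep
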
175def stacklesssocket_manager(mgr):
176    global _manage_sockets_func
177    _manage_sockets_func = mgr
178
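# As described in the comments further above, something must call asyncore.poll()
# regularly; StartManager() is merely the default way of arranging that.  The
# sketch below (illustrative only, names made up) registers a do-nothing manager
# via stacklesssocket_manager() so that the application can drive the sockets
# from its own main loop with pump() instead of a dedicated tasklet.
def _example_install_pump_manager():
    def _noop_manager():
        # Called whenever a socket is created; nothing to start, because the
        # application pumps the sockets itself.
        pass
    stacklesssocket_manager(_noop_manager)

# The application's main loop would then look something like:
#
#   while running:
#       pump()                 # move socket data without blocking
#       stackless.schedule()   # let tasklets react to the data
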
179def socket(*args, **kwargs):
180    import sys
181    if "socket" in sys.modules and sys.modules["socket"] is not stdsocket:
182        raise RuntimeError("Use 'stacklesssocket.install' instead of replacing the 'socket' module")
183
184_realsocket_old = stdsocket._realsocket
185_socketobject_old = stdsocket.socket
186
187class _socketobject_new(_socketobject_old):
188    def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, _sock=None):
189        # We need to do this here.
190        if _sock is None:
191            _sock = _realsocket_old(family, type, proto)
192            _sock = _fakesocket(_sock)
193            if _manage_sockets_func:
194                _manage_sockets_func()
195        _socketobject_old.__init__(self, family, type, proto, _sock)
196        if not isinstance(self._sock, _fakesocket):
197            raise RuntimeError("bad socket")
198
199    def accept(self):
200        sock, addr = self._sock.accept()
201        sock = _fakesocket(sock)
202        sock.wasConnected = True
203        return _socketobject_new(_sock=sock), addr
204
205    def setblockingsend(self, flag=None):
206        self._sock.setblockingsend(flag)
207
208    accept.__doc__ = _socketobject_old.accept.__doc__
209
210def make_blocking_socket(family=AF_INET, type=SOCK_STREAM, proto=0):
211    """
212    Sometimes you may want to create a normal Python socket, even when
213    monkey-patching is in effect.  One use case might be when you are trying to
214    do socket operations on the last runnable tasklet, if these socket
215    operations are on small writes on a non-connected UDP socket then you
216    might as well just use a blocking socket, as the effect of blocking
217    is negligible.
218    """
219    _sock = _realsocket_old(family, type, proto)
220    return _socketobject_old(_sock=_sock)
221
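# An illustrative use of make_blocking_socket() along the lines described in the
# docstring above: a small fire-and-forget datagram sent from a spot where
# briefly blocking is acceptable.  The function name, address and payload are
# placeholders, not part of this module's API.
def _example_blocking_udp_send(payload, address=("127.0.0.1", 9999)):
    s = make_blocking_socket(AF_INET, SOCK_DGRAM)
    try:
        # Blocks the whole thread for the duration of the send, which is
        # negligible for a small datagram.
        s.sendto(payload, address)
    finally:
        s.close()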
222
223def install(pi=None):
224    global poll_interval
225    if stdsocket._realsocket is socket:
226        raise StandardError("Still installed")
227    stdsocket._realsocket = socket
228    stdsocket.socket = stdsocket.SocketType = stdsocket._socketobject = _socketobject_new
229    if pi is not None:
230        poll_interval = pi
231
232def uninstall():
233    stdsocket._realsocket = _realsocket_old
234    stdsocket.socket = stdsocket.SocketType = stdsocket._socketobject = _socketobject_old
235
236READY_TO_SCHEDULE_TAG = "_SET_ASIDE"
237
238def ready_to_schedule(flag):
239    """
240    There may be cases where it is desirable to have socket operations happen before
241    an application starts up its framework, which would then poll asyncore.  This
242    function is intended to allow all sockets to be switched between working
243    "stacklessly" or working directly on their underlying socket objects in a
244    blocking manner.
245
246    Note that sockets created while this is in effect lack attribute values that
247    asyncore or this module may have set if the sockets had been created in a
248    fully monkey-patched manner.
249    """
250
251    def reroute_wrapper(funcName):
252        def reroute_call(self, *args, **kwargs):
253            if READY_TO_SCHEDULE_TAG not in _fakesocket.__dict__:
254                return
255            return getattr(self.socket, funcName)(*args, **kwargs)
256        return reroute_call
257
258    def update_method_referrers(methodName, oldClassMethod, newClassMethod):
259        """
260        The instance methods we need to update are stored in slots on instances of
261        socket._socketobject (actually our replacement subclass _socketobject_new).
262        """
263        for referrer1 in gc.get_referrers(oldClassMethod):
264            if isinstance(referrer1, types.MethodType):
265                for referrer2 in gc.get_referrers(referrer1):
266                    if isinstance(referrer2, _socketobject_new):
267                        setattr(referrer2, methodName, types.MethodType(newClassMethod, referrer1.im_self, referrer1.im_class))
268
269    # Guard against removal if not in place.
270    if flag:
271        if READY_TO_SCHEDULE_TAG not in _fakesocket.__dict__:
272            return
273        del _fakesocket.__dict__[READY_TO_SCHEDULE_TAG]
274    else:
275        _fakesocket.__dict__[READY_TO_SCHEDULE_TAG] = None
276    # sys.__stdout__.write("READY_TO_SCHEDULE %s\n" % flag)
277
278    # Play switcheroo with the attributes to get direct socket usage, or normal socket usage.
279    for attributeName in dir(_realsocket_old):
280        if not attributeName.startswith("_"):
281            storageAttributeName = attributeName +"_SET_ASIDE"
282            if flag:
283                storedValue = _fakesocket.__dict__.pop(storageAttributeName, None)
284                if storedValue is not None:
285                    rerouteValue = _fakesocket.__dict__[attributeName]
286                    # sys.__stdout__.write("___ RESTORING %s (AS %s) (WAS %s)\n" % (attributeName, storedValue, rerouteValue))
287                    _fakesocket.__dict__[attributeName] = storedValue
288                    update_method_referrers(attributeName, rerouteValue, storedValue)
289            else:
290                if attributeName in _fakesocket.__dict__:
291                    # sys.__stdout__.write("___ STORING %s = %s\n" % (attributeName, _fakesocket.__dict__[attributeName]))
292                    _fakesocket.__dict__[storageAttributeName] = _fakesocket.__dict__[attributeName]
293                _fakesocket.__dict__[attributeName] = reroute_wrapper(attributeName)
294
295
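# An illustrative start-up sequence for ready_to_schedule() as described in its
# docstring: perform early, blocking socket work directly on the real sockets,
# then hand control back to the channel-based machinery once the framework is
# polling asyncore.  Both callables are placeholders for application code.
def _example_early_startup(do_blocking_setup, start_framework):
    ready_to_schedule(False)     # socket calls now go straight to the real sockets
    try:
        do_blocking_setup()      # e.g. fetch configuration before the framework runs
    finally:
        ready_to_schedule(True)  # back to scheduled, asyncore-driven operation
    start_framework()            # the framework now pumps asyncore regularly
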
296# asyncore in Python 2.6 treats socket connection errors as connections.
297if sys.version_info[0] == 2 and sys.version_info[1] == 6:
298    class asyncore_dispatcher(asyncore.dispatcher):
299        def handle_connect_event(self):
300            err = self.socket.getsockopt(stdsocket.SOL_SOCKET, stdsocket.SO_ERROR)
301            if err != 0:
302                raise stdsocket.error(err, asyncore._strerror(err))
303            super(asyncore_dispatcher, self).handle_connect_event()
304else:
305    asyncore_dispatcher = asyncore.dispatcher
306
307
308class _fakesocket(asyncore_dispatcher):
309    connectChannel = None
310    acceptChannel = None
311    wasConnected = False
312
313    _timeout = None
314    _blocking = True
315
316    lastReadChannelRef = None
317    lastReadTally = 0
318    lastReadCalls = 0
319
320    def __init__(self, realSocket):
321        # This is worth doing.  I was passing in an invalid socket which
322        # was an instance of _fakesocket and it was causing tasklet death.
323        if not isinstance(realSocket, _realsocket_old):
324            raise StandardError("An invalid socket passed to fakesocket %s" % realSocket.__class__)
325
326        # This will register the real socket in the internal socket map.
327        asyncore_dispatcher.__init__(self, realSocket)
328
329        self.readQueue = deque()
330        self.writeQueue = deque()
331        self._blockingsend = True # Default behaviour is to block and wait for result for send()
332        self._stream = realSocket.type == stdsocket.SOCK_STREAM
333
334        if can_timeout():
335            self._timeout = stdsocket.getdefaulttimeout()
336
337    def receive_with_timeout(self, channel):
338        if self._timeout is not None:
339            # Start a timing out process.
340            # a) Engage a pre-existing external tasklet to send an exception on our channel if it has a receiver, if we are still there when it times out.
341            # b) Launch a tasklet that does a sleep, and sends an exception if we are still waiting, when it is awoken.
342            # Block waiting for a send.
343
344            if _timeout_func is not None:
345                # You will want to use this if you are using sockets in a different thread from your sleep functionality.
346                _timeout_func(self._timeout, channel, (timeout, "timed out"))
347            elif _sleep_func is not None:
348                stackless.tasklet(self._manage_receive_with_timeout)(channel)
349            else:
350                raise NotImplementedError("should not be here")
351
352            try:
353                ret = channel.receive()
354            except BaseException, e:
355                log.debug('sock %d, receive exception %r', id(self), e)
356                raise
357            return ret
358        else:
359            return channel.receive()
360
361    def _manage_receive_with_timeout(self, channel):
362        if channel.balance < 0:
363            _sleep_func(self._timeout)
364            if channel.balance < 0:
365                channel.send_exception(timeout, "timed out")
366
367    def __del__(self):
368        # There are no more users (sockets or files) of this fake socket, we
369        # are safe to close it fully.  If we don't, asyncore will choke on
370        # the weakref failures.
371        self.close()
372
373    # The asyncore version of this function depends on socket being set
374    # which is not the case when this fake socket has been closed.
375    def __getattr__(self, attr):
376        if not hasattr(self, "socket"):
377            raise AttributeError("socket attribute unset on '"+ attr +"' lookup")
378        return getattr(self.socket, attr)
379
380    ## Asyncore potential activity indicators.
381
382    def readable(self):
383        if self.socket.type == SOCK_DGRAM:
384            return True
385        if len(self.readQueue):
386            return True
387        if self.acceptChannel is not None and self.acceptChannel.balance < 0:
388            return True
389        if self.connectChannel is not None and self.connectChannel.balance < 0:
390            return True
391        return False
392
393    def writable(self):
394        if self.socket.type != SOCK_DGRAM and not self.connected:
395            return True
396        if len(self.writeQueue):
397            return True
398        return False
399
400    ## Overridden socket methods.
401
402    def accept(self):
403        self._ensure_non_blocking_read()
404        if not self.acceptChannel:
405            self.acceptChannel = make_channel()
406        return self.receive_with_timeout(self.acceptChannel)
407
408    def listen(self, num):
409        if num > 2<<29:
410            raise OverflowError # for socket unittests compatibility
411        asyncore_dispatcher.listen(self, num)
412
413    def connect(self, address):
414        """
415        If a timeout is set for the connection attempt and the timeout occurs,
416        then it is the responsibility of the user to close the socket, should
417        they not wish the connection to potentially establish anyway.
418        """
419        asyncore_dispatcher.connect(self, address)
420
421        # UDP sockets do not connect.
422        if self.socket.type != SOCK_DGRAM and not self.connected:
423            if not self.connectChannel:
424                self.connectChannel = make_channel()
425                # Prefer the sender.  Do not block when sending; given that
426                # a tasklet is known to be waiting on the channel, the send completes immediately.
427                self.connectChannel.preference = 1
428            self.receive_with_timeout(self.connectChannel)
429
430    def _send(self, data, flags, nowait=False, dest=None):
431        if not dest:
432            self._ensure_connected()
433
434        if not nowait:
435            channel = make_channel()
436            channel.preference = 1 # Prefer the sender.
437        else:
438            channel = None
439        self.writeQueue.append((channel, flags, data, dest))
440        if channel:
441            return self.receive_with_timeout(channel)
442        else:
443            return len(data)
444
445    def setblockingsend(self, flag=None):
446        old = self._blockingsend
447        if flag is not None:
448            self._blockingsend = flag
449        return old
450
451    def send(self, data, flags=0):
452        return self._send(data, flags, not self._blockingsend)
453
454    def sendall(self, data, flags=0):
455        if not self._blockingsend:
456            self._send(data, flags, True)
457            return
458        while len(data):
459            nbytes = self._send(data, flags)
460            if nbytes == 0:
461                raise Exception("completely unexpected situation, no data sent")
462            data = data[nbytes:]
463
464    def sendto(self, *args):
465        # sendto(data, address)
466        # sendto(data [, flags], address)
467        # go through hoops to emulate std socket errors for unittests
468        if len(args) == 2:
469            sendData, flags, sendAddress = args[0], 0, args[1]
470        elif len(args) == 3:
471            sendData, flags, sendAddress = args
472        else:
473            raise TypeError, "sendto() takes 2 or 3 arguments (%d given)" % (len(args))
474        # wrap sendAddress so that an empty value doesn't trigger connection test
475        return self._send(sendData, flags, not self._blockingsend, (sendAddress,))
476
477    def _recv(self, methodName, args, sizeIdx=0):
478        self._ensure_non_blocking_read()
479
480        if self._fileno is None:
481            log.debug("sock %d, self._fileno is None", id(self))
482            return ""
483
484        if len(args) >= sizeIdx+1:
485            generalArgs = list(args)
486            generalArgs[sizeIdx] = 0
487            generalArgs = tuple(generalArgs)
488        else:
489            generalArgs = args
490        #print self._fileno, "_recv:---ENTER---", (methodName, args)
491        channel = None
492        if self.lastReadChannelRef is not None and self.lastReadTally < VALUE_MAX_NONBLOCKINGREAD_SIZE and self.lastReadCalls < VALUE_MAX_NONBLOCKINGREAD_CALLS:
493            channel = self.lastReadChannelRef()
494            self.lastReadChannelRef = None
495        #elif self.lastReadTally >= VALUE_MAX_NONBLOCKINGREAD_SIZE or self.lastReadCalls >= VALUE_MAX_NONBLOCKINGREAD_CALLS:
496            #print "_recv:FORCE-CHANNEL-CHANGE %d %d" % (self.lastReadTally, self.lastReadCalls)
497
498        if channel is None:
499            channel = make_channel()
500            channel.preference = -1 # Prefer the receiver.
501            self.lastReadTally = self.lastReadCalls = 0
502            #print self._fileno, "_recv:NEW-CHANNEL", id(channel)
503            self.readQueue.append([ channel, methodName, args ])
504        else:
505            self.readQueue[0][1:] = (methodName, args)
506            #print self._fileno, "_recv:RECYCLE-CHANNEL", id(channel), self.lastReadTally
507
508        ret = self.receive_with_timeout(channel)
509
510        #storing the last channel is a way to communicate with the producer tasklet, so that it
511        #immediately tries to read more, when we do the next receive.  This is to optimize cases
512        #where one can do multiple recv() calls without blocking, but each call only gives you
513        #a limited amount of data.  We then get a tight tasklet interaction between consumer
514        #and producer until EWOULDBLOCK is received from the socket.
515        self.lastReadChannelRef = weakref.ref(channel)
516        if isinstance(ret, types.StringTypes):
517            recvlen = len(ret)
518        elif methodName == "recvfrom":
519            recvlen = len(ret[0])
520        elif methodName == "recvfrom_into":
521            recvlen = ret[0]
522        else:
523            recvlen = ret
524        self.lastReadTally += recvlen
525        self.lastReadCalls += 1
526
527        #print self._fileno, "_recv:---EXIT---", (methodName, args) , recvlen, self.lastReadChannelRef()
528
529        return ret
530
531    def recv(self, *args):
532        if self.socket.type != SOCK_DGRAM and not self.connected:
533            # Sockets which have never been connected do this.
534            if not self.wasConnected:
535                raise error(ENOTCONN, 'Socket is not connected')
536
537        return self._recv("recv", args)
538
539    def recv_into(self, *args):
540        if self.socket.type != SOCK_DGRAM and not self.connected:
541            # Sockets which have never been connected do this.
542            if not self.wasConnected:
543                raise error(ENOTCONN, 'Socket is not connected')
544
545        return self._recv("recv_into", args, sizeIdx=1)
546
547    def recvfrom(self, *args):
548        return self._recv("recvfrom", args)
549
550    def recvfrom_into(self, *args):
551        return self._recv("recvfrom_into", args, sizeIdx=1)
552
553    def close(self):
554        if self._fileno is None:
555            return
556
557        asyncore_dispatcher.close(self)
558
559        self.connected = False
560        self.accepting = False
561
562        # Clear out all the channels with relevant errors.
563        while self.acceptChannel and self.acceptChannel.balance < 0:
564            self.acceptChannel.send_exception(stdsocket.error, EBADF, 'Bad file descriptor')
565        while self.connectChannel and self.connectChannel.balance < 0:
566            self.connectChannel.send_exception(stdsocket.error, ECONNREFUSED, 'Connection refused')
567        self._clear_queue(self.writeQueue, stdsocket.error, ECONNRESET)
568        self._clear_queue(self.readQueue)
569
570    def _clear_queue(self, queue, *args):
571        for t in queue:
572            if t[0] and t[0].balance < 0:
573                if len(args):
574                    t[0].send_exception(*args)
575                else:
576                    t[0].send("")
577        queue.clear()
578
579    # asyncore doesn't support this.  Why not?
580    def fileno(self):
581        return self.socket.fileno()
582
583    def _ensure_non_blocking_read(self):
584        if not self._blocking:
585            # Ensure there is something to read on the socket before fetching it; otherwise raise an error indicating the operation would block.
586            r, w, e = select.select([ self ], [], [], 0.0)
587            if not r:
588                raise stdsocket.error(EWOULDBLOCK, "The socket operation could not complete without blocking")
589
590    def _ensure_connected(self):
591        if not self.connected:
592            # The socket was never connected.
593            if not self.wasConnected:
594                raise error(ENOTCONN, "Socket is not connected")
595            # The socket has been closed already.
596            raise error(EBADF, 'Bad file descriptor')
597
598    def setblocking(self, flag):
599        self._blocking = flag
600        if flag:
601            self._timeout = None
602        else:
603            self._timeout = 0.0
604
605    def gettimeout(self):
606        return self._timeout
607
608    def settimeout(self, value):
609        if value == 0.0:
610            self._blocking = False
611            self._timeout = 0.0
612        else:
613            if value and not can_timeout():
614                raise RuntimeError("This is a stackless socket - to have timeout support you need to provide a sleep function")
615            self._blocking = True
616            self._timeout = value
617
618    def handle_accept(self):
619        if self.acceptChannel and self.acceptChannel.balance < 0:
620            t = asyncore.dispatcher.accept(self)
621            if t is None:
622                return
623            t[0].setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
624            stackless.tasklet(self.acceptChannel.send)(t)
625
626    # Inform the blocked connect call that the connection has been made.
627    def handle_connect(self):
628        if self.socket.type != SOCK_DGRAM:
629            if self.connectChannel and self.connectChannel.balance < 0:
630                self.wasConnected = True
631                self.connectChannel.send(None)
632
633    # Asyncore says it's done, but there may still be unread data buffered
634    # on the socket, so we can't close yet.  Do nothing and let 'recv' trigger the close.
635    def handle_close(self):
636        # These do not interfere with ongoing reads, but should prevent
637        # sends and the like from going through.
638        self.connected = False
639        self.accepting = False
640
641        # This also gets called in the case that a non-blocking connect gets
642        # back to us with a no.  If we don't reject the connect, then all
643        # connect calls that do not connect will block indefinitely.
644        if self.connectChannel is not None:
645            self.close()
646
647    # Some error, just close the channel and let that raise errors to
648    # blocked calls.
649    def handle_expt(self):
650        if False:
651            import traceback
652            print "handle_expt: START"
653            traceback.print_exc()
654            print "handle_expt: END"
655        self.close()
656
657    def handle_error(self):
658        log.exception("Unexpected error")
659
660    def handle_read(self):
661        """
662            This will be called once per-poll call per socket with data in its buffer to be read.
663
664            If you call poll once every 30th of a second, then the rate at which you can read
665            incoming data is limited by the size of the packets it arrives in.
666            To deal with the worst case scenario, this function takes advantage of how scheduling
667            works and keeps reading until there is no more data left to read.
668
669            1.  This function is called indicating data is present to read.
670            2.  The desired amount is read and a send call is made on the channel with it.
671            3.  The function is blocked on that action and the tasklet it is running in is reinserted into the scheduler.
672            4.  The tasklet that made the read related socket call is awakened with the given data.
673            5.  It returns the data to the function that made that call.
674            6.  The function that made the call makes another read related socket call.
675                a) If the call is similar enough to the last call, then the previous channel is retrieved.
676                b) Otherwise, a new channel is created.
677            7.  The tasklet that is making the read related socket call is blocked on the channel.
678            8.  This tasklet that was blocked sending gets scheduled again.
679                a) If there is a tasklet blocked on the channel that it was using, then goto 2.
680                b) Otherwise, the function exits.
681
682            Note that if this function loops indefinitely, and the scheduler is pumped rather than
683            continuously run, the pumping application will stay in its pump call for a prolonged
684            period of time potentially starving the rest of the application for CPU time.
685
686            An attempt is made in _recv to limit the amount of data read in this manner to a fixed
687            amount and it lets this function exit if that amount is exceeded.  However, it is
688            up to the user of Stackless to understand how their application schedules and blocks,
689            and there are situations where small reads may still effectively loop indefinitely.
690        """
691
692        if not len(self.readQueue):
693            return
694
695        channel, methodName, args = self.readQueue[0]
696        #print self._fileno, "handle_read:---ENTER---", id(channel)
697        while channel.balance < 0:
698            args = self.readQueue[0][2]
699            #print self._fileno, "handle_read:CALL", id(channel), args
700            try:
701                try:
702                    result = getattr(self.socket, methodName)(*args)
703                    #print self._fileno, "handle_read:RESULT", id(channel), len(result)
704                except stdsocket.error as e:
705                    if e.errno == EWOULDBLOCK:
706                        return # sometimes get this on windows
707                    raise
708            except Exception, e:
709                log.debug('sock %d, read method %s error %r, throwing it', id(self), methodName, e)
710                send_throw(channel, *sys.exc_info())
711            else:
712                # don't len() the result, it may be int, tuple, etc. for recvfrom, recvinto, etc.
713                #print self._fileno, "handle_read:RETURN-RESULT", id(channel), len(result)
714                log.debug('sock %d, read method %s with args %r, sending it', id(self), methodName, args)
715                channel.send(result)
716
717        if len(self.readQueue) and self.readQueue[0][0] is channel:
718            del self.readQueue[0]
719        #print self._fileno, "handle_read:---EXIT---", id(channel)
720
721    def _merge_nbsends(self, data, flags):
722        #attempt to merge several nonblocking sends into one
723        try:
724            if len(self.writeQueue):
725                d = []
726                # pull them off as long as they are non-blocking and the flags are the same
727                while self.writeQueue and self.writeQueue[0][0] is None and self.writeQueue[0][1] == flags:
728                    d.append(self.writeQueue.popleft())
729                # Be sure to support memory view objects that we get sent sometimes
730                def tobytes(s):
731                    return s.tobytes() if isinstance(s, memoryview) else s
732                data = tobytes(data) +  "".join(tobytes(e[2]) for e in d)
733        except Exception, e:
734            # Merging failed for some reason; log it and fall back to sending the data unmerged.
735            log.exception("error while merging non-blocking sends")
736        return data, flags
737
738    def handle_write(self):
739        """
740        This function still needs work WRT UDP.
741        """
742        if len(self.writeQueue):
743            channel, flags, data, dest = self.writeQueue.popleft()
744
745            # asyncore does not expose sending the flags.
746            def asyncore_send(self, data, flags, dest):
747                try:
748                    if dest is not None:
749                        result = self.socket.sendto(data, flags, dest[0])
750                    else:
751                        result = self.socket.send(data, flags)
752                    return result
753                except stdsocket.error, why:
754                    # logging.root.exception("SOME SEND ERROR")
755                    if why.args[0] == EWOULDBLOCK:
756                        return 0
757                    raise
758
759            if channel:
760                try:
761                    nbytes = asyncore_send(self, data, flags, dest)
762                except Exception, e:
763                    if channel.balance < 0:
764                        send_throw(channel, *sys.exc_info())
765                else:
766                    if channel.balance < 0:
767                        channel.send(nbytes)
768            else:
769                # it's a non-blocking sendall
770                if self._stream:
771                    data, flags = self._merge_nbsends(data, flags)
772                try:
773                    nbytes = asyncore_send(self, data, flags, dest)
774                    data = data[nbytes:]
775                except Exception, e:
776                    log.info("exception during non-blocking send: %r", e)
777                else:
778                    if data and self._stream:
779                        self.writeQueue.appendleft((None, flags, data, dest))
780
781
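# The setblockingsend() extension above lets send() and sendall() queue data
# without blocking the calling tasklet; handle_write() later merges queued
# stream sends (_merge_nbsends) and pushes them out as the socket becomes
# writable.  The sketch below is illustrative only; 'sock' is assumed to be a
# socket created while this module is installed, and 'messages' any iterable
# of byte strings.
def _example_fire_and_forget_sends(sock, messages):
    previous = sock.setblockingsend(False)
    try:
        for message in messages:
            sock.sendall(message)   # returns immediately; the data is queued
    finally:
        sock.setblockingsend(previous)
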
782if False:
783    def dump_socket_stack_traces():
784        import traceback
785        for skt in asyncore.socket_map.values():
786            for k, v in skt.__dict__.items():
787                if isinstance(v, stackless.channel) and v.queue:
788                    i = 0
789                    current = v.queue
790                    while i == 0 or v.queue is not current:
791                        print "%s.%s.%s" % (skt, k, i)
792                        traceback.print_stack(v.queue.frame)
793                        i += 1
794
795
796if __name__ == '__main__':
797    import struct
798    # Test code goes here.
799    testAddress = "127.0.0.1", 3000
800    info = -12345678
801    data = struct.pack("i", info)
802    dataLength = len(data)
803
804    def TestTCPServer(address):
805        global info, data, dataLength
806
807        print "server listen socket creation"
808        listenSocket = stdsocket.socket(AF_INET, SOCK_STREAM)
809        listenSocket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
810        listenSocket.bind(address)
811        listenSocket.listen(5)
812
813        NUM_TESTS = 2
814
815        i = 1
816        while i < NUM_TESTS + 1:
817            # No need to schedule this tasklet as the accept should yield most
818            # of the time on the underlying channel.
819            print "server connection wait", i
820            currentSocket, clientAddress = listenSocket.accept()
821            print "server", i, "listen socket", currentSocket.fileno(), "from", clientAddress
822
823            if i == 1:
824                print "server closing (a)", i, "fd", currentSocket.fileno(), "id", id(currentSocket)
825                currentSocket.close()
826                print "server closed (a)", i
827            elif i == 2:
828                print "server test", i, "send"
829                currentSocket.send(data)
830                print "server test", i, "recv"
831                if currentSocket.recv(4) != "":
832                    print "server recv(1)", i, "FAIL"
833                    break
834                # multiple empty recvs are fine
835                if currentSocket.recv(4) != "":
836                    print "server recv(2)", i, "FAIL"
837                    break
838            else:
839                print "server closing (b)", i, "fd", currentSocket.fileno(), "id", id(currentSocket)
840                currentSocket.close()
841
842            print "server test", i, "OK"
843            i += 1
844
845        if i != NUM_TESTS+1:
846            print "server: FAIL", i
847        else:
848            print "server: OK", i
849
850        print "Done server"
851
852    def TestTCPClient(address):
853        global info, data, dataLength
854
855        # Attempt 1:
856        clientSocket = stdsocket.socket()
857        clientSocket.connect(address)
858        print "client connection (1) fd", clientSocket.fileno(), "id", id(clientSocket._sock), "waiting to recv"
859        if clientSocket.recv(5) != "":
860            print "client test", 1, "FAIL"
861        else:
862            print "client test", 1, "OK"
863
864        # Attempt 2:
865        clientSocket = stdsocket.socket()
866        clientSocket.connect(address)
867        print "client connection (2) fd", clientSocket.fileno(), "id", id(clientSocket._sock), "waiting to recv"
868        s = clientSocket.recv(dataLength)
869        if s == "":
870            print "client test", 2, "FAIL (disconnect)"
871        else:
872            t = struct.unpack("i", s)
873            if t[0] == info:
874                print "client test", 2, "OK"
875            else:
876                print "client test", 2, "FAIL (wrong data)"
877
878        print "client exit"
879
880    def TestMonkeyPatchUrllib(uri):
881        # replace the system socket with this module
882        install()
883        try:
884            import urllib  # must occur after monkey-patching!
885            f = urllib.urlopen(uri)
886            if not isinstance(f.fp._sock, _fakesocket):
887                raise AssertionError("failed to apply monkeypatch, got %s" % f.fp._sock.__class__)
888            s = f.read()
889            if len(s) != 0:
890                print "Fetched", len(s), "bytes via replaced urllib"
891            else:
892                raise AssertionError("no text received?")
893        finally:
894            uninstall()
895
896    def TestMonkeyPatchUDP(address):
897        # replace the system socket with this module
898        install()
899        try:
900            def UDPServer(address):
901                listenSocket = stdsocket.socket(AF_INET, SOCK_DGRAM)
902                listenSocket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
903                listenSocket.bind(address)
904
905                # Apparently each call to recvfrom maps to an incoming
906                # packet and if we only ask for part of that packet, the
907                # rest is lost.  We really need a proper unittest suite
908                # which tests this module against the normal socket
909                # module.
910                print "waiting to receive"
911                rdata = ""
912                while len(rdata) < 512:
913                    data, address = listenSocket.recvfrom(4096)
914                    print "received", data, len(data)
915                    rdata += data
916
917            def UDPClient(address):
918                clientSocket = stdsocket.socket(AF_INET, SOCK_DGRAM)
919                # clientSocket.connect(address)
920                print "sending 512 byte packet"
921                sentBytes = clientSocket.sendto("-"+ ("*" * 510) +"-", address)
922                print "sent 512 byte packet", sentBytes
923
924            stackless.tasklet(UDPServer)(address)
925            stackless.tasklet(UDPClient)(address)
926            stackless.run()
927        finally:
928            uninstall()
929
930    if "notready" in sys.argv:
931        sys.argv.remove("notready")
932        ready_to_schedule(False)
933
934    if len(sys.argv) == 2:
935        if sys.argv[1] == "client":
936            print "client started"
937            TestTCPClient(testAddress)
938            print "client exited"
939        elif sys.argv[1] == "slpclient":
940            print "client started"
941            stackless.tasklet(TestTCPClient)(testAddress)
942            stackless.run()
943            print "client exited"
944        elif sys.argv[1] == "server":
945            print "server started"
946            TestTCPServer(testAddress)
947            print "server exited"
948        elif sys.argv[1] == "slpserver":
949            print "server started"
950            stackless.tasklet(TestTCPServer)(testAddress)
951            stackless.run()
952            print "server exited"
953        else:
954            print "Usage:", sys.argv[0], "[client|server|slpclient|slpserver]"
955
956        sys.exit(1)
957    else:
958        print "* Running client/server test"
959        install()
960        try:
961            stackless.tasklet(TestTCPServer)(testAddress)
962            stackless.tasklet(TestTCPClient)(testAddress)
963            stackless.run()
964        finally:
965            uninstall()
966
967        print "* Running urllib test"
968        stackless.tasklet(TestMonkeyPatchUrllib)("http://python.org/")
969        stackless.run()
970
971        print "* Running udp test"
972        TestMonkeyPatchUDP(testAddress)
973
974        print "result: SUCCESS"