PageRenderTime 89ms CodeModel.GetById 29ms RepoModel.GetById 0ms app.codeStats 0ms

/stacklesslib/replacements/socket.py

https://bitbucket.org/krisvale/stacklesslib
Python | 974 lines | 855 code | 32 blank | 87 comment | 24 complexity | 8c3c0e1c1c5f91cb25b0b0fd166c7ae2 MD5 | raw file
  1. #
  2. # Stackless compatible socket module.
  3. #
  4. # Author: Richard Tew <richard.m.tew@gmail.com>
  5. #
  6. # Feel free to email me with any questions, comments, or suggestions for
  7. # improvement.
  8. #
  9. # Remaining work:
  10. #
  11. # = Test suite that verifies that emulated behaviour is correct.
  12. # = When closing the socket, pending senders are sent ECONNRESET.
  13. # This was obtained by opening a server socket, connecting a
  14. # client and then closing the server. Then the client did a
  15. # send and got ECONNRESET.
  16. # = Asyncore does not add that much to this module. In fact, its
  17. # limitations and differences between implementations in different Python
  18. # versions just complicate things.
  19. # = Select on Windows only handles 512 sockets at a time. So if there
  20. # are more sockets than that, then they need to be separated and
  21. # batched around this limitation.
  22. # = It should be possible to have this wrap different mechanisms of
  23. # asynchronous IO, from select to IO completion ports.
  24. # = UDP support is mostly there due to the new hands off approach, but
  25. # there are a few spots like handle_write and timeout handling, which need
  26. # to be dealt with.
  27. #
  28. # Python standard library socket unit test state:
  29. #
  30. # - 2.5: Bad.
  31. # - 2.6: Excellent (two UDP failures).
  32. # - 2.7: Excellent (two UDP failures).
  33. #
  34. # This module is otherwise known to generally work for 2.5, 2.6 and 2.7.
  35. #
  36. # Small parts of this code were contributed back with permission from an
  37. # internal version of this module in use at CCP Games.
  38. #
  39. from __future__ import absolute_import
  40. import asyncore
  41. from collections import deque
  42. import gc
  43. import logging
  44. import select
  45. import socket as stdsocket # We need the "socket" name for the function we export.
  46. import sys
  47. import time
  48. import types
  49. import weakref
  50. import stackless
  51. from stacklesslib.util import send_throw
  52. log = logging.getLogger(__name__)
# If you pump the scheduler and wish to prevent the scheduler from staying
# non-empty for prolonged periods of time, If you do not pump the scheduler,
# you may however wish to prevent calls to poll() from running too long.
# Doing so gives all managed sockets a fairer chance at being read from,
# rather than paying prolonged attention to sockets with more incoming data.
#
# These values govern how long a poll() call spends at a given attempt
# of reading the data present on a given socket.
#
VALUE_MAX_NONBLOCKINGREAD_SIZE = 1000000
VALUE_MAX_NONBLOCKINGREAD_CALLS = 100

## Monkey-patching support..

# We need this so that sockets are cleared out when they are no longer in use.
# In fact, it is essential to correct operation of this code.
asyncore.socket_map = weakref.WeakValueDictionary()

try:
    from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, ECONNRESET, \
         ENOTCONN, ESHUTDOWN, EINTR, EISCONN, EBADF, ECONNABORTED, \
         ECONNREFUSED
except Exception:
    # Fallback on hard-coded PS3 constants.
    EALREADY = 37
    EINPROGRESS = 36
    EWOULDBLOCK = 35
    ECONNRESET = 54
    ENOTCONN = 57
    ESHUTDOWN = 58
    EINTR = 4
    EISCONN = 56
    EBADF = 9
    ECONNABORTED = 53
    ECONNREFUSED = 61

# If we are to masquerade as the socket module, we need to provide the constants.
if "__all__" in stdsocket.__dict__:
    __all__ = stdsocket.__all__
    for k, v in stdsocket.__dict__.iteritems():
        if k in __all__:
            globals()[k] = v
        elif k == "EBADF":
            # EBADF is needed below but is not part of __all__.
            globals()[k] = v
else:
    # No __all__ available: copy every ALL-UPPERCASE name, which is the
    # naming convention the socket module uses for its constants.
    for k, v in stdsocket.__dict__.iteritems():
        if k.upper() == k:
            globals()[k] = v

# Re-export the stdlib exception types under this module's namespace.
error = stdsocket.error
timeout = stdsocket.timeout

# WARNING: this function blocks and is not thread safe.
# The only solution is to spawn a thread to handle all
# getaddrinfo requests. Implementing a stackless DNS
# lookup service is only second best as getaddrinfo may
# use other methods.
getaddrinfo = stdsocket.getaddrinfo

# urllib2 apparently uses this directly. We need to cater to that.
if hasattr(stdsocket, "_fileobject"):
    _fileobject = stdsocket._fileobject

# Someone needs to invoke asyncore.poll() regularly to keep the socket
# data moving. The "ManageSockets" function here is a simple example
# of such a function. It is started by StartManager(), which uses the
# global "managerRunning" to ensure that no more than one copy is
# running.
#
# If you think you can do this better, register an alternative to
# StartManager using stacklesssocket_manager(). Your function will be
# called every time a new socket is created; it's your responsibility
# to ensure it doesn't start multiple copies of itself unnecessarily.
#
# By Nike: Added poll_interval on install to have it configurable from outside,
managerRunning = False
poll_interval = 0.05
  122. def ManageSockets():
  123. global managerRunning
  124. try:
  125. while len(asyncore.socket_map) and managerRunning:
  126. # Check the sockets for activity.
  127. #print "POLL"
  128. asyncore.poll(poll_interval)
  129. # Yield to give other tasklets a chance to be scheduled.
  130. _schedule_func()
  131. finally:
  132. managerRunning = False
  133. def StartManager():
  134. global managerRunning
  135. if not managerRunning:
  136. managerRunning = True
  137. return stackless.tasklet(ManageSockets)()
  138. def StopManager():
  139. global managerRunning
  140. managerRunning = False
  141. def pump():
  142. """poll the sockets without waiting"""
  143. asyncore.poll(0)
# Hook points. An embedding framework may override these to control how the
# scheduler is yielded, how the poll manager is started, and how sleeping /
# timeouts are implemented (see can_timeout()).
_schedule_func = stackless.schedule
_manage_sockets_func = StartManager
_sleep_func = None
_timeout_func = None
# Weak bookkeeping of every channel handed out by make_channel().
# NOTE(review): nothing in this file reads it back - presumably kept for
# external debugging/diagnostics; confirm before removing.
_channel_refs = weakref.WeakKeyDictionary()
  149. def make_channel():
  150. c = stackless.channel()
  151. _channel_refs[c] = None
  152. return c
  153. def can_timeout():
  154. return _sleep_func is not None or _timeout_func is not None
  155. def stacklesssocket_manager(mgr):
  156. global _manage_sockets_func
  157. _manage_sockets_func = mgr
  158. def socket(*args, **kwargs):
  159. import sys
  160. if "socket" in sys.modules and sys.modules["socket"] is not stdsocket:
  161. raise RuntimeError("Use 'stacklesssocket.install' instead of replacing the 'socket' module")
# Capture the pristine stdlib implementations at import time so install()
# can patch them and uninstall() can restore them.
_realsocket_old = stdsocket._realsocket
_socketobject_old = stdsocket.socket
class _socketobject_new(_socketobject_old):
    """Replacement for socket._socketobject whose _sock is a _fakesocket."""

    def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, _sock=None):
        # We need to do this here.
        if _sock is None:
            _sock = _realsocket_old(family, type, proto)
            _sock = _fakesocket(_sock)
            # Ensure something is driving asyncore.poll() for this socket.
            if _manage_sockets_func:
                _manage_sockets_func()
        _socketobject_old.__init__(self, family, type, proto, _sock)
        if not isinstance(self._sock, _fakesocket):
            raise RuntimeError("bad socket")

    def accept(self):
        # Wrap the accepted real socket; it is already connected, so flag it
        # as such for the recv()/send() not-connected checks.
        sock, addr = self._sock.accept()
        sock = _fakesocket(sock)
        sock.wasConnected = True
        return _socketobject_new(_sock=sock), addr

    def setblockingsend(self, flag=None):
        # NOTE(review): unlike _fakesocket.setblockingsend, this drops the
        # previous value instead of returning it - confirm intentional.
        self._sock.setblockingsend(flag)

    accept.__doc__ = _socketobject_old.accept.__doc__
  183. def make_blocking_socket(family=AF_INET, type=SOCK_STREAM, proto=0):
  184. """
  185. Sometimes you may want to create a normal Python socket, even when
  186. monkey-patching is in effect. One use case might be when you are trying to
  187. do socket operations on the last runnable tasklet, if these socket
  188. operations are on small writes on a non-connected UDP socket then you
  189. might as well just use a blocking socket, as the effect of blocking
  190. is negligible.
  191. """
  192. _sock = _realsocket_old(family, type, proto)
  193. return _socketobject_old(_sock=_sock)
  194. def install(pi=None):
  195. global poll_interval
  196. if stdsocket._realsocket is socket:
  197. raise StandardError("Still installed")
  198. stdsocket._realsocket = socket
  199. stdsocket.socket = stdsocket.SocketType = stdsocket._socketobject = _socketobject_new
  200. if pi is not None:
  201. poll_interval = pi
  202. def uninstall():
  203. stdsocket._realsocket = _realsocket_old
  204. stdsocket.socket = stdsocket.SocketType = stdsocket._socketobject = _socketobject_old
# Marker key stored in _fakesocket.__dict__ while rerouting is in effect;
# also the suffix used for the stashed original attributes.
READY_TO_SCHEDULE_TAG = "_SET_ASIDE"

def ready_to_schedule(flag):
    """
    There may be cases where it is desirable to have socket operations happen before
    an application starts up its framework, which would then poll asyncore. This
    function is intended to allow all sockets to be switched between working
    "stacklessly" or working directly on their underlying socket objects in a
    blocking manner.

    flag=False engages direct (blocking) mode; flag=True restores the
    stackless behaviour.

    Note that sockets created while this is in effect lack attribute values that
    asyncore or this module may have set if the sockets were created in a full
    monkey patched manner.
    """
    def reroute_wrapper(funcName):
        # Build a method that forwards straight to the underlying real socket
        # while rerouting is engaged (tag present), and does nothing otherwise.
        def reroute_call(self, *args, **kwargs):
            if READY_TO_SCHEDULE_TAG not in _fakesocket.__dict__:
                return
            return getattr(self.socket, funcName)(*args, **kwargs)
        return reroute_call

    def update_method_referrers(methodName, oldClassMethod, newClassMethod):
        """
        The instance methods we need to update are stored in slots on instances of
        socket._socketobject (actually our replacement subclass _socketobject_new).
        """
        # Walk the gc referrer graph: bound methods wrapping the old class
        # function, then the socket objects holding those bound methods.
        for referrer1 in gc.get_referrers(oldClassMethod):
            if isinstance(referrer1, types.MethodType):
                for referrer2 in gc.get_referrers(referrer1):
                    if isinstance(referrer2, _socketobject_new):
                        setattr(referrer2, methodName, types.MethodType(newClassMethod, referrer1.im_self, referrer1.im_class))

    # Guard against removal if not in place.
    if flag:
        if READY_TO_SCHEDULE_TAG not in _fakesocket.__dict__:
            return
        del _fakesocket.__dict__[READY_TO_SCHEDULE_TAG]
    else:
        _fakesocket.__dict__[READY_TO_SCHEDULE_TAG] = None
    # sys.__stdout__.write("READY_TO_SCHEDULE %s\n" % flag)

    # Play switcheroo with the attributes to get direct socket usage, or normal socket usage.
    for attributeName in dir(_realsocket_old):
        if not attributeName.startswith("_"):
            storageAttributeName = attributeName + "_SET_ASIDE"
            if flag:
                # Restore: move the stashed original back and fix up any
                # bound methods already handed out to socket instances.
                storedValue = _fakesocket.__dict__.pop(storageAttributeName, None)
                if storedValue is not None:
                    rerouteValue = _fakesocket.__dict__[attributeName]
                    # sys.__stdout__.write("___ RESTORING %s (AS %s) (WAS %s)\n" % (attributeName, storedValue, rerouteValue))
                    _fakesocket.__dict__[attributeName] = storedValue
                    update_method_referrers(attributeName, rerouteValue, storedValue)
            else:
                # Engage: stash the original (if any) and install the reroute.
                if attributeName in _fakesocket.__dict__:
                    # sys.__stdout__.write("___ STORING %s = %s\n" % (attributeName, _fakesocket.__dict__[attributeName]))
                    _fakesocket.__dict__[storageAttributeName] = _fakesocket.__dict__[attributeName]
                _fakesocket.__dict__[attributeName] = reroute_wrapper(attributeName)
# asyncore in Python 2.6 treats socket connection errors as connections.
if sys.version_info[0] == 2 and sys.version_info[1] == 6:
    class asyncore_dispatcher(asyncore.dispatcher):
        """Dispatcher that turns SO_ERROR on connect into a raised exception."""
        def handle_connect_event(self):
            # Surface any pending socket-level error instead of letting the
            # event be treated as a successful connection.
            err = self.socket.getsockopt(stdsocket.SOL_SOCKET, stdsocket.SO_ERROR)
            if err != 0:
                raise stdsocket.error(err, asyncore._strerror(err))
            super(asyncore_dispatcher, self).handle_connect_event()
else:
    asyncore_dispatcher = asyncore.dispatcher
class _fakesocket(asyncore_dispatcher):
    """Channel-based emulation of a blocking socket on top of asyncore.

    Blocking calls (accept, connect, recv*, send*) queue their operation and
    block the calling tasklet on a stackless channel; the asyncore handle_*
    callbacks, run from poll(), complete the operation and wake the tasklet.
    """

    # Channel a connect() caller blocks on until handle_connect fires.
    connectChannel = None
    # Channel an accept() caller blocks on until handle_accept fires.
    acceptChannel = None
    # Set once a connection has been established; used to distinguish
    # "never connected" (ENOTCONN) from "closed" (EBADF) errors.
    wasConnected = False
    _timeout = None
    _blocking = True
    # Weakref to the channel used by the last _recv call, so consecutive
    # reads can recycle it (see handle_read's docstring for the rationale).
    lastReadChannelRef = None
    # Bytes / call count read through the recycled channel so far; bounded
    # by VALUE_MAX_NONBLOCKINGREAD_SIZE / _CALLS.
    lastReadTally = 0
    lastReadCalls = 0

    def __init__(self, realSocket):
        # This is worth doing. I was passing in an invalid socket which
        # was an instance of _fakesocket and it was causing tasklet death.
        if not isinstance(realSocket, _realsocket_old):
            raise StandardError("An invalid socket passed to fakesocket %s" % realSocket.__class__)
        # This will register the real socket in the internal socket map.
        asyncore_dispatcher.__init__(self, realSocket)
        # Pending reads/writes: entries pair a channel (or None) with the
        # operation details; serviced by handle_read/handle_write.
        self.readQueue = deque()
        self.writeQueue = deque()
        self._blockingsend = True # Default behaviour is to block and wait for result for send()
        self._stream = realSocket.type == stdsocket.SOCK_STREAM
        if can_timeout():
            self._timeout = stdsocket.getdefaulttimeout()

    def receive_with_timeout(self, channel):
        """Block on channel.receive(), arranging a timeout wake-up if set."""
        if self._timeout is not None:
            # Start a timing out process.
            # a) Engage a pre-existing external tasklet to send an exception on our channel if it has a receiver, if we are still there when it times out.
            # b) Launch a tasklet that does a sleep, and sends an exception if we are still waiting, when it is awoken.
            # Block waiting for a send.
            if _timeout_func is not None:
                # You will want to use this if you are using sockets in a different thread from your sleep functionality.
                _timeout_func(self._timeout, channel, (timeout, "timed out"))
            elif _sleep_func is not None:
                stackless.tasklet(self._manage_receive_with_timeout)(channel)
            else:
                raise NotImplementedError("should not be here")
            try:
                ret = channel.receive()
            except BaseException, e:
                log.debug('sock %d, receive exception %r', id(self), e)
                raise
            return ret
        else:
            return channel.receive()

    def _manage_receive_with_timeout(self, channel):
        # Helper tasklet: sleep for the timeout, then raise 'timeout' on the
        # channel if the caller is still blocked on it.
        if channel.balance < 0:
            _sleep_func(self._timeout)
            if channel.balance < 0:
                channel.send_exception(timeout, "timed out")

    def __del__(self):
        # There are no more users (sockets or files) of this fake socket, we
        # are safe to close it fully. If we don't, asyncore will choke on
        # the weakref failures.
        self.close()

    # The asyncore version of this function depends on socket being set
    # which is not the case when this fake socket has been closed.
    def __getattr__(self, attr):
        if not hasattr(self, "socket"):
            raise AttributeError("socket attribute unset on '"+ attr +"' lookup")
        return getattr(self.socket, attr)

    ## Asyncore potential activity indicators.

    def readable(self):
        # Interested in read events while there are pending reads, or a
        # tasklet is blocked waiting for accept/connect completion.
        if self.socket.type == SOCK_DGRAM:
            return True
        if len(self.readQueue):
            return True
        if self.acceptChannel is not None and self.acceptChannel.balance < 0:
            return True
        if self.connectChannel is not None and self.connectChannel.balance < 0:
            return True
        return False

    def writable(self):
        # Interested in write events while connecting, or while writes are queued.
        if self.socket.type != SOCK_DGRAM and not self.connected:
            return True
        if len(self.writeQueue):
            return True
        return False

    ## Overriden socket methods.

    def accept(self):
        self._ensure_non_blocking_read()
        if not self.acceptChannel:
            self.acceptChannel = make_channel()
        return self.receive_with_timeout(self.acceptChannel)

    def listen(self, num):
        if num > 2<<29:
            raise OverflowError # for socket unittests compatibility
        asyncore_dispatcher.listen(self, num)

    def connect(self, address):
        """
        If a timeout is set for the connection attempt, and the timeout occurs
        then it is the responsibility of the user to close the socket, should
        they not wish the connection to potentially establish anyway.
        """
        asyncore_dispatcher.connect(self, address)
        # UDP sockets do not connect.
        if self.socket.type != SOCK_DGRAM and not self.connected:
            if not self.connectChannel:
                self.connectChannel = make_channel()
                # Prefer the sender. Do not block when sending, given that
                # there is a tasklet known to be waiting, this will happen.
                self.connectChannel.preference = 1
            self.receive_with_timeout(self.connectChannel)

    def _send(self, data, flags, nowait=False, dest=None):
        # Queue a write; block on a channel for the result unless nowait.
        if not dest:
            self._ensure_connected()
        if not nowait:
            channel = make_channel()
            channel.preference = 1 # Prefer the sender.
        else:
            channel = None
        self.writeQueue.append((channel, flags, data, dest))
        if channel:
            return self.receive_with_timeout(channel)
        else:
            # Fire-and-forget: report the full length as accepted.
            return len(data)

    def setblockingsend(self, flag=None):
        """Set (or just query, when flag is None) the blocking-send mode."""
        old = self._blockingsend
        if flag is not None:
            self._blockingsend = flag
        return old

    def send(self, data, flags=0):
        return self._send(data, flags, not self._blockingsend)

    def sendall(self, data, flags=0):
        if not self._blockingsend:
            # Non-blocking mode: queue the whole payload and return at once.
            self._send(data, flags, True)
            return
        while len(data):
            nbytes = self._send(data, flags)
            if nbytes == 0:
                raise Exception("completely unexpected situation, no data sent")
            data = data[nbytes:]

    def sendto(self, *args):
        # sendto(data, address)
        # sendto(data [, flags], address)
        # go through hoops to emulate std socket errors for unittests
        if len(args) == 2:
            sendData, flags, sendAddress = args[0], 0, args[1]
        elif len(args) == 3:
            sendData, flags, sendAddress = args
        else:
            raise TypeError, "sendto() takes 2 or 3 arguments (%d given)" % (len(args))
        # wrap sendAddress so that an empty value doesn't trigger connection test
        return self._send(sendData, flags, not self._blockingsend, (sendAddress,))

    def _recv(self, methodName, args, sizeIdx=0):
        """Common implementation for recv/recv_into/recvfrom/recvfrom_into."""
        self._ensure_non_blocking_read()
        if self._fileno is None:
            log.debug("sock %d, self._fileno is None", id(self))
            return ""
        # NOTE(review): generalArgs is computed but never used below -
        # apparently vestigial; confirm before removing.
        if len(args) >= sizeIdx+1:
            generalArgs = list(args)
            generalArgs[sizeIdx] = 0
            generalArgs = tuple(generalArgs)
        else:
            generalArgs = args
        #print self._fileno, "_recv:---ENTER---", (methodName, args)
        channel = None
        # Recycle the previous read channel while under the size/call caps,
        # keeping the producer (handle_read) loop hot; see its docstring.
        if self.lastReadChannelRef is not None and self.lastReadTally < VALUE_MAX_NONBLOCKINGREAD_SIZE and self.lastReadCalls < VALUE_MAX_NONBLOCKINGREAD_CALLS:
            channel = self.lastReadChannelRef()
            self.lastReadChannelRef = None
        #elif self.lastReadTally >= VALUE_MAX_NONBLOCKINGREAD_SIZE or self.lastReadCalls >= VALUE_MAX_NONBLOCKINGREAD_CALLS:
            #print "_recv:FORCE-CHANNEL-CHANGE %d %d" % (self.lastReadTally, self.lastReadCalls)
        if channel is None:
            channel = make_channel()
            channel.preference = -1 # Prefer the receiver.
            self.lastReadTally = self.lastReadCalls = 0
            #print self._fileno, "_recv:NEW-CHANNEL", id(channel)
            self.readQueue.append([ channel, methodName, args ])
        else:
            # Reuse the queue head entry, just updating the operation.
            self.readQueue[0][1:] = (methodName, args)
            #print self._fileno, "_recv:RECYCLE-CHANNEL", id(channel), self.lastReadTally
        ret = self.receive_with_timeout(channel)
        # Storing the last channel is a way to communicate with the producer tasklet, so that it
        # immediately tries to read more, when we do the next receive. This is to optimize cases
        # where one can do multiple recv() calls without blocking, but each call only gives you
        # a limited amount of data. We then get a tight tasklet interaction between consumer
        # and producer until EWOULDBLOCK is received from the socket.
        self.lastReadChannelRef = weakref.ref(channel)
        # Work out how many bytes this call produced, per return shape.
        if isinstance(ret, types.StringTypes):
            recvlen = len(ret)
        elif methodName == "recvfrom":
            recvlen = len(ret[0])
        elif methodName == "recvfrom_into":
            recvlen = ret[0]
        else:
            recvlen = ret
        self.lastReadTally += recvlen
        self.lastReadCalls += 1
        #print self._fileno, "_recv:---EXIT---", (methodName, args), recvlen, self.lastReadChannelRef()
        return ret

    def recv(self, *args):
        if self.socket.type != SOCK_DGRAM and not self.connected:
            # Sockets which have never been connected do this.
            if not self.wasConnected:
                raise error(ENOTCONN, 'Socket is not connected')
        return self._recv("recv", args)

    def recv_into(self, *args):
        if self.socket.type != SOCK_DGRAM and not self.connected:
            # Sockets which have never been connected do this.
            if not self.wasConnected:
                raise error(ENOTCONN, 'Socket is not connected')
        return self._recv("recv_into", args, sizeIdx=1)

    def recvfrom(self, *args):
        return self._recv("recvfrom", args)

    def recvfrom_into(self, *args):
        return self._recv("recvfrom_into", args, sizeIdx=1)

    def close(self):
        if self._fileno is None:
            # Already closed.
            return
        asyncore_dispatcher.close(self)
        self.connected = False
        self.accepting = False
        # Clear out all the channels with relevant errors.
        while self.acceptChannel and self.acceptChannel.balance < 0:
            self.acceptChannel.send_exception(stdsocket.error, EBADF, 'Bad file descriptor')
        while self.connectChannel and self.connectChannel.balance < 0:
            self.connectChannel.send_exception(stdsocket.error, ECONNREFUSED, 'Connection refused')
        self._clear_queue(self.writeQueue, stdsocket.error, ECONNRESET)
        self._clear_queue(self.readQueue)

    def _clear_queue(self, queue, *args):
        # Wake every blocked tasklet on the queue, either with the given
        # exception or (for reads) with an empty-string EOF result.
        for t in queue:
            if t[0] and t[0].balance < 0:
                if len(args):
                    t[0].send_exception(*args)
                else:
                    t[0].send("")
        queue.clear()

    # asyncore doesn't support this. Why not?
    def fileno(self):
        return self.socket.fileno()

    def _ensure_non_blocking_read(self):
        if not self._blocking:
            # Ensure there is something on the socket, before fetching it. Otherwise, error complaining.
            r, w, e = select.select([ self ], [], [], 0.0)
            if not r:
                raise stdsocket.error(EWOULDBLOCK, "The socket operation could not complete without blocking")

    def _ensure_connected(self):
        if not self.connected:
            # The socket was never connected.
            if not self.wasConnected:
                raise error(ENOTCONN, "Socket is not connected")
            # The socket has been closed already.
            raise error(EBADF, 'Bad file descriptor')

    def setblocking(self, flag):
        self._blocking = flag
        # Mirror stdlib semantics: blocking => no timeout, non-blocking => 0.0.
        if flag:
            self._timeout = None
        else:
            self._timeout = 0.0

    def gettimeout(self):
        return self._timeout

    def settimeout(self, value):
        if value == 0.0:
            # A zero timeout is equivalent to non-blocking mode.
            self._blocking = False
            self._timeout = 0.0
        else:
            if value and not can_timeout():
                raise RuntimeError("This is a stackless socket - to have timeout support you need to provide a sleep function")
            self._blocking = True
            self._timeout = value

    def handle_accept(self):
        # Only accept when a tasklet is actually blocked in accept().
        if self.acceptChannel and self.acceptChannel.balance < 0:
            t = asyncore.dispatcher.accept(self)
            if t is None:
                return
            t[0].setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
            # Hand the (socket, address) pair over from a fresh tasklet so
            # this callback itself does not block.
            stackless.tasklet(self.acceptChannel.send)(t)

    # Inform the blocked connect call that the connection has been made.
    def handle_connect(self):
        if self.socket.type != SOCK_DGRAM:
            if self.connectChannel and self.connectChannel.balance < 0:
                self.wasConnected = True
                self.connectChannel.send(None)

    # Asyncore says its done but self.readBuffer may be non-empty
    # so can't close yet. Do nothing and let 'recv' trigger the close.
    def handle_close(self):
        # These do not interfere with ongoing reads, but should prevent
        # sends and the like from going through.
        self.connected = False
        self.accepting = False
        # This also gets called in the case that a non-blocking connect gets
        # back to us with a no. If we don't reject the connect, then all
        # connect calls that do not connect will block indefinitely.
        if self.connectChannel is not None:
            self.close()

    # Some error, just close the channel and let that raise errors to
    # blocked calls.
    def handle_expt(self):
        if False:
            # Disabled debugging output.
            import traceback
            print "handle_expt: START"
            traceback.print_exc()
            print "handle_expt: END"
        self.close()

    def handle_error(self):
        log.exception("Unexpected error")

    def handle_read(self):
        """
        This will be called once per-poll call per socket with data in its buffer to be read.
        If you call poll once every 30th of a second, then you are going to be rate limited
        in terms of how fast you can read incoming data by the packet size they arrive in.
        In order to deal with the worst case scenario, advantage is taken of how scheduling
        works in order to keep reading until there is no more data left to read.

        1. This function is called indicating data is present to read.
        2. The desired amount is read and a send call is made on the channel with it.
        3. The function is blocked on that action and the tasklet it is running in is reinserted into the scheduler.
        4. The tasklet that made the read related socket call is awakened with the given data.
        5. It returns the data to the function that made that call.
        6. The function that made the call makes another read related socket call.
           a) If the call is similar enough to the last call, then the previous channel is retrieved.
           b) Otherwise, a new channel is created.
        7. The tasklet that is making the read related socket call is blocked on the channel.
        8. This tasklet that was blocked sending gets scheduled again.
           a) If there is a tasklet blocked on the channel that it was using, then goto 2.
           b) Otherwise, the function exits.

        Note that if this function loops indefinitely, and the scheduler is pumped rather than
        continuously run, the pumping application will stay in its pump call for a prolonged
        period of time potentially starving the rest of the application for CPU time.
        An attempt is made in _recv to limit the amount of data read in this manner to a fixed
        amount and it lets this function exit if that amount is exceeded. However, this it is
        up to the user of Stackless to understand how their application schedules and blocks,
        and there are situations where small reads may still effectively loop indefinitely.
        """
        if not len(self.readQueue):
            return
        channel, methodName, args = self.readQueue[0]
        #print self._fileno, "handle_read:---ENTER---", id(channel)
        while channel.balance < 0:
            # Refetch args each pass; _recv may have updated the head entry.
            args = self.readQueue[0][2]
            #print self._fileno, "handle_read:CALL", id(channel), args
            try:
                try:
                    result = getattr(self.socket, methodName)(*args)
                    #print self._fileno, "handle_read:RESULT", id(channel), len(result)
                except stdsocket.error as e:
                    if e.errno == EWOULDBLOCK:
                        return # sometimes get this on windows
                    raise
            except Exception, e:
                log.debug('sock %d, read method %s error %r, throwing it', id(self), methodName, e)
                send_throw(channel, *sys.exc_info())
            else:
                # don't len() the result, it may be int, tuple, etc. for recvfrom, recvinto, etc.
                #print self._fileno, "handle_read:RETURN-RESULT", id(channel), len(result)
                log.debug('sock %d, read method %s with args %r, sending it', id(self), methodName, args)
                channel.send(result)
        # Pop the entry only if it still belongs to the channel we serviced.
        if len(self.readQueue) and self.readQueue[0][0] is channel:
            del self.readQueue[0]
        #print self._fileno, "handle_read:---EXIT---", id(channel)

    def _merge_nbsends(self, data, flags):
        # Attempt to merge several nonblocking sends into one.
        try:
            if len(self.writeQueue):
                d = []
                # Pull them off as long as they are non-blocking and flags are the same.
                while self.writeQueue and self.writeQueue[0][0] is None and self.writeQueue[0][1] == flags:
                    d.append(self.writeQueue.popleft())
                # Be sure to support memory view objects that we get sent sometimes.
                def tobytes(s):
                    return s.tobytes() if isinstance(s, memoryview) else s
                data = tobytes(data) + "".join(tobytes(e[2]) for e in d)
        except Exception, e:
            # Best-effort merge: on any failure, fall back to the original data.
            import logging
            logging.exception("****shit got real")
        return data, flags

    def handle_write(self):
        """
        This function still needs work WRT UDP.
        """
        if len(self.writeQueue):
            channel, flags, data, dest = self.writeQueue.popleft()
            # asyncore does not expose sending the flags.
            def asyncore_send(self, data, flags, dest):
                # Perform the raw send/sendto, mapping EWOULDBLOCK to 0 bytes.
                try:
                    if dest is not None:
                        result = self.socket.sendto(data, flags, dest[0])
                    else:
                        result = self.socket.send(data, flags)
                    return result
                except stdsocket.error, why:
                    # logging.root.exception("SOME SEND ERROR")
                    if why.args[0] == EWOULDBLOCK:
                        return 0
                    raise
            if channel:
                # A tasklet is blocked waiting for the result (or exception).
                try:
                    nbytes = asyncore_send(self, data, flags, dest)
                except Exception, e:
                    if channel.balance < 0:
                        send_throw(channel, *sys.exc_info())
                else:
                    if channel.balance < 0:
                        channel.send(nbytes)
            else:
                # It's a non-blocking sendall.
                if self._stream:
                    data, flags = self._merge_nbsends(data, flags)
                try:
                    nbytes = asyncore_send(self, data, flags, dest)
                    data = data[nbytes:]
                except Exception, e:
                    log.info("exception during non-blocking send: %r", e)
                else:
                    # Requeue any unsent remainder at the front of the queue.
                    if data and self._stream:
                        self.writeQueue.appendleft((None, flags, data, dest))
# Disabled debugging helper; flip the guard to True to define it.
if False:
    def dump_socket_stack_traces():
        # Print a stack trace for every tasklet blocked on any channel
        # attribute of any managed socket.
        import traceback
        for skt in asyncore.socket_map.values():
            for k, v in skt.__dict__.items():
                if isinstance(v, stackless.channel) and v.queue:
                    i = 0
                    current = v.queue
                    # NOTE(review): termination relies on v.queue advancing
                    # between iterations - confirm against stackless channel
                    # queue semantics.
                    while i == 0 or v.queue is not current:
                        print "%s.%s.%s" % (skt, k, i)
                        traceback.print_stack(v.queue.frame)
                        i += 1
if __name__ == '__main__':
    import struct
    # Test code goes here.
    testAddress = "127.0.0.1", 3000
    # Payload value round-tripped through the sockets by the tests below.
    info = -12345678
    data = struct.pack("i", info)
    dataLength = len(data)
    def TestTCPServer(address):
        """Accept NUM_TESTS connections, exercising close and send/recv EOF behaviour."""
        global info, data, dataLength
        print "server listen socket creation"
        listenSocket = stdsocket.socket(AF_INET, SOCK_STREAM)
        listenSocket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
        listenSocket.bind(address)
        listenSocket.listen(5)
        NUM_TESTS = 2
        i = 1
        while i < NUM_TESTS + 1:
            # No need to schedule this tasklet as the accept should yield most
            # of the time on the underlying channel.
            print "server connection wait", i
            currentSocket, clientAddress = listenSocket.accept()
            print "server", i, "listen socket", currentSocket.fileno(), "from", clientAddress
            if i == 1:
                # Test 1: immediate close; client should observe EOF.
                print "server closing (a)", i, "fd", currentSocket.fileno(), "id", id(currentSocket)
                currentSocket.close()
                print "server closed (a)", i
            elif i == 2:
                # Test 2: send the payload, then expect EOF from the client side.
                print "server test", i, "send"
                currentSocket.send(data)
                print "server test", i, "recv"
                if currentSocket.recv(4) != "":
                    print "server recv(1)", i, "FAIL"
                    break
                # multiple empty recvs are fine
                if currentSocket.recv(4) != "":
                    print "server recv(2)", i, "FAIL"
                    break
            else:
                print "server closing (b)", i, "fd", currentSocket.fileno(), "id", id(currentSocket)
                currentSocket.close()
            print "server test", i, "OK"
            i += 1
        if i != NUM_TESTS+1:
            print "server: FAIL", i
        else:
            print "server: OK", i
        print "Done server"
  729. def TestTCPClient(address):
  730. global info, data, dataLength
  731. # Attempt 1:
  732. clientSocket = stdsocket.socket()
  733. clientSocket.connect(address)
  734. print "client connection (1) fd", clientSocket.fileno(), "id", id(clientSocket._sock), "waiting to recv"
  735. if clientSocket.recv(5) != "":
  736. print "client test", 1, "FAIL"
  737. else:
  738. print "client test", 1, "OK"
  739. # Attempt 2:
  740. clientSocket = stdsocket.socket()
  741. clientSocket.connect(address)
  742. print "client connection (2) fd", clientSocket.fileno(), "id", id(clientSocket._sock), "waiting to recv"
  743. s = clientSocket.recv(dataLength)
  744. if s == "":
  745. print "client test", 2, "FAIL (disconnect)"
  746. else:
  747. t = struct.unpack("i", s)
  748. if t[0] == info:
  749. print "client test", 2, "OK"
  750. else:
  751. print "client test", 2, "FAIL (wrong data)"
  752. print "client exit"
  753. def TestMonkeyPatchUrllib(uri):
  754. # replace the system socket with this module
  755. install()
  756. try:
  757. import urllib # must occur after monkey-patching!
  758. f = urllib.urlopen(uri)
  759. if not isinstance(f.fp._sock, _fakesocket):
  760. raise AssertionError("failed to apply monkeypatch, got %s" % f.fp._sock.__class__)
  761. s = f.read()
  762. if len(s) != 0:
  763. print "Fetched", len(s), "bytes via replaced urllib"
  764. else:
  765. raise AssertionError("no text received?")
  766. finally:
  767. uninstall()
  768. def TestMonkeyPatchUDP(address):
  769. # replace the system socket with this module
  770. install()
  771. try:
  772. def UDPServer(address):
  773. listenSocket = stdsocket.socket(AF_INET, SOCK_DGRAM)
  774. listenSocket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
  775. listenSocket.bind(address)
  776. # Apparently each call to recvfrom maps to an incoming
  777. # packet and if we only ask for part of that packet, the
  778. # rest is lost. We really need a proper unittest suite
  779. # which tests this module against the normal socket
  780. # module.
  781. print "waiting to receive"
  782. rdata = ""
  783. while len(rdata) < 512:
  784. data, address = listenSocket.recvfrom(4096)
  785. print "received", data, len(data)
  786. rdata += data
  787. def UDPClient(address):
  788. clientSocket = stdsocket.socket(AF_INET, SOCK_DGRAM)
  789. # clientSocket.connect(address)
  790. print "sending 512 byte packet"
  791. sentBytes = clientSocket.sendto("-"+ ("*" * 510) +"-", address)
  792. print "sent 512 byte packet", sentBytes
  793. stackless.tasklet(UDPServer)(address)
  794. stackless.tasklet(UDPClient)(address)
  795. stackless.run()
  796. finally:
  797. uninstall()
    # Optional flag: start with scheduling disabled (exercises the
    # deferred-start code path via ready_to_schedule).
    if "notready" in sys.argv:
        sys.argv.remove("notready")
        ready_to_schedule(False)
    if len(sys.argv) == 2:
        # Standalone mode: run one half of the TCP test against an external
        # peer, either directly ("client"/"server") or wrapped in a tasklet
        # under the stackless scheduler ("slpclient"/"slpserver").
        if sys.argv[1] == "client":
            print "client started"
            TestTCPClient(testAddress)
            print "client exited"
        elif sys.argv[1] == "slpclient":
            print "client started"
            stackless.tasklet(TestTCPClient)(testAddress)
            stackless.run()
            print "client exited"
        elif sys.argv[1] == "server":
            print "server started"
            TestTCPServer(testAddress)
            print "server exited"
        elif sys.argv[1] == "slpserver":
            print "server started"
            stackless.tasklet(TestTCPServer)(testAddress)
            stackless.run()
            print "server exited"
        else:
            print "Usage:", sys.argv[0], "[client|server|slpclient|slpserver]"
            sys.exit(1)
    else:
        # Default mode: run the full in-process test suite -- TCP
        # client/server under the monkey-patch, then urllib, then UDP.
        print "* Running client/server test"
        install()
        try:
            stackless.tasklet(TestTCPServer)(testAddress)
            stackless.tasklet(TestTCPClient)(testAddress)
            stackless.run()
        finally:
            uninstall()
        print "* Running urllib test"
        # TestMonkeyPatchUrllib does its own install()/uninstall().
        stackless.tasklet(TestMonkeyPatchUrllib)("http://python.org/")
        stackless.run()
        print "* Running udp test"
        TestMonkeyPatchUDP(testAddress)
        print "result: SUCCESS"