
/libraries/stacklesslib/stacklesslib/replacements/socket.py

http://stacklessexamples.googlecode.com/
#
# Stackless compatible socket module.
#
# Author: Richard Tew <richard.m.tew@gmail.com>
#
# Feel free to email me with any questions, comments, or suggestions for
# improvement.
#
# Remaining work:
#
# = Test suite that verifies that emulated behaviour is correct.
# = When closing the socket, pending senders are sent ECONNRESET.
#   This was obtained by opening a server socket, connecting a
#   client and then closing the server.  Then the client did a
#   send and got ECONNRESET.
# = Asyncore does not add that much to this module.  In fact, its
#   limitations and differences between implementations in different Python
#   versions just complicate things.
# = Select on Windows only handles 512 sockets at a time.  So if there
#   are more sockets than that, then they need to be separated and
#   batched around this limitation.
# = It should be possible to have this wrap different mechanisms of
#   asynchronous IO, from select to IO completion ports.
# = UDP support is mostly there due to the new hands off approach, but
#   there are a few spots like handle_write and timeout handling, which need
#   to be dealt with.
#
# Python standard library socket unit test state:
#
# - 2.5: Bad.
# - 2.6: Excellent (two UDP failures).
# - 2.7: Excellent (two UDP failures).
#
# This module is otherwise known to generally work for 2.5, 2.6 and 2.7.
#
# Small parts of this code were contributed back with permission from an
# internal version of this module in use at CCP Games.
#
from __future__ import absolute_import

import asyncore
from collections import deque
import gc
import logging
import select
import socket as stdsocket # We need the "socket" name for the function we export.
import sys
import time
import types
import weakref

import stackless

# If you pump the scheduler, you may wish to prevent it from staying
# non-empty for prolonged periods of time.  If you do not pump the scheduler,
# you may instead wish to prevent individual calls to poll() from running too
# long.  Either way, limiting the work done per socket gives all managed
# sockets a fairer chance at being read from, rather than paying prolonged
# attention to sockets with more incoming data.
#
# These values govern how long a poll() call spends at a given attempt
# of reading the data present on a given socket.
#
VALUE_MAX_NONBLOCKINGREAD_SIZE = 1000000
VALUE_MAX_NONBLOCKINGREAD_CALLS = 100
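
# Example (sketch): how an application might tune these caps before creating
# sockets.  The numbers are illustrative only, and the import path is assumed
# from this file's location within stacklesslib:
#
#   import stacklesslib.replacements.socket as slsocket
#   slsocket.VALUE_MAX_NONBLOCKINGREAD_SIZE = 4 * 1024 * 1024
#   slsocket.VALUE_MAX_NONBLOCKINGREAD_CALLS = 250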

## Monkey-patching support..

# We need this so that sockets are cleared out when they are no longer in use.
# In fact, it is essential to correct operation of this code.
asyncore.socket_map = weakref.WeakValueDictionary()

try:
    from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, ECONNRESET, \
         ENOTCONN, ESHUTDOWN, EINTR, EISCONN, EBADF, ECONNABORTED, \
         ECONNREFUSED
except Exception:
    # Fallback on hard-coded PS3 constants.
    EALREADY = 37
    EINPROGRESS = 36
    EWOULDBLOCK = 35
    ECONNRESET = 54
    ENOTCONN = 57
    ESHUTDOWN = 58
    EINTR = 4
    EISCONN = 56
    EBADF = 9
    ECONNABORTED = 53
    ECONNREFUSED = 61

# If we are to masquerade as the socket module, we need to provide the constants.
if "__all__" in stdsocket.__dict__:
    __all__ = stdsocket.__all__
    for k, v in stdsocket.__dict__.iteritems():
        if k in __all__:
            globals()[k] = v
        elif k == "EBADF":
            globals()[k] = v
else:
    for k, v in stdsocket.__dict__.iteritems():
        if k.upper() == k:
            globals()[k] = v
    error = stdsocket.error
    timeout = stdsocket.timeout
    # WARNING: this function blocks and is not thread safe.
    # The only solution is to spawn a thread to handle all
    # getaddrinfo requests.  Implementing a stackless DNS
    # lookup service is only second best as getaddrinfo may
    # use other methods.
    getaddrinfo = stdsocket.getaddrinfo

# urllib2 apparently uses this directly.  We need to cater for that.
_fileobject = stdsocket._fileobject
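
# A minimal sketch of the threaded getaddrinfo approach described above.  It
# assumes a Stackless build that allows a channel send from a foreign thread;
# "tasklet_getaddrinfo" and "worker" are illustrative names, not part of this
# module's API:
#
#   import threading
#
#   def tasklet_getaddrinfo(*args):
#       # Resolve on a worker thread so that only the calling tasklet waits.
#       channel = stackless.channel()
#       def worker():
#           try:
#               channel.send(stdsocket.getaddrinfo(*args))
#           except Exception, e:
#               channel.send_exception(e.__class__, *e.args)
#       threading.Thread(target=worker).start()
#       return channel.receive()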

# Someone needs to invoke asyncore.poll() regularly to keep the socket
# data moving.  The "ManageSockets" function here is a simple example
# of such a function.  It is started by StartManager(), which uses the
# global "managerRunning" to ensure that no more than one copy is
# running.
#
# If you think you can do this better, register an alternative to
# StartManager using stacklesssocket_manager().  Your function will be
# called every time a new socket is created; it's your responsibility
# to ensure it doesn't start multiple copies of itself unnecessarily.
# A sketch of one such alternative follows stacklesssocket_manager() below.
#
# By Nike: Added poll_interval on install to have it configurable from outside.

managerRunning = False
poll_interval = 0.05

def ManageSockets():
    global managerRunning

    try:
        while len(asyncore.socket_map):
            # Check the sockets for activity.
            # print "POLL"
            asyncore.poll(poll_interval)
            # Yield to give other tasklets a chance to be scheduled.
            # print "SCHED"
            _schedule_func()
    finally:
        managerRunning = False

def StartManager():
    global managerRunning
    if not managerRunning:
        managerRunning = True
        return stackless.tasklet(ManageSockets)()

_schedule_func = stackless.schedule
_manage_sockets_func = StartManager
_sleep_func = None
_timeout_func = None

def can_timeout():
    return _sleep_func is not None or _timeout_func is not None
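
# Timeouts only work once one of the hooks above is provided.  A minimal
# sketch, assuming the application supplies a cooperative sleep; "my_sleep"
# and "sleepers" are illustrative names, and something else in the
# application must wake expired entries by sending on their channels:
#
#   sleepers = []
#
#   def my_sleep(seconds):
#       channel = stackless.channel()
#       sleepers.append((time.time() + seconds, channel))
#       channel.receive()
#
#   import stacklesslib.replacements.socket as slsocket
#   slsocket._sleep_func = my_sleep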

def stacklesssocket_manager(mgr):
    global _manage_sockets_func
    _manage_sockets_func = mgr
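
# Example (sketch): an application that pumps asyncore from its own main loop
# can register a do-nothing manager so that no extra tasklet is started when
# sockets are created.  "run_one_frame" is an illustrative name, and the
# import path is assumed from this file's location:
#
#   import stacklesslib.replacements.socket as slsocket
#   slsocket.install()
#   slsocket.stacklesssocket_manager(lambda: None)  # the application pumps instead
#
#   def run_one_frame():
#       asyncore.poll(0.01)
#       stackless.schedule()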

def socket(*args, **kwargs):
    import sys
    if "socket" in sys.modules and sys.modules["socket"] is not stdsocket:
        raise RuntimeError("Use 'stacklesssocket.install' instead of replacing the 'socket' module")

_realsocket_old = stdsocket._realsocket
_socketobject_old = stdsocket._socketobject

class _socketobject_new(_socketobject_old):
    def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, _sock=None):
        # We need to do this here.
        if _sock is None:
            _sock = _realsocket_old(family, type, proto)
            _sock = _fakesocket(_sock)
            _manage_sockets_func()
        _socketobject_old.__init__(self, family, type, proto, _sock)
        if not isinstance(self._sock, _fakesocket):
            raise RuntimeError("bad socket")

    def accept(self):
        sock, addr = self._sock.accept()
        sock = _fakesocket(sock)
        sock.wasConnected = True
        return _socketobject_new(_sock=sock), addr

    accept.__doc__ = _socketobject_old.accept.__doc__

def make_blocking_socket(family=AF_INET, type=SOCK_STREAM, proto=0):
    """
    Sometimes you may want to create a normal Python socket, even when
    monkey-patching is in effect.  One use case might be when you are doing
    socket operations on the last runnable tasklet; if those operations are
    small writes on a non-connected UDP socket, you might as well just use a
    blocking socket, as the effect of blocking is negligible.
    """
    _sock = _realsocket_old(family, type, proto)
    return _socketobject_old(_sock=_sock)
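
# Example usage (sketch): a plain blocking socket for a small fire-and-forget
# UDP write while the monkey-patch is installed; the address is illustrative:
#
#   sock = make_blocking_socket(AF_INET, SOCK_DGRAM)
#   sock.sendto("ping", ("127.0.0.1", 9999))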

def install(pi=None):
    global poll_interval
    if stdsocket._realsocket is socket:
        raise StandardError("Still installed")
    stdsocket._realsocket = socket
    stdsocket.socket = stdsocket.SocketType = stdsocket._socketobject = _socketobject_new
    if pi is not None:
        poll_interval = pi

def uninstall():
    stdsocket._realsocket = _realsocket_old
    stdsocket.socket = stdsocket.SocketType = stdsocket._socketobject = _socketobject_old
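
# Typical usage (sketch): install the replacement before importing code that
# uses the standard socket module, and uninstall afterwards.  urllib is just
# an example consumer, mirroring TestMonkeyPatchUrllib in the test code below:
#
#   install()
#   try:
#       import urllib
#       stackless.tasklet(urllib.urlopen)("http://python.org/")
#       stackless.run()
#   finally:
#       uninstall()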

READY_TO_SCHEDULE_TAG = "_SET_ASIDE"

def ready_to_schedule(flag):
    """
    There may be cases where it is desirable to have socket operations happen
    before an application starts up its framework, which would then poll
    asyncore.  This function is intended to allow all sockets to be switched
    between working "stacklessly" or working directly on their underlying
    socket objects in a blocking manner.

    Note that sockets created while this is in effect lack attribute values
    that asyncore or this module may have set if the sockets were created in
    a fully monkey-patched manner.

    An example usage appears after this function.
    """
    def reroute_wrapper(funcName):
        def reroute_call(self, *args, **kwargs):
            if READY_TO_SCHEDULE_TAG not in _fakesocket.__dict__:
                return
            return getattr(self.socket, funcName)(*args, **kwargs)
        return reroute_call

    def update_method_referrers(methodName, oldClassMethod, newClassMethod):
        """
        The instance methods we need to update are stored in slots on
        instances of socket._socketobject (actually our replacement subclass
        _socketobject_new).
        """
        for referrer1 in gc.get_referrers(oldClassMethod):
            if isinstance(referrer1, types.MethodType):
                for referrer2 in gc.get_referrers(referrer1):
                    if isinstance(referrer2, _socketobject_new):
                        setattr(referrer2, methodName, types.MethodType(newClassMethod, referrer1.im_self, referrer1.im_class))

    # Guard against removal if not in place.
    if flag:
        if READY_TO_SCHEDULE_TAG not in _fakesocket.__dict__:
            return
        del _fakesocket.__dict__[READY_TO_SCHEDULE_TAG]
    else:
        _fakesocket.__dict__[READY_TO_SCHEDULE_TAG] = None
    # sys.__stdout__.write("READY_TO_SCHEDULE %s\n" % flag)

    # Play switcheroo with the attributes to get direct socket usage, or normal socket usage.
    for attributeName in dir(_realsocket_old):
        if not attributeName.startswith("_"):
            storageAttributeName = attributeName +"_SET_ASIDE"
            if flag:
                storedValue = _fakesocket.__dict__.pop(storageAttributeName, None)
                if storedValue is not None:
                    rerouteValue = _fakesocket.__dict__[attributeName]
                    # sys.__stdout__.write("___ RESTORING %s (AS %s) (WAS %s)\n" % (attributeName, storedValue, rerouteValue))
                    _fakesocket.__dict__[attributeName] = storedValue
                    update_method_referrers(attributeName, rerouteValue, storedValue)
            else:
                if attributeName in _fakesocket.__dict__:
                    # sys.__stdout__.write("___ STORING %s = %s\n" % (attributeName, _fakesocket.__dict__[attributeName]))
                    _fakesocket.__dict__[storageAttributeName] = _fakesocket.__dict__[attributeName]
                    _fakesocket.__dict__[attributeName] = reroute_wrapper(attributeName)
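
# Example usage (sketch), mirroring the "notready" flag handled in the test
# code at the bottom of this file: do early socket work directly on the real
# sockets before anything pumps asyncore, then switch back.
# "bootstrap_configuration" is an illustrative placeholder:
#
#   ready_to_schedule(False)    # operate on the underlying sockets, blocking
#   bootstrap_configuration()
#   ready_to_schedule(True)     # resume normal stackless operation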

# asyncore in Python 2.6 treats socket connection errors as connections.
if sys.version_info[0] == 2 and sys.version_info[1] == 6:
    class asyncore_dispatcher(asyncore.dispatcher):
        def handle_connect_event(self):
            err = self.socket.getsockopt(stdsocket.SOL_SOCKET, stdsocket.SO_ERROR)
            if err != 0:
                raise stdsocket.error(err, asyncore._strerror(err))
            super(asyncore_dispatcher, self).handle_connect_event()
else:
    asyncore_dispatcher = asyncore.dispatcher

class _fakesocket(asyncore_dispatcher):
    connectChannel = None
    acceptChannel = None
    wasConnected = False

    _timeout = None
    _blocking = True

    lastReadChannelRef = None
    lastReadTally = 0
    lastReadCalls = 0

    def __init__(self, realSocket):
        # This is worth doing.  I was passing in an invalid socket which
        # was an instance of _fakesocket and it was causing tasklet death.
        if not isinstance(realSocket, _realsocket_old):
            raise StandardError("An invalid socket passed to fakesocket %s" % realSocket.__class__)

        # This will register the real socket in the internal socket map.
        asyncore_dispatcher.__init__(self, realSocket)

        self.readQueue = deque()
        self.writeQueue = deque()
        self.sendToBuffers = deque()

        if can_timeout():
            self._timeout = stdsocket.getdefaulttimeout()

    def receive_with_timeout(self, channel):
        if self._timeout is not None:
            # Start a timing out process.
            # a) Engage a pre-existing external tasklet to send an exception on
            #    our channel if it has a receiver, if we are still there when
            #    it times out.
            # b) Launch a tasklet that does a sleep, and sends an exception if
            #    we are still waiting, when it is awoken.
            # Block waiting for a send.
            if _timeout_func is not None:
                # You will want to use this if you are using sockets in a
                # different thread from your sleep functionality.
                _timeout_func(self._timeout, channel, (timeout, "timed out"))
            elif _sleep_func is not None:
                stackless.tasklet(self._manage_receive_with_timeout)(channel)
            else:
                raise NotImplementedError("should not be here")

            try:
                ret = channel.receive()
            except BaseException, e:
                raise e

            return ret
        else:
            return channel.receive()

    def _manage_receive_with_timeout(self, channel):
        if channel.balance < 0:
            _sleep_func(self._timeout)
            if channel.balance < 0:
                channel.send_exception(timeout, "timed out")

    def __del__(self):
        # There are no more users (sockets or files) of this fake socket, we
        # are safe to close it fully.  If we don't, asyncore will choke on
        # the weakref failures.
        self.close()

    # The asyncore version of this function depends on socket being set
    # which is not the case when this fake socket has been closed.
    def __getattr__(self, attr):
        if not hasattr(self, "socket"):
            raise AttributeError("socket attribute unset on '"+ attr +"' lookup")
        return getattr(self.socket, attr)

    ## Asyncore potential activity indicators.

    def readable(self):
        if self.socket.type == SOCK_DGRAM:
            return True
        if len(self.readQueue):
            return True
        if self.acceptChannel is not None and self.acceptChannel.balance < 0:
            return True
        if self.connectChannel is not None and self.connectChannel.balance < 0:
            return True
        return False

    def writable(self):
        if self.socket.type != SOCK_DGRAM and not self.connected:
            return True
        if len(self.writeQueue):
            return True
        if len(self.sendToBuffers):
            return True
        return False

    ## Overridden socket methods.

    def accept(self):
        self._ensure_non_blocking_read()
        if not self.acceptChannel:
            self.acceptChannel = stackless.channel()
        return self.receive_with_timeout(self.acceptChannel)

    def connect(self, address):
        """
        If a timeout is set for the connection attempt, and the timeout occurs,
        then it is the responsibility of the user to close the socket, should
        they not wish the connection to potentially establish anyway.
        """
        asyncore_dispatcher.connect(self, address)

        # UDP sockets do not connect.
        if self.socket.type != SOCK_DGRAM and not self.connected:
            if not self.connectChannel:
                self.connectChannel = stackless.channel()
                # Prefer the sender.  Do not block when sending; given that
                # there is a tasklet known to be waiting, this will happen.
                self.connectChannel.preference = 1
            self.receive_with_timeout(self.connectChannel)

    def _send(self, data, flags):
        self._ensure_connected()

        channel = stackless.channel()
        channel.preference = 1 # Prefer the sender.
        self.writeQueue.append((channel, flags, data))
        return self.receive_with_timeout(channel)

    def send(self, data, flags=0):
        return self._send(data, flags)

    def sendall(self, data, flags=0):
        while len(data):
            nbytes = self._send(data, flags)
            if nbytes == 0:
                raise Exception("completely unexpected situation, no data sent")
            data = data[nbytes:]

    def sendto(self, sendData, sendArg1=None, sendArg2=None):
        # sendto(data, address)
        # sendto(data [, flags], address)
        if sendArg2 is not None:
            flags = sendArg1
            sendAddress = sendArg2
        else:
            flags = 0
            sendAddress = sendArg1

        waitChannel = None
        for idx, (data, address, channel, sentBytes) in enumerate(self.sendToBuffers):
            if address == sendAddress:
                self.sendToBuffers[idx] = (data + sendData, address, channel, sentBytes)
                waitChannel = channel
                break

        if waitChannel is None:
            waitChannel = stackless.channel()
            self.sendToBuffers.append((sendData, sendAddress, waitChannel, 0))

        return self.receive_with_timeout(waitChannel)

    def _recv(self, methodName, args, sizeIdx=0):
        self._ensure_non_blocking_read()

        if self._fileno is None:
            return ""

        if len(args) >= sizeIdx+1:
            generalArgs = list(args)
            generalArgs[sizeIdx] = 0
            generalArgs = tuple(generalArgs)
        else:
            generalArgs = args

        #print self._fileno, "_recv:---ENTER---", (methodName, args)

        while True:
            channel = None
            if self.lastReadChannelRef is not None and self.lastReadTally < VALUE_MAX_NONBLOCKINGREAD_SIZE and self.lastReadCalls < VALUE_MAX_NONBLOCKINGREAD_CALLS:
                channel = self.lastReadChannelRef()
                self.lastReadChannelRef = None
            #elif self.lastReadTally >= VALUE_MAX_NONBLOCKINGREAD_SIZE or self.lastReadCalls >= VALUE_MAX_NONBLOCKINGREAD_CALLS:
            #    print "_recv:FORCE-CHANNEL-CHANGE %d %d" % (self.lastReadTally, self.lastReadCalls)

            if channel is None:
                channel = stackless.channel()
                channel.preference = -1 # Prefer the receiver.
                self.lastReadTally = self.lastReadCalls = 0
                #print self._fileno, "_recv:NEW-CHANNEL", id(channel)
                self.readQueue.append([ channel, methodName, args ])
            else:
                self.readQueue[0][1:] = (methodName, args)
                #print self._fileno, "_recv:RECYCLE-CHANNEL", id(channel), self.lastReadTally

            try:
                ret = self.receive_with_timeout(channel)
            except stdsocket.error, e:
                if isinstance(e, stdsocket.error) and e.args[0] == EWOULDBLOCK:
                    #print self._fileno, "_recv:BLOCK-RETRY", id(channel), "-" * 30
                    continue
                else:
                    raise
            break

        # Storing the last channel is a way to communicate with the producer
        # tasklet, so that it immediately tries to read more when we do the
        # next receive.  This is to optimize cases where one can do multiple
        # recv() calls without blocking, but each call only gives you a
        # limited amount of data.  We then get a tight tasklet interaction
        # between consumer and producer until EWOULDBLOCK is received from
        # the socket.
        self.lastReadChannelRef = weakref.ref(channel)

        if isinstance(ret, types.StringTypes):
            recvlen = len(ret)
        elif methodName == "recvfrom":
            recvlen = len(ret[0])
        elif methodName == "recvfrom_into":
            recvlen = ret[0]
        else:
            recvlen = ret
        self.lastReadTally += recvlen
        self.lastReadCalls += 1

        #print self._fileno, "_recv:---EXIT---", (methodName, args), recvlen, self.lastReadChannelRef()
        return ret

    def recv(self, *args):
        if self.socket.type != SOCK_DGRAM and not self.connected:
            # Sockets which have never been connected do this.
            if not self.wasConnected:
                raise error(ENOTCONN, 'Socket is not connected')
        return self._recv("recv", args)

    def recv_into(self, *args):
        if self.socket.type != SOCK_DGRAM and not self.connected:
            # Sockets which have never been connected do this.
            if not self.wasConnected:
                raise error(ENOTCONN, 'Socket is not connected')
        return self._recv("recv_into", args, sizeIdx=1)

    def recvfrom(self, *args):
        return self._recv("recvfrom", args)

    def recvfrom_into(self, *args):
        return self._recv("recvfrom_into", args, sizeIdx=1)

    def close(self):
        if self._fileno is None:
            return

        asyncore_dispatcher.close(self)
        self.connected = False
        self.accepting = False

        # Clear out all the channels with relevant errors.
        while self.acceptChannel and self.acceptChannel.balance < 0:
            self.acceptChannel.send_exception(stdsocket.error, EBADF, 'Bad file descriptor')
        while self.connectChannel and self.connectChannel.balance < 0:
            self.connectChannel.send_exception(stdsocket.error, ECONNREFUSED, 'Connection refused')

        self._clear_queue(self.writeQueue, stdsocket.error, ECONNRESET)
        self._clear_queue(self.readQueue)

    def _clear_queue(self, queue, *args):
        for t in queue:
            if t[0].balance < 0:
                if len(args):
                    t[0].send_exception(*args)
                else:
                    t[0].send("")
        queue.clear()

    # asyncore doesn't support this.  Why not?
    def fileno(self):
        return self.socket.fileno()

    def _is_non_blocking(self):
        return not self._blocking or self._timeout == 0.0

    def _ensure_non_blocking_read(self):
        if self._is_non_blocking():
            # Ensure there is something on the socket before fetching it.
            # Otherwise, raise an error complaining that the operation would block.
            r, w, e = select.select([ self ], [], [], 0.0)
            if not r:
                raise stdsocket.error(EWOULDBLOCK, "The socket operation could not complete without blocking")

    def _ensure_connected(self):
        if not self.connected:
            # The socket was never connected.
            if not self.wasConnected:
                raise error(ENOTCONN, "Socket is not connected")
            # The socket has been closed already.
            raise error(EBADF, 'Bad file descriptor')

    def setblocking(self, flag):
        self._blocking = flag

    def gettimeout(self):
        return self._timeout

    def settimeout(self, value):
        if value and not can_timeout():
            raise RuntimeError("This is a stackless socket - to have timeout support you need to provide a sleep function")
        self._timeout = value

    def handle_accept(self):
        if self.acceptChannel and self.acceptChannel.balance < 0:
            t = asyncore.dispatcher.accept(self)
            if t is None:
                return
            t[0].setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
            stackless.tasklet(self.acceptChannel.send)(t)

    # Inform the blocked connect call that the connection has been made.
    def handle_connect(self):
        if self.socket.type != SOCK_DGRAM:
            if self.connectChannel and self.connectChannel.balance < 0:
                self.wasConnected = True
                self.connectChannel.send(None)

    # Asyncore says it's done, but self.readBuffer may be non-empty,
    # so we can't close yet.  Do nothing and let 'recv' trigger the close.
    def handle_close(self):
        # These do not interfere with ongoing reads, but should prevent
        # sends and the like from going through.
        self.connected = False
        self.accepting = False

        # This also gets called in the case that a non-blocking connect gets
        # back to us with a no.  If we don't reject the connect, then all
        # connect calls that do not connect will block indefinitely.
        if self.connectChannel is not None:
            self.close()

    # Some error, just close the channel and let that raise errors to
    # blocked calls.
    def handle_expt(self):
        if False:
            import traceback
            print "handle_expt: START"
            traceback.print_exc()
            print "handle_expt: END"
        self.close()

    def handle_error(self):
        self.close()

    def handle_read(self):
        """
        This will be called once per poll call, per socket with data in its
        buffer to be read.  If you call poll once every 30th of a second, then
        how fast you can read incoming data is rate limited by the packet size
        it arrives in.  In order to deal with the worst case scenario,
        advantage is taken of how scheduling works in order to keep reading
        until there is no more data left to read.

        1. This function is called indicating data is present to read.
        2. The desired amount is read and a send call is made on the channel with it.
        3. The function is blocked on that action and the tasklet it is running in is reinserted into the scheduler.
        4. The tasklet that made the read related socket call is awakened with the given data.
        5. It returns the data to the function that made that call.
        6. The function that made the call makes another read related socket call.
           a) If the call is similar enough to the last call, then the previous channel is retrieved.
           b) Otherwise, a new channel is created.
        7. The tasklet that is making the read related socket call is blocked on the channel.
        8. The tasklet that was blocked sending gets scheduled again.
           a) If there is a tasklet blocked on the channel that it was using, then goto 2.
           b) Otherwise, the function exits.

        Note that if this function loops indefinitely, and the scheduler is
        pumped rather than continuously run, the pumping application will stay
        in its pump call for a prolonged period of time, potentially starving
        the rest of the application for CPU time.  An attempt is made in _recv
        to limit the amount of data read in this manner to a fixed amount, and
        it lets this function exit if that amount is exceeded.  However, it is
        up to the user of Stackless to understand how their application
        schedules and blocks, and there are situations where small reads may
        still effectively loop indefinitely.
        """
        if not len(self.readQueue):
            return

        channel, methodName, args = self.readQueue[0]
        #print self._fileno, "handle_read:---ENTER---", id(channel)
        while channel.balance < 0:
            args = self.readQueue[0][2]
            #print self._fileno, "handle_read:CALL", id(channel), args
            try:
                result = getattr(self.socket, methodName)(*args)
                #print self._fileno, "handle_read:RESULT", id(channel), len(result)
            except Exception, e:
                # winsock sometimes throws ENOTCONN
                #print self._fileno, "handle_read:EXCEPTION", id(channel), e
                if isinstance(e, stdsocket.error) and e.args[0] in [ECONNRESET, ENOTCONN, ESHUTDOWN, ECONNABORTED]:
                    self.handle_close()
                    result = ''
                elif channel.balance < 0:
                    channel.send_exception(e.__class__, *e.args)

            if channel.balance < 0:
                #print self._fileno, "handle_read:RETURN-RESULT", id(channel), len(result)
                channel.send(result)

        if len(self.readQueue) and self.readQueue[0][0] is channel:
            del self.readQueue[0]
        #print self._fileno, "handle_read:---EXIT---", id(channel)

    def handle_write(self):
        """
        This function still needs work WRT UDP.
        """
        if len(self.writeQueue):
            channel, flags, data = self.writeQueue[0]
            del self.writeQueue[0]

            # asyncore does not expose sending the flags.
            def asyncore_send(self, data, flags=0):
                try:
                    result = self.socket.send(data, flags)
                    return result
                except stdsocket.error, why:
                    # logging.root.exception("SOME SEND ERROR")
                    if why.args[0] == EWOULDBLOCK:
                        return 0
                    # Ensure the sender appears to have directly received this exception.
                    channel.send_exception(why.__class__, *why.args)
                    if why.args[0] in (ECONNRESET, ENOTCONN, ESHUTDOWN, ECONNABORTED):
                        self.handle_close()
                    return 0

            nbytes = asyncore_send(self, data, flags)
            if channel.balance < 0:
                channel.send(nbytes)
        elif len(self.sendToBuffers):
            data, address, channel, oldSentBytes = self.sendToBuffers[0]
            sentBytes = self.socket.sendto(data, address)
            totalSentBytes = oldSentBytes + sentBytes
            if len(data) > sentBytes:
                self.sendToBuffers[0] = data[sentBytes:], address, channel, totalSentBytes
            else:
                del self.sendToBuffers[0]
                stackless.tasklet(channel.send)(totalSentBytes)

if False:
    def dump_socket_stack_traces():
        import traceback
        for skt in asyncore.socket_map.values():
            for k, v in skt.__dict__.items():
                if isinstance(v, stackless.channel) and v.queue:
                    i = 0
                    current = v.queue
                    while i == 0 or v.queue is not current:
                        print "%s.%s.%s" % (skt, k, i)
                        traceback.print_stack(v.queue.frame)
                        i += 1

if __name__ == '__main__':
    import struct
    # Test code goes here.
    testAddress = "127.0.0.1", 3000
    info = -12345678
    data = struct.pack("i", info)
    dataLength = len(data)

    def TestTCPServer(address):
        global info, data, dataLength

        print "server listen socket creation"
        listenSocket = stdsocket.socket(AF_INET, SOCK_STREAM)
        listenSocket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
        listenSocket.bind(address)
        listenSocket.listen(5)

        NUM_TESTS = 2

        i = 1
        while i < NUM_TESTS + 1:
            # No need to schedule this tasklet as the accept should yield most
            # of the time on the underlying channel.
            print "server connection wait", i
            currentSocket, clientAddress = listenSocket.accept()
            print "server", i, "listen socket", currentSocket.fileno(), "from", clientAddress

            if i == 1:
                print "server closing (a)", i, "fd", currentSocket.fileno(), "id", id(currentSocket)
                currentSocket.close()
                print "server closed (a)", i
            elif i == 2:
                print "server test", i, "send"
                currentSocket.send(data)
                print "server test", i, "recv"
                if currentSocket.recv(4) != "":
                    print "server recv(1)", i, "FAIL"
                    break
                # Multiple empty recvs are fine.
                if currentSocket.recv(4) != "":
                    print "server recv(2)", i, "FAIL"
                    break
            else:
                print "server closing (b)", i, "fd", currentSocket.fileno(), "id", id(currentSocket)
                currentSocket.close()

            print "server test", i, "OK"
            i += 1

        if i != NUM_TESTS+1:
            print "server: FAIL", i
        else:
            print "server: OK", i

        print "Done server"

    def TestTCPClient(address):
        global info, data, dataLength

        # Attempt 1:
        clientSocket = stdsocket.socket()
        clientSocket.connect(address)
        print "client connection (1) fd", clientSocket.fileno(), "id", id(clientSocket._sock), "waiting to recv"
        if clientSocket.recv(5) != "":
            print "client test", 1, "FAIL"
        else:
            print "client test", 1, "OK"

        # Attempt 2:
        clientSocket = stdsocket.socket()
        clientSocket.connect(address)
        print "client connection (2) fd", clientSocket.fileno(), "id", id(clientSocket._sock), "waiting to recv"
        s = clientSocket.recv(dataLength)
        if s == "":
            print "client test", 2, "FAIL (disconnect)"
        else:
            t = struct.unpack("i", s)
            if t[0] == info:
                print "client test", 2, "OK"
            else:
                print "client test", 2, "FAIL (wrong data)"

        print "client exit"

    def TestMonkeyPatchUrllib(uri):
        # Replace the system socket with this module.
        install()
        try:
            import urllib  # Must occur after monkey-patching!
            f = urllib.urlopen(uri)
            if not isinstance(f.fp._sock, _fakesocket):
                raise AssertionError("failed to apply monkeypatch, got %s" % f.fp._sock.__class__)
            s = f.read()
            if len(s) != 0:
                print "Fetched", len(s), "bytes via replaced urllib"
            else:
                raise AssertionError("no text received?")
        finally:
            uninstall()

    def TestMonkeyPatchUDP(address):
        # Replace the system socket with this module.
        install()
        try:
            def UDPServer(address):
                listenSocket = stdsocket.socket(AF_INET, SOCK_DGRAM)
                listenSocket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
                listenSocket.bind(address)

                # Apparently each call to recvfrom maps to an incoming
                # packet and if we only ask for part of that packet, the
                # rest is lost.  We really need a proper unittest suite
                # which tests this module against the normal socket
                # module.
                print "waiting to receive"
                rdata = ""
                while len(rdata) < 512:
                    data, address = listenSocket.recvfrom(4096)
                    print "received", data, len(data)
                    rdata += data

            def UDPClient(address):
                clientSocket = stdsocket.socket(AF_INET, SOCK_DGRAM)
                # clientSocket.connect(address)
                print "sending 512 byte packet"
                sentBytes = clientSocket.sendto("-"+ ("*" * 510) +"-", address)
                print "sent 512 byte packet", sentBytes

            stackless.tasklet(UDPServer)(address)
            stackless.tasklet(UDPClient)(address)
            stackless.run()
        finally:
            uninstall()
  746. if "notready" in sys.argv:
  747. sys.argv.remove("notready")
  748. ready_to_schedule(False)
  749. if len(sys.argv) == 2:
  750. if sys.argv[1] == "client":
  751. print "client started"
  752. TestTCPClient(testAddress)
  753. print "client exited"
  754. elif sys.argv[1] == "slpclient":
  755. print "client started"
  756. stackless.tasklet(TestTCPClient)(testAddress)
  757. stackless.run()
  758. print "client exited"
  759. elif sys.argv[1] == "server":
  760. print "server started"
  761. TestTCPServer(testAddress)
  762. print "server exited"
  763. elif sys.argv[1] == "slpserver":
  764. print "server started"
  765. stackless.tasklet(TestTCPServer)(testAddress)
  766. stackless.run()
  767. print "server exited"
  768. else:
  769. print "Usage:", sys.argv[0], "[client|server|slpclient|slpserver]"
  770. sys.exit(1)
  771. else:
  772. print "* Running client/server test"
  773. install()
  774. try:
  775. stackless.tasklet(TestTCPServer)(testAddress)
  776. stackless.tasklet(TestTCPClient)(testAddress)
  777. stackless.run()
  778. finally:
  779. uninstall()
  780. print "* Running urllib test"
  781. stackless.tasklet(TestMonkeyPatchUrllib)("http://python.org/")
  782. stackless.run()
  783. print "* Running udp test"
  784. TestMonkeyPatchUDP(testAddress)
  785. print "result: SUCCESS"