PageRenderTime 55ms CodeModel.GetById 13ms RepoModel.GetById 1ms app.codeStats 0ms

/cherrypy/wsgiserver/__init__.py

https://gitlab.com/akila-33/Sick-Beard
Python | 1352 lines | 1261 code | 35 blank | 56 comment | 50 complexity | 4f014219d24622f1ff4132716bfc21a9 MD5 | raw file
  1. """A high-speed, production ready, thread pooled, generic HTTP server.
  2. Simplest example on how to use this module directly
  3. (without using CherryPy's application machinery):
  4. from cherrypy import wsgiserver
  5. def my_crazy_app(environ, start_response):
  6. status = '200 OK'
  7. response_headers = [('Content-type','text/plain')]
  8. start_response(status, response_headers)
  9. return ['Hello world!\n']
  10. server = wsgiserver.CherryPyWSGIServer(
  11. ('0.0.0.0', 8070), my_crazy_app,
  12. server_name='www.cherrypy.example')
  13. The CherryPy WSGI server can serve as many WSGI applications
  14. as you want in one instance by using a WSGIPathInfoDispatcher:
  15. d = WSGIPathInfoDispatcher({'/': my_crazy_app, '/blog': my_blog_app})
  16. server = wsgiserver.CherryPyWSGIServer(('0.0.0.0', 80), d)
  17. Want SSL support? Just set server.ssl_adapter to an SSLAdapter instance.
  18. This won't call the CherryPy engine (application side) at all, only the
  19. HTTP server, which is independent from the rest of CherryPy. Don't
  20. let the name "CherryPyWSGIServer" throw you; the name merely reflects
  21. its origin, not its coupling.
  22. For those of you wanting to understand internals of this module, here's the
  23. basic call flow. The server's listening thread runs a very tight loop,
  24. sticking incoming connections onto a Queue:
  25. server = CherryPyWSGIServer(...)
  26. server.start()
  27. while True:
  28. tick()
  29. # This blocks until a request comes in:
  30. child = socket.accept()
  31. conn = HTTPConnection(child, ...)
  32. server.requests.put(conn)
  33. Worker threads are kept in a pool and poll the Queue, popping off and then
  34. handling each connection in turn. Each connection can consist of an arbitrary
  35. number of requests and their responses, so we run a nested loop:
  36. while True:
  37. conn = server.requests.get()
  38. conn.communicate()
  39. -> while True:
  40. req = HTTPRequest(...)
  41. req.parse_request()
  42. -> # Read the Request-Line, e.g. "GET /page HTTP/1.1"
  43. req.rfile.readline()
  44. read_headers(req.rfile, req.inheaders)
  45. req.respond()
  46. -> response = app(...)
  47. try:
  48. for chunk in response:
  49. if chunk:
  50. req.write(chunk)
  51. finally:
  52. if hasattr(response, "close"):
  53. response.close()
  54. if req.close_connection:
  55. return
  56. """
# Canonical HTTP line terminator (RFC 2616): every header/request line
# must end with this two-byte sequence.
CRLF = '\r\n'
import os
import Queue
import re
# Matches a percent-encoded slash case-insensitively; used to split the
# request path so %2F is NOT decoded into a real path separator.
quoted_slash = re.compile("(?i)%2F")
import rfc822
import socket
import sys
if 'win' in sys.platform and not hasattr(socket, 'IPPROTO_IPV6'):
    # Older Windows Python builds omit this constant; 41 is its standard value.
    socket.IPPROTO_IPV6 = 41
try:
    import cStringIO as StringIO
except ImportError:
    import StringIO
# Probe how this Python's socket._fileobject buffers reads: older versions
# keep _rbuf as a str, newer ones as a StringIO.  The CP_fileobject class
# defined later in this module is chosen based on this flag.
_fileobject_uses_str_type = isinstance(socket._fileobject(None)._rbuf, basestring)
import threading
import time
import traceback
from urllib import unquote
from urlparse import urlparse
import warnings
import errno
  79. def plat_specific_errors(*errnames):
  80. """Return error numbers for all errors in errnames on this platform.
  81. The 'errno' module contains different global constants depending on
  82. the specific platform (OS). This function will return the list of
  83. numeric values for a given list of potential names.
  84. """
  85. errno_names = dir(errno)
  86. nums = [getattr(errno, k) for k in errnames if k in errno_names]
  87. # de-dupe the list
  88. return dict.fromkeys(nums).keys()
# Errno values meaning "system call interrupted" -- reads/accepts are retried.
socket_error_eintr = plat_specific_errors("EINTR", "WSAEINTR")

# Errno values indicating the client disconnected or the socket is dead;
# these are swallowed when writing responses rather than raised.
socket_errors_to_ignore = plat_specific_errors(
    "EPIPE",
    "EBADF", "WSAEBADF",
    "ENOTSOCK", "WSAENOTSOCK",
    "ETIMEDOUT", "WSAETIMEDOUT",
    "ECONNREFUSED", "WSAECONNREFUSED",
    "ECONNRESET", "WSAECONNRESET",
    "ECONNABORTED", "WSAECONNABORTED",
    "ENETRESET", "WSAENETRESET",
    "EHOSTDOWN", "EHOSTUNREACH",
)
# Some socket/SSL layers report timeouts with these message strings instead
# of numeric errnos, so this list deliberately mixes ints and strings; the
# except-handlers test "x.args[0] not in socket_errors_to_ignore".
socket_errors_to_ignore.append("timed out")
socket_errors_to_ignore.append("The read operation timed out")

# Errno values meaning "operation would block" on non-blocking sockets.
socket_errors_nonblocking = plat_specific_errors(
    'EAGAIN', 'EWOULDBLOCK', 'WSAEWOULDBLOCK')

# Headers whose repeated occurrences may be folded into a single
# comma-separated value (RFC 2616 sec 4.2); used by read_headers().
comma_separated_headers = ['Accept', 'Accept-Charset', 'Accept-Encoding',
    'Accept-Language', 'Accept-Ranges', 'Allow', 'Cache-Control',
    'Connection', 'Content-Encoding', 'Content-Language', 'Expect',
    'If-Match', 'If-None-Match', 'Pragma', 'Proxy-Authenticate', 'TE',
    'Trailer', 'Transfer-Encoding', 'Upgrade', 'Vary', 'Via', 'Warning',
    'WWW-Authenticate']
  111. def read_headers(rfile, hdict=None):
  112. """Read headers from the given stream into the given header dict.
  113. If hdict is None, a new header dict is created. Returns the populated
  114. header dict.
  115. Headers which are repeated are folded together using a comma if their
  116. specification so dictates.
  117. This function raises ValueError when the read bytes violate the HTTP spec.
  118. You should probably return "400 Bad Request" if this happens.
  119. """
  120. if hdict is None:
  121. hdict = {}
  122. while True:
  123. line = rfile.readline()
  124. if not line:
  125. # No more data--illegal end of headers
  126. raise ValueError("Illegal end of headers.")
  127. if line == CRLF:
  128. # Normal end of headers
  129. break
  130. if not line.endswith(CRLF):
  131. raise ValueError("HTTP requires CRLF terminators")
  132. if line[0] in ' \t':
  133. # It's a continuation line.
  134. v = line.strip()
  135. else:
  136. try:
  137. k, v = line.split(":", 1)
  138. except ValueError:
  139. raise ValueError("Illegal header line.")
  140. # TODO: what about TE and WWW-Authenticate?
  141. k = k.strip().title()
  142. v = v.strip()
  143. hname = k
  144. if k in comma_separated_headers:
  145. existing = hdict.get(hname)
  146. if existing:
  147. v = ", ".join((existing, v))
  148. hdict[hname] = v
  149. return hdict
  150. class MaxSizeExceeded(Exception):
  151. pass
  152. class SizeCheckWrapper(object):
  153. """Wraps a file-like object, raising MaxSizeExceeded if too large."""
  154. def __init__(self, rfile, maxlen):
  155. self.rfile = rfile
  156. self.maxlen = maxlen
  157. self.bytes_read = 0
  158. def _check_length(self):
  159. if self.maxlen and self.bytes_read > self.maxlen:
  160. raise MaxSizeExceeded()
  161. def read(self, size=None):
  162. data = self.rfile.read(size)
  163. self.bytes_read += len(data)
  164. self._check_length()
  165. return data
  166. def readline(self, size=None):
  167. if size is not None:
  168. data = self.rfile.readline(size)
  169. self.bytes_read += len(data)
  170. self._check_length()
  171. return data
  172. # User didn't specify a size ...
  173. # We read the line in chunks to make sure it's not a 100MB line !
  174. res = []
  175. while True:
  176. data = self.rfile.readline(256)
  177. self.bytes_read += len(data)
  178. self._check_length()
  179. res.append(data)
  180. # See http://www.cherrypy.org/ticket/421
  181. if len(data) < 256 or data[-1:] == "\n":
  182. return ''.join(res)
  183. def readlines(self, sizehint=0):
  184. # Shamelessly stolen from StringIO
  185. total = 0
  186. lines = []
  187. line = self.readline()
  188. while line:
  189. lines.append(line)
  190. total += len(line)
  191. if 0 < sizehint <= total:
  192. break
  193. line = self.readline()
  194. return lines
  195. def close(self):
  196. self.rfile.close()
  197. def __iter__(self):
  198. return self
  199. def next(self):
  200. data = self.rfile.next()
  201. self.bytes_read += len(data)
  202. self._check_length()
  203. return data
  204. class KnownLengthRFile(object):
  205. """Wraps a file-like object, returning an empty string when exhausted."""
  206. def __init__(self, rfile, content_length):
  207. self.rfile = rfile
  208. self.remaining = content_length
  209. def read(self, size=None):
  210. if self.remaining == 0:
  211. return ''
  212. if size is None:
  213. size = self.remaining
  214. else:
  215. size = min(size, self.remaining)
  216. data = self.rfile.read(size)
  217. self.remaining -= len(data)
  218. return data
  219. def readline(self, size=None):
  220. if self.remaining == 0:
  221. return ''
  222. if size is None:
  223. size = self.remaining
  224. else:
  225. size = min(size, self.remaining)
  226. data = self.rfile.readline(size)
  227. self.remaining -= len(data)
  228. return data
  229. def readlines(self, sizehint=0):
  230. # Shamelessly stolen from StringIO
  231. total = 0
  232. lines = []
  233. line = self.readline(sizehint)
  234. while line:
  235. lines.append(line)
  236. total += len(line)
  237. if 0 < sizehint <= total:
  238. break
  239. line = self.readline(sizehint)
  240. return lines
  241. def close(self):
  242. self.rfile.close()
  243. def __iter__(self):
  244. return self
  245. def __next__(self):
  246. data = next(self.rfile)
  247. self.remaining -= len(data)
  248. return data
# NOTE(review): duplicate of the MaxSizeExceeded defined earlier in this
# module.  This second definition rebinds the module-level name to a
# DISTINCT class, so an "except MaxSizeExceeded" only matches whichever
# definition ran last; instances of the first class would not be caught.
# Consider deleting one of the two definitions.
class MaxSizeExceeded(Exception):
    pass
  251. class ChunkedRFile(object):
  252. """Wraps a file-like object, returning an empty string when exhausted.
  253. This class is intended to provide a conforming wsgi.input value for
  254. request entities that have been encoded with the 'chunked' transfer
  255. encoding.
  256. """
  257. def __init__(self, rfile, maxlen, bufsize=8192):
  258. self.rfile = rfile
  259. self.maxlen = maxlen
  260. self.bytes_read = 0
  261. self.buffer = ''
  262. self.bufsize = bufsize
  263. self.closed = False
  264. def _fetch(self):
  265. if self.closed:
  266. return
  267. line = self.rfile.readline()
  268. self.bytes_read += len(line)
  269. if self.maxlen and self.bytes_read > self.maxlen:
  270. raise MaxSizeExceeded("Request Entity Too Large", self.maxlen)
  271. line = line.strip().split(";", 1)
  272. try:
  273. chunk_size = line.pop(0)
  274. chunk_size = int(chunk_size, 16)
  275. except ValueError:
  276. raise ValueError("Bad chunked transfer size: " + repr(chunk_size))
  277. if chunk_size <= 0:
  278. self.closed = True
  279. return
  280. ## if line: chunk_extension = line[0]
  281. if self.maxlen and self.bytes_read + chunk_size > self.maxlen:
  282. raise IOError("Request Entity Too Large")
  283. chunk = self.rfile.read(chunk_size)
  284. self.bytes_read += len(chunk)
  285. self.buffer += chunk
  286. crlf = self.rfile.read(2)
  287. if crlf != CRLF:
  288. raise ValueError(
  289. "Bad chunked transfer coding (expected '\\r\\n', "
  290. "got " + repr(crlf) + ")")
  291. def read(self, size=None):
  292. data = ''
  293. while True:
  294. if size and len(data) >= size:
  295. return data
  296. if not self.buffer:
  297. self._fetch()
  298. if not self.buffer:
  299. # EOF
  300. return data
  301. if size:
  302. remaining = size - len(data)
  303. data += self.buffer[:remaining]
  304. self.buffer = self.buffer[remaining:]
  305. else:
  306. data += self.buffer
  307. def readline(self, size=None):
  308. data = ''
  309. while True:
  310. if size and len(data) >= size:
  311. return data
  312. if not self.buffer:
  313. self._fetch()
  314. if not self.buffer:
  315. # EOF
  316. return data
  317. newline_pos = self.buffer.find('\n')
  318. if size:
  319. if newline_pos == -1:
  320. remaining = size - len(data)
  321. data += self.buffer[:remaining]
  322. self.buffer = self.buffer[remaining:]
  323. else:
  324. remaining = min(size - len(data), newline_pos)
  325. data += self.buffer[:remaining]
  326. self.buffer = self.buffer[remaining:]
  327. else:
  328. if newline_pos == -1:
  329. data += self.buffer
  330. else:
  331. data += self.buffer[:newline_pos]
  332. self.buffer = self.buffer[newline_pos:]
  333. def readlines(self, sizehint=0):
  334. # Shamelessly stolen from StringIO
  335. total = 0
  336. lines = []
  337. line = self.readline(sizehint)
  338. while line:
  339. lines.append(line)
  340. total += len(line)
  341. if 0 < sizehint <= total:
  342. break
  343. line = self.readline(sizehint)
  344. return lines
  345. def read_trailer_lines(self):
  346. if not self.closed:
  347. raise ValueError(
  348. "Cannot read trailers until the request body has been read.")
  349. while True:
  350. line = self.rfile.readline()
  351. if not line:
  352. # No more data--illegal end of headers
  353. raise ValueError("Illegal end of headers.")
  354. self.bytes_read += len(line)
  355. if self.maxlen and self.bytes_read > self.maxlen:
  356. raise IOError("Request Entity Too Large")
  357. if line == CRLF:
  358. # Normal end of headers
  359. break
  360. if not line.endswith(CRLF):
  361. raise ValueError("HTTP requires CRLF terminators")
  362. yield line
  363. def close(self):
  364. self.rfile.close()
  365. def __iter__(self):
  366. # Shamelessly stolen from StringIO
  367. total = 0
  368. line = self.readline(sizehint)
  369. while line:
  370. yield line
  371. total += len(line)
  372. if 0 < sizehint <= total:
  373. break
  374. line = self.readline(sizehint)
class HTTPRequest(object):
    """An HTTP Request (and response).

    A single HTTP connection may consist of multiple request/response pairs.

    server: the Server object which is receiving this request.
    conn: the HTTPConnection object on which this request connected.
    inheaders: a dict of request headers.
    outheaders: a list of header tuples to write in the response.
    ready: when True, the request has been parsed and is ready to begin
        generating the response. When False, signals the calling Connection
        that the response should not be generated and the connection should
        close.
    close_connection: signals the calling Connection that the request
        should close. This does not imply an error! The client and/or
        server may each request that the connection be closed.
    chunked_write: if True, output will be encoded with the "chunked"
        transfer-coding. This value is set automatically inside
        send_headers.
    """

    def __init__(self, server, conn):
        self.server = server
        self.conn = conn

        self.ready = False
        self.started_request = False
        self.scheme = "http"
        # An attached SSL adapter implies every request on this conn is https.
        if self.server.ssl_adapter is not None:
            self.scheme = "https"

        self.inheaders = {}

        self.status = ""
        self.outheaders = []
        self.sent_headers = False
        self.close_connection = False
        self.chunked_write = False

    def parse_request(self):
        """Parse the next HTTP request start-line and message-headers."""
        # Cap header size so a malicious client cannot stream unbounded
        # header data; SizeCheckWrapper raises MaxSizeExceeded past the cap.
        self.rfile = SizeCheckWrapper(self.conn.rfile,
                                      self.server.max_request_header_size)
        try:
            self._parse_request()
        except MaxSizeExceeded:
            self.simple_response("413 Request Entity Too Large")
            return

    def _parse_request(self):
        """Do the actual parsing; sets self.ready on success."""
        # HTTP/1.1 connections are persistent by default. If a client
        # requests a page, then idles (leaves the connection open),
        # then rfile.readline() will raise socket.error("timed out").
        # Note that it does this based on the value given to settimeout(),
        # and doesn't need the client to request or acknowledge the close
        # (although your TCP stack might suffer for it: cf Apache's history
        # with FIN_WAIT_2).
        request_line = self.rfile.readline()

        # Set started_request to True so communicate() knows to send 408
        # from here on out.
        self.started_request = True
        if not request_line:
            # Force self.ready = False so the connection will close.
            self.ready = False
            return

        if request_line == CRLF:
            # RFC 2616 sec 4.1: "...if the server is reading the protocol
            # stream at the beginning of a message and receives a CRLF
            # first, it should ignore the CRLF."
            # But only ignore one leading line! else we enable a DoS.
            request_line = self.rfile.readline()
            if not request_line:
                self.ready = False
                return

        if not request_line.endswith(CRLF):
            self.simple_response(400, "HTTP requires CRLF terminators")
            return

        try:
            method, uri, req_protocol = request_line.strip().split(" ", 2)
        except ValueError:
            self.simple_response(400, "Malformed Request-Line")
            return

        self.uri = uri
        self.method = method

        # uri may be an abs_path (including "http://host.domain.tld");
        scheme, authority, path = self.parse_request_uri(uri)
        # NOTE(review): for an authority-form Request-URI (e.g. CONNECT),
        # parse_request_uri returns path=None, so the '#' test below would
        # raise TypeError -- confirm CONNECT requests cannot reach here.
        if '#' in path:
            self.simple_response("400 Bad Request",
                                 "Illegal #fragment in Request-URI.")
            return

        if scheme:
            self.scheme = scheme

        qs = ''
        if '?' in path:
            path, qs = path.split('?', 1)

        # Unquote the path+params (e.g. "/this%20path" -> "/this path").
        # http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2
        #
        # But note that "...a URI must be separated into its components
        # before the escaped characters within those components can be
        # safely decoded." http://www.ietf.org/rfc/rfc2396.txt, sec 2.4.2
        # Therefore, "/this%2Fpath" becomes "/this%2Fpath", not "/this/path".
        try:
            atoms = [unquote(x) for x in quoted_slash.split(path)]
        except ValueError, ex:
            self.simple_response("400 Bad Request", ex.args[0])
            return
        path = "%2F".join(atoms)
        self.path = path

        # Note that, like wsgiref and most other HTTP servers,
        # we "% HEX HEX"-unquote the path but not the query string.
        self.qs = qs

        # Compare request and server HTTP protocol versions, in case our
        # server does not support the requested protocol. Limit our output
        # to min(req, server). We want the following output:
        #     request    server     actual written   supported response
        #     protocol   protocol  response protocol    feature set
        # a     1.0        1.0           1.0                1.0
        # b     1.0        1.1           1.1                1.0
        # c     1.1        1.0           1.0                1.0
        # d     1.1        1.1           1.1                1.1
        # Notice that, in (b), the response will be "HTTP/1.1" even though
        # the client only understands 1.0. RFC 2616 10.5.6 says we should
        # only return 505 if the _major_ version is different.
        # Indexes 5 and 7 pick the digits out of "HTTP/x.y".
        rp = int(req_protocol[5]), int(req_protocol[7])
        sp = int(self.server.protocol[5]), int(self.server.protocol[7])
        if sp[0] != rp[0]:
            self.simple_response("505 HTTP Version Not Supported")
            return
        self.request_protocol = req_protocol
        self.response_protocol = "HTTP/%s.%s" % min(rp, sp)

        # then all the http headers
        try:
            read_headers(self.rfile, self.inheaders)
        except ValueError, ex:
            self.simple_response("400 Bad Request", ex.args[0])
            return

        mrbs = self.server.max_request_body_size
        if mrbs and int(self.inheaders.get("Content-Length", 0)) > mrbs:
            self.simple_response("413 Request Entity Too Large")
            return

        # Persistent connection support
        if self.response_protocol == "HTTP/1.1":
            # Both server and client are HTTP/1.1
            if self.inheaders.get("Connection", "") == "close":
                self.close_connection = True
        else:
            # Either the server or client (or both) are HTTP/1.0
            if self.inheaders.get("Connection", "") != "Keep-Alive":
                self.close_connection = True

        # Transfer-Encoding support
        te = None
        if self.response_protocol == "HTTP/1.1":
            te = self.inheaders.get("Transfer-Encoding")
            if te:
                te = [x.strip().lower() for x in te.split(",") if x.strip()]

        self.chunked_read = False

        if te:
            for enc in te:
                if enc == "chunked":
                    self.chunked_read = True
                else:
                    # Note that, even if we see "chunked", we must reject
                    # if there is an extension we don't recognize.
                    self.simple_response("501 Unimplemented")
                    self.close_connection = True
                    return

        # From PEP 333:
        # "Servers and gateways that implement HTTP 1.1 must provide
        # transparent support for HTTP 1.1's "expect/continue" mechanism.
        # This may be done in any of several ways:
        #   1. Respond to requests containing an Expect: 100-continue request
        #      with an immediate "100 Continue" response, and proceed normally.
        #   2. Proceed with the request normally, but provide the application
        #      with a wsgi.input stream that will send the "100 Continue"
        #      response if/when the application first attempts to read from
        #      the input stream. The read request must then remain blocked
        #      until the client responds.
        #   3. Wait until the client decides that the server does not support
        #      expect/continue, and sends the request body on its own.
        #      (This is suboptimal, and is not recommended.)
        #
        # We used to do 3, but are now doing 1. Maybe we'll do 2 someday,
        # but it seems like it would be a big slowdown for such a rare case.
        if self.inheaders.get("Expect", "") == "100-continue":
            # Don't use simple_response here, because it emits headers
            # we don't want. See http://www.cherrypy.org/ticket/951
            msg = self.server.protocol + " 100 Continue\r\n\r\n"
            try:
                self.conn.wfile.sendall(msg)
            except socket.error, x:
                if x.args[0] not in socket_errors_to_ignore:
                    raise

        self.ready = True

    def parse_request_uri(self, uri):
        """Parse a Request-URI into (scheme, authority, path).

        Note that Request-URI's must be one of:

            Request-URI = "*" | absoluteURI | abs_path | authority

        Therefore, a Request-URI which starts with a double forward-slash
        cannot be a "net_path":

            net_path = "//" authority [ abs_path ]

        Instead, it must be interpreted as an "abs_path" with an empty first
        path segment:

            abs_path = "/" path_segments
            path_segments = segment *( "/" segment )
            segment = *pchar *( ";" param )
            param = *pchar
        """
        if uri == "*":
            return None, None, uri

        i = uri.find('://')
        if i > 0 and '?' not in uri[:i]:
            # An absoluteURI.
            # If there's a scheme (and it must be http or https), then:
            # http_URL = "http:" "//" host [ ":" port ] [ abs_path [ "?" query ]]
            scheme, remainder = uri[:i].lower(), uri[i + 3:]
            authority, path = remainder.split("/", 1)
            return scheme, authority, path

        if uri.startswith('/'):
            # An abs_path.
            return None, None, uri
        else:
            # An authority.
            return None, uri, None

    def respond(self):
        """Call the gateway and write its iterable output."""
        mrbs = self.server.max_request_body_size
        # Pick the wsgi.input wrapper matching the request's body framing.
        if self.chunked_read:
            self.rfile = ChunkedRFile(self.conn.rfile, mrbs)
        else:
            cl = int(self.inheaders.get("Content-Length", 0))
            if mrbs and mrbs < cl:
                if not self.sent_headers:
                    self.simple_response("413 Request Entity Too Large")
                return
            self.rfile = KnownLengthRFile(self.conn.rfile, cl)

        self.server.gateway(self).respond()

        if (self.ready and not self.sent_headers):
            self.sent_headers = True
            self.send_headers()
        if self.chunked_write:
            # Terminating zero-length chunk for chunked transfer-coding.
            self.conn.wfile.sendall("0\r\n\r\n")

    def simple_response(self, status, msg=""):
        """Write a simple response back to the client."""
        status = str(status)
        buf = [self.server.protocol + " " +
               status + CRLF,
               "Content-Length: %s\r\n" % len(msg),
               "Content-Type: text/plain\r\n"]

        if status[:3] == "413" and self.response_protocol == 'HTTP/1.1':
            # Request Entity Too Large
            self.close_connection = True
            buf.append("Connection: close\r\n")

        buf.append(CRLF)
        if msg:
            if isinstance(msg, unicode):
                msg = msg.encode("ISO-8859-1")
            buf.append(msg)

        try:
            self.conn.wfile.sendall("".join(buf))
        except socket.error, x:
            # Client went away mid-write; ignore the known "gone" errors.
            if x.args[0] not in socket_errors_to_ignore:
                raise

    def write(self, chunk):
        """Write unbuffered data to the client."""
        if self.chunked_write and chunk:
            # chunked transfer-coding frame: hex length, CRLF, data, CRLF.
            buf = [hex(len(chunk))[2:], CRLF, chunk, CRLF]
            self.conn.wfile.sendall("".join(buf))
        else:
            self.conn.wfile.sendall(chunk)

    def send_headers(self):
        """Assert, process, and send the HTTP response message-headers.

        You must set self.status, and self.outheaders before calling this.
        """
        hkeys = [key.lower() for key, value in self.outheaders]
        status = int(self.status[:3])

        if status == 413:
            # Request Entity Too Large. Close conn to avoid garbage.
            self.close_connection = True
        elif "content-length" not in hkeys:
            # "All 1xx (informational), 204 (no content),
            # and 304 (not modified) responses MUST NOT
            # include a message-body." So no point chunking.
            if status < 200 or status in (204, 205, 304):
                pass
            else:
                if (self.response_protocol == 'HTTP/1.1'
                        and self.method != 'HEAD'):
                    # Use the chunked transfer-coding
                    self.chunked_write = True
                    self.outheaders.append(("Transfer-Encoding", "chunked"))
                else:
                    # Closing the conn is the only way to determine len.
                    self.close_connection = True

        if "connection" not in hkeys:
            if self.response_protocol == 'HTTP/1.1':
                # Both server and client are HTTP/1.1 or better
                if self.close_connection:
                    self.outheaders.append(("Connection", "close"))
            else:
                # Server and/or client are HTTP/1.0
                if not self.close_connection:
                    self.outheaders.append(("Connection", "Keep-Alive"))

        if (not self.close_connection) and (not self.chunked_read):
            # Read any remaining request body data on the socket.
            # "If an origin server receives a request that does not include an
            # Expect request-header field with the "100-continue" expectation,
            # the request includes a request body, and the server responds
            # with a final status code before reading the entire request body
            # from the transport connection, then the server SHOULD NOT close
            # the transport connection until it has read the entire request,
            # or until the client closes the connection. Otherwise, the client
            # might not reliably receive the response message. However, this
            # requirement is not be construed as preventing a server from
            # defending itself against denial-of-service attacks, or from
            # badly broken client implementations."
            remaining = getattr(self.rfile, 'remaining', 0)
            if remaining > 0:
                self.rfile.read(remaining)

        if "date" not in hkeys:
            self.outheaders.append(("Date", rfc822.formatdate()))

        if "server" not in hkeys:
            self.outheaders.append(("Server", self.server.server_name))

        buf = [self.server.protocol + " " + self.status + CRLF]
        for k, v in self.outheaders:
            buf.append(k + ": " + v + CRLF)
        buf.append(CRLF)
        self.conn.wfile.sendall("".join(buf))
  695. class NoSSLError(Exception):
  696. """Exception raised when a client speaks HTTP to an HTTPS socket."""
  697. pass
  698. class FatalSSLAlert(Exception):
  699. """Exception raised when the SSL implementation signals a fatal alert."""
  700. pass
if not _fileobject_uses_str_type:
    # This variant is selected when socket._fileobject buffers reads in a
    # StringIO rather than a plain str (see the module-level probe).  The
    # read/readline bodies mirror CPython's socket.py, adapted to retry
    # on non-blocking/EINTR errors via the recv() override below.
    class CP_fileobject(socket._fileobject):
        """Faux file object attached to a socket object."""

        def sendall(self, data):
            """Sendall for non-blocking sockets."""
            while data:
                try:
                    bytes_sent = self.send(data)
                    data = data[bytes_sent:]
                except socket.error, e:
                    # Retry when the socket would block; re-raise real errors.
                    if e.args[0] not in socket_errors_nonblocking:
                        raise

        def send(self, data):
            return self._sock.send(data)

        def flush(self):
            # Drain the write buffer through sendall().
            if self._wbuf:
                buffer = "".join(self._wbuf)
                self._wbuf = []
                self.sendall(buffer)

        def recv(self, size):
            while True:
                try:
                    return self._sock.recv(size)
                except socket.error, e:
                    # Retry on would-block and interrupted-syscall errors.
                    if (e.args[0] not in socket_errors_nonblocking
                            and e.args[0] not in socket_error_eintr):
                        raise

        def read(self, size= -1):
            # Use max, disallow tiny reads in a loop as they are very inefficient.
            # We never leave read() with any leftover data from a new recv() call
            # in our internal buffer.
            rbufsize = max(self._rbufsize, self.default_bufsize)
            # Our use of StringIO rather than lists of string objects returned by
            # recv() minimizes memory usage and fragmentation that occurs when
            # rbufsize is large compared to the typical return value of recv().
            buf = self._rbuf
            buf.seek(0, 2)  # seek end
            if size < 0:
                # Read until EOF
                self._rbuf = StringIO.StringIO()  # reset _rbuf. we consume it via buf.
                while True:
                    data = self.recv(rbufsize)
                    if not data:
                        break
                    buf.write(data)
                return buf.getvalue()
            else:
                # Read until size bytes or EOF seen, whichever comes first
                buf_len = buf.tell()
                if buf_len >= size:
                    # Already have size bytes in our buffer?  Extract and return.
                    buf.seek(0)
                    rv = buf.read(size)
                    self._rbuf = StringIO.StringIO()
                    self._rbuf.write(buf.read())
                    return rv

                self._rbuf = StringIO.StringIO()  # reset _rbuf. we consume it via buf.
                while True:
                    left = size - buf_len
                    # recv() will malloc the amount of memory given as its
                    # parameter even though it often returns much less data
                    # than that.  The returned data string is short lived
                    # as we copy it into a StringIO and free it.  This avoids
                    # fragmentation issues on many platforms.
                    data = self.recv(left)
                    if not data:
                        break
                    n = len(data)
                    if n == size and not buf_len:
                        # Shortcut.  Avoid buffer data copies when:
                        # - We have no data in our buffer.
                        # AND
                        # - Our call to recv returned exactly the
                        #   number of bytes we were asked to read.
                        return data
                    if n == left:
                        buf.write(data)
                        del data  # explicit free
                        break
                    assert n <= left, "recv(%d) returned %d bytes" % (left, n)
                    buf.write(data)
                    buf_len += n
                    del data  # explicit free
                    #assert buf_len == buf.tell()
                return buf.getvalue()

        def readline(self, size= -1):
            buf = self._rbuf
            buf.seek(0, 2)  # seek end
            if buf.tell() > 0:
                # check if we already have it in our buffer
                buf.seek(0)
                bline = buf.readline(size)
                if bline.endswith('\n') or len(bline) == size:
                    self._rbuf = StringIO.StringIO()
                    self._rbuf.write(buf.read())
                    return bline
                del bline
            if size < 0:
                # Read until \n or EOF, whichever comes first
                if self._rbufsize <= 1:
                    # Speed up unbuffered case
                    buf.seek(0)
                    buffers = [buf.read()]
                    self._rbuf = StringIO.StringIO()  # reset _rbuf. we consume it via buf.
                    data = None
                    recv = self.recv
                    while data != "\n":
                        data = recv(1)
                        if not data:
                            break
                        buffers.append(data)
                    return "".join(buffers)

                buf.seek(0, 2)  # seek end
                self._rbuf = StringIO.StringIO()  # reset _rbuf. we consume it via buf.
                while True:
                    data = self.recv(self._rbufsize)
                    if not data:
                        break
                    nl = data.find('\n')
                    if nl >= 0:
                        nl += 1
                        buf.write(data[:nl])
                        self._rbuf.write(data[nl:])
                        del data
                        break
                    buf.write(data)
                return buf.getvalue()
            else:
                # Read until size bytes or \n or EOF seen, whichever comes first
                buf.seek(0, 2)  # seek end
                buf_len = buf.tell()
                if buf_len >= size:
                    buf.seek(0)
                    rv = buf.read(size)
                    self._rbuf = StringIO.StringIO()
                    self._rbuf.write(buf.read())
                    return rv
                self._rbuf = StringIO.StringIO()  # reset _rbuf. we consume it via buf.
                while True:
                    data = self.recv(self._rbufsize)
                    if not data:
                        break
                    left = size - buf_len
                    # did we just receive a newline?
                    nl = data.find('\n', 0, left)
                    if nl >= 0:
                        nl += 1
                        # save the excess data to _rbuf
                        self._rbuf.write(data[nl:])
                        if buf_len:
                            buf.write(data[:nl])
                            break
                        else:
                            # Shortcut.  Avoid data copy through buf when returning
                            # a substring of our first recv().
                            return data[:nl]
                    n = len(data)
                    if n == size and not buf_len:
                        # Shortcut.  Avoid data copy through buf when
                        # returning exactly all of our first recv().
                        return data
                    if n >= left:
                        buf.write(data[:left])
                        self._rbuf.write(data[left:])
                        break
                    buf.write(data)
                    buf_len += n
                    #assert buf_len == buf.tell()
                return buf.getvalue()
  870. else:
    class CP_fileobject(socket._fileobject):
        """Faux file object attached to a socket object.

        str-buffered variant (``self._rbuf`` is a plain string): used on
        Python versions whose socket._fileobject buffers with str.
        Overrides send/recv to tolerate non-blocking socket errors.
        """

        def sendall(self, data):
            """Sendall for non-blocking sockets."""
            while data:
                try:
                    bytes_sent = self.send(data)
                    data = data[bytes_sent:]
                except socket.error, e:
                    # Retry (busy-wait) on would-block errors; re-raise the rest.
                    if e.args[0] not in socket_errors_nonblocking:
                        raise

        def send(self, data):
            # Single raw send; may send fewer bytes than len(data).
            return self._sock.send(data)

        def flush(self):
            # Drain the write buffer in one sendall call.
            if self._wbuf:
                buffer = "".join(self._wbuf)  # NOTE: shadows the builtin 'buffer' (local only)
                self._wbuf = []
                self.sendall(buffer)

        def recv(self, size):
            # Retry recv on EINTR and non-blocking "would block" errors.
            while True:
                try:
                    return self._sock.recv(size)
                except socket.error, e:
                    if (e.args[0] not in socket_errors_nonblocking
                        and e.args[0] not in socket_error_eintr):
                        raise

        def read(self, size= -1):
            """Read up to *size* bytes (all remaining data if size < 0)."""
            if size < 0:
                # Read until EOF
                buffers = [self._rbuf]
                self._rbuf = ""
                if self._rbufsize <= 1:
                    recv_size = self.default_bufsize
                else:
                    recv_size = self._rbufsize

                while True:
                    data = self.recv(recv_size)
                    if not data:
                        break
                    buffers.append(data)
                return "".join(buffers)
            else:
                # Read until size bytes or EOF seen, whichever comes first
                data = self._rbuf
                buf_len = len(data)
                if buf_len >= size:
                    # Entirely satisfiable from the buffer.
                    self._rbuf = data[size:]
                    return data[:size]
                buffers = []
                if data:
                    buffers.append(data)
                self._rbuf = ""
                while True:
                    left = size - buf_len
                    recv_size = max(self._rbufsize, left)
                    data = self.recv(recv_size)
                    if not data:
                        break
                    buffers.append(data)
                    n = len(data)
                    if n >= left:
                        # Got enough; keep the overflow for the next call.
                        self._rbuf = data[left:]
                        buffers[-1] = data[:left]
                        break
                    buf_len += n
                return "".join(buffers)

        def readline(self, size= -1):
            """Read one line, keeping its '\\n'; at most *size* bytes if size >= 0."""
            data = self._rbuf
            if size < 0:
                # Read until \n or EOF, whichever comes first
                if self._rbufsize <= 1:
                    # Speed up unbuffered case
                    assert data == ""
                    buffers = []
                    while data != "\n":
                        data = self.recv(1)
                        if not data:
                            break
                        buffers.append(data)
                    return "".join(buffers)
                nl = data.find('\n')
                if nl >= 0:
                    # Line already buffered in full.
                    nl += 1
                    self._rbuf = data[nl:]
                    return data[:nl]
                buffers = []
                if data:
                    buffers.append(data)
                self._rbuf = ""
                while True:
                    data = self.recv(self._rbufsize)
                    if not data:
                        break
                    buffers.append(data)
                    nl = data.find('\n')
                    if nl >= 0:
                        nl += 1
                        # Keep everything after the newline for the next read.
                        self._rbuf = data[nl:]
                        buffers[-1] = data[:nl]
                        break
                return "".join(buffers)
            else:
                # Read until size bytes or \n or EOF seen, whichever comes first
                nl = data.find('\n', 0, size)
                if nl >= 0:
                    nl += 1
                    self._rbuf = data[nl:]
                    return data[:nl]
                buf_len = len(data)
                if buf_len >= size:
                    self._rbuf = data[size:]
                    return data[:size]
                buffers = []
                if data:
                    buffers.append(data)
                self._rbuf = ""
                while True:
                    data = self.recv(self._rbufsize)
                    if not data:
                        break
                    buffers.append(data)
                    left = size - buf_len
                    nl = data.find('\n', 0, left)
                    if nl >= 0:
                        nl += 1
                        self._rbuf = data[nl:]
                        buffers[-1] = data[:nl]
                        break
                    n = len(data)
                    if n >= left:
                        # Hit the size limit before a newline.
                        self._rbuf = data[left:]
                        buffers[-1] = data[:left]
                        break
                    buf_len += n
                return "".join(buffers)
class HTTPConnection(object):
    """An HTTP connection (active socket).

    server: the Server object which received this connection.
    socket: the raw socket object (usually TCP) for this connection.
    makefile: a fileobject class for reading from the socket.
    """

    remote_addr = None   # presumably set by the server after accept — not assigned here
    remote_port = None   # presumably set by the server after accept — not assigned here
    ssl_env = None       # presumably populated by an SSL adapter — not assigned here
    rbufsize = -1        # read-buffer size handed to makefile for rfile
    RequestHandlerClass = HTTPRequest  # parses and responds to each request

    def __init__(self, server, sock, makefile=CP_fileobject):
        self.server = server
        self.socket = sock
        self.rfile = makefile(sock, "rb", self.rbufsize)
        self.wfile = makefile(sock, "wb", -1)

    def communicate(self):
        """Read each request and respond appropriately."""
        request_seen = False
        try:
            while True:
                # (re)set req to None so that if something goes wrong in
                # the RequestHandlerClass constructor, the error doesn't
                # get written to the previous request.
                req = None
                req = self.RequestHandlerClass(self.server, self)

                # This order of operations should guarantee correct pipelining.
                req.parse_request()
                if not req.ready:
                    # Something went wrong in the parsing (and the server has
                    # probably already made a simple_response). Return and
                    # let the conn close.
                    return

                request_seen = True
                req.respond()
                if req.close_connection:
                    return

        except socket.error, e:
            errnum = e.args[0]
            if errnum == 'timed out':
                # Don't error if we're between requests; only error
                # if 1) no request has been started at all, or 2) we're
                # in the middle of a request.
                # See http://www.cherrypy.org/ticket/853
                if (not request_seen) or (req and req.started_request):
                    # Don't bother writing the 408 if the response
                    # has already started being written.
                    if req and not req.sent_headers:
                        try:
                            req.simple_response("408 Request Timeout")
                        except FatalSSLAlert:
                            # Close the connection.
                            return
            elif errnum not in socket_errors_to_ignore:
                if req and not req.sent_headers:
                    try:
                        req.simple_response("500 Internal Server Error",
                                            format_exc())
                    except FatalSSLAlert:
                        # Close the connection.
                        return
            return
        except (KeyboardInterrupt, SystemExit):
            raise
        except FatalSSLAlert:
            # Close the connection.
            return
        except NoSSLError:
            if req and not req.sent_headers:
                # Unwrap our wfile so the plain-HTTP error reaches the client
                # without going through the (failed) SSL layer.
                self.wfile = CP_fileobject(self.socket._sock, "wb", -1)
                req.simple_response("400 Bad Request",
                    "The client sent a plain HTTP request, but "
                    "this server only speaks HTTPS on this port.")
                self.linger = True
        except Exception:
            if req and not req.sent_headers:
                try:
                    req.simple_response("500 Internal Server Error", format_exc())
                except FatalSSLAlert:
                    # Close the connection.
                    return

    linger = False  # when True, close() skips closing the socket (see below)

    def close(self):
        """Close the socket underlying this connection."""
        self.rfile.close()

        if not self.linger:
            # Python's socket module does NOT call close on the kernel socket
            # when you call socket.close(). We do so manually here because we
            # want this server to send a FIN TCP segment immediately. Note this
            # must be called *before* calling socket.close(), because the latter
            # drops its reference to the kernel socket.
            if hasattr(self.socket, '_sock'):
                self.socket._sock.close()
            self.socket.close()
        else:
            # On the other hand, sometimes we want to hang around for a bit
            # to make sure the client has a chance to read our entire
            # response. Skipping the close() calls here delays the FIN
            # packet until the socket object is garbage-collected later.
            # Someday, perhaps, we'll do the full lingering_close that
            # Apache does, but not today.
            pass
  1109. def format_exc(limit=None):
  1110. """Like print_exc() but return a string. Backport for Python 2.3."""
  1111. try:
  1112. etype, value, tb = sys.exc_info()
  1113. return ''.join(traceback.format_exception(etype, value, tb, limit))
  1114. finally:
  1115. etype = value = tb = None
# Sentinel placed on the request Queue to tell a WorkerThread to exit
# (one must be queued per running WorkerThread); compared with 'is'.
_SHUTDOWNREQUEST = None
  1117. class WorkerThread(threading.Thread):
  1118. """Thread which continuously polls a Queue for Connection objects.
  1119. server: the HTTP Server which spawned this thread, and which owns the
  1120. Queue and is placing active connections into it.
  1121. ready: a simple flag for the calling server to know when this thread
  1122. has begun polling the Queue.
  1123. Due to the timing issues of polling a Queue, a WorkerThread does not
  1124. check its own 'ready' flag after it has started. To stop the thread,
  1125. it is necessary to stick a _SHUTDOWNREQUEST object onto the Queue
  1126. (one for each running WorkerThread).
  1127. """
  1128. conn = None
  1129. def __init__(self, server):
  1130. self.ready = False
  1131. self.server = server
  1132. threading.Thread.__init__(self)
  1133. def run(self):
  1134. try:
  1135. self.ready = True
  1136. while True:
  1137. conn = self.server.requests.get()
  1138. if conn is _SHUTDOWNREQUEST:
  1139. return
  1140. self.conn = conn
  1141. try:
  1142. conn.communicate()
  1143. finally:
  1144. conn.close()
  1145. self.conn = None
  1146. except (KeyboardInterrupt, SystemExit), exc:
  1147. self.server.interrupt = exc
  1148. class ThreadPool(object):
  1149. """A Request Queue for the CherryPyWSGIServer which pools threads.
  1150. ThreadPool objects must provide min, get(), put(obj), start()
  1151. and stop(timeout) attributes.
  1152. """
  1153. def __init__(self, server, min=10, max= -1):
  1154. self.server = server
  1155. self.min = min
  1156. self.max = max
  1157. self._threads = []
  1158. self._queue = Queue.Queue()
  1159. self.get = self._queue.get
  1160. def start(self):
  1161. """Start the pool of threads."""
  1162. for i in range(self.min):
  1163. self