# lib/cherrypy/wsgiserver/__init__.py
# (changelog: fixed python_path problem;
#  blob 55d1dd904bb836116e57559c851c691eadbe44a5)
1 """A high-speed, production ready, thread pooled, generic HTTP server.
3 Simplest example on how to use this module directly
4 (without using CherryPy's application machinery)::
6 from cherrypy import wsgiserver
8 def my_crazy_app(environ, start_response):
9 status = '200 OK'
10 response_headers = [('Content-type','text/plain')]
11 start_response(status, response_headers)
12 return ['Hello world!']
14 server = wsgiserver.CherryPyWSGIServer(
15 ('0.0.0.0', 8070), my_crazy_app,
16 server_name='www.cherrypy.example')
17 server.start()
19 The CherryPy WSGI server can serve as many WSGI applications
20 as you want in one instance by using a WSGIPathInfoDispatcher::
22 d = WSGIPathInfoDispatcher({'/': my_crazy_app, '/blog': my_blog_app})
23 server = wsgiserver.CherryPyWSGIServer(('0.0.0.0', 80), d)
25 Want SSL support? Just set server.ssl_adapter to an SSLAdapter instance.
27 This won't call the CherryPy engine (application side) at all, only the
28 HTTP server, which is independent from the rest of CherryPy. Don't
29 let the name "CherryPyWSGIServer" throw you; the name merely reflects
30 its origin, not its coupling.
32 For those of you wanting to understand internals of this module, here's the
33 basic call flow. The server's listening thread runs a very tight loop,
34 sticking incoming connections onto a Queue::
36 server = CherryPyWSGIServer(...)
37 server.start()
38 while True:
39 tick()
40 # This blocks until a request comes in:
41 child = socket.accept()
42 conn = HTTPConnection(child, ...)
43 server.requests.put(conn)
45 Worker threads are kept in a pool and poll the Queue, popping off and then
46 handling each connection in turn. Each connection can consist of an arbitrary
47 number of requests and their responses, so we run a nested loop::
49 while True:
50 conn = server.requests.get()
51 conn.communicate()
52 -> while True:
53 req = HTTPRequest(...)
54 req.parse_request()
55 -> # Read the Request-Line, e.g. "GET /page HTTP/1.1"
56 req.rfile.readline()
57 read_headers(req.rfile, req.inheaders)
58 req.respond()
59 -> response = app(...)
60 try:
61 for chunk in response:
62 if chunk:
63 req.write(chunk)
64 finally:
65 if hasattr(response, "close"):
66 response.close()
67 if req.close_connection:
68 return
69 """
CRLF = '\r\n'

import os
import Queue
import re

# Matches the escaped slash "%2F" case-insensitively, so a path can be
# split on real slashes without decoding escaped ones (RFC 2396 sec 2.4.2).
quoted_slash = re.compile("(?i)%2F")

import rfc822
import socket
import sys
if 'win' in sys.platform and not hasattr(socket, 'IPPROTO_IPV6'):
    # Older Windows Pythons lack this constant; 41 is the standard value.
    socket.IPPROTO_IPV6 = 41
try:
    # cStringIO is the C-accelerated version; fall back to pure Python.
    import cStringIO as StringIO
except ImportError:
    import StringIO

DEFAULT_BUFFER_SIZE = -1

# True when socket._fileobject keeps its read buffer as a plain str
# (older Pythons) rather than a StringIO; CP_fileobject below selects its
# read/readline implementations based on this flag.
_fileobject_uses_str_type = isinstance(socket._fileobject(None)._rbuf, basestring)

import threading
import time
import traceback
def format_exc(limit=None):
    """Like print_exc() but return a string. Backport for Python 2.3."""
    etype, value, tb = sys.exc_info()
    try:
        return ''.join(traceback.format_exception(etype, value, tb, limit))
    finally:
        # Break the traceback reference cycle so frames are freed promptly.
        etype = value = tb = None
101 from urllib import unquote
102 from urlparse import urlparse
103 import warnings
105 import errno
def plat_specific_errors(*errnames):
    """Return error numbers for all errors in errnames on this platform.

    The 'errno' module contains different global constants depending on
    the specific platform (OS). This function will return the list of
    numeric values for a given list of potential names.
    """
    # Names absent on this platform (e.g. WSAE* off Windows) are skipped.
    errno_names = dir(errno)
    nums = [getattr(errno, k) for k in errnames if k in errno_names]
    # de-dupe the list
    return dict.fromkeys(nums).keys()
socket_error_eintr = plat_specific_errors("EINTR", "WSAEINTR")

# Errors that merely mean the client went away; callers compare
# socket.error args[0] against this list instead of logging a traceback
# for every dropped connection.
socket_errors_to_ignore = plat_specific_errors(
    "EPIPE",
    "EBADF", "WSAEBADF",
    "ENOTSOCK", "WSAENOTSOCK",
    "ETIMEDOUT", "WSAETIMEDOUT",
    "ECONNREFUSED", "WSAECONNREFUSED",
    "ECONNRESET", "WSAECONNRESET",
    "ECONNABORTED", "WSAECONNABORTED",
    "ENETRESET", "WSAENETRESET",
    "EHOSTDOWN", "EHOSTUNREACH",
    )
# Some timeouts surface with a message string (not an errno) in args[0],
# so those strings must be in the ignore list as well.
socket_errors_to_ignore.append("timed out")
socket_errors_to_ignore.append("The read operation timed out")

socket_errors_nonblocking = plat_specific_errors(
    'EAGAIN', 'EWOULDBLOCK', 'WSAEWOULDBLOCK')

# Response headers whose repeated values may be folded with ", "
# per their specifications (see read_headers below).
comma_separated_headers = ['Accept', 'Accept-Charset', 'Accept-Encoding',
    'Accept-Language', 'Accept-Ranges', 'Allow', 'Cache-Control',
    'Connection', 'Content-Encoding', 'Content-Language', 'Expect',
    'If-Match', 'If-None-Match', 'Pragma', 'Proxy-Authenticate', 'TE',
    'Trailer', 'Transfer-Encoding', 'Upgrade', 'Vary', 'Via', 'Warning',
    'WWW-Authenticate']


import logging
if not hasattr(logging, 'statistics'): logging.statistics = {}
def read_headers(rfile, hdict=None):
    """Read headers from the given stream into the given header dict.

    If hdict is None, a new header dict is created. Returns the populated
    header dict.

    Headers which are repeated are folded together using a comma if their
    specification so dictates.

    This function raises ValueError when the read bytes violate the HTTP spec.
    You should probably return "400 Bad Request" if this happens.
    """
    if hdict is None:
        hdict = {}

    while True:
        line = rfile.readline()
        if not line:
            # No more data--illegal end of headers
            raise ValueError("Illegal end of headers.")

        if line == CRLF:
            # Normal end of headers
            break
        if not line.endswith(CRLF):
            raise ValueError("HTTP requires CRLF terminators")

        if line[0] in ' \t':
            # It's a continuation line: k/hname keep their values from the
            # previous iteration, so the value folds onto the prior header.
            # NOTE(review): a continuation as the *first* header line would
            # hit an unbound 'k' -- verify upstream behavior.
            v = line.strip()
        else:
            try:
                k, v = line.split(":", 1)
            except ValueError:
                raise ValueError("Illegal header line.")
            # TODO: what about TE and WWW-Authenticate?
            k = k.strip().title()
            v = v.strip()
            hname = k

        if k in comma_separated_headers:
            existing = hdict.get(hname)
            if existing:
                v = ", ".join((existing, v))
        hdict[hname] = v

    return hdict
class MaxSizeExceeded(Exception):
    """Raised when a request's header or entity exceeds the configured maximum."""
    pass
class SizeCheckWrapper(object):
    """Wraps a file-like object, raising MaxSizeExceeded if too large."""

    def __init__(self, rfile, maxlen):
        self.rfile = rfile
        self.maxlen = maxlen
        self.bytes_read = 0

    def _check_length(self):
        # A falsy maxlen (0/None) disables the limit entirely.
        if self.maxlen and self.bytes_read > self.maxlen:
            raise MaxSizeExceeded()

    def read(self, size=None):
        chunk = self.rfile.read(size)
        self.bytes_read += len(chunk)
        self._check_length()
        return chunk

    def readline(self, size=None):
        if size is not None:
            chunk = self.rfile.readline(size)
            self.bytes_read += len(chunk)
            self._check_length()
            return chunk

        # No size given: pull the line in 256-byte pieces so a maliciously
        # long line trips the limit early instead of being buffered whole.
        # See http://www.cherrypy.org/ticket/421
        pieces = []
        while True:
            chunk = self.rfile.readline(256)
            self.bytes_read += len(chunk)
            self._check_length()
            pieces.append(chunk)
            if len(chunk) < 256 or chunk[-1:] == "\n":
                return ''.join(pieces)

    def readlines(self, sizehint=0):
        # Collect whole lines until EOF, or until sizehint bytes were read.
        lines = []
        total = 0
        line = self.readline()
        while line:
            lines.append(line)
            total += len(line)
            if 0 < sizehint <= total:
                break
            line = self.readline()
        return lines

    def close(self):
        self.rfile.close()

    def __iter__(self):
        return self

    def next(self):
        # Python 2 iterator protocol; counts iterated bytes like read().
        chunk = self.rfile.next()
        self.bytes_read += len(chunk)
        self._check_length()
        return chunk
class KnownLengthRFile(object):
    """Wraps a file-like object, returning an empty string when exhausted.

    Reads are capped at the declared Content-Length so the wrapper never
    consumes bytes belonging to the next request on the same connection.
    """

    def __init__(self, rfile, content_length):
        self.rfile = rfile
        self.remaining = content_length

    def read(self, size=None):
        if self.remaining == 0:
            return ''
        if size is None:
            size = self.remaining
        else:
            size = min(size, self.remaining)

        data = self.rfile.read(size)
        self.remaining -= len(data)
        return data

    def readline(self, size=None):
        if self.remaining == 0:
            return ''
        if size is None:
            size = self.remaining
        else:
            size = min(size, self.remaining)

        data = self.rfile.readline(size)
        self.remaining -= len(data)
        return data

    def readlines(self, sizehint=0):
        # Shamelessly stolen from StringIO
        total = 0
        lines = []
        line = self.readline(sizehint)
        while line:
            lines.append(line)
            total += len(line)
            if 0 < sizehint <= total:
                break
            line = self.readline(sizehint)
        return lines

    def close(self):
        self.rfile.close()

    def __iter__(self):
        return self

    def __next__(self):
        data = next(self.rfile)
        self.remaining -= len(data)
        return data

    # Python 2 spells the iterator protocol 'next'; without this alias,
    # iterating a KnownLengthRFile raises AttributeError on Python 2
    # (cf. SizeCheckWrapper, which defines next()).
    next = __next__
class ChunkedRFile(object):
    """Wraps a file-like object, returning an empty string when exhausted.

    This class is intended to provide a conforming wsgi.input value for
    request entities that have been encoded with the 'chunked' transfer
    encoding.
    """

    def __init__(self, rfile, maxlen, bufsize=8192):
        self.rfile = rfile
        self.maxlen = maxlen
        self.bytes_read = 0
        self.buffer = ''
        self.bufsize = bufsize
        self.closed = False

    def _fetch(self):
        """Read the next chunk from rfile into self.buffer (no-op once closed)."""
        if self.closed:
            return

        line = self.rfile.readline()
        self.bytes_read += len(line)

        if self.maxlen and self.bytes_read > self.maxlen:
            raise MaxSizeExceeded("Request Entity Too Large", self.maxlen)

        line = line.strip().split(";", 1)

        try:
            chunk_size = line.pop(0)
            chunk_size = int(chunk_size, 16)
        except ValueError:
            raise ValueError("Bad chunked transfer size: " + repr(chunk_size))

        if chunk_size <= 0:
            # The zero-size chunk marks the end of the request entity.
            self.closed = True
            return

##            if line: chunk_extension = line[0]

        if self.maxlen and self.bytes_read + chunk_size > self.maxlen:
            raise IOError("Request Entity Too Large")

        chunk = self.rfile.read(chunk_size)
        self.bytes_read += len(chunk)
        self.buffer += chunk

        crlf = self.rfile.read(2)
        if crlf != CRLF:
            raise ValueError(
                 "Bad chunked transfer coding (expected '\\r\\n', "
                 "got " + repr(crlf) + ")")

    def read(self, size=None):
        """Read up to `size` bytes (all remaining bytes if size is falsy)."""
        data = ''
        while True:
            if size and len(data) >= size:
                return data

            if not self.buffer:
                self._fetch()
                if not self.buffer:
                    # EOF
                    return data

            if size:
                remaining = size - len(data)
                data += self.buffer[:remaining]
                self.buffer = self.buffer[remaining:]
            else:
                # BUGFIX: the buffer must be emptied here, or this loop
                # re-appends the same bytes forever.
                data += self.buffer
                self.buffer = ''

    def readline(self, size=None):
        """Read one line (newline included), or up to `size` bytes if given."""
        data = ''
        while True:
            if size and len(data) >= size:
                return data

            if not self.buffer:
                self._fetch()
                if not self.buffer:
                    # EOF
                    return data

            newline_pos = self.buffer.find('\n')
            if newline_pos == -1:
                # No newline buffered yet; take what we're allowed and loop.
                if size:
                    remaining = size - len(data)
                    data += self.buffer[:remaining]
                    self.buffer = self.buffer[remaining:]
                else:
                    data += self.buffer
                    self.buffer = ''
            else:
                # BUGFIX: consume *through* the newline and return the line;
                # the old code left the '\n' in the buffer and never
                # returned, spinning forever.
                take = newline_pos + 1
                if size:
                    take = min(size - len(data), take)
                data += self.buffer[:take]
                self.buffer = self.buffer[take:]
                if data[-1:] == '\n':
                    return data

    def readlines(self, sizehint=0):
        # Shamelessly stolen from StringIO
        total = 0
        lines = []
        line = self.readline(sizehint)
        while line:
            lines.append(line)
            total += len(line)
            if 0 < sizehint <= total:
                break
            line = self.readline(sizehint)
        return lines

    def read_trailer_lines(self):
        """Yield trailer lines after the final chunk; requires a fully-read body."""
        if not self.closed:
            raise ValueError(
                "Cannot read trailers until the request body has been read.")

        while True:
            line = self.rfile.readline()
            if not line:
                # No more data--illegal end of headers
                raise ValueError("Illegal end of headers.")

            self.bytes_read += len(line)
            if self.maxlen and self.bytes_read > self.maxlen:
                raise IOError("Request Entity Too Large")

            if line == CRLF:
                # Normal end of headers
                break
            if not line.endswith(CRLF):
                raise ValueError("HTTP requires CRLF terminators")

            yield line

    def close(self):
        self.rfile.close()

    def __iter__(self):
        # BUGFIX: the old generator referenced an undefined 'sizehint'
        # (NameError on first iteration). Yield whole lines until EOF.
        line = self.readline()
        while line:
            yield line
            line = self.readline()
class HTTPRequest(object):
    """An HTTP Request (and response).

    A single HTTP connection may consist of multiple request/response pairs.
    """

    server = None
    """The HTTPServer object which is receiving this request."""

    conn = None
    """The HTTPConnection object on which this request connected."""

    inheaders = {}
    """A dict of request headers."""

    outheaders = []
    """A list of header tuples to write in the response."""

    ready = False
    """When True, the request has been parsed and is ready to begin generating
    the response. When False, signals the calling Connection that the response
    should not be generated and the connection should close."""

    close_connection = False
    """Signals the calling Connection that the request should close. This does
    not imply an error! The client and/or server may each request that the
    connection be closed."""

    chunked_write = False
    """If True, output will be encoded with the "chunked" transfer-coding.

    This value is set automatically inside send_headers."""

    def __init__(self, server, conn):
        self.server = server
        self.conn = conn

        self.ready = False
        self.started_request = False
        self.scheme = "http"
        if self.server.ssl_adapter is not None:
            self.scheme = "https"
        # Use the lowest-common protocol in case read_request_line errors.
        self.response_protocol = 'HTTP/1.0'
        self.inheaders = {}

        self.status = ""
        self.outheaders = []
        self.sent_headers = False
        # Reset per-request state from the class-level defaults so a prior
        # request on the same class cannot leak into this one.
        self.close_connection = self.__class__.close_connection
        self.chunked_read = False
        self.chunked_write = self.__class__.chunked_write

    def parse_request(self):
        """Parse the next HTTP request start-line and message-headers."""
        # Wrap the socket file so oversized request heads raise
        # MaxSizeExceeded instead of being buffered without bound.
        self.rfile = SizeCheckWrapper(self.conn.rfile,
                                      self.server.max_request_header_size)
        try:
            self.read_request_line()
        except MaxSizeExceeded:
            self.simple_response("414 Request-URI Too Long",
                "The Request-URI sent with the request exceeds the maximum "
                "allowed bytes.")
            return

        try:
            success = self.read_request_headers()
        except MaxSizeExceeded:
            self.simple_response("413 Request Entity Too Large",
                "The headers sent with the request exceed the maximum "
                "allowed bytes.")
            return
        else:
            if not success:
                return

        self.ready = True

    def read_request_line(self):
        """Read and parse the Request-Line.

        Sets self.uri, self.method, self.path, self.qs, and the
        request/response protocol versions. On malformed input, writes an
        error response and returns with self.ready left False.
        """
        # HTTP/1.1 connections are persistent by default. If a client
        # requests a page, then idles (leaves the connection open),
        # then rfile.readline() will raise socket.error("timed out").
        # Note that it does this based on the value given to settimeout(),
        # and doesn't need the client to request or acknowledge the close
        # (although your TCP stack might suffer for it: cf Apache's history
        # with FIN_WAIT_2).
        request_line = self.rfile.readline()

        # Set started_request to True so communicate() knows to send 408
        # from here on out.
        self.started_request = True
        if not request_line:
            # Force self.ready = False so the connection will close.
            self.ready = False
            return

        if request_line == CRLF:
            # RFC 2616 sec 4.1: "...if the server is reading the protocol
            # stream at the beginning of a message and receives a CRLF
            # first, it should ignore the CRLF."
            # But only ignore one leading line! else we enable a DoS.
            request_line = self.rfile.readline()
            if not request_line:
                self.ready = False
                return

        if not request_line.endswith(CRLF):
            self.simple_response("400 Bad Request", "HTTP requires CRLF terminators")
            return

        try:
            method, uri, req_protocol = request_line.strip().split(" ", 2)
            # "HTTP/x.y" -> (x, y): positions 5 and 7 hold the digits.
            rp = int(req_protocol[5]), int(req_protocol[7])
        except (ValueError, IndexError):
            self.simple_response("400 Bad Request", "Malformed Request-Line")
            return

        self.uri = uri
        self.method = method

        # uri may be an abs_path (including "http://host.domain.tld");
        scheme, authority, path = self.parse_request_uri(uri)
        if '#' in path:
            self.simple_response("400 Bad Request",
                                 "Illegal #fragment in Request-URI.")
            return

        if scheme:
            self.scheme = scheme

        qs = ''
        if '?' in path:
            path, qs = path.split('?', 1)

        # Unquote the path+params (e.g. "/this%20path" -> "/this path").
        # http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2
        #
        # But note that "...a URI must be separated into its components
        # before the escaped characters within those components can be
        # safely decoded." http://www.ietf.org/rfc/rfc2396.txt, sec 2.4.2
        # Therefore, "/this%2Fpath" becomes "/this%2Fpath", not "/this/path".
        try:
            atoms = [unquote(x) for x in quoted_slash.split(path)]
        except ValueError, ex:
            self.simple_response("400 Bad Request", ex.args[0])
            return
        path = "%2F".join(atoms)
        self.path = path

        # Note that, like wsgiref and most other HTTP servers,
        # we "% HEX HEX"-unquote the path but not the query string.
        self.qs = qs

        # Compare request and server HTTP protocol versions, in case our
        # server does not support the requested protocol. Limit our output
        # to min(req, server). We want the following output:
        #     request    server    actual written  supported response
        #     protocol   protocol  response protocol  feature set
        # a     1.0        1.0          1.0             1.0
        # b     1.0        1.1          1.1             1.0
        # c     1.1        1.0          1.0             1.0
        # d     1.1        1.1          1.1             1.1
        # Notice that, in (b), the response will be "HTTP/1.1" even though
        # the client only understands 1.0. RFC 2616 10.5.6 says we should
        # only return 505 if the _major_ version is different.
        sp = int(self.server.protocol[5]), int(self.server.protocol[7])

        if sp[0] != rp[0]:
            self.simple_response("505 HTTP Version Not Supported")
            return
        self.request_protocol = req_protocol
        self.response_protocol = "HTTP/%s.%s" % min(rp, sp)

    def read_request_headers(self):
        """Read self.rfile into self.inheaders. Return success."""

        # then all the http headers
        try:
            read_headers(self.rfile, self.inheaders)
        except ValueError, ex:
            self.simple_response("400 Bad Request", ex.args[0])
            return False

        mrbs = self.server.max_request_body_size
        if mrbs and int(self.inheaders.get("Content-Length", 0)) > mrbs:
            self.simple_response("413 Request Entity Too Large",
                "The entity sent with the request exceeds the maximum "
                "allowed bytes.")
            return False

        # Persistent connection support
        if self.response_protocol == "HTTP/1.1":
            # Both server and client are HTTP/1.1
            if self.inheaders.get("Connection", "") == "close":
                self.close_connection = True
        else:
            # Either the server or client (or both) are HTTP/1.0
            if self.inheaders.get("Connection", "") != "Keep-Alive":
                self.close_connection = True

        # Transfer-Encoding support
        te = None
        if self.response_protocol == "HTTP/1.1":
            te = self.inheaders.get("Transfer-Encoding")
            if te:
                te = [x.strip().lower() for x in te.split(",") if x.strip()]

        self.chunked_read = False

        if te:
            for enc in te:
                if enc == "chunked":
                    self.chunked_read = True
                else:
                    # Note that, even if we see "chunked", we must reject
                    # if there is an extension we don't recognize.
                    self.simple_response("501 Unimplemented")
                    self.close_connection = True
                    return False

        # From PEP 333:
        # "Servers and gateways that implement HTTP 1.1 must provide
        # transparent support for HTTP 1.1's "expect/continue" mechanism.
        # This may be done in any of several ways:
        #   1. Respond to requests containing an Expect: 100-continue request
        #      with an immediate "100 Continue" response, and proceed normally.
        #   2. Proceed with the request normally, but provide the application
        #      with a wsgi.input stream that will send the "100 Continue"
        #      response if/when the application first attempts to read from
        #      the input stream. The read request must then remain blocked
        #      until the client responds.
        #   3. Wait until the client decides that the server does not support
        #      expect/continue, and sends the request body on its own.
        #      (This is suboptimal, and is not recommended.)
        #
        # We used to do 3, but are now doing 1. Maybe we'll do 2 someday,
        # but it seems like it would be a big slowdown for such a rare case.
        if self.inheaders.get("Expect", "") == "100-continue":
            # Don't use simple_response here, because it emits headers
            # we don't want. See http://www.cherrypy.org/ticket/951
            msg = self.server.protocol + " 100 Continue\r\n\r\n"
            try:
                self.conn.wfile.sendall(msg)
            except socket.error, x:
                if x.args[0] not in socket_errors_to_ignore:
                    raise
        return True

    def parse_request_uri(self, uri):
        """Parse a Request-URI into (scheme, authority, path).

        Note that Request-URI's must be one of::

            Request-URI    = "*" | absoluteURI | abs_path | authority

        Therefore, a Request-URI which starts with a double forward-slash
        cannot be a "net_path"::

            net_path      = "//" authority [ abs_path ]

        Instead, it must be interpreted as an "abs_path" with an empty first
        path segment::

            abs_path      = "/"  path_segments
            path_segments = segment *( "/" segment )
            segment       = *pchar *( ";" param )
            param         = *pchar
        """
        if uri == "*":
            return None, None, uri

        i = uri.find('://')
        if i > 0 and '?' not in uri[:i]:
            # An absoluteURI.
            # If there's a scheme (and it must be http or https), then:
            # http_URL = "http:" "//" host [ ":" port ] [ abs_path [ "?" query ]]
            scheme, remainder = uri[:i].lower(), uri[i + 3:]
            authority, path = remainder.split("/", 1)
            return scheme, authority, path

        if uri.startswith('/'):
            # An abs_path.
            return None, None, uri
        else:
            # An authority.
            return None, uri, None

    def respond(self):
        """Call the gateway and write its iterable output."""
        mrbs = self.server.max_request_body_size
        if self.chunked_read:
            self.rfile = ChunkedRFile(self.conn.rfile, mrbs)
        else:
            cl = int(self.inheaders.get("Content-Length", 0))
            if mrbs and mrbs < cl:
                if not self.sent_headers:
                    self.simple_response("413 Request Entity Too Large",
                        "The entity sent with the request exceeds the maximum "
                        "allowed bytes.")
                return
            self.rfile = KnownLengthRFile(self.conn.rfile, cl)

        self.server.gateway(self).respond()

        if (self.ready and not self.sent_headers):
            self.sent_headers = True
            self.send_headers()
        if self.chunked_write:
            # Terminate the chunked transfer-coding with a zero-size chunk.
            self.conn.wfile.sendall("0\r\n\r\n")

    def simple_response(self, status, msg=""):
        """Write a simple response back to the client."""
        status = str(status)
        buf = [self.server.protocol + " " +
               status + CRLF,
               "Content-Length: %s\r\n" % len(msg),
               "Content-Type: text/plain\r\n"]

        if status[:3] in ("413", "414"):
            # Request Entity Too Large / Request-URI Too Long
            self.close_connection = True
            if self.response_protocol == 'HTTP/1.1':
                # This will not be true for 414, since read_request_line
                # usually raises 414 before reading the whole line, and we
                # therefore cannot know the proper response_protocol.
                buf.append("Connection: close\r\n")
            else:
                # HTTP/1.0 had no 413/414 status nor Connection header.
                # Emit 400 instead and trust the message body is enough.
                # NOTE(review): the status-line with the original code was
                # already placed in buf above, so only later uses of
                # 'status' see the rewrite -- verify intent.
                status = "400 Bad Request"

        buf.append(CRLF)
        if msg:
            if isinstance(msg, unicode):
                msg = msg.encode("ISO-8859-1")
            buf.append(msg)

        try:
            self.conn.wfile.sendall("".join(buf))
        except socket.error, x:
            if x.args[0] not in socket_errors_to_ignore:
                raise

    def write(self, chunk):
        """Write unbuffered data to the client."""
        if self.chunked_write and chunk:
            # Each chunk is framed as: hex-size CRLF payload CRLF.
            buf = [hex(len(chunk))[2:], CRLF, chunk, CRLF]
            self.conn.wfile.sendall("".join(buf))
        else:
            self.conn.wfile.sendall(chunk)

    def send_headers(self):
        """Assert, process, and send the HTTP response message-headers.

        You must set self.status, and self.outheaders before calling this.
        """
        hkeys = [key.lower() for key, value in self.outheaders]
        status = int(self.status[:3])

        if status == 413:
            # Request Entity Too Large. Close conn to avoid garbage.
            self.close_connection = True
        elif "content-length" not in hkeys:
            # "All 1xx (informational), 204 (no content),
            # and 304 (not modified) responses MUST NOT
            # include a message-body." So no point chunking.
            if status < 200 or status in (204, 205, 304):
                pass
            else:
                if (self.response_protocol == 'HTTP/1.1'
                    and self.method != 'HEAD'):
                    # Use the chunked transfer-coding
                    self.chunked_write = True
                    self.outheaders.append(("Transfer-Encoding", "chunked"))
                else:
                    # Closing the conn is the only way to determine len.
                    self.close_connection = True

        if "connection" not in hkeys:
            if self.response_protocol == 'HTTP/1.1':
                # Both server and client are HTTP/1.1 or better
                if self.close_connection:
                    self.outheaders.append(("Connection", "close"))
            else:
                # Server and/or client are HTTP/1.0
                if not self.close_connection:
                    self.outheaders.append(("Connection", "Keep-Alive"))

        if (not self.close_connection) and (not self.chunked_read):
            # Read any remaining request body data on the socket.
            # "If an origin server receives a request that does not include an
            # Expect request-header field with the "100-continue" expectation,
            # the request includes a request body, and the server responds
            # with a final status code before reading the entire request body
            # from the transport connection, then the server SHOULD NOT close
            # the transport connection until it has read the entire request,
            # or until the client closes the connection. Otherwise, the client
            # might not reliably receive the response message. However, this
            # requirement is not be construed as preventing a server from
            # defending itself against denial-of-service attacks, or from
            # badly broken client implementations."
            remaining = getattr(self.rfile, 'remaining', 0)
            if remaining > 0:
                self.rfile.read(remaining)

        if "date" not in hkeys:
            self.outheaders.append(("Date", rfc822.formatdate()))

        if "server" not in hkeys:
            self.outheaders.append(("Server", self.server.server_name))

        buf = [self.server.protocol + " " + self.status + CRLF]
        for k, v in self.outheaders:
            buf.append(k + ": " + v + CRLF)
        buf.append(CRLF)
        self.conn.wfile.sendall("".join(buf))
class NoSSLError(Exception):
    """Exception raised when a client speaks HTTP to an HTTPS socket."""
class FatalSSLAlert(Exception):
    """Exception raised when the SSL implementation signals a fatal alert."""
901 class CP_fileobject(socket._fileobject):
902 """Faux file object attached to a socket object."""
904 def __init__(self, *args, **kwargs):
905 self.bytes_read = 0
906 self.bytes_written = 0
907 socket._fileobject.__init__(self, *args, **kwargs)
909 def sendall(self, data):
910 """Sendall for non-blocking sockets."""
911 while data:
912 try:
913 bytes_sent = self.send(data)
914 data = data[bytes_sent:]
915 except socket.error, e:
916 if e.args[0] not in socket_errors_nonblocking:
917 raise
919 def send(self, data):
920 bytes_sent = self._sock.send(data)
921 self.bytes_written += bytes_sent
922 return bytes_sent
924 def flush(self):
925 if self._wbuf:
926 buffer = "".join(self._wbuf)
927 self._wbuf = []
928 self.sendall(buffer)
930 def recv(self, size):
931 while True:
932 try:
933 data = self._sock.recv(size)
934 self.bytes_read += len(data)
935 return data
936 except socket.error, e:
937 if (e.args[0] not in socket_errors_nonblocking
938 and e.args[0] not in socket_error_eintr):
939 raise
941 if not _fileobject_uses_str_type:
        def read(self, size=-1):
            """Read up to `size` bytes (all bytes until EOF when size < 0).

            This is the StringIO-buffered variant: self._rbuf is a StringIO.
            """
            # Use max, disallow tiny reads in a loop as they are very inefficient.
            # We never leave read() with any leftover data from a new recv() call
            # in our internal buffer.
            rbufsize = max(self._rbufsize, self.default_bufsize)
            # Our use of StringIO rather than lists of string objects returned by
            # recv() minimizes memory usage and fragmentation that occurs when
            # rbufsize is large compared to the typical return value of recv().
            buf = self._rbuf
            buf.seek(0, 2)  # seek end
            if size < 0:
                # Read until EOF
                self._rbuf = StringIO.StringIO()  # reset _rbuf.  we consume it via buf.
                while True:
                    data = self.recv(rbufsize)
                    if not data:
                        break
                    buf.write(data)
                return buf.getvalue()
            else:
                # Read until size bytes or EOF seen, whichever comes first
                buf_len = buf.tell()
                if buf_len >= size:
                    # Already have size bytes in our buffer?  Extract and return.
                    buf.seek(0)
                    rv = buf.read(size)
                    self._rbuf = StringIO.StringIO()
                    self._rbuf.write(buf.read())
                    return rv

                self._rbuf = StringIO.StringIO()  # reset _rbuf.  we consume it via buf.
                while True:
                    left = size - buf_len
                    # recv() will malloc the amount of memory given as its
                    # parameter even though it often returns much less data
                    # than that.  The returned data string is short lived
                    # as we copy it into a StringIO and free it.  This avoids
                    # fragmentation issues on many platforms.
                    data = self.recv(left)
                    if not data:
                        break
                    n = len(data)
                    if n == size and not buf_len:
                        # Shortcut.  Avoid buffer data copies when:
                        # - We have no data in our buffer.
                        # AND
                        # - Our call to recv returned exactly the
                        #   number of bytes we were asked to read.
                        return data
                    if n == left:
                        buf.write(data)
                        del data  # explicit free
                        break
                    assert n <= left, "recv(%d) returned %d bytes" % (left, n)
                    buf.write(data)
                    buf_len += n
                    del data  # explicit free
                #assert buf_len == buf.tell()
                return buf.getvalue()
        def readline(self, size=-1):
            """Read one line, or at most `size` bytes of it when size >= 0.

            StringIO-buffered variant; leftover bytes past the newline stay
            in self._rbuf for the next call.
            """
            buf = self._rbuf
            buf.seek(0, 2)  # seek end
            if buf.tell() > 0:
                # check if we already have it in our buffer
                buf.seek(0)
                bline = buf.readline(size)
                if bline.endswith('\n') or len(bline) == size:
                    self._rbuf = StringIO.StringIO()
                    self._rbuf.write(buf.read())
                    return bline
                del bline
            if size < 0:
                # Read until \n or EOF, whichever comes first
                if self._rbufsize <= 1:
                    # Speed up unbuffered case
                    buf.seek(0)
                    buffers = [buf.read()]
                    self._rbuf = StringIO.StringIO()  # reset _rbuf.  we consume it via buf.
                    data = None
                    recv = self.recv
                    # Byte-at-a-time recv: correct for unbuffered mode, slow
                    # by design.
                    while data != "\n":
                        data = recv(1)
                        if not data:
                            break
                        buffers.append(data)
                    return "".join(buffers)

                buf.seek(0, 2)  # seek end
                self._rbuf = StringIO.StringIO()  # reset _rbuf.  we consume it via buf.
                while True:
                    data = self.recv(self._rbufsize)
                    if not data:
                        break
                    nl = data.find('\n')
                    if nl >= 0:
                        nl += 1
                        buf.write(data[:nl])
                        self._rbuf.write(data[nl:])
                        del data
                        break
                    buf.write(data)
                return buf.getvalue()
            else:
                # Read until size bytes or \n or EOF seen, whichever comes first
                buf.seek(0, 2)  # seek end
                buf_len = buf.tell()
                if buf_len >= size:
                    buf.seek(0)
                    rv = buf.read(size)
                    self._rbuf = StringIO.StringIO()
                    self._rbuf.write(buf.read())
                    return rv
                self._rbuf = StringIO.StringIO()  # reset _rbuf.  we consume it via buf.
                while True:
                    data = self.recv(self._rbufsize)
                    if not data:
                        break
                    left = size - buf_len
                    # did we just receive a newline?
                    nl = data.find('\n', 0, left)
                    if nl >= 0:
                        nl += 1
                        # save the excess data to _rbuf
                        self._rbuf.write(data[nl:])
                        if buf_len:
                            buf.write(data[:nl])
                            break
                        else:
                            # Shortcut.  Avoid data copy through buf when returning
                            # a substring of our first recv().
                            return data[:nl]
                    n = len(data)
                    if n == size and not buf_len:
                        # Shortcut.  Avoid data copy through buf when
                        # returning exactly all of our first recv().
                        return data
                    if n >= left:
                        buf.write(data[:left])
                        self._rbuf.write(data[left:])
                        break
                    buf.write(data)
                    buf_len += n
                #assert buf_len == buf.tell()
                return buf.getvalue()
1087 else:
        def read(self, size=-1):
            """Read and return up to `size` bytes from the socket.

            This is the plain-str buffer variant: self._rbuf is a str
            holding bytes already received but not consumed.  A negative
            `size` reads until EOF.  Bytes received beyond `size` are
            left in self._rbuf for the next call.
            """
            if size < 0:
                # Read until EOF
                buffers = [self._rbuf]
                self._rbuf = ""
                if self._rbufsize <= 1:
                    # Unbuffered mode: still recv in big chunks here,
                    # since we are draining to EOF anyway.
                    recv_size = self.default_bufsize
                else:
                    recv_size = self._rbufsize

                while True:
                    data = self.recv(recv_size)
                    if not data:
                        break
                    buffers.append(data)
                return "".join(buffers)
            else:
                # Read until size bytes or EOF seen, whichever comes first
                data = self._rbuf
                buf_len = len(data)
                if buf_len >= size:
                    # Buffered data alone satisfies the request.
                    self._rbuf = data[size:]
                    return data[:size]
                buffers = []
                if data:
                    buffers.append(data)
                self._rbuf = ""
                while True:
                    left = size - buf_len
                    recv_size = max(self._rbufsize, left)
                    data = self.recv(recv_size)
                    if not data:
                        break
                    buffers.append(data)
                    n = len(data)
                    if n >= left:
                        # Got enough; stash the surplus back into _rbuf.
                        self._rbuf = data[left:]
                        buffers[-1] = data[:left]
                        break
                    buf_len += n
                return "".join(buffers)
        def readline(self, size=-1):
            """Read and return one line (up to `size` bytes) from the socket.

            Plain-str buffer variant of readline.  A negative `size`
            means read until '\n' or EOF.  Bytes received past the
            newline (or past `size`) are kept in self._rbuf.
            """
            data = self._rbuf
            if size < 0:
                # Read until \n or EOF, whichever comes first
                if self._rbufsize <= 1:
                    # Speed up unbuffered case
                    assert data == ""
                    buffers = []
                    # Byte-at-a-time recv so we never read past the newline.
                    while data != "\n":
                        data = self.recv(1)
                        if not data:
                            break
                        buffers.append(data)
                    return "".join(buffers)
                nl = data.find('\n')
                if nl >= 0:
                    # The buffered data already contains a full line.
                    nl += 1
                    self._rbuf = data[nl:]
                    return data[:nl]
                buffers = []
                if data:
                    buffers.append(data)
                self._rbuf = ""
                while True:
                    data = self.recv(self._rbufsize)
                    if not data:
                        break
                    buffers.append(data)
                    nl = data.find('\n')
                    if nl >= 0:
                        # Split the final chunk at the newline; the rest
                        # stays buffered for the next call.
                        nl += 1
                        self._rbuf = data[nl:]
                        buffers[-1] = data[:nl]
                        break
                return "".join(buffers)
            else:
                # Read until size bytes or \n or EOF seen, whichever comes first
                nl = data.find('\n', 0, size)
                if nl >= 0:
                    # A full line is already buffered.
                    nl += 1
                    self._rbuf = data[nl:]
                    return data[:nl]
                buf_len = len(data)
                if buf_len >= size:
                    # No newline within `size`, but enough raw bytes.
                    self._rbuf = data[size:]
                    return data[:size]
                buffers = []
                if data:
                    buffers.append(data)
                self._rbuf = ""
                while True:
                    data = self.recv(self._rbufsize)
                    if not data:
                        break
                    buffers.append(data)
                    left = size - buf_len
                    nl = data.find('\n', 0, left)
                    if nl >= 0:
                        nl += 1
                        self._rbuf = data[nl:]
                        buffers[-1] = data[:nl]
                        break
                    n = len(data)
                    if n >= left:
                        # Hit the size cap without a newline.
                        self._rbuf = data[left:]
                        buffers[-1] = data[:left]
                        break
                    buf_len += n
                return "".join(buffers)
class HTTPConnection(object):
    """An HTTP connection (active socket).

    server: the Server object which received this connection.
    socket: the raw socket object (usually TCP) for this connection.
    makefile: a fileobject class for reading from the socket.
    """

    remote_addr = None    # peer IP address, set by the server's tick()
    remote_port = None    # peer port, set by the server's tick()
    ssl_env = None        # WSGI environ entries produced by the SSL adapter
    rbufsize = DEFAULT_BUFFER_SIZE
    wbufsize = DEFAULT_BUFFER_SIZE
    RequestHandlerClass = HTTPRequest

    def __init__(self, server, sock, makefile=CP_fileobject):
        self.server = server
        self.socket = sock
        self.rfile = makefile(sock, "rb", self.rbufsize)
        self.wfile = makefile(sock, "wb", self.wbufsize)
        self.requests_seen = 0

    def communicate(self):
        """Read each request and respond appropriately."""
        request_seen = False
        try:
            while True:
                # (re)set req to None so that if something goes wrong in
                # the RequestHandlerClass constructor, the error doesn't
                # get written to the previous request.
                req = None
                req = self.RequestHandlerClass(self.server, self)

                # This order of operations should guarantee correct pipelining.
                req.parse_request()
                if self.server.stats['Enabled']:
                    self.requests_seen += 1
                if not req.ready:
                    # Something went wrong in the parsing (and the server has
                    # probably already made a simple_response). Return and
                    # let the conn close.
                    return

                request_seen = True
                req.respond()
                if req.close_connection:
                    return
        except socket.error, e:
            errnum = e.args[0]
            # sadly SSL sockets return a different (longer) time out string
            if errnum == 'timed out' or errnum == 'The read operation timed out':
                # Don't error if we're between requests; only error
                # if 1) no request has been started at all, or 2) we're
                # in the middle of a request.
                # See http://www.cherrypy.org/ticket/853
                if (not request_seen) or (req and req.started_request):
                    # Don't bother writing the 408 if the response
                    # has already started being written.
                    if req and not req.sent_headers:
                        try:
                            req.simple_response("408 Request Timeout")
                        except FatalSSLAlert:
                            # Close the connection.
                            return
            elif errnum not in socket_errors_to_ignore:
                if req and not req.sent_headers:
                    try:
                        req.simple_response("500 Internal Server Error",
                                            format_exc())
                    except FatalSSLAlert:
                        # Close the connection.
                        return
            return
        except (KeyboardInterrupt, SystemExit):
            raise
        except FatalSSLAlert:
            # Close the connection.
            return
        except NoSSLError:
            if req and not req.sent_headers:
                # Unwrap our wfile so the 400 goes out over plain TCP,
                # not through the (failed) SSL layer.
                self.wfile = CP_fileobject(self.socket._sock, "wb", self.wbufsize)
                req.simple_response("400 Bad Request",
                    "The client sent a plain HTTP request, but "
                    "this server only speaks HTTPS on this port.")
                self.linger = True
        except Exception:
            if req and not req.sent_headers:
                try:
                    req.simple_response("500 Internal Server Error", format_exc())
                except FatalSSLAlert:
                    # Close the connection.
                    return

    linger = False

    def close(self):
        """Close the socket underlying this connection."""
        self.rfile.close()

        if not self.linger:
            # Python's socket module does NOT call close on the kernel socket
            # when you call socket.close(). We do so manually here because we
            # want this server to send a FIN TCP segment immediately. Note this
            # must be called *before* calling socket.close(), because the latter
            # drops its reference to the kernel socket.
            if hasattr(self.socket, '_sock'):
                self.socket._sock.close()
            self.socket.close()
        else:
            # On the other hand, sometimes we want to hang around for a bit
            # to make sure the client has a chance to read our entire
            # response. Skipping the close() calls here delays the FIN
            # packet until the socket object is garbage-collected later.
            # Someday, perhaps, we'll do the full lingering_close that
            # Apache does, but not today.
            pass
1320 _SHUTDOWNREQUEST = None
class WorkerThread(threading.Thread):
    """Thread which continuously polls a Queue for Connection objects.

    Due to the timing issues of polling a Queue, a WorkerThread does not
    check its own 'ready' flag after it has started. To stop the thread,
    it is necessary to stick a _SHUTDOWNREQUEST object onto the Queue
    (one for each running WorkerThread).
    """

    conn = None
    """The current connection pulled off the Queue, or None."""

    server = None
    """The HTTP Server which spawned this thread, and which owns the
    Queue and is placing active connections into it."""

    ready = False
    """A simple flag for the calling server to know when this thread
    has begun polling the Queue."""


    def __init__(self, server):
        self.ready = False
        self.server = server

        # Per-thread statistics, accumulated from each finished connection.
        self.requests_seen = 0
        self.bytes_read = 0
        self.bytes_written = 0
        self.start_time = None
        self.work_time = 0
        # Each stat is a callable taking the stats dict itself, so derived
        # stats (throughput) can be computed from the base ones.  While a
        # connection is in flight (start_time is not None), live counters
        # from self.conn are folded in.
        self.stats = {
            'Requests': lambda s: self.requests_seen + ((self.start_time is None) and 0 or self.conn.requests_seen),
            'Bytes Read': lambda s: self.bytes_read + ((self.start_time is None) and 0 or self.conn.rfile.bytes_read),
            'Bytes Written': lambda s: self.bytes_written + ((self.start_time is None) and 0 or self.conn.wfile.bytes_written),
            'Work Time': lambda s: self.work_time + ((self.start_time is None) and 0 or time.time() - self.start_time),
            'Read Throughput': lambda s: s['Bytes Read'](s) / (s['Work Time'](s) or 1e-6),
            'Write Throughput': lambda s: s['Bytes Written'](s) / (s['Work Time'](s) or 1e-6),
        }
        threading.Thread.__init__(self)

    def run(self):
        # Register this thread's stats with the server before serving.
        self.server.stats['Worker Threads'][self.getName()] = self.stats
        try:
            self.ready = True
            while True:
                conn = self.server.requests.get()
                if conn is _SHUTDOWNREQUEST:
                    return

                self.conn = conn
                if self.server.stats['Enabled']:
                    self.start_time = time.time()
                try:
                    conn.communicate()
                finally:
                    # Always close the connection, then fold its counters
                    # into this thread's totals.
                    conn.close()
                    if self.server.stats['Enabled']:
                        self.requests_seen += self.conn.requests_seen
                        self.bytes_read += self.conn.rfile.bytes_read
                        self.bytes_written += self.conn.wfile.bytes_written
                        self.work_time += time.time() - self.start_time
                        self.start_time = None
                    self.conn = None
        except (KeyboardInterrupt, SystemExit), exc:
            # Hand the interrupt to the server's main loop.
            self.server.interrupt = exc
1389 class ThreadPool(object):
1390 """A Request Queue for the CherryPyWSGIServer which pools threads.
1392 ThreadPool objects must provide min, get(), put(obj), start()
1393 and stop(timeout) attributes.
1396 def __init__(self, server, min=10, max=-1):
1397 self.server = server
1398 self.min = min
1399 self.max = max
1400 self._threads = []
1401 self._queue = Queue.Queue()
1402 self.get = self._queue.get
1404 def start(self):
1405 """Start the pool of threads."""
1406 for i in range(self.min):
1407 self._threads.append(WorkerThread(self.server))
1408 for worker in self._threads:
1409 worker.setName("CP Server " + worker.getName())
1410 worker.start()
1411 for worker in self._threads:
1412 while not worker.ready:
1413 time.sleep(.1)
1415 def _get_idle(self):
1416 """Number of worker threads which are idle. Read-only."""
1417 return len([t for t in self._threads if t.conn is None])
1418 idle = property(_get_idle, doc=_get_idle.__doc__)
1420 def put(self, obj):
1421 self._queue.put(obj)
1422 if obj is _SHUTDOWNREQUEST:
1423 return
1425 def grow(self, amount):
1426 """Spawn new worker threads (not above self.max)."""
1427 for i in range(amount):
1428 if self.max > 0 and len(self._threads) >= self.max:
1429 break
1430 worker = WorkerThread(self.server)
1431 worker.setName("CP Server " + worker.getName())
1432 self._threads.append(worker)
1433 worker.start()
1435 def shrink(self, amount):
1436 """Kill off worker threads (not below self.min)."""
1437 # Grow/shrink the pool if necessary.
1438 # Remove any dead threads from our list
1439 for t in self._threads:
1440 if not t.isAlive():
1441 self._threads.remove(t)
1442 amount -= 1
1444 if amount > 0:
1445 for i in range(min(amount, len(self._threads) - self.min)):
1446 # Put a number of shutdown requests on the queue equal
1447 # to 'amount'. Once each of those is processed by a worker,
1448 # that worker will terminate and be culled from our list
1449 # in self.put.
1450 self._queue.put(_SHUTDOWNREQUEST)
1452 def stop(self, timeout=5):
1453 # Must shut down threads here so the code that calls
1454 # this method can know when all threads are stopped.
1455 for worker in self._threads:
1456 self._queue.put(_SHUTDOWNREQUEST)
1458 # Don't join currentThread (when stop is called inside a request).
1459 current = threading.currentThread()
1460 if timeout and timeout >= 0:
1461 endtime = time.time() + timeout
1462 while self._threads:
1463 worker = self._threads.pop()
1464 if worker is not current and worker.isAlive():
1465 try:
1466 if timeout is None or timeout < 0:
1467 worker.join()
1468 else:
1469 remaining_time = endtime - time.time()
1470 if remaining_time > 0:
1471 worker.join(remaining_time)
1472 if worker.isAlive():
1473 # We exhausted the timeout.
1474 # Forcibly shut down the socket.
1475 c = worker.conn
1476 if c and not c.rfile.closed:
1477 try:
1478 c.socket.shutdown(socket.SHUT_RD)
1479 except TypeError:
1480 # pyOpenSSL sockets don't take an arg
1481 c.socket.shutdown()
1482 worker.join()
1483 except (AssertionError,
1484 # Ignore repeated Ctrl-C.
1485 # See http://www.cherrypy.org/ticket/691.
1486 KeyboardInterrupt), exc1:
1487 pass
1489 def _get_qsize(self):
1490 return self._queue.qsize()
1491 qsize = property(_get_qsize)
# Platform-specific helper to keep listening sockets from being inherited
# by child processes.  Selected at import time: POSIX (fcntl) if available,
# else Windows (ctypes/kernel32), else a no-op.
try:
    import fcntl
except ImportError:
    try:
        from ctypes import windll, WinError
    except ImportError:
        def prevent_socket_inheritance(sock):
            """Dummy function, since neither fcntl nor ctypes are available."""
            pass
    else:
        def prevent_socket_inheritance(sock):
            """Mark the given socket fd as non-inheritable (Windows)."""
            if not windll.kernel32.SetHandleInformation(sock.fileno(), 1, 0):
                raise WinError()
else:
    def prevent_socket_inheritance(sock):
        """Mark the given socket fd as non-inheritable (POSIX)."""
        fd = sock.fileno()
        old_flags = fcntl.fcntl(fd, fcntl.F_GETFD)
        fcntl.fcntl(fd, fcntl.F_SETFD, old_flags | fcntl.FD_CLOEXEC)
class SSLAdapter(object):
    """Base class for SSL driver library adapters.

    Required methods:

        * ``wrap(sock) -> (wrapped socket, ssl environ dict)``
        * ``makefile(sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE) -> socket file object``
    """

    def __init__(self, certificate, private_key, certificate_chain=None):
        self.certificate = certificate
        self.private_key = private_key
        self.certificate_chain = certificate_chain

    def wrap(self, sock):
        """Wrap the given socket; return (wrapped socket, ssl environ dict).

        Subclasses must override.
        """
        # BUGFIX: was `raise NotImplemented`, which raises a TypeError
        # (NotImplemented is a constant, not an exception class).
        raise NotImplementedError

    def makefile(self, sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE):
        """Return a file-like object wrapping the given socket.

        Subclasses must override.
        """
        raise NotImplementedError
class HTTPServer(object):
    """An HTTP server."""

    _bind_addr = "127.0.0.1"
    _interrupt = None

    gateway = None
    """A Gateway instance."""

    minthreads = None
    """The minimum number of worker threads to create (default 10)."""

    maxthreads = None
    """The maximum number of worker threads to create (default -1 = no limit)."""

    server_name = None
    """The name of the server; defaults to socket.gethostname()."""

    protocol = "HTTP/1.1"
    """The version string to write in the Status-Line of all HTTP responses.

    For example, "HTTP/1.1" is the default. This also limits the supported
    features used in the response."""

    request_queue_size = 5
    """The 'backlog' arg to socket.listen(); max queued connections (default 5)."""

    shutdown_timeout = 5
    """The total time, in seconds, to wait for worker threads to cleanly exit."""

    timeout = 10
    """The timeout in seconds for accepted connections (default 10)."""

    version = "CherryPy/3.2.0"
    """A version string for the HTTPServer."""

    software = None
    """The value to set for the SERVER_SOFTWARE entry in the WSGI environ.

    If None, this defaults to ``'%s Server' % self.version``."""

    ready = False
    """An internal flag which marks whether the socket is accepting connections."""

    max_request_header_size = 0
    """The maximum size, in bytes, for request headers, or 0 for no limit."""

    max_request_body_size = 0
    """The maximum size, in bytes, for request bodies, or 0 for no limit."""

    nodelay = True
    """If True (the default since 3.1), sets the TCP_NODELAY socket option."""

    ConnectionClass = HTTPConnection
    """The class to use for handling HTTP connections."""

    ssl_adapter = None
    """An instance of SSLAdapter (or a subclass).

    You must have the corresponding SSL driver library installed."""

    def __init__(self, bind_addr, gateway, minthreads=10, maxthreads=-1,
                 server_name=None):
        self.bind_addr = bind_addr
        self.gateway = gateway

        self.requests = ThreadPool(self, min=minthreads or 1, max=maxthreads)

        if not server_name:
            server_name = socket.gethostname()
        self.server_name = server_name
        self.clear_stats()

    def clear_stats(self):
        """Reset all statistics counters and (re)register our stats dict."""
        self._start_time = None
        self._run_time = 0
        # Derived stats are callables taking the stats dict itself; they
        # are only meaningful while s['Enabled'] is True.
        self.stats = {
            'Enabled': False,
            'Bind Address': lambda s: repr(self.bind_addr),
            'Run time': lambda s: (not s['Enabled']) and 0 or self.runtime(),
            'Accepts': 0,
            'Accepts/sec': lambda s: s['Accepts'] / self.runtime(),
            'Queue': lambda s: getattr(self.requests, "qsize", None),
            'Threads': lambda s: len(getattr(self.requests, "_threads", [])),
            'Threads Idle': lambda s: getattr(self.requests, "idle", None),
            'Socket Errors': 0,
            'Requests': lambda s: (not s['Enabled']) and 0 or sum([w['Requests'](w) for w
                                       in s['Worker Threads'].values()], 0),
            'Bytes Read': lambda s: (not s['Enabled']) and 0 or sum([w['Bytes Read'](w) for w
                                         in s['Worker Threads'].values()], 0),
            'Bytes Written': lambda s: (not s['Enabled']) and 0 or sum([w['Bytes Written'](w) for w
                                            in s['Worker Threads'].values()], 0),
            'Work Time': lambda s: (not s['Enabled']) and 0 or sum([w['Work Time'](w) for w
                                         in s['Worker Threads'].values()], 0),
            'Read Throughput': lambda s: (not s['Enabled']) and 0 or sum(
                [w['Bytes Read'](w) / (w['Work Time'](w) or 1e-6)
                 for w in s['Worker Threads'].values()], 0),
            'Write Throughput': lambda s: (not s['Enabled']) and 0 or sum(
                [w['Bytes Written'](w) / (w['Work Time'](w) or 1e-6)
                 for w in s['Worker Threads'].values()], 0),
            'Worker Threads': {},
            }
        logging.statistics["CherryPy HTTPServer %d" % id(self)] = self.stats

    def runtime(self):
        """Return total seconds this server has been running (so far)."""
        if self._start_time is None:
            return self._run_time
        else:
            return self._run_time + (time.time() - self._start_time)

    def __str__(self):
        return "%s.%s(%r)" % (self.__module__, self.__class__.__name__,
                              self.bind_addr)

    def _get_bind_addr(self):
        return self._bind_addr
    def _set_bind_addr(self, value):
        if isinstance(value, tuple) and value[0] in ('', None):
            # Despite the socket module docs, using '' does not
            # allow AI_PASSIVE to work. Passing None instead
            # returns '0.0.0.0' like we want. In other words:
            #     host    AI_PASSIVE     result
            #      ''         Y         192.168.x.y
            #      ''         N         192.168.x.y
            #     None        Y         0.0.0.0
            #     None        N         127.0.0.1
            # But since you can get the same effect with an explicit
            # '0.0.0.0', we deny both the empty string and None as values.
            raise ValueError("Host values of '' or None are not allowed. "
                             "Use '0.0.0.0' (IPv4) or '::' (IPv6) instead "
                             "to listen on all active interfaces.")
        self._bind_addr = value
    bind_addr = property(_get_bind_addr, _set_bind_addr,
        doc="""The interface on which to listen for connections.

        For TCP sockets, a (host, port) tuple. Host values may be any IPv4
        or IPv6 address, or any valid hostname. The string 'localhost' is a
        synonym for '127.0.0.1' (or '::1', if your hosts file prefers IPv6).
        The string '0.0.0.0' is a special IPv4 entry meaning "any active
        interface" (INADDR_ANY), and '::' is the similar IN6ADDR_ANY for
        IPv6. The empty string or None are not allowed.

        For UNIX sockets, supply the filename as a string.""")

    def start(self):
        """Run the server forever."""
        # We don't have to trap KeyboardInterrupt or SystemExit here,
        # because cherrpy.server already does so, calling self.stop() for us.
        # If you're using this server with another framework, you should
        # trap those exceptions in whatever code block calls start().
        self._interrupt = None

        if self.software is None:
            self.software = "%s Server" % self.version

        # SSL backward compatibility
        if (self.ssl_adapter is None and
            getattr(self, 'ssl_certificate', None) and
            getattr(self, 'ssl_private_key', None)):
            warnings.warn(
                    "SSL attributes are deprecated in CherryPy 3.2, and will "
                    "be removed in CherryPy 3.3. Use an ssl_adapter attribute "
                    "instead.",
                    DeprecationWarning
                )
            try:
                from cherrypy.wsgiserver.ssl_pyopenssl import pyOpenSSLAdapter
            except ImportError:
                pass
            else:
                self.ssl_adapter = pyOpenSSLAdapter(
                    self.ssl_certificate, self.ssl_private_key,
                    getattr(self, 'ssl_certificate_chain', None))

        # Select the appropriate socket
        if isinstance(self.bind_addr, basestring):
            # AF_UNIX socket

            # So we can reuse the socket...
            try: os.unlink(self.bind_addr)
            except: pass

            # So everyone can access the socket...
            try: os.chmod(self.bind_addr, 0777)
            except: pass

            info = [(socket.AF_UNIX, socket.SOCK_STREAM, 0, "", self.bind_addr)]
        else:
            # AF_INET or AF_INET6 socket
            # Get the correct address family for our host (allows IPv6 addresses)
            host, port = self.bind_addr
            try:
                info = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
                                          socket.SOCK_STREAM, 0, socket.AI_PASSIVE)
            except socket.gaierror:
                # Name resolution failed; fall back on a literal address.
                if ':' in self.bind_addr[0]:
                    info = [(socket.AF_INET6, socket.SOCK_STREAM,
                             0, "", self.bind_addr + (0, 0))]
                else:
                    info = [(socket.AF_INET, socket.SOCK_STREAM,
                             0, "", self.bind_addr)]

        # Try each candidate address until one binds successfully.
        self.socket = None
        msg = "No socket could be created"
        for res in info:
            af, socktype, proto, canonname, sa = res
            try:
                self.bind(af, socktype, proto)
            except socket.error:
                if self.socket:
                    self.socket.close()
                self.socket = None
                continue
            break
        if not self.socket:
            raise socket.error(msg)

        # Timeout so KeyboardInterrupt can be caught on Win32
        self.socket.settimeout(1)
        self.socket.listen(self.request_queue_size)

        # Create worker threads
        self.requests.start()

        self.ready = True
        self._start_time = time.time()
        while self.ready:
            self.tick()
            if self.interrupt:
                while self.interrupt is True:
                    # Wait for self.stop() to complete. See _set_interrupt.
                    time.sleep(0.1)
                if self.interrupt:
                    raise self.interrupt

    def bind(self, family, type, proto=0):
        """Create (or recreate) the actual socket object."""
        self.socket = socket.socket(family, type, proto)
        prevent_socket_inheritance(self.socket)
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        if self.nodelay and not isinstance(self.bind_addr, str):
            self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)

        if self.ssl_adapter is not None:
            self.socket = self.ssl_adapter.bind(self.socket)

        # If listening on the IPV6 any address ('::' = IN6ADDR_ANY),
        # activate dual-stack. See http://www.cherrypy.org/ticket/871.
        if (hasattr(socket, 'AF_INET6') and family == socket.AF_INET6
            and self.bind_addr[0] in ('::', '::0', '::0.0.0.0')):
            try:
                self.socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
            except (AttributeError, socket.error):
                # Apparently, the socket option is not available in
                # this machine's TCP stack
                pass

        self.socket.bind(self.bind_addr)

    def tick(self):
        """Accept a new connection and put it on the Queue."""
        try:
            s, addr = self.socket.accept()
            if self.stats['Enabled']:
                self.stats['Accepts'] += 1
            if not self.ready:
                return

            prevent_socket_inheritance(s)
            if hasattr(s, 'settimeout'):
                s.settimeout(self.timeout)

            makefile = CP_fileobject
            ssl_env = {}
            # if ssl cert and key are set, we try to be a secure HTTP server
            if self.ssl_adapter is not None:
                try:
                    s, ssl_env = self.ssl_adapter.wrap(s)
                except NoSSLError:
                    # Client spoke plain HTTP to an HTTPS port; answer
                    # over the raw socket with a 400 and drop the conn.
                    msg = ("The client sent a plain HTTP request, but "
                           "this server only speaks HTTPS on this port.")
                    buf = ["%s 400 Bad Request\r\n" % self.protocol,
                           "Content-Length: %s\r\n" % len(msg),
                           "Content-Type: text/plain\r\n\r\n",
                           msg]

                    wfile = CP_fileobject(s, "wb", DEFAULT_BUFFER_SIZE)
                    try:
                        wfile.sendall("".join(buf))
                    except socket.error, x:
                        if x.args[0] not in socket_errors_to_ignore:
                            raise
                    return
                if not s:
                    return
                makefile = self.ssl_adapter.makefile
                # Re-apply our timeout since we may have a new socket object
                if hasattr(s, 'settimeout'):
                    s.settimeout(self.timeout)

            conn = self.ConnectionClass(self, s, makefile)

            if not isinstance(self.bind_addr, basestring):
                # optional values
                # Until we do DNS lookups, omit REMOTE_HOST
                if addr is None: # sometimes this can happen
                    # figure out if AF_INET or AF_INET6.
                    if len(s.getsockname()) == 2:
                        # AF_INET
                        addr = ('0.0.0.0', 0)
                    else:
                        # AF_INET6
                        addr = ('::', 0)
                conn.remote_addr = addr[0]
                conn.remote_port = addr[1]

            conn.ssl_env = ssl_env

            self.requests.put(conn)
        except socket.timeout:
            # The only reason for the timeout in start() is so we can
            # notice keyboard interrupts on Win32, which don't interrupt
            # accept() by default
            return
        except socket.error, x:
            if self.stats['Enabled']:
                self.stats['Socket Errors'] += 1
            if x.args[0] in socket_error_eintr:
                # I *think* this is right. EINTR should occur when a signal
                # is received during the accept() call; all docs say retry
                # the call, and I *think* I'm reading it right that Python
                # will then go ahead and poll for and handle the signal
                # elsewhere. See http://www.cherrypy.org/ticket/707.
                return
            if x.args[0] in socket_errors_nonblocking:
                # Just try again. See http://www.cherrypy.org/ticket/479.
                return
            if x.args[0] in socket_errors_to_ignore:
                # Our socket was closed.
                # See http://www.cherrypy.org/ticket/686.
                return
            raise

    def _get_interrupt(self):
        return self._interrupt
    def _set_interrupt(self, interrupt):
        # Set a True placeholder first so start() can spin-wait on it
        # until stop() has finished, then deliver the real exception.
        self._interrupt = True
        self.stop()
        self._interrupt = interrupt
    interrupt = property(_get_interrupt, _set_interrupt,
                         doc="Set this to an Exception instance to "
                             "interrupt the server.")

    def stop(self):
        """Gracefully shutdown a server that is serving forever."""
        self.ready = False
        if self._start_time is not None:
            self._run_time += (time.time() - self._start_time)
        self._start_time = None

        sock = getattr(self, "socket", None)
        if sock:
            if not isinstance(self.bind_addr, basestring):
                # Touch our own socket to make accept() return immediately.
                try:
                    host, port = sock.getsockname()[:2]
                except socket.error, x:
                    if x.args[0] not in socket_errors_to_ignore:
                        # Changed to use error code and not message
                        # See http://www.cherrypy.org/ticket/860.
                        raise
                else:
                    # Note that we're explicitly NOT using AI_PASSIVE,
                    # here, because we want an actual IP to touch.
                    # localhost won't work if we've bound to a public IP,
                    # but it will if we bound to '0.0.0.0' (INADDR_ANY).
                    for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC,
                                                  socket.SOCK_STREAM):
                        af, socktype, proto, canonname, sa = res
                        s = None
                        try:
                            s = socket.socket(af, socktype, proto)
                            # See http://groups.google.com/group/cherrypy-users/
                            #        browse_frm/thread/bbfe5eb39c904fe0
                            s.settimeout(1.0)
                            s.connect((host, port))
                            s.close()
                        except socket.error:
                            if s:
                                s.close()
            if hasattr(sock, "close"):
                sock.close()
            self.socket = None

        self.requests.stop(self.shutdown_timeout)
class Gateway(object):
    """Base class for gateways that adapt an HTTPRequest to an application API.

    req: the HTTPRequest object this gateway will respond to.
    """

    def __init__(self, req):
        self.req = req

    def respond(self):
        """Process self.req and write the response.  Subclasses must override.

        Raises NotImplementedError if not overridden.
        """
        # BUGFIX: was `raise NotImplemented`, which raises a TypeError
        # (NotImplemented is a constant, not an exception class).
        raise NotImplementedError
# Registry of SSL adapters consulted by get_ssl_adapter_class().
# These may either be wsgiserver.SSLAdapter subclasses or the string names
# of such classes (in which case they will be lazily loaded).
ssl_adapters = {
    'builtin': 'cherrypy.wsgiserver.ssl_builtin.BuiltinSSLAdapter',
    'pyopenssl': 'cherrypy.wsgiserver.ssl_pyopenssl.pyOpenSSLAdapter',
    }
def get_ssl_adapter_class(name='pyopenssl'):
    """Return the SSL adapter class registered under `name`.

    The lookup in the module-level ``ssl_adapters`` registry is
    case-insensitive.  String entries are resolved lazily: the dotted
    module path is imported (if not already) and the class attribute
    fetched from it.
    """
    entry = ssl_adapters[name.lower()]
    if not isinstance(entry, basestring):
        # Already a class object; nothing to resolve.
        return entry

    # Split "package.module.ClassName" at the last dot.
    dot = entry.rfind(".")
    attr_name = entry[dot + 1:]
    mod_path = entry[:dot]

    mod = sys.modules.get(mod_path)
    if mod is None:
        # Not imported yet (or registered as None).
        # The last [''] is important: it makes __import__ return the
        # leaf module rather than the top-level package.
        mod = __import__(mod_path, globals(), locals(), [''])

    # Let an AttributeError propagate outward.
    try:
        return getattr(mod, attr_name)
    except AttributeError:
        raise AttributeError("'%s' object has no attribute '%s'"
                             % (mod_path, attr_name))
1975 # -------------------------------- WSGI Stuff -------------------------------- #
class CherryPyWSGIServer(HTTPServer):
    """An HTTPServer subclass that dispatches requests to a WSGI application."""

    # The WSGI spec version implemented; selects the gateway from
    # wsgi_gateways in __init__.
    wsgi_version = (1, 0)

    def __init__(self, bind_addr, wsgi_app, numthreads=10, server_name=None,
                 max=-1, request_queue_size=5, timeout=10, shutdown_timeout=5):
        """Wire up the thread pool, gateway and socket parameters."""
        self.requests = ThreadPool(self, min=numthreads or 1, max=max)
        self.wsgi_app = wsgi_app
        self.gateway = wsgi_gateways[self.wsgi_version]

        self.bind_addr = bind_addr
        self.server_name = server_name or socket.gethostname()
        self.request_queue_size = request_queue_size
        self.timeout = timeout
        self.shutdown_timeout = shutdown_timeout
        self.clear_stats()

    def _get_numthreads(self):
        return self.requests.min

    def _set_numthreads(self, value):
        self.requests.min = value

    numthreads = property(_get_numthreads, _set_numthreads,
                          doc="The worker-thread count (alias for requests.min).")
2005 class WSGIGateway(Gateway):
2007 def __init__(self, req):
2008 self.req = req
2009 self.started_response = False
2010 self.env = self.get_environ()
2011 self.remaining_bytes_out = None
2013 def get_environ(self):
2014 """Return a new environ dict targeting the given wsgi.version"""
2015 raise NotImplemented
2017 def respond(self):
2018 response = self.req.server.wsgi_app(self.env, self.start_response)
2019 try:
2020 for chunk in response:
2021 # "The start_response callable must not actually transmit
2022 # the response headers. Instead, it must store them for the
2023 # server or gateway to transmit only after the first
2024 # iteration of the application return value that yields
2025 # a NON-EMPTY string, or upon the application's first
2026 # invocation of the write() callable." (PEP 333)
2027 if chunk:
2028 if isinstance(chunk, unicode):
2029 chunk = chunk.encode('ISO-8859-1')
2030 self.write(chunk)
2031 finally:
2032 if hasattr(response, "close"):
2033 response.close()
    def start_response(self, status, headers, exc_info = None):
        """WSGI callable to begin the HTTP response.

        Stores ``status`` and ``headers`` on the request for later
        transmission and returns the ``write`` callable, per PEP 333.
        A second call is only legal when ``exc_info`` is supplied.
        """
        # "The application may call start_response more than once,
        # if and only if the exc_info argument is provided."
        if self.started_response and not exc_info:
            raise AssertionError("WSGI start_response called a second "
                                 "time with no exc_info.")
        self.started_response = True

        # "if exc_info is provided, and the HTTP headers have already been
        # sent, start_response must raise an error, and should raise the
        # exc_info tuple."
        # NOTE: sent_headers can only be True here on a repeat call, which
        # the check above guarantees carries exc_info.
        if self.req.sent_headers:
            try:
                # Python 2 three-expression raise: re-raise the app's
                # exception with its original traceback.
                raise exc_info[0], exc_info[1], exc_info[2]
            finally:
                # Drop the reference to avoid a traceback reference cycle.
                exc_info = None

        self.req.status = status
        # PEP 333 requires header names and values to be native byte strings.
        for k, v in headers:
            if not isinstance(k, str):
                raise TypeError("WSGI response header key %r is not a byte string." % k)
            if not isinstance(v, str):
                raise TypeError("WSGI response header value %r is not a byte string." % v)
            if k.lower() == 'content-length':
                # Remember the declared body size so write() can police it.
                self.remaining_bytes_out = int(v)
        self.req.outheaders.extend(headers)

        return self.write
2065 def write(self, chunk):
2066 """WSGI callable to write unbuffered data to the client.
2068 This method is also used internally by start_response (to write
2069 data from the iterable returned by the WSGI application).
2071 if not self.started_response:
2072 raise AssertionError("WSGI write called before start_response.")
2074 chunklen = len(chunk)
2075 rbo = self.remaining_bytes_out
2076 if rbo is not None and chunklen > rbo:
2077 if not self.req.sent_headers:
2078 # Whew. We can send a 500 to the client.
2079 self.req.simple_response("500 Internal Server Error",
2080 "The requested resource returned more bytes than the "
2081 "declared Content-Length.")
2082 else:
2083 # Dang. We have probably already sent data. Truncate the chunk
2084 # to fit (so the client doesn't hang) and raise an error later.
2085 chunk = chunk[:rbo]
2087 if not self.req.sent_headers:
2088 self.req.sent_headers = True
2089 self.req.send_headers()
2091 self.req.write(chunk)
2093 if rbo is not None:
2094 rbo -= chunklen
2095 if rbo < 0:
2096 raise ValueError(
2097 "Response body exceeds the declared Content-Length.")
class WSGIGateway_10(WSGIGateway):
    """A WSGI gateway building a (1, 0) byte-string environ."""

    def get_environ(self):
        """Return a new environ dict targeting the given wsgi.version"""
        req = self.req
        conn = req.conn
        server = req.server

        env = {
            # set a non-standard environ entry so the WSGI app can know what
            # the *real* server protocol is (and what features to support).
            # See http://www.faqs.org/rfcs/rfc2145.html.
            'ACTUAL_SERVER_PROTOCOL': server.protocol,
            'PATH_INFO': req.path,
            'QUERY_STRING': req.qs,
            'REMOTE_ADDR': conn.remote_addr or '',
            'REMOTE_PORT': str(conn.remote_port or ''),
            'REQUEST_METHOD': req.method,
            'REQUEST_URI': req.uri,
            'SCRIPT_NAME': '',
            'SERVER_NAME': server.server_name,
            # Bah. "SERVER_PROTOCOL" is actually the REQUEST protocol.
            'SERVER_PROTOCOL': req.request_protocol,
            'SERVER_SOFTWARE': server.software,
            'wsgi.errors': sys.stderr,
            'wsgi.input': req.rfile,
            'wsgi.multiprocess': False,
            'wsgi.multithread': True,
            'wsgi.run_once': False,
            'wsgi.url_scheme': req.scheme,
            'wsgi.version': (1, 0),
            }

        if isinstance(server.bind_addr, basestring):
            # AF_UNIX. This isn't really allowed by WSGI, which doesn't
            # address unix domain sockets. But it's better than nothing.
            env["SERVER_PORT"] = ""
        else:
            env["SERVER_PORT"] = str(server.bind_addr[1])

        # Request headers become HTTP_* environ entries.
        for name, value in req.inheaders.iteritems():
            env["HTTP_" + name.upper().replace("-", "_")] = value

        # Per CGI, Content-Type and Content-Length lose the HTTP_ prefix.
        for cgi_name, http_name in [("CONTENT_TYPE", "HTTP_CONTENT_TYPE"),
                                    ("CONTENT_LENGTH", "HTTP_CONTENT_LENGTH")]:
            value = env.pop(http_name, None)
            if value is not None:
                env[cgi_name] = value

        if conn.ssl_env:
            env.update(conn.ssl_env)

        return env
class WSGIGateway_u0(WSGIGateway_10):
    """A WSGI gateway building a ('u', 0) unicode environ."""

    def get_environ(self):
        """Return a new environ dict targeting the given wsgi.version"""
        base = WSGIGateway_10.get_environ(self)
        # Re-key the byte-string environ with unicode keys.
        env = dict([(key.decode('ISO-8859-1'), value)
                    for key, value in base.iteritems()])
        env[u'wsgi.version'] = ('u', 0)

        # Request-URI components: try the preferred encoding first and
        # fall back to latin-1 so apps can transcode if needed.
        env.setdefault(u'wsgi.url_encoding', u'utf-8')
        url_keys = [u"PATH_INFO", u"SCRIPT_NAME", u"QUERY_STRING"]
        try:
            for key in url_keys:
                env[key] = base[str(key)].decode(env[u'wsgi.url_encoding'])
        except UnicodeDecodeError:
            env[u'wsgi.url_encoding'] = u'ISO-8859-1'
            for key in url_keys:
                env[key] = base[str(key)].decode(env[u'wsgi.url_encoding'])

        # Decode every remaining byte-string value except the exempt keys.
        for key, value in sorted(env.items()):
            if isinstance(value, str) and key not in ('REQUEST_URI', 'wsgi.input'):
                env[key] = value.decode('ISO-8859-1')

        return env
# Registry mapping each supported wsgi.version tuple to the gateway
# class that implements it.
wsgi_gateways = {
    (1, 0): WSGIGateway_10,
    ('u', 0): WSGIGateway_u0,
}
class WSGIPathInfoDispatcher(object):
    """A WSGI dispatcher for dispatch based on the PATH_INFO.

    apps: a dict or list of (path_prefix, app) pairs.
    """

    def __init__(self, apps):
        try:
            apps = list(apps.items())
        except AttributeError:
            pass

        # Sort the apps by len(path_prefix), descending, so the most
        # specific prefix is tried first.  Using key=/reverse= (Python 2.4+)
        # instead of the deprecated, Python 3-removed cmp= argument; sorted()
        # also avoids mutating a caller-supplied list in place.
        apps = sorted(apps, key=lambda pair: len(pair[0]), reverse=True)

        # The path_prefix strings must start, but not end, with a slash.
        # Use "" instead of "/".
        self.apps = [(p.rstrip("/"), a) for p, a in apps]

    def __call__(self, environ, start_response):
        path = environ["PATH_INFO"] or "/"
        for p, app in self.apps:
            # The apps list should be sorted by length, descending.
            if path.startswith(p + "/") or path == p:
                # Shift the matched prefix from PATH_INFO to SCRIPT_NAME
                # on a copy, leaving the caller's environ untouched.
                environ = environ.copy()
                environ["SCRIPT_NAME"] = environ["SCRIPT_NAME"] + p
                environ["PATH_INFO"] = path[len(p):]
                return app(environ, start_response)

        # No configured prefix matched; reply with an empty 404.
        start_response('404 Not Found', [('Content-Type', 'text/plain'),
                                         ('Content-Length', '0')])
        return ['']