Enable doctest running for several other documents.
[python.git] / Lib / urllib2.py
blob437a813696dc5c9bef5a58146b37cb1796eb434a
1 """An extensible library for opening URLs using a variety of protocols
3 The simplest way to use this module is to call the urlopen function,
4 which accepts a string containing a URL or a Request object (described
5 below). It opens the URL and returns the results as file-like
6 object; the returned object has some extra methods described below.
8 The OpenerDirector manages a collection of Handler objects that do
9 all the actual work. Each Handler implements a particular protocol or
10 option. The OpenerDirector is a composite object that invokes the
11 Handlers needed to open the requested URL. For example, the
12 HTTPHandler performs HTTP GET and POST requests and deals with
13 non-error returns. The HTTPRedirectHandler automatically deals with
14 HTTP 301, 302, 303 and 307 redirect errors, and the HTTPDigestAuthHandler
15 deals with digest authentication.
17 urlopen(url, data=None) -- Basic usage is the same as original
18 urllib. Pass the url and optionally data to post to an HTTP URL, and
19 get a file-like object back. One difference is that you can also pass
20 a Request instance instead of URL. Raises a URLError (subclass of
21 IOError); for HTTP errors, raises an HTTPError, which can also be
22 treated as a valid response.
24 build_opener -- Function that creates a new OpenerDirector instance.
25 Will install the default handlers. Accepts one or more Handlers as
26 arguments, either instances or Handler classes that it will
27 instantiate. If one of the arguments is a subclass of the default
28 handler, the argument will be installed instead of the default.
30 install_opener -- Installs a new opener as the default opener.
32 objects of interest:
33 OpenerDirector --
35 Request -- An object that encapsulates the state of a request. The
36 state can be as simple as the URL. It can also include extra HTTP
37 headers, e.g. a User-Agent.
39 BaseHandler --
41 exceptions:
42 URLError -- A subclass of IOError, individual protocols have their own
43 specific subclass.
45 HTTPError -- Also a valid HTTP response, so you can treat an HTTP error
46 as an exceptional event or valid response.
48 internals:
49 BaseHandler and parent
50 _call_chain conventions
52 Example usage:
54 import urllib2
56 # set up authentication info
57 authinfo = urllib2.HTTPBasicAuthHandler()
58 authinfo.add_password(realm='PDQ Application',
59 uri='https://mahler:8092/site-updates.py',
60 user='klem',
61 passwd='geheim$parole')
63 proxy_support = urllib2.ProxyHandler({"http" : "http://ahad-haam:3128"})
65 # build a new opener that adds authentication and caching FTP handlers
66 opener = urllib2.build_opener(proxy_support, authinfo, urllib2.CacheFTPHandler)
68 # install it
69 urllib2.install_opener(opener)
71 f = urllib2.urlopen('http://www.python.org/')
74 """
76 # XXX issues:
77 # If an authentication error handler that tries to perform
78 # authentication for some reason but fails, how should the error be
79 # signalled? The client needs to know the HTTP error code. But if
80 # the handler knows what the problem was, e.g., that it didn't know
81 # the hash algorithm requested in the challenge, it would be good to
82 # pass that information along to the client, too.
83 # ftp errors aren't handled cleanly
84 # check digest against correct (i.e. non-apache) implementation
86 # Possible extensions:
87 # complex proxies XXX not sure what exactly was meant by this
88 # abstract factory for opener
90 import base64
91 import hashlib
92 import httplib
93 import mimetools
94 import os
95 import posixpath
96 import random
97 import re
98 import socket
99 import sys
100 import time
101 import urlparse
102 import bisect
104 try:
105 from cStringIO import StringIO
106 except ImportError:
107 from StringIO import StringIO
109 from urllib import (unwrap, unquote, splittype, splithost, quote,
110 addinfourl, splitport, splitquery,
111 splitattr, ftpwrapper, noheaders, splituser, splitpasswd, splitvalue)
113 # support for FileHandler, proxies via environment variables
114 from urllib import localhost, url2pathname, getproxies
116 # used in User-Agent header sent
117 __version__ = sys.version[:3]
# Shared opener instance, created lazily by urlopen() and replaceable
# via install_opener().
_opener = None

def urlopen(url, data=None, timeout=None):
    """Open *url* using the globally installed opener.

    *url* may be a string or a Request instance; supplying *data*
    makes the request a POST.  The default opener is built on first
    use and cached in the module-level _opener.
    """
    global _opener
    if _opener is None:
        _opener = build_opener()
    return _opener.open(url, data, timeout)
def install_opener(opener):
    """Make *opener* the global opener used by urlopen()."""
    global _opener
    _opener = opener
# do these error classes make sense?
# make sure all of the IOError stuff is overridden.  we just want to be
# subtypes.

class URLError(IOError):
    """Error raised when a URL cannot be opened.

    Subclasses IOError only so existing ``except IOError`` code keeps
    working; none of the IOError implementation is shared.  ``args``
    is populated for EnvironmentError compatibility, but it does NOT
    hold the usual (errno, strerror) pair -- just the reason.
    """

    def __init__(self, reason):
        self.reason = reason
        self.args = (reason,)

    def __str__(self):
        return '<urlopen error %s>' % self.reason
class HTTPError(URLError, addinfourl):
    """Raised when an HTTP error occurs, but also acts like a non-error return.

    Because it inherits from addinfourl it can be read like the object
    returned by urlopen(), so callers may treat a non-2xx status either
    as an exception or as a response.
    """
    __super_init = addinfourl.__init__

    def __init__(self, url, code, msg, hdrs, fp):
        self.code = code
        self.msg = msg
        self.hdrs = hdrs
        self.fp = fp
        self.filename = url
        # addinfourl requires a real file object for fp.  Some errors
        # (e.g. ones synthesized locally) carry fp=None; the simplest
        # workaround is to skip base-class initialization in that case.
        if fp is not None:
            self.__super_init(fp, hdrs, url)

    def __str__(self):
        return 'HTTP Error %s: %s' % (self.code, self.msg)
167 # copied from cookielib.py
168 _cut_port_re = re.compile(r":\d+$")
169 def request_host(request):
170 """Return request-host, as defined by RFC 2965.
172 Variation from RFC: returned value is lowercased, for convenient
173 comparison.
176 url = request.get_full_url()
177 host = urlparse.urlparse(url)[1]
178 if host == "":
179 host = request.get_header("Host", "")
181 # remove port, if present
182 host = _cut_port_re.sub("", host, 1)
183 return host.lower()
class Request:
    """Encapsulate the state of a single URL request.

    Holds the URL, optional POST data, normal and "unredirected"
    headers, plus the origin-request-host / unverifiable flags used by
    RFC 2965 cookie processing.  Scheme/host are split out of the URL
    lazily by get_type() / get_host().
    """

    def __init__(self, url, data=None, headers={},
                 origin_req_host=None, unverifiable=False):
        # unwrap('<URL:type://host/path>') --> 'type://host/path'
        self.__original = unwrap(url)
        self.type = None
        # self.__r_type is what's left after doing the splittype
        self.host = None
        self.port = None
        self.data = data
        self.headers = {}
        # copy the caller's headers (never share the dict -- note the
        # mutable {} default is safe because it is only iterated),
        # normalizing key capitalization via add_header
        for key, value in headers.items():
            self.add_header(key, value)
        self.unredirected_hdrs = {}
        if origin_req_host is None:
            origin_req_host = request_host(self)
        self.origin_req_host = origin_req_host
        self.unverifiable = unverifiable

    def __getattr__(self, attr):
        # XXX this is a fallback mechanism to guard against these
        # methods getting called in a non-standard order.  this may be
        # too complicated and/or unnecessary.
        # XXX should the __r_XXX attributes be public?
        if attr[:12] == '_Request__r_':
            name = attr[12:]
            if hasattr(Request, 'get_' + name):
                # the getter populates the attribute as a side effect;
                # afterwards a normal lookup succeeds
                getattr(self, 'get_' + name)()
                return getattr(self, attr)
        raise AttributeError, attr

    def get_method(self):
        # POST iff data is present, otherwise GET
        if self.has_data():
            return "POST"
        else:
            return "GET"

    # XXX these helper methods are lame

    def add_data(self, data):
        self.data = data

    def has_data(self):
        return self.data is not None

    def get_data(self):
        return self.data

    def get_full_url(self):
        return self.__original

    def get_type(self):
        # lazily split the scheme off the original URL
        if self.type is None:
            self.type, self.__r_type = splittype(self.__original)
            if self.type is None:
                raise ValueError, "unknown url type: %s" % self.__original
        return self.type

    def get_host(self):
        # lazily split the host out of the scheme-less remainder
        if self.host is None:
            self.host, self.__r_host = splithost(self.__r_type)
            if self.host:
                self.host = unquote(self.host)
        return self.host

    def get_selector(self):
        # the path (plus query) portion sent on the request line
        return self.__r_host

    def set_proxy(self, host, type):
        # when proxying, the request line carries the full original URL
        self.host, self.type = host, type
        self.__r_host = self.__original

    def get_origin_req_host(self):
        return self.origin_req_host

    def is_unverifiable(self):
        return self.unverifiable

    def add_header(self, key, val):
        # useful for something like authentication
        self.headers[key.capitalize()] = val

    def add_unredirected_header(self, key, val):
        # will not be added to a redirected request
        self.unredirected_hdrs[key.capitalize()] = val

    def has_header(self, header_name):
        return (header_name in self.headers or
                header_name in self.unredirected_hdrs)

    def get_header(self, header_name, default=None):
        # normal headers take precedence over unredirected ones
        return self.headers.get(
            header_name,
            self.unredirected_hdrs.get(header_name, default))

    def header_items(self):
        hdrs = self.unredirected_hdrs.copy()
        hdrs.update(self.headers)
        return hdrs.items()
class OpenerDirector:
    """Manage a collection of handlers and use them to open URLs.

    Handlers register via add_handler(), which discovers their
    ``<protocol>_open`` / ``<protocol>_error_<code>`` /
    ``<protocol>_request`` / ``<protocol>_response`` methods by naming
    convention.  open() then runs request processors, the protocol
    open chain, and response processors, in handler_order sequence.
    """

    def __init__(self):
        client_version = "Python-urllib/%s" % __version__
        self.addheaders = [('User-agent', client_version)]
        # manage the individual handlers
        self.handlers = []
        self.handle_open = {}
        self.handle_error = {}
        self.process_response = {}
        self.process_request = {}

    def add_handler(self, handler):
        """Register *handler*'s protocol methods with this director."""
        if not hasattr(handler, "add_parent"):
            raise TypeError("expected BaseHandler instance, got %r" %
                            type(handler))

        added = False
        for meth in dir(handler):
            if meth in ["redirect_request", "do_open", "proxy_open"]:
                # oops, coincidental match with the naming convention
                continue

            # split "protocol_condition" on the first underscore
            i = meth.find("_")
            protocol = meth[:i]
            condition = meth[i+1:]

            if condition.startswith("error"):
                # "http_error_404" -> protocol "http", kind 404;
                # non-numeric suffixes are kept as strings
                j = condition.find("_") + i + 1
                kind = meth[j+1:]
                try:
                    kind = int(kind)
                except ValueError:
                    pass
                lookup = self.handle_error.get(protocol, {})
                self.handle_error[protocol] = lookup
            elif condition == "open":
                kind = protocol
                lookup = self.handle_open
            elif condition == "response":
                kind = protocol
                lookup = self.process_response
            elif condition == "request":
                kind = protocol
                lookup = self.process_request
            else:
                continue

            # keep each chain sorted by handler_order (BaseHandler.__lt__)
            handlers = lookup.setdefault(kind, [])
            if handlers:
                bisect.insort(handlers, handler)
            else:
                handlers.append(handler)
            added = True

        if added:
            # the handlers must work in a specific order, the order
            # is specified in a Handler attribute
            bisect.insort(self.handlers, handler)
            handler.add_parent(self)

    def close(self):
        # Only exists for backwards compatibility.
        pass

    def _call_chain(self, chain, kind, meth_name, *args):
        # Handlers raise an exception if no one else should try to handle
        # the request, or return None if they can't but another handler
        # could.  Otherwise, they return the response.
        handlers = chain.get(kind, ())
        for handler in handlers:
            func = getattr(handler, meth_name)

            result = func(*args)
            if result is not None:
                return result

    def open(self, fullurl, data=None, timeout=None):
        """Open a URL string or Request object and return the response."""
        # accept a URL or a Request object
        if isinstance(fullurl, basestring):
            req = Request(fullurl, data)
        else:
            req = fullurl
            if data is not None:
                req.add_data(data)

        req.timeout = timeout
        protocol = req.get_type()

        # pre-process request
        meth_name = protocol+"_request"
        for processor in self.process_request.get(protocol, []):
            meth = getattr(processor, meth_name)
            req = meth(req)

        response = self._open(req, data)

        # post-process response
        meth_name = protocol+"_response"
        for processor in self.process_response.get(protocol, []):
            meth = getattr(processor, meth_name)
            response = meth(req, response)

        return response

    def _open(self, req, data=None):
        # try default_open first, then <protocol>_open, then unknown_open
        result = self._call_chain(self.handle_open, 'default',
                                  'default_open', req)
        if result:
            return result

        protocol = req.get_type()
        result = self._call_chain(self.handle_open, protocol, protocol +
                                  '_open', req)
        if result:
            return result

        return self._call_chain(self.handle_open, 'unknown',
                                'unknown_open', req)

    def error(self, proto, *args):
        """Dispatch an error to the registered error handler chain."""
        if proto in ('http', 'https'):
            # XXX http[s] protocols are special-cased: the HTTP status
            # code (args[2]) selects the handler method
            dict = self.handle_error['http'] # https is not different than http
            proto = args[2]  # YUCK!
            meth_name = 'http_error_%s' % proto
            http_err = 1
            orig_args = args
        else:
            dict = self.handle_error
            meth_name = proto + '_error'
            http_err = 0
        args = (dict, proto, meth_name) + args
        result = self._call_chain(*args)
        if result:
            return result

        # nothing handled the specific code: fall back to the default
        # HTTP error handler
        if http_err:
            args = (dict, 'default', 'http_error_default') + orig_args
            return self._call_chain(*args)
# XXX probably also want an abstract factory that knows when it makes
# sense to skip a superclass in favor of a subclass and when it might
# make sense to include both

def build_opener(*handlers):
    """Create an opener object from a list of handlers.

    The opener will use several default handlers, including support
    for HTTP and FTP.

    If any of the handlers passed as arguments are subclasses of the
    default handlers, the default handlers will not be used.
    """
    import types
    def isclass(obj):
        # old-style or new-style class
        return isinstance(obj, types.ClassType) or hasattr(obj, "__bases__")

    opener = OpenerDirector()
    default_classes = [ProxyHandler, UnknownHandler, HTTPHandler,
                       HTTPDefaultErrorHandler, HTTPRedirectHandler,
                       FTPHandler, FileHandler, HTTPErrorProcessor]
    if hasattr(httplib, 'HTTPS'):
        default_classes.append(HTTPSHandler)
    # Use a set so that a default class is recorded at most once even if
    # several of the supplied handlers match it; with a list, a duplicate
    # entry made default_classes.remove() raise ValueError on the second
    # removal attempt.
    skip = set()
    for klass in default_classes:
        for check in handlers:
            if isclass(check):
                if issubclass(check, klass):
                    skip.add(klass)
            elif isinstance(check, klass):
                skip.add(klass)
    for klass in skip:
        default_classes.remove(klass)

    for klass in default_classes:
        opener.add_handler(klass())

    for h in handlers:
        if isclass(h):
            # instantiate classes passed in place of instances
            h = h()
        opener.add_handler(h)
    return opener
class BaseHandler:
    """Parent class for protocol handlers managed by OpenerDirector."""

    # Position in the handler chain; handlers run in increasing order.
    handler_order = 500

    def add_parent(self, parent):
        # called by OpenerDirector.add_handler to link back to the director
        self.parent = parent

    def close(self):
        # Only exists for backwards compatibility
        pass

    def __lt__(self, other):
        try:
            other_order = other.handler_order
        except AttributeError:
            # Preserve the old behavior of sorting custom classes that
            # are unaware of handler_order after the default ones.
            return True
        return self.handler_order < other_order
class HTTPErrorProcessor(BaseHandler):
    """Turn non-2xx HTTP responses into errors via the parent's error()."""

    handler_order = 1000  # after all other response processing

    def http_response(self, request, response):
        code, msg, hdrs = response.code, response.msg, response.info()

        # According to RFC 2616, only a "2xx" code indicates that the
        # client's request was successfully received, understood, and
        # accepted; everything else goes through the error machinery.
        if code < 200 or code >= 300:
            response = self.parent.error(
                'http', request, response, code, msg, hdrs)

        return response

    https_response = http_response
class HTTPDefaultErrorHandler(BaseHandler):
    """Last-resort handler: raise HTTPError for any unhandled HTTP error."""

    def http_error_default(self, req, fp, code, msg, hdrs):
        raise HTTPError(req.get_full_url(), code, msg, hdrs, fp)
class HTTPRedirectHandler(BaseHandler):
    """Follow HTTP 301/302/303/307 redirects, with loop detection."""

    # maximum number of redirections to any single URL
    # this is needed because of the state that cookies introduce
    max_repeats = 4
    # maximum total number of redirections (regardless of URL) before
    # assuming we're in a loop
    max_redirections = 10

    def redirect_request(self, req, fp, code, msg, headers, newurl):
        """Return a Request or None in response to a redirect.

        This is called by the http_error_30x methods when a
        redirection response is received.  If a redirection should
        take place, return a new Request to allow http_error_30x to
        perform the redirect.  Otherwise, raise HTTPError if no-one
        else should try to handle this url.  Return None if you can't
        but another Handler might.
        """
        m = req.get_method()
        if (code in (301, 302, 303, 307) and m in ("GET", "HEAD")
            or code in (301, 302, 303) and m == "POST"):
            # Strictly (according to RFC 2616), 301 or 302 in response
            # to a POST MUST NOT cause a redirection without confirmation
            # from the user (of urllib2, in this case).  In practice,
            # essentially all clients do redirect in this case, so we
            # do the same.
            # be conciliant with URIs containing a space
            newurl = newurl.replace(' ', '%20')
            # drop entity headers: the redirected request is a GET
            newheaders = dict((k,v) for k,v in req.headers.items()
                              if k.lower() not in ("content-length", "content-type")
                             )
            return Request(newurl,
                           headers=newheaders,
                           origin_req_host=req.get_origin_req_host(),
                           unverifiable=True)
        else:
            raise HTTPError(req.get_full_url(), code, msg, headers, fp)

    # Implementation note: To avoid the server sending us into an
    # infinite loop, the request object needs to track what URLs we
    # have already seen.  Do this by adding a handler-specific
    # attribute to the Request object.
    def http_error_302(self, req, fp, code, msg, headers):
        # Some servers (incorrectly) return multiple Location headers
        # (so probably same goes for URI).  Use first header.
        if 'location' in headers:
            newurl = headers.getheaders('location')[0]
        elif 'uri' in headers:
            newurl = headers.getheaders('uri')[0]
        else:
            return
        newurl = urlparse.urljoin(req.get_full_url(), newurl)

        # XXX Probably want to forget about the state of the current
        # request, although that might interact poorly with other
        # handlers that also use handler-specific request attributes
        new = self.redirect_request(req, fp, code, msg, headers, newurl)
        if new is None:
            return

        # loop detection
        # .redirect_dict has a key url if url was previously visited.
        if hasattr(req, 'redirect_dict'):
            visited = new.redirect_dict = req.redirect_dict
            if (visited.get(newurl, 0) >= self.max_repeats or
                len(visited) >= self.max_redirections):
                raise HTTPError(req.get_full_url(), code,
                                self.inf_msg + msg, headers, fp)
        else:
            visited = new.redirect_dict = req.redirect_dict = {}
        visited[newurl] = visited.get(newurl, 0) + 1

        # Don't close the fp until we are sure that we won't use it
        # with HTTPError.
        fp.read()
        fp.close()

        return self.parent.open(new)

    http_error_301 = http_error_303 = http_error_307 = http_error_302

    inf_msg = "The HTTP server returned a redirect error that would " \
              "lead to an infinite loop.\n" \
              "The last 30x error message was:\n"
def _parse_proxy(proxy):
    """Return (scheme, user, password, host/port) given a URL or an authority.

    If a URL is supplied, it must have an authority (host:port) component.
    According to RFC 3986, having an authority component means the URL must
    have two slashes after the scheme:

    >>> _parse_proxy('file:/ftp.example.com/')
    Traceback (most recent call last):
    ValueError: proxy URL with no authority: 'file:/ftp.example.com/'

    The first three items of the returned tuple may be None.

    Examples of authority parsing:

    >>> _parse_proxy('proxy.example.com')
    (None, None, None, 'proxy.example.com')
    >>> _parse_proxy('proxy.example.com:3128')
    (None, None, None, 'proxy.example.com:3128')

    The authority component may optionally include userinfo (assumed to be
    username:password):

    >>> _parse_proxy('joe:password@proxy.example.com')
    (None, 'joe', 'password', 'proxy.example.com')
    >>> _parse_proxy('joe:password@proxy.example.com:3128')
    (None, 'joe', 'password', 'proxy.example.com:3128')

    Same examples, but with URLs instead:

    >>> _parse_proxy('http://proxy.example.com/')
    ('http', None, None, 'proxy.example.com')
    >>> _parse_proxy('http://proxy.example.com:3128/')
    ('http', None, None, 'proxy.example.com:3128')
    >>> _parse_proxy('http://joe:password@proxy.example.com/')
    ('http', 'joe', 'password', 'proxy.example.com')
    >>> _parse_proxy('http://joe:password@proxy.example.com:3128')
    ('http', 'joe', 'password', 'proxy.example.com:3128')

    Everything after the authority is ignored:

    >>> _parse_proxy('ftp://joe:password@proxy.example.com/rubbish:3128')
    ('ftp', 'joe', 'password', 'proxy.example.com')

    Test for no trailing '/' case:

    >>> _parse_proxy('http://joe:password@proxy.example.com')
    ('http', 'joe', 'password', 'proxy.example.com')

    """
    scheme, r_scheme = splittype(proxy)
    if not r_scheme.startswith("/"):
        # bare authority: "host", "host:port", possibly with userinfo
        scheme = None
        authority = proxy
    else:
        # full URL; RFC 3986 requires '//' before the authority
        if not r_scheme.startswith("//"):
            raise ValueError("proxy URL with no authority: %r" % proxy)
        # For RFC 3986-compliant URLs (by ss 3.2. and 3.3.) the path is
        # empty or begins with '/', so the authority ends at the first
        # slash after the '//' (or at the end of the string).
        slash = r_scheme.find("/", 2)
        if slash == -1:
            authority = r_scheme[2:]
        else:
            authority = r_scheme[2:slash]
    userinfo, hostport = splituser(authority)
    if userinfo is None:
        user = password = None
    else:
        user, password = splitpasswd(userinfo)
    return scheme, user, password, hostport
class ProxyHandler(BaseHandler):
    """Route requests through the proxies in a scheme -> proxy-URL mapping."""

    # Proxies must be in front of every other handler
    handler_order = 100

    def __init__(self, proxies=None):
        if proxies is None:
            proxies = getproxies()
        assert hasattr(proxies, 'has_key'), "proxies must be a mapping"
        self.proxies = proxies
        # Grow one <scheme>_open method per configured scheme; the
        # lambda's default arguments freeze the per-scheme values.
        for type, url in proxies.items():
            setattr(self, '%s_open' % type,
                    lambda r, proxy=url, type=type, meth=self.proxy_open:
                        meth(r, proxy, type))

    def proxy_open(self, req, proxy, type):
        orig_type = req.get_type()
        proxy_type, user, password, hostport = _parse_proxy(proxy)
        if proxy_type is None:
            proxy_type = orig_type
        if user and password:
            # userinfo embedded in the proxy URL: pre-authorize
            user_pass = '%s:%s' % (unquote(user), unquote(password))
            creds = base64.b64encode(user_pass).strip()
            req.add_header('Proxy-authorization', 'Basic ' + creds)
        hostport = unquote(hostport)
        req.set_proxy(hostport, proxy_type)
        if orig_type != proxy_type:
            # need to start over, because the other handlers don't
            # grok the proxy's URL type
            # e.g. if we have a constructor arg proxies like so:
            # {'http': 'ftp://proxy.example.com'}, we may end up turning
            # a request for http://acme.example.com/a into one for
            # ftp://proxy.example.com/a
            return self.parent.open(req)
        # same scheme: let the normal protocol handler take care of it
        return None
class HTTPPasswordMgr:
    """Store user/password credentials keyed by realm and URI prefix."""

    def __init__(self):
        # maps realm -> {tuple of reduced URIs: (user, password)}
        self.passwd = {}

    def add_password(self, realm, uri, user, passwd):
        # uri may be a single URI or a sequence of them
        if isinstance(uri, basestring):
            uri = [uri]
        if realm not in self.passwd:
            self.passwd[realm] = {}
        # store once with default ports applied and once without, so
        # lookups match either spelling
        for default_port in True, False:
            reduced_uri = tuple(
                [self.reduce_uri(u, default_port) for u in uri])
            self.passwd[realm][reduced_uri] = (user, passwd)

    def find_user_password(self, realm, authuri):
        """Return (user, password) for realm/authuri, or (None, None)."""
        domains = self.passwd.get(realm, {})
        for default_port in True, False:
            reduced_authuri = self.reduce_uri(authuri, default_port)
            for uris, authinfo in domains.iteritems():
                for uri in uris:
                    if self.is_suburi(uri, reduced_authuri):
                        return authinfo
        return None, None

    def reduce_uri(self, uri, default_port=True):
        """Accept authority or URI and extract only the authority and path."""
        # note HTTP URLs do not have a userinfo component
        parts = urlparse.urlsplit(uri)
        if parts[1]:
            # full URI
            scheme = parts[0]
            authority = parts[1]
            path = parts[2] or '/'
        else:
            # bare host or host:port
            scheme = None
            authority = uri
            path = '/'
        host, port = splitport(authority)
        if default_port and port is None and scheme is not None:
            # normalize to an explicit default port for the scheme
            dport = {"http": 80, "https": 443}.get(scheme)
            if dport is not None:
                authority = "%s:%d" % (host, dport)
        return authority, path

    def is_suburi(self, base, test):
        """Check if test is below base in a URI tree.

        Both args must be URIs in reduced (authority, path) form.
        """
        if base == test:
            return True
        if base[0] != test[0]:
            # different authority
            return False
        prefix = posixpath.commonprefix((base[1], test[1]))
        return len(prefix) == len(base[1])
class HTTPPasswordMgrWithDefaultRealm(HTTPPasswordMgr):
    """Password manager that falls back to credentials stored under realm None."""

    def find_user_password(self, realm, authuri):
        # Try the specific realm first, then the wildcard (None) realm.
        user, password = HTTPPasswordMgr.find_user_password(self, realm,
                                                            authuri)
        if user is not None:
            return user, password
        return HTTPPasswordMgr.find_user_password(self, None, authuri)
class AbstractBasicAuthHandler:
    """Shared machinery for Basic authentication (server and proxy variants)."""

    # XXX this allows for multiple auth-schemes, but will stupidly pick
    # the last one with a realm specified.

    # allow for double- and single-quoted realm values
    # (single quotes are a violation of the RFC, but appear in the wild)
    rx = re.compile('(?:.*,)*[ \t]*([^ \t]+)[ \t]+'
                    'realm=(["\'])(.*?)\\2', re.I)

    # XXX could pre-emptively send auth info already accepted (RFC 2617,
    # end of section 2, and section 1.2 immediately after "credentials"
    # production).

    def __init__(self, password_mgr=None):
        if password_mgr is None:
            password_mgr = HTTPPasswordMgr()
        self.passwd = password_mgr
        self.add_password = self.passwd.add_password

    def http_error_auth_reqed(self, authreq, host, req, headers):
        # host may be an authority (without userinfo) or a URL with an
        # authority
        # XXX could be multiple headers
        authreq = headers.get(authreq, None)
        if authreq:
            mo = AbstractBasicAuthHandler.rx.search(authreq)
            if mo:
                scheme, quote, realm = mo.groups()
                if scheme.lower() == 'basic':
                    return self.retry_http_basic_auth(host, req, realm)

    def retry_http_basic_auth(self, host, req, realm):
        user, pw = self.passwd.find_user_password(realm, host)
        if pw is None:
            # no credentials for this realm/host
            return None
        raw = "%s:%s" % (user, pw)
        auth = 'Basic %s' % base64.b64encode(raw).strip()
        if req.headers.get(self.auth_header, None) == auth:
            # exactly these credentials were already sent and rejected;
            # give up rather than loop
            return None
        req.add_header(self.auth_header, auth)
        return self.parent.open(req)
class HTTPBasicAuthHandler(AbstractBasicAuthHandler, BaseHandler):
    """Retry 401 responses using Basic credentials from the password manager."""

    auth_header = 'Authorization'

    def http_error_401(self, req, fp, code, msg, headers):
        url = req.get_full_url()
        return self.http_error_auth_reqed('www-authenticate',
                                          url, req, headers)
class ProxyBasicAuthHandler(AbstractBasicAuthHandler, BaseHandler):
    """Retry 407 responses using Basic credentials for the proxy."""

    auth_header = 'Proxy-authorization'

    def http_error_407(self, req, fp, code, msg, headers):
        # http_error_auth_reqed requires that there is no userinfo component
        # in authority.  Assume there isn't one, since urllib2 does not (and
        # should not, RFC 3986 s. 3.2.1) support requests for URLs containing
        # userinfo.
        authority = req.get_host()
        return self.http_error_auth_reqed('proxy-authenticate',
                                          authority, req, headers)
def randombytes(n):
    """Return n random bytes as a string.

    Prefers the kernel CSPRNG (/dev/urandom); falls back to the
    (non-cryptographic) random module when it is unavailable.  It
    might be worthwhile to extend this function to use other
    platform-specific mechanisms for getting random bytes.
    """
    if os.path.exists("/dev/urandom"):
        # Open in binary mode: the data is raw bytes, not text, and
        # text mode could mangle it on platforms that translate
        # newlines.  Close the file even if the read fails.
        f = open("/dev/urandom", "rb")
        try:
            s = f.read(n)
        finally:
            f.close()
        return s
    else:
        L = [chr(random.randrange(0, 256)) for i in range(n)]
        return "".join(L)
class AbstractDigestAuthHandler:
    """Shared machinery for Digest authentication, specified in RFC 2617."""

    # XXX The client does not inspect the Authentication-Info header
    # in a successful response.

    # XXX It should be possible to test this implementation against
    # a mock server that just generates a static set of challenges.

    # XXX qop="auth-int" support is shaky

    def __init__(self, passwd=None):
        if passwd is None:
            passwd = HTTPPasswordMgr()
        self.passwd = passwd
        self.add_password = self.passwd.add_password
        self.retried = 0
        self.nonce_count = 0

    def reset_retry_count(self):
        self.retried = 0

    def http_error_auth_reqed(self, auth_header, host, req, headers):
        authreq = headers.get(auth_header, None)
        if self.retried > 5:
            # Don't fail endlessly - if we failed once, we'll probably
            # fail a second time. Hm. Unless the Password Manager is
            # prompting for the information. Crap. This isn't great
            # but it's better than the current 'repeat until recursion
            # depth exceeded' approach <wink>
            raise HTTPError(req.get_full_url(), 401, "digest auth failed",
                            headers, None)
        else:
            self.retried += 1
        if authreq:
            scheme = authreq.split()[0]
            if scheme.lower() == 'digest':
                return self.retry_http_digest_auth(req, authreq)

    def retry_http_digest_auth(self, req, auth):
        token, challenge = auth.split(' ', 1)
        chal = parse_keqv_list(parse_http_list(challenge))
        auth = self.get_authorization(req, chal)
        if auth:
            auth_val = 'Digest %s' % auth
            if req.headers.get(self.auth_header, None) == auth_val:
                # identical credentials were already rejected; give up
                return None
            req.add_unredirected_header(self.auth_header, auth_val)
            resp = self.parent.open(req)
            return resp

    def get_cnonce(self, nonce):
        # The cnonce-value is an opaque
        # quoted string value provided by the client and used by both client
        # and server to avoid chosen plaintext attacks, to provide mutual
        # authentication, and to provide some message integrity protection.
        # This isn't a fabulous effort, but it's probably Good Enough.
        dig = hashlib.sha1("%s:%s:%s:%s" % (self.nonce_count, nonce, time.ctime(),
                                            randombytes(8))).hexdigest()
        return dig[:16]

    def get_authorization(self, req, chal):
        """Build the Digest Authorization header value, or None on failure."""
        try:
            realm = chal['realm']
            nonce = chal['nonce']
            qop = chal.get('qop')
            algorithm = chal.get('algorithm', 'MD5')
            # mod_digest doesn't send an opaque, even though it isn't
            # supposed to be optional
            opaque = chal.get('opaque', None)
        except KeyError:
            return None

        H, KD = self.get_algorithm_impls(algorithm)
        if H is None:
            # unsupported digest algorithm
            return None

        user, pw = self.passwd.find_user_password(realm, req.get_full_url())
        if user is None:
            return None

        # XXX not implemented yet
        if req.has_data():
            entdig = self.get_entity_digest(req.get_data(), chal)
        else:
            entdig = None

        A1 = "%s:%s:%s" % (user, realm, pw)
        A2 = "%s:%s" % (req.get_method(),
                        # XXX selector: what about proxies and full urls
                        req.get_selector())
        if qop == 'auth':
            self.nonce_count += 1
            ncvalue = '%08x' % self.nonce_count
            cnonce = self.get_cnonce(nonce)
            noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, H(A2))
            respdig = KD(H(A1), noncebit)
        elif qop is None:
            respdig = KD(H(A1), "%s:%s" % (nonce, H(A2)))
        else:
            # XXX handle auth-int.
            raise URLError("qop '%s' is not supported." % qop)

        # XXX should the partial digests be encoded too?

        base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
               'response="%s"' % (user, realm, nonce, req.get_selector(),
                                  respdig)
        if opaque:
            base += ', opaque="%s"' % opaque
        if entdig:
            base += ', digest="%s"' % entdig
        base += ', algorithm="%s"' % algorithm
        if qop:
            base += ', qop=auth, nc=%s, cnonce="%s"' % (ncvalue, cnonce)
        return base

    def get_algorithm_impls(self, algorithm):
        """Return (H, KD) digest helpers for *algorithm*, or (None, None).

        Previously an unrecognized algorithm name left H unbound, so the
        caller's ``if H is None`` check raised UnboundLocalError instead
        of failing cleanly; returning (None, None) matches what
        get_authorization already expects for "unsupported".
        """
        # lambdas assume digest modules are imported at the top level
        if algorithm == 'MD5':
            H = lambda x: hashlib.md5(x).hexdigest()
        elif algorithm == 'SHA':
            H = lambda x: hashlib.sha1(x).hexdigest()
        else:
            # XXX MD5-sess is not supported either
            return None, None
        KD = lambda s, d: H("%s:%s" % (s, d))
        return H, KD

    def get_entity_digest(self, data, chal):
        # XXX not implemented yet
        return None
class HTTPDigestAuthHandler(BaseHandler, AbstractDigestAuthHandler):
    """An authentication protocol defined by RFC 2069

    Digest authentication improves on basic authentication because it
    does not transmit passwords in the clear.
    """

    auth_header = 'Authorization'
    handler_order = 490  # before Basic auth

    def http_error_401(self, req, fp, code, msg, headers):
        # authenticate against the host part of the URL
        host = urlparse.urlparse(req.get_full_url())[1]
        retry = self.http_error_auth_reqed('www-authenticate',
                                           host, req, headers)
        # a fresh challenge restarts the retry budget
        self.reset_retry_count()
        return retry
class ProxyDigestAuthHandler(BaseHandler, AbstractDigestAuthHandler):
    """Digest authentication handler for proxy (407) challenges."""

    auth_header = 'Proxy-Authorization'
    handler_order = 490  # before Basic auth

    def http_error_407(self, req, fp, code, msg, headers):
        # Proxy challenges authenticate against the request host,
        # using the Proxy-Authenticate challenge header.
        retry = self.http_error_auth_reqed('proxy-authenticate',
                                           req.get_host(), req, headers)
        self.reset_retry_count()
        return retry
class AbstractHTTPHandler(BaseHandler):
    """Shared machinery for HTTPHandler and HTTPSHandler.

    Subclasses supply the httplib connection class to do_open(); the
    request pre-processing in do_request_ is reused for both schemes.
    """

    def __init__(self, debuglevel=0):
        # Forwarded to the httplib connection in do_open().
        self._debuglevel = debuglevel

    def set_http_debuglevel(self, level):
        self._debuglevel = level

    def do_request_(self, request):
        """Fill in default headers on `request` and return it.

        Adds Content-type/Content-length for requests that carry a
        body, a Host header, and any headers from the parent opener's
        addheaders list -- never overwriting a header that is already
        set.  Raises URLError if the request has no host.
        """
        host = request.get_host()
        if not host:
            raise URLError('no host given')

        if request.has_data():  # POST
            data = request.get_data()
            if not request.has_header('Content-type'):
                # Default encoding for HTML form posts.
                request.add_unredirected_header(
                    'Content-type',
                    'application/x-www-form-urlencoded')
            if not request.has_header('Content-length'):
                request.add_unredirected_header(
                    'Content-length', '%d' % len(data))

        # If the selector itself contains a host (absolute URL),
        # prefer it for the Host header; sel_path is unused.
        scheme, sel = splittype(request.get_selector())
        sel_host, sel_path = splithost(sel)
        if not request.has_header('Host'):
            request.add_unredirected_header('Host', sel_host or host)
        for name, value in self.parent.addheaders:
            name = name.capitalize()
            if not request.has_header(name):
                request.add_unredirected_header(name, value)

        return request

    def do_open(self, http_class, req):
        """Return an addinfourl object for the request, using http_class.

        http_class must implement the HTTPConnection API from httplib.
        The addinfourl return value is a file-like object.  It also
        has methods and attributes including:
            - info(): return a mimetools.Message object for the headers
            - geturl(): return the original request URL
            - code: HTTP status code

        Raises URLError when the request has no host or on
        socket-level errors during the exchange.
        """
        host = req.get_host()
        if not host:
            raise URLError('no host given')

        h = http_class(host, timeout=req.timeout) # will parse host:port
        h.set_debuglevel(self._debuglevel)

        # Unredirected headers take precedence over the redirectable
        # ones (dict.update overwrites duplicates).
        headers = dict(req.headers)
        headers.update(req.unredirected_hdrs)
        # We want to make an HTTP/1.1 request, but the addinfourl
        # class isn't prepared to deal with a persistent connection.
        # It will try to read all remaining data from the socket,
        # which will block while the server waits for the next request.
        # So make sure the connection gets closed after the (only)
        # request.
        headers["Connection"] = "close"
        # Normalize capitalization so case-variant duplicates collapse.
        headers = dict(
            (name.title(), val) for name, val in headers.items())
        try:
            h.request(req.get_method(), req.get_selector(), req.data, headers)
            r = h.getresponse()
        except socket.error, err: # XXX what error?
            raise URLError(err)

        # Pick apart the HTTPResponse object to get the addinfourl
        # object initialized properly.

        # Wrap the HTTPResponse object in socket's file object adapter
        # for Windows.  That adapter calls recv(), so delegate recv()
        # to read().  This weird wrapping allows the returned object to
        # have readline() and readlines() methods.

        # XXX It might be better to extract the read buffering code
        # out of socket._fileobject() and into a base class.

        r.recv = r.read
        fp = socket._fileobject(r, close=True)

        resp = addinfourl(fp, r.msg, req.get_full_url())
        resp.code = r.status
        resp.msg = r.reason
        return resp
class HTTPHandler(AbstractHTTPHandler):
    """Handler for 'http' URLs, built on AbstractHTTPHandler."""

    def http_open(self, req):
        # Run the request over a plain httplib connection.
        return self.do_open(httplib.HTTPConnection, req)

    # Reuse the shared request pre-processing for the http scheme.
    http_request = AbstractHTTPHandler.do_request_
# HTTPSHandler is only defined when httplib was built with SSL support.
if hasattr(httplib, 'HTTPS'):
    class HTTPSHandler(AbstractHTTPHandler):

        def https_open(self, req):
            # Same machinery as HTTPHandler, over an SSL connection.
            return self.do_open(httplib.HTTPSConnection, req)

        # Reuse the shared request pre-processing for the https scheme.
        https_request = AbstractHTTPHandler.do_request_
class HTTPCookieProcessor(BaseHandler):
    """Handler that carries cookies across requests via a CookieJar."""

    def __init__(self, cookiejar=None):
        import cookielib
        # Fall back to a fresh, empty jar when none was supplied.
        self.cookiejar = (cookiejar if cookiejar is not None
                          else cookielib.CookieJar())

    def http_request(self, request):
        # Attach any stored cookies that match the outgoing request.
        self.cookiejar.add_cookie_header(request)
        return request

    def http_response(self, request, response):
        # Record cookies the server set on this response.
        self.cookiejar.extract_cookies(response, request)
        return response

    https_request = http_request
    https_response = http_response
class UnknownHandler(BaseHandler):
    """Last-resort handler: reject schemes nobody else claims."""

    def unknown_open(self, req):
        scheme = req.get_type()
        raise URLError('unknown url type: %s' % scheme)
def parse_keqv_list(l):
    """Parse a list of key=value strings into a dict.

    Keys are assumed not to be duplicated.  Only the first '=' splits
    key from value, so values may themselves contain '='.  A value
    enclosed in double quotes has the quotes stripped.
    """
    parsed = {}
    for elt in l:
        k, v = elt.split('=', 1)
        # Guard against an empty value ("k="), which previously raised
        # IndexError on v[0].
        if v and v[0] == '"' and v[-1] == '"':
            v = v[1:-1]
        parsed[k] = v
    return parsed
def parse_http_list(s):
    """Parse lists as described by RFC 2068 Section 2.

    In particular, parse comma-separated lists where the elements of
    the list may include quoted-strings.  A quoted-string could
    contain a comma.  A non-quoted string could have quotes in the
    middle.  Neither commas nor quotes count if they are escaped.
    Only double-quotes count, not single-quotes.
    """
    items = []
    current = ''
    in_quote = pending_escape = False

    for ch in s:
        # A backslash inside a quoted-string escapes the next char:
        # the backslash itself is dropped, the char is kept verbatim.
        if pending_escape:
            current += ch
            pending_escape = False
        elif in_quote:
            if ch == '\\':
                pending_escape = True
            else:
                if ch == '"':
                    in_quote = False
                current += ch
        elif ch == ',':
            # Unquoted comma: element boundary.
            items.append(current)
            current = ''
        else:
            if ch == '"':
                in_quote = True
            current += ch

    # Flush the trailing element, if any.
    if current:
        items.append(current)

    return [item.strip() for item in items]
class FileHandler(BaseHandler):
    # Use local file or FTP depending on form of URL
    def file_open(self, req):
        """Dispatch a file: URL to local open or to the FTP handler.

        A selector of the form '//host/path' with a non-empty host is
        re-dispatched through the parent opener as FTP; '///path' and
        host-less forms are opened locally.
        """
        url = req.get_selector()
        if url[:2] == '//' and url[2:3] != '/':
            req.type = 'ftp'
            return self.parent.open(req)
        else:
            return self.open_local_file(req)

    # names for the localhost -- cached at class level on first use,
    # shared by all FileHandler instances
    names = None
    def get_names(self):
        """Return a tuple of IP addresses that count as this host."""
        if FileHandler.names is None:
            try:
                FileHandler.names = (socket.gethostbyname('localhost'),
                                    socket.gethostbyname(socket.gethostname()))
            except socket.gaierror:
                # Our own hostname does not resolve; fall back to
                # localhost only.
                FileHandler.names = (socket.gethostbyname('localhost'),)
        return FileHandler.names

    # not entirely sure what the rules are here
    def open_local_file(self, req):
        """Open the local file named by the request's selector.

        The file is only served when the URL's host part is empty, or
        has no port and resolves to one of this machine's addresses
        (see get_names()); otherwise URLError('file not on local
        host') is raised.  OSErrors (e.g. from os.stat) are re-raised
        as URLError.
        """
        import email.utils
        import mimetypes
        host = req.get_host()
        file = req.get_selector()
        localfile = url2pathname(file)
        try:
            stats = os.stat(localfile)
            size = stats.st_size
            modified = email.utils.formatdate(stats.st_mtime, usegmt=True)
            mtype = mimetypes.guess_type(file)[0]
            # Synthesize an RFC 822-style header block for addinfourl.
            headers = mimetools.Message(StringIO(
                'Content-type: %s\nContent-length: %d\nLast-modified: %s\n' %
                (mtype or 'text/plain', size, modified)))
            if host:
                host, port = splitport(host)
            # NOTE: when host is empty, short-circuiting keeps the
            # unbound `port` from being evaluated.
            if not host or \
                (not port and socket.gethostbyname(host) in self.get_names()):
                return addinfourl(open(localfile, 'rb'),
                                  headers, 'file:'+file)
        except OSError, msg:
            # urllib2 users shouldn't expect OSErrors coming from urlopen()
            raise URLError(msg)
        raise URLError('file not on local host')
class FTPHandler(BaseHandler):
    def ftp_open(self, req):
        """Retrieve a file or directory listing over FTP.

        Returns an addinfourl wrapping the data connection, with
        Content-type/Content-length headers filled in when they can
        be determined.  Raises URLError for a missing host, name
        resolution failure, or any ftplib error.
        """
        import ftplib
        import mimetypes
        host = req.get_host()
        if not host:
            raise URLError('ftp error: no host given')
        host, port = splitport(host)
        if port is None:
            port = ftplib.FTP_PORT
        else:
            port = int(port)

        # username/password handling (ftp://user:pass@host/... form)
        user, host = splituser(host)
        if user:
            user, passwd = splitpasswd(user)
        else:
            passwd = None
        host = unquote(host)
        user = unquote(user or '')
        passwd = unquote(passwd or '')

        try:
            host = socket.gethostbyname(host)
        except socket.error, msg:
            raise URLError(msg)
        # Split ';attr=value' parameters off the selector, then break
        # the path into directory components plus the final file name.
        path, attrs = splitattr(req.get_selector())
        dirs = path.split('/')
        dirs = map(unquote, dirs)
        dirs, file = dirs[:-1], dirs[-1]
        if dirs and not dirs[0]:
            # Drop the empty component produced by the leading '/'.
            dirs = dirs[1:]
        try:
            fw = self.connect_ftp(user, passwd, host, port, dirs, req.timeout)
            # Transfer type: 'I' when a file name is present, 'D'
            # (directory listing) otherwise; a ';type=x' URL attribute
            # may override this below.
            type = file and 'I' or 'D'
            for attr in attrs:
                attr, value = splitvalue(attr)
                if attr.lower() == 'type' and \
                   value in ('a', 'A', 'i', 'I', 'd', 'D'):
                    type = value.upper()
            fp, retrlen = fw.retrfile(file, type)
            headers = ""
            mtype = mimetypes.guess_type(req.get_full_url())[0]
            if mtype:
                headers += "Content-type: %s\n" % mtype
            if retrlen is not None and retrlen >= 0:
                headers += "Content-length: %d\n" % retrlen
            sf = StringIO(headers)
            headers = mimetools.Message(sf)
            return addinfourl(fp, headers, req.get_full_url())
        except ftplib.all_errors, msg:
            # Three-arg raise preserves the original traceback so the
            # ftplib failure point is still visible.
            raise URLError, ('ftp error: %s' % msg), sys.exc_info()[2]

    def connect_ftp(self, user, passwd, host, port, dirs, timeout):
        # Hook point: CacheFTPHandler overrides this to reuse live
        # connections.
        fw = ftpwrapper(user, passwd, host, port, dirs, timeout)
##        fw.ftp.set_debuglevel(1)
        return fw
class CacheFTPHandler(FTPHandler):
    """FTPHandler that keeps a bounded cache of live FTP connections.

    Connections are keyed by (user, host, port, path, timeout) and
    are evicted self.delay seconds after their last use, or when the
    cache reaches self.max_conns entries.
    """
    # XXX would be nice to have pluggable cache strategies
    # XXX this stuff is definitely not thread safe
    def __init__(self):
        self.cache = {}        # key -> ftpwrapper connection
        self.timeout = {}      # key -> absolute expiry time
        self.soonest = 0       # earliest expiry time across the cache
        self.delay = 60        # seconds a connection stays cached
        self.max_conns = 16    # hard cap on cached connections

    def setTimeout(self, t):
        """Set how long (in seconds) a cached connection is kept."""
        self.delay = t

    def setMaxConns(self, m):
        """Set the maximum number of cached connections."""
        self.max_conns = m

    def connect_ftp(self, user, passwd, host, port, dirs, timeout):
        """Return a cached connection for this target, creating one
        as needed, and refresh its expiry time."""
        key = user, host, port, '/'.join(dirs), timeout
        if key not in self.cache:
            self.cache[key] = ftpwrapper(user, passwd, host, port,
                                         dirs, timeout)
        self.timeout[key] = time.time() + self.delay
        self.check_cache()
        return self.cache[key]

    def check_cache(self):
        """Evict expired connections and enforce the size cap."""
        # first check for old ones
        t = time.time()
        if self.soonest <= t:
            # Iterate over a snapshot: we delete entries while looping.
            for k, v in list(self.timeout.items()):
                if v < t:
                    self.cache[k].close()
                    del self.cache[k]
                    del self.timeout[k]
        # The cache may now be empty; min() on an empty sequence used
        # to raise ValueError here.  Reset to 0 (the __init__ value).
        self.soonest = min(self.timeout.values()) if self.timeout else 0

        # then check the size
        if len(self.cache) == self.max_conns:
            # Drop the entry that expires soonest to make room.
            for k, v in list(self.timeout.items()):
                if v == self.soonest:
                    del self.cache[k]
                    del self.timeout[k]
                    break
            self.soonest = min(self.timeout.values()) if self.timeout else 0