# 1153769: document PEP 237 changes to string formatting.
# [python.git] / Lib / urlparse.py
# blob 631a5a1cb318f3583cb4d8274269915be48628ed
1 """Parse (absolute and relative) URLs.
3 See RFC 1808: "Relative Uniform Resource Locators", by R. Fielding,
4 UC Irvine, June 1995.
5 """
7 __all__ = ["urlparse", "urlunparse", "urljoin", "urldefrag",
8 "urlsplit", "urlunsplit"]
# A classification of schemes ('' means apply by default)

# Schemes for which urljoin() resolves relative references.
uses_relative = ['ftp', 'http', 'gopher', 'nntp', 'imap',
                 'wais', 'file', 'https', 'shttp', 'mms',
                 'prospero', 'rtsp', 'rtspu', '', 'sftp']
# Schemes whose '//' prefix introduces a network location (netloc).
uses_netloc = ['ftp', 'http', 'gopher', 'nntp', 'telnet',
               'imap', 'wais', 'file', 'mms', 'https', 'shttp',
               'snews', 'prospero', 'rtsp', 'rtspu', 'rsync', '',
               'svn', 'svn+ssh', 'sftp']
# Schemes with no hierarchical path structure.
# NOTE(review): not referenced by any code visible in this file.
non_hierarchical = ['gopher', 'hdl', 'mailto', 'news',
                    'telnet', 'wais', 'imap', 'snews', 'sip', 'sips']
# Schemes that may carry ';params' on the last path segment.
uses_params = ['ftp', 'hdl', 'prospero', 'http', 'imap',
               'https', 'shttp', 'rtsp', 'rtspu', 'sip', 'sips',
               'mms', '', 'sftp']
# Schemes that may carry a '?query' component.
uses_query = ['http', 'wais', 'imap', 'https', 'shttp', 'mms',
              'gopher', 'rtsp', 'rtspu', 'sip', 'sips', '']
# Schemes that may carry a '#fragment' component.
uses_fragment = ['ftp', 'hdl', 'http', 'gopher', 'news',
                 'nntp', 'wais', 'https', 'shttp', 'snews',
                 'file', 'prospero', '']

# Characters valid in scheme names
scheme_chars = ('abcdefghijklmnopqrstuvwxyz'
                'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                '0123456789'
                '+-.')

# urlsplit() memoizes its results in _parse_cache; the whole cache is
# discarded once it reaches MAX_CACHE_SIZE entries (see clear_cache()).
MAX_CACHE_SIZE = 20
_parse_cache = {}
def clear_cache():
    """Clear the parse cache."""
    # Clear in place (rather than rebinding) so any outstanding
    # references to the _parse_cache dict remain valid.
    _parse_cache.clear()
class ResultMixin(object):
    """Shared methods for the parsed result objects.

    Subclasses (SplitResult, ParseResult) supply a ``netloc`` attribute
    of the form ``[user[:password]@]host[:port]``; these read-only
    properties pick it apart on demand.

    NOTE(review): host/port are split on the first ':', so bracketed
    IPv6 literals such as '[::1]:80' are not handled correctly here.
    """

    @property
    def username(self):
        """User name from the userinfo part, or None when there is no '@'."""
        netloc = self.netloc
        if "@" in netloc:
            userinfo = netloc.rsplit("@", 1)[0]
            if ":" in userinfo:
                userinfo = userinfo.split(":", 1)[0]
            return userinfo
        return None

    @property
    def password(self):
        """Password from the userinfo part, or None when absent."""
        netloc = self.netloc
        if "@" in netloc:
            userinfo = netloc.rsplit("@", 1)[0]
            if ":" in userinfo:
                return userinfo.split(":", 1)[1]
        return None

    @property
    def hostname(self):
        """Host portion, lowercased; None when the netloc is empty."""
        netloc = self.netloc
        if "@" in netloc:
            netloc = netloc.rsplit("@", 1)[1]
        if ":" in netloc:
            netloc = netloc.split(":", 1)[0]
        return netloc.lower() or None

    @property
    def port(self):
        """Port as an int, or None when no non-empty port is present.

        Bug fix: the original called int() unconditionally, so a netloc
        with a trailing ':' and no digits (e.g. 'host:') raised
        ValueError instead of returning None.
        """
        netloc = self.netloc
        if "@" in netloc:
            netloc = netloc.rsplit("@", 1)[1]
        if ":" in netloc:
            port = netloc.split(":", 1)[1]
            if port:  # guard: 'host:' (empty port) yields None
                return int(port, 10)
        return None
84 from collections import namedtuple
class SplitResult(namedtuple('SplitResult', 'scheme netloc path query fragment'), ResultMixin):
    """Result of urlsplit(): a 5-tuple (scheme, netloc, path, query,
    fragment) with the extra attribute accessors from ResultMixin."""

    __slots__ = ()  # no per-instance dict; all data lives in the tuple

    def geturl(self):
        """Return the URL re-assembled from this 5-tuple (urlunsplit)."""
        return urlunsplit(self)
class ParseResult(namedtuple('ParseResult', 'scheme netloc path params query fragment'), ResultMixin):
    """Result of urlparse(): a 6-tuple (scheme, netloc, path, params,
    query, fragment) with the extra attribute accessors from ResultMixin."""

    __slots__ = ()  # no per-instance dict; all data lives in the tuple

    def geturl(self):
        """Return the URL re-assembled from this 6-tuple (urlunparse)."""
        return urlunparse(self)
def urlparse(url, scheme='', allow_fragments=True):
    """Parse a URL into 6 components:
    <scheme>://<netloc>/<path>;<params>?<query>#<fragment>
    Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
    Note that we don't break the components up in smaller bits
    (e.g. netloc is a single string) and we don't expand % escapes."""
    # Delegate the heavy lifting to urlsplit(), then peel ';params' off
    # the path for schemes that support it.
    split = urlsplit(url, scheme, allow_fragments)
    scheme, netloc, url, query, fragment = split
    params = ''
    if ';' in url and scheme in uses_params:
        url, params = _splitparams(url)
    return ParseResult(scheme, netloc, url, params, query, fragment)
116 def _splitparams(url):
117 if '/' in url:
118 i = url.find(';', url.rfind('/'))
119 if i < 0:
120 return url, ''
121 else:
122 i = url.find(';')
123 return url[:i], url[i+1:]
125 def _splitnetloc(url, start=0):
126 delim = len(url) # position of end of domain part of url, default is end
127 for c in '/?#': # look for delimiters; the order is NOT important
128 wdelim = url.find(c, start) # find first of this delim
129 if wdelim >= 0: # if found
130 delim = min(delim, wdelim) # use earliest delim position
131 return url[start:delim], url[delim:] # return (domain, rest)
def urlsplit(url, scheme='', allow_fragments=True):
    """Parse a URL into 5 components:
    <scheme>://<netloc>/<path>?<query>#<fragment>
    Return a 5-tuple: (scheme, netloc, path, query, fragment).
    Note that we don't break the components up in smaller bits
    (e.g. netloc is a single string) and we don't expand % escapes."""
    allow_fragments = bool(allow_fragments)
    # Cache key includes the types so str/unicode inputs don't collide.
    key = url, scheme, allow_fragments, type(url), type(scheme)
    cached = _parse_cache.get(key, None)
    if cached:
        return cached
    if len(_parse_cache) >= MAX_CACHE_SIZE:  # avoid runaway growth
        clear_cache()
    netloc = query = fragment = ''
    colon = url.find(':')
    if colon > 0:
        if url[:colon] == 'http':  # optimize the common case
            scheme = url[:colon].lower()
            url = url[colon + 1:]
            if url[:2] == '//':
                netloc, url = _splitnetloc(url, 2)
            if allow_fragments and '#' in url:
                url, fragment = url.split('#', 1)
            if '?' in url:
                url, query = url.split('?', 1)
            result = SplitResult(scheme, netloc, url, query, fragment)
            _parse_cache[key] = result
            return result
        # General case: accept the prefix as a scheme only if every
        # character is legal in a scheme name.
        if all(ch in scheme_chars for ch in url[:colon]):
            scheme, url = url[:colon].lower(), url[colon + 1:]
    if scheme in uses_netloc and url[:2] == '//':
        netloc, url = _splitnetloc(url, 2)
    if allow_fragments and scheme in uses_fragment and '#' in url:
        url, fragment = url.split('#', 1)
    if scheme in uses_query and '?' in url:
        url, query = url.split('?', 1)
    result = SplitResult(scheme, netloc, url, query, fragment)
    _parse_cache[key] = result
    return result
def urlunparse(components):
    """Put a parsed URL back together again.  This may result in a
    slightly different, but equivalent URL, if the URL that was parsed
    originally had redundant delimiters, e.g. a ? with an empty query
    (the draft states that these are equivalent).

    *components* is any 6-sequence (scheme, netloc, url, params, query,
    fragment), such as a ParseResult.
    """
    # Tuple parameter unpacking in the signature was removed by PEP 3113;
    # unpack inside the body instead.  Callers still pass one 6-sequence.
    scheme, netloc, url, params, query, fragment = components
    if params:
        url = "%s;%s" % (url, params)
    return urlunsplit((scheme, netloc, url, query, fragment))
def urlunsplit(components):
    """Combine the elements of a 5-sequence (scheme, netloc, url, query,
    fragment), such as a SplitResult, back into a complete URL string.

    Tuple parameter unpacking in the signature was removed by PEP 3113;
    the sequence is unpacked in the body instead.  Callers still pass
    one 5-sequence argument.
    """
    scheme, netloc, url, query, fragment = components
    # Emit '//netloc' when there is a netloc, or when the scheme expects
    # one and the path doesn't already start with '//'.
    if netloc or (scheme and scheme in uses_netloc and url[:2] != '//'):
        if url and url[:1] != '/':
            url = '/' + url
        url = '//' + (netloc or '') + url
    if scheme:
        url = scheme + ':' + url
    if query:
        url = url + '?' + query
    if fragment:
        url = url + '#' + fragment
    return url
def urljoin(base, url, allow_fragments=True):
    """Join a base URL and a possibly relative URL to form an absolute
    interpretation of the latter."""
    # Trivial cases: an empty side contributes nothing.
    if not base:
        return url
    if not url:
        return base
    bscheme, bnetloc, bpath, bparams, bquery, bfragment = \
            urlparse(base, '', allow_fragments)
    # Parse url with the base's scheme as default, so 'http:g' and 'g'
    # both inherit context from an http base.
    scheme, netloc, path, params, query, fragment = \
            urlparse(url, bscheme, allow_fragments)
    # Different scheme, or one that never uses relative references:
    # the second URL stands on its own.
    if scheme != bscheme or scheme not in uses_relative:
        return url
    if scheme in uses_netloc:
        if netloc:
            # url carries its own authority; nothing to inherit.
            return urlunparse((scheme, netloc, path,
                               params, query, fragment))
        netloc = bnetloc
    if path[:1] == '/':
        # Absolute path: keep only scheme/netloc from the base.
        return urlunparse((scheme, netloc, path,
                           params, query, fragment))
    if not (path or params or query):
        # Only a fragment (or nothing): keep everything from the base
        # except the fragment.
        return urlunparse((scheme, netloc, bpath,
                           bparams, bquery, fragment))
    # Merge the relative path onto the base's "directory" (base path
    # minus its last segment), then collapse '.' and '..' segments.
    segments = bpath.split('/')[:-1] + path.split('/')
    # XXX The stuff below is bogus in various ways...
    if segments[-1] == '.':
        segments[-1] = ''
    while '.' in segments:
        segments.remove('.')
    # Repeatedly delete the first 'segment/..' pair found, never
    # touching a leading '' (root) or a preceding '..'; stop when a
    # full scan finds nothing to collapse (the inner while/else).
    while 1:
        i = 1
        n = len(segments) - 1
        while i < n:
            if (segments[i] == '..'
                and segments[i-1] not in ('', '..')):
                del segments[i-1:i+1]
                break
            i = i+1
        else:
            break
    # Trailing '..' handling: '/..' at the root becomes '/', otherwise
    # a final 'segment/..' pair collapses to a trailing '/'.
    if segments == ['', '..']:
        segments[-1] = ''
    elif len(segments) >= 2 and segments[-1] == '..':
        segments[-2:] = ['']
    return urlunparse((scheme, netloc, '/'.join(segments),
                       params, query, fragment))
def urldefrag(url):
    """Removes any existing fragment from URL.

    Returns a tuple of the defragmented URL and the fragment.  If
    the URL contained no fragments, the second element is the
    empty string.
    """
    # (Fix: the docstring's closing quotes were missing, leaving the
    # function body swallowed by an unterminated string literal.)
    if '#' in url:
        s, n, p, a, q, frag = urlparse(url)
        defrag = urlunparse((s, n, p, a, q, ''))
        return defrag, frag
    else:
        return url, ''
260 test_input = """
261 http://a/b/c/d
263 g:h = <URL:g:h>
264 http:g = <URL:http://a/b/c/g>
265 http: = <URL:http://a/b/c/d>
266 g = <URL:http://a/b/c/g>
267 ./g = <URL:http://a/b/c/g>
268 g/ = <URL:http://a/b/c/g/>
269 /g = <URL:http://a/g>
270 //g = <URL:http://g>
271 ?y = <URL:http://a/b/c/d?y>
272 g?y = <URL:http://a/b/c/g?y>
273 g?y/./x = <URL:http://a/b/c/g?y/./x>
274 . = <URL:http://a/b/c/>
275 ./ = <URL:http://a/b/c/>
276 .. = <URL:http://a/b/>
277 ../ = <URL:http://a/b/>
278 ../g = <URL:http://a/b/g>
279 ../.. = <URL:http://a/>
280 ../../g = <URL:http://a/g>
281 ../../../g = <URL:http://a/../g>
282 ./../g = <URL:http://a/b/g>
283 ./g/. = <URL:http://a/b/c/g/>
284 /./g = <URL:http://a/./g>
285 g/./h = <URL:http://a/b/c/g/h>
286 g/../h = <URL:http://a/b/c/h>
287 http:g = <URL:http://a/b/c/g>
288 http: = <URL:http://a/b/c/d>
289 http:?y = <URL:http://a/b/c/d?y>
290 http:g?y = <URL:http://a/b/c/g?y>
291 http:g?y/./x = <URL:http://a/b/c/g?y/./x>
def test():
    """Run urlparse()/urljoin() over test_input (or over a file named on
    the command line, '-' meaning stdin) and print the results, flagging
    any mismatch against the '= <URL:...>' expectations in the input.

    The first URL seen seeds the join base.  print is used in its
    parenthesized single-argument form, which behaves identically under
    Python 2 and also parses under Python 3 (PEP 3105).
    """
    import sys
    base = ''
    if sys.argv[1:]:
        fn = sys.argv[1]
        if fn == '-':
            fp = sys.stdin
        else:
            fp = open(fn)
    else:
        try:
            from cStringIO import StringIO
        except ImportError:
            try:
                from StringIO import StringIO
            except ImportError:
                # robustness: neither py2 module exists on Python 3
                from io import StringIO
        fp = StringIO(test_input)
    for line in fp:
        words = line.split()
        if not words:
            continue
        url = words[0]
        parts = urlparse(url)
        print('%-10s : %s' % (url, parts))
        # (renamed from 'abs', which shadowed the builtin)
        joined = urljoin(base, url)
        if not base:
            base = joined
        wrapped = '<URL:%s>' % joined
        print('%-10s = %s' % (url, wrapped))
        if len(words) == 3 and words[1] == '=':
            if wrapped != words[2]:
                print('EXPECTED %s !!!!!!!!!!' % words[2])
if __name__ == '__main__':
    test()  # run the self-test when executed as a script