""" robotparser.py

    Copyright (C) 2000  Bastian Kleineidam

    You can choose between two licenses when using this package:
    1) GNU GPLv2
    2) PSF license for Python 2.2

    The robots.txt Exclusion Protocol is implemented as specified in
    http://info.webcrawler.com/mak/projects/robots/norobots-rfc.html
"""
import urlparse,urllib

__all__ = ["RobotFileParser"]

debug = 0

def _debug(msg):
    if debug: print msg

class RobotFileParser:
    """ This class provides a set of methods to read, parse and answer
    questions about a single robots.txt file.

    """
    def __init__(self, url=''):
        self.entries = []
        self.default_entry = None
        self.disallow_all = False
        self.allow_all = False
        self.set_url(url)
        self.last_checked = 0
37 """Returns the time the robots.txt file was last fetched.
39 This is useful for long-running web spiders that need to
40 check for new robots.txt files periodically.
43 return self
.last_checked
46 """Sets the time the robots.txt file was last fetched to the
51 self
.last_checked
= time
.time()
    def set_url(self, url):
        """Sets the URL referring to a robots.txt file."""
        self.url = url
        self.host, self.path = urlparse.urlparse(url)[1:3]
59 """Reads the robots.txt URL and feeds it to the parser."""
61 f
= opener
.open(self
.url
)
65 lines
.append(line
.strip())
67 self
.errcode
= opener
.errcode
68 if self
.errcode
== 401 or self
.errcode
== 403:
69 self
.disallow_all
= True
70 _debug("disallow all")
71 elif self
.errcode
>= 400:
74 elif self
.errcode
== 200 and lines
:
    def _add_entry(self, entry):
        if "*" in entry.useragents:
            # the default entry is considered last
            self.default_entry = entry
        else:
            self.entries.append(entry)
    def parse(self, lines):
        """parse the input lines from a robots.txt file.
           We allow that a user-agent: line is not preceded by
           one or more blank lines."""
        # parser state: 0 = start, 1 = saw a user-agent: line,
        # 2 = saw an allow: or disallow: line
        state = 0
        linenumber = 0
        entry = Entry()

        for line in lines:
            linenumber = linenumber + 1
            if not line:
                if state == 1:
                    _debug("line %d: warning: you should insert"
                           " allow: or disallow: directives below any"
                           " user-agent: line" % linenumber)
                    entry = Entry()
                    state = 0
                elif state == 2:
                    self._add_entry(entry)
                    entry = Entry()
                    state = 0
            # remove optional comment and strip line
            i = line.find('#')
            if i >= 0:
                line = line[:i]
            line = line.strip()
            if not line:
                continue
            line = line.split(':', 1)
            if len(line) == 2:
                line[0] = line[0].strip().lower()
                line[1] = urllib.unquote(line[1].strip())
                if line[0] == "user-agent":
                    if state == 2:
                        _debug("line %d: warning: you should insert a blank"
                               " line before any user-agent"
                               " directive" % linenumber)
                        self._add_entry(entry)
                        entry = Entry()
                    entry.useragents.append(line[1])
                    state = 1
                elif line[0] == "disallow":
                    if state == 0:
                        _debug("line %d: error: you must insert a user-agent:"
                               " directive before this line" % linenumber)
                    else:
                        entry.rulelines.append(RuleLine(line[1], False))
                        state = 2
                elif line[0] == "allow":
                    if state == 0:
                        _debug("line %d: error: you must insert a user-agent:"
                               " directive before this line" % linenumber)
                    else:
                        entry.rulelines.append(RuleLine(line[1], True))
                else:
                    _debug("line %d: warning: unknown key %s" % (linenumber,
                           line[0]))
            else:
                _debug("line %d: error: malformed line %s" % (linenumber, line))
        if state == 2:
            self.entries.append(entry)
        _debug("Parsed rules:\n%s" % str(self))
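
    # For illustration (a hypothetical input, not data shipped with this
    # module): feeding parse() the lines of a robots.txt such as
    #
    #     User-agent: *
    #     Disallow: /search
    #
    #     User-agent: CherryPickerSE
    #     Disallow: /cgi-bin/event-search
    #
    # yields one catch-all Entry routed to default_entry by _add_entry()
    # (its useragents list contains "*") and, because the second block ends
    # the input while state == 2, one named Entry appended to self.entries.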
    def can_fetch(self, useragent, url):
        """using the parsed robots.txt decide if useragent can fetch url"""
        _debug("Checking robots.txt allowance for:\n  user agent: %s\n  url: %s" %
               (useragent, url))
        if self.disallow_all:
            return False
        if self.allow_all:
            return True
        # search for given user agent matches
        # the first match counts
        url = urllib.quote(urlparse.urlparse(urllib.unquote(url))[2]) or "/"
        for entry in self.entries:
            if entry.applies_to(useragent):
                return entry.allowance(url)
        # try the default entry last
        if self.default_entry:
            return self.default_entry.allowance(url)
        # agent not found ==> access granted
        return True
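
    # A worked example of the normalization above (hypothetical URL): for
    # "http://www.example.com/cgi-bin/event-search?city=San+Francisco" the
    # urlparse()[2] component is "/cgi-bin/event-search", so the query string
    # never takes part in rule matching; a bare "http://www.example.com" has
    # an empty path and falls back to "/".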
    def __str__(self):
        ret = ""
        for entry in self.entries:
            ret = ret + str(entry) + "\n"
        return ret

178 """A rule line is a single "Allow:" (allowance==True) or "Disallow:"
179 (allowance==False) followed by a path."""
180 def __init__(self
, path
, allowance
):
181 if path
== '' and not allowance
:
182 # an empty value means allow all
184 self
.path
= urllib
.quote(path
)
185 self
.allowance
= allowance
187 def applies_to(self
, filename
):
188 return self
.path
=="*" or filename
.startswith(self
.path
)
191 return (self
.allowance
and "Allow" or "Disallow")+": "+self
.path
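
    # Sketch of the matching rules above: RuleLine("/cgi-bin/", False) applies
    # to any decoded path starting with "/cgi-bin/", RuleLine("*", False)
    # applies to every path, and an empty Disallow path ("Disallow:") is
    # turned into an allow-all line by __init__.
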
195 """An entry has one or more user-agents and zero or more rulelines"""
202 for agent
in self
.useragents
:
203 ret
= ret
+ "User-agent: "+agent
+"\n"
204 for line
in self
.rulelines
:
205 ret
= ret
+ str(line
) + "\n"
208 def applies_to(self
, useragent
):
209 """check if this entry applies to the specified agent"""
210 # split the name token and make it lower case
211 useragent
= useragent
.split("/")[0].lower()
212 for agent
in self
.useragents
:
214 # we have the catch-all agent
216 agent
= agent
.lower()
217 if agent
in useragent
:
221 def allowance(self
, filename
):
223 - our agent applies to this entry
224 - filename is URL decoded"""
225 for line
in self
.rulelines
:
226 _debug((filename
, str(line
), line
.allowance
))
227 if line
.applies_to(filename
):
228 return line
.allowance
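
    # Matching in applies_to() is by lowercased substring on the token before
    # "/": an entry whose useragents list holds "CherryPicker" applies to the
    # agent string "CherryPickerSE/1.5" ("cherrypicker" is contained in
    # "cherrypickerse"), while "*" matches every agent.
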
class URLopener(urllib.FancyURLopener):
    """Fancy URL opener that remembers the last HTTP error code, so that
    read() can tell 401/403 responses apart from other failures."""
    def __init__(self, *args):
        urllib.FancyURLopener.__init__(self, *args)
        self.errcode = 200

    def http_error_default(self, url, fp, errcode, errmsg, headers):
        self.errcode = errcode
        return urllib.FancyURLopener.http_error_default(self, url, fp, errcode,
                                                        errmsg, headers)
def _check(a, b):
    if not b:
        ac = "access denied"
    else:
        ac = "access allowed"
    if a != b:
        print "failed"
    else:
        print "ok (%s)" % ac
    print
def _test():
    global debug
    rp = RobotFileParser()
    debug = 1

    # robots.txt that exists, gotten to by redirection
    rp.set_url('http://www.musi-cal.com/robots.txt')
    rp.read()

    _check(rp.can_fetch('*', 'http://www.musi-cal.com/'), 1)
    # this should match the first rule, which is a disallow
    _check(rp.can_fetch('', 'http://www.musi-cal.com/'), 0)
    # various cherry pickers
    _check(rp.can_fetch('CherryPickerSE',
                        'http://www.musi-cal.com/cgi-bin/event-search'
                        '?city=San+Francisco'), 0)
    _check(rp.can_fetch('CherryPickerSE/1.0',
                        'http://www.musi-cal.com/cgi-bin/event-search'
                        '?city=San+Francisco'), 0)
    _check(rp.can_fetch('CherryPickerSE/1.5',
                        'http://www.musi-cal.com/cgi-bin/event-search'
                        '?city=San+Francisco'), 0)
    # case sensitivity
    _check(rp.can_fetch('ExtractorPro', 'http://www.musi-cal.com/blubba'), 0)
    _check(rp.can_fetch('extractorpro', 'http://www.musi-cal.com/blubba'), 0)
    # substring test
    _check(rp.can_fetch('toolpak/1.1', 'http://www.musi-cal.com/blubba'), 0)
    # tests for catch-all * agent
    _check(rp.can_fetch('spam', 'http://www.musi-cal.com/search'), 0)
    _check(rp.can_fetch('spam', 'http://www.musi-cal.com/Musician/me'), 1)
    _check(rp.can_fetch('spam', 'http://www.musi-cal.com/'), 1)
    _check(rp.can_fetch('spam', 'http://www.musi-cal.com/'), 1)

    # robots.txt that does not exist
    rp.set_url('http://www.lycos.com/robots.txt')
    rp.read()
    _check(rp.can_fetch('Mozilla', 'http://www.lycos.com/search'), 1)
if __name__ == '__main__':
    _test()