Allow to post news to identi.ca / twitter.
[phpmyadmin-website.git] / render.py
blob71b86fd6195fd646d65a5a653917bdec8c1d1b12
1 #!/usr/bin/env python
2 # -*- coding: UTF-8 -*-
4 # phpMyAdmin web site generator
6 # Copyright (C) 2008 Michal Cihar <michal@cihar.com>
8 # This program is free software; you can redistribute it and/or modify
9 # it under the terms of the GNU General Public License as published by
10 # the Free Software Foundation; either version 2 of the License, or
11 # (at your option) any later version.
13 # This program is distributed in the hope that it will be useful,
14 # but WITHOUT ANY WARRANTY; without even the implied warranty of
15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 # GNU General Public License for more details.
18 # You should have received a copy of the GNU General Public License along
19 # with this program; if not, write to the Free Software Foundation, Inc.,
20 # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
22 import sys
23 import os
24 import re
25 import glob
26 import shutil
27 import csv
28 import traceback
29 import datetime
30 from genshi.template import TemplateLoader
31 from genshi.template import NewTextTemplate
32 from genshi.input import XML
33 from optparse import OptionParser
35 import helper.cache
36 import helper.log
37 import helper.date
38 import helper.stringfmt
39 import helper.twitter
41 import data.awards
42 import data.themes
43 import data.langnames
44 import data.menu
45 import data.screenshots
46 import data.redirects
47 import data.sf
48 import data.sitemap
# Project part
PROJECT_ID = 23067
PROJECT_NAME = 'phpmyadmin'

# Filtering
# Archives containing all languages are the "featured" downloads.
FILES_MARK = 'all-languages.'
# Extracts "X.Y" branch from a version string like "3.2.1".
BRANCH_REGEXP = re.compile('^([0-9]+\.[0-9]+)\.')
# Extracts major version ("X") from a version string.
MAJOR_BRANCH_REGEXP = re.compile('^([0-9]+)\.')
# Matches pre-release (beta/alpha/rc) version strings.
TESTING_REGEXP = re.compile('.*(beta|alpha|rc).*')
# Parses size/download counts out of sf.net file descriptions.
SIZE_REGEXP = re.compile('.*\(([0-9]+) bytes, ([0-9]+) downloads to date')
# Splits news summary into text, comments link and comment count.
COMMENTS_REGEXP = re.compile('^(.*)\(<a href="([^"]*)">([0-9]*) comments</a>\)$')
# Template for matching translation-update commit messages; both %s slots
# receive an alternation of language identifiers.
LANG_REGEXP ='((translation|lang|%s).*update|update.*(translation|lang|%s)|^updated?$|new lang|better word|fix.*translation)'

# Base URL (including trailing /)
SERVER = 'http://www.phpmyadmin.net'
BASE_URL = '/home_page/'
EXTENSION = 'php'

# How many security issues are shown in RSS
TOP_ISSUES = 10

# File locations
TEMPLATES = './templates'
CSS = './css'
JS = './js'
IMAGES = './images'
OUTPUT = './output'
STATIC = './static'

# Which JS files are not templates
JS_TEMPLATES = []

# Generic sourceforge.net part
# NOTE(review): the first assignment is immediately overridden by the
# second (newer API endpoint); the old export URL is kept for reference only.
PROJECT_FILES_RSS = 'https://sourceforge.net/export/rss2_projfiles.php?group_id=%d&rss_limit=100' % PROJECT_ID
PROJECT_FILES_RSS = 'https://sourceforge.net/api/file/index/project-id/%d/rss' % PROJECT_ID
PROJECT_NEWS_RSS = 'https://sourceforge.net/export/rss2_projnews.php?group_id=%d&rss_fulltext=1&limit=10' % PROJECT_ID
PROJECT_SUMMARY_RSS = 'https://sourceforge.net/export/rss2_projsummary.php?group_id=%d' % PROJECT_ID
DONATIONS_RSS = 'https://sourceforge.net/export/rss2_projdonors.php?group_id=%d&limit=20' % PROJECT_ID
PROJECT_SVN_RSS = 'http://cia.vc/stats/project/phpmyadmin/.rss'
PROJECT_DL = 'http://prdownloads.sourceforge.net/%s/%%s' % PROJECT_NAME
PROJECT_SVN = 'https://phpmyadmin.svn.sourceforge.net/svnroot/phpmyadmin/trunk/phpMyAdmin/'
TRANSLATIONS_SVN = '%slang/' % PROJECT_SVN
PLANET_RSS = 'http://planet.phpmyadmin.net/rss20.xml'
RSS_CZ = 'http://phpmyadmin.cz/rss.xml'
RSS_RU = 'http://php-myadmin.ru/rss/news.xml'

# Data sources
SVN_MD5 = 'http://dl.cihar.com/phpMyAdmin/trunk/md5.sums'
SVN_SIZES = 'http://dl.cihar.com/phpMyAdmin/trunk/files.list'

# Clean output before generating
CLEAN_OUTPUT = True

# RSS parsing
# Regexps matching the titles of the project summary feed entries.
SUMMARY_DEVS = re.compile('Developers on project: ([0-9]*)')
SUMMARY_ACTIVITY = re.compile('Activity percentile \(last week\): ([0-9.]*%)')
SUMMARY_DOWNLOADS = re.compile('Downloadable files: ([0-9]*) total downloads to date')
SUMMARY_LISTS = re.compile('Mailing lists \(public\): ([0-9]*)')
SUMMARY_FORUMS = re.compile('Discussion forums \(public\): ([0-9]*), containing ([0-9]*) messages')
SUMMARY_TRACKER = re.compile('Tracker: (.*) \(([0-9]*) open/([0-9]*) total\)')

# Identi.ca integration; password is normally supplied on the command line.
IDENTICA_USER = 'phpmyadmin'
IDENTICA_PASSWORD = None
115 def copytree(src, dst):
117 Trimmed down version of shutil.copytree. Recursively copies a directory
118 tree using shutil.copy2().
120 The destination directory must not already exist.
121 If exception(s) occur, an Error is raised with a list of reasons.
123 It handles only files and dirs and ignores .svn and *.swp* files and
124 files starting with underscore (_).
126 names = os.listdir(src)
127 errors = []
128 for name in names:
129 if name == '.svn' or name.find('.swp') != -1 or name[0] == '_':
130 continue
131 srcname = os.path.join(src, name)
132 dstname = os.path.join(dst, name)
133 try:
134 if os.path.isdir(srcname):
135 os.makedirs(dstname)
136 copytree(srcname, dstname)
137 else:
138 shutil.copy2(srcname, dstname)
139 except (IOError, os.error), why:
140 errors.append((srcname, dstname, str(why)))
141 # catch the Error from the recursive copytree so that we can
142 # continue with other files
143 except OSError, err:
144 errors.extend(err.args[0])
145 if errors:
146 raise OSError, errors
def fmt_bytes(number):
    """
    Formats bytes to human readable form.

    Accepts an int or anything int() accepts; returns a string such as
    '512 bytes', '1.5 KiB', '20 MiB'.
    """
    number = int(number)
    mib = 1024 * 1024
    if number > 10 * mib:
        return '%d MiB' % (number // mib)
    if number > mib:
        return '%.1f MiB' % (number / (1.0 * mib))
    if number > 10 * 1024:
        return '%d KiB' % (number // 1024)
    if number > 1024:
        return '%.1f KiB' % (number / 1024.0)
    return '%d bytes' % number
164 class SFGenerator:
    def __init__(self):
        """Initialise the shared template data dictionary, template loaders
        and the various remote-data caches."""
        # Data made available to every template.
        self.data = {
            'releases': [],
            'releases_featured': [],
            'releases_older': [],
            'releases_beta': [],
            'themes': [],
            'news': [],
            'blogs': [],
            'issues': [],
            'donations': [],
            'base_url': BASE_URL,
            'server': SERVER,
            'file_ext': EXTENSION,
            'rss_files': PROJECT_FILES_RSS,
            'rss_donations': DONATIONS_RSS,
            'rss_news': PROJECT_NEWS_RSS,
            'rss_planet': PLANET_RSS,
            'rss_summary': PROJECT_SUMMARY_RSS,
            'rss_security': '%s%ssecurity/index.xml' % (SERVER, BASE_URL),
            'rss_svn': PROJECT_SVN_RSS,
            'screenshots': data.screenshots.SCREENSHOTS,
            'awards': data.awards.AWARDS,
            'generated': helper.date.fmtdatetime.utcnow(),
            'themecssversions': data.themes.CSSVERSIONS,
            'sfservers': data.sf.SERVERS,
            'current_year': datetime.datetime.now().year,
        }
        # XHTML pages use the markup template class; CSS, static text files
        # and JavaScript are rendered as plain text templates.
        self.loader = TemplateLoader([TEMPLATES])
        self.cssloader = TemplateLoader([CSS], default_class = NewTextTemplate)
        self.staticloader = TemplateLoader([STATIC], default_class = NewTextTemplate)
        self.jsloader = TemplateLoader([JS], default_class = NewTextTemplate)
        # Caches for RSS feeds, XML documents, plain URLs and SVN content.
        self.feeds = helper.cache.FeedCache()
        self.xmls = helper.cache.XMLCache()
        self.urls = helper.cache.URLCache()
        self.svn = helper.cache.SVNCache(TRANSLATIONS_SVN)
        self.simplesvn = helper.cache.SimpleSVNCache(PROJECT_SVN)
203 def get_outname(self, page):
205 Converts page name to file name. Basically only extension is appended
206 if none is already used.
208 if page.find('.') == -1:
209 return '%s.%s' % (page, self.data['file_ext'])
210 else:
211 return page
213 def get_renderer(self, page):
215 Returns genshi renderer type for chosen page.
217 if page[:-4] == '.xml':
218 return 'xml'
219 return 'xhtml'
221 def text_to_id(self, text):
223 Converts text to something what can be used as a anchor or id (no spaces
224 or other special chars).
226 return re.sub('[^a-z0-9A-Z.-]', '_', text)
228 def fmt_translator(self, translator):
230 Formats translator information.
232 lines = [x.strip() for x in translator.split('\n')]
233 output = []
234 for line in lines:
235 try:
236 name, email = line.split('(')
237 except ValueError:
238 name = line
239 email = None
240 output.append(name.strip())
241 return ', '.join(output)
243 def get_version_info(self, version):
245 Returns description to the phpMyAdmin version.
247 if version[:2] == '2.':
248 text ='Version compatible with PHP 4+ and MySQL 3+.'
249 elif version[:2] == '3.':
250 text = 'Version compatible with PHP 5 and MySQL 5.'
251 if version.find('beta1') != -1:
252 text += ' First beta version.'
253 elif version.find('beta2') != -1:
254 text += ' Second beta version.'
255 elif version.find('beta') != -1:
256 helper.log.warn('Generic beta: %s' % version)
257 text += ' Beta version.'
258 elif version.find('rc1') != -1:
259 text += ' First release candidate.'
260 elif version.find('rc2') != -1:
261 text += ' Second release candidate.'
262 elif version.find('rc3') != -1:
263 text += ' Third release candidate.'
264 elif version.find('rc') != -1:
265 text += ' Release candidate.'
266 helper.log.warn('Generic RC: %s' % version)
268 return text
    def dom2release(self, item, theme = False):
        """
        Parses DOM object into release hash.

        Basically it gets XML like this::

            <item>
            <title><![CDATA[/phpMyAdmin/3.2.1/phpMyAdmin-3.2.1-all-languages.tar.gz]]></title>
            <link>http://sourceforge.net/projects/phpmyadmin/files/phpMyAdmin/3.2.1/phpMyAdmin-3.2.1-all-languages.tar.gz/download</link>
            <guid>http://sourceforge.net/projects/phpmyadmin/files/phpMyAdmin/3.2.1/phpMyAdmin-3.2.1-all-languages.tar.gz/download</guid>
            <description><![CDATA[/phpMyAdmin/3.2.1/phpMyAdmin-3.2.1-all-languages.tar.gz]]></description>
            <pubDate>Sun, 09 Aug 2009 21:27:17 +0000</pubDate>
            <files:extra-info xmlns:files="http://sourceforge.net/api/files.rdf#">HTML document text</files:extra-info>
            <media:content xmlns:media="http://video.search.yahoo.com/mrss/" type="text/html" url="..." filesize="1539"><media:title></media:title><media:hash algo="md5">b9e4de4108f1d6e5fc4772df888e73ac</media:hash></media:content>
            <files:download-count xmlns:files="http://sourceforge.net/api/files.rdf#">0</files:download-count>
            </item>

        :param item: DOM element for one feed item
        :param theme: when True, skip the phpMyAdmin version description
        :return: (release dict, file dict) tuple
        """
        # Title is a path like /phpMyAdmin/3.2.1/phpMyAdmin-3.2.1-all-languages.tar.gz
        title = item.getElementsByTagName('title')[0].childNodes[0].data
        titleparts = title[1:].split('/')
        type = titleparts[0]
        version = titleparts[1]
        filename = titleparts[2]
        ext = os.path.splitext(filename)[1]
        link = item.getElementsByTagName('link')[0].childNodes[0].data
        pubdate = item.getElementsByTagName('pubDate')[0].childNodes[0].data
        # "all-languages." archives are the featured downloads.
        featured = (filename.find(FILES_MARK) != -1)
        dlcount = item.getElementsByTagName('files:download-count')[0].childNodes[0].data
        try:
            notes = item.getElementsByTagName('files:release-notes-url')[0].childNodes[0].data
        except:
            # Element missing in feed; NOTE(review): bare except also hides
            # unrelated errors.
            notes = ''
        media = item.getElementsByTagName('media:content')[0]
        size = media.getAttribute('filesize')
        md5 = None
        # Pick the MD5 checksum out of the media:hash elements.
        for hash in media.getElementsByTagName('media:hash'):
            if hash.getAttribute('algo') == 'md5':
                md5 = hash.childNodes[0].data

        release = {
            'show': False,
            'version': version,
            # Strip the numeric "+0000" offset and parse the date as GMT.
            'date': helper.date.fmtdatetime.parse(pubdate[:-6] + ' GMT'),
            'name': type,
            'fullname': '%s %s' % (type, version),
            'notes': notes,
            'files': []
        }
        if not theme:
            release['info'] = self.get_version_info(version)

        file = {
            'name': filename,
            'url': link,
            'ext': ext,
            'featured': featured,
            'size': size,
            'size_k' : int(size) / 1024,
            'size_m' : int(size) / (1024 * 1024),
            'humansize': fmt_bytes(size),
            'dlcount': dlcount,
            'md5': md5}

        return release, file
335 def process_releases(self, xml_files):
337 Gets phpMyAdmin releases out of releases feed and fills releases,
338 releases_beta and releases_older.
342 helper.log.dbg('Processing file releases...')
343 releases_dict = {}
344 for entry in xml_files.getElementsByTagName('item'):
345 title = entry.getElementsByTagName('title')[0].childNodes[0].data
346 titleparts = title[1:].split('/')
347 type = titleparts[0]
348 if type != 'phpMyAdmin':
349 continue
350 # This should not be needed, but the XML is currently broken, see
351 # https://sourceforge.net/apps/trac/sourceforge/ticket/3791
352 try:
353 item = self.xmls.load('release-%s' % title, '%s?path=%s' % (PROJECT_FILES_RSS, title))
354 item = item.getElementsByTagName('item')[0]
355 except:
356 item = entry
357 release, file = self.dom2release(item)
358 if release is None:
359 continue
360 if not releases_dict.has_key(release['version']):
361 releases_dict[release['version']] = release
362 if file['ext'] == '.html':
363 releases_dict[release['version']]['notes'] = file['url']
364 else:
365 releases_dict[release['version']]['files'].append(file)
367 releases = [releases_dict[rel] for rel in releases_dict.keys()]
369 helper.log.dbg('Sorting file lists...')
370 releases.sort(key = lambda x: x['version'], reverse = True)
372 helper.log.dbg('Detecting versions...')
373 outversions = {}
374 outbetaversions = {}
376 # Split up versions to branches
377 for idx in xrange(len(releases)):
378 version = releases[idx]
379 branch = BRANCH_REGEXP.match(version['version']).group(1)
380 test = TESTING_REGEXP.match(version['version'])
381 if test is not None:
382 try:
383 if releases[outbetaversions[branch]]['version'] < version['version']:
384 outbetaversions[branch] = idx
385 except KeyError:
386 outbetaversions[branch] = idx
387 else:
388 try:
389 if releases[outversions[branch]]['version'] < version['version']:
390 outversions[branch] = idx
391 except KeyError:
392 outversions[branch] = idx
394 # Check for old beta versions
395 for beta in outbetaversions.keys():
396 try:
397 stable_rel = releases[outversions[beta]]['version']
398 beta_rel = releases[outbetaversions[beta]]['version'].split('-')[0]
399 if stable_rel > beta_rel or stable_rel == beta_rel:
400 helper.log.dbg('Old beta: %s' % releases[outbetaversions[beta]]['version'])
401 del outbetaversions[beta]
402 except KeyError:
403 pass
405 # Check for old stable releases
406 for stable in outversions.keys():
407 version = releases[outversions[stable]]['version']
408 major_branch = MAJOR_BRANCH_REGEXP.match(version).group(1)
409 for check in outversions.keys():
410 try:
411 check_version = releases[outversions[check]]['version']
412 except KeyError:
413 # We already marked this one as old
414 continue
415 if major_branch == check_version[:len(major_branch)] and version < check_version:
416 helper.log.dbg('Old release: %s' % version)
417 del outversions[stable]
418 continue
420 featured = max(outversions.keys())
421 featured_id = outversions[featured]
423 helper.log.dbg('Versions detected:')
424 for idx in xrange(len(releases)):
425 if idx in outversions.values():
426 self.data['releases'].append(releases[idx])
427 if featured_id == idx:
428 releases[idx]['info'] += ' Currently recommended version.'
429 self.data['releases_featured'].append(releases[idx])
430 helper.log.dbg(' %s (featured)' % releases[idx]['version'])
431 else:
432 helper.log.dbg(' %s' % releases[idx]['version'])
433 elif idx in outbetaversions.values():
434 self.data['releases_beta'].append(releases[idx])
435 helper.log.dbg(' %s (beta)' % releases[idx]['version'])
436 else:
437 self.data['releases_older'].append(releases[idx])
438 helper.log.dbg(' %s (old)' % releases[idx]['version'])
    def get_snapshots_info(self):
        """
        Retrieves SVN snapshots info and fills it in data['release_svn'].

        Reads the md5.sums and files.list exports from dl.cihar.com and
        combines them into one record per snapshot file.
        """
        md5_strings = self.urls.load(SVN_MD5).split('\n')
        size_strings = self.urls.load(SVN_SIZES).split('\n')
        # Map file name -> md5 checksum.
        md5s = {}
        for line in md5_strings:
            if line.strip() == '':
                continue
            md5, name = line.split('  ')
            md5s[name] = md5
        svn = []
        for line in size_strings:
            if line.strip() == '':
                continue
            name, size = line.split(' ')
            svn.append({
                'name' : name,
                'size' : int(size),
                'size_k' : int(size) / 1024,
                'size_m' : int(size) / (1024 * 1024),
                'humansize' : fmt_bytes(size),
                # Download through the CoralCDN mirror.
                'url' : 'http://dl.cihar.com.nyud.net/phpMyAdmin/trunk/%s' % name,
                # KeyError here means the two listings are out of sync.
                'md5' : md5s[name],
            })
        self.data['release_svn'] = svn
    def process_themes(self, xml_files):
        """
        Gets theme releases out of releases feed and fills themes.

        :param xml_files: DOM of the project files RSS feed
        """
        helper.log.dbg('Processing themes releases...')
        for entry in xml_files.getElementsByTagName('item'):
            title = entry.getElementsByTagName('title')[0].childNodes[0].data
            titleparts = title[1:].split('/')
            type = titleparts[0]
            # Theme file paths start with "theme-<name>".
            if type[:6] != 'theme-':
                continue
            type = type[6:]
            version = titleparts[1]
            # This should not be needed, but the XML is currently broken, see
            # https://sourceforge.net/apps/trac/sourceforge/ticket/3791
            try:
                item = self.xmls.load('release-%s' % title, '%s?path=%s' % (PROJECT_FILES_RSS, title))
                item = item.getElementsByTagName('item')[0]
            except:
                item = entry
            release, file = self.dom2release(item, theme = True)
            if release is None:
                continue
            release['shortname'] = type
            release['imgname'] = 'images/themes/%s.png' % type
            # Merge in hand-maintained metadata; fall back to bare defaults
            # when the theme is not described in data.themes.THEMES.
            try:
                release.update(data.themes.THEMES['%s-%s' % (type, version)])
            except KeyError:
                helper.log.warn('No meatadata for theme %s-%s!' % (type, version))
                release['name'] = type
                release['support'] = 'N/A'
                release['info'] = ''
            release['fullname'] = '%s %s' % (release['name'], version)
            # CSS classes encode which phpMyAdmin versions the theme supports.
            release['classes'] = data.themes.CSSMAP[release['support']]

            release['file'] = file
            self.data['themes'].append(release)

        helper.log.dbg('Sorting file lists...')
        self.data['themes'].sort(key = lambda x: x['date'], reverse = True)
    def process_news(self, feed):
        """
        Fills in news based on news feed.

        :param feed: feedparser result for the project news RSS
        """
        helper.log.dbg('Processing news feed...')
        for entry in feed.entries:
            # NOTE(review): matches is None if the summary does not carry the
            # "(<a ...>N comments</a>)" suffix — that would raise
            # AttributeError below; confirm the feed always includes it.
            matches = COMMENTS_REGEXP.match(entry.summary)
            item = {}
            item['link'] = entry.link
            item['date'] = helper.date.fmtdatetime.parse(entry.updated)
            # replaces are workaround for broken automatic links from sf.net rss feed
            item['text'] = matches.group(1).replace('.</a>', '</a>.').replace('.">http', '">http')
            item['comments_link'] = matches.group(2)
            item['comments_number'] = matches.group(3)
            item['title'] = entry.title
            item['anchor'] = self.text_to_id(entry.title)
            self.data['news'].append(item)

        # First five news items are shown on the front page.
        self.data['short_news'] = self.data['news'][:5]
529 def tweet(self):
531 Finds out whether we should send update to identi.ca and twitter and do so.
533 news = self.data['news'][0]
534 if IDENTICA_USER is None or IDENTICA_PASSWORD is None:
535 return
536 storage = helper.cache.Cache()
537 try:
538 last = storage.get('last-tweet')
539 except helper.cache.NoCache:
540 last = None
541 if last == news['link']:
542 helper.log.dbg('No need to tweet, the last news is still the same...')
543 return
544 tweet = '%s | http://www.phpmyadmin.net/ | #phpmyadmin' % news['title']
545 helper.log.dbg('Tweeting to identi.ca: %s' % tweet)
546 api = helper.twitter.Api(username = IDENTICA_USER,
547 password = IDENTICA_PASSWORD,
548 twitterserver='identi.ca/api')
549 api.SetSource('phpMyAdmin website')
550 api.PostUpdate(tweet)
551 last = storage.set('last-tweet', news['link'])
553 def process_planet(self, feed):
555 Fills in planet based on planet feed.
557 helper.log.dbg('Processing planet feed...')
558 for entry in feed.entries:
559 item = {}
560 item['link'] = 'http://planet.phpmyadmin.net/#%s' % entry.link
561 item['date'] = helper.date.fmtdatetime.parse(entry.updated.replace('+0000', 'GMT'))
562 item['text'] = entry.summary_detail['value']
563 item['title'] = entry.title
564 self.data['blogs'].append(item)
566 self.data['short_blogs'] = self.data['blogs'][:5]
568 def process_feed(self, name, feed, count = 3):
570 Fills in feed data based on feeparser feed.
572 helper.log.dbg('Processing %s feed...' % name)
573 self.data[name] = []
574 for entry in feed.entries:
575 item = {}
576 item['link'] = entry.link
577 item['date'] = entry.updated_parsed
578 item['text'] = entry.summary_detail['value']
579 item['title'] = entry.title
580 self.data[name].append(item)
582 self.data['short_%s' % name ] = self.data[name][:count]
584 def process_donations(self, feed):
586 Fills in donations based on donations feed.
588 helper.log.dbg('Processing donations feed...')
589 for entry in feed.entries:
590 item = {}
591 item['link'] = entry.link
592 item['date'] = helper.date.fmtdatetime.parse(entry.updated)
593 item['text'] = helper.stringfmt.fmt_urls(entry.summary)
594 item['title'] = entry.title
595 self.data['donations'].append(item)
    def process_summary(self, feed):
        """
        Reads summary feed and fills some useful information into data.

        Dispatches on the title prefix of each feed entry; results end up in
        self.data['info'], self.data['links'] and self.data['trackers'].
        """
        helper.log.dbg('Processing summary feed...')
        # NOTE(review): local ``data`` shadows the module-level ``data``
        # package inside this method.
        data = {}
        links = {}
        trackers = []
        for entry in feed.entries:
            if entry.title[:22] == 'Developers on project:':
                m = SUMMARY_DEVS.match(entry.title)
                data['developers'] = m.group(1)
                links['developers'] = entry.link
            elif entry.title[:19] == 'Activity percentile':
                m = SUMMARY_ACTIVITY.match(entry.title)
                data['activity'] = m.group(1)
                links['activity'] = entry.link
            elif entry.title[:19] == 'Downloadable files:':
                m = SUMMARY_DOWNLOADS.match(entry.title)
                data['downloads'] = m.group(1)
                links['downloads'] = entry.link
            elif entry.title[:13] == 'Mailing lists':
                m = SUMMARY_LISTS.match(entry.title)
                data['mailinglists'] = m.group(1)
                links['mailinglists'] = entry.link
            elif entry.title[:17] == 'Discussion forums':
                m = SUMMARY_FORUMS.match(entry.title)
                data['forums'] = m.group(1)
                data['forumposts'] = m.group(2)
                links['forums'] = entry.link
            elif entry.title[:8] == 'Tracker:':
                m = SUMMARY_TRACKER.match(entry.title)
                trackers.append({
                    'name': m.group(1),
                    'open': m.group(2),
                    'total': m.group(3),
                    # Strips the leading boilerplate from the summary text.
                    'description': entry.summary[21:],
                    'link': entry.link,
                })
        self.data['info'] = data
        self.data['links'] = links
        trackers.sort(key = lambda x: x['name'])
        self.data['trackers'] = trackers
641 def get_menu(self, active):
643 Returns list of menu entries with marked active one.
645 menu = []
646 for item in data.menu.MENU:
647 title = item[1]
648 name = item[0]
649 field = {
650 'title' : title,
651 'class' : {},
653 if name == active or '%sindex' % name == active:
654 field['class'] = { 'class': 'active' }
655 if len(name) > 0 and name[-1] != '/':
656 name = self.get_outname(name)
657 field['link'] = '%s%s' % (BASE_URL, name)
658 menu.append(field)
659 return menu
661 def render_css(self, filename):
663 Renders CSS file from template.
665 helper.log.dbg(' %s' % filename)
666 template = self.cssloader.load(filename)
667 out = open(os.path.join(OUTPUT, 'css', filename), 'w')
668 out.write(template.generate(**self.data).render())
669 out.close()
671 def render_static(self, templatename, outfile, extradata = {}):
673 Renders "static" file from template.
675 helper.log.dbg(' %s' % outfile)
676 template = self.staticloader.load(templatename)
677 out = open(os.path.join(OUTPUT, outfile), 'w')
678 extradata.update(self.data)
679 out.write(template.generate(**extradata).render())
680 out.close()
682 def render_js(self, filename):
684 Renders JavaScript file from template. Some defined files are not processed
685 through template engine as they were taken from other projects.
687 helper.log.dbg(' %s' % filename)
688 outpath = os.path.join(OUTPUT, 'js', filename)
689 if filename not in JS_TEMPLATES:
690 shutil.copy2(os.path.join(JS, filename), outpath)
691 return
692 template = self.jsloader.load(filename)
693 out = open(outpath, 'w')
694 out.write(template.generate(**self.data).render())
695 out.close()
697 def render(self, page):
699 Renders standard page.
701 helper.log.dbg(' %s' % page)
702 template = self.loader.load('%s.tpl' % page)
703 menu = self.get_menu(page)
704 out = open(os.path.join(OUTPUT, self.get_outname(page)), 'w')
705 out.write(template.generate(menu = menu, **self.data).render(self.get_renderer(page)))
706 out.close()
708 def render_security(self, issue):
710 Renders security issue.
712 helper.log.dbg(' %s' % issue)
713 template = self.loader.load('security/%s' % issue)
714 menu = self.get_menu('security/')
715 out = open(os.path.join(OUTPUT, 'security', self.get_outname(issue)), 'w')
716 out.write(template.generate(menu = menu, issue = issue, **self.data).render('xhtml'))
717 out.close()
    def list_security_issues(self):
        """
        Fills in issues and topissues with security issues information.
        """
        issues = glob.glob('templates/security/PMASA-*')
        # Sort newest first. Paths look like templates/security/PMASA-YYYY-N:
        # x[24:29] is '-YYYY' (note the leading minus from the file name) and
        # x[30:] is N, so the key -YYYY*100 - N ascends from newest to oldest.
        # NOTE(review): breaks if the path prefix or name format ever changes.
        issues.sort(key = lambda x: int(x[24:29]) * 100 - int(x[30:]))
        for issue in issues:
            data = XML(open(issue, 'r').read())
            name = os.path.basename(issue)
            self.data['issues'].append({
                'name' : name,
                'link': '%ssecurity/%s' % (BASE_URL, self.get_outname(name)),
                'fulllink': '%s%ssecurity/%s' % (SERVER, BASE_URL, self.get_outname(name)),
                'summary': str(data.select('def[@function="announcement_summary"]/text()')),
                'date': helper.date.fmtdate.parse(str(data.select('def[@function="announcement_date"]/text()'))),
                'cve': str(data.select('def[@function="announcement_cve"]/text()')),
            })
        # Only the newest TOP_ISSUES go into the RSS feed.
        self.data['topissues'] = self.data['issues'][:TOP_ISSUES]
739 def prepare_output(self):
741 Copies static content to output and creates required directories.
743 helper.log.dbg('Copying static content to output...')
744 if CLEAN_OUTPUT:
745 try:
746 shutil.rmtree(OUTPUT)
747 os.mkdir(OUTPUT)
748 except OSError:
749 pass
750 else:
751 try:
752 shutil.rmtree(os.path.join(OUTPUT, 'images'))
753 except OSError:
754 pass
755 imgdst = os.path.join(OUTPUT, 'images')
756 os.makedirs(imgdst)
757 copytree(IMAGES, imgdst)
758 copytree(STATIC, OUTPUT)
759 try:
760 os.mkdir(os.path.join(OUTPUT, 'security'))
761 except OSError:
762 pass
763 try:
764 os.mkdir(os.path.join(OUTPUT, 'css'))
765 except OSError:
766 pass
767 try:
768 os.mkdir(os.path.join(OUTPUT, 'js'))
769 except OSError:
770 pass
772 def get_sitemap_data(self, page):
774 Returns metadata for page for sitemap as per http://sitemaps.org.
776 priority = '0.8'
777 changefreq = 'daily'
778 if page[:15] == 'security/PMASA-':
779 priority = '0.5'
780 changefreq = 'monthly'
781 elif page[:15] == '/documentation/':
782 priority = '0.7'
783 changefreq = 'weekly'
784 elif page[:20] == '/pma_localized_docs/':
785 priority = '0.6'
786 changefreq = 'monthly'
787 elif page in ['index', 'news']:
788 priority = '1.0'
789 changefreq = 'daily'
790 elif page in ['improve', 'team', 'docs']:
791 priority = '1.0'
792 changefreq = 'weekly'
793 elif page in ['downloads', 'donate', 'themes', 'translations']:
794 priority = '0.9'
795 changefreq = 'daily'
796 elif page in ['support']:
797 priority = '0.9'
798 changefreq = 'weekly'
799 elif page in ['sitemap']:
800 priority = '0.2'
801 changefreq = 'weekly'
802 return {
803 'lastmod' : helper.date.fmtdate.utcnow(),
804 'changefreq' : changefreq,
805 'priority' : priority,
    def generate_sitemap(self):
        """
        Generates list of pages with titles.

        Walks the templates directory, collecting page entries for the HTML
        sitemap (data['sitemap']) and the XML sitemap (data['sitemapxml']).
        """
        self.data['sitemap'] = []
        self.data['sitemapxml'] = []
        helper.log.dbg('Generating sitemap:')
        for root, dirs, files in os.walk(TEMPLATES):
            if '.svn' in dirs:
                dirs.remove('.svn') # don't visit .svn directories
            files.sort()
            dir = root[len(TEMPLATES):].strip('/')
            if len(dir) > 0:
                dir += '/'
            for file in files:
                name, ext = os.path.splitext(file)
                # Only templates and security announcements are pages.
                if ext != '.tpl' and name[:6] != 'PMASA-':
                    continue
                # Skip private/hidden templates.
                if name[0] in ['_', '.']:
                    continue
                # These are generated separately or must not be listed.
                if file in ['index.xml.tpl', 'sitemap.xml.tpl', '404.tpl']:
                    continue
                helper.log.dbg('- %s' % file)
                xmldata = XML(open(os.path.join(root, file), 'r').read())
                # Page title comes from the page_title template function;
                # security pages use announcement_id instead.
                title = str(xmldata.select('def[@function="page_title"]/text()'))
                title = title.strip()
                if len(title) == 0:
                    title = str(xmldata.select('def[@function="announcement_id"]/text()'))
                    title = title.strip()
                if len(title) == 0:
                    title = 'Index'
                link = dir + self.get_outname(name)
                sitemap = {
                    'link': link,
                    'loc': '%s%s%s' % (SERVER, BASE_URL, link),
                    'title': title
                }
                # Security announcements only appear in the XML sitemap.
                if name[:6] != 'PMASA-':
                    self.data['sitemap'].append(sitemap)
                sitemap.update(self.get_sitemap_data(dir + name))
                self.data['sitemapxml'].append(sitemap)
        # Additional hand-maintained entries (e.g. documentation).
        for link in data.sitemap.ENTRIES:
            sitemap = {
                'loc': SERVER + link,
            }
            sitemap.update(self.get_sitemap_data(link))
            self.data['sitemapxml'].append(sitemap)
    def get_translation_stats(self):
        """
        Receives translation stats from external server and parses it.

        For every lang file in SVN it computes completeness against the
        English master file and finds the last translation-update commit.
        """
        helper.log.dbg('Processing translation stats...')
        self.data['translations'] = []
        list = self.svn.ls()
        translators = XML(self.simplesvn.cat('translators.html'))
        english = self.svn.cat('english-utf-8.inc.php')
        # Total message count is the number of $str assignments in English.
        allmessages = len(re.compile('\n\$str').findall(english))
        for name in list:
            if name[-14:] != '-utf-8.inc.php':
                continue
            lang = name[:-14]
            # For "pt_BR" style codes also remember the base language.
            try:
                baselang, ignore = lang.split('_')
            except:
                baselang = lang
            # Look up translator by full code, falling back to base code.
            translator = translators.select('tr[@id="%s"]/td[2]/text()' % lang)
            translator = unicode(translator).strip()
            if translator == '':
                translator = translators.select('tr[@id="%s"]/td[2]/text()' % baselang)
                translator = unicode(translator).strip()
            translator = self.fmt_translator(translator)
            short = data.langnames.MAP[lang]
            helper.log.dbg(' - %s [%s]' % (lang, short))
            svnlog = self.svn.log(name)
            # Find the latest commit whose message mentions updating this
            # translation; English (the master) counts any commit.
            langs = '%s|%s|%s' % (lang, short, baselang)
            regexp = re.compile(LANG_REGEXP % (langs, langs), re.IGNORECASE)
            found = None
            if lang == 'english':
                found = svnlog[0]
            else:
                for x in svnlog:
                    if regexp.findall(x['message']) != []:
                        found = x
                        break
            content = self.svn.cat(name)
            # Untranslated messages carry a "to translate" marker.
            missing = len(re.compile('\n\$str.*to translate').findall(content))
            translated = allmessages - missing
            percent = 100.0 * translated / allmessages
            # CSS class highlights translations below 50% / 80% complete.
            if percent < 50:
                css = ' b50'
            elif percent < 80:
                css = ' b80'
            else:
                css =''
            try:
                dt = found['date']
            except TypeError:
                # found is None: no matching commit in the log.
                dt = ''
            self.data['translations'].append({
                'name': lang,
                'short': short,
                'translated': translated,
                'translator': translator,
                'percent': '%0.1f' % percent,
                'updated': dt,
                'css': css,
            })
917 def fetch_data(self):
919 Fetches data from remote or local sources and prepares template data.
921 self.get_snapshots_info()
923 xml_files = self.xmls.load('files', PROJECT_FILES_RSS)
924 self.process_releases(xml_files)
925 self.process_themes(xml_files)
927 rss_news = self.feeds.load('news', PROJECT_NEWS_RSS)
928 self.process_news(rss_news)
930 rss_planet = self.feeds.load('planet', PLANET_RSS)
931 self.process_planet(rss_planet)
933 rss_cz = self.feeds.load('cz', RSS_CZ)
934 self.process_feed('news_cz', rss_cz)
936 rss_ru = self.feeds.load('ru', RSS_RU)
937 self.process_feed('news_ru', rss_ru)
939 rss_summary = self.feeds.load('summary', PROJECT_SUMMARY_RSS)
940 self.process_summary(rss_summary)
942 rss_donations = self.feeds.load('donations', DONATIONS_RSS)
943 self.process_donations(rss_donations)
945 self.get_translation_stats()
947 self.list_security_issues()
949 self.generate_sitemap()
951 self.tweet()
953 def render_pages(self):
955 Renders all content pages.
957 helper.log.dbg('Rendering pages:')
958 templates = [os.path.basename(x) for x in glob.glob('templates/*.tpl')]
959 templates.extend([os.path.join('security', os.path.basename(x)) for x in glob.glob('templates/security/*.tpl')])
960 for template in templates:
961 name = os.path.splitext(template)[0]
962 if os.path.basename(name)[0] == '_':
963 continue
964 self.render(name)
966 helper.log.dbg('Rendering security issues pages:')
967 for issue in self.data['issues']:
968 self.render_security(issue['name'])
970 helper.log.dbg('Generating CSS:')
971 for css in [os.path.basename(x) for x in glob.glob('css/*.css')]:
972 self.render_css(css)
974 helper.log.dbg('Generating JavaScript:')
975 for js in [os.path.basename(x) for x in glob.glob('js/*.js')]:
976 self.render_js(js)
978 helper.log.dbg('Generating static pages:')
979 self.render_static('_version.php', 'version.php')
980 self.render_static('_version.txt', 'version.txt')
981 self.render_static('_security.php', 'security.php')
982 self.render_static('_robots.txt', 'robots.txt')
983 for redir in data.redirects.REDIRECTS:
984 self.render_static('_redirect.tpl',
985 '%s.php' % redir,
986 {'location': self.get_outname(data.redirects.REDIRECTS[redir])})
    def main(self):
        """
        Main program which does everything: prepares output directories,
        fetches remote data and renders all pages.
        """
        self.prepare_output()
        self.fetch_data()
        self.render_pages()
        helper.log.dbg('Done!')
if __name__ == '__main__':
    # Command line interface; options override the module level defaults.
    parser = OptionParser()
    parser.add_option('-v', '--verbose',
        action='store_true',
        dest='verbose',
        help='Output verbose information.')
    parser.add_option('-q', '--quiet',
        action='store_false',
        dest='verbose',
        help='Only show errors and warnings.')
    parser.add_option('-C', '--clean',
        action='store_true',
        dest='clean',
        help='Clean output directory (default).')
    parser.add_option('-N', '--no-clean',
        action='store_false',
        dest='clean',
        help='Do not clean output directory.')
    parser.add_option('-V', '--verbose-cache',
        action='store_true',
        dest='verbose_cache',
        help='Output verbose caching information.')
    parser.add_option('-Q', '--quiet-cache',
        action='store_false',
        dest='verbose_cache',
        help='No information from caching in output.')
    parser.add_option('-s', '--server',
        action='store', type='string',
        dest='server',
        help='Name of server where data will be published, eg.: %s.' % SERVER)
    parser.add_option('-b', '--base-url',
        action='store', type='string',
        dest='base_url',
        help='Base URL of document, eg.: %s.' % BASE_URL)
    parser.add_option('-e', '--extension',
        action='store', type='string',
        dest='extension',
        help='Extension of generated files, default is %s.' % EXTENSION)
    parser.add_option('-l', '--log',
        action='store', type='string',
        dest='log',
        help='Log filename, default is none.')
    # Typo fixes in help texts: 'Pasword' -> 'Password',
    # 'defaull' -> 'default'.
    parser.add_option('-p', '--identica-password',
        action='store', type='string',
        dest='identica_password',
        help='Password to identi.ca, default is not to post there.')
    parser.add_option('-u', '--identica-user',
        action='store', type='string',
        dest='identica_user',
        help='Username to identi.ca, default is %s.' % IDENTICA_USER)

    parser.set_defaults(
        verbose = helper.log.VERBOSE,
        verbose_cache = helper.log.DBG_CACHE,
        server = SERVER,
        base_url = BASE_URL,
        clean = CLEAN_OUTPUT,
        log = None,
        extension = EXTENSION,
        identica_user = IDENTICA_USER,
        identica_password = IDENTICA_PASSWORD
        )

    (options, args) = parser.parse_args()

    # Propagate parsed options into module level configuration.
    helper.log.VERBOSE = options.verbose
    helper.log.DBG_CACHE = options.verbose_cache
    SERVER = options.server
    BASE_URL = options.base_url
    EXTENSION = options.extension
    CLEAN_OUTPUT = options.clean
    IDENTICA_USER = options.identica_user
    IDENTICA_PASSWORD = options.identica_password
    if options.log is not None:
        helper.log.LOG = open(options.log, 'w')

    gen = SFGenerator()
    gen.main()