Explicitly mention that phpmyadmin is the repository most people want
[phpmyadmin-website.git] / render.py
blob834ae6a3effc87fd49695001a9b31b6a30b1fada
1 #!/usr/bin/env python
2 # -*- coding: UTF-8 -*-
4 # phpMyAdmin web site generator
6 # Copyright (C) 2008 Michal Cihar <michal@cihar.com>
8 # This program is free software; you can redistribute it and/or modify
9 # it under the terms of the GNU General Public License as published by
10 # the Free Software Foundation; either version 2 of the License, or
11 # (at your option) any later version.
13 # This program is distributed in the hope that it will be useful,
14 # but WITHOUT ANY WARRANTY; without even the implied warranty of
15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 # GNU General Public License for more details.
18 # You should have received a copy of the GNU General Public License along
19 # with this program; if not, write to the Free Software Foundation, Inc.,
20 # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
22 import sys
23 import os
24 import re
25 import glob
26 import shutil
27 import csv
28 import traceback
29 import datetime
30 import polib
31 from genshi.template import TemplateLoader
32 from genshi.template import NewTextTemplate
33 from genshi.input import XML
34 from optparse import OptionParser
36 import helper.cache
37 import helper.log
38 import helper.date
39 import helper.stringfmt
40 import helper.twitter
42 import data.awards
43 import data.themes
44 import data.langnames
45 import data.menu
46 import data.screenshots
47 import data.redirects
48 import data.sf
49 import data.sitemap
# Project part
PROJECT_ID = 23067
PROJECT_NAME = 'phpmyadmin'

# Filtering
# Featured download archives are the all-languages bundles.
FILES_REGEXP = re.compile(r'.*all-languages\.(zip|tar\.gz).*')
# Extracts the "X.Y" branch part of a version string.
BRANCH_REGEXP = re.compile(r'^([0-9]+\.[0-9]+)\.')
# Extracts the major ("X") part of a version string.
MAJOR_BRANCH_REGEXP = re.compile(r'^([0-9]+)\.')
# Matches pre-release (testing) versions.
TESTING_REGEXP = re.compile(r'.*(beta|alpha|rc).*')
# Parses size/download counters out of sf.net file descriptions.
SIZE_REGEXP = re.compile(r'.*\(([0-9]+) bytes, ([0-9]+) downloads to date')
# Template for matching translation-update commit messages; '%s' is filled
# twice with "shortlang|longlang" before compiling (see
# get_translation_stats).
LANG_REGEXP = r'((translation|lang|%s)\W.*update|update.*\W(translation|lang|%s)($|\W)|^updated?$|new lang|better word|fix.*translation($|\W)|Translation update done using Pootle)'

# Base URL (including trailing /)
SERVER = 'http://www.phpmyadmin.net'
BASE_URL = '/home_page/'
EXTENSION = 'php'

# How many security issues are shown in RSS
TOP_ISSUES = 10

# File locations
TEMPLATES = './templates'
CSS = './css'
JS = './js'
IMAGES = './images'
OUTPUT = './output'
STATIC = './static'

# Which JS files are processed as templates (all others are copied
# verbatim, see render_js).
JS_TEMPLATES = []

# Generic sourceforge.net part
# NOTE(review): the first PROJECT_FILES_RSS assignment is the legacy
# export URL and is dead code -- it is immediately superseded by the file
# release API feed on the next line; kept for reference only.
PROJECT_FILES_RSS = 'https://sourceforge.net/export/rss2_projfiles.php?group_id=%d&rss_limit=100' % PROJECT_ID
PROJECT_FILES_RSS = 'https://sourceforge.net/api/file/index/project-id/%d/rss' % PROJECT_ID
PROJECT_NEWS_RSS = 'https://sourceforge.net/export/rss2_projnews.php?group_id=%d&rss_fulltext=1&limit=10' % PROJECT_ID
PROJECT_SUMMARY_RSS = 'https://sourceforge.net/export/rss2_projsummary.php?group_id=%d' % PROJECT_ID
DONATIONS_RSS = 'https://sourceforge.net/export/rss2_projdonors.php?group_id=%d&limit=20' % PROJECT_ID
PROJECT_VCS_RSS = 'http://phpmyadmin.git.sourceforge.net/git/gitweb.cgi?p=phpmyadmin/phpmyadmin;a=rss'
PROJECT_DL = 'http://prdownloads.sourceforge.net/%s/%%s' % PROJECT_NAME
PROJECT_GIT = 'git://github.com/phpmyadmin/phpmyadmin.git'
PLANET_RSS = 'http://planet.phpmyadmin.net/rss20.xml'
RSS_CZ = 'http://phpmyadmin.cz/rss.xml'
RSS_RU = 'http://php-myadmin.ru/rss/news.xml'

# Data sources
SNAPSHOT_MD5 = 'http://dl.cihar.com/phpMyAdmin/master/md5.sums'
SNAPSHOT_SIZES = 'http://dl.cihar.com/phpMyAdmin/master/files.list'

# Clean output before generating
CLEAN_OUTPUT = True

# RSS parsing
SUMMARY_DEVS = re.compile(r'Developers on project: ([0-9]*)')
SUMMARY_ACTIVITY = re.compile(r'Activity percentile \(last week\): ([0-9.]*%)')
SUMMARY_DOWNLOADS = re.compile(r'Downloadable files: ([0-9]*) total downloads to date')
SUMMARY_LISTS = re.compile(r'Mailing lists \(public\): ([0-9]*)')
SUMMARY_FORUMS = re.compile(r'Discussion forums \(public\): ([0-9]*), containing ([0-9]*) messages')
SUMMARY_TRACKER = re.compile(r'Tracker: (.*) \(([0-9]*) open/([0-9]*) total\)')

# Identi.ca integration; posting is disabled while the password is None.
IDENTICA_USER = 'phpmyadmin'
IDENTICA_PASSWORD = None
def copytree(src, dst):
    """
    Trimmed down version of shutil.copytree. Recursively copies a directory
    tree using shutil.copy2().

    The destination directory must already exist; subdirectories are
    created as needed. If exception(s) occur, an OSError is raised carrying
    a list of (srcname, dstname, reason) tuples.

    It handles only files and dirs and ignores .git/.svn directories,
    names containing '.swp' and names starting with underscore (_).
    """
    errors = []
    for name in os.listdir(src):
        # Skip VCS metadata, editor swap files and private templates.
        if name in ('.git', '.svn') or '.swp' in name or name.startswith('_'):
            continue
        srcname = os.path.join(src, name)
        dstname = os.path.join(dst, name)
        try:
            if os.path.isdir(srcname):
                os.makedirs(dstname)
                copytree(srcname, dstname)
            else:
                shutil.copy2(srcname, dstname)
        except (IOError, OSError) as why:
            # BUG FIX: os.error is an alias of OSError, so the original
            # separate "except OSError" meant to merge errors from the
            # recursive call was unreachable -- the aggregated list ended
            # up stringified inside a single tuple. Distinguish the two
            # cases by the exception payload instead.
            if isinstance(why.args[0] if why.args else None, list):
                # Raised by the recursive copytree() below: merge its
                # error list so we can continue with the remaining files.
                errors.extend(why.args[0])
            else:
                errors.append((srcname, dstname, str(why)))
    if errors:
        raise OSError(errors)
def fmt_bytes(number):
    """
    Formats a byte count into a human readable string.

    number may be an int or a numeric string (sizes come from the feeds
    as strings). Thresholds are binary (KiB/MiB); values of 1024 or less
    are shown in plain bytes.
    """
    number = int(number)
    if number > 10 * 1024 * 1024:
        # Use explicit floor division: '/' on ints is float division on
        # Python 3, which made the '%d' branches rely on implicit
        # truncation.
        return '%d MiB' % (number // (1024 * 1024))
    elif number > 1024 * 1024:
        return '%.1f MiB' % (number / (1024.0 * 1024))
    elif number > 10 * 1024:
        return '%d KiB' % (number // 1024)
    elif number > 1024:
        return '%.1f KiB' % (number / 1024.0)
    else:
        return '%d bytes' % number
class SFGenerator:
    """
    Generator of the phpMyAdmin web site: collects data from
    sourceforge.net feeds, a git checkout and other remote sources and
    renders the genshi templates into the output directory.
    """
    def __init__(self):
        # Template context shared by every rendered page; the process_*
        # methods fill in the empty lists below.
        self.data = {
            'releases': [],
            'releases_featured': [],
            'releases_older': [],
            'releases_beta': [],
            'themes': [],
            'news': [],
            'blogs': [],
            'issues': [],
            'donations': [],
            'base_url': BASE_URL,
            'server': SERVER,
            'file_ext': EXTENSION,
            'rss_files': PROJECT_FILES_RSS,
            'rss_donations': DONATIONS_RSS,
            'rss_news': PROJECT_NEWS_RSS,
            'rss_planet': PLANET_RSS,
            'rss_summary': PROJECT_SUMMARY_RSS,
            'rss_security': '%s%ssecurity/index.xml' % (SERVER, BASE_URL),
            'rss_vcs': PROJECT_VCS_RSS,
            'screenshots': data.screenshots.SCREENSHOTS,
            'awards': data.awards.AWARDS,
            'generated': helper.date.fmtdatetime.utcnow(),
            'themecssversions': data.themes.CSSVERSIONS,
            'sfservers': data.sf.SERVERS,
            'current_year': datetime.datetime.now().year,
        }
        # Separate template loaders: page templates use the default genshi
        # markup class, CSS/JS/static files are rendered as plain text.
        self.loader = TemplateLoader([TEMPLATES])
        self.cssloader = TemplateLoader([CSS], default_class = NewTextTemplate)
        self.staticloader = TemplateLoader([STATIC], default_class = NewTextTemplate)
        self.jsloader = TemplateLoader([JS], default_class = NewTextTemplate)
        # Caches for remote feeds, XML documents, plain URLs and the git
        # checkout used for translation statistics.
        self.feeds = helper.cache.FeedCache()
        self.xmls = helper.cache.XMLCache()
        self.urls = helper.cache.URLCache()
        self.git = helper.cache.GitCache(PROJECT_GIT)
201 def get_outname(self, page):
203 Converts page name to file name. Basically only extension is appended
204 if none is already used.
206 if page.find('.') == -1:
207 return '%s.%s' % (page, self.data['file_ext'])
208 else:
209 return page
211 def get_renderer(self, page):
213 Returns genshi renderer type for chosen page.
215 if page[:-4] == '.xml':
216 return 'xml'
217 return 'xhtml'
219 def text_to_id(self, text):
221 Converts text to something what can be used as a anchor or id (no spaces
222 or other special chars).
224 return re.sub('[^a-z0-9A-Z.-]', '_', text)
226 def get_version_info(self, version):
228 Returns description to the phpMyAdmin version.
230 if version[:2] == '1.':
231 text ='Historical release.'
232 elif version[:2] == '2.':
233 text ='Version compatible with PHP 4+ and MySQL 3+.'
234 elif version[:2] == '3.':
235 text = 'Version compatible with PHP 5.2 and MySQL 5.'
236 if version.find('beta1') != -1:
237 text += ' First beta version.'
238 elif version.find('beta2') != -1:
239 text += ' Second beta version.'
240 elif version.find('beta3') != -1:
241 text += ' Third beta version.'
242 elif version.find('beta4') != -1:
243 text += ' Fourth beta version.'
244 elif version.find('beta') != -1:
245 helper.log.warn('Generic beta: %s' % version)
246 text += ' Beta version.'
247 elif version.find('rc1') != -1:
248 text += ' First release candidate.'
249 elif version.find('rc2') != -1:
250 text += ' Second release candidate.'
251 elif version.find('rc3') != -1:
252 text += ' Third release candidate.'
253 elif version.find('rc') != -1:
254 text += ' Release candidate.'
255 helper.log.warn('Generic RC: %s' % version)
257 return text
    def dom2release(self, item, theme = False):
        """
        Parses a feed <item> DOM object into a (release, file) pair.

        The feed item looks like:

        <item>
        <title><![CDATA[/phpMyAdmin/3.2.1/phpMyAdmin-3.2.1-all-languages.tar.gz]]></title>
        <link>http://sourceforge.net/projects/phpmyadmin/files/.../download</link>
        <pubDate>Sun, 09 Aug 2009 21:27:17 +0000</pubDate>
        <media:content ... filesize="1539"><media:hash algo="md5">...</media:hash></media:content>
        <files:download-count ...>0</files:download-count>
        </item>
        """
        title = item.getElementsByTagName('title')[0].childNodes[0].data
        helper.log.dbg('Processing release %s' % title)
        # Title is a path: /<type>/<version>/<file> for the application,
        # /themes/<name>/<version>/<file> for themes.
        titleparts = title[1:].split('/')
        type = titleparts[0]
        version = titleparts[1]
        if theme:
            filename = titleparts[3]
        else:
            filename = titleparts[2]
        ext = os.path.splitext(filename)[1]
        link = item.getElementsByTagName('link')[0].childNodes[0].data
        pubdate = item.getElementsByTagName('pubDate')[0].childNodes[0].data
        # Featured files are the all-languages archives.
        featured = (FILES_REGEXP.match(filename) is not None)
        if featured:
            helper.log.dbg('Release is featured!')
        # Optional feed elements; missing ones get a default.
        try:
            dlcount = item.getElementsByTagName('files:download-count')[0].childNodes[0].data
        except:
            dlcount = None
        try:
            notes = item.getElementsByTagName('files:release-notes-url')[0].childNodes[0].data
        except:
            notes = ''
        media = item.getElementsByTagName('media:content')[0]
        size = media.getAttribute('filesize')
        # NOTE(review): md5 stays unbound if the feed carries no md5 hash
        # for this file -- the file dict below would then raise NameError;
        # presumably the feed always provides one. TODO confirm.
        for hash in media.getElementsByTagName('media:hash'):
            if hash.getAttribute('algo') == 'md5':
                md5 = hash.childNodes[0].data

        release = {
            'show': False,
            'version': version,
            # Strip the numeric timezone offset; the parser expects GMT.
            'date': helper.date.fmtdatetime.parse(pubdate[:-6] + ' GMT'),
            'name': type,
            'fullname': '%s %s' % (type, version),
            'notes': notes,
            'files': []
        }
        if not theme:
            release['info'] = self.get_version_info(version)

        file = {
            'name': filename,
            'url': link,
            'ext': ext,
            'featured': featured,
            'size': size,
            'size_k' : int(size) / 1024,
            'size_m' : int(size) / (1024 * 1024),
            'humansize': fmt_bytes(size),
            'dlcount': dlcount,
            'md5': md5}

        return release, file
332 def version_compare(self, first, second):
334 Returns true if second version is newer than first one.
336 # Check for identical versions
337 if first == second:
338 return False
339 # Split out possible suffix like beta or rc
340 first_parts = first.split('-')
341 second_parts = second.split('-')
343 # Extract numeric versions
344 f = [int(x) for x in first_parts[0].split('.')]
345 s = [int(x) for x in second_parts[0].split('.')]
347 # Compare numbers
348 if tuple(f) < tuple(s):
349 return True
350 if tuple(f) == tuple(s):
351 # Second is final
352 if len(second_parts) == 1:
353 return True
354 # First is final
355 if len(first_parts) == 1:
356 return False
357 # Both are betas
358 return (first_parts[1] < second_parts[1])
360 return False
    def process_releases(self, xml_files):
        """
        Gets phpMyAdmin releases out of the file releases feed and fills
        in releases, releases_featured, releases_beta and releases_older.
        """
        helper.log.dbg('Processing file releases...')
        releases_dict = {}
        for entry in xml_files.getElementsByTagName('item'):
            title = entry.getElementsByTagName('title')[0].childNodes[0].data
            titleparts = title[1:].split('/')
            type = titleparts[0]
            # Only /phpMyAdmin/... entries are application releases.
            if type != 'phpMyAdmin':
                continue
            path, ext = os.path.splitext(title)
            if ext not in ['.html', '.txt', '.7z', '.gz', '.bz2', '.xz', '.zip']:
                continue
            release, file = self.dom2release(entry)
            if release is None:
                continue
            # Merge all files of one version into a single release entry;
            # the .html file carries the release notes link.
            if not releases_dict.has_key(release['version']):
                releases_dict[release['version']] = release
            if file['ext'] == '.html':
                releases_dict[release['version']]['notes'] = file['url'].replace('/download', '/view')
            else:
                releases_dict[release['version']]['files'].append(file)

        releases = [releases_dict[rel] for rel in releases_dict.keys()]

        helper.log.dbg('Sorting file lists...')
        # NOTE(review): plain lexicographic sort on version strings; only
        # used for listing order, real comparisons use version_compare.
        releases.sort(key = lambda x: x['version'], reverse = True)

        helper.log.dbg('Detecting versions...')
        # Map branch ("X.Y") -> index (into releases) of its newest
        # stable resp. testing release.
        outversions = {}
        outbetaversions = {}

        # Split up versions to branches
        for idx in xrange(len(releases)):
            version = releases[idx]
            branch = BRANCH_REGEXP.match(version['version']).group(1)
            test = TESTING_REGEXP.match(version['version'])
            if test is not None:
                try:
                    if self.version_compare(releases[outbetaversions[branch]]['version'], version['version']):
                        outbetaversions[branch] = idx
                except KeyError:
                    # First testing release seen for this branch.
                    outbetaversions[branch] = idx
            else:
                try:
                    if self.version_compare(releases[outversions[branch]]['version'], version['version']):
                        outversions[branch] = idx
                except KeyError:
                    # First stable release seen for this branch.
                    outversions[branch] = idx

        # Check for old beta versions: drop a beta once a stable release
        # with the same or newer numeric version exists in its branch.
        for beta in outbetaversions.keys():
            try:
                stable_rel = releases[outversions[beta]]['version']
                beta_rel = releases[outbetaversions[beta]]['version'].split('-')[0]
                if stable_rel > beta_rel or stable_rel == beta_rel:
                    helper.log.dbg('Old beta: %s' % releases[outbetaversions[beta]]['version'])
                    del outbetaversions[beta]
            except KeyError:
                # No stable release in this branch yet.
                pass

        # Check for old stable releases
        for stable in outversions.keys():
            version = releases[outversions[stable]]['version']
            major_branch = MAJOR_BRANCH_REGEXP.match(version).group(1)
            # 1.x and 2.x are unsupported, never list them as current.
            if major_branch in ['1', '2']:
                del outversions[stable]
                continue
            # Drop this branch when a newer release with the same major
            # version exists in another branch.
            for check in outversions.keys():
                try:
                    check_version = releases[outversions[check]]['version']
                except KeyError:
                    # We already marked this one as old
                    continue
                if major_branch == check_version[:len(major_branch)] and self.version_compare(version, check_version):
                    helper.log.dbg('Old release: %s' % version)
                    del outversions[stable]
                    continue

        # The highest remaining branch provides the featured download.
        featured = max(outversions.keys())
        featured_id = outversions[featured]

        helper.log.dbg('Versions detected:')
        for idx in xrange(len(releases)):
            if idx in outversions.values():
                self.data['releases'].append(releases[idx])
                if featured_id == idx:
                    releases[idx]['info'] += ' Currently recommended version.'
                    self.data['releases_featured'].append(releases[idx])
                    helper.log.dbg(' %s (featured)' % releases[idx]['version'])
                else:
                    helper.log.dbg(' %s' % releases[idx]['version'])
            elif idx in outbetaversions.values():
                self.data['releases_beta'].append(releases[idx])
                helper.log.dbg(' %s (beta)' % releases[idx]['version'])
            else:
                self.data['releases_older'].append(releases[idx])
                helper.log.dbg(' %s (old)' % releases[idx]['version'])
    def get_snapshots_info(self):
        """
        Retrieves vcs snapshots info and fills it in data['release_vcs'].
        """
        md5_strings = self.urls.load(SNAPSHOT_MD5).split('\n')
        size_strings = self.urls.load(SNAPSHOT_SIZES).split('\n')
        # Map file name -> md5 checksum from the md5.sums listing.
        md5s = {}
        for line in md5_strings:
            if line.strip() == '':
                continue
            md5, name = line.split(' ')
            md5s[name] = md5
        vcs = []
        for line in size_strings:
            if line.strip() == '':
                continue
            name, size = line.split(' ')
            vcs.append({
                'name' : name,
                'size' : int(size),
                'size_k' : int(size) / 1024,
                'size_m' : int(size) / (1024 * 1024),
                'humansize' : fmt_bytes(size),
                'url' : 'http://dl.cihar.com/phpMyAdmin/master/%s' % name,
                'md5' : md5s[name],
            })
        self.data['release_vcs'] = vcs
    def process_themes(self, xml_files):
        """
        Gets theme releases out of the file releases feed and fills in
        self.data['themes'].
        """
        helper.log.dbg('Processing themes releases...')
        for entry in xml_files.getElementsByTagName('item'):
            title = entry.getElementsByTagName('title')[0].childNodes[0].data
            titleparts = title[1:].split('/')
            type = titleparts[0]
            # Only /themes/... entries belong here.
            if type != 'themes':
                continue
            path, ext = os.path.splitext(title)
            if ext not in ['.html', '.txt', '.7z', '.gz', '.bz2', '.xz', '.zip']:
                continue
            name = titleparts[1]
            version = titleparts[2]
            release, file = self.dom2release(entry, theme = True)
            if release is None:
                continue
            release['shortname'] = name
            release['ignore'] = False
            release['imgname'] = 'images/themes/%s.png' % name
            # Merge metadata (display name, support status, description)
            # from the local theme database; fall back to bare defaults.
            try:
                release.update(data.themes.THEMES['%s-%s' % (name, version)])
            except KeyError:
                helper.log.warn('No meatadata for theme %s-%s!' % (name, version))
                release['name'] = name
                release['support'] = 'N/A'
                release['info'] = ''
            release['fullname'] = '%s %s' % (release['name'], version)
            # CSS classes marking the supported phpMyAdmin versions.
            release['classes'] = data.themes.CSSMAP[release['support']]

            release['file'] = file
            if not release['ignore']:
                self.data['themes'].append(release)

        helper.log.dbg('Sorting file lists...')
        self.data['themes'].sort(key = lambda x: x['date'], reverse = True)
534 def process_news(self, feed):
536 Fills in news based on news feed.
538 helper.log.dbg('Processing news feed...')
539 for entry in feed.entries:
540 item = {}
541 item['link'] = entry.link
542 item['date'] = helper.date.fmtdatetime.parse(entry.updated)
543 # replaces are workaround for broken automatic links from sf.net rss feed
544 item['text'] = entry.summary.replace('.</a>', '</a>.').replace('.">http', '">http')
545 item['comments_link'] = entry.comments
546 item['comments_number'] = 0
547 item['title'] = entry.title
548 item['anchor'] = self.text_to_id(entry.title)
549 self.data['news'].append(item)
551 self.data['short_news'] = self.data['news'][:5]
    def tweet(self):
        """
        Finds out whether we should send update about the latest news
        item to identi.ca and does so, unless the same text was already
        posted. NOTE: near duplicate of tweet_security.
        """
        news = self.data['news'][0]
        # Posting is disabled unless credentials are configured.
        if IDENTICA_USER is None or IDENTICA_PASSWORD is None:
            return
        storage = helper.cache.Cache()
        tweet = '%s | http://www.phpmyadmin.net/ | #phpmyadmin' % news['title']
        # Compare against the last posted text to avoid duplicates.
        try:
            last = storage.force_get('last-tweet')
        except helper.cache.NoCache:
            last = None
        if last == tweet:
            helper.log.dbg('No need to tweet, the last news is still the same...')
            return
        helper.log.dbg('Tweeting to identi.ca: %s' % tweet)
        api = helper.twitter.Api(username = IDENTICA_USER,
                password = IDENTICA_PASSWORD,
                twitterserver='identi.ca/api')
        api.SetSource('phpMyAdmin website')
        api.PostUpdate(tweet)
        storage.set('last-tweet', tweet)
    def tweet_security(self):
        """
        Finds out whether we should send update about the latest security
        issue to identi.ca and does so, unless the same text was already
        posted. NOTE: near duplicate of tweet.
        """
        issue = self.data['issues'][0]
        # Posting is disabled unless credentials are configured.
        if IDENTICA_USER is None or IDENTICA_PASSWORD is None:
            return
        storage = helper.cache.Cache()
        tweet = '%s | http://www.phpmyadmin.net/home_page/security/ | #phpmyadmin #pmasa #security' % issue['name']
        # Compare against the last posted text to avoid duplicates.
        try:
            last = storage.force_get('last-security-tweet')
        except helper.cache.NoCache:
            last = None
        if last == tweet:
            helper.log.dbg('No need to tweet, the last news is still the same...')
            return
        helper.log.dbg('Tweeting to identi.ca: %s' % tweet)
        api = helper.twitter.Api(username = IDENTICA_USER,
                password = IDENTICA_PASSWORD,
                twitterserver='identi.ca/api')
        api.SetSource('phpMyAdmin website')
        api.PostUpdate(tweet)
        storage.set('last-security-tweet', tweet)
602 def process_planet(self, feed):
604 Fills in planet based on planet feed.
606 helper.log.dbg('Processing planet feed...')
607 for entry in feed.entries:
608 item = {}
609 item['link'] = 'http://planet.phpmyadmin.net/#%s' % entry.link
610 item['date'] = helper.date.fmtdatetime.parse(entry.updated.replace('+0000', 'GMT'))
611 item['text'] = entry.summary_detail['value']
612 item['title'] = entry.title
613 self.data['blogs'].append(item)
615 self.data['short_blogs'] = self.data['blogs'][:5]
617 def process_feed(self, name, feed, count = 3):
619 Fills in feed data based on feeparser feed.
621 helper.log.dbg('Processing %s feed...' % name)
622 self.data[name] = []
623 for entry in feed.entries:
624 item = {}
625 item['link'] = entry.link
626 item['date'] = entry.updated_parsed
627 item['text'] = entry.summary_detail['value']
628 item['title'] = entry.title
629 self.data[name].append(item)
631 self.data['short_%s' % name ] = self.data[name][:count]
633 def process_donations(self, feed):
635 Fills in donations based on donations feed.
637 helper.log.dbg('Processing donations feed...')
638 for entry in feed.entries:
639 item = {}
640 item['link'] = entry.link
641 item['date'] = helper.date.fmtdatetime.parse(entry.updated)
642 item['text'] = helper.stringfmt.fmt_urls(entry.summary)
643 item['title'] = entry.title
644 self.data['donations'].append(item)
    def process_summary(self, feed):
        """
        Reads summary feed and fills some useful information into
        self.data ('info', 'links', 'trackers').
        """
        helper.log.dbg('Processing summary feed...')
        # NOTE(review): this local shadows the module-level "data"
        # package; harmless here as the package is not used in this
        # method.
        data = {}
        links = {}
        trackers = []
        for entry in feed.entries:
            # Each summary entry is recognized by its title prefix and
            # parsed with the matching SUMMARY_* regexp.
            if entry.title[:22] == 'Developers on project:':
                m = SUMMARY_DEVS.match(entry.title)
                data['developers'] = m.group(1)
                links['developers'] = entry.link
            elif entry.title[:19] == 'Downloadable files:':
                m = SUMMARY_DOWNLOADS.match(entry.title)
                data['downloads'] = m.group(1)
                links['downloads'] = entry.link
            elif entry.title[:13] == 'Mailing lists':
                m = SUMMARY_LISTS.match(entry.title)
                data['mailinglists'] = m.group(1)
                links['mailinglists'] = entry.link
            elif entry.title[:17] == 'Discussion forums':
                m = SUMMARY_FORUMS.match(entry.title)
                data['forums'] = m.group(1)
                data['forumposts'] = m.group(2)
                links['forums'] = entry.link
            elif entry.title[:8] == 'Tracker:':
                m = SUMMARY_TRACKER.match(entry.title)
                trackers.append({
                    'name': m.group(1),
                    'open': m.group(2),
                    'total': m.group(3),
                    # Presumably strips a fixed prefix from the summary
                    # text -- TODO confirm against the live feed.
                    'description': entry.summary[21:],
                    'link': entry.link,
                })
        self.data['info'] = data
        self.data['links'] = links
        trackers.sort(key = lambda x: x['name'])
        self.data['trackers'] = trackers
686 def get_menu(self, active):
688 Returns list of menu entries with marked active one.
690 menu = []
691 for item in data.menu.MENU:
692 title = item[1]
693 name = item[0]
694 field = {
695 'title' : title,
696 'class' : {},
698 if name == active or '%sindex' % name == active:
699 field['class'] = { 'class': 'active' }
700 if len(name) > 0 and name[-1] != '/':
701 name = self.get_outname(name)
702 field['link'] = '%s%s' % (BASE_URL, name)
703 menu.append(field)
704 return menu
706 def render_css(self, filename):
708 Renders CSS file from template.
710 helper.log.dbg(' %s' % filename)
711 template = self.cssloader.load(filename)
712 out = open(os.path.join(OUTPUT, 'css', filename), 'w')
713 out.write(template.generate(**self.data).render())
714 out.close()
716 def render_static(self, templatename, outfile, extradata = {}):
718 Renders "static" file from template.
720 helper.log.dbg(' %s' % outfile)
721 template = self.staticloader.load(templatename)
722 out = open(os.path.join(OUTPUT, outfile), 'w')
723 extradata.update(self.data)
724 out.write(template.generate(**extradata).render())
725 out.close()
727 def render_js(self, filename):
729 Renders JavaScript file from template. Some defined files are not processed
730 through template engine as they were taken from other projects.
732 helper.log.dbg(' %s' % filename)
733 outpath = os.path.join(OUTPUT, 'js', filename)
734 if filename not in JS_TEMPLATES:
735 shutil.copy2(os.path.join(JS, filename), outpath)
736 return
737 template = self.jsloader.load(filename)
738 out = open(outpath, 'w')
739 out.write(template.generate(**self.data).render())
740 out.close()
742 def render(self, page):
744 Renders standard page.
746 helper.log.dbg(' %s' % page)
747 template = self.loader.load('%s.tpl' % page)
748 menu = self.get_menu(page)
749 out = open(os.path.join(OUTPUT, self.get_outname(page)), 'w')
750 out.write(template.generate(menu = menu, **self.data).render(self.get_renderer(page)))
751 out.close()
753 def render_security(self, issue):
755 Renders security issue.
757 helper.log.dbg(' %s' % issue)
758 template = self.loader.load('security/%s' % issue)
759 menu = self.get_menu('security/')
760 out = open(os.path.join(OUTPUT, 'security', self.get_outname(issue)), 'w')
761 out.write(template.generate(menu = menu, issue = issue, **self.data).render('xhtml'))
762 out.close()
    def list_security_issues(self):
        """
        Fills in issues and topissues with security issues information
        parsed from the PMASA-* announcement templates.
        """
        issues = glob.glob('templates/security/PMASA-*')
        # Sort newest first: the slices pick the year and issue number
        # out of 'templates/security/PMASA-YYYY-N' paths (presumably --
        # TODO verify the offsets against the real file names).
        issues.sort(key = lambda x: int(x[24:29]) * 100 - int(x[30:]))
        for issue in issues:
            # Metadata is extracted from the template's genshi defs.
            data = XML(open(issue, 'r').read())
            name = os.path.basename(issue)
            self.data['issues'].append({
                'name' : name,
                'link': '%ssecurity/%s' % (BASE_URL, self.get_outname(name)),
                'fulllink': '%s%ssecurity/%s' % (SERVER, BASE_URL, self.get_outname(name)),
                'summary': str(data.select('def[@function="announcement_summary"]/text()')),
                'date': helper.date.fmtdate.parse(str(data.select('def[@function="announcement_date"]/text()'))),
                'cves': str(data.select('def[@function="announcement_cve"]/text()')).split(' '),
                'versions': str(data.select('def[@function="announcement_affected"]/text()')),
            })
        self.data['topissues'] = self.data['issues'][:TOP_ISSUES]
785 def prepare_output(self):
787 Copies static content to output and creates required directories.
789 helper.log.dbg('Copying static content to output...')
790 if CLEAN_OUTPUT:
791 try:
792 shutil.rmtree(OUTPUT)
793 os.mkdir(OUTPUT)
794 except OSError:
795 pass
796 else:
797 try:
798 shutil.rmtree(os.path.join(OUTPUT, 'images'))
799 except OSError:
800 pass
801 imgdst = os.path.join(OUTPUT, 'images')
802 os.makedirs(imgdst)
803 copytree(IMAGES, imgdst)
804 copytree(STATIC, OUTPUT)
805 try:
806 os.mkdir(os.path.join(OUTPUT, 'security'))
807 except OSError:
808 pass
809 try:
810 os.mkdir(os.path.join(OUTPUT, 'css'))
811 except OSError:
812 pass
813 try:
814 os.mkdir(os.path.join(OUTPUT, 'js'))
815 except OSError:
816 pass
818 def get_sitemap_data(self, page):
820 Returns metadata for page for sitemap as per http://sitemaps.org.
822 priority = '0.8'
823 changefreq = 'daily'
824 if page[:15] == 'security/PMASA-':
825 priority = '0.5'
826 changefreq = 'monthly'
827 elif page[:15] == '/documentation/':
828 priority = '0.7'
829 changefreq = 'weekly'
830 elif page[:20] == '/pma_localized_docs/':
831 priority = '0.6'
832 changefreq = 'monthly'
833 elif page in ['index', 'news']:
834 priority = '1.0'
835 changefreq = 'daily'
836 elif page in ['improve', 'team', 'docs', 'devel', 'translate']:
837 priority = '1.0'
838 changefreq = 'weekly'
839 elif page in ['downloads', 'donate', 'themes', 'translations']:
840 priority = '0.9'
841 changefreq = 'daily'
842 elif page in ['support']:
843 priority = '0.9'
844 changefreq = 'weekly'
845 elif page in ['sitemap']:
846 priority = '0.2'
847 changefreq = 'weekly'
848 return {
849 'lastmod' : helper.date.fmtdate.utcnow(),
850 'changefreq' : changefreq,
851 'priority' : priority,
    def generate_sitemap(self):
        """
        Generates list of pages with titles: 'sitemap' for the human
        readable page, 'sitemapxml' for sitemap.xml.
        """
        self.data['sitemap'] = []
        self.data['sitemapxml'] = []
        helper.log.dbg('Generating sitemap:')
        for root, dirs, files in os.walk(TEMPLATES):
            if '.svn' in dirs:
                dirs.remove('.svn') # don't visit .svn directories
            if '.git' in dirs:
                dirs.remove('.git') # don't visit .git directories
            files.sort()
            dir = root[len(TEMPLATES):].strip('/')
            if len(dir) > 0:
                dir += '/'
            for file in files:
                name, ext = os.path.splitext(file)
                # Only .tpl templates and PMASA announcements are pages.
                if ext != '.tpl' and name[:6] != 'PMASA-':
                    continue
                # Underscore/dot files are includes, not pages.
                if name[0] in ['_', '.']:
                    continue
                if file in ['index.xml.tpl', 'sitemap.xml.tpl', '404.tpl']:
                    continue
                helper.log.dbg('- %s' % file)
                # The page title comes from the template's own genshi
                # defs, falling back to the announcement id, then 'Index'.
                xmldata = XML(open(os.path.join(root, file), 'r').read())
                title = str(xmldata.select('def[@function="page_title"]/text()'))
                title = title.strip()
                if len(title) == 0:
                    title = str(xmldata.select('def[@function="announcement_id"]/text()'))
                    title = title.strip()
                if len(title) == 0:
                    title = 'Index'
                link = dir + self.get_outname(name)
                sitemap = {
                    'link': link,
                    'loc': '%s%s%s' % (SERVER, BASE_URL, link),
                    'title': title
                }
                # Security announcements only go into sitemap.xml.
                if name[:6] != 'PMASA-':
                    self.data['sitemap'].append(sitemap)
                sitemap.update(self.get_sitemap_data(dir + name))
                self.data['sitemapxml'].append(sitemap)
        # Hand-maintained extra entries (documentation etc.).
        for link in data.sitemap.ENTRIES:
            sitemap = {
                'loc': SERVER + link,
            }
            sitemap.update(self.get_sitemap_data(link))
            self.data['sitemapxml'].append(sitemap)
    def get_translation_stats(self):
        """
        Computes translation stats from the git checkout (po files and
        log) and fills in self.data['translations'].
        """
        helper.log.dbg('Processing translation stats...')
        storage = helper.cache.Cache()
        self.data['translations'] = []
        list = self.git.langtree.keys()
        list.sort()
        for name in list:
            if name[-3:] != '.po':
                continue
            lang = name[:-3]
            cache_key = 'trans-%s' % lang
            # Use cached stats when available; computing them means
            # parsing the po file and searching the whole git log.
            # NOTE(review): the bare except also hides unexpected cache
            # errors, not just missing entries.
            try:
                self.data['translations'].append(storage.get(cache_key))
                continue
            except:
                pass
            longlang = data.langnames.MAP[lang]
            po = polib.pofile(os.path.join(self.git.dirname, 'po', name))
            helper.log.dbg(' - %s [%s]' % (lang, longlang))
            # Find the most recent commit which looks like a translation
            # update for this language (see LANG_REGEXP).
            gitlog = self.git.repo.log(path = 'po/%s' % name)
            langs = '%s|%s' % (lang, longlang)
            regexp = re.compile(LANG_REGEXP % (langs, langs), re.IGNORECASE)
            found = None
            for x in gitlog:
                if regexp.findall(x.message) != []:
                    found = x
                    break

            percent = po.percent_translated()
            translated = len(po.translated_entries())
            # CSS class marking poorly translated languages.
            if percent < 50:
                css = ' b50'
            elif percent < 80:
                css = ' b80'
            else:
                css =''
            # No matching commit found -> no update date to show.
            try:
                dt = datetime.datetime(*found.committed_date[:6])
            except (TypeError, AttributeError):
                dt = ''
            translation = {
                'name': longlang,
                'short': lang,
                'translated': translated,
                'percent': '%0.1f' % percent,
                'updated': dt,
                'css': css,
            }
            storage.set(cache_key, translation)
            self.data['translations'].append(translation)
    def fetch_data(self):
        """
        Fetches data from remote or local sources and prepares template
        data. Order matters: tweeting needs news/issues loaded first and
        the sitemap is generated last.
        """
        self.get_snapshots_info()

        # One feed serves both application and theme releases.
        xml_files = self.xmls.load('files', PROJECT_FILES_RSS)

        self.process_releases(xml_files)
        self.process_themes(xml_files)

        rss_news = self.feeds.load('news', PROJECT_NEWS_RSS)
        self.process_news(rss_news)

        self.tweet()

        rss_planet = self.feeds.load('planet', PLANET_RSS)
        self.process_planet(rss_planet)

        rss_cz = self.feeds.load('cz', RSS_CZ)
        self.process_feed('news_cz', rss_cz)

        rss_ru = self.feeds.load('ru', RSS_RU)
        self.process_feed('news_ru', rss_ru)

        rss_summary = self.feeds.load('summary', PROJECT_SUMMARY_RSS)
        self.process_summary(rss_summary)

        rss_donations = self.feeds.load('donations', DONATIONS_RSS)
        self.process_donations(rss_donations)

        self.get_translation_stats()

        self.list_security_issues()

        self.tweet_security()

        self.generate_sitemap()
def render_pages(self):
    '''
    Renders all content pages.
    '''
    helper.log.dbg('Rendering pages:')
    page_templates = [os.path.basename(path) for path in glob.glob('templates/*.tpl')]
    page_templates.extend([
        os.path.join('security', os.path.basename(path))
        for path in glob.glob('templates/security/*.tpl')
    ])
    for tpl in page_templates:
        base = os.path.splitext(tpl)[0]
        # Names starting with '_' are partials/static inputs, not pages.
        if not os.path.basename(base).startswith('_'):
            self.render(base)

    helper.log.dbg('Rendering security issues pages:')
    for issue in self.data['issues']:
        self.render_security(issue['name'])

    helper.log.dbg('Generating CSS:')
    for stylesheet in glob.glob('css/*.css'):
        self.render_css(os.path.basename(stylesheet))

    helper.log.dbg('Generating JavaScript:')
    for script in glob.glob('js/*.js'):
        self.render_js(os.path.basename(script))

    helper.log.dbg('Generating static pages:')
    static_pages = [
        ('_version.php', 'version.php'),
        ('_version.txt', 'version.txt'),
        ('_version.js', 'version.js'),
        ('_security.php', 'security.php'),
        ('_robots.txt', 'robots.txt'),
    ]
    for source, target in static_pages:
        self.render_static(source, target)
    # One redirect stub page per entry in the redirect table.
    for redir, destination in data.redirects.REDIRECTS.items():
        self.render_static(
            '_redirect.tpl',
            '%s.php' % redir,
            {'location': self.get_outname(destination)})
def main(self):
    '''
    Main program which does everything.

    Runs the three pipeline stages in order: prepare (and optionally
    clean) the output directory, fetch/process all remote data, then
    render every page into the output directory.
    '''
    self.prepare_output()
    self.fetch_data()
    self.render_pages()
    helper.log.dbg('Done!')
if __name__ == '__main__':
    # Command line interface: controls verbosity, output cleaning,
    # publishing target and optional identi.ca posting.
    parser = OptionParser()
    parser.add_option('-v', '--verbose',
        action='store_true',
        dest='verbose',
        help='Output verbose information.')
    parser.add_option('-q', '--quiet',
        action='store_false',
        dest='verbose',
        help='Only show errors and warnings.')
    parser.add_option('-C', '--clean',
        action='store_true',
        dest='clean',
        help='Clean output directory (default).')
    parser.add_option('-N', '--no-clean',
        action='store_false',
        dest='clean',
        help='Do not clean output directory.')
    parser.add_option('-V', '--verbose-cache',
        action='store_true',
        dest='verbose_cache',
        help='Output verbose caching information.')
    parser.add_option('-Q', '--quiet-cache',
        action='store_false',
        dest='verbose_cache',
        help='No information from caching in output.')
    parser.add_option('-s', '--server',
        action='store', type='string',
        dest='server',
        help='Name of server where data will be published, eg.: %s.' % SERVER)
    parser.add_option('-b', '--base-url',
        action='store', type='string',
        dest='base_url',
        help='Base URL of document, eg.: %s.' % BASE_URL)
    parser.add_option('-e', '--extension',
        action='store', type='string',
        dest='extension',
        help='Extension of generated files, default is %s.' % EXTENSION)
    parser.add_option('-l', '--log',
        action='store', type='string',
        dest='log',
        help='Log filename, default is none.')
    parser.add_option('-p', '--identica-password',
        action='store', type='string',
        dest='identica_password',
        # Typo fix in user-facing help text: "Pasword" -> "Password".
        help='Password to identi.ca, default is not to post there.')
    parser.add_option('-u', '--identica-user',
        action='store', type='string',
        dest='identica_user',
        # Typo fix in user-facing help text: "defaull" -> "default".
        help='Username to identi.ca, default is %s.' % IDENTICA_USER)

    parser.set_defaults(
        verbose = helper.log.VERBOSE,
        verbose_cache = helper.log.DBG_CACHE,
        server = SERVER,
        base_url = BASE_URL,
        clean = CLEAN_OUTPUT,
        log = None,
        extension = EXTENSION,
        identica_user = IDENTICA_USER,
        identica_password = IDENTICA_PASSWORD
        )

    (options, args) = parser.parse_args()

    # Propagate parsed options into the module level configuration
    # that the generator classes read.
    helper.log.VERBOSE = options.verbose
    helper.log.DBG_CACHE = options.verbose_cache
    SERVER = options.server
    BASE_URL = options.base_url
    EXTENSION = options.extension
    CLEAN_OUTPUT = options.clean
    IDENTICA_USER = options.identica_user
    IDENTICA_PASSWORD = options.identica_password
    if options.log is not None:
        helper.log.LOG = open(options.log, 'w')

    gen = SFGenerator()
    gen.main()