1 # -*- coding: utf-8 -*-
3 # gPodder - A media aggregator and podcast client
4 # Copyright (c) 2005-2008 Thomas Perl and the gPodder Team
6 # gPodder is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 3 of the License, or
9 # (at your option) any later version.
11 # gPodder is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program. If not, see <http://www.gnu.org/licenses/>.
22 # libpodcasts.py -- data classes for gpodder
23 # thomas perl <thp@perli.net> 20051029
25 # Contains code based on:
26 # liblocdbwriter.py (2006-01-09)
27 # liblocdbreader.py (2006-01-10)
35 from gpodder
import util
36 from gpodder
import opml
37 from gpodder
import cache
38 from gpodder
import services
39 from gpodder
import draw
40 from gpodder
import libtagupdate
41 from gpodder
import dumbshelve
42 from gpodder
import resolver
44 from gpodder
.liblogger
import log
45 from gpodder
.libgpodder
import gl
46 from gpodder
.dbsqlite
import db
59 import xml
.dom
.minidom
62 from xml
.sax
import saxutils
65 if gpodder
.interface
== gpodder
.MAEMO
:
66 ICON_AUDIO_FILE
= 'gnome-mime-audio-mp3'
67 ICON_VIDEO_FILE
= 'gnome-mime-video-mp4'
68 ICON_DOWNLOADING
= 'qgn_toolb_messagin_moveto'
69 ICON_DELETED
= 'qgn_toolb_gene_deletebutton'
70 ICON_NEW
= 'qgn_list_gene_favor'
72 ICON_AUDIO_FILE
= 'audio-x-generic'
73 ICON_VIDEO_FILE
= 'video-x-generic'
74 ICON_DOWNLOADING
= gtk
.STOCK_GO_DOWN
75 ICON_DELETED
= gtk
.STOCK_DELETE
76 ICON_NEW
= gtk
.STOCK_ABOUT
class HTTPAuthError(Exception):
    """Raised when fetching a feed fails HTTP authentication (HTTP 401)."""
    pass
81 class podcastChannel(object):
82 """holds data for a complete channel"""
83 SETTINGS
= ('sync_to_devices', 'device_playlist_name','override_title','username','password')
89 def load(cls
, url
, create
=True, authentication_tokens
=None):
90 if isinstance(url
, unicode):
91 url
= url
.encode('utf-8')
93 tmp
= db
.load_channels(factory
=lambda d
: cls
.create_from_dict(d
), url
=url
)
97 tmp
= podcastChannel(url
)
98 if authentication_tokens
is not None:
99 tmp
.username
= authentication_tokens
[0]
100 tmp
.password
= authentication_tokens
[1]
101 success
, error_code
= tmp
.update()
103 if error_code
== 401:
108 db
.force_last_new(tmp
)
112 def create_from_dict(d
):
116 setattr(c
, key
, d
[key
])
120 (updated
, c
) = self
.fc
.fetch(self
.url
, self
)
123 return ( False, None )
126 return ( False, 401 )
128 if self
.url
!= c
.url
:
129 log('Updating channel URL from %s to %s', self
.url
, c
.url
, sender
=self
)
132 # update the cover if it's not there
135 # If we have an old instance of this channel, and
136 # feedcache says the feed hasn't changed, return old
138 log('Channel %s is up to date', self
.url
)
139 return ( True, None )
141 # Save etag and last-modified for later reuse
142 if c
.headers
.get('etag'):
143 self
.etag
= c
.headers
.get('etag')
144 if c
.headers
.get('last-modified'):
145 self
.last_modified
= c
.headers
.get('last-modified')
147 self
.parse_error
= c
.get('bozo_exception', None)
149 if hasattr(c
.feed
, 'title'):
150 self
.title
= c
.feed
.title
152 self
.title
= self
.url
153 if hasattr( c
.feed
, 'link'):
154 self
.link
= c
.feed
.link
155 if hasattr( c
.feed
, 'subtitle'):
156 self
.description
= c
.feed
.subtitle
158 if hasattr(c
.feed
, 'updated_parsed') and c
.feed
.updated_parsed
is not None:
159 self
.pubDate
= rfc822
.mktime_tz(c
.feed
.updated_parsed
+(0,))
161 self
.pubDate
= time
.time()
162 if hasattr( c
.feed
, 'image'):
163 if hasattr(c
.feed
.image
, 'href') and c
.feed
.image
.href
:
165 self
.image
= c
.feed
.image
.href
166 if old
!= self
.image
:
167 self
.update_cover(force
=True)
169 # Marked as bulk because we commit after importing episodes.
170 db
.save_channel(self
, bulk
=True)
172 # Remove old episodes before adding the new ones. This helps
173 # deal with hyperactive channels, such as TV news, when there
174 # can be more new episodes than the user wants in the list.
175 # By cleaning up old episodes before receiving the new ones we
176 # ensure that the user doesn't miss any.
177 db
.purge(gl
.config
.max_episodes_per_feed
, self
.id)
179 # Load all episodes to update them properly.
180 existing
= self
.get_all_episodes()
182 # We can limit the maximum number of entries that gPodder will parse
183 # via the "max_episodes_per_feed" configuration option.
184 if len(c
.entries
) > gl
.config
.max_episodes_per_feed
:
185 log('Limiting number of episodes for %s to %d', self
.title
, gl
.config
.max_episodes_per_feed
)
186 for entry
in c
.entries
[:min(gl
.config
.max_episodes_per_feed
, len(c
.entries
))]:
190 episode
= podcastItem
.from_feedparser_entry(entry
, self
)
192 log('Cannot instantiate episode "%s": %s. Skipping.', entry
.get('id', '(no id available)'), e
, sender
=self
, traceback
=True)
198 if ex
.guid
== episode
.guid
:
199 for k
in ('title', 'title', 'description', 'link', 'pubDate'):
200 setattr(ex
, k
, getattr(episode
, k
))
204 if not episode
.length
:
205 episode
.length
= resolver
.get_real_episode_length(episode
)
207 episode
.save(bulk
=True)
209 return ( True, None )
def update_cover(self, force=False):
    # Request a (re-)download of the channel's cover image.
    #
    # The cover is only fetched when no cached cover file exists yet,
    # unless force=True. The actual download is delegated to the
    # services.cover_downloader service.
    if self.cover_file is None or not os.path.exists(self.cover_file) or force:
        if self.image is not None:
            # self.image is the cover URL taken from the feed during update()
            services.cover_downloader.request_cover(self)
217 db
.delete_channel(self
)
220 db
.save_channel(self
)
def stat(self, state=None, is_played=None, is_locked=None):
    """Query episode statistics for this channel from the database.

    Each keyword acts as an optional filter; None means "do not filter
    on that attribute". Callers use this to count episodes per state
    (see update_channel_model_by_iter).
    """
    filters = dict(state=state, is_played=is_played, is_locked=is_locked)
    return db.get_channel_stat(self.url, **filters)
225 def __init__( self
, url
= "", title
= "", link
= "", description
= ""):
230 self
.description
= description
233 self
.parse_error
= None
234 self
.newest_pubdate_cached
= None
235 self
.update_flag
= False # channel is updating or to be updated
238 # should this channel be synced to devices? (ex: iPod)
239 self
.sync_to_devices
= True
240 # to which playlist should be synced
241 self
.device_playlist_name
= 'gPodder'
242 # if set, this overrides the channel-provided title
243 self
.override_title
= ''
247 self
.last_modified
= None
250 self
.save_dir_size
= 0
251 self
.__save
_dir
_size
_set
= False
253 self
.count_downloaded
= 0
255 self
.count_unplayed
= 0
257 self
.channel_is_locked
= False
def request_save_dir_size(self):
    """Ensure the cached download-directory size has been computed.

    The directory scan runs at most once per instance; afterwards the
    cached value in self.save_dir_size is reused.
    """
    needs_scan = not self.__save_dir_size_set
    if needs_scan:
        self.update_save_dir_size()
    self.__save_dir_size_set = True
def update_save_dir_size(self):
    # Recalculate and cache the total size of this channel's download folder.
    self.save_dir_size = util.calculate_size(self.save_dir)
def get_filename(self):
    """Return the MD5 sum of the channel URL.

    The digest is used as the channel's on-disk folder name
    (see get_save_dir()).
    """
    url_hash = md5.new(self.url)
    return url_hash.hexdigest()

filename = property(fget=get_filename)
273 def get_title( self
):
274 if self
.override_title
:
275 return self
.override_title
276 elif not self
.__title
.strip():
def set_title(self, value):
    # Setter counterpart of get_title(); surrounding whitespace is never kept.
    self.__title = value.strip()
284 title
= property(fget
=get_title
,
287 def set_custom_title( self
, custom_title
):
288 custom_title
= custom_title
.strip()
290 if custom_title
!= self
.__title
:
291 self
.override_title
= custom_title
293 self
.override_title
= ''
def get_downloaded_episodes(self):
    """Return this channel's episodes that are in the DOWNLOADED state."""
    def row_to_episode(row):
        # Rehydrate a database row into a podcastItem bound to this channel.
        return podcastItem.create_from_dict(row, self)

    return db.load_episodes(self, factory=row_to_episode,
                            state=db.STATE_DOWNLOADED)
def save_settings(self):
    # Persist this channel (including its SETTINGS attributes) to the database.
    db.save_channel(self)
def get_new_episodes(self):
    """Return episodes that are still "new": normal state and not yet played."""
    all_episodes = db.load_episodes(
        self, factory=lambda x: podcastItem.create_from_dict(x, self))
    fresh = []
    for episode in all_episodes:
        if episode.state == db.STATE_NORMAL and not episode.is_played:
            fresh.append(episode)
    return fresh
304 def update_m3u_playlist(self
):
305 if gl
.config
.create_m3u_playlists
:
306 downloaded_episodes
= self
.get_downloaded_episodes()
307 fn
= util
.sanitize_filename(self
.title
)
309 fn
= os
.path
.basename(self
.save_dir
)
310 m3u_filename
= os
.path
.join(gl
.downloaddir
, fn
+'.m3u')
311 log('Writing playlist to %s', m3u_filename
, sender
=self
)
312 f
= open(m3u_filename
, 'w')
315 for episode
in downloaded_episodes
:
316 filename
= episode
.local_filename()
317 if os
.path
.dirname(filename
).startswith(os
.path
.dirname(m3u_filename
)):
318 filename
= filename
[len(os
.path
.dirname(m3u_filename
)+os
.sep
):]
319 f
.write('#EXTINF:0,'+self
.title
+' - '+episode
.title
+' ('+episode
.cute_pubdate()+')\n')
320 f
.write(filename
+'\n')
323 def addDownloadedItem(self
, item
):
324 log('addDownloadedItem(%s)', item
.url
)
326 if not item
.was_downloaded():
327 item
.mark_downloaded(save
=True)
329 # Update metadata on file (if possible and wanted)
330 if gl
.config
.update_tags
and libtagupdate
.tagging_supported():
331 filename
= item
.local_filename()
333 libtagupdate
.update_metadata_on_file(filename
, title
=item
.title
, artist
=self
.title
, genre
='Podcast')
335 log('Error while calling update_metadata_on_file(): %s', e
)
337 self
.update_m3u_playlist()
def get_all_episodes(self):
    # Load every episode of this channel from the database; no state filter
    # is passed, so episodes are returned regardless of their state.
    return db.load_episodes(self, factory=lambda d: podcastItem.create_from_dict(d, self))
343 def update_model( self
):
344 self
.update_save_dir_size()
345 model
= self
.tree_model
347 iter = model
.get_iter_first()
348 while iter is not None:
349 self
.iter_set_downloading_columns(model
, iter)
350 iter = model
.iter_next( iter)
353 def tree_model( self
):
354 log('Returning TreeModel for %s', self
.url
, sender
= self
)
355 return self
.items_liststore()
357 def iter_set_downloading_columns( self
, model
, iter, episode
=None):
358 global ICON_AUDIO_FILE
, ICON_VIDEO_FILE
359 global ICON_DOWNLOADING
, ICON_DELETED
, ICON_NEW
362 url
= model
.get_value( iter, 0)
363 episode
= db
.load_episode(url
, factory
=lambda x
: podcastItem
.create_from_dict(x
, self
))
367 if gl
.config
.episode_list_descriptions
:
372 if services
.download_status_manager
.is_download_in_progress(url
):
373 status_icon
= util
.get_tree_icon(ICON_DOWNLOADING
, icon_cache
=self
.icon_cache
, icon_size
=icon_size
)
375 if episode
.state
== db
.STATE_NORMAL
:
376 if episode
.is_played
:
379 status_icon
= util
.get_tree_icon(ICON_NEW
, icon_cache
=self
.icon_cache
, icon_size
=icon_size
)
380 elif episode
.was_downloaded():
381 missing
= not episode
.file_exists()
384 log('Episode missing: %s (before drawing an icon)', episode
.url
, sender
=self
)
386 file_type
= util
.file_type_by_extension( model
.get_value( iter, 9))
387 if file_type
== 'audio':
388 status_icon
= util
.get_tree_icon(ICON_AUDIO_FILE
, not episode
.is_played
, episode
.is_locked
, not episode
.file_exists(), self
.icon_cache
, icon_size
)
389 elif file_type
== 'video':
390 status_icon
= util
.get_tree_icon(ICON_VIDEO_FILE
, not episode
.is_played
, episode
.is_locked
, not episode
.file_exists(), self
.icon_cache
, icon_size
)
392 status_icon
= util
.get_tree_icon('unknown', not episode
.is_played
, episode
.is_locked
, not episode
.file_exists(), self
.icon_cache
, icon_size
)
393 elif episode
.state
== db
.STATE_DELETED
or episode
.state
== db
.STATE_DOWNLOADED
:
394 status_icon
= util
.get_tree_icon(ICON_DELETED
, not episode
.is_played
, icon_cache
=self
.icon_cache
, icon_size
=icon_size
)
396 log('Warning: Cannot determine status icon.', sender
=self
)
399 model
.set( iter, 4, status_icon
)
401 def items_liststore( self
):
403 Return a gtk.ListStore containing episodes for this channel
405 new_model
= gtk
.ListStore( gobject
.TYPE_STRING
, gobject
.TYPE_STRING
, gobject
.TYPE_STRING
,
406 gobject
.TYPE_BOOLEAN
, gtk
.gdk
.Pixbuf
, gobject
.TYPE_STRING
, gobject
.TYPE_STRING
,
407 gobject
.TYPE_STRING
, gobject
.TYPE_STRING
, gobject
.TYPE_STRING
)
409 for item
in self
.get_all_episodes():
410 description
= item
.title_and_description
413 filelength
= gl
.format_filesize(item
.length
, 1)
417 new_iter
= new_model
.append((item
.url
, item
.title
, filelength
,
418 True, None, item
.cute_pubdate(), description
, util
.remove_html_tags(item
.description
),
419 item
.local_filename(), item
.extension()))
420 self
.iter_set_downloading_columns( new_model
, new_iter
, episode
=item
)
422 self
.update_save_dir_size()
def find_episode(self, url):
    """Load a single episode of this channel by URL (None when not found)."""
    make = lambda row: podcastItem.create_from_dict(row, self)
    return db.load_episode(url, factory=make)
428 def get_save_dir(self
):
429 save_dir
= os
.path
.join(gl
.downloaddir
, self
.filename
, '')
431 # Create save_dir if it does not yet exist
432 if not util
.make_directory( save_dir
):
433 log( 'Could not create save_dir: %s', save_dir
, sender
= self
)
437 save_dir
= property(fget
=get_save_dir
)
def remove_downloaded(self):
    # Delete this channel's entire download directory. The second argument
    # (ignore_errors=True) keeps a partially-removed tree from raising.
    shutil.rmtree( self.save_dir, True)
def get_index_file(self):
    # gets index xml filename for downloaded channels list
    # (absolute path of "index.xml" inside this channel's save_dir)
    return os.path.join( self.save_dir, 'index.xml')

index_file = property(fget=get_index_file)
def get_cover_file(self):
    # gets cover filename for cover download cache
    # (absolute path of the cached cover image inside this channel's save_dir)
    return os.path.join( self.save_dir, 'cover')

cover_file = property(fget=get_cover_file)
def delete_episode_by_url(self, url):
    # Delete an episode's downloaded file and mark it as deleted in the DB.
    episode = db.load_episode(url, lambda c: podcastItem.create_from_dict(c, self))

    if episode is not None:
        util.delete_file(episode.local_filename())
        episode.set_state(db.STATE_DELETED)

    # Keep the channel's M3U playlist consistent with what is on disk.
    self.update_m3u_playlist()
464 class podcastItem(object):
465 """holds data for one object in a channel"""
468 def load(url
, channel
):
469 e
= podcastItem(channel
)
470 d
= db
.load_episode(url
)
472 for k
, v
in d
.iteritems():
478 def from_feedparser_entry( entry
, channel
):
479 episode
= podcastItem( channel
)
481 episode
.title
= entry
.get( 'title', util
.get_first_line( util
.remove_html_tags( entry
.get( 'summary', ''))))
482 episode
.link
= entry
.get( 'link', '')
483 episode
.description
= entry
.get( 'summary', entry
.get( 'link', entry
.get( 'title', '')))
484 episode
.guid
= entry
.get( 'id', '')
485 if entry
.get( 'updated_parsed', None):
486 episode
.pubDate
= rfc822
.mktime_tz(entry
.updated_parsed
+(0,))
488 if episode
.title
== '':
489 log( 'Warning: Episode has no title, adding anyways.. (Feed Is Buggy!)', sender
= episode
)
492 if hasattr(entry
, 'enclosures') and len(entry
.enclosures
) > 0:
493 enclosure
= entry
.enclosures
[0]
494 if len(entry
.enclosures
) > 1:
495 for e
in entry
.enclosures
:
496 if hasattr( e
, 'href') and hasattr( e
, 'length') and hasattr( e
, 'type') and (e
.type.startswith('audio/') or e
.type.startswith('video/')):
497 if util
.normalize_feed_url(e
.href
) is not None:
498 log( 'Selected enclosure: %s', e
.href
, sender
= episode
)
501 episode
.url
= util
.normalize_feed_url( enclosure
.get( 'href', ''))
502 elif hasattr(entry
, 'link'):
503 (filename
, extension
) = util
.filename_from_url(entry
.link
)
504 if extension
== '' and hasattr( entry
, 'type'):
505 extension
= util
.extension_from_mimetype(e
.type)
506 file_type
= util
.file_type_by_extension(extension
)
507 if file_type
is not None:
508 log('Adding episode with link to file type "%s".', file_type
, sender
=episode
)
509 episode
.url
= entry
.link
512 # This item in the feed has no downloadable enclosure
515 if not episode
.pubDate
:
516 metainfo
= util
.get_episode_info_from_url(episode
.url
)
517 if 'pubdate' in metainfo
:
519 episode
.pubDate
= int(float(metainfo
['pubdate']))
521 log('Cannot convert pubDate "%s" in from_feedparser_entry.', str(metainfo
['pubdate']), traceback
=True)
523 if hasattr( enclosure
, 'length') and episode
.length
:
525 episode
.length
= int(enclosure
.length
)
529 if hasattr( enclosure
, 'type'):
530 episode
.mimetype
= enclosure
.type
532 if episode
.title
== '':
533 ( filename
, extension
) = os
.path
.splitext( os
.path
.basename( episode
.url
))
534 episode
.title
= filename
539 def __init__( self
, channel
):
540 # Used by Storage for faster saving
545 self
.mimetype
= 'application/octet-stream'
547 self
.description
= ''
549 self
.channel
= channel
552 self
.state
= db
.STATE_NORMAL
553 self
.is_played
= False
554 self
.is_locked
= channel
.channel_is_locked
def save(self, bulk=False):
    # Persist this episode to the database. If the media file has appeared
    # on disk since the state was last recorded, promote the episode to
    # DOWNLOADED first. bulk=True is used during feed updates so the DB
    # can commit once after all episodes are imported.
    if self.state != db.STATE_DOWNLOADED and self.file_exists():
        self.state = db.STATE_DOWNLOADED
    db.save_episode(self, bulk=bulk)
561 def set_state(self
, state
):
563 db
.mark_episode(self
.url
, state
=self
.state
, is_played
=self
.is_played
, is_locked
=self
.is_locked
)
565 def mark(self
, state
=None, is_played
=None, is_locked
=None):
566 if state
is not None:
568 if is_played
is not None:
569 self
.is_played
= is_played
570 if is_locked
is not None:
571 self
.is_locked
= is_locked
572 db
.mark_episode(self
.url
, state
=state
, is_played
=is_played
, is_locked
=is_locked
)
574 def mark_downloaded(self
, save
=False):
575 self
.state
= db
.STATE_DOWNLOADED
576 self
.is_played
= False
581 def create_from_dict(d
, channel
):
582 e
= podcastItem(channel
)
585 setattr(e
, key
, d
[key
])
589 def title_and_description(self
):
591 Returns Pango markup for displaying in a TreeView, and
592 disables the description when the config variable
593 "episode_list_descriptions" is not set.
595 if gl
.config
.episode_list_descriptions
:
596 return '%s\n<small>%s</small>' % (saxutils
.escape(self
.title
), saxutils
.escape(self
.one_line_description()))
598 return saxutils
.escape(self
.title
)
def age_in_days(self):
    # Age of the episode's local file in days (delegates to util.file_age_in_days).
    return util.file_age_in_days(self.local_filename())
604 return self
.age_in_days() > gl
.config
.episode_old_age
def get_age_string(self):
    # Human-readable rendering of age_in_days() (delegates to util).
    return util.file_age_to_string(self.age_in_days())

age_prop = property(fget=get_age_string)
611 def one_line_description( self
):
612 lines
= util
.remove_html_tags(self
.description
).strip().splitlines()
613 if not lines
or lines
[0] == '':
614 return _('No description available')
616 return ' '.join((l
.strip() for l
in lines
if l
.strip() != ''))
618 def delete_from_disk(self
):
620 self
.channel
.delete_episode_by_url(self
.url
)
622 log('Cannot delete episode from disk: %s', self
.title
, traceback
=True, sender
=self
)
624 def local_filename( self
):
625 ext
= self
.extension()
627 # For compatibility with already-downloaded episodes,
628 # we accept md5 filenames if they are downloaded now.
629 md5_filename
= os
.path
.join(self
.channel
.save_dir
, md5
.new(self
.url
).hexdigest()+ext
)
630 if os
.path
.exists(md5_filename
) or not gl
.config
.experimental_file_naming
:
633 # If the md5 filename does not exist,
634 ( episode
, e
) = util
.filename_from_url(self
.url
)
635 episode
= util
.sanitize_filename(episode
) + ext
637 # If the episode filename looks suspicious,
638 # we still return the md5 filename to be on
639 # the safe side of the fence ;)
640 if len(episode
) == 0 or episode
.startswith('redirect.'):
642 filename
= os
.path
.join(self
.channel
.save_dir
, episode
)
645 def extension( self
):
646 ( filename
, ext
) = util
.filename_from_url(self
.url
)
647 # if we can't detect the extension from the url fallback on the mimetype
648 if ext
== '' or util
.file_type_by_extension(ext
) is None:
649 ext
= util
.extension_from_mimetype(self
.mimetype
)
650 #log('Getting extension from mimetype for: %s (mimetype: %s)' % (self.title, ext), sender=self)
654 self
.state
= db
.STATE_NORMAL
655 self
.is_played
= False
656 db
.mark_episode(self
.url
, state
=self
.state
, is_played
=self
.is_played
)
659 self
.is_played
= True
660 db
.mark_episode(self
.url
, is_played
=True)
def file_exists(self):
    """Return True when the episode's local media file is present on disk."""
    local_path = self.local_filename()
    return os.path.exists(local_path)
665 def was_downloaded(self
, and_exists
=False):
666 if self
.state
!= db
.STATE_DOWNLOADED
:
668 if and_exists
and not self
.file_exists():
672 def sync_filename( self
):
673 if gl
.config
.custom_sync_name_enabled
:
674 if '{channel' in gl
.config
.custom_sync_name
:
675 log('Fixing OLD syntax {channel.*} => {podcast.*} in custom_sync_name.', sender
=self
)
676 gl
.config
.custom_sync_name
= gl
.config
.custom_sync_name
.replace('{channel.', '{podcast.')
677 return util
.object_string_formatter(gl
.config
.custom_sync_name
, episode
=self
, podcast
=self
.channel
)
def file_type(self):
    # Classify the episode by its filename extension (e.g. 'audio'/'video'),
    # delegating to util.file_type_by_extension().
    return util.file_type_by_extension( self.extension() )
686 return os
.path
.splitext( os
.path
.basename( self
.url
))[0]
689 def published( self
):
691 return datetime
.datetime
.fromtimestamp(self
.pubDate
).strftime('%Y%m%d')
693 log( 'Cannot format pubDate for "%s".', self
.title
, sender
= self
)
696 def cute_pubdate(self
):
697 result
= util
.format_date(self
.pubDate
)
699 return '(%s)' % _('unknown')
703 pubdate_prop
= property(fget
=cute_pubdate
)
705 def calculate_filesize( self
):
707 self
.length
= os
.path
.getsize(self
.local_filename())
709 log( 'Could not get filesize for %s.', self
.url
)
def get_filesize_string(self):
    # Episode length (bytes) formatted for display via gl.format_filesize().
    return gl.format_filesize( self.length)

filesize_prop = property(fget=get_filesize_string)
def get_channel_title(self):
    # Title of the channel this episode belongs to (for list display).
    return self.channel.title

channel_prop = property(fget=get_channel_title)
721 def get_played_string( self
):
722 if not self
.is_played
:
727 played_prop
= property(fget
=get_played_string
)
731 def update_channel_model_by_iter( model
, iter, channel
, color_dict
,
732 cover_cache
=None, max_width
=0, max_height
=0 ):
734 count_downloaded
= channel
.stat(state
=db
.STATE_DOWNLOADED
)
735 count_new
= channel
.stat(state
=db
.STATE_NORMAL
, is_played
=False)
736 count_unplayed
= channel
.stat(state
=db
.STATE_DOWNLOADED
, is_played
=False)
739 model
.set(iter, 0, channel
.url
)
740 model
.set(iter, 1, channel
.title
)
742 title_markup
= saxutils
.escape(channel
.title
)
743 description_markup
= saxutils
.escape(util
.get_first_line(channel
.description
) or _('No description available'))
746 d
.append('<span weight="bold">')
747 d
.append(title_markup
)
751 description
= ''.join(d
+['\n', '<small>', description_markup
, '</small>'])
752 model
.set(iter, 2, description
)
754 if channel
.parse_error
is not None:
755 model
.set(iter, 6, channel
.parse_error
)
756 color
= color_dict
['parse_error']
758 color
= color_dict
['default']
760 if channel
.update_flag
:
761 color
= color_dict
['updating']
763 model
.set(iter, 8, color
)
765 if count_unplayed
> 0 or count_downloaded
> 0:
766 model
.set(iter, 3, draw
.draw_pill_pixbuf(str(count_unplayed
), str(count_downloaded
)))
767 model
.set(iter, 7, True)
769 model
.set(iter, 7, False)
771 # Load the cover if we have it, but don't download
772 # it if it's not available (to avoid blocking here)
773 pixbuf
= services
.cover_downloader
.get_cover(channel
, avoid_downloading
=True)
775 if pixbuf
is not None:
776 new_pixbuf
= util
.resize_pixbuf_keep_ratio(pixbuf
, max_width
, max_height
, channel
.url
, cover_cache
)
777 model
.set(iter, 5, new_pixbuf
or pixbuf
)
779 def channels_to_model(channels
, color_dict
, cover_cache
=None, max_width
=0, max_height
=0):
780 new_model
= gtk
.ListStore( str, str, str, gtk
.gdk
.Pixbuf
, int,
781 gtk
.gdk
.Pixbuf
, str, bool, str )
783 for channel
in channels
:
784 update_channel_model_by_iter( new_model
, new_model
.append(), channel
,
785 color_dict
, cover_cache
, max_width
, max_height
)
791 return db
.load_channels(lambda d
: podcastChannel
.create_from_dict(d
))
793 def update_channels(callback_proc
=None, callback_error
=None, is_cancelled_cb
=None):
794 log('Updating channels....')
796 channels
= load_channels()
799 for channel
in channels
:
800 if is_cancelled_cb
is not None and is_cancelled_cb():
802 callback_proc
and callback_proc(count
, len(channels
))
def save_channels(channels):
    """Export the subscribed channels to the user's OPML file.

    Returns the OPML exporter's success value.
    """
    return opml.Exporter(gl.channel_opml_file).write(channels)
812 def can_restore_from_opml():
814 if len(opml
.Importer(gl
.channel_opml_file
).items
):
815 return gl
.channel_opml_file
821 class LocalDBReader( object):
823 DEPRECATED - Only used for migration to SQLite
825 def __init__( self
, url
):
def get_text(self, nodelist):
    """Concatenate the character data of all text nodes in *nodelist*.

    Non-text nodes (elements, comments, ...) are skipped.
    """
    chunks = []
    for node in nodelist:
        if node.nodeType == node.TEXT_NODE:
            chunks.append(node.data)
    return ''.join(chunks)
def get_text_by_first_node(self, element, name):
    """Return the text content of the first child element called *name*.

    Raises IndexError when no such element exists (same as the original
    unchecked [0] access).
    """
    first_match = element.getElementsByTagName(name)[0]
    return self.get_text(first_match.childNodes)
834 def get_episode_from_element( self
, channel
, element
):
835 episode
= podcastItem( channel
)
836 episode
.title
= self
.get_text_by_first_node( element
, 'title')
837 episode
.description
= self
.get_text_by_first_node( element
, 'description')
838 episode
.url
= self
.get_text_by_first_node( element
, 'url')
839 episode
.link
= self
.get_text_by_first_node( element
, 'link')
840 episode
.guid
= self
.get_text_by_first_node( element
, 'guid')
843 for k
in ('url', 'link'):
844 if getattr(episode
, k
) is not None:
845 episode
.guid
= getattr(episode
, k
)
846 log('Notice: episode has no guid, using %s', episode
.guid
)
849 episode
.pubDate
= float(self
.get_text_by_first_node(element
, 'pubDate'))
851 log('Looks like you have an old pubDate in your LocalDB -> converting it')
852 episode
.pubDate
= self
.get_text_by_first_node(element
, 'pubDate')
853 log('FYI: pubDate value is: "%s"', episode
.pubDate
, sender
=self
)
854 pubdate
= feedparser
._parse
_date
(episode
.pubDate
)
856 log('Error converting the old pubDate - sorry!', sender
=self
)
859 log('PubDate converted successfully - yay!', sender
=self
)
860 episode
.pubDate
= time
.mktime(pubdate
)
862 episode
.mimetype
= self
.get_text_by_first_node( element
, 'mimetype')
864 log('No mimetype info for %s', episode
.url
, sender
=self
)
865 episode
.calculate_filesize()
868 def load_and_clean( self
, filename
):
870 Clean-up a LocalDB XML file that could potentially contain
871 "unbound prefix" XML elements (generated by the old print-based
872 LocalDB code). The code removes those lines to make the new
875 This should be removed in a future version.
878 for line
in open(filename
).read().split('\n'):
879 if not line
.startswith('<gpodder:info'):
882 return '\n'.join( lines
)
884 def read( self
, filename
):
885 doc
= xml
.dom
.minidom
.parseString( self
.load_and_clean( filename
))
886 rss
= doc
.getElementsByTagName('rss')[0]
888 channel_element
= rss
.getElementsByTagName('channel')[0]
890 channel
= podcastChannel( url
= self
.url
)
891 channel
.title
= self
.get_text_by_first_node( channel_element
, 'title')
892 channel
.description
= self
.get_text_by_first_node( channel_element
, 'description')
893 channel
.link
= self
.get_text_by_first_node( channel_element
, 'link')
896 for episode_element
in rss
.getElementsByTagName('item'):
897 episode
= self
.get_episode_from_element( channel
, episode_element
)
898 episodes
.append(episode
)