1 # -*- coding: utf-8 -*-
3 # gPodder - A media aggregator and podcast client
4 # Copyright (c) 2005-2008 Thomas Perl and the gPodder Team
6 # gPodder is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 3 of the License, or
9 # (at your option) any later version.
11 # gPodder is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program. If not, see <http://www.gnu.org/licenses/>.
22 # libpodcasts.py -- data classes for gpodder
23 # thomas perl <thp@perli.net> 20051029
25 # Contains code based on:
26 # liblocdbwriter.py (2006-01-09)
27 # liblocdbreader.py (2006-01-10)
35 from gpodder
import util
36 from gpodder
import opml
37 from gpodder
import cache
38 from gpodder
import services
39 from gpodder
import draw
40 from gpodder
import libtagupdate
41 from gpodder
import dumbshelve
43 from gpodder
.liblogger
import log
44 from gpodder
.libgpodder
import gl
45 from gpodder
.dbsqlite
import db
58 import xml
.dom
.minidom
61 from xml
.sax
import saxutils
# Pick icon names for the episode list.  Maemo (Hildon) themes ship their
# own icon names; on the desktop the standard freedesktop/GTK stock names
# are used instead.
if gpodder.interface == gpodder.MAEMO:
    ICON_AUDIO_FILE = 'gnome-mime-audio-mp3'
    ICON_VIDEO_FILE = 'gnome-mime-video-mp4'
    ICON_BITTORRENT = 'qgn_toolb_browser_web'
    ICON_DOWNLOADING = 'qgn_toolb_messagin_moveto'
    ICON_DELETED = 'qgn_toolb_gene_deletebutton'
    ICON_NEW = 'qgn_list_gene_favor'
else:
    # NOTE(review): the 'else:' line (71) is elided in this view of the
    # source; reconstructed from the duplicated assignments that follow.
    ICON_AUDIO_FILE = 'audio-x-generic'
    ICON_VIDEO_FILE = 'video-x-generic'
    ICON_BITTORRENT = 'applications-internet'
    ICON_DOWNLOADING = gtk.STOCK_GO_DOWN
    ICON_DELETED = gtk.STOCK_DELETE
    ICON_NEW = gtk.STOCK_ABOUT
class podcastChannel(object):
    """holds data for a complete channel"""
    # Per-channel settings tuple — presumably the attribute names that are
    # persisted/restored by save_settings()/load(); TODO confirm against db.
    SETTINGS = ('sync_to_devices', 'device_playlist_name','override_title','username','password')
def load(cls, url, create=True):
    # Look up a channel by feed URL in the database, optionally creating it.
    # Several interior lines are elided in this view; gaps are marked.
    if isinstance(url, unicode):
        url = url.encode('utf-8')
    # [line 92 elided]
    tmp = db.load_channels(factory=lambda d: cls.create_from_dict(d), url=url)
    # [lines 94-96 elided — presumably: return match, or fall through to create]
    tmp = podcastChannel(url)
    # [lines 98-100 elided]
    db.force_last_new(tmp)
    # [lines 102-104 elided — presumably 'return tmp']
def create_from_dict(d):
    # Factory for load()/db.load_channels(): build a podcastChannel from a
    # database row dict.  The instance construction and the loop over d's
    # keys (lines 106-108) are elided in this view — TODO confirm.
    setattr(c, key, d[key])
# NOTE(review): body of the channel feed-update method; its 'def' header
# and many interior lines are elided in this view.  Gaps are marked.
(updated, c) = self.fc.fetch(self.url, self)
# [lines 114-117 elided]
if self.url != c.url:
    log('Updating channel URL from %s to %s', self.url, c.url, sender=self)
    # [lines 120-121 elided — presumably 'self.url = c.url']
# update the cover if it's not there
# [lines 123-124 elided]
# If we have an old instance of this channel, and
# feedcache says the feed hasn't changed, return old
# [line 127 elided — presumably 'if not updated:']
log('Channel %s is up to date', self.url)
# [lines 129-130 elided — presumably an early 'return']
# Save etag and last-modified for later reuse
if c.headers.get('etag'):
    self.etag = c.headers.get('etag')
if c.headers.get('last-modified'):
    self.last_modified = c.headers.get('last-modified')
# [line 136 elided]
self.parse_error = c.get('bozo_exception', None)
# [line 138 elided]
if hasattr(c.feed, 'title'):
    self.title = c.feed.title
# [line 141 elided — presumably 'else:']
self.title = self.url
if hasattr( c.feed, 'link'):
    self.link = c.feed.link
if hasattr( c.feed, 'subtitle'):
    self.description = c.feed.subtitle
# [line 147 elided]
if hasattr(c.feed, 'updated_parsed') and c.feed.updated_parsed is not None:
    self.pubDate = rfc822.mktime_tz(c.feed.updated_parsed+(0,))
# [line 150 elided — presumably 'else:']
self.pubDate = time.time()
if hasattr( c.feed, 'image'):
    if hasattr(c.feed.image, 'href') and c.feed.image.href:
        # [line 154 elided — presumably 'old = self.image']
        self.image = c.feed.image.href
        if old != self.image:
            self.update_cover(force=True)
# [line 158 elided]
# Marked as bulk because we commit after importing episodes.
db.save_channel(self, bulk=True)
# [line 161 elided]
# Remove old episodes before adding the new ones. This helps
# deal with hyperactive channels, such as TV news, when there
# can be more new episodes than the user wants in the list.
# By cleaning up old episodes before receiving the new ones we
# ensure that the user doesn't miss any.
db.purge(gl.config.max_episodes_per_feed, self.id)
# [line 168 elided]
# Load all episodes to update them properly.
existing = self.get_all_episodes()
# [line 171 elided]
# We can limit the maximum number of entries that gPodder will parse
# via the "max_episodes_per_feed" configuration option.
if len(c.entries) > gl.config.max_episodes_per_feed:
    log('Limiting number of episodes for %s to %d', self.title, gl.config.max_episodes_per_feed)
for entry in c.entries[:min(gl.config.max_episodes_per_feed, len(c.entries))]:
    # [lines 177-179 elided — presumably 'try:']
    episode = podcastItem.from_feedparser_entry(entry, self)
    # [line 181 elided — presumably 'except Exception, e:']
    log('Cannot instantiate episode "%s": %s. Skipping.', entry.get('id', '(no id available)'), e, sender=self, traceback=True)
    # [lines 183-187 elided — presumably a guard plus 'for ex in existing:']
    if ex.guid == episode.guid:
        # NOTE(review): 'title' appears twice in this tuple — looks like a
        # typo for another attribute; confirm against upstream gPodder.
        for k in ('title', 'title', 'description', 'link', 'pubDate'):
            setattr(ex, k, getattr(episode, k))
    # [lines 191-193 elided]
    episode.save(bulk=True)
# [trailing lines elided]
def update_cover(self, force=False):
    """Ask the cover downloader service for this channel's cover image.

    Fires only when forced or when no cover is cached yet, and only if
    the feed actually provided an image URL.
    """
    have_cached_cover = (self.cover_file is not None
                         and os.path.exists(self.cover_file))
    if have_cached_cover and not force:
        return
    if self.image is None:
        return
    services.cover_downloader.request_cover(self)
# NOTE(review): two one-line method bodies whose 'def' headers (lines
# 203/206 — presumably delete() and save()) are elided in this view.
    db.delete_channel(self)

    db.save_channel(self)
def stat(self, state=None, is_played=None, is_locked=None):
    """Count this channel's episodes in the DB, filtered by the given flags."""
    filters = dict(state=state, is_played=is_played, is_locked=is_locked)
    return db.get_channel_stat(self.url, **filters)
def __init__( self, url = "", title = "", link = "", description = ""):
    # Initialise a channel; several attribute assignments are elided in
    # this view (gaps marked).
    # [lines 213-216 elided — presumably self.url/title/link assignments]
    self.description = description
    # [lines 218-219 elided]
    self.parse_error = None
    self.newest_pubdate_cached = None
    self.update_flag = False # channel is updating or to be updated
    # [lines 223-224 elided]
    # should this channel be synced to devices? (ex: iPod)
    self.sync_to_devices = True
    # to which playlist should be synced
    self.device_playlist_name = 'gPodder'
    # if set, this overrides the channel-provided title
    self.override_title = ''
    # [lines 231-233 elided — presumably username/password/etag defaults]
    self.last_modified = None
    # [lines 235-236 elided]
    # Cached download-folder size; valid once __save_dir_size_set is True.
    self.save_dir_size = 0
    self.__save_dir_size_set = False
    # [line 239 elided]
    self.count_downloaded = 0
    # [line 241 elided]
    self.count_unplayed = 0
def request_save_dir_size(self):
    """Compute the download-folder size once; later calls are no-ops."""
    if self.__save_dir_size_set:
        return
    self.update_save_dir_size()
    self.__save_dir_size_set = True
def update_save_dir_size(self):
    # Refresh the cached on-disk size of this channel's download folder.
    self.save_dir_size = util.calculate_size(self.save_dir)
def get_filename(self):
    """Return the MD5 sum of the channel URL"""
    digest = md5.new(self.url)
    return digest.hexdigest()

filename = property(fget=get_filename)
def get_title( self):
    # Prefer the user's override; fall-through behaviour is elided below.
    if self.override_title:
        return self.override_title
    elif not self.__title.strip():
        # [lines 262-265 elided — presumably return self.url / else return __title]

def set_title( self, value):
    # Store the feed-provided title, stripped of surrounding whitespace.
    self.__title = value.strip()

title = property(fget=get_title,
    # [lines 270-271 elided — presumably 'fset=set_title)']

def set_custom_title( self, custom_title):
    # Set (or clear) the user's title override for this channel.
    custom_title = custom_title.strip()
    # [line 274 elided]
    if custom_title != self.__title:
        self.override_title = custom_title
    # [line 277 elided — presumably 'else:']
    self.override_title = ''
def get_downloaded_episodes(self):
    """Return all episodes of this channel whose state is 'downloaded'."""
    make_episode = lambda row: podcastItem.create_from_dict(row, self)
    return db.load_episodes(self, factory=make_episode,
                            state=db.STATE_DOWNLOADED)
def save_settings(self):
    # Persist the channel (including its SETTINGS attributes) to the DB.
    db.save_channel(self)
def get_new_episodes(self):
    """Return episodes that are in the normal state and not yet played."""
    def is_new(episode):
        return episode.state == db.STATE_NORMAL and not episode.is_played
    all_episodes = db.load_episodes(
        self, factory=lambda x: podcastItem.create_from_dict(x, self))
    return filter(is_new, all_episodes)
def update_m3u_playlist(self):
    # Write an .m3u playlist of this channel's downloaded episodes (only
    # when the create_m3u_playlists option is on).  Gaps are marked.
    if gl.config.create_m3u_playlists:
        downloaded_episodes = self.get_downloaded_episodes()
        fn = util.sanitize_filename(self.title)
        # [line 293 elided — presumably a fallback check when fn is empty]
        fn = os.path.basename(self.save_dir)
        m3u_filename = os.path.join(gl.downloaddir, fn+'.m3u')
        log('Writing playlist to %s', m3u_filename, sender=self)
        f = open(m3u_filename, 'w')
        # [lines 298-299 elided — presumably the '#EXTM3U' header write]
        for episode in downloaded_episodes:
            filename = episode.local_filename()
            if os.path.dirname(filename).startswith(os.path.dirname(m3u_filename)):
                # Make the playlist entry relative to the playlist's folder.
                filename = filename[len(os.path.dirname(m3u_filename)+os.sep):]
            f.write('#EXTINF:0,'+self.title+' - '+episode.title+' ('+episode.cute_pubdate()+')\n')
            f.write(filename+'\n')
        # [lines 306-307 elided — presumably 'f.close()']
def addDownloadedItem(self, item):
    # Mark an episode as downloaded and run post-download actions
    # (tag update, playlist refresh, torrent hand-off).  Gaps are marked.
    log('addDownloadedItem(%s)', item.url)
    # [line 310 elided]
    if not item.was_downloaded():
        item.mark_downloaded(save=True)
        # [line 313 elided]
        # Update metadata on file (if possible and wanted)
        if gl.config.update_tags and libtagupdate.tagging_supported():
            filename = item.local_filename()
            # [line 317 elided — presumably 'try:']
            libtagupdate.update_metadata_on_file(filename, title=item.title, artist=self.title, genre='Podcast')
            # [line 319 elided — presumably 'except Exception, e:']
            log('Error while calling update_metadata_on_file(): %s', e)
        # [line 321 elided]
        self.update_m3u_playlist()
        # [line 323 elided]
        if item.file_type() == 'torrent':
            torrent_filename = item.local_filename()
            destination_filename = util.torrent_filename( torrent_filename)
            gl.invoke_torrent(item.url, torrent_filename, destination_filename)
def get_all_episodes(self):
    """Load every episode of this channel from the database."""
    row_to_episode = lambda row: podcastItem.create_from_dict(row, self)
    return db.load_episodes(self, factory=row_to_episode)
def update_model(self):
    """Refresh the status columns of every row in the episode model."""
    self.update_save_dir_size()
    model = self.tree_model
    row_iter = model.get_iter_first()
    while row_iter is not None:
        self.iter_set_downloading_columns(model, row_iter)
        row_iter = model.iter_next(row_iter)
# NOTE(review): update_model() reads 'self.tree_model' without calling it,
# so a '@property' decorator line (342) is presumably elided in this view.
def tree_model( self):
    # Build a fresh ListStore of this channel's episodes on each access.
    log('Returning TreeModel for %s', self.url, sender = self)
    return self.items_liststore()
def iter_set_downloading_columns( self, model, iter, episode=None):
    # Update the status-icon column(s) of one episode row based on its
    # download/played/locked state.  Gaps in this view are marked.
    global ICON_AUDIO_FILE, ICON_VIDEO_FILE, ICON_BITTORRENT
    global ICON_DOWNLOADING, ICON_DELETED, ICON_NEW
    # [lines 350-351 elided — presumably 'if episode is None:' guard]
    url = model.get_value( iter, 0)
    episode = db.load_episode(url, factory=lambda x: podcastItem.create_from_dict(x, self))
    # [lines 354-356 elided — presumably icon_size selection]
    if gl.config.episode_list_descriptions:
        # [lines 358-361 elided]
    if services.download_status_manager.is_download_in_progress(url):
        status_icon = util.get_tree_icon(ICON_DOWNLOADING, icon_cache=self.icon_cache, icon_size=icon_size)
    # [line 364 elided — presumably 'else:']
    if episode.state == db.STATE_NORMAL:
        if episode.is_played:
            # [lines 367-368 elided — presumably status_icon = None / 'else:']
            status_icon = util.get_tree_icon(ICON_NEW, icon_cache=self.icon_cache, icon_size=icon_size)
    elif episode.was_downloaded():
        missing = not episode.file_exists()
        # [lines 372-373 elided — presumably 'if missing:']
        log('Episode missing: %s (before drawing an icon)', episode.url, sender=self)
        # [line 375 elided]
        file_type = util.file_type_by_extension( model.get_value( iter, 9))
        if file_type == 'audio':
            status_icon = util.get_tree_icon(ICON_AUDIO_FILE, not episode.is_played, episode.is_locked, not episode.file_exists(), self.icon_cache, icon_size)
        elif file_type == 'video':
            status_icon = util.get_tree_icon(ICON_VIDEO_FILE, not episode.is_played, episode.is_locked, not episode.file_exists(), self.icon_cache, icon_size)
        elif file_type == 'torrent':
            status_icon = util.get_tree_icon(ICON_BITTORRENT, not episode.is_played, episode.is_locked, not episode.file_exists(), self.icon_cache, icon_size)
        # [line 383 elided — presumably 'else:']
        status_icon = util.get_tree_icon('unknown', not episode.is_played, episode.is_locked, not episode.file_exists(), self.icon_cache, icon_size)
    elif episode.state == db.STATE_DELETED or episode.state == db.STATE_DOWNLOADED:
        status_icon = util.get_tree_icon(ICON_DELETED, icon_cache=self.icon_cache, icon_size=icon_size)
    # [line 387 elided — presumably 'else:']
    log('Warning: Cannot determine status icon.', sender=self)
    # [lines 389-390 elided]
    model.set( iter, 4, status_icon)
def items_liststore( self):
    """
    Return a gtk.ListStore containing episodes for this channel
    """
    # NOTE(review): the docstring delimiter lines (394/396) are elided in
    # this view and have been restored here.  Other gaps are marked.
    new_model = gtk.ListStore( gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_STRING,
        gobject.TYPE_BOOLEAN, gtk.gdk.Pixbuf, gobject.TYPE_STRING, gobject.TYPE_STRING,
        gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_STRING)
    # [line 400 elided]
    for item in self.get_all_episodes():
        description = item.title_and_description
        # [lines 403-408 elided — presumably filelength fallback handling]
        filelength = gl.format_filesize(item.length, 1)
        new_iter = new_model.append((item.url, item.title, filelength,
            True, None, item.cute_pubdate(), description, util.remove_html_tags(item.description),
            item.local_filename(), item.extension()))
        self.iter_set_downloading_columns( new_model, new_iter, episode=item)
    # [line 413 elided]
    self.update_save_dir_size()
    # [lines 415-416 elided — presumably 'return new_model']
def find_episode(self, url):
    """Look up one episode of this channel by its (enclosure) URL."""
    return db.load_episode(
        url, factory=lambda row: podcastItem.create_from_dict(row, self))
def get_save_dir(self):
    # Directory below the download root where this channel's files live;
    # named after the URL's MD5 (see get_filename()).
    save_dir = os.path.join(gl.downloaddir, self.filename, '')

    # Create save_dir if it does not yet exist
    if not util.make_directory( save_dir):
        log( 'Could not create save_dir: %s', save_dir, sender = self)
    # [lines 426-428 elided — presumably 'return save_dir']

save_dir = property(fget=get_save_dir)
def remove_downloaded(self):
    """Delete this channel's download folder, ignoring filesystem errors."""
    shutil.rmtree(self.save_dir, ignore_errors=True)
def get_index_file(self):
    # gets index xml filename for downloaded channels list
    return os.path.join( self.save_dir, 'index.xml')

index_file = property(fget=get_index_file)
def get_cover_file( self):
    # gets cover filename for cover download cache
    return os.path.join( self.save_dir, 'cover')

cover_file = property(fget=get_cover_file)
def delete_episode_by_url(self, url):
    """Delete the downloaded file for *url*, mark the episode deleted in
    the database and refresh the channel's M3U playlist."""
    found = db.load_episode(
        url, lambda row: podcastItem.create_from_dict(row, self))
    if found is not None:
        util.delete_file(found.local_filename())
        found.set_state(db.STATE_DELETED)
    self.update_m3u_playlist()
class podcastItem(object):
    """holds data for one object in a channel"""
def load(url, channel):
    # [line 459 elided — presumably '@staticmethod']
    # Load a single episode for *channel* from the DB by URL.
    e = podcastItem(channel)
    d = db.load_episode(url)
    # [line 463 elided — presumably a None guard]
    for k, v in d.iteritems():
        # [lines 465-469 elided — presumably setattr(e, k, v) and 'return e']
def from_feedparser_entry( entry, channel):
    # [line 469 elided — presumably '@staticmethod']
    # Build a podcastItem from a feedparser entry: pick the best enclosure
    # (preferring audio/video types) and fill in metadata.  Gaps marked.
    episode = podcastItem( channel)
    # [line 472 elided]
    episode.title = entry.get( 'title', util.get_first_line( util.remove_html_tags( entry.get( 'summary', ''))))
    episode.link = entry.get( 'link', '')
    episode.description = entry.get( 'summary', entry.get( 'link', entry.get( 'title', '')))
    episode.guid = entry.get( 'id', '')
    if entry.get( 'updated_parsed', None):
        episode.pubDate = rfc822.mktime_tz(entry.updated_parsed+(0,))
    # [line 479 elided]
    if episode.title == '':
        log( 'Warning: Episode has no title, adding anyways.. (Feed Is Buggy!)', sender = episode)
    # [lines 482-483 elided]
    if hasattr(entry, 'enclosures') and len(entry.enclosures) > 0:
        enclosure = entry.enclosures[0]
        if len(entry.enclosures) > 1:
            for e in entry.enclosures:
                if hasattr( e, 'href') and hasattr( e, 'length') and hasattr( e, 'type') and (e.type.startswith('audio/') or e.type.startswith('video/')):
                    if util.normalize_feed_url(e.href) is not None:
                        log( 'Selected enclosure: %s', e.href, sender = episode)
                        # [lines 491-492 elided — presumably 'enclosure = e' + 'break']
        episode.url = util.normalize_feed_url( enclosure.get( 'href', ''))
    elif hasattr(entry, 'link'):
        (filename, extension) = util.filename_from_url(entry.link)
        if extension == '' and hasattr( entry, 'type'):
            # NOTE(review): this reads 'e.type', but 'e' is the loop variable
            # from the enclosure branch and is likely unbound here —
            # 'entry.type' looks intended.  Confirm against upstream.
            extension = util.extension_from_mimetype(e.type)
        file_type = util.file_type_by_extension(extension)
        if file_type is not None:
            log('Adding episode with link to file type "%s".', file_type, sender=episode)
            episode.url = entry.link
    # [lines 502-503 elided]
    # This item in the feed has no downloadable enclosure
    # [lines 505-506 elided]
    if not episode.pubDate:
        metainfo = util.get_episode_info_from_url(episode.url)
        if 'pubdate' in metainfo:
            # [line 510 elided — presumably 'try:']
            episode.pubDate = int(float(metainfo['pubdate']))
            # [line 512 elided — presumably 'except:']
            log('Cannot convert pubDate "%s" in from_feedparser_entry.', str(metainfo['pubdate']), traceback=True)
    # [line 514 elided]
    if hasattr( enclosure, 'length'):
        # [line 516 elided — presumably 'try:']
        episode.length = int(enclosure.length)
        # [lines 518-520 elided — presumably an 'except:' fallback]
    if hasattr( enclosure, 'type'):
        episode.mimetype = enclosure.type
    # [line 523 elided]
    if episode.title == '':
        ( filename, extension) = os.path.splitext( os.path.basename( episode.url))
        episode.title = filename
    # [lines 527-530 elided — presumably 'return episode']
def __init__( self, channel):
    # Used by Storage for faster saving
    # [lines 533-536 elided — presumably id/url/title/link defaults]
    self.mimetype = 'application/octet-stream'
    # [line 538 elided]
    self.description = ''
    # [line 540 elided]
    self.channel = channel
    # [lines 542-543 elided — presumably pubDate/length defaults]
    self.state = db.STATE_NORMAL
    self.is_played = False
    self.is_locked = False
def save(self, bulk=False):
    """Persist the episode; promote its state to DOWNLOADED when the
    file is already present on disk."""
    not_yet_downloaded = self.state != db.STATE_DOWNLOADED
    if not_yet_downloaded and self.file_exists():
        self.state = db.STATE_DOWNLOADED
    db.save_episode(self, bulk=bulk)
def set_state(self, state):
    # [line 554 elided — presumably 'self.state = state']
    # Persist the state plus current played/locked flags to the DB.
    db.mark_episode(self.url, state=self.state, is_played=self.is_played, is_locked=self.is_locked)
def mark(self, state=None, is_played=None, is_locked=None):
    # Update any subset of (state, played, locked) in memory and in the DB.
    if state is not None:
        # [line 559 elided — presumably 'self.state = state']
    if is_played is not None:
        self.is_played = is_played
    if is_locked is not None:
        self.is_locked = is_locked
    db.mark_episode(self.url, state=state, is_played=is_played, is_locked=is_locked)
def mark_downloaded(self, save=False):
    # Flag the episode as downloaded and unplayed.
    self.state = db.STATE_DOWNLOADED
    self.is_played = False
    # [lines 569-572 elided — presumably 'if save: self.save()']
def create_from_dict(d, channel):
    # [line 572 elided — presumably '@staticmethod']
    # Build a podcastItem for *channel* from a database row dict.
    e = podcastItem(channel)
    # [lines 575-576 elided — presumably 'for key in d:']
    setattr(e, key, d[key])
    # [lines 578-579 elided — presumably 'return e']
def title_and_description(self):
    # [line 580 elided — presumably '@property'; items_liststore() reads
    # this as an attribute, not a call]
    """
    Returns Pango markup for displaying in a TreeView, and
    disables the description when the config variable
    "episode_list_descriptions" is not set.
    """
    if gl.config.episode_list_descriptions:
        return '%s\n<small>%s</small>' % (saxutils.escape(self.title), saxutils.escape(self.one_line_description()))
    # [line 589 elided — presumably 'else:']
    return saxutils.escape(self.title)
def age_in_days(self):
    # Age of the downloaded file on disk, in days.
    return util.file_age_in_days(self.local_filename())

# [line 595 elided — presumably a 'def is_old(self):' header; confirm name]
    return self.age_in_days() > gl.config.episode_old_age
def get_age_string(self):
    # Human-readable form of age_in_days() (via util.file_age_to_string).
    return util.file_age_to_string(self.age_in_days())

age_prop = property(fget=get_age_string)
def one_line_description( self):
    """Collapse the episode description to a single plain-text line.

    Falls back to a translated 'No description available' when the
    HTML-stripped description is empty.
    """
    # NOTE(review): the 'else:' line (607) is elided in this view;
    # reconstructed to join the two visible return statements.
    lines = util.remove_html_tags(self.description).strip().splitlines()
    if not lines or lines[0] == '':
        return _('No description available')
    else:
        return ' '.join((l.strip() for l in lines if l.strip() != ''))
def delete_from_disk(self):
    """Best-effort removal of the downloaded file; logs instead of raising.

    Delegates to the channel so the DB state and M3U playlist stay in sync.
    """
    # NOTE(review): the 'try:'/'except:' lines (611/613) are elided in this
    # view; reconstructed from the error-log line below.
    try:
        self.channel.delete_episode_by_url(self.url)
    except:
        log('Cannot delete episode from disk: %s', self.title, traceback=True, sender=self)
def local_filename( self):
    # Absolute path of the downloaded file for this episode.  Gaps marked.
    ext = self.extension()
    # [line 618 elided]
    # For compatibility with already-downloaded episodes,
    # we accept md5 filenames if they are downloaded now.
    md5_filename = os.path.join(self.channel.save_dir, md5.new(self.url).hexdigest()+ext)
    if os.path.exists(md5_filename) or not gl.config.experimental_file_naming:
        # [lines 623-624 elided — presumably 'return md5_filename']
    # If the md5 filename does not exist,
    ( episode, e) = util.filename_from_url(self.url)
    episode = util.sanitize_filename(episode) + ext
    # [line 628 elided]
    # If the episode filename looks suspicious,
    # we still return the md5 filename to be on
    # the safe side of the fence ;)
    if len(episode) == 0 or episode.startswith('redirect.'):
        # [line 633 elided — presumably 'return md5_filename']
    filename = os.path.join(self.channel.save_dir, episode)
    # [lines 635-636 elided — presumably 'return filename']
def extension( self):
    # File extension for this episode, from the URL or (fallback) mimetype.
    ( filename, ext) = util.filename_from_url(self.url)
    # if we can't detect the extension from the url fallback on the mimetype
    if ext == '' or util.file_type_by_extension(ext) is None:
        ext = util.extension_from_mimetype(self.mimetype)
        #log('Getting extension from mimetype for: %s (mimetype: %s)' % (self.title, ext), sender=self)
    # [lines 643-644 elided — presumably 'return ext']
# [line 645 elided — presumably a 'def mark_new(self):' header; confirm]
    # Reset the episode to the normal/unplayed state, in memory and DB.
    self.state = db.STATE_NORMAL
    self.is_played = False
    db.mark_episode(self.url, state=self.state, is_played=self.is_played)

# [lines 649-650 elided — presumably a 'def mark_old(self):' header; confirm]
    # Flag the episode as played, in memory and DB.
    self.is_played = True
    db.mark_episode(self.url, is_played=True)
def file_exists(self):
    # True if the downloaded file for this episode is present on disk.
    return os.path.exists(self.local_filename())
def was_downloaded(self, and_exists=False):
    # True if the episode reached the downloaded state (optionally also
    # requiring the file to still exist on disk).  Returns elided below.
    if self.state != db.STATE_DOWNLOADED:
        # [line 659 elided — presumably 'return False']
    if and_exists and not self.file_exists():
        # [lines 661-663 elided — presumably 'return False' then 'return True']
def sync_filename( self):
    # Name used when copying the episode to a device; honours the
    # custom sync-name template option.
    if gl.config.custom_sync_name_enabled:
        return util.object_string_formatter(gl.config.custom_sync_name, episode=self, channel=self.channel)
    # [lines 667-669 elided — presumably 'else: return self.title']
def file_type( self):
    # 'audio', 'video', 'torrent', ... based on the file extension.
    return util.file_type_by_extension( self.extension() )

# [lines 673-674 elided — presumably a 'def basename(self):'-style header]
    return os.path.splitext( os.path.basename( self.url))[0]
def published( self):
    # pubDate formatted as YYYYMMDD.
    # [line 679 elided — presumably 'try:']
    return datetime.datetime.fromtimestamp(self.pubDate).strftime('%Y%m%d')
    # [line 681 elided — presumably 'except:']
    log( 'Cannot format pubDate for "%s".', self.title, sender = self)
    # [lines 683-684 elided — presumably a fallback return value]
def cute_pubdate(self):
    # Human-friendly publication date; '(unknown)' fallback.
    result = util.format_date(self.pubDate)
    # [line 687 elided — presumably 'if result is None:']
    return '(%s)' % _('unknown')
    # [lines 689-691 elided — presumably 'else: return result']

pubdate_prop = property(fget=cute_pubdate)
def calculate_filesize( self):
    """Refresh self.length from the size of the downloaded file.

    Best-effort: failures (e.g. file missing) are logged, not raised.
    """
    # NOTE(review): the 'try:'/'except:' lines (695/697) are elided in this
    # view; reconstructed from the error-log line below.
    try:
        self.length = os.path.getsize(self.local_filename())
    except:
        log( 'Could not get filesize for %s.', self.url)
def get_filesize_string( self):
    # Episode size formatted for display (via gl.format_filesize).
    return gl.format_filesize( self.length)

filesize_prop = property(fget=get_filesize_string)
def get_channel_title( self):
    # Title of the channel this episode belongs to.
    return self.channel.title

channel_prop = property(fget=get_channel_title)
def get_played_string( self):
    # Display string for the 'played' flag.
    if not self.is_played:
        # [lines 712-715 elided — presumably the two return statements]

played_prop = property(fget=get_played_string)
def update_channel_model_by_iter( model, iter, channel, color_dict,
        cover_cache=None, max_width=0, max_height=0 ):
    # Refresh one channel row: title markup, unplayed/downloaded "pill",
    # colors and cover image.  Gaps in this view are marked.
    # [line 722 elided]
    count_downloaded = channel.stat(state=db.STATE_DOWNLOADED)
    count_new = channel.stat(state=db.STATE_NORMAL, is_played=False)
    count_unplayed = channel.stat(state=db.STATE_DOWNLOADED, is_played=False)
    # [lines 726-727 elided]
    model.set(iter, 0, channel.url)
    model.set(iter, 1, channel.title)
    # [line 730 elided]
    title_markup = saxutils.escape(channel.title)
    description_markup = saxutils.escape(util.get_first_line(channel.description) or _('No description available'))
    # [lines 733-734 elided — presumably 'd = []' and a bold-markup condition]
    d.append('<span weight="bold">')
    d.append(title_markup)
    # [lines 737-739 elided — presumably the closing '</span>' handling]
    description = ''.join(d+['\n', '<small>', description_markup, '</small>'])
    model.set(iter, 2, description)
    # [line 742 elided]
    if channel.parse_error is not None:
        model.set(iter, 6, channel.parse_error)
        color = color_dict['parse_error']
    # [line 746 elided — presumably 'else:']
    color = color_dict['default']
    # [line 748 elided]
    if channel.update_flag:
        color = color_dict['updating']
    # [line 751 elided]
    model.set(iter, 8, color)
    # [line 753 elided]
    if count_unplayed > 0 or count_downloaded > 0:
        model.set(iter, 3, draw.draw_pill_pixbuf(str(count_unplayed), str(count_downloaded)))
        model.set(iter, 7, True)
    # [line 757 elided — presumably 'else:']
    model.set(iter, 7, False)
    # [line 759 elided]
    # Load the cover if we have it, but don't download
    # it if it's not available (to avoid blocking here)
    pixbuf = services.cover_downloader.get_cover(channel, avoid_downloading=True)
    # [line 763 elided]
    if pixbuf is not None:
        new_pixbuf = util.resize_pixbuf_keep_ratio(pixbuf, max_width, max_height, channel.url, cover_cache)
        model.set(iter, 5, new_pixbuf or pixbuf)
def channels_to_model(channels, color_dict, cover_cache=None, max_width=0, max_height=0):
    # Build the gtk.ListStore backing the channel list view.
    new_model = gtk.ListStore( str, str, str, gtk.gdk.Pixbuf, int,
        gtk.gdk.Pixbuf, str, bool, str )
    # [line 771 elided]
    for channel in channels:
        update_channel_model_by_iter( new_model, new_model.append(), channel,
            color_dict, cover_cache, max_width, max_height )
    # [lines 775-777 elided — presumably 'return new_model']
# [line 779 elided — presumably a 'def load_channels():' header; it is
# called as load_channels() from update_channels() below]
    return db.load_channels(lambda d: podcastChannel.create_from_dict(d))
def update_channels(callback_proc=None, callback_error=None, is_cancelled_cb=None):
    # Refresh all subscribed channels, reporting progress via callbacks.
    # Gaps in this view are marked.
    log('Updating channels....')
    # [line 784 elided]
    channels = load_channels()
    # [lines 786-787 elided — presumably a counter initialisation]
    for channel in channels:
        if is_cancelled_cb is not None and is_cancelled_cb():
            # [line 790 elided — presumably 'return' or 'break']
        callback_proc and callback_proc(count, len(channels))
        # [lines 792-796 elided — presumably the per-channel update + count]
def save_channels( channels):
    """Write the subscribed channels to the user's OPML file and return
    the exporter's result."""
    opml_file = gl.channel_opml_file
    return opml.Exporter(opml_file).write(channels)
def can_restore_from_opml():
    # [line 802 elided — presumably a docstring]
    # Return the OPML file path when it contains at least one channel.
    if len(opml.Importer(gl.channel_opml_file).items):
        return gl.channel_opml_file
    # [lines 805-808 elided — presumably a 'return None' fallback]
class LocalDBReader( object):
    """
    DEPRECATED - Only used for migration to SQLite
    """
    # NOTE(review): docstring delimiter lines (811/813) restored here.
    def __init__( self, url):
        # [line 815 elided — presumably 'self.url = url']
def get_text(self, nodelist):
    """Concatenate the character data of all text nodes in *nodelist*."""
    pieces = []
    for node in nodelist:
        if node.nodeType == node.TEXT_NODE:
            pieces.append(node.data)
    return ''.join(pieces)
def get_text_by_first_node(self, element, name):
    """Text content of the first <name> child element of *element*."""
    first_match = element.getElementsByTagName(name)[0]
    return self.get_text(first_match.childNodes)
def get_episode_from_element( self, channel, element):
    # Convert one <item> DOM element from the old LocalDB format into a
    # podcastItem.  Gaps in this view are marked.
    episode = podcastItem( channel)
    episode.title = self.get_text_by_first_node( element, 'title')
    episode.description = self.get_text_by_first_node( element, 'description')
    episode.url = self.get_text_by_first_node( element, 'url')
    episode.link = self.get_text_by_first_node( element, 'link')
    episode.guid = self.get_text_by_first_node( element, 'guid')
    # [lines 830-831 elided — presumably an 'if not episode.guid:' guard]
    for k in ('url', 'link'):
        if getattr(episode, k) is not None:
            episode.guid = getattr(episode, k)
            log('Notice: episode has no guid, using %s', episode.guid)
            # [lines 836-837 elided — presumably 'break' and 'try:']
    episode.pubDate = float(self.get_text_by_first_node(element, 'pubDate'))
    # [line 839 elided — presumably 'except:']
    log('Looks like you have an old pubDate in your LocalDB -> converting it')
    episode.pubDate = self.get_text_by_first_node(element, 'pubDate')
    log('FYI: pubDate value is: "%s"', episode.pubDate, sender=self)
    pubdate = feedparser._parse_date(episode.pubDate)
    # [line 844 elided — presumably 'if pubdate is None:']
    log('Error converting the old pubDate - sorry!', sender=self)
    # [lines 846-847 elided — presumably a fallback and 'else:']
    log('PubDate converted successfully - yay!', sender=self)
    episode.pubDate = time.mktime(pubdate)
    # [line 850 elided — presumably 'try:']
    episode.mimetype = self.get_text_by_first_node( element, 'mimetype')
    # [line 852 elided — presumably 'except:']
    log('No mimetype info for %s', episode.url, sender=self)
    episode.calculate_filesize()
    # [lines 855-856 elided — presumably 'return episode']
def load_and_clean( self, filename):
    """
    Clean-up a LocalDB XML file that could potentially contain
    "unbound prefix" XML elements (generated by the old print-based
    LocalDB code). The code removes those lines to make the new
    This should be removed in a future version.
    """
    # NOTE(review): docstring delimiters and lines 862-863/865-866
    # (including the 'lines' list initialisation) are elided in this view.
    for line in open(filename).read().split('\n'):
        if not line.startswith('<gpodder:info'):
            # [lines 869-870 elided — presumably 'lines.append(line)']
    return '\n'.join( lines)
873 def read( self
, filename
):
874 doc
= xml
.dom
.minidom
.parseString( self
.load_and_clean( filename
))
875 rss
= doc
.getElementsByTagName('rss')[0]
877 channel_element
= rss
.getElementsByTagName('channel')[0]
879 channel
= podcastChannel( url
= self
.url
)
880 channel
.title
= self
.get_text_by_first_node( channel_element
, 'title')
881 channel
.description
= self
.get_text_by_first_node( channel_element
, 'description')
882 channel
.link
= self
.get_text_by_first_node( channel_element
, 'link')
885 for episode_element
in rss
.getElementsByTagName('item'):
886 episode
= self
.get_episode_from_element( channel
, episode_element
)
887 episodes
.append(episode
)