1 # -*- coding: utf-8 -*-
3 # gPodder - A media aggregator and podcast client
4 # Copyright (c) 2005-2018 The gPodder Team
5 # Copyright (c) 2011 Neal H. Walfield
7 # gPodder is free software; you can redistribute it and/or modify
8 # it under the terms of the GNU General Public License as published by
9 # the Free Software Foundation; either version 3 of the License, or
10 # (at your option) any later version.
12 # gPodder is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
17 # You should have received a copy of the GNU General Public License
18 # along with this program. If not, see <http://www.gnu.org/licenses/>.
23 # gpodder.model - Core model classes for gPodder (2009-08-13)
24 # Based on libpodcasts.py (thp, 2005-10-29)
41 from gpodder
import coverart
, feedcore
, registry
, schema
, util
, vimeo
, youtube
43 logger
= logging
.getLogger(__name__
)
49 """ abstract class for presenting a parsed feed to PodcastChannel """
52 """ :return str: the feed's title """
56 """ :return str: link to the feed's website """
def get_description(self):
    """ :return str: feed's textual description """
    # abstract -- concrete feeds (e.g. PodcastParserFeed) override this
def get_cover_url(self):
    """ :return str: url of the feed's cover image """
    # abstract -- concrete feeds (e.g. PodcastParserFeed) override this
def get_payment_url(self):
    """ :return str: optional -- feed's payment url """
    # abstract -- concrete feeds (e.g. PodcastParserFeed) override this
def get_http_etag(self):
    """ :return str: optional -- last HTTP etag header, for conditional request next time """
    # abstract -- concrete feeds (e.g. PodcastParserFeed) override this
def get_http_last_modified(self):
    """ :return str: optional -- last HTTP Last-Modified header, for conditional request next time """
    # abstract -- concrete feeds (e.g. PodcastParserFeed) override this
def get_new_episodes(self, channel, existing_guids):
    """
    Produce new episodes and update old ones.
    Feed is a class to present results, so the feed shall have already been fetched.
    Existing episodes not in all_seen_guids will be purged from the database.
    :param PodcastChannel channel: the updated channel
    :param dict(str, PodcastEpisode): existing episodes, by guid
    :return (list(PodcastEpisode), set(str)): new_episodes, all_seen_guids
    """
def get_next_page(self, channel, max_episodes):
    """
    Paginated feed support (RFC 5005).
    If the feed is paged, return the next feed page.
    Returned page will in turn be asked for the next page, until None is returned.
    :return feedcore.Result: the next feed's page,
            as a fully parsed Feed or None
    """
101 class PodcastParserFeed(Feed
):
102 def __init__(self
, feed
, fetcher
, max_episodes
=0):
104 self
.fetcher
= fetcher
105 self
.max_episodes
= max_episodes
108 return self
.feed
.get('title')
111 vid
= youtube
.get_youtube_id(self
.feed
['url'])
113 self
.feed
['link'] = youtube
.get_channel_id_url(self
.feed
['url'], self
.fetcher
.feed_data
)
114 return self
.feed
.get('link')
116 def get_description(self
):
117 vid
= youtube
.get_youtube_id(self
.feed
['url'])
119 self
.feed
['description'] = youtube
.get_channel_desc(self
.feed
['url'], self
.fetcher
.feed_data
)
120 return self
.feed
.get('description')
def get_cover_url(self):
    """Return the cover image URL the parser extracted, or None."""
    cover = self.feed.get('cover_url')
    return cover
def get_payment_url(self):
    """Return the feed's payment/donation URL, or None if absent."""
    payment = self.feed.get('payment_url')
    return payment
def get_http_etag(self):
    """Return the ETag from the stored HTTP headers, or None."""
    headers = self.feed.get('headers', {})
    return headers.get('etag')
def get_http_last_modified(self):
    """Return the Last-Modified value from the stored HTTP headers, or None."""
    headers = self.feed.get('headers', {})
    return headers.get('last-modified')
134 def get_new_episodes(self
, channel
, existing_guids
):
135 # Keep track of episode GUIDs currently seen in the feed
138 # list of new episodes
141 # We have to sort the entries in descending chronological order,
142 # because if the feed lists items in ascending order and has >
143 # max_episodes old episodes, new episodes will not be shown.
144 # See also: gPodder Bug 1186
145 entries
= sorted(self
.feed
.get('episodes', []), key
=lambda episode
: episode
['published'], reverse
=True)
147 # We can limit the maximum number of entries that gPodder will parse
148 if self
.max_episodes
> 0 and len(entries
) > self
.max_episodes
:
149 entries
= entries
[:self
.max_episodes
]
151 num_duplicate_guids
= 0
153 # Search all entries for new episodes
154 for entry
in entries
:
155 episode
= channel
.EpisodeClass
.from_podcastparser_entry(entry
, channel
)
159 # Discard episode when its GUID collides with a newer episode
160 if episode
.guid
in seen_guids
:
161 num_duplicate_guids
+= 1
162 channel
._update
_error
= ('Discarded {} episode(s) with non-unique GUID, contact the podcast publisher to fix this issue.'
163 .format(num_duplicate_guids
))
164 logger
.warning('Discarded episode with non-unique GUID, contact the podcast publisher to fix this issue. [%s] [%s]',
165 channel
.title
, episode
.title
)
168 seen_guids
.add(episode
.guid
)
169 # Detect (and update) existing episode based on GUIDs
170 existing_episode
= existing_guids
.get(episode
.guid
, None)
172 if existing_episode
.total_time
== 0 and 'youtube' in episode
.url
:
173 # query duration for existing youtube episodes that haven't been downloaded or queried
174 # such as live streams after they have ended
175 existing_episode
.total_time
= youtube
.get_total_time(episode
)
177 existing_episode
.update_from(episode
)
178 existing_episode
.cache_text_description()
179 existing_episode
.save()
181 elif episode
.total_time
== 0 and 'youtube' in episode
.url
:
182 # query duration for new youtube episodes
183 episode
.total_time
= youtube
.get_total_time(episode
)
185 episode
.cache_text_description()
187 new_episodes
.append(episode
)
188 return new_episodes
, seen_guids
def get_next_page(self, channel, max_episodes):
    """Fetch the next page of a paged feed (RFC 5005), or None when there is none."""
    if 'paged_feed_next' not in self.feed:
        return None
    next_url = self.feed['paged_feed_next']
    logger.debug("get_next_page: feed has next %s", next_url)
    # Re-apply the channel's credentials before fetching the next page.
    next_url = channel.authenticate_url(next_url)
    return self.fetcher.fetch(next_url, autodiscovery=False, max_episodes=max_episodes)
199 class gPodderFetcher(feedcore
.Fetcher
):
201 This class implements fetching a channel from custom feed handlers
202 or the default using podcastparser
204 def fetch_channel(self
, channel
, max_episodes
):
205 custom_feed
= registry
.feed_handler
.resolve(channel
, None, max_episodes
)
206 if custom_feed
is not None:
208 # TODO: revisit authenticate_url: pass auth as kwarg
209 # If we have a username or password, rebuild the url with them included
210 # Note: using a HTTPBasicAuthHandler would be pain because we need to
211 # know the realm. It can be done, but I think this method works, too
212 url
= channel
.authenticate_url(channel
.url
)
213 return self
.fetch(url
, channel
.http_etag
, channel
.http_last_modified
, max_episodes
=max_episodes
)
215 def _resolve_url(self
, url
):
216 url
= youtube
.get_real_channel_url(url
)
217 url
= vimeo
.get_real_channel_url(url
)
220 def parse_feed(self
, url
, feed_data
, data_stream
, headers
, status
, max_episodes
=0, **kwargs
):
221 self
.feed_data
= feed_data
223 feed
= podcastparser
.parse(url
, data_stream
)
225 feed
['headers'] = headers
226 return feedcore
.Result(status
, PodcastParserFeed(feed
, self
, max_episodes
))
227 except ValueError as e
:
228 raise feedcore
.InvalidFeed('Could not parse feed: {url}: {msg}'.format(url
=url
, msg
=e
))
233 # database -> podcast -> episode -> download/playback
234 # podcast.parent == db
235 # podcast.children == [episode, ...]
236 # episode.parent == podcast
238 # - normally: episode.children = (None, None)
239 # - downloading: episode.children = (DownloadTask(), None)
240 # - playback: episode.children = (None, PlaybackTask())
243 class PodcastModelObject(object):
245 A generic base class for our podcast model providing common helper
246 and utility functions.
248 __slots__
= ('id', 'parent', 'children')
251 def create_from_dict(cls
, d
, *args
):
253 Create a new object, passing "args" to the constructor
254 and then updating the object with the values from "d".
258 # XXX: all(map(lambda k: hasattr(o, k), d))?
259 for k
, v
in d
.items():
265 class PodcastEpisode(PodcastModelObject
):
266 """holds data for one object in a channel"""
267 # In theory, Linux can have 255 bytes (not characters!) in a filename, but
268 # filesystems like eCryptFS store metadata in the filename, making the
269 # effective number of characters less than that. eCryptFS recommends
270 # 140 chars, we use 120 here (140 - len(extension) - len(".partial.webm"))
271 # (youtube-dl appends an extension after .partial, ".webm" is the longest).
272 # References: gPodder bug 1898, http://unix.stackexchange.com/a/32834
273 MAX_FILENAME_LENGTH
= 120 # without extension
274 MAX_FILENAME_WITH_EXT_LENGTH
= 140 - len(".partial.webm") # with extension
276 __slots__
= schema
.EpisodeColumns
+ ('_download_error', '_text_description',)
278 def _deprecated(self
):
279 raise Exception('Property is deprecated!')
# Legacy flags: any read or write goes through _deprecated() and raises.
is_played = property(fget=_deprecated, fset=_deprecated)
is_locked = property(fget=_deprecated, fset=_deprecated)
def has_website_link(self):
    """True if the episode has a website link distinct from its media URL.

    A link equal to the URL still counts when it is a video site link.
    """
    if not self.link:
        return False
    if self.link != self.url:
        return True
    return youtube.is_video_link(self.link)
289 def from_podcastparser_entry(cls
, entry
, channel
):
290 episode
= cls(channel
)
291 episode
.guid
= entry
['guid']
292 episode
.title
= entry
['title']
293 episode
.link
= entry
['link']
294 episode
.episode_art_url
= entry
.get('episode_art_url')
296 # Only one of the two description fields should be set at a time.
297 # This keeps the database from doubling in size and reduces load time from slow storage.
298 # episode._text_description is initialized by episode.cache_text_description() from the set field.
299 # episode.html_description() returns episode.description_html or generates from episode.description.
300 if entry
.get('description_html'):
301 episode
.description
= ''
302 episode
.description_html
= entry
['description_html']
304 episode
.description
= util
.remove_html_tags(entry
['description'] or '')
305 episode
.description_html
= ''
307 episode
.total_time
= entry
['total_time']
308 episode
.published
= entry
['published']
309 episode
.payment_url
= entry
['payment_url']
310 episode
.chapters
= None
311 if entry
.get("chapters"):
312 episode
.chapters
= json
.dumps(entry
["chapters"])
314 audio_available
= any(enclosure
['mime_type'].startswith('audio/') for enclosure
in entry
['enclosures'])
315 video_available
= any(enclosure
['mime_type'].startswith('video/') for enclosure
in entry
['enclosures'])
316 link_has_media
= False
317 if not (audio_available
or video_available
):
319 episode
.url
= util
.normalize_feed_url(entry
['link'])
320 # Check if any extensions (e.g. youtube-dl) support the link
321 link_has_media
= registry
.custom_downloader
.resolve(None, None, episode
) is not None
323 media_available
= audio_available
or video_available
or link_has_media
325 url_is_invalid
= False
326 for enclosure
in entry
['enclosures']:
327 episode
.mime_type
= enclosure
['mime_type']
329 # Skip images in feeds if audio or video is available (bug 979)
330 # This must (and does) also look in Media RSS enclosures (bug 1430)
331 if episode
.mime_type
.startswith('image/') and media_available
:
334 # If we have audio or video available later on, skip
335 # all 'application/*' data types (fixes Linux Outlaws and peertube feeds)
336 if episode
.mime_type
.startswith('application/') and media_available
:
339 episode
.url
= util
.normalize_feed_url(enclosure
['url'])
341 url_is_invalid
= True
344 episode
.file_size
= enclosure
['file_size']
347 # Brute-force detection of the episode link
348 episode
.url
= util
.normalize_feed_url(entry
['link'])
350 # The episode has no downloadable content.
351 # Set an empty URL so downloading will fail.
353 # Display an error icon if URL is invalid.
354 if url_is_invalid
or (entry
['link'] is not None and entry
['link'] != ''):
355 episode
._download
_error
= 'Invalid episode URL'
358 if any(mod
.is_video_link(episode
.url
) for mod
in (youtube
, vimeo
)):
361 # Check if we can resolve this link to a audio/video file
362 filename
, extension
= util
.filename_from_url(episode
.url
)
363 file_type
= util
.file_type_by_extension(extension
)
365 # The link points to a audio or video file - use it!
366 if file_type
is not None:
372 # The episode has no downloadable content.
373 # It is either a blog post or it links to a webpage with content accessible from shownotes title.
374 # Remove the URL so downloading will fail.
378 def __init__(self
, channel
):
379 self
.parent
= channel
380 self
.podcast_id
= self
.parent
.id
381 self
.children
= (None, None)
387 self
.mime_type
= 'application/octet-stream'
389 self
.episode_art_url
= None
390 self
.description
= ''
391 self
.description_html
= ''
395 self
.download_filename
= None
396 self
.payment_url
= None
398 self
.state
= gpodder
.STATE_NORMAL
400 self
.archive
= channel
.auto_archive_episodes
404 self
.current_position
= 0
405 self
.current_position_updated
= 0
407 # Timestamp of last playback time
408 self
.last_playback
= 0
410 self
._download
_error
= None
411 self
._text
_description
= ''
419 return self
.parent
.parent
.db
422 def trimmed_title(self
):
423 """Return the title with the common prefix trimmed"""
424 # Minimum amount of leftover characters after trimming. This
425 # avoids things like "Common prefix 123" to become just "123".
426 # If there are LEFTOVER_MIN or less characters after trimming,
427 # the original title will be returned without trimming.
430 # "Podcast Name - Title" and "Podcast Name: Title" -> "Title"
431 for postfix
in (' - ', ': '):
432 prefix
= self
.parent
.title
+ postfix
433 if (self
.title
.startswith(prefix
)
434 and len(self
.title
) - len(prefix
) > LEFTOVER_MIN
):
435 return self
.title
[len(prefix
):]
438 # "Podcast Name <number>: ..." -> "<number>: ..."
439 r
'^%s (\d+: .*)' % re
.escape(self
.parent
.title
),
441 # "Episode <number>: ..." -> "<number>: ..."
445 for pattern
in regex_patterns
:
446 if re
.match(pattern
, self
.title
):
447 title
= re
.sub(pattern
, r
'\1', self
.title
)
448 if len(title
) > LEFTOVER_MIN
:
451 # "#001: Title" -> "001: Title"
453 not self
.parent
._common
_prefix
454 and re
.match(r
'^#\d+: ', self
.title
)
455 and len(self
.title
) - 1 > LEFTOVER_MIN
):
456 return self
.title
[1:]
458 if (self
.parent
._common
_prefix
is not None
459 and self
.title
.startswith(self
.parent
._common
_prefix
)
460 and len(self
.title
) - len(self
.parent
._common
_prefix
) > LEFTOVER_MIN
):
461 return self
.title
[len(self
.parent
._common
_prefix
):]
465 def _set_download_task(self
, download_task
):
466 self
.children
= (download_task
, self
.children
[1])
468 def _get_download_task(self
):
469 return self
.children
[0]
471 download_task
= property(_get_download_task
, _set_download_task
)
474 def downloading(self
):
475 task
= self
.download_task
479 return task
.status
in (task
.DOWNLOADING
, task
.QUEUED
, task
.PAUSING
, task
.PAUSED
, task
.CANCELLING
)
481 def get_player(self
, config
):
482 file_type
= self
.file_type()
483 if file_type
== 'video' and config
.player
.video
and config
.player
.video
!= 'default':
484 player
= config
.player
.video
485 elif file_type
== 'audio' and config
.player
.audio
and config
.player
.audio
!= 'default':
486 player
= config
.player
.audio
def can_play(self, config):
    """Return a truthy value when this episode can be played or streamed."""
    # gPodder.playback_episodes() filters selection with this method.
    downloaded = self.was_downloaded(and_exists=True)
    if downloaded:
        return downloaded
    preview = self.can_preview()
    if preview:
        return preview
    return self.can_stream(config)
def can_preview(self):
    """True when a custom downloader exposes a partial file that exists on disk."""
    in_progress = self.downloading
    if not in_progress:
        return in_progress
    downloader = self.download_task.custom_downloader
    if downloader is None:
        return False
    partial = downloader.partial_filename
    if partial is None:
        return False
    return os.path.exists(partial)
def can_stream(self, config):
    """Return a truthy value if a real (non-default) player is configured.

    Don't try streaming if the user has not defined a player
    or else we would probably open the browser when giving a URL to xdg-open.
    We look at the audio or video player depending on its file type.
    """
    player = self.get_player(config)
    if not player:
        return player
    return player != 'default'
def can_download(self):
    """
    gPodder.on_download_selected_episodes() filters selection with this method.
    PAUSING and PAUSED tasks can be resumed.
    """
    if self.was_downloaded(and_exists=True):
        return False
    task = self.download_task
    if task is None:
        return True
    return task.can_queue() or task.status == task.PAUSING
526 gPodder.on_pause_selected_episodes() filters selection with this method.
528 return self
.download_task
is not None and self
.download_task
.can_pause()
def can_cancel(self):
    """
    DownloadTask.cancel() only cancels the following tasks.
    """
    task = self.download_task
    return task is not None and task.can_cancel()
def can_delete(self):
    """
    gPodder.delete_episode_list() filters out locked episodes, and cancels all unlocked tasks in selection.
    """
    if self.state == gpodder.STATE_DELETED or self.archive:
        return False
    task = self.download_task
    return task is None or task.status == task.FAILED
545 gPodder.on_item_toggle_lock_activate() unlocks deleted episodes and toggles all others.
546 Locked episodes can always be unlocked.
548 return self
.state
!= gpodder
.STATE_DELETED
or self
.archive
def check_is_new(self):
    """True for a normal-state, new episode that is not currently downloading."""
    if self.state != gpodder.STATE_NORMAL:
        return False
    return self.is_new and not self.downloading
555 gpodder
.user_extensions
.on_episode_save(self
)
556 self
.db
.save_episode(self
)
558 def on_downloaded(self
, filename
):
559 self
.state
= gpodder
.STATE_DOWNLOADED
561 self
.file_size
= os
.path
.getsize(filename
)
564 def set_state(self
, state
):
568 def playback_mark(self
):
570 self
.last_playback
= int(time
.time())
571 gpodder
.user_extensions
.on_episode_playback(self
)
574 def mark(self
, state
=None, is_played
=None, is_locked
=None):
575 if state
is not None:
577 if is_played
is not None:
578 self
.is_new
= not is_played
580 # "Mark as new" must "undelete" the episode
581 if self
.is_new
and self
.state
== gpodder
.STATE_DELETED
:
582 self
.state
= gpodder
.STATE_NORMAL
583 if is_locked
is not None:
584 self
.archive
= is_locked
587 def age_in_days(self
):
588 return util
.file_age_in_days(self
.local_filename(create
=False,
# Attribute-style access to the downloaded file's age in days.
age_int_prop = property(fget=age_in_days)
593 def get_age_string(self
):
594 return util
.file_age_to_string(self
.age_in_days())
# Attribute-style access to the formatted age string.
age_prop = property(fget=get_age_string)
598 def cache_text_description(self
):
600 self
._text
_description
= self
.description
601 elif self
.description_html
:
602 self
._text
_description
= util
.remove_html_tags(self
.description_html
)
604 self
._text
_description
= ''
def html_description(self):
    """Return HTML description; generate one from plain text when missing."""
    if self.description_html:
        return self.description_html
    text = self.description or _('No description available')
    return util.nice_html_description(self.episode_art_url, text)
610 def one_line_description(self
):
611 MAX_LINE_LENGTH
= 120
612 desc
= self
._text
_description
613 desc
= re
.sub(r
'\s+', ' ', desc
).strip()
615 return _('No description available')
617 # Decode the description to avoid gPodder bug 1277
618 desc
= util
.convert_bytes(desc
).strip()
620 if len(desc
) > MAX_LINE_LENGTH
:
621 return desc
[:MAX_LINE_LENGTH
] + '...'
def delete_from_disk(self):
    """Remove the downloaded file (if any) and mark the episode as deleted."""
    local_file = self.local_filename(create=False, check_only=True)
    if local_file is not None:
        # Let extensions react before the file disappears.
        gpodder.user_extensions.on_episode_delete(self, local_file)
        util.delete_file(local_file)

    self._download_error = None
    self.set_state(gpodder.STATE_DELETED)
634 def get_playback_url(self
, config
=None, allow_partial
=False):
635 """Local (or remote) playback/streaming filename/URL
637 Returns either the local filename or a streaming URL that
638 can be used to playback this episode.
640 Also returns the filename of a partially downloaded file
641 in case partial (preview) playback is desired.
643 if (allow_partial
and self
.can_preview()):
644 return self
.download_task
.custom_downloader
.partial_filename
646 url
= self
.local_filename(create
=False)
648 if url
is None or not os
.path
.exists(url
):
649 # FIXME: may custom downloaders provide the real url ?
650 url
= registry
.download_url
.resolve(config
, self
.url
, self
, allow_partial
)
653 def find_unique_file_name(self
, filename
, extension
):
654 # Remove leading and trailing whitespace + dots (to avoid hidden files)
655 filename
= filename
.strip('.' + string
.whitespace
) + extension
657 for name
in util
.generate_names(filename
):
658 if (not self
.db
.episode_filename_exists(self
.podcast_id
, name
)
659 or self
.download_filename
== name
):
662 def local_filename(self
, create
, force_update
=False, check_only
=False,
663 template
=None, return_wanted_filename
=False):
664 """Get (and possibly generate) the local saving filename
666 Pass create=True if you want this function to generate a
667 new filename if none exists. You only want to do this when
668 planning to create/download the file after calling this function.
670 Normally, you should pass create=False. This will only
671 create a filename when the file already exists from a previous
672 version of gPodder (where we used md5 filenames). If the file
673 does not exist (and the filename also does not exist), this
674 function will return None.
676 If you pass force_update=True to this function, it will try to
677 find a new (better) filename and move the current file if this
678 is the case. This is useful if (during the download) you get
679 more information about the file, e.g. the mimetype and you want
680 to include this information in the file name generation process.
682 If check_only=True is passed to this function, it will never try
683 to rename the file, even if would be a good idea. Use this if you
684 only want to check if a file exists.
686 If "template" is specified, it should be a filename that is to
687 be used as a template for generating the "real" filename.
689 The generated filename is stored in the database for future access.
691 If return_wanted_filename is True, the filename will not be written to
692 the database, but simply returned by this function (for use by the
693 "import external downloads" feature).
695 if self
.download_filename
is None and (check_only
or not create
):
698 ext
= self
.extension(may_call_local_filename
=False)
700 if not check_only
and (force_update
or not self
.download_filename
):
701 # Avoid and catch gPodder bug 1440 and similar situations
703 logger
.warning('Empty template. Report this podcast URL %s',
707 # Try to find a new filename for the current file
708 if template
is not None:
709 # If template is specified, trust the template's extension
710 episode_filename
, ext
= os
.path
.splitext(template
)
712 episode_filename
, _
= util
.filename_from_url(self
.url
)
714 if 'redirect' in episode_filename
and template
is None:
715 # This looks like a redirection URL - force URL resolving!
716 logger
.warning('Looks like a redirection to me: %s', self
.url
)
717 url
= util
.get_real_url(self
.channel
.authenticate_url(self
.url
))
718 logger
.info('Redirection resolved to: %s', url
)
719 episode_filename
, _
= util
.filename_from_url(url
)
721 # Use title for YouTube, Vimeo and Soundcloud downloads
722 if (youtube
.is_video_link(self
.url
)
723 or vimeo
.is_video_link(self
.url
)
724 or episode_filename
== 'stream'):
725 episode_filename
= self
.title
727 # If the basename is empty, use the md5 hexdigest of the URL
728 if not episode_filename
or episode_filename
.startswith('redirect.'):
729 logger
.error('Report this feed: Podcast %s, episode %s',
730 self
.channel
.url
, self
.url
)
731 episode_filename
= hashlib
.md5(self
.url
.encode('utf-8')).hexdigest()
733 # Also sanitize ext (see #591 where ext=.mp3?dest-id=754182)
734 fn_template
, ext
= util
.sanitize_filename_ext(
737 self
.MAX_FILENAME_LENGTH
,
738 self
.MAX_FILENAME_WITH_EXT_LENGTH
)
739 # Find a unique filename for this episode
740 wanted_filename
= self
.find_unique_file_name(fn_template
, ext
)
742 if return_wanted_filename
:
743 # return the calculated filename without updating the database
744 return wanted_filename
746 # The old file exists, but we have decided to want a different filename
747 if self
.download_filename
and wanted_filename
!= self
.download_filename
:
748 # there might be an old download folder crawling around - move it!
749 new_file_name
= os
.path
.join(self
.channel
.save_dir
, wanted_filename
)
750 old_file_name
= os
.path
.join(self
.channel
.save_dir
, self
.download_filename
)
751 if os
.path
.exists(old_file_name
) and not os
.path
.exists(new_file_name
):
752 logger
.info('Renaming %s => %s', old_file_name
, new_file_name
)
753 os
.rename(old_file_name
, new_file_name
)
754 elif force_update
and not os
.path
.exists(old_file_name
):
755 # When we call force_update, the file might not yet exist when we
756 # call it from the downloading code before saving the file
757 logger
.info('Choosing new filename: %s', new_file_name
)
759 logger
.warning('%s exists or %s does not', new_file_name
, old_file_name
)
760 logger
.info('Updating filename of %s to "%s".', self
.url
, wanted_filename
)
761 elif self
.download_filename
is None:
762 logger
.info('Setting download filename: %s', wanted_filename
)
763 self
.download_filename
= wanted_filename
766 if return_wanted_filename
:
767 # return the filename, not full path
768 return self
.download_filename
769 return os
.path
.join(self
.channel
.save_dir
, self
.download_filename
)
771 def extension(self
, may_call_local_filename
=True):
772 filename
, ext
= util
.filename_from_url(self
.url
)
773 if may_call_local_filename
:
774 filename
= self
.local_filename(create
=False)
775 if filename
is not None:
776 filename
, ext
= os
.path
.splitext(filename
)
777 # if we can't detect the extension from the url fallback on the mimetype
778 if ext
== '' or util
.file_type_by_extension(ext
) is None:
779 ext
= util
.extension_from_mimetype(self
.mime_type
)
790 def file_exists(self
):
791 filename
= self
.local_filename(create
=False, check_only
=True)
795 return os
.path
.exists(filename
)
797 def was_downloaded(self
, and_exists
=False):
798 if self
.state
!= gpodder
.STATE_DOWNLOADED
:
800 if and_exists
and not self
.file_exists():
804 def sync_filename(self
, use_custom
=False, custom_format
=None):
806 return util
.object_string_formatter(custom_format
,
807 episode
=self
, podcast
=self
.channel
)
812 # Assume all YouTube/Vimeo links are video files
813 if youtube
.is_video_link(self
.url
) or vimeo
.is_video_link(self
.url
):
816 return util
.file_type_by_extension(self
.extension())
820 return os
.path
.splitext(os
.path
.basename(self
.url
))[0]
825 Returns published time as HHMM (or 0000 if not available)
828 return datetime
.datetime
.fromtimestamp(self
.published
).strftime('%H%M')
830 logger
.warning('Cannot format pubtime: %s', self
.title
, exc_info
=True)
833 def playlist_title(self
):
834 """Return a title for this episode in a playlist
836 The title will be composed of the podcast name, the
837 episode name and the publication date. The return
838 value is the canonical representation of this episode
839 in playlists (for example, M3U playlists).
841 return '%s - %s (%s)' % (self
.channel
.title
,
845 def cute_pubdate(self
, show_time
=False):
846 result
= util
.format_date(self
.published
)
848 return '(%s)' % _('unknown')
852 timestamp
= datetime
.datetime
.fromtimestamp(self
.published
)
853 return '<small>{}</small>\n{}'.format(timestamp
.strftime('%H:%M'), result
)
859 pubdate_prop
= property(fget
=cute_pubdate
)
def published_datetime(self):
    """Return the publication timestamp as a local-time datetime."""
    timestamp = self.published
    return datetime.datetime.fromtimestamp(timestamp)
866 return self
.published_datetime().strftime('%Y-%m-%d')
def pubdate_day(self):
    """Zero-padded day of month ('01'..'31') of publication."""
    published = self.published_datetime()
    return published.strftime('%d')
def pubdate_month(self):
    """Zero-padded month ('01'..'12') of publication."""
    published = self.published_datetime()
    return published.strftime('%m')
def pubdate_year(self):
    """Two-digit year ('%y') of publication."""
    published = self.published_datetime()
    return published.strftime('%y')
def is_finished(self):
    """Return True if this episode is considered "finished playing"

    An episode is considered "finished" when there is a
    current position mark on the track, and when the
    current position is greater than 99 percent of the
    total time or inside the last 10 seconds of a track.
    """
    position = self.current_position
    total = self.total_time
    if position <= 0 or total <= 0:
        return False
    within_last_10s = position + 10 >= total
    past_99_percent = position >= total * .99
    return within_last_10s or past_99_percent
893 def get_play_info_string(self
, duration_only
=False):
894 duration
= util
.format_time(self
.total_time
)
895 if duration_only
and self
.total_time
> 0:
897 elif self
.is_finished():
898 return '%s (%s)' % (_('Finished'), duration
)
899 elif self
.current_position
> 0 and \
900 self
.current_position
!= self
.total_time
:
901 position
= util
.format_time(self
.current_position
)
902 return '%s / %s' % (position
, duration
)
903 elif self
.total_time
> 0:
def update_from(self, episode):
    """Copy feed-provided metadata fields from *episode* onto this episode."""
    copied_fields = ('title', 'url', 'episode_art_url', 'description', 'description_html', 'chapters', 'link',
                     'published', 'guid', 'payment_url')
    for field in copied_fields:
        setattr(self, field, getattr(episode, field))
    # Don't overwrite file size on downloaded episodes
    # See #648 refreshing a youtube podcast clears downloaded file size
    if self.state != gpodder.STATE_DOWNLOADED:
        self.file_size = episode.file_size
918 class PodcastChannel(PodcastModelObject
):
919 __slots__
= schema
.PodcastColumns
+ ('_common_prefix', '_update_error',)
921 UNICODE_TRANSLATE
= {ord('ö'): 'o', ord('ä'): 'a', ord('ü'): 'u'}
923 # Enumerations for download strategy
924 STRATEGY_DEFAULT
, STRATEGY_LATEST
= list(range(2))
926 # Description and ordering of strategies
928 (STRATEGY_DEFAULT
, _('Default')),
929 (STRATEGY_LATEST
, _('Only keep latest')),
932 MAX_FOLDERNAME_LENGTH
= 60
933 SECONDS_PER_DAY
= 24 * 60 * 60
934 SECONDS_PER_WEEK
= 7 * 24 * 60 * 60
935 EpisodeClass
= PodcastEpisode
937 feed_fetcher
= gPodderFetcher()
939 def __init__(self
, model
, channel_id
=None):
947 self
.description
= ''
948 self
.cover_url
= None
949 self
.payment_url
= None
951 self
.auth_username
= ''
952 self
.auth_password
= ''
954 self
.http_last_modified
= None
955 self
.http_etag
= None
957 self
.auto_archive_episodes
= False
958 self
.download_folder
= None
959 self
.pause_subscription
= False
960 self
.sync_to_mp3_player
= True
961 self
.cover_thumb
= None
963 self
.section
= _('Other')
964 self
._common
_prefix
= None
965 self
.download_strategy
= PodcastChannel
.STRATEGY_DEFAULT
968 self
.children
= self
.db
.load_episodes(self
, self
.episode_factory
)
969 self
._determine
_common
_prefix
()
971 self
._update
_error
= None
979 return self
.parent
.db
def get_download_strategies(self):
    """Yield (selected, value, caption) triples for every known strategy."""
    current = self.download_strategy
    for value, caption in PodcastChannel.STRATEGIES:
        yield current == value, value, caption
985 def set_download_strategy(self
, download_strategy
):
986 if download_strategy
== self
.download_strategy
:
989 caption
= dict(self
.STRATEGIES
).get(download_strategy
)
990 if caption
is not None:
991 logger
.debug('Strategy for %s changed to %s', self
.title
, caption
)
992 self
.download_strategy
= download_strategy
994 logger
.warning('Cannot set strategy to %d', download_strategy
)
996 def rewrite_url(self
, new_url
):
997 new_url
= util
.normalize_feed_url(new_url
)
1002 self
.http_etag
= None
1003 self
.http_last_modified
= None
def check_download_folder(self):
    """Check the download folder for externally-downloaded files

    This will try to assign downloaded files with episodes in the
    database. This will also cause missing files to be marked as
    deleted.

    NOTE(review): several short lines (``continue``, ``found`` flag
    handling, ``known_files`` initialization) were elided in the source
    chunk; reconstructed — verify against upstream.
    """
    known_files = set()

    for episode in self.get_episodes(gpodder.STATE_DOWNLOADED):
        if not episode.was_downloaded():
            continue
        filename = episode.local_filename(create=False)
        if filename is None:
            # No filename has been determined for this episode
            continue
        if not os.path.exists(filename):
            # File has been deleted by the user - simulate a
            # delete event (also marks the episode as deleted)
            logger.debug('Episode deleted: %s', filename)
            episode.delete_from_disk()
            continue
        known_files.add(filename)

    # youtube-dl and yt-dlp create <name>.partial and <name>.partial.<ext> files while downloading.
    # On startup, the latter is reported as an unknown external file.
    # Both files are properly removed when the download completes.
    existing_files = {fn
                      for fn in glob.glob(os.path.join(self.save_dir, '*'))
                      if not fn.endswith('.partial')}

    ignore_files = ['folder' + ext for ext in
                    coverart.CoverDownloader.EXTENSIONS]

    external_files = existing_files.difference(
        list(known_files)
        + [os.path.join(self.save_dir, ignore_file)
           for ignore_file in ignore_files])
    if not external_files:
        return

    all_episodes = self.get_all_episodes()

    for filename in external_files:
        found = False
        basename = os.path.basename(filename)

        # Fast path: an episode already recorded with this file name
        existing = [e for e in all_episodes
                    if e.download_filename == basename]
        if existing:
            existing = existing[0]
            logger.info('Importing external download: %s', filename)
            existing.on_downloaded(filename)
            continue

        # Otherwise match against each episode's wanted file name
        for episode in all_episodes:
            wanted_filename = episode.local_filename(
                create=True, return_wanted_filename=True)
            if basename == wanted_filename:
                logger.info('Importing external download: %s', filename)
                episode.download_filename = basename
                episode.on_downloaded(filename)
                found = True
                break

            wanted_base, wanted_ext = os.path.splitext(wanted_filename)
            target_base, target_ext = os.path.splitext(basename)
            if wanted_base == target_base:
                # Filenames only differ by the extension
                wanted_type = util.file_type_by_extension(wanted_ext)
                target_type = util.file_type_by_extension(target_ext)

                # If wanted type is None, assume that we don't know
                # the right extension before the download (e.g. YouTube)
                # if the wanted type is the same as the target type,
                # assume that it's the correct file
                if wanted_type is None or wanted_type == target_type:
                    logger.info('Importing external download: %s', filename)
                    episode.download_filename = basename
                    episode.on_downloaded(filename)
                    found = True
                    break

        if not found and not util.is_system_file(filename):
            logger.warning('Unknown external file: %s', filename)
@classmethod
def sort_key(cls, podcast):
    """Case-insensitive sort key that ignores a leading 'the '.

    NOTE(review): the @classmethod decorator line was elided in the
    source chunk; reconstructed from the ``cls`` parameter.
    """
    key = util.convert_bytes(podcast.title.lower())
    return re.sub(r'^the ', '', key).translate(cls.UNICODE_TRANSLATE)
@classmethod
def load(cls, model, url, create=True, authentication_tokens=None, max_episodes=0):
    """Return the existing podcast for *url* or subscribe to it.

    NOTE(review): large parts of this method (existing-podcast early
    return, ``create`` guard, save/try/except skeleton, final return)
    were elided in the source chunk; reconstructed — verify upstream.
    """
    existing = [p for p in model.get_podcasts() if p.url == url]
    if existing:
        return existing[0]

    if not create:
        return None

    tmp = cls(model)
    tmp.url = url
    if authentication_tokens is not None:
        tmp.auth_username = authentication_tokens[0]
        tmp.auth_password = authentication_tokens[1]

    # Save podcast, so it gets an ID assigned before
    # updating the feed and adding saving episodes
    tmp.save()

    try:
        tmp.update(max_episodes)
    except Exception:
        logger.debug('Fetch failed. Removing buggy feed.')
        tmp.remove_downloaded()
        tmp.delete()
        raise

    # Determine the section in which this podcast should appear
    tmp.section = tmp._get_content_type()

    # Determine a new download folder now that we have the title
    tmp.get_save_dir(force_new=True)

    # Mark episodes as downloaded if files already exist (bug 902)
    tmp.check_download_folder()

    # Determine common prefix of episode titles
    tmp._determine_common_prefix()

    tmp.save()

    gpodder.user_extensions.on_podcast_subscribe(tmp)
    return tmp
def episode_factory(self, d):
    """
    This function takes a dictionary containing key-value pairs for
    episodes and returns a new PodcastEpisode object that is connected
    to this podcast.

    Returns: A new PodcastEpisode object
    """
    # NOTE(review): the final ``return episode`` was elided in the
    # source chunk; reconstructed.
    episode = self.EpisodeClass.create_from_dict(d, self)
    episode.cache_text_description()
    return episode
1154 def _consume_updated_title(self
, new_title
):
1155 # Replace multi-space and newlines with single space (Maemo bug 11173)
1156 new_title
= re
.sub(r
'\s+', ' ', new_title
).strip()
1158 # Only update the podcast-supplied title when we
1159 # don't yet have a title, or if the title is the
1160 # feed URL (e.g. we didn't find a title before).
1161 if not self
.title
or self
.title
== self
.url
:
1162 self
.title
= new_title
1164 # Start YouTube- and Vimeo-specific title FIX
1165 YOUTUBE_PREFIX
= 'Uploads by '
1166 VIMEO_PREFIX
= 'Vimeo / '
1167 if self
.title
.startswith(YOUTUBE_PREFIX
):
1168 self
.title
= self
.title
[len(YOUTUBE_PREFIX
):] + ' on YouTube'
1169 elif self
.title
.startswith(VIMEO_PREFIX
):
1170 self
.title
= self
.title
[len(VIMEO_PREFIX
):] + ' on Vimeo'
1171 # End YouTube- and Vimeo-specific title FIX
def _consume_metadata(self, title, link, description, cover_url,
                      payment_url):
    """Store feed-level metadata and persist the podcast.

    NOTE(review): the parameter-list continuation, ``self.link = link``
    and the trailing ``self.save()`` were elided in the source chunk;
    reconstructed — verify against upstream.
    """
    self._consume_updated_title(title)
    self.link = link
    self.description = description
    self.cover_url = cover_url
    self.payment_url = payment_url
    self.save()
def _consume_updated_feed(self, feed, max_episodes=0):
    """Merge a freshly fetched *feed* into this podcast.

    Returns the list of genuinely new episodes.
    NOTE(review): pagination initialization (``next_feed = feed``), the
    loop ``break``s, the ``else`` terminating pagination, and the
    ``episode.save()`` calls were elided in the source chunk;
    reconstructed — verify against upstream.
    """
    self._consume_metadata(feed.get_title() or self.url,
                           feed.get_link() or self.link,
                           feed.get_description() or '',
                           feed.get_cover_url() or None,
                           feed.get_payment_url() or None)

    # Update values for HTTP conditional requests
    self.http_etag = feed.get_http_etag() or self.http_etag
    self.http_last_modified = feed.get_http_last_modified() or self.http_last_modified

    # Load all episodes to update them properly.
    existing = self.get_all_episodes()
    # GUID-based existing episode list
    existing_guids = {e.guid: e for e in existing}

    # Get most recent published of all episodes
    last_published = self.db.get_last_published(self) or 0
    # fix for #516 an episode was marked published one month in the future (typo in month number)
    # causing every new episode to be marked old
    tomorrow = datetime.datetime.now().timestamp() + self.SECONDS_PER_DAY
    if last_published > tomorrow:
        logger.debug('Episode published in the future for podcast %s', self.title)
        last_published = tomorrow

    # new episodes from feed
    new_episodes, seen_guids = feed.get_new_episodes(self, existing_guids)

    next_feed = feed
    next_max_episodes = max_episodes - len(seen_guids)
    # want to paginate if:
    # - we raised the max episode count so we want more old episodes now
    # FIXME: could also be that feed has less episodes than max_episodes and we're paginating for nothing
    # - all episodes are new so we continue getting them until max_episodes is reached
    could_have_more = max_episodes > len(existing) or len(new_episodes) == len(seen_guids)
    while next_feed and could_have_more:
        if max_episodes > 0 and next_max_episodes <= 0:
            logger.debug("stopping pagination: seen enough episodes (%i)", max_episodes)
            break
        # brand new: try to load another page!
        next_result = next_feed.get_next_page(self, next_max_episodes)
        if next_result and next_result.status == feedcore.UPDATED_FEED:
            next_feed = next_result.feed
            for e in new_episodes:
                existing_guids[e.guid] = e
            next_new_episodes, next_seen_guids = next_feed.get_new_episodes(self, existing_guids)
            logger.debug("next page has %i new episodes, %i seen episodes", len(next_new_episodes), len(next_seen_guids))
            if not next_seen_guids:
                logger.debug("breaking out of get_next_page loop because no episode in this page")
                break
            next_max_episodes -= len(next_seen_guids)
            new_episodes += next_new_episodes
            seen_guids = seen_guids.union(next_seen_guids)
        else:
            next_feed = None

    # mark episodes not new
    real_new_episodes = []
    # Search all entries for new episodes
    for episode in new_episodes:
        # Workaround for bug 340: If the episode has been
        # published earlier than one week before the most
        # recent existing episode, do not mark it as new.
        if episode.published < last_published - self.SECONDS_PER_WEEK:
            logger.debug('Episode with old date: %s', episode.title)
            episode.is_new = False
            episode.save()

        if episode.is_new:
            real_new_episodes.append(episode)

        # Only allow a certain number of new episodes per update
        if (self.download_strategy == PodcastChannel.STRATEGY_LATEST
                and len(real_new_episodes) > 1):
            episode.is_new = False
            episode.save()

    self.children.extend(new_episodes)

    self.remove_unreachable_episodes(existing, seen_guids, max_episodes)
    return real_new_episodes
def remove_unreachable_episodes(self, existing, seen_guids, max_episodes):
    """Purge episodes that vanished from the feed and re-sort children."""
    # Remove "unreachable" episodes - episodes that have not been
    # downloaded and that the feed does not list as downloadable anymore
    # Keep episodes that are currently being downloaded, though (bug 1534)
    if self.id is not None:
        purgeable = [e for e in existing
                     if e.state != gpodder.STATE_DOWNLOADED
                     and e.guid not in seen_guids
                     and not e.downloading]

        for episode in purgeable:
            logger.debug('Episode removed from feed: %s (%s)',
                    episode.title, episode.guid)
            gpodder.user_extensions.on_episode_removed_from_podcast(episode)
            self.db.delete_episode_by_guid(episode.guid, self.id)

            # Remove the episode from the "children" episodes list
            if self.children is not None:
                self.children.remove(episode)

    # This *might* cause episodes to be skipped if there were more than
    # limit.episodes items added to the feed between updates.
    # The benefit is that it prevents old episodes from appearing as new
    # in certain situations (see bug #340).
    self.db.purge(max_episodes, self.id)  # TODO: Remove from self.children!

    # Sort episodes by pubdate, descending
    self.children.sort(key=lambda e: e.published, reverse=True)
def update(self, max_episodes=0):
    """Fetch the feed and merge its contents into this podcast.

    NOTE(review): the try/except skeleton, the NEW_LOCATION handling
    details (``url = result.feed``, ``self.url = url``), the ``raise``
    in the handler and the method's tail were elided in the source
    chunk; reconstructed — verify against upstream.
    """
    max_episodes = int(max_episodes)
    new_episodes = []
    try:
        result = self.feed_fetcher.fetch_channel(self, max_episodes)

        if result.status == feedcore.UPDATED_FEED:
            new_episodes = self._consume_updated_feed(result.feed, max_episodes)
        elif result.status == feedcore.NEW_LOCATION:
            # FIXME: could return the feed because in autodiscovery it is parsed already
            url = result.feed
            logger.info('New feed location: %s => %s', self.url, url)
            if url in {x.url for x in self.model.get_podcasts()}:
                raise Exception('Already subscribed to ' + url)
            self.url = url
            # With the updated URL, fetch the feed again
            self.update(max_episodes)
            return new_episodes
        elif result.status == feedcore.NOT_MODIFIED:
            pass

        self.save()
    except Exception as e:
        # "Not really" errors
        # feedcore.AuthenticationRequired
        # Temporary errors
        # feedcore.BadRequest
        # feedcore.InternalServerError
        # feedcore.WifiLogin
        # Permanent errors
        # feedcore.Unsubscribe
        # feedcore.NotFound
        # feedcore.InvalidFeed
        # feedcore.UnknownStatusCode
        gpodder.user_extensions.on_podcast_update_failed(self, e)
        raise

    gpodder.user_extensions.on_podcast_updated(self)

    # Re-determine the common prefix for all episodes
    self._determine_common_prefix()

    self.db.commit()
    return new_episodes
def delete(self):
    # Remove this podcast from the database and from the model's list.
    # NOTE(review): the def line was elided in the source chunk; the
    # method name is reconstructed — verify against upstream.
    self.db.delete_podcast(self)
    self.model._remove_podcast(self)
def save(self):
    """Persist this podcast, assigning a download folder if needed.

    NOTE(review): the def line and the call made when download_folder
    is None were elided in the source chunk; reconstructed as
    ``self.get_save_dir()`` — verify against upstream.
    """
    if self.download_folder is None:
        self.get_save_dir()

    gpodder.user_extensions.on_podcast_save(self)

    self.db.save_podcast(self)
    self.model._append_podcast(self)
def get_statistics(self):
    """Return aggregate episode counts as a 5-tuple (zeros when unsaved).

    NOTE(review): the ``if self.id is None`` guard was elided in the
    source chunk; reconstructed from the two visible return statements.
    """
    if self.id is None:
        return (0, 0, 0, 0, 0)
    return self.db.get_podcast_statistics(self.id)
@property
def group_by(self):
    # Lazily derive the section from episode content types.
    # NOTE(review): the decorator/def lines and the tail (likely
    # ``self.save()`` and ``return self.section``) were elided in the
    # source chunk; the property name is reconstructed — verify upstream.
    if not self.section:
        self.section = self._get_content_type()
        self.save()
    return self.section
def _get_content_type(self):
    """Classify the podcast as Video/Audio/Other from episode MIME types.

    NOTE(review): the counter increments, the video/YouTube early
    return and the final comparison/returns were elided in the source
    chunk; reconstructed — verify against upstream.
    """
    if 'youtube.com' in self.url or 'vimeo.com' in self.url:
        return _('Video')

    audio, video, other = 0, 0, 0
    for content_type in self.db.get_content_types(self.id):
        content_type = content_type.lower()
        if content_type.startswith('audio'):
            audio += 1
        elif content_type.startswith('video'):
            video += 1
        else:
            other += 1

    if audio >= video:
        return _('Audio')
    elif video > other:
        return _('Video')

    return _('Other')
def authenticate_url(self, url):
    """Return *url* with this podcast's HTTP credentials embedded."""
    return util.url_add_authentication(
        url, self.auth_username, self.auth_password)
def rename(self, new_title):
    """Rename the podcast, migrating its download folder if necessary.

    NOTE(review): an early ``return``, the ``else:`` between the two
    folder-existence branches and the trailing ``self.save()`` were
    elided in the source chunk; reconstructed — verify upstream.
    """
    new_title = new_title.strip()
    if self.title == new_title:
        return

    fn_template = util.sanitize_filename(new_title, self.MAX_FOLDERNAME_LENGTH)

    new_folder_name = self.find_unique_folder_name(fn_template)
    if new_folder_name and new_folder_name != self.download_folder:
        new_folder = os.path.join(gpodder.downloads, new_folder_name)
        old_folder = os.path.join(gpodder.downloads, self.download_folder)
        if os.path.exists(old_folder):
            if not os.path.exists(new_folder):
                # Old folder exists, new folder does not -> simply rename
                logger.info('Renaming %s => %s', old_folder, new_folder)
                os.rename(old_folder, new_folder)
            else:
                # Both folders exist -> move files and delete old folder
                logger.info('Moving files from %s to %s', old_folder,
                        new_folder)
                for file in glob.glob(os.path.join(old_folder, '*')):
                    shutil.move(file, new_folder)
                logger.info('Removing %s', old_folder)
                shutil.rmtree(old_folder, ignore_errors=True)
        self.download_folder = new_folder_name

    self.title = new_title
    self.save()
1419 def _determine_common_prefix(self
):
1420 # We need at least 2 episodes for the prefix to be "common" ;)
1421 if len(self
.children
) < 2:
1422 self
._common
_prefix
= ''
1425 prefix
= os
.path
.commonprefix([x
.title
for x
in self
.children
])
1426 # The common prefix must end with a space - otherwise it's not
1427 # on a word boundary, and we might end up chopping off too much
1428 if prefix
and prefix
[-1] != ' ':
1429 prefix
= prefix
[:prefix
.rfind(' ') + 1]
1431 self
._common
_prefix
= prefix
def get_all_episodes(self):
    """Return the (already loaded) list of this podcast's episodes."""
    return self.children
def get_episodes(self, state):
    """Return all episodes currently in the given download *state*."""
    return [episode for episode in self.get_all_episodes()
            if episode.state == state]
def find_unique_folder_name(self, download_folder):
    """Return a folder name not used by any other podcast in the db.

    NOTE(review): the ``return folder_name`` inside the loop was elided
    in the source chunk; reconstructed.
    """
    # Remove trailing dots to avoid errors on Windows (bug 600)
    # Also remove leading dots to avoid hidden folders on Linux
    download_folder = download_folder.strip('.' + string.whitespace)

    for folder_name in util.generate_names(download_folder):
        if (not self.db.podcast_download_folder_exists(folder_name)
                or self.download_folder == folder_name):
            return folder_name
def get_save_dir(self, force_new=False):
    """Return (and create) the podcast's download directory.

    NOTE(review): the fallback guard (``if not fn_template:``), the
    old-folder removal attempt, the ``self.save()`` call and the final
    ``return save_dir`` were elided in the source chunk; reconstructed —
    verify against upstream.
    """
    if self.download_folder is None or force_new:
        fn_template = util.sanitize_filename(self.title, self.MAX_FOLDERNAME_LENGTH)
        if not fn_template:
            # Fall back to the URL when the title sanitizes to nothing
            fn_template = util.sanitize_filename(self.url, self.MAX_FOLDERNAME_LENGTH)

        # Find a unique folder name for this podcast
        download_folder = self.find_unique_folder_name(fn_template)

        # Try removing the download folder if it has been created previously
        if self.download_folder is not None:
            folder = os.path.join(gpodder.downloads, self.download_folder)
            try:
                os.rmdir(folder)
            except OSError:
                logger.info('Old download folder is kept for %s', self.url)

            logger.info('Updating download_folder of %s to %s', self.url,
                    download_folder)

        self.download_folder = download_folder
        self.save()

    save_dir = os.path.join(gpodder.downloads, self.download_folder)

    # Create save_dir if it does not yet exist
    if not util.make_directory(save_dir):
        logger.error('Could not create save_dir: %s', save_dir)

    return save_dir
# Expose get_save_dir() as a read-only ``save_dir`` attribute.
save_dir = property(fget=get_save_dir)
def remove_downloaded(self):
    """Notify extensions about each downloaded file, then delete the folder."""
    # Remove the download directory
    for episode in self.get_episodes(gpodder.STATE_DOWNLOADED):
        filename = episode.local_filename(create=False, check_only=True)
        if filename is not None:
            gpodder.user_extensions.on_episode_delete(episode, filename)

    # Second argument True: ignore errors while removing the tree
    shutil.rmtree(self.save_dir, True)
@property
def cover_file(self):
    # Path (without extension) of this podcast's cover image file.
    # NOTE(review): the @property decorator line was elided in the
    # source chunk; reconstructed — verify against upstream.
    return os.path.join(self.save_dir, 'folder')
class Model(object):
    """Container for all subscribed podcasts, backed by the database."""

    PodcastClass = PodcastChannel

    def __init__(self, db):
        # NOTE(review): ``self.db = db`` was elided in the source chunk;
        # reconstructed — verify against upstream.
        self.db = db
        self.children = None  # lazily-loaded list of podcasts

    def _append_podcast(self, podcast):
        # Register a (newly saved) podcast exactly once.
        if podcast not in self.children:
            self.children.append(podcast)

    def _remove_podcast(self, podcast):
        self.children.remove(podcast)
        gpodder.user_extensions.on_podcast_delete(podcast)

    def get_podcasts(self):
        """Return all podcasts, loading them from the database on first use."""
        def podcast_factory(dct, db):
            return self.PodcastClass.create_from_dict(dct, self, dct['id'])

        if self.children is None:
            self.children = self.db.load_podcasts(podcast_factory)

            # Check download folders for changes (bug 902)
            for podcast in self.children:
                podcast.check_download_folder()

        return self.children

    def get_podcast(self, url):
        # NOTE(review): the loop body and fallback return were elided in
        # the source chunk; reconstructed as a linear URL lookup.
        for p in self.get_podcasts():
            if p.url == url:
                return p
        return None

    def load_podcast(self, url, create=True, authentication_tokens=None,
                     max_episodes=0):
        # Caller must not already be subscribed to this URL
        assert all(url != podcast.url for podcast in self.get_podcasts())
        return self.PodcastClass.load(self, url, create,
                                      authentication_tokens,
                                      max_episodes)

    @classmethod
    def podcast_sort_key(cls, podcast):
        return cls.PodcastClass.sort_key(podcast)

    @classmethod
    def episode_sort_key(cls, episode):
        return episode.published

    @classmethod
    def sort_episodes_by_pubdate(cls, episodes, reverse=False):
        """Sort a list of PodcastEpisode objects chronologically

        Returns a iterable, sorted sequence of the episodes
        """
        return sorted(episodes, key=cls.episode_sort_key, reverse=reverse)
def check_root_folder_path():
    """Warn when the gPodder home path risks exceeding Windows path limits.

    Returns a localized warning string, or None when no warning applies.

    NOTE(review): the line computing ``root`` and the threshold guard
    were elided in the source chunk; reconstructed as ``gpodder.home``
    and a 260-character (MAX_PATH) check — verify against upstream.
    """
    root = gpodder.home
    if gpodder.ui.win32:
        # Worst case: home + separator + longest podcast folder name
        # + separator + longest episode file name (with extension).
        longest = len(root) \
            + 1 + PodcastChannel.MAX_FOLDERNAME_LENGTH \
            + 1 + PodcastEpisode.MAX_FILENAME_WITH_EXT_LENGTH
        if longest > 260:  # MAX_PATH on Windows
            # Fix: interpolate *after* translation. The original applied
            # '%' inside _(), so the already-formatted string was used as
            # the gettext msgid and could never match the catalog entry.
            return _("Warning: path to gPodder home (%(root)s) is very long "
                     "and can result in failure to download files.\n") % {"root": root} \
                + _("You're advised to set it to a shorter path.")
    return None