From cdbc0055bcfe0978ec6c9f6f53a938a9dda31f05 Mon Sep 17 00:00:00 2001
From: Alfa <30527549+alfa-addon@users.noreply.github.com>
Date: Wed, 25 Jul 2018 12:08:33 -0500
Subject: [PATCH] Delete retroseriestv.py

---
 plugin.video.alfa/channels/retroseriestv.py | 214 --------------------
 1 file changed, 214 deletions(-)
 delete mode 100644 plugin.video.alfa/channels/retroseriestv.py

diff --git a/plugin.video.alfa/channels/retroseriestv.py b/plugin.video.alfa/channels/retroseriestv.py
deleted file mode 100644
index cb27b556..00000000
--- a/plugin.video.alfa/channels/retroseriestv.py
+++ /dev/null
@@ -1,214 +0,0 @@
-# -*- coding: utf-8 -*-
-# -*- Channel RetroSeriesTV -*-
-# -*- Created for Alfa-addon -*-
-# -*- By the Alfa Develop Group -*-
-
-import re
-import urllib
-from channelselector import get_thumb
-from core import httptools
-from core import scrapertools
-from core import servertools
-from core import tmdb
-from core.item import Item
-from platformcode import config, logger
-from channels import autoplay
-from channels import filtertools
-
-
-host = 'https://retroseriestv.com/'
-
-# IDIOMAS = {'la': 'LAT', 'es': 'Cast'}
-# list_language = IDIOMAS.values()
-# list_quality = []
-# list_servers = ['openload']
-
-
-def mainlist(item):
-    logger.info()
-
-    itemlist = list()
-    itemlist.append(item.clone(title="Todas", action="list_all", url=host + 'seriestv/', thumbnail=get_thumb('all',
-                               auto=True)))
-
-    itemlist.append(item.clone(title="Generos", action="section", url=host, thumbnail=get_thumb('genres', auto=True),
-                               section='genres'))
-
-    itemlist.append(item.clone(title="Por Año", action="section", url=host, thumbnail=get_thumb('year', auto=True),
-                               section='year'))
-
-    itemlist.append(item.clone(title="Alfabetico", action="section", url=host, thumbnail=get_thumb('alphabet', auto=True),
-                               section='abc'))
-
-    itemlist.append(item.clone(title="Buscar", action="search", url=host+'?s=',
-                               thumbnail=get_thumb('search', auto=True)))
-
-    return itemlist
-
-
-def get_source(url):
-    logger.info()
-    data = httptools.downloadpage(url).data
-    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
-    return data
-
-
-def list_all(item):
-    logger.info()
-    itemlist = []
-
-    data = get_source(item.url)
-    patron = '(.*?)<.*?(.*?)<'
-
-    matches = re.compile(patron, re.DOTALL).findall(data)
-
-    for scrapedurl, scrapedthumbnail, scrapedtitle, year in matches:
-
-        url = scrapedurl
-        contentSerieName = scrapedtitle
-        thumbnail = scrapedthumbnail
-        itemlist.append(item.clone(action='seasons',
-                                   title=contentSerieName,
-                                   url=url,
-                                   thumbnail=thumbnail,
-                                   contentSerieName=contentSerieName,
-                                   infoLabels={'year':year}
-                                   ))
-    tmdb.set_infoLabels_itemlist(itemlist, True)
-
-    # Paginación
-
-    url_next_page = scrapertools.find_single_match(data,'rel=next.*?href=(.*?) ')
-    if url_next_page:
-        itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_all'))
-    return itemlist
-
-
-def section(item):
-    logger.info()
-
-    itemlist = []
-
-    data = get_source(item.url)
-    data = scrapertools.find_single_match(data, '