From 29001df8a62a72c8b08c5169ec7745d1d3ba686b Mon Sep 17 00:00:00 2001
From: Alfa-beto <30815244+Alfa-beto@users.noreply.github.com>
Date: Wed, 30 Jan 2019 13:40:53 -0300
Subject: [PATCH] Fixes and new channels
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Fixes:
- CineDeTodo: fixed link detection and added a series section
- CinemaHD: fixed link detection and added a series section
- PelisPlus: fixed link detection
- ZonaWorld: fix for the site's changed page structure

New:
- PelisRex: new movies and series channel
- ReyAnime: new anime series and movies channel.
---
 plugin.video.alfa/channels/cinedetodo.py | 230 ++++++++++++----
 plugin.video.alfa/channels/cinemahd.py   | 216 +++++++++++----
 plugin.video.alfa/channels/pelisplus.py  |   2 +-
 plugin.video.alfa/channels/pelisrex.json |  67 +++++
 plugin.video.alfa/channels/pelisrex.py   | 321 +++++++++++++++++++++++
 plugin.video.alfa/channels/reyanime.json |  53 ++++
 plugin.video.alfa/channels/reyanime.py   | 303 +++++++++++++++++++++
 plugin.video.alfa/channels/zonaworld.py  |  73 +++---
 8 files changed, 1118 insertions(+), 147 deletions(-)
 create mode 100644 plugin.video.alfa/channels/pelisrex.json
 create mode 100644 plugin.video.alfa/channels/pelisrex.py
 create mode 100644 plugin.video.alfa/channels/reyanime.json
 create mode 100644 plugin.video.alfa/channels/reyanime.py

diff --git a/plugin.video.alfa/channels/cinedetodo.py b/plugin.video.alfa/channels/cinedetodo.py
index 6ada755c..07af0a04 100644
--- a/plugin.video.alfa/channels/cinedetodo.py
+++ b/plugin.video.alfa/channels/cinedetodo.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# -*- Channel CinemaHD -*-
+# -*- Channel CineDeTodo -*-
 # -*- Created for Alfa-addon -*-
 # -*- By the Alfa Develop Group -*-
 
@@ -16,39 +16,61 @@ from channels import autoplay
 from channels import filtertools
 
-host = 'http://www.cinedetodo.com/'
+
+host = 'https://www.cinedetodo.net/'
 
 IDIOMAS = {'Latino': 'LAT'}
 list_language = IDIOMAS.values()
 list_quality = []
-list_servers = ['fastplay', 'rapidvideo', 'streamplay', 'flashx', 'streamito', 'streamango', 'vidoza']
+list_servers = ['gounlimited', 'rapidvideo', 'vshare', 'clipwatching', 'jawclowd', 'streamango']
 
 
 def mainlist(item):
     logger.info()
-
     autoplay.init(item.channel, list_servers, list_quality)
 
     itemlist = list()
-    itemlist.append(item.clone(title="Ultimas", action="list_all", url=host, thumbnail=get_thumb('last', auto=True)))
-    itemlist.append(item.clone(title="Generos", action="section", section='genre',
-                               thumbnail=get_thumb('genres', auto=True)))
-    # itemlist.append(item.clone(title="Por Calidad", action="section", section='quality',
-    #                            thumbnail=get_thumb('quality', auto=True)))
-    itemlist.append(item.clone(title="Alfabetico", action="section", section='alpha',
-                               thumbnail=get_thumb('alphabet', auto=True)))
-    itemlist.append(item.clone(title="Buscar", action="search", url=host+'?s=',
-                               thumbnail=get_thumb('search', auto=True)))
+
+    itemlist.append(Item(channel=item.channel, title="Películas", action="sub_menu", url=host,
+                         thumbnail=get_thumb('last', auto=True), type='MovieList'))
+
+    itemlist.append(Item(channel=item.channel, title="Series", action="sub_menu", url=host,
+                         thumbnail=get_thumb('last', auto=True), type='Series'))
+
+    itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host + '?s=',
+                         thumbnail=get_thumb('search', auto=True)))
 
     autoplay.show_option(item.channel, itemlist)
 
     return itemlist
 
 
-def get_source(url):
+def sub_menu(item):
     logger.info()
-    data = httptools.downloadpage(url).data
-    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
+
+    itemlist = []
+
+    itemlist.append(Item(channel=item.channel, title="Ultimas", action="list_all", url=host,
+                         thumbnail=get_thumb('last', auto=True), type=item.type))
+
+    itemlist.append(Item(channel=item.channel, title="Generos", action="section", section='genre',
+                         thumbnail=get_thumb('genres', auto=True), type=item.type ))
+
+    if item.type != 'Series':
+        itemlist.append(Item(channel=item.channel, title="Alfabetico", action="section", section='alpha',
+                             thumbnail=get_thumb('alphabet', auto=True), type=item.type))
+
+
+
+    return itemlist
+
+def get_source(url, referer=None):
+    logger.info()
+    if referer is None:
+        data = httptools.downloadpage(url).data
+    else:
+        data = httptools.downloadpage(url, headers={'Referer':referer}).data
+    data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
     return data
 
 
@@ -57,60 +79,86 @@ def list_all(item):
     itemlist = []
 
     data = get_source(item.url)
-    if item.section == 'alpha':
-        patron = '\d+.*?(.*?).*?'
-        patron += '(\d{4})'
+    full_data = data
+    if item.section != '':
+        data = scrapertools.find_single_match(data, 'class="MovieList NoLmtxt(.*?)')
     else:
-        patron = '.*?.*?'.*?class="MovieList NoLmtxt(.*?)' % item.type)
+
+    if item.section == 'alpha':
+        patron = '\d+.*?')
+    url_next_page = scrapertools.find_single_match(full_data,'')
     if url_next_page:
-        itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_all'))
+        itemlist.append(Item(channel=item.channel, title="Siguiente >>", url=url_next_page, action='list_all',
+                             type=item.type))
     return itemlist
 
 
 def section(item):
     logger.info()
     itemlist = []
-    data = get_source(host)
+    if item.type == 'Series':
+        url = host + '?tr_post_type=2'
+    else:
+        url = host + '?tr_post_type=1'
+    data = get_source(url)
     action = 'list_all'
-    if item.section == 'quality':
-        patron = 'menu-item-object-category.*?menu-item-\d+>(.*?)<\/a>'
-    elif item.section == 'genre':
-        patron = '(.*?)'
-    elif item.section == 'year':
-        patron = 'custom menu-item-15\d+>(\d{4})<\/a><\/li>'
+
+
+    if item.section == 'genre':
+        patron = '(.*?)'
     elif item.section == 'alpha':
-        patron = '  • (.*?)'
+        patron = '  • (.*?)'
         action = 'list_all'
 
+    matches = re.compile(patron, re.DOTALL).findall(data)
 
     for data_one, data_two in matches:
 
@@ -118,38 +166,104 @@ def section(item):
         url = data_one
         title = data_two
         if title != 'Ver más':
-            new_item = Item(channel=item.channel, title= title, url=url, action=action, section=item.section)
+            if item.type == 'Series':
+                url =url + '?tr_post_type=2'
+            else:
+                url = url + '?tr_post_type=1'
+            if 'serie'in title.lower():
+                continue
+            new_item = Item(channel=item.channel, title= title, url=url, action=action, section=item.section,
+                            type=item.type)
             itemlist.append(new_item)
     return itemlist
 
 
+def seasons(item):
+    logger.info()
+
+    itemlist=[]
+
+    data=get_source(item.url)
+    patron='Temporada (\d+)'
+    matches = re.compile(patron, re.DOTALL).findall(data)
+
+    infoLabels = item.infoLabels
+    for season in matches:
+        season = season.lower().replace('temporada','')
+        infoLabels['season']=season
+        title = 'Temporada %s' % season
+        itemlist.append(Item(channel=item.channel, title=title, url=item.url, action='episodesxseasons',
+                             infoLabels=infoLabels))
+    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
+
+    if config.get_videolibrary_support() and len(itemlist) > 0:
+        itemlist.append(
+            Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
+                 action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName))
+
+    return itemlist
+
+def episodios(item):
+    logger.info()
+    itemlist = []
+    templist = seasons(item)
+    for tempitem in templist:
+        itemlist += episodesxseasons(tempitem)
+
+    return itemlist
+
+def episodesxseasons(item):
+    logger.info()
+
+    itemlist = []
+
+    full_data=get_source(item.url)
+    data = scrapertools.find_single_match(full_data, 'Temporada \d+.*?')
+    patron='(\d+)<.*?([^<]+)<'
+    matches = re.compile(patron, re.DOTALL).findall(data)
+
+    infoLabels = item.infoLabels
+
+    for scrapedepisode, scrapedurl, scrapedtitle in matches:
+
+        infoLabels['episode'] = scrapedepisode
+        url = scrapedurl
+        title = '%sx%s - %s' % (infoLabels['season'], infoLabels['episode'], scrapedtitle)
+
+        itemlist.append(Item(channel=item.channel, title= title, url=url, action='findvideos', infoLabels=infoLabels))
+
+    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
+
+    return itemlist
+
+
+
 def findvideos(item):
     logger.info()
     itemlist = []
     data = get_source(item.url)
     data = scrapertools.decodeHtmlentities(data)
-
-    patron = 'id=(Opt\d+)>.*?src=(.*?) frameborder.*?'
+    patron = 'id="(Opt\d+)">.*?src="([^"]+)" frameborder.*?'
     matches = re.compile(patron, re.DOTALL).findall(data)
 
     for option, scrapedurl in matches:
         scrapedurl = scrapedurl.replace('"','').replace('&amp;','&')
         data_video = get_source(scrapedurl)
-        url = scrapertools.find_single_match(data_video, '    .*?src=(.*?) frameborder')
-        opt_data = scrapertools.find_single_match(data,'%s>.*?.*?(.*?)'%option).split('-')
+        url = scrapertools.find_single_match(data_video, '    .*?src="([^"]+)" frameborder')
+        opt_data = scrapertools.find_single_match(data,'"%s">.*?.*?(.*?)'%option).split('-')
         language = opt_data[0].strip()
-        language = language.replace('(','').replace(')','')
+        language = re.sub('\(|\)', '', language)
         quality = opt_data[1].strip()
         if url != '' and 'youtube' not in url:
-            itemlist.append(item.clone(title='%s', url=url, language=IDIOMAS[language], quality=quality, action='play'))
+            itemlist.append(Item(channel=item.channel, title='%s', url=url, language=IDIOMAS[language], quality=quality,
+                                 action='play', infoLabels=item.infoLabels))
         elif 'youtube' in url:
-            trailer = item.clone(title='Trailer', url=url, action='play', server='youtube')
+            trailer = Item(channel=item.channel, title='Trailer', url=url, action='play', server='youtube')
 
     itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % '%s [%s] [%s]'%(i.server.capitalize(),
                                                                                               i.language, i.quality))
-    tmdb.set_infoLabels_itemlist(itemlist, True)
     try:
         itemlist.append(trailer)
     except:
@@ -175,7 +289,7 @@ def search(item, texto):
     logger.info()
     texto = texto.replace(" ", "+")
     item.url = item.url + texto
-
+    item.section = 'search'
     if texto != '':
         return list_all(item)
     else:
@@ -190,11 +304,11 @@ def newest(categoria):
         if categoria in ['peliculas','latino']:
             item.url = host
         elif categoria == 'infantiles':
-            item.url = host+'/animacion'
+            item.url = host+'animacion/?tr_post_type=1'
         elif categoria == 'terror':
-            item.url = host+'/terror'
-        elif categoria == 'documentales':
-            item.url = host+'/documental'
+            item.url = host+'terror/?tr_post_type=1'
+        item.type = 'MovieList'
+        item.section = 'search'
         itemlist = list_all(item)
         if itemlist[-1].title == 'Siguiente >>':
             itemlist.pop()

diff --git a/plugin.video.alfa/channels/cinemahd.py b/plugin.video.alfa/channels/cinemahd.py
index 9bc60787..775ba87e 100644
--- a/plugin.video.alfa/channels/cinemahd.py
+++ b/plugin.video.alfa/channels/cinemahd.py
@@ -16,41 +16,61 @@ from channels import autoplay
 from channels import filtertools
 
-host = 'http://www.cinemahd.co/'
+
+host = 'https://www.cinemahd.co/'
 
 IDIOMAS = {'Latino': 'LAT'}
 list_language = IDIOMAS.values()
 list_quality = []
-list_servers = ['fastplay', 'rapidvideo', 'streamplay', 'flashx', 'streamito', 'streamango', 'vidoza']
+list_servers = ['gounlimited', 'rapidvideo', 'vshare', 'clipwatching', 'jawclowd', 'streamango']
 
 
 def mainlist(item):
     logger.info()
-
     autoplay.init(item.channel, list_servers, list_quality)
 
     itemlist = list()
-    itemlist.append(Item(channel=item.channel, title="Ultimas", action="list_all", url=host, thumbnail=get_thumb('last', auto=True)))
-    itemlist.append(Item(channel=item.channel, title="Generos", action="section", section='genre',
-                         thumbnail=get_thumb('genres', auto=True)))
-    itemlist.append(Item(channel=item.channel, title="Por Calidad", action="section", section='quality',
-                         thumbnail=get_thumb('quality', auto=True)))
-    itemlist.append(Item(channel=item.channel, title="Por Año", action="section", section='year',
-                         thumbnail=get_thumb('year', auto=True)))
-    itemlist.append(Item(channel=item.channel, title="Alfabetico", action="section", section='alpha',
-                         thumbnail=get_thumb('alphabet', auto=True)))
-    itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host+'?s=',
-                         thumbnail=get_thumb('search', auto=True)))
+
+    itemlist.append(Item(channel=item.channel, title="Películas", action="sub_menu", url=host,
+                         thumbnail=get_thumb('last', auto=True), type='MovieList'))
+
+    itemlist.append(Item(channel=item.channel, title="Series", action="sub_menu", url=host,
+                         thumbnail=get_thumb('last', auto=True), type='Series'))
+
+    itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host + '?s=',
+                         thumbnail=get_thumb('search', auto=True)))
 
     autoplay.show_option(item.channel, itemlist)
 
     return itemlist
 
 
-def get_source(url):
+def sub_menu(item):
     logger.info()
-    data = httptools.downloadpage(url).data
-    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
+
+    itemlist = []
+
+    itemlist.append(Item(channel=item.channel, title="Ultimas", action="list_all", url=host,
+                         thumbnail=get_thumb('last', auto=True), type=item.type))
+
+    itemlist.append(Item(channel=item.channel, title="Generos", action="section", section='genre',
+                         thumbnail=get_thumb('genres', auto=True), type=item.type ))
+
+    if item.type != 'Series':
+        itemlist.append(Item(channel=item.channel, title="Alfabetico", action="section", section='alpha',
+                             thumbnail=get_thumb('alphabet', auto=True), type=item.type))
+
+
+
+    return itemlist
+
+def get_source(url, referer=None):
+    logger.info()
+    if referer is None:
+        data = httptools.downloadpage(url).data
+    else:
+        data = httptools.downloadpage(url, headers={'Referer':referer}).data
+    data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
     return data
 
 
@@ -60,14 +80,18 @@ def list_all(item):
     data = get_source(item.url)
     full_data = data
-    data = scrapertools.find_single_match(data, '      ')
+    if item.section != '':
+        data = scrapertools.find_single_match(data, 'class="MovieList NoLmtxt(.*?)    ')
+    else:
+        data = scrapertools.find_single_match(data, '