From 8fb76cf42ae0f5b6cfa32e01d27bd42ab1f8aede Mon Sep 17 00:00:00 2001
From: Alfa-beto <30815244+Alfa-beto@users.noreply.github.com>
Date: Wed, 26 Dec 2018 20:56:59 -0300
Subject: [PATCH 1/6] Animeshd
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

- Animeshd: fix for the site's structure change and code improvements
---
 plugin.video.alfa/channels/animeshd.py | 24 ++++++++++++++----------
 1 file changed, 14 insertions(+), 10 deletions(-)

diff --git a/plugin.video.alfa/channels/animeshd.py b/plugin.video.alfa/channels/animeshd.py
index 8d343d3b..ea2504a0 100644
--- a/plugin.video.alfa/channels/animeshd.py
+++ b/plugin.video.alfa/channels/animeshd.py
@@ -90,10 +90,13 @@ def mainlist(item):
     return itemlist


-def get_source(url):
+def get_source(url, referer=None):
     logger.info()
-    data = httptools.downloadpage(url).data
-    data = re.sub(r'\n|\r|\t| |<br>|\s{2,}|"|\(|\)', "", data)
+    if referer is None:
+        data = httptools.downloadpage(url).data
+    else:
+        data = httptools.downloadpage(url, headers={'Referer':referer}).data
+    data = re.sub(r'\n|\r|\t| |<br>|\s{2,}', "", data)
     return data
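For reference, the new helper in one piece, with a usage sketch. It assumes the Alfa add-on environment (where core.httptools is importable), and the URLs in the commented call are placeholders, not real AnimesHD paths:

    from core import httptools   # Alfa add-on module; only available inside the add-on
    import re

    def get_source(url, referer=None):
        # Send a Referer header only when the caller provides one.
        if referer is None:
            data = httptools.downloadpage(url).data
        else:
            data = httptools.downloadpage(url, headers={'Referer': referer}).data
        # Flatten the HTML so the channel's regex patterns match on a single line.
        data = re.sub(r'\n|\r|\t| |<br>|\s{2,}', "", data)
        return data

    # Placeholder URLs, for illustration only:
    # data = get_source('https://example.org/ver/episodio-1',
    #                   referer='https://example.org/anime/ejemplo')
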
@@ -107,10 +110,11 @@ def lista(item):
     post = {'tipo': 'episodios', '_token': 'rAqVX74O9HVHFFigST3M9lMa5VL7seIO7fT8PBkl'}
     post = urllib.urlencode(post)
     data = get_source(item.url)
-    patron = 'class=anime>
.*?

(.*?)<\/h2><\/a><\/div>' + patron = 'class="anime">' + patron +='
%s+ - (\d+)
' % season + patron = '

%s+ - (\d+|\d+\/\d+)
' % season patron += '
(.*?)<'
     matches = re.compile(patron, re.DOTALL).findall(data)
     for scrapedthumbnail, scrapedepi, scrapedurl, scrapedtitle in matches:
+        if '/' in scrapedepi:
+            scrapedepi = scrapertools.find_single_match (scrapedepi, '(\d+)\/\d+')
+
         title = '%sx%s - %s' % (season, scrapedepi, scrapedtitle)
         infoLabels['episode'] = scrapedepi
         if scrapedepi > 0:

From fcd95e1849a6919eae4f48070f65f7bcd147377c Mon Sep 17 00:00:00 2001
From: Alfa-beto <30815244+Alfa-beto@users.noreply.github.com>
Date: Mon, 31 Dec 2018 12:06:27 -0300
Subject: [PATCH 3/6] corrections
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

- SeriesMetro: fix for multi-episode entries and code improvements
- HDFilmologia: enabled
- PelisHD24: fix for link detection
---
 plugin.video.alfa/channels/hdfilmologia.json |  2 +-
 plugin.video.alfa/channels/pelishd24.py      |  3 +++
 plugin.video.alfa/channels/seriesmetro.py    | 14 +++++++-------
 3 files changed, 11 insertions(+), 8 deletions(-)

diff --git a/plugin.video.alfa/channels/hdfilmologia.json b/plugin.video.alfa/channels/hdfilmologia.json
index 15460bcb..5c08af37 100644
--- a/plugin.video.alfa/channels/hdfilmologia.json
+++ b/plugin.video.alfa/channels/hdfilmologia.json
@@ -9,7 +9,7 @@
     "banner": "",
     "categories": [
         "movie",
-        "vose",
+        "vos"
     ],
     "settings": [
         {

diff --git a/plugin.video.alfa/channels/pelishd24.py b/plugin.video.alfa/channels/pelishd24.py
index c4b25295..bbe9d11f 100644
--- a/plugin.video.alfa/channels/pelishd24.py
+++ b/plugin.video.alfa/channels/pelishd24.py
@@ -426,7 +426,10 @@ def findvideos(item):
         url = url.replace('\\', '')
         servername = servertools.get_server_from_url(url)
         if 'pelishd24.net' in url or 'stream.pelishd24.com' in url:
+            url = url.strip()
             vip_data = httptools.downloadpage(url).data
+            if 'Archivo ELiminado' in vip_data:
+                continue
             dejuiced = generictools.dejuice(vip_data)
             patron = '"file":"([^"]+)"'
             match = re.compile(patron, re.DOTALL).findall(dejuiced)

diff --git a/plugin.video.alfa/channels/seriesmetro.py b/plugin.video.alfa/channels/seriesmetro.py
index 32f0bd83..34ceb06f 100644
--- a/plugin.video.alfa/channels/seriesmetro.py
+++ b/plugin.video.alfa/channels/seriesmetro.py
@@ -77,7 +77,7 @@ def list_all(item):
     tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

     # Paginacion
-    next_page = scrapertools.find_single_match(data, 'Página siguiente')
+    next_page = scrapertools.find_single_match(data, 'class=\'current\'>\d.*?href="([^"]+)">')
     if next_page != '':
         itemlist.append(Item(channel=item.channel, action="list_all", title='Siguiente >>>', url=next_page,
                              thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png',
@@ -187,12 +187,12 @@ def findvideos(item):
     matches = re.compile(patron, re.DOTALL).findall(data)

     for link in matches:
-        if 'id=' in link:
-            id_type = 'id'
-            ir_type = 'ir'
-        elif 'ud=' in link:
-            id_type = 'ud'
-            ir_type = 'ur'
+
+        id_letter = scrapertools.find_single_match(link, '?(\w)d')
+
+        id_type = '%sd' % id_letter
+        ir_type = '%sr' % id_letter
+
         id = scrapertools.find_single_match(link, '%s=(.*)' % id_type)
         base_link = scrapertools.find_single_match(link, '(.*?)%s=' % id_type)
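The SeriesMetro hunk above derives the 'id'/'ud' parameter names from the link itself instead of hard-coding both branches. A standalone sketch of the same idea in plain Python (no add-on imports; the sample URLs are invented, and the literal '?' is escaped here when it is used inside a regular expression):

    import re

    def split_link(link):
        # First letter of the '?xd=' query parameter: 'i' for id=, 'u' for ud=.
        id_letter = re.search(r'\?(\w)d=', link).group(1)
        id_type = '%sd' % id_letter   # 'id' or 'ud'
        ir_type = '%sr' % id_letter   # 'ir' or 'ur'
        video_id = re.search(r'%s=(.*)' % id_type, link).group(1)
        base_link = re.search(r'(.*?)\?%s=' % id_type, link).group(1)
        return base_link, id_type, ir_type, video_id

    # Invented example links, purely for illustration:
    print(split_link('https://example.com/embed?id=abc123'))
    print(split_link('https://example.com/embed?ud=xyz789'))
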
From 3db04d3a73f598a3bd49c6aa963ed0b08871226f Mon Sep 17 00:00:00 2001
From: Alfa-beto <30815244+Alfa-beto@users.noreply.github.com>
Date: Tue, 1 Jan 2019 20:03:53 -0300
Subject: [PATCH 4/6] SeriesAnimadas
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

- SeriesAnimadas: fixes for the site's structure change
---
 plugin.video.alfa/channels/seriesanimadas.py | 77 +++++++-------------
 1 file changed, 27 insertions(+), 50 deletions(-)

diff --git a/plugin.video.alfa/channels/seriesanimadas.py b/plugin.video.alfa/channels/seriesanimadas.py
index 5a0747c4..cf612140 100644
--- a/plugin.video.alfa/channels/seriesanimadas.py
+++ b/plugin.video.alfa/channels/seriesanimadas.py
@@ -45,10 +45,11 @@ def mainlist(item):
     itemlist.append(Item(channel=item.channel, title='Nuevos Capitulos', url=host, action='new_episodes', type='tvshows',
                          thumbnail=get_thumb('new_episodes', auto=True)))

-    itemlist.append(Item(channel=item.channel, title='Ultimas', url=host + 'series?', action='list_all', type='tvshows',
+    itemlist.append(Item(channel=item.channel, title='Ultimas', url=host + 'series/estrenos', action='list_all',
+                         type='tvshows',
                          thumbnail=get_thumb('last', auto=True)))

-    itemlist.append(Item(channel=item.channel, title='Todas', url=host + 'series?', action='list_all', type='tvshows',
+    itemlist.append(Item(channel=item.channel, title='Todas', url=host + 'series', action='list_all', type='tvshows',
                          thumbnail=get_thumb('all', auto=True)))

     itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host + 'search?s=',
@@ -83,19 +84,14 @@ def list_all(item):
     itemlist = []
     data = get_source(item.url)
-
-    patron = '
.*?.*?' - patron +='(\d{4}).*?([^<]+)<' + patron = '
.*? href="([^"]+)".*?([^<]+)<' matches = re.compile(patron, re.DOTALL).findall(data) - for scrapedurl, scrapedthumbnail, scrapedtitle, year, scrapedtype in matches: + for scrapedurl, scrapedthumbnail, scrapedtitle in matches: title = scrapedtitle thumbnail = scrapedthumbnail url = scrapedurl - if scrapedtype == 'Anime': - action = 'episodesxseasons' - elif scrapedtype == 'Serie': - action = 'seasons' + action = 'seasons' new_item = Item(channel=item.channel, action=action, @@ -103,15 +99,14 @@ def list_all(item): url=url, contentSerieName=scrapedtitle, thumbnail=thumbnail, - type=scrapedtype, - infoLabels={'year':year}) + ) itemlist.append(new_item) tmdb.set_infoLabels(itemlist, seekTmdb=True) # Paginación - url_next_page = scrapertools.find_single_match(data,'li>') + url_next_page = scrapertools.find_single_match(data,'
  • Temporada (\d+)' + patron='
    Temporada (\d+)
    ' matches = re.compile(patron, re.DOTALL).findall(data) - + if len(matches) == 0: + item.type = 'Anime' + return episodesxseasons(item) infoLabels = item.infoLabels - for scrapedurl, season in matches: + for season in matches: infoLabels['season']=season title = 'Temporada %s' % season - itemlist.append(Item(channel=item.channel, title=title, url=scrapedurl, action='episodesxseasons', + itemlist.append(Item(channel=item.channel, title=title, url=item.url, action='episodesxseasons', infoLabels=infoLabels)) tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) @@ -156,23 +153,27 @@ def episodesxseasons(item): itemlist = [] data=get_source(item.url) - patron='
    ' - matches = re.compile(patron, re.DOTALL).findall(data) infoLabels = item.infoLabels if item.type == 'Anime': season = '1' + patron = ' Episodio (\d+).*?' else: season = item.infoLabels['season'] - episode = len(matches) - for scrapedurl, scrapedtitle in matches: + + patron = 'class="episodie-list" href="([^"]+)" title=".*?Temporada %s .*?pisodio (\d+).*?">' % season + matches = re.compile(patron, re.DOTALL).findall(data) + + if not matches: + patron = 'class="episodie-list" href="([^"]+)" title=".*?pisodio (\d+).*?">' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, episode in matches: infoLabels['episode'] = episode url = scrapedurl - title = scrapedtitle.replace(' online', '') - title = '%sx%s - %s' % (season, episode, title) + title = '%sx%s - Episodio %s' % (season, episode, episode) itemlist.append(Item(channel=item.channel, title= title, url=url, action='findvideos', infoLabels=infoLabels)) - episode -= 1 tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) return itemlist[::-1] @@ -183,7 +184,7 @@ def new_episodes(item): itemlist = [] data = get_source(item.url) - patron = '
    .*?.*?data-src="([^"]+)" alt="([^"]+)">' + patron = '
    ' matches = re.compile(patron, re.DOTALL).findall(data) @@ -248,34 +249,10 @@ def search(item, texto): item.url = item.url + texto if texto != '': - return search_results(item) + return list_all(item) else: return [] -def search_results(item): - logger.info() - - itemlist=[] - - data=get_source(item.url) - - patron = '
Date: Wed, 2 Jan 2019 12:28:01 -0300
Subject: [PATCH 5/6] tvmoviedb

- Tvmoviedb: fixes for trakt
---
 plugin.video.alfa/channels/tvmoviedb.py | 32 ++++++++++++-------------
 1 file changed, 15 insertions(+), 17 deletions(-)

diff --git a/plugin.video.alfa/channels/tvmoviedb.py b/plugin.video.alfa/channels/tvmoviedb.py
index 1ec3fc1c..3b4ca8c9 100644
--- a/plugin.video.alfa/channels/tvmoviedb.py
+++ b/plugin.video.alfa/channels/tvmoviedb.py
@@ -2185,21 +2185,20 @@ def acciones_trakt(item):
                'runtime': config.get_localized_string(70471), 'popularity': config.get_localized_string(70472),
                'percentage': config.get_localized_string(70473), 'votes': config.get_localized_string(70474),
                'asc': config.get_localized_string(70475), 'desc': config.get_localized_string(70476)}
     orden = valores[item.order] + " " + valores[item.how]
-    itemlist.append(item.clone(title=config.get_localized_string(70349) % orden, action="order_list",
-                               text_color=color4))
+    # itemlist.append(item.clone(title=config.get_localized_string(70349) % orden, action="order_list",
+    #                            text_color=color4))
     ratings = []
     try:
-        if item.order:
-            if item.how == "asc":
-                reverse = False
-            else:
-                reverse = True
-
-            if item.order == "rank" or item.order == "added":
-                data = sorted(data, key=lambda x: x[item.order.replace("added", "listed_at")], reverse=reverse)
-            else:
-                order = item.order.replace("popularity", "votes").replace("percentage", "rating")
-                data = sorted(data, key=lambda x: x[x['type']].get(order, 0), reverse=reverse)
+        # if item.order:
+        #     if item.how == "asc":
+        #         reverse = False
+        #     else:
+        #         reverse = True
+        #     if item.order == "rank" or item.order == "added":
+        #         data = sorted(data, key=lambda x: x[item.order.replace("added", "last_collected_at")], reverse=reverse)
+        #     else:
+        #         order = item.order.replace("popularity", "votes").replace("percentage", "rating")
+        #         data = sorted(data, key=lambda x: x[x['type']].get(order, 0), reverse=reverse)
         for entry in data:
             try:
@@ -2259,7 +2258,7 @@ def order_list(item):
     logger.info()
     list_controls = []

-    valores1 = ['rank', 'added', 'title', 'released', 'runtime', 'popularity', 'percentage', 'votes']
+    valores1 = ['rating', 'added', 'title', 'released', 'runtime', 'popularity', 'percentage', 'votes']
     valores2 = ['asc', 'desc']
     dict_values = {'orderby': valores1.index(item.order), 'orderhow': valores2.index(item.how)}

@@ -2268,9 +2267,8 @@
                            'type': 'list', 'default': 0, 'visible': True})
     list_controls.append({'id': 'orderhow', 'label': 'De forma:', 'enabled': True,
                           'type': 'list', 'default': 0, 'visible': True})
-    list_controls[0]['lvalues'] = [config.get_localized_string(70003), config.get_localized_string(70469), config.get_localized_string(60230), config.get_localized_string(70470), config.get_localized_string(70471), config.get_localized_string(70472),
-                                   config.get_localized_string(70473), config.get_localized_string(70474)]
-    list_controls[1]['lvalues'] = [config.get_localized_string(70477), config.get_localized_string(70478)]
+    list_controls[0]['lvalues'] = ['rank', 'added', 'title', 'released', 'runtime', 'popularity', 'percentage', 'votes']
+    list_controls[1]['lvalues'] = ['asc', 'desc']

     return platformtools.show_channel_settings(list_controls=list_controls, dict_values=dict_values,
                                                caption=config.get_localized_string(70320), item=item, callback='order_trakt')
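The block that PATCH 5 comments out performed the client-side ordering of Trakt list entries. For reference, a self-contained sketch of that sorting logic with made-up sample data (the entry layout mirrors what the code above expects: top-level 'rank'/'listed_at' fields plus the media object nested under entry['type']):

    # Made-up sample entries, shaped like the Trakt list items iterated above.
    entries = [
        {'rank': 2, 'listed_at': '2019-01-01', 'type': 'movie',
         'movie': {'title': 'B', 'votes': 120, 'rating': 7.1}},
        {'rank': 1, 'listed_at': '2018-12-25', 'type': 'show',
         'show': {'title': 'A', 'votes': 80, 'rating': 8.3}},
    ]

    def order_entries(data, order='rank', how='asc'):
        reverse = (how == 'desc')
        if order in ('rank', 'added'):
            # 'added' is stored as 'listed_at' on the entry itself.
            key = lambda x: x[order.replace('added', 'listed_at')]
        else:
            # Ratings/votes live on the nested movie/show object; default to 0 if absent.
            field = order.replace('popularity', 'votes').replace('percentage', 'rating')
            key = lambda x: x[x['type']].get(field, 0)
        return sorted(data, key=key, reverse=reverse)

    print([e[e['type']]['title'] for e in order_entries(entries, 'percentage', 'desc')])  # ['A', 'B']
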
From 368a10ef1ac9b98a380cb97acfe0b5f93e04e033 Mon Sep 17 00:00:00 2001
From: Alfa-beto <30815244+Alfa-beto@users.noreply.github.com>
Date: Wed, 2 Jan 2019 12:41:53 -0300
Subject: [PATCH 6/6] PelisPlay
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

- PelisPlay: fix for adding series to the videolibrary
---
 plugin.video.alfa/channels/pelisplay.py | 23 +++++++++++++++--------
 1 file changed, 15 insertions(+), 8 deletions(-)

diff --git a/plugin.video.alfa/channels/pelisplay.py b/plugin.video.alfa/channels/pelisplay.py
index 621ff92e..50e920cc 100644
--- a/plugin.video.alfa/channels/pelisplay.py
+++ b/plugin.video.alfa/channels/pelisplay.py
@@ -286,7 +286,7 @@ def temporadas(item):
     if len(matches) > 1:
         for scrapedthumbnail, temporada, url in matches:
-            new_item = item.clone(action="episodios", season=temporada, url=url,
+            new_item = item.clone(action="episodesxseason", season=temporada, url=url,
                                   thumbnail=host + scrapedthumbnail, extra='serie')
             new_item.infoLabels['season'] = temporada
             new_item.extra = ""
@@ -308,10 +308,18 @@ def temporadas(item):
                              text_color=color1, thumbnail=get_thumb("videolibrary_tvshow.png"), fanart=fanart_host))
         return itemlist
     else:
-        return episodios(item)
-
+        return episodesxseason(item)

 def episodios(item):
+    logger.info()
+    itemlist = []
+    templist = temporadas(item)
+    for tempitem in templist:
+        itemlist += episodesxseason(tempitem)
+    return itemlist
+
+
+def episodesxseason(item):
     logger.info()
     itemlist = []
     from core import jsontools
@@ -331,7 +339,6 @@ def episodios(item):
             episode = element['metas_formateadas']['nepisodio']
             season = element['metas_formateadas']['ntemporada']
             scrapedurl = element['url_directa']
-
             if 'season' in item.infoLabels and int(item.infoLabels['season']) != int(season):
                 continue
             title = "%sx%s: %s" % (season, episode.zfill(
@@ -359,10 +366,10 @@ def episodios(item):
                              reverse=config.get_setting('orden_episodios', __channel__))
     tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
     # Opción "Añadir esta serie a la videoteca"
-    if config.get_videolibrary_support() and len(itemlist) > 0:
-        itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
-                             action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
-                             text_color=color1, thumbnail=get_thumb("videolibrary_tvshow.png"), fanart=fanart_host))
+    # if config.get_videolibrary_support() and len(itemlist) > 0:
+    #     itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
+    #                          action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
+    #                          text_color=color1, thumbnail=get_thumb("videolibrary_tvshow.png"), fanart=fanart_host))

     return itemlist
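PATCH 6 splits the old episodios() into a per-season scraper (episodesxseason) plus a thin aggregator, because the videolibrary import needs every episode of the show while normal navigation works one season at a time. A self-contained sketch of that wrapper pattern (function names follow the diff; the season/episode data is faked here, the real functions scrape the PelisPlay site):

    # Faked show data standing in for what the real channel scrapes per season.
    FAKE_SHOW = {1: ['Piloto', 'Capitulo 2'], 2: ['Regreso']}

    def temporadas(show):
        # Real code scrapes the season blocks from the page.
        return sorted(show)

    def episodesxseason(show, season):
        # Real code reads one season's episodes from the site's JSON.
        return ['%sx%s: %s' % (season, str(i).zfill(2), title)
                for i, title in enumerate(show[season], 1)]

    def episodios(show):
        # Aggregate every season; this is the full list add_serie_to_library expects.
        itemlist = []
        for season in temporadas(show):
            itemlist += episodesxseason(show, season)
        return itemlist

    print(episodios(FAKE_SHOW))
    # ['1x01: Piloto', '1x02: Capitulo 2', '2x01: Regreso']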