From feedeeb47d99b5e6f60bf7c13c0ebd9397210fa5 Mon Sep 17 00:00:00 2001
From: chivmalev
Date: Wed, 20 Feb 2019 10:00:40 -0300
Subject: [PATCH 1/5] maxipelis24:correcciones

---
 plugin.video.alfa/channels/maxipelis24.py | 35 ++++++++++++-----------
 1 file changed, 19 insertions(+), 16 deletions(-)

diff --git a/plugin.video.alfa/channels/maxipelis24.py b/plugin.video.alfa/channels/maxipelis24.py
index f0e71d21..674e8417 100644
--- a/plugin.video.alfa/channels/maxipelis24.py
+++ b/plugin.video.alfa/channels/maxipelis24.py
@@ -28,11 +28,11 @@ def mainlist(item):
     itemlist.append(Item(channel=item.channel, title="Peliculas", action="movies", url=host, page=0,
                          thumbnail=get_thumb('movies', auto=True)))
     itemlist.append(Item(channel=item.channel, action="category", title="Año de Estreno",
-                         url=host, cat='year', page=0, thumbnail=get_thumb('year', auto=True)))
+                         url=host, cat='year', thumbnail=get_thumb('year', auto=True)))
     itemlist.append(Item(channel=item.channel, action="category", title="Géneros",
-                         url=host, cat='genre', page=0, thumbnail=get_thumb('genres', auto=True)))
+                         url=host, cat='genre', thumbnail=get_thumb('genres', auto=True)))
     itemlist.append(Item(channel=item.channel, action="category", title="Calidad",
-                         url=host, cat='quality', page=0, thumbnail=get_thumb("quality", auto=True)))
+                         url=host, cat='quality', thumbnail=get_thumb("quality", auto=True)))
     itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host + "?s=", page=0,
                          thumbnail=get_thumb("search", auto=True)))
@@ -53,17 +53,18 @@ def category(item):
     itemlist = []
     data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
+
     if item.cat == 'genre':
         data = scrapertools.find_single_match(
             data, 'Géneros .*?')
-        patron = '([^<]+)<'
+        patron = '([^<]+)<'
     elif item.cat == 'year':
         data = scrapertools.find_single_match(
             data, 'Año de estreno.*?')
-        patron = 'li>([^<]+).*?<'
+        patron = 'li>([^<]+).*?<'
    elif item.cat == 'quality':
         data = scrapertools.find_single_match(data, 'Calidad.*?')
-        patron = 'li>([^<]+)<'
+        patron = 'li>([^<]+)<'
     matches = re.compile(patron, re.DOTALL).findall(data)
     for scrapedurl, scrapedtitle in matches:
         itemlist.append(Item(channel=item.channel, action='movies',
@@ -81,7 +82,7 @@ def movies(item):
     patron += '([^<]+).*?'
     patron += 'class="year">([^<]+).+?class="calidad2">([^<]+)<'
     matches = re.compile(patron, re.DOTALL).findall(data)
-    for scrapedurl, img, scrapedtitle, resto, year, quality in matches[item.page:item.page + 30]:
+    for scrapedurl, img, scrapedtitle, resto, year, quality in matches[item.page:item.page + 20]:
         scrapedtitle = re.sub(r' \((\d+)\)', '', scrapedtitle)
         plot = scrapertools.htmlclean(resto).strip()
         title = ' %s [COLOR red][%s][/COLOR]' % (scrapedtitle, quality)
@@ -97,14 +98,15 @@ def movies(item):
                              infoLabels={'year': year}))
     tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
     # Paginacion
-    if item.page + 30 < len(matches):
-        itemlist.append(item.clone(page=item.page + 30, title=">> Siguiente"))
+    if item.page + 20 < len(matches):
+        itemlist.append(item.clone(page=item.page + 20, title=">> Siguiente"))
     else:
         next_page = scrapertools.find_single_match(
-            data, 'class="respo_pag">Siguiente<')
+            data, '')
         if next_page:
-            itemlist.append(item.clone(
-                url=next_page, page=0, title=">> Siguiente"))
+            itemlist.append(item.clone(url=next_page, page=0,
+                                       title=" Siguiente »"))
+
     return itemlist
@@ -113,7 +115,8 @@ def findvideos(item):
     itemlist = []
     data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
-    data1= scrapertools.find_single_match(data,'.*?')
+    data1 = scrapertools.find_single_match(
+        data, '.*?')
     patron = "li>.*?href=.*?>([^\s]+)"
     matches1 = re.compile(patron, re.DOTALL).findall(data1)
     for lang in matches1:
@@ -136,7 +139,6 @@ def findvideos(item):
         url = video_data.headers['location']
         title = '%s'
-
     else:
         patron = '
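
Note on the movies() hunks above: the slice size drops from 30 to 20 results per screen, and once the in-memory matches run out the channel falls back to the "Siguiente" URL scraped from the page. A minimal sketch of that two-level pagination, written for this note only (PAGE_SIZE and paginate are illustrative names, not identifiers from the channel):

    # Illustrative sketch, not channel code: two-level pagination as used above.
    PAGE_SIZE = 20  # the patch lowers the per-screen slice from 30 to 20

    def paginate(item, matches, scraped_next_url):
        """Build the current slice plus a single 'Siguiente' entry."""
        itemlist = []
        for scrapedurl, scrapedtitle in matches[item.page:item.page + PAGE_SIZE]:
            itemlist.append(item.clone(action='findvideos', url=scrapedurl, title=scrapedtitle))
        if item.page + PAGE_SIZE < len(matches):
            # More results are already parsed: just move the local offset forward.
            itemlist.append(item.clone(page=item.page + PAGE_SIZE, title=" Siguiente »"))
        elif scraped_next_url:
            # Otherwise jump to the next listing page scraped from the site and reset the offset.
            itemlist.append(item.clone(url=scraped_next_url, page=0, title=" Siguiente »"))
        return itemlist
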
Date: Wed, 20 Feb 2019 14:27:06 +0100
Subject: [PATCH 2/5] Mejoras internas

---
 plugin.video.alfa/channels/rarbg.py             | 3 ++-
 plugin.video.alfa/core/videolibrarytools.py     | 4 ++--
 plugin.video.alfa/platformcode/platformtools.py | 2 --
 3 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/plugin.video.alfa/channels/rarbg.py b/plugin.video.alfa/channels/rarbg.py
index aae2d22e..9c885e61 100644
--- a/plugin.video.alfa/channels/rarbg.py
+++ b/plugin.video.alfa/channels/rarbg.py
@@ -533,7 +533,7 @@ def play(item):    #Permite preparar la descarga de
         from core import ziptools
 
         #buscamos la url del .torrent
-        patron = '\s*Torrent:<\/td>\s*\s*.*?<\/a>'
+        patron = '\s*Torrent:<\/td>\s*\s*'
         try:
             data = re.sub(r"\n|\r|\t|\s{2}|(<br>)", "", httptools.downloadpage(item.url, timeout=timeout).data)
             data = unicode(data, "utf-8", errors="replace").encode("utf-8")
@@ -543,6 +543,7 @@ def play(item):    #Permite preparar la descarga de
         if status: return itemlist    #IP bloqueada
 
         if not scrapertools.find_single_match(data, patron):
+            logger.error('ERROR 02: PLAY: No hay enlaces o ha cambiado la estructura de la Web. Verificar en la Web esto último y reportar el error con el log: PATRON: ' + patron + ' / DATA: ' + data)
             itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: PLAY: No hay enlaces o ha cambiado la estructura de la Web. Verificar en la Web esto último y reportar el error con el log'))
             return itemlist
         item.url = urlparse.urljoin(host, scrapertools.find_single_match(data, patron))
diff --git a/plugin.video.alfa/core/videolibrarytools.py b/plugin.video.alfa/core/videolibrarytools.py
index a5639c22..d3619863 100644
--- a/plugin.video.alfa/core/videolibrarytools.py
+++ b/plugin.video.alfa/core/videolibrarytools.py
@@ -441,7 +441,7 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
             if e.emergency_urls:    #Si ya tenemos urls...
                 emergency_urls_succ = True    #... es un éxito y vamos a marcar el .nfo
-            if not e.infoLabels:    #en series multicanal, prevalece el infolabels...
+            if not e.infoLabels["tmdb_id"] or (serie.infoLabels["tmdb_id"] and e.infoLabels["tmdb_id"] != serie.infoLabels["tmdb_id"]):    #en series multicanal, prevalece el infolabels...
                 e.infoLabels = serie.infoLabels    #... del canal actual y no el del original
             e.contentSeason, e.contentEpisodeNumber = season_episode.split("x")
             new_episodelist.append(e)
@@ -516,7 +516,7 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
                 if not item_nfo:
                     head_nfo, item_nfo = read_nfo(nfo_path)
-                if not e.infoLabels:    #en series multicanal, prevalece el infolabels...
+                if not e.infoLabels["tmdb_id"] or (item_nfo.infoLabels["tmdb_id"] and e.infoLabels["tmdb_id"] != item_nfo.infoLabels["tmdb_id"]):    #en series multicanal, prevalece el infolabels...
                     e.infoLabels = item_nfo.infoLabels    #... del canal actual y no el del original
 
                 if filetools.write(json_path, e.tojson()):
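
Note on the videolibrarytools hunks above: the old guard "if not e.infoLabels" becomes a TMDB-id comparison, so an episode now inherits the infoLabels of the series (or of the saved .nfo) when it has no tmdb_id of its own or when its tmdb_id disagrees with the reference, which is the multi-channel case the comment mentions. The predicate in isolation, as a sketch only (the helper name and the .get() access are choices made for this note, not plugin code):

    # Sketch of the inheritance rule added above (illustrative helper, not plugin code).
    def should_inherit_infolabels(episode_labels, reference_labels):
        """True when the episode must take the reference infoLabels (multi-channel series)."""
        episode_id = episode_labels.get("tmdb_id")
        reference_id = reference_labels.get("tmdb_id")
        # Inherit when the episode has no TMDB id, or it points at a different
        # TMDB entry than the series/nfo it is being saved against.
        return not episode_id or (reference_id and episode_id != reference_id)
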
diff --git a/plugin.video.alfa/platformcode/platformtools.py b/plugin.video.alfa/platformcode/platformtools.py
index 9ed29b74..eda11d9a 100644
--- a/plugin.video.alfa/platformcode/platformtools.py
+++ b/plugin.video.alfa/platformcode/platformtools.py
@@ -17,7 +17,6 @@ import xbmc
 import xbmcgui
 import xbmcplugin
 from channelselector import get_thumb
-from lib import alfaresolver
 from platformcode import unify
 from core import channeltools
 from core import trakt_tools
@@ -697,7 +696,6 @@ def play_video(item, strm=False, force_direct=False, autoplay=False):
             return
 
     # se obtiene la información del video.
-    mediaurl = alfaresolver.av(mediaurl)
     if not item.contentThumbnail:
         thumb = item.thumbnail
     else:

From 5e1fdcafae45b780f971d4aa1e8b1666f87dd673 Mon Sep 17 00:00:00 2001
From: paezner
Date: Wed, 20 Feb 2019 17:48:23 +0100
Subject: [PATCH 3/5] pandamovie: cambio de estructura

pornboss: cambio de estructura
porntrex: cambio estructura y thumbnail
videosXYZ: cambio de estructura
xms: cambio host
---
 plugin.video.alfa/channels/pandamovie.py | 18 ++++++++++--------
 plugin.video.alfa/channels/pornboss.py   | 17 +++++++++--------
 plugin.video.alfa/channels/porntrex.py   |  2 ++
 plugin.video.alfa/channels/videosXYZ.py  |  4 ++--
 plugin.video.alfa/channels/xms.py        |  9 +++------
 5 files changed, 26 insertions(+), 24 deletions(-)

diff --git a/plugin.video.alfa/channels/pandamovie.py b/plugin.video.alfa/channels/pandamovie.py
index 96f70a82..ec8a72ab 100644
--- a/plugin.video.alfa/channels/pandamovie.py
+++ b/plugin.video.alfa/channels/pandamovie.py
@@ -14,9 +14,9 @@ host= 'https://pandamovies.pw'
 
 def mainlist(item):
     logger.info()
     itemlist = []
-    itemlist.append( Item(channel=item.channel, title="Peliculas" , action="lista", url=host + "/list-movies"))
-    itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/list-movies"))
-    itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host + "/list-movies"))
+    itemlist.append( Item(channel=item.channel, title="Peliculas" , action="lista", url=host + "/movies"))
+    itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/movies"))
+    itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host + "/movies"))
     itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
     return itemlist
@@ -42,7 +42,7 @@ def categorias(item):
     else:
         data = scrapertools.get_match(data,'Studios(.*?)')
     data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
-    patron = '([^<]+)'
+    patron = '([^<]+)'
     matches = re.compile(patron,re.DOTALL).findall(data)
     for scrapedurl,scrapedtitle in matches:
         scrapedplot = ""
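
Note on the categorias() hunk above: it keeps the channel's usual scraping flow, download the page, flatten whitespace and entities, cut the page down to the relevant menu block, then pull (url, title) pairs with one regex. A generic sketch of that flow, assuming illustrative arguments (scrape_categories and both patron parameters are stand-ins for this note, not names from the channel):

    # Illustrative sketch of the categorias() flow shared by these channels.
    import re
    from core import httptools
    from core import scrapertools

    def scrape_categories(url, section_patron, item_patron):
        """Return (url, title) pairs found inside one menu block of the page."""
        data = httptools.downloadpage(url).data
        # Flatten the HTML so the single-line regexes used by the channel keep matching.
        data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
        # Reduce the page to the block of interest, then extract the link/title pairs.
        block = scrapertools.find_single_match(data, section_patron)
        return re.compile(item_patron, re.DOTALL).findall(block)
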
@@ -58,17 +58,19 @@ def lista(item):
     logger.info()
     itemlist = []
     data = scrapertools.cachePage(item.url)
-    patron = '1
+    next_page = scrapertools.find_single_match(data,'.*?href=\'([^\']+)\'>')
     if next_page =="":
         next_page = scrapertools.find_single_match(data,'Next »')
     if next_page!="":
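
Note on the lista() hunk above: its surviving tail shows the pagination lookup, the patch tries a pattern for the site's current markup first and keeps the old "Next »" link as a fallback. A sketch of that two-pattern lookup with placeholder regexes (neither pattern below is taken from the real site markup):

    # Illustrative sketch of the next-page fallback; both regexes are placeholders.
    from core import scrapertools

    def find_next_page(data):
        """Return the next listing URL, preferring the current markup over the legacy link."""
        # Pattern for the markup the site uses today (placeholder).
        next_page = scrapertools.find_single_match(data, r'<a class="next" href="([^"]+)"')
        if next_page == "":
            # Legacy fallback kept for older pages (placeholder).
            next_page = scrapertools.find_single_match(data, r'<a href="([^"]+)"[^>]*>Next')
        return next_page
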
diff --git a/plugin.video.alfa/channels/pornboss.py b/plugin.video.alfa/channels/pornboss.py
index c5f06431..d0886ca6 100644
--- a/plugin.video.alfa/channels/pornboss.py
+++ b/plugin.video.alfa/channels/pornboss.py
@@ -19,7 +19,7 @@ def mainlist(item):
     itemlist.append( Item(channel=item.channel, title=" categorias" , action="categorias", url=host + "/category/movies/"))
     itemlist.append( Item(channel=item.channel, title="Videos" , action="lista", url=host + "/category/clips/"))
-    itemlist.append( Item(channel=item.channel, title=" categorias" , action="lista", url=host + "/category/clips/"))
+    itemlist.append( Item(channel=item.channel, title=" categorias" , action="categorias", url=host + "/category/clips/"))
     return itemlist
@@ -41,11 +41,11 @@ def categorias(item):
     itemlist = []
     data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
-    if item.url == host + "/category/movies/":
+    if "/category/movies/" in item.url:
         data = scrapertools.get_match(data,'>Movies(.*?)')
     else:
         data = scrapertools.get_match(data,'>Clips(.*?)')
-    patron = '([^"]+)'
+    patron = '([^"]+)'
     matches = re.compile(patron,re.DOTALL).findall(data)
     scrapertools.printMatches(matches)
     for scrapedurl,scrapedtitle in matches:
@@ -60,20 +60,21 @@ def lista(item):
     logger.info()
     itemlist = []
     data = scrapertools.cachePage(item.url)
-    patron = '