From 82cc5fba8b99de32f49c4d4551d35238e1f9213e Mon Sep 17 00:00:00 2001
From: Unknown
Date: Wed, 13 Sep 2017 17:39:37 -0300
Subject: [PATCH 1/2] Ajustes y reparaciones de canales

---
 .../channels/ecarteleratrailers.py            |   2 +-
 plugin.video.alfa/channels/seriesdanko.py     |  14 +-
 plugin.video.alfa/channels/serieslatino.py    |   4 +-
 plugin.video.alfa/channels/seriespapaya.py    |  43 ++++--
 plugin.video.alfa/channels/sipeliculas.py     |  31 +++--
 plugin.video.alfa/channels/torrentlocura.py   |   3 +
 .../channels/ultrapeliculashd.py              |  22 ++-
 plugin.video.alfa/channels/vepelis.py         | 128 +++++-------------
 plugin.video.alfa/channels/ver-peliculas.py   |  12 +-
 .../channels/verseriesonlinetv.py             |  16 ++-
 plugin.video.alfa/channels/vixto.py           |   7 +-
 11 files changed, 141 insertions(+), 141 deletions(-)
 mode change 100755 => 100644 plugin.video.alfa/channels/serieslatino.py

diff --git a/plugin.video.alfa/channels/ecarteleratrailers.py b/plugin.video.alfa/channels/ecarteleratrailers.py
index a6bfb4dd..04ad7129 100755
--- a/plugin.video.alfa/channels/ecarteleratrailers.py
+++ b/plugin.video.alfa/channels/ecarteleratrailers.py
@@ -41,7 +41,7 @@ def mainlist(item):
         logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
         itemlist.append(
             Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, fanart=thumbnail,
-                 plot=plot, server="directo", folder=False))
+                 plot=plot,folder=False))

     # ------------------------------------------------------
     # Extrae la página siguiente
diff --git a/plugin.video.alfa/channels/seriesdanko.py b/plugin.video.alfa/channels/seriesdanko.py
index 424cf4b3..ac3c7a2b 100644
--- a/plugin.video.alfa/channels/seriesdanko.py
+++ b/plugin.video.alfa/channels/seriesdanko.py
@@ -40,7 +40,7 @@ def novedades(item):
     data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|\s{2}| |||||-\s", "", data)
     data = re.sub(r"", "", data)
-
+    logger.debug(data)
     patron = ''
     patron += "
 0:
diff --git a/plugin.video.alfa/channels/seriespapaya.py b/plugin.video.alfa/channels/seriespapaya.py
index 0baf8eec..afcdfbb4 100644
--- a/plugin.video.alfa/channels/seriespapaya.py
+++ b/plugin.video.alfa/channels/seriespapaya.py
@@ -11,12 +11,14 @@
 from core import httptools
 from core import jsontools
 from core import scrapertools
 from core import servertools
+from core import tmdb
 from core.item import Item
 from platformcode import config, logger

 HOST = "http://www.seriespapaya.com"

-IDIOMAS = {'es': 'Español', 'lat': 'Latino', 'in': 'Inglés', 'ca': 'Catalán', 'sub': 'VOS'}
+IDIOMAS = {'es': 'Español', 'lat': 'Latino', 'in': 'Inglés', 'ca': 'Catalán', 'sub': 'VOSE', 'Español Latino':'lat',
+           'Español Castellano':'es', 'Sub Español':'VOSE'}
 list_idiomas = IDIOMAS.values()
 CALIDADES = ['360p', '480p', '720p HD', '1080p HD']
@@ -67,22 +69,31 @@ def series_por_letra_y_grupo(item):
         "letra": item.letter.lower()
     }
     data = httptools.downloadpage(url, post=urllib.urlencode(post_request)).data
+    data = re.sub(r'"|\n|\r|\t| ||\s{2,}', "", data)
+    patron = '.*?(.*?)'
+    patron +='<.*?justify>(.*?)<.*?Año:<\/b>.*?(\d{4})<'
+    matches = re.compile(patron, re.DOTALL).findall(data)
+    #series = re.findall(
+    #    'list_imagen.+?src="(?P[^"]+).+?]+href="(?P[^"]+)[^>]+>(.*?)', data,
+    #    re.MULTILINE | re.DOTALL)

-    series = re.findall(
-        'list_imagen.+?src="(?P[^"]+).+?]+href="(?P[^"]+)[^>]+>(.*?)', data,
-        re.MULTILINE | re.DOTALL)
-
-    for img, url, name in series:
-        itemlist.append(item.clone(
+    for img, url, name, plot, year in matches:
+        new_item= Item(
+            channel = item.channel,
             action="episodios",
             title=name,
             show=name,
             url=urlparse.urljoin(HOST, url),
             thumbnail=urlparse.urljoin(HOST, img),
-            context=filtertools.context(item, list_idiomas, CALIDADES)
-        ))
+            context=filtertools.context(item, list_idiomas, CALIDADES),
+            plot = plot,
+            infoLabels={'year':year}
+            )
+        if year:
+            tmdb.set_infoLabels_item(new_item)
+        itemlist.append(new_item)

-    if len(series) == 8:
+    if len(matches) == 8:
         itemlist.append(item.clone(title="Siguiente >>", action="series_por_letra_y_grupo", extra=item.extra + 1))

     if item.extra > 0:
@@ -94,13 +105,17 @@ def novedades(item):
     logger.info()
     data = httptools.downloadpage(HOST).data
-    shows = re.findall('sidebarestdiv[^<]+|\s{2,}', "", data)
+    logger.debug(data)
+    patron = 'sidebarestdiv>.*?src=(.*?)>'
+    matches = re.compile(patron, re.DOTALL).findall(data)
     itemlist = []
-    for title, url, img in shows:
-        itemlist.append(item.clone(action="findvideos", title=title, url=urlparse.urljoin(HOST, url), thumbnail=img))
+    for title, language,url, img in matches:
+        language = IDIOMAS[language]
+        itemlist.append(item.clone(action="findvideos", title=title, url=urlparse.urljoin(HOST, url), thumbnail=img,
+                                   language=language))

     return itemlist

diff --git a/plugin.video.alfa/channels/sipeliculas.py b/plugin.video.alfa/channels/sipeliculas.py
index 7e76bd9b..95ba9295 100755
--- a/plugin.video.alfa/channels/sipeliculas.py
+++ b/plugin.video.alfa/channels/sipeliculas.py
@@ -6,6 +6,7 @@ import urlparse
 from core import httptools
 from core import scrapertools
 from core import servertools
+from core import tmdb
 from core.item import Item
 from platformcode import logger

@@ -60,18 +61,18 @@ def lista(item):
     logger.info()
     itemlist = []
     data = httptools.downloadpage(item.url).data
-    # data = re.sub(r'"|\n|\r|\t| |', "", data)
-    listado = scrapertools.find_single_match(data, '(.*?)[^<]+([^<]+)')
-        itemlist.append(Item(channel=item.channel, action='findvideos', title=scrapedtitle, url=scrapedurl,
-                             thumbnail=scrapedthumbnail, plot=dataplot, contentTitle=scrapedtitle, extra=item.extra))
+    patron = '(.*?)<.*?span>(.*?)<.*?(.*?)<'
+    matches = re.compile(patron, re.DOTALL).findall(listado)
+
+    for scrapedurl, scrapedthumbnail, scrapedtitle, year, plot in matches:
+        itemlist.append(Item(channel=item.channel, action='findvideos', title=scrapedtitle, url=scrapedurl,
+                             thumbnail=scrapedthumbnail, plot=plot, contentTitle=scrapedtitle, extra=item.extra,
+                             infoLabels ={'year':year}))
+
+    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

     # Paginacion
     if itemlist != []:
         patron = 'Siguiente[^<]+'
@@ -96,22 +97,26 @@ def findvideos(item):
     logger.info()
     itemlist = []
     data = httptools.downloadpage(item.url).data
-    # data = re.sub(r"'|\n|\r|\t| |", "", data)
     listado1 = scrapertools.find_single_match(data, '