From 4e1d2dd7d6d1365da52cc9b55387e526903cf187 Mon Sep 17 00:00:00 2001
From: Intel1
Date: Sat, 19 Aug 2017 11:05:22 -0500
Subject: [PATCH] Update hentaiid.py

---
 plugin.video.alfa/channels/hentaiid.py | 91 ++++++++------------------
 1 file changed, 29 insertions(+), 62 deletions(-)

diff --git a/plugin.video.alfa/channels/hentaiid.py b/plugin.video.alfa/channels/hentaiid.py
index fd2f6c07..f6cb51aa 100755
--- a/plugin.video.alfa/channels/hentaiid.py
+++ b/plugin.video.alfa/channels/hentaiid.py
@@ -17,7 +17,6 @@ def mainlist(item):
     itemlist = list()
     itemlist.append(Item(channel=item.channel, action="series", title="Novedades",
                          url=urlparse.urljoin(CHANNEL_HOST, "archivos/h2/"), extra="novedades"))
-    itemlist.append(Item(channel=item.channel, action="letras", title="Por orden alfabético"))
     itemlist.append(Item(channel=item.channel, action="generos", title="Por géneros", url=CHANNEL_HOST))
     itemlist.append(Item(channel=item.channel, action="series", title="Sin Censura",
                          url=urlparse.urljoin(CHANNEL_HOST, "archivos/sin-censura/")))
@@ -25,20 +24,6 @@ def mainlist(item):
                          url=urlparse.urljoin(CHANNEL_HOST, "archivos/hight-definition/")))
     itemlist.append(Item(channel=item.channel, action="series", title="Mejores Hentais",
                          url=urlparse.urljoin(CHANNEL_HOST, "archivos/ranking-hentai/")))
-    itemlist.append(Item(channel=item.channel, action="search", title="Buscar",
-                         url=urlparse.urljoin(CHANNEL_HOST, "?s=")))
-
-    return itemlist
-
-
-def letras(item):
-    logger.info()
-
-    itemlist = []
-
-    for letra in '0ABCDEFGHIJKLMNOPQRSTUVWXYZ':
-        itemlist.append(Item(channel=item.channel, action="series", title=letra,
-                             url=urlparse.urljoin(CHANNEL_HOST, "/?s=letra-%s" % letra.replace("0", "num"))))
 
     return itemlist
 
@@ -47,49 +32,34 @@ def generos(item):
     logger.info()
 
     itemlist = []
-    data = httptools.downloadpage(item.url).data
-    data = re.sub(r"\n|\r|\t|\s{2}", "", data)
+    data = re.sub(r"\n|\r|\t|\s{2}", "", httptools.downloadpage(item.url).data)
 
-    data = scrapertools.get_match(data, "<...>(.*?)</...>")
-    patron = "<a href='([^']+)'>(.*?)</a>"
+    pattern = 'id="hentai2"><ul[^>]+>(.*?)</ul>'
+    data = scrapertools.find_single_match(data, pattern)
+
+    patron = 'href="([^"]+)"[^>]+>(.*?)</a>'
     matches = re.compile(patron, re.DOTALL).findall(data)
 
-    for scrapedurl, scrapedtitle in matches:
-        title = scrapertools.entityunescape(scrapedtitle)
-        url = urlparse.urljoin(item.url, scrapedurl)
+    for url, title in matches:
         # logger.debug("title=[{0}], url=[{1}]".format(title, url))
-
         itemlist.append(Item(channel=item.channel, action="series", title=title, url=url))
 
     return itemlist
 
 
-def search(item, texto):
-    logger.info()
-    if item.url == "":
-        item.url = urlparse.urljoin(CHANNEL_HOST, "animes/?buscar=")
-    texto = texto.replace(" ", "+")
-    item.url = "%s%s" % (item.url, texto)
-
-    try:
-        return series(item)
-    # Catch the exception so a single failing channel does not break the global search
-    except:
-        import sys
-        for line in sys.exc_info():
-            logger.error("%s" % line)
-        return []
-
-
 def series(item):
     logger.info()
 
-    data = httptools.downloadpage(item.url).data
+    data = re.sub(r"\n|\r|\t|\s{2}", "", httptools.downloadpage(item.url).data)
 
-    patron = '<...>' \
-             '(.*?)[^<]+<...>' \
-             '<...>[^<]+<...>[^<]+<...>'
[...]
-    if len(match) > 0:
-        scrapedurl = match
-        scrapedtitle = ">> Pagina Siguiente"
+    pagination = scrapertools.find_single_match(data, '<...>(.*?)</...>')
+    if pagination:
+        page = scrapertools.find_single_match(pagination, '>Página\s*(\d+)\s*de\s*\d+<')
+        pattern = 'href="([^"]+)">%s<' % (int(page) + 1)
+        url_page = scrapertools.find_single_match(pagination, pattern)
 
-    itemlist.append(Item(channel=item.channel, action="series", title=scrapedtitle, url=scrapedurl,
-                         folder=True, viewmode="movies_with_plot"))
+        if url_page:
+            itemlist.append(Item(channel=item.channel, action="series", title=">> Página Siguiente", url=url_page))
 
     return itemlist
 
@@ -124,9 +89,11 @@ def episodios(item):
     logger.info()
 
     itemlist = []
-    data = httptools.downloadpage(item.url).data
-    data = scrapertools.find_single_match(data, '<...>(.*?)</...>')
-    patron = '<a href="([^"]+)">([^<]+)</a>'
+    data = re.sub(r"\n|\r|\t|\s{2}", "", httptools.downloadpage(item.url).data)
+    pattern = '<...>Lista de Capítulos<...>(.*?)<...>'
+
+    data = scrapertools.find_single_match(data, pattern)
+    patron = '<a href="([^"]+)"[^>]+>([^<]+)</a>'
     matches = re.compile(patron, re.DOTALL).findall(data)
 
     for scrapedurl, scrapedtitle in matches:
[...]
         plot = item.plot
         # logger.debug("title=[{0}], url=[{1}], thumbnail=[{2}]".format(title, url, thumbnail))
-
         itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url,
                              thumbnail=thumbnail, plot=plot, show=item.show, fulltitle="%s %s" % (item.show, title),
-                             fanart=thumbnail, viewmode="movies_with_plot", folder=True))
+                             fanart=thumbnail))
 
     return itemlist
 
@@ -148,7 +114,8 @@ def findvideos(item):
     logger.info()
 
     data = httptools.downloadpage(item.url).data
-    patron = '<...>[^<]+<[iframe|IFRAME].*?[src|SRC]="([^"]+)"'
+
+    patron = '<(?:iframe)?(?:IFRAME)?\s*(?:src)?(?:SRC)?="([^"]+)"'
     matches = re.compile(patron, re.DOTALL).findall(data)
 
     for url in matches:
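
A note on the rewritten scraping in generos() and episodios(): both now cut the page down to a single block with scrapertools.find_single_match and only then run the link regex inside it, instead of matching against the whole page. Below is a minimal standalone sketch of that two-step technique using plain re; the sample markup and the find_single_match stand-in are assumptions for illustration, not the channel's real code.

    import re

    def find_single_match(data, pattern):
        # Stand-in for scrapertools.find_single_match: returns the first
        # capture group of the first match, or "" when nothing matches.
        match = re.search(pattern, data, re.DOTALL)
        return match.group(1) if match else ""

    # Invented markup, for illustration only.
    html = ('<div id="hentai2"><ul class="menu">'
            '<li><a href="/genero/accion/" title="Accion">Accion</a></li>'
            '<li><a href="/genero/comedia/" title="Comedia">Comedia</a></li>'
            '</ul></div>')

    # Step 1: isolate the genre block.
    block = find_single_match(html, 'id="hentai2"><ul[^>]+>(.*?)</ul>')

    # Step 2: collect (url, title) pairs inside that block only.
    for url, title in re.findall('href="([^"]+)"[^>]+>(.*?)</a>', block):
        print(url + " -> " + title)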
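
The pagination block added to series() is the least obvious change: it reads the current page number out of the pager's "Página N de M" label, then searches the pager for the link whose visible text is the next number. The line that extracts the pager container is not recoverable from the hunk (the [...] gap above), so this sketch assumes one; find_single_match is the same stand-in as in the previous snippet.

    # -*- coding: utf-8 -*-
    import re

    def find_single_match(data, pattern):
        match = re.search(pattern, data, re.DOTALL)
        return match.group(1) if match else ""

    # Assumed pager markup; the real container around "Página N de M" is unknown.
    pagination = ('<span class="pages">Página 2 de 7</span>'
                  '<a href="/archivos/h2/">1</a>'
                  '<a href="/archivos/h2/page/3/">3</a>')

    page = find_single_match(pagination, r'>Página\s*(\d+)\s*de\s*\d+<')
    if page:
        pattern = 'href="([^"]+)">%s<' % (int(page) + 1)
        url_page = find_single_match(pagination, pattern)
        print(url_page)  # -> /archivos/h2/page/3/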
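
Finally, the rewritten findvideos() pattern deserves a close look. The old [iframe|IFRAME] and [src|SRC] brackets are character classes, so each matched a single character rather than an alternative word; the new pattern spells both casings out as optional groups, at the cost of only matching when src is the first attribute of the tag. A quick standalone check with invented sample HTML:

    import re

    patron = r'<(?:iframe)?(?:IFRAME)?\s*(?:src)?(?:SRC)?="([^"]+)"'

    data = ('<p>intro</p>'
            '<iframe src="https://example.com/embed/abc"></iframe>'
            '<IFRAME SRC="https://example.com/embed/xyz"></IFRAME>'
            '<iframe width="560" src="https://example.com/embed/missed">')

    for url in re.compile(patron, re.DOTALL).findall(data):
        print(url)
    # Prints the two embed URLs whose src comes first; the last iframe is
    # skipped because "width" precedes "src" and the pattern has no way
    # to skip over other attributes.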