Pagination fix (provided by brunushky); series now show up both under Categorias and in the full alphabetical listing

Danielr460
2017-09-06 18:16:04 -05:00
parent 6be93834c6
commit 5830c7b87f


@@ -24,7 +24,7 @@ def mainlist(item):
itemlist.append(Item(channel=item.channel, action="lista_gen", title="Novedades", url=host,
thumbnail=thumb_series))
itemlist.append(Item(channel=item.channel, action="lista", title="Series", url=urlparse.urljoin(host, "/lista"),
itemlist.append(Item(channel=item.channel, action="lista", title="Listado Completo de Series", url=urlparse.urljoin(host, "/lista"),
thumbnail=thumb_series))
itemlist.append(Item(channel=item.channel, action="categorias", title="Categorias", url=host,
thumbnail=thumb_series))
@@ -118,10 +118,11 @@ def lista_gen(item):
data1 = re.sub(r"\n|\r|\t|\s{2}| ", "", data1)
patron_sec = '<section class="content">.+?<\/section>'
data = scrapertools.find_single_match(data1, patron_sec)
+logger.info("xsxxxxssss"+item.title)
patron = '<article id=.+? class=.+?><div.+?>'
patron += '<a href="([^"]+)" title="([^"]+)' # scrapedurl, # scrapedtitle
patron += ' Capítulos Completos ([^"]+)">' # scrapedlang
-patron += '<img.+? data-src=.+? data-lazy-src="([^"]+)"' # scrapedthumbnail
+patron += '<img src=".+?" data-lazy-src="([^"]+)"' # scrapedthumbnail
matches = scrapertools.find_multiple_matches(data, patron)
i = 0
for scrapedurl, scrapedtitle, scrapedlang, scrapedthumbnail in matches:
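Note on the pattern change in this hunk: the thumbnail capture moves from a data-src based match to one keyed on src plus data-lazy-src. A minimal sketch, not part of the commit, using invented markup and assuming the site now renders lazy-loaded posters that way:

# -*- coding: utf-8 -*-
# Minimal sketch (not from the commit): the sample <article> below is invented
# to show what the reworked pattern captures. It assumes the site now emits
# <img src="placeholder" data-lazy-src="real poster URL"> for lazy loading.
import re

sample = ('<article id="post-1" class="post"><div class="poster">'
          '<a href="https://example.com/serie/demo" '
          'title="Demo Capítulos Completos Latino">'
          '<img src="placeholder.gif" '
          'data-lazy-src="https://example.com/demo.jpg"></a></div></article>')

patron = '<article id=.+? class=.+?><div.+?>'
patron += '<a href="([^"]+)" title="([^"]+)'          # scrapedurl, scrapedtitle
patron += ' Capítulos Completos ([^"]+)">'            # scrapedlang
patron += '<img src=".+?" data-lazy-src="([^"]+)"'    # scrapedthumbnail (new form)

print(re.findall(patron, sample))
# [('https://example.com/serie/demo', 'Demo', 'Latino', 'https://example.com/demo.jpg')]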
@@ -134,13 +135,14 @@ def lista_gen(item):
show=scrapedtitle, context=renumbertools.context(item)))
tmdb.set_infoLabels(itemlist)
# Paginacion
-patron_pag = '<a class="nextpostslink" rel="next" href="([^"]+)">'
-next_page_url = scrapertools.find_single_match(data, patron_pag)
-if next_page_url != "" and i != 1:
-item.url = next_page_url
-itemlist.append(Item(channel=item.channel, action="lista_gen", title=">> Página siguiente", url=next_page_url,
-thumbnail='https://s32.postimg.org/4zppxf5j9/siguiente.png'))
+#patron_pag='<a class="nextpostslink" rel="next" href="([^"]+)">'
+patron_pag='<li class="next right"><a href="([^"]+)" >([^"]+)<\/a><\/li>'
+next_page_url = scrapertools.find_single_match(data,patron_pag)
+if next_page_url!="" and i!=1:
+item.url=next_page_url[0]
+itemlist.append(Item(channel = item.channel,action = "lista_gen",title = ">> Página siguiente", url = next_page_url[0], thumbnail='https://s32.postimg.org/4zppxf5j9/siguiente.png'))
return itemlist
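On the pagination change itself: the old nextpostslink anchor is commented out (presumably it no longer matches the page), and the replacement pattern has two capture groups (URL and link text), so the match comes back as a tuple and the code indexes [0] to get the URL. A minimal sketch, not part of the commit, using plain re on invented markup and assuming find_single_match behaves like re.findall(patron, data)[0]:

# Minimal sketch (not from the commit): invented markup showing why the new
# pattern yields a tuple and why next_page_url[0] is used.
import re

data = ('<li class="next right">'
        '<a href="https://example.com/lista/page/2/" >Siguiente</a></li>')

patron_pag = r'<li class="next right"><a href="([^"]+)" >([^"]+)<\/a><\/li>'
matches = re.findall(patron_pag, data)
if matches:
    next_page_url = matches[0]   # ('https://example.com/lista/page/2/', 'Siguiente')
    print(next_page_url[0])      # URL used to build the ">> Página siguiente" item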