From b1bc30b3404e1c40eb326d4dafa12910de4c29f2 Mon Sep 17 00:00:00 2001
From: Intel1 <25161862+Intel11@users.noreply.github.com>
Date: Wed, 11 Oct 2017 16:13:12 -0500
Subject: [PATCH] Update newpct1.py
---
plugin.video.alfa/channels/newpct1.py | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/plugin.video.alfa/channels/newpct1.py b/plugin.video.alfa/channels/newpct1.py
index 009ea8c6..d64b241b 100644
--- a/plugin.video.alfa/channels/newpct1.py
+++ b/plugin.video.alfa/channels/newpct1.py
@@ -339,20 +339,20 @@ def episodios(item):
infoLabels = item.infoLabels
data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
- logger.debug('data: %s'%data)
pattern = '<ul class="%s">(.*?)</ul>' % "pagination" # item.pattern
pagination = scrapertools.find_single_match(data, pattern)
if pagination:
pattern = '<a href="([^"]+)">Last<\/a>'
full_url = scrapertools.find_single_match(pagination, pattern)
url, last_page = scrapertools.find_single_match(full_url, r'(.*?\/pg\/)(\d+)')
- list_pages = []
- for x in range(1, int(last_page) + 1):
- list_pages.append("%s%s" % (url, x))
+ list_pages = [item.url]
+ for x in range(2, int(last_page) + 1):
+ response = httptools.downloadpage('%s%s'% (url,x))
+ if response.sucess:
+ list_pages.append("%s%s" % (url, x))
else:
list_pages = [item.url]
- logger.debug ('pattern: %s'%pattern)
for index, page in enumerate(list_pages):
logger.debug("Loading page %s/%s url=%s" % (index, len(list_pages), page))
data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(page).data)
@@ -424,7 +424,7 @@ def episodios(item):
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
item.clone(title="AƱadir esta serie a la videoteca", action="add_serie_to_library", extra="episodios"))
-
+
return itemlist
def search(item, texto):