From e330a6e1ae61c9ad4f64a645ed043cdaf649f7a5 Mon Sep 17 00:00:00 2001
From: Alhaziel
Date: Tue, 23 Jul 2019 20:45:13 +0200
Subject: [PATCH] added the ability to paginate long lists
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 core/support.py | 36 ++++++++++++++++++++++++++----------
 1 file changed, 26 insertions(+), 10 deletions(-)

diff --git a/core/support.py b/core/support.py
index f024c1b3..e7a6c9b4 100644
--- a/core/support.py
+++ b/core/support.py
@@ -137,7 +137,9 @@ def scrape(func):
         patronBlock = args['patronBlock'] if 'patronBlock' in args else ''
         typeActionDict = args['type_action_dict'] if 'type_action_dict' in args else {}
         typeContentDict = args['type_content_dict'] if 'type_content_dict' in args else {}
-
+        if 'pagination' in args: pagination = args['pagination'] if args['pagination'] else 20
+        else: pagination = ''
+
         log('PATRON= ', patron)
         if not data:
             data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data.replace("'", '"')
             data = re.sub('\n|\t', ' ', data)
@@ -166,7 +168,11 @@ def scrape(func):
                    'rating', 'type', 'lang']  # by greko added episode
         lang = ''  # added to handle sites with TV-series pages that offer videos both in ITA and in SUB-ITA

-        for match in matches:
+        pag = item.page if item.page else 1  # pagination
+
+        for i, match in enumerate(matches):
+            if pagination and (pag - 1) * pagination > i: continue  # pagination
+            if pagination and i >= pag * pagination: break  # pagination
             listGroups = match.keys()
             match = match.values()
@@ -181,8 +187,7 @@ def scrape(func):
                     val = scrapertoolsV2.find_single_match(item.url, 'https?://[a-z0-9.-]+') + val
                 scraped[kk] = val

-            title = scrapertoolsV2.htmlclean(scrapertoolsV2.decodeHtmlentities(scraped["title"])
-                                             .replace('"',"'")).strip()  # fix by greko from " to '
+            title = scrapertoolsV2.htmlclean(scrapertoolsV2.decodeHtmlentities(scraped["title"]).replace('"',"'").replace('×', 'x').replace('–','-')).strip()  # fix by greko from " to '
             plot = scrapertoolsV2.htmlclean(scrapertoolsV2.decodeHtmlentities(scraped["plot"]))

             longtitle = typo(title, 'bold')
@@ -191,7 +196,7 @@ def scrape(func):
                 scraped['episode'] = re.sub(r'\s-\s|-|x|–', 'x', scraped['episode'])
                 longtitle = typo(scraped['episode'] + ' - ', 'bold') + longtitle
             if scraped['title2']:
-                title2 = scrapertoolsV2.htmlclean(scrapertoolsV2.decodeHtmlentities(scraped["title2"]).replace('"', "'")).strip()
+                title2 = scrapertoolsV2.htmlclean(scrapertoolsV2.decodeHtmlentities(scraped["title2"]).replace('"', "'").replace('×', 'x').replace('–','-')).strip()
                 longtitle = longtitle + typo(title2, 'bold _ -- _')

             ## Added/modified to handle sites whose videos
@@ -235,15 +240,15 @@ def scrape(func):
                 for name, variants in typeActionDict.items():
                     if scraped['type'] in variants:
                         action = name
-
+
             if scraped["title"] not in blacklist:
                 it = Item(
                     channel=item.channel,
                     action=action,
-                    contentType=item.contentType,
+                    contentType= 'episode' if item.action == 'episodios' else item.contentType,
                     title=longtitle,
                     fulltitle=title,
-                    show=title,
+                    show=item.show if item.action == 'episodios' else title,
                     quality=scraped["quality"],
                     url=scraped["url"],
                     infoLabels=infolabels,
@@ -258,8 +263,7 @@ def scrape(func):
                     it = args['itemHook'](it)
                 itemlist.append(it)
         checkHost(item, itemlist)
-##        if (item.contentType == "episode" and (action != "findvideos" and action != "play")) \
-##                or (item.contentType == "movie" and action != "play"):
+        if (item.contentType == "tvshow" and (action != "findvideos" and action != "play")) \
                 or (item.contentType == "episode" and action != "play") \
                 or (item.contentType == "movie" and action != "play") :
@@ -274,6 +278,18 @@ def scrape(func):
         if patronNext:
             nextPage(itemlist, item, data, patronNext, 2)

+        # next page for pagination
+        if pagination and len(matches) >= pag * pagination:
+            itemlist.append(
+                Item(channel=item.channel,
+                     action = item.action,
+                     contentType=item.contentType,
+                     title=typo(config.get_localized_string(30992), 'color kod bold'),
+                     url=item.url,
+                     args=item.args,
+                     page=pag + 1,
+                     thumbnail=thumb()))
+
         if anime:
             from specials import autorenumber
             autorenumber.renumber(itemlist)
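Reviewer note: below is a minimal, standalone sketch of the pagination window this patch introduces, useful for checking the arithmetic in isolation. The helper name paginate, the per_page parameter, and the sample data are illustrative assumptions and are not part of core/support.py; the real code slices inside the existing matches loop using item.page and args['pagination'].

# Illustrative sketch only. It mirrors the patch's window
#   (pag - 1) * pagination <= i < pag * pagination
# and the len(matches) >= pag * pagination test used before appending the next-page Item.

def paginate(matches, page=1, per_page=20):
    """Return the slice of matches belonging to `page`, plus a flag for a next-page entry."""
    if not per_page:                   # pagination disabled: return everything, no next page
        return list(matches), False
    start = (page - 1) * per_page      # first index shown on this page
    end = page * per_page              # first index of the following page
    has_next = len(matches) >= end     # same check the patch uses before adding the next-page Item
    return list(matches)[start:end], has_next

if __name__ == '__main__':
    items = list(range(45))            # stand-in for the regex matches
    shown, more = paginate(items, page=2, per_page=20)
    print(shown)                       # elements 20..39
    print(more)                        # True -> a "next page" entry would be appended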