Alhaziel01
2020-11-26 18:23:13 +01:00
parent 22f4e80783
commit a91d98185f
2 changed files with 12 additions and 15 deletions


@@ -75,7 +75,7 @@ def peliculas(item):
         else:
             patron = r'<div class="cover-racolta">\s*<a href="(?P<url>[^"]+)"[^>]+>\s*<img width="[^"]+" height="[^"]+" src="(?P<thumb>[^"]+)".*?<p class="title[^>]+>(?P<title>[^<]+)<'
     else:
-        patron = r'<article[^>]+>[^>]+>[^>]+>(?:<img width="[^"]+" height="[^"]+" src="(?P<thumb>[^"]+)"[^>]+>)?.*?<a href="(?P<url>[^"]+)">\s*(?P<title>[^<]+)<[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>\s*<p>(?P<plot>[^<]+)<'
+        patron = r'<article[^>]+>[^>]+>[^>]+>(?:<img width="[^"]+" height="[^"]+" src="(?P<thumb>[^"]+)"[^>]+>)?.*?<a href="(?P<url>[^"]+)"[^>]*>\s*(?P<title>[^<]+)<[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>\s*<p>(?P<plot>[^<]+)<'
     patronNext = r'<a class="page-numbers next" href="([^"]+)">'
     # select category
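The only change in this hunk is the anchor match: `"[^>]*>` replaces `">` so the `<a>` tag may carry extra attributes after `href`, and two of the trailing `[^>]+>` hops are dropped. A minimal sketch of the effect, using plain `re` instead of the channel's `support.match` helper and a made-up HTML snippet (the old tail would not match it, the new one does):

```python
import re

# Reduced form of the new patron: only the href/title part is kept here.
patron = r'<a href="(?P<url>[^"]+)"[^>]*>\s*(?P<title>[^<]+)<'

html = '<a href="https://example.org/film-1/" class="film-link">Film 1</a>'

for m in re.finditer(patron, html):
    print(m.group('url'), m.group('title'))
# https://example.org/film-1/ Film 1
```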
@@ -121,14 +121,14 @@ def episodios(item):
     else:
         patron = r'class="title-episodio">(?P<title>[^<]+)<(?P<url>.*?)<p'
-    def itemlistHook(itemlist):
-        counter = 0
-        for item in itemlist:
-            episode = support.match(item.title, patron=r'\d+').match
-            if episode == '1':
-                counter += 1
-            item.title = support.typo(str(counter) + 'x' + episode.zfill(2) + support.re.sub(r'\[[^\]]+\](?:\d+)?','',item.title),'bold')
-        return itemlist
+    # def itemlistHook(itemlist):
+    #     counter = 0
+    #     for item in itemlist:
+    #         episode = support.match(item.title, patron=r'\d+').match
+    #         if episode == '1':
+    #             counter += 1
+    #         item.title = support.typo(str(counter) + 'x' + episode.zfill(2) + support.re.sub(r'\[[^\]]+\](?:\d+)?','',item.title),'bold')
+    #     return itemlist
     return locals()
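The hook disabled here (kept only as a comment) renumbered episode titles as `SxEE`, bumping a season counter every time it saw an episode whose first number is `1`. A rough stand-in for that logic, using plain strings and `re` in place of KoD's `Item` objects, `support.match` and `support.typo` (all assumptions for illustration):

```python
import re

def renumber(titles):
    counter = 0
    out = []
    for title in titles:
        episode = re.search(r'\d+', title).group()  # first number in the title
        if episode == '1':
            counter += 1                            # a "1" starts a new season
        clean = re.sub(r'\[[^\]]+\](?:\d+)?', '', title)
        out.append(str(counter) + 'x' + episode.zfill(2) + clean)
    return out

print(renumber(['Episodio 1', 'Episodio 2', 'Episodio 1 [ITA]']))
# ['1x01Episodio 1', '1x02Episodio 2', '2x01Episodio 1 ']
```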


@@ -21,15 +21,12 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
+    # from core.support import dbg;dbg()
     global data
     logger.debug("URL", page_url)
-    # from core.support import dbg;dbg()
     video_urls = []
+    host = scrapertools.find_single_match(page_url, r'http[s]?://[^/]+')
     new_url = scrapertools.find_single_match(data, r'<iframe src="([^"]+)"')
-    if new_url:
-        host = scrapertools.find_single_match(new_url, r'http[s]?://[^/]+')
-        data = httptools.downloadpage(host + new_url).data
-    else:
-        host = scrapertools.find_single_match(page_url, r'http[s]?://[^/]+')
+    if new_url: data = httptools.downloadpage(host + new_url).data
     label = scrapertools.find_single_match(data, r'type:\s*"video/([^"]+)"')
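The rewritten flow always takes `host` from `page_url`, and only re-downloads when the page embeds an iframe whose `src` is a relative path joined onto that host; the old per-branch `host` lookup goes away. A sketch of the same flow, assuming `requests` and `re` stand in for KoD's `httptools`/`scrapertools` and using a hypothetical `page_url`:

```python
import re
import requests

def resolve(page_url):
    data = requests.get(page_url).text
    host = re.search(r'http[s]?://[^/]+', page_url).group()   # scheme + domain only
    new_url = re.search(r'<iframe src="([^"]+)"', data)
    if new_url:
        # the iframe src is a relative path, so it is joined onto the host
        data = requests.get(host + new_url.group(1)).text
    label = re.search(r'type:\s*"video/([^"]+)"', data)
    return label.group(1) if label else None

# print(resolve('https://example.org/embed/abc123'))  # e.g. 'mp4'
```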