cttynul
2019-05-07 10:14:20 +02:00
5 changed files with 43 additions and 16 deletions

View File

@@ -45,12 +45,12 @@ def mainlist(item):
def categories(item):
support.log(item)
-itemlist = support.scrape(item,'<li><a href="([^"]+)">(.*?)</a></li>',['url','title'],headers,'Altadefinizione01',patron_block='<ul class="kategori_list">(.*?)</ul>',action='peliculas',url_host=host)
+itemlist = support.scrape(item,'<li><a href="([^"]+)">(.*?)</a></li>',['url','title'],headers,'Altadefinizione01',patron_block='<ul class="kategori_list">(.*?)</ul>',action='peliculas')
return support.thumb(itemlist)
def AZlist(item):
support.log()
-return support.scrape(item,r'<a title="([^"]+)" href="([^"]+)"',['title','url'],headers,patron_block=r'<div class="movies-letter">(.*?)<\/div>',action='peliculas_list',url_host=host)
+return support.scrape(item,r'<a title="([^"]+)" href="([^"]+)"',['title','url'],headers,patron_block=r'<div class="movies-letter">(.*?)<\/div>',action='peliculas_list')
def newest(categoria):

View File

@@ -7,6 +7,7 @@ import urllib
from lib import unshortenit
from platformcode import logger, config
from channelselector import thumb
+from channels import autoplay
def hdpass_get_servers(item):
@@ -86,7 +87,7 @@ def color(text, color):
def scrape(item, patron = '', listGroups = [], headers="", blacklist="", data="", patron_block="",
-           patronNext="", action="findvideos", url_host="", addVideolibrary = True):
+           patronNext="", action="findvideos", addVideolibrary = True):
# patron: the pattern used to scrape the page; every capturing group must have a matching entry in listGroups
# listGroups: a list naming, in order, the data captured by the patron
#             accepted values are: url, title, thumb, quality, year, plot, duration, genre, rating
@@ -144,8 +145,8 @@ def scrape(item, patron = '', listGroups = [], headers="", blacklist="", data=""
scraped = {}
for kk in known_keys:
val = match[listGroups.index(kk)] if kk in listGroups else ''
-if kk == "url":
-    val = url_host + val
+if val and (kk == "url" or kk == 'thumb') and 'http' not in val:
+    val = scrapertoolsV2.find_single_match(item.url, 'https?://[a-z0-9.-]+') + val
scraped[kk] = val
title = scrapertoolsV2.decodeHtmlentities(scraped["title"]).strip()
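scrape() no longer takes a url_host argument: relative url/thumb values are now made absolute by extracting the scheme and host from item.url, which is why every call site above drops url_host=host. The same fallback is reused for next_page in the hunk below. A minimal standalone sketch of the idea (the helper names page_base and absolutize are illustrative, not from the commit):

import re

def page_base(page_url):
    # Mirrors scrapertoolsV2.find_single_match(item.url, 'https?://[a-z0-9.-]+'):
    # pull "https://example.com" out of the page URL the match came from.
    match = re.search(r'https?://[a-z0-9.-]+', page_url)
    return match.group(0) if match else ''

def absolutize(val, page_url):
    # Relative links and thumbnails get the page's own host prefixed.
    if val and 'http' not in val:
        return page_base(page_url) + val
    return val

print(absolutize('/film/page/2', 'https://altadefinizione01.example/film'))
# -> https://altadefinizione01.example/film/page/2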
@@ -462,7 +463,9 @@ def nextPage(itemlist, item, data, patron, function_level=1):
# If the call is direct, leave it blank
next_page = scrapertoolsV2.find_single_match(data, patron)
-log('NEXT= ',next_page)
+if 'http' not in next_page:
+    next_page = scrapertoolsV2.find_single_match(item.url, 'https?://[a-z0-9.-]+') + next_page
+log('NEXT= ', next_page)
if next_page != "":
itemlist.append(
@@ -477,7 +480,10 @@ def nextPage(itemlist, item, data, patron, function_level=1):
return itemlist
-def server(item, data='', headers=''):
+def server(item, data='', headers='', AutoPlay=True):
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', item.channel)
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', item.channel)
if not data:
data = httptools.downloadpage(item.url, headers=headers).data
@@ -492,9 +498,23 @@ def server(item, data='', headers=''):
videoitem.channel = item.channel
videoitem.contentType = item.contentType
if __comprueba_enlaces__:
itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
+if AutoPlay == True:
+    autoplay.start(itemlist, item)
return itemlist
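The new AutoPlay keyword (default True) lets a channel skip the autoplay hand-off without touching support.py. A hypothetical call site opting out, assuming the usual channel-side import:

from channels import support  # standard channel import in this project

def findvideos(item):
    # Opt out of the new default: links are still checked, autoplay.start() is skipped.
    return support.server(item, AutoPlay=False)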
+def aplay(item, itemlist, list_servers='', list_quality=''):
+    if inspect.stack()[1][3] == 'mainlist':
+        autoplay.init(item.channel, list_servers, list_quality)
+        autoplay.show_option(item.channel, itemlist)
+    else:
+        autoplay.start(itemlist, item)
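aplay() branches on the name of the function that called it: inspect.stack()[1] is the caller's frame record and element 3 is its function name, so autoplay is initialized and its menu shown only when the call comes from mainlist. A self-contained sketch of that trick:

import inspect

def dispatch():
    # stack()[0] is this frame; [1] is the caller's; index 3 is the function name.
    caller = inspect.stack()[1][3]
    return 'init + show_option' if caller == 'mainlist' else 'start'

def mainlist():
    return dispatch()

def findvideos():
    return dispatch()

print(mainlist())    # -> init + show_option
print(findvideos())  # -> start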
def log(stringa1="", stringa2="", stringa3="", stringa4="", stringa5=""):
# Helper function to simplify logging
# Automatically includes the calling file name and function name

View File

@@ -259,7 +259,8 @@ def episodios(item):
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).replace("×", "x")
scrapedtitle = scrapedtitle.replace("_", " ")
scrapedtitle = scrapedtitle.replace(".mp4", "")
-puntata = scrapertools.find_single_match(scrapedtitle, '[0-9]+x[0-9]+')
+# puntata = scrapertools.find_single_match(scrapedtitle, '[0-9]+x[0-9]+')
+puntata = scrapedtitle
for i in itemlist:
if i.args == puntata: # already added
i.url += " " + scrapedurl
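With the SxE regex disabled, the whole cleaned title becomes the grouping key, so repeated uploads of the same episode collapse into one entry whose url accumulates every link. A toy version of the loop (Item here is a stand-in for the platform's real class):

class Item(object):
    # Stand-in exposing only the two fields the loop touches.
    def __init__(self, args, url):
        self.args, self.url = args, url

def add_episode(itemlist, title, url):
    for i in itemlist:
        if i.args == title:   # already added: append the extra part/mirror
            i.url += " " + url
            return
    itemlist.append(Item(args=title, url=url))

items = []
add_episode(items, 'Show 1x05', 'http://a/1')
add_episode(items, 'Show 1x05', 'http://a/2')
print(items[0].url)  # -> http://a/1 http://a/2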

View File

@@ -103,13 +103,13 @@ def findvideos(item):
def generos(item):
findhost()
patron = '<a href="([^"#]+)">([a-zA-Z]+)'
-return support.scrape(item, patron, ['url', 'title'], patron_block='<a href="#">Genere</a><ul class="sub-menu">.*?</ul>', action='peliculas', url_host=host)
+return support.scrape(item, patron, ['url', 'title'], patron_block='<a href="#">Genere</a><ul class="sub-menu">.*?</ul>', action='peliculas')
def year(item):
findhost()
patron = r'<a href="([^"#]+)">(\d+)'
-return support.scrape(item, patron, ['url', 'title'], patron_block='<a href="#">Anno</a><ul class="sub-menu">.*?</ul>', action='peliculas', url_host=host)
+return support.scrape(item, patron, ['url', 'title'], patron_block='<a href="#">Anno</a><ul class="sub-menu">.*?</ul>', action='peliculas')
def play(item):

View File

@@ -714,7 +714,7 @@ def check_list_links(itemlist, numero='', timeout=3):
for it in itemlist:
if numero > 0 and it.server != '' and it.url != '':
verificacion = check_video_link(it.url, it.server, timeout)
-it.title = verificacion + ', ' + it.title.strip()
+it.title = verificacion + ' ' + it.title.strip()
it.alive = verificacion
numero -= 1
return itemlist
@@ -725,31 +725,37 @@ def check_video_link(url, server, timeout=3):
:param url, server: link and server
:return: str(2) '??': could not be checked. 'Ok': the link appears to work. 'NO': it appears not to work.
"""
+NK = "[COLOR 0xFFF9B613][B]" + u'\u25cf' + "[/B][/COLOR]"
+OK = "[COLOR 0xFF00C289][B]" + u'\u25cf' + "[/B][/COLOR]"
+KO = "[COLOR 0xFFC20000][B]" + u'\u25cf' + "[/B][/COLOR]"
try:
server_module = __import__('servers.%s' % server, None, None, ["servers.%s" % server])
except:
server_module = None
logger.info("[check_video_link] No se puede importar el servidor! %s" % server)
-return "??"
+return NK
if hasattr(server_module, 'test_video_exists'):
ant_timeout = httptools.HTTPTOOLS_DEFAULT_DOWNLOAD_TIMEOUT
httptools.HTTPTOOLS_DEFAULT_DOWNLOAD_TIMEOUT = timeout # Limit the download time
try:
video_exists, message = server_module.test_video_exists(page_url=url)
if not video_exists:
logger.info("[check_video_link] No existe! %s %s %s" % (message, server, url))
-resultado = "[COLOR red][B]NO[/B][/COLOR]"
+resultado = KO
else:
logger.info("[check_video_link] comprobacion OK %s %s" % (server, url))
-resultado = "[COLOR green][B]OK[/B][/COLOR]"
+resultado = OK
except:
logger.info("[check_video_link] No se puede comprobar ahora! %s %s" % (server, url))
-resultado = "??"
+resultado = NK
finally:
httptools.HTTPTOOLS_DEFAULT_DOWNLOAD_TIMEOUT = ant_timeout # Restore the download timeout
return resultado
logger.info("[check_video_link] No hay test_video_exists para servidor: %s" % server)
-return "??"
+return NK
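The three plain-text statuses ('??', 'Ok', 'NO') are now rendered as colored dots built from Kodi's [COLOR]/[B] label markup around the U+25CF black-circle glyph; the hex values are AARRGGBB. The same construction in isolation:

DOT = u'\u25cf'  # BLACK CIRCLE

def dot(argb):
    # Kodi label formatting: [COLOR 0xAARRGGBB][B]...[/B][/COLOR]
    return "[COLOR 0x%s][B]%s[/B][/COLOR]" % (argb, DOT)

NK = dot("FFF9B613")  # amber : could not be checked ('??')
OK = dot("FF00C289")  # green : link appears to work ('Ok')
KO = dot("FFC20000")  # red   : link appears dead ('NO')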