@@ -20,14 +20,125 @@ except:
|
||||
def mainlist(item):
    """Build the channel's root menu.

    The diff residue in this file listed the four movie entries twice (a flat
    copy and a sectioned copy); only the sectioned layout is kept here:
    a non-clickable "Películas" header with its sub-entries, a "Series"
    header with its sub-entries, and a final search entry.

    :param item: the incoming menu Item (only ``item.channel`` is read).
    :return: list of Item objects describing the menu.
    """
    logger.info()
    itemlist = []
    # Section header: bold, folder=False and no action make it non-clickable.
    itemlist.append(Item(channel = item.channel, title = "Películas", text_bold = True, folder = False))
    itemlist.append(Item(channel = item.channel, title = "    Novedades", action = "peliculas", url = host + "/?peli=1"))
    itemlist.append(Item(channel = item.channel, title = "    Por género", action = "filtro", url = host, extra = "categories"))
    itemlist.append(Item(channel = item.channel, title = "    Por calidad", action = "filtro", url = host, extra = "qualitys"))
    itemlist.append(Item(channel = item.channel, title = "    Por idioma", action = "filtro", url = host, extra = "languages"))
    # Blank separator row between sections.
    itemlist.append(Item(channel = item.channel, title = ""))
    itemlist.append(Item(channel = item.channel, title = "Series", text_bold = True, folder = False))
    itemlist.append(Item(channel = item.channel, title = "    Novedades", action = "series", url = host + "/series/?peli=1"))
    itemlist.append(Item(channel = item.channel, title = "    Nuevos Capitulos", action = "nuevos_capitulos", url = host + "/series/?peli=1"))
    itemlist.append(Item(channel = item.channel, title = ""))
    itemlist.append(Item(channel = item.channel, title = "Buscar", action = "search", url = host + "/?s="))
    return itemlist
|
||||
|
||||
def nuevos_capitulos(item):
    """List the most recently published episodes of the site.

    Scrapes the series page pointed to by ``item.url`` and returns one
    "findvideos" Item per episode, labelling each title with the episode
    text and how long ago it was posted.
    """
    logger.info()
    page_html = httptools.downloadpage(item.url).data

    # One regex capturing, in order: episode url, thumbnail, show title,
    # episode text (after an HTML comment) and the "created_at" age string.
    episode_pattern = (
        'class="episode" href="([^"]+).*?'
        'src="([^"]+).*?'
        'title="([^"]+).*?'
        '-->([^<]+).*?'
        'created_at">([^<]+)'
    )

    itemlist = []
    for ep_url, ep_thumb, ep_title, ep_number, ep_age in scrapertools.find_multiple_matches(page_html, episode_pattern):
        label = ep_title + " %s (%s)" % (ep_number.strip(), ep_age.strip())
        itemlist.append(Item(action = "findvideos",
                             channel = item.channel,
                             title = label,
                             thumbnail = ep_thumb,
                             url = ep_url))
    return itemlist
|
||||
|
||||
def series(item):
    """List the series shown on the page at ``item.url``.

    Each match becomes a "temporadas" Item carrying ``contentSerieName`` so
    tmdb can enrich it. A "Página siguiente" entry is appended by bumping
    the ``?peli=`` page counter in the url.

    Fix over the original: the tmdb lookup AND the pagination entry are both
    guarded by ``if itemlist:`` (consistent with the guarded ``peliculas``
    version), so an empty scrape no longer yields a dangling next-page link.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Restrict matching to the series listing section of the page.
    bloque = scrapertools.find_single_match(data, 'loop-posts series.*?panel-pagination pagination-bottom')
    patron = 'a href="([^"]+).*?'
    patron += '((?:http|https)://image.tmdb.org[^"]+).*?'
    patron += 'title="([^"]+)'
    matches = scrapertools.find_multiple_matches(bloque, patron)
    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        itemlist.append(Item(action = "temporadas",
                             channel = item.channel,
                             thumbnail = scrapedthumbnail,
                             title = scrapedtitle,
                             contentSerieName = scrapedtitle,
                             url = scrapedurl))
    if itemlist:
        tmdb.set_infoLabels(itemlist)
        # Next page: increment the numeric value after "peli=" in the url.
        page = int(scrapertools.find_single_match(item.url, "peli=([0-9]+)")) + 1
        next_page = scrapertools.find_single_match(item.url, ".*?peli=")
        next_page += "%s" % page
        itemlist.append(Item(action = "series",
                             channel = item.channel,
                             title = "Página siguiente",
                             url = next_page))
    return itemlist
|
||||
|
||||
|
||||
def temporadas(item):
    """List the seasons of the series at ``item.url``.

    Produces one "capitulos" Item per season found in the "Lista de
    Temporadas" block, plus (when the videolibrary is enabled) an entry to
    add the whole series to the library.
    """
    logger.info()
    itemlist = []
    page = httptools.downloadpage(item.url).data
    seasons_block = scrapertools.find_single_match(page, 'Lista de Temporadas.*?</ul>')
    for season_title in scrapertools.find_multiple_matches(seasons_block, '</i> (.*?[0-9]+)'):
        # The season number is the digits embedded in the title text.
        season_number = scrapertools.find_single_match(season_title, '[0-9]+')
        item.infoLabels["season"] = season_number
        season_url = item.url + "?temporada=%s" % season_number
        itemlist.append(item.clone(action = "capitulos",
                                   title = season_title,
                                   url = season_url))
    tmdb.set_infoLabels(itemlist)
    if config.get_videolibrary_support():
        # Blank spacer row, then the add-to-library action.
        itemlist.append(Item(channel=item.channel, title =""))
        itemlist.append(item.clone(action = "add_serie_to_library",
                                   channel = item.channel,
                                   extra = "episodios",
                                   title = '[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
                                   url = item.url))
    return itemlist
|
||||
|
||||
|
||||
def episodios(item):
    """Flatten every season of the series into one episode list.

    Delegates to :func:`temporadas` for the season Items and to
    :func:`capitulos` for each season's episodes; used by the videolibrary
    ("episodios" extra in :func:`temporadas`).
    """
    logger.info()
    all_episodes = []
    for season_item in temporadas(item):
        all_episodes.extend(capitulos(season_item))
    return all_episodes
|
||||
|
||||
|
||||
def capitulos(item):
    """List the episodes of one season.

    Scrapes the season page at ``item.url`` and returns "findvideos" Items
    titled ``<season>x<episode> <title>``.

    Fix over the original: ``scrapedtitle.split(":")[1]`` raised IndexError
    for episode titles without a colon; the split is now applied only when a
    colon is present, keeping the original text otherwise.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<td><a href="([^"]+).*?'
    patron += '<b>(.*?)</a>'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedtitle in matches:
        scrapedtitle = scrapedtitle.replace("</b>", "")
        episode = scrapertools.find_single_match(scrapedtitle, "Capitulo ([0-9]+)")
        # Keep only the text after the first colon, when there is one
        # (same segment the original took; guarded against missing ':').
        parts = scrapedtitle.split(":")
        if len(parts) > 1:
            scrapedtitle = parts[1]
        scrapedtitle = "%sx%s %s" % (item.infoLabels["season"], episode, scrapedtitle)
        item.infoLabels["episode"] = episode
        itemlist.append(item.clone(action = "findvideos",
                                   title = scrapedtitle,
                                   url = scrapedurl))
    tmdb.set_infoLabels(itemlist)
    return itemlist
|
||||
|
||||
|
||||
def newest(categoria):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
@@ -83,6 +194,7 @@ def filtro(item):
|
||||
def peliculas(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
infoLabels = dict()
|
||||
data = httptools.downloadpage(item.url).data
|
||||
bloque = scrapertools.find_single_match(data, 'loop-posts".*?panel-pagination pagination-bottom')
|
||||
patron = 'a href="([^"]+)".*?'
|
||||
@@ -98,23 +210,31 @@ def peliculas(item):
|
||||
else:
|
||||
year = 0
|
||||
fulltitle = scrapertools.find_single_match(scrapedtitle, "(.*?) \(")
|
||||
itemlist.append(Item(action = "findvideos",
|
||||
if "serie" in scrapedurl:
|
||||
action = "temporadas"
|
||||
infoLabels ['tvshowtitle'] = scrapedtitle
|
||||
else:
|
||||
action = "findvideos"
|
||||
infoLabels ['tvshowtitle'] = ""
|
||||
infoLabels ['year'] = year
|
||||
itemlist.append(Item(action = action,
|
||||
channel = item.channel,
|
||||
fulltitle = fulltitle,
|
||||
thumbnail = scrapedthumbnail,
|
||||
infoLabels = {'year': year},
|
||||
infoLabels = infoLabels,
|
||||
title = scrapedtitle,
|
||||
url = scrapedurl
|
||||
))
|
||||
tmdb.set_infoLabels(itemlist)
|
||||
page = int(scrapertools.find_single_match(item.url,"peli=([0-9]+)")) + 1
|
||||
next_page = scrapertools.find_single_match(item.url,".*?peli=")
|
||||
next_page += "%s" %page
|
||||
itemlist.append(Item(action = "peliculas",
|
||||
channel = item.channel,
|
||||
title = "Página siguiente",
|
||||
url = next_page
|
||||
))
|
||||
if itemlist:
|
||||
tmdb.set_infoLabels(itemlist)
|
||||
page = int(scrapertools.find_single_match(item.url,"peli=([0-9]+)")) + 1
|
||||
next_page = scrapertools.find_single_match(item.url,".*?peli=")
|
||||
next_page += "%s" %page
|
||||
itemlist.append(Item(action = "peliculas",
|
||||
channel = item.channel,
|
||||
title = "Página siguiente",
|
||||
url = next_page
|
||||
))
|
||||
return itemlist
|
||||
|
||||
|
||||
|
||||
@@ -222,11 +222,14 @@ def newest(categoria):
|
||||
def findvideos(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
#itemlist = get_url(item)
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r'"|\n|\r|\t| |<br>|\s{2,}', "", data)
|
||||
url_m3u8 = scrapertools.find_single_match(data, '<source src=(.*?) type=application/x-mpegURL\s*/>')
|
||||
player_vip = scrapertools.find_single_match(data, 'src=(https:\/\/content.jwplatform.com\/players.*?js)')
|
||||
data_m3u8 = httptools.downloadpage(player_vip, headers= {'referer':item.url}).data
|
||||
data_m3u8 = re.sub(r'"|\n|\r|\t| |<br>|\s{2,}', "", data_m3u8)
|
||||
url_m3u8 = scrapertools.find_single_match(data_m3u8,',sources:.*?file: (.*?),')
|
||||
itemlist.append(item.clone(url=url_m3u8, action='play'))
|
||||
|
||||
patron = 'id=(tab\d+)><div class=movieplay><(?:iframe|script) src=(.*?)(?:scrolling|><\/script>)'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
# httptools
|
||||
# --------------------------------------------------------------------------------
|
||||
|
||||
import inspect
|
||||
import cookielib
|
||||
import gzip
|
||||
import os
|
||||
@@ -15,6 +16,7 @@ from threading import Lock
|
||||
|
||||
from core.cloudflare import Cloudflare
|
||||
from platformcode import config, logger
|
||||
from platformcode.logger import WebErrorException
|
||||
|
||||
cookies_lock = Lock()
|
||||
|
||||
@@ -23,7 +25,7 @@ ficherocookies = os.path.join(config.get_data_path(), "cookies.dat")
|
||||
|
||||
# Default headers, used when the caller does not specify any.
default_headers = dict()
# NOTE(review): the source carried two consecutive User-Agent assignments
# (merge/diff residue); only the last one ever took effect, so it is kept.
default_headers["User-Agent"] = "Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3163.100 Safari/537.36"
default_headers["Accept"] = "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8"
default_headers["Accept-Language"] = "es-ES,es;q=0.8,en-US;q=0.5,en;q=0.3"
default_headers["Accept-Charset"] = "UTF-8"
|
||||
@@ -205,8 +207,18 @@ def downloadpage(url, post=None, headers=None, timeout=None, follow_redirects=Tr
|
||||
logger.info("Response error: %s" % (response["error"]))
|
||||
logger.info("Response data length: %s" % (len(response["data"])))
|
||||
logger.info("Response headers:")
|
||||
server_cloudflare = ""
|
||||
for header in response["headers"]:
|
||||
logger.info("- %s: %s" % (header, response["headers"][header]))
|
||||
if "cloudflare" in response["headers"][header]:
|
||||
server_cloudflare = "cloudflare"
|
||||
|
||||
is_channel = inspect.getmodule(inspect.currentframe().f_back)
|
||||
# error 4xx o 5xx se lanza excepcion
|
||||
# response["code"] = 400
|
||||
if type(response["code"]) == int and "\\servers\\" not in str(is_channel):
|
||||
if response["code"] > 399 and (server_cloudflare == "cloudflare" and response["code"] != 503):
|
||||
raise WebErrorException(urlparse.urlparse(url)[1])
|
||||
|
||||
if cookies:
|
||||
save_cookies()
|
||||
|
||||
@@ -14,8 +14,7 @@ from core import videolibrarytools
|
||||
from core.item import Item
|
||||
from platformcode import config, logger
|
||||
from platformcode import platformtools
|
||||
from channelselector import get_thumb
|
||||
|
||||
from platformcode.logger import WebErrorException
|
||||
|
||||
|
||||
def start():
|
||||
@@ -298,7 +297,19 @@ def run(item=None):
|
||||
logger.error("Codigo de error HTTP : %d" % e.code)
|
||||
# "El sitio web no funciona correctamente (error http %d)"
|
||||
platformtools.dialog_ok("alfa", config.get_localized_string(30051) % e.code)
|
||||
except WebErrorException, e:
|
||||
import traceback
|
||||
logger.error(traceback.format_exc())
|
||||
|
||||
patron = 'File "' + os.path.join(config.get_runtime_path(), "channels", "").replace("\\",
|
||||
"\\\\") + '([^.]+)\.py"'
|
||||
canal = scrapertools.find_single_match(traceback.format_exc(), patron)
|
||||
|
||||
platformtools.dialog_ok(
|
||||
"Error en el canal " + canal,
|
||||
"La web de la que depende parece no estar disponible, puede volver a intentarlo, "
|
||||
"si el problema persiste verifique mediante un navegador la web: %s. "
|
||||
"Si la web funciona correctamente informe el error en: www.alfa-addon.com" %(e))
|
||||
except:
|
||||
import traceback
|
||||
logger.error(traceback.format_exc())
|
||||
|
||||
@@ -76,3 +76,8 @@ def error(texto=""):
|
||||
|
||||
xbmc.log("######## ERROR #########", xbmc.LOGERROR)
|
||||
xbmc.log(texto, xbmc.LOGERROR)
|
||||
|
||||
|
||||
class WebErrorException(Exception):
    """Raised when a remote web page answers with an HTTP error status.

    Behaves exactly like :class:`Exception`; the subclass exists only so
    callers can catch web-layer failures specifically.
    """

    def __init__(self, *args, **kwargs):
        super(WebErrorException, self).__init__(*args, **kwargs)
|
||||
|
||||
@@ -11,7 +11,7 @@ def test_video_exists(page_url):
|
||||
|
||||
response = httptools.downloadpage(page_url)
|
||||
|
||||
if not response.sucess or "Not Found" in response.data or "File was deleted" in response.data:
|
||||
if not response.sucess or "Not Found" in response.data or "File was deleted" in response.data or "is no longer available" in response.data:
|
||||
return False, "[Userscloud] El fichero no existe o ha sido borrado"
|
||||
|
||||
return True, ""
|
||||
|
||||
Reference in New Issue
Block a user