Merge remote-tracking branch 'alfa-addon/master'
@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="2.5.3" provider-name="Alfa Addon">
<addon id="plugin.video.alfa" name="Alfa" version="2.5.4" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -19,12 +19,12 @@
</assets>
<news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
» allcalidad » inkaseries
» animemovil » descargas2020
» descargasmix » mispelisyseries
» thevideome » cinecalidad
» ciberpeliculashd » vidlox
» descargasmix » cinefox
» allpeliculas » cinemahd
» allcalidad
¤ arreglos internos

¤ Agradecimientos a @GeorgeRamga por animemovil.
</news>
<description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>
<summary lang="en">Browse web pages using Kodi</summary>

@@ -12,6 +12,18 @@
],
"settings": [
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"LAT"
]
},
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",

@@ -1,6 +1,8 @@
# -*- coding: utf-8 -*-

from channelselector import get_thumb
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
@@ -8,6 +10,13 @@ from core import tmdb
from core.item import Item
from platformcode import config, logger

IDIOMAS = {'Latino': 'LAT'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['rapidvideo', 'streamango', 'fastplay', 'flashx', 'openload', 'vimeo', 'netutv']

__channel__='allcalidad'

host = "http://allcalidad.com/"
@@ -20,6 +29,7 @@ except:

def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(Item(channel = item.channel, title = "Novedades", action = "peliculas", url = host, thumbnail = get_thumb("newest", auto = True)))
itemlist.append(Item(channel = item.channel, title = "Por género", action = "generos_years", url = host, extra = "Genero", thumbnail = get_thumb("genres", auto = True) ))
@@ -27,6 +37,7 @@ def mainlist(item):
itemlist.append(Item(channel = item.channel, title = "Favoritas", action = "peliculas", url = host + "/favorites", thumbnail = get_thumb("favorites", auto = True) ))
itemlist.append(Item(channel = item.channel, title = ""))
itemlist.append(Item(channel = item.channel, title = "Buscar", action = "search", url = host + "?s=", thumbnail = get_thumb("search", auto = True)))
autoplay.show_option(item.channel, itemlist)
return itemlist

def newest(categoria):
@@ -146,6 +157,13 @@ def findvideos(item):
))
tmdb.set_infoLabels(itemlist, __modo_grafico__)
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)

# Requerido para AutoPlay

autoplay.start(itemlist, item)

if itemlist:
itemlist.append(Item(channel = item.channel))
itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
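The hunks above wire allcalidad into the shared FilterTools and AutoPlay helpers. A minimal sketch of that recurring pattern, assuming only the filtertools/autoplay calls the diff itself makes (menu entries and scraping are elided):

from channels import autoplay
from channels import filtertools

IDIOMAS = {'Latino': 'LAT'}          # display name -> language code
list_language = IDIOMAS.values()     # codes FilterTools may filter on
list_quality = []
list_servers = ['rapidvideo', 'streamango', 'openload']

def mainlist(item):
    itemlist = []
    autoplay.init(item.channel, list_servers, list_quality)  # register channel with AutoPlay
    # ... append the channel's menu entries here ...
    autoplay.show_option(item.channel, itemlist)             # "AutoPlay settings" entry at the end
    return itemlist

def findvideos(item):
    itemlist = []
    # ... scrape links, tagging each Item with language=IDIOMAS[...] ...
    itemlist = filtertools.get_links(itemlist, item, list_language)  # apply the language filter
    autoplay.start(itemlist, item)                                   # auto-play if the user enabled it
    return itemlist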
@@ -19,6 +19,18 @@
"enabled": true,
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"LAT"
]
},
{
"id": "include_in_newest_latino",
"type": "bool",

@@ -8,6 +8,9 @@ from core import tmdb
from core.item import Item
from channelselector import get_thumb
from platformcode import config, logger
from channels import autoplay
from channels import filtertools

__modo_grafico__ = config.get_setting('modo_grafico', "allpeliculas")
__perfil__ = int(config.get_setting('perfil', "allpeliculas"))
@@ -18,11 +21,18 @@ perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00'],
['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE']]
color1, color2, color3 = perfil[__perfil__]

IDIOMAS = {"Castellano": "CAST", "Latino": "LAT", "Subtitulado": "VOSE", "Ingles": "VO"}
IDIOMAS = {"Latino": "LAT"}
list_language = IDIOMAS.values()

list_quality = []

SERVERS = {"26": "powvideo", "45": "okru", "75": "openload", "12": "netutv", "65": "thevideos",
"67": "spruto", "71": "stormo", "73": "idowatch", "48": "okru", "55": "openload",
"20": "nowvideo", "84": "fastplay", "96": "raptu", "94": "tusfiles"}

list_servers = ['powvideo', 'okru', 'openload', 'netutv', 'thevideos', 'spruto', 'stormo', 'idowatch', 'nowvideo',
'fastplay', 'raptu', 'tusfiles']

host = "http://allpeliculas.com/"

def mainlist(item):
@@ -30,6 +40,8 @@ def mainlist(item):
itemlist = []
item.text_color = color1

autoplay.init(item.channel, list_servers, list_quality)

itemlist.append(item.clone(title="Películas", action="lista", fanart="http://i.imgur.com/c3HS8kj.png",
url= host + "movies/newmovies?page=1", extra1 = 0,
thumbnail=get_thumb('movies', auto=True)))
@@ -40,6 +52,8 @@ def mainlist(item):
itemlist.append(item.clone(title="", action=""))
itemlist.append(item.clone(title="Buscar...", action="search", thumbnail=get_thumb('search', auto=True)))

autoplay.show_option(item.channel, itemlist)

return itemlist

@@ -136,11 +150,9 @@ def findvideos(item):
fulltitle = item.title,
contentThumbnail = item.thumbnail,
url = url,
language = IDIOMAS['Latino']
))
try:
tmdb.set_infoLabels(itemlist, __modo_grafico__)
except:
pass
tmdb.set_infoLabels(itemlist, seekTmdb = True)
itemlist = servertools.get_servers_itemlist(itemlist)
itemlist.append(Item(channel=item.channel))
if config.get_videolibrary_support():
@@ -148,6 +160,13 @@ def findvideos(item):
action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
fulltitle = item.fulltitle
))
# Requerido para FilterTools

itemlist = filtertools.get_links(itemlist, item, list_language)

# Requerido para AutoPlay

autoplay.start(itemlist, item)
return itemlist
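allpeliculas keys each scraped link to a numeric server id through the SERVERS table added above. A hypothetical illustration of how such an id-to-name table resolves scraped ids (only the dict itself appears in the diff; the lookup helper below is ours):

SERVERS = {"26": "powvideo", "45": "okru", "75": "openload", "12": "netutv"}

def resolve_server(server_id):
    # Unknown ids return None and are skipped rather than guessed.
    return SERVERS.get(server_id)

print(resolve_server("45"))  # -> "okru"
print(resolve_server("99"))  # -> None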
@@ -615,8 +615,11 @@ def get_languages(channel):
list_language = ['No filtrar']
list_controls, dict_settings = channeltools.get_channel_controls_settings(channel)
for control in list_controls:
if control["id"] == 'filter_languages':
list_language = control["lvalues"]
try:
if control["id"] == 'filter_languages':
list_language = control["lvalues"]
except:
pass

return list_language
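This hunk guards the settings scan because controls parsed from a channel's JSON are plain dicts with no guaranteed keys: a separator or label control can lack "id", a malformed list can lack "lvalues", and a single KeyError used to abort the scan for the whole channel. A runnable sketch of the hardened loop, with a made-up controls list:

def get_languages(list_controls):
    list_language = ['No filtrar']
    for control in list_controls:
        try:
            if control["id"] == 'filter_languages':
                list_language = control["lvalues"]
        except:
            pass  # malformed control: skip it instead of failing the channel
    return list_language

controls = [{"type": "label"},  # no "id": raised KeyError before this change
            {"id": "filter_languages", "type": "list",
             "lvalues": ["No filtrar", "LAT"]}]
print(get_languages(controls))  # -> ['No filtrar', 'LAT']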
@@ -29,36 +29,17 @@ def mainlist(item):
extra = "qualitys", thumbnail=get_thumb('quality', auto=True)))
itemlist.append(Item(channel = item.channel, title = " Por idioma", action = "filtro", url = host,
extra = "languages", thumbnail=get_thumb('language', auto=True)))
itemlist.append(Item(channel = item.channel, title = " Por año", action = "filtro", url = host,
extra = "years", thumbnail=get_thumb('year', auto=True)))
itemlist.append(Item(channel = item.channel, title = ""))
itemlist.append(Item(channel = item.channel, title = "Series", text_bold = True, folder = False))
itemlist.append(Item(channel = item.channel, title = " Novedades", action = "series",
url = host +"/series/?peli=1", thumbnail=get_thumb('newest', auto=True)))
itemlist.append(Item(channel = item.channel, title = " Nuevos Capitulos", action = "nuevos_capitulos",
url = host + "/series/?peli=1", thumbnail=get_thumb('new episodes', auto=True)))
itemlist.append(Item(channel = item.channel, title = ""))
itemlist.append(Item(channel = item.channel, title = "Buscar", action = "search", url = host + "/?s=",
thumbnail=get_thumb('search', auto=True)))
return itemlist

def nuevos_capitulos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = 'class="episode" href="([^"]+).*?'
patron += 'src="([^"]+).*?'
patron += 'title="([^"]+).*?'
patron += '-->([^<]+).*?'
patron += 'created_at">([^<]+)'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedepisode, scrapeddays in matches:
scrapedtitle = scrapedtitle + " %s (%s)" %(scrapedepisode.strip(), scrapeddays.strip())
itemlist.append(Item(action = "findvideos",
channel = item.channel,
title = scrapedtitle,
thumbnail = scrapedthumbnail,
url = scrapedurl
))
return itemlist

def series(item):
logger.info()
@@ -70,7 +51,7 @@ def series(item):
patron += 'title="([^"]+)'
matches = scrapertools.find_multiple_matches(bloque, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
itemlist.append(Item(action = "temporadas",
itemlist.append(Item(action = "capitulos",
channel = item.channel,
thumbnail = scrapedthumbnail,
title = scrapedtitle,
@@ -84,25 +65,41 @@ def series(item):
next_page += "%s" %page
itemlist.append(Item(action = "series",
channel = item.channel,
title = "Página siguiente",
title = "Página siguiente >>",
url = next_page
))
return itemlist

def temporadas(item):
def episodios(item):
logger.info()
itemlist = []
itemlist = capitulos(item)
return itemlist

def capitulos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data, 'Lista de Temporadas.*?</ul>')
matches = scrapertools.find_multiple_matches(bloque, '</i> (.*?[0-9]+)')
for scrapedtitle in matches:
season = scrapertools.find_single_match(scrapedtitle, '[0-9]+')
bloque = scrapertools.find_single_match(data, 'Lista de Temporadas.*?Content principal')
patron = '<a href="([^"]+).*?'
patron += '<span>(.*?)</span>'
matches = scrapertools.find_multiple_matches(bloque, patron)
for scrapedurl, scrapedtitle in matches:
scrapedtitle = scrapedtitle.strip()
s_e = scrapertools.get_season_and_episode(scrapedurl.replace("-",""))
if s_e != "":
season = s_e.split("x")[0]
episode = s_e.split("x")[1]
else:
season = episode = ""
scrapedtitle = s_e + " - " + scrapedtitle
item.infoLabels["episode"] = episode
item.infoLabels["season"] = season
url = item.url + "?temporada=%s" %season
itemlist.append(item.clone(action = "capitulos",
itemlist.append(item.clone(action = "findvideos",
title = scrapedtitle,
url = url
url = scrapedurl
))
tmdb.set_infoLabels(itemlist)
if config.get_videolibrary_support():
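The reworked capitulos() above stops scraping season numbers from the menu and instead derives season and episode straight from each chapter URL via scrapertools.get_season_and_episode. A minimal stand-in for that helper as the hunk uses it — the split("x") implies it returns a "3x07"-style token or "" when there is none; the real helper in core/scrapertools.py accepts more spellings, so this regex is an assumption:

import re

def get_season_and_episode(text):
    match = re.search(r'(\d+)x(\d{1,2})', text)
    if not match:
        return ""
    return "%sx%s" % match.groups()

s_e = get_season_and_episode("serie-3x07-nombre".replace("-", ""))  # -> "3x07"
if s_e != "":
    season, episode = s_e.split("x")  # season = "3", episode = "07"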
@@ -116,36 +113,6 @@ def temporadas(item):
return itemlist

def episodios(item):
logger.info()
itemlist = []
templist = temporadas(item)
for tempitem in templist:
itemlist += capitulos(tempitem)
return itemlist

def capitulos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<td><a href="([^"]+).*?'
patron += '<b>(.*?)</a>'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle in matches:
scrapedtitle = scrapedtitle.replace("</b>", "")
episode = scrapertools.find_single_match(scrapedtitle, "Capitulo ([0-9]+)")
scrapedtitle = scrapedtitle.split(":")[1]
scrapedtitle = "%sx%s %s" %(item.infoLabels["season"], episode, scrapedtitle)
item.infoLabels["episode"] = episode
itemlist.append(item.clone(action = "findvideos",
title = scrapedtitle,
url = scrapedurl
))
tmdb.set_infoLabels(itemlist)
return itemlist

def newest(categoria):
logger.info()
itemlist = []
@@ -183,17 +150,30 @@ def search(item, texto):
def filtro(item):
logger.info()
itemlist = []
filter = ""
filter_end = "data-uk-dropdown"
if item.extra == "categories":
filter = "genero"
elif item.extra == "qualitys":
filter = "calidad"
elif item.extra == "languages":
filter = "audio"
elif item.extra == "years":
filter = "ano"
filter_end = "<div style"
filter = host + "/?" + filter + "="
data = httptools.downloadpage(item.url).data
patron = 'uk-navbar-nav-subtitle taxonomy-menu-title">%s.*?</ul>' %item.extra
patron = 'uk-button btn-filter %s.*?%s' %(item.extra, filter_end)
bloque = scrapertools.find_single_match(data, patron)
patron = "href='([^']+)"
patron += "'>([^<]+)"
patron = 'id="([^"]+).*?'
patron += 'label for.*?>([^<]+)'
matches = scrapertools.find_multiple_matches(bloque, patron)
for url, titulo in matches:
url = filter + url
itemlist.append(Item(channel = item.channel,
action = "peliculas",
title = titulo,
url = url + "/?peli=1"
url = url + "&peli=1"
))
return itemlist

@@ -202,8 +182,11 @@ def peliculas(item):
logger.info()
itemlist = []
infoLabels = dict()
filter = "uk-icon-angle-right next"
if item.extra == "busca":
filter = '<div class="post">'
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data, 'loop-posts".*?panel-pagination pagination-bottom')
bloque = scrapertools.find_single_match(data, '%s.*?panel-pagination pagination-bottom' %(filter))
patron = 'a href="([^"]+)".*?'
patron += 'img alt="([^"]+)".*?'
patron += '((?:http|https)://image.tmdb.org[^"]+)".*?'
@@ -218,7 +201,7 @@ def peliculas(item):
year = 0
fulltitle = scrapertools.find_single_match(scrapedtitle, "(.*?) \(")
if "serie" in scrapedurl:
action = "temporadas"
action = "capitulos"
infoLabels ['tvshowtitle'] = scrapedtitle
else:
action = "findvideos"
@@ -239,7 +222,7 @@ def peliculas(item):
next_page += "%s" %page
itemlist.append(Item(action = "peliculas",
channel = item.channel,
title = "Página siguiente",
title = "Página siguiente >>",
url = next_page
))
return itemlist
@@ -249,7 +232,9 @@ def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = 'src="([^&]+)'
url = scrapertools.find_single_match(data, 'iframe-.*?src="([^"]+)')
data = httptools.downloadpage(url).data
patron = '<a href="([^"]+)'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl in matches:
title = "Ver en: %s"

@@ -248,12 +248,12 @@ def peliculas(item):
return itemlist

def dec(item):
def dec(item, dec_value):
link = []
val = item.split(' ')
link = map(int, val)
for i in range(len(link)):
link[i] = link[i] - 6
link[i] = link[i] - int(dec_value)
real = ''.join(map(chr, link))
return (real)

@@ -302,10 +302,10 @@ def findvideos(item):
'BitTorrent': '',
'Mega': '',
'MediaFire': ''}

dec_value = scrapertools.find_single_match(data, 'String\.fromCharCode\(parseInt\(str\[i\]\)-(\d+)\)')
for video_cod, server_id in matches:
if server_id not in ['Mega', 'MediaFire', 'Trailer', '']:
video_id = dec(video_cod)
video_id = dec(video_cod, dec_value)

logger.debug('server_id %s' % server_id)
if server_id in server_url:
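The dec() change above parametrizes the shift used to de-obfuscate video ids: the page stores each id as space-separated integers shifted by a constant, and its own script reverses them with String.fromCharCode(parseInt(str[i])-N). Scraping N from the script instead of hardcoding 6 survives the site rotating the value. A runnable sketch with a made-up script sample:

import re

def dec(item, dec_value):
    codes = map(int, item.split(' '))
    return ''.join(chr(c - int(dec_value)) for c in codes)

script = "... String.fromCharCode(parseInt(str[i])-7) ..."
dec_value = re.search(r'String\.fromCharCode\(parseInt\(str\[i\]\)-(\d+)\)',
                      script).group(1)
encoded = ' '.join(str(ord(c) + 7) for c in "abc123")
print(dec(encoded, dec_value))  # -> "abc123"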
@@ -20,6 +20,21 @@
"enabled": true,
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"LAT",
"CAST",
"VOSE",
"VO"
]
},
{
"id": "save_last_search",
"type": "bool",
@@ -70,9 +85,9 @@
"id": "menu_info",
"type": "bool",
"label": "Mostrar menú intermedio película/episodio",
"default": true,
"enabled": true,
"visible": true
"default": false,
"enabled": false,
"visible": false
},
{
"id": "last_page",

@@ -7,9 +7,20 @@ from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
from channels import autoplay
from channels import filtertools

IDIOMAS = {'Latino': 'LAT', 'Castellano':'CAST', 'Vo':'VO', 'Vose': 'VOSE'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['openload', 'powvideo', 'rapidvideo', 'streamango', 'streamcloud', 'flashx', 'gamovideo', 'streamplay']

__modo_grafico__ = config.get_setting('modo_grafico', 'cinefox')
__perfil__ = int(config.get_setting('perfil', "cinefox"))
@@ -24,6 +35,8 @@ if __perfil__ < 3:
else:
color1 = color2 = color3 = color4 = color5 = ""

host = "http://www.cinefox.tv"

@@ -32,6 +45,8 @@ def mainlist(item):
item.text_color = color1
itemlist = []

autoplay.init(item.channel, list_servers, list_quality)

itemlist.append(item.clone(action="seccion_peliculas", title="Películas", fanart="http://i.imgur.com/PjJaW8o.png",
url=host + "/catalogue?type=peliculas", thumbnail=get_thumb('movies', auto=True)))
# Seccion series
@@ -51,6 +66,8 @@ def mainlist(item):
itemlist.append(item.clone(title="Buscar...", action="local_search", thumbnail=get_thumb('search', auto=True)))
itemlist.append(item.clone(title="Configurar canal...", text_color="gold", action="configuracion", folder=False))

autoplay.show_option(item.channel, itemlist)

return itemlist

@@ -107,29 +124,33 @@ def busqueda(item):
scrapedtitle = scrapedtitle.capitalize()
item.infoLabels["year"] = year
plot = scrapertools.htmlclean(plot)
new_item = Item(channel=item.channel, thumbnail= scrapedthumbnail, plot=plot)
if "/serie/" in scrapedurl:
action = "episodios"
show = scrapedtitle
new_item.show = scrapedtitle
new_item.contentType = 'tvshow'
scrapedurl += "/episodios"
title = " [Serie]"
contentType = "tvshow"
new_item.action = 'episodios'
elif "/pelicula/" in scrapedurl:
action = "menu_info"
show = ""
title = " [Película]"
contentType = "movie"
new_item.action = "findvideos"
filter_thumb = scrapedthumbnail.replace("https://image.tmdb.org/t/p/w200_and_h300_bestv2", "")
filter_list = {"poster_path": filter_thumb}
filter_list = filter_list.items()
#title = " [Película]"
new_item.contentType = "movie"
new_item.extra='media'
new_item.contentTitle= scrapedtitle
new_item.infoLabels['filtro'] = filter_list
else:
continue
title = scrapedtitle + title + " (" + year + ")"
itemlist.append(item.clone(action=action, title=title, url=scrapedurl, thumbnail=scrapedthumbnail,
contentTitle=scrapedtitle, fulltitle=scrapedtitle,
plot=plot, show=show, text_color=color2, contentType=contentType))
new_item.title = scrapedtitle + " (" + year + ")"
new_item.url = scrapedurl
itemlist.append(new_item)
#itemlist.append(item.clone(action=action, title=title, url=scrapedurl, thumbnail=scrapedthumbnail,
# contentTitle=scrapedtitle, fulltitle=scrapedtitle,
# plot=plot, show=show, text_color=color2, contentType=contentType))

try:
from core import tmdb
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
except:
pass
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

next_page = scrapertools.find_single_match(data, 'href="([^"]+)"[^>]+>Más resultados')
if next_page != "":
@@ -330,10 +351,7 @@ def peliculas(item):
if "valores" in item and item.valores:
itemlist.append(item.clone(action="", title=item.valores, text_color=color4))

if __menu_info__:
action = "menu_info"
else:
action = "findvideos"
action = "findvideos"

data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_multiple_matches(data,
@@ -344,14 +362,15 @@ def peliculas(item):
matches = scrapertools.find_multiple_matches(match, patron)
for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
url = urlparse.urljoin(host, scrapedurl)
filter_thumb = scrapedthumbnail.replace("https://image.tmdb.org/t/p/w200_and_h300_bestv2", "")
filter_list = {"poster_path": filter_thumb}
filter_list = filter_list.items()
itemlist.append(Item(channel=item.channel, action=action, title=scrapedtitle, url=url, extra="media",
thumbnail=scrapedthumbnail, contentTitle=scrapedtitle, fulltitle=scrapedtitle,
text_color=color2, contentType="movie"))
text_color=color2, contentType="movie", infoLabels={'filtro':filter_list}))
else:
patron = '<div class="audio-info">(.*?)</div>(.*?)' \
'src="([^"]+)".*?href="([^"]+)">([^<]+)</a>'
patron = '<div class="audio-info">(.*?)<div (class="quality.*?)src="([^"]+)".*?href="([^"]+)">([^<]+)</a>'
matches = scrapertools.find_multiple_matches(match, patron)

for idiomas, calidad, scrapedthumbnail, scrapedurl, scrapedtitle in matches:
calidad = scrapertools.find_single_match(calidad, '<div class="quality-info".*?>([^<]+)</div>')
if calidad:
@@ -361,17 +380,25 @@ def peliculas(item):
if "medium-vs" in idiomas: audios.append('VOSE')
if "medium-la" in idiomas: audios.append('LAT')
if "medium-en" in idiomas or 'medium-"' in idiomas:
audios.append('V.O')
audios.append('VO')
title = "%s [%s]" % (scrapedtitle, "/".join(audios))

if calidad:
title += " (%s)" % calidad
url = urlparse.urljoin(host, scrapedurl)
filter_thumb = scrapedthumbnail.replace("https://image.tmdb.org/t/p/w200_and_h300_bestv2", "")
filter_list = {"poster_path": filter_thumb}
filter_list = filter_list.items()

itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, extra="media",
thumbnail=scrapedthumbnail, contentTitle=scrapedtitle, fulltitle=scrapedtitle,
text_color=color2, contentType="movie", quality=calidad, language=audios))
text_color=color2, contentType="movie", quality=calidad, language=audios,
infoLabels={'filtro':filter_list}))

next_page = scrapertools.find_single_match(data, 'href="([^"]+)"[^>]+>Siguiente')

tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

if next_page != "" and item.title != "":
itemlist.append(Item(channel=item.channel, action="peliculas", title=">> Siguiente", url=next_page,
thumbnail=item.thumbnail, extra=item.extra, text_color=color3))
@@ -387,10 +414,10 @@ def ultimos(item):
logger.info()
item.text_color = color2

if __menu_info__:
action = "menu_info_episode"
else:
action = "episodios"
# if __menu_info__:
# action = "menu_info_episode"
# else:
action = "episodios"

itemlist = []
data = httptools.downloadpage(item.url).data
@@ -407,20 +434,16 @@ def ultimos(item):
if "medium-vs" in idiomas: audios.append('VOSE')
if "medium-la" in idiomas: audios.append('LAT')
if "medium-en" in idiomas or 'medium-"' in idiomas:
audios.append('V.O')
audios.append('VO')
title = "%s - %s" % (show, re.sub(show, '', scrapedtitle))
if audios:
title += " [%s]" % "/".join(audios)
url = urlparse.urljoin(host, scrapedurl)
itemlist.append(item.clone(action=action, title=title, url=url, thumbnail=scrapedthumbnail,
contentTitle=show, fulltitle=show, show=show,
contentSerieName=show, fulltitle=show, show=show,
text_color=color2, extra="ultimos", contentType="tvshow"))

try:
from core import tmdb
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
except:
pass
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

next_page = scrapertools.find_single_match(data, 'href="([^"]+)"[^>]+>Siguiente')
if next_page != "":
@@ -444,12 +467,12 @@ def series(item):
for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
url = urlparse.urljoin(host, scrapedurl + "/episodios")
itemlist.append(Item(channel=item.channel, action="episodios", title=scrapedtitle, url=url,
thumbnail=scrapedthumbnail, contentTitle=scrapedtitle, fulltitle=scrapedtitle,
thumbnail=scrapedthumbnail, contentSerieName=scrapedtitle, fulltitle=scrapedtitle,
show=scrapedtitle, text_color=color2, contentType="tvshow"))

try:
from core import tmdb
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
except:
pass

@@ -512,10 +535,10 @@ def episodios(item):
data = httptools.downloadpage(item.url).data
data_season = data[:]

if "episodios" in item.extra or not __menu_info__ or item.path:
action = "findvideos"
else:
action = "menu_info_episode"
#if "episodios" in item.extra or not __menu_info__ or item.path:
action = "findvideos"
# else:
# action = "menu_info_episode"

seasons = scrapertools.find_single_match(data, '<a href="([^"]+)"[^>]+><span class="season-toggle')
for i, url in enumerate(seasons):
@@ -534,12 +557,7 @@ def episodios(item):
new_item.extra = "episode|"
itemlist.append(new_item)

if "episodios" not in item.extra and not item.path:
try:
from core import tmdb
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
except:
pass
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

itemlist.reverse()
if "episodios" not in item.extra and not item.path:
@@ -555,6 +573,8 @@ def episodios(item):
extra="episodios###episodios",
contentTitle=item.fulltitle))

return itemlist

@@ -612,7 +632,6 @@ def menu_info_episode(item):
def findvideos(item):
logger.info()
itemlist = []

if not "|" in item.extra and not __menu_info__:
data = httptools.downloadpage(item.url, add_referer=True).data
year = scrapertools.find_single_match(data, '<div class="media-summary">.*?release.*?>(\d+)<')
@@ -634,7 +653,7 @@ def findvideos(item):
if "|" in item.extra:
extra = item.extra[:-1]
url = host + "/sources/list?id=%s&type=%s&order=%s" % (id, extra, "streaming")
itemlist.extend(get_enlaces(item, url, "Online"))
itemlist=(get_enlaces(item, url, "Online"))
url = host + "/sources/list?id=%s&type=%s&order=%s" % (id, extra, "download")
itemlist.extend(get_enlaces(item, url, "de Descarga"))

@@ -658,12 +677,20 @@ def findvideos(item):
type = item.type.replace("streaming", "Online").replace("download", "de Descarga")
itemlist.extend(get_enlaces(item, url, type))

# Requerido para FilterTools

itemlist = filtertools.get_links(itemlist, item, list_language)

# Requerido para AutoPlay

autoplay.start(itemlist, item)

return itemlist

def get_enlaces(item, url, type):
itemlist = []
itemlist.append(item.clone(action="", title="Enlaces %s" % type, text_color=color1))
#itemlist.append(item.clone(action="", title="Enlaces %s" % type, text_color=color1))
data = httptools.downloadpage(url, add_referer=True).data
if type == "Online":
gg = httptools.downloadpage(item.url, add_referer=True).data
@@ -674,14 +701,15 @@ def get_enlaces(item, url, type):
matches = scrapertools.find_multiple_matches(bloque, patron)
for scrapedopcion, scrapedlanguage, scrapedcalidad in matches:
google_url = scrapertools.find_single_match(bloque, 'id="%s.*?src="([^"]+)' % scrapedopcion)
if "medium-es" in scrapedlanguage: language = "Castellano"
if "medium-en" in scrapedlanguage: language = "Ingles"
if "medium-es" in scrapedlanguage: language = "CAST"
if "medium-en" in scrapedlanguage: language = "VO"
if "medium-vs" in scrapedlanguage: language = "VOSE"
if "medium-la" in scrapedlanguage: language = "Latino"
if "medium-la" in scrapedlanguage: language = "LAT"
titulo = " [%s/%s]" % (language, scrapedcalidad.strip())
itemlist.append(
item.clone(action="play", url=google_url, title=" Ver en Gvideo" + titulo, text_color=color2,
extra="", server="gvideo", language=language, quality=scrapedcalidad.strip()))

patron = '<div class="available-source".*?data-url="([^"]+)".*?class="language.*?title="([^"]+)"' \
'.*?class="source-name.*?>\s*([^<]+)<.*?<span class="quality-text">([^<]+)<'
matches = scrapertools.find_multiple_matches(data, patron)
@@ -694,10 +722,12 @@ def get_enlaces(item, url, type):
if servertools.is_server_enabled(server):
scrapedtitle = " Ver en " + server.capitalize() + " [" + idioma + "/" + calidad + "]"
itemlist.append(item.clone(action="play", url=scrapedurl, title=scrapedtitle, text_color=color2,
extra="", server=server, language=idioma))
extra="", server=server, language=IDIOMAS[idioma]))

if len(itemlist) == 1:
itemlist.append(item.clone(title=" No hay enlaces disponibles", action="", text_color=color2))

return itemlist
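cinefox flags each link's audio with CSS classes (medium-es, medium-en, medium-vs, medium-la), and the hunks above normalize those straight to the short codes FilterTools filters on instead of display names like "Castellano". A sketch of that mapping — the class list mirrors the diff, the helper itself is ours:

CSS_TO_CODE = {
    'medium-es': 'CAST',
    'medium-en': 'VO',
    'medium-vs': 'VOSE',
    'medium-la': 'LAT',
}

def language_from_classes(class_attr):
    for css, code in CSS_TO_CODE.items():
        if css in class_attr:
            return code
    return 'VO'  # assumption: the diff also treats a bare 'medium-"' marker as VO

print(language_from_classes('medium medium-la'))  # -> 'LAT'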
@@ -19,6 +19,18 @@
"enabled": false,
"visible": false
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"LAT"
]
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
@@ -52,4 +64,4 @@
"visible": true
}
]
}
}

@@ -11,13 +11,23 @@ from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channels import autoplay
from channels import filtertools

host = 'http://www.cinemahd.co/'

IDIOMAS = {'Latino': 'LAT'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['fastplay', 'rapidvideo', 'streamplay', 'flashx', 'streamito', 'streamango', 'vidoza']

def mainlist(item):
logger.info()

autoplay.init(item.channel, list_servers, list_quality)

itemlist = list()
itemlist.append(item.clone(title="Ultimas", action="list_all", url=host, thumbnail=get_thumb('last', auto=True)))
itemlist.append(item.clone(title="Generos", action="section", section='genre',
@@ -31,6 +41,8 @@ def mainlist(item):
itemlist.append(item.clone(title="Buscar", action="search", url=host+'?s=',
thumbnail=get_thumb('search', auto=True)))

autoplay.show_option(item.channel, itemlist)

return itemlist

@@ -132,16 +144,28 @@ def findvideos(item):

language = opt_data[0].strip()
quality = opt_data[1].strip()

if url != '':
itemlist.append(item.clone(title='%s', url=url, language=language, quality=quality, action='play'))
if url != '' and 'youtube' not in url:
itemlist.append(item.clone(title='%s', url=url, language=IDIOMAS[language], quality=quality, action='play'))
elif 'youtube' in url:
trailer = item.clone(title='Trailer', url=url, action='play', server='youtube')

itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % '%s [%s] [%s]'%(i.server.capitalize(),
i.language, i.quality))
itemlist.append(trailer)

# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)

# Requerido para AutoPlay

autoplay.start(itemlist, item)

if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))

return itemlist
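The cinemahd findvideos hunk splits YouTube links out of the server list: a youtube URL becomes a fixed "Trailer" entry appended only after servertools.get_servers_itemlist has renamed the playable links, so the trailer is neither run through server detection nor language-filtered. The same split reduced to plain, runnable Python (URLs are hypothetical):

def split_links(links):
    playable, trailer = [], None
    for url in links:
        if url and 'youtube' not in url:
            playable.append(url)   # goes on to server detection and filtering
        elif 'youtube' in url:
            trailer = url          # held out: never renamed or filtered
    return playable, trailer

playable, trailer = split_links([
    'https://streamango.com/embed/abc',
    'https://www.youtube.com/watch?v=xyz',
])
print(playable)  # ['https://streamango.com/embed/abc']
print(trailer)   # 'https://www.youtube.com/watch?v=xyz'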
@@ -6,6 +6,7 @@ import urllib
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
@@ -164,6 +165,7 @@ def entradas(item):
titulo = scrapedtitle + scrapedinfo
titulo = scrapertools.decodeHtmlentities(titulo)
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)

scrapedthumbnail = urllib.unquote(re.sub(r'&b=4|/go\.php\?u=', '', scrapedthumbnail))
if not scrapedthumbnail.startswith("http"):
scrapedthumbnail = "http:" + scrapedthumbnail
@@ -201,7 +203,7 @@ def entradas(item):

if info:
titulo += " [%s]" % unicode(info, "utf-8").capitalize().encode("utf-8")

year = scrapertools.find_single_match(titulo,'\[\d{4}\]')
scrapedthumbnail = urllib.unquote(re.sub(r'&b=4|/go\.php\?u=', '', scrapedthumbnail))
if not scrapedthumbnail.startswith("http"):
scrapedthumbnail = "http:" + scrapedthumbnail
@@ -211,8 +213,8 @@ def entradas(item):

itemlist.append(item.clone(action=action, title=titulo, url=scrapedurl, thumbnail=scrapedthumbnail,
fulltitle=scrapedtitle, contentTitle=scrapedtitle, viewmode="movie_with_plot",
show=show, contentType="movie"))

show=show, contentType="movie", infoLabels={'year':year}))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Paginación
next_page = scrapertools.find_single_match(data, '<a class="nextpostslink".*?href="([^"]+)"')
if next_page:

@@ -1,12 +0,0 @@
{
"id": "elsenordelanillo",
"name": "El señor del anillo",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "elsenordelanillo.png",
"banner": "elsenordelanillo.png",
"categories": [
"movie"
]
}

@@ -1,216 +0,0 @@
# -*- coding: utf-8 -*-

import re
import urlparse

from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger

def mainlist(item):
logger.info()

itemlist = []
itemlist.append(Item(channel=item.channel, action="peliculas", title="Novedades",
url="http://www.xn--elseordelanillo-1qb.com/pelisdelanillo/", viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="generos", title="Por género",
url="http://www.xn--elseordelanillo-1qb.com/pelisdelanillo/"))
itemlist.append(Item(channel=item.channel, action="letras", title="Por letra",
url="http://www.xn--elseordelanillo-1qb.com/pelisdelanillo/"))
itemlist.append(Item(channel=item.channel, action="anyos", title="Por año",
url="http://www.xn--elseordelanillo-1qb.com/pelisdelanillo/"))

return itemlist

def anyos(item):
logger.info()

# Descarga la pagina
data = scrapertools.cache_page(item.url)
# logger.info("data="+data)
data = scrapertools.find_single_match(data, 'scalas por a(.*?)</ul>')
logger.info("data=" + data)

# Extrae las entradas (carpetas)
patron = '<li><a target="[^"]+" title="[^"]+" href="([^"]+)"><strong>([^<]+)</strong>'
matches = re.compile(patron, re.DOTALL).findall(data)
itemlist = []

for scrapedurl, scrapedtitle in matches:
title = scrapedtitle.strip()
thumbnail = ""
plot = ""
url = urlparse.urljoin(item.url, scrapedurl)
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
itemlist.append(
Item(channel=item.channel, action="peliculas", title=title, url=url, thumbnail=thumbnail, plot=plot,
fulltitle=title, viewmode="movie"))

return itemlist

def letras(item):
logger.info()

# Descarga la pagina
data = scrapertools.cache_page(item.url)
# logger.info("data="+data)
data = scrapertools.find_single_match(data, '<div class="bkpelsalf_ul(.*?)</ul>')
logger.info("data=" + data)

# Extrae las entradas (carpetas)
# <li><a target="_top" href="http://www.xn--elseordelanillo-1qb.com/pelisdelanillo/letra/a.html" title="Películas que comienzan con A">A</a>
patron = '<li><a target="[^"]+" href="([^"]+)" title="[^"]+">([^<]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
itemlist = []

for scrapedurl, scrapedtitle in matches:
title = scrapedtitle.strip()
thumbnail = ""
plot = ""
url = urlparse.urljoin(item.url, scrapedurl)
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
itemlist.append(
Item(channel=item.channel, action="peliculas", title=title, url=url, thumbnail=thumbnail, plot=plot,
fulltitle=title, viewmode="movie"))

return itemlist

def generos(item):
logger.info()

# Descarga la pagina
data = scrapertools.cache_page(item.url)
# logger.info("data="+data)

# Extrae las entradas (carpetas)
# <a class='generos' target="_top" href='/pelisdelanillo/categoria/accion/' title='Las Mejores Películas de Acción De Todos Los Años'> Acción </a>
patron = "<a class='generos' target=\"_top\" href='([^']+)' title='[^']+'>([^<]+)</a>"
matches = re.compile(patron, re.DOTALL).findall(data)
itemlist = []

for scrapedurl, scrapedtitle in matches:
title = unicode(scrapedtitle, "iso-8859-1", errors="replace").encode("utf-8").strip()
thumbnail = ""
plot = ""
url = urlparse.urljoin(item.url, scrapedurl)
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
itemlist.append(
Item(channel=item.channel, action="peliculas", title=title, url=url, thumbnail=thumbnail, plot=plot,
fulltitle=title, viewmode="movie"))

return itemlist

def peliculas(item):
logger.info()
itemlist = []

# Descarga la pagina
data = scrapertools.cache_page(item.url)
# logger.info("data="+data)

# Extrae las entradas
'''
<!--<pelicula>-->
<li class="peli_bx br1px brdr10px ico_a">
<h2 class="titpeli bold ico_b"><a target="_top" href="http://www.xn--elseordelanillo-1qb.com/pelisdelanillo/pelicula/1077/el-jardinero-fiel.html" title="El Jardinero Fiel">El Jardinero Fiel</a></h2>
<div class="peli_img p_relative">
<div class="peli_img_img">
<a target="_top" href="http://www.xn--elseordelanillo-1qb.com/pelisdelanillo/pelicula/1077/el-jardinero-fiel.html" title="El Jardinero Fiel">
<img src="http://www.xn--elseordelanillo-1qb.com/pelisdelanillo/files/uploads/1077.jpg" alt="El Jardinero Fiel" /></a>
</div>
<div>
<center><table border="5" bordercolor="#000000"><tr><td>
<img width="26" heigth="17" src="http://www.xn--elseordelanillo-1qb.com/pelisdelanillo/Temas/default/img/idioma/lat.png">
</td><td>
<img width="26" heigth="17" src="http://www.xn--elseordelanillo-1qb.com/pelisdelanillo/Temas/default/img/idioma/sub.png">
</td><td>
<img width="26" heigth="17" src="http://www.xn--elseordelanillo-1qb.com/pelisdelanillo/Temas/default/img/idioma/no-cam.png">
</td><td>
<img width="26" heigth="17" src="http://www.xn--elseordelanillo-1qb.com/pelisdelanillo/Temas/default/img/idioma/dvd.png">
</td><td>
<img width="26" heigth="17" src="http://www.xn--elseordelanillo-1qb.com/pelisdelanillo/Temas/default/img/idioma/no-hd.png">
</td></tr></table></center>
</div>
<div class="peli_txt bgdeg8 brdr10px bxshd2 ico_b p_absolute pd15px white">
<div class="plt_tit bold fs14px mgbot10px"><h2 class="bold d_inline fs14px"><font color="black"><b>El Jardinero Fiel</b></font></h2></div>
<div class="plt_ft clf mgtop10px">
<div class="stars f_left pdtop10px"><strong>Genero</strong>: Suspenso, Drama, 2005</div>
<br><br>
<div class="stars f_left pdtop10px"><table><tr><td><strong>Idioma</strong>:</td><td><img width="26" heigth="17" src="http://www.xn--elseordelanillo-1qb.com/pelisdelanillo/Temas/default/img/idioma/lat.png"></td><td><img width="26" heigth="17" src="http://www.xn--elseordelanillo-1qb.com/pelisdelanillo/Temas/default/img/idioma/sub.png"></td></tr></table></div>
<br /><br />
<div class="stars f_left pdtop10px"><table><tr><td><strong>Calidad</strong>:</td><td><img width="26" heigth="17" src="http://www.xn--elseordelanillo-1qb.com/pelisdelanillo/Temas/default/img/idioma/no-cam.png"></td><td><img width="26" heigth="17" src="http://www.xn--elseordelanillo-1qb.com/pelisdelanillo/Temas/default/img/idioma/dvd.png"></td><td><img width="26" heigth="17" src="http://www.xn--elseordelanillo-1qb.com/pelisdelanillo/Temas/default/img/idioma/no-hd.png"></td></tr></table></div>
<br /><br>
<div class="stars f_left pdtop10px"><strong>Visualizada</strong>: 629 Veces</div>
<a target="_top" class="vrfich bold ico f_right" href="http://www.xn--elseordelanillo-1qb.com/pelisdelanillo/pelicula/1077/el-jardinero-fiel.html" title=""></a>

</div>
</div>
</div>
</li>
<!--</pelicula>-->
'''
patronbloque = "<!--<pelicula>--[^<]+<li(.*?)</li>"
bloques = re.compile(patronbloque, re.DOTALL).findall(data)

for bloque in bloques:
scrapedurl = scrapertools.find_single_match(bloque, '<a.*?href="([^"]+)"')
scrapedtitle = scrapertools.find_single_match(bloque, '<a.*?title="([^"]+)"')
scrapedthumbnail = scrapertools.find_single_match(bloque, '<img src="([^"]+)"')

title = unicode(scrapedtitle, "iso-8859-1", errors="replace").encode("utf-8")
title = title.strip()
title = scrapertools.htmlclean(title)
thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
plot = ""
url = urlparse.urljoin(item.url, scrapedurl)
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
itemlist.append(
Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot,
fulltitle=title))

# </b></span></a></li[^<]+<li><a href="?page=2">
next_page = scrapertools.find_single_match(data, '</b></span></a></li[^<]+<li><a target="_top" href="([^"]+)">')
if next_page != "":
itemlist.append(
Item(channel=item.channel, action="peliculas", title=">> Página siguiente", url=item.url + next_page,
folder=True, viewmode="movie"))

return itemlist

def findvideos(item):
logger.info()

# Descarga la pagina
data = scrapertools.cache_page(item.url)
# logger.info("data="+data)
bloque = scrapertools.find_single_match(data, "function cargamos.*?window.open.'([^']+)'")
data = scrapertools.cache_page(bloque)

from core import servertools
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.channel = item.channel
videoitem.folder = False

return itemlist

def play(item):
logger.info("url=" + item.url)

itemlist = servertools.find_video_items(data=item.url)

for videoitem in itemlist:
videoitem.title = item.title
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel

return itemlist

@@ -25,7 +25,7 @@ ficherocookies = os.path.join(config.get_data_path(), "cookies.dat")

# Headers por defecto, si no se especifica nada
default_headers = dict()
default_headers["User-Agent"] = "Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3163.100 Safari/537.36"
default_headers["User-Agent"] = "Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3163.100 Safari/537.36"
default_headers["Accept"] = "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8"
default_headers["Accept-Language"] = "es-ES,es;q=0.8,en-US;q=0.5,en;q=0.3"
default_headers["Accept-Charset"] = "UTF-8"

@@ -19,19 +19,13 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
pfxfx = ""
headers = {'Host': 'www.flashx.sx',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.137 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.5',
'Accept-Encoding': 'gzip, deflate, br', 'Connection': 'keep-alive', 'Upgrade-Insecure-Requests': '1',
'Cookie': ''}
data = httptools.downloadpage(page_url, cookies=False).data
data = data.replace("\n","")
cgi_counter = scrapertools.find_single_match(data, """(?is)src=.(https://www.flashx.sx/counter.cgi.*?[^(?:'|")]+)""")
cgi_counter = scrapertools.find_single_match(data, """(?is)src=.(https://www.flashx.ws/counter.cgi.*?[^(?:'|")]+)""")
cgi_counter = cgi_counter.replace("%0A","").replace("%22","")
playnow = scrapertools.find_single_match(data, 'https://www.flashx.sx/dl[^"]+')
playnow = scrapertools.find_single_match(data, 'https://www.flashx.ws/dl[^"]+')
# Para obtener el f y el fxfx
js_fxfx = "https://www." + scrapertools.find_single_match(data.replace("//","/"), """(?is)(flashx.sx/js\w+/c\w+.*?[^(?:'|")]+)""")
js_fxfx = "https://www." + scrapertools.find_single_match(data.replace("//","/"), """(?is)(flashx.ws/js\w+/c\w+.*?[^(?:'|")]+)""")
data_fxfx = httptools.downloadpage(js_fxfx).data
mfxfx = scrapertools.find_single_match(data_fxfx, 'get.*?({.*?})').replace("'","").replace(" ","")
matches = scrapertools.find_multiple_matches(mfxfx, '(\w+):(\w+)')
@@ -41,9 +35,9 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
logger.info("mfxfxfx2= %s" %pfxfx)
if pfxfx == "":
pfxfx = "ss=yes&f=fail&fxfx=6"
coding_url = 'https://www.flashx.sx/flashx.php?%s' %pfxfx
coding_url = 'https://www.flashx.ws/flashx.php?%s' %pfxfx
# {f: 'y', fxfx: '6'}
bloque = scrapertools.find_single_match(data, '(?s)Form method="POST" action(.*?)<!--')
bloque = scrapertools.find_single_match(data, '(?s)Form method="POST" action(.*?)span')
flashx_id = scrapertools.find_single_match(bloque, 'name="id" value="([^"]+)"')
fname = scrapertools.find_single_match(bloque, 'name="fname" value="([^"]+)"')
hash_f = scrapertools.find_single_match(bloque, 'name="hash" value="([^"]+)"')
@@ -52,11 +46,6 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
flashx_id, urllib.quote(fname), hash_f, imhuman)
wait_time = scrapertools.find_single_match(data, "<span id='xxc2'>(\d+)")

headers['Referer'] = "https://www.flashx.sx/"
headers['Accept'] = "*/*"
headers['Host'] = "www.flashx.sx"
headers['X-Requested-With'] = 'XMLHttpRequest'

# Obligatorio descargar estos 2 archivos, porque si no, muestra error
httptools.downloadpage(coding_url, cookies=False)
httptools.downloadpage(cgi_counter, cookies=False)
@@ -66,8 +55,6 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
except:
time.sleep(6)

headers.pop('X-Requested-With')
headers['Content-Type'] = 'application/x-www-form-urlencoded'
data = httptools.downloadpage(playnow, post).data
# Si salta aviso, se carga la pagina de comprobacion y luego la inicial
# LICENSE GPL3, de alfa-addon: https://github.com/alfa-addon/ ES OBLIGATORIO AÑADIR ESTAS LÍNEAS
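The flashx hunks above mostly retarget the domain from flashx.sx to flashx.ws; the interesting mechanism they touch is the f/fxfx gate: the site serves a throwaway JS file whose GET parameters must be replayed against flashx.php (together with counter.cgi) before the download POST succeeds. A runnable sketch of that parameter extraction, with js_body as a made-up sample of the script:

import re

js_body = "$.get('https://www.flashx.ws/flashx.php', {f: 'y', fxfx: '6'}, ...);"

mfxfx = re.search(r'get.*?({.*?})', js_body).group(1).replace("'", "").replace(" ", "")
pairs = re.findall(r'(\w+):(\w+)', mfxfx)
pfxfx = '&'.join('%s=%s' % (k, v) for k, v in pairs)
if pfxfx == "":
    pfxfx = "ss=yes&f=fail&fxfx=6"      # fallback the connector uses

coding_url = 'https://www.flashx.ws/flashx.php?%s' % pfxfx
print(coding_url)  # -> https://www.flashx.ws/flashx.php?f=y&fxfx=6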
@@ -27,14 +27,13 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
referer = page_url.replace('iframe', 'preview')

data = httptools.downloadpage(page_url, headers={'referer': referer}).data

if data == "File was deleted":
return "El archivo no existe o ha sido borrado"

if 'Video is processing now' in data:
return "El vídeo está siendo procesado, intentalo de nuevo mas tarde"

var = scrapertools.find_single_match(data, 'var _0x[0-f]+=(\[[^;]+\]);')
var = scrapertools.find_single_match(data, 'var _0x[0-f]{4}=(\[[^;]+\]);')

packed = scrapertools.find_single_match(data, "<script type=[\"']text/javascript[\"']>(eval.*?)</script>")
unpacked = jsunpack.unpack(packed)

@@ -7,11 +7,9 @@ from platformcode import logger

def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)

data = httptools.downloadpage(page_url).data
if "File was deleted" in data or "Page Cannot Be Found" in data:
return False, "[thevideo.me] El archivo ha sido eliminado o no existe"

return True, ""

@@ -19,21 +17,17 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
logger.info("url=" + page_url)
if not "embed" in page_url:
page_url = page_url.replace("http://thevideo.me/", "http://thevideo.me/embed-") + ".html"

data = httptools.downloadpage(page_url).data

mpri_Key = scrapertools.find_single_match(data, "lets_play_a_game='([^']+)'")
var = scrapertools.find_single_match(data, 'vsign.player.*?\+ (\w+)')
mpri_Key = scrapertools.find_single_match(data, "%s='([^']+)'" %var)
data_vt = httptools.downloadpage("https://thevideo.me/vsign/player/%s" % mpri_Key).data
vt = scrapertools.find_single_match(data_vt, 'function\|([^\|]+)\|')
if "fallback" in vt:
vt = scrapertools.find_single_match(data_vt, 'jwConfig\|([^\|]+)\|')

media_urls = scrapertools.find_multiple_matches(data, '\{"file"\s*\:\s*"([^"]+)"\s*,\s*"label"\s*\:\s*"([^"]+)"')
video_urls = []

for media_url, label in media_urls:
media_url += "?direct=false&ua=1&vt=%s" % vt
video_urls.append(
[scrapertools.get_filename_from_url(media_url)[-4:] + " (" + label + ") [thevideo.me]", media_url])

return video_urls
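The thevideo.me hunk generalizes the key lookup: the page used to expose it as lets_play_a_game='...', but the variable name now changes per load, so the connector first finds which variable the player concatenates, then reads that variable's value. A runnable sketch with a made-up page sample:

import re

page = "var k9f2='SECRET'; player.src = '/vsign/player/' + k9f2;"

var = re.search(r"player.*?\+ (\w+)", page).group(1)        # -> "k9f2"
mpri_Key = re.search(r"%s='([^']+)'" % var, page).group(1)  # -> "SECRET"
# the connector then fetches https://thevideo.me/vsign/player/<mpri_Key>
# and appends "?direct=false&ua=1&vt=<vt>" to every media URL it returns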
@@ -4,7 +4,7 @@
"ignore_urls": [],
"patterns": [
{
"pattern": "(https://vidlox.tv/embed-.*?.html)",
"pattern": "(https://vidlox.(?:tv|me)/embed-.*?.html)",
"url": "\\1"
}
]