@@ -89,7 +89,7 @@ def newest(categoria):
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
logger.error("%s" % line)
return []

return itemlist

@@ -215,14 +215,14 @@ def findvideos(item):
url = url + '|' + item.url

title = "%s - %s" % ('%s', title)
itemlist.append(Item (channel=item.channel, action="play", url=url, title=title, text_color=color3))
itemlist.append(Item(channel=item.channel, action="play", url=url, title=title, text_color=color3))

itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())

if item.extra != "findvideos" and config.get_videolibrary_support():
itemlist.append(Item (channel=item.channel, title="Añadir película a la videoteca", \
action="add_pelicula_to_library",
extra="findvideos", text_color="green"))
itemlist.append(
item.clone(title="Añadir película a la videoteca", action="add_pelicula_to_library", extra="findvideos",
text_color="green"))

return itemlist

@@ -1,7 +1,7 @@
{
"id": "pelis24",
"name": "Pelis24",
"active": true,
"active": false,
"adult": false,
"language": "es",
"thumbnail": "pelis24.png",
@@ -49,4 +49,4 @@
"visible": true
}
]
}
}

@@ -1,27 +1,38 @@
# -*- coding: utf-8 -*-

import re
import urlparse

from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger
from platformcode import config, logger

host = "http://peliscity.com"

def mainlist(item):
logger.info()

itemlist = []

data = httptools.downloadpage(host).data
patron = 'cat-item.*?span>([^<]+)'
matches = scrapertools.find_multiple_matches(data, patron)
can = 0
for cantidad in matches:
can += int(cantidad.replace(".", ""))


itemlist.append(
Item(channel=item.channel, title="Últimas agregadas", action="agregadas", url="http://peliscity.com",
Item(channel=item.channel, title="Películas: (%s)" %can, text_bold=True))
itemlist.append(
Item(channel=item.channel, title="      Últimas agregadas", action="agregadas", url= host,
viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, title="Peliculas HD", action="agregadas",
url="http://peliscity.com/calidad/hd-real-720", viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, title="      Peliculas HD", action="agregadas",
url= host + "/calidad/hd-real-720", viewmode="movie_with_plot"))
itemlist.append(
Item(channel=item.channel, title="Listado por género", action="porGenero", url="http://peliscity.com"))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url="http://peliscity.com/?s="))
itemlist.append(Item(channel=item.channel, title="Idioma", action="porIdioma", url="http://peliscity.com/"))
Item(channel=item.channel, title="      Listado por género", action="porGenero", url= host))
itemlist.append(Item(channel=item.channel, title="      Buscar", action="search", url= host + "/?s="))
itemlist.append(Item(channel=item.channel, title="      Idioma", action="porIdioma", url= host))

return itemlist

@@ -29,12 +40,12 @@ def mainlist(item):
def porIdioma(item):
itemlist = []
itemlist.append(Item(channel=item.channel, title="Castellano", action="agregadas",
url="http://www.peliscity.com/idioma/espanol-castellano/", viewmode="movie_with_plot"))
url= host + "/idioma/espanol-castellano/", viewmode="movie_with_plot"))
itemlist.append(
Item(channel=item.channel, title="VOS", action="agregadas", url="http://www.peliscity.com/idioma/subtitulada/",
Item(channel=item.channel, title="VOS", action="agregadas", url= host + "/idioma/subtitulada/",
viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, title="Latino", action="agregadas",
url="http://www.peliscity.com/idioma/espanol-latino/", viewmode="movie_with_plot"))
url= host + "/idioma/espanol-latino/", viewmode="movie_with_plot"))

return itemlist

@@ -43,15 +54,16 @@ def porGenero(item):
logger.info()

itemlist = []
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data

logger.info("data=" + data)
patron = 'cat-item.*?href="([^"]+).*?>(.*?)<'
patron = 'cat-item.*?href="([^"]+).*?>(.*?)<.*?span>([^<]+)'

matches = re.compile(patron, re.DOTALL).findall(data)

for urlgen, genero in matches:
itemlist.append(Item(channel=item.channel, action="agregadas", title=genero, url=urlgen, folder=True,
for urlgen, genero, cantidad in matches:
cantidad = cantidad.replace(".", "")
titulo = genero + " (" + cantidad + ")"
itemlist.append(Item(channel=item.channel, action="agregadas", title=titulo, url=urlgen, folder=True,
viewmode="movie_with_plot"))

return itemlist
@@ -60,7 +72,7 @@ def porGenero(item):
def search(item, texto):
logger.info()
texto_post = texto.replace(" ", "+")
item.url = "http://www.peliscity.com/?s=" + texto_post
item.url = host + "/?s=" + texto_post

try:
return listaBuscar(item)
@@ -76,7 +88,7 @@ def agregadas(item):
logger.info()
itemlist = []

data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r'\n|\r|\t|\s{2}| |"', "", data)

patron = scrapertools.find_multiple_matches (data,'<divclass=col-mt-5 postsh>.*?Duración')
@@ -92,10 +104,18 @@ def agregadas(item):
plot = info[4]
year = info[5].strip()

itemlist.append(Item(channel=item.channel, title=title, url=url, action='findvideos',thumbnail=thumbnail,
itemlist.append(Item(channel=item.channel,
action='findvideos',
contentType = "movie",
fulltitle = title,
infoLabels={'year':year},
plot=plot,
quality=quality, infoLabels={'year':year}))

quality=quality,
thumbnail=thumbnail,
title=title,
contentTitle = title,
url=url
))
# Paginación
try:
next_page = scrapertools.find_single_match(data,'tima>.*?href=(.*?) ><i')
@@ -113,7 +133,7 @@ def listaBuscar(item):
logger.info()
itemlist = []

data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n", " ", data)
logger.info("data=" + data)

@@ -135,7 +155,7 @@ def findvideos(item):
plot = item.plot

# Descarga la pagina
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
patron = 'cursor: hand" rel="(.*?)".*?class="optxt"><span>(.*?)<.*?width.*?class="q">(.*?)</span'
matches = re.compile(patron, re.DOTALL).findall(data)

@@ -150,8 +170,14 @@ def findvideos(item):
itemlist.append(
Item(channel=item.channel, action="play", title=title, fulltitle=item.title, url=scrapedurl,
thumbnail=item.thumbnail, plot=plot, show=item.show, quality= quality, language=language, extra = item.thumbnail))

itemlist=servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
# Opción "Añadir esta película a la biblioteca de KODI"
if item.extra != "library":
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
filtro=True, action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
infoLabels={'title': item.fulltitle}, fulltitle=item.title,
extra="library"))
return itemlist


@@ -48,6 +48,14 @@
"movie"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",

@@ -1,11 +1,11 @@
# -*- coding: utf-8 -*-

import base64
import re

from core import channeltools
from core import httptools
from core import scrapertoolsV2
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
@@ -27,22 +27,13 @@ def mainlist(item):
thumbnail = "https://raw.githubusercontent.com/master-1970/resources/master/images/genres/4/verdes/%s.png"

itemlist.append(item.clone(title="Novedades", action="peliculas", text_bold=True, viewcontent='movies',
url=HOST + "/ultimas-y-actualizadas",
url=HOST,
thumbnail=thumbnail % 'novedades', viewmode="movie_with_plot"))
itemlist.append(item.clone(title="Estrenos", action="peliculas", text_bold=True,
url=HOST + "/genre/premieres", thumbnail=thumbnail % 'estrenos'))
itemlist.append(item.clone(title="", folder=False))

itemlist.append(Item(channel=item.channel, title="Filtrar por:", fanart=fanart_host, folder=False,
text_color=color3, text_bold=True, thumbnail=thumbnail_host))
itemlist.append(item.clone(title="    Género", action="menu_buscar_contenido", text_color=color1, text_italic=True,
extra="genre", thumbnail=thumbnail % 'generos', viewmode="thumbnails"))
itemlist.append(item.clone(title="    Idioma", action="menu_buscar_contenido", text_color=color1, text_italic=True,
extra="audio", thumbnail=thumbnail % 'idiomas'))
itemlist.append(item.clone(title="    Calidad", action="menu_buscar_contenido", text_color=color1, text_italic=True,
extra="quality", thumbnail=thumbnail % 'calidad'))
itemlist.append(item.clone(title="    Año", action="menu_buscar_contenido", text_color=color1, text_italic=True,
extra="year", thumbnail=thumbnail % 'year'))
url=HOST + "/premiere", thumbnail=thumbnail % 'estrenos'))
itemlist.append(item.clone(title="Género", action="menu_buscar_contenido", text_bold=True,thumbnail=thumbnail % 'generos', viewmode="thumbnails",
url=HOST
))

itemlist.append(item.clone(title="", folder=False))
itemlist.append(item.clone(title="Buscar por título", action="search", thumbnail=thumbnail % 'buscar'))
@@ -55,8 +46,7 @@ def search(item, texto):
itemlist = []

try:
# http://www.yaske.ro/search/?q=los+pitufos
item.url = HOST + "/search/?q=" + texto.replace(' ', '+')
item.url = HOST + "/search/?query=" + texto.replace(' ', '+')
item.extra = ""
itemlist.extend(peliculas(item))
if itemlist[-1].title == ">> Página siguiente":
@@ -80,9 +70,9 @@ def newest(categoria):
item = Item()
try:
if categoria == 'peliculas':
item.url = HOST + "/ultimas-y-actualizadas"
item.url = HOST
elif categoria == 'infantiles':
item.url = HOST + "/search/?q=&genre%5B%5D=animation"
item.url = HOST + "/genre/16/"
else:
return []

@@ -103,59 +93,46 @@ def newest(categoria):
def peliculas(item):
logger.info()
itemlist = []
url_next_page = ""

data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)

patron = '<article class.*?'
patron += '<a href="([^"]+)">.*?'
patron += '<img src="([^"]+)".*?'
patron += '<aside class="item-control down">(.*?)</aside>.*?'
patron += '<small class="pull-right text-muted">([^<]+)</small>.*?'
patron += '<h2 class.*?>([^<]+)</h2>'
patron = 'class="post-item-image btn-play-item".*?'
patron += 'href="([^"]+)">.*?'
patron += '<img data-original="([^"]+)".*?'
patron += 'glyphicon-calendar"></i>([^<]+).*?'
patron += 'post-item-flags"> (.*?)</div.*?'
patron += 'text-muted f-14">(.*?)</h3'

matches = re.compile(patron, re.DOTALL).findall(data)
matches = scrapertools.find_multiple_matches(data, patron)

# Paginacion
if item.next_page != 'b':
if len(matches) > 30:
url_next_page = item.url
matches = matches[:30]
next_page = 'b'
else:
matches = matches[30:]
next_page = 'a'
patron_next_page = 'Anteriores</a> <a href="([^"]+)" class="btn btn-default ".*?Siguiente'
matches_next_page = re.compile(patron_next_page, re.DOTALL).findall(data)
if len(matches_next_page) > 0:
url_next_page = matches_next_page[0]

for scrapedurl, scrapedthumbnail, idiomas, year, scrapedtitle in matches:
patronidiomas = "<img src='([^']+)'"
matchesidiomas = re.compile(patronidiomas, re.DOTALL).findall(idiomas)
patron_next_page = 'href="([^"]+)"> »'
matches_next_page = scrapertools.find_single_match(data, patron_next_page)
if len(matches_next_page) > 0:
url_next_page = item.url + matches_next_page

for scrapedurl, scrapedthumbnail, year, idiomas, scrapedtitle in matches:
year = year.strip()
patronidiomas = '<img src="([^"]+)"'
matchesidiomas = scrapertools.find_multiple_matches(idiomas, patronidiomas)
idiomas_disponibles = []
for idioma in matchesidiomas:
if idioma.endswith("la_la.png"):
if idioma.endswith("/la.png"):
idiomas_disponibles.append("LAT")
elif idioma.endswith("en_en.png"):
elif idioma.endswith("/en.png"):
idiomas_disponibles.append("VO")
elif idioma.endswith("en_es.png"):
elif idioma.endswith("/en_es.png"):
idiomas_disponibles.append("VOSE")
elif idioma.endswith("es_es.png"):
elif idioma.endswith("/es.png"):
idiomas_disponibles.append("ESP")

if idiomas_disponibles:
idiomas_disponibles = "[" + "/".join(idiomas_disponibles) + "]"

contentTitle = scrapertoolsV2.decodeHtmlentities(scrapedtitle.strip())
contentTitle = scrapertoolsV2.htmlclean(scrapedtitle.strip())
title = "%s %s" % (contentTitle, idiomas_disponibles)

itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=scrapedurl,
thumbnail=scrapedthumbnail, contentTitle=contentTitle,
infoLabels={"year": year}, text_color=color1))

# Obtenemos los datos basicos de todas las peliculas mediante multihilos
tmdb.set_infoLabels(itemlist)

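Aside (not part of the commit): a minimal standalone sketch of the flag-icon to language mapping that peliculas() builds above, assuming the icon filenames taken from the new pattern (/la.png, /en.png, /en_es.png, /es.png); the helper name build_language_tag is illustrative only.

LANG_BY_ICON = {
    "/la.png": "LAT",      # Latin American Spanish
    "/en.png": "VO",       # original version
    "/en_es.png": "VOSE",  # original version with Spanish subtitles
    "/es.png": "ESP",      # Castilian Spanish
}

def build_language_tag(icon_urls):
    # Returns e.g. "[LAT/VOSE]", or "" when no known flag icon is found.
    found = []
    for url in icon_urls:
        for suffix, tag in LANG_BY_ICON.items():
            if url.endswith(suffix) and tag not in found:
                found.append(tag)
    return "[" + "/".join(found) + "]" if found else ""

print(build_language_tag(["http://example.com/img/la.png", "http://example.com/img/en_es.png"]))  # [LAT/VOSE]
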
@@ -163,48 +140,32 @@ def peliculas(item):
if url_next_page:
itemlist.append(
Item(channel=item.channel, action="peliculas", title=">> Página siguiente", thumbnail=thumbnail_host,
url=url_next_page, next_page=next_page, folder=True, text_color=color3, text_bold=True))
url=url_next_page, folder=True, text_color=color3, text_bold=True))

return itemlist


def menu_buscar_contenido(item):
logger.info(item)
itemlist = []

data = httptools.downloadpage(item.url).data
patron = '<select name="' + item.extra + '(.*?)</select>'
data = scrapertoolsV2.get_match(data, patron)

patron = 'Generos.*?</ul>'
data = scrapertools.find_single_match(data, patron)
# Extrae las entradas
patron = "<option value='([^']+)'>([^<]+)</option>"
matches = re.compile(patron, re.DOTALL).findall(data)

itemlist = []
for scrapedvalue, scrapedtitle in matches:
thumbnail = ""

if item.extra == 'genre':
if scrapedtitle.strip() in ['Documental', 'Short', 'News']:
continue

url = HOST + "/search/?q=&genre%5B%5D=" + scrapedvalue
filename = scrapedtitle.lower().replace(' ', '%20')
if filename == "ciencia%20ficción":
filename = "ciencia%20ficcion"
thumbnail = "https://raw.githubusercontent.com/master-1970/resources/master/images/genres/4/verdes/%s.png" \
% filename

elif item.extra == 'year':
url = HOST + "/search/?q=&year=" + scrapedvalue
thumbnail = item.thumbnail
else:
# http://www.yaske.ro/search/?q=&quality%5B%5D=c9
# http://www.yaske.ro/search/?q=&audio%5B%5D=es
url = HOST + "/search/?q=&" + item.extra + "%5B%5D=" + scrapedvalue
thumbnail = item.thumbnail

itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=url, text_color=color1,
thumbnail=thumbnail, contentType='movie', folder=True, viewmode="movie_with_plot"))
patron = 'href="([^"]+)">([^<]+)'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle in matches:
url = HOST + scrapedurl
itemlist.append(Item(channel = item.channel,
action = "peliculas",
title = scrapedtitle,
url = url,
text_color = color1,
contentType = 'movie',
folder = True,
viewmode = "movie_with_plot"
))

if item.extra in ['genre', 'audio', 'year']:
return sorted(itemlist, key=lambda i: i.title.lower(), reverse=item.extra == 'year')
@@ -214,29 +175,28 @@ def menu_buscar_contenido(item):

def findvideos(item):
logger.info()
itemlist = list()
sublist = list()
itemlist = []
sublist = []

# Descarga la página
data = httptools.downloadpage(item.url).data

url = "http://widget.olimpo.link/playlist/?tmdb=" + scrapertools.find_single_match(item.url, 'yaske.ro/([0-9]+)')
data = httptools.downloadpage(url).data
if not item.plot:
item.plot = scrapertoolsV2.find_single_match(data, '>Sinopsis</dt> <dd>([^<]+)</dd>')
item.plot = scrapertoolsV2.decodeHtmlentities(item.plot)

patron = '<option value="([^"]+)"[^>]+'
patron += '>([^<]+).*?</i>([^<]+)'
matches = re.compile(patron, re.DOTALL).findall(data)
patron = '(/embed/[^"]+).*?'
patron += 'quality text-overflow ">([^<]+).*?'
patron += 'title="([^"]+)'
matches = scrapertools.find_multiple_matches(data, patron)

for url, idioma, calidad in matches:
if 'yaske' in url:
for url, calidad, idioma in matches:
if 'embed' in url:
url = "http://widget.olimpo.link" + url
data = httptools.downloadpage(url).data
url_enc = scrapertoolsV2.find_single_match(data, "eval.*?'(.*?)'")
url_dec = base64.b64decode(url_enc)
url = scrapertoolsV2.find_single_match(url_dec, 'iframe src="(.*?)"')
sublist.append(item.clone(action="play", url=url, folder=False, text_color=color1, quality=calidad.strip(),
url = scrapertools.find_single_match(data, 'iframe src="([^"]+)')
sublist.append(item.clone(channel=item.channel, action="play", url=url, folder=False, text_color=color1, quality=calidad.strip(),
language=idioma.strip()))

sublist = servertools.get_servers_itemlist(sublist, lambda i: "Ver en %s %s" % (i.server, i.quality), True)

# Añadir servidores encontrados, agrupandolos por idioma

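Aside (not part of the commit): a minimal sketch of the extraction step findvideos() performs above. The embed page hides the player URL inside an eval('<base64>') blob whose decoded payload contains an iframe tag; only the regexes and base64.b64decode mirror the diff, and the sample payload below is fabricated (Python 2, like the rest of the add-on).

import re
import base64

def extract_iframe_url(page_html):
    # Pull the base64 string out of eval('...'), decode it, then read the iframe src.
    encoded = re.search(r"eval.*?'(.*?)'", page_html).group(1)
    decoded = base64.b64decode(encoded)
    return re.search(r'iframe src="([^"]+)', decoded).group(1)

# Fabricated sample payload, for illustration only.
sample = "eval('%s')" % base64.b64encode('<iframe src="http://example.com/player"></iframe>')
print(extract_iframe_url(sample))  # http://example.com/player
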
@@ -1,4 +1,4 @@
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
# --------------------------------------------------------------------------------
# Scraper tools for reading and processing web elements
# --------------------------------------------------------------------------------
@@ -214,6 +214,7 @@ def htmlclean(cadena):
cadena = cadena.replace("<tr>", "")
cadena = cadena.replace("</tr>", "")
cadena = cadena.replace("<![CDATA[", "")
cadena = cadena.replace("<wbr>", "")
cadena = cadena.replace("<Br />", " ")
cadena = cadena.replace("<BR />", " ")
cadena = cadena.replace("<Br>", " ")

@@ -81,6 +81,7 @@ def htmlclean(cadena):
cadena = cadena.replace("<tr>", "")
cadena = cadena.replace("</tr>", "")
cadena = cadena.replace("<![CDATA[", "")
cadena = cadena.replace("<wbr>", "")
cadena = cadena.replace("<Br />", " ")
cadena = cadena.replace("<BR />", " ")
cadena = cadena.replace("<Br>", " ")

@@ -131,17 +131,26 @@ def render_items(itemlist, parent_item):
else:
icon_image = "DefaultVideo.png"

# Creamos el listitem
listitem = xbmcgui.ListItem(item.title, iconImage=icon_image, thumbnailImage=item.thumbnail)

# Ponemos el fanart
if item.fanart:
listitem.setProperty('fanart_image', item.fanart)
fanart = item.fanart
else:
listitem.setProperty('fanart_image', os.path.join(config.get_runtime_path(), "fanart.jpg"))
fanart = os.path.join(config.get_runtime_path(), "fanart.jpg")

# TODO: ¿Se puede eliminar esta linea? yo no he visto que haga ningun efecto.
xbmcplugin.setPluginFanart(int(sys.argv[1]), os.path.join(config.get_runtime_path(), "fanart.jpg"))
# Creamos el listitem
listitem = xbmcgui.ListItem(item.title)

# values icon, thumb or poster are skin dependent.. so we set all to avoid problems
# if not exists thumb it's used icon value
if config.get_platform(True)['num_version'] >= 16.0:
listitem.setArt({'icon': icon_image, 'thumb': item.thumbnail, 'poster': item.thumbnail, 'fanart': fanart})
else:
listitem.setIconImage(icon_image)
listitem.setThumbnailImage(item.thumbnail)
listitem.setProperty('fanart_image', fanart)

# No need it, use fanart instead
# xbmcplugin.setPluginFanart(int(sys.argv[1]), os.path.join(config.get_runtime_path(), "fanart.jpg"))

# Esta opcion es para poder utilizar el xbmcplugin.setResolvedUrl()
# if item.isPlayable == True or (config.get_setting("player_mode") == 1 and item.action == "play"):
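Aside (not part of the commit): the version-gated art handling introduced above, condensed into a helper for clarity. The helper name set_listitem_art is illustrative; setArt, setIconImage, setThumbnailImage and setProperty('fanart_image', ...) are the Kodi ListItem calls already used in the diff.

def set_listitem_art(listitem, icon, thumbnail, fanart, kodi_version):
    # Kodi 16 (Jarvis) and later accept a single setArt() dict; older
    # releases still need the separate setter calls kept in the else branch.
    if kodi_version >= 16.0:
        listitem.setArt({'icon': icon, 'thumb': thumbnail, 'poster': thumbnail, 'fanart': fanart})
    else:
        listitem.setIconImage(icon)
        listitem.setThumbnailImage(thumbnail)
        listitem.setProperty('fanart_image', fanart)
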
@@ -157,7 +166,10 @@ def render_items(itemlist, parent_item):
context_commands = set_context_commands(item, parent_item)

# Añadimos el item
listitem.addContextMenuItems(context_commands, replaceItems=True)
if config.get_platform(True)['num_version'] >= 17.0:
listitem.addContextMenuItems(context_commands)
else:
listitem.addContextMenuItems(context_commands, replaceItems=True)

if not item.totalItems:
item.totalItems = 0
@@ -166,7 +178,7 @@ def render_items(itemlist, parent_item):
totalItems=item.totalItems)

# Fijar los tipos de vistas...
if config.get_setting("forceview") == True:
if config.get_setting("forceview"):
# ...forzamos segun el viewcontent
xbmcplugin.setContent(int(sys.argv[1]), parent_item.viewcontent)
# logger.debug(parent_item)
@@ -184,7 +196,7 @@ def render_items(itemlist, parent_item):
xbmcplugin.endOfDirectory(handle=int(sys.argv[1]), succeeded=True)

# Fijar la vista
if config.get_setting("forceview") == True:
if config.get_setting("forceview"):
viewmode_id = get_viewmode_id(parent_item)
xbmc.executebuiltin("Container.SetViewMode(%s)" % viewmode_id)

@@ -256,10 +268,6 @@ def set_infolabels(listitem, item, player=False):
elif not player:
listitem.setInfo("video", {"Title": item.title})

# Añadido para Kodi Krypton (v17)
if config.get_platform(True)['num_version'] >= 17.0:
listitem.setArt({"poster": item.thumbnail})


def set_context_commands(item, parent_item):
"""
@@ -458,7 +466,12 @@ def play_video(item, strm=False, force_direct=False):

if item.channel == 'downloads':
logger.info("Reproducir video local: %s [%s]" % (item.title, item.url))
xlistitem = xbmcgui.ListItem(path=item.url, thumbnailImage=item.thumbnail)
xlistitem = xbmcgui.ListItem(path=item.url)
if config.get_platform(True)['num_version'] >= 16.0:
xlistitem.setArt({"thumb": item.thumbnail})
else:
xlistitem.setThumbnailImage(item.thumbnail)

set_infolabels(xlistitem, item, True)
xbmc.Player().play(item.url, xlistitem)
return
@@ -491,9 +504,16 @@ def play_video(item, strm=False, force_direct=False):

# se obtiene la información del video.
if not item.contentThumbnail:
xlistitem = xbmcgui.ListItem(path=mediaurl, thumbnailImage=item.thumbnail)
thumb = item.thumbnail
else:
xlistitem = xbmcgui.ListItem(path=mediaurl, thumbnailImage=item.contentThumbnail)
thumb = item.contentThumbnail

xlistitem = xbmcgui.ListItem(path=item.url)
if config.get_platform(True)['num_version'] >= 16.0:
xlistitem.setArt({"thumb": thumb})
else:
xlistitem.setThumbnailImage(thumb)

set_infolabels(xlistitem, item, True)

# si se trata de un vídeo en formato mpd, se configura el listitem para reproducirlo
@@ -695,7 +715,14 @@ def set_opcion(item, seleccion, opciones, video_urls):

if seleccion == -1:
# Para evitar el error "Uno o más elementos fallaron" al cancelar la selección desde fichero strm
listitem = xbmcgui.ListItem(item.title, iconImage="DefaultVideo.png", thumbnailImage=item.thumbnail)
listitem = xbmcgui.ListItem(item.title)

if config.get_platform(True)['num_version'] >= 16.0:
listitem.setArt({'icon':"DefaultVideo.png", 'thumb': item.thumbnail})
else:
listitem.setIconImage("DefaultVideo.png")
listitem.setThumbnailImage(item.thumbnail)

xbmcplugin.setResolvedUrl(int(sys.argv[1]), False, listitem)

# "Descargar"

@@ -12,8 +12,8 @@ from platformcode import logger

def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "Object not found" in data or "no longer exists" in data or '"sources": [false]' in data:
data = httptools.downloadpage(page_url, add_referer = True).data
if "Object not found" in data or "no longer exists" in data or '"sources": [false]' in data or 'sources: []' in data:
return False, "[pelismundo] El archivo no existe o ha sido borrado"

return True, ""

@@ -32,11 +32,15 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=

data = scrapertools.find_single_match(data.replace('"', "'"), "sources\s*=[^\[]*\[([^\]]+)\]")
matches = scrapertools.find_multiple_matches(data, "[src|file]:'([^']+)'")
if len(matches) == 0:
matches = scrapertools.find_multiple_matches(data, "[^',]+")
video_urls = []
for video_url in matches:
if video_url.endswith(".mpd"):
continue
_hash = scrapertools.find_single_match(video_url, '[A-z0-9\_\-]{40,}')
hash = _hash[::-1]
hash = hash.replace(hash[1:2],"",1)
hash = hash.replace(hash[1:2], "", 1)
video_url = video_url.replace(_hash, hash)

filename = scrapertools.get_filename_from_url(video_url)[-4:]
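Aside (not part of the commit): what the token rewrite above amounts to. The long id embedded in the stream URL is reversed and then the first occurrence of its second character is dropped (normally the second character itself) before being substituted back into the URL. The token and URL below are made up for the example.

def deobfuscate_token(token):
    # Reverse the token, then drop the first occurrence of its second character,
    # mirroring hash = _hash[::-1]; hash = hash.replace(hash[1:2], "", 1).
    reversed_token = token[::-1]
    return reversed_token.replace(reversed_token[1:2], "", 1)

# Fabricated example values, for illustration only.
token = "abcdefghij0123456789abcdefghij0123456789"
url = "http://example.com/stream/%s/v.mp4" % token
print(url.replace(token, deobfuscate_token(token)))
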
@@ -56,107 +60,3 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
logger.info("    %s - %s" % (video_url[0], video_url[1]))

return video_urls


def decrypt(h, k):
import base64

if len(h) % 4:
h += "=" * (4 - len(h) % 4)
sig = []
h = base64.b64decode(h.replace("-", "+").replace("_", "/"))
for c in range(len(h)):
sig += [ord(h[c])]

sec = []
for c in range(len(k)):
sec += [ord(k[c])]

dig = range(256)
g = 0
v = 128
for b in range(len(sec)):
a = (v + (sec[b] & 15)) % 256
c = dig[(g)]
dig[g] = dig[a]
dig[a] = c
g += 1

a = (v + (sec[b] >> 4 & 15)) % 256
c = dig[g]
dig[g] = dig[a]
dig[a] = c
g += 1

k = 0
q = 1
p = 0
n = 0
for b in range(512):
k = (k + q) % 256
n = (p + dig[(n + dig[k]) % 256]) % 256
p = (k + p + dig[n]) % 256
c = dig[k]
dig[k] = dig[n]
dig[n] = c

q = 3
for a in range(v):
b = 255 - a
if dig[a] > dig[b]:
c = dig[a]
dig[a] = dig[b]
dig[b] = c

k = 0
for b in range(512):
k = (k + q) % 256
n = (p + dig[(n + dig[k]) % 256]) % 256
p = (k + p + dig[n]) % 256
c = dig[k]
dig[k] = dig[n]
dig[n] = c

q = 5
for a in range(v):
b = 255 - a
if dig[a] > dig[b]:
c = dig[a]
dig[a] = dig[b]
dig[b] = c

k = 0
for b in range(512):
k = (k + q) % 256
n = (p + dig[(n + dig[k]) % 256]) % 256
p = (k + p + dig[n]) % 256
c = dig[k]
dig[k] = dig[n]
dig[n] = c

q = 7
k = 0
u = 0
d = []
for b in range(len(dig)):
k = (k + q) % 256
n = (p + dig[(n + dig[k]) % 256]) % 256
p = (k + p + dig[n]) % 256
c = dig[k]
dig[k] = dig[n]
dig[n] = c
u = dig[(n + dig[(k + dig[(u + p) % 256]) % 256]) % 256]
d += [u]

c = []
for f in range(len(d)):
try:
c += [(256 + (sig[f] - d[f])) % 256]
except:
break

h = ""
for s in c:
h += chr(s)

return h

@@ -48,5 +48,6 @@
"visible": false
}
],
"thumbnail": "https://s26.postimg.org/vo685y2bt/vimeo1.png",
"version": 1
}
}