Merge remote-tracking branch 'alfa-addon/master'
@@ -285,6 +285,7 @@ function get_response(data) {
else {
keypress = "";
};
if (!data.items[x].value) data.items[x].value = "";
itemlist[data.items[x].category].push(replace_list(html.config.text, {
"item_color": data.items[x].color,
"item_label": data.items[x].label,
@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="1.9.0" provider-name="Alfa Addon">
<addon id="plugin.video.alfa" name="Alfa" version="1.9.1" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -19,13 +19,15 @@
</assets>
<news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Arreglos[/B][/COLOR]
[I]- serieslan
- streamplay
- descargasmix
- canalpelis - Canal nuevo
[I]- cinetux
- vidoza
- canalpelis
- pelisplanet
- newpct1
- pelisplus
- torrentlocura - fix para usar videoteca y en mediaserver
- fixes internos[/I]

[COLOR green]Gracias a [COLOR yellow][B]msdos[/B][/COLOR] por su colaboración en esta versión[/COLOR]
[COLOR green]Gracias a [COLOR yellow][B]xabier100[/B][/COLOR] y [COLOR yellow][B]fermintxu[/B][/COLOR] por su colaboración en esta versión[/COLOR]
</news>
<description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>
<summary lang="en">Browse web pages using Kodi</summary>
@@ -38,6 +38,8 @@
"visible": true,
"lvalues": [
"Sin color",
"Perfil 5",
"Perfil 4",
"Perfil 3",
"Perfil 2",
"Perfil 1"
plugin.video.alfa/channels/canalpelis.py (Executable file → Normal file)
@@ -127,7 +127,7 @@ def peliculas(item):

data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}| ", "", data)
# logger.info(data)
logger.info(data)

patron = '<div class="poster"><img src="([^"]+)" alt="([^"]+)">.*?' # img, title.strip()
patron += '<span class="icon-star2"></span>(.*?)/div>.*?' # rating
@@ -138,7 +138,8 @@ def peliculas(item):
matches = scrapertools.find_multiple_matches(data, patron)

for scrapedthumbnail, scrapedtitle, rating, calidad, scrapedurl, year in matches[item.page:item.page + 20]:
if 'Próximamente' not in calidad:
if 'Próximamente' not in calidad and '-XXX.jpg' not in scrapedthumbnail:

scrapedtitle = scrapedtitle.replace('Ver ', '').strip()
contentTitle = scrapedtitle.partition(':')[0].partition(',')[0]
title = "%s [COLOR green][%s][/COLOR] [COLOR yellow][%s][/COLOR]" % (
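Review note (canalpelis): besides enabling the `logger.info(data)` trace, the new condition skips adult-content posters by thumbnail filename while the loop keeps paging through matches twenty at a time. A minimal, self-contained sketch of that scrape → filter → paginate pattern; the `sample_data` HTML and the simplified two-group pattern are invented stand-ins, not the channel's exact values:

```python
# -*- coding: utf-8 -*-
import re

# Invented HTML standing in for httptools.downloadpage(item.url).data
sample_data = (
    '<div class="poster"><img src="http://example.com/img/pelicula-uno.jpg" alt="Ver Pelicula Uno">'
    '<div class="poster"><img src="http://example.com/img/pelicula-dos-XXX.jpg" alt="Ver Pelicula Dos">'
)

# Simplified two-group pattern; the real patron also captures rating, calidad, url and year
patron = '<div class="poster"><img src="([^"]+)" alt="([^"]+)">'
matches = re.findall(patron, sample_data)

page = 0  # item.page in the channel
titles = []
for scrapedthumbnail, scrapedtitle in matches[page:page + 20]:
    # Same idea as the new filter: drop adult-content posters by thumbnail name
    if '-XXX.jpg' in scrapedthumbnail:
        continue
    titles.append(scrapedtitle.replace('Ver ', '').strip())

print(titles)  # ['Pelicula Uno']
```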
@@ -314,15 +314,11 @@ def bloque_enlaces(data, filtro_idioma, dict_idiomas, type, item):
url = scrapertools.find_single_match(bloque1, patron)
if "goo.gl" in url:
url = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location", "")
if "www.cinetux.me" in url:
server = scrapertools.find_single_match(url, "player/(.*?)\.")
else:
server = servertools.get_server_from_url(url)
matches.append([url, server, "", language.strip(), t_tipo])
matches.append([url, "", "", language.strip(), t_tipo])
bloque2 = scrapertools.find_single_match(data, '(?s)box_links.*?dt_social_single')
bloque2 = bloque2.replace("\t", "").replace("\r", "")
patron = '(?s)optn" href="([^"]+)'
patron += '.*?title="([^"]+)'
patron += '.*?title="([^\.]+)'
patron += '.*?src.*?src="[^>]+"?/>([^<]+)'
patron += '.*?src="[^>]+"?/>([^<]+)'
patron += '.*?/span>([^<]+)'
@@ -336,7 +332,7 @@ def bloque_enlaces(data, filtro_idioma, dict_idiomas, type, item):
scrapedtipo = match[4]
if t_tipo.upper() not in scrapedtipo.upper():
continue
title = " Mirror en " + scrapedserver.split(".")[0] + " (" + scrapedlanguage + ")"
title = " Mirror en %s (" + scrapedlanguage + ")"
if len(scrapedcalidad.strip()) > 0:
title += " (Calidad " + scrapedcalidad.strip() + ")"
@@ -357,6 +353,7 @@ def bloque_enlaces(data, filtro_idioma, dict_idiomas, type, item):
title = "Mostrar enlaces filtrados en %s" % ", ".join(filtrados)
lista_enlaces.append(item.clone(title=title, action="findvideos", url=item.url, text_color=color3,
filtro=True))
lista_enlaces = servertools.get_servers_itemlist(lista_enlaces, lambda i: i.title % i.server.capitalize())
return lista_enlaces
@@ -368,7 +365,6 @@ def play(item):
data = httptools.downloadpage(item.url, headers={'Referer': item.extra}).data.replace("\\", "")
id = scrapertools.find_single_match(data, 'img src="[^#]+#(.*?)"')
item.url = "https://youtube.googleapis.com/embed/?status=ok&hl=es&allow_embed=1&ps=docs&partnerid=30&hd=1&autoplay=0&cc_load_policy=1&showinfo=0&docid=" + id
itemlist = servertools.find_video_items(data=item.url)
elif "links" in item.url or "www.cinetux.me" in item.url:
data = httptools.downloadpage(item.url).data
scrapedurl = scrapertools.find_single_match(data, '<a href="(http[^"]+)')
@@ -380,7 +376,16 @@ def play(item):
scrapedurl = httptools.downloadpage(scrapedurl, follow_redirects=False, only_headers=True).headers.get(
"location", "")
item.url = scrapedurl
itemlist = servertools.find_video_items(data=item.url)
else:
return [item]
itemlist.append(
Item(channel = item.channel,
action = "play",
title = "%s",
fulltitle = item.fulltitle,
thumbnail = item.thumbnail,
server = "",
url = item.url
))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
return itemlist
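Review note (cinetux): both hunks above move server detection out of the scraping code. Items are now built with an empty `server` and a title that keeps a `%s` placeholder, and `servertools.get_servers_itemlist()` fills the server in afterwards through the `lambda i: i.title % i.server.capitalize()` callback. A minimal sketch of that deferred-resolution pattern; the `Item` class and `detect_server()` below are simplified stand-ins, not Alfa's real core modules:

```python
# Simplified stand-ins for core.item.Item and servertools
class Item(object):
    def __init__(self, title, url, server=""):
        self.title, self.url, self.server = title, url, server

def detect_server(url):
    # Hypothetical detection by domain; the real servertools inspects known URL patterns
    return "openload" if "openload" in url else "directo"

def get_servers_itemlist(itemlist, title_builder):
    # Fill in item.server, then rebuild the visible title from the "%s" template
    for it in itemlist:
        if not it.server:
            it.server = detect_server(it.url)
        it.title = title_builder(it)
    return itemlist

items = [Item(title=" Mirror en %s (Castellano)", url="https://openload.co/embed/abc"),
         Item(title=" Mirror en %s (Latino)", url="https://example.com/v/xyz")]
items = get_servers_itemlist(items, lambda i: i.title % i.server.capitalize())
print([i.title for i in items])
# [' Mirror en Openload (Castellano)', ' Mirror en Directo (Latino)']
```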
@@ -13,10 +13,15 @@ def mainlist(item):
logger.info()

itemlist = []

thumb_pelis=get_thumb("channels_movie.png")
thumb_series=get_thumb("channels_tvshow.png")

itemlist.append(Item(channel=item.channel, action="submenu", title="Películas", url="http://www.newpct1.com/",
extra="peliculas"))
itemlist.append(
Item(channel=item.channel, action="submenu", title="Series", url="http://www.newpct1.com/", extra="series"))
extra="peliculas", thumbnail=thumb_pelis ))

itemlist.append(Item(channel=item.channel, action="submenu", title="Series", url="http://www.newpct1.com/", extra="series",
thumbnail=thumb_series))
# itemlist.append(Item(channel=item.channel, action="search", title="Buscar"))

return itemlist
@@ -131,22 +136,22 @@ def listado(item):
1).strip()
# logger.info("[newpct1.py] titulo="+title)
'''
if len(title)>3:
url_i = 'http://www.newpct1.com/index.php?page=buscar&url=&letter=&q=%22' + title.replace(" ","%20") + '%22'
if len(title)>3:
url_i = 'http://www.newpct1.com/index.php?page=buscar&url=&letter=&q=%22' + title.replace(" ","%20") + '%22'
else:
url_i = 'http://www.newpct1.com/index.php?page=buscar&url=&letter=&q=' + title
url_i = 'http://www.newpct1.com/index.php?page=buscar&url=&letter=&q=' + title

if "1.com/series-hd" in url:
extra="serie-hd"
url = url_i + '&categoryID=&categoryIDR=1469&calidad=' + calidad.replace(" ","+") #DTV+720p+AC3+5.1
elif "1.com/series-vo" in url:
elif "1.com/series-vo" in url:
extra="serie-vo"
url = url_i + '&categoryID=&categoryIDR=775&calidad=' + calidad.replace(" ","+") #HDTV+720p+AC3+5.1
elif "1.com/series/" in url:
url = url_i + '&categoryID=&categoryIDR=775&calidad=' + calidad.replace(" ","+") #HDTV+720p+AC3+5.1
elif "1.com/series/" in url:
extra="serie-tv"
url = url_i + '&categoryID=&categoryIDR=767&calidad=' + calidad.replace(" ","+")
url = url_i + '&categoryID=&categoryIDR=767&calidad=' + calidad.replace(" ","+")

url += '&idioma=&ordenar=Nombre&inon=Descendente'
url += '&idioma=&ordenar=Nombre&inon=Descendente'
'''
else:
title = title.replace("Descargar", "", 1).strip()
@@ -180,7 +185,7 @@ def listado(item):
paginacion = scrapertools.get_match(data, patron)

if "Next" in paginacion:
url_next_page = scrapertools.get_match(paginacion, '<a href="([^>]+)>Next</a>')[:-1].replace(" ", "%20")
url_next_page = scrapertools.get_match(paginacion, '<a href="(http[^>]+)>Next</a>')[:-1].replace(" ", "%20")
itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente", url=url_next_page,
extra=item.extra))
# logger.info("[newpct1.py] listado items:" + str(len(itemlist)))
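Review note (newpct1): the pagination fix narrows the capture from `([^>]+)` to `(http[^>]+)`, so only an absolute URL is accepted before `>Next</a>`; the trailing quote is then stripped with `[:-1]` and spaces are percent-encoded. A small illustration of what the stricter pattern rejects; the `href="#"` placeholder case is an invented example of the kind of value the old pattern would have let through:

```python
import re

old_pat = '<a href="([^>]+)>Next</a>'
new_pat = '<a href="(http[^>]+)>Next</a>'

good = '<a href="http://www.newpct1.com/series/ pg/2">Next</a>'
bad = '<a href="#">Next</a>'  # placeholder link (invented example)

print(re.search(old_pat, bad).group(1)[:-1])   # '#'  -> would be requested as a page URL
print(re.search(new_pat, bad))                 # None -> placeholder is rejected

url_next_page = re.search(new_pat, good).group(1)[:-1].replace(" ", "%20")
print(url_next_page)  # http://www.newpct1.com/series/%20pg/2
```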
@@ -203,7 +208,7 @@ def completo(item):

if item.extra != "serie_add":
'''
# Afinar mas la busqueda
# Afinar mas la busqueda
if item_extra=="serie-hd":
categoryID=buscar_en_subcategoria(item.show,'1469')
elif item_extra=="serie-vo":
@@ -369,7 +374,7 @@ def get_episodios(item):
paginacion = scrapertools.get_match(data, patron)
# logger.info("[newpct1.py] get_episodios: paginacion= " + paginacion)
if "Next" in paginacion:
url_next_page = scrapertools.get_match(paginacion, '<a href="([^>]+)>Next</a>')[:-1]
url_next_page = scrapertools.get_match(paginacion, '<a href="(http[^>]+)>Next</a>')[:-1]
url_next_page = url_next_page.replace(" ", "%20")
# logger.info("[newpct1.py] get_episodios: url_next_page= " + url_next_page)
itemlist.append(
@@ -38,19 +38,13 @@
"visible": true,
"lvalues": [
"Sin color",
"Perfil 5",
"Perfil 4",
"Perfil 3",
"Perfil 2",
"Perfil 1"
]
},
{
"id": "orden_episodios",
"type": "bool",
"label": "Mostrar los episodios de las series en orden descendente",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
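Review note: these channel settings (colour-profile `lvalues`, booleans, etc.) are what the channels read back through `config.get_setting()`; the pelisplanet module below wraps that lookup in try/except so a missing or unparsable value falls back to defaults. A minimal sketch of that defensive pattern; `get_setting` here is a stand-in for `platformcode.config.get_setting`, and the fake store exists only for the example:

```python
# Stand-in for platformcode.config.get_setting; the real call reads the
# channel's settings ("modo_grafico", "perfil", ...) from Kodi's storage.
_FAKE_STORE = {("modo_grafico", "pelisplanet"): True}

def get_setting(name, channel):
    return _FAKE_STORE[(name, channel)]   # raises KeyError when the setting is unset

try:
    __modo_grafico__ = get_setting('modo_grafico', 'pelisplanet')
    __perfil__ = int(get_setting('perfil', 'pelisplanet'))
except Exception:
    # Same fallback the channel uses: graphic mode on, first colour profile
    __modo_grafico__ = True
    __perfil__ = 0

print(__modo_grafico__, __perfil__)  # True 0  ('perfil' missing -> fallback values)
```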
@@ -1,274 +1,328 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import re
|
||||
import urlparse
|
||||
|
||||
from core import channeltools
|
||||
from core import httptools
|
||||
from core import scrapertools
|
||||
from core import servertools
|
||||
from core import tmdb
|
||||
from core.item import Item
|
||||
from platformcode import config, logger
|
||||
|
||||
host = "http://www.pelisplanet.com/"
|
||||
|
||||
headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
|
||||
['Referer', host]]
|
||||
|
||||
parameters = channeltools.get_channel_parameters('pelisplanet')
|
||||
fanart_host = parameters['fanart']
|
||||
thumbnail_host = parameters['thumbnail']
|
||||
color1, color2, color3 = ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E']
|
||||
|
||||
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
item.url = host
|
||||
item.text_color = color1
|
||||
item.fanart = fanart_host
|
||||
thumbnail = "https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/%s.png"
|
||||
|
||||
itemlist.append(item.clone(title="Novedades", action="peliculas", text_blod=True,
|
||||
viewcontent='movies', thumbnail=thumbnail % 'novedades',
|
||||
viewmode="movie_with_plot"))
|
||||
|
||||
itemlist.append(item.clone(title="Estrenos", action="peliculas", text_blod=True,
|
||||
url=host + 'genero/estrenos/', thumbnail=thumbnail % 'estrenos'))
|
||||
|
||||
itemlist.append(item.clone(title="Géneros", action="generos", text_blod=True,
|
||||
viewcontent='movies', thumbnail=thumbnail % 'generos',
|
||||
viewmode="movie_with_plot", url=host + 'generos/'))
|
||||
|
||||
itemlist.append(Item(channel=item.channel, title="Filtrar por Idiomas",
|
||||
fanart=fanart_host, folder=False, text_color=color3,
|
||||
text_blod=True, thumbnail=thumbnail % 'idiomas'))
|
||||
|
||||
itemlist.append(item.clone(title="Castellano", action="peliculas", text_blod=True,
|
||||
viewcontent='movies', thumbnail=thumbnail % 'castellano',
|
||||
viewmode="movie_with_plot", url=host + 'idioma/castellano/'))
|
||||
|
||||
itemlist.append(item.clone(title="Latino", action="peliculas", text_blod=True,
|
||||
viewcontent='movies', thumbnail=thumbnail % 'latino',
|
||||
viewmode="movie_with_plot", url=host + 'idioma/latino/'))
|
||||
|
||||
itemlist.append(item.clone(title="Buscar por Título o Actor", action="search", text_blod=True,
|
||||
thumbnail=thumbnail % 'busqueda', url=host))
|
||||
return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
|
||||
logger.info()
|
||||
|
||||
texto = texto.replace(" ", "+")
|
||||
item.url = urlparse.urljoin(item.url, "?s={0}".format(texto))
|
||||
|
||||
try:
|
||||
return sub_search(item)
|
||||
|
||||
# Se captura la excepción, para no interrumpir al buscador global si un canal falla
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
logger.error("{0}".format(line))
|
||||
return []
|
||||
|
||||
|
||||
def sub_search(item):
|
||||
logger.info()
|
||||
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t| |<br>", "", data)
|
||||
# logger.info(data)
|
||||
patron = '<div class="img">.*?<a href="(?P<url>[^"]+)" title="(?P<name>[^"]+)".*?'
|
||||
patron += '<img.+?src="(?P<img>[^"]+)".*?\(([^\)]+)\)"> </a></div>.*?'
|
||||
patron += 'Ver\s(.*?)\sOnline'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for url, name, img, year, scrapedinfo in matches:
|
||||
contentTitle = scrapertools.decodeHtmlentities(scrapedinfo.strip())
|
||||
plot = item.plot
|
||||
itemlist.append(item.clone(title=name, url=url, contentTitle=contentTitle,
|
||||
plot=plot, action="findvideos", infoLabels={"year": year},
|
||||
thumbnail=img, text_color=color3))
|
||||
|
||||
paginacion = scrapertools.find_single_match(
|
||||
data, '<a class="page larger" href="([^"]+)">\d+</a>')
|
||||
|
||||
if paginacion:
|
||||
itemlist.append(Item(channel=item.channel, action="sub_search",
|
||||
title="» Siguiente »", url=paginacion,
|
||||
thumbnail='https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/next.png'))
|
||||
|
||||
tmdb.set_infoLabels(itemlist)
|
||||
|
||||
for item in itemlist:
|
||||
if item.infoLabels['plot'] == '':
|
||||
data = httptools.downloadpage(item.url).data
|
||||
item.fanart = scrapertools.find_single_match(
|
||||
data, 'meta property="og:image" content="([^"]+)" \/>')
|
||||
item.plot = scrapertools.find_single_match(data,
|
||||
'Castellano</h3>\s*<p>(.+?)<strong>')
|
||||
item.plot = scrapertools.htmlclean(item.plot)
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def newest(categoria):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
item = Item()
|
||||
try:
|
||||
if categoria == 'peliculas':
|
||||
item.url = host
|
||||
elif categoria == 'infantiles':
|
||||
item.url = host + "genero/animacion-e-infantil/"
|
||||
else:
|
||||
return []
|
||||
|
||||
itemlist = peliculas(item)
|
||||
if itemlist[-1].title == "» Siguiente »":
|
||||
itemlist.pop()
|
||||
|
||||
# Se captura la excepción, para no interrumpir al canal novedades si un canal falla
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
logger.error("{0}".format(line))
|
||||
return []
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}| ", "", data)
|
||||
patron_todas = '<div class="home-movies">(.*?)<footer>'
|
||||
data = scrapertools.find_single_match(data, patron_todas)
|
||||
patron = 'col-sm-5"><a href="(?P<scrapedurl>[^"]+)".+?'
|
||||
patron += 'browse-movie-link-qd.*?>(?P<calidad>[^>]+)</.+?'
|
||||
patron += '<p>(?P<year>[^>]+)</p>.+?'
|
||||
patron += 'title one-line">(?P<scrapedtitle>[^>]+)</h2>.+?'
|
||||
patron += 'img-responsive" src="(?P<scrapedthumbnail>[^"]+)".*?'
|
||||
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for scrapedurl, calidad, year, scrapedtitle, scrapedthumbnail in matches:
|
||||
datas = httptools.downloadpage(scrapedurl).data
|
||||
datas = re.sub(r"\n|\r|\t|\s{2}| ", "", datas)
|
||||
# logger.info(datas)
|
||||
if '/ ' in scrapedtitle:
|
||||
scrapedtitle = scrapedtitle.partition('/ ')[2]
|
||||
contentTitle = scrapertools.find_single_match(datas, '<em class="pull-left">Titulo original: </em>([^<]+)</p>')
|
||||
contentTitle = scrapertools.decodeHtmlentities(contentTitle.strip())
|
||||
rating = scrapertools.find_single_match(datas, 'alt="Puntaje MPA IMDb" /></a><span>([^<]+)</span>')
|
||||
director = scrapertools.find_single_match(datas,
|
||||
'<div class="list-cast-info tableCell"><a href="[^"]+" rel="tag">([^<]+)</a></div>')
|
||||
title = "%s [COLOR yellow][%s][/COLOR]" % (scrapedtitle.strip(), calidad.upper())
|
||||
|
||||
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, plot='',
|
||||
url=scrapedurl, contentQuality=calidad, thumbnail=scrapedthumbnail,
|
||||
contentTitle=contentTitle,
|
||||
infoLabels={"year": year, 'rating': rating, 'director': director},
|
||||
text_color=color3))
|
||||
|
||||
tmdb.set_infoLabels(itemlist, seekTmdb=True)
|
||||
|
||||
paginacion = scrapertools.find_single_match(data, '<a class="nextpostslink" rel="next" href="([^"]+)">')
|
||||
if paginacion:
|
||||
itemlist.append(Item(channel=item.channel, action="peliculas",
|
||||
title="» Siguiente »", url=paginacion, plot="Página Siguiente",
|
||||
thumbnail='https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/next.png'))
|
||||
|
||||
for item in itemlist:
|
||||
if item.infoLabels['plot'] == '':
|
||||
data = httptools.downloadpage(item.url).data
|
||||
item.fanart = scrapertools.find_single_match(data, 'meta property="og:image" content="([^"]+)" \/>')
|
||||
item.plot = scrapertools.find_single_match(data, 'Castellano</h3>\s*<p>(.+?)<strong>')
|
||||
item.plot = scrapertools.htmlclean(item.plot)
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def generos(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
|
||||
data = scrapertools.cache_page(item.url)
|
||||
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
|
||||
# logger.info(data)
|
||||
patron = '<div class="todos">.*?'
|
||||
patron += '<a href="([^"]+)".*?'
|
||||
patron += 'title="([^"]+)".*?'
|
||||
patron += '<img src="([^"]+)"'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
|
||||
itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle,
|
||||
url=scrapedurl, text_color=color3, thumbnail=scrapedthumbnail,
|
||||
plot="", viewmode="movie_with_plot", folder=True))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def findvideos(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
|
||||
datas = httptools.downloadpage(item.url).data
|
||||
datas = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}| ", "", datas)
|
||||
# logger.info(data)
|
||||
patron = '<a style="cursor:pointer; cursor: hand;" rel="([^"]+)".*?'
|
||||
patron += 'clearfix colores title_calidad">.*?<span>([^<]+)</span></a>'
|
||||
|
||||
matches = re.compile(patron, re.DOTALL).findall(datas)
|
||||
|
||||
for scrapedurl, servidores, in matches:
|
||||
if 'pelispp.com' or 'ultrapelis' in scrapedurl:
|
||||
data = httptools.downloadpage(scrapedurl, headers=headers).data
|
||||
patronr = 'file: "([^"]+)",label:"([^"]+)",type'
|
||||
matchesr = re.compile(patronr, re.DOTALL).findall(data)
|
||||
for scrapedurl, label in matchesr:
|
||||
url = scrapedurl.replace('\\', '')
|
||||
language = 'latino'
|
||||
quality = label.decode('cp1252').encode('utf8')
|
||||
title = item.contentTitle + ' (' + str(label) + ')'
|
||||
thumbnail = item.thumbnail
|
||||
fanart = item.fanart
|
||||
itemlist.append(item.clone(action="play", title=title, url=url, server='directo',
|
||||
thumbnail=thumbnail, fanart=fanart, extra='directo',
|
||||
quality=quality, language=language, ))
|
||||
itemlist.sort(key=lambda it: it.title, reverse=True)
|
||||
|
||||
# if 'youtube' not in scrapedurl:
|
||||
if 'youtube' not in scrapedurl:
|
||||
quality = scrapertools.find_single_match(
|
||||
datas, '<p class="hidden-xs hidden-sm">.*?class="magnet-download">([^<]+)p</a>')
|
||||
title = "[COLOR green]%s[/COLOR] [COLOR yellow][%s][/COLOR] [COLOR yellow][%s][/COLOR]" % (
|
||||
item.contentTitle, quality.upper(), servidores.capitalize())
|
||||
url = scrapedurl.replace('\\', '')
|
||||
thumbnail = item.thumbnail
|
||||
server = servertools.get_server_from_url(url)
|
||||
|
||||
itemlist.append(item.clone(action='play', title=title, url=url, quality=quality,
|
||||
server=server, text_color=color3, thumbnail=thumbnail))
|
||||
|
||||
for videoitem in itemlist:
|
||||
videoitem.infoLabels = item.infoLabels
|
||||
videoitem.channel = item.channel
|
||||
videoitem.action = 'play'
|
||||
videoitem.fulltitle = item.title
|
||||
|
||||
if config.get_videolibrary_support() and len(itemlist) > 0:
|
||||
itemlist.append(Item(channel=item.channel,
|
||||
title='[COLOR yellow]Añadir esta pelicula a la biblioteca[/COLOR]',
|
||||
url=item.url, action="add_pelicula_to_library",
|
||||
thumbnail='https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/libreria.png',
|
||||
extra="findvideos", contentTitle=item.contentTitle))
|
||||
|
||||
return itemlist
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import re
|
||||
import sys
|
||||
import urllib
|
||||
import urlparse
|
||||
from platformcode import config, logger
|
||||
from core import httptools
|
||||
from core import scrapertools
|
||||
from core import servertools
|
||||
from core.item import Item
|
||||
from core import channeltools
|
||||
from core import tmdb
|
||||
|
||||
host = "http://www.pelisplanet.com/"
|
||||
|
||||
headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
|
||||
['Referer', host]]
|
||||
|
||||
__channel__ = "pelisplanet"
|
||||
parameters = channeltools.get_channel_parameters('pelisplanet')
|
||||
fanart_host = parameters['fanart']
|
||||
thumbnail_host = parameters['thumbnail']
|
||||
try:
|
||||
__modo_grafico__ = config.get_setting('modo_grafico', __channel__)
|
||||
__perfil__ = int(config.get_setting('perfil', __channel__))
|
||||
except:
|
||||
__modo_grafico__ = True
|
||||
__perfil__ = 0
|
||||
|
||||
# Fijar perfil de color
|
||||
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFFFFD700'],
|
||||
['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFFFFD700'],
|
||||
['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFFFFD700']]
|
||||
if __perfil__ < 3:
|
||||
color1, color2, color3, color4, color5 = perfil[__perfil__]
|
||||
else:
|
||||
color1 = color2 = color3 = color4 = color5 = ""
|
||||
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
item.url = host
|
||||
item.text_color = color1
|
||||
item.fanart = fanart_host
|
||||
thumbnail = "https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/%s.png"
|
||||
|
||||
itemlist.append(item.clone(title="Novedades", action="peliculas", text_blod=True,
|
||||
viewcontent='movies', thumbnail=thumbnail % 'novedades',
|
||||
viewmode="movie_with_plot"))
|
||||
|
||||
itemlist.append(item.clone(title="Estrenos", action="peliculas", text_blod=True,
|
||||
url=host + 'genero/estrenos/', thumbnail=thumbnail % 'estrenos'))
|
||||
|
||||
itemlist.append(item.clone(title="Géneros", action="generos", text_blod=True,
|
||||
viewcontent='movies', thumbnail=thumbnail % 'generos',
|
||||
viewmode="movie_with_plot", url=host + 'generos/'))
|
||||
|
||||
itemlist.append(Item(channel=item.channel, title="Filtrar por Idiomas",
|
||||
fanart=fanart_host, folder=False, text_color=color3,
|
||||
text_blod=True, thumbnail=thumbnail % 'idiomas'))
|
||||
|
||||
itemlist.append(item.clone(title="Castellano", action="peliculas", text_blod=True,
|
||||
viewcontent='movies', thumbnail=thumbnail % 'castellano',
|
||||
viewmode="movie_with_plot", url=host + 'idioma/castellano/'))
|
||||
|
||||
itemlist.append(item.clone(title="Latino", action="peliculas", text_blod=True,
|
||||
viewcontent='movies', thumbnail=thumbnail % 'latino',
|
||||
viewmode="movie_with_plot", url=host + 'idioma/latino/'))
|
||||
|
||||
itemlist.append(item.clone(title="Buscar por Título o Actor", action="search", text_blod=True,
|
||||
thumbnail=thumbnail % 'busqueda', url=host))
|
||||
return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
|
||||
logger.info()
|
||||
|
||||
texto = texto.replace(" ", "+")
|
||||
item.url = urlparse.urljoin(item.url, "?s={0}".format(texto))
|
||||
|
||||
try:
|
||||
return sub_search(item)
|
||||
|
||||
# Se captura la excepción, para no interrumpir al buscador global si un canal falla
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
logger.error("{0}".format(line))
|
||||
return []
|
||||
|
||||
|
||||
def sub_search(item):
|
||||
logger.info()
|
||||
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t| |<br>", "", data)
|
||||
# logger.info(data)
|
||||
patron = '<div class="img">.*?<a href="(?P<url>[^"]+)" title="(?P<name>[^"]+)".*?'
|
||||
patron += '<img.+?src="(?P<img>[^"]+)".*?\(([^\)]+)\)"> </a></div>.*?'
|
||||
patron += 'Ver\s(.*?)\sOnline'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for url, name, img, year, scrapedinfo in matches:
|
||||
contentTitle = scrapertools.decodeHtmlentities(scrapedinfo.strip())
|
||||
plot = item.plot
|
||||
itemlist.append(item.clone(title=name, url=url, contentTitle=contentTitle,
|
||||
plot=plot, action="findvideos", infoLabels={"year": year},
|
||||
thumbnail=img, text_color=color3))
|
||||
|
||||
paginacion = scrapertools.find_single_match(
|
||||
data, '<a class="page larger" href="([^"]+)">\d+</a>')
|
||||
|
||||
if paginacion:
|
||||
itemlist.append(Item(channel=item.channel, action="sub_search",
|
||||
title="» Siguiente »", url=paginacion,
|
||||
thumbnail='https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/next.png'))
|
||||
|
||||
tmdb.set_infoLabels(itemlist)
|
||||
|
||||
for item in itemlist:
|
||||
if item.infoLabels['plot'] == '':
|
||||
data = httptools.downloadpage(item.url).data
|
||||
item.fanart = scrapertools.find_single_match(
|
||||
data, 'meta property="og:image" content="([^"]+)" \/>')
|
||||
item.plot = scrapertools.find_single_match(data,
|
||||
'Castellano</h3>\s*<p>(.+?)<strong>')
|
||||
item.plot = scrapertools.htmlclean(item.plot)
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def newest(categoria):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
item = Item()
|
||||
try:
|
||||
if categoria == 'peliculas':
|
||||
item.url = host
|
||||
elif categoria == 'infantiles':
|
||||
item.url = host + "genero/animacion-e-infantil/"
|
||||
else:
|
||||
return []
|
||||
|
||||
itemlist = peliculas(item)
|
||||
if itemlist[-1].title == "» Siguiente »":
|
||||
itemlist.pop()
|
||||
|
||||
# Se captura la excepción, para no interrumpir al canal novedades si un canal falla
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
logger.error("{0}".format(line))
|
||||
return []
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}| ", "", data)
|
||||
patron_todas = '<div class="home-movies">(.*?)<footer>'
|
||||
data = scrapertools.find_single_match(data, patron_todas)
|
||||
patron = 'col-sm-5"><a href="(?P<scrapedurl>[^"]+)".+?'
|
||||
patron += 'browse-movie-link-qd.*?>(?P<calidad>[^>]+)</.+?'
|
||||
patron += '<p>(?P<year>[^>]+)</p>.+?'
|
||||
patron += 'title one-line">(?P<scrapedtitle>[^>]+)</h2>.+?'
|
||||
patron += 'img-responsive" src="(?P<scrapedthumbnail>[^"]+)".*?'
|
||||
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for scrapedurl, calidad, year, scrapedtitle, scrapedthumbnail in matches:
|
||||
datas = httptools.downloadpage(scrapedurl).data
|
||||
datas = re.sub(r"\n|\r|\t|\s{2}| ", "", datas)
|
||||
#logger.info(datas)
|
||||
if '/ ' in scrapedtitle:
|
||||
scrapedtitle = scrapedtitle.partition('/ ')[2]
|
||||
contentTitle = scrapertools.find_single_match(datas, '<em class="pull-left">Titulo original: </em>([^<]+)</p>')
|
||||
contentTitle = scrapertools.decodeHtmlentities(contentTitle.strip())
|
||||
rating = scrapertools.find_single_match(datas, 'alt="Puntaje MPA IMDb" /></a><span>([^<]+)</span>')
|
||||
director = scrapertools.find_single_match(datas, '<div class="list-cast-info tableCell"><a href="[^"]+" rel="tag">([^<]+)</a></div>')
|
||||
title = "%s [COLOR yellow][%s][/COLOR]" % (scrapedtitle.strip(), calidad.upper())
|
||||
|
||||
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, plot='',
|
||||
url=scrapedurl, contentQuality=calidad, thumbnail=scrapedthumbnail,
|
||||
contentTitle=contentTitle, infoLabels={"year": year, 'rating': rating, 'director': director},
|
||||
text_color=color3))
|
||||
|
||||
tmdb.set_infoLabels(itemlist, __modo_grafico__)
|
||||
tmdb.set_infoLabels(itemlist, __modo_grafico__)
|
||||
|
||||
paginacion = scrapertools.find_single_match(data, '<a class="nextpostslink" rel="next" href="([^"]+)">')
|
||||
if paginacion:
|
||||
|
||||
itemlist.append(Item(channel=item.channel, action="peliculas",
|
||||
title="» Siguiente »", url=paginacion, plot="Página Siguiente",
|
||||
thumbnail='https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/next.png'))
|
||||
|
||||
for item in itemlist:
|
||||
if item.infoLabels['plot'] == '':
|
||||
data = httptools.downloadpage(item.url).data
|
||||
item.fanart = scrapertools.find_single_match(data, 'meta property="og:image" content="([^"]+)" \/>')
|
||||
item.plot = scrapertools.find_single_match(data, 'Castellano</h3>\s*<p>(.+?)<strong>')
|
||||
item.plot = scrapertools.htmlclean(item.plot)
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def generos(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
|
||||
data = scrapertools.cache_page(item.url)
|
||||
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
|
||||
# logger.info(data)
|
||||
patron = '<div class="todos">.*?'
|
||||
patron += '<a href="([^"]+)".*?'
|
||||
patron += 'title="([^"]+)".*?'
|
||||
patron += '<img src="([^"]+)"'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
|
||||
itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle,
|
||||
url=scrapedurl, text_color=color3, thumbnail=scrapedthumbnail,
|
||||
plot="", viewmode="movie_with_plot", folder=True))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def findvideos(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
|
||||
datas = httptools.downloadpage(item.url).data
|
||||
datas = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}| ", "", datas)
|
||||
# logger.info(data)
|
||||
patron = '<a style="cursor:pointer; cursor: hand;" rel="([^"]+)".*?'
|
||||
patron += 'clearfix colores title_calidad">.*?<span>([^<]+)</span></a>'
|
||||
|
||||
matches = re.compile(patron, re.DOTALL).findall(datas)
|
||||
|
||||
for scrapedurl, servidores, in matches:
|
||||
if 'youtube' in scrapedurl:
|
||||
video_urls = []
|
||||
doc_id = scrapertools.find_single_match(scrapedurl, "docid=(\w+)")
|
||||
doc_url = "http://docs.google.com/get_video_info?docid=%s" % doc_id
|
||||
response = httptools.downloadpage(doc_url, cookies=False)
|
||||
cookies = ""
|
||||
cookie = response.headers["set-cookie"].split("HttpOnly, ")
|
||||
for c in cookie:
|
||||
cookies += c.split(";", 1)[0] + "; "
|
||||
data = response.data.decode('unicode-escape')
|
||||
data = urllib.unquote_plus(urllib.unquote_plus(data))
|
||||
headers_string = "|Cookie=" + cookies
|
||||
url_streams = scrapertools.find_single_match(data, 'url_encoded_fmt_stream_map=(.*)')
|
||||
streams = scrapertools.find_multiple_matches(url_streams,
|
||||
'itag=(\d+)&url=(.*?)(?:;.*?quality=.*?(?:,|&)|&quality=.*?(?:,|&))')
|
||||
itags = {'18':'360p', '22':'720p', '34':'360p', '35':'480p', '37':'1080p', '43':'360p', '59':'480p'}
|
||||
for itag, video_url in streams:
|
||||
video_url += headers_string
|
||||
video_urls.append([video_url, itags[itag]])
|
||||
|
||||
for video_item in video_urls:
|
||||
calidad = video_item[1]
|
||||
title = '%s [COLOR green](%s)[/COLOR] [COLOR green]([/COLOR][COLOR black]You[/COLOR][COLOR red]tube[/COLOR][COLOR green])[/COLOR]'%(item.contentTitle, calidad)
|
||||
url = video_item[0]
|
||||
|
||||
itemlist.append(
|
||||
item.clone(channel=item.channel,
|
||||
action='play',
|
||||
title=title,
|
||||
url= url,
|
||||
thumbnail=item.thumbnail,
|
||||
plot=item.plot,
|
||||
fanart=item.fanart,
|
||||
contentTitle=item.contentTitle,
|
||||
server='directo',
|
||||
context = item.context
|
||||
))
|
||||
itemlist.sort(key=lambda it: it.title, reverse=True)
|
||||
if 'pelispp.com' in scrapedurl or 'ultrapelis' in scrapedurl:
|
||||
data = httptools.downloadpage(scrapedurl, headers=headers).data
|
||||
patronr = 'file: "([^"]+)",label:"([^"]+)",type'
|
||||
matchesr = re.compile(patronr, re.DOTALL).findall(data)
|
||||
for scrapedurl, label in matchesr:
|
||||
url = scrapedurl.replace('\\', '')
|
||||
language = 'latino'
|
||||
quality = label.decode('cp1252').encode('utf8')
|
||||
title = item.contentTitle + ' (' + str(label) + ') ([COLOR blue]G[/COLOR][COLOR red]o[/COLOR][COLOR yellow]o[/COLOR][COLOR blue]g[/COLOR][COLOR green]l[/COLOR][COLOR red]e[/COLOR])'
|
||||
thumbnail = item.thumbnail
|
||||
fanart = item.fanart
|
||||
itemlist.append(item.clone(action="play", title=title, url=url, server='directo',
|
||||
thumbnail=thumbnail, fanart=fanart, extra='directo',
|
||||
quality=quality, language=language,))
|
||||
itemlist.sort(key=lambda it: it.title, reverse=True)
|
||||
|
||||
# if 'youtube' not in scrapedurl:
|
||||
servidores = servidores.lower()
|
||||
if 'youtube' not in scrapedurl and 'pelispp.com' not in scrapedurl and 'streamplus' not in servidores:
|
||||
quality = scrapertools.find_single_match(
|
||||
datas, '<p class="hidden-xs hidden-sm">.*?class="magnet-download">([^<]+)p</a>')
|
||||
title = "[COLOR green]%s[/COLOR] [COLOR yellow][%s][/COLOR] [COLOR yellow][%s][/COLOR]" % (
|
||||
item.contentTitle, quality.upper(), servidores.capitalize())
|
||||
url = scrapedurl.replace('\\', '')
|
||||
thumbnail = item.thumbnail
|
||||
server = servertools.get_server_from_url(url)
|
||||
|
||||
itemlist.append(item.clone(action='play', title=title, url=url, quality=quality,
|
||||
server=server, text_color=color3, thumbnail=thumbnail))
|
||||
|
||||
for videoitem in itemlist:
|
||||
videoitem.infoLabels = item.infoLabels
|
||||
videoitem.channel = item.channel
|
||||
videoitem.action = 'play'
|
||||
videoitem.fulltitle = item.title
|
||||
|
||||
if config.get_videolibrary_support() and len(itemlist) > 0:
|
||||
itemlist.append(Item(channel=item.channel,
|
||||
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
|
||||
url=item.url, action="add_pelicula_to_library",
|
||||
thumbnail='https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/libreria.png',
|
||||
extra="findvideos", contentTitle=item.contentTitle))
|
||||
|
||||
return itemlist
|
||||
|
||||
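Review note (pelisplanet): the rewritten `findvideos()` above resolves Google-hosted streams by calling `get_video_info`, splitting `url_encoded_fmt_stream_map` and labelling each stream from its itag code. A condensed sketch of just the labelling step; the stream list is invented, and the `.get(..., 'default')` fallback is an addition for the example (the channel indexes `itags[itag]` directly, so an unknown itag would raise `KeyError` there):

```python
# itag -> quality table as hard-coded in the channel (not an exhaustive list)
ITAGS = {'18': '360p', '22': '720p', '34': '360p', '35': '480p',
         '37': '1080p', '43': '360p', '59': '480p'}

# Invented (itag, url) pairs standing in for the parsed url_encoded_fmt_stream_map
streams = [('22', 'https://redirector.example/videoplayback?itag=22'),
           ('18', 'https://redirector.example/videoplayback?itag=18')]

video_urls = [[url, ITAGS.get(itag, 'default')] for itag, url in streams]
print(video_urls)
# [['https://redirector.example/videoplayback?itag=22', '720p'],
#  ['https://redirector.example/videoplayback?itag=18', '360p']]
```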
plugin.video.alfa/channels/pelisplus.json (Executable file → Normal file)
@@ -44,7 +44,9 @@
|
||||
"latino",
|
||||
"movie",
|
||||
"tvshow",
|
||||
"documentary"
|
||||
"documentary",
|
||||
"direct"
|
||||
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
|
||||
plugin.video.alfa/channels/pelisplus.py (Executable file → Normal file)
@@ -9,6 +9,7 @@ from core import scrapertools
|
||||
from core import tmdb
|
||||
from core.item import Item
|
||||
from platformcode import config, logger
|
||||
from core import servertools
|
||||
|
||||
host = "http://www.pelisplus.tv/"
|
||||
|
||||
@@ -25,10 +26,11 @@ list_quality = ['1080p',
|
||||
'720p',
|
||||
'480p',
|
||||
'360p',
|
||||
'240p'
|
||||
'240p',
|
||||
'default'
|
||||
]
|
||||
list_servers = [
|
||||
'directo',
|
||||
'gvideo',
|
||||
'openload',
|
||||
'thevideos'
|
||||
]
|
||||
@@ -419,65 +421,65 @@ def findvideos(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
duplicados = []
|
||||
datas = httptools.downloadpage(item.url).data
|
||||
patron = "<iframe.*?src='([^']+)' frameborder='0' allowfullscreen.*?"
|
||||
matches = re.compile(patron, re.DOTALL).findall(datas)
|
||||
data = httptools.downloadpage(item.url).data
|
||||
logger.debug('data: %s'%data)
|
||||
video_page = scrapertools.find_single_match(data, "<iframe width='100%' height='500' src='(.*?)' frameborder='0'")
|
||||
data = httptools.downloadpage(video_page).data
|
||||
patron = '<li data-id=".*?">\s+<a href="(.*?)" >'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for scrapedurl in matches:
|
||||
|
||||
if 'elreyxhd' in scrapedurl or 'pelisplus.biz' in scrapedurl:
|
||||
patronr = ''
|
||||
data = httptools.downloadpage(scrapedurl, headers=headers).data
|
||||
if 'tipo' in scrapedurl:
|
||||
server = 'gvideo'
|
||||
gvideo_data = httptools.downloadpage(scrapedurl).data
|
||||
video_url = scrapertools.find_single_match(gvideo_data,'<div id="player">.*?border: none" src="\/\/(.*?)" ')
|
||||
video_url= 'http://%s'%video_url
|
||||
gvideo_url = httptools.downloadpage(video_url).data
|
||||
videourl = servertools.findvideosbyserver(gvideo_url, server)
|
||||
|
||||
quote = scrapertools.find_single_match(data, 'sources.*?file.*?http')
|
||||
if quote and "'" in quote:
|
||||
patronr = "file:'([^']+)',label:'([^.*?]+)',type:.*?'.*?}"
|
||||
elif '"' in quote:
|
||||
patronr = '{file:"(.*?)",label:"(.*?)"}'
|
||||
if patronr != '':
|
||||
matchesr = re.compile(patronr, re.DOTALL).findall(data)
|
||||
logger.debug('videourl: %s'%videourl)
|
||||
language = 'latino'
|
||||
quality = 'default'
|
||||
url = videourl[0][1]
|
||||
title = '%s (%s)'%(item.contentTitle, server)
|
||||
thumbnail = item.thumbnail
|
||||
fanart = item.fanart
|
||||
if video_url not in duplicados:
|
||||
itemlist.append(item.clone(action="play",
|
||||
title=title,
|
||||
url=url,
|
||||
thumbnail=thumbnail,
|
||||
fanart=fanart,
|
||||
show=title,
|
||||
extra='gvideo',
|
||||
language=language,
|
||||
quality=quality,
|
||||
server=server
|
||||
))
|
||||
duplicados.append(video_url)
|
||||
|
||||
for scrapedurl, scrapedcalidad in matchesr:
|
||||
url = scrapedurl
|
||||
language = 'latino'
|
||||
quality = scrapedcalidad.decode('cp1252').encode('utf8')
|
||||
title = item.contentTitle + ' (' + str(scrapedcalidad) + ')'
|
||||
thumbnail = item.thumbnail
|
||||
fanart = item.fanart
|
||||
if url not in duplicados:
|
||||
itemlist.append(item.clone(action="play",
|
||||
title=title,
|
||||
url=url,
|
||||
thumbnail=thumbnail,
|
||||
fanart=fanart,
|
||||
show=title,
|
||||
extra='directo',
|
||||
language=language,
|
||||
quality=quality,
|
||||
server='directo',
|
||||
))
|
||||
duplicados.append(url)
|
||||
|
||||
url = scrapedurl
|
||||
from core import servertools
|
||||
itemlist.extend(servertools.find_video_items(data=datas))
|
||||
|
||||
|
||||
itemlist.extend(servertools.find_video_items(data=data))
|
||||
|
||||
for videoitem in itemlist:
|
||||
# videoitem.infoLabels = item.infoLabels
|
||||
videoitem.channel = item.channel
|
||||
if videoitem.quality == '' or videoitem.language == '':
|
||||
videoitem.quality = 'default'
|
||||
videoitem.language = 'Latino'
|
||||
videoitem.quality = 'default'
|
||||
videoitem.language = 'Latino'
|
||||
if videoitem.server != '':
|
||||
videoitem.thumbnail = servertools.guess_server_thumbnail(videoitem.server)
|
||||
videoitem.thumbnail = servertools.guess_server_thumbnail(videoitem.server)
|
||||
else:
|
||||
videoitem.thumbnail = item.thumbnail
|
||||
videoitem.server = 'directo'
|
||||
videoitem.thumbnail = item.thumbnail
|
||||
videoitem.server = 'directo'
|
||||
videoitem.action = 'play'
|
||||
videoitem.fulltitle = item.title
|
||||
|
||||
if videoitem.extra != 'directo' and 'youtube' not in videoitem.url:
|
||||
videoitem.title = item.contentTitle + ' (' + videoitem.server + ')'
|
||||
videoitem.title = item.contentTitle + ' (' + videoitem.server + ')'
|
||||
|
||||
n = 0
|
||||
for videoitem in itemlist:
|
||||
|
||||
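Review note (pelisplus): `findvideos()` keeps a `duplicados` list and checks membership before appending, so the same stream URL is only listed once. A set gives the same guarantee with constant-time lookups; a minimal sketch with invented URLs:

```python
seen = set()
itemlist = []

for url in ["http://gvideo.example/abc", "http://gvideo.example/abc", "http://openload.example/xyz"]:
    if url in seen:          # same role as `if video_url not in duplicados`
        continue
    seen.add(url)
    itemlist.append(url)     # in the channel this would be item.clone(...)

print(itemlist)  # ['http://gvideo.example/abc', 'http://openload.example/xyz']
```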
@@ -6,23 +6,12 @@
|
||||
"language": "es",
|
||||
"banner": "torrentlocura.png",
|
||||
"thumbnail": "http://imgur.com/EWmLS3d.png",
|
||||
"fanart": "http://imgur.com/V7QZLAL.jpg",
|
||||
"version": 1,
|
||||
"changes": [
|
||||
{
|
||||
"date": "31/12/2016",
|
||||
"description": "Release"
|
||||
},
|
||||
{
|
||||
"date": "13/01/2017",
|
||||
"description": "Añadida info a cápitulos en bloque"
|
||||
},
|
||||
{
|
||||
"date": "04/04/2017",
|
||||
"description": "Reparación cambios web"
|
||||
},
|
||||
{
|
||||
"date": "28/06/2017",
|
||||
"description": "Corrección código y algunas mejoras"
|
||||
"date": "25/08/2017",
|
||||
"description": "revamp"
|
||||
}
|
||||
],
|
||||
"categories": [
|
||||
|
||||
(File diff suppressed because it is too large)
@@ -7,9 +7,9 @@
|
||||
}
|
||||
],
|
||||
"find_videos": {
|
||||
"ignore_urls": {
|
||||
"value": "http://www.4shared.com/flash/player.swf"
|
||||
},
|
||||
"ignore_urls": [
|
||||
"http://www.4shared.com/flash/player.swf"
|
||||
],
|
||||
"patterns": [
|
||||
{
|
||||
"pattern": "(http://www.4shared.com/embed/[A-Z0-9a-z]+/[A-Z0-9a-z]+)",
|
||||
|
||||
@@ -7,9 +7,9 @@
|
||||
}
|
||||
],
|
||||
"find_videos": {
|
||||
"ignore_urls": {
|
||||
"value": "http://www.mp4upload.com/embed/embed"
|
||||
},
|
||||
"ignore_urls": [
|
||||
"http://www.mp4upload.com/embed/embed"
|
||||
],
|
||||
"patterns": [
|
||||
{
|
||||
"pattern": "mp4upload.com/embed-([A-Za-z0-9]+)",
|
||||
|
||||
@@ -11,23 +11,21 @@
|
||||
}
|
||||
],
|
||||
"find_videos": {
|
||||
"ignore_urls": {
|
||||
"value": [
|
||||
"http://streamcloud.eu/stylesheets",
|
||||
"http://streamcloud.eu/control",
|
||||
"http://streamcloud.eu/xupload",
|
||||
"http://streamcloud.eu/js",
|
||||
"http://streamcloud.eu/favicon",
|
||||
"http://streamcloud.eu/reward",
|
||||
"http://streamcloud.eu/login",
|
||||
"http://streamcloud.eu/deliver",
|
||||
"http://streamcloud.eu/faq",
|
||||
"http://streamcloud.eu/tos",
|
||||
"http://streamcloud.eu/checkfiles",
|
||||
"http://streamcloud.eu/contact",
|
||||
"http://streamcloud.eu/serve"
|
||||
]
|
||||
},
|
||||
"ignore_urls": [
|
||||
"http://streamcloud.eu/stylesheets",
|
||||
"http://streamcloud.eu/control",
|
||||
"http://streamcloud.eu/xupload",
|
||||
"http://streamcloud.eu/js",
|
||||
"http://streamcloud.eu/favicon",
|
||||
"http://streamcloud.eu/reward",
|
||||
"http://streamcloud.eu/login",
|
||||
"http://streamcloud.eu/deliver",
|
||||
"http://streamcloud.eu/faq",
|
||||
"http://streamcloud.eu/tos",
|
||||
"http://streamcloud.eu/checkfiles",
|
||||
"http://streamcloud.eu/contact",
|
||||
"http://streamcloud.eu/serve"
|
||||
],
|
||||
"patterns": [
|
||||
{
|
||||
"pattern": "(streamcloud.eu/[a-z0-9]+)",
|
||||
|
||||
@@ -11,22 +11,20 @@
|
||||
}
|
||||
],
|
||||
"find_videos": {
|
||||
"ignore_urls": {
|
||||
"value": [
|
||||
"http://streamin.to/embed-theme.html",
|
||||
"http://streamin.to/embed-jquery.html",
|
||||
"http://streamin.to/embed-s.html",
|
||||
"http://streamin.to/embed-images.html",
|
||||
"http://streamin.to/embed-faq.html",
|
||||
"http://streamin.to/embed-embed.html",
|
||||
"http://streamin.to/embed-ri.html",
|
||||
"http://streamin.to/embed-d.html",
|
||||
"http://streamin.to/embed-css.html",
|
||||
"http://streamin.to/embed-js.html",
|
||||
"http://streamin.to/embed-player.html",
|
||||
"http://streamin.to/embed-cgi.html"
|
||||
]
|
||||
},
|
||||
"ignore_urls": [
|
||||
"http://streamin.to/embed-theme.html",
|
||||
"http://streamin.to/embed-jquery.html",
|
||||
"http://streamin.to/embed-s.html",
|
||||
"http://streamin.to/embed-images.html",
|
||||
"http://streamin.to/embed-faq.html",
|
||||
"http://streamin.to/embed-embed.html",
|
||||
"http://streamin.to/embed-ri.html",
|
||||
"http://streamin.to/embed-d.html",
|
||||
"http://streamin.to/embed-css.html",
|
||||
"http://streamin.to/embed-js.html",
|
||||
"http://streamin.to/embed-player.html",
|
||||
"http://streamin.to/embed-cgi.html"
|
||||
],
|
||||
"patterns": [
|
||||
{
|
||||
"pattern": "streamin.to/(?:embed-)?([a-z0-9A-Z]+)",
|
||||
|
||||
@@ -7,7 +7,9 @@
|
||||
}
|
||||
],
|
||||
"find_videos": {
|
||||
"ignore_urls": [],
|
||||
"ignore_urls": [
|
||||
"http://uploaded.net/file/ref"
|
||||
],
|
||||
"patterns": [
|
||||
{
|
||||
"pattern": "(?:ul|uploaded).(?:net|to)/(?:file/|f/)?([a-zA-Z0-9]+)",
|
||||
|
||||
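Review note (servers): the server JSON hunks in this diff migrate `ignore_urls` from an object wrapping a `value` key to a plain list. A loader that has to read both shapes during the transition could normalize the field as below; this is a sketch only, not how Alfa's servertools actually parses the files:

```python
import json

def normalize_ignore_urls(server_json):
    """Return ignore_urls as a flat list for both the old and the new schema."""
    raw = server_json.get("find_videos", {}).get("ignore_urls", [])
    if isinstance(raw, dict):      # old schema: {"value": <url or list of urls>}
        raw = raw.get("value", [])
    if not isinstance(raw, list):  # old schema sometimes stored a single URL string
        raw = [raw]
    return raw

old = json.loads('{"find_videos": {"ignore_urls": {"value": "http://uploaded.net/file/ref"}}}')
new = json.loads('{"find_videos": {"ignore_urls": ["http://uploaded.net/file/ref"]}}')
print(normalize_ignore_urls(old) == normalize_ignore_urls(new))  # True
```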
@@ -7,11 +7,10 @@ from platformcode import logger
|
||||
|
||||
def test_video_exists(page_url):
|
||||
logger.info("(page_url='%s')" % page_url)
|
||||
|
||||
data = httptools.downloadpage(page_url).data
|
||||
if "Page not found" in data:
|
||||
return False, "[vidoza] El archivo no existe o ha sido borrado"
|
||||
elif "Video is processing now" in data:
|
||||
return False, "[vidoza] El archivo no existe o ha sido borrado"
|
||||
elif "processing" in data:
|
||||
return False, "[vidoza] El vídeo se está procesando"
|
||||
|
||||
return True, ""
|
||||
@@ -19,9 +18,7 @@ def test_video_exists(page_url):
|
||||
|
||||
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
|
||||
logger.info("(page_url='%s')" % page_url)
|
||||
|
||||
data = httptools.downloadpage(page_url).data
|
||||
|
||||
video_urls = []
|
||||
matches = scrapertools.find_multiple_matches(data, 'file\s*:\s*"([^"]+)"\s*,\s*label:"([^"]+)"')
|
||||
for media_url, calidad in matches:
|
||||
|
||||
@@ -7,40 +7,38 @@
|
||||
}
|
||||
],
|
||||
"find_videos": {
|
||||
"ignore_urls": {
|
||||
"value": [
|
||||
"http://vidspot.net/embed-theme.html",
|
||||
"http://vidspot.net/embed-jquery.html",
|
||||
"http://vidspot.net/embed-s.html",
|
||||
"http://vidspot.net/embed-images.html",
|
||||
"http://vidspot.net/embed-faq.html",
|
||||
"http://vidspot.net/embed-embed.html",
|
||||
"http://vidspot.net/embed-ri.html",
|
||||
"http://vidspot.net/embed-d.html",
|
||||
"http://vidspot.net/embed-css.html",
|
||||
"http://vidspot.net/embed-js.html",
|
||||
"http://vidspot.net/embed-player.html",
|
||||
"http://vidspot.net/embed-cgi.html",
|
||||
"http://vidspot.net/embed-i.html",
|
||||
"http://vidspot.net/images",
|
||||
"http://vidspot.net/theme",
|
||||
"http://vidspot.net/xupload",
|
||||
"http://vidspot.net/s",
|
||||
"http://vidspot.net/js",
|
||||
"http://vidspot.net/jquery",
|
||||
"http://vidspot.net/login",
|
||||
"http://vidspot.net/make",
|
||||
"http://vidspot.net/i",
|
||||
"http://vidspot.net/faq",
|
||||
"http://vidspot.net/tos",
|
||||
"http://vidspot.net/premium",
|
||||
"http://vidspot.net/checkfiles",
|
||||
"http://vidspot.net/privacy",
|
||||
"http://vidspot.net/refund",
|
||||
"http://vidspot.net/links",
|
||||
"http://vidspot.net/contact"
|
||||
]
|
||||
},
|
||||
"ignore_urls": [
|
||||
"http://vidspot.net/embed-theme.html",
|
||||
"http://vidspot.net/embed-jquery.html",
|
||||
"http://vidspot.net/embed-s.html",
|
||||
"http://vidspot.net/embed-images.html",
|
||||
"http://vidspot.net/embed-faq.html",
|
||||
"http://vidspot.net/embed-embed.html",
|
||||
"http://vidspot.net/embed-ri.html",
|
||||
"http://vidspot.net/embed-d.html",
|
||||
"http://vidspot.net/embed-css.html",
|
||||
"http://vidspot.net/embed-js.html",
|
||||
"http://vidspot.net/embed-player.html",
|
||||
"http://vidspot.net/embed-cgi.html",
|
||||
"http://vidspot.net/embed-i.html",
|
||||
"http://vidspot.net/images",
|
||||
"http://vidspot.net/theme",
|
||||
"http://vidspot.net/xupload",
|
||||
"http://vidspot.net/s",
|
||||
"http://vidspot.net/js",
|
||||
"http://vidspot.net/jquery",
|
||||
"http://vidspot.net/login",
|
||||
"http://vidspot.net/make",
|
||||
"http://vidspot.net/i",
|
||||
"http://vidspot.net/faq",
|
||||
"http://vidspot.net/tos",
|
||||
"http://vidspot.net/premium",
|
||||
"http://vidspot.net/checkfiles",
|
||||
"http://vidspot.net/privacy",
|
||||
"http://vidspot.net/refund",
|
||||
"http://vidspot.net/links",
|
||||
"http://vidspot.net/contact"
|
||||
],
|
||||
"patterns": [
|
||||
{
|
||||
"pattern": "vidspot.(?:net/|php\\?id=)(?:embed-)?([a-z0-9]+)",
|
||||
|
||||
@@ -15,9 +15,9 @@
|
||||
}
|
||||
],
|
||||
"find_videos": {
|
||||
"ignore_urls": {
|
||||
"value": "http://www.yourupload.com/embed/embed"
|
||||
},
|
||||
"ignore_urls": [
|
||||
"http://www.yourupload.com/embed/embed"
|
||||
],
|
||||
"patterns": [
|
||||
{
|
||||
"pattern": "yourupload.com/embed/([A-z0-9]+)",
|
||||
|
||||