@@ -33,15 +33,13 @@ SERVERS = {"26": "powvideo", "45": "okru", "75": "openload", "12": "netutv", "65
list_servers = ['powvideo', 'okru', 'openload', 'netutv', 'thevideos', 'spruto', 'stormo', 'idowatch', 'nowvideo',
                'fastplay', 'raptu', 'tusfiles']

host = "http://allpeliculas.com/"
host = "http://allpeliculas.io/"

def mainlist(item):
    logger.info()
    itemlist = []
    item.text_color = color1

    autoplay.init(item.channel, list_servers, list_quality)

    itemlist.append(item.clone(title="Películas", action="lista", fanart="http://i.imgur.com/c3HS8kj.png",
                               url=host + "movies/newmovies?page=1", extra1=0,
                               thumbnail=get_thumb('movies', auto=True)))
@@ -51,16 +49,13 @@ def mainlist(item):
                               url=host, thumbnail=get_thumb('colections', auto=True)))
    itemlist.append(item.clone(title="", action=""))
    itemlist.append(item.clone(title="Buscar...", action="search", thumbnail=get_thumb('search', auto=True)))

    autoplay.show_option(item.channel, itemlist)

    return itemlist


def colecciones(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    patron = 'href="(/peliculas[^"]+).*?'
    patron += 'title_geo"><span>([^<]+).*?'
@@ -143,11 +138,11 @@ def findvideos(item):
    patron += '>([^<]+)'
    matches = scrapertools.find_multiple_matches(data, patron)
    for url, calidad in matches:
        calidad = scrapertools.find_single_match(calidad, "\d+") + scrapertools.find_single_match(calidad, "\..+")
        itemlist.append(item.clone(
            channel=item.channel,
            action="play",
            title=calidad,
            fulltitle=item.title,
            thumbnail=item.thumbnail,
            contentThumbnail=item.thumbnail,
            url=url,
@@ -159,7 +154,7 @@ def findvideos(item):
    if config.get_videolibrary_support():
        itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
                             action="add_pelicula_to_library", url=item.url, thumbnail=item.thumbnail,
                             fulltitle=item.fulltitle,
                             contentTitle=item.contentTitle
                             ))
    # Required by FilterTools

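# lista() in the next hunk drives the listing: the site exposes a JSON
# endpoint, so the channel POSTs an optional genre filter and parses the
# response with jsontools instead of scraping HTML.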
@@ -183,31 +178,22 @@ def lista(item):
    dict_param = dict()
    item.infoLabels = {}
    item.text_color = color2

    params = '{}'
    if item.extra1 != 0:
        dict_param["genero"] = [item.extra1]
        params = jsontools.dump(dict_param)

    data = httptools.downloadpage(item.url, post=params).data
    data = data.replace("<mark>", "").replace("<\/mark>", "")
    dict_data = jsontools.load(data)

    for it in dict_data["items"]:
        title = it["title"]
        plot = it["slogan"]
        rating = it["imdb"]
        year = it["year"]
        url = host + "pelicula/" + it["slug"]
        title = it["title"] + " (%s)" % year
        thumb = host + it["image"]
        item.infoLabels['year'] = year
        itemlist.append(item.clone(action="findvideos", title=title, fulltitle=title, url=url, thumbnail=thumb,
                                   plot=plot, context=["buscar_trailer"], contentTitle=title, contentType="movie"))

    try:
        tmdb.set_infoLabels(itemlist, __modo_grafico__)
    except:
        pass
        itemlist.append(item.clone(action="findvideos", title=title, url=url, thumbnail=thumb,
                                   context=["buscar_trailer"], contentTitle=it["title"], contentType="movie"))
    tmdb.set_infoLabels(itemlist, __modo_grafico__)
    pagina = scrapertools.find_single_match(item.url, 'page=([0-9]+)')
    item.url = item.url.replace(pagina, "")
    if pagina == "":
@@ -219,6 +205,7 @@ def lista(item):
        ))
    return itemlist


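# search() below receives the query typed by the user; newest() (next hunk)
# feeds Alfa's "Novedades" section and swallows any exception so one broken
# channel cannot interrupt the aggregator.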
def search(item, texto):
    logger.info()
    if texto != "":
@@ -246,12 +233,10 @@ def newest(categoria):

        if itemlist[-1].action == "lista":
            itemlist.pop()

    # Catch the exception so a failing channel does not break the "Novedades" section
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist

@@ -1,34 +0,0 @@
{
    "id": "cuelgame",
    "name": "Cuelgame",
    "active": false,
    "adult": false,
    "language": ["cast"],
    "thumbnail": "cuelgame.png",
    "banner": "cuelgame.png",
    "categories": [
        "torrent",
        "movie",
        "tvshow",
        "documentary",
        "vos"
    ],
    "settings": [
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_torrent",
            "type": "bool",
            "label": "Incluir en Novedades - Torrent",
            "default": true,
            "enabled": true,
            "visible": true
        }
    ]
}
@@ -1,97 +0,0 @@
# -*- coding: utf-8 -*-

import re
import urlparse

from core import scrapertools, httptools
from core.item import Item
from core.scrapertools import decodeHtmlentities as dhe
from platformcode import logger


def mainlist(item):
    logger.info()
    itemlist = []
    itemlist.append(Item(channel=item.channel, title="[COLOR forestgreen]Videos[/COLOR]", action="scraper",
                         url="http://cuelgame.net/?category=4",
                         thumbnail="http://img5a.flixcart.com/image/poster/q/t/d/vintage-camera-collage-sr148-medium-400x400-imadkbnrnbpggqyz.jpeg",
                         fanart="http://imgur.com/7frGoPL.jpg"))
    itemlist.append(Item(channel=item.channel, title="[COLOR forestgreen]Buscar[/COLOR]", action="search", url="",
                         thumbnail="http://images2.alphacoders.com/846/84682.jpg",
                         fanart="http://imgur.com/1sIHN1r.jpg"))
    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = "http://cuelgame.net/search.php?q=%s" % (texto)

    try:
        return scraper(item)
    # Catch the exception so a failing channel does not break the global search
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


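# scraper() pulls url, title, thumbnail and plot from each result with a single
# multi-group regex, skips ed2k links, and emits the rest as direct torrent
# playback items; the "siguiente" link drives pagination.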
def scraper(item):
    logger.info()
    itemlist = []
    # Download the page
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|CET", "", data)
    patron = '<h2> <a href="([^"]+)".*?'
    patron += 'class="l:\d+".*?>([^<]+)</a>'
    patron += '(.*?)class="lazy".*?'
    patron += 'news-content">([^<]+)'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedtitle, check_thumb, scrapedplot in matches:
        scrapedtitle = re.sub(r'\.', ' ', scrapedtitle)
        scrapedthumbnail = scrapertools.find_single_match(check_thumb, "</div><img src=\'([^\']+)\'")
        title_year = re.sub(r"(\d+)p", "", scrapedtitle)
        if "category=4" in item.url:
            try:
                year = scrapertools.find_single_match(title_year, '.*?(\d\d\d\d)')
            except:
                year = ""
        else:
            year = ""
        # Skip eMule (ed2k) links
        if scrapedurl.startswith("ed2k:"):
            continue
        scrapedtitle = "[COLOR greenyellow]" + scrapedtitle + "[/COLOR]"
        itemlist.append(
            Item(channel=item.channel, title=scrapedtitle, url=scrapedurl, action="play", server="torrent",
                 thumbnail=scrapedthumbnail, folder=False))
    # Extract the paginator
    patronvideos = '<a href="([^"]+)" rel="next">siguiente »</a>'
    matches = scrapertools.find_multiple_matches(data, patronvideos)
    if len(matches) > 0:
        # Fix "&amp;" in the pagination link
        next_page = matches[0].replace("amp;", "")
        scrapedurl = urlparse.urljoin(item.url, next_page)
        itemlist.append(Item(channel=item.channel, action="scraper", title="Página siguiente >>", url=scrapedurl,
                             thumbnail="http://imgur.com/ycPgVVO.png", folder=True))
    return itemlist


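# newest() is the hook Alfa's "Novedades" section calls for this channel: it
# reuses scraper() on the torrent category and drops the trailing pagination
# item before returning.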
def newest(categoria):
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria == 'torrent':
            item.url = 'http://cuelgame.net/?category=4'
            itemlist = scraper(item)
            if itemlist[-1].action == "Página siguiente >>":
                itemlist.pop()
    # Catch the exception so a failing channel does not break the "Novedades" section
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
    return itemlist
@@ -1,7 +1,7 @@
{
    "id": "plusdede",
    "name": "Plusdede",
    "active": true,
    "active": false,
    "adult": false,
    "language": ["cast"],
    "thumbnail": "https://s18.postimg.cc/e17e98eqh/6_-_4_Isbv_Q3.png",

@@ -5,8 +5,10 @@ import re
from core import httptools
from core import jsontools
from core import scrapertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from megaserver import Client
from platformcode import config, logger, platformtools

__modo_grafico__ = config.get_setting('modo_grafico', 'puyasubs')
__perfil__ = config.get_setting('perfil', "puyasubs")
@@ -20,39 +22,36 @@ if __perfil__ < 3:
else:
    color1 = color2 = color3 = color4 = color5 = ""

host = "http://puya.si"


def mainlist(item):
    logger.info()

    itemlist = list()

    itemlist.append(Item(channel=item.channel, action="listado", title="Novedades Anime", thumbnail=item.thumbnail,
                         url="http://puya.si/?cat=4", text_color=color1))
                         url=host + "/?cat=4", text_color=color1))
    itemlist.append(Item(channel=item.channel, action="listado", title="Novedades Doramas", thumbnail=item.thumbnail,
                         url="http://puya.si/?cat=142", text_color=color1))
                         url=host + "/?cat=142", text_color=color1))
    itemlist.append(Item(channel=item.channel, action="", title="Descargas", text_color=color2))
    itemlist.append(Item(channel=item.channel, action="descargas", title=" Descargas Animes y Doramas en proceso",
                         thumbnail=item.thumbnail, url="http://puya.si/?page_id=25501", text_color=color1))
                         thumbnail=item.thumbnail, url=host + "/?page_id=25501", text_color=color1))
    itemlist.append(Item(channel=item.channel, action="descargas", title=" Descargas Animes Finalizados",
                         thumbnail=item.thumbnail, url="http://puya.si/?page_id=15388", text_color=color1))
                         thumbnail=item.thumbnail, url=host + "/?page_id=15388", text_color=color1))
    itemlist.append(Item(channel=item.channel, action="letra", title=" Descargas Animes Finalizados por Letra",
                         thumbnail=item.thumbnail, url="http://puya.si/?page_id=15388", text_color=color1))
                         thumbnail=item.thumbnail, url=host + "/?page_id=15388", text_color=color1))
    itemlist.append(Item(channel=item.channel, action="descargas", title=" Descargas Doramas Finalizados",
                         thumbnail=item.thumbnail, url="http://puya.si/?page_id=25507", text_color=color1))
                         thumbnail=item.thumbnail, url=host + "/?page_id=25507", text_color=color1))
    itemlist.append(Item(channel=item.channel, action="descargas", title=" Descargas Películas y Ovas",
                         thumbnail=item.thumbnail, url="http://puya.si/?page_id=25503", text_color=color1))
                         thumbnail=item.thumbnail, url=host + "/?page_id=25503", text_color=color1))
    itemlist.append(Item(channel=item.channel, action="torrents", title="Lista de Torrents", thumbnail=item.thumbnail,
                         url="https://www.frozen-layer.com/buscar/descargas", text_color=color1))

    itemlist.append(Item(channel=item.channel, action="search", title="Buscar anime/dorama/película",
                         thumbnail=item.thumbnail, url="http://puya.si/?s=", text_color=color3))

                         thumbnail=item.thumbnail, url=host + "/?s=", text_color=color3))
    itemlist.append(item.clone(title="Configurar canal", action="configuracion", text_color=color5, folder=False))
    return itemlist


def configuracion(item):
    from platformcode import platformtools
    ret = platformtools.show_channel_settings()
    platformtools.itemlist_refresh()
    return ret
@@ -73,9 +72,7 @@ def search(item, texto):

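# listado() scrapes the WordPress post list (<h2 class="entry-title"> blocks)
# and, for the anime category and for searches, enriches the results through
# TMDB before following the pagination link.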
def listado(item):
    logger.info()

    itemlist = list()

    data = httptools.downloadpage(item.url).data
    bloques = scrapertools.find_multiple_matches(data, '<h2 class="entry-title">(.*?)</article>')
    patron = 'href="([^"]+)".*?>(.*?)</a>.*?(?:<span class="bl_categ">(.*?)|</span>)</footer>'
@@ -96,27 +93,22 @@ def listado(item):
        itemlist.append(Item(channel=item.channel, action="findvideos", url=url, title=title, thumbnail=thumb,
                             contentTitle=contenttitle, show=contenttitle, contentType=tipo,
                             infoLabels={'filtro': filtro_tmdb}, text_color=color1))

    if ("cat=4" in item.url or item.extra == "busqueda") and not item.extra == "novedades":
        from core import tmdb
        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)

    next_page = scrapertools.find_single_match(data, "<span class='current'>.*?<a href='([^']+)'")
    if next_page:
        next_page = next_page.replace("&amp;", "&")
        itemlist.append(Item(channel=item.channel, action="listado", url=next_page, title=">> Página Siguiente",
                             thumbnail=item.thumbnail, extra=item.extra, text_color=color2))

    return itemlist


def descargas(item):
    logger.info()

    itemlist = list()
    if not item.pagina:
        item.pagina = 0

    data = httptools.downloadpage(item.url).data
    patron = '<li><a href="(http://puya.si/\?page_id=\d+|http://safelinking.net/[0-9A-z]+)">(.*?)</a>'
    if item.letra:
@@ -130,32 +122,25 @@ def descargas(item):
            .replace("[Puya+] ", "")
        contenttitle = re.sub(r'(\[[^\]]*\])', '', contenttitle).strip()
        filtro_tmdb = {"original_language": "ja"}.items()

        tipo = "tvshow"
        if "page_id=25503" in item.url:
            tipo = "movie"

        action = "findvideos"
        if "safelinking" in url:
            action = "extract_safe"
        itemlist.append(Item(channel=item.channel, action=action, url=url, title=title, contentTitle=contenttitle,
                             show=contenttitle, contentType=tipo, infoLabels={'filtro': filtro_tmdb},
                             text_color=color1))

    from core import tmdb
    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)

    if len(matches) > item.pagina + 20:
        pagina = item.pagina + 20
        itemlist.append(Item(channel=item.channel, action="descargas", url=item.url, title=">> Página Siguiente",
                             thumbnail=item.thumbnail, pagina=pagina, letra=item.letra, text_color=color2))

    return itemlist


def letra(item):
    logger.info()

    itemlist = list()
    data = httptools.downloadpage(item.url).data
    patron = '<li>(?:<strong>|)([A-z#]{1})(?:</strong>|)</li>'
@@ -163,20 +148,16 @@ def letra(item):
    for match in matches:
        itemlist.append(Item(channel=item.channel, title=match, action="descargas", letra=match, url=item.url,
                             thumbnail=item.thumbnail, text_color=color1))

    return itemlist


def torrents(item):
    logger.info()

    itemlist = list()
    if not item.pagina:
        item.pagina = 0

    post = "utf8=%E2%9C%93&busqueda=puyasubs&search=Buscar&tab=anime&con_seeds=con_seeds"
    data = httptools.downloadpage(item.url, post).data

    patron = "<td>.*?href='([^']+)' title='descargar torrent'>.*?title='informacion de (.*?)'.*?<td class='fecha'>.*?<td>(.*?)</td>" \
             ".*?<span class=\"stats\d+\">(\d+)</span>.*?<span class=\"stats\d+\">(\d+)</span>"
    matches = scrapertools.find_multiple_matches(data, patron)
@@ -184,20 +165,15 @@ def torrents(item):
        contentTitle = title
        if "(" in contentTitle:
            contentTitle = contentTitle.split("(")[0]

        size = size.strip()
        filtro_tmdb = {"original_language": "ja"}.items()
        title += " [COLOR %s][Semillas:%s[/COLOR]|[COLOR %s]Leech:%s[/COLOR]|%s]" % (
            color4, seeds, color5, leechers, size)
        url = "https://www.frozen-layer.com" + url

        itemlist.append(Item(channel=item.channel, action="play", url=url, title=title, contentTitle=contentTitle,
                             server="torrent", show=contentTitle, contentType="tvshow", text_color=color1,
                             infoLabels={'filtro': filtro_tmdb}))

    from core import tmdb
    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)

    if len(matches) > item.pagina + 25:
        pagina = item.pagina + 25
        itemlist.append(Item(channel=item.channel, action="torrents", url=item.url, title=">> Página Siguiente",
@@ -208,43 +184,39 @@ def torrents(item):
        next_page = "https://www.frozen-layer.com" + next_page
        itemlist.append(Item(channel=item.channel, action="torrents", url=next_page, title=">> Página Siguiente",
                             thumbnail=item.thumbnail, pagina=0, text_color=color2))

    return itemlist


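# findvideos() gathers every mirror on the post page: frozen-layer and nyaa.si
# torrents (resolving nyaa pages to their .torrent and magnet links), 1fichier
# links and safelinking-protected links, labelling 720p/1080p when both
# qualities are offered.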
def findvideos(item):
    logger.info()
    if item.infoLabels["tmdb_id"] and not item.infoLabels["plot"]:
        from core import tmdb
        tmdb.set_infoLabels_item(item, True, idioma_busqueda="en")

    itemlist = list()

    itemlist = []
    data = httptools.downloadpage(item.url).data
    data2 = data.replace("\n", "")
    idiomas = scrapertools.find_single_match(data, 'Subtitulo:\s*(.*?)<br />')
    calidades = ['720p', '1080p']
    torrentes = scrapertools.find_multiple_matches(data, '<a href="(https://www.frozen-layer.com/descargas[^"]+)"')
    calidades = ['1080p', '720p']
    torrentes = scrapertools.find_multiple_matches(data, '<a href="((?:https://www.frozen-layer.com/descargas[^"]+|https://nyaa.si/view/[^"]+))"')
    if torrentes:
        for i, enlace in enumerate(torrentes):
            title = "Ver por Torrent %s" % idiomas
            if ">720p" in data and ">1080p" in data:
                try:
                    title = "[%s] %s" % (calidades[i], title)
                except:
                    pass
            itemlist.append(item.clone(title=title, action="play", url=enlace, server="torrent"))

            if ">720p" in data2 and ">1080p" in data2:
                title = "[%s] %s" % (calidades[i], title)
            if "nyaa" in enlace:
                data1 = httptools.downloadpage(url=enlace).data
                enlace = "https://nyaa.si" + scrapertools.find_single_match(data1, 'a href="(/do[^"]+)')
                itemlist.append(item.clone(title=title, action="play", url=enlace, server="torrent"))
                enlace = scrapertools.find_single_match(data1, '<a href="(magnet[^"]+)')
                itemlist.append(item.clone(title=title, action="play", url=enlace, server="torrent"))
            #itemlist.append(item.clone(title=title, action="play", url=enlace, server="torrent"))
    onefichier = scrapertools.find_multiple_matches(data, '<a href="(https://1fichier.com/[^"]+)"')
    if onefichier:
        for i, enlace in enumerate(onefichier):
            title = "Ver por 1fichier %s" % idiomas
            if ">720p" in data and ">1080p" in data:
            if ">720p" in data and ">1080p" in data2:
                try:
                    title = "[%s] %s" % (calidades[i], title)
                except:
                    pass
            itemlist.append(item.clone(title=title, action="play", url=enlace, server="onefichier"))

    safelink = scrapertools.find_multiple_matches(data, '<a href="(http(?:s|)://safelinking.net/[^"]+)"')
    if safelink:
        for i, safe in enumerate(safelink):
@@ -276,17 +248,14 @@ def findvideos(item):
            except:
                pass
            itemlist.append(item.clone(title=title, action=action, url=enlace, server=server))

    return itemlist


def carpeta(item):
    logger.info()
    itemlist = list()

    if item.server == "onefichier":
        data = httptools.downloadpage(item.url).data

        patron = '<tr>.*?<a href="([^"]+)".*?>(.*?)</a>.*?<td class="normal">(.*?)</td>'
        matches = scrapertools.find_multiple_matches(data, patron)
        for scrapedurl, scrapedtitle, size in matches:
@@ -295,11 +264,7 @@ def carpeta(item):
                                 server="onefichier", text_color=color1, thumbnail=item.thumbnail,
                                 infoLabels=item.infoLabels))
    else:
        from megaserver import Client
        from platformcode import platformtools

        c = Client(url=item.url)

        files = c.get_files()
        c.stop()
        for enlace in files:
@@ -308,7 +273,6 @@ def carpeta(item):
                Item(channel=item.channel, title=enlace["name"], url=item.url + "|" + file_id, action="play",
                     server="mega", text_color=color1, thumbnail=item.thumbnail,
                     infoLabels=item.infoLabels))

    itemlist.sort(key=lambda item: item.title)
    return itemlist

@@ -316,16 +280,13 @@ def carpeta(item):
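# extract_safe() resolves safelinking-protected links: it POSTs the link hash
# to safelinking.net's JSON endpoint and maps each returned domain to the
# matching server (mega / 1fichier), switching to the folder handler
# ("carpeta") for "/#F!" and "/dir/" urls.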
def extract_safe(item):
    logger.info()
    if item.infoLabels["tmdb_id"] and not item.infoLabels["plot"]:
        from core import tmdb
        tmdb.set_infoLabels_item(item, True, idioma_busqueda="en")
    itemlist = list()

    hash = item.url.rsplit("/", 1)[1]
    headers = [['Content-Type', 'application/json;charset=utf-8']]
    post = jsontools.dump({"hash": hash})
    data = httptools.downloadpage("http://safelinking.net/v1/protected", post, headers).data
    data = jsontools.load(data)

    for link in data.get("links"):
        enlace = link["url"]
        domain = link["domain"]
@@ -335,29 +296,11 @@ def extract_safe(item):
            server = "mega"
            if "/#F!" in enlace:
                action = "carpeta"

        elif "1fichier" in domain:
            server = "onefichier"
            if "/dir/" in enlace:
                action = "carpeta"

        itemlist.append(item.clone(title=title, action=action, url=enlace, server=server))

    return itemlist


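# play() normalises frozen-layer entries: listing pages are fetched once more
# to extract the real .torrent link before the item is handed to the torrent
# server.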
def play(item):
    logger.info()
    itemlist = list()

    if item.server == "torrent" and "frozen" in item.url and not item.url.endswith(".torrent"):
        data = httptools.downloadpage(item.url).data
        enlace = scrapertools.find_single_match(data, "<div id='descargar_torrent'>.*?href='([^']+)'")
        if enlace:
            itemlist.append(item.clone(url=enlace))
    else:
        itemlist.append(item)

    return itemlist


@@ -365,7 +308,7 @@ def newest(categoria):
    logger.info()
    item = Item()
    try:
        item.url = "http://puya.si/?cat=4"
        item.url = host + "/?cat=4"
        item.extra = "novedades"
        itemlist = listado(item)

@@ -373,12 +316,10 @@ def newest(categoria):
        itemlist.pop()
        for it in itemlist:
            it.contentTitle = it.title

    # Catch the exception so a failing channel does not break the "Novedades" section
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist

@@ -9,11 +9,11 @@ from channelselector import get_thumb
from channels import autoplay
from channels import filtertools
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from lib import jsunpack
from platformcode import config, logger, platformtools


@@ -1,61 +0,0 @@
{
    "id": "seriecanal",
    "name": "Seriecanal",
    "active": false,
    "adult": false,
    "language": ["cast"],
    "thumbnail": "http://i.imgur.com/EwMK8Yd.png",
    "banner": "seriecanal.png",
    "categories": [
        "tvshow",
        "vos"
    ],
    "settings": [
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": false,
            "enabled": true,
            "visible": true
        },
        {
            "id": "modo_grafico",
            "type": "bool",
            "label": "Buscar información extra",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "user",
            "type": "text",
            "label": "Usuario",
            "color": "0xFFd50b0b",
            "enabled": true,
            "visible": true
        },
        {
            "id": "password",
            "type": "text",
            "label": "Contraseña",
            "color": "0xFFd50b0b",
            "enabled": true,
            "visible": true,
            "hidden": true
        },
        {
            "id": "perfil",
            "type": "list",
            "label": "Perfil de color",
            "default": 2,
            "enabled": true,
            "visible": true,
            "lvalues": [
                "Perfil 3",
                "Perfil 2",
                "Perfil 1"
            ]
        }
    ]
}
@@ -1,226 +0,0 @@
# -*- coding: utf-8 -*-

import re
import urllib
import urlparse

from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from platformcode import config, logger

__modo_grafico__ = config.get_setting('modo_grafico', "seriecanal")
__perfil__ = config.get_setting('perfil', "seriecanal")

# Set the colour profile
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00'],
          ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E'],
          ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE']]
color1, color2, color3 = perfil[__perfil__]

host = "https://www.seriecanal.com/"


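# login() checks for an active session ("Cerrar Sesion" in the page) and
# otherwise posts the credentials stored in the channel settings, returning
# an (ok, error_message) pair that mainlist() uses to gate the menu.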
def login():
    logger.info()
    data = httptools.downloadpage(host).data
    if "Cerrar Sesion" in data:
        return True, ""
    usuario = config.get_setting("user", "seriecanal")
    password = config.get_setting("password", "seriecanal")
    if usuario == "" or password == "":
        return False, 'Regístrate en www.seriecanal.com e introduce tus datos en "Configurar Canal"'
    else:
        post = urllib.urlencode({'username': usuario, 'password': password})
        data = httptools.downloadpage(host + "index.php?page=member&do=login&tarea=acceder", post=post).data
        if "Bienvenid@, se ha identificado correctamente en nuestro sistema" in data:
            return True, ""
        else:
            return False, "Error en el login. El usuario y/o la contraseña no son correctos"


def mainlist(item):
    logger.info()
    itemlist = []
    item.text_color = color1
    result, message = login()
    if result:
        itemlist.append(item.clone(action="series", title="Últimos episodios", url=host))
        itemlist.append(item.clone(action="genero", title="Series por género"))
        itemlist.append(item.clone(action="alfabetico", title="Series por orden alfabético"))
        itemlist.append(item.clone(action="search", title="Buscar..."))
    else:
        itemlist.append(item.clone(action="", title=message, text_color="red"))
    itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False))
    return itemlist


def configuracion(item):
    from platformcode import platformtools
    ret = platformtools.show_channel_settings()
    platformtools.itemlist_refresh()
    return ret


def search(item, texto):
    logger.info()
    item.url = host + "index.php?page=portada&do=category&method=post&category_id=0&order=" \
               "C_Create&view=thumb&pgs=1&p2=1"
    try:
        post = "keyserie=" + texto
        item.extra = post
        return series(item)
    # Catch the exception so a failing channel does not break the global search
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def genero(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(host).data
    data = scrapertools.find_single_match(data, '<ul class="tag-cloud">(.*?)</ul>')
    matches = scrapertools.find_multiple_matches(data, '<a.*?href="([^"]+)">([^"]+)</a>')
    for scrapedurl, scrapedtitle in matches:
        scrapedtitle = scrapedtitle.capitalize()
        url = urlparse.urljoin(host, scrapedurl)
        itemlist.append(item.clone(action="series", title=scrapedtitle, url=url))
    return itemlist


def alfabetico(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(host).data
    data = scrapertools.find_single_match(data, '<ul class="pagination pagination-sm" style="margin:5px 0;">(.*?)</ul>')
    matches = scrapertools.find_multiple_matches(data, '<a.*?href="([^"]+)">([^"]+)</a>')
    for scrapedurl, scrapedtitle in matches:
        url = urlparse.urljoin(host, scrapedurl)
        itemlist.append(item.clone(action="series", title=scrapedtitle, url=url))
    return itemlist


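# series() parses the show grid (thumbnail, url, plot, title, season and
# episode per card), seeds infoLabels so TMDB can enrich the entries, and
# follows the "Página Siguiente" link for pagination.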
def series(item):
    logger.info()
    itemlist = []
    item.infoLabels = {}
    item.text_color = color2
    if item.extra != "":
        data = httptools.downloadpage(item.url, post=item.extra).data
    else:
        data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)

    patron = '<div class="item-inner" style="margin: 0 20px 0px 0\;"><img src="([^"]+)".*?' \
             'href="([^"]+)" title="Click para Acceder a la Ficha(?:\|([^"]+)|)".*?' \
             '<strong>([^"]+)</strong></a>.*?<strong>([^"]+)</strong></p>.*?' \
             '<p class="text-warning".*?\;">(.*?)</p>'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedthumbnail, scrapedurl, scrapedplot, scrapedtitle, scrapedtemp, scrapedepi in matches:
        title = scrapedtitle + " - " + scrapedtemp + " - " + scrapedepi
        url = urlparse.urljoin(host, scrapedurl)
        temporada = scrapertools.find_single_match(scrapedtemp, "\d+")
        episode = scrapertools.find_single_match(scrapedepi, "\d+")
        #item.contentType = "tvshow"
        if temporada != "":
            item.infoLabels['season'] = temporada
            #item.contentType = "season"
        if episode != "":
            item.infoLabels['episode'] = episode
            #item.contentType = "episode"
        itemlist.append(item.clone(action="findvideos", title=title, url=url,
                                   contentSerieName=scrapedtitle,
                                   context=["buscar_trailer"]))
    tmdb.set_infoLabels(itemlist)
    # Extract the next-page link
    next_page = scrapertools.find_single_match(data, '<a href="([^"]+)" (?:onclick="return false;" |)title='
                                                     '"Página Siguiente"')
    if next_page != "/":
        url = urlparse.urljoin(host, next_page)
        itemlist.append(item.clone(action="series", title=">> Siguiente", url=url, text_color=color3))

    return itemlist


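# findvideos() walks two tables on the episode page (torrent download links
# and online viewing links), then lists any additional seasons and finally
# adds a trailer-search entry.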
def findvideos(item):
    logger.info()
    itemlist = []
    item.text_color = color3
    data = httptools.downloadpage(item.url).data
    data = scrapertools.decodeHtmlentities(data)
    # Search the download/torrent section
    data_download = scrapertools.find_single_match(data, '<th>Episodio - Enlaces de Descarga</th>(.*?)</table>')
    patron = '<p class="item_name".*?<a href="([^"]+)".*?>([^"]+)</a>'
    matches = scrapertools.find_multiple_matches(data_download, patron)
    for scrapedurl, scrapedepi in matches:
        new_item = item.clone()
        if "Episodio" not in scrapedepi:
            scrapedtitle = "[Torrent] Episodio " + scrapedepi
        else:
            scrapedtitle = "[Torrent] " + scrapedepi
        scrapedtitle = scrapertools.htmlclean(scrapedtitle)
        new_item.infoLabels['episode'] = scrapertools.find_single_match(scrapedtitle, "Episodio (\d+)")
        logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "]")
        itemlist.append(new_item.clone(action="play", title=scrapedtitle, url=scrapedurl, server="torrent",
                                       contentType="episode"))
    # Search the online section
    data_online = scrapertools.find_single_match(data, "<th>Enlaces de Visionado Online</th>(.*?)</table>")
    patron = '<a href="([^"]+)\\n.*?src="([^"]+)".*?' \
             'title="Enlace de Visionado Online">([^"]+)</a>'
    matches = scrapertools.find_multiple_matches(data_online, patron)
    for scrapedurl, scrapedthumb, scrapedtitle in matches:
        # Discard trailer links
        scrapedtitle = scrapertools.htmlclean(scrapedtitle)
        if (scrapedthumb != "images/series/youtube.png") & (scrapedtitle != "Trailer"):
            new_item = item.clone()
            server = scrapertools.find_single_match(scrapedthumb, "images/series/(.*?).png")
            title = "[" + server.capitalize() + "]" + " " + scrapedtitle

            new_item.infoLabels['episode'] = scrapertools.find_single_match(scrapedtitle, "Episodio (\d+)")
            itemlist.append(new_item.clone(action="play", title=title, url=scrapedurl, contentType="episode"))
    # Check whether other seasons are available
    if not "No hay disponible ninguna Temporada adicional" in data:
        data_temp = scrapertools.find_single_match(data, '<div class="panel panel-success">(.*?)</table>')
        data_temp = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data_temp)
        patron = '<tr><td><p class="item_name"><a href="([^"]+)".*?' \
                 '<p class="text-success"><strong>([^"]+)</strong>'
        matches = scrapertools.find_multiple_matches(data_temp, patron)
        for scrapedurl, scrapedtitle in matches:
            new_item = item.clone()
            url = urlparse.urljoin(host, scrapedurl)
            scrapedtitle = scrapedtitle.capitalize()
            temporada = scrapertools.find_single_match(scrapedtitle, "Temporada (\d+)")
            if temporada != "":
                new_item.infoLabels['season'] = temporada
            new_item.infoLabels['episode'] = ""
            itemlist.append(new_item.clone(action="findvideos", title=scrapedtitle, url=url, text_color="red",
                                           contentType="season"))
    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
    new_item = item.clone()
    if config.is_xbmc():
        new_item.contextual = True
    itemlist.append(new_item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
                                   text_color="magenta"))
    return itemlist


def play(item):
    logger.info()
    itemlist = []
    if item.extra == "torrent":
        itemlist.append(item.clone())
    else:
        # Extract the url behind the bit.ly link
        if item.url.startswith("http://bit.ly/"):
            item.url = scrapertools.getLocationHeaderFromResponse(item.url)
        video_list = servertools.findvideos(item.url)
        if video_list:
            url = video_list[0][1]
            server = video_list[0][2]
            itemlist.append(item.clone(server=server, url=url))

    return itemlist
@@ -1,25 +0,0 @@
{
    "id": "seriesyonkis",
    "name": "Seriesyonkis",
    "active": false,
    "adult": false,
    "language": ["cast"],
    "thumbnail": "seriesyonkis.png",
    "banner": "seriesyonkis.png",
    "fanart": "seriesyonkis.jpg",
    "categories": [
        "tvshow",
        "anime",
        "vos"
    ],
    "settings": [
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": false,
            "enabled": true,
            "visible": true
        }
    ]
}
@@ -1,197 +0,0 @@
# -*- coding: utf-8 -*-

import re
import urlparse

from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger

host = 'https://yonkis.to'


def mainlist(item):
    logger.info()

    itemlist = list()
    itemlist.append(Item(channel=item.channel, action="alfabetico", title="Listado alfabetico", url=host))
    itemlist.append(Item(channel=item.channel, action="mas_vistas", title="Series más vistas",
                         url=host + "/series-mas-vistas"))
    itemlist.append(Item(channel=item.channel, action="ultimos", title="Últimos episodios añadidos",
                         url=host))
    itemlist.append(Item(channel=item.channel, action="search", title="Buscar", url=host + "/buscar/serie"))

    return itemlist


def alfabetico(item):
    logger.info()

    itemlist = list()

    itemlist.append(Item(channel=item.channel, action="series", title="0-9", url=host + "/lista-de-series/0-9"))
    for letra in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
        itemlist.append(Item(channel=item.channel, action="series", title=letra, url=host + "/lista-de-series/" + letra))

    return itemlist


def mas_vistas(item):
    logger.info()

    data = httptools.downloadpage(item.url).data
    matches = re.compile('<a title="([^"]+)" href="([^"]+)".*?src="([^"]+)".*?</a>', re.S).findall(data)

    itemlist = []
    for scrapedtitle, scrapedurl, scrapedthumbnail in matches:
        scrapedurl = urlparse.urljoin(item.url, scrapedurl)
        scrapedthumbnail = urlparse.urljoin(item.url, scrapedthumbnail.replace("/90/", "/150/"))

        itemlist.append(
            Item(channel=item.channel, action="episodios", title=scrapedtitle, fulltitle=scrapedtitle, url=scrapedurl,
                 thumbnail=scrapedthumbnail, show=scrapedtitle, fanart=item.fanart))

    return itemlist


def search(item, texto):
    logger.info()

    itemlist = []
    post = "keyword=%s&search_type=serie" % texto
    data = httptools.downloadpage(item.url, post=post).data

    try:
        patron = '<a href="([^"]+)" title="([^"]+)"><img.*?src="([^"]+)".*?class="content">([^<]+)</div>'
        matches = re.compile(patron, re.DOTALL).findall(data)
        for scrapedurl, scrapedtitle, scrapedthumb, scrapedplot in matches:
            title = scrapedtitle.strip()
            url = host + scrapedurl
            thumb = host + scrapedthumb.replace("/90/", "/150/")
            plot = re.sub(r"\n|\r|\t|\s{2,}", "", scrapedplot.strip())
            logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumb + "]")

            itemlist.append(Item(channel=item.channel, action="episodios", title=title, fulltitle=title, url=url,
                                 thumbnail=thumb, plot=plot, show=title))

        return itemlist
    # Catch the exception so a failing channel does not break the global search
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def ultimos(item):
    logger.info()

    itemlist = []

    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
    logger.debug("data %s" % data)
    matches = re.compile('data-href="([^"]+)" data-src="([^"]+)" data-alt="([^"]+)".*?<a[^>]+>(.*?)</a>', re.S).findall(data)

    for url, thumb, show, title in matches:

        url = host + url

        itemlist.append(Item(channel=item.channel, title=title, url=url, thumbnail=thumb, show=show.strip(),
                             action="findvideos", fulltitle=title))

    return itemlist


def series(item):
    logger.info()
    itemlist = []

    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)

    matches = scrapertools.find_single_match(data, '<ul id="list-container" class="dictionary-list">(.*?)</ul>')
    matches = re.compile('title="([^"]+)" href="([^"]+)"', re.S).findall(matches)
    for title, url in matches:
        itemlist.append(Item(channel=item.channel, action="episodios", title=title, fulltitle=title,
                             url=urlparse.urljoin(item.url, url), thumbnail=item.thumbnail, show=title))

    # Paginator
    matches = re.compile('<a href="([^"]+)">></a>', re.S).findall(data)

    paginador = None
    if len(matches) > 0:
        paginador = Item(channel=item.channel, action="series", title="!Página siguiente",
                         url=urlparse.urljoin(item.url, matches[0]), thumbnail=item.thumbnail, show=item.show)

    if paginador and len(itemlist) > 0:
        itemlist.insert(0, paginador)
        itemlist.append(paginador)

    return itemlist


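# episodios() reads plot and thumbnail from the og:description / og:image meta
# tags, lists every episode link, and offers the usual videolibrary and
# download-all entries when library support is enabled.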
def episodios(item):
    logger.info()

    itemlist = []

    # Download the page
    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)

    pattern = '<meta property="og:description" content="([^/]+)" /><meta property="og:image" content="([^"]+)"'
    plot, thumb = scrapertools.find_single_match(data, pattern)

    matches = re.compile('<a class="episodeLink p1" href="([^"]+)"><strong>(.*?)</strong>(.*?)</a>', re.S).findall(data)

    for url, s_e, title in matches:
        url = host + url
        title = s_e.strip() + title
        itemlist.append(Item(channel=item.channel, title=title, url=url, thumbnail=thumb, show=item.show, plot=plot,
                             action="findvideos", fulltitle=title))

    if config.get_videolibrary_support():
        itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=item.url,
                             action="add_serie_to_library", extra="episodios", show=item.show))
        itemlist.append(Item(channel=item.channel, title="Descargar todos los episodios de la serie", url=item.url,
                             action="download_all_episodes", extra="episodios", show=item.show))

    return itemlist


def findvideos(item):
    logger.info()
    itemlist = []

    # Download the page
    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)

    pattern = '<a href="([^"]+)"[^>]+><img[^>]+alt="([^"]+)" /></a></td><td class="episode-lang"><span ' \
              'class="flags[^"]+" title="([^"]+)"'

    matches = re.compile(pattern, re.S).findall(data)

    for url, server, language in matches:
        title = "[%s] - [%s]" % (language, server)
        url = host + url
        server = re.sub('(\..*)', '', server)
        logger.debug("url %s" % url)
        itemlist.append(Item(channel=item.channel, action="play", title=title, fulltitle=item.fulltitle, url=url,
                             thumbnail=item.thumbnail, language=language, server=server))

    return itemlist


def play(item):
    logger.info()

    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)

    itemlist = servertools.find_video_items(data=data)

    for video_item in itemlist:
        video_item.title = "%s [%s]" % (item.fulltitle, item.lang)
        video_item.thumbnail = item.thumbnail
        video_item.language = item.language

    return itemlist
@@ -1,22 +0,0 @@
{
    "id": "tupornotv",
    "name": "tuporno.tv",
    "active": true,
    "adult": true,
    "language": ["*"],
    "banner": "tupornotv.png",
    "thumbnail": "tupornotv.png",
    "categories": [
        "adult"
    ],
    "settings": [
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": true,
            "enabled": true,
            "visible": true
        }
    ]
}
@@ -1,264 +0,0 @@
# -*- coding: utf-8 -*-

import re
import urlparse

from core import scrapertools
from core.item import Item
from platformcode import logger


def mainlist(item):
    logger.info()

    itemlist = []
    itemlist.append(Item(channel=item.channel, title="Pendientes de Votación", action="novedades",
                         url="http://tuporno.tv/pendientes"))
    itemlist.append(
        Item(channel=item.channel, title="Populares", action="masVistos", url="http://tuporno.tv/", folder=True))
    itemlist.append(
        Item(channel=item.channel, title="Categorias", action="categorias", url="http://tuporno.tv/categorias/",
             folder=True))
    itemlist.append(Item(channel=item.channel, title="Videos Recientes", action="novedades",
                         url="http://tuporno.tv/videosRecientes/", folder=True))
    itemlist.append(Item(channel=item.channel, title="Top Videos (mas votados)", action="masVotados",
                         url="http://tuporno.tv/topVideos/", folder=True))
    itemlist.append(Item(channel=item.channel, title="Nube de Tags", action="categorias", url="http://tuporno.tv/tags/",
                         folder=True))
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))

    return itemlist


def novedades(item):
    logger.info()
    url = item.url
    # ------------------------------------------------------
    # Download the page
    # ------------------------------------------------------
    data = scrapertools.cachePage(url)
    # logger.info(data)

    # ------------------------------------------------------
    # Extract the entries
    # ------------------------------------------------------
    # "novedades" section
    '''
    <table border="0" cellpadding="0" cellspacing="0" ><tr><td align="center" width="100%" valign="top" height="160px">
    <a href="/videos/cogiendo-en-el-bosque"><img src="imagenes/videos//c/o/cogiendo-en-el-bosque_imagen2.jpg" alt="Cogiendo en el bosque" border="0" align="top" /></a>
    <h2><a href="/videos/cogiendo-en-el-bosque">Cogiendo en el bosque</a></h2>
    '''
    patronvideos = '<div class="relative">(.*?)</div><div class="video'

    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    # if DEBUG: scrapertools.printMatches(matches)

    itemlist = []
    for match in matches:
        # Title
        try:
            scrapedtitle = re.compile('title="(.+?)"').findall(match)[0]

        except:
            scrapedtitle = ''
        try:
            scrapedurl = re.compile('href="(.+?)"').findall(match)[0]
            scrapedurl = urlparse.urljoin(url, scrapedurl)
        except:
            continue
        try:
            scrapedthumbnail = re.compile('src="(.+?)"').findall(match)[0]
            scrapedthumbnail = urlparse.urljoin(url, scrapedthumbnail)
        except:
            scrapedthumbnail = ''
        scrapedplot = ""
        try:
            duracion = re.compile('<div class="duracion">(.+?)<').findall(match)[0]
        except:
            try:
                duracion = re.compile('\((.+?)\)<br').findall(match[3])[0]
            except:
                duracion = ""

        # logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"], duracion=["+duracion+"]")
        # Add to the XBMC listing
        # trozos = scrapedurl.split("/")
        # id = trozos[len(trozos)-1]
        # videos = "http://149.12.64.129/videoscodiH264/"+id[0:1]+"/"+id[1:2]+"/"+id+".flv"
        itemlist.append(
            Item(channel=item.channel, action="play", title=scrapedtitle + " [" + duracion + "]", url=scrapedurl,
                 thumbnail=scrapedthumbnail, plot=scrapedplot, server="Directo", folder=False))

    # ------------------------------------------------------
    # Extract the paginator
    # ------------------------------------------------------
    # <a href="/topVideos/todas/mes/2/" class="enlace_si">Siguiente </a>
    patronsiguiente = '<a href="(.+?)" class="enlace_si">Siguiente </a>'
    siguiente = re.compile(patronsiguiente, re.DOTALL).findall(data)
    if len(siguiente) > 0:
        scrapedurl = urlparse.urljoin(url, siguiente[0])
        itemlist.append(Item(channel=item.channel, action="novedades", title="!Next page", url=scrapedurl, folder=True))

    return itemlist


def masVistos(item):
    logger.info()

    itemlist = []
    itemlist.append(
        Item(channel=item.channel, title="Hoy", action="novedades", url="http://tuporno.tv/hoy", folder=True))
    itemlist.append(Item(channel=item.channel, title="Recientes", action="novedades", url="http://tuporno.tv/recientes",
                         folder=True))
    itemlist.append(
        Item(channel=item.channel, title="Semana", action="novedades", url="http://tuporno.tv/semana", folder=True))
    itemlist.append(
        Item(channel=item.channel, title="Mes", action="novedades", url="http://tuporno.tv/mes", folder=True))
    itemlist.append(
        Item(channel=item.channel, title="Año", action="novedades", url="http://tuporno.tv/ano", folder=True))
    return itemlist


def categorias(item):
    logger.info()

    url = item.url
    # ------------------------------------------------------
    # Download the page
    # ------------------------------------------------------
    data = scrapertools.cachePage(url)
    # logger.info(data)
    # ------------------------------------------------------
    # Extract the entries
    # ------------------------------------------------------
    # categories section
    # Pattern for the entries
    if url == "http://tuporno.tv/categorias/":
        patronvideos = '<li><a href="([^"]+)"'  # URL
        patronvideos += '>([^<]+)</a></li>'  # TITLE
    else:
        patronvideos = '<a href="(.tags[^"]+)"'  # URL
        patronvideos += ' class="[^"]+">([^<]+)</a>'  # TITLE

    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    # if DEBUG: scrapertools.printMatches(matches)

    itemlist = []
    for match in matches:
        if match[1] in ["SexShop", "Videochat", "Videoclub"]:
            continue
        # Title
        scrapedtitle = match[1]
        scrapedurl = urlparse.urljoin(url, match[0])
        scrapedthumbnail = ""
        scrapedplot = ""
        logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")

        # Add to the XBMC listing
        itemlist.append(Item(channel=item.channel, action="novedades", title=scrapedtitle.capitalize(), url=scrapedurl,
                             thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True))
    return itemlist


def masVotados(item):
    logger.info()

    itemlist = []
    itemlist.append(
        Item(channel=item.channel, title="Hoy", action="novedades", url="http://tuporno.tv/topVideos/todas/hoy",
             folder=True))
    itemlist.append(Item(channel=item.channel, title="Recientes", action="novedades",
                         url="http://tuporno.tv/topVideos/todas/recientes", folder=True))
    itemlist.append(
        Item(channel=item.channel, title="Semana", action="novedades", url="http://tuporno.tv/topVideos/todas/semana",
             folder=True))
    itemlist.append(
        Item(channel=item.channel, title="Mes", action="novedades", url="http://tuporno.tv/topVideos/todas/mes",
             folder=True))
    itemlist.append(
        Item(channel=item.channel, title="Año", action="novedades", url="http://tuporno.tv/topVideos/todas/ano",
             folder=True))
    return itemlist


def search(item, texto):
    logger.info()
    if texto != "":
        texto = texto.replace(" ", "+")
    else:
        texto = item.extra.replace(" ", "+")
    item.url = "http://tuporno.tv/buscador/?str=" + texto
    try:
        return getsearch(item)
    # Catch the exception so a failing channel does not break the global search
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


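# getsearch() scrapes the result grid and, while a "Siguiente" link exists and
# the last-page marker (<!--HV_SIGUIENTE_ENLACE) is absent, queues a
# "!Next page" item that calls getsearch() again.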
def getsearch(item):
    logger.info()
    data = scrapertools.cachePage(item.url)
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)
    patronvideos = '<div class="relative"><a href="(.videos[^"]+)"[^>]+><img.+?src="([^"]+)" alt="(.+?)" .*?<div class="duracion">(.+?)</div></div></div>'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    if len(matches) > 0:
        itemlist = []
        for match in matches:
            # Title
            scrapedtitle = match[2].replace("<b>", "")
            scrapedtitle = scrapedtitle.replace("</b>", "")
            scrapedurl = urlparse.urljoin("http://tuporno.tv/", match[0])
            scrapedthumbnail = urlparse.urljoin("http://tuporno.tv/", match[1])
            scrapedplot = ""
            duracion = match[3]

            itemlist.append(
                Item(channel=item.channel, action="play", title=scrapedtitle + " [" + duracion + "]", url=scrapedurl,
                     thumbnail=scrapedthumbnail, plot=scrapedplot, server="Directo", folder=False))

        '''<a href="/buscador/?str=busqueda&desde=HV_PAGINA_SIGUIENTE" class="enlace_si">Siguiente </a>'''
        patronsiguiente = '<a href="([^"]+)" class="enlace_si">Siguiente </a>'
        siguiente = re.compile(patronsiguiente, re.DOTALL).findall(data)
        if len(siguiente) > 0:
            patronultima = '<!--HV_SIGUIENTE_ENLACE'
            ultpagina = re.compile(patronultima, re.DOTALL).findall(data)
            scrapertools.printMatches(siguiente)

            if len(ultpagina) == 0:
                scrapedurl = urlparse.urljoin(item.url, siguiente[0])
                itemlist.append(
                    Item(channel=item.channel, action="getsearch", title="!Next page", url=scrapedurl, folder=True))
        return itemlist


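# play() resolves the final stream in two steps: it reads the video id from
# the page's <body id="...">, asks flvurl.php for the stream locator, and
# base64-decodes the returned "kpt" parameter into the real url, e.g.
# (illustrative): base64.decodestring("aHR0cDovLy4uLg==") -> "http://..."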
def play(item):
    logger.info()
    itemlist = []

    # Read the video page
    data = scrapertools.cachePage(item.url)
    codVideo = scrapertools.get_match(data, 'body id="([^"]+)"')
    logger.info("codVideo=" + codVideo)

    # Read the page with the code
    # http://tuporno.tv/flvurl.php?codVideo=188098&v=MAC%2011,5,502,146
    url = "http://tuporno.tv/flvurl.php?codVideo=" + codVideo + "&v=MAC%2011,5,502,146"
    data = scrapertools.cachePage(url)
    logger.info("data=" + data)
    kpt = scrapertools.get_match(data, "kpt\=(.+?)\&")
    logger.info("kpt=" + kpt)

    # Decode
    import base64
    url = base64.decodestring(kpt)
    logger.info("url=" + url)

    itemlist.append(
        Item(channel=item.channel, action="play", title=item.title, url=url, thumbnail=item.thumbnail, plot=item.plot,
             server="Directo", folder=False))

    return itemlist
plugin.video.alfa/channels/yape.json (new file, 76 lines)
@@ -0,0 +1,76 @@
{
|
||||
"id": "yape",
|
||||
"name": "Yape",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": ["lat","cast","vose"],
|
||||
"thumbnail": "https://s8.postimg.cc/71ed4op5d/yape1.png",
|
||||
"banner": "https://s8.postimg.cc/4wu03lfsx/yape2.png",
|
||||
"categories": [
|
||||
"movie"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "filter_languages",
|
||||
"type": "list",
|
||||
"label": "Mostrar enlaces en idioma...",
|
||||
"default": 0,
|
||||
"enabled": true,
|
||||
"visible": true,
|
||||
"lvalues": [
|
||||
"No filtrar",
|
||||
"LAT",
|
||||
"ESP",
|
||||
"VOSE"
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": "modo_grafico",
|
||||
"type": "bool",
|
||||
"label": "Buscar información extra",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_latino",
|
||||
"type": "bool",
|
||||
"label": "Incluir en Novedades - Latino",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Incluir en busqueda global",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_peliculas",
|
||||
"type": "bool",
|
||||
"label": "Incluir en Novedades - Peliculas",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_infantiles",
|
||||
"type": "bool",
|
||||
"label": "Incluir en Novedades - Infantiles",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_terror",
|
||||
"type": "bool",
|
||||
"label": "Incluir en Novedades - terror",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
}
|
||||
]
|
||||
}
|
||||
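Each object in "settings" above becomes a per-channel option rendered in the channel's settings dialog, and channel code reads the stored value back by its "id" via config.get_setting — yape.py below does exactly this for modo_grafico, wrapped in a try/except fallback. A minimal sketch of that round trip (the filter_languages read is an assumed example; it should return the selected index into "lvalues"):

from platformcode import config

modo_grafico = config.get_setting('modo_grafico', 'yape')   # bool switch defined above
filtro = config.get_setting('filter_languages', 'yape')     # assumed: index into "lvalues"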
plugin.video.alfa/channels/yape.py (new file, 193 lines)
@@ -0,0 +1,193 @@
# -*- coding: utf-8 -*-
# -*- Channel Yape -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-

from channelselector import get_thumb
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger, platformtools


idio = {'https://cdn.yape.nu//languajes/la.png': 'LAT',
        'https://cdn.yape.nu//languajes/es.png': 'ESP',
        'https://cdn.yape.nu//languajes/en_es.png': 'VOSE'}
cali = {'HD 1080p': 'HD 1080p', 'TS Screener HQ': 'TS Screener HQ', 'BR Screnner': 'BR Screnner',
        'HD Rip': 'HD Rip', 'DVD Screnner': 'DVD Screnner'}

list_language = idio.values()
list_quality = cali.values()
list_servers = ['streamango', 'powvideo', 'openload', 'streamplay', 'vidoza', 'clipwatching']


__channel__ = 'yape'

host = "https://yape.nu"

try:
    __modo_grafico__ = config.get_setting('modo_grafico', __channel__)
except:
    __modo_grafico__ = True
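# findvideos() below indexes the two maps above directly, e.g.
#     idio['https://cdn.yape.nu//languajes/la.png']  -> 'LAT'
#     cali['TS Screener HQ']                         -> 'TS Screener HQ'
# and list_language/list_quality expose the same tags to filtertools and autoplay.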
def mainlist(item):
    logger.info()
    autoplay.init(item.channel, list_servers, list_quality)
    itemlist = []
    itemlist.append(Item(channel=item.channel, title="Actualizadas", action="peliculas",
                         url=host + "/catalogue?sort=time_update&page=", page=1,
                         thumbnail=get_thumb("updated", auto=True)))
    itemlist.append(Item(channel=item.channel, title="Mas vistas", action="peliculas",
                         url=host + "/catalogue?sort=mosts-today&page=", page=1,
                         thumbnail=get_thumb("more watched", auto=True)))
    itemlist.append(Item(channel=item.channel, title="Ultimas agregadas", action="peliculas",
                         url=host + "/catalogue?sort=latest&page=", page=1,
                         thumbnail=get_thumb("last", auto=True)))
    itemlist.append(Item(channel=item.channel, title="Por género", action="generos", url=host,
                         extra="Genero", thumbnail=get_thumb("genres", auto=True)))
    itemlist.append(Item(channel=item.channel, title=""))
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search",
                         url=host + "/search?term=", thumbnail=get_thumb("search", auto=True)))
    itemlist.append(item.clone(title="Configurar canal...", text_color="gold", action="configuracion", folder=False))
    autoplay.show_option(item.channel, itemlist)
    return itemlist


def configuracion(item):
    ret = platformtools.show_channel_settings()
    platformtools.itemlist_refresh()
    return ret

def search(item, texto):
    logger.info()
    item.url = host + "/search?s=%s&page=" % texto
    item.extra = "busca"
    item.page = 1
    if texto != '':
        return peliculas(item)
    else:
        return []

def peliculas(item):
    logger.info()
    itemlist = []
    url = item.url + str(item.page)
    data = httptools.downloadpage(url).data
    patron = 'class="col-lg-2 col-md-3 col-6 mb-3">.*?href="([^"]+).*?'
    patron += 'title="([^"]+).*?'
    patron += 'src="([^"]+).*?'
    patron += 'txt-size-13">([^<]+)'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedyear in matches:
        scrapedtitle = scrapedtitle.replace("Ver ", "").replace(" Completa Online Gratis", "")
        itemlist.append(Item(channel=item.channel,
                             action="findvideos",
                             contentTitle=scrapedtitle,
                             infoLabels={'year': scrapedyear},
                             thumbnail=scrapedthumbnail,
                             title=scrapedtitle + " (%s)" % scrapedyear,
                             url=scrapedurl
                             ))
    tmdb.set_infoLabels(itemlist)
    # Pagination
    if len(itemlist) > 0:
        itemlist.append(Item(channel=item.channel,
                             action="peliculas",
                             page=item.page + 1,
                             title="Página siguiente >>",
                             url=item.url
                             ))
    return itemlist

def newest(categoria):
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria in ['peliculas', 'latino']:
            item.url = host + "/catalogue?sort=latest&page="
            item.page = 1
        elif categoria == 'infantiles':
            item.url = host + '/genre/animacion?page='
            item.page = 1
        elif categoria == 'terror':
            item.url = host + '/genre/terror?page='
            item.page = 1
        itemlist = peliculas(item)
        # Drop the trailing "Página siguiente >>" item so only movies reach the Novedades list
        if "Página" in itemlist[-1].title:
            itemlist.pop()
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist

def generos(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = 'dropdown-item py-1 px-2" href="([^"]+)"'
    patron += '>([^<]+)'
    matches = scrapertools.find_multiple_matches(data, patron)
    for url, titulo in matches:
        itemlist.append(Item(channel=item.channel,
                             action="peliculas",
                             title=titulo,
                             url=url + "?page=",
                             page=1
                             ))
    return itemlist

def findvideos(item):
    itemlist = []
    data = httptools.downloadpage(item.url).data
    bloque = scrapertools.find_single_match(data, 'Descargar</span>(.*?)Te recomendamos')
    if bloque == "[]":
        return []
    patron = 'sv_([^_]+).*?'
    patron += 'link="([^"]+).*?'
    patron += 'juM9Fbab.*?src="([^"]+).*?'
    patron += 'rounded c.">([^<]+)'
    matches = scrapertools.find_multiple_matches(bloque, patron)
    for scrapedserver, scrapedurl, scrapedlanguage, scrapedquality in matches:
        titulo = "Ver en: " + scrapedserver.capitalize() + " (%s)(%s)" % (cali[scrapedquality], idio[scrapedlanguage])
        itemlist.append(
            item.clone(action="play",
                       language=idio[scrapedlanguage],
                       quality=cali[scrapedquality],
                       title=titulo,
                       url=scrapedurl
                       ))
    itemlist.sort(key=lambda it: (it.language, it.server))
    tmdb.set_infoLabels(itemlist, __modo_grafico__)
    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required by AutoPlay
    autoplay.start(itemlist, item)

    if itemlist:
        itemlist.append(Item(channel=item.channel))
        itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
                                   text_color="magenta"))
        # Option "Add this movie to the KODI library"
        if item.extra != "library":
            if config.get_videolibrary_support():
                itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
                                     action="add_pelicula_to_library", url=item.url, thumbnail=item.thumbnail,
                                     contentTitle=item.contentTitle
                                     ))
    return itemlist

def play(item):
    itemlist = []
    data = httptools.downloadpage(item.url).data
    url = scrapertools.find_single_match(data, 'iframe class="" src="([^"]+)')
    # The iframe points at an internal redirector; read the Location header without downloading the body
    item.url = url = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location", "")
    itemlist.append(item.clone())
    itemlist = servertools.get_servers_itemlist(itemlist)
    itemlist[0].thumbnail = item.contentThumbnail
    return itemlist
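The play() above resolves the real hoster URL by following exactly one redirect by hand: only_headers=True asks httptools to fetch just the response headers, and follow_redirects=False keeps the 30x response so its Location header can be read. A standalone sketch of the same pattern (the URL is a made-up placeholder):

from core import httptools

resp = httptools.downloadpage("https://yape.nu/out/abc123",
                              follow_redirects=False, only_headers=True)
hoster_url = resp.headers.get("location", "")  # empty string if there was no redirect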
@@ -134,13 +134,13 @@ def open_settings():
            if settings_post['adult_aux_new_password1'] == settings_post['adult_aux_new_password2']:
                set_setting('adult_password', settings_post['adult_aux_new_password1'])
            else:
                platformtools.dialog_ok(config.get_localized_string(60305),
                                        config.get_localized_string(60306),
                                        config.get_localized_string(60307))
                platformtools.dialog_ok(get_localized_string(60305),
                                        get_localized_string(60306),
                                        get_localized_string(60307))

        else:
            platformtools.dialog_ok(config.get_localized_string(60305), config.get_localized_string(60309),
                                    config.get_localized_string(60310))
            platformtools.dialog_ok(get_localized_string(60305), get_localized_string(60309),
                                    get_localized_string(60310))

    # Undo changes
    set_setting("adult_mode", settings_pre.get("adult_mode", 0))
@@ -195,23 +195,23 @@ def get_setting(name, channel="", server="", default=None):

    # Specific channel setting
    if channel:
        # logger.info("config.get_setting reading channel setting '"+name+"' from channel json")
        # logger.info("get_setting reading channel setting '"+name+"' from channel json")
        from core import channeltools
        value = channeltools.get_channel_setting(name, channel, default)
        # logger.info("config.get_setting -> '"+repr(value)+"'")
        # logger.info("get_setting -> '"+repr(value)+"'")
        return value

    # Specific server setting
    elif server:
        # logger.info("config.get_setting reading server setting '"+name+"' from server json")
        # logger.info("get_setting reading server setting '"+name+"' from server json")
        from core import servertools
        value = servertools.get_server_setting(name, server, default)
        # logger.info("config.get_setting -> '"+repr(value)+"'")
        # logger.info("get_setting -> '"+repr(value)+"'")
        return value

    # Global setting
    else:
        # logger.info("config.get_setting reading main setting '"+name+"'")
        # logger.info("get_setting reading main setting '"+name+"'")
        value = __settings__.getSetting(name)
        if not value:
            return default
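For reference, the three branches of get_setting() above are selected purely by which keyword argument the caller passes; a quick usage sketch (setting names are illustrative, not from this diff):

from platformcode import config

config.get_setting("filter_languages", channel="yape")   # per-channel value from channels/yape.json
config.get_setting("quality", server="openload")         # per-server value from the server's json
config.get_setting("adult_mode")                         # global addon setting via __settings__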
@@ -4,7 +4,7 @@
    "ignore_urls": [],
    "patterns": [
      {
        "pattern": "rapidvideo.(?:org|com)/(?:\\?v=|e/|embed/|v/)([A-z0-9]+)",
        "pattern": "rapidvideo.(?:org|com)/(?:\\?v=|e/|embed/|v/|)([A-z0-9]+)",
        "url": "https://www.rapidvideo.com/e/\\1"
      }
    ]
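The only change above is the extra empty alternative at the end of the (?:\?v=|e/|embed/|v/|) group, which lets the pattern also match bare links of the form rapidvideo.com/<id>. A quick check of both shapes (test URLs are made up):

import re

pattern = r"rapidvideo.(?:org|com)/(?:\?v=|e/|embed/|v/|)([A-z0-9]+)"
for url in ("https://www.rapidvideo.com/e/ABC123",
            "https://www.rapidvideo.com/ABC123"):
    print(re.search(pattern, url).group(1))  # ABC123 both times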