Merge pull request #6 from alfa-addon/master

version 2.7.18
Alfa-beto
2018-12-19 09:30:17 -03:00
committed by GitHub
38 changed files with 914 additions and 1040 deletions

View File

@@ -626,7 +626,7 @@ class platform(Platformtools):
# Get the channel the call came from and load the settings available for that channel
if not channelpath:
channelpath = inspect.currentframe().f_back.f_back.f_code.co_filename
channelname = os.path.basename(channelpath).replace(".py", "")
channelname = os.path.basename(channelpath).split(".")[0]
ch_type = os.path.basename(os.path.dirname(channelpath))
# If there are no list_controls, they have to be pulled from the channel's json
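Reviewer note: the change from replace(".py", "") to split(".")[0] is not cosmetic. If the calling frame ever points at a compiled module, the filename ends in ".pyc" or ".pyo" and the old form corrupts the channel name. A minimal sketch of the difference with hypothetical filenames (the compiled-module motivation is an assumption, but the behavioral gap holds either way):

import os

for filename in ("peliculas.py", "peliculas.pyc", "peliculas.pyo"):
    base = os.path.basename(filename)
    print(base.replace(".py", ""))  # old: peliculas, peliculasc, peliculaso
    print(base.split(".")[0])       # new: peliculas, peliculas, peliculas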

View File

@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="2.7.17" provider-name="Alfa Addon">
<addon id="plugin.video.alfa" name="Alfa" version="2.7.18" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -10,8 +10,8 @@
<extension point="xbmc.addon.metadata">
<summary lang="es">Navega con Kodi por páginas web.</summary>
<assets>
<icon>logo-cumple.png</icon>
<fanart>fanart1.jpg</fanart>
<icon>logo-n.jpg</icon>
<fanart>fanart-xmas.jpg</fanart>
<screenshot>resources/media/themes/ss/1.jpg</screenshot>
<screenshot>resources/media/themes/ss/2.jpg</screenshot>
<screenshot>resources/media/themes/ss/3.jpg</screenshot>
@@ -19,15 +19,15 @@
</assets>
<news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Arreglos[/B][/COLOR]
¤ Todopeliculas ¤ Maxipelis24 ¤ allcalidad
¤ descargacineclasico ¤ porntrex ¤ seriesmetro
¤ pedropolis ¤ thumzilla ¤ xms
¤ jkanime ¤ newpct1 ¤ descargacineclasico
¤ DoramasMP4 ¤ cine24h ¤ ciberpeliculashd
¤ erotik ¤ pelis24 ¤ pelisplay
¤ serieslan ¤ anitoonstv
[COLOR green][B]Novedades[/B][/COLOR]
¤ cine24h ¤ hdfilmologia ¤ pelis24
¤ pelishd24 ¤ pelisplay
¤ vi2 ¤ tvpelis
¤ Agradecimientos a @chivmalev por colaborar con esta versión
¤ Agradecimientos a @w1s0 por colaborar con esta versión
</news>
<description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>

View File

@@ -32,11 +32,11 @@ def mainlist(item):
itemlist = list()
itemlist.append(Item(channel=item.channel, action="lista", title="Series", url=host+"/lista-de-anime.php",
itemlist.append(Item(channel=item.channel, action="lista", title="Series", contentTitle="Series", url=host+"/lista-de-anime.php",
thumbnail=thumb_series, range=[0,19]))
itemlist.append(Item(channel=item.channel, action="lista", title="Películas", url=host+"/catalogo.php?g=&t=peliculas&o=0",
itemlist.append(Item(channel=item.channel, action="lista", title="Películas", contentTitle="Películas", url=host+"/catalogo.php?g=&t=peliculas&o=0",
thumbnail=thumb_series, range=[0,19] ))
itemlist.append(Item(channel=item.channel, action="lista", title="Especiales", url=host+"/catalogo.php?g=&t=especiales&o=0",
itemlist.append(Item(channel=item.channel, action="lista", title="Especiales", contentTitle="Especiales", url=host+"/catalogo.php?g=&t=especiales&o=0",
thumbnail=thumb_series, range=[0,19]))
itemlist.append(Item(channel=item.channel, action="search", title="Buscar",
thumbnail=thumb_series, range=[0,19]))
@@ -109,14 +109,14 @@ def lista(item):
context2 = autoplay.context
context.extend(context2)
scrapedurl=host+scrapedurl
if item.title!="Series":
if item.contentTitle!="Series":
itemlist.append(item.clone(title=scrapedtitle, contentTitle=show,url=scrapedurl,
thumbnail=scrapedthumbnail, action="findvideos", context=context))
else:
itemlist.append(item.clone(title=scrapedtitle, contentSerieName=show,url=scrapedurl, plot=scrapedplot,
thumbnail=scrapedthumbnail, action="episodios", context=context))
tmdb.set_infoLabels(itemlist, seekTmdb=True)
itemlist.append(Item(channel=item.channel, url=item.url, range=next_page, title='Página Siguiente >>>', action='lista'))
itemlist.append(Item(channel=item.channel, url=item.url, range=next_page, title='Página Siguiente >>>', contentTitle=item.title, action='lista'))
return itemlist
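Reviewer note on why contentTitle is threaded through: the old branch tested item.title, but the pagination entry replaces the title with 'Página Siguiente >>>', so from page two onward series were routed down the movie branch. A sketch with a minimal stand-in for core.item.Item (hypothetical, for illustration only):

class Item(object):
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    def clone(self, **kwargs):
        fields = dict(self.__dict__)
        fields.update(kwargs)
        return Item(**fields)

menu = Item(title="Series", contentTitle="Series")
# The pagination entry swaps the display title but keeps the discriminator.
next_page = menu.clone(title="Página Siguiente >>>")
print(next_page.title != "Series")          # True: the old check takes the wrong branch
print(next_page.contentTitle == "Series")   # True: the new check still routes correctly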

View File

@@ -232,11 +232,11 @@ def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
url = scrapertools.find_single_match(data, 'iframe-.*?src="([^"]+)')
data = httptools.downloadpage(url).data
patron = '<a href="([^"]+)'
patron = '(?i)src=&quot;([^&]+)&'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl in matches:
if ".gif" in scrapedurl:
continue
title = "Ver en: %s"
itemlist.append(item.clone(action = "play",
title = title,
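Reviewer note: the new pattern targets embeds whose markup arrives HTML-entity-escaped (src=&quot;...&quot; rather than src="..."), which the old <a href> pattern never matched. A sketch against a hypothetical sample string:

import re

data = 'value="&lt;IFRAME SRC=&quot;https://example.com/embed/abc123&quot;&gt;"'
patron = '(?i)src=&quot;([^&]+)&'
print(re.findall(patron, data))  # ['https://example.com/embed/abc123']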

View File

@@ -3,14 +3,15 @@
"name": "Cine24H",
"active": true,
"adult": false,
"language": ["lat", "cast", "eng"],
"language": ["lat", "cast", "vose"],
"fanart": "https://i.postimg.cc/WpqD2n77/cine24bg.jpg",
"thumbnail": "https://cine24h.net/wp-content/uploads/2018/06/cine24hv2.png",
"banner": "",
"categories": [
"movie",
"tvshow",
"vose"
"vose",
"direct"
],
"settings": [
{

View File

@@ -138,10 +138,10 @@ def peliculas(item):
contentType = 'movie'
title = scrapedtitle
itemlist.append(item.clone(channel=__channel__, action=action, text_color=color3, show=scrapedtitle,
url=scrapedurl, infoLabels={'year': year}, contentType=contentType,
contentTitle=scrapedtitle, thumbnail='https:' + scrapedthumbnail,
title=title, context="buscar_trailer"))
itemlist.append(Item(channel=__channel__, action=action, text_color=color3, show=scrapedtitle,
url=scrapedurl, infoLabels={'year': year}, contentType=contentType,
contentTitle=scrapedtitle, thumbnail='https:' + scrapedthumbnail,
title=title, context="buscar_trailer"))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)

View File

@@ -8,5 +8,15 @@
"thumbnail": "descargacineclasico2.png",
"categories": [
"movie"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -3,29 +3,30 @@
import re
import urlparse
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger
host = "https://www.youfreeporntube.net"
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, action="lista", title="Útimos videos",
url="http://www.ero-tik.com/newvideos.html?&page=1"))
url=host + "/new-clips.html?&page=1"))
itemlist.append(
Item(channel=item.channel, action="categorias", title="Categorias", url="http://www.ero-tik.com/browse.html"))
itemlist.append(Item(channel=item.channel, action="lista", title="Top ultima semana",
url="http://www.ero-tik.com/topvideos.html?do=recent"))
Item(channel=item.channel, action="categorias", title="Categorias", url=host + "/browse.html"))
itemlist.append(Item(channel=item.channel, action="lista", title="Populares",
url=host + "/topvideo.html?page=1"))
itemlist.append(Item(channel=item.channel, action="search", title="Buscar",
url="http://www.ero-tik.com/search.php?keywords="))
url=host + "/search.php?keywords="))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = "{0}{1}".format(item.url, texto)
try:
@@ -41,96 +42,73 @@ def search(item, texto):
def categorias(item):
logger.info()
itemlist = []
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}", "", data)
patron = '<div class="pm-li-category"><a href="([^"]+)">.*?.<h3>(.*?)</h3></a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for url, actriz in matches:
itemlist.append(Item(channel=item.channel, action="listacategoria", title=actriz, url=url))
return itemlist
def lista(item):
logger.info()
itemlist = []
# Download the page
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}", "", data)
# Extract the entries from the selected page
patron = '<li><div class=".*?<a href="([^"]+)".*?>.*?.img src="([^"]+)".*?alt="([^"]+)".*?>'
matches = re.compile(patron, re.DOTALL).findall(data)
itemlist = []
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
url = urlparse.urljoin(item.url, scrapedurl)
thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
title = scrapedtitle.strip()
# Add to the list
itemlist.append(Item(channel=item.channel, action="play", thumbnail=thumbnail, fanart=thumbnail, title=title,
fulltitle=title, url=url,
viewmode="movie", folder=True))
paginacion = scrapertools.find_single_match(data,
'<li class="active"><a href="#" onclick="return false;">\d+</a></li><li class=""><a href="([^"]+)">')
if paginacion:
itemlist.append(Item(channel=item.channel, action="lista", title=">> Página Siguiente",
url="http://ero-tik.com/" + paginacion))
url=host + "/" + paginacion))
return itemlist
def listacategoria(item):
logger.info()
itemlist = []
# Download the page
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}", "", data)
# Extract the entries from the selected page
patron = '<li><div class=".*?<a href="([^"]+)".*?>.*?.img src="([^"]+)".*?alt="([^"]+)".*?>'
matches = re.compile(patron, re.DOTALL).findall(data)
itemlist = []
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
url = urlparse.urljoin(item.url, scrapedurl)
thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
title = scrapedtitle.strip()
# Add to the list
itemlist.append(
Item(channel=item.channel, action="play", thumbnail=thumbnail, title=title, fulltitle=title, url=url,
viewmode="movie", folder=True))
paginacion = scrapertools.find_single_match(data,
'<li class="active"><a href="#" onclick="return false;">\d+</a></li><li class=""><a href="([^"]+)">')
if paginacion:
itemlist.append(
Item(channel=item.channel, action="listacategoria", title=">> Página Siguiente", url=paginacion))
return itemlist
def play(item):
logger.info()
itemlist = []
# Download the page
data = scrapertools.cachePage(item.url)
data = scrapertools.unescape(data)
logger.info(data)
from core import servertools
itemlist.extend(servertools.find_video_items(data=data))
for videoitem in itemlist:
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
videoitem.action = "play"
videoitem.folder = False
videoitem.title = item.title
data = httptools.downloadpage(item.url).data
item.url = scrapertools.find_single_match(data, 'Playerholder.*?src="([^"]+)"')
if "tubst.net" in item.url:
url = scrapertools.find_single_match(data, 'itemprop="embedURL" content="([^"]+)')
data = httptools.downloadpage(url).data
url = scrapertools.find_single_match(data, '<iframe.*?src="([^"]+)"')
data = httptools.downloadpage(url).data
url = scrapertools.find_single_match(data, '<source src="([^"]+)"')
item.url = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location", "")
itemlist.append(item.clone())
itemlist = servertools.get_servers_itemlist(itemlist)
return itemlist
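Reviewer note: the tubst.net branch ends by requesting only the headers with redirects disabled and reading Location, so the final media URL is resolved without ever downloading the video body. A standard-library sketch of the same idea using a HEAD request (Python 2 to match the channel code; final_location is a hypothetical helper, the channel itself goes through httptools):

import httplib
import urlparse

def final_location(url):
    parts = urlparse.urlsplit(url)
    conn_cls = httplib.HTTPSConnection if parts.scheme == "https" else httplib.HTTPConnection
    conn = conn_cls(parts.netloc)
    path = parts.path or "/"
    if parts.query:
        path += "?" + parts.query
    # HEAD returns the status line and headers only; no body is transferred.
    conn.request("HEAD", path)
    resp = conn.getresponse()
    if 300 <= resp.status < 400:
        return resp.getheader("location", "")
    return url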

View File

@@ -1,22 +0,0 @@
{
"id": "filesmonster_catalogue",
"name": "Filesmonster Catalogue",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "filesmonster_catalogue.png",
"banner": "filesmonster_catalogue.png",
"categories": [
"adult"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,397 +0,0 @@
# -*- coding: utf-8 -*-
import os
import re
from core import scrapertools
from core.item import Item
from platformcode import config, logger
def strip_tags(value):
return re.sub(r'<[^>]*?>', '', value)
def mainlist(item):
logger.info()
user = config.get_setting("filesmonsteruser")
itemlist = []
itemlist.append(Item(channel=item.channel, action="unusualporn", title="Canal unusualporn.net",
thumbnail="http://filesmonster.biz/img/logo.png"))
itemlist.append(Item(channel=item.channel, action="files_monster", title="Canal files-monster.org",
thumbnail="http://files-monster.org/template/static/images/logo.jpg"))
itemlist.append(Item(channel=item.channel, action="filesmonster", title="Canal filesmonster.filesdl.net",
thumbnail="http://filesmonster.biz/img/logo.png"))
if user != '': itemlist.append(
Item(channel=item.channel, action="favoritos", title="Favoritos en filesmonster.com del usuario " + user,
folder=True))
return itemlist
def filesmonster(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, action="videos", title="Ultimos vídeos",
thumbnail="http://photosex.biz/imager/w_400/h_400/9f869c6cb63e12f61b58ffac2da822c9.jpg",
url="http://filesmonster.filesdl.net"))
itemlist.append(Item(channel=item.channel, action="categorias", title="Categorias",
thumbnail="http://photosex.biz/imager/w_400/h_500/e48337cd95bbb6c2c372ffa6e71441ac.jpg",
url="http://filesmonster.filesdl.net"))
itemlist.append(Item(channel=item.channel, action="search", title="Buscar en filesmonster.fliesdl.net",
url="http://filesmonster.filesdl.net/posts/search?q=%s"))
return itemlist
def unusualporn(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, action="videos_2", title="Últimos vídeos", url="http://unusualporn.net/",
thumbnail="http://photosex.biz/imager/w_400/h_500/e48337cd95bbb6c2c372ffa6e71441ac.jpg"))
itemlist.append(Item(channel=item.channel, action="categorias_2", title="Categorías", url="http://unusualporn.net/",
thumbnail="http://photosex.biz/imager/w_400/h_500/e48337cd95bbb6c2c372ffa6e71441ac.jpg"))
itemlist.append(Item(channel=item.channel, action="search", title="Buscar en unusualporn",
url="http://unusualporn.net/search/%s"))
return itemlist
def files_monster(item):
logger.info()
itemlist = []
itemlist.append(
Item(channel=item.channel, action="videos_3", title="Últimos vídeos", url="http://www.files-monster.org/",
thumbnail="http://photosex.biz/imager/w_400/h_500/e48337cd95bbb6c2c372ffa6e71441ac.jpg"))
itemlist.append(
Item(channel=item.channel, action="categorias_3", title="Categorías", url="http://www.files-monster.org/",
thumbnail="http://photosex.biz/imager/w_400/h_500/e48337cd95bbb6c2c372ffa6e71441ac.jpg"))
itemlist.append(Item(channel=item.channel, action="search", title="Buscar en files-monster.org",
url="http://files-monster.org/search?search=%s"))
return itemlist
def favoritos(item):
user = config.get_setting("filesmonsteruser")
password = config.get_setting("filesmonsterpassword")
logger.info()
name_file = os.path.splitext(os.path.basename(__file__))[0]
fname = os.path.join(config.get_data_path(), "settings_channels", name_file + "_favoritos.txt")
fa = open(fname, 'a+')
fa.close()
f = open(fname, 'r')
lines = f.readlines()
f.close()
itemlist = []
post2 = "username=" + user + "&password=" + password
login_url = "http://filesmonster.com/api/public/login"
data1 = scrapertools.cache_page(login_url, post=post2)
partes1 = data1.split('"')
estado = partes1[3]
if estado != 'success': itemlist.append(Item(channel=item.channel,
title="No pudo accederse con tus datos de acceso de Filesmonster.com, introdúcelos en con el apartado figuración. Error: " + estado + data1))
url_favoritos = "http://filesmonster.com/?favorites=1"
data2 = scrapertools.cache_page(url_favoritos, post=post2)
data2 = scrapertools.find_single_match(data2, 'favorites-table(.*?)pager')
patronvideos = '<a href="([^"]+)">([^<]+)</a>.*?del=([^"]+)"'
matches = re.compile(patronvideos, re.DOTALL).findall(data2)
contador = 0
for url, title, borrar in matches:
contador = contador + 1
imagen = ''
for linea in lines:
partes2 = linea.split("@")
parte_url = partes2[0]
parte_imagen = partes2[1]
if (parte_url == url): imagen = parte_imagen.rstrip('\n').rstrip('\r')
if url.find("?fid=") == -1:
itemlist.append(
Item(channel=item.channel, action="play", server="filesmonster", title=title, fulltitle=item.title,
url=url, thumbnail=imagen, folder=False))
else:
itemlist.append(
Item(channel=item.channel, action="detail", server="filesmonster", title=title, fulltitle=title,
thumbnail=imagen, url=url, folder=True))
itemlist.append(Item(channel=item.channel, action="quitar_favorito",
title="(-) quitar de mis favoritos en filesmonster.com", thumbnail=imagen,
url="http://filesmonster.com/?favorites=1&del=" + borrar, plot=borrar))
itemlist.append(Item(channel=item.channel, title="", folder=True))
if contador == 0 and estado == 'success':
itemlist.append(
Item(channel=item.channel, title="No tienes ningún favorito, navega por las diferentes fuentes y añádelos"))
return itemlist
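Reviewer note: favoritos() never parses the login response as JSON; it splits on double quotes and takes index 3, which only works if the API answer opens with a status field. A sketch assuming a response shaped like the hypothetical one below (the real field layout is an assumption):

data1 = '{"status": "success", "message": "ok"}'
partes1 = data1.split('"')
# ['{', 'status', ': ', 'success', ', ', 'message', ': ', 'ok', '}']
print(partes1[3])  # success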
def quitar_favorito(item):
logger.info()
itemlist = []
data = scrapertools.downloadpage(item.url)
itemlist.append(Item(channel=item.channel, action="favoritos",
title="El vídeo ha sido eliminado de tus favoritos, pulsa para volver a tu lista de favoritos"))
return itemlist
def anadir_favorito(item):
logger.info()
name_file = os.path.splitext(os.path.basename(__file__))[0]
fname = os.path.join(config.get_data_path(), "settings_channels", name_file + "_favoritos.txt")
user = config.get_setting("filesmonsteruser")
password = config.get_setting("filesmonsterpassword")
itemlist = []
post2 = "username=" + user + "&password=" + password
login_url = "http://filesmonster.com/api/public/login"
data1 = scrapertools.cache_page(login_url, post=post2)
if item.plot == 'el archivo':
id1 = item.url.split('?id=')
id = id1[1]
que = "file"
if item.plot == 'la carpeta':
id1 = item.url.split('?fid=')
id = id1[1]
que = "folder"
url = "http://filesmonster.com/ajax/add_to_favorites"
post3 = "username=" + user + "&password=" + password + "&id=" + id + "&obj_type=" + que
data2 = scrapertools.cache_page(url, post=post3)
if data2 == 'Already in Your favorites': itemlist.append(Item(channel=item.channel, action="favoritos",
title="" + item.plot + " ya estaba en tu lista de favoritos (" + user + ") en Filesmonster"))
if data2 != 'You are not logged in' and data2 != 'Already in Your favorites':
itemlist.append(Item(channel=item.channel, action="favoritos",
title="Se ha añadido correctamente " + item.plot + " a tu lista de favoritos (" + user + ") en Filesmonster",
plot=data1 + data2))
f = open(fname, "a+")
if (item.plot == 'la carpeta'):
ruta = "http://filesmonster.com/folders.php?"
if (item.plot == 'el archivo'):
ruta = "http://filesmonster.com/download.php"
laruta = ruta + item.url
laruta = laruta.replace("http://filesmonster.com/folders.php?http://filesmonster.com/folders.php?",
"http://filesmonster.com/folders.php?")
laruta = laruta.replace("http://filesmonster.com/download.php?http://filesmonster.com/download.php?",
"http://filesmonster.com/download.php?")
f.write(laruta + '@' + item.thumbnail + '\n')
f.close()
if data2 == 'You are not logged in': itemlist.append(Item(channel=item.channel, action="favoritos",
title="No ha sido posible añadir " + item.plot + " a tu lista de favoritos (" + user + " no logueado en Filesmonster)", ))
return itemlist
def categorias(item):
logger.info()
itemlist = []
data = scrapertools.downloadpage(item.url)
data = scrapertools.find_single_match(data,
'Categories <b class="caret"></b></a>(.*?)RSS <b class="caret"></b></a>')
patronvideos = '<a href="([^"]+)">([^<]+)</a>'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
for url, title in matches:
itemlist.append(Item(channel=item.channel, action="videos", title=title, url=url))
return itemlist
def categorias_2(item):
logger.info()
itemlist = []
data = scrapertools.downloadpage(item.url)
patronvideos = '<li class="cat-item cat-item-[\d]+"><a href="([^"]+)" title="[^"]+">([^<]+)</a><a class="rss_s" title="[^"]+" target="_blank" href="[^"]+"></a></li>'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
for url, title in matches:
itemlist.append(Item(channel=item.channel, action="videos_2", title=title, url=url))
return itemlist
def categorias_3(item):
logger.info()
itemlist = []
data = scrapertools.downloadpage(item.url)
patronvideos = '<li><a href="([^"]+)">([^<]+)</a></li>'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
for url, title in matches:
itemlist.append(Item(channel=item.channel, action="videos_3", title=title, url=url))
return itemlist
def search(item, texto):
logger.info("texto:" + texto)
original = item.url
item.url = item.url % texto
try:
if original == 'http://filesmonster.filesdl.net/posts/search?q=%s':
return videos(item)
if original == 'http://unusualporn.net/search/%s':
return videos_2(item)
if original == 'http://files-monster.org/search?search=%s':
return videos_3(item)
# Catch the exception so a failing channel does not break the global search
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def videos(item):
logger.info()
itemlist = []
url = item.url
while url and len(itemlist) < 25:
data = scrapertools.downloadpage(url)
patronvideos = '<div class="panel-heading">.*?<a href="([^"]+)">([^<]+).*?</a>.*?<div class="panel-body" style="text-align: center;">.*?<img src="([^"]+)".*?'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
for url, title, thumbnail in matches:
title = title.strip()
itemlist.append(
Item(channel=item.channel, action="detail", title=title, fulltitle=title, url=url, thumbnail=thumbnail))
url = scrapertools.find_single_match(data, '<li><a href="([^"]+)">Next</a></li>').replace("&amp;", "&")
# Link to the next page
if url:
itemlist.append(Item(channel=item.channel, action="videos", title=">> Página Siguiente", url=url))
return itemlist
def videos_2(item):
logger.info()
itemlist = []
url_limpia = item.url.split("?")[0]
url = item.url
while url and len(itemlist) < 25:
data = scrapertools.downloadpage(url)
patronvideos = 'data-link="([^"]+)" data-title="([^"]+)" src="([^"]+)" border="0" />'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
for url, title, thumbnail in matches:
itemlist.append(Item(channel=item.channel, action="detail_2", title=title, fulltitle=title, url=url,
thumbnail=thumbnail))
url = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />').replace("&amp;", "&")
# Link to the next page
if url:
itemlist.append(Item(channel=item.channel, action="videos_2", title=">> Página Siguiente", url=url))
return itemlist
def videos_3(item):
logger.info()
itemlist = []
url = item.url
url_limpia = item.url.split("?")[0]
while url and len(itemlist) < 25:
data = scrapertools.downloadpage(url)
patronvideos = '<a href="([^"]+)">.*?<img src="([^"]+)" border="0" title=".*?([^"]+).*?" height="70" />'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
for url, thumbnail, title in matches:
itemlist.append(Item(channel=item.channel, action="detail_2", title=title, fulltitle=title, url=url,
thumbnail=thumbnail))
url = scrapertools.find_single_match(data,
'<a style="text-decoration:none;" href="([^"]+)">&rarr;</a>').replace(
"&amp;", "&")
# Link to the next page
if url:
itemlist.append(
Item(channel=item.channel, action="videos_3", title=">> Página Siguiente", url=url_limpia + url))
return itemlist
def detail(item):
logger.info()
itemlist = []
data = scrapertools.downloadpage(item.url)
patronvideos = '["|\'](http\://filesmonster.com/download.php\?[^"\']+)["|\']'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
for url in matches:
title = "Archivo %d: %s [filesmonster]" % (len(itemlist) + 1, item.fulltitle)
itemlist.append(
Item(channel=item.channel, action="play", server="filesmonster", title=title, fulltitle=item.fulltitle,
url=url, thumbnail=item.thumbnail, folder=False))
itemlist.append(Item(channel=item.channel, action="anadir_favorito",
title="(+) Añadir el vídeo a tus favoritos en filesmonster", url=url,
thumbnail=item.thumbnail, plot="el archivo", folder=True))
itemlist.append(Item(channel=item.channel, title=""));
patronvideos = '["|\'](http\://filesmonster.com/folders.php\?[^"\']+)["|\']'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
for url in matches:
if not url == item.url:
logger.info(url)
logger.info(item.url)
title = "Carpeta %d: %s [filesmonster]" % (len(itemlist) + 1, item.fulltitle)
itemlist.append(Item(channel=item.channel, action="detail", title=title, fulltitle=item.fulltitle, url=url,
thumbnail=item.thumbnail, folder=True))
itemlist.append(Item(channel=item.channel, action="anadir_favorito",
title="(+) Añadir la carpeta a tus favoritos en filesmonster", url=url,
thumbnail=item.thumbnail, plot="la carpeta", folder=True))
itemlist.append(Item(channel=item.channel, title=""));
return itemlist
def detail_2(item):
logger.info()
itemlist = []
# download the page
data = scrapertools.downloadpageGzip(item.url)
data = data.split('<span class="filesmonsterdlbutton">Download from Filesmonster</span>')
data = data[0]
# find the url
patronvideos = 'href="http://filesmonster.com/download.php(.*?)".(.*?)'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
for match2 in matches:
url = "http://filesmonster.com/download.php" + match2[0]
title = "Archivo %d: %s [filesmonster]" % (len(itemlist) + 1, item.fulltitle)
itemlist.append(
Item(channel=item.channel, action="play", server="filesmonster", title=title, fulltitle=item.fulltitle,
url=url, thumbnail=item.thumbnail, folder=False))
itemlist.append(Item(channel=item.channel, action="anadir_favorito",
title="(+) Añadir el vídeo a tus favoritos en filesmonster", url=match2[0],
thumbnail=item.thumbnail, plot="el archivo", folder=True))
itemlist.append(Item(channel=item.channel, title=""));
patronvideos = '["|\'](http\://filesmonster.com/folders.php\?[^"\']+)["|\']'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
for url in matches:
if not url == item.url:
logger.info(url)
logger.info(item.url)
title = "Carpeta %d: %s [filesmonster]" % (len(itemlist) + 1, item.fulltitle)
itemlist.append(Item(channel=item.channel, action="detail", title=title, fulltitle=item.fulltitle, url=url,
thumbnail=item.thumbnail, folder=True))
itemlist.append(Item(channel=item.channel, action="anadir_favorito",
title="(+) Añadir la carpeta a tus favoritos en filesmonster", url=url,
thumbnail=item.thumbnail, plot="la carpeta", folder=True))
itemlist.append(Item(channel=item.channel, title=""));
return itemlist

View File

@@ -1,21 +0,0 @@
{
"id": "freecambay",
"name": "FreeCamBay",
"language": ["*"],
"active": true,
"adult": true,
"thumbnail": "http://i.imgur.com/wuzhOCt.png?1",
"categories": [
"adult"
],
"settings": [
{
"id": "menu_info",
"type": "bool",
"label": "Mostrar menú antes de reproducir con imágenes",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,261 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from core import httptools
from core import scrapertools
from core.item import Item
from platformcode import config, logger
host = "http://www.freecambay.com"
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(item.clone(action="lista", title="Nuevos Vídeos", url=host + "/latest-updates/"))
itemlist.append(item.clone(action="lista", title="Mejor Valorados", url=host + "/top-rated/"))
itemlist.append(item.clone(action="lista", title="Más Vistos", url=host + "/most-popular/"))
itemlist.append(item.clone(action="categorias", title="Categorías", url=host + "/categories/"))
itemlist.append(item.clone(action="categorias", title="Modelos",
url=host + "/models/?mode=async&function=get_block&block_id=list_models_models" \
"_list&sort_by=total_videos"))
itemlist.append(item.clone(action="playlists", title="Listas", url=host + "/playlists/"))
itemlist.append(item.clone(action="tags", title="Tags", url=host + "/tags/"))
itemlist.append(item.clone(title="Buscar...", action="search"))
itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False))
return itemlist
def configuracion(item):
from platformcode import platformtools
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
return ret
def search(item, texto):
logger.info()
item.url = "%s/search/%s/" % (host, texto.replace("+", "-"))
item.extra = texto
try:
return lista(item)
# Catch the exception so a failing channel does not break the global search
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def lista(item):
logger.info()
itemlist = []
# Download the page
data = httptools.downloadpage(item.url).data
action = "play"
if config.get_setting("menu_info", "freecambay"):
action = "menu_info"
# Extract the entries
patron = '<div class="item.*?href="([^"]+)" title="([^"]+)".*?data-original="([^"]+)"(.*?)<div class="duration">([^<]+)<'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle, scrapedthumbnail, quality, duration in matches:
if duration:
scrapedtitle = "%s - %s" % (duration, scrapedtitle)
if '>HD<' in quality:
scrapedtitle += " [COLOR red][HD][/COLOR]"
itemlist.append(item.clone(action=action, title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
fanart=scrapedthumbnail))
# Extract the next-page marker
if item.extra:
next_page = scrapertools.find_single_match(data, '<li class="next">.*?from_videos\+from_albums:(\d+)')
if next_page:
if "from_videos=" in item.url:
next_page = re.sub(r'&from_videos=(\d+)', '&from_videos=%s' % next_page, item.url)
else:
next_page = "%s?mode=async&function=get_block&block_id=list_videos_videos_list_search_result" \
"&q=%s&category_ids=&sort_by=post_date&from_videos=%s" % (item.url, item.extra, next_page)
itemlist.append(item.clone(action="lista", title=">> Página Siguiente", url=next_page))
else:
next_page = scrapertools.find_single_match(data, '<li class="next">.*?href="([^"]*)"')
if next_page and not next_page.startswith("#"):
next_page = urlparse.urljoin(host, next_page)
itemlist.append(item.clone(action="lista", title=">> Página Siguiente", url=next_page))
else:
next_page = scrapertools.find_single_match(data, '<li class="next">.*?from:(\d+)')
if next_page:
if "from=" in item.url:
next_page = re.sub(r'&from=(\d+)', '&from=%s' % next_page, item.url)
else:
next_page = "%s?mode=async&function=get_block&block_id=list_videos_common_videos_list&sort_by=post_date&from=%s" % (
item.url, next_page)
itemlist.append(item.clone(action="lista", title=">> Página Siguiente", url=next_page))
return itemlist
def categorias(item):
logger.info()
itemlist = []
# Download the page
data = httptools.downloadpage(item.url).data
# Extract the entries
patron = '<a class="item" href="([^"]+)" title="([^"]+)".*?src="([^"]+)".*?<div class="videos">([^<]+)<'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle, scrapedthumbnail, videos in matches:
if videos:
scrapedtitle = "%s (%s)" % (scrapedtitle, videos)
itemlist.append(item.clone(action="lista", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
fanart=scrapedthumbnail))
# Extract the next-page marker
next_page = scrapertools.find_single_match(data, '<li class="next">.*?from:(\d+)')
if next_page:
if "from=" in item.url:
next_page = re.sub(r'&from=(\d+)', '&from=%s' % next_page, item.url)
else:
next_page = "%s&from=%s" % (item.url, next_page)
itemlist.append(item.clone(action="categorias", title=">> Página Siguiente", url=next_page))
return itemlist
def playlists(item):
logger.info()
itemlist = []
# Download the page
data = httptools.downloadpage(item.url).data
# Extract the entries
patron = '<div class="item.*?href="([^"]+)" title="([^"]+)".*?data-original="([^"]+)".*?<div class="videos">([^<]+)<'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle, scrapedthumbnail, videos in matches:
if videos:
scrapedtitle = "%s (%s)" % (scrapedtitle, videos)
itemlist.append(item.clone(action="videos", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
fanart=scrapedthumbnail))
# Extract the next-page marker
next_page = scrapertools.find_single_match(data, '<li class="next">.*?href="([^"]+)"')
if next_page:
next_page = urlparse.urljoin(host, next_page)
itemlist.append(item.clone(action="playlists", title=">> Página Siguiente", url=next_page))
return itemlist
def videos(item):
logger.info()
itemlist = []
# Download the page
data = httptools.downloadpage(item.url).data
action = "play"
if config.get_setting("menu_info", "freecambay"):
action = "menu_info"
# Extract the entries
patron = '<a href="([^"]+)" class="item ".*?data-original="([^"]+)".*?<strong class="title">\s*([^<]+)<'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
scrapedtitle = scrapedtitle.strip()
itemlist.append(item.clone(action=action, title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
fanart=scrapedthumbnail))
# Extract the next-page marker
next_page = scrapertools.find_single_match(data, '<li class="next">.*?from:(\d+)')
if next_page:
if "from=" in item.url:
next_page = re.sub(r'&from=(\d+)', '&from=%s' % next_page, item.url)
else:
next_page = "%s?mode=async&function=get_block&block_id=playlist_view_playlist_view&sort_by" \
"=added2fav_date&&from=%s" % (item.url, next_page)
itemlist.append(item.clone(action="videos", title=">> Página Siguiente", url=next_page))
return itemlist
def play(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '(?:video_url|video_alt_url[0-9]*)\s*:\s*\'([^\']+)\'.*?(?:video_url_text|video_alt_url[0-9]*_text)\s*:\s*\'([^\']+)\''
matches = scrapertools.find_multiple_matches(data, patron)
if not matches:
patron = '<iframe.*?height="(\d+)".*?video_url\s*:\s*\'([^\']+)\''
matches = scrapertools.find_multiple_matches(data, patron)
for url, quality in matches:
if "http" in quality:
calidad = url
url = quality
quality = calidad + "p"
itemlist.append(['.mp4 %s [directo]' % quality, url])
if item.extra == "play_menu":
return itemlist, data
return itemlist
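Reviewer note: in the fallback pattern the capture order flips, the first group is the iframe height and the second the URL, which is why the loop swaps them when "http" shows up in the quality slot. A sketch with hypothetical player markup:

import re

data = ('<iframe src="player.php" height="480"></iframe>'
        " video_url: 'https://example.com/v.mp4'")
patron = '<iframe.*?height="(\d+)".*?video_url\s*:\s*\'([^\']+)\''
for url, quality in re.findall(patron, data):
    if "http" in quality:  # groups arrived as (height, url)
        url, quality = quality, url + "p"
    print('.mp4 %s [directo] -> %s' % (quality, url))  # .mp4 480p [directo] -> https://example.com/v.mp4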
def menu_info(item):
logger.info()
itemlist = []
video_urls, data = play(item.clone(extra="play_menu"))
itemlist.append(item.clone(action="play", title="Ver -- %s" % item.title, video_urls=video_urls))
bloque = scrapertools.find_single_match(data, '<div class="block-screenshots">(.*?)</div>')
matches = scrapertools.find_multiple_matches(bloque, '<img class="thumb lazy-load".*?data-original="([^"]+)"')
for i, img in enumerate(matches):
if i == 0:
continue
img = urlparse.urljoin(host, img)
title = "Imagen %s" % (str(i))
itemlist.append(item.clone(action="", title=title, thumbnail=img, fanart=img))
return itemlist
def tags(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
if item.title == "Tags":
letras = []
matches = scrapertools.find_multiple_matches(data, '<strong class="title".*?>\s*(.*?)</strong>')
for title in matches:
title = title.strip()
if title not in letras:
letras.append(title)
itemlist.append(Item(channel=item.channel, action="tags", url=item.url, title=title, extra=title))
else:
if not item.length:
item.length = 0
bloque = scrapertools.find_single_match(data,
'>%s</strong>(.*?)(?:(?!%s)(?!#)[A-Z#]{1}</strong>|<div class="footer-margin">)' % (
item.extra, item.extra))
matches = scrapertools.find_multiple_matches(bloque, '<a href="([^"]+)">\s*(.*?)</a>')
for url, title in matches[item.length:item.length + 100]:
itemlist.append(Item(channel=item.channel, action="lista", url=url, title=title))
if len(itemlist) >= 100:
itemlist.append(Item(channel=item.channel, action="tags", url=item.url, title=">> Página siguiente",
length=item.length + 100, extra=item.extra))
return itemlist
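Reviewer note: the letter pages in tags() reuse the already-downloaded block; item.length is a cursor, each call slices the next 100 matches and emits a new pagination item while a full page remains. The slicing scheme in isolation (hypothetical tag names):

matches = ["tag%d" % i for i in range(250)]
length = 0
while matches[length:length + 100]:
    page = matches[length:length + 100]
    print(len(page))  # 100, 100, 50
    length += 100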

View File

@@ -3,15 +3,29 @@
"name": "HDFilmologia",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"language": ["cast", "lat", "vose"],
"fanart": "https://i.postimg.cc/qvFCZNKT/Alpha-652355392-large.jpg",
"thumbnail": "https://hdfilmologia.com/templates/gorstyle/images/logo.png",
"banner": "",
"categories": [
"movie",
"vos"
"vose",
],
"settings": [
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"Latino",
"Castellano",
"English"
]
},
{
"id": "modo_grafico",
"type": "bool",

View File

@@ -7,7 +7,8 @@ import re
import sys
import urllib
import urlparse
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
@@ -46,39 +47,45 @@ fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']
IDIOMAS = {'Latino': 'LAT', 'Castellano': 'CAST'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['rapidvideo', 'streamango', 'openload', 'streamcherry']
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(item.clone(title="Últimas Agregadas", action="movies",thumbnail=get_thumb('last', auto=True),
itemlist.append(item.clone(title="Últimas Agregadas", action="movies", thumbnail=get_thumb('last', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host + 'index.php?do=lastnews', viewmode="movie_with_plot"))
itemlist.append(item.clone(title="Estrenos", action="movies", thumbnail=get_thumb('premieres', auto=True),
text_blod=True, page=0, viewcontent='movies', url=host + 'estrenos',
viewmode="movie_with_plot"))
text_blod=True, page=0, viewcontent='movies', url=host + 'estrenos',
viewmode="movie_with_plot"))
itemlist.append(item.clone(title="Más Vistas", action="movies",thumbnail=get_thumb('more watched', auto=True),
itemlist.append(item.clone(title="Más Vistas", action="movies", thumbnail=get_thumb('more watched', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host + 'mas-vistas/', viewmode="movie_with_plot"))
itemlist.append(item.clone(title="Películas Por País", action="countriesYears",thumbnail=get_thumb('country', auto=True),
itemlist.append(item.clone(title="Películas Por País", action="countriesYears", thumbnail=get_thumb('country',
auto=True), text_blod=True, page=0, viewcontent='movies',
url=host, viewmode="movie_with_plot"))
itemlist.append(item.clone(title="Películas Por Año", action="countriesYears", thumbnail=get_thumb('year', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host, viewmode="movie_with_plot"))
itemlist.append(item.clone(title="Películas Por Año", action="countriesYears",thumbnail=get_thumb('year', auto=True),
itemlist.append(item.clone(title="Géneros", action="genres", thumbnail=get_thumb('genres', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host, viewmode="movie_with_plot"))
itemlist.append(item.clone(title="Géneros", action="genres",thumbnail=get_thumb('genres', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host, viewmode="movie_with_plot"))
itemlist.append(item.clone(title="Buscar", action="search",thumbnail=get_thumb('search', auto=True),
itemlist.append(item.clone(title="Buscar", action="search", thumbnail=get_thumb('search', auto=True),
text_blod=True, url=host, page=0))
autoplay.show_option(item.channel, itemlist)
return itemlist
@@ -107,7 +114,7 @@ def sub_search(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a class="sres-wrap clearfix" href="([^"]+)">' #url
patron = '<a class="sres-wrap clearfix" href="([^"]+)">' # url
patron += '<div class="sres-img"><img src="/([^"]+)" alt="([^"]+)" />.*?' # img, title
patron += '<div class="sres-desc">(.*?)</div>' # plot
@@ -117,7 +124,7 @@ def sub_search(item):
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, contentTitle=scrapedtitle,
action="findvideos", text_color=color3, page=0, plot=plot,
thumbnail=host+scrapedthumbnail))
thumbnail=host + scrapedthumbnail))
pagination = scrapertools.find_single_match(data, 'class="pnext"><a href="([^"]+)">')
@@ -147,10 +154,10 @@ def movies(item):
scrapedthumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
title = "%s [COLOR yellow][%s][/COLOR]" % (scrapedtitle, quality)
itemlist.append(item.clone(channel=__channel__, action="findvideos", text_color=color3,
url=scrapedurl, infoLabels={'year': year.strip()},
contentTitle=scrapedtitle, thumbnail=scrapedthumbnail,
title=title, context="buscar_trailer"))
itemlist.append(Item(channel=__channel__, action="findvideos", text_color=color3,
url=scrapedurl, infoLabels={'year': year.strip()},
contentTitle=scrapedtitle, thumbnail=scrapedthumbnail,
title=title, context="buscar_trailer"))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
@@ -165,7 +172,6 @@ def movies(item):
itemlist.append(item.clone(url=next_page, page=0,
title="» Siguiente »", text_color=color3))
return itemlist
@@ -182,7 +188,7 @@ def genres(item):
for scrapedurl, scrapedtitle in matches:
itemlist.append(item.clone(channel=__channel__, action="movies", title=scrapedtitle,
url=host+scrapedurl, text_color=color3, viewmode="movie_with_plot"))
url=host + scrapedurl, text_color=color3, viewmode="movie_with_plot"))
return itemlist
@@ -197,15 +203,14 @@ def countriesYears(item):
patron_todas = 'Por País</option>(.*?)</option></select>'
else:
patron_todas = 'Por Año</option>(.*?)<option value="/">Peliculas'
data = scrapertools.find_single_match(data, patron_todas)
patron = '<option value="/([^"]+)">([^<]+)</option>' # url, title
patron = '<option value="/([^"]+)">([^<]+)</option>' # url, title
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle in matches:
itemlist.append(item.clone(title=scrapedtitle, url=host+scrapedurl, action="movies"))
itemlist.append(item.clone(title=scrapedtitle, url=host + scrapedurl, action="movies"))
return itemlist
@@ -246,13 +251,17 @@ def findvideos(item):
title = "Ver en: [COLOR yellow](%s)[/COLOR] [COLOR yellowgreen]%s[/COLOR]" % (server.title(), lang)
if 'youtube' not in server:
itemlist.append(item.clone(action='play', url=url, title=title, language=lang,
text_color=color3))
itemlist = servertools.get_servers_itemlist(itemlist)
itemlist.sort(key=lambda it: it.language, reverse=False)
# Required by FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Required by AutoPlay
autoplay.start(itemlist, item)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'episodios':
itemlist.append(Item(channel=__channel__, url=item.url, action="add_pelicula_to_library", extra="findvideos",

View File

@@ -1,22 +0,0 @@
{
"id": "hentaienespanol",
"name": "HentaiEnEspañol",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "https://s11.postimg.cc/cmuwcvvpf/hentaienespanol.png",
"banner": "https://s3.postimg.cc/j3qkfut8z/hentaienespanol_banner.png",
"categories": [
"adult"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
}
]
}

View File

@@ -1,63 +0,0 @@
# -*- coding: utf-8 -*-
import re
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger
host = 'http://www.xn--hentaienespaol-1nb.net/'
headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
['Referer', host]]
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, title="Todos", action="todas", url=host, thumbnail='', fanart=''))
itemlist.append(
Item(channel=item.channel, title="Sin Censura", action="todas", url=host + 'hentai/sin-censura/', thumbnail='',
fanart=''))
return itemlist
def todas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div class="box-peli" id="post-.*?">.<h2 class="title">.<a href="([^"]+)">([^<]+)<\/a>.*?'
patron += 'height="170px" src="([^"]+)'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
url = scrapedurl
title = scrapedtitle # .decode('utf-8')
thumbnail = scrapedthumbnail
fanart = ''
itemlist.append(
Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, fanart=fanart))
# Pagination
title = ''
siguiente = scrapertools.find_single_match(data, 'class="nextpostslink" rel="next" href="([^"]+)">')
title = 'Página Siguiente >>> '
fanart = ''
itemlist.append(Item(channel=item.channel, action="todas", title=title, url=siguiente, fanart=fanart))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
if texto != '':
return todas(item)
else:
return []

View File

@@ -108,8 +108,8 @@ def series(item):
plot=scrapedplot, show=scrapedtitle))
tmdb.set_infoLabels(itemlist)
try:
siguiente = scrapertools.find_single_match(data, '<a class="listsiguiente" href="([^"]+)" >Resultados Siguientes')
scrapedurl = item.url + siguiente
siguiente = scrapertools.find_single_match(data, '<a class="text nav-next" href="([^"]+)"')
scrapedurl = siguiente
scrapedtitle = ">> Pagina Siguiente"
scrapedthumbnail = ""
scrapedplot = ""

View File

@@ -100,7 +100,7 @@
"id": "intervenidos_channels_list",
"type": "text",
"label": "Lista de canales y clones de NewPct1 intervenidos y orden de sustitución de URLs",
"default": "('0', 'canal_org', 'canal_des', 'url_org', 'url_des', 'patron1', 'patron2', 'patron3', 'patron4', 'patron5', 'content_inc', 'content_exc', 'ow_force'), ('0', 'mejortorrent', 'mejortorrent1', 'http://www.mejortorrent.com/', 'https://mejortorrent1.com/', '(http.?:\/\/.*?\/)', 'http.?:\/\/.*?\/.*?-torrent.?-[^-]+-(?:[^-]+-)([^0-9]+-)', 'http.?:\/\/.*?\/.*?-torrent.?-[^-]+-(?:[^-]+-)[^0-9]+-\\d+-(Temporada-).html', 'http.?:\/\/.*?\/.*?-torrent.?-[^-]+-(?:[^-]+-)[^0-9]+-(\\d+)-', '', 'tvshow, season', '', 'force'), ('0', 'mejortorrent', 'mejortorrent1', 'http://www.mejortorrent.com/', 'https://mejortorrent1.com/', '(http.?:\/\/.*?\/)', 'http.?:\/\/.*?\/.*?-torrent.?-[^-]+-([^.]+).html', '', '', '', 'movie', '', 'force'), ('0', 'mejortorrent', 'mejortorrent', 'http://www.mejortorrent.com/', 'http://www.mejortorrent.org/', '', '', '', '', '', '*', '', 'force'), ('1', 'plusdede', 'megadede', 'https://www.plusdede.com', 'https://www.megadede.com', '', '', '', '', '', '*', '', 'auto'), ('1', 'newpct1', 'descargas2020', 'http://www.newpct1.com', 'http://descargas2020.com', '', '', '', '', '', '*', '', 'force')",
"default": "('0', 'canal_org', 'canal_des', 'url_org', 'url_des', 'patron1', 'patron2', 'patron3', 'patron4', 'patron5', 'content_inc', 'content_exc', 'ow_force'), ('0', 'canal_org', 'canal_des', 'url_org', 'url_des', 'patron1', 'patron2', 'patron3', 'patron4', 'patron5', 'content_inc', 'content_exc', 'ow_force')",
"enabled": true,
"visible": false
},
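Reviewer note: each entry in this text setting is a 13-field tuple: an enable flag, source and destination channel names, source and destination URLs, five substitution patterns, content include/exclude filters, and an override mode. A sketch of one way such a string could be read back, assuming the tuples stay literal-eval-safe (the project may parse it differently):

import ast

default = ("('0', 'canal_org', 'canal_des', 'url_org', 'url_des', "
           "'patron1', 'patron2', 'patron3', 'patron4', 'patron5', "
           "'content_inc', 'content_exc', 'ow_force')")
rules = ast.literal_eval("[%s]" % default)  # wrap so comma-separated tuples become a list
for rule in rules:
    flag, canal_org, canal_des = rule[:3]
    print(", ".join((flag, canal_org, canal_des)))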

View File

@@ -3,16 +3,30 @@
"name": "Pelis24",
"active": true,
"adult": false,
"language": ["lat"],
"language": ["lat", "cast", "vose"],
"fanart": "https://i.postimg.cc/WpqD2n77/cine24bg.jpg",
"thumbnail": "https://www.pelis24.in/wp-content/uploads/2018/05/44.png",
"banner": "",
"categories": [
"movie",
"tvshow",
"vos"
"vose",
],
"settings": [
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"Latino",
"Castellano",
"English"
]
},
{
"id": "modo_grafico",
"type": "bool",

View File

@@ -53,35 +53,32 @@ list_quality = []
list_servers = ['rapidvideo', 'streamango', 'openload', 'streamcherry']
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = [item.clone(title="Novedades", action="peliculas",thumbnail=get_thumb('newest', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host + 'movies/', viewmode="movie_with_plot"),
itemlist = [item.clone(title="Novedades", action="peliculas", thumbnail=get_thumb('newest', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host + 'movies/', viewmode="movie_with_plot"),
item.clone(title="Tendencias", action="peliculas",thumbnail=get_thumb('newest', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host + 'tendencias/?get=movies', viewmode="movie_with_plot"),
item.clone(title="Tendencias", action="peliculas", thumbnail=get_thumb('newest', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host + 'tendencias/?get=movies', viewmode="movie_with_plot"),
item.clone(title="Estrenos", action="peliculas",thumbnail=get_thumb('estrenos', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host + 'genre/estrenos/', viewmode="movie_with_plot"),
item.clone(title="Estrenos", action="peliculas", thumbnail=get_thumb('estrenos', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host + 'genre/estrenos/', viewmode="movie_with_plot"),
item.clone(title="Géneros", action="genresYears",thumbnail=get_thumb('genres', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host, viewmode="movie_with_plot"),
item.clone(title="Géneros", action="genresYears", thumbnail=get_thumb('genres', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host, viewmode="movie_with_plot"),
item.clone(title="Buscar", action="search",thumbnail=get_thumb('search', auto=True),
text_blod=True, url=host, page=0)]
item.clone(title="Buscar", action="search", thumbnail=get_thumb('search', auto=True),
text_blod=True, url=host, page=0)]
autoplay.show_option(item.channel, itemlist)
return itemlist
def search(item, texto):
logger.info()
@@ -98,6 +95,7 @@ def search(item, texto):
logger.error("{0}".format(line))
return []
def sub_search(item):
logger.info()
@@ -107,7 +105,7 @@ def sub_search(item):
# logger.info(data)
data = scrapertools.find_single_match(data, '<header><h1>Resultados encontrados(.*?)resppages')
# logger.info(data)
patron = '<a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)" />.*?' # url, img, title
patron = '<a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)" />.*?' # url, img, title
patron += '<span class="year">([^<]+)</span>'
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -139,7 +137,8 @@ def peliculas(item):
data = scrapertools.decodeHtmlentities(data)
# logger.info(data)
patron = '<article id="post-\w+" class="item movies"><div class="poster"><img src="([^"]+)" alt="([^"]+)">.*?' # img, title
# img, title
patron = '<article id="post-\w+" class="item movies"><div class="poster"><img src="([^"]+)" alt="([^"]+)">.*?'
patron += '<span class="quality">([^<]+)</span> </div>\s*<a href="([^"]+)">.*?' # quality, url
patron += '</h3><span>([^<]+)</span>' # year
@@ -148,11 +147,10 @@ def peliculas(item):
for scrapedthumbnail, scrapedtitle, quality, scrapedurl, year in matches[item.page:item.page + 30]:
title = '%s [COLOR yellowgreen](%s)[/COLOR]' % (scrapedtitle, quality)
itemlist.append(item.clone(channel=__channel__, action="findvideos", text_color=color3,
url=scrapedurl, infoLabels={'year': year}, quality=quality,
contentTitle=scrapedtitle, thumbnail=scrapedthumbnail,
title=title, context="buscar_trailer"))
itemlist.append(Item(channel=__channel__, action="findvideos", text_color=color3,
url=scrapedurl, infoLabels={'year': year}, quality=quality,
contentTitle=scrapedtitle, thumbnail=scrapedthumbnail,
title=title, context="buscar_trailer"))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
@@ -164,7 +162,6 @@ def peliculas(item):
if next_page:
itemlist.append(item.clone(url=next_page, page=0, title="» Siguiente »", text_color=color3))
return itemlist
@@ -180,10 +177,10 @@ def genresYears(item):
else:
patron_todas = '<h2>Generos</h2>(.*?)</div><aside'
# logger.error(texto='***********uuuuuuu*****' + patron_todas)
data = scrapertools.find_single_match(data, patron_todas)
# logger.error(texto='***********uuuuuuu*****' + data)
patron = '<a href="([^"]+)">([^<]+)</a> <i>([^<]+)</i>' # url, title, videos
patron = '<a href="([^"]+)">([^<]+)</a> <i>([^<]+)</i>' # url, title, videos
# patron = '<a href="([^"]+)">([^<]+)</a>' # url, title
matches = scrapertools.find_multiple_matches(data, patron)
@@ -192,7 +189,6 @@ def genresYears(item):
itemlist.append(item.clone(title=title, url=scrapedurl, action="peliculas"))
return itemlist
@@ -222,7 +218,7 @@ def series(item):
data = re.sub(r"\n|\r|\t|\(.*?\)|&nbsp;|<br>", "", data)
# logger.info(data)
patron = '<article class="TPost C TPostd">\s*<a href="([^"]+)">.*?' # url
patron = '<article class="TPost C TPostd">\s*<a href="([^"]+)">.*?' # url
patron += '<img src="([^"]+)".*?' # img
patron += '<h3 class="Title">([^<]+)</h3>' # title
@@ -232,7 +228,7 @@ def series(item):
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action="temporadas",
contentSerieName=scrapedtitle, show=scrapedtitle,
thumbnail='https:'+scrapedthumbnail, contentType='tvshow'))
thumbnail='https:' + scrapedthumbnail, contentType='tvshow'))
tmdb.set_infoLabels(itemlist, __modo_grafico__)
@@ -360,7 +356,8 @@ def findvideos(item):
# urls = re.compile(patron2, re.DOTALL).findall(data)
for option, lang in matches:
url = scrapertools.find_single_match(data, '<div id="option-%s" class="[^"]+"><iframe class="metaframe rptss" src="([^"]+)"' % option)
url = scrapertools.find_single_match(
data, '<div id="option-%s" class="[^"]+"><iframe class="metaframe rptss" src="([^"]+)"' % option)
lang = lang.lower().strip()
languages = {'latino': '[COLOR cornflowerblue](LAT)[/COLOR]',
'castellano': '[COLOR green](CAST)[/COLOR]',
@@ -374,10 +371,9 @@ def findvideos(item):
server = servertools.get_server_from_url(url)
title = "»» [COLOR yellow](%s)[/COLOR] [COLOR goldenrod](%s)[/COLOR] %s ««" % (server.title(), item.quality, lang)
# if 'google' not in url and 'directo' not in server:
itemlist.append(item.clone(action='play', url=url, title=title, language=lang, text_color=color3))
itemlist = servertools.get_servers_itemlist(itemlist)
itemlist.sort(key=lambda it: it.language, reverse=False)
@@ -388,7 +384,6 @@ def findvideos(item):
autoplay.start(itemlist, item)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'episodios':
itemlist.append(Item(channel=__channel__, url=item.url, action="add_pelicula_to_library", extra="findvideos",
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',

View File

@@ -3,14 +3,14 @@
"name": "PelisPlay",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"fanart": "https://s33.postimg.cc/d3ioghaof/image.png",
"language": ["cast", "lat", "vose"],
"fanart": "https://i.postimg.cc/qvFCZNKT/Alpha-652355392-large.jpg",
"thumbnail": "https://www.pelisplay.tv/static/img/logo.png",
"banner": "https://s33.postimg.cc/cyex6xlen/image.png",
"banner": "https://i.postimg.cc/tCb8wh8s/pelisplaybn.jpg",
"categories": [
"movie",
"tvshow",
"vos"
"vose"
],
"settings": [
{
@@ -22,7 +22,9 @@
"visible": true,
"lvalues": [
"No filtrar",
"Latino"
"Latino",
"Castellano",
"Subtitulado"
]
},
{

View File

@@ -46,13 +46,12 @@ parameters = channeltools.get_channel_parameters(__channel__)
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']
IDIOMAS = {'Latino': 'LAT'}
IDIOMAS = {'Latino': 'LAT', 'Castellano': 'CAST', 'Subtitulado': 'VOSE'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['rapidvideo', 'streamango', 'fastplay', 'openload']
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
@@ -63,12 +62,13 @@ def mainlist(item):
viewcontent='tvshow', viewmode="tvshow_with_plot",
thumbnail=get_thumb("channels_tvshow.png")),
item.clone(title="Netflix", action="flixmovies", text_blod=True, extra='serie', mediatype="tvshow",
viewcontent='tvshows', viewmode="movie_with_plot", fanart='https://i.postimg.cc/jjN85j8s/netflix-logo.png',
thumbnail='http://img.app.kiwi/icon/jcbqFma-5e91cY9MlEasA-fvCRJK493MxphrqbBd8oS74FtYg00IXeOAn0ahsLprxIA'),
item.clone(title="Netflix", action="flixmenu", text_blod=True, extra='serie', mediatype="tvshow",
viewcontent='tvshows', viewmode="movie_with_plot",
fanart='https://i.postimg.cc/jjN85j8s/netflix-logo.png',
thumbnail='https://i.postimg.cc/Pxs9zYjz/image.png'),
item.clone(title="Buscar", action="search", text_blod=True, extra='buscar',
thumbnail=get_thumb('search.png'), url=host+'buscar')]
thumbnail=get_thumb('search.png'), url=host + 'buscar')]
autoplay.show_option(item.channel, itemlist)
return itemlist
@@ -81,28 +81,10 @@ def menumovies(item):
viewcontent='movie', url=host + 'peliculas?filtro=visitas', viewmode="movie_with_plot"),
item.clone(title="Recíen Agregadas", action="peliculas", text_blod=True,
viewcontent='movie', url=host + 'peliculas?filtro=fecha_creacion', viewmode="movie_with_plot"),
item.clone(title="Por año", action="p_portipo", text_blod=True, extra="Películas Por año",
viewcontent='movie', url=host, viewmode="movie_with_plot"),
item.clone(title="Géneros", action="p_portipo", text_blod=True, extra='movie',
viewcontent='movie', url=host+'peliculas', viewmode="movie_with_plot"),
viewcontent='movie', url=host + 'peliculas', viewmode="movie_with_plot"),
item.clone(title="Buscar", action="search", text_blod=True, extra='buscarp',
thumbnail=get_thumb('search.png'), url=host+'peliculas')]
return itemlist
def flixmovies(item):
logger.info()
itemlist = [item.clone(title="Novedades", action="peliculas", text_blod=True, url=host + 'peliculas/netflix?filtro=fecha_actualizacion',
viewcontent='movie', viewmode="movie_with_plot"),
# item.clone(title="Estrenos", action="peliculas", text_blod=True,
# viewcontent='movie', url=host + 'peliculas/estrenos', viewmode="movie_with_plot"),
item.clone(title="Más Vistas", action="peliculas", text_blod=True,
viewcontent='movie', url=host + 'peliculas/netflix?filtro=visitas', viewmode="movie_with_plot"),
item.clone(title="Recíen Agregadas", action="peliculas", text_blod=True,
viewcontent='movie', url=host + 'peliculas/netflix?filtro=fecha_creacion', viewmode="movie_with_plot"),
item.clone(title="Por año", action="p_portipo", text_blod=True, extra="Películas Por año",
viewcontent='movie', url=host, viewmode="movie_with_plot"),
item.clone(title="Géneros", action="p_portipo", text_blod=True, extra='movie',
viewcontent='movie', url=host+'netflix', viewmode="movie_with_plot")]
thumbnail=get_thumb('search.png'), url=host + 'peliculas')]
return itemlist
@@ -116,12 +98,52 @@ def menuseries(item):
item.clone(title="Recíen Agregadas", action="series", text_blod=True, extra='serie', mediatype="tvshow",
viewcontent='tvshow', url=host + 'series?filtro=fecha_actualizacion', viewmode="tvshow_with_plot"),
item.clone(title="Géneros", action="p_portipo", text_blod=True, extra='serie',
viewcontent='movie', url=host+'series', viewmode="movie_with_plot"),
item.clone(title="Buscar", action="search", text_blod=True, extra='buscars',
thumbnail=get_thumb('search.png'), url=host+'series')]
item.clone(title="Géneros", action="p_portipo", text_blod=True, extra='serie',
viewcontent='movie', url=host + 'series', viewmode="movie_with_plot"),
item.clone(title="Buscar", action="search", text_blod=True, extra='buscars',
thumbnail=get_thumb('search.png'), url=host + 'series')]
return itemlist
def flixmenu(item):
logger.info()
itemlist = [item.clone(title="Películas", action="flixmovies", text_blod=True, extra='movie', mediatype="movie",
viewcontent='movie', viewmode="tvshow_with_plot"),
item.clone(title="Series", action="flixtvshow", text_blod=True, extra='serie', mediatype="tvshow",
viewcontent='tvshow', viewmode="tvshow_with_plot"),
item.clone(title="Buscar", action="search", text_blod=True,
thumbnail=get_thumb('search.png'), url=host + 'buscar')]
return itemlist
def flixmovies(item):
logger.info()
itemlist = [item.clone(title="Novedades", action="peliculas", text_blod=True, url=host + 'peliculas/netflix?filtro=fecha_actualizacion',
viewcontent='movie', viewmode="movie_with_plot"),
item.clone(title="Más Vistas", action="peliculas", text_blod=True,
viewcontent='movie', url=host + 'peliculas/netflix?filtro=visitas', viewmode="movie_with_plot"),
item.clone(title="Recíen Agregadas", action="peliculas", text_blod=True,
viewcontent='movie', url=host + 'peliculas/netflix?filtro=fecha_creacion', viewmode="movie_with_plot"),
item.clone(title="Buscar", action="search", text_blod=True, extra="buscarp",
thumbnail=get_thumb('search.png'), url=host + 'peliculas/netflix')]
return itemlist
def flixtvshow(item):
logger.info()
itemlist = [item.clone(title="Novedades", action="series", text_blod=True, url=host + 'series/netflix?filtro=fecha_actualizacion',
viewcontent='tvshow', viewmode="movie_with_plot"),
item.clone(title="Más Vistas", action="series", text_blod=True,
viewcontent='tvshow', url=host + 'series/netflix?filtro=visitas', viewmode="movie_with_plot"),
item.clone(title="Recíen Agregadas", action="series", text_blod=True,
viewcontent='tvshow', url=host + 'series/netflix?filtro=fecha_creacion', viewmode="movie_with_plot"),
item.clone(title="Buscar", action="search", text_blod=True, extra="buscars",
thumbnail=get_thumb('search.png'), url=host + 'series/netflix')]
return itemlist
@@ -130,9 +152,8 @@ def p_portipo(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)
# logger.info(data)
action = ''
patron = '<li class="item"><a href="([^"]+)" class="category">.*?' # url
patron = '<li class="item"><a href="([^"]+)" class="category">.*?' # url
patron += '<div class="[^<]+<img class="[^"]+" src="/([^"]+)"></div><div class="[^"]+">([^<]+)</div>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
@@ -140,9 +161,9 @@ def p_portipo(item):
action = 'peliculas'
elif item.extra == 'serie':
action = 'series'
itemlist.append(item.clone(action = action,
title = scrapedtitle,
url = scrapedurl,
itemlist.append(item.clone(action=action,
title=scrapedtitle,
url=scrapedurl,
thumbnail=scrapedthumbnail
))
itemlist.sort(key=lambda it: it.title)
@@ -154,10 +175,9 @@ def peliculas(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)
# logger.info(data)
patron = '<img class="posterentrada" src="/([^"]+)".*?' # img
patron += '<a href="([^"]+)">.*?' # url
patron += '<p class="description_poster">.*?\(([^<]+)\)</p>.*?' # year
patron += '<p class="description_poster">.*?\(([^<]+)\)</p>.*?' # year
patron += '<div class="Description"> <div>([^<]+)</div>.*?' # plot
patron += '<strong>([^<]+)</strong></h4>' # title
@@ -168,12 +188,13 @@ def peliculas(item):
item.plot = plot
itemlist.append(Item(channel=item.channel, action="findvideos", contentTitle=scrapedtitle,
infoLabels={"year":year}, thumbnail=host+scrapedthumbnail,
infoLabels={"year": year}, thumbnail=host + scrapedthumbnail,
url=scrapedurl, title=scrapedtitle, plot=plot))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
pagination = scrapertools.find_single_match(data, '<li><a href="([^"]+)" rel="next">')
pagination = scrapertools.find_single_match(
data, '<li><a href="([^"]+)" rel="next">')
if pagination:
itemlist.append(Item(channel=__channel__, action="peliculas", title="» Siguiente »",
@@ -201,13 +222,6 @@ def search(item, texto):
return []
pagination = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />')
if pagination:
itemlist.append(Item(channel=item.channel, action="sub_search",
title="» Siguiente »", url=pagination, thumbnail=get_thumb("next.png")))
return itemlist
def newest(categoria):
logger.info()
itemlist = []
@@ -241,19 +255,19 @@ def series(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)
# logger.info(data)
patron = '<img class="portada" src="/([^"]+)"><[^<]+><a href="([^"]+)".*?' # img, url
patron += 'class="link-title"><h2>([^<]+)</h2>' # title
patron = '<img class="portada" src="/([^"]+)"><[^<]+><a href="([^"]+)".*?'
patron += 'class="link-title"><h2>([^<]+)</h2>' # title
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
itemlist.append(Item(channel=__channel__, title=scrapedtitle, extra='serie',
url=scrapedurl, thumbnail=host+scrapedthumbnail,
url=scrapedurl, thumbnail=host + scrapedthumbnail,
contentSerieName=scrapedtitle, show=scrapedtitle,
action="temporadas", contentType='tvshow'))
tmdb.set_infoLabels(itemlist, __modo_grafico__)
pagination = scrapertools.find_single_match(data, '<li><a href="([^"]+)" rel="next">')
pagination = scrapertools.find_single_match(
data, '<li><a href="([^"]+)" rel="next">')
if pagination:
itemlist.append(Item(channel=__channel__, action="series", title="» Siguiente »", url=pagination,
@@ -264,10 +278,8 @@ def series(item):
def temporadas(item):
logger.info()
itemlist = []
from core import jsontools
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)
# logger.info(data)
patron = '<img class="posterentrada" src="/([^"]+)" alt="\w+\s*(\w+).*?'
patron += 'class="abrir_temporada" href="([^"]+)">' # img, season
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -275,20 +287,21 @@ def temporadas(item):
if len(matches) > 1:
for scrapedthumbnail, temporada, url in matches:
new_item = item.clone(action="episodios", season=temporada, url=url,
thumbnail=host+scrapedthumbnail, extra='serie')
thumbnail=host + scrapedthumbnail, extra='serie')
new_item.infoLabels['season'] = temporada
new_item.extra = ""
itemlist.append(new_item)
tmdb.set_infoLabels(itemlist, __modo_grafico__)
for i in itemlist:
i.title = "%s. %s" % (i.infoLabels['season'], i.infoLabels['tvshowtitle'])
i.title = "%s. %s" % (
i.infoLabels['season'], i.infoLabels['tvshowtitle'])
if i.infoLabels['title']:
# Si la temporada tiene nombre propio añadírselo al titulo del item
i.title += " - %s" % (i.infoLabels['title'])
if 'poster_path' in i.infoLabels:
# Si la temporada tiene poster propio remplazar al de la serie
i.thumbnail = i.infoLabels['poster_path']
itemlist.sort(key=lambda it: it.title)
# itemlist.sort(key=lambda it: it.title)
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
@@ -304,26 +317,25 @@ def episodios(item):
from core import jsontools
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)
# logger.info(data)
post_link = '%sentradas/abrir_temporada' % host
token = scrapertools.find_single_match(data, 'data-token="([^"]+)">')
token = scrapertools.find_single_match(data, 'data-token="([^"]+)">')
data_t = scrapertools.find_single_match(data, '<a data-s="[^"]+" data-t="([^"]+)"')
data_s = scrapertools.find_single_match(data, '<a data-s="([^"]+)" data-t="[^"]+"')
post= {'t':data_t, 's':data_s, '_token':token}
post = {'t': data_t, 's': data_s, '_token': token}
post = urllib.urlencode(post)
new_data = httptools.downloadpage(post_link, post=post).data
# json_data = jsontools.load(new_data)
# logger.info(new_data)
patron = '"nepisodio":"([^"]+)",[^,]+,"ntemporada":"([^"]+)".*?"url_directa":"([^"]+)",.*?"titulo":"([^"]+)",'
json_data = jsontools.load(new_data)
matches = re.compile(patron, re.DOTALL).findall(new_data)
for episode, season, scrapedurl, scrapedname in matches:
scrapedurl = scrapedurl.replace('\\', '')
logger.info('###name%s' % scrapedname)
for element in json_data['data']['episodios']:
scrapedname = element['titulo']
episode = element['metas_formateadas']['nepisodio']
season = element['metas_formateadas']['ntemporada']
scrapedurl = element['url_directa']
if 'season' in item.infoLabels and int(item.infoLabels['season']) != int(season):
continue
title = "%sx%s: %s" % (season, episode.zfill(2), scrapertools.unescape(scrapedname))
title = "%sx%s: %s" % (season, episode.zfill(
2), scrapertools.unescape(scrapedname))
new_item = item.clone(title=title, url=scrapedurl, action="findvideos", text_color=color3, fulltitle=title,
contentType="episode", extra='serie')
if 'infoLabels' not in new_item:
@@ -338,7 +350,8 @@ def episodios(item):
for i in itemlist:
if i.infoLabels['title']:
# Si el capitulo tiene nombre propio añadírselo al titulo del item
i.title = "%sx%s: %s" % (i.infoLabels['season'], i.infoLabels['episode'], i.infoLabels['title'])
i.title = "%sx%s: %s" % (
i.infoLabels['season'], i.infoLabels['episode'], i.infoLabels['title'])
if 'poster_path' in i.infoLabels:
# Si el capitulo tiene imagen propia remplazar al poster
i.thumbnail = i.infoLabels['poster_path']
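
For reference, the abrir_temporada request used above is just a form POST guarded by the page's CSRF token. A minimal stdlib sketch of the same flow, assuming Python 2 (as the channel itself) and the markup scraped above; a real request would also need the site's cookies and headers, which httptools handles for the channel, and the host value is an assumption:

# Hedged sketch of the season-opening POST (endpoint and attribute names are
# the ones scraped above; the host URL is an assumption).
import re
import json
import urllib
import urllib2

def open_season(page_url, host='https://www.pelisplay.tv/'):
    html = urllib2.urlopen(page_url).read()
    token = re.search(r'data-token="([^"]+)"', html).group(1)   # CSRF token
    data_t = re.search(r'data-t="([^"]+)"', html).group(1)
    data_s = re.search(r'data-s="([^"]+)"', html).group(1)
    post = urllib.urlencode({'t': data_t, 's': data_s, '_token': token})
    reply = urllib2.urlopen(host + 'entradas/abrir_temporada', post).read()
    return json.loads(reply)['data']['episodios']               # episode dicts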
@@ -355,22 +368,20 @@ def episodios(item):
def findvideos(item):
logger.info()
from lib import generictools
from core import jsontools
import urllib
import base64
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)
patron = 'data-player="([^"]+)"[^>]+>([^<]+)</div>.*?' # data-player, servername
patron += '<td class="[^"]+">([^<]+)</td><td class="[^"]+">([^<]+)</td>' # quality, lang
patron = 'data-player="([^"]+)"[^>]+>([^<]+)</div>.*?'
patron += '<td class="[^"]+">([^<]+)</td><td class="[^"]+">([^<]+)</td>'
matches = re.compile(patron, re.DOTALL).findall(data)
for data_player, servername, quality, lang in matches:
post_link = '%sentradas/procesar_player' % host
token = scrapertools.find_single_match(data, 'data-token="([^"]+)">')
post= {'data':data_player, 'tipo':'videohost', '_token':token}
token = scrapertools.find_single_match(data, 'data-token="([^"]+)">')
post = {'data': data_player, 'tipo': 'videohost', '_token': token}
post = urllib.urlencode(post)
new_data = httptools.downloadpage(post_link, post=post).data
json_data = jsontools.load(new_data)
@@ -378,12 +389,13 @@ def findvideos(item):
if 'pelisplay.tv/embed/' in url:
new_data = httptools.downloadpage(url).data
url = scrapertools.find_single_match(new_data, '"file":"([^"]+)",').replace('\\', '')
url = scrapertools.find_single_match(
new_data, '"file":"([^"]+)",').replace('\\', '')
elif 'fondo_requerido' in url:
link = scrapertools.find_single_match(url, '=(.*?)&fondo_requerido').partition('&')[0]
post_link = '%sprivate/plugins/gkpluginsphp.php' % host
post= {'link':link}
post = {'link': link}
post = urllib.urlencode(post)
new_data2 = httptools.downloadpage(post_link, post=post).data
url = scrapertools.find_single_match(new_data2, '"link":"([^"]+)"').replace('\\', '')
@@ -391,14 +403,15 @@ def findvideos(item):
lang = lang.lower().strip()
idioma = {'latino': '[COLOR cornflowerblue](LAT)[/COLOR]',
'castellano': '[COLOR green](CAST)[/COLOR]',
'subtitulado': '[COLOR red](VOS)[/COLOR]'}
'subtitulado': '[COLOR red](VOSE)[/COLOR]'}
if lang in idioma:
lang = idioma[lang]
title = "Ver en: [COLOR yellowgreen](%s)[/COLOR] [COLOR yellow](%s)[/COLOR] %s" % (servername.title(), quality, lang)
itemlist.append(item.clone(channel=__channel__, title=title, action='play', language=lang, quality=quality, url=url))
title = "Ver en: [COLOR yellowgreen](%s)[/COLOR] [COLOR yellow](%s)[/COLOR] %s" % (
servername.title(), quality, lang)
itemlist.append(item.clone(channel=__channel__, title=title,
action='play', language=lang, quality=quality, url=url))
itemlist = servertools.get_servers_itemlist(itemlist)
itemlist.sort(key=lambda it: it.language, reverse=False)

View File

@@ -1,4 +1,4 @@
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
import re
@@ -32,6 +32,8 @@ def mainlist(item):
itemlist.append(
Item(channel=item.channel, action="lista", title="Series", url=host, thumbnail=thumb_series, page=0))
itemlist.append(
Item(channel=item.channel, action="lista", title="Live Action", url=host+"/liveaction", thumbnail=thumb_series, page=0))
itemlist = renumbertools.show_option(item.channel, itemlist)
autoplay.show_option(item.channel, itemlist)
return itemlist
@@ -45,9 +47,12 @@ def lista(item):
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<a href="([^"]+)" '
patron += 'class="link">.+?<img src="([^"]+)".*?'
if item.title == "Series":
patron += 'class="link">.+?<img src="([^"]+)".*?'
else:
patron += 'class="link-la">.+?<img src="([^"]+)".*?'
patron += 'title="([^"]+)">'
if item.url==host:
if item.url==host or item.url==host+"/liveaction":
a=1
else:
num=(item.url).split('-')
@@ -150,25 +155,24 @@ def findvideos(item):
_sa = scrapertools.find_single_match(data, 'var _sa = (true|false);')
_sl = scrapertools.find_single_match(data, 'var _sl = ([^;]+);')
sl = eval(_sl)
#buttons = scrapertools.find_multiple_matches(data, '<button href="" class="selop" sl="([^"]+)">([^<]+)</button>')
#for id, title in buttons:
new_url = golink(0, _sa, sl)
data = httptools.downloadpage(new_url).data
_x0x = scrapertools.find_single_match(data, 'var x0x = ([^;]+);')
x0x = eval(_x0x)
url = resolve(x0x[4], base64.b64decode(x0x[1]))
if 'download' in url:
url = url.replace('download', 'preview')
title = '%s'
itemlist.append(Item(channel=item.channel, title=title, url=url, action='play', language='latino',
#buttons = scrapertools.find_multiple_matches(data, '<button href="" class="selop" sl="([^"]+)">')
buttons = [0,1,2]
for id in buttons:
new_url = golink(int(id), _sa, sl)
data_new = httptools.downloadpage(new_url).data
_x0x = scrapertools.find_single_match(data_new, 'var x0x = ([^;]+);')
try:
x0x = eval(_x0x)
url = resolve(x0x[4], base64.b64decode(x0x[1]))
if 'download' in url:
url = url.replace('download', 'preview')
title = '%s'
itemlist.append(Item(channel=item.channel, title=title, url=url, action='play', language='latino',
infoLabels=item.infoLabels))
except Exception as e:
logger.info(e)
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
@@ -210,4 +214,4 @@ def resolve(value1, value2):
lista[j] = k
reto += chr(ord(value2[i]) ^ lista[(lista[m] + lista[j]) % 256])
return reto
return reto
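
Side note: the loop above is recognizably the output stage of the RC4 stream cipher — lista is the 256-byte permutation table and each output byte is the input byte XORed with lista[(lista[m] + lista[j]) % 256]. A self-contained sketch of the full cipher for comparison (names and values are illustrative, not taken from the channel):

# Minimal RC4 (KSA + PRGA) sketch for comparison with resolve(); illustrative only.
def rc4(key, data):
    S = list(range(256))                 # permutation table, like 'lista' above
    j = 0
    for i in range(256):                 # key-scheduling algorithm (KSA)
        j = (j + S[i] + ord(key[i % len(key)])) % 256
        S[i], S[j] = S[j], S[i]
    i = j = 0
    out = []
    for ch in data:                      # output stage (PRGA), as in resolve()
        i = (i + 1) % 256
        j = (j + S[i]) % 256
        S[i], S[j] = S[j], S[i]
        out.append(chr(ord(ch) ^ S[(S[i] + S[j]) % 256]))
    return ''.join(out)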

View File

@@ -0,0 +1,77 @@
{
"id": "tvpelis",
"name": "TvPelis",
"active": true,
"adult": false,
"language": ["lat", "cast", "*"],
"thumbnail": "http://www.tvpelis.tv/wp-content/themes/tvpelistv3/images/logo.png",
"banner": "",
"categories": [
"movie",
"vos"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - Latino",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_castellano",
"type": "bool",
"label": "Incluir en Novedades - Castellano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - terror",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"LAT",
"CAST",
"VOSE"
]
}
]
}
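
One note on the filter_languages entry: for "type": "list" settings the stored value is, by convention, the index into lvalues (hence "default": 0 for "No filtrar"). A one-line illustration under that assumption:

# Assumption: a "list" setting stores the selected index into lvalues.
lvalues = ["No filtrar", "LAT", "CAST", "VOSE"]
selected = 2  # what config.get_setting('filter_languages', 'tvpelis') might return
print lvalues[selected]  # -> CAST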

View File

@@ -0,0 +1,374 @@
# -*- coding: utf-8 -*-
# -*- Channel TvPelis -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
host = 'http://www.tvpelis.tv/'
IDIOMAS = {'Latino': 'LAT', 'latino': 'LAT', 'Español':'CAST', 'castellano': 'CAST', 'Vose':'VOSE', 'vose':'VOSE'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['xdrive', 'bitertv', 'okru']
def get_source(url, referer=None):
logger.info()
if referer is None:
data = httptools.downloadpage(url).data
else:
data = httptools.downloadpage(url, headers={'Referer':referer}).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
logger.debug(data)
return data
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(Item(channel=item.channel, title="Películas", action="movies_menu",
thumbnail=get_thumb('movies', auto=True)))
itemlist.append(Item(channel=item.channel, title="Series", action="list_all", url=host+'genero/series/',
thumbnail=get_thumb('tvshows', auto=True)))
itemlist.append(Item(channel=item.channel, title="Documentales", action="list_all", url=host + 'genero/documentales/',
thumbnail=get_thumb('documental', auto=True)))
# itemlist.append(Item(channel=item.channel, title="Latino", action="list_all", url=host + 'genero/latino/',
# thumbnail=get_thumb('lat', auto=True)))
#
# itemlist.append(Item(channel=item.channel, title="VOSE", action="list_all", url=host + 'genero/vose/',
# thumbnail=get_thumb('vose', auto=True)))
#
# itemlist.append(Item(channel=item.channel, title="Generos", action="section",
# thumbnail=get_thumb('genres', auto=True)))
#
# itemlist.append(Item(channel=item.channel, title="Por Años", action="section",
# thumbnail=get_thumb('year', auto=True)))
itemlist.append(Item(channel=item.channel, title='Buscar', action="search", url=host + '?s=',
thumbnail=get_thumb('search', auto=True)))
autoplay.show_option(item.channel, itemlist)
return itemlist
def movies_menu(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, title="Todas", action="list_all", url=host,
thumbnail=get_thumb('all', auto=True)))
itemlist.append(Item(channel=item.channel, title="Castellano", action="list_all", url=host + 'genero/castellano/',
thumbnail=get_thumb('cast', auto=True)))
itemlist.append(Item(channel=item.channel, title="Latino", action="list_all", url=host + 'genero/latino/',
thumbnail=get_thumb('lat', auto=True)))
itemlist.append(Item(channel=item.channel, title="VOSE", action="list_all", url=host + 'genero/vose/',
thumbnail=get_thumb('vose', auto=True)))
itemlist.append(Item(channel=item.channel, title="Hindú", action="list_all", url=host + 'genero/hindu/',
thumbnail=get_thumb('hindu', auto=True)))
itemlist.append(Item(channel=item.channel, title="Generos", action="section",
thumbnail=get_thumb('genres', auto=True)))
itemlist.append(Item(channel=item.channel, title="Por Años", action="section",
thumbnail=get_thumb('year', auto=True)))
return itemlist
def list_all(item):
logger.info()
itemlist = []
full_data = get_source(item.url)
data = scrapertools.find_single_match(full_data,
"<div id='z1'><section><div id='main'><div class='breadcrumbs'>(.*?)</ul>")
logger.debug(data)
patron = 'article id=.*?<a href="([^"]+)".*?<img src="([^"]+)" alt="([^"]+)".*?'
patron += 'class="selectidioma">(.*?)class="fixyear".*?class="genero">([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, lang_data, type in matches:
url = scrapedurl
lang = get_language(lang_data)
year = scrapertools.find_single_match(scrapedtitle, '(\d{4})')
scrapedtitle = scrapertools.find_single_match(scrapedtitle, '([^\(]+)\(?').strip()
#scrapedtitle = scrapedtitle.replace('Latino','')
scrapedtitle = re.sub('latino|español|sub|audio','', scrapedtitle.lower()).capitalize()
if not config.get_setting('unify'):
title = '%s %s' % (scrapedtitle, lang)
else:
title = scrapedtitle
thumbnail = 'https:'+scrapedthumbnail
new_item = Item(channel=item.channel, title=title, url=url, thumbnail=thumbnail, language = lang,
infoLabels={'year':year})
logger.debug(type)
if 'series' not in type.lower():
new_item.contentTitle = scrapedtitle
new_item.action = 'findvideos'
else:
new_item.contentSerieName = scrapedtitle
new_item.action = 'seasons'
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Paginacion
if itemlist != []:
next_page = scrapertools.find_single_match(full_data, '<link rel="next" href="([^"]+)"')
if next_page != '':
itemlist.append(Item(channel=item.channel, action="list_all", title='Siguiente >>>', url=next_page))
return itemlist
def section(item):
logger.info()
itemlist = []
data=get_source(host)
if item.title == 'Géneros':
data = scrapertools.find_single_match(data, '<h2>Categorias de Peliculas</h2>(.*?)</ul>')
patron = 'href="([^"]+)"> <em>Peliculas de </em>([^<]+)<span>'
if item.title == 'Por Años':
data = scrapertools.find_single_match(data, '>Filtrar por A&ntilde;o</option>(.*?)</select>')
patron = 'value="([^"]+)">Peliculas del A&ntilde;o (\d{4})<'
matches = re.compile(patron, re.DOTALL).findall(data)
for url, title in matches:
itemlist.append(Item(channel=item.channel, title=title.strip(), url=url, action='list_all'))
return itemlist
def seasons(item):
logger.info()
itemlist=[]
all_seasons = []
data=get_source(item.url)
patron='Temporada \d+'
matches = re.compile(patron, re.DOTALL).findall(data)
action = 'episodesxseasons'
if len(matches) == 0:
matches.append('1')
action = 'aios'
infoLabels = item.infoLabels
for season in matches:
season = season.lower().replace('temporada', '').strip()  # strip the leading space left by the replace
infoLabels['season']=season
title = 'Temporada %s' % season
if title not in all_seasons:
itemlist.append(Item(channel=item.channel, title=title, url=item.url, action=action,
infoLabels=infoLabels))
all_seasons.append(title)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'episodios':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName))
return itemlist
def episodios(item):
logger.info()
itemlist = []
templist = seasons(item)
for tempitem in templist:
itemlist += episodesxseasons(tempitem)
return itemlist
def aios(item):
logger.info()
itemlist = []
data=get_source(item.url)
patron='href="([^"]+)" rel="bookmark"><i class="fa icon-chevron-sign-right"></i>.*?Capitulo (?:00|)(\d+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
for scrapedurl, scrapedepisode in matches:
infoLabels['episode'] = scrapedepisode
url = item.url+scrapedurl
title = '%sx%s - Episodio %s' % (infoLabels['season'], infoLabels['episode'], infoLabels['episode'])
itemlist.append(Item(channel=item.channel, title= title, url=url, action='findvideos', type=item.type,
infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def episodesxseasons(item):
logger.info()
itemlist = []
data=get_source(item.url)
patron='<a href="([^"]+)".*?</i>.*?Temporada %s, Episodio (\d+) - ([^<]+)<' % item.infoLabels['season']
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
for scrapedurl, scrapedepisode, scrapedtitle in matches:
infoLabels['episode'] = scrapedepisode
url = scrapedurl
title = '%sx%s - %s' % (infoLabels['season'], infoLabels['episode'], scrapedtitle)
itemlist.append(Item(channel=item.channel, title= title, url=url, action='findvideos', type=item.type,
infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def get_language(lang_data):
logger.info()
language = []
lang_list = scrapertools.find_multiple_matches(lang_data, '<em class="bandera sp([^"]+)"')
for lang in lang_list:
if not lang in IDIOMAS:
lang = 'vose'
lang = IDIOMAS[lang]
if lang not in language:
language.append(lang)
return language
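
A quick illustration of the mapping, assuming the get_language just defined is in scope (the flag markup is invented, in the site's format):

# Hypothetical flag markup; unknown classes fall back to VOSE.
sample = '<em class="bandera splatino"></em><em class="bandera spfrances"></em>'
print get_language(sample)  # -> ['LAT', 'VOSE']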
def findvideos(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '<div id="([^"]+)".?class="tab_part.*?">.?<iframe src="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
if len(matches) == 0:
patron = 'class="(rep)".*?src="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for option, player in matches:
if 'ok.ru' in player:
url = 'http:' + player
elif 'rutube' in player:
url = 'http:' + player + "|%s" % item.url
elif 'http' not in player:
hidden_data = get_source('%s%s' % (host, player))
url = scrapertools.find_single_match(hidden_data, '<iframe src="([^"]+)"')
else:
url = player
lang = scrapertools.find_single_match(data, '<li rel="%s">([^<]+)</li>' % option)
if lang.lower() in ['online', 'trailer']:
continue
if lang in IDIOMAS:
lang = IDIOMAS[lang]
if not config.get_setting('unify'):
title = ' [%s]' % lang
else:
title = ''
if url != '':
itemlist.append(Item(channel=item.channel, title='%s'+title, url=url, action='play', language=lang,
infoLabels=item.infoLabels))
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
if item.contentType != 'episode':
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
url=item.url, action="add_pelicula_to_library", extra="findvideos",
contentTitle=item.contentTitle))
return itemlist
def search(item, texto):
logger.info()
itemlist = []
texto = texto.replace(" ", "+")
item.url = item.url + texto
if texto != '':
try:
return list_all(item)
except:
itemlist.append(item.clone(url='', title='No hay elementos...', action=''))
return itemlist
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == 'peliculas':
item.url = host
elif categoria == 'latino':
item.url = host + 'filter?language=2'
elif categoria == 'castellano':
item.url = host + 'filter?language=1'
elif categoria == 'infantiles':
item.url = host + 'genre/25/infantil'
elif categoria == 'terror':
item.url = host + 'genre/15/terror'
item.pages=3
itemlist = list_all(item)
if itemlist[-1].title == 'Siguiente >>>':
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist

View File

@@ -415,9 +415,11 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
season_episode = scrapertools.get_season_and_episode(e.title)
# Si se ha marcado la opción de url de emergencia, se añade ésta a cada episodio después de haber ejecutado Findvideos del canal
if e.emergency_urls and isinstance(e.emergency_urls, dict): del e.emergency_urls #Borramos trazas anterioires
if e.emergency_urls and isinstance(e.emergency_urls, dict): del e.emergency_urls #Borramos trazas anteriores
json_path = filetools.join(path, ("%s [%s].json" % (season_episode, e.channel)).lower()) #Path del .json del episodio
if emergency_urls_stat == 1 and not e.emergency_urls and e.contentType == 'episode': #Guardamos urls de emergencia?
if not silent:
p_dialog.update(0, 'Cacheando enlaces y archivos .torrent...', e.title) #progress dialog
if json_path in ficheros: #Si existe el .json sacamos de ahí las urls
if overwrite: #pero solo si se se sobrescriben los .json
json_epi = Item().fromjson(filetools.read(json_path)) #Leemos el .json
@@ -433,6 +435,8 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
if e.emergency_urls: del e.emergency_urls
emergency_urls_succ = True #... es un éxito y vamos a marcar el .nfo
elif emergency_urls_stat == 3 and e.contentType == 'episode': #Actualizamos urls de emergencia?
if not silent:
p_dialog.update(0, 'Cacheando enlaces y archivos .torrent...', e.title) #progress dialog
e = emergency_urls(e, channel, json_path) #generamos las urls
if e.emergency_urls: #Si ya tenemos urls...
emergency_urls_succ = True #... es un éxito y vamos a marcar el .nfo
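
As an aside, the per-episode .json path built above follows a fixed, lowercased pattern; a tiny sketch with made-up values (the helper name is ours, the format string is the one used above, and os.path.join stands in for filetools.join):

# How the episode cache file name is composed (values are illustrative).
import os

def episode_json_path(path, season_episode, channel):
    return os.path.join(path, ("%s [%s].json" % (season_episode, channel)).lower())

print episode_json_path('/storage/series/Foo', '1x02', 'PelisPlay')
# -> /storage/series/Foo/1x02 [pelisplay].json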

Binary file not shown.


Binary file not shown.


View File

@@ -26,7 +26,7 @@ def find_in_text(regex, text, flags=re.IGNORECASE | re.DOTALL):
class UnshortenIt(object):
_adfly_regex = r'adf\.ly|j\.gs|q\.gs|u\.bb|ay\.gy|atominik\.com|tinyium\.com|microify\.com|threadsphere\.bid|clearload\.bid|activetect\.net|swiftviz\.net'
_adfly_regex = r'adf\.ly|j\.gs|q\.gs|u\.bb|ay\.gy|atominik\.com|tinyium\.com|microify\.com|threadsphere\.bid|clearload\.bid|activetect\.net|swiftviz\.net|briskgram\.net'
_linkbucks_regex = r'linkbucks\.com|any\.gs|cash4links\.co|cash4files\.co|dyo\.gs|filesonthe\.net|goneviral\.com|megaline\.co|miniurls\.co|qqc\.co|seriousdeals\.net|theseblogs\.com|theseforums\.com|tinylinks\.co|tubeviral\.com|ultrafiles\.net|urlbeat\.net|whackyvidz\.com|yyv\.co'
_adfocus_regex = r'adfoc\.us'
_lnxlu_regex = r'lnx\.lu'

Binary file not shown.


View File

@@ -210,7 +210,7 @@ def render_items(itemlist, parent_item):
if item.fanart:
fanart = item.fanart
else:
fanart = os.path.join(config.get_runtime_path(), "fanart1.jpg")
fanart = os.path.join(config.get_runtime_path(), "fanart-xmas.jpg")
# Creamos el listitem
#listitem = xbmcgui.ListItem(item.title)

View File

@@ -4,7 +4,7 @@
"ignore_urls": [],
"patterns": [
{
"pattern": "(jawcloud.co/embed-([A-z0-9]+))",
"pattern": "(jawcloud.co/(?:embed-|)([A-z0-9]+))",
"url": "https://\\1.html"
}
]
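
The change makes the embed- prefix optional, so both plain and embed links now match: group 1 keeps the whole jawcloud.co/... fragment and the template appends .html to it (note [A-z] is broader than [A-Za-z], a quirk kept from the original). A hedged sketch of how such a pattern/url pair can be applied — a simplification, not the addon's actual matching code:

# Simplified pattern/template application, mirroring the JSON entry above.
import re

pattern = r"(jawcloud.co/(?:embed-|)([A-z0-9]+))"
template = r"https://\1.html"

for text in ("https://jawcloud.co/embed-a1b2c3", "https://jawcloud.co/a1b2c3"):
    m = re.search(pattern, text)
    if m:
        print m.expand(template)
# -> https://jawcloud.co/embed-a1b2c3.html
# -> https://jawcloud.co/a1b2c3.html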

View File

@@ -7,6 +7,9 @@ from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "The file you were looking for could not be found" in data:
return False, "[jawcloud] El archivo ha ido borrado"
return True, ""

View File

@@ -0,0 +1,42 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "https://embed.mystream.to/(\\w+)",
"url": "https://embed.mystream.to/\\1"
}
]
},
"free": true,
"id": "mystream",
"name": "mystream",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://i.postimg.cc/t43grQdh/mystream1.png"
}

View File

@@ -0,0 +1,34 @@
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# Conector mystream By Alfa development Group
# --------------------------------------------------------
import re
from core import httptools
from core import scrapertools
from lib.aadecode import decode as aadecode
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url)
if data.code == 404:
return False, "[mystream] El archivo no existe o ha sido borrado"
if "<title>video is no longer available" in data.data:
return False, "[mystream] El archivo no existe o ha sido borrado"
return True, ""
def get_video_url(page_url, premium = False, user = "", password = "", video_password = ""):
logger.info("url=" + page_url)
video_urls = []
headers = {'referer': page_url}
data = httptools.downloadpage(page_url, headers=headers).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
code = scrapertools.find_single_match(data, '(?s)<script>\s*゚ω゚(.*?)</script>').strip()
text_decode = aadecode(code)
matches = scrapertools.find_multiple_matches(text_decode, "'src', '([^']+)'")
for url in matches:
video_urls.append(['mystream [mp4]',url])
return video_urls
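
The key step above is the AAencode stage: the page hides its player setup in ゚ω゚-obfuscated JavaScript, and aadecode recovers plain script text that the final 'src', '...' regex can read. A small illustration with an invented decoded script (only the regex is real):

# Invented sample of what aadecode might return; the regex is the one used above.
import re

decoded = "video.setAttribute('src', 'https://cdn.example.com/v.mp4');"
for url in re.findall(r"'src', '([^']+)'", decoded):
    print 'mystream [mp4]', url  # -> mystream [mp4] https://cdn.example.com/v.mp4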

View File

@@ -0,0 +1,42 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "(http://rutube.ru/play/embed/[a-zA-Z0-9]+.p=[a-zA-Z0-9-]+)",
"url": "\\1"
}
]
},
"free": true,
"id": "rutube",
"name": "rutube",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "http://www.cubancouncil.com/uploads/project_images/rutube_branding_black.png.648x0_q90_replace_alpha.jpg"
}

View File

@@ -0,0 +1,46 @@
# -*- coding: utf-8 -*-
# -*- Server Rutube -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
from core import httptools
from core import scrapertools
from platformcode import logger
from core import jsontools
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = get_source(page_url)
if "File was deleted" in data or "File Not Found" in data:
return False, "[Rutube] El video ha sido borrado"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
import urllib
video_urls = []
referer = ''
id = ''
if "|" in page_url:
page_url = page_url.replace('?', '|')
page_url, id, referer = page_url.split("|", 2)
header = {'referer':referer}
referer = urllib.urlencode(header)
"http://rutube.ru/api/play/options/10531822/?format=json&sqr4374_compat=1&no_404=true&referer=http%3A%2F%2Frutube.ru%2Fplay%2Fembed%2F10531822%3Fp%3DeDk8m91H0UBPOCUuFicFbQ&p=eDk8m91H0UBPOCUuFicFbQ"
base_link = page_url.replace("/play/embed/", "/api/play/options/")
new_link = base_link + '/?format=json&sqr4374_compat=1&no_404=true&%s&%s' % (referer, id)
data = httptools.downloadpage(new_link).data
json_data = jsontools.load(data)
video_urls.append(['Rutube', json_data['video_balancer']['m3u8']])
return video_urls
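
In short, the connector rewrites the embed URL into the playback-options API endpoint and takes the HLS manifest from video_balancer.m3u8 in the JSON reply; the commented example URL above shows the final shape. A tiny sketch of just the URL rewrite, with values taken from that example and no network access:

# Embed URL -> playback-options API URL, as done above.
embed = "http://rutube.ru/play/embed/10531822?p=eDk8m91H0UBPOCUuFicFbQ"
page_url, _, query = embed.replace('?', '|').partition('|')
api = page_url.replace("/play/embed/", "/api/play/options/")
print api + '/?format=json&sqr4374_compat=1&no_404=true&' + query
# -> http://rutube.ru/api/play/options/10531822/?format=json&sqr4374_compat=1&no_404=true&p=eDk8m91H0UBPOCUuFicFbQ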

View File

@@ -3,13 +3,11 @@
# Conector UpVID By Alfa development Group
# --------------------------------------------------------
import re
import re, base64
from core import httptools
from core import scrapertools
from platformcode import logger
import re, base64
from lib.aadecode import decode as aadecode
from platformcode import logger
def test_video_exists(page_url):