I7PAEZ\paez
2018-12-12 21:32:50 +01:00
57 changed files with 3639 additions and 1081 deletions

View File

@@ -626,7 +626,7 @@ class platform(Platformtools):
# Get the channel the call came from and load the settings available for that channel
if not channelpath:
channelpath = inspect.currentframe().f_back.f_back.f_code.co_filename
channelname = os.path.basename(channelpath).replace(".py", "")
channelname = os.path.basename(channelpath).split(".")[0]
ch_type = os.path.basename(os.path.dirname(channelpath))
# If we don't have list_controls, they must be pulled from the channel's JSON
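The switch from replace(".py", "") to split(".")[0] matters when Kodi runs compiled modules and the calling frame's filename ends in ".pyc". A minimal sketch (hypothetical path):

    import os
    channelpath = "/addons/plugin.video.alfa/channels/foo.pyc"  # hypothetical
    base = os.path.basename(channelpath)   # "foo.pyc"
    print(base.replace(".py", ""))         # "fooc" -- old code leaves a stray "c"
    print(base.split(".")[0])              # "foo"  -- new code yields a clean name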

View File

@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="2.7.16" provider-name="Alfa Addon">
<addon id="plugin.video.alfa" name="Alfa" version="2.7.18" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -10,8 +10,8 @@
<extension point="xbmc.addon.metadata">
<summary lang="es">Navega con Kodi por páginas web.</summary>
<assets>
<icon>logo-cumple.png</icon>
<fanart>fanart1.jpg</fanart>
<icon>logo-n.jpg</icon>
<fanart>fanart-xmas.jpg</fanart>
<screenshot>resources/media/themes/ss/1.jpg</screenshot>
<screenshot>resources/media/themes/ss/2.jpg</screenshot>
<screenshot>resources/media/themes/ss/3.jpg</screenshot>
@@ -19,13 +19,15 @@
</assets>
<news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Arreglos[/B][/COLOR]
¤ DivxTotal ¤ Pelismagnet ¤ Subtorrents
¤ Todopleiculas ¤ Zonatorrent ¤ maxipelis24
¤ Danimados ¤ mediafire ¤ mp4upload
¤ vevio ¤ BlogHorror ¤ PelisPlus
¤ TuPeliculas ¤ VeSeriesOnline ¤ Actualizado pack +18
¤ jkanime ¤ newpct1 ¤ descargacineclasico
¤ DoramasMP4 ¤ cine24h ¤ ciberpeliculashd
¤ erotik ¤ pelis24 ¤ pelisplay
¤ serieslan ¤ anitoonstv
¤ Agradecimientos a @paeznet y @chivmalev por colaborar con esta versión
[COLOR green][B]Novedades[/B][/COLOR]
¤ vi2 ¤ tvpelis
¤ Agradecimientos a @w1s0 por colaborar con esta versión
</news>
<description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>

View File

@@ -124,12 +124,12 @@ def peliculas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '(?s)short_overlay.*?<a href="([^"]+)'
patron += '.*?img.*?src="([^"]+)'
patron += '.*?title="([^"]+).*?'
patron += 'data-postid="([^"]+)'
matches = scrapertools.find_multiple_matches(data, patron)
for url, thumbnail, titulo, datapostid in matches:
matches = scrapertools.find_multiple_matches(data, '(?s)shortstory cf(.*?)rate_post')
for datos in matches:
url = scrapertools.find_single_match(datos, 'href="([^"]+)')
titulo = scrapertools.find_single_match(datos, 'short_header">([^<]+)').strip()
datapostid = scrapertools.find_single_match(datos, 'data-postid="([^"]+)')
thumbnail = scrapertools.find_single_match(datos, 'img w.*?src="([^"]+)')
post = 'action=get_movie_details&postID=%s' %datapostid
data1 = httptools.downloadpage(host + "wp-admin/admin-ajax.php", post=post).data
idioma = "Latino"
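The rewrite above replaces one long multi-field regex with a two-step pattern: grab each entry's HTML block first, then pull every field out of that block separately, so one missing attribute no longer drops the whole entry. A self-contained sketch of the same idea, using plain re on hypothetical markup:

    import re
    html = '<div class="shortstory cf"><a href="/peli-1">Peli 1</a></div>rate_post'
    for block in re.findall(r'(?s)shortstory cf(.*?)rate_post', html):
        m = re.search(r'href="([^"]+)', block)
        url = m.group(1) if m else ""  # missing fields degrade gracefully
        print(url)                     # /peli-1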

View File

@@ -32,11 +32,11 @@ def mainlist(item):
itemlist = list()
itemlist.append(Item(channel=item.channel, action="lista", title="Series", url=host+"/lista-de-anime.php",
itemlist.append(Item(channel=item.channel, action="lista", title="Series", contentTitle="Series", url=host+"/lista-de-anime.php",
thumbnail=thumb_series, range=[0,19]))
itemlist.append(Item(channel=item.channel, action="lista", title="Películas", url=host+"/catalogo.php?g=&t=peliculas&o=0",
itemlist.append(Item(channel=item.channel, action="lista", title="Películas", contentTitle="Películas", url=host+"/catalogo.php?g=&t=peliculas&o=0",
thumbnail=thumb_series, range=[0,19] ))
itemlist.append(Item(channel=item.channel, action="lista", title="Especiales", url=host+"/catalogo.php?g=&t=especiales&o=0",
itemlist.append(Item(channel=item.channel, action="lista", title="Especiales", contentTitle="Especiales", url=host+"/catalogo.php?g=&t=especiales&o=0",
thumbnail=thumb_series, range=[0,19]))
itemlist.append(Item(channel=item.channel, action="search", title="Buscar",
thumbnail=thumb_series, range=[0,19]))
@@ -109,14 +109,14 @@ def lista(item):
context2 = autoplay.context
context.extend(context2)
scrapedurl=host+scrapedurl
if item.title!="Series":
if item.contentTitle!="Series":
itemlist.append(item.clone(title=scrapedtitle, contentTitle=show,url=scrapedurl,
thumbnail=scrapedthumbnail, action="findvideos", context=context))
else:
itemlist.append(item.clone(title=scrapedtitle, contentSerieName=show,url=scrapedurl, plot=scrapedplot,
thumbnail=scrapedthumbnail, action="episodios", context=context))
tmdb.set_infoLabels(itemlist, seekTmdb=True)
itemlist.append(Item(channel=item.channel, url=item.url, range=next_page, title='Página Siguiente >>>', action='lista'))
itemlist.append(Item(channel=item.channel, url=item.url, range=next_page, title='Página Siguiente >>>', contentTitle=item.title, action='lista'))
return itemlist
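The change from item.title to item.contentTitle works because the visible title mutates (the next-page item reuses 'Página Siguiente >>>'), so the section marker has to travel in a field that pagination clones forward. The underlying pattern, sketched with plain dicts as hypothetical stand-ins for Item:

    def next_page(item):
        nxt = dict(item)                       # clone, like item.clone()
        nxt["title"] = "Página Siguiente >>>"  # the display title changes...
        return nxt                             # ...but contentTitle travels along

    section = {"title": "Series", "contentTitle": "Series"}
    page2 = next_page(section)
    assert page2["contentTitle"] == "Series"   # safe to branch on, unlike "title"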

View File

@@ -232,11 +232,11 @@ def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
url = scrapertools.find_single_match(data, 'iframe-.*?src="([^"]+)')
data = httptools.downloadpage(url).data
patron = '<a href="([^"]+)'
patron = '(?i)src=&quot;([^&]+)&'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl in matches:
if ".gif" in scrapedurl:
continue
title = "Ver en: %s"
itemlist.append(item.clone(action = "play",
title = title,

View File

@@ -0,0 +1,64 @@
{
"id": "cine24h",
"name": "Cine24H",
"active": true,
"adult": false,
"language": ["lat", "cast", "vose"],
"fanart": "https://i.postimg.cc/WpqD2n77/cine24bg.jpg",
"thumbnail": "https://cine24h.net/wp-content/uploads/2018/06/cine24hv2.png",
"banner": "",
"categories": [
"movie",
"tvshow",
"vose",
"direct"
],
"settings": [
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"Latino",
"Castellano",
"English"
]
},
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "perfil",
"type": "list",
"label": "Perfil de color",
"default": 3,
"enabled": true,
"visible": true,
"lvalues": [
"Sin color",
"Perfil 5",
"Perfil 4",
"Perfil 3",
"Perfil 2",
"Perfil 1"
]
},
{
"id": "orden_episodios",
"type": "bool",
"label": "Mostrar los episodios de las series en orden descendente",
"default": false,
"enabled": true,
"visible": true
}
]
}
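Each entry under "settings" becomes a per-channel option that the channel module reads back at runtime; the new cine24h.py below does exactly this in its try block:

    from platformcode import config
    modo_grafico = config.get_setting('modo_grafico', 'cine24h')  # the bool above
    perfil = int(config.get_setting('perfil', 'cine24h'))         # index into "lvalues"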

View File

@@ -0,0 +1,382 @@
# -*- coding: utf-8 -*-
# -*- Channel CanalPelis -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import sys
import urlparse
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from core import channeltools
from core import tmdb
from platformcode import config, logger
from channelselector import get_thumb
__channel__ = "cine24h"
host = "https://cine24h.net/"
try:
__modo_grafico__ = config.get_setting('modo_grafico', __channel__)
__perfil__ = int(config.get_setting('perfil', __channel__))
except:
__modo_grafico__ = True
__perfil__ = 0
# Set the colour profile
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFFFFD700'],
['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFFFFD700'],
['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFFFFD700']]
if __perfil__ < 3:
color1, color2, color3, color4, color5 = perfil[__perfil__]
else:
color1 = color2 = color3 = color4 = color5 = ""
headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
['Referer', host]]
parameters = channeltools.get_channel_parameters(__channel__)
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']
IDIOMAS = {'Latino': 'LAT', 'Castellano': 'CAST'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['rapidvideo', 'streamango', 'openload', 'streamcherry']
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = [item.clone(title="Peliculas", action="menumovies", text_blod=True,
viewcontent='movies', viewmode="movie_with_plot", thumbnail=get_thumb('movies', auto=True)),
item.clone(title="Series", action="series", extra='serie', url=host + 'series/',
viewmode="movie_with_plot", text_blod=True, viewcontent='movies',
thumbnail=get_thumb('tvshows', auto=True), page=0),
item.clone(title="Buscar", action="search", thumbnail=get_thumb('search', auto=True),
text_blod=True, url=host, page=0)]
autoplay.show_option(item.channel, itemlist)
return itemlist
def menumovies(item):
logger.info()
itemlist = [item.clone(title="Novedades", action="peliculas", thumbnail=get_thumb('newest', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host + 'peliculas/', viewmode="movie_with_plot"),
item.clone(title="Estrenos", action="peliculas", thumbnail=get_thumb('estrenos', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host + '?s=trfilter&trfilter=1&years%5B%5D=2018', viewmode="movie_with_plot"),
item.clone(title="Más Vistas", action="peliculas", thumbnail=get_thumb('more watched', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host + 'peliculas-mas-vistas/', viewmode="movie_with_plot"),
item.clone(title="Géneros", action="genresYears", thumbnail=get_thumb('genres', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host, viewmode="movie_with_plot"),
item.clone(title="Estrenos por Año", action="genresYears", thumbnail=get_thumb('year', auto=True),
text_blod=True, page=0, viewcontent='movies', url=host,
viewmode="movie_with_plot"),
item.clone(title="Buscar", action="search", thumbnail=get_thumb('search', auto=True),
text_blod=True, url=host, page=0, extra='buscarP')]
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = urlparse.urljoin(item.url, "?s={0}".format(texto))
try:
return peliculas(item)
# Catch the exception so a failing channel doesn't interrupt the global search
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
def peliculas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}|&nbsp;", "", data)
data = scrapertools.decodeHtmlentities(data)
patron = '<article id="[^"]+" class="TPost[^<]+<a href="([^"]+)">.*?' # url
patron += '<img src="([^"]+)".*?' # img
patron += '</figure>(.*?)' # type
patron += '<h3 class="Title">([^<]+)</h3>.*?' # title
patron += '<span class="Year">([^<]+)</span>.*?' # year
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, tipo, scrapedtitle, year in matches[item.page:item.page + 30]:
if item.title == 'Buscar' and 'serie' in scrapedurl:
action = 'temporadas'
contentType = 'tvshow'
title = scrapedtitle + '[COLOR blue] (Serie)[/COLOR]'
else:
action = 'findvideos'
contentType = 'movie'
title = scrapedtitle
itemlist.append(Item(channel=__channel__, action=action, text_color=color3, show=scrapedtitle,
url=scrapedurl, infoLabels={'year': year}, contentType=contentType,
contentTitle=scrapedtitle, thumbnail='https:' + scrapedthumbnail,
title=title, context="buscar_trailer"))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
if item.page + 30 < len(matches):
itemlist.append(item.clone(page=item.page + 30,
title="» Siguiente »", text_color=color3))
else:
next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)">')
if next_page:
itemlist.append(item.clone(url=next_page, page=0, title="» Siguiente »", text_color=color3))
return itemlist
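peliculas() paginates client-side first, slicing 30 entries at a time out of the already-scraped matches, and only follows the site's own next-page link once the slice runs out. The slice arithmetic in isolation (hypothetical data):

    matches = list(range(73))          # pretend 73 scraped rows
    page = 60
    visible = matches[page:page + 30]  # rows 60..72
    if page + 30 < len(matches):
        print("local '» Siguiente »' item, page =", page + 30)
    else:
        print("fall back to the site's next-page link")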
def genresYears(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\(.*?\)|&nbsp;|<br>", "", data)
data = scrapertools.decodeHtmlentities(data)
if item.title == "Estrenos por Año":
patron_todas = 'ESTRENOS</a>(.*?)</i> Géneros'
else:
patron_todas = 'Géneros</a>(.*?)</li></ul></li>'
data = scrapertools.find_single_match(data, patron_todas)
patron = '<a href="([^"]+)">([^<]+)</a>' # url, title
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle in matches:
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action="peliculas"))
return itemlist
def year_release(item):
logger.info()
itemlist = []
data = scrapertools.cache_page(item.url)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<li><a href="([^"]+)">([^<]+)</a></li>' # url, title
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
itemlist.append(item.clone(channel=item.channel, action="peliculas", title=scrapedtitle, page=0,
url=scrapedurl, text_color=color3, viewmode="movie_with_plot", extra='next'))
return itemlist
def series(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\(.*?\)|&nbsp;|<br>", "", data)
patron = '<article class="TPost C TPostd">\s*<a href="([^"]+)">.*?' # url
patron += '<img src="([^"]+)".*?' # img
patron += '<h3 class="Title">([^<]+)</h3>' # title
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches[item.page:item.page + 30]:
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action="temporadas",
contentSerieName=scrapedtitle, show=scrapedtitle,
thumbnail='https:' + scrapedthumbnail, contentType='tvshow'))
tmdb.set_infoLabels(itemlist, __modo_grafico__)
if item.page + 30 < len(matches):
itemlist.append(item.clone(page=item.page + 30,
title="» Siguiente »", text_color=color3))
else:
next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)">')
if next_page:
itemlist.append(item.clone(url=next_page, page=0,
title="» Siguiente »", text_color=color3))
return itemlist
def temporadas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<div class="[^>]+>[^<]+<span>(.*?)</span> <i' # season numbers
matches = scrapertools.find_multiple_matches(data, patron)
if len(matches) > 1:
for scrapedseason in matches:
new_item = item.clone(action="episodios", season=scrapedseason, extra='temporadas')
new_item.infoLabels['season'] = scrapedseason
new_item.extra = ""
itemlist.append(new_item)
tmdb.set_infoLabels(itemlist, __modo_grafico__)
for i in itemlist:
i.title = "%s. %s" % (i.infoLabels['season'], i.infoLabels['tvshowtitle'])
if i.infoLabels['title']:
# If the season has its own name, append it to the item title
i.title += " - %s" % (i.infoLabels['title'])
if i.infoLabels.has_key('poster_path'):
# If the season has its own poster, use it instead of the series one
i.thumbnail = i.infoLabels['poster_path']
itemlist.sort(key=lambda it: int(it.infoLabels['season']))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
text_color=color1, thumbnail=thumbnail_host, fanart=fanart_host))
return itemlist
else:
return episodios(item)
def episodios(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<td class="MvTbImg B"><a href="([^"]+)".*?' # url
patron += '<td class="MvTbTtl"><a href="https://cine24h.net/episode/(.*?)/">([^<]+)</a>' # title de episodios
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle, scrapedname in matches:
scrapedtitle = scrapedtitle.replace('--', '0')
patron = '(\d+)x(\d+)'
match = re.compile(patron, re.DOTALL).findall(scrapedtitle)
season, episode = match[0]
if 'season' in item.infoLabels and int(item.infoLabels['season']) != int(season):
continue
title = "%sx%s: %s" % (season, episode.zfill(2), scrapedname)
new_item = item.clone(title=title, url=scrapedurl, action="findvideos", text_color=color3, fulltitle=title,
contentType="episode")
if 'infoLabels' not in new_item:
new_item.infoLabels = {}
new_item.infoLabels['season'] = season
new_item.infoLabels['episode'] = episode.zfill(2)
itemlist.append(new_item)
# TODO: don't do this when adding to the video library
if not item.extra:
# Fetch the data for all the season's episodes using multiple threads
tmdb.set_infoLabels(itemlist, __modo_grafico__)
for i in itemlist:
if i.infoLabels['title']:
# If the episode has its own name, append it to the item title
i.title = "%sx%s %s" % (i.infoLabels['season'], i.infoLabels[
'episode'], i.infoLabels['title'])
if i.infoLabels.has_key('poster_path'):
# If the episode has its own image, use it instead of the poster
i.thumbnail = i.infoLabels['poster_path']
itemlist.sort(key=lambda it: int(it.infoLabels['episode']),
reverse=config.get_setting('orden_episodios', __channel__))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
# "Add this series to the video library" option
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
text_color=color1, thumbnail=thumbnail_host, fanart=fanart_host))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|amp;|#038;|\(.*?\)|\s{2}|&nbsp;", "", data)
data = scrapertools.decodeHtmlentities(data)
patron = 'data-tplayernv="Opt(.*?)"><span>(.*?)</span>(.*?)</li>' # option, server, lang - quality
matches = re.compile(patron, re.DOTALL).findall(data)
for option, servername, quote in matches:
patron = '<span>(.*?) -([^<]+)</span'
match = re.compile(patron, re.DOTALL).findall(quote)
lang, quality = match[0]
quality = quality.strip()
headers = {'Referer': item.url}
url_1 = scrapertools.find_single_match(data, 'id="Opt%s"><iframe width="560" height="315" src="([^"]+)"' % option)
new_data = httptools.downloadpage(url_1, headers=headers).data
new_data = re.sub(r"\n|\r|\t|amp;|\(.*?\)|\s{2}|&nbsp;", "", new_data)
new_data = scrapertools.decodeHtmlentities(new_data)
url2 = scrapertools.find_single_match(new_data, '<iframe width="560" height="315" src="([^"]+)"')
url = url2 + '|%s' % url_1
if 'rapidvideo' in url2:
url = url2
lang = lang.lower().strip()
languages = {'latino': '[COLOR cornflowerblue](LAT)[/COLOR]',
'español': '[COLOR green](CAST)[/COLOR]',
'subespañol': '[COLOR red](VOS)[/COLOR]',
'sub': '[COLOR red](VOS)[/COLOR]'}
if lang in languages:
lang = languages[lang]
servername = servertools.get_server_from_url(url)
title = "Ver en: [COLOR yellowgreen](%s)[/COLOR] [COLOR yellow](%s)[/COLOR] %s" % (
servername.title(), quality, lang)
itemlist.append(item.clone(action='play', url=url, title=title, language=lang, quality=quality,
text_color=color3))
itemlist = servertools.get_servers_itemlist(itemlist)
itemlist.sort(key=lambda it: it.language, reverse=False)
# Required for FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Required for AutoPlay
autoplay.start(itemlist, item)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'episodios':
itemlist.append(Item(channel=__channel__, url=item.url, action="add_pelicula_to_library", extra="findvideos",
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
thumbnail=thumbnail_host, contentTitle=item.contentTitle))
return itemlist
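findvideos() resolves each player option in two hops: it fetches the Opt iframe from the page, then the iframe inside that, passing the previous URL as Referer because the host validates it. The chain in isolation, using the channel's own helpers with a hypothetical URL and simplified patterns:

    from core import httptools, scrapertools
    page_url = "https://cine24h.net/pelicula-x/"  # hypothetical
    data = httptools.downloadpage(page_url).data
    url_1 = scrapertools.find_single_match(data, 'id="Opt1"><iframe[^>]+src="([^"]+)"')
    new_data = httptools.downloadpage(url_1, headers={'Referer': page_url}).data
    url_2 = scrapertools.find_single_match(new_data, '<iframe[^>]+src="([^"]+)"')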

View File

@@ -8,5 +8,15 @@
"thumbnail": "descargacineclasico2.png",
"categories": [
"movie"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -11,27 +11,28 @@ from lib import unshortenit
host = "http://www.descargacineclasico.net"
def agrupa_datos(data):
# Tidy up the data (strip comments, collapse whitespace)
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|<!--.*?-->', '', data)
data = re.sub(r'\s+', ' ', data)
data = re.sub(r'>\s<', '><', data)
return data
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, title="Últimas agregadas", action="agregadas",
url=host, viewmode="movie_with_plot",
thumbnail=get_thumb('last', auto=True)))
url=host, viewmode="movie_with_plot", thumbnail=get_thumb('last', auto=True)))
itemlist.append(Item(channel=item.channel, title="Listado por género", action="porGenero",
url=host,
thumbnail=get_thumb('genres', auto=True)))
itemlist.append(
Item(channel=item.channel, title="Buscar", action="search", url=host,
thumbnail=get_thumb('search', auto=True)))
url=host, thumbnail=get_thumb('genres', auto=True)))
itemlist.append(Item(channel=item.channel, title="Listado alfabetico", action="porLetra",
url=host + "/cine-online/", thumbnail=get_thumb('alphabet', auto=True)))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host,
thumbnail=get_thumb('search', auto=True)))
return itemlist
def porLetra(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = 'noindex,nofollow" href="([^"]+)">(\w+)<'
matches = scrapertools.find_multiple_matches(data, patron)
for url, titulo in matches:
itemlist.append( Item(channel=item.channel , action="agregadas" , title=titulo, url=url, viewmode="movie_with_plot"))
return itemlist
@@ -43,7 +44,9 @@ def porGenero(item):
data = re.compile(patron,re.DOTALL).findall(data)
patron = '<li.*?>.*?href="([^"]+).*?>([^<]+)'
matches = re.compile(patron,re.DOTALL).findall(data[0])
for url,genero in matches:
for url, genero in matches:
if genero == "Erótico" and config.get_setting("adult_mode") == 0:
continue
itemlist.append( Item(channel=item.channel , action="agregadas" , title=genero,url=url, viewmode="movie_with_plot"))
return itemlist
@@ -129,7 +132,6 @@ def findvideos(item):
contentTitle = item.contentTitle
))
return itemlist
return itemlist
def play(item):

View File

@@ -16,17 +16,20 @@ from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
host = 'https://www.doramasmp4.com/'
host = 'https://www2.doramasmp4.com/'
IDIOMAS = {'sub': 'VOSE', 'VO': 'VO'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['openload', 'streamango', 'netutv', 'okru', 'directo', 'mp4upload']
def get_source(url):
def get_source(url, referer=None):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
if referer is None:
data = httptools.downloadpage(url).data
else:
data = httptools.downloadpage(url, headers={'Referer':referer}).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
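With the new optional parameter, callers choose per request whether to forward a Referer header (hypothetical URLs):

    data = get_source(host)                          # plain fetch, as before
    data = get_source(video_url, referer=item.url)   # for hosts that check the Referer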
def mainlist(item):
@@ -38,9 +41,9 @@ def mainlist(item):
itemlist.append(Item(channel= item.channel, title="Doramas", action="doramas_menu",
thumbnail=get_thumb('doramas', auto=True), type='dorama'))
itemlist.append(Item(channel=item.channel, title="Películas", action="list_all",
url=host + 'catalogue?format=pelicula', thumbnail=get_thumb('movies', auto=True),
url=host + 'catalogue?format%5B%5D=movie', thumbnail=get_thumb('movies', auto=True),
type='movie'))
itemlist.append(Item(channel=item.channel, title = 'Buscar', action="search", url= host+'ajax/search.php',
itemlist.append(Item(channel=item.channel, title = 'Buscar', action="search", url= host+'search?s=',
thumbnail=get_thumb('search', auto=True)))
autoplay.show_option(item.channel, itemlist)
@@ -52,7 +55,7 @@ def doramas_menu(item):
itemlist =[]
itemlist.append(Item(channel=item.channel, title="Todas", action="list_all", url=host + 'catalogue',
itemlist.append(Item(channel=item.channel, title="Todas", action="list_all", url=host + 'catalogue?format%5B%5D=drama',
thumbnail=get_thumb('all', auto=True), type='dorama'))
itemlist.append(Item(channel=item.channel, title="Nuevos capitulos", action="latest_episodes",
url=host + 'latest-episodes', thumbnail=get_thumb('new episodes', auto=True), type='dorama'))
@@ -62,22 +65,24 @@ def list_all(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '<div class=col-lg-2 col-md-3 col-6><a href=(.*?) title=.*?'
patron += '<img src=(.*?) alt=(.*?) class=img-fluid>.*?bg-primary text-capitalize>(.*?)</span>'
patron = '<div class="col-lg-2 col-md-3 col-6 mb-3"><a href="([^"]+)".*?<img src="([^"]+)".*?'
patron += 'txt-size-12">(\d{4})<.*?text-truncate">([^<]+)<.*?description">([^<]+)<.*?'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedtype in matches:
media_type = item.type
for scrapedurl, scrapedthumbnail, year, scrapedtitle, scrapedplot in matches:
url = scrapedurl
scrapedtype = scrapedtype.lower()
scrapedtitle = scrapedtitle
thumbnail = scrapedthumbnail
new_item = Item(channel=item.channel, title=scrapedtitle, url=url,
thumbnail=thumbnail, type=scrapedtype)
if scrapedtype != 'dorama':
thumbnail=thumbnail, type=media_type, infoLabels={'year':year})
if media_type != 'dorama':
new_item.action = 'findvideos'
new_item.contentTitle = scrapedtitle
new_item.type = item.type
else:
new_item.contentSerieName=scrapedtitle
@@ -99,44 +104,15 @@ def list_all(item):
type=item.type))
return itemlist
def search_results(item):
logger.info()
itemlist=[]
data = httptools.downloadpage(item.url, post=item.post).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
patron = '<a class=media p-2 href=(.*?)><img class=mr-2 src=(.*?)>.*?500>(.*?)</div>'
patron += '<div class=text-muted tx-11>(.*?)</div>.*?200>(.*?)</div>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, year, scrapedtype in matches:
new_item = Item(channel=item.channel, url=scrapedurl, thumbnail=scrapedthumbnail, title=scrapedtitle)
if scrapedtype != 'dorama':
new_item.action = 'findvideos'
new_item.contentTitle = scrapedtitle
else:
new_item.contentSerieName=scrapedtitle
new_item.action = 'episodios'
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def latest_episodes(item):
logger.info()
itemlist = []
infoLabels = dict()
data = get_source(item.url)
patron = '<div class=col-lg-3 col-md-6 mb-2><a href=(.*?) title=.*?'
patron +='<img src=(.*?) alt.*?truncate-width>(.*?)<.*?mb-1>(.*?)<'
patron = 'shadow-lg rounded" href="([^"]+)".*?src="([^"]+)".*?style="">([^<]+)<.*?>Capítulo (\d+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedep in matches:
title = '%s %s' % (scrapedtitle, scrapedep)
contentSerieName = scrapedtitle
itemlist.append(Item(channel=item.channel, action='findvideos', url=scrapedurl, thumbnail=scrapedthumbnail,
@@ -151,8 +127,7 @@ def episodios(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '<a itemprop=url href=(.*?) title=.*? class=media.*?truncate-width>(.*?)<.*?'
patron +='text-muted mb-1>Capítulo (.*?)</div>'
patron = '<a itemprop="url".*?href="([^"]+)".*?title="(.*?) Cap.*?".*?>Capítulo (\d+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
@@ -186,77 +161,56 @@ def findvideos(item):
logger.info()
itemlist = []
duplicated = []
headers={'referer':item.url}
data = get_source(item.url)
patron = 'animated pulse data-url=(.*?)>'
patron = 'link="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
if '</strong> ¡Este capítulo no tiene subtítulos, solo audio original! </div>' in data:
language = IDIOMAS['VO']
else:
language = IDIOMAS['sub']
if item.type !='episode' and '<meta property=article:section content=Pelicula>' not in data:
item.type = 'dorama'
#if item.type !='episode' and '<meta property=article:section content=Pelicula>' not in data:
# if item.type !='episode' and item.type != 'movie':
# item.type = 'dorama'
# item.contentSerieName = item.contentTitle
# item.contentTitle = ''
# return episodios(item)
# else:
for video_url in matches:
headers = {'referer': video_url}
token = scrapertools.find_single_match(video_url, 'token=(.*)')
if 'fast.php' in video_url:
video_url = 'https://player.rldev.in/fast.php?token=%s' % token
video_data = httptools.downloadpage(video_url, headers=headers).data
url = scrapertools.find_single_match(video_data, "'file':'([^']+)'")
else:
video_url = 'https://www2.doramasmp4.com/api/redirect.php?token=%s' % token
video_data = httptools.downloadpage(video_url, headers=headers, follow_redirects=False).headers
url = scrapertools.find_single_match(video_data['location'], '\d+@@@(.*?)@@@')
new_item = Item(channel=item.channel, title='[%s] [%s]', url=url, action='play', language = language)
itemlist.append(new_item)
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % (x.server.capitalize(), x.language))
if len(itemlist) == 0 and item.type == 'search':
item.contentSerieName = item.contentTitle
item.contentTitle = ''
return episodios(item)
else:
for video_url in matches:
video_data = httptools.downloadpage(video_url, headers=headers).data
server = ''
if 'Media player DMP4' in video_data:
url = scrapertools.find_single_match(video_data, "sources: \[\{'file':'(.*?)'")
server = 'Directo'
else:
url = scrapertools.find_single_match(video_data, '<iframe src="(.*?)".*?scrolling="no"')
new_item = Item(channel=item.channel, title='[%s] [%s]', url=url, action='play', language = language)
if server !='':
new_item.server = server
itemlist.append(new_item)
# Required for FilterTools
# for video_item in itemlist:
# if 'sgl.php' in video_item.url:
# headers = {'referer': item.url}
# patron_gvideo = "'file':'(.*?)','type'"
# data_gvideo = httptools.downloadpage(video_item.url, headers=headers).data
# video_item.url = scrapertools.find_single_match(data_gvideo, patron_gvideo)
#
# duplicated.append(video_item.url)
# video_item.channel = item.channel
# video_item.infoLabels = item.infoLabels
# video_item.language=IDIOMAS['sub']
#
# patron = 'var item = {id: (\d+), episode: (\d+),'
# matches = re.compile(patron, re.DOTALL).findall(data)
#
# for id, episode in matches:
# data_json=jsontools.load(httptools.downloadpage(host+'/api/stream/?id=%s&episode=%s' %(id, episode)).data)
# sources = data_json['options']
# for src in sources:
# url = sources[src]
#
# if 'sgl.php' in url:
# headers = {'referer':item.url}
# patron_gvideo = "'file':'(.*?)','type'"
# data_gvideo = httptools.downloadpage(url, headers = headers).data
# url = scrapertools.find_single_match(data_gvideo, patron_gvideo)
#
# new_item = Item(channel=item.channel, title='%s', url=url, language=IDIOMAS['sub'], action='play',
# infoLabels=item.infoLabels)
# if url != '' and url not in duplicated:
# itemlist.append(new_item)
# duplicated.append(url)
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % (x.server.capitalize(), x.language))
itemlist = filtertools.get_links(itemlist, item, list_language)
# Required for FilterTools
# Required for AutoPlay
itemlist = filtertools.get_links(itemlist, item, list_language)
# Required for AutoPlay
autoplay.start(itemlist, item)
autoplay.start(itemlist, item)
return itemlist
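The redirect.php branch above never plays the intermediate URL: it tells httptools not to follow the hop and digs the real stream URL out of the Location header instead. That step in isolation (hypothetical token):

    video_url = 'https://www2.doramasmp4.com/api/redirect.php?token=XYZ'  # hypothetical
    resp = httptools.downloadpage(video_url, headers={'referer': video_url},
                                  follow_redirects=False)
    url = scrapertools.find_single_match(resp.headers['location'], '\d+@@@(.*?)@@@')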
@@ -266,14 +220,11 @@ def search(item, texto):
import urllib
itemlist = []
texto = texto.replace(" ", "+")
item.url = item.url
post = {'q':texto}
post = urllib.urlencode(post)
item.url = item.url + texto
item.type = 'search'
item.post = post
if texto != '':
try:
return search_results(item)
return list_all(item)
except:
itemlist.append(item.clone(url='', title='No hay elementos...', action=''))
return itemlist

View File

@@ -3,29 +3,30 @@
import re
import urlparse
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger
host = "https://www.youfreeporntube.net"
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, action="lista", title="Útimos videos",
url="http://www.ero-tik.com/newvideos.html?&page=1"))
url= host + "/new-clips.html?&page=1"))
itemlist.append(
Item(channel=item.channel, action="categorias", title="Categorias", url="http://www.ero-tik.com/browse.html"))
itemlist.append(Item(channel=item.channel, action="lista", title="Top ultima semana",
url="http://www.ero-tik.com/topvideos.html?do=recent"))
Item(channel=item.channel, action="categorias", title="Categorias", url=host + "/browse.html"))
itemlist.append(Item(channel=item.channel, action="lista", title="Populares",
url=host + "/topvideo.html?page=1"))
itemlist.append(Item(channel=item.channel, action="search", title="Buscar",
url="http://www.ero-tik.com/search.php?keywords="))
url=host + "/search.php?keywords="))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = "{0}{1}".format(item.url, texto)
try:
@@ -41,96 +42,73 @@ def search(item, texto):
def categorias(item):
logger.info()
itemlist = []
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}", "", data)
patron = '<div class="pm-li-category"><a href="([^"]+)">.*?.<h3>(.*?)</h3></a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for url, actriz in matches:
itemlist.append(Item(channel=item.channel, action="listacategoria", title=actriz, url=url))
return itemlist
def lista(item):
logger.info()
itemlist = []
# Download the page
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}", "", data)
# Extract the entries from the selected page
patron = '<li><div class=".*?<a href="([^"]+)".*?>.*?.img src="([^"]+)".*?alt="([^"]+)".*?>'
matches = re.compile(patron, re.DOTALL).findall(data)
itemlist = []
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
url = urlparse.urljoin(item.url, scrapedurl)
thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
title = scrapedtitle.strip()
# Add to the listing
itemlist.append(Item(channel=item.channel, action="play", thumbnail=thumbnail, fanart=thumbnail, title=title,
fulltitle=title, url=url,
viewmode="movie", folder=True))
paginacion = scrapertools.find_single_match(data,
'<li class="active"><a href="#" onclick="return false;">\d+</a></li><li class=""><a href="([^"]+)">')
if paginacion:
itemlist.append(Item(channel=item.channel, action="lista", title=">> Página Siguiente",
url="http://ero-tik.com/" + paginacion))
url=host + "/" + paginacion))
return itemlist
def listacategoria(item):
logger.info()
itemlist = []
# Download the page
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}", "", data)
# Extract the entries from the selected page
patron = '<li><div class=".*?<a href="([^"]+)".*?>.*?.img src="([^"]+)".*?alt="([^"]+)".*?>'
matches = re.compile(patron, re.DOTALL).findall(data)
itemlist = []
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
url = urlparse.urljoin(item.url, scrapedurl)
thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
title = scrapedtitle.strip()
# Add to the listing
itemlist.append(
Item(channel=item.channel, action="play", thumbnail=thumbnail, title=title, fulltitle=title, url=url,
viewmode="movie", folder=True))
paginacion = scrapertools.find_single_match(data,
'<li class="active"><a href="#" onclick="return false;">\d+</a></li><li class=""><a href="([^"]+)">')
if paginacion:
itemlist.append(
Item(channel=item.channel, action="listacategoria", title=">> Página Siguiente", url=paginacion))
return itemlist
def play(item):
logger.info()
itemlist = []
# Download the page
data = scrapertools.cachePage(item.url)
data = scrapertools.unescape(data)
logger.info(data)
from core import servertools
itemlist.extend(servertools.find_video_items(data=data))
for videoitem in itemlist:
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
videoitem.action = "play"
videoitem.folder = False
videoitem.title = item.title
data = httptools.downloadpage(item.url).data
item.url = scrapertools.find_single_match(data, 'Playerholder.*?src="([^"]+)"')
if "tubst.net" in item.url:
url = scrapertools.find_single_match(data, 'itemprop="embedURL" content="([^"]+)')
data = httptools.downloadpage(url).data
url = scrapertools.find_single_match(data, '<iframe.*?src="([^"]+)"')
data = httptools.downloadpage(url).data
url = scrapertools.find_single_match(data, '<source src="([^"]+)"')
item.url = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location", "")
itemlist.append(item.clone())
itemlist = servertools.get_servers_itemlist(itemlist)
return itemlist
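The final hop in the rewritten play() resolves to the media file itself, so the request is issued with only_headers=True: the target comes out of the Location header and the video body is never downloaded. That step alone (hypothetical URL):

    r = httptools.downloadpage("https://tubst.net/get/123",  # hypothetical
                               follow_redirects=False, only_headers=True)
    final_url = r.headers.get("location", "")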

View File

@@ -1,22 +0,0 @@
{
"id": "filesmonster_catalogue",
"name": "Filesmonster Catalogue",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "filesmonster_catalogue.png",
"banner": "filesmonster_catalogue.png",
"categories": [
"adult"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,397 +0,0 @@
# -*- coding: utf-8 -*-
import os
import re
from core import scrapertools
from core.item import Item
from platformcode import config, logger
def strip_tags(value):
return re.sub(r'<[^>]*?>', '', value)
def mainlist(item):
logger.info()
user = config.get_setting("filesmonsteruser")
itemlist = []
itemlist.append(Item(channel=item.channel, action="unusualporn", title="Canal unusualporn.net",
thumbnail="http://filesmonster.biz/img/logo.png"))
itemlist.append(Item(channel=item.channel, action="files_monster", title="Canal files-monster.org",
thumbnail="http://files-monster.org/template/static/images/logo.jpg"))
itemlist.append(Item(channel=item.channel, action="filesmonster", title="Canal filesmonster.filesdl.net",
thumbnail="http://filesmonster.biz/img/logo.png"))
if user != '': itemlist.append(
Item(channel=item.channel, action="favoritos", title="Favoritos en filesmonster.com del usuario " + user,
folder=True))
return itemlist
def filesmonster(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, action="videos", title="Ultimos vídeos",
thumbnail="http://photosex.biz/imager/w_400/h_400/9f869c6cb63e12f61b58ffac2da822c9.jpg",
url="http://filesmonster.filesdl.net"))
itemlist.append(Item(channel=item.channel, action="categorias", title="Categorias",
thumbnail="http://photosex.biz/imager/w_400/h_500/e48337cd95bbb6c2c372ffa6e71441ac.jpg",
url="http://filesmonster.filesdl.net"))
itemlist.append(Item(channel=item.channel, action="search", title="Buscar en filesmonster.fliesdl.net",
url="http://filesmonster.filesdl.net/posts/search?q=%s"))
return itemlist
def unusualporn(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, action="videos_2", title="Últimos vídeos", url="http://unusualporn.net/",
thumbnail="http://photosex.biz/imager/w_400/h_500/e48337cd95bbb6c2c372ffa6e71441ac.jpg"))
itemlist.append(Item(channel=item.channel, action="categorias_2", title="Categorías", url="http://unusualporn.net/",
thumbnail="http://photosex.biz/imager/w_400/h_500/e48337cd95bbb6c2c372ffa6e71441ac.jpg"))
itemlist.append(Item(channel=item.channel, action="search", title="Buscar en unusualporn",
url="http://unusualporn.net/search/%s"))
return itemlist
def files_monster(item):
logger.info()
itemlist = []
itemlist.append(
Item(channel=item.channel, action="videos_3", title="Últimos vídeos", url="http://www.files-monster.org/",
thumbnail="http://photosex.biz/imager/w_400/h_500/e48337cd95bbb6c2c372ffa6e71441ac.jpg"))
itemlist.append(
Item(channel=item.channel, action="categorias_3", title="Categorías", url="http://www.files-monster.org/",
thumbnail="http://photosex.biz/imager/w_400/h_500/e48337cd95bbb6c2c372ffa6e71441ac.jpg"))
itemlist.append(Item(channel=item.channel, action="search", title="Buscar en files-monster.org",
url="http://files-monster.org/search?search=%s"))
return itemlist
def favoritos(item):
user = config.get_setting("filesmonsteruser")
password = config.get_setting("filesmonsterpassword")
logger.info()
name_file = os.path.splitext(os.path.basename(__file__))[0]
fname = os.path.join(config.get_data_path(), "settings_channels", name_file + "_favoritos.txt")
fa = open(fname, 'a+')
fa.close()
f = open(fname, 'r')
lines = f.readlines()
f.close()
itemlist = []
post2 = "username=" + user + "&password=" + password
login_url = "http://filesmonster.com/api/public/login"
data1 = scrapertools.cache_page(login_url, post=post2)
partes1 = data1.split('"')
estado = partes1[3]
if estado != 'success': itemlist.append(Item(channel=item.channel,
title="No pudo accederse con tus datos de acceso de Filesmonster.com, introdúcelos en con el apartado figuración. Error: " + estado + data1))
url_favoritos = "http://filesmonster.com/?favorites=1"
data2 = scrapertools.cache_page(url_favoritos, post=post2)
data2 = scrapertools.find_single_match(data2, 'favorites-table(.*?)pager')
patronvideos = '<a href="([^"]+)">([^<]+)</a>.*?del=([^"]+)"'
matches = re.compile(patronvideos, re.DOTALL).findall(data2)
contador = 0
for url, title, borrar in matches:
contador = contador + 1
imagen = ''
for linea in lines:
partes2 = linea.split("@")
parte_url = partes2[0]
parte_imagen = partes2[1]
if (parte_url == url): imagen = parte_imagen.rstrip('\n').rstrip('\r')
if url.find("?fid=") == -1:
itemlist.append(
Item(channel=item.channel, action="play", server="filesmonster", title=title, fulltitle=item.title,
url=url, thumbnail=imagen, folder=False))
else:
itemlist.append(
Item(channel=item.channel, action="detail", server="filesmonster", title=title, fulltitle=title,
thumbnail=imagen, url=url, folder=True))
itemlist.append(Item(channel=item.channel, action="quitar_favorito",
title="(-) quitar de mis favoritos en filesmonster.com", thumbnail=imagen,
url="http://filesmonster.com/?favorites=1&del=" + borrar, plot=borrar))
itemlist.append(Item(channel=item.channel, title="", folder=True))
if contador == 0 and estado == 'success':
itemlist.append(
Item(channel=item.channel, title="No tienes ningún favorito, navega por las diferentes fuentes y añádelos"))
return itemlist
def quitar_favorito(item):
logger.info()
itemlist = []
data = scrapertools.downloadpage(item.url)
itemlist.append(Item(channel=item.channel, action="favoritos",
title="El vídeo ha sido eliminado de tus favoritos, pulsa para volver a tu lista de favoritos"))
return itemlist
def anadir_favorito(item):
logger.info()
name_file = os.path.splitext(os.path.basename(__file__))[0]
fname = os.path.join(config.get_data_path(), "settings_channels", name_file + "_favoritos.txt")
user = config.get_setting("filesmonsteruser")
password = config.get_setting("filesmonsterpassword")
itemlist = []
post2 = "username=" + user + "&password=" + password
login_url = "http://filesmonster.com/api/public/login"
data1 = scrapertools.cache_page(login_url, post=post2)
if item.plot == 'el archivo':
id1 = item.url.split('?id=')
id = id1[1]
que = "file"
if item.plot == 'la carpeta':
id1 = item.url.split('?fid=')
id = id1[1]
que = "folder"
url = "http://filesmonster.com/ajax/add_to_favorites"
post3 = "username=" + user + "&password=" + password + "&id=" + id + "&obj_type=" + que
data2 = scrapertools.cache_page(url, post=post3)
if data2 == 'Already in Your favorites': itemlist.append(Item(channel=item.channel, action="favoritos",
title="" + item.plot + " ya estaba en tu lista de favoritos (" + user + ") en Filesmonster"))
if data2 != 'You are not logged in' and data2 != 'Already in Your favorites':
itemlist.append(Item(channel=item.channel, action="favoritos",
title="Se ha añadido correctamente " + item.plot + " a tu lista de favoritos (" + user + ") en Filesmonster",
plot=data1 + data2))
f = open(fname, "a+")
if (item.plot == 'la carpeta'):
ruta = "http://filesmonster.com/folders.php?"
if (item.plot == 'el archivo'):
ruta = "http://filesmonster.com/download.php"
laruta = ruta + item.url
laruta = laruta.replace("http://filesmonster.com/folders.php?http://filesmonster.com/folders.php?",
"http://filesmonster.com/folders.php?")
laruta = laruta.replace("http://filesmonster.com/download.php?http://filesmonster.com/download.php?",
"http://filesmonster.com/download.php?")
f.write(laruta + '@' + item.thumbnail + '\n')
f.close()
if data2 == 'You are not logged in': itemlist.append(Item(channel=item.channel, action="favoritos",
title="No ha sido posible añadir " + item.plot + " a tu lista de favoritos (" + user + " no logueado en Filesmonster)", ))
return itemlist
def categorias(item):
logger.info()
itemlist = []
data = scrapertools.downloadpage(item.url)
data = scrapertools.find_single_match(data,
'Categories <b class="caret"></b></a>(.*?)RSS <b class="caret"></b></a>')
patronvideos = '<a href="([^"]+)">([^<]+)</a>'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
for url, title in matches:
itemlist.append(Item(channel=item.channel, action="videos", title=title, url=url))
return itemlist
def categorias_2(item):
logger.info()
itemlist = []
data = scrapertools.downloadpage(item.url)
patronvideos = '<li class="cat-item cat-item-[\d]+"><a href="([^"]+)" title="[^"]+">([^<]+)</a><a class="rss_s" title="[^"]+" target="_blank" href="[^"]+"></a></li>'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
for url, title in matches:
itemlist.append(Item(channel=item.channel, action="videos_2", title=title, url=url))
return itemlist
def categorias_3(item):
logger.info()
itemlist = []
data = scrapertools.downloadpage(item.url)
patronvideos = '<li><a href="([^"]+)">([^<]+)</a></li>'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
for url, title in matches:
itemlist.append(Item(channel=item.channel, action="videos_3", title=title, url=url))
return itemlist
def search(item, texto):
logger.info("texto:" + texto)
original = item.url
item.url = item.url % texto
try:
if original == 'http://filesmonster.filesdl.net/posts/search?q=%s':
return videos(item)
if original == 'http://unusualporn.net/search/%s':
return videos_2(item)
if original == 'http://files-monster.org/search?search=%s':
return videos_3(item)
# Catch the exception so a failing channel doesn't interrupt the global search
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def videos(item):
logger.info()
itemlist = []
url = item.url
while url and len(itemlist) < 25:
data = scrapertools.downloadpage(url)
patronvideos = '<div class="panel-heading">.*?<a href="([^"]+)">([^<]+).*?</a>.*?<div class="panel-body" style="text-align: center;">.*?<img src="([^"]+)".*?'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
for url, title, thumbnail in matches:
title = title.strip()
itemlist.append(
Item(channel=item.channel, action="detail", title=title, fulltitle=title, url=url, thumbnail=thumbnail))
url = scrapertools.find_single_match(data, '<li><a href="([^"]+)">Next</a></li>').replace("&amp;", "&")
# Link to the next page
if url:
itemlist.append(Item(channel=item.channel, action="videos", title=">> Página Siguiente", url=url))
return itemlist
def videos_2(item):
logger.info()
itemlist = []
url_limpia = item.url.split("?")[0]
url = item.url
while url and len(itemlist) < 25:
data = scrapertools.downloadpage(url)
patronvideos = 'data-link="([^"]+)" data-title="([^"]+)" src="([^"]+)" border="0" />';
matches = re.compile(patronvideos, re.DOTALL).findall(data)
for url, title, thumbnail in matches:
itemlist.append(Item(channel=item.channel, action="detail_2", title=title, fulltitle=title, url=url,
thumbnail=thumbnail))
url = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />').replace("&amp;", "&")
# Link to the next page
if url:
itemlist.append(Item(channel=item.channel, action="videos_2", title=">> Página Siguiente", url=url))
return itemlist
def videos_3(item):
logger.info()
itemlist = []
url = item.url
url_limpia = item.url.split("?")[0]
while url and len(itemlist) < 25:
data = scrapertools.downloadpage(url)
patronvideos = '<a href="([^"]+)">.*?<img src="([^"]+)" border="0" title=".*?([^"]+).*?" height="70" />'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
for url, thumbnail, title in matches:
itemlist.append(Item(channel=item.channel, action="detail_2", title=title, fulltitle=title, url=url,
thumbnail=thumbnail))
url = scrapertools.find_single_match(data,
'<a style="text-decoration:none;" href="([^"]+)">&rarr;</a>').replace(
"&amp;", "&")
# Link to the next page
if url:
itemlist.append(
Item(channel=item.channel, action="videos_3", title=">> Página Siguiente", url=url_limpia + url))
return itemlist
def detail(item):
logger.info()
itemlist = []
data = scrapertools.downloadpage(item.url)
patronvideos = '["|\'](http\://filesmonster.com/download.php\?[^"\']+)["|\']'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
for url in matches:
title = "Archivo %d: %s [filesmonster]" % (len(itemlist) + 1, item.fulltitle)
itemlist.append(
Item(channel=item.channel, action="play", server="filesmonster", title=title, fulltitle=item.fulltitle,
url=url, thumbnail=item.thumbnail, folder=False))
itemlist.append(Item(channel=item.channel, action="anadir_favorito",
title="(+) Añadir el vídeo a tus favoritos en filesmonster", url=url,
thumbnail=item.thumbnail, plot="el archivo", folder=True))
itemlist.append(Item(channel=item.channel, title=""));
patronvideos = '["|\'](http\://filesmonster.com/folders.php\?[^"\']+)["|\']'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
for url in matches:
if not url == item.url:
logger.info(url)
logger.info(item.url)
title = "Carpeta %d: %s [filesmonster]" % (len(itemlist) + 1, item.fulltitle)
itemlist.append(Item(channel=item.channel, action="detail", title=title, fulltitle=item.fulltitle, url=url,
thumbnail=item.thumbnail, folder=True))
itemlist.append(Item(channel=item.channel, action="anadir_favorito",
title="(+) Añadir la carpeta a tus favoritos en filesmonster", url=url,
thumbnail=item.thumbnail, plot="la carpeta", folder=True))
itemlist.append(Item(channel=item.channel, title=""));
return itemlist
def detail_2(item):
logger.info()
itemlist = []
# download the page
data = scrapertools.downloadpageGzip(item.url)
data = data.split('<span class="filesmonsterdlbutton">Download from Filesmonster</span>')
data = data[0]
# find the url
patronvideos = 'href="http://filesmonster.com/download.php(.*?)".(.*?)'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
for match2 in matches:
url = "http://filesmonster.com/download.php" + match2[0]
title = "Archivo %d: %s [filesmonster]" % (len(itemlist) + 1, item.fulltitle)
itemlist.append(
Item(channel=item.channel, action="play", server="filesmonster", title=title, fulltitle=item.fulltitle,
url=url, thumbnail=item.thumbnail, folder=False))
itemlist.append(Item(channel=item.channel, action="anadir_favorito",
title="(+) Añadir el vídeo a tus favoritos en filesmonster", url=match2[0],
thumbnail=item.thumbnail, plot="el archivo", folder=True))
itemlist.append(Item(channel=item.channel, title=""));
patronvideos = '["|\'](http\://filesmonster.com/folders.php\?[^"\']+)["|\']'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
for url in matches:
if not url == item.url:
logger.info(url)
logger.info(item.url)
title = "Carpeta %d: %s [filesmonster]" % (len(itemlist) + 1, item.fulltitle)
itemlist.append(Item(channel=item.channel, action="detail", title=title, fulltitle=item.fulltitle, url=url,
thumbnail=item.thumbnail, folder=True))
itemlist.append(Item(channel=item.channel, action="anadir_favorito",
title="(+) Añadir la carpeta a tus favoritos en filesmonster", url=url,
thumbnail=item.thumbnail, plot="la carpeta", folder=True))
itemlist.append(Item(channel=item.channel, title=""));
return itemlist

View File

@@ -1,21 +0,0 @@
{
"id": "freecambay",
"name": "FreeCamBay",
"language": ["*"],
"active": true,
"adult": true,
"thumbnail": "http://i.imgur.com/wuzhOCt.png?1",
"categories": [
"adult"
],
"settings": [
{
"id": "menu_info",
"type": "bool",
"label": "Mostrar menú antes de reproducir con imágenes",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,261 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from core import httptools
from core import scrapertools
from core.item import Item
from platformcode import config, logger
host = "http://www.freecambay.com"
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(item.clone(action="lista", title="Nuevos Vídeos", url=host + "/latest-updates/"))
itemlist.append(item.clone(action="lista", title="Mejor Valorados", url=host + "/top-rated/"))
itemlist.append(item.clone(action="lista", title="Más Vistos", url=host + "/most-popular/"))
itemlist.append(item.clone(action="categorias", title="Categorías", url=host + "/categories/"))
itemlist.append(item.clone(action="categorias", title="Modelos",
url=host + "/models/?mode=async&function=get_block&block_id=list_models_models" \
"_list&sort_by=total_videos"))
itemlist.append(item.clone(action="playlists", title="Listas", url=host + "/playlists/"))
itemlist.append(item.clone(action="tags", title="Tags", url=host + "/tags/"))
itemlist.append(item.clone(title="Buscar...", action="search"))
itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False))
return itemlist
def configuracion(item):
from platformcode import platformtools
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
return ret
def search(item, texto):
logger.info()
item.url = "%s/search/%s/" % (host, texto.replace("+", "-"))
item.extra = texto
try:
return lista(item)
# Catch the exception so a failing channel doesn't interrupt the global search
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def lista(item):
logger.info()
itemlist = []
# Download the page
data = httptools.downloadpage(item.url).data
action = "play"
if config.get_setting("menu_info", "freecambay"):
action = "menu_info"
# Extract the entries
patron = '<div class="item.*?href="([^"]+)" title="([^"]+)".*?data-original="([^"]+)"(.*?)<div class="duration">([^<]+)<'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle, scrapedthumbnail, quality, duration in matches:
if duration:
scrapedtitle = "%s - %s" % (duration, scrapedtitle)
if '>HD<' in quality:
scrapedtitle += " [COLOR red][HD][/COLOR]"
itemlist.append(item.clone(action=action, title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
fanart=scrapedthumbnail))
# Extract the next-page marker
if item.extra:
next_page = scrapertools.find_single_match(data, '<li class="next">.*?from_videos\+from_albums:(\d+)')
if next_page:
if "from_videos=" in item.url:
next_page = re.sub(r'&from_videos=(\d+)', '&from_videos=%s' % next_page, item.url)
else:
next_page = "%s?mode=async&function=get_block&block_id=list_videos_videos_list_search_result" \
"&q=%s&category_ids=&sort_by=post_date&from_videos=%s" % (item.url, item.extra, next_page)
itemlist.append(item.clone(action="lista", title=">> Página Siguiente", url=next_page))
else:
next_page = scrapertools.find_single_match(data, '<li class="next">.*?href="([^"]*)"')
if next_page and not next_page.startswith("#"):
next_page = urlparse.urljoin(host, next_page)
itemlist.append(item.clone(action="lista", title=">> Página Siguiente", url=next_page))
else:
next_page = scrapertools.find_single_match(data, '<li class="next">.*?from:(\d+)')
if next_page:
if "from=" in item.url:
next_page = re.sub(r'&from=(\d+)', '&from=%s' % next_page, item.url)
else:
next_page = "%s?mode=async&function=get_block&block_id=list_videos_common_videos_list&sort_by=post_date&from=%s" % (
item.url, next_page)
itemlist.append(item.clone(action="lista", title=">> Página Siguiente", url=next_page))
return itemlist
def categorias(item):
logger.info()
itemlist = []
# Download the page
data = httptools.downloadpage(item.url).data
# Extract the entries
patron = '<a class="item" href="([^"]+)" title="([^"]+)".*?src="([^"]+)".*?<div class="videos">([^<]+)<'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle, scrapedthumbnail, videos in matches:
if videos:
scrapedtitle = "%s (%s)" % (scrapedtitle, videos)
itemlist.append(item.clone(action="lista", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
fanart=scrapedthumbnail))
# Extract the next-page marker
next_page = scrapertools.find_single_match(data, '<li class="next">.*?from:(\d+)')
if next_page:
if "from=" in item.url:
next_page = re.sub(r'&from=(\d+)', '&from=%s' % next_page, item.url)
else:
next_page = "%s&from=%s" % (item.url, next_page)
itemlist.append(item.clone(action="categorias", title=">> Página Siguiente", url=next_page))
return itemlist
def playlists(item):
logger.info()
itemlist = []
# Download the page
data = httptools.downloadpage(item.url).data
# Extract the entries
patron = '<div class="item.*?href="([^"]+)" title="([^"]+)".*?data-original="([^"]+)".*?<div class="videos">([^<]+)<'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle, scrapedthumbnail, videos in matches:
if videos:
scrapedtitle = "%s (%s)" % (scrapedtitle, videos)
itemlist.append(item.clone(action="videos", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
fanart=scrapedthumbnail))
# Extract the next-page marker
next_page = scrapertools.find_single_match(data, '<li class="next">.*?href="([^"]+)"')
if next_page:
next_page = urlparse.urljoin(host, next_page)
itemlist.append(item.clone(action="playlists", title=">> Página Siguiente", url=next_page))
return itemlist
def videos(item):
logger.info()
itemlist = []
# Download the page
data = httptools.downloadpage(item.url).data
action = "play"
if config.get_setting("menu_info", "freecambay"):
action = "menu_info"
# Extract the entries
patron = '<a href="([^"]+)" class="item ".*?data-original="([^"]+)".*?<strong class="title">\s*([^<]+)<'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
scrapedtitle = scrapedtitle.strip()
itemlist.append(item.clone(action=action, title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
fanart=scrapedthumbnail))
# Extract the next-page marker
next_page = scrapertools.find_single_match(data, '<li class="next">.*?from:(\d+)')
if next_page:
if "from=" in item.url:
next_page = re.sub(r'&from=(\d+)', '&from=%s' % next_page, item.url)
else:
next_page = "%s?mode=async&function=get_block&block_id=playlist_view_playlist_view&sort_by" \
"=added2fav_date&&from=%s" % (item.url, next_page)
itemlist.append(item.clone(action="videos", title=">> Página Siguiente", url=next_page))
return itemlist
def play(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
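# The player's JavaScript lists its sources as video_url / video_alt_urlN, each with a *_text quality label; capture (url, label) pairs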
patron = '(?:video_url|video_alt_url[0-9]*)\s*:\s*\'([^\']+)\'.*?(?:video_url_text|video_alt_url[0-9]*_text)\s*:\s*\'([^\']+)\''
matches = scrapertools.find_multiple_matches(data, patron)
if not matches:
patron = '<iframe.*?height="(\d+)".*?video_url\s*:\s*\'([^\']+)\''
matches = scrapertools.find_multiple_matches(data, patron)
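# The fallback iframe pattern captures (height, url) instead, so when the second field is a URL the loop below swaps the pair and uses the height as the quality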
for url, quality in matches:
if "http" in quality:
calidad = url
url = quality
quality = calidad + "p"
itemlist.append(['.mp4 %s [directo]' % quality, url])
if item.extra == "play_menu":
return itemlist, data
return itemlist
def menu_info(item):
logger.info()
itemlist = []
video_urls, data = play(item.clone(extra="play_menu"))
itemlist.append(item.clone(action="play", title="Ver -- %s" % item.title, video_urls=video_urls))
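# Offer the page's preview screenshots as extra, non-playable entries (the first thumb is skipped)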
bloque = scrapertools.find_single_match(data, '<div class="block-screenshots">(.*?)</div>')
matches = scrapertools.find_multiple_matches(bloque, '<img class="thumb lazy-load".*?data-original="([^"]+)"')
for i, img in enumerate(matches):
if i == 0:
continue
img = urlparse.urljoin(host, img)
title = "Imagen %s" % (str(i))
itemlist.append(item.clone(action="", title=title, thumbnail=img, fanart=img))
return itemlist
def tags(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
if item.title == "Tags":
letras = []
matches = scrapertools.find_multiple_matches(data, '<strong class="title".*?>\s*(.*?)</strong>')
for title in matches:
title = title.strip()
if title not in letras:
letras.append(title)
itemlist.append(Item(channel=item.channel, action="tags", url=item.url, title=title, extra=title))
else:
if not item.length:
item.length = 0
bloque = scrapertools.find_single_match(data,
'>%s</strong>(.*?)(?:(?!%s)(?!#)[A-Z#]{1}</strong>|<div class="footer-margin">)' % (
item.extra, item.extra))
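# The block above isolates the tag list for the chosen letter, ending at the next letter heading or the page footer; tags are then paged locally, 100 at a time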
matches = scrapertools.find_multiple_matches(bloque, '<a href="([^"]+)">\s*(.*?)</a>')
for url, title in matches[item.length:item.length + 100]:
itemlist.append(Item(channel=item.channel, action="lista", url=url, title=title))
if len(itemlist) >= 100:
itemlist.append(Item(channel=item.channel, action="tags", url=item.url, title=">> Página siguiente",
length=item.length + 100, extra=item.extra))
return itemlist

View File

@@ -0,0 +1,62 @@
{
"id": "hdfilmologia",
"name": "HDFilmologia",
"active": true,
"adult": false,
"language": ["cast", "lat", "vose"],
"fanart": "https://i.postimg.cc/qvFCZNKT/Alpha-652355392-large.jpg",
"thumbnail": "https://hdfilmologia.com/templates/gorstyle/images/logo.png",
"banner": "",
"categories": [
"movie",
"vose",
],
"settings": [
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"Latino",
"Castellano",
"English"
]
},
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "perfil",
"type": "list",
"label": "Perfil de color",
"default": 3,
"enabled": true,
"visible": true,
"lvalues": [
"Sin color",
"Perfil 5",
"Perfil 4",
"Perfil 3",
"Perfil 2",
"Perfil 1"
]
},
{
"id": "orden_episodios",
"type": "bool",
"label": "Mostrar los episodios de las series en orden descendente",
"default": false,
"enabled": true,
"visible": true
}
]
}

View File

@@ -0,0 +1,271 @@
# -*- coding: utf-8 -*-
# -*- Channel HDFilmologia -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import sys
import urllib
import urlparse
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from core import channeltools
from core import tmdb
from platformcode import config, logger
from channelselector import get_thumb
__channel__ = "hdfilmologia"
host = "https://hdfilmologia.com/"
try:
__modo_grafico__ = config.get_setting('modo_grafico', __channel__)
__perfil__ = int(config.get_setting('perfil', __channel__))
except:
__modo_grafico__ = True
__perfil__ = 0
# Set the color profile
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFFFFD700'],
['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFFFFD700'],
['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFFFFD700']]
if __perfil__ < 3:
color1, color2, color3, color4, color5 = perfil[__perfil__]
else:
color1 = color2 = color3 = color4 = color5 = ""
headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
['Referer', host]]
parameters = channeltools.get_channel_parameters(__channel__)
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']
IDIOMAS = {'Latino': 'LAT', 'Castellano': 'CAST'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['rapidvideo', 'streamango', 'openload', 'streamcherry']
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(item.clone(title="Últimas Agregadas", action="movies", thumbnail=get_thumb('last', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host + 'index.php?do=lastnews', viewmode="movie_with_plot"))
itemlist.append(item.clone(title="Estrenos", action="movies", thumbnail=get_thumb('premieres', auto=True),
text_blod=True, page=0, viewcontent='movies', url=host + 'estrenos',
viewmode="movie_with_plot"))
itemlist.append(item.clone(title="Más Vistas", action="movies", thumbnail=get_thumb('more watched', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host + 'mas-vistas/', viewmode="movie_with_plot"))
itemlist.append(item.clone(title="Películas Por País", action="countriesYears", thumbnail=get_thumb('country',
auto=True), text_blod=True, page=0, viewcontent='movies',
url=host, viewmode="movie_with_plot"))
itemlist.append(item.clone(title="Películas Por Año", action="countriesYears", thumbnail=get_thumb('year', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host, viewmode="movie_with_plot"))
itemlist.append(item.clone(title="Géneros", action="genres", thumbnail=get_thumb('genres', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host, viewmode="movie_with_plot"))
itemlist.append(item.clone(title="Buscar", action="search", thumbnail=get_thumb('search', auto=True),
text_blod=True, url=host, page=0))
autoplay.show_option(item.channel, itemlist)
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = urlparse.urljoin(item.url, "?do=search&mode=advanced&subaction=search&story={0}".format(texto))
# 'https://hdfilmologia.com/?do=search&mode=advanced&subaction=search&story=la+sombra'
try:
return sub_search(item)
# Catch the exception so a failing channel does not break the global search
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
def sub_search(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a class="sres-wrap clearfix" href="([^"]+)">' # url
patron += '<div class="sres-img"><img src="/([^"]+)" alt="([^"]+)" />.*?' # img, title
patron += '<div class="sres-desc">(.*?)</div>' # plot
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, plot in matches:
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, contentTitle=scrapedtitle,
action="findvideos", text_color=color3, page=0, plot=plot,
thumbnail=host + scrapedthumbnail))
pagination = scrapertools.find_single_match(data, 'class="pnext"><a href="([^"]+)">')
if pagination:
itemlist.append(Item(channel=__channel__, action="sub_search",
title="» Siguiente »", url=pagination))
tmdb.set_infoLabels(itemlist)
return itemlist
def movies(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}|&nbsp;", "", data)
patron = '<div class="kino-item ignore-select">.*?<a href="([^"]+)" class="kino-h"><h2>([^<]+)</h2>.*?' # url, title
patron += '<img src="([^"]+)".*?' # img
patron += '<div class="k-meta qual-mark">([^<]+)</div>.*?' # quality
patron += '<strong>Año:</strong></div>([^<]+)</li>' # year
matches = scrapertools.find_multiple_matches(data, patron)
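# Results are paged locally 25 at a time; only when the list is exhausted is the site's "pnext" link followed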
for scrapedurl, scrapedtitle, scrapedthumbnail, quality, year in matches[item.page:item.page + 25]:
scrapedthumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
title = "%s [COLOR yellow][%s][/COLOR]" % (scrapedtitle, quality)
itemlist.append(Item(channel=__channel__, action="findvideos", text_color=color3,
url=scrapedurl, infoLabels={'year': year.strip()},
contentTitle=scrapedtitle, thumbnail=scrapedthumbnail,
title=title, context="buscar_trailer"))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
if item.page + 25 < len(matches):
itemlist.append(item.clone(page=item.page + 25,
title="» Siguiente »", text_color=color3))
else:
next_page = scrapertools.find_single_match(
data, 'class="pnext"><a href="([^"]+)">')
if next_page:
itemlist.append(item.clone(url=next_page, page=0,
title="» Siguiente »", text_color=color3))
return itemlist
def genres(item):
logger.info()
itemlist = []
data = scrapertools.cache_page(item.url)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<li class="myli"><a href="/([^"]+)">([^<]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
itemlist.append(item.clone(channel=__channel__, action="movies", title=scrapedtitle,
url=host + scrapedurl, text_color=color3, viewmode="movie_with_plot"))
return itemlist
def countriesYears(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\(.*?\)|&nbsp;|<br>", "", data)
if item.title == "Películas Por País":
patron_todas = 'Por País</option>(.*?)</option></select>'
else:
patron_todas = 'Por Año</option>(.*?)<option value="/">Peliculas'
data = scrapertools.find_single_match(data, patron_todas)
patron = '<option value="/([^"]+)">([^<]+)</option>' # url, title
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle in matches:
itemlist.append(item.clone(title=scrapedtitle, url=host + scrapedurl, action="movies"))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|amp;|#038;|\(.*?\)|\s{2}|&nbsp;", "", data)
patron = '(\w+)src\d+="([^"]+)"'
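# Player sources are stored as attributes like "lsrc1=": the leading letter encodes the language (l/e/s) and the value is the embed URL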
matches = re.compile(patron, re.DOTALL).findall(data)
for lang, url in matches:
server = servertools.get_server_from_url(url)
if 'dropbox' in url:
server = 'dropbox'
if '/drive/' in url:
data = httptools.downloadpage(url).data
url = scrapertools.find_single_match(data, '<iframe src="([^"]+)"')
server = 'gdrive'
if 'ultrapeliculashd' in url:
data = httptools.downloadpage(url).data
# logger.info(data)
patron = "\|s\|(\w+)\|"
matches = re.compile(patron, re.DOTALL).findall(data)
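# Rebuild a direct Dropbox download link from the file key found in the packed player script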
for key in matches:
url = 'https://www.dropbox.com/s/%s?dl=1' % (key)
server = 'dropbox'
languages = {'l': '[COLOR cornflowerblue](LAT)[/COLOR]',
'e': '[COLOR green](CAST)[/COLOR]',
's': '[COLOR red](VOS)[/COLOR]'}
if lang in languages:
lang = languages[lang]
title = "Ver en: [COLOR yellow](%s)[/COLOR] [COLOR yellowgreen]%s[/COLOR]" % (server.title(), lang)
if 'youtube' not in server:
itemlist.append(item.clone(action='play', url=url, title=title, language=lang,
text_color=color3))
itemlist = servertools.get_servers_itemlist(itemlist)
itemlist.sort(key=lambda it: it.language, reverse=False)
# Required by FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Required by AutoPlay
autoplay.start(itemlist, item)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'episodios':
itemlist.append(Item(channel=__channel__, url=item.url, action="add_pelicula_to_library", extra="findvideos",
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
thumbnail=thumbnail_host, contentTitle=item.contentTitle))
return itemlist

View File

@@ -1,22 +0,0 @@
{
"id": "hentaienespanol",
"name": "HentaiEnEspañol",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "https://s11.postimg.cc/cmuwcvvpf/hentaienespanol.png",
"banner": "https://s3.postimg.cc/j3qkfut8z/hentaienespanol_banner.png",
"categories": [
"adult"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
}
]
}

View File

@@ -1,63 +0,0 @@
# -*- coding: utf-8 -*-
import re
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger
host = 'http://www.xn--hentaienespaol-1nb.net/'
headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
['Referer', host]]
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, title="Todos", action="todas", url=host, thumbnail='', fanart=''))
itemlist.append(
Item(channel=item.channel, title="Sin Censura", action="todas", url=host + 'hentai/sin-censura/', thumbnail='',
fanart=''))
return itemlist
def todas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div class="box-peli" id="post-.*?">.<h2 class="title">.<a href="([^"]+)">([^<]+)<\/a>.*?'
patron += 'height="170px" src="([^"]+)'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
url = scrapedurl
title = scrapedtitle # .decode('utf-8')
thumbnail = scrapedthumbnail
fanart = ''
itemlist.append(
Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, fanart=fanart))
# Pagination
title = ''
siguiente = scrapertools.find_single_match(data, 'class="nextpostslink" rel="next" href="([^"]+)">')
title = 'Pagina Siguiente >>> '
fanart = ''
itemlist.append(Item(channel=item.channel, action="todas", title=title, url=siguiente, fanart=fanart))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
if texto != '':
return todas(item)
else:
return []

View File

@@ -108,8 +108,8 @@ def series(item):
plot=scrapedplot, show=scrapedtitle))
tmdb.set_infoLabels(itemlist)
try:
siguiente = scrapertools.find_single_match(data, '<a class="listsiguiente" href="([^"]+)" >Resultados Siguientes')
scrapedurl = item.url + siguiente
siguiente = scrapertools.find_single_match(data, '<a class="text nav-next" href="([^"]+)"')
scrapedurl = siguiente
scrapedtitle = ">> Pagina Siguiente"
scrapedthumbnail = ""
scrapedplot = ""

View File

@@ -98,29 +98,31 @@ def findvideos(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;","", data)
patron = scrapertools.find_single_match(data, '<div id="player2">(.*?)</div>')
patron = '<div id="div.*?<div class="movieplay">.+?[a-zA-Z]="([^&]+)&'
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;","", data)
patron = '<div id="div.*?<div class="movieplay".*?(?:iframe.*?src|IFRAME SRC)="([^&]+)&'
matches = re.compile(patron, re.DOTALL).findall(data)
for link in matches:
if 'id=' in link:
id_type = 'id'
ir_type = 'ir'
elif 'ud=' in link:
id_type = 'ud'
ir_type = 'ur'
elif 'od=' in link:
id_type = 'od'
ir_type = 'or'
elif 'ad=' in link:
id_type = 'ad'
ir_type = 'ar'
elif 'ed=' in link:
id_type = 'ed'
ir_type = 'er'
for link in matches:
if 'maxipelis24.tv/hideload/?' in link:
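# hideload links carry a two-letter id parameter (id/ud/od/ad/ed); map it to the matching redirect key (ir/ur/or/ar/er) used to resolve the link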
if 'id=' in link:
id_type = 'id'
ir_type = 'ir'
elif 'ud=' in link:
id_type = 'ud'
ir_type = 'ur'
elif 'od=' in link:
id_type = 'od'
ir_type = 'or'
elif 'ad=' in link:
id_type = 'ad'
ir_type = 'ar'
elif 'ed=' in link:
id_type = 'ed'
ir_type = 'er'
else:
continue
id = scrapertools.find_single_match(link, '%s=(.*)' % id_type)
base_link = scrapertools.find_single_match(link, '(.*?)%s=' % id_type)
@@ -131,11 +133,18 @@ def findvideos(item):
url = video_data.headers['location']
title = '%s'
itemlist.append(Item(channel=item.channel, title=title, url=url, action='play',
language='', infoLabels=item.infoLabels))
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())
if itemlist:
if config.get_videolibrary_support():
itemlist.append(Item(channel = item.channel, action = ""))
itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
contentTitle = item.contentTitle
))
return itemlist
return itemlist

View File

@@ -100,7 +100,7 @@
"id": "intervenidos_channels_list",
"type": "text",
"label": "Lista de canales y clones de NewPct1 intervenidos y orden de sustitución de URLs",
"default": "('0', 'canal_org', 'canal_des', 'url_org', 'url_des', 'patron1', 'patron2', 'patron3', 'patron4', 'patron5', 'content_inc', 'content_exc', 'ow_force'), ('0', 'mejortorrent', 'mejortorrent1', 'http://www.mejortorrent.com/', 'https://mejortorrent1.com/', '(http.?:\/\/.*?\/)', 'http.?:\/\/.*?\/.*?-torrent.?-[^-]+-(?:[^-]+-)([^0-9]+-)', 'http.?:\/\/.*?\/.*?-torrent.?-[^-]+-(?:[^-]+-)[^0-9]+-\\d+-(Temporada-).html', 'http.?:\/\/.*?\/.*?-torrent.?-[^-]+-(?:[^-]+-)[^0-9]+-(\\d+)-', '', 'tvshow, season', '', 'force'), ('0', 'mejortorrent', 'mejortorrent1', 'http://www.mejortorrent.com/', 'https://mejortorrent1.com/', '(http.?:\/\/.*?\/)', 'http.?:\/\/.*?\/.*?-torrent.?-[^-]+-([^.]+).html', '', '', '', 'movie', '', 'force'), ('0', 'mejortorrent', 'mejortorrent', 'http://www.mejortorrent.com/', 'http://www.mejortorrent.org/', '', '', '', '', '', '*', '', 'force'), ('1', 'plusdede', 'megadede', 'https://www.plusdede.com', 'https://www.megadede.com', '', '', '', '', '', '*', '', 'auto'), ('1', 'newpct1', 'descargas2020', 'http://www.newpct1.com', 'http://descargas2020.com', '', '', '', '', '', '*', '', 'force')",
"default": "('0', 'canal_org', 'canal_des', 'url_org', 'url_des', 'patron1', 'patron2', 'patron3', 'patron4', 'patron5', 'content_inc', 'content_exc', 'ow_force'), ('0', 'canal_org', 'canal_des', 'url_org', 'url_des', 'patron1', 'patron2', 'patron3', 'patron4', 'patron5', 'content_inc', 'content_exc', 'ow_force')",
"enabled": true,
"visible": false
},

View File

@@ -136,7 +136,7 @@ def peliculas(item):
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
pagination = scrapertools.find_single_match(data, "<span class=\"current\">\d+</span><a href='([^']+)'")
pagination = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />')
if pagination:
itemlist.append(Item(channel=__channel__, action="peliculas", title="» Siguiente »",
@@ -239,7 +239,7 @@ def series(item):
action="temporadas", contentType='tvshow'))
tmdb.set_infoLabels(itemlist, __modo_grafico__)
pagination = scrapertools.find_single_match(data, "<link rel='next' href='([^']+)' />")
pagination = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />')
if pagination:
itemlist.append(Item(channel=__channel__, action="series", title="» Siguiente »", url=pagination,

View File

@@ -0,0 +1,63 @@
{
"id": "pelis24",
"name": "Pelis24",
"active": true,
"adult": false,
"language": ["lat", "cast", "vose"],
"fanart": "https://i.postimg.cc/WpqD2n77/cine24bg.jpg",
"thumbnail": "https://www.pelis24.in/wp-content/uploads/2018/05/44.png",
"banner": "",
"categories": [
"movie",
"tvshow",
"vose",
],
"settings": [
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"Latino",
"Castellano",
"English"
]
},
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "perfil",
"type": "list",
"label": "Perfil de color",
"default": 3,
"enabled": true,
"visible": true,
"lvalues": [
"Sin color",
"Perfil 5",
"Perfil 4",
"Perfil 3",
"Perfil 2",
"Perfil 1"
]
},
{
"id": "orden_episodios",
"type": "bool",
"label": "Mostrar los episodios de las series en orden descendente",
"default": false,
"enabled": true,
"visible": true
}
]
}

View File

@@ -0,0 +1,392 @@
# -*- coding: utf-8 -*-
# -*- Channel Pelis24 -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import sys
import urllib
import urlparse
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from core import channeltools
from core import tmdb
from platformcode import config, logger
from channelselector import get_thumb
__channel__ = "pelis24"
host = "https://www.pelis24.in/"
try:
__modo_grafico__ = config.get_setting('modo_grafico', __channel__)
__perfil__ = int(config.get_setting('perfil', __channel__))
except:
__modo_grafico__ = True
__perfil__ = 0
# Set the color profile
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFFFFD700'],
['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFFFFD700'],
['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFFFFD700']]
if __perfil__ < 3:
color1, color2, color3, color4, color5 = perfil[__perfil__]
else:
color1 = color2 = color3 = color4 = color5 = ""
headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
['Referer', host]]
parameters = channeltools.get_channel_parameters(__channel__)
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']
IDIOMAS = {'Latino': 'LAT', 'Castellano': 'CAST'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['rapidvideo', 'streamango', 'openload', 'streamcherry']
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = [item.clone(title="Novedades", action="peliculas", thumbnail=get_thumb('newest', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host + 'movies/', viewmode="movie_with_plot"),
item.clone(title="Tendencias", action="peliculas", thumbnail=get_thumb('newest', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host + 'tendencias/?get=movies', viewmode="movie_with_plot"),
item.clone(title="Estrenos", action="peliculas", thumbnail=get_thumb('estrenos', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host + 'genre/estrenos/', viewmode="movie_with_plot"),
item.clone(title="Géneros", action="genresYears", thumbnail=get_thumb('genres', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host, viewmode="movie_with_plot"),
item.clone(title="Buscar", action="search", thumbnail=get_thumb('search', auto=True),
text_blod=True, url=host, page=0)]
autoplay.show_option(item.channel, itemlist)
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = urlparse.urljoin(item.url, "?s={0}".format(texto))
try:
return sub_search(item)
# Catch the exception so a failing channel does not break the global search
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
def sub_search(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
# logger.info(data)
data = scrapertools.find_single_match(data, '<header><h1>Resultados encontrados(.*?)resppages')
# logger.info(data)
patron = '<a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)" />.*?' # url, img, title
patron += '<span class="year">([^<]+)</span>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, year in matches:
if 'tvshows' not in scrapedurl:
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, contentTitle=scrapedtitle,
action="findvideos", infoLabels={"year": year},
thumbnail=scrapedthumbnail, text_color=color3))
paginacion = scrapertools.find_single_match(data, "<span class=\"current\">\d+</span><a href='([^']+)'")
if paginacion:
itemlist.append(Item(channel=item.channel, action="sub_search",
title="» Siguiente »", url=paginacion,
thumbnail='https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/next.png'))
tmdb.set_infoLabels(itemlist)
return itemlist
def peliculas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}|&nbsp;", "", data)
data = scrapertools.decodeHtmlentities(data)
# logger.info(data)
# img, title
patron = '<article id="post-\w+" class="item movies"><div class="poster"><img src="([^"]+)" alt="([^"]+)">.*?'
patron += '<span class="quality">([^<]+)</span> </div>\s*<a href="([^"]+)">.*?' # quality, url
patron += '</h3><span>([^<]+)</span>' # year
matches = scrapertools.find_multiple_matches(data, patron)
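# Results are paged locally 30 at a time; only when the list is exhausted is the site's rel="next" link followed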
for scrapedthumbnail, scrapedtitle, quality, scrapedurl, year in matches[item.page:item.page + 30]:
title = '%s [COLOR yellowgreen](%s)[/COLOR]' % (scrapedtitle, quality)
itemlist.append(Item(channel=__channel__, action="findvideos", text_color=color3,
url=scrapedurl, infoLabels={'year': year}, quality=quality,
contentTitle=scrapedtitle, thumbnail=scrapedthumbnail,
title=title, context="buscar_trailer"))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
if item.page + 30 < len(matches):
itemlist.append(item.clone(page=item.page + 30,
title="» Siguiente »", text_color=color3))
else:
next_page = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />')
if next_page:
itemlist.append(item.clone(url=next_page, page=0, title="» Siguiente »", text_color=color3))
return itemlist
def genresYears(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\(.*?\)|&nbsp;|<br>", "", data)
data = scrapertools.decodeHtmlentities(data)
if item.title == "Estrenos por Año":
patron_todas = 'ESTRENOS</a>(.*?)</i> Géneros'
else:
patron_todas = '<h2>Generos</h2>(.*?)</div><aside'
# logger.error(texto='***********uuuuuuu*****' + patron_todas)
data = scrapertools.find_single_match(data, patron_todas)
# logger.error(texto='***********uuuuuuu*****' + data)
patron = '<a href="([^"]+)">([^<]+)</a> <i>([^<]+)</i>' # url, title, videos
# patron = '<a href="([^"]+)">([^<]+)</a>' # url, title
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle, videos_num in matches:
title = '%s (%s)' % (scrapedtitle, videos_num.replace('.', ','))
itemlist.append(item.clone(title=title, url=scrapedurl, action="peliculas"))
return itemlist
def year_release(item):
logger.info()
itemlist = []
data = scrapertools.cache_page(item.url)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
# logger.info(data)
patron = '<li><a href="([^"]+)">([^<]+)</a></li>' # url, title
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
itemlist.append(item.clone(channel=item.channel, action="peliculas", title=scrapedtitle, page=0,
url=scrapedurl, text_color=color3, viewmode="movie_with_plot", extra='next'))
return itemlist
def series(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\(.*?\)|&nbsp;|<br>", "", data)
# logger.info(data)
patron = '<article class="TPost C TPostd">\s*<a href="([^"]+)">.*?' # url
patron += '<img src="([^"]+)".*?' # img
patron += '<h3 class="Title">([^<]+)</h3>' # title
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches[item.page:item.page + 30]:
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action="temporadas",
contentSerieName=scrapedtitle, show=scrapedtitle,
thumbnail='https:' + scrapedthumbnail, contentType='tvshow'))
tmdb.set_infoLabels(itemlist, __modo_grafico__)
if item.page + 30 < len(matches):
itemlist.append(item.clone(page=item.page + 30,
title="» Siguiente »", text_color=color3))
else:
next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)">')
if next_page:
itemlist.append(item.clone(url=next_page, page=0,
title="» Siguiente »", text_color=color3))
return itemlist
def temporadas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
# logger.info(data)
patron = '<div class="[^>]+>[^<]+<span>(.*?)</span> <i' # season numbers
matches = scrapertools.find_multiple_matches(data, patron)
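# With more than one season, build a per-season item and let TMDB fill in names and posters; a single season jumps straight to the episode list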
if len(matches) > 1:
for scrapedseason in matches:
new_item = item.clone(action="episodios", season=scrapedseason, extra='temporadas')
new_item.infoLabels['season'] = scrapedseason
new_item.extra = ""
itemlist.append(new_item)
tmdb.set_infoLabels(itemlist, __modo_grafico__)
for i in itemlist:
i.title = "%s. %s" % (i.infoLabels['season'], i.infoLabels['tvshowtitle'])
if i.infoLabels['title']:
# If the season has its own name, append it to the item title
i.title += " - %s" % (i.infoLabels['title'])
if i.infoLabels.has_key('poster_path'):
# If the season has its own poster, replace the series one with it
i.thumbnail = i.infoLabels['poster_path']
itemlist.sort(key=lambda it: int(it.infoLabels['season']))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
text_color=color1, thumbnail=thumbnail_host, fanart=fanart_host))
return itemlist
else:
return episodios(item)
def episodios(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<td class="MvTbImg B"><a href="([^"]+)".*?' # url
patron += '<td class="MvTbTtl"><a href="https://cine24h.net/episode/(.*?)/">([^<]+)</a>' # title de episodios
matches = scrapertools.find_multiple_matches(data, patron)
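# Episode links are titled "SxE"; "--" placeholders are normalized to 0 before parsing the season and episode numbers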
for scrapedurl, scrapedtitle, scrapedname in matches:
scrapedtitle = scrapedtitle.replace('--', '0')
patron = '(\d+)x(\d+)'
match = re.compile(patron, re.DOTALL).findall(scrapedtitle)
season, episode = match[0]
if 'season' in item.infoLabels and int(item.infoLabels['season']) != int(season):
continue
title = "%sx%s: %s" % (season, episode.zfill(2), scrapedname)
new_item = item.clone(title=title, url=scrapedurl, action="findvideos", text_color=color3, fulltitle=title,
contentType="episode")
if 'infoLabels' not in new_item:
new_item.infoLabels = {}
new_item.infoLabels['season'] = season
new_item.infoLabels['episode'] = episode.zfill(2)
itemlist.append(new_item)
# TODO: skip this when adding to the video library
if not item.extra:
# Fetch the data for every episode of the season using multiple threads
tmdb.set_infoLabels(itemlist, __modo_grafico__)
for i in itemlist:
if i.infoLabels['title']:
# If the episode has its own name, append it to the item title
i.title = "%sx%s %s" % (i.infoLabels['season'], i.infoLabels[
'episode'], i.infoLabels['title'])
if i.infoLabels.has_key('poster_path'):
# If the episode has its own image, replace the poster with it
i.thumbnail = i.infoLabels['poster_path']
itemlist.sort(key=lambda it: int(it.infoLabels['episode']),
reverse=config.get_setting('orden_episodios', __channel__))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
# "Add this series to the video library" option
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
text_color=color1, thumbnail=thumbnail_host, fanart=fanart_host))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|amp;|#038;|\(.*?\)|\s{2}|&nbsp;", "", data)
data = scrapertools.decodeHtmlentities(data)
# logger.info(data)
# patron1 = 'data-tplayernv="Opt(.*?)"><span>(.*?)</span><span>(.*?)</span>' # option, server, lang - quality
patron = 'href="#option-(.*?)"><span class="dt_flag"><img src="[^"]+"></span>([^<]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
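# Each language tab (#option-N) holds its own iframe; look up the embed URL for every option found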
# urls = re.compile(patron2, re.DOTALL).findall(data)
for option, lang in matches:
url = scrapertools.find_single_match(
data, '<div id="option-%s" class="[^"]+"><iframe class="metaframe rptss" src="([^"]+)"' % option)
lang = lang.lower().strip()
languages = {'latino': '[COLOR cornflowerblue](LAT)[/COLOR]',
'castellano': '[COLOR green](CAST)[/COLOR]',
'español': '[COLOR green](CAST)[/COLOR]',
'subespañol': '[COLOR red](VOS)[/COLOR]',
'sub': '[COLOR red](VOS)[/COLOR]',
'ingles': '[COLOR red](VOS)[/COLOR]'}
if lang in languages:
lang = languages[lang]
server = servertools.get_server_from_url(url)
title = "»» [COLOR yellow](%s)[/COLOR] [COLOR goldenrod](%s)[/COLOR] %s ««" % (server.title(), item.quality, lang)
# if 'google' not in url and 'directo' not in server:
itemlist.append(item.clone(action='play', url=url, title=title, language=lang, text_color=color3))
itemlist = servertools.get_servers_itemlist(itemlist)
itemlist.sort(key=lambda it: it.language, reverse=False)
# Required by FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Required by AutoPlay
autoplay.start(itemlist, item)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'episodios':
itemlist.append(Item(channel=__channel__, url=item.url, action="add_pelicula_to_library", extra="findvideos",
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
thumbnail=thumbnail_host, contentTitle=item.contentTitle))
return itemlist

View File

@@ -0,0 +1,64 @@
{
"id": "pelishd24",
"name": "PelisHD24",
"active": true,
"adult": false,
"language": ["lat", "cast", "eng"],
"fanart": "https://pelishd24.com/wp-content/uploads/2018/11/background.png",
"thumbnail": "https://pelishd24.com/wp-content/uploads/2018/07/pelishd24.2.png",
"banner": "",
"categories": [
"movie",
"tvshow",
"vos",
"direct"
],
"settings": [
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"Latino",
"Castellano",
"English"
]
},
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "perfil",
"type": "list",
"label": "Perfil de color",
"default": 3,
"enabled": true,
"visible": true,
"lvalues": [
"Sin color",
"Perfil 5",
"Perfil 4",
"Perfil 3",
"Perfil 2",
"Perfil 1"
]
},
{
"id": "orden_episodios",
"type": "bool",
"label": "Mostrar los episodios de las series en orden descendente",
"default": false,
"enabled": true,
"visible": true
}
]
}

View File

@@ -0,0 +1,464 @@
# -*- coding: utf-8 -*-
# -*- Channel PelisHD24 -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import sys
import urlparse
from channels import autoplay
from lib import generictools
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from core import channeltools
from core import tmdb
from platformcode import config, logger
from channelselector import get_thumb
__channel__ = "pelishd24"
host = "https://pelishd24.com/"
try:
__modo_grafico__ = config.get_setting('modo_grafico', __channel__)
__perfil__ = int(config.get_setting('perfil', __channel__))
except:
__modo_grafico__ = True
__perfil__ = 0
# Set the color profile
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFFFFD700'],
['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFFFFD700'],
['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFFFFD700']]
if __perfil__ < 3:
color1, color2, color3, color4, color5 = perfil[__perfil__]
else:
color1 = color2 = color3 = color4 = color5 = ""
headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
['Referer', host]]
parameters = channeltools.get_channel_parameters(__channel__)
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']
IDIOMAS = {'Latino': 'LAT', 'Castellano': 'CAST', 'English': 'VOS'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['rapidvideo', 'streamango', 'openload', 'streamcherry']
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = [item.clone(title="Peliculas", action="menumovies", text_blod=True,
viewcontent='movies', viewmode="movie_with_plot", thumbnail=get_thumb('movies', auto=True)),
item.clone(title="Series", action="series", extra='serie', url=host + 'series/',
viewmode="movie_with_plot", text_blod=True, viewcontent='movies',
thumbnail=get_thumb('tvshows', auto=True), page=0),
item.clone(title="Buscar", action="search", thumbnail=get_thumb('search', auto=True),
text_blod=True, url=host, page=0)]
autoplay.show_option(item.channel, itemlist)
return itemlist
def menumovies(item):
logger.info()
itemlist = [item.clone(title="Todas", action="peliculas", thumbnail=get_thumb('all', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host + 'peliculas/', viewmode="movie_with_plot"),
item.clone(title="Estrenos", action="peliculas", thumbnail=get_thumb('estrenos', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host + '?s=trfilter&trfilter=1&years=2018', viewmode="movie_with_plot"),
item.clone(title="Más Vistas", action="peliculas", thumbnail=get_thumb('more watched', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host + 'mas-vistas/', viewmode="movie_with_plot"),
item.clone(title="Más Votadas", action="peliculas", thumbnail=get_thumb('more voted', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host + 'peliculas-mas-votadas/', viewmode="movie_with_plot"),
item.clone(title="Géneros", action="genres_atoz", thumbnail=get_thumb('genres', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host, viewmode="movie_with_plot"),
item.clone(title="A-Z", action="genres_atoz", thumbnail=get_thumb('year', auto=True),
text_blod=True, page=0, viewcontent='movies', url=host,
viewmode="movie_with_plot"),
item.clone(title="Buscar", action="search", thumbnail=get_thumb('search', auto=True),
text_blod=True, url=host, page=0, extra='buscarP')]
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = urlparse.urljoin(item.url, "?s={0}".format(texto))
try:
return peliculas(item)
# Catch the exception so a failing channel does not break the global search
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
def peliculas(item):
logger.info()
itemlist = []
action = ''
contentType = ''
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}|&nbsp;", "", data)
data = scrapertools.decodeHtmlentities(data)
patron = '<article id="[^"]+" class="TPost[^<]+<a href="([^"]+)">.*?' # url
patron += '<img src="([^"]+)".*?' # img
patron += '</figure>(.*?)' # tipo
patron += '<h3 class="Title">([^<]+)</h3>.*?' # title
patron += '<span class="Year">([^<]+)</span>.*?' # year
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, tipo, scrapedtitle, year in matches[item.page:item.page + 30]:
title = ''
if '/serie/' in scrapedurl:
action = 'temporadas'
contentType = 'tvshow'
title = scrapedtitle + '[COLOR blue] (Serie)[/COLOR]'
else:
action = 'findvideos'
contentType = 'movie'
title = scrapedtitle
itemlist.append(item.clone(channel=__channel__, action=action, text_color=color3, show=scrapedtitle,
url=scrapedurl, infoLabels={'year': year}, extra='peliculas',
contentTitle=scrapedtitle, thumbnail='https:' + scrapedthumbnail,
title=title, context="buscar_trailer", contentType=contentType))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
if item.page + 30 < len(matches):
itemlist.append(item.clone(page=item.page + 30,
title="» Siguiente »", text_color=color3))
else:
next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)">')
if next_page:
itemlist.append(item.clone(url=next_page, page=0, title="» Siguiente »", text_color=color3))
return itemlist
def genres_atoz(item):
logger.info()
itemlist = []
action = ''
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\(.*?\)|&nbsp;|<br>", "", data)
data = scrapertools.decodeHtmlentities(data)
if item.title == "A-Z":
patron_todas = '<ul class="AZList"(.*?)</li></ul>'
action = 'atoz'
else:
patron_todas = '<a href="#">GENERO</a>(.*?)</li></ul>'
action = 'peliculas'
data = scrapertools.find_single_match(data, patron_todas)
patron = '<a href="([^"]+)">([^<]+)</a>' # url, title
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle in matches:
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action=action))
return itemlist
def atoz(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\(.*?\)|&nbsp;|<br>", "", data)
data = scrapertools.decodeHtmlentities(data)
patron = '<td class="MvTbImg"> <a href="([^"]+)".*?' # url
patron += '<img src="([^"]+)".*?' # img
patron += '<strong>([^<]+)</strong> </a></td><td>([^<]+)</td>.*?' # title, year
patron += '<span class="Qlty">([^<]+)</span>' # quality
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle, year, quality in matches[item.page:item.page + 30]:
title = ''
action = ''
if '/serie/' in scrapedurl:
action = 'temporadas'
contentType = 'tvshow'
title = scrapedtitle + '[COLOR blue] (Serie)[/COLOR]'
else:
action = 'findvideos'
contentType = 'movie'
title = "%s [COLOR yellow]%s[/COLOR]" % (scrapedtitle, quality)
itemlist.append(item.clone(channel=__channel__, action=action, text_color=color3, contentType=contentType,
url=scrapedurl, infoLabels={'year': year}, extra='peliculas',
contentTitle=scrapedtitle, thumbnail='https:' + scrapedthumbnail,
title=title, context="buscar_trailer", show=scrapedtitle, ))
if item.page + 30 < len(matches):
itemlist.append(item.clone(page=item.page + 30,
title="» Siguiente »", text_color=color3))
else:
next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)">')
if next_page:
itemlist.append(item.clone(url=next_page, page=0, title="» Siguiente »", text_color=color3))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
return itemlist
def series(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\(.*?\)|&nbsp;|<br>", "", data)
patron = '<article class="TPost C">\s*<a href="([^"]+)">.*?' # url
patron += '<img src="([^"]+)".*?' # img
patron += '<h3 class="Title">([^<]+)</h3>' # title
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches[item.page:item.page + 30]:
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action="temporadas",
contentSerieName=scrapedtitle, show=scrapedtitle,
thumbnail='https:' + scrapedthumbnail, contentType='tvshow'))
tmdb.set_infoLabels(itemlist, __modo_grafico__)
if item.page + 30 < len(matches):
itemlist.append(item.clone(page=item.page + 30,
title="» Siguiente »", text_color=color3))
else:
next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)">')
if next_page:
itemlist.append(item.clone(url=next_page, page=0,
title="» Siguiente »", text_color=color3))
return itemlist
def temporadas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<div class="[^>]+>[^<]+<span>(.*?)</span> <i' # season numbers
matches = scrapertools.find_multiple_matches(data, patron)
if len(matches) > 1:
for scrapedseason in matches:
new_item = item.clone(action="episodios", season=scrapedseason, extra='temporadas')
new_item.infoLabels['season'] = scrapedseason
new_item.extra = ""
itemlist.append(new_item)
tmdb.set_infoLabels(itemlist, __modo_grafico__)
for i in itemlist:
i.title = "%s. %s" % (i.infoLabels['season'], i.infoLabels['tvshowtitle'])
if i.infoLabels['title']:
# If the season has its own name, append it to the item title
i.title += " - %s" % (i.infoLabels['title'])
if i.infoLabels.has_key('poster_path'):
# If the season has its own poster, replace the series one with it
i.thumbnail = i.infoLabels['poster_path']
itemlist.sort(key=lambda it: int(it.infoLabels['season']))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
text_color=color1, thumbnail=thumbnail_host, fanart=fanart_host))
return itemlist
else:
return episodios(item)
def episodios(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<td class="MvTbImg B"><a href="([^"]+)".*?' # url
patron += host + 'episode/(.*?)/">([^<]+)</a>' # episode title
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle, scrapedname in matches:
scrapedtitle = scrapedtitle.replace('--', '0')
patron = '(\d+)x(\d+)'
match = re.compile(patron, re.DOTALL).findall(scrapedtitle)
season, episode = match[0]
if 'season' in item.infoLabels and int(item.infoLabels['season']) != int(season):
continue
title = "%sx%s: %s" % (season, episode.zfill(2), scrapedname)
new_item = item.clone(title=title, url=scrapedurl, action="findvideos", text_color=color3, fulltitle=title,
contentType="episode", extra='episodios')
if 'infoLabels' not in new_item:
new_item.infoLabels = {}
new_item.infoLabels['season'] = season
new_item.infoLabels['episode'] = episode.zfill(2)
itemlist.append(new_item)
# TODO: skip this when adding to the video library
if not item.extra:
# Fetch the data for every episode of the season using multiple threads
tmdb.set_infoLabels(itemlist, __modo_grafico__)
for i in itemlist:
if i.infoLabels['title']:
# If the episode has its own name, append it to the item title
i.title = "%sx%s %s" % (i.infoLabels['season'], i.infoLabels[
'episode'], i.infoLabels['title'])
if i.infoLabels.has_key('poster_path'):
# If the episode has its own image, replace the poster with it
i.thumbnail = i.infoLabels['poster_path']
itemlist.sort(key=lambda it: int(it.infoLabels['episode']),
reverse=config.get_setting('orden_episodios', __channel__))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
# "Add this series to the video library" option
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
text_color=color1, thumbnail=thumbnail_host, fanart=fanart_host))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|amp;|#038;|\(.*?\)|\s{2}|&nbsp;", "", data)
data = scrapertools.decodeHtmlentities(data)
patron = 'data-tplayernv="Opt(.*?)"><span>[^"<]+</span>(.*?)</li>' # option, servername, lang - quality
matches = re.compile(patron, re.DOTALL).findall(data)
for option, quote in matches:
patron = '<span>(.*?) -([^<]+)</span'
match = re.compile(patron, re.DOTALL).findall(quote)
lang, quality = match[0]
quality = quality.strip()
lang = lang.lower().strip()
languages = {'latino': '[COLOR cornflowerblue](LAT)[/COLOR]',
'castellano': '[COLOR green](CAST)[/COLOR]',
'subtitulado': '[COLOR red](VOS)[/COLOR]'}
if lang in languages:
lang = languages[lang]
url_1 = scrapertools.find_single_match(data,
'id="Opt%s"><iframe width="560" height="315" src="([^"]+)"' % option)
new_data = httptools.downloadpage(url_1).data
new_data = re.sub(r"\n|\r|\t|amp;|\(.*?\)|\s{2}|&nbsp;", "", new_data)
new_data = scrapertools.decodeHtmlentities(new_data)
patron1 = '<iframe width="560" height="315" src="([^"]+)"'
match1 = re.compile(patron1, re.DOTALL).findall(new_data)
urls = scrapertools.find_single_match(new_data, '<iframe width="560" height="315" src="([^"]+)"')
servername = servertools.get_server_from_url(urls)
if 'stream.pelishd24.net' in urls:
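# The site's own player returns an obfuscated script; dejuice() decodes it so the direct "file" URLs can be read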
vip_data = httptools.downloadpage(urls).data
dejuiced = generictools.dejuice(vip_data)
patron = '"file":"([^"]+)"'
match = re.compile(patron, re.DOTALL).findall(dejuiced)
for scrapedurl in match:
urls = scrapedurl
servername = 'gvideo'
if 'pelishd24.com/?trhide' in urls:
data = httptools.downloadpage(urls).data
# logger.error(texto='****hex'+data)
patron = '"file":"([^"]+)"'
match = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl in match:
urls = scrapedurl
servername = 'gvideo'
title = "Ver en: [COLOR yellowgreen](%s)[/COLOR] [COLOR yellow](%s)[/COLOR] %s" % (
servername.title(), quality, lang)
if 'embed.pelishd24.com' not in urls and 'embed.pelishd24.net' not in urls:
itemlist.append(item.clone(action='play', title=title, url=urls, language=lang, quality=quality,
text_color=color3))
for url in match1:
new_data = httptools.downloadpage(url).data
new_data = re.sub(r"\n|\r|\t|amp;|\(.*?\)|\s{2}|&nbsp;", "", new_data)
new_data = scrapertools.decodeHtmlentities(new_data)
patron1 = '\["\d+","([^"]+)",\d+]'
match1 = re.compile(patron1, re.DOTALL).findall(new_data)
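# Embedded playlists list their sources as ["n","url",n]; strip the escaped slashes from each URL before resolving its server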
for url in match1:
url = url.replace('\\', '')
servername = servertools.get_server_from_url(url)
if 'pelishd24.net' in url or 'stream.pelishd24.com' in url:
vip_data = httptools.downloadpage(url).data
dejuiced = generictools.dejuice(vip_data)
patron = '"file":"([^"]+)"'
match = re.compile(patron, re.DOTALL).findall(dejuiced)
for scrapedurl in match:
url = scrapedurl
servername = 'gvideo'
if 'ww3.pelishd24.com' in url:
data1 = httptools.downloadpage(url).data
url = scrapertools.find_single_match(data1, '"file": "([^"]+)"')
servername = 'gvideo'
title = "Ver en: [COLOR yellowgreen](%s)[/COLOR] [COLOR yellow](%s)[/COLOR] %s" % (
servername.title(), quality, lang)
itemlist.append(item.clone(action='play', title=title, url=url, language=lang, quality=quality,
text_color=color3))
itemlist = servertools.get_servers_itemlist(itemlist)
itemlist.sort(key=lambda it: it.language, reverse=False)
# Required by FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Required by AutoPlay
autoplay.start(itemlist, item)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'episodios':
itemlist.append(Item(channel=__channel__, url=item.url, action="add_pelicula_to_library", extra="findvideos",
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
thumbnail=thumbnail_host, contentTitle=item.contentTitle))
return itemlist

View File

@@ -0,0 +1,95 @@
{
"id": "pelisplay",
"name": "PelisPlay",
"active": true,
"adult": false,
"language": ["cast", "lat", "vose"],
"fanart": "https://i.postimg.cc/qvFCZNKT/Alpha-652355392-large.jpg",
"thumbnail": "https://www.pelisplay.tv/static/img/logo.png",
"banner": "https://i.postimg.cc/tCb8wh8s/pelisplaybn.jpg",
"categories": [
"movie",
"tvshow",
"vose"
],
"settings": [
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"Latino",
"Castellano",
"Subtitulado"
]
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "perfil",
"type": "list",
"label": "Perfil de color",
"default": 3,
"enabled": true,
"visible": true,
"lvalues": [
"Sin color",
"Perfil 5",
"Perfil 4",
"Perfil 3",
"Perfil 2",
"Perfil 1"
]
},
{
"id": "orden_episodios",
"type": "bool",
"label": "Mostrar los episodios de las series en orden descendente",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - terror",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -0,0 +1,427 @@
# -*- coding: utf-8 -*-
# -*- Channel PelisPlay -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import sys
import urllib
import urlparse
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from core import channeltools
from core import tmdb
from platformcode import config, logger
from channelselector import get_thumb
__channel__ = "pelisplay"
host = "https://www.pelisplay.tv/"
try:
__modo_grafico__ = config.get_setting('modo_grafico', __channel__)
__perfil__ = int(config.get_setting('perfil', __channel__))
except:
__modo_grafico__ = True
__perfil__ = 0
# Set the color profile
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFFFFD700'],
['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFFFFD700'],
['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFFFFD700']]
if __perfil__ < 3:
color1, color2, color3, color4, color5 = perfil[__perfil__]
else:
color1 = color2 = color3 = color4 = color5 = ""
headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
['Referer', host]]
parameters = channeltools.get_channel_parameters(__channel__)
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']
IDIOMAS = {'Latino': 'LAT', 'Castellano': 'CAST', 'Subtitulado': 'VOSE'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['rapidvideo', 'streamango', 'fastplay', 'openload']
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = [item.clone(title="Peliculas", action="menumovies", text_blod=True,
viewcontent='movie', viewmode="movie_with_plot", thumbnail=get_thumb("channels_movie.png")),
item.clone(title="Series", action="menuseries", text_blod=True, extra='serie', mediatype="tvshow",
viewcontent='tvshow', viewmode="tvshow_with_plot",
thumbnail=get_thumb("channels_tvshow.png")),
item.clone(title="Netflix", action="flixmenu", text_blod=True, extra='serie', mediatype="tvshow",
viewcontent='tvshows', viewmode="movie_with_plot",
fanart='https://i.postimg.cc/jjN85j8s/netflix-logo.png',
thumbnail='https://i.postimg.cc/Pxs9zYjz/image.png'),
item.clone(title="Buscar", action="search", text_blod=True, extra='buscar',
thumbnail=get_thumb('search.png'), url=host + 'buscar')]
autoplay.show_option(item.channel, itemlist)
return itemlist
def menumovies(item):
logger.info()
itemlist = [item.clone(title="Estrenos", action="peliculas", text_blod=True,
viewcontent='movie', url=host + 'peliculas/estrenos', viewmode="movie_with_plot"),
item.clone(title="Más Populares", action="peliculas", text_blod=True,
viewcontent='movie', url=host + 'peliculas?filtro=visitas', viewmode="movie_with_plot"),
item.clone(title="Recíen Agregadas", action="peliculas", text_blod=True,
viewcontent='movie', url=host + 'peliculas?filtro=fecha_creacion', viewmode="movie_with_plot"),
item.clone(title="Géneros", action="p_portipo", text_blod=True, extra='movie',
viewcontent='movie', url=host + 'peliculas', viewmode="movie_with_plot"),
item.clone(title="Buscar", action="search", text_blod=True, extra='buscarp',
thumbnail=get_thumb('search.png'), url=host + 'peliculas')]
return itemlist
def menuseries(item):
logger.info()
itemlist = [item.clone(title="Novedades", action="series", text_blod=True, extra='serie', mediatype="tvshow",
viewcontent='tvshow', url=host + 'series', viewmode="tvshow_with_plot"),
item.clone(title="Más Vistas", action="series", text_blod=True, extra='serie', mediatype="tvshow",
viewcontent='tvshow', url=host + 'series?filtro=visitas', viewmode="tvshow_with_plot"),
item.clone(title="Recíen Agregadas", action="series", text_blod=True, extra='serie', mediatype="tvshow",
viewcontent='tvshow', url=host + 'series?filtro=fecha_actualizacion', viewmode="tvshow_with_plot"),
item.clone(title="Géneros", action="p_portipo", text_blod=True, extra='serie',
viewcontent='movie', url=host + 'series', viewmode="movie_with_plot"),
item.clone(title="Buscar", action="search", text_blod=True, extra='buscars',
thumbnail=get_thumb('search.png'), url=host + 'series')]
return itemlist
def flixmenu(item):
logger.info()
itemlist = [item.clone(title="Películas", action="flixmovies", text_blod=True, extra='movie', mediatype="movie",
viewcontent='movie', viewmode="tvshow_with_plot"),
item.clone(title="Series", action="flixtvshow", text_blod=True, extra='serie', mediatype="tvshow",
viewcontent='tvshow', viewmode="tvshow_with_plot"),
item.clone(title="Buscar", action="search", text_blod=True,
thumbnail=get_thumb('search.png'), url=host + 'buscar')]
return itemlist
def flixmovies(item):
logger.info()
itemlist = [item.clone(title="Novedades", action="peliculas", text_blod=True, url=host + 'peliculas/netflix?filtro=fecha_actualizacion',
viewcontent='movie', viewmode="movie_with_plot"),
item.clone(title="Más Vistas", action="peliculas", text_blod=True,
viewcontent='movie', url=host + 'peliculas/netflix?filtro=visitas', viewmode="movie_with_plot"),
item.clone(title="Recíen Agregadas", action="peliculas", text_blod=True,
viewcontent='movie', url=host + 'peliculas/netflix?filtro=fecha_creacion', viewmode="movie_with_plot"),
item.clone(title="Buscar", action="search", text_blod=True, extra="buscarp",
thumbnail=get_thumb('search.png'), url=host + 'peliculas/netflix')]
return itemlist
def flixtvshow(item):
logger.info()
itemlist = [item.clone(title="Novedades", action="series", text_blod=True, url=host + 'series/netflix?filtro=fecha_actualizacion',
viewcontent='tvshow', viewmode="movie_with_plot"),
item.clone(title="Más Vistas", action="series", text_blod=True,
viewcontent='tvshow', url=host + 'series/netflix?filtro=visitas', viewmode="movie_with_plot"),
item.clone(title="Recíen Agregadas", action="series", text_blod=True,
viewcontent='tvshow', url=host + 'series/netflix?filtro=fecha_creacion', viewmode="movie_with_plot"),
item.clone(title="Buscar", action="search", text_blod=True, extra="buscars",
thumbnail=get_thumb('search.png'), url=host + 'series/netflix')]
return itemlist
def p_portipo(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)
action = ''
patron = '<li class="item"><a href="([^"]+)" class="category">.*?' # url
patron += '<div class="[^<]+<img class="[^"]+" src="/([^"]+)"></div><div class="[^"]+">([^<]+)</div>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
if item.extra == 'movie':
action = 'peliculas'
elif item.extra == 'serie':
action = 'series'
itemlist.append(item.clone(action=action,
title=scrapedtitle,
url=scrapedurl,
thumbnail=scrapedthumbnail
))
itemlist.sort(key=lambda it: it.title)
return itemlist
def peliculas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)
patron = '<img class="posterentrada" src="/([^"]+)".*?' # img
patron += '<a href="([^"]+)">.*?' # url
patron += '<p class="description_poster">.*?\(([^<]+)\)</p>.*?' # year
patron += '<div class="Description"> <div>([^<]+)</div>.*?' # plot
patron += '<strong>([^<]+)</strong></h4>' # title
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedthumbnail, scrapedurl, year, plot, scrapedtitle in matches:
if item.infoLabels['plot'] == '':
item.plot = plot
itemlist.append(Item(channel=item.channel, action="findvideos", contentTitle=scrapedtitle,
infoLabels={"year": year}, thumbnail=host + scrapedthumbnail,
url=scrapedurl, title=scrapedtitle, plot=plot))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
pagination = scrapertools.find_single_match(
data, '<li><a href="([^"]+)" rel="next">')
if pagination:
itemlist.append(Item(channel=__channel__, action="peliculas", title="» Siguiente »",
url=pagination, folder=True, text_blod=True, thumbnail=get_thumb("next.png")))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = urlparse.urljoin(item.url, "?q={0}".format(texto))
if item.extra == 'buscarp' or item.extra == 'buscars':
item.url = urlparse.urljoin(item.url, "?buscar={0}".format(texto))
try:
if item.extra == 'buscars':
return series(item)
return peliculas(item)
# Se captura la excepción, para no interrumpir al buscador global si un canal falla
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
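        # Ojo: estas rutas ('movies/', 'genre/...') no parecen corresponder a las secciones en español que usa el resto del canal; posible resto de otra plantilla.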
if categoria == 'peliculas':
item.url = host + 'movies/'
elif categoria == 'infantiles':
item.url = host + "genre/animacion/"
elif categoria == 'terror':
item.url = host + "genre/terror/"
else:
return []
itemlist = peliculas(item)
if itemlist[-1].title == "» Siguiente »":
itemlist.pop()
# Se captura la excepción, para no interrumpir al canal novedades si un canal falla
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def series(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)
patron = '<img class="portada" src="/([^"]+)"><[^<]+><a href="([^"]+)".*?'
patron += 'class="link-title"><h2>([^<]+)</h2>' # title
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
itemlist.append(Item(channel=__channel__, title=scrapedtitle, extra='serie',
url=scrapedurl, thumbnail=host + scrapedthumbnail,
contentSerieName=scrapedtitle, show=scrapedtitle,
action="temporadas", contentType='tvshow'))
tmdb.set_infoLabels(itemlist, __modo_grafico__)
pagination = scrapertools.find_single_match(
data, '<li><a href="([^"]+)" rel="next">')
if pagination:
itemlist.append(Item(channel=__channel__, action="series", title="» Siguiente »", url=pagination,
thumbnail=get_thumb("next.png")))
return itemlist
def temporadas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)
patron = '<img class="posterentrada" src="/([^"]+)" alt="\w+\s*(\w+).*?'
patron += 'class="abrir_temporada" href="([^"]+)">' # img, season
matches = re.compile(patron, re.DOTALL).findall(data)
if len(matches) > 1:
for scrapedthumbnail, temporada, url in matches:
new_item = item.clone(action="episodios", season=temporada, url=url,
thumbnail=host + scrapedthumbnail, extra='serie')
new_item.infoLabels['season'] = temporada
new_item.extra = ""
itemlist.append(new_item)
tmdb.set_infoLabels(itemlist, __modo_grafico__)
for i in itemlist:
i.title = "%s. %s" % (
i.infoLabels['season'], i.infoLabels['tvshowtitle'])
if i.infoLabels['title']:
# Si la temporada tiene nombre propio añadírselo al titulo del item
i.title += " - %s" % (i.infoLabels['title'])
            if 'poster_path' in i.infoLabels:
# Si la temporada tiene poster propio remplazar al de la serie
i.thumbnail = i.infoLabels['poster_path']
# itemlist.sort(key=lambda it: it.title)
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
text_color=color1, thumbnail=get_thumb("videolibrary_tvshow.png"), fanart=fanart_host))
return itemlist
else:
return episodios(item)
def episodios(item):
logger.info()
itemlist = []
from core import jsontools
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)
post_link = '%sentradas/abrir_temporada' % host
token = scrapertools.find_single_match(data, 'data-token="([^"]+)">')
data_t = scrapertools.find_single_match(data, '<a data-s="[^"]+" data-t="([^"]+)"')
data_s = scrapertools.find_single_match(data, '<a data-s="([^"]+)" data-t="[^"]+"')
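    # La petición AJAX de temporadas exige el token CSRF y los identificadores data-t/data-s extraídos del HTML.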
post = {'t': data_t, 's': data_s, '_token': token}
post = urllib.urlencode(post)
new_data = httptools.downloadpage(post_link, post=post).data
json_data = jsontools.load(new_data)
for element in json_data['data']['episodios']:
scrapedname = element['titulo']
episode = element['metas_formateadas']['nepisodio']
season = element['metas_formateadas']['ntemporada']
scrapedurl = element['url_directa']
if 'season' in item.infoLabels and int(item.infoLabels['season']) != int(season):
continue
title = "%sx%s: %s" % (season, episode.zfill(
2), scrapertools.unescape(scrapedname))
new_item = item.clone(title=title, url=scrapedurl, action="findvideos", text_color=color3, fulltitle=title,
contentType="episode", extra='serie')
if 'infoLabels' not in new_item:
new_item.infoLabels = {}
new_item.infoLabels['season'] = season
new_item.infoLabels['episode'] = episode.zfill(2)
itemlist.append(new_item)
# TODO no hacer esto si estamos añadiendo a la videoteca
if not item.extra:
# Obtenemos los datos de todos los capítulos de la temporada mediante multihilos
tmdb.set_infoLabels(itemlist, __modo_grafico__)
for i in itemlist:
if i.infoLabels['title']:
# Si el capitulo tiene nombre propio añadírselo al titulo del item
i.title = "%sx%s: %s" % (
i.infoLabels['season'], i.infoLabels['episode'], i.infoLabels['title'])
            if 'poster_path' in i.infoLabels:
# Si el capitulo tiene imagen propia remplazar al poster
i.thumbnail = i.infoLabels['poster_path']
itemlist.sort(key=lambda it: int(it.infoLabels['episode']),
reverse=config.get_setting('orden_episodios', __channel__))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
# Opción "Añadir esta serie a la videoteca"
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
text_color=color1, thumbnail=get_thumb("videolibrary_tvshow.png"), fanart=fanart_host))
return itemlist
def findvideos(item):
logger.info()
from core import jsontools
import urllib
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)
patron = 'data-player="([^"]+)"[^>]+>([^<]+)</div>.*?'
patron += '<td class="[^"]+">([^<]+)</td><td class="[^"]+">([^<]+)</td>'
matches = re.compile(patron, re.DOTALL).findall(data)
for data_player, servername, quality, lang in matches:
post_link = '%sentradas/procesar_player' % host
token = scrapertools.find_single_match(data, 'data-token="([^"]+)">')
post = {'data': data_player, 'tipo': 'videohost', '_token': token}
post = urllib.urlencode(post)
new_data = httptools.downloadpage(post_link, post=post).data
json_data = jsontools.load(new_data)
url = json_data['data']
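        # Si devuelve un embed del propio sitio, la URL final del vídeo viene en el campo "file" del reproductor.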
if 'pelisplay.tv/embed/' in url:
new_data = httptools.downloadpage(url).data
url = scrapertools.find_single_match(
new_data, '"file":"([^"]+)",').replace('\\', '')
elif 'fondo_requerido' in url:
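            # Enlace protegido: se extrae el parámetro y se resuelve contra gkpluginsphp.php para obtener la URL real.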
link = scrapertools.find_single_match(url, '=(.*?)&fondo_requerido').partition('&')[0]
post_link = '%sprivate/plugins/gkpluginsphp.php' % host
post = {'link': link}
post = urllib.urlencode(post)
new_data2 = httptools.downloadpage(post_link, post=post).data
url = scrapertools.find_single_match(new_data2, '"link":"([^"]+)"').replace('\\', '')
lang = lang.lower().strip()
idioma = {'latino': '[COLOR cornflowerblue](LAT)[/COLOR]',
'castellano': '[COLOR green](CAST)[/COLOR]',
'subtitulado': '[COLOR red](VOSE)[/COLOR]'}
if lang in idioma:
lang = idioma[lang]
title = "Ver en: [COLOR yellowgreen](%s)[/COLOR] [COLOR yellow](%s)[/COLOR] %s" % (
servername.title(), quality, lang)
itemlist.append(item.clone(channel=__channel__, title=title,
action='play', language=lang, quality=quality, url=url))
itemlist = servertools.get_servers_itemlist(itemlist)
itemlist.sort(key=lambda it: it.language, reverse=False)
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'serie':
itemlist.append(Item(channel=__channel__, url=item.url, action="add_pelicula_to_library", extra="findvideos",
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
thumbnail=get_thumb("videolibrary_movie.png"), contentTitle=item.contentTitle))
return itemlist

View File

@@ -66,16 +66,21 @@ def lista(item):
action = "menu_info"
# Extrae las entradas
patron = '<div class="video-item.*?href="([^"]+)" title="([^"]+)".*?data-original="([^"]+)"(.*?)<div class="durations">.*?</i>([^<]+)<'
patron = '<div class="video-item.*?href="([^"]+)" '
patron += 'title="([^"]+)".*?'
patron += 'data-src="([^"]+)"'
patron += '(.*?)<div class="durations">.*?'
patron += '</i>([^<]+)<'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle, scrapedthumbnail, quality, duration in matches:
if "go.php?" in scrapedurl:
scrapedurl = urllib.unquote(scrapedurl.split("/go.php?u=")[1].split("&")[0])
scrapedthumbnail = urlparse.urljoin(host, scrapedthumbnail)
if not scrapedthumbnail.startswith("https"):
scrapedthumbnail = "https:%s" % scrapedthumbnail
else:
scrapedurl = urlparse.urljoin(host, scrapedurl)
if not scrapedthumbnail.startswith("https"):
scrapedthumbnail = host + "%s" % scrapedthumbnail
scrapedthumbnail = "https:%s" % scrapedthumbnail
if duration:
scrapedtitle = "%s - %s" % (duration, scrapedtitle)
if '>HD<' in quality:
@@ -110,7 +115,6 @@ def lista(item):
next_page = "%s?mode=async&function=get_block&block_id=list_videos_common_videos_list&sort_by=post_date&from=%s" % (
item.url, next_page)
itemlist.append(item.clone(action="lista", title=">> Página Siguiente", url=next_page))
return itemlist
@@ -225,7 +229,6 @@ def play(item):
itemlist = []
data = get_data(item.url)
patron = '(?:video_url|video_alt_url[0-9]*)\s*:\s*\'([^\']+)\'.*?(?:video_url_text|video_alt_url[0-9]*_text)\s*:\s*\'([^\']+)\''
matches = scrapertools.find_multiple_matches(data, patron)
if not matches:

View File

@@ -1,7 +1,7 @@
{
"id": "rexpelis",
"name": "Rexpelis",
"active": true,
"active": false,
"adult": false,
"language": ["lat","cast"],
"thumbnail": "https://i.postimg.cc/MMJ5g9Y1/rexpelis1.png",

View File

@@ -1,4 +1,4 @@
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
import re
@@ -32,6 +32,8 @@ def mainlist(item):
itemlist.append(
Item(channel=item.channel, action="lista", title="Series", url=host, thumbnail=thumb_series, page=0))
itemlist.append(
Item(channel=item.channel, action="lista", title="Live Action", url=host+"/liveaction", thumbnail=thumb_series, page=0))
itemlist = renumbertools.show_option(item.channel, itemlist)
autoplay.show_option(item.channel, itemlist)
return itemlist
@@ -45,9 +47,12 @@ def lista(item):
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<a href="([^"]+)" '
patron += 'class="link">.+?<img src="([^"]+)".*?'
if item.title == "Series":
patron += 'class="link">.+?<img src="([^"]+)".*?'
else:
patron += 'class="link-la">.+?<img src="([^"]+)".*?'
patron += 'title="([^"]+)">'
if item.url==host:
if item.url==host or item.url==host+"/liveaction":
a=1
else:
num=(item.url).split('-')
@@ -150,25 +155,24 @@ def findvideos(item):
_sa = scrapertools.find_single_match(data, 'var _sa = (true|false);')
_sl = scrapertools.find_single_match(data, 'var _sl = ([^;]+);')
sl = eval(_sl)
#buttons = scrapertools.find_multiple_matches(data, '<button href="" class="selop" sl="([^"]+)">([^<]+)</button>')
#for id, title in buttons:
new_url = golink(0, _sa, sl)
data = httptools.downloadpage(new_url).data
_x0x = scrapertools.find_single_match(data, 'var x0x = ([^;]+);')
x0x = eval(_x0x)
url = resolve(x0x[4], base64.b64decode(x0x[1]))
if 'download' in url:
url = url.replace('download', 'preview')
title = '%s'
itemlist.append(Item(channel=item.channel, title=title, url=url, action='play', language='latino',
#buttons = scrapertools.find_multiple_matches(data, '<button href="" class="selop" sl="([^"]+)">')
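    # Aparentemente hay tres reproductores con índices fijos; se prueban los tres aunque los botones no se extraigan del HTML.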
buttons = [0,1,2]
for id in buttons:
new_url = golink(int(id), _sa, sl)
data_new = httptools.downloadpage(new_url).data
_x0x = scrapertools.find_single_match(data_new, 'var x0x = ([^;]+);')
try:
x0x = eval(_x0x)
url = resolve(x0x[4], base64.b64decode(x0x[1]))
if 'download' in url:
url = url.replace('download', 'preview')
title = '%s'
itemlist.append(Item(channel=item.channel, title=title, url=url, action='play', language='latino',
infoLabels=item.infoLabels))
except Exception as e:
logger.info(e)
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
@@ -210,4 +214,4 @@ def resolve(value1, value2):
lista[j] = k
reto += chr(ord(value2[i]) ^ lista[(lista[m] + lista[j]) % 256])
return reto
return reto

View File

@@ -59,7 +59,7 @@ def list_all(item):
itemlist = []
data = get_source(item.url)
patron = '<div class="post-thumbnail"><a href="([^"]+)" title="([^"]+)">.*?data-src="([^"]+)"'
patron = '<div class="post-thumbnail"><a href="([^"]+)" title="([^"]+)">.*?data-lazy-src="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:

View File

@@ -3,11 +3,11 @@
"name": "ThumbZilla",
"active": true,
"adult": true,
"language": "*",
"language": "en",
"fanart": "https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/adults/xthearebg.jpg",
"thumbnail": "https://ci.phncdn.com/www-static/thumbzilla/images/pc/logo.png",
"thumbnail": "https://ci.phncdn.com/www-static/thumbzilla/images/pc/logo.png?cache=2018110203",
"banner": "",
"categories": [
"categories": [
"adult"
],
"settings": [
@@ -35,4 +35,3 @@
}
]
}

View File

@@ -44,28 +44,36 @@ def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=__channel__, action="videos", title="Más Calientes", url=host,
viewmode="movie", thumbnail=get_thumb("/channels_adult.png")))
viewmode="movie", thumbnail=get_thumb("channels_adult.png")))
itemlist.append(Item(channel=__channel__, title="Nuevas", url=host + '/newest',
action="videos", viewmode="movie_with_plot", viewcontent='movies',
thumbnail=get_thumb("channels_adult.png")))
itemlist.append(Item(channel=__channel__, title="Tendencias", url=host + '/trending',
itemlist.append(Item(channel=__channel__, title="Tendencias", url=host + '/tending',
action="videos", viewmode="movie_with_plot", viewcontent='movies',
thumbnail=get_thumb("channels_adult.png")))
itemlist.append(Item(channel=__channel__, title="Mejores Videos", url=host + '/top',
action="videos", viewmode="movie_with_plot", viewcontent='movies',
thumbnail=get_thumb("channels_adult.png")))
itemlist.append(Item(channel=__channel__, title="Populares", url=host + '/popular',
action="videos", viewmode="movie_with_plot", viewcontent='movies',
thumbnail=get_thumb("channels_adult.png")))
itemlist.append(Item(channel=__channel__, title="Videos en HD", url=host + '/hd',
action="videos", viewmode="movie_with_plot", viewcontent='movies',
thumbnail=get_thumb("channels_adult.png")))
itemlist.append(Item(channel=__channel__, title="Caseros", url=host + '/hd',
action="videos", viewmode="movie_with_plot", viewcontent='homemade',
thumbnail=get_thumb("channels_adult.png")))
itemlist.append(Item(channel=__channel__, title="Categorías", action="categorias",
url=host + '/categories/', viewmode="movie_with_plot", viewcontent='movies',
thumbnail=get_thumb("channels_adult.png")))
itemlist.append(Item(channel=__channel__, title="Buscador", action="search", url=host,
thumbnail=get_thumb("channels_adult.png"), extra="buscar"))
return itemlist
@@ -92,6 +100,7 @@ def search(item, texto):
def videos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<a class="[^"]+" href="([^"]+)">' # url
@@ -99,15 +108,20 @@ def videos(item):
patron += '<span class="title">([^<]+)</span>.*?' # title
patron += '<span class="duration">([^<]+)</span>' # time
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle, time in matches:
title = "[%s] %s" % (time, scrapedtitle)
itemlist.append(Item(channel=item.channel, action='play', title=title, thumbnail=scrapedthumbnail,
                             url=host + scrapedurl, contentTitle=scrapedtitle, fanart=scrapedthumbnail))
paginacion = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />').replace('amp;', '')
if paginacion:
itemlist.append(Item(channel=item.channel, action="videos",
thumbnail=thumbnail % 'rarrow',
title="\xc2\xbb Siguiente \xc2\xbb", url=paginacion))
return itemlist
@@ -116,9 +130,12 @@ def categorias(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
# logger.info(data)
patron = 'class="checkHomepage"><a href="([^"]+)".*?' # url
patron += '<span class="count">([^<]+)</span>' # title, vids
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, vids in matches:
scrapedtitle = scrapedurl.replace('/categories/', '').replace('-', ' ').title()
title = "%s (%s)" % (scrapedtitle, vids.title())
@@ -127,17 +144,14 @@ def categorias(item):
itemlist.append(Item(channel=item.channel, action="videos", fanart=thumbnail,
title=title, url=url, thumbnail=thumbnail,
viewmode="movie_with_plot", folder=True))
return itemlist
def play(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|amp;|\s{2}|&nbsp;", "", data)
patron = '<li><a class="qualityButton active" data-quality="([^"]+)">([^"]+)</a></li>'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl,calidad in matches:
title = "[COLOR yellow](%s)[/COLOR] %s" % (calidad, item.contentTile)
itemlist.append(item.clone(channel=item.channel, action="play", title=item.title , url=scrapedurl , folder=True) )
return itemlist
url = scrapertools.find_single_match(data, '"quality":"[^"]+","videoUrl":"([^"]+)"').replace('\\', '')
    itemlist.append(item.clone(url=url, title=item.contentTitle))
return itemlist

View File

@@ -178,7 +178,9 @@ def listado(item):
#logger.debug(data)
#Buscamos la url de paginado y la última página
patron = '<a href="([^"]+=(\d+))" title="Siguiente">Siguiente<\/a>'
patron = '<a href="([^"]+=(\d+))" title="Next">Next<\/a>'
if not scrapertools.find_single_match(data, patron):
patron = '<a href="([^"]+=(\d+))" title="Siguiente">Siguiente<\/a>'
try:
next_page_url, curr_page = scrapertools.find_single_match(data, patron)
curr_page = int(curr_page) / len(matches)

View File

@@ -0,0 +1,77 @@
{
"id": "tvpelis",
"name": "TvPelis",
"active": true,
"adult": false,
"language": ["lat", "cast", "*"],
"thumbnail": "http://www.tvpelis.tv/wp-content/themes/tvpelistv3/images/logo.png",
"banner": "",
"categories": [
"movie",
"vos"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - Latino",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_castellano",
"type": "bool",
"label": "Incluir en Novedades - Castellano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - terror",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"LAT",
"CAST",
"VOSE"
]
}
]
}

View File

@@ -0,0 +1,374 @@
# -*- coding: utf-8 -*-
# -*- Channel TvPelis -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
host = 'http://www.tvpelis.tv/'
IDIOMAS = {'Latino': 'LAT', 'latino': 'LAT', 'Español':'CAST', 'castellano': 'CAST', 'Vose':'VOSE', 'vose':'VOSE'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['xdrive', 'bitertv', 'okru']
def get_source(url, referer=None):
logger.info()
if referer is None:
data = httptools.downloadpage(url).data
else:
data = httptools.downloadpage(url, headers={'Referer':referer}).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
logger.debug(data)
return data
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(Item(channel=item.channel, title="Películas", action="movies_menu",
thumbnail=get_thumb('movies', auto=True)))
itemlist.append(Item(channel=item.channel, title="Series", action="list_all", url=host+'genero/series/',
thumbnail=get_thumb('tvshows', auto=True)))
itemlist.append(Item(channel=item.channel, title="Documentales", action="list_all", url=host + 'genero/documentales/',
thumbnail=get_thumb('documental', auto=True)))
# itemlist.append(Item(channel=item.channel, title="Latino", action="list_all", url=host + 'genero/latino/',
# thumbnail=get_thumb('lat', auto=True)))
#
# itemlist.append(Item(channel=item.channel, title="VOSE", action="list_all", url=host + 'genero/vose/',
# thumbnail=get_thumb('vose', auto=True)))
#
# itemlist.append(Item(channel=item.channel, title="Generos", action="section",
# thumbnail=get_thumb('genres', auto=True)))
#
# itemlist.append(Item(channel=item.channel, title="Por Años", action="section",
# thumbnail=get_thumb('year', auto=True)))
itemlist.append(Item(channel=item.channel, title='Buscar', action="search", url=host + '?s=',
thumbnail=get_thumb('search', auto=True)))
autoplay.show_option(item.channel, itemlist)
return itemlist
def movies_menu(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, title="Todas", action="list_all", url=host,
thumbnail=get_thumb('all', auto=True)))
itemlist.append(Item(channel=item.channel, title="Castellano", action="list_all", url=host + 'genero/castellano/',
thumbnail=get_thumb('cast', auto=True)))
itemlist.append(Item(channel=item.channel, title="Latino", action="list_all", url=host + 'genero/latino/',
thumbnail=get_thumb('lat', auto=True)))
itemlist.append(Item(channel=item.channel, title="VOSE", action="list_all", url=host + 'genero/vose/',
thumbnail=get_thumb('vose', auto=True)))
itemlist.append(Item(channel=item.channel, title="Hindú", action="list_all", url=host + 'genero/hindu/',
thumbnail=get_thumb('hindu', auto=True)))
itemlist.append(Item(channel=item.channel, title="Generos", action="section",
thumbnail=get_thumb('genres', auto=True)))
itemlist.append(Item(channel=item.channel, title="Por Años", action="section",
thumbnail=get_thumb('year', auto=True)))
return itemlist
def list_all(item):
logger.info()
itemlist = []
full_data = get_source(item.url)
data = scrapertools.find_single_match(full_data,
"<div id='z1'><section><div id='main'><div class='breadcrumbs'>(.*?)</ul>")
logger.debug(data)
patron = 'article id=.*?<a href="([^"]+)".*?<img src="([^"]+)" alt="([^"]+)".*?'
patron += 'class="selectidioma">(.*?)class="fixyear".*?class="genero">([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle, lang_data, content_type in matches:
url = scrapedurl
lang = get_language(lang_data)
year = scrapertools.find_single_match(scrapedtitle, '(\d{4})')
scrapedtitle = scrapertools.find_single_match(scrapedtitle, '([^\(]+)\(?').strip()
#scrapedtitle = scrapedtitle.replace('Latino','')
scrapedtitle = re.sub('latino|español|sub|audio','', scrapedtitle.lower()).capitalize()
if not config.get_setting('unify'):
title = '%s %s' % (scrapedtitle, lang)
else:
title = scrapedtitle
thumbnail = 'https:'+scrapedthumbnail
new_item = Item(channel=item.channel, title=title, url=url, thumbnail=thumbnail, language = lang,
infoLabels={'year':year})
        logger.debug(content_type)
        if 'series' not in content_type.lower():
new_item.contentTitle = scrapedtitle
new_item.action = 'findvideos'
else:
new_item.contentSerieName = scrapedtitle
new_item.action = 'seasons'
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Paginacion
if itemlist != []:
next_page = scrapertools.find_single_match(full_data, '<link rel="next" href="([^"]+)"')
if next_page != '':
itemlist.append(Item(channel=item.channel, action="list_all", title='Siguiente >>>', url=next_page))
return itemlist
def section(item):
logger.info()
itemlist = []
data=get_source(host)
if item.title == 'Generos':
data = scrapertools.find_single_match(data, '<h2>Categorias de Peliculas</h2>(.*?)</ul>')
patron = 'href="([^"]+)"> <em>Peliculas de </em>([^<]+)<span>'
if item.title == 'Por Años':
data = scrapertools.find_single_match(data, '>Filtrar por A&ntilde;o</option>(.*?)</select>')
patron = 'value="([^"]+)">Peliculas del A&ntilde;o (\d{4})<'
matches = re.compile(patron, re.DOTALL).findall(data)
for url, title in matches:
itemlist.append(Item(channel=item.channel, title=title.strip(), url=url, action='list_all'))
return itemlist
def seasons(item):
logger.info()
itemlist=[]
all_seasons = []
data=get_source(item.url)
patron='Temporada \d+'
matches = re.compile(patron, re.DOTALL).findall(data)
action = 'episodesxseasons'
if len(matches) == 0:
matches.append('1')
action = 'aios'
infoLabels = item.infoLabels
for season in matches:
        season = season.lower().replace('temporada', '').strip()
infoLabels['season']=season
title = 'Temporada %s' % season
if title not in all_seasons:
itemlist.append(Item(channel=item.channel, title=title, url=item.url, action=action,
infoLabels=infoLabels))
all_seasons.append(title)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'episodios':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName))
return itemlist
def episodios(item):
logger.info()
itemlist = []
templist = seasons(item)
for tempitem in templist:
itemlist += episodesxseasons(tempitem)
return itemlist
def aios(item):
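    # Series sin temporadas marcadas: los capítulos cuelgan directamente de la página de la serie.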
logger.info()
itemlist = []
data=get_source(item.url)
patron='href="([^"]+)" rel="bookmark"><i class="fa icon-chevron-sign-right"></i>.*?Capitulo (?:00|)(\d+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
for scrapedurl, scrapedepisode in matches:
infoLabels['episode'] = scrapedepisode
url = item.url+scrapedurl
title = '%sx%s - Episodio %s' % (infoLabels['season'], infoLabels['episode'], infoLabels['episode'])
itemlist.append(Item(channel=item.channel, title= title, url=url, action='findvideos', type=item.type,
infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def episodesxseasons(item):
logger.info()
itemlist = []
data=get_source(item.url)
patron='<a href="([^"]+)".*?</i>.*?Temporada %s, Episodio (\d+) - ([^<]+)<' % item.infoLabels['season']
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
for scrapedurl, scrapedepisode, scrapedtitle in matches:
infoLabels['episode'] = scrapedepisode
url = scrapedurl
title = '%sx%s - %s' % (infoLabels['season'], infoLabels['episode'], scrapedtitle)
itemlist.append(Item(channel=item.channel, title= title, url=url, action='findvideos', type=item.type,
infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def get_language(lang_data):
logger.info()
language = []
lang_list = scrapertools.find_multiple_matches(lang_data, '<em class="bandera sp([^"]+)"')
for lang in lang_list:
        if lang not in IDIOMAS:
lang = 'vose'
lang = IDIOMAS[lang]
if lang not in language:
language.append(lang)
return language
def findvideos(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '<div id="([^"]+)".?class="tab_part.*?">.?<iframe src="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
if len(matches) == 0:
patron = 'class="(rep)".*?src="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for option, player in matches:
if 'ok.ru' in player:
url = 'http:' + player
elif 'rutube' in player:
url = 'http:' + player + "|%s" % item.url
elif 'http' not in player:
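            # Ruta relativa: el iframe con el vídeo real está oculto en una página intermedia del propio sitio.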
hidden_data = get_source('%s%s' % (host, player))
url = scrapertools.find_single_match(hidden_data, '<iframe src="([^"]+)"')
else:
url = player
lang = scrapertools.find_single_match(data, '<li rel="%s">([^<]+)</li>' % option)
if lang.lower() in ['online', 'trailer']:
continue
if lang in IDIOMAS:
lang = IDIOMAS[lang]
if not config.get_setting('unify'):
title = ' [%s]' % lang
else:
title = ''
if url != '':
itemlist.append(Item(channel=item.channel, title='%s'+title, url=url, action='play', language=lang,
infoLabels=item.infoLabels))
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
if item.contentType != 'episode':
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
url=item.url, action="add_pelicula_to_library", extra="findvideos",
contentTitle=item.contentTitle))
return itemlist
def search(item, texto):
logger.info()
itemlist = []
texto = texto.replace(" ", "+")
item.url = item.url + texto
if texto != '':
try:
return list_all(item)
except:
itemlist.append(item.clone(url='', title='No hay elementos...', action=''))
return itemlist
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == 'peliculas':
item.url = host
elif categoria == 'latino':
item.url = host + 'filter?language=2'
elif categoria == 'castellano':
item.url = host + 'filter?language=1'
elif categoria == 'infantiles':
item.url = host + 'genre/25/infantil'
elif categoria == 'terror':
item.url = host + 'genre/15/terror'
item.pages=3
itemlist = list_all(item)
if itemlist[-1].title == 'Siguiente >>>':
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist

View File

@@ -0,0 +1,89 @@
{
"id": "vi2",
"name": "vi2",
"active": true,
"adult": false,
"language": ["lat", "cast"],
"thumbnail": "https://i.postimg.cc/0Qy9wf8b/vi2.png",
"banner": "",
"categories": [
"movie",
"tvshow",
"vos",
"torrent"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"LAT",
"CAST",
"VOSE",
"VO"
]
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - Terror",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_documentales",
"type": "bool",
"label": "Incluir en Novedades - Documentales",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces",
"type": "bool",
"label": "Verificar si los enlaces existen",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces_num",
"type": "list",
"label": "Número de enlaces a verificar",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
}
]
}

View File

@@ -0,0 +1,333 @@
# -*- coding: utf-8 -*-
# -*- Channel Vi2.co -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import urllib
import base64
from channelselector import get_thumb
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core import tmdb
from lib import jsunpack
from core.item import Item
from channels import filtertools
from channels import autoplay
from platformcode import config, logger
IDIOMAS = {'Latino': 'LAT', 'Español':'CAST', 'Subtitulado': 'VOSE', 'VO': 'VO'}
list_language = IDIOMAS.values()
list_quality = ['Full HD 1080p',
'HDRip',
'DVDScreener',
'720p',
'Ts Screener hq',
'HD Real 720p',
'DVDRip',
'BluRay-1080p',
'BDremux-1080p']
list_servers = [
'directo',
'openload',
'rapidvideo',
'jawcloud',
'cloudvideo',
'upvid',
'vevio',
'gamovideo'
]
host = 'http://vi2.co'
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(Item(channel=item.channel, title='Peliculas', action='select_menu', type='peliculas',
thumbnail= get_thumb('movies', auto=True)))
# itemlist.append(Item(channel=item.channel, title='Series', url=host+'serie', action='select_menu', type='series',
# thumbnail= get_thumb('tvshows', auto=True)))
autoplay.show_option(item.channel, itemlist)
return itemlist
def select_menu(item):
logger.info()
itemlist=[]
url = host + '/%s/es/' % item.type
itemlist.append(Item(channel=item.channel, title='Streaming', action='sub_menu',
thumbnail=get_thumb('all', auto=True), type=item.type))
itemlist.append(Item(channel=item.channel, title='Torrent', action='sub_menu',
thumbnail=get_thumb('all', auto=True), type=item.type))
itemlist.append(Item(channel=item.channel, title='Generos', action='section', url=url,
thumbnail=get_thumb('genres', auto=True), type=item.type))
itemlist.append(Item(channel=item.channel, title='Por Año', action='section', url=url,
thumbnail=get_thumb('year', auto=True), type=item.type))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=url + 'ajax/1/?q=',
thumbnail=get_thumb("search", auto=True), type=item.type))
return itemlist
def sub_menu(item):
logger.info()
itemlist = []
url = host + '/%s/es/ajax/1/' % item.type
link_type = item.title.lower()
if link_type == 'streaming':
link_type = 'flash'
movies_options = ['Todas', 'Castellano', 'Latino', 'VOSE']
tv_options = ['Ultimas', 'Ultimas Castellano', 'Ultimas Latino', 'Ultimas VOSE']
if item.type == 'peliculas':
title = movies_options
thumb_1 = 'all'
else:
thumb_1 = 'last'
title = tv_options
itemlist.append(Item(channel=item.channel, title=title[0], url=url+'?q=%s' % link_type,
action='list_all', thumbnail=get_thumb(thumb_1, auto=True), type=item.type,
link_type=link_type))
itemlist.append(Item(channel=item.channel, title=title[1],
url=url + '?q=%s+espanol' % link_type, action='list_all',
thumbnail=get_thumb('cast', auto=True), type=item.type, send_lang='Español',
link_type=link_type))
itemlist.append(Item(channel=item.channel, title=title[2],
url=url + '?q=%s+latino' % link_type, action='list_all',
thumbnail=get_thumb('lat', auto=True), type=item.type, send_lang='Latino',
link_type=link_type))
itemlist.append(Item(channel=item.channel, title=title[3],
url=url + '?q=%s+subtitulado' % link_type, action='list_all',
thumbnail=get_thumb('vose', auto=True), type=item.type, send_lang='VOSE',
link_type=link_type))
return itemlist
def get_source(url, referer=None):
logger.info()
if referer is None:
data = httptools.downloadpage(url).data
else:
data = httptools.downloadpage(url, headers={'Referer':referer}).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def section(item):
logger.info()
itemlist=[]
excluded = ['latino', 'español', 'subtitulado', 'v.o.', 'streaming', 'torrent']
full_data = get_source(item.url)
data = scrapertools.find_single_match(full_data, 'toptags-container(.*?)<div class="android-more-section">')
patron = 'href="([^"]+)">([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
title = scrapedtitle
url = host+scrapedurl.replace('/?','/ajax/1/?')
if (item.title=='Generos' and title.lower() not in excluded and not title.isdigit()) or (item.title=='Por Año' and title.isdigit()):
itemlist.append(Item(channel=item.channel, url=url, title=title, action='list_all', type=item.type))
return itemlist
def list_all(item):
from core import jsontools
logger.info()
itemlist = []
listed =[]
quality=''
infoLabels = {}
json_data= jsontools.load(get_source(item.url))
data = json_data['render']
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
#if item.type == 'peliculas':
patron = '<img class="cover".*?src="([^"]+)" data-id="\d+" '
patron +='alt="Ver ([^\(]+)(.*?)">'
patron += '<div class="mdl-card__menu"><a class="clean-link" href="([^"]+)">'
patron += '.*?<span class="link-size">(.*?)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedthumbnail, scrapedtitle, extra_info, scrapedurl , size in matches:
if item.send_lang != '':
lang = item.send_lang
else:
lang = ''
        year = '-'
        quality = ''  # se reinicia por título para no arrastrar la calidad del anterior
extra_info = extra_info.replace('(', '|').replace('[','|').replace(')','').replace(']','')
extra_info = extra_info.split('|')
for info in extra_info:
info = info.strip()
if 'Rip' in info or '1080' in info or '720' in info or 'Screener' in info:
quality = info
if 'ingl' in info.lower():
info = 'VO'
if info in IDIOMAS:
lang = info
elif info.isdigit():
year = info
if lang in IDIOMAS:
lang = IDIOMAS[lang]
title = '%s' % scrapedtitle.strip()
if not config.get_setting('unify'):
if year.isdigit():
title = '%s [%s]' % (title, year)
if quality != '':
title = '%s [%s]' % (title, quality)
if lang != '':
title = '%s [%s]' % (title, lang)
thumbnail = scrapedthumbnail
url = host+scrapedurl
if item.type == 'series':
season, episode = scrapertools.find_single_match(scrapedtitle, '(\d+)x(\d+)')
infoLabels['season'] = season
infoLabels['episode'] = episode
else:
infoLabels['year'] = year
if title not in listed:
new_item = Item(channel=item.channel,
title=title,
url=url,
action='findvideos',
thumbnail=thumbnail,
type=item.type,
language = lang,
quality=quality,
link_type=item.link_type,
torrent_data= size,
infoLabels = infoLabels
)
if item.type == 'peliculas':
new_item.contentTitle = scrapedtitle
else:
scrapedtitle = scrapedtitle.split(' - ')
new_item.contentSerieName = scrapedtitle[0]
itemlist.append(new_item)
listed.append(title)
tmdb.set_infoLabels(itemlist, seekTmdb=True)
# Paginación
if json_data['next']:
actual_page = scrapertools.find_single_match(item.url, 'ajax/(\d+)/')
next_page =int(actual_page) + 1
url_next_page = item.url.replace('ajax/%s' % actual_page, 'ajax/%s' % next_page)
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, type=item.type,
action='list_all', send_lang=item.send_lang))
return itemlist
def findvideos(item):
logger.info()
import base64
itemlist = []
server = ''
data = get_source(item.url)
pre_url = scrapertools.find_single_match(data, 'class="inside-link" href="([^"]+)".*?<button type="button"')
data = get_source(host+pre_url)
patron = 'data-video="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
lang = item.language
quality = item.quality
for url in matches:
title = ''
link_type = ''
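        # El atributo data-video llega codificado en base64; se decodifica para obtener la URL real.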
url = base64.b64decode(url)
if 'torrent' in url and item.link_type == 'torrent':
server = 'torrent'
link_type = 'torrent'
title = ' [%s]' % item.torrent_data
elif 'torrent' not in url:
link_type = 'flash'
if url != '' and (link_type == item.link_type.lower()):
itemlist.append(Item(channel=item.channel, url=url, title='%s'+title, action='play', server=server,
language=lang, quality=quality, infoLabels=item.infoLabels))
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
itemlist = sorted(itemlist, key=lambda it: it.language)
if item.contentType != 'episode':
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
if texto != '':
return list_all(item)
else:
return []
def newest(categoria):
logger.info()
item = Item()
try:
        if categoria in ['peliculas']:
            item.url = host + '/ver/'
        elif categoria == 'infantiles':
            item.url = host + '/genero/animacion/'
        elif categoria == 'terror':
            item.url = host + '/genero/terror/'
        elif categoria == 'documentales':
            item.url = host + '/genero/terror/'
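            # Nota: 'documentales' reutiliza el género terror; probablemente debería ser '/genero/documentales/'.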
itemlist = list_all(item)
if itemlist[-1].title == 'Siguiente >>':
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist

View File

@@ -13,6 +13,7 @@ from platformcode import config, logger
__channel__ = "xms"
host = 'https://xxxmoviestream.com/'
host1 = 'https://www.cam4.com/'
try:
__modo_grafico__ = config.get_setting('modo_grafico', __channel__)
__perfil__ = int(config.get_setting('perfil', __channel__))
@@ -41,7 +42,6 @@ thumbnail = 'https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=__channel__, title="Últimas", url=host + '?filtre=date&cat=0',
@@ -60,32 +60,50 @@ def mainlist(item):
url=host + 'categories/', viewmode="movie_with_plot", viewcontent='movies',
thumbnail=thumbnail % '4'))
itemlist.append(Item(channel=__channel__, title="WebCam", action="webcamenu",
viewmode="movie_with_plot", viewcontent='movies',
thumbnail='https://ae01.alicdn.com/kf/HTB1LDoiaHsrBKNjSZFpq6AXhFXa9/-.jpg'))
itemlist.append(Item(channel=__channel__, title="Buscador", action="search", url=host, thumbnail=thumbnail % '5'))
return itemlist
def webcamenu(item):
logger.info()
itemlist = [item.clone(title="Trending Cams", action="webcam", text_blod=True, url=host1,
viewcontent='movies', viewmode="movie_with_plot"),
item.clone(title="Females", action="webcam", text_blod=True,
viewcontent='movies', url=host1 + 'female', viewmode="movie_with_plot"),
item.clone(title="Males", action="webcam", text_blod=True,
viewcontent='movies', url=host1 + 'male', viewmode="movie_with_plot"),
item.clone(title="Couples", action="webcam", text_blod=True,
viewcontent='movies', url=host1 + 'couple', viewmode="movie_with_plot"),
item.clone(title="Trans", action="webcam", text_blod=True, extra="Películas Por año",
viewcontent='movies', url=host1 + 'transgender', viewmode="movie_with_plot")]
return itemlist
def peliculas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>|#038;", "", data)
# logger.info(data)
patron_todos = '<div id="content">(.*?)<div id="footer"'
data = scrapertools.find_single_match(data, patron_todos)
patron = 'src="([^"]+)" class="attachment-thumb_site.*?' # img
patron += '<a href="([^"]+)" title="([^"]+)".*?' #url, title
patron += '<div class="right"><p>([^<]+)</p>' # plot
patron += '<a href="([^"]+)" title="([^"]+)".*?' # url, title
patron += '<div class="right"><p>([^<]+)</p>' # plot
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedthumbnail, scrapedurl, scrapedtitle, plot in matches:
plot = scrapertools.decodeHtmlentities(plot)
itemlist.append(item.clone(channel=__channel__, action="findvideos", title=scrapedtitle.capitalize(),
url=scrapedurl, thumbnail=scrapedthumbnail, infoLabels={"plot": plot}, fanart=scrapedthumbnail,
viewmode="movie_with_plot", folder=True, contentTitle=scrapedtitle))
url=scrapedurl, thumbnail=scrapedthumbnail, infoLabels={"plot": plot},
fanart=scrapedthumbnail,viewmode="movie_with_plot",
folder=True, contentTitle=scrapedtitle))
# Extrae el paginador
paginacion = scrapertools.find_single_match(data, '<a href="([^"]+)">Next &rsaquo;</a></li><li>')
paginacion = urlparse.urljoin(item.url, paginacion)
@@ -95,6 +113,36 @@ def peliculas(item):
thumbnail=thumbnail % 'rarrow',
title="\xc2\xbb Siguiente \xc2\xbb", url=paginacion))
return itemlist
def webcam(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>|#038;", "", data)
patron = '<div class="profileBox">.*?<a href="/([^"]+)".*?' # url
patron += 'data-hls-preview-url="([^"]+)">.*?' # video_url
patron += 'data-username="([^"]+)".*?' # username
patron += 'title="([^"]+)".*?' # title
patron += 'data-profile="([^"]+)" />' # img
matches = re.compile(patron, re.DOTALL).findall(data)
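    # data-hls-preview-url expone un stream m3u8 que se puede reproducir directamente, sin pasar por findvideos.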
for scrapedurl, video_url, username, scrapedtitle, scrapedthumbnail in matches:
scrapedtitle = scrapedtitle.replace(' Chat gratis con webcam.', '')
itemlist.append(item.clone(channel=__channel__, action="play", title=scrapedtitle,
url=video_url, thumbnail=scrapedthumbnail, fanart=scrapedthumbnail,
viewmode="movie_with_plot", folder=True, contentTitle=scrapedtitle))
# Extrae el paginador
paginacion = scrapertools.find_single_match(data, '<span id="pagerSpan">\d+</span> <a href="([^"]+)"')
paginacion = urlparse.urljoin(item.url, paginacion)
if paginacion:
itemlist.append(Item(channel=__channel__, action="webcam",
thumbnail=thumbnail % 'rarrow',
title="\xc2\xbb Siguiente \xc2\xbb", url=paginacion))
return itemlist
@@ -104,10 +152,9 @@ def categorias(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
# logger.info(data)
patron = 'data-lazy-src="([^"]+)".*?' # img
patron += '</noscript><a href="([^"]+)".*?' # url
patron += '<span>([^<]+)</span></a>.*?' # title
patron = 'data-lazy-src="([^"]+)".*?' # img
patron += '</noscript><a href="([^"]+)".*?' # url
patron += '<span>([^<]+)</span></a>.*?' # title
patron += '<span class="nb_cat border-radius-5">([^<]+)</span>' # num_vids
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -143,16 +190,15 @@ def sub_search(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = 'data-lazy-src="([^"]+)".*?' # img
patron += 'title="([^"]+)" />.*?' # title
patron += '</noscript><a href="([^"]+)".*?' # url
patron = 'data-lazy-src="([^"]+)".*?' # img
patron += 'title="([^"]+)" />.*?' # title
patron += '</noscript><a href="([^"]+)".*?' # url
patron += '<div class="right"><p>([^<]+)</p>' # plot
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedthumbnail, scrapedtitle, scrapedurl, plot in matches:
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, plot=plot, fanart=scrapedthumbnail,
action="findvideos", thumbnail=scrapedthumbnail))
action="findvideos", thumbnail=scrapedthumbnail))
paginacion = scrapertools.find_single_match(
data, "<a href='([^']+)' class=\"inactive\">\d+</a>")
@@ -168,8 +214,6 @@ def findvideos(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|amp;|\s{2}|&nbsp;", "", data)
# logger.info(data)
patron = '<iframe src="[^"]+".*?<iframe src="([^"]+)" scrolling="no" frameborder="0"'
matches = scrapertools.find_multiple_matches(data, patron)
@@ -179,5 +223,4 @@ def findvideos(item):
itemlist.append(item.clone(action='play', title=title, server=server, url=url))
return itemlist

View File

@@ -2,7 +2,7 @@
"id": "youporn",
"name": "youporn",
"active": true,
"adult": false,
"adult": true,
"language": ["*"],
"thumbnail": "https://fs.ypncdn.com/cb/bundles/youpornwebfront/images/l_youporn_black.png",
"banner": "",

View File

@@ -415,9 +415,11 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
season_episode = scrapertools.get_season_and_episode(e.title)
# Si se ha marcado la opción de url de emergencia, se añade ésta a cada episodio después de haber ejecutado Findvideos del canal
if e.emergency_urls and isinstance(e.emergency_urls, dict): del e.emergency_urls #Borramos trazas anterioires
if e.emergency_urls and isinstance(e.emergency_urls, dict): del e.emergency_urls #Borramos trazas anteriores
json_path = filetools.join(path, ("%s [%s].json" % (season_episode, e.channel)).lower()) #Path del .json del episodio
if emergency_urls_stat == 1 and not e.emergency_urls and e.contentType == 'episode': #Guardamos urls de emergencia?
if not silent:
p_dialog.update(0, 'Cacheando enlaces y archivos .torrent...', e.title) #progress dialog
if json_path in ficheros: #Si existe el .json sacamos de ahí las urls
                if overwrite: #pero solo si se sobrescriben los .json
json_epi = Item().fromjson(filetools.read(json_path)) #Leemos el .json
@@ -433,6 +435,8 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
if e.emergency_urls: del e.emergency_urls
emergency_urls_succ = True #... es un éxito y vamos a marcar el .nfo
elif emergency_urls_stat == 3 and e.contentType == 'episode': #Actualizamos urls de emergencia?
if not silent:
p_dialog.update(0, 'Cacheando enlaces y archivos .torrent...', e.title) #progress dialog
e = emergency_urls(e, channel, json_path) #generamos las urls
if e.emergency_urls: #Si ya tenemos urls...
emergency_urls_succ = True #... es un éxito y vamos a marcar el .nfo

Binary file not shown.


Binary file not shown.


View File

@@ -26,7 +26,7 @@ def find_in_text(regex, text, flags=re.IGNORECASE | re.DOTALL):
class UnshortenIt(object):
_adfly_regex = r'adf\.ly|j\.gs|q\.gs|u\.bb|ay\.gy|atominik\.com|tinyium\.com|microify\.com|threadsphere\.bid|clearload\.bid|activetect\.net|swiftviz\.net'
_adfly_regex = r'adf\.ly|j\.gs|q\.gs|u\.bb|ay\.gy|atominik\.com|tinyium\.com|microify\.com|threadsphere\.bid|clearload\.bid|activetect\.net|swiftviz\.net|briskgram\.net'
_linkbucks_regex = r'linkbucks\.com|any\.gs|cash4links\.co|cash4files\.co|dyo\.gs|filesonthe\.net|goneviral\.com|megaline\.co|miniurls\.co|qqc\.co|seriousdeals\.net|theseblogs\.com|theseforums\.com|tinylinks\.co|tubeviral\.com|ultrafiles\.net|urlbeat\.net|whackyvidz\.com|yyv\.co'
_adfocus_regex = r'adfoc\.us'
_lnxlu_regex = r'lnx\.lu'

Binary file not shown.


View File

@@ -210,7 +210,7 @@ def render_items(itemlist, parent_item):
if item.fanart:
fanart = item.fanart
else:
fanart = os.path.join(config.get_runtime_path(), "fanart1.jpg")
fanart = os.path.join(config.get_runtime_path(), "fanart-xmas.jpg")
# Creamos el listitem
#listitem = xbmcgui.ListItem(item.title)

View File

@@ -4,7 +4,7 @@
"ignore_urls": [],
"patterns": [
{
"pattern": "(jawcloud.co/embed-([A-z0-9]+))",
"pattern": "(jawcloud.co/(?:embed-|)([A-z0-9]+))",
"url": "https://\\1.html"
}
]

View File

@@ -7,6 +7,9 @@ from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "The file you were looking for could not be found" in data:
return False, "[jawcloud] El archivo ha ido borrado"
return True, ""

View File

@@ -0,0 +1,42 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "https://embed.mystream.to/(\\w+)",
"url": "https://embed.mystream.to/\\1"
}
]
},
"free": true,
"id": "mystream",
"name": "mystream",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://i.postimg.cc/t43grQdh/mystream1.png"
}

View File

@@ -0,0 +1,34 @@
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# Conector mystream By Alfa development Group
# --------------------------------------------------------
import re

from core import httptools
from core import scrapertools
from lib.aadecode import decode as aadecode
from platformcode import logger


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url)
    if data.code == 404:
        return False, "[mystream] El archivo no existe o ha sido borrado"
    if "<title>video is no longer available" in data.data:
        return False, "[mystream] El archivo no existe o ha sido borrado"
    return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("url=" + page_url)
    video_urls = []
    headers = {'referer': page_url}
    data = httptools.downloadpage(page_url, headers=headers).data
    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    # The player script is AAEncoded; grab it and decode it to plain JS
    code = scrapertools.find_single_match(data, '(?s)<script>\s*゚ω゚(.*?)</script>').strip()
    text_decode = aadecode(code)
    # Every 'src' assignment in the decoded script is a playable stream
    matches = scrapertools.find_multiple_matches(text_decode, "'src', '([^']+)'")
    for url in matches:
        video_urls.append(['mystream [mp4]', url])
    return video_urls
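
A hypothetical call sequence for the connector above (the embed id is made up):

page = "https://embed.mystream.to/abcd1234"  # made-up id
ok, msg = test_video_exists(page)
if ok:
    for label, url in get_video_url(page):
        print(label, url)  # e.g. "mystream [mp4] https://..."
else:
    print(msg)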

View File

@@ -0,0 +1,42 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "(http://rutube.ru/play/embed/[a-zA-Z0-9]+.p=[a-zA-Z0-9-]+)",
                "url": "\\1"
            }
        ]
    },
    "free": true,
    "id": "rutube",
    "name": "rutube",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "@60654",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "@60655",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ],
    "thumbnail": "http://www.cubancouncil.com/uploads/project_images/rutube_branding_black.png.648x0_q90_replace_alpha.jpg"
}
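
Note the unescaped dot before p= in the pattern: it happens to match the literal '?' in real embed links (though it would match any single character). A quick check against the sample url quoted in rutube.py below:

import re

pattern = r"(http://rutube.ru/play/embed/[a-zA-Z0-9]+.p=[a-zA-Z0-9-]+)"
url = "http://rutube.ru/play/embed/10531822?p=eDk8m91H0UBPOCUuFicFbQ"
print(re.search(pattern, url).group(1) == url)  # True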

View File

@@ -0,0 +1,46 @@
# -*- coding: utf-8 -*-
# -*- Server Rutube -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re

from core import httptools
from core import jsontools
from core import scrapertools
from platformcode import logger


def get_source(url):
    logger.info()
    data = httptools.downloadpage(url).data
    data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    return data


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    data = get_source(page_url)
    if "File was deleted" in data or "File Not Found" in data:
        return False, "[Rutube] El video ha sido borrado"
    return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("url=" + page_url)
    import urllib
    video_urls = []
    referer = ''
    id = ''
    # Incoming urls may carry the play token and the referer piped on:
    # "embed_url?p=token|referer" -> "embed_url|p=token|referer"
    if "|" in page_url:
        page_url = page_url.replace('?', '|')
        page_url, id, referer = page_url.split("|", 2)
        header = {'referer': referer}
        referer = urllib.urlencode(header)
    # Target, e.g.: "http://rutube.ru/api/play/options/10531822/?format=json&sqr4374_compat=1&no_404=true&referer=http%3A%2F%2Frutube.ru%2Fplay%2Fembed%2F10531822%3Fp%3DeDk8m91H0UBPOCUuFicFbQ&p=eDk8m91H0UBPOCUuFicFbQ"
    base_link = page_url.replace("/play/embed/", "/api/play/options/")
    new_link = base_link + '/?format=json&sqr4374_compat=1&no_404=true&%s&%s' % (referer, id)
    data = httptools.downloadpage(new_link).data
    json_data = jsontools.load(data)
    video_urls.append(['Rutube', json_data['video_balancer']['m3u8']])
    return video_urls
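
Step by step, this is how the piped url format unpacks (the token comes from the sample url quoted in the code; the referer value is invented):

page_url = "http://rutube.ru/play/embed/10531822?p=eDk8m91H0UBPOCUuFicFbQ|http://example.com/page"
page_url = page_url.replace('?', '|')
url, id, referer = page_url.split('|', 2)
# url     -> "http://rutube.ru/play/embed/10531822"
# id      -> "p=eDk8m91H0UBPOCUuFicFbQ"
# referer -> "http://example.com/page"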

View File

@@ -3,13 +3,11 @@
# Conector UpVID By Alfa development Group
# --------------------------------------------------------
import re
import re, base64
from core import httptools
from core import scrapertools
from platformcode import logger
import re, base64
from lib.aadecode import decode as aadecode
from platformcode import logger
def test_video_exists(page_url):

View File

@@ -4,7 +4,7 @@
"ignore_urls": [],
"patterns": [
{
"pattern": "vidup.(?:me|tv)/(?:embed-|)([A-z0-9]+)",
"pattern": "vidup.(?:me|tv|io)/(?:embed-|)([A-z0-9]+)",
"url": "http://vidup.tv/embed-\\1.html"
}
]
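
The pattern now also recognizes the vidup.io domain while still normalizing to the vidup.tv embed url. A quick check (sketch):

import re

pattern = r"vidup.(?:me|tv|io)/(?:embed-|)([A-z0-9]+)"
print(re.search(pattern, "https://vidup.io/embed-abc123").group(1))  # abc123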

View File

@@ -20,8 +20,9 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
video_urls = []
post = {}
post = urllib.urlencode(post)
url = httptools.downloadpage(page_url, follow_redirects=False, only_headers=True).headers.get("location", "")
data = httptools.downloadpage("https://vidup.io/api/serve/video/" + scrapertools.find_single_match(url, "embed/([A-z0-9]+)"), post=post).data
headers = {"Referer":page_url}
url = httptools.downloadpage(page_url, follow_redirects=False, headers=headers, only_headers=True).headers.get("location", "")
data = httptools.downloadpage("https://vidup.io/api/serve/video/" + scrapertools.find_single_match(url, "embed.([A-z0-9]+)"), post=post).data
bloque = scrapertools.find_single_match(data, 'qualities":\{(.*?)\}')
matches = scrapertools.find_multiple_matches(bloque, '"([^"]+)":"([^"]+)')
for res, media_url in matches:
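
Two changes in this hunk: the redirect request now sends a Referer header, and the embed id is pulled out with embed.([A-z0-9]+) instead of embed/([A-z0-9]+), presumably because the Location header can separate the id with either '/' or '-'. A quick check:

import re

for loc in ("https://vidup.io/embed/abc123", "https://vidup.io/embed-abc123"):
    print(re.search(r"embed.([A-z0-9]+)", loc).group(1))  # abc123 both times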