Merge remote-tracking branch 'upstream/master'

This commit is contained in:
pipcat
2018-04-26 10:44:15 +02:00
37 changed files with 1672 additions and 649 deletions


@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="2.5.8" provider-name="Alfa Addon">
<addon id="plugin.video.alfa" name="Alfa" version="2.5.11" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -19,11 +19,16 @@
</assets>
<news>[B]These are the changes in this version:[/B]
[COLOR green][B]Added channels and fixes[/B][/COLOR]
» torrentrapid
» torrentlocura
» mispelisyseries
» descargas2020
» vidlox
» downace
» tvvip
» clipwatching
» hdfull
» peliculasaudiolatino
» descargas2020
» mispelisyseries
» torrentlocura
» torrentrapid
» tumejortorrent
» tvsinpagar
¤ internal fixes
¤ Thanks to the collaboration of @pipcat and @lopezvg on this version
¤ Thanks to the SOD team, @lopezvg, @f_y_m for collaborating on this version
</news>
<description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>
<summary lang="en">Browse web pages using Kodi</summary>

plugin.video.alfa/channels/animeflv_me.py Executable file → Normal file

@@ -213,7 +213,7 @@ def series(item):
context.extend(context2)
for show in show_list:
title, url, thumbnail, plot = show
items.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
items.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail, contentSerieName=title,
plot=plot, show=title, viewmode="movies_with_plot", context=context))
url_next_page = scrapertools.find_single_match(page_html, REGEX_NEXT_PAGE)
@@ -237,21 +237,26 @@ def episodios(item):
es_pelicula = False
for url, title, date in episodes:
episode = scrapertools.find_single_match(title, r'Episodio (\d+)')
new_item = Item(channel=item.channel, action="findvideos",
url=url, thumbnail=item.thumbnail, plot=plot, show=item.show)
# The link belongs to an episode
if episode:
season = 1
episode = int(episode)
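# convert the flat episode number into a season/episode pair; the renumbering table is now keyed by contentSerieName instead of show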
season, episode = renumbertools.numbered_for_tratk(
item.channel, item.show, season, episode)
item.channel, item.contentSerieName, season, episode)
new_item.infoLabels["episode"] = episode
new_item.infoLabels["season"] = season
new_item.contentSerieName = item.contentSerieName
title = "%sx%s %s (%s)" % (season, str(episode).zfill(2), "Episodio %s" % episode, date)
# The link belongs to a movie
else:
title = "%s (%s)" % (title, date)
item.url = url
es_pelicula = True
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=item.thumbnail,
plot=plot, show=item.show, fulltitle="%s %s" % (item.show, title)))
new_item.title=title
new_item.fulltitle="%s %s" % (item.show, title)
itemlist.append(new_item)
# The system supports the video library and at least one episode
# or movie was found


@@ -136,6 +136,8 @@ def start(itemlist, item):
# Get the lists of servers and available qualities from the AutoPlay json node
server_list = channel_node.get('servers', [])
for server in server_list:
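# normalize the scraped server name to lowercase so every later comparison against the favorites lists is case-insensitive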
server = server.lower()
quality_list = channel_node.get('quality', [])
# If no qualities are defined, 'default' is assigned as the only quality
@@ -145,7 +147,7 @@ def start(itemlist, item):
# The text for each server and quality is stored in lists, e.g. favorite_servers = ['openload',
# 'streamcloud']
for num in range(1, 4):
favorite_servers.append(channel_node['servers'][settings_node['server_%s' % num]])
favorite_servers.append(channel_node['servers'][settings_node['server_%s' % num]].lower())
favorite_quality.append(channel_node['quality'][settings_node['quality_%s' % num]])
# Filter the itemlist links to those matching the autoplay values
@@ -175,25 +177,25 @@ def start(itemlist, item):
# if the server and quality are not in the favorites lists, or the url is repeated,
# we discard the item
if item.server not in favorite_servers or item.quality not in favorite_quality \
if item.server.lower() not in favorite_servers or item.quality not in favorite_quality \
or item.url in url_list_valid:
item.type_b = True
b_dict['videoitem']= item
autoplay_b.append(b_dict)
continue
autoplay_elem["indice_server"] = favorite_servers.index(item.server)
autoplay_elem["indice_server"] = favorite_servers.index(item.server.lower())
autoplay_elem["indice_quality"] = favorite_quality.index(item.quality)
elif priority == 2: # Servers only
# if the server is not in the favorites list or the url is repeated,
# we discard the item
if item.server not in favorite_servers or item.url in url_list_valid:
if item.server.lower() not in favorite_servers or item.url in url_list_valid:
item.type_b = True
b_dict['videoitem'] = item
autoplay_b.append(b_dict)
continue
autoplay_elem["indice_server"] = favorite_servers.index(item.server)
autoplay_elem["indice_server"] = favorite_servers.index(item.server.lower())
elif priority == 3: # Qualities only
@@ -261,11 +263,11 @@ def start(itemlist, item):
if not platformtools.is_playing() and not played:
videoitem = autoplay_elem['videoitem']
logger.debug('videoitem %s' % videoitem)
if videoitem.server not in max_intentos_servers:
max_intentos_servers[videoitem.server] = max_intentos
if videoitem.server.lower() not in max_intentos_servers:
max_intentos_servers[videoitem.server.lower()] = max_intentos
# If the maximum number of attempts for this server has been reached, skip to the next one
if max_intentos_servers[videoitem.server] == 0:
if max_intentos_servers[videoitem.server.lower()] == 0:
continue
lang = " "
@@ -312,15 +314,15 @@ def start(itemlist, item):
logger.debug(str(len(autoplay_list)))
# If we got this far, it is because playback was not possible
max_intentos_servers[videoitem.server] -= 1
max_intentos_servers[videoitem.server.lower()] -= 1
# If the maximum number of attempts for this server has been reached,
# ask whether we want to keep trying or ignore it
if max_intentos_servers[videoitem.server] == 0:
if max_intentos_servers[videoitem.server.lower()] == 0:
text = "Parece que los enlaces de %s no estan funcionando." % videoitem.server.upper()
if not platformtools.dialog_yesno("AutoPlay", text,
"¿Desea ignorar todos los enlaces de este servidor?"):
max_intentos_servers[videoitem.server] = max_intentos
max_intentos_servers[videoitem.server.lower()] = max_intentos
# If no elements remain in the list, report it
if autoplay_elem == autoplay_list[-1]:
@@ -439,7 +441,7 @@ def check_value(channel, itemlist):
quality_list = channel_node['quality'] = list()
for item in itemlist:
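# register any server or quality found in the itemlist that the channel json does not list yet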
if item.server not in server_list and item.server !='':
if item.server.lower() not in server_list and item.server !='':
server_list.append(item.server)
change = True
if item.quality not in quality_list and item.quality !='':


@@ -121,8 +121,8 @@ def episodios(item):
data_lista = scrapertools.find_single_match(data,
'<ul class="episodios">(.+?)<\/ul><\/div><\/div><\/div>')
show = item.title
patron_caps = '<img alt=".+?" title=".+?" src="([^"]+)">'
patron_caps += '<\/a><\/div><div class=".+?">([^"]+)<\/div>.+?<a .+? href="([^"]+)">([^"]+)<\/a>'
patron_caps = '<img alt=".+?" src="([^"]+)"><\/a><\/div><div class=".+?">([^"]+)<\/div>.+?'
patron_caps += '<a .+? href="([^"]+)">([^"]+)<\/a>'
#scrapedthumbnail,#scrapedtempepi, #scrapedurl, #scrapedtitle
matches = scrapertools.find_multiple_matches(data_lista, patron_caps)
for scrapedthumbnail, scrapedtempepi, scrapedurl, scrapedtitle in matches:
@@ -148,11 +148,11 @@ def findvideos(item):
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
data1 = scrapertools.find_single_match(data,
data1 = scrapertools.find_single_match(data,
'<div id="playex" .+?>(.+?)<\/nav><\/div><\/div>')
patron='src="(.+?)"'
itemla = scrapertools.find_multiple_matches(data1,patron)
if "favicons?domain" in itemla[1]:
if "favicons?domain" in itemla[0]:
method = 1
data2=scrapertools.find_single_match(data, "var \$user_hashs = {(.+?)}")
patron='".+?":"(.+?)"'


@@ -140,6 +140,8 @@ def listado(item):
extra = ""
context = "movie"
year = scrapertools.find_single_match(scrapedthumbnail, r'-(\d{4})')
if not year or year <= "1900":
year = '-'
if ".com/serie" in url and "/miniseries" not in url:
action = "episodios"
@@ -219,6 +221,8 @@ def listado_busqueda(item):
if calidad == "":
calidad = scrapertools.find_single_match(title, r'..*?(\[.*?.*\])') #movies
year = scrapertools.find_single_match(thumb, r'-(\d{4})')
if not year or year <= "1900":
year = '-'
# fix encoding for title
title = scrapertools.htmlclean(title)
@@ -319,7 +323,14 @@ def findvideos(item):
# item.url = item.url.replace(".com/",".com/ver-online/")
# item.url = item.url.replace(".com/",".com/descarga-directa/")
item.url = item.url.replace(".com/", ".com/descarga-torrent/")
# Fetch the updated information for the Episode
if item.contentType == "episode":
if not item.contentTitle and (not item.infoLabels['title'] or item.infoLabels['title'] == 'null' or item.infoLabels['title'] == "None"):
tmdb.set_infoLabels_item(item, seekTmdb = True)
if not item.contentTitle:
item.contentTitle = item.infoLabels['title']
# Download the page
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
@@ -367,7 +378,7 @@ def findvideos(item):
itemlist.append(item.clone(title=title_gen, action="", folder=False)) #Title with all the video details
title = title_torrent
title_torrent = '[COLOR yellow][Torrent]- [/COLOR]%s [online]' % (title_torrent)
title_torrent = '[COLOR salmon]??[/COLOR], [COLOR yellow][Torrent]- [/COLOR]%s [online]' % (title_torrent)
if url != "": #Torrent
itemlist.append(
Item(channel=item.channel, action="play", server="torrent", title=title_torrent, fulltitle=title,
@@ -410,6 +421,14 @@ def findvideos(item):
devuelve = servertools.findvideosbyserver(enlace, servidor)
if devuelve:
enlace = devuelve[0][1]
item.alive = servertools.check_video_link(enlace, servidor)
if item.alive.lower() == "ok":
titulo = '%s, %s' % (item.alive, titulo)
elif item.alive == "??":
titulo = '[COLOR salmon]%s[/COLOR], %s' % (item.alive, titulo)
else:
logger.debug(item.alive + ": / " + titulo + " / " + enlace)
raise
itemlist.append(
Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo,
fulltitle=title, url=enlace, thumbnail=logo, plot=item.plot, infoLabels=item.infoLabels, folder=False))
@@ -446,6 +465,15 @@ def findvideos(item):
devuelve = servertools.findvideosbyserver(enlace, servidor)
if devuelve:
enlace = devuelve[0][1]
if p <= 2:
item.alive = servertools.check_video_link(enlace, servidor)
if item.alive.lower() == "ok":
parte_titulo = '%s, %s' % (item.alive, parte_titulo)
elif item.alive == "??":
parte_titulo = '[COLOR salmon]%s[/COLOR], %s' % (item.alive, parte_titulo)
else:
logger.debug(item.alive + ": / " + parte_titulo + " / " + enlace)
break
itemlist.append(Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor,
title=parte_titulo, fulltitle=title, url=enlace, thumbnail=logo,
plot=item.plot, infoLabels=item.infoLabels, folder=False))


@@ -4,55 +4,79 @@ import re
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger
from channelselector import get_thumb
from platformcode import logger
HOST = "http://documentales-online.com/"
def mainlist(item):
logger.info()
itemlist = list()
itemlist.append(Item(channel=item.channel, title="Novedades", action="listado", url=HOST,
itemlist.append(Item(channel=item.channel, title="Novedades", action="videos", url=HOST,
thumbnail=get_thumb('newest', auto=True)))
itemlist.append(Item(channel=item.channel, title="Destacados", action="seccion", url=HOST, extra="destacados",
thumbnail=get_thumb('hot', auto=True)))
itemlist.append(Item(channel=item.channel, title="Series", action="seccion", url=HOST, extra="series",
itemlist.append(Item(channel=item.channel, title="Series destacadas", action="seccion", url=HOST, extra="series",
thumbnail=get_thumb('tvshows', auto=True)))
itemlist.append(Item(channel=item.channel, title="Categorías", action="categorias", url=HOST,
thumbnail=get_thumb('categories', auto=True)))
# itemlist.append(Item(channel=item.channel, title="Top 100", action="categorias", url=HOST))
# itemlist.append(Item(channel=item.channel, title="Populares", action="categorias", url=HOST))
itemlist.append(Item(channel=item.channel, title="Top 100", action="listado", url=HOST + "top/",
thumbnail=get_thumb('more voted', auto=True)))
itemlist.append(Item(channel=item.channel, title="Populares", action="listado", url=HOST + "populares/",
thumbnail=get_thumb('more watched', auto=True)))
itemlist.append(Item(channel=item.channel, title="Series y Temas", action="listado", url=HOST + "series-temas/",
thumbnail=get_thumb('tvshows', auto=True)))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search",
thumbnail=get_thumb('search', auto=True)))
return itemlist
# itemlist.append(Item(channel=item.channel, title=" Series y Temas", action="categorias", url=HOST))
def listado(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = data.replace('<span class="wpp-views">', '')
bloque = scrapertools.find_single_match(data, 'class="post-entry(.*?)class="post-share')
if "series-temas" not in item.url:
patron = '<a href="([^"]+)".*?'
patron += 'title="([^"]+)".*?'
patron += '/a>([^<]+)<'
matches = scrapertools.find_multiple_matches(bloque, patron)
for scrapedurl, scrapedtitle, scrapedextra in matches:
itemlist.append(Item(action = "findvideos",
channel = item.channel,
title = scrapedtitle + scrapedextra,
url = HOST + scrapedurl
))
else:
patron = """<a href='([^']+)'.*?"""
patron += """>([^<]+)<.*?"""
matches = scrapertools.find_multiple_matches(bloque, patron)
for scrapedurl, scrapedtitle in matches:
itemlist.append(Item(action = "videos",
channel = item.channel,
title = scrapedtitle,
url = HOST + scrapedurl
))
return itemlist
def seccion(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
if item.extra == "destacados":
patron_seccion = '<h4 class="widget-title">Destacados</h4><div class="textwidget"><ul>(.*?)</ul>'
action = "findvideos"
else:
patron_seccion = '<h4 class="widget-title">Series destacadas</h4><div class="textwidget"><ul>(.*?)</ul>'
action = "listado"
action = "videos"
data = scrapertools.find_single_match(data, patron_seccion)
matches = re.compile('<a href="([^"]+)">(.*?)</a>', re.DOTALL).findall(data)
matches = scrapertools.find_multiple_matches(data, '<a href="([^"]+)">(.*?)</a>')
aux_action = action
for url, title in matches:
if item.extra != "destacados" and "Cosmos (Carl Sagan)" in title:
@@ -60,61 +84,46 @@ def seccion(item):
else:
action = aux_action
itemlist.append(item.clone(title=title, url=url, action=action, fulltitle=title))
return itemlist
def listado(item):
def videos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
pagination = scrapertools.find_single_match(data, '<div class="older"><a href="([^"]+)"')
pagination = scrapertools.find_single_match(data, "rel='next' href='([^']+)'")
if not pagination:
pagination = scrapertools.find_single_match(data, '<span class=\'current\'>\d</span>'
'<a class="page larger" href="([^"]+)">')
patron = '<ul class="sp-grid">(.*?)</ul>'
data = scrapertools.find_single_match(data, patron)
matches = re.compile('<a href="([^"]+)">(.*?)</a>.*?<img.*?src="([^"]+)"', re.DOTALL).findall(data)
for url, title, thumb in matches:
itemlist.append(item.clone(title=title, url=url, action="findvideos", fulltitle=title, thumbnail=thumb))
if pagination:
itemlist.append(item.clone(title=">> Página siguiente", url=pagination))
return itemlist
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
data = scrapertools.find_single_match(data, 'a href="#">Categorías</a><ul class="sub-menu">(.*?)</ul>')
matches = re.compile('<a href="([^"]+)">(.*?)</a>', re.DOTALL).findall(data)
matches = scrapertools.find_multiple_matches(data, '<a href="([^"]+)">(.*?)</a>')
for url, title in matches:
itemlist.append(item.clone(title=title, url=url, action="listado", fulltitle=title))
itemlist.append(item.clone(title=title, url=url, action="videos", fulltitle=title))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
try:
item.url = HOST + "?s=%s" % texto
return listado(item)
return videos(item)
# Catch the exception so the global search is not interrupted if a channel fails
except:
import sys
@@ -125,37 +134,21 @@ def search(item, texto):
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
if item.fulltitle == "Cosmos (Carl Sagan)":
matches = scrapertools.find_multiple_matches(data,
'<p><strong>(.*?)</strong><br /><iframe.+?src="(https://www\.youtube\.com/[^?]+)')
if "Cosmos (Carl Sagan)" in item.title:
patron = '(?s)<p><strong>([^<]+)<.*?'
patron += '<iframe.*?src="([^"]+)"'
matches = scrapertools.find_multiple_matches(data,patron)
for title, url in matches:
new_item = item.clone(title=title, url=url)
from core import servertools
aux_itemlist = servertools.find_video_items(new_item)
for videoitem in aux_itemlist:
videoitem.title = new_item.title
videoitem.fulltitle = new_item.title
videoitem.channel = item.channel
# videoitem.thumbnail = item.thumbnail
itemlist.extend(aux_itemlist)
itemlist.append(item.clone(action = "play", title=title, url=url
))
else:
data = scrapertools.find_multiple_matches(data, '<iframe.+?src="(https://www\.youtube\.com/[^?]+)')
from core import servertools
data = scrapertools.find_multiple_matches(data, '<iframe.+?src="([^"]+)"')
itemlist.extend(servertools.find_video_items(data=",".join(data)))
for videoitem in itemlist:
videoitem.fulltitle = item.fulltitle
videoitem.channel = item.channel
# videoitem.thumbnail = item.thumbnail
itemlist = servertools.get_servers_itemlist(itemlist)
return itemlist


@@ -23,36 +23,6 @@ list_quality = CALIDADES.values()
list_servers = ['directo', 'openload']
host = 'http://doomtv.net/'
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0 Chrome/58.0.3029.110',
'Referer': host}
tgenero = {"Comedia": "https://s7.postimg.org/ne9g9zgwb/comedia.png",
"Suspenso": "https://s13.postimg.org/wmw6vl1cn/suspenso.png",
"Drama": "https://s16.postimg.org/94sia332d/drama.png",
"Acción": "https://s3.postimg.org/y6o9puflv/accion.png",
"Aventura": "https://s10.postimg.org/6su40czih/aventura.png",
"Romance": "https://s15.postimg.org/fb5j8cl63/romance.png",
"Animación": "https://s13.postimg.org/5on877l87/animacion.png",
"Ciencia ficción": "https://s9.postimg.org/diu70s7j3/cienciaficcion.png",
"Terror": "https://s7.postimg.org/yi0gij3gb/terror.png",
"Documental": "https://s16.postimg.org/7xjj4bmol/documental.png",
"Música": "https://s29.postimg.org/bbxmdh9c7/musical.png",
"Fantasía": "https://s13.postimg.org/65ylohgvb/fantasia.png",
"Bélico Guerra": "https://s23.postimg.org/71itp9hcr/belica.png",
"Misterio": "https://s1.postimg.org/w7fdgf2vj/misterio.png",
"Crimen": "https://s4.postimg.org/6z27zhirx/crimen.png",
"Biográfia": "https://s15.postimg.org/5lrpbx323/biografia.png",
"Familia": "https://s7.postimg.org/6s7vdhqrf/familiar.png",
"Familiar": "https://s7.postimg.org/6s7vdhqrf/familiar.png",
"Intriga": "https://s27.postimg.org/v9og43u2b/intriga.png",
"Thriller": "https://s22.postimg.org/5y9g0jsu9/thriller.png",
"Guerra": "https://s4.postimg.org/n1h2jp2jh/guerra.png",
"Estrenos": "https://s21.postimg.org/fy69wzm93/estrenos.png",
"Peleas": "https://s14.postimg.org/we1oyg05t/peleas.png",
"Policiales": "https://s21.postimg.org/n9e0ci31z/policial.png",
"Uncategorized": "https://s30.postimg.org/uj5tslenl/otros.png",
"LGBT": "https://s30.postimg.org/uj5tslenl/otros.png"}
def mainlist(item):
@@ -177,15 +147,13 @@ def seccion(item):
url = scrapedurl
title = scrapedtitle
thumbnail = ''
if title in tgenero:
thumbnail = tgenero[title]
if url not in duplicado:
itemlist.append(
Item(channel=item.channel,
action='lista',
title=title,
url=url,
thumbnail = thumbnail
thumbnail=thumbnail
))
return itemlist
@@ -221,64 +189,25 @@ def newest(categoria):
return itemlist
def get_vip(item, url):
logger.info()
itemlist = []
data = httptools.downloadpage(url+'/videocontent').data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
video_id = scrapertools.find_single_match(data, 'id=videoInfo ><span >(.*?)</span>')
new_url = 'https://v.d0stream.com/api/videoinfo/%s?src-url=https://Fv.d0stream.com' % video_id
json_data = httptools.downloadpage(new_url).data
dict_data = jsontools.load(json_data)
sources = dict_data['sources']
for vip_item in sources['mp4_cdn']:
vip_url= vip_item['url']
vip_quality = vip_item['label']
title ='%s [%s]' % (item.title, vip_quality)
itemlist.append(item.clone(title = title, url=vip_url, action='play', quality=vip_quality, server='directo'))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
player_vip = scrapertools.find_single_match(data, 'class=movieplay><iframe src=(https://v.d0stream.com.*?) frameborder')
itemlist.extend(get_vip(item, player_vip))
patron = 'id=(tab\d+)><div class=movieplay><(?:iframe|script) src=(.*?)(?:scrolling|><\/script>)'
patron = 'id=(tab\d+)><div class=movieplay><(?:iframe|script) src=(.*?)(?:scrolling|frameborder|><\/script>)'
matches = re.compile(patron, re.DOTALL).findall(data)
for option, urls in matches:
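# 'content' urls are protocol-relative pages that embed the real file in a jwplayer-style sources block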
if 'content' in urls:
urls = '%s%s'%('http:',urls)
hidden_data = httptools.downloadpage(urls).data
hidden_data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", hidden_data)
patron = 'sources: \[{file: (.*?),'
matches = re.compile(patron, re.DOTALL).findall(hidden_data)
for videoitem in matches:
new_item = Item(
channel = item.channel,
url = videoitem,
title = item.title,
contentTitle = item.title,
action = 'play',
)
itemlist.append(new_item)
else:
new_item = Item(
channel=item.channel,
url=urls,
title=item.title,
contentTitle=item.title,
action='play',
)
itemlist.append(new_item)
new_item = Item(
channel=item.channel,
url=urls,
title=item.title,
contentTitle=item.title,
action='play',
)
itemlist.append(new_item)
itemlist = servertools.get_servers_itemlist(itemlist)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':


@@ -664,14 +664,12 @@ def findvideos(item):
data = agrupa_datos(httptools.downloadpage(item.url).data)
data_obf = scrapertools.find_single_match(data, "var ad\s*=\s*'([^']+)'")
data_decrypt = jsontools.load(obfs(base64.b64decode(data_obf), 126 - int(key)))
infolabels = {}
year = scrapertools.find_single_match(data, '<span>A&ntilde;o:\s*</span>.*?(\d{4})')
infolabels["year"] = year
matches = []
for match in data_decrypt:
prov = eval(scrapertools.find_single_match(data_js, 'p\[%s\]\s*=\s*(\{.*?\}[\']\})' % match["provider"]))
server_url = scrapertools.find_single_match(prov['l'], 'return\s*"(.*?)"')
url = '%s%s' % (server_url, match['code'])
@@ -831,20 +829,10 @@ def get_status(status, type, id):
## --------------------------------------------------------------------------------
def jhexdecode(t):
k = re.sub(r'(_0x.{4})(?=\(|=)', 'var_0', t).replace('\'','\"')
def to_hex(c, type):
h = int("%s" % c, 16)
if type == '1':
return 'p[%s]' % h
if type == '2':
return '[%s]' % h
x = re.sub(r'(?:p\[)(0x.{,2})(?:\])', lambda z: to_hex(z.group(1), '1'), k)
y = re.sub(r'(?:\(")(0x.{,2})(?:"\))', lambda z: to_hex(z.group(1), '2'), x)
r = re.sub(r'_\d+x\w+x(\d+)', 'var_' + r'\1', t)
r = re.sub(r'_\d+x\w+', 'var_0', r)
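# to_hx decodes \xNN escape sequences back into literal characters before the deobfuscated source is evaluated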
def to_hx(c):
h = int("%s" % c.groups(0), 16)
@@ -852,14 +840,8 @@ def jhexdecode(t):
return chr(h)
else:
return ""
r = re.sub(r'(?:\\|)x(\w{2})(?=[^\w\d])', to_hx, y).replace('var ', '')
server_list = eval(scrapertools.find_single_match(r, '=(\[.*?\])'))
for val in range(475,0, -1):
server_list.append(server_list[0])
server_list.pop(0)
r = re.sub(r'=\[(.*?)\]', '=%s' % str(server_list), r)
r = re.sub(r'(?:\\|)x(\w{2})', to_hx, r).replace('var ', '')
f = eval(scrapertools.get_match(r, '\s*var_0\s*=\s*([^;]+);'))
for i, v in enumerate(f):
@@ -875,7 +857,6 @@ def jhexdecode(t):
return r
def obfs(data, key, n=126):
chars = list(data)
for i in range(0, len(chars)):


@@ -140,6 +140,8 @@ def listado(item):
extra = ""
context = "movie"
year = scrapertools.find_single_match(scrapedthumbnail, r'-(\d{4})')
if not year or year <= "1900":
year = '-'
if ".com/serie" in url and "/miniseries" not in url:
action = "episodios"
@@ -219,6 +221,8 @@ def listado_busqueda(item):
if calidad == "":
calidad = scrapertools.find_single_match(title, r'..*?(\[.*?.*\])') #movies
year = scrapertools.find_single_match(thumb, r'-(\d{4})')
if not year or year <= "1900":
year = '-'
# fix encoding for title
title = scrapertools.htmlclean(title)
@@ -319,7 +323,14 @@ def findvideos(item):
# item.url = item.url.replace(".com/",".com/ver-online/")
# item.url = item.url.replace(".com/",".com/descarga-directa/")
item.url = item.url.replace(".com/", ".com/descarga-torrent/")
# Fetch the updated information for the Episode
if item.contentType == "episode":
if not item.contentTitle and (not item.infoLabels['title'] or item.infoLabels['title'] == 'null' or item.infoLabels['title'] == "None"):
tmdb.set_infoLabels_item(item, seekTmdb = True)
if not item.contentTitle:
item.contentTitle = item.infoLabels['title']
# Download the page
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
@@ -367,7 +378,7 @@ def findvideos(item):
itemlist.append(item.clone(title=title_gen, action="", folder=False)) #Title with all the video details
title = title_torrent
title_torrent = '[COLOR yellow][Torrent]- [/COLOR]%s [online]' % (title_torrent)
title_torrent = '[COLOR salmon]??[/COLOR], [COLOR yellow][Torrent]- [/COLOR]%s [online]' % (title_torrent)
if url != "": #Torrent
itemlist.append(
Item(channel=item.channel, action="play", server="torrent", title=title_torrent, fulltitle=title,
@@ -410,6 +421,14 @@ def findvideos(item):
devuelve = servertools.findvideosbyserver(enlace, servidor)
if devuelve:
enlace = devuelve[0][1]
item.alive = servertools.check_video_link(enlace, servidor)
if item.alive.lower() == "ok":
titulo = '%s, %s' % (item.alive, titulo)
elif item.alive == "??":
titulo = '[COLOR salmon]%s[/COLOR], %s' % (item.alive, titulo)
else:
logger.debug(item.alive + ": / " + titulo + " / " + enlace)
raise
itemlist.append(
Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo,
fulltitle=title, url=enlace, thumbnail=logo, plot=item.plot, infoLabels=item.infoLabels, folder=False))
@@ -446,6 +465,15 @@ def findvideos(item):
devuelve = servertools.findvideosbyserver(enlace, servidor)
if devuelve:
enlace = devuelve[0][1]
if p <= 2:
item.alive = servertools.check_video_link(enlace, servidor)
if item.alive.lower() == "ok":
parte_titulo = '%s, %s' % (item.alive, parte_titulo)
elif item.alive == "??":
parte_titulo = '[COLOR salmon]%s[/COLOR], %s' % (item.alive, parte_titulo)
else:
logger.debug(item.alive + ": / " + parte_titulo + " / " + enlace)
break
itemlist.append(Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor,
title=parte_titulo, fulltitle=title, url=enlace, thumbnail=logo,
plot=item.plot, infoLabels=item.infoLabels, folder=False))


@@ -9,7 +9,7 @@ from core import servertools
from core.item import Item
from platformcode import logger, config
HOST = 'http://peliculasaudiolatino.com'
HOST = 'http://verpeliculasenlatino.com'
def mainlist(item):


@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
import re
import urllib
from core import httptools
from core import scrapertools
@@ -215,37 +216,29 @@ def findvideos(item):
itemlist = []
data = get_source(item.url)
patron = '<div class=TPlayer.*?\s+id=(.*?)><iframe width=560 height=315 src=(.*?) frameborder=0'
patron = '<div class=TPlayerTb.Current id=(.*?)>.*?src=(.*?) frameborder'
matches = re.compile(patron, re.DOTALL).findall(data)
base_link = 'https://repros.live/player/ajaxdata'
for opt, urls_page in matches:
logger.debug ('option: %s' % opt)
language = scrapertools.find_single_match (data,'data-TPlayerNv=%s><span>Opción <strong>.'
'<\/strong><\/span>.*?<span>(.*?)<\/span'%opt)
video_data = httptools.downloadpage(urls_page).data
servers = scrapertools.find_multiple_matches(video_data,'<button id="(.*?)"')
for server in servers:
quality = item.quality
info_urls = urls_page.replace('embed','get')
video_info=httptools.downloadpage(info_urls+'/'+server).data
video_info = jsontools.load(video_info)
video_id = video_info['extid']
video_server = video_info['server']
video_status = video_info['status']
if video_status in ['finished', 'propio']:
if video_status == 'finished':
url = 'https://'+video_server+'/embed/'+video_id
else:
url = 'https://'+video_server+'/e/'+video_id
title = item.contentTitle + ' [%s] [%s]'%(quality, language)
itemlist.append(item.clone(title=title,
url=url,
action='play',
language=language,
quality=quality
))
itemlist = servertools.get_servers_itemlist(itemlist)
language = scrapertools.find_single_match (data,'TPlayerNv>.*?tplayernv=%s><span>Opción.*?<span>(.*?)</span>' % opt)
headers = {'referer':item.url}
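# 'trembed' links point to an intermediate page; fetch it and take the real player url from its iframe src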
if 'trembed' in urls_page:
urls_page = scrapertools.decodeHtmlentities(urls_page)
sub_data=httptools.downloadpage(urls_page).data
urls_page = scrapertools.find_single_match(sub_data, 'src="(.*?)" ')
video_data = httptools.downloadpage(urls_page, headers=headers).data
servers = scrapertools.find_multiple_matches(video_data,'data-player="(.*?)" data-embed="(.*?)">')
for server, code in servers:
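# resolve each embed code into a playable url by POSTing it to the repros.live ajax endpoint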
post = {'codigo':code}
post = urllib.urlencode(post)
video_json=jsontools.load(httptools.downloadpage('https://repros.live/player/ajaxdata', post=post).data)
url = video_json['url']
itemlist.append(item.clone(title='[%s][%s]',
url=url,
action='play',
language=language,
))
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % (x.server.capitalize(), x.language))
return itemlist


@@ -14,7 +14,7 @@ from core import tmdb
from core.item import Item, InfoLabels
from platformcode import config, logger
host = "https://pepecine.info"
host = "https://pepecinehd.tv"
perpage = 20
def mainlist1(item):
@@ -29,7 +29,7 @@ def mainlist(item):
itemlist = []
itemlist.append(Item(channel=item.channel,
title="Ultimas",
url=host+'/peliculas-tv-online',
url=host+'/tv-peliculas-online',
action='list_latest',
indexp=1,
type='movie'))
@@ -149,7 +149,7 @@ def list_latest(item):
logger.info()
itemlist = []
data = get_source(item.url)
data_url= scrapertools.find_single_match(data,'<iframe.*?src=(.*?) style')
data_url= scrapertools.find_single_match(data,'<iframe.*?src=(.*?) ')
data = get_source(data_url)
patron = "<div class='online'>.*?<img src=(.*?) class=.*?alt=(.*?) title=.*?"
patron += "<b><a href=(.*?) target=.*?align=right><div class=s7>(.*?) <"


@@ -4,7 +4,7 @@
"active": true,
"adult": false,
"language": ["cast"],
"thumbnail": "plusdede.png",
"thumbnail": "https://s18.postimg.cc/e17e98eqh/6_-_4_Isbv_Q3.png",
"banner": "plusdede.png",
"categories": [
"movie",


@@ -61,17 +61,17 @@ def mainlist(item):
item.url = HOST
item.fanart = fanart_host
item.thumbnail = "https://github.com/master-1970/resources/raw/master/images/genres/0/Directors%20Chair.png"
item.thumbnail = "https://s18.postimg.cc/r5cylu6rd/12_-_oi_RDsdv.png"
itemlist.append(item.clone(title="Películas", action="menupeliculas", text_color=color3, text_blod=True))
item.thumbnail = "https://github.com/master-1970/resources/raw/master/images/genres/0/TV%20Series.png"
item.thumbnail = "https://s18.postimg.cc/ruvqy6zl5/15_-_9m9_Dp1m.png"
itemlist.append(item.clone(title="Series", action="menuseries", text_color=color3, text_blod=True))
itemlist.append(item.clone(title="Listas", action="menulistas", text_color=color3, text_blod=True))
itemlist.append(item.clone(title="Listas", action="menulistas", text_color=color3, text_blod=True, thumbnail = 'https://s18.postimg.cc/xj21p46ih/10_-_Uf7e_XHE.png'))
itemlist.append(item.clone(title="", folder=False, thumbnail=thumbnail_host))
item.thumbnail = ""
itemlist.append(item.clone(channel=item.channel, action="settingCanal", title="Configuración...", url=""))
itemlist.append(item.clone(channel=item.channel, action="settingCanal", title="Configuración...", url="", thumbnail='https://s18.postimg.cc/c9efeassp/3_-_QAHK2_Tc.png'))
return itemlist
@@ -86,31 +86,31 @@ def menuseries(item):
item.fanart = fanart_host
item.text_color = None
item.thumbnail = "https://github.com/master-1970/resources/raw/master/images/genres/0/Directors%20Chair.png"
item.thumbnail = "https://s18.postimg.cc/r5cylu6rd/12_-_oi_RDsdv.png"
itemlist.append(item.clone(title="Películas", action="menupeliculas", text_color=color3, text_blod=True))
item.thumbnail = "https://github.com/master-1970/resources/raw/master/images/genres/0/TV%20Series.png"
item.thumbnail = "https://s18.postimg.cc/ruvqy6zl5/15_-_9m9_Dp1m.png"
itemlist.append(item.clone(title="Series:", folder=False, text_color=color3, text_blod=True, select=True))
itemlist.append(item.clone(action="peliculas", title=" Novedades", url="https://www.plusdede.com/series"))
itemlist.append(item.clone(action="generos", title=" Por géneros", url="https://www.plusdede.com/series"))
itemlist.append(item.clone(action="peliculas", title=" Novedades", url="https://www.plusdede.com/series", thumbnail='https://s18.postimg.cc/in3ihji95/11_-_WPg_H5_Kx.png'))
itemlist.append(item.clone(action="generos", title=" Por géneros", url="https://www.plusdede.com/series", thumbnail='https://s18.postimg.cc/p0slktaah/5_-_c_Nf_KRvm.png'))
itemlist.append(
item.clone(action="peliculas", title=" Siguiendo", url="https://www.plusdede.com/series/following"))
item.clone(action="peliculas", title=" Siguiendo", url="https://www.plusdede.com/series/following", thumbnail='https://s18.postimg.cc/68gqh7j15/7_-_tqw_AHa5.png'))
itemlist.append(item.clone(action="peliculas", title=" Capítulos Pendientes",
url="https://www.plusdede.com/series/mypending/0?popup=1", viewmode="movie"))
url="https://www.plusdede.com/series/mypending/0?popup=1", viewmode="movie", thumbnail='https://s18.postimg.cc/9s2o71w1l/2_-_3dbbx7_K.png'))
itemlist.append(
item.clone(action="peliculas", title=" Favoritas", url="https://www.plusdede.com/series/favorites"))
item.clone(action="peliculas", title=" Favoritas", url="https://www.plusdede.com/series/favorites", thumbnail='https://s18.postimg.cc/n8zmpwynd/4_-_JGrig_Ep.png'))
itemlist.append(
item.clone(action="peliculas", title=" Pendientes", url="https://www.plusdede.com/series/pending"))
itemlist.append(item.clone(action="peliculas", title=" Terminadas", url="https://www.plusdede.com/series/seen"))
item.clone(action="peliculas", title=" Pendientes", url="https://www.plusdede.com/series/pending", thumbnail='https://s18.postimg.cc/4gnrmacix/13_-_cwl_TDog.png'))
itemlist.append(item.clone(action="peliculas", title=" Terminadas", url="https://www.plusdede.com/series/seen", thumbnail='https://s18.postimg.cc/5vpcay0qh/17_-_M2in_Fp_O.png'))
itemlist.append(
item.clone(action="peliculas", title=" Recomendadas", url="https://www.plusdede.com/series/recommended"))
itemlist.append(item.clone(action="search", title=" Buscar...", url="https://www.plusdede.com/series"))
item.clone(action="peliculas", title=" Recomendadas", url="https://www.plusdede.com/series/recommended", thumbnail='https://s18.postimg.cc/bwn182sih/14_-_fin32_Kp.png'))
itemlist.append(item.clone(action="search", title=" Buscar...", url="https://www.plusdede.com/series", thumbnaiil='https://s18.postimg.cc/s7n54ghvt/1_-_01_ZDYii.png'))
itemlist.append(item.clone(title="", folder=False, thumbnail=thumbnail_host))
itemlist.append(item.clone(title="Listas", action="menulistas", text_color=color3, text_blod=True))
itemlist.append(item.clone(title="Listas", action="menulistas", text_color=color3, text_blod=True, thumbnail='https://s18.postimg.cc/xj21p46ih/10_-_Uf7e_XHE.png'))
itemlist.append(item.clone(title="", folder=False, thumbnail=thumbnail_host))
item.thumbnail = ""
itemlist.append(Item(channel=item.channel, action="settingCanal", title="Configuración...", url=""))
itemlist.append(Item(channel=item.channel, action="settingCanal", title="Configuración...", url="", thumbnail='https://s18.postimg.cc/c9efeassp/3_-_QAHK2_Tc.png'))
return itemlist
@@ -122,29 +122,29 @@ def menupeliculas(item):
item.fanart = fanart_host
item.text_color = None
item.thumbnail = "https://github.com/master-1970/resources/raw/master/images/genres/0/Directors%20Chair.png"
item.thumbnail = "https://s18.postimg.cc/r5cylu6rd/12_-_oi_RDsdv.png"
itemlist.append(item.clone(title="Películas:", folder=False, text_color=color3, text_blod=True, select=True))
itemlist.append(item.clone(action="peliculas", title=" Novedades", url="https://www.plusdede.com/pelis"))
itemlist.append(item.clone(action="generos", title=" Por géneros", url="https://www.plusdede.com/pelis"))
itemlist.append(item.clone(action="peliculas", title=" Solo HD", url="https://www.plusdede.com/pelis?quality=3"))
itemlist.append(item.clone(action="peliculas", title=" Novedades", url="https://www.plusdede.com/pelis", thumbnail='https://s18.postimg.cc/in3ihji95/11_-_WPg_H5_Kx.png'))
itemlist.append(item.clone(action="generos", title=" Por géneros", url="https://www.plusdede.com/pelis", thumbnail='https://s18.postimg.cc/p0slktaah/5_-_c_Nf_KRvm.png'))
itemlist.append(item.clone(action="peliculas", title=" Solo HD", url="https://www.plusdede.com/pelis?quality=3", thumbnail='https://s18.postimg.cc/e17e95mfd/16_-_qmqn4_Si.png'))
itemlist.append(
item.clone(action="peliculas", title=" Pendientes", url="https://www.plusdede.com/pelis/pending"))
item.clone(action="peliculas", title=" Pendientes", url="https://www.plusdede.com/pelis/pending", thumbnail='https://s18.postimg.cc/4gnrmacix/13_-_cwl_TDog.png'))
itemlist.append(
item.clone(action="peliculas", title=" Recomendadas", url="https://www.plusdede.com/pelis/recommended"))
item.clone(action="peliculas", title=" Recomendadas", url="https://www.plusdede.com/pelis/recommended", thumbnail='https://s18.postimg.cc/bwn182sih/14_-_fin32_Kp.png'))
itemlist.append(
item.clone(action="peliculas", title=" Favoritas", url="https://www.plusdede.com/pelis/favorites"))
itemlist.append(item.clone(action="peliculas", title=" Vistas", url="https://www.plusdede.com/pelis/seen"))
itemlist.append(item.clone(action="search", title=" Buscar...", url="https://www.plusdede.com/pelis"))
item.clone(action="peliculas", title=" Favoritas", url="https://www.plusdede.com/pelis/favorites", thumbnail='https://s18.postimg.cc/n8zmpwynd/4_-_JGrig_Ep.png'))
itemlist.append(item.clone(action="peliculas", title=" Vistas", url="https://www.plusdede.com/pelis/seen", thumbnail='https://s18.postimg.cc/5vpcay0qh/17_-_M2in_Fp_O.png'))
itemlist.append(item.clone(action="search", title=" Buscar...", url="https://www.plusdede.com/pelis", thumbnail='https://s18.postimg.cc/s7n54ghvt/1_-_01_ZDYii.png'))
itemlist.append(item.clone(title="", folder=False, thumbnail=thumbnail_host))
item.thumbnail = "https://github.com/master-1970/resources/raw/master/images/genres/0/TV%20Series.png"
itemlist.append(item.clone(title="Series", action="menuseries", text_color=color3, text_blod=True))
itemlist.append(item.clone(title="Series", action="menuseries", text_color=color3, text_blod=True, thumbnail='https://s18.postimg.cc/ruvqy6zl5/15_-_9m9_Dp1m.png'))
itemlist.append(item.clone(title="Listas", action="menulistas", text_color=color3, text_blod=True))
itemlist.append(item.clone(title="Listas", action="menulistas", text_color=color3, text_blod=True, thumbnail='https://s18.postimg.cc/xj21p46ih/10_-_Uf7e_XHE.png'))
itemlist.append(item.clone(title="", folder=False, thumbnail=thumbnail_host))
item.thumbnail = ""
itemlist.append(item.clone(channel=item.channel, action="settingCanal", title="Configuración...", url=""))
itemlist.append(item.clone(channel=item.channel, action="settingCanal", title="Configuración...", url="", thumbnail='https://s18.postimg.cc/c9efeassp/3_-_QAHK2_Tc.png'))
return itemlist
@@ -156,23 +156,23 @@ def menulistas(item):
item.fanart = fanart_host
item.text_color = None
item.thumbnail = "https://github.com/master-1970/resources/raw/master/images/genres/0/Directors%20Chair.png"
item.thumbnail = "https://s18.postimg.cc/r5cylu6rd/12_-_oi_RDsdv.png"
itemlist.append(item.clone(title="Películas", action="menupeliculas", text_color=color3, text_blod=True))
item.thumbnail = "https://github.com/master-1970/resources/raw/master/images/genres/0/TV%20Series.png"
item.thumbnail = "https://s18.postimg.cc/ruvqy6zl5/15_-_9m9_Dp1m.png"
itemlist.append(item.clone(title="Series", action="menuseries", text_color=color3, text_blod=True))
itemlist.append(item.clone(title="Listas:", folder=False, text_color=color3, text_blod=True))
itemlist.append(item.clone(title="Listas:", folder=False, text_color=color3, text_blod=True, thumbnail='https://s18.postimg.cc/xj21p46ih/10_-_Uf7e_XHE.png'))
itemlist.append(
item.clone(action="listas", tipo="populares", title=" Populares", url="https://www.plusdede.com/listas"))
item.clone(action="listas", tipo="populares", title=" Populares", url="https://www.plusdede.com/listas", thumbnail='https://s18.postimg.cc/7aqwzrha1/8_-_3rn14_Tq.png'))
itemlist.append(
item.clone(action="listas", tipo="siguiendo", title=" Siguiendo", url="https://www.plusdede.com/listas"))
item.clone(action="listas", tipo="siguiendo", title=" Siguiendo", url="https://www.plusdede.com/listas", thumbnail='https://s18.postimg.cc/4tf5sha89/9_-_z_F8c_UBT.png'))
itemlist.append(
item.clone(action="listas", tipo="tuslistas", title=" Tus Listas", url="https://www.plusdede.com/listas"))
itemlist.append(item.clone(title="", folder=False, thumbnail=thumbnail_host))
item.thumbnail = ""
itemlist.append(item.clone(channel=item.channel, action="settingCanal", title="Configuración...", url=""))
itemlist.append(item.clone(channel=item.channel, action="settingCanal", title="Configuración...", url="", thumbnail='https://s18.postimg.cc/c9efeassp/3_-_QAHK2_Tc.png'))
return itemlist

plugin.video.alfa/channels/seriecanal.json Executable file → Normal file

plugin.video.alfa/channels/seriecanal.py Executable file → Normal file


@@ -24,7 +24,7 @@
"Inglés",
"Latino",
"Catalán",
"VOS"
"VOSE"
]
},
{
@@ -44,4 +44,4 @@
"visible": true
}
]
}
}


@@ -140,6 +140,8 @@ def listado(item):
extra = ""
context = "movie"
year = scrapertools.find_single_match(scrapedthumbnail, r'-(\d{4})')
if not year or year <= "1900":
year = '-'
if ".com/serie" in url and "/miniseries" not in url:
action = "episodios"
@@ -219,6 +221,8 @@ def listado_busqueda(item):
if calidad == "":
calidad = scrapertools.find_single_match(title, r'..*?(\[.*?.*\])') #movies
year = scrapertools.find_single_match(thumb, r'-(\d{4})')
if not year or year <= "1900":
year = '-'
# fix encoding for title
title = scrapertools.htmlclean(title)
@@ -319,7 +323,14 @@ def findvideos(item):
# item.url = item.url.replace(".com/",".com/ver-online/")
# item.url = item.url.replace(".com/",".com/descarga-directa/")
item.url = item.url.replace(".com/", ".com/descarga-torrent/")
# Fetch the updated information for the Episode
if item.contentType == "episode":
if not item.contentTitle and (not item.infoLabels['title'] or item.infoLabels['title'] == 'null' or item.infoLabels['title'] == "None"):
tmdb.set_infoLabels_item(item, seekTmdb = True)
if not item.contentTitle:
item.contentTitle = item.infoLabels['title']
# Download the page
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
@@ -367,7 +378,7 @@ def findvideos(item):
itemlist.append(item.clone(title=title_gen, action="", folder=False)) #Title with all the video details
title = title_torrent
title_torrent = '[COLOR yellow][Torrent]- [/COLOR]%s [online]' % (title_torrent)
title_torrent = '[COLOR salmon]??[/COLOR], [COLOR yellow][Torrent]- [/COLOR]%s [online]' % (title_torrent)
if url != "": #Torrent
itemlist.append(
Item(channel=item.channel, action="play", server="torrent", title=title_torrent, fulltitle=title,
@@ -410,6 +421,14 @@ def findvideos(item):
devuelve = servertools.findvideosbyserver(enlace, servidor)
if devuelve:
enlace = devuelve[0][1]
item.alive = servertools.check_video_link(enlace, servidor)
if item.alive.lower() == "ok":
titulo = '%s, %s' % (item.alive, titulo)
elif item.alive == "??":
titulo = '[COLOR salmon]%s[/COLOR], %s' % (item.alive, titulo)
else:
logger.debug(item.alive + ": / " + titulo + " / " + enlace)
raise
itemlist.append(
Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo,
fulltitle=title, url=enlace, thumbnail=logo, plot=item.plot, infoLabels=item.infoLabels, folder=False))
@@ -446,6 +465,15 @@ def findvideos(item):
devuelve = servertools.findvideosbyserver(enlace, servidor)
if devuelve:
enlace = devuelve[0][1]
if p <= 2:
item.alive = servertools.check_video_link(enlace, servidor)
if item.alive.lower() == "ok":
parte_titulo = '%s, %s' % (item.alive, parte_titulo)
elif item.alive == "??":
parte_titulo = '[COLOR salmon]%s[/COLOR], %s' % (item.alive, parte_titulo)
else:
logger.debug(item.alive + ": / " + parte_titulo + " / " + enlace)
break
itemlist.append(Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor,
title=parte_titulo, fulltitle=title, url=enlace, thumbnail=logo,
plot=item.plot, infoLabels=item.infoLabels, folder=False))


@@ -140,6 +140,8 @@ def listado(item):
extra = ""
context = "movie"
year = scrapertools.find_single_match(scrapedthumbnail, r'-(\d{4})')
if not year or year <= "1900":
year = '-'
if ".com/serie" in url and "/miniseries" not in url:
action = "episodios"
@@ -219,6 +221,8 @@ def listado_busqueda(item):
if calidad == "":
calidad = scrapertools.find_single_match(title, r'..*?(\[.*?.*\])') #movies
year = scrapertools.find_single_match(thumb, r'-(\d{4})')
if not year or year <= "1900":
year = '-'
# fix encoding for title
title = scrapertools.htmlclean(title)
@@ -319,7 +323,14 @@ def findvideos(item):
# item.url = item.url.replace(".com/",".com/ver-online/")
# item.url = item.url.replace(".com/",".com/descarga-directa/")
item.url = item.url.replace(".com/", ".com/descarga-torrent/")
# Fetch the updated information for the Episode
if item.contentType == "episode":
if not item.contentTitle and (not item.infoLabels['title'] or item.infoLabels['title'] == 'null' or item.infoLabels['title'] == "None"):
tmdb.set_infoLabels_item(item, seekTmdb = True)
if not item.contentTitle:
item.contentTitle = item.infoLabels['title']
# Download the page
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
@@ -367,7 +378,7 @@ def findvideos(item):
itemlist.append(item.clone(title=title_gen, action="", folder=False)) #Title with all the video details
title = title_torrent
title_torrent = '[COLOR yellow][Torrent]- [/COLOR]%s [online]' % (title_torrent)
title_torrent = '[COLOR salmon]??[/COLOR], [COLOR yellow][Torrent]- [/COLOR]%s [online]' % (title_torrent)
if url != "": #Torrent
itemlist.append(
Item(channel=item.channel, action="play", server="torrent", title=title_torrent, fulltitle=title,
@@ -410,6 +421,14 @@ def findvideos(item):
devuelve = servertools.findvideosbyserver(enlace, servidor)
if devuelve:
enlace = devuelve[0][1]
item.alive = servertools.check_video_link(enlace, servidor)
if item.alive.lower() == "ok":
titulo = '%s, %s' % (item.alive, titulo)
elif item.alive == "??":
titulo = '[COLOR salmon]%s[/COLOR], %s' % (item.alive, titulo)
else:
logger.debug(item.alive + ": / " + titulo + " / " + enlace)
raise
itemlist.append(
Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo,
fulltitle=title, url=enlace, thumbnail=logo, plot=item.plot, infoLabels=item.infoLabels, folder=False))
@@ -446,6 +465,15 @@ def findvideos(item):
devuelve = servertools.findvideosbyserver(enlace, servidor)
if devuelve:
enlace = devuelve[0][1]
if p <= 2:
item.alive = servertools.check_video_link(enlace, servidor)
if item.alive.lower() == "ok":
parte_titulo = '%s, %s' % (item.alive, parte_titulo)
elif item.alive == "??":
parte_titulo = '[COLOR salmon]%s[/COLOR], %s' % (item.alive, parte_titulo)
else:
logger.debug(item.alive + ": / " + parte_titulo + " / " + enlace)
break
itemlist.append(Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor,
title=parte_titulo, fulltitle=title, url=enlace, thumbnail=logo,
plot=item.plot, infoLabels=item.infoLabels, folder=False))


@@ -140,6 +140,8 @@ def listado(item):
extra = ""
context = "movie"
year = scrapertools.find_single_match(scrapedthumbnail, r'-(\d{4})')
if not year or year <= "1900":
year = '-'
if ".com/serie" in url and "/miniseries" not in url:
action = "episodios"
@@ -219,6 +221,8 @@ def listado_busqueda(item):
if calidad == "":
calidad = scrapertools.find_single_match(title, r'..*?(\[.*?.*\])') #movies
year = scrapertools.find_single_match(thumb, r'-(\d{4})')
if not year or year <= "1900":
year = '-'
# fix encoding for title
title = scrapertools.htmlclean(title)
@@ -319,7 +323,14 @@ def findvideos(item):
# item.url = item.url.replace(".com/",".com/ver-online/")
# item.url = item.url.replace(".com/",".com/descarga-directa/")
item.url = item.url.replace(".com/", ".com/descarga-torrent/")
# Fetch the updated information for the Episode
if item.contentType == "episode":
if not item.contentTitle and (not item.infoLabels['title'] or item.infoLabels['title'] == 'null' or item.infoLabels['title'] == "None"):
tmdb.set_infoLabels_item(item, seekTmdb = True)
if not item.contentTitle:
item.contentTitle = item.infoLabels['title']
# Download the page
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
@@ -367,7 +378,7 @@ def findvideos(item):
itemlist.append(item.clone(title=title_gen, action="", folder=False)) #Title with all the video details
title = title_torrent
title_torrent = '[COLOR yellow][Torrent]- [/COLOR]%s [online]' % (title_torrent)
title_torrent = '[COLOR salmon]??[/COLOR], [COLOR yellow][Torrent]- [/COLOR]%s [online]' % (title_torrent)
if url != "": #Torrent
itemlist.append(
Item(channel=item.channel, action="play", server="torrent", title=title_torrent, fulltitle=title,
@@ -410,6 +421,14 @@ def findvideos(item):
devuelve = servertools.findvideosbyserver(enlace, servidor)
if devuelve:
enlace = devuelve[0][1]
item.alive = servertools.check_video_link(enlace, servidor)
if item.alive.lower() == "ok":
titulo = '%s, %s' % (item.alive, titulo)
elif item.alive == "??":
titulo = '[COLOR salmon]%s[/COLOR], %s' % (item.alive, titulo)
else:
logger.debug(item.alive + ": / " + titulo + " / " + enlace)
raise
itemlist.append(
Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo,
fulltitle=title, url=enlace, thumbnail=logo, plot=item.plot, infoLabels=item.infoLabels, folder=False))
@@ -446,6 +465,15 @@ def findvideos(item):
devuelve = servertools.findvideosbyserver(enlace, servidor)
if devuelve:
enlace = devuelve[0][1]
if p <= 2:
item.alive = servertools.check_video_link(enlace, servidor)
if item.alive.lower() == "ok":
parte_titulo = '%s, %s' % (item.alive, parte_titulo)
elif item.alive == "??":
parte_titulo = '[COLOR salmon]%s[/COLOR], %s' % (item.alive, parte_titulo)
else:
logger.debug(item.alive + ": / " + parte_titulo + " / " + enlace)
break
itemlist.append(Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor,
title=parte_titulo, fulltitle=title, url=enlace, thumbnail=logo,
plot=item.plot, infoLabels=item.infoLabels, folder=False))


@@ -1,4 +1,4 @@
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
import re
@@ -140,6 +140,8 @@ def listado(item):
extra = ""
context = "movie"
year = scrapertools.find_single_match(scrapedthumbnail, r'-(\d{4})')
if not year or year <= "1900":
year = '-'
if ".com/serie" in url and "/miniseries" not in url:
action = "episodios"
@@ -219,6 +221,8 @@ def listado_busqueda(item):
if calidad == "":
calidad = scrapertools.find_single_match(title, r'..*?(\[.*?.*\])') #movies
year = scrapertools.find_single_match(thumb, r'-(\d{4})')
if not year or year <= "1900":
year = '-'
# fix encoding for title
title = scrapertools.htmlclean(title)
@@ -319,7 +323,14 @@ def findvideos(item):
# item.url = item.url.replace(".com/",".com/ver-online/")
# item.url = item.url.replace(".com/",".com/descarga-directa/")
item.url = item.url.replace(".com/", ".com/descarga-torrent/")
# Fetch the updated information for the Episode
if item.contentType == "episode":
if not item.contentTitle and (not item.infoLabels['title'] or item.infoLabels['title'] == 'null' or item.infoLabels['title'] == "None"):
tmdb.set_infoLabels_item(item, seekTmdb = True)
if not item.contentTitle:
item.contentTitle = item.infoLabels['title']
# Download the page
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
@@ -367,7 +378,7 @@ def findvideos(item):
itemlist.append(item.clone(title=title_gen, action="", folder=False)) #Title with all the video details
title = title_torrent
title_torrent = '[COLOR yellow][Torrent]- [/COLOR]%s [online]' % (title_torrent)
title_torrent = '[COLOR salmon]??[/COLOR], [COLOR yellow][Torrent]- [/COLOR]%s [online]' % (title_torrent)
if url != "": #Torrent
itemlist.append(
Item(channel=item.channel, action="play", server="torrent", title=title_torrent, fulltitle=title,
@@ -410,6 +421,14 @@ def findvideos(item):
devuelve = servertools.findvideosbyserver(enlace, servidor)
if devuelve:
enlace = devuelve[0][1]
item.alive = servertools.check_video_link(enlace, servidor)
if item.alive.lower() == "ok":
titulo = '%s, %s' % (item.alive, titulo)
elif item.alive == "??":
titulo = '[COLOR salmon]%s[/COLOR], %s' % (item.alive, titulo)
else:
logger.debug(item.alive + ": / " + titulo + " / " + enlace)
raise
itemlist.append(
Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo,
fulltitle=title, url=enlace, thumbnail=logo, plot=item.plot, infoLabels=item.infoLabels, folder=False))
@@ -446,6 +465,15 @@ def findvideos(item):
devuelve = servertools.findvideosbyserver(enlace, servidor)
if devuelve:
enlace = devuelve[0][1]
if p <= 2:
item.alive = servertools.check_video_link(enlace, servidor)
if item.alive.lower() == "ok":
parte_titulo = '%s, %s' % (item.alive, parte_titulo)
elif item.alive == "??":
parte_titulo = '[COLOR salmon]%s[/COLOR], %s' % (item.alive, parte_titulo)
else:
logger.debug(item.alive + ": / " + parte_titulo + " / " + enlace)
break
itemlist.append(Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor,
title=parte_titulo, fulltitle=title, url=enlace, thumbnail=logo,
plot=item.plot, infoLabels=item.infoLabels, folder=False))


@@ -1,22 +1,23 @@
{
"id": "vertelenovelas",
"name": "Ver Telenovelas",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "vertelenovelas.png",
"banner": "vertelenovelas.png",
"categories": [
"tvshow"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": true,
"visible": true
}
]
}
{
"id": "tvvip",
"name": "TV-VIP",
"active": true,
"adult": false,
"language": ["cast"],
"thumbnail": "http://i.imgur.com/gNHVlI4.png",
"banner": "http://i.imgur.com/wyRk5AG.png",
"categories": [
"movie",
"tvshow"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": "!eq(-1,'') + !eq(-2,'')",
"visible": true
}
]
}


@@ -0,0 +1,666 @@
# -*- coding: utf-8 -*-
import os
import re
import sys
import unicodedata
import urllib
import time
from core import channeltools
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from platformcode import platformtools
host = "http://tv-vip.com"
def mainlist(item):
logger.info()
item.viewmode = "movie"
itemlist = []
data = httptools.downloadpage(host + "/json/playlist/home/index.json")
itemlist.append(Item(channel=item.channel, title="Películas", action="submenu",
thumbnail=host+"/json/playlist/peliculas/thumbnail.jpg",
fanart=host+"/json/playlist/peliculas/background.jpg"))
itemlist.append(Item(channel=item.channel, title="Series", action="submenu",
thumbnail=host+"/json/playlist/series/poster.jpg",
fanart=host+"/json/playlist/series/background.jpg"))
itemlist.append(Item(channel=item.channel, title="Versión Original", action="entradasconlistas",
url=host+"/json/playlist/version-original/index.json",
thumbnail=host+"/json/playlist/version-original/thumbnail.jpg",
fanart=host+"/json/playlist/version-original/background.jpg"))
itemlist.append(Item(channel=item.channel, title="Documentales", action="entradasconlistas",
url=host+"/json/playlist/documentales/index.json",
thumbnail=host+"/json/playlist/documentales/thumbnail.jpg",
fanart=host+"/json/playlist/documentales/background.jpg"))
itemlist.append(Item(channel=item.channel, title="Películas Infantiles", action="entradasconlistas",
url=host+"/json/playlist/peliculas-infantiles/index.json",
thumbnail=host+"/json/playlist/peliculas-infantiles/thumbnail.jpg",
fanart=host+"/json/playlist/peliculas-infantiles/background.jpg"))
itemlist.append(Item(channel=item.channel, title="Series Infantiles", action="entradasconlistas",
url=host+"/json/playlist/series-infantiles/index.json",
thumbnail=host+"/json/playlist/series-infantiles/thumbnail.jpg",
fanart=host+"/json/playlist/series-infantiles/background.jpg"))
itemlist.append(Item(channel=item.channel, title="Buscar...", action="search",
thumbnail="http://i.imgur.com/gNHVlI4.png", fanart="http://i.imgur.com/9loVksV.png"))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "%20")
if item.title == "Buscar...": item.extra = "local"
item.url = host + "/video-prod/s/search?q=%s&n=100" % texto
try:
return busqueda(item, texto)
# Se captura la excepción, para no interrumpir al buscador global si un canal falla
except:
for line in sys.exc_info():
logger.error("%s" % line)
return []
def busqueda(item, texto):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = jsontools.load(data)
for child in data["objectList"]:
infolabels = {}
infolabels['year'] = child['year']
if child['tags']: infolabels['genre'] = ', '.join([x.strip() for x in child['tags']])
infolabels['rating'] = child['rate'].replace(',', '.')
infolabels['votes'] = child['rateCount']
if child['cast']: infolabels['cast'] = child['cast'].split(",")
infolabels['director'] = child['director']
if 'playListChilds' not in child:
infolabels['plot'] = child['description']
type = "repo"
fulltitle = child['name']
title = child['name']
infolabels['duration'] = child['duration']
if child['height'] < 720:
quality = "[B] [SD][/B]"
elif child['height'] < 1080:
quality = "[B] [720p][/B]"
elif child['height'] < 2160:
quality = "[B] [1080p][/B]"
elif child['height'] >= 2160:
quality = "[B] [4k][/B]"
if child['name'] == "":
title = child['id'].rsplit(".", 1)[0]
else:
title = child['name']
if child['year']:
title += " (" + child['year'] + ")"
title += quality
else:
type = "playlist"
infolabels['plot'] = "Contiene:\n" + "\n".join(child['playListChilds']) + "\n".join(child['repoChilds'])
fulltitle = child['id']
title = "[COLOR red][LISTA][/COLOR] " + child['id'].replace('-', ' ').capitalize() + " ([COLOR gold]" + \
str(child['number']) + "[/COLOR])"
# En caso de búsqueda global se filtran los resultados
if item.extra != "local":
if "+" in texto: texto = "|".join(texto.split("+"))
if not re.search(r'(?i)' + texto, title, flags=re.DOTALL): continue
url = host + "/json/%s/%s/index.json" % (type, child["id"])
# Fanart
if child['hashBackground']:
fanart = host + "/json/%s/%s/background.jpg" % (type, child["id"])
else:
fanart = host + "/json/%s/%s/thumbnail.jpg" % (type, child["id"])
# Thumbnail
if child['hasPoster']:
thumbnail = host + "/json/%s/%s/poster.jpg" % (type, child["id"])
else:
thumbnail = fanart
if type == 'playlist':
itemlist.insert(0, Item(channel=item.channel, action="entradasconlistas", title=title,
url=url, thumbnail=thumbnail, fanart=fanart, fulltitle=fulltitle,
infoLabels=infolabels, viewmode="movie_with_plot", folder=True))
else:
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url,
thumbnail=thumbnail, fanart=fanart, fulltitle=fulltitle, contentTitle=fulltitle,
context="05", infoLabels=infolabels, viewmode="movie_with_plot", folder=True))
return itemlist
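The height-to-quality ladder above is repeated verbatim in entradas(), entradasconlistas() and series() below; a small helper would keep the four copies from drifting. A sketch (quality_label is hypothetical, not part of the channel):

# Hypothetical helper consolidating the repeated height -> label ladder.
def quality_label(height):
    if height < 720:
        return "[B] [SD][/B]"
    elif height < 1080:
        return "[B] [720p][/B]"
    elif height < 2160:
        return "[B] [1080p][/B]"
    return "[B] [4k][/B]"

print(quality_label(1080))  # prints: [B] [1080p][/B]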
def submenu(item):
logger.info()
itemlist = []
if item.title == "Series":
itemlist.append(Item(channel=item.channel, title="Nuevos Capítulos", action="episodios",
url=host+"/json/playlist/nuevos-capitulos/index.json",
thumbnail=host+"/json/playlist/nuevos-capitulos/background.jpg",
fanart=host+"/json/playlist/nuevos-capitulos/background.jpg"))
itemlist.append(Item(channel=item.channel, title="Más Vistas", action="series",
url=host+"/json/playlist/top-series/index.json",
thumbnail=host+"/playlist/top-series/thumbnail.jpg",
fanart=host+"/json/playlist/top-series/background.jpg",
extra1="Series"))
itemlist.append(Item(channel=item.channel, title="Últimas Series", action="series",
url=host+"/json/playlist/series/index.json",
thumbnail=item.thumbnail, fanart=item.fanart, extra1="Series"))
itemlist.append(Item(channel=item.channel, title="Lista de Series A-Z", action="series",
url=host+"/json/playlist/series/index.json", thumbnail=item.thumbnail,
fanart=item.fanart, extra1="Series"))
else:
itemlist.append(Item(channel=item.channel, title="Novedades", action="entradas",
url=host+"/json/playlist/000-novedades/index.json",
thumbnail=host+"/json/playlist/ultimas-peliculas/thumbnail.jpg",
fanart=host+"/json/playlist/ultimas-peliculas/background.jpg"))
itemlist.append(Item(channel=item.channel, title="Más vistas", action="entradas",
url=host+"/json/playlist/peliculas-mas-vistas/index.json",
thumbnail=host+"/json/playlist/peliculas-mas-vistas/thumbnail.jpg",
fanart=host+"/json/playlist/peliculas-mas-vistas/background.jpg"))
itemlist.append(Item(channel=item.channel, title="Categorías", action="cat",
url=host+"/json/playlist/peliculas/index.json",
thumbnail=item.thumbnail, fanart=item.fanart))
itemlist.append(Item(channel=item.channel, title="Películas 3D", action="entradasconlistas",
url=host+"/json/playlist/3D/index.json",
thumbnail=host+"/json/playlist/3D/thumbnail.jpg",
fanart=host+"/json/playlist/3D/background.jpg"))
return itemlist
def cat(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = jsontools.load(data)
exception = ["peliculas-mas-vistas", "ultimas-peliculas"]
for child in data["sortedPlaylistChilds"]:
if child["id"] not in exception:
url = host + "/json/playlist/%s/index.json" % child["id"]
# Fanart
if child['hashBackground']:
fanart = host + "/json/playlist/%s/background.jpg" % child["id"]
else:
fanart = host + "/json/playlist/%s/thumbnail.jpg" % child["id"]
# Thumbnail
thumbnail = host + "/json/playlist/%s/thumbnail.jpg" % child["id"]
title = child['id'].replace('-', ' ').capitalize().replace("Manga", "Animación/Cine Oriental")
title += " ([COLOR gold]" + str(child['number']) + "[/COLOR])"
itemlist.append(
Item(channel=item.channel, action="entradasconlistas", title=title, url=url,
thumbnail=thumbnail, fanart=fanart, folder=True))
return itemlist
def entradas(item):
logger.info()
itemlist = []
infolabels = {}
if item.title == "Nuevos Capítulos":
context = "5"
else:
context = "05"
data = httptools.downloadpage(item.url).data
data = jsontools.load(data)
for child in data["sortedRepoChilds"]:
infolabels['year'] = child['year']
url = host + "/json/repo/%s/index.json" % child["id"]
thumbnail = ""
if child['hasPoster']:
thumbnail = host + "/json/repo/%s/poster.jpg" % child["id"]
if child['height'] < 720:
quality = "[B] [SD][/B]"
elif child['height'] < 1080:
quality = "[B] [720p][/B]"
elif child['height'] < 2160:
quality = "[B] [1080p][/B]"
elif child['height'] >= 2160:
quality = "[B] [4k][/B]"
fulltitle = child['name']
title = child['name']
if child['year']:
title += " (" + child['year'] + ")"
title += quality
itemlist.append(Item(channel=item.channel, action="findvideos", server="", title=title, url=url,
thumbnail=thumbnail, fulltitle=fulltitle, infoLabels=infolabels,
contentTitle=fulltitle, context=context))
tmdb.set_infoLabels(itemlist)
return itemlist
def entradasconlistas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = jsontools.load(data)
# Si hay alguna lista
contentSerie = False
contentList = False
if data['playListChilds']:
itemlist.append(Item(channel=item.channel, title="**LISTAS**", action="", text_color="red", text_blod=True,
folder=False))
for child in data['sortedPlaylistChilds']:
infolabels = {}
infolabels['plot'] = "Contiene:\n" + "\n".join(child['playListChilds']) + "\n".join(child['repoChilds'])
if child['seasonNumber'] and not contentList and re.search(r'(?i)temporada', child['id']):
infolabels['season'] = child['seasonNumber']
contentSerie = True
else:
contentSerie = False
contentList = True
title = child['id'].replace('-', ' ').capitalize() + " ([COLOR gold]" + str(child['number']) + "[/COLOR])"
url = host + "/json/playlist/%s/index.json" % child["id"]
thumbnail = host + "/json/playlist/%s/thumbnail.jpg" % child["id"]
if child['hashBackground']:
fanart = host + "/json/playlist/%s/background.jpg" % child["id"]
else:
fanart = host + "/json/playlist/%s/thumbnail.jpg" % child["id"]
itemlist.append(Item(channel=item.channel, action="entradasconlistas", title=title,
url=url, thumbnail=thumbnail, fanart=fanart, fulltitle=child['id'],
infoLabels=infolabels, viewmode="movie_with_plot"))
else:
contentList = True
if data["sortedRepoChilds"] and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="**VÍDEOS**", action="", text_color="blue", text_blod=True,
folder=False))
for child in data["sortedRepoChilds"]:
infolabels = {}
infolabels['plot'] = child['description']
infolabels['year'] = data['year']
if child['tags']: infolabels['genre'] = ', '.join([x.strip() for x in child['tags']])
infolabels['rating'] = child['rate'].replace(',', '.')
infolabels['votes'] = child['rateCount']
infolabels['duration'] = child['duration']
if child['cast']: infolabels['cast'] = child['cast'].split(",")
infolabels['director'] = child['director']
url = host + "/json/repo/%s/index.json" % child["id"]
# Fanart
if child['hashBackground']:
fanart = host + "/json/repo/%s/background.jpg" % child["id"]
else:
fanart = host + "/json/repo/%s/thumbnail.jpg" % child["id"]
# Thumbnail
if child['hasPoster']:
thumbnail = host + "/json/repo/%s/poster.jpg" % child["id"]
else:
thumbnail = fanart
if child['height'] < 720:
quality = "[B] [SD][/B]"
elif child['height'] < 1080:
quality = "[B] [720p][/B]"
elif child['height'] < 2160:
quality = "[B] [1080p][/B]"
elif child['height'] >= 2160:
quality = "[B] [4k][/B]"
fulltitle = child['name']
if child['name'] == "":
title = child['id'].rsplit(".", 1)[0]
else:
title = child['name']
if child['year']:
title += " (" + child['year'] + ")"
title += quality
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url,
thumbnail=thumbnail, fanart=fanart, fulltitle=fulltitle, infoLabels=infolabels,
contentTitle=fulltitle, context="05", viewmode="movie_with_plot", folder=True))
# Se añade item para añadir la lista de vídeos a la videoteca
if data['sortedRepoChilds'] and len(itemlist) > 0 and contentList:
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, text_color="green", title="Añadir esta lista a la videoteca",
url=item.url, action="listas"))
elif contentSerie:
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=item.url,
action="series_library", fulltitle=data['name'], show=data['name'],
text_color="green"))
return itemlist
def series(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = jsontools.load(data)
exception = ["top-series", "nuevos-capitulos"]
for child in data["sortedPlaylistChilds"]:
if child["id"] not in exception:
infolabels = {}
infolabels['plot'] = child['description']
infolabels['year'] = child['year']
if child['tags']: infolabels['genre'] = ', '.join([x.strip() for x in child['tags']])
infolabels['rating'] = child['rate'].replace(',', '.')
infolabels['votes'] = child['rateCount']
if child['cast']: infolabels['cast'] = child['cast'].split(",")
infolabels['director'] = child['director']
infolabels['mediatype'] = "episode"
if child['seasonNumber']: infolabels['season'] = child['seasonNumber']
url = host + "/json/playlist/%s/index.json" % child["id"]
# Fanart
if child['hashBackground']:
fanart = host + "/json/playlist/%s/background.jpg" % child["id"]
else:
fanart = host + "/json/playlist/%s/thumbnail.jpg" % child["id"]
# Thumbnail
if child['hasPoster']:
thumbnail = host + "/json/playlist/%s/poster.jpg" % child["id"]
else:
thumbnail = fanart
if item.extra1 == "Series":
if child['name'] != "":
fulltitle = child['name']
fulltitle = fulltitle.replace('-', '')
title = child['name'] + " (" + child['year'] + ")"
else:
title = fulltitle = child['id'].capitalize()
if "Temporada" not in title:
title += " [Temporadas: [COLOR gold]" + str(child['numberOfSeasons']) + "[/COLOR]]"
elif item.title == "Más Vistas":
title = title.replace("- Temporada", "--- Temporada")
else:
if data['name'] != "":
fulltitle = data['name']
if child['seasonNumber']:
title = data['name'] + " --- Temporada " + child['seasonNumber'] + \
" [COLOR gold](" + str(child['number']) + ")[/COLOR]"
else:
title = child['name'] + " [COLOR gold](" + str(child['number']) + ")[/COLOR]"
else:
fulltitle = data['id']
if child['seasonNumber']:
title = data['id'].capitalize() + " --- Temporada " + child['seasonNumber'] + \
" [COLOR gold](" + str(child['number']) + ")[/COLOR]"
else:
title = data['id'].capitalize() + " [COLOR gold](" + str(child['number']) + ")[/COLOR]"
if not child['playListChilds']:
action = "episodios"
else:
action = "series"
itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, server="",
thumbnail=thumbnail, fanart=fanart, fulltitle=fulltitle, infoLabels=infolabels,
contentSerieName=fulltitle, context="25", viewmode="movie_with_plot", folder=True))
if len(itemlist) == len(data["sortedPlaylistChilds"]) and item.extra1 != "Series":
itemlist.sort(key=lambda item: item.title, reverse=True)
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=item.url,
action="add_serie_to_library", show=data['name'],
text_color="green", extra="series_library"))
if item.title == "Últimas Series": return itemlist
if item.title == "Lista de Series A-Z": itemlist.sort(key=lambda item: item.fulltitle)
if data["sortedRepoChilds"] and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="**VÍDEOS RELACIONADOS/MISMA TEMÁTICA**", text_color="blue",
text_blod=True, action="", folder=False))
for child in data["sortedRepoChilds"]:
infolabels = {}
if child['description']:
infolabels['plot'] = data['description']
else:
infolabels['plot'] = child['description']
infolabels['year'] = data['year']
if not child['tags']:
infolabels['genre'] = ', '.join([x.strip() for x in data['tags']])
else:
infolabels['genre'] = ', '.join([x.strip() for x in child['tags']])
infolabels['rating'] = child['rate'].replace(',', '.')
infolabels['duration'] = child['duration']
if child['cast']: infolabels['cast'] = child['cast'].split(",")
infolabels['director'] = child['director']
url = host + "/json/repo/%s/index.json" % child["id"]
if child['hashBackground']:
fanart = host + "/json/repo/%s/background.jpg" % child["id"]
else:
fanart = host + "/json/repo/%s/thumbnail.jpg" % child["id"]
# Thumbnail
if child['hasPoster']:
thumbnail = host + "/json/repo/%s/poster.jpg" % child["id"]
else:
thumbnail = fanart
if child['height'] < 720:
quality = "[B] [SD][/B]"
elif child['height'] < 1080:
quality = "[B] [720p][/B]"
elif child['height'] < 2160:
quality = "[B] [1080p][/B]"
elif child['height'] >= 2160:
quality = "[B] [4k][/B]"
fulltitle = child['name']
if child['name'] == "":
title = child['id'].rsplit(".", 1)[0]
else:
title = child['name']
if child['year']:
title += " (" + child['year'] + ")"
title += quality
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url,
server="", thumbnail=thumbnail, fanart=fanart, fulltitle=fulltitle, infoLabels=infolabels,
contentSerieName=fulltitle, context="25", viewmode="movie_with_plot", folder=True))
if item.extra == "new":
itemlist.sort(key=lambda item: item.title, reverse=True)
return itemlist
def episodios(item):
logger.info()
itemlist = []
# Redirección para actualización de videoteca
if item.extra == "series_library":
itemlist = series_library(item)
return itemlist
data = httptools.downloadpage(item.url).data
data = jsontools.load(data)
# Se prueba un método u otro porque algunas series no están bien listadas
if data["sortedRepoChilds"]:
for child in data["sortedRepoChilds"]:
if item.infoLabels:
item.infoLabels['duration'] = str(child['duration'])
item.infoLabels['season'] = str(data['seasonNumber'])
item.infoLabels['episode'] = str(child['episode'])
item.infoLabels['mediatype'] = "episode"
#contentTitle = item.fulltitle + "|" + str(data['seasonNumber']) + "|" + str(child['episode'])
# En caso de venir del apartado nuevos capítulos se redirige a la función series para mostrar los demás
if item.title == "Nuevos Capítulos":
url = host + "/json/playlist/%s/index.json" % child["season"]
action = "series"
extra = "new"
else:
url = host + "/json/repo/%s/index.json" % child["id"]
action = "findvideos"
extra = ""
if child['hasPoster']:
thumbnail = host + "/json/repo/%s/poster.jpg" % child["id"]
else:
thumbnail = host + "/json/repo/%s/thumbnail.jpg" % child["id"]
try:
title = fulltitle = child['name'].rsplit(" ", 1)[0] + " - " + child['name'].rsplit(" ", 1)[1]
except:
title = fulltitle = child['id']
itemlist.append(item.clone(action=action, server="", title=title, url=url, thumbnail=thumbnail,
fanart=item.fanart, fulltitle=fulltitle, contentSerieName=fulltitle, context="35",
viewmode="movie", extra=extra, show=item.fulltitle, folder=True))
else:
for child in data["repoChilds"]:
url = host + "/json/repo/%s/index.json" % child
if data['hasPoster']:
thumbnail = host + "/json/repo/%s/poster.jpg" % child
else:
thumbnail = host + "/json/repo/%s/thumbnail.jpg" % child
title = fulltitle = child.capitalize().replace('_', ' ')
itemlist.append(item.clone(action="findvideos", server="", title=title, url=url, thumbnail=thumbnail,
fanart=item.fanart, fulltitle=fulltitle, contentSerieName=item.fulltitle,
context="25", show=item.fulltitle, folder=True))
# Opción de añadir a la videoteca en casos de series de una única temporada
if len(itemlist) > 0 and not "---" in item.title and item.title != "Nuevos Capítulos":
if config.get_videolibrary_support() and item.show == "":
if "-" in item.title:
show = item.title.split('-')[0]
else:
show = item.title.split('(')[0]
itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", text_color="green",
url=item.url, action="add_serie_to_library", show=show, extra="series_library"))
return itemlist
def series_library(item):
logger.info()
# Funcion unicamente para añadir/actualizar series a la libreria
lista_episodios = []
show = item.show.strip()
# anti_cloudflare and headers are not defined in this channel; fetch with
# httptools as the rest of the file does
data_serie = httptools.downloadpage(item.url).data
data_serie = jsontools.load(data_serie)
# Para series que en la web se listan divididas por temporadas
if data_serie["sortedPlaylistChilds"]:
for season_name in data_serie["sortedPlaylistChilds"]:
url_season = host + "/json/playlist/%s/index.json" % season_name['id']
data = httptools.downloadpage(url_season).data
data = jsontools.load(data)
if data["sortedRepoChilds"]:
for child in data["sortedRepoChilds"]:
url = host + "/json/repo/%s/index.json" % child["id"]
fulltitle = child['name'].rsplit(" ", 1)[0] + " - " + child['name'].rsplit(" ", 1)[1]
try:
check_filename = scrapertools.get_season_and_episode(fulltitle)
except:
fulltitle += " " + str(data['seasonNumber']) + "x00"
lista_episodios.append(Item(channel=item.channel, action="findvideos", server="",
title=fulltitle, extra=url, url=item.url, fulltitle=fulltitle,
contentTitle=fulltitle, show=show))
else:
for child in data["repoChilds"]:
url = host + "/json/repo/%s/index.json" % child
fulltitle = child.capitalize().replace('_', ' ')
try:
check_filename = scrapertools.get_season_and_episode(fulltitle)
except:
fulltitle += " " + str(data['seasonNumber']) + "x00"
lista_episodios.append(Item(channel=item.channel, action="findvideos", server="",
title=fulltitle, extra=url, url=item.url, contentTitle=fulltitle,
fulltitle=fulltitle, show=show))
# Para series directas de una sola temporada
else:
data = data_serie
if data["sortedRepoChilds"]:
for child in data["sortedRepoChilds"]:
url = host + "/json/repo/%s/index.json" % child["id"]
fulltitle = child['name'].rsplit(" ", 1)[0] + " - " + child['name'].rsplit(" ", 1)[1]
try:
check_filename = scrapertools.get_season_and_episode(fulltitle)
except:
fulltitle += " 1x00"
lista_episodios.append(Item(channel=item.channel, action="findvideos", server="", title=fulltitle,
contentTitle=fulltitle, url=item.url, extra=url, fulltitle=fulltitle,
show=show))
else:
for child in data["repoChilds"]:
url = host + "/json/repo/%s/index.json" % child
fulltitle = child.capitalize().replace('_', ' ')
try:
check_filename = scrapertools.get_season_and_episode(fulltitle)
except:
fulltitle += " 1x00"
lista_episodios.append(Item(channel=item.channel, action="findvideos", server="", title=fulltitle,
contentTitle=fulltitle, url=item.url, extra=url, fulltitle=fulltitle,
show=show))
return lista_episodios
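series_library() depends on every episode title carrying an SxE token so the videolibrary scanner can file it; when scrapertools.get_season_and_episode() raises, a synthetic "Nx00" marker is appended. A sketch of that convention (the regex is an assumption about what get_season_and_episode accepts):

import re

def ensure_sxe(fulltitle, season=1):
    # Assumption: an SxE token looks like "2x05"; append "Nx00" when absent.
    if re.search(r'\d+x\d+', fulltitle):
        return fulltitle
    return '%s %sx00' % (fulltitle, season)

print(ensure_sxe('Mi serie 2x05'))  # prints: Mi serie 2x05
print(ensure_sxe('Mi serie'))       # prints: Mi serie 1x00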
def findvideos(item):
logger.info()
itemlist = []
# En caso de llamarse a la función desde una serie de la videoteca
if item.extra.startswith("http"): item.url = item.extra
data = httptools.downloadpage(item.url).data
data = jsontools.load(data)
id = urllib.quote(data['id'])
for child in data["profiles"].keys():
videopath = urllib.quote(data["profiles"][child]['videoUri'])
for i in range(0, len(data["profiles"][child]['servers'])):
url = data["profiles"][child]['servers'][i]['url'] + videopath
size = " " + data["profiles"][child]["sizeHuman"]
resolution = " [" + (data["profiles"][child]['videoResolution']) + "]"
title = "Ver vídeo en " + resolution.replace('1920x1080', 'HD-1080p')
if i == 0:
title += size + " [COLOR purple]Mirror " + str(i + 1) + "[/COLOR]"
else:
title += size + " [COLOR green]Mirror " + str(i + 1) + "[/COLOR]"
# Para poner enlaces de mayor calidad al comienzo de la lista
if data["profiles"][child]["profileId"] == "default":
itemlist.insert(i, item.clone(action="play", server="directo", title=title, url=url,
viewmode="list", extra=id, folder=False))
else:
itemlist.append(item.clone(action="play", server="directo", title=title, url=url,
viewmode="list", extra=id, folder=False))
itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler",
text_color="magenta"))
if len(itemlist) > 0 and item.extra == "":
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir enlaces a la videoteca", text_color="green",
url=item.url, action="add_pelicula_to_library",
infoLabels={'title':item.fulltitle}, extra="findvideos", fulltitle=item.fulltitle))
return itemlist
def play(item):
logger.info()
itemlist = []
uri = scrapertools.find_single_match(item.url, '(/transcoder[\w\W]+)')
uri_request = host + "/video-prod/s/uri?uri=%s&_=%s" % (uri, int(time.time()))
data = httptools.downloadpage(uri_request).data
data = jsontools.load(data)
url = item.url.replace(".tv-vip.com/transcoder/", ".tv-vip.info/c/transcoder/") + "?tt=" + str(data['tt']) + \
"&mm=" + data['mm'] + "&bb=" + data['bb']
itemlist.append(item.clone(action="play", server="directo", url=url, folder=False))
return itemlist
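play() performs a small token handshake: it asks /video-prod/s/uri for fresh tt/mm/bb values, moves the stream onto the .info mirror host and appends the tokens as query parameters. A sketch with hypothetical values (in the channel they come from the JSON response above):

# Hypothetical token values standing in for the /video-prod/s/uri response.
base = 'http://s1.tv-vip.com/transcoder/movie/video.mp4'
tt, mm, bb = 1524700000, 'abc', 'def'
url = base.replace('.tv-vip.com/transcoder/', '.tv-vip.info/c/transcoder/')
url += '?tt=%s&mm=%s&bb=%s' % (tt, mm, bb)
print(url)  # http://s1.tv-vip.info/c/transcoder/movie/video.mp4?tt=1524700000&mm=abc&bb=def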
def listas(item):
logger.info()
# Para añadir listas a la videoteca en carpeta CINE
itemlist = []
# anti_cloudflare, header_string and get_cookie_value are not defined in
# this channel; fetch with httptools and skip the cookie suffix
data = httptools.downloadpage(item.url).data
data = jsontools.load(data)
for child in data["sortedRepoChilds"]:
infolabels = {}
# Fanart
if child['hashBackground']:
fanart = host + "/json/repo/%s/background.jpg" % child["id"]
else:
fanart = host + "/json/repo/%s/thumbnail.jpg" % child["id"]
# Thumbnail
if child['hasPoster']:
thumbnail = host + "/json/repo/%s/poster.jpg" % child["id"]
else:
thumbnail = fanart
url = host + "/json/repo/%s/index.json" % child["id"]
if child['name'] == "":
title = scrapertools.slugify(child['id'].rsplit(".", 1)[0])
else:
title = scrapertools.slugify(child['name'])
title = title.replace('-', ' ').replace('_', ' ').capitalize()
infolabels['title'] = title
try:
from core import videolibrarytools
new_item = item.clone(title=title, url=url, fulltitle=title, fanart=fanart, extra="findvideos",
thumbnail=thumbnail, infoLabels=infolabels, category="Cine")
videolibrarytools.library.add_movie(new_item)
error = False
except:
error = True
import traceback
logger.error(traceback.format_exc())
if not error:
itemlist.append(Item(channel=item.channel, title='Lista añadida correctamente a la videoteca',
action="", folder=False))
else:
itemlist.append(Item(channel=item.channel, title='ERROR. Han ocurrido uno o varios errores en el proceso',
action="", folder=False))
return itemlist

View File

@@ -1,117 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from core import httptools
from core import scrapertools
from core.item import Item
from platformcode import logger
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, title="Ultimos capítulos", action="ultimos", url="http://www.vertelenovelas.cc/",
viewmode="movie"))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = "http://www.vertelenovelas.cc/ajax/autocompletex.php?q=" + texto
try:
return series(item)
# Se captura la excepción, para no interrumpir al buscador global si un canal falla
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def ultimos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<article.*?</article>'
matches = re.compile(patron, re.DOTALL).findall(data)
for match in matches:
title = scrapertools.find_single_match(match, '<span>([^<]+)</span>')
if title == "":
title = scrapertools.find_single_match(match, '<a href="[^"]+" class="title link">([^<]+)</a>')
url = urlparse.urljoin(item.url, scrapertools.find_single_match(match, '<a href="([^"]+)"'))
thumbnail = scrapertools.find_single_match(match, '<div data-src="([^"]+)"')
if thumbnail == "":
thumbnail = scrapertools.find_single_match(match, '<img src="([^"]+)"')
logger.debug("title=[" + title + "], url=[" + url + "]")
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail))
next_page_url = scrapertools.find_single_match(data, '<a href="([^"]+)" class="next">')
if next_page_url != "":
itemlist.append(Item(channel=item.channel, action="series", title=">> Pagina siguiente",
url=urlparse.urljoin(item.url, next_page_url), viewmode="movie", thumbnail="", plot="",
folder=True))
return itemlist
def series(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<article.*?</article>'
matches = re.compile(patron, re.DOTALL).findall(data)
for match in matches:
title = scrapertools.find_single_match(match, '<span>([^<]+)</span>')
if title == "":
title = scrapertools.find_single_match(match, '<a href="[^"]+" class="title link">([^<]+)</a>')
url = urlparse.urljoin(item.url, scrapertools.find_single_match(match, '<a href="([^"]+)"'))
thumbnail = scrapertools.find_single_match(match, '<div data-src="([^"]+)"')
if thumbnail == "":
thumbnail = scrapertools.find_single_match(match, '<img src="([^"]+)"')
logger.debug("title=[" + title + "], url=[" + url + "]")
itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail))
next_page_url = scrapertools.find_single_match(data, '<a href="([^"]+)" class="next">')
if next_page_url != "":
itemlist.append(Item(channel=item.channel, action="series", title=">> Pagina siguiente",
url=urlparse.urljoin(item.url, next_page_url), viewmode="movie", thumbnail="", plot="",
folder=True))
return itemlist
def episodios(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.find_single_match(data, '<h2>Cap(.*?)</ul>')
patron = '<li><a href="([^"]+)"><span>([^<]+)</span></a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
title = scrapertools.htmlclean(scrapedtitle)
url = urlparse.urljoin(item.url, scrapedurl)
itemlist.append(
Item(channel=item.channel, action="findvideos", title=title, url=url,
folder=True, fulltitle=title))
return itemlist
def findvideos(item):
logger.info()
data = httptools.downloadpage(item.url).data
pattern = 'data-id="([^"]+)"'
list_servers = re.compile(pattern, re.DOTALL).findall(data)
list_urls = []
for _id in list_servers:
post = "id=%s" % _id
data = httptools.downloadpage("http://www.vertelenovelas.cc/goto/", post=post).data
list_urls.append(scrapertools.find_single_match(data, 'document\.location = "([^"]+)";'))
from core import servertools
itemlist = servertools.find_video_items(data=", ".join(list_urls))
for videoitem in itemlist:
# videoitem.title = item.title
videoitem.channel = item.channel
return itemlist

View File

@@ -410,8 +410,7 @@ def findvideos(item):
if config.get_setting("quit_channel_name", "videolibrary") == 0:
server.title = "%s: %s" % (nom_canal.capitalize(), server.title)
server.infoLabels = item_json.infoLabels
#server.infoLabels = item_json.infoLabels
if not server.thumbnail:
server.thumbnail = item.thumbnail

View File

@@ -716,6 +716,7 @@ def check_list_links(itemlist, numero):
if numero > 0 and it.server != '' and it.url != '':
verificacion = check_video_link(it.url, it.server)
it.title = verificacion + ', ' + it.title.strip()
it.alive = verificacion
numero -= 1
return itemlist
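The new it.alive field is what the channel hunks above and the title_format change further down read back. A sketch of the per-item contract (the Item stand-in is hypothetical):

# Stand-in for core.item.Item, only to show the new contract.
class _It(object):
    def __init__(self, title):
        self.title, self.alive = title, ''

it = _It('  Mirror 1 ')
verificacion = 'Ok'                       # result of check_video_link
it.title = verificacion + ', ' + it.title.strip()
it.alive = verificacion                   # new: kept for later title formatting
print('%s / %s' % (it.alive, it.title))   # prints: Ok / Ok, Mirror 1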

View File

@@ -1,227 +1,453 @@
# -*- coding: utf-8 -*-
try:
from selenium.webdriver import PhantomJS
from contextlib import closing
linkbucks_support = True
except:
linkbucks_support = False
try:
from urllib.request import urlsplit, urlparse
except:
from urlparse import urlsplit, urlparse
import json
import os
import re
import time
from base64 import b64decode
import requests
class UnshortenIt(object):
_headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Encoding': 'gzip,deflate,sdch',
'Accept-Language': 'en-US,en;q=0.8',
'Connection': 'keep-alive',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.69 Safari/537.36'}
_adfly_regex = r'adf\.ly|q\.gs|j\.gs|u\.bb|ay\.gy'
_linkbucks_regex = r'linkbucks\.com|any\.gs|cash4links\.co|cash4files\.co|dyo\.gs|filesonthe\.net|goneviral\.com|megaline\.co|miniurls\.co|qqc\.co|seriousdeals\.net|theseblogs\.com|theseforums\.com|tinylinks\.co|tubeviral\.com|ultrafiles\.net|urlbeat\.net|whackyvidz\.com|yyv\.co'
_adfocus_regex = r'adfoc\.us'
_lnxlu_regex = r'lnx\.lu'
_shst_regex = r'sh\.st'
_this_dir, _this_filename = os.path.split(__file__)
_timeout = 10
def unshorten(self, uri, type=None, timeout=10):
domain = urlsplit(uri).netloc
self._timeout = timeout
if re.search(self._adfly_regex, domain, re.IGNORECASE) or type == 'adfly':
return self._unshorten_adfly(uri)
if re.search(self._adfocus_regex, domain, re.IGNORECASE) or type == 'adfocus':
return self._unshorten_adfocus(uri)
if re.search(self._linkbucks_regex, domain, re.IGNORECASE) or type == 'linkbucks':
if linkbucks_support:
return self._unshorten_linkbucks(uri)
else:
return uri, 'linkbucks.com not supported. Install selenium package to add support.'
if re.search(self._lnxlu_regex, domain, re.IGNORECASE) or type == 'lnxlu':
return self._unshorten_lnxlu(uri)
if re.search(self._shst_regex, domain, re.IGNORECASE):
return self._unshorten_shst(uri)
try:
# headers stop t.co from working so omit headers if this is a t.co link
if domain == 't.co':
r = requests.get(uri, timeout=self._timeout)
return r.url, r.status_code
# p.ost.im uses meta http refresh to redirect.
if domain == 'p.ost.im':
r = requests.get(uri, headers=self._headers, timeout=self._timeout)
uri = re.findall(r'.*url\=(.*?)\"\.*', r.text)[0]
return uri, 200
r = requests.head(uri, headers=self._headers, timeout=self._timeout)
while True:
if 'location' in r.headers:
r = requests.head(r.headers['location'])
uri = r.url
else:
return r.url, r.status_code
except Exception as e:
return uri, str(e)
def _unshorten_adfly(self, uri):
try:
r = requests.get(uri, headers=self._headers, timeout=self._timeout)
html = r.text
ysmm = re.findall(r"var ysmm =.*\;?", html)
if len(ysmm) > 0:
ysmm = re.sub(r'var ysmm \= \'|\'\;', '', ysmm[0])
left = ''
right = ''
for c in [ysmm[i:i + 2] for i in range(0, len(ysmm), 2)]:
left += c[0]
right = c[1] + right
decoded_uri = b64decode(left.encode() + right.encode())[2:].decode()
if re.search(r'go\.php\?u\=', decoded_uri):
decoded_uri = b64decode(re.sub(r'(.*?)u=', '', decoded_uri)).decode()
return decoded_uri, r.status_code
else:
return uri, 'No ysmm variable found'
except Exception as e:
return uri, str(e)
def _unshorten_linkbucks(self, uri):
try:
with closing(PhantomJS(
service_log_path=os.path.dirname(os.path.realpath(__file__)) + '/ghostdriver.log')) as browser:
browser.get(uri)
# wait 5 seconds
time.sleep(5)
page_source = browser.page_source
link = re.findall(r'skiplink(.*?)\>', page_source)
if link is not None:
link = re.sub(r'\shref\=|\"', '', link[0])
if link == '':
return uri, 'Failed to extract link.'
return link, 200
else:
return uri, 'Failed to extract link.'
except Exception as e:
return uri, str(e)
def _unshorten_adfocus(self, uri):
orig_uri = uri
try:
http_header = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.46 Safari/535.11",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"Accept-Language": "nl-NL,nl;q=0.8,en-US;q=0.6,en;q=0.4",
"Cache-Control": "no-cache",
"Pragma": "no-cache"
}
r = requests.get(uri, headers=http_header, timeout=self._timeout)
html = r.text
adlink = re.findall("click_url =.*;", html)
if len(adlink) > 0:
uri = re.sub('^click_url = "|"\;$', '', adlink[0])
if re.search(r'http(s|)\://adfoc\.us/serve/skip/\?id\=', uri):
http_header = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.46 Safari/535.11",
"Accept-Encoding": "gzip,deflate,sdch",
"Accept-Language": "en-US,en;,q=0.8",
"Connection": "keep-alive",
"Host": "adfoc.us",
"Cache-Control": "no-cache",
"Pragma": "no-cache",
"Referer": orig_uri,
}
r = requests.get(uri, headers=http_header, timeout=self._timeout)
uri = r.url
return uri, r.status_code
else:
return uri, 'No click_url variable found'
except Exception as e:
return uri, str(e)
def _unshorten_lnxlu(self, uri):
try:
r = requests.get(uri, headers=self._headers, timeout=self._timeout)
html = r.text
code = re.findall('/\?click\=(.*)\."', html)
if len(code) > 0:
payload = {'click': code[0]}
r = requests.get('http://lnx.lu/', params=payload, headers=self._headers, timeout=self._timeout)
return r.url, r.status_code
else:
return uri, 'No click variable found'
except Exception as e:
return uri, str(e)
def _unshorten_shst(self, uri):
try:
r = requests.get(uri, headers=self._headers, timeout=self._timeout)
html = r.text
session_id = re.findall(r'sessionId\:(.*?)\"\,', html)
if len(session_id) > 0:
session_id = re.sub(r'\s\"', '', session_id[0])
http_header = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.46 Safari/535.11",
"Accept-Encoding": "gzip,deflate,sdch",
"Accept-Language": "en-US,en;,q=0.8",
"Connection": "keep-alive",
"Content-Type": "application/x-www-form-urlencoded",
"Host": "sh.st",
"Referer": uri,
"Origin": "http://sh.st",
"X-Requested-With": "XMLHttpRequest"
}
time.sleep(5)
payload = {'adSessionId': session_id, 'callback': 'c'}
r = requests.get('http://sh.st/shortest-url/end-adsession', params=payload, headers=http_header,
timeout=self._timeout)
response = r.content[6:-2].decode('utf-8')
if r.status_code == 200:
resp_uri = json.loads(response)['destinationUrl']
if resp_uri is not None:
uri = resp_uri
else:
return uri, 'Error extracting url'
else:
return uri, 'Error extracting url'
return uri, r.status_code
except Exception as e:
return uri, str(e)
def unshorten(uri, type=None, timeout=10):
unshortener = UnshortenIt()
return unshortener.unshorten(uri, type, timeout)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from urllib.parse import urlsplit, urlparse, parse_qs, urljoin
except:
from urlparse import urlsplit, urlparse, parse_qs, urljoin
import json
import os
import re
import time
import urllib
from base64 import b64decode
from platformcode import logger
import xbmc
from core import httptools
def find_in_text(regex, text, flags=re.IGNORECASE | re.DOTALL):
rec = re.compile(regex, flags=flags)
match = rec.search(text)
if not match:
return False
return match.group(1)
class UnshortenIt(object):
_adfly_regex = r'adf\.ly|q\.gs|j\.gs|u\.bb|ay\.gy|threadsphere\.bid|restorecosm\.bid'
_linkbucks_regex = r'linkbucks\.com|any\.gs|cash4links\.co|cash4files\.co|dyo\.gs|filesonthe\.net|goneviral\.com|megaline\.co|miniurls\.co|qqc\.co|seriousdeals\.net|theseblogs\.com|theseforums\.com|tinylinks\.co|tubeviral\.com|ultrafiles\.net|urlbeat\.net|whackyvidz\.com|yyv\.co'
_adfocus_regex = r'adfoc\.us'
_lnxlu_regex = r'lnx\.lu'
_shst_regex = r'sh\.st'
_hrefli_regex = r'href\.li'
_anonymz_regex = r'anonymz\.com'
_shrink_service_regex = r'shrink-service\.it'
_rapidcrypt_regex = r'rapidcrypt\.net'
_maxretries = 5
_this_dir, _this_filename = os.path.split(__file__)
_timeout = 10
def unshorten(self, uri, type=None):
domain = urlsplit(uri).netloc
if not domain:
return uri, "No domain found in URI!"
had_google_outbound, uri = self._clear_google_outbound_proxy(uri)
if re.search(self._adfly_regex, domain,
re.IGNORECASE) or type == 'adfly':
return self._unshorten_adfly(uri)
if re.search(self._adfocus_regex, domain,
re.IGNORECASE) or type == 'adfocus':
return self._unshorten_adfocus(uri)
if re.search(self._linkbucks_regex, domain,
re.IGNORECASE) or type == 'linkbucks':
return self._unshorten_linkbucks(uri)
if re.search(self._lnxlu_regex, domain,
re.IGNORECASE) or type == 'lnxlu':
return self._unshorten_lnxlu(uri)
if re.search(self._shrink_service_regex, domain, re.IGNORECASE):
return self._unshorten_shrink_service(uri)
if re.search(self._shst_regex, domain, re.IGNORECASE):
return self._unshorten_shst(uri)
if re.search(self._hrefli_regex, domain, re.IGNORECASE):
return self._unshorten_hrefli(uri)
if re.search(self._anonymz_regex, domain, re.IGNORECASE):
return self._unshorten_anonymz(uri)
if re.search(self._rapidcrypt_regex, domain, re.IGNORECASE):
return self._unshorten_rapidcrypt(uri)
return uri, 200
def unwrap_30x(self, uri, timeout=10):
domain = urlsplit(uri).netloc
self._timeout = timeout
loop_counter = 0
try:
if loop_counter > 5:
raise ValueError("Infinitely looping redirect from URL: '%s'" %
(uri,))
# headers stop t.co from working so omit headers if this is a t.co link
if domain == 't.co':
r = httptools.downloadpage(uri, timeout=self._timeout)
return r.url, r.code
# p.ost.im uses meta http refresh to redirect.
if domain == 'p.ost.im':
r = httptools.downloadpage(uri, timeout=self._timeout)
uri = re.findall(r'.*url\=(.*?)\"\.*', r.data)[0]
return uri, r.code
else:
# retries must be initialized once, outside the loop; resetting it on
# every iteration would disable the _maxretries cap
retries = 0
while True:
r = httptools.downloadpage(
uri,
timeout=self._timeout,
follow_redirects=False,
only_headers=True)
if not r.success:
return uri, -1
if 'location' in r.headers and retries < self._maxretries:
r = httptools.downloadpage(
r.headers['location'],
follow_redirects=False,
only_headers=True)
uri = r.url
loop_counter += 1
retries = retries + 1
else:
return r.url, r.code
except Exception as e:
return uri, str(e)
def _clear_google_outbound_proxy(self, url):
'''
So google proxies all their outbound links through a redirect so they can detect outbound links.
This call strips them out if they are present.
This is useful for doing things like parsing google search results, or if you're scraping google
docs, where google inserts hit-counters on all outbound links.
'''
# This is kind of hacky, because we need to check both the netloc AND
# part of the path. We could use urllib.parse.urlsplit, but it's
# easier and just as effective to use string checks.
if url.startswith("http://www.google.com/url?") or \
url.startswith("https://www.google.com/url?"):
qs = urlparse(url).query
query = parse_qs(qs)
if "q" in query: # Google doc outbound links (maybe blogspot, too)
return True, query["q"].pop()
elif "url" in query: # Outbound links from google searches
return True, query["url"].pop()
else:
raise ValueError(
"Google outbound proxy URL without a target url ('%s')?" %
url)
return False, url
def _unshorten_adfly(self, uri):
logger.info()
try:
r = httptools.downloadpage(
uri, timeout=self._timeout, cookies=False)
html = r.data
ysmm = re.findall(r"var ysmm =.*\;?", html)
if len(ysmm) > 0:
ysmm = re.sub(r'var ysmm \= \'|\'\;', '', ysmm[0])
left = ''
right = ''
for c in [ysmm[i:i + 2] for i in range(0, len(ysmm), 2)]:
left += c[0]
right = c[1] + right
# Additional digit arithmetic
encoded_uri = list(left + right)
numbers = ((i, n) for i, n in enumerate(encoded_uri) if str.isdigit(n))
for first, second in zip(numbers, numbers):
xor = int(first[1]) ^ int(second[1])
if xor < 10:
encoded_uri[first[0]] = str(xor)
decoded_uri = b64decode("".join(encoded_uri).encode())[16:-16].decode()
if re.search(r'go\.php\?u\=', decoded_uri):
decoded_uri = b64decode(re.sub(r'(.*?)u=', '', decoded_uri)).decode()
return decoded_uri, r.code
else:
return uri, 'No ysmm variable found'
except Exception as e:
return uri, str(e)
def _unshorten_linkbucks(self, uri):
'''
(Attempt) to decode linkbucks content. HEAVILY based on the OSS jDownloader codebase.
This has necessidated a license change.
'''
r = httptools.downloadpage(uri, timeout=self._timeout)
firstGet = time.time()
baseloc = r.url
if "/notfound/" in r.url or \
"(>Link Not Found<|>The link may have been deleted by the owner|To access the content, you must complete a quick survey\.)" in r.data:
return uri, 'Error: Link not found or requires a survey!'
link = None
content = r.data
regexes = [
r"<div id=\"lb_header\">.*?/a>.*?<a.*?href=\"(.*?)\".*?class=\"lb",
r"AdBriteInit\(\"(.*?)\"\)",
r"Linkbucks\.TargetUrl = '(.*?)';",
r"Lbjs\.TargetUrl = '(http://[^<>\"]*?)'",
r"src=\"http://static\.linkbucks\.com/tmpl/mint/img/lb\.gif\" /></a>.*?<a href=\"(.*?)\"",
r"id=\"content\" src=\"([^\"]*)",
]
for regex in regexes:
if self.inValidate(link):
link = find_in_text(regex, content)
if self.inValidate(link):
match = find_in_text(r"noresize=\"[0-9+]\" src=\"(http.*?)\"", content)
if match:
link = find_in_text(r"\"frame2\" frameborder.*?src=\"(.*?)\"", content)
if self.inValidate(link):
scripts = re.findall("(<script type=\"text/javascript\">[^<]+</script>)", content)
if not scripts:
return uri, "No script bodies found?"
js = False
for script in scripts:
# cleanup
script = re.sub(r"[\r\n\s]+\/\/\s*[^\r\n]+", "", script)
if re.search(r"\s*var\s*f\s*=\s*window\['init'\s*\+\s*'Lb'\s*\+\s*'js'\s*\+\s*''\];[\r\n\s]+", script):
js = script
if not js:
return uri, "Could not find correct script?"
token = find_in_text(r"Token\s*:\s*'([a-f0-9]{40})'", js)
if not token:
token = find_in_text(r"\?t=([a-f0-9]{40})", js)
assert token
authKeyMatchStr = r"A(?:'\s*\+\s*')?u(?:'\s*\+\s*')?t(?:'\s*\+\s*')?h(?:'\s*\+\s*')?K(?:'\s*\+\s*')?e(?:'\s*\+\s*')?y"
l1 = find_in_text(r"\s*params\['" + authKeyMatchStr + r"'\]\s*=\s*(\d+?);", js)
l2 = find_in_text(
r"\s*params\['" + authKeyMatchStr + r"'\]\s*=\s?params\['" + authKeyMatchStr + r"'\]\s*\+\s*(\d+?);",
js)
if any([not l1, not l2, not token]):
return uri, "Missing required tokens?"
authkey = int(l1) + int(l2)
p1_url = urljoin(baseloc, "/director/?t={tok}".format(tok=token))
r2 = httptools.downloadpage(p1_url, timeout=self._timeout)
p1_url = urljoin(baseloc, "/scripts/jquery.js?r={tok}&{key}".format(tok=token, key=l1))
r2_1 = httptools.downloadpage(p1_url, timeout=self._timeout)
time_left = 5.033 - (time.time() - firstGet)
xbmc.sleep(int(max(time_left, 0) * 1000))  # xbmc.sleep expects milliseconds as an int
p3_url = urljoin(baseloc, "/intermission/loadTargetUrl?t={tok}&aK={key}&a_b=false".format(tok=token,
key=str(authkey)))
r3 = httptools.downloadpage(p3_url, timeout=self._timeout)
resp_json = json.loads(r3.data)
if "Url" in resp_json:
return resp_json['Url'], r3.code
return "Wat", "wat"
def inValidate(self, s):
# Original conditional:
# (s == null || s != null && (s.matches("[\r\n\t ]+") || s.equals("") || s.equalsIgnoreCase("about:blank")))
if not s:
return True
if re.search("[\r\n\t ]+", s) or s.lower() == "about:blank":
return True
else:
return False
def _unshorten_adfocus(self, uri):
orig_uri = uri
try:
r = httptools.downloadpage(uri, timeout=self._timeout)
html = r.data
adlink = re.findall("click_url =.*;", html)
if len(adlink) > 0:
uri = re.sub('^click_url = "|"\;$', '', adlink[0])
if re.search(r'http(s|)\://adfoc\.us/serve/skip/\?id\=', uri):
http_header = dict()
http_header["Host"] = "adfoc.us"
http_header["Referer"] = orig_uri
r = httptools.downloadpage(uri, headers=http_header, timeout=self._timeout)
uri = r.url
return uri, r.code
else:
return uri, 'No click_url variable found'
except Exception as e:
return uri, str(e)
def _unshorten_lnxlu(self, uri):
try:
r = httptools.downloadpage(uri, timeout=self._timeout)
html = r.data
code = re.findall('/\?click\=(.*)\."', html)
if len(code) > 0:
payload = {'click': code[0]}
r = httptools.downloadpage(
'http://lnx.lu?' + urllib.urlencode(payload),
timeout=self._timeout)
return r.url, r.code
else:
return uri, 'No click variable found'
except Exception as e:
return uri, str(e)
def _unshorten_shst(self, uri):
try:
r = httptools.downloadpage(uri, timeout=self._timeout)
html = r.data
session_id = re.findall(r'sessionId\:(.*?)\"\,', html)
if len(session_id) > 0:
session_id = re.sub(r'\s\"', '', session_id[0])
http_header = dict()
http_header["Content-Type"] = "application/x-www-form-urlencoded"
http_header["Host"] = "sh.st"
http_header["Referer"] = uri
http_header["Origin"] = "http://sh.st"
http_header["X-Requested-With"] = "XMLHttpRequest"
xbmc.sleep(5 * 1000)
payload = {'adSessionId': session_id, 'callback': 'c'}
r = httptools.downloadpage(
'http://sh.st/shortest-url/end-adsession?' +
urllib.urlencode(payload),
headers=http_header,
timeout=self._timeout)
response = r.data[6:-2].decode('utf-8')
if r.code == 200:
resp_uri = json.loads(response)['destinationUrl']
if resp_uri is not None:
uri = resp_uri
else:
return uri, 'Error extracting url'
else:
return uri, 'Error extracting url'
return uri, r.code
except Exception as e:
return uri, str(e)
def _unshorten_hrefli(self, uri):
try:
# Extract url from query
parsed_uri = urlparse(uri)
extracted_uri = parsed_uri.query
if not extracted_uri:
return uri, 200
# Get url status code
r = httptools.downloadpage(
extracted_uri,
timeout=self._timeout,
follow_redirects=False,
only_headers=True)
return r.url, r.code
except Exception as e:
return uri, str(e)
def _unshorten_anonymz(self, uri):
# For the moment they use the same system as hrefli
return self._unshorten_hrefli(uri)
def _unshorten_shrink_service(self, uri):
try:
r = httptools.downloadpage(uri, timeout=self._timeout, cookies=False)
html = r.data
uri = re.findall(r"<input type='hidden' name='\d+' id='\d+' value='([^']+)'>", html)[0]
from core import scrapertools
uri = scrapertools.decodeHtmlentities(uri)
uri = uri.replace("&sol;", "/") \
.replace("&colon;", ":") \
.replace("&period;", ".") \
.replace("&excl;", "!") \
.replace("&num;", "#") \
.replace("&quest;", "?") \
.replace("&lowbar;", "_")
return uri, r.code
except Exception as e:
return uri, str(e)
def _unshorten_rapidcrypt(self, uri):
try:
r = httptools.downloadpage(uri, timeout=self._timeout, cookies=False)
html = r.data
uri = re.findall(r'<a class="button" href="([^"]+)">Click to continue</a>', html)[0]
return uri, r.code
except Exception as e:
return uri, str(e)
def unwrap_30x_only(uri, timeout=10):
unshortener = UnshortenIt()
uri, status = unshortener.unwrap_30x(uri, timeout=timeout)
return uri, status
def unshorten_only(uri, type=None, timeout=10):
unshortener = UnshortenIt()
uri, status = unshortener.unshorten(uri, type=type)
return uri, status
def unshorten(uri, type=None, timeout=10):
unshortener = UnshortenIt()
uri, status = unshortener.unshorten(uri, type=type)
if status == 200:
uri, status = unshortener.unwrap_30x(uri, timeout=timeout)
return uri, status
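The module keeps the same entry points as the old version: unshorten() first resolves known shortener patterns, then follows plain 30x redirects, while unshorten_only() and unwrap_30x_only() expose the two halves separately. A hedged usage sketch (the URL is hypothetical and the import path is an assumption; the module only imports cleanly inside a Kodi/Alfa environment where httptools and xbmc are available):

# Assumed import path, for illustration only.
from lib import unshortenit

final_uri, status = unshortenit.unshorten('http://adf.ly/abc123')
if status == 200:
    print(final_uri)                        # fully resolved target URL
else:
    print('unshorten failed: %s' % status)  # HTTP code or error message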

View File

@@ -24,7 +24,7 @@ def start():
funciones que deseamos que se ejecuten nada más abrir el plugin.
"""
logger.info()
config.set_setting('show_once', True)
# Test if all the required directories are created
config.verify_directories_created()
@@ -51,6 +51,11 @@ def run(item=None):
item.start = True
else:
item = Item(channel="channelselector", action="getmainlist", viewmode="movie")
if config.get_setting('show_once'):
platformtools.dialog_ok('Alfa', 'Alfa recomienda para mejorar tu experiencia:',
'Palomitas, relájate y disfruta.')
config.set_setting('show_once', False)
logger.info(item.tostring())
try:
@@ -310,6 +315,7 @@ def run(item=None):
log_message)
def reorder_itemlist(itemlist):
logger.info()
# logger.debug("Inlet itemlist size: %i" % len(itemlist))

View File

@@ -324,7 +324,9 @@ def title_format(item):
# Si el titulo no tiene contentSerieName entonces se formatea como pelicula
item.title = '%s' % set_color(item.contentTitle, 'movie')
if item.contentType=='movie':
item.context='Buscar esta pelicula en otros canales'
if item.context:
if isinstance(item.context, list):
item.context.append('Buscar esta pelicula en otros canales')
if 'Novedades' in item.category and item.from_channel=='news':
#logger.debug('novedades')
@@ -406,6 +408,7 @@ def title_format(item):
if 'Activar' in item.context[1]['title']:
item.title= '%s' % (set_color(item.title, 'no_update'))
#logger.debug('Despues del formato: %s' % item)
# Damos formato al servidor si existiera
if item.server:
server = '%s' % set_color(item.server.strip().capitalize(), 'server')
@@ -417,6 +420,7 @@ def title_format(item):
if item.action != 'play' and item.server:
item.title ='%s %s'%(item.title, server.strip())
elif item.action == 'play' and item.server:
if item.quality == 'default':
quality = ''
#logger.debug('language_color: %s'%language_color)
@@ -424,6 +428,12 @@ def title_format(item):
if lang:
item.title = add_languages(item.title, simple_language)
#logger.debug('item.title: %s' % item.title)
# si hay verificacion de enlaces
if item.alive != '':
if item.alive.lower() == 'no':
item.title = '[[COLOR red][B]X[/B][/COLOR]] %s' % item.title
elif item.alive == '??':
item.title = '[[COLOR yellow][B]?[/B][/COLOR]] %s' % item.title
else:
item.title = '%s' % item.title
#logger.debug('item.title despues de server: %s' % item.title)

View File

@@ -4,7 +4,7 @@
"ignore_urls": [],
"patterns": [
{
"pattern": "clipwatching.com/(.*?).html",
"pattern": "clipwatching.com/(\\w+)",
"url": "http://clipwatching.com/\\1.html"
}
]
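The tightened pattern stops the lazy (.*?) group from swallowing arbitrary page markup before the first ".html", while \\w+ (the JSON-escaped form of \w+) accepts only the bare file id, which the url template then rebuilds. A quick comparison on hypothetical page fragments:

import re

data = 'href="http://clipwatching.com/abc123.html">ver</a>'
print(re.search(r'clipwatching.com/(\w+)', data).group(1))        # abc123
# The old lazy group could run across markup until the first ".html":
noisy = 'clipwatching.com/</a><span>x.html'
print(re.search(r'clipwatching.com/(.*?).html', noisy).group(1))  # </a><span>x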

View File

@@ -0,0 +1,42 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "https://.+.d0stream.com/embed/([a-z0-9]+)",
"url": "https://v.d0stream.com/embed/\\1"
}
]
},
"free": true,
"id": "dostream",
"name": "dostream",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "Incluir en lista negra",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "Incluir en lista de favoritos",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://s14.postimg.org/lczc08bsx/dostream.png"
}

View File

@@ -0,0 +1,35 @@
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# Conector DoStream By Alfa development Group
# --------------------------------------------------------
import re
from core import httptools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url)
if data.code == 404:
return False, "[Dostream] El archivo no existe o ha sido borrado"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
logger.debug(data)
patron = "(?:'src'|'url'):'(http.*?)'"
matches = re.compile(patron, re.DOTALL).findall(data)
for url in matches:
video_urls.append(['dostream',url])
return video_urls

View File

@@ -12,6 +12,8 @@ def test_video_exists(page_url):
return False, "[Downace] El video ha sido borrado"
if "please+try+again+later." in data:
return False, "[Downace] Error de downace, no se puede generar el enlace al video"
if "File has been removed due to inactivity" in data:
return False, "[Downace] El archivo ha sido removido por inactividad"
return True, ""

View File

@@ -0,0 +1,42 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "https://estream.to/embed-([a-z0-9]+).html",
"url": "https://estream.to/\\1.html"
}
]
},
"free": true,
"id": "estream",
"name": "estream",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "Incluir en lista negra",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "Incluir en lista de favoritos",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://s14.postimg.org/ibd54ayf5/estream.png"
}

View File

@@ -0,0 +1,34 @@
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# Conector Estream By Alfa development Group
# --------------------------------------------------------
import re
from core import httptools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url)
if data.code == 404:
return False, "[Estream] El archivo no existe o ha sido borrado"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
patron = "<source src=([^ ]+) type='video/mp4' label='.*?x(.*?)'"
matches = re.compile(patron, re.DOTALL).findall(data)
for url, quality in matches:
video_urls.append(["%sp [estream]" % quality, url])
return video_urls

View File

@@ -73,11 +73,11 @@ def get_video_url_anterior(page_url, premium=False, user="", password="", video_
pfxfx = ""
data = httptools.downloadpage(page_url, cookies=False).data
data = data.replace("\n","")
cgi_counter = scrapertools.find_single_match(data, """(?is)src=.(https://www.flashx.ws/counter.cgi.*?[^(?:'|")]+)""")
cgi_counter = scrapertools.find_single_match(data, """(?is)src=.(https://www.flashx.bz/counter.cgi.*?[^(?:'|")]+)""")
cgi_counter = cgi_counter.replace("%0A","").replace("%22","")
playnow = scrapertools.find_single_match(data, 'https://www.flashx.ws/dl[^"]+')
playnow = scrapertools.find_single_match(data, 'https://www.flashx.bz/dl[^"]+')
# Para obtener el f y el fxfx
js_fxfx = "https://www." + scrapertools.find_single_match(data.replace("//","/"), """(?is)(flashx.ws/js\w+/c\w+.*?[^(?:'|")]+)""")
js_fxfx = "https://www." + scrapertools.find_single_match(data.replace("//","/"), """(?is)(flashx.bz/js\w+/c\w+.*?[^(?:'|")]+)""")
data_fxfx = httptools.downloadpage(js_fxfx).data
mfxfx = scrapertools.find_single_match(data_fxfx, 'get.*?({.*?})').replace("'","").replace(" ","")
matches = scrapertools.find_multiple_matches(mfxfx, '(\w+):(\w+)')
@@ -87,7 +87,7 @@ def get_video_url_anterior(page_url, premium=False, user="", password="", video_
logger.info("mfxfxfx2= %s" %pfxfx)
if pfxfx == "":
pfxfx = "ss=yes&f=fail&fxfx=6"
coding_url = 'https://www.flashx.ws/flashx.php?%s' %pfxfx
coding_url = 'https://www.flashx.bz/flashx.php?%s' %pfxfx
# {f: 'y', fxfx: '6'}
bloque = scrapertools.find_single_match(data, '(?s)Form method="POST" action(.*?)span')
flashx_id = scrapertools.find_single_match(bloque, 'name="id" value="([^"]+)"')

View File

@@ -4,7 +4,7 @@
"ignore_urls": [],
"patterns": [
{
"pattern": "(https://vidlox.(?:tv|me)/embed-.*?.html)",
"pattern": "(?i)(https://vidlox.(?:tv|me)/embed-.*?.html)",
"url": "\\1"
}
]