Merge remote-tracking branch 'alfa-addon/master'

Author: unknown
Date: 2018-01-29 13:47:48 -03:00
20 changed files with 739 additions and 241 deletions

View File

@@ -46,3 +46,8 @@ def debug(texto=""):
def error(texto=""):
logger_object.error(unicode(str(texto), "utf-8", "ignore").replace("\n", "\n" + " " * 67))
class WebErrorException(Exception):
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
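A minimal usage sketch for the new exception class, assuming a caller that wants to treat web-level failures separately from generic errors; the `fetch` helper and its URL are hypothetical, only `WebErrorException` comes from this hunk.

```python
# Hedged sketch: the class mirrors the hunk above so the example is
# self-contained; fetch() is an illustrative stand-in for an HTTP helper.
class WebErrorException(Exception):
    def __init__(self, *args, **kwargs):
        Exception.__init__(self, *args, **kwargs)

def fetch(url):
    # stand-in for an HTTP helper that signals web-level failures
    raise WebErrorException("HTTP 500 from %s" % url)

try:
    fetch("http://example.com/")
except WebErrorException as e:
    print("web error, reported without crashing the channel: %s" % e)
```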

View File

@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="2.4.13" provider-name="Alfa Addon">
<addon id="plugin.video.alfa" name="Alfa" version="2.4.14" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -19,10 +19,13 @@
</assets>
<news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
» plusdede » cinecalidad
» allpeliculas » serieslan
» pepecine » cinetux
» novelashdgratis ¤ arreglos internos
» animeid » anitoonstv
» hentaiid » mundoflv
» vertelenovelas » todopeliculas
» cinemahd » filebebo
» clipwatching » sendvid
» thevideome » videowood
¤ arreglos internos
</news>
<description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>
<summary lang="en">Browse web pages using Kodi</summary>

View File

@@ -4,25 +4,25 @@ import re
import urlparse
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
CHANNEL_HOST = "http://animeid.tv/"
CHANNEL_HOST = "https://www.animeid.tv/"
def mainlist(item):
logger.info()
itemlist = list()
itemlist.append(
Item(channel=item.channel, action="novedades_series", title="Últimas series", url="http://www.animeid.tv/"))
Item(channel=item.channel, action="novedades_series", title="Últimas series", url=CHANNEL_HOST))
itemlist.append(Item(channel=item.channel, action="novedades_episodios", title="Últimos episodios",
url="http://www.animeid.tv/", viewmode="movie_with_plot"))
url=CHANNEL_HOST, viewmode="movie_with_plot"))
itemlist.append(
Item(channel=item.channel, action="generos", title="Listado por genero", url="http://www.animeid.tv/"))
Item(channel=item.channel, action="generos", title="Listado por genero", url=CHANNEL_HOST))
itemlist.append(
Item(channel=item.channel, action="letras", title="Listado alfabetico", url="http://www.animeid.tv/"))
Item(channel=item.channel, action="letras", title="Listado alfabetico", url=CHANNEL_HOST))
itemlist.append(Item(channel=item.channel, action="search", title="Buscar..."))
return itemlist
@@ -33,7 +33,7 @@ def newest(categoria):
item = Item()
try:
if categoria == 'anime':
item.url = "http://animeid.tv/"
item.url = CHANNEL_HOST
itemlist = novedades_episodios(item)
# Catch the exception so that one failing channel does not interrupt the 'newest' channel
except:
@@ -50,19 +50,17 @@ def search(item, texto):
itemlist = []
if item.url == "":
item.url = "http://www.animeid.tv/ajax/search?q="
item.url = CHANNEL_HOST + "ajax/search?q="
texto = texto.replace(" ", "+")
item.url = item.url + texto
try:
headers = []
headers.append(
["User-Agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:19.0) Gecko/20100101 Firefox/19.0"])
headers.append(["Referer", "http://www.animeid.tv/"])
headers.append(["Referer", CHANNEL_HOST])
headers.append(["X-Requested-With", "XMLHttpRequest"])
data = scrapertools.cache_page(item.url, headers=headers)
data = data.replace("\\", "")
logger.debug("data=" + data)
patron = '{"id":"([^"]+)","text":"([^"]+)","date":"[^"]*","image":"([^"]+)","link":"([^"]+)"}'
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -71,8 +69,6 @@ def search(item, texto):
url = urlparse.urljoin(item.url, scrapedurl)
thumbnail = scrapedthumbnail
plot = ""
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
itemlist.append(
Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail, plot=plot,
show=title, viewmode="movie_with_plot"))
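For context, a stand-alone sketch of the AJAX search request this hunk now builds against `CHANNEL_HOST`: the Referer and X-Requested-With headers are the ones set above, the query term is an arbitrary example, and the parsing pattern is the channel's own `patron`. Python 3 stdlib is assumed in place of `scrapertools.cache_page`.

```python
# Sketch: reproduce the animeid AJAX search outside the addon.
import re
import urllib.request

CHANNEL_HOST = "https://www.animeid.tv/"
req = urllib.request.Request(
    CHANNEL_HOST + "ajax/search?q=" + "naruto",
    headers={"User-Agent": "Mozilla/5.0",
             "Referer": CHANNEL_HOST,
             "X-Requested-With": "XMLHttpRequest"})  # marks the call as AJAX
data = urllib.request.urlopen(req).read().decode("utf-8", "ignore").replace("\\", "")
patron = '{"id":"([^"]+)","text":"([^"]+)","date":"[^"]*","image":"([^"]+)","link":"([^"]+)"}'
for _id, text, image, link in re.findall(patron, data):
    print(text, CHANNEL_HOST.rstrip("/") + link)
```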
@@ -89,240 +85,146 @@ def search(item, texto):
def novedades_series(item):
logger.info()
# Download the page
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data, '<section class="series">(.*?)</section>')
patronvideos = '<li><a href="([^"]+)"><span class="tipo\d+">([^<]+)</span><strong>([^<]+)</strong>'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.find_single_match(data, '<section class="series">(.*?)</section>')
patronvideos = '(?s)<a href="([^"]+)">.*?tipo\d+">([^<]+)</span>.*?<strong>([^<]+)</strong>'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
for url, tipo, title in matches:
scrapedtitle = title + " (" + tipo + ")"
scrapedurl = urlparse.urljoin(item.url, url)
scrapedthumbnail = ""
scrapedplot = ""
logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
itemlist.append(Item(channel=item.channel, action="episodios", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot, show=title, viewmode="movie_with_plot"))
show=title, viewmode="movie_with_plot"))
return itemlist
def novedades_episodios(item):
logger.info()
# Download the page
# <article> <a href="/ver/uchuu-kyoudai-35"> <header>Uchuu Kyoudai #35</header> <figure><img src="http://static.animeid.com/art/uchuu-kyoudai/normal/b4934a1d.jpg" class="cover" alt="Uchuu Kyoudai" width="250" height="140" /></figure><div class="mask"></div> <aside><span class="p"><strong>Reproducciones: </strong>306</span> <span class="f"><strong>Favoritos: </strong>0</span></aside> </a> <p>Una noche en el año 2006, cuando eran jovenes, los dos hermanos Mutta (el mayor) y Hibito (el menor) vieron un OVNI que hiba en dirección hacia la luna. Esa misma noche decidieron que ellos se convertirian en astronautas y irian al espacio exterior. En el año 2050, Hibito se ha convertido en astronauta y que ademas está incluido en una misión que irá a la luna. En cambio Mutta siguió una carrera mas tradicional, y terminó trabajando en una compañia de fabricación de automoviles. Sin embargo, Mutta termina arruinando su carrera por ciertos problemas que tiene con su jefe. Ahora bien, no sólo perdió su trabajo si no que fue incluido en la lista negra de la industria laboral. Pueda ser que esta sea su unica oportunidad que tenga Mutta de volver a perseguir su sueño de la infancia y convertirse en astronauta, al igual que su perqueño hermano Hibito.</p> </article>
# <img pagespeed_high_res_src="
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data, '<section class="lastcap">(.*?)</section>')
patronvideos = '<a href="([^"]+)">[^<]+<header>([^<]+)</header>[^<]+<figure><img[^>]+src="([^"]+)"[\s\S]+?<p>(.+?)</p>'
data = scrapertools.find_single_match(data, '<section class="lastcap">(.*?)</section>')
patronvideos = '(?s)<a href="([^"]+)">[^<]+<header>([^<]+).*?src="([^"]+)"[\s\S]+?<p>(.+?)</p>'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
itemlist = []
for url, title, thumbnail, plot in matches:
scrapedtitle = scrapertools.entityunescape(title)
scrapedurl = urlparse.urljoin(item.url, url)
scrapedthumbnail = thumbnail
scrapedplot = plot
logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
episodio = scrapertools.get_match(scrapedtitle, '\s+#(.*?)$')
episodio = scrapertools.find_single_match(scrapedtitle, '\s+#(.*?)$')
contentTitle = scrapedtitle.replace('#' + episodio, '')
itemlist.append(Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot, contentSeason=1, contentTitle=contentTitle))
return itemlist
def generos(item):
logger.info()
# Download the page
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data, '<div class="generos">(.*?)</div>')
patronvideos = '<li> <a href="([^"]+)">([^<]+)</a>'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.find_single_match(data, '<div class="generos">(.*?)</div>')
patronvideos = '(?s)<a href="([^"]+)">([^<]+)</a>'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
for url, title in matches:
scrapedtitle = title
scrapedurl = urlparse.urljoin(item.url, url)
scrapedthumbnail = ""
scrapedplot = ""
logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
itemlist.append(
Item(channel=item.channel, action="series", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
plot=scrapedplot, show=title, viewmode="movie_with_plot"))
return itemlist
def letras(item):
logger.info()
# Download the page
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data, '<ul id="letras">(.*?)</ul>')
data = scrapertools.find_single_match(data, '<ul id="letras">(.*?)</ul>')
patronvideos = '<li> <a href="([^"]+)">([^<]+)</a>'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
itemlist = []
for url, title in matches:
scrapedtitle = title
scrapedurl = urlparse.urljoin(item.url, url)
scrapedthumbnail = ""
scrapedplot = ""
logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
itemlist.append(
Item(channel=item.channel, action="series", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
plot=scrapedplot, show=title, viewmode="movie_with_plot"))
Item(channel=item.channel, action="series", title=scrapedtitle, url=scrapedurl,
show=title, viewmode="movie_with_plot"))
return itemlist
def series(item):
logger.info()
# Download the page
data = httptools.downloadpage(item.url).data
logger.debug("datito %s" % data)
'''
<article class="item">
<a href="/aoi-sekai-no-chuushin-de">
<header>Aoi Sekai no Chuushin de</header>
<figure>
<img src="http://static.animeid.com/art/aoi-sekai-no-chuushin-de/cover/0077cb45.jpg" width="116"
height="164" />
</figure>
<div class="mask"></div>
</a>
<p>
El Reino de Segua ha ido perdiendo la guerra contra el Imperio de Ninterdo pero la situación ha cambiado
con la aparición de un chico llamado Gear. Todos los personajes son parodias de protas de videojuegos de
Nintendo y Sega respectivamente, como lo son Sonic the Hedgehog, Super Mario Bros., The Legend of Zelda,
etc.
</p>
</article>
'''
patron = '<article class="item"[^<]+'
patron += '<a href="([^"]+)"[^<]+<header>([^<]+)</header[^<]+'
patron += '<figure><img[\sa-z_]+src="([^"]+)"[^<]+</figure><div class="mask"></div></a>[^<]+<p>(.*?)<'
matches = re.compile(patron, re.DOTALL).findall(data)
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '(?s)<article class="item"[^<]+'
patron += '<a href="([^"]+)"[^<]+<header>([^<]+)</header[^<]+.*?'
patron += 'src="([^"]+)".*?<p>(.*?)<'
matches = scrapertools.find_multiple_matches(data, patron)
for url, title, thumbnail, plot in matches:
scrapedtitle = title
scrapedurl = urlparse.urljoin(item.url, url)
scrapedthumbnail = thumbnail
scrapedplot = plot
logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
itemlist.append(Item(channel=item.channel, action="episodios", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot, show=scrapedtitle,
viewmode="movie_with_plot"))
itemlist = sorted(itemlist, key=lambda it: it.title)
try:
page_url = scrapertools.get_match(data, '<li><a href="([^"]+)">&gt;</a></li>')
page_url = scrapertools.find_single_match(data, '<li><a href="([^"]+)">&gt;</a></li>')
itemlist.append(Item(channel=item.channel, action="series", title=">> Página siguiente",
url=urlparse.urljoin(item.url, page_url), viewmode="movie_with_plot", thumbnail="",
plot=""))
except:
pass
return itemlist
def episodios(item, final=True):
logger.info()
# Download the page
body = httptools.downloadpage(item.url).data
try:
scrapedplot = scrapertools.get_match(body, '<meta name="description" content="([^"]+)"')
except:
pass
try:
scrapedthumbnail = scrapertools.get_match(body, '<link rel="image_src" href="([^"]+)"')
except:
pass
data = scrapertools.get_match(body, '<ul id="listado">(.*?)</ul>')
patron = '<li><a href="([^"]+)">(.*?)</a></li>'
matches = re.compile(patron, re.DOTALL).findall(data)
itemlist = []
for url, title in matches:
scrapedtitle = scrapertools.htmlclean(title)
try:
episodio = scrapertools.get_match(scrapedtitle, "Capítulo\s+(\d+)")
titulo_limpio = re.compile("Capítulo\s+(\d+)\s+", re.DOTALL).sub("", scrapedtitle)
if len(episodio) == 1:
scrapedtitle = "1x0" + episodio + " - " + titulo_limpio
else:
scrapedtitle = "1x" + episodio + " - " + titulo_limpio
except:
pass
scrapedurl = urlparse.urljoin(item.url, url)
# scrapedthumbnail = ""
# scrapedplot = ""
logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
itemlist.append(Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot, show=item.show))
try:
next_page = scrapertools.get_match(body, '<a href="([^"]+)">\&gt\;</a>')
next_page = urlparse.urljoin(item.url, next_page)
item2 = Item(channel=item.channel, action="episodios", title=item.title, url=next_page,
thumbnail=item.thumbnail, plot=item.plot, show=item.show, viewmode="movie_with_plot")
itemlist.extend(episodios(item2, final=False))
except:
import traceback
logger.error(traceback.format_exc())
if final and config.get_videolibrary_support():
data = httptools.downloadpage(item.url).data
data_id = scrapertools.find_single_match(data, 'data-id="([^"]+)')
CHANNEL_HEADERS = [
["Host", "m.animeid.tv"],
["X-Requested-With", "XMLHttpRequest"]
]
page = 0
while True:
page += 1
u = "https://m.animeid.tv/ajax/caps?id=%s&ord=DESC&pag=%s" %(data_id, page)
data = httptools.downloadpage(u, headers=CHANNEL_HEADERS).data
# When there is no more data the endpoint returns: "list":[]
if '"list":[]' in data:
break
dict_data = jsontools.load(data)
list = dict_data['list']
for dict in list:
itemlist.append(Item(action = "findvideos",
channel = item.channel,
title = "1x" + dict["numero"] + " - " + dict["date"],
url = CHANNEL_HOST + dict['href'],
thumbnail = item.thumbnail,
show = item.show,
viewmode = "movie_with_plot"
))
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=item.url,
action="add_serie_to_library", extra="episodios", show=item.show))
itemlist.append(Item(channel=item.channel, title="Descargar todos los episodios de la serie", url=item.url,
action="download_all_episodes", extra="episodios", show=item.show))
return itemlist
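The new `while True` block above pages the mobile AJAX endpoint until it answers with an empty list. A stand-alone sketch of that loop, assuming Python 3 stdlib in place of `httptools` (the `data_id` value is the one scraped from the series page as shown above):

```python
# Sketch of the episode-paging loop: fetch /ajax/caps pages until the
# '"list":[]' sentinel from the diff appears.
import json
import urllib.request

def iter_caps(data_id):
    page = 0
    while True:
        page += 1
        u = "https://m.animeid.tv/ajax/caps?id=%s&ord=DESC&pag=%s" % (data_id, page)
        req = urllib.request.Request(u, headers={"X-Requested-With": "XMLHttpRequest"})
        raw = urllib.request.urlopen(req).read().decode("utf-8", "ignore")
        if '"list":[]' in raw:   # sentinel: no more episodes
            return
        for cap in json.loads(raw)["list"]:
            yield cap["numero"], cap["date"], cap["href"]
```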
def findvideos(item):
logger.info()
data = httptools.downloadpage(item.url).data
itemlist = []
data = httptools.downloadpage(item.url).data
url_anterior = scrapertools.find_single_match(data, '<li class="b"><a href="([^"]+)">« Capítulo anterior')
url_siguiente = scrapertools.find_single_match(data, '<li class="b"><a href="([^"]+)">Siguiente capítulo »')
data = scrapertools.find_single_match(data, '<ul id="partes">(.*?)</ul>')
data = data.replace("\\/", "/")
data = data.replace("%3A", ":")
data = data.replace("%2F", "/")
logger.info("data=" + data)
# http%3A%2F%2Fwww.animeid.moe%2Fstream%2F41TLmCj7_3q4BQLnfsban7%2F1440956023.mp4
# http://www.animeid.moe/stream/41TLmCj7_3q4BQLnfsban7/1440956023.mp4
# http://www.animeid.tv/stream/oiW0uG7yqBrg5TVM5Cm34n/1385370686.mp4
patron = '(http://www.animeid.tv/stream/[^/]+/\d+.[a-z0-9]+)'
matches = re.compile(patron, re.DOTALL).findall(data)
data = scrapertools.find_single_match(data, '<ul id="partes">(.*?)</ul>').decode("unicode-escape")
data = data.replace("\\/", "/").replace("%3A", ":").replace("%2F", "/")
patron = '(https://www.animeid.tv/stream/[^/]+/\d+.[a-z0-9]+)'
matches = scrapertools.find_multiple_matches(data, patron)
encontrados = set()
for url in matches:
if url not in encontrados:
@@ -330,15 +232,12 @@ def findvideos(item):
Item(channel=item.channel, action="play", title="[directo]", server="directo", url=url, thumbnail="",
plot="", show=item.show, folder=False))
encontrados.add(url)
from core import servertools
itemlist.extend(servertools.find_video_items(data=data))
for videoitem in itemlist:
videoitem.channel = item.channel
videoitem.action = "play"
videoitem.folder = False
videoitem.title = "[" + videoitem.server + "]"
if url_anterior:
title_anterior = url_anterior.strip("/v/").replace('-', ' ').strip('.html')
itemlist.append(Item(channel=item.channel, action="findvideos", title="Anterior: " + title_anterior,
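The rewritten findvideos un-escapes the `<ul id="partes">` block before matching stream URLs: `unicode-escape` undoes `\uXXXX` sequences, and the literal replacements undo `\/` plus the percent-encoded `:` and `/`. A tiny sketch of those replacements, with a sample string modeled on the commented examples above:

```python
# Sketch: undo the escaping applied to embedded stream URLs.
import re

raw = 'http%3A%2F%2Fwww.animeid.tv\\/stream\\/oiW0uG7yqBrg5TVM5Cm34n\\/1385370686.mp4'
data = raw.replace("\\/", "/").replace("%3A", ":").replace("%2F", "/")
print(re.findall(r'(https?://www\.animeid\.tv/stream/[^/]+/\d+\.[a-z0-9]+)', data))
```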

View File

@@ -95,7 +95,7 @@ def episodios(item):
patron = '<div class="pagina">(.+?)<\/div><div id="fade".+?>'
data = scrapertools.find_single_match(data, patron)
patron_caps = "<li><a href='(.+?)'>Capitulo: (.+?) - (.+?)<\/a>"
patron_caps = "<li><a href='(.+?)'>Cap(?:i|í)tulo: (.+?) - (.+?)<\/a>"
matches = scrapertools.find_multiple_matches(data, patron_caps)
show = scrapertools.find_single_match(data, '<span>Titulo.+?<\/span>(.+?)<br><span>')
scrapedthumbnail = scrapertools.find_single_match(data, "<img src='(.+?)'.+?>")
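The widened pattern now tolerates both the accented and unaccented spelling the site has used. A quick check against two illustrative lines:

```python
# -*- coding: utf-8 -*-
# Sketch: the (?:i|í) alternation accepts "Capitulo" and "Capítulo".
import re

patron_caps = r"<li><a href='(.+?)'>Cap(?:i|í)tulo: (.+?) - (.+?)<\/a>"
for line in ["<li><a href='/ep-1'>Capitulo: 1 - Piloto</a>",
             "<li><a href='/ep-2'>Capítulo: 2 - Regreso</a>"]:
    print(re.findall(patron_caps, line))
```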

View File

@@ -54,7 +54,7 @@ def show_option(channel, itemlist, text_color='yellow', thumbnail=None, fanart=N
fanart = 'https://s7.postimg.org/65ooga04b/Auto_Play.png'
plot_autoplay = 'AutoPlay permite auto reproducir los enlaces directamente, basándose en la configuracion de tus ' \
'servidores y calidades preferidas. '
'servidores y calidades favoritas. '
itemlist.append(
Item(channel=__channel__,
title="Configurar AutoPlay",
@@ -79,8 +79,6 @@ def start(itemlist, item):
:return: tries to autoplay; if it fails, returns the itemlist it originally received
'''
logger.info()
logger.debug('item inicial %s' % item)
if not config.is_xbmc():
#platformtools.dialog_notification('AutoPlay ERROR', 'Sólo disponible para XBMC/Kodi')
@@ -105,6 +103,7 @@ def start(itemlist, item):
if settings_node['active']:
url_list_valid = []
autoplay_list = []
autoplay_b = []
favorite_servers = []
favorite_quality = []
@@ -118,7 +117,7 @@ def start(itemlist, item):
config.set_setting("player_mode", 0)
# Report that AutoPlay is active
platformtools.dialog_notification('AutoPlay Activo', '', sound=False)
#platformtools.dialog_notification('AutoPlay Activo', '', sound=False)
# Priorities when sorting itemlist:
# 0: Servers and qualities
@@ -152,6 +151,7 @@ def start(itemlist, item):
# Filter the itemlist links that match the autoplay settings
for item in itemlist:
autoplay_elem = dict()
b_dict = dict()
# Check that this is a video item
if 'server' not in item:
@@ -177,6 +177,9 @@ def start(itemlist, item):
# discard the item
if item.server not in favorite_servers or item.quality not in favorite_quality \
or item.url in url_list_valid:
item.type_b = True
b_dict['videoitem']= item
autoplay_b.append(b_dict)
continue
autoplay_elem["indice_server"] = favorite_servers.index(item.server)
autoplay_elem["indice_quality"] = favorite_quality.index(item.quality)
@@ -186,6 +189,9 @@ def start(itemlist, item):
# if the server is not in the favorites list or the url is repeated,
# discard the item
if item.server not in favorite_servers or item.url in url_list_valid:
item.type_b = True
b_dict['videoitem'] = item
autoplay_b.append(b_dict)
continue
autoplay_elem["indice_server"] = favorite_servers.index(item.server)
@@ -194,6 +200,9 @@ def start(itemlist, item):
# if the quality is not in the favorites list or the url is repeated,
# discard the item
if item.quality not in favorite_quality or item.url in url_list_valid:
item.type_b = True
b_dict['videoitem'] = item
autoplay_b.append(b_dict)
continue
autoplay_elem["indice_quality"] = favorite_quality.index(item.quality)
@@ -205,6 +214,7 @@ def start(itemlist, item):
# If the item gets this far, add it to the list of valid urls and to autoplay_list
url_list_valid.append(item.url)
item.plan_b=True
autoplay_elem['videoitem'] = item
# autoplay_elem['server'] = item.server
# autoplay_elem['quality'] = item.quality
@@ -223,9 +233,17 @@ def start(itemlist, item):
elif priority == 3: # Solo calidades
autoplay_list.sort(key=lambda orden: orden['indice_quality'])
# Prepare plan B; if enabled, the non-favorite elements are appended at the end
plan_b = settings_node['plan_b']
ready = False
text_b = ''
if plan_b:
autoplay_list.extend(autoplay_b)
# If there are elements in the autoplay list, try to play each one until one works
# or all of them fail
if autoplay_list:
if autoplay_list or (plan_b and autoplay_b):
played = False
max_intentos = 5
max_intentos_servers = {}
@@ -236,6 +254,10 @@ def start(itemlist, item):
for autoplay_elem in autoplay_list:
play_item = Item
# If it is not a favorite element, append the plan B text
if autoplay_elem['videoitem'].type_b:
text_b = '(Plan B)'
if not platformtools.is_playing() and not played:
videoitem = autoplay_elem['videoitem']
logger.debug('videoitem %s' % videoitem)
@@ -250,7 +272,7 @@ def start(itemlist, item):
if hasattr(videoitem, 'language') and videoitem.language != "":
lang = " '%s' " % videoitem.language
platformtools.dialog_notification("AutoPlay", "%s%s%s" % (
platformtools.dialog_notification("AutoPlay %s" %text_b, "%s%s%s" % (
videoitem.server.upper(), lang, videoitem.quality.upper()), sound=False)
# TODO videoitem.server is the server id, but it might not be the server name!!!
@@ -282,12 +304,11 @@ def start(itemlist, item):
except:
pass
try:
if platformtools.is_playing():
played = True
break
except: # TODO avoid reporting that the connector failed or the video was not found
except:
logger.debug(str(len(autoplay_list)))
# If we got this far, nothing could be played
@@ -300,9 +321,10 @@ def start(itemlist, item):
if not platformtools.dialog_yesno("AutoPlay", text,
"¿Desea ignorar todos los enlaces de este servidor?"):
max_intentos_servers[videoitem.server] = max_intentos
logger.debug('elem: %s list %s' % (autoplay_list.index(autoplay_elem),autoplay_list[-1]))
# If no elements remain in the list, report it
if autoplay_elem == autoplay_list[-1]:
platformtools.dialog_notification('AutoPlay', 'No hubo enlaces funcionales')
platformtools.dialog_notification('AutoPlay', 'No hubo enlaces funcionales')
else:
platformtools.dialog_notification('AutoPlay No Fue Posible', 'No Hubo Coincidencias')
@@ -470,7 +492,7 @@ def autoplay_config(item):
"type": "label", "enabled": True, "visible": True}
list_controls.append(separador)
# Preferred servers section
# Favorite servers section
server_list = channel_node.get("servers", [])
if not server_list:
enabled = False
@@ -478,7 +500,7 @@ def autoplay_config(item):
else:
enabled = "eq(-3,true)"
custom_servers_settings = {"id": "custom_servers", "label": " Servidores Preferidos", "color": "0xff66ffcc",
custom_servers_settings = {"id": "custom_servers", "label": " Servidores favoritos", "color": "0xff66ffcc",
"type": "bool", "default": False, "enabled": enabled, "visible": True}
list_controls.append(custom_servers_settings)
if dict_values['active'] and enabled:
@@ -501,7 +523,7 @@ def autoplay_config(item):
if settings_node.get("server_%s" % num, 0) > len(server_list) - 1:
dict_values["server_%s" % num] = 0
# Preferred qualities section
# Favorite qualities section
quality_list = channel_node.get("quality", [])
if not quality_list:
enabled = False
@@ -509,7 +531,7 @@ def autoplay_config(item):
else:
enabled = "eq(-7,true)"
custom_quality_settings = {"id": "custom_quality", "label": " Calidades Preferidas", "color": "0xff66ffcc",
custom_quality_settings = {"id": "custom_quality", "label": " Calidades Favoritas", "color": "0xff66ffcc",
"type": "bool", "default": False, "enabled": enabled, "visible": True}
list_controls.append(custom_quality_settings)
if dict_values['active'] and enabled:
@@ -532,11 +554,20 @@ def autoplay_config(item):
if settings_node.get("quality_%s" % num, 0) > len(quality_list) - 1:
dict_values["quality_%s" % num] = 0
# Plan B
dict_values['plan_b'] = settings_node.get('plan_b', False)
enabled = "eq(-4,true)|eq(-8,true)"
plan_b = {"id": "plan_b", "label": " Plan B (Si fallan los favoritos prueba otros enlaces)",
"color": "0xffffff99",
"type": "bool", "default": False, "enabled": enabled, "visible": True}
list_controls.append(plan_b)
# Priorities section
priority_list = ["Servidor y Calidad", "Calidad y Servidor"]
set_priority = {"id": "priority", "label": " Prioridad (Indica el orden para Auto-Reproducir)",
"color": "0xffffff99", "type": "list", "default": 0,
"enabled": True, "visible": "eq(-4,true)+eq(-8,true)+eq(-11,true)", "lvalues": priority_list}
"enabled": True, "visible": "eq(-5,true)+eq(-9,true)+eq(-12,true)", "lvalues": priority_list}
list_controls.append(set_priority)
dict_values["priority"] = settings_node.get("priority", 0)

View File

@@ -0,0 +1,39 @@
{
"id": "cinemahd",
"name": "CinemaHD",
"active": true,
"adult": false,
"language": ["lat"],
"thumbnail": "",
"banner": "",
"version": 1,
"categories": [
"tvshow"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -0,0 +1,173 @@
# -*- coding: utf-8 -*-
# -*- Channel CinemaHD -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
host = 'http://www.cinemahd.co/'
def mainlist(item):
logger.info()
itemlist = list()
itemlist.append(item.clone(title="Ultimas", action="list_all", url=host))
itemlist.append(item.clone(title="Generos", action="section", section='genre'))
itemlist.append(item.clone(title="Por Calidad", action="section", section='quality'))
itemlist.append(item.clone(title="Por Año", action="section", section='year'))
itemlist.append(item.clone(title="Alfabetico", action="section", section='alpha'))
itemlist.append(item.clone(title="Buscar", action="search", url=host+'?s='))
return itemlist
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def list_all(item):
logger.info()
itemlist = []
data = get_source(item.url)
if item.section == 'alpha':
patron = '<span class=Num>\d+.*?<a href=(.*?) class.*?<img src=(.*?) alt=.*?<strong>(.*?)</strong>.*?'
patron += '<td>(\d{4})</td>.*?Qlty>(.*?)</span>'
else:
patron = '<article id=post-.*?<a href=(.*?)>.*?<img src=(.*?) alt=.*?'
patron += '<h2 class=Title>(.*?)<\/h2>.*?<span class=Year>(.*?)<\/span>.*?Qlty>(.*?)<\/span>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, year, quality in matches:
url = scrapedurl
if "|" in scrapedtitle:
scrapedtitle= scrapedtitle.split("|")
contentTitle = scrapedtitle[0].strip()
else:
contentTitle = scrapedtitle
contentTitle = re.sub('\(.*?\)','', contentTitle)
title = '%s [%s] [%s]'%(contentTitle, year, quality)
thumbnail = 'http:'+scrapedthumbnail
itemlist.append(item.clone(action='findvideos',
title=title,
url=url,
thumbnail=thumbnail,
contentTitle=contentTitle,
quality = quality,
infoLabels={'year':year}
))
tmdb.set_infoLabels_itemlist(itemlist, True)
# Pagination
url_next_page = scrapertools.find_single_match(data,'<a class=next.*?href=(.*?)>.*?»</a></div>')
if url_next_page:
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_all'))
return itemlist
def section(item):
logger.info()
itemlist = []
data = get_source(host)
action = 'list_all'
if item.section == 'quality':
patron = 'menu-item-object-category.*?menu-item-\d+><a href=(.*?)>(.*?)<\/a>'
elif item.section == 'genre':
patron = '<a href=(http:.*?) class=Button STPb>(.*?)</a>'
elif item.section == 'year':
patron = 'menu-item-15\d+><a href=(.*?\?s.*?)>(\d{4})<\/a><\/li>'
elif item.section == 'alpha':
patron = '<li><a href=(.*?letters.*?)>(.*?)</a>'
action = 'list_all'
matches = re.compile(patron, re.DOTALL).findall(data)
for data_one, data_two in matches:
url = data_one
title = data_two
if title != 'Ver más':
new_item = Item(channel=item.channel, title= title, url=url, action=action, section=item.section)
itemlist.append(new_item)
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = 'id=(Opt\d+)>.*?src=(.*?) frameborder.*?</iframe>'
matches = re.compile(patron, re.DOTALL).findall(data)
for option, scrapedurl in matches:
url= scrapedurl
opt_data = scrapertools.find_single_match(data,'%s><span>.*?<strong>\d+<.*?</span>.*?<span>('
'.*?)</span>'%option).split('-')
language = opt_data[0].strip()
quality = opt_data[1].strip()
if url != '':
itemlist.append(item.clone(title='%s', url=url, language=language, quality=quality, action='play'))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % '%s [%s] [%s]'%(i.server.capitalize(),
i.language, i.quality))
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
if texto != '':
return list_all(item)
else:
return []
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria in ['peliculas','latino']:
item.url = host
elif categoria == 'infantiles':
item.url = host+'/animacion'
elif categoria == 'terror':
item.url = host+'/terror'
itemlist = list_all(item)
if itemlist[-1].title == 'Siguiente >>':
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
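One detail worth noting in this new channel's findvideos: each item title is left as a bare `'%s'` and only formatted once `servertools.get_servers_itemlist` has identified the server. A simplified stand-in for that deferred formatting (the server name and the plain loop are hypothetical, not servertools' actual code):

```python
# Sketch: titles stay as '%s' until the server is known, then the
# lambda fills in server/language/quality in one pass.
items = [{"title": "%s", "url": "https://example.com/e/abc",
          "language": "Latino", "quality": "HD", "server": "fembed"}]

def fill_titles(itemlist, title_fn):
    for i in itemlist:
        i["title"] = i["title"] % title_fn(i)   # '%s' -> final label
    return itemlist

fill_titles(items, lambda i: '%s [%s] [%s]' % (i["server"].capitalize(),
                                               i["language"], i["quality"]))
print(items[0]["title"])   # -> Fembed [Latino] [HD]
```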

View File

@@ -21,7 +21,7 @@ def mainlist(item):
itemlist.append(Item(channel=item.channel, action="series", title="Sin Censura",
url=urlparse.urljoin(CHANNEL_HOST, "archivos/sin-censura/")))
itemlist.append(Item(channel=item.channel, action="series", title="High Definition",
url=urlparse.urljoin(CHANNEL_HOST, "archivos/hight-definition/")))
url=urlparse.urljoin(CHANNEL_HOST, "archivos/high-definition/")))
itemlist.append(Item(channel=item.channel, action="series", title="Mejores Hentais",
url=urlparse.urljoin(CHANNEL_HOST, "archivos/ranking-hentai/")))
@@ -75,7 +75,7 @@ def series(item):
show=show, fulltitle=fulltitle, fanart=thumbnail, folder=True))
if pagination:
page = scrapertools.find_single_match(pagination, '>Página\s*(\d+)\s*de\s*\d+<')
page = scrapertools.find_single_match(pagination, '>(?:Page|Página)\s*(\d+)\s*(?:of|de)\s*\d+<')
pattern = 'href="([^"]+)">%s<' % (int(page) + 1)
url_page = scrapertools.find_single_match(pagination, pattern)
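The replacement makes the pager regex bilingual, so it keeps working whether the template renders "Página X de Y" or "Page X of Y". A quick illustrative check:

```python
# -*- coding: utf-8 -*-
# Sketch: the same pattern extracts the current page in both locales.
import re

pat = r'>(?:Page|Página)\s*(\d+)\s*(?:of|de)\s*\d+<'
for html in ['<span>Página 2 de 9</span>', '<span>Page 2 of 9</span>']:
    print(re.search(pat, html).group(1))   # -> 2, 2
```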

View File

@@ -641,7 +641,7 @@ def play(item):
logger.info()
data = httptools.downloadpage(item.url).data
if item.server not in ['streamplay','streame']:
if item.server not in ['streamplay','streame', 'clipwatching', 'vidoza']:
url = scrapertools.find_single_match(data, '<(?:IFRAME|iframe).*?(?:SRC|src)=*([^ ]+) (?!style|STYLE)')
else:
url = scrapertools.find_single_match(data, '<meta http-equiv="refresh" content="0; url=([^"]+)">')

View File

@@ -0,0 +1,44 @@
{
"id": "todopeliculas",
"name": "TodoPeliculas",
"active": true,
"adult": false,
"language": ["cast"],
"thumbnail": "http://www.todo-peliculas.com/images/logo.png",
"banner": "",
"version": 1,
"categories": [
"movie",
"torrent"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"Castellano"
]
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -0,0 +1,172 @@
# -*- coding: utf-8 -*-
# -*- Channel TodoPeliculas -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
from core import httptools
from core import scrapertools
from core.item import Item
from channels import filtertools
from channels import autoplay
from platformcode import config, logger
IDIOMAS = {'cast': 'Castellano'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['torrent']
host = 'http://www.todo-peliculas.com/'
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(item.clone(title="Ultimas", action="list_all", url=host+'torrents'))
itemlist.append(item.clone(title="Por Calidad", action="section", url=host))
itemlist.append(item.clone(title="Buscar", action="search", url=host+'buscar?searchword='))
autoplay.show_option(item.channel, itemlist)
return itemlist
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def list_all(item):
logger.info()
itemlist = []
data = get_source(item.url)
if item.type == 'buscar':
patron = '<div class=moditemfdb><a title=(.*?)\s+href=(.*?)><img.*?class=thumbnailresult src=(.*?)/>'
elif item.type == 'section':
patron = '<div class=blogitem >.*?href=(.*?)>.*?src=(.*?) alt.*?title=(.*?)>'
else:
patron = '<div class=blogitem ><a title=(.*?)\s+href=(.*?)>.*?src=(.*?) onload'
matches = re.compile(patron, re.DOTALL).findall(data)
for info_1, info_2, info_3 in matches:
if item.type != 'section':
url = host+info_2
quality = scrapertools.find_single_match(info_1, '\[(.*?)\]')
contentTitle = re.sub(r'\[.*?\]', '', info_1)
title = '%s [%s]'%(contentTitle, quality)
thumbnail = info_3
else:
url = host + info_1
quality = scrapertools.find_single_match(info_3, '\[(.*?)\]')
contentTitle = re.sub(r'\[.*?\]', '', info_3)
title = '%s [%s]' % (contentTitle, quality)
thumbnail = info_2
quality = ''
if quality == '':
title = title.replace('[]', '')
itemlist.append(item.clone(action='findvideos',
title=title,
url=url,
thumbnail=thumbnail,
contentTitle=contentTitle,
quality = quality
))
# Pagination
url_next_page = scrapertools.find_single_match(data,'Anterior.*?<a href=/(.*?) title=Siguiente>Siguiente</a>')
if url_next_page:
itemlist.append(item.clone(title="Siguiente >>", url=host+url_next_page, action='list_all'))
return itemlist
def section(item):
logger.info()
itemlist = []
data = get_source(host)
patron = '<li><a href=(.*?) rel=tag class=>(.*?)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
url = scrapedurl
title = scrapedtitle
new_item = Item(channel=item.channel, title= title, url=url, action='list_all', type='section')
itemlist.append(new_item)
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = get_source(item.url)
second_url = scrapertools.find_single_match(data, '<p><a href=(.*?) rel')
data = get_source(host+second_url)
url = scrapertools.find_single_match(data, "open\('(.*?)'")
if url != '':
quality = item.quality
title = 'Torrent [%s]' % quality
itemlist.append(item.clone(title=title, url=url, quality=quality, action='play', server='torrent',
language='cast'))
# Required by FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Required by AutoPlay
autoplay.start(itemlist, item)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))
return itemlist
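This findvideos resolves the torrent in two hops: the film page links to an intermediate page, and that page's `window.open(...)` call carries the final URL. A stand-alone sketch with illustrative markup samples (the real pages may differ):

```python
# Sketch of the two-hop torrent resolution used above.
import re

film_page = "<p><a href=descargar/pelicula-123 rel=nofollow>"
second_url = re.search(r'<p><a href=(.*?) rel', film_page).group(1)

second_page = "onclick=window.open('http://www.todo-peliculas.com/files/x.torrent')"
torrent_url = re.search(r"open\('(.*?)'", second_page).group(1)
print(second_url, "->", torrent_url)
```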
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
item.type = 'buscar'
if texto != '':
return list_all(item)
else:
return []
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria in ['peliculas']:
item.url = host+'torrents'
itemlist = list_all(item)
if itemlist[-1].title == 'Siguiente >>':
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist

View File

@@ -11,21 +11,17 @@ from platformcode import logger
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, title="Catálogo", action="series", url="http://www.vertelenovelas.cc/",
itemlist.append(Item(channel=item.channel, title="Ultimos capítulos", action="ultimos", url="http://www.vertelenovelas.cc/",
viewmode="movie"))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = "http://www.vertelenovelas.cc/ajax/autocompletex.php?q=" + texto
try:
return series(item)
@@ -37,16 +33,38 @@ def search(item, texto):
return []
def ultimos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<article.*?</article>'
matches = re.compile(patron, re.DOTALL).findall(data)
for match in matches:
title = scrapertools.find_single_match(match, '<span>([^<]+)</span>')
if title == "":
title = scrapertools.find_single_match(match, '<a href="[^"]+" class="title link">([^<]+)</a>')
url = urlparse.urljoin(item.url, scrapertools.find_single_match(match, '<a href="([^"]+)"'))
thumbnail = scrapertools.find_single_match(match, '<div data-src="([^"]+)"')
if thumbnail == "":
thumbnail = scrapertools.find_single_match(match, '<img src="([^"]+)"')
logger.debug("title=[" + title + "], url=[" + url + "]")
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail))
next_page_url = scrapertools.find_single_match(data, '<a href="([^"]+)" class="next">')
if next_page_url != "":
itemlist.append(Item(channel=item.channel, action="series", title=">> Pagina siguiente",
url=urlparse.urljoin(item.url, next_page_url), viewmode="movie", thumbnail="", plot="",
folder=True))
return itemlist
def series(item):
logger.info()
itemlist = []
# Download the page
data = httptools.downloadpage(item.url).data
patron = '<article.*?</article>'
matches = re.compile(patron, re.DOTALL).findall(data)
for match in matches:
title = scrapertools.find_single_match(match, '<span>([^<]+)</span>')
if title == "":
@@ -57,60 +75,43 @@ def series(item):
thumbnail = scrapertools.find_single_match(match, '<img src="([^"]+)"')
logger.debug("title=[" + title + "], url=[" + url + "]")
itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail))
next_page_url = scrapertools.find_single_match(data, '<a href="([^"]+)" class="next">')
if next_page_url != "":
itemlist.append(Item(channel=item.channel, action="series", title=">> Pagina siguiente",
url=urlparse.urljoin(item.url, next_page_url), viewmode="movie", thumbnail="", plot="",
folder=True))
return itemlist
def episodios(item):
logger.info()
itemlist = []
# Download the page
data = httptools.downloadpage(item.url).data
data = scrapertools.find_single_match(data, '<h2>Cap(.*?)</ul>')
patron = '<li><a href="([^"]+)"><span>([^<]+)</span></a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
title = scrapertools.htmlclean(scrapedtitle)
plot = ""
thumbnail = ""
url = urlparse.urljoin(item.url, scrapedurl)
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
itemlist.append(
Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot,
Item(channel=item.channel, action="findvideos", title=title, url=url,
folder=True, fulltitle=title))
return itemlist
def findvideos(item):
logger.info()
data = httptools.downloadpage(item.url).data
pattern = 'data-id="([^"]+)"'
list_servers = re.compile(pattern, re.DOTALL).findall(data)
logger.debug("llist_servers %s" % list_servers)
list_urls = []
for _id in list_servers:
post = "id=%s" % _id
data = httptools.downloadpage("http://www.vertelenovelas.cc/goto/", post=post).data
list_urls.append(scrapertools.find_single_match(data, 'document\.location = "([^"]+)";'))
from core import servertools
itemlist = servertools.find_video_items(data=", ".join(list_urls))
for videoitem in itemlist:
# videoitem.title = item.title
videoitem.channel = item.channel
return itemlist
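The rewritten findvideos swaps page scraping for an id exchange: each player option carries a `data-id`, and a POST to `/goto/` answers with a tiny page whose `document.location` holds the real server URL. A stand-alone sketch of one exchange, assuming Python 3 stdlib in place of `httptools`:

```python
# Sketch: exchange a player data-id for the final server URL.
import re
import urllib.parse
import urllib.request

def resolve(_id):
    post = urllib.parse.urlencode({"id": _id}).encode("utf-8")
    raw = urllib.request.urlopen("http://www.vertelenovelas.cc/goto/", post).read()
    m = re.search(r'document\.location = "([^"]+)";', raw.decode("utf-8", "ignore"))
    return m.group(1) if m else ""
```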

View File

@@ -0,0 +1,42 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "(s[0-9]+\\.animeid.tv/\\?vid=[A-z0-9-_]+)",
"url": "https://\\1"
}
]
},
"free": true,
"id": "animeid",
"name": "animeid",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "Incluir en lista negra",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "Incluir en lista de favoritos",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": ""
}

View File

@@ -0,0 +1,26 @@
# -*- coding: utf-8 -*-
from core import httptools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "no longer exists" in data or "to copyright issues" in data:
return False, "[animeid] El video ha sido borrado"
if "please+try+again+later." in data:
return False, "[animeid] Error de animeid, no se puede generar el enlace al video"
return True, ""
def get_video_url(page_url, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
video_urls = []
label, videourl = scrapertools.find_single_match(data, 'label":"([^"]+)".*?file":"([^"]+)')
if "animeid.tv" in videourl:
videourl = httptools.downloadpage(videourl, follow_redirects=False, only_headers=True).headers.get("location", "")
video_urls.append([".MP4 " + label + " [animeid]", videourl])
return video_urls
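The `follow_redirects=False, only_headers=True` call above reads the real media URL out of the redirect's Location header instead of downloading the body. A stand-alone equivalent, assuming Python 3 stdlib rather than `httptools`:

```python
# Sketch: grab the Location header of a non-followed redirect.
import urllib.error
import urllib.request

class NoRedirect(urllib.request.HTTPRedirectHandler):
    def redirect_request(self, req, fp, code, msg, headers, newurl):
        return None   # don't follow; we only want the header

def final_location(url):
    opener = urllib.request.build_opener(NoRedirect())
    try:
        resp = opener.open(urllib.request.Request(url, method="HEAD"))
        return resp.headers.get("Location", "")
    except urllib.error.HTTPError as e:   # the 3xx surfaces as an error here
        return e.headers.get("Location", "")
```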

View File

@@ -8,10 +8,8 @@ from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "no longer exists" in data or "to copyright issues" in data:
if "File Not Found" in data:
return False, "[clipwatching] El video ha sido borrado"
if "please+try+again+later." in data:
return False, "[clipwatching] Error de clipwatching, no se puede generar el enlace al video"
return True, ""

View File

@@ -0,0 +1,42 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "(https://filebebo.com/e/[a-zA-Z0-9]+)",
"url": "\\1"
}
]
},
"free": true,
"id": "filebebo",
"name": "filebebo",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "Incluir en lista negra",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "Incluir en lista de favoritos",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://filebebo.com/images/logo.png"
}

View File

@@ -0,0 +1,35 @@
# -*- coding: utf-8 -*-
# -*- Server Filebebo -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
from core import httptools
from core import scrapertools
from platformcode import logger
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = get_source(page_url)
if "File was deleted" in data:
return False, "[Filebebo] El video ha sido borrado"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
video_urls = []
data = get_source(page_url)
url = scrapertools.find_single_match(data, "<source src=(.*?) type='video/.*?'")
video_urls.append(['Filebebo', url])
return video_urls

View File

@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
from core import httptools
from core import scrapertools
from platformcode import logger
@@ -10,28 +11,19 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
video_urls = []
data = scrapertools.cache_page(page_url)
# var video_source = "//cache-2.sendvid.com/1v0chsus.mp4";
media_url = "http:" + scrapertools.find_single_match(data, 'var\s+video_source\s+\=\s+"([^"]+)"')
data = scrapertools.httptools.downloadpage(page_url).data
media_url = scrapertools.find_single_match(data, 'var\s+video_source\s+\=\s+"([^"]+)"')
if "cache-1" in media_url:
video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " (cache1) [sendvid]", media_url])
video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " (cache2) [sendvid]",
media_url.replace("cache-1", "cache-2")])
elif "cache-2" in media_url:
video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " (cache1) [sendvid]",
media_url.replace("cache-2", "cache-1")])
video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " (cache2) [sendvid]", media_url])
else:
video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [sendvid]", media_url])
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
return video_urls
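Note that the removed line prefixed `"http:" +` to the scraped source; if the page still emits a protocol-relative `//cache-...` URL (as the old inline comment showed), the new code returns a scheme-less address. A defensive normalization sketch, should that matter:

```python
# Sketch: give protocol-relative video_source values a scheme.
def normalize(media_url, scheme="https"):
    if media_url.startswith("//"):
        return scheme + ":" + media_url
    return media_url

print(normalize("//cache-2.sendvid.com/1v0chsus.mp4"))
```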

View File

@@ -6,6 +6,10 @@
{
"pattern": "(?:thevideo.me|tvad.me|thevid.net)/(?:embed-|)([A-z0-9]+)",
"url": "http://thevideo.me/embed-\\1.html"
},
{
"pattern": "(?:thevideo.me|tvad.me|thevid.net|thevideo.us)/(?:embed-|)([A-z0-9]+)",
"url": "http://thevideo.me/embed-\\1.html"
}
]
},
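The added entry repeats the previous pattern with `thevideo.us` as an extra alias, so the earlier entry becomes redundant rather than wrong. As a rough illustration of how such `find_videos` entries are applied (a simplified stand-in for servertools, not its actual code):

```python
# Sketch: every regex match is substituted into the url template,
# normalizing all domain aliases to a thevideo.me embed URL.
import re

entry = {"pattern": r"(?:thevideo.me|tvad.me|thevid.net|thevideo.us)/(?:embed-|)([A-z0-9]+)",
         "url": r"http://thevideo.me/embed-\1.html"}

page = "see http://thevideo.us/abc123 and http://tvad.me/embed-xyz789.html"
for m in re.finditer(entry["pattern"], page):
    print(m.expand(entry["url"]))   # both normalize to thevideo.me embeds
```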

View File

@@ -1,34 +1,26 @@
# -*- coding: utf-8 -*-
from aadecode import decode as aadecode
from core import scrapertools
from core import httptools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = scrapertools.cache_page(page_url)
data = httptools.downloadpage(page_url).data
if "This video doesn't exist." in data:
return False, 'The requested video was not found.'
return False, '[videowood] El video no puede ser encontrado o ha sido eliminado.'
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
video_urls = []
data = scrapertools.cache_page(page_url)
data = httptools.downloadpage(page_url).data
text_encode = scrapertools.find_single_match(data, "(eval\(function\(p,a,c,k,e,d.*?)</script>")
from aadecode import decode as aadecode
text_decode = aadecode(text_encode)
# Video URL
patron = "'([^']+)'"
media_url = scrapertools.find_single_match(text_decode, patron)
video_urls.append([media_url[-4:] + " [Videowood]", media_url])
return video_urls