Merge remote-tracking branch 'alfa-addon/master' into Fixes

Author: unknown
Date: 2017-11-24 16:57:15 -03:00
19 changed files with 143 additions and 389 deletions

View File

@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="2.4.0" provider-name="Alfa Addon">
<addon id="plugin.video.alfa" name="Alfa" version="2.4.2" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -19,9 +19,10 @@
</assets>
<news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
» pelisplus » cinetux
» powvideo » streamplay
» gvideo ¤ arreglos internos
» cartoonlatino » clasicofilm
» hdfull » yaske
» ver-peliculas » thevideome
» powvideo ¤ arreglos internos
[COLOR green]Gracias a [COLOR yellow]caperucitaferoz[/COLOR] por su colaboración en esta versión[/COLOR]
</news>
<description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>

View File

@@ -11,6 +11,12 @@ from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from channels import autoplay
list_servers = ['openload',
'directo'
]
list_quality = ['default']
CHANNEL_HOST = "http://animeflv.co"
CHANNEL_DEFAULT_HEADERS = [
@@ -117,7 +123,8 @@ def __find_series(html):
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(Item(channel=item.channel, action="letras",
@@ -134,6 +141,7 @@ def mainlist(item):
url=CHANNEL_HOST + "/Buscar?s="))
itemlist = renumbertools.show_option(item.channel, itemlist)
autoplay.show_option(item.channel, itemlist)
return itemlist
@@ -179,10 +187,13 @@ def search(item, texto):
show_list = __find_series(html)
items = []
context = renumbertools.context(item)
context2 = autoplay.context
context.extend(context2)
for show in show_list:
title, url, thumbnail, plot = show
items.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
plot=plot, show=title, viewmode="movies_with_plot", context=renumbertools.context(item)))
plot=plot, show=title, viewmode="movies_with_plot", context=context))
except:
import sys
for line in sys.exc_info():
@@ -197,10 +208,13 @@ def series(item):
page_html = get_url_contents(item.url)
show_list = __find_series(page_html)
items = []
context = renumbertools.context(item)
context2 = autoplay.context
context.extend(context2)
for show in show_list:
title, url, thumbnail, plot = show
items.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
plot=plot, show=title, viewmode="movies_with_plot", context=renumbertools.context(item)))
plot=plot, show=title, viewmode="movies_with_plot", context=context))
url_next_page = scrapertools.find_single_match(page_html, REGEX_NEXT_PAGE)
if url_next_page:
@@ -292,4 +306,5 @@ def findvideos(item):
itemlist.append(Item(channel=item.channel, action="play", url=video_url, show=re.escape(item.show),
title="Ver en calidad [%s]" % (qualities[quality_id]), plot=item.plot,
fulltitle=item.title))
autoplay.start(__sort_by_quality(itemlist), item)
return __sort_by_quality(itemlist)
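The animeflv hunks above show the standard autoplay wiring this commit rolls out across several channels. A minimal sketch of the three hook points, assuming only the channels.autoplay calls visible in this diff (the channel body itself is hypothetical):

    # -*- coding: utf-8 -*-
    from channels import autoplay
    from core.item import Item

    list_servers = ['openload', 'directo']  # servers this channel resolves
    list_quality = ['default']

    def mainlist(item):
        # 1) register servers/qualities before any menu is built
        autoplay.init(item.channel, list_servers, list_quality)
        itemlist = [Item(channel=item.channel, action="lista", title="Series")]
        # 2) append the autoplay configuration entry to the main menu
        autoplay.show_option(item.channel, itemlist)
        return itemlist

    def findvideos(item):
        itemlist = build_video_items(item)  # hypothetical helper
        # 3) hand the resolved links to autoplay before returning them
        autoplay.start(itemlist, item)
        return itemlist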

View File

@@ -96,7 +96,6 @@ def recientes(item):
action = "peliculas"
if not thumb.startswith("http"):
thumb = "http:%s" % thumb
action ="findvideos"
infoLabels = {'filtro': {"original_language": "ja"}.items()}
itemlist.append(item.clone(action=action, title=title, url=url, thumbnail=thumb, text_color=color3,
contentTitle=contentTitle, contentSerieName=show, infoLabels=infoLabels,

View File

@@ -90,7 +90,7 @@ def start(itemlist, item):
videoitem.contentType=item.contentType
videoitem.episode_id=item.episode_id
videoitem.hasContentDetails=item.hasContentDetails
videoitem.infoLabels=item.infoLabels
#videoitem.infoLabels=item.infoLabels
videoitem.thumbnail=item.thumbnail
#videoitem.title=item.title
if not config.is_xbmc():

View File

@@ -1,8 +1,9 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
@@ -33,8 +34,7 @@ def menupeliculas(item):
Item(channel=item.channel, title="Películas - A-Z", action="peliculas", url=item.url + "/orden:nombre",
fanart=item.fanart, viewmode="movie_with_plot"))
# <ul class="submenu2 subcategorias"><li ><a href="/descargas/subcategoria/4/br-scr-dvdscr">BR-Scr / DVDScr</a></li><li ><a href="/descargas/subcategoria/6/dvdr-full">DVDR - Full</a></li><li ><a href="/descargas/subcategoria/1/dvdrip-vhsrip">DVDRip / VHSRip</a></li><li ><a href="/descargas/subcategoria/3/hd">HD</a></li><li ><a href="/descargas/subcategoria/2/hdrip-bdrip">HDRip / BDRip</a></li><li ><a href="/descargas/subcategoria/35/latino">Latino</a></li><li ><a href="/descargas/subcategoria/5/ts-scr-cam">TS-Scr / CAM</a></li><li ><a href="/descargas/subcategoria/7/vos">VOS</a></li></ul>
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data, '<ul class="submenu2 subcategorias">(.*?)</ul>')
patron = '<a href="([^"]+)">([^<]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -78,7 +78,6 @@ def menudocumentales(item):
return itemlist
# Al llamarse "search" la función, el launcher pide un texto a buscar y lo añade como parámetro
def search(item, texto, categoria=""):
logger.info(item.url + " search " + texto)
itemlist = []
@@ -101,9 +100,7 @@ def search(item, texto, categoria=""):
def peliculas(item, paginacion=True):
logger.info()
url = item.url
# Descarga la página
data = scrapertools.cache_page(url)
data = httptools.downloadpage(url).data
patron = '<li id="ficha-\d+" class="ficha2[^<]+'
patron += '<div class="detalles-ficha"[^<]+'
patron += '<span class="nombre-det">Ficha\: ([^<]+)</span>[^<]+'
@@ -118,16 +115,11 @@ def peliculas(item, paginacion=True):
scrapedtitle = title
scrapedplot = clean_plot(plot)
scrapedurl = urlparse.urljoin(item.url, url)
scrapedthumbnail = urlparse.urljoin("http://www.bajui.org/", thumbnail.replace("_m.jpg", "_g.jpg"))
logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
# Añade al listado de XBMC
scrapedthumbnail = urlparse.urljoin("http://bajui.org/", thumbnail.replace("_m.jpg", "_g.jpg"))
itemlist.append(
Item(channel=item.channel, action="enlaces", title=scrapedtitle, fulltitle=title, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot, extra=scrapedtitle, context="4|5",
fanart=item.fanart, viewmode="movie_with_plot"))
# Extrae el paginador
patron = '<a href="([^"]+)" class="pagina pag_sig">Siguiente \&raquo\;</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
@@ -187,7 +179,7 @@ def enlaces(item):
logger.info()
itemlist = []
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
try:
item.plot = scrapertools.get_match(data, '<span class="ficha-descrip">(.*?)</span>')
@@ -201,18 +193,6 @@ def enlaces(item):
except:
pass
'''
<div id="enlaces-34769"><img id="enlaces-cargando-34769" src="/images/cargando.gif" style="display:none;"/></div>
</li><li id="box-enlace-330690" class="box-enlace">
<div class="box-enlace-cabecera">
<div class="datos-usuario"><img class="avatar" src="images/avatars/116305_p.jpg" />Enlaces de:
<a class="nombre-usuario" href="/usuario/jerobien">jerobien</a> </div>
<div class="datos-act">Actualizado: Hace 8 minutos</div>
<div class="datos-boton-mostrar"><a id="boton-mostrar-330690" class="boton" href="javascript:mostrar_enlaces(330690,'b01de63028139fdd348d');">Mostrar enlaces</a></div>
<div class="datos-servidores"><div class="datos-servidores-cell"><img src="/images/servidores/ul.to.png" title="uploaded.com" border="0" alt="uploaded.com" /><img src="/images/servidores/bitshare.png" title="bitshare.com" border="0" alt="bitshare.com" /><img src="/images/servidores/freakshare.net.jpg" title="freakshare.com" border="0" alt="freakshare.com" /><img src="/images/servidores/letitbit.png" title="letitbit.net" border="0" alt="letitbit.net" /><img src="/images/servidores/turbobit.png" title="turbobit.net" border="0" alt="turbobit.net" /><img src="/images/servidores/rapidgator.png" title="rapidgator.net" border="0" alt="rapidgator.net" /><img src="/images/servidores/cloudzer.png" title="clz.to" border="0" alt="clz.to" /></div></div>
</div>
'''
patron = '<div class="box-enlace-cabecera"[^<]+'
patron += '<div class="datos-usuario"><img class="avatar" src="([^"]+)" />Enlaces[^<]+'
patron += '<a class="nombre-usuario" href="[^"]+">([^<]+)</a[^<]+</div>[^<]+'
@@ -222,19 +202,15 @@ def enlaces(item):
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
logger.debug("matches=" + repr(matches))
for thumbnail, usuario, fecha, id, id2, servidores in matches:
# <img src="/images/servidores/bitshare.png" title="bitshare.com" border="0" alt="bitshare.com" /><img src="/images/servidores/freakshare.net.jpg" title="freakshare.com" border="0" alt="freakshare.com" /><img src="/images/servidores/rapidgator.png" title="rapidgator.net" border="0" alt="rapidgator.net" /><img src="/images/servidores/turbobit.png" title="turbobit.net" border="0" alt="turbobit.net" /><img src="/images/servidores/muchshare.png" title="muchshare.net" border="0" alt="muchshare.net" /><img src="/images/servidores/letitbit.png" title="letitbit.net" border="0" alt="letitbit.net" /><img src="/images/servidores/shareflare.png" title="shareflare.net" border="0" alt="shareflare.net" /><img src="/images/servidores/otros.gif" title="Otros servidores" border="0" alt="Otros" />
patronservidores = '<img src="[^"]+" title="([^"]+)"'
matches2 = re.compile(patronservidores, re.DOTALL).findall(servidores)
lista_servidores = ""
for servidor in matches2:
lista_servidores = lista_servidores + servidor + ", "
lista_servidores = lista_servidores[:-2]
scrapedthumbnail = item.thumbnail
# http://www.bajui.org/ajax/mostrar-enlaces.php?id=330582&code=124767d31bfbf14c3861
scrapedurl = "http://www.bajui.org/ajax/mostrar-enlaces.php?id=" + id + "&code=" + id2
scrapedplot = item.plot
scrapedtitle = "Enlaces de " + usuario + " (" + fecha + ") (" + lista_servidores + ")"
@@ -250,7 +226,7 @@ def enlaces(item):
def findvideos(item):
logger.info()
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.channel = item.channel
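Several bajui hunks above perform the same mechanical migration from the legacy downloader to httptools. A sketch of the substitution, with an illustrative URL (the response object's .data attribute carries the page body, as used throughout this diff):

    from core import httptools
    from core import scrapertools

    url = "http://bajui.org/descargas"  # illustrative, not from the diff

    # removed style:  data = scrapertools.cache_page(url)
    data = httptools.downloadpage(url).data
    matches = scrapertools.find_multiple_matches(data, '<a href="([^"]+)">([^<]+)</a>')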

View File

@@ -1,4 +1,4 @@
# -*- coding: utf-8 -*-
import re
@@ -25,29 +25,15 @@ list_quality = ['default']
def mainlist(item):
logger.info()
thumb_series = get_thumb("channels_tvshow.png")
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(Item(channel=item.channel, action="lista", title="Series", url=host,
thumbnail=thumb_series))
autoplay.show_option(item.channel, itemlist)
return itemlist
"""
def search(item, texto):
logger.info()
texto = texto.replace(" ","+")
item.url = item.url+texto
if texto!='':
return lista(item)
"""
def lista_gen(item):
logger.info()
@@ -179,11 +165,10 @@ def findvideos(item):
for link in itemla:
if server in link:
url = link.replace('" + ID' + server + ' + "', str(id))
if "drive" in server:
server1 = 'Gvideo'
else:
server1 = server
itemlist.append(item.clone(url=url, action="play", server=server1,
title="Enlace encontrado en %s " % (server1.capitalize())))
itemlist.append(item.clone(url=url, action="play",
title="Enlace encontrado en %s "
))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
scrapertools.printMatches(itemlist)
autoplay.start(itemlist, item)
return itemlist
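The findvideos change above stops guessing the server per link ('Gvideo' vs. the raw name) and instead leaves a '%s' placeholder in the title for servertools.get_servers_itemlist() to fill once it has identified each URL's server. A sketch of the pattern, with a hypothetical links list:

    from core import servertools

    def findvideos(item, links):
        # 'links' is a hypothetical list of raw video URLs for this title
        itemlist = [item.clone(url=url, action="play",
                               title="Enlace encontrado en %s") for url in links]
        # get_servers_itemlist() sets i.server per URL; the lambda then
        # renders the final title from the '%s' template
        return servertools.get_servers_itemlist(
            itemlist, lambda i: i.title % i.server.capitalize())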

View File

@@ -512,7 +512,7 @@ def episodios(item):
else:
action = "menu_info_episode"
seasons = scrapertools.find_multiple_matches(data, '<a href="([^"]+)"[^>]+><span class="season-toggle')
seasons = scrapertools.find_single_match(data, '<a href="([^"]+)"[^>]+><span class="season-toggle')
for i, url in enumerate(seasons):
if i != 0:
data_season = httptools.downloadpage(url, add_referer=True).data

View File

@@ -2,11 +2,15 @@
import re
from core import filetools
from core import jsontools
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core import videolibrarytools
from core.item import Item
from platformcode import config, logger
from platformcode import config, platformtools, logger
host = "http://www.clasicofilm.com/"
# Configuracion del canal
@@ -47,7 +51,6 @@ def mainlist(item):
def configuracion(item):
from platformcode import platformtools
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
return ret
@@ -55,13 +58,9 @@ def configuracion(item):
def search(item, texto):
logger.info()
data = httptools.downloadpage(host).data
cx = scrapertools.find_single_match(data, "var cx = '([^']+)'")
texto = texto.replace(" ", "%20")
item.url = "https://www.googleapis.com/customsearch/v1element?key=AIzaSyCVAXiUzRYsML1Pv6RwSG1gunmMikTzQqY&rsz=filtered_cse&num=20&hl=es&sig=0c3990ce7a056ed50667fe0c3873c9b6&cx=%s&q=%s&sort=&googlehost=www.google.com&start=0" % (
cx, texto)
item.url = host + "search?q=%s" % texto
try:
return busqueda(item)
# Se captura la excepción, para no interrumpir al buscador global si un canal falla
@@ -104,7 +103,6 @@ def peliculas(item):
data = httptools.downloadpage(item.url).data
data = scrapertools.find_single_match(data, 'finddatepost\((\{.*?\]\}\})\);')
from core import jsontools
data = jsontools.load(data)["feed"]
for entry in data["entry"]:
@@ -133,7 +131,6 @@ def peliculas(item):
tmdb.set_infoLabels(itemlist, __modo_grafico__)
except:
pass
actualpage = int(scrapertools.find_single_match(item.url, 'start-index=(\d+)'))
totalresults = int(data["openSearch$totalResults"]["$t"])
if actualpage + 20 < totalresults:
@@ -146,48 +143,22 @@ def peliculas(item):
def busqueda(item):
logger.info()
itemlist = []
item.text_color = color2
# Descarga la página
data = httptools.downloadpage(item.url).data
from core import jsontools
data = jsontools.load(data)
for entry in data["results"]:
try:
title = entry["richSnippet"]["metatags"]["ogTitle"]
url = entry["richSnippet"]["metatags"]["ogUrl"]
thumbnail = entry["richSnippet"]["metatags"]["ogImage"]
except:
continue
try:
title_split = re.split(r"\s*\((\d)", title, 1)
year = title_split[1] + scrapertools.find_single_match(title_split[2], '(\d{3})\)')
fulltitle = title_split[0]
except:
fulltitle = title
year = ""
if not "DVD" in title and not "HDTV" in title and not "HD-" in title:
continue
infolabels = {'year': year}
new_item = item.clone(action="findvideos", title=title, fulltitle=fulltitle,
url=url, thumbnail=thumbnail, infoLabels=infolabels,
contentTitle=fulltitle, contentType="movie")
itemlist.append(new_item)
try:
tmdb.set_infoLabels(itemlist, __modo_grafico__)
except:
pass
actualpage = int(scrapertools.find_single_match(item.url, 'start=(\d+)'))
totalresults = int(data["cursor"]["resultCount"])
if actualpage + 20 <= totalresults:
url_next = item.url.replace("start=" + str(actualpage), "start=" + str(actualpage + 20))
itemlist.append(Item(channel=item.channel, action="busqueda", title=">> Página Siguiente", url=url_next))
patron = """post-title entry-titl.*?href='([^']+)'"""
patron += """>([^<]+).*?"""
patron += """src="([^"]+)"""
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
year = scrapertools.find_single_match(scrapedtitle, "\(([0-9]{4})\)")
ctitle = scrapedtitle.split("(")[0].strip()
itemlist.append(item.clone(action = "findvideos",
contentTitle = ctitle,
infoLabels = {"year" : year},
thumbnail = scrapedthumbnail,
title = scrapedtitle,
url = scrapedurl
))
tmdb.set_infoLabels(itemlist, __modo_grafico__)
return itemlist
@@ -197,9 +168,10 @@ def generos(item):
# Descarga la página
data = httptools.downloadpage(item.url).data
patron = '<b>([^<]+)</b><br/>\s*<script src="([^"]+)"'
patron = '<b>([^<]+)</b><br\s*/>\s*<script src="([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedtitle, scrapedurl in matches:
scrapedurl = scrapedurl.replace("&amp;","&")
scrapedurl = scrapedurl.replace("max-results=500", "start-index=1&max-results=20") \
.replace("recentpostslist", "finddatepost")
itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
@@ -210,13 +182,13 @@ def generos(item):
def findvideos(item):
from core import servertools
if item.infoLabels["tmdb_id"]:
tmdb.set_infoLabels_item(item, __modo_grafico__)
data = httptools.downloadpage(item.url).data
iframe = scrapertools.find_single_match(data, '<iframe src="([^"]+)"')
data = data.replace("googleusercontent","malo") # para que no busque enlaces erroneos de gvideo
if "goo.gl/" in iframe:
data += httptools.downloadpage(iframe, follow_redirects=False, only_headers=True).headers.get("location", "")
itemlist = servertools.find_video_items(item, data)
@@ -226,13 +198,11 @@ def findvideos(item):
title = "Añadir película a la videoteca"
if item.infoLabels["imdb_id"] and not library_path.lower().startswith("smb://"):
try:
from core import filetools
movie_path = filetools.join(config.get_videolibrary_path(), 'CINE')
files = filetools.walk(movie_path)
for dirpath, dirname, filename in files:
for f in filename:
if item.infoLabels["imdb_id"] in f and f.endswith(".nfo"):
from core import videolibrarytools
head_nfo, it = videolibrarytools.read_nfo(filetools.join(dirpath, dirname, f))
canales = it.library_urls.keys()
canales.sort()
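The clasicofilm findvideos() hunk resolves goo.gl short links without downloading the target page: with redirects disabled and only_headers set, the 30x response's Location header already names the destination. The idiom, assuming only the httptools call shown in this diff:

    from core import httptools

    short_url = "http://goo.gl/abc123"  # illustrative shortener link
    # headers-only request; keep the 30x so Location can be read directly
    location = httptools.downloadpage(short_url, follow_redirects=False,
                                      only_headers=True).headers.get("location", "")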

View File

@@ -692,12 +692,10 @@ def findvideos(item):
fanart = scrapertools.find_single_match(data, '<div style="background-image.url. ([^\s]+)')
if account:
url += "###" + id + ";" + type
it2.append(
item.clone(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
plot=plot, fanart=fanart, show=item.show, folder=True, infoLabels=infolabels,
contentTitle=item.title, contentType=item.contentType, tipo=option, tipo1=option1, idioma=idioma))
contentTitle=item.show, contentType=item.contentType, tipo=option, tipo1=option1, idioma=idioma))
it2 = servertools.get_servers_itemlist(it2, lambda i: i.title % i.server.capitalize())
it2.sort(key=lambda it: (it.tipo1, it.idioma, it.server))
for item in it2:

View File

@@ -177,7 +177,8 @@ class main(xbmcgui.WindowDialog):
self.infoLabels["originaltitle"] = otmdb.result.get("original_title",
otmdb.result.get("original_name", ""))
self.trailers = otmdb.get_videos()
self.infoLabels["duration"] = int(otmdb.result.get("runtime", 0))
if otmdb.result.get("runtime", 0):
self.infoLabels["duration"] = int(otmdb.result.get("runtime", 0))
else:
self.trailers = []
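The guard added above matters because TMDB may return runtime as null: dict.get() only substitutes its default when the key is missing, not when its value is None, so int(otmdb.result.get("runtime", 0)) can still raise TypeError. A self-contained illustration:

    result = {"runtime": None}  # shape TMDB can return for an unreleased title

    # unguarded (old code): int(result.get("runtime", 0)) -> TypeError,
    # because the key exists and .get() returns None, not the default 0
    runtime = result.get("runtime", 0)
    duration = int(runtime) if runtime else 0  # guarded, as in the new code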

View File

@@ -12,7 +12,7 @@ from core.item import Item
from core.tmdb import Tmdb
from platformcode import logger
host = "http://www.mejortorrent.com"
host = "https://mejortorrent.website"
def mainlist(item):
@@ -29,19 +29,19 @@ def mainlist(item):
thumb_buscar = get_thumb("search.png")
itemlist.append(Item(channel=item.channel, title="Peliculas", action="getlist",
url="http://www.mejortorrent.com/torrents-de-peliculas.html", thumbnail=thumb_pelis))
url= host + "/torrents-de-peliculas.html", thumbnail=thumb_pelis))
itemlist.append(Item(channel=item.channel, title="Peliculas HD", action="getlist",
url="http://www.mejortorrent.com/torrents-de-peliculas-hd-alta-definicion.html",
url= host + "/torrents-de-peliculas-hd-alta-definicion.html",
thumbnail=thumb_pelis_hd))
itemlist.append(Item(channel=item.channel, title="Series", action="getlist",
url="http://www.mejortorrent.com/torrents-de-series.html", thumbnail=thumb_series))
url= host + "/torrents-de-series.html", thumbnail=thumb_series))
itemlist.append(Item(channel=item.channel, title="Series HD", action="getlist",
url="http://www.mejortorrent.com/torrents-de-series-hd-alta-definicion.html",
url= host + "/torrents-de-series-hd-alta-definicion.html",
thumbnail=thumb_series_hd))
itemlist.append(Item(channel=item.channel, title="Series Listado Alfabetico", action="listalfabetico",
url="http://www.mejortorrent.com/torrents-de-series.html", thumbnail=thumb_series_az))
url= host + "/torrents-de-series.html", thumbnail=thumb_series_az))
itemlist.append(Item(channel=item.channel, title="Documentales", action="getlist",
url="http://www.mejortorrent.com/torrents-de-documentales.html", thumbnail=thumb_docus))
url= host + "/torrents-de-documentales.html", thumbnail=thumb_docus))
itemlist.append(Item(channel=item.channel, title="Buscar...", action="search", thumbnail=thumb_buscar))
return itemlist
@@ -55,10 +55,10 @@ def listalfabetico(item):
for letra in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T',
'U', 'V', 'W', 'X', 'Y', 'Z']:
itemlist.append(Item(channel=item.channel, action="getlist", title=letra,
url="http://www.mejortorrent.com/series-letra-" + letra.lower() + ".html"))
url= host + "/series-letra-" + letra.lower() + ".html"))
itemlist.append(Item(channel=item.channel, action="getlist", title="Todas",
url="http://www.mejortorrent.com/series-letra..html"))
url= host + "/series-letra..html"))
return itemlist
@@ -67,7 +67,7 @@ def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = "http://www.mejortorrent.com/secciones.php?sec=buscador&valor=%s" % (texto)
item.url = host + "/secciones.php?sec=buscador&valor=%s" % (texto)
try:
return buscador(item)
@@ -81,30 +81,12 @@ def search(item, texto):
def buscador(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
# pelis
# <a href="/peli-descargar-torrent-9578-Presentimientos.html">
# <img src="/uploads/imagenes/peliculas/Presentimientos.jpg" border="1"></a
#
# series
#
# <a href="/serie-descargar-torrents-11589-11590-Ahora-o-nunca-4-Temporada.html">
# <img src="/uploads/imagenes/series/Ahora o nunca4.jpg" border="1"></a>
#
# docs
#
# <a href="/doc-descargar-torrent-1406-1407-El-sueno-de-todos.html">
# <img border="1" src="/uploads/imagenes/documentales/El sueno de todos.jpg"></a>
# busca series
patron = "<a href='(/serie-descargar-torrent[^']+)'[^>]+>(.*?)</a>"
patron += ".*?<span style='color:gray;'>([^']+)</span>"
patron_enlace = "/serie-descargar-torrents-\d+-\d+-(.*?)\.html"
matches = scrapertools.find_multiple_matches(data, patron)
scrapertools.printMatches(matches)
for scrapedurl, scrapedtitle, scrapedinfo in matches:
title = scrapertools.remove_htmltags(scrapedtitle).decode('iso-8859-1').encode('utf-8')
@@ -119,10 +101,7 @@ def buscador(item):
# busca pelis
patron = "<a href='(/peli-descargar-torrent-[^']+)'[^>]+>(.*?)</a>"
patron_enlace = "/peli-descargar-torrent-\d+(.*?)\.html"
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl, scrapedtitle in matches:
title = scrapertools.remove_htmltags(scrapedtitle).decode('iso-8859-1').encode('utf-8')
url = urlparse.urljoin(item.url, scrapedurl)
@@ -135,10 +114,7 @@ def buscador(item):
patron += "<font Color='darkblue'>(.*?)</font>.*?"
patron += "<td align='right' width='20%'>(.*?)</td>"
patron_enlace = "/doc-descargar-torrent-\d+-\d+-(.*?)\.html"
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl, scrapedtitle, scrapedinfo in matches:
title = scrapedtitle.decode('iso-8859-1').encode('utf8') + " " + scrapedinfo.decode('iso-8859-1').encode('utf8')
url = urlparse.urljoin(item.url, scrapedurl)
@@ -154,23 +130,7 @@ def buscador(item):
def getlist(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
# pelis
# <a href="/peli-descargar-torrent-9578-Presentimientos.html">
# <img src="/uploads/imagenes/peliculas/Presentimientos.jpg" border="1"></a
#
# series
#
# <a href="/serie-descargar-torrents-11589-11590-Ahora-o-nunca-4-Temporada.html">
# <img src="/uploads/imagenes/series/Ahora o nunca4.jpg" border="1"></a>
#
# docs
#
# <a href="/doc-descargar-torrent-1406-1407-El-sueno-de-todos.html">
# <img border="1" src="/uploads/imagenes/documentales/El sueno de todos.jpg"></a>
if item.url.find("peliculas") > -1:
patron = '<a href="(/peli-descargar-torrent[^"]+)">[^<]+'
patron += '<img src="([^"]+)"[^<]+</a>'
@@ -202,27 +162,18 @@ def getlist(item):
action = "episodios"
folder = True
extra = "docus"
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl, scrapedthumbnail in matches:
title = scrapertools.get_match(scrapedurl, patron_enlace)
title = title.replace("-", " ")
url = urlparse.urljoin(item.url, scrapedurl)
thumbnail = urlparse.urljoin(item.url, urllib.quote(scrapedthumbnail))
thumbnail = host + urllib.quote(scrapedthumbnail)
plot = ""
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail, plot=plot,
folder=folder, extra=extra))
matches = re.compile(patron_title, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
# Cambia el título sacado de la URL por un título con más información.
# esta implementación asume que va a encontrar las mismas coincidencias
# que en el bucle anterior, lo cual técnicamente es erróneo, pero que
# funciona mientras no cambien el formato de la página
cnt = 0
for scrapedtitle, notused, scrapedinfo in matches:
title = re.sub('\r\n', '', scrapedtitle).decode('iso-8859-1').encode('utf8').strip()
@@ -244,7 +195,6 @@ def getlist(item):
# Extrae el paginador
patronvideos = "<a href='([^']+)' class='paginar'> Siguiente >>"
matches = re.compile(patronvideos, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
if len(matches) > 0:
scrapedurl = urlparse.urljoin(item.url, matches[0])
@@ -267,18 +217,11 @@ def episodios(item):
item.thumbnail = scrapertools.find_single_match(data,
"src='http://www\.mejortorrent\.com(/uploads/imagenes/" + tabla + "/[a-zA-Z0-9_ ]+.jpg)'")
item.thumbnail = 'http://www.mejortorrent.com' + urllib.quote(item.thumbnail)
item.thumbnail = host + urllib.quote(item.thumbnail)
# <form name='episodios' action='secciones.php?sec=descargas&ap=contar_varios' method='post'>
data = scrapertools.get_match(data,
"<form name='episodios' action='secciones.php\?sec=descargas\&ap=contar_varios' method='post'>(.*?)</form>")
'''
<td bgcolor='#C8DAC8' style='border-bottom:1px solid black;'><a href='/serie-episodio-descargar-torrent-18741-Juego-de-tronos-4x01.html'>4x01 - Episodio en V.O. Sub Esp.</a></td>
<td width='120' bgcolor='#C8DAC8' align='right' style='border-right:1px solid black; border-bottom:1px solid black;'><div style='color:#666666; font-size:9px; margin-right:5px;'>Fecha: 2014-04-07</div></td>
<td width='60' bgcolor='#F1F1F1' align='center' style='border-bottom:1px solid black;'>
<input type='checkbox' name='episodios[1]' value='18741'>
'''
if item.extra == "series":
patron = "<td bgcolor[^>]+><a[^>]+>([^>]+)</a></td>[^<]+"
else:
@@ -289,7 +232,6 @@ def episodios(item):
patron += "<input type='checkbox' name='([^']+)' value='([^']+)'"
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
tmdb_title = re.sub(r'(\s*-\s*)?\d+.*?\s*Temporada|(\s*-\s*)?\s*Miniserie\.?|\(.*\)|\[.*\]', '', item.title).strip()
logger.debug('tmdb_title=' + tmdb_title)
@@ -306,7 +248,7 @@ def episodios(item):
title = scrapedtitle + " (" + fecha + ")"
url = "http://www.mejortorrent.com/secciones.php?sec=descargas&ap=contar_varios"
url = host + "/secciones.php?sec=descargas&ap=contar_varios"
# "episodios%5B1%5D=11744&total_capis=5&tabla=series&titulo=Sea+Patrol+-+2%AA+Temporada"
post = urllib.urlencode({name: value, "total_capis": total_capis, "tabla": tabla, "titulo": titulo})
logger.debug("post=" + post)
@@ -370,20 +312,15 @@ def show_movie_info(item):
patron = "<a href='(secciones.php\?sec\=descargas[^']+)'"
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl in matches:
url = urlparse.urljoin(item.url, scrapedurl)
logger.debug("title=[" + item.title + "], url=[" + url + "], thumbnail=[" + item.thumbnail + "]")
torrent_data = httptools.downloadpage(url).data
logger.debug("torrent_data=" + torrent_data)
# <a href='/uploads/torrents/peliculas/los-juegos-del-hambre-brrip.torrent'>
link = scrapertools.get_match(torrent_data, "<a href='(/uploads/torrents/peliculas/.*?\.torrent)'>")
link = urlparse.urljoin(url, link)
logger.debug("link=" + link)
itemlist.append(Item(channel=item.channel, action="play", server="torrent", title=item.title, url=link,
thumbnail=item.thumbnail, plot=item.plot, fanart=item.fanart, folder=False))
@@ -402,26 +339,12 @@ def play(item):
data = httptools.downloadpage(item.url, post=item.extra).data
logger.debug("data=" + data)
# series
#
# <a href="http://www.mejortorrent.com/uploads/torrents/series/falling-skies-2-01_02.torrent"
# <a href="http://www.mejortorrent.com/uploads/torrents/series/falling-skies-2-03.torrent"
#
# docus
#
# <a href="http://www.mejortorrent.com/uploads/torrents/documentales/En_Suenyos_De_Todos_DVDrip.torrent">El sueño de todos. </a>
params = dict(urlparse.parse_qsl(item.extra))
patron = '<a href="(http://www.mejortorrent.com/uploads/torrents/' + params["tabla"] + '/.*?\.torrent)"'
link = scrapertools.get_match(data, patron)
logger.info("link=" + link)
itemlist.append(Item(channel=item.channel, action="play", server="torrent", title=item.title, url=link,
thumbnail=item.thumbnail, plot=item.plot, folder=False))
return itemlist
def newest(categoria):
@@ -430,12 +353,12 @@ def newest(categoria):
item = Item()
try:
if categoria == 'torrent':
item.url = 'http://www.mejortorrent.com/torrents-de-peliculas.html'
item.url = host + "/torrents-de-peliculas.html"
itemlist = getlist(item)
if itemlist[-1].title == "Pagina siguiente >>":
itemlist.pop()
item.url = 'http://www.mejortorrent.com/torrents-de-series.html'
item.url = host + "/torrents-de-series.html"
itemlist.extend(getlist(item))
if itemlist[-1].title == "Pagina siguiente >>":
itemlist.pop()
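The mejortorrent hunks replace every hard-coded http://www.mejortorrent.com URL with a single host constant, so the next domain move (this commit's is to mejortorrent.website) becomes a one-line change. Sketch of the convention:

    host = "https://mejortorrent.website"  # the only line to touch on a move

    def section_urls():
        # every URL is derived from host instead of repeating the domain
        return {
            "peliculas": host + "/torrents-de-peliculas.html",
            "series": host + "/torrents-de-series.html",
            "buscador": host + "/secciones.php?sec=buscador&valor=%s",
        }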

View File

@@ -238,23 +238,12 @@ def lista(item):
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
#Encuentra los elementos que no tienen plot y carga las paginas correspondientes para obtenerlo#
for item in itemlist:
if item.infoLabels['plot'] == '':
data = httptools.downloadpage(item.url).data
item.fanart = scrapertools.find_single_match(data, 'meta property="og:image" content="([^"]+)" \/>')
item.plot = scrapertools.find_single_match(data,
'<span>Sinopsis:<\/span>.([^<]+)<span '
'class="text-detail-hide"><\/span>.<\/p>')
# Paginacion
if item.title != 'Buscar' and actual != '':
if itemlist != []:
next_page = str(int(actual) + 1)
next_page_url = item.extra + 'pag-' + next_page
if not next_page_url.startswith("http"):
next_page_url = host + next_page_url
next_page_url = host + next_page_url
itemlist.append(
Item(channel=item.channel,
action="lista",
@@ -441,9 +430,8 @@ def get_vip(url):
else:
id = scrapertools.find_single_match(item,'episodes\/(\d+)')
new_url = 'https://www.elreyxhd.com/samir.php?id=%s&tipo=capitulo&idioma=latino&x=&sv=' % id
data=httptools.downloadpage(new_url, follow_redirects=False).headers
itemlist.extend(servertools.find_video_items(data=str(data)))
data=httptools.downloadpage(new_url, follow_redirects=False).headers.get("location", "")
itemlist.append(Item(url=data))
return itemlist
@@ -463,22 +451,17 @@ def findvideos(item):
itemlist.extend(servertools.find_video_items(data=data))
for videoitem in itemlist:
# videoitem.infoLabels = item.infoLabels
videoitem.channel = item.channel
videoitem.infoLabels = item.infoLabels
if videoitem.quality == '' or videoitem.language == '':
videoitem.quality = 'default'
videoitem.language = 'Latino'
if videoitem.server != '':
videoitem.thumbnail = item.thumbnail
else:
videoitem.thumbnail = item.thumbnail
videoitem.server = 'directo'
videoitem.action = 'play'
videoitem.fulltitle = item.title
if videoitem.extra != 'directo' and 'youtube' not in videoitem.url:
videoitem.title = item.contentTitle + ' (' + videoitem.server + ')'
videoitem.title = item.contentTitle + ' (%s)'
itemlist=servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
n = 0
for videoitem in itemlist:
if 'youtube' in videoitem.url:
@@ -490,7 +473,7 @@ def findvideos(item):
itemlist.pop(1)
# Requerido para FilterTools
tmdb.set_infoLabels_itemlist(itemlist, True)
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
@@ -511,6 +494,11 @@ def findvideos(item):
return itemlist
def play(item):
item.thumbnail = item.contentThumbnail
return [item]
def newest(categoria):
logger.info()
itemlist = []

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# -*- Channel TVSeriesdk -*-
# -*- Channel Ver-peliculas -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
@@ -18,7 +18,7 @@ from core import tmdb
__channel__ = "ver-peliculas"
host = "http://ver-peliculas.org/"
host = "http://ver-peliculas.io/"
try:
__modo_grafico__ = config.get_setting('modo_grafico', __channel__)
@@ -122,10 +122,8 @@ def listado(item):
logger.info()
itemlist = []
data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
# logger.debug(data)
pattern = '<a href="([^"]+)"[^>]+><img (?:src)?(?:data-original)?="([^"]+)".*?alt="([^"]+)"'
matches = re.compile(pattern, re.DOTALL).findall(data)
matches = scrapertools.find_multiple_matches(data, pattern)
for url, thumb, title in matches:
year = scrapertools.find_single_match(url, '-(\d+)-online')
title = title.replace("Película", "", 1).partition(" /")[0].partition(":")[0]
@@ -135,10 +133,9 @@ def listado(item):
infoLabels={"year": year},
url=url,
thumbnail=thumb,
contentTitle=title
contentTitle=title.strip()
))
tmdb.set_infoLabels(itemlist, __modo_grafico__)
pagination = scrapertools.find_single_match(data, '<ul class="pagination">(.*?)</ul>')
if pagination:
next_page = scrapertools.find_single_match(pagination, '<a href="#">\d+</a>.*?<a href="([^"]+)">')
@@ -172,8 +169,7 @@ def findvideos(item):
duplicated = []
data = get_source(item.url)
logger.debug(data)
video_info = scrapertools.find_single_match(data, "load_player\('(.*?)','(.*?)'\);")
video_info = scrapertools.find_single_match(data, "load_player\('([^']+).*?([^']+)")
movie_info = scrapertools.find_single_match(item.url,
'http:\/\/ver-peliculas\.(io|org)\/peliculas\/(\d+)-(.*?)-\d{4}-online\.')
movie_host = movie_info[0]
@@ -186,7 +182,7 @@ def findvideos(item):
video_list = json_data['lista']
itemlist = []
for videoitem in video_list:
video_base_url = 'http://ver-peliculas.org/core/videofinal.php'
video_base_url = host + '/core/videofinal.php'
if video_list[videoitem] != None:
video_lang = video_list[videoitem]
languages = ['latino', 'spanish', 'subtitulos']
@@ -200,28 +196,22 @@ def findvideos(item):
playlist = jsontools.load(data)
sources = playlist['playlist']
server = playlist['server']
for video_link in sources:
url = video_link['sources']
# if 'onevideo' in url:
# data = get_source(url)
# g_urls = servertools.findvideos(data=data)
# url = g_urls[0][1]
# server = g_urls[0][0]
if url not in duplicated and server!='drive':
lang = lang.capitalize()
if lang == 'Spanish':
lang = 'Español'
title = '(%s) %s (%s)' % (server, item.title, lang)
title = 'Ver en %s [' + lang + ']'
thumbnail = servertools.guess_server_thumbnail(server)
itemlist.append(item.clone(title=title,
url=url,
server=server,
thumbnail=thumbnail,
action='play'
))
duplicated.append(url)
tmdb.set_infoLabels(itemlist, __modo_grafico__)
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
Item(channel=item.channel,
@@ -235,6 +225,11 @@ def findvideos(item):
return itemlist
def play(item):
item.thumbnail = item.contentThumbnail
return [item]
def newest(category):
logger.info()
item = Item()

View File

@@ -11,6 +11,7 @@ from core import tmdb
from core.item import Item
from platformcode import config, logger
idiomas1 = {"/es.png":"CAST","/en_es.png":"VOSE","/la.png":"LAT","/en.png":"ENG"}
HOST = 'http://www.yaske.ro'
parameters = channeltools.get_channel_parameters('yaske')
fanart_host = parameters['fanart']
@@ -98,14 +99,12 @@ def peliculas(item):
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = 'class="post-item-image btn-play-item".*?'
patron = '(?s)class="post-item-image btn-play-item".*?'
patron += 'href="([^"]+)">.*?'
patron += '<img data-original="([^"]+)".*?'
patron += 'glyphicon-calendar"></i>([^<]+).*?'
patron += 'post-item-flags"> (.*?)</div.*?'
patron += 'post(.*?)</div.*?'
patron += 'text-muted f-14">(.*?)</h3'
matches = scrapertools.find_multiple_matches(data, patron)
patron_next_page = 'href="([^"]+)"> &raquo;'
@@ -119,22 +118,16 @@ def peliculas(item):
matchesidiomas = scrapertools.find_multiple_matches(idiomas, patronidiomas)
idiomas_disponibles = []
for idioma in matchesidiomas:
if idioma.endswith("/la.png"):
idiomas_disponibles.append("LAT")
elif idioma.endswith("/en.png"):
idiomas_disponibles.append("VO")
elif idioma.endswith("/en_es.png"):
idiomas_disponibles.append("VOSE")
elif idioma.endswith("/es.png"):
idiomas_disponibles.append("ESP")
for lang in idiomas1.keys():
if idioma.endswith(lang):
idiomas_disponibles.append(idiomas1[lang])
if idiomas_disponibles:
idiomas_disponibles = "[" + "/".join(idiomas_disponibles) + "]"
contentTitle = scrapertoolsV2.htmlclean(scrapedtitle.strip())
title = "%s %s" % (contentTitle, idiomas_disponibles)
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=scrapedurl,
thumbnail=scrapedthumbnail, contentTitle=contentTitle,
infoLabels={"year": year}, text_color=color1))
# Obtenemos los datos basicos de todas las peliculas mediante multihilos
tmdb.set_infoLabels(itemlist)
@@ -179,36 +172,28 @@ def findvideos(item):
logger.info()
itemlist = []
sublist = []
# Descarga la página
url = "http://widget.olimpo.link/playlist/?tmdb=" + scrapertools.find_single_match(item.url, 'yaske.ro/([0-9]+)')
data = httptools.downloadpage(item.url).data
mtmdb = scrapertools.find_single_match(item.url, 'yaske.ro/([0-9]+)')
patron = '(?s)id="online".*?server="([^"]+)"'
mserver = scrapertools.find_single_match(data, patron)
url = "http://olimpo.link/?tmdb=%s&server=%s" %(mtmdb, mserver)
data = httptools.downloadpage(url).data
if not item.plot:
item.plot = scrapertoolsV2.find_single_match(data, '>Sinopsis</dt> <dd>([^<]+)</dd>')
item.plot = scrapertoolsV2.decodeHtmlentities(item.plot)
patron = '(/embed/[^"]+).*?'
patron += 'quality text-overflow ">([^<]+).*?'
patron += 'title="([^"]+)'
patron = '/\?tmdb=[^"]+.*?domain=(?:www\.|)([^\.]+).*?text-overflow.*?href="([^"]+).*?'
patron += '\[([^\]]+)\].*?\[([^\]]+)\]'
matches = scrapertools.find_multiple_matches(data, patron)
for url, calidad, idioma in matches:
if 'embed' in url:
url = "http://widget.olimpo.link" + url
data = httptools.downloadpage(url).data
url = scrapertools.find_single_match(data, 'iframe src="([^"]+)')
sublist.append(item.clone(channel=item.channel, action="play", url=url, folder=False, text_color=color1, quality=calidad.strip(),
language=idioma.strip()))
sublist = servertools.get_servers_itemlist(sublist, lambda i: "Ver en %s %s" % (i.server, i.quality), True)
# Añadir servidores encontrados, agrupandolos por idioma
for k in ["Español", "Latino", "Subtitulado", "Ingles"]:
for server, url, idioma, calidad in matches:
sublist.append(item.clone(channel=item.channel, action="play", url=url, folder=False, text_color=color1, quality=calidad.strip(),
language=idioma.strip(),
title="Ver en %s %s" %(server, calidad)
))
for k in ["Español", "Latino", "Ingles - Sub Español", "Ingles"]:
lista_idioma = filter(lambda i: i.language == k, sublist)
if lista_idioma:
itemlist.append(Item(channel=item.channel, title=k, fanart=item.fanart, folder=False,
itemlist.append(item.clone(title=k, folder=False,
text_color=color2, text_bold=True, thumbnail=thumbnail_host))
itemlist.extend(lista_idioma)
tmdb.set_infoLabels(itemlist, True)
# Insertar items "Buscar trailer" y "Añadir a la videoteca"
if itemlist and item.extra != "library":
title = "%s [Buscar trailer]" % (item.contentTitle)
@@ -221,3 +206,12 @@ def findvideos(item):
contentTitle=item.contentTitle, extra="library", thumbnail=thumbnail_host))
return itemlist
def play(item):
logger.info()
itemlist = []
ddd = httptools.downloadpage(item.url).data
url = "http://olimpo.link" + scrapertools.find_single_match(ddd, '<iframe src="([^"]+)')
item.url = httptools.downloadpage(url + "&ge=1", follow_redirects=False, only_headers=True).headers.get("location", "")
item.server = servertools.get_server_from_url(item.url)
return [item]
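The yaske hunk replaces a four-branch if/elif chain with the idiomas1 lookup table, so supporting another flag icon is now a single dict entry. A sketch of the table-driven mapping:

    idiomas1 = {"/es.png": "CAST", "/en_es.png": "VOSE",
                "/la.png": "LAT", "/en.png": "ENG"}

    def language_tags(icons):
        # icons: flag-image paths scraped from the listing page
        tags = []
        for icon in icons:
            for suffix, tag in idiomas1.items():
                if icon.endswith(suffix):
                    tags.append(tag)
        return tags

    # language_tags(["flags/es.png", "flags/la.png"]) -> ["CAST", "LAT"]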

View File

@@ -1,42 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "kingvid.tv/(?:embed-|)([A-z0-9]+)",
"url": "http://kingvid.tv/embed-\\1.html"
}
]
},
"free": true,
"id": "kingvid",
"name": "kingvid",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "Incluir en lista negra",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "Incluir en lista de favoritos",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "http://i.imgur.com/oq0tPhY.png?1"
}

View File

@@ -1,46 +0,0 @@
# -*- coding: utf-8 -*-
from core import httptools
from core import scrapertools
from lib import jsunpack
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "<title>watch </title>" in data.lower() or "File was deleted" in data:
return False, "[kingvid] El archivo no existe o ha sido borrado"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url, add_referer = True).data
match = scrapertools.find_single_match(data, "<script type=[\"']text/javascript[\"']>(eval.*?)</script>")
data = jsunpack.unpack(match)
matches = scrapertools.find_multiple_matches(data, 'file\s*:\s*"([^"]+)"\}')
video_urls = []
for video_url in matches:
filename = scrapertools.get_filename_from_url(video_url)[-4:]
if video_url.endswith("smil"):
playpath = video_url.rsplit("/", 1)[1].replace(".smil", "")
rtmp = scrapertools.find_single_match(data, 'image\s*:\s*"([^"]+)"')
rtmp = scrapertools.find_single_match(rtmp, 'i/(.*?)_')
video_url = "rtmp://kingvid.tv:1935/vod/ playpath=mp4:%s_n?h=%s " \
"swfUrl=http://kingvid.tv/player7/jwplayer.flash.swf pageUrl=%s" % \
(rtmp, playpath, page_url)
filename = "RTMP"
video_urls.append([filename + " [kingvid]", video_url])
elif video_url[-4:] in ['.mp4', 'm3u8']:
video_urls.append([filename + " [kingvid]", video_url])
video_urls.sort(key=lambda x: x[0], reverse=True)
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
return video_urls

View File

@@ -16,9 +16,6 @@ def test_video_exists(page_url):
referer = page_url.replace('iframe', 'preview')
data = httptools.downloadpage(page_url, headers={'referer': referer}).data
if "<title>watch </title>" in data.lower():
return False, "[powvideo] El archivo no existe o ha sido borrado"
if "el archivo ha sido borrado por no respetar" in data.lower():
return False, "[powvideo] El archivo no existe o ha sido borrado por no respetar los Terminos de uso"
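Server connectors implement test_video_exists(page_url), returning a (playable, message) pair that the launcher shows instead of a raw playback error. A sketch of the contract using the checks visible in the powvideo hunk above (which of them survived the trim is not marked in this rendering):

    from core import httptools

    def test_video_exists(page_url):
        # powvideo validates pages against a preview referer, per the hunk
        referer = page_url.replace('iframe', 'preview')
        data = httptools.downloadpage(page_url, headers={'referer': referer}).data
        if "<title>watch </title>" in data.lower():
            return False, "[powvideo] El archivo no existe o ha sido borrado"
        if "el archivo ha sido borrado por no respetar" in data.lower():
            return False, ("[powvideo] El archivo no existe o ha sido borrado "
                           "por no respetar los Terminos de uso")
        return True, ""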

View File

@@ -23,7 +23,7 @@ def test_video_exists(page_url):
if "Object not found" in response.data:
return False, "[Rapidvideo] El archivo no existe o ha sido borrado"
if reponse.code == 500:
if response.code == 500:
return False, "[Rapidvideo] Error de servidor, inténtelo más tarde."
return True, ""

View File

@@ -4,7 +4,7 @@
"ignore_urls": [],
"patterns": [
{
"pattern": "(?:thevideo.me|tvad.me)/(?:embed-|)([A-z0-9]+)",
"pattern": "(?:thevideo.me|tvad.me|thevid.net)/(?:embed-|)([A-z0-9]+)",
"url": "http://thevideo.me/embed-\\1.html"
}
]
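The widened pattern above lets the existing connector claim thevid.net mirrors as well. One caveat worth knowing when extending it further: the character class [A-z0-9] spans ASCII 65 through 122 and therefore also matches the punctuation between 'Z' and 'a'; [A-Za-z0-9] would be stricter. A quick check of the new alternation:

    import re

    pattern = r"(?:thevideo.me|tvad.me|thevid.net)/(?:embed-|)([A-z0-9]+)"
    for url in ["http://thevideo.me/embed-abc123.html",
                "https://thevid.net/xyz789"]:
        m = re.search(pattern, url)
        if m:
            # the captured id feeds the canonical embed URL template
            print "http://thevideo.me/embed-%s.html" % m.group(1)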