56 Commits

Author SHA1 Message Date
alfa-addon
0c65e2d20c v2.4.3 2017-11-27 18:13:01 -05:00
Alfa
2a5c27355b Merge pull request #181 from Intel11/patch-1
Updated
2017-11-27 23:54:54 +01:00
Intel1
cad93526ce hdfull: fix thumbnail 2017-11-27 17:40:26 -05:00
Intel1
4cfe60c2a8 Add files via upload 2017-11-27 16:24:11 -05:00
Intel1
10a7535242 Update tvmoviedb.py 2017-11-27 15:38:46 -05:00
Intel1
489cd02192 Update search.py 2017-11-27 15:34:32 -05:00
Intel1
089098c58f yourupload: fix "file does not exist" 2017-11-26 09:05:00 -05:00
Intel1
57e5d32567 zstream: domain no longer exists 2017-11-25 10:08:59 -05:00
Intel1
9adcafc343 zstream: domain no longer exists 2017-11-25 10:08:49 -05:00
Intel1
cf068bc13f letwatch: domain no longer exists 2017-11-25 09:50:57 -05:00
Intel1
7579428087 letwatch: domain no longer exists 2017-11-25 09:50:48 -05:00
Intel1
5ca2ed6212 pelisfox: fix pattern 2017-11-25 09:26:01 -05:00
Intel1
11494549b9 gvideo: pattern updated 2017-11-25 09:19:39 -05:00
Intel1
77423ec5a8 cinetux: fix play 2017-11-24 15:33:20 -05:00
Intel1
be2c691909 Update powvideo.py 2017-11-24 15:23:53 -05:00
Intel1
611a0e28a3 Update descargasmix.py 2017-11-24 14:33:19 -05:00
Intel1
8ea2efb632 peliculasdk: updated 2017-11-24 11:13:06 -05:00
Intel1
f71de37f0f yaske: updated for gvideo 2017-11-24 08:40:33 -05:00
alfa-addon
d4b2a61318 v2.4.2 2017-11-23 23:16:11 -05:00
Alfa
fd1f5c28df Merge pull request #180 from Intel11/ultimo
Updated
2017-11-24 05:04:37 +01:00
Intel1
1e08ee9bd6 Update hdfull.py 2017-11-23 17:08:29 -05:00
Intel1
08ac52b279 Update clasicofilm.py 2017-11-23 16:54:18 -05:00
Intel1
7b52463ce6 Update clasicofilm.py 2017-11-23 16:04:54 -05:00
Intel1
e79364ef93 clasicofilm: fix 2017-11-23 15:50:10 -05:00
Intel1
de4b08606a thevideome: pattern updated 2017-11-23 15:08:22 -05:00
Intel1
21b655b074 ver-peliculas: fix 2017-11-23 14:58:06 -05:00
Intel1
48120ac6ab powvideo: fix 2017-11-23 11:01:13 -05:00
Intel1
5c360bdc68 cartoonlatino: updated for gvideo 2017-11-23 10:52:20 -05:00
Intel1
de267299e7 yaske: optimized movie list 2017-11-23 09:44:51 -05:00
alfa-addon
d0139dfde3 v2.4.1 2017-11-22 16:42:45 -05:00
alfa-addon
7115c2f832 fix 2017-11-22 16:42:26 -05:00
Alfa
85135711de Merge pull request #166 from danielr460/patch-2
Additions
2017-11-22 21:45:04 +01:00
Alfa
8c5c495633 Merge pull request #178 from Intel11/patch-2
Updated
2017-11-22 21:44:49 +01:00
Alfa
fdcf27a5fa Merge pull request #179 from danielr460/master
Minor fixes
2017-11-22 21:44:32 +01:00
Intel1
7523b02e62 Update yaske.py 2017-11-22 15:39:24 -05:00
Intel1
3ca234f8ae yaske: optimized findvideos 2017-11-22 15:28:21 -05:00
Intel1
2848692d79 cinefox: fix 2017-11-22 14:25:14 -05:00
Intel1
d6f73e1f06 Update rapidvideo.py 2017-11-22 10:14:29 -05:00
Intel1
0dbf9c544a Update infoplus.py 2017-11-22 09:42:26 -05:00
Intel1
4fdf382ca3 animemovil: fix search 2017-11-22 09:18:05 -05:00
Intel1
ca943ab6ef bajui: fix thumbnail 2017-11-22 08:39:55 -05:00
danielr460
41a66823e5 Fix for link quality when searching from the video library. 2017-11-22 07:42:04 -05:00
Intel1
a6206420b5 Update powvideo.py 2017-11-21 17:54:53 -05:00
Intel1
1ebe99ede1 mejortorrent: domain changed 2017-11-21 17:06:00 -05:00
Intel1
aaa0149bc8 Delete kingvid.py 2017-11-21 16:03:09 -05:00
Intel1
4cb704a6c3 Delete kingvid.json 2017-11-21 16:02:58 -05:00
Intel1
411b3ce23d pelisplus: updated 2017-11-21 16:00:41 -05:00
Intel1
1b0f91d4f2 powvideo: fix 2017-11-21 14:59:49 -05:00
alfa-addon
f97a283175 fix 2017-11-20 14:49:03 -05:00
alfa-addon
3b02b62a29 v2.4.0 2017-11-20 13:59:03 -05:00
alfa-addon
25f8a9dc4b fixed 2017-11-20 13:58:41 -05:00
Alfa
860bd0f834 Merge pull request #177 from Intel11/ultimo
Updated
2017-11-20 19:38:58 +01:00
Intel1
6bede726f8 gvideo: fix 2017-11-20 10:25:31 -05:00
Intel1
f045d2ee7c pelisplus: fix 2017-11-20 10:23:41 -05:00
Intel1
51c4d7d746 cinetux: fix gvideo 2017-11-20 10:13:28 -05:00
Daniel Rincón Rodríguez
3746d3bfb0 Added autoplay 2017-11-10 14:17:16 -05:00
34 changed files with 961 additions and 1419 deletions

View File

@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="2.3.9" provider-name="Alfa Addon">
<addon id="plugin.video.alfa" name="Alfa" version="2.4.3" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -19,10 +19,12 @@
</assets>
<news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
» seriesblanco » seodiv
» grantorrent ¤ arreglos internos
[COLOR green]Gracias a [COLOR yellow]Danielr460[/COLOR] por su colaboración en esta versión[/COLOR]
</news>
» cinetux » descargasmix
» hdfull » peliculasdk
» pelisfox » yaske
» gvideo » powvideo
» yourupload ¤ arreglos internos
</news>
<description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>
<summary lang="en">Browse web pages using Kodi</summary>
<description lang="en">Browse web pages using Kodi, you can easily watch their video content.</description>

View File

@@ -11,6 +11,12 @@ from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from channels import autoplay
list_servers = ['openload',
'directo'
]
list_quality = ['default']
CHANNEL_HOST = "http://animeflv.co"
CHANNEL_DEFAULT_HEADERS = [
@@ -117,7 +123,8 @@ def __find_series(html):
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(Item(channel=item.channel, action="letras",
@@ -134,6 +141,7 @@ def mainlist(item):
url=CHANNEL_HOST + "/Buscar?s="))
itemlist = renumbertools.show_option(item.channel, itemlist)
autoplay.show_option(item.channel, itemlist)
return itemlist
@@ -179,10 +187,13 @@ def search(item, texto):
show_list = __find_series(html)
items = []
context = renumbertools.context(item)
context2 = autoplay.context
context.extend(context2)
for show in show_list:
title, url, thumbnail, plot = show
items.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
plot=plot, show=title, viewmode="movies_with_plot", context=renumbertools.context(item)))
plot=plot, show=title, viewmode="movies_with_plot", context=context))
except:
import sys
for line in sys.exc_info():
@@ -197,10 +208,13 @@ def series(item):
page_html = get_url_contents(item.url)
show_list = __find_series(page_html)
items = []
context = renumbertools.context(item)
context2 = autoplay.context
context.extend(context2)
for show in show_list:
title, url, thumbnail, plot = show
items.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
plot=plot, show=title, viewmode="movies_with_plot", context=renumbertools.context(item)))
plot=plot, show=title, viewmode="movies_with_plot", context=context))
url_next_page = scrapertools.find_single_match(page_html, REGEX_NEXT_PAGE)
if url_next_page:
@@ -292,4 +306,5 @@ def findvideos(item):
itemlist.append(Item(channel=item.channel, action="play", url=video_url, show=re.escape(item.show),
title="Ver en calidad [%s]" % (qualities[quality_id]), plot=item.plot,
fulltitle=item.title))
autoplay.start(__sort_by_quality(itemlist), item)
return __sort_by_quality(itemlist)
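For reference, the autoplay wiring added to this channel follows the add-on's standard three-call pattern: register the channel's servers and qualities in mainlist, expose the settings entry, and hand findvideos' results to autoplay before returning them. A minimal sketch, using only the calls visible in the hunks above (list_servers and list_quality are the module-level names this diff introduces):

    from channels import autoplay

    list_servers = ['openload', 'directo']   # servers this channel resolves
    list_quality = ['default']               # quality labels it distinguishes

    def mainlist(item):
        autoplay.init(item.channel, list_servers, list_quality)  # register defaults
        itemlist = list()
        # ... append the channel's menu entries ...
        autoplay.show_option(item.channel, itemlist)  # adds the autoplay settings entry
        return itemlist

    def findvideos(item):
        itemlist = []
        # ... fill itemlist with playable Items ...
        autoplay.start(itemlist, item)  # plays the preferred link automatically if enabled
        return itemlist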

View File

@@ -96,7 +96,6 @@ def recientes(item):
action = "peliculas"
if not thumb.startswith("http"):
thumb = "http:%s" % thumb
action ="findvideos"
infoLabels = {'filtro': {"original_language": "ja"}.items()}
itemlist.append(item.clone(action=action, title=title, url=url, thumbnail=thumb, text_color=color3,
contentTitle=contentTitle, contentSerieName=show, infoLabels=infoLabels,

View File

@@ -90,7 +90,7 @@ def start(itemlist, item):
videoitem.contentType=item.contentType
videoitem.episode_id=item.episode_id
videoitem.hasContentDetails=item.hasContentDetails
videoitem.infoLabels=item.infoLabels
#videoitem.infoLabels=item.infoLabels
videoitem.thumbnail=item.thumbnail
#videoitem.title=item.title
if not config.is_xbmc():

View File

@@ -1,8 +1,9 @@
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
import re
import urlparse
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
@@ -33,8 +34,7 @@ def menupeliculas(item):
Item(channel=item.channel, title="Películas - A-Z", action="peliculas", url=item.url + "/orden:nombre",
fanart=item.fanart, viewmode="movie_with_plot"))
# <ul class="submenu2 subcategorias"><li ><a href="/descargas/subcategoria/4/br-scr-dvdscr">BR-Scr / DVDScr</a></li><li ><a href="/descargas/subcategoria/6/dvdr-full">DVDR - Full</a></li><li ><a href="/descargas/subcategoria/1/dvdrip-vhsrip">DVDRip / VHSRip</a></li><li ><a href="/descargas/subcategoria/3/hd">HD</a></li><li ><a href="/descargas/subcategoria/2/hdrip-bdrip">HDRip / BDRip</a></li><li ><a href="/descargas/subcategoria/35/latino">Latino</a></li><li ><a href="/descargas/subcategoria/5/ts-scr-cam">TS-Scr / CAM</a></li><li ><a href="/descargas/subcategoria/7/vos">VOS</a></li></ul>
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data, '<ul class="submenu2 subcategorias">(.*?)</ul>')
patron = '<a href="([^"]+)">([^<]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -78,7 +78,6 @@ def menudocumentales(item):
return itemlist
# Since the function is named "search", the launcher asks for a search string and passes it as a parameter
def search(item, texto, categoria=""):
logger.info(item.url + " search " + texto)
itemlist = []
@@ -101,9 +100,7 @@ def search(item, texto, categoria=""):
def peliculas(item, paginacion=True):
logger.info()
url = item.url
# Download the page
data = scrapertools.cache_page(url)
data = httptools.downloadpage(url).data
patron = '<li id="ficha-\d+" class="ficha2[^<]+'
patron += '<div class="detalles-ficha"[^<]+'
patron += '<span class="nombre-det">Ficha\: ([^<]+)</span>[^<]+'
@@ -118,16 +115,11 @@ def peliculas(item, paginacion=True):
scrapedtitle = title
scrapedplot = clean_plot(plot)
scrapedurl = urlparse.urljoin(item.url, url)
scrapedthumbnail = urlparse.urljoin("http://www.bajui.org/", thumbnail.replace("_m.jpg", "_g.jpg"))
logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
# Add it to the XBMC listing
scrapedthumbnail = urlparse.urljoin("http://bajui.org/", thumbnail.replace("_m.jpg", "_g.jpg"))
itemlist.append(
Item(channel=item.channel, action="enlaces", title=scrapedtitle, fulltitle=title, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot, extra=scrapedtitle, context="4|5",
fanart=item.fanart, viewmode="movie_with_plot"))
# Extract the pager
patron = '<a href="([^"]+)" class="pagina pag_sig">Siguiente \&raquo\;</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
@@ -187,7 +179,7 @@ def enlaces(item):
logger.info()
itemlist = []
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
try:
item.plot = scrapertools.get_match(data, '<span class="ficha-descrip">(.*?)</span>')
@@ -201,18 +193,6 @@ def enlaces(item):
except:
pass
'''
<div id="enlaces-34769"><img id="enlaces-cargando-34769" src="/images/cargando.gif" style="display:none;"/></div>
</li><li id="box-enlace-330690" class="box-enlace">
<div class="box-enlace-cabecera">
<div class="datos-usuario"><img class="avatar" src="images/avatars/116305_p.jpg" />Enlaces de:
<a class="nombre-usuario" href="/usuario/jerobien">jerobien</a> </div>
<div class="datos-act">Actualizado: Hace 8 minutos</div>
<div class="datos-boton-mostrar"><a id="boton-mostrar-330690" class="boton" href="javascript:mostrar_enlaces(330690,'b01de63028139fdd348d');">Mostrar enlaces</a></div>
<div class="datos-servidores"><div class="datos-servidores-cell"><img src="/images/servidores/ul.to.png" title="uploaded.com" border="0" alt="uploaded.com" /><img src="/images/servidores/bitshare.png" title="bitshare.com" border="0" alt="bitshare.com" /><img src="/images/servidores/freakshare.net.jpg" title="freakshare.com" border="0" alt="freakshare.com" /><img src="/images/servidores/letitbit.png" title="letitbit.net" border="0" alt="letitbit.net" /><img src="/images/servidores/turbobit.png" title="turbobit.net" border="0" alt="turbobit.net" /><img src="/images/servidores/rapidgator.png" title="rapidgator.net" border="0" alt="rapidgator.net" /><img src="/images/servidores/cloudzer.png" title="clz.to" border="0" alt="clz.to" /></div></div>
</div>
'''
patron = '<div class="box-enlace-cabecera"[^<]+'
patron += '<div class="datos-usuario"><img class="avatar" src="([^"]+)" />Enlaces[^<]+'
patron += '<a class="nombre-usuario" href="[^"]+">([^<]+)</a[^<]+</div>[^<]+'
@@ -222,19 +202,15 @@ def enlaces(item):
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
logger.debug("matches=" + repr(matches))
for thumbnail, usuario, fecha, id, id2, servidores in matches:
# <img src="/images/servidores/bitshare.png" title="bitshare.com" border="0" alt="bitshare.com" /><img src="/images/servidores/freakshare.net.jpg" title="freakshare.com" border="0" alt="freakshare.com" /><img src="/images/servidores/rapidgator.png" title="rapidgator.net" border="0" alt="rapidgator.net" /><img src="/images/servidores/turbobit.png" title="turbobit.net" border="0" alt="turbobit.net" /><img src="/images/servidores/muchshare.png" title="muchshare.net" border="0" alt="muchshare.net" /><img src="/images/servidores/letitbit.png" title="letitbit.net" border="0" alt="letitbit.net" /><img src="/images/servidores/shareflare.png" title="shareflare.net" border="0" alt="shareflare.net" /><img src="/images/servidores/otros.gif" title="Otros servidores" border="0" alt="Otros" />
patronservidores = '<img src="[^"]+" title="([^"]+)"'
matches2 = re.compile(patronservidores, re.DOTALL).findall(servidores)
lista_servidores = ""
for servidor in matches2:
lista_servidores = lista_servidores + servidor + ", "
lista_servidores = lista_servidores[:-2]
scrapedthumbnail = item.thumbnail
# http://www.bajui.org/ajax/mostrar-enlaces.php?id=330582&code=124767d31bfbf14c3861
scrapedurl = "http://www.bajui.org/ajax/mostrar-enlaces.php?id=" + id + "&code=" + id2
scrapedplot = item.plot
scrapedtitle = "Enlaces de " + usuario + " (" + fecha + ") (" + lista_servidores + ")"
@@ -250,7 +226,7 @@ def enlaces(item):
def findvideos(item):
logger.info()
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.channel = item.channel
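Every hunk in this file makes the same swap: the legacy scrapertools.cache_page helper is replaced by httptools.downloadpage. Only the return type differs; a sketch of the equivalence, plus the header-only variant this changeset uses elsewhere:

    # old: cache_page returned the page body as a string
    data = scrapertools.cache_page(item.url)

    # new: downloadpage returns a response object; the body lives in .data
    data = httptools.downloadpage(item.url).data

    # the response object also exposes headers, which is how goo.gl-style
    # shorteners are resolved in the cinetux/clasicofilm hunks below:
    location = httptools.downloadpage(url, follow_redirects=False,
                                      only_headers=True).headers.get("location", "")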

View File

@@ -1,4 +1,4 @@
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
import re
@@ -25,29 +25,15 @@ list_quality = ['default']
def mainlist(item):
logger.info()
thumb_series = get_thumb("channels_tvshow.png")
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(Item(channel=item.channel, action="lista", title="Series", url=host,
thumbnail=thumb_series))
autoplay.show_option(item.channel, itemlist)
return itemlist
"""
def search(item, texto):
logger.info()
texto = texto.replace(" ","+")
item.url = item.url+texto
if texto!='':
return lista(item)
"""
def lista_gen(item):
logger.info()
@@ -179,11 +165,10 @@ def findvideos(item):
for link in itemla:
if server in link:
url = link.replace('" + ID' + server + ' + "', str(id))
if "drive" in server:
server1 = 'Gvideo'
else:
server1 = server
itemlist.append(item.clone(url=url, action="play", server=server1,
title="Enlace encontrado en %s " % (server1.capitalize())))
itemlist.append(item.clone(url=url, action="play",
title="Enlace encontrado en %s "
))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
scrapertools.printMatches(itemlist)
autoplay.start(itemlist, item)
return itemlist
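The rewritten findvideos no longer maps "drive" to Gvideo by hand; it appends items whose titles carry a %s placeholder and lets servertools identify the server afterwards. A sketch of that hand-off, assembled from the two lines in the hunk above:

    itemlist.append(item.clone(url=url, action="play",
                               title="Enlace encontrado en %s "))
    # get_servers_itemlist detects each item's server from its URL, and the
    # lambda renders the final title with the detected server's name:
    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda i: i.title % i.server.capitalize())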

View File

@@ -512,7 +512,7 @@ def episodios(item):
else:
action = "menu_info_episode"
seasons = scrapertools.find_multiple_matches(data, '<a href="([^"]+)"[^>]+><span class="season-toggle')
seasons = scrapertools.find_single_match(data, '<a href="([^"]+)"[^>]+><span class="season-toggle')
for i, url in enumerate(seasons):
if i != 0:
data_season = httptools.downloadpage(url, add_referer=True).data

View File

@@ -267,15 +267,11 @@ def findvideos(item):
if itemlist:
itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
text_color="magenta"))
# "Add this movie to the video library" option
if item.extra != "library":
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
action="add_pelicula_to_library", url=item.url, fulltitle = item.fulltitle
))
else:
itemlist.append(item.clone(title="No hay enlaces disponibles", action="", text_color=color3))
return itemlist
@@ -300,6 +296,8 @@ def bloque_enlaces(data, filtro_idioma, dict_idiomas, type, item):
url = scrapertools.find_single_match(bloque1, patron)
if "goo.gl" in url:
url = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location", "")
if "drive.php" in url:
scrapedserver = "gvideo"
if "player" in url:
scrapedserver = scrapertools.find_single_match(url, 'player/(\w+)')
if "ok" in scrapedserver: scrapedserver = "okru"
@@ -352,10 +350,10 @@ def bloque_enlaces(data, filtro_idioma, dict_idiomas, type, item):
def play(item):
logger.info()
itemlist = []
if "api.cinetux" in item.url or item.server == "okru":
if "api.cinetux" in item.url or item.server == "okru" or "drive.php" in item.url:
data = httptools.downloadpage(item.url, headers={'Referer': item.extra}).data.replace("\\", "")
id = scrapertools.find_single_match(data, 'img src="[^#]+#(.*?)"')
item.url = "https://youtube.googleapis.com/embed/?status=ok&hl=es&allow_embed=1&ps=docs&partnerid=30&hd=1&autoplay=0&cc_load_policy=1&showinfo=0&docid=" + id
item.url = "http://docs.google.com/get_video_info?docid=" + id
if item.server == "okru":
item.url = "https://ok.ru/videoembed/" + id
elif "links" in item.url or "www.cinetux.me" in item.url:
@@ -369,6 +367,9 @@ def play(item):
scrapedurl = httptools.downloadpage(scrapedurl, follow_redirects=False, only_headers=True).headers.get(
"location", "")
item.url = scrapedurl
item.thumbnail = item.contentThumbnail
item.server = servertools.get_server_from_url(item.url)
return [item]
item.server = ""
itemlist.append(item.clone())
itemlist = servertools.get_servers_itemlist(itemlist)
for i in itemlist:
i.thumbnail = i.contentThumbnail
return itemlist
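Two things change in play(): gvideo ids (now also reached via drive.php URLs) are resolved through Google's get_video_info endpoint instead of the retired youtube.googleapis.com embed URL, and server detection is deferred to servertools. A condensed sketch from the hunks above:

    id = scrapertools.find_single_match(data, 'img src="[^#]+#(.*?)"')
    item.url = "http://docs.google.com/get_video_info?docid=" + id

    # server detection is delegated instead of hard-coded:
    item.server = ""
    itemlist.append(item.clone())
    itemlist = servertools.get_servers_itemlist(itemlist)
    for i in itemlist:
        i.thumbnail = i.contentThumbnail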

View File

@@ -2,11 +2,15 @@
import re
from core import filetools
from core import jsontools
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core import videolibrarytools
from core.item import Item
from platformcode import config, logger
from platformcode import config, platformtools, logger
host = "http://www.clasicofilm.com/"
# Channel configuration
@@ -47,7 +51,6 @@ def mainlist(item):
def configuracion(item):
from platformcode import platformtools
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
return ret
@@ -55,13 +58,9 @@ def configuracion(item):
def search(item, texto):
logger.info()
data = httptools.downloadpage(host).data
cx = scrapertools.find_single_match(data, "var cx = '([^']+)'")
texto = texto.replace(" ", "%20")
item.url = "https://www.googleapis.com/customsearch/v1element?key=AIzaSyCVAXiUzRYsML1Pv6RwSG1gunmMikTzQqY&rsz=filtered_cse&num=20&hl=es&sig=0c3990ce7a056ed50667fe0c3873c9b6&cx=%s&q=%s&sort=&googlehost=www.google.com&start=0" % (
cx, texto)
item.url = host + "search?q=%s" % texto
try:
return busqueda(item)
# The exception is caught so that a failing channel does not break the global search
@@ -104,7 +103,6 @@ def peliculas(item):
data = httptools.downloadpage(item.url).data
data = scrapertools.find_single_match(data, 'finddatepost\((\{.*?\]\}\})\);')
from core import jsontools
data = jsontools.load(data)["feed"]
for entry in data["entry"]:
@@ -133,7 +131,6 @@ def peliculas(item):
tmdb.set_infoLabels(itemlist, __modo_grafico__)
except:
pass
actualpage = int(scrapertools.find_single_match(item.url, 'start-index=(\d+)'))
totalresults = int(data["openSearch$totalResults"]["$t"])
if actualpage + 20 < totalresults:
@@ -146,48 +143,22 @@ def peliculas(item):
def busqueda(item):
logger.info()
itemlist = []
item.text_color = color2
# Download the page
data = httptools.downloadpage(item.url).data
from core import jsontools
data = jsontools.load(data)
for entry in data["results"]:
try:
title = entry["richSnippet"]["metatags"]["ogTitle"]
url = entry["richSnippet"]["metatags"]["ogUrl"]
thumbnail = entry["richSnippet"]["metatags"]["ogImage"]
except:
continue
try:
title_split = re.split(r"\s*\((\d)", title, 1)
year = title_split[1] + scrapertools.find_single_match(title_split[2], '(\d{3})\)')
fulltitle = title_split[0]
except:
fulltitle = title
year = ""
if not "DVD" in title and not "HDTV" in title and not "HD-" in title:
continue
infolabels = {'year': year}
new_item = item.clone(action="findvideos", title=title, fulltitle=fulltitle,
url=url, thumbnail=thumbnail, infoLabels=infolabels,
contentTitle=fulltitle, contentType="movie")
itemlist.append(new_item)
try:
tmdb.set_infoLabels(itemlist, __modo_grafico__)
except:
pass
actualpage = int(scrapertools.find_single_match(item.url, 'start=(\d+)'))
totalresults = int(data["cursor"]["resultCount"])
if actualpage + 20 <= totalresults:
url_next = item.url.replace("start=" + str(actualpage), "start=" + str(actualpage + 20))
itemlist.append(Item(channel=item.channel, action="busqueda", title=">> Página Siguiente", url=url_next))
patron = """post-title entry-titl.*?href='([^']+)'"""
patron += """>([^<]+).*?"""
patron += """src="([^"]+)"""
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
year = scrapertools.find_single_match(scrapedtitle, "\(([0-9]{4})\)")
ctitle = scrapedtitle.split("(")[0].strip()
itemlist.append(item.clone(action = "findvideos",
contentTitle = ctitle,
infoLabels = {"year" : year},
thumbnail = scrapedthumbnail,
title = scrapedtitle,
url = scrapedurl
))
tmdb.set_infoLabels(itemlist, __modo_grafico__)
return itemlist
@@ -197,9 +168,10 @@ def generos(item):
# Download the page
data = httptools.downloadpage(item.url).data
patron = '<b>([^<]+)</b><br/>\s*<script src="([^"]+)"'
patron = '<b>([^<]+)</b><br\s*/>\s*<script src="([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedtitle, scrapedurl in matches:
scrapedurl = scrapedurl.replace("&amp;","&")
scrapedurl = scrapedurl.replace("max-results=500", "start-index=1&max-results=20") \
.replace("recentpostslist", "finddatepost")
itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
@@ -210,13 +182,13 @@ def generos(item):
def findvideos(item):
from core import servertools
if item.infoLabels["tmdb_id"]:
tmdb.set_infoLabels_item(item, __modo_grafico__)
data = httptools.downloadpage(item.url).data
iframe = scrapertools.find_single_match(data, '<iframe src="([^"]+)"')
data = data.replace("googleusercontent","malo") # so it does not pick up bogus gvideo links
if "goo.gl/" in iframe:
data += httptools.downloadpage(iframe, follow_redirects=False, only_headers=True).headers.get("location", "")
itemlist = servertools.find_video_items(item, data)
@@ -226,13 +198,11 @@ def findvideos(item):
title = "Añadir película a la videoteca"
if item.infoLabels["imdb_id"] and not library_path.lower().startswith("smb://"):
try:
from core import filetools
movie_path = filetools.join(config.get_videolibrary_path(), 'CINE')
files = filetools.walk(movie_path)
for dirpath, dirname, filename in files:
for f in filename:
if item.infoLabels["imdb_id"] in f and f.endswith(".nfo"):
from core import videolibrarytools
head_nfo, it = videolibrarytools.read_nfo(filetools.join(dirpath, dirname, f))
canales = it.library_urls.keys()
canales.sort()
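The biggest change in this file is the search path: it drops the Google Custom Search JSON API (hard-coded key, cursor-based paging) in favour of Blogger's own search page scraped with one regex. A condensed sketch of the new flow, assembled from the hunks above:

    def search(item, texto):
        texto = texto.replace(" ", "%20")
        item.url = host + "search?q=%s" % texto  # Blogger search, no API key needed
        return busqueda(item)

    def busqueda(item):
        itemlist = []
        data = httptools.downloadpage(item.url).data
        patron = """post-title entry-titl.*?href='([^']+)'"""
        patron += """>([^<]+).*?"""
        patron += """src="([^"]+)"""
        for url, title, thumb in scrapertools.find_multiple_matches(data, patron):
            year = scrapertools.find_single_match(title, "\(([0-9]{4})\)")
            itemlist.append(item.clone(action="findvideos", title=title, url=url,
                                       thumbnail=thumb,
                                       contentTitle=title.split("(")[0].strip(),
                                       infoLabels={"year": year}))
        tmdb.set_infoLabels(itemlist, __modo_grafico__)
        return itemlist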

View File

@@ -42,12 +42,12 @@ def mainlist(item):
fanart="http://i.imgur.com/ggFFR8o.png"))
itemlist.append(item.clone(title="", action=""))
itemlist.append(item.clone(title="Buscar...", action="search"))
itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False))
itemlist.append(item.clone(action="setting_channel", title="Configurar canal...", text_color="gold", folder=False))
return itemlist
def configuracion(item):
def setting_channel(item):
from platformcode import platformtools
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
@@ -108,7 +108,7 @@ def busqueda(item):
def lista(item):
logger.info()
itemlist = []
itemlist = list()
itemlist.append(item.clone(title="Novedades", action="entradas", url="%s/peliculas" % host))
itemlist.append(item.clone(title="Estrenos", action="entradas", url="%s/peliculas/estrenos" % host))
@@ -125,7 +125,7 @@ def lista(item):
def lista_series(item):
logger.info()
itemlist = []
itemlist = list()
itemlist.append(item.clone(title="Novedades", action="entradas", url="%s/series/" % host))
itemlist.append(item.clone(title="Miniseries", action="entradas", url="%s/series/miniseries" % host))
@@ -254,7 +254,7 @@ def episodios(item):
return itemlist
def epienlaces(item):
def episode_links(item):
logger.info()
itemlist = []
item.text_color = color3
@@ -286,7 +286,7 @@ def epienlaces(item):
else:
if servertools.is_server_enabled(scrapedserver):
try:
servers_module = __import__("servers." + scrapedserver)
# servers_module = __import__("servers." + scrapedserver)
lista_enlaces.append(item.clone(action="play", title=titulo, server=scrapedserver, url=scrapedurl,
extra=item.url))
except:
@@ -302,13 +302,14 @@ def epienlaces(item):
def findvideos(item):
logger.info()
if item.contentSeason!='':
return epienlaces(item)
if item.contentSeason != '':
return episode_links(item)
itemlist = []
item.text_color = color3
data = get_data(item.url)
item.plot = scrapertools.find_single_match(data, 'SINOPSIS(?:</span>|</strong>):(.*?)</p>')
year = scrapertools.find_single_match(data, '(?:<span class="bold">|<strong>)AÑO(?:</span>|</strong>):\s*(\d+)')
if year:
@@ -346,9 +347,9 @@ def findvideos(item):
patron = 'make_links.*?,[\'"]([^"\']+)["\']'
matches = scrapertools.find_multiple_matches(data_online, patron)
for i, code in enumerate(matches):
enlace = mostrar_enlaces(code)
enlaces = servertools.findvideos(data=enlace[0])
if enlaces and "peliculas.nu" not in enlaces:
enlace = show_links(code)
links = servertools.findvideos(data=enlace[0])
if links and "peliculas.nu" not in links:
if i == 0:
extra_info = scrapertools.find_single_match(data_online, '<span class="tooltiptext">(.*?)</span>')
size = scrapertools.find_single_match(data_online, '(?i)TAMAÑO:\s*(.*?)<').strip()
@@ -362,8 +363,8 @@ def findvideos(item):
new_item.title += " +INFO"
itemlist.append(new_item)
title = " Ver vídeo en " + enlaces[0][2]
itemlist.append(item.clone(action="play", server=enlaces[0][2], title=title, url=enlaces[0][1]))
title = " Ver vídeo en " + links[0][2]
itemlist.append(item.clone(action="play", server=links[0][2], title=title, url=links[0][1]))
scriptg = scrapertools.find_single_match(data, "<script type='text/javascript'>str='([^']+)'")
if scriptg:
gvideo = urllib.unquote_plus(scriptg.replace("@", "%"))
@@ -419,9 +420,9 @@ def findvideos(item):
continue
if servertools.is_server_enabled(scrapedserver):
try:
servers_module = __import__("servers." + scrapedserver)
# servers_module = __import__("servers." + scrapedserver)
# Get the number of links
urls = mostrar_enlaces(scrapedurl)
urls = show_links(scrapedurl)
numero = str(len(urls))
titulo = " %s - Nº enlaces: %s" % (titulo, numero)
itemlist.append(item.clone(action="enlaces", title=titulo, extra=scrapedurl, server=scrapedserver))
@@ -449,12 +450,13 @@ def play(item):
headers=headers, follow_redirects=False).data
url = scrapertools.find_single_match(data, 'url":"([^"]+)"').replace("\\", "")
if "enlacesmix" in url:
if "enlacesmix" in url or "enlacesws.com" in url:
data = httptools.downloadpage(url, headers={'Referer': item.extra}, follow_redirects=False).data
url = scrapertools.find_single_match(data, '<iframe.*?src="([^"]+)"')
enlaces = servertools.findvideosbyserver(url, item.server)
if enlaces:
itemlist.append(item.clone(action="play", server=enlaces[0][2], url=enlaces[0][1]))
links = servertools.findvideosbyserver(url, item.server)
if links:
itemlist.append(item.clone(action="play", server=links[0][2], url=links[0][1]))
else:
itemlist.append(item.clone())
@@ -465,13 +467,13 @@ def enlaces(item):
logger.info()
itemlist = []
urls = mostrar_enlaces(item.extra)
urls = show_links(item.extra)
numero = len(urls)
for enlace in urls:
enlaces = servertools.findvideos(data=enlace)
if enlaces:
for link in enlaces:
if "/folder/" in enlace:
for url in urls:
links = servertools.findvideos(data=url)
if links:
for link in links:
if "/folder/" in url:
titulo = link[0]
else:
titulo = "%s - Enlace %s" % (item.title.split("-")[0], str(numero))
@@ -482,7 +484,7 @@ def enlaces(item):
return itemlist
def mostrar_enlaces(data):
def show_links(data):
import base64
data = data.split(",")
len_data = len(data)
@@ -536,6 +538,7 @@ def get_data(url_orig, get_host=False):
return response.data
def newest(categoria):
logger.info()
itemlist = []
@@ -558,7 +561,6 @@ def newest(categoria):
itemlist.extend(entradas(item))
if itemlist[-1].title == ">> Siguiente":
itemlist.pop()
@@ -566,7 +568,7 @@ def newest(categoria):
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
logger.error("%s" % line)
return []
return itemlist
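Besides the Spanish-to-English renames (epienlaces → episode_links, mostrar_enlaces → show_links, enlaces → links), the link-resolution loop keeps the same shape: show_links decodes the packed link list and servertools.findvideos turns each blob into tuples. A sketch of the convention, with the tuple layout inferred from the indices used above (link[0] title, link[1] url, link[2] server):

    urls = show_links(item.extra)                 # decoded list of link blobs
    for url in urls:
        links = servertools.findvideos(data=url)  # [(title, url, server), ...]
        for link in links:
            itemlist.append(item.clone(action="play", server=link[2], url=link[1]))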

View File

@@ -310,7 +310,8 @@ def fichas(item):
for scrapedurl, scrapedthumbnail, scrapedlangs, scrapedrating, scrapedtitle, scrapedid in matches:
thumbnail = scrapedthumbnail.replace("/tthumb/130x190/", "/thumbs/")
#thumbnail = scrapedthumbnail.replace("/tthumb/130x190/", "/thumbs/")
thumbnail = scrapedthumbnail
language = ''
title = scrapedtitle.strip()
show = title
@@ -692,12 +693,10 @@ def findvideos(item):
fanart = scrapertools.find_single_match(data, '<div style="background-image.url. ([^\s]+)')
if account:
url += "###" + id + ";" + type
it2.append(
item.clone(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
plot=plot, fanart=fanart, show=item.show, folder=True, infoLabels=infolabels,
contentTitle=item.title, contentType=item.contentType, tipo=option, tipo1=option1, idioma=idioma))
contentTitle=item.show, contentType=item.contentType, tipo=option, tipo1=option1, idioma=idioma))
it2 = servertools.get_servers_itemlist(it2, lambda i: i.title % i.server.capitalize())
it2.sort(key=lambda it: (it.tipo1, it.idioma, it.server))
for item in it2:

View File

@@ -177,7 +177,8 @@ class main(xbmcgui.WindowDialog):
self.infoLabels["originaltitle"] = otmdb.result.get("original_title",
otmdb.result.get("original_name", ""))
self.trailers = otmdb.get_videos()
self.infoLabels["duration"] = int(otmdb.result.get("runtime", 0))
if otmdb.result.get("runtime", 0):
self.infoLabels["duration"] = int(otmdb.result.get("runtime", 0))
else:
self.trailers = []
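The guard matters because TMDb returns runtime as null for some titles, and int(None) raises a TypeError; the duration label is now set only when a truthy value comes back. The same check, written out:

    runtime = otmdb.result.get("runtime", 0)  # may be None when TMDb has no data
    if runtime:
        self.infoLabels["duration"] = int(runtime)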

View File

@@ -12,7 +12,7 @@ from core.item import Item
from core.tmdb import Tmdb
from platformcode import logger
host = "http://www.mejortorrent.com"
host = "https://mejortorrent.website"
def mainlist(item):
@@ -29,19 +29,19 @@ def mainlist(item):
thumb_buscar = get_thumb("search.png")
itemlist.append(Item(channel=item.channel, title="Peliculas", action="getlist",
url="http://www.mejortorrent.com/torrents-de-peliculas.html", thumbnail=thumb_pelis))
url= host + "/torrents-de-peliculas.html", thumbnail=thumb_pelis))
itemlist.append(Item(channel=item.channel, title="Peliculas HD", action="getlist",
url="http://www.mejortorrent.com/torrents-de-peliculas-hd-alta-definicion.html",
url= host + "/torrents-de-peliculas-hd-alta-definicion.html",
thumbnail=thumb_pelis_hd))
itemlist.append(Item(channel=item.channel, title="Series", action="getlist",
url="http://www.mejortorrent.com/torrents-de-series.html", thumbnail=thumb_series))
url= host + "/torrents-de-series.html", thumbnail=thumb_series))
itemlist.append(Item(channel=item.channel, title="Series HD", action="getlist",
url="http://www.mejortorrent.com/torrents-de-series-hd-alta-definicion.html",
url= host + "/torrents-de-series-hd-alta-definicion.html",
thumbnail=thumb_series_hd))
itemlist.append(Item(channel=item.channel, title="Series Listado Alfabetico", action="listalfabetico",
url="http://www.mejortorrent.com/torrents-de-series.html", thumbnail=thumb_series_az))
url= host + "/torrents-de-series.html", thumbnail=thumb_series_az))
itemlist.append(Item(channel=item.channel, title="Documentales", action="getlist",
url="http://www.mejortorrent.com/torrents-de-documentales.html", thumbnail=thumb_docus))
url= host + "/torrents-de-documentales.html", thumbnail=thumb_docus))
itemlist.append(Item(channel=item.channel, title="Buscar...", action="search", thumbnail=thumb_buscar))
return itemlist
@@ -55,10 +55,10 @@ def listalfabetico(item):
for letra in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T',
'U', 'V', 'W', 'X', 'Y', 'Z']:
itemlist.append(Item(channel=item.channel, action="getlist", title=letra,
url="http://www.mejortorrent.com/series-letra-" + letra.lower() + ".html"))
url= host + "/series-letra-" + letra.lower() + ".html"))
itemlist.append(Item(channel=item.channel, action="getlist", title="Todas",
url="http://www.mejortorrent.com/series-letra..html"))
url= host + "/series-letra..html"))
return itemlist
@@ -67,7 +67,7 @@ def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = "http://www.mejortorrent.com/secciones.php?sec=buscador&valor=%s" % (texto)
item.url = host + "/secciones.php?sec=buscador&valor=%s" % (texto)
try:
return buscador(item)
@@ -81,30 +81,12 @@ def search(item, texto):
def buscador(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
# movies
# <a href="/peli-descargar-torrent-9578-Presentimientos.html">
# <img src="/uploads/imagenes/peliculas/Presentimientos.jpg" border="1"></a
#
# series
#
# <a href="/serie-descargar-torrents-11589-11590-Ahora-o-nunca-4-Temporada.html">
# <img src="/uploads/imagenes/series/Ahora o nunca4.jpg" border="1"></a>
#
# docs
#
# <a href="/doc-descargar-torrent-1406-1407-El-sueno-de-todos.html">
# <img border="1" src="/uploads/imagenes/documentales/El sueno de todos.jpg"></a>
# search for series
patron = "<a href='(/serie-descargar-torrent[^']+)'[^>]+>(.*?)</a>"
patron += ".*?<span style='color:gray;'>([^']+)</span>"
patron_enlace = "/serie-descargar-torrents-\d+-\d+-(.*?)\.html"
matches = scrapertools.find_multiple_matches(data, patron)
scrapertools.printMatches(matches)
for scrapedurl, scrapedtitle, scrapedinfo in matches:
title = scrapertools.remove_htmltags(scrapedtitle).decode('iso-8859-1').encode(
@@ -119,10 +101,7 @@ def buscador(item):
# search for movies
patron = "<a href='(/peli-descargar-torrent-[^']+)'[^>]+>(.*?)</a>"
patron_enlace = "/peli-descargar-torrent-\d+(.*?)\.html"
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl, scrapedtitle in matches:
title = scrapertools.remove_htmltags(scrapedtitle).decode('iso-8859-1').encode('utf-8')
url = urlparse.urljoin(item.url, scrapedurl)
@@ -135,10 +114,7 @@ def buscador(item):
patron += "<font Color='darkblue'>(.*?)</font>.*?"
patron += "<td align='right' width='20%'>(.*?)</td>"
patron_enlace = "/doc-descargar-torrent-\d+-\d+-(.*?)\.html"
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl, scrapedtitle, scrapedinfo in matches:
title = scrapedtitle.decode('iso-8859-1').encode('utf8') + " " + scrapedinfo.decode('iso-8859-1').encode('utf8')
url = urlparse.urljoin(item.url, scrapedurl)
@@ -154,23 +130,7 @@ def buscador(item):
def getlist(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
# movies
# <a href="/peli-descargar-torrent-9578-Presentimientos.html">
# <img src="/uploads/imagenes/peliculas/Presentimientos.jpg" border="1"></a
#
# series
#
# <a href="/serie-descargar-torrents-11589-11590-Ahora-o-nunca-4-Temporada.html">
# <img src="/uploads/imagenes/series/Ahora o nunca4.jpg" border="1"></a>
#
# docs
#
# <a href="/doc-descargar-torrent-1406-1407-El-sueno-de-todos.html">
# <img border="1" src="/uploads/imagenes/documentales/El sueno de todos.jpg"></a>
if item.url.find("peliculas") > -1:
patron = '<a href="(/peli-descargar-torrent[^"]+)">[^<]+'
patron += '<img src="([^"]+)"[^<]+</a>'
@@ -202,27 +162,18 @@ def getlist(item):
action = "episodios"
folder = True
extra = "docus"
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl, scrapedthumbnail in matches:
title = scrapertools.get_match(scrapedurl, patron_enlace)
title = title.replace("-", " ")
url = urlparse.urljoin(item.url, scrapedurl)
thumbnail = urlparse.urljoin(item.url, urllib.quote(scrapedthumbnail))
thumbnail = host + urllib.quote(scrapedthumbnail)
plot = ""
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail, plot=plot,
folder=folder, extra=extra))
matches = re.compile(patron_title, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
# Replace the title taken from the URL with a more informative one.
# This implementation assumes it will find the same matches as the
# previous loop, which is technically wrong, but it works as long as
# the page format does not change.
cnt = 0
for scrapedtitle, notused, scrapedinfo in matches:
title = re.sub('\r\n', '', scrapedtitle).decode('iso-8859-1').encode('utf8').strip()
@@ -244,7 +195,6 @@ def getlist(item):
# Extract the pager
patronvideos = "<a href='([^']+)' class='paginar'> Siguiente >>"
matches = re.compile(patronvideos, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
if len(matches) > 0:
scrapedurl = urlparse.urljoin(item.url, matches[0])
@@ -267,18 +217,11 @@ def episodios(item):
item.thumbnail = scrapertools.find_single_match(data,
"src='http://www\.mejortorrent\.com(/uploads/imagenes/" + tabla + "/[a-zA-Z0-9_ ]+.jpg)'")
item.thumbnail = 'http://www.mejortorrent.com' + urllib.quote(item.thumbnail)
item.thumbnail = host + urllib.quote(item.thumbnail)
# <form name='episodios' action='secciones.php?sec=descargas&ap=contar_varios' method='post'>
data = scrapertools.get_match(data,
"<form name='episodios' action='secciones.php\?sec=descargas\&ap=contar_varios' method='post'>(.*?)</form>")
'''
<td bgcolor='#C8DAC8' style='border-bottom:1px solid black;'><a href='/serie-episodio-descargar-torrent-18741-Juego-de-tronos-4x01.html'>4x01 - Episodio en V.O. Sub Esp.</a></td>
<td width='120' bgcolor='#C8DAC8' align='right' style='border-right:1px solid black; border-bottom:1px solid black;'><div style='color:#666666; font-size:9px; margin-right:5px;'>Fecha: 2014-04-07</div></td>
<td width='60' bgcolor='#F1F1F1' align='center' style='border-bottom:1px solid black;'>
<input type='checkbox' name='episodios[1]' value='18741'>
'''
if item.extra == "series":
patron = "<td bgcolor[^>]+><a[^>]+>([^>]+)</a></td>[^<]+"
else:
@@ -289,7 +232,6 @@ def episodios(item):
patron += "<input type='checkbox' name='([^']+)' value='([^']+)'"
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
tmdb_title = re.sub(r'(\s*-\s*)?\d+.*?\s*Temporada|(\s*-\s*)?\s*Miniserie\.?|\(.*\)|\[.*\]', '', item.title).strip()
logger.debug('tmdb_title=' + tmdb_title)
@@ -306,7 +248,7 @@ def episodios(item):
title = scrapedtitle + " (" + fecha + ")"
url = "http://www.mejortorrent.com/secciones.php?sec=descargas&ap=contar_varios"
url = host + "/secciones.php?sec=descargas&ap=contar_varios"
# "episodios%5B1%5D=11744&total_capis=5&tabla=series&titulo=Sea+Patrol+-+2%AA+Temporada"
post = urllib.urlencode({name: value, "total_capis": total_capis, "tabla": tabla, "titulo": titulo})
logger.debug("post=" + post)
@@ -370,20 +312,15 @@ def show_movie_info(item):
patron = "<a href='(secciones.php\?sec\=descargas[^']+)'"
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl in matches:
url = urlparse.urljoin(item.url, scrapedurl)
logger.debug("title=[" + item.title + "], url=[" + url + "], thumbnail=[" + item.thumbnail + "]")
torrent_data = httptools.downloadpage(url).data
logger.debug("torrent_data=" + torrent_data)
# <a href='/uploads/torrents/peliculas/los-juegos-del-hambre-brrip.torrent'>
link = scrapertools.get_match(torrent_data, "<a href='(/uploads/torrents/peliculas/.*?\.torrent)'>")
link = urlparse.urljoin(url, link)
logger.debug("link=" + link)
itemlist.append(Item(channel=item.channel, action="play", server="torrent", title=item.title, url=link,
thumbnail=item.thumbnail, plot=item.plot, fanart=item.fanart, folder=False))
@@ -402,26 +339,12 @@ def play(item):
data = httptools.downloadpage(item.url, post=item.extra).data
logger.debug("data=" + data)
# series
#
# <a href="http://www.mejortorrent.com/uploads/torrents/series/falling-skies-2-01_02.torrent"
# <a href="http://www.mejortorrent.com/uploads/torrents/series/falling-skies-2-03.torrent"
#
# docus
#
# <a href="http://www.mejortorrent.com/uploads/torrents/documentales/En_Suenyos_De_Todos_DVDrip.torrent">El sue–o de todos. </a>
params = dict(urlparse.parse_qsl(item.extra))
patron = '<a href="(http://www.mejortorrent.com/uploads/torrents/' + params["tabla"] + '/.*?\.torrent)"'
link = scrapertools.get_match(data, patron)
logger.info("link=" + link)
itemlist.append(Item(channel=item.channel, action="play", server="torrent", title=item.title, url=link,
thumbnail=item.thumbnail, plot=item.plot, folder=False))
return itemlist
def newest(categoria):
@@ -430,12 +353,12 @@ def newest(categoria):
item = Item()
try:
if categoria == 'torrent':
item.url = 'http://www.mejortorrent.com/torrents-de-peliculas.html'
item.url = host + "/torrents-de-peliculas.html"
itemlist = getlist(item)
if itemlist[-1].title == "Pagina siguiente >>":
itemlist.pop()
item.url = 'http://www.mejortorrent.com/torrents-de-series.html'
item.url = host + "/torrents-de-series.html"
itemlist.extend(getlist(item))
if itemlist[-1].title == "Pagina siguiente >>":
itemlist.pop()
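All of the hard-coded http://www.mejortorrent.com URLs are rebuilt from a single module-level host constant, so the next domain move is a one-line edit. The pattern, as applied throughout the file:

    host = "https://mejortorrent.website"  # was http://www.mejortorrent.com

    def search(item, texto):
        texto = texto.replace(" ", "+")
        item.url = host + "/secciones.php?sec=buscador&valor=%s" % texto
        return buscador(item)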

View File

@@ -2,6 +2,7 @@
import re
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
@@ -10,74 +11,29 @@ from platformcode import logger
from platformcode import config
from core import tmdb
try:
import xbmc
import xbmcgui
except:
pass
import unicodedata
ACTION_SHOW_FULLSCREEN = 36
ACTION_GESTURE_SWIPE_LEFT = 511
ACTION_SELECT_ITEM = 7
ACTION_PREVIOUS_MENU = 10
ACTION_MOVE_LEFT = 1
ACTION_MOVE_RIGHT = 2
ACTION_MOVE_DOWN = 4
ACTION_MOVE_UP = 3
OPTION_PANEL = 6
OPTIONS_OK = 5
host = "http://www.peliculasdk.com/"
def bbcode_kodi2html(text):
if config.get_platform().startswith("plex") or config.get_platform().startswith("mediaserver"):
import re
text = re.sub(r'\[COLOR\s([^\]]+)\]',
r'<span style="color: \1">',
text)
text = text.replace('[/COLOR]', '</span>')
text = text.replace('[CR]', '<br>')
text = text.replace('[B]', '<b>')
text = text.replace('[/B]', '</b>')
text = text.replace('"color: yellow"', '"color: gold"')
text = text.replace('"color: white"', '"color: auto"')
return text
host = "http://www.peliculasdk.com"
def mainlist(item):
logger.info()
itemlist = []
title = "Estrenos"
title = title.replace(title, bbcode_kodi2html("[COLOR orange]" + title + "[/COLOR]"))
itemlist.append(
Item(channel=item.channel, title=title, action="peliculas", url="http://www.peliculasdk.com/ver/estrenos",
Item(channel=item.channel, title="[COLOR orange]Estrenos[/COLOR]", action="peliculas", url= host + "/ver/estrenos",
fanart="http://s24.postimg.org/z6ulldcph/pdkesfan.jpg",
thumbnail="http://s16.postimg.org/st4x601d1/pdkesth.jpg"))
title = "PelisHd"
title = title.replace(title, bbcode_kodi2html("[COLOR orange]" + title + "[/COLOR]"))
itemlist.append(
Item(channel=item.channel, title=title, action="peliculas", url="http://www.peliculasdk.com/calidad/HD-720/",
Item(channel=item.channel, title="[COLOR orange]PelisHd[/COLOR]", action="peliculas", url= host + "/calidad/HD-720/",
fanart="http://s18.postimg.org/wzqonq3w9/pdkhdfan.jpg",
thumbnail="http://s8.postimg.org/nn5669ln9/pdkhdthu.jpg"))
title = "Pelis HD-Rip"
title = title.replace(title, bbcode_kodi2html("[COLOR orange]" + title + "[/COLOR]"))
itemlist.append(
Item(channel=item.channel, title=title, action="peliculas", url="http://www.peliculasdk.com/calidad/HD-320",
Item(channel=item.channel, title="[COLOR orange]Pelis HD-Rip[/COLOR]", action="peliculas", url= host + "/calidad/HD-320",
fanart="http://s7.postimg.org/3pmnrnu7f/pdkripfan.jpg",
thumbnail="http://s12.postimg.org/r7re8fie5/pdkhdripthub.jpg"))
title = "Pelis Audio español"
title = title.replace(title, bbcode_kodi2html("[COLOR orange]" + title + "[/COLOR]"))
itemlist.append(
Item(channel=item.channel, title=title, action="peliculas", url="http://www.peliculasdk.com/idioma/Espanol/",
Item(channel=item.channel, title="[COLOR orange]Pelis Audio español[/COLOR]", action="peliculas", url= host + "/idioma/Espanol/",
fanart="http://s11.postimg.org/65t7bxlzn/pdkespfan.jpg",
thumbnail="http://s13.postimg.org/sh1034ign/pdkhsphtub.jpg"))
title = "Buscar..."
title = title.replace(title, bbcode_kodi2html("[COLOR orange]" + title + "[/COLOR]"))
itemlist.append(
Item(channel=item.channel, title=title, action="search", url="http://www.peliculasdk.com/calidad/HD-720/",
Item(channel=item.channel, title="[COLOR orange]Buscar...[/COLOR]", action="search", url= host + "/calidad/HD-720/",
fanart="http://s14.postimg.org/ceqajaw2p/pdkbusfan.jpg",
thumbnail="http://s13.postimg.org/o85gsftyv/pdkbusthub.jpg"))
@@ -88,7 +44,7 @@ def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = "http://www.peliculasdk.com/index.php?s=%s&x=0&y=0" % (texto)
item.url = host + "/index.php?s=%s&x=0&y=0" % (texto)
try:
return buscador(item)
@@ -103,11 +59,8 @@ def search(item, texto):
def buscador(item):
logger.info()
itemlist = []
# Download the page
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<div class="karatula".*?'
patron += 'src="([^"]+)".*?'
patron += '<div class="tisearch"><a href="([^"]+)">'
@@ -115,57 +68,38 @@ def buscador(item):
patron += 'Audio:(.*?)</a>.*?'
patron += 'Género:(.*?)</a>.*?'
patron += 'Calidad:(.*?),'
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedthumbnail, scrapedurl, scrapedtitle, scrapedlenguaje, scrapedgenero, scrapedcalidad in matches:
try:
year = scrapertools.get_match(scrapedtitle, '\((\d+)\)')
except:
year = ""
title_fan = re.sub(r"\[.*?\]|\(.*?\)|\d+x\d+.*?Final|-\d+|-|\d+x\d+|Temporada.*?Completa| ;", "",
scrapedtitle).strip()
year = scrapertools.find_single_match(scrapedtitle, '\((\d+)\)')
scrapedcalidad = re.sub(r"<a href.*?>|</a>|</span>", "", scrapedcalidad).strip()
scrapedlenguaje = re.sub(r"<a href.*?>|</a>|</span>", "", scrapedlenguaje).strip()
if not "Adultos" in scrapedgenero and not "Adultos" in scrapedlenguaje and not "Adultos" in scrapedcalidad:
scrapedcalidad = scrapedcalidad.replace(scrapedcalidad,
bbcode_kodi2html("[COLOR orange]" + scrapedcalidad + "[/COLOR]"))
scrapedlenguaje = scrapedlenguaje.replace(scrapedlenguaje,
bbcode_kodi2html("[COLOR orange]" + scrapedlenguaje + "[/COLOR]"))
scrapedtitle = scrapedtitle + "-(Idioma: " + scrapedlenguaje + ")" + "-(Calidad: " + scrapedcalidad + ")"
scrapedtitle = scrapedtitle.replace(scrapedtitle,
bbcode_kodi2html("[COLOR white]" + scrapedtitle + "[/COLOR]"))
extra = year + "|" + title_fan
itemlist.append(Item(channel=item.channel, title=scrapedtitle, url=scrapedurl, action="fanart",
thumbnail=scrapedthumbnail, extra=extra,
scrapedcalidad = "[COLOR orange]" + scrapedcalidad + "[/COLOR]"
scrapedlenguaje = "[COLOR orange]" + scrapedlenguaje + "[/COLOR]"
title = scrapedtitle + "-(Idioma: " + scrapedlenguaje + ")" + "-(Calidad: " + scrapedcalidad + ")"
title = "[COLOR white]" + title + "[/COLOR]"
scrapedtitle = scrapedtitle.split("(")[0].strip()
itemlist.append(Item(channel=item.channel, title=title, url=scrapedurl, action="findvideos",
thumbnail=scrapedthumbnail, contentTitle = scrapedtitle, infoLabels={'year':year},
fanart="http://s18.postimg.org/h9kb22mnt/pdkfanart.jpg", library=True, folder=True))
tmdb.set_infoLabels(itemlist, True)
try:
next_page = scrapertools.get_match(data,
'<span class="current">.*?<a href="(.*?)".*?>Siguiente &raquo;</a></div>')
title = "siguiente>>"
title = title.replace(title, bbcode_kodi2html("[COLOR red]" + title + "[/COLOR]"))
itemlist.append(Item(channel=item.channel, action="buscador", title=title, url=next_page,
itemlist.append(Item(channel=item.channel, action="buscador", title="[COLOR red]siguiente>>[/COLOR]", url=next_page,
thumbnail="http://s6.postimg.org/uej03x4r5/bricoflecha.png",
fanart="http://s18.postimg.org/h9kb22mnt/pdkfanart.jpg", folder=True))
except:
pass
return itemlist
def peliculas(item):
logger.info()
itemlist = []
# Download the page
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|&#.*?;", "", data)
patron = 'style="position:relative;"> '
patron += '<a href="([^"]+)" '
patron += 'title="([^<]+)">'
@@ -173,363 +107,64 @@ def peliculas(item):
patron += 'Audio:(.*?)</br>.*?'
patron += 'Calidad:(.*?)</br>.*?'
patron += 'Género:.*?tag">(.*?)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedlenguaje, scrapedcalidad, scrapedgenero in matches:
try:
year = scrapertools.get_match(scrapedtitle, '\((\d+)\)')
except:
year = ""
title_fan = re.sub(r"\[.*?\]|\(.*?\)|\d+x\d+.*?Final|-\d+|-|\d+x\d+|Temporada.*?Completa| ;", "", scrapedtitle)
year = scrapertools.find_single_match(scrapedtitle, '\((\d+)\)')
scrapedtitle = re.sub(r"\(\d+\)", "", scrapedtitle).strip()
scrapedcalidad = re.sub(r"<a href.*?>|</a>", "", scrapedcalidad).strip()
scrapedlenguaje = re.sub(r"<a href.*?>|</a>", "", scrapedlenguaje).strip()
scrapedlenguaje = scrapedlenguaje.split(',')
if not "Adultos" in scrapedgenero and not "Adultos" in scrapedlenguaje and not "Adultos" in scrapedcalidad:
scrapedtitle = scrapedtitle
extra = year + "|" + title_fan
new_item = Item(channel=item.channel, title=scrapedtitle, url=scrapedurl, action="fanart",
thumbnail=scrapedthumbnail, extra=extra,
fanart="http://s18.postimg.org/h9kb22mnt/pdkfanart.jpg", library=True, folder=True,
language=scrapedlenguaje, quality=scrapedcalidad, contentTitle= scrapedtitle, infoLabels={
'year':year})
# TODO: split the results beforehand
#if year:
# tmdb.set_infoLabels_item(new_item)
itemlist.append(new_item)
itemlist.append(Item(channel=item.channel,
title=scrapedtitle,
url=scrapedurl,
action="findvideos",
thumbnail=scrapedthumbnail,
fanart="http://s18.postimg.org/h9kb22mnt/pdkfanart.jpg", library=True, folder=True,
language=scrapedlenguaje,
quality=scrapedcalidad,
contentTitle = scrapedtitle,
infoLabels={'year':year}
))
tmdb.set_infoLabels(itemlist)
## Pagination
next_page = scrapertools.get_match(data, '<span class="current">.*?<a href="(.*?)".*?>Siguiente &raquo;</a></div>')
title = "siguiente>>"
title = title.replace(title, bbcode_kodi2html("[COLOR red]" + title + "[/COLOR]"))
itemlist.append(Item(channel=item.channel, action="peliculas", title=title, url=next_page,
itemlist.append(Item(channel=item.channel, action="peliculas", title="[COLOR red]siguiente>>[/COLOR]", url=next_page,
thumbnail="http://s6.postimg.org/uej03x4r5/bricoflecha.png",
fanart="http://s18.postimg.org/h9kb22mnt/pdkfanart.jpg", folder=True))
return itemlist
def fanart(item):
logger.info()
itemlist = []
url = item.url
data = scrapertools.cachePage(url)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
title_fan = item.extra.split("|")[1]
title = re.sub(r'Serie Completa|Temporada.*?Completa', '', title_fan)
fulltitle = title
title = title.replace(' ', '%20')
title = ''.join(
(c for c in unicodedata.normalize('NFD', unicode(title.decode('utf-8'))) if unicodedata.category(c) != 'Mn'))
try:
sinopsis = scrapertools.find_single_match(data, '<span class="clms">Sinopsis: <\/span>(.*?)<\/div>')
except:
sinopsis = ""
year = item.extra.split("|")[0]
if not "series" in item.url:
# filmaffinity
url = "http://www.filmaffinity.com/es/advsearch.php?stext={0}&stype%5B%5D=title&country=&genre=&fromyear={1}&toyear={1}".format(
title, year)
data = scrapertools.downloadpage(url)
url_filmaf = scrapertools.find_single_match(data, '<div class="mc-poster">\s*<a title="[^"]*" href="([^"]+)"')
if url_filmaf:
url_filmaf = "http://www.filmaffinity.com%s" % url_filmaf
data = scrapertools.downloadpage(url_filmaf)
else:
try:
url_bing = "http://www.bing.com/search?q=%s+%s+site:filmaffinity.com" % (title.replace(' ', '+'), year)
data = browser(url_bing)
data = re.sub(r'\n|\r|\t|\s{2}|&nbsp;', '', data)
if "myaddrproxy.php" in data:
subdata_bing = scrapertools.get_match(data,
'li class="b_algo"><div class="b_title"><h2>(<a href="/ myaddrproxy.php/http/www.filmaffinity.com/es/film.*?)"')
subdata_bing = re.sub(r'\/myaddrproxy.php\/http\/', '', subdata_bing)
else:
subdata_bing = scrapertools.get_match(data,
'li class="b_algo"><h2>(<a href="http://www.filmaffinity.com/es/film.*?)"')
url_filma = scrapertools.get_match(subdata_bing, '<a href="([^"]+)')
if not "http" in url_filma:
data = scrapertools.cachePage("http://" + url_filma)
else:
data = scrapertools.cachePage(url_filma)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
except:
pass
if sinopsis == " ":
try:
sinopsis = scrapertools.find_single_match(data, '<dd itemprop="description">(.*?)</dd>')
sinopsis = sinopsis.replace("<br><br />", "\n")
sinopsis = re.sub(r"\(FILMAFFINITY\)<br />", "", sinopsis)
except:
pass
try:
rating_filma = scrapertools.get_match(data, 'itemprop="ratingValue" content="(.*?)">')
except:
rating_filma = "Sin puntuacion"
critica = ""
patron = '<div itemprop="reviewBody">(.*?)</div>.*?itemprop="author">(.*?)\s*<i alt="([^"]+)"'
matches_reviews = scrapertools.find_multiple_matches(data, patron)
if matches_reviews:
for review, autor, valoracion in matches_reviews:
review = dhe(scrapertools.htmlclean(review))
review += "\n" + autor + "[CR]"
review = re.sub(r'Puntuac.*?\)', '', review)
if "positiva" in valoracion:
critica += "[COLOR green][B]%s[/B][/COLOR]\n" % review
elif "neutral" in valoracion:
critica += "[COLOR yellow][B]%s[/B][/COLOR]\n" % review
else:
critica += "[COLOR red][B]%s[/B][/COLOR]\n" % review
else:
critica = "[COLOR floralwhite][B]Esta película no tiene críticas todavía...[/B][/COLOR]"
print "ozuu"
print critica
url = "http://api.themoviedb.org/3/search/movie?api_key=2e2160006592024ba87ccdf78c28f49f&query=" + title + "&year=" + year + "&language=es&include_adult=false"
data = scrapertools.cachePage(url)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '"page":1.*?,"id":(.*?),.*?"backdrop_path":(.*?),'
matches = re.compile(patron, re.DOTALL).findall(data)
if len(matches) == 0:
title = re.sub(r":.*|\(.*?\)", "", title)
url = "http://api.themoviedb.org/3/search/movie?api_key=2e2160006592024ba87ccdf78c28f49f&query=" + title + "&language=es&include_adult=false"
data = scrapertools.cachePage(url)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '"page":1.*?,"id":(.*?),.*?"backdrop_path":(.*?),'
matches = re.compile(patron, re.DOTALL).findall(data)
if len(matches) == 0:
extra = item.thumbnail + "|" + "" + "|" + "" + "|" + "Sin puntuación" + "|" + rating_filma + "|" + critica
show = item.fanart + "|" + "" + "|" + sinopsis
posterdb = item.thumbnail
fanart_info = item.fanart
fanart_3 = ""
fanart_2 = item.fanart
category = item.thumbnail
id_scraper = ""
itemlist.append(Item(channel=item.channel, title=item.title, url=item.url, action="findvideos",
thumbnail=item.thumbnail, fanart=item.fanart, extra=extra, show=show,
category=category, library=item.library, fulltitle=fulltitle, folder=True))
for id, fan in matches:
fan = re.sub(r'\\|"', '', fan)
try:
rating = scrapertools.find_single_match(data, '"vote_average":(.*?),')
except:
rating = "Sin puntuación"
id_scraper = id + "|" + "peli" + "|" + rating + "|" + rating_filma + "|" + critica
try:
posterdb = scrapertools.get_match(data, '"page":1,.*?"poster_path":"\\\(.*?)"')
posterdb = "https://image.tmdb.org/t/p/original" + posterdb
except:
posterdb = item.thumbnail
if "null" in fan:
fanart = item.fanart
else:
fanart = "https://image.tmdb.org/t/p/original" + fan
item.extra = fanart
url = "http://api.themoviedb.org/3/movie/" + id + "/images?api_key=2e2160006592024ba87ccdf78c28f49f"
data = scrapertools.cachePage(url)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '"backdrops".*?"file_path":".*?",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)"'
matches = re.compile(patron, re.DOTALL).findall(data)
if len(matches) == 0:
patron = '"backdrops".*?"file_path":"(.*?)",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)"'
matches = re.compile(patron, re.DOTALL).findall(data)
if len(matches) == 0:
fanart_info = item.extra
fanart_3 = ""
fanart_2 = item.extra
for fanart_info, fanart_3, fanart_2 in matches:
fanart_info = "https://image.tmdb.org/t/p/original" + fanart_info
fanart_3 = "https://image.tmdb.org/t/p/original" + fanart_3
fanart_2 = "https://image.tmdb.org/t/p/original" + fanart_2
if fanart == item.fanart:
fanart = fanart_info
# clearart, fanart_2 and logo
url = "http://webservice.fanart.tv/v3/movies/" + id + "?api_key=dffe90fba4d02c199ae7a9e71330c987"
data = scrapertools.cachePage(url)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '"hdmovielogo":.*?"url": "([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
if '"moviedisc"' in data:
disc = scrapertools.get_match(data, '"moviedisc":.*?"url": "([^"]+)"')
if '"movieposter"' in data:
poster = scrapertools.get_match(data, '"movieposter":.*?"url": "([^"]+)"')
if '"moviethumb"' in data:
thumb = scrapertools.get_match(data, '"moviethumb":.*?"url": "([^"]+)"')
if '"moviebanner"' in data:
banner = scrapertools.get_match(data, '"moviebanner":.*?"url": "([^"]+)"')
if len(matches) == 0:
extra = posterdb
# "http://es.seaicons.com/wp-content/uploads/2015/11/Editing-Overview-Pages-1-icon.png"
show = fanart_2 + "|" + fanart_3 + "|" + sinopsis
category = posterdb
itemlist.append(
Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, server="torrent",
thumbnail=posterdb, fanart=item.extra, extra=extra, show=show, category=category,
library=item.library, fulltitle=fulltitle, folder=True))
for logo in matches:
if '"hdmovieclearart"' in data:
clear = scrapertools.get_match(data, '"hdmovieclearart":.*?"url": "([^"]+)"')
if '"moviebackground"' in data:
extra = clear
show = fanart_2 + "|" + fanart_3 + "|" + sinopsis
if '"moviedisc"' in data:
category = disc
else:
category = clear
itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url,
server="torrent", thumbnail=logo, fanart=item.extra, extra=extra,
show=show, category=category, library=item.library, fulltitle=fulltitle,
folder=True))
else:
extra = clear
show = fanart_2 + "|" + fanart_3 + "|" + sinopsis
if '"moviedisc"' in data:
category = disc
else:
category = clear
itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url,
server="torrent", thumbnail=logo, fanart=item.extra, extra=extra,
show=show, category=category, library=item.library, fulltitle=fulltitle,
folder=True))
if '"moviebackground"' in data:
if '"hdmovieclearart"' in data:
clear = scrapertools.get_match(data, '"hdmovieclearart":.*?"url": "([^"]+)"')
extra = clear
show = fanart_2 + "|" + fanart_3 + "|" + sinopsis
if '"moviedisc"' in data:
category = disc
else:
category = clear
else:
extra = logo
show = fanart_2 + "|" + fanart_3 + "|" + sinopsis
if '"moviedisc"' in data:
category = disc
else:
category = logo
itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url,
server="torrent", thumbnail=logo, fanart=item.extra, extra=extra,
show=show, category=category, library=item.library, fulltitle=fulltitle,
folder=True))
if not '"hdmovieclearart"' in data and not '"moviebackground"' in data:
extra = logo
show = fanart_2 + "|" + fanart_3 + "|" + sinopsis
if '"moviedisc"' in data:
category = disc
else:
category = item.extra
itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url,
thumbnail=logo, fanart=item.extra, extra=extra, show=show,
category=category, library=item.library, fulltitle=fulltitle, folder=True))
title_info = "Info"
if posterdb == item.thumbnail:
if '"movieposter"' in data:
thumbnail = poster
else:
thumbnail = item.thumbnail
else:
thumbnail = posterdb
id = id_scraper
extra = extra + "|" + id + "|" + title.encode('utf8')
title_info = title_info.replace(title_info, bbcode_kodi2html("[COLOR skyblue]" + title_info + "[/COLOR]"))
itemlist.append(Item(channel=item.channel, action="info", title=title_info, url=item.url, thumbnail=thumbnail,
fanart=fanart_info, extra=extra, category=category, show=show, folder=False))
return itemlist
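The function above pulls the TMDB id, backdrop path and vote average out of the raw search JSON with regular expressions. As a rough sketch of the same lookup done with a JSON parser (the endpoint, parameters and api_key are the ones used above; the helper name and the use of urllib are our own):

import json
try:
    from urllib2 import urlopen, quote   # Python 2
except ImportError:
    from urllib.request import urlopen   # Python 3
    from urllib.parse import quote

def tmdb_search_movie(title, year, api_key):
    # Same endpoint the regex block above scrapes, parsed as JSON instead
    url = ("http://api.themoviedb.org/3/search/movie?api_key=%s&query=%s"
           "&year=%s&language=es&include_adult=false"
           % (api_key, quote(title), year))
    data = json.loads(urlopen(url).read().decode("utf-8"))
    for result in data.get("results", []):
        yield result["id"], result.get("backdrop_path"), result.get("vote_average")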
def findvideos(item):
logger.info()
itemlist = []
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r"<!--.*?-->", "", data)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
bloque_tab = scrapertools.find_single_match(data, '<div id="verpelicula">(.*?)<div class="tab_container">')
patron = '<li><a href="#([^<]+)"><span class="re">\d<\/span><span class="([^<]+)"><\/span><span class=.*?>([^<]+)<\/span>'
check = re.compile(patron, re.DOTALL).findall(bloque_tab)
servers_data_list = []
patron = '<div id="(tab\d+)" class="tab_content"><script type="text/rocketscript">(\w+)\("([^"]+)"\)</script></div>'
matches = re.compile(patron, re.DOTALL).findall(data)
if len(matches) == 0:
patron = '<div id="(tab\d+)" class="tab_content"><script>(\w+)\("([^"]+)"\)</script></div>'
matches = re.compile(patron, re.DOTALL).findall(data)
for check_tab, server, id in matches:
scrapedplot = scrapertools.get_match(data, '<span class="clms">(.*?)</div></div>')
plotformat = re.compile('(.*?:) </span>', re.DOTALL).findall(scrapedplot)
scrapedplot = scrapedplot.replace(scrapedplot, bbcode_kodi2html("[COLOR white]" + scrapedplot + "[/COLOR]"))
for plot in plotformat:
scrapedplot = scrapedplot.replace(plot, bbcode_kodi2html("[COLOR red][B]" + plot + "[/B][/COLOR]"))
scrapedplot = scrapedplot.replace("</span>", "[CR]")
scrapedplot = scrapedplot.replace(":", "")
if check_tab in str(check):
idioma, calidad = scrapertools.find_single_match(str(check), "" + check_tab + "', '(.*?)', '(.*?)'")
servers_data_list.append([server, id, idioma, calidad])
url = "http://www.peliculasdk.com/Js/videod.js"
data = scrapertools.cachePage(url)
url = host + "/Js/videod.js"
data = httptools.downloadpage(url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
data = data.replace('<iframe width="100%" height="400" scrolling="no" frameborder="0"', '')
patron = 'function (\w+)\(id\).*?'
patron += 'data-src="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
matches = scrapertools.find_multiple_matches(data, patron)
for server, url in matches:
for enlace, id, idioma, calidad in servers_data_list:
if server == enlace:
video_url = re.sub(r"embed\-|\-.*?x.*?\.html|u\'|\'\(", "", str(url))
video_url = re.sub(r"'\+codigo\+'", "", video_url)
video_url = video_url.replace('embed//', 'embed/')
@@ -541,21 +176,13 @@ def findvideos(item):
video_url = scrapertools.get_match(str(url), "u'([^']+)'")
except:
continue
servertitle = scrapertools.get_match(video_url, 'http.*?://(.*?)/')
servertitle = servertitle.replace("embed.", "")
servertitle = servertitle.replace("player.", "")
servertitle = servertitle.replace("api.video.", "")
servertitle = re.sub(r"hqq.tv|hqq.watch", "netutv", servertitle)
servertitle = servertitle.replace("anonymouse.org", "netu")
title = servertitle
logger.debug('servertitle: %s' % servertitle)
server = servertools.get_server_name(servertitle)
logger.debug('server: %s'%server)
title = "Ver en: %s [" + idioma + "][" + calidad + "]"
itemlist.append(
Item(channel=item.channel, title=title, url=video_url, action="play",
item.clone(title=title, url=video_url, action="play",
thumbnail=item.category,
plot=scrapedplot, fanart=item.show, server=server, language=idioma, quality=calidad))
language=idioma, quality=calidad))
tmdb.set_infoLabels(itemlist)
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
if item.library and config.get_videolibrary_support() and len(itemlist) > 0:
infoLabels = {'tmdb_id': item.infoLabels['tmdb_id'],
'title': item.fulltitle}
@@ -563,187 +190,12 @@ def findvideos(item):
action="add_pelicula_to_library", url=item.url, infoLabels=infoLabels,
text_color="0xFFff6666",
thumbnail='http://imgur.com/0gyYvuC.png'))
return itemlist
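Note that the titles built above deliberately keep a "%s" placeholder ("Ver en: %s [...]"), which servertools.get_servers_itemlist later fills with the detected server name through the lambda. A toy illustration of that deferred formatting (ours):

# Each item carries a "%s" slot; the lambda passed to
# get_servers_itemlist fills it once the server is known.
items = [{"title": "Ver en: %s [Latino][HD]", "server": "powvideo"}]
for i in items:
    print(i["title"] % i["server"].capitalize())
# -> Ver en: Powvideo [Latino][HD]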
def play(item):
logger.info()
itemlist = servertools.find_video_items(data=item.url)
data = scrapertools.cache_page(item.url)
listavideos = servertools.findvideos(data)
for video in listavideos:
videotitle = scrapertools.unescape(video[0])
url = item.url
server = video[2]
# xbmctools.addnewvideo( item.channel , "play" , category , server , , url , thumbnail , plot )
itemlist.append(
Item(channel=item.channel, action="play", server=server, title="Trailer - " + videotitle, url=url,
thumbnail=item.thumbnail, plot=item.plot, fulltitle=item.title,
fanart="http://s23.postimg.org/84vkeq863/movietrailers.jpg", folder=False))
return itemlist
def info(item):
logger.info()
itemlist = []
url = item.url
id = item.extra
if "serie" in item.url:
try:
rating_tmdba_tvdb = item.extra.split("|")[6]
if item.extra.split("|")[6] == "":
rating_tmdba_tvdb = "Sin puntuación"
except:
rating_tmdba_tvdb = "Sin puntuación"
else:
rating_tmdba_tvdb = item.extra.split("|")[3]
rating_filma = item.extra.split("|")[4]
print "eztoquee"
print rating_filma
print rating_tmdba_tvdb
filma = "http://s6.postimg.org/6yhe5fgy9/filma.png"
try:
if "serie" in item.url:
title = item.extra.split("|")[8]
else:
title = item.extra.split("|")[6]
title = title.replace("%20", " ")
title = "[COLOR yellow][B]" + title + "[/B][/COLOR]"
except:
title = item.title
try:
if "." in rating_tmdba_tvdb:
check_rat_tmdba = scrapertools.get_match(rating_tmdba_tvdb, '(\d+).')
else:
check_rat_tmdba = rating_tmdba_tvdb
if int(check_rat_tmdba) >= 5 and int(check_rat_tmdba) < 8:
rating = "[COLOR springgreen][B]" + rating_tmdba_tvdb + "[/B][/COLOR]"
elif int(check_rat_tmdba) >= 8 or rating_tmdba_tvdb == 10:
rating = "[COLOR yellow][B]" + rating_tmdba_tvdb + "[/B][/COLOR]"
else:
rating = "[COLOR crimson][B]" + rating_tmdba_tvdb + "[/B][/COLOR]"
print "lolaymaue"
except:
rating = "[COLOR crimson][B]" + rating_tmdba_tvdb + "[/B][/COLOR]"
if "10." in rating:
rating = re.sub(r'10\.\d+', '10', rating)
try:
check_rat_filma = scrapertools.get_match(rating_filma, '(\d)')
print "paco"
print check_rat_filma
if int(check_rat_filma) >= 5 and int(check_rat_filma) < 8:
print "dios"
print check_rat_filma
rating_filma = "[COLOR springgreen][B]" + rating_filma + "[/B][/COLOR]"
elif int(check_rat_filma) >= 8:
rating_filma = "[COLOR yellow][B]" + rating_filma + "[/B][/COLOR]"
else:
rating_filma = "[COLOR crimson][B]" + rating_filma + "[/B][/COLOR]"
print "rojo??"
print check_rat_filma
except:
rating_filma = "[COLOR crimson][B]" + rating_filma + "[/B][/COLOR]"
if not "serie" in item.url:
url_plot = "http://api.themoviedb.org/3/movie/" + item.extra.split("|")[
1] + "?api_key=2e2160006592024ba87ccdf78c28f49f&append_to_response=credits&language=es"
data_plot = scrapertools.cache_page(url_plot)
plot = scrapertools.find_single_match(data_plot, '"overview":"(.*?)",')
tagline = scrapertools.find_single_match(data_plot, '"tagline":(".*?")')
if plot == "":
plot = item.show.split("|")[2]
plot = "[COLOR moccasin][B]" + plot + "[/B][/COLOR]"
plot = re.sub(r"\\", "", plot)
else:
plot = item.show.split("|")[2]
plot = "[COLOR moccasin][B]" + plot + "[/B][/COLOR]"
plot = re.sub(r"\\", "", plot)
if item.extra.split("|")[7] != "":
tagline = item.extra.split("|")[7]
# tagline= re.sub(r',','.',tagline)
else:
tagline = ""
if "serie" in item.url:
check2 = "serie"
icon = "http://s6.postimg.org/hzcjag975/tvdb.png"
foto = item.show.split("|")[1]
if item.extra.split("|")[5] != "":
critica = item.extra.split("|")[5]
else:
critica = "Esta serie no tiene críticas..."
if not ".png" in item.extra.split("|")[0]:
photo = "http://imgur.com/6uXGkrz.png"
else:
photo = item.extra.split("|")[0].replace(" ", "%20")
try:
tagline = "[COLOR aquamarine][B]" + tagline + "[/B][/COLOR]"
except:
tagline = ""
else:
critica = item.extra.split("|")[5]
if "%20" in critica:
critica = "No hay críticas"
icon = "http://imgur.com/SenkyxF.png"
photo = item.extra.split("|")[0].replace(" ", "%20")
foto = item.show.split("|")[1]
try:
if tagline == "\"\"":
tagline = " "
except:
tagline = " "
tagline = "[COLOR aquamarine][B]" + tagline + "[/B][/COLOR]"
check2 = "pelicula"
# "You might also like" recommendations
peliculas = []
if "serie" in item.url:
url_tpi = "http://api.themoviedb.org/3/tv/" + item.show.split("|")[
5] + "/recommendations?api_key=2e2160006592024ba87ccdf78c28f49f&language=es"
data_tpi = scrapertools.cachePage(url_tpi)
tpi = scrapertools.find_multiple_matches(data_tpi,
'id":(.*?),.*?"original_name":"(.*?)",.*?"poster_path":(.*?),')
else:
url_tpi = "http://api.themoviedb.org/3/movie/" + item.extra.split("|")[
1] + "/recommendations?api_key=2e2160006592024ba87ccdf78c28f49f&language=es"
data_tpi = scrapertools.cachePage(url_tpi)
tpi = scrapertools.find_multiple_matches(data_tpi,
'id":(.*?),.*?"original_title":"(.*?)",.*?"poster_path":(.*?),')
for idp, peli, thumb in tpi:
thumb = re.sub(r'"|}', '', thumb)
if "null" in thumb:
thumb = "http://s6.postimg.org/tw1vhymj5/noposter.png"
else:
thumb = "https://image.tmdb.org/t/p/original" + thumb
peliculas.append([idp, peli, thumb])
check2 = check2.replace("pelicula", "movie").replace("serie", "tvshow")
infoLabels = {'title': title, 'plot': plot, 'thumbnail': photo, 'fanart': foto, 'tagline': tagline,
'rating': rating}
item_info = item.clone(info=infoLabels, icon=icon, extra=id, rating=rating, rating_filma=rating_filma,
critica=critica, contentType=check2, thumb_busqueda="http://imgur.com/kdfWEJ6.png")
from channels import infoplus
infoplus.start(item_info, peliculas)
item.thumbnail = item.contentThumbnail
return [item]
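The recommendations block above hits TMDB's /recommendations endpoint and regex-scrapes the id, title and poster from the response. Parsed as JSON, the same call reduces to roughly this sketch (ours; the endpoint and api_key are the ones used above):

import json
try:
    from urllib2 import urlopen          # Python 2
except ImportError:
    from urllib.request import urlopen   # Python 3

def recommendations(media_type, tmdb_id, api_key):
    # media_type is "movie" or "tv", matching the two branches above
    url = ("http://api.themoviedb.org/3/%s/%s/recommendations"
           "?api_key=%s&language=es" % (media_type, tmdb_id, api_key))
    data = json.loads(urlopen(url).read().decode("utf-8"))
    for r in data.get("results", []):
        yield r["id"], r.get("title") or r.get("original_name"), r.get("poster_path")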
def newest(categoria):
@@ -754,49 +206,13 @@ def newest(categoria):
if categoria == 'castellano':
item.url = host + "idioma/Espanol/"
item.action = "peliculas"
itemlist = peliculas(item)
if itemlist[-1].action == "peliculas":
itemlist.pop()
# Catch the exception so the "newest" section is not interrupted when a single channel fails
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def browser(url):
import mechanize
# Use a mechanize Browser to work around problems with the Bing search
br = mechanize.Browser()
# Browser options
br.set_handle_equiv(False)
br.set_handle_gzip(True)
br.set_handle_redirect(True)
br.set_handle_referer(False)
br.set_handle_robots(False)
# Follow refresh 0 but don't hang on refresh > 0
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
# Want debugging messages?
# br.set_debug_http(True)
# br.set_debug_redirects(True)
# br.set_debug_responses(True)
# User-Agent (this is cheating, ok?)
br.addheaders = [('User-agent',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/600.7.12 (KHTML, like Gecko) Version/7.1.7 Safari/537.85.16')]
# br.addheaders =[('Cookie','SRCHD=AF=QBRE; domain=.bing.com; expires=25 de febrero de 2018 13:00:28 GMT+1; MUIDB=3B942052D204686335322894D3086911; domain=www.bing.com;expires=24 de febrero de 2018 13:00:28 GMT+1')]
# Open the requested page
r = br.open(url)
response = r.read()
if "img,divreturn" in response:
# Bing served a blocked or proxied page; retry through the ssl proxy
r = br.open("http://ssl-proxy.my-addr.org/myaddrproxy.php/" + url)
response = r.read()
return response
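A hypothetical call site for the helper above (mechanize is a third-party package; the query URL here is made up for illustration):

# Fetch a Bing results page; browser() falls back to the ssl proxy
# automatically when Bing serves its blocked page.
html = browser("http://www.bing.com/search?q=site:filmaffinity.com+Inception")
if "filmaffinity.com" in html:
    pass  # parse the first result, as the channel code does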


@@ -74,16 +74,15 @@ def lista(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
if item.seccion != 'actor':
patron = '<li class=item-serie.*?><a href=(.*?) title=(.*?)><img src=(.*?) alt=><span '
patron += 'class=s-title><strong>.*?<\/strong><p>(.*?)<\/p><\/span><\/a><\/li>'
patron = '(?s)<li class="item-serie.*?href="([^"]+).*?title="([^"]+).*?data-src="([^"]+).*?<span '
patron += 'class="s-title">.*?<p>([^<]+)'
else:
patron = '<li><a href=(\/pelicula\/.*?)><figure><img src=(.*?) alt=><\/figure><p class=title>(.*?)<\/p><p '
patron += 'class=year>(.*?)<\/p>'
matches = re.compile(patron, re.DOTALL).findall(data)
patron = '(?s)<li>.*?<a href="(/pelicula/[^"]+)".*?<figure>.*?data-src="([^"]+)".*?p class="title">([^<]+).*?'
patron += 'year">([^<]+)'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedyear in matches:
url = host + scrapedurl
if item.seccion != 'actor':
@@ -109,11 +108,11 @@ def lista(item):
# Pagination
if itemlist != []:
actual_page = scrapertools.find_single_match(data, '<a class=active item href=.*?>(.*?)<\/a>')
actual_page = scrapertools.find_single_match(data, '<a class="active item" href=".*?">(.*?)<\/a>')
if actual_page:
next_page_num = int(actual_page) + 1
next_page = scrapertools.find_single_match(data,
'<li><a class= item href=(.*?)\?page=.*?&limit=.*?>Siguiente')
'<li><a class=" item" href="(.*?)\?page=.*?&limit=.*?">Siguiente')
next_page_url = host + next_page + '?page=%s' % next_page_num
if next_page != '':
itemlist.append(Item(channel=item.channel,
@@ -129,15 +128,14 @@ def seccion(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
if item.seccion == 'generos':
patron = '<a href=(\/peliculas\/[\D].*?\/) title=Películas de .*?>(.*?)<\/a>'
patron = '<a href="(\/peliculas\/[\D].*?\/)" title="Películas de .*?>(.*?)<\/a>'
elif item.seccion == 'anios':
patron = '<li class=.*?><a href=(.*?)>(\d{4})<\/a> <\/li>'
patron = '<li class=.*?><a href="(.*?)">(\d{4})<\/a> <\/li>'
elif item.seccion == 'actor':
patron = '<li><a href=(.*?)><div.*?<div class=photopurple title=(.*?)><\/div><img src=(.*?)><\/figure>'
matches = re.compile(patron, re.DOTALL).findall(data)
patron = '<li><a href="(.*?)".*?div.*?<div class="photopurple" title="(.*?)">.*?data-src="([^"]+)'
matches = scrapertools.find_multiple_matches(data, patron)
if item.seccion != 'actor':
for scrapedurl, scrapedtitle in matches:
title = scrapedtitle.decode('utf-8')
@@ -158,7 +156,6 @@ def seccion(item):
))
else:
for scrapedurl, scrapedname, scrapedthumbnail in matches:
thumbnail = scrapedthumbnail
fanart = ''
title = scrapedname
url = host + scrapedurl
@@ -168,14 +165,14 @@ def seccion(item):
title=title,
fulltitle=item.title,
url=url,
thumbnail=thumbnail,
thumbnail=scrapedthumbnail,
fanart=fanart,
seccion=item.seccion
))
# Pagination
if itemlist != []:
next_page = scrapertools.find_single_match(data, '<li><a class= item href=(.*?)&limit=.*?>Siguiente <')
next_page = scrapertools.find_single_match(data, '<li><a class=" item" href="(.*?)&limit=.*?>Siguiente <')
next_page_url = host + next_page
if next_page != '':
itemlist.append(item.clone(action="seccion",
@@ -240,7 +237,6 @@ def findvideos(item):
))
for videoitem in templist:
data = httptools.downloadpage(videoitem.url).data
urls_list = scrapertools.find_multiple_matches(data, 'var.*?_SOURCE\s+=\s+\[(.*?)\]')
for element in urls_list:
json_data=jsontools.load(element)
@@ -260,19 +256,19 @@ def findvideos(item):
for urls in video_list:
if urls.language == '':
urls.language = videoitem.language
urls.title = item.title + '(%s) (%s)' % (urls.language, urls.server)
urls.title = item.title + urls.language + '(%s)'
for video_url in video_list:
video_url.channel = item.channel
video_url.action = 'play'
video_url.quality = quality
video_url.server = ""
video_url.infoLabels = item.infoLabels
else:
server = servertools.get_server_from_url(url)
video_list.append(item.clone(title=item.title, url=url, action='play', quality = quality,
server=server))
video_list.append(item.clone(title=item.title, url=url, action='play', quality = quality
))
video_list = servertools.get_servers_itemlist(video_list, lambda i: i.title % i.server.capitalize())
tmdb.set_infoLabels(video_list)
if config.get_videolibrary_support() and len(video_list) > 0 and item.extra != 'findvideos':
video_list.append(
Item(channel=item.channel,
@@ -308,3 +304,8 @@ def newest(categoria):
return []
return itemlist
def play(item):
item.thumbnail = item.contentThumbnail
return [item]


@@ -145,7 +145,9 @@ def menuseries(item):
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
item.url = host + 'busqueda/?s=' + texto
if not item.extra:
item.extra = 'peliculas/'
try:
if texto != '':
return lista(item)
@@ -217,7 +219,7 @@ def lista(item):
else:
item.extra = item.extra.rstrip('s/')
if item.extra in url:
itemlist.append(
new_item=(
Item(channel=item.channel,
contentType=tipo,
action=accion,
@@ -236,21 +238,12 @@ def lista(item):
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Find the items without a plot and load their pages to obtain it
for item in itemlist:
if item.infoLabels['plot'] == '':
data = httptools.downloadpage(item.url).data
item.fanart = scrapertools.find_single_match(data, 'meta property="og:image" content="([^"]+)" \/>')
item.plot = scrapertools.find_single_match(data,
'<span>Sinopsis:<\/span>.([^<]+)<span '
'class="text-detail-hide"><\/span>.<\/p>')
# Pagination
if item.title != 'Buscar' and actual != '':
if itemlist != []:
next_page = str(int(actual) + 1)
next_page_url = host + item.extra + 'pag-' + next_page
next_page_url = item.extra + 'pag-' + next_page
if not next_page_url.startswith("http"):
next_page_url = host + next_page_url
itemlist.append(
Item(channel=item.channel,
action="lista",
@@ -437,9 +430,8 @@ def get_vip(url):
else:
id = scrapertools.find_single_match(item,'episodes\/(\d+)')
new_url = 'https://www.elreyxhd.com/samir.php?id=%s&tipo=capitulo&idioma=latino&x=&sv=' % id
data=httptools.downloadpage(new_url, follow_redirects=False).headers
itemlist.extend(servertools.find_video_items(data=str(data)))
data=httptools.downloadpage(new_url, follow_redirects=False).headers.get("location", "")
itemlist.append(Item(url=data))
return itemlist
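get_vip above disables redirect-following on purpose: the samir.php endpoint answers with a 302 whose Location header is the real video URL. A standalone sketch of the same trick using only the standard library (host and path are placeholders):

try:
    import httplib                    # Python 2
except ImportError:
    import http.client as httplib    # Python 3

def redirect_target(host, path):
    # Request the resource but read the Location header instead of
    # following the redirect.
    conn = httplib.HTTPSConnection(host)
    conn.request("GET", path)
    return conn.getresponse().getheader("Location", "")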
@@ -459,22 +451,17 @@ def findvideos(item):
itemlist.extend(servertools.find_video_items(data=data))
for videoitem in itemlist:
# videoitem.infoLabels = item.infoLabels
videoitem.channel = item.channel
videoitem.infoLabels = item.infoLabels
if videoitem.quality == '' or videoitem.language == '':
videoitem.quality = 'default'
videoitem.language = 'Latino'
if videoitem.server != '':
videoitem.thumbnail = item.thumbnail
else:
videoitem.thumbnail = item.thumbnail
videoitem.server = 'directo'
videoitem.action = 'play'
videoitem.fulltitle = item.title
if videoitem.extra != 'directo' and 'youtube' not in videoitem.url:
videoitem.title = item.contentTitle + ' (' + videoitem.server + ')'
videoitem.title = item.contentTitle + ' (%s)'
itemlist=servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
n = 0
for videoitem in itemlist:
if 'youtube' in videoitem.url:
@@ -486,7 +473,7 @@ def findvideos(item):
itemlist.pop(1)
# Requerido para FilterTools
tmdb.set_infoLabels_itemlist(itemlist, True)
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
@@ -507,6 +494,11 @@ def findvideos(item):
return itemlist
def play(item):
item.thumbnail = item.contentThumbnail
return [item]
def newest(categoria):
logger.info()
itemlist = []


@@ -24,6 +24,13 @@ def mainlist(item):
itemlist.append(Item(channel=item.channel, action="search",
title="Buscar por titulo", context=context,
thumbnail=get_thumb("search.png")))
thumbnail = get_thumb("search_star.png")
itemlist.append(Item(channel='tvmoviedb', title="Buscar actor/actriz", action="search_",
search={'url': 'search/person', 'language': 'es', 'page': 1}, star=True,
thumbnail=thumbnail))
itemlist.append(Item(channel=item.channel, action="search",
title="Buscar por categorias (búsqueda avanzada)", extra="categorias",
context=context,


@@ -74,6 +74,15 @@ def configuracion(item):
platformtools.itemlist_refresh()
return ret
def search_star(item):
logger.info()
itemlist = []
item.type='movie'
itemlist.extend(search_(item))
item.type='tvshow'
itemlist.extend(search_(item))
return itemlist
def search_(item):
texto = platformtools.dialog_input(heading=item.title)
@@ -95,7 +104,17 @@ def search_(item):
item.search['query'] = texto
item.action = "listado_tmdb"
return listado_tmdb(item)
if item.star == True:
types = ['movie','tv']
itemlist = []
for type in types:
item.contentType = type
item.search['type']=type
itemlist.extend(listado_tmdb(item))
return itemlist
else:
return listado_tmdb(item)
def busqueda(item):
@@ -338,6 +357,7 @@ def listado_tmdb(item):
# Actor listing
if 'nm' in item.infoLabels['imdb_id']:
try:
ob_tmdb = Tmdb(discover=item.search, tipo=item.extra, idioma_busqueda=langt)
id_cast = ob_tmdb.result["person_results"][0]["id"]
if item.contentType == "movie":
@@ -429,12 +449,13 @@ def listado_tmdb(item):
else:
# For a person search, the title and fanart include a movie the person is known for
known_for = ob_tmdb.results[i].get("known_for")
type = item.search['type']
if known_for:
from random import randint
random = randint(0, len(known_for) - 1)
new_item.title = "%s [COLOR %s](%s)[/COLOR]" \
new_item.title = "%s [COLOR %s](%s)[/COLOR] (%s)" \
% (new_item.contentTitle, color6,
known_for[random].get("title", known_for[random].get("name")))
known_for[random].get("title", known_for[random].get("name")), type)
if known_for[random]["backdrop_path"]:
new_item.fanart = 'http://image.tmdb.org/t/p/original' + known_for[random]["backdrop_path"]
else:
@@ -536,12 +557,12 @@ def detalles(item):
itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler",
text_color=color5))
try:
images['tmdb'] = ob_tmdb.result["images"]
itemlist.append(item.clone(action="imagenes", title="Lista de Imágenes", text_color=color5, images=images,
extra="menu"))
except:
pass
# try:
# images['tmdb'] = ob_tmdb.result["images"]
# itemlist.append(item.clone(action="imagenes", title="Lista de Imágenes", text_color=color5, images=images,
# extra="menu"))
# except:
# pass
try:
if item.contentType == "movie" and item.infoLabels["year"] < 2014:
@@ -591,6 +612,7 @@ def detalles(item):
# Películas/Series similares y recomendaciones
if item.infoLabels['tmdb_id']:
item.extra = item.contentType.replace('tvshow', 'tv')
title = title.replace("película", "Películas").replace("serie", "Series")
itemlist.append(item.clone(title="%s similares" % title, action="listado_tmdb",
search={'url': '%s/%s/similar' % (item.extra, item.infoLabels['tmdb_id']),
@@ -608,6 +630,7 @@ def reparto(item):
# Actores y equipo de rodaje de una película/serie
itemlist = []
item.text_color = color1
item.extra=item.contentType.replace('tvshow','tv')
item.search = {'url': '%s/%s/credits' % (item.extra, item.infoLabels['tmdb_id'])}
ob_tmdb = Tmdb(discover=item.search, tipo=item.extra, idioma_busqueda=langt)
@@ -1899,6 +1922,8 @@ def newlist(item):
##-------------------- IMAGE LISTINGS ------------------------##
def imagenes(item):
itemlist = []
if item.extra == "menu":
item.folder = not config.is_xbmc()
if "tmdb" in item.images:


@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# -*- Channel TVSeriesdk -*-
# -*- Channel Ver-peliculas -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
@@ -18,7 +18,7 @@ from core import tmdb
__channel__ = "ver-peliculas"
host = "http://ver-peliculas.org/"
host = "http://ver-peliculas.io/"
try:
__modo_grafico__ = config.get_setting('modo_grafico', __channel__)
@@ -122,10 +122,8 @@ def listado(item):
logger.info()
itemlist = []
data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
# logger.debug(data)
pattern = '<a href="([^"]+)"[^>]+><img (?:src)?(?:data-original)?="([^"]+)".*?alt="([^"]+)"'
matches = re.compile(pattern, re.DOTALL).findall(data)
matches = scrapertools.find_multiple_matches(data, pattern)
for url, thumb, title in matches:
year = scrapertools.find_single_match(url, '-(\d+)-online')
title = title.replace("Película", "", 1).partition(" /")[0].partition(":")[0]
@@ -135,10 +133,9 @@ def listado(item):
infoLabels={"year": year},
url=url,
thumbnail=thumb,
contentTitle=title
contentTitle=title.strip()
))
tmdb.set_infoLabels(itemlist, __modo_grafico__)
pagination = scrapertools.find_single_match(data, '<ul class="pagination">(.*?)</ul>')
if pagination:
next_page = scrapertools.find_single_match(pagination, '<a href="#">\d+</a>.*?<a href="([^"]+)">')
@@ -172,8 +169,7 @@ def findvideos(item):
duplicated = []
data = get_source(item.url)
logger.debug(data)
video_info = scrapertools.find_single_match(data, "load_player\('(.*?)','(.*?)'\);")
video_info = scrapertools.find_single_match(data, "load_player\('([^']+).*?([^']+)")
movie_info = scrapertools.find_single_match(item.url,
'http:\/\/ver-peliculas\.(io|org)\/peliculas\/(\d+)-(.*?)-\d{4}-online\.')
movie_host = movie_info[0]
@@ -186,7 +182,7 @@ def findvideos(item):
video_list = json_data['lista']
itemlist = []
for videoitem in video_list:
video_base_url = 'http://ver-peliculas.org/core/videofinal.php'
video_base_url = host + '/core/videofinal.php'
if video_list[videoitem] != None:
video_lang = video_list[videoitem]
languages = ['latino', 'spanish', 'subtitulos']
@@ -200,28 +196,22 @@ def findvideos(item):
playlist = jsontools.load(data)
sources = playlist[['playlist'][0]]
server = playlist['server']
for video_link in sources:
url = video_link['sources']
# if 'onevideo' in url:
# data = get_source(url)
# g_urls = servertools.findvideos(data=data)
# url = g_urls[0][1]
# server = g_urls[0][0]
if url not in duplicated and server!='drive':
lang = lang.capitalize()
if lang == 'Spanish':
lang = 'Español'
title = '(%s) %s (%s)' % (server, item.title, lang)
title = 'Ver en %s [' + lang + ']'
thumbnail = servertools.guess_server_thumbnail(server)
itemlist.append(item.clone(title=title,
url=url,
server=server,
thumbnail=thumbnail,
action='play'
))
duplicated.append(url)
tmdb.set_infoLabels(itemlist, __modo_grafico__)
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
Item(channel=item.channel,
@@ -235,6 +225,11 @@ def findvideos(item):
return itemlist
def play(item):
item.thumbnail = item.contentThumbnail
return [item]
def newest(category):
logger.info()
item = Item()


@@ -4,13 +4,13 @@ import re
from core import channeltools
from core import httptools
from core import scrapertoolsV2
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
idiomas1 = {"/es.png":"CAST","/en_es.png":"VOSE","/la.png":"LAT","/en.png":"ENG"}
HOST = 'http://www.yaske.ro'
parameters = channeltools.get_channel_parameters('yaske')
fanart_host = parameters['fanart']
@@ -98,14 +98,12 @@ def peliculas(item):
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = 'class="post-item-image btn-play-item".*?'
patron = '(?s)class="post-item-image btn-play-item".*?'
patron += 'href="([^"]+)">.*?'
patron += '<img data-original="([^"]+)".*?'
patron += 'glyphicon-calendar"></i>([^<]+).*?'
patron += 'post-item-flags"> (.*?)</div.*?'
patron += 'post(.*?)</div.*?'
patron += 'text-muted f-14">(.*?)</h3'
matches = scrapertools.find_multiple_matches(data, patron)
patron_next_page = 'href="([^"]+)"> &raquo;'
@@ -119,22 +117,16 @@ def peliculas(item):
matchesidiomas = scrapertools.find_multiple_matches(idiomas, patronidiomas)
idiomas_disponibles = []
for idioma in matchesidiomas:
if idioma.endswith("/la.png"):
idiomas_disponibles.append("LAT")
elif idioma.endswith("/en.png"):
idiomas_disponibles.append("VO")
elif idioma.endswith("/en_es.png"):
idiomas_disponibles.append("VOSE")
elif idioma.endswith("/es.png"):
idiomas_disponibles.append("ESP")
for lang in idiomas1.keys():
if idioma.endswith(lang):
idiomas_disponibles.append(idiomas1[lang])
if idiomas_disponibles:
idiomas_disponibles = "[" + "/".join(idiomas_disponibles) + "]"
contentTitle = scrapertoolsV2.htmlclean(scrapedtitle.strip())
title = "%s %s" % (contentTitle, idiomas_disponibles)
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=scrapedurl,
thumbnail=scrapedthumbnail, contentTitle=contentTitle,
infoLabels={"year": year}, text_color=color1))
contentTitle = scrapertools.htmlclean(scrapedtitle.strip())
title = "%s %s" % (contentTitle, idiomas_disponibles)
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=scrapedurl,
thumbnail=scrapedthumbnail, contentTitle=contentTitle,
infoLabels={"year": year}, text_color=color1))
# Fetch the basic data for all the movies using multiple threads
tmdb.set_infoLabels(itemlist)
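The new loop above replaces the old if/elif chain with a lookup in the idiomas1 dict declared at the top of the channel. A self-contained illustration of that mapping (ours; the icon URLs are made up):

idiomas1 = {"/es.png": "CAST", "/en_es.png": "VOSE", "/la.png": "LAT", "/en.png": "ENG"}
icons = ["http://example.com/img/la.png", "http://example.com/img/en_es.png"]
tags = [tag for icon in icons
        for suffix, tag in idiomas1.items() if icon.endswith(suffix)]
print("[" + "/".join(tags) + "]")   # -> [LAT/VOSE]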
@@ -179,36 +171,30 @@ def findvideos(item):
logger.info()
itemlist = []
sublist = []
# Download the page
url = "http://widget.olimpo.link/playlist/?tmdb=" + scrapertools.find_single_match(item.url, 'yaske.ro/([0-9]+)')
data = httptools.downloadpage(item.url).data
mtmdb = scrapertools.find_single_match(item.url, 'yaske.ro/([0-9]+)')
patron = '(?s)id="online".*?server="([^"]+)"'
mserver = scrapertools.find_single_match(data, patron)
url = "http://olimpo.link/?tmdb=%s&server=%s" %(mtmdb, mserver)
data = httptools.downloadpage(url).data
if not item.plot:
item.plot = scrapertoolsV2.find_single_match(data, '>Sinopsis</dt> <dd>([^<]+)</dd>')
item.plot = scrapertoolsV2.decodeHtmlentities(item.plot)
patron = '(/embed/[^"]+).*?'
patron += 'quality text-overflow ">([^<]+).*?'
patron += 'title="([^"]+)'
patron = '/\?tmdb=[^"]+.*?domain=(?:www\.|)([^\.]+).*?text-overflow.*?href="([^"]+).*?'
patron += '\[([^\]]+)\].*?\[([^\]]+)\]'
matches = scrapertools.find_multiple_matches(data, patron)
for url, calidad, idioma in matches:
if 'embed' in url:
url = "http://widget.olimpo.link" + url
data = httptools.downloadpage(url).data
url = scrapertools.find_single_match(data, 'iframe src="([^"]+)')
sublist.append(item.clone(channel=item.channel, action="play", url=url, folder=False, text_color=color1, quality=calidad.strip(),
language=idioma.strip()))
sublist = servertools.get_servers_itemlist(sublist, lambda i: "Ver en %s %s" % (i.server, i.quality), True)
# Add the servers found, grouping them by language
for k in ["Español", "Latino", "Subtitulado", "Ingles"]:
for server, url, idioma, calidad in matches:
if "drive" in server:
server = "gvideo"
sublist.append(item.clone(channel=item.channel, action="play", url=url, folder=False, text_color=color1, quality=calidad.strip(),
language=idioma.strip(),
title="Ver en %s %s" %(server, calidad)
))
for k in ["Español", "Latino", "Ingles - Sub Español", "Ingles"]:
lista_idioma = filter(lambda i: i.language == k, sublist)
if lista_idioma:
itemlist.append(Item(channel=item.channel, title=k, fanart=item.fanart, folder=False,
itemlist.append(item.clone(title=k, folder=False, infoLabels = "",
text_color=color2, text_bold=True, thumbnail=thumbnail_host))
itemlist.extend(lista_idioma)
tmdb.set_infoLabels(itemlist, True)
# Insert the "Search trailer" and "Add to video library" items
if itemlist and item.extra != "library":
title = "%s [Buscar trailer]" % (item.contentTitle)
@@ -219,5 +205,14 @@ def findvideos(item):
itemlist.append(Item(channel=item.channel, title="Añadir película a la videoteca",
action="add_pelicula_to_library", url=item.url, text_color="green",
contentTitle=item.contentTitle, extra="library", thumbnail=thumbnail_host))
return itemlist
def play(item):
logger.info()
itemlist = []
ddd = httptools.downloadpage(item.url).data
url = "http://olimpo.link" + scrapertools.find_single_match(ddd, '<iframe src="([^"]+)')
item.url = httptools.downloadpage(url + "&ge=1", follow_redirects=False, only_headers=True).headers.get("location", "")
itemlist.append(item.clone())
itemlist = servertools.get_servers_itemlist(itemlist)
return itemlist


@@ -7,12 +7,16 @@
"pattern": "(?s)https://youtube.googleapis.com.*?docid=([0-9a-zA-Z-_]+)",
"url": "http://docs.google.com/get_video_info?docid=\\1"
},
{
"pattern": "(?s)http://docs.google.com/get_video_info.*?docid=([0-9a-zA-Z-_]+)",
"url": "http://docs.google.com/get_video_info?docid=\\1"
},
{
"pattern": "(?s)https://(?:docs|drive).google.com/file/d/([^/]+)/preview",
"url": "http://docs.google.com/get_video_info?docid=\\1"
},
{
"pattern": "(?s)\"https://(lh.*?).googleusercontent.com/([^\"]+)",
"pattern": "(?s)https://(lh.).googleusercontent.com/([0-9a-zA-Z-_=]+)",
"url": "https://\\1.googleusercontent.com/\\2"
}
]
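Each entry above pairs a regex with a URL template; the server loader presumably substitutes the captured docid into the template. A quick check of the drive.google.com pattern (ours):

import re
pattern = r"(?s)https://(?:docs|drive).google.com/file/d/([^/]+)/preview"
template = r"http://docs.google.com/get_video_info?docid=\1"
print(re.sub(pattern, template, "https://drive.google.com/file/d/ABC123/preview"))
# -> http://docs.google.com/get_video_info?docid=ABC123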


@@ -1,42 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "kingvid.tv/(?:embed-|)([A-z0-9]+)",
"url": "http://kingvid.tv/embed-\\1.html"
}
]
},
"free": true,
"id": "kingvid",
"name": "kingvid",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "Incluir en lista negra",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "Incluir en lista de favoritos",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "http://i.imgur.com/oq0tPhY.png?1"
}


@@ -1,46 +0,0 @@
# -*- coding: utf-8 -*-
from core import httptools
from core import scrapertools
from lib import jsunpack
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "<title>watch </title>" in data.lower() or "File was deleted" in data:
return False, "[kingvid] El archivo no existe o ha sido borrado"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url, add_referer = True).data
match = scrapertools.find_single_match(data, "<script type=[\"']text/javascript[\"']>(eval.*?)</script>")
data = jsunpack.unpack(match)
matches = scrapertools.find_multiple_matches(data, 'file\s*:\s*"([^"]+)"\}')
video_urls = []
for video_url in matches:
filename = scrapertools.get_filename_from_url(video_url)[-4:]
if video_url.endswith("smil"):
playpath = video_url.rsplit("/", 1)[1].replace(".smil", "")
rtmp = scrapertools.find_single_match(data, 'image\s*:\s*"([^"]+)"')
rtmp = scrapertools.find_single_match(rtmp, 'i/(.*?)_')
video_url = "rtmp://kingvid.tv:1935/vod/ playpath=mp4:%s_n?h=%s " \
"swfUrl=http://kingvid.tv/player7/jwplayer.flash.swf pageUrl=%s" % \
(rtmp, playpath, page_url)
filename = "RTMP"
video_urls.append([filename + " [kingvid]", video_url])
elif video_url[-4:] in ['.mp4', 'm3u8']:
video_urls.append([filename + " [kingvid]", video_url])
video_urls.sort(key=lambda x: x[0], reverse=True)
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
return video_urls


@@ -1,41 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "letwatch.(?:us|to)/(?:embed-|)([a-z0-9A-Z]+)(?:.html|)",
"url": "http://letwatch.to/embed-\\1.html"
}
]
},
"free": true,
"id": "letwatch",
"name": "letwatch",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "Incluir en lista negra",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "Incluir en lista de favoritos",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
]
}


@@ -1,43 +0,0 @@
# -*- coding: utf-8 -*-
from core import scrapertools
from lib import jsunpack
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = scrapertools.cache_page(page_url)
if ("File was deleted" or "Not Found") in data:
return False, "[Letwatch] El archivo no existe o ha sido borrado"
if "Video is processing now" in data:
return False, "El vídeo está siendo procesado todavía"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
data = scrapertools.cache_page(page_url)
video_urls = []
media_urls = scrapertools.find_multiple_matches(data, '\{file\:"([^"]+)",label\:"([^"]+)"\}')
if len(media_urls) > 0:
for media_url, label in media_urls:
video_urls.append(
[scrapertools.get_filename_from_url(media_url)[-4:] + " (" + label + ") [letwatch]", media_url])
else:
matches = scrapertools.find_single_match(data, "<script type='text/javascript'>(eval\(function\(p,a,c,k,e,d.*?)"
"</script>")
matchjs = jsunpack.unpack(matches).replace("\\", "")
media_urls = scrapertools.find_multiple_matches(matchjs, '\{file\:"([^"]+)",label\:"([^"]+)"\}')
for media_url, label in media_urls:
video_urls.append(
[scrapertools.get_filename_from_url(media_url)[-4:] + " (" + label + ") [letwatch]", media_url])
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
return video_urls


@@ -4,8 +4,8 @@
"ignore_urls": [],
"patterns": [
{
"pattern": "powvideo.net/(?:embed-|iframe-|preview-|)([a-z0-9]+)",
"url": "http://powvideo.net/\\1"
"pattern": "powvideo.(?:net|xyz)/(?:embed-|iframe-|preview-|)([a-z0-9]+)",
"url": "http://powvideo.net/iframe-\\1-954x562.html"
}
]
},


@@ -13,68 +13,23 @@ host = "http://powvideo.net/"
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "<title>watch </title>" in data.lower():
return False, "[powvideo] El archivo no existe o ha sido borrado"
if "el archivo ha sido borrado por no respetar" in data.lower():
return False, "[powvideo] El archivo no existe o ha sido borrado por no respetar los Terminos de uso"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
url = page_url.replace(host, "http://powvideo.xyz/iframe-") + "-954x562.html"
referer = page_url.replace('iframe', 'preview')
data = httptools.downloadpage(page_url, headers={'referer': referer}).data
data = httptools.downloadpage(page_url, cookies=False)
cookie = data.headers['set-cookie']
data = data.data
_0xa3e8 = scrapertools.find_single_match(data, 'var _0xa3e8=(\[[^;]+\]);')
packed = scrapertools.find_single_match(data, "<script type=[\"']text/javascript[\"']>(eval.*?)</script>")
unpacked = jsunpack.unpack(packed)
file_id, aff = scrapertools.find_single_match(data, "'file_id', '(\d+)',[^']+'aff', '(\d+)',")
_cookie = {"Cookie": cookie.replace("path=/; HttpOnly", "file_id=" + file_id + "; aff=" + aff)}
id = scrapertools.find_single_match(data, 'name="id" value="([^"]+)"')
fname = scrapertools.find_single_match(data, 'name="fname" value="([^"]+)"')
hash = scrapertools.find_single_match(data, 'name="hash" value="([^"]+)"')
post = "op=download1&usr_login=&referer=&fname=%s&id=%s&hash=%s" % (fname, id, hash)
import time
time.sleep(7)
data = httptools.downloadpage(page_url, post, headers=_cookie).data
for list in scrapertools.find_multiple_matches(data, '_[^=]+=(\[[^\]]+\]);'):
if len(list) == 703 or len(list) == 711:
key = "".join(eval(list)[7:9])
break
if key.startswith("embed"):
key = key[6:] + key[:6]
matches = scrapertools.find_single_match(data, "<script type=[\"']text/javascript[\"']>(eval.*?)</script>")
data = jsunpack.unpack(matches).replace("\\", "")
data = scrapertools.find_single_match(data.replace('"', "'"), "sources\s*=[^\[]*\[([^\]]+)\]")
matches = scrapertools.find_multiple_matches(data, "[src|file]:'([^']+)'")
video_urls = []
for video_url in matches:
_hash = scrapertools.find_single_match(video_url, '[A-z0-9\_\-]{78,}')
hash = decrypt(_hash, key)
video_url = video_url.replace(_hash, hash)
filename = scrapertools.get_filename_from_url(video_url)[-4:]
if video_url.startswith("rtmp"):
rtmp, playpath = video_url.split("vod/", 1)
video_url = "%svod/ playpath=%s swfUrl=%splayer6/jwplayer.flash.swf pageUrl=%s" % \
(rtmp, playpath, host, page_url)
filename = "RTMP"
elif video_url.endswith(".m3u8"):
video_url += "|User-Agent=" + headers[0][1]
elif video_url.endswith("/v.mp4"):
video_url_flv = re.sub(r'/v.mp4', '/v.flv', video_url)
video_urls.append(["flv [powvideo]", video_url_flv])
video_urls.append([filename + " [powvideo]", video_url])
url = scrapertools.find_single_match(unpacked, "(?:src):\\\\'([^\\\\]+.mp4)\\\\'")
video_urls.append([".mp4" + " [powvideo]", S(_0xa3e8).decode(url)])
video_urls.sort(key=lambda x: x[0], reverse=True)
for video_url in video_urls:
@@ -83,105 +38,323 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
return video_urls
def decrypt(h, k):
import base64
class S:
def __init__(self, _0xa3e8):
self.r = None
self.s = None
self.k = None
self.n = None
self.c = None
self.b = None
self.d = None
if len(h) % 4:
h += "=" * (4 - len(h) % 4)
sig = []
h = base64.b64decode(h.replace("-", "+").replace("_", "/"))
for c in range(len(h)):
sig += [ord(h[c])]
_0xa3e8 = eval(_0xa3e8)
self.t(_0xa3e8[14] + _0xa3e8[15] + _0xa3e8[14] + _0xa3e8[15], _0xa3e8[16])
sec = []
for c in range(len(k)):
sec += [ord(k[c])]
def decode(self, url):
_hash = re.compile('[A-z0-9_-]{40,}', re.DOTALL).findall(url)[0]
return url.replace(_hash, self.p(_hash))
dig = range(256)
g = 0
v = 128
for b in range(len(sec)):
a = (v + (sec[b] & 15)) % 256
c = dig[(g)]
dig[g] = dig[a]
dig[a] = c
g += 1
def t(self, t, i):
self.r = 20
self.s = [1634760805, 857760878, 2036477234, 1797285236]
self.k = []
self.n = [0, 0]
self.c = [0, 0]
self.b = [None] * 64
self.d = 64
a = (v + (sec[b] >> 4 & 15)) % 256
c = dig[g]
dig[g] = dig[a]
dig[a] = c
g += 1
self.sk(self.sa(t))
self.sn(self.sa(i))
k = 0
q = 1
p = 0
n = 0
for b in range(512):
k = (k + q) % 256
n = (p + dig[(n + dig[k]) % 256]) % 256
p = (k + p + dig[n]) % 256
c = dig[k]
dig[k] = dig[n]
dig[n] = c
def e(self, t):
s = self.gb(len(t))
i = [s[h] ^ t[h] for h in range(len(t))]
return i
q = 3
for a in range(v):
b = 255 - a
if dig[a] > dig[b]:
c = dig[a]
dig[a] = dig[b]
dig[b] = c
def p(self, t):
import base64
t += "=" * (4 - len(t) % 4)
t = base64.b64decode(t.replace('-', '+').replace('_', '/'))
return self._as(self.e(self.sa(t)))
k = 0
for b in range(512):
k = (k + q) % 256
n = (p + dig[(n + dig[k]) % 256]) % 256
p = (k + p + dig[n]) % 256
c = dig[k]
dig[k] = dig[n]
dig[n] = c
@staticmethod
def sa(t):
s = [ord(t[i]) for i in range(len(t))]
return s
q = 5
for a in range(v):
b = 255 - a
if dig[a] > dig[b]:
c = dig[a]
dig[a] = dig[b]
dig[b] = c
@staticmethod
def _as(t):
s = [chr(t[i]) for i in range(len(t))]
return ''.join(s)
k = 0
for b in range(512):
k = (k + q) % 256
n = (p + dig[(n + dig[k]) % 256]) % 256
p = (k + p + dig[n]) % 256
c = dig[k]
dig[k] = dig[n]
dig[n] = c
def sk(self, t):
s = 0
for i in range(8):
self.k.append(
255 & t[s] | self.lshift((255 & t[s + 1]), 8) | self.lshift((255 & t[s + 2]), 16) | self.lshift(
(255 & t[s + 3]), 24))
s += 4
self._r()
q = 7
k = 0
u = 0
d = []
for b in range(len(dig)):
k = (k + q) % 256
n = (p + dig[(n + dig[k]) % 256]) % 256
p = (k + p + dig[n]) % 256
c = dig[k]
dig[k] = dig[n]
dig[n] = c
u = dig[(n + dig[(k + dig[(u + p) % 256]) % 256]) % 256]
d += [u]
def sn(self, t):
self.n[0] = 255 & t[0] | self.lshift((255 & t[1]), 8) | self.lshift((255 & t[2]), 16) | self.lshift(
(255 & t[3]), 24)
self.n[1] = 255 & t[4] | self.lshift((255 & t[5]), 8) | self.lshift((255 & t[6]), 16) | self.lshift(
(255 & t[7]), 24)
self._r()
c = []
for f in range(len(d)):
try:
c += [(256 + (sig[f] - d[f])) % 256]
except:
break
def gb(self, t):
i = [None] * t
h = ""
for s in c:
h += chr(s)
for s in range(t):
if 64 == self.d:
self._g()
self._i()
self.d = 0
return h
i[s] = self.b[self.d]
self.d += 1
return i
def gh(self, t):
i = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f']
h = self.gb(t)
s = [i[self.rshift(h[b], 4) & 15] for b in range(len(h))]
s.append(i[15 & h[len(h)]])
return ''.join(s)
def _r(self):
self.c[0] = 0
self.c[1] = 0
self.d = 64
def _i(self):
self.c[0] = self.c[0] + 1 & 4294967295
if 0 == self.c[0]:
self.c[1] = self.c[1] + 1 & 4294967295
def _g(self):
i = self.s[0]
s = self.k[0]
h = self.k[1]
b = self.k[2]
r = self.k[3]
n = self.s[1]
o = self.n[0]
e = self.n[1]
c = self.c[0]
p = self.c[1]
a = self.s[2]
f = self.k[4]
u = self.k[5]
g = self.k[6]
y = self.k[7]
k = self.s[3]
l = i
d = s
v = h
_ = b
A = r
w = n
C = o
S = e
j = c
m = p
q = a
x = f
z = u
B = g
D = y
E = k
for F in range(0, self.r, 2):
# 0
t = l + z
A ^= self.lshift(t, 7) | self.bshift(t, 25)
t = A + l
j ^= self.lshift(t, 9) | self.bshift(t, 23)
t = j + A
z ^= self.lshift(t, 13) | self.bshift(t, 19)
t = z + j
l ^= self.lshift(t, 18) | self.bshift(t, 14)
# 1
t = w + d
m ^= self.lshift(t, 7) | self.bshift(t, 25)
t = m + w
B ^= self.lshift(t, 9) | self.bshift(t, 23)
t = B + m
d ^= self.lshift(t, 13) | self.bshift(t, 19)
t = d + B
w ^= self.lshift(t, 18) | self.bshift(t, 14)
# 2
t = q + C
D ^= self.lshift(t, 7) | self.bshift(t, 25)
t = D + q
v ^= self.lshift(t, 9) | self.bshift(t, 23)
t = v + D
C ^= self.lshift(t, 13) | self.bshift(t, 19)
t = C + v
q ^= self.lshift(t, 18) | self.bshift(t, 14)
# 3
t = E + x
_ ^= self.lshift(t, 7) | self.bshift(t, 25)
t = _ + E
S ^= self.lshift(t, 9) | self.bshift(t, 23)
t = S + _
x ^= self.lshift(t, 13) | self.bshift(t, 19)
t = x + S
E ^= self.lshift(t, 18) | self.bshift(t, 14)
# 4
t = l + _
d ^= self.lshift(t, 7) | self.bshift(t, 25)
t = d + l
v ^= self.lshift(t, 9) | self.bshift(t, 23)
t = v + d
_ ^= self.lshift(t, 13) | self.bshift(t, 19)
t = _ + v
l ^= self.lshift(t, 18) | self.bshift(t, 14)
# 5
t = w + A
C ^= self.lshift(t, 7) | self.bshift(t, 25)
t = C + w
S ^= self.lshift(t, 9) | self.bshift(t, 23)
t = S + C
A ^= self.lshift(t, 13) | self.bshift(t, 19)
t = A + S
w ^= self.lshift(t, 18) | self.bshift(t, 14)
# 6
t = q + m
x ^= self.lshift(t, 7) | self.bshift(t, 25)
t = x + q
j ^= self.lshift(t, 9) | self.bshift(t, 23)
t = j + x
m ^= self.lshift(t, 13) | self.bshift(t, 19)
t = m + j
q ^= self.lshift(t, 18) | self.bshift(t, 14)
# 7
t = E + D
z ^= self.lshift(t, 7) | self.bshift(t, 25)
t = z + E
B ^= self.lshift(t, 9) | self.bshift(t, 23)
t = B + z
D ^= self.lshift(t, 13) | self.bshift(t, 19)
t = D + B
E ^= self.lshift(t, 18) | self.bshift(t, 14)
l += i
d += s
v += h
_ += b
A += r
w += n
C += o
S += e
j += c
m += p
q += a
x += f
z += u
B += g
D += y
E += k
self.b[0] = self.bshift(l, 0) & 255
self.b[1] = self.bshift(l, 8) & 255
self.b[2] = self.bshift(l, 16) & 255
self.b[3] = self.bshift(l, 24) & 255
self.b[4] = self.bshift(d, 0) & 255
self.b[5] = self.bshift(d, 8) & 255
self.b[6] = self.bshift(d, 16) & 255
self.b[7] = self.bshift(d, 24) & 255
self.b[8] = self.bshift(v, 0) & 255
self.b[9] = self.bshift(v, 8) & 255
self.b[10] = self.bshift(v, 16) & 255
self.b[11] = self.bshift(v, 24) & 255
self.b[12] = self.bshift(_, 0) & 255
self.b[13] = self.bshift(_, 8) & 255
self.b[14] = self.bshift(_, 16) & 255
self.b[15] = self.bshift(_, 24) & 255
self.b[16] = self.bshift(A, 0) & 255
self.b[17] = self.bshift(A, 8) & 255
self.b[18] = self.bshift(A, 16) & 255
self.b[19] = self.bshift(A, 24) & 255
self.b[20] = self.bshift(w, 0) & 255
self.b[21] = self.bshift(w, 8) & 255
self.b[22] = self.bshift(w, 16) & 255
self.b[23] = self.bshift(w, 24) & 255
self.b[24] = self.bshift(C, 0) & 255
self.b[25] = self.bshift(C, 8) & 255
self.b[26] = self.bshift(C, 16) & 255
self.b[27] = self.bshift(C, 24) & 255
self.b[28] = self.bshift(S, 0) & 255
self.b[29] = self.bshift(S, 8) & 255
self.b[30] = self.bshift(S, 16) & 255
self.b[31] = self.bshift(S, 24) & 255
self.b[32] = self.bshift(j, 0) & 255
self.b[33] = self.bshift(j, 8) & 255
self.b[34] = self.bshift(j, 16) & 255
self.b[35] = self.bshift(j, 24) & 255
self.b[36] = self.bshift(m, 0) & 255
self.b[37] = self.bshift(m, 8) & 255
self.b[38] = self.bshift(m, 16) & 255
self.b[39] = self.bshift(m, 24) & 255
self.b[40] = self.bshift(q, 0) & 255
self.b[41] = self.bshift(q, 8) & 255
self.b[42] = self.bshift(q, 16) & 255
self.b[43] = self.bshift(q, 24) & 255
self.b[44] = self.bshift(x, 0) & 255
self.b[45] = self.bshift(x, 8) & 255
self.b[46] = self.bshift(x, 16) & 255
self.b[47] = self.bshift(x, 24) & 255
self.b[48] = self.bshift(z, 0) & 255
self.b[49] = self.bshift(z, 8) & 255
self.b[50] = self.bshift(z, 16) & 255
self.b[51] = self.bshift(z, 24) & 255
self.b[52] = self.bshift(B, 0) & 255
self.b[53] = self.bshift(B, 8) & 255
self.b[54] = self.bshift(B, 16) & 255
self.b[55] = self.bshift(B, 24) & 255
self.b[56] = self.bshift(D, 0) & 255
self.b[57] = self.bshift(D, 8) & 255
self.b[58] = self.bshift(D, 16) & 255
self.b[59] = self.bshift(D, 24) & 255
self.b[60] = self.bshift(E, 0) & 255
self.b[61] = self.bshift(E, 8) & 255
self.b[62] = self.bshift(E, 16) & 255
self.b[63] = self.bshift(E, 24) & 255
def lshift(self, num, other):
lnum = self.ToInt32(num)
rnum = self.ToUint32(other)
shift_count = rnum & 0x1F
return self.ToInt32(lnum << shift_count)
def rshift(self, num, other):
lnum = self.ToInt32(num)
rnum = self.ToUint32(other)
shift_count = rnum & 0x1F
return self.ToInt32(lnum >> shift_count)
def bshift(self, num, other):
lnum = self.ToUint32(num)
rnum = self.ToUint32(other)
shift_count = rnum & 0x1F
return self.ToUint32(lnum >> shift_count)
@staticmethod
def ToInt32(num):
int32 = num % 2 ** 32
return int32 - 2 ** 32 if int32 >= 2 ** 31 else int32
@staticmethod
def ToUint32(num):
return num % 2 ** 32
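The S class above appears to be a pure-Python Salsa20/20 stream cipher: self.r is the round count, _g() is the double-round core, and the four magic numbers in self.s are the little-endian words of the standard "expand 32-byte k" sigma constant, as this verification snippet (ours) shows:

import struct
sigma = b"expand 32-byte k"
print(list(struct.unpack("<4I", sigma)))
# -> [1634760805, 857760878, 2036477234, 1797285236]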


@@ -23,7 +23,7 @@ def test_video_exists(page_url):
if "Object not found" in response.data:
return False, "[Rapidvideo] El archivo no existe o ha sido borrado"
if reponse.code == 500:
if response.code == 500:
return False, "[Rapidvideo] Error de servidor, inténtelo más tarde."
return True, ""


@@ -24,39 +24,343 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
referer = re.sub(r"embed-|player-", "", page_url)[:-5]
data = httptools.downloadpage(page_url, headers={'Referer': referer}).data
matches = scrapertools.find_single_match(data, "<script type=[\"']text/javascript[\"']>(eval.*?)</script>")
data = jsunpack.unpack(matches).replace("\\", "")
packed = scrapertools.find_single_match(data, "<script type=[\"']text/javascript[\"']>(eval.*?)</script>")
unpacked = jsunpack.unpack(packed)
_0xd003 = scrapertools.find_single_match(data, 'var _0xd003=(\[[^;]+\]);')
data = scrapertools.find_single_match(data.replace('"', "'"), "sources\s*=[^\[]*\[([^\]]+)\]")
matches = scrapertools.find_multiple_matches(data, "[src|file]:'([^']+)'")
if len(matches) == 0:
matches = scrapertools.find_multiple_matches(data, "[^',]+")
video_urls = []
for video_url in matches:
if video_url.endswith(".mpd"):
continue
_hash = scrapertools.find_single_match(video_url, '[A-z0-9\_\-]{40,}')
hash = _hash[::-1]
hash = hash.replace(hash[1:2], "", 1)
video_url = video_url.replace(_hash, hash)
url = scrapertools.find_single_match(unpacked, '(http[^,]+\.mp4)')
filename = scrapertools.get_filename_from_url(video_url)[-4:]
if video_url.startswith("rtmp"):
rtmp, playpath = video_url.split("vod/", 1)
video_url = "%svod/ playpath=%s swfUrl=%splayer6/jwplayer.flash.swf pageUrl=%s" % \
(rtmp, playpath, host, page_url)
filename = "RTMP"
elif video_url.endswith("/v.mp4"):
video_url_flv = re.sub(r'/v.mp4', '/v.flv', video_url)
video_urls.append(["flv [streamplay]", video_url_flv])
video_urls.append([filename + " [streamplay]", video_url])
video_urls.append([".mp4" + " [streamplay]", S(_0xd003).decode(url)])
video_urls.sort(key=lambda x: x[0], reverse=True)
for video_url in video_urls:
logger.info(" %s - %s" % (video_url[0], video_url[1]))
logger.info("%s - %s" % (video_url[0], video_url[1]))
return video_urls

class S:
    def __init__(self, _0xd003):
        self.r = None
        self.s = None
        self.k = None
        self.n = None
        self.c = None
        self.b = None
        self.d = None
        _0xd003 = eval(_0xd003)
        self.t(_0xd003[13] + _0xd003[14] + _0xd003[13] + _0xd003[14], _0xd003[15])

    def decode(self, url):
        _hash = re.compile('[A-z0-9_-]{40,}', re.DOTALL).findall(url)[0]
        return url.replace(_hash, self.p(_hash))

    def t(self, t, i):
        self.r = 20
        self.s = [1634760805, 857760878, 2036477234, 1797285236]
        self.k = []
        self.n = [0, 0]
        self.c = [0, 0]
        self.b = [None] * 64
        self.d = 64
        self.sk(self.sa(t))
        self.sn(self.sa(i))

    def e(self, t):
        s = self.gb(len(t))
        i = [s[h] ^ t[h] for h in range(len(t))]
        return i

    def p(self, t):
        import base64
        t += "=" * (4 - len(t) % 4)
        t = base64.b64decode(t.replace('-', '+').replace('_', '/'))
        return self._as(self.e(self.sa(t)))

    @staticmethod
    def sa(t):
        s = [ord(t[i]) for i in range(len(t))]
        return s

    @staticmethod
    def _as(t):
        s = [chr(t[i]) for i in range(len(t))]
        return ''.join(s)

    def sk(self, t):
        s = 0
        for i in range(8):
            self.k.append(
                255 & t[s] | self.lshift((255 & t[s + 1]), 8) | self.lshift((255 & t[s + 2]), 16) | self.lshift(
                    (255 & t[s + 3]), 24))
            s += 4
        self._r()

    def sn(self, t):
        self.n[0] = 255 & t[0] | self.lshift((255 & t[1]), 8) | self.lshift((255 & t[2]), 16) | self.lshift(
            (255 & t[3]), 24)
        self.n[1] = 255 & t[4] | self.lshift((255 & t[5]), 8) | self.lshift((255 & t[6]), 16) | self.lshift(
            (255 & t[7]), 24)
        self._r()

    def gb(self, t):
        i = [None] * t
        for s in range(t):
            if 64 == self.d:
                self._g()
                self._i()
                self.d = 0
            i[s] = self.b[self.d]
            self.d += 1
        return i
    def gh(self, t):
        i = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f']
        h = self.gb(t)
        # two hex digits (high nibble, then low nibble) per keystream byte
        s = []
        for b in range(len(h)):
            s.append(i[self.rshift(h[b], 4) & 15])
            s.append(i[15 & h[b]])
        return ''.join(s)
    def _r(self):
        self.c[0] = 0
        self.c[1] = 0
        self.d = 64

    def _i(self):
        self.c[0] = self.c[0] + 1 & 4294967295
        if 0 == self.c[0]:
            self.c[1] = self.c[1] + 1 & 4294967295

    def _g(self):
        i = self.s[0]
        s = self.k[0]
        h = self.k[1]
        b = self.k[2]
        r = self.k[3]
        n = self.s[1]
        o = self.n[0]
        e = self.n[1]
        c = self.c[0]
        p = self.c[1]
        a = self.s[2]
        f = self.k[4]
        u = self.k[5]
        g = self.k[6]
        y = self.k[7]
        k = self.s[3]
        l = i
        d = s
        v = h
        _ = b
        A = r
        w = n
        C = o
        S = e
        j = c
        m = p
        q = a
        x = f
        z = u
        B = g
        D = y
        E = k
        for F in range(0, self.r, 2):
            # 0
            t = l + z
            A ^= self.lshift(t, 7) | self.bshift(t, 25)
            t = A + l
            j ^= self.lshift(t, 9) | self.bshift(t, 23)
            t = j + A
            z ^= self.lshift(t, 13) | self.bshift(t, 19)
            t = z + j
            l ^= self.lshift(t, 18) | self.bshift(t, 14)
            # 1
            t = w + d
            m ^= self.lshift(t, 7) | self.bshift(t, 25)
            t = m + w
            B ^= self.lshift(t, 9) | self.bshift(t, 23)
            t = B + m
            d ^= self.lshift(t, 13) | self.bshift(t, 19)
            t = d + B
            w ^= self.lshift(t, 18) | self.bshift(t, 14)
            # 2
            t = q + C
            D ^= self.lshift(t, 7) | self.bshift(t, 25)
            t = D + q
            v ^= self.lshift(t, 9) | self.bshift(t, 23)
            t = v + D
            C ^= self.lshift(t, 13) | self.bshift(t, 19)
            t = C + v
            q ^= self.lshift(t, 18) | self.bshift(t, 14)
            # 3
            t = E + x
            _ ^= self.lshift(t, 7) | self.bshift(t, 25)
            t = _ + E
            S ^= self.lshift(t, 9) | self.bshift(t, 23)
            t = S + _
            x ^= self.lshift(t, 13) | self.bshift(t, 19)
            t = x + S
            E ^= self.lshift(t, 18) | self.bshift(t, 14)
            # 4
            t = l + _
            d ^= self.lshift(t, 7) | self.bshift(t, 25)
            t = d + l
            v ^= self.lshift(t, 9) | self.bshift(t, 23)
            t = v + d
            _ ^= self.lshift(t, 13) | self.bshift(t, 19)
            t = _ + v
            l ^= self.lshift(t, 18) | self.bshift(t, 14)
            # 5
            t = w + A
            C ^= self.lshift(t, 7) | self.bshift(t, 25)
            t = C + w
            S ^= self.lshift(t, 9) | self.bshift(t, 23)
            t = S + C
            A ^= self.lshift(t, 13) | self.bshift(t, 19)
            t = A + S
            w ^= self.lshift(t, 18) | self.bshift(t, 14)
            # 6
            t = q + m
            x ^= self.lshift(t, 7) | self.bshift(t, 25)
            t = x + q
            j ^= self.lshift(t, 9) | self.bshift(t, 23)
            t = j + x
            m ^= self.lshift(t, 13) | self.bshift(t, 19)
            t = m + j
            q ^= self.lshift(t, 18) | self.bshift(t, 14)
            # 7
            t = E + D
            z ^= self.lshift(t, 7) | self.bshift(t, 25)
            t = z + E
            B ^= self.lshift(t, 9) | self.bshift(t, 23)
            t = B + z
            D ^= self.lshift(t, 13) | self.bshift(t, 19)
            t = D + B
            E ^= self.lshift(t, 18) | self.bshift(t, 14)
        l += i
        d += s
        v += h
        _ += b
        A += r
        w += n
        C += o
        S += e
        j += c
        m += p
        q += a
        x += f
        z += u
        B += g
        D += y
        E += k
        self.b[0] = self.bshift(l, 0) & 255
        self.b[1] = self.bshift(l, 8) & 255
        self.b[2] = self.bshift(l, 16) & 255
        self.b[3] = self.bshift(l, 24) & 255
        self.b[4] = self.bshift(d, 0) & 255
        self.b[5] = self.bshift(d, 8) & 255
        self.b[6] = self.bshift(d, 16) & 255
        self.b[7] = self.bshift(d, 24) & 255
        self.b[8] = self.bshift(v, 0) & 255
        self.b[9] = self.bshift(v, 8) & 255
        self.b[10] = self.bshift(v, 16) & 255
        self.b[11] = self.bshift(v, 24) & 255
        self.b[12] = self.bshift(_, 0) & 255
        self.b[13] = self.bshift(_, 8) & 255
        self.b[14] = self.bshift(_, 16) & 255
        self.b[15] = self.bshift(_, 24) & 255
        self.b[16] = self.bshift(A, 0) & 255
        self.b[17] = self.bshift(A, 8) & 255
        self.b[18] = self.bshift(A, 16) & 255
        self.b[19] = self.bshift(A, 24) & 255
        self.b[20] = self.bshift(w, 0) & 255
        self.b[21] = self.bshift(w, 8) & 255
        self.b[22] = self.bshift(w, 16) & 255
        self.b[23] = self.bshift(w, 24) & 255
        self.b[24] = self.bshift(C, 0) & 255
        self.b[25] = self.bshift(C, 8) & 255
        self.b[26] = self.bshift(C, 16) & 255
        self.b[27] = self.bshift(C, 24) & 255
        self.b[28] = self.bshift(S, 0) & 255
        self.b[29] = self.bshift(S, 8) & 255
        self.b[30] = self.bshift(S, 16) & 255
        self.b[31] = self.bshift(S, 24) & 255
        self.b[32] = self.bshift(j, 0) & 255
        self.b[33] = self.bshift(j, 8) & 255
        self.b[34] = self.bshift(j, 16) & 255
        self.b[35] = self.bshift(j, 24) & 255
        self.b[36] = self.bshift(m, 0) & 255
        self.b[37] = self.bshift(m, 8) & 255
        self.b[38] = self.bshift(m, 16) & 255
        self.b[39] = self.bshift(m, 24) & 255
        self.b[40] = self.bshift(q, 0) & 255
        self.b[41] = self.bshift(q, 8) & 255
        self.b[42] = self.bshift(q, 16) & 255
        self.b[43] = self.bshift(q, 24) & 255
        self.b[44] = self.bshift(x, 0) & 255
        self.b[45] = self.bshift(x, 8) & 255
        self.b[46] = self.bshift(x, 16) & 255
        self.b[47] = self.bshift(x, 24) & 255
        self.b[48] = self.bshift(z, 0) & 255
        self.b[49] = self.bshift(z, 8) & 255
        self.b[50] = self.bshift(z, 16) & 255
        self.b[51] = self.bshift(z, 24) & 255
        self.b[52] = self.bshift(B, 0) & 255
        self.b[53] = self.bshift(B, 8) & 255
        self.b[54] = self.bshift(B, 16) & 255
        self.b[55] = self.bshift(B, 24) & 255
        self.b[56] = self.bshift(D, 0) & 255
        self.b[57] = self.bshift(D, 8) & 255
        self.b[58] = self.bshift(D, 16) & 255
        self.b[59] = self.bshift(D, 24) & 255
        self.b[60] = self.bshift(E, 0) & 255
        self.b[61] = self.bshift(E, 8) & 255
        self.b[62] = self.bshift(E, 16) & 255
        self.b[63] = self.bshift(E, 24) & 255

    def lshift(self, num, other):
        lnum = self.ToInt32(num)
        rnum = self.ToUint32(other)
        shift_count = rnum & 0x1F
        return self.ToInt32(lnum << shift_count)

    def rshift(self, num, other):
        lnum = self.ToInt32(num)
        rnum = self.ToUint32(other)
        shift_count = rnum & 0x1F
        return self.ToInt32(lnum >> shift_count)

    def bshift(self, num, other):
        lnum = self.ToUint32(num)
        rnum = self.ToUint32(other)
        shift_count = rnum & 0x1F
        return self.ToUint32(lnum >> shift_count)

    @staticmethod
    def ToInt32(num):
        int32 = num % 2 ** 32
        return int32 - 2 ** 32 if int32 >= 2 ** 31 else int32

    @staticmethod
    def ToUint32(num):
        return num % 2 ** 32
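
For orientation: the constants 1634760805, 857760878, 2036477234, 1797285236 are Salsa20's "expand 32-byte k" sigma words and the 7/9/13/18 rotations are Salsa20's quarter-round, so S is a Salsa20-style keystream decoder ported from the page's obfuscated JavaScript. A hedged sketch of how it is driven follows; the _0xd003 value is invented purely for illustration (the real array is scraped from the player page), and the only structural facts taken from the code above are that elements 13 and 14, concatenated twice, form the 32-character key, element 15 is the 8-character nonce, and decode() rewrites the first 40+ character token it finds in the URL. The class targets Python 2, where base64.b64decode returns a str that ord() can index; run it alongside the class above.

# Hypothetical key material; a real page supplies its own _0xd003 array.
_0xd003 = str(['pad'] * 13 + ['0123ABCD', '4567EFGH', '89abcdef'])
# indices 13, 14 -> key = ('0123ABCD' + '4567EFGH') * 2, 32 chars
# index 15       -> nonce = '89abcdef', 8 chars

s = S(_0xd003)

# Made-up URL whose token is 43 chars of [A-z0-9_-], like the real links.
url = "https://example.com/hls/" + "Q" * 43 + "/v.mp4"
print(s.decode(url))  # same URL with the token swapped for its decrypted form
                      # (only meaningful, printable output for genuine page data)

# The shift helpers mimic JavaScript's 32-bit coercion:
print(S.ToInt32(2 ** 31))  # -2147483648
print(S.ToUint32(-1))      # 4294967295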

View File

@@ -4,7 +4,7 @@
"ignore_urls": [],
"patterns": [
{
"pattern": "(?:thevideo.me|tvad.me)/(?:embed-|)([A-z0-9]+)",
"pattern": "(?:thevideo.me|tvad.me|thevid.net)/(?:embed-|)([A-z0-9]+)",
"url": "http://thevideo.me/embed-\\1.html"
}
]
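
A quick sanity check of the widened pattern; the sample URLs below are invented, only the domains come from the pattern itself:

import re

pattern = r"(?:thevideo.me|tvad.me|thevid.net)/(?:embed-|)([A-z0-9]+)"
for sample in ("http://thevideo.me/embed-abc123.html",
               "https://thevid.net/xYz987"):
    m = re.search(pattern, sample)
    if m:
        # the captured id is plugged back into the canonical embed URL
        print("http://thevideo.me/embed-%s.html" % m.group(1))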

View File

@@ -9,7 +9,7 @@ def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data
-   if ("File was deleted" or "File not found") in data:
+   if "File was deleted" in data or "File not found" in data:
        return False, "[Yourupload] El archivo no existe o ha sido borrado"
    return True, ""

View File

@@ -1,42 +0,0 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "zstream.to/(?:embed-|)([A-z0-9]+)",
                "url": "http://zstream.to/embed-\\1.html"
            }
        ]
    },
    "free": true,
    "id": "zstream",
    "name": "zstream",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "Incluir en lista negra",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "Incluir en lista de favoritos",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ],
    "thumbnail": "http://i.imgur.com/UJMfMZJ.png?1"
}

View File

@@ -1,29 +0,0 @@
# -*- coding: utf-8 -*-
from core import httptools
from core import scrapertools
from platformcode import logger


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data
    if "File was deleted" in data:
        return False, "[Zstream] El archivo no existe o ha sido borrado"
    return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("url=%s" % page_url)
    video_urls = []
    data = httptools.downloadpage(page_url).data
    matches = scrapertools.find_multiple_matches(data, '\{file:"([^"]+)",label:"([^"]+)"')
    for media_url, calidad in matches:
        calidad = "." + media_url.rsplit('.', 1)[1] + " " + calidad
        video_urls.append([calidad + ' [zstream]', media_url])
    return video_urls