@@ -96,7 +96,6 @@ def recientes(item):
        action = "peliculas"
        if not thumb.startswith("http"):
            thumb = "http:%s" % thumb
        action = "findvideos"
        infoLabels = {'filtro': {"original_language": "ja"}.items()}
        itemlist.append(item.clone(action=action, title=title, url=url, thumbnail=thumb, text_color=color3,
                                   contentTitle=contentTitle, contentSerieName=show, infoLabels=infoLabels,

@@ -1,8 +1,9 @@
# -*- coding: utf-8 -*-

import re
import urlparse

from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
@@ -33,8 +34,7 @@ def menupeliculas(item):
        Item(channel=item.channel, title="Películas - A-Z", action="peliculas", url=item.url + "/orden:nombre",
             fanart=item.fanart, viewmode="movie_with_plot"))

    # <ul class="submenu2 subcategorias"><li ><a href="/descargas/subcategoria/4/br-scr-dvdscr">BR-Scr / DVDScr</a></li><li ><a href="/descargas/subcategoria/6/dvdr-full">DVDR - Full</a></li><li ><a href="/descargas/subcategoria/1/dvdrip-vhsrip">DVDRip / VHSRip</a></li><li ><a href="/descargas/subcategoria/3/hd">HD</a></li><li ><a href="/descargas/subcategoria/2/hdrip-bdrip">HDRip / BDRip</a></li><li ><a href="/descargas/subcategoria/35/latino">Latino</a></li><li ><a href="/descargas/subcategoria/5/ts-scr-cam">TS-Scr / CAM</a></li><li ><a href="/descargas/subcategoria/7/vos">VOS</a></li></ul>
    data = scrapertools.cache_page(item.url)
    data = httptools.downloadpage(item.url).data
    data = scrapertools.get_match(data, '<ul class="submenu2 subcategorias">(.*?)</ul>')
    patron = '<a href="([^"]+)">([^<]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
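# A minimal sketch of the equivalence being applied throughout these hunks, assuming
# httptools.downloadpage returns a response object whose .data holds the page body that
# scrapertools.cache_page used to return directly (hypothetical helper, not in the diff):
def get_page(url):
    # body of the page, as cache_page used to return it
    return httptools.downloadpage(url).data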
@@ -78,7 +78,6 @@ def menudocumentales(item):
    return itemlist


# Because this function is named "search", the launcher prompts for a search string and passes it in as a parameter
def search(item, texto, categoria=""):
    logger.info(item.url + " search " + texto)
    itemlist = []
@@ -101,9 +100,7 @@ def search(item, texto, categoria=""):
def peliculas(item, paginacion=True):
    logger.info()
    url = item.url

    # Download the page
    data = scrapertools.cache_page(url)
    data = httptools.downloadpage(url).data
    patron = '<li id="ficha-\d+" class="ficha2[^<]+'
    patron += '<div class="detalles-ficha"[^<]+'
    patron += '<span class="nombre-det">Ficha\: ([^<]+)</span>[^<]+'
@@ -118,16 +115,11 @@ def peliculas(item, paginacion=True):
        scrapedtitle = title
        scrapedplot = clean_plot(plot)
        scrapedurl = urlparse.urljoin(item.url, url)
        scrapedthumbnail = urlparse.urljoin("http://www.bajui.org/", thumbnail.replace("_m.jpg", "_g.jpg"))
        logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")

        # Add the item to the XBMC listing
        scrapedthumbnail = urlparse.urljoin("http://bajui.org/", thumbnail.replace("_m.jpg", "_g.jpg"))
        itemlist.append(
            Item(channel=item.channel, action="enlaces", title=scrapedtitle, fulltitle=title, url=scrapedurl,
                 thumbnail=scrapedthumbnail, plot=scrapedplot, extra=scrapedtitle, context="4|5",
                 fanart=item.fanart, viewmode="movie_with_plot"))

    # Extract the pager
    patron = '<a href="([^"]+)" class="pagina pag_sig">Siguiente \»\;</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
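# A sketch of how the pager match is typically consumed in this channel (an assumed
# convention mirroring the surrounding code; this part is not shown in the hunk):
if len(matches) > 0:
    scrapedurl = urlparse.urljoin(item.url, matches[0])
    itemlist.append(Item(channel=item.channel, action="peliculas", title="Pagina siguiente >>", url=scrapedurl))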
@@ -187,7 +179,7 @@ def enlaces(item):
    logger.info()
    itemlist = []

    data = scrapertools.cache_page(item.url)
    data = httptools.downloadpage(item.url).data

    try:
        item.plot = scrapertools.get_match(data, '<span class="ficha-descrip">(.*?)</span>')
@@ -201,18 +193,6 @@ def enlaces(item):
    except:
        pass

    '''
    <div id="enlaces-34769"><img id="enlaces-cargando-34769" src="/images/cargando.gif" style="display:none;"/></div>
    </li><li id="box-enlace-330690" class="box-enlace">
    <div class="box-enlace-cabecera">
    <div class="datos-usuario"><img class="avatar" src="images/avatars/116305_p.jpg" />Enlaces de:
    <a class="nombre-usuario" href="/usuario/jerobien">jerobien</a> </div>
    <div class="datos-act">Actualizado: Hace 8 minutos</div>
    <div class="datos-boton-mostrar"><a id="boton-mostrar-330690" class="boton" href="javascript:mostrar_enlaces(330690,'b01de63028139fdd348d');">Mostrar enlaces</a></div>
    <div class="datos-servidores"><div class="datos-servidores-cell"><img src="/images/servidores/ul.to.png" title="uploaded.com" border="0" alt="uploaded.com" /><img src="/images/servidores/bitshare.png" title="bitshare.com" border="0" alt="bitshare.com" /><img src="/images/servidores/freakshare.net.jpg" title="freakshare.com" border="0" alt="freakshare.com" /><img src="/images/servidores/letitbit.png" title="letitbit.net" border="0" alt="letitbit.net" /><img src="/images/servidores/turbobit.png" title="turbobit.net" border="0" alt="turbobit.net" /><img src="/images/servidores/rapidgator.png" title="rapidgator.net" border="0" alt="rapidgator.net" /><img src="/images/servidores/cloudzer.png" title="clz.to" border="0" alt="clz.to" /></div></div>
    </div>
    '''

    patron = '<div class="box-enlace-cabecera"[^<]+'
    patron += '<div class="datos-usuario"><img class="avatar" src="([^"]+)" />Enlaces[^<]+'
    patron += '<a class="nombre-usuario" href="[^"]+">([^<]+)</a[^<]+</div>[^<]+'
@@ -222,19 +202,15 @@ def enlaces(item):

    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    logger.debug("matches=" + repr(matches))

    for thumbnail, usuario, fecha, id, id2, servidores in matches:
        # <img src="/images/servidores/bitshare.png" title="bitshare.com" border="0" alt="bitshare.com" /><img src="/images/servidores/freakshare.net.jpg" title="freakshare.com" border="0" alt="freakshare.com" /><img src="/images/servidores/rapidgator.png" title="rapidgator.net" border="0" alt="rapidgator.net" /><img src="/images/servidores/turbobit.png" title="turbobit.net" border="0" alt="turbobit.net" /><img src="/images/servidores/muchshare.png" title="muchshare.net" border="0" alt="muchshare.net" /><img src="/images/servidores/letitbit.png" title="letitbit.net" border="0" alt="letitbit.net" /><img src="/images/servidores/shareflare.png" title="shareflare.net" border="0" alt="shareflare.net" /><img src="/images/servidores/otros.gif" title="Otros servidores" border="0" alt="Otros" />
        patronservidores = '<img src="[^"]+" title="([^"]+)"'
        matches2 = re.compile(patronservidores, re.DOTALL).findall(servidores)
        lista_servidores = ""
        for servidor in matches2:
            lista_servidores = lista_servidores + servidor + ", "
        lista_servidores = lista_servidores[:-2]

        scrapedthumbnail = item.thumbnail
        # http://www.bajui.org/ajax/mostrar-enlaces.php?id=330582&code=124767d31bfbf14c3861
        scrapedurl = "http://www.bajui.org/ajax/mostrar-enlaces.php?id=" + id + "&code=" + id2
        scrapedplot = item.plot
        scrapedtitle = "Enlaces de " + usuario + " (" + fecha + ") (" + lista_servidores + ")"
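# An equivalent, shorter way to build the comma-separated server list above (a sketch;
# same result as the accumulate-then-trim loop):
lista_servidores = ", ".join(matches2)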
@@ -250,7 +226,7 @@ def enlaces(item):
def findvideos(item):
    logger.info()

    data = scrapertools.cache_page(item.url)
    data = httptools.downloadpage(item.url).data
    itemlist = servertools.find_video_items(data=data)
    for videoitem in itemlist:
        videoitem.channel = item.channel

@@ -512,7 +512,7 @@ def episodios(item):
    else:
        action = "menu_info_episode"

    seasons = scrapertools.find_multiple_matches(data, '<a href="([^"]+)"[^>]+><span class="season-toggle')
    seasons = scrapertools.find_single_match(data, '<a href="([^"]+)"[^>]+><span class="season-toggle')
    for i, url in enumerate(seasons):
        if i != 0:
            data_season = httptools.downloadpage(url, add_referer=True).data

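# Worth noting for the change above (a hedged observation, not part of the diff):
# find_multiple_matches returns a list of URLs, while find_single_match returns one string,
# so "for i, url in enumerate(seasons)" would then iterate the characters of that string
# rather than season URLs.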
@@ -177,7 +177,8 @@ class main(xbmcgui.WindowDialog):
        self.infoLabels["originaltitle"] = otmdb.result.get("original_title",
                                                            otmdb.result.get("original_name", ""))
        self.trailers = otmdb.get_videos()
        self.infoLabels["duration"] = int(otmdb.result.get("runtime", 0))
        if otmdb.result.get("runtime", 0):
            self.infoLabels["duration"] = int(otmdb.result.get("runtime", 0))
    else:
        self.trailers = []


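# Why the added guard matters (a self-contained sketch): TMDB can return null for runtime,
# and dict.get's default only applies when the key is missing, not when its value is None.
result = {"runtime": None}
runtime = result.get("runtime", 0)  # -> None, not 0
if runtime:                         # the guard skips int(None), which would raise TypeError
    duration = int(runtime)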
@@ -12,7 +12,7 @@ from core.item import Item
from core.tmdb import Tmdb
from platformcode import logger

host = "http://www.mejortorrent.com"
host = "https://mejortorrent.website"


def mainlist(item):
@@ -29,19 +29,19 @@ def mainlist(item):
    thumb_buscar = get_thumb("search.png")

    itemlist.append(Item(channel=item.channel, title="Peliculas", action="getlist",
                         url="http://www.mejortorrent.com/torrents-de-peliculas.html", thumbnail=thumb_pelis))
                         url=host + "/torrents-de-peliculas.html", thumbnail=thumb_pelis))
    itemlist.append(Item(channel=item.channel, title="Peliculas HD", action="getlist",
                         url="http://www.mejortorrent.com/torrents-de-peliculas-hd-alta-definicion.html",
                         url=host + "/torrents-de-peliculas-hd-alta-definicion.html",
                         thumbnail=thumb_pelis_hd))
    itemlist.append(Item(channel=item.channel, title="Series", action="getlist",
                         url="http://www.mejortorrent.com/torrents-de-series.html", thumbnail=thumb_series))
                         url=host + "/torrents-de-series.html", thumbnail=thumb_series))
    itemlist.append(Item(channel=item.channel, title="Series HD", action="getlist",
                         url="http://www.mejortorrent.com/torrents-de-series-hd-alta-definicion.html",
                         url=host + "/torrents-de-series-hd-alta-definicion.html",
                         thumbnail=thumb_series_hd))
    itemlist.append(Item(channel=item.channel, title="Series Listado Alfabetico", action="listalfabetico",
                         url="http://www.mejortorrent.com/torrents-de-series.html", thumbnail=thumb_series_az))
                         url=host + "/torrents-de-series.html", thumbnail=thumb_series_az))
    itemlist.append(Item(channel=item.channel, title="Documentales", action="getlist",
                         url="http://www.mejortorrent.com/torrents-de-documentales.html", thumbnail=thumb_docus))
                         url=host + "/torrents-de-documentales.html", thumbnail=thumb_docus))
    itemlist.append(Item(channel=item.channel, title="Buscar...", action="search", thumbnail=thumb_buscar))

    return itemlist
@@ -55,10 +55,10 @@ def listalfabetico(item):
    for letra in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T',
                  'U', 'V', 'W', 'X', 'Y', 'Z']:
        itemlist.append(Item(channel=item.channel, action="getlist", title=letra,
                             url="http://www.mejortorrent.com/series-letra-" + letra.lower() + ".html"))
                             url=host + "/series-letra-" + letra.lower() + ".html"))

    itemlist.append(Item(channel=item.channel, action="getlist", title="Todas",
                         url="http://www.mejortorrent.com/series-letra..html"))
                         url=host + "/series-letra..html"))

    return itemlist

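# An equivalent way to build the A-Z menu (a sketch; string.ascii_uppercase is exactly 'A'..'Z'):
import string
for letra in string.ascii_uppercase:
    itemlist.append(Item(channel=item.channel, action="getlist", title=letra,
                         url=host + "/series-letra-" + letra.lower() + ".html"))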
@@ -67,7 +67,7 @@ def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")

    item.url = "http://www.mejortorrent.com/secciones.php?sec=buscador&valor=%s" % (texto)
    item.url = host + "/secciones.php?sec=buscador&valor=%s" % (texto)
    try:
        return buscador(item)

@@ -81,30 +81,12 @@ def search(item, texto):
def buscador(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data

    # movies
    # <a href="/peli-descargar-torrent-9578-Presentimientos.html">
    # <img src="/uploads/imagenes/peliculas/Presentimientos.jpg" border="1"></a
    #
    # series
    #
    # <a href="/serie-descargar-torrents-11589-11590-Ahora-o-nunca-4-Temporada.html">
    # <img src="/uploads/imagenes/series/Ahora o nunca4.jpg" border="1"></a>
    #
    # docs
    #
    # <a href="/doc-descargar-torrent-1406-1407-El-sueno-de-todos.html">
    # <img border="1" src="/uploads/imagenes/documentales/El sueno de todos.jpg"></a>

    # search for series
    patron = "<a href='(/serie-descargar-torrent[^']+)'[^>]+>(.*?)</a>"
    patron += ".*?<span style='color:gray;'>([^']+)</span>"
    patron_enlace = "/serie-descargar-torrents-\d+-\d+-(.*?)\.html"

    matches = scrapertools.find_multiple_matches(data, patron)
    scrapertools.printMatches(matches)

    for scrapedurl, scrapedtitle, scrapedinfo in matches:
        title = scrapertools.remove_htmltags(scrapedtitle).decode('iso-8859-1').encode(
@@ -119,10 +101,7 @@ def buscador(item):
    # search for movies
    patron = "<a href='(/peli-descargar-torrent-[^']+)'[^>]+>(.*?)</a>"
    patron_enlace = "/peli-descargar-torrent-\d+(.*?)\.html"

    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for scrapedurl, scrapedtitle in matches:
        title = scrapertools.remove_htmltags(scrapedtitle).decode('iso-8859-1').encode('utf-8')
        url = urlparse.urljoin(item.url, scrapedurl)
@@ -135,10 +114,7 @@ def buscador(item):
    patron += "<font Color='darkblue'>(.*?)</font>.*?"
    patron += "<td align='right' width='20%'>(.*?)</td>"
    patron_enlace = "/doc-descargar-torrent-\d+-\d+-(.*?)\.html"

    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for scrapedurl, scrapedtitle, scrapedinfo in matches:
        title = scrapedtitle.decode('iso-8859-1').encode('utf8') + " " + scrapedinfo.decode('iso-8859-1').encode('utf8')
        url = urlparse.urljoin(item.url, scrapedurl)
@@ -154,23 +130,7 @@ def buscador(item):
def getlist(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data

    # movies
    # <a href="/peli-descargar-torrent-9578-Presentimientos.html">
    # <img src="/uploads/imagenes/peliculas/Presentimientos.jpg" border="1"></a
    #
    # series
    #
    # <a href="/serie-descargar-torrents-11589-11590-Ahora-o-nunca-4-Temporada.html">
    # <img src="/uploads/imagenes/series/Ahora o nunca4.jpg" border="1"></a>
    #
    # docs
    #
    # <a href="/doc-descargar-torrent-1406-1407-El-sueno-de-todos.html">
    # <img border="1" src="/uploads/imagenes/documentales/El sueno de todos.jpg"></a>

    if item.url.find("peliculas") > -1:
        patron = '<a href="(/peli-descargar-torrent[^"]+)">[^<]+'
        patron += '<img src="([^"]+)"[^<]+</a>'
@@ -202,27 +162,18 @@ def getlist(item):
        action = "episodios"
        folder = True
        extra = "docus"

    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for scrapedurl, scrapedthumbnail in matches:
        title = scrapertools.get_match(scrapedurl, patron_enlace)
        title = title.replace("-", " ")
        url = urlparse.urljoin(item.url, scrapedurl)
        thumbnail = urlparse.urljoin(item.url, urllib.quote(scrapedthumbnail))
        thumbnail = host + urllib.quote(scrapedthumbnail)
        plot = ""
        logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
        itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail, plot=plot,
                             folder=folder, extra=extra))

    matches = re.compile(patron_title, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    # Replace the title taken from the URL with a more informative one.
    # This implementation assumes it will find the same matches as the
    # previous loop, which is technically wrong, but works as long as
    # the page format does not change.
    cnt = 0
    for scrapedtitle, notused, scrapedinfo in matches:
        title = re.sub('\r\n', '', scrapedtitle).decode('iso-8859-1').encode('utf8').strip()
@@ -244,7 +195,6 @@ def getlist(item):
    # Extract the pager
    patronvideos = "<a href='([^']+)' class='paginar'> Siguiente >>"
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
@@ -267,18 +217,11 @@ def episodios(item):

    item.thumbnail = scrapertools.find_single_match(data,
                                                    "src='http://www\.mejortorrent\.com(/uploads/imagenes/" + tabla + "/[a-zA-Z0-9_ ]+.jpg)'")
    item.thumbnail = 'http://www.mejortorrent.com' + urllib.quote(item.thumbnail)
    item.thumbnail = host + urllib.quote(item.thumbnail)

    # <form name='episodios' action='secciones.php?sec=descargas&ap=contar_varios' method='post'>
    data = scrapertools.get_match(data,
                                  "<form name='episodios' action='secciones.php\?sec=descargas\&ap=contar_varios' method='post'>(.*?)</form>")
    '''
    <td bgcolor='#C8DAC8' style='border-bottom:1px solid black;'><a href='/serie-episodio-descargar-torrent-18741-Juego-de-tronos-4x01.html'>4x01 - Episodio en V.O. Sub Esp.</a></td>
    <td width='120' bgcolor='#C8DAC8' align='right' style='border-right:1px solid black; border-bottom:1px solid black;'><div style='color:#666666; font-size:9px; margin-right:5px;'>Fecha: 2014-04-07</div></td>
    <td width='60' bgcolor='#F1F1F1' align='center' style='border-bottom:1px solid black;'>
    <input type='checkbox' name='episodios[1]' value='18741'>
    '''

    if item.extra == "series":
        patron = "<td bgcolor[^>]+><a[^>]+>([^>]+)</a></td>[^<]+"
    else:
@@ -289,7 +232,6 @@ def episodios(item):
    patron += "<input type='checkbox' name='([^']+)' value='([^']+)'"

    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    tmdb_title = re.sub(r'(\s*-\s*)?\d+.*?\s*Temporada|(\s*-\s*)?\s*Miniserie\.?|\(.*\)|\[.*\]', '', item.title).strip()
    logger.debug('tmdb_title=' + tmdb_title)
@@ -306,7 +248,7 @@ def episodios(item):

        title = scrapedtitle + " (" + fecha + ")"

        url = "http://www.mejortorrent.com/secciones.php?sec=descargas&ap=contar_varios"
        url = host + "/secciones.php?sec=descargas&ap=contar_varios"
        # "episodios%5B1%5D=11744&total_capis=5&tabla=series&titulo=Sea+Patrol+-+2%AA+Temporada"
        post = urllib.urlencode({name: value, "total_capis": total_capis, "tabla": tabla, "titulo": titulo})
        logger.debug("post=" + post)
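# What urlencode produces here (a sketch using values from the sample comment above;
# Python 2 dicts are unordered, so key order may vary):
import urllib
post = urllib.urlencode({"episodios[1]": "11744", "total_capis": "5", "tabla": "series"})
# -> 'episodios%5B1%5D=11744&total_capis=5&tabla=series'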
@@ -370,20 +312,15 @@ def show_movie_info(item):

    patron = "<a href='(secciones.php\?sec\=descargas[^']+)'"
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for scrapedurl in matches:
        url = urlparse.urljoin(item.url, scrapedurl)
        logger.debug("title=[" + item.title + "], url=[" + url + "], thumbnail=[" + item.thumbnail + "]")

        torrent_data = httptools.downloadpage(url).data
        logger.debug("torrent_data=" + torrent_data)
        # <a href='/uploads/torrents/peliculas/los-juegos-del-hambre-brrip.torrent'>
        link = scrapertools.get_match(torrent_data, "<a href='(/uploads/torrents/peliculas/.*?\.torrent)'>")
        link = urlparse.urljoin(url, link)

        logger.debug("link=" + link)

        itemlist.append(Item(channel=item.channel, action="play", server="torrent", title=item.title, url=link,
                             thumbnail=item.thumbnail, plot=item.plot, fanart=item.fanart, folder=False))

@@ -402,26 +339,12 @@ def play(item):
    data = httptools.downloadpage(item.url, post=item.extra).data
    logger.debug("data=" + data)

    # series
    #
    # <a href="http://www.mejortorrent.com/uploads/torrents/series/falling-skies-2-01_02.torrent"
    # <a href="http://www.mejortorrent.com/uploads/torrents/series/falling-skies-2-03.torrent"
    #
    # docus
    #
    # <a href="http://www.mejortorrent.com/uploads/torrents/documentales/En_Suenyos_De_Todos_DVDrip.torrent">El sueño de todos. </a>

    params = dict(urlparse.parse_qsl(item.extra))

    patron = '<a href="(http://www.mejortorrent.com/uploads/torrents/' + params["tabla"] + '/.*?\.torrent)"'

    link = scrapertools.get_match(data, patron)

    logger.info("link=" + link)

    itemlist.append(Item(channel=item.channel, action="play", server="torrent", title=item.title, url=link,
                         thumbnail=item.thumbnail, plot=item.plot, folder=False))

    return itemlist

def newest(categoria):
@@ -430,12 +353,12 @@ def newest(categoria):
    item = Item()
    try:
        if categoria == 'torrent':
            item.url = 'http://www.mejortorrent.com/torrents-de-peliculas.html'
            item.url = host + "/torrents-de-peliculas.html"

            itemlist = getlist(item)
            if itemlist[-1].title == "Pagina siguiente >>":
                itemlist.pop()
            item.url = 'http://www.mejortorrent.com/torrents-de-series.html'
            item.url = host + "/torrents-de-series.html"
            itemlist.extend(getlist(item))
            if itemlist[-1].title == "Pagina siguiente >>":
                itemlist.pop()

@@ -238,23 +238,12 @@ def lista(item):
        itemlist.append(new_item)

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # Find the items that have no plot and load the corresponding pages to get it
    for item in itemlist:
        if item.infoLabels['plot'] == '':
            data = httptools.downloadpage(item.url).data
            item.fanart = scrapertools.find_single_match(data, 'meta property="og:image" content="([^"]+)" \/>')
            item.plot = scrapertools.find_single_match(data,
                                                       '<span>Sinopsis:<\/span>.([^<]+)<span '
                                                       'class="text-detail-hide"><\/span>.<\/p>')

    # Pagination
    if item.title != 'Buscar' and actual != '':
        if itemlist != []:
            next_page = str(int(actual) + 1)
            next_page_url = item.extra + 'pag-' + next_page
            if not next_page_url.startswith("http"):
                next_page_url = host + next_page_url
            next_page_url = host + next_page_url
            itemlist.append(
                Item(channel=item.channel,
                     action="lista",
@@ -441,9 +430,8 @@ def get_vip(url):
    else:
        id = scrapertools.find_single_match(item, 'episodes\/(\d+)')
        new_url = 'https://www.elreyxhd.com/samir.php?id=%s&tipo=capitulo&idioma=latino&x=&sv=' % id
        data = httptools.downloadpage(new_url, follow_redirects=False).headers
        itemlist.extend(servertools.find_video_items(data=str(data)))

        data = httptools.downloadpage(new_url, follow_redirects=False).headers.get("location", "")
        itemlist.append(Item(url=data))
    return itemlist


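# The pattern used above, spelled out (a sketch): with follow_redirects=False the redirect
# from samir.php is not followed, so the target video URL can be read from the Location header.
resp = httptools.downloadpage(new_url, follow_redirects=False)
video_url = resp.headers.get("location", "")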
@@ -463,22 +451,17 @@ def findvideos(item):
    itemlist.extend(servertools.find_video_items(data=data))

    for videoitem in itemlist:
        # videoitem.infoLabels = item.infoLabels
        videoitem.channel = item.channel
        videoitem.infoLabels = item.infoLabels
        if videoitem.quality == '' or videoitem.language == '':
            videoitem.quality = 'default'
            videoitem.language = 'Latino'
        if videoitem.server != '':
            videoitem.thumbnail = item.thumbnail
        else:
            videoitem.thumbnail = item.thumbnail
            videoitem.server = 'directo'
        videoitem.action = 'play'
        videoitem.fulltitle = item.title

        if videoitem.extra != 'directo' and 'youtube' not in videoitem.url:
            videoitem.title = item.contentTitle + ' (' + videoitem.server + ')'
            videoitem.title = item.contentTitle + ' (%s)'

    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    n = 0
    for videoitem in itemlist:
        if 'youtube' in videoitem.url:
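# How the deferred title works (a sketch): each title keeps a '(%s)' placeholder that
# get_servers_itemlist fills in once the real server name is known:
title_template = "Movie (%s)"
print(title_template % "openload".capitalize())  # -> Movie (Openload)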
@@ -490,7 +473,7 @@ def findvideos(item):
            itemlist.pop(1)

    # Required for FilterTools

    tmdb.set_infoLabels_itemlist(itemlist, True)
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required for AutoPlay
@@ -511,6 +494,11 @@ def findvideos(item):
    return itemlist


def play(item):
    item.thumbnail = item.contentThumbnail
    return [item]


def newest(categoria):
    logger.info()
    itemlist = []

@@ -11,6 +11,7 @@ from core import tmdb
from core.item import Item
from platformcode import config, logger

idiomas1 = {"/es.png": "CAST", "/en_es.png": "VOSE", "/la.png": "LAT", "/en.png": "ENG"}
HOST = 'http://www.yaske.ro'
parameters = channeltools.get_channel_parameters('yaske')
fanart_host = parameters['fanart']
@@ -119,15 +120,9 @@ def peliculas(item):
        matchesidiomas = scrapertools.find_multiple_matches(idiomas, patronidiomas)
        idiomas_disponibles = []
        for idioma in matchesidiomas:
            if idioma.endswith("/la.png"):
                idiomas_disponibles.append("LAT")
            elif idioma.endswith("/en.png"):
                idiomas_disponibles.append("VO")
            elif idioma.endswith("/en_es.png"):
                idiomas_disponibles.append("VOSE")
            elif idioma.endswith("/es.png"):
                idiomas_disponibles.append("ESP")

            for lang in idiomas1.keys():
                if idioma.endswith(lang):
                    idiomas_disponibles.append(idiomas1[lang])
        if idiomas_disponibles:
            idiomas_disponibles = "[" + "/".join(idiomas_disponibles) + "]"
        contentTitle = scrapertoolsV2.htmlclean(scrapedtitle.strip())
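# The dict-driven lookup added above replaces the removed if/elif chain (note it maps
# "/en.png" to "ENG" where the old code used "VO"); a sketch of the lookup:
idiomas1 = {"/es.png": "CAST", "/en_es.png": "VOSE", "/la.png": "LAT", "/en.png": "ENG"}
idioma = "poster/la.png"
flags = [tag for suffix, tag in idiomas1.items() if idioma.endswith(suffix)]
# flags == ["LAT"]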
@@ -179,36 +174,28 @@ def findvideos(item):
    logger.info()
    itemlist = []
    sublist = []

    # Download the page
    url = "http://widget.olimpo.link/playlist/?tmdb=" + scrapertools.find_single_match(item.url, 'yaske.ro/([0-9]+)')
    data = httptools.downloadpage(item.url).data
    mtmdb = scrapertools.find_single_match(item.url, 'yaske.ro/([0-9]+)')
    patron = '(?s)id="online".*?server="([^"]+)"'
    mserver = scrapertools.find_single_match(data, patron)
    url = "http://olimpo.link/?tmdb=%s&server=%s" % (mtmdb, mserver)
    data = httptools.downloadpage(url).data
    if not item.plot:
        item.plot = scrapertoolsV2.find_single_match(data, '>Sinopsis</dt> <dd>([^<]+)</dd>')
        item.plot = scrapertoolsV2.decodeHtmlentities(item.plot)

    patron = '(/embed/[^"]+).*?'
    patron += 'quality text-overflow ">([^<]+).*?'
    patron += 'title="([^"]+)'
    patron = '/\?tmdb=[^"]+.*?domain=(?:www\.|)([^\.]+).*?text-overflow.*?href="([^"]+).*?'
    patron += '\[([^\]]+)\].*?\[([^\]]+)\]'
    matches = scrapertools.find_multiple_matches(data, patron)

    for url, calidad, idioma in matches:
        if 'embed' in url:
            url = "http://widget.olimpo.link" + url
            data = httptools.downloadpage(url).data
            url = scrapertools.find_single_match(data, 'iframe src="([^"]+)')
        sublist.append(item.clone(channel=item.channel, action="play", url=url, folder=False, text_color=color1, quality=calidad.strip(),
                                  language=idioma.strip()))
    sublist = servertools.get_servers_itemlist(sublist, lambda i: "Ver en %s %s" % (i.server, i.quality), True)

    # Add the servers found, grouping them by language
    for server, url, idioma, calidad in matches:
        sublist.append(item.clone(channel=item.channel, action="play", url=url, folder=False, text_color=color1, quality=calidad.strip(),
                                  language=idioma.strip(),
                                  title="Ver en %s %s" % (server, calidad)
                                  ))
    for k in ["Español", "Latino", "Subtitulado", "Ingles"]:
        lista_idioma = filter(lambda i: i.language == k, sublist)
        if lista_idioma:
            itemlist.append(Item(channel=item.channel, title=k, fanart=item.fanart, folder=False,
            itemlist.append(item.clone(title=k, folder=False,
                                       text_color=color2, text_bold=True, thumbnail=thumbnail_host))
            itemlist.extend(lista_idioma)

    tmdb.set_infoLabels(itemlist, True)
    # Insert the "Buscar trailer" and "Añadir a la videoteca" items
    if itemlist and item.extra != "library":
        title = "%s [Buscar trailer]" % (item.contentTitle)
@@ -221,3 +208,12 @@ def findvideos(item):
                             contentTitle=item.contentTitle, extra="library", thumbnail=thumbnail_host))

    return itemlist


def play(item):
    logger.info()
    itemlist = []
    ddd = httptools.downloadpage(item.url).data
    url = "http://olimpo.link" + scrapertools.find_single_match(ddd, '<iframe src="([^"]+)')
    item.url = httptools.downloadpage(url + "&ge=1", follow_redirects=False, only_headers=True).headers.get("location", "")
    item.server = servertools.get_server_from_url(item.url)
    return [item]

@@ -1,42 +0,0 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "kingvid.tv/(?:embed-|)([A-z0-9]+)",
                "url": "http://kingvid.tv/embed-\\1.html"
            }
        ]
    },
    "free": true,
    "id": "kingvid",
    "name": "kingvid",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "Incluir en lista negra",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "Incluir en lista de favoritos",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ],
    "thumbnail": "http://i.imgur.com/oq0tPhY.png?1"
}
@@ -1,46 +0,0 @@
# -*- coding: utf-8 -*-

from core import httptools
from core import scrapertools
from lib import jsunpack
from platformcode import logger


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)

    data = httptools.downloadpage(page_url).data
    if "<title>watch </title>" in data.lower() or "File was deleted" in data:
        return False, "[kingvid] El archivo no existe o ha sido borrado"

    return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("(page_url='%s')" % page_url)

    data = httptools.downloadpage(page_url, add_referer=True).data
    match = scrapertools.find_single_match(data, "<script type=[\"']text/javascript[\"']>(eval.*?)</script>")
    data = jsunpack.unpack(match)

    matches = scrapertools.find_multiple_matches(data, 'file\s*:\s*"([^"]+)"\}')
    video_urls = []
    for video_url in matches:
        filename = scrapertools.get_filename_from_url(video_url)[-4:]
        if video_url.endswith("smil"):
            playpath = video_url.rsplit("/", 1)[1].replace(".smil", "")
            rtmp = scrapertools.find_single_match(data, 'image\s*:\s*"([^"]+)"')
            rtmp = scrapertools.find_single_match(rtmp, 'i/(.*?)_')
            video_url = "rtmp://kingvid.tv:1935/vod/ playpath=mp4:%s_n?h=%s " \
                        "swfUrl=http://kingvid.tv/player7/jwplayer.flash.swf pageUrl=%s" % \
                        (rtmp, playpath, page_url)
            filename = "RTMP"
            video_urls.append([filename + " [kingvid]", video_url])
        elif video_url[-4:] in ['.mp4', 'm3u8']:
            video_urls.append([filename + " [kingvid]", video_url])

    video_urls.sort(key=lambda x: x[0], reverse=True)
    for video_url in video_urls:
        logger.info("%s - %s" % (video_url[0], video_url[1]))

    return video_urls
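# What the jsunpack step above does (a hedged note): it reverses the p.a.c.k.e.r.
# obfuscation, turning the packed "eval(function(p,a,c,k,e,d){...})" script back into
# plain JavaScript so the 'file:"..."' URLs become visible to the regex that follows.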
@@ -16,9 +16,8 @@ def test_video_exists(page_url):

    referer = page_url.replace('iframe', 'preview')
    data = httptools.downloadpage(page_url, headers={'referer': referer}).data

    if "<title>watch </title>" in data.lower():
        return False, "[powvideo] El archivo no existe o ha sido borrado"
    if "File was deleted" in data:
        return False, "[powvideo] El archivo no existe o ha sido borrado"
    if "el archivo ha sido borrado por no respetar" in data.lower():
        return False, "[powvideo] El archivo no existe o ha sido borrado por no respetar los Terminos de uso"


@@ -23,7 +23,7 @@ def test_video_exists(page_url):

    if "Object not found" in response.data:
        return False, "[Rapidvideo] El archivo no existe o ha sido borrado"
    if reponse.code == 500:
    if response.code == 500:
        return False, "[Rapidvideo] Error de servidor, inténtelo más tarde."

    return True, ""
