@@ -10,15 +10,15 @@ from platformcode import logger

url_api = ""
beeg_salt = ""

Host = "https://beeg.com"

def get_api_url():
    global url_api
    global beeg_salt
    data = scrapertools.downloadpage("http://beeg.com")
    version = re.compile('<script src="//static.beeg.com/cpl/([\d]+).js"').findall(data)[0]
    js_url = "http:" + re.compile('<script src="(//static.beeg.com/cpl/[\d]+.js)"').findall(data)[0]
    url_api = "https://api2.beeg.com/api/v6/" + version
    data = scrapertools.downloadpage(Host)
    version = re.compile('<script src="/static/cpl/([\d]+).js"').findall(data)[0]
    js_url = Host + "/static/cpl/" + version + ".js"
    url_api = Host + "/api/v6/" + version
    data = scrapertools.downloadpage(js_url)
    beeg_salt = re.compile('beeg_salt="([^"]+)"').findall(data)[0]
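    # --- Illustrative sketch (added; not part of the diff) ---
    # How the version scrape above behaves on a minimal, hypothetical page fragment:
    sample = '<script src="/static/cpl/1791.js"></script>'
    sample_version = re.compile('<script src="/static/cpl/([\d]+).js"').findall(sample)[0]
    # sample_version == "1791", so url_api would become "https://beeg.com/api/v6/1791"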

@@ -53,8 +53,10 @@ def mainlist(item):
    itemlist = []
    itemlist.append(Item(channel=item.channel, action="videos", title="Últimos videos", url=url_api + "/index/main/0/pc",
                         viewmode="movie"))
    itemlist.append(Item(channel=item.channel, action="listcategorias", title="Listado categorias",
                         url=url_api + "/index/main/0/pc"))
    itemlist.append(Item(channel=item.channel, action="listcategorias", title="Listado categorias Populares",
                         url=url_api + "/index/main/0/pc", extra="popular"))
    itemlist.append(Item(channel=item.channel, action="listcategorias", title="Listado categorias completo",
                         url=url_api + "/index/main/0/pc", extra="nonpopular"))
    itemlist.append(
        Item(channel=item.channel, action="search", title="Buscar", url=url_api + "/index/search/0/pc?query=%s"))
    return itemlist
@@ -91,7 +93,7 @@ def listcategorias(item):
    data = scrapertools.cache_page(item.url)
    JSONData = json.loads(data)  # data is a plain string, so loads (json.load expects a file object)
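    # Expected JSON shape (inferred from the lookups below, not verified against
    # the live API): {"tags": {"popular": [...], "nonpopular": [...], ...}}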

    for Tag in JSONData["tags"]["popular"]:
    for Tag in JSONData["tags"][item.extra]:
        url = url_api + "/index/tag/0/pc?tag=" + Tag
        title = Tag
        title = title[:1].upper() + title[1:]

@@ -1,120 +1,209 @@
# -*- coding: utf-8 -*-

import re
import urllib
import urlparse

from core import servertools
from core import scrapertools
from core.item import Item
from platformcode import logger
from core import httptools
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import tmdb

Host = 'http://descargas2020.com'

host = 'http://descargas2020.com/'  # Manually change "xx" in line 287 (".com/xx/library"): tl for torrentlocura, tr for torrentrapid, d20 for descargas2020

def mainlist(item):
    logger.info()

    itemlist = []
    itemlist.append(Item(channel=item.channel, action="submenu", title="Películas", url=Host + "/peliculas/",
                         thumbnail=get_thumb('movies', auto=True)))
    itemlist.append(Item(channel=item.channel, action="submenu", title="Series", url=Host + "/series/",
                         thumbnail=get_thumb('tvshows', auto=True)))
    #itemlist.append(Item(channel=item.channel, action="listado", title="Anime", url=Host+"/anime/",
    #                     viewmode="movie_with_plot"))
    #itemlist.append(
    #    Item(channel=item.channel, action="listado", title="Documentales", url=Host+"/documentales/",
    #         viewmode="movie_with_plot"))
    itemlist.append(Item(channel=item.channel, action="search", title="Buscar", url=Host + '/buscar',
                         thumbnail=get_thumb('search', auto=True)))

    thumb_pelis = get_thumb("channels_movie.png")
    thumb_series = get_thumb("channels_tvshow.png")
    thumb_search = get_thumb("search.png")

    itemlist.append(Item(channel=item.channel, action="submenu", title="Películas", url=host,
                         extra="peliculas", thumbnail=thumb_pelis))

    itemlist.append(Item(channel=item.channel, action="submenu", title="Series", url=host, extra="series",
                         thumbnail=thumb_series))

    itemlist.append(Item(channel=item.channel, action="submenu", title="Documentales", url=host, extra="varios",
                         thumbnail=thumb_series))
    itemlist.append(
        Item(channel=item.channel, action="search", title="Buscar", url=host + "buscar", thumbnail=thumb_search))

    return itemlist

def submenu(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    patron = '<li><a href="' + item.url + '"><i.+?<ul>(.+?)<\/ul>'  # Filtered by url
    data_cat = scrapertools.find_single_match(data, patron)
    patron_cat = '<li><a href="(.+?)" title="(.+?)".+?<\/a><\/li>'
    matches = scrapertools.find_multiple_matches(data_cat, patron_cat)
    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")

    #patron = '<li><a href="http://(?:www.)?descargas2020.com/' + item.extra + '/">.*?<ul>(.*?)</ul>'
    patron = '<li><a href="' + item.url + item.extra + '/">.*?<ul>(.*?)</ul>'  # Filtered by url
    data = scrapertools.get_match(data, patron)

    patron = '<a href="([^"]+)".*?>([^>]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action="listado"))
    if 'peliculas' in item.url:
        new_item = item.clone(title='Peliculas 4K', url=Host + '/buscar', post='q=4k', action='listado2',
                              pattern='buscar-list')
        itemlist.append(new_item)
        title = scrapedtitle.strip()
        url = scrapedurl

        itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, extra="pelilist"))
        itemlist.append(
            Item(channel=item.channel, action="alfabeto", title=title + " [A-Z]", url=url, extra="pelilist"))

    if item.extra == "peliculas":
        itemlist.append(Item(channel=item.channel, action="listado", title="Películas 4K", url=host + "peliculas-hd/4kultrahd/", extra="pelilist"))
        itemlist.append(
            Item(channel=item.channel, action="alfabeto", title="Películas 4K" + " [A-Z]", url=host + "peliculas-hd/4kultrahd/", extra="pelilist"))

    return itemlist


def alfabeto(item):
    logger.info()
    itemlist = []

    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")

    patron = '<ul class="alfabeto">(.*?)</ul>'
    data = scrapertools.get_match(data, patron)

    patron = '<a href="([^"]+)"[^>]+>([^>]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        title = scrapedtitle.upper()
        url = scrapedurl

        itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, extra=item.extra))

    return itemlist


def listado(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    patron_data = '<ul class="pelilist">(.+?)</ul>'
    data_listado = scrapertools.find_single_match(data, patron_data)
    patron_listado = '<li><a href="(.+?)" title=".+?"><img src="(.+?)".+?><h2'
    if 'Serie' in item.title:
        patron_listado += '.+?>'
    url_next_page = ''

    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    #logger.debug(data)
    logger.debug('item.modo: %s' % item.modo)
    logger.debug('item.extra: %s' % item.extra)

    if item.modo != 'next' or item.modo == '':
        logger.debug('item.title: %s' % item.title)
        patron = '<ul class="' + item.extra + '">(.*?)</ul>'
        logger.debug("patron=" + patron)
        fichas = scrapertools.get_match(data, patron)
        page_extra = item.extra
    else:
        patron_listado += '>'
        patron_listado += '(.+?)<\/h2><span>(.+?)<\/span><\/a><\/li>'
        matches = scrapertools.find_multiple_matches(data_listado, patron_listado)
        fichas = data
        page_extra = item.extra

    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedquality in matches:
        patron = '<a href="([^"]+).*?'  # the url
        patron += 'title="([^"]+).*?'  # the title
        patron += '<img src="([^"]+)"[^>]+>.*?'  # the thumbnail
        #patron += '<span>([^<].*?)<'  # the quality: original NewPCT1 pattern; if the quality is missing, the next "matches" enters a loop
        patron += '<span>([^<].*?)?<'  # the quality
        matches = re.compile(patron, re.DOTALL).findall(fichas)
        logger.debug('item.next_page: %s' % item.next_page)

        new_item = item.clone(title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, quality=scrapedquality)
    # Pagination
    if item.next_page != 'b':
        if len(matches) > 30:
            url_next_page = item.url
            matches = matches[:30]
            next_page = 'b'
            modo = 'continue'
    else:
        matches = matches[30:]
        next_page = 'a'
        patron_next_page = '<a href="([^"]+)">Next<\/a>'
        matches_next_page = re.compile(patron_next_page, re.DOTALL).findall(data)
        modo = 'continue'
        if len(matches_next_page) > 0:
            url_next_page = matches_next_page[0]
            modo = 'next'
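    # Worked example of the two-phase pagination above (added; assumes a page
    # yielding 45 matches): first call -> item.next_page != 'b', so only the
    # first 30 matches are listed and the same url is re-queued with
    # next_page='b'; second call -> the else branch lists matches[30:] and, if
    # the site exposes a "Next" link, follows it with modo='next'.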

    for scrapedurl, scrapedtitle, scrapedthumbnail, calidad in matches:
        url = scrapedurl
        title = scrapedtitle
        thumbnail = scrapedthumbnail
        action = "findvideos"
        extra = ""
        year = scrapertools.find_single_match(scrapedthumbnail, r'-(\d{4})')
        if ".com/series" in url:
            action = "episodios"
            extra = "serie"


            title = scrapertools.find_single_match(title, '([^-]+)')
            title = title.replace("Ver online", "", 1).replace("Descarga Serie HD", "", 1).replace("Ver en linea", "",
                                                                                                   1).strip()

        if 'Serie' in item.title:
            new_item.action = "episodios"
            new_item.contentSerieName = scrapedtitle
            new_item.contentType = 'tvshow'
        else:
            new_item.action = "findvideos"
            new_item.contentTitle = scrapedtitle
            new_item.contentType = 'movie'
        itemlist.append(new_item)
    # Next page
    patron_pag = '<ul class="pagination"><li><a class="current" href=".+?">.+?<\/a>.+?<a href="(.+?)">'
    siguiente = scrapertools.find_single_match(data, patron_pag)
    itemlist.append(
        Item(channel=item.channel, title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=siguiente, action="listado"))
        title = title.replace("Descargar torrent ", "", 1).replace("Descarga Gratis ", "", 1).replace("Descargar Estreno ", "", 1).replace("Pelicula en latino ", "", 1).replace("Descargar Pelicula ", "", 1).replace("Descargar", "", 1).replace("Descarga", "", 1).replace("Bajar", "", 1).strip()
        if title.endswith("gratis"): title = title[:-7]
        if title.endswith("torrent"): title = title[:-8]
        if title.endswith("en HD"): title = title[:-6]

        show = title
        if item.extra != "buscar-list":
            title = title + ' ' + calidad

        context = ""
        context_title = scrapertools.find_single_match(url, "http://(?:www.)?descargas2020.com/(.*?)/(.*?)/")
        if context_title:
            try:
                context = context_title[0].replace("descargar-", "").replace("pelicula", "movie").replace("series",
                                                                                                          "tvshow")
                context_title = context_title[1].replace("-", " ")
                if re.search('\d{4}', context_title[-4:]):
                    context_title = context_title[:-4]
                elif re.search('\(\d{4}\)', context_title[-6:]):
                    context_title = context_title[:-6]

            except:
                context_title = show
        logger.debug('contxt title: %s' % context_title)
        logger.debug('year: %s' % year)

        logger.debug('context: %s' % context)
        if not 'array' in title:
            itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail,
                                 extra=extra,
                                 show=context_title, contentTitle=context_title, contentType=context,
                                 context=["buscar_trailer"], infoLabels={'year': year}))

    tmdb.set_infoLabels(itemlist, True)



    if url_next_page:
        itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente",
                             url=url_next_page, next_page=next_page, folder=True,
                             text_color='yellow', text_bold=True, modo=modo, plot=extra,
                             extra=page_extra))
    return itemlist

def episodios(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    patron_data = '<ul class="buscar-list">(.+?)</ul>'
    data_listado = scrapertools.find_single_match(data, patron_data)
    patron = '<img src="(.+?)" alt=".+?">.+?<div class=".+?">.+?<a href=(.+?)" title=".+?">.+?>Serie.+?>(.+?)<'
    matches = scrapertools.find_multiple_matches(data_listado, patron)
    for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
        if " al " in scrapedtitle:
            #action = "episodios"
            titulo = scrapedurl.split('http')
            scrapedurl = "http" + titulo[1]
        itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
                                   action="findvideos", show=scrapedtitle))
    return itemlist


def listado2(item):
    logger.info()
    itemlist = []

    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url, post=item.post).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")

    list_chars = [["Ã±", "ñ"]]

    for el in list_chars:
        data = re.sub(r"%s" % el[0], el[1], data)

    try:
        get, post = scrapertools.find_single_match(data, '<ul class="pagination">.*?<a class="current" href.*?'
                                                         '<a\s*href="([^"]+)"(?:\s*onClick=".*?\'([^"]+)\'.*?")')
@@ -129,36 +218,238 @@ def listado2(item):

    pattern = '<ul class="%s">(.*?)</ul>' % item.pattern
    data = scrapertools.get_match(data, pattern)


    pattern = '<li><a href="(?P<url>[^"]+)".*?<img src="(?P<img>[^"]+)"[^>]+>.*?<h2.*?>\s*(?P<title>.*?)\s*</h2>'

    matches = re.compile(pattern, re.DOTALL).findall(data)
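    # Note (added for clarity): even with named groups, findall() returns plain
    # tuples ordered by group number, so the loop below unpacks (url, img, title).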

    for url, thumb, title in matches:
        # fix encoding for title
        real_title = scrapertools.find_single_match(title, r'font color.*?font.*?><b>(.*?)<\/b><\/font>')
        real_title = scrapertools.remove_htmltags(real_title).decode('iso-8859-1').encode('utf-8')
        title = scrapertools.htmlclean(title)
        title = title.replace("�", "ñ")

        # don't show anything that isn't a video
        if "descargar-juego/" in url or "/varios/" in url:
        if "/juego/" in url or "/varios/" in url:
            continue

        if ".com/series" in url:

            show = title
            show = real_title

            itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumb,
                                 context=["buscar_trailer"], show=show))

                                 context=["buscar_trailer"], contentSerieName=show))
        else:
            itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb,

            itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb,
                                 context=["buscar_trailer"]))

    if post:
        itemlist.append(item.clone(channel=item.channel, action="listado2", title="[COLOR cyan]Página Siguiente >>[/COLOR]",
                                   thumbnail=''))
        itemlist.append(item.clone(channel=item.channel, action="listado2", title=">> Página siguiente",
                                   thumbnail=get_thumb("next.png")))

    return itemlist

def findvideos(item):
    logger.info()
    itemlist = []

    ## Any of the three options is valid
    # item.url = item.url.replace(".com/",".com/ver-online/")
    # item.url = item.url.replace(".com/",".com/descarga-directa/")
    item.url = item.url.replace(".com/", ".com/descarga-torrent/")

    # Download the page
    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    data = data.replace("$!", "#!").replace("'", "\"").replace("Ã±", "ñ").replace("//pictures", "/pictures")

    title = scrapertools.find_single_match(data, "<h1><strong>([^<]+)<\/strong>[^<]+<\/h1>")
    title += scrapertools.find_single_match(data, "<h1><strong>[^<]+<\/strong>([^<]+)<\/h1>")
    caratula = scrapertools.find_single_match(data, '<div class="entry-left">.*?src="([^"]+)"')

    #<div style="float:left;width:100%;min-height:70px;margin:10px 0px;"> <a href="javascript:void(0);" onClick="javascript:openTorrent();" title="Descargar torrent de Star Wars Los Ultimos Jedi " class="btn-torrent">Descarga tu Archivo torrent!</a> <script type="text/javascript"> function openTorrent() {var link = "http://advserver.xyz/v2/gena?gid=ADQGZS0ABR&uid=164"; window.open(link); window.location.href = "http://descargas2020.com/descargar-torrent/104616_-1520707769-star-wars-los-ultimos-jedi--bluray-screeener/";} </script> </div>

    patron = 'openTorrent.*?title=".*?class="btn-torrent">.*?function openTorrent.*?href = "(.*?)";'

    # scraped torrent
    url = scrapertools.find_single_match(data, patron)
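    # Worked example (added, based on the sample HTML in the comment above): the
    # pattern skips the advserver "var link" popup URL and captures the real
    # redirect, i.e. url ends up as
    # "http://descargas2020.com/descargar-torrent/104616_-1520707769-star-wars-los-ultimos-jedi--bluray-screeener/"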
    logger.debug("urltorrent: " + url + " Title: " + title + " Caratula: " + caratula)
    if url != "":
        itemlist.append(
            Item(channel=item.channel, action="play", server="torrent", title="[torrent] - " + title, fulltitle=title,
                 url=url, thumbnail=caratula, plot=item.plot, folder=False))

    # scraped: watch videos / download videos, single link, multiple links

    data = data.replace("http://tumejorserie.com/descargar/url_encript.php?link=", "(")
    data = data.replace(
        'javascript:;" onClick="popup("http://www.descargas2020.com/d20/library/include/ajax/get_modallinks.php?links=', "")

    logger.debug("matar %s" % data)

    # Old server-scraping system used by Newpct1. Since it does not work with descargas2020, it is replaced by this more common one
    #patron_descargar = '<div id="tab2"[^>]+>.*?</ul>'
    #patron_ver = '<div id="tab3"[^>]+>.*?</ul>'

    #match_ver = scrapertools.find_single_match(data, patron_ver)
    #match_descargar = scrapertools.find_single_match(data, patron_descargar)

    #patron = '<div class="box1"><img src="([^"]+)".*?'  # logo
    #patron += '<div class="box2">([^<]+)</div>'  # server
    #patron += '<div class="box3">([^<]+)</div>'  # language
    #patron += '<div class="box4">([^<]+)</div>'  # quality
    #patron += '<div class="box5"><a href="([^"]+)".*?'  # link
    #patron += '<div class="box6">([^<]+)</div>'  # title

    #enlaces_ver = re.compile(patron, re.DOTALL).findall(match_ver)
    #enlaces_descargar = re.compile(patron, re.DOTALL).findall(match_descargar)

    # New server-scraping system created by Torrentlocula, compatible with other Newpct1 clones
    patron = '<div class=\"box1\"[^<]+<img src=\"([^<]+)?" style[^<]+><\/div[^<]+<div class="box2">([^<]+)?<\/div[^<]+<div class="box3">([^<]+)?'
    patron += '<\/div[^<]+<div class="box4">([^<]+)?<\/div[^<]+<div class="box5"><a href=(.*?)? rel.*?'
    patron += '<\/div[^<]+<div class="box6">([^<]+)?<'
    logger.debug("Patron: " + patron)

    enlaces_ver = re.compile(patron, re.DOTALL).findall(data)
    enlaces_descargar = enlaces_ver
    logger.debug(enlaces_ver)

    for logo, servidor, idioma, calidad, enlace, titulo in enlaces_ver:
        if "Ver" in titulo:
            servidor = servidor.replace("streamin", "streaminto")
            titulo = titulo + " [" + servidor + "]"
            mostrar_server = True
            if config.get_setting("hidepremium"):
                mostrar_server = servertools.is_server_enabled(servidor)
            if mostrar_server:
                try:
                    devuelve = servertools.findvideosbyserver(enlace, servidor)
                    if devuelve:
                        enlace = devuelve[0][1]
                        itemlist.append(
                            Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo,
                                 fulltitle=item.title, url=enlace, thumbnail=logo, plot=item.plot, folder=False))
                except:
                    pass

    for logo, servidor, idioma, calidad, enlace, titulo in enlaces_descargar:
        if "Ver" not in titulo:
            servidor = servidor.replace("uploaded", "uploadedto")
            partes = enlace.split(" ")
            p = 1
            for enlace in partes:
                parte_titulo = titulo + " (%s/%s)" % (p, len(partes)) + " [" + servidor + "]"
                p += 1
                mostrar_server = True
                if config.get_setting("hidepremium"):
                    mostrar_server = servertools.is_server_enabled(servidor)
                if mostrar_server:
                    try:
                        devuelve = servertools.findvideosbyserver(enlace, servidor)
                        if devuelve:
                            enlace = devuelve[0][1]
                            itemlist.append(Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor,
                                                 title=parte_titulo, fulltitle=item.title, url=enlace, thumbnail=logo,
                                                 plot=item.plot, folder=False))
                    except:
                        pass
    return itemlist


def episodios(item):
    logger.info()
    itemlist = []
    infoLabels = item.infoLabels
    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    pattern = '<ul class="%s">(.*?)</ul>' % "pagination"  # item.pattern
    pagination = scrapertools.find_single_match(data, pattern)
    if pagination:
        pattern = '<li><a href="([^"]+)">Last<\/a>'
        full_url = scrapertools.find_single_match(pagination, pattern)
        url, last_page = scrapertools.find_single_match(full_url, r'(.*?\/pg\/)(\d+)')
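        # Example (added; hypothetical URL): full_url ".../series/foo/pg/7"
        # splits into url ".../series/foo/pg/" and last_page "7"; pages 2..7
        # are then probed below.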
        list_pages = [item.url]
        for x in range(2, int(last_page) + 1):
            response = httptools.downloadpage('%s%s' % (url, x))
            if response.sucess:
                list_pages.append("%s%s" % (url, x))
    else:
        list_pages = [item.url]

    for index, page in enumerate(list_pages):
        logger.debug("Loading page %s/%s url=%s" % (index, len(list_pages), page))
        data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(page).data)
        data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")

        pattern = '<ul class="%s">(.*?)</ul>' % "buscar-list"  # item.pattern
        data = scrapertools.get_match(data, pattern)

        pattern = '<li[^>]*><a href="(?P<url>[^"]+).*?<img src="(?P<thumb>[^"]+)".*?<h2[^>]+>(?P<info>.*?)</h2>'
        matches = re.compile(pattern, re.DOTALL).findall(data)

        for url, thumb, info in matches:

            if "<span" in info:  # new style
                pattern = ".*?[^>]+>.*?Temporada\s*(?P<season>\d+)\s*Capitulo(?:s)?\s*(?P<episode>\d+)" \
                          "(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)</span>\s*Calidad\s*<span[^>]+>" \
                          "[\[]\s*(?P<quality>.*?)\s*[\]]</span>"
                r = re.compile(pattern)
                match = [m.groupdict() for m in r.finditer(info)][0]

                if match["episode2"]:
                    multi = True
                    title = "%s (%sx%s-%s) [%s][%s]" % (item.show, match["season"], str(match["episode"]).zfill(2),
                                                        str(match["episode2"]).zfill(2), match["lang"],
                                                        match["quality"])
                else:
                    multi = False
                    title = "%s (%sx%s) [%s][%s]" % (item.show, match["season"], str(match["episode"]).zfill(2),
                                                     match["lang"], match["quality"])

            else:  # old style
                pattern = "\[(?P<quality>.*?)\].*?\[Cap.(?P<season>\d+)(?P<episode>\d{2})(?:_(?P<season2>\d+)" \
                          "(?P<episode2>\d{2}))?.*?\].*?(?:\[(?P<lang>.*?)\])?"

                r = re.compile(pattern)
                match = [m.groupdict() for m in r.finditer(info)][0]
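                # Worked example (added; hypothetical info string): on
                # "[HDTV][Cap.101_102][Español]" this pattern yields
                # quality="HDTV", season="1", episode="01", season2="1",
                # episode2="02"; the lazy optional tail leaves lang=None, so
                # the branch below builds e.g. "Show (1x01-02) [HDTV]".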
                # logger.debug("data %s" % match)

                str_lang = ""
                if match["lang"] is not None:
                    str_lang = "[%s]" % match["lang"]

                if match["season2"] and match["episode2"]:
                    multi = True
                    if match["season"] == match["season2"]:

                        title = "%s (%sx%s-%s) %s[%s]" % (item.show, match["season"], match["episode"],
                                                          match["episode2"], str_lang, match["quality"])
                    else:
                        title = "%s (%sx%s-%sx%s) %s[%s]" % (item.show, match["season"], match["episode"],
                                                             match["season2"], match["episode2"], str_lang,
                                                             match["quality"])
                else:
                    title = "%s (%sx%s) %s[%s]" % (item.show, match["season"], match["episode"], str_lang,
                                                   match["quality"])
                    multi = False

            season = match['season']
            episode = match['episode']
            itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb,
                                 quality=item.quality, multi=multi, contentSeason=season,
                                 contentEpisodeNumber=episode, infoLabels=infoLabels))

    # order list
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    if len(itemlist) > 1:
        itemlist = sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber)))

    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            item.clone(title="[COLOR orange][B]Añadir esta serie a la videoteca[/B][/COLOR]", action="add_serie_to_library", extra="episodios"))

    return itemlist

def search(item, texto):
    logger.info("search:" + texto)
@@ -178,40 +469,22 @@ def search(item, texto):
            logger.error("%s" % line)
        return []



def findvideos(item):
    logger.info()
    itemlist = []
    new_item = []
    data = httptools.downloadpage(item.url).data
    itemlist = servertools.find_video_items(data=data)
    url = scrapertools.find_single_match(data, 'location.href = "([^"]+)"')
    new_item.append(Item(url=url, title="Torrent", server="torrent", action="play"))
    if url != '':
        itemlist.extend(new_item)
    for it in itemlist:
        it.channel = item.channel
    return itemlist


def newest(categoria):
    logger.info()
    itemlist = []
    item = Item()
    try:
        item.extra = 'pelilist'
        if categoria == 'torrent':
            item.url = Host + '/peliculas-hd/'
            action = listado(item)
        if categoria == '4k':
            item.url = Host + '/buscar'
            item.post = 'q=4k'
            item.pattern = 'buscar-list'
            action = listado2(item)
        item.url = host + 'peliculas/'

        itemlist = action
        if itemlist[-1].title == "[COLOR cyan]Página Siguiente >>[/COLOR]":
            itemlist.pop()
        itemlist = listado(item)
        if itemlist[-1].title == ">> Página siguiente":
            itemlist.pop()
        item.url = host + 'series/'
        itemlist.extend(listado(item))
        if itemlist[-1].title == ">> Página siguiente":
            itemlist.pop()

    # The exception is caught so as not to interrupt the "novedades" channel if one channel fails
    except:
@@ -220,4 +493,4 @@ def newest(categoria):
        logger.error("{0}".format(line))
        return []

    return itemlist
    return itemlist

@@ -46,6 +46,8 @@ def mainlist(item):
    itemlist.append(item.clone(title="Deportes", action="entradas", url="%s/deportes/" % host,
                               fanart="http://i.imgur.com/ggFFR8o.png",
                               thumbnail=get_thumb('deporte', auto=True)))
    itemlist.append(item.clone(title="Programas de tv", action="entradas", url="%s/otros/programas-de-tv/" % host,
                               thumbnail=get_thumb('de la tv', auto=True)))
    itemlist.append(item.clone(title="", action=""))
    itemlist.append(item.clone(title="Buscar...", action="search", thumbnail=get_thumb('search', auto=True)))
    itemlist.append(item.clone(action="setting_channel", title="Configurar canal...", text_color="gold", folder=False))
@@ -134,10 +136,8 @@ def lista(item):
def lista_series(item):
    logger.info()
    itemlist = list()

    itemlist.append(item.clone(title="Novedades", action="entradas", url="%s/series/" % host))
    itemlist.append(item.clone(title="Miniseries", action="entradas", url="%s/series/miniseries" % host))

    return itemlist


@@ -149,7 +149,7 @@ def entradas(item):
    data = get_data(item.url)
    bloque = scrapertools.find_single_match(data, '<div id="content" role="main">(.*?)<div id="sidebar" '
                                                  'role="complementary">')
    contenido = ["series", "deportes", "anime", 'miniseries']
    contenido = ["series", "deportes", "anime", 'miniseries', 'programas']
    c_match = [True for match in contenido if match in item.url]
    # Pattern depending on the content
    if True in c_match:

@@ -1,22 +0,0 @@
{
    "id": "doramastv",
    "name": "DoramasTV",
    "active": true,
    "adult": false,
    "language": ["cast", "lat"],
    "thumbnail": "doramastv.png",
    "banner": "doramastv.png",
    "categories": [
        "tvshow"
    ],
    "settings": [
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": true,
            "enabled": true,
            "visible": true
        }
    ]
}
@@ -1,185 +0,0 @@
# -*- coding: utf-8 -*-

import re
import urlparse

from core import scrapertools
from core.item import Item
from platformcode import logger

host = "http://doramastv.com/"
DEFAULT_HEADERS = []
DEFAULT_HEADERS.append(
    ["User-Agent", "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12"])


def mainlist(item):
    logger.info()

    itemlist = list([])
    itemlist.append(
        Item(channel=item.channel, action="pagina_", title="En emision", url=urlparse.urljoin(host, "drama/emision")))
    itemlist.append(Item(channel=item.channel, action="letras", title="Listado alfabetico",
                         url=urlparse.urljoin(host, "lista-numeros")))
    itemlist.append(
        Item(channel=item.channel, action="generos", title="Generos", url=urlparse.urljoin(host, "genero/accion")))
    itemlist.append(Item(channel=item.channel, action="pagina_", title="Ultimos agregados",
                         url=urlparse.urljoin(host, "dramas/ultimos")))
    itemlist.append(Item(channel=item.channel, action="search", title="Buscar",
                         url=urlparse.urljoin(host, "buscar/anime/ajax/?title=")))

    return itemlist


def letras(item):
    logger.info()

    itemlist = []
    headers = DEFAULT_HEADERS[:]
    data = scrapertools.cache_page(item.url, headers=headers)

    patron = ' <a href="(\/lista-.+?)">(.+?)<'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        title = scrapertools.entityunescape(scrapedtitle)
        url = urlparse.urljoin(host, scrapedurl)
        thumbnail = ""
        plot = ""

        logger.debug("title=[{0}], url=[{1}], thumbnail=[{2}]".format(title, url, thumbnail))

        itemlist.append(
            Item(channel=item.channel, action="pagina_", title=title, url=url, thumbnail=thumbnail, plot=plot))

    return itemlist


def pagina_(item):
    logger.info()
    itemlist = []
    headers = DEFAULT_HEADERS[:]
    data = scrapertools.cache_page(item.url, headers=headers)
    data1 = scrapertools.get_match(data, '<div class="animes-bot">(.+?)<!-- fin -->')
    data1 = data1.replace('\n', '')
    data1 = data1.replace('\r', '')
    patron = 'href="(\/drama.+?)".+?<\/div>(.+?)<\/div>.+?src="(.+?)".+?titulo">(.+?)<'
    matches = re.compile(patron, re.DOTALL).findall(data1)
    for scrapedurl, scrapedplot, scrapedthumbnail, scrapedtitle in matches:
        title = scrapertools.unescape(scrapedtitle).strip()
        url = urlparse.urljoin(item.url, scrapedurl)
        thumbnail = urlparse.urljoin(host, scrapedthumbnail)
        plot = scrapertools.decodeHtmlentities(scrapedplot)
        itemlist.append(
            Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail, plot=plot,
                 show=title))

    patron = 'href="([^"]+)" class="next"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for match in matches:
        if len(matches) > 0:
            scrapedurl = urlparse.urljoin(item.url, match)
            scrapedtitle = "Pagina Siguiente >>"
            scrapedthumbnail = ""
            scrapedplot = ""
            itemlist.append(Item(channel=item.channel, action="pagina_", title=scrapedtitle, url=scrapedurl,
                                 thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True))
    return itemlist


def episodios(item):
    logger.info()
    itemlist = []
    headers = DEFAULT_HEADERS[:]
    data = scrapertools.cache_page(item.url, headers=headers)
    data = data.replace('\n', '')
    data = data.replace('\r', '')
    data1 = scrapertools.get_match(data, '<ul id="lcholder">(.+?)</ul>')
    patron = '<a href="(.+?)".+?>(.+?)<'
    matches = re.compile(patron, re.DOTALL).findall(data1)

    for scrapedurl, scrapedtitle in matches:
        title = scrapertools.htmlclean(scrapedtitle).strip()
        thumbnail = ""
        plot = ""
        url = urlparse.urljoin(item.url, scrapedurl)
        show = item.show
        itemlist.append(
            Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot,
                 fulltitle=title, show=show))
    return itemlist


def findvideos(item):
    logger.info()

    headers = DEFAULT_HEADERS[:]
    data = scrapertools.cache_page(item.url, headers=headers)
    data = data.replace('\n', '')
    data = data.replace('\r', '')
    patron = '<iframe src="(.+?)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    data1 = ''
    for match in matches:
        data1 += match + '\n'
    data = data1
    data = data.replace('%26', '&')
    data = data.replace('http://ozhe.larata.in/repro-d/mvk?v=', 'http://vk.com/video_ext.php?oid=')
    data = data.replace('http://ozhe.larata.in/repro-d/send?v=', 'http://sendvid.com/embed/')
    data = data.replace('http://ozhe.larata.in/repro-d/msend?v=', 'http://sendvid.com/embed/')
    data = data.replace('http://ozhe.larata.in/repro-d/vidweed?v=', 'http://www.videoweed.es/file/')
    data = data.replace('http://ozhe.larata.in/repro-d/nowv?v=', 'http://www.nowvideo.sx/video/')
    data = data.replace('http://ozhe.larata.in/repro-d/nov?v=', 'http://www.novamov.com/video/')
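    # Note (added for clarity): the replaces above rewrite the site's
    # ozhe.larata.in redirector URLs into the real embed URLs of each video
    # host, so that servertools.find_video_items() below can recognize them.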
    itemlist = []

    from core import servertools
    itemlist.extend(servertools.find_video_items(data=data))
    for videoitem in itemlist:
        videoitem.channel = item.channel
        videoitem.folder = False
    return itemlist


def generos(item):
    logger.info()
    itemlist = []
    headers = DEFAULT_HEADERS[:]
    data = scrapertools.cache_page(item.url, headers=headers)
    data = data.replace('\n', '')
    data = data.replace('\r', '')

    data = scrapertools.get_match(data, '<!-- Lista de Generos -->(.+?)<\/div>')
    patron = '<a href="(.+?)".+?>(.+?)<'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:
        title = scrapertools.entityunescape(scrapedtitle)
        url = urlparse.urljoin(host, scrapedurl)
        thumbnail = ""
        plot = ""
        logger.debug("title=[{0}], url=[{1}], thumbnail=[{2}]".format(title, url, thumbnail))

        itemlist.append(
            Item(channel=item.channel, action="pagina_", title=title, url=url, thumbnail=thumbnail, plot=plot))

    return itemlist


def search(item, texto):
    logger.info()
    item.url = urlparse.urljoin(host, item.url)
    texto = texto.replace(" ", "+")
    headers = DEFAULT_HEADERS[:]
    data = scrapertools.cache_page(item.url + texto, headers=headers)
    data = data.replace('\n', '')
    data = data.replace('\r', '')
    patron = '<a href="(.+?)".+?src="(.+?)".+?titulo">(.+?)<'
    matches = re.compile(patron, re.DOTALL).findall(data)
    itemlist = []
    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        title = scrapertools.unescape(scrapedtitle).strip()
        url = urlparse.urljoin(item.url, scrapedurl)
        thumbnail = urlparse.urljoin(host, scrapedthumbnail)
        itemlist.append(
            Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail, plot="",
                 show=title))
    return itemlist
@@ -12,7 +12,7 @@ from core.item import Item
from core.tmdb import Tmdb
from platformcode import logger

host = "https://mejortorrent.website"
host = "http://www.mejortorrent.com"


def mainlist(item):
@@ -40,8 +40,8 @@ def mainlist(item):
                         thumbnail=thumb_series_hd))
    itemlist.append(Item(channel=item.channel, title="Series Listado Alfabetico", action="listalfabetico",
                         url=host + "/torrents-de-series.html", thumbnail=thumb_series_az))
    itemlist.append(Item(channel=item.channel, title="Documentales", action="getlist",
                         url=host + "/torrents-de-documentales.html", thumbnail=thumb_docus))
    #itemlist.append(Item(channel=item.channel, title="Documentales", action="getlist",
    #                     url= host + "/torrents-de-documentales.html", thumbnail=thumb_docus))
    itemlist.append(Item(channel=item.channel, title="Buscar...", action="search", thumbnail=thumb_buscar))

    return itemlist
@@ -235,6 +235,8 @@ def episodios(item):

    tmdb_title = re.sub(r'(\s*-\s*)?\d+.*?\s*Temporada|(\s*-\s*)?\s*Miniserie\.?|\(.*\)|\[.*\]', '', item.title).strip()
    logger.debug('tmdb_title=' + tmdb_title)
    #logger.debug(matches)
    #logger.debug(data)

    if item.extra == "series":
        oTmdb = Tmdb(texto_buscado=tmdb_title.strip(), tipo='tv', idioma_busqueda="es")
@@ -248,8 +250,8 @@ def episodios(item):
        #import web_pdb; web_pdb.set_trace()
        title = scrapedtitle + " (" + fecha + ")"
        patron = "<a href='(.*?)'>"

        url = "https://mejortorrent.website" + scrapertools.find_single_match(data, patron)

        url = host + scrapertools.find_single_match(data, patron)
        # "episodios%5B1%5D=11744&total_capis=5&tabla=series&titulo=Sea+Patrol+-+2%AA+Temporada"
        post = urllib.urlencode({name: value, "total_capis": total_capis, "tabla": tabla, "titulo": titulo})
        logger.debug("post=" + post)
@@ -319,11 +321,11 @@ def show_movie_info(item):
    logger.debug("title=[" + item.title + "], url=[" + url + "], thumbnail=[" + item.thumbnail + "]")

    torrent_data = httptools.downloadpage(url).data
    link = scrapertools.get_match(torrent_data, "<a href='(/uploads/torrents/peliculas/.*?\.torrent)'>")
    link = scrapertools.get_match(torrent_data, "<a href='(\/uploads\/torrents\/peliculas\/.*?\.torrent)'>")
    link = urlparse.urljoin(url, link)
    logger.debug("link=" + link)
    itemlist.append(Item(channel=item.channel, action="play", server="torrent", title=item.title, url=link,
                         thumbnail=item.thumbnail, plot=item.plot, fanart=item.fanart, folder=False))
                         thumbnail=item.thumbnail, plot=item.plot, fanart=item.fanart, folder=False, extra="pelicula"))

    return itemlist

@@ -334,29 +336,29 @@ def play(item):
    itemlist = []

    if item.extra == "pelicula":
        #itemlist.append(Item(channel=item.channel, action="play", server="torrent", title=item.title, url=item.url,
        #                     thumbnail=item.thumbnail, plot=item.plot, fanart=item.fanart, folder=False))
        data = httptools.downloadpage(item.url).data
        logger.debug("data=" + data)
        #url https://mejortorrent.website/peli-descargar-torrent-16443-Thor-Ragnarok.html
        patron = "https://mejortorrent.website/peli-descargar-torrent-((.*?))-"
        newid = scrapertools.find_single_match(item.url, patron)
        itemlist.append(Item(channel=item.channel, action="play", server="torrent", title=item.title, url=item.url,
                             thumbnail=item.thumbnail, plot=item.plot, fanart=item.fanart, folder=False))
        #data = httptools.downloadpage(item.url).data
        #logger.debug("data=" + data)
        #url http://www.mejortorrent.com/peli-descargar-torrent-16443-Thor-Ragnarok.html
        #patron = host + "/peli-descargar-torrent-((.*?))-"
        #newid = scrapertools.find_single_match(item.url, patron)



        #params = dict(urlparse.parse_qsl(item.extra))
        patron = "https://mejortorrent.website/secciones.php?sec=descargas&ap=contar&tabla=peliculas&id=" + newid[0] + "&link_bajar=1"
        #https://mejortorrent.website/secciones.php?sec=descargas&ap=contar&tabla=peliculas&id=16443&link_bajar=1
        #patron = host + "/secciones.php?sec=descargas&ap=contar&tabla=peliculas&id=" + newid[0] + "&link_bajar=1"
        #http://www.mejortorrent.com/secciones.php?sec=descargas&ap=contar&tabla=peliculas&id=16443&link_bajar=1
        #link=scrapertools.find_single_match(data,patron)
        #data = httptools.downloadpage(link).data


        data = httptools.downloadpage(patron).data
        patron = "Pincha <a href='(.*?)'>"
        link = "https://mejortorrent.website" + scrapertools.find_single_match(data, patron)
        logger.info("link=" + link)
        itemlist.append(Item(channel=item.channel, action="play", server="torrent", title=item.title, url=link,
                             thumbnail=item.thumbnail, plot=item.plot, folder=False))
        #data = httptools.downloadpage(patron).data
        #patron = "Pincha <a href='(.*?)'>"
        #link = host + scrapertools.find_single_match(data, patron)
        #logger.info("link=" + link)
        #itemlist.append(Item(channel=item.channel, action="play", server="torrent", title=item.title, url=link,
        #                     thumbnail=item.thumbnail, plot=item.plot, folder=False))

    else:
        #data = httptools.downloadpage(item.url, post=item.extra).data
@@ -364,14 +366,14 @@ def play(item):
        logger.debug("data=" + data)

        params = dict(urlparse.parse_qsl(item.extra))
        patron = "https://mejortorrent.website/secciones.php?sec=descargas&ap=contar&tabla=" + params["tabla"] + "&id=" + item.id
        patron = host + "/secciones.php?sec=descargas&ap=contar&tabla=" + params["tabla"] + "&id=" + item.id
        #link=scrapertools.find_single_match(data,patron)
        #data = httptools.downloadpage(link).data


        data = httptools.downloadpage(patron).data
        patron = "Pincha <a href='(.*?)'>"
        link = "https://mejortorrent.website" + scrapertools.find_single_match(data, patron)
        link = host + scrapertools.find_single_match(data, patron)
        logger.info("link=" + link)
        itemlist.append(Item(channel=item.channel, action="play", server="torrent", title=item.title, url=link,
                             thumbnail=item.thumbnail, plot=item.plot, folder=False))
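        # Flow summary (added for clarity): the variable "patron" here actually
        # holds the counter URL (secciones.php?...&link_bajar=1); its response
        # contains "Pincha <a href='...'>", whose href is the final .torrent link.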

@@ -1,106 +1,66 @@
# -*- coding: utf-8 -*-

import re
import urllib
import urlparse



from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger

host = "http://torrentlocura.com/"
from platformcode import config, logger
from core import tmdb

host = 'http://torrentlocura.com/'  # Manually change "xx" in line 287 (".com/xx/library"): tl for torrentlocura, tr for torrentrapid, d20 for descargas2020

def mainlist(item):
    logger.info()

    thumb_movie = get_thumb("channels_movie.png")
    thumb_tvshow = get_thumb("channels_tvshow.png")
    thumb_anime = get_thumb("channels_anime.png")
    itemlist = []

    thumb_pelis = get_thumb("channels_movie.png")
    thumb_series = get_thumb("channels_tvshow.png")
    thumb_search = get_thumb("search.png")

    itemlist = list()
    itemlist.append(Item(channel=item.channel, action="submenu", title="Películas", url=host,
                         pattern="peliculas", thumbnail=get_thumb('movies', auto=True)))
                         extra="peliculas", thumbnail=thumb_pelis))

    itemlist.append(Item(channel=item.channel, action="submenu", title="Series", url=host, extra="series",
                         thumbnail=thumb_series))

    itemlist.append(Item(channel=item.channel, action="submenu", title="Documentales", url=host, extra="varios",
                         thumbnail=thumb_series))
    itemlist.append(
        Item(channel=item.channel, action="submenu", title="Series", url=host,
             pattern="series", thumbnail=get_thumb('tvshows', auto=True)))
    itemlist.append(
        Item(channel=item.channel, action="anime", title="Anime", url=host,
             pattern="anime", thumbnail=get_thumb('anime', auto=True)))
    itemlist.append(Item(channel=item.channel, action="search", title="Buscar", url=host + "buscar",
                         thumbnail=get_thumb('search', auto=True)))
        Item(channel=item.channel, action="search", title="Buscar", url=host + "buscar", thumbnail=thumb_search))

    return itemlist


def search(item, texto):
    logger.info("search:" + texto)
    # texto = texto.replace(" ", "+")

    try:
        item.post = "q=%s" % texto
        item.pattern = "buscar-list"
        itemlist = listado2(item)

        return itemlist

    # The exception is caught so as not to interrupt the global search if one channel fails
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def anime(item):
    logger.info()
    itemlist = []
    title = "Anime"
    url = host + "anime"
    itemlist.append(item.clone(channel=item.channel, action="listado", title=title, url=url,
                               pattern="pelilist"))
    itemlist.append(
        item.clone(channel=item.channel, action="alfabeto", title=title + " [A-Z]", url=url,
                   thumbnail=item.thumbnail[:-4] + "_az.png", pattern="pelilist"))

    return itemlist


def submenu(item):
    logger.info()
    itemlist = []

    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
    # data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")

    pattern = '<li><a href="%s%s/">.*?<ul>(.*?)</ul>' % (host, item.pattern)
    data = scrapertools.get_match(data, pattern)
    #patron = '<li><a href="http://(?:www.)?torrentlocura.com/' + item.extra + '/">.*?<ul>(.*?)</ul>'
    patron = '<li><a href="' + item.url + item.extra + '/">.*?<ul>(.*?)</ul>'  # Filtered by url
    data = scrapertools.get_match(data, patron)

    pattern = '<a href="([^"]+)".*?>([^>]+)</a>'
    matches = re.compile(pattern, re.DOTALL).findall(data)
    patron = '<a href="([^"]+)".*?>([^>]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        title = scrapedtitle.strip()
        url = scrapedurl

        if item.pattern in title.lower():
            itemlist.append(item.clone(channel=item.channel, action="listado", title=title, url=url,
                                       pattern="pelilist"))
            itemlist.append(
                item.clone(channel=item.channel, action="alfabeto", title=title + " [A-Z]", url=url,
                           thumbnail=item.thumbnail[:-4] + "_az.png", pattern="pelilist"))

    if 'Películas' in item.title:
        new_item = item.clone(title='Peliculas 4K', url=host + 'buscar', post='q=4k', action='listado2',
                              pattern='buscar-list')
        itemlist.append(new_item)

        itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, extra="pelilist"))
        itemlist.append(
            Item(channel=item.channel, action="alfabeto", title=title + " [A-Z]", url=url, extra="pelilist"))

    if item.extra == "peliculas":
        itemlist.append(Item(channel=item.channel, action="listado", title="Películas 4K", url=host + "peliculas-hd/4kultrahd/", extra="pelilist"))
        itemlist.append(
            Item(channel=item.channel, action="alfabeto", title="Películas 4K" + " [A-Z]", url=host + "peliculas-hd/4kultrahd/", extra="pelilist"))

    return itemlist


@@ -108,11 +68,11 @@ def alfabeto(item):
    logger.info()
    itemlist = []

    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
    # data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")

    pattern = '<ul class="alfabeto">(.*?)</ul>'
    data = scrapertools.get_match(data, pattern)
    patron = '<ul class="alfabeto">(.*?)</ul>'
    data = scrapertools.get_match(data, patron)

    patron = '<a href="([^"]+)"[^>]+>([^>]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
@@ -121,7 +81,7 @@ def alfabeto(item):
        title = scrapedtitle.upper()
        url = scrapedurl

        itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, pattern=item.pattern))
        itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, extra=item.extra))

    return itemlist

@@ -129,73 +89,113 @@ def alfabeto(item):
def listado(item):
    logger.info()
    itemlist = []
    url_next_page = ''

    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    #logger.debug(data)
    logger.debug('item.modo: %s' % item.modo)
    logger.debug('item.extra: %s' % item.extra)

    # logger.debug("data %s " % data)
    next_page = scrapertools.find_single_match(data, '<ul class="pagination">.*?<a class="current" href.*?'
                                                     '<a\s*href="([^"]+)">')
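    # Note (added for clarity): this captures the href of the first <a> that
    # follows the 'current' page marker inside <ul class="pagination">, i.e.
    # the link to the next page.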
|
||||
# logger.debug("data %s " % next_page)
|
||||
if item.modo != 'next' or item.modo =='':
|
||||
logger.debug('item.title: %s'% item.title)
|
||||
patron = '<ul class="' + item.extra + '">(.*?)</ul>'
|
||||
logger.debug("patron=" + patron)
|
||||
fichas = scrapertools.get_match(data, patron)
|
||||
page_extra = item.extra
|
||||
else:
|
||||
fichas = data
|
||||
page_extra = item.extra
|
||||
|
||||
pattern = '<ul class="%s">(.*?)</ul>' % item.pattern
|
||||
data = scrapertools.get_match(data, pattern)
|
||||
# logger.debug("data %s " % data)
|
||||
pattern = '<li><a href="(?P<url>[^"]+)" title="(?P<title_to_fix>[^"]+)".*?<img src="(?P<img>[^"]+)"[^>]+>' \
|
||||
'<h2.*?>\s*(?P<title>.*?)\s*</h2><span>(?P<quality>[^<]*)</span>'
|
||||
patron = '<a href="([^"]+).*?' # la url
|
||||
patron += 'title="([^"]+).*?' # el titulo
|
||||
patron += '<img src="([^"]+)"[^>]+>.*?' # el thumbnail
|
||||
#patron += '<span>([^<].*?)<' # la calidad: original de NewPCT1: si falta la calidad, el siguiente "matches" entra en un loop
|
||||
patron += '<span>([^<].*?)?<' # la calidad
|
||||
matches = re.compile(patron, re.DOTALL).findall(fichas)
|
||||
logger.debug('item.next_page: %s'%item.next_page)
|
||||
|
||||
matches = re.compile(pattern, re.DOTALL).findall(data)
|
||||
# logger.debug("data %s " % matches)
|
||||
|
||||
for url, title_to_fix, thumb, title, quality in matches:
|
||||
# fix encoding for title
|
||||
title = title.replace("�", "ñ")
|
||||
|
||||
# title is the clean way but it doesn't work if it's a long, so we have to use title_to_fix
|
||||
title_fix = False
|
||||
if title.endswith(".."):
|
||||
            title = title_to_fix
            title_fix = True

    # Pagination
    if item.next_page != 'b':
        if len(matches) > 30:
            url_next_page = item.url
            matches = matches[:30]
            next_page = 'b'
            modo = 'continue'
    else:
        matches = matches[30:]
        next_page = 'a'
        patron_next_page = '<a href="([^"]+)">Next<\/a>'
        matches_next_page = re.compile(patron_next_page, re.DOTALL).findall(data)
        modo = 'continue'
        if len(matches_next_page) > 0:
            url_next_page = matches_next_page[0]
            modo = 'next'

    for scrapedurl, scrapedtitle, scrapedthumbnail, calidad in matches:
        url = scrapedurl
        title = scrapedtitle
        thumbnail = scrapedthumbnail
        action = "findvideos"
        extra = ""
        year = scrapertools.find_single_match(scrapedthumbnail, r'-(\d{4})')
        if ".com/series" in url:
            if title_fix:
                title = scrapertools.find_single_match(title, '([^-]+)')
                title = title.replace("Ver online", "", 1).replace("Ver en linea", "", 1). \
                    replace("Descarga Serie HD", "", 1).strip()
            action = "episodios"
            extra = "serie"

            show = title

            # TODO: strip the quality tag from the title

            if quality:
                title = "%s [%s]" % (title, quality)

            itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumb,
                                 quality=quality, context=["buscar_trailer"], show=show))
            title = scrapertools.find_single_match(title, '([^-]+)')
            title = title.replace("Ver online", "", 1).replace("Descarga Serie HD", "", 1).replace("Ver en linea", "",
                                                                                                   1).strip()

        else:
            if title_fix:
                title = title.replace("Descargar", "", 1).strip()
                if title.endswith("gratis"):
                    title = title[:-6].strip()
            title = title.replace("Descargar torrent ", "", 1).replace("Descarga Gratis ", "", 1).replace("Descargar Estreno ", "", 1).replace("Pelicula en latino ", "", 1).replace("Descargar Pelicula ", "", 1).replace("Descargar", "", 1).replace("Descarga", "", 1).replace("Bajar", "", 1).strip()
            if title.endswith("gratis"): title = title[:-7]
            if title.endswith("torrent"): title = title[:-8]
            if title.endswith("en HD"): title = title[:-6]

            if quality:
                title = "%s [%s]" % (title, quality)
        show = title
        if item.extra != "buscar-list":
            title = title + ' ' + calidad

        itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb,
                             quality=quality, context=["buscar_trailer"]))
        context = ""
        context_title = scrapertools.find_single_match(url, "http://(?:www.)?torrentlocura.com/(.*?)/(.*?)/")
        if context_title:
            try:
                context = context_title[0].replace("descargar-", "").replace("pelicula", "movie").replace("series",
                                                                                                          "tvshow")
                context_title = context_title[1].replace("-", " ")
                if re.search('\d{4}', context_title[-4:]):
                    context_title = context_title[:-4]
                elif re.search('\(\d{4}\)', context_title[-6:]):
                    context_title = context_title[:-6]

                if next_page:
                    itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente", url=next_page,
                                         pattern="pagination", thumbnail=get_thumb("next.png")))
            except:
                context_title = show
        logger.debug('context title: %s' % context_title)
        logger.debug('year: %s' % year)

        logger.debug('context: %s' % context)
        if not 'array' in title:
            itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail,
                                 extra=extra,
                                 show=context_title, contentTitle=context_title, contentType=context,
                                 context=["buscar_trailer"], infoLabels={'year': year}))

    tmdb.set_infoLabels(itemlist, True)

    if url_next_page:
        itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente",
                             url=url_next_page, next_page=next_page, folder=True,
                             text_color='yellow', text_bold=True, modo=modo, plot=extra,
                             extra=page_extra))
    return itemlist
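
# A minimal sketch (illustrative only, not part of the channel) of the two-step
# pagination used in listado() above: a result page with more than 30 matches is
# served in two screens before the site's own "Next" link is followed.
def _paginate_sketch(matches, next_page, site_next_url=None):
    if next_page != 'b':
        if len(matches) > 30:
            return matches[:30], 'b', 'continue'  # call again with the same url
        return matches, None, None                # nothing left to page
    if site_next_url:
        return matches[30:], 'a', 'next'          # jump to the site's next page
    return matches[30:], 'a', 'continue'

# Example: 45 results -> 30 items on the first screen, 15 on the second.
assert len(_paginate_sketch(list(range(45)), 'a')[0]) == 30
assert len(_paginate_sketch(list(range(45)), 'b', 'http://example.com/pg/2')[0]) == 15

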
def listado2(item):
    logger.info()
    itemlist = []

    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url, post=item.post).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")

@@ -205,70 +205,41 @@ def listado2(item):
        data = re.sub(r"%s" % el[0], el[1], data)

    try:
        # logger.debug("data %s " % data)
        get, post = scrapertools.find_single_match(data, '<ul class="pagination">.*?<a class="current" href.*?'
                                                         '<a\s*href="([^"]+)"(?:\s*onClick=".*?\'([^"]+)\'.*?")')
    except:
        post = False

    if post:
        # logger.debug("post %s" % post)
        # logger.debug("item.post %s" % item.post)
        if "pg" in item.post:
            item.post = re.sub(r"pg=(\d+)", "pg=%s" % post, item.post)
            # logger.debug("item.post %s" % item.post)
        else:
            item.post += "&pg=%s" % post
            # logger.debug("item.post %s" % item.post)

    # logger.debug("data %s " % next_page)

    pattern = '<ul class="%s">(.*?)</ul>' % item.pattern
    data = scrapertools.get_match(data, pattern)
    # logger.debug("data %s " % data)

    pattern = '<li><a href="(?P<url>[^"]+)".*?<img src="(?P<img>[^"]+)"[^>]+>.*?<h2.*?>\s*(?P<title>.*?)\s*</h2>'

    matches = re.compile(pattern, re.DOTALL).findall(data)

    for url, thumb, title in matches:
        # fix encoding for title
        real_title = scrapertools.find_single_match(title, r'font color.*?font.*?><b>(.*?)<\/b><\/font>')
        real_title = scrapertools.remove_htmltags(real_title).decode('iso-8859-1').encode('utf-8')
        title = scrapertools.htmlclean(title)
        title = title.replace("Ã±", "ñ")

        # logger.debug("\n\nu %s " % url)
        # logger.debug("\nb %s " % thumb)
        # logger.debug("\nt %s " % title)

        # title is the clean way, but it doesn't work if it's too long, so we have to use title_to_fix
        # title_fix = False
        # if title.endswith(".."):
        #     title = title_to_fix
        #     title_fix = True

        # skip anything that is not a video
        if "/juego/" in url or "/varios/" in url:
            continue

        if ".com/series" in url:
            # title = scrapertools.find_single_match(title, '([^-]+)')
            # title = title.replace("Ver online", "", 1).replace("Ver en linea", "", 1). \
            #     replace("Descarga Serie HD", "", 1).strip()

            show = title
            # if quality:
            #     title = "%s [%s]" % (title, quality)
            show = real_title

            itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumb,
                                 context=["buscar_trailer"], show=show))

                                 context=["buscar_trailer"], contentSerieName=show))
        else:
            # title = title.replace("Descargar", "", 1).strip()
            # if title.endswith("gratis"):
            #     title = title[:-6].strip()

            # if quality:
            #     title = "%s [%s]" % (title, quality)

            itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb,
                                 context=["buscar_trailer"]))

@@ -279,27 +250,130 @@ def listado2(item):

    return itemlist
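
# Illustrative only: how the "pg" POST parameter above advances between calls to
# listado2(); the helper name and sample queries are made up for the example.
import re

def _next_post(post, pg):
    if "pg" in post:
        return re.sub(r"pg=(\d+)", "pg=%s" % pg, post)
    return post + "&pg=%s" % pg

print(_next_post("q=4k", 2))       # -> q=4k&pg=2
print(_next_post("q=4k&pg=2", 3))  # -> q=4k&pg=3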


def findvideos(item):
    logger.info()
    itemlist = []

    ## Any of these three options is valid
    # item.url = item.url.replace(".com/",".com/ver-online/")
    # item.url = item.url.replace(".com/",".com/descarga-directa/")
    item.url = item.url.replace(".com/", ".com/descarga-torrent/")

    # Download the page
    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    data = data.replace("$!", "#!").replace("'", "\"").replace("Ã±", "ñ").replace("//pictures", "/pictures")

    title = scrapertools.find_single_match(data, "<h1><strong>([^<]+)<\/strong>[^<]+<\/h1>")
    title += scrapertools.find_single_match(data, "<h1><strong>[^<]+<\/strong>([^<]+)<\/h1>")
    caratula = scrapertools.find_single_match(data, '<div class="entry-left">.*?src="([^"]+)"')

    #<div style="float:left;width:100%;min-height:70px;margin:10px 0px;"> <a href="javascript:void(0);" onClick="javascript:openTorrent();" title="Descargar torrent de Star Wars Los Ultimos Jedi " class="btn-torrent">Descarga tu Archivo torrent!</a> <script type="text/javascript"> function openTorrent() {var link = "http://advserver.xyz/v2/gena?gid=ADQGZS0ABR&uid=164"; window.open(link); window.location.href = "http://torrentlocura.com/descargar-torrent/104616_-1520707769-star-wars-los-ultimos-jedi--bluray-screeener/";} </script> </div>

    patron = 'openTorrent.*?title=".*?class="btn-torrent">.*?function openTorrent.*?href = "(.*?)";'

    # scraped torrent
    url = scrapertools.find_single_match(data, patron)
    logger.debug("urltorrent: " + url + " Title: " + title + " Caratula: " + caratula)
    if url != "":
        itemlist.append(
            Item(channel=item.channel, action="play", server="torrent", title="[torrent] - " + title, fulltitle=title,
                 url=url, thumbnail=caratula, plot=item.plot, folder=False))

    # scraped watch / download links, single link or multiple parts

    data = data.replace("http://tumejorserie.com/descargar/url_encript.php?link=", "(")
    data = data.replace(
        'javascript:;" onClick="popup("http://www.torrentlocura.com/tl/library/include/ajax/get_modallinks.php?links=', "")

    logger.debug("matar %s" % data)

    # Old server-scraping scheme used by Newpct1. It does not work with Torrentlocura, so it is replaced by this more common one
    #patron_descargar = '<div id="tab2"[^>]+>.*?</ul>'
    #patron_ver = '<div id="tab3"[^>]+>.*?</ul>'

    #match_ver = scrapertools.find_single_match(data, patron_ver)
    #match_descargar = scrapertools.find_single_match(data, patron_descargar)

    #patron = '<div class="box1"><img src="([^"]+)".*?'  # logo
    #patron += '<div class="box2">([^<]+)</div>'  # server
    #patron += '<div class="box3">([^<]+)</div>'  # language
    #patron += '<div class="box4">([^<]+)</div>'  # quality
    #patron += '<div class="box5"><a href="([^"]+)".*?'  # link
    #patron += '<div class="box6">([^<]+)</div>'  # title

    #enlaces_ver = re.compile(patron, re.DOTALL).findall(match_ver)
    #enlaces_descargar = re.compile(patron, re.DOTALL).findall(match_descargar)

    # New server-scraping scheme introduced by Torrentlocura, compatible with the other Newpct1 clones
    patron = '<div class=\"box1\"[^<]+<img src=\"([^<]+)?" style[^<]+><\/div[^<]+<div class="box2">([^<]+)?<\/div[^<]+<div class="box3">([^<]+)?'
    patron += '<\/div[^<]+<div class="box4">([^<]+)?<\/div[^<]+<div class="box5"><a href=(.*?)? rel.*?'
    patron += '<\/div[^<]+<div class="box6">([^<]+)?<'
    logger.debug("Patron: " + patron)

    enlaces_ver = re.compile(patron, re.DOTALL).findall(data)
    enlaces_descargar = enlaces_ver
    logger.debug(enlaces_ver)

    for logo, servidor, idioma, calidad, enlace, titulo in enlaces_ver:
        if "Ver" in titulo:
            servidor = servidor.replace("streamin", "streaminto")
            titulo = titulo + " [" + servidor + "]"
            mostrar_server = True
            if config.get_setting("hidepremium"):
                mostrar_server = servertools.is_server_enabled(servidor)
            if mostrar_server:
                try:
                    devuelve = servertools.findvideosbyserver(enlace, servidor)
                    if devuelve:
                        enlace = devuelve[0][1]
                        itemlist.append(
                            Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo,
                                 fulltitle=item.title, url=enlace, thumbnail=logo, plot=item.plot, folder=False))
                except:
                    pass

    for logo, servidor, idioma, calidad, enlace, titulo in enlaces_descargar:
        if "Ver" not in titulo:
            servidor = servidor.replace("uploaded", "uploadedto")
            partes = enlace.split(" ")
            p = 1
            for enlace in partes:
                parte_titulo = titulo + " (%s/%s)" % (p, len(partes)) + " [" + servidor + "]"
                p += 1
                mostrar_server = True
                if config.get_setting("hidepremium"):
                    mostrar_server = servertools.is_server_enabled(servidor)
                if mostrar_server:
                    try:
                        devuelve = servertools.findvideosbyserver(enlace, servidor)
                        if devuelve:
                            enlace = devuelve[0][1]
                            itemlist.append(Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor,
                                                 title=parte_titulo, fulltitle=item.title, url=enlace, thumbnail=logo,
                                                 plot=item.plot, folder=False))
                    except:
                        pass
    return itemlist
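
# Standalone check (illustrative, run outside the addon) that the openTorrent
# pattern used in findvideos() really pulls the redirect URL out of the sample
# snippet kept in the comment above.
import re

sample = ('<a href="javascript:void(0);" onClick="javascript:openTorrent();" '
          'title="Descargar torrent de Star Wars Los Ultimos Jedi " class="btn-torrent">'
          'Descarga tu Archivo torrent!</a> <script type="text/javascript"> '
          'function openTorrent() {var link = "http://advserver.xyz/v2/gena?gid=ADQGZS0ABR&uid=164"; '
          'window.open(link); window.location.href = '
          '"http://torrentlocura.com/descargar-torrent/104616_-1520707769-star-wars-los-ultimos-jedi--bluray-screeener/";} '
          '</script>')

patron = 'openTorrent.*?title=".*?class="btn-torrent">.*?function openTorrent.*?href = "(.*?)";'
print(re.search(patron, sample, re.DOTALL).group(1))
# -> http://torrentlocura.com/descargar-torrent/104616_-1520707769-star-wars-los-ultimos-jedi--bluray-screeener/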


def episodios(item):
    logger.info()
    itemlist = []

    infoLabels = item.infoLabels
    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")

    # logger.debug("data %s " % data)
    pattern = '<ul class="%s">(.*?)</ul>' % "pagination"  # item.pattern
    pagination = scrapertools.find_single_match(data, pattern)
    # logger.debug("pagination %s" % pagination)
    if pagination:
        pattern = 'Next</a></li><li><a href="(.*?)(\d+)">Last</a>'
        url, last_page = scrapertools.find_single_match(pagination, pattern)
        # logger.debug("data %s " % last_page)
        list_pages = []
        for x in range(1, int(last_page) + 1):
            list_pages.append("%s%s" % (url, x))
            # logger.debug("data %s%s" % (url, x))
        # logger.debug("list_pages %s" % list_pages)
        pattern = '<li><a href="([^"]+)">Last<\/a>'
        full_url = scrapertools.find_single_match(pagination, pattern)
        url, last_page = scrapertools.find_single_match(full_url, r'(.*?\/pg\/)(\d+)')
        list_pages = [item.url]
        for x in range(2, int(last_page) + 1):
            response = httptools.downloadpage('%s%s' % (url, x))
            if response.sucess:  # 'sucess' is the attribute name defined by httptools
                list_pages.append("%s%s" % (url, x))
    else:
        list_pages = [item.url]

@@ -310,14 +384,11 @@ def episodios(item):

        pattern = '<ul class="%s">(.*?)</ul>' % "buscar-list"  # item.pattern
        data = scrapertools.get_match(data, pattern)
        # logger.debug("data %s " % data)

        pattern = '<li[^>]*><a href="(?P<url>[^"]+).*?<img src="(?P<thumb>[^"]+)".*?<h2[^>]+>(?P<info>.*?)</h2>'
        matches = re.compile(pattern, re.DOTALL).findall(data)
        # logger.debug("data %s " % matches)

        for url, thumb, info in matches:
            # logger.debug("info %s" % info)

            if "<span" in info:  # new style
                pattern = ".*?[^>]+>.*?Temporada\s*(?P<season>\d+)\s*Capitulo(?:s)?\s*(?P<episode>\d+)" \
@@ -363,135 +434,57 @@ def episodios(item):
                                                     match["quality"])
                multi = False

            season = match['season']
            episode = match['episode']
            itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb,
                                 quality=item.quality, multi=multi, contentSeason=match["season"],
                                 contentEpisodeNumber=match["episode"]))
                                 quality=item.quality, multi=multi, contentSeason=season,
                                 contentEpisodeNumber=episode, infoLabels=infoLabels))

    # order list
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    if len(itemlist) > 1:
        return sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber)))
    itemlist = sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber)))

    return itemlist
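
# Illustrative only: how episodios() derives the list of season pages from the
# "Last" link of the paginator; the URL is a made-up example.
import re

full_url = "http://descargas2020.com/series/mi-serie/pg/4"
url, last_page = re.search(r'(.*?\/pg\/)(\d+)', full_url).groups()
list_pages = [url + str(x) for x in range(1, int(last_page) + 1)]
print(list_pages[-1])  # -> http://descargas2020.com/series/mi-serie/pg/4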


def findvideos(item):
    logger.info()
    itemlist = []

    # Download the page
    data = httptools.downloadpage(item.url).data
    item.plot = scrapertools.find_single_match(data, '<div class="post-entry" style="height:300px;">(.*?)</div>')
    item.plot = scrapertools.htmlclean(item.plot).strip()
    item.contentPlot = item.plot

    link = scrapertools.find_single_match(data, 'href.*?=.*?"http:\/\/(?:tumejorserie|tumejorjuego).*?link=([^"]+)"')
    if link != "":
        link = host + link
        logger.info("torrent=" + link)
        if config.get_videolibrary_support() and len(itemlist) > 0:
            itemlist.append(
                Item(channel=item.channel, action="play", server="torrent", title="Vídeo en torrent", fulltitle=item.title,
                     url=link, thumbnail=servertools.guess_server_thumbnail("torrent"), plot=item.plot, folder=False,
                     parentContent=item))

    patron = '<div class=\"box1\"[^<]+<img[^<]+<\/div[^<]+<div class="box2">([^<]+)<\/div[^<]+<div class="box3">([^<]+)'
    patron += '<\/div[^<]+<div class="box4">([^<]+)<\/div[^<]+<div class="box5"><a href=(.*?) rel.*?'
    patron += '<\/div[^<]+<div class="box6">([^<]+)<'

    #patron = "<div class=\"box1\"[^<]+<img[^<]+</div[^<]+"
    #patron += '<div class="box2">([^<]+)</div[^<]+'
    #patron += '<div class="box3">([^<]+)</div[^<]+'
    #patron += '<div class="box4">([^<]+)</div[^<]+'
    #patron += '<div class="box5">(.*?)</div[^<]+'
    #patron += '<div class="box6">([^<]+)<'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    itemlist_ver = []
    itemlist_descargar = []

    for servername, idioma, calidad, scrapedurl, comentarios in matches:
        title = "Mirror en " + servername + " (" + calidad + ")" + " (" + idioma + ")"
        servername = servername.replace("uploaded", "uploadedto").replace("1fichier", "onefichier")
        if comentarios.strip() != "":
            title = title + " (" + comentarios.strip() + ")"
        url = urlparse.urljoin(item.url, scrapedurl)
        mostrar_server = servertools.is_server_enabled(servername)
        if mostrar_server:
            thumbnail = servertools.guess_server_thumbnail(title)
            plot = ""
            logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
            action = "play"
            if "partes" in title:
                action = "extract_url"
            new_item = Item(channel=item.channel, action=action, title=title, fulltitle=title, url=url,
                            thumbnail=thumbnail, plot=plot, parentContent=item, server=servername)
            if comentarios.startswith("Ver en"):
                itemlist_ver.append(new_item)
            else:
                itemlist_descargar.append(new_item)

    for new_item in itemlist_ver:
        itemlist.append(new_item)

    for new_item in itemlist_descargar:
        itemlist.append(new_item)

        item.clone(title="[COLOR orange][B]Añadir esta serie a la videoteca[/B][/COLOR]", action="add_serie_to_library", extra="episodios"))

    return itemlist


def search(item, texto):
    logger.info("search:" + texto)
    # texto = texto.replace(" ", "+")

def extract_url(item):
    logger.info()
    try:
        item.post = "q=%s" % texto
        item.pattern = "buscar-list"
        itemlist = listado2(item)

    itemlist = servertools.find_video_items(data=item.url)
        return itemlist

    for videoitem in itemlist:
        videoitem.title = "Enlace encontrado en " + videoitem.server + " (" + scrapertools.get_filename_from_url(
            videoitem.url) + ")"
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = item.channel

    return itemlist


def play(item):
    logger.info()

    if item.server != "torrent":
        itemlist = servertools.find_video_items(data=item.url)

        for videoitem in itemlist:
            videoitem.title = "Enlace encontrado en " + videoitem.server + " (" + scrapertools.get_filename_from_url(
                videoitem.url) + ")"
            videoitem.fulltitle = item.fulltitle
            videoitem.thumbnail = item.thumbnail
            videoitem.channel = item.channel
    else:
        itemlist = [item]

    return itemlist
    # The exception is caught so a failing channel does not break the global search
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def newest(categoria):
    logger.info()
    itemlist = []
    item = Item()
    try:
        item.pattern = 'pelilist'
        item.extra = 'pelilist'
        if categoria == 'torrent':
            item.url = host + 'peliculas/'
            action = listado(item)
        elif categoria == 'series':

            itemlist = listado(item)
            if itemlist[-1].title == ">> Página siguiente":
                itemlist.pop()
            item.url = host + 'series/'
            action = listado(item)
        elif categoria == '4k':
            item.url = host + 'buscar/'
            item.post = 'q=4k'
            item.pattern = 'buscar-list'
            action = listado2(item)

        itemlist = action

        if itemlist[-1].title == ">> Página siguiente":
            itemlist.pop()
            itemlist.extend(listado(item))
            if itemlist[-1].title == ">> Página siguiente":
                itemlist.pop()

    # The exception is caught so a failing channel does not break the "Novedades" channel
    except:
@@ -501,4 +494,3 @@ def newest(categoria):
        return []

    return itemlist


@@ -1,117 +1,496 @@
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-

import re
import urllib
import urlparse
import requests

from core import servertools
from core import scrapertools
from core.item import Item
from platformcode import logger
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import tmdb

Host = 'http://torrentrapid.com'

host = 'http://torrentrapid.com/'  # Manually change "xx" in line 287 ".com/xx/library": tl for torrentrapid, tr for torrentrapid, d20 for descargas2020


def mainlist(item):
    logger.info()

    itemlist = []
    itemlist.append(Item(channel=item.channel, action="submenu", title="Películas", url=Host + "/peliculas/"))
    itemlist.append(Item(channel=item.channel, action="submenu", title="Series", url=Host + "/series/"))
    itemlist.append(Item(channel=item.channel, action="search", title="Buscar"))

    thumb_pelis = get_thumb("channels_movie.png")
    thumb_series = get_thumb("channels_tvshow.png")
    thumb_search = get_thumb("search.png")

    itemlist.append(Item(channel=item.channel, action="submenu", title="Películas", url=host,
                         extra="peliculas", thumbnail=thumb_pelis))

    itemlist.append(Item(channel=item.channel, action="submenu", title="Series", url=host, extra="series",
                         thumbnail=thumb_series))

    itemlist.append(Item(channel=item.channel, action="submenu", title="Documentales", url=host, extra="varios",
                         thumbnail=thumb_series))
    itemlist.append(
        Item(channel=item.channel, action="search", title="Buscar", url=host + "buscar", thumbnail=thumb_search))

    return itemlist


def search(item, texto):
    logger.info()
    itemlist = []

    payload = {'q': 'data'}
    payload["q"] = texto
    data = requests.post("http://torrentrapid.com/buscar", data=payload)

    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data.text)
    patron_data = '<ul class="buscar-list">(.+?)</ul>'
    data_listado = scrapertools.find_single_match(data, patron_data)

    data_listado = re.sub("Descargar Todas ", "", data_listado)
    data_listado = re.sub("Descargar Pel\xedculas ", "", data_listado)
    data_listado = re.sub("Descargar ", "", data_listado)
    patron_listado = '<li><a href="(.+?)" title="(.+?)"><img src="(.+?)"'

    matches = scrapertools.find_multiple_matches(data_listado, patron_listado)
    for scrapedurl, scrapedtitle, scrapedimg in matches:
        itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action="findvideos"))

    return itemlist


def submenu(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
    patron = '<li><a href="' + item.url + '"><i.+?<ul>(.+?)<\/ul>'  # filter by url
    data_cat = scrapertools.find_single_match(data, patron)
    patron_cat = '<li><a href="(.+?)" title="(.+?)".+?<\/a><\/li>'
    matches = scrapertools.find_multiple_matches(data_cat, patron_cat)
    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")

    #patron = '<li><a href="http://(?:www.)?torrentrapid.com/' + item.extra + '/">.*?<ul>(.*?)</ul>'
    patron = '<li><a href="' + item.url + item.extra + '/">.*?<ul>(.*?)</ul>'  # filter by url
    data = scrapertools.get_match(data, patron)

    patron = '<a href="([^"]+)".*?>([^>]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action="listado"))
        title = scrapedtitle.strip()
        url = scrapedurl

        itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, extra="pelilist"))
        itemlist.append(
            Item(channel=item.channel, action="alfabeto", title=title + " [A-Z]", url=url, extra="pelilist"))

    if item.extra == "peliculas":
        itemlist.append(Item(channel=item.channel, action="listado", title="Películas 4K", url=host + "peliculas-hd/4kultrahd/", extra="pelilist"))
        itemlist.append(
            Item(channel=item.channel, action="alfabeto", title="Películas 4K" + " [A-Z]", url=host + "peliculas-hd/4kultrahd/", extra="pelilist"))

    return itemlist


def alfabeto(item):
    logger.info()
    itemlist = []

    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")

    patron = '<ul class="alfabeto">(.*?)</ul>'
    data = scrapertools.get_match(data, patron)

    patron = '<a href="([^"]+)"[^>]+>([^>]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        title = scrapedtitle.upper()
        url = scrapedurl

        itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, extra=item.extra))

    return itemlist


def listado(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
    patron_data = '<ul class="pelilist">(.+?)</ul>'
    data_listado = scrapertools.find_single_match(data, patron_data)
    patron_listado = '<li><a href="(.+?)" title=".+?"><img src="(.+?)".+?><h2'
    if 'Serie' in item.title:
        patron_listado += '.+?>'
    url_next_page = ''

    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    #logger.debug(data)
    logger.debug('item.modo: %s' % item.modo)
    logger.debug('item.extra: %s' % item.extra)

    if item.modo != 'next' or item.modo == '':
        logger.debug('item.title: %s' % item.title)
        patron = '<ul class="' + item.extra + '">(.*?)</ul>'
        logger.debug("patron=" + patron)
        fichas = scrapertools.get_match(data, patron)
        page_extra = item.extra
    else:
        patron_listado += '>'
    patron_listado += '(.+?)<\/h2><span>(.+?)<\/span><\/a><\/li>'
    matches = scrapertools.find_multiple_matches(data_listado, patron_listado)
    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedquality in matches:
        if 'Serie' in item.title:
            action = "episodios"
        fichas = data
        page_extra = item.extra

    patron = '<a href="([^"]+).*?'  # the url
    patron += 'title="([^"]+).*?'  # the title
    patron += '<img src="([^"]+)"[^>]+>.*?'  # the thumbnail
    #patron += '<span>([^<].*?)<'  # the quality: original NewPCT1 pattern; if the quality is missing, the next "matches" enters a loop
    patron += '<span>([^<].*?)?<'  # the quality
    matches = re.compile(patron, re.DOTALL).findall(fichas)
    logger.debug('item.next_page: %s' % item.next_page)

    # Pagination
    if item.next_page != 'b':
        if len(matches) > 30:
            url_next_page = item.url
            matches = matches[:30]
            next_page = 'b'
            modo = 'continue'
    else:
        matches = matches[30:]
        next_page = 'a'
        patron_next_page = '<a href="([^"]+)">Next<\/a>'
        matches_next_page = re.compile(patron_next_page, re.DOTALL).findall(data)
        modo = 'continue'
        if len(matches_next_page) > 0:
            url_next_page = matches_next_page[0]
            modo = 'next'

    for scrapedurl, scrapedtitle, scrapedthumbnail, calidad in matches:
        url = scrapedurl
        title = scrapedtitle
        thumbnail = scrapedthumbnail
        action = "findvideos"
        extra = ""
        year = scrapertools.find_single_match(scrapedthumbnail, r'-(\d{4})')
        if ".com/series" in url:
            action = "episodios"
            extra = "serie"

            title = scrapertools.find_single_match(title, '([^-]+)')
            title = title.replace("Ver online", "", 1).replace("Descarga Serie HD", "", 1).replace("Ver en linea", "",
                                                                                                   1).strip()

        else:
            action = "findvideos"
            itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, action=action, quality=scrapedquality, show=scrapedtitle))
    # Next page
    patron_pag = '<ul class="pagination"><li><a class="current" href=".+?">.+?<\/a>.+?<a href="(.+?)">'
    siguiente = scrapertools.find_single_match(data, patron_pag)
    itemlist.append(
        Item(channel=item.channel, title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=siguiente, action="listado"))
            title = title.replace("Descargar torrent ", "", 1).replace("Descarga Gratis ", "", 1).replace("Descargar Estreno ", "", 1).replace("Pelicula en latino ", "", 1).replace("Descargar Pelicula ", "", 1).replace("Descargar", "", 1).replace("Descarga", "", 1).replace("Bajar", "", 1).strip()
            if title.endswith("gratis"): title = title[:-7]
            if title.endswith("torrent"): title = title[:-8]
            if title.endswith("en HD"): title = title[:-6]

        show = title
        if item.extra != "buscar-list":
            title = title + ' ' + calidad

        context = ""
        context_title = scrapertools.find_single_match(url, "http://(?:www.)?torrentrapid.com/(.*?)/(.*?)/")
        if context_title:
            try:
                context = context_title[0].replace("descargar-", "").replace("pelicula", "movie").replace("series",
                                                                                                          "tvshow")
                context_title = context_title[1].replace("-", " ")
                if re.search('\d{4}', context_title[-4:]):
                    context_title = context_title[:-4]
                elif re.search('\(\d{4}\)', context_title[-6:]):
                    context_title = context_title[:-6]

            except:
                context_title = show
        logger.debug('context title: %s' % context_title)
        logger.debug('year: %s' % year)

        logger.debug('context: %s' % context)
        if not 'array' in title:
            itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail,
                                 extra=extra,
                                 show=context_title, contentTitle=context_title, contentType=context,
                                 context=["buscar_trailer"], infoLabels={'year': year}))

    tmdb.set_infoLabels(itemlist, True)

    if url_next_page:
        itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente",
                             url=url_next_page, next_page=next_page, folder=True,
                             text_color='yellow', text_bold=True, modo=modo, plot=extra,
                             extra=page_extra))
    return itemlist


def episodios(item):
def listado2(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
    patron_data = '<ul class="buscar-list">(.+?)</ul>'
    data_listado = scrapertools.find_single_match(data, patron_data)
    patron = '<img src="(.+?)" alt=".+?">.+?<div class=".+?">.+?<a href="(.+?)" title=".+?">.+?>Serie.+?>(.+?)<'
    matches = scrapertools.find_multiple_matches(data_listado, patron)
    for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
        if " al " in scrapedtitle:
            titulo = scrapedurl.split('http')
            scrapedurl = "http" + titulo[1]
        itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, action="findvideos", show=scrapedtitle))
    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url, post=item.post).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")

    list_chars = [["Ã±", "ñ"]]

    for el in list_chars:
        data = re.sub(r"%s" % el[0], el[1], data)

    try:
        get, post = scrapertools.find_single_match(data, '<ul class="pagination">.*?<a class="current" href.*?'
                                                         '<a\s*href="([^"]+)"(?:\s*onClick=".*?\'([^"]+)\'.*?")')
    except:
        post = False

    if post:
        if "pg" in item.post:
            item.post = re.sub(r"pg=(\d+)", "pg=%s" % post, item.post)
        else:
            item.post += "&pg=%s" % post

    pattern = '<ul class="%s">(.*?)</ul>' % item.pattern
    data = scrapertools.get_match(data, pattern)
    pattern = '<li><a href="(?P<url>[^"]+)".*?<img src="(?P<img>[^"]+)"[^>]+>.*?<h2.*?>\s*(?P<title>.*?)\s*</h2>'

    matches = re.compile(pattern, re.DOTALL).findall(data)

    for url, thumb, title in matches:
        # fix encoding for title
        real_title = scrapertools.find_single_match(title, r'font color.*?font.*?><b>(.*?)<\/b><\/font>')
        real_title = scrapertools.remove_htmltags(real_title).decode('iso-8859-1').encode('utf-8')
        title = scrapertools.htmlclean(title)
        title = title.replace("Ã±", "ñ")

        # skip anything that is not a video
        if "/juego/" in url or "/varios/" in url:
            continue

        if ".com/series" in url:

            show = real_title

            itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumb,
                                 context=["buscar_trailer"], contentSerieName=show))
        else:

            itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb,
                                 context=["buscar_trailer"]))

    if post:
        itemlist.append(item.clone(channel=item.channel, action="listado2", title=">> Página siguiente",
                                   thumbnail=get_thumb("next.png")))

    return itemlist


def findvideos(item):
    logger.info()
    itemlist = []
    new_item = []
    data = httptools.downloadpage(item.url).data
    itemlist = servertools.find_video_items(data=data)
    url = scrapertools.find_single_match(data, 'location.href = "([^"]+)"')
    new_item.append(Item(url=url, title="Torrent", server="torrent", action="play"))
    itemlist.extend(new_item)
    for it in itemlist:
        it.channel = item.channel

    ## Any of these three options is valid
    # item.url = item.url.replace(".com/",".com/ver-online/")
    # item.url = item.url.replace(".com/",".com/descarga-directa/")
    item.url = item.url.replace(".com/", ".com/descarga-torrent/")

    # Download the page
    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    data = data.replace("$!", "#!").replace("'", "\"").replace("Ã±", "ñ").replace("//pictures", "/pictures")

    title = scrapertools.find_single_match(data, "<h1><strong>([^<]+)<\/strong>[^<]+<\/h1>")
    title += scrapertools.find_single_match(data, "<h1><strong>[^<]+<\/strong>([^<]+)<\/h1>")
    caratula = scrapertools.find_single_match(data, '<div class="entry-left">.*?src="([^"]+)"')

    #<div style="float:left;width:100%;min-height:70px;margin:10px 0px;"> <a href="javascript:void(0);" onClick="javascript:openTorrent();" title="Descargar torrent de Star Wars Los Ultimos Jedi " class="btn-torrent">Descarga tu Archivo torrent!</a> <script type="text/javascript"> function openTorrent() {var link = "http://advserver.xyz/v2/gena?gid=ADQGZS0ABR&uid=164"; window.open(link); window.location.href = "http://torrentrapid.com/descargar-torrent/104616_-1520707769-star-wars-los-ultimos-jedi--bluray-screeener/";} </script> </div>

    patron = 'openTorrent.*?title=".*?class="btn-torrent">.*?function openTorrent.*?href = "(.*?)";'

    # scraped torrent
    url = scrapertools.find_single_match(data, patron)
    logger.debug("urltorrent: " + url + " Title: " + title + " Caratula: " + caratula)
    if url != "":
        itemlist.append(
            Item(channel=item.channel, action="play", server="torrent", title="[torrent] - " + title, fulltitle=title,
                 url=url, thumbnail=caratula, plot=item.plot, folder=False))

    # scraped watch / download links, single link or multiple parts

    data = data.replace("http://tumejorserie.com/descargar/url_encript.php?link=", "(")
    data = data.replace(
        'javascript:;" onClick="popup("http://www.torrentrapid.com/tr/library/include/ajax/get_modallinks.php?links=', "")

    logger.debug("matar %s" % data)

    # Old server-scraping scheme used by Newpct1. It does not work with torrentrapid, so it is replaced by this more common one
    #patron_descargar = '<div id="tab2"[^>]+>.*?</ul>'
    #patron_ver = '<div id="tab3"[^>]+>.*?</ul>'

    #match_ver = scrapertools.find_single_match(data, patron_ver)
    #match_descargar = scrapertools.find_single_match(data, patron_descargar)

    #patron = '<div class="box1"><img src="([^"]+)".*?'  # logo
    #patron += '<div class="box2">([^<]+)</div>'  # server
    #patron += '<div class="box3">([^<]+)</div>'  # language
    #patron += '<div class="box4">([^<]+)</div>'  # quality
    #patron += '<div class="box5"><a href="([^"]+)".*?'  # link
    #patron += '<div class="box6">([^<]+)</div>'  # title

    #enlaces_ver = re.compile(patron, re.DOTALL).findall(match_ver)
    #enlaces_descargar = re.compile(patron, re.DOTALL).findall(match_descargar)

    # New server-scraping scheme introduced by Torrentlocura, compatible with the other Newpct1 clones
    patron = '<div class=\"box1\"[^<]+<img src=\"([^<]+)?" style[^<]+><\/div[^<]+<div class="box2">([^<]+)?<\/div[^<]+<div class="box3">([^<]+)?'
    patron += '<\/div[^<]+<div class="box4">([^<]+)?<\/div[^<]+<div class="box5"><a href=(.*?)? rel.*?'
    patron += '<\/div[^<]+<div class="box6">([^<]+)?<'
    logger.debug("Patron: " + patron)

    enlaces_ver = re.compile(patron, re.DOTALL).findall(data)
    enlaces_descargar = enlaces_ver
    logger.debug(enlaces_ver)

    for logo, servidor, idioma, calidad, enlace, titulo in enlaces_ver:
        if "Ver" in titulo:
            servidor = servidor.replace("streamin", "streaminto")
            titulo = titulo + " [" + servidor + "]"
            mostrar_server = True
            if config.get_setting("hidepremium"):
                mostrar_server = servertools.is_server_enabled(servidor)
            if mostrar_server:
                try:
                    devuelve = servertools.findvideosbyserver(enlace, servidor)
                    if devuelve:
                        enlace = devuelve[0][1]
                        itemlist.append(
                            Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo,
                                 fulltitle=item.title, url=enlace, thumbnail=logo, plot=item.plot, folder=False))
                except:
                    pass

    for logo, servidor, idioma, calidad, enlace, titulo in enlaces_descargar:
        if "Ver" not in titulo:
            servidor = servidor.replace("uploaded", "uploadedto")
            partes = enlace.split(" ")
            p = 1
            for enlace in partes:
                parte_titulo = titulo + " (%s/%s)" % (p, len(partes)) + " [" + servidor + "]"
                p += 1
                mostrar_server = True
                if config.get_setting("hidepremium"):
                    mostrar_server = servertools.is_server_enabled(servidor)
                if mostrar_server:
                    try:
                        devuelve = servertools.findvideosbyserver(enlace, servidor)
                        if devuelve:
                            enlace = devuelve[0][1]
                            itemlist.append(Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor,
                                                 title=parte_titulo, fulltitle=item.title, url=enlace, thumbnail=logo,
                                                 plot=item.plot, folder=False))
                    except:
                        pass
    return itemlist
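
# Sketch of the multi-part handling above: a download entry whose link field
# holds several space-separated URLs becomes one item per part. Links are
# made up for the example.
partes = "http://example.com/f.part1.rar http://example.com/f.part2.rar".split(" ")
p = 1
for enlace in partes:
    print("Descargar (%s/%s) [uploadedto] %s" % (p, len(partes), enlace))
    p += 1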


def episodios(item):
    logger.info()
    itemlist = []
    infoLabels = item.infoLabels
    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    pattern = '<ul class="%s">(.*?)</ul>' % "pagination"  # item.pattern
    pagination = scrapertools.find_single_match(data, pattern)
    if pagination:
        pattern = '<li><a href="([^"]+)">Last<\/a>'
        full_url = scrapertools.find_single_match(pagination, pattern)
        url, last_page = scrapertools.find_single_match(full_url, r'(.*?\/pg\/)(\d+)')
        list_pages = [item.url]
        for x in range(2, int(last_page) + 1):
            response = httptools.downloadpage('%s%s' % (url, x))
            if response.sucess:
                list_pages.append("%s%s" % (url, x))
    else:
        list_pages = [item.url]

    for index, page in enumerate(list_pages):
        logger.debug("Loading page %s/%s url=%s" % (index, len(list_pages), page))
        data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(page).data)
        data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")

        pattern = '<ul class="%s">(.*?)</ul>' % "buscar-list"  # item.pattern
        data = scrapertools.get_match(data, pattern)

        pattern = '<li[^>]*><a href="(?P<url>[^"]+).*?<img src="(?P<thumb>[^"]+)".*?<h2[^>]+>(?P<info>.*?)</h2>'
        matches = re.compile(pattern, re.DOTALL).findall(data)

        for url, thumb, info in matches:

            if "<span" in info:  # new style
                pattern = ".*?[^>]+>.*?Temporada\s*(?P<season>\d+)\s*Capitulo(?:s)?\s*(?P<episode>\d+)" \
                          "(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)</span>\s*Calidad\s*<span[^>]+>" \
                          "[\[]\s*(?P<quality>.*?)\s*[\]]</span>"
                r = re.compile(pattern)
                match = [m.groupdict() for m in r.finditer(info)][0]

                if match["episode2"]:
                    multi = True
                    title = "%s (%sx%s-%s) [%s][%s]" % (item.show, match["season"], str(match["episode"]).zfill(2),
                                                        str(match["episode2"]).zfill(2), match["lang"],
                                                        match["quality"])
                else:
                    multi = False
                    title = "%s (%sx%s) [%s][%s]" % (item.show, match["season"], str(match["episode"]).zfill(2),
                                                     match["lang"], match["quality"])

            else:  # old style
                pattern = "\[(?P<quality>.*?)\].*?\[Cap.(?P<season>\d+)(?P<episode>\d{2})(?:_(?P<season2>\d+)" \
                          "(?P<episode2>\d{2}))?.*?\].*?(?:\[(?P<lang>.*?)\])?"

                r = re.compile(pattern)
                match = [m.groupdict() for m in r.finditer(info)][0]
                # logger.debug("data %s" % match)

                str_lang = ""
                if match["lang"] is not None:
                    str_lang = "[%s]" % match["lang"]

                if match["season2"] and match["episode2"]:
                    multi = True
                    if match["season"] == match["season2"]:

                        title = "%s (%sx%s-%s) %s[%s]" % (item.show, match["season"], match["episode"],
                                                          match["episode2"], str_lang, match["quality"])
                    else:
                        title = "%s (%sx%s-%sx%s) %s[%s]" % (item.show, match["season"], match["episode"],
                                                             match["season2"], match["episode2"], str_lang,
                                                             match["quality"])
                else:
                    title = "%s (%sx%s) %s[%s]" % (item.show, match["season"], match["episode"], str_lang,
                                                   match["quality"])
                    multi = False

            season = match['season']
            episode = match['episode']
            itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb,
                                 quality=item.quality, multi=multi, contentSeason=season,
                                 contentEpisodeNumber=episode, infoLabels=infoLabels))

    # order list
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    if len(itemlist) > 1:
        itemlist = sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber)))

    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            item.clone(title="[COLOR orange][B]Añadir esta serie a la videoteca[/B][/COLOR]", action="add_serie_to_library", extra="episodios"))

    return itemlist
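
# Worked example (illustrative input) of the "new style" episode pattern used
# in episodios() above.
import re

pattern = (r".*?[^>]+>.*?Temporada\s*(?P<season>\d+)\s*Capitulo(?:s)?\s*(?P<episode>\d+)"
           r"(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)</span>\s*Calidad\s*<span[^>]+>"
           r"[\[]\s*(?P<quality>.*?)\s*[\]]</span>")

info = ('<strong>Mi Serie</strong> Temporada 2 Capitulo 5</strong> '
        '<span class="a">Español</span> Calidad <span class="b">[HDTV]</span>')
match = [m.groupdict() for m in re.compile(pattern).finditer(info)][0]
print("%(season)s %(episode)s %(lang)s %(quality)s" % match)  # -> 2 5 Español HDTV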


def search(item, texto):
    logger.info("search:" + texto)
    # texto = texto.replace(" ", "+")

    try:
        item.post = "q=%s" % texto
        item.pattern = "buscar-list"
        itemlist = listado2(item)

        return itemlist

    # The exception is caught so a failing channel does not break the global search
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def newest(categoria):
    logger.info()
    itemlist = []
    item = Item()
    try:
        item.extra = 'pelilist'
        if categoria == 'torrent':
            item.url = host + 'peliculas/'

            itemlist = listado(item)
            if itemlist[-1].title == ">> Página siguiente":
                itemlist.pop()
            item.url = host + 'series/'
            itemlist.extend(listado(item))
            if itemlist[-1].title == ">> Página siguiente":
                itemlist.pop()

    # The exception is caught so a failing channel does not break the "Novedades" channel
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist

@@ -4,7 +4,7 @@
    "ignore_urls": [],
    "patterns": [
        {
            "pattern": "flashx.(?:tv|pw)/(?:embed.php\\?c=|embed-|playvid-|)([A-z0-9]+)",
            "pattern": "flashx.(?:tv|pw|ws|sx)/(?:embed.php\\?c=|embed-|playvid-|)([A-z0-9]+)",
            "url": "https://www.flashx.tv/\\1.html"
        }
    ]
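
# Quick check (outside the addon) that the widened flashx pattern also covers
# the new .ws and .sx domains; the video id is a made-up example.
import re

pattern = r"flashx.(?:tv|pw|ws|sx)/(?:embed.php\?c=|embed-|playvid-|)([A-z0-9]+)"
for url in ("https://www.flashx.tv/embed.php?c=abc123",
            "https://flashx.ws/playvid-abc123.html"):
    print(re.search(pattern, url).group(1))  # -> abc123 in both cases
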
@@ -9,70 +9,41 @@ from platformcode import logger

def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)

    data = httptools.downloadpage(page_url).data

    if "The file is being converted" in data:
        url = page_url.replace("http://www.nowvideo.sx/video/", "http://embed.nowvideo.sx/embed/?v=")
        data = httptools.downloadpage(url).data
    if "The file is being converted" in data or "Please try again later" in data:
        return False, "El fichero está en proceso"
    elif "no longer exists" in data:
        return False, "El fichero ha sido borrado"

    return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("(page_url='%s')" % page_url)
    video_urls = []

    video_id = scrapertools.get_match(page_url, "http://www.nowvideo.../video/([a-z0-9]+)")

    if premium:
        # Fetch the login page
        login_url = "http://www.nowvideo.eu/login.php"
        data = httptools.downloadpage(login_url).data

        # Log in
        login_url = "http://www.nowvideo.eu/login.php?return="
        post = "user=" + user + "&pass=" + password + "&register=Login"
        headers = {"Referer": "http://www.nowvideo.eu/login.php"}
        data = httptools.downloadpage(login_url, post, headers=headers).data

        # Download the video page
        data = httptools.downloadpage(page_url).data

        # URL to call: http://www.nowvideo.eu/api/player.api.php?user=aaa&file=rxnwy9ku2nwx7&pass=bbb&cid=1&cid2=undefined&key=83%2E46%2E246%2E226%2Dc7e707c6e20a730c563e349d2333e788&cid3=undefined
        # On the page:
        '''
        flashvars.domain="http://www.nowvideo.eu";
        flashvars.file="rxnwy9ku2nwx7";
        flashvars.filekey="83.46.246.226-c7e707c6e20a730c563e349d2333e788";
        flashvars.advURL="0";
        flashvars.autoplay="false";
        flashvars.cid="1";
        flashvars.user="aaa";
        flashvars.key="bbb";
        flashvars.type="1";
        '''
        flashvar_file = scrapertools.get_match(data, 'flashvars.file="([^"]+)"')
        flashvar_filekey = scrapertools.get_match(data, 'flashvars.filekey=([^;]+);')
        flashvar_filekey = scrapertools.get_match(data, 'var ' + flashvar_filekey + '="([^"]+)"')
        flashvar_user = scrapertools.get_match(data, 'flashvars.user="([^"]+)"')
        flashvar_key = scrapertools.get_match(data, 'flashvars.key="([^"]+)"')
        flashvar_type = scrapertools.get_match(data, 'flashvars.type="([^"]+)"')

        # http://www.nowvideo.eu/api/player.api.php?user=aaa&file=rxnwy9ku2nwx7&pass=bbb&cid=1&cid2=undefined&key=83%2E46%2E246%2E226%2Dc7e707c6e20a730c563e349d2333e788&cid3=undefined
        url = "http://www.nowvideo.eu/api/player.api.php?user=" + flashvar_user + "&file=" + flashvar_file + "&pass=" + flashvar_key + "&cid=1&cid2=undefined&key=" + flashvar_filekey.replace(
            ".", "%2E").replace("-", "%2D") + "&cid3=undefined"
        data = httptools.downloadpage(url).data

        location = scrapertools.get_match(data, 'url=([^\&]+)&')
        location = location + "?client=FLASH"

        video_urls.append([scrapertools.get_filename_from_url(location)[-4:] + " [premium][nowvideo]", location])
    else:
        url = page_url.replace("http://www.nowvideo.sx/video/", "http://embed.nowvideo.sx/embed/?v=")
        data = httptools.downloadpage(url).data

        videourls = scrapertools.find_multiple_matches(data, 'src\s*:\s*[\'"]([^\'"]+)[\'"]')
        if not videourls:
            videourls = scrapertools.find_multiple_matches(data, '<source src=[\'"]([^\'"]+)[\'"]')
@@ -80,14 +51,9 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
            if videourl.endswith(".mpd"):
                id = scrapertools.find_single_match(videourl, '/dash/(.*?)/')
                videourl = "http://www.nowvideo.sx/download.php%3Ffile=mm" + "%s.mp4" % id

            videourl = re.sub(r'/dl(\d)*/', '/dl/', videourl)
            ext = scrapertools.get_filename_from_url(videourl)[-4:]
            videourl = videourl.replace("%3F", "?") + \
                       "|User-Agent=Mozilla/5.0 (Windows NT 10.0; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0"
            video_urls.append([ext + " [nowvideo]", videourl])

    for video_url in video_urls:
        logger.info("%s - %s" % (video_url[0], video_url[1]))

    return video_urls
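
# Illustrative only: the premium player.api.php URL above percent-encodes the
# filekey by hand; sample key taken from the flashvars comment in the code.
filekey = "83.46.246.226-c7e707c6e20a730c563e349d2333e788"
print(filekey.replace(".", "%2E").replace("-", "%2D"))
# -> 83%2E46%2E246%2E226%2Dc7e707c6e20a730c563e349d2333e788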