EliteTorrent: redesign
Repair and redesign
@@ -3,7 +3,7 @@
     "name": "Elite Torrent",
     "active": true,
     "adult": false,
-    "language": ["cast"],
+    "language": ["*"],
     "thumbnail": "elitetorrent.png",
     "banner": "elitetorrent.png",
     "categories": [
@@ -15,10 +15,10 @@
     ],
     "settings":[
        {
-          "id": "include_in_newest_torrent",
+          "id": "include_in_global_search",
           "type": "bool",
-          "label": "Incluir en Novedades - Torrent",
-          "default": true,
+          "label": "Incluir en busqueda global",
+          "default": false,
           "enabled": true,
           "visible": true
        }
@@ -1,111 +1,586 @@
# -*- coding: utf-8 -*-

import re
import sys
import urllib
import urlparse

from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger
from platformcode import config, logger
from core import tmdb

BASE_URL = 'http://www.elitetorrent.wesconference.net'
host = 'http://www.elitetorrent.biz'

def mainlist(item):
    logger.info()

    itemlist = []
    itemlist.append(Item(channel=item.channel, title="Docus y TV", action="peliculas",
                         url="http://www.elitetorrent.wesconference.net/categoria/6/docus-y-tv/modo:mini",
                         viewmode="movie_with_plot"))
    itemlist.append(Item(channel=item.channel, title="Estrenos", action="peliculas",
                         url="http://www.elitetorrent.wesconference.net/categoria/1/estrenos/modo:mini", viewmode="movie_with_plot"))
    itemlist.append(Item(channel=item.channel, title="Películas", action="peliculas",
                         url="http://www.elitetorrent.wesconference.net/categoria/2/peliculas/modo:mini", viewmode="movie_with_plot"))
    itemlist.append(Item(channel=item.channel, title="Peliculas HDRip", action="peliculas",
                         url="http://www.elitetorrent.wesconference.net/categoria/13/peliculas-hdrip/modo:mini",
                         viewmode="movie_with_plot"))
    itemlist.append(Item(channel=item.channel, title="Peliculas MicroHD", action="peliculas",
                         url="http://www.elitetorrent.wesconference.net/categoria/17/peliculas-microhd/modo:mini",
                         viewmode="movie_with_plot"))
    itemlist.append(Item(channel=item.channel, title="Peliculas VOSE", action="peliculas",
                         url="http://www.elitetorrent.wesconference.net/categoria/14/peliculas-vose/modo:mini",
                         viewmode="movie_with_plot"))
    itemlist.append(Item(channel=item.channel, title="Series", action="peliculas",
                         url="http://www.elitetorrent.wesconference.net/categoria/4/series/modo:mini", viewmode="movie_with_plot"))
    itemlist.append(Item(channel=item.channel, title="Series VOSE", action="peliculas",
                         url="http://www.elitetorrent.wesconference.net/categoria/16/series-vose/modo:mini",
                         viewmode="movie_with_plot"))

    thumb_pelis = get_thumb("channels_movie.png")
    thumb_pelis_hd = get_thumb("channels_movie_hd.png")
    thumb_series = get_thumb("channels_tvshow.png")
    thumb_series_hd = get_thumb("channels_tvshow_hd.png")
    thumb_buscar = get_thumb("search.png")

    itemlist.append(Item(channel=item.channel, action="submenu", title="Películas", url=host, extra="peliculas", thumbnail=thumb_pelis))

    itemlist.append(Item(channel=item.channel, action="submenu", title="Series", url=host, extra="series", thumbnail=thumb_series))

    itemlist.append(Item(channel=item.channel, action="search", title="Buscar", url=host, thumbnail=thumb_buscar))

    return itemlist

def submenu(item):
    logger.info()
    itemlist = []

    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)

    patron = '<div class="cab_menu">.*?<\/div>' #Menú principal
    data1 = scrapertools.get_match(data, patron)
    patron = '<div id="menu_langen">.*?<\/div>' #Menú de idiomas
    data1 += scrapertools.get_match(data, patron)

    patron = '<a href="(.*?)".*?title="(.*?)"' #Encontrar todos los apartados
    matches = re.compile(patron, re.DOTALL).findall(data1)

    for scrapedurl, scrapedtitle in matches:
        scrapedtitle = re.sub('\r\n', '', scrapedtitle).decode('utf8').strip()
        scrapedtitle = scrapedtitle.replace(" torrent", "").replace(" Torrent", "").replace("Series y ", "").title()

        if "castellano" in scrapedtitle.lower(): #Evita la entrada de peliculas castellano del menú de idiomas
            continue

        if item.extra == "series": #Tratamos Series
            if not "/serie" in scrapedurl:
                continue
        else: #Tratamos Películas
            if "/serie" in scrapedurl:
                continue

        itemlist.append(item.clone(action="listado", title=scrapedtitle, url=scrapedurl))

    if item.extra == "series": #Añadimos Series VOSE que está fuera del menú principal
        itemlist.append(item.clone(action="listado", title="Series VOSE", url=host + "/series-vose/"))

    return itemlist
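
The submenu scraper above relies on a single pattern, '<a href="(.*?)".*?title="(.*?)"', applied to the cab_menu and menu_langen blocks, followed by the " torrent"/"Series y " clean-up. A rough standalone sketch of that behaviour, using only the re module and an invented HTML fragment (not taken from the live site):

# -*- coding: utf-8 -*-
import re

# Invented menu fragment in the shape submenu() expects (illustrative only)
data1 = ('<div class="cab_menu"><a href="/peliculas-hdrip/" title="Peliculas HDRip torrent">HDRip</a>'
         '<a href="/series/" title="Series y Series torrent">Series</a></div>')

patron = '<a href="(.*?)".*?title="(.*?)"'  # same pattern as in submenu()
matches = re.compile(patron, re.DOTALL).findall(data1)

for scrapedurl, scrapedtitle in matches:
    scrapedtitle = scrapedtitle.replace(" torrent", "").replace(" Torrent", "").replace("Series y ", "").title()
    print("%s -> %s" % (scrapedtitle, scrapedurl))
# Peliculas Hdrip -> /peliculas-hdrip/
# Series -> /series/
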

def peliculas(item):
def listado(item):
    logger.info()
    itemlist = []

    # Descarga la página
    data = scrapertools.cache_page(item.url)
    if "http://www.bajui.com/redi.php" in data:
        data = scrapertools.cache_page(item.url)
    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)

    '''
    <li>
    <a href="/torrent/23471/mandela-microhd-720p"><img src="thumb_fichas/23471.jpg" border="0" title="Mandela (microHD - 720p)" alt="IMG: Mandela (microHD - 720p)"/></a>
    <div class="meta">
    <a class="nombre" href="/torrent/23471/mandela-microhd-720p" title="Mandela (microHD - 720p)">Mandela (microHD - 720p)</a>
    <span class="categoria">Peliculas microHD</span>
    <span class="fecha">Hace 2 sem</span>
    <span class="descrip">Título: Mandela: Del mito al hombre<br />
    '''
    patron = '<a href="(/torrent/[^"]+)">'
    patron += '<img src="(thumb_fichas/[^"]+)" border="0" title="([^"]+)"[^>]+></a>'
    patron += '.*?<span class="descrip">(.*?)</span>'
    patron = '<div id="principal">.*?<\/nav><\/div><\/div>'
    data = scrapertools.find_single_match(data, patron)

    patron = '<li>.*?<a href="(.*?)".*?' #url
    patron += 'title="(.*?)".*?' #título
    patron += 'src="(.*?)".*?' #thumb
    patron += "title='(.*?)'.*?" #categoría, idioma
    patron += '"><i>(.*?)<\/i><\/span.*?' #calidad
    patron += '="dig1">(.*?)<.*?' #tamaño
    patron += '="dig2">(.*?)<\/span><\/div>' #tipo tamaño

    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    #logger.debug("PATRON: " + patron)
    #logger.debug(matches)
    #logger.debug(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedplot in matches:
        title = scrapedtitle.strip()
        url = urlparse.urljoin(BASE_URL, scrapedurl)
        thumbnail = urlparse.urljoin(BASE_URL, scrapedthumbnail)
        plot = re.sub('<[^<]+?>', '', scrapedplot)
        logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
        itemlist.append(Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot,
                             folder=False))
    for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedcategory, scrapedcalidad, scrapedsize, scrapedsizet in matches:
        item_local = item.clone() #Creamos copia de Item para trabajar

        title = re.sub('\r\n', '', scrapedtitle).decode('utf8').strip()
        title = title.replace(" torrent", "").replace(" Torrent", "").replace("Series y ", "")
        item_local.url = urlparse.urljoin(host, scrapedurl)
        item_local.thumbnail = urlparse.urljoin(host, scrapedthumbnail)

        if "---" in scrapedcalidad: #Scrapeamos y limpiamos calidades
            scrapedcalidad = ''
        if "microhd" in title.lower():
            item_local.quality = "microHD"
        if not "/series-vose/" in item.url and not item_local.quality:
            item_local.quality = scrapedcalidad
        if scrapertools.find_single_match(item_local.quality, r'\d+\.\d+'):
            item_local.quality = ''
        if not item_local.quality and ("DVDRip" in title or "HDRip" in title or "BR-LINE" in title or "HDTS-SCREENER" in title or "BDRip" in title or "BR-Screener" in title or "DVDScreener" in title or "TS-Screener" in title):
            item_local.quality = scrapertools.find_single_match(title, r'\((.*?)\)')
            item_local.quality = item_local.quality.replace("Latino", "")
        if not scrapedsizet:
            scrapedsize = ''
        else:
            item_local.quality += ' [%s %s]' % (scrapedsize.replace(".", ","), scrapedsizet)

        item_local.language = [] #Verificamos el idioma por si encontramos algo
        if "latino" in scrapedcategory.lower() or "latino" in item.url or "latino" in title.lower():
            item_local.language += ["LAT"]
        if "ingles" in scrapedcategory.lower() or "ingles" in item.url or "vose" in scrapedurl or "vose" in item.url:
            if "VOSE" in scrapedcategory.lower() or "sub" in title.lower() or "vose" in scrapedurl or "vose" in item.url:
                item_local.language += ["VOS"]
            else:
                item_local.language += ["VO"]
        if "dual" in scrapedcategory.lower() or "dual" in title.lower():
            item_local.language[0:0] = ["DUAL"]

        #Limpiamos el título de la basuna innecesaria
        title = title.replace("Dual", "").replace("dual", "").replace("Subtitulada", "").replace("subtitulada", "").replace("Subt", "").replace("subt", "").replace("Sub", "").replace("sub", "").replace("(Proper)", "").replace("(proper)", "").replace("Proper", "").replace("proper", "").replace("#", "").replace("(Latino)", "").replace("Latino", "")
        title = title.replace("- HDRip", "").replace("(HDRip)", "").replace("- Hdrip", "").replace("(microHD)", "").replace("(DVDRip)", "").replace("(HDRip)", "").replace("(BR-LINE)", "").replace("(HDTS-SCREENER)", "").replace("(BDRip)", "").replace("(BR-Screener)", "").replace("(DVDScreener)", "").replace("TS-Screener", "").replace(" TS", "").replace(" Ts", "")
        title = re.sub(r'\??\s?\d*?\&.*', '', title).title().strip()

        if item_local.extra == "peliculas": #preparamos Item para películas
            if "/serie" in scrapedurl or "/serie" in item.url:
                continue
            item_local.contentType = "movie"
            item_local.contentTitle = title.strip()
        else: #preparamos Item para series
            if not "/serie" in scrapedurl and not "/serie" in item.url:
                continue
            item_local.contentType = "episode"
            epi_mult = scrapertools.find_single_match(item_local.url, r'cap.*?-\d+-(al-\d+)')
            item_local.contentSeason = scrapertools.find_single_match(item_local.url, r'temp.*?-(\d+)')
            item_local.contentEpisodeNumber = scrapertools.find_single_match(item_local.url, r'cap.*?-(\d+)')
            if not item_local.contentSeason:
                item_local.contentSeason = scrapertools.find_single_match(item_local.url, r'-(\d+)[x|X]\d+')
            if not item_local.contentEpisodeNumber:
                item_local.contentEpisodeNumber = scrapertools.find_single_match(item_local.url, r'-\d+[x|X](\d+)')
            if item_local.contentSeason < 1:
                item_local.contentSeason = 1
            if item_local.contentEpisodeNumber < 1:
                item_local.contentEpisodeNumber = 1
            item_local.contentSerieName = title.strip()
            if epi_mult:
                title = '%s, %s' % (epi_mult.replace("-", " "), title)

        item_local.action = "findvideos"
        item_local.title = title.strip()
        item_local.infoLabels['year'] = "-"

        itemlist.append(item_local.clone()) #Pintar pantalla

    #Pasamos a TMDB la lista completa Itemlist
    tmdb.set_infoLabels(itemlist, True)

    # Pasada para maquillaje de los títulos obtenidos desde TMDB
    for item_local in itemlist:
        title = item_local.title

        # Si TMDB no ha encontrado el vídeo limpiamos el año
        if item_local.infoLabels['year'] == "-":
            item_local.infoLabels['year'] = ''
            item_local.infoLabels['aired'] = ''

        # Preparamos el título para series, con los núm. de temporadas, si las hay
        if item_local.contentType == "season" or item_local.contentType == "tvshow":
            item_local.contentTitle = ''
        if item_local.contentType == "episode":
            if scrapertools.find_single_match(title, r'(al\s\d+)'):
                item_local.infoLabels['episodio_titulo'] = scrapertools.find_single_match(title, r'(al\s\d+)')
            if scrapertools.find_single_match(str(item_local.infoLabels['aired']), r'\/(\d{4})'):
                item_local.infoLabels['year'] = scrapertools.find_single_match(str(item_local.infoLabels['aired']), r'\/(\d{4})')

        rating = ''
        if item_local.infoLabels['rating'] and item_local.infoLabels['rating'] != '0.0':
            rating = float(item_local.infoLabels['rating'])
            rating = round(rating, 1)

        #Ahora maquillamos un poco los titulos dependiendo de si se han seleccionado títulos inteleigentes o no
        if not config.get_setting("unify"): #Si Titulos Inteligentes NO seleccionados:
            if item_local.contentType == "episode":
                if item_local.infoLabels['episodio_titulo']:
                    title = '%sx%s %s, %s [COLOR yellow][%s][/COLOR] [%s] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (str(item_local.contentSeason), str(item_local.contentEpisodeNumber).zfill(2), item_local.infoLabels['episodio_titulo'], item_local.contentSerieName, item_local.infoLabels['year'], rating, item_local.quality, str(item_local.language))
                else:
                    title = '%sx%s %s [COLOR yellow][%s][/COLOR] [%s] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (str(item_local.contentSeason), str(item_local.contentEpisodeNumber).zfill(2), item_local.contentSerieName, item_local.infoLabels['year'], rating, item_local.quality, str(item_local.language))
                item_local.infoLabels['title'] = item_local.contentSerieName

            elif item_local.contentType == "season" or item_local.contentType == "tvshow":
                if item_local.extra == "series":
                    title = '%s - Temporada %s [COLOR yellow][%s][/COLOR] [%s] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.contentSerieName, item_local.contentSeason, item_local.infoLabels['year'], rating, item_local.quality, str(item_local.language))
                else:
                    title = '%s [COLOR yellow][%s][/COLOR] [%s] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.contentSerieName, item_local.infoLabels['year'], rating, item_local.quality, str(item_local.language))

            elif item_local.contentType == "movie":
                title = '%s [COLOR yellow][%s][/COLOR] [%s] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (title, str(item_local.infoLabels['year']), rating, item_local.quality, str(item_local.language))

        if config.get_setting("unify"): #Si Titulos Inteligentes SÍ seleccionados:
            if item_local.contentType == "episode":
                if item_local.infoLabels['episodio_titulo']:
                    item_local.infoLabels['episodio_titulo'] = '%s, %s [%s] [%s]' % (item_local.infoLabels['episodio_titulo'], item_local.contentSerieName, item_local.infoLabels['year'], rating)
                else:
                    item_local.infoLabels['episodio_titulo'] = '%s [%s] [%s]' % (item_local.contentSerieName, item_local.infoLabels['year'], rating)
                item_local.infoLabels['title'] = item_local.contentSerieName

            elif item_local.contentType == "season" or item_local.contentType == "tvshow":
                if item_local.extra == "series":
                    title = '%s - Temporada %s [%s] [%s]' % (item_local.contentSerieName, item_local.contentSeason, item_local.infoLabels['year'], rating)
                else:
                    title = '%s' % (item_local.contentSerieName)

        item_local.infoLabels['episodio_titulo'] = item_local.infoLabels['episodio_titulo'].replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()
        title = title.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()
        title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', title).strip()
        title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', title).strip()

        item_local.title = title

        logger.debug("url: " + item_local.url + " / title: " + item_local.title + " / content title: " + item_local.contentTitle + "/" + item_local.contentSerieName + " / calidad: " + item_local.quality + " / year: " + str(item_local.infoLabels['year']))

    # Extrae el paginador
    patronvideos = '<a href="([^"]+)" class="pagina pag_sig">Siguiente \&raquo\;</a>'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    patron = '<div class="paginacion">.*?<span class="pagina pag_actual".*?'
    patron += "<a href='([^']+)'.*?" #url siguiente página
    patron += 'class="pagina">(\d+)<.*?' #próxima página
    matches = scrapertools.find_single_match(data, patron)

    patron = 'class="pagina pag_sig">Siguiente.*?'
    patron += "<a href='.*?\/page\/(\d+)\/" #total de páginas
    last_page = scrapertools.find_single_match(data, patron)
    if not last_page:
        patron = '<div class="paginacion">.*?'
        patron += 'class="pagina">(\d+)<\/a><\/div><\/nav><\/div><\/div>' #total de páginas
        last_page = scrapertools.find_single_match(data, patron)

    if len(matches) > 0:
    if matches:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(channel=item.channel, action="peliculas", title="Página siguiente >>", url=scrapedurl, folder=True,
                 viewmode="movie_with_plot"))
        if last_page:
            title = '[COLOR gold]Página siguiente >>[/COLOR] %s de %s' % (int(matches[1]) - 1, last_page)
        else:
            title = '[COLOR gold]Página siguiente >>[/COLOR] %s' % (int(matches[1]) - 1)

        itemlist.append(Item(channel=item.channel, action="listado", title=title, url=scrapedurl, extra=item.extra))

    return itemlist
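
For series entries, listado() derives season and episode numbers from the torrent URL itself via scrapertools.find_single_match. The same regular expressions can be exercised standalone with plain re (the URLs below are made up for illustration; find_single_match is essentially a first-match/first-group helper):

# -*- coding: utf-8 -*-
import re

# Hypothetical episode URL shaped like the ones the channel parses (illustrative only)
url = '/torrent/12345/mi-serie-temporada-2-capitulos-3-al-5'

print(re.search(r'temp.*?-(\d+)', url).group(1))        # '2'    -> contentSeason
print(re.search(r'cap.*?-(\d+)', url).group(1))         # '3'    -> contentEpisodeNumber
print(re.search(r'cap.*?-\d+-(al-\d+)', url).group(1))  # 'al-5' -> multi-episode suffix (epi_mult)

# Older '2x03'-style URLs fall back to the second pair of patterns
url2 = '/torrent/67890/otra-serie-2x03'
print(re.search(r'-(\d+)[x|X]\d+', url2).group(1))      # '2'
print(re.search(r'-\d+[x|X](\d+)', url2).group(1))      # '03'
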

def play(item):
def listado_busqueda(item):
    logger.info()
    itemlist = []

    data = scrapertools.cache_page(item.url)
    if "http://www.bajui.com/redi.php" in data:
        data = scrapertools.cache_page(item.url)
    # Descarga la página
    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)

    # <a href="magnet:?xt=urn:btih:d6wtseg33iisp7jexpl44wfcqh7zzjuh&dn=Abraham+Lincoln+Cazador+de+vampiros+%28HDRip%29+%28EliteTorrent.net%29&tr=http://tracker.torrentbay.to:6969/announce" class="enlace_torrent degradado1">Descargar por magnet link</a>
    link = scrapertools.get_match(data,
                                  '<a href="(magnet[^"]+)" class="enlace_torrent[^>]+>Descargar por magnet link</a>')
    link = urlparse.urljoin(item.url, link)
    logger.info("link=" + link)
    patron = '<div id="principal">.*?<\/nav><\/div><\/div>'
    data = scrapertools.find_single_match(data, patron)

    patron = '<li>.*?<a href="(.*?)".*?' #url
    patron += 'title="(.*?)".*?' #título
    patron += 'src="(.*?)".*?' #thumb
    patron += "title='(.*?)'.*?" #categoría, idioma
    patron += '"><i>(.*?)<\/i><\/span.*?' #calidad
    patron += '="dig1">(.*?)<.*?' #tamaño
    patron += '="dig2">(.*?)<\/span><\/div>' #tipo tamaño

    itemlist.append(Item(channel=item.channel, action="play", server="torrent", title=item.title, url=link,
                         thumbnail=item.thumbnail, plot=item.plot, folder=False))
    matches = re.compile(patron, re.DOTALL).findall(data)
    #logger.debug("PATRON: " + patron)
    #logger.debug(matches)
    #logger.debug(data)

    for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedcategory, scrapedcalidad, scrapedsize, scrapedsizet in matches:
        item_local = item.clone()

        title = re.sub('\r\n', '', scrapedtitle).decode('utf8').strip()
        title = title.replace(" torrent", "").replace(" Torrent", "").replace("Series y ", "")
        item_local.url = urlparse.urljoin(host, scrapedurl)
        item_local.thumbnail = urlparse.urljoin(host, scrapedthumbnail)

        if "---" in scrapedcalidad:
            scrapedcalidad = ''
        if "microhd" in title.lower():
            item_local.quality = "microHD"
        if not "/series-vose/" in item.url and not item_local.quality:
            item_local.quality = scrapedcalidad
        if scrapertools.find_single_match(item_local.quality, r'\d+\.\d+'):
            item_local.quality = ''
        if not item_local.quality and ("DVDRip" in title or "HDRip" in title or "BR-LINE" in title or "HDTS-SCREENER" in title or "BDRip" in title or "BR-Screener" in title or "DVDScreener" in title or "TS-Screener" in title):
            item_local.quality = scrapertools.find_single_match(title, r'\((.*?)\)')
            item_local.quality = item_local.quality.replace("Latino", "")
        if not scrapedsizet:
            scrapedsize = ''
        else:
            item_local.quality += ' [%s %s]' % (scrapedsize.replace(".", ","), scrapedsizet)

        item_local.language = []
        if "latino" in scrapedcategory.lower() or "latino" in item.url or "latino" in title.lower():
            item_local.language += ["LAT"]
        if "ingles" in scrapedcategory.lower() or "ingles" in item.url or "vose" in scrapedurl or "vose" in item.url:
            if "VOSE" in scrapedcategory.lower() or "sub" in title.lower() or "vose" in scrapedurl or "vose" in item.url:
                item_local.language += ["VOS"]
            else:
                item_local.language += ["VO"]
        if "dual" in scrapedcategory.lower() or "dual" in title.lower():
            item_local.language[0:0] = ["DUAL"]

        title = title.replace("Dual", "").replace("dual", "").replace("Subtitulada", "").replace("subtitulada", "").replace("Subt", "").replace("subt", "").replace("Sub", "").replace("sub", "").replace("(Proper)", "").replace("(proper)", "").replace("Proper", "").replace("proper", "").replace("#", "").replace("(Latino)", "").replace("Latino", "")
        title = title.replace("- HDRip", "").replace("(HDRip)", "").replace("- Hdrip", "").replace("(microHD)", "").replace("(DVDRip)", "").replace("(HDRip)", "").replace("(BR-LINE)", "").replace("(HDTS-SCREENER)", "").replace("(BDRip)", "").replace("(BR-Screener)", "").replace("(DVDScreener)", "").replace("TS-Screener", "").replace(" TS", "").replace(" Ts", "")
        title = re.sub(r'\??\s?\d*?\&.*', '', title).title().strip()

        if not "/serie" in scrapedurl:
            item_local.contentType = "movie"
            item_local.extra = "peliculas"
            item_local.contentTitle = title
        else:
            item_local.contentType = "episode"
            item_local.extra = "series"
            epi_mult = scrapertools.find_single_match(item_local.url, r'cap.*?-\d+-(al-\d+)')
            item_local.contentSeason = scrapertools.find_single_match(item_local.url, r'temp.*?-(\d+)')
            item_local.contentEpisodeNumber = scrapertools.find_single_match(item_local.url, r'cap.*?-(\d+)')
            if not item_local.contentSeason:
                item_local.contentSeason = scrapertools.find_single_match(item_local.url, r'-(\d+)[x|X]\d+')
            if not item_local.contentEpisodeNumber:
                item_local.contentEpisodeNumber = scrapertools.find_single_match(item_local.url, r'-\d+[x|X](\d+)')
            if item_local.contentSeason < 1:
                item_local.contentSeason = 1
            if item_local.contentEpisodeNumber < 1:
                item_local.contentEpisodeNumber = 1
            item_local.contentSerieName = title
            if epi_mult:
                title = '%s, %s' % (epi_mult.replace("-", " "), title)

        item_local.action = "findvideos"
        item_local.title = title
        item_local.infoLabels['year'] = "-"

        itemlist.append(item_local.clone()) #Pintar pantalla

    if not item.category: #Si este campo no existe es que viene de la primera pasada de una búsqueda global
        return itemlist #Retornamos sin pasar por la fase de maquillaje para ahorra tiempo

    #Pasamos a TMDB la lista completa Itemlist
    tmdb.set_infoLabels(itemlist, True)

    # Pasada para maquillaje de los títulos obtenidos desde TMDB
    for item_local in itemlist:
        title = item_local.title

        # Si TMDB no ha encontrado el vídeo limpiamos el año
        if item_local.infoLabels['year'] == "-":
            item_local.infoLabels['year'] = ''
            item_local.infoLabels['aired'] = ''

        # Preparamos el título para series, con los núm. de temporadas, si las hay
        if item_local.contentType == "season" or item_local.contentType == "tvshow":
            item_local.contentTitle = ''
        if item_local.contentType == "episode":
            if scrapertools.find_single_match(title, r'(al\s\d+)'):
                item_local.infoLabels['episodio_titulo'] = scrapertools.find_single_match(title, r'(al\s\d+)')
            if scrapertools.find_single_match(str(item_local.infoLabels['aired']), r'\/(\d{4})'):
                item_local.infoLabels['year'] = scrapertools.find_single_match(str(item_local.infoLabels['aired']), r'\/(\d{4})')

        rating = ''
        if item_local.infoLabels['rating'] and item_local.infoLabels['rating'] != '0.0':
            rating = float(item_local.infoLabels['rating'])
            rating = round(rating, 1)

        #Ahora maquillamos un poco los titulos dependiendo de si se han seleccionado títulos inteleigentes o no
        if not config.get_setting("unify"): #Si Titulos Inteligentes NO seleccionados:
            if item_local.contentType == "episode":
                if item_local.infoLabels['episodio_titulo']:
                    title = '%sx%s %s, %s [COLOR yellow][%s][/COLOR] [%s] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (str(item_local.contentSeason), str(item_local.contentEpisodeNumber).zfill(2), item_local.infoLabels['episodio_titulo'], item_local.contentSerieName, item_local.infoLabels['year'], rating, item_local.quality, str(item_local.language))
                else:
                    title = '%sx%s %s [COLOR yellow][%s][/COLOR] [%s] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (str(item_local.contentSeason), str(item_local.contentEpisodeNumber).zfill(2), item_local.contentSerieName, item_local.infoLabels['year'], rating, item_local.quality, str(item_local.language))
                item_local.infoLabels['title'] = item_local.contentSerieName

            elif item_local.contentType == "season" or item_local.contentType == "tvshow":
                if item_local.extra == "series":
                    title = '%s - Temporada %s [COLOR yellow][%s][/COLOR] [%s] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.contentSerieName, item_local.contentSeason, item_local.infoLabels['year'], rating, item_local.quality, str(item_local.language))
                else:
                    title = '%s [COLOR yellow][%s][/COLOR] [%s] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.contentSerieName, item_local.infoLabels['year'], rating, item_local.quality, str(item_local.language))

            elif item_local.contentType == "movie":
                title = '%s [COLOR yellow][%s][/COLOR] [%s] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (title, str(item_local.infoLabels['year']), rating, item_local.quality, str(item_local.language))

        if config.get_setting("unify"): #Si Titulos Inteligentes SÍ seleccionados:
            if item_local.contentType == "episode":
                if item_local.infoLabels['episodio_titulo']:
                    item_local.infoLabels['episodio_titulo'] = '%s, %s [%s] [%s]' % (item_local.infoLabels['episodio_titulo'], item_local.contentSerieName, item_local.infoLabels['year'], rating)
                else:
                    item_local.infoLabels['episodio_titulo'] = '%s [%s] [%s]' % (item_local.contentSerieName, item_local.infoLabels['year'], rating)
                item_local.infoLabels['title'] = item_local.contentSerieName

            elif item_local.contentType == "season" or item_local.contentType == "tvshow":
                if item_local.extra == "series":
                    title = '%s - Temporada %s [%s] [%s]' % (item_local.contentSerieName, item_local.contentSeason, item_local.infoLabels['year'], rating)
                else:
                    title = '%s' % (item_local.contentSerieName)

        item_local.infoLabels['episodio_titulo'] = item_local.infoLabels['episodio_titulo'].replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()
        title = title.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()
        title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', title).strip()
        title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', title).strip()

        item_local.title = title

        logger.debug("url: " + item_local.url + " / title: " + item_local.title + " / content title: " + item_local.contentTitle + "/" + item_local.contentSerieName + " / calidad: " + item_local.quality + " / year: " + str(item_local.infoLabels['year']))

    # Extrae el paginador
    patron = '<div class="paginacion">.*?<span class="pagina pag_actual".*?'
    patron += "<a href='([^']+)'.*?" #url siguiente página
    patron += 'class="pagina">(\d+)<' #próxima página
    matches = scrapertools.find_single_match(data, patron)

    patron = 'class="pagina pag_sig">Siguiente.*?'
    patron += "<a href='.*?\/page\/(\d+)\/" #total de páginas
    last_page = scrapertools.find_single_match(data, patron)
    if not last_page:
        patron = '<div class="paginacion">.*?'
        patron += 'class="pagina">(\d+)<\/a><\/div><\/nav><\/div><\/div>' #total de páginas
        last_page = scrapertools.find_single_match(data, patron)

    if matches:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        if last_page:
            title = '[COLOR gold]Página siguiente >>[/COLOR] %s de %s' % (int(matches[1]) - 1, last_page)
        else:
            title = '[COLOR gold]Página siguiente >>[/COLOR] %s' % (int(matches[1]) - 1)

        itemlist.append(Item(channel=item.channel, action="listado_busqueda", title=title, url=scrapedurl, extra=item.extra))

    return itemlist
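
Both listado() and listado_busqueda() build their "Página siguiente" entry from the paginator block with the two patterns above. A rough standalone sketch against an invented paginator fragment (plain re in place of scrapertools.find_single_match; the markup is not taken from the live site):

# -*- coding: utf-8 -*-
import re

# Invented paginator fragment in the expected shape (illustrative only)
data = ('<div class="paginacion"><span class="pagina pag_actual">2</span>'
        "<a href='/peliculas/page/3/' class=\"pagina\">3</a>"
        '<a href="/peliculas/page/3/" class="pagina pag_sig">Siguiente</a>'
        "<a href='/peliculas/page/25/' class=\"pagina\">25</a></div></nav></div></div>")

patron = '<div class="paginacion">.*?<span class="pagina pag_actual".*?'
patron += "<a href='([^']+)'.*?"   # url of the next page
patron += 'class="pagina">(\d+)<'  # number of the next page
matches = re.search(patron, data, re.DOTALL)
print(matches.group(1))  # '/peliculas/page/3/'
print(matches.group(2))  # '3'

patron = 'class="pagina pag_sig">Siguiente.*?'
patron += "<a href='.*?\/page\/(\d+)\/"  # total number of pages
print(re.search(patron, data, re.DOTALL).group(1))  # '25'
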

def findvideos(item):
    logger.info()
    itemlist = []

    # Saber si estamos en una ventana emergente lanzada desde una viñeta del menú principal,
    # con la función "play_from_library"
    unify_status = False
    try:
        import xbmc
        if xbmc.getCondVisibility('Window.IsMedia') == 1:
            unify_status = config.get_setting("unify")
    except:
        unify_status = config.get_setting("unify")

    # Obtener la información actualizada del Episodio, si no la hay
    if not item.infoLabels['tmdb_id'] and item.contentChannel == "videolibrary":
        tmdb.set_infoLabels(item, True)

    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    #data = unicode(data, "utf-8", errors="replace")

    #Añadimos el tamaño para todos
    size = scrapertools.find_single_match(item.quality, '\s\[(\d+,?\d*?\s\w[b|B]s)\]')
    item.quality = re.sub('\s\[\d+,?\d*?\s\w[b|B]s\]', '', item.quality) #Quitamos size de calidad, si lo traía
    if size:
        item.title = re.sub('\s\[\d+,?\d*?\s\w[b|B]s\]', '', item.title) #Quitamos size de título, si lo traía
        item.title = '%s [%s]' % (item.title, size) #Agregamos size al final del título

    #Limpiamos de año y rating de episodios
    if item.infoLabels['episodio_titulo']:
        item.infoLabels['episodio_titulo'] = re.sub(r'\s?\[.*?\]', '', item.infoLabels['episodio_titulo'])
        if item.infoLabels['episodio_titulo'] == item.contentSerieName:
            item.infoLabels['episodio_titulo'] = ''
    if item.infoLabels['aired'] and item.contentType == "episode":
        item.infoLabels['year'] = scrapertools.find_single_match(str(item.infoLabels['aired']), r'\/(\d{4})')

    #Generamos una copia de Item para trabajar sobre ella
    item_local = item.clone()

    patron = '<div class="enlace_descarga".*?<a href="(.*?\.torrent)"'
    link_torrent = scrapertools.find_single_match(data, patron)
    link_torrent = urlparse.urljoin(item_local.url, link_torrent)
    link_torrent = link_torrent.replace(" ", "%20") #sustituimos espacios por %20, por si acaso
    #logger.info("link Torrent: " + link_torrent)

    patron = '<div class="enlace_descarga".*?<a href="(magnet:?.*?)"'
    link_magnet = scrapertools.find_single_match(data, patron)
    link_magnet = urlparse.urljoin(item_local.url, link_magnet)
    #logger.info("link Magnet: " + link_magnet)

    #Pintamos el pseudo-título con toda la información disponible del vídeo
    item_local.action = ""
    item_local.server = "torrent"

    rating = '' #Ponemos el rating
    if item_local.infoLabels['rating'] and item_local.infoLabels['rating'] != '0.0':
        rating = float(item_local.infoLabels['rating'])
        rating = round(rating, 1)

    if item_local.contentType == "episode":
        title = '%sx%s' % (str(item_local.contentSeason), str(item_local.contentEpisodeNumber).zfill(2))
        if item_local.infoLabels['temporada_num_episodios']:
            title = '%s (de %s)' % (title, str(item_local.infoLabels['temporada_num_episodios']))
        title = '%s %s' % (title, item_local.infoLabels['episodio_titulo'])
        if item_local.infoLabels['episodio_titulo']: #Ya viene con el nombre de Serie
            title_gen = '%s [COLOR yellow][%s][/COLOR] [%s] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR] [%s]' % (title, item_local.infoLabels['year'], rating, item_local.quality, str(item_local.language), size)
        else:
            title_gen = '%s, %s [COLOR yellow][%s][/COLOR] [%s] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR] [%s]' % (title, item_local.contentSerieName, item_local.infoLabels['year'], rating, item_local.quality, str(item_local.language), size)
    else:
        title = item_local.title
        title_gen = title

    title_gen = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', title_gen) #Quitamos etiquetas vacías
    title_gen = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', title_gen) #Quitamos colores vacíos
    title_gen = title_gen.replace(" []", "") #Quitamos etiquetas vacías

    if not unify_status: #Si Titulos Inteligentes NO seleccionados:
        title_gen = '**- [COLOR gold]Enlaces Ver: [/COLOR]%s[COLOR gold] -**[/COLOR]' % (title_gen)
    else:
        title_gen = '[COLOR gold]Enlaces Ver: [/COLOR]%s' % (title_gen)

    if config.get_setting("quit_channel_name", "videolibrary") == 1 and item_local.contentChannel == "videolibrary":
        title_gen = '%s: %s' % (item_local.channel.capitalize(), title_gen)

    itemlist.append(item_local.clone(title=title_gen)) #Título con todos los datos del vídeo

    #Ahora pintamos el link del Torrent, si lo hay
    if link_torrent: # Hay Torrent ?
        if item_local.quality:
            item_local.quality += " "
        item_local.quality += "[Torrent]"
        item_local.url = link_torrent
        item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][Torrent][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.quality, str(item_local.language)) #Preparamos título de Torrent
        item_local.title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', item_local.title) #Quitamos etiquetas vacías
        item_local.title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', item_local.title) #Quitamos colores vacíos
        item_local.alive = "??" #Calidad del link sin verificar
        item_local.action = "play" #Visualizar vídeo

        itemlist.append(item_local.clone()) #Pintar pantalla

    #Ahora pintamos el link del Magnet, si lo hay
    if link_magnet: # Hay Magnet ?
        if item_local.quality:
            item_local.quality += " "
        item_local.quality = item_local.quality.replace("[Torrent]", "") + "[Magnet]"
        item_local.url = link_magnet
        item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][Torrent][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.quality, str(item_local.language)) #Preparamos título de Magnet
        item_local.title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', item_local.title) #Quitamos etiquetas vacías
        item_local.title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', item_local.title) #Quitamos colores vacíos
        item_local.alive = "??" #Calidad del link sin verificar
        item_local.action = "play" #Visualizar vídeo

        itemlist.append(item_local.clone()) #Pintar pantalla

    logger.debug("TORRENT: " + link_torrent + "MAGNET: " + link_magnet + " / title gen/torr: " + title_gen + " / " + title + " / calidad: " + item_local.quality + " / tamaño: " + size + " / content: " + item_local.contentTitle + " / " + item_local.contentSerieName)
    #logger.debug(item_local)

    return itemlist
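
findvideos() moves the size tag that listado() appended to the quality string (for example "[1,4 Gbs]") to the end of the title. A minimal sketch of that regex on invented strings, using plain re instead of scrapertools:

# -*- coding: utf-8 -*-
import re

# Invented quality/title values shaped like the ones listado() produces (illustrative only)
quality = 'HDRip [1,4 Gbs]'
title = 'Mi Pelicula [2018] [HDRip [1,4 Gbs]]'

size = re.search('\s\[(\d+,?\d*?\s\w[b|B]s)\]', quality)
size = size.group(1) if size else ''
quality = re.sub('\s\[\d+,?\d*?\s\w[b|B]s\]', '', quality)  # drop the size from the quality tag

if size:
    title = re.sub('\s\[\d+,?\d*?\s\w[b|B]s\]', '', title)  # drop it from the title as well...
    title = '%s [%s]' % (title, size)                       # ...and append it at the end

print(quality)  # 'HDRip'
print(title)    # 'Mi Pelicula [2018] [HDRip] [1,4 Gbs]'
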

def search(item, texto):
    logger.info("search:" + texto)
    # texto = texto.replace(" ", "+")

    try:
        item.url = host + "?s=%s&x=0&y=0" % texto
        itemlist = listado_busqueda(item)

        return itemlist

    # Se captura la excepción, para no interrumpir al buscador global si un canal falla
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []

def newest(categoria):
    logger.info()
    itemlist = []

Binary file not shown.
After Width: | Height: | Size: 20 KiB