Merge branch 'master' into master
@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="2.5.6" provider-name="Alfa Addon">
<addon id="plugin.video.alfa" name="Alfa" version="2.5.8" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -19,12 +19,11 @@
</assets>
<news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
» cinemahd » seriesdanko
» doramasmp4 » pelisplus
» descargas2020
» torrentrapid » torrentlocura
» mispelisyseries » descargas2020
¤ arreglos internos

¤ Gracias a la colaboración de @t1254362 en esta versión
¤ Gracias a la colaboración de @pipcat y @lopezvg en ésta versión
</news>
<description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>
<summary lang="en">Browse web pages using Kodi</summary>

@@ -204,14 +204,16 @@ def episodios(item):
matches = scrapertools.find_multiple_matches(bloque, '<li><a href="([^"]+)" title="([^"]+)"')
for url, title in matches:
url = host + url
epi = scrapertools.find_single_match(title, '(?i)%s.*? (\d+) (?:Sub|Audio|Español)' % item.contentSerieName)
epi = scrapertools.find_single_match(title, '.+?(\d+) (?:Sub|Audio|Español)')
#epi = scrapertools.find_single_match(title, '(?i)%s.*? (\d+) (?:Sub|Audio|Español)' % item.contentSerieName)
new_item = item.clone(action="findvideos", url=url, title=title, extra="")
if epi:
if "Especial" in title:
epi=0
season, episode = renumbertools.numbered_for_tratk(
item.channel, show, 1, int(epi))
item.channel, item.contentSerieName, 1, int(epi))
new_item.infoLabels["episode"] = episode
new_item.infoLabels["season"] = season

new_item.title = "%sx%s %s" % (season, episode, title)
itemlist.append(new_item)

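The new pattern above no longer interpolates item.contentSerieName, so titles that do not begin with the exact series name still yield an episode number. A minimal standalone sketch of that match, using plain re rather than Alfa's scrapertools (the helper name is illustrative):

import re

def find_episode(title):
    # '.+?(\d+) (?:Sub|Audio|Español)' captures the digits right before the
    # Sub/Audio/Español tag, whatever the series name looks like.
    m = re.search(r'.+?(\d+) (?:Sub|Audio|Español)', title)
    return int(m.group(1)) if m else None

print(find_episode("Naruto Shippuden 12 Sub Español"))  # -> 12
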
@@ -117,12 +117,12 @@ def episodios(item):

itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)

data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
data_lista = scrapertools.find_single_match(data,
'<ul class="episodios">(.+?)<\/ul><\/div><\/div><\/div>')
show = item.title
patron_caps = '<img src="([^"]+)"><\/a><\/div><div class=".+?">([^"]+)<\/div>.+?<a .+? href="([^"]+)">([^"]+)<\/a>'
patron_caps = '<img alt=".+?" title=".+?" src="([^"]+)">'
patron_caps += '<\/a><\/div><div class=".+?">([^"]+)<\/div>.+?<a .+? href="([^"]+)">([^"]+)<\/a>'
#scrapedthumbnail,#scrapedtempepi, #scrapedurl, #scrapedtitle
matches = scrapertools.find_multiple_matches(data_lista, patron_caps)
for scrapedthumbnail, scrapedtempepi, scrapedurl, scrapedtitle in matches:
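Both versions of this hunk flatten the page before matching; patterns like patron_caps assume the HTML sits on a single line. A hedged standalone sketch of that pre-clean step:

import re

def flatten(page_html):
    # Drop newlines, carriage returns, tabs, double spaces and &nbsp;
    # so later single-line regexes can match across line breaks.
    return re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", page_html)

print(flatten("<ul class='episodios'>\n\t<li>&nbsp;Cap 1</li>\n</ul>"))
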
@@ -148,14 +148,24 @@ def findvideos(item):

data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
data = scrapertools.find_single_match(data,
data1 = scrapertools.find_single_match(data,
'<div id="playex" .+?>(.+?)<\/nav><\/div><\/div>')
patron='src="(.+?)"'
logger.info("assfxxv "+data)
itemla = scrapertools.find_multiple_matches(data,patron)
itemla = scrapertools.find_multiple_matches(data1,patron)
if "favicons?domain" in itemla[1]:
method = 1
data2=scrapertools.find_single_match(data, "var \$user_hashs = {(.+?)}")
patron='".+?":"(.+?)"'
itemla = scrapertools.find_multiple_matches(data2,patron)
else:
method = 0
for i in range(len(itemla)):
#for url in itemla:
url=itemla[i]
if method==0:
url=itemla[i]
else:
import base64
b=base64.b64decode(itemla[i])
url=b.decode('utf8')
#verificar existencia del video (testing)
codigo=verificar_video(itemla[i])
if codigo==200:
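When the page exposes $user_hashs instead of plain iframes, each scraped entry is a base64-encoded URL and the method flag switches to 1. A minimal sketch of the decode branch (sample hash made up for illustration):

import base64

def decode_hash(hash_value):
    # b64decode + decode('utf8') mirrors the method==1 branch above.
    return base64.b64decode(hash_value).decode('utf8')

print(decode_hash('aHR0cDovL2V4YW1wbGUuY29tL3ZpZGVv'))  # -> http://example.com/video
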
@@ -199,5 +209,5 @@ def verificar_video(url):
else:
codigo1=200
else:
codigo1=200
codigo1=200
return codigo1

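verificar_video funnels every outcome into an HTTP-style status code, and the caller only keeps links that report 200. A hedged reconstruction of that idea as a standalone Python 2 helper; the original's branching is more involved, as the hunk above shows:

import urllib2

def verificar_video(url):
    try:
        return urllib2.urlopen(url, timeout=5).getcode()
    except urllib2.HTTPError as e:
        return e.code
    except Exception:
        # mirror the fallthrough above: when in doubt, report available
        return 200
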
@@ -10,7 +10,8 @@
"movie",
"tvshow",
"anime",
"torrent"
"torrent",
"documentary"
],
"settings": [
{
@@ -21,6 +22,22 @@
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_series",
"type": "bool",
"label": "Incluir en Novedades - Episodios de series",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_torrent",
"type": "bool",

@@ -1,4 +1,4 @@
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-

import re

@@ -10,7 +10,7 @@ from core.item import Item
from platformcode import config, logger
from core import tmdb

host = 'http://descargas2020.com/' # Cambiar manualmente "xx" en línea 287 ".com/xx/library" por tl para descargas2020, tr para descargas2020, d20 para descargas2020
host = 'http://descargas2020.com/'

def mainlist(item):
logger.info()
@@ -26,7 +26,7 @@ def mainlist(item):

itemlist.append(Item(channel=item.channel, action="submenu", title="Series", url=host, extra="series",
thumbnail=thumb_series))

itemlist.append(Item(channel=item.channel, action="submenu", title="Documentales", url=host, extra="varios",
thumbnail=thumb_series))
itemlist.append(
@@ -40,12 +40,15 @@ def submenu(item):

data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
data = data.replace("'", "\"").replace("/series\"", "/series/\"") #Compatibilidad con mispelisy.series.com

#patron = '<li><a href="http://(?:www.)?descargas2020.com/' + item.extra + '/">.*?<ul>(.*?)</ul>'
patron = '<li><a href="'+item.url+item.extra + '/">.*?<ul>(.*?)</ul>' #Filtrado por url
data = scrapertools.get_match(data, patron)
patron = '<li><a href="http://(?:www.)?descargas2020.com/' + item.extra + '/">.*?<ul.*?>(.*?)</ul>'
if "pelisyseries.com" in host and item.extra == "varios": #compatibilidad con mispelisy.series.com
data = '<a href="http://descargas2020.com/varios/" title="Documentales"><i class="icon-rocket"></i> Documentales</a>'
else:
data = scrapertools.get_match(data, patron)

patron = '<a href="([^"]+)".*?>([^>]+)</a>'
patron = '<.*?href="([^"]+)".*?>([^>]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)

for scrapedurl, scrapedtitle in matches:
@@ -55,12 +58,12 @@ def submenu(item):
itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, extra="pelilist"))
itemlist.append(
Item(channel=item.channel, action="alfabeto", title=title + " [A-Z]", url=url, extra="pelilist"))

if item.extra == "peliculas":
itemlist.append(Item(channel=item.channel, action="listado", title="Películas 4K", url=host + "peliculas-hd/4kultrahd/", extra="pelilist"))
itemlist.append(
Item(channel=item.channel, action="alfabeto", title="Películas 4K" + " [A-Z]", url=host + "peliculas-hd/4kultrahd/", extra="pelilist"))

return itemlist

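Every scraper in this channel recodes the downloaded page from ISO-8859-1 to UTF-8 before matching. As a standalone Python 2 helper, the step amounts to:

def recode(data):
    # The sites serve Latin-1; the rest of the addon works with UTF-8 bytes.
    # errors="replace" keeps scraping alive when a stray byte is invalid.
    return unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
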
@@ -91,16 +94,12 @@ def listado(item):
itemlist = []
url_next_page =''

data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
#data = httptools.downloadpage(item.url).data
data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
#logger.debug(data)
logger.debug('item.modo: %s'%item.modo)
logger.debug('item.extra: %s'%item.extra)

if item.modo != 'next' or item.modo =='':
logger.debug('item.title: %s'% item.title)
patron = '<ul class="' + item.extra + '">(.*?)</ul>'
logger.debug("patron=" + patron)
fichas = scrapertools.get_match(data, patron)
page_extra = item.extra
else:
@@ -109,11 +108,11 @@ def listado(item):

patron = '<a href="([^"]+).*?' # la url
patron += 'title="([^"]+).*?' # el titulo
patron += '<img src="([^"]+)"[^>]+>.*?' # el thumbnail
#patron += '<span>([^<].*?)<' # la calidad: original de NewPCT1: si falta la calidad, el siguiente "matches" entra en un loop
patron += '<img.*?src="([^"]+)"[^>]+>.*?' # el thumbnail
patron += '<h2.*?>(.*?)?<\/h2>' # titulo alternativo
patron += '<span>([^<].*?)?<' # la calidad
#logger.debug("patron: " + patron + " / fichas: " + fichas)
matches = re.compile(patron, re.DOTALL).findall(fichas)
logger.debug('item.next_page: %s'%item.next_page)

# Paginacion
if item.next_page != 'b':
@@ -132,20 +131,23 @@ def listado(item):
url_next_page = matches_next_page[0]
modo = 'next'

for scrapedurl, scrapedtitle, scrapedthumbnail, calidad in matches:
for scrapedurl, scrapedtitle, scrapedthumbnail, title_alt, calidad in matches:
url = scrapedurl
title = scrapedtitle
title = scrapedtitle.replace("�", "ñ").replace("Ã±", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "")
title_alt = title_alt.replace("�", "ñ").replace("Ã±", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "")
thumbnail = scrapedthumbnail
action = "findvideos"
extra = ""
context = "movie"
year = scrapertools.find_single_match(scrapedthumbnail, r'-(\d{4})')
if ".com/series" in url:

if ".com/serie" in url and "/miniseries" not in url:
action = "episodios"
extra = "serie"

context = "tvshow"

title = scrapertools.find_single_match(title, '([^-]+)')
title = title.replace("Ver online", "", 1).replace("Descarga Serie HD", "", 1).replace("Ver en linea", "",
title = title.replace("Ver online", "", 1).replace("Descarga Serie HD", "", 1).replace("Ver en linea ", "",
1).strip()

else:
@@ -153,39 +155,24 @@ def listado(item):
if title.endswith("gratis"): title = title[:-7]
if title.endswith("torrent"): title = title[:-8]
if title.endswith("en HD"): title = title[:-6]

show = title
if item.extra != "buscar-list":
title = title + ' [' + calidad + "]"

context = ""
context_title = scrapertools.find_single_match(url, "http://(?:www.)?descargas2020.com/(.*?)/(.*?)/")
if context_title:
try:
context = context_title[0].replace("descargar-", "").replace("pelicula", "movie").replace("series",
"tvshow")
context_title = context_title[1].replace("-", " ")
if re.search('\d{4}', context_title[-4:]):
context_title = context_title[:-4]
elif re.search('\(\d{4}\)', context_title[-6:]):
context_title = context_title[:-6]

except:
context_title = show
logger.debug('contxt title: %s'%context_title)
logger.debug('year: %s' % year)

logger.debug('context: %s' % context)

if title == "":
title = title_alt
context_title = title_alt
show = title_alt
if not config.get_setting("unify"): #Si Titulos Inteligentes NO seleccionados:
if calidad:
title = title + ' [' + calidad + "]"

if not 'array' in title:
itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail,
extra = extra,
show = context_title, contentTitle=context_title, contentType=context,
context=["buscar_trailer"], infoLabels= {'year':year}))
extra = extra, show = context_title, contentTitle=context_title, contentType=context, quality=calidad,
context=["buscar_trailer"], infoLabels= {'year':year}))

logger.debug("url: " + url + " / title: " + title + " / contxt title: " + context_title + " / context: " + context + " / calidad: " + calidad+ " / year: " + year)

tmdb.set_infoLabels(itemlist, True)

if url_next_page:
itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente",
url=url_next_page, next_page=next_page, folder=True,
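The next_page 'a'/'b' flags implement half-page pagination: one scraped page is served to Kodi as two listings of up to 30 entries before the site's real Next link is followed. A simplified, hedged reconstruction of that flow (modo handling omitted):

def paginate(matches, next_page, page_url, real_next_url):
    if next_page != 'b' and len(matches) > 30:
        # First pass: show 30 results and revisit the same URL for the rest.
        return matches[:30], 'b', page_url
    # Second pass or short page: show the remainder, then follow Next.
    return matches[30:], 'a', real_next_url

first, flag, url = paginate(list(range(45)), 'a', 'page1', 'page2')
print(len(first), flag, url)  # -> 30 b page1
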
@@ -198,7 +185,7 @@ def listado_busqueda(item):
itemlist = []
data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url, post=item.post).data)
data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")

list_chars = [["Ã±", "ñ"]]

for el in list_chars:
@@ -218,42 +205,116 @@ def listado_busqueda(item):

pattern = '<ul class="%s">(.*?)</ul>' % item.pattern
data = scrapertools.get_match(data, pattern)
pattern = '<li><a href="(?P<url>[^"]+)".*?<img src="(?P<img>[^"]+)"[^>]+>.*?<h2.*?>\s*(?P<title>.*?)\s*</h2>'

pattern = '<li[^>]*><a href="(?P<url>[^"]+).*?<img.*?src="(?P<thumb>[^"]+)?".*?<h2.*?>(?P<title>.*?)?<\/h2>'
matches = re.compile(pattern, re.DOTALL).findall(data)

for url, thumb, title in matches:
# fix encoding for title
real_title = scrapertools.find_single_match(title, r'font color.*?font.*?><b>(.*?)<\/b><\/font>')
real_title = scrapertools.find_single_match(title, r'<strong.*?>(.*?)Temporada.*?<\/strong>') #series
if not real_title:
real_title = scrapertools.find_single_match(title, r'(.*?)\[.*?]') #movies
real_title = scrapertools.remove_htmltags(real_title).decode('iso-8859-1').encode('utf-8')
real_title = scrapertools.htmlclean(real_title)
real_title = real_title.replace("�", "ñ").replace("Ã±", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "").replace("de la Serie", "")
calidad = scrapertools.find_single_match(title, r'.*?\s*Calidad.*?<span[^>]+>[\[]\s*(?P<quality>.*?)\s*[\]]<\/span>') #series
if calidad == "":
calidad = scrapertools.find_single_match(title, r'..*?(\[.*?.*\])') #movies
year = scrapertools.find_single_match(thumb, r'-(\d{4})')

# fix encoding for title
title = scrapertools.htmlclean(title)
title = title.replace("�", "ñ")
title = re.sub(r'(Calidad.*?\])', '', title)
title = title.replace("�", "ñ").replace("Ã±", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "").replace("de la Serie", "")
if real_title == "":
real_title = title
if calidad == "":
calidad = title
context = "movie"
url_real = True

# no mostramos lo que no sean videos
if "/juego/" in url or "/varios/" in url:
if "juego/" in url:
continue

if ".com/series" in url:
# Codigo para rescatar lo que se puede en pelisy.series.com de Series para la Videoteca. la URL apunta al capítulo y no a la Serie. Nombre de Serie frecuentemente en blanco. Se obtiene de Thumb, así como el id de la serie
if ("/serie" in url or "-serie" in url) and "pelisyseries.com" in host:
if "seriehd" in url:
calidad_mps = "series-hd/"
elif "serievo" in url:
calidad_mps = "series-vo/"
elif "serie-vo" in url:
calidad_mps = "series-vo/"
else:
calidad_mps = "series/"

if "no_image" in thumb:
real_title_mps = title
else:
real_title_mps = re.sub(r'.*?\/\d+_', '', thumb)
real_title_mps = re.sub(r'\.\w+.*?', '', real_title_mps)

if "/0_" not in thumb and not "no_image" in thumb:
serieid = scrapertools.find_single_match(thumb, r'.*?\/\w\/(?P<serieid>\d+).*?.*')
if len(serieid) > 5:
serieid = ""
else:
serieid = ""

show = real_title
#detectar si la url creada de tvshow es válida o hay que volver atras
url_tvshow = host + calidad_mps + real_title_mps + "/"
url_id = host + calidad_mps + real_title_mps + "/" + serieid
data_serie = data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(url_id).data)
data_serie = unicode(data_serie, "iso-8859-1", errors="replace").encode("utf-8")
data_serie = data_serie.replace("chapters", "buscar-list")
pattern = '<ul class="%s">(.*?)</ul>' % "buscar-list" # item.pattern
if not scrapertools.find_single_match(data_serie, pattern):
data_serie = data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(url_tvshow).data)
data_serie = unicode(data_serie, "iso-8859-1", errors="replace").encode("utf-8")
data_serie = data_serie.replace("chapters", "buscar-list")
if not scrapertools.find_single_match(data_serie, pattern):
context = "movie"
url_real = False
if not config.get_setting("unify"): #Si Titulos Inteligentes NO seleccionados:
if calidad:
title = title + '[' + calidad + "]"
else:
url = url_tvshow
else:
url = url_id

real_title_mps = real_title_mps.replace("-", " ")
logger.debug("url: " + url + " / title: " + title + " / real_title: " + real_title + " / real_title_mps: " + real_title_mps + " / calidad_mps : " + calidad_mps + " / context : " + context)
real_title = real_title_mps

show = real_title

itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumb,
context=["buscar_trailer"], contentSerieName=show))
if ".com/serie" in url and "/miniseries" not in url and url_real:
if not config.get_setting("unify"): #Si Titulos Inteligentes NO seleccionados:
if calidad:
title = title + '[' + calidad + "]"
context = "tvshow"

itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumb, quality=calidad,
show=show, extra="serie", context=["buscar_trailer"], contentType=context, contentTitle=real_title, contentSerieName=real_title, infoLabels= {'year':year}))
else:
if config.get_setting("unify"): #Si Titulos Inteligentes SI seleccionados:
title = real_title

itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb,
context=["buscar_trailer"]))

itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb, quality=calidad,
show=show, context=["buscar_trailer"], contentType=context, contentTitle=real_title, infoLabels= {'year':year}))

logger.debug("url: " + url + " / title: " + title + " / real_title: " + real_title + " / show: " + show + " / calidad: " + calidad)

tmdb.set_infoLabels(itemlist, True)

if post:
itemlist.append(item.clone(channel=item.channel, action="listado_busqueda", title=">> Página siguiente",
thumbnail=get_thumb("next.png")))
text_color='yellow', text_bold=True, thumbnail=get_thumb("next.png")))

return itemlist

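For pelisyseries.com results the chapter URL does not identify the show, so the series slug is recovered from the thumbnail path. A standalone sketch of those two re.sub calls (the sample path is invented):

import re

def slug_from_thumb(thumb):
    name = re.sub(r'.*?\/\d+_', '', thumb)  # drop everything up to "<serieid>_"
    name = re.sub(r'\.\w+.*?', '', name)    # drop the ".jpg" extension
    return name

print(slug_from_thumb('http://example.com/series/t/12345_vikingos-4.jpg'))  # -> vikingos-4
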
def findvideos(item):
logger.info()
itemlist = []

## Cualquiera de las tres opciones son válidas
# item.url = item.url.replace(".com/",".com/ver-online/")
# item.url = item.url.replace(".com/",".com/descarga-directa/")
@@ -263,65 +324,87 @@ def findvideos(item):
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
data = data.replace("$!", "#!").replace("'", "\"").replace("Ã±", "ñ").replace("//pictures", "/pictures")

title = scrapertools.find_single_match(data, "<h1><strong>([^<]+)<\/strong>[^<]+<\/h1>")
title += scrapertools.find_single_match(data, "<h1><strong>[^<]+<\/strong>([^<]+)<\/h1>")
caratula = scrapertools.find_single_match(data, '<div class="entry-left">.*?src="([^"]+)"')

#<div style="float:left;width:100%;min-height:70px;margin:10px 0px;"> <a href="javascript:void(0);" onClick="javascript:openTorrent();" title="Descargar torrent de Star Wars Los Ultimos Jedi " class="btn-torrent">Descarga tu Archivo torrent!</a> <script type="text/javascript"> function openTorrent() {var link = "http://advserver.xyz/v2/gena?gid=ADQGZS0ABR&uid=164"; window.open(link); window.location.href = "http://descargas2020.com/descargar-torrent/104616_-1520707769-star-wars-los-ultimos-jedi--bluray-screeener/";} </script> </div>

title = scrapertools.find_single_match(data, "<h1.*?<strong>([^<]+)<\/strong>.*?<\/h1>") #corregido para adaptarlo a mispelisy.series.com
title += scrapertools.find_single_match(data, "<h1.*?<strong>[^<]+<\/strong>([^<]+)<\/h1>") #corregido para adaptarlo a mispelisy.series.com
#caratula = scrapertools.find_single_match(data, '<div class="entry-left">.*?src="([^"]+)"')
caratula = scrapertools.find_single_match(data, '<h1.*?<img.*?src="([^"]+)')

patron = 'openTorrent.*?title=".*?class="btn-torrent">.*?function openTorrent.*?href = "(.*?)";'

# escraped torrent
url = scrapertools.find_single_match(data, patron)
logger.debug("urltorrent: " + url + " Title: " + title + " Caratula: " + caratula)
if url != "":

if item.infoLabels['year']: #añadir el año al título general
year = '[%s]' % str(item.infoLabels['year'])
else:
year = ""

if item.infoLabels['aired'] and item.contentType == "episode": #añadir el año de episodio para series
year = scrapertools.find_single_match(str(item.infoLabels['aired']), r'\/(\d{4})')
year = '[%s]' % year

title_gen = title
if item.contentType == "episode": #scrapear información duplicada en Series
title = re.sub(r'Temp.*?\[', '[', title)
title = re.sub(r'\[Cap.*?\]', '', title)
title_epi = '%sx%s - %s' % (str(item.contentSeason), str(item.contentEpisodeNumber), item.contentTitle)
title_gen = '%s %s, %s' % (title_epi, year, title)
title_torrent = '%s, %s' % (title_epi, item.contentSerieName)
else:
title_torrent = item.contentTitle

if item.infoLabels['quality']:
if not config.get_setting("unify"): #Si Titulos Inteligentes NO seleccionados:
title_torrent = '%s [%s]' %(title_torrent, item.infoLabels['quality'])
else:
title_torrent = '%s (%s)' %(title_torrent, item.infoLabels['quality'])
if not config.get_setting("unify"): #Si Titulos Inteligentes NO seleccionados:
title_gen = '[COLOR gold]**- Título: [/COLOR]%s -**' % (title_gen)
else:
title_gen = '[COLOR gold]Título: [/COLOR]%s' % (title_gen)
if config.get_setting("quit_channel_name", "videolibrary") == 1 and item.contentChannel == "videolibrary":
title_gen = '%s: %s' % (item.channel.capitalize(), title_gen)
itemlist.append(item.clone(title=title_gen, action="", folder=False)) #Título con todos los datos del vídeo

title = title_torrent
title_torrent = '[COLOR yellow][Torrent]- [/COLOR]%s [online]' % (title_torrent)
if url != "": #Torrent
itemlist.append(
Item(channel=item.channel, action="play", server="torrent", title="[torrent] - " + title, fulltitle=title,
url=url, thumbnail=caratula, plot=item.plot, folder=False))
Item(channel=item.channel, action="play", server="torrent", title=title_torrent, fulltitle=title,
url=url, thumbnail=caratula, plot=item.plot, infoLabels=item.infoLabels, folder=False))

logger.debug("TORRENT: url: " + url + " / title: " + title + " / calidad: " + item.quality + " / context: " + str(item.context))

# escraped ver vídeos, descargar vídeos un link, múltiples links

data = data.replace("http://tumejorserie.com/descargar/url_encript.php?link=", "(")
data = data.replace(
'javascript:;" onClick="popup("http://www.descargas2020.com/d20/library/include/ajax/get_modallinks.php?links=', "")

logger.debug("matar %s" % data)

# Antiguo sistema de scrapeo de servidores usado por Newpct1. Como no funciona con descargas2020, se sustituye por este más común
#patron_descargar = '<div id="tab2"[^>]+>.*?</ul>'
#patron_ver = '<div id="tab3"[^>]+>.*?</ul>'

#match_ver = scrapertools.find_single_match(data, patron_ver)
#match_descargar = scrapertools.find_single_match(data, patron_descargar)

#patron = '<div class="box1"><img src="([^"]+)".*?' # logo
#patron += '<div class="box2">([^<]+)</div>' # servidor
#patron += '<div class="box3">([^<]+)</div>' # idioma
#patron += '<div class="box4">([^<]+)</div>' # calidad
#patron += '<div class="box5"><a href="([^"]+)".*?' # enlace
#patron += '<div class="box6">([^<]+)</div>' # titulo

#enlaces_ver = re.compile(patron, re.DOTALL).findall(match_ver)
#enlaces_descargar = re.compile(patron, re.DOTALL).findall(match_descargar)
data = re.sub(r'javascript:;" onClick="popup\("http:\/\/(?:www.)?descargas2020.com\/\w{1,9}\/library\/include\/ajax\/get_modallinks.php\?links=', "", data)

# Nuevo sistema de scrapeo de servidores creado por Torrentlocula, compatible con otros clones de Newpct1
patron = '<div class=\"box1\"[^<]+<img src=\"([^<]+)?" style[^<]+><\/div[^<]+<div class="box2">([^<]+)?<\/div[^<]+<div class="box3">([^<]+)?'
patron += '<\/div[^<]+<div class="box4">([^<]+)?<\/div[^<]+<div class="box5"><a href=(.*?)? rel.*?'
patron += '<\/div[^<]+<div class="box6">([^<]+)?<'
logger.debug("Patron: " + patron)

enlaces_ver = re.compile(patron, re.DOTALL).findall(data)
enlaces_descargar = enlaces_ver
logger.debug(enlaces_ver)
#logger.debug(enlaces_ver)

if len(enlaces_ver) > 0:
if not config.get_setting("unify"): #Si Titulos Inteligentes NO seleccionados:
itemlist.append(item.clone(title="[COLOR gold]**- Enlaces Ver: -**[/COLOR]", action="", folder=False))
else:
itemlist.append(item.clone(title="[COLOR gold] Enlaces Ver: [/COLOR]", action="", folder=False))

for logo, servidor, idioma, calidad, enlace, titulo in enlaces_ver:
if "Ver" in titulo:
servidor = servidor.replace("streamin", "streaminto")
titulo = titulo + " [" + servidor + "]"
titulo = title
mostrar_server = True
if config.get_setting("hidepremium"):
mostrar_server = servertools.is_server_enabled(servidor)
titulo = '[COLOR yellow][%s]-[/COLOR] %s [online]' % (servidor.capitalize(), titulo)
logger.debug("VER: url: " + enlace + " / title: " + titulo + " / servidor: " + servidor + " / idioma: " + idioma)

if mostrar_server:
try:
devuelve = servertools.findvideosbyserver(enlace, servidor)
@@ -329,31 +412,46 @@ def findvideos(item):
enlace = devuelve[0][1]
itemlist.append(
Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo,
fulltitle=item.title, url=enlace, thumbnail=logo, plot=item.plot, folder=False))
fulltitle=title, url=enlace, thumbnail=logo, plot=item.plot, infoLabels=item.infoLabels, folder=False))
except:
pass

if len(enlaces_descargar) > 0:
if not config.get_setting("unify"): #Si Titulos Inteligentes NO seleccionados:
itemlist.append(item.clone(title="[COLOR gold]**- Enlaces Descargar: -**[/COLOR]", action="", folder=False))
else:
itemlist.append(item.clone(title="[COLOR gold] Enlaces Descargar: [/COLOR]", action="", folder=False))

for logo, servidor, idioma, calidad, enlace, titulo in enlaces_descargar:
if "Ver" not in titulo:
servidor = servidor.replace("uploaded", "uploadedto")
partes = enlace.split(" ")
titulo = "Descarga "
p = 1
logger.debug("DESCARGAR: url: " + enlace + " / title: " + titulo + title + " / servidor: " + servidor + " / idioma: " + idioma)
for enlace in partes:
parte_titulo = titulo + " (%s/%s)" % (p, len(partes)) + " [" + servidor + "]"
parte_titulo = titulo + " (%s/%s)" % (p, len(partes))
p += 1
mostrar_server = True
if config.get_setting("hidepremium"):
mostrar_server = servertools.is_server_enabled(servidor)
parte_titulo = '[COLOR yellow][%s]-[/COLOR] %s' % (servidor.capitalize(), parte_titulo)
if item.infoLabels['quality']:
if not config.get_setting("unify"): #Si Titulos Inteligentes NO seleccionados:
parte_titulo = '%s [%s]' %(parte_titulo, item.infoLabels['quality'])
else:
parte_titulo = '%s (%s)' %(parte_titulo, item.infoLabels['quality'])
if mostrar_server:
try:
devuelve = servertools.findvideosbyserver(enlace, servidor)
if devuelve:
enlace = devuelve[0][1]
itemlist.append(Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor,
title=parte_titulo, fulltitle=item.title, url=enlace, thumbnail=logo,
plot=item.plot, folder=False))
title=parte_titulo, fulltitle=title, url=enlace, thumbnail=logo,
plot=item.plot, infoLabels=item.infoLabels, folder=False))
except:
pass

return itemlist

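The torrent link is not in a normal href: it hides inside the inline openTorrent() script quoted in the comment above. A standalone check of the diff's pattern against a trimmed version of that sample markup:

import re

html = ('<a href="javascript:void(0);" onClick="javascript:openTorrent();" '
        'title="Descargar torrent de Star Wars Los Ultimos Jedi " class="btn-torrent">'
        'Descarga tu Archivo torrent!</a> <script> function openTorrent() '
        '{var link = "http://advserver.xyz/v2/gena?gid=ADQGZS0ABR&uid=164"; window.open(link); '
        'window.location.href = "http://descargas2020.com/descargar-torrent/104616_-1520707769-star-wars-los-ultimos-jedi--bluray-screeener/";} '
        '</script>')

patron = 'openTorrent.*?title=".*?class="btn-torrent">.*?function openTorrent.*?href = "(.*?)";'
print(re.search(patron, html, re.DOTALL).group(1))
# -> http://descargas2020.com/descargar-torrent/104616_-1520707769-star-wars-los-ultimos-jedi--bluray-screeener/
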
@@ -363,6 +461,8 @@ def episodios(item):
infoLabels = item.infoLabels
data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
calidad = item.quality

pattern = '<ul class="%s">(.*?)</ul>' % "pagination" # item.pattern
pagination = scrapertools.find_single_match(data, pattern)
if pagination:
@@ -378,76 +478,128 @@ def episodios(item):
list_pages = [item.url]

for index, page in enumerate(list_pages):
logger.debug("Loading page %s/%s url=%s" % (index, len(list_pages), page))
data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(page).data)
data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")

data = data.replace("chapters", "buscar-list") #Compatibilidad con mispelisy.series.com
pattern = '<ul class="%s">(.*?)</ul>' % "buscar-list" # item.pattern
data = scrapertools.get_match(data, pattern)
if scrapertools.find_single_match(data, pattern):
data = scrapertools.get_match(data, pattern)
else:
logger.debug(item)
logger.debug("data: " + data)
return itemlist

pattern = '<li[^>]*><a href="(?P<url>[^"]+).*?<img src="(?P<thumb>[^"]+)".*?<h2[^>]+>(?P<info>.*?)</h2>'
if "pelisyseries.com" in host:
pattern = '<li[^>]*><div class.*?src="(?P<thumb>[^"]+)?".*?<a class.*?href="(?P<url>[^"]+).*?<h3[^>]+>(?P<info>.*?)?<\/h3>.*?<\/li>'
else:
pattern = '<li[^>]*><a href="(?P<url>[^"]+).*?<img.*?src="(?P<thumb>[^"]+)?".*?<h2[^>]+>(?P<info>.*?)?<\/h2>'
matches = re.compile(pattern, re.DOTALL).findall(data)
#logger.debug("patron: " + pattern)
#logger.debug(matches)

season = "1"

for url, thumb, info in matches:

if "pelisyseries.com" in host:
interm = url
url = thumb
thumb = interm

if "<span" in info: # new style
pattern = ".*?[^>]+>.*?Temporada\s*(?P<season>\d+)\s*Capitulo(?:s)?\s*(?P<episode>\d+)" \
"(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)</span>\s*Calidad\s*<span[^>]+>" \
"[\[]\s*(?P<quality>.*?)\s*[\]]</span>"
pattern = ".*?[^>]+>.*?Temporada\s*(?P<season>\d+)?.*?Capitulo(?:s)?\s*(?P<episode>\d+)?" \
"(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)?<\/span>\s*Calidad\s*<span[^>]+>" \
"[\[]\s*(?P<quality>.*?)?\s*[\]]<\/span>"
if "Especial" in info: # Capitulos Especiales
pattern = ".*?[^>]+>.*?Temporada.*?\[.*?(?P<season>\d+).*?\].*?Capitulo.*?\[\s*(?P<episode>\d+).*?\]?(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)?<\/span>\s*Calidad\s*<span[^>]+>[\[]\s*(?P<quality>.*?)?\s*[\]]<\/span>"

if not scrapertools.find_single_match(info, pattern): #en caso de error de formato, creo uno básico
logger.debug("patron episodioNEW: " + pattern)
logger.debug(info)
info = '><strong>%sTemporada %s Capitulo 0</strong> - <span >Español Castellano</span> Calidad <span >[%s]</span>' % (item.contentTitle, season, item.infoLabels['quality'])
r = re.compile(pattern)
match = [m.groupdict() for m in r.finditer(info)][0]

if match['season'] is None: match['season'] = season
if match['episode'] is None: match['episode'] = "0"
if match['quality']:
item.quality = match['quality']

if match["episode2"]:
multi = True
title = "%s (%sx%s-%s) [%s][%s]" % (item.show, match["season"], str(match["episode"]).zfill(2),
str(match["episode2"]).zfill(2), match["lang"],
match["quality"])
title = "%s (%sx%s-%s) [%s]" % (item.show, match["season"], str(match["episode"]).zfill(2),
str(match["episode2"]).zfill(2), match["lang"])
if not config.get_setting("unify") and match["quality"]: #Si Titulos Inteligentes NO seleccionados:
title = "%s[%s]" % (title, match["quality"])
else:
multi = False
title = "%s (%sx%s) [%s][%s]" % (item.show, match["season"], str(match["episode"]).zfill(2),
match["lang"], match["quality"])
title = "%s (%sx%s) [%s]" % (item.show, match["season"], str(match["episode"]).zfill(2),
match["lang"])
if not config.get_setting("unify") and match["quality"]: #Si Titulos Inteligentes NO seleccionados:
title = "%s[%s]" % (title, match["quality"])

else: # old style
pattern = "\[(?P<quality>.*?)\].*?\[Cap.(?P<season>\d+)(?P<episode>\d{2})(?:_(?P<season2>\d+)" \
if scrapertools.find_single_match(info, '\[\d{3}\]'):
info = re.sub(r'\[(\d{3}\])', r'[Cap.\1', info)
elif scrapertools.find_single_match(info, '\[Cap.\d{2}_\d{2}\]'):
info = re.sub(r'\[Cap.(\d{2})_(\d{2})\]', r'[Cap.1\1_1\2]', info)
elif scrapertools.find_single_match(info, '\[Cap.([A-Za-z]+)\]'):
info = re.sub(r'\[Cap.([A-Za-z]+)\]', '[Cap.100]', info)
if scrapertools.find_single_match(info, '\[Cap.\d{2,3}'):
pattern = "\[(?P<quality>.*?)\].*?\[Cap.(?P<season>\d).*?(?P<episode>\d{2})(?:_(?P<season2>\d+)" \
"(?P<episode2>\d{2}))?.*?\].*?(?:\[(?P<lang>.*?)\])?"
elif scrapertools.find_single_match(info, 'Cap.\d{2,3}'):
pattern = ".*?Temp.*?\s(?P<quality>.*?)\s.*?Cap.(?P<season>\d).*?(?P<episode>\d{2})(?:_(?P<season2>\d+)(?P<episode2>\d{2}))?.*?\s(?P<lang>.*)?"

if not scrapertools.find_single_match(info, pattern): #en caso de error de formato, creo uno básico
logger.debug("patron episodioOLD: " + pattern)
logger.debug(info)
info = '%s [%s][Cap.%s00][Español]' % (item.contentTitle, item.infoLabels['quality'], season)
r = re.compile(pattern)
match = [m.groupdict() for m in r.finditer(info)][0]
# logger.debug("data %s" % match)

str_lang = ""
if match['quality']:
item.quality = match['quality']

if match["lang"] is not None:
str_lang = "[%s]" % match["lang"]

item.quality = "%s %s" % (item.quality, match['lang'])

if match["season2"] and match["episode2"]:
multi = True
if match["season"] == match["season2"]:

title = "%s (%sx%s-%s) %s[%s]" % (item.show, match["season"], match["episode"],
match["episode2"], str_lang, match["quality"])
title = "%s (%sx%s-%s) %s" % (item.show, match["season"], match["episode"],
match["episode2"], str_lang)
if not config.get_setting("unify") and match["quality"]: #Si Titulos Inteligentes NO seleccionados:
title = "%s[%s]" % (title, match["quality"])
else:
title = "%s (%sx%s-%sx%s) %s[%s]" % (item.show, match["season"], match["episode"],
match["season2"], match["episode2"], str_lang,
match["quality"])
title = "%s (%sx%s-%sx%s) %s" % (item.show, match["season"], match["episode"],
match["season2"], match["episode2"], str_lang)
if not config.get_setting("unify") and match["quality"]: #Si Titulos Inteligentes NO seleccionados:
title = "%s[%s]" % (title, match["quality"])
else:
title = "%s (%sx%s) %s[%s]" % (item.show, match["season"], match["episode"], str_lang,
match["quality"])
title = "%s (%sx%s) %s" % (item.show, match["season"], match["episode"], str_lang)
if not config.get_setting("unify") and match["quality"]: #Si Titulos Inteligentes NO seleccionados:
title = "%s[%s]" % (title, match["quality"])
multi = False

season = match['season']
episode = match['episode']
logger.debug("title: " + title + " / url: " + url + " / calidad: " + item.quality + " / multi: " + str(multi) + " / Season: " + str(season) + " / EpisodeNumber: " + str(episode))
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb,
quality=item.quality, multi=multi, contentSeason=season,
contentEpisodeNumber=episode, infoLabels = infoLabels))

# order list
#tmdb.set_infoLabels(itemlist, True)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True)
if len(itemlist) > 1:
itemlist = sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber)))

if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
item.clone(title="[COLOR orange][B]Añadir esta serie a la videoteca[/B][/COLOR]", action="add_serie_to_library", extra="episodios"))
item.clone(title="Añadir esta serie a la videoteca", action="add_serie_to_library", extra="episodios", quality=calidad))

return itemlist

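The named-group parse above tolerates missing fields: the season and episode groups are optional, and groupdict() hands back None values that get defaults afterwards. A compact, hedged illustration with an invented info string:

import re

pattern = r".*?Temporada\s*(?P<season>\d+)?.*?Capitulo(?:s)?\s*(?P<episode>\d+)?"
info = "Vikingos Temporada 2 Capitulo 5"

match = [m.groupdict() for m in re.compile(pattern).finditer(info)][0]
if match['season'] is None: match['season'] = "1"    # same default as above
if match['episode'] is None: match['episode'] = "0"  # "0" marks specials
print("%sx%s" % (match['season'], match['episode'].zfill(2)))  # -> 2x05
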
@@ -459,7 +611,7 @@ def search(item, texto):
item.post = "q=%s" % texto
item.pattern = "buscar-list"
itemlist = listado_busqueda(item)

return itemlist

# Se captura la excepción, para no interrumpir al buscador global si un canal falla
@@ -485,6 +637,24 @@ def newest(categoria):
itemlist.extend(listado(item))
if itemlist[-1].title == ">> Página siguiente":
itemlist.pop()

if categoria == 'peliculas 4k':
item.url = host+'peliculas-hd/4kultrahd/'
itemlist.extend(listado(item))
if itemlist[-1].title == ">> Página siguiente":
itemlist.pop()

if categoria == 'anime':
item.url = host+'anime/'
itemlist.extend(listado(item))
if itemlist[-1].title == ">> Página siguiente":
itemlist.pop()

if categoria == 'documentales':
item.url = host+'documentales/'
itemlist.extend(listado(item))
if itemlist[-1].title == ">> Página siguiente":
itemlist.pop()

# Se captura la excepción, para no interrumpir al canal novedades si un canal falla
except:

@@ -254,7 +254,7 @@ def episodios(item):
url = host + scrapertools.find_single_match(data,patron)
# "episodios%5B1%5D=11744&total_capis=5&tabla=series&titulo=Sea+Patrol+-+2%AA+Temporada"
post = urllib.urlencode({name: value, "total_capis": total_capis, "tabla": tabla, "titulo": titulo})
logger.debug("post=" + post)
#logger.debug("post=" + post)

if item.extra == "series":
epi = scrapedtitle.split("x")
@@ -311,7 +311,6 @@ def show_movie_info(item):
pass

data = httptools.downloadpage(item.url).data
logger.debug("data=" + data)

patron = "<a href='(secciones.php\?sec\=descargas[^']+)'"
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -319,9 +318,11 @@ def show_movie_info(item):
for scrapedurl in matches:
url = urlparse.urljoin(item.url, scrapedurl)
logger.debug("title=[" + item.title + "], url=[" + url + "], thumbnail=[" + item.thumbnail + "]")

torrent_data = httptools.downloadpage(url).data
link = scrapertools.get_match(torrent_data, "<a href='(\/uploads\/torrents\/peliculas\/.*?\.torrent)'>")
if scrapertools.find_single_match(torrent_data, "<a href='(\/uploads\/torrents\/peliculas\/.*?\.torrent)'>"):
link = scrapertools.get_match(torrent_data, "<a href='(\/uploads\/torrents\/peliculas\/.*?\.torrent)'>")
else:
link = scrapertools.get_match(torrent_data, "<a href='(http:\/\/www.mejortorrent.com\/uploads\/torrents\/.*?peliculas\/.*?\.torrent)'>")
link = urlparse.urljoin(url, link)
logger.debug("link=" + link)
itemlist.append(Item(channel=item.channel, action="play", server="torrent", title=item.title, url=link,
@@ -363,7 +364,7 @@ def play(item):
else:
#data = httptools.downloadpage(item.url, post=item.extra).data
data = httptools.downloadpage(item.url).data
logger.debug("data=" + data)
#logger.debug("data=" + data)

params = dict(urlparse.parse_qsl(item.extra))
patron = host + "/secciones.php?sec=descargas&ap=contar&tabla=" + params["tabla"] + "&id=" + item.id
@@ -373,7 +374,9 @@ def play(item):

data = httptools.downloadpage(patron).data
patron = "Pincha <a href='(.*?)'>"
link = host + scrapertools.find_single_match(data, patron)
link = scrapertools.find_single_match(data, patron)
if not host in link:
link = host + link
logger.info("link=" + link)
itemlist.append(Item(channel=item.channel, action="play", server="torrent", title=item.title, url=link,
thumbnail=item.thumbnail, plot=item.plot, folder=False))

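The play() fix stops double-prefixing absolute links: the host is now prepended only when the scraped href is relative. A tiny standalone illustration (host value assumed):

host = 'http://www.mejortorrent.com'

def absolutize(link):
    if host not in link:
        link = host + link
    return link

print(absolutize('/secciones.php?sec=descargas&id=1'))   # host prepended
print(absolutize(host + '/uploads/torrents/x.torrent'))  # left as is
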
@@ -9,7 +9,8 @@
"categories": [
"torrent",
"movie",
"tvshow"
"tvshow",
"documentary"
],
"settings": [
{

@@ -1,137 +1,73 @@
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-

import re
import urllib
import urlparse

from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger
from channelselector import get_thumb
from platformcode import config, logger
from core import tmdb

host = 'http://mispelisyseries.com/'

def mainlist(item):
logger.info()

itemlist = []
itemlist.append(Item(channel=item.channel, action="menu", title="Películas", url=host,
extra="Peliculas", folder=True, thumbnail=get_thumb('movies', auto=True)))

thumb_pelis=get_thumb("channels_movie.png")
thumb_series=get_thumb("channels_tvshow.png")
thumb_search = get_thumb("search.png")

itemlist.append(Item(channel=item.channel, action="submenu", title="Películas", url=host,
extra="peliculas", thumbnail=thumb_pelis ))

itemlist.append(Item(channel=item.channel, action="submenu", title="Series", url=host, extra="series",
thumbnail=thumb_series))

itemlist.append(Item(channel=item.channel, action="submenu", title="Documentales", url=host, extra="varios",
thumbnail=thumb_series))
itemlist.append(
Item(channel=item.channel, action="menu", title="Series", url=host, extra="Series",
folder=True, thumbnail=get_thumb('tvshows', auto=True)))
itemlist.append(Item(channel=item.channel, action="search", title="Buscar", url=host + 'buscar',
thumbnail=get_thumb('search', auto=True)))
Item(channel=item.channel, action="search", title="Buscar", url=host + "buscar", thumbnail=thumb_search))

return itemlist


def menu(item):
def submenu(item):
logger.info()
itemlist = []

data = httptools.downloadpage(item.url).data
# logger.info("data="+data)
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
data = data.replace("'", "\"").replace("/series\"", "/series/\"") #Compatibilidad con mispelisy.series.com

data = scrapertools.find_single_match(data, item.extra + "</a[^<]+<ul(.*?)</ul>")
# logger.info("data="+data)
patron = '<li><a href="http://(?:www.)?mispelisyseries.com/' + item.extra + '/">.*?<ul.*?>(.*?)</ul>'
if "pelisyseries.com" in host and item.extra == "varios": #compatibilidad con mispelisy.series.com
data = '<a href="http://mispelisyseries.com/varios/" title="Documentales"><i class="icon-rocket"></i> Documentales</a>'
else:
data = scrapertools.get_match(data, patron)

patron = "<li><a.*?href='([^']+)'[^>]+>([^<]+)</a></li>"
patron = '<.*?href="([^"]+)".*?>([^>]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)

for scrapedurl, scrapedtitle in matches:
title = scrapedtitle
url = urlparse.urljoin(item.url, scrapedurl)
thumbnail = ""
plot = ""
itemlist.append(Item(channel=item.channel, action="lista", title=title, url=url, thumbnail=thumbnail, plot=plot,
folder=True))

if title != "Todas las Peliculas":
itemlist.append(
Item(channel=item.channel, action="alfabetico", title=title + " [A-Z]", url=url, thumbnail=thumbnail,
plot=plot, folder=True))

title = scrapedtitle.strip()
url = scrapedurl

itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, extra="pelilist"))
itemlist.append(
Item(channel=item.channel, action="alfabetico", title=title + " [A-Z]", url=url, thumbnail=thumbnail,
plot=plot,
folder=True))

if 'películas' in item.title.lower():
new_item = item.clone(title='Peliculas 4K', url=host+'buscar', post='q=4k', action='listado2',
pattern='buscar-list')
itemlist.append(new_item)

Item(channel=item.channel, action="alfabeto", title=title + " [A-Z]", url=url, extra="pelilist"))

if item.extra == "peliculas":
itemlist.append(Item(channel=item.channel, action="listado", title="Películas 4K", url=host + "peliculas-hd/4kultrahd/", extra="pelilist"))
itemlist.append(
Item(channel=item.channel, action="alfabeto", title="Películas 4K" + " [A-Z]", url=host + "peliculas-hd/4kultrahd/", extra="pelilist"))

return itemlist


def search(item, texto):
logger.info("search:" + texto)
# texto = texto.replace(" ", "+")

#try:
item.post = "q=%s" % texto
item.pattern = "buscar-list"
itemlist = listado2(item)

return itemlist

# Se captura la excepción, para no interrumpir al buscador global si un canal falla
# except:
# import sys
# for line in sys.exc_info():
# logger.error("%s" % line)
# return []

def newest(categoria):
itemlist = []
item = Item()
try:
if categoria in ['peliculas', 'torrent']:
item.url = host+"peliculas"

elif categoria == 'series':
item.url = host+"series"

if categoria == '4k':

item.url = host + '/buscar'

item.post = 'q=4k'

item.pattern = 'buscar-list'

action = listado2(item)

else:
return []

itemlist = lista(item)
if itemlist[-1].title == ">> Página siguiente":
itemlist.pop()

# Esta pagina coloca a veces contenido duplicado, intentamos descartarlo
dict_aux = {}
for i in itemlist:
if not i.url in dict_aux:
dict_aux[i.url] = i
else:
itemlist.remove(i)

# Se captura la excepción, para no interrumpir al canal novedades si un canal falla
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []

# return dict_aux.values()
return itemlist

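Note that the dedup loop above mutates itemlist while iterating over it, which skips the element after every removal. A safer rebuild with the same first-occurrence-wins intent (Item stood in by a namedtuple):

import collections

Item = collections.namedtuple('Item', 'url title')  # stand-in for core.item.Item

def dedup_by_url(itemlist):
    seen = {}
    result = []
    for i in itemlist:
        if i.url not in seen:
            seen[i.url] = i
            result.append(i)
    return result

items = [Item('a', '1'), Item('a', '2'), Item('b', '3')]
print([i.title for i in dedup_by_url(items)])  # -> ['1', '3']
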

def alfabetico(item):
def alfabeto(item):
logger.info()
itemlist = []

@@ -148,93 +84,113 @@ def alfabetico(item):
title = scrapedtitle.upper()
url = scrapedurl

itemlist.append(Item(channel=item.channel, action="lista", title=title, url=url))
itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, extra=item.extra))

return itemlist


def lista(item):
def listado(item):
logger.info()
itemlist = []
url_next_page =''

# Descarga la pagina
data = httptools.downloadpage(item.url, post=item.extra).data
# logger.info("data="+data)

bloque = scrapertools.find_single_match(data, '(?:<ul class="pelilist">|<ul class="buscar-list">)(.*?)</ul>')
patron = '<a href="([^"]+).*?' # la url
patron += '<img src="([^"]+)"[^>]+>.*?' # el thumbnail
patron += '<h2[^>]*>(.*?)</h2.*?' # el titulo
patron += '<span>([^<].*?)<' # la calidad

matches = re.compile(patron, re.DOTALL).findall(bloque)
scrapertools.printMatches(matches)

for scrapedurl, scrapedthumbnail, scrapedtitle, calidad in matches:
scrapedtitle = scrapertools.htmlclean(scrapedtitle)
title = scrapedtitle.strip()
if scrapertools.htmlclean(calidad):
title += " (" + scrapertools.htmlclean(calidad) + ")"
url = urlparse.urljoin(item.url, scrapedurl)
thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
plot = ""
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
year = scrapertools.find_single_match(scrapedthumbnail, r'-(\d{4})')
contentTitle = scrapertools.htmlclean(scrapedtitle).strip()
patron = '([^<]+)<br>'
matches = re.compile(patron, re.DOTALL).findall(calidad + '<br>')
idioma = ''

if host+"/serie" in url:
contentTitle = re.sub('\s+-|\.{3}$', '', contentTitle)
capitulo = ''
temporada = 0
episodio = 0

if len(matches) == 3:
calidad = matches[0].strip()
idioma = matches[1].strip()
capitulo = matches[2].replace('Cap', 'x').replace('Temp', '').replace(' ', '')
temporada, episodio = capitulo.strip().split('x')

itemlist.append(Item(channel=item.channel, action="episodios", title=title, fulltitle=title, url=url,
thumbnail=thumbnail, plot=plot, folder=True, contentTitle=contentTitle,
language=idioma, contentSeason=int(temporada),
contentEpisodeNumber=int(episodio), quality=calidad))

else:
if len(matches) == 2:
calidad = matches[0].strip()
idioma = matches[1].strip()

itemlist.append(Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url,
thumbnail=thumbnail, plot=plot, folder=True, contentTitle=contentTitle,
language=idioma, contentThumbnail=thumbnail, quality=calidad))

next_page_url = scrapertools.find_single_match(data, '<li><a href="([^"]+)">Next</a></li>')
if next_page_url != "":
itemlist.append(Item(channel=item.channel, action="lista", title=">> Página siguiente",
url=urlparse.urljoin(item.url, next_page_url), folder=True))
else:
next_page_url = scrapertools.find_single_match(data,
'<li><input type="button" class="btn-submit" value="Siguiente" onClick="paginar..(\d+)')
if next_page_url != "":
itemlist.append(Item(channel=item.channel, action="lista", title=">> Página siguiente", url=item.url,
extra=item.extra + "&pg=" + next_page_url, folder=True))

return itemlist


def listado2(item):
logger.info()
itemlist = []

data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url, post=item.post).data)
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
#data = httptools.downloadpage(item.url).data
data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")

if item.modo != 'next' or item.modo =='':
patron = '<ul class="' + item.extra + '">(.*?)</ul>'
fichas = scrapertools.get_match(data, patron)
page_extra = item.extra
else:
fichas = data
page_extra = item.extra

patron = '<a href="([^"]+).*?' # la url
patron += 'title="([^"]+).*?' # el titulo
patron += '<img.*?src="([^"]+)"[^>]+>.*?' # el thumbnail
patron += '<h2.*?>(.*?)?<\/h2>' # titulo alternativo
patron += '<span>([^<].*?)?<' # la calidad
#logger.debug("patron: " + patron + " / fichas: " + fichas)
matches = re.compile(patron, re.DOTALL).findall(fichas)

# Paginacion
if item.next_page != 'b':
if len(matches) > 30:
url_next_page = item.url
matches = matches[:30]
next_page = 'b'
modo = 'continue'
else:
matches = matches[30:]
next_page = 'a'
patron_next_page = '<a href="([^"]+)">Next<\/a>'
matches_next_page = re.compile(patron_next_page, re.DOTALL).findall(data)
modo = 'continue'
if len(matches_next_page) > 0:
url_next_page = matches_next_page[0]
modo = 'next'

for scrapedurl, scrapedtitle, scrapedthumbnail, title_alt, calidad in matches:
url = scrapedurl
title = scrapedtitle.replace("�", "ñ").replace("Ã±", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "")
title_alt = title_alt.replace("�", "ñ").replace("Ã±", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "")
thumbnail = scrapedthumbnail
|
||||
action = "findvideos"
|
||||
extra = ""
|
||||
context = "movie"
|
||||
year = scrapertools.find_single_match(scrapedthumbnail, r'-(\d{4})')
|
||||
|
||||
if ".com/serie" in url and "/miniseries" not in url:
|
||||
action = "episodios"
|
||||
extra = "serie"
|
||||
context = "tvshow"
|
||||
|
||||
title = scrapertools.find_single_match(title, '([^-]+)')
|
||||
title = title.replace("Ver online", "", 1).replace("Descarga Serie HD", "", 1).replace("Ver en linea ", "",
|
||||
1).strip()
|
||||
|
||||
else:
|
||||
title = title.replace("Descargar torrent ", "", 1).replace("Descarga Gratis ", "", 1).replace("Descargar Estreno ", "", 1).replace("Pelicula en latino ", "", 1).replace("Descargar Pelicula ", "", 1).replace("Descargar", "", 1).replace("Descarga", "", 1).replace("Bajar", "", 1).strip()
|
||||
if title.endswith("gratis"): title = title[:-7]
|
||||
if title.endswith("torrent"): title = title[:-8]
|
||||
if title.endswith("en HD"): title = title[:-6]
|
||||
|
||||
if title == "":
|
||||
title = title_alt
|
||||
context_title = title_alt
|
||||
show = title_alt
|
||||
if not config.get_setting("unify"): #Si Titulos Inteligentes NO seleccionados:
|
||||
if calidad:
|
||||
title = title + ' [' + calidad + "]"
|
||||
|
||||
if not 'array' in title:
|
||||
itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail,
|
||||
extra = extra, show = context_title, contentTitle=context_title, contentType=context, quality=calidad,
|
||||
context=["buscar_trailer"], infoLabels= {'year':year}))
|
||||
|
||||
logger.debug("url: " + url + " / title: " + title + " / contxt title: " + context_title + " / context: " + context + " / calidad: " + calidad+ " / year: " + year)
|
||||
|
||||
tmdb.set_infoLabels(itemlist, True)
|
||||
|
||||
if url_next_page:
|
||||
itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente",
|
||||
url=url_next_page, next_page=next_page, folder=True,
|
||||
text_color='yellow', text_bold=True, modo = modo, plot = extra,
|
||||
extra = page_extra))
|
||||
return itemlist
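
A note on the paging logic above: each listing page carries up to 60 cards, so the channel serves the first 30 on pass 'a', the remaining 30 on pass 'b', and only then follows the site's own "Next" link. A minimal self-contained sketch of that windowing; the names paginate and window are illustrative and not part of the addon:

    def paginate(matches, next_page, current_url, next_link, window=30):
        """Serve a 60-card page in two 30-card screens, then follow the real Next link."""
        if next_page != 'b':
            if len(matches) > window:
                # first screen: point back at the same url, flag 'b' for the tail
                return matches[:window], 'b', current_url
            return matches, 'a', next_link
        # second screen: the tail of the same page, then the site's next page
        return matches[window:], 'a', next_link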
def listado_busqueda(item):
    logger.info()
    itemlist = []
    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url, post=item.post).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")

    list_chars = [["Ã±", "ñ"]]

    for el in list_chars:
        data = re.sub(r"%s" % el[0], el[1], data)

    try:
        get, post = scrapertools.find_single_match(data, '<ul class="pagination">.*?<a class="current" href.*?'
                                                         '<a\s*href="([^"]+)"(?:\s*onClick=".*?\'([^"]+)\'.*?")')

@@ -249,154 +205,462 @@ def listado2(item):

    pattern = '<ul class="%s">(.*?)</ul>' % item.pattern
    data = scrapertools.get_match(data, pattern)

    logger.debug(data)
    pattern = '<a href="(?P<url>[^"]+)".*?<img.*?src="(?P<img>[^"]+)"[^>]+>.*?<h2.*?>\s*(?P<title>.*?)\s*</h2>'
    pattern = '<li[^>]*><a href="(?P<url>[^"]+).*?<img.*?src="(?P<thumb>[^"]+)?".*?<h2.*?>(?P<title>.*?)?<\/h2>'
    matches = re.compile(pattern, re.DOTALL).findall(data)

    for url, thumb, title in matches:
        real_title = scrapertools.find_single_match(title, r'<strong.*?>(.*?)Temporada.*?<\/strong>')  # series
        if not real_title:
            real_title = scrapertools.find_single_match(title, r'(.*?)\[.*?]')  # movies
        real_title = scrapertools.remove_htmltags(real_title).decode('iso-8859-1').encode('utf-8')
        real_title = scrapertools.htmlclean(real_title)
        real_title = real_title.replace("�", "ñ").replace("Ã±", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "").replace("de la Serie", "")
        calidad = scrapertools.find_single_match(title, r'.*?\s*Calidad.*?<span[^>]+>[\[]\s*(?P<quality>.*?)\s*[\]]<\/span>')  # series
        if calidad == "":
            calidad = scrapertools.find_single_match(title, r'..*?(\[.*?.*\])')  # movies
        year = scrapertools.find_single_match(thumb, r'-(\d{4})')

        # fix encoding for title
        title = scrapertools.htmlclean(title)
        title = title.replace("�", "ñ")
        title = re.sub(r'(Calidad.*?\])', '', title)
        title = title.replace("�", "ñ").replace("Ã±", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "").replace("de la Serie", "")
        if real_title == "":
            real_title = title
        if calidad == "":
            calidad = title
        context = "movie"
        url_real = True

        # skip anything that is not a video
        if "descargar-juego/" in url or "/varios/" in url:
            if "juego/" in url:
                continue

        if ".com/series" in url:
        # Code to salvage what we can from pelisy.series.com Series for the video library: the URL points
        # to the episode, not the Series, and the Series name is often blank. It is recovered from the
        # thumb, as is the series id.
        if ("/serie" in url or "-serie" in url) and "pelisyseries.com" in host:
            if "seriehd" in url:
                calidad_mps = "series-hd/"
            elif "serievo" in url:
                calidad_mps = "series-vo/"
            elif "serie-vo" in url:
                calidad_mps = "series-vo/"
            else:
                calidad_mps = "series/"

            if "no_image" in thumb:
                real_title_mps = title
            else:
                real_title_mps = re.sub(r'.*?\/\d+_', '', thumb)
                real_title_mps = re.sub(r'\.\w+.*?', '', real_title_mps)

            if "/0_" not in thumb and not "no_image" in thumb:
                serieid = scrapertools.find_single_match(thumb, r'.*?\/\w\/(?P<serieid>\d+).*?.*')
                if len(serieid) > 5:
                    serieid = ""
            else:
                serieid = ""

            show = title
            # check whether the tvshow url we built is valid, or whether we must fall back
            url_tvshow = host + calidad_mps + real_title_mps + "/"
            url_id = host + calidad_mps + real_title_mps + "/" + serieid
            data_serie = data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(url_id).data)
            data_serie = unicode(data_serie, "iso-8859-1", errors="replace").encode("utf-8")
            data_serie = data_serie.replace("chapters", "buscar-list")
            pattern = '<ul class="%s">(.*?)</ul>' % "buscar-list"  # item.pattern
            if not scrapertools.find_single_match(data_serie, pattern):
                data_serie = data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(url_tvshow).data)
                data_serie = unicode(data_serie, "iso-8859-1", errors="replace").encode("utf-8")
                data_serie = data_serie.replace("chapters", "buscar-list")
                if not scrapertools.find_single_match(data_serie, pattern):
                    context = "movie"
                    url_real = False
                    if not config.get_setting("unify"):  # If Smart Titles are NOT selected:
                        if calidad:
                            title = title + '[' + calidad + "]"
                else:
                    url = url_tvshow
            else:
                url = url_id

            real_title_mps = real_title_mps.replace("-", " ")
            logger.debug("url: " + url + " / title: " + title + " / real_title: " + real_title + " / real_title_mps: " + real_title_mps + " / calidad_mps : " + calidad_mps + " / context : " + context)
            real_title = real_title_mps

        show = real_title

        itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumb,
                             context=["buscar_trailer"], show=show))
        if ".com/serie" in url and "/miniseries" not in url and url_real:
            if not config.get_setting("unify"):  # If Smart Titles are NOT selected:
                if calidad:
                    title = title + '[' + calidad + "]"
            context = "tvshow"

            itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumb, quality=calidad,
                                 show=show, extra="serie", context=["buscar_trailer"], contentType=context, contentTitle=real_title, contentSerieName=real_title, infoLabels={'year': year}))
        else:
            itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb,
                                 context=["buscar_trailer"]))
            if config.get_setting("unify"):  # If Smart Titles ARE selected:
                title = real_title

            itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb, quality=calidad,
                                 show=show, context=["buscar_trailer"], contentType=context, contentTitle=real_title, infoLabels={'year': year}))

        logger.debug("url: " + url + " / title: " + title + " / real_title: " + real_title + " / show: " + show + " / calidad: " + calidad)

    tmdb.set_infoLabels(itemlist, True)

    if post:
        itemlist.append(item.clone(channel=item.channel, action="listado2", title="[COLOR cyan]Página Siguiente >>[/COLOR]",
                                   thumbnail=''))
        itemlist.append(item.clone(channel=item.channel, action="listado_busqueda", title=">> Página siguiente",
                                   text_color='yellow', text_bold=True, thumbnail=get_thumb("next.png")))

    return itemlist
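
The search listing above undoes a classic double-encoding: the page is decoded as iso-8859-1, re-encoded as UTF-8, and the tell-tale mojibake sequence is patched by hand. A tiny standalone illustration (Python 2, as in the addon; the sample string is made up):

    # -*- coding: utf-8 -*-
    # "Ã±" is what the UTF-8 bytes of "ñ" look like after a Latin-1 misread.
    raw = "Espa\xc3\xb1a"                                 # UTF-8 bytes as served by the site
    text = unicode(raw, "iso-8859-1", errors="replace")   # -> u"EspaÃ±a"
    text = text.encode("utf-8").replace("Ã±", "ñ")        # patch the sequence back to "ñ"
    print(text)                                           # España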
def findvideos(item):
    logger.info()
    itemlist = []
    ## Any of the three options is valid
    # item.url = item.url.replace(".com/",".com/ver-online/")
    # item.url = item.url.replace(".com/",".com/descarga-directa/")
    item.url = item.url.replace(".com/", ".com/descarga-torrent/")

    # Download the page
    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    data = data.replace("$!", "#!").replace("'", "\"").replace("Ã±", "ñ").replace("//pictures", "/pictures")

    title = scrapertools.find_single_match(data, "<h1.*?<strong>([^<]+)<\/strong>.*?<\/h1>")  # fixed to adapt it to mispelisy.series.com
    title += scrapertools.find_single_match(data, "<h1.*?<strong>[^<]+<\/strong>([^<]+)<\/h1>")  # fixed to adapt it to mispelisy.series.com
    #caratula = scrapertools.find_single_match(data, '<div class="entry-left">.*?src="([^"]+)"')
    caratula = scrapertools.find_single_match(data, '<h1.*?<img.*?src="([^"]+)')

    patron = 'openTorrent.*?title=".*?class="btn-torrent">.*?function openTorrent.*?href = "(.*?)";'
    # scraped torrent
    url = scrapertools.find_single_match(data, patron)

    if item.infoLabels['year']:  # add the year to the overall title
        year = '[%s]' % str(item.infoLabels['year'])
    else:
        year = ""

    if item.infoLabels['aired'] and item.contentType == "episode":  # add the episode year for series
        year = scrapertools.find_single_match(str(item.infoLabels['aired']), r'\/(\d{4})')
        year = '[%s]' % year

    title_gen = title
    if item.contentType == "episode":  # strip info duplicated in Series
        title = re.sub(r'Temp.*?\[', '[', title)
        title = re.sub(r'\[Cap.*?\]', '', title)
        title_epi = '%sx%s - %s' % (str(item.contentSeason), str(item.contentEpisodeNumber), item.contentTitle)
        title_gen = '%s %s, %s' % (title_epi, year, title)
        title_torrent = '%s, %s' % (title_epi, item.contentSerieName)
    else:
        title_torrent = item.contentTitle

    if item.infoLabels['quality']:
        if not config.get_setting("unify"):  # If Smart Titles are NOT selected:
            title_torrent = '%s [%s]' % (title_torrent, item.infoLabels['quality'])
        else:
            title_torrent = '%s (%s)' % (title_torrent, item.infoLabels['quality'])
    if not config.get_setting("unify"):  # If Smart Titles are NOT selected:
        title_gen = '[COLOR gold]**- Título: [/COLOR]%s -**' % (title_gen)
    else:
        title_gen = '[COLOR gold]Título: [/COLOR]%s' % (title_gen)
    if config.get_setting("quit_channel_name", "videolibrary") == 1 and item.contentChannel == "videolibrary":
        title_gen = '%s: %s' % (item.channel.capitalize(), title_gen)
    itemlist.append(item.clone(title=title_gen, action="", folder=False))  # Title with all the video's data

    title = title_torrent
    title_torrent = '[COLOR yellow][Torrent]- [/COLOR]%s [online]' % (title_torrent)
    if url != "":  # Torrent
        itemlist.append(
            Item(channel=item.channel, action="play", server="torrent", title=title_torrent, fulltitle=title,
                 url=url, thumbnail=caratula, plot=item.plot, infoLabels=item.infoLabels, folder=False))

        logger.debug("TORRENT: url: " + url + " / title: " + title + " / calidad: " + item.quality + " / context: " + str(item.context))

    # scraped "watch" videos and "download" videos: single link, multiple links

    data = data.replace("http://tumejorserie.com/descargar/url_encript.php?link=", "(")
    data = re.sub(r'javascript:;" onClick="popup\("http:\/\/(?:www.)?mispelisyseries.com\/\w{1,9}\/library\/include\/ajax\/get_modallinks.php\?links=', "", data)

    # New server-scraping scheme created by Torrentlocula, compatible with other Newpct1 clones
    patron = '<div class=\"box1\"[^<]+<img src=\"([^<]+)?" style[^<]+><\/div[^<]+<div class="box2">([^<]+)?<\/div[^<]+<div class="box3">([^<]+)?'
    patron += '<\/div[^<]+<div class="box4">([^<]+)?<\/div[^<]+<div class="box5"><a href=(.*?)? rel.*?'
    patron += '<\/div[^<]+<div class="box6">([^<]+)?<'

    enlaces_ver = re.compile(patron, re.DOTALL).findall(data)
    enlaces_descargar = enlaces_ver
    #logger.debug(enlaces_ver)

    if len(enlaces_ver) > 0:
        if not config.get_setting("unify"):  # If Smart Titles are NOT selected:
            itemlist.append(item.clone(title="[COLOR gold]**- Enlaces Ver: -**[/COLOR]", action="", folder=False))
        else:
            itemlist.append(item.clone(title="[COLOR gold] Enlaces Ver: [/COLOR]", action="", folder=False))

    for logo, servidor, idioma, calidad, enlace, titulo in enlaces_ver:
        if "Ver" in titulo:
            servidor = servidor.replace("streamin", "streaminto")
            titulo = title
            mostrar_server = True
            if config.get_setting("hidepremium"):
                mostrar_server = servertools.is_server_enabled(servidor)
            titulo = '[COLOR yellow][%s]-[/COLOR] %s [online]' % (servidor.capitalize(), titulo)
            logger.debug("VER: url: " + enlace + " / title: " + titulo + " / servidor: " + servidor + " / idioma: " + idioma)

            if mostrar_server:
                try:
                    devuelve = servertools.findvideosbyserver(enlace, servidor)
                    if devuelve:
                        enlace = devuelve[0][1]
                        itemlist.append(
                            Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo,
                                 fulltitle=title, url=enlace, thumbnail=logo, plot=item.plot, infoLabels=item.infoLabels, folder=False))
                except:
                    pass

    if len(enlaces_descargar) > 0:
        if not config.get_setting("unify"):  # If Smart Titles are NOT selected:
            itemlist.append(item.clone(title="[COLOR gold]**- Enlaces Descargar: -**[/COLOR]", action="", folder=False))
        else:
            itemlist.append(item.clone(title="[COLOR gold] Enlaces Descargar: [/COLOR]", action="", folder=False))

    for logo, servidor, idioma, calidad, enlace, titulo in enlaces_descargar:
        if "Ver" not in titulo:
            servidor = servidor.replace("uploaded", "uploadedto")
            partes = enlace.split(" ")
            titulo = "Descarga "
            p = 1
            logger.debug("DESCARGAR: url: " + enlace + " / title: " + titulo + title + " / servidor: " + servidor + " / idioma: " + idioma)
            for enlace in partes:
                parte_titulo = titulo + " (%s/%s)" % (p, len(partes))
                p += 1
                mostrar_server = True
                if config.get_setting("hidepremium"):
                    mostrar_server = servertools.is_server_enabled(servidor)
                parte_titulo = '[COLOR yellow][%s]-[/COLOR] %s' % (servidor.capitalize(), parte_titulo)
                if item.infoLabels['quality']:
                    if not config.get_setting("unify"):  # If Smart Titles are NOT selected:
                        parte_titulo = '%s [%s]' % (parte_titulo, item.infoLabels['quality'])
                    else:
                        parte_titulo = '%s (%s)' % (parte_titulo, item.infoLabels['quality'])
                if mostrar_server:
                    try:
                        devuelve = servertools.findvideosbyserver(enlace, servidor)
                        if devuelve:
                            enlace = devuelve[0][1]
                            itemlist.append(Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor,
                                                 title=parte_titulo, fulltitle=title, url=enlace, thumbnail=logo,
                                                 plot=item.plot, infoLabels=item.infoLabels, folder=False))
                    except:
                        pass

    return itemlist
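
The download branch above turns a space-separated list of mirror links into numbered parts. A minimal sketch of that split, with made-up urls:

    # A space-separated field becomes numbered "(1/n)" entries.
    enlace = "http://example.com/part1.rar http://example.com/part2.rar"
    partes = enlace.split(" ")
    for p, link in enumerate(partes, start=1):
        print("Descarga (%s/%s): %s" % (p, len(partes), link))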
def episodios(item):
    logger.info()
    itemlist = []
    infoLabels = item.infoLabels
    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    calidad = item.quality

    pattern = '<ul class="%s">(.*?)</ul>' % "pagination"  # item.pattern
    pagination = scrapertools.find_single_match(data, pattern)
    if pagination:
        pattern = '<li><a href="([^"]+)">Last<\/a>'
        full_url = scrapertools.find_single_match(pagination, pattern)
        url, last_page = scrapertools.find_single_match(full_url, r'(.*?\/pg\/)(\d+)')
        list_pages = [item.url]
        for x in range(2, int(last_page) + 1):
            response = httptools.downloadpage('%s%s' % (url, x))
            if response.sucess:
                list_pages.append("%s%s" % (url, x))
    else:
        list_pages = [item.url]

    # Download the page
    data = httptools.downloadpage(item.url, post=item.extra).data
    # logger.info("data="+data)
    for index, page in enumerate(list_pages):
        data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(page).data)
        data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
        data = data.replace("chapters", "buscar-list")  # Compatibility with mispelisy.series.com
        pattern = '<ul class="%s">(.*?)</ul>' % "buscar-list"  # item.pattern
        if scrapertools.find_single_match(data, pattern):
            data = scrapertools.get_match(data, pattern)
        else:
            logger.debug(item)
            logger.debug("data: " + data)
            return itemlist

        patron = '<div class="chap-desc"[^<]+'
        patron += '<a class="chap-title".*?href="([^"]+)" title="([^"]+)"[^<]+'
        matches = re.compile(patron, re.DOTALL).findall(data)
        scrapertools.printMatches(matches)
        if "pelisyseries.com" in host:
            pattern = '<li[^>]*><div class.*?src="(?P<thumb>[^"]+)?".*?<a class.*?href="(?P<url>[^"]+).*?<h3[^>]+>(?P<info>.*?)?<\/h3>.*?<\/li>'
        else:
            pattern = '<li[^>]*><a href="(?P<url>[^"]+).*?<img.*?src="(?P<thumb>[^"]+)?".*?<h2[^>]+>(?P<info>.*?)?<\/h2>'
        matches = re.compile(pattern, re.DOTALL).findall(data)
        #logger.debug("patron: " + pattern)
        #logger.debug(matches)

        season = "1"

        for scrapedurl, scrapedtitle in matches:
            title = scrapedtitle.strip()
            url = urlparse.urljoin(item.url, scrapedurl)
            thumbnail = ""
            plot = ""
            logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
        for url, thumb, info in matches:

            if "pelisyseries.com" in host:
                interm = url
                url = thumb
                thumb = interm

            if "<span" in info:  # new style
                pattern = ".*?[^>]+>.*?Temporada\s*(?P<season>\d+)?.*?Capitulo(?:s)?\s*(?P<episode>\d+)?" \
                          "(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)?<\/span>\s*Calidad\s*<span[^>]+>" \
                          "[\[]\s*(?P<quality>.*?)?\s*[\]]<\/span>"
                if "Especial" in info:  # Special episodes
                    pattern = ".*?[^>]+>.*?Temporada.*?\[.*?(?P<season>\d+).*?\].*?Capitulo.*?\[\s*(?P<episode>\d+).*?\]?(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)?<\/span>\s*Calidad\s*<span[^>]+>[\[]\s*(?P<quality>.*?)?\s*[\]]<\/span>"

                if not scrapertools.find_single_match(info, pattern):  # on a format mismatch, build a basic one
                    logger.debug("patron episodioNEW: " + pattern)
                    logger.debug(info)
                    info = '><strong>%sTemporada %s Capitulo 0</strong> - <span >Español Castellano</span> Calidad <span >[%s]</span>' % (item.contentTitle, season, item.infoLabels['quality'])
                r = re.compile(pattern)
                match = [m.groupdict() for m in r.finditer(info)][0]

                if match['season'] is None: match['season'] = season
                if match['episode'] is None: match['episode'] = "0"
                if match['quality']:
                    item.quality = match['quality']

                if match["episode2"]:
                    multi = True
                    title = "%s (%sx%s-%s) [%s]" % (item.show, match["season"], str(match["episode"]).zfill(2),
                                                    str(match["episode2"]).zfill(2), match["lang"])
                    if not config.get_setting("unify") and match["quality"]:  # If Smart Titles are NOT selected:
                        title = "%s[%s]" % (title, match["quality"])
                else:
                    multi = False
                    title = "%s (%sx%s) [%s]" % (item.show, match["season"], str(match["episode"]).zfill(2),
                                                 match["lang"])
                    if not config.get_setting("unify") and match["quality"]:  # If Smart Titles are NOT selected:
                        title = "%s[%s]" % (title, match["quality"])

            else:  # old style
                if scrapertools.find_single_match(info, '\[\d{3}\]'):
                    info = re.sub(r'\[(\d{3}\])', r'[Cap.\1', info)
                elif scrapertools.find_single_match(info, '\[Cap.\d{2}_\d{2}\]'):
                    info = re.sub(r'\[Cap.(\d{2})_(\d{2})\]', r'[Cap.1\1_1\2]', info)
                elif scrapertools.find_single_match(info, '\[Cap.([A-Za-z]+)\]'):
                    info = re.sub(r'\[Cap.([A-Za-z]+)\]', '[Cap.100]', info)
                if scrapertools.find_single_match(info, '\[Cap.\d{2,3}'):
                    pattern = "\[(?P<quality>.*?)\].*?\[Cap.(?P<season>\d).*?(?P<episode>\d{2})(?:_(?P<season2>\d+)" \
                              "(?P<episode2>\d{2}))?.*?\].*?(?:\[(?P<lang>.*?)\])?"
                elif scrapertools.find_single_match(info, 'Cap.\d{2,3}'):
                    pattern = ".*?Temp.*?\s(?P<quality>.*?)\s.*?Cap.(?P<season>\d).*?(?P<episode>\d{2})(?:_(?P<season2>\d+)(?P<episode2>\d{2}))?.*?\s(?P<lang>.*)?"

                if not scrapertools.find_single_match(info, pattern):  # on a format mismatch, build a basic one
                    logger.debug("patron episodioOLD: " + pattern)
                    logger.debug(info)
                    info = '%s [%s][Cap.%s00][Español]' % (item.contentTitle, item.infoLabels['quality'], season)
                r = re.compile(pattern)
                match = [m.groupdict() for m in r.finditer(info)][0]

                str_lang = ""
                if match['quality']:
                    item.quality = match['quality']

                if match["lang"] is not None:
                    str_lang = "[%s]" % match["lang"]
                    item.quality = "%s %s" % (item.quality, match['lang'])

                if match["season2"] and match["episode2"]:
                    multi = True
                    if match["season"] == match["season2"]:

                        title = "%s (%sx%s-%s) %s" % (item.show, match["season"], match["episode"],
                                                      match["episode2"], str_lang)
                        if not config.get_setting("unify") and match["quality"]:  # If Smart Titles are NOT selected:
                            title = "%s[%s]" % (title, match["quality"])
                    else:
                        title = "%s (%sx%s-%sx%s) %s" % (item.show, match["season"], match["episode"],
                                                         match["season2"], match["episode2"], str_lang)
                        if not config.get_setting("unify") and match["quality"]:  # If Smart Titles are NOT selected:
                            title = "%s[%s]" % (title, match["quality"])
                else:
                    title = "%s (%sx%s) %s" % (item.show, match["season"], match["episode"], str_lang)
                    if not config.get_setting("unify") and match["quality"]:  # If Smart Titles are NOT selected:
                        title = "%s[%s]" % (title, match["quality"])
                    multi = False

            season = match['season']
            episode = match['episode']
            logger.debug("title: " + title + " / url: " + url + " / calidad: " + item.quality + " / multi: " + str(multi) + " / Season: " + str(season) + " / EpisodeNumber: " + str(episode))
            itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb,
                                 quality=item.quality, multi=multi, contentSeason=season,
                                 contentEpisodeNumber=episode, infoLabels=infoLabels))
    # order list
    #tmdb.set_infoLabels(itemlist, True)
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    if len(itemlist) > 1:
        itemlist = sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber)))

    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url, thumbnail=thumbnail,
                 plot=plot, folder=True))

    next_page_url = scrapertools.find_single_match(data, "<a class='active' href=[^<]+</a><a\s*href='([^']+)'")
    if next_page_url != "":
        itemlist.append(Item(channel=item.channel, action="episodios", title=">> Página siguiente",
                             url=urlparse.urljoin(item.url, next_page_url), folder=True))
    itemlist.append(
        item.clone(title="Añadir esta serie a la videoteca", action="add_serie_to_library", extra="episodios", quality=calidad))

    return itemlist
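
The episode parser above relies on one named-group regex per layout and reads each row into a dict, so missing optional groups arrive as None and can be defaulted. A reduced sketch with a made-up sample row:

    import re

    info = "Temporada 2 Capitulo 5 <span>Español</span> Calidad <span>[HDTV 720p]</span>"
    pattern = (r"Temporada\s*(?P<season>\d+)?.*?Capitulo(?:s)?\s*(?P<episode>\d+)?"
               r".*?<span[^>]*>(?P<lang>.*?)</span>\s*Calidad\s*<span[^>]*>\[\s*(?P<quality>.*?)\s*\]</span>")
    match = [m.groupdict() for m in re.finditer(pattern, info)][0]
    if match['season'] is None:
        match['season'] = "1"   # same default the channel falls back to
    print(match)                # season 2, episode 5, lang Español, quality HDTV 720p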
def search(item, texto):
    logger.info("search:" + texto)
    # texto = texto.replace(" ", "+")

    try:
        item.post = "q=%s" % texto
        item.pattern = "buscar-list"
        itemlist = listado_busqueda(item)

        return itemlist

    # Catch the exception so a failing channel does not break the global search
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def newest(categoria):
    logger.info()
    itemlist = []
    item = Item()
    try:
        item.extra = 'pelilist'
        if categoria == 'torrent':
            item.url = host + 'peliculas/'

            # Download the page
            data = httptools.downloadpage(item.url).data
            item.plot = scrapertools.find_single_match(data, '<div class="post-entry" style="height:300px;">(.*?)</div>')
            item.plot = scrapertools.htmlclean(item.plot).strip()
            item.contentPlot = item.plot
            al_url_fa = scrapertools.find_single_match(data, 'location\.href.*?=.*?"http:\/\/(?:tumejorserie|tumejorjuego).*?link=(.*?)"')
            if al_url_fa == "":
                al_url_fa = scrapertools.find_single_match(data, 'location\.href.*?=.*?"%s(.*?)" ' % host)
                if al_url_fa != "":
                    al_url_fa = host + al_url_fa
            logger.info("torrent=" + al_url_fa)
            itemlist.append(
                Item(channel=item.channel, action="play", server="torrent", title="Vídeo en torrent", fulltitle=item.title,
                     url=al_url_fa, thumbnail=servertools.guess_server_thumbnail("torrent"), plot=item.plot, folder=False,
                     parentContent=item))
            itemlist = listado(item)
            if itemlist[-1].title == ">> Página siguiente":
                itemlist.pop()
            item.url = host + 'series/'
            itemlist.extend(listado(item))
            if itemlist[-1].title == ">> Página siguiente":
                itemlist.pop()

        if categoria == 'peliculas 4k':
            item.url = host + 'peliculas-hd/4kultrahd/'
            itemlist.extend(listado(item))
            if itemlist[-1].title == ">> Página siguiente":
                itemlist.pop()

        if categoria == 'anime':
            item.url = host + 'anime/'
            itemlist.extend(listado(item))
            if itemlist[-1].title == ">> Página siguiente":
                itemlist.pop()

        if categoria == 'documentales':
            item.url = host + 'documentales/'
            itemlist.extend(listado(item))
            if itemlist[-1].title == ">> Página siguiente":
                itemlist.pop()

        patron = '<div class=\"box1\"[^<]+<img[^<]+<\/div[^<]+<div class="box2">([^<]+)<\/div[^<]+<div class="box3">([^<]+)'
        patron += '<\/div[^<]+<div class="box4">([^<]+)<\/div[^<]+<div class="box5"><a href=(.*?) rel.*?'
        patron += '<\/div[^<]+<div class="box6">([^<]+)<'

        matches = re.compile(patron, re.DOTALL).findall(data)

        itemlist_ver = []
        itemlist_descargar = []

        for servername, idioma, calidad, scrapedurl, comentarios in matches:
            title = "Mirror en " + servername + " (" + calidad + ")" + " (" + idioma + ")"
            servername = servername.replace("uploaded", "uploadedto").replace("1fichier", "onefichier")
            if comentarios.strip() != "":
                title = title + " (" + comentarios.strip() + ")"
            url = urlparse.urljoin(item.url, scrapedurl)
            mostrar_server = servertools.is_server_enabled(servername)
            if mostrar_server:
                thumbnail = servertools.guess_server_thumbnail(title)
                plot = ""
                logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
                action = "play"
                if "partes" in title:
                    action = "extract_url"
                new_item = Item(channel=item.channel, action=action, title=title, fulltitle=title, url=url,
                                thumbnail=thumbnail, plot=plot, parentContent=item, server=servername, quality=calidad)
                if comentarios.startswith("Ver en"):
                    itemlist_ver.append(new_item)
                else:
                    itemlist_descargar.append(new_item)

        itemlist.extend(itemlist_ver)
        itemlist.extend(itemlist_descargar)
    # Catch the exception so the "new releases" channel is not interrupted if one channel fails
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist
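
The mirror loop above buckets entries by their comment field, streaming mirrors ("Ver en ...") first and downloads after. A compact sketch with invented tuples:

    mirrors = [("Openload", "Ver en HD"), ("Uploaded", "2 partes"), ("Streamcloud", "Ver en SD")]
    ver, descargar = [], []
    for server, comentario in mirrors:
        (ver if comentario.startswith("Ver en") else descargar).append(server)
    resultado = ver + descargar   # streaming mirrors first, downloads after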
def extract_url(item):
    logger.info()

    itemlist = servertools.find_video_items(data=item.url)

    for videoitem in itemlist:
        videoitem.title = "Enlace encontrado en " + videoitem.server + " (" + scrapertools.get_filename_from_url(videoitem.url) + ")"
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = item.channel

    return itemlist
def play(item):
    logger.info()

    if item.server != "torrent":
        itemlist = servertools.find_video_items(data=item.url)

        for videoitem in itemlist:
            videoitem.title = "Enlace encontrado en " + videoitem.server + " (" + scrapertools.get_filename_from_url(videoitem.url) + ")"
            videoitem.fulltitle = item.fulltitle
            videoitem.thumbnail = item.thumbnail
            videoitem.channel = item.channel
    else:
        itemlist = [item]

    return itemlist
@@ -286,7 +286,7 @@ def newest(categoria):
    item = Item()
    try:
        if categoria in ['peliculas', 'latino']:
            item.url = host + 'peliculas/ultimas-agregadas/'
            item.url = host + 'peliculas/ultimas-peliculas/'

        elif categoria == 'infantiles':
            item.url = host + 'peliculas/animacion/'
@@ -297,7 +297,7 @@ def newest(categoria):
        elif categoria == 'documentales':
            item.url = host + 'documentales/'

        itemlist = lista(item)
        itemlist = list_all(item)
        if itemlist[-1].title == 'Siguiente >>>':
            itemlist.pop()
    except:
@@ -71,11 +71,11 @@ def lista(item):
        context2 = autoplay.context
        context.extend(context2)

        itemlist.append(item.clone(title=title, url=url, action="episodios", thumbnail=scrapedthumbnail, show=title,
        itemlist.append(item.clone(title=title, url=url, action="episodios", thumbnail=scrapedthumbnail, show=title, contentSerieName=title,
                                   context=context))
    if b < 29:
        a = a + 1
        url = "https://serieslan.com/pag-" + str(a)
        url = host + "/pag-" + str(a)
        if b > 10:
            itemlist.append(
                Item(channel=item.channel, title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=url, action="lista", page=0))
@@ -116,14 +116,14 @@ def episodios(item):
            for pos in name.split(pat):
                i = i + 1
                total_episode += 1
                season, episode = renumbertools.numbered_for_tratk(item.channel, item.show, 1, total_episode)
                season, episode = renumbertools.numbered_for_tratk(item.channel, item.contentSerieName, 1, total_episode)
                if len(name.split(pat)) == i:
                    title += "%sx%s " % (season, str(episode).zfill(2))
                else:
                    title += "%sx%s_" % (season, str(episode).zfill(2))
        else:
            total_episode += 1
            season, episode = renumbertools.numbered_for_tratk(item.channel, item.show, 1, total_episode)
            season, episode = renumbertools.numbered_for_tratk(item.channel, item.contentSerieName, 1, total_episode)

            title += "%sx%s " % (season, str(episode).zfill(2))
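
The renumbering helper used in this hunk maps a running absolute episode counter onto a (season, episode) pair using per-show tables. A toy stand-in under the assumption of fixed-length seasons; the _stub name and the 12-episode default are illustrative only:

    # Toy stand-in for renumbertools.numbered_for_tratk; the real helper looks
    # the season split up in per-show renumbering tables.
    def numbered_for_tratk_stub(total_episode, episodes_per_season=12):
        season = (total_episode - 1) // episodes_per_season + 1
        episode = (total_episode - 1) % episodes_per_season + 1
        return season, episode

    print(numbered_for_tratk_stub(25))  # -> (3, 1)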
@@ -12,6 +12,7 @@ from core import jsontools
from core import scrapertools
from core import servertools
from core import tmdb
from channels import autoplay
from core.item import Item
from platformcode import config, logger
@@ -20,12 +21,16 @@ HOST = "http://www.seriespapaya.com"
IDIOMAS = {'es': 'Español', 'lat': 'Latino', 'in': 'Inglés', 'ca': 'Catalán', 'sub': 'VOSE', 'Español Latino': 'lat',
           'Español Castellano': 'es', 'Sub Español': 'VOSE'}
list_idiomas = IDIOMAS.values()
CALIDADES = ['360p', '480p', '720p HD', '1080p HD']
CALIDADES = ['360p', '480p', '720p HD', '1080p HD', 'default']
list_servers = ['powvideo', 'streamplay', 'filebebo', 'flashx', 'gamovideo', 'nowvideo', 'openload', 'streamango',
                'streamcloud', 'vidzi', 'clipwatching', ]


def mainlist(item):
    logger.info()

    autoplay.init(item.channel, list_servers, CALIDADES)

    thumb_series = get_thumb("channels_tvshow.png")
    thumb_series_az = get_thumb("channels_tvshow_az.png")
    thumb_buscar = get_thumb("search.png")
@@ -39,6 +44,8 @@ def mainlist(item):

    itemlist = filtertools.show_option(itemlist, item.channel, list_idiomas, CALIDADES)

    autoplay.show_option(item.channel, itemlist)

    return itemlist
@@ -205,13 +212,23 @@ def findvideos(item):
                         server=server.rstrip(),
                         quality=quality,
                         uploader=uploader),
                     server=server.rstrip(),
                     url=urlparse.urljoin(HOST, url),
                     language=IDIOMAS.get(lang, lang),
                     quality=quality,
                     language=IDIOMAS.get(lang, lang),
                     quality=quality
                     ) for lang, date, server, url, linkType, quality, uploader in links]

    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_idiomas, CALIDADES)

    # Required by AutoPlay
    autoplay.start(itemlist, item)

    return itemlist
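
This hunk shows the usual tail of a channel's findvideos: build the Items, let FilterTools drop languages and qualities the user filtered out, then hand AutoPlay the list. A condensed sketch with a placeholder links list; only the filtertools and autoplay calls shown in the hunk are real:

    itemlist = [Item(channel=item.channel, action="play", title=server, url=url,
                     server=server, language=IDIOMAS.get(lang, lang), quality=quality)
                for lang, server, url, quality in links]
    # Required by FilterTools: apply the user's language/quality filter
    itemlist = filtertools.get_links(itemlist, item, list_idiomas, CALIDADES)
    # Required by AutoPlay: may start the best link straight away
    autoplay.start(itemlist, item)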
@@ -170,25 +170,26 @@ def servers_blacklist(item):
    server_list = servertools.get_servers_list()
    dict_values = {}

    list_controls = [{'id': 'filter_servers',
                      'type': "bool",
                      'label': "@30068",
                      'default': False,
                      'enabled': True,
                      'visible': True}]
    list_controls = [{"id": "filter_servers",
                      "type": "bool",
                      "label": "@30068",
                      "default": False,
                      "enabled": True,
                      "visible": True}]
    dict_values['filter_servers'] = config.get_setting('filter_servers')

    if dict_values['filter_servers'] == None:
        dict_values['filter_servers'] = False
    for i, server in enumerate(sorted(server_list.keys())):
        server_parameters = server_list[server]
        controls, defaults = servertools.get_server_controls_settings(server)
        dict_values[server] = config.get_setting("black_list", server=server)

        control = {'id': server,
                   'type': "bool",
                   'label': ' %s' % server_parameters["name"],
                   'default': defaults.get("black_list", False),
                   'enabled': "eq(-%s,True)" % (i + 1),
                   'visible': True}
        control = {"id": server,
                   "type": "bool",
                   "label": ' %s' % server_parameters["name"],
                   "default": defaults.get("black_list", False),
                   "enabled": "eq(-%s,True)" % (i + 1),
                   "visible": True}
        list_controls.append(control)

    return platformtools.show_channel_settings(list_controls=list_controls, dict_values=dict_values,
@@ -228,6 +229,8 @@ def servers_favorites(item):
                      'enabled': True,
                      'visible': True}]
    dict_values['favorites_servers'] = config.get_setting('favorites_servers')
    if dict_values['favorites_servers'] == None:
        dict_values['favorites_servers'] = False

    server_names = ['Ninguno']
@@ -8,18 +8,36 @@
    "thumbnail": "http://imgur.com/EWmLS3d.png",
    "fanart": "http://imgur.com/V7QZLAL.jpg",
    "categories": [
        "torrent",
        "movie",
        "tvshow"
    ],
    "settings": [
        "movie",
        "tvshow",
        "anime",
        "torrent",
        "documentary"
    ],
    "settings": [
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": true,
            "enabled": true,
            "visible": true
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": false,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_peliculas",
            "type": "bool",
            "label": "Incluir en Novedades - Peliculas",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_series",
            "type": "bool",
            "label": "Incluir en Novedades - Episodios de series",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_torrent",
@@ -37,5 +55,5 @@
            "enabled": true,
            "visible": true
        }
    ]
    ]
}
@@ -1,4 +1,4 @@
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-

import re

@@ -10,7 +10,7 @@ from core.item import Item
from platformcode import config, logger
from core import tmdb

host = 'http://torrentlocura.com/'  # Manually change "xx" in line 287 ".com/xx/library" to tl for torrentlocura, tr for torrentrapid, d20 for descargas2020
host = 'http://torrentlocura.com/'

def mainlist(item):
    logger.info()
@@ -40,12 +40,15 @@ def submenu(item):

    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    data = data.replace("'", "\"").replace("/series\"", "/series/\"")  # Compatibility with mispelisy.series.com

    #patron = '<li><a href="http://(?:www.)?torrentlocura.com/' + item.extra + '/">.*?<ul>(.*?)</ul>'
    patron = '<li><a href="' + item.url + item.extra + '/">.*?<ul>(.*?)</ul>'  # Filtered by url
    data = scrapertools.get_match(data, patron)
    patron = '<li><a href="http://(?:www.)?torrentlocura.com/' + item.extra + '/">.*?<ul.*?>(.*?)</ul>'
    if "pelisyseries.com" in host and item.extra == "varios":  # compatibility with mispelisy.series.com
        data = '<a href="http://torrentlocura.com/varios/" title="Documentales"><i class="icon-rocket"></i> Documentales</a>'
    else:
        data = scrapertools.get_match(data, patron)

    patron = '<a href="([^"]+)".*?>([^>]+)</a>'
    patron = '<.*?href="([^"]+)".*?>([^>]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
@@ -92,15 +95,11 @@ def listado(item):
    url_next_page = ''

    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    #data = httptools.downloadpage(item.url).data
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    #logger.debug(data)
    logger.debug('item.modo: %s' % item.modo)
    logger.debug('item.extra: %s' % item.extra)

    if item.modo != 'next' or item.modo == '':
        logger.debug('item.title: %s' % item.title)
        patron = '<ul class="' + item.extra + '">(.*?)</ul>'
        logger.debug("patron=" + patron)
        fichas = scrapertools.get_match(data, patron)
        page_extra = item.extra
    else:
@@ -109,11 +108,11 @@ def listado(item):

    patron = '<a href="([^"]+).*?'  # the url
    patron += 'title="([^"]+).*?'  # the title
    patron += '<img src="([^"]+)"[^>]+>.*?'  # the thumbnail
    #patron += '<span>([^<].*?)<'  # the quality: original from NewPCT1; if the quality is missing, the next "matches" enters a loop
    patron += '<img.*?src="([^"]+)"[^>]+>.*?'  # the thumbnail
    patron += '<h2.*?>(.*?)?<\/h2>'  # the alternative title
    patron += '<span>([^<].*?)?<'  # the quality
    #logger.debug("patron: " + patron + " / fichas: " + fichas)
    matches = re.compile(patron, re.DOTALL).findall(fichas)
    logger.debug('item.next_page: %s' % item.next_page)

    # Pagination
    if item.next_page != 'b':
@@ -132,20 +131,23 @@ def listado(item):
            url_next_page = matches_next_page[0]
            modo = 'next'

    for scrapedurl, scrapedtitle, scrapedthumbnail, calidad in matches:
    for scrapedurl, scrapedtitle, scrapedthumbnail, title_alt, calidad in matches:
        url = scrapedurl
        title = scrapedtitle
        title = scrapedtitle.replace("�", "ñ").replace("Ã±", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "")
        title_alt = title_alt.replace("�", "ñ").replace("Ã±", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "")
        thumbnail = scrapedthumbnail
        action = "findvideos"
        extra = ""
        context = "movie"
        year = scrapertools.find_single_match(scrapedthumbnail, r'-(\d{4})')
        if ".com/series" in url:

        if ".com/serie" in url and "/miniseries" not in url:
            action = "episodios"
            extra = "serie"

            context = "tvshow"

            title = scrapertools.find_single_match(title, '([^-]+)')
            title = title.replace("Ver online", "", 1).replace("Descarga Serie HD", "", 1).replace("Ver en linea", "", 1).strip()
            title = title.replace("Ver online", "", 1).replace("Descarga Serie HD", "", 1).replace("Ver en linea ", "", 1).strip()

        else:
@@ -153,39 +155,24 @@ def listado(item):
            if title.endswith("gratis"): title = title[:-7]
            if title.endswith("torrent"): title = title[:-8]
            if title.endswith("en HD"): title = title[:-6]

        show = title
        if item.extra != "buscar-list":
            title = title + ' ' + calidad

        context = ""
        context_title = scrapertools.find_single_match(url, "http://(?:www.)?torrentlocura.com/(.*?)/(.*?)/")
        if context_title:
            try:
                context = context_title[0].replace("descargar-", "").replace("pelicula", "movie").replace("series", "tvshow")
                context_title = context_title[1].replace("-", " ")
                if re.search('\d{4}', context_title[-4:]):
                    context_title = context_title[:-4]
                elif re.search('\(\d{4}\)', context_title[-6:]):
                    context_title = context_title[:-6]

            except:
                context_title = show
        logger.debug('contxt title: %s' % context_title)
        logger.debug('year: %s' % year)

        logger.debug('context: %s' % context)

        if title == "":
            title = title_alt
        context_title = title_alt
        show = title_alt
        if not config.get_setting("unify"):  # If Smart Titles are NOT selected:
            if calidad:
                title = title + ' [' + calidad + "]"

        if not 'array' in title:
            itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail,
                                 extra=extra,
                                 show=context_title, contentTitle=context_title, contentType=context,
                                 context=["buscar_trailer"], infoLabels={'year': year}))
                                 extra=extra, show=context_title, contentTitle=context_title, contentType=context, quality=calidad,
                                 context=["buscar_trailer"], infoLabels={'year': year}))

        logger.debug("url: " + url + " / title: " + title + " / contxt title: " + context_title + " / context: " + context + " / calidad: " + calidad + " / year: " + year)

    tmdb.set_infoLabels(itemlist, True)

    if url_next_page:
        itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente",
                             url=url_next_page, next_page=next_page, folder=True,
@@ -193,12 +180,12 @@ def listado(item):
                             extra=page_extra))
    return itemlist
def listado2(item):
def listado_busqueda(item):
    logger.info()
    itemlist = []
    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url, post=item.post).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")

    list_chars = [["Ã±", "ñ"]]

    for el in list_chars:
@@ -218,42 +205,116 @@ def listado2(item):

    pattern = '<ul class="%s">(.*?)</ul>' % item.pattern
    data = scrapertools.get_match(data, pattern)
    pattern = '<li><a href="(?P<url>[^"]+)".*?<img src="(?P<img>[^"]+)"[^>]+>.*?<h2.*?>\s*(?P<title>.*?)\s*</h2>'

    pattern = '<li[^>]*><a href="(?P<url>[^"]+).*?<img.*?src="(?P<thumb>[^"]+)?".*?<h2.*?>(?P<title>.*?)?<\/h2>'
    matches = re.compile(pattern, re.DOTALL).findall(data)

    for url, thumb, title in matches:
        # fix encoding for title
        real_title = scrapertools.find_single_match(title, r'font color.*?font.*?><b>(.*?)<\/b><\/font>')
        real_title = scrapertools.find_single_match(title, r'<strong.*?>(.*?)Temporada.*?<\/strong>')  # series
        if not real_title:
            real_title = scrapertools.find_single_match(title, r'(.*?)\[.*?]')  # movies
        real_title = scrapertools.remove_htmltags(real_title).decode('iso-8859-1').encode('utf-8')
        real_title = scrapertools.htmlclean(real_title)
        real_title = real_title.replace("�", "ñ").replace("Ã±", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "").replace("de la Serie", "")
        calidad = scrapertools.find_single_match(title, r'.*?\s*Calidad.*?<span[^>]+>[\[]\s*(?P<quality>.*?)\s*[\]]<\/span>')  # series
        if calidad == "":
            calidad = scrapertools.find_single_match(title, r'..*?(\[.*?.*\])')  # movies
        year = scrapertools.find_single_match(thumb, r'-(\d{4})')

        # fix encoding for title
        title = scrapertools.htmlclean(title)
        title = title.replace("�", "ñ")
        title = re.sub(r'(Calidad.*?\])', '', title)
        title = title.replace("�", "ñ").replace("Ã±", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "").replace("de la Serie", "")
        if real_title == "":
            real_title = title
        if calidad == "":
            calidad = title
        context = "movie"
        url_real = True

        # skip anything that is not a video
        if "/juego/" in url or "/varios/" in url:
            if "juego/" in url:
                continue

        if ".com/series" in url:
        # Code to salvage what we can from pelisy.series.com Series for the video library: the URL points
        # to the episode, not the Series, and the Series name is often blank. It is recovered from the
        # thumb, as is the series id.
        if ("/serie" in url or "-serie" in url) and "pelisyseries.com" in host:
            if "seriehd" in url:
                calidad_mps = "series-hd/"
            elif "serievo" in url:
                calidad_mps = "series-vo/"
            elif "serie-vo" in url:
                calidad_mps = "series-vo/"
            else:
                calidad_mps = "series/"

            if "no_image" in thumb:
                real_title_mps = title
            else:
                real_title_mps = re.sub(r'.*?\/\d+_', '', thumb)
                real_title_mps = re.sub(r'\.\w+.*?', '', real_title_mps)

            if "/0_" not in thumb and not "no_image" in thumb:
                serieid = scrapertools.find_single_match(thumb, r'.*?\/\w\/(?P<serieid>\d+).*?.*')
                if len(serieid) > 5:
                    serieid = ""
            else:
                serieid = ""

            show = real_title
            # check whether the tvshow url we built is valid, or whether we must fall back
            url_tvshow = host + calidad_mps + real_title_mps + "/"
            url_id = host + calidad_mps + real_title_mps + "/" + serieid
            data_serie = data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(url_id).data)
            data_serie = unicode(data_serie, "iso-8859-1", errors="replace").encode("utf-8")
            data_serie = data_serie.replace("chapters", "buscar-list")
            pattern = '<ul class="%s">(.*?)</ul>' % "buscar-list"  # item.pattern
            if not scrapertools.find_single_match(data_serie, pattern):
                data_serie = data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(url_tvshow).data)
                data_serie = unicode(data_serie, "iso-8859-1", errors="replace").encode("utf-8")
                data_serie = data_serie.replace("chapters", "buscar-list")
                if not scrapertools.find_single_match(data_serie, pattern):
                    context = "movie"
                    url_real = False
                    if not config.get_setting("unify"):  # If Smart Titles are NOT selected:
                        if calidad:
                            title = title + '[' + calidad + "]"
                else:
                    url = url_tvshow
            else:
                url = url_id

            real_title_mps = real_title_mps.replace("-", " ")
            logger.debug("url: " + url + " / title: " + title + " / real_title: " + real_title + " / real_title_mps: " + real_title_mps + " / calidad_mps : " + calidad_mps + " / context : " + context)
            real_title = real_title_mps

        show = real_title

        itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumb,
                             context=["buscar_trailer"], contentSerieName=show))
        if ".com/serie" in url and "/miniseries" not in url and url_real:
            if not config.get_setting("unify"):  # If Smart Titles are NOT selected:
                if calidad:
                    title = title + '[' + calidad + "]"
            context = "tvshow"

            itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumb, quality=calidad,
                                 show=show, extra="serie", context=["buscar_trailer"], contentType=context, contentTitle=real_title, contentSerieName=real_title, infoLabels={'year': year}))
        else:
            if config.get_setting("unify"):  # If Smart Titles ARE selected:
                title = real_title

            itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb,
                                 context=["buscar_trailer"]))

            itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb, quality=calidad,
                                 show=show, context=["buscar_trailer"], contentType=context, contentTitle=real_title, infoLabels={'year': year}))

        logger.debug("url: " + url + " / title: " + title + " / real_title: " + real_title + " / show: " + show + " / calidad: " + calidad)

    tmdb.set_infoLabels(itemlist, True)

    if post:
        itemlist.append(item.clone(channel=item.channel, action="listado2", title=">> Página siguiente",
                                   thumbnail=get_thumb("next.png")))
        itemlist.append(item.clone(channel=item.channel, action="listado_busqueda", title=">> Página siguiente",
                                   text_color='yellow', text_bold=True, thumbnail=get_thumb("next.png")))

    return itemlist
def findvideos(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
|
||||
## Cualquiera de las tres opciones son válidas
|
||||
# item.url = item.url.replace(".com/",".com/ver-online/")
|
||||
# item.url = item.url.replace(".com/",".com/descarga-directa/")
|
||||
@@ -263,65 +324,87 @@ def findvideos(item):
|
||||
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
|
||||
data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
|
||||
data = data.replace("$!", "#!").replace("'", "\"").replace("ñ", "ñ").replace("//pictures", "/pictures")
|
||||
|
||||
title = scrapertools.find_single_match(data, "<h1><strong>([^<]+)<\/strong>[^<]+<\/h1>")
|
||||
title += scrapertools.find_single_match(data, "<h1><strong>[^<]+<\/strong>([^<]+)<\/h1>")
|
||||
caratula = scrapertools.find_single_match(data, '<div class="entry-left">.*?src="([^"]+)"')
|
||||
|
||||
#<div style="float:left;width:100%;min-height:70px;margin:10px 0px;"> <a href="javascript:void(0);" onClick="javascript:openTorrent();" title="Descargar torrent de Star Wars Los Ultimos Jedi " class="btn-torrent">Descarga tu Archivo torrent!</a> <script type="text/javascript"> function openTorrent() {var link = "http://advserver.xyz/v2/gena?gid=ADQGZS0ABR&uid=164"; window.open(link); window.location.href = "http://torrentlocura.com/descargar-torrent/104616_-1520707769-star-wars-los-ultimos-jedi--bluray-screeener/";} </script> </div>
|
||||
|
||||
title = scrapertools.find_single_match(data, "<h1.*?<strong>([^<]+)<\/strong>.*?<\/h1>") #corregido para adaptarlo a mispelisy.series.com
|
||||
title += scrapertools.find_single_match(data, "<h1.*?<strong>[^<]+<\/strong>([^<]+)<\/h1>") #corregido para adaptarlo a mispelisy.series.com
|
||||
#caratula = scrapertools.find_single_match(data, '<div class="entry-left">.*?src="([^"]+)"')
|
||||
caratula = scrapertools.find_single_match(data, '<h1.*?<img.*?src="([^"]+)')
|
||||
|
||||
patron = 'openTorrent.*?title=".*?class="btn-torrent">.*?function openTorrent.*?href = "(.*?)";'
|
||||
|
||||
# escraped torrent
|
||||
url = scrapertools.find_single_match(data, patron)
|
||||
logger.debug("urltorrent: " + url + " Title: " + title + " Caratula: " + caratula)
|
||||
if url != "":
|
||||
|
||||
if item.infoLabels['year']: #añadir el año al título general
|
||||
year = '[%s]' % str(item.infoLabels['year'])
|
||||
else:
|
||||
year = ""
|
||||
|
||||
if item.infoLabels['aired'] and item.contentType == "episode": #añadir el año de episodio para series
|
||||
year = scrapertools.find_single_match(str(item.infoLabels['aired']), r'\/(\d{4})')
|
||||
year = '[%s]' % year
|
||||
|
||||
title_gen = title
|
||||
if item.contentType == "episode": #scrapear información duplicada en Series
|
||||
title = re.sub(r'Temp.*?\[', '[', title)
|
||||
title = re.sub(r'\[Cap.*?\]', '', title)
|
||||
title_epi = '%sx%s - %s' % (str(item.contentSeason), str(item.contentEpisodeNumber), item.contentTitle)
|
||||
title_gen = '%s %s, %s' % (title_epi, year, title)
|
||||
title_torrent = '%s, %s' % (title_epi, item.contentSerieName)
|
||||
else:
|
||||
title_torrent = item.contentTitle
|
||||
|
||||
if item.infoLabels['quality']:
|
||||
if not config.get_setting("unify"): #Si Titulos Inteligentes NO seleccionados:
|
||||
title_torrent = '%s [%s]' %(title_torrent, item.infoLabels['quality'])
|
||||
else:
|
||||
title_torrent = '%s (%s)' %(title_torrent, item.infoLabels['quality'])
|
||||
if not config.get_setting("unify"): #Si Titulos Inteligentes NO seleccionados:
|
||||
title_gen = '[COLOR gold]**- Título: [/COLOR]%s -**' % (title_gen)
|
||||
else:
|
||||
title_gen = '[COLOR gold]Título: [/COLOR]%s' % (title_gen)
|
||||
if config.get_setting("quit_channel_name", "videolibrary") == 1 and item.contentChannel == "videolibrary":
|
||||
title_gen = '%s: %s' % (item.channel.capitalize(), title_gen)
|
||||
itemlist.append(item.clone(title=title_gen, action="", folder=False)) #Título con todos los datos del vídeo
|
||||
|
||||
title = title_torrent
|
||||
title_torrent = '[COLOR yellow][Torrent]- [/COLOR]%s [online]' % (title_torrent)
|
||||
    if url != "":  # Torrent
        itemlist.append(
            Item(channel=item.channel, action="play", server="torrent", title="[torrent] - " + title, fulltitle=title,
                 url=url, thumbnail=caratula, plot=item.plot, folder=False))
            Item(channel=item.channel, action="play", server="torrent", title=title_torrent, fulltitle=title,
                 url=url, thumbnail=caratula, plot=item.plot, infoLabels=item.infoLabels, folder=False))

        logger.debug("TORRENT: url: " + url + " / title: " + title + " / calidad: " + item.quality + " / context: " + str(item.context))

    # scrape the watch / download video links (single link or multiple links)
    data = data.replace("http://tumejorserie.com/descargar/url_encript.php?link=", "(")
    data = data.replace(
        'javascript:;" onClick="popup("http://www.torrentlocura.com/tl/library/include/ajax/get_modallinks.php?links=', "")

    logger.debug("matar %s" % data)

    # Old server-scraping scheme used by Newpct1. Since it does not work with Torrentlocura, it is replaced by the more common one below
    #patron_descargar = '<div id="tab2"[^>]+>.*?</ul>'
    #patron_ver = '<div id="tab3"[^>]+>.*?</ul>'

    #match_ver = scrapertools.find_single_match(data, patron_ver)
    #match_descargar = scrapertools.find_single_match(data, patron_descargar)

    #patron = '<div class="box1"><img src="([^"]+)".*?'  # logo
    #patron += '<div class="box2">([^<]+)</div>'  # server
    #patron += '<div class="box3">([^<]+)</div>'  # language
    #patron += '<div class="box4">([^<]+)</div>'  # quality
    #patron += '<div class="box5"><a href="([^"]+)".*?'  # link
    #patron += '<div class="box6">([^<]+)</div>'  # title

    #enlaces_ver = re.compile(patron, re.DOTALL).findall(match_ver)
    #enlaces_descargar = re.compile(patron, re.DOTALL).findall(match_descargar)
    data = re.sub(r'javascript:;" onClick="popup\("http:\/\/(?:www.)?torrentlocura.com\/\w{1,9}\/library\/include\/ajax\/get_modallinks.php\?links=', "", data)

    # New server-scraping scheme created by Torrentlocura, compatible with other Newpct1 clones
    patron = '<div class=\"box1\"[^<]+<img src=\"([^<]+)?" style[^<]+><\/div[^<]+<div class="box2">([^<]+)?<\/div[^<]+<div class="box3">([^<]+)?'
    patron += '<\/div[^<]+<div class="box4">([^<]+)?<\/div[^<]+<div class="box5"><a href=(.*?)? rel.*?'
    patron += '<\/div[^<]+<div class="box6">([^<]+)?<'
    logger.debug("Patron: " + patron)

    enlaces_ver = re.compile(patron, re.DOTALL).findall(data)
    enlaces_descargar = enlaces_ver
    logger.debug(enlaces_ver)
    #logger.debug(enlaces_ver)
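To see what the box1..box6 pattern yields, here is a minimal check against one invented server row (the HTML, server name and URL are illustrative only; plain re reproduces the findall, so scrapertools is not needed):

import re

data = ('<div class="box1"><img src="logo.png" style="x"></div>'
        '<div class="box2">powvideo</div><div class="box3">Español</div>'
        '<div class="box4">HD</div><div class="box5">'
        '<a href=http://example.com/v/1 rel="nofollow"></a></div>'
        '<div class="box6">Ver online</div>')
patron = '<div class=\"box1\"[^<]+<img src=\"([^<]+)?" style[^<]+><\/div[^<]+<div class="box2">([^<]+)?<\/div[^<]+<div class="box3">([^<]+)?'
patron += '<\/div[^<]+<div class="box4">([^<]+)?<\/div[^<]+<div class="box5"><a href=(.*?)? rel.*?'
patron += '<\/div[^<]+<div class="box6">([^<]+)?<'
print(re.compile(patron, re.DOTALL).findall(data))
# [('logo.png', 'powvideo', 'Español', 'HD', 'http://example.com/v/1', 'Ver online')]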
    if len(enlaces_ver) > 0:
        if not config.get_setting("unify"):  # if Smart Titles are NOT selected:
            itemlist.append(item.clone(title="[COLOR gold]**- Enlaces Ver: -**[/COLOR]", action="", folder=False))
        else:
            itemlist.append(item.clone(title="[COLOR gold] Enlaces Ver: [/COLOR]", action="", folder=False))

    for logo, servidor, idioma, calidad, enlace, titulo in enlaces_ver:
        if "Ver" in titulo:
            servidor = servidor.replace("streamin", "streaminto")
            titulo = titulo + " [" + servidor + "]"
            titulo = title
            mostrar_server = True
            if config.get_setting("hidepremium"):
                mostrar_server = servertools.is_server_enabled(servidor)
            titulo = '[COLOR yellow][%s]-[/COLOR] %s [online]' % (servidor.capitalize(), titulo)
            logger.debug("VER: url: " + enlace + " / title: " + titulo + " / servidor: " + servidor + " / idioma: " + idioma)

            if mostrar_server:
                try:
                    devuelve = servertools.findvideosbyserver(enlace, servidor)
@@ -329,31 +412,46 @@ def findvideos(item):
                    enlace = devuelve[0][1]
                    itemlist.append(
                        Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo,
                             fulltitle=item.title, url=enlace, thumbnail=logo, plot=item.plot, folder=False))
                             fulltitle=title, url=enlace, thumbnail=logo, plot=item.plot, infoLabels=item.infoLabels, folder=False))
                except:
                    pass

    if len(enlaces_descargar) > 0:
        if not config.get_setting("unify"):  # if Smart Titles are NOT selected:
            itemlist.append(item.clone(title="[COLOR gold]**- Enlaces Descargar: -**[/COLOR]", action="", folder=False))
        else:
            itemlist.append(item.clone(title="[COLOR gold] Enlaces Descargar: [/COLOR]", action="", folder=False))

    for logo, servidor, idioma, calidad, enlace, titulo in enlaces_descargar:
        if "Ver" not in titulo:
            servidor = servidor.replace("uploaded", "uploadedto")
            partes = enlace.split(" ")
            titulo = "Descarga "
            p = 1
            logger.debug("DESCARGAR: url: " + enlace + " / title: " + titulo + title + " / servidor: " + servidor + " / idioma: " + idioma)
            for enlace in partes:
                parte_titulo = titulo + " (%s/%s)" % (p, len(partes)) + " [" + servidor + "]"
                parte_titulo = titulo + " (%s/%s)" % (p, len(partes))
                p += 1
                mostrar_server = True
                if config.get_setting("hidepremium"):
                    mostrar_server = servertools.is_server_enabled(servidor)
                parte_titulo = '[COLOR yellow][%s]-[/COLOR] %s' % (servidor.capitalize(), parte_titulo)
                if item.infoLabels['quality']:
                    if not config.get_setting("unify"):  # if Smart Titles are NOT selected:
                        parte_titulo = '%s [%s]' % (parte_titulo, item.infoLabels['quality'])
                    else:
                        parte_titulo = '%s (%s)' % (parte_titulo, item.infoLabels['quality'])
                if mostrar_server:
                    try:
                        devuelve = servertools.findvideosbyserver(enlace, servidor)
                        if devuelve:
                            enlace = devuelve[0][1]
                            itemlist.append(Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor,
                                                 title=parte_titulo, fulltitle=item.title, url=enlace, thumbnail=logo,
                                                 plot=item.plot, folder=False))
                                                 title=parte_titulo, fulltitle=title, url=enlace, thumbnail=logo,
                                                 plot=item.plot, infoLabels=item.infoLabels, folder=False))
                    except:
                        pass

    return itemlist
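The download branch above turns one entry whose link field packs several space-separated URLs into numbered "Descarga (n/total)" parts. A minimal sketch with invented URLs:

enlace = "http://example.com/p1 http://example.com/p2 http://example.com/p3"  # invented
titulo = "Descarga "
partes = enlace.split(" ")
p = 1
for parte in partes:
    parte_titulo = titulo + " (%s/%s)" % (p, len(partes))
    p += 1
    print("%s -> %s" % (parte_titulo, parte))  # Descarga  (1/3) -> http://example.com/p1 ...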
@@ -363,6 +461,8 @@ def episodios(item):
    infoLabels = item.infoLabels
    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    calidad = item.quality

    pattern = '<ul class="%s">(.*?)</ul>' % "pagination"  # item.pattern
    pagination = scrapertools.find_single_match(data, pattern)
    if pagination:
@@ -378,76 +478,128 @@ def episodios(item):
        list_pages = [item.url]

    for index, page in enumerate(list_pages):
        logger.debug("Loading page %s/%s url=%s" % (index, len(list_pages), page))
        data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(page).data)
        data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")

        data = data.replace("chapters", "buscar-list")  # compatibility with mispelisyseries.com
        pattern = '<ul class="%s">(.*?)</ul>' % "buscar-list"  # item.pattern
        data = scrapertools.get_match(data, pattern)
        if scrapertools.find_single_match(data, pattern):
            data = scrapertools.get_match(data, pattern)
        else:
            logger.debug(item)
            logger.debug("data: " + data)
            return itemlist

        pattern = '<li[^>]*><a href="(?P<url>[^"]+).*?<img src="(?P<thumb>[^"]+)".*?<h2[^>]+>(?P<info>.*?)</h2>'
        if "pelisyseries.com" in host:
            pattern = '<li[^>]*><div class.*?src="(?P<thumb>[^"]+)?".*?<a class.*?href="(?P<url>[^"]+).*?<h3[^>]+>(?P<info>.*?)?<\/h3>.*?<\/li>'
        else:
            pattern = '<li[^>]*><a href="(?P<url>[^"]+).*?<img.*?src="(?P<thumb>[^"]+)?".*?<h2[^>]+>(?P<info>.*?)?<\/h2>'
        matches = re.compile(pattern, re.DOTALL).findall(data)
        #logger.debug("patron: " + pattern)
        #logger.debug(matches)

        season = "1"

        for url, thumb, info in matches:

            if "pelisyseries.com" in host:
                interm = url
                url = thumb
                thumb = interm

            if "<span" in info:  # new style
                pattern = ".*?[^>]+>.*?Temporada\s*(?P<season>\d+)\s*Capitulo(?:s)?\s*(?P<episode>\d+)" \
                          "(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)</span>\s*Calidad\s*<span[^>]+>" \
                          "[\[]\s*(?P<quality>.*?)\s*[\]]</span>"
                pattern = ".*?[^>]+>.*?Temporada\s*(?P<season>\d+)?.*?Capitulo(?:s)?\s*(?P<episode>\d+)?" \
                          "(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)?<\/span>\s*Calidad\s*<span[^>]+>" \
                          "[\[]\s*(?P<quality>.*?)?\s*[\]]<\/span>"
                if "Especial" in info:  # special episodes
                    pattern = ".*?[^>]+>.*?Temporada.*?\[.*?(?P<season>\d+).*?\].*?Capitulo.*?\[\s*(?P<episode>\d+).*?\]?(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)?<\/span>\s*Calidad\s*<span[^>]+>[\[]\s*(?P<quality>.*?)?\s*[\]]<\/span>"

                if not scrapertools.find_single_match(info, pattern):  # on a format error, build a basic info string
                    logger.debug("patron episodioNEW: " + pattern)
                    logger.debug(info)
                    info = '><strong>%sTemporada %s Capitulo 0</strong> - <span >Español Castellano</span> Calidad <span >[%s]</span>' % (item.contentTitle, season, item.infoLabels['quality'])
                r = re.compile(pattern)
                match = [m.groupdict() for m in r.finditer(info)][0]

                if match['season'] is None: match['season'] = season
                if match['episode'] is None: match['episode'] = "0"
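How the named-group pattern above is consumed: re.finditer() plus groupdict() returns one dict per match, optional groups come back as None, and the two defaults just filled in season/episode. A run against an invented string shaped like the fallback info built above:

# -*- coding: utf-8 -*-
import re

info = ('><strong>Mi Serie Temporada 2 Capitulo 5</strong> - '
        '<span >Español</span> Calidad <span >[HDTV]</span>')  # invented sample
pattern = (".*?[^>]+>.*?Temporada\s*(?P<season>\d+)?.*?Capitulo(?:s)?\s*(?P<episode>\d+)?"
           "(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)?<\/span>\s*Calidad\s*<span[^>]+>"
           "[\[]\s*(?P<quality>.*?)?\s*[\]]<\/span>")
match = [m.groupdict() for m in re.finditer(pattern, info)][0]
print("%(season)sx%(episode)s [%(lang)s] [%(quality)s]" % match)
# 2x5 [Español] [HDTV]  (episode2 is None here and would fall back to the defaults)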
                if match['quality']:
                    item.quality = match['quality']

                if match["episode2"]:
                    multi = True
                    title = "%s (%sx%s-%s) [%s][%s]" % (item.show, match["season"], str(match["episode"]).zfill(2),
                                                        str(match["episode2"]).zfill(2), match["lang"],
                                                        match["quality"])
                    title = "%s (%sx%s-%s) [%s]" % (item.show, match["season"], str(match["episode"]).zfill(2),
                                                    str(match["episode2"]).zfill(2), match["lang"])
                    if not config.get_setting("unify") and match["quality"]:  # if Smart Titles are NOT selected:
                        title = "%s[%s]" % (title, match["quality"])
                else:
                    multi = False
                    title = "%s (%sx%s) [%s][%s]" % (item.show, match["season"], str(match["episode"]).zfill(2),
                                                     match["lang"], match["quality"])
                    title = "%s (%sx%s) [%s]" % (item.show, match["season"], str(match["episode"]).zfill(2),
                                                 match["lang"])
                    if not config.get_setting("unify") and match["quality"]:  # if Smart Titles are NOT selected:
                        title = "%s[%s]" % (title, match["quality"])

            else:  # old style
                pattern = "\[(?P<quality>.*?)\].*?\[Cap.(?P<season>\d+)(?P<episode>\d{2})(?:_(?P<season2>\d+)" \
                if scrapertools.find_single_match(info, '\[\d{3}\]'):
                    info = re.sub(r'\[(\d{3}\])', r'[Cap.\1', info)
                elif scrapertools.find_single_match(info, '\[Cap.\d{2}_\d{2}\]'):
                    info = re.sub(r'\[Cap.(\d{2})_(\d{2})\]', r'[Cap.1\1_1\2]', info)
                elif scrapertools.find_single_match(info, '\[Cap.([A-Za-z]+)\]'):
                    info = re.sub(r'\[Cap.([A-Za-z]+)\]', '[Cap.100]', info)
                if scrapertools.find_single_match(info, '\[Cap.\d{2,3}'):
                    pattern = "\[(?P<quality>.*?)\].*?\[Cap.(?P<season>\d).*?(?P<episode>\d{2})(?:_(?P<season2>\d+)" \
                              "(?P<episode2>\d{2}))?.*?\].*?(?:\[(?P<lang>.*?)\])?"
                elif scrapertools.find_single_match(info, 'Cap.\d{2,3}'):
                    pattern = ".*?Temp.*?\s(?P<quality>.*?)\s.*?Cap.(?P<season>\d).*?(?P<episode>\d{2})(?:_(?P<season2>\d+)(?P<episode2>\d{2}))?.*?\s(?P<lang>.*)?"
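The three re.sub() normalisations above rewrite the odd "old style" markers into the canonical [Cap.SEE] form before a pattern is chosen. Run on invented titles (plain re.search stands in for scrapertools.find_single_match):

# -*- coding: utf-8 -*-
import re

for info in ["Serie [HDTV][301][Español]",          # [301]      -> [Cap.301]
             "Serie [HDTV][Cap.05_08][Español]",    # two digits -> [Cap.105_108]
             "Serie [HDTV][Cap.Final][Español]"]:   # letters    -> [Cap.100]
    if re.search(r'\[\d{3}\]', info):
        info = re.sub(r'\[(\d{3}\])', r'[Cap.\1', info)
    elif re.search(r'\[Cap.\d{2}_\d{2}\]', info):
        info = re.sub(r'\[Cap.(\d{2})_(\d{2})\]', r'[Cap.1\1_1\2]', info)
    elif re.search(r'\[Cap.([A-Za-z]+)\]', info):
        info = re.sub(r'\[Cap.([A-Za-z]+)\]', '[Cap.100]', info)
    print(info)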
                if not scrapertools.find_single_match(info, pattern):  # on a format error, build a basic info string
                    logger.debug("patron episodioOLD: " + pattern)
                    logger.debug(info)
                    info = '%s [%s][Cap.%s00][Español]' % (item.contentTitle, item.infoLabels['quality'], season)
                r = re.compile(pattern)
                match = [m.groupdict() for m in r.finditer(info)][0]
                # logger.debug("data %s" % match)

                str_lang = ""
                if match['quality']:
                    item.quality = match['quality']

                if match["lang"] is not None:
                    str_lang = "[%s]" % match["lang"]
                    item.quality = "%s %s" % (item.quality, match['lang'])

                if match["season2"] and match["episode2"]:
                    multi = True
                    if match["season"] == match["season2"]:
                        title = "%s (%sx%s-%s) %s[%s]" % (item.show, match["season"], match["episode"],
                                                          match["episode2"], str_lang, match["quality"])
                        title = "%s (%sx%s-%s) %s" % (item.show, match["season"], match["episode"],
                                                      match["episode2"], str_lang)
                        if not config.get_setting("unify") and match["quality"]:  # if Smart Titles are NOT selected:
                            title = "%s[%s]" % (title, match["quality"])
                    else:
                        title = "%s (%sx%s-%sx%s) %s[%s]" % (item.show, match["season"], match["episode"],
                                                             match["season2"], match["episode2"], str_lang,
                                                             match["quality"])
                        title = "%s (%sx%s-%sx%s) %s" % (item.show, match["season"], match["episode"],
                                                         match["season2"], match["episode2"], str_lang)
                        if not config.get_setting("unify") and match["quality"]:  # if Smart Titles are NOT selected:
                            title = "%s[%s]" % (title, match["quality"])
                else:
                    title = "%s (%sx%s) %s[%s]" % (item.show, match["season"], match["episode"], str_lang,
                                                   match["quality"])
                    title = "%s (%sx%s) %s" % (item.show, match["season"], match["episode"], str_lang)
                    if not config.get_setting("unify") and match["quality"]:  # if Smart Titles are NOT selected:
                        title = "%s[%s]" % (title, match["quality"])
                    multi = False

            season = match['season']
            episode = match['episode']
            logger.debug("title: " + title + " / url: " + url + " / calidad: " + item.quality + " / multi: " + str(multi) + " / Season: " + str(season) + " / EpisodeNumber: " + str(episode))
            itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb,
                                 quality=item.quality, multi=multi, contentSeason=season,
                                 contentEpisodeNumber=episode, infoLabels = infoLabels))

    # order list
    #tmdb.set_infoLabels(itemlist, True)
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True)
    if len(itemlist) > 1:
        itemlist = sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber)))
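The int() casts in the sort key above matter: they order episodes numerically rather than lexically ("10" would otherwise sort before "9"). A stand-alone illustration with stand-in items:

eps = [{"contentSeason": "2", "contentEpisodeNumber": "10"},
       {"contentSeason": "1", "contentEpisodeNumber": "2"},
       {"contentSeason": "2", "contentEpisodeNumber": "9"}]
eps = sorted(eps, key=lambda it: (int(it["contentSeason"]), int(it["contentEpisodeNumber"])))
print([(e["contentSeason"], e["contentEpisodeNumber"]) for e in eps])
# [('1', '2'), ('2', '9'), ('2', '10')]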
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            item.clone(title="[COLOR orange][B]Añadir esta serie a la videoteca[/B][/COLOR]", action="add_serie_to_library", extra="episodios"))
            item.clone(title="Añadir esta serie a la videoteca", action="add_serie_to_library", extra="episodios", quality=calidad))

    return itemlist

@@ -458,8 +610,8 @@ def search(item, texto):
    try:
        item.post = "q=%s" % texto
        item.pattern = "buscar-list"
        itemlist = listado2(item)
        itemlist = listado_busqueda(item)

        return itemlist

    # The exception is caught so a failing channel does not break the global search
@@ -485,6 +637,24 @@ def newest(categoria):
            itemlist.extend(listado(item))
            if itemlist[-1].title == ">> Página siguiente":
                itemlist.pop()

        if categoria == 'peliculas 4k':
            item.url = host+'peliculas-hd/4kultrahd/'
            itemlist.extend(listado(item))
            if itemlist[-1].title == ">> Página siguiente":
                itemlist.pop()

        if categoria == 'anime':
            item.url = host+'anime/'
            itemlist.extend(listado(item))
            if itemlist[-1].title == ">> Página siguiente":
                itemlist.pop()

        if categoria == 'documentales':
            item.url = host+'documentales/'
            itemlist.extend(listado(item))
            if itemlist[-1].title == ">> Página siguiente":
                itemlist.pop()

    # The exception is caught so a failing channel does not break the "newest" channel
    except:
@@ -10,7 +10,8 @@
    "movie",
    "tvshow",
    "anime",
    "torrent"
    "torrent",
    "documentary"
  ],
  "settings": [
    {
@@ -21,6 +22,22 @@
      "enabled": true,
      "visible": true
    },
    {
      "id": "include_in_newest_peliculas",
      "type": "bool",
      "label": "Incluir en Novedades - Peliculas",
      "default": true,
      "enabled": true,
      "visible": true
    },
    {
      "id": "include_in_newest_series",
      "type": "bool",
      "label": "Incluir en Novedades - Episodios de series",
      "default": true,
      "enabled": true,
      "visible": true
    },
    {
      "id": "include_in_newest_torrent",
      "type": "bool",
@@ -28,6 +45,14 @@
      "default": true,
      "enabled": true,
      "visible": true
    },
    {
      "id": "include_in_newest_4k",
      "type": "bool",
      "label": "Incluir en Novedades - 4K",
      "default": true,
      "enabled": true,
      "visible": true
    }
  ]
}
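These per-channel booleans are what newest() is gated on. A sketch of how such a flag is typically read in Alfa (the exact get_setting signature is an assumption here, not something this diff shows):

from platformcode import config

def channel_in_newest(channel_name, category):
    # settings ids above follow the pattern "include_in_newest_<category>"
    return config.get_setting("include_in_newest_" + category, channel_name)

# e.g. channel_in_newest("torrentrapid", "peliculas") -> True by default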
@@ -10,7 +10,7 @@ from core.item import Item
from platformcode import config, logger
from core import tmdb

host = 'http://torrentrapid.com/'  # Manually change "xx" in ".com/xx/library" on line 287: tl for torrentlocura, tr for torrentrapid, d20 for descargas2020
host = 'http://torrentrapid.com/'

def mainlist(item):
    logger.info()

@@ -40,12 +40,15 @@ def submenu(item):

    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    data = data.replace("'", "\"").replace("/series\"", "/series/\"")  # compatibility with mispelisyseries.com

    #patron = '<li><a href="http://(?:www.)?torrentrapid.com/' + item.extra + '/">.*?<ul>(.*?)</ul>'
    patron = '<li><a href="' + item.url + item.extra + '/">.*?<ul>(.*?)</ul>'  # filtered by url
    data = scrapertools.get_match(data, patron)
    patron = '<li><a href="http://(?:www.)?torrentrapid.com/' + item.extra + '/">.*?<ul.*?>(.*?)</ul>'
    if "pelisyseries.com" in host and item.extra == "varios":  # compatibility with mispelisyseries.com
        data = '<a href="http://torrentrapid.com/varios/" title="Documentales"><i class="icon-rocket"></i> Documentales</a>'
    else:
        data = scrapertools.get_match(data, patron)

    patron = '<a href="([^"]+)".*?>([^>]+)</a>'
    patron = '<.*?href="([^"]+)".*?>([^>]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
@@ -92,15 +95,11 @@ def listado(item):
    url_next_page = ''

    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    #data = httptools.downloadpage(item.url).data
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    #logger.debug(data)
    logger.debug('item.modo: %s' % item.modo)
    logger.debug('item.extra: %s' % item.extra)

    if item.modo != 'next' or item.modo == '':
        logger.debug('item.title: %s' % item.title)
        patron = '<ul class="' + item.extra + '">(.*?)</ul>'
        logger.debug("patron=" + patron)
        fichas = scrapertools.get_match(data, patron)
        page_extra = item.extra
    else:
@@ -109,11 +108,11 @@ def listado(item):

    patron = '<a href="([^"]+).*?'  # the url
    patron += 'title="([^"]+).*?'  # the title
    patron += '<img src="([^"]+)"[^>]+>.*?'  # the thumbnail
    #patron += '<span>([^<].*?)<'  # the quality: original NewPCT1 pattern; if the quality is missing, the following "matches" enters a loop
    patron += '<img.*?src="([^"]+)"[^>]+>.*?'  # the thumbnail
    patron += '<h2.*?>(.*?)?<\/h2>'  # alternative title
    patron += '<span>([^<].*?)?<'  # the quality
    #logger.debug("patron: " + patron + " / fichas: " + fichas)
    matches = re.compile(patron, re.DOTALL).findall(fichas)
    logger.debug('item.next_page: %s' % item.next_page)

    # Pagination
    if item.next_page != 'b':
@@ -132,20 +131,23 @@ def listado(item):
            url_next_page = matches_next_page[0]
            modo = 'next'

    for scrapedurl, scrapedtitle, scrapedthumbnail, calidad in matches:
    for scrapedurl, scrapedtitle, scrapedthumbnail, title_alt, calidad in matches:
        url = scrapedurl
        title = scrapedtitle
        title = scrapedtitle.replace("�", "ñ").replace("Ã±", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "")
        title_alt = title_alt.replace("�", "ñ").replace("Ã±", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "")
        thumbnail = scrapedthumbnail
        action = "findvideos"
        extra = ""
        context = "movie"
        year = scrapertools.find_single_match(scrapedthumbnail, r'-(\d{4})')
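The year comes from the thumbnail filename (the sites encode it as "...-YYYY.jpg"). An equivalent plain-re sketch, with an invented thumbnail URL:

import re

thumb = "http://torrentrapid.com/pictures/c/mi-pelicula-2017.jpg"  # invented
m = re.search(r'-(\d{4})', thumb)
year = m.group(1) if m else ""   # find_single_match returns "" on no match
print(year)  # 2017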
if ".com/series" in url:
|
||||
|
||||
if ".com/serie" in url and "/miniseries" not in url:
|
||||
action = "episodios"
|
||||
extra = "serie"
|
||||
|
||||
context = "tvshow"
|
||||
|
||||
title = scrapertools.find_single_match(title, '([^-]+)')
|
||||
title = title.replace("Ver online", "", 1).replace("Descarga Serie HD", "", 1).replace("Ver en linea", "",
|
||||
title = title.replace("Ver online", "", 1).replace("Descarga Serie HD", "", 1).replace("Ver en linea ", "",
|
||||
1).strip()
|
||||
|
||||
else:
|
||||
@@ -153,39 +155,24 @@ def listado(item):
|
||||
if title.endswith("gratis"): title = title[:-7]
|
||||
if title.endswith("torrent"): title = title[:-8]
|
||||
if title.endswith("en HD"): title = title[:-6]
|
||||
|
||||
show = title
|
||||
if item.extra != "buscar-list":
|
||||
title = title + ' ' + calidad
|
||||
|
||||
context = ""
|
||||
context_title = scrapertools.find_single_match(url, "http://(?:www.)?torrentrapid.com/(.*?)/(.*?)/")
|
||||
if context_title:
|
||||
try:
|
||||
context = context_title[0].replace("descargar-", "").replace("pelicula", "movie").replace("series",
|
||||
"tvshow")
|
||||
context_title = context_title[1].replace("-", " ")
|
||||
if re.search('\d{4}', context_title[-4:]):
|
||||
context_title = context_title[:-4]
|
||||
elif re.search('\(\d{4}\)', context_title[-6:]):
|
||||
context_title = context_title[:-6]
|
||||
|
||||
except:
|
||||
context_title = show
|
||||
logger.debug('contxt title: %s'%context_title)
|
||||
logger.debug('year: %s' % year)
|
||||
|
||||
logger.debug('context: %s' % context)
|
||||
|
||||
if title == "":
|
||||
title = title_alt
|
||||
context_title = title_alt
|
||||
show = title_alt
|
||||
if not config.get_setting("unify"): #Si Titulos Inteligentes NO seleccionados:
|
||||
if calidad:
|
||||
title = title + ' [' + calidad + "]"
|
||||
|
||||
if not 'array' in title:
|
||||
itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail,
|
||||
extra = extra,
|
||||
show = context_title, contentTitle=context_title, contentType=context,
|
||||
context=["buscar_trailer"], infoLabels= {'year':year}))
|
||||
extra = extra, show = context_title, contentTitle=context_title, contentType=context, quality=calidad,
|
||||
context=["buscar_trailer"], infoLabels= {'year':year}))
|
||||
|
||||
logger.debug("url: " + url + " / title: " + title + " / contxt title: " + context_title + " / context: " + context + " / calidad: " + calidad+ " / year: " + year)
|
||||
|
||||
tmdb.set_infoLabels(itemlist, True)
|
||||
|
||||
|
||||
|
||||
if url_next_page:
|
||||
itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente",
|
||||
url=url_next_page, next_page=next_page, folder=True,
|
||||
@@ -193,12 +180,12 @@ def listado(item):
|
||||
extra = page_extra))
|
||||
return itemlist
|
||||
|
||||
def listado2(item):
|
||||
def listado_busqueda(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url, post=item.post).data)
|
||||
data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
|
||||
|
||||
|
||||
list_chars = [["ñ", "ñ"]]
|
||||
|
||||
for el in list_chars:
|
||||
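The list_chars loop (its body appears in full further down in this diff) undoes iso-8859-1 mojibake: "Ã±" is what UTF-8 "ñ" becomes after the wrong decode. A self-contained run on an invented string:

# -*- coding: utf-8 -*-
import re

list_chars = [["Ã±", "ñ"]]
data = "EspaÃ±a 1x01"  # invented mojibake sample
for el in list_chars:
    data = re.sub(r"%s" % el[0], el[1], data)
print(data)  # España 1x01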
@@ -218,42 +205,116 @@ def listado2(item):

    pattern = '<ul class="%s">(.*?)</ul>' % item.pattern
    data = scrapertools.get_match(data, pattern)
    pattern = '<li><a href="(?P<url>[^"]+)".*?<img src="(?P<img>[^"]+)"[^>]+>.*?<h2.*?>\s*(?P<title>.*?)\s*</h2>'

    pattern = '<li[^>]*><a href="(?P<url>[^"]+).*?<img.*?src="(?P<thumb>[^"]+)?".*?<h2.*?>(?P<title>.*?)?<\/h2>'
    matches = re.compile(pattern, re.DOTALL).findall(data)

    for url, thumb, title in matches:
        # fix encoding for title
        real_title = scrapertools.find_single_match(title, r'font color.*?font.*?><b>(.*?)<\/b><\/font>')
        real_title = scrapertools.find_single_match(title, r'<strong.*?>(.*?)Temporada.*?<\/strong>')  # series
        if not real_title:
            real_title = scrapertools.find_single_match(title, r'(.*?)\[.*?]')  # movies
        real_title = scrapertools.remove_htmltags(real_title).decode('iso-8859-1').encode('utf-8')
        real_title = scrapertools.htmlclean(real_title)
        real_title = real_title.replace("�", "ñ").replace("Ã±", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "").replace("de la Serie", "")
        calidad = scrapertools.find_single_match(title, r'.*?\s*Calidad.*?<span[^>]+>[\[]\s*(?P<quality>.*?)\s*[\]]<\/span>')  # series
        if calidad == "":
            calidad = scrapertools.find_single_match(title, r'..*?(\[.*?.*\])')  # movies
        year = scrapertools.find_single_match(thumb, r'-(\d{4})')

        # fix encoding for title
        title = scrapertools.htmlclean(title)
        title = title.replace("�", "ñ")
        title = re.sub(r'(Calidad.*?\])', '', title)
        title = title.replace("�", "ñ").replace("Ã±", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "").replace("de la Serie", "")
        if real_title == "":
            real_title = title
        if calidad == "":
            calidad = title
        context = "movie"
        url_real = True

        # skip anything that is not a video
        if "/juego/" in url or "/varios/" in url:
        if "juego/" in url:
            continue

        if ".com/series" in url:
        # Code to recover what we can from pelisyseries.com Series for the video library: the URL points to the episode, not the Series; the Series name is often blank and is taken from the thumb, as is the series id
        if ("/serie" in url or "-serie" in url) and "pelisyseries.com" in host:
            if "seriehd" in url:
                calidad_mps = "series-hd/"
            elif "serievo" in url:
                calidad_mps = "series-vo/"
            elif "serie-vo" in url:
                calidad_mps = "series-vo/"
            else:
                calidad_mps = "series/"

            if "no_image" in thumb:
                real_title_mps = title
            else:
                real_title_mps = re.sub(r'.*?\/\d+_', '', thumb)
                real_title_mps = re.sub(r'\.\w+.*?', '', real_title_mps)

            if "/0_" not in thumb and not "no_image" in thumb:
                serieid = scrapertools.find_single_match(thumb, r'.*?\/\w\/(?P<serieid>\d+).*?.*')
                if len(serieid) > 5:
                    serieid = ""
            else:
                serieid = ""

            show = real_title
            # check whether the tvshow url we built is valid, or whether we must fall back
            url_tvshow = host + calidad_mps + real_title_mps + "/"
            url_id = host + calidad_mps + real_title_mps + "/" + serieid
            data_serie = data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(url_id).data)
            data_serie = unicode(data_serie, "iso-8859-1", errors="replace").encode("utf-8")
            data_serie = data_serie.replace("chapters", "buscar-list")
            pattern = '<ul class="%s">(.*?)</ul>' % "buscar-list"  # item.pattern
            if not scrapertools.find_single_match(data_serie, pattern):
                data_serie = data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(url_tvshow).data)
                data_serie = unicode(data_serie, "iso-8859-1", errors="replace").encode("utf-8")
                data_serie = data_serie.replace("chapters", "buscar-list")
                if not scrapertools.find_single_match(data_serie, pattern):
                    context = "movie"
                    url_real = False
                    if not config.get_setting("unify"):  # if Smart Titles are NOT selected:
                        if calidad:
                            title = title + '[' + calidad + "]"
                else:
                    url = url_tvshow
            else:
                url = url_id

            real_title_mps = real_title_mps.replace("-", " ")
            logger.debug("url: " + url + " / title: " + title + " / real_title: " + real_title + " / real_title_mps: " + real_title_mps + " / calidad_mps : " + calidad_mps + " / context : " + context)
            real_title = real_title_mps

        show = real_title

        itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumb,
                             context=["buscar_trailer"], contentSerieName=show))
        if ".com/serie" in url and "/miniseries" not in url and url_real:
            if not config.get_setting("unify"):  # if Smart Titles are NOT selected:
                if calidad:
                    title = title + '[' + calidad + "]"
            context = "tvshow"

            itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumb, quality=calidad,
                                 show=show, extra="serie", context=["buscar_trailer"], contentType=context, contentTitle=real_title, contentSerieName=real_title, infoLabels= {'year':year}))
        else:
            if config.get_setting("unify"):  # if Smart Titles ARE selected:
                title = real_title

            itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb,
                                 context=["buscar_trailer"]))

            itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb, quality=calidad,
                                 show=show, context=["buscar_trailer"], contentType=context, contentTitle=real_title, infoLabels= {'year':year}))

        logger.debug("url: " + url + " / title: " + title + " / real_title: " + real_title + " / show: " + show + " / calidad: " + calidad)

    tmdb.set_infoLabels(itemlist, True)

    if post:
        itemlist.append(item.clone(channel=item.channel, action="listado2", title=">> Página siguiente",
                                   thumbnail=get_thumb("next.png")))
        itemlist.append(item.clone(channel=item.channel, action="listado_busqueda", title=">> Página siguiente",
                                   text_color='yellow', text_bold=True, thumbnail=get_thumb("next.png")))

    return itemlist

def findvideos(item):
    logger.info()
    itemlist = []

    ## Any of the three options is valid
    # item.url = item.url.replace(".com/",".com/ver-online/")
    # item.url = item.url.replace(".com/",".com/descarga-directa/")
@@ -263,65 +324,87 @@ def findvideos(item):
    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    data = data.replace("$!", "#!").replace("'", "\"").replace("Ã±", "ñ").replace("//pictures", "/pictures")

    title = scrapertools.find_single_match(data, "<h1><strong>([^<]+)<\/strong>[^<]+<\/h1>")
    title += scrapertools.find_single_match(data, "<h1><strong>[^<]+<\/strong>([^<]+)<\/h1>")
    caratula = scrapertools.find_single_match(data, '<div class="entry-left">.*?src="([^"]+)"')

    #<div style="float:left;width:100%;min-height:70px;margin:10px 0px;"> <a href="javascript:void(0);" onClick="javascript:openTorrent();" title="Descargar torrent de Star Wars Los Ultimos Jedi " class="btn-torrent">Descarga tu Archivo torrent!</a> <script type="text/javascript"> function openTorrent() {var link = "http://advserver.xyz/v2/gena?gid=ADQGZS0ABR&uid=164"; window.open(link); window.location.href = "http://torrentrapid.com/descargar-torrent/104616_-1520707769-star-wars-los-ultimos-jedi--bluray-screeener/";} </script> </div>

    title = scrapertools.find_single_match(data, "<h1.*?<strong>([^<]+)<\/strong>.*?<\/h1>")  # fixed to adapt it to mispelisyseries.com
    title += scrapertools.find_single_match(data, "<h1.*?<strong>[^<]+<\/strong>([^<]+)<\/h1>")  # fixed to adapt it to mispelisyseries.com
    #caratula = scrapertools.find_single_match(data, '<div class="entry-left">.*?src="([^"]+)"')
    caratula = scrapertools.find_single_match(data, '<h1.*?<img.*?src="([^"]+)')

    patron = 'openTorrent.*?title=".*?class="btn-torrent">.*?function openTorrent.*?href = "(.*?)";'

    # scrape the torrent link
    url = scrapertools.find_single_match(data, patron)
    logger.debug("urltorrent: " + url + " Title: " + title + " Caratula: " + caratula)
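The openTorrent pattern above, applied to the sample snippet quoted in the comment a few lines up, pulls out the window.location.href target (plain re.search with DOTALL stands in for scrapertools.find_single_match):

import re

data = ('<a href="javascript:void(0);" onClick="javascript:openTorrent();" '
        'title="Descargar torrent de Star Wars Los Ultimos Jedi " class="btn-torrent">'
        'Descarga tu Archivo torrent!</a> <script type="text/javascript"> '
        'function openTorrent() {var link = "http://advserver.xyz/v2/gena?gid=ADQGZS0ABR&uid=164"; '
        'window.open(link); window.location.href = "http://torrentrapid.com/descargar-torrent/'
        '104616_-1520707769-star-wars-los-ultimos-jedi--bluray-screeener/";} </script>')
patron = 'openTorrent.*?title=".*?class="btn-torrent">.*?function openTorrent.*?href = "(.*?)";'
print(re.search(patron, data, re.DOTALL).group(1))
# http://torrentrapid.com/descargar-torrent/104616_-1520707769-star-wars-los-ultimos-jedi--bluray-screeener/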
if url != "":
|
||||
|
||||
if item.infoLabels['year']: #añadir el año al título general
|
||||
year = '[%s]' % str(item.infoLabels['year'])
|
||||
else:
|
||||
year = ""
|
||||
|
||||
if item.infoLabels['aired'] and item.contentType == "episode": #añadir el año de episodio para series
|
||||
year = scrapertools.find_single_match(str(item.infoLabels['aired']), r'\/(\d{4})')
|
||||
year = '[%s]' % year
|
||||
|
||||
title_gen = title
|
||||
if item.contentType == "episode": #scrapear información duplicada en Series
|
||||
title = re.sub(r'Temp.*?\[', '[', title)
|
||||
title = re.sub(r'\[Cap.*?\]', '', title)
|
||||
title_epi = '%sx%s - %s' % (str(item.contentSeason), str(item.contentEpisodeNumber), item.contentTitle)
|
||||
title_gen = '%s %s, %s' % (title_epi, year, title)
|
||||
title_torrent = '%s, %s' % (title_epi, item.contentSerieName)
|
||||
else:
|
||||
title_torrent = item.contentTitle
|
||||
|
||||
if item.infoLabels['quality']:
|
||||
if not config.get_setting("unify"): #Si Titulos Inteligentes NO seleccionados:
|
||||
title_torrent = '%s [%s]' %(title_torrent, item.infoLabels['quality'])
|
||||
else:
|
||||
title_torrent = '%s (%s)' %(title_torrent, item.infoLabels['quality'])
|
||||
if not config.get_setting("unify"): #Si Titulos Inteligentes NO seleccionados:
|
||||
title_gen = '[COLOR gold]**- Título: [/COLOR]%s -**' % (title_gen)
|
||||
else:
|
||||
title_gen = '[COLOR gold]Título: [/COLOR]%s' % (title_gen)
|
||||
if config.get_setting("quit_channel_name", "videolibrary") == 1 and item.contentChannel == "videolibrary":
|
||||
title_gen = '%s: %s' % (item.channel.capitalize(), title_gen)
|
||||
itemlist.append(item.clone(title=title_gen, action="", folder=False)) #Título con todos los datos del vídeo
|
||||
|
||||
title = title_torrent
|
||||
title_torrent = '[COLOR yellow][Torrent]- [/COLOR]%s [online]' % (title_torrent)
|
||||
if url != "": #Torrent
|
||||
itemlist.append(
|
||||
Item(channel=item.channel, action="play", server="torrent", title="[torrent] - " + title, fulltitle=title,
|
||||
url=url, thumbnail=caratula, plot=item.plot, folder=False))
|
||||
Item(channel=item.channel, action="play", server="torrent", title=title_torrent, fulltitle=title,
|
||||
url=url, thumbnail=caratula, plot=item.plot, infoLabels=item.infoLabels, folder=False))
|
||||
|
||||
logger.debug("TORRENT: url: " + url + " / title: " + title + " / calidad: " + item.quality + " / context: " + str(item.context))
|
||||
|
||||
# escraped ver vídeos, descargar vídeos un link, múltiples liks
|
||||
|
||||
data = data.replace("http://tumejorserie.com/descargar/url_encript.php?link=", "(")
|
||||
data = data.replace(
|
||||
'javascript:;" onClick="popup("http://www.torrentrapid.com/tr/library/include/ajax/get_modallinks.php?links=', "")
|
||||
|
||||
logger.debug("matar %s" % data)
|
||||
|
||||
# Antiguo sistema de scrapeo de servidores usado por Newpct1. Como no funciona con torrentrapid, se sustituye por este más común
|
||||
#patron_descargar = '<div id="tab2"[^>]+>.*?</ul>'
|
||||
#patron_ver = '<div id="tab3"[^>]+>.*?</ul>'
|
||||
|
||||
#match_ver = scrapertools.find_single_match(data, patron_ver)
|
||||
#match_descargar = scrapertools.find_single_match(data, patron_descargar)
|
||||
|
||||
#patron = '<div class="box1"><img src="([^"]+)".*?' # logo
|
||||
#patron += '<div class="box2">([^<]+)</div>' # servidor
|
||||
#patron += '<div class="box3">([^<]+)</div>' # idioma
|
||||
#patron += '<div class="box4">([^<]+)</div>' # calidad
|
||||
#patron += '<div class="box5"><a href="([^"]+)".*?' # enlace
|
||||
#patron += '<div class="box6">([^<]+)</div>' # titulo
|
||||
|
||||
#enlaces_ver = re.compile(patron, re.DOTALL).findall(match_ver)
|
||||
#enlaces_descargar = re.compile(patron, re.DOTALL).findall(match_descargar)
|
||||
data = re.sub(r'javascript:;" onClick="popup\("http:\/\/(?:www.)?torrentrapid.com\/\w{1,9}\/library\/include\/ajax\/get_modallinks.php\?links=', "", data)
|
||||
|
||||
# Nuevo sistema de scrapeo de servidores creado por Torrentlocula, compatible con otros clones de Newpct1
|
||||
patron = '<div class=\"box1\"[^<]+<img src=\"([^<]+)?" style[^<]+><\/div[^<]+<div class="box2">([^<]+)?<\/div[^<]+<div class="box3">([^<]+)?'
|
||||
patron += '<\/div[^<]+<div class="box4">([^<]+)?<\/div[^<]+<div class="box5"><a href=(.*?)? rel.*?'
|
||||
patron += '<\/div[^<]+<div class="box6">([^<]+)?<'
|
||||
logger.debug("Patron: " + patron)
|
||||
|
||||
enlaces_ver = re.compile(patron, re.DOTALL).findall(data)
|
||||
enlaces_descargar = enlaces_ver
|
||||
logger.debug(enlaces_ver)
|
||||
#logger.debug(enlaces_ver)
|
||||
|
||||
if len(enlaces_ver) > 0:
|
||||
if not config.get_setting("unify"): #Si Titulos Inteligentes NO seleccionados:
|
||||
itemlist.append(item.clone(title="[COLOR gold]**- Enlaces Ver: -**[/COLOR]", action="", folder=False))
|
||||
else:
|
||||
itemlist.append(item.clone(title="[COLOR gold] Enlaces Ver: [/COLOR]", action="", folder=False))
|
||||
|
||||
for logo, servidor, idioma, calidad, enlace, titulo in enlaces_ver:
|
||||
if "Ver" in titulo:
|
||||
servidor = servidor.replace("streamin", "streaminto")
|
||||
titulo = titulo + " [" + servidor + "]"
|
||||
titulo = title
|
||||
mostrar_server = True
|
||||
if config.get_setting("hidepremium"):
|
||||
mostrar_server = servertools.is_server_enabled(servidor)
|
||||
titulo = '[COLOR yellow][%s]-[/COLOR] %s [online]' % (servidor.capitalize(), titulo)
|
||||
logger.debug("VER: url: " + enlace + " / title: " + titulo + " / servidor: " + servidor + " / idioma: " + idioma)
|
||||
|
||||
if mostrar_server:
|
||||
try:
|
||||
devuelve = servertools.findvideosbyserver(enlace, servidor)
|
||||
@@ -329,31 +412,46 @@ def findvideos(item):
|
||||
enlace = devuelve[0][1]
|
||||
itemlist.append(
|
||||
Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo,
|
||||
fulltitle=item.title, url=enlace, thumbnail=logo, plot=item.plot, folder=False))
|
||||
fulltitle=title, url=enlace, thumbnail=logo, plot=item.plot, infoLabels=item.infoLabels, folder=False))
|
||||
except:
|
||||
pass
|
||||
|
||||
if len(enlaces_descargar) > 0:
|
||||
if not config.get_setting("unify"): #Si Titulos Inteligentes NO seleccionados:
|
||||
itemlist.append(item.clone(title="[COLOR gold]**- Enlaces Descargar: -**[/COLOR]", action="", folder=False))
|
||||
else:
|
||||
itemlist.append(item.clone(title="[COLOR gold] Enlaces Descargar: [/COLOR]", action="", folder=False))
|
||||
|
||||
for logo, servidor, idioma, calidad, enlace, titulo in enlaces_descargar:
|
||||
if "Ver" not in titulo:
|
||||
servidor = servidor.replace("uploaded", "uploadedto")
|
||||
partes = enlace.split(" ")
|
||||
titulo = "Descarga "
|
||||
p = 1
|
||||
logger.debug("DESCARGAR: url: " + enlace + " / title: " + titulo + title + " / servidor: " + servidor + " / idioma: " + idioma)
|
||||
for enlace in partes:
|
||||
parte_titulo = titulo + " (%s/%s)" % (p, len(partes)) + " [" + servidor + "]"
|
||||
parte_titulo = titulo + " (%s/%s)" % (p, len(partes))
|
||||
p += 1
|
||||
mostrar_server = True
|
||||
if config.get_setting("hidepremium"):
|
||||
mostrar_server = servertools.is_server_enabled(servidor)
|
||||
parte_titulo = '[COLOR yellow][%s]-[/COLOR] %s' % (servidor.capitalize(), parte_titulo)
|
||||
if item.infoLabels['quality']:
|
||||
if not config.get_setting("unify"): #Si Titulos Inteligentes NO seleccionados:
|
||||
parte_titulo = '%s [%s]' %(parte_titulo, item.infoLabels['quality'])
|
||||
else:
|
||||
parte_titulo = '%s (%s)' %(parte_titulo, item.infoLabels['quality'])
|
||||
if mostrar_server:
|
||||
try:
|
||||
devuelve = servertools.findvideosbyserver(enlace, servidor)
|
||||
if devuelve:
|
||||
enlace = devuelve[0][1]
|
||||
itemlist.append(Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor,
|
||||
title=parte_titulo, fulltitle=item.title, url=enlace, thumbnail=logo,
|
||||
plot=item.plot, folder=False))
|
||||
title=parte_titulo, fulltitle=title, url=enlace, thumbnail=logo,
|
||||
plot=item.plot, infoLabels=item.infoLabels, folder=False))
|
||||
except:
|
||||
pass
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
@@ -363,6 +461,8 @@ def episodios(item):
    infoLabels = item.infoLabels
    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    calidad = item.quality

    pattern = '<ul class="%s">(.*?)</ul>' % "pagination"  # item.pattern
    pagination = scrapertools.find_single_match(data, pattern)
    if pagination:
@@ -378,76 +478,128 @@ def episodios(item):
        list_pages = [item.url]

    for index, page in enumerate(list_pages):
        logger.debug("Loading page %s/%s url=%s" % (index, len(list_pages), page))
        data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(page).data)
        data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")

        data = data.replace("chapters", "buscar-list")  # compatibility with mispelisyseries.com
        pattern = '<ul class="%s">(.*?)</ul>' % "buscar-list"  # item.pattern
        data = scrapertools.get_match(data, pattern)
        if scrapertools.find_single_match(data, pattern):
            data = scrapertools.get_match(data, pattern)
        else:
            logger.debug(item)
            logger.debug("data: " + data)
            return itemlist

        pattern = '<li[^>]*><a href="(?P<url>[^"]+).*?<img src="(?P<thumb>[^"]+)".*?<h2[^>]+>(?P<info>.*?)</h2>'
        if "pelisyseries.com" in host:
            pattern = '<li[^>]*><div class.*?src="(?P<thumb>[^"]+)?".*?<a class.*?href="(?P<url>[^"]+).*?<h3[^>]+>(?P<info>.*?)?<\/h3>.*?<\/li>'
        else:
            pattern = '<li[^>]*><a href="(?P<url>[^"]+).*?<img.*?src="(?P<thumb>[^"]+)?".*?<h2[^>]+>(?P<info>.*?)?<\/h2>'
        matches = re.compile(pattern, re.DOTALL).findall(data)
        #logger.debug("patron: " + pattern)
        #logger.debug(matches)

        season = "1"

        for url, thumb, info in matches:

            if "pelisyseries.com" in host:
                interm = url
                url = thumb
                thumb = interm

            if "<span" in info:  # new style
                pattern = ".*?[^>]+>.*?Temporada\s*(?P<season>\d+)\s*Capitulo(?:s)?\s*(?P<episode>\d+)" \
                          "(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)</span>\s*Calidad\s*<span[^>]+>" \
                          "[\[]\s*(?P<quality>.*?)\s*[\]]</span>"
                pattern = ".*?[^>]+>.*?Temporada\s*(?P<season>\d+)?.*?Capitulo(?:s)?\s*(?P<episode>\d+)?" \
                          "(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)?<\/span>\s*Calidad\s*<span[^>]+>" \
                          "[\[]\s*(?P<quality>.*?)?\s*[\]]<\/span>"
                if "Especial" in info:  # special episodes
                    pattern = ".*?[^>]+>.*?Temporada.*?\[.*?(?P<season>\d+).*?\].*?Capitulo.*?\[\s*(?P<episode>\d+).*?\]?(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)?<\/span>\s*Calidad\s*<span[^>]+>[\[]\s*(?P<quality>.*?)?\s*[\]]<\/span>"

                if not scrapertools.find_single_match(info, pattern):  # on a format error, build a basic info string
                    logger.debug("patron episodioNEW: " + pattern)
                    logger.debug(info)
                    info = '><strong>%sTemporada %s Capitulo 0</strong> - <span >Español Castellano</span> Calidad <span >[%s]</span>' % (item.contentTitle, season, item.infoLabels['quality'])
                r = re.compile(pattern)
                match = [m.groupdict() for m in r.finditer(info)][0]

                if match['season'] is None: match['season'] = season
                if match['episode'] is None: match['episode'] = "0"
                if match['quality']:
                    item.quality = match['quality']

                if match["episode2"]:
                    multi = True
                    title = "%s (%sx%s-%s) [%s][%s]" % (item.show, match["season"], str(match["episode"]).zfill(2),
                                                        str(match["episode2"]).zfill(2), match["lang"],
                                                        match["quality"])
                    title = "%s (%sx%s-%s) [%s]" % (item.show, match["season"], str(match["episode"]).zfill(2),
                                                    str(match["episode2"]).zfill(2), match["lang"])
                    if not config.get_setting("unify") and match["quality"]:  # if Smart Titles are NOT selected:
                        title = "%s[%s]" % (title, match["quality"])
                else:
                    multi = False
                    title = "%s (%sx%s) [%s][%s]" % (item.show, match["season"], str(match["episode"]).zfill(2),
                                                     match["lang"], match["quality"])
                    title = "%s (%sx%s) [%s]" % (item.show, match["season"], str(match["episode"]).zfill(2),
                                                 match["lang"])
                    if not config.get_setting("unify") and match["quality"]:  # if Smart Titles are NOT selected:
                        title = "%s[%s]" % (title, match["quality"])

            else:  # old style
                pattern = "\[(?P<quality>.*?)\].*?\[Cap.(?P<season>\d+)(?P<episode>\d{2})(?:_(?P<season2>\d+)" \
                if scrapertools.find_single_match(info, '\[\d{3}\]'):
                    info = re.sub(r'\[(\d{3}\])', r'[Cap.\1', info)
                elif scrapertools.find_single_match(info, '\[Cap.\d{2}_\d{2}\]'):
                    info = re.sub(r'\[Cap.(\d{2})_(\d{2})\]', r'[Cap.1\1_1\2]', info)
                elif scrapertools.find_single_match(info, '\[Cap.([A-Za-z]+)\]'):
                    info = re.sub(r'\[Cap.([A-Za-z]+)\]', '[Cap.100]', info)
                if scrapertools.find_single_match(info, '\[Cap.\d{2,3}'):
                    pattern = "\[(?P<quality>.*?)\].*?\[Cap.(?P<season>\d).*?(?P<episode>\d{2})(?:_(?P<season2>\d+)" \
                              "(?P<episode2>\d{2}))?.*?\].*?(?:\[(?P<lang>.*?)\])?"
                elif scrapertools.find_single_match(info, 'Cap.\d{2,3}'):
                    pattern = ".*?Temp.*?\s(?P<quality>.*?)\s.*?Cap.(?P<season>\d).*?(?P<episode>\d{2})(?:_(?P<season2>\d+)(?P<episode2>\d{2}))?.*?\s(?P<lang>.*)?"

                if not scrapertools.find_single_match(info, pattern):  # on a format error, build a basic info string
                    logger.debug("patron episodioOLD: " + pattern)
                    logger.debug(info)
                    info = '%s [%s][Cap.%s00][Español]' % (item.contentTitle, item.infoLabels['quality'], season)
                r = re.compile(pattern)
                match = [m.groupdict() for m in r.finditer(info)][0]
                # logger.debug("data %s" % match)

                str_lang = ""
                if match['quality']:
                    item.quality = match['quality']

                if match["lang"] is not None:
                    str_lang = "[%s]" % match["lang"]
                    item.quality = "%s %s" % (item.quality, match['lang'])

                if match["season2"] and match["episode2"]:
                    multi = True
                    if match["season"] == match["season2"]:
                        title = "%s (%sx%s-%s) %s[%s]" % (item.show, match["season"], match["episode"],
                                                          match["episode2"], str_lang, match["quality"])
                        title = "%s (%sx%s-%s) %s" % (item.show, match["season"], match["episode"],
                                                      match["episode2"], str_lang)
                        if not config.get_setting("unify") and match["quality"]:  # if Smart Titles are NOT selected:
                            title = "%s[%s]" % (title, match["quality"])
                    else:
                        title = "%s (%sx%s-%sx%s) %s[%s]" % (item.show, match["season"], match["episode"],
                                                             match["season2"], match["episode2"], str_lang,
                                                             match["quality"])
                        title = "%s (%sx%s-%sx%s) %s" % (item.show, match["season"], match["episode"],
                                                         match["season2"], match["episode2"], str_lang)
                        if not config.get_setting("unify") and match["quality"]:  # if Smart Titles are NOT selected:
                            title = "%s[%s]" % (title, match["quality"])
                else:
                    title = "%s (%sx%s) %s[%s]" % (item.show, match["season"], match["episode"], str_lang,
                                                   match["quality"])
                    title = "%s (%sx%s) %s" % (item.show, match["season"], match["episode"], str_lang)
                    if not config.get_setting("unify") and match["quality"]:  # if Smart Titles are NOT selected:
                        title = "%s[%s]" % (title, match["quality"])
                    multi = False

            season = match['season']
            episode = match['episode']
            logger.debug("title: " + title + " / url: " + url + " / calidad: " + item.quality + " / multi: " + str(multi) + " / Season: " + str(season) + " / EpisodeNumber: " + str(episode))
            itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb,
                                 quality=item.quality, multi=multi, contentSeason=season,
                                 contentEpisodeNumber=episode, infoLabels = infoLabels))

    # order list
    #tmdb.set_infoLabels(itemlist, True)
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True)
    if len(itemlist) > 1:
        itemlist = sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber)))

    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            item.clone(title="[COLOR orange][B]Añadir esta serie a la videoteca[/B][/COLOR]", action="add_serie_to_library", extra="episodios"))
            item.clone(title="Añadir esta serie a la videoteca", action="add_serie_to_library", extra="episodios", quality=calidad))

    return itemlist

@@ -458,8 +610,8 @@ def search(item, texto):
    try:
        item.post = "q=%s" % texto
        item.pattern = "buscar-list"
        itemlist = listado2(item)
        itemlist = listado_busqueda(item)

        return itemlist

    # The exception is caught so a failing channel does not break the global search
@@ -485,6 +637,24 @@ def newest(categoria):
            itemlist.extend(listado(item))
            if itemlist[-1].title == ">> Página siguiente":
                itemlist.pop()

        if categoria == 'peliculas 4k':
            item.url = host+'peliculas-hd/4kultrahd/'
            itemlist.extend(listado(item))
            if itemlist[-1].title == ">> Página siguiente":
                itemlist.pop()

        if categoria == 'anime':
            item.url = host+'anime/'
            itemlist.extend(listado(item))
            if itemlist[-1].title == ">> Página siguiente":
                itemlist.pop()

        if categoria == 'documentales':
            item.url = host+'documentales/'
            itemlist.extend(listado(item))
            if itemlist[-1].title == ">> Página siguiente":
                itemlist.pop()

    # The exception is caught so a failing channel does not break the "newest" channel
    except:
58 plugin.video.alfa/channels/tumejortorrent.json Normal file
@@ -0,0 +1,58 @@
{
  "id": "tumejortorrent",
  "name": "Tumejortorrent",
  "active": true,
  "adult": false,
  "language": ["cast", "lat"],
  "thumbnail": "tumejortorrent.png",
  "banner": "tumejortorrent.png",
  "categories": [
    "movie",
    "tvshow",
    "anime",
    "torrent",
    "documentary"
  ],
  "settings": [
    {
      "id": "include_in_global_search",
      "type": "bool",
      "label": "Incluir en busqueda global",
      "default": false,
      "enabled": true,
      "visible": true
    },
    {
      "id": "include_in_newest_peliculas",
      "type": "bool",
      "label": "Incluir en Novedades - Peliculas",
      "default": true,
      "enabled": true,
      "visible": true
    },
    {
      "id": "include_in_newest_series",
      "type": "bool",
      "label": "Incluir en Novedades - Episodios de series",
      "default": true,
      "enabled": true,
      "visible": true
    },
    {
      "id": "include_in_newest_torrent",
      "type": "bool",
      "label": "Incluir en Novedades - Torrent",
      "default": true,
      "enabled": true,
      "visible": true
    },
    {
      "id": "include_in_newest_4k",
      "type": "bool",
      "label": "Incluir en Novedades - 4K",
      "default": true,
      "enabled": true,
      "visible": true
    }
  ]
}
666 plugin.video.alfa/channels/tumejortorrent.py Normal file
@@ -0,0 +1,666 @@
# -*- coding: utf-8 -*-

import re

from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import tmdb

host = 'http://tumejortorrent.com/'

def mainlist(item):
    logger.info()

    itemlist = []

    thumb_pelis = get_thumb("channels_movie.png")
    thumb_series = get_thumb("channels_tvshow.png")
    thumb_search = get_thumb("search.png")

    itemlist.append(Item(channel=item.channel, action="submenu", title="Películas", url=host,
                         extra="peliculas", thumbnail=thumb_pelis))

    itemlist.append(Item(channel=item.channel, action="submenu", title="Series", url=host, extra="series",
                         thumbnail=thumb_series))

    itemlist.append(Item(channel=item.channel, action="submenu", title="Documentales", url=host, extra="varios",
                         thumbnail=thumb_series))
    itemlist.append(
        Item(channel=item.channel, action="search", title="Buscar", url=host + "buscar", thumbnail=thumb_search))

    return itemlist

def submenu(item):
    logger.info()
    itemlist = []

    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    data = data.replace("'", "\"").replace("/series\"", "/series/\"")  # compatibility with mispelisyseries.com

    patron = '<li><a href="http://(?:www.)?tumejortorrent.com/' + item.extra + '/">.*?<ul.*?>(.*?)</ul>'
    if "pelisyseries.com" in host and item.extra == "varios":  # compatibility with mispelisyseries.com
        data = '<a href="http://tumejortorrent.com/varios/" title="Documentales"><i class="icon-rocket"></i> Documentales</a>'
    else:
        data = scrapertools.get_match(data, patron)

    patron = '<.*?href="([^"]+)".*?>([^>]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        title = scrapedtitle.strip()
        url = scrapedurl

        itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, extra="pelilist"))
        itemlist.append(
            Item(channel=item.channel, action="alfabeto", title=title + " [A-Z]", url=url, extra="pelilist"))

    if item.extra == "peliculas":
        itemlist.append(Item(channel=item.channel, action="listado", title="Películas 4K", url=host + "peliculas-hd/4kultrahd/", extra="pelilist"))
        itemlist.append(
            Item(channel=item.channel, action="alfabeto", title="Películas 4K" + " [A-Z]", url=host + "peliculas-hd/4kultrahd/", extra="pelilist"))

    return itemlist


def alfabeto(item):
    logger.info()
    itemlist = []

    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")

    patron = '<ul class="alfabeto">(.*?)</ul>'
    data = scrapertools.get_match(data, patron)

    patron = '<a href="([^"]+)"[^>]+>([^>]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        title = scrapedtitle.upper()
        url = scrapedurl

        itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, extra=item.extra))

    return itemlist


def listado(item):
    logger.info()
    itemlist = []
    url_next_page = ''

    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    #data = httptools.downloadpage(item.url).data
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")

    if item.modo != 'next' or item.modo == '':
        patron = '<ul class="' + item.extra + '">(.*?)</ul>'
        fichas = scrapertools.get_match(data, patron)
        page_extra = item.extra
    else:
        fichas = data
        page_extra = item.extra

    patron = '<a href="([^"]+).*?'  # the url
    patron += 'title="([^"]+).*?'  # the title
    patron += '<img.*?src="([^"]+)"[^>]+>.*?'  # the thumbnail
    patron += '<h2.*?>(.*?)?<\/h2>'  # alternative title
    patron += '<span>([^<].*?)?<'  # the quality
    #logger.debug("patron: " + patron + " / fichas: " + fichas)
    matches = re.compile(patron, re.DOTALL).findall(fichas)

    # Pagination
    if item.next_page != 'b':
        if len(matches) > 30:
            url_next_page = item.url
            matches = matches[:30]
            next_page = 'b'
            modo = 'continue'
    else:
        matches = matches[30:]
        next_page = 'a'
        patron_next_page = '<a href="([^"]+)">Next<\/a>'
        matches_next_page = re.compile(patron_next_page, re.DOTALL).findall(data)
        modo = 'continue'
        if len(matches_next_page) > 0:
            url_next_page = matches_next_page[0]
            modo = 'next'
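The pagination above serves each scraped page in two halves: the first call keeps the first 30 fichas and flags next_page='b'; the second call takes the rest and only then follows the site's own "Next" link. A condensed dry run of that branch (url bookkeeping omitted, data invented):

def paginate(matches, next_page):
    # condensed version of the branch above, without the url bookkeeping
    if next_page != 'b':
        if len(matches) > 30:
            return matches[:30], 'b'
        return matches, next_page
    return matches[30:], 'a'

fichas = list(range(45))               # pretend 45 scraped fichas
first, flag = paginate(fichas, '')     # 30 items, flag 'b'
second, flag = paginate(fichas, flag)  # remaining 15 items, flag 'a'
print("%s %s" % (len(first), len(second)))  # 30 15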
for scrapedurl, scrapedtitle, scrapedthumbnail, title_alt, calidad in matches:
|
||||
url = scrapedurl
|
||||
title = scrapedtitle.replace("�", "ñ").replace("ñ", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "")
|
||||
title_alt = title_alt.replace("�", "ñ").replace("ñ", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "")
|
||||
thumbnail = scrapedthumbnail
|
||||
action = "findvideos"
|
||||
extra = ""
|
||||
context = "movie"
|
||||
year = scrapertools.find_single_match(scrapedthumbnail, r'-(\d{4})')
|
||||
|
||||
if ".com/serie" in url and "/miniseries" not in url:
|
||||
action = "episodios"
|
||||
extra = "serie"
|
||||
context = "tvshow"
|
||||
|
||||
title = scrapertools.find_single_match(title, '([^-]+)')
|
||||
title = title.replace("Ver online", "", 1).replace("Descarga Serie HD", "", 1).replace("Ver en linea ", "",
|
||||
1).strip()
|
||||
|
||||
else:
|
||||
title = title.replace("Descargar torrent ", "", 1).replace("Descarga Gratis ", "", 1).replace("Descargar Estreno ", "", 1).replace("Pelicula en latino ", "", 1).replace("Descargar Pelicula ", "", 1).replace("Descargar", "", 1).replace("Descarga", "", 1).replace("Bajar", "", 1).strip()
|
||||
if title.endswith("gratis"): title = title[:-7]
|
||||
if title.endswith("torrent"): title = title[:-8]
|
||||
if title.endswith("en HD"): title = title[:-6]
|
||||
|
||||
if title == "":
|
||||
title = title_alt
|
||||
context_title = title_alt
|
||||
show = title_alt
|
||||
if not config.get_setting("unify"): #Si Titulos Inteligentes NO seleccionados:
|
||||
if calidad:
|
||||
title = title + ' [' + calidad + "]"
|
||||
|
||||
if not 'array' in title:
|
||||
itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail,
|
||||
extra = extra, show = context_title, contentTitle=context_title, contentType=context, quality=calidad,
|
||||
context=["buscar_trailer"], infoLabels= {'year':year}))
|
||||
|
||||
logger.debug("url: " + url + " / title: " + title + " / contxt title: " + context_title + " / context: " + context + " / calidad: " + calidad+ " / year: " + year)
|
||||
|
||||
tmdb.set_infoLabels(itemlist, True)
|
||||
|
||||
if url_next_page:
|
||||
itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente",
|
||||
url=url_next_page, next_page=next_page, folder=True,
|
||||
text_color='yellow', text_bold=True, modo = modo, plot = extra,
|
||||
extra = page_extra))
|
||||
return itemlist
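

# A minimal, self-contained model of the two-phase pagination used by listado()
# above: a grid page can hold up to ~60 cards, so the first pass ('a') shows the
# first 30 and re-queues the same url flagged 'b'; the second pass shows the rest
# and only then follows the site's "Next" link. Hypothetical sketch, not called
# by the channel; the 'next' return value assumes a Next link was found.
def _paginate_sketch(matches, next_page):
    if next_page != 'b':
        if len(matches) > 30:
            return matches[:30], 'b', 'continue'   # first half, stay on this url
        return matches, 'a', 'continue'            # short page, nothing to split
    return matches[30:], 'a', 'next'               # second half, move to Next

if __name__ == "__main__":
    first, flag, modo = _paginate_sketch(list(range(70)), 'a')
    assert first == list(range(30)) and flag == 'b'
    second, flag, modo = _paginate_sketch(list(range(70)), 'b')
    assert second == list(range(30, 70)) and modo == 'next'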


def listado_busqueda(item):
    logger.info()
    itemlist = []
    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url, post=item.post).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")

    list_chars = [["Ã±", "ñ"]]  # "Ã±" -> "ñ": assumed latin-1 mojibake pair

    for el in list_chars:
        data = re.sub(r"%s" % el[0], el[1], data)

    try:
        get, post = scrapertools.find_single_match(data, '<ul class="pagination">.*?<a class="current" href.*?'
                                                         '<a\s*href="([^"]+)"(?:\s*onClick=".*?\'([^"]+)\'.*?")')
    except:
        post = False

    if post:
        if "pg" in item.post:
            item.post = re.sub(r"pg=(\d+)", "pg=%s" % post, item.post)
        else:
            item.post += "&pg=%s" % post

    pattern = '<ul class="%s">(.*?)</ul>' % item.pattern
    data = scrapertools.get_match(data, pattern)
    pattern = '<li[^>]*><a href="(?P<url>[^"]+).*?<img.*?src="(?P<thumb>[^"]+)?".*?<h2.*?>(?P<title>.*?)?<\/h2>'
    matches = re.compile(pattern, re.DOTALL).findall(data)

    for url, thumb, title in matches:
        real_title = scrapertools.find_single_match(title, r'<strong.*?>(.*?)Temporada.*?<\/strong>')  # series
        if not real_title:
            real_title = scrapertools.find_single_match(title, r'(.*?)\[.*?]')  # movies
        real_title = scrapertools.remove_htmltags(real_title).decode('iso-8859-1').encode('utf-8')
        real_title = scrapertools.htmlclean(real_title)
        real_title = real_title.replace("�", "ñ").replace("Ã±", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "").replace("de la Serie", "")
        calidad = scrapertools.find_single_match(title, r'.*?\s*Calidad.*?<span[^>]+>[\[]\s*(?P<quality>.*?)\s*[\]]<\/span>')  # series
        if calidad == "":
            calidad = scrapertools.find_single_match(title, r'..*?(\[.*?.*\])')  # movies
        year = scrapertools.find_single_match(thumb, r'-(\d{4})')

        # fix encoding for title
        title = scrapertools.htmlclean(title)
        title = re.sub(r'(Calidad.*?\])', '', title)
        title = title.replace("�", "ñ").replace("Ã±", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "").replace("de la Serie", "")
        if real_title == "":
            real_title = title
        if calidad == "":
            calidad = title
        context = "movie"
        url_real = True

        # skip anything that is not a video
        if "juego/" in url:
            continue

        # Salvage what we can from pelisyseries.com series for the video library:
        # the URL points at the episode rather than the show, and the show name is
        # often blank, so the name and the show id are recovered from the thumb.
        if ("/serie" in url or "-serie" in url) and "pelisyseries.com" in host:
            if "seriehd" in url:
                calidad_mps = "series-hd/"
            elif "serievo" in url:
                calidad_mps = "series-vo/"
            elif "serie-vo" in url:
                calidad_mps = "series-vo/"
            else:
                calidad_mps = "series/"

            if "no_image" in thumb:
                real_title_mps = title
            else:
                real_title_mps = re.sub(r'.*?\/\d+_', '', thumb)
                real_title_mps = re.sub(r'\.\w+.*?', '', real_title_mps)

            if "/0_" not in thumb and "no_image" not in thumb:
                serieid = scrapertools.find_single_match(thumb, r'.*?\/\w\/(?P<serieid>\d+).*?.*')
                if len(serieid) > 5:
                    serieid = ""
            else:
                serieid = ""

            # check whether the constructed tvshow url is valid, or fall back
            url_tvshow = host + calidad_mps + real_title_mps + "/"
            url_id = host + calidad_mps + real_title_mps + "/" + serieid
            data_serie = data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(url_id).data)
            data_serie = unicode(data_serie, "iso-8859-1", errors="replace").encode("utf-8")
            data_serie = data_serie.replace("chapters", "buscar-list")
            pattern = '<ul class="%s">(.*?)</ul>' % "buscar-list"  # item.pattern
            if not scrapertools.find_single_match(data_serie, pattern):
                data_serie = data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(url_tvshow).data)
                data_serie = unicode(data_serie, "iso-8859-1", errors="replace").encode("utf-8")
                data_serie = data_serie.replace("chapters", "buscar-list")
                if not scrapertools.find_single_match(data_serie, pattern):
                    context = "movie"
                    url_real = False
                    if not config.get_setting("unify"):  # if smart titles are NOT selected:
                        if calidad:
                            title = title + '[' + calidad + "]"
                else:
                    url = url_tvshow
            else:
                url = url_id

            real_title_mps = real_title_mps.replace("-", " ")
            logger.debug("url: " + url + " / title: " + title + " / real_title: " + real_title + " / real_title_mps: " + real_title_mps + " / calidad_mps : " + calidad_mps + " / context : " + context)
            real_title = real_title_mps

        show = real_title

        if ".com/serie" in url and "/miniseries" not in url and url_real:
            if not config.get_setting("unify"):  # if smart titles are NOT selected:
                if calidad:
                    title = title + '[' + calidad + "]"
            context = "tvshow"

            itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumb, quality=calidad,
                                 show=show, extra="serie", context=["buscar_trailer"], contentType=context, contentTitle=real_title, contentSerieName=real_title, infoLabels={'year': year}))
        else:
            if config.get_setting("unify"):  # if smart titles ARE selected:
                title = real_title

            itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb, quality=calidad,
                                 show=show, context=["buscar_trailer"], contentType=context, contentTitle=real_title, infoLabels={'year': year}))

        logger.debug("url: " + url + " / title: " + title + " / real_title: " + real_title + " / show: " + show + " / calidad: " + calidad)

    tmdb.set_infoLabels(itemlist, True)

    if post:
        itemlist.append(item.clone(channel=item.channel, action="listado_busqueda", title=">> Página siguiente",
                                   text_color='yellow', text_bold=True, thumbnail=get_thumb("next.png")))

    return itemlist
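

# Search results paginate through the POST body rather than the url: the next
# page number is scraped from the pagination <ul> and spliced into item.post as
# a "pg" parameter. A tiny runnable sketch of that splice, mirroring the code
# above (hypothetical query values):
def _next_post_sketch(post, pg):
    if "pg" in post:
        return re.sub(r"pg=(\d+)", "pg=%s" % pg, post)
    return post + "&pg=%s" % pg

if __name__ == "__main__":
    assert _next_post_sketch("q=matrix", 2) == "q=matrix&pg=2"
    assert _next_post_sketch("q=matrix&pg=2", 3) == "q=matrix&pg=3"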


def findvideos(item):
    logger.info()
    itemlist = []
    ## Any of these three options is valid
    # item.url = item.url.replace(".com/",".com/ver-online/")
    # item.url = item.url.replace(".com/",".com/descarga-directa/")
    item.url = item.url.replace(".com/", ".com/descarga-torrent/")

    # Download the page
    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    data = data.replace("$!", "#!").replace("'", "\"").replace("Ã±", "ñ").replace("//pictures", "/pictures")  # "Ã±" -> "ñ": assumed latin-1 mojibake pair

    title = scrapertools.find_single_match(data, "<h1.*?<strong>([^<]+)<\/strong>.*?<\/h1>")  # adjusted for mispelisy.series.com
    title += scrapertools.find_single_match(data, "<h1.*?<strong>[^<]+<\/strong>([^<]+)<\/h1>")  # adjusted for mispelisy.series.com
    #caratula = scrapertools.find_single_match(data, '<div class="entry-left">.*?src="([^"]+)"')
    caratula = scrapertools.find_single_match(data, '<h1.*?<img.*?src="([^"]+)')

    patron = 'openTorrent.*?title=".*?class="btn-torrent">.*?function openTorrent.*?href = "(.*?)";'
    # scraped torrent link
    url = scrapertools.find_single_match(data, patron)

    if item.infoLabels['year']:  # append the year to the main title
        year = '[%s]' % str(item.infoLabels['year'])
    else:
        year = ""

    if item.infoLabels['aired'] and item.contentType == "episode":  # use the episode air year for series
        year = scrapertools.find_single_match(str(item.infoLabels['aired']), r'\/(\d{4})')
        year = '[%s]' % year

    title_gen = title
    if item.contentType == "episode":  # strip info duplicated in series titles
        title = re.sub(r'Temp.*?\[', '[', title)
        title = re.sub(r'\[Cap.*?\]', '', title)
        title_epi = '%sx%s - %s' % (str(item.contentSeason), str(item.contentEpisodeNumber), item.contentTitle)
        title_gen = '%s %s, %s' % (title_epi, year, title)
        title_torrent = '%s, %s' % (title_epi, item.contentSerieName)
    else:
        title_torrent = item.contentTitle

    if item.infoLabels['quality']:
        if not config.get_setting("unify"):  # if smart titles are NOT selected:
            title_torrent = '%s [%s]' % (title_torrent, item.infoLabels['quality'])
        else:
            title_torrent = '%s (%s)' % (title_torrent, item.infoLabels['quality'])
    if not config.get_setting("unify"):  # if smart titles are NOT selected:
        title_gen = '[COLOR gold]**- Título: [/COLOR]%s -**' % (title_gen)
    else:
        title_gen = '[COLOR gold]Título: [/COLOR]%s' % (title_gen)
    if config.get_setting("quit_channel_name", "videolibrary") == 1 and item.contentChannel == "videolibrary":
        title_gen = '%s: %s' % (item.channel.capitalize(), title_gen)
    itemlist.append(item.clone(title=title_gen, action="", folder=False))  # header entry with the full video info

    title = title_torrent
    title_torrent = '[COLOR yellow][Torrent]- [/COLOR]%s [online]' % (title_torrent)
    if url != "":  # Torrent
        itemlist.append(
            Item(channel=item.channel, action="play", server="torrent", title=title_torrent, fulltitle=title,
                 url=url, thumbnail=caratula, plot=item.plot, infoLabels=item.infoLabels, folder=False))

    logger.debug("TORRENT: url: " + url + " / title: " + title + " / calidad: " + item.quality + " / context: " + str(item.context))

    # scraped watch links and download links (single link or multiple parts)

    data = data.replace("http://tumejorserie.com/descargar/url_encript.php?link=", "(")
    data = re.sub(r'javascript:;" onClick="popup\("http:\/\/(?:www.)?tumejortorrent.com\/\w{1,9}\/library\/include\/ajax\/get_modallinks.php\?links=', "", data)

    # New server-scraping scheme created by Torrentlocula, compatible with other Newpct1 clones
    patron = '<div class=\"box1\"[^<]+<img src=\"([^<]+)?" style[^<]+><\/div[^<]+<div class="box2">([^<]+)?<\/div[^<]+<div class="box3">([^<]+)?'
    patron += '<\/div[^<]+<div class="box4">([^<]+)?<\/div[^<]+<div class="box5"><a href=(.*?)? rel.*?'
    patron += '<\/div[^<]+<div class="box6">([^<]+)?<'

    enlaces_ver = re.compile(patron, re.DOTALL).findall(data)
    enlaces_descargar = enlaces_ver
    #logger.debug(enlaces_ver)

    if len(enlaces_ver) > 0:
        if not config.get_setting("unify"):  # if smart titles are NOT selected:
            itemlist.append(item.clone(title="[COLOR gold]**- Enlaces Ver: -**[/COLOR]", action="", folder=False))
        else:
            itemlist.append(item.clone(title="[COLOR gold] Enlaces Ver: [/COLOR]", action="", folder=False))

    for logo, servidor, idioma, calidad, enlace, titulo in enlaces_ver:
        if "Ver" in titulo:
            servidor = servidor.replace("streamin", "streaminto")
            titulo = title
            mostrar_server = True
            if config.get_setting("hidepremium"):
                mostrar_server = servertools.is_server_enabled(servidor)
            titulo = '[COLOR yellow][%s]-[/COLOR] %s [online]' % (servidor.capitalize(), titulo)
            logger.debug("VER: url: " + enlace + " / title: " + titulo + " / servidor: " + servidor + " / idioma: " + idioma)

            if mostrar_server:
                try:
                    devuelve = servertools.findvideosbyserver(enlace, servidor)
                    if devuelve:
                        enlace = devuelve[0][1]
                        itemlist.append(
                            Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo,
                                 fulltitle=title, url=enlace, thumbnail=logo, plot=item.plot, infoLabels=item.infoLabels, folder=False))
                except:
                    pass

    if len(enlaces_descargar) > 0:
        if not config.get_setting("unify"):  # if smart titles are NOT selected:
            itemlist.append(item.clone(title="[COLOR gold]**- Enlaces Descargar: -**[/COLOR]", action="", folder=False))
        else:
            itemlist.append(item.clone(title="[COLOR gold] Enlaces Descargar: [/COLOR]", action="", folder=False))

    for logo, servidor, idioma, calidad, enlace, titulo in enlaces_descargar:
        if "Ver" not in titulo:
            servidor = servidor.replace("uploaded", "uploadedto")
            partes = enlace.split(" ")
            titulo = "Descarga "
            p = 1
            logger.debug("DESCARGAR: url: " + enlace + " / title: " + titulo + title + " / servidor: " + servidor + " / idioma: " + idioma)
            for enlace in partes:
                parte_titulo = titulo + " (%s/%s)" % (p, len(partes))
                p += 1
                mostrar_server = True
                if config.get_setting("hidepremium"):
                    mostrar_server = servertools.is_server_enabled(servidor)
                parte_titulo = '[COLOR yellow][%s]-[/COLOR] %s' % (servidor.capitalize(), parte_titulo)
                if item.infoLabels['quality']:
                    if not config.get_setting("unify"):  # if smart titles are NOT selected:
                        parte_titulo = '%s [%s]' % (parte_titulo, item.infoLabels['quality'])
                    else:
                        parte_titulo = '%s (%s)' % (parte_titulo, item.infoLabels['quality'])
                if mostrar_server:
                    try:
                        devuelve = servertools.findvideosbyserver(enlace, servidor)
                        if devuelve:
                            enlace = devuelve[0][1]
                            itemlist.append(Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor,
                                                 title=parte_titulo, fulltitle=title, url=enlace, thumbnail=logo,
                                                 plot=item.plot, infoLabels=item.infoLabels, folder=False))
                    except:
                        pass

    return itemlist
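

# A download cell can pack several space-separated links for one file; the loop
# above numbers them "(part/total)". Runnable sketch with hypothetical urls:
def _split_parts_sketch(enlace, titulo="Descarga"):
    partes = enlace.split(" ")
    return ["%s (%s/%s)" % (titulo, p, len(partes)) for p, _ in enumerate(partes, 1)]

if __name__ == "__main__":
    assert _split_parts_sketch("http://h/f.part1 http://h/f.part2") == \
        ["Descarga (1/2)", "Descarga (2/2)"]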


def episodios(item):
    logger.info()
    itemlist = []
    infoLabels = item.infoLabels
    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    calidad = item.quality

    pattern = '<ul class="%s">(.*?)</ul>' % "pagination"  # item.pattern
    pagination = scrapertools.find_single_match(data, pattern)
    if pagination:
        pattern = '<li><a href="([^"]+)">Last<\/a>'
        full_url = scrapertools.find_single_match(pagination, pattern)
        url, last_page = scrapertools.find_single_match(full_url, r'(.*?\/pg\/)(\d+)')
        list_pages = [item.url]
        for x in range(2, int(last_page) + 1):
            response = httptools.downloadpage('%s%s' % (url, x))
            if response.sucess:  # sic: "sucess" is the attribute name Alfa's httptools exposes
                list_pages.append("%s%s" % (url, x))
    else:
        list_pages = [item.url]

    for index, page in enumerate(list_pages):
        data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(page).data)
        data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
        data = data.replace("chapters", "buscar-list")  # compatibility with mispelisy.series.com
        pattern = '<ul class="%s">(.*?)</ul>' % "buscar-list"  # item.pattern
        if scrapertools.find_single_match(data, pattern):
            data = scrapertools.get_match(data, pattern)
        else:
            logger.debug(item)
            logger.debug("data: " + data)
            return itemlist

        if "pelisyseries.com" in host:
            pattern = '<li[^>]*><div class.*?src="(?P<thumb>[^"]+)?".*?<a class.*?href="(?P<url>[^"]+).*?<h3[^>]+>(?P<info>.*?)?<\/h3>.*?<\/li>'
        else:
            pattern = '<li[^>]*><a href="(?P<url>[^"]+).*?<img.*?src="(?P<thumb>[^"]+)?".*?<h2[^>]+>(?P<info>.*?)?<\/h2>'
        matches = re.compile(pattern, re.DOTALL).findall(data)
        #logger.debug("patron: " + pattern)
        #logger.debug(matches)

        season = "1"

        for url, thumb, info in matches:

            if "pelisyseries.com" in host:
                interm = url
                url = thumb
                thumb = interm

            if "<span" in info:  # new style
                pattern = ".*?[^>]+>.*?Temporada\s*(?P<season>\d+)?.*?Capitulo(?:s)?\s*(?P<episode>\d+)?" \
                          "(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)?<\/span>\s*Calidad\s*<span[^>]+>" \
                          "[\[]\s*(?P<quality>.*?)?\s*[\]]<\/span>"
                if "Especial" in info:  # special episodes
                    pattern = ".*?[^>]+>.*?Temporada.*?\[.*?(?P<season>\d+).*?\].*?Capitulo.*?\[\s*(?P<episode>\d+).*?\]?(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)?<\/span>\s*Calidad\s*<span[^>]+>[\[]\s*(?P<quality>.*?)?\s*[\]]<\/span>"

                if not scrapertools.find_single_match(info, pattern):  # on malformed info, build a basic fallback
                    logger.debug("patron episodioNEW: " + pattern)
                    logger.debug(info)
                    info = '><strong>%sTemporada %s Capitulo 0</strong> - <span >Español Castellano</span> Calidad <span >[%s]</span>' % (item.contentTitle, season, item.infoLabels['quality'])
                r = re.compile(pattern)
                match = [m.groupdict() for m in r.finditer(info)][0]

                if match['season'] is None: match['season'] = season
                if match['episode'] is None: match['episode'] = "0"
                if match['quality']:
                    item.quality = match['quality']

                if match["episode2"]:
                    multi = True
                    title = "%s (%sx%s-%s) [%s]" % (item.show, match["season"], str(match["episode"]).zfill(2),
                                                    str(match["episode2"]).zfill(2), match["lang"])
                    if not config.get_setting("unify") and match["quality"]:  # if smart titles are NOT selected:
                        title = "%s[%s]" % (title, match["quality"])
                else:
                    multi = False
                    title = "%s (%sx%s) [%s]" % (item.show, match["season"], str(match["episode"]).zfill(2),
                                                 match["lang"])
                    if not config.get_setting("unify") and match["quality"]:  # if smart titles are NOT selected:
                        title = "%s[%s]" % (title, match["quality"])

            else:  # old style
                if scrapertools.find_single_match(info, '\[\d{3}\]'):
                    info = re.sub(r'\[(\d{3}\])', r'[Cap.\1', info)
                elif scrapertools.find_single_match(info, '\[Cap.\d{2}_\d{2}\]'):
                    info = re.sub(r'\[Cap.(\d{2})_(\d{2})\]', r'[Cap.1\1_1\2]', info)
                elif scrapertools.find_single_match(info, '\[Cap.([A-Za-z]+)\]'):
                    info = re.sub(r'\[Cap.([A-Za-z]+)\]', '[Cap.100]', info)
                if scrapertools.find_single_match(info, '\[Cap.\d{2,3}'):
                    pattern = "\[(?P<quality>.*?)\].*?\[Cap.(?P<season>\d).*?(?P<episode>\d{2})(?:_(?P<season2>\d+)" \
                              "(?P<episode2>\d{2}))?.*?\].*?(?:\[(?P<lang>.*?)\])?"
                elif scrapertools.find_single_match(info, 'Cap.\d{2,3}'):
                    pattern = ".*?Temp.*?\s(?P<quality>.*?)\s.*?Cap.(?P<season>\d).*?(?P<episode>\d{2})(?:_(?P<season2>\d+)(?P<episode2>\d{2}))?.*?\s(?P<lang>.*)?"

                if not scrapertools.find_single_match(info, pattern):  # on malformed info, build a basic fallback
                    logger.debug("patron episodioOLD: " + pattern)
                    logger.debug(info)
                    info = '%s [%s][Cap.%s00][Español]' % (item.contentTitle, item.infoLabels['quality'], season)
                r = re.compile(pattern)
                match = [m.groupdict() for m in r.finditer(info)][0]

                str_lang = ""
                if match['quality']:
                    item.quality = match['quality']

                if match["lang"] is not None:
                    str_lang = "[%s]" % match["lang"]
                    item.quality = "%s %s" % (item.quality, match['lang'])

                if match["season2"] and match["episode2"]:
                    multi = True
                    if match["season"] == match["season2"]:

                        title = "%s (%sx%s-%s) %s" % (item.show, match["season"], match["episode"],
                                                      match["episode2"], str_lang)
                        if not config.get_setting("unify") and match["quality"]:  # if smart titles are NOT selected:
                            title = "%s[%s]" % (title, match["quality"])
                    else:
                        title = "%s (%sx%s-%sx%s) %s" % (item.show, match["season"], match["episode"],
                                                         match["season2"], match["episode2"], str_lang)
                        if not config.get_setting("unify") and match["quality"]:  # if smart titles are NOT selected:
                            title = "%s[%s]" % (title, match["quality"])
                else:
                    title = "%s (%sx%s) %s" % (item.show, match["season"], match["episode"], str_lang)
                    if not config.get_setting("unify") and match["quality"]:  # if smart titles are NOT selected:
                        title = "%s[%s]" % (title, match["quality"])
                    multi = False

            season = match['season']
            episode = match['episode']
            logger.debug("title: " + title + " / url: " + url + " / calidad: " + item.quality + " / multi: " + str(multi) + " / Season: " + str(season) + " / EpisodeNumber: " + str(episode))
            itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb,
                                 quality=item.quality, multi=multi, contentSeason=season,
                                 contentEpisodeNumber=episode, infoLabels=infoLabels))
    # order the list
    #tmdb.set_infoLabels(itemlist, True)
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    if len(itemlist) > 1:
        itemlist = sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber)))

    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            item.clone(title="Añadir esta serie a la videoteca", action="add_serie_to_library", extra="episodios", quality=calidad))

    return itemlist
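

# Worked example of the "old style" Cap numbering handled above: the site packs
# season and episode as S·EE, so "[Cap.102]" is 1x02 and "[Cap.101_104]" is the
# range 1x01-1x04. The pattern below is trimmed to the capturing core of the
# expression used in episodios(); a runnable sketch, not called by the channel.
if __name__ == "__main__":
    _pat = r"\[Cap.(?P<season>\d).*?(?P<episode>\d{2})(?:_(?P<season2>\d+)(?P<episode2>\d{2}))?"
    _m = re.search(_pat, "Serie - Temporada 1 [HDTV][Cap.102][Español]")
    assert (_m.group("season"), _m.group("episode")) == ("1", "02")
    _m = re.search(_pat, "[HDTV][Cap.101_104]")
    assert (_m.group("season2"), _m.group("episode2")) == ("1", "04")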


def search(item, texto):
    logger.info("search:" + texto)
    # texto = texto.replace(" ", "+")

    try:
        item.post = "q=%s" % texto
        item.pattern = "buscar-list"
        itemlist = listado_busqueda(item)

        return itemlist

    # Catch the exception so one failing channel does not break the global search
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def newest(categoria):
    logger.info()
    itemlist = []
    item = Item()
    try:
        item.extra = 'pelilist'
        if categoria == 'torrent':
            item.url = host + 'peliculas/'

            itemlist = listado(item)
            if itemlist[-1].title == ">> Página siguiente":
                itemlist.pop()
            item.url = host + 'series/'
            itemlist.extend(listado(item))
            if itemlist[-1].title == ">> Página siguiente":
                itemlist.pop()

        if categoria == 'peliculas 4k':
            item.url = host + 'peliculas-hd/4kultrahd/'
            itemlist.extend(listado(item))
            if itemlist[-1].title == ">> Página siguiente":
                itemlist.pop()

        if categoria == 'anime':
            item.url = host + 'anime/'
            itemlist.extend(listado(item))
            if itemlist[-1].title == ">> Página siguiente":
                itemlist.pop()

        if categoria == 'documentales':
            item.url = host + 'documentales/'
            itemlist.extend(listado(item))
            if itemlist[-1].title == ">> Página siguiente":
                itemlist.pop()

    # Catch the exception so one failing channel does not break the "newest" listings
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist
58
plugin.video.alfa/channels/tvsinpagar.json
Normal file
@@ -0,0 +1,58 @@
{
    "id": "tvsinpagar",
    "name": "Tvsinpagar",
    "active": true,
    "adult": false,
    "language": ["cast", "lat"],
    "thumbnail": "tvsinpagar.png",
    "banner": "tvsinpagar.png",
    "categories": [
        "movie",
        "tvshow",
        "anime",
        "torrent",
        "documentary"
    ],
    "settings": [
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": false,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_peliculas",
            "type": "bool",
            "label": "Incluir en Novedades - Peliculas",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_series",
            "type": "bool",
            "label": "Incluir en Novedades - Episodios de series",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_torrent",
            "type": "bool",
            "label": "Incluir en Novedades - Torrent",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_4k",
            "type": "bool",
            "label": "Incluir en Novedades - 4K",
            "default": true,
            "enabled": true,
            "visible": true
        }
    ]
}
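
Each entry under "settings" surfaces as a per-channel toggle in Kodi, and channel code reads it back with the same config.get_setting(id, channel) call style used elsewhere in this commit (e.g. config.get_setting("quit_channel_name", "videolibrary")). A minimal sketch, assuming the novelty aggregator checks the flag before calling newest():

    from platformcode import config

    # hypothetical check; the id matches the JSON above, the second argument is the channel id
    if config.get_setting("include_in_newest_torrent", "tvsinpagar"):
        from channels import tvsinpagar
        novedades = tvsinpagar.newest('torrent')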
666
plugin.video.alfa/channels/tvsinpagar.py
Normal file
@@ -0,0 +1,666 @@
# -*- coding: utf-8 -*-

import re

from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import tmdb

host = 'http://www.tvsinpagar.com/'


def mainlist(item):
    logger.info()

    itemlist = []

    thumb_pelis = get_thumb("channels_movie.png")
    thumb_series = get_thumb("channels_tvshow.png")
    thumb_search = get_thumb("search.png")

    itemlist.append(Item(channel=item.channel, action="submenu", title="Películas", url=host,
                         extra="peliculas", thumbnail=thumb_pelis))

    itemlist.append(Item(channel=item.channel, action="submenu", title="Series", url=host, extra="series",
                         thumbnail=thumb_series))

    itemlist.append(Item(channel=item.channel, action="submenu", title="Documentales", url=host, extra="varios",
                         thumbnail=thumb_series))
    itemlist.append(
        Item(channel=item.channel, action="search", title="Buscar", url=host + "buscar", thumbnail=thumb_search))

    return itemlist
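

# mainlist() only emits Item objects whose `action` names another function in
# this module; Alfa's launcher resolves that name on the channel module when the
# entry is clicked. Simplified sketch of that convention (the real launcher
# lives in platformcode and adds error handling and special cases):
def _run_action_sketch(item):
    import importlib
    channel = importlib.import_module("channels." + item.channel)
    return getattr(channel, item.action)(item)   # e.g. submenu(item), listado(item)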
|
||||
|
||||
def submenu(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
|
||||
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
|
||||
data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
|
||||
data = data.replace("'", "\"").replace("/series\"", "/series/\"") #Compatibilidad con mispelisy.series.com
|
||||
|
||||
patron = '<li><a href="http://(?:www.)?tvsinpagar.com/' + item.extra + '/">.*?<ul.*?>(.*?)</ul>'
|
||||
if "pelisyseries.com" in host and item.extra == "varios": #compatibilidad con mispelisy.series.com
|
||||
data = '<a href="http://tvsinpagar.com/varios/" title="Documentales"><i class="icon-rocket"></i> Documentales</a>'
|
||||
else:
|
||||
data = scrapertools.get_match(data, patron)
|
||||
|
||||
patron = '<.*?href="([^"]+)".*?>([^>]+)</a>'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for scrapedurl, scrapedtitle in matches:
|
||||
title = scrapedtitle.strip()
|
||||
url = scrapedurl
|
||||
|
||||
itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, extra="pelilist"))
|
||||
itemlist.append(
|
||||
Item(channel=item.channel, action="alfabeto", title=title + " [A-Z]", url=url, extra="pelilist"))
|
||||
|
||||
if item.extra == "peliculas":
|
||||
itemlist.append(Item(channel=item.channel, action="listado", title="Películas 4K", url=host + "peliculas-hd/4kultrahd/", extra="pelilist"))
|
||||
itemlist.append(
|
||||
Item(channel=item.channel, action="alfabeto", title="Películas 4K" + " [A-Z]", url=host + "peliculas-hd/4kultrahd/", extra="pelilist"))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def alfabeto(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
|
||||
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
|
||||
data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
|
||||
|
||||
patron = '<ul class="alfabeto">(.*?)</ul>'
|
||||
data = scrapertools.get_match(data, patron)
|
||||
|
||||
patron = '<a href="([^"]+)"[^>]+>([^>]+)</a>'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for scrapedurl, scrapedtitle in matches:
|
||||
title = scrapedtitle.upper()
|
||||
url = scrapedurl
|
||||
|
||||
itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, extra=item.extra))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def listado(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
url_next_page =''
|
||||
|
||||
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
|
||||
#data = httptools.downloadpage(item.url).data
|
||||
data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
|
||||
|
||||
if item.modo != 'next' or item.modo =='':
|
||||
patron = '<ul class="' + item.extra + '">(.*?)</ul>'
|
||||
fichas = scrapertools.get_match(data, patron)
|
||||
page_extra = item.extra
|
||||
else:
|
||||
fichas = data
|
||||
page_extra = item.extra
|
||||
|
||||
patron = '<a href="([^"]+).*?' # la url
|
||||
patron += 'title="([^"]+).*?' # el titulo
|
||||
patron += '<img.*?src="([^"]+)"[^>]+>.*?' # el thumbnail
|
||||
patron += '<h2.*?>(.*?)?<\/h2>' # titulo alternativo
|
||||
patron += '<span>([^<].*?)?<' # la calidad
|
||||
#logger.debug("patron: " + patron + " / fichas: " + fichas)
|
||||
matches = re.compile(patron, re.DOTALL).findall(fichas)
|
||||
|
||||
# Paginacion
|
||||
if item.next_page != 'b':
|
||||
if len(matches) > 30:
|
||||
url_next_page = item.url
|
||||
matches = matches[:30]
|
||||
next_page = 'b'
|
||||
modo = 'continue'
|
||||
else:
|
||||
matches = matches[30:]
|
||||
next_page = 'a'
|
||||
patron_next_page = '<a href="([^"]+)">Next<\/a>'
|
||||
matches_next_page = re.compile(patron_next_page, re.DOTALL).findall(data)
|
||||
modo = 'continue'
|
||||
if len(matches_next_page) > 0:
|
||||
url_next_page = matches_next_page[0]
|
||||
modo = 'next'
|
||||
|
||||
for scrapedurl, scrapedtitle, scrapedthumbnail, title_alt, calidad in matches:
|
||||
url = scrapedurl
|
||||
title = scrapedtitle.replace("�", "ñ").replace("ñ", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "")
|
||||
title_alt = title_alt.replace("�", "ñ").replace("ñ", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "")
|
||||
thumbnail = scrapedthumbnail
|
||||
action = "findvideos"
|
||||
extra = ""
|
||||
context = "movie"
|
||||
year = scrapertools.find_single_match(scrapedthumbnail, r'-(\d{4})')
|
||||
|
||||
if ".com/serie" in url and "/miniseries" not in url:
|
||||
action = "episodios"
|
||||
extra = "serie"
|
||||
context = "tvshow"
|
||||
|
||||
title = scrapertools.find_single_match(title, '([^-]+)')
|
||||
title = title.replace("Ver online", "", 1).replace("Descarga Serie HD", "", 1).replace("Ver en linea ", "",
|
||||
1).strip()
|
||||
|
||||
else:
|
||||
title = title.replace("Descargar torrent ", "", 1).replace("Descarga Gratis ", "", 1).replace("Descargar Estreno ", "", 1).replace("Pelicula en latino ", "", 1).replace("Descargar Pelicula ", "", 1).replace("Descargar", "", 1).replace("Descarga", "", 1).replace("Bajar", "", 1).strip()
|
||||
if title.endswith("gratis"): title = title[:-7]
|
||||
if title.endswith("torrent"): title = title[:-8]
|
||||
if title.endswith("en HD"): title = title[:-6]
|
||||
|
||||
if title == "":
|
||||
title = title_alt
|
||||
context_title = title_alt
|
||||
show = title_alt
|
||||
if not config.get_setting("unify"): #Si Titulos Inteligentes NO seleccionados:
|
||||
if calidad:
|
||||
title = title + ' [' + calidad + "]"
|
||||
|
||||
if not 'array' in title:
|
||||
itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail,
|
||||
extra = extra, show = context_title, contentTitle=context_title, contentType=context, quality=calidad,
|
||||
context=["buscar_trailer"], infoLabels= {'year':year}))
|
||||
|
||||
logger.debug("url: " + url + " / title: " + title + " / contxt title: " + context_title + " / context: " + context + " / calidad: " + calidad+ " / year: " + year)
|
||||
|
||||
tmdb.set_infoLabels(itemlist, True)
|
||||
|
||||
if url_next_page:
|
||||
itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente",
|
||||
url=url_next_page, next_page=next_page, folder=True,
|
||||
text_color='yellow', text_bold=True, modo = modo, plot = extra,
|
||||
extra = page_extra))
|
||||
return itemlist
|
||||
|
||||
def listado_busqueda(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url, post=item.post).data)
|
||||
data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
|
||||
|
||||
list_chars = [["ñ", "ñ"]]
|
||||
|
||||
for el in list_chars:
|
||||
data = re.sub(r"%s" % el[0], el[1], data)
|
||||
|
||||
try:
|
||||
get, post = scrapertools.find_single_match(data, '<ul class="pagination">.*?<a class="current" href.*?'
|
||||
'<a\s*href="([^"]+)"(?:\s*onClick=".*?\'([^"]+)\'.*?")')
|
||||
except:
|
||||
post = False
|
||||
|
||||
if post:
|
||||
if "pg" in item.post:
|
||||
item.post = re.sub(r"pg=(\d+)", "pg=%s" % post, item.post)
|
||||
else:
|
||||
item.post += "&pg=%s" % post
|
||||
|
||||
pattern = '<ul class="%s">(.*?)</ul>' % item.pattern
|
||||
data = scrapertools.get_match(data, pattern)
|
||||
pattern = '<li[^>]*><a href="(?P<url>[^"]+).*?<img.*?src="(?P<thumb>[^"]+)?".*?<h2.*?>(?P<title>.*?)?<\/h2>'
|
||||
matches = re.compile(pattern, re.DOTALL).findall(data)
|
||||
|
||||
for url, thumb, title in matches:
|
||||
real_title = scrapertools.find_single_match(title, r'<strong.*?>(.*?)Temporada.*?<\/strong>') #series
|
||||
if not real_title:
|
||||
real_title = scrapertools.find_single_match(title, r'(.*?)\[.*?]') #movies
|
||||
real_title = scrapertools.remove_htmltags(real_title).decode('iso-8859-1').encode('utf-8')
|
||||
real_title = scrapertools.htmlclean(real_title)
|
||||
real_title = real_title.replace("�", "ñ").replace("ñ", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "").replace("de la Serie", "")
|
||||
calidad = scrapertools.find_single_match(title, r'.*?\s*Calidad.*?<span[^>]+>[\[]\s*(?P<quality>.*?)\s*[\]]<\/span>') #series
|
||||
if calidad == "":
|
||||
calidad = scrapertools.find_single_match(title, r'..*?(\[.*?.*\])') #movies
|
||||
year = scrapertools.find_single_match(thumb, r'-(\d{4})')
|
||||
|
||||
# fix encoding for title
|
||||
title = scrapertools.htmlclean(title)
|
||||
title = re.sub(r'(Calidad.*?\])', '', title)
|
||||
title = title.replace("�", "ñ").replace("ñ", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "").replace("de la Serie", "")
|
||||
if real_title == "":
|
||||
real_title = title
|
||||
if calidad == "":
|
||||
calidad = title
|
||||
context = "movie"
|
||||
url_real = True
|
||||
|
||||
# no mostramos lo que no sean videos
|
||||
if "juego/" in url:
|
||||
continue
|
||||
|
||||
# Codigo para rescatar lo que se puede en pelisy.series.com de Series para la Videoteca. la URL apunta al capítulo y no a la Serie. Nombre de Serie frecuentemente en blanco. Se obtiene de Thumb, así como el id de la serie
|
||||
if ("/serie" in url or "-serie" in url) and "pelisyseries.com" in host:
|
||||
if "seriehd" in url:
|
||||
calidad_mps = "series-hd/"
|
||||
elif "serievo" in url:
|
||||
calidad_mps = "series-vo/"
|
||||
elif "serie-vo" in url:
|
||||
calidad_mps = "series-vo/"
|
||||
else:
|
||||
calidad_mps = "series/"
|
||||
|
||||
if "no_image" in thumb:
|
||||
real_title_mps = title
|
||||
else:
|
||||
real_title_mps = re.sub(r'.*?\/\d+_', '', thumb)
|
||||
real_title_mps = re.sub(r'\.\w+.*?', '', real_title_mps)
|
||||
|
||||
if "/0_" not in thumb and not "no_image" in thumb:
|
||||
serieid = scrapertools.find_single_match(thumb, r'.*?\/\w\/(?P<serieid>\d+).*?.*')
|
||||
if len(serieid) > 5:
|
||||
serieid = ""
|
||||
else:
|
||||
serieid = ""
|
||||
|
||||
#detectar si la url creada de tvshow es válida o hay que volver atras
|
||||
url_tvshow = host + calidad_mps + real_title_mps + "/"
|
||||
url_id = host + calidad_mps + real_title_mps + "/" + serieid
|
||||
data_serie = data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(url_id).data)
|
||||
data_serie = unicode(data_serie, "iso-8859-1", errors="replace").encode("utf-8")
|
||||
data_serie = data_serie.replace("chapters", "buscar-list")
|
||||
pattern = '<ul class="%s">(.*?)</ul>' % "buscar-list" # item.pattern
|
||||
if not scrapertools.find_single_match(data_serie, pattern):
|
||||
data_serie = data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(url_tvshow).data)
|
||||
data_serie = unicode(data_serie, "iso-8859-1", errors="replace").encode("utf-8")
|
||||
data_serie = data_serie.replace("chapters", "buscar-list")
|
||||
if not scrapertools.find_single_match(data_serie, pattern):
|
||||
context = "movie"
|
||||
url_real = False
|
||||
if not config.get_setting("unify"): #Si Titulos Inteligentes NO seleccionados:
|
||||
if calidad:
|
||||
title = title + '[' + calidad + "]"
|
||||
else:
|
||||
url = url_tvshow
|
||||
else:
|
||||
url = url_id
|
||||
|
||||
real_title_mps = real_title_mps.replace("-", " ")
|
||||
logger.debug("url: " + url + " / title: " + title + " / real_title: " + real_title + " / real_title_mps: " + real_title_mps + " / calidad_mps : " + calidad_mps + " / context : " + context)
|
||||
real_title = real_title_mps
|
||||
|
||||
show = real_title
|
||||
|
||||
if ".com/serie" in url and "/miniseries" not in url and url_real:
|
||||
if not config.get_setting("unify"): #Si Titulos Inteligentes NO seleccionados:
|
||||
if calidad:
|
||||
title = title + '[' + calidad + "]"
|
||||
context = "tvshow"
|
||||
|
||||
itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumb, quality=calidad,
|
||||
show=show, extra="serie", context=["buscar_trailer"], contentType=context, contentTitle=real_title, contentSerieName=real_title, infoLabels= {'year':year}))
|
||||
else:
|
||||
if config.get_setting("unify"): #Si Titulos Inteligentes SI seleccionados:
|
||||
title = real_title
|
||||
|
||||
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb, quality=calidad,
|
||||
show=show, context=["buscar_trailer"], contentType=context, contentTitle=real_title, infoLabels= {'year':year}))
|
||||
|
||||
logger.debug("url: " + url + " / title: " + title + " / real_title: " + real_title + " / show: " + show + " / calidad: " + calidad)
|
||||
|
||||
tmdb.set_infoLabels(itemlist, True)
|
||||
|
||||
if post:
|
||||
itemlist.append(item.clone(channel=item.channel, action="listado_busqueda", title=">> Página siguiente",
|
||||
text_color='yellow', text_bold=True, thumbnail=get_thumb("next.png")))
|
||||
|
||||
return itemlist
|
||||
|
||||
def findvideos(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
## Cualquiera de las tres opciones son válidas
|
||||
# item.url = item.url.replace(".com/",".com/ver-online/")
|
||||
# item.url = item.url.replace(".com/",".com/descarga-directa/")
|
||||
item.url = item.url.replace(".com/", ".com/descarga-torrent/")
|
||||
|
||||
# Descarga la página
|
||||
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
|
||||
data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
|
||||
data = data.replace("$!", "#!").replace("'", "\"").replace("ñ", "ñ").replace("//pictures", "/pictures")
|
||||
|
||||
title = scrapertools.find_single_match(data, "<h1.*?<strong>([^<]+)<\/strong>.*?<\/h1>") #corregido para adaptarlo a mispelisy.series.com
|
||||
title += scrapertools.find_single_match(data, "<h1.*?<strong>[^<]+<\/strong>([^<]+)<\/h1>") #corregido para adaptarlo a mispelisy.series.com
|
||||
#caratula = scrapertools.find_single_match(data, '<div class="entry-left">.*?src="([^"]+)"')
|
||||
caratula = scrapertools.find_single_match(data, '<h1.*?<img.*?src="([^"]+)')
|
||||
|
||||
patron = 'openTorrent.*?title=".*?class="btn-torrent">.*?function openTorrent.*?href = "(.*?)";'
|
||||
# escraped torrent
|
||||
url = scrapertools.find_single_match(data, patron)
|
||||
|
||||
if item.infoLabels['year']: #añadir el año al título general
|
||||
year = '[%s]' % str(item.infoLabels['year'])
|
||||
else:
|
||||
year = ""
|
||||
|
||||
if item.infoLabels['aired'] and item.contentType == "episode": #añadir el año de episodio para series
|
||||
year = scrapertools.find_single_match(str(item.infoLabels['aired']), r'\/(\d{4})')
|
||||
year = '[%s]' % year
|
||||
|
||||
title_gen = title
|
||||
if item.contentType == "episode": #scrapear información duplicada en Series
|
||||
title = re.sub(r'Temp.*?\[', '[', title)
|
||||
title = re.sub(r'\[Cap.*?\]', '', title)
|
||||
title_epi = '%sx%s - %s' % (str(item.contentSeason), str(item.contentEpisodeNumber), item.contentTitle)
|
||||
title_gen = '%s %s, %s' % (title_epi, year, title)
|
||||
title_torrent = '%s, %s' % (title_epi, item.contentSerieName)
|
||||
else:
|
||||
title_torrent = item.contentTitle
|
||||
|
||||
if item.infoLabels['quality']:
|
||||
if not config.get_setting("unify"): #Si Titulos Inteligentes NO seleccionados:
|
||||
title_torrent = '%s [%s]' %(title_torrent, item.infoLabels['quality'])
|
||||
else:
|
||||
title_torrent = '%s (%s)' %(title_torrent, item.infoLabels['quality'])
|
||||
if not config.get_setting("unify"): #Si Titulos Inteligentes NO seleccionados:
|
||||
title_gen = '[COLOR gold]**- Título: [/COLOR]%s -**' % (title_gen)
|
||||
else:
|
||||
title_gen = '[COLOR gold]Título: [/COLOR]%s' % (title_gen)
|
||||
if config.get_setting("quit_channel_name", "videolibrary") == 1 and item.contentChannel == "videolibrary":
|
||||
title_gen = '%s: %s' % (item.channel.capitalize(), title_gen)
|
||||
itemlist.append(item.clone(title=title_gen, action="", folder=False)) #Título con todos los datos del vídeo
|
||||
|
||||
title = title_torrent
|
||||
title_torrent = '[COLOR yellow][Torrent]- [/COLOR]%s [online]' % (title_torrent)
|
||||
if url != "": #Torrent
|
||||
itemlist.append(
|
||||
Item(channel=item.channel, action="play", server="torrent", title=title_torrent, fulltitle=title,
|
||||
url=url, thumbnail=caratula, plot=item.plot, infoLabels=item.infoLabels, folder=False))
|
||||
|
||||
logger.debug("TORRENT: url: " + url + " / title: " + title + " / calidad: " + item.quality + " / context: " + str(item.context))
|
||||
|
||||
# escraped ver vídeos, descargar vídeos un link, múltiples liks
|
||||
|
||||
data = data.replace("http://tumejorserie.com/descargar/url_encript.php?link=", "(")
|
||||
data = re.sub(r'javascript:;" onClick="popup\("http:\/\/(?:www.)?tvsinpagar.com\/\w{1,9}\/library\/include\/ajax\/get_modallinks.php\?links=', "", data)
|
||||
|
||||
# Nuevo sistema de scrapeo de servidores creado por Torrentlocula, compatible con otros clones de Newpct1
|
||||
patron = '<div class=\"box1\"[^<]+<img src=\"([^<]+)?" style[^<]+><\/div[^<]+<div class="box2">([^<]+)?<\/div[^<]+<div class="box3">([^<]+)?'
|
||||
patron += '<\/div[^<]+<div class="box4">([^<]+)?<\/div[^<]+<div class="box5"><a href=(.*?)? rel.*?'
|
||||
patron += '<\/div[^<]+<div class="box6">([^<]+)?<'
|
||||
|
||||
enlaces_ver = re.compile(patron, re.DOTALL).findall(data)
|
||||
enlaces_descargar = enlaces_ver
|
||||
#logger.debug(enlaces_ver)
|
||||
|
||||
if len(enlaces_ver) > 0:
|
||||
if not config.get_setting("unify"): #Si Titulos Inteligentes NO seleccionados:
|
||||
itemlist.append(item.clone(title="[COLOR gold]**- Enlaces Ver: -**[/COLOR]", action="", folder=False))
|
||||
else:
|
||||
itemlist.append(item.clone(title="[COLOR gold] Enlaces Ver: [/COLOR]", action="", folder=False))
|
||||
|
||||
for logo, servidor, idioma, calidad, enlace, titulo in enlaces_ver:
|
||||
if "Ver" in titulo:
|
||||
servidor = servidor.replace("streamin", "streaminto")
|
||||
titulo = title
|
||||
mostrar_server = True
|
||||
if config.get_setting("hidepremium"):
|
||||
mostrar_server = servertools.is_server_enabled(servidor)
|
||||
titulo = '[COLOR yellow][%s]-[/COLOR] %s [online]' % (servidor.capitalize(), titulo)
|
||||
logger.debug("VER: url: " + enlace + " / title: " + titulo + " / servidor: " + servidor + " / idioma: " + idioma)
|
||||
|
||||
if mostrar_server:
|
||||
try:
|
||||
devuelve = servertools.findvideosbyserver(enlace, servidor)
|
||||
if devuelve:
|
||||
enlace = devuelve[0][1]
|
||||
itemlist.append(
|
||||
Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo,
|
||||
fulltitle=title, url=enlace, thumbnail=logo, plot=item.plot, infoLabels=item.infoLabels, folder=False))
|
||||
except:
|
||||
pass
|
||||
|
||||
if len(enlaces_descargar) > 0:
|
||||
if not config.get_setting("unify"): #Si Titulos Inteligentes NO seleccionados:
|
||||
itemlist.append(item.clone(title="[COLOR gold]**- Enlaces Descargar: -**[/COLOR]", action="", folder=False))
|
||||
else:
|
||||
itemlist.append(item.clone(title="[COLOR gold] Enlaces Descargar: [/COLOR]", action="", folder=False))
|
||||
|
||||
for logo, servidor, idioma, calidad, enlace, titulo in enlaces_descargar:
|
||||
if "Ver" not in titulo:
|
||||
servidor = servidor.replace("uploaded", "uploadedto")
|
||||
partes = enlace.split(" ")
|
||||
titulo = "Descarga "
|
||||
p = 1
|
||||
logger.debug("DESCARGAR: url: " + enlace + " / title: " + titulo + title + " / servidor: " + servidor + " / idioma: " + idioma)
|
||||
for enlace in partes:
|
||||
parte_titulo = titulo + " (%s/%s)" % (p, len(partes))
|
||||
p += 1
|
||||
mostrar_server = True
|
||||
if config.get_setting("hidepremium"):
|
||||
mostrar_server = servertools.is_server_enabled(servidor)
|
||||
parte_titulo = '[COLOR yellow][%s]-[/COLOR] %s' % (servidor.capitalize(), parte_titulo)
|
||||
if item.infoLabels['quality']:
|
||||
if not config.get_setting("unify"): #Si Titulos Inteligentes NO seleccionados:
|
||||
parte_titulo = '%s [%s]' %(parte_titulo, item.infoLabels['quality'])
|
||||
else:
|
||||
parte_titulo = '%s (%s)' %(parte_titulo, item.infoLabels['quality'])
|
||||
if mostrar_server:
|
||||
try:
|
||||
devuelve = servertools.findvideosbyserver(enlace, servidor)
|
||||
if devuelve:
|
||||
enlace = devuelve[0][1]
|
||||
itemlist.append(Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor,
|
||||
title=parte_titulo, fulltitle=title, url=enlace, thumbnail=logo,
|
||||
plot=item.plot, infoLabels=item.infoLabels, folder=False))
|
||||
except:
|
||||
pass
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def episodios(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
infoLabels = item.infoLabels
|
||||
data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
|
||||
data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
|
||||
calidad = item.quality
|
||||
|
||||
pattern = '<ul class="%s">(.*?)</ul>' % "pagination" # item.pattern
|
||||
pagination = scrapertools.find_single_match(data, pattern)
|
||||
if pagination:
|
||||
pattern = '<li><a href="([^"]+)">Last<\/a>'
|
||||
full_url = scrapertools.find_single_match(pagination, pattern)
|
||||
url, last_page = scrapertools.find_single_match(full_url, r'(.*?\/pg\/)(\d+)')
|
||||
list_pages = [item.url]
|
||||
for x in range(2, int(last_page) + 1):
|
||||
response = httptools.downloadpage('%s%s'% (url,x))
|
||||
if response.sucess:
|
||||
list_pages.append("%s%s" % (url, x))
|
||||
else:
|
||||
list_pages = [item.url]
|
||||
|
||||
for index, page in enumerate(list_pages):
|
||||
data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(page).data)
|
||||
data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
|
||||
data = data.replace("chapters", "buscar-list") #Compatibilidad con mispelisy.series.com
|
||||
pattern = '<ul class="%s">(.*?)</ul>' % "buscar-list" # item.pattern
|
||||
if scrapertools.find_single_match(data, pattern):
|
||||
data = scrapertools.get_match(data, pattern)
|
||||
else:
|
||||
logger.debug(item)
|
||||
logger.debug("data: " + data)
|
||||
return itemlist
|
||||
|
||||
if "pelisyseries.com" in host:
|
||||
pattern = '<li[^>]*><div class.*?src="(?P<thumb>[^"]+)?".*?<a class.*?href="(?P<url>[^"]+).*?<h3[^>]+>(?P<info>.*?)?<\/h3>.*?<\/li>'
|
||||
else:
|
||||
pattern = '<li[^>]*><a href="(?P<url>[^"]+).*?<img.*?src="(?P<thumb>[^"]+)?".*?<h2[^>]+>(?P<info>.*?)?<\/h2>'
|
||||
matches = re.compile(pattern, re.DOTALL).findall(data)
|
||||
#logger.debug("patron: " + pattern)
|
||||
#logger.debug(matches)
|
||||
|
||||
season = "1"
|
||||
|
||||
for url, thumb, info in matches:
|
||||
|
||||
if "pelisyseries.com" in host:
|
||||
interm = url
|
||||
url = thumb
|
||||
thumb = interm
|
||||
|
||||
if "<span" in info: # new style
|
||||
pattern = ".*?[^>]+>.*?Temporada\s*(?P<season>\d+)?.*?Capitulo(?:s)?\s*(?P<episode>\d+)?" \
|
||||
"(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)?<\/span>\s*Calidad\s*<span[^>]+>" \
|
||||
"[\[]\s*(?P<quality>.*?)?\s*[\]]<\/span>"
|
||||
if "Especial" in info: # Capitulos Especiales
|
||||
pattern = ".*?[^>]+>.*?Temporada.*?\[.*?(?P<season>\d+).*?\].*?Capitulo.*?\[\s*(?P<episode>\d+).*?\]?(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)?<\/span>\s*Calidad\s*<span[^>]+>[\[]\s*(?P<quality>.*?)?\s*[\]]<\/span>"
|
||||
|
||||
if not scrapertools.find_single_match(info, pattern): #en caso de error de formato, creo uno básico
|
||||
logger.debug("patron episodioNEW: " + pattern)
|
||||
logger.debug(info)
|
||||
info = '><strong>%sTemporada %s Capitulo 0</strong> - <span >Español Castellano</span> Calidad <span >[%s]</span>' % (item.contentTitle, season, item.infoLabels['quality'])
|
||||
r = re.compile(pattern)
|
||||
match = [m.groupdict() for m in r.finditer(info)][0]
|
||||
|
||||
if match['season'] is None: match['season'] = season
|
||||
if match['episode'] is None: match['episode'] = "0"
|
||||
if match['quality']:
|
||||
item.quality = match['quality']
|
||||
|
||||
if match["episode2"]:
|
||||
multi = True
|
||||
title = "%s (%sx%s-%s) [%s]" % (item.show, match["season"], str(match["episode"]).zfill(2),
|
||||
str(match["episode2"]).zfill(2), match["lang"])
|
||||
if not config.get_setting("unify") and match["quality"]: #Si Titulos Inteligentes NO seleccionados:
|
||||
title = "%s[%s]" % (title, match["quality"])
|
||||
else:
|
||||
multi = False
|
||||
title = "%s (%sx%s) [%s]" % (item.show, match["season"], str(match["episode"]).zfill(2),
|
||||
match["lang"])
|
||||
if not config.get_setting("unify") and match["quality"]: #Si Titulos Inteligentes NO seleccionados:
|
||||
title = "%s[%s]" % (title, match["quality"])
|
||||
|
||||
else: # old style
|
||||
if scrapertools.find_single_match(info, '\[\d{3}\]'):
|
||||
info = re.sub(r'\[(\d{3}\])', r'[Cap.\1', info)
|
||||
elif scrapertools.find_single_match(info, '\[Cap.\d{2}_\d{2}\]'):
|
||||
info = re.sub(r'\[Cap.(\d{2})_(\d{2})\]', r'[Cap.1\1_1\2]', info)
|
||||
elif scrapertools.find_single_match(info, '\[Cap.([A-Za-z]+)\]'):
|
||||
info = re.sub(r'\[Cap.([A-Za-z]+)\]', '[Cap.100]', info)
|
||||
if scrapertools.find_single_match(info, '\[Cap.\d{2,3}'):
|
||||
pattern = "\[(?P<quality>.*?)\].*?\[Cap.(?P<season>\d).*?(?P<episode>\d{2})(?:_(?P<season2>\d+)" \
|
||||
"(?P<episode2>\d{2}))?.*?\].*?(?:\[(?P<lang>.*?)\])?"
|
||||
elif scrapertools.find_single_match(info, 'Cap.\d{2,3}'):
|
||||
pattern = ".*?Temp.*?\s(?P<quality>.*?)\s.*?Cap.(?P<season>\d).*?(?P<episode>\d{2})(?:_(?P<season2>\d+)(?P<episode2>\d{2}))?.*?\s(?P<lang>.*)?"
|
||||
|
||||
if not scrapertools.find_single_match(info, pattern): #en caso de error de formato, creo uno básico
|
||||
logger.debug("patron episodioOLD: " + pattern)
|
||||
logger.debug(info)
|
||||
info = '%s [%s][Cap.%s00][Español]' % (item.contentTitle, item.infoLabels['quality'], season)
|
||||
r = re.compile(pattern)
|
||||
match = [m.groupdict() for m in r.finditer(info)][0]
|
||||
|
||||
str_lang = ""
|
||||
if match['quality']:
|
||||
item.quality = match['quality']
|
||||
|
||||
if match["lang"] is not None:
|
||||
str_lang = "[%s]" % match["lang"]
|
||||
item.quality = "%s %s" % (item.quality, match['lang'])
|
||||
|
||||
if match["season2"] and match["episode2"]:
|
||||
multi = True
|
||||
if match["season"] == match["season2"]:
|
||||
|
||||
title = "%s (%sx%s-%s) %s" % (item.show, match["season"], match["episode"],
|
||||
match["episode2"], str_lang)
|
||||
if not config.get_setting("unify") and match["quality"]: #Si Titulos Inteligentes NO seleccionados:
|
||||
title = "%s[%s]" % (title, match["quality"])
|
||||
else:
|
||||
title = "%s (%sx%s-%sx%s) %s" % (item.show, match["season"], match["episode"],
|
||||
match["season2"], match["episode2"], str_lang)
|
||||
if not config.get_setting("unify") and match["quality"]: #Si Titulos Inteligentes NO seleccionados:
|
||||
title = "%s[%s]" % (title, match["quality"])
|
||||
else:
|
||||
title = "%s (%sx%s) %s" % (item.show, match["season"], match["episode"], str_lang)
|
||||
if not config.get_setting("unify") and match["quality"]: #Si Titulos Inteligentes NO seleccionados:
|
||||
title = "%s[%s]" % (title, match["quality"])
|
||||
multi = False
|
||||
|
||||
season = match['season']
|
||||
episode = match['episode']
|
||||
logger.debug("title: " + title + " / url: " + url + " / calidad: " + item.quality + " / multi: " + str(multi) + " / Season: " + str(season) + " / EpisodeNumber: " + str(episode))
|
||||
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb,
|
||||
quality=item.quality, multi=multi, contentSeason=season,
|
||||
contentEpisodeNumber=episode, infoLabels = infoLabels))
|
||||
# order list
|
||||
#tmdb.set_infoLabels(itemlist, True)
|
||||
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True)
|
||||
if len(itemlist) > 1:
|
||||
itemlist = sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber)))
|
||||
|
||||
if config.get_videolibrary_support() and len(itemlist) > 0:
|
||||
itemlist.append(
|
||||
item.clone(title="Añadir esta serie a la videoteca", action="add_serie_to_library", extra="episodios", quality=calidad))
|
||||
|
||||
return itemlist
|
||||
|
||||
def search(item, texto):
    logger.info("search:" + texto)
    # texto = texto.replace(" ", "+")

    try:
        item.post = "q=%s" % texto
        item.pattern = "buscar-list"
        itemlist = listado_busqueda(item)

        return itemlist

    # Catch the exception so the global search is not interrupted if a channel fails
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def newest(categoria):
    logger.info()
    itemlist = []
    item = Item()
    try:
        item.extra = 'pelilist'
        if categoria == 'torrent':
            item.url = host + 'peliculas/'

            itemlist = listado(item)
            if itemlist[-1].title == ">> Página siguiente":
                itemlist.pop()
            item.url = host + 'series/'
            itemlist.extend(listado(item))
            if itemlist[-1].title == ">> Página siguiente":
                itemlist.pop()

        if categoria == 'peliculas 4k':
            item.url = host + 'peliculas-hd/4kultrahd/'
            itemlist.extend(listado(item))
            if itemlist[-1].title == ">> Página siguiente":
                itemlist.pop()

        if categoria == 'anime':
            item.url = host + 'anime/'
            itemlist.extend(listado(item))
            if itemlist[-1].title == ">> Página siguiente":
                itemlist.pop()

        if categoria == 'documentales':
            item.url = host + 'documentales/'
            itemlist.extend(listado(item))
            if itemlist[-1].title == ">> Página siguiente":
                itemlist.pop()

    # Catch the exception so the "newest" channel is not interrupted if a channel fails
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist

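A hypothetical driver for this entry point, assuming it is called the way novelty listings usually invoke channel modules (the category strings mirror the branches above; `newest` and `logger` are this module's own names):

# Hypothetical usage of newest(); categoria values mirror the branches above.
for categoria in ["torrent", "peliculas 4k", "anime", "documentales"]:
    for it in newest(categoria)[:3]:
        logger.info("%s -> %s" % (categoria, it.title))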
@@ -398,8 +398,8 @@ def findvideos(item):
    # Retitle the servers by prefixing the channel name, and give them the
    # item's infoLabels and images when the server has none of its own
    for server in list_servers:
        if not server.action:  # Ignore the labels
            continue
        #if not server.action:  # Ignore/ALLOW the labels
        #    continue

        server.contentChannel = server.channel
        server.channel = "videolibrary"

@@ -25,7 +25,7 @@ ficherocookies = os.path.join(config.get_data_path(), "cookies.dat")

# Default headers, used when none are specified
default_headers = dict()
default_headers["User-Agent"] = "Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3163.100 Safari/537.36"
default_headers["User-Agent"] = "Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3163.100 Safari/537.36"
default_headers["Accept"] = "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8"
default_headers["Accept-Language"] = "es-ES,es;q=0.8,en-US;q=0.5,en;q=0.3"
default_headers["Accept-Charset"] = "UTF-8"

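For context, a sketch of how these defaults are typically overridden for a single request; the URL and Referer are made-up values, and `downloadpage(..., headers=...)` is the call shape visible in the kbagi hunk further down:

# Hypothetical request: start from the module defaults and override per call.
header = dict(default_headers)
header["Referer"] = "https://example.com/"  # made-up value, for illustration only
data = httptools.downloadpage("https://example.com/page", headers=header).data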
@@ -129,6 +129,17 @@ def token_trakt(item):
    return itemlist


def set_trakt_info(item):
    logger.info()
    import xbmcgui
    # Send the data to trakt
    try:
        info = item.infoLabels
        ids = jsontools.dump({'tmdb': info['tmdb_id'], 'imdb': info['imdb_id'], 'slug': info['title']})
        xbmcgui.Window(10000).setProperty('script.trakt.ids', ids)
    except:
        pass

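A quick sketch of reading the property back on the receiving side; `getProperty` is the standard Kodi counterpart of `setProperty`, and `jsontools.load` is assumed here to be the inverse of `jsontools.dump`:

import xbmcgui

# Read back what set_trakt_info() stored on the Kodi home window (id 10000).
ids = xbmcgui.Window(10000).getProperty('script.trakt.ids')
if ids:
    info = jsontools.load(ids)  # assumed inverse of jsontools.dump
    logger.info("trakt ids: tmdb=%s imdb=%s" % (info.get('tmdb'), info.get('imdb')))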
def get_trakt_watched(id_type, mediatype, update=False):
    logger.info()


@@ -136,6 +136,11 @@ def run(item=None):

    # Special play action
    if item.action == "play":
        # define the info for trakt
        try:
            trakt_tools.set_trakt_info(item)
        except:
            pass
        logger.info("item.action=%s" % item.action.upper())
        # logger.debug("item_toPlay: " + "\n" + item.tostring('\n'))


BIN plugin.video.alfa/resources/media/channels/banner/tvsinpagar.png (new file; binary image not shown)
BIN plugin.video.alfa/resources/media/channels/thumb/tvsinpagar.png (new file; binary image not shown)
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-

from channels import kbagi
from core import httptools
from core import jsontools
from core import scrapertools
@@ -8,15 +9,16 @@ from platformcode import logger

def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    domain = "diskokosmiko.mx"
    if "kbagi.com" in page_url:
        from channels import kbagi
        logueado, error_message = kbagi.login("kbagi.com")
        if not logueado:
            return False, error_message
        domain = "kbagi.com"
    logueado, error_message = kbagi.login(domain)
    if not logueado:
        return False, error_message

    data = httptools.downloadpage(page_url).data
    # note: a plain ("a" or "b") in data would only test the first string, so each marker is checked separately
    if any(msg in data for msg in ("File was deleted", "Not Found", "File was locked by administrator")):
        return False, "[kbagi] El archivo no existe o ha sido borrado"
        return False, "[%s] El archivo no existe o ha sido borrado" % domain

    return True, ""


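The membership test above was originally written as `("File was deleted" or "Not Found" or ...) in data`, which only ever checks the first string: `or` returns its first truthy operand. A minimal demonstration:

data = "Not Found"

# Original form: the parenthesised or-chain collapses to "File was deleted",
# so this page is wrongly reported as available.
print(("File was deleted" or "Not Found") in data)                    # False

# Corrected form, as used above: test each marker separately.
print(any(msg in data for msg in ("File was deleted", "Not Found")))  # True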
@@ -31,11 +31,10 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=

    data = httptools.downloadpage(page_url, cookies=False, headers=header).data

    subtitle = scrapertools.find_single_match(data, '<track kind="captions" src="([^"]+)" srclang="es"')

    try:
        code = scrapertools.find_single_match(data, '<span[^>]+id="[^"]+">([^<]{40,})</span>')
        code = scrapertools.find_single_match(data, '<p style="" id="[^"]+">(.*?)</p>')
        _0x59ce16 = eval(scrapertools.find_single_match(data, '_0x59ce16=([^;]+)').replace('parseInt', 'int'))
        _1x4bfb36 = eval(scrapertools.find_single_match(data, '_1x4bfb36=([^;]+)').replace('parseInt', 'int'))
        parseInt = eval(scrapertools.find_single_match(data, '_0x30725e,(\(parseInt.*?)\),').replace('parseInt', 'int'))

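The three `eval` lines above rely on a small trick: the scraped JavaScript expressions are pure arithmetic except for `parseInt`, so rewriting `parseInt` to Python's `int` makes them evaluable directly. A toy illustration with a made-up expression:

# Hypothetical obfuscated-JS fragment of the kind the regexes above capture:
js_expr = "parseInt('0x12', 16) + 3 * 2"

# Same trick as above: swap parseInt for Python's int, then evaluate.
value = eval(js_expr.replace('parseInt', 'int'))
print(value)  # 24  (0x12 == 18, plus 6)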
@@ -10,6 +10,10 @@ def test_video_exists(page_url):
    data = scrapertools.cache_page(url=page_url)
    if "<h1>404 Not Found</h1>" in data:
        return False, "El archivo no existe<br/>en streamcloud o ha sido borrado."
    elif "<h1>File Not Found</h1>" in data:
        return False, "El archivo no existe<br/>en streamcloud o ha sido borrado."
    elif "<h1>Archivo no encontrado</h1>" in data:
        return False, "El archivo no existe<br/>en streamcloud o ha sido borrado."
    else:
        return True, ""

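Since all three branches return the same message, an equivalent, tighter form (a sketch only; not what this commit does, reusing the same scrapertools helper) would be:

def test_video_exists(page_url):
    data = scrapertools.cache_page(url=page_url)
    # All three "not found" pages share one error message, so test them together.
    markers = ("<h1>404 Not Found</h1>", "<h1>File Not Found</h1>",
               "<h1>Archivo no encontrado</h1>")
    if any(marker in data for marker in markers):
        return False, "El archivo no existe<br/>en streamcloud o ha sido borrado."
    return True, ""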