Merge remote-tracking branch 'alfa-addon/master' into fixes
This commit is contained in:
@@ -1,5 +1,5 @@
|
||||
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
|
||||
<addon id="plugin.video.alfa" name="Alfa" version="2.5.14" provider-name="Alfa Addon">
|
||||
<addon id="plugin.video.alfa" name="Alfa" version="2.5.15" provider-name="Alfa Addon">
|
||||
<requires>
|
||||
<import addon="xbmc.python" version="2.1.0"/>
|
||||
<import addon="script.module.libtorrent" optional="true"/>
|
||||
@@ -18,18 +18,17 @@
|
||||
<screenshot>resources/media/themes/ss/4.jpg</screenshot>
|
||||
</assets>
|
||||
<news>[B]Estos son los cambios para esta versión:[/B]
|
||||
[COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
|
||||
» seriespapaya » seriesdanko
|
||||
» speedvideo » yourupload
|
||||
» miradetodo » solocastellano
|
||||
» descargacineclasico » poseidonhd
|
||||
» estadepelis » pelismedia
|
||||
» doramasmp4 » descargas2020
|
||||
» mejortorrent » mispelisyseries
|
||||
» torrentlocura » torrentrapid
|
||||
» tumejortorrent » tvsinpagar
|
||||
» dailymotion » ver-peliculas
|
||||
» poseidonhd » cinedetodo
|
||||
» wikiseries » uptobox
|
||||
» allpeliculas » gounlimited
|
||||
» pepecine » descargas2020
|
||||
» mejortorrent » mispelisyseries
|
||||
» torrentlocura » torrentrapid
|
||||
» tumejortorrent » tvsinpagar
|
||||
» elitetorrent » netutv
|
||||
¤ arreglos internos
|
||||
¤ Agradecimientos al equipo SOD, @angedam, @alaquepasa por colaborar con ésta versión.
|
||||
¤ Agradecimientos a @angedam, @alaquepasa por colaborar con ésta versión.
|
||||
|
||||
</news>
|
||||
<description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>
|
||||
|
||||
@@ -148,12 +148,13 @@ def findvideos(item):
|
||||
action = "play",
|
||||
title = calidad,
|
||||
fulltitle = item.title,
|
||||
thumbnail = item.thumbnail,
|
||||
contentThumbnail = item.thumbnail,
|
||||
url = url,
|
||||
language = IDIOMAS['Latino']
|
||||
))
|
||||
tmdb.set_infoLabels(itemlist, seekTmdb = True)
|
||||
itemlist = servertools.get_servers_itemlist(itemlist)
|
||||
tmdb.set_infoLabels(itemlist, seekTmdb = True)
|
||||
itemlist.append(Item(channel=item.channel))
|
||||
if config.get_videolibrary_support():
|
||||
itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
|
||||
|
||||
75
plugin.video.alfa/channels/cinedetodo.json
Normal file
75
plugin.video.alfa/channels/cinedetodo.json
Normal file
@@ -0,0 +1,75 @@
|
||||
{
|
||||
"id": "cinedetodo",
|
||||
"name": "CINEDETODO",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": ["lat"],
|
||||
"thumbnail": "https://s31.postimg.cc/win1ffxyj/cinedetodo.png",
|
||||
"banner": "",
|
||||
"version": 1,
|
||||
"categories": [
|
||||
"movies"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Incluir en busqueda global",
|
||||
"default": false,
|
||||
"enabled": false,
|
||||
"visible": false
|
||||
},
|
||||
{
|
||||
"id": "filter_languages",
|
||||
"type": "list",
|
||||
"label": "Mostrar enlaces en idioma...",
|
||||
"default": 0,
|
||||
"enabled": true,
|
||||
"visible": true,
|
||||
"lvalues": [
|
||||
"No filtrar",
|
||||
"LAT"
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_peliculas",
|
||||
"type": "bool",
|
||||
"label": "Incluir en Novedades - Peliculas",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_infantiles",
|
||||
"type": "bool",
|
||||
"label": "Incluir en Novedades - Infantiles",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_latino",
|
||||
"type": "bool",
|
||||
"label": "Incluir en Novedades - latino",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_terror",
|
||||
"type": "bool",
|
||||
"label": "Incluir en Novedades - Terror",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_documentales",
|
||||
"type": "bool",
|
||||
"label": "Incluir en Novedades - Documentales",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
}
|
||||
]
|
||||
}
|
||||
207
plugin.video.alfa/channels/cinedetodo.py
Normal file
207
plugin.video.alfa/channels/cinedetodo.py
Normal file
@@ -0,0 +1,207 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# -*- Channel CinemaHD -*-
|
||||
# -*- Created for Alfa-addon -*-
|
||||
# -*- By the Alfa Develop Group -*-
|
||||
|
||||
import re
|
||||
import urllib
|
||||
from channelselector import get_thumb
|
||||
from core import httptools
|
||||
from core import scrapertools
|
||||
from core import servertools
|
||||
from core import tmdb
|
||||
from core.item import Item
|
||||
from platformcode import config, logger
|
||||
from channels import autoplay
|
||||
from channels import filtertools
|
||||
|
||||
|
||||
host = 'http://www.cinedetodo.com/'
|
||||
|
||||
IDIOMAS = {'Latino': 'LAT'}
|
||||
list_language = IDIOMAS.values()
|
||||
list_quality = []
|
||||
list_servers = ['fastplay', 'rapidvideo', 'streamplay', 'flashx', 'streamito', 'streamango', 'vidoza']
|
||||
|
||||
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
|
||||
autoplay.init(item.channel, list_servers, list_quality)
|
||||
|
||||
itemlist = list()
|
||||
itemlist.append(item.clone(title="Ultimas", action="list_all", url=host, thumbnail=get_thumb('last', auto=True)))
|
||||
itemlist.append(item.clone(title="Generos", action="section", section='genre',
|
||||
thumbnail=get_thumb('genres', auto=True)))
|
||||
# itemlist.append(item.clone(title="Por Calidad", action="section", section='quality',
|
||||
# thumbnail=get_thumb('quality', auto=True)))
|
||||
itemlist.append(item.clone(title="Alfabetico", action="section", section='alpha',
|
||||
thumbnail=get_thumb('alphabet', auto=True)))
|
||||
itemlist.append(item.clone(title="Buscar", action="search", url=host+'?s=',
|
||||
thumbnail=get_thumb('search', auto=True)))
|
||||
|
||||
autoplay.show_option(item.channel, itemlist)
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def get_source(url):
|
||||
logger.info()
|
||||
data = httptools.downloadpage(url).data
|
||||
data = re.sub(r'"|\n|\r|\t| |<br>|\s{2,}', "", data)
|
||||
return data
|
||||
|
||||
|
||||
def list_all(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
|
||||
data = get_source(item.url)
|
||||
if item.section == 'alpha':
|
||||
patron = '<span class=Num>\d+.*?<a href=(.*?) class.*?<img src=(.*?) alt=.*?<strong>(.*?)</strong>.*?'
|
||||
patron += '<td>(\d{4})</td>'
|
||||
else:
|
||||
patron = '<article id=post-.*?<a href=(.*?)>.*?<img src=(.*?) alt=.*?'
|
||||
patron += '<h3 class=Title>(.*?)<\/h3>.*?<span class=Year>(.*?)<\/span>'
|
||||
data = get_source(item.url)
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for scrapedurl, scrapedthumbnail, scrapedtitle, year in matches:
|
||||
|
||||
url = scrapedurl
|
||||
if "|" in scrapedtitle:
|
||||
scrapedtitle= scrapedtitle.split("|")
|
||||
contentTitle = scrapedtitle[0].strip()
|
||||
else:
|
||||
contentTitle = scrapedtitle
|
||||
|
||||
contentTitle = re.sub('\(.*?\)','', contentTitle)
|
||||
|
||||
title = '%s [%s]'%(contentTitle, year)
|
||||
thumbnail = 'http:'+scrapedthumbnail
|
||||
itemlist.append(item.clone(action='findvideos',
|
||||
title=title,
|
||||
url=url,
|
||||
thumbnail=thumbnail,
|
||||
contentTitle=contentTitle,
|
||||
infoLabels={'year':year}
|
||||
))
|
||||
tmdb.set_infoLabels_itemlist(itemlist, True)
|
||||
|
||||
# Paginación
|
||||
|
||||
url_next_page = scrapertools.find_single_match(data,'<a class=next.*?href=(.*?)>')
|
||||
if url_next_page:
|
||||
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_all'))
|
||||
return itemlist
|
||||
|
||||
def section(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
|
||||
data = get_source(host)
|
||||
|
||||
action = 'list_all'
|
||||
if item.section == 'quality':
|
||||
patron = 'menu-item-object-category.*?menu-item-\d+><a href=(.*?)>(.*?)<\/a>'
|
||||
elif item.section == 'genre':
|
||||
patron = '<a href=(http:.*?) class=Button STPb>(.*?)</a>'
|
||||
elif item.section == 'year':
|
||||
patron = 'custom menu-item-15\d+><a href=(.*?\?s.*?)>(\d{4})<\/a><\/li>'
|
||||
elif item.section == 'alpha':
|
||||
patron = '<li><a href=(.*?letters.*?)>(.*?)</a>'
|
||||
action = 'list_all'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for data_one, data_two in matches:
|
||||
|
||||
url = data_one
|
||||
title = data_two
|
||||
if title != 'Ver más':
|
||||
new_item = Item(channel=item.channel, title= title, url=url, action=action, section=item.section)
|
||||
itemlist.append(new_item)
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def findvideos(item):
|
||||
logger.info()
|
||||
|
||||
itemlist = []
|
||||
data = get_source(item.url)
|
||||
data = scrapertools.decodeHtmlentities(data)
|
||||
|
||||
patron = 'id=(Opt\d+)>.*?src=(.*?) frameborder.*?</iframe>'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for option, scrapedurl in matches:
|
||||
scrapedurl = scrapedurl.replace('"','').replace('&','&')
|
||||
data_video = get_source(scrapedurl)
|
||||
url = scrapertools.find_single_match(data_video, '<div class=Video>.*?src=(.*?) frameborder')
|
||||
opt_data = scrapertools.find_single_match(data,'%s><span>.*?</span>.*?<span>(.*?)</span>'%option).split('-')
|
||||
language = opt_data[0].strip()
|
||||
language = language.replace('(','').replace(')','')
|
||||
quality = opt_data[1].strip()
|
||||
if url != '' and 'youtube' not in url:
|
||||
itemlist.append(item.clone(title='%s', url=url, language=IDIOMAS[language], quality=quality, action='play'))
|
||||
elif 'youtube' in url:
|
||||
trailer = item.clone(title='Trailer', url=url, action='play', server='youtube')
|
||||
|
||||
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % '%s [%s] [%s]'%(i.server.capitalize(),
|
||||
i.language, i.quality))
|
||||
tmdb.set_infoLabels_itemlist(itemlist, True)
|
||||
try:
|
||||
itemlist.append(trailer)
|
||||
except:
|
||||
pass
|
||||
|
||||
# Requerido para FilterTools
|
||||
itemlist = filtertools.get_links(itemlist, item, list_language)
|
||||
|
||||
# Requerido para AutoPlay
|
||||
|
||||
autoplay.start(itemlist, item)
|
||||
|
||||
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
|
||||
itemlist.append(
|
||||
Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
|
||||
action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))
|
||||
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
|
||||
logger.info()
|
||||
texto = texto.replace(" ", "+")
|
||||
item.url = item.url + texto
|
||||
|
||||
if texto != '':
|
||||
return list_all(item)
|
||||
else:
|
||||
return []
|
||||
|
||||
|
||||
def newest(categoria):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
item = Item()
|
||||
try:
|
||||
if categoria in ['peliculas','latino']:
|
||||
item.url = host
|
||||
elif categoria == 'infantiles':
|
||||
item.url = host+'/animacion'
|
||||
elif categoria == 'terror':
|
||||
item.url = host+'/terror'
|
||||
elif categoria == 'documentales':
|
||||
item.url = host+'/documental'
|
||||
itemlist = list_all(item)
|
||||
if itemlist[-1].title == 'Siguiente >>':
|
||||
itemlist.pop()
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
logger.error("{0}".format(line))
|
||||
return []
|
||||
|
||||
return itemlist
|
||||
@@ -236,7 +236,7 @@ def listado(item):
|
||||
|
||||
#Determinamos y marcamos idiomas distintos del castellano
|
||||
item_local.language = []
|
||||
if "[vos" in title.lower() or "v.o.s" in title.lower() or "vo" in title.lower() or ".com/pelicula/" in scrapedurl or ".com/series-vo" in scrapedurl or "-vo/" in scrapedurl or "vos" in calidad.lower() or "vose" in calidad.lower() or "v.o.s" in calidad.lower() or ".com/peliculas-vo" in item.url:
|
||||
if "[vos" in title.lower() or "v.o.s" in title.lower() or "vo" in title.lower() or ".com/pelicula/" in scrapedurl or ".com/series-vo" in scrapedurl or "-vo/" in scrapedurl or "vos" in calidad.lower() or "vose" in calidad.lower() or "v.o.s" in calidad.lower() or "sub" in calidad.lower() or ".com/peliculas-vo" in item.url:
|
||||
item_local.language += ["VOS"]
|
||||
title = title.replace(" [Subs. integrados]", "").replace(" [subs. Integrados]", "").replace(" [VOSE", "").replace(" [VOS", "").replace(" (V.O.S.E)", "").replace(" VO", "").replace("Subtitulos", "")
|
||||
if "latino" in title.lower() or "argentina" in title.lower() or "-latino/" in scrapedurl or "latino" in calidad.lower() or "argentina" in calidad.lower():
|
||||
@@ -272,8 +272,8 @@ def listado(item):
|
||||
if "audio" in title.lower(): #Reservamos info de audio para después de TMDB
|
||||
title_subs += ['[%s]' % scrapertools.find_single_match(title, r'(\[[a|A]udio.*?\])')]
|
||||
title = re.sub(r'\[[a|A]udio.*?\]', '', title)
|
||||
if "[dual" in title.lower() or "multileng" in title.lower() or "multileng" in item_local.quality.lower():
|
||||
item_local.language += ["DUAL"]
|
||||
if "[dual" in title.lower() or "multileng" in title.lower() or "multileng" in item_local.quality.lower() or (("espa" in title.lower() or "spani" in title.lower()) and "VOS" in item_local.language):
|
||||
item_local.language[0:0] = ["DUAL"]
|
||||
title = re.sub(r'\[[D|d]ual.*?\]', '', title)
|
||||
title = re.sub(r'\[[M|m]ultileng.*?\]', '', title)
|
||||
item_local.quality = re.sub(r'\[[M|m]ultileng.*?\]', '', item_local.quality)
|
||||
@@ -309,7 +309,7 @@ def listado(item):
|
||||
|
||||
title = title.replace("Ver online ", "").replace("Descarga Serie HD ", "").replace("Descargar Serie HD ", "").replace("Descarga Serie ", "").replace("Descargar Serie ", "").replace("Ver en linea ", "").replace("Ver en linea", "").replace("HD ", "").replace("(Proper)", "").replace("RatDVD", "").replace("DVDRiP", "").replace("DVDRIP", "").replace("DVDR", "").replace("DVD9", "").replace("DVD", "").replace("DVB", "").replace("- ES ", "").replace("ES ", "").replace("COMPLETA", "").replace("(", "-").replace(")", "-").replace(".", " ").strip()
|
||||
|
||||
title = title.replace("Descargar torrent ", "").replace("Descarga Gratis ", "").replace("Descargar Estreno ", "").replace("Descargar Estrenos ", "").replace("Pelicula en latino ", "").replace("Descargar Pelicula ", "").replace("Descargar Peliculas ", "").replace("Descargar peliculas ", "").replace("Descargar Todas ", "").replace("Descargar Otras ", "").replace("Descargar ", "").replace("Descarga ", "").replace("Bajar ", "").replace("RIP ", "").replace("Rip", "").replace("RiP", "").replace("RiP", "").replace("XviD", "").replace("AC3 5.1", "").replace("AC3", "").replace("1080p ", "").replace("720p ", "").replace("DVD-Screener ", "").replace("TS-Screener ", "").replace("Screener ", "").replace("BdRemux ", "").replace("BR ", "").replace("4KULTRA", "").replace("FULLBluRay", "").replace("FullBluRay", "").replace("BluRay", "").replace("Bonus Disc", "").replace("de Cine ", "").replace("TeleCine ", "").replace("latino", "").replace("Latino", "").replace("argentina", "").replace("Argentina", "").strip()
|
||||
title = title.replace("Descargar torrent ", "").replace("Descarga Gratis ", "").replace("Descargar Estreno ", "").replace("Descargar Estrenos ", "").replace("Pelicula en latino ", "").replace("Descargar Pelicula ", "").replace("Descargar Peliculas ", "").replace("Descargar peliculas ", "").replace("Descargar Todas ", "").replace("Descargar Otras ", "").replace("Descargar ", "").replace("Descarga ", "").replace("Bajar ", "").replace("HDRIP ", "").replace("HDRiP ", "").replace("HDRip ", "").replace("RIP ", "").replace("Rip", "").replace("RiP", "").replace("XviD", "").replace("AC3 5.1", "").replace("AC3", "").replace("1080p ", "").replace("720p ", "").replace("DVD-Screener ", "").replace("TS-Screener ", "").replace("Screener ", "").replace("BdRemux ", "").replace("BR ", "").replace("4KULTRA", "").replace("FULLBluRay", "").replace("FullBluRay", "").replace("BluRay", "").replace("Bonus Disc", "").replace("de Cine ", "").replace("TeleCine ", "").replace("latino", "").replace("Latino", "").replace("argentina", "").replace("Argentina", "").strip()
|
||||
|
||||
if title.endswith("torrent gratis"): title = title[:-15]
|
||||
if title.endswith("gratis"): title = title[:-7]
|
||||
@@ -324,16 +324,9 @@ def listado(item):
|
||||
if not "HDR" in item_local.quality:
|
||||
item_local.quality += " HDR"
|
||||
|
||||
while title.endswith(' '):
|
||||
title = title[:-1]
|
||||
while title.startswith(' '):
|
||||
title = title[+1:]
|
||||
while title_alt.endswith(' '):
|
||||
title_alt = title_alt[:-1]
|
||||
while title_alt.startswith(' '):
|
||||
title_alt = title_alt[+1:]
|
||||
while item_local.quality.endswith(' '):
|
||||
item_local.quality = item_local.quality[:-1]
|
||||
title = title.strip()
|
||||
title_alt = title_alt.strip()
|
||||
item_local.quality = item_local.quality.strip()
|
||||
|
||||
if not title: #Usamos solo el title_alt en caso de que no exista el título original
|
||||
title = title_alt
|
||||
@@ -416,9 +409,9 @@ def listado(item):
|
||||
if config.get_setting("unify"): #Si Titulos Inteligentes SÍ seleccionados:
|
||||
title = title.replace("[", "-").replace("]", "-")
|
||||
|
||||
title = title.replace("--", "").replace(" []", "").replace("()", "").replace("(/)", "").replace("[/]", "")
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\]\[\/COLOR\]', '', title)
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', title)
|
||||
title = title.replace("--", "").replace(" []", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', title).strip()
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', title).strip()
|
||||
|
||||
if category == "newest": #Viene de Novedades. Marquemos el título con el nombre del canal
|
||||
title += ' -%s-' % item_local.channel.capitalize()
|
||||
@@ -427,7 +420,7 @@ def listado(item):
|
||||
|
||||
item_local.title = title
|
||||
|
||||
logger.debug("url: " + item_local.url + " / title: " + item_local.title + " / content title: " + item_local.contentTitle + "/" + item_local.contentSerieName + " / calidad: " + item_local.quality + " / year: " + year)
|
||||
logger.debug("url: " + item_local.url + " / title: " + item_local.title + " / content title: " + item_local.contentTitle + "/" + item_local.contentSerieName + " / calidad: " + item_local.quality + " / year: " + str(item_local.infoLabels['year']))
|
||||
#logger.debug(item_local)
|
||||
|
||||
if len(itemlist) == 0:
|
||||
@@ -447,15 +440,10 @@ def listado_busqueda(item):
|
||||
cnt_tot = 40 # Poner el num. máximo de items por página. Dejamos que la web lo controle
|
||||
cnt_title = 0 # Contador de líneas insertadas en Itemlist
|
||||
cnt_pag = 0 # Contador de líneas leídas de Matches
|
||||
category = "" # Guarda la categoria que viene desde una busqueda global
|
||||
|
||||
if item.cnt_pag:
|
||||
cnt_pag = item.cnt_pag # Se guarda en la lista de páginas anteriores en Item
|
||||
del item.cnt_pag
|
||||
|
||||
if item.category:
|
||||
category = item.category
|
||||
del item.category
|
||||
if item.totalItems:
|
||||
del item.totalItems
|
||||
if item.text_bold:
|
||||
@@ -578,12 +566,14 @@ def listado_busqueda(item):
|
||||
title_lista += [scrapedurl_alt]
|
||||
else:
|
||||
title_lista += [scrapedurl]
|
||||
if "juego/" in scrapedurl or "xbox" in scrapedurl.lower() or "xbox" in scrapedtitle.lower() or "xbox" in calidad.lower() or "epub" in calidad.lower() or "pdf" in calidad.lower(): # no mostramos lo que no sean videos
|
||||
if "juego/" in scrapedurl or "xbox" in scrapedurl.lower() or "xbox" in scrapedtitle.lower() or "windows" in scrapedtitle.lower() or "windows" in calidad.lower() or "nintendo" in scrapedtitle.lower() or "xbox" in calidad.lower() or "epub" in calidad.lower() or "pdf" in calidad.lower() or "pcdvd" in calidad.lower() or "crack" in calidad.lower(): # no mostramos lo que no sean videos
|
||||
continue
|
||||
cnt_title += 1 # Sería una línea real más para Itemlist
|
||||
|
||||
#Creamos una copia de Item para cada contenido
|
||||
item_local = item.clone()
|
||||
if item_local.category:
|
||||
del item_local.category
|
||||
if item_local.tipo:
|
||||
del item_local.tipo
|
||||
if item_local.totalItems:
|
||||
@@ -594,6 +584,10 @@ def listado_busqueda(item):
|
||||
del item_local.pattern
|
||||
if item_local.title_lista:
|
||||
del item_local.title_lista
|
||||
item_local.adult = True
|
||||
del item_local.adult
|
||||
item_local.folder = True
|
||||
del item_local.folder
|
||||
item_local.title = ''
|
||||
item_local.context = "['buscar_trailer']"
|
||||
|
||||
@@ -620,7 +614,7 @@ def listado_busqueda(item):
|
||||
|
||||
#Determinamos y marcamos idiomas distintos del castellano
|
||||
item_local.language = []
|
||||
if "[vos" in title.lower() or "v.o.s" in title.lower() or "vo" in title.lower() or ".com/pelicula/" in scrapedurl or ".com/series-vo" in scrapedurl or "-vo/" in scrapedurl or "vos" in calidad.lower() or "vose" in calidad.lower() or "v.o.s" in calidad.lower():
|
||||
if "[vos" in title.lower() or "v.o.s" in title.lower() or "vo" in title.lower() or ".com/pelicula/" in scrapedurl or ".com/series-vo" in scrapedurl or "-vo/" in scrapedurl or "vos" in calidad.lower() or "vose" in calidad.lower() or "v.o.s" in calidad.lower() or "sub" in calidad.lower() or ".com/peliculas-vo" in item.url:
|
||||
item_local.language += ["VOS"]
|
||||
title = title.replace(" [Subs. integrados]", "").replace(" [subs. Integrados]", "").replace(" [VOSE", "").replace(" [VOS", "").replace(" (V.O.S.E)", "").replace(" VO", "").replace("Subtitulos", "")
|
||||
if "latino" in title.lower() or "argentina" in title.lower() or "-latino/" in scrapedurl or "latino" in calidad.lower() or "argentina" in calidad.lower():
|
||||
@@ -654,8 +648,8 @@ def listado_busqueda(item):
|
||||
if "audio" in title.lower(): #Reservamos info de audio para después de TMDB
|
||||
title_subs += ['[%s]' % scrapertools.find_single_match(title, r'(\[[a|A]udio.*?\])')]
|
||||
title = re.sub(r'\[[a|A]udio.*?\]', '', title)
|
||||
if "[dual" in title.lower() or "multileng" in title.lower() or "multileng" in item_local.quality.lower():
|
||||
item_local.language += ["DUAL"]
|
||||
if "[dual" in title.lower() or "multileng" in title.lower() or "multileng" in item_local.quality.lower() or (("espa" in title.lower() or "spani" in title.lower()) and "VOS" in item_local.language):
|
||||
item_local.language[0:0] = ["DUAL"]
|
||||
title = re.sub(r'\[[D|d]ual.*?\]', '', title)
|
||||
title = re.sub(r'\[[M|m]ultileng.*?\]', '', title)
|
||||
item_local.quality = re.sub(r'\[[M|m]ultileng.*?\]', '', item_local.quality)
|
||||
@@ -691,7 +685,7 @@ def listado_busqueda(item):
|
||||
|
||||
title = title.replace("Ver online ", "").replace("Descarga Serie HD ", "").replace("Descargar Serie HD ", "").replace("Descarga Serie ", "").replace("Descargar Serie ", "").replace("Ver en linea ", "").replace("Ver en linea", "").replace("HD ", "").replace("(Proper)", "").replace("RatDVD", "").replace("DVDRiP", "").replace("DVDRIP", "").replace("DVDR", "").replace("DVD9", "").replace("DVD", "").replace("DVB", "").replace("- ES ", "").replace("ES ", "").replace("COMPLETA", "").replace("(", "-").replace(")", "-").replace(".", " ").strip()
|
||||
|
||||
title = title.replace("Descargar torrent ", "").replace("Descarga Gratis ", "").replace("Descargar Estreno ", "").replace("Descargar Estrenos ", "").replace("Pelicula en latino ", "").replace("Descargar Pelicula ", "").replace("Descargar Peliculas ", "").replace("Descargar peliculas ", "").replace("Descargar Todas ", "").replace("Descargar Otras ", "").replace("Descargar ", "").replace("Descarga ", "").replace("Bajar ", "").replace("RIP ", "").replace("Rip", "").replace("RiP", "").replace("RiP", "").replace("XviD", "").replace("AC3 5.1", "").replace("AC3", "").replace("1080p ", "").replace("720p ", "").replace("DVD-Screener ", "").replace("TS-Screener ", "").replace("Screener ", "").replace("BdRemux ", "").replace("BR ", "").replace("4KULTRA", "").replace("FULLBluRay", "").replace("FullBluRay", "").replace("BluRay", "").replace("Bonus Disc", "").replace("de Cine ", "").replace("TeleCine ", "").replace("latino", "").replace("Latino", "").replace("argentina", "").replace("Argentina", "").strip()
|
||||
title = title.replace("Descargar torrent ", "").replace("Descarga Gratis ", "").replace("Descargar Estreno ", "").replace("Descargar Estrenos ", "").replace("Pelicula en latino ", "").replace("Descargar Pelicula ", "").replace("Descargar Peliculas ", "").replace("Descargar peliculas ", "").replace("Descargar Todas ", "").replace("Descargar Otras ", "").replace("Descargar ", "").replace("Descarga ", "").replace("Bajar ", "").replace("HDRIP ", "").replace("HDRiP ", "").replace("HDRip ", "").replace("RIP ", "").replace("Rip", "").replace("RiP", "").replace("XviD", "").replace("AC3 5.1", "").replace("AC3", "").replace("1080p ", "").replace("720p ", "").replace("DVD-Screener ", "").replace("TS-Screener ", "").replace("Screener ", "").replace("BdRemux ", "").replace("BR ", "").replace("4KULTRA", "").replace("FULLBluRay", "").replace("FullBluRay", "").replace("BluRay", "").replace("Bonus Disc", "").replace("de Cine ", "").replace("TeleCine ", "").replace("latino", "").replace("Latino", "").replace("argentina", "").replace("Argentina", "").strip()
|
||||
|
||||
if "pelisyseries.com" in host and item_local.contentType == "tvshow":
|
||||
titulo = ''
|
||||
@@ -715,19 +709,18 @@ def listado_busqueda(item):
|
||||
if title.endswith(" -"): title = title[:-2]
|
||||
if "en espa" in title: title = title[:-11]
|
||||
#title = re.sub(r'^\s', '', title)
|
||||
title = title.replace("a?o", 'año').replace("a?O", 'año').replace("A?o", 'Año').replace("A?O", 'Año')
|
||||
while title.startswith(' '):
|
||||
title = title[+1:]
|
||||
while title.endswith(' '):
|
||||
title = title[:-1]
|
||||
title = title.replace("a?o", 'año').replace("a?O", 'año').replace("A?o", 'Año').replace("A?O", 'Año').strip()
|
||||
|
||||
#Preparamos calidad
|
||||
item_local.quality = item_local.quality.replace("[ ", "").replace(" ]", "") #Preparamos calidad para Series
|
||||
item_local.quality = re.sub(r'\[\d{4}\]', '', item_local.quality) #Quitar año, si lo tiene
|
||||
item_local.quality = re.sub(r'\[Cap.*?\]', '', item_local.quality) #Quitar episodios, si lo tiene
|
||||
item_local.quality = re.sub(r'\[Docu.*?\]', '', item_local.quality) #Quitar tipo contenidos, si lo tiene
|
||||
if "[es-" in item_local.quality.lower() or (("cast" in item_local.quality.lower() or "spani" in item_local.quality.lower()) and ("eng" in item_local.quality.lower() or "ing" in item_local.quality.lower())): #Mirar si es DUAL
|
||||
item_local.language += ["DUAL"] #Salvar DUAL en idioma
|
||||
#Mirar si es DUAL
|
||||
if "VOS" in item_local.language and "DUAL" not in item_local.language and ("[sp" in item_local.quality.lower() or "espa" in item_local.quality.lower() or "cast" in item_local.quality.lower() or "spani" in item_local.quality.lower()):
|
||||
item_local.language[0:0] = ["DUAL"]
|
||||
if ("[es-" in item_local.quality.lower() or (("cast" in item_local.quality.lower() or "espa" in item_local.quality.lower() or "spani" in item_local.quality.lower()) and ("eng" in item_local.quality.lower() or "ing" in item_local.quality.lower()))) and "DUAL" not in item_local.language: #Mirar si es DUAL
|
||||
item_local.language[0:0] = ["DUAL"] #Salvar DUAL en idioma
|
||||
item_local.quality = re.sub(r'\[[es|ES]-\w+]', '', item_local.quality) #borrar DUAL
|
||||
item_local.quality = re.sub(r'[\s|-][c|C]aste.+', '', item_local.quality) #Borrar después de Castellano
|
||||
item_local.quality = re.sub(r'[\s|-][e|E]spa.+', '', item_local.quality) #Borrar después de Español
|
||||
@@ -735,9 +728,7 @@ def listado_busqueda(item):
|
||||
item_local.quality = re.sub(r'[\s|-][i|I|e|E]ngl.+', '', item_local.quality) #Borrar después de Inglés-English
|
||||
item_local.quality = item_local.quality.replace("[", "").replace("]", " ").replace("ALTA DEFINICION", "HDTV").replace(" Cap", "")
|
||||
#Borrar palabras innecesarias restantes
|
||||
item_local.quality = item_local.quality.replace("Espaol", "").replace("Español", "").replace("Espa", "").replace("Castellano ", "").replace("Castellano", "").replace("Spanish", "").replace("English", "").replace("Ingles", "").replace("Latino", "").replace("+Subs", "").replace("-Subs", "").replace("Subs", "").replace("VOSE", "").replace("VOS", "")
|
||||
while item_local.quality.endswith(" "): #Borrar espacios de cola
|
||||
item_local.quality = item_local.quality[:-1]
|
||||
item_local.quality = item_local.quality.replace("Espaol", "").replace("Español", "").replace("Espa", "").replace("Castellano ", "").replace("Castellano", "").replace("Spanish", "").replace("English", "").replace("Ingles", "").replace("Latino", "").replace("+Subs", "").replace("-Subs", "").replace("Subs", "").replace("VOSE", "").replace("VOS", "").strip()
|
||||
|
||||
#Limpieza final del título y guardado en las variables según su tipo de contenido
|
||||
item_local.title = title
|
||||
@@ -816,7 +807,7 @@ def listado_busqueda(item):
|
||||
#Agrega el item local a la lista itemlist
|
||||
itemlist.append(item_local.clone())
|
||||
|
||||
if not category: #Si este campo no existe es que viene de la primera pasada de una búsqueda global
|
||||
if not item.category: #Si este campo no existe es que viene de la primera pasada de una búsqueda global
|
||||
return itemlist #Retornamos sin pasar por la fase de maquillaje para ahorra tiempo
|
||||
|
||||
#Pasamos a TMDB la lista completa Itemlist
|
||||
@@ -872,12 +863,12 @@ def listado_busqueda(item):
|
||||
if config.get_setting("unify"): #Si Titulos Inteligentes SÍ seleccionados:
|
||||
title = title.replace("[", "-").replace("]", "-")
|
||||
|
||||
title = title.replace("--", "").replace(" []", "").replace("()", "").replace("(/)", "").replace("[/]", "")
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\]\[\/COLOR\]', '', title)
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', title)
|
||||
title = title.replace("--", "").replace(" []", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', title).strip()
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', title).strip()
|
||||
item_local.title = title
|
||||
|
||||
logger.debug("url: " + item_local.url + " / title: " + item_local.title + " / content title: " + item_local.contentTitle + "/" + item_local.contentSerieName + " / calidad: " + item_local.quality + "[" + str(item_local.language) + "]" + " / calidad ORG: " + calidad + " / year: " + year + " / tamaño: " + size)
|
||||
logger.debug("url: " + item_local.url + " / title: " + item_local.title + " / content title: " + item_local.contentTitle + "/" + item_local.contentSerieName + " / calidad: " + item_local.quality + "[" + str(item_local.language) + "]" + " / year: " + str(item_local.infoLabels['year']))
|
||||
|
||||
#logger.debug(item_local)
|
||||
|
||||
@@ -889,7 +880,6 @@ def listado_busqueda(item):
|
||||
return itemlist
|
||||
|
||||
def findvideos(item):
|
||||
import xbmc
|
||||
from core import channeltools
|
||||
logger.info()
|
||||
itemlist = []
|
||||
@@ -997,18 +987,31 @@ def findvideos(item):
|
||||
verificar_enlaces_descargas = -1 #Verificar todos los enlaces Descargar
|
||||
verificar_enlaces_descargas_validos = True #"¿Contar sólo enlaces 'verificados' en Descargar?"
|
||||
excluir_enlaces_descargas = [] #Lista vacía de servidores excluidos en Descargar
|
||||
|
||||
|
||||
# Saber si estamos en una ventana emergente lanzada desde una viñeta del menú principal,
|
||||
# con la función "play_from_library"
|
||||
unify_status = False
|
||||
if xbmc.getCondVisibility('Window.IsMedia') == 1:
|
||||
try:
|
||||
import xbmc
|
||||
if xbmc.getCondVisibility('Window.IsMedia') == 1:
|
||||
unify_status = config.get_setting("unify")
|
||||
except:
|
||||
unify_status = config.get_setting("unify")
|
||||
|
||||
#Salvamos la información de max num. de episodios por temporada para despues de TMDB
|
||||
if item.infoLabels['temporada_num_episodios']:
|
||||
num_episodios = item.infoLabels['temporada_num_episodios']
|
||||
else:
|
||||
num_episodios = 1
|
||||
|
||||
# Obtener la información actualizada del Episodio, si no la hay
|
||||
if not item.infoLabels['tmdb_id'] or (not item.infoLabels['episodio_titulo'] and item.contentType == 'episode'):
|
||||
tmdb.set_infoLabels(item, True)
|
||||
elif (not item.infoLabels['tvdb_id'] and item.contentType == 'episode') or item.contentChannel == "videolibrary":
|
||||
tmdb.set_infoLabels(item, True)
|
||||
#Restauramos la información de max num. de episodios por temporada despues de TMDB
|
||||
if item.infoLabels['temporada_num_episodios'] and num_episodios > item.infoLabels['temporada_num_episodios']:
|
||||
item.infoLabels['temporada_num_episodios'] = num_episodios
|
||||
|
||||
# Descarga la página
|
||||
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
|
||||
@@ -1028,6 +1031,8 @@ def findvideos(item):
|
||||
item.infoLabels['episodio_titulo'] = re.sub(r'\s?\[.*?\]', '', item.infoLabels['episodio_titulo'])
|
||||
if item.infoLabels['episodio_titulo'] == item.contentSerieName:
|
||||
item.infoLabels['episodio_titulo'] = ''
|
||||
if item.infoLabels['aired'] and item.contentType == "episode":
|
||||
item.infoLabels['year'] = scrapertools.find_single_match(str(item.infoLabels['aired']), r'\/(\d{4})')
|
||||
|
||||
#Generamos una copia de Item para trabajar sobre ella
|
||||
item_local = item.clone()
|
||||
@@ -1057,10 +1062,10 @@ def findvideos(item):
|
||||
else:
|
||||
title = item_local.title
|
||||
title_gen = title
|
||||
|
||||
title_gen = re.sub(r'\s\[COLOR \w+\]\[\]\[\/COLOR\]', '', title_gen) #Quitamos etiquetas vacías
|
||||
title_gen = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', title_gen) #Quitamos colores vacíos
|
||||
title_gen = title_gen.replace(" []", "") #Quitamos etiquetas vacías
|
||||
|
||||
title_gen = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', title_gen).strip() #Quitamos etiquetas vacías
|
||||
title_gen = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', title_gen).strip() #Quitamos colores vacíos
|
||||
title_gen = title_gen.replace(" []", "").strip() #Quitamos etiquetas vacías
|
||||
|
||||
if not unify_status: #Si Titulos Inteligentes NO seleccionados:
|
||||
title_gen = '**- [COLOR gold]Enlaces Ver: [/COLOR]%s[COLOR gold] -**[/COLOR]' % (title_gen)
|
||||
@@ -1074,9 +1079,9 @@ def findvideos(item):
|
||||
|
||||
#Ahora pintamos el link del Torrent, si lo hay
|
||||
if item_local.url: # Hay Torrent ?
|
||||
item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][Torrent][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red][%s][/COLOR]' % (item_local.quality, str(item_local.language)) #Preparamos título de Torrent
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\]\[\/COLOR\]', '', item_local.title) #Quitamos etiquetas vacías
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', item_local.title) #Quitamos colores vacíos
|
||||
item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][Torrent][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.quality, str(item_local.language)) #Preparamos título de Torrent
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', item_local.title).strip() #Quitamos etiquetas vacías
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', item_local.title).strip() #Quitamos colores vacíos
|
||||
item_local.alive = "??" #Calidad del link sin verificar
|
||||
item_local.action = "play" #Visualizar vídeo
|
||||
|
||||
@@ -1156,9 +1161,9 @@ def findvideos(item):
|
||||
item_local.action = "play"
|
||||
item_local.server = servidor
|
||||
item_local.url = enlace
|
||||
item_local.title = item_local.title.replace("[]", "")
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\]\[\/COLOR\]', '', item_local.title)
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', item_local.title)
|
||||
item_local.title = item_local.title.replace("[]", "").strip()
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', item_local.title).strip()
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', item_local.title).strip()
|
||||
itemlist.append(item_local.clone())
|
||||
except:
|
||||
pass
|
||||
@@ -1249,9 +1254,9 @@ def findvideos(item):
|
||||
item_local.action = "play"
|
||||
item_local.server = servidor
|
||||
item_local.url = enlace
|
||||
item_local.title = parte_title.replace("[]", "")
|
||||
item_local.title = re.sub(r'\[COLOR \w+\]\[\]\[\/COLOR\]', '', item_local.title)
|
||||
item_local.title = re.sub(r'\[COLOR \w+\]-\[\/COLOR\]', '', item_local.title)
|
||||
item_local.title = parte_title.replace("[]", "").strip()
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', item_local.title).strip()
|
||||
item_local.title = re.sub(r'\[COLOR \w+\]-\[\/COLOR\]', '', item_local.title).strip()
|
||||
itemlist.append(item_local.clone())
|
||||
except:
|
||||
pass
|
||||
@@ -1426,10 +1431,10 @@ def episodios(item):
|
||||
item_local.title = '%s [%s] [%s] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.title, item_local.infoLabels['year'], rating, item_local.quality, str(item_local.language))
|
||||
|
||||
#Quitamos campos vacíos
|
||||
item_local.infoLabels['episodio_titulo'] = item_local.infoLabels['episodio_titulo'].replace(" []", "")
|
||||
item_local.title = item_local.title.replace(" []", "")
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\]\[\/COLOR\]', '', item_local.title)
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]-\[\/COLOR\]', '', item_local.title)
|
||||
item_local.infoLabels['episodio_titulo'] = item_local.infoLabels['episodio_titulo'].replace(" []", "").strip()
|
||||
item_local.title = item_local.title.replace(" []", "").strip()
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', item_local.title).strip()
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]-\[\/COLOR\]', '', item_local.title).strip()
|
||||
if num_episodios < item_local.contentEpisodeNumber:
|
||||
num_episodios = item_local.contentEpisodeNumber
|
||||
if num_episodios and not item_local.infoLabels['temporada_num_episodios']:
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
"name": "Elite Torrent",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": ["cast"],
|
||||
"language": ["*"],
|
||||
"thumbnail": "elitetorrent.png",
|
||||
"banner": "elitetorrent.png",
|
||||
"categories": [
|
||||
@@ -15,10 +15,10 @@
|
||||
],
|
||||
"settings":[
|
||||
{
|
||||
"id": "include_in_newest_torrent",
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Incluir en Novedades - Torrent",
|
||||
"default": true,
|
||||
"label": "Incluir en busqueda global",
|
||||
"default": false,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
}
|
||||
|
||||
@@ -1,111 +1,586 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import re
|
||||
import sys
|
||||
import urllib
|
||||
import urlparse
|
||||
|
||||
from channelselector import get_thumb
|
||||
from core import httptools
|
||||
from core import scrapertools
|
||||
from core import servertools
|
||||
from core.item import Item
|
||||
from platformcode import logger
|
||||
from platformcode import config, logger
|
||||
from core import tmdb
|
||||
|
||||
BASE_URL = 'http://www.elitetorrent.wesconference.net'
|
||||
host = 'http://www.elitetorrent.biz'
|
||||
|
||||
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
|
||||
itemlist = []
|
||||
itemlist.append(Item(channel=item.channel, title="Docus y TV", action="peliculas",
|
||||
url="http://www.elitetorrent.wesconference.net/categoria/6/docus-y-tv/modo:mini",
|
||||
viewmode="movie_with_plot"))
|
||||
itemlist.append(Item(channel=item.channel, title="Estrenos", action="peliculas",
|
||||
url="http://www.elitetorrent.wesconference.net/categoria/1/estrenos/modo:mini", viewmode="movie_with_plot"))
|
||||
itemlist.append(Item(channel=item.channel, title="Películas", action="peliculas",
|
||||
url="http://www.elitetorrent.wesconference.net/categoria/2/peliculas/modo:mini", viewmode="movie_with_plot"))
|
||||
itemlist.append(Item(channel=item.channel, title="Peliculas HDRip", action="peliculas",
|
||||
url="http://www.elitetorrent.wesconference.net/categoria/13/peliculas-hdrip/modo:mini",
|
||||
viewmode="movie_with_plot"))
|
||||
itemlist.append(Item(channel=item.channel, title="Peliculas MicroHD", action="peliculas",
|
||||
url="http://www.elitetorrent.wesconference.net/categoria/17/peliculas-microhd/modo:mini",
|
||||
viewmode="movie_with_plot"))
|
||||
itemlist.append(Item(channel=item.channel, title="Peliculas VOSE", action="peliculas",
|
||||
url="http://www.elitetorrent.wesconference.net/categoria/14/peliculas-vose/modo:mini",
|
||||
viewmode="movie_with_plot"))
|
||||
itemlist.append(Item(channel=item.channel, title="Series", action="peliculas",
|
||||
url="http://www.elitetorrent.wesconference.net/categoria/4/series/modo:mini", viewmode="movie_with_plot"))
|
||||
itemlist.append(Item(channel=item.channel, title="Series VOSE", action="peliculas",
|
||||
url="http://www.elitetorrent.wesconference.net/categoria/16/series-vose/modo:mini",
|
||||
viewmode="movie_with_plot"))
|
||||
|
||||
thumb_pelis = get_thumb("channels_movie.png")
|
||||
thumb_pelis_hd = get_thumb("channels_movie_hd.png")
|
||||
thumb_series = get_thumb("channels_tvshow.png")
|
||||
thumb_series_hd = get_thumb("channels_tvshow_hd.png")
|
||||
thumb_buscar = get_thumb("search.png")
|
||||
|
||||
itemlist.append(Item(channel=item.channel, action="submenu", title="Películas", url=host, extra="peliculas", thumbnail=thumb_pelis))
|
||||
|
||||
itemlist.append(Item(channel=item.channel, action="submenu", title="Series", url=host, extra="series", thumbnail=thumb_series))
|
||||
|
||||
itemlist.append(Item(channel=item.channel, action="search", title="Buscar", url=host, thumbnail=thumb_buscar))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def submenu(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
|
||||
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
|
||||
|
||||
patron = '<div class="cab_menu">.*?<\/div>' #Menú principal
|
||||
data1 = scrapertools.get_match(data, patron)
|
||||
patron = '<div id="menu_langen">.*?<\/div>' #Menú de idiomas
|
||||
data1 += scrapertools.get_match(data, patron)
|
||||
|
||||
patron = '<a href="(.*?)".*?title="(.*?)"' #Encontrar todos los apartados
|
||||
matches = re.compile(patron, re.DOTALL).findall(data1)
|
||||
|
||||
for scrapedurl, scrapedtitle in matches:
|
||||
scrapedtitle = re.sub('\r\n', '', scrapedtitle).decode('utf8').strip()
|
||||
scrapedtitle = scrapedtitle.replace(" torrent", "").replace(" Torrent", "").replace("Series y ", "").title()
|
||||
|
||||
if "castellano" in scrapedtitle.lower(): #Evita la entrada de peliculas castellano del menú de idiomas
|
||||
continue
|
||||
|
||||
if item.extra == "series": #Tratamos Series
|
||||
if not "/serie" in scrapedurl:
|
||||
continue
|
||||
else: #Tratamos Películas
|
||||
if "/serie" in scrapedurl:
|
||||
continue
|
||||
|
||||
itemlist.append(item.clone(action="listado", title=scrapedtitle, url=scrapedurl))
|
||||
|
||||
if item.extra == "series": #Añadimos Series VOSE que está fuera del menú principal
|
||||
itemlist.append(item.clone(action="listado", title="Series VOSE", url=host + "/series-vose/"))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
|
||||
def peliculas(item):
|
||||
def listado(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
|
||||
# Descarga la página
|
||||
data = scrapertools.cache_page(item.url)
|
||||
if "http://www.bajui.com/redi.php" in data:
|
||||
data = scrapertools.cache_page(item.url)
|
||||
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
|
||||
|
||||
'''
|
||||
<li>
|
||||
<a href="/torrent/23471/mandela-microhd-720p"><img src="thumb_fichas/23471.jpg" border="0" title="Mandela (microHD - 720p)" alt="IMG: Mandela (microHD - 720p)"/></a>
|
||||
<div class="meta">
|
||||
<a class="nombre" href="/torrent/23471/mandela-microhd-720p" title="Mandela (microHD - 720p)">Mandela (microHD - 720p)</a>
|
||||
<span class="categoria">Peliculas microHD</span>
|
||||
<span class="fecha">Hace 2 sem</span>
|
||||
<span class="descrip">Título: Mandela: Del mito al hombre<br />
|
||||
'''
|
||||
patron = '<a href="(/torrent/[^"]+)">'
|
||||
patron += '<img src="(thumb_fichas/[^"]+)" border="0" title="([^"]+)"[^>]+></a>'
|
||||
patron += '.*?<span class="descrip">(.*?)</span>'
|
||||
patron = '<div id="principal">.*?<\/nav><\/div><\/div>'
|
||||
data = scrapertools.find_single_match(data, patron)
|
||||
|
||||
patron = '<li>.*?<a href="(.*?)".*?' #url
|
||||
patron += 'title="(.*?)".*?' #título
|
||||
patron += 'src="(.*?)".*?' #thumb
|
||||
patron += "title='(.*?)'.*?" #categoría, idioma
|
||||
patron += '"><i>(.*?)<\/i><\/span.*?' #calidad
|
||||
patron += '="dig1">(.*?)<.*?' #tamaño
|
||||
patron += '="dig2">(.*?)<\/span><\/div>' #tipo tamaño
|
||||
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
scrapertools.printMatches(matches)
|
||||
#logger.debug("PATRON: " + patron)
|
||||
#logger.debug(matches)
|
||||
#logger.debug(data)
|
||||
|
||||
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedplot in matches:
|
||||
title = scrapedtitle.strip()
|
||||
url = urlparse.urljoin(BASE_URL, scrapedurl)
|
||||
thumbnail = urlparse.urljoin(BASE_URL, scrapedthumbnail)
|
||||
plot = re.sub('<[^<]+?>', '', scrapedplot)
|
||||
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
|
||||
itemlist.append(Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot,
|
||||
folder=False))
|
||||
for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedcategory, scrapedcalidad, scrapedsize, scrapedsizet in matches:
|
||||
item_local = item.clone() #Creamos copia de Item para trabajar
|
||||
|
||||
title = re.sub('\r\n', '', scrapedtitle).decode('utf8').strip()
|
||||
title = title.replace(" torrent", "").replace(" Torrent", "").replace("Series y ", "")
|
||||
item_local.url = urlparse.urljoin(host, scrapedurl)
|
||||
item_local.thumbnail = urlparse.urljoin(host, scrapedthumbnail)
|
||||
|
||||
if "---" in scrapedcalidad: #Scrapeamos y limpiamos calidades
|
||||
scrapedcalidad = ''
|
||||
if "microhd" in title.lower():
|
||||
item_local.quality = "microHD"
|
||||
if not "/series-vose/" in item.url and not item_local.quality:
|
||||
item_local.quality = scrapedcalidad
|
||||
if scrapertools.find_single_match(item_local.quality, r'\d+\.\d+'):
|
||||
item_local.quality = ''
|
||||
if not item_local.quality and ("DVDRip" in title or "HDRip" in title or "BR-LINE" in title or "HDTS-SCREENER" in title or "BDRip" in title or "BR-Screener" in title or "DVDScreener" in title or "TS-Screener" in title):
|
||||
item_local.quality = scrapertools.find_single_match(title, r'\((.*?)\)')
|
||||
item_local.quality = item_local.quality.replace("Latino", "")
|
||||
if not scrapedsizet:
|
||||
scrapedsize = ''
|
||||
else:
|
||||
item_local.quality += ' [%s %s]' % (scrapedsize.replace(".", ","), scrapedsizet)
|
||||
|
||||
item_local.language = [] #Verificamos el idioma por si encontramos algo
|
||||
if "latino" in scrapedcategory.lower() or "latino" in item.url or "latino" in title.lower():
|
||||
item_local.language += ["LAT"]
|
||||
if "ingles" in scrapedcategory.lower() or "ingles" in item.url or "vose" in scrapedurl or "vose" in item.url:
|
||||
if "VOSE" in scrapedcategory.lower() or "sub" in title.lower() or "vose" in scrapedurl or "vose" in item.url:
|
||||
item_local.language += ["VOS"]
|
||||
else:
|
||||
item_local.language += ["VO"]
|
||||
if "dual" in scrapedcategory.lower() or "dual" in title.lower():
|
||||
item_local.language[0:0] = ["DUAL"]
|
||||
|
||||
#Limpiamos el título de la basuna innecesaria
|
||||
title = title.replace("Dual", "").replace("dual", "").replace("Subtitulada", "").replace("subtitulada", "").replace("Subt", "").replace("subt", "").replace("Sub", "").replace("sub", "").replace("(Proper)", "").replace("(proper)", "").replace("Proper", "").replace("proper", "").replace("#", "").replace("(Latino)", "").replace("Latino", "")
|
||||
title = title.replace("- HDRip", "").replace("(HDRip)", "").replace("- Hdrip", "").replace("(microHD)", "").replace("(DVDRip)", "").replace("(HDRip)", "").replace("(BR-LINE)", "").replace("(HDTS-SCREENER)", "").replace("(BDRip)", "").replace("(BR-Screener)", "").replace("(DVDScreener)", "").replace("TS-Screener", "").replace(" TS", "").replace(" Ts", "")
|
||||
title = re.sub(r'\??\s?\d*?\&.*', '', title).title().strip()
|
||||
|
||||
if item_local.extra == "peliculas": #preparamos Item para películas
|
||||
if "/serie" in scrapedurl or "/serie" in item.url:
|
||||
continue
|
||||
item_local.contentType = "movie"
|
||||
item_local.contentTitle = title.strip()
|
||||
else: #preparamos Item para series
|
||||
if not "/serie" in scrapedurl and not "/serie" in item.url:
|
||||
continue
|
||||
item_local.contentType = "episode"
|
||||
epi_mult = scrapertools.find_single_match(item_local.url, r'cap.*?-\d+-(al-\d+)')
|
||||
item_local.contentSeason = scrapertools.find_single_match(item_local.url, r'temp.*?-(\d+)')
|
||||
item_local.contentEpisodeNumber = scrapertools.find_single_match(item_local.url, r'cap.*?-(\d+)')
|
||||
if not item_local.contentSeason:
|
||||
item_local.contentSeason = scrapertools.find_single_match(item_local.url, r'-(\d+)[x|X]\d+')
|
||||
if not item_local.contentEpisodeNumber:
|
||||
item_local.contentEpisodeNumber = scrapertools.find_single_match(item_local.url, r'-\d+[x|X](\d+)')
|
||||
if item_local.contentSeason < 1:
|
||||
item_local.contentSeason = 1
|
||||
if item_local.contentEpisodeNumber < 1:
|
||||
item_local.contentEpisodeNumber = 1
|
||||
item_local.contentSerieName = title.strip()
|
||||
if epi_mult:
|
||||
title = '%s, %s' % (epi_mult.replace("-", " "), title)
|
||||
|
||||
item_local.action = "findvideos"
|
||||
item_local.title = title.strip()
|
||||
item_local.infoLabels['year'] = "-"
|
||||
|
||||
itemlist.append(item_local.clone()) #Pintar pantalla
|
||||
|
||||
#Pasamos a TMDB la lista completa Itemlist
|
||||
tmdb.set_infoLabels(itemlist, True)
|
||||
|
||||
# Pasada para maquillaje de los títulos obtenidos desde TMDB
|
||||
for item_local in itemlist:
|
||||
title = item_local.title
|
||||
|
||||
# Si TMDB no ha encontrado el vídeo limpiamos el año
|
||||
if item_local.infoLabels['year'] == "-":
|
||||
item_local.infoLabels['year'] = ''
|
||||
item_local.infoLabels['aired'] = ''
|
||||
|
||||
# Preparamos el título para series, con los núm. de temporadas, si las hay
|
||||
if item_local.contentType == "season" or item_local.contentType == "tvshow":
|
||||
item_local.contentTitle= ''
|
||||
if item_local.contentType == "episode":
|
||||
if scrapertools.find_single_match(title, r'(al\s\d+)'):
|
||||
item_local.infoLabels['episodio_titulo'] = scrapertools.find_single_match(title, r'(al\s\d+)')
|
||||
if scrapertools.find_single_match(str(item_local.infoLabels['aired']), r'\/(\d{4})'):
|
||||
item_local.infoLabels['year'] = scrapertools.find_single_match(str(item_local.infoLabels['aired']), r'\/(\d{4})')
|
||||
|
||||
rating = ''
|
||||
if item_local.infoLabels['rating'] and item_local.infoLabels['rating'] != '0.0':
|
||||
rating = float(item_local.infoLabels['rating'])
|
||||
rating = round(rating, 1)
|
||||
|
||||
#Ahora maquillamos un poco los titulos dependiendo de si se han seleccionado títulos inteleigentes o no
|
||||
if not config.get_setting("unify"): #Si Titulos Inteligentes NO seleccionados:
|
||||
if item_local.contentType == "episode":
|
||||
if item_local.infoLabels['episodio_titulo']:
|
||||
title = '%sx%s %s, %s [COLOR yellow][%s][/COLOR] [%s] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (str(item_local.contentSeason), str(item_local.contentEpisodeNumber).zfill(2), item_local.infoLabels['episodio_titulo'], item_local.contentSerieName, item_local.infoLabels['year'], rating, item_local.quality, str(item_local.language))
|
||||
else:
|
||||
title = '%sx%s %s [COLOR yellow][%s][/COLOR] [%s] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (str(item_local.contentSeason), str(item_local.contentEpisodeNumber).zfill(2), item_local.contentSerieName, item_local.infoLabels['year'], rating, item_local.quality, str(item_local.language))
|
||||
item_local.infoLabels['title'] = item_local.contentSerieName
|
||||
|
||||
elif item_local.contentType == "season" or item_local.contentType == "tvshow":
|
||||
if item_local.extra == "series":
|
||||
title = '%s - Temporada %s [COLOR yellow][%s][/COLOR] [%s] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.contentSerieName, item_local.contentSeason, item_local.infoLabels['year'], rating, item_local.quality, str(item_local.language))
|
||||
else:
|
||||
title = '%s [COLOR yellow][%s][/COLOR] [%s] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.contentSerieName, item_local.infoLabels['year'], rating, item_local.quality, str(item_local.language))
|
||||
|
||||
elif item_local.contentType == "movie":
|
||||
title = '%s [COLOR yellow][%s][/COLOR] [%s] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (title, str(item_local.infoLabels['year']), rating, item_local.quality, str(item_local.language))
|
||||
|
||||
if config.get_setting("unify"): #Si Titulos Inteligentes SÍ seleccionados:
|
||||
if item_local.contentType == "episode":
|
||||
if item_local.infoLabels['episodio_titulo']:
|
||||
item_local.infoLabels['episodio_titulo'] = '%s, %s [%s] [%s]' % (item_local.infoLabels['episodio_titulo'], item_local.contentSerieName, item_local.infoLabels['year'], rating)
|
||||
else:
|
||||
item_local.infoLabels['episodio_titulo'] = '%s [%s] [%s]' % (item_local.contentSerieName, item_local.infoLabels['year'], rating)
|
||||
item_local.infoLabels['title'] = item_local.contentSerieName
|
||||
|
||||
elif item_local.contentType == "season" or item_local.contentType == "tvshow":
|
||||
if item_local.extra == "series":
|
||||
title = '%s - Temporada %s [%s] [%s]' % (item_local.contentSerieName, item_local.contentSeason, item_local.infoLabels['year'], rating)
|
||||
else:
|
||||
title = '%s' % (item_local.contentSerieName)
|
||||
|
||||
item_local.infoLabels['episodio_titulo'] = item_local.infoLabels['episodio_titulo'].replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()
|
||||
title = title.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', title).strip()
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', title).strip()
|
||||
|
||||
item_local.title = title
|
||||
|
||||
logger.debug("url: " + item_local.url + " / title: " + item_local.title + " / content title: " + item_local.contentTitle + "/" + item_local.contentSerieName + " / calidad: " + item_local.quality + " / year: " + str(item_local.infoLabels['year']))
|
||||
|
||||
# Extrae el paginador
|
||||
patronvideos = '<a href="([^"]+)" class="pagina pag_sig">Siguiente \»\;</a>'
|
||||
matches = re.compile(patronvideos, re.DOTALL).findall(data)
|
||||
scrapertools.printMatches(matches)
|
||||
patron = '<div class="paginacion">.*?<span class="pagina pag_actual".*?'
|
||||
patron += "<a href='([^']+)'.*?" #url siguiente página
|
||||
patron += 'class="pagina">(\d+)<.*?' #próxima página
|
||||
matches = scrapertools.find_single_match(data, patron)
|
||||
|
||||
patron = 'class="pagina pag_sig">Siguiente.*?'
|
||||
patron += "<a href='.*?\/page\/(\d+)\/" #total de páginas
|
||||
last_page = scrapertools.find_single_match(data, patron)
|
||||
if not last_page:
|
||||
patron = '<div class="paginacion">.*?'
|
||||
patron += 'class="pagina">(\d+)<\/a><\/div><\/nav><\/div><\/div>' #total de páginas
|
||||
last_page = scrapertools.find_single_match(data, patron)
|
||||
|
||||
if len(matches) > 0:
|
||||
if matches:
|
||||
scrapedurl = urlparse.urljoin(item.url, matches[0])
|
||||
itemlist.append(
|
||||
Item(channel=item.channel, action="peliculas", title="Página siguiente >>", url=scrapedurl, folder=True,
|
||||
viewmode="movie_with_plot"))
|
||||
if last_page:
|
||||
title = '[COLOR gold]Página siguiente >>[/COLOR] %s de %s' % (int(matches[1]) - 1, last_page)
|
||||
else:
|
||||
title = '[COLOR gold]Página siguiente >>[/COLOR] %s' % (int(matches[1]) - 1)
|
||||
|
||||
itemlist.append(Item(channel=item.channel, action="listado", title=title, url=scrapedurl, extra=item.extra))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def play(item):
|
||||
|
||||
def listado_busqueda(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
|
||||
data = scrapertools.cache_page(item.url)
|
||||
if "http://www.bajui.com/redi.php" in data:
|
||||
data = scrapertools.cache_page(item.url)
|
||||
# Descarga la página
|
||||
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
|
||||
|
||||
# <a href="magnet:?xt=urn:btih:d6wtseg33iisp7jexpl44wfcqh7zzjuh&dn=Abraham+Lincoln+Cazador+de+vampiros+%28HDRip%29+%28EliteTorrent.net%29&tr=http://tracker.torrentbay.to:6969/announce" class="enlace_torrent degradado1">Descargar por magnet link</a>
|
||||
link = scrapertools.get_match(data,
|
||||
'<a href="(magnet[^"]+)" class="enlace_torrent[^>]+>Descargar por magnet link</a>')
|
||||
link = urlparse.urljoin(item.url, link)
|
||||
logger.info("link=" + link)
|
||||
patron = '<div id="principal">.*?<\/nav><\/div><\/div>'
|
||||
data = scrapertools.find_single_match(data, patron)
|
||||
|
||||
patron = '<li>.*?<a href="(.*?)".*?' #url
|
||||
patron += 'title="(.*?)".*?' #título
|
||||
patron += 'src="(.*?)".*?' #thumb
|
||||
patron += "title='(.*?)'.*?" #categoría, idioma
|
||||
patron += '"><i>(.*?)<\/i><\/span.*?' #calidad
|
||||
patron += '="dig1">(.*?)<.*?' #tamaño
|
||||
patron += '="dig2">(.*?)<\/span><\/div>' #tipo tamaño
|
||||
|
||||
itemlist.append(Item(channel=item.channel, action="play", server="torrent", title=item.title, url=link,
|
||||
thumbnail=item.thumbnail, plot=item.plot, folder=False))
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
#logger.debug("PATRON: " + patron)
|
||||
#logger.debug(matches)
|
||||
#logger.debug(data)
|
||||
|
||||
for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedcategory, scrapedcalidad, scrapedsize, scrapedsizet in matches:
|
||||
item_local = item.clone()
|
||||
|
||||
title = re.sub('\r\n', '', scrapedtitle).decode('utf8').strip()
|
||||
title = title.replace(" torrent", "").replace(" Torrent", "").replace("Series y ", "")
|
||||
item_local.url = urlparse.urljoin(host, scrapedurl)
|
||||
item_local.thumbnail = urlparse.urljoin(host, scrapedthumbnail)
|
||||
|
||||
if "---" in scrapedcalidad:
|
||||
scrapedcalidad = ''
|
||||
if "microhd" in title.lower():
|
||||
item_local.quality = "microHD"
|
||||
if not "/series-vose/" in item.url and not item_local.quality:
|
||||
item_local.quality = scrapedcalidad
|
||||
if scrapertools.find_single_match(item_local.quality, r'\d+\.\d+'):
|
||||
item_local.quality = ''
|
||||
if not item_local.quality and ("DVDRip" in title or "HDRip" in title or "BR-LINE" in title or "HDTS-SCREENER" in title or "BDRip" in title or "BR-Screener" in title or "DVDScreener" in title or "TS-Screener" in title):
|
||||
item_local.quality = scrapertools.find_single_match(title, r'\((.*?)\)')
|
||||
item_local.quality = item_local.quality.replace("Latino", "")
|
||||
if not scrapedsizet:
|
||||
scrapedsize = ''
|
||||
else:
|
||||
item_local.quality += ' [%s %s]' % (scrapedsize.replace(".", ","), scrapedsizet)
|
||||
|
||||
item_local.language = []
|
||||
if "latino" in scrapedcategory.lower() or "latino" in item.url or "latino" in title.lower():
|
||||
item_local.language += ["LAT"]
|
||||
if "ingles" in scrapedcategory.lower() or "ingles" in item.url or "vose" in scrapedurl or "vose" in item.url:
|
||||
if "VOSE" in scrapedcategory.lower() or "sub" in title.lower() or "vose" in scrapedurl or "vose" in item.url:
|
||||
item_local.language += ["VOS"]
|
||||
else:
|
||||
item_local.language += ["VO"]
|
||||
if "dual" in scrapedcategory.lower() or "dual" in title.lower():
|
||||
item_local.language[0:0] = ["DUAL"]
|
||||
|
||||
title = title.replace("Dual", "").replace("dual", "").replace("Subtitulada", "").replace("subtitulada", "").replace("Subt", "").replace("subt", "").replace("Sub", "").replace("sub", "").replace("(Proper)", "").replace("(proper)", "").replace("Proper", "").replace("proper", "").replace("#", "").replace("(Latino)", "").replace("Latino", "")
|
||||
title = title.replace("- HDRip", "").replace("(HDRip)", "").replace("- Hdrip", "").replace("(microHD)", "").replace("(DVDRip)", "").replace("(HDRip)", "").replace("(BR-LINE)", "").replace("(HDTS-SCREENER)", "").replace("(BDRip)", "").replace("(BR-Screener)", "").replace("(DVDScreener)", "").replace("TS-Screener", "").replace(" TS", "").replace(" Ts", "")
|
||||
title = re.sub(r'\??\s?\d*?\&.*', '', title).title().strip()
|
||||
|
||||
if not "/serie" in scrapedurl:
|
||||
item_local.contentType = "movie"
|
||||
item_local.extra = "peliculas"
|
||||
item_local.contentTitle = title
|
||||
else:
|
||||
item_local.contentType = "episode"
|
||||
item_local.extra = "series"
|
||||
epi_mult = scrapertools.find_single_match(item_local.url, r'cap.*?-\d+-(al-\d+)')
|
||||
item_local.contentSeason = scrapertools.find_single_match(item_local.url, r'temp.*?-(\d+)')
|
||||
item_local.contentEpisodeNumber = scrapertools.find_single_match(item_local.url, r'cap.*?-(\d+)')
|
||||
if not item_local.contentSeason:
|
||||
item_local.contentSeason = scrapertools.find_single_match(item_local.url, r'-(\d+)[x|X]\d+')
|
||||
if not item_local.contentEpisodeNumber:
|
||||
item_local.contentEpisodeNumber = scrapertools.find_single_match(item_local.url, r'-\d+[x|X](\d+)')
|
||||
if item_local.contentSeason < 1:
|
||||
item_local.contentSeason = 1
|
||||
if item_local.contentEpisodeNumber < 1:
|
||||
item_local.contentEpisodeNumber = 1
|
||||
item_local.contentSerieName = title
|
||||
if epi_mult:
|
||||
title = '%s, %s' % (epi_mult.replace("-", " "), title)
|
||||
|
||||
item_local.action = "findvideos"
|
||||
item_local.title = title
|
||||
item_local.infoLabels['year'] = "-"
|
||||
|
||||
itemlist.append(item_local.clone()) #Pintar pantalla
|
||||
|
||||
if not item.category: #Si este campo no existe es que viene de la primera pasada de una búsqueda global
|
||||
return itemlist #Retornamos sin pasar por la fase de maquillaje para ahorra tiempo
|
||||
|
||||
#Pasamos a TMDB la lista completa Itemlist
|
||||
tmdb.set_infoLabels(itemlist, True)
|
||||
|
||||
# Pasada para maquillaje de los títulos obtenidos desde TMDB
|
||||
for item_local in itemlist:
|
||||
title = item_local.title
|
||||
|
||||
# Si TMDB no ha encontrado el vídeo limpiamos el año
|
||||
if item_local.infoLabels['year'] == "-":
|
||||
item_local.infoLabels['year'] = ''
|
||||
item_local.infoLabels['aired'] = ''
|
||||
|
||||
# Preparamos el título para series, con los núm. de temporadas, si las hay
|
||||
if item_local.contentType == "season" or item_local.contentType == "tvshow":
|
||||
item_local.contentTitle= ''
|
||||
if item_local.contentType == "episode":
|
||||
if scrapertools.find_single_match(title, r'(al\s\d+)'):
|
||||
item_local.infoLabels['episodio_titulo'] = scrapertools.find_single_match(title, r'(al\s\d+)')
|
||||
if scrapertools.find_single_match(str(item_local.infoLabels['aired']), r'\/(\d{4})'):
|
||||
item_local.infoLabels['year'] = scrapertools.find_single_match(str(item_local.infoLabels['aired']), r'\/(\d{4})')
|
||||
|
||||
rating = ''
|
||||
if item_local.infoLabels['rating'] and item_local.infoLabels['rating'] != '0.0':
|
||||
rating = float(item_local.infoLabels['rating'])
|
||||
rating = round(rating, 1)
|
||||
|
||||
#Ahora maquillamos un poco los titulos dependiendo de si se han seleccionado títulos inteleigentes o no
|
||||
if not config.get_setting("unify"): #Si Titulos Inteligentes NO seleccionados:
|
||||
if item_local.contentType == "episode":
|
||||
if item_local.infoLabels['episodio_titulo']:
|
||||
title = '%sx%s %s, %s [COLOR yellow][%s][/COLOR] [%s] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (str(item_local.contentSeason), str(item_local.contentEpisodeNumber).zfill(2), item_local.infoLabels['episodio_titulo'], item_local.contentSerieName, item_local.infoLabels['year'], rating, item_local.quality, str(item_local.language))
|
||||
else:
|
||||
title = '%sx%s %s [COLOR yellow][%s][/COLOR] [%s] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (str(item_local.contentSeason), str(item_local.contentEpisodeNumber).zfill(2), item_local.contentSerieName, item_local.infoLabels['year'], rating, item_local.quality, str(item_local.language))
|
||||
item_local.infoLabels['title'] = item_local.contentSerieName
|
||||
|
||||
elif item_local.contentType == "season" or item_local.contentType == "tvshow":
|
||||
if item_local.extra == "series":
|
||||
title = '%s - Temporada %s [COLOR yellow][%s][/COLOR] [%s] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.contentSerieName, item_local.contentSeason, item_local.infoLabels['year'], rating, item_local.quality, str(item_local.language))
|
||||
else:
|
||||
title = '%s [COLOR yellow][%s][/COLOR] [%s] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.contentSerieName, item_local.infoLabels['year'], rating, item_local.quality, str(item_local.language))
|
||||
|
||||
elif item_local.contentType == "movie":
|
||||
title = '%s [COLOR yellow][%s][/COLOR] [%s] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (title, str(item_local.infoLabels['year']), rating, item_local.quality, str(item_local.language))
|
||||
|
||||
if config.get_setting("unify"): #Si Titulos Inteligentes SÍ seleccionados:
|
||||
if item_local.contentType == "episode":
|
||||
if item_local.infoLabels['episodio_titulo']:
|
||||
item_local.infoLabels['episodio_titulo'] = '%s, %s [%s] [%s]' % (item_local.infoLabels['episodio_titulo'], item_local.contentSerieName, item_local.infoLabels['year'], rating)
|
||||
else:
|
||||
item_local.infoLabels['episodio_titulo'] = '%s [%s] [%s]' % (item_local.contentSerieName, item_local.infoLabels['year'], rating)
|
||||
item_local.infoLabels['title'] = item_local.contentSerieName
|
||||
|
||||
elif item_local.contentType == "season" or item_local.contentType == "tvshow":
|
||||
if item_local.extra == "series":
|
||||
title = '%s - Temporada %s [%s] [%s]' % (item_local.contentSerieName, item_local.contentSeason, item_local.infoLabels['year'], rating)
|
||||
else:
|
||||
title = '%s' % (item_local.contentSerieName)
|
||||
|
||||
item_local.infoLabels['episodio_titulo'] = item_local.infoLabels['episodio_titulo'].replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()
|
||||
title = title.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', title).strip()
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', title).strip()
|
||||
|
||||
item_local.title = title
|
||||
|
||||
logger.debug("url: " + item_local.url + " / title: " + item_local.title + " / content title: " + item_local.contentTitle + "/" + item_local.contentSerieName + " / calidad: " + item_local.quality + " / year: " + str(item_local.infoLabels['year']))
|
||||
|
||||
# Extrae el paginador
|
||||
patron = '<div class="paginacion">.*?<span class="pagina pag_actual".*?'
|
||||
patron += "<a href='([^']+)'.*?" #url siguiente página
|
||||
patron += 'class="pagina">(\d+)<' #próxima página
|
||||
matches = scrapertools.find_single_match(data, patron)
|
||||
|
||||
patron = 'class="pagina pag_sig">Siguiente.*?'
|
||||
patron += "<a href='.*?\/page\/(\d+)\/" #total de páginas
|
||||
last_page = scrapertools.find_single_match(data, patron)
|
||||
if not last_page:
|
||||
patron = '<div class="paginacion">.*?'
|
||||
patron += 'class="pagina">(\d+)<\/a><\/div><\/nav><\/div><\/div>' #total de páginas
|
||||
last_page = scrapertools.find_single_match(data, patron)
|
||||
|
||||
if matches:
|
||||
scrapedurl = urlparse.urljoin(item.url, matches[0])
|
||||
if last_page:
|
||||
title = '[COLOR gold]Página siguiente >>[/COLOR] %s de %s' % (int(matches[1]) - 1, last_page)
|
||||
else:
|
||||
title = '[COLOR gold]Página siguiente >>[/COLOR] %s' % (int(matches[1]) - 1)
|
||||
|
||||
itemlist.append(Item(channel=item.channel, action="listado_busqueda", title=title, url=scrapedurl, extra=item.extra))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def findvideos(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
|
||||
# Saber si estamos en una ventana emergente lanzada desde una viñeta del menú principal,
|
||||
# con la función "play_from_library"
|
||||
unify_status = False
|
||||
try:
|
||||
import xbmc
|
||||
if xbmc.getCondVisibility('Window.IsMedia') == 1:
|
||||
unify_status = config.get_setting("unify")
|
||||
except:
|
||||
unify_status = config.get_setting("unify")
|
||||
|
||||
# Obtener la información actualizada del Episodio, si no la hay
|
||||
if not item.infoLabels['tmdb_id'] and item.contentChannel == "videolibrary":
|
||||
tmdb.set_infoLabels(item, True)
|
||||
|
||||
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
|
||||
#data = unicode(data, "utf-8", errors="replace")
|
||||
|
||||
#Añadimos el tamaño para todos
|
||||
size = scrapertools.find_single_match(item.quality, '\s\[(\d+,?\d*?\s\w[b|B]s)\]')
|
||||
item.quality = re.sub('\s\[\d+,?\d*?\s\w[b|B]s\]', '', item.quality) #Quitamos size de calidad, si lo traía
|
||||
if size:
|
||||
item.title = re.sub('\s\[\d+,?\d*?\s\w[b|B]s\]', '', item.title) #Quitamos size de título, si lo traía
|
||||
item.title = '%s [%s]' % (item.title, size) #Agregamos size al final del título
|
||||
|
||||
#Limpiamos de año y rating de episodios
|
||||
if item.infoLabels['episodio_titulo']:
|
||||
item.infoLabels['episodio_titulo'] = re.sub(r'\s?\[.*?\]', '', item.infoLabels['episodio_titulo'])
|
||||
if item.infoLabels['episodio_titulo'] == item.contentSerieName:
|
||||
item.infoLabels['episodio_titulo'] = ''
|
||||
if item.infoLabels['aired'] and item.contentType == "episode":
|
||||
item.infoLabels['year'] = scrapertools.find_single_match(str(item.infoLabels['aired']), r'\/(\d{4})')
|
||||
|
||||
#Generamos una copia de Item para trabajar sobre ella
|
||||
item_local = item.clone()
|
||||
|
||||
patron = '<div class="enlace_descarga".*?<a href="(.*?\.torrent)"'
|
||||
link_torrent = scrapertools.find_single_match(data, patron)
|
||||
link_torrent = urlparse.urljoin(item_local.url, link_torrent)
|
||||
link_torrent = link_torrent.replace(" ", "%20") #sustituimos espacios por %20, por si acaso
|
||||
#logger.info("link Torrent: " + link_torrent)
|
||||
|
||||
patron = '<div class="enlace_descarga".*?<a href="(magnet:?.*?)"'
|
||||
link_magnet = scrapertools.find_single_match(data, patron)
|
||||
link_magnet = urlparse.urljoin(item_local.url, link_magnet)
|
||||
#logger.info("link Magnet: " + link_magnet)
|
||||
|
||||
#Pintamos el pseudo-título con toda la información disponible del vídeo
|
||||
item_local.action = ""
|
||||
item_local.server = "torrent"
|
||||
|
||||
rating = '' #Ponemos el rating
|
||||
if item_local.infoLabels['rating'] and item_local.infoLabels['rating'] != '0.0':
|
||||
rating = float(item_local.infoLabels['rating'])
|
||||
rating = round(rating, 1)
|
||||
|
||||
if item_local.contentType == "episode":
|
||||
title = '%sx%s' % (str(item_local.contentSeason), str(item_local.contentEpisodeNumber).zfill(2))
|
||||
if item_local.infoLabels['temporada_num_episodios']:
|
||||
title = '%s (de %s)' % (title, str(item_local.infoLabels['temporada_num_episodios']))
|
||||
title = '%s %s' % (title, item_local.infoLabels['episodio_titulo'])
|
||||
if item_local.infoLabels['episodio_titulo']: #Ya viene con el nombre de Serie
|
||||
title_gen = '%s [COLOR yellow][%s][/COLOR] [%s] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR] [%s]' % (title, item_local.infoLabels['year'], rating, item_local.quality, str(item_local.language), size)
|
||||
else:
|
||||
title_gen = '%s, %s [COLOR yellow][%s][/COLOR] [%s] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR] [%s]' % (title, item_local.contentSerieName, item_local.infoLabels['year'], rating, item_local.quality, str(item_local.language), size)
|
||||
else:
|
||||
title = item_local.title
|
||||
title_gen = title
|
||||
|
||||
title_gen = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', title_gen) #Quitamos etiquetas vacías
|
||||
title_gen = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', title_gen) #Quitamos colores vacíos
|
||||
title_gen = title_gen.replace(" []", "") #Quitamos etiquetas vacías
|
||||
|
||||
if not unify_status: #Si Titulos Inteligentes NO seleccionados:
|
||||
title_gen = '**- [COLOR gold]Enlaces Ver: [/COLOR]%s[COLOR gold] -**[/COLOR]' % (title_gen)
|
||||
else:
|
||||
title_gen = '[COLOR gold]Enlaces Ver: [/COLOR]%s' % (title_gen)
|
||||
|
||||
if config.get_setting("quit_channel_name", "videolibrary") == 1 and item_local.contentChannel == "videolibrary":
|
||||
title_gen = '%s: %s' % (item_local.channel.capitalize(), title_gen)
|
||||
|
||||
itemlist.append(item_local.clone(title=title_gen)) #Título con todos los datos del vídeo
|
||||
|
||||
#Ahora pintamos el link del Torrent, si lo hay
|
||||
if link_torrent: # Hay Torrent ?
|
||||
if item_local.quality:
|
||||
item_local.quality += " "
|
||||
item_local.quality += "[Torrent]"
|
||||
item_local.url = link_torrent
|
||||
item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][Torrent][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.quality, str(item_local.language)) #Preparamos título de Torrent
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', item_local.title) #Quitamos etiquetas vacías
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', item_local.title) #Quitamos colores vacíos
|
||||
item_local.alive = "??" #Calidad del link sin verificar
|
||||
item_local.action = "play" #Visualizar vídeo
|
||||
|
||||
itemlist.append(item_local.clone()) #Pintar pantalla
|
||||
|
||||
#Ahora pintamos el link del Magnet, si lo hay
|
||||
if link_magnet: # Hay Magnet ?
|
||||
if item_local.quality:
|
||||
item_local.quality += " "
|
||||
item_local.quality = item_local.quality.replace("[Torrent]", "") + "[Magnet]"
|
||||
item_local.url = link_magnet
|
||||
item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][Torrent][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.quality, str(item_local.language)) #Preparamos título de Magnet
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', item_local.title) #Quitamos etiquetas vacías
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', item_local.title) #Quitamos colores vacíos
|
||||
item_local.alive = "??" #Calidad del link sin verificar
|
||||
item_local.action = "play" #Visualizar vídeo
|
||||
|
||||
itemlist.append(item_local.clone()) #Pintar pantalla
|
||||
|
||||
logger.debug("TORRENT: " + link_torrent + "MAGNET: " + link_magnet + " / title gen/torr: " + title_gen + " / " + title + " / calidad: " + item_local.quality + " / tamaño: " + size + " / content: " + item_local.contentTitle + " / " + item_local.contentSerieName)
|
||||
#logger.debug(item_local)
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
|
||||
logger.info("search:" + texto)
|
||||
# texto = texto.replace(" ", "+")
|
||||
|
||||
try:
|
||||
item.url = host + "?s=%s&x=0&y=0" % texto
|
||||
itemlist = listado_busqueda(item)
|
||||
|
||||
return itemlist
|
||||
|
||||
# Se captura la excepción, para no interrumpir al buscador global si un canal falla
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
logger.error("%s" % line)
|
||||
return []
|
||||
|
||||
|
||||
def newest(categoria):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
|
||||
@@ -198,10 +198,19 @@ def listado(item):
|
||||
# Se limpian algunas etiquetas del item inical.
|
||||
for scrapedurl, scrapedthumbnail in matches:
|
||||
item_local = item.clone()
|
||||
if item_local.tipo:
|
||||
del item_local.tipo
|
||||
item_local.tipo = True
|
||||
del item_local.tipo
|
||||
if item_local.totalItems:
|
||||
del item_local.totalItems
|
||||
if item_local.modo:
|
||||
del item_local.modo
|
||||
if item_local.next_page:
|
||||
del item_local.next_page
|
||||
item_local.pag = True
|
||||
del item_local.pag
|
||||
if item_local.text_color:
|
||||
del item_local.text_color
|
||||
|
||||
item_local.title = ''
|
||||
item_local.context = "['buscar_trailer']"
|
||||
|
||||
@@ -271,7 +280,7 @@ def listado(item):
|
||||
#logger.debug(matches)
|
||||
cnt = 0
|
||||
for scrapedtitle, notused, scrapedinfo in matches:
|
||||
item_local = itemlist[cnt] # Vinculamos item_local con la entrada de la lista itemlist (más fácil de leer)
|
||||
item_local = itemlist[cnt] #Vinculamos item_local con la entrada de la lista itemlist (más fácil de leer)
|
||||
|
||||
# Limpiamos títulos, Sacamos datos de calidad, audio y lenguaje
|
||||
scrapedtitle = re.sub('\r\n', '', scrapedtitle).decode('iso-8859-1').encode('utf8').strip()
|
||||
@@ -298,6 +307,9 @@ def listado(item):
|
||||
if "[dual" in title.lower():
|
||||
title_subs = "[Dual]"
|
||||
title = title = re.sub(r'\[D|dual.*?\]', '', title)
|
||||
if scrapertools.find_single_match(title, r'-\s[m|M].*?serie'):
|
||||
title = re.sub(r'-\s[m|M].*?serie', '', title)
|
||||
title_subs += "[Miniserie]"
|
||||
|
||||
if title.endswith('.'):
|
||||
title = title[:-1]
|
||||
@@ -320,6 +332,7 @@ def listado(item):
|
||||
if not item_local.contentSerieName:
|
||||
item_local.contentSerieName = title
|
||||
item_local.infoLabels['tvshowtitle'] = item_local.contentSerieName
|
||||
item_local.infoLabels['title'] = ''
|
||||
if not item_local.contentSerieName:
|
||||
item_local.contentSerieName = "dummy"
|
||||
|
||||
@@ -339,9 +352,7 @@ def listado(item):
|
||||
if "4k" in title.lower() or "hdr" in title.lower():
|
||||
item_local.quality = "4K"
|
||||
title = title.replace("4k-hdr", "").replace("4K-HDR", "").replace("hdr", "").replace("HDR", "").replace("4k", "").replace("4K", "")
|
||||
title = title.replace("(", "").replace(")", "").replace("[", "").replace("]", "")
|
||||
if title.endswith(' '):
|
||||
title = title[:-1]
|
||||
title = title.replace("(", "").replace(")", "").replace("[", "").replace("]", "").strip()
|
||||
item_local.title = title
|
||||
|
||||
if item_local.extra == "peliculas":
|
||||
@@ -363,18 +374,22 @@ def listado(item):
|
||||
# Guardamos temporalmente info extra, si lo hay
|
||||
item_local.extra = item_local.extra + title_subs
|
||||
|
||||
#Salvamos y borramos el número de temporadas porque TMDB a veces hace tonterias. Lo pasamos como serie completa
|
||||
if item_local.contentSeason and (item_local.contentType == "season" or item_local.contentType == "tvshow"):
|
||||
item_local.SeasonBackup = item_local.contentSeason
|
||||
del item_local.infoLabels['season']
|
||||
|
||||
#logger.debug(item_local)
|
||||
|
||||
#Llamamos a TMDB para que complete InfoLabels desde item_local. No se hace desde itemlist porque mezcla bufferes
|
||||
try:
|
||||
#if "(" in title or "[" in title: #Usado para test de limpieza de títulos
|
||||
# logger.debug(title)
|
||||
tmdb.set_infoLabels(item_local, seekTmdb = True)
|
||||
except:
|
||||
logger.debug("TMDB ERROR: ")
|
||||
logger.debug(item_local)
|
||||
cnt += 1
|
||||
if cnt == len(itemlist):
|
||||
break
|
||||
|
||||
#Llamamos a TMDB para que complete InfoLabels
|
||||
tmdb.set_infoLabels(itemlist, seekTmdb = True)
|
||||
|
||||
# Pasada para maqullaje de los títulos obtenidos desde TMDB
|
||||
# Pasada para maqullaje de los títulos obtenidos desde TMDB
|
||||
for item_local in itemlist:
|
||||
title = item_local.title
|
||||
title_subs = ""
|
||||
temporada = ""
|
||||
@@ -408,14 +423,14 @@ def listado(item):
|
||||
if not config.get_setting("unify"): #Si Titulos Inteligentes NO seleccionados:
|
||||
if item_local.contentType == "episode":
|
||||
if item_local.infoLabels['episodio_titulo']:
|
||||
title = '%sx%s %s, %s [%s][%s][%s]' % (str(item_local.contentSeason), item_local.contentEpisodeNumber, item_local.infoLabels['episodio_titulo'], item_local.contentSerieName, scrapertools.find_single_match(str(item_local.infoLabels['aired']), r'\/(\d{4})'), item_local.quality, str(item_local.language))
|
||||
title = '%sx%s %s, %s [COLOR yellow][%s][/COLOR] [%s] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (str(item_local.SeasonBackup), str(item_local.contentEpisodeNumber).zfill(2), item_local.infoLabels['episodio_titulo'], item_local.contentSerieName, scrapertools.find_single_match(str(item_local.infoLabels['aired']), r'\/(\d{4})'), rating, item_local.quality, str(item_local.language))
|
||||
else:
|
||||
title = '%sx%s %s [%s][%s][%s]' % (str(item_local.contentSeason), item_local.contentEpisodeNumber, item_local.contentSerieName, scrapertools.find_single_match(str(item_local.infoLabels['aired']), r'\/(\d{4})'), item_local.quality, str(item_local.language))
|
||||
title = '%sx%s %s [COLOR yellow][%s][/COLOR] [%s] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (str(item_local.SeasonBackup), str(item_local.contentEpisodeNumber).zfill(2), item_local.contentSerieName, scrapertools.find_single_match(str(item_local.infoLabels['aired']), r'\/(\d{4})'), rating, item_local.quality, str(item_local.language))
|
||||
item_local.infoLabels['title'] = item_local.contentSerieName
|
||||
|
||||
elif item_local.contentType == "season" or item_local.contentType == "tvshow":
|
||||
if item_local.extra == "series" or temporada == "[Temp.]":
|
||||
title = '%s - Temporada %s [COLOR yellow][%s][/COLOR] [%s] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.contentSerieName, str(item_local.contentSeason), scrapertools.find_single_match(str(item_local.infoLabels['aired']), r'\/(\d{4})'), rating, item_local.quality, str(item_local.language))
|
||||
title = '%s - Temporada %s [COLOR yellow][%s][/COLOR] [%s] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.contentSerieName, str(item_local.SeasonBackup), scrapertools.find_single_match(str(item_local.infoLabels['aired']), r'\/(\d{4})'), rating, item_local.quality, str(item_local.language))
|
||||
else:
|
||||
title = '%s [COLOR yellow][%s][/COLOR] [%s] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.contentSerieName, scrapertools.find_single_match(str(item_local.infoLabels['aired']), r'\/(\d{4})'), rating, item_local.quality, str(item_local.language))
|
||||
|
||||
@@ -432,23 +447,22 @@ def listado(item):
|
||||
|
||||
elif item_local.contentType == "season" or item_local.contentType == "tvshow":
|
||||
if item_local.extra == "series" or temporada == "[Temp.]":
|
||||
title = '%s - Temporada %s -%s-' % (item_local.contentSerieName, item_local.contentSeason, scrapertools.find_single_match(str(item_local.infoLabels['aired']), r'\/(\d{4})'))
|
||||
title = '%s - Temporada %s' % (item_local.contentSerieName, item_local.SeasonBackup)
|
||||
else:
|
||||
title = '%s -%s-' % (item_local.contentSerieName, scrapertools.find_single_match(str(item_local.infoLabels['aired']), r'\/(\d{4})'))
|
||||
title = '%s' % (item_local.contentSerieName)
|
||||
title_subs = title_subs.replace("[", "-").replace("]", "-")
|
||||
|
||||
if item_local.SeasonBackup:
|
||||
del item_local.SeasonBackup
|
||||
item_local.infoLabels['episodio_titulo'] = item_local.infoLabels['episodio_titulo'].replace("--", "").replace(" []", "").replace("()", "").replace("(/)", "").replace("[/]", "")
|
||||
title = title.replace("--", "").replace(" []", "").replace("()", "").replace("(/)", "").replace("[/]", "")
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\]\[\/COLOR\]', '', title)
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', title)
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', title)
|
||||
item_local.title = title + title_subs
|
||||
item_local.contentTitle += title_subs #añadimos info adicional para display
|
||||
|
||||
#logger.debug(item_local)
|
||||
|
||||
cnt += 1
|
||||
if cnt == len(itemlist):
|
||||
break
|
||||
|
||||
|
||||
if len(itemlist) == 0:
|
||||
itemlist.append(Item(channel=item.channel, action="mainlist", title="No se ha podido cargar el listado"))
|
||||
else:
|
||||
@@ -537,10 +551,12 @@ def listado_busqueda(item):
|
||||
if item_local.category:
|
||||
category = item.category
|
||||
del item_local.category
|
||||
if item_local.tipo:
|
||||
del item_local.tipo
|
||||
item_local.tipo = True
|
||||
del item_local.tipo
|
||||
if item_local.totalItems:
|
||||
del item_local.totalItems
|
||||
if item_local.text_color:
|
||||
del item_local.text_color
|
||||
item_local.contentThumbnail = ''
|
||||
item_local.thumbnail = ''
|
||||
item_local.context = "['buscar_trailer']"
|
||||
@@ -574,6 +590,9 @@ def listado_busqueda(item):
|
||||
if "[Dual" in title or "[dual" in title:
|
||||
title_subs = "[Dual]"
|
||||
title = title = re.sub(r'\[[D|d]ual.*?\]', '', title)
|
||||
if scrapertools.find_single_match(title, r'-\s[m|M].*?serie'):
|
||||
title = re.sub(r'-\s[m|M].*?serie', '', title)
|
||||
title_subs += "[Miniserie]"
|
||||
|
||||
if title.endswith('.'):
|
||||
title = title[:-1]
|
||||
@@ -608,6 +627,7 @@ def listado_busqueda(item):
|
||||
title = item_local.contentSerieName
|
||||
item_local.title = title
|
||||
item_local.infoLabels['tvshowtitle'] = item_local.contentSerieName
|
||||
item_local.infoLabels['title'] = ''
|
||||
if not item_local.contentSerieName:
|
||||
item_local.contentSerieName = "dummy"
|
||||
item_local.contentSeason = scrapertools.find_single_match(scrapedurl, '.*?-(\d{1,2})-Temp.*?\.html')
|
||||
@@ -628,9 +648,7 @@ def listado_busqueda(item):
|
||||
if "4K" in title or "4k" in title or "HDR" in title or "hdr" in title:
|
||||
item_local.quality = "4K"
|
||||
title = title.replace("4k-hdr", "").replace("4K-HDR", "").replace("hdr", "").replace("HDR", "").replace("4k", "").replace("4K", "")
|
||||
title = title.replace("(", "").replace(")", "").replace("[", "").replace("]", "")
|
||||
if title.endswith(' '):
|
||||
title = title[:-1]
|
||||
title = title.replace("(", "").replace(")", "").replace("[", "").replace("]", "").strip()
|
||||
item_local.title = title
|
||||
|
||||
if "/peli-" in scrapedurl:
|
||||
@@ -651,13 +669,18 @@ def listado_busqueda(item):
|
||||
# Guardamos temporalmente info de subtítulos, si lo hay
|
||||
item_local.extra = item_local.extra + title_subs
|
||||
|
||||
#Salvamos y borramos el número de temporadas porque TMDB a veces hace tonterias. Lo pasamos como serie completa
|
||||
if item_local.contentSeason and (item_local.contentType == "season" or item_local.contentType == "tvshow"):
|
||||
item_local.SeasonBackup = item_local.contentSeason
|
||||
del item_local.infoLabels['season']
|
||||
|
||||
itemlist.append(item_local.clone())
|
||||
|
||||
#logger.debug(item_local)
|
||||
|
||||
if not category: #Si este campo no existe es que viene de la primera pasada de una búsqueda global
|
||||
return itemlist #Retornamos sin pasar por la fase de maquillaje para ahorra tiempo
|
||||
|
||||
|
||||
#Llamamos a TMDB para que complete InfoLabels desde itemlist. Mejor desde itemlist porque envía las queries en paralelo
|
||||
tmdb.set_infoLabels(itemlist, seekTmdb = True)
|
||||
|
||||
@@ -692,7 +715,7 @@ def listado_busqueda(item):
|
||||
if not config.get_setting("unify"): #Si Titulos Inteligentes NO seleccionados:
|
||||
if item_local.contentType == "season" or item_local.contentType == "tvshow":
|
||||
if item_local.extra == "series" or temporada == "[Temp.]":
|
||||
title = '%s - Temporada %s [COLOR yellow][%s][/COLOR] [%s] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.contentSerieName, str(item_local.contentSeason), scrapertools.find_single_match(str(item_local.infoLabels['aired']), r'\/(\d{4})'), rating, item_local.quality, str(item_local.language))
|
||||
title = '%s - Temporada %s [COLOR yellow][%s][/COLOR] [%s] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.contentSerieName, str(item_local.SeasonBackup), scrapertools.find_single_match(str(item_local.infoLabels['aired']), r'\/(\d{4})'), rating, item_local.quality, str(item_local.language))
|
||||
else:
|
||||
title = '%s [COLOR yellow][%s][/COLOR] [%s] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.contentSerieName, scrapertools.find_single_match(str(item_local.infoLabels['aired']), r'\/(\d{4})'), rating, item_local.quality, str(item_local.language))
|
||||
|
||||
@@ -702,13 +725,16 @@ def listado_busqueda(item):
|
||||
if config.get_setting("unify"): #Si Titulos Inteligentes SÍ seleccionados:
|
||||
if item_local.contentType == "season" or item_local.contentType == "tvshow":
|
||||
if item_local.extra == "series" or temporada == "[Temp.]":
|
||||
title = '%s - Temporada %s -%s-' % (item_local.contentSerieName, item_local.contentSeason, scrapertools.find_single_match(str(item_local.infoLabels['aired']), r'\/(\d{4})'))
|
||||
title = '%s - Temporada %s' % (item_local.contentSerieName, item_local.SeasonBackup)
|
||||
else:
|
||||
title = '%s -%s-' % (item_local.contentSerieName, scrapertools.find_single_match(str(item_local.infoLabels['aired']), r'\/(\d{4})'))
|
||||
title = '%s' % (item_local.contentSerieName)
|
||||
title_subs = title_subs.replace("[", "-").replace("]", "-")
|
||||
|
||||
if item_local.SeasonBackup:
|
||||
del item_local.SeasonBackup
|
||||
item_local.infoLabels['episodio_titulo'] = item_local.infoLabels['episodio_titulo'].replace("--", "").replace(" []", "").replace("()", "").replace("(/)", "").replace("[/]", "")
|
||||
title = title.replace("--", "").replace(" []", "").replace("()", "").replace("(/)", "").replace("[/]", "")
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\]\[\/COLOR\]', '', title)
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', title)
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', title)
|
||||
item_local.title = title + title_subs
|
||||
item_local.contentTitle += title_subs #añadimos info adicional para display
|
||||
@@ -718,29 +744,41 @@ def listado_busqueda(item):
|
||||
|
||||
if url_next_page:
|
||||
itemlist.append(
|
||||
Item(channel=item.channel, action="listado_busqueda", title="[COLOR gold][B]Pagina siguiente >>[/B][/COLOR]", url=url_next_page, next_page=next_page, cnt_pag=cnt_pag, pag=pag, modo=modo, extra=item.extra, tipo=item.tipo))
|
||||
Item(channel=item.channel, action="listado_busqueda", title="[COLOR gold][B]Pagina siguiente >>[/B][/COLOR]", url=url_next_page, next_page=next_page, cnt_pag=cnt_pag, pag=pag, modo=modo, extra=item.extra))
|
||||
|
||||
#logger.debug(url_next_page + " / " + next_page + " / " + str(matches_cnt) + " / " + str(cnt_pag) + " / " + str(pag) + " / " + modo + " / " + item.extra + " / " + str(item.tipo))
|
||||
#logger.debug(url_next_page + " / " + next_page + " / " + str(matches_cnt) + " / " + str(cnt_pag) + " / " + str(pag) + " / " + modo + " / " + item.extra ))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def findvideos(item):
|
||||
import xbmc
|
||||
logger.info()
|
||||
itemlist = []
|
||||
|
||||
# Saber si estamos en una ventana emergente lanzada desde una viñeta del menú principal,
|
||||
# con la función "play_from_library"
|
||||
unify_status = False
|
||||
if xbmc.getCondVisibility('Window.IsMedia') == 1:
|
||||
try:
|
||||
import xbmc
|
||||
if xbmc.getCondVisibility('Window.IsMedia') == 1:
|
||||
unify_status = config.get_setting("unify")
|
||||
except:
|
||||
unify_status = config.get_setting("unify")
|
||||
|
||||
#Salvamos la información de max num. de episodios por temporada para despues de TMDB
|
||||
if item.infoLabels['temporada_num_episodios']:
|
||||
num_episodios = item.infoLabels['temporada_num_episodios']
|
||||
else:
|
||||
num_episodios = 1
|
||||
|
||||
# Obtener la información actualizada del Episodio, si no la hay
|
||||
if not item.infoLabels['tmdb_id'] or (not item.infoLabels['episodio_titulo'] and item.contentType == 'episode'):
|
||||
tmdb.set_infoLabels(item, True)
|
||||
elif (not item.infoLabels['tvdb_id'] and item.contentType == 'episode') or item.contentChannel == "videolibrary":
|
||||
tmdb.set_infoLabels(item, True)
|
||||
#Restauramos la información de max num. de episodios por temporada despues de TMDB
|
||||
if item.infoLabels['temporada_num_episodios'] and num_episodios > item.infoLabels['temporada_num_episodios']:
|
||||
item.infoLabels['temporada_num_episodios'] = num_episodios
|
||||
|
||||
if item.post: #Puede traer datos para una llamada "post". De momento usado para documentales, pero podrían ser series
|
||||
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url, post=item.post).data)
|
||||
@@ -774,11 +812,13 @@ def findvideos(item):
|
||||
item_local.action = ""
|
||||
item_local.server = "torrent"
|
||||
|
||||
#Limpiamos de año y rating de episodios
|
||||
#Limpiamos de año y rating de episodios, usamos el año del episodio en vez del de la serie
|
||||
if item_local.infoLabels['episodio_titulo']:
|
||||
item_local.infoLabels['episodio_titulo'] = re.sub(r'\s?\[.*?\]', '', item_local.infoLabels['episodio_titulo'])
|
||||
if item_local.infoLabels['episodio_titulo'] == item_local.contentSerieName:
|
||||
item_local.infoLabels['episodio_titulo'] = ''
|
||||
if item_local.infoLabels['aired'] and item_local.contentType == "episode":
|
||||
item_local.infoLabels['year'] = scrapertools.find_single_match(str(item_local.infoLabels['aired']), r'\/(\d{4})')
|
||||
|
||||
rating = '' #Ponemos el rating
|
||||
if item_local.infoLabels['rating'] and item_local.infoLabels['rating'] != '0.0':
|
||||
@@ -809,9 +849,9 @@ def findvideos(item):
|
||||
title = item_local.title
|
||||
title_gen = title
|
||||
|
||||
title_gen = re.sub(r'\s\[COLOR \w+\]\[\]\[\/COLOR\]', '', title_gen) #Quitamos etiquetas vacías
|
||||
title_gen = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', title_gen) #Quitamos colores vacíos
|
||||
title_gen = title_gen.replace(" []", "") #Quitamos etiquetas vacías
|
||||
title_gen = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', title_gen) #Quitamos etiquetas vacías
|
||||
title_gen = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', title_gen) #Quitamos colores vacíos
|
||||
title_gen = title_gen.replace(" []", "") #Quitamos etiquetas vacías
|
||||
|
||||
if not unify_status: #Si Titulos Inteligentes NO seleccionados:
|
||||
title_gen = '**- [COLOR gold]Enlaces Ver: [/COLOR]%s[COLOR gold] -**[/COLOR]' % (title_gen)
|
||||
@@ -826,8 +866,8 @@ def findvideos(item):
|
||||
#Ahora pintamos el link del Torrent, si lo hay
|
||||
if item_local.url: # Hay Torrent ?
|
||||
item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][Torrent][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.quality, str(item_local.language)) #Preparamos título de Torrent
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\]\[\/COLOR\]', '', item_local.title) #Quitamos etiquetas vacías
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', item_local.title) #Quitamos colores vacíos
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', item_local.title) #Quitamos etiquetas vacías
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', item_local.title) #Quitamos colores vacíos
|
||||
item_local.alive = "??" #Calidad del link sin verificar
|
||||
item_local.action = "play" #Visualizar vídeo
|
||||
|
||||
@@ -874,7 +914,7 @@ def episodios(item):
|
||||
item_local = item.clone()
|
||||
item_local.action = "findvideos"
|
||||
item_local.contentType = "episode"
|
||||
item_local.infoLabels['title'] = ''
|
||||
item_local.extra = "episodios"
|
||||
|
||||
item_local.url = urlparse.urljoin(host, scrapedurl)
|
||||
|
||||
@@ -889,7 +929,11 @@ def episodios(item):
|
||||
title = scrapedtitle.lower()
|
||||
epi = title.split("x")
|
||||
if len(epi) > 1:
|
||||
#temporada = re.sub("\D", "", epi[0])
|
||||
temporada = re.sub("\D", "", epi[0])
|
||||
if temporada:
|
||||
item_local.contentSeason = temporada
|
||||
else:
|
||||
item_local.contentSeason = 1
|
||||
capitulo = re.search("\d+", epi[1])
|
||||
if capitulo:
|
||||
item_local.contentEpisodeNumber = capitulo.group()
|
||||
@@ -897,6 +941,7 @@ def episodios(item):
|
||||
item_local.contentEpisodeNumber = 1
|
||||
|
||||
else: #Se prepara el Post para documentales
|
||||
item_local.contentSeason = 1
|
||||
item_local.contentEpisodeNumber = 1
|
||||
item_local.url = host + "/secciones.php?sec=descargas&ap=contar_varios"
|
||||
item_local.post = urllib.urlencode({name: value, "total_capis": total_capis, "tabla": tabla, "titulo": titulo_post})
|
||||
@@ -957,12 +1002,12 @@ def episodios(item):
|
||||
item_local.infoLabels['episodio_titulo'] = '%s [%s] [%s]' % (item_local.contentSerieName, item_local.infoLabels['year'], rating)
|
||||
item_local.infoLabels['title'] = item_local.infoLabels['episodio_titulo']
|
||||
|
||||
item_local.title = '%s [COLOR yellow][%s][/COLOR] [%s] [COLOR limegreen][%s][/COLOR] [COLOR red][%s][/COLOR]' % (item_local.title, item_local.infoLabels['year'], rating, item_local.quality, str(item_local.language))
|
||||
item_local.title = '%s [COLOR yellow][%s][/COLOR] [%s] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.title, item_local.infoLabels['year'], rating, item_local.quality, str(item_local.language))
|
||||
|
||||
#Quitamos campos vacíos
|
||||
item_local.infoLabels['episodio_titulo'] = item_local.infoLabels['episodio_titulo'].replace(" []", "")
|
||||
item_local.title = item_local.title.replace(" []", "")
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\]\[\/COLOR\]', '', item_local.title)
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', item_local.title)
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]-\[\/COLOR\]', '', item_local.title)
|
||||
if num_episodios < item_local.contentEpisodeNumber:
|
||||
num_episodios = item_local.contentEpisodeNumber
|
||||
|
||||
@@ -236,7 +236,7 @@ def listado(item):
|
||||
|
||||
#Determinamos y marcamos idiomas distintos del castellano
|
||||
item_local.language = []
|
||||
if "[vos" in title.lower() or "v.o.s" in title.lower() or "vo" in title.lower() or ".com/pelicula/" in scrapedurl or ".com/series-vo" in scrapedurl or "-vo/" in scrapedurl or "vos" in calidad.lower() or "vose" in calidad.lower() or "v.o.s" in calidad.lower() or ".com/peliculas-vo" in item.url:
|
||||
if "[vos" in title.lower() or "v.o.s" in title.lower() or "vo" in title.lower() or ".com/pelicula/" in scrapedurl or ".com/series-vo" in scrapedurl or "-vo/" in scrapedurl or "vos" in calidad.lower() or "vose" in calidad.lower() or "v.o.s" in calidad.lower() or "sub" in calidad.lower() or ".com/peliculas-vo" in item.url:
|
||||
item_local.language += ["VOS"]
|
||||
title = title.replace(" [Subs. integrados]", "").replace(" [subs. Integrados]", "").replace(" [VOSE", "").replace(" [VOS", "").replace(" (V.O.S.E)", "").replace(" VO", "").replace("Subtitulos", "")
|
||||
if "latino" in title.lower() or "argentina" in title.lower() or "-latino/" in scrapedurl or "latino" in calidad.lower() or "argentina" in calidad.lower():
|
||||
@@ -272,8 +272,8 @@ def listado(item):
|
||||
if "audio" in title.lower(): #Reservamos info de audio para después de TMDB
|
||||
title_subs += ['[%s]' % scrapertools.find_single_match(title, r'(\[[a|A]udio.*?\])')]
|
||||
title = re.sub(r'\[[a|A]udio.*?\]', '', title)
|
||||
if "[dual" in title.lower() or "multileng" in title.lower() or "multileng" in item_local.quality.lower():
|
||||
item_local.language += ["DUAL"]
|
||||
if "[dual" in title.lower() or "multileng" in title.lower() or "multileng" in item_local.quality.lower() or (("espa" in title.lower() or "spani" in title.lower()) and "VOS" in item_local.language):
|
||||
item_local.language[0:0] = ["DUAL"]
|
||||
title = re.sub(r'\[[D|d]ual.*?\]', '', title)
|
||||
title = re.sub(r'\[[M|m]ultileng.*?\]', '', title)
|
||||
item_local.quality = re.sub(r'\[[M|m]ultileng.*?\]', '', item_local.quality)
|
||||
@@ -309,7 +309,7 @@ def listado(item):
|
||||
|
||||
title = title.replace("Ver online ", "").replace("Descarga Serie HD ", "").replace("Descargar Serie HD ", "").replace("Descarga Serie ", "").replace("Descargar Serie ", "").replace("Ver en linea ", "").replace("Ver en linea", "").replace("HD ", "").replace("(Proper)", "").replace("RatDVD", "").replace("DVDRiP", "").replace("DVDRIP", "").replace("DVDR", "").replace("DVD9", "").replace("DVD", "").replace("DVB", "").replace("- ES ", "").replace("ES ", "").replace("COMPLETA", "").replace("(", "-").replace(")", "-").replace(".", " ").strip()
|
||||
|
||||
title = title.replace("Descargar torrent ", "").replace("Descarga Gratis ", "").replace("Descargar Estreno ", "").replace("Descargar Estrenos ", "").replace("Pelicula en latino ", "").replace("Descargar Pelicula ", "").replace("Descargar Peliculas ", "").replace("Descargar peliculas ", "").replace("Descargar Todas ", "").replace("Descargar Otras ", "").replace("Descargar ", "").replace("Descarga ", "").replace("Bajar ", "").replace("RIP ", "").replace("Rip", "").replace("RiP", "").replace("RiP", "").replace("XviD", "").replace("AC3 5.1", "").replace("AC3", "").replace("1080p ", "").replace("720p ", "").replace("DVD-Screener ", "").replace("TS-Screener ", "").replace("Screener ", "").replace("BdRemux ", "").replace("BR ", "").replace("4KULTRA", "").replace("FULLBluRay", "").replace("FullBluRay", "").replace("BluRay", "").replace("Bonus Disc", "").replace("de Cine ", "").replace("TeleCine ", "").replace("latino", "").replace("Latino", "").replace("argentina", "").replace("Argentina", "").strip()
|
||||
title = title.replace("Descargar torrent ", "").replace("Descarga Gratis ", "").replace("Descargar Estreno ", "").replace("Descargar Estrenos ", "").replace("Pelicula en latino ", "").replace("Descargar Pelicula ", "").replace("Descargar Peliculas ", "").replace("Descargar peliculas ", "").replace("Descargar Todas ", "").replace("Descargar Otras ", "").replace("Descargar ", "").replace("Descarga ", "").replace("Bajar ", "").replace("HDRIP ", "").replace("HDRiP ", "").replace("HDRip ", "").replace("RIP ", "").replace("Rip", "").replace("RiP", "").replace("XviD", "").replace("AC3 5.1", "").replace("AC3", "").replace("1080p ", "").replace("720p ", "").replace("DVD-Screener ", "").replace("TS-Screener ", "").replace("Screener ", "").replace("BdRemux ", "").replace("BR ", "").replace("4KULTRA", "").replace("FULLBluRay", "").replace("FullBluRay", "").replace("BluRay", "").replace("Bonus Disc", "").replace("de Cine ", "").replace("TeleCine ", "").replace("latino", "").replace("Latino", "").replace("argentina", "").replace("Argentina", "").strip()
|
||||
|
||||
if title.endswith("torrent gratis"): title = title[:-15]
|
||||
if title.endswith("gratis"): title = title[:-7]
|
||||
@@ -324,16 +324,9 @@ def listado(item):
|
||||
if not "HDR" in item_local.quality:
|
||||
item_local.quality += " HDR"
|
||||
|
||||
while title.endswith(' '):
|
||||
title = title[:-1]
|
||||
while title.startswith(' '):
|
||||
title = title[+1:]
|
||||
while title_alt.endswith(' '):
|
||||
title_alt = title_alt[:-1]
|
||||
while title_alt.startswith(' '):
|
||||
title_alt = title_alt[+1:]
|
||||
while item_local.quality.endswith(' '):
|
||||
item_local.quality = item_local.quality[:-1]
|
||||
title = title.strip()
|
||||
title_alt = title_alt.strip()
|
||||
item_local.quality = item_local.quality.strip()
|
||||
|
||||
if not title: #Usamos solo el title_alt en caso de que no exista el título original
|
||||
title = title_alt
|
||||
@@ -416,9 +409,9 @@ def listado(item):
|
||||
if config.get_setting("unify"): #Si Titulos Inteligentes SÍ seleccionados:
|
||||
title = title.replace("[", "-").replace("]", "-")
|
||||
|
||||
title = title.replace("--", "").replace(" []", "").replace("()", "").replace("(/)", "").replace("[/]", "")
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\]\[\/COLOR\]', '', title)
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', title)
|
||||
title = title.replace("--", "").replace(" []", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', title).strip()
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', title).strip()
|
||||
|
||||
if category == "newest": #Viene de Novedades. Marquemos el título con el nombre del canal
|
||||
title += ' -%s-' % item_local.channel.capitalize()
|
||||
@@ -427,7 +420,7 @@ def listado(item):
|
||||
|
||||
item_local.title = title
|
||||
|
||||
logger.debug("url: " + item_local.url + " / title: " + item_local.title + " / content title: " + item_local.contentTitle + "/" + item_local.contentSerieName + " / calidad: " + item_local.quality + " / year: " + year)
|
||||
logger.debug("url: " + item_local.url + " / title: " + item_local.title + " / content title: " + item_local.contentTitle + "/" + item_local.contentSerieName + " / calidad: " + item_local.quality + " / year: " + str(item_local.infoLabels['year']))
|
||||
#logger.debug(item_local)
|
||||
|
||||
if len(itemlist) == 0:
|
||||
@@ -447,15 +440,10 @@ def listado_busqueda(item):
|
||||
cnt_tot = 40 # Poner el num. máximo de items por página. Dejamos que la web lo controle
|
||||
cnt_title = 0 # Contador de líneas insertadas en Itemlist
|
||||
cnt_pag = 0 # Contador de líneas leídas de Matches
|
||||
category = "" # Guarda la categoria que viene desde una busqueda global
|
||||
|
||||
if item.cnt_pag:
|
||||
cnt_pag = item.cnt_pag # Se guarda en la lista de páginas anteriores en Item
|
||||
del item.cnt_pag
|
||||
|
||||
if item.category:
|
||||
category = item.category
|
||||
del item.category
|
||||
if item.totalItems:
|
||||
del item.totalItems
|
||||
if item.text_bold:
|
||||
@@ -578,12 +566,14 @@ def listado_busqueda(item):
|
||||
title_lista += [scrapedurl_alt]
|
||||
else:
|
||||
title_lista += [scrapedurl]
|
||||
if "juego/" in scrapedurl or "xbox" in scrapedurl.lower() or "xbox" in scrapedtitle.lower() or "xbox" in calidad.lower() or "epub" in calidad.lower() or "pdf" in calidad.lower(): # no mostramos lo que no sean videos
|
||||
if "juego/" in scrapedurl or "xbox" in scrapedurl.lower() or "xbox" in scrapedtitle.lower() or "windows" in scrapedtitle.lower() or "windows" in calidad.lower() or "nintendo" in scrapedtitle.lower() or "xbox" in calidad.lower() or "epub" in calidad.lower() or "pdf" in calidad.lower() or "pcdvd" in calidad.lower() or "crack" in calidad.lower(): # no mostramos lo que no sean videos
|
||||
continue
|
||||
cnt_title += 1 # Sería una línea real más para Itemlist
|
||||
|
||||
#Creamos una copia de Item para cada contenido
|
||||
item_local = item.clone()
|
||||
if item_local.category:
|
||||
del item_local.category
|
||||
if item_local.tipo:
|
||||
del item_local.tipo
|
||||
if item_local.totalItems:
|
||||
@@ -594,6 +584,10 @@ def listado_busqueda(item):
|
||||
del item_local.pattern
|
||||
if item_local.title_lista:
|
||||
del item_local.title_lista
|
||||
item_local.adult = True
|
||||
del item_local.adult
|
||||
item_local.folder = True
|
||||
del item_local.folder
|
||||
item_local.title = ''
|
||||
item_local.context = "['buscar_trailer']"
|
||||
|
||||
@@ -620,7 +614,7 @@ def listado_busqueda(item):
|
||||
|
||||
#Determinamos y marcamos idiomas distintos del castellano
|
||||
item_local.language = []
|
||||
if "[vos" in title.lower() or "v.o.s" in title.lower() or "vo" in title.lower() or ".com/pelicula/" in scrapedurl or ".com/series-vo" in scrapedurl or "-vo/" in scrapedurl or "vos" in calidad.lower() or "vose" in calidad.lower() or "v.o.s" in calidad.lower():
|
||||
if "[vos" in title.lower() or "v.o.s" in title.lower() or "vo" in title.lower() or ".com/pelicula/" in scrapedurl or ".com/series-vo" in scrapedurl or "-vo/" in scrapedurl or "vos" in calidad.lower() or "vose" in calidad.lower() or "v.o.s" in calidad.lower() or "sub" in calidad.lower() or ".com/peliculas-vo" in item.url:
|
||||
item_local.language += ["VOS"]
|
||||
title = title.replace(" [Subs. integrados]", "").replace(" [subs. Integrados]", "").replace(" [VOSE", "").replace(" [VOS", "").replace(" (V.O.S.E)", "").replace(" VO", "").replace("Subtitulos", "")
|
||||
if "latino" in title.lower() or "argentina" in title.lower() or "-latino/" in scrapedurl or "latino" in calidad.lower() or "argentina" in calidad.lower():
|
||||
@@ -654,8 +648,8 @@ def listado_busqueda(item):
|
||||
if "audio" in title.lower(): #Reservamos info de audio para después de TMDB
|
||||
title_subs += ['[%s]' % scrapertools.find_single_match(title, r'(\[[a|A]udio.*?\])')]
|
||||
title = re.sub(r'\[[a|A]udio.*?\]', '', title)
|
||||
if "[dual" in title.lower() or "multileng" in title.lower() or "multileng" in item_local.quality.lower():
|
||||
item_local.language += ["DUAL"]
|
||||
if "[dual" in title.lower() or "multileng" in title.lower() or "multileng" in item_local.quality.lower() or (("espa" in title.lower() or "spani" in title.lower()) and "VOS" in item_local.language):
|
||||
item_local.language[0:0] = ["DUAL"]
|
||||
title = re.sub(r'\[[D|d]ual.*?\]', '', title)
|
||||
title = re.sub(r'\[[M|m]ultileng.*?\]', '', title)
|
||||
item_local.quality = re.sub(r'\[[M|m]ultileng.*?\]', '', item_local.quality)
|
||||
@@ -691,7 +685,7 @@ def listado_busqueda(item):
|
||||
|
||||
title = title.replace("Ver online ", "").replace("Descarga Serie HD ", "").replace("Descargar Serie HD ", "").replace("Descarga Serie ", "").replace("Descargar Serie ", "").replace("Ver en linea ", "").replace("Ver en linea", "").replace("HD ", "").replace("(Proper)", "").replace("RatDVD", "").replace("DVDRiP", "").replace("DVDRIP", "").replace("DVDR", "").replace("DVD9", "").replace("DVD", "").replace("DVB", "").replace("- ES ", "").replace("ES ", "").replace("COMPLETA", "").replace("(", "-").replace(")", "-").replace(".", " ").strip()
|
||||
|
||||
title = title.replace("Descargar torrent ", "").replace("Descarga Gratis ", "").replace("Descargar Estreno ", "").replace("Descargar Estrenos ", "").replace("Pelicula en latino ", "").replace("Descargar Pelicula ", "").replace("Descargar Peliculas ", "").replace("Descargar peliculas ", "").replace("Descargar Todas ", "").replace("Descargar Otras ", "").replace("Descargar ", "").replace("Descarga ", "").replace("Bajar ", "").replace("RIP ", "").replace("Rip", "").replace("RiP", "").replace("RiP", "").replace("XviD", "").replace("AC3 5.1", "").replace("AC3", "").replace("1080p ", "").replace("720p ", "").replace("DVD-Screener ", "").replace("TS-Screener ", "").replace("Screener ", "").replace("BdRemux ", "").replace("BR ", "").replace("4KULTRA", "").replace("FULLBluRay", "").replace("FullBluRay", "").replace("BluRay", "").replace("Bonus Disc", "").replace("de Cine ", "").replace("TeleCine ", "").replace("latino", "").replace("Latino", "").replace("argentina", "").replace("Argentina", "").strip()
|
||||
title = title.replace("Descargar torrent ", "").replace("Descarga Gratis ", "").replace("Descargar Estreno ", "").replace("Descargar Estrenos ", "").replace("Pelicula en latino ", "").replace("Descargar Pelicula ", "").replace("Descargar Peliculas ", "").replace("Descargar peliculas ", "").replace("Descargar Todas ", "").replace("Descargar Otras ", "").replace("Descargar ", "").replace("Descarga ", "").replace("Bajar ", "").replace("HDRIP ", "").replace("HDRiP ", "").replace("HDRip ", "").replace("RIP ", "").replace("Rip", "").replace("RiP", "").replace("XviD", "").replace("AC3 5.1", "").replace("AC3", "").replace("1080p ", "").replace("720p ", "").replace("DVD-Screener ", "").replace("TS-Screener ", "").replace("Screener ", "").replace("BdRemux ", "").replace("BR ", "").replace("4KULTRA", "").replace("FULLBluRay", "").replace("FullBluRay", "").replace("BluRay", "").replace("Bonus Disc", "").replace("de Cine ", "").replace("TeleCine ", "").replace("latino", "").replace("Latino", "").replace("argentina", "").replace("Argentina", "").strip()
|
||||
|
||||
if "pelisyseries.com" in host and item_local.contentType == "tvshow":
|
||||
titulo = ''
|
||||
@@ -715,19 +709,18 @@ def listado_busqueda(item):
|
||||
if title.endswith(" -"): title = title[:-2]
|
||||
if "en espa" in title: title = title[:-11]
|
||||
#title = re.sub(r'^\s', '', title)
|
||||
title = title.replace("a?o", 'año').replace("a?O", 'año').replace("A?o", 'Año').replace("A?O", 'Año')
|
||||
while title.startswith(' '):
|
||||
title = title[+1:]
|
||||
while title.endswith(' '):
|
||||
title = title[:-1]
|
||||
title = title.replace("a?o", 'año').replace("a?O", 'año').replace("A?o", 'Año').replace("A?O", 'Año').strip()
|
||||
|
||||
#Preparamos calidad
|
||||
item_local.quality = item_local.quality.replace("[ ", "").replace(" ]", "") #Preparamos calidad para Series
|
||||
item_local.quality = re.sub(r'\[\d{4}\]', '', item_local.quality) #Quitar año, si lo tiene
|
||||
item_local.quality = re.sub(r'\[Cap.*?\]', '', item_local.quality) #Quitar episodios, si lo tiene
|
||||
item_local.quality = re.sub(r'\[Docu.*?\]', '', item_local.quality) #Quitar tipo contenidos, si lo tiene
|
||||
if "[es-" in item_local.quality.lower() or (("cast" in item_local.quality.lower() or "spani" in item_local.quality.lower()) and ("eng" in item_local.quality.lower() or "ing" in item_local.quality.lower())): #Mirar si es DUAL
|
||||
item_local.language += ["DUAL"] #Salvar DUAL en idioma
|
||||
#Mirar si es DUAL
|
||||
if "VOS" in item_local.language and "DUAL" not in item_local.language and ("[sp" in item_local.quality.lower() or "espa" in item_local.quality.lower() or "cast" in item_local.quality.lower() or "spani" in item_local.quality.lower()):
|
||||
item_local.language[0:0] = ["DUAL"]
|
||||
if ("[es-" in item_local.quality.lower() or (("cast" in item_local.quality.lower() or "espa" in item_local.quality.lower() or "spani" in item_local.quality.lower()) and ("eng" in item_local.quality.lower() or "ing" in item_local.quality.lower()))) and "DUAL" not in item_local.language: #Mirar si es DUAL
|
||||
item_local.language[0:0] = ["DUAL"] #Salvar DUAL en idioma
|
||||
item_local.quality = re.sub(r'\[[es|ES]-\w+]', '', item_local.quality) #borrar DUAL
|
||||
item_local.quality = re.sub(r'[\s|-][c|C]aste.+', '', item_local.quality) #Borrar después de Castellano
|
||||
item_local.quality = re.sub(r'[\s|-][e|E]spa.+', '', item_local.quality) #Borrar después de Español
|
||||
@@ -735,9 +728,7 @@ def listado_busqueda(item):
|
||||
item_local.quality = re.sub(r'[\s|-][i|I|e|E]ngl.+', '', item_local.quality) #Borrar después de Inglés-English
|
||||
item_local.quality = item_local.quality.replace("[", "").replace("]", " ").replace("ALTA DEFINICION", "HDTV").replace(" Cap", "")
|
||||
#Borrar palabras innecesarias restantes
|
||||
item_local.quality = item_local.quality.replace("Espaol", "").replace("Español", "").replace("Espa", "").replace("Castellano ", "").replace("Castellano", "").replace("Spanish", "").replace("English", "").replace("Ingles", "").replace("Latino", "").replace("+Subs", "").replace("-Subs", "").replace("Subs", "").replace("VOSE", "").replace("VOS", "")
|
||||
while item_local.quality.endswith(" "): #Borrar espacios de cola
|
||||
item_local.quality = item_local.quality[:-1]
|
||||
item_local.quality = item_local.quality.replace("Espaol", "").replace("Español", "").replace("Espa", "").replace("Castellano ", "").replace("Castellano", "").replace("Spanish", "").replace("English", "").replace("Ingles", "").replace("Latino", "").replace("+Subs", "").replace("-Subs", "").replace("Subs", "").replace("VOSE", "").replace("VOS", "").strip()
|
||||
|
||||
#Limpieza final del título y guardado en las variables según su tipo de contenido
|
||||
item_local.title = title
|
||||
@@ -816,7 +807,7 @@ def listado_busqueda(item):
|
||||
#Agrega el item local a la lista itemlist
|
||||
itemlist.append(item_local.clone())
|
||||
|
||||
if not category: #Si este campo no existe es que viene de la primera pasada de una búsqueda global
|
||||
if not item.category: #Si este campo no existe es que viene de la primera pasada de una búsqueda global
|
||||
return itemlist #Retornamos sin pasar por la fase de maquillaje para ahorra tiempo
|
||||
|
||||
#Pasamos a TMDB la lista completa Itemlist
|
||||
@@ -872,12 +863,12 @@ def listado_busqueda(item):
|
||||
if config.get_setting("unify"): #Si Titulos Inteligentes SÍ seleccionados:
|
||||
title = title.replace("[", "-").replace("]", "-")
|
||||
|
||||
title = title.replace("--", "").replace(" []", "").replace("()", "").replace("(/)", "").replace("[/]", "")
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\]\[\/COLOR\]', '', title)
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', title)
|
||||
title = title.replace("--", "").replace(" []", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', title).strip()
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', title).strip()
|
||||
item_local.title = title
|
||||
|
||||
logger.debug("url: " + item_local.url + " / title: " + item_local.title + " / content title: " + item_local.contentTitle + "/" + item_local.contentSerieName + " / calidad: " + item_local.quality + "[" + str(item_local.language) + "]" + " / calidad ORG: " + calidad + " / year: " + year + " / tamaño: " + size)
|
||||
logger.debug("url: " + item_local.url + " / title: " + item_local.title + " / content title: " + item_local.contentTitle + "/" + item_local.contentSerieName + " / calidad: " + item_local.quality + "[" + str(item_local.language) + "]" + " / year: " + str(item_local.infoLabels['year']))
|
||||
|
||||
#logger.debug(item_local)
|
||||
|
||||
@@ -889,7 +880,6 @@ def listado_busqueda(item):
|
||||
return itemlist
|
||||
|
||||
def findvideos(item):
|
||||
import xbmc
|
||||
from core import channeltools
|
||||
logger.info()
|
||||
itemlist = []
|
||||
@@ -997,18 +987,31 @@ def findvideos(item):
|
||||
verificar_enlaces_descargas = -1 #Verificar todos los enlaces Descargar
|
||||
verificar_enlaces_descargas_validos = True #"¿Contar sólo enlaces 'verificados' en Descargar?"
|
||||
excluir_enlaces_descargas = [] #Lista vacía de servidores excluidos en Descargar
|
||||
|
||||
|
||||
# Saber si estamos en una ventana emergente lanzada desde una viñeta del menú principal,
|
||||
# con la función "play_from_library"
|
||||
unify_status = False
|
||||
if xbmc.getCondVisibility('Window.IsMedia') == 1:
|
||||
try:
|
||||
import xbmc
|
||||
if xbmc.getCondVisibility('Window.IsMedia') == 1:
|
||||
unify_status = config.get_setting("unify")
|
||||
except:
|
||||
unify_status = config.get_setting("unify")
|
||||
|
||||
#Salvamos la información de max num. de episodios por temporada para despues de TMDB
|
||||
if item.infoLabels['temporada_num_episodios']:
|
||||
num_episodios = item.infoLabels['temporada_num_episodios']
|
||||
else:
|
||||
num_episodios = 1
|
||||
|
||||
# Obtener la información actualizada del Episodio, si no la hay
|
||||
if not item.infoLabels['tmdb_id'] or (not item.infoLabels['episodio_titulo'] and item.contentType == 'episode'):
|
||||
tmdb.set_infoLabels(item, True)
|
||||
elif (not item.infoLabels['tvdb_id'] and item.contentType == 'episode') or item.contentChannel == "videolibrary":
|
||||
tmdb.set_infoLabels(item, True)
|
||||
#Restauramos la información de max num. de episodios por temporada despues de TMDB
|
||||
if item.infoLabels['temporada_num_episodios'] and num_episodios > item.infoLabels['temporada_num_episodios']:
|
||||
item.infoLabels['temporada_num_episodios'] = num_episodios
|
||||
|
||||
# Descarga la página
|
||||
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
|
||||
@@ -1028,6 +1031,8 @@ def findvideos(item):
|
||||
item.infoLabels['episodio_titulo'] = re.sub(r'\s?\[.*?\]', '', item.infoLabels['episodio_titulo'])
|
||||
if item.infoLabels['episodio_titulo'] == item.contentSerieName:
|
||||
item.infoLabels['episodio_titulo'] = ''
|
||||
if item.infoLabels['aired'] and item.contentType == "episode":
|
||||
item.infoLabels['year'] = scrapertools.find_single_match(str(item.infoLabels['aired']), r'\/(\d{4})')
|
||||
|
||||
#Generamos una copia de Item para trabajar sobre ella
|
||||
item_local = item.clone()
|
||||
@@ -1057,10 +1062,10 @@ def findvideos(item):
|
||||
else:
|
||||
title = item_local.title
|
||||
title_gen = title
|
||||
|
||||
title_gen = re.sub(r'\s\[COLOR \w+\]\[\]\[\/COLOR\]', '', title_gen) #Quitamos etiquetas vacías
|
||||
title_gen = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', title_gen) #Quitamos colores vacíos
|
||||
title_gen = title_gen.replace(" []", "") #Quitamos etiquetas vacías
|
||||
|
||||
title_gen = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', title_gen).strip() #Quitamos etiquetas vacías
|
||||
title_gen = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', title_gen).strip() #Quitamos colores vacíos
|
||||
title_gen = title_gen.replace(" []", "").strip() #Quitamos etiquetas vacías
|
||||
|
||||
if not unify_status: #Si Titulos Inteligentes NO seleccionados:
|
||||
title_gen = '**- [COLOR gold]Enlaces Ver: [/COLOR]%s[COLOR gold] -**[/COLOR]' % (title_gen)
|
||||
@@ -1074,9 +1079,9 @@ def findvideos(item):
|
||||
|
||||
#Ahora pintamos el link del Torrent, si lo hay
|
||||
if item_local.url: # Hay Torrent ?
|
||||
item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][Torrent][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red][%s][/COLOR]' % (item_local.quality, str(item_local.language)) #Preparamos título de Torrent
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\]\[\/COLOR\]', '', item_local.title) #Quitamos etiquetas vacías
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', item_local.title) #Quitamos colores vacíos
|
||||
item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][Torrent][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.quality, str(item_local.language)) #Preparamos título de Torrent
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', item_local.title).strip() #Quitamos etiquetas vacías
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', item_local.title).strip() #Quitamos colores vacíos
|
||||
item_local.alive = "??" #Calidad del link sin verificar
|
||||
item_local.action = "play" #Visualizar vídeo
|
||||
|
||||
@@ -1156,9 +1161,9 @@ def findvideos(item):
|
||||
item_local.action = "play"
|
||||
item_local.server = servidor
|
||||
item_local.url = enlace
|
||||
item_local.title = item_local.title.replace("[]", "")
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\]\[\/COLOR\]', '', item_local.title)
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', item_local.title)
|
||||
item_local.title = item_local.title.replace("[]", "").strip()
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', item_local.title).strip()
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', item_local.title).strip()
|
||||
itemlist.append(item_local.clone())
|
||||
except:
|
||||
pass
|
||||
@@ -1249,9 +1254,9 @@ def findvideos(item):
|
||||
item_local.action = "play"
|
||||
item_local.server = servidor
|
||||
item_local.url = enlace
|
||||
item_local.title = parte_title.replace("[]", "")
|
||||
item_local.title = re.sub(r'\[COLOR \w+\]\[\]\[\/COLOR\]', '', item_local.title)
|
||||
item_local.title = re.sub(r'\[COLOR \w+\]-\[\/COLOR\]', '', item_local.title)
|
||||
item_local.title = parte_title.replace("[]", "").strip()
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', item_local.title).strip()
|
||||
item_local.title = re.sub(r'\[COLOR \w+\]-\[\/COLOR\]', '', item_local.title).strip()
|
||||
itemlist.append(item_local.clone())
|
||||
except:
|
||||
pass
|
||||
@@ -1426,10 +1431,10 @@ def episodios(item):
|
||||
item_local.title = '%s [%s] [%s] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.title, item_local.infoLabels['year'], rating, item_local.quality, str(item_local.language))
|
||||
|
||||
#Quitamos campos vacíos
|
||||
item_local.infoLabels['episodio_titulo'] = item_local.infoLabels['episodio_titulo'].replace(" []", "")
|
||||
item_local.title = item_local.title.replace(" []", "")
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\]\[\/COLOR\]', '', item_local.title)
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]-\[\/COLOR\]', '', item_local.title)
|
||||
item_local.infoLabels['episodio_titulo'] = item_local.infoLabels['episodio_titulo'].replace(" []", "").strip()
|
||||
item_local.title = item_local.title.replace(" []", "").strip()
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', item_local.title).strip()
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]-\[\/COLOR\]', '', item_local.title).strip()
|
||||
if num_episodios < item_local.contentEpisodeNumber:
|
||||
num_episodios = item_local.contentEpisodeNumber
|
||||
if num_episodios and not item_local.infoLabels['temporada_num_episodios']:
|
||||
|
||||
14
plugin.video.alfa/channels/pelispedia.json
Executable file → Normal file
14
plugin.video.alfa/channels/pelispedia.json
Executable file → Normal file
@@ -21,20 +21,6 @@
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "perfil",
|
||||
"type": "list",
|
||||
"label": "Perfil de color",
|
||||
"default": 3,
|
||||
"enabled": true,
|
||||
"visible": true,
|
||||
"lvalues": [
|
||||
"Sin color",
|
||||
"Perfil 3",
|
||||
"Perfil 2",
|
||||
"Perfil 1"
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": "orden_episodios",
|
||||
"type": "bool",
|
||||
|
||||
571
plugin.video.alfa/channels/pelispedia.py
Executable file → Normal file
571
plugin.video.alfa/channels/pelispedia.py
Executable file → Normal file
@@ -3,6 +3,7 @@
|
||||
import re
|
||||
import urllib
|
||||
import urlparse
|
||||
import json
|
||||
|
||||
from channelselector import get_thumb
|
||||
from core import channeltools
|
||||
@@ -13,6 +14,7 @@ from core import tmdb
|
||||
from core.item import Item
|
||||
from platformcode import config, logger
|
||||
from platformcode import platformtools
|
||||
from core import filetools
|
||||
|
||||
__channel__ = "pelispedia"
|
||||
|
||||
@@ -21,20 +23,9 @@ CHANNEL_HOST = "http://www.pelispedia.tv/"
|
||||
# Configuracion del canal
|
||||
try:
|
||||
__modo_grafico__ = config.get_setting('modo_grafico', __channel__)
|
||||
__perfil__ = config.get_setting('perfil', __channel__)
|
||||
except:
|
||||
__modo_grafico__ = True
|
||||
__perfil__ = 0
|
||||
|
||||
# Fijar perfil de color
|
||||
perfil = [['0xFF6E2802', '0xFFFAA171', '0xFFE9D7940'],
|
||||
['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E'],
|
||||
['0xFF58D3F7', '0xFF2E64FE', '0xFF0404B4']]
|
||||
|
||||
if __perfil__ - 1 >= 0:
|
||||
color1, color2, color3 = perfil[__perfil__ - 1]
|
||||
else:
|
||||
color1 = color2 = color3 = ""
|
||||
|
||||
parameters = channeltools.get_channel_parameters(__channel__)
|
||||
fanart_host = parameters['fanart']
|
||||
@@ -45,52 +36,68 @@ def mainlist(item):
|
||||
logger.info()
|
||||
|
||||
itemlist = list()
|
||||
itemlist.append(Item(channel=__channel__, title="Películas", text_color=color1, fanart=fanart_host, folder=False,
|
||||
itemlist.append(Item(channel=__channel__, title="Películas", fanart=fanart_host, folder=False,
|
||||
thumbnail=thumbnail_host, text_bold=True))
|
||||
itemlist.append(
|
||||
Item(channel=__channel__, action="listado", title=" Novedades", text_color=color2, viewcontent="movies",
|
||||
url=urlparse.urljoin(CHANNEL_HOST, "movies/all/"), fanart=fanart_host, extra="movies",
|
||||
viewmode="movie_with_plot",
|
||||
|
||||
itemlist.append(Item(channel=__channel__, action="listado", title=" Novedades",
|
||||
url=urlparse.urljoin(CHANNEL_HOST, "movies/all/"), extra="movies",
|
||||
viewcontent="movies", viewmode="movie_with_plot", fanart=fanart_host,
|
||||
thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres/0/Directors%20Chair.png"))
|
||||
itemlist.append(
|
||||
Item(channel=__channel__, action="listado_alfabetico", title=" Por orden alfabético", text_color=color2,
|
||||
url=urlparse.urljoin(CHANNEL_HOST, "movies/all/"), extra="movies", fanart=fanart_host,
|
||||
viewmode="thumbnails",
|
||||
|
||||
itemlist.append(Item(channel=__channel__, action="listado_alfabetico", title=" Por orden alfabético",
|
||||
url=urlparse.urljoin(CHANNEL_HOST, "movies/all/"), extra="movies",
|
||||
viewmode="thumbnails", fanart=fanart_host,
|
||||
thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres/0/A-Z.png"))
|
||||
itemlist.append(Item(channel=__channel__, action="listado_genero", title=" Por género", text_color=color2,
|
||||
url=urlparse.urljoin(CHANNEL_HOST, "movies/all/"), extra="movies", fanart=fanart_host,
|
||||
thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres/0/Genre.png"))
|
||||
itemlist.append(Item(channel=__channel__, action="listado_anio", title=" Por año", text_color=color2,
|
||||
url=urlparse.urljoin(CHANNEL_HOST, "movies/all/"), extra="movies", fanart=fanart_host,
|
||||
thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres/0/Year.png"))
|
||||
# itemlist.append(Item(channel=__channel__, action="search", title=" Buscar...", text_color=color2,
|
||||
# url=urlparse.urljoin(CHANNEL_HOST, "buscar/?s="), extra="movies", fanart=fanart_host))
|
||||
|
||||
itemlist.append(Item(channel=__channel__, title="Series", text_color=color1, fanart=fanart_host, folder=False,
|
||||
thumbnail=thumbnail_host, text_bold=True))
|
||||
itemlist.append(
|
||||
Item(channel=__channel__, action="listado", title=" Novedades", text_color=color2, viewcontent="tvshows",
|
||||
url=urlparse.urljoin(CHANNEL_HOST, "series/all/"), extra="serie", fanart=fanart_host,
|
||||
viewmode="movie_with_plot",
|
||||
thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres/0/TV%20Series.png"))
|
||||
itemlist.append(Item(channel=__channel__, action="listado_alfabetico", title=" Por orden alfabético",
|
||||
text_color=color2, extra="serie", fanart=fanart_host, viewmode="thumbnails",
|
||||
thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres/0/A-Z.png"))
|
||||
itemlist.append(Item(channel=__channel__, action="listado_genero", title=" Por género", extra="serie",
|
||||
text_color=color2, fanart=fanart_host, url=urlparse.urljoin(CHANNEL_HOST, "series/all/"),
|
||||
thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres/0/Genre.png"))
|
||||
itemlist.append(
|
||||
Item(channel=__channel__, action="listado_anio", title=" Por año", extra="serie", text_color=color2,
|
||||
fanart=fanart_host, url=urlparse.urljoin(CHANNEL_HOST, "series/all/"),
|
||||
itemlist.append(Item(channel=__channel__, action="listado_genero", title=" Por género",
|
||||
url=urlparse.urljoin(CHANNEL_HOST, "movies/all/"), extra="movies",
|
||||
fanart=fanart_host,
|
||||
thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres/0/Genre.png"))
|
||||
|
||||
itemlist.append(Item(channel=__channel__, action="listado_anio", title=" Por año",
|
||||
url=urlparse.urljoin(CHANNEL_HOST, "movies/all/"), extra="movies",
|
||||
fanart=fanart_host,
|
||||
thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres/0/Year.png"))
|
||||
# itemlist.append(Item(channel=__channel__, action="search", title=" Buscar...", text_color=color2,
|
||||
# url=urlparse.urljoin(CHANNEL_HOST, "series/buscar/?s="), extra="serie", fanart=fanart_host))
|
||||
|
||||
itemlist.append(Item(channel=__channel__, title="", fanart=fanart_host, folder=False, thumbnail=thumbnail_host))
|
||||
itemlist.append(Item(channel=__channel__, action="local_search", title=" Buscar...",
|
||||
url=urlparse.urljoin(CHANNEL_HOST, "buscar/?sitesearch=pelispedia.tv&q="), extra="movies",
|
||||
fanart=fanart_host, thumbnail=get_thumb('search', auto=True)))
|
||||
|
||||
itemlist.append(Item(channel=__channel__, action="settings", title="Configuración", text_color=color1,
|
||||
fanart=fanart_host, text_bold=True,
|
||||
thumbnail=get_thumb("setting_0.png")))
|
||||
|
||||
itemlist.append(Item(channel=__channel__, title="Series", fanart=fanart_host, folder=False,
|
||||
thumbnail=thumbnail_host, text_bold=True))
|
||||
|
||||
itemlist.append(
|
||||
Item(channel=__channel__, action="listado", title=" Novedades",
|
||||
url=urlparse.urljoin(CHANNEL_HOST, "series/all/"), extra="serie",
|
||||
viewcontent="tvshows", viewmode="movie_with_plot", fanart=fanart_host,
|
||||
thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres/0/TV%20Series.png"))
|
||||
|
||||
itemlist.append(Item(channel=__channel__, action="listado_alfabetico", title=" Por orden alfabético",
|
||||
url=urlparse.urljoin(CHANNEL_HOST, "series/all/"), extra="serie",
|
||||
viewmode="thumbnails", fanart=fanart_host,
|
||||
thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres/0/A-Z.png"))
|
||||
|
||||
itemlist.append(Item(channel=__channel__, action="listado_genero", title=" Por género",
|
||||
url=urlparse.urljoin(CHANNEL_HOST, "series/all/"), extra="serie",
|
||||
fanart=fanart_host,
|
||||
thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres/0/Genre.png"))
|
||||
|
||||
itemlist.append(Item(channel=__channel__, action="listado_anio", title=" Por año",
|
||||
url=urlparse.urljoin(CHANNEL_HOST, "series/all/"), extra="serie",
|
||||
fanart=fanart_host,
|
||||
thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres/0/Year.png"))
|
||||
|
||||
itemlist.append(Item(channel=__channel__, action="local_search", title=" Buscar...",
|
||||
url=urlparse.urljoin(CHANNEL_HOST, "series/buscar/?sitesearch=pelispedia.tv&q="), extra="serie",
|
||||
fanart=fanart_host, thumbnail=get_thumb('search', auto=True)))
|
||||
|
||||
|
||||
# ~ itemlist.append(Item(channel=__channel__, title="", fanart=fanart_host, folder=False, thumbnail=thumbnail_host))
|
||||
|
||||
# ~ itemlist.append(Item(channel=__channel__, action="settings", title="Configuración",
|
||||
# ~ fanart=fanart_host, text_bold=True,
|
||||
# ~ thumbnail=get_thumb("setting_0.png")))
|
||||
|
||||
return itemlist
|
||||
|
||||
@@ -123,7 +130,7 @@ def listado_alfabetico(item):
|
||||
|
||||
itemlist.append(
|
||||
Item(channel=__channel__, action="listado", title=letra, url=urlparse.urljoin(CHANNEL_HOST, cadena),
|
||||
extra=item.extra, text_color=color2, viewcontent=viewcontent,
|
||||
extra=item.extra, fanart=fanart_host, viewcontent=viewcontent,
|
||||
thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres/0/A-Z.png"))
|
||||
|
||||
return itemlist
|
||||
@@ -160,7 +167,7 @@ def listado_genero(item):
|
||||
|
||||
itemlist.append(
|
||||
Item(channel=__channel__, action="listado", title=value, url=urlparse.urljoin(CHANNEL_HOST, cadena2),
|
||||
extra=item.extra, text_color=color2, fanart=fanart_host, viewcontent=viewcontent,
|
||||
extra=item.extra, fanart=fanart_host, viewcontent=viewcontent,
|
||||
thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres/0/Genre.png"))
|
||||
|
||||
return itemlist
|
||||
@@ -197,23 +204,40 @@ def listado_anio(item):
|
||||
if item.extra != "movies":
|
||||
cadena2 += "/"
|
||||
|
||||
itemlist.append(Item(channel=__channel__, action="listado", title=titulo + value, extra=item.extra,
|
||||
url=urlparse.urljoin(CHANNEL_HOST, cadena2), text_color=color2, fanart=fanart_host,
|
||||
thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres/0/Year.png",
|
||||
viewcontent=viewcontent))
|
||||
itemlist.append(
|
||||
Item(channel=__channel__, action="listado", title=titulo + value, url=urlparse.urljoin(CHANNEL_HOST, cadena2),
|
||||
extra=item.extra, fanart=fanart_host, viewcontent=viewcontent,
|
||||
thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres/0/Year.png"))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
|
||||
# Funcion de busqueda desactivada
|
||||
logger.info("texto=%s" % texto)
|
||||
def local_search(item):
|
||||
logger.info()
|
||||
text = ""
|
||||
# ~ if config.get_setting("save_last_search", item.channel):
|
||||
# ~ text = config.get_setting("last_search", item.channel)
|
||||
|
||||
item.url = item.url + "%" + texto.replace(' ', '+') + "%"
|
||||
from platformcode import platformtools
|
||||
texto = platformtools.dialog_input(default=text, heading="Buscar en Pelispedia")
|
||||
if texto is None:
|
||||
return
|
||||
|
||||
# ~ if config.get_setting("save_last_search", item.channel):
|
||||
# ~ config.set_setting("last_search", texto, item.channel)
|
||||
|
||||
return search(item, texto)
|
||||
|
||||
|
||||
def search(item, texto):
|
||||
logger.info()
|
||||
if '/buscar/?' not in item.url:
|
||||
item.url = CHANNEL_HOST if item.extra == 'movies' else CHANNEL_HOST + 'series/'
|
||||
item.url += 'buscar/?sitesearch=pelispedia.tv&q='
|
||||
item.url += texto.replace(" ", "+")
|
||||
|
||||
try:
|
||||
return listado(item)
|
||||
|
||||
# Se captura la excepción, para no interrumpir al buscador global si un canal falla
|
||||
except:
|
||||
import sys
|
||||
@@ -257,7 +281,8 @@ def listado(item):
|
||||
action = "temporadas"
|
||||
content_type = "tvshow"
|
||||
|
||||
data = httptools.downloadpage(item.url).data
|
||||
# ~ data = httptools.downloadpage(item.url).data
|
||||
data = obtener_data(item.url)
|
||||
data = re.sub(r"\n|\r|\t|\s{2}| |<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)
|
||||
# logger.info("data -- {}".format(data))
|
||||
|
||||
@@ -270,7 +295,7 @@ def listado(item):
|
||||
plot = scrapertools.entityunescape(scrapedplot)
|
||||
|
||||
new_item = Item(channel=__channel__, title=title, url=urlparse.urljoin(CHANNEL_HOST, scrapedurl), action=action,
|
||||
thumbnail=scrapedthumbnail, plot=plot, context="", extra=item.extra, text_color=color3,
|
||||
thumbnail=scrapedthumbnail, plot=plot, context="", extra=item.extra,
|
||||
contentType=content_type, fulltitle=title)
|
||||
|
||||
if item.extra == 'serie':
|
||||
@@ -288,7 +313,7 @@ def listado(item):
|
||||
tmdb.set_infoLabels(itemlist, __modo_grafico__)
|
||||
|
||||
# numero de registros que se muestran por página, se fija a 28 por cada paginación
|
||||
if len(matches) >= 28:
|
||||
if len(matches) >= 28 and '/buscar/?' not in item.url:
|
||||
|
||||
file_php = "666more"
|
||||
tipo_serie = ""
|
||||
@@ -325,7 +350,7 @@ def listado(item):
|
||||
url = item.url.replace("rangeStart=" + ant_inicio, "rangeStart=" + inicio)
|
||||
|
||||
itemlist.append(Item(channel=__channel__, action="listado", title=">> Página siguiente", extra=item.extra,
|
||||
url=url, thumbnail=thumbnail_host, fanart=fanart_host, text_color=color2))
|
||||
url=url, thumbnail=thumbnail_host, fanart=fanart_host))
|
||||
|
||||
return itemlist
|
||||
|
||||
@@ -335,8 +360,8 @@ def episodios(item):
|
||||
|
||||
itemlist = []
|
||||
|
||||
# Descarga la página
|
||||
data = httptools.downloadpage(item.url).data
|
||||
# ~ data = httptools.downloadpage(item.url).data
|
||||
data = obtener_data(item.url)
|
||||
data = re.sub(r"\n|\r|\t|\s{2}| |<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)
|
||||
|
||||
patron = '<li class="clearfix gutterVertical20"><a href="([^"]+)".*?><small>(.*?)</small>.*?' \
|
||||
@@ -353,7 +378,7 @@ def episodios(item):
|
||||
continue
|
||||
|
||||
title = "%sx%s: %s" % (season, episode.zfill(2), scrapertools.unescape(scrapedname))
|
||||
new_item = item.clone(title=title, url=scrapedurl, action="findvideos", text_color=color3, fulltitle=title,
|
||||
new_item = item.clone(title=title, url=scrapedurl, action="findvideos", fulltitle=title,
|
||||
contentType="episode")
|
||||
if 'infoLabels' not in new_item:
|
||||
new_item.infoLabels = {}
|
||||
@@ -382,7 +407,7 @@ def episodios(item):
|
||||
if config.get_videolibrary_support() and len(itemlist) > 0:
|
||||
itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
|
||||
action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
|
||||
text_color=color1, thumbnail=thumbnail_host, fanart=fanart_host))
|
||||
thumbnail=thumbnail_host, fanart=fanart_host))
|
||||
|
||||
return itemlist
|
||||
|
||||
@@ -391,9 +416,8 @@ def temporadas(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
|
||||
# Descarga la página
|
||||
data = httptools.downloadpage(item.url).data
|
||||
|
||||
# ~ data = httptools.downloadpage(item.url).data
|
||||
data = obtener_data(item.url)
|
||||
data = re.sub(r"\n|\r|\t|\s{2}| |<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)
|
||||
|
||||
if not item.fanart:
|
||||
@@ -407,7 +431,7 @@ def temporadas(item):
|
||||
if len(matches) > 1:
|
||||
for scrapedseason, scrapedthumbnail in matches:
|
||||
temporada = scrapertools.find_single_match(scrapedseason, '(\d+)')
|
||||
new_item = item.clone(text_color=color2, action="episodios", season=temporada, thumbnail=scrapedthumbnail)
|
||||
new_item = item.clone(action="episodios", season=temporada, thumbnail=scrapedthumbnail)
|
||||
new_item.infoLabels['season'] = temporada
|
||||
new_item.extra = ""
|
||||
itemlist.append(new_item)
|
||||
@@ -429,7 +453,7 @@ def temporadas(item):
|
||||
if config.get_videolibrary_support() and len(itemlist) > 0:
|
||||
itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
|
||||
action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
|
||||
text_color=color1, thumbnail=thumbnail_host, fanart=fanart_host))
|
||||
thumbnail=thumbnail_host, fanart=fanart_host))
|
||||
|
||||
return itemlist
|
||||
else:
|
||||
@@ -441,8 +465,8 @@ def findvideos(item):
|
||||
logger.info("item.url %s" % item.url)
|
||||
itemlist = []
|
||||
|
||||
# Descarga la página
|
||||
data = httptools.downloadpage(item.url).data
|
||||
# ~ data = httptools.downloadpage(item.url).data
|
||||
data = obtener_data(item.url)
|
||||
data = re.sub(r"\n|\r|\t|\s{2}| |<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)
|
||||
|
||||
patron = '<iframe src=".+?id=(\d+)'
|
||||
@@ -453,186 +477,119 @@ def findvideos(item):
|
||||
headers["Referer"] = item.url
|
||||
data = httptools.downloadpage(url, headers=headers).data
|
||||
|
||||
# Descarta la opción descarga que es de publicidad
|
||||
patron = '<a href="(?!http://go.ad2up.com)([^"]+)".+?><img src="/api/img/([^.]+)'
|
||||
patron = '<a href="([^"]+)".+?><img src="/api/img/([^.]+)'
|
||||
matches = scrapertools.find_multiple_matches(data, patron)
|
||||
|
||||
for scrapedurl, scrapedtitle in matches:
|
||||
# En algunos vídeos hay opción flash "vip" con varias calidades
|
||||
if "api/vip.php" in scrapedurl:
|
||||
data_vip = httptools.downloadpage(scrapedurl).data
|
||||
patron = '<a href="([^"]+)".+?><img src="/api/img/([^.]+).*?<span class="text">([^<]+)<'
|
||||
matches_vip = re.compile(patron, re.DOTALL).findall(data_vip)
|
||||
for url, titlevip, calidad in matches_vip:
|
||||
title = "Ver vídeo en [" + titlevip + "] " + calidad
|
||||
itemlist.append(item.clone(title=title, url=url, action="play"))
|
||||
# fix se ignora esta url ya que no devuelve videos
|
||||
elif "http://www.pelispedia.tv/Pe_Player_Html6/index.php?" in scrapedurl:
|
||||
continue
|
||||
else:
|
||||
title = "Ver vídeo en [" + scrapedtitle + "]"
|
||||
new_item = item.clone(title=title, url=scrapedurl, action="play", extra=item.url, referer=url)
|
||||
|
||||
if scrapedurl.startswith("https://cloud.pelispedia.vip/html5.php"):
|
||||
parms = dict(re.findall('[&|\?]{1}([^=]*)=([^&]*)', scrapedurl))
|
||||
for cal in ['360', '480', '720', '1080']:
|
||||
if parms[cal]:
|
||||
url_v = 'https://pelispedia.video/v.php?id=%s&sub=%s&active=%s' % (parms[cal], parms['sub'], cal)
|
||||
title = "Ver video en [HTML5 " + cal + "p]"
|
||||
new_item = item.clone(title=title, url=url_v, action="play", referer=item.url)
|
||||
itemlist.append(new_item)
|
||||
|
||||
elif scrapedurl.startswith("https://load.pelispedia.vip/embed/"):
|
||||
if scrapedtitle == 'vid': scrapedtitle = 'vidoza'
|
||||
elif scrapedtitle == 'fast': scrapedtitle = 'fastplay'
|
||||
title = "Ver video en [" + scrapedtitle + "]"
|
||||
new_item = item.clone(title=title, url=scrapedurl, action="play", referer=item.url)
|
||||
itemlist.append(new_item)
|
||||
|
||||
|
||||
# Opción "Añadir esta pelicula a la videoteca"
|
||||
if item.extra == "movies" and config.get_videolibrary_support() and len(itemlist) > 0:
|
||||
itemlist.append(Item(channel=__channel__, title="Añadir esta película a la videoteca", url=item.url,
|
||||
infoLabels=item.infoLabels, action="add_pelicula_to_library", extra="findvideos",
|
||||
fulltitle=item.title, text_color=color2))
|
||||
fulltitle=item.title))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def play(item):
|
||||
logger.info("url=%s" % item.url)
|
||||
|
||||
itemlist = []
|
||||
|
||||
subtitle = ""
|
||||
if item.url.startswith("https://pelispedia.video/v.php"):
|
||||
|
||||
# html5 - http://www.pelispedia.vip
|
||||
if item.url.startswith("http://www.pelispedia.vip"):
|
||||
headers = {'Referer': item.referer}
|
||||
resp = httptools.downloadpage(item.url, headers=headers, cookies=False)
|
||||
|
||||
for h in resp.headers:
|
||||
ck = scrapertools.find_single_match(resp.headers[h], '__cfduid=([^;]*)')
|
||||
if ck:
|
||||
gsv = scrapertools.find_single_match(resp.data, '<meta name="google-site-verification" content="([^"]*)"')
|
||||
token = generar_token(gsv, 'b0a8c83650f18ccc7c87b16e3c460474'+'yt'+'b0a8c83650f18ccc7c87b16e3c460474'+'2653')
|
||||
playparms = scrapertools.find_single_match(resp.data, 'Play\("([^"]*)","([^"]*)","([^"]*)"')
|
||||
if playparms:
|
||||
link = playparms[0]
|
||||
subtitle = '' if playparms[1] == '' or playparms[2] == '' else playparms[2] + playparms[1] + '.srt'
|
||||
else:
|
||||
link = scrapertools.find_single_match(item.url, 'id=([^;]*)')
|
||||
subtitle = ''
|
||||
# ~ logger.info("gsv: %s token: %s ck: %s link: %s" % (gsv, token, ck, link))
|
||||
|
||||
headers = dict()
|
||||
headers["Referer"] = item.referer
|
||||
data = httptools.downloadpage(item.url, headers=headers).data
|
||||
data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
|
||||
post = "link=%s&token=%s" % (link, token)
|
||||
headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Cookie': '__cfduid=' + ck}
|
||||
data = httptools.downloadpage("https://pelispedia.video/plugins/gkpedia.php", post=post, headers=headers, cookies=False).data
|
||||
|
||||
mp4 = scrapertools.find_single_match(data, '"link":"([^"]*)')
|
||||
if mp4:
|
||||
mp4 = mp4.replace('\/', '/')
|
||||
if 'chomikuj.pl/' in mp4: mp4 += "|Referer=%s" % item.referer
|
||||
itemlist.append(['.mp4', mp4, 0, subtitle])
|
||||
|
||||
break
|
||||
|
||||
from lib import jsunpack
|
||||
match = scrapertools.find_single_match(data, '\.</div><script type="text/rocketscript">(.*?)</script>')
|
||||
data = jsunpack.unpack(match)
|
||||
data = data.replace("\\'", "'")
|
||||
|
||||
subtitle = scrapertools.find_single_match(data, "tracks:\[{file:'([^']+)',label:'Spanish'")
|
||||
media_urls = scrapertools.find_multiple_matches(data, "{file:'(.+?)',label:'(.+?)',type:'video/mp4'")
|
||||
elif item.url.startswith("https://load.pelispedia.vip/embed/"):
|
||||
|
||||
headers = {'Referer': item.referer}
|
||||
resp = httptools.downloadpage(item.url, headers=headers, cookies=False)
|
||||
|
||||
# la calidad más baja tiene que ir primero
|
||||
media_urls = sorted(media_urls, key=lambda k: k[1])
|
||||
for h in resp.headers:
|
||||
ck = scrapertools.find_single_match(resp.headers[h], '__cfduid=([^;]*)')
|
||||
if ck:
|
||||
gsv = scrapertools.find_single_match(resp.data, '<meta name="google-site-verification" content="([^"]*)"')
|
||||
token = generar_token(gsv, '4fe554b59d760c9986c903b07af8b7a4'+'yt'+'4fe554b59d760c9986c903b07af8b7a4'+'785446346')
|
||||
url = item.url.replace('/embed/', '/stream/') + '/' + token
|
||||
# ~ logger.info("gsv: %s token: %s ck: %s" % (gsv, token, ck))
|
||||
|
||||
if len(media_urls) > 0:
|
||||
for url, desc in media_urls:
|
||||
itemlist.append([desc, url, 0, subtitle])
|
||||
headers = {'Referer': item.url, 'Cookie': '__cfduid=' + ck}
|
||||
data = httptools.downloadpage(url, headers=headers, cookies=False).data
|
||||
|
||||
url = scrapertools.find_single_match(data, '<meta (?:name|property)="og:url" content="([^"]+)"')
|
||||
srv = scrapertools.find_single_match(data, '<meta (?:name|property)="og:sitename" content="([^"]+)"')
|
||||
if srv == '' and 'rapidvideo.com/' in url: srv = 'rapidvideo'
|
||||
|
||||
# otro html5 - https://pelispedia.co/ver/f.php
|
||||
elif item.url.startswith("https://pelispedia.co/ver/f.php"):
|
||||
if url != '' and srv != '':
|
||||
itemlist.append(item.clone(url=url, server=srv.lower()))
|
||||
|
||||
headers = dict()
|
||||
headers["Referer"] = item.referer
|
||||
data = httptools.downloadpage(item.url, headers=headers).data
|
||||
elif '<title>Vidoza</title>' in data or '|fastplay|' in data:
|
||||
if '|fastplay|' in data:
|
||||
packed = scrapertools.find_single_match(data, "<script type='text/javascript'>(eval\(.*?)</script>")
|
||||
from lib import jsunpack
|
||||
data = jsunpack.unpack(packed)
|
||||
data = data.replace("\\'", "'")
|
||||
|
||||
sub = scrapertools.find_single_match(data, "subtitulo='([^']+)'")
|
||||
data_sub = httptools.downloadpage(sub).data
|
||||
subtitle = save_sub(data_sub)
|
||||
matches = scrapertools.find_multiple_matches(data, 'file\s*:\s*"([^"]+)"\s*,\s*label\s*:\s*"([^"]+)"')
|
||||
subtitle = ''
|
||||
for fil, lbl in matches:
|
||||
if fil.endswith('.srt') and not fil.endswith('empty.srt'):
|
||||
subtitle = fil
|
||||
if not subtitle.startswith('http'):
|
||||
domi = scrapertools.find_single_match(data, 'aboutlink\s*:\s*"([^"]*)')
|
||||
subtitle = domi + subtitle
|
||||
break
|
||||
|
||||
from lib import jsunpack
|
||||
match = scrapertools.find_single_match(data, '<script type="text/rocketscript">(.*?)</script>')
|
||||
data = jsunpack.unpack(match)
|
||||
data = data.replace("\\'", "'")
|
||||
for fil, lbl in matches:
|
||||
if not fil.endswith('.srt'):
|
||||
itemlist.append([lbl, fil, 0, subtitle])
|
||||
|
||||
media_urls = scrapertools.find_multiple_matches(data, "{file:'(.+?)',label:'(.+?)'")
|
||||
break
|
||||
|
||||
# la calidad más baja tiene que ir primero
|
||||
media_urls = sorted(media_urls, key=lambda k: k[1])
|
||||
|
||||
if len(media_urls) > 0:
|
||||
for url, desc in media_urls:
|
||||
itemlist.append([desc, url, 0, subtitle])
|
||||
|
||||
# NUEVO
|
||||
# otro html5 - http://player.pelispedia.tv/ver?v=
|
||||
elif item.url.startswith("http://player.pelispedia.tv/ver?v="):
|
||||
_id = scrapertools.find_single_match(item.url, 'ver\?v=(.+?)$')
|
||||
|
||||
headers = dict()
|
||||
headers["Referer"] = item.referer
|
||||
data = httptools.downloadpage(item.url, headers=headers).data
|
||||
|
||||
sub = scrapertools.find_single_match(data, 'var parametros = "\?pic=20&id=([^&]+)&sub=ES";')
|
||||
sub = "http://player.pelispedia.tv/cdn" + sub
|
||||
data_sub = httptools.downloadpage(sub).data
|
||||
subtitle = save_sub(data_sub)
|
||||
|
||||
csrf_token = scrapertools.find_single_match(data, '<meta name="csrf-token" content="([^"]+)">')
|
||||
|
||||
ct = ""
|
||||
iv = ""
|
||||
s = ""
|
||||
pre_token = '{"ct": %s,"iv": %s,"s":%s}' % (ct, iv, s)
|
||||
|
||||
import base64
|
||||
token = base64.b64encode(pre_token)
|
||||
|
||||
url = "http://player.pelispedia.tv/template/protected.php"
|
||||
post = "fv=%s&url=%s&sou=%s&token=%s" % ("0", _id, "pic", token)
|
||||
# eyJjdCI6IkVNYUd3Z2IwS2szSURzSGFGdkxGWlE9PSIsIml2IjoiZDI0NzhlYzU0OTZlYTJkNWFlOTFkZjAzZTVhZTNlNmEiLCJzIjoiOWM3MTM3MjNhMTkyMjFiOSJ9
|
||||
data = httptools.downloadpage(url, post=post).data
|
||||
|
||||
logger.debug("datito %s " % data)
|
||||
|
||||
media_urls = scrapertools.find_multiple_matches(data, '"url":"([^"]+)".*?"width":([^,]+),')
|
||||
|
||||
# la calidad más baja tiene que ir primero
|
||||
media_urls = sorted(media_urls, key=lambda k: int(k[1]))
|
||||
|
||||
if len(media_urls) > 0:
|
||||
for url, desc in media_urls:
|
||||
itemlist.append([desc, url, 0, subtitle])
|
||||
|
||||
# netu
|
||||
elif item.url.startswith("http://www.pelispedia.tv/netu.html?"):
|
||||
url = item.url.replace("http://www.pelispedia.tv/netu.html?url=", "")
|
||||
|
||||
from servers import netutv
|
||||
media_urls = netutv.get_video_url(urllib.unquote(url))
|
||||
itemlist.append(media_urls[0])
|
||||
|
||||
# flash
|
||||
elif item.url.startswith("http://www.pelispedia.tv"):
|
||||
key = scrapertools.find_single_match(item.url, 'index.php\?id=([^&]+).+?sub=([^&]+)&.+?imagen=([^&]+)')
|
||||
|
||||
# if len(key) > 2:
|
||||
# thumbnail = key[2]
|
||||
if key[1] != "":
|
||||
url_sub = "http://www.pelispedia.tv/sub/%s.srt" % key[1]
|
||||
data_sub = httptools.downloadpage(url_sub).data
|
||||
subtitle = save_sub(data_sub)
|
||||
|
||||
url = "http://www.pelispedia.tv/gkphp_flv/plugins/gkpluginsphp.php"
|
||||
post = "link=" + urllib.quote(key[0])
|
||||
|
||||
data = httptools.downloadpage(url, post=post).data
|
||||
|
||||
media_urls = scrapertools.find_multiple_matches(data, 'link":"([^"]+)","type":"([^"]+)"')
|
||||
|
||||
# la calidad más baja tiene que ir primero
|
||||
media_urls = sorted(media_urls, key=lambda k: k[1])
|
||||
|
||||
if len(media_urls) > 0:
|
||||
for url, desc in media_urls:
|
||||
url = url.replace("\\", "")
|
||||
itemlist.append([desc, url, 0, subtitle])
|
||||
|
||||
# openload
|
||||
elif item.url.startswith("https://load.pelispedia.co/embed/openload.co"):
|
||||
|
||||
url = item.url.replace("/embed/", "/stream/")
|
||||
data = httptools.downloadpage(url).data
|
||||
url = scrapertools.find_single_match(data, '<meta name="og:url" content="([^"]+)"')
|
||||
|
||||
from servers import openload
|
||||
media_urls = openload.get_video_url(url)
|
||||
itemlist.append(media_urls[0])
|
||||
|
||||
# raptu
|
||||
elif item.url.startswith("https://load.pelispedia.co/embed/raptu.com"):
|
||||
url = item.url.replace("/embed/", "/stream/")
|
||||
data = httptools.downloadpage(url).data
|
||||
url = scrapertools.find_single_match(data, '<meta property="og:url" content="([^"]+)"')
|
||||
from servers import raptu
|
||||
media_urls = raptu.get_video_url(url)
|
||||
if len(media_urls) > 0:
|
||||
for desc, url, numero, subtitle in media_urls:
|
||||
itemlist.append([desc, url, numero, subtitle])
|
||||
|
||||
else:
|
||||
itemlist = servertools.find_video_items(data=item.url)
|
||||
@@ -640,26 +597,150 @@ def play(item):
|
||||
videoitem.title = item.title
|
||||
videoitem.channel = __channel__
|
||||
|
||||
logger.info("retorna itemlist: %s" % itemlist)
|
||||
return itemlist
|
||||
|
||||
|
||||
def save_sub(data):
|
||||
import os
|
||||
try:
|
||||
ficherosubtitulo = os.path.join(config.get_data_path(), 'subtitulo_pelispedia.srt')
|
||||
if os.path.exists(ficherosubtitulo):
|
||||
try:
|
||||
os.remove(ficherosubtitulo)
|
||||
except IOError:
|
||||
logger.error("Error al eliminar el archivo " + ficherosubtitulo)
|
||||
raise
|
||||
|
||||
fichero = open(ficherosubtitulo, "wb")
|
||||
fichero.write(data)
|
||||
fichero.close()
|
||||
subtitle = ficherosubtitulo
|
||||
except:
|
||||
subtitle = ""
|
||||
logger.error("Error al descargar el subtítulo")
|
||||
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
return subtitle
|
||||
def obtener_data(url, referer=''):
|
||||
headers = {}
|
||||
if referer != '': headers['Referer'] = referer
|
||||
data = httptools.downloadpage(url, headers=headers).data
|
||||
if "Javascript is required" in data:
|
||||
ck = decodificar_cookie(data)
|
||||
logger.info("Javascript is required. Cookie necesaria %s" % ck)
|
||||
|
||||
headers['Cookie'] = ck
|
||||
data = httptools.downloadpage(url, headers=headers).data
|
||||
|
||||
# Guardar la cookie y eliminar la que pudiera haber anterior
|
||||
cks = ck.split("=")
|
||||
cookie_file = filetools.join(config.get_data_path(), 'cookies.dat')
|
||||
cookie_data = filetools.read(cookie_file)
|
||||
cookie_data = re.sub(r"www\.pelispedia\.tv\tFALSE\t/\tFALSE\t\tsucuri_(.*)\n", "", cookie_data)
|
||||
cookie_data += "www.pelispedia.tv\tFALSE\t/\tFALSE\t\t%s\t%s\n" % (cks[0], cks[1])
|
||||
filetools.write(cookie_file, cookie_data)
|
||||
logger.info("Añadida cookie %s con valor %s" % (cks[0], cks[1]))
|
||||
|
||||
return data
|
||||
|
||||
|
||||
def rshift(val, n): return val>>n if val >= 0 else (val+0x100000000)>>n
|
||||
|
||||
def decodificar_cookie(data):
|
||||
S = re.compile("S='([^']*)'").findall(data)[0]
|
||||
A = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
|
||||
s = {}
|
||||
l = 0
|
||||
U = 0
|
||||
L = len(S)
|
||||
r = ''
|
||||
|
||||
for u in range(0, 64):
|
||||
s[A[u]] = u
|
||||
|
||||
for i in range(0, L):
|
||||
if S[i] == '=': continue
|
||||
c = s[S[i]]
|
||||
U = (U << 6) + c
|
||||
l += 6
|
||||
while (l >= 8):
|
||||
l -= 8
|
||||
a = rshift(U, l) & 0xff
|
||||
r += chr(a)
|
||||
|
||||
r = re.sub(r"\s+|/\*.*?\*/", "", r)
|
||||
r = re.sub("\.substr\(([0-9]*),([0-9*])\)", r"[\1:(\1+\2)]", r)
|
||||
r = re.sub("\.charAt\(([0-9]*)\)", r"[\1]", r)
|
||||
r = re.sub("\.slice\(([0-9]*),([0-9*])\)", r"[\1:\2]", r)
|
||||
r = r.replace("String.fromCharCode", "chr")
|
||||
r = r.replace("location.reload();", "")
|
||||
|
||||
pos = r.find("document.cookie")
|
||||
nomvar = r[0]
|
||||
l1 = r[2:pos-1]
|
||||
l2 = r[pos:-1].replace("document.cookie=", "").replace("+"+nomvar+"+", "+g+")
|
||||
|
||||
g = eval(l1)
|
||||
return eval(l2).replace(";path=/;max-age=86400", "")
|
||||
|
||||
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
def evpKDF(passwd, salt, key_size=8, iv_size=4, iterations=1, hash_algorithm="md5"):
|
||||
import hashlib
|
||||
target_key_size = key_size + iv_size
|
||||
derived_bytes = ""
|
||||
number_of_derived_words = 0
|
||||
block = None
|
||||
hasher = hashlib.new(hash_algorithm)
|
||||
while number_of_derived_words < target_key_size:
|
||||
if block is not None:
|
||||
hasher.update(block)
|
||||
|
||||
hasher.update(passwd)
|
||||
hasher.update(salt)
|
||||
block = hasher.digest()
|
||||
hasher = hashlib.new(hash_algorithm)
|
||||
|
||||
for i in range(1, iterations):
|
||||
hasher.update(block)
|
||||
block = hasher.digest()
|
||||
hasher = hashlib.new(hash_algorithm)
|
||||
|
||||
derived_bytes += block[0: min(len(block), (target_key_size - number_of_derived_words) * 4)]
|
||||
|
||||
number_of_derived_words += len(block)/4
|
||||
|
||||
return {
|
||||
"key": derived_bytes[0: key_size * 4],
|
||||
"iv": derived_bytes[key_size * 4:]
|
||||
}
|
||||
|
||||
def obtener_cripto(password, plaintext):
    """Encrypt *plaintext* with AES-CBC and return a CryptoJS-compatible JSON.

    The key/IV are derived with evpKDF (OpenSSL EVP_BytesToKey, MD5), a random
    8-byte salt and a random 16-byte IV are generated, and the result is the
    compact JSON {"ct": <base64>, "iv": <hex>, "s": <hex>} that CryptoJS's
    JsonFormatter expects.

    password  -- passphrase fed to the KDF.
    plaintext -- text to encrypt; padded with PKCS#7 to a 16-byte multiple.
    """
    import os, base64, json, binascii
    SALT_LENGTH = 8
    BLOCK_SIZE = 16
    KEY_SIZE = 32  # evpKDF defaults (8 words) already yield a 32-byte key

    salt = os.urandom(SALT_LENGTH)
    iv = os.urandom(BLOCK_SIZE)

    # PKCS#7 padding: always add 1..16 bytes, each equal to the pad length.
    paddingLength = 16 - (len(plaintext) % 16)
    paddedPlaintext = plaintext+chr(paddingLength)*paddingLength

    kdf = evpKDF(password, salt)

    try: # Intentar con librería AES del sistema (prefer the system AES library)
        from Crypto.Cipher import AES
        cipherSpec = AES.new(kdf['key'], AES.MODE_CBC, iv)
    except ImportError: # Si falla intentar con librería del addon (fall back to the addon's implementation)
        import jscrypto
        cipherSpec = jscrypto.new(kdf['key'], jscrypto.MODE_CBC, iv)
    ciphertext = cipherSpec.encrypt(paddedPlaintext)

    # bytes.encode("hex") only exists on Python 2; binascii.hexlify works on
    # both.  b64encode returns bytes on Python 3, which json.dumps rejects.
    ct_b64 = base64.b64encode(ciphertext)
    if isinstance(ct_b64, bytes):
        ct_b64 = ct_b64.decode("ascii")
    return json.dumps({'ct': ct_b64,
                       'iv': binascii.hexlify(iv).decode("ascii"),
                       's': binascii.hexlify(salt).decode("ascii")},
                      sort_keys=True, separators=(',', ':'))
|
||||
|
||||
def generar_token(gsv, pwd):
    """Build the site's access token: encrypt *gsv* with *pwd* and then apply
    the page's custom base64-like encoding.

    NOTE(review): like the obfuscated JS this mirrors, input is consumed in
    groups of 3 characters and any trailing 1-2 character remainder is
    silently dropped (no '=' padding) -- kept as-is for compatibility.
    """
    txt = obtener_cripto(pwd, gsv)

    _0x382d28 = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'

    pieces = []
    usable = len(txt) - (len(txt) % 3)  # only whole 3-char groups are encoded
    for pos in range(0, usable, 3):
        b0 = ord(txt[pos])
        b1 = ord(txt[pos + 1])
        b2 = ord(txt[pos + 2])
        # Standard base64 bit packing: 3 bytes -> 4 six-bit indexes.
        pieces.append(_0x382d28[b0 >> 0x2])
        pieces.append(_0x382d28[((b0 & 0x3) << 0x4) | (b1 >> 0x4)])
        pieces.append(_0x382d28[((b1 & 0xf) << 0x2) | (b2 >> 0x6)])
        pieces.append(_0x382d28[b2 & 0x3f])

    return ''.join(pieces)
|
||||
|
||||
@@ -28,14 +28,6 @@
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_infantiles",
|
||||
"type": "bool",
|
||||
"label": "Incluir en Novedades - Infantiles",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_series",
|
||||
"type": "bool",
|
||||
@@ -43,6 +35,24 @@
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "filter_languages",
|
||||
"type": "list",
|
||||
"label": "Mostrar enlaces en idioma...",
|
||||
"default": 0,
|
||||
"enabled": true,
|
||||
"visible": true,
|
||||
"lvalues": [
|
||||
"No filtrar",
|
||||
"Español",
|
||||
"Inglés",
|
||||
"Latino",
|
||||
"VO",
|
||||
"VOS",
|
||||
"VOSI",
|
||||
"OVOS"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
@@ -13,45 +13,59 @@ from core import servertools
|
||||
from core import tmdb
|
||||
from core.item import Item, InfoLabels
|
||||
from platformcode import config, logger
|
||||
from channels import filtertools
|
||||
|
||||
host = "https://pepecine.io"
|
||||
|
||||
IDIOMAS = {'es': 'Español', 'en': 'Inglés', 'la': 'Latino', 'su': 'VOSE', 'vo': 'VO', 'otro': 'OVOS'}
|
||||
list_idiomas = IDIOMAS.values()
|
||||
list_language = ['default']
|
||||
|
||||
host = "https://pepecinehd.tv"
|
||||
perpage = 20
|
||||
|
||||
def mainlist1(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
itemlist.append(Item(channel=item.channel, title="Películas", action='movies_menu'))
|
||||
#itemlist.append(item.clone(title="Series", action='tvshows_menu'))
|
||||
return itemlist
|
||||
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
itemlist.append(Item(channel=item.channel,
|
||||
title="Ultimas",
|
||||
url=host+'/tv-peliculas-online',
|
||||
action='list_latest',
|
||||
indexp=1,
|
||||
type='movie'))
|
||||
itemlist.append(Item(channel=item.channel,
|
||||
title="Todas",
|
||||
url= host+'/ver-online',
|
||||
action='list_all',
|
||||
page='1',
|
||||
type='movie'))
|
||||
itemlist.append(Item(channel=item.channel,
|
||||
title="Género",
|
||||
url= host,
|
||||
action='genero',
|
||||
page='1',
|
||||
type='movie'))
|
||||
itemlist.append(Item(channel=item.channel, title = "", action =""))
|
||||
itemlist.append(Item(channel=item.channel,
|
||||
title="Buscar",
|
||||
url= host+'/esta-online?q=',
|
||||
action='search',
|
||||
page='1',
|
||||
type='movie'))
|
||||
itemlist.append(Item(title = "Películas"))
|
||||
|
||||
itemlist.append(item.clone(
|
||||
title = " Últimas películas",
|
||||
url = host + '/las-peliculas-online',
|
||||
action = 'list_latest',
|
||||
type = 'movie'))
|
||||
|
||||
itemlist.append(item.clone(title = " Películas por género",
|
||||
url = host + '/ver-pelicula',
|
||||
action = 'genero',
|
||||
type = 'movie'))
|
||||
|
||||
itemlist.append(item.clone(title = " Todas las películas",
|
||||
url = host + '/ver-pelicula',
|
||||
action = 'list_all',
|
||||
type = 'movie'))
|
||||
|
||||
itemlist.append(Item(title = "Series"))
|
||||
|
||||
itemlist.append(item.clone(title = " Últimas series",
|
||||
url = host + '/las-series-online',
|
||||
action = 'list_latest',
|
||||
type = 'series'))
|
||||
|
||||
itemlist.append(item.clone(title = " Series por género",
|
||||
url = host + '/ver-serie-tv',
|
||||
action = 'genero',
|
||||
type = 'series'))
|
||||
|
||||
itemlist.append(item.clone(title = " Todas las series",
|
||||
url = host + '/ver-serie-tv',
|
||||
action ='list_all',
|
||||
type = 'series'))
|
||||
|
||||
itemlist.append(item.clone(title = "Buscar",
|
||||
url = host + '/donde-ver?q=',
|
||||
action ='search',
|
||||
type = 'movie'))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
@@ -59,40 +73,25 @@ def genero(item):
|
||||
logger.info()
|
||||
itemlist=[]
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = data.replace("\n","")
|
||||
bloque = scrapertools.find_single_match(data, 'Peliculas</h2><div id="SlideMenu1" class="s2">.*?SlideMenu1_Folder">.*?</ul></li>')
|
||||
patron = '<a href="([^"]+).*?'
|
||||
patron += '<li>([^<]+)'
|
||||
matches = scrapertools.find_multiple_matches(bloque, patron)
|
||||
patron = '<a href="(\?genre[^"]+)"[^>]*>[^>]+>(.+?)</li>'
|
||||
matches = scrapertools.find_multiple_matches(data, patron)
|
||||
for scrapedurl, scrapedtitle in matches:
|
||||
itemlist.append(Item(action = "list_all",
|
||||
channel = item.channel,
|
||||
page='1',
|
||||
title = scrapedtitle,
|
||||
type= item.type,
|
||||
url = host + scrapedurl
|
||||
))
|
||||
itemlist.append(item.clone(action = "list_all",
|
||||
title = scrapedtitle,
|
||||
url = item.url + scrapedurl
|
||||
))
|
||||
return itemlist
|
||||
|
||||
def newest(categoria):
|
||||
logger.info("categoria: %s" % categoria)
|
||||
itemlist = []
|
||||
|
||||
def tvshows_menu(item):
|
||||
logger.info()
|
||||
itemlist=[]
|
||||
itemlist.append(Item(channel=item.channel,
|
||||
title="Ultimas",
|
||||
url=host+'/ver-tv-serie-online',
|
||||
action='list_latest',
|
||||
type='serie'))
|
||||
itemlist.append(item.clone(title="Todas",
|
||||
url=host + '/serie-tv',
|
||||
action='list_all',
|
||||
page='1',
|
||||
type='series'))
|
||||
itemlist.append(item.clone(title="Buscar",
|
||||
url= host+'/esta-online?q=',
|
||||
action='search',
|
||||
page='1',
|
||||
type='series'))
|
||||
if categoria == 'peliculas':
|
||||
itemlist = list_latest(Item(url = host + '/las-peliculas-online',
|
||||
type = 'movie'))
|
||||
elif categoria == 'series':
|
||||
itemlist = list_latest(Item(url = host + '/las-series-online',
|
||||
type = 'series'))
|
||||
return itemlist
|
||||
|
||||
|
||||
@@ -101,43 +100,41 @@ def search(item, texto):
|
||||
texto = texto.replace(" ", "+")
|
||||
item.url = item.url + texto
|
||||
item.extra = "busca"
|
||||
if texto != '':
|
||||
return sub_search(item)
|
||||
else:
|
||||
if texto == '':
|
||||
return []
|
||||
|
||||
return sub_search(item)
|
||||
|
||||
def search_section(item, data, sectionType):
|
||||
logger.info()
|
||||
sectionResultsRE = re.findall("<a[^<]+href *= *[\"'](?P<url>[^\"']+)[^>]>[^<]*<img[^>]+src *= *[\"'](?P<thumbnail>[^\"']+).*?<figcaption[^\"']*[\"'](?P<title>.*?)\">", data, re.MULTILINE | re.DOTALL)
|
||||
|
||||
itemlist = []
|
||||
for url, thumbnail, title in sectionResultsRE:
|
||||
newitem = item.clone(action = "seasons" if sectionType == "series" else "findvideos",
|
||||
title = title,
|
||||
thumbnail = thumbnail,
|
||||
url = url)
|
||||
if sectionType == "series":
|
||||
newitem.show = title;
|
||||
itemlist.append(newitem)
|
||||
|
||||
return itemlist
|
||||
|
||||
def sub_search(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
f1 = "Peliculas"
|
||||
action = "findvideos"
|
||||
if item.type == "series":
|
||||
action = "list_all"
|
||||
f1 = "Series"
|
||||
patron = 'Ver %s .*?id="%s' %(f1, item.type)
|
||||
bloque = scrapertools.find_single_match(data, patron)
|
||||
patron = 'col-sm-4 pretty-figure">\s*<a href="([^"]+).*?'
|
||||
patron += 'src="([^"]+).*?'
|
||||
patron += 'title="([^"]+).*?'
|
||||
matches = scrapertools.find_multiple_matches(bloque, patron)
|
||||
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
|
||||
path = scrapertools.find_single_match(scrapedthumbnail, "w\w+(/\w+.....)")
|
||||
filtro_list = {"poster_path": path}
|
||||
filtro_list = filtro_list.items()
|
||||
itemlist.append(item.clone(action = "findvideos",
|
||||
extra = "one",
|
||||
infoLabels={'filtro': filtro_list},
|
||||
thumbnail = scrapedthumbnail,
|
||||
title = scrapedtitle,
|
||||
fulltitle = scrapedtitle,
|
||||
url = scrapedurl
|
||||
))
|
||||
|
||||
searchSections = re.findall("<div[^>]+id *= *[\"'](?:movies|series)[\"'].*?</div>", data, re.MULTILINE | re.DOTALL)
|
||||
|
||||
logger.info("Search sections = {0}".format(len(searchSections)))
|
||||
itemlist.extend(search_section(item, searchSections[0], "movies"))
|
||||
itemlist.extend(search_section(item, searchSections[1], "series"))
|
||||
|
||||
tmdb.set_infoLabels(itemlist)
|
||||
return itemlist
|
||||
|
||||
|
||||
def get_source(url):
|
||||
logger.info()
|
||||
data = httptools.downloadpage(url).data
|
||||
@@ -147,6 +144,10 @@ def get_source(url):
|
||||
|
||||
def list_latest(item):
|
||||
logger.info()
|
||||
|
||||
if not item.indexp:
|
||||
item.indexp = 1
|
||||
|
||||
itemlist = []
|
||||
data = get_source(item.url)
|
||||
data_url= scrapertools.find_single_match(data,'<iframe.*?src=(.*?) ')
|
||||
@@ -156,108 +157,209 @@ def list_latest(item):
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
count = 0
|
||||
for thumbnail, title, url, language in matches:
|
||||
count +=1
|
||||
if count >= item.indexp and count < item.indexp + perpage:
|
||||
path = scrapertools.find_single_match(thumbnail, "w\w+(/\w+.....)")
|
||||
filtro_list = {"poster_path": path}
|
||||
filtro_list = filtro_list.items()
|
||||
itemlist.append(Item(channel=item.channel,
|
||||
title=title,
|
||||
fulltitle=title,
|
||||
contentTitle=title,
|
||||
url=host+url,
|
||||
thumbnail=thumbnail,
|
||||
language=language,
|
||||
infoLabels={'filtro': filtro_list},
|
||||
extra="one",
|
||||
action='findvideos'))
|
||||
tmdb.set_infoLabels(itemlist)
|
||||
item.indexp += perpage
|
||||
itemlist.append(Item(channel=item.channel,
|
||||
title="Siguiente >>",
|
||||
url=item.url,
|
||||
extra="one",
|
||||
indexp=item.indexp,
|
||||
action='list_latest'))
|
||||
return itemlist
|
||||
count += 1
|
||||
|
||||
if count < item.indexp:
|
||||
continue
|
||||
|
||||
if count >= item.indexp + perpage:
|
||||
break;
|
||||
|
||||
path = scrapertools.find_single_match(thumbnail, "w\w+(/\w+.....)")
|
||||
filtro_list = {"poster_path": path}
|
||||
filtro_list = filtro_list.items()
|
||||
itemlist.append(item.clone(action = 'findvideos',
|
||||
title = title,
|
||||
url = host + url,
|
||||
thumbnail = thumbnail,
|
||||
language = language,
|
||||
infoLabels = {'filtro': filtro_list},
|
||||
)
|
||||
)
|
||||
tmdb.set_infoLabels(itemlist)
|
||||
|
||||
# Desde novedades no tenemos el elemento item.channel
|
||||
if item.channel:
|
||||
itemlist.append(item.clone(title = "Página siguiente >>>",
|
||||
indexp = item.indexp + perpage
|
||||
)
|
||||
)
|
||||
if item.indexp > 1:
|
||||
itemlist.append(item.clone(title = "<<< Página anterior",
|
||||
indexp = item.indexp - perpage
|
||||
)
|
||||
)
|
||||
|
||||
return itemlist
|
||||
|
||||
def list_all(item):
|
||||
logger.info()
|
||||
itemlist=[]
|
||||
|
||||
if not item.page:
|
||||
item.page = 1
|
||||
|
||||
genero = scrapertools.find_single_match(item.url, "genre=(\w+)")
|
||||
data= get_source(item.url)
|
||||
token = scrapertools.find_single_match(data, "token:.*?'(.*?)'")
|
||||
url = host+'/titles/paginate?_token=%s&perPage=24&page=%s&order=mc_num_of_votesDesc&type=%s&minRating=&maxRating=&availToStream=1&genres[]=%s' % (token, item.page, item.type, genero)
|
||||
url = host+'/titles/paginate?_token=%s&perPage=%d&page=%d&order=mc_num_of_votesDesc&type=%s&minRating=&maxRating=&availToStream=1&genres[]=%s' % (token, perpage, item.page, item.type, genero)
|
||||
data = httptools.downloadpage(url).data
|
||||
|
||||
if item.type == "series":
|
||||
# Remove links to speed-up (a lot!) json load
|
||||
data = re.sub(",? *[\"']link[\"'] *: *\[.+?\] *([,}])", "\g<1>", data)
|
||||
|
||||
dict_data = jsontools.load(data)
|
||||
items = dict_data['items']
|
||||
for dict in items:
|
||||
new_item = Item(channel=item.channel,
|
||||
title=dict['title']+' [%s]' % dict['year'],
|
||||
plot = dict['plot'],
|
||||
thumbnail=dict['poster'],
|
||||
url=dict['link'],
|
||||
infoLabels={'year':dict['year']})
|
||||
|
||||
for element in items:
|
||||
new_item = item.clone(
|
||||
title = element['title']+' [%s]' % element['year'],
|
||||
plot = element['plot'],
|
||||
thumbnail = element['poster'],
|
||||
infoLabels = {'year':element['year']})
|
||||
|
||||
if "link" in element:
|
||||
new_item.url = element["link"]
|
||||
new_item.extra = "links_encoded"
|
||||
|
||||
if item.type == 'movie':
|
||||
new_item.contentTitle=dict['title']
|
||||
new_item.fulltitle=dict['title']
|
||||
new_item.action = 'findvideos'
|
||||
new_item.contentTitle = element['title']
|
||||
new_item.fulltitle = element['title']
|
||||
if new_item.extra != "links_encoded":
|
||||
new_item.url = host + "/ver-pelicula/" + str(element['id'])
|
||||
|
||||
elif item.type == 'series':
|
||||
new_item.contentSerieName = dict['title']
|
||||
new_item.action = ''
|
||||
new_item.action = 'seasons'
|
||||
new_item.url = host + "/ver-serie-tv/" + str(element['id'])
|
||||
new_item.show = element['title']
|
||||
|
||||
itemlist.append(new_item)
|
||||
|
||||
tmdb.set_infoLabels(itemlist)
|
||||
itemlist.append(item.clone(title='Siguiente>>>',
|
||||
url=item.url,
|
||||
action='list_all',
|
||||
type= item.type,
|
||||
page=str(int(item.page) + 1)))
|
||||
|
||||
itemlist.append(item.clone(title = 'Página siguiente >>>',
|
||||
page = item.page + 1))
|
||||
|
||||
if (int(item.page) > 1):
|
||||
itemlist.append(item.clone(title = '<<< Página anterior',
|
||||
page = item.page - 1))
|
||||
|
||||
return itemlist
|
||||
|
||||
def episodios(item):
|
||||
logger.info("url: %s" % item.url)
|
||||
itemlist = seasons(item)
|
||||
|
||||
if len(itemlist) > 0 and itemlist[0].action != "findvideos":
|
||||
episodes = []
|
||||
for season in itemlist:
|
||||
episodes.extend([episode for episode in seasons_episodes(season)])
|
||||
itemlist = episodes
|
||||
|
||||
return itemlist
|
||||
|
||||
def seasons(item):
|
||||
logger.info()
|
||||
data = httptools.downloadpage(item.url).data
|
||||
|
||||
reSeasons = re.findall("href *= *[\"']([^\"']+)[\"'][^\"']+[\"']sezon[^>]+>([^<]+)+", data)
|
||||
|
||||
itemlist = [item.clone(action = "seasons_episodes",
|
||||
title = title,
|
||||
url = url) for url, title in reSeasons]
|
||||
|
||||
if len(itemlist) == 1:
|
||||
itemlist = seasons_episodes(itemlist[0])
|
||||
|
||||
# Opción "Añadir esta serie a la videoteca de XBMC"
|
||||
if config.get_videolibrary_support() and len(itemlist) > 0:
|
||||
itemlist.append(item.clone(title="Añadir esta serie a la videoteca", action="add_serie_to_library", extra="episodios"))
|
||||
|
||||
return itemlist
|
||||
|
||||
def seasons_episodes(item):
|
||||
logger.info()
|
||||
|
||||
data = httptools.downloadpage(item.url).data
|
||||
reEpisodes = re.findall("<a[^>]+col-sm-3[^>]+href *= *[\"'](?P<url>[^\"']+).*?<img[^>]+src *= *[\"'](?P<thumbnail>[^\"']+).*?<a[^>]+>(?P<title>.*?)</a>", data, re.MULTILINE | re.DOTALL)
|
||||
|
||||
seasons = [item.clone(action = "findvideos",
|
||||
title = re.sub("<b>Episodio (\d+)</b> - T(\d+) \|[^\|]*\| ".format(item.show), "\g<2>x\g<1> - ", title),
|
||||
thumbnail = thumbnail,
|
||||
url = url) for url, thumbnail, title in reEpisodes]
|
||||
|
||||
return seasons
|
||||
|
||||
|
||||
def findvideos(item):
|
||||
logger.info()
|
||||
itemlist=[]
|
||||
if item.extra == "one":
|
||||
|
||||
if item.extra != "links_encoded":
|
||||
|
||||
# data = httptools.downloadpage(item.url).data
|
||||
# linksRE = re.findall("getFavicon\('(?P<url>[^']+)[^>]+>[^>]+>(?P<language>[^<]+).+?<td[^>]+>(?P<quality>[^<]*).+?<td[^>]+>(?P<antiquity>[^<]*)", data, re.MULTILINE | re.DOTALL)
|
||||
# for url, language, quality, antiquity in linksRE:
|
||||
# logger.info("URL = " + url);
|
||||
|
||||
|
||||
data = httptools.downloadpage(item.url).data
|
||||
patron = "renderTab.bind.*?'([^']+).*?"
|
||||
patron += "app.utils.getFavicon.*?<b>(.*?) .*?"
|
||||
patron += "app.utils.getFavicon.*?<img [^>]*src *= *[\"']/([^\.]+).*?"
|
||||
patron += 'color:#B1FFC5;">([^<]+)'
|
||||
matches = scrapertools.find_multiple_matches(data, patron)
|
||||
for scrapedurl, scrapedlanguage, scrapedquality in matches:
|
||||
title = "Ver enlace en %s " + "[" + scrapedlanguage + "]" + "[" + scrapedquality + "]"
|
||||
if scrapedlanguage != 'zc':
|
||||
itemlist.append(item.clone(action='play',
|
||||
title=title,
|
||||
url=scrapedurl,
|
||||
language=scrapedlanguage
|
||||
))
|
||||
for scrapedurl, language, scrapedquality in matches:
|
||||
isDD = language.startswith("z")
|
||||
if isDD:
|
||||
language = language[1:]
|
||||
|
||||
language = language[0:2]
|
||||
language = IDIOMAS.get(language, language)
|
||||
|
||||
title = ("Ver" if not isDD else "Descargar") + " enlace en %s [" + language + "] [" + scrapedquality + "]"
|
||||
if not isDD:
|
||||
itemlist.append(item.clone(action = 'play',
|
||||
title = title,
|
||||
url = scrapedurl,
|
||||
language = language
|
||||
)
|
||||
)
|
||||
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
|
||||
else:
|
||||
for link in item.url:
|
||||
language = scrapertools.find_single_match(link['label'], '(.*?) <img')
|
||||
if language != 'zc':
|
||||
|
||||
language = scrapertools.find_single_match(link['label'], '/([^\.]+)')
|
||||
isDD = language.startswith("z")
|
||||
if isDD:
|
||||
language = language[1:]
|
||||
|
||||
language = language[0:2]
|
||||
|
||||
if not isDD:
|
||||
itemlist.append(item.clone(action='play',
|
||||
title=item.title,
|
||||
title = item.title,
|
||||
url= link['url'],
|
||||
language=language,
|
||||
language=IDIOMAS.get(language, language),
|
||||
quality=link['quality']))
|
||||
itemlist=servertools.get_servers_itemlist(itemlist)
|
||||
for videoitem in itemlist:
|
||||
videoitem.title = '%s [%s]' % (videoitem.server.capitalize(), videoitem.language.capitalize())
|
||||
videoitem.title = '%s [%s] [%s]' % (videoitem.server.capitalize(), videoitem.language, videoitem.quality)
|
||||
|
||||
tmdb.set_infoLabels(itemlist)
|
||||
if itemlist:
|
||||
if itemlist and not item.show:
|
||||
itemlist.append(Item(channel = item.channel))
|
||||
itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
|
||||
text_color="magenta"))
|
||||
# Opción "Añadir esta película a la videoteca de KODI"
|
||||
if item.extra != "library":
|
||||
if config.get_videolibrary_support():
|
||||
itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
|
||||
action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
|
||||
fulltitle = item.fulltitle
|
||||
itemlist.append(item.clone(title="Añadir a la videoteca",
|
||||
text_color="green",
|
||||
action="add_pelicula_to_library"
|
||||
))
|
||||
return itemlist
|
||||
return filtertools.get_links(itemlist, item, list_idiomas)
|
||||
|
||||
|
||||
def play(item):
|
||||
|
||||
@@ -274,7 +274,7 @@ def findvideos(item):
|
||||
#title = '%s [%s]' % (item.title, language)
|
||||
itemlist.append(item.clone(title='[%s] [%s]', url=url, action='play', subtitle=subs,
|
||||
language=language, quality=quality, infoLabels = item.infoLabels))
|
||||
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % (x.server.capitalize(), x.language))
|
||||
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % (x.server.capitalize(), x.language))
|
||||
|
||||
# Requerido para Filtrar enlaces
|
||||
|
||||
|
||||
@@ -236,7 +236,7 @@ def listado(item):
|
||||
|
||||
#Determinamos y marcamos idiomas distintos del castellano
|
||||
item_local.language = []
|
||||
if "[vos" in title.lower() or "v.o.s" in title.lower() or "vo" in title.lower() or ".com/pelicula/" in scrapedurl or ".com/series-vo" in scrapedurl or "-vo/" in scrapedurl or "vos" in calidad.lower() or "vose" in calidad.lower() or "v.o.s" in calidad.lower() or ".com/peliculas-vo" in item.url:
|
||||
if "[vos" in title.lower() or "v.o.s" in title.lower() or "vo" in title.lower() or ".com/pelicula/" in scrapedurl or ".com/series-vo" in scrapedurl or "-vo/" in scrapedurl or "vos" in calidad.lower() or "vose" in calidad.lower() or "v.o.s" in calidad.lower() or "sub" in calidad.lower() or ".com/peliculas-vo" in item.url:
|
||||
item_local.language += ["VOS"]
|
||||
title = title.replace(" [Subs. integrados]", "").replace(" [subs. Integrados]", "").replace(" [VOSE", "").replace(" [VOS", "").replace(" (V.O.S.E)", "").replace(" VO", "").replace("Subtitulos", "")
|
||||
if "latino" in title.lower() or "argentina" in title.lower() or "-latino/" in scrapedurl or "latino" in calidad.lower() or "argentina" in calidad.lower():
|
||||
@@ -272,8 +272,8 @@ def listado(item):
|
||||
if "audio" in title.lower(): #Reservamos info de audio para después de TMDB
|
||||
title_subs += ['[%s]' % scrapertools.find_single_match(title, r'(\[[a|A]udio.*?\])')]
|
||||
title = re.sub(r'\[[a|A]udio.*?\]', '', title)
|
||||
if "[dual" in title.lower() or "multileng" in title.lower() or "multileng" in item_local.quality.lower():
|
||||
item_local.language += ["DUAL"]
|
||||
if "[dual" in title.lower() or "multileng" in title.lower() or "multileng" in item_local.quality.lower() or (("espa" in title.lower() or "spani" in title.lower()) and "VOS" in item_local.language):
|
||||
item_local.language[0:0] = ["DUAL"]
|
||||
title = re.sub(r'\[[D|d]ual.*?\]', '', title)
|
||||
title = re.sub(r'\[[M|m]ultileng.*?\]', '', title)
|
||||
item_local.quality = re.sub(r'\[[M|m]ultileng.*?\]', '', item_local.quality)
|
||||
@@ -309,7 +309,7 @@ def listado(item):
|
||||
|
||||
title = title.replace("Ver online ", "").replace("Descarga Serie HD ", "").replace("Descargar Serie HD ", "").replace("Descarga Serie ", "").replace("Descargar Serie ", "").replace("Ver en linea ", "").replace("Ver en linea", "").replace("HD ", "").replace("(Proper)", "").replace("RatDVD", "").replace("DVDRiP", "").replace("DVDRIP", "").replace("DVDR", "").replace("DVD9", "").replace("DVD", "").replace("DVB", "").replace("- ES ", "").replace("ES ", "").replace("COMPLETA", "").replace("(", "-").replace(")", "-").replace(".", " ").strip()
|
||||
|
||||
title = title.replace("Descargar torrent ", "").replace("Descarga Gratis ", "").replace("Descargar Estreno ", "").replace("Descargar Estrenos ", "").replace("Pelicula en latino ", "").replace("Descargar Pelicula ", "").replace("Descargar Peliculas ", "").replace("Descargar peliculas ", "").replace("Descargar Todas ", "").replace("Descargar Otras ", "").replace("Descargar ", "").replace("Descarga ", "").replace("Bajar ", "").replace("RIP ", "").replace("Rip", "").replace("RiP", "").replace("RiP", "").replace("XviD", "").replace("AC3 5.1", "").replace("AC3", "").replace("1080p ", "").replace("720p ", "").replace("DVD-Screener ", "").replace("TS-Screener ", "").replace("Screener ", "").replace("BdRemux ", "").replace("BR ", "").replace("4KULTRA", "").replace("FULLBluRay", "").replace("FullBluRay", "").replace("BluRay", "").replace("Bonus Disc", "").replace("de Cine ", "").replace("TeleCine ", "").replace("latino", "").replace("Latino", "").replace("argentina", "").replace("Argentina", "").strip()
|
||||
title = title.replace("Descargar torrent ", "").replace("Descarga Gratis ", "").replace("Descargar Estreno ", "").replace("Descargar Estrenos ", "").replace("Pelicula en latino ", "").replace("Descargar Pelicula ", "").replace("Descargar Peliculas ", "").replace("Descargar peliculas ", "").replace("Descargar Todas ", "").replace("Descargar Otras ", "").replace("Descargar ", "").replace("Descarga ", "").replace("Bajar ", "").replace("HDRIP ", "").replace("HDRiP ", "").replace("HDRip ", "").replace("RIP ", "").replace("Rip", "").replace("RiP", "").replace("XviD", "").replace("AC3 5.1", "").replace("AC3", "").replace("1080p ", "").replace("720p ", "").replace("DVD-Screener ", "").replace("TS-Screener ", "").replace("Screener ", "").replace("BdRemux ", "").replace("BR ", "").replace("4KULTRA", "").replace("FULLBluRay", "").replace("FullBluRay", "").replace("BluRay", "").replace("Bonus Disc", "").replace("de Cine ", "").replace("TeleCine ", "").replace("latino", "").replace("Latino", "").replace("argentina", "").replace("Argentina", "").strip()
|
||||
|
||||
if title.endswith("torrent gratis"): title = title[:-15]
|
||||
if title.endswith("gratis"): title = title[:-7]
|
||||
@@ -324,16 +324,9 @@ def listado(item):
|
||||
if not "HDR" in item_local.quality:
|
||||
item_local.quality += " HDR"
|
||||
|
||||
while title.endswith(' '):
|
||||
title = title[:-1]
|
||||
while title.startswith(' '):
|
||||
title = title[+1:]
|
||||
while title_alt.endswith(' '):
|
||||
title_alt = title_alt[:-1]
|
||||
while title_alt.startswith(' '):
|
||||
title_alt = title_alt[+1:]
|
||||
while item_local.quality.endswith(' '):
|
||||
item_local.quality = item_local.quality[:-1]
|
||||
title = title.strip()
|
||||
title_alt = title_alt.strip()
|
||||
item_local.quality = item_local.quality.strip()
|
||||
|
||||
if not title: #Usamos solo el title_alt en caso de que no exista el título original
|
||||
title = title_alt
|
||||
@@ -416,9 +409,9 @@ def listado(item):
|
||||
if config.get_setting("unify"): #Si Titulos Inteligentes SÍ seleccionados:
|
||||
title = title.replace("[", "-").replace("]", "-")
|
||||
|
||||
title = title.replace("--", "").replace(" []", "").replace("()", "").replace("(/)", "").replace("[/]", "")
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\]\[\/COLOR\]', '', title)
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', title)
|
||||
title = title.replace("--", "").replace(" []", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', title).strip()
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', title).strip()
|
||||
|
||||
if category == "newest": #Viene de Novedades. Marquemos el título con el nombre del canal
|
||||
title += ' -%s-' % item_local.channel.capitalize()
|
||||
@@ -427,7 +420,7 @@ def listado(item):
|
||||
|
||||
item_local.title = title
|
||||
|
||||
logger.debug("url: " + item_local.url + " / title: " + item_local.title + " / content title: " + item_local.contentTitle + "/" + item_local.contentSerieName + " / calidad: " + item_local.quality + " / year: " + year)
|
||||
logger.debug("url: " + item_local.url + " / title: " + item_local.title + " / content title: " + item_local.contentTitle + "/" + item_local.contentSerieName + " / calidad: " + item_local.quality + " / year: " + str(item_local.infoLabels['year']))
|
||||
#logger.debug(item_local)
|
||||
|
||||
if len(itemlist) == 0:
|
||||
@@ -447,15 +440,10 @@ def listado_busqueda(item):
|
||||
cnt_tot = 40 # Poner el num. máximo de items por página. Dejamos que la web lo controle
|
||||
cnt_title = 0 # Contador de líneas insertadas en Itemlist
|
||||
cnt_pag = 0 # Contador de líneas leídas de Matches
|
||||
category = "" # Guarda la categoria que viene desde una busqueda global
|
||||
|
||||
if item.cnt_pag:
|
||||
cnt_pag = item.cnt_pag # Se guarda en la lista de páginas anteriores en Item
|
||||
del item.cnt_pag
|
||||
|
||||
if item.category:
|
||||
category = item.category
|
||||
del item.category
|
||||
if item.totalItems:
|
||||
del item.totalItems
|
||||
if item.text_bold:
|
||||
@@ -578,12 +566,14 @@ def listado_busqueda(item):
|
||||
title_lista += [scrapedurl_alt]
|
||||
else:
|
||||
title_lista += [scrapedurl]
|
||||
if "juego/" in scrapedurl or "xbox" in scrapedurl.lower() or "xbox" in scrapedtitle.lower() or "xbox" in calidad.lower() or "epub" in calidad.lower() or "pdf" in calidad.lower(): # no mostramos lo que no sean videos
|
||||
if "juego/" in scrapedurl or "xbox" in scrapedurl.lower() or "xbox" in scrapedtitle.lower() or "windows" in scrapedtitle.lower() or "windows" in calidad.lower() or "nintendo" in scrapedtitle.lower() or "xbox" in calidad.lower() or "epub" in calidad.lower() or "pdf" in calidad.lower() or "pcdvd" in calidad.lower() or "crack" in calidad.lower(): # no mostramos lo que no sean videos
|
||||
continue
|
||||
cnt_title += 1 # Sería una línea real más para Itemlist
|
||||
|
||||
#Creamos una copia de Item para cada contenido
|
||||
item_local = item.clone()
|
||||
if item_local.category:
|
||||
del item_local.category
|
||||
if item_local.tipo:
|
||||
del item_local.tipo
|
||||
if item_local.totalItems:
|
||||
@@ -594,6 +584,10 @@ def listado_busqueda(item):
|
||||
del item_local.pattern
|
||||
if item_local.title_lista:
|
||||
del item_local.title_lista
|
||||
item_local.adult = True
|
||||
del item_local.adult
|
||||
item_local.folder = True
|
||||
del item_local.folder
|
||||
item_local.title = ''
|
||||
item_local.context = "['buscar_trailer']"
|
||||
|
||||
@@ -620,7 +614,7 @@ def listado_busqueda(item):
|
||||
|
||||
#Determinamos y marcamos idiomas distintos del castellano
|
||||
item_local.language = []
|
||||
if "[vos" in title.lower() or "v.o.s" in title.lower() or "vo" in title.lower() or ".com/pelicula/" in scrapedurl or ".com/series-vo" in scrapedurl or "-vo/" in scrapedurl or "vos" in calidad.lower() or "vose" in calidad.lower() or "v.o.s" in calidad.lower():
|
||||
if "[vos" in title.lower() or "v.o.s" in title.lower() or "vo" in title.lower() or ".com/pelicula/" in scrapedurl or ".com/series-vo" in scrapedurl or "-vo/" in scrapedurl or "vos" in calidad.lower() or "vose" in calidad.lower() or "v.o.s" in calidad.lower() or "sub" in calidad.lower() or ".com/peliculas-vo" in item.url:
|
||||
item_local.language += ["VOS"]
|
||||
title = title.replace(" [Subs. integrados]", "").replace(" [subs. Integrados]", "").replace(" [VOSE", "").replace(" [VOS", "").replace(" (V.O.S.E)", "").replace(" VO", "").replace("Subtitulos", "")
|
||||
if "latino" in title.lower() or "argentina" in title.lower() or "-latino/" in scrapedurl or "latino" in calidad.lower() or "argentina" in calidad.lower():
|
||||
@@ -654,8 +648,8 @@ def listado_busqueda(item):
|
||||
if "audio" in title.lower(): #Reservamos info de audio para después de TMDB
|
||||
title_subs += ['[%s]' % scrapertools.find_single_match(title, r'(\[[a|A]udio.*?\])')]
|
||||
title = re.sub(r'\[[a|A]udio.*?\]', '', title)
|
||||
if "[dual" in title.lower() or "multileng" in title.lower() or "multileng" in item_local.quality.lower():
|
||||
item_local.language += ["DUAL"]
|
||||
if "[dual" in title.lower() or "multileng" in title.lower() or "multileng" in item_local.quality.lower() or (("espa" in title.lower() or "spani" in title.lower()) and "VOS" in item_local.language):
|
||||
item_local.language[0:0] = ["DUAL"]
|
||||
title = re.sub(r'\[[D|d]ual.*?\]', '', title)
|
||||
title = re.sub(r'\[[M|m]ultileng.*?\]', '', title)
|
||||
item_local.quality = re.sub(r'\[[M|m]ultileng.*?\]', '', item_local.quality)
|
||||
@@ -691,7 +685,7 @@ def listado_busqueda(item):
|
||||
|
||||
title = title.replace("Ver online ", "").replace("Descarga Serie HD ", "").replace("Descargar Serie HD ", "").replace("Descarga Serie ", "").replace("Descargar Serie ", "").replace("Ver en linea ", "").replace("Ver en linea", "").replace("HD ", "").replace("(Proper)", "").replace("RatDVD", "").replace("DVDRiP", "").replace("DVDRIP", "").replace("DVDR", "").replace("DVD9", "").replace("DVD", "").replace("DVB", "").replace("- ES ", "").replace("ES ", "").replace("COMPLETA", "").replace("(", "-").replace(")", "-").replace(".", " ").strip()
|
||||
|
||||
title = title.replace("Descargar torrent ", "").replace("Descarga Gratis ", "").replace("Descargar Estreno ", "").replace("Descargar Estrenos ", "").replace("Pelicula en latino ", "").replace("Descargar Pelicula ", "").replace("Descargar Peliculas ", "").replace("Descargar peliculas ", "").replace("Descargar Todas ", "").replace("Descargar Otras ", "").replace("Descargar ", "").replace("Descarga ", "").replace("Bajar ", "").replace("RIP ", "").replace("Rip", "").replace("RiP", "").replace("RiP", "").replace("XviD", "").replace("AC3 5.1", "").replace("AC3", "").replace("1080p ", "").replace("720p ", "").replace("DVD-Screener ", "").replace("TS-Screener ", "").replace("Screener ", "").replace("BdRemux ", "").replace("BR ", "").replace("4KULTRA", "").replace("FULLBluRay", "").replace("FullBluRay", "").replace("BluRay", "").replace("Bonus Disc", "").replace("de Cine ", "").replace("TeleCine ", "").replace("latino", "").replace("Latino", "").replace("argentina", "").replace("Argentina", "").strip()
|
||||
title = title.replace("Descargar torrent ", "").replace("Descarga Gratis ", "").replace("Descargar Estreno ", "").replace("Descargar Estrenos ", "").replace("Pelicula en latino ", "").replace("Descargar Pelicula ", "").replace("Descargar Peliculas ", "").replace("Descargar peliculas ", "").replace("Descargar Todas ", "").replace("Descargar Otras ", "").replace("Descargar ", "").replace("Descarga ", "").replace("Bajar ", "").replace("HDRIP ", "").replace("HDRiP ", "").replace("HDRip ", "").replace("RIP ", "").replace("Rip", "").replace("RiP", "").replace("XviD", "").replace("AC3 5.1", "").replace("AC3", "").replace("1080p ", "").replace("720p ", "").replace("DVD-Screener ", "").replace("TS-Screener ", "").replace("Screener ", "").replace("BdRemux ", "").replace("BR ", "").replace("4KULTRA", "").replace("FULLBluRay", "").replace("FullBluRay", "").replace("BluRay", "").replace("Bonus Disc", "").replace("de Cine ", "").replace("TeleCine ", "").replace("latino", "").replace("Latino", "").replace("argentina", "").replace("Argentina", "").strip()
|
||||
|
||||
if "pelisyseries.com" in host and item_local.contentType == "tvshow":
|
||||
titulo = ''
|
||||
@@ -715,19 +709,18 @@ def listado_busqueda(item):
|
||||
if title.endswith(" -"): title = title[:-2]
|
||||
if "en espa" in title: title = title[:-11]
|
||||
#title = re.sub(r'^\s', '', title)
|
||||
title = title.replace("a?o", 'año').replace("a?O", 'año').replace("A?o", 'Año').replace("A?O", 'Año')
|
||||
while title.startswith(' '):
|
||||
title = title[+1:]
|
||||
while title.endswith(' '):
|
||||
title = title[:-1]
|
||||
title = title.replace("a?o", 'año').replace("a?O", 'año').replace("A?o", 'Año').replace("A?O", 'Año').strip()
|
||||
|
||||
#Preparamos calidad
|
||||
item_local.quality = item_local.quality.replace("[ ", "").replace(" ]", "") #Preparamos calidad para Series
|
||||
item_local.quality = re.sub(r'\[\d{4}\]', '', item_local.quality) #Quitar año, si lo tiene
|
||||
item_local.quality = re.sub(r'\[Cap.*?\]', '', item_local.quality) #Quitar episodios, si lo tiene
|
||||
item_local.quality = re.sub(r'\[Docu.*?\]', '', item_local.quality) #Quitar tipo contenidos, si lo tiene
|
||||
if "[es-" in item_local.quality.lower() or (("cast" in item_local.quality.lower() or "spani" in item_local.quality.lower()) and ("eng" in item_local.quality.lower() or "ing" in item_local.quality.lower())): #Mirar si es DUAL
|
||||
item_local.language += ["DUAL"] #Salvar DUAL en idioma
|
||||
#Mirar si es DUAL
|
||||
if "VOS" in item_local.language and "DUAL" not in item_local.language and ("[sp" in item_local.quality.lower() or "espa" in item_local.quality.lower() or "cast" in item_local.quality.lower() or "spani" in item_local.quality.lower()):
|
||||
item_local.language[0:0] = ["DUAL"]
|
||||
if ("[es-" in item_local.quality.lower() or (("cast" in item_local.quality.lower() or "espa" in item_local.quality.lower() or "spani" in item_local.quality.lower()) and ("eng" in item_local.quality.lower() or "ing" in item_local.quality.lower()))) and "DUAL" not in item_local.language: #Mirar si es DUAL
|
||||
item_local.language[0:0] = ["DUAL"] #Salvar DUAL en idioma
|
||||
item_local.quality = re.sub(r'\[[es|ES]-\w+]', '', item_local.quality) #borrar DUAL
|
||||
item_local.quality = re.sub(r'[\s|-][c|C]aste.+', '', item_local.quality) #Borrar después de Castellano
|
||||
item_local.quality = re.sub(r'[\s|-][e|E]spa.+', '', item_local.quality) #Borrar después de Español
|
||||
@@ -735,9 +728,7 @@ def listado_busqueda(item):
|
||||
item_local.quality = re.sub(r'[\s|-][i|I|e|E]ngl.+', '', item_local.quality) #Borrar después de Inglés-English
|
||||
item_local.quality = item_local.quality.replace("[", "").replace("]", " ").replace("ALTA DEFINICION", "HDTV").replace(" Cap", "")
|
||||
#Borrar palabras innecesarias restantes
|
||||
item_local.quality = item_local.quality.replace("Espaol", "").replace("Español", "").replace("Espa", "").replace("Castellano ", "").replace("Castellano", "").replace("Spanish", "").replace("English", "").replace("Ingles", "").replace("Latino", "").replace("+Subs", "").replace("-Subs", "").replace("Subs", "").replace("VOSE", "").replace("VOS", "")
|
||||
while item_local.quality.endswith(" "): #Borrar espacios de cola
|
||||
item_local.quality = item_local.quality[:-1]
|
||||
item_local.quality = item_local.quality.replace("Espaol", "").replace("Español", "").replace("Espa", "").replace("Castellano ", "").replace("Castellano", "").replace("Spanish", "").replace("English", "").replace("Ingles", "").replace("Latino", "").replace("+Subs", "").replace("-Subs", "").replace("Subs", "").replace("VOSE", "").replace("VOS", "").strip()
|
||||
|
||||
#Limpieza final del título y guardado en las variables según su tipo de contenido
|
||||
item_local.title = title
|
||||
@@ -816,7 +807,7 @@ def listado_busqueda(item):
|
||||
#Agrega el item local a la lista itemlist
|
||||
itemlist.append(item_local.clone())
|
||||
|
||||
if not category: #Si este campo no existe es que viene de la primera pasada de una búsqueda global
|
||||
if not item.category: #Si este campo no existe es que viene de la primera pasada de una búsqueda global
|
||||
return itemlist #Retornamos sin pasar por la fase de maquillaje para ahorra tiempo
|
||||
|
||||
#Pasamos a TMDB la lista completa Itemlist
|
||||
@@ -872,12 +863,12 @@ def listado_busqueda(item):
|
||||
if config.get_setting("unify"): #Si Titulos Inteligentes SÍ seleccionados:
|
||||
title = title.replace("[", "-").replace("]", "-")
|
||||
|
||||
title = title.replace("--", "").replace(" []", "").replace("()", "").replace("(/)", "").replace("[/]", "")
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\]\[\/COLOR\]', '', title)
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', title)
|
||||
title = title.replace("--", "").replace(" []", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', title).strip()
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', title).strip()
|
||||
item_local.title = title
|
||||
|
||||
logger.debug("url: " + item_local.url + " / title: " + item_local.title + " / content title: " + item_local.contentTitle + "/" + item_local.contentSerieName + " / calidad: " + item_local.quality + "[" + str(item_local.language) + "]" + " / calidad ORG: " + calidad + " / year: " + year + " / tamaño: " + size)
|
||||
logger.debug("url: " + item_local.url + " / title: " + item_local.title + " / content title: " + item_local.contentTitle + "/" + item_local.contentSerieName + " / calidad: " + item_local.quality + "[" + str(item_local.language) + "]" + " / year: " + str(item_local.infoLabels['year']))
|
||||
|
||||
#logger.debug(item_local)
|
||||
|
||||
@@ -889,7 +880,6 @@ def listado_busqueda(item):
|
||||
return itemlist
|
||||
|
||||
def findvideos(item):
|
||||
import xbmc
|
||||
from core import channeltools
|
||||
logger.info()
|
||||
itemlist = []
|
||||
@@ -997,18 +987,31 @@ def findvideos(item):
|
||||
verificar_enlaces_descargas = -1 #Verificar todos los enlaces Descargar
|
||||
verificar_enlaces_descargas_validos = True #"¿Contar sólo enlaces 'verificados' en Descargar?"
|
||||
excluir_enlaces_descargas = [] #Lista vacía de servidores excluidos en Descargar
|
||||
|
||||
|
||||
# Saber si estamos en una ventana emergente lanzada desde una viñeta del menú principal,
|
||||
# con la función "play_from_library"
|
||||
unify_status = False
|
||||
if xbmc.getCondVisibility('Window.IsMedia') == 1:
|
||||
try:
|
||||
import xbmc
|
||||
if xbmc.getCondVisibility('Window.IsMedia') == 1:
|
||||
unify_status = config.get_setting("unify")
|
||||
except:
|
||||
unify_status = config.get_setting("unify")
|
||||
|
||||
#Salvamos la información de max num. de episodios por temporada para despues de TMDB
|
||||
if item.infoLabels['temporada_num_episodios']:
|
||||
num_episodios = item.infoLabels['temporada_num_episodios']
|
||||
else:
|
||||
num_episodios = 1
|
||||
|
||||
# Obtener la información actualizada del Episodio, si no la hay
|
||||
if not item.infoLabels['tmdb_id'] or (not item.infoLabels['episodio_titulo'] and item.contentType == 'episode'):
|
||||
tmdb.set_infoLabels(item, True)
|
||||
elif (not item.infoLabels['tvdb_id'] and item.contentType == 'episode') or item.contentChannel == "videolibrary":
|
||||
tmdb.set_infoLabels(item, True)
|
||||
#Restauramos la información de max num. de episodios por temporada despues de TMDB
|
||||
if item.infoLabels['temporada_num_episodios'] and num_episodios > item.infoLabels['temporada_num_episodios']:
|
||||
item.infoLabels['temporada_num_episodios'] = num_episodios
|
||||
|
||||
# Descarga la página
|
||||
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
|
||||
@@ -1028,6 +1031,8 @@ def findvideos(item):
|
||||
item.infoLabels['episodio_titulo'] = re.sub(r'\s?\[.*?\]', '', item.infoLabels['episodio_titulo'])
|
||||
if item.infoLabels['episodio_titulo'] == item.contentSerieName:
|
||||
item.infoLabels['episodio_titulo'] = ''
|
||||
if item.infoLabels['aired'] and item.contentType == "episode":
|
||||
item.infoLabels['year'] = scrapertools.find_single_match(str(item.infoLabels['aired']), r'\/(\d{4})')
|
||||
|
||||
#Generamos una copia de Item para trabajar sobre ella
|
||||
item_local = item.clone()
|
||||
@@ -1057,10 +1062,10 @@ def findvideos(item):
|
||||
else:
|
||||
title = item_local.title
|
||||
title_gen = title
|
||||
|
||||
title_gen = re.sub(r'\s\[COLOR \w+\]\[\]\[\/COLOR\]', '', title_gen) #Quitamos etiquetas vacías
|
||||
title_gen = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', title_gen) #Quitamos colores vacíos
|
||||
title_gen = title_gen.replace(" []", "") #Quitamos etiquetas vacías
|
||||
|
||||
title_gen = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', title_gen).strip() #Quitamos etiquetas vacías
|
||||
title_gen = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', title_gen).strip() #Quitamos colores vacíos
|
||||
title_gen = title_gen.replace(" []", "").strip() #Quitamos etiquetas vacías
|
||||
|
||||
if not unify_status: #Si Titulos Inteligentes NO seleccionados:
|
||||
title_gen = '**- [COLOR gold]Enlaces Ver: [/COLOR]%s[COLOR gold] -**[/COLOR]' % (title_gen)
|
||||
@@ -1074,9 +1079,9 @@ def findvideos(item):
|
||||
|
||||
#Ahora pintamos el link del Torrent, si lo hay
|
||||
if item_local.url: # Hay Torrent ?
|
||||
item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][Torrent][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red][%s][/COLOR]' % (item_local.quality, str(item_local.language)) #Preparamos título de Torrent
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\]\[\/COLOR\]', '', item_local.title) #Quitamos etiquetas vacías
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', item_local.title) #Quitamos colores vacíos
|
||||
item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][Torrent][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.quality, str(item_local.language)) #Preparamos título de Torrent
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', item_local.title).strip() #Quitamos etiquetas vacías
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', item_local.title).strip() #Quitamos colores vacíos
|
||||
item_local.alive = "??" #Calidad del link sin verificar
|
||||
item_local.action = "play" #Visualizar vídeo
|
||||
|
||||
@@ -1156,9 +1161,9 @@ def findvideos(item):
|
||||
item_local.action = "play"
|
||||
item_local.server = servidor
|
||||
item_local.url = enlace
|
||||
item_local.title = item_local.title.replace("[]", "")
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\]\[\/COLOR\]', '', item_local.title)
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', item_local.title)
|
||||
item_local.title = item_local.title.replace("[]", "").strip()
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', item_local.title).strip()
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', item_local.title).strip()
|
||||
itemlist.append(item_local.clone())
|
||||
except:
|
||||
pass
|
||||
@@ -1249,9 +1254,9 @@ def findvideos(item):
|
||||
item_local.action = "play"
|
||||
item_local.server = servidor
|
||||
item_local.url = enlace
|
||||
item_local.title = parte_title.replace("[]", "")
|
||||
item_local.title = re.sub(r'\[COLOR \w+\]\[\]\[\/COLOR\]', '', item_local.title)
|
||||
item_local.title = re.sub(r'\[COLOR \w+\]-\[\/COLOR\]', '', item_local.title)
|
||||
item_local.title = parte_title.replace("[]", "").strip()
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', item_local.title).strip()
|
||||
item_local.title = re.sub(r'\[COLOR \w+\]-\[\/COLOR\]', '', item_local.title).strip()
|
||||
itemlist.append(item_local.clone())
|
||||
except:
|
||||
pass
|
||||
@@ -1426,10 +1431,10 @@ def episodios(item):
|
||||
item_local.title = '%s [%s] [%s] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.title, item_local.infoLabels['year'], rating, item_local.quality, str(item_local.language))
|
||||
|
||||
#Quitamos campos vacíos
|
||||
item_local.infoLabels['episodio_titulo'] = item_local.infoLabels['episodio_titulo'].replace(" []", "")
|
||||
item_local.title = item_local.title.replace(" []", "")
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\]\[\/COLOR\]', '', item_local.title)
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]-\[\/COLOR\]', '', item_local.title)
|
||||
item_local.infoLabels['episodio_titulo'] = item_local.infoLabels['episodio_titulo'].replace(" []", "").strip()
|
||||
item_local.title = item_local.title.replace(" []", "").strip()
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', item_local.title).strip()
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]-\[\/COLOR\]', '', item_local.title).strip()
|
||||
if num_episodios < item_local.contentEpisodeNumber:
|
||||
num_episodios = item_local.contentEpisodeNumber
|
||||
if num_episodios and not item_local.infoLabels['temporada_num_episodios']:
|
||||
|
||||
@@ -236,7 +236,7 @@ def listado(item):
|
||||
|
||||
#Determinamos y marcamos idiomas distintos del castellano
|
||||
item_local.language = []
|
||||
if "[vos" in title.lower() or "v.o.s" in title.lower() or "vo" in title.lower() or ".com/pelicula/" in scrapedurl or ".com/series-vo" in scrapedurl or "-vo/" in scrapedurl or "vos" in calidad.lower() or "vose" in calidad.lower() or "v.o.s" in calidad.lower() or ".com/peliculas-vo" in item.url:
|
||||
if "[vos" in title.lower() or "v.o.s" in title.lower() or "vo" in title.lower() or ".com/pelicula/" in scrapedurl or ".com/series-vo" in scrapedurl or "-vo/" in scrapedurl or "vos" in calidad.lower() or "vose" in calidad.lower() or "v.o.s" in calidad.lower() or "sub" in calidad.lower() or ".com/peliculas-vo" in item.url:
|
||||
item_local.language += ["VOS"]
|
||||
title = title.replace(" [Subs. integrados]", "").replace(" [subs. Integrados]", "").replace(" [VOSE", "").replace(" [VOS", "").replace(" (V.O.S.E)", "").replace(" VO", "").replace("Subtitulos", "")
|
||||
if "latino" in title.lower() or "argentina" in title.lower() or "-latino/" in scrapedurl or "latino" in calidad.lower() or "argentina" in calidad.lower():
|
||||
@@ -272,8 +272,8 @@ def listado(item):
|
||||
if "audio" in title.lower(): #Reservamos info de audio para después de TMDB
|
||||
title_subs += ['[%s]' % scrapertools.find_single_match(title, r'(\[[a|A]udio.*?\])')]
|
||||
title = re.sub(r'\[[a|A]udio.*?\]', '', title)
|
||||
if "[dual" in title.lower() or "multileng" in title.lower() or "multileng" in item_local.quality.lower():
|
||||
item_local.language += ["DUAL"]
|
||||
if "[dual" in title.lower() or "multileng" in title.lower() or "multileng" in item_local.quality.lower() or (("espa" in title.lower() or "spani" in title.lower()) and "VOS" in item_local.language):
|
||||
item_local.language[0:0] = ["DUAL"]
|
||||
title = re.sub(r'\[[D|d]ual.*?\]', '', title)
|
||||
title = re.sub(r'\[[M|m]ultileng.*?\]', '', title)
|
||||
item_local.quality = re.sub(r'\[[M|m]ultileng.*?\]', '', item_local.quality)
|
||||
@@ -309,7 +309,7 @@ def listado(item):
|
||||
|
||||
title = title.replace("Ver online ", "").replace("Descarga Serie HD ", "").replace("Descargar Serie HD ", "").replace("Descarga Serie ", "").replace("Descargar Serie ", "").replace("Ver en linea ", "").replace("Ver en linea", "").replace("HD ", "").replace("(Proper)", "").replace("RatDVD", "").replace("DVDRiP", "").replace("DVDRIP", "").replace("DVDR", "").replace("DVD9", "").replace("DVD", "").replace("DVB", "").replace("- ES ", "").replace("ES ", "").replace("COMPLETA", "").replace("(", "-").replace(")", "-").replace(".", " ").strip()
|
||||
|
||||
title = title.replace("Descargar torrent ", "").replace("Descarga Gratis ", "").replace("Descargar Estreno ", "").replace("Descargar Estrenos ", "").replace("Pelicula en latino ", "").replace("Descargar Pelicula ", "").replace("Descargar Peliculas ", "").replace("Descargar peliculas ", "").replace("Descargar Todas ", "").replace("Descargar Otras ", "").replace("Descargar ", "").replace("Descarga ", "").replace("Bajar ", "").replace("RIP ", "").replace("Rip", "").replace("RiP", "").replace("RiP", "").replace("XviD", "").replace("AC3 5.1", "").replace("AC3", "").replace("1080p ", "").replace("720p ", "").replace("DVD-Screener ", "").replace("TS-Screener ", "").replace("Screener ", "").replace("BdRemux ", "").replace("BR ", "").replace("4KULTRA", "").replace("FULLBluRay", "").replace("FullBluRay", "").replace("BluRay", "").replace("Bonus Disc", "").replace("de Cine ", "").replace("TeleCine ", "").replace("latino", "").replace("Latino", "").replace("argentina", "").replace("Argentina", "").strip()
|
||||
title = title.replace("Descargar torrent ", "").replace("Descarga Gratis ", "").replace("Descargar Estreno ", "").replace("Descargar Estrenos ", "").replace("Pelicula en latino ", "").replace("Descargar Pelicula ", "").replace("Descargar Peliculas ", "").replace("Descargar peliculas ", "").replace("Descargar Todas ", "").replace("Descargar Otras ", "").replace("Descargar ", "").replace("Descarga ", "").replace("Bajar ", "").replace("HDRIP ", "").replace("HDRiP ", "").replace("HDRip ", "").replace("RIP ", "").replace("Rip", "").replace("RiP", "").replace("XviD", "").replace("AC3 5.1", "").replace("AC3", "").replace("1080p ", "").replace("720p ", "").replace("DVD-Screener ", "").replace("TS-Screener ", "").replace("Screener ", "").replace("BdRemux ", "").replace("BR ", "").replace("4KULTRA", "").replace("FULLBluRay", "").replace("FullBluRay", "").replace("BluRay", "").replace("Bonus Disc", "").replace("de Cine ", "").replace("TeleCine ", "").replace("latino", "").replace("Latino", "").replace("argentina", "").replace("Argentina", "").strip()
|
||||
|
||||
if title.endswith("torrent gratis"): title = title[:-15]
|
||||
if title.endswith("gratis"): title = title[:-7]
|
||||
@@ -324,16 +324,9 @@ def listado(item):
|
||||
if not "HDR" in item_local.quality:
|
||||
item_local.quality += " HDR"
|
||||
|
||||
while title.endswith(' '):
|
||||
title = title[:-1]
|
||||
while title.startswith(' '):
|
||||
title = title[+1:]
|
||||
while title_alt.endswith(' '):
|
||||
title_alt = title_alt[:-1]
|
||||
while title_alt.startswith(' '):
|
||||
title_alt = title_alt[+1:]
|
||||
while item_local.quality.endswith(' '):
|
||||
item_local.quality = item_local.quality[:-1]
|
||||
title = title.strip()
|
||||
title_alt = title_alt.strip()
|
||||
item_local.quality = item_local.quality.strip()
|
||||
|
||||
if not title: #Usamos solo el title_alt en caso de que no exista el título original
|
||||
title = title_alt
|
||||
@@ -416,9 +409,9 @@ def listado(item):
|
||||
if config.get_setting("unify"): #Si Titulos Inteligentes SÍ seleccionados:
|
||||
title = title.replace("[", "-").replace("]", "-")
|
||||
|
||||
title = title.replace("--", "").replace(" []", "").replace("()", "").replace("(/)", "").replace("[/]", "")
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\]\[\/COLOR\]', '', title)
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', title)
|
||||
title = title.replace("--", "").replace(" []", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', title).strip()
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', title).strip()
|
||||
|
||||
if category == "newest": #Viene de Novedades. Marquemos el título con el nombre del canal
|
||||
title += ' -%s-' % item_local.channel.capitalize()
|
||||
@@ -427,7 +420,7 @@ def listado(item):
|
||||
|
||||
item_local.title = title
|
||||
|
||||
logger.debug("url: " + item_local.url + " / title: " + item_local.title + " / content title: " + item_local.contentTitle + "/" + item_local.contentSerieName + " / calidad: " + item_local.quality + " / year: " + year)
|
||||
logger.debug("url: " + item_local.url + " / title: " + item_local.title + " / content title: " + item_local.contentTitle + "/" + item_local.contentSerieName + " / calidad: " + item_local.quality + " / year: " + str(item_local.infoLabels['year']))
|
||||
#logger.debug(item_local)
|
||||
|
||||
if len(itemlist) == 0:
|
||||
@@ -447,15 +440,10 @@ def listado_busqueda(item):
|
||||
cnt_tot = 40 # Poner el num. máximo de items por página. Dejamos que la web lo controle
|
||||
cnt_title = 0 # Contador de líneas insertadas en Itemlist
|
||||
cnt_pag = 0 # Contador de líneas leídas de Matches
|
||||
category = "" # Guarda la categoria que viene desde una busqueda global
|
||||
|
||||
if item.cnt_pag:
|
||||
cnt_pag = item.cnt_pag # Se guarda en la lista de páginas anteriores en Item
|
||||
del item.cnt_pag
|
||||
|
||||
if item.category:
|
||||
category = item.category
|
||||
del item.category
|
||||
if item.totalItems:
|
||||
del item.totalItems
|
||||
if item.text_bold:
|
||||
@@ -578,12 +566,14 @@ def listado_busqueda(item):
|
||||
title_lista += [scrapedurl_alt]
|
||||
else:
|
||||
title_lista += [scrapedurl]
|
||||
if "juego/" in scrapedurl or "xbox" in scrapedurl.lower() or "xbox" in scrapedtitle.lower() or "xbox" in calidad.lower() or "epub" in calidad.lower() or "pdf" in calidad.lower(): # no mostramos lo que no sean videos
|
||||
if "juego/" in scrapedurl or "xbox" in scrapedurl.lower() or "xbox" in scrapedtitle.lower() or "windows" in scrapedtitle.lower() or "windows" in calidad.lower() or "nintendo" in scrapedtitle.lower() or "xbox" in calidad.lower() or "epub" in calidad.lower() or "pdf" in calidad.lower() or "pcdvd" in calidad.lower() or "crack" in calidad.lower(): # no mostramos lo que no sean videos
|
||||
continue
|
||||
cnt_title += 1 # Sería una línea real más para Itemlist
|
||||
|
||||
#Creamos una copia de Item para cada contenido
|
||||
item_local = item.clone()
|
||||
if item_local.category:
|
||||
del item_local.category
|
||||
if item_local.tipo:
|
||||
del item_local.tipo
|
||||
if item_local.totalItems:
|
||||
@@ -594,6 +584,10 @@ def listado_busqueda(item):
|
||||
del item_local.pattern
|
||||
if item_local.title_lista:
|
||||
del item_local.title_lista
|
||||
item_local.adult = True
|
||||
del item_local.adult
|
||||
item_local.folder = True
|
||||
del item_local.folder
|
||||
item_local.title = ''
|
||||
item_local.context = "['buscar_trailer']"
|
||||
|
||||
@@ -620,7 +614,7 @@ def listado_busqueda(item):
|
||||
|
||||
#Determinamos y marcamos idiomas distintos del castellano
|
||||
item_local.language = []
|
||||
if "[vos" in title.lower() or "v.o.s" in title.lower() or "vo" in title.lower() or ".com/pelicula/" in scrapedurl or ".com/series-vo" in scrapedurl or "-vo/" in scrapedurl or "vos" in calidad.lower() or "vose" in calidad.lower() or "v.o.s" in calidad.lower():
|
||||
if "[vos" in title.lower() or "v.o.s" in title.lower() or "vo" in title.lower() or ".com/pelicula/" in scrapedurl or ".com/series-vo" in scrapedurl or "-vo/" in scrapedurl or "vos" in calidad.lower() or "vose" in calidad.lower() or "v.o.s" in calidad.lower() or "sub" in calidad.lower() or ".com/peliculas-vo" in item.url:
|
||||
item_local.language += ["VOS"]
|
||||
title = title.replace(" [Subs. integrados]", "").replace(" [subs. Integrados]", "").replace(" [VOSE", "").replace(" [VOS", "").replace(" (V.O.S.E)", "").replace(" VO", "").replace("Subtitulos", "")
|
||||
if "latino" in title.lower() or "argentina" in title.lower() or "-latino/" in scrapedurl or "latino" in calidad.lower() or "argentina" in calidad.lower():
|
||||
@@ -654,8 +648,8 @@ def listado_busqueda(item):
|
||||
if "audio" in title.lower(): #Reservamos info de audio para después de TMDB
|
||||
title_subs += ['[%s]' % scrapertools.find_single_match(title, r'(\[[a|A]udio.*?\])')]
|
||||
title = re.sub(r'\[[a|A]udio.*?\]', '', title)
|
||||
if "[dual" in title.lower() or "multileng" in title.lower() or "multileng" in item_local.quality.lower():
|
||||
item_local.language += ["DUAL"]
|
||||
if "[dual" in title.lower() or "multileng" in title.lower() or "multileng" in item_local.quality.lower() or (("espa" in title.lower() or "spani" in title.lower()) and "VOS" in item_local.language):
|
||||
item_local.language[0:0] = ["DUAL"]
|
||||
title = re.sub(r'\[[D|d]ual.*?\]', '', title)
|
||||
title = re.sub(r'\[[M|m]ultileng.*?\]', '', title)
|
||||
item_local.quality = re.sub(r'\[[M|m]ultileng.*?\]', '', item_local.quality)
|
||||
@@ -691,7 +685,7 @@ def listado_busqueda(item):
|
||||
|
||||
title = title.replace("Ver online ", "").replace("Descarga Serie HD ", "").replace("Descargar Serie HD ", "").replace("Descarga Serie ", "").replace("Descargar Serie ", "").replace("Ver en linea ", "").replace("Ver en linea", "").replace("HD ", "").replace("(Proper)", "").replace("RatDVD", "").replace("DVDRiP", "").replace("DVDRIP", "").replace("DVDR", "").replace("DVD9", "").replace("DVD", "").replace("DVB", "").replace("- ES ", "").replace("ES ", "").replace("COMPLETA", "").replace("(", "-").replace(")", "-").replace(".", " ").strip()
|
||||
|
||||
title = title.replace("Descargar torrent ", "").replace("Descarga Gratis ", "").replace("Descargar Estreno ", "").replace("Descargar Estrenos ", "").replace("Pelicula en latino ", "").replace("Descargar Pelicula ", "").replace("Descargar Peliculas ", "").replace("Descargar peliculas ", "").replace("Descargar Todas ", "").replace("Descargar Otras ", "").replace("Descargar ", "").replace("Descarga ", "").replace("Bajar ", "").replace("RIP ", "").replace("Rip", "").replace("RiP", "").replace("RiP", "").replace("XviD", "").replace("AC3 5.1", "").replace("AC3", "").replace("1080p ", "").replace("720p ", "").replace("DVD-Screener ", "").replace("TS-Screener ", "").replace("Screener ", "").replace("BdRemux ", "").replace("BR ", "").replace("4KULTRA", "").replace("FULLBluRay", "").replace("FullBluRay", "").replace("BluRay", "").replace("Bonus Disc", "").replace("de Cine ", "").replace("TeleCine ", "").replace("latino", "").replace("Latino", "").replace("argentina", "").replace("Argentina", "").strip()
|
||||
title = title.replace("Descargar torrent ", "").replace("Descarga Gratis ", "").replace("Descargar Estreno ", "").replace("Descargar Estrenos ", "").replace("Pelicula en latino ", "").replace("Descargar Pelicula ", "").replace("Descargar Peliculas ", "").replace("Descargar peliculas ", "").replace("Descargar Todas ", "").replace("Descargar Otras ", "").replace("Descargar ", "").replace("Descarga ", "").replace("Bajar ", "").replace("HDRIP ", "").replace("HDRiP ", "").replace("HDRip ", "").replace("RIP ", "").replace("Rip", "").replace("RiP", "").replace("XviD", "").replace("AC3 5.1", "").replace("AC3", "").replace("1080p ", "").replace("720p ", "").replace("DVD-Screener ", "").replace("TS-Screener ", "").replace("Screener ", "").replace("BdRemux ", "").replace("BR ", "").replace("4KULTRA", "").replace("FULLBluRay", "").replace("FullBluRay", "").replace("BluRay", "").replace("Bonus Disc", "").replace("de Cine ", "").replace("TeleCine ", "").replace("latino", "").replace("Latino", "").replace("argentina", "").replace("Argentina", "").strip()
|
||||
|
||||
if "pelisyseries.com" in host and item_local.contentType == "tvshow":
|
||||
titulo = ''
|
||||
@@ -715,19 +709,18 @@ def listado_busqueda(item):
|
||||
if title.endswith(" -"): title = title[:-2]
|
||||
if "en espa" in title: title = title[:-11]
|
||||
#title = re.sub(r'^\s', '', title)
|
||||
title = title.replace("a?o", 'año').replace("a?O", 'año').replace("A?o", 'Año').replace("A?O", 'Año')
|
||||
while title.startswith(' '):
|
||||
title = title[+1:]
|
||||
while title.endswith(' '):
|
||||
title = title[:-1]
|
||||
title = title.replace("a?o", 'año').replace("a?O", 'año').replace("A?o", 'Año').replace("A?O", 'Año').strip()
|
||||
|
||||
#Preparamos calidad
|
||||
item_local.quality = item_local.quality.replace("[ ", "").replace(" ]", "") #Preparamos calidad para Series
|
||||
item_local.quality = re.sub(r'\[\d{4}\]', '', item_local.quality) #Quitar año, si lo tiene
|
||||
item_local.quality = re.sub(r'\[Cap.*?\]', '', item_local.quality) #Quitar episodios, si lo tiene
|
||||
item_local.quality = re.sub(r'\[Docu.*?\]', '', item_local.quality) #Quitar tipo contenidos, si lo tiene
|
||||
if "[es-" in item_local.quality.lower() or (("cast" in item_local.quality.lower() or "spani" in item_local.quality.lower()) and ("eng" in item_local.quality.lower() or "ing" in item_local.quality.lower())): #Mirar si es DUAL
|
||||
item_local.language += ["DUAL"] #Salvar DUAL en idioma
|
||||
#Mirar si es DUAL
|
||||
if "VOS" in item_local.language and "DUAL" not in item_local.language and ("[sp" in item_local.quality.lower() or "espa" in item_local.quality.lower() or "cast" in item_local.quality.lower() or "spani" in item_local.quality.lower()):
|
||||
item_local.language[0:0] = ["DUAL"]
|
||||
if ("[es-" in item_local.quality.lower() or (("cast" in item_local.quality.lower() or "espa" in item_local.quality.lower() or "spani" in item_local.quality.lower()) and ("eng" in item_local.quality.lower() or "ing" in item_local.quality.lower()))) and "DUAL" not in item_local.language: #Mirar si es DUAL
|
||||
item_local.language[0:0] = ["DUAL"] #Salvar DUAL en idioma
|
||||
item_local.quality = re.sub(r'\[[es|ES]-\w+]', '', item_local.quality) #borrar DUAL
|
||||
item_local.quality = re.sub(r'[\s|-][c|C]aste.+', '', item_local.quality) #Borrar después de Castellano
|
||||
item_local.quality = re.sub(r'[\s|-][e|E]spa.+', '', item_local.quality) #Borrar después de Español
|
||||
@@ -735,9 +728,7 @@ def listado_busqueda(item):
|
||||
item_local.quality = re.sub(r'[\s|-][i|I|e|E]ngl.+', '', item_local.quality) #Borrar después de Inglés-English
|
||||
item_local.quality = item_local.quality.replace("[", "").replace("]", " ").replace("ALTA DEFINICION", "HDTV").replace(" Cap", "")
|
||||
#Borrar palabras innecesarias restantes
|
||||
item_local.quality = item_local.quality.replace("Espaol", "").replace("Español", "").replace("Espa", "").replace("Castellano ", "").replace("Castellano", "").replace("Spanish", "").replace("English", "").replace("Ingles", "").replace("Latino", "").replace("+Subs", "").replace("-Subs", "").replace("Subs", "").replace("VOSE", "").replace("VOS", "")
|
||||
while item_local.quality.endswith(" "): #Borrar espacios de cola
|
||||
item_local.quality = item_local.quality[:-1]
|
||||
item_local.quality = item_local.quality.replace("Espaol", "").replace("Español", "").replace("Espa", "").replace("Castellano ", "").replace("Castellano", "").replace("Spanish", "").replace("English", "").replace("Ingles", "").replace("Latino", "").replace("+Subs", "").replace("-Subs", "").replace("Subs", "").replace("VOSE", "").replace("VOS", "").strip()
|
||||
|
||||
#Limpieza final del título y guardado en las variables según su tipo de contenido
|
||||
item_local.title = title
|
||||
@@ -816,7 +807,7 @@ def listado_busqueda(item):
|
||||
#Agrega el item local a la lista itemlist
|
||||
itemlist.append(item_local.clone())
|
||||
|
||||
if not category: #Si este campo no existe es que viene de la primera pasada de una búsqueda global
|
||||
if not item.category: #Si este campo no existe es que viene de la primera pasada de una búsqueda global
|
||||
return itemlist #Retornamos sin pasar por la fase de maquillaje para ahorra tiempo
|
||||
|
||||
#Pasamos a TMDB la lista completa Itemlist
|
||||
@@ -872,12 +863,12 @@ def listado_busqueda(item):
|
||||
if config.get_setting("unify"): #Si Titulos Inteligentes SÍ seleccionados:
|
||||
title = title.replace("[", "-").replace("]", "-")
|
||||
|
||||
title = title.replace("--", "").replace(" []", "").replace("()", "").replace("(/)", "").replace("[/]", "")
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\]\[\/COLOR\]', '', title)
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', title)
|
||||
title = title.replace("--", "").replace(" []", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', title).strip()
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', title).strip()
|
||||
item_local.title = title
|
||||
|
||||
logger.debug("url: " + item_local.url + " / title: " + item_local.title + " / content title: " + item_local.contentTitle + "/" + item_local.contentSerieName + " / calidad: " + item_local.quality + "[" + str(item_local.language) + "]" + " / calidad ORG: " + calidad + " / year: " + year + " / tamaño: " + size)
|
||||
logger.debug("url: " + item_local.url + " / title: " + item_local.title + " / content title: " + item_local.contentTitle + "/" + item_local.contentSerieName + " / calidad: " + item_local.quality + "[" + str(item_local.language) + "]" + " / year: " + str(item_local.infoLabels['year']))
|
||||
|
||||
#logger.debug(item_local)
|
||||
|
||||
@@ -889,7 +880,6 @@ def listado_busqueda(item):
|
||||
return itemlist
|
||||
|
||||
def findvideos(item):
|
||||
import xbmc
|
||||
from core import channeltools
|
||||
logger.info()
|
||||
itemlist = []
|
||||
@@ -997,18 +987,31 @@ def findvideos(item):
|
||||
verificar_enlaces_descargas = -1 #Verificar todos los enlaces Descargar
|
||||
verificar_enlaces_descargas_validos = True #"¿Contar sólo enlaces 'verificados' en Descargar?"
|
||||
excluir_enlaces_descargas = [] #Lista vacía de servidores excluidos en Descargar
|
||||
|
||||
|
||||
# Saber si estamos en una ventana emergente lanzada desde una viñeta del menú principal,
|
||||
# con la función "play_from_library"
|
||||
unify_status = False
|
||||
if xbmc.getCondVisibility('Window.IsMedia') == 1:
|
||||
try:
|
||||
import xbmc
|
||||
if xbmc.getCondVisibility('Window.IsMedia') == 1:
|
||||
unify_status = config.get_setting("unify")
|
||||
except:
|
||||
unify_status = config.get_setting("unify")
|
||||
|
||||
#Salvamos la información de max num. de episodios por temporada para despues de TMDB
|
||||
if item.infoLabels['temporada_num_episodios']:
|
||||
num_episodios = item.infoLabels['temporada_num_episodios']
|
||||
else:
|
||||
num_episodios = 1
|
||||
|
||||
# Obtener la información actualizada del Episodio, si no la hay
|
||||
if not item.infoLabels['tmdb_id'] or (not item.infoLabels['episodio_titulo'] and item.contentType == 'episode'):
|
||||
tmdb.set_infoLabels(item, True)
|
||||
elif (not item.infoLabels['tvdb_id'] and item.contentType == 'episode') or item.contentChannel == "videolibrary":
|
||||
tmdb.set_infoLabels(item, True)
|
||||
#Restauramos la información de max num. de episodios por temporada despues de TMDB
|
||||
if item.infoLabels['temporada_num_episodios'] and num_episodios > item.infoLabels['temporada_num_episodios']:
|
||||
item.infoLabels['temporada_num_episodios'] = num_episodios
|
||||
|
||||
# Descarga la página
|
||||
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
|
||||
@@ -1028,6 +1031,8 @@ def findvideos(item):
|
||||
item.infoLabels['episodio_titulo'] = re.sub(r'\s?\[.*?\]', '', item.infoLabels['episodio_titulo'])
|
||||
if item.infoLabels['episodio_titulo'] == item.contentSerieName:
|
||||
item.infoLabels['episodio_titulo'] = ''
|
||||
if item.infoLabels['aired'] and item.contentType == "episode":
|
||||
item.infoLabels['year'] = scrapertools.find_single_match(str(item.infoLabels['aired']), r'\/(\d{4})')
|
||||
|
||||
#Generamos una copia de Item para trabajar sobre ella
|
||||
item_local = item.clone()
|
||||
@@ -1057,10 +1062,10 @@ def findvideos(item):
|
||||
else:
|
||||
title = item_local.title
|
||||
title_gen = title
|
||||
|
||||
title_gen = re.sub(r'\s\[COLOR \w+\]\[\]\[\/COLOR\]', '', title_gen) #Quitamos etiquetas vacías
|
||||
title_gen = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', title_gen) #Quitamos colores vacíos
|
||||
title_gen = title_gen.replace(" []", "") #Quitamos etiquetas vacías
|
||||
|
||||
title_gen = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', title_gen).strip() #Quitamos etiquetas vacías
|
||||
title_gen = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', title_gen).strip() #Quitamos colores vacíos
|
||||
title_gen = title_gen.replace(" []", "").strip() #Quitamos etiquetas vacías
|
||||
|
||||
if not unify_status: #Si Titulos Inteligentes NO seleccionados:
|
||||
title_gen = '**- [COLOR gold]Enlaces Ver: [/COLOR]%s[COLOR gold] -**[/COLOR]' % (title_gen)
|
||||
@@ -1074,9 +1079,9 @@ def findvideos(item):
|
||||
|
||||
#Ahora pintamos el link del Torrent, si lo hay
|
||||
if item_local.url: # Hay Torrent ?
|
||||
item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][Torrent][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red][%s][/COLOR]' % (item_local.quality, str(item_local.language)) #Preparamos título de Torrent
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\]\[\/COLOR\]', '', item_local.title) #Quitamos etiquetas vacías
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', item_local.title) #Quitamos colores vacíos
|
||||
item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][Torrent][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.quality, str(item_local.language)) #Preparamos título de Torrent
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', item_local.title).strip() #Quitamos etiquetas vacías
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', item_local.title).strip() #Quitamos colores vacíos
|
||||
item_local.alive = "??" #Calidad del link sin verificar
|
||||
item_local.action = "play" #Visualizar vídeo
|
||||
|
||||
@@ -1156,9 +1161,9 @@ def findvideos(item):
|
||||
item_local.action = "play"
|
||||
item_local.server = servidor
|
||||
item_local.url = enlace
|
||||
item_local.title = item_local.title.replace("[]", "")
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\]\[\/COLOR\]', '', item_local.title)
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', item_local.title)
|
||||
item_local.title = item_local.title.replace("[]", "").strip()
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', item_local.title).strip()
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', item_local.title).strip()
|
||||
itemlist.append(item_local.clone())
|
||||
except:
|
||||
pass
|
||||
@@ -1249,9 +1254,9 @@ def findvideos(item):
|
||||
item_local.action = "play"
|
||||
item_local.server = servidor
|
||||
item_local.url = enlace
|
||||
item_local.title = parte_title.replace("[]", "")
|
||||
item_local.title = re.sub(r'\[COLOR \w+\]\[\]\[\/COLOR\]', '', item_local.title)
|
||||
item_local.title = re.sub(r'\[COLOR \w+\]-\[\/COLOR\]', '', item_local.title)
|
||||
item_local.title = parte_title.replace("[]", "").strip()
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', item_local.title).strip()
|
||||
item_local.title = re.sub(r'\[COLOR \w+\]-\[\/COLOR\]', '', item_local.title).strip()
|
||||
itemlist.append(item_local.clone())
|
||||
except:
|
||||
pass
|
||||
@@ -1426,10 +1431,10 @@ def episodios(item):
|
||||
item_local.title = '%s [%s] [%s] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.title, item_local.infoLabels['year'], rating, item_local.quality, str(item_local.language))
|
||||
|
||||
#Quitamos campos vacíos
|
||||
item_local.infoLabels['episodio_titulo'] = item_local.infoLabels['episodio_titulo'].replace(" []", "")
|
||||
item_local.title = item_local.title.replace(" []", "")
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\]\[\/COLOR\]', '', item_local.title)
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]-\[\/COLOR\]', '', item_local.title)
|
||||
item_local.infoLabels['episodio_titulo'] = item_local.infoLabels['episodio_titulo'].replace(" []", "").strip()
|
||||
item_local.title = item_local.title.replace(" []", "").strip()
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', item_local.title).strip()
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]-\[\/COLOR\]', '', item_local.title).strip()
|
||||
if num_episodios < item_local.contentEpisodeNumber:
|
||||
num_episodios = item_local.contentEpisodeNumber
|
||||
if num_episodios and not item_local.infoLabels['temporada_num_episodios']:
|
||||
|
||||
@@ -236,7 +236,7 @@ def listado(item):
|
||||
|
||||
#Determinamos y marcamos idiomas distintos del castellano
|
||||
item_local.language = []
|
||||
if "[vos" in title.lower() or "v.o.s" in title.lower() or "vo" in title.lower() or ".com/pelicula/" in scrapedurl or ".com/series-vo" in scrapedurl or "-vo/" in scrapedurl or "vos" in calidad.lower() or "vose" in calidad.lower() or "v.o.s" in calidad.lower() or ".com/peliculas-vo" in item.url:
|
||||
if "[vos" in title.lower() or "v.o.s" in title.lower() or "vo" in title.lower() or ".com/pelicula/" in scrapedurl or ".com/series-vo" in scrapedurl or "-vo/" in scrapedurl or "vos" in calidad.lower() or "vose" in calidad.lower() or "v.o.s" in calidad.lower() or "sub" in calidad.lower() or ".com/peliculas-vo" in item.url:
|
||||
item_local.language += ["VOS"]
|
||||
title = title.replace(" [Subs. integrados]", "").replace(" [subs. Integrados]", "").replace(" [VOSE", "").replace(" [VOS", "").replace(" (V.O.S.E)", "").replace(" VO", "").replace("Subtitulos", "")
|
||||
if "latino" in title.lower() or "argentina" in title.lower() or "-latino/" in scrapedurl or "latino" in calidad.lower() or "argentina" in calidad.lower():
|
||||
@@ -272,8 +272,8 @@ def listado(item):
|
||||
if "audio" in title.lower(): #Reservamos info de audio para después de TMDB
|
||||
title_subs += ['[%s]' % scrapertools.find_single_match(title, r'(\[[a|A]udio.*?\])')]
|
||||
title = re.sub(r'\[[a|A]udio.*?\]', '', title)
|
||||
if "[dual" in title.lower() or "multileng" in title.lower() or "multileng" in item_local.quality.lower():
|
||||
item_local.language += ["DUAL"]
|
||||
if "[dual" in title.lower() or "multileng" in title.lower() or "multileng" in item_local.quality.lower() or (("espa" in title.lower() or "spani" in title.lower()) and "VOS" in item_local.language):
|
||||
item_local.language[0:0] = ["DUAL"]
|
||||
title = re.sub(r'\[[D|d]ual.*?\]', '', title)
|
||||
title = re.sub(r'\[[M|m]ultileng.*?\]', '', title)
|
||||
item_local.quality = re.sub(r'\[[M|m]ultileng.*?\]', '', item_local.quality)
|
||||
@@ -309,7 +309,7 @@ def listado(item):
|
||||
|
||||
title = title.replace("Ver online ", "").replace("Descarga Serie HD ", "").replace("Descargar Serie HD ", "").replace("Descarga Serie ", "").replace("Descargar Serie ", "").replace("Ver en linea ", "").replace("Ver en linea", "").replace("HD ", "").replace("(Proper)", "").replace("RatDVD", "").replace("DVDRiP", "").replace("DVDRIP", "").replace("DVDR", "").replace("DVD9", "").replace("DVD", "").replace("DVB", "").replace("- ES ", "").replace("ES ", "").replace("COMPLETA", "").replace("(", "-").replace(")", "-").replace(".", " ").strip()
|
||||
|
||||
title = title.replace("Descargar torrent ", "").replace("Descarga Gratis ", "").replace("Descargar Estreno ", "").replace("Descargar Estrenos ", "").replace("Pelicula en latino ", "").replace("Descargar Pelicula ", "").replace("Descargar Peliculas ", "").replace("Descargar peliculas ", "").replace("Descargar Todas ", "").replace("Descargar Otras ", "").replace("Descargar ", "").replace("Descarga ", "").replace("Bajar ", "").replace("RIP ", "").replace("Rip", "").replace("RiP", "").replace("RiP", "").replace("XviD", "").replace("AC3 5.1", "").replace("AC3", "").replace("1080p ", "").replace("720p ", "").replace("DVD-Screener ", "").replace("TS-Screener ", "").replace("Screener ", "").replace("BdRemux ", "").replace("BR ", "").replace("4KULTRA", "").replace("FULLBluRay", "").replace("FullBluRay", "").replace("BluRay", "").replace("Bonus Disc", "").replace("de Cine ", "").replace("TeleCine ", "").replace("latino", "").replace("Latino", "").replace("argentina", "").replace("Argentina", "").strip()
|
||||
title = title.replace("Descargar torrent ", "").replace("Descarga Gratis ", "").replace("Descargar Estreno ", "").replace("Descargar Estrenos ", "").replace("Pelicula en latino ", "").replace("Descargar Pelicula ", "").replace("Descargar Peliculas ", "").replace("Descargar peliculas ", "").replace("Descargar Todas ", "").replace("Descargar Otras ", "").replace("Descargar ", "").replace("Descarga ", "").replace("Bajar ", "").replace("HDRIP ", "").replace("HDRiP ", "").replace("HDRip ", "").replace("RIP ", "").replace("Rip", "").replace("RiP", "").replace("XviD", "").replace("AC3 5.1", "").replace("AC3", "").replace("1080p ", "").replace("720p ", "").replace("DVD-Screener ", "").replace("TS-Screener ", "").replace("Screener ", "").replace("BdRemux ", "").replace("BR ", "").replace("4KULTRA", "").replace("FULLBluRay", "").replace("FullBluRay", "").replace("BluRay", "").replace("Bonus Disc", "").replace("de Cine ", "").replace("TeleCine ", "").replace("latino", "").replace("Latino", "").replace("argentina", "").replace("Argentina", "").strip()
|
||||
|
||||
if title.endswith("torrent gratis"): title = title[:-15]
|
||||
if title.endswith("gratis"): title = title[:-7]
|
||||
@@ -324,16 +324,9 @@ def listado(item):
|
||||
if not "HDR" in item_local.quality:
|
||||
item_local.quality += " HDR"
|
||||
|
||||
while title.endswith(' '):
|
||||
title = title[:-1]
|
||||
while title.startswith(' '):
|
||||
title = title[+1:]
|
||||
while title_alt.endswith(' '):
|
||||
title_alt = title_alt[:-1]
|
||||
while title_alt.startswith(' '):
|
||||
title_alt = title_alt[+1:]
|
||||
while item_local.quality.endswith(' '):
|
||||
item_local.quality = item_local.quality[:-1]
|
||||
title = title.strip()
|
||||
title_alt = title_alt.strip()
|
||||
item_local.quality = item_local.quality.strip()
|
||||
|
||||
if not title: #Usamos solo el title_alt en caso de que no exista el título original
|
||||
title = title_alt
|
||||
@@ -416,9 +409,9 @@ def listado(item):
|
||||
if config.get_setting("unify"): #Si Titulos Inteligentes SÍ seleccionados:
|
||||
title = title.replace("[", "-").replace("]", "-")
|
||||
|
||||
title = title.replace("--", "").replace(" []", "").replace("()", "").replace("(/)", "").replace("[/]", "")
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\]\[\/COLOR\]', '', title)
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', title)
|
||||
title = title.replace("--", "").replace(" []", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', title).strip()
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', title).strip()
|
||||
|
||||
if category == "newest": #Viene de Novedades. Marquemos el título con el nombre del canal
|
||||
title += ' -%s-' % item_local.channel.capitalize()
|
||||
@@ -427,7 +420,7 @@ def listado(item):
|
||||
|
||||
item_local.title = title
|
||||
|
||||
logger.debug("url: " + item_local.url + " / title: " + item_local.title + " / content title: " + item_local.contentTitle + "/" + item_local.contentSerieName + " / calidad: " + item_local.quality + " / year: " + year)
|
||||
logger.debug("url: " + item_local.url + " / title: " + item_local.title + " / content title: " + item_local.contentTitle + "/" + item_local.contentSerieName + " / calidad: " + item_local.quality + " / year: " + str(item_local.infoLabels['year']))
|
||||
#logger.debug(item_local)
|
||||
|
||||
if len(itemlist) == 0:
|
||||
@@ -447,15 +440,10 @@ def listado_busqueda(item):
|
||||
cnt_tot = 40 # Poner el num. máximo de items por página. Dejamos que la web lo controle
|
||||
cnt_title = 0 # Contador de líneas insertadas en Itemlist
|
||||
cnt_pag = 0 # Contador de líneas leídas de Matches
|
||||
category = "" # Guarda la categoria que viene desde una busqueda global
|
||||
|
||||
if item.cnt_pag:
|
||||
cnt_pag = item.cnt_pag # Se guarda en la lista de páginas anteriores en Item
|
||||
del item.cnt_pag
|
||||
|
||||
if item.category:
|
||||
category = item.category
|
||||
del item.category
|
||||
if item.totalItems:
|
||||
del item.totalItems
|
||||
if item.text_bold:
|
||||
@@ -578,12 +566,14 @@ def listado_busqueda(item):
|
||||
title_lista += [scrapedurl_alt]
|
||||
else:
|
||||
title_lista += [scrapedurl]
|
||||
if "juego/" in scrapedurl or "xbox" in scrapedurl.lower() or "xbox" in scrapedtitle.lower() or "xbox" in calidad.lower() or "epub" in calidad.lower() or "pdf" in calidad.lower(): # no mostramos lo que no sean videos
|
||||
if "juego/" in scrapedurl or "xbox" in scrapedurl.lower() or "xbox" in scrapedtitle.lower() or "windows" in scrapedtitle.lower() or "windows" in calidad.lower() or "nintendo" in scrapedtitle.lower() or "xbox" in calidad.lower() or "epub" in calidad.lower() or "pdf" in calidad.lower() or "pcdvd" in calidad.lower() or "crack" in calidad.lower(): # no mostramos lo que no sean videos
|
||||
continue
|
||||
cnt_title += 1 # Sería una línea real más para Itemlist
|
||||
|
||||
#Creamos una copia de Item para cada contenido
|
||||
item_local = item.clone()
|
||||
if item_local.category:
|
||||
del item_local.category
|
||||
if item_local.tipo:
|
||||
del item_local.tipo
|
||||
if item_local.totalItems:
|
||||
@@ -594,6 +584,10 @@ def listado_busqueda(item):
|
||||
del item_local.pattern
|
||||
if item_local.title_lista:
|
||||
del item_local.title_lista
|
||||
item_local.adult = True
|
||||
del item_local.adult
|
||||
item_local.folder = True
|
||||
del item_local.folder
|
||||
item_local.title = ''
|
||||
item_local.context = "['buscar_trailer']"
|
||||
|
||||
@@ -620,7 +614,7 @@ def listado_busqueda(item):
|
||||
|
||||
#Determinamos y marcamos idiomas distintos del castellano
|
||||
item_local.language = []
|
||||
if "[vos" in title.lower() or "v.o.s" in title.lower() or "vo" in title.lower() or ".com/pelicula/" in scrapedurl or ".com/series-vo" in scrapedurl or "-vo/" in scrapedurl or "vos" in calidad.lower() or "vose" in calidad.lower() or "v.o.s" in calidad.lower():
|
||||
if "[vos" in title.lower() or "v.o.s" in title.lower() or "vo" in title.lower() or ".com/pelicula/" in scrapedurl or ".com/series-vo" in scrapedurl or "-vo/" in scrapedurl or "vos" in calidad.lower() or "vose" in calidad.lower() or "v.o.s" in calidad.lower() or "sub" in calidad.lower() or ".com/peliculas-vo" in item.url:
|
||||
item_local.language += ["VOS"]
|
||||
title = title.replace(" [Subs. integrados]", "").replace(" [subs. Integrados]", "").replace(" [VOSE", "").replace(" [VOS", "").replace(" (V.O.S.E)", "").replace(" VO", "").replace("Subtitulos", "")
|
||||
if "latino" in title.lower() or "argentina" in title.lower() or "-latino/" in scrapedurl or "latino" in calidad.lower() or "argentina" in calidad.lower():
|
||||
@@ -654,8 +648,8 @@ def listado_busqueda(item):
|
||||
if "audio" in title.lower(): #Reservamos info de audio para después de TMDB
|
||||
title_subs += ['[%s]' % scrapertools.find_single_match(title, r'(\[[a|A]udio.*?\])')]
|
||||
title = re.sub(r'\[[a|A]udio.*?\]', '', title)
|
||||
if "[dual" in title.lower() or "multileng" in title.lower() or "multileng" in item_local.quality.lower():
|
||||
item_local.language += ["DUAL"]
|
||||
if "[dual" in title.lower() or "multileng" in title.lower() or "multileng" in item_local.quality.lower() or (("espa" in title.lower() or "spani" in title.lower()) and "VOS" in item_local.language):
|
||||
item_local.language[0:0] = ["DUAL"]
|
||||
title = re.sub(r'\[[D|d]ual.*?\]', '', title)
|
||||
title = re.sub(r'\[[M|m]ultileng.*?\]', '', title)
|
||||
item_local.quality = re.sub(r'\[[M|m]ultileng.*?\]', '', item_local.quality)
|
||||
@@ -691,7 +685,7 @@ def listado_busqueda(item):
|
||||
|
||||
title = title.replace("Ver online ", "").replace("Descarga Serie HD ", "").replace("Descargar Serie HD ", "").replace("Descarga Serie ", "").replace("Descargar Serie ", "").replace("Ver en linea ", "").replace("Ver en linea", "").replace("HD ", "").replace("(Proper)", "").replace("RatDVD", "").replace("DVDRiP", "").replace("DVDRIP", "").replace("DVDR", "").replace("DVD9", "").replace("DVD", "").replace("DVB", "").replace("- ES ", "").replace("ES ", "").replace("COMPLETA", "").replace("(", "-").replace(")", "-").replace(".", " ").strip()
|
||||
|
||||
title = title.replace("Descargar torrent ", "").replace("Descarga Gratis ", "").replace("Descargar Estreno ", "").replace("Descargar Estrenos ", "").replace("Pelicula en latino ", "").replace("Descargar Pelicula ", "").replace("Descargar Peliculas ", "").replace("Descargar peliculas ", "").replace("Descargar Todas ", "").replace("Descargar Otras ", "").replace("Descargar ", "").replace("Descarga ", "").replace("Bajar ", "").replace("RIP ", "").replace("Rip", "").replace("RiP", "").replace("RiP", "").replace("XviD", "").replace("AC3 5.1", "").replace("AC3", "").replace("1080p ", "").replace("720p ", "").replace("DVD-Screener ", "").replace("TS-Screener ", "").replace("Screener ", "").replace("BdRemux ", "").replace("BR ", "").replace("4KULTRA", "").replace("FULLBluRay", "").replace("FullBluRay", "").replace("BluRay", "").replace("Bonus Disc", "").replace("de Cine ", "").replace("TeleCine ", "").replace("latino", "").replace("Latino", "").replace("argentina", "").replace("Argentina", "").strip()
|
||||
title = title.replace("Descargar torrent ", "").replace("Descarga Gratis ", "").replace("Descargar Estreno ", "").replace("Descargar Estrenos ", "").replace("Pelicula en latino ", "").replace("Descargar Pelicula ", "").replace("Descargar Peliculas ", "").replace("Descargar peliculas ", "").replace("Descargar Todas ", "").replace("Descargar Otras ", "").replace("Descargar ", "").replace("Descarga ", "").replace("Bajar ", "").replace("HDRIP ", "").replace("HDRiP ", "").replace("HDRip ", "").replace("RIP ", "").replace("Rip", "").replace("RiP", "").replace("XviD", "").replace("AC3 5.1", "").replace("AC3", "").replace("1080p ", "").replace("720p ", "").replace("DVD-Screener ", "").replace("TS-Screener ", "").replace("Screener ", "").replace("BdRemux ", "").replace("BR ", "").replace("4KULTRA", "").replace("FULLBluRay", "").replace("FullBluRay", "").replace("BluRay", "").replace("Bonus Disc", "").replace("de Cine ", "").replace("TeleCine ", "").replace("latino", "").replace("Latino", "").replace("argentina", "").replace("Argentina", "").strip()
|
||||
|
||||
if "pelisyseries.com" in host and item_local.contentType == "tvshow":
|
||||
titulo = ''
|
||||
@@ -715,19 +709,18 @@ def listado_busqueda(item):
|
||||
if title.endswith(" -"): title = title[:-2]
|
||||
if "en espa" in title: title = title[:-11]
|
||||
#title = re.sub(r'^\s', '', title)
|
||||
title = title.replace("a?o", 'año').replace("a?O", 'año').replace("A?o", 'Año').replace("A?O", 'Año')
|
||||
while title.startswith(' '):
|
||||
title = title[+1:]
|
||||
while title.endswith(' '):
|
||||
title = title[:-1]
|
||||
title = title.replace("a?o", 'año').replace("a?O", 'año').replace("A?o", 'Año').replace("A?O", 'Año').strip()
|
||||
|
||||
#Preparamos calidad
|
||||
item_local.quality = item_local.quality.replace("[ ", "").replace(" ]", "") #Preparamos calidad para Series
|
||||
item_local.quality = re.sub(r'\[\d{4}\]', '', item_local.quality) #Quitar año, si lo tiene
|
||||
item_local.quality = re.sub(r'\[Cap.*?\]', '', item_local.quality) #Quitar episodios, si lo tiene
|
||||
item_local.quality = re.sub(r'\[Docu.*?\]', '', item_local.quality) #Quitar tipo contenidos, si lo tiene
|
||||
if "[es-" in item_local.quality.lower() or (("cast" in item_local.quality.lower() or "spani" in item_local.quality.lower()) and ("eng" in item_local.quality.lower() or "ing" in item_local.quality.lower())): #Mirar si es DUAL
|
||||
item_local.language += ["DUAL"] #Salvar DUAL en idioma
|
||||
#Mirar si es DUAL
|
||||
if "VOS" in item_local.language and "DUAL" not in item_local.language and ("[sp" in item_local.quality.lower() or "espa" in item_local.quality.lower() or "cast" in item_local.quality.lower() or "spani" in item_local.quality.lower()):
|
||||
item_local.language[0:0] = ["DUAL"]
|
||||
if ("[es-" in item_local.quality.lower() or (("cast" in item_local.quality.lower() or "espa" in item_local.quality.lower() or "spani" in item_local.quality.lower()) and ("eng" in item_local.quality.lower() or "ing" in item_local.quality.lower()))) and "DUAL" not in item_local.language: #Mirar si es DUAL
|
||||
item_local.language[0:0] = ["DUAL"] #Salvar DUAL en idioma
|
||||
item_local.quality = re.sub(r'\[[es|ES]-\w+]', '', item_local.quality) #borrar DUAL
|
||||
item_local.quality = re.sub(r'[\s|-][c|C]aste.+', '', item_local.quality) #Borrar después de Castellano
|
||||
item_local.quality = re.sub(r'[\s|-][e|E]spa.+', '', item_local.quality) #Borrar después de Español
|
||||
@@ -735,9 +728,7 @@ def listado_busqueda(item):
|
||||
item_local.quality = re.sub(r'[\s|-][i|I|e|E]ngl.+', '', item_local.quality) #Borrar después de Inglés-English
|
||||
item_local.quality = item_local.quality.replace("[", "").replace("]", " ").replace("ALTA DEFINICION", "HDTV").replace(" Cap", "")
|
||||
#Borrar palabras innecesarias restantes
|
||||
item_local.quality = item_local.quality.replace("Espaol", "").replace("Español", "").replace("Espa", "").replace("Castellano ", "").replace("Castellano", "").replace("Spanish", "").replace("English", "").replace("Ingles", "").replace("Latino", "").replace("+Subs", "").replace("-Subs", "").replace("Subs", "").replace("VOSE", "").replace("VOS", "")
|
||||
while item_local.quality.endswith(" "): #Borrar espacios de cola
|
||||
item_local.quality = item_local.quality[:-1]
|
||||
item_local.quality = item_local.quality.replace("Espaol", "").replace("Español", "").replace("Espa", "").replace("Castellano ", "").replace("Castellano", "").replace("Spanish", "").replace("English", "").replace("Ingles", "").replace("Latino", "").replace("+Subs", "").replace("-Subs", "").replace("Subs", "").replace("VOSE", "").replace("VOS", "").strip()
|
||||
|
||||
#Limpieza final del título y guardado en las variables según su tipo de contenido
|
||||
item_local.title = title
|
||||
@@ -816,7 +807,7 @@ def listado_busqueda(item):
|
||||
#Agrega el item local a la lista itemlist
|
||||
itemlist.append(item_local.clone())
|
||||
|
||||
if not category: #Si este campo no existe es que viene de la primera pasada de una búsqueda global
|
||||
if not item.category: #Si este campo no existe es que viene de la primera pasada de una búsqueda global
|
||||
return itemlist #Retornamos sin pasar por la fase de maquillaje para ahorra tiempo
|
||||
|
||||
#Pasamos a TMDB la lista completa Itemlist
|
||||
@@ -872,12 +863,12 @@ def listado_busqueda(item):
|
||||
if config.get_setting("unify"): #Si Titulos Inteligentes SÍ seleccionados:
|
||||
title = title.replace("[", "-").replace("]", "-")
|
||||
|
||||
title = title.replace("--", "").replace(" []", "").replace("()", "").replace("(/)", "").replace("[/]", "")
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\]\[\/COLOR\]', '', title)
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', title)
|
||||
title = title.replace("--", "").replace(" []", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', title).strip()
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', title).strip()
|
||||
item_local.title = title
|
||||
|
||||
logger.debug("url: " + item_local.url + " / title: " + item_local.title + " / content title: " + item_local.contentTitle + "/" + item_local.contentSerieName + " / calidad: " + item_local.quality + "[" + str(item_local.language) + "]" + " / calidad ORG: " + calidad + " / year: " + year + " / tamaño: " + size)
|
||||
logger.debug("url: " + item_local.url + " / title: " + item_local.title + " / content title: " + item_local.contentTitle + "/" + item_local.contentSerieName + " / calidad: " + item_local.quality + "[" + str(item_local.language) + "]" + " / year: " + str(item_local.infoLabels['year']))
|
||||
|
||||
#logger.debug(item_local)
|
||||
|
||||
@@ -889,7 +880,6 @@ def listado_busqueda(item):
|
||||
return itemlist
|
||||
|
||||
def findvideos(item):
|
||||
import xbmc
|
||||
from core import channeltools
|
||||
logger.info()
|
||||
itemlist = []
|
||||
@@ -997,18 +987,31 @@ def findvideos(item):
|
||||
verificar_enlaces_descargas = -1 #Verificar todos los enlaces Descargar
|
||||
verificar_enlaces_descargas_validos = True #"¿Contar sólo enlaces 'verificados' en Descargar?"
|
||||
excluir_enlaces_descargas = [] #Lista vacía de servidores excluidos en Descargar
|
||||
|
||||
|
||||
# Saber si estamos en una ventana emergente lanzada desde una viñeta del menú principal,
|
||||
# con la función "play_from_library"
|
||||
unify_status = False
|
||||
if xbmc.getCondVisibility('Window.IsMedia') == 1:
|
||||
try:
|
||||
import xbmc
|
||||
if xbmc.getCondVisibility('Window.IsMedia') == 1:
|
||||
unify_status = config.get_setting("unify")
|
||||
except:
|
||||
unify_status = config.get_setting("unify")
|
||||
|
||||
#Salvamos la información de max num. de episodios por temporada para despues de TMDB
|
||||
if item.infoLabels['temporada_num_episodios']:
|
||||
num_episodios = item.infoLabels['temporada_num_episodios']
|
||||
else:
|
||||
num_episodios = 1
|
||||
|
||||
# Obtener la información actualizada del Episodio, si no la hay
|
||||
if not item.infoLabels['tmdb_id'] or (not item.infoLabels['episodio_titulo'] and item.contentType == 'episode'):
|
||||
tmdb.set_infoLabels(item, True)
|
||||
elif (not item.infoLabels['tvdb_id'] and item.contentType == 'episode') or item.contentChannel == "videolibrary":
|
||||
tmdb.set_infoLabels(item, True)
|
||||
#Restauramos la información de max num. de episodios por temporada despues de TMDB
|
||||
if item.infoLabels['temporada_num_episodios'] and num_episodios > item.infoLabels['temporada_num_episodios']:
|
||||
item.infoLabels['temporada_num_episodios'] = num_episodios
|
||||
|
||||
# Descarga la página
|
||||
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
|
||||
@@ -1028,6 +1031,8 @@ def findvideos(item):
|
||||
item.infoLabels['episodio_titulo'] = re.sub(r'\s?\[.*?\]', '', item.infoLabels['episodio_titulo'])
|
||||
if item.infoLabels['episodio_titulo'] == item.contentSerieName:
|
||||
item.infoLabels['episodio_titulo'] = ''
|
||||
if item.infoLabels['aired'] and item.contentType == "episode":
|
||||
item.infoLabels['year'] = scrapertools.find_single_match(str(item.infoLabels['aired']), r'\/(\d{4})')
|
||||
|
||||
#Generamos una copia de Item para trabajar sobre ella
|
||||
item_local = item.clone()
|
||||
@@ -1057,10 +1062,10 @@ def findvideos(item):
|
||||
else:
|
||||
title = item_local.title
|
||||
title_gen = title
|
||||
|
||||
title_gen = re.sub(r'\s\[COLOR \w+\]\[\]\[\/COLOR\]', '', title_gen) #Quitamos etiquetas vacías
|
||||
title_gen = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', title_gen) #Quitamos colores vacíos
|
||||
title_gen = title_gen.replace(" []", "") #Quitamos etiquetas vacías
|
||||
|
||||
title_gen = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', title_gen).strip() #Quitamos etiquetas vacías
|
||||
title_gen = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', title_gen).strip() #Quitamos colores vacíos
|
||||
title_gen = title_gen.replace(" []", "").strip() #Quitamos etiquetas vacías
|
||||
|
||||
if not unify_status: #Si Titulos Inteligentes NO seleccionados:
|
||||
title_gen = '**- [COLOR gold]Enlaces Ver: [/COLOR]%s[COLOR gold] -**[/COLOR]' % (title_gen)
|
||||
@@ -1074,9 +1079,9 @@ def findvideos(item):
|
||||
|
||||
#Ahora pintamos el link del Torrent, si lo hay
|
||||
if item_local.url: # Hay Torrent ?
|
||||
item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][Torrent][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red][%s][/COLOR]' % (item_local.quality, str(item_local.language)) #Preparamos título de Torrent
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\]\[\/COLOR\]', '', item_local.title) #Quitamos etiquetas vacías
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', item_local.title) #Quitamos colores vacíos
|
||||
item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][Torrent][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.quality, str(item_local.language)) #Preparamos título de Torrent
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', item_local.title).strip() #Quitamos etiquetas vacías
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', item_local.title).strip() #Quitamos colores vacíos
|
||||
item_local.alive = "??" #Calidad del link sin verificar
|
||||
item_local.action = "play" #Visualizar vídeo
|
||||
|
||||
@@ -1156,9 +1161,9 @@ def findvideos(item):
|
||||
item_local.action = "play"
|
||||
item_local.server = servidor
|
||||
item_local.url = enlace
|
||||
item_local.title = item_local.title.replace("[]", "")
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\]\[\/COLOR\]', '', item_local.title)
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', item_local.title)
|
||||
item_local.title = item_local.title.replace("[]", "").strip()
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', item_local.title).strip()
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', item_local.title).strip()
|
||||
itemlist.append(item_local.clone())
|
||||
except:
|
||||
pass
|
||||
@@ -1249,9 +1254,9 @@ def findvideos(item):
|
||||
item_local.action = "play"
|
||||
item_local.server = servidor
|
||||
item_local.url = enlace
|
||||
item_local.title = parte_title.replace("[]", "")
|
||||
item_local.title = re.sub(r'\[COLOR \w+\]\[\]\[\/COLOR\]', '', item_local.title)
|
||||
item_local.title = re.sub(r'\[COLOR \w+\]-\[\/COLOR\]', '', item_local.title)
|
||||
item_local.title = parte_title.replace("[]", "").strip()
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', item_local.title).strip()
|
||||
item_local.title = re.sub(r'\[COLOR \w+\]-\[\/COLOR\]', '', item_local.title).strip()
|
||||
itemlist.append(item_local.clone())
|
||||
except:
|
||||
pass
|
||||
@@ -1426,10 +1431,10 @@ def episodios(item):
|
||||
item_local.title = '%s [%s] [%s] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.title, item_local.infoLabels['year'], rating, item_local.quality, str(item_local.language))
|
||||
|
||||
#Quitamos campos vacíos
|
||||
item_local.infoLabels['episodio_titulo'] = item_local.infoLabels['episodio_titulo'].replace(" []", "")
|
||||
item_local.title = item_local.title.replace(" []", "")
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\]\[\/COLOR\]', '', item_local.title)
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]-\[\/COLOR\]', '', item_local.title)
|
||||
item_local.infoLabels['episodio_titulo'] = item_local.infoLabels['episodio_titulo'].replace(" []", "").strip()
|
||||
item_local.title = item_local.title.replace(" []", "").strip()
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', item_local.title).strip()
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]-\[\/COLOR\]', '', item_local.title).strip()
|
||||
if num_episodios < item_local.contentEpisodeNumber:
|
||||
num_episodios = item_local.contentEpisodeNumber
|
||||
if num_episodios and not item_local.infoLabels['temporada_num_episodios']:
|
||||
|
||||
@@ -236,7 +236,7 @@ def listado(item):
|
||||
|
||||
#Determinamos y marcamos idiomas distintos del castellano
|
||||
item_local.language = []
|
||||
if "[vos" in title.lower() or "v.o.s" in title.lower() or "vo" in title.lower() or ".com/pelicula/" in scrapedurl or ".com/series-vo" in scrapedurl or "-vo/" in scrapedurl or "vos" in calidad.lower() or "vose" in calidad.lower() or "v.o.s" in calidad.lower() or ".com/peliculas-vo" in item.url:
|
||||
if "[vos" in title.lower() or "v.o.s" in title.lower() or "vo" in title.lower() or ".com/pelicula/" in scrapedurl or ".com/series-vo" in scrapedurl or "-vo/" in scrapedurl or "vos" in calidad.lower() or "vose" in calidad.lower() or "v.o.s" in calidad.lower() or "sub" in calidad.lower() or ".com/peliculas-vo" in item.url:
|
||||
item_local.language += ["VOS"]
|
||||
title = title.replace(" [Subs. integrados]", "").replace(" [subs. Integrados]", "").replace(" [VOSE", "").replace(" [VOS", "").replace(" (V.O.S.E)", "").replace(" VO", "").replace("Subtitulos", "")
|
||||
if "latino" in title.lower() or "argentina" in title.lower() or "-latino/" in scrapedurl or "latino" in calidad.lower() or "argentina" in calidad.lower():
|
||||
@@ -272,8 +272,8 @@ def listado(item):
|
||||
if "audio" in title.lower(): #Reservamos info de audio para después de TMDB
|
||||
title_subs += ['[%s]' % scrapertools.find_single_match(title, r'(\[[a|A]udio.*?\])')]
|
||||
title = re.sub(r'\[[a|A]udio.*?\]', '', title)
|
||||
if "[dual" in title.lower() or "multileng" in title.lower() or "multileng" in item_local.quality.lower():
|
||||
item_local.language += ["DUAL"]
|
||||
if "[dual" in title.lower() or "multileng" in title.lower() or "multileng" in item_local.quality.lower() or (("espa" in title.lower() or "spani" in title.lower()) and "VOS" in item_local.language):
|
||||
item_local.language[0:0] = ["DUAL"]
|
||||
title = re.sub(r'\[[D|d]ual.*?\]', '', title)
|
||||
title = re.sub(r'\[[M|m]ultileng.*?\]', '', title)
|
||||
item_local.quality = re.sub(r'\[[M|m]ultileng.*?\]', '', item_local.quality)
|
||||
@@ -309,7 +309,7 @@ def listado(item):
|
||||
|
||||
title = title.replace("Ver online ", "").replace("Descarga Serie HD ", "").replace("Descargar Serie HD ", "").replace("Descarga Serie ", "").replace("Descargar Serie ", "").replace("Ver en linea ", "").replace("Ver en linea", "").replace("HD ", "").replace("(Proper)", "").replace("RatDVD", "").replace("DVDRiP", "").replace("DVDRIP", "").replace("DVDR", "").replace("DVD9", "").replace("DVD", "").replace("DVB", "").replace("- ES ", "").replace("ES ", "").replace("COMPLETA", "").replace("(", "-").replace(")", "-").replace(".", " ").strip()
|
||||
|
||||
title = title.replace("Descargar torrent ", "").replace("Descarga Gratis ", "").replace("Descargar Estreno ", "").replace("Descargar Estrenos ", "").replace("Pelicula en latino ", "").replace("Descargar Pelicula ", "").replace("Descargar Peliculas ", "").replace("Descargar peliculas ", "").replace("Descargar Todas ", "").replace("Descargar Otras ", "").replace("Descargar ", "").replace("Descarga ", "").replace("Bajar ", "").replace("RIP ", "").replace("Rip", "").replace("RiP", "").replace("RiP", "").replace("XviD", "").replace("AC3 5.1", "").replace("AC3", "").replace("1080p ", "").replace("720p ", "").replace("DVD-Screener ", "").replace("TS-Screener ", "").replace("Screener ", "").replace("BdRemux ", "").replace("BR ", "").replace("4KULTRA", "").replace("FULLBluRay", "").replace("FullBluRay", "").replace("BluRay", "").replace("Bonus Disc", "").replace("de Cine ", "").replace("TeleCine ", "").replace("latino", "").replace("Latino", "").replace("argentina", "").replace("Argentina", "").strip()
|
||||
title = title.replace("Descargar torrent ", "").replace("Descarga Gratis ", "").replace("Descargar Estreno ", "").replace("Descargar Estrenos ", "").replace("Pelicula en latino ", "").replace("Descargar Pelicula ", "").replace("Descargar Peliculas ", "").replace("Descargar peliculas ", "").replace("Descargar Todas ", "").replace("Descargar Otras ", "").replace("Descargar ", "").replace("Descarga ", "").replace("Bajar ", "").replace("HDRIP ", "").replace("HDRiP ", "").replace("HDRip ", "").replace("RIP ", "").replace("Rip", "").replace("RiP", "").replace("XviD", "").replace("AC3 5.1", "").replace("AC3", "").replace("1080p ", "").replace("720p ", "").replace("DVD-Screener ", "").replace("TS-Screener ", "").replace("Screener ", "").replace("BdRemux ", "").replace("BR ", "").replace("4KULTRA", "").replace("FULLBluRay", "").replace("FullBluRay", "").replace("BluRay", "").replace("Bonus Disc", "").replace("de Cine ", "").replace("TeleCine ", "").replace("latino", "").replace("Latino", "").replace("argentina", "").replace("Argentina", "").strip()
|
||||
|
||||
if title.endswith("torrent gratis"): title = title[:-15]
|
||||
if title.endswith("gratis"): title = title[:-7]
|
||||
@@ -324,16 +324,9 @@ def listado(item):
|
||||
if not "HDR" in item_local.quality:
|
||||
item_local.quality += " HDR"
|
||||
|
||||
while title.endswith(' '):
|
||||
title = title[:-1]
|
||||
while title.startswith(' '):
|
||||
title = title[+1:]
|
||||
while title_alt.endswith(' '):
|
||||
title_alt = title_alt[:-1]
|
||||
while title_alt.startswith(' '):
|
||||
title_alt = title_alt[+1:]
|
||||
while item_local.quality.endswith(' '):
|
||||
item_local.quality = item_local.quality[:-1]
|
||||
title = title.strip()
|
||||
title_alt = title_alt.strip()
|
||||
item_local.quality = item_local.quality.strip()
|
||||
|
||||
if not title: #Usamos solo el title_alt en caso de que no exista el título original
|
||||
title = title_alt
|
||||
@@ -416,9 +409,9 @@ def listado(item):
|
||||
if config.get_setting("unify"): #Si Titulos Inteligentes SÍ seleccionados:
|
||||
title = title.replace("[", "-").replace("]", "-")
|
||||
|
||||
title = title.replace("--", "").replace(" []", "").replace("()", "").replace("(/)", "").replace("[/]", "")
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\]\[\/COLOR\]', '', title)
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', title)
|
||||
title = title.replace("--", "").replace(" []", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', title).strip()
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', title).strip()
|
||||
|
||||
if category == "newest": #Viene de Novedades. Marquemos el título con el nombre del canal
|
||||
title += ' -%s-' % item_local.channel.capitalize()
|
||||
@@ -427,7 +420,7 @@ def listado(item):
|
||||
|
||||
item_local.title = title
|
||||
|
||||
logger.debug("url: " + item_local.url + " / title: " + item_local.title + " / content title: " + item_local.contentTitle + "/" + item_local.contentSerieName + " / calidad: " + item_local.quality + " / year: " + year)
|
||||
logger.debug("url: " + item_local.url + " / title: " + item_local.title + " / content title: " + item_local.contentTitle + "/" + item_local.contentSerieName + " / calidad: " + item_local.quality + " / year: " + str(item_local.infoLabels['year']))
|
||||
#logger.debug(item_local)
|
||||
|
||||
if len(itemlist) == 0:
|
||||
@@ -447,15 +440,10 @@ def listado_busqueda(item):
|
||||
cnt_tot = 40 # Poner el num. máximo de items por página. Dejamos que la web lo controle
|
||||
cnt_title = 0 # Contador de líneas insertadas en Itemlist
|
||||
cnt_pag = 0 # Contador de líneas leídas de Matches
|
||||
category = "" # Guarda la categoria que viene desde una busqueda global
|
||||
|
||||
if item.cnt_pag:
|
||||
cnt_pag = item.cnt_pag # Se guarda en la lista de páginas anteriores en Item
|
||||
del item.cnt_pag
|
||||
|
||||
if item.category:
|
||||
category = item.category
|
||||
del item.category
|
||||
if item.totalItems:
|
||||
del item.totalItems
|
||||
if item.text_bold:
|
||||
@@ -578,12 +566,14 @@ def listado_busqueda(item):
|
||||
title_lista += [scrapedurl_alt]
|
||||
else:
|
||||
title_lista += [scrapedurl]
|
||||
if "juego/" in scrapedurl or "xbox" in scrapedurl.lower() or "xbox" in scrapedtitle.lower() or "xbox" in calidad.lower() or "epub" in calidad.lower() or "pdf" in calidad.lower(): # no mostramos lo que no sean videos
|
||||
if "juego/" in scrapedurl or "xbox" in scrapedurl.lower() or "xbox" in scrapedtitle.lower() or "windows" in scrapedtitle.lower() or "windows" in calidad.lower() or "nintendo" in scrapedtitle.lower() or "xbox" in calidad.lower() or "epub" in calidad.lower() or "pdf" in calidad.lower() or "pcdvd" in calidad.lower() or "crack" in calidad.lower(): # no mostramos lo que no sean videos
|
||||
continue
|
||||
cnt_title += 1 # Sería una línea real más para Itemlist
|
||||
|
||||
#Creamos una copia de Item para cada contenido
|
||||
item_local = item.clone()
|
||||
if item_local.category:
|
||||
del item_local.category
|
||||
if item_local.tipo:
|
||||
del item_local.tipo
|
||||
if item_local.totalItems:
|
||||
@@ -594,6 +584,10 @@ def listado_busqueda(item):
|
||||
del item_local.pattern
|
||||
if item_local.title_lista:
|
||||
del item_local.title_lista
|
||||
item_local.adult = True
|
||||
del item_local.adult
|
||||
item_local.folder = True
|
||||
del item_local.folder
|
||||
item_local.title = ''
|
||||
item_local.context = "['buscar_trailer']"
|
||||
|
||||
@@ -620,7 +614,7 @@ def listado_busqueda(item):
|
||||
|
||||
#Determinamos y marcamos idiomas distintos del castellano
|
||||
item_local.language = []
|
||||
if "[vos" in title.lower() or "v.o.s" in title.lower() or "vo" in title.lower() or ".com/pelicula/" in scrapedurl or ".com/series-vo" in scrapedurl or "-vo/" in scrapedurl or "vos" in calidad.lower() or "vose" in calidad.lower() or "v.o.s" in calidad.lower():
|
||||
if "[vos" in title.lower() or "v.o.s" in title.lower() or "vo" in title.lower() or ".com/pelicula/" in scrapedurl or ".com/series-vo" in scrapedurl or "-vo/" in scrapedurl or "vos" in calidad.lower() or "vose" in calidad.lower() or "v.o.s" in calidad.lower() or "sub" in calidad.lower() or ".com/peliculas-vo" in item.url:
|
||||
item_local.language += ["VOS"]
|
||||
title = title.replace(" [Subs. integrados]", "").replace(" [subs. Integrados]", "").replace(" [VOSE", "").replace(" [VOS", "").replace(" (V.O.S.E)", "").replace(" VO", "").replace("Subtitulos", "")
|
||||
if "latino" in title.lower() or "argentina" in title.lower() or "-latino/" in scrapedurl or "latino" in calidad.lower() or "argentina" in calidad.lower():
|
||||
@@ -654,8 +648,8 @@ def listado_busqueda(item):
|
||||
if "audio" in title.lower(): #Reservamos info de audio para después de TMDB
|
||||
title_subs += ['[%s]' % scrapertools.find_single_match(title, r'(\[[a|A]udio.*?\])')]
|
||||
title = re.sub(r'\[[a|A]udio.*?\]', '', title)
|
||||
if "[dual" in title.lower() or "multileng" in title.lower() or "multileng" in item_local.quality.lower():
|
||||
item_local.language += ["DUAL"]
|
||||
if "[dual" in title.lower() or "multileng" in title.lower() or "multileng" in item_local.quality.lower() or (("espa" in title.lower() or "spani" in title.lower()) and "VOS" in item_local.language):
|
||||
item_local.language[0:0] = ["DUAL"]
|
||||
title = re.sub(r'\[[D|d]ual.*?\]', '', title)
|
||||
title = re.sub(r'\[[M|m]ultileng.*?\]', '', title)
|
||||
item_local.quality = re.sub(r'\[[M|m]ultileng.*?\]', '', item_local.quality)
|
||||
@@ -691,7 +685,7 @@ def listado_busqueda(item):
|
||||
|
||||
title = title.replace("Ver online ", "").replace("Descarga Serie HD ", "").replace("Descargar Serie HD ", "").replace("Descarga Serie ", "").replace("Descargar Serie ", "").replace("Ver en linea ", "").replace("Ver en linea", "").replace("HD ", "").replace("(Proper)", "").replace("RatDVD", "").replace("DVDRiP", "").replace("DVDRIP", "").replace("DVDR", "").replace("DVD9", "").replace("DVD", "").replace("DVB", "").replace("- ES ", "").replace("ES ", "").replace("COMPLETA", "").replace("(", "-").replace(")", "-").replace(".", " ").strip()
|
||||
|
||||
title = title.replace("Descargar torrent ", "").replace("Descarga Gratis ", "").replace("Descargar Estreno ", "").replace("Descargar Estrenos ", "").replace("Pelicula en latino ", "").replace("Descargar Pelicula ", "").replace("Descargar Peliculas ", "").replace("Descargar peliculas ", "").replace("Descargar Todas ", "").replace("Descargar Otras ", "").replace("Descargar ", "").replace("Descarga ", "").replace("Bajar ", "").replace("RIP ", "").replace("Rip", "").replace("RiP", "").replace("RiP", "").replace("XviD", "").replace("AC3 5.1", "").replace("AC3", "").replace("1080p ", "").replace("720p ", "").replace("DVD-Screener ", "").replace("TS-Screener ", "").replace("Screener ", "").replace("BdRemux ", "").replace("BR ", "").replace("4KULTRA", "").replace("FULLBluRay", "").replace("FullBluRay", "").replace("BluRay", "").replace("Bonus Disc", "").replace("de Cine ", "").replace("TeleCine ", "").replace("latino", "").replace("Latino", "").replace("argentina", "").replace("Argentina", "").strip()
|
||||
title = title.replace("Descargar torrent ", "").replace("Descarga Gratis ", "").replace("Descargar Estreno ", "").replace("Descargar Estrenos ", "").replace("Pelicula en latino ", "").replace("Descargar Pelicula ", "").replace("Descargar Peliculas ", "").replace("Descargar peliculas ", "").replace("Descargar Todas ", "").replace("Descargar Otras ", "").replace("Descargar ", "").replace("Descarga ", "").replace("Bajar ", "").replace("HDRIP ", "").replace("HDRiP ", "").replace("HDRip ", "").replace("RIP ", "").replace("Rip", "").replace("RiP", "").replace("XviD", "").replace("AC3 5.1", "").replace("AC3", "").replace("1080p ", "").replace("720p ", "").replace("DVD-Screener ", "").replace("TS-Screener ", "").replace("Screener ", "").replace("BdRemux ", "").replace("BR ", "").replace("4KULTRA", "").replace("FULLBluRay", "").replace("FullBluRay", "").replace("BluRay", "").replace("Bonus Disc", "").replace("de Cine ", "").replace("TeleCine ", "").replace("latino", "").replace("Latino", "").replace("argentina", "").replace("Argentina", "").strip()
|
||||
|
||||
if "pelisyseries.com" in host and item_local.contentType == "tvshow":
|
||||
titulo = ''
|
||||
@@ -715,19 +709,18 @@ def listado_busqueda(item):
|
||||
if title.endswith(" -"): title = title[:-2]
|
||||
if "en espa" in title: title = title[:-11]
|
||||
#title = re.sub(r'^\s', '', title)
|
||||
title = title.replace("a?o", 'año').replace("a?O", 'año').replace("A?o", 'Año').replace("A?O", 'Año')
|
||||
while title.startswith(' '):
|
||||
title = title[+1:]
|
||||
while title.endswith(' '):
|
||||
title = title[:-1]
|
||||
title = title.replace("a?o", 'año').replace("a?O", 'año').replace("A?o", 'Año').replace("A?O", 'Año').strip()
|
||||
|
||||
#Preparamos calidad
|
||||
item_local.quality = item_local.quality.replace("[ ", "").replace(" ]", "") #Preparamos calidad para Series
|
||||
item_local.quality = re.sub(r'\[\d{4}\]', '', item_local.quality) #Quitar año, si lo tiene
|
||||
item_local.quality = re.sub(r'\[Cap.*?\]', '', item_local.quality) #Quitar episodios, si lo tiene
|
||||
item_local.quality = re.sub(r'\[Docu.*?\]', '', item_local.quality) #Quitar tipo contenidos, si lo tiene
|
||||
if "[es-" in item_local.quality.lower() or (("cast" in item_local.quality.lower() or "spani" in item_local.quality.lower()) and ("eng" in item_local.quality.lower() or "ing" in item_local.quality.lower())): #Mirar si es DUAL
|
||||
item_local.language += ["DUAL"] #Salvar DUAL en idioma
|
||||
#Mirar si es DUAL
|
||||
if "VOS" in item_local.language and "DUAL" not in item_local.language and ("[sp" in item_local.quality.lower() or "espa" in item_local.quality.lower() or "cast" in item_local.quality.lower() or "spani" in item_local.quality.lower()):
|
||||
item_local.language[0:0] = ["DUAL"]
|
||||
if ("[es-" in item_local.quality.lower() or (("cast" in item_local.quality.lower() or "espa" in item_local.quality.lower() or "spani" in item_local.quality.lower()) and ("eng" in item_local.quality.lower() or "ing" in item_local.quality.lower()))) and "DUAL" not in item_local.language: #Mirar si es DUAL
|
||||
item_local.language[0:0] = ["DUAL"] #Salvar DUAL en idioma
|
||||
item_local.quality = re.sub(r'\[[es|ES]-\w+]', '', item_local.quality) #borrar DUAL
|
||||
item_local.quality = re.sub(r'[\s|-][c|C]aste.+', '', item_local.quality) #Borrar después de Castellano
|
||||
item_local.quality = re.sub(r'[\s|-][e|E]spa.+', '', item_local.quality) #Borrar después de Español
|
||||
@@ -735,9 +728,7 @@ def listado_busqueda(item):
|
||||
item_local.quality = re.sub(r'[\s|-][i|I|e|E]ngl.+', '', item_local.quality) #Borrar después de Inglés-English
|
||||
item_local.quality = item_local.quality.replace("[", "").replace("]", " ").replace("ALTA DEFINICION", "HDTV").replace(" Cap", "")
|
||||
#Borrar palabras innecesarias restantes
|
||||
item_local.quality = item_local.quality.replace("Espaol", "").replace("Español", "").replace("Espa", "").replace("Castellano ", "").replace("Castellano", "").replace("Spanish", "").replace("English", "").replace("Ingles", "").replace("Latino", "").replace("+Subs", "").replace("-Subs", "").replace("Subs", "").replace("VOSE", "").replace("VOS", "")
|
||||
while item_local.quality.endswith(" "): #Borrar espacios de cola
|
||||
item_local.quality = item_local.quality[:-1]
|
||||
item_local.quality = item_local.quality.replace("Espaol", "").replace("Español", "").replace("Espa", "").replace("Castellano ", "").replace("Castellano", "").replace("Spanish", "").replace("English", "").replace("Ingles", "").replace("Latino", "").replace("+Subs", "").replace("-Subs", "").replace("Subs", "").replace("VOSE", "").replace("VOS", "").strip()
|
||||
|
||||
#Limpieza final del título y guardado en las variables según su tipo de contenido
|
||||
item_local.title = title
|
||||
@@ -816,7 +807,7 @@ def listado_busqueda(item):
|
||||
#Agrega el item local a la lista itemlist
|
||||
itemlist.append(item_local.clone())
|
||||
|
||||
if not category: #Si este campo no existe es que viene de la primera pasada de una búsqueda global
|
||||
if not item.category: #Si este campo no existe es que viene de la primera pasada de una búsqueda global
|
||||
return itemlist #Retornamos sin pasar por la fase de maquillaje para ahorra tiempo
|
||||
|
||||
#Pasamos a TMDB la lista completa Itemlist
|
||||
@@ -872,12 +863,12 @@ def listado_busqueda(item):
|
||||
if config.get_setting("unify"): #Si Titulos Inteligentes SÍ seleccionados:
|
||||
title = title.replace("[", "-").replace("]", "-")
|
||||
|
||||
title = title.replace("--", "").replace(" []", "").replace("()", "").replace("(/)", "").replace("[/]", "")
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\]\[\/COLOR\]', '', title)
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', title)
|
||||
title = title.replace("--", "").replace(" []", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', title).strip()
|
||||
title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', title).strip()
|
||||
item_local.title = title
|
||||
|
||||
logger.debug("url: " + item_local.url + " / title: " + item_local.title + " / content title: " + item_local.contentTitle + "/" + item_local.contentSerieName + " / calidad: " + item_local.quality + "[" + str(item_local.language) + "]" + " / calidad ORG: " + calidad + " / year: " + year + " / tamaño: " + size)
|
||||
logger.debug("url: " + item_local.url + " / title: " + item_local.title + " / content title: " + item_local.contentTitle + "/" + item_local.contentSerieName + " / calidad: " + item_local.quality + "[" + str(item_local.language) + "]" + " / year: " + str(item_local.infoLabels['year']))
|
||||
|
||||
#logger.debug(item_local)
|
||||
|
||||
@@ -889,7 +880,6 @@ def listado_busqueda(item):
|
||||
return itemlist
|
||||
|
||||
def findvideos(item):
|
||||
import xbmc
|
||||
from core import channeltools
|
||||
logger.info()
|
||||
itemlist = []
|
||||
@@ -997,18 +987,31 @@ def findvideos(item):
|
||||
verificar_enlaces_descargas = -1 #Verificar todos los enlaces Descargar
|
||||
verificar_enlaces_descargas_validos = True #"¿Contar sólo enlaces 'verificados' en Descargar?"
|
||||
excluir_enlaces_descargas = [] #Lista vacía de servidores excluidos en Descargar
|
||||
|
||||
|
||||
# Saber si estamos en una ventana emergente lanzada desde una viñeta del menú principal,
|
||||
# con la función "play_from_library"
|
||||
unify_status = False
|
||||
if xbmc.getCondVisibility('Window.IsMedia') == 1:
|
||||
try:
|
||||
import xbmc
|
||||
if xbmc.getCondVisibility('Window.IsMedia') == 1:
|
||||
unify_status = config.get_setting("unify")
|
||||
except:
|
||||
unify_status = config.get_setting("unify")
|
||||
|
||||
#Salvamos la información de max num. de episodios por temporada para despues de TMDB
|
||||
if item.infoLabels['temporada_num_episodios']:
|
||||
num_episodios = item.infoLabels['temporada_num_episodios']
|
||||
else:
|
||||
num_episodios = 1
|
||||
|
||||
# Obtener la información actualizada del Episodio, si no la hay
|
||||
if not item.infoLabels['tmdb_id'] or (not item.infoLabels['episodio_titulo'] and item.contentType == 'episode'):
|
||||
tmdb.set_infoLabels(item, True)
|
||||
elif (not item.infoLabels['tvdb_id'] and item.contentType == 'episode') or item.contentChannel == "videolibrary":
|
||||
tmdb.set_infoLabels(item, True)
|
||||
#Restauramos la información de max num. de episodios por temporada despues de TMDB
|
||||
if item.infoLabels['temporada_num_episodios'] and num_episodios > item.infoLabels['temporada_num_episodios']:
|
||||
item.infoLabels['temporada_num_episodios'] = num_episodios
|
||||
|
||||
# Descarga la página
|
||||
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
|
||||
@@ -1028,6 +1031,8 @@ def findvideos(item):
|
||||
item.infoLabels['episodio_titulo'] = re.sub(r'\s?\[.*?\]', '', item.infoLabels['episodio_titulo'])
|
||||
if item.infoLabels['episodio_titulo'] == item.contentSerieName:
|
||||
item.infoLabels['episodio_titulo'] = ''
|
||||
if item.infoLabels['aired'] and item.contentType == "episode":
|
||||
item.infoLabels['year'] = scrapertools.find_single_match(str(item.infoLabels['aired']), r'\/(\d{4})')
|
||||
|
||||
#Generamos una copia de Item para trabajar sobre ella
|
||||
item_local = item.clone()
|
||||
@@ -1057,10 +1062,10 @@ def findvideos(item):
|
||||
else:
|
||||
title = item_local.title
|
||||
title_gen = title
|
||||
|
||||
title_gen = re.sub(r'\s\[COLOR \w+\]\[\]\[\/COLOR\]', '', title_gen) #Quitamos etiquetas vacías
|
||||
title_gen = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', title_gen) #Quitamos colores vacíos
|
||||
title_gen = title_gen.replace(" []", "") #Quitamos etiquetas vacías
|
||||
|
||||
title_gen = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', title_gen).strip() #Quitamos etiquetas vacías
|
||||
title_gen = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', title_gen).strip() #Quitamos colores vacíos
|
||||
title_gen = title_gen.replace(" []", "").strip() #Quitamos etiquetas vacías
|
||||
|
||||
if not unify_status: #Si Titulos Inteligentes NO seleccionados:
|
||||
title_gen = '**- [COLOR gold]Enlaces Ver: [/COLOR]%s[COLOR gold] -**[/COLOR]' % (title_gen)
|
||||
@@ -1074,9 +1079,9 @@ def findvideos(item):
|
||||
|
||||
#Ahora pintamos el link del Torrent, si lo hay
|
||||
if item_local.url: # Hay Torrent ?
|
||||
item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][Torrent][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red][%s][/COLOR]' % (item_local.quality, str(item_local.language)) #Preparamos título de Torrent
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\]\[\/COLOR\]', '', item_local.title) #Quitamos etiquetas vacías
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', item_local.title) #Quitamos colores vacíos
|
||||
item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][Torrent][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.quality, str(item_local.language)) #Preparamos título de Torrent
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', item_local.title).strip() #Quitamos etiquetas vacías
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', item_local.title).strip() #Quitamos colores vacíos
|
||||
item_local.alive = "??" #Calidad del link sin verificar
|
||||
item_local.action = "play" #Visualizar vídeo
|
||||
|
||||
@@ -1156,9 +1161,9 @@ def findvideos(item):
|
||||
item_local.action = "play"
|
||||
item_local.server = servidor
|
||||
item_local.url = enlace
|
||||
item_local.title = item_local.title.replace("[]", "")
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\]\[\/COLOR\]', '', item_local.title)
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', item_local.title)
|
||||
item_local.title = item_local.title.replace("[]", "").strip()
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', item_local.title).strip()
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', item_local.title).strip()
|
||||
itemlist.append(item_local.clone())
|
||||
except:
|
||||
pass
|
||||
@@ -1249,9 +1254,9 @@ def findvideos(item):
|
||||
item_local.action = "play"
|
||||
item_local.server = servidor
|
||||
item_local.url = enlace
|
||||
item_local.title = parte_title.replace("[]", "")
|
||||
item_local.title = re.sub(r'\[COLOR \w+\]\[\]\[\/COLOR\]', '', item_local.title)
|
||||
item_local.title = re.sub(r'\[COLOR \w+\]-\[\/COLOR\]', '', item_local.title)
|
||||
item_local.title = parte_title.replace("[]", "").strip()
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', item_local.title).strip()
|
||||
item_local.title = re.sub(r'\[COLOR \w+\]-\[\/COLOR\]', '', item_local.title).strip()
|
||||
itemlist.append(item_local.clone())
|
||||
except:
|
||||
pass
|
||||
@@ -1426,10 +1431,10 @@ def episodios(item):
|
||||
item_local.title = '%s [%s] [%s] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.title, item_local.infoLabels['year'], rating, item_local.quality, str(item_local.language))
|
||||
|
||||
#Quitamos campos vacíos
|
||||
item_local.infoLabels['episodio_titulo'] = item_local.infoLabels['episodio_titulo'].replace(" []", "")
|
||||
item_local.title = item_local.title.replace(" []", "")
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\]\[\/COLOR\]', '', item_local.title)
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]-\[\/COLOR\]', '', item_local.title)
|
||||
item_local.infoLabels['episodio_titulo'] = item_local.infoLabels['episodio_titulo'].replace(" []", "").strip()
|
||||
item_local.title = item_local.title.replace(" []", "").strip()
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', item_local.title).strip()
|
||||
item_local.title = re.sub(r'\s\[COLOR \w+\]-\[\/COLOR\]', '', item_local.title).strip()
|
||||
if num_episodios < item_local.contentEpisodeNumber:
|
||||
num_episodios = item_local.contentEpisodeNumber
|
||||
if num_episodios and not item_local.infoLabels['temporada_num_episodios']:
|
||||
|
||||
@@ -38,8 +38,8 @@ def mainlist(item):
|
||||
Item(channel=item.channel,
|
||||
title="Español",
|
||||
action="listado",
|
||||
url=host + "peliculas/en-espanol/"
|
||||
))
|
||||
url=host + "peliculas/en-espanol/",
|
||||
thumbnail = get_thumb("channels_spanish.png")))
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
title="Latino",
|
||||
@@ -54,9 +54,10 @@ def mainlist(item):
|
||||
thumbnail=get_thumb("channels_vos.png")))
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
title="Categorias",
|
||||
title="Generos",
|
||||
action="categories",
|
||||
url=host
|
||||
url=host,
|
||||
thumbnail=get_thumb('genres', auto=True)
|
||||
))
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
@@ -95,7 +96,6 @@ def search(item, texto):
|
||||
post = "keyword=%s" % texto
|
||||
data = httptools.downloadpage(item.url, post=post).data
|
||||
data = data.replace('\\"', '"').replace('\\/', '/')
|
||||
logger.debug("data %s" % data)
|
||||
|
||||
pattern = 'url\((.*?)\).+?<a href="([^"]+)".*?class="ss-title">(.*?)</a>'
|
||||
matches = re.compile(pattern, re.DOTALL).findall(data)
|
||||
@@ -146,14 +146,6 @@ def listado(item):
|
||||
title=">> Página siguiente",
|
||||
url=url,
|
||||
thumbnail=get_thumb("next.png")))
|
||||
|
||||
for item in itemlist:
|
||||
if item.infoLabels['plot'] == '':
|
||||
data = httptools.downloadpage(item.url).data
|
||||
item.plot = scrapertools.find_single_match(data, '<div class="desc">([^<]+)</div>').strip()
|
||||
item.fanart = scrapertools.find_single_match(data, '<meta property="og:image" content="([^"]+)"/>')
|
||||
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
@@ -172,10 +164,13 @@ def findvideos(item):
|
||||
video_info = scrapertools.find_single_match(data, "load_player\('([^']+).*?([^']+)")
|
||||
movie_info = scrapertools.find_single_match(item.url,
|
||||
'http:\/\/ver-peliculas\.(io|org)\/peliculas\/(\d+)-(.*?)-\d{4}-online\.')
|
||||
|
||||
|
||||
movie_host = movie_info[0]
|
||||
movie_id = movie_info[1]
|
||||
movie_name = movie_info[2]
|
||||
sub = video_info[1]
|
||||
movie_id = scrapertools.find_single_match(data,'id=idpelicula value=(.*?)>')
|
||||
movie_name = scrapertools.find_single_match(data,'id=nombreslug value=(.*?)>')
|
||||
sub = scrapertools.find_single_match(data, 'id=imdb value=(.*?)>')
|
||||
sub = '%s/subtix/%s.srt' % (movie_host, sub)
|
||||
url_base = 'http://ver-peliculas.%s/core/api.php?id=%s&slug=%s' % (movie_host, movie_id, movie_name)
|
||||
data = httptools.downloadpage(url_base).data
|
||||
json_data = jsontools.load(data)
|
||||
@@ -185,8 +180,10 @@ def findvideos(item):
|
||||
video_base_url = host + '/core/videofinal.php'
|
||||
if video_list[videoitem] != None:
|
||||
video_lang = video_list[videoitem]
|
||||
languages = ['latino', 'spanish', 'subtitulos']
|
||||
languages = ['latino', 'spanish', 'subtitulos', 'subtitulosp']
|
||||
for lang in languages:
|
||||
if lang not in video_lang:
|
||||
continue
|
||||
if video_lang[lang] != None:
|
||||
if not isinstance(video_lang[lang], int):
|
||||
video_id = video_lang[lang][0]["video"]
|
||||
@@ -199,15 +196,20 @@ def findvideos(item):
|
||||
for video_link in sources:
|
||||
url = video_link['sources']
|
||||
if url not in duplicated and server!='drive':
|
||||
lang = lang.capitalize()
|
||||
if lang == 'Spanish':
|
||||
|
||||
if lang == 'spanish':
|
||||
lang = 'Español'
|
||||
elif 'sub' in lang:
|
||||
lang = 'Subtitulada'
|
||||
lang = lang.capitalize()
|
||||
title = 'Ver en %s [' + lang + ']'
|
||||
thumbnail = servertools.guess_server_thumbnail(server)
|
||||
itemlist.append(item.clone(title=title,
|
||||
url=url,
|
||||
thumbnail=thumbnail,
|
||||
action='play'
|
||||
action='play',
|
||||
language=lang
|
||||
|
||||
))
|
||||
duplicated.append(url)
|
||||
tmdb.set_infoLabels(itemlist, __modo_grafico__)
|
||||
|
||||
37
plugin.video.alfa/channels/wikiseries.json
Normal file
37
plugin.video.alfa/channels/wikiseries.json
Normal file
@@ -0,0 +1,37 @@
|
||||
{
|
||||
"id": "wikiseries",
|
||||
"name": "WikiSeries",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": ["lat", "cast", "vo", "vose"],
|
||||
"thumbnail": "https://s31.postimg.cc/tnmcrytnv/16142379_1847422438815031_3788419094563167644_n.jpg",
|
||||
"banner": "",
|
||||
"categories": [
|
||||
"tvshow"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Incluir en busqueda global",
|
||||
"default": false,
|
||||
"enabled": false,
|
||||
"visible": false
|
||||
},
|
||||
{
|
||||
"id": "filter_languages",
|
||||
"type": "list",
|
||||
"label": "Mostrar enlaces en idioma...",
|
||||
"default": 0,
|
||||
"enabled": true,
|
||||
"visible": true,
|
||||
"lvalues": [
|
||||
"No filtrar",
|
||||
"Latino",
|
||||
"Español",
|
||||
"VOSE",
|
||||
"VO"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
251
plugin.video.alfa/channels/wikiseries.py
Normal file
251
plugin.video.alfa/channels/wikiseries.py
Normal file
@@ -0,0 +1,251 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# -*- Channel wikiseries -*-
|
||||
# -*- Created for Alfa-addon -*-
|
||||
# -*- By the Alfa Develop Group -*-
|
||||
|
||||
import re
|
||||
|
||||
from channels import autoplay
|
||||
from channels import filtertools
|
||||
from core import httptools
|
||||
from core import scrapertools
|
||||
from core import servertools
|
||||
from core import jsontools
|
||||
from core import tmdb
|
||||
from core.item import Item
|
||||
from platformcode import config, logger
|
||||
from channelselector import get_thumb
|
||||
|
||||
host = 'http://www.wikiseriesonline.nu/'
|
||||
|
||||
list_language = ['Latino', 'Español', 'VOSE', 'VO']
|
||||
list_quality = []
|
||||
list_servers = ['openload']
|
||||
|
||||
def get_source(url):
|
||||
logger.info()
|
||||
data = httptools.downloadpage(url).data
|
||||
data = re.sub(r'"|\n|\r|\t| |<br>|\s{2,}', "", data)
|
||||
return data
|
||||
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
|
||||
autoplay.init(item.channel, list_servers, list_quality)
|
||||
|
||||
itemlist =[]
|
||||
|
||||
itemlist.append(
|
||||
Item(channel=item.channel, title="Nuevos Capitulos", action="list_all", url=host + 'category/episode',
|
||||
thumbnail=get_thumb('new episodes', auto=True)))
|
||||
|
||||
itemlist.append(Item(channel=item.channel, title="Todas", action="list_all", url=host + 'category/serie',
|
||||
thumbnail=get_thumb('all', auto=True)))
|
||||
|
||||
itemlist.append(Item(channel=item.channel, title="Generos", action="genres",
|
||||
url=host + 'latest-episodes', thumbnail=get_thumb('genres', auto=True)))
|
||||
|
||||
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host + '?s=',
|
||||
thumbnail=get_thumb('search', auto=True)))
|
||||
|
||||
itemlist = filtertools.show_option(itemlist, item.channel, list_language, list_quality)
|
||||
autoplay.show_option(item.channel, itemlist)
|
||||
return itemlist
|
||||
|
||||
def list_all(item):
|
||||
logger.info()
|
||||
|
||||
itemlist = []
|
||||
|
||||
data = get_source(item.url)
|
||||
patron = '39;src=.*?(http.*?)style=display:.*?one-line href=(.*?) title=.*?>(.*?)<'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
|
||||
url = scrapedurl
|
||||
scrapedtitle = scrapedtitle.replace('×','x')
|
||||
|
||||
contentSerieName = scrapedtitle
|
||||
action = 'seasons'
|
||||
|
||||
if 'episode' in item.url:
|
||||
scrapedtitle, season, episode = scrapertools.find_single_match(scrapedtitle, '(.*?) (\d+)x(\d+)')
|
||||
contentSerieName = scrapedtitle
|
||||
scrapedtitle = '%sx%s - %s' % (season, episode, scrapedtitle)
|
||||
action='findvideos'
|
||||
|
||||
thumbnail = scrapedthumbnail
|
||||
new_item = Item(channel=item.channel, title=scrapedtitle, url=url,
|
||||
thumbnail=thumbnail, contentSerieName=contentSerieName, action=action,
|
||||
context=filtertools.context(item, list_language, list_quality))
|
||||
|
||||
if 'episode' in item.url:
|
||||
new_item.contentSeasonNumber = season
|
||||
new_item.contentepisodeNumber = episode
|
||||
new_item.context = []
|
||||
|
||||
itemlist.append(new_item)
|
||||
|
||||
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
|
||||
|
||||
# Paginacion
|
||||
next_page = scrapertools.find_single_match(data, 'rel=next href=(.*?)>»</a>')
|
||||
if next_page != '':
|
||||
itemlist.append(Item(channel=item.channel, action="list_all", title='Siguiente >>>',
|
||||
url=next_page, thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png',
|
||||
type=item.type))
|
||||
return itemlist
|
||||
|
||||
|
||||
def genres(item):
|
||||
|
||||
itemlist = []
|
||||
|
||||
data = get_source(host)
|
||||
patron = '<li> <a href=(/category/.*?)>(.*?)</a>'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for scrapedurl, scrapedtitle in matches:
|
||||
|
||||
if scrapedtitle != 'Series':
|
||||
itemlist.append(Item(channel=item.channel, title=scrapedtitle, url=host + scrapedurl, action='list_all'))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def seasons(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
|
||||
data = get_source(item.url)
|
||||
|
||||
patron = 'data-season-num=1>(.*?)</span>'
|
||||
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
infoLabels = item.infoLabels
|
||||
for scrapedseason in matches:
|
||||
contentSeasonNumber = scrapedseason
|
||||
title = 'Temporada %s' % scrapedseason
|
||||
infoLabels['season'] = contentSeasonNumber
|
||||
|
||||
itemlist.append(Item(channel=item.channel, action='episodesxseason', url=item.url, title=title,
|
||||
contentSeasonNumber=contentSeasonNumber, infoLabels=infoLabels))
|
||||
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
|
||||
|
||||
if config.get_videolibrary_support() and len(itemlist) > 0:
|
||||
itemlist.append(
|
||||
Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
|
||||
action="add_serie_to_library", extra="all_episodes", contentSerieName=item.contentSerieName,
|
||||
extra1='library'))
|
||||
|
||||
return itemlist
|
||||
|
||||
def all_episodes(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
templist = seasons(item)
|
||||
for tempitem in templist:
|
||||
itemlist += episodesxseason(tempitem)
|
||||
return itemlist
|
||||
|
||||
def episodesxseason(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = get_source(item.url)
|
||||
season = item.contentSeasonNumber
|
||||
patron = '<li class=ep-list-item id=s%se(\d+)>.*?<a href=(.*?) >.*?name>(.*?)<.*?class=lgn (.*?)</a>' % season
|
||||
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
infoLabels = item.infoLabels
|
||||
for scrapedepi, scrapedurl, scrapedtitle, languages in matches:
|
||||
url = scrapedurl
|
||||
language = scrapertools.find_multiple_matches(languages, 'title=(.*?)>')
|
||||
contentEpisodeNumber = scrapedepi
|
||||
title = '%sx%s - %s %s' % (season, contentEpisodeNumber, scrapedtitle, language)
|
||||
infoLabels['episode'] = contentEpisodeNumber
|
||||
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url,
|
||||
contentSerieName=item.contentSerieName, contentEpisodeNumber=contentEpisodeNumber,
|
||||
language=language, infoLabels=infoLabels))
|
||||
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
|
||||
return itemlist
|
||||
|
||||
def search(item, text):
|
||||
logger.info()
|
||||
|
||||
item.url = item.url + text
|
||||
item.text = text
|
||||
item.type = 'search'
|
||||
if text != '':
|
||||
#return list_all(item)
|
||||
return search_results(item)
|
||||
|
||||
|
||||
def search_results(item):
|
||||
import urllib
|
||||
itemlist = []
|
||||
headers={"Origin": "http://www.wikiseriesonline.nu",
|
||||
"Accept-Encoding": "gzip, deflate", "Host": "www.wikiseriesonline.nu",
|
||||
"Accept-Language": "es-ES,es;q=0.8,en;q=0.6",
|
||||
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
|
||||
"Accept": "*/*", "Referer": item.url,
|
||||
"X-Requested-With": "XMLHttpRequest", "Connection": "keep-alive", "Content-Length": "7"}
|
||||
post = {"n":item.text}
|
||||
post = urllib.urlencode(post)
|
||||
url = host + 'wp-content/themes/wikiSeries/searchajaxresponse.php'
|
||||
data = httptools.downloadpage(url, post=post, headers=headers).data
|
||||
data = re.sub(r'"|\n|\r|\t| |<br>|\s{2,}', "", data)
|
||||
|
||||
patron = "<!-- .Posts -->.*?<a href=(.*?)>.*?src=(.*?) .*?titleinst>(.*?)<"
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
|
||||
if item.text.lower() in scrapedtitle.lower():
|
||||
itemlist.append(Item(channel=item.channel, title=scrapedtitle, contentSerieName=scrapedtitle, url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail, action='seasons',
|
||||
context=filtertools.context(item, list_language, list_quality)))
|
||||
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def findvideos(item):
|
||||
|
||||
itemlist = []
|
||||
data=get_source(item.url)
|
||||
patron = '<a href=(/reproductor.*?)target'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for link in matches:
|
||||
video_data = get_source(host+link)
|
||||
language = ''
|
||||
if 'latino' in link.lower():
|
||||
language='Latino'
|
||||
elif 'español' in link.lower():
|
||||
language = 'Español'
|
||||
elif 'subtitulado' in link.lower():
|
||||
language = 'VOSE'
|
||||
elif 'vo' in link.lower():
|
||||
language = 'VO'
|
||||
|
||||
url = scrapertools.find_single_match(video_data, '<iframe src=(.*?) scrolling')
|
||||
title = '%s [%s]'
|
||||
|
||||
itemlist.append(Item(channel=item.channel, title=title, url=url, action='play', language=language,
|
||||
infoLabels=item.infoLabels))
|
||||
|
||||
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % (x.server.capitalize(), x.language))
|
||||
|
||||
# Requerido para FilterTools
|
||||
|
||||
itemlist = filtertools.get_links(itemlist, item, list_language)
|
||||
|
||||
# Requerido para AutoPlay
|
||||
|
||||
autoplay.start(itemlist, item)
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -458,7 +458,7 @@ def get_season_and_episode(title):
|
||||
"""
|
||||
filename = ""
|
||||
|
||||
patrons = ["(\d+)x(\d+)", "(?:s|t)(\d+)e(\d+)",
|
||||
patrons = ["(\d+)x(\d+)", "(\d+)×(\d+)", "(?:s|t)(\d+)e(\d+)",
|
||||
"(?:season|temp\w*)\s*(\d+)\s*(?:capitulo|epi\w*)\s*(\d+)"]
|
||||
|
||||
for patron in patrons:
|
||||
|
||||
@@ -203,13 +203,17 @@ def trakt_check(itemlist):
|
||||
id_result = ''
|
||||
# check = u'\u221a'
|
||||
check = 'v'
|
||||
get_sync_from_file()
|
||||
synced = False
|
||||
try:
|
||||
for item in itemlist:
|
||||
info = item.infoLabels
|
||||
|
||||
if info != '' and info['mediatype'] in ['movie', 'episode'] and item.channel != 'videolibrary':
|
||||
|
||||
if not synced:
|
||||
get_sync_from_file()
|
||||
synced = True
|
||||
|
||||
mediatype = 'movies'
|
||||
id_type = 'tmdb'
|
||||
|
||||
|
||||
10
plugin.video.alfa/lib/megaserver/client.py
Executable file → Normal file
10
plugin.video.alfa/lib/megaserver/client.py
Executable file → Normal file
@@ -6,7 +6,6 @@ import time
|
||||
import urllib
|
||||
from threading import Thread
|
||||
|
||||
from Crypto.Cipher import AES
|
||||
from file import File
|
||||
from handler import Handler
|
||||
from platformcode import logger
|
||||
@@ -164,8 +163,13 @@ class Client(object):
|
||||
return self.base64urlencode(self.a32_to_str(a))
|
||||
|
||||
def aes_cbc_decrypt(self, data, key):
|
||||
decryptor = AES.new(key, AES.MODE_CBC, '\0' * 16)
|
||||
#decryptor = aes.AESModeOfOperationCBC(key, iv='\0' * 16)
|
||||
try:
|
||||
from Crypto.Cipher import AES
|
||||
decryptor = AES.new(key, AES.MODE_CBC, '\0' * 16)
|
||||
#decryptor = aes.AESModeOfOperationCBC(key, iv='\0' * 16)
|
||||
except:
|
||||
import jscrypto
|
||||
decryptor = jscrypto.new(key, jscrypto.MODE_CBC, '\0' * 16)
|
||||
return decryptor.decrypt(data)
|
||||
|
||||
def aes_cbc_decrypt_a32(self,data, key):
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
import urllib2
|
||||
from Crypto.Cipher import AES
|
||||
from Crypto.Util import Counter
|
||||
|
||||
class Cursor(object):
|
||||
def __init__(self, file):
|
||||
@@ -61,8 +59,14 @@ class Cursor(object):
|
||||
|
||||
def prepare_decoder(self,offset):
|
||||
initial_value = self.initial_value + int(offset/16)
|
||||
self.decryptor = AES.new(self._file._client.a32_to_str(self.k), AES.MODE_CTR, counter = Counter.new(128, initial_value = initial_value))
|
||||
#self.decryptor = aes.AESModeOfOperationCTR(f=self,key=self._client.a32_to_str(self.k),counter=aes.Counter(initial_value=initial_value))
|
||||
try:
|
||||
from Crypto.Cipher import AES
|
||||
from Crypto.Util import Counter
|
||||
self.decryptor = AES.new(self._file._client.a32_to_str(self.k), AES.MODE_CTR, counter = Counter.new(128, initial_value = initial_value))
|
||||
except:
|
||||
from pyaes import aes
|
||||
self.decryptor = aes.AESModeOfOperationCTR(f=self,key=self._client.a32_to_str(self.k),counter=aes.Counter(initial_value=initial_value))
|
||||
|
||||
rest = offset - int(offset/16)*16
|
||||
if rest:
|
||||
self.decode(str(0)*rest)
|
||||
Binary file not shown.
|
After Width: | Height: | Size: 20 KiB |
@@ -3,6 +3,10 @@
|
||||
"find_videos": {
|
||||
"ignore_urls": [],
|
||||
"patterns": [
|
||||
{
|
||||
"pattern": "clipwatching.com/(e.*?.html)",
|
||||
"url": "http://clipwatching.com/\\1"
|
||||
},
|
||||
{
|
||||
"pattern": "clipwatching.com/(\\w+)",
|
||||
"url": "http://clipwatching.com/\\1.html"
|
||||
|
||||
@@ -7,31 +7,21 @@ from platformcode import logger
|
||||
|
||||
def test_video_exists(page_url):
|
||||
logger.info("(page_url='%s')" % page_url)
|
||||
|
||||
response = httptools.downloadpage(page_url)
|
||||
if response.code == 404:
|
||||
return False, "[Dailymotion] El archivo no existe o ha sido borrado"
|
||||
|
||||
return True, ""
|
||||
|
||||
|
||||
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
|
||||
logger.info("(page_url='%s')" % page_url)
|
||||
video_urls = []
|
||||
|
||||
response = httptools.downloadpage(page_url, cookies=False)
|
||||
cookie = {'Cookie': response.headers["set-cookie"]}
|
||||
data = response.data.replace("\\", "")
|
||||
|
||||
'''
|
||||
"240":[{"type":"video/mp4","url":"http://www.dailymotion.com/cdn/H264-320x240/video/x33mvht.mp4?auth=1441130963-2562-u49z9kdc-84796332ccab3c7ce84e01c67a18b689"}]
|
||||
'''
|
||||
|
||||
subtitle = scrapertools.find_single_match(data, '"subtitles":.*?"es":.*?urls":\["([^"]+)"')
|
||||
qualities = scrapertools.find_multiple_matches(data, '"([^"]+)":(\[\{"type":".*?\}\])')
|
||||
for calidad, urls in qualities:
|
||||
if calidad == "auto":
|
||||
continue
|
||||
patron = '"type":"(?:video|application)/([^"]+)","url":"([^"]+)"'
|
||||
matches = scrapertools.find_multiple_matches(urls, patron)
|
||||
for stream_type, stream_url in matches:
|
||||
@@ -41,10 +31,10 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
|
||||
follow_redirects=False).headers.get("location", stream_url)
|
||||
else:
|
||||
data_m3u8 = httptools.downloadpage(stream_url).data
|
||||
stream_url = scrapertools.find_single_match(data_m3u8, '(http:.*?\.m3u8)')
|
||||
stream_url_http = scrapertools.find_single_match(data_m3u8, '(http:.*?\.m3u8)')
|
||||
if stream_url_http:
|
||||
stream_url = stream_url_http
|
||||
video_urls.append(["%sp .%s [dailymotion]" % (calidad, stream_type), stream_url, 0, subtitle])
|
||||
|
||||
for video_url in video_urls:
|
||||
logger.info("%s - %s" % (video_url[0], video_url[1]))
|
||||
|
||||
return video_urls
|
||||
|
||||
42
plugin.video.alfa/servers/gounlimited.json
Normal file
42
plugin.video.alfa/servers/gounlimited.json
Normal file
@@ -0,0 +1,42 @@
|
||||
{
|
||||
"active": true,
|
||||
"find_videos": {
|
||||
"ignore_urls": [],
|
||||
"patterns": [
|
||||
{
|
||||
"pattern": "https://gounlimited.to/embed-(.*?).html",
|
||||
"url": "https://gounlimited.to/embed-\\1.html"
|
||||
}
|
||||
]
|
||||
},
|
||||
"free": true,
|
||||
"id": "gounlimited",
|
||||
"name": "gounlimited",
|
||||
"settings": [
|
||||
{
|
||||
"default": false,
|
||||
"enabled": true,
|
||||
"id": "black_list",
|
||||
"label": "Incluir en lista negra",
|
||||
"type": "bool",
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"default": 0,
|
||||
"enabled": true,
|
||||
"id": "favorites_servers_list",
|
||||
"label": "Incluir en lista de favoritos",
|
||||
"lvalues": [
|
||||
"No",
|
||||
"1",
|
||||
"2",
|
||||
"3",
|
||||
"4",
|
||||
"5"
|
||||
],
|
||||
"type": "list",
|
||||
"visible": false
|
||||
}
|
||||
],
|
||||
"thumbnail": "https://s31.postimg.cc/bsiaj2q2j/goo.png"
|
||||
}
|
||||
32
plugin.video.alfa/servers/gounlimited.py
Normal file
32
plugin.video.alfa/servers/gounlimited.py
Normal file
@@ -0,0 +1,32 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# --------------------------------------------------------
|
||||
# Conector GoUnlimited By Alfa development Group
|
||||
# --------------------------------------------------------
|
||||
|
||||
import re
|
||||
from core import httptools
|
||||
from platformcode import logger
|
||||
from core import scrapertools
|
||||
from lib import jsunpack
|
||||
|
||||
|
||||
def test_video_exists(page_url):
|
||||
data = httptools.downloadpage(page_url).data
|
||||
if data == "File was deleted":
|
||||
return False, "[gounlimited] El video ha sido borrado"
|
||||
return True, ""
|
||||
|
||||
|
||||
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
|
||||
logger.info("url=" + page_url)
|
||||
video_urls = []
|
||||
data = httptools.downloadpage(page_url).data
|
||||
data = re.sub(r'"|\n|\r|\t| |<br>|\s{2,}', "", data)
|
||||
packed_data = scrapertools.find_single_match(data, "javascript'>(eval.*?)</script>")
|
||||
unpacked = jsunpack.unpack(packed_data)
|
||||
patron = "file:(.*?),label:(.*?)}"
|
||||
matches = re.compile(patron, re.DOTALL).findall(unpacked)
|
||||
for url, quality in matches:
|
||||
video_urls.append(['%s' % quality, url])
|
||||
video_urls.sort(key=lambda x: int(x[0]))
|
||||
return video_urls
|
||||
@@ -8,45 +8,38 @@ from core import jsontools
|
||||
from core import scrapertools
|
||||
from platformcode import logger
|
||||
|
||||
|
||||
def test_video_exists(page_url):
|
||||
logger.info("(page_url='%s')" % page_url)
|
||||
|
||||
# http://netu.tv/watch_video.php=XX solo contiene una redireccion, ir directamente a http://hqq.tv/player/embed_player.php?vid=XX
|
||||
page_url = page_url.replace("http://netu.tv/watch_video.php?v=", "http://hqq.tv/player/embed_player.php?vid=")
|
||||
|
||||
data = httptools.downloadpage(page_url).data
|
||||
|
||||
if "var userid = '';" in data.lower():
|
||||
return False, "[netutv] El archivo no existe o ha sido borrado"
|
||||
|
||||
return True, ""
|
||||
|
||||
|
||||
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
|
||||
logger.info("url=" + page_url)
|
||||
|
||||
if "hash=" in page_url:
|
||||
data = urllib.unquote(httptools.downloadpage(page_url).data)
|
||||
id_video = scrapertools.find_single_match(data, "vid\s*=\s*'([^']+)'")
|
||||
id_video = scrapertools.find_single_match(data, "vid':'([^']+)'")
|
||||
else:
|
||||
id_video = page_url.rsplit("=", 1)[1]
|
||||
page_url_hqq = "http://hqq.watch/player/embed_player.php?vid=%s&autoplay=no" % id_video
|
||||
data_page_url_hqq = httptools.downloadpage(page_url_hqq, add_referer=True).data
|
||||
|
||||
js_wise = scrapertools.find_single_match(data_page_url_hqq,
|
||||
"<script type=[\"']text/javascript[\"']>\s*;?(eval.*?)</script>")
|
||||
data_unwise = jswise(js_wise).replace("\\", "")
|
||||
at = scrapertools.find_single_match(data_unwise, 'var at\s*=\s*"([^"]+)"')
|
||||
http_referer = scrapertools.find_single_match(data_unwise, 'var http_referer\s*=\s*"([^"]+)"')
|
||||
|
||||
url = "http://hqq.watch/sec/player/embed_player.php?iss=&vid=%s&at=%s&autoplayed=yes&referer=on" \
|
||||
"&http_referer=%s&pass=&embed_from=&need_captcha=0&hash_from=" % (id_video, at, http_referer)
|
||||
data_player = httptools.downloadpage(url, add_referer=True).data
|
||||
|
||||
data_unescape = scrapertools.find_multiple_matches(data_player, 'document.write\(unescape\("([^"]+)"')
|
||||
data = ""
|
||||
for d in data_unescape:
|
||||
data += urllib.unquote(d)
|
||||
|
||||
subtitle = scrapertools.find_single_match(data, 'value="sublangs=Spanish.*?sub=([^&]+)&')
|
||||
if not subtitle:
|
||||
subtitle = scrapertools.find_single_match(data, 'value="sublangs=English.*?sub=([^&]+)&')
|
||||
@@ -55,7 +48,6 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
|
||||
"<script type=[\"']text/javascript[\"']>\s*;?(eval.*?)</script>")
|
||||
if js_wise:
|
||||
data_unwise_player = jswise(js_wise).replace("\\", "")
|
||||
|
||||
vars_data = scrapertools.find_single_match(data, '/player/get_md5.php",\s*\{(.*?)\}')
|
||||
matches = scrapertools.find_multiple_matches(vars_data, '\s*([^:]+):\s*([^,]*)[,"]')
|
||||
params = {}
|
||||
@@ -69,22 +61,17 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
|
||||
if not value_var and data_unwise_player:
|
||||
value_var = scrapertools.find_single_match(data_unwise_player, 'var\s*%s\s*=\s*"([^"]+)"' % value)
|
||||
params[key] = value_var
|
||||
|
||||
params = urllib.urlencode(params)
|
||||
head = {'X-Requested-With': 'XMLHttpRequest', 'Referer': url}
|
||||
data = httptools.downloadpage("http://hqq.watch/player/get_md5.php?" + params, headers=head).data
|
||||
|
||||
media_urls = []
|
||||
url_data = jsontools.load(data)
|
||||
media_url = tb(url_data["html5_file"].replace("#", ""))
|
||||
|
||||
media_url = "https:" + tb(url_data["obf_link"].replace("#", "")) + ".mp4.m3u8"
|
||||
video_urls = []
|
||||
media = media_url + "|User-Agent=Mozilla/5.0 (iPhone; CPU iPhone OS 5_0_1 like Mac OS X)"
|
||||
video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [netu.tv]", media, 0, subtitle])
|
||||
|
||||
for video_url in video_urls:
|
||||
logger.info("%s - %s" % (video_url[0], video_url[1]))
|
||||
|
||||
return video_urls
|
||||
|
||||
|
||||
@@ -95,7 +82,6 @@ def tb(b_m3u8_2):
|
||||
while j < len(b_m3u8_2):
|
||||
s2 += "\\u0" + b_m3u8_2[j:(j + 3)]
|
||||
j += 3
|
||||
|
||||
return s2.decode('unicode-escape').encode('ASCII', 'ignore')
|
||||
|
||||
|
||||
@@ -105,15 +91,12 @@ def tb(b_m3u8_2):
|
||||
def jswise(wise):
|
||||
## js2python
|
||||
def js_wise(wise):
|
||||
|
||||
w, i, s, e = wise
|
||||
|
||||
v0 = 0;
|
||||
v1 = 0;
|
||||
v2 = 0
|
||||
v3 = [];
|
||||
v4 = []
|
||||
|
||||
while True:
|
||||
if v0 < 5:
|
||||
v4.append(w[v0])
|
||||
@@ -131,12 +114,10 @@ def jswise(wise):
|
||||
v3.append(s[v2])
|
||||
v2 += 1
|
||||
if len(w) + len(i) + len(s) + len(e) == len(v3) + len(v4) + len(e): break
|
||||
|
||||
v5 = "".join(v3);
|
||||
v6 = "".join(v4)
|
||||
v1 = 0
|
||||
v7 = []
|
||||
|
||||
for v0 in range(0, len(v3), 2):
|
||||
v8 = -1
|
||||
if ord(v6[v1]) % 2: v8 = 1
|
||||
@@ -144,7 +125,6 @@ def jswise(wise):
|
||||
v1 += 1
|
||||
if v1 >= len(v4): v1 = 0
|
||||
return "".join(v7)
|
||||
|
||||
## loop2unobfuscated
|
||||
while True:
|
||||
wise = re.search("var\s.+?\('([^']+)','([^']+)','([^']+)','([^']+)'\)", wise, re.DOTALL)
|
||||
|
||||
@@ -14,7 +14,8 @@ def test_video_exists(page_url):
|
||||
|
||||
if "Streaming link:" in data:
|
||||
return True, ""
|
||||
elif "Unfortunately, the file you want is not available." in data or "Unfortunately, the video you want to see is not available" in data or "This stream doesn" in data:
|
||||
elif "Unfortunately, the file you want is not available." in data or "Unfortunately, the video you want to see is not available" in data or "This stream doesn" in data\
|
||||
or "Page not found" in data:
|
||||
return False, "[Uptobox] El archivo no existe o ha sido borrado"
|
||||
wait = scrapertools.find_single_match(data, "You have to wait ([0-9]+) (minute|second)")
|
||||
if len(wait) > 0:
|
||||
|
||||
Reference in New Issue
Block a user