Merge remote-tracking branch 'alfa-addon/master'
@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="2.7.12" provider-name="Alfa Addon">
<addon id="plugin.video.alfa" name="Alfa" version="2.7.13" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -19,17 +19,15 @@
</assets>
<news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Arreglos[/B][/COLOR]
¤ PoseidonHD ¤ TuPelicula ¤ SeriesLan
¤ danimados ¤ pelisgratis ¤ rexpelis
¤ sipeliculas ¤ yape ¤ DivxTotal
¤ EliteTorrent ¤ GranTorrent ¤ Newpct1
¤ pelispedia ¤ pepecine ¤ cuevana2
¤ cuevana2español
¤ cinetux ¤ porntrex ¤ repelis
¤ fembed ¤ uptobox ¤ vivo
¤ seriesmetro ¤ DivxTotal ¤ EliteTorrent
¤ EstrenosGo ¤ GranTorrent

[COLOR green][B]Novedades[/B][/COLOR]
¤ AnimeBoom ¤ SeriesMetro ¤ Pack canales +18
¤ Pack canales +18

Agradecimientos a @mrgaturus y @paeznet por colaborar en esta versión
Agradecimientos a @paeznet por colaborar en esta versión

</news>
<description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>

plugin.video.alfa/channels/LIKUOO.json (Normal file, +16 lines)
@@ -0,0 +1,16 @@
{
    "id": "LIKUOO",
    "name": "LIKUOO",
    "active": true,
    "adult": true,
    "language": ["*"],
    "thumbnail": "https://likuoo.video/files_static/images/logo.jpg",
    "banner": "",
    "categories": [
        "adult"
    ],
    "settings": [

    ]
}

plugin.video.alfa/channels/LIKUOO.py (Normal file, +88 lines)
@@ -0,0 +1,88 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
from core import jsontools

host = 'http://www.likuoo.video'


def mainlist(item):
    logger.info()
    itemlist = []
    itemlist.append( Item(channel=item.channel, title="Ultimos" , action="peliculas", url=host))
    itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/all-channels/"))
    itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/search/?s=%s" % texto
    try:
        return peliculas(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def categorias(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<div class="item_p">.*?<a href="([^"]+)" title="([^"]+)"><img src="([^"]+)"'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
        scrapedplot = ""
        scrapedthumbnail = "https:" + scrapedthumbnail
        scrapedurl = urlparse.urljoin(item.url,scrapedurl)
        itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
    return itemlist


def peliculas(item):
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<div class="item">.*?<a href="([^"]+)" title="(.*?)">.*?src="(.*?)".*?<div class="runtime">(.*?)</div>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedtitle,scrapedthumbnail,scrapedtime in matches:
        url = urlparse.urljoin(item.url,scrapedurl)
        scrapedtime = scrapedtime.replace("m", ":").replace("s", " ")
        title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle
        contentTitle = title
        thumbnail = "https:" + scrapedthumbnail
        plot = ""
        year = ""
        itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
    next_page_url = scrapertools.find_single_match(data,'...<a href="([^"]+)" class="next">»</a>')
    if next_page_url!="":
        next_page_url = urlparse.urljoin(item.url,next_page_url)
        itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
    return itemlist


def play(item):
    logger.info()
    data = scrapertools.cachePage(item.url)
    itemlist = servertools.find_video_items(data=data)

    for videoitem in itemlist:
        videoitem.title = item.fulltitle
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = item.channel
    return itemlist

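The peliculas() pattern above can be smoke-tested outside Kodi against a saved copy of a listing page. A minimal sketch, assuming nothing beyond the standard library; the sample HTML is made up for illustration, the regex is the one from the channel:

    # quick offline check of the peliculas() scraping pattern
    import re

    sample = ('<div class="item"><a href="/video/1234/" title="Demo title">'
              '<img src="//cdn.example/thumb.jpg"/><div class="runtime">12m34s</div></a></div>')
    patron = '<div class="item">.*?<a href="([^"]+)" title="(.*?)">.*?src="(.*?)".*?<div class="runtime">(.*?)</div>'
    for url, title, thumb, runtime in re.compile(patron, re.DOTALL).findall(sample):
        # -> /video/1234/ | Demo title | //cdn.example/thumb.jpg | 12m34s
        print("%s | %s | %s | %s" % (url, title, thumb, runtime))
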
@@ -1,12 +1,14 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Alfa favoritos (Mis enlaces)
# ============================
# Alfa favoritos
# ==============
# - Lista de enlaces guardados como favoritos, solamente en Alfa, no Kodi.
# - Los enlaces se organizan en carpetas que puede definir el usuario.
# - Se utiliza un solo fichero para guardar todas las carpetas y enlaces: user_favorites.json
# - Se puede copiar user_favorites.json a otros dispositivos ya que la única dependencia local es el thumbnail asociado a los enlaces,
# - Los enlaces se organizan en carpetas (virtuales) que puede definir el usuario.
# - Se utiliza un solo fichero para guardar todas las carpetas y enlaces: alfavorites-default.json
# - Se puede copiar alfavorites-default.json a otros dispositivos ya que la única dependencia local es el thumbnail asociado a los enlaces,
#   pero se detecta por código y se ajusta al dispositivo actual.
# - Se pueden tener distintos ficheros de alfavoritos y alternar entre ellos, pero solamente uno de ellos es la "lista activa".
# - Los ficheros deben estar en config.get_data_path() y empezar por alfavorites- y terminar en .json

# Requerimientos en otros módulos para ejecutar este canal:
# - Añadir un enlace a este canal en channelselector.py
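From AlfavoritesData.save() further down, the on-disk layout of an alfavorites-*.json file looks like this. A sketch; the field values are illustrative, the keys are the ones the code reads and writes:

    # orientative schema of alfavorites-<name>.json (example values)
    jsondata = {
        'user_favorites': [                         # one entry per folder
            {'title': 'Películas',
             'items': ['<item.tourl()>']},          # each link is serialized with Item().tourl()
        ],
        'info_lista': {
            'created': '2019-05-01 10:00',          # set by save()
            'updated': '2019-05-02 21:30',
            # optional, added by descargar_lista() / compartir_lista():
            # 'downloaded_date', 'downloaded_from', 'tinyupload_date', 'tinyupload_code'
        },
    }
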
@@ -14,35 +16,106 @@
# ------------------------------------------------------------

import os, re
from datetime import datetime

from core import filetools
from core import jsontools
from core.item import Item
from platformcode import config, logger
from platformcode import platformtools
from platformcode import config, logger, platformtools

from core import filetools, jsontools


def fechahora_actual():
    return datetime.now().strftime('%Y-%m-%d %H:%M')

# Helpers para listas
# -------------------

PREFIJO_LISTA = 'alfavorites-'

# Devuelve el nombre de la lista activa (Ej: alfavorites-default.json)
def get_lista_activa():
    return config.get_setting('lista_activa', default = PREFIJO_LISTA + 'default.json')

# Extrae nombre de la lista del fichero, quitando prefijo y sufijo (Ej: alfavorites-Prueba.json => Prueba)
def get_name_from_filename(filename):
    return filename.replace(PREFIJO_LISTA, '').replace('.json', '')

# Componer el fichero de lista a partir de un nombre, añadiendo prefijo y sufijo (Ej: Prueba => alfavorites-Prueba.json)
def get_filename_from_name(name):
    return PREFIJO_LISTA + name + '.json'

# Apuntar en un fichero de log los códigos de los ficheros que se hayan compartido
def save_log_lista_shared(msg):
    msg = fechahora_actual() + ': ' + msg + os.linesep
    fullfilename = os.path.join(config.get_data_path(), 'alfavorites_shared.log')
    with open(fullfilename, 'a') as f: f.write(msg)
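A quick round-trip of the naming helpers, for reference (the list name is made up):

    nombre = get_name_from_filename('alfavorites-Estrenos.json')   # -> 'Estrenos'
    fichero = get_filename_from_name(nombre)                       # -> 'alfavorites-Estrenos.json'
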
# Limpiar texto para usar como nombre de fichero
def text_clean(txt, disallowed_chars = '[^a-zA-Z0-9\-_()\[\]. ]+', blank_char = ' '):
    import unicodedata
    try:
        txt = unicode(txt, 'utf-8')
    except NameError: # unicode is a default on python 3
        pass
    txt = unicodedata.normalize('NFKD', txt).encode('ascii', 'ignore')
    txt = txt.decode('utf-8').strip()
    if blank_char != ' ': txt = txt.replace(' ', blank_char)
    txt = re.sub(disallowed_chars, '', txt)
    return str(txt)

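Worked out from the function above (the input string is made up): accented characters are NFKD-decomposed and reduced to ASCII, characters outside the allowed set are dropped, and spaces become blank_char:

    # 'í' normalizes to 'i'; '¿' and '?' are discarded; spaces become '_' when blank_char='_'
    text_clean('¿Mis Películas?', blank_char='_')   # -> 'Mis_Peliculas'
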
# Clase para cargar y guardar en el fichero de Alfavoritos
# --------------------------------------------------------
class AlfavoritesData:

    def __init__(self):
        self.user_favorites_file = os.path.join(config.get_data_path(), 'user_favorites.json')
    def __init__(self, filename = None):

        # Si no se especifica ningún fichero se usa la lista_activa (si no la hay se crea)
        if filename == None:
            filename = get_lista_activa()

        self.user_favorites_file = os.path.join(config.get_data_path(), filename)

        if not os.path.exists(self.user_favorites_file):
            self.user_favorites = []
        else:
            try:
                self.user_favorites = jsontools.load(filetools.read(self.user_favorites_file))
            except:
            fichero_anterior = os.path.join(config.get_data_path(), 'user_favorites.json')
            if os.path.exists(fichero_anterior): # formato anterior, convertir (a eliminar después de algunas versiones)
                jsondata = jsontools.load(filetools.read(fichero_anterior))
                self.user_favorites = jsondata
                self.info_lista = {}
                self.save()
                filetools.remove(fichero_anterior)
            else:
                self.user_favorites = []
        else:
            jsondata = jsontools.load(filetools.read(self.user_favorites_file))
            if not 'user_favorites' in jsondata or not 'info_lista' in jsondata: # formato incorrecto
                self.user_favorites = []
            else:
                self.user_favorites = jsondata['user_favorites']
                self.info_lista = jsondata['info_lista']

        if len(self.user_favorites) == 0:
            self.user_favorites.append({ 'title': 'Carpeta por defecto', 'items': [] })
            self.info_lista = {}

            # Crear algunas carpetas por defecto
            self.user_favorites.append({ 'title': 'Películas', 'items': [] })
            self.user_favorites.append({ 'title': 'Series', 'items': [] })
            self.user_favorites.append({ 'title': 'Otros', 'items': [] })

            self.save()

    def save(self):
        filetools.write(self.user_favorites_file, jsontools.dump(self.user_favorites))
        if 'created' not in self.info_lista:
            self.info_lista['created'] = fechahora_actual()
        self.info_lista['updated'] = fechahora_actual()

        jsondata = {}
        jsondata['user_favorites'] = self.user_favorites
        jsondata['info_lista'] = self.info_lista
        if not filetools.write(self.user_favorites_file, jsontools.dump(jsondata)):
            platformtools.dialog_ok('Alfa', 'Error, no se puede grabar la lista!', os.path.basename(self.user_favorites_file))


# ============================
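Typical use of the class, as the rest of the channel does it (a sketch; the folder index is illustrative):

    alfav = AlfavoritesData()                                # loads the active list (creates it if missing)
    alfav.user_favorites[0]['items'].append(item.tourl())    # append a serialized link to the first folder
    alfav.save()                                             # rewrite the json with info_lista refreshed
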
@@ -55,21 +128,21 @@ def addFavourite(item):

    # Si se llega aquí mediante el menú contextual, hay que recuperar los parámetros action y channel
    if item.from_action:
        item.__dict__["action"] = item.__dict__.pop("from_action")
        item.__dict__['action'] = item.__dict__.pop('from_action')
    if item.from_channel:
        item.__dict__["channel"] = item.__dict__.pop("from_channel")
        item.__dict__['channel'] = item.__dict__.pop('from_channel')

    # Limpiar título y quitar color
    # Limpiar título
    item.title = re.sub(r'\[COLOR [^\]]*\]', '', item.title.replace('[/COLOR]', '')).strip()
    if item.text_color:
        item.__dict__.pop("text_color")
        item.__dict__.pop('text_color')

    # Diálogo para escoger/crear carpeta
    i_perfil = _selecciona_perfil(alfav, 'Guardar enlace en:')
    if i_perfil == -1: return False

    # Detectar que el mismo enlace no exista ya en la carpeta
    campos = ['channel','action','url','extra'] # si todos estos campos coinciden se considera que el enlace ya existe
    campos = ['channel','action','url','extra','list_type'] # si todos estos campos coinciden se considera que el enlace ya existe
    for enlace in alfav.user_favorites[i_perfil]['items']:
        it = Item().fromurl(enlace)
        repe = True

@@ -81,11 +154,14 @@ def addFavourite(item):
        platformtools.dialog_notification('Enlace repetido', 'Ya tienes este enlace en la carpeta')
        return False

    # Si es una película, completar información de tmdb si no se tiene activado tmdb_plus_info
    if item.contentType == 'movie' and not config.get_setting('tmdb_plus_info', default=False):
    # Si es una película/serie, completar información de tmdb si no se tiene activado tmdb_plus_info (para season/episodio no hace falta pq ya se habrá hecho la "segunda pasada")
    if (item.contentType == 'movie' or item.contentType == 'tvshow') and not config.get_setting('tmdb_plus_info', default=False):
        from core import tmdb
        tmdb.set_infoLabels(item, True) # obtener más datos en "segunda pasada" (actores, duración, ...)

    # Añadir fecha en que se guarda
    item.date_added = fechahora_actual()

    # Guardar
    alfav.user_favorites[i_perfil]['items'].append(item.tourl())
    alfav.save()
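The duplicate check that the hunk above cuts off compares the fields listed in campos one by one. A sketch of the idea; the loop body between repe = True and the notification is reconstructed for illustration, not the literal code from the commit:

    for enlace in alfav.user_favorites[i_perfil]['items']:
        it = Item().fromurl(enlace)
        repe = True
        for campo in campos:
            if getattr(it, campo, '') != getattr(item, campo, ''):
                repe = False   # one differing field is enough to not count as duplicate
                break
        if repe:
            platformtools.dialog_notification('Enlace repetido', 'Ya tienes este enlace en la carpeta')
            return False
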
@@ -102,6 +178,7 @@ def addFavourite(item):
def mainlist(item):
    logger.info()
    alfav = AlfavoritesData()
    item.category = get_name_from_filename(os.path.basename(alfav.user_favorites_file))

    itemlist = []
    last_i = len(alfav.user_favorites) - 1
@@ -149,38 +225,15 @@ def mostrar_perfil(item):
    ruta_runtime = config.get_runtime_path()

    for i_enlace, enlace in enumerate(alfav.user_favorites[i_perfil]['items']):
        context = []

        if i_enlace > 0:
            context.append({'title': 'Mover arriba del todo', 'channel': item.channel, 'action': 'mover_enlace',
                            'i_enlace': i_enlace, 'i_perfil': i_perfil, 'direccion': 'top'})
            context.append({'title': 'Mover hacia arriba', 'channel': item.channel, 'action': 'mover_enlace',
                            'i_enlace': i_enlace, 'i_perfil': i_perfil, 'direccion': 'arriba'})
        if i_enlace < last_i:
            context.append({'title': 'Mover hacia abajo', 'channel': item.channel, 'action': 'mover_enlace',
                            'i_enlace': i_enlace, 'i_perfil': i_perfil, 'direccion': 'abajo'})
            context.append({'title': 'Mover abajo del todo', 'channel': item.channel, 'action': 'mover_enlace',
                            'i_enlace': i_enlace, 'i_perfil': i_perfil, 'direccion': 'bottom'})

        if len(alfav.user_favorites) > 1: # si se tiene más de una carpeta permitir mover entre ellas
            context.append({'title': 'Mover a otra carpeta', 'channel': item.channel, 'action': 'editar_enlace_carpeta',
                            'i_enlace': i_enlace, 'i_perfil': i_perfil})

        context.append({'title': 'Cambiar título', 'channel': item.channel, 'action': 'editar_enlace_titulo',
                        'i_enlace': i_enlace, 'i_perfil': i_perfil})

        context.append({'title': 'Cambiar color', 'channel': item.channel, 'action': 'editar_enlace_color',
                        'i_enlace': i_enlace, 'i_perfil': i_perfil})

        context.append({'title': 'Cambiar thumbnail', 'channel': item.channel, 'action': 'editar_enlace_thumbnail',
                        'i_enlace': i_enlace, 'i_perfil': i_perfil})

        context.append({'title': 'Eliminar enlace', 'channel': item.channel, 'action': 'eliminar_enlace',
                        'i_enlace': i_enlace, 'i_perfil': i_perfil})

        it = Item().fromurl(enlace)
        it.context = context
        it.plot = '[COLOR blue]Canal: ' + it.channel + '[/COLOR][CR]' + it.plot
        it.context = [ {'title': '[COLOR blue]Modificar enlace[/COLOR]', 'channel': item.channel, 'action': 'acciones_enlace',
                        'i_enlace': i_enlace, 'i_perfil': i_perfil} ]

        it.plot += '[CR][CR][COLOR blue]Canal:[/COLOR] ' + it.channel + ' [COLOR blue]Action:[/COLOR] ' + it.action
        if it.extra != '': it.plot += ' [COLOR blue]Extra:[/COLOR] ' + it.extra
        it.plot += '[CR][COLOR blue]Url:[/COLOR] ' + it.url if isinstance(it.url, str) else '...'
        if it.date_added != '': it.plot += '[CR][COLOR blue]Added:[/COLOR] ' + it.date_added

        # Si no es una url, ni tiene la ruta del sistema, convertir el path ya que se habrá copiado de otro dispositivo.
        # Sería más óptimo que la conversión se hiciera con un menú de importar, pero de momento se controla en run-time.
@@ -277,6 +330,37 @@ def eliminar_perfil(item):
    return True


def acciones_enlace(item):
    logger.info()

    acciones = ['Cambiar título', 'Cambiar color', 'Cambiar thumbnail', 'Mover a otra carpeta', 'Mover a otra lista', 'Eliminar enlace',
                'Mover arriba del todo', 'Mover hacia arriba', 'Mover hacia abajo', 'Mover abajo del todo']

    ret = platformtools.dialog_select('Acción a ejecutar', acciones)
    if ret == -1:
        return False # pedido cancel
    elif ret == 0:
        return editar_enlace_titulo(item)
    elif ret == 1:
        return editar_enlace_color(item)
    elif ret == 2:
        return editar_enlace_thumbnail(item)
    elif ret == 3:
        return editar_enlace_carpeta(item)
    elif ret == 4:
        return editar_enlace_lista(item)
    elif ret == 5:
        return eliminar_enlace(item)
    elif ret == 6:
        return mover_enlace(item.clone(direccion='top'))
    elif ret == 7:
        return mover_enlace(item.clone(direccion='arriba'))
    elif ret == 8:
        return mover_enlace(item.clone(direccion='abajo'))
    elif ret == 9:
        return mover_enlace(item.clone(direccion='bottom'))


def editar_enlace_titulo(item):
    logger.info()
    alfav = AlfavoritesData()
@@ -402,6 +486,44 @@ def editar_enlace_carpeta(item):
    return True


def editar_enlace_lista(item):
    logger.info()
    alfav = AlfavoritesData()

    if not alfav.user_favorites[item.i_perfil]: return False
    if not alfav.user_favorites[item.i_perfil]['items'][item.i_enlace]: return False

    # Diálogo para escoger lista
    opciones = []
    itemlist_listas = mainlist_listas(item)
    for it in itemlist_listas:
        if it.lista != '' and '[lista activa]' not in it.title: # descarta item crear y lista activa
            opciones.append(it.lista)

    if len(opciones) == 0:
        platformtools.dialog_ok('Alfa', 'No hay otras listas donde mover el enlace.', 'Puedes crearlas desde el menú Gestionar listas de enlaces')
        return False

    ret = platformtools.dialog_select('Seleccionar lista destino', opciones)

    if ret == -1:
        return False # pedido cancel

    alfav_destino = AlfavoritesData(opciones[ret])

    # Diálogo para escoger/crear carpeta en la lista de destino
    i_perfil = _selecciona_perfil(alfav_destino, 'Seleccionar carpeta destino', -1)
    if i_perfil == -1: return False

    alfav_destino.user_favorites[i_perfil]['items'].append(alfav.user_favorites[item.i_perfil]['items'][item.i_enlace])
    del alfav.user_favorites[item.i_perfil]['items'][item.i_enlace]
    alfav_destino.save()
    alfav.save()

    platformtools.itemlist_refresh()
    return True


def eliminar_enlace(item):
    logger.info()
    alfav = AlfavoritesData()
@@ -466,3 +588,339 @@ def _mover_item(lista, i_selected, direccion):
        lista.insert(last_i, lista.pop(i_selected))

    return lista


# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

# Gestionar diferentes listas de alfavoritos
# ------------------------------------------

def mainlist_listas(item):
    logger.info()
    itemlist = []
    item.category = 'Listas'

    lista_activa = get_lista_activa()

    import glob

    path = os.path.join(config.get_data_path(), PREFIJO_LISTA+'*.json')
    for fichero in glob.glob(path):
        lista = os.path.basename(fichero)
        nombre = get_name_from_filename(lista)
        titulo = nombre if lista != lista_activa else '[COLOR gold]%s[/COLOR] [lista activa]' % nombre

        itemlist.append(item.clone(action='acciones_lista', lista=lista, title=titulo, folder=False))

    itemlist.append(item.clone(action='acciones_nueva_lista', title='Crear/descargar lista / Info ...', folder=False))

    return itemlist

def acciones_lista(item):
    logger.info()

    acciones = ['Establecer como lista activa', 'Cambiar nombre de la lista',
                'Compartir en tinyupload', 'Eliminar lista', 'Información de la lista']

    ret = platformtools.dialog_select(item.lista, acciones)

    if ret == -1:
        return False # pedido cancel
    elif ret == 0:
        return activar_lista(item)
    elif ret == 1:
        return renombrar_lista(item)
    elif ret == 2:
        return compartir_lista(item)
    elif ret == 3:
        return eliminar_lista(item)
    elif ret == 4:
        return informacion_lista(item)


def activar_lista(item):
    logger.info()

    fullfilename = os.path.join(config.get_data_path(), item.lista)
    if not os.path.exists(fullfilename):
        platformtools.dialog_ok('Alfa', 'Error, no se encuentra la lista!', item.lista)
        return False

    config.set_setting('lista_activa', item.lista)

    from channelselector import get_thumb
    item_inicio = Item(title=config.get_localized_string(70527), channel="alfavorites", action="mainlist",
                       thumbnail=get_thumb("mylink.png"),
                       category=config.get_localized_string(70527), viewmode="thumbnails")
    platformtools.itemlist_update(item_inicio, replace=True)
    return True

def renombrar_lista(item):
    logger.info()

    fullfilename_current = os.path.join(config.get_data_path(), item.lista)
    if not os.path.exists(fullfilename_current):
        platformtools.dialog_ok('Alfa', 'Error, no se encuentra la lista!', fullfilename_current)
        return False

    nombre = get_name_from_filename(item.lista)
    titulo = platformtools.dialog_input(default=nombre, heading='Nombre de la lista')
    if titulo is None or titulo == '' or titulo == nombre:
        return False
    titulo = text_clean(titulo, blank_char='_')

    filename = get_filename_from_name(titulo)
    fullfilename = os.path.join(config.get_data_path(), filename)

    # Comprobar que el nuevo nombre no exista
    if os.path.exists(fullfilename):
        platformtools.dialog_ok('Alfa', 'Error, ya existe una lista con este nombre!', fullfilename)
        return False

    # Rename del fichero
    if not filetools.rename(fullfilename_current, filename):
        platformtools.dialog_ok('Alfa', 'Error, no se ha podido renombrar la lista!', fullfilename)
        return False

    # Update settings si es la lista activa
    if item.lista == get_lista_activa():
        config.set_setting('lista_activa', filename)

    platformtools.itemlist_refresh()
    return True

def eliminar_lista(item):
    logger.info()

    fullfilename = os.path.join(config.get_data_path(), item.lista)
    if not os.path.exists(fullfilename):
        platformtools.dialog_ok('Alfa', 'Error, no se encuentra la lista!', item.lista)
        return False

    if item.lista == get_lista_activa():
        platformtools.dialog_ok('Alfa', 'La lista activa no se puede eliminar', item.lista)
        return False

    if not platformtools.dialog_yesno('Eliminar lista', '¿Estás seguro de querer borrar la lista %s ?' % item.lista): return False
    filetools.remove(fullfilename)

    platformtools.itemlist_refresh()
    return True

def informacion_lista(item):
    logger.info()

    fullfilename = os.path.join(config.get_data_path(), item.lista)
    if not os.path.exists(fullfilename):
        platformtools.dialog_ok('Alfa', 'Error, no se encuentra la lista!', item.lista)
        return False

    alfav = AlfavoritesData(item.lista)

    txt = 'Lista: [COLOR gold]%s[/COLOR]' % item.lista
    txt += '[CR]Creada el %s y modificada el %s' % (alfav.info_lista['created'], alfav.info_lista['updated'])

    if 'downloaded_date' in alfav.info_lista:
        txt += '[CR]Descargada el %s desde [COLOR blue]%s[/COLOR]' % (alfav.info_lista['downloaded_date'], alfav.info_lista['downloaded_from'])

    if 'tinyupload_date' in alfav.info_lista:
        txt += '[CR]Compartida en tinyupload el %s con el código [COLOR blue]%s[/COLOR]' % (alfav.info_lista['tinyupload_date'], alfav.info_lista['tinyupload_code'])

    txt += '[CR]Número de carpetas: %d' % len(alfav.user_favorites)
    for perfil in alfav.user_favorites:
        txt += '[CR]- %s (%d enlaces)' % (perfil['title'], len(perfil['items']))

    platformtools.dialog_textviewer('Información de la lista', txt)
    return True

def compartir_lista(item):
    logger.info()

    fullfilename = os.path.join(config.get_data_path(), item.lista)
    if not os.path.exists(fullfilename):
        platformtools.dialog_ok('Alfa', 'Error, no se encuentra la lista!', fullfilename)
        return False

    try:
        progreso = platformtools.dialog_progress_bg('Compartir lista', 'Conectando con tinyupload ...')

        # Acceso a la página principal de tinyupload para obtener datos necesarios
        from core import httptools, scrapertools
        data = httptools.downloadpage('http://s000.tinyupload.com/index.php').data
        upload_url = scrapertools.find_single_match(data, 'form action="([^"]+)')
        sessionid = scrapertools.find_single_match(upload_url, 'sid=(.+)')

        progreso.update(10, 'Subiendo fichero', 'Espera unos segundos a que acabe de subirse tu fichero de lista a tinyupload')

        # Envío del fichero a tinyupload mediante multipart/form-data
        from lib import MultipartPostHandler
        import urllib2
        opener = urllib2.build_opener(MultipartPostHandler.MultipartPostHandler)
        params = { 'MAX_FILE_SIZE' : '52428800', 'file_description' : '', 'sessionid' : sessionid, 'uploaded_file' : open(fullfilename, 'rb') }
        handle = opener.open(upload_url, params)
        data = handle.read()

        progreso.close()

        if not 'File was uploaded successfuly' in data:
            logger.debug(data)
            platformtools.dialog_ok('Alfa', 'Error, no se ha podido subir el fichero a tinyupload.com!')
            return False

        codigo = scrapertools.find_single_match(data, 'href="index\.php\?file_id=([^"]+)')

    except:
        platformtools.dialog_ok('Alfa', 'Error, al intentar subir el fichero a tinyupload.com!', item.lista)
        return False

    # Apuntar código en fichero de log y dentro de la lista
    save_log_lista_shared('Subido fichero %s a tinyupload.com. El código para descargarlo es: %s' % (item.lista, codigo))

    alfav = AlfavoritesData(item.lista)
    alfav.info_lista['tinyupload_date'] = fechahora_actual()
    alfav.info_lista['tinyupload_code'] = codigo
    alfav.save()

    platformtools.dialog_ok('Alfa', 'Subida lista a tinyupload. Si quieres compartirla con alguien, pásale este código:', codigo)
    return True


def acciones_nueva_lista(item):
    logger.info()

    acciones = ['Crear una nueva lista',
                'Descargar lista con código de tinyupload',
                'Descargar lista de una url directa',
                'Información sobre las listas']

    ret = platformtools.dialog_select('Listas de enlaces', acciones)

    if ret == -1:
        return False # pedido cancel

    elif ret == 0:
        return crear_lista(item)

    elif ret == 1:
        codigo = platformtools.dialog_input(default='', heading='Código de descarga de tinyupload') # 05370382084539519168
        if codigo is None or codigo == '':
            return False
        return descargar_lista(item, 'http://s000.tinyupload.com/?file_id=' + codigo)

    elif ret == 2:
        url = platformtools.dialog_input(default='https://', heading='URL de dónde descargar la lista')
        if url is None or url == '':
            return False
        return descargar_lista(item, url)

    elif ret == 3:
        txt = '- Puedes tener diferentes listas, pero solamente una de ellas está activa. La lista activa es la que se muestra en "Mis enlaces" y donde se guardan los enlaces que se vayan añadiendo.'
        txt += '[CR]- Puedes ir cambiando la lista activa y alternar entre las que tengas.'
        txt += '[CR]- Puedes compartir una lista a través de tinyupload y luego pasarle el código resultante a tus amistades para que se la puedan bajar.'
        txt += '[CR]- Puedes descargar una lista si te pasan un código de tinyupload o una url donde esté alojada.'
        txt += '[CR]- Si lo quieres hacer manualmente, puedes copiar una lista alfavorites-*.json que te hayan pasado a la carpeta userdata del addon. Y puedes subir estos json a algún servidor y pasar sus urls a tus amigos para compartirlas.'
        txt += '[CR]- Para compartir listas desde el addon se utiliza el servicio de tinyupload.com por ser gratuito, privado y relativamente rápido. Los ficheros se guardan mientras no pasen 100 días sin que nadie lo descargue, son privados porque requieren un código para acceder a ellos, y la limitación de 50MB es suficiente para las listas.'

        platformtools.dialog_textviewer('Información sobre las listas', txt)
        return False

def crear_lista(item):
    logger.info()

    titulo = platformtools.dialog_input(default='', heading='Nombre de la lista')
    if titulo is None or titulo == '':
        return False
    titulo = text_clean(titulo, blank_char='_')

    filename = get_filename_from_name(titulo)
    fullfilename = os.path.join(config.get_data_path(), filename)

    # Comprobar que el fichero no exista ya
    if os.path.exists(fullfilename):
        platformtools.dialog_ok('Alfa', 'Error, ya existe una lista con este nombre!', fullfilename)
        return False

    # Provocar que se guarde con las carpetas vacías por defecto
    alfav = AlfavoritesData(filename)

    platformtools.itemlist_refresh()
    return True

def descargar_lista(item, url):
    logger.info()
    from core import httptools, scrapertools

    if 'tinyupload.com/' in url:
        try:
            from urlparse import urlparse
            data = httptools.downloadpage(url).data
            logger.debug(data)
            down_url, url_name = scrapertools.find_single_match(data, ' href="(download\.php[^"]*)"><b>([^<]*)')
            url_json = '{uri.scheme}://{uri.netloc}/'.format(uri=urlparse(url)) + down_url
        except:
            platformtools.dialog_ok('Alfa', 'Error, no se puede descargar la lista!', url)
            return False

    elif 'zippyshare.com/' in url:
        from core import servertools
        video_urls, puedes, motivo = servertools.resolve_video_urls_for_playing('zippyshare', url)

        if not puedes:
            platformtools.dialog_ok('Alfa', 'Error, no se puede descargar la lista!', motivo)
            return False
        url_json = video_urls[0][1] # https://www58.zippyshare.com/d/qPzzQ0UM/25460/alfavorites-testeanding.json
        url_name = url_json[url_json.rfind('/')+1:]

    elif 'friendpaste.com/' in url:
        url_json = url if url.endswith('/raw') else url + '/raw'
        url_name = 'friendpaste'

    else:
        url_json = url
        url_name = url[url.rfind('/')+1:]


    # Download json
    data = httptools.downloadpage(url_json).data

    # Verificar formato json de alfavorites y añadir info de la descarga
    jsondata = jsontools.load(data)
    if 'user_favorites' not in jsondata or 'info_lista' not in jsondata:
        logger.debug(data)
        platformtools.dialog_ok('Alfa', 'Error, el fichero descargado no tiene el formato esperado!')
        return False

    jsondata['info_lista']['downloaded_date'] = fechahora_actual()
    jsondata['info_lista']['downloaded_from'] = url
    data = jsontools.dump(jsondata)

    # Pedir nombre para la lista descargada
    nombre = get_name_from_filename(url_name)
    titulo = platformtools.dialog_input(default=nombre, heading='Nombre para guardar la lista')
    if titulo is None or titulo == '':
        return False
    titulo = text_clean(titulo, blank_char='_')

    filename = get_filename_from_name(titulo)
    fullfilename = os.path.join(config.get_data_path(), filename)

    # Si el nuevo nombre ya existe pedir confirmación para sobrescribir
    if os.path.exists(fullfilename):
        if not platformtools.dialog_yesno('Alfa', 'Ya existe una lista con este nombre.', '¿ Sobrescribir el fichero ?', filename):
            return False

    if not filetools.write(fullfilename, data):
        platformtools.dialog_ok('Alfa', 'Error, no se puede grabar la lista!', filename)

    platformtools.dialog_ok('Alfa', 'Ok, lista descargada correctamente', filename)
    platformtools.itemlist_refresh()
    return True

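For the tinyupload branch, the scheme and netloc of the page URL are recombined with the scraped relative download link. Worked out with the sample code from the source (the file_id is the one quoted in the code comment above; down_url is illustrative):

    from urlparse import urlparse  # Python 2, like the rest of the channel
    url = 'http://s000.tinyupload.com/?file_id=05370382084539519168'
    down_url = 'download.php?file_id=05370382084539519168'  # what href="(download\.php[^"]*)" would extract
    url_json = '{uri.scheme}://{uri.netloc}/'.format(uri=urlparse(url)) + down_url
    # -> 'http://s000.tinyupload.com/download.php?file_id=05370382084539519168'
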
@@ -219,173 +219,17 @@ def idioma(item):
    return itemlist


def findvideos(item):
    logger.info()
    itemlist = []
    try:
        filtro_idioma = config.get_setting("filterlanguages", item.channel)
        filtro_enlaces = config.get_setting("filterlinks", item.channel)
    except:
        filtro_idioma = 3
        filtro_enlaces = 2
    dict_idiomas = {'Español': 2, 'Latino': 1, 'Subtitulado': 0}
    data = httptools.downloadpage(item.url).data
    if filtro_enlaces != 0:
        list_enlaces = bloque_enlaces(data, filtro_idioma, dict_idiomas, "online", item)
        return
        if list_enlaces:
            itemlist.append(item.clone(action="", title="Enlaces Online", text_color=color1,
                                       text_bold=True))
            itemlist.extend(list_enlaces)
    if filtro_enlaces != 1:
        list_enlaces = bloque_enlaces(data, filtro_idioma, dict_idiomas, "descarga", item)
        if list_enlaces:
            itemlist.append(item.clone(action="", title="Enlaces Descarga", text_color=color1,
                                       text_bold=True))
            itemlist.extend(list_enlaces)
    tmdb.set_infoLabels(item, __modo_grafico__)

    # Requerido para FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay

    autoplay.start(itemlist, item)
    if itemlist:
        itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
                                   text_color="magenta"))
        if item.extra != "library":
            if config.get_videolibrary_support():
                itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
                                     action="add_pelicula_to_library", url=item.url, fulltitle = item.fulltitle
                                     ))
    return itemlist

# def bloque_enlaces(data, filtro_idioma, dict_idiomas, type, item):
#     logger.info()
#     lista_enlaces = []
#     matches = []
#     if type == "online": t_tipo = "Ver Online"
#     if type == "descarga": t_tipo = "Descargar"
#     data = data.replace("\n", "")
#     if type == "online":
#         patron = '(?is)class="playex.*?sheader'
#         bloque1 = scrapertools.find_single_match(data, patron)
#         patron = '(?is)#(option-[^"]+).*?png">([^<]+)'
#         match = scrapertools.find_multiple_matches(data, patron)
#         for scrapedoption, language in match:
#             scrapedserver = ""
#             lazy = ""
#             if "lazy" in bloque1:
#                 lazy = "lazy-"
#             patron = '(?s)id="%s".*?metaframe.*?%ssrc="([^"]+)' % (scrapedoption, lazy)
#             url = scrapertools.find_single_match(bloque1, patron)
#             if "goo.gl" in url:
#                 url = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location", "")
#             if "drive.php" in url:
#                 scrapedserver = "gvideo"
#             if "player" in url:
#                 scrapedserver = scrapertools.find_single_match(url, 'player/(\w+)')
#             if "ok" in scrapedserver: scrapedserver = "okru"
#             matches.append([url, scrapedserver, "", language.strip(), t_tipo])
#     bloque2 = scrapertools.find_single_match(data, '(?s)box_links.*?dt_social_single')
#     bloque2 = bloque2.replace("\t", "").replace("\r", "")
#     patron = '(?s)optn" href="([^"]+)'
#     patron += '.*?alt="([^\.]+)'
#     patron += '.*?src.*?src="[^>]+"?/>([^<]+)'
#     patron += '.*?src="[^>]+"?/>([^<]+)'
#     patron += '.*?/span>([^<]+)'
#     matches.extend(scrapertools.find_multiple_matches(bloque2, patron))
#     filtrados = []
#     for match in matches:
#         scrapedurl = match[0]
#         scrapedserver = match[1]
#         scrapedcalidad = match[2]
#         language = match[3]
#         scrapedtipo = match[4]
#         if t_tipo.upper() not in scrapedtipo.upper():
#             continue
#         title = " Mirror en %s (" + language + ")"
#         if len(scrapedcalidad.strip()) > 0:
#             title += " (Calidad " + scrapedcalidad.strip() + ")"
#
#         if filtro_idioma == 3 or item.filtro:
#             lista_enlaces.append(item.clone(title=title, action="play", text_color=color2,
#                                             url=scrapedurl, server=scrapedserver,
#                                             extra=item.url, contentThumbnail = item.thumbnail,
#                                             language=language))
#         else:
#             idioma = dict_idiomas[language]
#             if idioma == filtro_idioma:
#                 lista_enlaces.append(item.clone(title=title, action="play", text_color=color2,
#                                                 url=scrapedurl, server=scrapedserver,
#                                                 extra=item.url, contentThumbnail = item.thumbnail,
#                                                 language=language))
#             else:
#                 if language not in filtrados:
#                     filtrados.append(language)
#     lista_enlaces = servertools.get_servers_itemlist(lista_enlaces, lambda i: i.title % i.server.capitalize())
#     if filtro_idioma != 3:
#         if len(filtrados) > 0:
#             title = "Mostrar también enlaces filtrados en %s" % ", ".join(filtrados)
#             lista_enlaces.append(item.clone(title=title, action="findvideos", url=item.url, text_color=color3,
#                                             filtro=True))
#     return lista_enlaces
#
#
# def play(item):
#     logger.info()
#     itemlist = []
#     if "api.cinetux" in item.url or item.server == "okru" or "drive.php" in item.url or "youtube" in item.url:
#         data = httptools.downloadpage(item.url, headers={'Referer': item.extra}).data.replace("\\", "")
#         id = scrapertools.find_single_match(data, 'img src="[^#]+#(.*?)"')
#         item.url = "http://docs.google.com/get_video_info?docid=" + id
#         if item.server == "okru":
#             item.url = "https://ok.ru/videoembed/" + id
#         if item.server == "youtube":
#             item.url = "https://www.youtube.com/embed/" + id
#     elif "links" in item.url or "www.cinetux.me" in item.url:
#         data = httptools.downloadpage(item.url).data
#         scrapedurl = scrapertools.find_single_match(data, '<a href="(http[^"]+)')
#         if scrapedurl == "":
#             scrapedurl = scrapertools.find_single_match(data, '(?i)frame.*?src="(http[^"]+)')
#             if scrapedurl == "":
#                 scrapedurl = scrapertools.find_single_match(data, 'replace."([^"]+)"')
#         elif "goo.gl" in scrapedurl:
#             scrapedurl = httptools.downloadpage(scrapedurl, follow_redirects=False, only_headers=True).headers.get(
#                 "location", "")
#         item.url = scrapedurl
#     item.server = ""
#     itemlist.append(item.clone())
#     itemlist = servertools.get_servers_itemlist(itemlist)
#     for i in itemlist:
#         i.thumbnail = i.contentThumbnail
#     return itemlist

def get_source(url, referer=None):
    logger.info()
    if referer == None:
        data = httptools.downloadpage(url).data
    else:
        data = httptools.downloadpage(url, headers={'Referer':referer}).data
    data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    return data

def findvideos(item):
    import urllib
    logger.info()

    itemlist=[]

    data = get_source(item.url)

    patron = 'class="title">([^>]+)</span>.*?data-type="([^"]+)" data-post="(\d+)" data-nume="(\d+)'
    data = httptools.downloadpage(item.url).data
    patron = 'class="title">.*?src.*?/>([^>]+)</span>.*?data-type="([^"]+).*?data-post="(\d+)".*?data-nume="(\d+)'
    matches = re.compile(patron, re.DOTALL).findall(data)

    #logger.info("Intel66")
    #scrapertools.printMatches(matches)
    for language, tp, pt, nm in matches:

        language = language.strip()
        post = {'action':'doo_player_ajax', 'post':pt, 'nume':nm, 'type':tp}
        post = urllib.urlencode(post)
        new_data = httptools.downloadpage(CHANNEL_HOST+'wp-admin/admin-ajax.php', post=post, headers={'Referer':item.url}).data

@@ -398,23 +242,45 @@ def findvideos(item):
        else:
            title = ''
        url = scrapertools.find_single_match(new_data, "src='([^']+)'")
        itemlist.append(Item(channel=item.channel, title ='%s'+title, url=url, action='play', quality=item.quality,
                             language=IDIOMAS[language], infoLabels=item.infoLabels))

        #logger.info("Intel33 %s" %url)
        url = get_url(url)
        if "mega" not in url and "mediafire" not in url:
            itemlist.append(Item(channel=item.channel, title ='%s'+title, url=url, action='play', quality=item.quality,
                                 language=IDIOMAS[language], infoLabels=item.infoLabels))
    #logger.info("Intel44")
    #scrapertools.printMatches(itemlist)
    patron = "<a class='optn' href='([^']+)'.*?<img src='.*?>([^<]+)<.*?<img src='.*?>([^<]+)<"
    matches = re.compile(patron, re.DOTALL).findall(data)

    #logger.info("Intel66a")
    #scrapertools.printMatches(matches)
    for hidden_url, quality, language in matches:

        if not config.get_setting('unify'):
            title = ' [%s][%s]' % (quality, IDIOMAS[language])
        else:
            title = ''
        new_data = get_source(hidden_url)
        url = scrapertools.find_single_match(new_data, '"url":"([^"]+)"')
        new_data = httptools.downloadpage(hidden_url).data
        url = scrapertools.find_single_match(new_data, 'id="link" href="([^"]+)"')
        url = url.replace('\\/', '/')
        itemlist.append(Item(channel=item.channel, title='%s'+title, url=url, action='play', quality=quality,
                             language=IDIOMAS[language], infoLabels=item.infoLabels))

        url = get_url(url)
        if "mega" not in url and "mediafire" not in url:
            itemlist.append(Item(channel=item.channel, title='%s'+title, url=url, action='play', quality=quality,
                                 language=IDIOMAS[language], infoLabels=item.infoLabels))
    #logger.info("Intel55")
    #scrapertools.printMatches(itemlist)
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    return itemlist
    return itemlist


def get_url(url):
    if "cinetux.me" in url:
        d1 = httptools.downloadpage(url).data
        if "mail" in url:
            id = scrapertools.find_single_match(d1, '<img src="[^#]+#(\w+)')
            #logger.info("Intel77b %s" %id)
            url = "https://my.mail.ru/video/embed/" + id
        else:
            url = scrapertools.find_single_match(d1, 'document.location.replace\("([^"]+)')
            #logger.info("Intel22a %s" %d1)
            #logger.info("Intel77a %s" %url)
            url = url.replace("povwideo","powvideo")
    return url

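Note the title trick used above: each title is built as '%s' + title, and the final get_servers_itemlist call fills the placeholder with the detected server name via the lambda. Roughly (a sketch; the server name is illustrative):

    title = '%s' + ' [1080p][Castellano]'
    server = 'powvideo'
    print(title % server.capitalize())   # -> 'Powvideo [1080p][Castellano]'
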
@@ -599,7 +599,7 @@ def findvideos(item):
    item_local.url = scrapedurl
    if host not in item_local.url and host.replace('https', 'http') not in item_local.url and not item.armagedon:
        item_local.url = host + item_local.url
    if item_local.url and not item.armagedon:
    if item_local.url and not item.armagedon and item.emergency_urls:
        item_local.torrent_alt = item.emergency_urls[0][0] #Guardamos la url del .Torrent ALTERNATIVA
    if item.armagedon: #Si es catastrófico, lo marcamos
        item_local.quality = '[/COLOR][COLOR hotpink][E] [COLOR limegreen]%s' % item_local.quality

@@ -398,7 +398,7 @@ def findvideos(item):
    if item.armagedon: #Si es catastrófico, lo marcamos
        item_local.quality = '[/COLOR][COLOR hotpink][E] [COLOR limegreen]%s' % item_local.quality
    item_local.url = link_torrent
    if item_local.url and not item.armagedon:
    if item_local.url and item.emergency_urls and not item.armagedon:
        item_local.torrent_alt = item.emergency_urls[0][0] #Guardamos la url del .Torrent ALTERNATIVA

    item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][Torrent][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.quality, str(item_local.language)) #Preparamos título de Torrent

@@ -47,6 +47,28 @@
            "VOSE"
        ]
    },
    {
        "id": "emergency_urls",
        "type": "list",
        "label": "Se quieren guardar Enlaces de Emergencia por si se cae la Web?",
        "default": 1,
        "enabled": true,
        "visible": true,
        "lvalues": [
            "No",
            "Guardar",
            "Borrar",
            "Actualizar"
        ]
    },
    {
        "id": "emergency_urls_torrents",
        "type": "bool",
        "label": "Se quieren guardar Torrents de Emergencia por si se cae la Web?",
        "default": true,
        "enabled": true,
        "visible": "!eq(-1,'No')"
    },
    {
        "id": "timeout_downloadpage",
        "type": "list",
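The "visible": "!eq(-1,'No')" condition ties the bool to the setting one position above it: the torrents option only shows when the emergency_urls list is not set to "No". On the Python side these per-channel settings are then read roughly like this (a sketch under the assumption that config.get_setting accepts a channel name, as elsewhere in this diff; the channel variable is illustrative):

    # 'emergency_urls' is a "list": returns the index into lvalues (0=No, 1=Guardar, 2=Borrar, 3=Actualizar)
    emergency_urls = config.get_setting('emergency_urls', channel)
    # 'emergency_urls_torrents' is a "bool"
    save_torrents = config.get_setting('emergency_urls_torrents', channel)
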
@@ -657,7 +657,6 @@ def findvideos(item):
    if not data_torrent and not data_directo:
        logger.error("ERROR 01: FINDVIDEOS: La Web no responde o la URL es errónea: " + item.url)
        itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 01: FINDVIDEOS: La Web no responde o la URL es errónea. Si la Web está activa, reportar el error con el log'))
        return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos

    patron = '<div class="content"><a href="([^"]+).*?'
    patron += '(?:<div class="content_medium">(.*?)<\/div>.*?)?'
@@ -665,13 +664,26 @@ def findvideos(item):
    matches_torrent = re.compile(patron, re.DOTALL).findall(data_torrent)
    matches_directo = re.compile(patron, re.DOTALL).findall(data_directo)
    if not matches_torrent and not matches_directo and scrapertools.find_single_match(data_directo, '<div id="where_i_am".*?<a href="[^"]+">Ver Online<\/a>.*?href="([^"]+)">') != url: #error
        logger.error("ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web " + " / PATRON: " + patron)
        itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web. Verificar en la Web esto último y reportar el error con el log'))
        if data_torrent:
            logger.error(data_torrent)
        if data_directo:
            logger.error(data_directo)
        return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos

    if item.emergency_urls and not item.videolibray_emergency_urls: #Hay urls de emergencia?
        matches_torrent = item.emergency_urls[1] #Guardamos los matches de los .Torrents
        try:
            matches_directo = item.emergency_urls[3] #Guardamos los matches de Directos, si los hay
        except:
            pass
        item.armagedon = True #Marcamos la situación como catastrófica
    else:
        if len(itemlist) == 0:
            logger.error("ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web " + " / PATRON: " + patron)
            itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web. Verificar en la Web esto último y reportar el error con el log'))
            if data_torrent:
                logger.error(data_torrent)
            if data_directo:
                logger.error(data_directo)
            if item.videolibray_emergency_urls:
                return item
            else:
                return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos

    #logger.debug("PATRON: " + patron)
    #logger.debug(matches_torrent)
@@ -679,8 +691,17 @@ def findvideos(item):
    #logger.debug(data_torrent)
    #logger.debug(data_directo)

    if item.videolibray_emergency_urls:
        item.emergency_urls = [] #Iniciamos emergency_urls
        item.emergency_urls.append([]) #Reservamos el espacio para los .torrents locales
        item.emergency_urls.append(matches_torrent) #Guardamos los matches_torrent iniciales
        item.emergency_urls.append([]) #Reservamos el espacio para los matches_torrent finales
        item.emergency_urls.append(matches_directo) #Guardamos los matches_directo iniciales
        item.emergency_urls.append([]) #Reservamos el espacio para los matches_directo finales

    #Llamamos al método para crear el título general del vídeo, con toda la información obtenida de TMDB
    item, itemlist = generictools.post_tmdb_findvideos(item, itemlist)
    if not item.videolibray_emergency_urls:
        item, itemlist = generictools.post_tmdb_findvideos(item, itemlist)

    #Si es un Episodio suelto, tratamos de poner un enlace a la Serie completa
    if item.extra3 == 'completa':
@@ -702,7 +723,7 @@ def findvideos(item):
        item_local.contentType = 'tvshow'
        item_local.extra = 'series'
        item_local.action = 'episodios'
        item_local.season_colapse = True #Muestra las series agrupadas por temporadas
        item_local.season_colapse = True #Muestra las series agrupadas por temporadas

        #Buscamos la url de la serie y verificamos que existe
        patron_serie = '<div class="linkMoreMovies"><div class="linkMore"><a href="([^"]+)">'
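The emergency_urls container built above is a plain list indexed by position. A summary sketch of the layout as the comments describe it (a reading aid, not code from the repo):

    # item.emergency_urls, when videolibray_emergency_urls is active:
    # [0] local .torrent urls (appended as host + scrapedtorrent)
    # [1] initial matches_torrent
    # [2] final matches_torrent (one per episode; consumed with del item.emergency_urls[2][0])
    # [3] initial matches_directo
    # [4] final matches_directo
    emergency_urls = [[], matches_torrent, [], matches_directo, []]
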
@@ -741,106 +762,130 @@ def findvideos(item):
            item_local.language = ["%s" % IDIOMAS[scrapedlang]]

            #Leemos la página definitiva para el enlace al .torrent
            try:
                data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item_local.url, timeout=timeout).data)
            except:
                pass

            patron = '<div class="linksDescarga"><span class="titulo">Descargar Torrent: <\/span><br><a href="([^"]+)" class="TTlink">»\s?(.*?)\s?«<\/a>'
            matches = re.compile(patron, re.DOTALL).findall(data)
            data = ''
            if not item.armagedon:
                try:
                    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item_local.url, timeout=timeout).data)
                except:
                    pass

                patron = '<div class="linksDescarga"><span class="titulo">Descargar Torrent: <\/span><br><a href="([^"]+)" class="TTlink">»\s?(.*?)\s?«<\/a>'
                matches = re.compile(patron, re.DOTALL).findall(data)
            else:
                matches = item.emergency_urls[2][0] #Guardamos los matches de Directos, si los hay
                del item.emergency_urls[2][0] #Una vez tratado lo limpiamos
                data = 'xyz123' #iniciamos data para que no dé problemas

            if item.videolibray_emergency_urls: #Si estamos añadiendo a Videoteca...
                item.emergency_urls[2].append(matches) #Salvamos este matches

            if not data or not matches:
                logger.error("ERROR 02: FINDVIDEOS: El archivo Torrent no existe o ha cambiado la estructura de la Web " + " / PATRON: " + patron + " / URL: " + item_local.url + " / DATA: " + data)
                continue #si no hay más datos, algo no funciona, pasamos a Ver Online
                continue #si no hay más datos, algo no funciona, pasamos a Ver Online

            #logger.debug(patron)
            #logger.debug(matches)
            #logger.debug(data)

            for scrapedtorrent, scrapedtitle in matches:
                item_local = item_local.clone()
                quality = item_local.quality
                qualityscraped = ''
                if not item_local.contentEpisodeNumber and item_local.contentType == 'episode':
                    item_local.contentEpisodeNumber = 0

                #Si son episodios múltiples, los listamos con sus títulos
                if len(matches) > 1 or len(itemlist_alt) > 1:
                    if item_local.contentType == 'episode' or item_local.contentType == 'season':
                        if scrapertools.find_single_match(scrapedtitle, '(\d+[x|X]\d+(?:-\d{1,2})?)'):
                            qualityscraped = '%s' % scrapertools.find_single_match(scrapedtitle, '(\d+[x|X]\d+(?:-\d{1,2})?)')
                        if scrapertools.find_single_match(scrapedtitle, '\d+[x|X](\d+)'):
                            item_local.contentEpisodeNumber = int(scrapertools.find_single_match(scrapedtitle, '\d+[x|X](\d+)'))
                        elif scrapertools.find_single_match(scrapedtitle, '[c|C]ap.*?(\d+)'):
                            item_local.contentEpisodeNumber = int(scrapertools.find_single_match(scrapedtitle, '[c|C]ap.*?(\d+)'))
                        elif scrapertools.find_single_match(scrapedtorrent, '[s|S]\d{1,2}[e|E](\d{1,2})'):
                            item_local.contentEpisodeNumber = int(scrapertools.find_single_match(scrapedtorrent, '[s|S]\d{1,2}[e|E](\d{1,2})'))
                        if not qualityscraped:
                            qualityscraped = '%sx%s' % (str(item_local.contentSeason), str(item_local.contentEpisodeNumber).zfill(2))
                    else:
                        qualityscraped = '%s' % scrapedtitle

                #Si todavía no sabemos el num de Episodio, lo buscamos
                if not item_local.contentEpisodeNumber and item_local.contentType == 'episode':
                    try:
                        if scrapertools.find_single_match(scrapedtitle, '(\d+)[x|X](\d+)'):
                            item_local.contentSeason, item_local.contentEpisodeNumber = scrapertools.find_single_match(scrapedtitle, '(\d+)[x|X](\d+)')
                            qualityscraped = '%sx%s' % (str(item_local.contentSeason), str(item_local.contentEpisodeNumber).zfill(2))
                    except:
                        pass

                #Buscamos calidades
                if scrapertools.find_single_match(scrapedtitle, '(\d+p)'):
                    qualityscraped += ' ' + scrapertools.find_single_match(scrapedtitle, '(\d+p)')
                if qualityscraped:
                    quality = '[%s] %s' % (qualityscraped, item_local.quality)
                if item.videolibray_emergency_urls:
                    item.emergency_urls[0].append(host + scrapedtorrent)
                else:
                    item_local = item_local.clone()
                    quality = item_local.quality
                    qualityscraped = ''
                    if not item_local.contentEpisodeNumber and item_local.contentType == 'episode':
                        item_local.contentEpisodeNumber = 0

                    #Si son episodios múltiples, los listamos con sus títulos
                    if len(matches) > 1 or len(itemlist_alt) > 1:
                        if item_local.contentType == 'episode' or item_local.contentType == 'season':
                            if scrapertools.find_single_match(scrapedtitle, '(\d+[x|X]\d+(?:-\d{1,2})?)'):
                                qualityscraped = '%s' % scrapertools.find_single_match(scrapedtitle, '(\d+[x|X]\d+(?:-\d{1,2})?)')
                            if scrapertools.find_single_match(scrapedtitle, '\d+[x|X](\d+)'):
                                item_local.contentEpisodeNumber = int(scrapertools.find_single_match(scrapedtitle, '\d+[x|X](\d+)'))
                            elif scrapertools.find_single_match(scrapedtitle, '[c|C]ap.*?(\d+)'):
                                item_local.contentEpisodeNumber = int(scrapertools.find_single_match(scrapedtitle, '[c|C]ap.*?(\d+)'))
                            elif scrapertools.find_single_match(scrapedtorrent, '[s|S]\d{1,2}[e|E](\d{1,2})'):
                                item_local.contentEpisodeNumber = int(scrapertools.find_single_match(scrapedtorrent, '[s|S]\d{1,2}[e|E](\d{1,2})'))
                            if not qualityscraped:
                                qualityscraped = '%sx%s' % (str(item_local.contentSeason), str(item_local.contentEpisodeNumber).zfill(2))
                        else:
                            qualityscraped = '%s' % scrapedtitle

                    #Si todavía no sabemos el num de Episodio, lo buscamos
                    if not item_local.contentEpisodeNumber and item_local.contentType == 'episode':
                        try:
                            if scrapertools.find_single_match(scrapedtitle, '(\d+)[x|X](\d+)'):
                                item_local.contentSeason, item_local.contentEpisodeNumber = scrapertools.find_single_match(scrapedtitle, '(\d+)[x|X](\d+)')
                                qualityscraped = '%sx%s' % (str(item_local.contentSeason), str(item_local.contentEpisodeNumber).zfill(2))
                        except:
                            pass

                    #Buscamos calidades
                    if scrapertools.find_single_match(scrapedtitle, '(\d+p)'):
                        qualityscraped += ' ' + scrapertools.find_single_match(scrapedtitle, '(\d+p)')
                    if qualityscraped:
                        quality = '[%s] %s' % (qualityscraped, item_local.quality)

                    #Ahora pintamos el link del Torrent
                    item_local.url = host + scrapedtorrent
                    size = generictools.get_torrent_size(item_local.url) #Buscamos el tamaño en el .torrent
                    if size:
                        quality += ' [%s]' % size
                    item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][Torrent][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (quality, str(item_local.language))

                    #Preparamos título y calidad, quitamos etiquetas vacías
                    item_local.title = re.sub(r'\s?\[COLOR \w+\]\[\[?\s?\]?\]\[\/COLOR\]', '', item_local.title)
                    item_local.title = re.sub(r'\s?\[COLOR \w+\]\s?\[\/COLOR\]', '', item_local.title)
                    item_local.title = item_local.title.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()
                    quality = re.sub(r'\s?\[COLOR \w+\]\[\[?\s?\]?\]\[\/COLOR\]', '', quality)
                    quality = re.sub(r'\s?\[COLOR \w+\]\s?\[\/COLOR\]', '', quality)
                    quality = quality.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()

                    item_local.alive = "??" #Calidad del link sin verificar
|
||||
item_local.action = "play" #Visualizar vídeo
|
||||
item_local.server = "torrent" #Seridor Torrent
|
||||
|
||||
itemlist_t.append(item_local.clone(quality=quality)) #Pintar pantalla, si no se filtran idiomas
|
||||
#Ahora pintamos el link del Torrent
|
||||
item_local.url = host + scrapedtorrent
|
||||
if item.emergency_urls and not item.videolibray_emergency_urls:
|
||||
item_local.torrent_alt = item.emergency_urls[0][0] #Guardamos la url del .Torrent ALTERNATIVA
|
||||
if item.armagedon:
|
||||
item_local.url = item.emergency_urls[0][0] #... ponemos la emergencia como primaria
|
||||
del item.emergency_urls[0][0] #Una vez tratado lo limpiamos
|
||||
|
||||
size = ''
|
||||
if not item.armagedon:
|
||||
size = generictools.get_torrent_size(item_local.url) #Buscamos el tamaño en el .torrent
|
||||
if size:
|
||||
quality += ' [%s]' % size
|
||||
if item.armagedon: #Si es catastrófico, lo marcamos
|
||||
quality = '[/COLOR][COLOR hotpink][E] [COLOR limegreen]%s' % quality
|
||||
item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][Torrent][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (quality, str(item_local.language))
|
||||
|
||||
#Preparamos título y calidad, quitamos etiquetas vacías
|
||||
item_local.title = re.sub(r'\s?\[COLOR \w+\]\[\[?\s?\]?\]\[\/COLOR\]', '', item_local.title)
|
||||
item_local.title = re.sub(r'\s?\[COLOR \w+\]\s?\[\/COLOR\]', '', item_local.title)
|
||||
item_local.title = item_local.title.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()
|
||||
quality = re.sub(r'\s?\[COLOR \w+\]\[\[?\s?\]?\]\[\/COLOR\]', '', quality)
|
||||
quality = re.sub(r'\s?\[COLOR \w+\]\s?\[\/COLOR\]', '', quality)
|
||||
quality = quality.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()
|
||||
|
||||
item_local.alive = "??" #Calidad del link sin verificar
|
||||
item_local.action = "play" #Visualizar vídeo
|
||||
item_local.server = "torrent" #Seridor Torrent
|
||||
|
||||
itemlist_t.append(item_local.clone(quality=quality)) #Pintar pantalla, si no se filtran idiomas
|
||||
|
||||
# Requerido para FilterTools
|
||||
if config.get_setting('filter_languages', channel) > 0: #Si hay idioma seleccionado, se filtra
|
||||
item_local.quality = quality #Calidad
|
||||
itemlist_f = filtertools.get_link(itemlist_f, item_local, list_language) #Pintar pantalla, si no está vacío
|
||||
|
||||
#logger.debug("TORRENT: " + scrapedtorrent + " / title gen/torr: " + item.title + " / " + item_local.title + " / calidad: " + item_local.quality + " / tamaño: " + scrapedsize + " / content: " + item_local.contentTitle + " / " + item_local.contentSerieName)
|
||||
#logger.debug(item_local)
|
||||
|
||||
if not item.videolibray_emergency_urls:
|
||||
if len(itemlist_f) > 0: #Si hay entradas filtradas...
|
||||
itemlist_alt.extend(itemlist_f) #Pintamos pantalla filtrada
|
||||
else:
|
||||
if config.get_setting('filter_languages', channel) > 0 and len(itemlist_t) > 0: #Si no hay entradas filtradas ...
|
||||
thumb_separador = get_thumb("next.png") #... pintamos todo con aviso
|
||||
itemlist.append(Item(channel=item.channel, url=host, title="[COLOR red][B]NO hay elementos con el idioma seleccionado[/B][/COLOR]", thumbnail=thumb_separador))
|
||||
itemlist_alt.extend(itemlist_t) #Pintar pantalla con todo si no hay filtrado
|
||||
|
||||
# Requerido para FilterTools
|
||||
if config.get_setting('filter_languages', channel) > 0: #Si hay idioma seleccionado, se filtra
|
||||
itemlist_f = filtertools.get_link(itemlist_f, item_local, list_language) #Pintar pantalla, si no está vacío
|
||||
|
||||
#logger.debug("TORRENT: " + scrapedtorrent + " / title gen/torr: " + item.title + " / " + item_local.title + " / calidad: " + item_local.quality + " / tamaño: " + scrapedsize + " / content: " + item_local.contentTitle + " / " + item_local.contentSerieName)
|
||||
#logger.debug(item_local)
|
||||
|
||||
if len(itemlist_f) > 0: #Si hay entradas filtradas...
|
||||
itemlist_alt.extend(itemlist_f) #Pintamos pantalla filtrada
|
||||
else:
|
||||
if config.get_setting('filter_languages', channel) > 0 and len(itemlist_t) > 0: #Si no hay entradas filtradas ...
|
||||
thumb_separador = get_thumb("next.png") #... pintamos todo con aviso
|
||||
itemlist.append(Item(channel=item.channel, url=host, title="[COLOR red][B]NO hay elementos con el idioma seleccionado[/B][/COLOR]", thumbnail=thumb_separador))
|
||||
itemlist_alt.extend(itemlist_t) #Pintar pantalla con todo si no hay filtrado
|
||||
|
||||
#Si son múltiples episodios, ordenamos
|
||||
if len(itemlist_alt) > 1 and (item.contentType == 'episode' or item.contentType == 'season'):
|
||||
itemlist_alt = sorted(itemlist_alt, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber))) #clasificamos
|
||||
tmdb.set_infoLabels(itemlist_alt, True) #TMDB de la lista de episodios
|
||||
itemlist.extend(itemlist_alt)
|
||||
#Si son múltiples episodios, ordenamos
|
||||
if len(itemlist_alt) > 1 and (item.contentType == 'episode' or item.contentType == 'season'):
|
||||
itemlist_alt = sorted(itemlist_alt, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber))) #clasificamos
|
||||
tmdb.set_infoLabels(itemlist_alt, True) #TMDB de la lista de episodios
|
||||
itemlist.extend(itemlist_alt)
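Condensed to its essentials, the season/episode tagging above boils down to two regular expressions over the scraped title. A minimal, standalone sketch of that matching (plain re; the sample titles are invented):

# Standalone sketch of the season/episode tagging used above (illustrative only).
import re

def episode_tag(title):
    # "3x07" / "3X07-08" style markers, as in the patterns above
    match = re.search(r'(\d+[xX]\d+(?:-\d{1,2})?)', title)
    if match:
        return match.group(1)
    # "Cap. 307" style markers
    match = re.search(r'[cC]ap.*?(\d+)', title)
    if match:
        return match.group(1)
    return ''

print(episode_tag('Serie 3x07 [720p]'))   # -> 3x07
print(episode_tag('Serie Cap.307'))       # -> 307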

#Now handle the direct-streaming servers
itemlist_alt = []
itemlist_t = []                                 #Full itemlist of links
itemlist_f = []                                 #Itemlist of filtered links
if matches_directo:
    for scrapedurl, scrapedquality, scrapedlang in matches_directo:     #read the links with their different qualities
        #Make a working copy of Item

@@ -861,13 +906,22 @@ def findvideos(item):
        item_local.language = ["%s" % IDIOMAS[scrapedlang]]     #Save the language, if any

        #Read the page that holds the server link
        data = ''
        if not item.armagedon:
            try:
                data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item_local.url, timeout=timeout).data)
            except:
                pass

            patron = '<div class="linksDescarga"><span class="titulo">Video Online:\s?([^<]+)?<\/span><br><br><a href="([^"]+)'
            matches = re.compile(patron, re.DOTALL).findall(data)
        else:
            matches = item.emergency_urls[4][0]         #Restore the cached direct-link matches, if any
            del item.emergency_urls[4][0]               #Clean them up once used
            data = 'xyz123'                             #initialise data so later checks do not break

        if item.videolibray_emergency_urls:             #If we are adding to the video library...
            item.emergency_urls[4].append(matches)      #...save these matches

        if not data or not matches:
            logger.error("ERROR 02: FINDVIDEOS: El enlace no existe o ha cambiado la estructura de la Web " + " / PATRON: " + patron + " / DATA: " + data)

@@ -877,114 +931,121 @@ def findvideos(item):
        #logger.debug(patron)
        #logger.debug(matches)
        #logger.debug(data)

        for scrapedtitle, scrapedenlace in matches:
            item_local = item_local.clone()

            enlace = ''
            devuelve = ''
            mostrar_server = ''
            capitulo = ''

            servidor = scrapedtitle.strip()
            servidor = servidor.replace("streamin", "streaminto")
            if not servidor or "Capituo" in servidor or "Capitulo" in servidor or scrapertools.find_single_match(servidor, '(\d+[x|X]\d+)'):
                capitulo = scrapertools.find_single_match(servidor, '(\d+[x|X]\d+)')
                servidor = scrapertools.find_single_match(scrapedenlace, ':\/\/(.*?)\.')
            quality = item_local.quality

            qualityscraped = ''
            if not item_local.contentEpisodeNumber and item_local.contentType == 'episode':
                item_local.contentEpisodeNumber = 0

            #If these are multi-episode entries, list them with their titles
            if (len(matches) > 1 or len(itemlist_alt) > 1) and not servidor in scrapedtitle:
                if not capitulo and (item_local.contentType == 'episode' or item_local.contentType == 'season'):
                    if scrapertools.find_single_match(scrapedtitle, '(\d+[x|X]\d+(?:-\d{1,2})?)'):
                        qualityscraped = '%s' % scrapertools.find_single_match(scrapedtitle, '(\d+[x|X]\d+(?:-\d{1,2})?)')
                        if scrapertools.find_single_match(scrapedtitle, '\d+[x|X](\d+)'):
                            item_local.contentEpisodeNumber = int(scrapertools.find_single_match(scrapedtitle, '\d+[x|X](\d+)'))
                    elif scrapertools.find_single_match(scrapedtitle, '[c|C]ap.*?(\d+)'):
                        item_local.contentEpisodeNumber = int(scrapertools.find_single_match(scrapedtitle, '[c|C]ap.*?(\d+)'))
                    elif scrapertools.find_single_match(scrapedenlace, '[s|S]\d{1,2}[e|E](\d{1,2})'):
                        item_local.contentEpisodeNumber = int(scrapertools.find_single_match(scrapedenlace, '[s|S]\d{1,2}[e|E](\d{1,2})'))
                    if not qualityscraped:
                        qualityscraped = '%sx%s' % (str(item_local.contentSeason), str(item_local.contentEpisodeNumber).zfill(2))
                elif capitulo:
                    if scrapertools.find_single_match(capitulo, '\d+[x|X](\d+)'):
                        item_local.contentEpisodeNumber = int(scrapertools.find_single_match(scrapedtitle, '\d+[x|X](\d+)'))
                    qualityscraped = '%s' % capitulo
                else:
                    qualityscraped = '%s' % scrapedtitle

            #If we still do not know the episode number, look for it
            if not item_local.contentEpisodeNumber and item_local.contentType == 'episode':
                try:
                    if scrapertools.find_single_match(scrapedtitle, '(\d+)[x|X](\d+)'):
                        item_local.contentSeason, item_local.contentEpisodeNumber = scrapertools.find_single_match(scrapedtitle, '(\d+)[x|X](\d+)')
                        qualityscraped = '%sx%s' % (str(item_local.contentSeason), str(item_local.contentEpisodeNumber).zfill(2))
                except:
                    pass

            #Look for qualities
            if scrapertools.find_single_match(scrapedenlace, '(\d+p)'):
                qualityscraped += ' ' + scrapertools.find_single_match(scrapedenlace, '(\d+p)')
            if qualityscraped:
                quality = '[%s] %s' % (qualityscraped, item_local.quality)

            if scrapertools.find_single_match(item.url, '(\d+x\d+.*?\d+x\d+)') and not capitulo and not qualityscraped:
                quality = '[%s] %s' % (scrapertools.find_single_match(scrapedenlace, '(\d+x\d+)'), quality)
            elif capitulo and not qualityscraped:
                quality = '[%s] %s' % (capitulo, quality)
            if item.armagedon:                                  #If the situation is catastrophic, flag it
                quality = '[/COLOR][COLOR hotpink][E] [COLOR limegreen]%s' % quality

            if not item.videolibray_emergency_urls:
                #Check whether the server link is active
                mostrar_server = True
                if config.get_setting("hidepremium"):           #If premium servers are not accepted, they are ignored
                    mostrar_server = servertools.is_server_enabled(servidor)

                try:                                            #Fetch the link
                    if mostrar_server:
                        devuelve = servertools.findvideosbyserver(scrapedenlace, servidor)      #does the link exist?
                        if devuelve:
                            enlace = devuelve[0][1]             #The link is saved
                        if not enlace:
                            continue

                        item_local.alive = servertools.check_video_link(enlace, servidor, timeout=timeout)      #is the link alive?
                        #If the link is not active it is ignored
                        if "??" in item_local.alive:            #doubtful
                            item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][%s][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (servidor.capitalize(), quality, str(item_local.language))
                        elif "no" in item_local.alive.lower():  #Not active: prepare it, but do not render it
                            item_local.title = '[COLOR red][%s][/COLOR] [COLOR yellow][%s][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.alive, servidor.capitalize(), quality, str(item_local.language))
                            logger.debug(item_local.alive + ": ALIVE / " + servidor + " / " + enlace)
                            raise
                        else:                                   #It is active
                            item_local.title = '[COLOR yellow][%s][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (servidor.capitalize(), quality, str(item_local.language))

                        #Prepare title and quality, strip empty tags
                        item_local.title = re.sub(r'\s?\[COLOR \w+\]\[\[?\s?\]?\]\[\/COLOR\]', '', item_local.title)
                        item_local.title = re.sub(r'\s?\[COLOR \w+\]\s?\[\/COLOR\]', '', item_local.title)
                        item_local.title = item_local.title.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()
                        quality = re.sub(r'\s?\[COLOR \w+\]\[\[?\s?\]?\]\[\/COLOR\]', '', quality)
                        quality = re.sub(r'\s?\[COLOR \w+\]\s?\[\/COLOR\]', '', quality)
                        quality = quality.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()

                        #Now render the direct link
                        item_local.url = enlace

                        item_local.action = "play"              #Play the video
                        item_local.server = servidor            #Direct server

                        itemlist_t.append(item_local.clone(quality=quality))    #Render to screen if languages are not filtered

                        # Required by FilterTools
                        if config.get_setting('filter_languages', channel) > 0:     #If a language is selected, filter
                            item_local.quality = quality        #Quality
                            itemlist_f = filtertools.get_link(itemlist_f, item_local, list_language)    #Render to screen if not empty
                except:
                    logger.error('ERROR al procesar enlaces DIRECTOS: ' + servidor + ' / ' + scrapedenlace)

                #logger.debug("DIRECTO: " + scrapedenlace + " / title gen/torr: " + item.title + " / " + item_local.title + " / calidad: " + item_local.quality + " / tamaño: " + scrapedsize + " / content: " + item_local.contentTitle + " / " + item_local.contentSerieName)
                #logger.debug(item_local)

if item.videolibray_emergency_urls:             #If we are only caching emergency_urls, stop here
    return item

if len(itemlist_f) > 0:                         #If there are filtered entries...
    itemlist_alt.extend(itemlist_f)             #...render the filtered screen

@@ -997,11 +1058,11 @@ def findvideos(item):
#If there are multiple episodes, sort them
if len(itemlist_alt) > 1 and (item.contentType == 'episode' or item.contentType == 'season'):
    itemlist_alt = sorted(itemlist_alt, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber)))   #classify
    tmdb.set_infoLabels(itemlist_alt, True)     #TMDB for the episode list
itemlist.extend(itemlist_alt)

# Required by AutoPlay
autoplay.start(itemlist, item)                  #Launch AutoPlay

return itemlist

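For direct links the flow above is: resolve the link with servertools.findvideosbyserver(), probe it with servertools.check_video_link(), and only render entries that are not reported dead. A condensed sketch of that gate using the same addon helpers (the wrapper name is ours, and this only runs inside the addon environment):

# Hedged sketch: same helper calls as the loop above, wrapped for readability.
from core import servertools

def resolve_if_alive(enlace, servidor, timeout=5):
    devuelve = servertools.findvideosbyserver(enlace, servidor)   # does the link exist?
    if not devuelve:
        return None
    url = devuelve[0][1]                                          # resolved url
    alive = servertools.check_video_link(url, servidor, timeout=timeout)
    if "no" in alive.lower():
        return None        # dead link: skip it, as the loop above does via raise
    return url, alive      # alive may also be "??" (doubtful), rendered with a [?] tag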
@@ -428,15 +428,17 @@ def findvideos(item):
    #logger.debug(item)

    timeout_find = timeout
    follow_redirects = True
    if item.videolibray_emergency_urls:         #If links are being cached, increase the timeout
        timeout_find = timeout * 2
    elif item.emergency_urls:                   #If called from the video library with cached links...
        timeout_find = timeout / 2              #...reduce the timeout before falling back to the cached links
        follow_redirects = False

    #Download the page data
    data = ''
    try:
        data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url, timeout=timeout_find, follow_redirects=follow_redirects).data)
    except:
        pass

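The hunk above halves the timeout and disables redirects whenever cached emergency links exist, so a dead site fails fast instead of blocking the UI. The same decision in isolation (the helper name is ours; httptools is the addon's downloader, whose follow_redirects parameter is used in the new line above):

# Sketch of the timeout/redirect policy above (wrapper name is an assumption).
from core import httptools

def fetch_with_policy(item, timeout=10):
    timeout_find = timeout
    follow_redirects = True
    if item.videolibray_emergency_urls:      # caching links: be patient
        timeout_find = timeout * 2
    elif item.emergency_urls:                # cached fallbacks exist: fail fast
        timeout_find = timeout / 2
        follow_redirects = False
    return httptools.downloadpage(item.url, timeout=timeout_find,
                                  follow_redirects=follow_redirects).data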
@@ -633,6 +635,8 @@ def episodios(item):
    temp_next = ''
    item.extra = "episodios"

    #logger.debug(item)

    # Fetch up-to-date info for the series. TMDB is essential for the video library
    if not item.infoLabels['tmdb_id']:
        tmdb.set_infoLabels(item, True)
@@ -716,6 +720,8 @@ def episodios(item):

    if item.ow_force == '1':                    #If a rebuild of the series is forced, do it
        item.contentType = "tvshow"
    if not modo_ultima_temp:                    #If the whole series is refreshed instead of just the last season...
        item.contentType = "tvshow"

    temp_lista = []
    temp_bucle = 0

15
plugin.video.alfa/channels/javlin.json
Normal file
@@ -0,0 +1,15 @@
{
    "id": "javlin",
    "name": "javlin",
    "active": true,
    "adult": true,
    "language": ["*"],
    "thumbnail": "http://javl.in/wp-content/uploads/2015/07/favicon1.ico",
    "banner": "",
    "categories": [
        "adult"
    ],
    "settings": [
    ]
}

85
plugin.video.alfa/channels/javlin.py
Normal file
@@ -0,0 +1,85 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse, urllib2, urllib, re
import os, sys
from core import jsontools as json
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
from core import tmdb


host = 'http://javl.in'


def mainlist(item):
    logger.info()
    itemlist = []
    itemlist.append(Item(channel=item.channel, title="Peliculas", action="peliculas", url=host))
    itemlist.append(Item(channel=item.channel, title="Categorias", action="categorias", url=host))
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/?s=%s" % texto
    try:
        return peliculas(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def categorias(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<option class="level-0" value="([^"]+)">([^"]+) \((.*?)\)<'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl, scrapedtitle, number in matches:
        scrapedplot = ""
        scrapedthumbnail = ""
        scrapedtitle = str(scrapedtitle) + " (" + str(number) + ")"
        scrapedurl = "http://javl.in/?cat=" + scrapedurl
        itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True))
    return itemlist


def peliculas(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<div class="featured-wrap clearfix">.*?<a href="([^"]+)" title="([^"]+)".*?src="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        scrapedplot = ""
        itemlist.append(Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True))
    next_page_url = scrapertools.find_single_match(data, '<li><a rel=\'nofollow\' href=\'([^\']+)\' class=\'inactive\'>Next')
    if next_page_url != "":
        next_page_url = urlparse.urljoin(item.url, next_page_url)
        itemlist.append(Item(channel=item.channel, action="peliculas", title="Página Siguiente >>", text_color="blue", url=next_page_url, folder=True))

    return itemlist


def play(item):
    logger.info()
    data = scrapertools.cachePage(item.url)
    itemlist = servertools.find_video_items(data=data)

    for videoitem in itemlist:
        videoitem.title = item.title
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = item.channel
    return itemlist

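javlin.py follows the template every channel here uses: regex the listing, then look for a "Next" anchor and queue a "Página Siguiente >>" item. That pagination step in isolation (plain re/urlparse; the sample markup is invented):

# Standalone pagination sketch (sample HTML is made up).
import re
import urlparse   # Python 2 module, as used throughout the addon

def next_page(data, base_url):
    match = re.search(r"<li><a rel='nofollow' href='([^']+)' class='inactive'>Next", data)
    if not match:
        return None
    return urlparse.urljoin(base_url, match.group(1))

html = "<li><a rel='nofollow' href='/page/2' class='inactive'>Next</a></li>"
print(next_page(html, 'http://javl.in/'))   # -> http://javl.in/page/2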
16
plugin.video.alfa/channels/justporn.json
Normal file
@@ -0,0 +1,16 @@
{
    "id": "justporn",
    "name": "justporn",
    "active": true,
    "adult": true,
    "language": ["*"],
    "thumbnail": "http://xxx.justporno.tv/images/logo.png",
    "banner": "",
    "categories": [
        "adult"
    ],
    "settings": [
    ]
}

89
plugin.video.alfa/channels/justporn.py
Normal file
@@ -0,0 +1,89 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse, urllib2, urllib, re
import os, sys
from core import jsontools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
from core import tmdb

host = 'http://xxx.justporno.tv'


def mainlist(item):
    logger.info()
    itemlist = []
    itemlist.append(Item(channel=item.channel, title="Ultimos", action="peliculas", url=host + "/latest-updates/1/"))
    itemlist.append(Item(channel=item.channel, title="Mejor valoradas", action="peliculas", url=host + "/top-rated/"))
    itemlist.append(Item(channel=item.channel, title="Mas Vistas", action="peliculas", url=host + "/most-popular/"))
    itemlist.append(Item(channel=item.channel, title="Categorias", action="categorias", url=host + "/categories/"))
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/search/%s" % texto
    try:
        return peliculas(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def categorias(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<a class="item" href="([^"]+)" title="([^"]+)">.*?<div class="videos">(\d+) video.*?</div>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl, scrapedtitle, numero in matches:
        scrapedplot = ""
        scrapedthumbnail = ""
        scrapedtitle = scrapedtitle + " (" + numero + ")"
        itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True))

    return itemlist


def peliculas(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<a href="http://xxx.justporno.tv/videos/(\d+)/.*?" title="([^"]+)" >.*?data-original="([^"]+)".*?<div class="duration">(.*?)</div>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedtime in matches:
        scrapedplot = ""
        scrapedtitle = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle
        scrapedurl = "http://xxx.justporno.tv/embed/" + scrapedurl
        itemlist.append(Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True))
    next_page_url = scrapertools.find_single_match(data, '<li class="next"><a href="([^"]+)"')
    if next_page_url != "":
        next_page_url = urlparse.urljoin(item.url, next_page_url)
        itemlist.append(Item(channel=item.channel, action="peliculas", title="Página Siguiente >>", text_color="blue", url=next_page_url, folder=True))
    return itemlist


def play(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t| |<br>|<br/>", "", data)
    patron = 'video_url: \'([^\']+)\''
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl in matches:
        itemlist.append(item.clone(channel=item.channel, action="play", title=scrapedurl, url=scrapedurl, plot="", folder=True))
    return itemlist

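justporn's play() pulls the direct stream out of a `video_url: '...'` assignment in the embed page's player setup. The same extraction in isolation (the sample data and CDN host are invented):

# Standalone sketch of the video_url extraction in play() above.
import re

data = "player.setup({ video_url: 'http://cdn.example/v/123.mp4', autoplay: false });"
for url in re.compile(r"video_url: '([^']+)'", re.DOTALL).findall(data):
    print(url)   # -> http://cdn.example/v/123.mp4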
16
plugin.video.alfa/channels/mporno.json
Normal file
@@ -0,0 +1,16 @@
{
    "id": "mporno",
    "name": "mporno",
    "active": true,
    "adult": true,
    "language": ["*"],
    "thumbnail": "http://mporno.tv/templates/fluidporn/img/logo.png",
    "banner": "",
    "categories": [
        "adult"
    ],
    "settings": [
    ]
}

89
plugin.video.alfa/channels/mporno.py
Normal file
@@ -0,0 +1,89 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse, urllib2, urllib, re
import os, sys
from core import jsontools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
from core import tmdb

host = 'http://mporno.tv'


def mainlist(item):
    logger.info()
    itemlist = []
    itemlist.append(Item(channel=item.channel, title="Novedades", action="peliculas", url=host + "/most-recent/", plot="/most-recent/"))
    itemlist.append(Item(channel=item.channel, title="Mejor valoradas", action="peliculas", url=host + "/top-rated/", plot="/top-rated/"))
    itemlist.append(Item(channel=item.channel, title="Mas vistas", action="peliculas", url=host + "/most-viewed/", plot="/most-viewed/"))
    itemlist.append(Item(channel=item.channel, title="Longitud", action="peliculas", url=host + "/longest/", plot="/longest/"))
    itemlist.append(Item(channel=item.channel, title="Categorias", action="categorias", url=host + "/channels/"))
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/search/videos/%s/page1.html" % texto
    try:
        return peliculas(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def categorias(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<h3><a href="([^"]+)">(.*?)</a> <small>(.*?)</small></h3>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl, scrapedtitle, cantidad in matches:
        scrapedplot = scrapedurl.replace("http://mporno.unblckd.org/", "").replace("page1.html", "")
        scrapedthumbnail = ""
        scrapedtitle = scrapedtitle + " " + cantidad
        itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True))
    return itemlist


def peliculas(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t| |<br>", "", data)

    patron = '<img class="content_image" src="([^"]+).mp4/.*?" alt="([^"]+)".*?this.src="(.*?)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        contentTitle = scrapedtitle
        title = scrapedtitle
        scrapedurl = scrapedurl.replace("/thumbs/", "/videos/") + ".mp4"
        thumbnail = scrapedthumbnail
        plot = item.plot
        year = ""
        itemlist.append(Item(channel=item.channel, action="play", title=title, url=scrapedurl, thumbnail=thumbnail, plot=plot, contentTitle=contentTitle, infoLabels={'year': year}))
    next_page_url = scrapertools.find_single_match(data, '<a href=\'([^\']+)\' class="next">Next >></a>')
    if next_page_url != "":
        next_page_url = urlparse.urljoin(item.url, next_page_url)
        itemlist.append(Item(channel=item.channel, action="peliculas", title="Página Siguiente >>", text_color="blue", url=next_page_url, folder=True))

    # else:
    #     patron = '<a href=\'([^\']+)\' class="next">Next >></a>'
    #     next_page = re.compile(patron,re.DOTALL).findall(data)
    #     next_page = scrapertools.find_single_match(data,'class="last" title=.*?<a href="([^"]+)">')
    #     plot = item.plot
    #     next_page = next_page[0]
    #     next_page = host + plot + next_page
    #     itemlist.append( Item(channel=item.channel, action="peliculas", title=next_page , text_color="blue", url=next_page, plot=plot ) )
    return itemlist

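mporno avoids a second page load by deriving the .mp4 URL from the thumbnail path: swap /thumbs/ for /videos/ and re-append the extension the regex stripped. In isolation (the sample path is invented):

# Sketch of the thumbnail-to-video URL rewrite used in peliculas() above.
thumb = "http://mporno.tv/media/thumbs/abc123"   # first capture group ends before ".mp4/"
video = thumb.replace("/thumbs/", "/videos/") + ".mp4"
print(video)   # -> http://mporno.tv/media/videos/abc123.mp4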
16
plugin.video.alfa/channels/muchoporno.json
Normal file
@@ -0,0 +1,16 @@
{
    "id": "muchoporno",
    "name": "muchoporno",
    "active": true,
    "adult": true,
    "language": ["*"],
    "thumbnail": "https://www.muchoporno.xxx/assets/css/logo/images/sprite-muchoporno.png",
    "banner": "",
    "categories": [
        "adult"
    ],
    "settings": [
    ]
}

85
plugin.video.alfa/channels/muchoporno.py
Normal file
@@ -0,0 +1,85 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse, urllib2, urllib, re
import os, sys
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
from core import jsontools

host = 'https://www.muchoporno.xxx'


def mainlist(item):
    logger.info()
    itemlist = []
    itemlist.append(Item(channel=item.channel, title="Nuevas", action="peliculas", url=host))
    itemlist.append(Item(channel=item.channel, title="Categorias", action="categorias", url=host + "/categorias/"))
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/search/?q=%s" % texto
    try:
        return peliculas(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def categorias(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    patron = '<a class="muestra-escena muestra-categoria" href="([^"]+)" title="([^"]+)">.*?src="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        scrapedplot = ""
        scrapedurl = urlparse.urljoin(item.url, scrapedurl)
        itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True))
    return itemlist


def peliculas(item):
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    patron = '<a class="muestra-escena"\s*href="([^"]+)".*?data-stats-video-name="([^"]+)".*?<img src="([^"]+)".*?<span class="ico-minutos sprite" title="Length"></span>([^"]+)</span>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle, scrapedthumbnail, duracion in matches:
        url = urlparse.urljoin(item.url, scrapedurl)
        title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
        contentTitle = title
        thumbnail = scrapedthumbnail
        plot = ""
        year = ""
        itemlist.append(Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot, contentTitle=contentTitle, infoLabels={'year': year}))
    next_page_url = scrapertools.find_single_match(data, '<li><a href="([^"]+)">Siguiente</a></li>')
    if next_page_url != "":
        next_page_url = urlparse.urljoin(item.url, next_page_url)
        itemlist.append(Item(channel=item.channel, action="peliculas", title="Página Siguiente >>", text_color="blue", url=next_page_url, folder=True))
    return itemlist


def play(item):
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    patron = '<source src="([^"]+)" type="video/mp4"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl in matches:
        title = scrapedurl
        itemlist.append(item.clone(action="play", title=title, fulltitle=item.title, url=scrapedurl))
    return itemlist

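muchoporno's play() collects every <source> tag of type video/mp4 from the page. scrapertools.find_multiple_matches is essentially a re.findall wrapper, so the equivalent standalone looks like this (the sample markup and CDN host are invented):

# Standalone equivalent of the <source> extraction in play() above.
import re

data = '<video><source src="https://cdn.example/v.mp4" type="video/mp4"></video>'
for url in re.findall(r'<source src="([^"]+)" type="video/mp4"', data):
    print(url)   # -> https://cdn.example/v.mp4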
@@ -78,12 +78,13 @@
    "enabled": true,
    "visible": true,
    "lvalues": [
        "Aleatorio",
        "Descargas2020",
        "Tumejortorrent",
        "Torrentrapid",
        "Torrentlocura",
        "Tvsinpagar",
        "Planetatorrent",
        "Mispelisyseries"
    ]
},
@@ -91,7 +92,7 @@
    "id": "clonenewpct1_channels_list",
    "type": "text",
    "label": "Lista de clones de NewPct1 y orden de uso",
    "default": "('1', 'descargas2020', 'http://descargas2020.com/', 'movie, tvshow, season, episode', ''), ('1', 'tumejortorrent', 'http://tumejortorrent.com/', 'movie, tvshow, season, episode', ''), ('1', 'torrentrapid', 'http://torrentrapid.com/', 'movie, tvshow, season, episode', 'serie_episodios'), ('1', 'torrentlocura', 'http://torrentlocura.com/', 'movie, tvshow, season, episode', ''), ('1', 'tvsinpagar', 'http://www.tvsinpagar.com/', 'tvshow, season, episode', ''), ('1', 'planetatorrent', 'http://planetatorrent.com/', 'movie, tvshow, season, episode', ''), ('1', 'mispelisyseries', 'http://mispelisyseries.com/', 'movie', 'search, listado_busqueda')",
    "enabled": true,
    "visible": false
},

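Note that the "default" value above is a Python-literal string rather than nested JSON: newpct1.py turns it into a tuple of 5-tuples with ast.literal_eval. A reduced demonstration of that parse (cut down to two clones):

# How the clone list string above is parsed (reduced to two clones).
import ast

default = "('1', 'descargas2020', 'http://descargas2020.com/', 'movie, tvshow, season, episode', ''), " \
          "('1', 'tumejortorrent', 'http://tumejortorrent.com/', 'movie, tvshow, season, episode', '')"
clone_list = ast.literal_eval(default)   # -> tuple of 5-tuples
for active, channel, host, content_types, info in clone_list:
    print(channel + " -> " + host)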
@@ -6,6 +6,7 @@ import urllib
import urlparse
import datetime
import ast
import random

from channelselector import get_thumb
from core import httptools
@@ -31,27 +32,45 @@ channel_py = 'newpct1'
#Code that lets a single channel serve every NewPct1 clone site
#Load the channel .json to read the value lists in settings
clone_list = channeltools.get_channel_json(channel_py)
for settings in clone_list['settings']:                         #Walk all the settings
    if settings['id'] == "clonenewpct1_channels_list":          #Find the setting
        clone_list = settings['default']                        #Load the clone list
        break
clone_list = ast.literal_eval(clone_list)                       #convert it into an array

host_index = config.get_setting('clonenewpct1_channel_default', channel_py)    #Default clone

clone_list_random = []                                          #Initialise the random clone list

if host_index == 0:                                             #If the chosen clone is "Aleatorio"...
    i = 0
    j = 2                                                       #...mark the last of the "good" clones
    for active_clone, channel_clone, host_clone, contentType_clone, info_clone in clone_list:
        if i <= j and active_clone == "1":
            clone_list_random += [clone_list[i]]                #...add the active "good" clone to the list
        else:
            break
        i += 1
    if clone_list_random:                                       #If there are clones in the random list...
        clone_list = [random.choice(clone_list_random)]         #Pick one clone at random
        #logger.debug(clone_list)
        host_index = 1                                          #mutate the clone number so the next loop processes it

if host_index > 0 or not clone_list_random:                     #If the default clone is not "Aleatorio", or a random one is already selected...
    i = 1
    for active_clone, channel_clone, host_clone, contentType_clone, info_clone in clone_list:
        if i == host_index:
            channel_clone_name = channel_clone                  #Name of the chosen channel
            host = host_clone                                   #URL of the chosen channel
            if active_clone == "1":                             #Check that the clone is active
                break
            channel_clone_name = "*** DOWN ***"                 #is this a massive outage???
            for active_clone, channel_clone, host_clone, contentType_clone, info_clone in clone_list:
                if active_clone == "1":                         #Check that the clone is active
                    channel_clone_name = channel_clone          #Name of the chosen channel
                    host = host_clone                           #URL of the chosen channel
                    break
        i += 1

item = Item()
if item.channel != channel_py:
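When the default clone is "Aleatorio" (index 0), the code above narrows the list to the first few active clones and picks one at random before the normal selection loop runs. The selection idea in isolation (the sample list is shortened and the tuples reduced to three fields):

# Standalone sketch of the "Aleatorio" clone selection above.
import random

clone_list = [('1', 'descargas2020', 'http://descargas2020.com/'),
              ('1', 'tumejortorrent', 'http://tumejortorrent.com/'),
              ('1', 'torrentrapid', 'http://torrentrapid.com/'),
              ('0', 'torrentlocura', 'http://torrentlocura.com/')]

candidates = [c for c in clone_list[:3] if c[0] == '1']   # only the first "good" active clones
if candidates:
    active, channel_clone_name, host = random.choice(candidates)
    print(channel_clone_name + " -> " + host)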
@@ -139,6 +158,11 @@ def submenu(item):
|
||||
itemlist = []
|
||||
item.extra2 = ''
|
||||
|
||||
#Renombramos el canal al nombre de clone inicial desde la URL
|
||||
host = scrapertools.find_single_match(item.url, '(http.?\:\/\/(?:www.)?\w+\.\w+\/)')
|
||||
item.channel_host = host
|
||||
item.category = scrapertools.find_single_match(item.url, 'http.?\:\/\/(?:www.)?(\w+)\.\w+\/').capitalize()
|
||||
|
||||
data = ''
|
||||
try:
|
||||
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url, timeout=timeout).data)
|
||||
@@ -218,6 +242,11 @@ def submenu_novedades(item):
|
||||
itemlist_alt = []
|
||||
item.extra2 = ''
|
||||
|
||||
#Renombramos el canal al nombre de clone inicial desde la URL
|
||||
host = scrapertools.find_single_match(item.url, '(http.?\:\/\/(?:www.)?\w+\.\w+\/)')
|
||||
item.channel_host = host
|
||||
item.category = scrapertools.find_single_match(item.url, 'http.?\:\/\/(?:www.)?(\w+)\.\w+\/').capitalize()
|
||||
|
||||
data = ''
|
||||
timeout_search=timeout * 2 #Más tiempo para Novedades, que es una búsqueda
|
||||
thumb_settings = get_thumb("setting_0.png")
|
||||
@@ -315,6 +344,11 @@ def submenu_novedades(item):
|
||||
def alfabeto(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
|
||||
#Renombramos el canal al nombre de clone inicial desde la URL
|
||||
host = scrapertools.find_single_match(item.url, '(http.?\:\/\/(?:www.)?\w+\.\w+\/)')
|
||||
item.channel_host = host
|
||||
item.category = scrapertools.find_single_match(item.url, 'http.?\:\/\/(?:www.)?(\w+)\.\w+\/').capitalize()
|
||||
|
||||
data = ''
|
||||
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url, timeout=timeout).data)
|
||||
@@ -365,6 +399,12 @@ def alfabeto(item):
|
||||
def listado(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
|
||||
#Renombramos el canal al nombre de clone inicial desde la URL
|
||||
host = scrapertools.find_single_match(item.url, '(http.?\:\/\/(?:www.)?\w+\.\w+\/)')
|
||||
item.channel_host = host
|
||||
item.category = scrapertools.find_single_match(item.url, 'http.?\:\/\/(?:www.)?(\w+)\.\w+\/').capitalize()
|
||||
|
||||
clase = "pelilist" # etiqueta para localizar zona de listado de contenidos
|
||||
url_next_page ='' # Control de paginación
|
||||
cnt_tot = 30 # Poner el num. máximo de items por página
|
||||
@@ -1249,7 +1289,7 @@ def findvideos(item):
|
||||
#Renombramos el canal al nombre de clone elegido. Actualizados URL
|
||||
host = scrapertools.find_single_match(item.url, '(http.?\:\/\/(?:www.)?\w+\.\w+\/)')
|
||||
item.channel_host = host
|
||||
item.category = scrapertools.find_single_match(item.url, 'http.?\:\/\/(?:www.)?(\w+)\.\w+\/').capitalize()
|
||||
item.category = host.capitalize()
|
||||
|
||||
verify_fo = True #Verificamos si el clone a usar está activo
|
||||
item, data = generictools.fail_over_newpct1(item, verify_fo)
|
||||
@@ -1374,10 +1414,11 @@ def findvideos(item):
|
||||
pass
|
||||
|
||||
patron = 'class="btn-torrent">.*?window.location.href = "(.*?)";' #Patron para .torrent
|
||||
patron_mult = 'torrent:check:status|' + patron
|
||||
if 'planetatorrent' in item.channel_host:
|
||||
patron = '<a href="([^"]+)"\s?title="[^"]+"\s?class="btn-torrent"' #Patron para .torrent (planetatorrent)
|
||||
patron_mult += '|<a href="([^"]+)"\s?title="[^"]+"\s?class="btn-torrent"'
|
||||
patron_mult = 'torrent:check:status|' + patron + '|<a href="([^"]+)"\s?title="[^"]+"\s?class="btn-torrent"'
|
||||
if not scrapertools.find_single_match(data, patron):
|
||||
patron_alt = '<a href="([^"]+)"\s?title="[^"]+"\s?class="btn-torrent"' #Patron para .torrent (planetatorrent)
|
||||
if scrapertools.find_single_match(data, patron):
|
||||
patron = patron_alt
|
||||
#Verificamos si se ha cargado una página, y si además tiene la estructura correcta
|
||||
if not data or not scrapertools.find_single_match(data, patron) or not videolibrarytools.verify_url_torrent(scrapertools.find_single_match(data, patron)): # Si no hay datos o url, error
|
||||
item = generictools.web_intervenida(item, data) #Verificamos que no haya sido clausurada
|
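The torrent-button scrape above now tries the historic window.location.href pattern first and only falls back to the planetatorrent-style anchor when the primary one misses. The try-then-fallback idiom in isolation (the helper name and sample data are invented):

# Standalone sketch of the primary/fallback patron logic above.
import re

def first_match(data, patterns):
    for patron in patterns:
        match = re.search(patron, data, re.DOTALL)
        if match:
            return match.group(1)
    return ''

data = '<a href="/t/film.torrent" title="Film" class="btn-torrent">'
patterns = ['class="btn-torrent">.*?window.location.href = "(.*?)";',
            '<a href="([^"]+)"\s?title="[^"]+"\s?class="btn-torrent"']
print(first_match(data, patterns))   # -> /t/film.torrent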
@@ -1475,7 +1516,7 @@ def findvideos(item):
    if not item_local.url:                      #error in the url?
        logger.error("ERROR 02: FINDVIDEOS: El archivo Torrent no existe o ha cambiado la estructura de la Web " + " / PATRON: " + patron + " / DATA: " + data)
        if item.emergency_urls:                 #Are there emergency urls?
            item_local.url = item.emergency_urls[0][0]      #Restore the .torrent url
            item.armagedon = True               #Flag the situation as catastrophic
            itemlist.append(item.clone(action='', title=item.category + ': [COLOR hotpink]Usando enlaces de emergencia[/COLOR]'))

||||
@@ -1821,12 +1862,14 @@ def episodios(item):
|
||||
pass
|
||||
|
||||
modo_ultima_temp_alt = modo_ultima_temp
|
||||
if item.ow_force == "1": #Si hay un traspaso de canal o url, se actualiza todo
|
||||
if item.ow_force == "1": #Si hay un traspaso de canal o url, se actualiza todo
|
||||
modo_ultima_temp_alt = False
|
||||
|
||||
max_temp = 1
|
||||
if item.infoLabels['number_of_seasons']:
|
||||
max_temp = item.infoLabels['number_of_seasons']
|
||||
else:
|
||||
modo_ultima_temp_alt = False #No sabemos cuantas temporadas hay
|
||||
y = []
|
||||
if modo_ultima_temp_alt and item.library_playcounts: #Averiguar cuantas temporadas hay en Videoteca
|
||||
patron = 'season (\d+)'
|
||||
@@ -2043,11 +2086,12 @@ def episodios(item):
|
||||
else: #Si es un solo episodio, se formatea ya
|
||||
item_local.title = "%sx%s -" % (match["season"], str(match["episode"]).zfill(2))
|
||||
|
||||
if first: #Si es el primer episodio, comprobamos que ...
|
||||
first = False
|
||||
if item_local.contentSeason < max_temp: #... la temporada sea la última ...
|
||||
modo_ultima_temp_alt = False #... si no, por seguridad leeremos toda la serie
|
||||
|
||||
if modo_ultima_temp_alt and item.library_playcounts: #Si solo se actualiza la última temporada de Videoteca
|
||||
if first: #Si es el primer episodio, comprobamos que ...
|
||||
first = False
|
||||
if item_local.contentSeason < max_temp: #... la temporada sea la última ...
|
||||
modo_ultima_temp_alt = False #... si no, por seguridad leeremos toda la serie
|
||||
if item_local.contentSeason < max_temp and modo_ultima_temp_alt:
|
||||
list_pages = [] #Sale del bucle de leer páginas
|
||||
break #Sale del bucle actual del FOR de episodios por página
|
||||
@@ -2055,7 +2099,7 @@ def episodios(item):
|
||||
# continue
|
||||
|
||||
if season_display > 0:
|
||||
if item_local.contentSeason > season_display:
|
||||
if item_local.contentSeason > season_display or (not modo_ultima_temp_alt and item_local.contentSeason != season_display):
|
||||
continue
|
||||
elif item_local.contentSeason < season_display:
|
||||
list_pages = [] #Sale del bucle de leer páginas
|
||||
|
||||
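The episodios hunks above all serve one guard: the videolibrary refresh is limited to the newest season unless a channel/url handover (ow_force), an unknown season count, or a first episode belonging to an older season forces a full rescan. A minimal standalone sketch of that decision, with illustrative names only (the real code works on Item objects and library playcounts):

    def needs_full_rescan(first_episode_season, number_of_seasons, has_playcounts, ow_force=False):
        """True when the whole series must be re-read instead of only the last season."""
        if ow_force:                 # channel or url handover: update everything
            return True
        if not number_of_seasons:    # unknown season count: play it safe
            return True
        if not has_playcounts:       # nothing registered in the videolibrary yet
            return True
        # a first episode from an older season means the page is not sorted as expected
        return first_episode_season < number_of_seasons

    print(needs_full_rescan(3, 3, True))   # False: last-season-only mode holds
    print(needs_full_rescan(2, 3, True))   # True: the page starts at an older season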
16
plugin.video.alfa/channels/pandamovie.json
Normal file
@@ -0,0 +1,16 @@
{
    "id": "pandamovie",
    "name": "pandamovie",
    "active": true,
    "adult": true,
    "language": ["*"],
    "thumbnail": "http://www.pandamovies.com/templates/pandamovies/images/logo.png?v1482157699",
    "banner": "",
    "categories": [
        "adult"
    ],
    "settings": [

    ]
}
81
plugin.video.alfa/channels/pandamovie.py
Normal file
@@ -0,0 +1,81 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import jsontools as json
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
from core import tmdb

host = 'https://pandamovies.pw'


def mainlist(item):
    logger.info()
    itemlist = []
    itemlist.append( Item(channel=item.channel, title="Peliculas" , action="peliculas", url=host + "/list-movies"))
    itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/list-movies"))
    itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host + "/list-movies"))
    itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/?s=%s" % texto
    try:
        return peliculas(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def categorias(item):
    itemlist = []
    data = scrapertools.cache_page(item.url)
    if item.title == "Categorias" :
        data = scrapertools.get_match(data,'<a href="#">Genres</a>(.*?)</ul>')
    else:
        data = scrapertools.get_match(data,'<a href="#">Studios</a>(.*?)</ul>')
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<li><a title=".*?" href="([^"]+)">([^<]+)</a>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedtitle in matches:
        scrapedplot = ""
        scrapedthumbnail = ""
        scrapedurl = scrapedurl.replace("https:", "")
        scrapedurl = "https:" + scrapedurl
        itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
    return itemlist


def peliculas(item):
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    patron = '<a class="clip-link" title="([^"]+)" href="([^"]+)".*?'
    patron += 'src="([^"]+)"'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedtitle,scrapedurl,scrapedthumbnail in matches:
        url = urlparse.urljoin(item.url,scrapedurl)
        title = scrapedtitle
        thumbnail = scrapedthumbnail
        plot = ""
        year = ""
        itemlist.append( Item(channel=item.channel, action="findvideos" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = title, infoLabels={'year':year} ))
    next_page_url = scrapertools.find_single_match(data,'<a class="nextpostslink" rel="next" href="([^"]+)">')
    if next_page_url =="":
        next_page_url = scrapertools.find_single_match(data,'<a.*?href="([^"]+)" >Next »</a>')
    if next_page_url!="":
        next_page_url = urlparse.urljoin(item.url,next_page_url)
        itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
    return itemlist
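pandamovie.py is the skeleton that every channel added below repeats: download the page, flatten whitespace, pull the entries with a single DOTALL regex, and chase a next-page link. A channel-agnostic sketch of that loop (hypothetical helper, not part of core/):

    import re

    def scrape_list(html, item_patron, next_patron):
        """Return (entries, next_page_url) from raw HTML, the way these channels do."""
        html = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", html)     # same flattening step the channels use
        entries = re.compile(item_patron, re.DOTALL).findall(html)
        next_match = re.search(next_patron, html, re.DOTALL)
        return entries, next_match.group(1) if next_match else ""

    html = ('<a class="clip-link" title="Demo" href="/v/1"><img src="/t/1.jpg">'
            '<a class="nextpostslink" rel="next" href="/page/2">')
    entries, next_url = scrape_list(
        html,
        '<a class="clip-link" title="([^"]+)" href="([^"]+)".*?src="([^"]+)"',
        '<a class="nextpostslink" rel="next" href="([^"]+)">')
    print(entries)    # [('Demo', '/v/1', '/t/1.jpg')]
    print(next_url)   # /page/2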
16
plugin.video.alfa/channels/perfectgirls.json
Normal file
@@ -0,0 +1,16 @@
{
    "id": "perfectgirls",
    "name": "perfectgirls",
    "active": true,
    "adult": true,
    "language": ["*"],
    "thumbnail": "http://www.perfectgirls.net/images/no-sprite/perfect-girl-regular.png",
    "banner": "",
    "categories": [
        "adult"
    ],
    "settings": [

    ]
}
86
plugin.video.alfa/channels/perfectgirls.py
Normal file
@@ -0,0 +1,86 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb

host = 'http://www.perfectgirls.net'


def mainlist(item):
    logger.info()
    itemlist = []
    itemlist.append( Item(channel=item.channel, title="Peliculas" , action="peliculas", url=host))
    itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
    itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/search/%s/" % texto
    try:
        return peliculas(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def categorias(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<li class="additional_list__item"><a href="([^"]+)">([^"]+)</a>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedtitle in matches:
        scrapedplot = ""
        scrapedthumbnail = ""
        url = urlparse.urljoin(item.url,scrapedurl) + "/1"
        itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=url , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
    return itemlist


def peliculas(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<div class="list__item_link"><a href="([^"]+)" title="([^"]+)">.*?data-original="([^"]+)".*?<time>([^"]+)</time>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedtitle,scrapedthumbnail,time in matches:
        plot = ""
        contentTitle = scrapedtitle
        title = "[COLOR yellow]" + time + "[/COLOR] " + scrapedtitle
        scrapedthumbnail = "http:" + scrapedthumbnail
        url = urlparse.urljoin(item.url,scrapedurl)
        year = ""
        itemlist.append( Item(channel=item.channel, action="findvideos" , title=title , url=url, thumbnail=scrapedthumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
    next_page = scrapertools.find_single_match(data, '<a class="btn_wrapper__btn" href="([^"]+)">Next</a></li>')
    if next_page:
        next_page = urlparse.urljoin(item.url, next_page)
        itemlist.append( Item(channel=item.channel, action="peliculas", title="Página Siguiente >>" , text_color="blue", url=next_page ))
    return itemlist


def findvideos(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<source src="([^"]+)" res="\d+" label="([^"]+)"'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedtitle in matches:
        itemlist.append(item.clone(action="play", title=scrapedtitle, fulltitle = item.title, url=scrapedurl))
    return itemlist
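Unlike the channels that hand the whole page to servertools, perfectgirls' findvideos reads the HTML5 player's <source> tags directly, one playable entry per quality. A self-contained sketch of that idea (sample markup, not the real page):

    import re

    def extract_sources(html):
        """Return [(label, url), ...] for every labelled <source> tag."""
        patron = '<source src="([^"]+)" res="\d+" label="([^"]+)"'
        return [(label, url) for url, label in re.compile(patron, re.DOTALL).findall(html)]

    html = ('<video><source src="http://cdn.example/v_480.mp4" res="480" label="480p">'
            '<source src="http://cdn.example/v_1080.mp4" res="1080" label="1080p"></video>')
    for label, url in extract_sources(html):
        print(label, url)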
16
plugin.video.alfa/channels/porn300.json
Normal file
@@ -0,0 +1,16 @@
{
    "id": "porn300",
    "name": "porn300",
    "active": true,
    "adult": true,
    "language": ["*"],
    "thumbnail": "https://www.porn300.com/android-icon-192x192.png",
    "banner": "",
    "categories": [
        "adult"
    ],
    "settings": [

    ]
}
112
plugin.video.alfa/channels/porn300.py
Normal file
@@ -0,0 +1,112 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
from core import jsontools

host = 'https://www.porn300.com'


def mainlist(item):
    logger.info()
    itemlist = []
    itemlist.append( Item(channel=item.channel, title="Nuevas" , action="peliculas", url=host + "/es/videos/"))
    itemlist.append( Item(channel=item.channel, title="Mas Vistas" , action="peliculas", url=host + "/es/mas-vistos/"))
    itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="peliculas", url=host + "/es/mas-votados/"))
    itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host + "/es/canales/"))
    itemlist.append( Item(channel=item.channel, title="Pornstars" , action="catalogo", url=host + "/es/pornostars/?sort=views"))
    itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/es/categorias/?sort=videos"))
    itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/es/buscar/?q=%s" % texto
    try:
        return peliculas(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def catalogo(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<a itemprop="url" href="([^"]+)".*?title="([^"]+)">.*?<img itemprop="image" src=([^"]+) alt=.*?</svg> ([^"]+)</li>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches:
        scrapedplot = ""
        scrapedtitle = scrapedtitle + " (" + cantidad +")"
        scrapedurl = urlparse.urljoin(item.url,scrapedurl)
        itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
    next_page_url = scrapertools.find_single_match(data,'<a class="btn btn-primary--light btn-pagination" itemprop="name" href="([^"]+)" title="Siguiente">')
    if next_page_url!="":
        next_page_url = urlparse.urljoin(item.url,next_page_url)
        itemlist.append( Item(channel=item.channel , action="catalogo" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
    return itemlist


def categorias(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<a itemprop="url" href="([^"]+)".*?title="([^"]+)">.*?<img itemprop="image" src="([^"]+)".*?</svg>([^"]+) </small>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches:
        scrapedplot = ""
        scrapedtitle = scrapedtitle + " (" + cantidad +")"
        scrapedurl = urlparse.urljoin(item.url,scrapedurl)
        itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
    next_page_url = scrapertools.find_single_match(data,'<a class="btn btn-primary--light btn-pagination" itemprop="name" href="([^"]+)" title="Siguiente">')
    if next_page_url!="":
        next_page_url = urlparse.urljoin(item.url,next_page_url)
        itemlist.append( Item(channel=item.channel , action="categorias" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
    return itemlist


def peliculas(item):
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<a itemprop="url" href="([^"]+)" data-video-id="\d+" title="([^"]+)">.*?<img itemprop="thumbnailUrl" src="([^"]+)".*?</svg>\s+(.*?) </li>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedtitle,scrapedthumbnail,duracion in matches:
        url = urlparse.urljoin(item.url,scrapedurl)
        title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
        contentTitle = title
        thumbnail = scrapedthumbnail
        plot = ""
        year = ""
        itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
    next_page_url = scrapertools.find_single_match(data,'<a class="btn btn-primary--light btn-pagination" itemprop="name" href="([^"]+)" title="Siguiente">')
    if next_page_url!="":
        next_page_url = urlparse.urljoin(item.url,next_page_url)
        itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
    return itemlist


def play(item):
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    patron = '<source src="([^"]+)"'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl in matches:
        url = scrapedurl
        itemlist.append(item.clone(action="play", title=url, fulltitle = item.title, url=url))
    return itemlist
16
plugin.video.alfa/channels/porneq.json
Normal file
@@ -0,0 +1,16 @@
{
    "id": "porneq",
    "name": "porneq",
    "active": true,
    "adult": true,
    "language": ["*"],
    "thumbnail": "http://porneq.com/uploads/porneq-logo-home-png554cf1a970e6d.png",
    "banner": "",
    "categories": [
        "adult"
    ],
    "settings": [

    ]
}
71
plugin.video.alfa/channels/porneq.py
Normal file
@@ -0,0 +1,71 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys

from core import jsontools as json
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
from core import tmdb

host = 'http://porneq.com'


def mainlist(item):
    logger.info()
    itemlist = []
    itemlist.append( Item(channel=item.channel, title="Ultimos" , action="peliculas", url=host + "/videos/browse/"))
    itemlist.append( Item(channel=item.channel, title="Mas Vistos" , action="peliculas", url=host + "/videos/most-viewed/"))
    itemlist.append( Item(channel=item.channel, title="Mas Votado" , action="peliculas", url=host + "/videos/most-liked/"))
    itemlist.append( Item(channel=item.channel, title="Big Tits" , action="peliculas", url=host + "/show/big+tits&sort=w"))
    itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/show/%s" % texto
    try:
        return peliculas(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def peliculas(item):
    logger.info()
    itemlist = []
    data = scrapertools.cache_page(item.url)
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<a class="clip-link" data-id="\d+" title="([^"]+)" href="([^"]+)">.*?<img src="([^"]+)".*?<span class="timer">(.*?)</span></div>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedtitle,scrapedurl,scrapedthumbnail,scrapedtime in matches:
        scrapedplot = ""
        scrapedtitle = "[COLOR yellow]" + (scrapedtime) + "[/COLOR] " + scrapedtitle
        itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
    next_page_url = scrapertools.find_single_match(data,'<nav id="page_nav"><a href="(.*?)"')
    if next_page_url!="":
        itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
    return itemlist


def play(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '"video-setup".*?file: "(.*?)",'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl in matches:
        scrapedurl = str(scrapedurl)
        itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=scrapedurl,
                             thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False))
    return itemlist
16
plugin.video.alfa/channels/pornhive.json
Normal file
@@ -0,0 +1,16 @@
{
    "id": "pornhive",
    "name": "pornhive",
    "active": true,
    "adult": true,
    "language": ["*"],
    "thumbnail": "http://www.pornhive.tv/assets/images/logo.png",
    "banner": "",
    "categories": [
        "adult"
    ],
    "settings": [

    ]
}
88
plugin.video.alfa/channels/pornhive.py
Normal file
@@ -0,0 +1,88 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys

from core import jsontools as json
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
from core import tmdb

host = 'http://www.pornhive.tv/en'


def mainlist(item):
    logger.info()
    itemlist = []
    itemlist.append( Item(channel=item.channel, title="MOVIES" , action="peliculas", url=host))
    # No busca los videos: tiene un captcha
    # itemlist.append( Item(channel=item.channel, title="CHANNELS" , action="catalogo", url=host))
    itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
    itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/search?keyword=%s" % texto
    try:
        return peliculas(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def categorias(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = scrapertools.get_match(data,'Categories(.*?)<li class="divider"')
    patron = '<li><a href="([^"]+)" title="[^"]+">(.*?)</a>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedtitle in matches:
        scrapedplot = ""
        scrapedthumbnail = ""
        itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
    return itemlist


def peliculas(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<div class="col-lg-3 col-md-3 col-sm-4 col-xs-6 col-thumb panel-video-\d+">.*?'
    patron += '<a href="([^"]+)".*?'
    patron += 'data-src="([^"]+)".*?'
    patron += 'alt="([^"]+)"'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
        title = scrapedtitle
        thumbnail = scrapedthumbnail
        plot = ""
        year = ""
        itemlist.append( Item(channel=item.channel, action="play" , title=title , url=scrapedurl, thumbnail=thumbnail, plot=plot, contentTitle=title, infoLabels={'year':year} ))
    next_page_url = scrapertools.find_single_match(data,'<li><a href="([^"]+)" data-ci-pagination-page="\d+" rel="next">Next ›')
    if next_page_url!="":
        itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
    return itemlist


def play(item):
    logger.info()
    data = scrapertools.cachePage(item.url)
    itemlist = servertools.find_video_items(data=data)

    for videoitem in itemlist:
        videoitem.title = item.fulltitle
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = item.channel
    return itemlist
@@ -75,7 +75,7 @@ def lista(item):
        else:
            scrapedurl = urlparse.urljoin(host, scrapedurl)
        if not scrapedthumbnail.startswith("https"):
            scrapedthumbnail = "https:%s" % scrapedthumbnail
            scrapedthumbnail = host + "%s" % scrapedthumbnail
        if duration:
            scrapedtitle = "%s - %s" % (duration, scrapedtitle)
        if '>HD<' in quality:
@@ -83,7 +83,6 @@ def lista(item):

        itemlist.append(item.clone(action=action, title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, contentThumbnail=scrapedthumbnail,
                                   fanart=scrapedthumbnail))

    # Extrae la marca de siguiente página
    if item.extra:
        next_page = scrapertools.find_single_match(data, '<li class="next">.*?from_videos\+from_albums:(\d+)')
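The lista() hunk above swaps a protocol-relative fix ("https:" + path) for a host-relative one (host + path). urlparse.urljoin, which these channels already use elsewhere, covers both shapes; a quick check:

    try:
        from urlparse import urljoin           # Python 2, as in the addon
    except ImportError:
        from urllib.parse import urljoin       # Python 3, for testing outside Kodi

    host = "https://example.org"
    print(urljoin(host, "//cdn.example/t.jpg"))   # protocol-relative -> https://cdn.example/t.jpg
    print(urljoin(host + "/", "img/t.jpg"))       # host-relative     -> https://example.org/img/t.jpg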
15
plugin.video.alfa/channels/qwertty.json
Normal file
@@ -0,0 +1,15 @@
{
    "id": "qwertty",
    "name": "qwertty",
    "active": true,
    "adult": true,
    "language": ["*"],
    "thumbnail": "http://qwertty.net/wp-content/uploads/2018/05/favicon.ico",
    "banner": "",
    "categories": [
        "adult"
    ],
    "settings": [

    ]
}
88
plugin.video.alfa/channels/qwertty.py
Normal file
@@ -0,0 +1,88 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import jsontools as json
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
from core import tmdb

host = 'http://qwertty.net'


def mainlist(item):
    logger.info()
    itemlist = []
    itemlist.append( Item(channel=item.channel, title="Recientes" , action="peliculas", url=host))
    itemlist.append( Item(channel=item.channel, title="Mas Vistas" , action="peliculas", url=host + "/?filter=most-viewed"))
    itemlist.append( Item(channel=item.channel, title="Mas popular" , action="peliculas", url=host + "/?filter=popular"))
    itemlist.append( Item(channel=item.channel, title="Mejor valoradas" , action="peliculas", url=host + "/?filter=random"))
    itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/?s=%s" % texto
    try:
        return peliculas(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def categorias(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<li><a href="([^<]+)">(.*?)</a></li>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedtitle in matches:
        scrapedplot = ""
        scrapedthumbnail = ""
        scrapedurl = host + scrapedurl
        itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
    return itemlist


def peliculas(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<article id="post-\d+".*?<a href="([^"]+)" title="([^"]+)">.*?<img data-src="(.*?)".*?<span class="duration"><i class="fa fa-clock-o"></i>([^<]+)</span>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedtitle,scrapedthumbnail,duracion in matches:
        scrapedplot = ""
        title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
        itemlist.append( Item(channel=item.channel, action="play", title=title , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
    next_page_url = scrapertools.find_single_match(data,'<li><a href="([^"]+)">Next</a>')
    if next_page_url=="":
        next_page_url = scrapertools.find_single_match(data,'<li><a class="current">.*?<li><a href=\'([^\']+)\' class="inactive">')

    if next_page_url!="":
        next_page_url = urlparse.urljoin(item.url,next_page_url)
        itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
    return itemlist


def play(item):
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    pornhub = scrapertools.find_single_match(data,'<iframe src="([^"]+)"')
    data = scrapertools.cachePage(pornhub)
    patron = '"videoUrl":"(.*?)"'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl in matches:
        scrapedurl = scrapedurl.replace("\/", "/")
        itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=scrapedurl,
                             thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False))
    return itemlist
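qwertty's play() is a two-hop extraction: the page embeds a player iframe, and the iframe's HTML carries the real media URL in a JSON field with escaped slashes. A sketch with a stubbed downloader standing in for scrapertools.cachePage:

    import re

    def resolve_embedded_video(page_html, download):
        """Follow the first iframe and pull every "videoUrl" out of its HTML."""
        iframe_url = re.search('<iframe src="([^"]+)"', page_html).group(1)
        player_html = download(iframe_url)
        media = re.compile('"videoUrl":"(.*?)"', re.DOTALL).findall(player_html)
        return [u.replace("\\/", "/") for u in media]    # undo JSON \/ escaping

    fake_pages = {"https://player.example/embed/42": '{"videoUrl":"https:\\/\\/cdn.example\\/v42.mp4"}'}
    page = '<iframe src="https://player.example/embed/42"></iframe>'
    print(resolve_embedded_video(page, fake_pages.get))   # ['https://cdn.example/v42.mp4']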
15
plugin.video.alfa/channels/redtube.json
Normal file
@@ -0,0 +1,15 @@
{
    "id": "redtube",
    "name": "redtube",
    "active": true,
    "adult": true,
    "language": ["*"],
    "thumbnail": "https://thumbs-cdn.redtube.com/www-static/cdn_files/redtube/images/pc/logo/redtube_logo.png",
    "banner": "",
    "categories": [
        "adult"
    ],
    "settings": [

    ]
}
110
plugin.video.alfa/channels/redtube.py
Normal file
@@ -0,0 +1,110 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------

import re
import urlparse
from core import httptools
from core import scrapertools
from core.item import Item
from platformcode import logger

host = 'https://es.redtube.com'


def mainlist(item):
    logger.info()
    itemlist = []
    itemlist.append( Item(channel=item.channel, title="Nuevas" , action="peliculas", url=host + "/newest"))
    itemlist.append( Item(channel=item.channel, title="Mas Vistas" , action="peliculas", url=host + "/mostviewed"))
    itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="peliculas", url=host + "/top"))
    itemlist.append( Item(channel=item.channel, title="Pornstars" , action="catalogo", url=host + "/pornstar"))
    itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories"))
    itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/?search=%s" % texto
    try:
        return peliculas(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def catalogo(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<a class="pornstar_link js_mpop js-pop" href="([^"]+)".*?"([^"]+)"\s+title="([^"]+)".*?<div class="ps_info_count">\s+([^"]+)\s+Videos'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches:
        scrapedplot = ""
        scrapedtitle = scrapedtitle + " [COLOR yellow]" + cantidad + "[/COLOR] "
        scrapedurl = urlparse.urljoin(item.url,scrapedurl)
        itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
    next_page_url = scrapertools.find_single_match(data,'<a id="wp_navNext" class="js_pop_page" href="([^"]+)">')
    if next_page_url!="":
        next_page_url = urlparse.urljoin(item.url,next_page_url)
        itemlist.append( Item(channel=item.channel , action="catalogo" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
    return itemlist


def categorias(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<div class="category_item_wrapper">.*?<a href="([^"]+)".*?data-thumb_url="([^"]+)".*?alt="([^"]+)".*?<span class="category_count">\s+([^"]+) Videos'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches:
        scrapedplot = ""
        scrapedtitle = scrapedtitle + " (" + cantidad + ")"
        scrapedurl = urlparse.urljoin(item.url,scrapedurl)
        itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
    return itemlist


def peliculas(item):
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<img id="img_.*?data-path="([^"]+)".*?<span class="duration">(.*?)</a>.*?<a title="([^"]+)" href="([^"]+)">'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedthumbnail,duration,scrapedtitle,scrapedurl in matches:
        url = urlparse.urljoin(item.url,scrapedurl)
        scrapedhd = scrapertools.find_single_match(duration, '<span class="hd-video-text">(.*?)</span>')
        if scrapedhd == 'HD':
            duration = scrapertools.find_single_match(duration, 'HD</span>(.*?)</span>')
            title = "[COLOR yellow]" + duration + "[/COLOR] " + "[COLOR red]" + scrapedhd + "[/COLOR] " + scrapedtitle
        else:
            title = "[COLOR yellow]" + duration + "[/COLOR] " + scrapedtitle
        title = title.replace("&nbsp;</span>", "").replace("&nbsp;", "")
        scrapedthumbnail = scrapedthumbnail.replace("{index}.", "1.")
        plot = ""
        year = ""
        itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=scrapedthumbnail, plot=plot, contentTitle = title, infoLabels={'year':year} ))
    next_page_url = scrapertools.find_single_match(data,'<a id="wp_navNext" class="js_pop_page" href="([^"]+)">')
    if next_page_url!="":
        next_page_url = urlparse.urljoin(item.url,next_page_url)
        itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
    return itemlist


def play(item):
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    patron = '"defaultQuality":true,"format":"",.*?"videoUrl"\:"([^"]+)"'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl in matches:
        url = scrapedurl.replace("\/", "/")
        itemlist.append(item.clone(action="play", title=url, fulltitle = item.title, url=url))
    return itemlist
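redtube's play() keys on the player's JSON config: it takes the entry flagged "defaultQuality":true and unescapes its URL. A sketch over a simplified sample blob (structure assumed):

    import re

    def default_quality_url(player_json):
        patron = '"defaultQuality":true,"format":"",.*?"videoUrl":"([^"]+)"'
        m = re.search(patron, player_json, re.DOTALL)
        return m.group(1).replace("\\/", "/") if m else ""

    blob = '[{"defaultQuality":true,"format":"","videoUrl":"https:\\/\\/cdn.example\\/clip.mp4"}]'
    print(default_quality_url(blob))   # https://cdn.example/clip.mp4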
@@ -27,7 +27,7 @@ list_servers = ['rapidvideo', 'streamango', 'fastplay', 'openload', 'netu', 'vid

__channel__='repelis'

host = "https://repelis.io"
host = "https://repelisgo.io"

try:
    __modo_grafico__ = config.get_setting('modo_grafico', __channel__)
@@ -93,7 +93,7 @@ def peliculas(item):
        bloquex = scrapertools.find_single_match(data, 'window.__NUXT__={.*?movies":(.*?\])')
        dict = jsontools.load(bloquex)
    else:
        dd = httptools.downloadpage("https://repelis.io/graph", post=jsontools.dump(item.post), headers=headers).data
        dd = httptools.downloadpage(host + "/graph", post=jsontools.dump(item.post), headers=headers).data
        dict = jsontools.load(dd)["data"]["movies"]
    for datos in dict:
        scrapedurl = host + "/pelicula/" + datos["slug"] + "-" + datos["id"]
@@ -222,6 +222,7 @@ def findvideos(item):


def play(item):
    logger.info()
    itemlist = []
    url1 = httptools.downloadpage(host + item.url, follow_redirects=False, only_headers=True).headers.get("location", "")
    if "storage" in url1:

@@ -64,7 +64,7 @@ def list_all(item):

    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        url = scrapedurl

        scrapedtitle = scrapedtitle.lower().replace('enlace permanente a', '').capitalize()
        contentSerieName = scrapedtitle
        action = 'seasons'
16
plugin.video.alfa/channels/sexgalaxy.json
Normal file
@@ -0,0 +1,16 @@
{
    "id": "sexgalaxy",
    "name": "sexgalaxy",
    "active": true,
    "adult": true,
    "language": ["*"],
    "thumbnail": "http://sexgalaxy.net/wp-content/uploads/2016/11/logogalaxy_red.png",
    "banner": "",
    "categories": [
        "adult"
    ],
    "settings": [

    ]
}
98
plugin.video.alfa/channels/sexgalaxy.py
Normal file
@@ -0,0 +1,98 @@
# -*- coding: utf-8 -*-

import re
import urlparse
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger

host = 'http://sexgalaxy.net'


def mainlist(item):
    logger.info()
    itemlist = []
    itemlist.append( Item(channel=item.channel, title="Ultimos" , action="peliculas", url=host + "/new-releases/"))
    itemlist.append( Item(channel=item.channel, title="Peliculas" , action="peliculas", url=host + "/full-movies/"))
    itemlist.append( Item(channel=item.channel, title="Canales" , action="canales", url=host))
    itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
    itemlist.append( Item(channel=item.channel, title="Buscar" , action="search"))
    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/?s=%s" % texto
    try:
        return peliculas(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def canales (item):
    logger.info()
    itemlist = []
    data = scrapertools.cache_page(host)
    data = scrapertools.get_match(data,'Top Networks</a>(.*?)</ul>')
    patron = '<li id=.*?<a href="(.*?)">(.*?)</a></li>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedtitle in matches:
        scrapedplot = ""
        scrapedthumbnail = ""
        scrapedtitle = str(scrapedtitle)
        thumbnail = urlparse.urljoin(item.url,scrapedthumbnail)
        itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
    return itemlist


def categorias(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = scrapertools.get_match(data,'More Categories</a>(.*?)</ul>')
    patron = '<li id=.*?<a href="(.*?)">(.*?)</a></li>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedtitle in matches:
        scrapedplot = ""
        scrapedthumbnail = ""
        scrapedtitle = str(scrapedtitle)
        thumbnail = urlparse.urljoin(item.url,scrapedthumbnail)
        itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
    return itemlist


def peliculas(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<div class="post-img small-post-img">.*?<a href="(.*?)" title="(.*?)">.*?<img src="(.*?)"'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
        scrapedplot = ""
        itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , fulltitle=scrapedtitle , plot=scrapedplot , folder=True) )
    next_page_url = scrapertools.find_single_match(data,'<a class="next page-numbers" href="([^"]+)"')
    if next_page_url!="":
        itemlist.append( Item(channel=item.channel , action="peliculas" , title="Next page >>" , text_color="blue", url=next_page_url , folder=True) )
    return itemlist


def play(item):
    logger.info()
    data = scrapertools.cachePage(item.url)
    itemlist = servertools.find_video_items(data=data)
    for videoitem in itemlist:
        videoitem.title = item.title
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = item.channel
    return itemlist
16
plugin.video.alfa/channels/sexkino.json
Normal file
@@ -0,0 +1,16 @@
{
    "id": "sexkino",
    "name": "sexkino",
    "active": true,
    "adult": true,
    "language": ["*"],
    "thumbnail": "http://sexkino.to/wp-content/uploads/2016/12/sexkino.to_.jpg",
    "banner": "",
    "categories": [
        "adult"
    ],
    "settings": [

    ]
}
113
plugin.video.alfa/channels/sexkino.py
Normal file
@@ -0,0 +1,113 @@
# -*- coding: utf-8 -*-

import re
import urlparse
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger

host = 'http://sexkino.to'


def mainlist(item):
    logger.info("pelisalacarta.sexkino mainlist")
    itemlist = []
    itemlist.append( Item(channel=item.channel, title="New" , action="peliculas", url= host + "/movies/"))
    itemlist.append( Item(channel=item.channel, title="Año" , action="anual", url= host))
    itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url= host))
    itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist


def search(item, texto):
    logger.info("pelisalacarta.sexkino search")
    texto = texto.replace(" ", "+")
    item.url = host + "/?s=%s" % texto
    try:
        return peliculas(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def categorias(item):
    logger.info("pelisalacarta.sexkino categorias")
    itemlist = []
    data = scrapertools.cachePage(item.url)
    patron = '<li class="cat-item cat-item-.*?<a href="(.*?)" >(.*?)</a> <i>(.*?)</i>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedtitle,cantidad in matches:
        scrapedplot = ""
        scrapedthumbnail = ""
        scrapedtitle = scrapedtitle + " ("+cantidad+")"
        itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
    return itemlist


def anual(item):
    logger.info("pelisalacarta.sexkino anual")
    itemlist = []
    data = scrapertools.cachePage(item.url)
    patron = '<li><a href="([^<]+)">([^<]+)</a>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedtitle in matches:
        scrapedplot = ""
        scrapedthumbnail = ""
        itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
    return itemlist


def peliculas(item):
    logger.info("pelisalacarta.sexkino peliculas")
    itemlist = []
    data = scrapertools.cachePage(item.url)
    #hay que hacer que coincida con el buscador
    patron = '<article.*?<a href="([^"]+)">.*?<img src="([^"]+)" alt="([^"]+)".*?>(\d+)</span>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedthumbnail,scrapedtitle,date in matches:
        scrapedplot = ""
        scrapedtitle = scrapedtitle + " (" + date + ")"
        itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle , url=scrapedurl, thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
    next_page_url = scrapertools.find_single_match(data,'resppages.*?<a href="([^"]+)" ><span class="icon-chevron-right">')
    if next_page_url!="":
        next_page_url = urlparse.urljoin(item.url,next_page_url)
        itemlist.append( Item(channel=item.channel , action="peliculas" , title="Next page >>" , text_color="blue", url=next_page_url , folder=True) )
    return itemlist


def findvideos(item):
    logger.info("pelisalacarta.sexkino findvideos")
    itemlist = []
    data = scrapertools.cachePage(item.url)
    patron = '<tr id=(.*?)</tr>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for match in matches:
        url = scrapertools.find_single_match(match,'href="([^"]+)" target')
        title = scrapertools.find_single_match(match,'<td><img src=.*?> (.*?)</td>')
        itemlist.append(item.clone(action="play", title=title, url=url))
    patron = '<iframe class="metaframe rptss" src="([^"]+)".*?<li><a class="options" href="#option-\d+">\s+(.*?)\s+<'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedtitle in matches:
        url = scrapedurl
        title = scrapedtitle
        itemlist.append(item.clone(action="play", title=title, url=url))
    return itemlist


def play(item):
    logger.info("pelisalacarta.sexkino play")
    data = scrapertools.cachePage(item.url)
    itemlist = servertools.find_video_items(data=data)
    for videoitem in itemlist:
        videoitem.title = item.title
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = item.channel
    return itemlist
16
plugin.video.alfa/channels/sexofilm.json
Normal file
@@ -0,0 +1,16 @@
{
    "id": "sexofilm",
    "name": "sexofilm",
    "active": true,
    "adult": true,
    "language": ["*"],
    "thumbnail": "https://i0.wp.com/sexofilm.com/xbox/wp-content/uploads/2016/06/SexoFilm-Logo-230x54-LOGO-MOBILE.png",
    "banner": "",
    "categories": [
        "adult"
    ],
    "settings": [

    ]
}
110
plugin.video.alfa/channels/sexofilm.py
Normal file
@@ -0,0 +1,110 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys

from core import jsontools as json
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
from core import tmdb

host = 'http://sexofilm.com'


def mainlist(item):
    logger.info()
    itemlist = []
    itemlist.append( Item(channel=item.channel, title="Peliculas" , action="peliculas", url=host + "/xtreme-adult-wing/adult-dvds/"))
    itemlist.append( Item(channel=item.channel, title="Parody" , action="peliculas", url=host + "/xtreme-adult-wing/porn-parodies/"))
    itemlist.append( Item(channel=item.channel, title="Videos" , action="peliculas", url=host + "/xtreme-adult-wing/porn-clips-movie-scene/"))
    itemlist.append( Item(channel=item.channel, title="SexMUSIC" , action="peliculas", url=host + "/topics/sexo-music-videos/"))
    itemlist.append( Item(channel=item.channel, title="Xshows" , action="peliculas", url=host + "/xshows/"))
    itemlist.append( Item(channel=item.channel, title="Canal" , action="catalogo", url=host))
    itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
    itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/?s=%s" % texto
    try:
        return peliculas(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def categorias(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = scrapertools.get_match(data,'<div class="tagcloud">(.*?)<p>')
    patron = '<a href="(.*?)".*?>(.*?)</a>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedtitle in matches:
        itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , folder=True) )
    return itemlist


def catalogo(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = scrapertools.get_match(data,'>Best Porn Studios</a>(.*?)</ul>')
    patron = '<a href="(.*?)">(.*?)</a>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedtitle in matches:
        itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , folder=True) )
    return itemlist


def anual(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<li><a href="([^<]+)">([^<]+)</a>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedtitle in matches:
        scrapedplot = ""
        scrapedthumbnail = ""
        itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
    return itemlist


def peliculas(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<div class="post-thumbnail.*?<a href="([^"]+)" title="(.*?)".*?src="([^"]+)"'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
        scrapedplot = ""
        scrapedtitle = scrapedtitle.replace(" Porn DVD", "")
        itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle , url=scrapedurl, thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
    next_page_url = scrapertools.find_single_match(data,'<a class="nextpostslink" rel="next" href="([^"]+)">»</a>')
    if next_page_url!="":
        next_page_url = urlparse.urljoin(item.url,next_page_url)
        itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )

    return itemlist


def play(item):
    logger.info()
    data = httptools.downloadpage(item.url).data
    itemlist = servertools.find_video_items(data=data)
    for videoitem in itemlist:
        videoitem.title = item.title
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = item.channel
    return itemlist
16
plugin.video.alfa/channels/spankwire.json
Normal file
@@ -0,0 +1,16 @@
{
    "id": "spankwire",
    "name": "spankwire",
    "active": true,
    "adult": true,
    "language": ["*"],
    "thumbnail": "https://cdn1-static-spankwire.spankcdn.net/apple-touch-icon-precomposed.png",
    "banner": "",
    "categories": [
        "adult"
    ],
    "settings": [

    ]
}
97
plugin.video.alfa/channels/spankwire.py
Normal file
97
plugin.video.alfa/channels/spankwire.py
Normal file
@@ -0,0 +1,97 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#------------------------------------------------------------
|
||||
import urlparse,urllib2,urllib,re
|
||||
import os, sys
|
||||
from platformcode import config, logger
|
||||
from core import scrapertools
|
||||
from core.item import Item
|
||||
from core import servertools
|
||||
from core import httptools
|
||||
from core import tmdb
|
||||
from core import jsontools
|
||||
|
||||
host = 'https://www.spankwire.com'
|
||||
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="peliculas", url=host + "/recentvideos/straight"))
|
||||
itemlist.append( Item(channel=item.channel, title="Mas Vistas" , action="peliculas", url=host + "/home1/Straight/Month/Views"))
|
||||
itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="peliculas", url=host + "/home1/Straight/Month/Rating"))
|
||||
itemlist.append( Item(channel=item.channel, title="Longitud" , action="peliculas", url=host + "/home1/Straight/Month/Duration"))
|
||||
#itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/Straight"))
|
||||
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
|
||||
return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
|
||||
logger.info()
|
||||
texto = texto.replace(" ", "+")
|
||||
item.url = host + "/search/?q=%s" % texto
|
||||
try:
|
||||
return peliculas(item)
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
logger.error("%s" % line)
|
||||
return []
|
||||
|
||||
|
||||
def categorias(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t| |<br>", "", data)
|
||||
patron = '<div class="category-thumb"><a href="([^"]+)".*?<img src="([^"]+)" alt="([^"]+)" />.*?<span>([^"]+)</span>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
scrapertools.printMatches(matches)
|
||||
for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches:
|
||||
scrapedplot = ""
|
||||
scrapedthumbnail = "http:" + scrapedthumbnail
|
||||
scrapedtitle = scrapedtitle + " (" + cantidad +")"
|
||||
scrapedurl = urlparse.urljoin(item.url,scrapedurl) + "/Submitted/59"
|
||||
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
|
||||
return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = scrapertools.cachePage(item.url)
|
||||
data = re.sub(r"\n|\r|\t| |<br>", "", data)
|
||||
patron = '<div class="video_thumb_wrapper">.*?<a href="([^"]+)".*?data-original="([^"]+)".*?title="([^"]+)".*?<div class="video_thumb_wrapper__thumb_info video_thumb_wrapper__duration">(.*?)</div>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedurl,scrapedthumbnail,scrapedtitle,duracion in matches:
|
||||
url = urlparse.urljoin(item.url,scrapedurl)
|
||||
title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
|
||||
contentTitle = title
|
||||
thumbnail = scrapedthumbnail
|
||||
plot = ""
|
||||
year = ""
|
||||
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
|
||||
next_page_url = scrapertools.find_single_match(data,'<link rel="next" href="([^"]+)" />')
|
||||
#Para el buscador
|
||||
if next_page_url=="":
|
||||
next_page_url = scrapertools.find_single_match(data,'<div class="paginator_wrapper__buttons"><a class="" href="([^"]+)"')
|
||||
next_page_url = urlparse.urljoin(item.url,next_page_url)
|
||||
if next_page_url!="":
|
||||
next_page_url = urlparse.urljoin(item.url,next_page_url)
|
||||
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
|
||||
return itemlist
|
||||
|
||||
|
||||
def play(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = scrapertools.cachePage(item.url)
|
||||
data = scrapertools.get_match(data,'Copy Embed Code(.*?)For Desktop')
|
||||
patron = '<div class="shareDownload_container__item__dropdown">.*?<a href="([^"]+)"'
|
||||
matches = scrapertools.find_multiple_matches(data, patron)
|
||||
for scrapedurl in matches:
|
||||
url = scrapedurl
|
||||
if url=="#":
|
||||
scrapedurl = scrapertools.find_single_match(data,'playerData.cdnPath480 = \'([^\']+)\'')
|
||||
itemlist.append(item.clone(action="play", title=scrapedurl, fulltitle = scrapedurl, url=scrapedurl))
|
||||
|
||||
return itemlist
|
||||
|
||||
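The pagination in peliculas() above prefers the <link rel="next"> tag and only falls back to the paginator button markup (used on search result pages). A standalone sketch of that two-step lookup with plain re; the sample HTML is illustrative, not captured from the site:

# -*- coding: utf-8 -*-
# Sketch: same two-step pagination lookup as peliculas(), with plain re (Python 2).
import re

def next_page(data):
    m = re.search('<link rel="next" href="([^"]+)" />', data)
    if not m:  # search pages lack the link tag; try the paginator button instead
        m = re.search('<div class="paginator_wrapper__buttons"><a class="" href="([^"]+)"', data)
    return m.group(1) if m else ""

print next_page('<link rel="next" href="/videos?page=2" />')  # -> /videos?page=2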
16
plugin.video.alfa/channels/sunporno.json
Normal file
@@ -0,0 +1,16 @@
{
    "id": "sunporno",
    "name": "sunporno",
    "active": true,
    "adult": true,
    "language": ["*"],
    "thumbnail": "https://sunstatic.fuckandcdn.com/sun/sunstatic/v31/common/sunporno/img/logo_top.png",
    "banner": "",
    "categories": [
        "adult"
    ],
    "settings": [
    ]
}
91
plugin.video.alfa/channels/sunporno.py
Normal file
@@ -0,0 +1,91 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
import urlparse, urllib2, urllib, re
import os, sys
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
from core import jsontools

host = 'https://www.sunporno.com'


def mainlist(item):
    logger.info()
    itemlist = []
    itemlist.append(Item(channel=item.channel, title="Nuevas", action="peliculas", url=host + "/most-recent/"))
    itemlist.append(Item(channel=item.channel, title="Popular", action="peliculas", url=host + "/most-viewed/date-last-week/"))
    itemlist.append(Item(channel=item.channel, title="Mejor valorada", action="peliculas", url=host + "/top-rated/date-last-week/"))
    itemlist.append(Item(channel=item.channel, title="Mas largas", action="peliculas", url=host + "/long-movies/date-last-month/"))
    itemlist.append(Item(channel=item.channel, title="Categorias", action="categorias", url=host + "/channels"))
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/search/%s/" % texto
    try:
        return peliculas(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def categorias(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = scrapertools.get_match(data, '<div class="category-item">(.*?)<div id="goupBlock"')
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<a href="([^"]+)">\s*(.*?)\s*<'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl, scrapedtitle in matches:
        scrapedplot = ""
        scrapedthumbnail = ""
        scrapedurl = scrapedurl + "/most-recent/"
        itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True))
    return itemlist


def peliculas(item):
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    data = scrapertools.get_match(data, '<div id="mainThumbsContainer" class="thumbs-container">(.*?)<div class="clearfix">')
    patron = '<p class="btime">([^"]+)</p>.*?href="([^"]+)".*?src="([^"]+)".*?title="([^"]+)">'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for duracion, scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        url = scrapedurl
        contentTitle = scrapedtitle
        title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
        thumbnail = scrapedthumbnail
        plot = ""
        year = ""
        itemlist.append(Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot, contentTitle=contentTitle, infoLabels={'year': year}))
    next_page_url = scrapertools.find_single_match(data, '<li><a class="pag-next" href="(.*?)">Next ></a>')
    if next_page_url != "":
        next_page_url = urlparse.urljoin(item.url, next_page_url)
        itemlist.append(Item(channel=item.channel, action="peliculas", title="Página Siguiente >>", text_color="blue", url=next_page_url, folder=True))
    return itemlist


def play(item):
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    patron = '<video src="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl in matches:
        scrapedurl = scrapedurl.replace("https:", "http:")
        itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=scrapedurl,
                             thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False))
    return itemlist
16
plugin.video.alfa/channels/tabooshare.json
Normal file
@@ -0,0 +1,16 @@
{
    "id": "tabooshare",
    "name": "tabooshare",
    "active": true,
    "adult": true,
    "language": ["*"],
    "thumbnail": "http://tabooshare.com/wp-content/uploads/2017/06/cropped-TSa-180x180.png",
    "banner": "",
    "categories": [
        "adult"
    ],
    "settings": [
    ]
}
73
plugin.video.alfa/channels/tabooshare.py
Normal file
@@ -0,0 +1,73 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
import urlparse, urllib2, urllib, re
import os, sys
from core import jsontools as json
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
from core import tmdb

host = 'http://tabooshare.com'


def mainlist(item):
    logger.info()
    itemlist = []
    itemlist.append(Item(channel=item.channel, title="Ultimos", action="peliculas", url=host))
    itemlist.append(Item(channel=item.channel, title="Categorias", action="categorias", url=host))

    # itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist


def categorias(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = scrapertools.get_match(data, '<h3>Categories</h3>(.*?)</ul>')
    patron = '<li class="cat-item cat-item-\d+"><a href="(.*?)" >(.*?)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl, scrapedtitle in matches:
        scrapedplot = ""
        scrapedthumbnail = ""
        scrapedtitle = str(scrapedtitle)
        itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True))
    return itemlist


def peliculas(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<div class="post" id="post-\d+">.*?<a href="([^"]+)" title="(.*?)"><img src="(.*?)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        scrapedplot = ""
        scrapedtitle = scrapedtitle.replace(" – Free Porn Download", "")
        itemlist.append(Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True))
    next_page_url = scrapertools.find_single_match(data, '<span class="current">.*?<a href="(.*?)"')
    if next_page_url == "http://NaughtyPorn.net/":
        next_page_url = scrapertools.find_single_match(data, '<span class="current">.*?<a href=\'(.*?)\'')
    if next_page_url != "":
        itemlist.append(Item(channel=item.channel, action="peliculas", title="Página Siguiente >>", text_color="blue", url=next_page_url, folder=True))

    return itemlist


def play(item):
    logger.info()
    data = scrapertools.cachePage(item.url)
    itemlist = servertools.find_video_items(data=data)

    for videoitem in itemlist:
        videoitem.title = item.title
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = item.channel
    return itemlist
@@ -3,11 +3,11 @@
     "name": "ThumbZilla",
     "active": true,
     "adult": true,
-    "language": "en",
+    "language": "*",
     "fanart": "https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/adults/xthearebg.jpg",
-    "thumbnail": "https://image.spreadshirtmedia.com/image-server/v1/designs/1002274824,width=178,height=178/thumbzilla-womens-white-tee-big-logo.png",
+    "thumbnail": "https://ci.phncdn.com/www-static/thumbzilla/images/pc/logo.png",
     "banner": "",
     "categories": [
         "adult"
     ],
     "settings": [
@@ -35,3 +35,4 @@
         }
     ]
+}
@@ -13,7 +13,7 @@ from channelselector import get_thumb

 __channel__ = "thumbzilla"

-host = 'https://www.thumbzilla.com/'
+host = 'https://www.thumbzilla.com'
 try:
     __modo_grafico__ = config.get_setting('modo_grafico', __channel__)
     __perfil__ = int(config.get_setting('perfil', __channel__))
@@ -44,36 +44,28 @@ def mainlist(item):
     logger.info()
     itemlist = []
     itemlist.append(Item(channel=__channel__, action="videos", title="Más Calientes", url=host,
                          viewmode="movie", thumbnail=get_thumb("channels_adult.png")))

-    itemlist.append(Item(channel=__channel__, title="Nuevas", url=host + 'newest',
-                         viewmode="movie", thumbnail=get_thumb("/channels_adult.png")))
+    itemlist.append(Item(channel=__channel__, title="Nuevas", url=host + '/newest',
+                         action="videos", viewmode="movie_with_plot", viewcontent='movies',
+                         thumbnail=get_thumb("channels_adult.png")))

-    itemlist.append(Item(channel=__channel__, title="Tendencias", url=host + 'tending',
+    itemlist.append(Item(channel=__channel__, title="Tendencias", url=host + '/trending',
                          action="videos", viewmode="movie_with_plot", viewcontent='movies',
                          thumbnail=get_thumb("channels_adult.png")))

-    itemlist.append(Item(channel=__channel__, title="Mejores Videos", url=host + 'top',
+    itemlist.append(Item(channel=__channel__, title="Mejores Videos", url=host + '/top',
                          action="videos", viewmode="movie_with_plot", viewcontent='movies',
                          thumbnail=get_thumb("channels_adult.png")))

-    itemlist.append(Item(channel=__channel__, title="Populares", url=host + 'popular',
+    itemlist.append(Item(channel=__channel__, title="Populares", url=host + '/popular',
                          action="videos", viewmode="movie_with_plot", viewcontent='movies',
                          thumbnail=get_thumb("channels_adult.png")))

-    itemlist.append(Item(channel=__channel__, title="Videos en HD", url=host + 'hd',
+    itemlist.append(Item(channel=__channel__, title="Videos en HD", url=host + '/hd',
                          action="videos", viewmode="movie_with_plot", viewcontent='movies',
                          thumbnail=get_thumb("channels_adult.png")))

-    itemlist.append(Item(channel=__channel__, title="Caseros", url=host + 'hd',
+    itemlist.append(Item(channel=__channel__, title="Caseros", url=host + '/hd',
                          action="videos", viewmode="movie_with_plot", viewcontent='homemade',
                          thumbnail=get_thumb("channels_adult.png")))

     itemlist.append(Item(channel=__channel__, title="Categorías", action="categorias",
-                         url=host + 'categories/', viewmode="movie_with_plot", viewcontent='movies',
+                         url=host + '/categories/', viewmode="movie_with_plot", viewcontent='movies',
                          thumbnail=get_thumb("channels_adult.png")))

     itemlist.append(Item(channel=__channel__, title="Buscador", action="search", url=host,
                          thumbnail=get_thumb("channels_adult.png"), extra="buscar"))
     return itemlist
@@ -100,7 +92,6 @@ def search(item, texto):
 def videos(item):
     logger.info()
     itemlist = []
-
     data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
     patron = '<a class="[^"]+" href="([^"]+)">'  # url
@@ -108,20 +99,15 @@ def videos(item):
     patron += '<span class="title">([^<]+)</span>.*?'  # title
     patron += '<span class="duration">([^<]+)</span>'  # time
     matches = scrapertools.find_multiple_matches(data, patron)

     for scrapedurl, scrapedthumbnail, scrapedtitle, time in matches:
         title = "[%s] %s" % (time, scrapedtitle)

-        itemlist.append(Item(channel=item.channel, action='findvideos', title=title, thumbnail=scrapedthumbnail,
+        itemlist.append(Item(channel=item.channel, action='play', title=title, thumbnail=scrapedthumbnail,
                              url=host + scrapedurl, contentTile=scrapedtitle, fanart=scrapedthumbnail))

     paginacion = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />').replace('amp;', '')

     if paginacion:
         itemlist.append(Item(channel=item.channel, action="videos",
                              thumbnail=thumbnail % 'rarrow',
                              title="\xc2\xbb Siguiente \xc2\xbb", url=paginacion))

     return itemlist
@@ -130,12 +116,9 @@ def categorias(item):
     itemlist = []
     data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
     # logger.info(data)
     patron = 'class="checkHomepage"><a href="([^"]+)".*?'  # url
     patron += '<span class="count">([^<]+)</span>'  # title, vids

     matches = re.compile(patron, re.DOTALL).findall(data)

     for scrapedurl, vids in matches:
         scrapedtitle = scrapedurl.replace('/categories/', '').replace('-', ' ').title()
         title = "%s (%s)" % (scrapedtitle, vids.title())
@@ -144,23 +127,17 @@ def categorias(item):
         itemlist.append(Item(channel=item.channel, action="videos", fanart=thumbnail,
                              title=title, url=url, thumbnail=thumbnail,
                              viewmode="movie_with_plot", folder=True))

     return itemlist


-def findvideos(item):
+def play(item):
     itemlist = []
     data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|amp;|\s{2}|&nbsp;", "", data)
     # logger.info(data)
-    patron = '"quality":"([^"]+)","videoUrl":"([^"]+)"'
+    patron = '<li><a class="qualityButton active" data-quality="([^"]+)">([^"]+)</a></li>'
     matches = scrapertools.find_multiple_matches(data, patron)

-    for calidad, scrapedurl in matches:
-        scrapedurl = scrapedurl.replace('\\', '')
+    for scrapedurl, calidad in matches:
         title = "[COLOR yellow](%s)[/COLOR] %s" % (calidad, item.contentTile)
-        server = servertools.get_server_from_url(scrapedurl)
-
-        itemlist.append(item.clone(action='play', title=title, server=server, mediatype='movie', url=scrapedurl))
+        itemlist.append(item.clone(channel=item.channel, action="play", title=item.title, url=scrapedurl, folder=True))

     return itemlist
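For reference, a quick standalone check of the new quality patron used in the rewritten play(); the HTML snippet is illustrative, not captured from the site, and the first capture group is whatever the data-quality attribute holds (the channel code treats it as the stream URL):

# -*- coding: utf-8 -*-
# Sketch: exercise the new data-quality patron with plain re (Python 2).
import re

sample = '<li><a class="qualityButton active" data-quality="http://cdn.example.com/v720.mp4">720p</a></li>'
patron = '<li><a class="qualityButton active" data-quality="([^"]+)">([^"]+)</a></li>'
for scrapedurl, calidad in re.findall(patron, sample):
    print calidad, scrapedurl  # -> 720p http://cdn.example.com/v720.mp4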
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-

-import os
+import os, traceback

 from channelselector import get_thumb
 from core import filetools
@@ -35,7 +35,8 @@ def channel_config(item):
 def list_movies(item, silent=False):
     logger.info()
     itemlist = []
+    dead_list = []
+    zombie_list = []
     for raiz, subcarpetas, ficheros in filetools.walk(videolibrarytools.MOVIES_PATH):
         for f in ficheros:
             if f.endswith(".nfo"):
@@ -47,10 +48,57 @@ def list_movies(item, silent=False):
                     from platformcode import xbmc_videolibrary
                     xbmc_videolibrary.mark_content_as_watched_on_alfa(nfo_path)
                 except:
-                    pass
+                    logger.error(traceback.format_exc())

             head_nfo, new_item = videolibrarytools.read_nfo(nfo_path)

+            if len(new_item.library_urls) > 1:
+                multicanal = True
+            else:
+                multicanal = False
+
+            ## verifica la existencia de los canales, en caso de no existir el canal se pregunta si se quieren
+            ## eliminar los enlaces de dicho canal
+            for canal_org in new_item.library_urls:
+                canal = generictools.verify_channel(canal_org)
+                logger.error(canal)
+                try:
+                    channel_verify = __import__('channels.%s' % canal, fromlist=["channels.%s" % canal])
+                    logger.debug('El canal %s parece correcto' % channel_verify)
+                except:
+                    dead_item = Item(multicanal=multicanal,
+                                     contentType='movie',
+                                     dead=canal,
+                                     path=raiz,
+                                     nfo=nfo_path,
+                                     library_urls=new_item.library_urls,
+                                     infoLabels={'title': new_item.contentTitle})
+                    if canal not in dead_list and canal not in zombie_list:
+                        confirm = platformtools.dialog_yesno('Videoteca',
+                                                             'Parece que el canal [COLOR red]%s[/COLOR] ya no existe.' % canal.upper(),
+                                                             'Deseas eliminar los enlaces de este canal?')
+                    elif canal in zombie_list:
+                        confirm = False
+                    else:
+                        confirm = True
+
+                    if confirm:
+                        delete(dead_item)
+                        if canal not in dead_list:
+                            dead_list.append(canal)
+                        continue
+                    else:
+                        if canal not in zombie_list:
+                            zombie_list.append(canal)
+
+            if len(dead_list) > 0:
+                for canal in dead_list:
+                    if canal in new_item.library_urls:
+                        del new_item.library_urls[canal]
+
             new_item.nfo = nfo_path
             new_item.path = raiz
             new_item.thumbnail = new_item.contentThumbnail
@@ -67,7 +115,7 @@ def list_movies(item, silent=False):
             try:
                 new_item, new_item, overwrite = generictools.redirect_clone_newpct1(new_item, head_nfo, new_item, raiz)
             except:
-                pass
+                logger.error(traceback.format_exc())

             # Menu contextual: Marcar como visto/no visto
             visto = new_item.library_playcounts.get(os.path.splitext(f)[0], 0)
@@ -85,10 +133,8 @@ def list_movies(item, silent=False):
                 num_canales -= 1
             if num_canales > 1:
                 texto_eliminar = config.get_localized_string(60018)
-                multicanal = True
             else:
                 texto_eliminar = config.get_localized_string(60019)
-                multicanal = False

             new_item.context = [{"title": texto_visto,
                                  "action": "mark_content_as_watched",
@@ -113,10 +159,12 @@ def list_movies(item, silent=False):
 def list_tvshows(item):
     logger.info()
     itemlist = []
+    dead_list = []
+    zombie_list = []
     # Obtenemos todos los tvshow.nfo de la videoteca de SERIES recursivamente
     for raiz, subcarpetas, ficheros in filetools.walk(videolibrarytools.TVSHOWS_PATH):
         for f in ficheros:
             if f == "tvshow.nfo":
                 tvshow_path = filetools.join(raiz, f)
                 # logger.debug(tvshow_path)
@@ -127,10 +175,58 @@ def list_tvshows(item):
                     from platformcode import xbmc_videolibrary
                     xbmc_videolibrary.mark_content_as_watched_on_alfa(tvshow_path)
                 except:
-                    pass
+                    logger.error(traceback.format_exc())

                 head_nfo, item_tvshow = videolibrarytools.read_nfo(tvshow_path)
-                try:    #A veces da errores aleatorios, por no encontrar el .nfo. Probablemente problemas de timing
+
+                if len(item_tvshow.library_urls) > 1:
+                    multicanal = True
+                else:
+                    multicanal = False
+
+                ## verifica la existencia de los canales, en caso de no existir el canal se pregunta si se quieren
+                ## eliminar los enlaces de dicho canal
+                for canal in item_tvshow.library_urls:
+                    canal = generictools.verify_channel(canal)
+                    try:
+                        channel_verify = __import__('channels.%s' % canal, fromlist=["channels.%s" % canal])
+                        logger.debug('El canal %s parece correcto' % channel_verify)
+                    except:
+                        dead_item = Item(multicanal=multicanal,
+                                         contentType='tvshow',
+                                         dead=canal,
+                                         path=raiz,
+                                         nfo=tvshow_path,
+                                         library_urls=item_tvshow.library_urls,
+                                         infoLabels={'title': item_tvshow.contentTitle})
+                        if canal not in dead_list and canal not in zombie_list:
+                            confirm = platformtools.dialog_yesno('Videoteca',
+                                                                 'Parece que el canal [COLOR red]%s[/COLOR] ya no existe.' % canal.upper(),
+                                                                 'Deseas eliminar los enlaces de este canal?')
+                        elif canal in zombie_list:
+                            confirm = False
+                        else:
+                            confirm = True
+
+                        if confirm:
+                            delete(dead_item)
+                            if canal not in dead_list:
+                                dead_list.append(canal)
+                            continue
+                        else:
+                            if canal not in zombie_list:
+                                zombie_list.append(canal)
+
+                if len(dead_list) > 0:
+                    for canal in dead_list:
+                        if canal in item_tvshow.library_urls:
+                            del item_tvshow.library_urls[canal]
+
+                ### continua la carga de los elementos de la videoteca
+                try:    #A veces da errores aleatorios, por no encontrar el .nfo. Probablemente problemas de timing
                     item_tvshow.title = item_tvshow.contentTitle
                     item_tvshow.path = raiz
                     item_tvshow.nfo = tvshow_path
@@ -146,6 +242,7 @@ def list_tvshows(item):

             except:
                 logger.error('No encuentra: ' + str(tvshow_path))
+                logger.error(traceback.format_exc())
                 continue

             # Menu contextual: Buscar automáticamente nuevos episodios o no
@@ -164,10 +261,8 @@ def list_tvshows(item):
                 num_canales -= 1
             if num_canales > 1:
                 texto_eliminar = config.get_localized_string(60024)
-                multicanal = True
             else:
                 texto_eliminar = config.get_localized_string(60025)
-                multicanal = False

             item_tvshow.context = [{"title": texto_visto,
                                     "action": "mark_content_as_watched",
@@ -189,7 +284,14 @@ def list_tvshows(item):
             #               "channel": "videolibrary"}]

             # logger.debug("item_tvshow:\n" + item_tvshow.tostring('\n'))
-            itemlist.append(item_tvshow)
+
+            ## verifica la existencia de los canales ##
+            logger.debug(item_tvshow)
+            if len(item_tvshow.library_urls) > 0:
+                itemlist.append(item_tvshow)

     if itemlist:
         itemlist = sorted(itemlist, key=lambda it: it.title.lower())
@@ -368,7 +470,7 @@ def findvideos(item):
     try:
         item_json, it, overwrite = generictools.redirect_clone_newpct1(item_json)
     except:
-        pass
+        logger.error(traceback.format_exc())
     item_json.contentChannel = "local"
     # Soporte para rutas relativas en descargas
     if filetools.is_relative(item_json.url):
@@ -413,7 +515,7 @@ def findvideos(item):
     try:
         item_canal, it, overwrite = generictools.redirect_clone_newpct1(item_canal)
     except:
-        pass
+        logger.error(traceback.format_exc())
     nom_canal = item_canal.channel

     # Importamos el canal de la parte seleccionada
@@ -427,7 +529,7 @@ def findvideos(item):
     try:
         item_json, it, overwrite = generictools.redirect_clone_newpct1(item_json)
     except:
-        pass
+        logger.error(traceback.format_exc())
     list_servers = []

     try:
@@ -452,6 +554,7 @@ def findvideos(item):
         template = "An exception of type %s occured. Arguments:\n%r"
         message = template % (type(ex).__name__, ex.args)
         logger.error(message)
+        logger.error(traceback.format_exc())

     # Cambiarle el titulo a los servers añadiendoles el nombre del canal delante y
     # las infoLabels y las imagenes del item si el server no tiene
@@ -778,7 +881,7 @@ def mark_tvshow_as_updatable(item):
 def delete(item):
     def delete_all(_item):
         for file in filetools.listdir(_item.path):
-            if file.endswith(".strm") or file.endswith(".nfo") or file.endswith(".json"):
+            if file.endswith(".strm") or file.endswith(".nfo") or file.endswith(".json") or file.endswith(".torrent"):
                 filetools.remove(filetools.join(_item.path, file))
         raiz, carpeta_serie, ficheros = filetools.walk(_item.path).next()
         if ficheros == []:
@@ -803,39 +906,45 @@ def delete(item):
         heading = config.get_localized_string(70084)
     else:
         heading = config.get_localized_string(70085)

     if item.multicanal:
         # Obtener listado de canales
-        opciones = [config.get_localized_string(70086) % k.capitalize() for k in item.library_urls.keys() if
-                    k != "downloads"]
-        opciones.insert(0, heading)
+        if item.dead == '':
+            opciones = [config.get_localized_string(70086) % k.capitalize() for k in item.library_urls.keys() if
+                        k != "downloads"]
+            opciones.insert(0, heading)

-        index = platformtools.dialog_select(config.get_localized_string(30163), opciones)
+            index = platformtools.dialog_select(config.get_localized_string(30163), opciones)

-        if index == 0:
-            # Seleccionado Eliminar pelicula/serie
-            delete_all(item)
+            if index == 0:
+                # Seleccionado Eliminar pelicula/serie
+                delete_all(item)

-        elif index > 0:
-            # Seleccionado Eliminar canal X
-            canal = opciones[index].replace(config.get_localized_string(70079), "").lower()
+            elif index > 0:
+                # Seleccionado Eliminar canal X
+                canal = opciones[index].replace(config.get_localized_string(70079), "").lower()
+            else:
+                return
+        else:
+            canal = item.dead

         num_enlaces = 0
         for fd in filetools.listdir(item.path):
-            if fd.endswith(canal + '].json'):
+            if fd.endswith(canal + '].json') or scrapertools.find_single_match(fd, '%s]_\d+.torrent' % canal):
                 if filetools.remove(filetools.join(item.path, fd)):
                     num_enlaces += 1

         if num_enlaces > 0:
             # Actualizar .nfo
             head_nfo, item_nfo = videolibrarytools.read_nfo(item.nfo)
             del item_nfo.library_urls[canal]
+            if item_nfo.emergency_urls and item_nfo.emergency_urls.get(canal, False):
+                del item_nfo.emergency_urls[canal]
             filetools.write(item.nfo, head_nfo + item_nfo.tojson())

         msg_txt = config.get_localized_string(70087) % (num_enlaces, canal)
         logger.info(msg_txt)
         platformtools.dialog_notification(heading, msg_txt)
         platformtools.itemlist_refresh()

     else:
         if platformtools.dialog_yesno(heading,
@@ -867,7 +976,6 @@ def check_season_playcount(item, season):

 def check_tvshow_playcount(item, season):
     logger.info()
-    # logger.debug(item)
     if season:
         temporadas_serie = 0
         temporadas_vistas_serie = 0
@@ -267,11 +267,19 @@ def downloadpage(url, post=None, headers=None, timeout=None, follow_redirects=Tr

     if response["headers"].get('content-encoding') == 'gzip':
         logger.info("Descomprimiendo...")
+        data_alt = response["data"]
         try:
             response["data"] = gzip.GzipFile(fileobj=StringIO(response["data"])).read()
             logger.info("Descomprimido")
         except:
-            logger.info("No se ha podido descomprimir")
+            logger.info("No se ha podido descomprimir con gzip. Intentando con zlib")
+            response["data"] = data_alt
+            try:
+                import zlib
+                response["data"] = zlib.decompressobj(16 + zlib.MAX_WBITS).decompress(response["data"])
+            except:
+                logger.info("No se ha podido descomprimir con zlib")
+                response["data"] = data_alt

     # Anti Cloudflare
     if bypass_cloudflare and count_retries < 5:
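The hunk above chains two decompressors: gzip.GzipFile first, then zlib with wbits = 16 + zlib.MAX_WBITS, which tells zlib to expect a gzip container and tolerates some streams the stricter gzip module rejects (for instance, trailing junk after the gzip trailer). A standalone sketch of the same fallback; the payload is constructed locally for illustration:

# -*- coding: utf-8 -*-
# Sketch of the gzip-then-zlib fallback (Python 2).
import gzip
import zlib
from StringIO import StringIO

buf = StringIO()
g = gzip.GzipFile(fileobj=buf, mode='wb')
g.write('hola')
g.close()
payload = buf.getvalue() + 'junk'  # trailing garbage makes gzip.read() fail

try:
    data = gzip.GzipFile(fileobj=StringIO(payload)).read()
except IOError:
    # fall back exactly as downloadpage() does; unused trailing bytes are ignored
    data = zlib.decompressobj(16 + zlib.MAX_WBITS).decompress(payload)
print data  # -> hola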
@@ -5,6 +5,7 @@

 import errno
 import math
+import traceback

 from core import filetools
 from core import scraper
@@ -195,6 +196,7 @@ def save_movie(item):
             item_nfo.emergency_urls.update({item.channel: True})
     except:
         logger.error("No se ha podido guardar las urls de emergencia de %s en la videoteca" % item.contentTitle)
+        logger.error(traceback.format_exc())

     if filetools.write(json_path, item.tojson()):
         p_dialog.update(100, 'Añadiendo película...', item.contentTitle)
@@ -441,6 +443,7 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
         except:
             if e.contentType == 'episode':
                 logger.error("No se ha podido guardar las urls de emergencia de %s en la videoteca" % e.contentTitle)
+                logger.error(traceback.format_exc())
                 continue

     # No hay lista de episodios, no hay nada que guardar
@@ -568,6 +571,7 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
     except:
         logger.error("Error al actualizar tvshow.nfo")
         logger.error("No se ha podido guardar las urls de emergencia de %s en la videoteca" % tvshow_item.contentSerieName)
+        logger.error(traceback.format_exc())
         fallidos = -1
     else:
         # ... si ha sido correcto actualizamos la videoteca de Kodi
@@ -731,16 +735,18 @@ def emergency_urls(item, channel=None, path=None):
         channel = __import__('channels.%s' % channel, fromlist=["channels.%s" % channel])
         if hasattr(channel, 'findvideos'):                  #Si el canal tiene "findvideos"...
             item.videolibray_emergency_urls = True          #... se marca como "lookup"
-            item_res = getattr(channel, 'findvideos')(item) #... se procesa
+            channel_save = item.channel                     #... guarda el canal original por si hay fail-over en Newpct1
+            item_res = getattr(channel, 'findvideos')(item) #... se procesa Findvideos
+            item_res.channel = channel_save                 #... restaura el canal original por si hay fail-over en Newpct1
+            item_res.category = channel_save.capitalize()   #... y la categoría
             del item_res.videolibray_emergency_urls         #... y se borra la marca de lookup
     except:
-        logger.error('ERROR al procesar el episodio')
+        logger.error('ERROR al procesar el título en Findvideos del Canal: ' + item.channel + ' / ' + item.title)
        logger.error(traceback.format_exc())
         item_res = item.clone()                             #Si ha habido un error, se devuelve el Item original

     #Si el usuario ha activado la opción "emergency_urls_torrents", se descargarán los archivos .torrent de cada título
     else:                                                   #Si se han cacheado con éxito los enlaces...
-        logger.debug('HOLA')
-        logger.debug(item_res.emergency_urls)
         try:
             channel_bis = generictools.verify_channel(item.channel)
             if config.get_setting("emergency_urls_torrents", channel_bis) and item_res.emergency_urls and path != None:
@@ -760,13 +766,15 @@ def emergency_urls(item, channel=None, path=None):
                     item_res.emergency_urls[0][i-1] = path_real.replace(videolibrary_path, '')  #se guarda el "path" relativo
                 i += 1
         except:
             logger.error('ERROR al cachear el .torrent de: ' + item.channel + ' / ' + item.title)
+            logger.error(traceback.format_exc())
             item_res = item.clone()                         #Si ha habido un error, se devuelve el Item original

     #logger.debug(item_res.emergency_urls)
     return item_res                                         #Devolvemos el Item actualizado con los enlaces de emergencia
-def caching_torrents(url, torrents_path=None, decode_flag=False, timeout=10, lookup=False):
+def caching_torrents(url, torrents_path=None, timeout=10, lookup=False, data_torrent=False):
     if torrents_path != None:
         logger.info("path = " + torrents_path)
     else:
@@ -774,98 +782,80 @@ def caching_torrents(url, torrents_path=None, decode_flag=False, timeout=10, loo
     import urllib
     import re
     from core import httptools
+    torrent_file = ''

     """
     Descarga en el path recibido el .torrent de la url recibida, y pasa el decode
     Devuelve el path real del .torrent, o el path vacío si la operación no ha tenido éxito
     """

-    def decode(text):
-        try:
-            src = tokenize(text)
-            data = decode_item(src.next, src.next())
-            for token in src:  # look for more tokens
-                raise SyntaxError("trailing junk")
-        except (AttributeError, ValueError, StopIteration):
-            try:
-                data = data
-            except:
-                data = src
-        return data
-
-    def tokenize(text, match=re.compile("([idel])|(\d+):|(-?\d+)").match):
-        i = 0
-        while i < len(text):
-            m = match(text, i)
-            s = m.group(m.lastindex)
-            i = m.end()
-            if m.lastindex == 2:
-                yield "s"
-                yield text[i:i + int(s)]
-                i = i + int(s)
-            else:
-                yield s
-
-    def decode_item(next, token):
-        if token == "i":
-            # integer: "i" value "e"
-            data = int(next())
-            if next() != "e":
-                raise ValueError
-        elif token == "s":
-            # string: "s" value (virtual tokens)
-            data = next()
-        elif token == "l" or token == "d":
-            # container: "l" (or "d") values "e"
-            data = []
-            tok = next()
-            while tok != "e":
-                data.append(decode_item(next, tok))
-                tok = next()
-            if token == "d":
-                data = dict(zip(data[0::2], data[1::2]))
-        else:
-            raise ValueError
-        return data
+    if torrents_path == None:
+        videolibrary_path = config.get_videolibrary_path()          #Calculamos el path absoluto a partir de la Videoteca
+        if not videolibrary_path:
+            torrents_path = ''
+            if data_torrent:
+                return (torrents_path, torrent_file)
+            return torrents_path                                    #Si hay un error, devolvemos el "path" vacío
+        torrents_path = filetools.join(videolibrary_path, 'temp_torrents_Alfa', 'cliente_torrent_Alfa.torrent')  #path de descarga temporal
+    if '.torrent' not in torrents_path:
+        torrents_path += '.torrent'                                 #path para dejar el .torrent
+    torrents_path_encode = filetools.encode(torrents_path)          #encode utf-8 del path
+
+    if url.endswith(".rar"):                                        #No es un archivo .torrent
+        logger.error('No es un archivo Torrent: ' + url)
+        torrents_path = ''
+        if data_torrent:
+            return (torrents_path, torrent_file)
+        return torrents_path                                        #Si hay un error, devolvemos el "path" vacío

     #Módulo PRINCIPAL
     try:
         if lookup:
             torrents_path = lookup
         else:
             if '.torrent' not in torrents_path:
                 torrents_path += '.torrent'                         #path para dejar el .torrent
             torrents_path_encode = filetools.encode(torrents_path)  #encode utf-8 del path
         response = httptools.downloadpage(url, timeout=timeout)     #Descargamos el .torrent
         if not response.sucess:
             logger.error('Archivo .torrent no encontrado: ' + url)
             torrents_path = ''
+            if data_torrent:
+                return (torrents_path, torrent_file)
             return torrents_path                                    #Si hay un error, devolvemos el "path" vacío
         torrent_file = response.data

         if "used CloudFlare" in torrent_file:                       #Si tiene CloudFlare, usamos este proceso
             response = httptools.downloadpage("http://anonymouse.org/cgi-bin/anon-www.cgi/" + url.strip(), timeout=timeout)
             if not response.sucess:
                 logger.error('Archivo .torrent no encontrado: ' + url)
                 torrents_path = ''
+                if data_torrent:
+                    return (torrents_path, torrent_file)
                 return torrents_path                                #Si hay un error, devolvemos el "path" vacío
             torrent_file = response.data

-        if decode_flag:
-            torrent_file = decode(torrent_file)                     #decodificamos el .torrent
+        if not scrapertools.find_single_match(torrent_file, '^d\d+:\w+\d+:'):  #No es un archivo .torrent (RAR, ZIP, HTML,..., vacío)
+            logger.error('No es un archivo Torrent: ' + url)
+            torrents_path = ''
+            if data_torrent:
+                return (torrents_path, torrent_file)
+            return torrents_path                                    #Si hay un error, devolvemos el "path" vacío

         if not lookup:
-            filetools.write(torrents_path_encode, torrent_file)     #Salvamos el .torrent
+            if not filetools.write(torrents_path_encode, torrent_file):  #Salvamos el .torrent
+                logger.error('ERROR: Archivo .torrent no escrito: ' + torrents_path_encode)
+                torrents_path = ''                                  #Si hay un error, devolvemos el "path" vacío
+                torrent_file = ''                                   #... y el buffer del .torrent
+                if data_torrent:
+                    return (torrents_path, torrent_file)
+                return torrents_path
     except:
         torrents_path = ''                                          #Si hay un error, devolvemos el "path" vacío
+        torrent_file = ''                                           #... y el buffer del .torrent
         logger.error('Error en el proceso de descarga del .torrent: ' + url + ' / ' + torrents_path_encode)
         logger.error(traceback.format_exc())

     #logger.debug(torrents_path)
+    if data_torrent:
+        return (torrents_path, torrent_file)
     return torrents_path
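The new sanity check above replaces the removed in-function bencode decoder: instead of decoding, it only tests that the buffer starts like a bencoded dictionary. A quick sketch of what the `^d\d+:\w+\d+:` pattern accepts; the sample strings are illustrative:

# -*- coding: utf-8 -*-
# Sketch: same regex caching_torrents() uses to tell a .torrent from junk.
import re

def looks_like_torrent(buf):
    # bencoded torrents start with 'd' (dict) followed by a length-prefixed string key
    return bool(re.match(r'^d\d+:\w+\d+:', buf))

print looks_like_torrent('d8:announce40:http://tracker.example.com/announce')  # True
print looks_like_torrent('<html>Not Found</html>')                             # False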
-def verify_url_torrent(url, decode_flag=False, timeout=5):
+def verify_url_torrent(url, timeout=5):
     """
     Verifica si el archivo .torrent al que apunta la url está disponible, descargándolo en un area temporal
     Entrada: url
@@ -874,12 +864,8 @@ def verify_url_torrent(url, timeout=5):

     if not url or url == 'javascript:;':                    #Si la url viene vacía...
         return False                                        #... volvemos con error
-    torrents_path = caching_torrents(url, timeout=timeout, lookup=True)  #Descargamos el .torrent, sin decode
-    if torrents_path:                                       #Si ha tenido éxito...
-        return True
-    try:
-        torrents_path = caching_torrents(url, timeout=timeout, lookup=True)  #Descargamos el .torrent, sin decode
-        if torrents_path:                                   #Si ha tenido éxito...
-            return True
-    except:
-        return False                                        #en caso de error, False
+    torrents_path = caching_torrents(url, timeout=timeout, lookup=True)  #Descargamos el .torrent
+    if torrents_path:                                       #Si ha tenido éxito...
+        return True
     else:
         return False
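A minimal usage sketch of the reworked pair of helpers, assuming the Alfa runtime is on the import path; the URL is illustrative:

# -*- coding: utf-8 -*-
# Sketch: new calling conventions after this commit (Python 2).
from core import videolibrarytools

url = 'http://example.com/file.torrent'

# lookup mode downloads to a temp area just to test availability
if videolibrarytools.verify_url_torrent(url, timeout=5):
    # data_torrent=True returns the raw buffer besides the path
    path, torrent = videolibrarytools.caching_torrents(url, data_torrent=True)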
127
plugin.video.alfa/lib/MultipartPostHandler.py
Normal file
@@ -0,0 +1,127 @@
#-*- coding: utf-8 -*-
#
####
# 2006/02 Will Holcomb <wholcomb@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# 2007/07/26 Slightly modified by Brian Schneider
# in order to support unicode files ( multipart_encode function )
# From http://peerit.blogspot.com/2007/07/multipartposthandler-doesnt-work-for.html
#
# 2013/07 Ken Olum <kdo@cosmos.phy.tufts.edu>
# Removed one of \r\n and send Content-Length
#
# 2014/05 Applied Fedora rpm patch
# https://bugzilla.redhat.com/show_bug.cgi?id=920778
# http://pkgs.fedoraproject.org/cgit/python-MultipartPostHandler2.git/diff/python-MultipartPostHandler2-cut-out-main.patch?id=c1638bb3e45596232b4d02f1e69901db0c28cfdb
#
# 2014/05/09 Sérgio Basto <sergio@serjux.com>
# Better deal with None values, don't throw an exception and just send an empty string.
# Simplified text example
#
"""
Usage:
  Enables the use of multipart/form-data for posting forms

Inspirations:
  Upload files in python:
    http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
  urllib2_file:
    Fabien Seisen: <fabien@seisen.org>

Example:
  import MultipartPostHandler, urllib2

  opener = urllib2.build_opener(MultipartPostHandler.MultipartPostHandler)
  params = { "username" : "bob", "password" : "riviera",
             "file" : open("filename", "rb") }
  opener.open("http://wwww.bobsite.com/upload/", params)
"""

import sys  # needed by http_request's exception handling below
import urllib
import urllib2
import mimetools, mimetypes
import os, stat
from cStringIO import StringIO


class Callable:
    def __init__(self, anycallable):
        self.__call__ = anycallable

# Controls how sequences are uncoded. If true, elements may be given multiple values by
# assigning a sequence.
doseq = 1


class MultipartPostHandler(urllib2.BaseHandler):
    handler_order = urllib2.HTTPHandler.handler_order - 10  # needs to run first

    def http_request(self, request):
        data = request.get_data()
        if data is not None and type(data) != str:
            v_files = []
            v_vars = []
            try:
                for (key, value) in data.items():
                    if type(value) == file:
                        v_files.append((key, value))
                    else:
                        v_vars.append((key, value))
            except TypeError:
                systype, value, traceback = sys.exc_info()
                raise TypeError, "not a valid non-string sequence or mapping object", traceback

            if len(v_files) == 0:
                data = urllib.urlencode(v_vars, doseq)
            else:
                boundary, data = self.multipart_encode(v_vars, v_files)
                contenttype = 'multipart/form-data; boundary=%s' % boundary
                # ~ if (request.has_header('Content-Type')
                # ~         and request.get_header('Content-Type').find('multipart/form-data') != 0):
                # ~     print "Replacing %s with %s" % (request.get_header('content-type'), 'multipart/form-data')
                request.add_unredirected_header('Content-Type', contenttype)

            request.add_data(data)
        return request

    def multipart_encode(vars, files, boundary=None, buffer=None):
        if boundary is None:
            boundary = mimetools.choose_boundary()
        if buffer is None:
            buffer = StringIO()
        for (key, value) in vars:
            buffer.write('--%s\r\n' % boundary)
            buffer.write('Content-Disposition: form-data; name="%s"' % key)
            if value is None:
                value = ""
            # if type(value) is not str, we need str(value) to not error with cannot concatenate 'str'
            # and 'dict' or 'tuple' or somethingelse objects
            buffer.write('\r\n\r\n' + str(value) + '\r\n')
        for (key, fd) in files:
            file_size = os.fstat(fd.fileno())[stat.ST_SIZE]
            filename = fd.name.split('/')[-1]
            contenttype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
            buffer.write('--%s\r\n' % boundary)
            buffer.write('Content-Disposition: form-data; name="%s"; filename="%s"\r\n' % (key, filename))
            buffer.write('Content-Type: %s\r\n' % contenttype)
            buffer.write('Content-Length: %s\r\n' % file_size)
            fd.seek(0)
            buffer.write('\r\n' + fd.read() + '\r\n')
        buffer.write('--' + boundary + '--\r\n')
        buffer = buffer.getvalue()
        return boundary, buffer
    multipart_encode = Callable(multipart_encode)

    https_request = http_request
@@ -14,6 +14,7 @@ import urllib
 import urlparse
 import datetime
 import time
+import traceback

 from channelselector import get_thumb
 from core import httptools
@@ -183,7 +184,7 @@ def update_title(item):
             rating_new = round(rating_new, 1)
             item.title = item.title.replace("[" + str(rating_old) + "]", "[" + str(rating_new) + "]")
     except:
-        pass
+        logger.error(traceback.format_exc())
     if item.wanted:                                 #Actualizamos Wanted, si existe
         item.wanted = item.contentTitle
     if new_item.contentSeason:                      #Restauramos el núm. de Temporada después de TMDB
@@ -236,7 +237,7 @@ def refresh_screen(item):
         xbmcplugin.setResolvedUrl(int(sys.argv[1]), False, xlistitem)  #Preparamos el entorno para evitar error Kod1 18
         time.sleep(1)                               #Dejamos tiempo para que se ejecute
     except:
-        pass
+        logger.error(traceback.format_exc())

     platformtools.itemlist_update(item)             #refrescamos la pantalla con el nuevo Item
@@ -329,7 +330,7 @@ def post_tmdb_listado(item, itemlist):
             if rating == 0.0:
                 rating = ''
         except:
-            pass
+            logger.error(traceback.format_exc())

         __modo_grafico__ = config.get_setting('modo_grafico', item.channel)
@@ -346,7 +347,7 @@ def post_tmdb_listado(item, itemlist):
         try:
             tmdb.set_infoLabels(item_local, __modo_grafico__)   #pasamos otra vez por TMDB
         except:
-            pass
+            logger.error(traceback.format_exc())
             logger.error(item_local)

         # Si TMDB no ha encontrado nada y hemos usado el año de la web, lo intentamos sin año
@@ -357,7 +358,7 @@ def post_tmdb_listado(item, itemlist):
             try:
                 tmdb.set_infoLabels(item_local, __modo_grafico__)   #pasamos otra vez por TMDB
             except:
-                pass
+                logger.error(traceback.format_exc())
             if not item_local.infoLabels['tmdb_id']:    #ha tenido éxito?
                 item_local.infoLabels['year'] = year    #no, restauramos el año y lo dejamos ya
@@ -539,7 +540,7 @@ def post_tmdb_seasons(item, itemlist):
     try:
         tmdb.set_infoLabels(item, True)             #TMDB de cada Temp
     except:
-        pass
+        logger.error(traceback.format_exc())

     item_season = item.clone()
     if item_season.season_colapse:                  #Quitamos el indicador de listado por Temporadas
@@ -554,7 +555,7 @@ def post_tmdb_seasons(item, itemlist):
             rating = float(item_season.infoLabels['rating'])
             rating = round(rating, 1)
         except:
-            pass
+            logger.error(traceback.format_exc())
         if rating and rating == 0.0:
             rating = ''
@@ -586,7 +587,7 @@ def post_tmdb_seasons(item, itemlist):
         try:
             tmdb.set_infoLabels(item_local, True)   #TMDB de cada Temp
         except:
-            pass
+            logger.error(traceback.format_exc())

         if item_local.infoLabels['temporada_air_date']:     #Fecha de emisión de la Temp
             item_local.title += ' [%s]' % str(scrapertools.find_single_match(str(item_local.infoLabels['temporada_air_date']), r'\/(\d{4})'))
@@ -597,7 +598,7 @@ def post_tmdb_seasons(item, itemlist):
         #     rating = float(item_local.infoLabels['rating'])
         #     rating = round(rating, 1)
         # except:
-        #     pass
+        #     logger.error(traceback.format_exc())
         #if rating and rating > 0.0:
         #    item_local.title += ' [%s]' % str(rating)
@@ -813,6 +814,7 @@ def post_tmdb_episodios(item, itemlist):
             num_episodios = item_local.infoLabels['temporada_num_episodios']
         except:
             num_episodios = 0
+            logger.error(traceback.format_exc())

         #Preparamos el Rating del vídeo
         rating = ''
@@ -823,7 +825,7 @@ def post_tmdb_episodios(item, itemlist):
             if rating == 0.0:
                 rating = ''
         except:
-            pass
+            logger.error(traceback.format_exc())

         # Si TMDB no ha encontrado el vídeo limpiamos el año
         if item_local.infoLabels['year'] == "-":
@@ -883,7 +885,7 @@ def post_tmdb_episodios(item, itemlist):
         try:
             num_episodios_lista[item_local.contentSeason] = num_episodios
         except:
-            pass
+            logger.error(traceback.format_exc())

         #logger.debug("title: " + item_local.title + " / url: " + item_local.url + " / calidad: " + item_local.quality + " / Season: " + str(item_local.contentSeason) + " / EpisodeNumber: " + str(item_local.contentEpisodeNumber) + " / num_episodios_lista: " + str(num_episodios_lista) + str(num_episodios_flag))
         #logger.debug(item_local)
@@ -900,6 +902,7 @@ def post_tmdb_episodios(item, itemlist):
             item_local.infoLabels['temporada_num_episodios'] = int(num_episodios_lista[item_local.contentSeason])
         except:
             logger.error("ERROR 07: EPISODIOS: Num de Temporada fuera de rango " + " / TEMPORADA: " + str(item_local.contentSeason) + " / " + str(item_local.contentEpisodeNumber) + " / MAX_TEMPORADAS: " + str(num_temporada_max) + " / LISTA_TEMPORADAS: " + str(num_episodios_lista))
+            logger.error(traceback.format_exc())

     #Permitimos la actualización de los títulos, bien para uso inmediato, o para añadir a la videoteca
     itemlist.append(item.clone(title="** [COLOR yelow]Actualizar Títulos - vista previa videoteca[/COLOR] **", action="actualizar_titulos", tmdb_stat=False, from_action=item.action, from_title_tmdb=item.title, from_update=True))
@@ -938,6 +941,7 @@ def post_tmdb_episodios(item, itemlist):
                 videolibrarytools.save_tvshow(item, itemlist_fake)  #Se actualiza el .nfo
             except:
                 logger.error("ERROR 08: EPISODIOS: No se ha podido actualizar la URL a la nueva Temporada")
+                logger.error(traceback.format_exc())
             itemlist.append(item.clone(title="[COLOR yellow]Añadir esta Serie a Videoteca-[/COLOR]" + title, action="add_serie_to_library"))

         elif modo_serie_temp == 1:  #si es Serie damos la opción de guardar la última temporada o la serie completa
@@ -1006,6 +1010,7 @@ def post_tmdb_findvideos(item, itemlist):
         item.unify = config.get_setting("unify")
     except:
         item.unify = config.get_setting("unify")
+        logger.error(traceback.format_exc())

     if item.contentSeason_save:                     #Restauramos el num. de Temporada
         item.contentSeason = item.contentSeason_save
@@ -1027,7 +1032,7 @@ def post_tmdb_findvideos(item, itemlist):
     try:
         tmdb.set_infoLabels(item, True)             #TMDB de cada Temp
     except:
-        pass
+        logger.error(traceback.format_exc())
     #Restauramos la información de max num. de episodios por temporada despues de TMDB
     try:
         if item.infoLabels['temporada_num_episodios']:
@@ -1036,7 +1041,7 @@ def post_tmdb_findvideos(item, itemlist):
         else:
             item.infoLabels['temporada_num_episodios'] = num_episodios
     except:
-        pass
+        logger.error(traceback.format_exc())

     #Ajustamos el nombre de la categoría
     if item.channel == channel_py:
@@ -1068,7 +1073,7 @@ def post_tmdb_findvideos(item, itemlist):
         if rating == 0.0:
             rating = ''
     except:
-        pass
+        logger.error(traceback.format_exc())

     if item.quality.lower() in ['gb', 'mb']:
         item.quality = item.quality.replace('GB', 'G B').replace('Gb', 'G b').replace('MB', 'M B').replace('Mb', 'M b')
@@ -1085,6 +1090,7 @@ def post_tmdb_findvideos(item, itemlist):
             tiempo = item.infoLabels['duration']
         except:
             tiempo = item.infoLabels['duration']
+            logger.error(traceback.format_exc())

     elif item.contentChannel == 'videolibrary':     #No hay, viene de la Videoteca? buscamos en la DB
         #Leo de la BD de Kodi la duración de la película o episodio. En "from_fields" se pueden poner las columnas que se quiera
@@ -1095,7 +1101,7 @@ def post_tmdb_findvideos(item, itemlist):
         else:
             nun_records, records = get_field_from_kodi_DB(item, from_fields='c09')  #Leo de la BD de Kodi la duración del episodio
     except:
-        pass
+        logger.error(traceback.format_exc())
     if nun_records > 0:                             #Hay registros?
         #Es un array, busco el campo del registro: añadir en el FOR un fieldX por nueva columna
         for strFileName, field1 in records:
@@ -1111,7 +1117,7 @@ def post_tmdb_findvideos(item, itemlist):
             if not scrapertools.find_single_match(item.quality, '(\[\d+:\d+)'):  #si ya tiene la duración, pasamos
                 item.quality += ' [/COLOR][COLOR white][%s:%s h]' % (str(horas).zfill(2), str(resto).zfill(2))  #Lo agrego a Calidad del Servidor
         except:
-            pass
+            logger.error(traceback.format_exc())

     #Ajustamos el nombre de la categoría
     if item.channel != channel_py:
@@ -1193,8 +1199,9 @@ def post_tmdb_findvideos(item, itemlist):
     return (item, itemlist)


-def get_torrent_size(url):
+def get_torrent_size(url, data_torrent=False):
     logger.info()
+    from core import videolibrarytools

     """
@@ -1203,9 +1210,11 @@ def get_torrent_size(url, data_torrent=False):
     Calcula el tamaño de los archivos que contienen un .torrent. Descarga el archivo .torrent en una carpeta,
     lo lee y descodifica. Si contiene múltiples archivos, suma el tamaño de todos ellos

-    Llamada: generictools.get_torrent_size(url)
+    Llamada: generictools.get_torrent_size(url, data_torrent=False)
     Entrada: url: url del archivo .torrent
+    Entrada: data_torrent: Flag por si se quiere el contenido del .torrent de vuelta
     Salida: size: str con el tamaño y tipo de medida ( MB, GB, etc)
+    Salida: torrent: dict() con el contenido del .torrent (opcional)

     """
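Following the updated docstring, a hedged usage sketch of both call forms; the URL is illustrative:

# -*- coding: utf-8 -*-
# Sketch: the two documented ways to call get_torrent_size (Python 2).
from core import generictools

size = generictools.get_torrent_size('http://example.com/file.torrent')
size, torrent = generictools.get_torrent_size('http://example.com/file.torrent', data_torrent=True)
# size -> e.g. "1.4 GB"; torrent -> decoded .torrent dict (empty string on error)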
@@ -1271,6 +1280,7 @@ def get_torrent_size(url):
|
||||
|
||||
#Móludo principal
|
||||
size = ""
|
||||
torrent = ''
|
||||
try:
|
||||
#torrents_path = config.get_videolibrary_path() + '/torrents' #path para dejar el .torrent
|
||||
|
||||
@@ -1281,25 +1291,12 @@ def get_torrent_size(url):
|
||||
#urllib.urlretrieve(url, torrents_path + "/generictools.torrent") #desacargamos el .torrent a la carpeta
|
||||
#torrent_file = open(torrents_path + "/generictools.torrent", "rb").read() #leemos el .torrent
|
||||
|
||||
response = httptools.downloadpage(url, timeout=2) #Descargamos el .torrent
|
||||
if not response.sucess:
|
||||
size = ''
|
||||
return size #Si hay un error, devolvemos el "size" vacío
|
||||
torrent_file = response.data
|
||||
torrents_path, torrent_file = videolibrarytools.caching_torrents(url, timeout=2, lookup=True, data_torrent=True)
|
||||
if not torrent_file:
|
||||
if data_torrent:
|
||||
return (size, torrent)
|
||||
return size #Si hay un error, devolvemos el "size" y "torrent" vacíos
|
||||
|
||||
if "used CloudFlare" in torrent_file: #Si tiene CloudFlare, usamos este proceso
|
||||
#try:
|
||||
# urllib.urlretrieve("http://anonymouse.org/cgi-bin/anon-www.cgi/" + url.strip(),
|
||||
# torrents_path + "/generictools.torrent")
|
||||
# torrent_file = open(torrents_path + "/generictools.torrent", "rb").read()
|
||||
#except:
|
||||
# torrent_file = ""
|
||||
response = httptools.downloadpage("http://anonymouse.org/cgi-bin/anon-www.cgi/" + url.strip())
|
||||
if not response.sucess:
|
||||
size = ''
|
||||
return size #Si hay un error, devolvemos el "size" vacío
|
||||
torrent_file = response.data
|
||||
|
||||
torrent = decode(torrent_file) #decodificamos el .torrent
|
||||
|
||||
#si sólo tiene un archivo, tomamos la longitud y la convertimos a una unidad legible, si no dará error
|
||||
@@ -1311,20 +1308,27 @@ def get_torrent_size(url):
|
||||
|
||||
#si tiene múltiples archivos sumamos la longitud de todos
|
||||
if not size:
|
||||
check_video = scrapertools.find_multiple_matches(str(torrent["info"]["files"]), "'length': (\d+).*?}")
|
||||
sizet = sum([int(i) for i in check_video])
|
||||
size = convert_size(sizet)
|
||||
try:
|
||||
check_video = scrapertools.find_multiple_matches(str(torrent["info"]["files"]), "'length': (\d+).*?}")
|
||||
sizet = sum([int(i) for i in check_video])
|
||||
size = convert_size(sizet)
|
||||
except:
|
||||
pass
|
||||
|
||||
except:
|
||||
logger.error('ERROR al buscar el tamaño de un .Torrent: ' + str(url))
|
||||
logger.error(traceback.format_exc())
|
||||
|
||||
#try:
|
||||
# os.remove(torrents_path + "/generictools.torrent") #borramos el .torrent
|
||||
#except:
|
||||
# pass
|
||||
|
||||
#logger.debug(str(url) + ' / ' + str(size))
|
||||
#logger.debug(str(url))
|
||||
logger.info(str(size))
|
||||
|
||||
if data_torrent:
|
||||
return (size, torrent)
|
||||
return size
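The rewrite above delegates downloading and caching to videolibrarytools.caching_torrents(). A hedged usage sketch of the two call shapes visible in this diff; the signature is inferred from these calls, not from the function's definition:

    from core import videolibrarytools

    url = "http://example.com/file.torrent"        # hypothetical
    # 1) Lookup mode: fetch and cache, returning the cache path plus raw content.
    torrents_path, torrent_file = videolibrarytools.caching_torrents(
        url, timeout=2, lookup=True, data_torrent=True)
    if not torrent_file:
        pass  # download failed: the caller above falls back to empty size/torrent

    # 2) Playback mode (see play_torrent further down): download straight to a
    #    known temp path and get back the local path for the torrent client.
    local_url = videolibrarytools.caching_torrents(
        url, torrents_path=torrents_path, timeout=10)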

@@ -1401,7 +1405,7 @@ def get_field_from_kodi_DB(item, from_fields='*', files='file'):
        if nun_records == 0:                         # is there an error?
            logger.error("Error en la SQL: " + sql + ": 0 registros")  # either not catalogued, or the SQL is wrong
    except:
        pass
        logger.error(traceback.format_exc())

    return (nun_records, records)
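Most hunks in this commit apply the same one-line fix: a bare `except: pass` that swallowed failures is replaced with a logged traceback. A minimal sketch of the pattern (risky_operation is a hypothetical placeholder):

    import traceback
    from platformcode import logger  # Alfa's logger, as used throughout this diff

    def risky_operation():
        raise ValueError("illustrative failure")  # hypothetical placeholder

    try:
        risky_operation()
    except:
        # Before this commit: `pass` -- failures vanished without a trace.
        # After: execution still continues, but the stack trace is logged.
        logger.error(traceback.format_exc())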

@@ -1455,7 +1459,7 @@ def fail_over_newpct1(item, patron, patron2=None, timeout=None):
            verify_torrent, patron1, patron_alt = patron.split('|')  # if so, split them up and handle them
            patron = patron1
        except:
            pass
            logger.error(traceback.format_exc())

    # Array with the data of the alternative channels
    # Load the channel's .json to see the value lists in settings
@@ -1530,6 +1534,7 @@ def fail_over_newpct1(item, patron, patron2=None, timeout=None):
            url_alt += [scrapertools.find_single_match(item.url, 'http.*?\/temporada-\d+.*?\/capitulo.?-\d+.*?\/')]
        except:
            logger.error("ERROR 88: " + item.action + ": Error al convertir la url: " + item.url)
            logger.error(traceback.format_exc())
    logger.debug('URLs convertidas: ' + str(url_alt))

    if patron == True:                               # we were only asked to verify the clone
@@ -1545,6 +1550,7 @@ def fail_over_newpct1(item, patron, patron2=None, timeout=None):
            data_comillas = data.replace("'", "\"")
        except:
            data = ''
            logger.error(traceback.format_exc())
        if not data:                                 # no luck, try the next url
            logger.error("ERROR 01: " + item.action + ": La Web no responde o la URL es erronea: " + url)
            continue

@@ -1700,6 +1706,7 @@ def web_intervenida(item, data, desactivar=True):
            json.dump(json_data, outfile, sort_keys = True, indent = 2, ensure_ascii = False)
    except:
        logger.error("ERROR 98 al salvar el archivo: %s" % channel_path)
        logger.error(traceback.format_exc())

    #logger.debug(item)
@@ -1764,10 +1771,11 @@ def redirect_clone_newpct1(item, head_nfo=None, it=None, path=False, overwrite=F
    delete_stat = 0
    canal_org_des_list = []
    json_path_list = []
    emergency_urls_force = False

    if item.ow_force == '1':                         # it may have been left active from earlier passes
        del item.ow_force
        logger.error('** item.ow_force: ' + item.path)  # flag that there has been an incident
    #if item.ow_force == '1':                        # it may have been left active from earlier passes
    #    del item.ow_force
    #    logger.error('** item.ow_force: ' + item.path)  # flag that there has been an incident
    if it.ow_force == '1':                           # it may have been left active from earlier passes
        del it.ow_force
        if path and it.contentType != 'movies':
@@ -1777,6 +1785,7 @@ def redirect_clone_newpct1(item, head_nfo=None, it=None, path=False, overwrite=F
                logger.error('** .nfo ACTUALIZADO: it.ow_force: ' + nfo)  # flag that there has been an incident
            except:
                logger.error('** .nfo ERROR actualizar: it.ow_force: ' + nfo)  # flag that there has been an incident
                logger.error(traceback.format_exc())

    # Array with the data of the alternative channels
    # Load Newpct1's .json to see the value lists in settings
@@ -1819,46 +1828,72 @@ def redirect_clone_newpct1(item, head_nfo=None, it=None, path=False, overwrite=F
    if it.emergency_urls:
        item.emergency_urls = it.emergency_urls     # refresh from the .nfo

    verify_cached_torrents()                        # TEMPORARY: check whether the cached .torrents are valid
    try:                                            # if there were errors, go through the list and repair them
        json_error_path = filetools.join(config.get_runtime_path(), 'error_cached_torrents.json')
        if filetools.exists(json_error_path):       # are there errors that need repairing?
            from core import jsontools
            json_error_file = jsontools.load(filetools.read(json_error_path))  # read the error list
            if not json_error_file:
                filetools.remove(json_error_path)   # if no errors are left, delete the .json
            elif path in json_error_file:           # is this title in the error list?
                json_error_file.pop(path)           # yes, take it out
                if not json_error_file:
                    filetools.remove(json_error_path)  # if no errors are left, delete the .json
                else:
                    filetools.write(json_error_path, jsontools.dump(json_error_file))  # if some remain, update the .json
                if item.contentType == 'movie':     # for a movie, force its update
                    emergency_urls_force = True
                else:                               # for a series, regenerate the missing (broken) episodes...
                    item.ow_force = '1'             # ... across all seasons
    except:
        logger.error('Error en el proceso de REPARACION de vídeos con .torrents dañados')
        logger.error(traceback.format_exc())
    # Temporary fix for Newpct1
    if channel in fail_over_list or channel_alt == 'videolibrary':
        channel_bis = channel_py
    if not item.url and it.library_urls and channel_alt == 'videolibrary':
        for canal_vid, url_vid in it.library_urls.items():  # walk "item.library_urls" looking for candidate channels
    try:
        if channel in fail_over_list or channel_alt == 'videolibrary':
            channel_bis = channel_py
        if not item.url and it.library_urls and channel_alt == 'videolibrary':
            for canal_vid, url_vid in it.library_urls.items():  # walk "item.library_urls" looking for candidate channels
                canal_vid_alt = "'%s'" % canal_vid
                if canal_vid_alt in fail_over_list:  # check whether it is a Newpct1 clone
                    channel_bis = channel_py
                    channel_alt = canal_vid
                    channel = "'%s'" % channel_alt
                    break
                else:
                    channel_bis = canal_vid
            if channel_bis == channel_py and config.get_setting("emergency_urls", channel_bis) == 1 and config.get_setting("emergency_urls_torrents", channel_bis) and item.emergency_urls and item.emergency_urls.get(channel_alt, False):
                raiz, carpetas_series, ficheros = filetools.walk(path).next()
                objetivo = '[%s]_01.torrent' % channel_alt
                encontrado = False
                for fichero in ficheros:
                    if objetivo in fichero:
                        encontrado = True
                        break
                if not encontrado:
                    logger.error('REGENERANDO: ' + str(item.emergency_urls))
                    item.emergency_urls.pop(channel_alt, None)
        if item.url:                                # coming from a series videolibrary update
            # check whether the channel already has the emergency urls: save or delete
            if (config.get_setting("emergency_urls", item.channel) == 1 and (not item.emergency_urls or (item.emergency_urls and not item.emergency_urls.get(channel_alt, False)))) or (config.get_setting("emergency_urls", item.channel) == 2 and item.emergency_urls.get(channel_alt, False)) or config.get_setting("emergency_urls", item.channel) == 3 or emergency_urls_force:
                intervencion += ", ('1', '%s', '%s', '', '', '', '', '', '', '', '*', '%s', 'emerg')" % (channel_alt, channel_alt, config.get_setting("emergency_urls", item.channel))

        elif it.library_urls:                       # coming from "list movies"
            for canal_vid, url_vid in it.library_urls.items():  # walk "item.library_urls" looking for candidate channels
                canal_vid_alt = "'%s'" % canal_vid
                if canal_vid_alt in fail_over_list:  # check whether it is a Newpct1 clone
                if canal_vid_alt in fail_over_list:  # check whether it is a Newpct1 clone
                    channel_bis = channel_py
                    channel_alt = canal_vid
                    channel = "'%s'" % channel_alt
                    break
                else:
                    channel_bis = canal_vid
            if channel_bis == channel_py and config.get_setting("emergency_urls", channel_bis) == 1 and config.get_setting("emergency_urls_torrents", channel_bis) and item.emergency_urls and item.emergency_urls.get(channel_alt, False):
                raiz, carpetas_series, ficheros = filetools.walk(path).next()
                objetivo = '[%s]_01.torrent' % channel_alt
                encontrado = False
                for fichero in ficheros:
                    if objetivo in fichero:
                        encontrado = True
                        break
                if not encontrado:
                    logger.error('REGENERANDO: ' + str(item.emergency_urls))
                    item.emergency_urls.pop(channel_alt, None)

        if item.url:                                # coming from a series videolibrary update
            # check whether the channel already has the emergency urls: save or delete
            if (config.get_setting("emergency_urls", item.channel) == 1 and (not item.emergency_urls or (item.emergency_urls and not item.emergency_urls.get(channel_alt, False)))) or (config.get_setting("emergency_urls", item.channel) == 2 and item.emergency_urls.get(channel_alt, False)) or config.get_setting("emergency_urls", item.channel) == 3:
                intervencion += ", ('1', '%s', '%s', '', '', '', '', '', '', '', '*', '%s', 'emerg')" % (channel_alt, channel_alt, config.get_setting("emergency_urls", item.channel))

        elif it.library_urls:                       # coming from "list movies"
            for canal_vid, url_vid in it.library_urls.items():  # walk "item.library_urls" looking for candidate channels
                canal_vid_alt = "'%s'" % canal_vid
                if canal_vid_alt in fail_over_list:  # check whether it is a Newpct1 clone
                    channel_bis = channel_py
                else:
                    channel_bis = canal_vid
                # check whether the channel already has the emergency urls: save or delete
                if (config.get_setting("emergency_urls", channel_bis) == 1 and (not it.emergency_urls or (it.emergency_urls and not it.emergency_urls.get(canal_vid, False)))) or (config.get_setting("emergency_urls", channel_bis) == 2 and it.emergency_urls.get(canal_vid, False)) or config.get_setting("emergency_urls", channel_bis) == 3:
                    intervencion += ", ('1', '%s', '%s', '', '', '', '', '', '', '', '*', '%s', 'emerg')" % (canal_vid, canal_vid, config.get_setting("emergency_urls", channel_bis))
                # check whether the channel already has the emergency urls: save or delete
                if (config.get_setting("emergency_urls", channel_bis) == 1 and (not it.emergency_urls or (it.emergency_urls and not it.emergency_urls.get(canal_vid, False)))) or (config.get_setting("emergency_urls", channel_bis) == 2 and it.emergency_urls.get(canal_vid, False)) or config.get_setting("emergency_urls", channel_bis) == 3 or emergency_urls_force:
                    intervencion += ", ('1', '%s', '%s', '', '', '', '', '', '', '', '*', '%s', 'emerg')" % (canal_vid, canal_vid, config.get_setting("emergency_urls", channel_bis))
    except:
        logger.error('Error en el proceso de ALMACENAMIENTO de URLs de Emergencia')
        logger.error(traceback.format_exc())
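The long emergency-urls conditions above are hard to read. An illustrative decomposition into named booleans; note the meaning of the setting values (1 = save if missing, 2 = delete if present, 3 = always refresh) is inferred from the code, not from any documented API:

    mode = config.get_setting("emergency_urls", channel_bis)
    has_urls = bool(it.emergency_urls and it.emergency_urls.get(canal_vid, False))

    save_missing = (mode == 1 and not has_urls)  # cache urls that are not there yet
    drop_present = (mode == 2 and has_urls)      # remove urls that were cached
    always_redo = (mode == 3)                    # unconditional refresh
    if save_missing or drop_present or always_redo or emergency_urls_force:
        pass  # queue the 'emerg' intervention, as the code above does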

    # Now handle the intervened sites: transform the url and the nfo, and delete the series' obsolete files
    if channel not in intervencion and channel_py_alt not in intervencion and category not in intervencion and channel_alt != 'videolibrary':  # lookup
@@ -2009,7 +2044,7 @@ def redirect_clone_newpct1(item, head_nfo=None, it=None, path=False, overwrite=F
            try:
                response = httptools.downloadpage(url_total, only_headers=True)
            except:
                pass
                logger.error(traceback.format_exc())
            if not response.sucess:
                logger.error('Web ' + canal_des_def.upper() + ' INACTIVA. Regla no procesada: ' + str(canal_org_des_list[i]))
                item = item_back.clone()            # restore the initial images
@@ -2122,6 +2157,101 @@ def redirect_clone_newpct1(item, head_nfo=None, it=None, path=False, overwrite=F
    return (item, it, overwrite)

def verify_cached_torrents():
    logger.info()
    import json

    """
    Verifies that every cached .torrent file is uncompressed. If one is not, it decompresses and rewrites it.

    Method intended for temporary, controlled use.

    Leaves the file verify_cached_torrents.json as a marker that it has already run for this version of Alfa.
    """

    try:
        # Locate the paths for the control .json file and for the Videolibrary
        json_path = filetools.exists(filetools.join(config.get_runtime_path(), 'verify_cached_torrents.json'))
        if json_path:
            logger.info('Torrents verificados anteriormente: NOS VAMOS')
            return
        json_path = filetools.join(config.get_runtime_path(), 'verify_cached_torrents.json')
        json_error_path = filetools.join(config.get_runtime_path(), 'error_cached_torrents.json')
        json_error_path_BK = filetools.join(config.get_runtime_path(), 'error_cached_torrents_BK.json')

        videolibrary_path = config.get_videolibrary_path()  # compute the absolute path from the Videolibrary
        movies = config.get_setting("folder_movies")
        series = config.get_setting("folder_tvshows")
        torrents_movies = filetools.join(videolibrary_path, config.get_setting("folder_movies"))   # MOVIES path
        torrents_series = filetools.join(videolibrary_path, config.get_setting("folder_tvshows"))  # SERIES path

        # Initialise variables
        torren_list = []
        torren_list.append(torrents_movies)
        torren_list.append(torrents_series)
        i = 0
        j = 0
        k = 0
        descomprimidos = []
        errores = []
        json_data = dict()

        # Walk the MOVIES and SERIES folders of the Videolibrary, reading, decompressing and rewriting the .torrent files
        for contentType in torren_list:
            for root, folders, files in filetools.walk(contentType):
                for file in files:
                    if not '.torrent' in file:
                        continue
                    i += 1
                    torrent_file = ''
                    torrent_path = filetools.join(root, file)
                    torrent_file = filetools.read(torrent_path)
                    if not scrapertools.find_single_match(torrent_file, '^d\d+:\w+\d+:'):
                        logger.debug('Torrent comprimido: DESCOMPRIMIENDO: ' + str(torrent_path))
                        try:
                            torrent_file_deco = ''
                            import zlib
                            torrent_file_deco = zlib.decompressobj(16 + zlib.MAX_WBITS).decompress(torrent_file)
                        except:
                            k += 1
                            errores += [torrent_path]
                            logger.error(traceback.format_exc())
                            logger.error('No es un archivo TORRENT. Archivo borrado: ' + str(torrent_path))
                            if not json_data.get(root, False):
                                json_data[root] = 'ERROR'
                            if scrapertools.find_single_match(file, '^\d+x\d+'):
                                torrent_json = re.sub(r'\]_\d+.torrent$', '].json', torrent_path)
                                filetools.remove(torrent_json)
                            filetools.remove(torrent_path)
                            continue

                        if not scrapertools.find_single_match(torrent_file_deco, '^d\d+:\w+\d+:'):
                            logger.error('Error de DESCOMPRESIÓN: ' + str(torrent_path))
                            k += 1
                            errores += [torrent_path]
                        else:
                            filetools.write(torrent_path, torrent_file_deco)
                            j += 1
                            descomprimidos += [torrent_path]
                    else:
                        #logger.info('Torrent OK. No hace falta descompresión: ' + str(torrent_path))
                        h = 0

        if json_data:
            filetools.write(json_error_path, json.dumps(json_data))
            filetools.write(json_error_path_BK, json.dumps(json_data))
        filetools.write(json_path, json.dumps({"torrent_verify": True}))
    except:
        logger.error('Error en el proceso de VERIFICACIÓN de los .torrents')
        logger.error(traceback.format_exc())

    logger.error(str(i) + ' archivos .torrent revisados. / ' + str(j) + ' descomprimidos / ' + str(k) + ' errores')
    if descomprimidos:
        logger.error('Lista de .torrents DESCOMPRIMIDOS: ' + str(descomprimidos))
    if errores:
        logger.error('Lista de .torrents en ERROR: ' + str(errores))
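The core of the check above is the bencode-header regex plus gzip inflation. A self-contained sketch of that detection logic (file paths are hypothetical): a valid .torrent starts with a bencoded dict header such as "d8:announce...", while a gzip stream starts with the \x1f\x8b magic; passing 16 + MAX_WBITS to zlib enables gzip framing.

    import re
    import zlib

    def ensure_uncompressed(raw):
        """Return bencoded bytes, inflating gzip-compressed input if needed."""
        if re.match(br'^d\d+:\w+\d+:', raw):  # already plain bencode
            return raw
        return zlib.decompressobj(16 + zlib.MAX_WBITS).decompress(raw)

    # Hypothetical usage:
    # raw = open('/path/to/cached.torrent', 'rb').read()
    # open('/path/to/cached.torrent', 'wb').write(ensure_uncompressed(raw))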


def dejuice(data):
    logger.info()
    # Method to deobfuscate JuicyCodes data

@@ -93,12 +93,20 @@ def dialog_numeric(_type, heading, default=""):
    return d


def dialog_textviewer(heading, text):  # available from Kodi 16 onward
    return xbmcgui.Dialog().textviewer(heading, text)


def itemlist_refresh():
    xbmc.executebuiltin("Container.Refresh")


def itemlist_update(item):
    xbmc.executebuiltin("Container.Update(" + sys.argv[0] + "?" + item.tourl() + ")")
def itemlist_update(item, replace=False):
    if replace:  # reset the path history
        xbmc.executebuiltin("Container.Update(" + sys.argv[0] + "?" + item.tourl() + ", replace)")
    else:
        xbmc.executebuiltin("Container.Update(" + sys.argv[0] + "?" + item.tourl() + ")")


def render_items(itemlist, parent_item):
@@ -1094,27 +1102,37 @@ def play_torrent(item, xlistitem, mediaurl):
    time.sleep(0.5)                                  # give it time to run

    # New pre-download method for the .torrent. On error, look for a local alternative. If it is already local, use it
    url = ''
    url_stat = False
    torrents_path = ''
    videolibrary_path = config.get_videolibrary_path()  # compute the absolute path from the Videolibrary
    if not filetools.exists(videolibrary_path):      # if the path does not exist, fall back to the classic mode
        url_stat = True
    elif not filetools.exists(videolibrary_path + 'temp_torrents_Alfa'):  # if the temp folder for .torrents does not exist, create it
        filetools.mkdir(videolibrary_path + 'temp_torrents_Alfa')
    torrents_path = filetools.join(videolibrary_path, 'temp_torrents_Alfa', 'cliente_torrent_Alfa.torrent')  # temp download path
        videolibrary_path = False
    else:
        torrents_path = filetools.join(videolibrary_path, 'temp_torrents_Alfa', 'cliente_torrent_Alfa.torrent')  # temp download path
    if videolibrary_path and not filetools.exists(filetools.join(videolibrary_path, 'temp_torrents_Alfa')):  # if the temp folder does not exist, create it
        filetools.mkdir(filetools.join(videolibrary_path, 'temp_torrents_Alfa'))

    # work out whether it is a url or a file path. Magnets are handled the classic way
    if not item.url.startswith("\\") and not item.url.startswith("/") and not item.url.startswith("magnet:") and not url_stat:
        timeout = 10
        if item.torrent_alt:
            timeout = 5
        item.url = videolibrarytools.caching_torrents(item.url, torrents_path=torrents_path, timeout=timeout)  # download the .torrent
        if item.url: url_stat = True
    if not item.url and item.torrent_alt:            # on error, look for an alternative .torrent
        item.url = item.torrent_alt                  # the alternative .torrent can be at a url or local
        url = videolibrarytools.caching_torrents(item.url, torrents_path=torrents_path, timeout=timeout)  # download the .torrent
        if url:
            url_stat = True
            item.url = url
            if "torrentin" in torrent_options[seleccion][1]:
                item.url = 'file://' + item.url

    if not url and item.torrent_alt:                 # on error, look for an alternative .torrent
        if (item.torrent_alt.startswith("\\") or item.torrent_alt.startswith("/")) and videolibrary_path:
            item.url = item.torrent_alt              # the alternative .torrent can be at a url or local
        elif not item.url.startswith("\\") and not item.url.startswith("/") and not item.url.startswith("magnet:"):
            item.url = item.torrent_alt

    # If it is a local .torrent file, turn the relative path into an absolute one
    if item.url.startswith("\\") or item.url.startswith("/") and not url_stat:  # local alternative .torrent
    if (item.url.startswith("\\") or item.url.startswith("/")) and not url_stat and videolibrary_path:  # local alternative .torrent
        movies = config.get_setting("folder_movies")
        series = config.get_setting("folder_tvshows")
        if item.contentType == 'movie':
@@ -1122,7 +1140,11 @@ def play_torrent(item, xlistitem, mediaurl):
        else:
            folder = series                          # or series
        item.url = filetools.join(videolibrary_path, folder, item.url)  # address of the local .torrent in the Videolibrary

        if filetools.copy(item.url, torrents_path, silent=True):  # copy to the generic folder to avoid encoding problems
            item.url = torrents_path
        if "torrentin" in torrent_options[seleccion][1]:  # for Torrentin, a prefix must be added
            item.url = 'file://' + item.url

    mediaurl = urllib.quote_plus(item.url)
    # Call with more parameters to complete the title
    if ("quasar" in torrent_options[seleccion][1] or "elementum" in torrent_options[seleccion][1]) and item.infoLabels['tmdb_id']:
@@ -6,6 +6,7 @@
import os
import time
import threading
import traceback

from platformcode import config, logger, platformtools

@@ -140,6 +141,7 @@ def check_addon_updates(verbose=False):

    except:
        logger.error('Error al comprobar actualizaciones del addon!')
        logger.error(traceback.format_exc())
        if verbose:
            platformtools.dialog_notification('Alfa actualizaciones', 'Error al comprobar actualizaciones')
        return False
@@ -125,6 +125,8 @@
        <setting label="Gestión de actualizaciones urgentes de módulos de Alfa (Quick Fixes):" type="lsep"/>
        <setting id="addon_update_timer" type="labelenum" values="0|6|12|24" label="Intervalo entre actualizaciones automáticas (horas)" default="12"/>
        <setting id="addon_update_message" type="bool" label="Quiere ver mensajes de las actualizaciones" default="false"/>

        <setting label="Lista activa" type="text" id="lista_activa" default="alfavorites-default.json" visible="false"/>
    </category>

</settings>
@@ -16,9 +16,9 @@ def test_video_exists(page_url):
def get_video_url(page_url, user="", password="", video_password=""):
    logger.info("(page_url='%s')" % page_url)
    video_urls = []
    page_url = page_url.replace("/v/","/api/sources/")
    page_url = page_url.replace("/v/","/api/source/")
    data = httptools.downloadpage(page_url, post={}).data
    data = jsontools.load(data)
    for videos in data["data"]:
        video_urls.append([videos["label"] + " [fembed]", videos["file"]])
        video_urls.append([videos["label"] + " [fembed]", "https://www.fembed.com" + videos["file"]])
    return video_urls
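This fix switches the endpoint to /api/source/ and anchors relative "file" paths to the site root. A hedged sketch of the response shape the resolver assumes; the payload below is illustrative, the real API may differ:

    sample = {
        "success": True,
        "data": [
            {"label": "480p", "type": "mp4", "file": "/files/abc123.mp4"},
            {"label": "720p", "type": "mp4", "file": "/files/abc456.mp4"},
        ],
    }
    for source in sample["data"]:
        # relative "file" paths only play once anchored to the site root
        print(source["label"], "https://www.fembed.com" + source["file"])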

@@ -20,10 +20,12 @@ def get_video_url(page_url, user="", password="", video_password=""):
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data
    packed = scrapertools.find_multiple_matches(data, "(?s)<script>\s*eval(.*?)\s*</script>")
    scrapertools.printMatches(packed)
    for pack in packed:
        unpacked = jsunpack.unpack(pack)
        if "tida" in unpacked:
            videos = scrapertools.find_multiple_matches(unpacked, 'tid.="([^"]+)')
    logger.info("Intel11 %s" %unpacked)
        if "ldaa" in unpacked:
            videos = scrapertools.find_multiple_matches(unpacked, 'lda.="([^"]+)')
    video_urls = []
    for video in videos:
        if not video.startswith("//"):

@@ -5,10 +5,10 @@
    "patterns": [
        {
            "pattern": "uptobox.com/([a-z0-9]+)",
            "url": "http://uptobox.com/\\1"
            "url": "http://uptostream.com/iframe/\\1"
        },
        {
            "pattern": "uptostream.com/iframe/([a-z0-9]+)",
            "pattern": "uptostream.com/(?:iframe/|)([a-z0-9]+)",
            "url": "http://uptostream.com/iframe/\\1"
        }
    ]
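These pattern/url pairs drive a regex rewrite of detected links, so both uptobox and uptostream ids now resolve through the uptostream iframe. A sketch of how such a table is plausibly applied; the actual dispatcher is not part of this diff, so this is an assumption about its behaviour:

    import re

    PATTERNS = [
        (r"uptobox.com/([a-z0-9]+)", r"http://uptostream.com/iframe/\1"),
        (r"uptostream.com/(?:iframe/|)([a-z0-9]+)", r"http://uptostream.com/iframe/\1"),
    ]

    def canonical_url(link):
        # first regex that matches wins; its "url" template rebuilds the link
        for pattern, template in PATTERNS:
            match = re.search(pattern, link)
            if match:
                return re.sub(pattern, template, match.group(0))
        return None

    print(canonical_url("https://uptobox.com/abc123"))    # http://uptostream.com/iframe/abc123
    print(canonical_url("http://uptostream.com/xyz789"))  # http://uptostream.com/iframe/xyz789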

@@ -8,32 +8,19 @@ from core import scrapertools
from platformcode import logger


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)

    data = httptools.downloadpage(page_url)

    if data.code == 404:
        return False, "[Vivo] El archivo no existe o ha sido borrado"

    return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("url=" + page_url)

    video_urls = []
    data = httptools.downloadpage(page_url).data
    data = re.sub(r'"|\n|\r|\t| |<br>|\s{2,}', "", data)

    enc_data = scrapertools.find_single_match(data, "Core.InitializeStream \('(.*?)'\)")
    logger.debug(enc_data)
    enc_data = scrapertools.find_single_match(data, 'data-stream="([^"]+)')
    dec_data = base64.b64decode(enc_data)

    logger.debug(dec_data)

    for url in eval(dec_data):
        video_urls.append(['vivo', url])

    video_urls.append(['vivo', dec_data])
    return video_urls
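The rewrite swaps the obfuscated Core.InitializeStream payload for a base64-encoded data-stream attribute, and drops the eval() of the decoded data: the attribute now decodes to a single url string that is used directly. A round-trip sketch with a made-up HTML snippet; the attribute layout is assumed from the regex above:

    import base64
    import re

    # what the page embeds is just base64 of the direct url
    direct_url = b"https://example.org/video.mp4"
    html = '<video data-stream="%s"></video>' % base64.b64encode(direct_url).decode()
    enc = re.search(r'data-stream="([^"]+)', html).group(1)
    assert base64.b64decode(enc) == direct_url  # a single url; no eval() needed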

@@ -3,7 +3,7 @@
# Service for updating new episodes on library series
# ------------------------------------------------------------

import datetime, imp, math, threading
import datetime, imp, math, threading, traceback

from core import channeltools, filetools, videolibrarytools
from platformcode import config, logger

@@ -32,7 +32,7 @@ def update(path, p_dialog, i, t, serie, overwrite):
        serie.category = category
        serie, it, overwrite = generictools.redirect_clone_newpct1(serie, head_nfo, it, path, overwrite)
    except:
        pass
        logger.error(traceback.format_exc())

    channel_enabled = channeltools.is_enabled(serie.channel)

@@ -89,7 +89,7 @@ def update(path, p_dialog, i, t, serie, overwrite):
            from platformcode import xbmc_videolibrary
            xbmc_videolibrary.mark_content_as_watched_on_alfa(path + '/tvshow.nfo')
        except:
            pass
            logger.error(traceback.format_exc())

    return insertados_total > 0

@@ -127,7 +127,7 @@ def check_for_update(overwrite=True):
                    try:
                        serie, serie, overwrite_forced = generictools.redirect_clone_newpct1(serie, head_nfo, serie, path, overwrite, lookup=True)
                    except:
                        pass
                        logger.error(traceback.format_exc())
                    if overwrite_forced == True:
                        overwrite = True
                        serie.update_next = ''

@@ -143,7 +143,7 @@ def check_for_update(overwrite=True):
                        if not estado or estado == False or not serie.library_playcounts:  # if it has not been done before, do it now
                            serie, estado = videolibrary.verify_playcount_series(serie, path)  # also run when a PlayCount is missing entirely
                    except:
                        pass
                        logger.error(traceback.format_exc())
                    else:
                        if estado:                   # if the update succeeded...
                            estado_verify_playcount_series = True  # ... mark it to change the Videolibrary option

@@ -159,7 +159,7 @@ def check_for_update(overwrite=True):
                        from platformcode import xbmc_videolibrary
                        xbmc_videolibrary.mark_content_as_watched_on_alfa(path + '/tvshow.nfo')
                    except:
                        pass
                        logger.error(traceback.format_exc())

                    continue