Updated

bajui: removed, its website no longer exists
divxtotal: removed, its website no longer exists
guaridavalencianista: removed, its website no longer exists
lacajita: removed, its website no longer exists
doom: fix
locopelis: fix
retroseriestv: new channel
veseriesonline: json added
hdfull: fix so infoplus shows up in findvideos
Intel1
2018-07-23 17:46:38 -05:00
parent e68e6d8a4c
commit 8e7140f4aa
15 changed files with 340 additions and 1563 deletions

View File

@@ -1,26 +0,0 @@
{
"id": "bajui",
"name": "Bajui",
"active": false,
"adult": false,
"language": ["cast"],
"thumbnail": "bajui.png",
"banner": "bajui.png",
"fanart": "bajui.png",
"categories": [
"movie",
"tvshow",
"documentary",
"vos"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,247 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger
from channelselector import get_thumb
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, title="Películas", action="menupeliculas",
url="http://www.bajui.org/descargas/categoria/2/peliculas",
fanart=item.fanart, thumbnail=get_thumb('movies', auto=True)))
itemlist.append(Item(channel=item.channel, title="Series", action="menuseries",
fanart=item.fanart, thumbnail=get_thumb('tvshows', auto=True)))
itemlist.append(Item(channel=item.channel, title="Documentales", action="menudocumentales",
fanart=item.fanart, thumbnail=get_thumb('documentaries', auto=True)))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search",
fanart=item.fanart, thumbnail=get_thumb('search', auto=True)))
return itemlist
def menupeliculas(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, title="Películas - Novedades", action="peliculas", url=item.url,
fanart=item.fanart, viewmode="movie_with_plot"))
itemlist.append(
Item(channel=item.channel, title="Películas - A-Z", action="peliculas", url=item.url + "/orden:nombre",
fanart=item.fanart, viewmode="movie_with_plot"))
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data, '<ul class="submenu2 subcategorias">(.*?)</ul>')
patron = '<a href="([^"]+)">([^<]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for url, title in matches:
scrapedurl = urlparse.urljoin(item.url, url)
itemlist.append(Item(channel=item.channel, title="Películas en " + title, action="peliculas", url=scrapedurl,
fanart=item.fanart, viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url="", fanart=item.fanart))
return itemlist
def menuseries(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, title="Series - Novedades", action="peliculas",
url="http://www.bajui.org/descargas/categoria/3/series",
fanart=item.fanart, viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, title="Series - A-Z", action="peliculas",
url="http://www.bajui.org/descargas/categoria/3/series/orden:nombre",
fanart=item.fanart, viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, title="Series - HD", action="peliculas",
url="http://www.bajui.org/descargas/subcategoria/11/hd/orden:nombre",
fanart=item.fanart, viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url="",
fanart=item.fanart))
return itemlist
def menudocumentales(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, title="Documentales - Novedades", action="peliculas",
url="http://www.bajui.org/descargas/categoria/7/docus-y-tv",
fanart=item.fanart, viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, title="Documentales - A-Z", action="peliculas",
url="http://www.bajui.org/descargas/categoria/7/docus-y-tv/orden:nombre",
fanart=item.fanart, viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url="",
fanart=item.fanart))
return itemlist
def search(item, texto, categoria=""):
logger.info(item.url + " search " + texto)
itemlist = []
url = item.url
texto = texto.replace(" ", "+")
logger.info("categoria: " + categoria + " url: " + url)
try:
item.url = "http://www.bajui.org/descargas/busqueda/%s"
item.url = item.url % texto
itemlist.extend(peliculas(item))
return itemlist
# The exception is caught so a failing channel does not break the global search
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def peliculas(item, paginacion=True):
logger.info()
url = item.url
data = httptools.downloadpage(url).data
patron = '<li id="ficha-\d+" class="ficha2[^<]+'
patron += '<div class="detalles-ficha"[^<]+'
patron += '<span class="nombre-det">Ficha\: ([^<]+)</span>[^<]+'
patron += '<span class="categoria-det">[^<]+</span>[^<]+'
patron += '<span class="descrip-det">(.*?)</span>[^<]+'
patron += '</div>.*?<a href="([^"]+)"[^<]+'
patron += '<img src="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
itemlist = []
for title, plot, url, thumbnail in matches:
scrapedtitle = title
scrapedplot = clean_plot(plot)
scrapedurl = urlparse.urljoin(item.url, url)
scrapedthumbnail = urlparse.urljoin("http://bajui.org/", thumbnail.replace("_m.jpg", "_g.jpg"))
itemlist.append(
Item(channel=item.channel, action="enlaces", title=scrapedtitle, fulltitle=title, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot, extra=scrapedtitle, context="4|5",
fanart=item.fanart, viewmode="movie_with_plot"))
patron = '<a href="([^"]+)" class="pagina pag_sig">Siguiente \&raquo\;</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
if len(matches) > 0:
scrapedurl = urlparse.urljoin("http://www.bajui.org/", matches[0])
pagitem = Item(channel=item.channel, action="peliculas", title=">> Página siguiente", url=scrapedurl,
fanart=item.fanart, viewmode="movie_with_plot")
if not paginacion:
itemlist.extend(peliculas(pagitem))
else:
itemlist.append(pagitem)
return itemlist
def clean_plot(scrapedplot):
scrapedplot = scrapedplot.replace("\n", "").replace("\r", "")
scrapedplot = re.compile("TÍTULO ORIGINAL[^<]+<br />", re.DOTALL).sub("", scrapedplot)
scrapedplot = re.compile("AÑO[^<]+<br />", re.DOTALL).sub("", scrapedplot)
scrapedplot = re.compile("Año[^<]+<br />", re.DOTALL).sub("", scrapedplot)
scrapedplot = re.compile("DURACIÓN[^<]+<br />", re.DOTALL).sub("", scrapedplot)
scrapedplot = re.compile("Duración[^<]+<br />", re.DOTALL).sub("", scrapedplot)
scrapedplot = re.compile("PAIS[^<]+<br />", re.DOTALL).sub("", scrapedplot)
scrapedplot = re.compile("PAÍS[^<]+<br />", re.DOTALL).sub("", scrapedplot)
scrapedplot = re.compile("Pais[^<]+<br />", re.DOTALL).sub("", scrapedplot)
scrapedplot = re.compile("País[^<]+<br />", re.DOTALL).sub("", scrapedplot)
scrapedplot = re.compile("DIRECTOR[^<]+<br />", re.DOTALL).sub("", scrapedplot)
scrapedplot = re.compile("DIRECCIÓN[^<]+<br />", re.DOTALL).sub("", scrapedplot)
scrapedplot = re.compile("Dirección[^<]+<br />", re.DOTALL).sub("", scrapedplot)
scrapedplot = re.compile("REPARTO[^<]+<br />", re.DOTALL).sub("", scrapedplot)
scrapedplot = re.compile("Reparto[^<]+<br />", re.DOTALL).sub("", scrapedplot)
scrapedplot = re.compile("Interpretación[^<]+<br />", re.DOTALL).sub("", scrapedplot)
scrapedplot = re.compile("GUIÓN[^<]+<br />", re.DOTALL).sub("", scrapedplot)
scrapedplot = re.compile("Guión[^<]+<br />", re.DOTALL).sub("", scrapedplot)
scrapedplot = re.compile("MÚSICA[^<]+<br />", re.DOTALL).sub("", scrapedplot)
scrapedplot = re.compile("Música[^<]+<br />", re.DOTALL).sub("", scrapedplot)
scrapedplot = re.compile("FOTOGRAFÍA[^<]+<br />", re.DOTALL).sub("", scrapedplot)
scrapedplot = re.compile("Fotografía[^<]+<br />", re.DOTALL).sub("", scrapedplot)
scrapedplot = re.compile("PRODUCTORA[^<]+<br />", re.DOTALL).sub("", scrapedplot)
scrapedplot = re.compile("Producción[^<]+<br />", re.DOTALL).sub("", scrapedplot)
scrapedplot = re.compile("Montaje[^<]+<br />", re.DOTALL).sub("", scrapedplot)
scrapedplot = re.compile("Vestuario[^<]+<br />", re.DOTALL).sub("", scrapedplot)
scrapedplot = re.compile("GÉNERO[^<]+<br />", re.DOTALL).sub("", scrapedplot)
scrapedplot = re.compile("GENERO[^<]+<br />", re.DOTALL).sub("", scrapedplot)
scrapedplot = re.compile("Genero[^<]+<br />", re.DOTALL).sub("", scrapedplot)
scrapedplot = re.compile("Género[^<]+<br />", re.DOTALL).sub("", scrapedplot)
scrapedplot = re.compile("PREMIOS[^<]+<br />", re.DOTALL).sub("", scrapedplot)
scrapedplot = re.compile("SINOPSIS", re.DOTALL).sub("", scrapedplot)
scrapedplot = re.compile("Sinopsis", re.DOTALL).sub("", scrapedplot)
scrapedplot = scrapertools.htmlclean(scrapedplot)
return scrapedplot
def enlaces(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
try:
item.plot = scrapertools.get_match(data, '<span class="ficha-descrip">(.*?)</span>')
item.plot = clean_plot(item.plot)
except:
pass
try:
item.thumbnail = scrapertools.get_match(data, '<div class="ficha-imagen"[^<]+<img src="([^"]+)"')
item.thumbnail = urlparse.urljoin("http://www.bajui.org/", item.thumbnail)
except:
pass
patron = '<div class="box-enlace-cabecera"[^<]+'
patron += '<div class="datos-usuario"><img class="avatar" src="([^"]+)" />Enlaces[^<]+'
patron += '<a class="nombre-usuario" href="[^"]+">([^<]+)</a[^<]+</div>[^<]+'
patron += '<div class="datos-act">Actualizado. ([^<]+)</div>.*?'
patron += '<div class="datos-boton-mostrar"><a id="boton-mostrar-\d+" class="boton" href="javascript.mostrar_enlaces\((\d+)\,\'([^\']+)\'[^>]+>Mostrar enlaces</a></div>[^<]+'
patron += '<div class="datos-servidores"><div class="datos-servidores-cell">(.*?)</div></div>'
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for thumbnail, usuario, fecha, id, id2, servidores in matches:
patronservidores = '<img src="[^"]+" title="([^"]+)"'
matches2 = re.compile(patronservidores, re.DOTALL).findall(servidores)
lista_servidores = ""
for servidor in matches2:
lista_servidores = lista_servidores + servidor + ", "
lista_servidores = lista_servidores[:-2]
scrapedthumbnail = item.thumbnail
scrapedurl = "http://www.bajui.org/ajax/mostrar-enlaces.php?id=" + id + "&code=" + id2
scrapedplot = item.plot
scrapedtitle = "Enlaces de " + usuario + " (" + fecha + ") (" + lista_servidores + ")"
itemlist.append(
Item(channel=item.channel, action="findvideos", title=scrapedtitle, fulltitle=item.title, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot, context="4|5",
fanart=item.fanart))
return itemlist
def findvideos(item):
logger.info()
data = httptools.downloadpage(item.url).data
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.channel = item.channel
videoitem.plot = item.plot
videoitem.thumbnail = item.thumbnail
videoitem.fulltitle = item.fulltitle
try:
parsed_url = urlparse.urlparse(videoitem.url)
fichero = parsed_url.path
partes = fichero.split("/")
titulo = partes[len(partes) - 1]
videoitem.title = titulo + " - [" + videoitem.server + "]"
except:
videoitem.title = item.title
return itemlist

View File

@@ -1,39 +0,0 @@
{
"id": "divxtotal",
"name": "Divxtotal",
"active": false,
"adult": false,
"language": ["cast"],
"thumbnail": "http://imgur.com/Madj03A.jpg",
"categories": [
"torrent",
"movie",
"tvshow"
],
"settings": [
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_torrent",
"type": "bool",
"label": "Incluir en Novedades - Torrent",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,500 +0,0 @@
# -*- coding: utf-8 -*-
import os
import re
import urllib
from core import httptools
from core import scrapertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
header = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0'}
host = "http://www.divxtotal.co"
__modo_grafico__ = config.get_setting('modo_grafico', "divxtotal")
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(item.clone(title="[COLOR orange][B]Películas[/B][/COLOR]", action="scraper",
url = host + "/peliculas/", thumbnail=get_thumb('movies', auto=True),
fanart="http://imgur.com/fdntKsy.jpg", contentType="movie"))
itemlist.append(item.clone(title="[COLOR orange][B] Películas HD[/B][/COLOR]", action="scraper",
url = host + "/peliculas-hd/", thumbnail="http://imgur.com/A4zN3OP.png",
fanart="http://imgur.com/fdntKsy.jpg", contentType="movie"))
itemlist.append(itemlist[-1].clone(title="[COLOR orange][B]Series[/B][/COLOR]", action="scraper",
url = host + "/series/", thumbnail=get_thumb('tvshows', auto=True),
contentType="tvshow"))
itemlist.append(itemlist[-1].clone(title="[COLOR orangered][B]Buscar[/B][/COLOR]", action="search",
thumbnail=get_thumb('search', auto=True)))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "/?s=" + texto
item.extra = "search"
try:
return buscador(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def buscador(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url, headers=header, cookies=False).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<tr><td class="text-left"><a href="([^"]+)" title="([^"]+)">.*?-left">(.*?)</td>'
matches = scrapertools.find_multiple_matches(data, patron)
for url, title, check in matches:
if "N/A" in check:
checkmt = "tvshow"
else:
checkmt = "movie"
titulo = title
title = re.sub(r"!|¡|HD|\d+\d+\d+\d+|\(.*?\).*\[.*?]\]", "", title)
title = re.sub(r"&#8217;|PRE-Estreno", "'", title)
if checkmt == "movie":
new_item = item.clone(action="findvideos", title=titulo, url=url, fulltitle=title, contentTitle=title,
contentType="movie", library=True)
else:
if item.extra == "search":
new_item = item.clone(action="findtemporadas", title=titulo, url=url, fulltitle=title,
contentTitle=title, show=title, contentType="tvshow", library=True)
else:
new_item = item.clone(action="findvideos", title=titulo, url=url, fulltitle=title, contentTitle=title,
show=title, contentType="tvshow", library=True)
new_item.infoLabels['year'] = get_year(url)
itemlist.append(new_item)
## Pagination
next = scrapertools.find_single_match(data, "<ul class=\"pagination\">.*?\(current\).*?href='([^']+)'")
if len(next) > 0:
url = next
itemlist.append(item.clone(title="[COLOR springgreen][B]Siguiente >>[/B][/COLOR]", action="buscador", url=url))
try:
from core import tmdb
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
for item in itemlist:
if not "Siguiente >>" in item.title:
if "0." in str(item.infoLabels['rating']):
item.infoLabels['rating'] = "[COLOR indianred]Sin puntuacíon[/COLOR]"
else:
item.infoLabels['rating'] = "[COLOR springgreen]" + str(item.infoLabels['rating']) + "[/COLOR]"
item.title = item.title + " " + str(item.infoLabels['rating'])
except:
pass
return itemlist
def scraper(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url, headers=header, cookies=False).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
if item.contentType == "movie":
patron = '<tr><td><a href="([^"]+)" title="([^"]+)".*?\d+-\d+-([^"]+)</td><td>'
matches = scrapertools.find_multiple_matches(data, patron)
for url, title, year in matches:
titulo = re.sub(r"\d+\d+\d+\d+|\(.*?\).*", "", title)
title = re.sub(r"!|¡|HD|\d+\d+\d+\d+|\(.*?\).*", "", title)
title = title.replace("Autosia", "Autopsia")
title = re.sub(r"&#8217;|PRE-Estreno", "'", title)
new_item = item.clone(action="findvideos", title="[COLOR orange]" + titulo + "[/COLOR]", url=url,
fulltitle=title, contentTitle=title, contentType="movie", extra=year, library=True)
new_item.infoLabels['year'] = get_year(url)
itemlist.append(new_item)
else:
patron = '(?s)<p class="secconimagen"><a href="([^"]+)"'
patron += ' title="[^"]+"><img src="([^"]+)".*?'
patron += 'rel="bookmark">([^<]+)<'
matches = scrapertools.find_multiple_matches(data, patron)
for url, thumb, title in matches:
titulo = title.strip()
title = re.sub(r"\d+x.*|\(.*?\)", "", title)
new_item = item.clone(action="findvideos", title="[COLOR orange]" + titulo + "[/COLOR]", url=url,
thumbnail=thumb,
fulltitle=title, contentTitle=title, show=title, contentType="tvshow", library=True)
new_item.infoLabels['year'] = get_year(url)
itemlist.append(new_item)
## Pagination
next = scrapertools.find_single_match(data, "<ul class=\"pagination\">.*?\(current\).*?href='([^']+)'")
if len(next) > 0:
url = next
itemlist.append(
item.clone(title="[COLOR springgreen][B]Siguiente >>[/B][/COLOR]", thumbnail="http://imgur.com/a7lQAld.png",
url=url))
try:
from core import tmdb
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
for item in itemlist:
if not "Siguiente >>" in item.title:
if "0." in str(item.infoLabels['rating']):
item.infoLabels['rating'] = "[COLOR indianred]Sin puntuacíon[/COLOR]"
else:
item.infoLabels['rating'] = "[COLOR springgreen]" + str(item.infoLabels['rating']) + "[/COLOR]"
item.title = item.title + " " + str(item.infoLabels['rating'])
except:
pass
return itemlist
def findtemporadas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
if len(item.extra.split("|")):
if len(item.extra.split("|")) >= 4:
fanart = item.extra.split("|")[2]
extra = item.extra.split("|")[3]
try:
fanart_extra = item.extra.split("|")[4]
except:
fanart_extra = item.extra.split("|")[3]
try:
fanart_info = item.extra.split("|")[5]
except:
fanart_extra = item.extra.split("|")[3]
elif len(item.extra.split("|")) == 3:
fanart = item.extra.split("|")[2]
extra = item.extra.split("|")[0]
fanart_extra = item.extra.split("|")[0]
fanart_info = item.extra.split("|")[1]
elif len(item.extra.split("|")) == 2:
fanart = item.extra.split("|")[1]
extra = item.extra.split("|")[0]
fanart_extra = item.extra.split("|")[0]
fanart_info = item.extra.split("|")[1]
else:
extra = item.extra
fanart_extra = item.extra
fanart_info = item.extra
try:
logger.info(fanart_extra)
logger.info(fanart_info)
except:
fanart_extra = item.fanart
fanart_info = item.fanart
bloque_episodios = scrapertools.find_multiple_matches(data, 'Temporada (\d+).*?<\/a>(.*?)<\/table>')
for temporada, bloque_epis in bloque_episodios:
item.infoLabels = item.InfoLabels
item.infoLabels['season'] = temporada
itemlist.append(item.clone(action="epis",
title="[COLOR saddlebrown][B]Temporada [/B][/COLOR]" + "[COLOR sandybrown][B]" + temporada + "[/B][/COLOR]",
url=bloque_epis, contentType=item.contentType, contentTitle=item.contentTitle,
show=item.show, extra=item.extra, fanart_extra=fanart_extra, fanart_info=fanart_info,
datalibrary=data, folder=True))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
for item in itemlist:
item.fanart = fanart
item.extra = extra
if config.get_videolibrary_support() and itemlist:
if len(bloque_episodios) == 1:
extra = "epis"
else:
extra = "epis###serie_add"
infoLabels = {'tmdb_id': item.infoLabels['tmdb_id'], 'tvdb_id': item.infoLabels['tvdb_id'],
'imdb_id': item.infoLabels['imdb_id']}
itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", text_color="0xFFe5ffcc",
action="add_serie_to_library", extra=extra, url=item.url,
contentSerieName=item.fulltitle, infoLabels=infoLabels,
thumbnail='http://imgur.com/xQNTqqy.png', datalibrary=data))
return itemlist
def epis(item):
logger.info()
itemlist = []
if item.extra == "serie_add":
item.url = item.datalibrary
patron = '<td><img src=".*?images\/(.*?)\.png".*?href="([^"]+)" title="">.*?(\d+x\d+).*?td>'
matches = scrapertools.find_multiple_matches(item.url, patron)
for idioma, url, epi in matches:
episodio = scrapertools.find_single_match(epi, '\d+x(\d+)')
item.infoLabels['episode'] = episodio
itemlist.append(
item.clone(title="[COLOR orange]" + epi + "[/COLOR]" + "[COLOR sandybrown] " + idioma + "[/COLOR]", url=url,
action="findvideos", show=item.show, fanart=item.extra, extra=item.extra,
fanart_extra=item.fanart_extra, fanart_info=item.fanart_info, folder=True))
if item.extra != "serie_add":
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
for item in itemlist:
item.fanart = item.extra
if item.infoLabels['title']: title = "[COLOR burlywood]" + item.infoLabels['title'] + "[/COLOR]"
item.title = item.title + " -- \"" + title + "\""
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
if item.contentType != "movie":
if not item.infoLabels['episode']:
capitulo = scrapertools.find_single_match(item.title, '(\d+x\d+)')
patron = '<a href="(' + host + '/wp-content/uploads/.*?' + capitulo + '.*?.torrent)'
url_capitulo = scrapertools.find_single_match(data, patron)
if len(item.extra.split("|")) >= 2:
extra = item.extra
else:
extra = item.fanart
else:
capitulo = item.title
url_capitulo = item.url
ext_v, size = ext_size(url_capitulo)
try:
fanart = item.fanart_extra
except:
fanart = item.extra.split("|")[0]
itemlist.append(Item(channel=item.channel,
title="[COLOR chocolate][B]Ver capítulo " + capitulo + "[/B][/COLOR]" + "-" + "[COLOR khaki] ( Video" + "[/COLOR]" + " " + "[COLOR khaki]" + ext_v + "[/COLOR]" + " " + "[COLOR khaki] " + size + " )" + "[/COLOR]",
url=url_capitulo, action="play", server="torrent", fanart=fanart, thumbnail=item.thumbnail,
extra=item.extra, fulltitle=item.fulltitle, folder=False))
if item.infoLabels['episode'] and item.library:
thumbnail = scrapertools.find_single_match(item.extra, 'http://assets.fanart.tv/.*jpg')
if thumbnail == "":
thumbnail = item.thumbnail
if not "assets.fanart" in item.fanart_info:
fanart = item.fanart_info
else:
fanart = item.fanart
itemlist.append(Item(channel=item.channel, title="[COLOR darksalmon][B] info[/B][/COLOR]",
action="info_capitulos", fanart=fanart, thumbnail=item.thumb_art,
thumb_info=item.thumb_info, extra=item.extra, show=item.show,
InfoLabels=item.infoLabels, folder=False))
if not item.infoLabels['episode']:
itemlist.append(
Item(channel=item.channel, title="[COLOR moccasin][B]Todos los episodios[/B][/COLOR]", url=item.url,
action="findtemporadas", server="torrent",
thumbnail=item.thumbnail, extra=item.extra + "|" + item.thumbnail, contentType=item.contentType,
contentTitle=item.contentTitle, InfoLabels=item.infoLabels, thumb_art=item.thumb_art,
thumb_info=item.thumbnail, fulltitle=item.fulltitle, library=item.library, folder=True))
else:
url = scrapertools.find_single_match(data, '<h3 class="orange text-center">.*?href="([^"]+)"')
item.infoLabels['year'] = None
ext_v, size = ext_size(url)
itemlist.append(Item(channel=item.channel,
title="[COLOR saddlebrown][B]Torrent [/B][/COLOR]" + "-" + "[COLOR khaki] ( Video" + "[/COLOR]" + " " + "[COLOR khaki]" + ext_v + "[/COLOR]" + " " + "[COLOR khaki] " + size + " )" + "[/COLOR]",
url=url, action="play", server="torrent", fanart=item.fanart, thumbnail=item.thumbnail,
extra=item.extra, InfoLabels=item.infoLabels, folder=False))
if item.library and config.get_videolibrary_support() and len(itemlist) > 0:
infoLabels = {'tmdb_id': item.infoLabels['tmdb_id'],
'title': item.infoLabels['title']}
itemlist.append(Item(channel=item.channel, title="Añadir esta película a la videoteca",
action="add_pelicula_to_library", url=item.url, infoLabels=infoLabels,
text_color="0xFFe5ffcc",
thumbnail='http://imgur.com/xQNTqqy.png'))
return itemlist
def info_capitulos(item, images={}):
logger.info()
try:
url = "http://thetvdb.com/api/1D62F2F90030C444/series/" + str(item.InfoLabels['tvdb_id']) + "/default/" + str(
item.InfoLabels['season']) + "/" + str(item.InfoLabels['episode']) + "/es.xml"
if "/0" in url:
url = url.replace("/0", "/")
from core import jsontools
data = httptools.downloadpage(url).data
if "<filename>episodes" in data:
image = scrapertools.find_single_match(data, '<Data>.*?<filename>(.*?)</filename>')
image = "http://thetvdb.com/banners/" + image
else:
try:
image = item.InfoLabels['episodio_imagen']
except:
image = "http://imgur.com/ZiEAVOD.png"
foto = item.thumb_info
if not ".png" in foto:
foto = "http://imgur.com/PRiEW1D.png"
try:
title = item.InfoLabels['episodio_titulo']
except:
title = ""
title = "[COLOR red][B]" + title + "[/B][/COLOR]"
try:
plot = "[COLOR peachpuff]" + str(item.InfoLabels['episodio_sinopsis']) + "[/COLOR]"
except:
plot = scrapertools.find_single_match(data, '<Overview>(.*?)</Overview>')
if plot == "":
plot = "Sin información todavia"
try:
rating = item.InfoLabels['episodio_vote_average']
except:
rating = 0
try:
if rating >= 5 and rating < 8:
rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR springgreen][B]" + str(rating) + "[/B][/COLOR]"
elif rating >= 8 and rating < 10:
rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR yellow][B]" + str(rating) + "[/B][/COLOR]"
elif rating == 10:
rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR orangered][B]" + str(rating) + "[/B][/COLOR]"
else:
rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR crimson][B]" + str(rating) + "[/B][/COLOR]"
except:
rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR crimson][B]" + str(rating) + "[/B][/COLOR]"
if "10." in rating:
rating = re.sub(r'10\.\d+', '10', rating)
except:
title = "[COLOR red][B]LO SENTIMOS...[/B][/COLOR]"
plot = "Este capitulo no tiene informacion..."
plot = "[COLOR yellow][B]" + plot + "[/B][/COLOR]"
image = "http://s6.postimg.cc/ub7pb76c1/noinfo.png"
foto = "http://s6.postimg.cc/nm3gk1xox/noinfosup2.png"
rating = ""
ventana = TextBox2(title=title, plot=plot, thumbnail=image, fanart=foto, rating=rating)
ventana.doModal()
def tokenize(text, match=re.compile("([idel])|(\d+):|(-?\d+)").match):
i = 0
while i < len(text):
m = match(text, i)
s = m.group(m.lastindex)
i = m.end()
if m.lastindex == 2:
yield "s"
yield text[i:i + int(s)]
i = i + int(s)
else:
yield s
def decode_item(next, token):
if token == "i":
# integer: "i" value "e"
data = int(next())
if next() != "e":
raise ValueError
elif token == "s":
# string: "s" value (virtual tokens)
data = next()
elif token == "l" or token == "d":
# container: "l" (or "d") values "e"
data = []
tok = next()
while tok != "e":
data.append(decode_item(next, tok))
tok = next()
if token == "d":
data = dict(zip(data[0::2], data[1::2]))
else:
raise ValueError
return data
def decode(text):
try:
src = tokenize(text)
data = decode_item(src.next, src.next())
for token in src: # look for more tokens
raise SyntaxError("trailing junk")
except (AttributeError, ValueError, StopIteration):
try:
data = data
except:
data = src
return data
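decode() above is a small bencode parser for .torrent metadata: "i...e" wraps integers, strings are "<len>:<bytes>", and "l...e" / "d...e" wrap lists and dicts. An illustrative round trip (Python 2, as in this file):

sample = "d4:spaml1:a1:bee"   # a bencoded dict holding a two-element list
print decode(sample)          # -> {'spam': ['a', 'b']}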
def convert_size(size):
import math
if (size == 0):
return '0B'
size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
i = int(math.floor(math.log(size, 1024)))
p = math.pow(1024, i)
s = round(size / p, 2)
return '%s %s' % (s, size_name[i])
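convert_size() then renders a raw byte count as a human-readable label, for example:

print convert_size(0)             # -> '0B'
print convert_size(1536)          # -> '1.5 KB'
print convert_size(734003200)     # -> '700.0 MB'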
def get_year(url):
data = httptools.downloadpage(url, headers=header, cookies=False).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
year = scrapertools.find_single_match(data, '<h3>.*?(\d+\d+\d+\d+)')
if year == "":
year = " "
return year
def ext_size(url):
torrents_path = config.get_videolibrary_path() + '/torrents'
if not os.path.exists(torrents_path):
os.mkdir(torrents_path)
try:
urllib.urlretrieve("http://anonymouse.org/cgi-bin/anon-www.cgi/" + url, torrents_path + "/temp.torrent")
pepe = open(torrents_path + "/temp.torrent", "rb").read()
except:
pepe = ""
torrent = decode(pepe)
try:
name = torrent["info"]["name"]
sizet = torrent["info"]['length']
sizet = convert_size(sizet)
except:
name = "no disponible"
try:
check_video = scrapertools.find_multiple_matches(str(torrent["info"]["files"]), "'length': (\d+)}")
size = max([int(i) for i in check_video])
for file in torrent["info"]["files"]:
manolo = "%r - %d bytes" % ("/".join(file["path"]), file["length"])
if str(size) in manolo:
video = manolo
size = convert_size(size)
ext_v = re.sub(r"-.*? bytes|.*?\[.*?\].|'|.*?COM.|.*?\[.*?\]|\(.*?\)|.*?\.", "", video)
try:
os.remove(torrents_path + "/temp.torrent")
except:
pass
except:
try:
size = sizet
ext_v = re.sub(r"-.*? bytes|.*?\[.*?\].|'|.*?COM.|.*?\.es.|.*?\[.*?\]|.*?\(.*?\)\.|.*?\.", "", name)
except:
size = "NO REPRODUCIBLE"
ext_v = ""
try:
os.remove(torrents_path + "/temp.torrent")
except:
pass
if "rar" in ext_v:
ext_v = ext_v + " -- No reproducible"
size = ""
return ext_v, size
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == 'torrent':
item.url = host + '/peliculas/'
item.contentType="movie"
itemlist = scraper(item)
if itemlist[-1].title == "[COLOR springgreen][B]Siguiente >>[/B][/COLOR]":
itemlist.pop()
# The exception is caught so a failing channel does not break the Newest section
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist

View File

@@ -35,7 +35,9 @@ def mainlist(item):
action="lista",
thumbnail=get_thumb('all', auto=True),
fanart='https://s18.postimg.cc/fwvaeo6qh/todas.png',
url='%s%s'%(host,'peliculas/page/1')
url='%s%s'%(host,'peliculas/'),
first=0
))
itemlist.append(
@@ -43,7 +45,7 @@ def mainlist(item):
action="seccion",
thumbnail=get_thumb('genres', auto=True),
fanart='https://s3.postimg.cc/5s9jg2wtf/generos.png',
url='%s%s' % (host, 'peliculas/page/1'),
url='%s%s' % (host, 'peliculas/'),
))
itemlist.append(
@@ -51,7 +53,8 @@ def mainlist(item):
action="lista",
thumbnail=get_thumb('more watched', auto=True),
fanart='https://s9.postimg.cc/wmhzu9d7z/vistas.png',
url='%s%s'%(host,'top-imdb/page/1'),
url='%s%s'%(host,'top-imdb/'),
first=0
))
itemlist.append(
@@ -69,9 +72,7 @@ def lista(item):
logger.info()
itemlist = []
max_items = 20
next_page_url = ''
next = False
data = httptools.downloadpage(item.url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
@@ -80,23 +81,13 @@ def lista(item):
matches = re.compile(patron, re.DOTALL).findall(data)
if item.next_page != 'b':
if len(matches) > max_items:
next_page_url = item.url
matches = matches[:max_items]
next_page = 'b'
else:
matches = matches[max_items:]
next_page = 'a'
next_page_str = scrapertools.find_single_match(data,"<li class='active'><a class=''>(\d+)</a>")
next_page_num = int(next_page_str)+1
page_base = re.sub(r'(page\/\d+)','', item.url)
next_page_url = '%s%s%s'%(page_base,'page/',next_page_num)
first = item.first
last = first + 19
if last > len(matches):
last = len(matches)
next = True
if next_page_url:
next_page_url = next_page_url
for scrapedurl, quality, scrapedthumbnail, scrapedtitle, plot in matches:
for scrapedurl, quality, scrapedthumbnail, scrapedtitle, plot in matches[first:last]:
url = scrapedurl
thumbnail = scrapedthumbnail
@@ -118,17 +109,17 @@ def lista(item):
contentTitle=title
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True)
# Pagination
if next_page_url != '':
itemlist.append(
Item(channel=item.channel,
action="lista",
title='Siguiente >>>',
url=next_page_url,
thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png',
extra=item.extra,
next_page=next_page
))
if not next:
url_next_page = item.url
first = last
else:
url_next_page = scrapertools.find_single_match(data, "<a href=([^ ]+) class=page-link aria-label=Next>")
first = 0
if url_next_page:
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='lista', first=first))
return itemlist
@@ -153,7 +144,8 @@ def seccion(item):
action='lista',
title=title,
url=url,
thumbnail=thumbnail
thumbnail=thumbnail,
first=0
))
return itemlist
@@ -162,6 +154,7 @@ def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
item.first=0
if texto != '':
return lista(item)
@@ -178,6 +171,7 @@ def newest(categoria):
item.url = host + 'categoria/animacion/'
elif categoria == 'terror':
item.url = host + '/categoria/terror/'
item.first=0
itemlist = lista(item)
if itemlist[-1].title == 'Siguiente >>>':
itemlist.pop()
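The fix above drops the page/N URL arithmetic: the item now carries a first index, each call renders one window of the already-scraped matches, and the site's own Next link is only followed once that window runs out. A minimal sketch of the scheme (function and variable names are illustrative, not the channel's actual code):

def window(matches, first, current_url, site_next_url, size=20):
    # slice the visible window out of everything scraped from this page
    last = min(first + size, len(matches))
    page = matches[first:last]
    if last < len(matches):
        # more local results: keep the same URL, just advance the index
        return page, current_url, last
    # local window exhausted: follow the site's pagination, reset the index
    return page, site_next_url, 0

The "Siguiente >>" item then clones the current one with the returned url and first, so no page counter has to be parsed back out of the HTML.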

View File

@@ -1,12 +0,0 @@
{
"id": "guaridavalencianista",
"name": "La Guarida valencianista",
"active": false,
"adult": false,
"language": ["cast"],
"thumbnail": "guaridavalencianista.png",
"banner": "guaridavalencianista.png",
"categories": [
"documentary"
]
}

View File

@@ -1,177 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, title="Novedades", action="listvideos",
url="http://guaridavalencia.blogspot.com.es"))
# itemlist.append( Item(channel=item.channel, title="Documentales - Series Disponibles" , action="DocuSeries" , url="http://guaridavalencia.blogspot.com/"))
itemlist.append(
Item(channel=item.channel, title="Categorias", action="DocuTag", url="http://guaridavalencia.blogspot.com.es"))
itemlist.append(Item(channel=item.channel, title="Partidos de liga (Temporada 2014/2015)", action="listvideos",
url="http://guaridavalencia.blogspot.com.es/search/label/PARTIDOS%20DEL%20VCF%20%28TEMPORADA%202014-15%29"))
return itemlist
def DocuSeries(item):
logger.info()
itemlist = []
# Download the page
data = scrapertools.cache_page(item.url)
# Extract the entries (folders)
patronvideos = '<li><b><a href="([^"]+)" target="_blank">([^<]+)</a></b></li>'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for match in matches:
scrapedurl = match[0]
scrapedtitle = match[1]
scrapedthumbnail = ""
scrapedplot = ""
logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
itemlist.append(Item(channel=item.channel, action="listvideos", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True))
return itemlist
def DocuTag(item):
logger.info()
itemlist = []
# Download the page
data = scrapertools.cache_page(item.url)
# ~ patronvideos = "<a dir='ltr' href='([^']+)'>([^<]+)</a>[^<]+<span class='label-count' dir='ltr'>(.+?)</span>"
patronvideos = "<li[^<]+<a dir='ltr' href='([^']+)'>([^<]+)</a[^<]+<span dir='ltr'>[^0-9]+([0-9]+)[^<]+</span[^<]+</li[^<]+"
# ~ patronvideos = "<li[^<]+<a dir='ltr' href='([^']+)'[^<]+([^<]+)</a>"
# ~ [^<]+<span class='label-count' dir='ltr'>(.+?)</span>"
matches = re.compile(patronvideos, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for match in matches:
scrapedurl = match[0]
# Line breaks must be removed from match[1]
scrapedtitle = match[1][1:-1] + " (" + match[2] + ")"
# ~ scrapedtitle = match[1]
scrapedthumbnail = ""
scrapedplot = ""
logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
itemlist.append(Item(channel=item.channel, action="listvideos", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True))
return itemlist
def DocuARCHIVO(item):
logger.info()
itemlist = []
# Download the page
data = scrapertools.cache_page(item.url)
patronvideos = "<a class='post-count-link' href='([^']+)'>([^<]+)</a>[^<]+"
patronvideos += "<span class='post-count' dir='ltr'>(.+?)</span>"
matches = re.compile(patronvideos, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for match in matches:
scrapedurl = match[0]
scrapedtitle = match[1] + " " + match[2]
scrapedthumbnail = ""
scrapedplot = ""
logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
itemlist.append(Item(channel=item.channel, action="listvideos", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True))
return itemlist
def listvideos(item):
logger.info()
itemlist = []
scrapedthumbnail = ""
scrapedplot = ""
# Download the page
data = scrapertools.cache_page(item.url)
patronvideos = "<h3 class='post-title entry-title'[^<]+"
patronvideos += "<a href='([^']+)'>([^<]+)</a>.*?"
patronvideos += "<div class='post-body entry-content'(.*?)<div class='post-footer'>"
matches = re.compile(patronvideos, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for match in matches:
scrapedtitle = match[1]
scrapedtitle = re.sub("<[^>]+>", " ", scrapedtitle)
scrapedtitle = scrapertools.unescape(scrapedtitle)[1:-1]
scrapedurl = match[0]
regexp = re.compile(r'src="(http[^"]+)"')
matchthumb = regexp.search(match[2])
if matchthumb is not None:
scrapedthumbnail = matchthumb.group(1)
matchplot = re.compile('<div align="center">(<img.*?)</span></div>', re.DOTALL).findall(match[2])
if len(matchplot) > 0:
scrapedplot = matchplot[0]
# print matchplot
else:
scrapedplot = ""
scrapedplot = re.sub("<[^>]+>", " ", scrapedplot)
scrapedplot = scrapertools.unescape(scrapedplot)
logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
itemlist.append(Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True))
# Extract the next-page marker
patronvideos = "<a class='blog-pager-older-link' href='([^']+)'"
matches = re.compile(patronvideos, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
if len(matches) > 0:
scrapedtitle = "Página siguiente"
scrapedurl = urlparse.urljoin(item.url, matches[0])
scrapedthumbnail = ""
scrapedplot = ""
itemlist.append(Item(channel=item.channel, action="listvideos", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True))
return itemlist
# ~ return itemlist
def findvideos(item):
logger.info()
data = scrapertools.cachePage(item.url)
# Find the links to the videos
listavideos = servertools.findvideos(data)
if item is None:
item = Item()
itemlist = []
for video in listavideos:
scrapedtitle = video[0].strip() + " - " + item.title.strip()
scrapedurl = video[1]
server = video[2]
itemlist.append(Item(channel=item.channel, title=scrapedtitle, action="play", server=server, url=scrapedurl,
thumbnail=item.thumbnail, show=item.show, plot=item.plot, folder=False))
return itemlist

View File

@@ -28,16 +28,12 @@ def settingCanal(item):
def login():
logger.info()
data = agrupa_datos(httptools.downloadpage(host).data)
patron = "<input type='hidden' name='__csrf_magic' value=\"([^\"]+)\" />"
sid = scrapertools.find_single_match(data, patron)
post = urllib.urlencode({'__csrf_magic': sid}) + "&username=" + config.get_setting('hdfulluser',
'hdfull') + "&password=" + config.get_setting(
'hdfullpassword', 'hdfull') + "&action=login"
httptools.downloadpage(host, post=post)
@@ -56,15 +52,12 @@ def mainlist(item):
else:
login()
itemlist.append(Item(channel=item.channel, action="settingCanal", title="Configuración...", url=""))
return itemlist
def menupeliculas(item):
logger.info()
itemlist = []
if account:
itemlist.append(Item(channel=item.channel, action="items_usuario",
title="[COLOR orange][B]Favoritos[/B][/COLOR]",
@@ -72,7 +65,6 @@ def menupeliculas(item):
itemlist.append(Item(channel=item.channel, action="items_usuario",
title="[COLOR orange][B]Pendientes[/B][/COLOR]",
url=host + "/a/my?target=movies&action=pending&start=-28&limit=28", folder=True))
itemlist.append(Item(channel=item.channel, action="fichas", title="ABC", url=host + "/peliculas/abc", folder=True))
itemlist.append(
Item(channel=item.channel, action="fichas", title="Últimas películas", url=host + "/peliculas", folder=True))
@@ -89,15 +81,12 @@ def menupeliculas(item):
itemlist.append(Item(channel=item.channel, action="items_usuario",
title="[COLOR orange][B]Vistas[/B][/COLOR]",
url=host + "/a/my?target=movies&action=seen&start=-28&limit=28", folder=True))
return itemlist
def menuseries(item):
logger.info()
itemlist = []
if account:
itemlist.append(Item(channel=item.channel, action="items_usuario",
title="[COLOR orange][B]Siguiendo[/B][/COLOR]",
@@ -105,9 +94,7 @@ def menuseries(item):
itemlist.append(Item(channel=item.channel, action="items_usuario",
title="[COLOR orange][B]Para Ver[/B][/COLOR]",
url=host + "/a/my?target=shows&action=watch&start=-28&limit=28", folder=True))
itemlist.append(Item(channel=item.channel, action="series_abc", title="A-Z", folder=True))
itemlist.append(Item(channel=item.channel, action="novedades_episodios", title="Últimos Emitidos",
url=host + "/a/episodes?action=latest&start=-24&limit=24&elang=ALL", folder=True))
itemlist.append(Item(channel=item.channel, action="novedades_episodios", title="Episodios Estreno",
@@ -132,20 +119,16 @@ def menuseries(item):
itemlist.append(Item(channel=item.channel, action="items_usuario",
title="[COLOR orange][B]Vistas[/B][/COLOR]",
url=host + "/a/my?target=shows&action=seen&start=-28&limit=28", folder=True))
return itemlist
def search(item, texto):
logger.info()
data = agrupa_datos(httptools.downloadpage(host).data)
sid = scrapertools.get_match(data, '.__csrf_magic. value="(sid:[^"]+)"')
item.extra = urllib.urlencode({'__csrf_magic': sid}) + '&menu=search&query=' + texto
item.title = "Buscar..."
item.url = host + "/buscar"
try:
return fichas(item)
# The exception is caught so a failing channel does not break the global search
@@ -158,59 +141,44 @@ def search(item, texto):
def series_abc(item):
logger.info()
itemlist = []
az = "ABCDEFGHIJKLMNOPQRSTUVWXYZ#"
for l in az:
itemlist.append(
Item(channel=item.channel, action='fichas', title=l, url=host + "/series/abc/" + l.replace('#', '9')))
return itemlist
def items_usuario(item):
logger.info()
itemlist = []
## Load statuses
status = jsontools.load(httptools.downloadpage(host + '/a/status/all').data)
## User cards
url = item.url.split("?")[0]
post = item.url.split("?")[1]
old_start = scrapertools.get_match(post, 'start=([^&]+)&')
limit = scrapertools.get_match(post, 'limit=(\d+)')
start = "%s" % (int(old_start) + int(limit))
post = post.replace("start=" + old_start, "start=" + start)
next_page = url + "?" + post
## Load the user's cards
data = httptools.downloadpage(url, post=post).data
fichas_usuario = jsontools.load(data)
for ficha in fichas_usuario:
try:
title = ficha['title']['es'].strip()
except:
title = ficha['title']['en'].strip()
try:
title = title.encode('utf-8')
except:
pass
show = title
try:
thumbnail = host + "/thumbs/" + ficha['thumbnail']
except:
thumbnail = host + "/thumbs/" + ficha['thumb']
try:
url = urlparse.urljoin(host, '/serie/' + ficha['permalink']) + "###" + ficha['id'] + ";1"
action = "episodios"
@@ -237,37 +205,26 @@ def items_usuario(item):
action = "findvideos"
str = get_status(status, 'movies', ficha['id'])
if str != "": title += str
# try: title = title.encode('utf-8')
# except: pass
itemlist.append(
Item(channel=item.channel, action=action, title=title, fulltitle=title, url=url, thumbnail=thumbnail,
show=show, folder=True))
if len(itemlist) == int(limit):
itemlist.append(
Item(channel=item.channel, action="items_usuario", title=">> Página siguiente", url=next_page, folder=True))
return itemlist
def listado_series(item):
logger.info()
itemlist = []
data = agrupa_datos(httptools.downloadpage(item.url).data)
patron = '<div class="list-item"><a href="([^"]+)"[^>]+>([^<]+)</a></div>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
url = scrapedurl + "###0;1"
itemlist.append(
Item(channel=item.channel, action="episodios", title=scrapedtitle, fulltitle=scrapedtitle, url=url,
show=scrapedtitle, contentType="tvshow"))
return itemlist
@@ -278,22 +235,19 @@ def fichas(item):
infoLabels=dict()
## Load statuses
status = jsontools.load(httptools.downloadpage(host + '/a/status/all').data)
if item.title == "Buscar...":
data = agrupa_datos(httptools.downloadpage(item.url, post=item.extra).data)
s_p = scrapertools.get_match(data, '<h3 class="section-title">(.*?)<div id="footer-wrapper">').split(
'<h3 class="section-title">')
if len(s_p) == 1:
data = s_p[0]
if 'Lo sentimos</h3>' in s_p[0]:
return [Item(channel=item.channel, title="[COLOR gold][B]HDFull:[/B][/COLOR] [COLOR blue]" + texto.replace('%20',
' ') + "[/COLOR] sin resultados")]
' ') + "[/COLOR] sin resultados")]
else:
data = s_p[0] + s_p[1]
else:
data = agrupa_datos(httptools.downloadpage(item.url).data)
data = re.sub(
r'<div class="span-6[^<]+<div class="item"[^<]+' + \
'<a href="([^"]+)"[^<]+' + \
@@ -305,32 +259,23 @@ def fichas(item):
r"'url':'\1';'image':'\2';'langs':'\3';'rating':'\4';'title':\5;'id':'\6';",
data
)
patron = "'url':'([^']+)';'image':'([^']+)';'langs':'([^']+)';'rating':'([^']+)';'title':([^;]+);'id':'([^']+)';"
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedlangs, scrapedrating, scrapedtitle, scrapedid in matches:
#thumbnail = scrapedthumbnail.replace("/tthumb/130x190/", "/thumbs/")
thumbnail = scrapedthumbnail
language = ''
title = scrapedtitle.strip()
show = title
contentTitle = scrapedtitle.strip()
if scrapedlangs != ">":
textoidiomas, language = extrae_idiomas(scrapedlangs)
# TODO: remove the language
title += " ( [COLOR teal][B]" + textoidiomas + "[/B][/COLOR])"
if scrapedrating != ">":
valoracion = re.sub(r'><[^>]+>(\d+)<b class="dec">(\d+)</b>', r'\1,\2', scrapedrating)
infoLabels['rating']=valoracion
title += " ([COLOR orange]" + valoracion + "[/COLOR])"
url = urlparse.urljoin(item.url, scrapedurl)
if "/serie" in url or "/tags-tv" in url:
action = "episodios"
url += "###" + scrapedid + ";1"
@@ -341,10 +286,8 @@ def fichas(item):
url += "###" + scrapedid + ";2"
type = "movies"
contentType = "movie"
str = get_status(status, type, scrapedid)
if str != "": title += str
if item.title == "Buscar...":
bus = host[-4:]
tag_type = scrapertools.find_single_match(url, '%s/([^/]+)/' %bus)
@@ -356,7 +299,7 @@ def fichas(item):
language =language, infoLabels=infoLabels))
else:
itemlist.append(
Item(channel=item.channel, action=action, title=title, url=url, fulltitle=title, thumbnail=thumbnail,
Item(channel=item.channel, action=action, title=title, url=url, fulltitle=contentTitle, thumbnail=thumbnail,
folder=True, contentType=contentType, contentTitle=contentTitle,
language =language, infoLabels=infoLabels))
## Pagination
@@ -364,7 +307,6 @@ def fichas(item):
if next_page_url != "":
itemlist.append(Item(channel=item.channel, action="fichas", title=">> Página siguiente",
url=urlparse.urljoin(item.url, next_page_url), folder=True))
return itemlist
@@ -372,25 +314,19 @@ def episodios(item):
logger.info()
id = "0"
itemlist = []
## Load statuses
status = jsontools.load(httptools.downloadpage(host + '/a/status/all').data)
url_targets = item.url
if "###" in item.url:
id = item.url.split("###")[1].split(";")[0]
type = item.url.split("###")[1].split(";")[1]
item.url = item.url.split("###")[0]
## Seasons
data = agrupa_datos(httptools.downloadpage(item.url).data)
if id == "0":
## The series id is taken from the page when it comes from listado_series
id = scrapertools.get_match(data, "<script>var sid = '([^']+)';</script>")
url_targets = url_targets.replace('###0', '###' + id)
str = get_status(status, "shows", id)
if str != "" and account and item.category != "Series" and "XBMC" not in item.title:
if config.get_videolibrary_support():
@@ -443,9 +379,6 @@ def episodios(item):
title = temporada + "x" + episodio + " - " + title.decode('utf-8') + ' ' + idiomas
except:
title = temporada + "x" + episodio + " - " + title.decode('iso-8859-1') + ' ' + idiomas
# try: title = temporada + "x" + episodio + " - " + title + ' ' + idiomas
# except: pass
# except: title = temporada + "x" + episodio + " - " + title.decode('iso-8859-1') + ' ' + idiomas
str = get_status(status, 'episodes', episode['id'])
if str != "": title += str
try:
@@ -473,25 +406,17 @@ def novedades_episodios(item):
## Episodes
url = item.url.split("?")[0]
post = item.url.split("?")[1]
old_start = scrapertools.get_match(post, 'start=([^&]+)&')
start = "%s" % (int(old_start) + 24)
post = post.replace("start=" + old_start, "start=" + start)
next_page = url + "?" + post
data = httptools.downloadpage(url, post=post).data
episodes = jsontools.load(data)
for episode in episodes:
thumbnail = host + "/thumbs/" + episode['thumbnail']
temporada = episode['season']
episodio = episode['episode']
if len(episodio) == 1: episodio = '0' + episodio
if episode['languages'] != "[]":
idiomas = "( [COLOR teal][B]"
for idioma in episode['languages']: idiomas += idioma + " "
@@ -499,95 +424,70 @@ def novedades_episodios(item):
idiomas = idiomas
else:
idiomas = ""
try:
show = episode['show']['title']['es'].strip()
contentSerieName = episode['show']['title']['es'].strip()
except:
show = episode['show']['title']['en'].strip()
show = "[COLOR whitesmoke][B]" + show + "[/B][/COLOR]"
contentSerieName = episode['show']['title']['en'].strip()
show = "[COLOR whitesmoke][B]" + contentSerieName + "[/B][/COLOR]"
if episode['title']:
try:
title = episode['title']['es'].strip()
except:
title = episode['title']['en'].strip()
if len(title) == 0: title = "Temporada " + temporada + " Episodio " + episodio
try:
title = temporada + "x" + episodio + " - " + show.decode('utf-8') + ": " + title.decode(
'utf-8') + ' ' + idiomas
except:
title = temporada + "x" + episodio + " - " + show.decode('iso-8859-1') + ": " + title.decode(
'iso-8859-1') + ' ' + idiomas
str = get_status(status, 'episodes', episode['id'])
if str != "": title += str
try:
title = title.encode('utf-8')
except:
title = title.encode('iso-8859-1')
# try: show = show.encode('utf-8')
# except: show = show.encode('iso-8859-1')
url = urlparse.urljoin(host, '/serie/' + episode[
'permalink'] + '/temporada-' + temporada + '/episodio-' + episodio) + "###" + episode['id'] + ";3"
itemlist.append(
Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url, thumbnail=thumbnail,
Item(channel=item.channel, action="findvideos", title=title, contentSerieName=contentSerieName, url=url, thumbnail=thumbnail,
folder=True, contentType="episode"))
if len(itemlist) == 24:
itemlist.append(
Item(channel=item.channel, action="novedades_episodios", title=">> Página siguiente", url=next_page,
folder=True))
return itemlist
def generos(item):
logger.info()
itemlist = []
data = agrupa_datos(httptools.downloadpage(item.url).data)
data = scrapertools.find_single_match(data, '<li class="dropdown"><a href="%s/peliculas"(.*?)</ul>' % host)
patron = '<li><a href="([^"]+)">([^<]+)</a></li>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
title = scrapedtitle.strip()
url = urlparse.urljoin(item.url, scrapedurl)
thumbnail = ""
plot = ""
itemlist.append(Item(channel=item.channel, action="fichas", title=title, url=url, folder=True))
return itemlist
def generos_series(item):
logger.info()
itemlist = []
data = agrupa_datos(httptools.downloadpage(item.url).data)
data = scrapertools.find_single_match(data, '<li class="dropdown"><a href="%s/series"(.*?)</ul>' % host)
patron = '<li><a href="([^"]+)">([^<]+)</a></li>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
title = scrapedtitle.strip()
url = urlparse.urljoin(item.url, scrapedurl)
thumbnail = ""
plot = ""
itemlist.append(Item(channel=item.channel, action="fichas", title=title, url=url, folder=True))
return itemlist
@@ -599,7 +499,6 @@ def findvideos(item):
## Load statuses
status = jsontools.load(httptools.downloadpage(host + '/a/status/all').data)
url_targets = item.url
## Videos
id = ""
type = ""
@@ -616,21 +515,15 @@ def findvideos(item):
title_label = " ( [COLOR gray][B]" + item.show + "[/B][/COLOR] )"
it1.append(Item(channel=item.channel, action="findvideos", title=title_label, fulltitle=title_label,
url=url_targets, thumbnail=item.thumbnail, show=item.show, folder=False))
title_label = " ( [COLOR green][B]Tráiler[/B][/COLOR] )"
it1.append(
item.clone(channel="trailertools", action="buscartrailer", title=title_label, contentTitle=item.show, url=item.url,
thumbnail=item.thumbnail, show=item.show))
it1.append(Item(channel=item.channel, action="set_status", title=title, fulltitle=title, url=url_targets,
thumbnail=item.thumbnail, show=item.show, folder=True))
data_js = httptools.downloadpage("%s/templates/hdfull/js/jquery.hdfull.view.min.js" % host).data
key = scrapertools.find_single_match(data_js, 'JSON.parse\(atob.*?substrings\((.*?)\)')
data_js = httptools.downloadpage("%s/js/providers.js" % host).data
try:
data_js = jhexdecode(data_js)
except:
@@ -639,10 +532,8 @@ def findvideos(item):
decode_aa = ""
for match in data_js:
decode_aa += aadecode(match)
data_js = re.sub(r':(function.*?\})', r':"\g<1>"', decode_aa)
data_js = re.sub(r':(var[^,]+),', r':"\g<1>",', data_js)
data = agrupa_datos(httptools.downloadpage(item.url).data)
data_obf = scrapertools.find_single_match(data, "var ad\s*=\s*'([^']+)'")
data_decrypt = jsontools.load(obfs(base64.b64decode(data_obf), 126 - int(key)))
@@ -653,14 +544,11 @@ def findvideos(item):
for match in data_decrypt:
prov = eval(scrapertools.find_single_match(data_js, 'p\[%s\]\s*=\s*(\{.*?\}[\']\})' % match["provider"]))
server_url = scrapertools.find_single_match(prov['l'], 'return\s*"(.*?)"')
url = '%s%s' % (server_url, match['code'])
url = re.sub(r'\'|"|\s|\+', '', url)
url = re.sub(r'var_\d+\[\d+\]', '', url)
embed = prov["e"]
matches.append([match["lang"], match["quality"], url, embed])
for idioma, calidad, url, embed in matches:
mostrar_server = True
option = "Ver"
@@ -675,14 +563,12 @@ def findvideos(item):
'<meta property="og:description" content="([^"]+)"')
plot = scrapertools.htmlclean(plot)
fanart = scrapertools.find_single_match(data, '<div style="background-image.url. ([^\s]+)')
if account:
url += "###" + id + ";" + type
it2.append(
item.clone(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
plot=plot, fanart=fanart, show=item.show, folder=True, infoLabels=infolabels,
contentTitle=item.show, contentType=item.contentType, tipo=option, tipo1=option1, idioma=idioma))
contentTitle=item.contentTitle, contentType=item.contentType, tipo=option, tipo1=option1, idioma=idioma))
it2 = servertools.get_servers_itemlist(it2, lambda i: i.title % i.server.capitalize())
it2.sort(key=lambda it: (it.tipo1, it.idioma, it.server))
for item in it2:
@@ -700,7 +586,6 @@ def findvideos(item):
return itemlist
def play(item):
if "###" in item.url:
id = item.url.split("###")[1].split(";")[0]
@@ -708,7 +593,6 @@ def play(item):
item.url = item.url.split("###")[0]
post = "target_id=%s&target_type=%s&target_status=1" % (id, type)
data = httptools.downloadpage(host + "/a/status", post=post).data
devuelve = servertools.findvideosbyserver(item.url, item.server)
if devuelve:
item.url = devuelve[0][1]
@@ -741,10 +625,8 @@ def extrae_idiomas(bloqueidiomas):
textoidiomas = textoidiomas + idioma +" "
# TODO: and keep this
language.append(idioma)
return textoidiomas, language
## --------------------------------------------------------------------------------
def set_status(item):
@@ -752,28 +634,21 @@ def set_status(item):
id = item.url.split("###")[1].split(";")[0]
type = item.url.split("###")[1].split(";")[1]
# item.url = item.url.split("###")[0]
if "Abandonar" in item.title:
path = "/a/status"
post = "target_id=" + id + "&target_type=" + type + "&target_status=0"
elif "Seguir" in item.title:
target_status = "3"
path = "/a/status"
post = "target_id=" + id + "&target_type=" + type + "&target_status=3"
elif "Agregar a Favoritos" in item.title:
path = "/a/favorite"
post = "like_id=" + id + "&like_type=" + type + "&like_comment=&vote=1"
elif "Quitar de Favoritos" in item.title:
path = "/a/favorite"
post = "like_id=" + id + "&like_type=" + type + "&like_comment=&vote=-1"
data = httptools.downloadpage(host + path, post=post).data
title = "[COLOR green][B]OK[/B][/COLOR]"
return [Item(channel=item.channel, action="episodios", title=title, fulltitle=title, url=item.url,
thumbnail=item.thumbnail, show=item.show, folder=False)]
@@ -783,27 +658,22 @@ def get_status(status, type, id):
state = {'0': '', '1': 'Finalizada', '2': 'Pendiente', '3': 'Siguiendo'}
else:
state = {'0': '', '1': 'Visto', '2': 'Pendiente'}
str = "";
str1 = "";
str2 = ""
try:
if id in status['favorites'][type]:
str1 = " [COLOR orange][B]Favorito[/B][/COLOR]"
except:
str1 = ""
try:
if id in status['status'][type]:
str2 = state[status['status'][type][id]]
if str2 != "": str2 = "[COLOR green][B]" + state[status['status'][type][id]] + "[/B][/COLOR]"
except:
str2 = ""
if str1 != "" or str2 != "":
str = " (" + str1 + str2 + " )"
return str
@@ -811,20 +681,16 @@ def get_status(status, type, id):
## --------------------------------------------------------------------------------
def jhexdecode(t):
r = re.sub(r'_\d+x\w+x(\d+)', 'var_' + r'\1', t)
r = re.sub(r'_\d+x\w+', 'var_0', r)
def to_hx(c):
h = int("%s" % c.groups(0), 16)
if 19 < h < 160:
return chr(h)
else:
return ""
r = re.sub(r'(?:\\|)x(\w{2})', to_hx, r).replace('var ', '')
f = eval(scrapertools.get_match(r, '\s*var_0\s*=\s*([^;]+);'))
for i, v in enumerate(f):
r = r.replace('[[var_0[%s]]' % i, "." + f[i])
@@ -833,12 +699,11 @@ def jhexdecode(t):
r = r.replace('(var_0[%s]' % i, "(\"" + f[i] + "\"")
r = r.replace('[var_0[%s]]' % i, "." + f[i])
if v == "": r = r.replace('var_0[%s]' % i, '""')
r = re.sub(r':(function.*?\})', r":'\g<1>'", r)
r = re.sub(r':(var[^,]+),', r":'\g<1>',", r)
return r
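The hex-unescaping step can be exercised on its own; a small sketch with to_hx re-declared standalone (group(1) is equivalent to the "%s" % c.groups(0) formatting used above):

import re

def to_hx(c):
    # Decode a two-digit hex escape, keeping only code points 20..159
    h = int(c.group(1), 16)
    return chr(h) if 19 < h < 160 else ""

print(re.sub(r'(?:\\|)x(\w{2})', to_hx, r'\x48\x6f\x6c\x61'))  # -> Hola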
def obfs(data, key, n=126):
chars = list(data)
for i in range(0, len(chars)):
@@ -846,5 +711,4 @@ def obfs(data, key, n=126):
if c <= n:
number = (ord(chars[i]) + key) % n
chars[i] = chr(number)
return "".join(chars)

View File

@@ -1,69 +0,0 @@
{
"id": "lacajita",
"name": "LaCajita",
"language": ["cast", "lat"],
"active": false,
"adult": false,
"thumbnail": "http://i.imgur.com/LVdupxc.png",
"categories": [
"movie",
"vos"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en búsqueda global",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "modo_grafico",
"type": "bool",
"color": "0xFFd50b0b",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "perfil",
"type": "list",
"label": "Perfil de color",
"default": 3,
"enabled": true,
"visible": true,
"lvalues": [
"Sin color",
"Perfil 3",
"Perfil 2",
"Perfil 1"
]
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - terror",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_castellano",
"type": "bool",
"label": "Incluir en Novedades - Castellano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - Latino",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,297 +0,0 @@
# -*- coding: utf-8 -*-
import re
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
__modo_grafico__ = config.get_setting("modo_grafico", "lacajita")
__perfil__ = config.get_setting("perfil", "lacajita")
# Set the color profile
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFF088A08'],
['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFF088A08'],
['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFF088A08']]
if __perfil__ - 1 >= 0:
color1, color2, color3, color4, color5 = perfil[__perfil__ - 1]
else:
color1 = color2 = color3 = color4 = color5 = ""
host = "http://lacajita.xyz"
def mainlist(item):
logger.info()
itemlist = []
item.text_color = color1
itemlist.append(item.clone(title="Novedades DVD", action=""))
item.text_color = color2
itemlist.append(item.clone(title=" En Español", action="entradas", url=host + "/estrenos-dvd/es/", page=0))
itemlist.append(item.clone(title=" En Latino", action="entradas", url=host + "/estrenos-dvd/la/", page=0))
itemlist.append(item.clone(title=" En VOSE", action="entradas", url=host + "/estrenos-dvd/vos/", page=0))
item.text_color = color1
itemlist.append(item.clone(title="Estrenos", action=""))
item.text_color = color2
itemlist.append(item.clone(title=" En Español", action="entradas", url=host + "/estrenos/es/", page=0))
itemlist.append(item.clone(title=" En Latino", action="entradas", url=host + "/estrenos/la/", page=0))
itemlist.append(item.clone(title=" En VOSE", action="entradas", url=host + "/estrenos/vos/", page=0))
item.text_color = color1
itemlist.append(item.clone(title="Más Vistas", action="updated", url=host + "/listado-visto/", page=0))
itemlist.append(item.clone(title="Actualizadas", action="updated", url=host + "/actualizado/", page=0))
item.text_color = color5
itemlist.append(item.clone(title="Por género", action="indices"))
itemlist.append(item.clone(title="Buscar...", action="search", text_color=color4))
itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False))
return itemlist
def configuracion(item):
from platformcode import platformtools
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
return ret
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
try:
item.url = "%s/search.php?q1=%s" % (host, texto)
item.action = "busqueda"
item.page = 0
return busqueda(item)
# Catch the exception so the global search is not interrupted when one channel fails
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def entradas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data, '<ul class="nav navbar-nav">(.*?)</ul>')
patron = "<li.*?href='([^']+)'.*?src='([^']+)'.*?>([^<]+)</p>(.*?)</button>"
matches = scrapertools.find_multiple_matches(bloque, patron)
matches_ = matches[item.page:item.page + 20]
for scrapedurl, scrapedthumbnail, scrapedtitle, data_idioma in matches_:
idiomas = []
if "es.png" in data_idioma:
idiomas.append("ESP")
if "la.png" in data_idioma:
idiomas.append("LAT")
if "vos.png" in data_idioma:
idiomas.append("VOSE")
titulo = scrapedtitle
if idiomas:
titulo += " [%s]" % "/".join(idiomas)
scrapedurl = host + scrapedurl
scrapedthumbnail = scrapedthumbnail.replace("/w342/", "/w500/")
filtro_thumb = scrapedthumbnail.replace("https://image.tmdb.org/t/p/w500", "")
filtro = {"poster_path": filtro_thumb}.items()
itemlist.append(Item(channel=item.channel, action="findvideos", url=scrapedurl, title=titulo,
contentTitle=scrapedtitle, infoLabels={'filtro': filtro}, text_color=color2,
thumbnail=scrapedthumbnail, contentType="movie", fulltitle=scrapedtitle, language =
idiomas))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
if len(matches) > item.page + 20:
page = item.page + 20
itemlist.append(item.clone(title=">> Página Siguiente", page=page, text_color=color3))
return itemlist
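The paging above is a plain list slice over the full match list; a stand-alone illustration:

matches = list(range(45))            # stand-in for the scraped matches
page = 20                            # item.page on the second page
visible = matches[page:page + 20]    # entries 20..39
has_next = len(matches) > page + 20  # True: entries 40..44 fill a third page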
def updated(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data, '<ul class="nav navbar-nav">(.*?)</ul>')
matches = scrapertools.find_multiple_matches(bloque, "<li.*?href='([^']+)'.*?src='([^']+)'.*?>([^<]+)</p>")
matches_ = matches[item.page:item.page + 20]
for scrapedurl, scrapedthumbnail, scrapedtitle in matches_:
if scrapedtitle == "Today":
continue
scrapedurl = host + scrapedurl
scrapedthumbnail = scrapedthumbnail.replace("/w342/", "/w500/")
filtro_thumb = scrapedthumbnail.replace("https://image.tmdb.org/t/p/w500", "")
filtro = {"poster_path": filtro_thumb}.items()
itemlist.append(Item(channel=item.channel, action="findvideos", url=scrapedurl, title=scrapedtitle,
contentTitle=scrapedtitle, infoLabels={'filtro': filtro}, text_color=color2,
thumbnail=scrapedthumbnail, contentType="movie", fulltitle=scrapedtitle))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
if len(matches) > item.page + 20:
page = item.page + 20
itemlist.append(item.clone(title=">> Página Siguiente", page=page, text_color=color3))
else:
next = scrapertools.find_single_match(data, '<a href="([^"]+)">>>')
if next:
next = item.url + next
itemlist.append(item.clone(title=">> Página Siguiente", page=0, url=next, text_color=color3))
return itemlist
def busqueda(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data, '<ul class="nav navbar-nav">(.*?)</ul>')
matches = scrapertools.find_multiple_matches(bloque, "<li.*?href='([^']+)'.*?src='([^']+)'.*?>\s*([^<]+)</a>")
matches_ = matches[item.page:item.page + 25]
for scrapedurl, scrapedthumbnail, scrapedtitle in matches_:
scrapedurl = host + scrapedurl
scrapedthumbnail = scrapedthumbnail.replace("/w342/", "/w500/")
if re.search(r"\(\d{4}\)", scrapedtitle):
title = scrapedtitle.rsplit("(", 1)[0]
year = scrapertools.find_single_match(scrapedtitle, '\((\d{4})\)')
infoLabels = {'year': year}
else:
title = scrapedtitle
filtro_thumb = scrapedthumbnail.replace("https://image.tmdb.org/t/p/w500", "")
filtro = {"poster_path": filtro_thumb}.items()
infoLabels = {'filtro': filtro}
itemlist.append(Item(channel=item.channel, action="findvideos", url=scrapedurl, title=scrapedtitle,
contentTitle=title, infoLabels=infoLabels, text_color=color2,
thumbnail=scrapedthumbnail, contentType="movie", fulltitle=title))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
if len(matches) > item.page + 25:
page = item.page + 25
itemlist.append(item.clone(title=">> Página Siguiente", page=page, text_color=color3))
else:
next = scrapertools.find_single_match(data, '<a href="([^"]+)">>>')
if next:
next = item.url + next
itemlist.append(item.clone(title=">> Página Siguiente", page=0, url=next, text_color=color3))
return itemlist
def indices(item):
logger.info()
itemlist = []
data = httptools.downloadpage(host).data
matches = scrapertools.find_multiple_matches(data,
'<li><a href="([^"]+)"><i class="fa fa-bookmark-o"></i>\s*(.*?)</a>')
for scrapedurl, scrapedtitle in matches:
scrapedurl = host + scrapedurl
itemlist.append(item.clone(action="updated", url=scrapedurl, title=scrapedtitle, page=0))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div class="grid_content2 sno">.*?src="([^"]+)".*?href="([^"]+)".*?src=\'(.*?)(?:.png|.jpg)\'' \
'.*?<span>.*?<span>(.*?)</span>.*?<span>(.*?)</span>'
matches = scrapertools.find_multiple_matches(data, patron)
for idioma, url, servidor, calidad, detalle in matches:
url = host + url
servidor = servidor.rsplit("/", 1)[1]
servidor = servidor.replace("uploaded", "uploadedto").replace("streamin.to", "streaminto")
if "streamix" in servidor:
servidor = "streamixcloud"
try:
servers_module = __import__("servers." + servidor)
mostrar_server = servertools.is_server_enabled(servidor)
if not mostrar_server:
continue
except:
continue
if "es.png" in idioma:
idioma = "ESP"
elif "la.png" in idioma:
idioma = "LAT"
elif "vos.png" in idioma:
idioma = "VOSE"
title = "%s - %s - %s" % (servidor, idioma, calidad)
if detalle:
title += " (%s)" % detalle
itemlist.append(item.clone(action="play", url=url, title=title, server=servidor, text_color=color3,
language = idioma, quality = calidad))
if item.extra != "findvideos" and config.get_videolibrary_support():
itemlist.append(item.clone(title="Añadir película a la videoteca", action="add_pelicula_to_library",
extra="findvideos", text_color="green"))
return itemlist
def play(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
url = scrapertools.find_single_match(data, 'window.open\("([^"]+)"')
enlaces = servertools.findvideosbyserver(url, item.server)
if enlaces:
itemlist.append(item.clone(action="play", url=enlaces[0][1]))
else:
enlaces = servertools.findvideos(url, True)
if enlaces:
itemlist.append(item.clone(action="play", server=enlaces[0][2], url=enlaces[0][1]))
return itemlist
def newest(categoria):
logger.info()
itemlist = []
item = Item()
item.page = 0
try:
if categoria == "terror":
item.url = host +"/listado/terror/"
item.action = "updated"
itemlist = updated(item)
elif categoria == 'castellano':
item.url = host + "/estrenos/es/"
item.action = "entradas"
elif categoria == 'latino':
item.url = host + "/estrenos/la/"
item.action = "entradas"
if categoria != 'terror':
itemlist = entradas(item)
if itemlist[-1].action == item.action:
itemlist.pop()
# Catch the exception so the "new releases" channel is not interrupted when one channel fails
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist

View File

@@ -2,15 +2,21 @@
import re
import urlparse
import urllib
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core import jsontools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
IDIOMAS = {'Latino': 'Latino', 'Español': 'Español', 'Sub español': 'VOS'}
list_language = IDIOMAS.values()
list_quality = []
@@ -18,12 +24,18 @@ list_servers = [
'openload',
]
host = 'https://www.locopelis.com/'
audio = {'Latino': '[COLOR limegreen]LATINO[/COLOR]', 'Español': '[COLOR yellow]ESPAÑOL[/COLOR]',
'Sub Español': '[COLOR red]SUB ESPAÑOL[/COLOR]'}
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
@@ -127,8 +139,6 @@ def todas(item):
idioma = scrapertools.decodeHtmlentities(idioma_id)
# if idioma == 'Espa&ntilde;ol':
# idioma ='Español'
logger.debug('idioma original: %s' % idioma_id)
logger.debug('idioma: %s' % idioma)
if idioma in audio:
idioma = audio[idioma]
@@ -338,24 +348,27 @@ def search(item, texto):
return []
def get_link(data):
new_url = scrapertools.find_single_match(data, '(?:IFRAME|iframe) src=(.*?) scrolling')
return new_url
def findvideos(item):
logger.info()
itemlist = []
if item.language == 'Espa&ntilde;ol':
item.language = 'Español'
new_url = get_link(get_source(item.url))
new_url = get_link(get_source(new_url))
video_id = scrapertools.find_single_match(new_url, 'http.*?h=(\w+)')
new_url = '%s%s' % (host, 'playeropstream/api.php')
post = {'h': video_id}
post = urllib.urlencode(post)
data = httptools.downloadpage(new_url, post=post).data
json_data = jsontools.load(data)
url = json_data['url']
server = servertools.get_server_from_url(url)
title = '%s [%s]' % (server, item.language)
itemlist.append(Item(channel=item.channel, title=title, url=url, action='play', language=item.language,
server=server, infoLabels=item.infoLabels))
# Required by FilterTools
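The flow above can be exercised outside Kodi; a stdlib-only sketch (Python 2), where resolve_locopelis, the endpoint path and the JSON shape are taken from the code above rather than verified against the live site:

import json
import re
import urllib
import urllib2

def resolve_locopelis(page_url, host='https://www.locopelis.com/'):
    def fetch(url):
        # Strip quotes as get_source() does, so the unquoted patterns match
        return re.sub(r'"', '', urllib2.urlopen(url).read())

    # Two iframe hops, mirroring get_link(get_source(...)) applied twice
    iframe = re.search(r'(?:IFRAME|iframe) src=(.*?) scrolling', fetch(page_url)).group(1)
    player = re.search(r'(?:IFRAME|iframe) src=(.*?) scrolling', fetch(iframe)).group(1)
    h = re.search(r'http.*?h=(\w+)', player).group(1)
    data = urllib2.urlopen(host + 'playeropstream/api.php',
                           urllib.urlencode({'h': h})).read()
    return json.loads(data)['url']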

View File

@@ -0,0 +1,22 @@
{
"id": "retroseriestv",
"name": "RetroSeriesTV",
"active": true,
"adult": false,
"language": ["lat", "cast"],
"thumbnail": "https://retroseriestv.com/wp-content/uploads/2017/04/Logo3-5.png",
"banner": "",
"categories": [
"tvshow"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
}
]
}

View File

@@ -0,0 +1,214 @@
# -*- coding: utf-8 -*-
# -*- Channel RetroSeriesTV -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import urllib
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channels import autoplay
from channels import filtertools
host = 'https://retroseriestv.com/'
# Needed below: filtertools.get_links() in episodesxseason uses list_language
IDIOMAS = {'la': 'LAT', 'es': 'Cast'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['openload']
def mainlist(item):
logger.info()
itemlist = list()
itemlist.append(item.clone(title="Todas", action="list_all", url=host + 'seriestv/', thumbnail=get_thumb('all',
auto=True)))
itemlist.append(item.clone(title="Generos", action="section", url=host, thumbnail=get_thumb('genres', auto=True),
section='genres'))
itemlist.append(item.clone(title="Por Año", action="section", url=host, thumbnail=get_thumb('year', auto=True),
section='year'))
itemlist.append(item.clone(title="Alfabetico", action="section", url=host, thumbnail=get_thumb('alphabet', auto=True),
section='abc'))
itemlist.append(item.clone(title="Buscar", action="search", url=host+'?s=',
thumbnail=get_thumb('search', auto=True)))
return itemlist
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def list_all(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '<article id=post-.*?<a href=(.*?)><img src=(.*?) alt=(.*?)><.*?<span>(.*?)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, year in matches:
url = scrapedurl
contentSerieName = scrapedtitle
thumbnail = scrapedthumbnail
itemlist.append(item.clone(action='seasons',
title=contentSerieName,
url=url,
thumbnail=thumbnail,
contentSerieName=contentSerieName,
infoLabels={'year':year}
))
tmdb.set_infoLabels_itemlist(itemlist, True)
# Pagination
url_next_page = scrapertools.find_single_match(data,'rel=next.*?href=(.*?) ')
if url_next_page:
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_all'))
return itemlist
def section(item):
logger.info()
itemlist = []
data = get_source(item.url)
data = scrapertools.find_single_match(data, '<ul class=%s(.*?)</ul>' % item.section)
patron = '<a href=(.*?)>(.*?)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
url = scrapedurl.strip()
itemlist.append(Item(channel=item.channel, title=scrapedtitle, url=url, action='list_all'))
return itemlist
def seasons(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '<span class=title>Temporada(\d+) <'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedtitle in matches:
infoLabels = item.infoLabels
infoLabels['season'] = scrapedtitle
title = 'Temporada %s' % scrapedtitle
itemlist.append(Item(channel=item.channel, title=title, url=item.url, action='episodesxseason',
infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName,
extra1='library'))
return itemlist
def episodios(item):
logger.info()
itemlist = []
templist = seasons(item)
for tempitem in templist:
itemlist += episodesxseason(tempitem)
return itemlist
def episodesxseason(item):
logger.info()
itemlist = []
data = get_source(item.url)
infoLabels = item.infoLabels
season = infoLabels['season']
patron = '<img src=([^>]+)></a></div><div class=numerando>%s - (\d+)</div>' % season
patron += '<div class=episodiotitle><a href=(.*?)>(.*?)</a><'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedthumbnail, scrapedepi, scrapedurl, scrapedtitle in matches:
title = '%sx%s - %s' % (season, scrapedepi, scrapedtitle)
infoLabels['episode'] = scrapedepi
if int(scrapedepi) > 0:  # compare numerically; the scraped string would always pass in Py2
itemlist.append(Item(channel=item.channel, title=title, url=scrapedurl, action='findvideos',
infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
itemlist = filtertools.get_links(itemlist, item, list_language)
return itemlist
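To see what the numerando pattern captures, a sketch against a hypothetical fragment, already quote-stripped the way get_source() returns it:

import re

sample = ('<img src=https://example.com/e1.jpg></a></div><div class=numerando>1 - 1</div>'
          '<div class=episodiotitle><a href=https://retroseriestv.com/ep/piloto>Piloto</a><')
patron = '<img src=([^>]+)></a></div><div class=numerando>%s - (\d+)</div>' % 1
patron += '<div class=episodiotitle><a href=(.*?)>(.*?)</a><'
print(re.compile(patron, re.DOTALL).findall(sample))
# -> [('https://example.com/e1.jpg', '1', 'https://retroseriestv.com/ep/piloto', 'Piloto')]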
def findvideos(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = 'id=([^ ]+) class=play-box-iframe .*?src=(.*?) frameborder=0.*?'
matches = re.compile(patron, re.DOTALL).findall(data)
for option, scrapedurl in matches:
#language = scrapertools.find_single_match(data, '#%s.*?dt_flag><img src=.*?flags/(.*?).png' % option)
#title = '%s [%s]'
language = ''
title = '%s'
SerieName = item.contentSerieName
itemlist.append(Item(channel=item.channel, title=title, contentSerieName=SerieName, url=scrapedurl,
action='play', language=language, infoLabels=item.infoLabels))
#itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % (i.server.capitalize(), i.language))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
return itemlist
def search_results(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '<article.*?<a href=(.*?)><img src=(.*?) alt=(.*?)><.*?year>(.*?)<.*?<p>(.*?)</p>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, year, scrapedplot in matches:
url = scrapedurl
contentSerieName = scrapedtitle.replace(' /','')
thumbnail = scrapedthumbnail
itemlist.append(item.clone(action='seasons',
title=contentSerieName,
url=url,
thumbnail=thumbnail,
plot=scrapedplot,
contentSerieName=contentSerieName,
infoLabels={'year':year}
))
tmdb.set_infoLabels_itemlist(itemlist, True)
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
if texto != '':
return search_results(item)
else:
return []

View File

@@ -0,0 +1,37 @@
{
"id": "veseriesonline",
"name": "VeSeriesOnline",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "http://www.veseriesonline.com/wp-content/themes/theme-fb4/images/logo.png",
"banner": "",
"categories": [
"tvshow"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"Cast",
"Lat",
"VOSE",
"VO"
]
}
]
}