Merge remote-tracking branch 'alfa-addon/master' into Fixes

Unknown committed 2018-02-23 22:17:42 -03:00
22 changed files with 812 additions and 3906 deletions

View File

@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="2.4.18" provider-name="Alfa Addon">
<addon id="plugin.video.alfa" name="Alfa" version="2.5.0" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -19,10 +19,14 @@
</assets>
<news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
» seriesblanco » rapidvideo
» kbagi » bitertv
» doomtv » miltorrents
» newpct » newpct1
» youtube » flashx
» kbagi » pelismagnet
» gnula » animemovil
» cinecalidad » cuelgame
» divxtotal » cinemahd
¤ arreglos internos
¤ Agradecimientos a @Paquito Porras por PelisUltra.
</news>
<description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>
<summary lang="en">Browse web pages using Kodi</summary>

View File

@@ -4,6 +4,7 @@ import re
from channels import renumbertools
from core import httptools
from core import servertools
from core import jsontools
from core import scrapertools
from core.item import Item
@@ -11,7 +12,7 @@ from platformcode import platformtools, config, logger
__modo_grafico__ = config.get_setting('modo_grafico', 'animemovil')
__perfil__ = int(config.get_setting('perfil', "animemovil"))
__perfil__ = ''
# Set the color profile
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFFFFD700'],
@@ -31,10 +32,12 @@ def mainlist(item):
itemlist.append(Item(channel=item.channel, action="recientes", title="Episodios Recientes", thumbnail=item.thumbnail,
url=host, text_color=color1, contentType="tvshow", extra="recientes"))
itemlist.append(Item(channel=item.channel, action="listado", title="Animes", thumbnail=item.thumbnail,
url="%s/_API/?src=animesRecientes&offset=0" % host, text_color=color1))
itemlist.append(Item(channel=item.channel, action="emision", title="En emisión", thumbnail=item.thumbnail,
url="%s/anime/emision" % host, text_color=color2, contentType="tvshow"))
itemlist.append(Item(channel=item.channel, action="listado", title="Anime", thumbnail=item.thumbnail,
url=host+'/api/buscador?q=&letra=ALL&genero=ALL&estado=2&offset=0&limit=20', text_color=color1, contentType="tvshow", extra="recientes"))
itemlist.append(Item(channel=item.channel, action="list_by_json", title="En emisión", thumbnail=item.thumbnail,
text_color=color2, contentType="tvshow"))
itemlist.append(Item(channel=item.channel, action="indices", title="Índices", thumbnail=item.thumbnail,
text_color=color2))
@@ -55,14 +58,8 @@ def openconfig(item):
def search(item, texto):
item.url = "%s/?s=%s" % (host, texto.replace(" ", "+"))
try:
return recientes(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
item.url = "%s/api/buscador?q=%s&letra=ALL&genero=ALL&estado=2&offset=0&limit=30" % (host, texto.replace(" ", "+"))
return list_by_json(item)
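The rewritten search() now just fills in the /api/buscador URL and hands everything to list_by_json(). For reference, a minimal sketch of that JSON round trip, using only the fields this diff actually reads (status, result.items, and per-item title/slug/id); the live payload may carry more:
from core import httptools
from core import jsontools
def buscar_api(host, texto, offset=0, limit=30):
    # Query the same /api/buscador endpoint the new search() uses and
    # flatten the fields list_by_json() consumes. The response shape is
    # inferred from this diff, not from any API documentation.
    url = "%s/api/buscador?q=%s&letra=ALL&genero=ALL&estado=2&offset=%s&limit=%s" % (
        host, texto.replace(" ", "+"), offset, limit)
    data = jsontools.load(httptools.downloadpage(url).data)
    if not data.get("status"):
        return []
    resultados = []
    for it in data.get("result", {}).get("items", []):
        resultados.append({
            "title": it["title"],
            "url": "%s/%s/" % (host, it["slug"]),
            "thumb": "http://media.animemovil.com/animes/%s/wallpaper_small.jpg" % it["id"],
        })
    return resultados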
def recientes(item):
@@ -71,7 +68,9 @@ def recientes(item):
itemlist = []
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data, '<ul class="emision"(.*?)</ul>')
data = re.sub(r'\n|\s{2,}','', data)
bloque = scrapertools.find_single_match(data, '<ul class="hover">(.*?)</ul>')
patron = '<li><a href="([^"]+)" title="([^"]+)".*?src="([^"]+)"'
matches = scrapertools.find_multiple_matches(bloque, patron)
for url, title, thumb in matches:
@@ -116,13 +115,13 @@ def recientes(item):
def listado(item):
logger.info()
itemlist = []
data = jsontools.load(httptools.downloadpage(item.url).data)
status = data.get('status')
data= data.get('result')
for it in data.get("items", []):
scrapedtitle = it["title"]
url = "%s/%s" % (host, it["url"])
thumb = "http://img.animemovil.com/w440-h250-c/%s" % it["img"]
url = "%s/%s/" % (host, it["slug"])
thumb = 'http://media.animemovil.com/animes/%s/wallpaper_small.jpg' % it['id']
title = re.sub(r"(?i) Ova| Especiales| \(Pelicula[s]*\)| \(Película[s]*\)| Sub| Español| Peliculas| Audio| Latino", "", scrapedtitle)
tipo = "tvshow"
@@ -132,22 +131,22 @@ def listado(item):
tipo = "movie"
show = ""
action = "peliculas"
infoLabels = {'filtro': {"original_language": "ja"}.items()}
itemlist.append(item.clone(action=action, title=scrapedtitle, url=url, thumbnail=thumb, text_color=color3,
contentTitle=title, contentSerieName=show, infoLabels=infoLabels,
context=renumbertools.context(item), contentType=tipo))
try:
from core import tmdb
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
except:
pass
if data["buttom"] and itemlist:
offset = int(scrapertools.find_single_match(item.url, 'offset=(\d+)')) + 1
if status and itemlist:
offset = scrapertools.find_single_match(item.url, 'offset=(\d+)')
if offset:
offset = int(offset) + 2
else:
offset = 0
url = re.sub(r'offset=\d+', 'offset=%s' % offset, item.url)
itemlist.append(Item(channel=item.channel, action="listado", url=url, title=">> Página Siguiente",
thumbnail=item.thumbnail, text_color=color2))
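listado() and list_by_json() page through the API the same way: read the current offset from the query string, add 2 (the step used verbatim in this diff), and substitute it back into the URL. Isolated as a sketch, assuming only that behavior:
import re
def next_page_url(url, step=2):
    # Read the current offset, bump it by the step this diff uses (2),
    # and rewrite it into the URL; a missing offset restarts at 0.
    match = re.search(r"offset=(\d+)", url)
    offset = int(match.group(1)) + step if match else 0
    return re.sub(r"offset=\d+", "offset=%s" % offset, url)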
@@ -160,68 +159,48 @@ def indices(item):
itemlist = []
if "Índices" in item.title:
itemlist.append(item.clone(title="Por Género", url="%s/anime/generos/" % host))
itemlist.append(item.clone(title="Por Letra", url="%s/anime/" % host))
itemlist.append(item.clone(action="completo", title="Lista completa de Animes",
url="%s/anime/lista/" % host))
itemlist.append(item.clone(title="Por Género", url="%s/anime" % host))
itemlist.append(item.clone(title="Por Letra", url="%s/anime" % host))
itemlist.append(item.clone(action="list_by_json", title="Lista completa de Animes",
url="%s/api/buscador?q=&letra=ALL&genero=ALL&estado=2&offset=0&limit=20" % host))
else:
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data, '<div class="letras">(.*?)</div>')
data = re.sub('\n|\s{2,}', '', data)
if 'Letra' in item.title:
bloque = scrapertools.find_single_match(data, '<select name="letra"(.*?)</select>')
patron = '<option value="(\w)"'
elif 'Género' in item.title:
bloque = scrapertools.find_single_match(data, '<select name="genero"(.*?)</select>')
patron = '<option value="(\d+.*?)/'
patron = '<a title="([^"]+)"'
matches = scrapertools.find_multiple_matches(bloque, patron)
for title in matches:
if "Letra" in item.title:
url = "%s/_API/?src=animesLetra&offset=0&letra=%s" % (host, title)
url = '%s/api/buscador?q=&letra=%s&genero=ALL&estado=2&offset=0&limit=20' % (host, title)
else:
url = "%s/_API/?src=animesGenero&offset=0&genero=%s" % (host, title)
itemlist.append(item.clone(action="listado", url=url, title=title))
value = scrapertools.find_single_match(title, '(\d+)"')
title = scrapertools.find_single_match(title, '\d+">(.*?)<')
url = '%s/api/buscador?q=&letra=ALL&genero=%s&estado=2&offset=0&limit=20' % (host, value)
itemlist.append(item.clone(action="list_by_json", url=url, title=title))
return itemlist
def completo(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data, '<ul class="listadoAnime">(.*?)</ul>')
patron = '<li><a href="([^"]+)" title="([^"]+)".*?src="([^"]+)"'
matches = scrapertools.find_multiple_matches(bloque, patron)
for url, title, thumb in matches:
url = host + url
scrapedtitle = title
thumb = thumb.replace("s90-c", "w440-h250-c")
title = re.sub(r"(?i) Ova| Especiales| \(Pelicula[s]*\)| \(Película[s]*\)| Sub Español| Peliculas", "", scrapedtitle)
tipo = "tvshow"
show = title
action = "episodios"
if url.endswith("-pelicula/") or url.endswith("-pelicula"):
tipo = "movie"
show = ""
action = "peliculas"
infoLabels = {'filtro': {"original_language": "ja"}.items()}
itemlist.append(Item(channel=item.channel, action=action, title=scrapedtitle, url=url, thumbnail=thumb,
text_color=color3, contentTitle=title, contentSerieName=show, extra="completo",
context=renumbertools.context(item), contentType=tipo, infoLabels=infoLabels))
return itemlist
def episodios(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
show = scrapertools.find_single_match(data, '<title>\s*([^<]+)\s*</title>')
data = re.sub('\n|\s{2,}', '', data)
show = scrapertools.find_single_match(data, '<div class="x-title">(.*?)</div>')
show = re.sub(r"(?i) Ova| Especiales| \(Pelicula[s]*\)| \(Película[s]*\)| Sub| Español| Peliculas| Audio| Latino", "", show)
if not item.infoLabels["plot"]:
item.infoLabels["plot"] = scrapertools.find_single_match(data, '<div class="InfoSipnosis">.*?<p>(.*?)</p>')
item.infoLabels["plot"] = scrapertools.find_single_match(data, '<div class="x-sinopsis">\s*(.*?)</div>')
bloque = scrapertools.find_single_match(data, 'ul class="lista"(.*?)</ul>')
bloque = scrapertools.find_single_match(data, '<ul class="list"(.*?)</ul>')
matches = scrapertools.find_multiple_matches(bloque, '<li><a href="([^"]+)" title="([^"]+)"')
for url, title in matches:
url = host + url
@@ -252,56 +231,44 @@ def episodios(item):
return itemlist
def peliculas(item):
def list_by_json(item):
logger.info()
itemlist = []
repeat = 1
status = False
if item.url =='':
item.url = host+"/api/buscador?limit=30&estado=1&dia=%s"
repeat = 6
for element in range(0,repeat):
if repeat != 1:
data = jsontools.load(httptools.downloadpage(item.url % element).data)
else:
data = jsontools.load(httptools.downloadpage(item.url).data)
if item.extra == "completo":
try:
from core import tmdb
tmdb.set_infoLabels_item(item, __modo_grafico__)
except:
pass
data = httptools.downloadpage(item.url).data
if not item.infoLabels["plot"]:
item.infoLabels["plot"] = scrapertools.find_single_match(data, '<div class="InfoSipnosis">.*?<p>(.*?)</p>')
bloque = scrapertools.find_single_match(data, 'ul class="lista"(.*?)</ul>')
matches = scrapertools.find_multiple_matches(bloque, '<li><a href="([^"]+)" title="([^"]+)"')
if len(matches) == 1:
item.url = host + matches[0][0]
itemlist = findvideos(item)
else:
for url, title in matches:
itemlist.append(item.clone(action="findvideos", title=title, url=url, extra=""))
return itemlist
def emision(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
bloques = scrapertools.find_multiple_matches(data, '<div class="horario">.*?</i>\s*(.*?)</span>(.*?)</ul>')
patron = '<li><a href="([^"]+)" title="([^"]+)".*?src="([^"]+)"'
for dia, b in bloques:
matches = scrapertools.find_multiple_matches(b, patron)
if matches:
itemlist.append(item.clone(action="", title=dia, text_color=color1))
for url, title, thumb in matches:
url = host + url
scrapedtitle = " %s" % title
title = re.sub(r"(?i) Ova| Especiales| \(Pelicula[s]*\)| \(Película[s]*\)| Sub Español| Peliculas", "", title)
if not thumb.startswith("http"):
thumb = "http:%s" % thumb
status = data.get('status')
json_data = data.get('result')
elem_data = json_data['items']
for item_data in elem_data:
url = '%s/%s/' % (host, item_data['slug'])
title = item_data['title']
title = re.sub(r"(?i) Ova| Especiales| \(Pelicula[s]*\)| \(Película[s]*\)| Sub Español| Peliculas", "",
title)
thumb = 'http://media.animemovil.com/animes/%s/wallpaper_small.jpg' % item_data['id']
infoLabels = {'filtro': {"original_language": "ja"}.items()}
itemlist.append(item.clone(action="episodios", title=scrapedtitle, url=url, thumbnail=thumb, text_color=color3,
contentTitle=title, contentSerieName=title, extra="recientes",
context=renumbertools.context(item), infoLabels=infoLabels))
itemlist.append(
item.clone(action="episodios", title=title, url=url, thumbnail=thumb, text_color=color3,
contentTitle=title, contentSerieName=title, extra="recientes",
context=renumbertools.context(item), infoLabels=infoLabels))
if status and itemlist:
offset = scrapertools.find_single_match(item.url, 'offset=(\d+)')
if offset:
offset = int(offset) + 2
else:
offset = 0
url = re.sub(r'offset=\d+', 'offset=%s' % offset, item.url)
itemlist.append(Item(channel=item.channel, action="listado", url=url, title=">> Página Siguiente",
thumbnail=item.thumbnail, text_color=color2))
return itemlist
@@ -310,80 +277,69 @@ def findvideos(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r'\n|\s{2,}', '', data)
id = scrapertools.find_single_match(data, '"id":"([^"]+)"')
bloque = scrapertools.find_single_match(data, 'ul class="controles">(.*?)</ul>')
patron = '<li title="([^"]+)" id="[^"]*" host="([^"]+)">'
matches = scrapertools.find_multiple_matches(data, patron)
for title, server in matches:
if title == "Vizard":
continue
title = "%s - %s" % (title, item.title)
post = "host=%s&id=%s" % (server, id)
itemlist.append(item.clone(action="play", url="http://server-2-stream.animemovil.com/V2/", title=title,
post=post))
akiba_url = scrapertools.find_single_match(data, '<div class="x-link"><a href="(.*?)"')
url = httptools.downloadpage('http:'+akiba_url, follow_redirects=False).headers.get('location')
title = '%s (%s)' % (item.title, 'akiba')
itemlist.append(item.clone(title=title, url=url, action='play'))
downl = scrapertools.find_single_match(data, '<div class="descargarCap">.*?<a href="([^"]+)"')
if downl:
downl = downl.replace("&amp;", "&")
itemlist.append(item.clone(action="play", title="Descarga - %s" % item.title, url=downl, server="directo"))
if not itemlist:
itemlist.append(Item(channel=item.channel, title="No hay vídeos disponibles", action=""))
if item.extra == "recientes":
url = scrapertools.find_single_match(data, '<a class="CapList".*?href="([^"]+)"')
if url:
url = host + url
itemlist.append(item.clone(action="episodios", title="Ir a lista de capítulos", url=url, text_color=color1))
elif item.contentType == "movie" and config.get_library_support():
if "No hay vídeos disponibles" not in itemlist[0].title:
itemlist.append(Item(channel=item.channel, title="Añadir película a la biblioteca", url=item.url,
action="add_pelicula_to_library", contentTitle=item.contentTitle, text_color=color4,
thumbnail=item.thumbnail, fanart=item.fanart))
info = scrapertools.find_single_match(data, 'episodio_info=(.*?);')
dict_info = jsontools.load(info)
return itemlist
servers = dict_info['stream']['servers']
id = dict_info['id']
access_point = dict_info['stream']['accessPoint']
expire = dict_info['stream']['expire']
callback = dict_info['stream']['callback']
signature = dict_info['stream']['signature']
last_modify = dict_info['stream']['last_modify']
for server in servers:
stream_info = 'http:%s/%s/%s?expire=%s&callback=%s&signature=%s&last_modify=%s' % \
(access_point, id, server, expire, callback, signature, last_modify)
def play(item):
logger.info()
dict_stream = jsontools.load(httptools.downloadpage(stream_info).data)
if item.server:
return [item]
if dict_stream['status']:
kind = dict_stream['result']['kind']
try:
if kind == 'iframe':
url = dict_stream['result']['src']
title = '%s (%s)' % (item.title, server)
elif kind == 'jwplayer':
url_style = dict_stream['result']['setup']
if server != 'rin':
itemlist = []
if 'playlist' in url_style:
part = 1
for media_list in url_style['playlist']:
url = media_list['file']
title = '%s (%s) - parte %s' % (item.title, server, part)
itemlist.append(item.clone(title=title, url=url, action='play'))
part += 1
else:
url = url_style['file']
title = '%s (%s)' % (item.title, server)
else:
src_list = url_style['sources']
for source in src_list:
url = source['file']
quality = source['label']
title = '%s [%s](%s)' % (item.title, quality, server)
itemlist.append(item.clone(title=title, url=url, action='play'))
data = jsontools.load(httptools.downloadpage(item.url, item.post).data)
if data["jwplayer"] == False:
content = data["eval"]["contenido"]
urls = scrapertools.find_multiple_matches(content, 'file\s*:\s*"([^"]+)"')
if not urls:
urls = scrapertools.find_multiple_matches(content, '"GET","([^"]+)"')
for url in urls:
if "mediafire" in url:
data_mf = httptools.downloadpage(url).data
url = scrapertools.find_single_match(data_mf, 'kNO\s*=\s*"([^"]+)"')
ext = url[-4:]
itemlist.insert(0, ["%s [directo]" % ext, url])
else:
if data["jwplayer"].get("sources"):
for source in data["jwplayer"]["sources"]:
label = source.get("label", "")
ext = source.get("type", "")
if ext and "/" in ext:
ext = ".%s " % ext.rsplit("/", 1)[1]
url = source.get("file")
if "server-3-stream" in url:
url = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location")
itemlist.insert(0, ["%s%s [directo]" % (ext, label), url])
elif data["jwplayer"].get("file"):
label = data["jwplayer"].get("label", "")
url = data["jwplayer"]["file"]
ext = data["jwplayer"].get("type", "")
if ext and "/" in ext:
ext = "%s " % ext.rsplit("/", 1)[1]
if "server-3-stream" in url:
url = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location")
itemlist.insert(0, [".%s%s [directo]" % (ext, label), url])
elif kind == 'javascript':
if 'jsCode' in dict_stream['result']:
jscode = dict_stream['result']['jsCode']
url = scrapertools.find_single_match(jscode, 'xmlhttp.open\("GET", "(.*?)"')
title = '%s (%s)' % (item.title, server)
if url != '':
itemlist.append(item.clone(title=title, url=url, action='play'))
except:
pass
itemlist = servertools.get_servers_itemlist(itemlist)
return itemlist
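The new findvideos() reads the episodio_info JSON embedded in the page, builds one signed access-point URL per server, and then branches on each response's kind (iframe, jwplayer, javascript). The URL assembly, isolated as a sketch using exactly the fields the code above touches:
def build_stream_urls(dict_info):
    # dict_info is the jsontools.load() of the page's episodio_info blob;
    # every key used below appears in the findvideos() diff above.
    stream = dict_info["stream"]
    urls = {}
    for server in stream["servers"]:
        urls[server] = ("http:%s/%s/%s?expire=%s&callback=%s&signature=%s&last_modify=%s"
                        % (stream["accessPoint"], dict_info["id"], server,
                           stream["expire"], stream["callback"],
                           stream["signature"], stream["last_modify"]))
    return urls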
@@ -392,7 +348,7 @@ def newest(categoria):
logger.info()
item = Item()
try:
item.url = "http://skanime.net/"
item.url = host
item.extra = "novedades"
itemlist = recientes(item)
# Catch the exception so a failing channel does not interrupt the 'newest' channel

View File

@@ -15,7 +15,6 @@ from channelselector import get_thumb
IDIOMAS = {'latino': 'Latino', 'castellano': 'Español', 'portugues': 'Portugues'}
list_language = IDIOMAS.values()
logger.debug('lista_language: %s' % list_language)
list_quality = ['1080p', '720p', '480p', '360p', '240p', 'default']
list_servers = [
@@ -115,7 +114,7 @@ def submenu(item):
title="Buscar",
action="search",
thumbnail=get_thumb('search', auto=True),
url=host + '/apiseries/seriebyword/',
url=host + '/?s=',
fanart='https://s30.postimg.org/pei7txpa9/buscar.png',
host=item.host,
))
@@ -351,15 +350,12 @@ def get_urls(item, link):
data = httptools.downloadpage(url, post=post, headers=headers).data
dict_data = jsontools.load(data)
logger.debug(dict_data['link'])
logger.debug(data)
return dict_data['link']
def play(item):
logger.info()
itemlist = []
logger.debug('item: %s' % item)
if 'juicyapi' not in item.url:
itemlist = servertools.find_video_items(data=item.url)
@@ -398,72 +394,9 @@ def newest(categoria):
return itemlist
def busqueda(item):
logger.info()
itemlist = []
# Download the page
data = httptools.downloadpage(item.url).data
from core import jsontools
data = jsontools.load(data)
for entry in data["results"]:
title = entry["richSnippet"]["metatags"]["ogTitle"]
url = entry["url"]
plot = entry["content"]
plot = scrapertools.htmlclean(plot)
thumbnail = entry["richSnippet"]["metatags"]["ogImage"]
title = scrapertools.find_single_match(title, '(.*?) \(.*?\)')
year = re.sub(r'.*?\((\d{4})\)', '', title)
title = year
fulltitle = title
new_item = item.clone(action="findvideos",
title=title,
fulltitle=fulltitle,
url=url,
thumbnail=thumbnail,
contentTitle=title,
contentType="movie",
plot=plot,
infoLabels={'year': year, 'sinopsis': plot}
)
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
actualpage = int(scrapertools.find_single_match(item.url, 'start=(\d+)'))
totalresults = int(data["cursor"]["resultCount"])
if actualpage + 20 <= totalresults:
url_next = item.url.replace("start=" + str(actualpage), "start=" + str(actualpage + 20))
itemlist.append(
Item(channel=item.channel,
action="busqueda",
title=">> Página Siguiente",
url=url_next
))
return itemlist
def search(item, texto):
logger.info()
data = httptools.downloadpage(host).data
cx = scrapertools.find_single_match(data, 'name="cx" value="(.*?)"')
texto = texto.replace(" ", "%20")
item.url = "https://www.googleapis.com/customsearch/v1element?key=AIzaSyCVAXiUzRYsML1Pv6RwSG1gunmMikTzQqY&rsz" \
"=filtered_cse&num=20&hl=es&sig=0c3990ce7a056ed50667fe0c3873c9b6&cx=%s&q=%s&sort=&googlehost=www" \
".google.com&start=0" % (cx, texto)
try:
return busqueda(item)
# Catch the exception so a failing channel does not interrupt the global search
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
texto = texto.replace(" ", "-")
item.url = host + '/?s=' + texto
if texto != '':
return peliculas(item)

View File

@@ -53,7 +53,7 @@ def list_all(item):
else:
patron = '<article id=post-.*?<a href=(.*?)>.*?<img src=(.*?) alt=.*?'
patron += '<h2 class=Title>(.*?)<\/h2>.*?<span class=Year>(.*?)<\/span>.*?Qlty>(.*?)<\/span>'
data = get_source(item.url)
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, year, quality in matches:

View File

@@ -1,7 +1,7 @@
{
"id": "cuelgame",
"name": "Cuelgame",
"active": false,
"active": true,
"adult": false,
"language": ["cast"],
"thumbnail": "cuelgame.png",
@@ -31,4 +31,4 @@
"visible": true
}
]
}
}

File diff suppressed because it is too large

View File

@@ -3,83 +3,30 @@
import os
import re
import urllib
from threading import Thread
import xbmc
import xbmcgui
from core import httptools
from core import scrapertools
from core import tmdb
from core.item import Item
from core.scrapertools import decodeHtmlentities as dhe
from platformcode import config, logger
header = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0'}
ACTION_SHOW_FULLSCREEN = 36
ACTION_GESTURE_SWIPE_LEFT = 511
ACTION_SELECT_ITEM = 7
ACTION_PREVIOUS_MENU = 10
ACTION_MOVE_LEFT = 1
ACTION_MOVE_RIGHT = 2
ACTION_MOVE_DOWN = 4
ACTION_MOVE_UP = 3
OPTION_PANEL = 6
OPTIONS_OK = 5
host = "http://www.divxtotal.co"
__modo_grafico__ = config.get_setting('modo_grafico', "divxtotal")
# For Bing searches, avoiding bans
def browser(url):
import mechanize
# Use a mechanize Browser to work around problems with Bing search
br = mechanize.Browser()
# Browser options
br.set_handle_equiv(False)
br.set_handle_gzip(True)
br.set_handle_redirect(True)
br.set_handle_referer(False)
br.set_handle_robots(False)
# Follows refresh 0 but does not hang on refresh > 0
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
# Want debugging messages?
# br.set_debug_http(True)
# br.set_debug_redirects(True)
# br.set_debug_responses(True)
# User-Agent (this is cheating, ok?)
# br.addheaders = [('User-agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/600.7.12 (KHTML, like Gecko) Version/7.1.7 Safari/537.85.16')]
# br.addheaders =[('Cookie','SRCHD=AF=QBRE; domain=.bing.com; expires=25 de febrero de 2018 13:00:28 GMT+1; MUIDB=3B942052D204686335322894D3086911; domain=www.bing.com;expires=24 de febrero de 2018 13:00:28 GMT+1')]
# Open some site, let's pick a random one, the first that pops in mind
r = br.open(url)
response = r.read()
print response
if "img,divreturn" in response:
r = br.open("http://ssl-proxy.my-addr.org/myaddrproxy.php/" + url)
print "prooooxy"
response = r.read()
return response
api_key = "2e2160006592024ba87ccdf78c28f49f"
api_fankey = "dffe90fba4d02c199ae7a9e71330c987"
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(item.clone(title="[COLOR orange][B]Películas[/B][/COLOR]", action="scraper",
url="http://www.divxtotal.com/peliculas/", thumbnail="http://imgur.com/A4zN3OP.png",
url = host + "/peliculas/", thumbnail="http://imgur.com/A4zN3OP.png",
fanart="http://imgur.com/fdntKsy.jpg", contentType="movie"))
itemlist.append(item.clone(title="[COLOR orange][B] Películas HD[/B][/COLOR]", action="scraper",
url="http://www.divxtotal.com/peliculas-hd/", thumbnail="http://imgur.com/A4zN3OP.png",
url = host + "/peliculas-hd/", thumbnail="http://imgur.com/A4zN3OP.png",
fanart="http://imgur.com/fdntKsy.jpg", contentType="movie"))
itemlist.append(itemlist[-1].clone(title="[COLOR orange][B]Series[/B][/COLOR]", action="scraper",
url="http://www.divxtotal.com/series/", thumbnail="http://imgur.com/GPX2wLt.png",
url = host + "/series/", thumbnail="http://imgur.com/GPX2wLt.png",
contentType="tvshow"))
itemlist.append(itemlist[-1].clone(title="[COLOR orangered][B]Buscar[/B][/COLOR]", action="search",
@@ -90,7 +37,7 @@ def mainlist(item):
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = "http://www.divxtotal.com/?s=" + texto
item.url = host + "/?s=" + texto
item.extra = "search"
try:
return buscador(item)
@@ -106,22 +53,16 @@ def buscador(item):
itemlist = []
data = httptools.downloadpage(item.url, headers=header, cookies=False).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = scrapertools.find_multiple_matches(data,
'<tr><td class="text-left"><a href="([^"]+)" title="([^"]+)">.*?-left">(.*?)</td>')
for url, title, check in patron:
patron = '<tr><td class="text-left"><a href="([^"]+)" title="([^"]+)">.*?-left">(.*?)</td>'
matches = scrapertools.find_multiple_matches(data, patron)
for url, title, check in matches:
if "N/A" in check:
checkmt = "tvshow"
else:
checkmt = "movie"
titulo = title
title = re.sub(r"!|¡|HD|\d+\d+\d+\d+|\(.*?\).*\[.*?]\]", "", title)
title = re.sub(r"&#8217;|PRE-Estreno", "'", title)
if checkmt == "movie":
new_item = item.clone(action="findvideos", title=titulo, url=url, fulltitle=title, contentTitle=title,
contentType="movie", library=True)
@@ -138,9 +79,7 @@ def buscador(item):
next = scrapertools.find_single_match(data, "<ul class=\"pagination\">.*?\(current\).*?href='([^']+)'")
if len(next) > 0:
url = next
itemlist.append(item.clone(title="[COLOR springgreen][B]Siguiente >>[/B][/COLOR]", action="buscador", url=url))
try:
from core import tmdb
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
@@ -153,7 +92,6 @@ def buscador(item):
item.title = item.title + " " + str(item.infoLabels['rating'])
except:
pass
return itemlist
@@ -162,14 +100,10 @@ def scraper(item):
itemlist = []
data = httptools.downloadpage(item.url, headers=header, cookies=False).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
if item.contentType == "movie":
patron = scrapertools.find_multiple_matches(data,
'<tr><td><a href="([^"]+)" title="([^"]+)".*?\d+-\d+-([^"]+)</td><td>')
for url, title, year in patron:
patron = '<tr><td><a href="([^"]+)" title="([^"]+)".*?\d+-\d+-([^"]+)</td><td>'
matches = scrapertools.find_multiple_matches(data, patron)
for url, title, year in matches:
titulo = re.sub(r"\d+\d+\d+\d+|\(.*?\).*", "", title)
title = re.sub(r"!|¡|HD|\d+\d+\d+\d+|\(.*?\).*", "", title)
title = title.replace("Autosia", "Autopsia")
@@ -178,14 +112,12 @@ def scraper(item):
fulltitle=title, contentTitle=title, contentType="movie", extra=year, library=True)
new_item.infoLabels['year'] = get_year(url)
itemlist.append(new_item)
else:
patron = scrapertools.find_multiple_matches(data,
'<p class="secconimagen"><a href="([^"]+)" title="[^"]+"><img src="([^"]+)".*?title="[^"]+">([^"]+)</a>')
for url, thumb, title in patron:
patron = '(?s)<p class="secconimagen"><a href="([^"]+)"'
patron += ' title="[^"]+"><img src="([^"]+)".*?'
patron += 'rel="bookmark">([^<]+)<'
matches = scrapertools.find_multiple_matches(data, patron)
for url, thumb, title in matches:
titulo = title.strip()
title = re.sub(r"\d+x.*|\(.*?\)", "", title)
new_item = item.clone(action="findvideos", title="[COLOR orange]" + titulo + "[/COLOR]", url=url,
@@ -193,7 +125,6 @@ def scraper(item):
fulltitle=title, contentTitle=title, show=title, contentType="tvshow", library=True)
new_item.infoLabels['year'] = get_year(url)
itemlist.append(new_item)
## Pagination
next = scrapertools.find_single_match(data, "<ul class=\"pagination\">.*?\(current\).*?href='([^']+)'")
if len(next) > 0:
@@ -215,21 +146,14 @@ def scraper(item):
except:
pass
for item_tmdb in itemlist:
logger.info(str(item_tmdb.infoLabels['tmdb_id']))
return itemlist
def findtemporadas(item):
logger.info()
itemlist = []
if item.extra == "search":
th = Thread(target=get_art(item))
th.setDaemon(True)
th.start()
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
if len(item.extra.split("|")):
@@ -264,8 +188,7 @@ def findtemporadas(item):
except:
fanart_extra = item.fanart
fanart_info = item.fanart
bloque_episodios = scrapertools.find_multiple_matches(data, 'Temporada.*?(\d+).*?<\/a>(.*?)<\/table>')
bloque_episodios = scrapertools.find_multiple_matches(data, 'Temporada (\d+).*?<\/a>(.*?)<\/table>')
for temporada, bloque_epis in bloque_episodios:
item.infoLabels = item.InfoLabels
item.infoLabels['season'] = temporada
@@ -298,9 +221,9 @@ def epis(item):
itemlist = []
if item.extra == "serie_add":
item.url = item.datalibrary
patron = scrapertools.find_multiple_matches(item.url,
'<td><img src=".*?images\/(.*?)\.png".*?href="([^"]+)" title="">.*?(\d+x\d+).*?td>')
for idioma, url, epi in patron:
patron = '<td><img src=".*?images\/(.*?)\.png".*?href="([^"]+)" title="">.*?(\d+x\d+).*?td>'
matches = scrapertools.find_multiple_matches(item.url, patron)
for idioma, url, epi in matches:
episodio = scrapertools.find_single_match(epi, '\d+x(\d+)')
item.infoLabels['episode'] = episodio
itemlist.append(
@@ -320,19 +243,11 @@ def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
if not item.infoLabels['episode']:
th = Thread(target=get_art(item))
th.setDaemon(True)
th.start()
if item.contentType != "movie":
if not item.infoLabels['episode']:
capitulo = scrapertools.find_single_match(item.title, '(\d+x\d+)')
url_capitulo = scrapertools.find_single_match(data,
'<a href="(http://www.divxtotal.com/wp-content/uploads/.*?' + capitulo + '.*?.torrent)')
patron = '<a href="(' + host + '/wp-content/uploads/.*?' + capitulo + '.*?.torrent)'
url_capitulo = scrapertools.find_single_match(data, patron)
if len(item.extra.split("|")) >= 2:
extra = item.extra
else:
@@ -350,7 +265,6 @@ def findvideos(item):
title="[COLOR chocolate][B]Ver capítulo " + capitulo + "[/B][/COLOR]" + "-" + "[COLOR khaki] ( Video" + "[/COLOR]" + " " + "[COLOR khaki]" + ext_v + "[/COLOR]" + " " + "[COLOR khaki] " + size + " )" + "[/COLOR]",
url=url_capitulo, action="play", server="torrent", fanart=fanart, thumbnail=item.thumbnail,
extra=item.extra, fulltitle=item.fulltitle, folder=False))
if item.infoLabels['episode'] and item.library:
thumbnail = scrapertools.find_single_match(item.extra, 'http://assets.fanart.tv/.*jpg')
if thumbnail == "":
@@ -363,15 +277,13 @@ def findvideos(item):
action="info_capitulos", fanart=fanart, thumbnail=item.thumb_art,
thumb_info=item.thumb_info, extra=item.extra, show=item.show,
InfoLabels=item.infoLabels, folder=False))
if not item.infoLabels['episode']:
itemlist.append(
Item(channel=item.channel, title="[COLOR moccasin][B]Todos los episodios[/B][/COLOR]", url=item.url,
action="findtemporadas", server="torrent", fanart=item.extra.split("|")[1],
action="findtemporadas", server="torrent",
thumbnail=item.thumbnail, extra=item.extra + "|" + item.thumbnail, contentType=item.contentType,
contentTitle=item.contentTitle, InfoLabels=item.infoLabels, thumb_art=item.thumb_art,
thumb_info=item.thumbnail, fulltitle=item.fulltitle, library=item.library, folder=True))
else:
url = scrapertools.find_single_match(data, '<h3 class="orange text-center">.*?href="([^"]+)"')
item.infoLabels['year'] = None
@@ -388,7 +300,6 @@ def findvideos(item):
action="add_pelicula_to_library", url=item.url, infoLabels=infoLabels,
text_color="0xFFe5ffcc",
thumbnail='http://imgur.com/xQNTqqy.png'))
return itemlist
@@ -401,7 +312,6 @@ def info_capitulos(item, images={}):
url = url.replace("/0", "/")
from core import jsontools
data = httptools.downloadpage(url).data
if "<filename>episodes" in data:
image = scrapertools.find_single_match(data, '<Data>.*?<filename>(.*?)</filename>')
image = "http://thetvdb.com/banners/" + image
@@ -431,7 +341,6 @@ def info_capitulos(item, images={}):
except:
rating = 0
try:
if rating >= 5 and rating < 8:
rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR springgreen][B]" + str(rating) + "[/B][/COLOR]"
elif rating >= 8 and rating < 10:
@@ -444,90 +353,17 @@ def info_capitulos(item, images={}):
rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR crimson][B]" + str(rating) + "[/B][/COLOR]"
if "10." in rating:
rating = re.sub(r'10\.\d+', '10', rating)
except:
title = "[COLOR red][B]LO SENTIMOS...[/B][/COLOR]"
plot = "Este capitulo no tiene informacion..."
plot = "[COLOR yellow][B]" + plot + "[/B][/COLOR]"
image = "http://s6.postimg.org/ub7pb76c1/noinfo.png"
foto = "http://s6.postimg.org/nm3gk1xox/noinfosup2.png"
rating = ""
ventana = TextBox2(title=title, plot=plot, thumbnail=image, fanart=foto, rating=rating)
ventana.doModal()
class TextBox2(xbmcgui.WindowDialog):
""" Create a skinned textbox window """
def __init__(self, *args, **kwargs):
self.getTitle = kwargs.get('title')
self.getPlot = kwargs.get('plot')
self.getThumbnail = kwargs.get('thumbnail')
self.getFanart = kwargs.get('fanart')
self.getRating = kwargs.get('rating')
self.background = xbmcgui.ControlImage(70, 20, 1150, 630, 'http://imgur.com/K6wduMe.png')
self.title = xbmcgui.ControlTextBox(120, 60, 430, 50)
self.rating = xbmcgui.ControlTextBox(145, 112, 1030, 45)
self.plot = xbmcgui.ControlTextBox(120, 150, 1056, 100)
self.thumbnail = xbmcgui.ControlImage(120, 300, 1056, 300, self.getThumbnail)
self.fanart = xbmcgui.ControlImage(780, 43, 390, 100, self.getFanart)
self.addControl(self.background)
self.background.setAnimations(
[('conditional', 'effect=slide start=1000% end=0% time=1500 condition=true tween=bounce',),
('WindowClose', 'effect=slide delay=800 start=0% end=1000% time=800 condition=true',)])
self.addControl(self.thumbnail)
self.thumbnail.setAnimations([('conditional',
'effect=zoom start=0% end=100% delay=2700 time=1500 condition=true tween=elastic easing=inout',),
('WindowClose', 'effect=slide end=0,700% time=300 condition=true',)])
self.addControl(self.plot)
self.plot.setAnimations(
[('conditional', 'effect=zoom delay=2000 center=auto start=0 end=100 time=800 condition=true ',), (
'conditional',
'effect=rotate delay=2000 center=auto aceleration=6000 start=0% end=360% time=800 condition=true',),
('WindowClose', 'effect=zoom center=auto start=100% end=-0% time=600 condition=true',)])
self.addControl(self.fanart)
self.fanart.setAnimations(
[('WindowOpen', 'effect=slide start=0,-700 delay=1000 time=2500 tween=bounce condition=true',), (
'conditional',
'effect=rotate center=auto start=0% end=360% delay=3000 time=2500 tween=bounce condition=true',),
('WindowClose', 'effect=slide end=0,-700% time=1000 condition=true',)])
self.addControl(self.title)
self.title.setText(self.getTitle)
self.title.setAnimations(
[('conditional', 'effect=slide start=-1500% end=0% delay=1000 time=2000 condition=true tween=elastic',),
('WindowClose', 'effect=slide start=0% end=-1500% time=800 condition=true',)])
self.addControl(self.rating)
self.rating.setText(self.getRating)
self.rating.setAnimations(
[('conditional', 'effect=fade start=0% end=100% delay=3000 time=1500 condition=true',),
('WindowClose', 'effect=slide end=0,-700% time=600 condition=true',)])
xbmc.sleep(200)
try:
self.plot.autoScroll(7000, 6000, 30000)
except:
xbmc.executebuiltin(
'Notification([COLOR red][B]Actualiza Kodi a su última versión[/B][/COLOR], [COLOR skyblue]para mejor info[/COLOR],8000,"https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/kodi-icon.png")')
self.plot.setText(self.getPlot)
def get(self):
self.show()
def onAction(self, action):
if action == ACTION_PREVIOUS_MENU or action.getId() == ACTION_GESTURE_SWIPE_LEFT or action == 110 or action == 92:
self.close()
def test():
return True
def tokenize(text, match=re.compile("([idel])|(\d+):|(-?\d+)").match):
i = 0
while i < len(text):
@@ -576,7 +412,6 @@ def decode(text):
data = data
except:
data = src
return data
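tokenize()/decode() above implement the classic bencode (.torrent) reader; the tokenize regex's three alternatives map to 'i...e' integers, '<len>:'-prefixed strings, and the l/d/e container markers. A compact recursive sketch of the same format (error handling omitted; the channel's decode() adds it):
def bdecode(text):
    # Minimal bencode decoder consistent with the tokenize() regex above.
    def parse(i):
        c = text[i]
        if c == "i":  # integer: i<digits>e
            end = text.index("e", i)
            return int(text[i + 1:end]), end + 1
        if c in "ld":  # list or dict: l...e / d...e
            i += 1
            items = []
            while text[i] != "e":
                value, i = parse(i)
                items.append(value)
            if c == "l":
                return items, i + 1
            return dict(zip(items[0::2], items[1::2])), i + 1
        # string: <length>:<bytes>
        length = text[i:text.index(":", i)]
        start = i + len(length) + 1
        return text[start:start + int(length)], start + int(length)
    return parse(0)[0]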
@@ -591,381 +426,6 @@ def convert_size(size):
return '%s %s' % (s, size_name[i])
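The body of convert_size() is elided by the hunk above; only its final return survives. That '%s %s' with a size_name table matches the usual log-1024 conversion, so this reconstruction is an assumption rather than a quote from the source:
import math
def convert_size(size):
    # Assumed reconstruction: standard log-1024 scaling down to a
    # two-decimal value plus unit, matching the return statement shown.
    if not size:
        return '0 B'
    size_name = ('B', 'KB', 'MB', 'GB', 'TB', 'PB')
    i = int(math.floor(math.log(size, 1024)))
    s = round(size / math.pow(1024, i), 2)
    return '%s %s' % (s, size_name[i])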
def fanartv(item, id_tvdb, id, images={}):
headers = [['Content-Type', 'application/json']]
from core import jsontools
if item.contentType == "movie":
url = "http://webservice.fanart.tv/v3/movies/%s?api_key=cab16e262d72fea6a6843d679aa10300" \
% id
else:
url = "http://webservice.fanart.tv/v3/tv/%s?api_key=cab16e262d72fea6a6843d679aa10300" % id_tvdb
try:
data = jsontools.load(scrapertools.downloadpage(url, headers=headers))
if data and not "error message" in data:
for key, value in data.items():
if key not in ["name", "tmdb_id", "imdb_id", "thetvdb_id"]:
images[key] = value
else:
images = []
except:
images = []
return images
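fanartv() returns every image category fanart.tv reports, minus the name/id metadata keys; get_art() below then picks art by priority. A hedged usage sketch, assuming a movie item in scope and a hypothetical TMDB id:
images = fanartv(item, id_tvdb="", id="550")  # "550" is a made-up TMDB id
if images and images.get("moviedisc"):
    item.thumbnail = images["moviedisc"][0].get("url")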
def filmaffinity(item, infoLabels):
title = infoLabels["title"].replace(" ", "+")
try:
year = infoLabels["year"]
except:
year = ""
sinopsis = infoLabels["sinopsis"]
if year == "":
if item.contentType != "movie":
tipo = "serie"
url_bing = "http://www.bing.com/search?q=%s+Serie+de+tv+site:filmaffinity.com" % title
else:
tipo = "película"
url_bing = "http://www.bing.com/search?q=%s+site:filmaffinity.com" % title
try:
data = browser(url_bing)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
if "myaddrproxy.php" in data:
subdata_bing = scrapertools.get_match(data,
'li class="b_algo"><div class="b_title"><h2>(<a href="/myaddrproxy.php/http/www.filmaffinity.com/es/film.*?)"')
subdata_bing = re.sub(r'\/myaddrproxy.php\/http\/', '', subdata_bing)
else:
subdata_bing = scrapertools.get_match(data,
'li class="b_algo"><h2>(<a href="http://www.filmaffinity.com/.*?/film.*?)"')
url_filma = scrapertools.get_match(subdata_bing, '<a href="([^"]+)')
if not "http" in url_filma:
try:
data = httptools.downloadpage("http://" + url_filma, cookies=False, timeout=1).data
except:
data = httptools.downloadpage("http://" + url_filma, cookies=False, timeout=1).data
else:
try:
data = httptools.downloadpage(url_filma, cookies=False, timeout=1).data
except:
data = httptools.downloadpage(url_filma, cookies=False, timeout=1).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
except:
pass
else:
tipo = "Pelicula"
url = "http://www.filmaffinity.com/es/advsearch.php?stext={0}&stype%5B%5D=title&country=&genre=&fromyear={1}&toyear={1}".format(
title, year)
data = httptools.downloadpage(url, cookies=False).data
url_filmaf = scrapertools.find_single_match(data, '<div class="mc-poster">\s*<a title="[^"]*" href="([^"]+)"')
if url_filmaf:
url_filmaf = "http://www.filmaffinity.com%s" % url_filmaf
data = httptools.downloadpage(url_filmaf, cookies=False).data
else:
if item.contentType != "movie":
tipo = "serie"
url_bing = "http://www.bing.com/search?q=%s+Serie+de+tv+site:filmaffinity.com" % title
else:
tipo = "película"
url_bing = "http://www.bing.com/search?q=%s+site:filmaffinity.com" % title
try:
data = browser(url_bing)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
if "myaddrproxy.php" in data:
subdata_bing = scrapertools.get_match(data,
'li class="b_algo"><div class="b_title"><h2>(<a href="/myaddrproxy.php/http/www.filmaffinity.com/es/film.*?)"')
subdata_bing = re.sub(r'\/myaddrproxy.php\/http\/', '', subdata_bing)
else:
subdata_bing = scrapertools.get_match(data,
'li class="b_algo"><h2>(<a href="http://www.filmaffinity.com/.*?/film.*?)"')
url_filma = scrapertools.get_match(subdata_bing, '<a href="([^"]+)')
if not "http" in url_filma:
data = httptools.downloadpage("http://" + url_filma, cookies=False).data
else:
data = httptools.downloadpage(url_filma, cookies=False).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
except:
pass
sinopsis_f = scrapertools.find_single_match(data, '<dd itemprop="description">(.*?)</dd>')
sinopsis_f = sinopsis_f.replace("<br><br />", "\n")
sinopsis_f = re.sub(r"\(FILMAFFINITY\)<br />", "", sinopsis_f)
try:
year_f = scrapertools.get_match(data, '<dt>Año</dt>.*?>(\d+)</dd>')
except:
year_f = ""
try:
rating_filma = scrapertools.get_match(data, 'itemprop="ratingValue" content="(.*?)">')
except:
rating_filma = "Sin puntuacion"
critica = ""
patron = '<div itemprop="reviewBody">(.*?)</div>.*?itemprop="author">(.*?)\s*<i alt="([^"]+)"'
matches_reviews = scrapertools.find_multiple_matches(data, patron)
if matches_reviews:
for review, autor, valoracion in matches_reviews:
review = dhe(scrapertools.htmlclean(review))
review += "\n" + autor + "[CR]"
review = re.sub(r'Puntuac.*?\)', '', review)
if "positiva" in valoracion:
critica += "[COLOR green][B]%s[/B][/COLOR]\n" % review
elif "neutral" in valoracion:
critica += "[COLOR yellow][B]%s[/B][/COLOR]\n" % review
else:
critica += "[COLOR red][B]%s[/B][/COLOR]\n" % review
else:
critica = "[COLOR floralwhite][B]Esta %s no tiene críticas todavía...[/B][/COLOR]" % tipo
return critica, rating_filma, year_f, sinopsis_f
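filmaffinity() hands back the colored critics block plus FilmAffinity's rating, year, and synopsis. A short wiring sketch, assuming an item whose infoLabels carry the same 'title'/'sinopsis' keys the function reads:
critica, rating_filma, year_f, sinopsis_f = filmaffinity(item, item.infoLabels)
if not item.infoLabels.get("sinopsis") and sinopsis_f:
    item.infoLabels["sinopsis"] = sinopsis_f
item.plot = critica  # colored reviews, ready for a Kodi text box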
def get_art(item):
logger.info()
id = item.infoLabels['tmdb_id']
check_fanart = item.infoLabels['fanart']
if item.contentType != "movie":
tipo_ps = "tv"
else:
tipo_ps = "movie"
if not id:
year = item.extra
otmdb = tmdb.Tmdb(texto_buscado=item.fulltitle, year=year, tipo=tipo_ps)
id = otmdb.result.get("id")
if id == None:
otmdb = tmdb.Tmdb(texto_buscado=item.fulltitle, tipo=tipo_ps)
id = otmdb.result.get("id")
if id == None:
if item.contentType == "movie":
urlbing_imdb = "http://www.bing.com/search?q=%s+%s+tv+series+site:imdb.com" % (
item.fulltitle.replace(' ', '+'), year)
data = browser(urlbing_imdb)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|http://ssl-proxy.my-addr.org/myaddrproxy.php/", "", data)
subdata_imdb = scrapertools.find_single_match(data,
'<li class="b_algo">(.*?)h="ID.*?<strong>.*?TV Series')
else:
urlbing_imdb = "http://www.bing.com/search?q=%s+%s+site:imdb.com" % (
item.fulltitle.replace(' ', '+'), year)
data = browser(urlbing_imdb)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|http://ssl-proxy.my-addr.org/myaddrproxy.php/", "", data)
subdata_imdb = scrapertools.find_single_match(data, '<li class="b_algo">(.*?)h="ID.*?<strong>')
try:
imdb_id = scrapertools.get_match(subdata_imdb, '<a href=.*?http.*?imdb.com/title/(.*?)/.*?"')
except:
try:
imdb_id = scrapertools.get_match(subdata_imdb,
'<a href=.*?http.*?imdb.com/.*?/title/(.*?)/.*?"')
except:
imdb_id = ""
otmdb = tmdb.Tmdb(external_id=imdb_id, external_source="imdb_id", tipo=tipo_ps, idioma_busqueda="es")
id = otmdb.result.get("id")
if id == None:
if "(" in item.fulltitle:
title = scrapertools.find_single_match(item.fulltitle, '\(.*?\)')
if item.contentType != "movie":
urlbing_imdb = "http://www.bing.com/search?q=%s+%s+tv+series+site:imdb.com" % (
title.replace(' ', '+'), year)
data = browser(urlbing_imdb)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|http://ssl-proxy.my-addr.org/myaddrproxy.php/", "",
data)
subdata_imdb = scrapertools.find_single_match(data,
'<li class="b_algo">(.*?)h="ID.*?<strong>.*?TV Series')
else:
urlbing_imdb = "http://www.bing.com/search?q=%s+%s+site:imdb.com" % (
title.replace(' ', '+'), year)
data = browser(urlbing_imdb)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|http://ssl-proxy.my-addr.org/myaddrproxy.php/", "",
data)
subdata_imdb = scrapertools.find_single_match(data,
'<li class="b_algo">(.*?)h="ID.*?<strong>')
try:
imdb_id = scrapertools.get_match(subdata_imdb,
'<a href=.*?http.*?imdb.com/title/(.*?)/.*?"')
except:
try:
imdb_id = scrapertools.get_match(subdata_imdb,
'<a href=.*?http.*?imdb.com/.*?/title/(.*?)/.*?"')
except:
imdb_id = ""
otmdb = tmdb.Tmdb(external_id=imdb_id, external_source="imdb_id", tipo=tipo_ps,
idioma_busqueda="es")
id = otmdb.result.get("id")
if not id:
fanart = item.fanart
imagenes = []
itmdb = tmdb.Tmdb(id_Tmdb=id, tipo=tipo_ps)
images = itmdb.result.get("images")
if images:
for key, value in images.iteritems():
for detail in value:
imagenes.append('http://image.tmdb.org/t/p/original' + detail["file_path"])
if item.contentType == "movie":
if len(imagenes) >= 4:
if imagenes[0] != check_fanart:
item.fanart = imagenes[0]
else:
item.fanart = imagenes[1]
if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart:
item.extra = imagenes[1] + "|" + imagenes[2]
else:
if imagenes[1] != check_fanart and imagenes[1] != item.fanart:
item.extra = imagenes[1] + "|" + imagenes[3]
elif imagenes[2] != check_fanart:
item.extra = imagenes[2] + "|" + imagenes[3]
else:
item.extra = imagenes[3] + "|" + imagenes[3]
elif len(imagenes) == 3:
if imagenes[0] != check_fanart:
item.fanart = imagenes[0]
else:
item.fanart = imagenes[1]
if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart:
item.extra = imagenes[1] + "|" + imagenes[2]
else:
if imagenes[1] != check_fanart and imagenes[1] != item.fanart:
item.extra = imagenes[0] + "|" + imagenes[1]
elif imagenes[2] != check_fanart:
item.extra = imagenes[1] + "|" + imagenes[2]
else:
item.extra = imagenes[1] + "|" + imagenes[1]
elif len(imagenes) == 2:
if imagenes[0] != check_fanart:
item.fanart = imagenes[0]
else:
item.fanart = imagenes[1]
if imagenes[1] != check_fanart and imagenes[1] != item.fanart:
item.extra = imagenes[0] + "|" + imagenes[1]
else:
item.extra = imagenes[1] + "|" + imagenes[0]
elif len(imagenes) == 1:
item.extra = imagenes[0] + "|" + imagenes[0]
else:
item.extra = item.fanart + "|" + item.fanart
id_tvdb = ""
else:
if itmdb.result.get("external_ids").get("tvdb_id"):
id_tvdb = itmdb.result.get("external_ids").get("tvdb_id")
else:
id_tvdb = ""
if len(imagenes) >= 6:
if imagenes[0] != check_fanart:
item.fanart = imagenes[0]
else:
item.fanart = imagenes[1]
if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart:
item.extra = imagenes[1] + "|" + imagenes[2] + "|" + imagenes[3] + "|" + imagenes[4] + "|" + \
imagenes[5]
else:
if imagenes[1] != check_fanart and imagenes[1] != item.fanart:
item.extra = imagenes[1] + "|" + imagenes[3] + "|" + imagenes[4] + "|" + imagenes[5] + "|" + \
imagenes[2]
elif imagenes[2] != check_fanart:
item.extra = imagenes[2] + "|" + imagenes[3] + "|" + imagenes[4] + "|" + imagenes[5] + "|" + \
imagenes[1]
else:
item.extra = imagenes[3] + "|" + imagenes[4] + "|" + imagenes[5] + "|" + imagenes[2] + "|" + \
imagenes[1]
elif len(imagenes) == 5:
if imagenes[0] != check_fanart:
item.fanart = imagenes[0]
else:
item.fanart = imagenes[1]
if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart:
item.extra = imagenes[1] + "|" + imagenes[2] + "|" + imagenes[3] + "|" + imagenes[4]
else:
if imagenes[1] != check_fanart and imagenes[1] != item.fanart:
item.extra = imagenes[1] + "|" + imagenes[3] + "|" + imagenes[4] + "|" + imagenes[2]
elif imagenes[2] != check_fanart:
item.extra = imagenes[2] + "|" + imagenes[3] + "|" + imagenes[4] + "|" + imagenes[1]
else:
item.extra = imagenes[3] + "|" + imagenes[4] + "|" + imagenes[2] + "|" + imagenes[1]
elif len(imagenes) == 4:
if imagenes[0] != check_fanart:
item.fanart = imagenes[0]
else:
item.fanart = imagenes[1]
if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart:
item.extra = imagenes[1] + "|" + imagenes[2] + "|" + imagenes[3] + "|" + imagenes[4]
else:
if imagenes[1] != check_fanart and imagenes[1] != item.fanart:
item.extra = imagenes[1] + "|" + imagenes[3] + "|" + imagenes[2]
elif imagenes[2] != check_fanart:
item.extra = imagenes[2] + "|" + imagenes[3] + "|" + imagenes[1]
else:
item.extra = imagenes[3] + "|" + imagenes[2] + "|" + imagenes[1]
elif len(imagenes) == 3:
if imagenes[0] != check_fanart:
item.fanart = imagenes[0]
else:
item.fanart = imagenes[1]
if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart:
item.extra = imagenes[1] + "|" + imagenes[2]
else:
if imagenes[1] != check_fanart and imagenes[1] != item.fanart:
item.extra = imagenes[0] + "|" + imagenes[1]
elif imagenes[2] != check_fanart:
item.extra = imagenes[1] + "|" + imagenes[2]
else:
item.extra = imagenes[1] + "|" + imagenes[1]
elif len(imagenes) == 2:
if imagenes[0] != check_fanart:
item.fanart = imagenes[0]
else:
item.fanart = imagenes[1]
if imagenes[1] != check_fanart and imagenes[1] != item.fanart:
item.extra = imagenes[0] + "|" + imagenes[1]
else:
item.extra = imagenes[1] + "|" + imagenes[0]
elif len(imagenes) == 1:
item.extra = imagenes[0] + "|" + imagenes[0]
else:
item.extra = item.fanart + "|" + item.fanart
item.extra = item.extra
images_fanarttv = fanartv(item, id_tvdb, id)
if images_fanarttv:
if item.contentType == "movie":
if images_fanarttv.get("moviedisc"):
item.thumbnail = images_fanarttv.get("moviedisc")[0].get("url")
elif images_fanarttv.get("hdmovielogo"):
item.thumbnail = images_fanarttv.get("hdmovielogo")[0].get("url")
elif images_fanarttv.get("moviethumb"):
item.thumbnail = images_fanarttv.get("moviethumb")[0].get("url")
elif images_fanarttv.get("moviebanner"):
item.thumbnail_ = images_fanarttv.get("moviebanner")[0].get("url")
else:
item.thumbnail = item.thumbnail
else:
if images_fanarttv.get("hdtvlogo"):
item.thumbnail = images_fanarttv.get("hdtvlogo")[0].get("url")
elif images_fanarttv.get("clearlogo"):
item.thumbnail = images_fanarttv.get("hdmovielogo")[0].get("url")
item.thumb_info = item.thumbnail
if images_fanarttv.get("tvbanner"):
item.thumb_art = images_fanarttv.get("tvbanner")[0].get("url")
elif images_fanarttv.get("tvthumb"):
item.thumb_art = images_fanarttv.get("tvthumb")[0].get("url")
else:
item.thumb_art = item.thumbnail
else:
item.extra = item.extra + "|" + item.thumbnail
def get_year(url):
data = httptools.downloadpage(url, headers=header, cookies=False).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
@@ -984,7 +444,6 @@ def ext_size(url):
pepe = open(torrents_path + "/temp.torrent", "rb").read()
except:
pepe = ""
torrent = decode(pepe)
try:
name = torrent["info"]["name"]
@@ -1021,25 +480,22 @@ def ext_size(url):
size = ""
return ext_v, size
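ext_size() downloads the .torrent, bencode-decodes it, and reports the video extension plus a human-readable size. Under the standard torrent layout (single-file torrents carry info.length; multi-file ones list per-file lengths in info.files), the mapping reduces to this sketch; the field names come from the BitTorrent spec, not from this diff:
def torrent_ext_size(torrent):
    # torrent: the dict produced by decode() above.
    info = torrent.get("info", {})
    name = info.get("name", "")
    ext_v = name[name.rfind("."):] if "." in name else ""
    total = info.get("length") or sum(f["length"] for f in info.get("files", []))
    return ext_v, convert_size(total)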
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == 'torrent':
item.url = 'http://www.divxtotal.com/peliculas/'
item.url = host + '/peliculas/'
item.contentType="movie"
itemlist = scraper(item)
if itemlist[-1].title == "[COLOR springgreen][B]Siguiente >>[/B][/COLOR]":
itemlist.pop()
# Catch the exception so a failing channel does not interrupt the 'newest' channel
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist

View File

@@ -1,47 +0,0 @@
{
"id": "documaniatv",
"name": "DocumaniaTV",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "http://i.imgur.com/qMR9sg9.png",
"banner": "documaniatv.png",
"categories": [
"documentary"
],
"settings": [
{
"id": "include_in_newest_documentales",
"type": "bool",
"label": "Incluir en Novedades - Documentales",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "documaniatvaccount",
"type": "bool",
"label": "Usar cuenta de documaniatv",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "documaniatvuser",
"type": "text",
"label": "Usuario",
"color": "0xFFd50b0b",
"enabled": "eq(-1,true)",
"visible": true
},
{
"id": "documaniatvpassword",
"type": "text",
"label": "Contraseña",
"color": "0xFFd50b0b",
"enabled": "!eq(-1,)+eq(-2,true)",
"visible": true,
"hidden": true
}
]
}

View File

@@ -1,373 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urllib
import urlparse
from core import jsontools
from core import scrapertools
from core.item import Item
from platformcode import config, logger
host = "http://www.documaniatv.com/"
account = config.get_setting("documaniatvaccount", "documaniatv")
headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
['Referer', host]]
def login():
logger.info()
user = config.get_setting("documaniatvuser", "documaniatv")
password = config.get_setting("documaniatvpassword", "documaniatv")
if user == "" or password == "":
return True, ""
data = scrapertools.cachePage(host, headers=headers)
if "http://www.documaniatv.com/user/" + user in data:
return False, user
post = "username=%s&pass=%s&Login=Iniciar Sesión" % (user, password)
data = scrapertools.cachePage("http://www.documaniatv.com/login.php", headers=headers, post=post)
if "Nombre de usuario o contraseña incorrectas" in data:
logger.error("login erróneo")
return True, ""
return False, user
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(item.clone(action="novedades", title="Novedades", url="http://www.documaniatv.com/newvideos.html"))
itemlist.append(
item.clone(action="categorias", title="Categorías y Canales", url="http://www.documaniatv.com/browse.html"))
itemlist.append(item.clone(action="novedades", title="Top", url="http://www.documaniatv.com/topvideos.html"))
itemlist.append(item.clone(action="categorias", title="Series Documentales",
url="http://www.documaniatv.com/top-series-documentales-html"))
itemlist.append(item.clone(action="viendo", title="Viendo ahora", url="http://www.documaniatv.com"))
itemlist.append(item.clone(action="", title=""))
itemlist.append(item.clone(action="search", title="Buscar"))
folder = False
action = ""
if account:
error, user = login()
if error:
title = "Playlists Personales (Error en usuario y/o contraseña)"
else:
title = "Playlists Personales (Logueado)"
action = "usuario"
folder = True
else:
title = "Playlists Personales (Sin cuenta configurada)"
user = ""
url = "http://www.documaniatv.com/user/%s" % user
itemlist.append(item.clone(title=title, action=action, url=url, folder=folder))
itemlist.append(item.clone(title="Configurar canal...", text_color="gold", action="configuracion",
folder=False))
return itemlist
def configuracion(item):
from platformcode import platformtools
platformtools.show_channel_settings()
if config.is_xbmc():
import xbmc
xbmc.executebuiltin("Container.Refresh")
def newest(categoria):
itemlist = []
item = Item()
try:
if categoria == 'documentales':
item.url = "http://www.documaniatv.com/newvideos.html"
itemlist = novedades(item)
if itemlist[-1].action == "novedades":
itemlist.pop()
# Catch the exception so a failing channel does not interrupt the 'newest' channel
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def search(item, texto):
logger.info()
data = scrapertools.cachePage(host, headers=headers)
item.url = scrapertools.find_single_match(data, 'form action="([^"]+)"') + "?keywords=%s&video-id="
texto = texto.replace(" ", "+")
item.url = item.url % texto
try:
return novedades(item)
# Catch the exception so a failing channel does not interrupt the global search
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def novedades(item):
logger.info()
itemlist = []
# Download the page
data = scrapertools.cachePage(item.url, headers=headers)
# Extract the plot if there is one
scrapedplot = scrapertools.find_single_match(data, '<div class="pm-section-head">(.*?)</div>')
if "<div" in scrapedplot:
scrapedplot = ""
else:
scrapedplot = scrapertools.htmlclean(scrapedplot)
bloque = scrapertools.find_multiple_matches(data, '<li class="col-xs-[\d] col-sm-[\d] col-md-[\d]">(.*?)</li>')
if "Registrarse" in data or not account:
for match in bloque:
patron = '<span class="pm-label-duration">(.*?)</span>.*?<a href="([^"]+)"' \
'.*?title="([^"]+)".*?data-echo="([^"]+)"'
matches = scrapertools.find_multiple_matches(match, patron)
for duracion, scrapedurl, scrapedtitle, scrapedthumbnail in matches:
contentTitle = scrapedtitle[:]
scrapedtitle += " [" + duracion + "]"
if not scrapedthumbnail.startswith("data:image"):
scrapedthumbnail += "|" + headers[0][0] + "=" + headers[0][1]
else:
scrapedthumbnail = item.thumbnail
logger.debug(
"title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
itemlist.append(item.clone(action="play_", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot,
fulltitle=scrapedtitle, contentTitle=contentTitle, folder=False))
else:
for match in bloque:
patron = '<span class="pm-label-duration">(.*?)</span>.*?onclick="watch_later_add\(([\d]+)\)' \
'.*?<a href="([^"]+)".*?title="([^"]+)".*?data-echo="([^"]+)"'
matches = scrapertools.find_multiple_matches(match, patron)
for duracion, video_id, scrapedurl, scrapedtitle, scrapedthumbnail in matches:
contentTitle = scrapedtitle[:]
scrapedtitle += " [" + duracion + "]"
if not scrapedthumbnail.startswith("data:image"):
scrapedthumbnail += "|" + headers[0][0] + "=" + headers[0][1]
else:
scrapedthumbnail = item.thumbnail
logger.debug(
"title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
itemlist.append(item.clone(action="findvideos", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot,
id=video_id,
fulltitle=scrapedtitle, contentTitle=contentTitle))
# Look for next-page links...
try:
next_page_url = scrapertools.get_match(data, '<a href="([^"]+)">&raquo;</a>')
next_page_url = urlparse.urljoin(host, next_page_url)
itemlist.append(item.clone(action="novedades", title=">> Página siguiente", url=next_page_url))
except:
logger.error("Siguiente pagina no encontrada")
return itemlist
def categorias(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url, headers=headers)
patron = '<div class="pm-li-category">.*?<a href="([^"]+)"' \
'.*?<img src="([^"]+)".*?<h3>(?:<a.*?><span.*?>|)(.*?)<'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
if not scrapedthumbnail.startswith("data:image"):
scrapedthumbnail += "|" + headers[0][0] + "=" + headers[0][1]
else:
scrapedthumbnail = item.thumbnail
itemlist.append(item.clone(action="novedades", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
fanart=scrapedthumbnail))
# Look for next-page links...
next_page_url = scrapertools.find_single_match(data, '<a href="([^"]+)"><i class="fa fa-arrow-right">')
if next_page_url != "":
itemlist.append(item.clone(action="categorias", title=">> Página siguiente", url=next_page_url))
return itemlist
def viendo(item):
logger.info()
itemlist = []
# Download the page
data = scrapertools.cachePage(item.url, headers=headers)
bloque = scrapertools.find_single_match(data, '<ul class="pm-ul-carousel-videos list-inline"(.*?)</ul>')
patron = '<span class="pm-label-duration">(.*?)</span>.*?<a href="([^"]+)"' \
'.*?title="([^"]+)".*?data-echo="([^"]+)"'
matches = scrapertools.find_multiple_matches(bloque, patron)
for duracion, scrapedurl, scrapedtitle, scrapedthumbnail in matches:
scrapedtitle += " [" + duracion + "]"
if not scrapedthumbnail.startswith("data:image"):
scrapedthumbnail += "|" + headers[0][0] + "=" + headers[0][1]
else:
scrapedthumbnail = item.thumbnail
logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
itemlist.append(item.clone(action="play_", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
fanart=scrapedthumbnail, fulltitle=scrapedtitle))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
# Check whether the video is already in favourites/watch later
url = "http://www.documaniatv.com/ajax.php?p=playlists&do=video-watch-load-my-playlists&video-id=%s" % item.id
data = scrapertools.cachePage(url, headers=headers)
data = jsontools.load(data)
data = re.sub(r"\n|\r|\t", '', data['html'])
itemlist.append(item.clone(action="play_", title=">> Reproducir vídeo", folder=False))
if "kodi" in config.get_platform():
folder = False
else:
folder = True
patron = '<li data-playlist-id="([^"]+)".*?onclick="playlist_(\w+)_item' \
'.*?<span class="pm-playlists-name">(.*?)</span>.*?' \
'<span class="pm-playlists-video-count">(.*?)</span>'
matches = scrapertools.find_multiple_matches(data, patron)
for playlist_id, playlist_action, playlist_title, video_count in matches:
scrapedtitle = playlist_action.replace('remove', 'Eliminar de ').replace('add', 'Añadir a ')
scrapedtitle += playlist_title + " (" + video_count + ")"
itemlist.append(item.clone(action="acciones_playlist", title=scrapedtitle, list_id=playlist_id,
url="http://www.documaniatv.com/ajax.php", folder=folder))
if "kodi" in config.get_platform():
itemlist.append(item.clone(action="acciones_playlist", title="Crear una nueva playlist y añadir el documental",
id=item.id, url="http://www.documaniatv.com/ajax.php", folder=folder))
itemlist.append(item.clone(action="acciones_playlist", title="Me gusta", url="http://www.documaniatv.com/ajax.php",
folder=folder))
return itemlist
def play_(item):
logger.info()
itemlist = []
try:
import xbmc
if not xbmc.getCondVisibility('System.HasAddon(script.cnubis)'):
from platformcode import platformtools
platformtools.dialog_ok("Addon no encontrado",
"Para ver vídeos alojados en cnubis necesitas tener su instalado su add-on",
line3="Descárgalo en http://cnubis.com/kodi-pelisalacarta.html")
return itemlist
except:
pass
# Download the page
data = scrapertools.cachePage(item.url, headers=headers)
# Look for a direct link
video_url = scrapertools.find_single_match(data, 'class="embedded-video"[^<]+<iframe.*?src="([^"]+)"')
if config.get_platform() == "plex" or config.get_platform() == "mediaserver":
code = scrapertools.find_single_match(video_url, 'u=([A-Za-z0-9]+)')
url = "http://cnubis.com/plugins/mediaplayer/embeder/_embedkodi.php?u=%s" % code
data = scrapertools.downloadpage(url, headers=headers)
video_url = scrapertools.find_single_match(data, 'file\s*:\s*"([^"]+)"')
itemlist.append(item.clone(action="play", url=video_url, server="directo"))
return itemlist
cnubis_script = xbmc.translatePath("special://home/addons/script.cnubis/default.py")
xbmc.executebuiltin("XBMC.RunScript(%s, url=%s&referer=%s&title=%s)"
% (cnubis_script, urllib.quote_plus(video_url), urllib.quote_plus(item.url),
item.fulltitle))
return itemlist
def usuario(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url, headers=headers)
profile_id = scrapertools.find_single_match(data, 'data-profile-id="([^"]+)"')
url = "http://www.documaniatv.com/ajax.php?p=profile&do=profile-load-playlists&uid=%s" % profile_id
data = scrapertools.cachePage(url, headers=headers)
data = jsontools.load(data)
data = data['html']
patron = '<div class="pm-video-thumb">.*?src="([^"]+)".*?' \
'<span class="pm-pl-items">(.*?)</span>(.*?)</div>' \
'.*?<h3.*?href="([^"]+)".*?title="([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedthumbnail, items, videos, scrapedurl, scrapedtitle in matches:
scrapedtitle = scrapedtitle.replace("Historia", 'Historial')
scrapedtitle += " (" + items + videos + ")"
if "no-thumbnail" in scrapedthumbnail:
scrapedthumbnail = ""
else:
scrapedthumbnail += "|" + headers[0][0] + "=" + headers[0][1]
itemlist.append(item.clone(action="playlist", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, fanart=scrapedthumbnail))
return itemlist
def acciones_playlist(item):
logger.info()
itemlist = []
if item.title == "Crear una nueva playlist y añadir el documental":
from platformcode import platformtools
texto = platformtools.dialog_input(heading="Introduce el título de la nueva playlist")
if texto is not None:
post = "p=playlists&do=create-playlist&title=%s&visibility=1&video-id=%s&ui=video-watch" % (texto, item.id)
data = scrapertools.cachePage(item.url, headers=headers, post=post)
else:
return
elif item.title != "Me gusta":
if "Eliminar" in item.title:
action = "remove-from-playlist"
else:
action = "add-to-playlist"
post = "p=playlists&do=%s&playlist-id=%s&video-id=%s" % (action, item.list_id, item.id)
data = scrapertools.cachePage(item.url, headers=headers, post=post)
else:
item.url = "http://www.documaniatv.com/ajax.php?vid=%s&p=video&do=like" % item.id
data = scrapertools.cachePage(item.url, headers=headers)
try:
import xbmc
from platformcode import platformtools
platformtools.dialog_notification(item.title, "Se ha añadido/eliminado correctamente")
xbmc.executebuiltin("Container.Refresh")
except:
itemlist.append(item.clone(action="", title="Se ha añadido/eliminado correctamente"))
return itemlist
def playlist(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url, headers=headers)
patron = '<div class="pm-pl-list-index.*?src="([^"]+)".*?' \
'<a href="([^"]+)".*?>(.*?)</a>'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
scrapedthumbnail += "|" + headers[0][0] + "=" + headers[0][1]
logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
itemlist.append(item.clone(action="play_", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
fanart=scrapedthumbnail, fulltitle=scrapedtitle, folder=False))
return itemlist

View File

@@ -8,5 +8,15 @@
"banner": "gnula.png",
"categories": [
"movie"
]
}
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,14 +1,14 @@
# -*- coding: utf-8 -*-
import re
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
host = "http://gnula.nu/"
host_search = "https://www.googleapis.com/customsearch/v1element?key=AIzaSyCVAXiUzRYsML1Pv6RwSG1gunmMikTzQqY&rsz=small&num=10&hl=es&prettyPrint=false&source=gcsc&gss=.es&sig=45e50696e04f15ce6310843f10a3a8fb&cx=014793692610101313036:vwtjajbclpq&q=%s&cse_tok=AOdTmaBgzSiy5RxoV4cZSGGEr17reWoGLg:1519145966291&googlehost=www.google.com&callback=google.search.Search.apiary10745&nocache=1519145965573&start=0"
def mainlist(item):
logger.info()
@@ -19,43 +19,87 @@ def mainlist(item):
Item(channel=item.channel, title="Generos", action="generos", url= host + "generos/lista-de-generos/"))
itemlist.append(Item(channel=item.channel, title="Recomendadas", action="peliculas",
url= host + "peliculas-online/lista-de-peliculas-recomendadas/", viewmode="movie"))
itemlist.append(Item(channel = item.channel, action = ""))
itemlist.append(
Item(channel=item.channel, title="Buscar", action="search", url = host_search))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url %texto
try:
return sub_search(item)
# Catch the exception so a failing channel does not break the global search
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def sub_search(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '(?s)clicktrackUrl":".*?q=(.*?)".*?'
patron += 'title":"([^"]+)".*?'
patron += 'cseImage":{"src":"([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
scrapedurl = scrapertools.find_single_match(scrapedurl, ".*?online/")
scrapedtitle = scrapedtitle.decode("unicode-escape").replace(" online", "").replace("<b>", "").replace("</b>", "")
if "ver-" not in scrapedurl:
continue
year = scrapertools.find_single_match(scrapedtitle, "\d{4}")
contentTitle = scrapedtitle.replace("(%s)" %year,"").replace("Ver","").strip()
itemlist.append(Item(action = "findvideos",
channel = item.channel,
contentTitle = contentTitle,
infoLabels = {"year":year},
title = scrapedtitle,
thumbnail = scrapedthumbnail,
url = scrapedurl
))
if itemlist:
page = int(scrapertools.find_single_match(item.url, ".*?start=(\d+)")) + 10
npage = (page / 10) + 1
item_page = scrapertools.find_single_match(item.url, "(.*?start=)") + str(page)
itemlist.append(Item(action = "sub_search",
channel = item.channel,
title = "[COLOR green]Página %s[/COLOR]" %npage,
url = item_page
))
return itemlist
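The paging arithmetic above advances the Google CSE 'start' offset in steps of 10 and derives the visible page number from the new offset. A quick worked example (Python 2 integer division, as in this channel):
for start in (0, 10, 20):
    page = start + 10
    npage = (page / 10) + 1
    print "start=%d -> next offset %d, label 'Pagina %d'" % (start, page, npage)
# start=0 -> next offset 10, label 'Pagina 2', and so on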
def generos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.find_single_match(data, '<spa[^>]+>Lista de g(.*?)/table')
patron = '<strong>([^<]+)</strong> .<a href="([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for genero, scrapedurl in matches:
title = scrapertools.htmlclean(genero)
plot = ""
url = item.url + scrapedurl
thumbnail = ""
itemlist.append(Item(channel = item.channel,
action = 'peliculas',
title = title,
url = url,
thumbnail = thumbnail,
plot = plot,
viewmode = "movie"))
itemlist = sorted(itemlist, key=lambda item: item.title)
return itemlist
def peliculas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<a class="Ntooltip" href="([^"]+)">([^<]+)<span><br[^<]+'
patron += '<img src="([^"]+)"></span></a>(.*?)<br'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle, scrapedthumbnail, resto in matches:
language = []
plot = scrapertools.htmlclean(resto).strip()
@@ -110,6 +154,13 @@ def findvideos(item):
url = url
))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
if itemlist:
if config.get_videolibrary_support():
itemlist.append(Item(channel = item.channel, action = ""))
itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
fulltitle = item.contentTitle
))
return itemlist

View File

@@ -134,7 +134,7 @@ def listado(item):
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)
folder = filetools.join(config.get_data_path(), 'thumbs_kbagi')
patron = 'data-file-id(.*?</p>)</div></div>'
bloques = scrapertools.find_multiple_matches(data, patron)
for block in bloques:
if "adult_info" in block and not adult_content:
@@ -184,8 +184,7 @@ def listado(item):
new_item.fanart = item.thumbnail
itemlist.append(new_item)
next_page = scrapertools.find_single_match(data, 'class="pageSplitter.*?" data-nextpage-number="([^"]+)"')
if next_page:
if item.post:
post = re.sub(r'pageNumber=(\d+)', "pageNumber=" + next_page, item.post)

View File

@@ -4,349 +4,97 @@ import re
import urllib
import urlparse
from core import servertools
from core import scrapertools
from core.item import Item
from platformcode import logger
from core import httptools
Host='http://www.tvsinpagar.com'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, action="submenu", title="Películas",url=Host+"/peliculas/"))
itemlist.append(Item(channel=item.channel, action="submenu", title="Series",url=Host+"/series/"))
#itemlist.append(Item(channel=item.channel, action="listado", title="Anime", url=Host+"/anime/",
# viewmode="movie_with_plot"))
#itemlist.append(
# Item(channel=item.channel, action="listado", title="Documentales", url=Host+"/documentales/",
# viewmode="movie_with_plot"))
#itemlist.append(Item(channel=item.channel, action="search", title="Buscar"))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = "http://www.newpct.com/buscar-descargas/%s" % (texto)
try:
return buscador(item)
# Catch the exception so a failing channel does not break the global search
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def buscador(item):
logger.info()
itemlist = []
# Download the page
data = scrapertools.cache_page(item.url)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
# <td class="center" style="border-bottom:solid 1px cyan;">14-09-14</td><td style="border-bottom:solid 1px cyan;"><strong><a href="http://www.newpct.com/descargar-pelicula/malefica-3d-sbs/" title="M&aacute;s informaci&oacute;n sobre Malefica 3D SBS [BluRay 1080p][DTS 5.1-AC3 5.1 Castellano DTS 5.1-Ingles+Subs][ES-EN]"> <span class="searchTerm">Malefica</span> 3D SBS [BluRay 1080p][DTS 5.1-AC3 5.1 Castellano DTS 5.1-Ingles+Subs][ES-EN]</a></strong></td><td class="center" style="border-bottom:solid 1px cyan;">10.9 GB</td><td style="border-bottom:solid 1px cyan;"><a href="http://tumejorserie.com/descargar/index.php?link=torrents/059784.torrent" title="Descargar Malefica 3D SBS [BluRay 1080p][DTS 5.1-AC3 5.1 Castellano DTS 5.1-Ingles+Subs][ES-EN]"><img src="http://newpct.com/v2/imagenes//buttons/download.png"
patron = '<td class="center" style="border-bottom:solid 1px cyan;">([^<]+)</td>.*?' # createdate
patron += '<td class="center" style="border-bottom:solid 1px cyan;">([^<]+)</td>.*?' # info
patron += '<a href="([^"]+)" ' # url
patron += 'title="Descargar([^"]+)">' # title
patron += '<img src="([^"]+)"' # thumbnail
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedcreatedate, scrapedinfo, scrapedurl, scrapedtitle, scrapedthumbnail in matches:
scrapedtitle = scrapedtitle + "(Tamaño:" + scrapedinfo + "--" + scrapedcreatedate + ")"
itemlist.append(Item(channel=item.channel, title=scrapedtitle, url=scrapedurl, action="play", server="torrent",
thumbnail=scrapedthumbnail, fulltitle=scrapedtitle, folder=True))
from core import servertools
itemlist.extend(servertools.find_video_items(data=data))
for videoitem in itemlist:
videoitem.channel = item.channel
videoitem.action = "play"
videoitem.folder = False
return itemlist
def submenu(item):
logger.info()
itemlist = []
if item.title == "Películas":
itemlist.append(Item(channel=item.channel, action="listado", title="Peliculas DVDRIP-BRRIP Castellano",
url="http://www.newpct.com/peliculas-castellano/peliculas-rip/",
viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, action="listado", title="Peliculas Latino",
url="http://www.newpct.com/peliculas-latino/", viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, action="listado", title="Estrenos de Cine Castellano",
url="http://www.newpct.com/peliculas-castellano/estrenos-de-cine/",
viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, action="listado", title="Peliculas Alta Definicion HD",
url="http://www.newpct.com/cine-alta-definicion-hd/", viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, action="listado", title="Peliculas en 3D HD",
url="http://www.newpct.com/peliculas-en-3d-hd/", viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, action="listado", title="Peliculas DVDFULL",
url="http://www.newpct.com/peliculas-castellano/peliculas-dvd/",
viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, action="listado", title="Peliculas V.O.Subtituladas",
url="http://www.newpct.com/peliculas-vo/", viewmode="movie_with_plot"))
else:
itemlist.append(
Item(channel=item.channel, action="listado", title="HDTV Castellano", url="http://www.newpct.com/series/",
category="serie", viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, action="listado", title="Miniseries Castellano",
url="http://www.newpct.com/miniseries-es/", viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, action="listado", title="Series TV - V.O.S.E",
url="http://www.newpct.com/series-vo/", category="serie", viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, action="listado", title="Últimos Capítulos HD",
url="http://www.newpct.com/series-alta-definicion-hd/", category="serie",
viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, action="series", title="Series HD [A-Z]",
url="http://www.newpct.com/index.php?l=torrentListByCategory&subcategory_s=1469&more=listar",
category="serie"))
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<li><a href="'+item.url+'"><i.+?<ul>(.+?)<\/ul>' #Filtrado por url
data_cat = scrapertools.find_single_match(data, patron)
patron_cat='<li><a href="(.+?)" title="(.+?)".+?<\/a><\/li>'
matches = scrapertools.find_multiple_matches(data_cat, patron_cat)
for scrapedurl, scrapedtitle in matches:
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl,action="listado"))
return itemlist
def listado(item):
logger.info()
itemlist = []
data = scrapertools.cache_page(item.url)
'''
<li>
<a href='http://www.newpct.com/descargar-pelicula/la-pequena-venecia/'>
<div class='boxgrid captionb'>
<img src='http://images.newpct.com/banco_de_imagenes/destacados/038707/la-pequeña-venecia--dvdrip--ac3-5-1-español-castellano--2012-.jpg' alt='Descargar Peliculas Castellano &raquo; Películas RIP La Pequeña Venecia [DVDrip][AC3 5.1 Español Castellano][2012]' />
<div class='cover boxcaption'>
<h3>La Pequeña Venecia </h3>
<p>Peliculas Castellano<br/>
Calidad: DVDRIP AC3 5.1<br>
Tama&ntilde;o: 1.1 GB<br>
Idioma : Español Castellano
</p>
</div>
</div>
</a>
<div id='bot-desc'>
<div id='tinfo'>
<a class='youtube' href='#' rel='gx9EKDC0UFQ' title='Ver Trailer' alt='Ver Trailer'>
<img style='width:25px;' src='http://www.newpct.com/images.inc/images/playm2.gif'></a>
</div>
<div id='tdescargar' ><a class='atdescargar' href='http://www.newpct.com/descargar-pelicula/la-pequena-venecia/'>DESCARGAR</a></div>
</div>
</li>
'''
patron = "<li[^<]+"
patron += "<a href='([^']+)'[^<]+"
patron += "<div class='boxgrid captionb'[^<]+"
patron += "<img src='([^']+)'[^<]+"
patron += "<div class='cover boxcaption'[^<]+"
patron += '<h3>([^<]+)</h3>(.*?)</div>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedplot in matches:
title = scrapedtitle.strip()
title = unicode(title, "iso-8859-1", errors="replace").encode("utf-8")
url = urlparse.urljoin(item.url, scrapedurl)
thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
plot = scrapertools.htmlclean(scrapedplot).strip()
plot = unicode(plot, "iso-8859-1", errors="replace").encode("utf-8")
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
if item.category == "serie":
itemlist.append(
Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail, plot=plot))
else:
itemlist.append(
Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot,
contentTitle=title))
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron_data='<ul class="pelilist">(.+?)</ul>'
data_listado = scrapertools.find_single_match(data, patron_data)
patron_listado='<li><a href="(.+?)" title=".+?"><img src="(.+?)".+?><h2'
if 'Serie' in item.title:
patron_listado+='.+?>'
else:
patron_listado+='>'
patron_listado+='(.+?)<\/h2><span>(.+?)<\/span><\/a><\/li>'
matches = scrapertools.find_multiple_matches(data_listado, patron_listado)
for scrapedurl, scrapedthumbnail,scrapedtitle,scrapedquality in matches:
if 'Serie' in item.title:
action="episodios"
else:
action="findvideos"
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl,thumbnail=scrapedthumbnail, action=action, quality=scrapedquality,show=scrapedtitle))
# Next page
'''
GET /include.inc/ajax.php/orderCategory.php?type=todo&leter=&sql=SELECT+DISTINCT+++%09%09%09%09%09%09torrentID%2C+++%09%09%09%09%09%09torrentCategoryID%2C+++%09%09%09%09%09%09torrentCategoryIDR%2C+++%09%09%09%09%09%09torrentImageID%2C+++%09%09%09%09%09%09torrentName%2C+++%09%09%09%09%09%09guid%2C+++%09%09%09%09%09%09torrentShortName%2C++%09%09%09%09%09%09torrentLanguage%2C++%09%09%09%09%09%09torrentSize%2C++%09%09%09%09%09%09calidad+as+calidad_%2C++%09%09%09%09%09%09torrentDescription%2C++%09%09%09%09%09%09torrentViews%2C++%09%09%09%09%09%09rating%2C++%09%09%09%09%09%09n_votos%2C++%09%09%09%09%09%09vistas_hoy%2C++%09%09%09%09%09%09vistas_ayer%2C++%09%09%09%09%09%09vistas_semana%2C++%09%09%09%09%09%09vistas_mes++%09%09%09%09++FROM+torrentsFiles+as+t+WHERE++(torrentStatus+%3D+1+OR+torrentStatus+%3D+2)++AND+(torrentCategoryID+IN+(1537%2C+758%2C+1105%2C+760%2C+1225))++++ORDER+BY+torrentDateAdded++DESC++LIMIT+0%2C+50&pag=3&tot=&ban=3&cate=1225 HTTP/1.1
Host: www.newpct.com
User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:19.0) Gecko/20100101 Firefox/19.0
Accept: */*
Accept-Language: es-ES,es;q=0.8,en-US;q=0.5,en;q=0.3
Accept-Encoding: gzip, deflate
X-Requested-With: XMLHttpRequest
Referer: http://www.newpct.com/peliculas-castellano/peliculas-rip/
Cookie: adbooth_popunder=5%7CSat%2C%2009%20Mar%202013%2018%3A23%3A22%20GMT
Connection: keep-alive
'''
'''
function orderCategory(type,leter,pag,other)
{
if(leter=='buscar')
{
leter = document.getElementById('word').value;
}
if(type=='todo')
{
document.getElementById('todo').className = "active_todo";
}
if(type=='letter')
{
switch(leter)
{
case '09':
document.getElementById('09').className = "active_num";
break;
default:
document.getElementById(leter).className = "active_a";
break;
}
}
var parametros = {
"type" : type,
"leter" : leter,
"sql" : "SELECT DISTINCT torrentID, torrentCategoryID, torrentCategoryIDR, torrentImageID, torrentName, guid, torrentShortName, torrentLanguage, torrentSize, calidad as calidad_, torrentDescription, torrentViews, rating, n_votos, vistas_hoy, vistas_ayer, vistas_semana, vistas_mes FROM torrentsFiles as t WHERE (torrentStatus = 1 OR torrentStatus = 2) AND (torrentCategoryID IN (1537, 758, 1105, 760, 1225)) ORDER BY torrentDateAdded DESC LIMIT 0, 50",
"pag" : pag,
"tot" : '',
"ban" : '3',
"other": other,
"cate" : '1225'
};
//alert(type+leter);
$('#content-category').html('<div style="margin:100px auto;width:100px;height:100px;"><img src="http://www.newpct.com/images.inc/images/ajax-loader.gif"/></div>');
var page = $(this).attr('data');
var dataString = 'page='+page;
$.ajax({
type: "GET",
url: 'http://www.newpct.com/include.inc/ajax.php/orderCategory.php',
data: parametros,
success: function(data) {
//Cargamos finalmente el contenido deseado
$('#content-category').fadeIn(1000).html(data);
}
});
}
'''
if item.extra != "":
bloque = item.extra
else:
bloque = scrapertools.get_match(data, "function orderCategory(.*?)\}\)\;")
logger.info("bloque=" + bloque)
param_type = scrapertools.get_match(data, "<a href='javascript:;' onclick=\"orderCategory\('([^']+)'[^>]+> >> </a>")
logger.info("param_type=" + param_type)
param_leter = scrapertools.get_match(data,
"<a href='javascript:;' onclick=\"orderCategory\('[^']+','([^']*)'[^>]+> >> </a>")
logger.info("param_leter=" + param_leter)
param_pag = scrapertools.get_match(data,
"<a href='javascript:;' onclick=\"orderCategory\('[^']+','[^']*','([^']+)'[^>]+> >> </a>")
logger.info("param_pag=" + param_pag)
param_total = scrapertools.get_match(bloque, '"total"\s*\:\s*\'([^\']+)')
logger.info("param_sql=" + param_total)
param_sql = scrapertools.get_match(bloque, '"sql"\s*\:\s*\'([^\']+)')
logger.info("param_sql=" + param_sql)
param_tot = scrapertools.get_match(bloque, "\"tot\"\s*\:\s*'([^']*)'")
logger.info("param_tot=" + param_tot)
param_ban = scrapertools.get_match(bloque, "\"ban\"\s*\:\s*'([^']+)'")
logger.info("param_ban=" + param_ban)
param_cate = scrapertools.get_match(bloque, "\"cate\"\s*\:\s*'([^']+)'")
logger.info("param_cate=" + param_cate)
base_url = scrapertools.get_match(bloque, "url\s*\:\s*'([^']+)'")
base_url = re.sub("../..", "http://www.newpct.com", base_url, count=1)
logger.info("base_url=" + base_url)
# http://www.newpct.com/include.inc/ajax.php/orderCategory.php?type=todo&leter=&sql=SELECT+DISTINCT+++%09%09%09%09%09%09torrentID%2C+++%09%09%09%09%09%09torrentCategoryID%2C+++%09%09%09%09%09%09torrentCategoryIDR%2C+++%09%09%09%09%09%09torrentImageID%2C+++%09%09%09%09%09%09torrentName%2C+++%09%09%09%09%09%09guid%2C+++%09%09%09%09%09%09torrentShortName%2C++%09%09%09%09%09%09torrentLanguage%2C++%09%09%09%09%09%09torrentSize%2C++%09%09%09%09%09%09calidad+as+calidad_%2C++%09%09%09%09%09%09torrentDescription%2C++%09%09%09%09%09%09torrentViews%2C++%09%09%09%09%09%09rating%2C++%09%09%09%09%09%09n_votos%2C++%09%09%09%09%09%09vistas_hoy%2C++%09%09%09%09%09%09vistas_ayer%2C++%09%09%09%09%09%09vistas_semana%2C++%09%09%09%09%09%09vistas_mes++%09%09%09%09++FROM+torrentsFiles+as+t+WHERE++(torrentStatus+%3D+1+OR+torrentStatus+%3D+2)++AND+(torrentCategoryID+IN+(1537%2C+758%2C+1105%2C+760%2C+1225))++++ORDER+BY+torrentDateAdded++DESC++LIMIT+0%2C+50&pag=3&tot=&ban=3&cate=1225
url_next_page = base_url + "?" + urllib.urlencode(
{"total": param_total, "type": param_type, "leter": param_leter, "sql": param_sql, "pag": param_pag,
"tot": param_tot, "ban": param_ban, "cate": param_cate})
logger.info("url_next_page=" + url_next_page)
if item.category == "serie":
itemlist.append(
Item(channel=item.channel, action="listado", title=">> Página siguiente", url=url_next_page, extra=bloque,
category="serie", viewmode="movie_with_plot"))
else:
itemlist.append(
Item(channel=item.channel, action="listado", title=">> Página siguiente", url=url_next_page, extra=bloque,
viewmode="movie_with_plot"))
patron_pag='<ul class="pagination"><li><a class="current" href=".+?">.+?<\/a>.+?<a href="(.+?)">'
siguiente = scrapertools.find_single_match(data, patron_pag)
itemlist.append(
Item(channel=item.channel, title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=siguiente, action="listado"))
return itemlist
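The replacement pagination above simply captures the first link that follows the anchor flagged as "current". A toy check of the pattern against simplified stand-in markup (not the site's exact HTML):
import re
html = '<ul class="pagination"><li><a class="current" href="/pg/1">1</a></li><li><a href="/pg/2">2</a></li></ul>'
patron_pag = '<ul class="pagination"><li><a class="current" href=".+?">.+?<\/a>.+?<a href="(.+?)">'
print re.search(patron_pag, html).group(1)  # -> /pg/2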
def series(item):
logger.info()
itemlist = []
# A-Z series list menu
data = scrapertools.cache_page(item.url)
patron = '<div id="content-abc">(.*?)<\/div>'
data = re.compile(patron, re.DOTALL | re.M).findall(data)
patron = 'id="([^"]+)".*?>([^"]+)<\/a>'
matches = re.compile(patron, re.DOTALL | re.M).findall(data[0])
for id, scrapedtitle in matches:
url_base = "http://www.newpct.com/include.inc/ajax.php/orderCategory.php?total=9&type=letter&leter=%s&sql=+%09%09SELECT++t.torrentID%2C++%09%09%09%09t.torrentCategoryID%2C++%09%09%09%09t.torrentCategoryIDR%2C++%09%09%09%09t.torrentImageID%2C++%09%09%09%09t.torrentName%2C++%09%09%09%09t.guid%2C++%09%09%09%09t.torrentShortName%2C+%09%09%09%09t.torrentLanguage%2C+%09%09%09%09t.torrentSize%2C+%09%09%09%09t.calidad+as+calidad_%2C+%09%09%09%09t.torrentDescription%2C+%09%09%09%09t.torrentViews%2C+%09%09%09%09t.rating%2C+%09%09%09%09t.n_votos%2C+%09%09%09%09t.vistas_hoy%2C+%09%09%09%09t.vistas_ayer%2C+%09%09%09%09t.vistas_semana%2C+%09%09%09%09t.vistas_mes%2C+%09%09%09%09t.imagen+FROM+torrentsFiles+as+t++%09%09LEFT+JOIN+torrentsCategories+as+tc+ON+(t.torrentCategoryID+%3D+tc.categoryID)++%09%09INNER+JOIN++%09%09(+%09%09%09SELECT+torrentID+%09%09%09FROM+torrentsFiles++%09%09%09WHERE++torrentCategoryIDR+%3D+1469+%09%09%09ORDER+BY+torrentID+DESC+%09%09)t1+ON+t1.torrentID+%3D+t.torrentID+WHERE+(t.torrentStatus+%3D+1+OR+t.torrentStatus+%3D+2)+AND+t.home_active+%3D+0++AND+tc.categoryIDR+%3D+1469+GROUP+BY+t.torrentCategoryID+ORDER+BY+t.torrentID+DESC+LIMIT+0%2C+50&pag=&tot=&ban=3&cate=1469"
scrapedurl = url_base.replace("%s", id)
if id != "todo": itemlist.append(
Item(channel=item.channel, action="listaseries", title=scrapedtitle, url=scrapedurl, folder=True))
return itemlist
def listaseries(item):
logger.info()
itemlist = []
data = scrapertools.downloadpageGzip(item.url)
patron = "<li[^<]+<a href='([^']+)'>.*?<img src='([^']+)'.*?<h3>([^']+)<\/h3>"
matches = re.compile(patron, re.DOTALL | re.M).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
itemlist.append(Item(channel=item.channel, action="episodios", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, folder=True))
return itemlist
def episodios(item):
logger.info()
itemlist = []
data = scrapertools.cache_page(item.url)
patron = "<ul style='display:none;'.*?>(.*?)<\/ul>"
data = re.compile(patron, re.DOTALL | re.M).findall(data)
patron = "<a href='([^']+)'.*?title='([^']+)'"
for index in range(len(data)):
matches = re.compile(patron, re.DOTALL | re.M).findall(data[index])
for scrapedurl, scrapedtitle in matches:
itemlist.append(Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl,
thumbnail=item.thumbnail, folder=True))
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron_data='<ul class="buscar-list">(.+?)</ul>'
data_listado = scrapertools.find_single_match(data, patron_data)
patron = '<img src="(.+?)" alt=".+?">.+?<div class=".+?">.+?<a href=(.+?)" title=".+?">.+?>Serie.+?>(.+?)<'
matches = scrapertools.find_multiple_matches(data_listado, patron)
for scrapedthumbnail,scrapedurl, scrapedtitle in matches:
if " al " in scrapedtitle:
#action="episodios"
titulo=scrapedurl.split('http')
scrapedurl="http"+titulo[1]
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl,thumbnail=scrapedthumbnail, action="findvideos", show=scrapedtitle))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = scrapertools.cache_page(item.url)
# <span id='content-torrent'> <a href='http://tumejorjuego.com/descargar/index.php?link=descargar/torrent/58591/el-tour-de-los-muppets-bluray-screener-espanol-castellano-line-2014.html' rel='nofollow' id='58591' title='el-tour-de-los-muppets-bluray-screener-espanol-castellano-line-2014' class='external-url' target='_blank'>
torrent_url = scrapertools.find_single_match(data, "<span id='content-torrent'[^<]+<a href='([^']+)'")
if torrent_url != "":
itemlist.append(Item(channel=item.channel, action="play", title="Torrent", url=torrent_url, server="torrent"))
from core import servertools
itemlist.extend(servertools.find_video_items(data=data))
for videoitem in itemlist:
videoitem.channel = item.channel
videoitem.action = "play"
videoitem.folder = False
videoitem.title = "[" + videoitem.server + "]"
new_item = []
data = httptools.downloadpage(item.url).data
itemlist = servertools.find_video_items(data = data)
url = scrapertools.find_single_match( data, 'location.href = "([^"]+)"')
new_item.append(Item(url = url, title = "Torrent", server = "torrent", action = "play"))
itemlist.extend(new_item)
for it in itemlist:
it.channel = item.channel
return itemlist

View File

@@ -1,470 +1,100 @@
# -*- coding: utf-8 -*-
import re
import urllib
import urlparse
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import tmdb
Host='http://descargas2020.com'
host = 'http://newpct1.com/'
def mainlist(item):
logger.info()
itemlist = []
thumb_pelis=get_thumb("channels_movie.png")
thumb_series=get_thumb("channels_tvshow.png")
thumb_search = get_thumb("search.png")
itemlist.append(Item(channel=item.channel, action="submenu", title="Películas", url=host,
extra="peliculas", thumbnail=thumb_pelis ))
itemlist.append(Item(channel=item.channel, action="submenu", title="Series", url=host, extra="series",
thumbnail=thumb_series))
itemlist.append(
Item(channel=item.channel, action="search", title="Buscar", url=host + "buscar", thumbnail=thumb_search))
itemlist.append(Item(channel=item.channel, action="submenu", title="Películas",url=Host+"/peliculas/"))
itemlist.append(Item(channel=item.channel, action="submenu", title="Series",url=Host+"/series/"))
#itemlist.append(Item(channel=item.channel, action="listado", title="Anime", url=Host+"/anime/",
# viewmode="movie_with_plot"))
#itemlist.append(
# Item(channel=item.channel, action="listado", title="Documentales", url=Host+"/documentales/",
# viewmode="movie_with_plot"))
#itemlist.append(Item(channel=item.channel, action="search", title="Buscar"))
return itemlist
def submenu(item):
logger.info()
itemlist = []
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
patron = '<li><a href="http://(?:www.)?newpct1.com/' + item.extra + '/">.*?<ul>(.*?)</ul>'
data = scrapertools.get_match(data, patron)
patron = '<a href="([^"]+)".*?>([^>]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<li><a href="'+item.url+'"><i.+?<ul>(.+?)<\/ul>' #Filtrado por url
data_cat = scrapertools.find_single_match(data, patron)
patron_cat='<li><a href="(.+?)" title="(.+?)".+?<\/a><\/li>'
matches = scrapertools.find_multiple_matches(data_cat, patron_cat)
for scrapedurl, scrapedtitle in matches:
title = scrapedtitle.strip()
url = scrapedurl
itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, extra="pelilist"))
itemlist.append(
Item(channel=item.channel, action="alfabeto", title=title + " [A-Z]", url=url, extra="pelilist"))
return itemlist
def alfabeto(item):
logger.info()
itemlist = []
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
patron = '<ul class="alfabeto">(.*?)</ul>'
data = scrapertools.get_match(data, patron)
patron = '<a href="([^"]+)"[^>]+>([^>]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
title = scrapedtitle.upper()
url = scrapedurl
itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, extra=item.extra))
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl,action="listado"))
return itemlist
def listado(item):
logger.info()
itemlist = []
url_next_page =''
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
#logger.debug(data)
logger.debug('item.modo: %s'%item.modo)
logger.debug('item.extra: %s'%item.extra)
if item.modo != 'next' or item.modo =='':
logger.debug('item.title: %s'% item.title)
patron = '<ul class="' + item.extra + '">(.*?)</ul>'
logger.debug("patron=" + patron)
fichas = scrapertools.get_match(data, patron)
page_extra = item.extra
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron_data='<ul class="pelilist">(.+?)</ul>'
data_listado = scrapertools.find_single_match(data, patron_data)
patron_listado='<li><a href="(.+?)" title=".+?"><img src="(.+?)".+?><h2'
if 'Serie' in item.title:
patron_listado+='.+?>'
else:
fichas = data
page_extra = item.extra
patron = '<a href="([^"]+).*?' # la url
patron += 'title="([^"]+).*?' # el titulo
patron += '<img src="([^"]+)"[^>]+>.*?' # el thumbnail
patron += '<span>([^<].*?)<' # la calidad
matches = re.compile(patron, re.DOTALL).findall(fichas)
logger.debug('item.next_page: %s'%item.next_page)
# Pagination
if item.next_page != 'b':
if len(matches) > 30:
url_next_page = item.url
matches = matches[:30]
next_page = 'b'
modo = 'continue'
else:
matches = matches[30:]
next_page = 'a'
patron_next_page = '<a href="([^"]+)">Next<\/a>'
matches_next_page = re.compile(patron_next_page, re.DOTALL).findall(data)
modo = 'continue'
if len(matches_next_page) > 0:
url_next_page = matches_next_page[0]
modo = 'next'
for scrapedurl, scrapedtitle, scrapedthumbnail, calidad in matches:
url = scrapedurl
title = scrapedtitle
thumbnail = scrapedthumbnail
action = "findvideos"
extra = ""
year = scrapertools.find_single_match(scrapedthumbnail, r'-(\d{4})')
if "1.com/series" in url:
action = "episodios"
extra = "serie"
title = scrapertools.find_single_match(title, '([^-]+)')
title = title.replace("Ver online", "", 1).replace("Descarga Serie HD", "", 1).replace("Ver en linea", "",
1).strip()
patron_listado+='>'
patron_listado+='(.+?)<\/h2><span>(.+?)<\/span><\/a><\/li>'
matches = scrapertools.find_multiple_matches(data_listado, patron_listado)
for scrapedurl, scrapedthumbnail,scrapedtitle,scrapedquality in matches:
if 'Serie' in item.title:
action="episodios"
else:
title = title.replace("Descargar", "", 1).strip()
if title.endswith("gratis"): title = title[:-7]
show = title
if item.extra != "buscar-list":
title = title + ' ' + calidad
context = ""
context_title = scrapertools.find_single_match(url, "http://(?:www.)?newpct1.com/(.*?)/(.*?)/")
if context_title:
try:
context = context_title[0].replace("descargar-", "").replace("pelicula", "movie").replace("series",
"tvshow")
context_title = context_title[1].replace("-", " ")
if re.search('\d{4}', context_title[-4:]):
context_title = context_title[:-4]
elif re.search('\(\d{4}\)', context_title[-6:]):
context_title = context_title[:-6]
except:
context_title = show
logger.debug('contxt title: %s'%context_title)
logger.debug('year: %s' % year)
logger.debug('context: %s' % context)
if not 'array' in title:
itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail,
extra = extra,
show = context_title, contentTitle=context_title, contentType=context,
context=["buscar_trailer"], infoLabels= {'year':year}))
tmdb.set_infoLabels(itemlist, True)
if url_next_page:
itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente",
url=url_next_page, next_page=next_page, folder=True,
text_color='yellow', text_bold=True, modo = modo, plot = extra,
extra = page_extra))
action="findvideos"
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl,thumbnail=scrapedthumbnail, action=action, quality=scrapedquality,show=scrapedtitle))
# Next page
patron_pag='<ul class="pagination"><li><a class="current" href=".+?">.+?<\/a>.+?<a href="(.+?)">'
siguiente = scrapertools.find_single_match(data, patron_pag)
itemlist.append(
Item(channel=item.channel, title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=siguiente, action="listado"))
return itemlist
def listado2(item):
def episodios(item):
logger.info()
itemlist = []
data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url, post=item.post).data)
data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
list_chars = [["ñ", "ñ"]]
for el in list_chars:
data = re.sub(r"%s" % el[0], el[1], data)
try:
get, post = scrapertools.find_single_match(data, '<ul class="pagination">.*?<a class="current" href.*?'
'<a\s*href="([^"]+)"(?:\s*onClick=".*?\'([^"]+)\'.*?")')
except:
post = False
if post:
if "pg" in item.post:
item.post = re.sub(r"pg=(\d+)", "pg=%s" % post, item.post)
else:
item.post += "&pg=%s" % post
pattern = '<ul class="%s">(.*?)</ul>' % item.pattern
data = scrapertools.get_match(data, pattern)
pattern = '<li><a href="(?P<url>[^"]+)".*?<img src="(?P<img>[^"]+)"[^>]+>.*?<h2.*?>\s*(?P<title>.*?)\s*</h2>'
matches = re.compile(pattern, re.DOTALL).findall(data)
for url, thumb, title in matches:
# fix encoding for title
real_title = scrapertools.find_single_match(title, r'font color.*?font.*?><b>(.*?)<\/b><\/font>')
title = scrapertools.htmlclean(title)
title = title.replace("�", "ñ")
# skip anything that is not a video
if "/juego/" in url or "/varios/" in url:
continue
if ".com/series" in url:
show = real_title
itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumb,
context=["buscar_trailer"], contentSerieName=show))
else:
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb,
context=["buscar_trailer"]))
if post:
itemlist.append(item.clone(channel=item.channel, action="listado2", title=">> Página siguiente",
thumbnail=get_thumb("next.png")))
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron_data='<ul class="buscar-list">(.+?)</ul>'
data_listado = scrapertools.find_single_match(data, patron_data)
patron = '<img src="(.+?)" alt=".+?">.+?<div class=".+?">.+?<a href=(.+?)" title=".+?">.+?>Serie.+?>(.+?)<'
matches = scrapertools.find_multiple_matches(data_listado, patron)
for scrapedthumbnail,scrapedurl, scrapedtitle in matches:
if " al " in scrapedtitle:
#action="episodios"
titulo=scrapedurl.split('http')
scrapedurl="http"+titulo[1]
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl,thumbnail=scrapedthumbnail, action="findvideos", show=scrapedtitle))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
## Any of the three options is valid
# item.url = item.url.replace("1.com/","1.com/ver-online/")
# item.url = item.url.replace("1.com/","1.com/descarga-directa/")
item.url = item.url.replace("1.com/", "1.com/descarga-torrent/")
# Download the page
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
title = scrapertools.find_single_match(data, "<h1><strong>([^<]+)</strong>[^<]+</h1>")
title += scrapertools.find_single_match(data, "<h1><strong>[^<]+</strong>([^<]+)</h1>")
caratula = scrapertools.find_single_match(data, '<div class="entry-left">.*?src="([^"]+)"')
# <a href="http://tumejorjuego.com/download/index.php?link=descargar-torrent/058310_yo-frankenstein-blurayrip-ac3-51.html" title="Descargar torrent de Yo Frankenstein " class="btn-torrent" target="_blank">Descarga tu Archivo torrent!</a>
patron = 'openTorrent.*?"title=".*?" class="btn-torrent">.*?function openTorrent.*?href = "(.*?)";'
# scrape the torrent link
url = scrapertools.find_single_match(data, patron)
if url != "":
itemlist.append(
Item(channel=item.channel, action="play", server="torrent", title=title + " [torrent]", fulltitle=title,
url=url, thumbnail=caratula, plot=item.plot, folder=False))
logger.debug("matar %s" % data)
# scrape the "watch" and "download" links, single-link and multi-part
data = data.replace("'", '"')
data = data.replace(
'javascript:;" onClick="popup("http://www.newpct1.com/pct1/library/include/ajax/get_modallinks.php?links=', "")
data = data.replace("http://tumejorserie.com/descargar/url_encript.php?link=", "")
data = data.replace("$!", "#!")
patron_descargar = '<div id="tab2"[^>]+>.*?</ul>'
patron_ver = '<div id="tab3"[^>]+>.*?</ul>'
match_ver = scrapertools.find_single_match(data, patron_ver)
match_descargar = scrapertools.find_single_match(data, patron_descargar)
patron = '<div class="box1"><img src="([^"]+)".*?' # logo
patron += '<div class="box2">([^<]+)</div>' # servidor
patron += '<div class="box3">([^<]+)</div>' # idioma
patron += '<div class="box4">([^<]+)</div>' # calidad
patron += '<div class="box5"><a href="([^"]+)".*?' # enlace
patron += '<div class="box6">([^<]+)</div>' # titulo
enlaces_ver = re.compile(patron, re.DOTALL).findall(match_ver)
enlaces_descargar = re.compile(patron, re.DOTALL).findall(match_descargar)
for logo, servidor, idioma, calidad, enlace, titulo in enlaces_ver:
servidor = servidor.replace("streamin", "streaminto")
titulo = titulo + " [" + servidor + "]"
mostrar_server = True
if config.get_setting("hidepremium"):
mostrar_server = servertools.is_server_enabled(servidor)
if mostrar_server:
try:
devuelve = servertools.findvideosbyserver(enlace, servidor)
if devuelve:
enlace = devuelve[0][1]
itemlist.append(
Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo,
fulltitle=item.title, url=enlace, thumbnail=logo, plot=item.plot, folder=False))
except:
pass
for logo, servidor, idioma, calidad, enlace, titulo in enlaces_descargar:
servidor = servidor.replace("uploaded", "uploadedto")
partes = enlace.split(" ")
p = 1
for enlace in partes:
parte_titulo = titulo + " (%s/%s)" % (p, len(partes)) + " [" + servidor + "]"
p += 1
mostrar_server = True
if config.get_setting("hidepremium"):
mostrar_server = servertools.is_server_enabled(servidor)
if mostrar_server:
try:
devuelve = servertools.findvideosbyserver(enlace, servidor)
if devuelve:
enlace = devuelve[0][1]
itemlist.append(Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor,
title=parte_titulo, fulltitle=item.title, url=enlace, thumbnail=logo,
plot=item.plot, folder=False))
except:
pass
return itemlist
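The multi-part branch above splits a space-separated enlace field into parts and numbers each title as "(n/total)". A tiny illustration with made-up URLs:
partes = "http://example.com/parte1 http://example.com/parte2".split(" ")
for p, enlace in enumerate(partes, 1):
    print "Titulo (%s/%s)" % (p, len(partes))
# -> Titulo (1/2), then Titulo (2/2)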
def episodios(item):
logger.info()
itemlist = []
infoLabels = item.infoLabels
data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
pattern = '<ul class="%s">(.*?)</ul>' % "pagination" # item.pattern
pagination = scrapertools.find_single_match(data, pattern)
if pagination:
pattern = '<li><a href="([^"]+)">Last<\/a>'
full_url = scrapertools.find_single_match(pagination, pattern)
url, last_page = scrapertools.find_single_match(full_url, r'(.*?\/pg\/)(\d+)')
list_pages = [item.url]
for x in range(2, int(last_page) + 1):
response = httptools.downloadpage('%s%s'% (url,x))
if response.sucess:
list_pages.append("%s%s" % (url, x))
else:
list_pages = [item.url]
for index, page in enumerate(list_pages):
logger.debug("Loading page %s/%s url=%s" % (index, len(list_pages), page))
data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(page).data)
data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
pattern = '<ul class="%s">(.*?)</ul>' % "buscar-list" # item.pattern
data = scrapertools.get_match(data, pattern)
pattern = '<li[^>]*><a href="(?P<url>[^"]+).*?<img src="(?P<thumb>[^"]+)".*?<h2[^>]+>(?P<info>.*?)</h2>'
matches = re.compile(pattern, re.DOTALL).findall(data)
for url, thumb, info in matches:
if "<span" in info: # new style
pattern = ".*?[^>]+>.*?Temporada\s*(?P<season>\d+)\s*Capitulo(?:s)?\s*(?P<episode>\d+)" \
"(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)</span>\s*Calidad\s*<span[^>]+>" \
"[\[]\s*(?P<quality>.*?)\s*[\]]</span>"
r = re.compile(pattern)
match = [m.groupdict() for m in r.finditer(info)][0]
if match["episode2"]:
multi = True
title = "%s (%sx%s-%s) [%s][%s]" % (item.show, match["season"], str(match["episode"]).zfill(2),
str(match["episode2"]).zfill(2), match["lang"],
match["quality"])
else:
multi = False
title = "%s (%sx%s) [%s][%s]" % (item.show, match["season"], str(match["episode"]).zfill(2),
match["lang"], match["quality"])
else: # old style
pattern = "\[(?P<quality>.*?)\].*?\[Cap.(?P<season>\d+)(?P<episode>\d{2})(?:_(?P<season2>\d+)" \
"(?P<episode2>\d{2}))?.*?\].*?(?:\[(?P<lang>.*?)\])?"
r = re.compile(pattern)
match = [m.groupdict() for m in r.finditer(info)][0]
# logger.debug("data %s" % match)
str_lang = ""
if match["lang"] is not None:
str_lang = "[%s]" % match["lang"]
if match["season2"] and match["episode2"]:
multi = True
if match["season"] == match["season2"]:
title = "%s (%sx%s-%s) %s[%s]" % (item.show, match["season"], match["episode"],
match["episode2"], str_lang, match["quality"])
else:
title = "%s (%sx%s-%sx%s) %s[%s]" % (item.show, match["season"], match["episode"],
match["season2"], match["episode2"], str_lang,
match["quality"])
else:
title = "%s (%sx%s) %s[%s]" % (item.show, match["season"], match["episode"], str_lang,
match["quality"])
multi = False
season = match['season']
episode = match['episode']
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb,
quality=item.quality, multi=multi, contentSeason=season,
contentEpisodeNumber=episode, infoLabels = infoLabels))
# order list
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True)
if len(itemlist) > 1:
itemlist = sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber)))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
item.clone(title="Añadir esta serie a la videoteca", action="add_serie_to_library", extra="episodios"))
return itemlist
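To make the two title layouts handled above concrete, here is a small self-contained check of the "old style" pattern against a made-up release title (the sample string is hypothetical):
import re
pattern = "\[(?P<quality>.*?)\].*?\[Cap.(?P<season>\d+)(?P<episode>\d{2})(?:_(?P<season2>\d+)(?P<episode2>\d{2}))?.*?\].*?(?:\[(?P<lang>.*?)\])?"
info = "[HDTV 720p][Cap.101_102][Castellano]"
match = [m.groupdict() for m in re.finditer(pattern, info)][0]
# -> quality='HDTV 720p', season='1', episode='01', season2='1', episode2='02'
# lang stays None here (the trailing group is optional), which is why the
# code above guards with: if match["lang"] is not None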
def search(item, texto):
logger.info("search:" + texto)
# texto = texto.replace(" ", "+")
try:
item.post = "q=%s" % texto
item.pattern = "buscar-list"
itemlist = listado2(item)
return itemlist
# Catch the exception so a failing channel does not break the global search
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
item.extra = 'pelilist'
if categoria == 'torrent':
item.url = host+'peliculas/'
itemlist = listado(item)
if itemlist[-1].title == ">> Página siguiente":
itemlist.pop()
item.url = host+'series/'
itemlist.extend(listado(item))
if itemlist[-1].title == ">> Página siguiente":
itemlist.pop()
# Catch the exception so a failing channel does not break the 'novedades' channel
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
new_item = []
data = httptools.downloadpage(item.url).data
itemlist = servertools.find_video_items(data = data)
url = scrapertools.find_single_match( data, 'location.href = "([^"]+)"')
new_item.append(Item(url = url, title = "Torrent", server = "torrent", action = "play"))
itemlist.extend(new_item)
for it in itemlist:
it.channel = item.channel
return itemlist

View File

@@ -1,10 +1,7 @@
# -*- coding: utf-8 -*-
import re
from threading import Thread
import xbmc
import xbmcgui
from core import httptools
from core import scrapertools
from core import servertools
@@ -12,71 +9,22 @@ from core import tmdb
from core.item import Item
from platformcode import config, logger
ACTION_SHOW_FULLSCREEN = 36
ACTION_GESTURE_SWIPE_LEFT = 511
ACTION_SELECT_ITEM = 7
ACTION_PREVIOUS_MENU = 10
ACTION_MOVE_LEFT = 1
ACTION_MOVE_RIGHT = 2
ACTION_MOVE_DOWN = 4
ACTION_MOVE_UP = 3
OPTION_PANEL = 6
OPTIONS_OK = 5
__modo_grafico__ = config.get_setting('modo_grafico', "peliscon")
# For Bing searches, avoiding bans
def browser(url):
import mechanize
# Use a mechanize Browser to work around problems with Bing searches
br = mechanize.Browser()
# Browser options
br.set_handle_equiv(False)
br.set_handle_gzip(True)
br.set_handle_redirect(True)
br.set_handle_referer(False)
br.set_handle_robots(False)
# Follows refresh 0 but not hangs on refresh > 0
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
# Want debugging messages?
# br.set_debug_http(True)
# br.set_debug_redirects(True)
# br.set_debug_responses(True)
# User-Agent (this is cheating, ok?)
# br.addheaders = [('User-agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/600.7.12 (KHTML, like Gecko) Version/7.1.7 Safari/537.85.16')]
# br.addheaders =[('Cookie','SRCHD=AF=QBRE; domain=.bing.com; expires=25 de febrero de 2018 13:00:28 GMT+1; MUIDB=3B942052D204686335322894D3086911; domain=www.bing.com;expires=24 de febrero de 2018 13:00:28 GMT+1')]
# Open some site, let's pick a random one, the first that pops in mind
r = br.open(url)
response = r.read()
print response
if "img,divreturn" in response:
r = br.open("http://ssl-proxy.my-addr.org/myaddrproxy.php/" + url)
print "prooooxy"
response = r.read()
return response
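A hypothetical call, for illustration only (the query URL is made up):
# html = browser("https://www.bing.com/search?q=site%3Apeliscon.com+matrix")
The helper tries Bing directly first and, when the response contains the "img,divreturn" block marker, retries the same URL through the ssl-proxy.my-addr.org mirror.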
api_key = "2e2160006592024ba87ccdf78c28f49f"
api_fankey = "dffe90fba4d02c199ae7a9e71330c987"
host = "http://peliscon.com"
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(
item.clone(title="[COLOR aqua][B]Películas[/B][/COLOR]", action="scraper", url="http://peliscon.com/peliculas/",
item.clone(title="[COLOR aqua][B]Películas[/B][/COLOR]", action="scraper", url= host + "/peliculas/",
thumbnail="http://imgur.com/FrcWTS8.png", fanart="http://imgur.com/MGQyetQ.jpg",
contentType="movie"))
itemlist.append(itemlist[-1].clone(title="[COLOR aqua][B]Series[/B][/COLOR]", action="scraper",
url="http://peliscon.com/series/", thumbnail="http://imgur.com/FrcWTS8.png",
url= host + "/series/", thumbnail="http://imgur.com/FrcWTS8.png",
fanart="http://imgur.com/i41eduI.jpg", contentType="tvshow"))
itemlist.append(item.clone(title="[COLOR aqua][B] Últimos capitulos[/B][/COLOR]", action="ul_cap",
url="http://peliscon.com/episodios/", thumbnail="http://imgur.com/FrcWTS8.png",
url= host + "/episodios/", thumbnail="http://imgur.com/FrcWTS8.png",
fanart="http://imgur.com/i41eduI.jpg", contentType="tvshow"))
itemlist.append(itemlist[-1].clone(title="[COLOR crimson][B]Buscar[/B][/COLOR]", action="search",
thumbnail="http://imgur.com/FrcWTS8.png", fanart="http://imgur.com/h1b7tfN.jpg"))
@@ -86,7 +34,7 @@ def mainlist(item):
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = "https://peliscon.com/?s=" + texto
item.url = host + "/?s=" + texto
item.extra = "search"
try:
return buscador(item)
@@ -105,9 +53,7 @@ def buscador(item):
patron = scrapertools.find_multiple_matches(data,
'<div class="result-item">.*?href="([^"]+)".*?alt="([^"]+)".*?<span class=".*?">([^"]+)</span>.*?<span class="year">([^"]+)</span>')
for url, title, genere, year in patron:
if "Serie" in genere:
checkmt = "tvshow"
genere = "[COLOR aqua][B]" + genere + "[/B][/COLOR]"
@@ -115,20 +61,15 @@ def buscador(item):
checkmt = "movie"
genere = "[COLOR cadetblue][B]" + genere + "[/B][/COLOR]"
titulo = "[COLOR crimson]" + title + "[/COLOR]" + " [ " + genere + " ] "
if checkmt == "movie":
new_item = item.clone(action="findvideos", title=titulo, url=url, fulltitle=title, contentTitle=title,
contentType="movie", library=True)
else:
new_item = item.clone(action="findtemporadas", title=titulo, url=url, fulltitle=title, contentTitle=title,
show=title, contentType="tvshow", library=True)
new_item.infoLabels['year'] = year
itemlist.append(new_item)
try:
from core import tmdb
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
for item in itemlist:
if not "Siguiente >>" in item.title:
@@ -143,7 +84,6 @@ def buscador(item):
next = scrapertools.find_single_match(data, '<div class=\'resppages\'><a href="([^"]+)"')
if len(next) > 0:
url = next
itemlist.append(item.clone(title="[COLOR springgreen][B]Siguiente >>[/B][/COLOR]", action="buscador", url=url))
return itemlist
@@ -152,14 +92,11 @@ def scraper(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
if item.contentType == "movie":
patron = scrapertools.find_multiple_matches(data,
'<div class="poster">.*?src="(.*?)" alt=.*?href="(.*?)">.*?'
'<h4>(.*?)<\/h4>.*?img\/flags\/(.*?)\.png.*?imdb.*?<span>(.*?)>')
for thumb, url, title, language, year in patron:
titulo = title
title = re.sub(r"!|¡", "", title)
@@ -169,13 +106,10 @@ def scraper(item):
fulltitle=title, contentTitle=title, contentType="movie", extra=year, library=True,
language= language, infoLabels={'year':year})
itemlist.append(new_item)
else:
patron = scrapertools.find_multiple_matches(data,
'<div class="poster">.*?src="(.*?)" alt=.*?href="(.*?)">.*?'
'<h4>(.*?)<\/h4>.*?<span>(.*?)<')
for thumb, url, title, year in patron:
titulo = title.strip()
title = re.sub(r"\d+x.*", "", title)
@@ -183,17 +117,14 @@ def scraper(item):
thumbnail=thumb, fulltitle=title, contentTitle=title, show=title,
contentType="tvshow", library=True, infoLabels={'year':year})
itemlist.append(new_item)
## Pagination
next = scrapertools.find_single_match(data, '<div class=\'resppages\'><a href="([^"]+)"')
if len(next) > 0:
url = next
itemlist.append(
item.clone(title="[COLOR springgreen][B]Siguiente >>[/B][/COLOR]", thumbnail="http://imgur.com/a7lQAld.png",
url=url))
try:
from core import tmdb
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
for item in itemlist:
if not "Siguiente >>" in item.title:
@@ -202,13 +133,8 @@ def scraper(item):
else:
item.infoLabels['rating'] = "[COLOR springgreen]" + str(item.infoLabels['rating']) + "[/COLOR]"
item.title = item.title + " " + str(item.infoLabels['rating'])
except:
pass
for item_tmdb in itemlist:
logger.info(str(item_tmdb.infoLabels['tmdb_id']))
return itemlist
@@ -217,35 +143,25 @@ def ul_cap(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = scrapertools.find_multiple_matches(data,
'<div class="poster">.*?<img src="([^"]+)" alt="([^"]+):.*?href="([^"]+)"><span class="b">(\d+x\d+)<\/span>')
for thumb, title, url, cap in patron:
temp = re.sub(r"x\d+", "", cap)
epi = re.sub(r"\d+x", "", cap)
titulo = title.strip() + "--" + "[COLOR red][B]" + cap + "[/B][/COLOR]"
title = re.sub(r"\d+x.*", "", title)
# filtro_thumb = thumb.replace("https://image.tmdb.org/t/p/w300", "")
# filtro_list = {"poster_path": filtro_thumb}
# filtro_list = filtro_list.items()
# url_tv = scrapertools.find_single_match(url,'episodios/(.*?)/')
new_item = item.clone(action="findvideos", title="[COLOR aqua]" + titulo + "[/COLOR]", url=url, thumbnail=thumb,
fulltitle=title, contentTitle=title, show=title, contentType="tvshow", temp=temp, epi=epi,
library=True)
itemlist.append(new_item)
## Pagination
next = scrapertools.find_single_match(data, '<div class=\'resppages\'><a href="([^"]+)"')
if len(next) > 0:
url = next
itemlist.append(
item.clone(title="[COLOR springgreen][B]Siguiente >>[/B][/COLOR]", thumbnail="http://imgur.com/a7lQAld.png",
url=url))
try:
from core import tmdb
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
for item in itemlist:
@@ -257,27 +173,17 @@ def ul_cap(item):
else:
item.infoLabels['rating'] = "[COLOR springgreen] (" + str(item.infoLabels['rating']) + ")[/COLOR]"
item.title = item.title + " " + str(item.infoLabels['rating'])
except:
pass
for item_tmdb in itemlist:
logger.info(str(item_tmdb.infoLabels['tmdb_id']))
return itemlist
def findtemporadas(item):
logger.info()
itemlist = []
if not item.temp:
th = Thread(target=get_art, args=[item])  # pass the callable and its args; target=get_art(item) would run it synchronously
th.setDaemon(True)
th.start()
check_temp = None
else:
check_temp = "yes"
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
@@ -318,19 +224,12 @@ def findtemporadas(item):
for temporada, bloque_epis in bloque_episodios:
item.infoLabels = item.InfoLabels
item.infoLabels['season'] = temporada
itemlist.append(item.clone(action="epis",
title="[COLOR cornflowerblue][B]Temporada [/B][/COLOR]" + "[COLOR darkturquoise][B]" + temporada + "[/B][/COLOR]",
url=bloque_epis, contentType=item.contentType, contentTitle=item.contentTitle,
show=item.show, extra=item.extra, fanart_extra=fanart_extra, fanart_info=fanart_info,
datalibrary=data, check_temp=check_temp, folder=True))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
for item in itemlist:
item.fanart = fanart
item.extra = extra
if item.temp:
item.thumbnail = item.infoLabels['temporada_poster']
if config.get_videolibrary_support() and itemlist:
if len(bloque_episodios) == 1:
extra = "epis"
@@ -340,7 +239,7 @@ def findtemporadas(item):
infoLabels = {'tmdb_id': item.infoLabels['tmdb_id'], 'tvdb_id': item.infoLabels['tvdb_id'],
'imdb_id': item.infoLabels['imdb_id']}
itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", text_color="0xFFe5ffcc",
action="add_serie_to_library", extra=extra, url=item.url,
action="add_serie_to_library", extra="", url=item.url,
contentSerieName=item.fulltitle, infoLabels=infoLabels,
thumbnail='http://imgur.com/3ik73p8.png', datalibrary=data))
return itemlist
@@ -349,16 +248,13 @@ def findtemporadas(item):
def epis(item):
logger.info()
itemlist = []
if item.extra == "serie_add":
item.url = item.datalibrary
patron = scrapertools.find_multiple_matches(item.url, '<div class="imagen"><a href="([^"]+)".*?"numerando">(.*?)<')
for url, epi in patron:
episodio = scrapertools.find_single_match(epi, '\d+ - (\d+)')
item.infoLabels['episode'] = episodio
epi = re.sub(r" - ", "X", epi)
itemlist.append(
item.clone(title="[COLOR deepskyblue]Episodio " + "[COLOR red]" + epi, url=url, action="findvideos",
show=item.show, fanart=item.extra, extra=item.extra, fanart_extra=item.fanart_extra,
@@ -377,21 +273,12 @@ def findvideos(item):
itemlist = []
if item.temp:
url_epis = item.url
data = httptools.downloadpage(item.url).data
if not item.infoLabels['episode'] or item.temp:
th = Thread(target=get_art, args=[item])  # pass the callable and its args; target=get_art(item) would run it synchronously
th.setDaemon(True)
th.start()
if item.contentType != "movie":
if not item.infoLabels['episode']:
capitulo = scrapertools.find_single_match(item.title, '(\d+x\d+)')
url_capitulo = scrapertools.find_single_match(data,
'<a href="(http://www.divxtotal.com/wp-content/uploads/.*?' + capitulo + '.*?.torrent)')
if len(item.extra.split("|")) >= 2:
extra = item.extra
else:
@@ -399,17 +286,14 @@ def findvideos(item):
else:
capitulo = item.title
url_capitulo = item.url
try:
fanart = item.fanart_extra
except:
fanart = item.extra.split("|")[0]
url_data = scrapertools.find_multiple_matches(data, '<div id="option-(.*?)".*?src="([^"]+)"')
for option, url in url_data:
server, idioma = scrapertools.find_single_match(data,
'href="#option-' + option + '">.*?</b>(.*?)<span class="dt_flag">.*?flags/(.*?).png')
if not item.temp:
item.infoLabels['year'] = None
if item.temp:
@@ -427,7 +311,6 @@ def findvideos(item):
itemlist.append(Item(channel=item.channel, title=title, url=url, action="play", fanart=fanart,
thumbnail=item.thumbnail, extra=item.extra, fulltitle=item.fulltitle,
folder=False))
if item.temp:
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
for item in itemlist:
@@ -443,26 +326,17 @@ def findvideos(item):
fanart = item.fanart
if item.temp:
item.infoLabels['tvdb_id'] = item.tvdb
itemlist.append(
Item(channel=item.channel, title="[COLOR steelblue][B] info[/B][/COLOR]", action="info_capitulos",
fanart=fanart, thumbnail=item.thumb_art, thumb_info=item.thumb_info, extra=item.extra,
show=item.show, InfoLabels=item.infoLabels, folder=False))
if item.temp and not item.check_temp:
url_epis = re.sub(r"-\dx.*", "", url_epis)
url_epis = url_epis.replace("episodios", "series")
itemlist.append(
Item(channel=item.channel, title="[COLOR salmon][B]Todos los episodios[/B][/COLOR]", url=url_epis,
action="findtemporadas", server="torrent", fanart=item.extra.split("|")[1],
thumbnail=item.infoLabels['thumbnail'], extra=item.extra + "|" + item.thumbnail,
action="findtemporadas", server="torrent",
thumbnail=item.infoLabels['thumbnail'],
contentType=item.contentType, contentTitle=item.contentTitle, InfoLabels=item.infoLabels,
thumb_art=item.thumb_art, thumb_info=item.thumbnail, fulltitle=item.fulltitle,
library=item.library, temp=item.temp, folder=True))
else:
url_data = scrapertools.find_multiple_matches(data, '<div id="option-(.*?)".*?src="([^"]+)"')
for option, url in url_data:
server, idioma = scrapertools.find_single_match(data,
@@ -481,7 +355,6 @@ def findvideos(item):
action="add_pelicula_to_library", url=item.url, fanart=item.extra.split("|")[0],
infoLabels=infoLabels, text_color="0xFFe5ffcc",
thumbnail='http://imgur.com/3ik73p8.png'))
return itemlist
@@ -496,417 +369,6 @@ def play(item):
return itemlist
def info_capitulos(item, images={}):
logger.info()
itemlist = []
try:
url = "http://thetvdb.com/api/1D62F2F90030C444/series/" + str(item.InfoLabels['tvdb_id']) + "/default/" + str(
item.InfoLabels['season']) + "/" + str(item.InfoLabels['episode']) + "/es.xml"
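# Note: this is the legacy TheTVDB XML API; the path layout is
# /api/<apikey>/series/<tvdb_id>/default/<season>/<episode>/<lang>.xml,
# and the "/0" replace below strips leading zeros from the season/episode segments.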
if "/0" in url:
url = url.replace("/0", "/")
from core import jsontools
data = httptools.downloadpage(url).data
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
if "<filename>episodes" in data:
image = scrapertools.find_single_match(data, '<Data>.*?<filename>(.*?)</filename>')
image = "http://thetvdb.com/banners/" + image
else:
try:
image = item.InfoLabels['episodio_imagen']
except:
image = "http://imgur.com/ZiEAVOD.png"
foto = item.thumb_info
if not ".png" in foto:
foto = "http://imgur.com/PRiEW1D.png"
try:
title = item.InfoLabels['episodio_titulo']
except:
title = ""
title = "[COLOR red][B]" + title + "[/B][/COLOR]"
try:
plot = "[COLOR peachpuff]" + str(item.InfoLabels['episodio_sinopsis']) + "[/COLOR]"
except:
plot = scrapertools.find_single_match(data, '<Overview>(.*?)</Overview>')
if plot == "":
plot = "Sin información todavia"
try:
rating = item.InfoLabels['episodio_vote_average']
except:
rating = 0
try:
if rating >= 5 and rating < 8:
rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR springgreen][B]" + str(rating) + "[/B][/COLOR]"
elif rating >= 8 and rating < 10:
rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR yellow][B]" + str(rating) + "[/B][/COLOR]"
elif rating == 10:
rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR orangered][B]" + str(rating) + "[/B][/COLOR]"
else:
rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR crimson][B]" + str(rating) + "[/B][/COLOR]"
except:
rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR crimson][B]" + str(rating) + "[/B][/COLOR]"
if "10." in rating:
rating = re.sub(r'10\.\d+', '10', rating)
except:
title = "[COLOR red][B]LO SENTIMOS...[/B][/COLOR]"
plot = "Este capitulo no tiene informacion..."
plot = "[COLOR yellow][B]" + plot + "[/B][/COLOR]"
image = "http://s6.postimg.org/ub7pb76c1/noinfo.png"
foto = "http://s6.postimg.org/nm3gk1xox/noinfosup2.png"
rating = ""
ventana = TextBox2(title=title, plot=plot, thumbnail=image, fanart=foto, rating=rating)
ventana.doModal()
class TextBox2(xbmcgui.WindowDialog):
""" Create a skinned textbox window """
def __init__(self, *args, **kwargs):
self.getTitle = kwargs.get('title')
self.getPlot = kwargs.get('plot')
self.getThumbnail = kwargs.get('thumbnail')
self.getFanart = kwargs.get('fanart')
self.getRating = kwargs.get('rating')
self.background = xbmcgui.ControlImage(70, 20, 1150, 630, 'http://imgur.com/aj4qzTr.jpg')
self.title = xbmcgui.ControlTextBox(120, 60, 430, 50)
self.rating = xbmcgui.ControlTextBox(145, 112, 1030, 45)
self.plot = xbmcgui.ControlTextBox(120, 150, 1056, 100)
self.thumbnail = xbmcgui.ControlImage(120, 300, 1056, 300, self.getThumbnail)
self.fanart = xbmcgui.ControlImage(780, 43, 390, 100, self.getFanart)
self.addControl(self.background)
self.background.setAnimations(
[('conditional', 'effect=slide start=1000% end=0% time=1500 condition=true tween=bounce',),
('WindowClose', 'effect=slide delay=800 start=0% end=1000% time=800 condition=true',)])
self.addControl(self.thumbnail)
self.thumbnail.setAnimations([('conditional',
'effect=zoom start=0% end=100% delay=2700 time=1500 condition=true tween=elastic easing=inout',),
('WindowClose', 'effect=slide end=0,700% time=300 condition=true',)])
self.addControl(self.plot)
self.plot.setAnimations(
[('conditional', 'effect=zoom delay=2000 center=auto start=0 end=100 time=800 condition=true ',), (
'conditional',
'effect=rotate delay=2000 center=auto acceleration=6000 start=0% end=360% time=800 condition=true',),
('WindowClose', 'effect=zoom center=auto start=100% end=-0% time=600 condition=true',)])
self.addControl(self.fanart)
self.fanart.setAnimations(
[('WindowOpen', 'effect=slide start=0,-700 delay=1000 time=2500 tween=bounce condition=true',), (
'conditional',
'effect=rotate center=auto start=0% end=360% delay=3000 time=2500 tween=bounce condition=true',),
('WindowClose', 'effect=slide end=0,-700% time=1000 condition=true',)])
self.addControl(self.title)
self.title.setText(self.getTitle)
self.title.setAnimations(
[('conditional', 'effect=slide start=-1500% end=0% delay=1000 time=2000 condition=true tween=elastic',),
('WindowClose', 'effect=slide start=0% end=-1500% time=800 condition=true',)])
self.addControl(self.rating)
self.rating.setText(self.getRating)
self.rating.setAnimations(
[('conditional', 'effect=fade start=0% end=100% delay=3000 time=1500 condition=true',),
('WindowClose', 'effect=slide end=0,-700% time=600 condition=true',)])
xbmc.sleep(200)
try:
self.plot.autoScroll(7000, 6000, 30000)
except:
xbmc.executebuiltin(
'Notification([COLOR red][B]Actualiza Kodi a su última versión[/B][/COLOR], [COLOR skyblue]para mejor info[/COLOR],8000,"https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/kodi-icon.png")')
self.plot.setText(self.getPlot)
def get(self):
self.show()
def onAction(self, action):
if action == ACTION_PREVIOUS_MENU or action.getId() == ACTION_GESTURE_SWIPE_LEFT or action == 110 or action == 92:
self.close()
def test():
return True
def fanartv(item, id_tvdb, id, images={}):
headers = [['Content-Type', 'application/json']]
from core import jsontools
if item.contentType == "movie":
url = "http://webservice.fanart.tv/v3/movies/%s?api_key=cab16e262d72fea6a6843d679aa10300" \
% id
else:
url = "http://webservice.fanart.tv/v3/tv/%s?api_key=cab16e262d72fea6a6843d679aa10300" % id_tvdb
try:
data = jsontools.load(scrapertools.downloadpage(url, headers=headers))
if data and not "error message" in data:
for key, value in data.items():
if key not in ["name", "tmdb_id", "imdb_id", "thetvdb_id"]:
images[key] = value
else:
images = []
except:
images = []
return images
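# Minimal usage sketch for fanartv() above, mirroring what get_art() below does
# with the result; the bare Item and the TMDB id "550" are illustrative
# placeholders, not values taken from the channel.
def _fanartv_example():
    from core.item import Item
    sample = Item(contentType="movie")
    art = fanartv(sample, id_tvdb="", id="550")  # "550" is a sample TMDB movie id
    if art and art.get("moviedisc"):  # art is empty when the lookup fails
        sample.thumbnail = art["moviedisc"][0].get("url")
    return sample.thumbnail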
def get_art(item):
logger.info()
id = item.infoLabels['tmdb_id']
check_fanart = item.infoLabels['fanart']
if item.contentType != "movie":
tipo_ps = "tv"
else:
tipo_ps = "movie"
if not id:
year = item.extra
otmdb = tmdb.Tmdb(texto_buscado=item.fulltitle, year=year, tipo=tipo_ps)
id = otmdb.result.get("id")
if id is None:
otmdb = tmdb.Tmdb(texto_buscado=item.fulltitle, tipo=tipo_ps)
id = otmdb.result.get("id")
if id is None:
if item.contentType == "movie":
urlbing_imdb = "http://www.bing.com/search?q=%s+%s+tv+series+site:imdb.com" % (
item.fulltitle.replace(' ', '+'), year)
data = browser(urlbing_imdb)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|http://ssl-proxy.my-addr.org/myaddrproxy.php/", "", data)
subdata_imdb = scrapertools.find_single_match(data,
'<li class="b_algo">(.*?)h="ID.*?<strong>.*?TV Series')
else:
urlbing_imdb = "http://www.bing.com/search?q=%s+%s+site:imdb.com" % (
item.fulltitle.replace(' ', '+'), year)
data = browser(urlbing_imdb)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|http://ssl-proxy.my-addr.org/myaddrproxy.php/", "", data)
subdata_imdb = scrapertools.find_single_match(data, '<li class="b_algo">(.*?)h="ID.*?<strong>')
try:
imdb_id = scrapertools.get_match(subdata_imdb, '<a href=.*?http.*?imdb.com/title/(.*?)/.*?"')
except:
try:
imdb_id = scrapertools.get_match(subdata_imdb,
'<a href=.*?http.*?imdb.com/.*?/title/(.*?)/.*?"')
except:
imdb_id = ""
otmdb = tmdb.Tmdb(external_id=imdb_id, external_source="imdb_id", tipo=tipo_ps, idioma_busqueda="es")
id = otmdb.result.get("id")
if id is None:
if "(" in item.fulltitle:
title = scrapertools.find_single_match(item.fulltitle, '\(.*?\)')
if item.contentType != "movie":
urlbing_imdb = "http://www.bing.com/search?q=%s+%s+tv+series+site:imdb.com" % (
title.replace(' ', '+'), year)
data = browser(urlbing_imdb)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|http://ssl-proxy.my-addr.org/myaddrproxy.php/", "",
data)
subdata_imdb = scrapertools.find_single_match(data,
'<li class="b_algo">(.*?)h="ID.*?<strong>.*?TV Series')
else:
urlbing_imdb = "http://www.bing.com/search?q=%s+%s+site:imdb.com" % (
title.replace(' ', '+'), year)
data = browser(urlbing_imdb)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|http://ssl-proxy.my-addr.org/myaddrproxy.php/", "",
data)
subdata_imdb = scrapertools.find_single_match(data,
'<li class="b_algo">(.*?)h="ID.*?<strong>')
try:
imdb_id = scrapertools.get_match(subdata_imdb,
'<a href=.*?http.*?imdb.com/title/(.*?)/.*?"')
except:
try:
imdb_id = scrapertools.get_match(subdata_imdb,
'<a href=.*?http.*?imdb.com/.*?/title/(.*?)/.*?"')
except:
imdb_id = ""
otmdb = tmdb.Tmdb(external_id=imdb_id, external_source="imdb_id", tipo=tipo_ps,
idioma_busqueda="es")
id = otmdb.result.get("id")
if not id:
fanart = item.fanart
imagenes = []
itmdb = tmdb.Tmdb(id_Tmdb=id, tipo=tipo_ps)
images = itmdb.result.get("images")
if images:
for key, value in images.iteritems():
for detail in value:
imagenes.append('http://image.tmdb.org/t/p/original' + detail["file_path"])
if item.contentType == "movie":
if len(imagenes) >= 4:
if imagenes[0] != check_fanart:
item.fanart = imagenes[0]
else:
item.fanart = imagenes[1]
if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart:
item.extra = imagenes[1] + "|" + imagenes[2]
else:
if imagenes[1] != check_fanart and imagenes[1] != item.fanart:
item.extra = imagenes[1] + "|" + imagenes[3]
elif imagenes[2] != check_fanart:
item.extra = imagenes[2] + "|" + imagenes[3]
else:
item.extra = imagenes[3] + "|" + imagenes[3]
elif len(imagenes) == 3:
if imagenes[0] != check_fanart:
item.fanart = imagenes[0]
else:
item.fanart = imagenes[1]
if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart:
item.extra = imagenes[1] + "|" + imagenes[2]
else:
if imagenes[1] != check_fanart and imagenes[1] != item.fanart:
item.extra = imagenes[0] + "|" + imagenes[1]
elif imagenes[2] != check_fanart:
item.extra = imagenes[1] + "|" + imagenes[2]
else:
item.extra = imagenes[1] + "|" + imagenes[1]
elif len(imagenes) == 2:
if imagenes[0] != check_fanart:
item.fanart = imagenes[0]
else:
item.fanart = imagenes[1]
if imagenes[1] != check_fanart and imagenes[1] != item.fanart:
item.extra = imagenes[0] + "|" + imagenes[1]
else:
item.extra = imagenes[1] + "|" + imagenes[0]
elif len(imagenes) == 1:
item.extra = imagenes[0] + "|" + imagenes[0]
else:
item.extra = item.fanart + "|" + item.fanart
id_tvdb = ""
else:
if itmdb.result.get("external_ids").get("tvdb_id"):
id_tvdb = itmdb.result.get("external_ids").get("tvdb_id")
if item.temp:
item.tvdb = id_tvdb
else:
id_tvdb = ""
if len(imagenes) >= 6:
if imagenes[0] != check_fanart:
item.fanart = imagenes[0]
else:
item.fanart = imagenes[1]
if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart:
item.extra = imagenes[1] + "|" + imagenes[2] + "|" + imagenes[3] + "|" + imagenes[4] + "|" + \
imagenes[5]
else:
if imagenes[1] != check_fanart and imagenes[1] != item.fanart:
item.extra = imagenes[1] + "|" + imagenes[3] + "|" + imagenes[4] + "|" + imagenes[5] + "|" + \
imagenes[2]
elif imagenes[2] != check_fanart:
item.extra = imagenes[2] + "|" + imagenes[3] + "|" + imagenes[4] + "|" + imagenes[5] + "|" + \
imagenes[1]
else:
item.extra = imagenes[3] + "|" + imagenes[4] + "|" + imagenes[5] + "|" + imagenes[2] + "|" + \
imagenes[1]
elif len(imagenes) == 5:
if imagenes[0] != check_fanart:
item.fanart = imagenes[0]
else:
item.fanart = imagenes[1]
if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart:
item.extra = imagenes[1] + "|" + imagenes[2] + "|" + imagenes[3] + "|" + imagenes[4]
else:
if imagenes[1] != check_fanart and imagenes[1] != item.fanart:
item.extra = imagenes[1] + "|" + imagenes[3] + "|" + imagenes[4] + "|" + imagenes[2]
elif imagenes[2] != check_fanart:
item.extra = imagenes[2] + "|" + imagenes[3] + "|" + imagenes[4] + "|" + imagenes[1]
else:
item.extra = imagenes[3] + "|" + imagenes[4] + "|" + imagenes[2] + "|" + imagenes[1]
elif len(imagenes) == 4:
if imagenes[0] != check_fanart:
item.fanart = imagenes[0]
else:
item.fanart = imagenes[1]
if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart:
item.extra = imagenes[1] + "|" + imagenes[2] + "|" + imagenes[3] + "|" + imagenes[4]
else:
if imagenes[1] != check_fanart and imagenes[1] != item.fanart:
item.extra = imagenes[1] + "|" + imagenes[3] + "|" + imagenes[2]
elif imagenes[2] != check_fanart:
item.extra = imagenes[2] + "|" + imagenes[3] + "|" + imagenes[1]
else:
item.extra = imagenes[3] + "|" + imagenes[2] + "|" + imagenes[1]
elif len(imagenes) == 3:
if imagenes[0] != check_fanart:
item.fanart = imagenes[0]
else:
item.fanart = imagenes[1]
if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart:
item.extra = imagenes[1] + "|" + imagenes[2]
else:
if imagenes[1] != check_fanart and imagenes[1] != item.fanart:
item.extra = imagenes[0] + "|" + imagenes[1]
elif imagenes[2] != check_fanart:
item.extra = imagenes[1] + "|" + imagenes[2]
else:
item.extra = imagenes[1] + "|" + imagenes[1]
elif len(imagenes) == 2:
if imagenes[0] != check_fanart:
item.fanart = imagenes[0]
else:
item.fanart = imagenes[1]
if imagenes[1] != check_fanart and imagenes[1] != item.fanart:
item.extra = imagenes[0] + "|" + imagenes[1]
else:
item.extra = imagenes[1] + "|" + imagenes[0]
elif len(imagenes) == 1:
item.extra = imagenes[0] + "|" + imagenes[0]
else:
item.extra = item.fanart + "|" + item.fanart
images_fanarttv = fanartv(item, id_tvdb, id)
if images_fanarttv:
if item.contentType == "movie":
if images_fanarttv.get("moviedisc"):
item.thumbnail = images_fanarttv.get("moviedisc")[0].get("url")
elif images_fanarttv.get("hdmovielogo"):
item.thumbnail = images_fanarttv.get("hdmovielogo")[0].get("url")
elif images_fanarttv.get("moviethumb"):
item.thumbnail = images_fanarttv.get("moviethumb")[0].get("url")
elif images_fanarttv.get("moviebanner"):
item.thumbnail_ = images_fanarttv.get("moviebanner")[0].get("url")
else:
item.thumbnail = item.thumbnail
else:
if images_fanarttv.get("hdtvlogo"):
item.thumbnail = images_fanarttv.get("hdtvlogo")[0].get("url")
elif images_fanarttv.get("clearlogo"):
item.thumbnail = images_fanarttv.get("clearlogo")[0].get("url")
item.thumb_info = item.thumbnail
if images_fanarttv.get("hdclearart"):
item.thumb_art = images_fanarttv.get("hdclearart")[0].get("url")
elif images_fanarttv.get("tvbanner"):
item.thumb_art = images_fanarttv.get("tvbanner")[0].get("url")
else:
item.thumb_art = item.thumbnail
else:
item.extra = item.extra + "|" + item.thumbnail
def get_year(url):
data = httptools.downloadpage(url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)

View File

@@ -84,13 +84,13 @@ def menu_genero(item):
logger.info()
itemlist = []
httptools.downloadpage("https://kproxy.com/")
url = "https://kproxy.com/doproxy.jsp"
post = "page=%s&x=34&y=14" % urllib.quote(host + "/principal")
response = httptools.downloadpage(url, post, follow_redirects=False).data
url = scrapertools.find_single_match(response, '<meta http-equiv="refresh".*?url=([^"]+)"')
data = httptools.downloadpage(url).data
# TODO: FOR NOW THIS ONLY WORKS ON KODI 17 OR LATER
# httptools.downloadpage("https://kproxy.com/")
# url = "https://kproxy.com/doproxy.jsp"
# post = "page=%s&x=34&y=14" % urllib.quote(host + "/principal")
# response = httptools.downloadpage(url, post, follow_redirects=False).data
# url = scrapertools.find_single_match(response, '<meta http-equiv="refresh".*?url=([^"]+)"')
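# The block above tunneled each request through kproxy.com: a POST to
# doproxy.jsp with the target page, then following the meta-refresh URL it
# returns. It is kept commented in case the direct request below breaks again.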
data = httptools.downloadpage(host + "/principal").data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)
@@ -108,13 +108,13 @@ def menu_genero(item):
def series(item):
logger.info()
itemlist = []
httptools.downloadpage("https://kproxy.com/")
url = "https://kproxy.com/doproxy.jsp"
post = "page=%s&x=34&y=14" % urllib.quote(item.url)
response = httptools.downloadpage(url, post, follow_redirects=False).data
url = scrapertools.find_single_match(response, '<meta http-equiv="refresh".*?url=([^"]+)"')
data = httptools.downloadpage(url).data
# TODO: FOR NOW THIS ONLY WORKS ON KODI 17 OR LATER
# httptools.downloadpage("https://kproxy.com/")
# url = "https://kproxy.com/doproxy.jsp"
# post = "page=%s&x=34&y=14" % urllib.quote(item.url)
# response = httptools.downloadpage(url, post, follow_redirects=False).data
# url = scrapertools.find_single_match(response, '<meta http-equiv="refresh".*?url=([^"]+)"')
data = httptools.downloadpage(item.url).data
lista = jsontools.load(data)
if item.extra == "next":
@@ -165,13 +165,13 @@ def series(item):
def episodios(item):
logger.info()
itemlist = []
httptools.downloadpage("https://kproxy.com/")
url = "https://kproxy.com/doproxy.jsp"
post = "page=%s&x=34&y=14" % urllib.quote(item.url)
response = httptools.downloadpage(url, post, follow_redirects=False).data
url = scrapertools.find_single_match(response, '<meta http-equiv="refresh".*?url=([^"]+)"')
data = httptools.downloadpage(url).data
# TODO: FOR NOW THIS ONLY WORKS ON KODI 17 OR LATER
# httptools.downloadpage("https://kproxy.com/")
# url = "https://kproxy.com/doproxy.jsp"
# post = "page=%s&x=34&y=14" % urllib.quote(item.url)
# response = httptools.downloadpage(url, post, follow_redirects=False).data
# url = scrapertools.find_single_match(response, '<meta http-equiv="refresh".*?url=([^"]+)"')
data = httptools.downloadpage(item.url).data  # must stay active; 'data' is consumed on the next line
data = jsontools.load(data)
@@ -237,13 +237,13 @@ def pelis(item):
logger.info()
itemlist = []
httptools.downloadpage("https://kproxy.com/")
url = "https://kproxy.com/doproxy.jsp"
post = "page=%s&x=34&y=14" % urllib.quote(item.url)
response = httptools.downloadpage(url, post, follow_redirects=False).data
url = scrapertools.find_single_match(response, '<meta http-equiv="refresh".*?url=([^"]+)"')
data = httptools.downloadpage(url).data
# TODO: FOR NOW THIS ONLY WORKS ON KODI 17 OR LATER
# httptools.downloadpage("https://kproxy.com/", add_referer=True)
# url = "https://kproxy.com/doproxy.jsp"
# post = "page=%s&x=34&y=14" % urllib.quote(item.url)
# response = httptools.downloadpage(url, post, follow_redirects=False).data
# url = scrapertools.find_single_match(response, '<meta http-equiv="refresh".*?url=([^"]+)"')
data = httptools.downloadpage(item.url).data
lista = jsontools.load(data)
if item.extra == "next":

View File

@@ -0,0 +1,88 @@
{
"id": "pelisultra",
"name": "PelisUltra",
"active": true,
"adult": false,
"language": ["lat"],
"thumbnail": "https://s17.postimg.org/ft51srhjj/logoultra.png",
"categories": ["movie", "tvshow"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_series",
"type": "bool",
"label": "Incluir en Novedades - Series",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - terror",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - Latino",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "perfil",
"type": "list",
"label": "Perfil de color",
"default": 2,
"enabled": true,
"visible": true,
"lvalues": [
"Perfil 3",
"Perfil 2",
"Perfil 1",
"Ninguno"
]
},
{
"id": "episodios_x_pag",
"type": "list",
"label": "Episodios por página",
"default": 2,
"enabled": true,
"visible": true,
"lvalues": [
"10",
"15",
"20",
"25",
"30"
]
},
{
"id": "temporada_o_todos",
"type": "bool",
"label": "Mostrar temporadas",
"default": true,
"enabled": true,
"visible": true
}
]
}
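For reference, a minimal sketch of how the channel code reads these settings; it assumes, as the channel source below suggests, that list-type settings return the index of the selected "lvalues" entry:

# Hedged sketch; 'pelisultra' is the channel id declared above.
from platformcode import config

perfil_idx = int(config.get_setting('perfil', 'pelisultra'))  # 0-3; index 3 is "Ninguno"
per_page = int(config.get_setting('episodios_x_pag', 'pelisultra')) * 5 + 10  # 10/15/20/25/30
show_seasons = config.get_setting('temporada_o_todos', 'pelisultra')  # bool setting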

View File

@@ -0,0 +1,282 @@
# -*- coding: utf-8 -*-
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
__perfil__ = int(config.get_setting('perfil', 'pelisultra'))
# Fijar perfil de color
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00'],
['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E'],
['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE']]
if __perfil__ < 3:
color1, color2, color3 = perfil[__perfil__]
else:
color1 = color2 = color3 = ""
host="http://www.pelisultra.com"
def mainlist(item):
logger.info()
itemlist = []
item.thumbnail = "https://github.com/master-1970/resources/raw/master/images/genres/0/Directors%20Chair.png"
itemlist.append(item.clone(title="Películas:", folder=False, text_color="0xFFD4AF37", text_bold=True))
itemlist.append(Item(channel = item.channel, title = " Novedades", action = "peliculas", url = host))
itemlist.append(Item(channel = item.channel, title = " Estrenos", action = "peliculas", url = host + "/genero/estrenos/" ))
itemlist.append(Item(channel = item.channel, title = " Por género", action = "genero", url = host + "/genero/" ))
item.thumbnail = "https://github.com/master-1970/resources/raw/master/images/genres/0/TV%20Series.png"
itemlist.append(item.clone(title="Series:", folder=False, text_color="0xFFD4AF37", text_bold=True))
itemlist.append(Item(channel = item.channel, title = " Todas las series", action = "series", url = host + "/series/" ))
itemlist.append(Item(channel = item.channel, title = " Nuevos episodios", action = "nuevos_episodios", url = host + "/episodio/" ))
itemlist.append(Item(channel = item.channel, title = "Buscar...", action = "search", url = host, text_color="red", text_bold=True))
itemlist.append(item.clone(title="Configurar canal...", text_color="green", action="configuracion", text_bold=True))
return itemlist
def configuracion(item):
from platformcode import platformtools
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
return ret
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria in ["peliculas", "latino"]:
item.url = host
itemlist = peliculas(item)
elif categoria == 'terror':
item.url = host + '/genero/terror/'
itemlist = peliculas(item)
elif categoria == "series":
item.url = host + "/episodio/"
itemlist = nuevos_episodios(item)
if "Pagina" in itemlist[-1].title:
itemlist.pop()
# Catch the exception so the "Novedades" channel is not interrupted when one channel fails
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
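# Sketch of how an aggregator (e.g. the global "Novedades" listing) might call
# this entry point; the caller shown here is an assumption, not alfa internals.
def _newest_example():
    from channels import pelisultra
    # A failing category returns [] instead of raising, so the aggregate survives.
    return pelisultra.newest("peliculas") + pelisultra.newest("terror")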
def peliculas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data2 = scrapertools.find_single_match(data,'(?s)<div class="item_1.*?>(.*?)id="paginador">')
# Extract the info
#(?s)class="ml-item.*?a href="([^"]+).*?img src="([^"]+).*?alt="([^"]+).*?class="year">(\d{4})</span>(.*?)<div
patron = '(?s)class="ml-item.*?' # base
patron += 'a href="([^"]+).*?' # url
patron += 'img src="([^"]+).*?' # image
patron += 'alt="([^"]+).*?' # title
patron += 'class="year">(\d{4})' # year
patron += '</span>(.*?)<div' # quality
matches = scrapertools.find_multiple_matches(data2, patron)
scrapertools.printMatches(matches)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear, scrapedquality in matches:
if not "/series/" in scrapedurl:
scrapedquality = scrapertools.find_single_match(scrapedquality, '<span class="calidad2">(.*?)</span>')
itemlist.append(Item(action = "findvideos", channel = item.channel, title = scrapedtitle + " (" + scrapedyear + ") [" + scrapedquality + "]", contentTitle=scrapedtitle, thumbnail = scrapedthumbnail, url = scrapedurl, quality=scrapedquality, infoLabels={'year':scrapedyear}))
else:
if item.action == "search":
itemlist.append(Item(action = "temporadas", channel = item.channel, title = scrapedtitle + " (" + scrapedyear + ")", contentSerieName=scrapedtitle, contentType="tvshow", thumbnail = scrapedthumbnail, url = scrapedurl, infoLabels={'year':scrapedyear}))
# InfoLabels:
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Next page
patron_siguiente='class="pag_b"><a href="([^"]+)'
url_pagina_siguiente=scrapertools.find_single_match(data, patron_siguiente)
if url_pagina_siguiente != "":
pagina = ">>> Pagina: " + scrapertools.find_single_match(url_pagina_siguiente, '\d+')
itemlist.append(Item(channel = item.channel, action = "peliculas", title = pagina, url = url_pagina_siguiente))
return itemlist
def genero(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
# Limit the search to the list that contains the genres
data = scrapertools.find_single_match(data,'(?s)<ul id="menu-generos" class="">(.*?)</ul>')
# Extract the url and the genre
patron = '<a href="(.*?)">(.*?)</a></li>'
matches = scrapertools.find_multiple_matches(data, patron)
# Se quita "Estrenos" de la lista porque tiene su propio menu
matches.pop(0)
for scrapedurl, scrapedtitle in matches:
itemlist.append(Item(action = "peliculas", channel = item.channel, title = scrapedtitle, url = scrapedurl))
return itemlist
def series(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
# Extract the info
patron = '(?s)class="ml-item.*?' # base
patron += 'a href="([^"]+).*?' # url
patron += 'img src="([^"]+).*?' # image
patron += 'alt="([^"]+).*?' # title
patron += 'class="year">(\d{4})' # year
matches = scrapertools.find_multiple_matches(data, patron)
if config.get_setting('temporada_o_todos', 'pelisultra'):
accion="temporadas"
else:
accion="episodios"
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear in matches:
itemlist.append(Item(action = accion, channel = item.channel, title = scrapedtitle + " (" + scrapedyear + ")", contentSerieName=scrapedtitle, contentType="tvshow", thumbnail = scrapedthumbnail, url = scrapedurl, infoLabels={'year':scrapedyear}))
# InfoLabels:
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Next page
patron_siguiente='class="pag_b"><a href="([^"]+)'
url_pagina_siguiente=scrapertools.find_single_match(data, patron_siguiente)
if url_pagina_siguiente != "":
pagina = ">>> Pagina: " + scrapertools.find_single_match(url_pagina_siguiente, '\d+')
itemlist.append(Item(channel = item.channel, action = "series", title = pagina, url = url_pagina_siguiente))
return itemlist
def temporadas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
# Extract the seasons
patron = '<span class="se-t.*?>(.*?)</span>'
matches = scrapertools.find_multiple_matches(data, patron)
# Exception for the series "Amar sin límites" (looked up on TMDB as "limitless")
if item.contentTitle == 'Amar sin límites':
item.contentSerieName = "limitless"
item.infoLabels['tmdb_id']=''
for scrapedseason in matches:
itemlist.append(item.clone(action = "episodios", title = "Temporada " + scrapedseason, contentSeason=scrapedseason))
# InfoLabels:
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def episodios(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '(?s)class="episodiotitle">.*?a href="(.*?)">(.*?)</a>'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle in matches:
# Extract the episode number from the url
numero_capitulo=scrapertools.get_season_and_episode(scrapedurl)
if numero_capitulo != "":
temporada=numero_capitulo.split("x")[0]
capitulo=numero_capitulo.split("x")[1]
else:
temporada="_"
capitulo="_"
if item.contentSeason and str(item.contentSeason) != temporada:
continue
itemlist.append(item.clone(action = "findvideos", title = numero_capitulo + " - " + scrapedtitle.strip(), url = scrapedurl, contentSeason=temporada, contentEpisodeNumber=capitulo))
# if item.contentTitle.startswith('Temporada'):
# if str(item.contentSeason) == temporada:
# itemlist.append(item.clone(action = "findvideos", title = numero_capitulo + " - " + scrapedtitle.strip(), url = scrapedurl, contentSeason=temporada, contentEpisodeNumber=capitulo))
# else:
# itemlist.append(item.clone(action = "findvideos", title = numero_capitulo + " - " + scrapedtitle.strip(), url = scrapedurl, contentSeason=temporada, contentEpisodeNumber=capitulo))
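# 'episodios_x_pag' is a list setting whose lvalues are 10/15/20/25/30, so,
# assuming list settings return the selected index (0-4), index * 5 + 10
# recovers the chosen value.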
episodios_por_pagina = int(config.get_setting('episodios_x_pag', 'pelisultra')) * 5 + 10
if not item.page:
item.page = 0
itemlist_page = itemlist[item.page: item.page + episodios_por_pagina]
if len(itemlist) > item.page + episodios_por_pagina:
itemlist_page.append(item.clone(title = ">>> Pagina siguiente", page = item.page + episodios_por_pagina))
# InfoLabels:
tmdb.set_infoLabels_itemlist(itemlist_page, seekTmdb=True)
return itemlist_page
def nuevos_episodios(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '(?s)<td class="bb">.*?' # base
patron += '<a href="(.*?)">' # url
patron += '(.*?)</a>.*?' # nombre_serie
patron += '<img src="(.*?)>.*?' # imagen
patron += '<h2>(.*?)</h2>' # titulo
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedseriename, scrapedthumbnail, scrapedtitle in matches:
numero_capitulo=scrapertools.get_season_and_episode(scrapedurl)
if numero_capitulo != "":
temporada=numero_capitulo.split("x")[0]
capitulo=numero_capitulo.split("x")[1]
else:
temporada="_"
capitulo="_"
itemlist.append(Item(channel = item.channel, action = "findvideos", title = scrapedseriename +": " + numero_capitulo + " - " + scrapedtitle.strip(), url = scrapedurl, thumbnail = scrapedthumbnail, contentSerieName=scrapedseriename, contentSeason=temporada, contentEpisodeNumber=capitulo))
# InfoLabels:
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Next page
patron_siguiente='class="pag_b"><a href="([^"]+)'
url_pagina_siguiente=scrapertools.find_single_match(data, patron_siguiente)
if url_pagina_siguiente != "":
pagina = ">>> Pagina: " + scrapertools.find_single_match(url_pagina_siguiente, '\d+')
itemlist.append(Item(channel = item.channel, action = "nuevos_episodios", title = pagina, url = url_pagina_siguiente))
return itemlist
def search(item, texto):
logger.info()
itemlist = []
texto = texto.replace(" ", "+")
try:
item.url = host + "/?s=%s" % texto
itemlist.extend(peliculas(item))
return itemlist
# Catch the exception so the global search is not interrupted when one channel fails
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []

View File

@@ -1,73 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [
"http://allmyvideos.net/embed-theme.html",
"http://allmyvideos.net/embed-jquery.html",
"http://allmyvideos.net/embed-s.html",
"http://allmyvideos.net/embed-images.html",
"http://allmyvideos.net/embed-faq.html",
"http://allmyvideos.net/embed-embed.html",
"http://allmyvideos.net/embed-ri.html",
"http://allmyvideos.net/embed-d.html",
"http://allmyvideos.net/embed-css.html",
"http://allmyvideos.net/embed-js.html",
"http://allmyvideos.net/embed-player.html",
"http://allmyvideos.net/embed-cgi.html",
"http://allmyvideos.net/embed-i.html",
"http://allmyvideos.net/images",
"http://allmyvideos.net/theme",
"http://allmyvideos.net/xupload",
"http://allmyvideos.net/s",
"http://allmyvideos.net/js",
"http://allmyvideos.net/jquery",
"http://allmyvideos.net/login",
"http://allmyvideos.net/make",
"http://allmyvideos.net/i",
"http://allmyvideos.net/faq",
"http://allmyvideos.net/tos",
"http://allmyvideos.net/premium",
"http://allmyvideos.net/checkfiles",
"http://allmyvideos.net/privacy",
"http://allmyvideos.net/refund",
"http://allmyvideos.net/links",
"http://allmyvideos.net/contact"
],
"patterns": [
{
"pattern": "allmyvideos.net/(?:embed-)?([a-z0-9]+)",
"url": "http://allmyvideos.net/\\1"
}
]
},
"free": true,
"id": "allmyvideos",
"name": "allmyvideos",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "Incluir en lista negra",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "Incluir en lista de favoritos",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "server_allmyvideos.png"
}

View File

@@ -1,62 +0,0 @@
# -*- coding: utf-8 -*-
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
# Missing / deleted example: http://allmyvideos.net/8jcgbrzhujri
data = scrapertools.cache_page("http://anonymouse.org/cgi-bin/anon-www.cgi/" + page_url)
if "<b>File Not Found</b>" in data or "<b>Archivo no encontrado</b>" in data or '<b class="err">Deleted' in data \
or '<b class="err">Removed' in data or '<font class="err">No such' in data:
return False, "No existe o ha sido borrado de allmyvideos"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=%s" % page_url)
# Normalize the URL
videoid = scrapertools.get_match(page_url, "http://allmyvideos.net/([a-z0-9A-Z]+)")
page_url = "http://amvtv.net/embed-" + videoid + "-728x400.html"
data = scrapertools.cachePage(page_url)
if "Access denied" in data:
geobloqueo = True
else:
geobloqueo = False
if geobloqueo:
# url = "http://www.anonymousbrowser.xyz/hide.php"
# post = "go=%s" % page_url
url = "http://www.videoproxy.co/hide.php"
post = "go=%s" % page_url
location = scrapertools.get_header_from_response(url, post=post, header_to_get="location")
# url = "http://www.anonymousbrowser.xyz/" + location
url = "http://www.videoproxy.co/" + location
data = scrapertools.cachePage(url)
# Extract the URL
media_url = scrapertools.find_single_match(data, '"file" : "([^"]+)",')
video_urls = []
if media_url != "":
if geobloqueo:
# url = "http://www.anonymousbrowser.xyz/hide.php"
url = "http://www.videoproxy.co/hide.php"
post = "go=%s" % media_url
location = scrapertools.get_header_from_response(url, post=post, header_to_get="location")
# media_url = "http://www.anonymousbrowser.xyz/" + location + "&direct=false"
media_url = "http://www.videoproxy.co/" + location + "&direct=false"
else:
media_url += "&direct=false"
video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [allmyvideos]", media_url])
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
return video_urls

View File

@@ -13,21 +13,13 @@ from platformcode import config, logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url, cookies=False).data
if 'file was deleted' in data:
return False, "[FlashX] El archivo no existe o ha sido borrado"
elif 'File Not Found' in data:
return False, "[FlashX] El archivo no existe"
elif 'Video is processing now' in data:
return False, "[FlashX] El archivo se está procesando"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
pfxfx = ""
headers = {'Host': 'www.flashx.tv',
headers = {'Host': 'www.flashx.sx',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.137 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.5',
@@ -35,11 +27,11 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
'Cookie': ''}
data = httptools.downloadpage(page_url, cookies=False).data
data = data.replace("\n","")
cgi_counter = scrapertools.find_single_match(data, """(?is)src=.(https://www.flashx.tv/counter.cgi.*?[^(?:'|")]+)""")
cgi_counter = scrapertools.find_single_match(data, """(?is)src=.(https://www.flashx.sx/counter.cgi.*?[^(?:'|")]+)""")
cgi_counter = cgi_counter.replace("%0A","").replace("%22","")
playnow = scrapertools.find_single_match(data, 'https://www.flashx.tv/dl[^"]+')
playnow = scrapertools.find_single_match(data, 'https://www.flashx.sx/dl[^"]+')
# To obtain the f and fxfx values
js_fxfx = "https://www." + scrapertools.find_single_match(data.replace("//","/"), """(?is)(flashx.tv/js\w+/c\w+.*?[^(?:'|")]+)""")
js_fxfx = "https://www." + scrapertools.find_single_match(data.replace("//","/"), """(?is)(flashx.sx/js\w+/c\w+.*?[^(?:'|")]+)""")
data_fxfx = httptools.downloadpage(js_fxfx).data
mfxfx = scrapertools.find_single_match(data_fxfx, 'get.*?({.*?})').replace("'","").replace(" ","")
matches = scrapertools.find_multiple_matches(mfxfx, '(\w+):(\w+)')
@@ -49,19 +41,20 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
logger.info("mfxfxfx2= %s" %pfxfx)
if pfxfx == "":
pfxfx = "ss=yes&f=fail&fxfx=6"
coding_url = 'https://www.flashx.tv/flashx.php?%s' %pfxfx
coding_url = 'https://www.flashx.sx/flashx.php?%s' %pfxfx
# {f: 'y', fxfx: '6'}
flashx_id = scrapertools.find_single_match(data, 'name="id" value="([^"]+)"')
fname = scrapertools.find_single_match(data, 'name="fname" value="([^"]+)"')
hash_f = scrapertools.find_single_match(data, 'name="hash" value="([^"]+)"')
imhuman = scrapertools.find_single_match(data, "value='([^']+)' name='imhuman'")
bloque = scrapertools.find_single_match(data, '(?s)Form method="POST" action(.*?)<!--')
flashx_id = scrapertools.find_single_match(bloque, 'name="id" value="([^"]+)"')
fname = scrapertools.find_single_match(bloque, 'name="fname" value="([^"]+)"')
hash_f = scrapertools.find_single_match(bloque, 'name="hash" value="([^"]+)"')
imhuman = scrapertools.find_single_match(bloque, "value='([^']+)' name='imhuman'")
post = 'op=download1&usr_login=&id=%s&fname=%s&referer=&hash=%s&imhuman=%s' % (
flashx_id, urllib.quote(fname), hash_f, imhuman)
wait_time = scrapertools.find_single_match(data, "<span id='xxc2'>(\d+)")
headers['Referer'] = "https://www.flashx.tv/"
headers['Referer'] = "https://www.flashx.sx/"
headers['Accept'] = "*/*"
headers['Host'] = "www.flashx.tv"
headers['Host'] = "www.flashx.sx"
headers['X-Requested-With'] = 'XMLHttpRequest'
# These 2 files must be downloaded; otherwise an error is shown
@@ -76,7 +69,6 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
headers.pop('X-Requested-With')
headers['Content-Type'] = 'application/x-www-form-urlencoded'
data = httptools.downloadpage(playnow, post).data
# If a warning pops up, the verification page is loaded and then the initial one
# LICENSE GPL3, from alfa-addon: https://github.com/alfa-addon/ ADDING THESE LINES IS MANDATORY
if "You try to access this video with Kodi" in data:

View File

@@ -17,7 +17,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
page_url = "http://www.youtube.com/watch?v=%s" % page_url
logger.info(" page_url->'%s'" % page_url)
video_id = scrapertools.find_single_match(page_url, 'v=([A-z0-9_-]{11})')
video_id = scrapertools.find_single_match(page_url, '(?:v=|embed/)([A-Za-z0-9_-]{11})')
video_urls = extract_videos(video_id)
video_urls.reverse()
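# Quick standalone check of the widened id pattern; the sample URLs are
# illustrative, not taken from any channel.
def _video_id_example():
    import re
    pattern = r'(?:v=|embed/)([A-Za-z0-9_-]{11})'
    urls = ("http://www.youtube.com/watch?v=dQw4w9WgXcQ",
            "http://www.youtube.com/embed/dQw4w9WgXcQ")
    return [re.search(pattern, u).group(1) for u in urls]  # both -> 'dQw4w9WgXcQ'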