Merge branch 'master' into Fixes

Author: Alfa
Date: 2018-02-23 14:18:07 -05:00
Committed by: GitHub
17 changed files with 310 additions and 3156 deletions

View File

@@ -4,6 +4,7 @@ import re
from channels import renumbertools
from core import httptools
from core import servertools
from core import jsontools
from core import scrapertools
from core.item import Item
@@ -11,7 +12,7 @@ from platformcode import platformtools, config, logger
__modo_grafico__ = config.get_setting('modo_grafico', 'animemovil')
__perfil__ = int(config.get_setting('perfil', "animemovil"))
__perfil__ = ''
# Set the color profile
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFFFFD700'],
@@ -31,10 +32,12 @@ def mainlist(item):
itemlist.append(Item(channel=item.channel, action="recientes", title="Episodios Recientes", thumbnail=item.thumbnail,
url=host, text_color=color1, contentType="tvshow", extra="recientes"))
itemlist.append(Item(channel=item.channel, action="listado", title="Animes", thumbnail=item.thumbnail,
url="%s/_API/?src=animesRecientes&offset=0" % host, text_color=color1))
itemlist.append(Item(channel=item.channel, action="emision", title="En emisión", thumbnail=item.thumbnail,
url="%s/anime/emision" % host, text_color=color2, contentType="tvshow"))
itemlist.append(Item(channel=item.channel, action="listado", title="Anime", thumbnail=item.thumbnail,
url=host+'/api/buscador?q=&letra=ALL&genero=ALL&estado=2&offset=0&limit=20', text_color=color1, contentType="tvshow", extra="recientes"))
itemlist.append(Item(channel=item.channel, action="list_by_json", title="En emisión", thumbnail=item.thumbnail,
text_color=color2, contentType="tvshow"))
itemlist.append(Item(channel=item.channel, action="indices", title="Índices", thumbnail=item.thumbnail,
text_color=color2))
@@ -55,14 +58,8 @@ def openconfig(item):
def search(item, texto):
item.url = "%s/?s=%s" % (host, texto.replace(" ", "+"))
try:
return recientes(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
item.url = "%s/api/buscador?q=%s&letra=ALL&genero=ALL&estado=2&offset=0&limit=30" % (host, texto.replace(" ", "+"))
return list_by_json(item)
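Note: the rewritten search swaps HTML scraping for the site's JSON endpoint and renders through list_by_json. A minimal sketch of the same URL construction, assuming the endpoint takes exactly the parameters used above (build_buscador_url is a hypothetical helper, not part of the channel):

import urllib

def build_buscador_url(host, query, letra="ALL", genero="ALL", estado=2, offset=0, limit=30):
    # urlencode escapes the free-text query; the defaults mirror the
    # literal URL above and are assumptions about the API.
    params = urllib.urlencode({"q": query, "letra": letra, "genero": genero,
                               "estado": estado, "offset": offset, "limit": limit})
    return "%s/api/buscador?%s" % (host, params)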
def recientes(item):
@@ -71,7 +68,9 @@ def recientes(item):
itemlist = []
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data, '<ul class="emision"(.*?)</ul>')
data = re.sub(r'\n|\s{2,}','', data)
bloque = scrapertools.find_single_match(data, '<ul class="hover">(.*?)</ul>')
patron = '<li><a href="([^"]+)" title="([^"]+)".*?src="([^"]+)"'
matches = scrapertools.find_multiple_matches(bloque, patron)
for url, title, thumb in matches:
@@ -116,13 +115,13 @@ def recientes(item):
def listado(item):
logger.info()
itemlist = []
data = jsontools.load(httptools.downloadpage(item.url).data)
status = data.get('status')
data= data.get('result')
for it in data.get("items", []):
scrapedtitle = it["title"]
url = "%s/%s" % (host, it["url"])
thumb = "http://img.animemovil.com/w440-h250-c/%s" % it["img"]
url = "%s/%s/" % (host, it["slug"])
thumb = 'http://media.animemovil.com/animes/%s/wallpaper_small.jpg' % it['id']
title = re.sub(r"(?i) Ova| Especiales| \(Pelicula[s]*\)| \(Película[s]*\)| Sub| Español| Peliculas| Audio| Latino", "", scrapedtitle)
tipo = "tvshow"
@@ -132,22 +131,22 @@ def listado(item):
tipo = "movie"
show = ""
action = "peliculas"
infoLabels = {'filtro': {"original_language": "ja"}.items()}
itemlist.append(item.clone(action=action, title=scrapedtitle, url=url, thumbnail=thumb, text_color=color3,
contentTitle=title, contentSerieName=show, infoLabels=infoLabels,
context=renumbertools.context(item), contentType=tipo))
try:
from core import tmdb
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
except:
pass
if data["buttom"] and itemlist:
offset = int(scrapertools.find_single_match(item.url, 'offset=(\d+)')) + 1
if status and itemlist:
offset = scrapertools.find_single_match(item.url, 'offset=(\d+)')
if offset:
offset = int(offset) + 2
else:
offset = 0
url = re.sub(r'offset=\d+', 'offset=%s' % offset, item.url)
itemlist.append(Item(channel=item.channel, action="listado", url=url, title=">> Página Siguiente",
thumbnail=item.thumbnail, text_color=color2))
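Note: both listado and list_by_json page through the API by rewriting the offset parameter of the current URL. The same pattern in isolation (next_offset_url is a hypothetical name; the +2 step copies the code above and presumably matches the API's paging):

import re

def next_offset_url(url, step=2):
    # Bump the offset query parameter; fall back to 0 when absent
    # (as above, re.sub is then a no-op since there is nothing to replace).
    match = re.search(r'offset=(\d+)', url)
    offset = int(match.group(1)) + step if match else 0
    return re.sub(r'offset=\d+', 'offset=%s' % offset, url)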
@@ -160,68 +159,48 @@ def indices(item):
itemlist = []
if "Índices" in item.title:
itemlist.append(item.clone(title="Por Género", url="%s/anime/generos/" % host))
itemlist.append(item.clone(title="Por Letra", url="%s/anime/" % host))
itemlist.append(item.clone(action="completo", title="Lista completa de Animes",
url="%s/anime/lista/" % host))
itemlist.append(item.clone(title="Por Género", url="%s/anime" % host))
itemlist.append(item.clone(title="Por Letra", url="%s/anime" % host))
itemlist.append(item.clone(action="list_by_json", title="Lista completa de Animes",
url="%s/api/buscador?q=&letra=ALL&genero=ALL&estado=2&offset=0&limit=20" % host))
else:
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data, '<div class="letras">(.*?)</div>')
data = re.sub('\n|\s{2,}', '', data)
if 'Letra' in item.title:
bloque = scrapertools.find_single_match(data, '<select name="letra"(.*?)</select>')
patron = '<option value="(\w)"'
elif 'Género' in item.title:
bloque = scrapertools.find_single_match(data, '<select name="genero"(.*?)</select>')
patron = '<option value="(\d+.*?)/'
patron = '<a title="([^"]+)"'
matches = scrapertools.find_multiple_matches(bloque, patron)
for title in matches:
if "Letra" in item.title:
url = "%s/_API/?src=animesLetra&offset=0&letra=%s" % (host, title)
url = '%s/api/buscador?q=&letra=%s&genero=ALL&estado=2&offset=0&limit=20' % (host, title)
else:
url = "%s/_API/?src=animesGenero&offset=0&genero=%s" % (host, title)
itemlist.append(item.clone(action="listado", url=url, title=title))
value = scrapertools.find_single_match(title, '(\d+)"')
title = scrapertools.find_single_match(title, '\d+">(.*?)<')
url = '%s/api/buscador?q=&letra=ALL&genero=%s&estado=2&offset=0&limit=20' % (host, value)
itemlist.append(item.clone(action="list_by_json", url=url, title=title))
return itemlist
def completo(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data, '<ul class="listadoAnime">(.*?)</ul>')
patron = '<li><a href="([^"]+)" title="([^"]+)".*?src="([^"]+)"'
matches = scrapertools.find_multiple_matches(bloque, patron)
for url, title, thumb in matches:
url = host + url
scrapedtitle = title
thumb = thumb.replace("s90-c", "w440-h250-c")
title = re.sub(r"(?i) Ova| Especiales| \(Pelicula[s]*\)| \(Película[s]*\)| Sub Español| Peliculas", "", scrapedtitle)
tipo = "tvshow"
show = title
action = "episodios"
if url.endswith("-pelicula/") or url.endswith("-pelicula"):
tipo = "movie"
show = ""
action = "peliculas"
infoLabels = {'filtro': {"original_language": "ja"}.items()}
itemlist.append(Item(channel=item.channel, action=action, title=scrapedtitle, url=url, thumbnail=thumb,
text_color=color3, contentTitle=title, contentSerieName=show, extra="completo",
context=renumbertools.context(item), contentType=tipo, infoLabels=infoLabels))
return itemlist
def episodios(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
show = scrapertools.find_single_match(data, '<title>\s*([^<]+)\s*</title>')
data = re.sub('\n|\s{2,}', '', data)
show = scrapertools.find_single_match(data, '<div class="x-title">(.*?)</div>')
show = re.sub(r"(?i) Ova| Especiales| \(Pelicula[s]*\)| \(Película[s]*\)| Sub| Español| Peliculas| Audio| Latino", "", show)
if not item.infoLabels["plot"]:
item.infoLabels["plot"] = scrapertools.find_single_match(data, '<div class="InfoSipnosis">.*?<p>(.*?)</p>')
item.infoLabels["plot"] = scrapertools.find_single_match(data, '<div class="x-sinopsis">\s*(.*?)</div>')
bloque = scrapertools.find_single_match(data, 'ul class="lista"(.*?)</ul>')
bloque = scrapertools.find_single_match(data, '<ul class="list"(.*?)</ul>')
matches = scrapertools.find_multiple_matches(bloque, '<li><a href="([^"]+)" title="([^"]+)"')
for url, title in matches:
url = host + url
@@ -252,56 +231,44 @@ def episodios(item):
return itemlist
def peliculas(item):
def list_by_json(item):
logger.info()
itemlist = []
repeat = 1
status = False
if item.url =='':
item.url = host+"/api/buscador?limit=30&estado=1&dia=%s"
repeat = 6
for element in range(0,repeat):
if repeat != 1:
data = jsontools.load(httptools.downloadpage(item.url % element).data)
else:
data = jsontools.load(httptools.downloadpage(item.url).data)
if item.extra == "completo":
try:
from core import tmdb
tmdb.set_infoLabels_item(item, __modo_grafico__)
except:
pass
data = httptools.downloadpage(item.url).data
if not item.infoLabels["plot"]:
item.infoLabels["plot"] = scrapertools.find_single_match(data, '<div class="InfoSipnosis">.*?<p>(.*?)</p>')
bloque = scrapertools.find_single_match(data, 'ul class="lista"(.*?)</ul>')
matches = scrapertools.find_multiple_matches(bloque, '<li><a href="([^"]+)" title="([^"]+)"')
if len(matches) == 1:
item.url = host + matches[0][0]
itemlist = findvideos(item)
else:
for url, title in matches:
itemlist.append(item.clone(action="findvideos", title=title, url=url, extra=""))
return itemlist
def emision(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
bloques = scrapertools.find_multiple_matches(data, '<div class="horario">.*?</i>\s*(.*?)</span>(.*?)</ul>')
patron = '<li><a href="([^"]+)" title="([^"]+)".*?src="([^"]+)"'
for dia, b in bloques:
matches = scrapertools.find_multiple_matches(b, patron)
if matches:
itemlist.append(item.clone(action="", title=dia, text_color=color1))
for url, title, thumb in matches:
url = host + url
scrapedtitle = " %s" % title
title = re.sub(r"(?i) Ova| Especiales| \(Pelicula[s]*\)| \(Película[s]*\)| Sub Español| Peliculas", "", title)
if not thumb.startswith("http"):
thumb = "http:%s" % thumb
status = data.get('status')
json_data = data.get('result')
elem_data = json_data['items']
for item_data in elem_data:
url = '%s/%s/' % (host, item_data['slug'])
title = item_data['title']
title = re.sub(r"(?i) Ova| Especiales| \(Pelicula[s]*\)| \(Película[s]*\)| Sub Español| Peliculas", "",
title)
thumb = 'http://media.animemovil.com/animes/%s/wallpaper_small.jpg' % item_data['id']
infoLabels = {'filtro': {"original_language": "ja"}.items()}
itemlist.append(item.clone(action="episodios", title=scrapedtitle, url=url, thumbnail=thumb, text_color=color3,
contentTitle=title, contentSerieName=title, extra="recientes",
context=renumbertools.context(item), infoLabels=infoLabels))
itemlist.append(
item.clone(action="episodios", title=title, url=url, thumbnail=thumb, text_color=color3,
contentTitle=title, contentSerieName=title, extra="recientes",
context=renumbertools.context(item), infoLabels=infoLabels))
if status and itemlist:
offset = scrapertools.find_single_match(item.url, 'offset=(\d+)')
if offset:
offset = int(offset) + 2
else:
offset = 0
url = re.sub(r'offset=\d+', 'offset=%s' % offset, item.url)
itemlist.append(Item(channel=item.channel, action="listado", url=url, title=">> Página Siguiente",
thumbnail=item.thumbnail, text_color=color2))
return itemlist
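Note: when the "En emisión" entry calls list_by_json without a URL, the function loops dia=0..5 against the schedule endpoint, one request per value (repeat = 6 above). Condensed, the branch amounts to this (a sketch in the module's context; reading dia as a weekday index is an assumption):

# One schedule request per dia value 0..5 (module context: host,
# httptools and jsontools come from the imports at the top).
for dia in range(6):
    url = host + "/api/buscador?limit=30&estado=1&dia=%s" % dia
    data = jsontools.load(httptools.downloadpage(url).data)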
@@ -310,80 +277,69 @@ def findvideos(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r'\n|\s{2,}', '', data)
id = scrapertools.find_single_match(data, '"id":"([^"]+)"')
bloque = scrapertools.find_single_match(data, 'ul class="controles">(.*?)</ul>')
patron = '<li title="([^"]+)" id="[^"]*" host="([^"]+)">'
matches = scrapertools.find_multiple_matches(data, patron)
for title, server in matches:
if title == "Vizard":
continue
title = "%s - %s" % (title, item.title)
post = "host=%s&id=%s" % (server, id)
itemlist.append(item.clone(action="play", url="http://server-2-stream.animemovil.com/V2/", title=title,
post=post))
akiba_url = scrapertools.find_single_match(data, '<div class="x-link"><a href="(.*?)"')
url = httptools.downloadpage('http:'+akiba_url, follow_redirects=False).headers.get('location')
title = '%s (%s)' % (item.title, 'akiba')
itemlist.append(item.clone(title=title, url=url, action='play'))
downl = scrapertools.find_single_match(data, '<div class="descargarCap">.*?<a href="([^"]+)"')
if downl:
downl = downl.replace("&amp;", "&")
itemlist.append(item.clone(action="play", title="Descarga - %s" % item.title, url=downl, server="directo"))
if not itemlist:
itemlist.append(Item(channel=item.channel, title="No hay vídeos disponibles", action=""))
if item.extra == "recientes":
url = scrapertools.find_single_match(data, '<a class="CapList".*?href="([^"]+)"')
if url:
url = host + url
itemlist.append(item.clone(action="episodios", title="Ir a lista de capítulos", url=url, text_color=color1))
elif item.contentType == "movie" and config.get_library_support():
if "No hay vídeos disponibles" not in itemlist[0].title:
itemlist.append(Item(channel=item.channel, title="Añadir película a la biblioteca", url=item.url,
action="add_pelicula_to_library", contentTitle=item.contentTitle, text_color=color4,
thumbnail=item.thumbnail, fanart=item.fanart))
info = scrapertools.find_single_match(data, 'episodio_info=(.*?);')
dict_info = jsontools.load(info)
return itemlist
servers = dict_info['stream']['servers']
id = dict_info['id']
access_point = dict_info['stream']['accessPoint']
expire = dict_info['stream']['expire']
callback = dict_info['stream']['callback']
signature = dict_info['stream']['signature']
last_modify = dict_info['stream']['last_modify']
for server in servers:
stream_info = 'http:%s/%s/%s?expire=%s&callback=%s&signature=%s&last_modify=%s' % \
(access_point, id, server, expire, callback, signature, last_modify)
def play(item):
logger.info()
dict_stream = jsontools.load(httptools.downloadpage(stream_info).data)
if item.server:
return [item]
if dict_stream['status']:
kind = dict_stream['result']['kind']
try:
if kind == 'iframe':
url = dict_stream['result']['src']
title = '%s (%s)' % (item.title, server)
elif kind == 'jwplayer':
url_style = dict_stream['result']['setup']
if server != 'rin':
itemlist = []
if 'playlist' in url_style:
part = 1
for media_list in url_style['playlist']:
url = media_list['file']
title = '%s (%s) - parte %s' % (item.title, server, part)
itemlist.append(item.clone(title=title, url=url, action='play'))
part += 1
else:
url = url_style['file']
title = '%s (%s)' % (item.title, server)
else:
src_list = url_style['sources']
for source in src_list:
url = source['file']
quality = source['label']
title = '%s [%s](%s)' % (item.title, quality, server)
itemlist.append(item.clone(title=title, url=url, action='play'))
data = jsontools.load(httptools.downloadpage(item.url, item.post).data)
if data["jwplayer"] == False:
content = data["eval"]["contenido"]
urls = scrapertools.find_multiple_matches(content, 'file\s*:\s*"([^"]+)"')
if not urls:
urls = scrapertools.find_multiple_matches(content, '"GET","([^"]+)"')
for url in urls:
if "mediafire" in url:
data_mf = httptools.downloadpage(url).data
url = scrapertools.find_single_match(data_mf, 'kNO\s*=\s*"([^"]+)"')
ext = url[-4:]
itemlist.insert(0, ["%s [directo]" % ext, url])
else:
if data["jwplayer"].get("sources"):
for source in data["jwplayer"]["sources"]:
label = source.get("label", "")
ext = source.get("type", "")
if ext and "/" in ext:
ext = ".%s " % ext.rsplit("/", 1)[1]
url = source.get("file")
if "server-3-stream" in url:
url = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location")
itemlist.insert(0, ["%s%s [directo]" % (ext, label), url])
elif data["jwplayer"].get("file"):
label = data["jwplayer"].get("label", "")
url = data["jwplayer"]["file"]
ext = data["jwplayer"].get("type", "")
if ext and "/" in ext:
ext = "%s " % ext.rsplit("/", 1)[1]
if "server-3-stream" in url:
url = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location")
itemlist.insert(0, [".%s%s [directo]" % (ext, label), url])
elif kind == 'javascript':
if 'jsCode' in dict_stream['result']:
jscode = dict_stream['result']['jsCode']
url = scrapertools.find_single_match(jscode, 'xmlhttp.open\("GET", "(.*?)"')
title = '%s (%s)' % (item.title, server)
if url != '':
itemlist.append(item.clone(title=title, url=url, action='play'))
except:
pass
itemlist = servertools.get_servers_itemlist(itemlist)
return itemlist
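Note: the new findvideos parses an episodio_info JSON blob out of the page, builds one signed access-point URL per server, then branches on the kind the endpoint reports (iframe, jwplayer, javascript). The URL construction on its own, assuming episodio_info always carries the fields read above (resolve_streams is a hypothetical helper):

def resolve_streams(dict_info):
    # Yield (server, signed URL) pairs; every server shares the same
    # expire/callback/signature/last_modify parameters.
    stream = dict_info['stream']
    for server in stream['servers']:
        yield server, 'http:%s/%s/%s?expire=%s&callback=%s&signature=%s&last_modify=%s' % (
            stream['accessPoint'], dict_info['id'], server,
            stream['expire'], stream['callback'],
            stream['signature'], stream['last_modify'])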
@@ -392,7 +348,7 @@ def newest(categoria):
logger.info()
item = Item()
try:
item.url = "http://skanime.net/"
item.url = host
item.extra = "novedades"
itemlist = recientes(item)
# Catch the exception so a failing channel does not interrupt the "newest" channel

View File

@@ -15,7 +15,6 @@ from channelselector import get_thumb
IDIOMAS = {'latino': 'Latino', 'castellano': 'Español', 'portugues': 'Portugues'}
list_language = IDIOMAS.values()
logger.debug('lista_language: %s' % list_language)
list_quality = ['1080p', '720p', '480p', '360p', '240p', 'default']
list_servers = [
@@ -115,7 +114,7 @@ def submenu(item):
title="Buscar",
action="search",
thumbnail=get_thumb('search', auto=True),
url=host + '/apiseries/seriebyword/',
url=host + '/?s=',
fanart='https://s30.postimg.org/pei7txpa9/buscar.png',
host=item.host,
))
@@ -351,15 +350,12 @@ def get_urls(item, link):
data = httptools.downloadpage(url, post=post, headers=headers).data
dict_data = jsontools.load(data)
logger.debug(dict_data['link'])
logger.debug(data)
return dict_data['link']
def play(item):
logger.info()
itemlist = []
logger.debug('item: %s' % item)
if 'juicyapi' not in item.url:
itemlist = servertools.find_video_items(data=item.url)
@@ -398,72 +394,9 @@ def newest(categoria):
return itemlist
def busqueda(item):
logger.info()
itemlist = []
# Download the page
data = httptools.downloadpage(item.url).data
from core import jsontools
data = jsontools.load(data)
for entry in data["results"]:
title = entry["richSnippet"]["metatags"]["ogTitle"]
url = entry["url"]
plot = entry["content"]
plot = scrapertools.htmlclean(plot)
thumbnail = entry["richSnippet"]["metatags"]["ogImage"]
title = scrapertools.find_single_match(title, '(.*?) \(.*?\)')
year = re.sub(r'.*?\((\d{4})\)', '', title)
title = year
fulltitle = title
new_item = item.clone(action="findvideos",
title=title,
fulltitle=fulltitle,
url=url,
thumbnail=thumbnail,
contentTitle=title,
contentType="movie",
plot=plot,
infoLabels={'year': year, 'sinopsis': plot}
)
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
actualpage = int(scrapertools.find_single_match(item.url, 'start=(\d+)'))
totalresults = int(data["cursor"]["resultCount"])
if actualpage + 20 <= totalresults:
url_next = item.url.replace("start=" + str(actualpage), "start=" + str(actualpage + 20))
itemlist.append(
Item(channel=item.channel,
action="busqueda",
title=">> Página Siguiente",
url=url_next
))
return itemlist
def search(item, texto):
logger.info()
data = httptools.downloadpage(host).data
cx = scrapertools.find_single_match(data, 'name="cx" value="(.*?)"')
texto = texto.replace(" ", "%20")
item.url = "https://www.googleapis.com/customsearch/v1element?key=AIzaSyCVAXiUzRYsML1Pv6RwSG1gunmMikTzQqY&rsz" \
"=filtered_cse&num=20&hl=es&sig=0c3990ce7a056ed50667fe0c3873c9b6&cx=%s&q=%s&sort=&googlehost=www" \
".google.com&start=0" % (cx, texto)
try:
return busqueda(item)
# Catch the exception so a failing channel does not interrupt the global search
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
texto = texto.replace(" ", "-")
item.url = host + '/?s=' + texto
if texto != '':
return peliculas(item)

View File

@@ -53,7 +53,7 @@ def list_all(item):
else:
patron = '<article id=post-.*?<a href=(.*?)>.*?<img src=(.*?) alt=.*?'
patron += '<h2 class=Title>(.*?)<\/h2>.*?<span class=Year>(.*?)<\/span>.*?Qlty>(.*?)<\/span>'
data = get_source(item.url)
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, year, quality in matches:

View File

@@ -1,7 +1,7 @@
{
"id": "cuelgame",
"name": "Cuelgame",
"active": false,
"active": true,
"adult": false,
"language": ["cast"],
"thumbnail": "cuelgame.png",
@@ -31,4 +31,4 @@
"visible": true
}
]
}
}

File diff suppressed because it is too large

View File

@@ -3,83 +3,30 @@
import os
import re
import urllib
from threading import Thread
import xbmc
import xbmcgui
from core import httptools
from core import scrapertools
from core import tmdb
from core.item import Item
from core.scrapertools import decodeHtmlentities as dhe
from platformcode import config, logger
header = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0'}
ACTION_SHOW_FULLSCREEN = 36
ACTION_GESTURE_SWIPE_LEFT = 511
ACTION_SELECT_ITEM = 7
ACTION_PREVIOUS_MENU = 10
ACTION_MOVE_LEFT = 1
ACTION_MOVE_RIGHT = 2
ACTION_MOVE_DOWN = 4
ACTION_MOVE_UP = 3
OPTION_PANEL = 6
OPTIONS_OK = 5
host = "http://www.divxtotal.co"
__modo_grafico__ = config.get_setting('modo_grafico', "divxtotal")
# For Bing searches, to avoid bans
def browser(url):
import mechanize
# Use the mechanize Browser to work around problems with Bing search
br = mechanize.Browser()
# Browser options
br.set_handle_equiv(False)
br.set_handle_gzip(True)
br.set_handle_redirect(True)
br.set_handle_referer(False)
br.set_handle_robots(False)
# Follows refresh 0 but does not hang on refresh > 0
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
# Want debugging messages?
# br.set_debug_http(True)
# br.set_debug_redirects(True)
# br.set_debug_responses(True)
# User-Agent (this is cheating, ok?)
# br.addheaders = [('User-agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/600.7.12 (KHTML, like Gecko) Version/7.1.7 Safari/537.85.16')]
# br.addheaders =[('Cookie','SRCHD=AF=QBRE; domain=.bing.com; expires=25 de febrero de 2018 13:00:28 GMT+1; MUIDB=3B942052D204686335322894D3086911; domain=www.bing.com;expires=24 de febrero de 2018 13:00:28 GMT+1')]
# Open some site, let's pick a random one, the first that pops in mind
r = br.open(url)
response = r.read()
print response
if "img,divreturn" in response:
r = br.open("http://ssl-proxy.my-addr.org/myaddrproxy.php/" + url)
print "prooooxy"
response = r.read()
return response
api_key = "2e2160006592024ba87ccdf78c28f49f"
api_fankey = "dffe90fba4d02c199ae7a9e71330c987"
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(item.clone(title="[COLOR orange][B]Películas[/B][/COLOR]", action="scraper",
url="http://www.divxtotal.com/peliculas/", thumbnail="http://imgur.com/A4zN3OP.png",
url = host + "/peliculas/", thumbnail="http://imgur.com/A4zN3OP.png",
fanart="http://imgur.com/fdntKsy.jpg", contentType="movie"))
itemlist.append(item.clone(title="[COLOR orange][B] Películas HD[/B][/COLOR]", action="scraper",
url="http://www.divxtotal.com/peliculas-hd/", thumbnail="http://imgur.com/A4zN3OP.png",
url = host + "/peliculas-hd/", thumbnail="http://imgur.com/A4zN3OP.png",
fanart="http://imgur.com/fdntKsy.jpg", contentType="movie"))
itemlist.append(itemlist[-1].clone(title="[COLOR orange][B]Series[/B][/COLOR]", action="scraper",
url="http://www.divxtotal.com/series/", thumbnail="http://imgur.com/GPX2wLt.png",
url = host + "/series/", thumbnail="http://imgur.com/GPX2wLt.png",
contentType="tvshow"))
itemlist.append(itemlist[-1].clone(title="[COLOR orangered][B]Buscar[/B][/COLOR]", action="search",
@@ -90,7 +37,7 @@ def mainlist(item):
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = "http://www.divxtotal.com/?s=" + texto
item.url = host + "/?s=" + texto
item.extra = "search"
try:
return buscador(item)
@@ -106,22 +53,16 @@ def buscador(item):
itemlist = []
data = httptools.downloadpage(item.url, headers=header, cookies=False).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = scrapertools.find_multiple_matches(data,
'<tr><td class="text-left"><a href="([^"]+)" title="([^"]+)">.*?-left">(.*?)</td>')
for url, title, check in patron:
patron = '<tr><td class="text-left"><a href="([^"]+)" title="([^"]+)">.*?-left">(.*?)</td>'
matches = scrapertools.find_multiple_matches(data, patron)
for url, title, check in matches:
if "N/A" in check:
checkmt = "tvshow"
else:
checkmt = "movie"
titulo = title
title = re.sub(r"!|¡|HD|\d+\d+\d+\d+|\(.*?\).*\[.*?]\]", "", title)
title = re.sub(r"&#8217;|PRE-Estreno", "'", title)
if checkmt == "movie":
new_item = item.clone(action="findvideos", title=titulo, url=url, fulltitle=title, contentTitle=title,
contentType="movie", library=True)
@@ -138,9 +79,7 @@ def buscador(item):
next = scrapertools.find_single_match(data, "<ul class=\"pagination\">.*?\(current\).*?href='([^']+)'")
if len(next) > 0:
url = next
itemlist.append(item.clone(title="[COLOR springgreen][B]Siguiente >>[/B][/COLOR]", action="buscador", url=url))
try:
from core import tmdb
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
@@ -153,7 +92,6 @@ def buscador(item):
item.title = item.title + " " + str(item.infoLabels['rating'])
except:
pass
return itemlist
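Note: a cleanup repeated across this file: the old code assigned the result of find_multiple_matches to a variable named patron, hiding the actual regex. The new form keeps the two apart. The fixed idiom with placeholder data (re.findall stands in for scrapertools.find_multiple_matches):

import re

data = '<a href="/t/one" title="One"><a href="/t/two" title="Two">'
patron = '<a href="([^"]+)" title="([^"]+)">'   # the pattern keeps its name...
matches = re.findall(patron, data)              # ...and the matches keep theirs
for url, title in matches:
    print "%s -> %s" % (title, url)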
@@ -162,14 +100,10 @@ def scraper(item):
itemlist = []
data = httptools.downloadpage(item.url, headers=header, cookies=False).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
if item.contentType == "movie":
patron = scrapertools.find_multiple_matches(data,
'<tr><td><a href="([^"]+)" title="([^"]+)".*?\d+-\d+-([^"]+)</td><td>')
for url, title, year in patron:
patron = '<tr><td><a href="([^"]+)" title="([^"]+)".*?\d+-\d+-([^"]+)</td><td>'
matches = scrapertools.find_multiple_matches(data, patron)
for url, title, year in matches:
titulo = re.sub(r"\d+\d+\d+\d+|\(.*?\).*", "", title)
title = re.sub(r"!|¡|HD|\d+\d+\d+\d+|\(.*?\).*", "", title)
title = title.replace("Autosia", "Autopsia")
@@ -178,14 +112,12 @@ def scraper(item):
fulltitle=title, contentTitle=title, contentType="movie", extra=year, library=True)
new_item.infoLabels['year'] = get_year(url)
itemlist.append(new_item)
else:
patron = scrapertools.find_multiple_matches(data,
'<p class="secconimagen"><a href="([^"]+)" title="[^"]+"><img src="([^"]+)".*?title="[^"]+">([^"]+)</a>')
for url, thumb, title in patron:
patron = '(?s)<p class="secconimagen"><a href="([^"]+)"'
patron += ' title="[^"]+"><img src="([^"]+)".*?'
patron += 'rel="bookmark">([^<]+)<'
matches = scrapertools.find_multiple_matches(data, patron)
for url, thumb, title in matches:
titulo = title.strip()
title = re.sub(r"\d+x.*|\(.*?\)", "", title)
new_item = item.clone(action="findvideos", title="[COLOR orange]" + titulo + "[/COLOR]", url=url,
@@ -193,7 +125,6 @@ def scraper(item):
fulltitle=title, contentTitle=title, show=title, contentType="tvshow", library=True)
new_item.infoLabels['year'] = get_year(url)
itemlist.append(new_item)
## Pagination
next = scrapertools.find_single_match(data, "<ul class=\"pagination\">.*?\(current\).*?href='([^']+)'")
if len(next) > 0:
@@ -215,21 +146,14 @@ def scraper(item):
except:
pass
for item_tmdb in itemlist:
logger.info(str(item_tmdb.infoLabels['tmdb_id']))
return itemlist
def findtemporadas(item):
logger.info()
itemlist = []
if item.extra == "search":
th = Thread(target=get_art(item))
th.setDaemon(True)
th.start()
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
if len(item.extra.split("|")):
@@ -264,8 +188,7 @@ def findtemporadas(item):
except:
fanart_extra = item.fanart
fanart_info = item.fanart
bloque_episodios = scrapertools.find_multiple_matches(data, 'Temporada.*?(\d+).*?<\/a>(.*?)<\/table>')
bloque_episodios = scrapertools.find_multiple_matches(data, 'Temporada (\d+).*?<\/a>(.*?)<\/table>')
for temporada, bloque_epis in bloque_episodios:
item.infoLabels = item.InfoLabels
item.infoLabels['season'] = temporada
@@ -298,9 +221,9 @@ def epis(item):
itemlist = []
if item.extra == "serie_add":
item.url = item.datalibrary
patron = scrapertools.find_multiple_matches(item.url,
'<td><img src=".*?images\/(.*?)\.png".*?href="([^"]+)" title="">.*?(\d+x\d+).*?td>')
for idioma, url, epi in patron:
patron = '<td><img src=".*?images\/(.*?)\.png".*?href="([^"]+)" title="">.*?(\d+x\d+).*?td>'
matches = scrapertools.find_multiple_matches(item.url, patron)
for idioma, url, epi in matches:
episodio = scrapertools.find_single_match(epi, '\d+x(\d+)')
item.infoLabels['episode'] = episodio
itemlist.append(
@@ -320,19 +243,11 @@ def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
if not item.infoLabels['episode']:
th = Thread(target=get_art(item))
th.setDaemon(True)
th.start()
if item.contentType != "movie":
if not item.infoLabels['episode']:
capitulo = scrapertools.find_single_match(item.title, '(\d+x\d+)')
url_capitulo = scrapertools.find_single_match(data,
'<a href="(http://www.divxtotal.com/wp-content/uploads/.*?' + capitulo + '.*?.torrent)')
patron = '<a href="(' + host + '/wp-content/uploads/.*?' + capitulo + '.*?.torrent)'
url_capitulo = scrapertools.find_single_match(data, patron)
if len(item.extra.split("|")) >= 2:
extra = item.extra
else:
@@ -350,7 +265,6 @@ def findvideos(item):
title="[COLOR chocolate][B]Ver capítulo " + capitulo + "[/B][/COLOR]" + "-" + "[COLOR khaki] ( Video" + "[/COLOR]" + " " + "[COLOR khaki]" + ext_v + "[/COLOR]" + " " + "[COLOR khaki] " + size + " )" + "[/COLOR]",
url=url_capitulo, action="play", server="torrent", fanart=fanart, thumbnail=item.thumbnail,
extra=item.extra, fulltitle=item.fulltitle, folder=False))
if item.infoLabels['episode'] and item.library:
thumbnail = scrapertools.find_single_match(item.extra, 'http://assets.fanart.tv/.*jpg')
if thumbnail == "":
@@ -363,15 +277,13 @@ def findvideos(item):
action="info_capitulos", fanart=fanart, thumbnail=item.thumb_art,
thumb_info=item.thumb_info, extra=item.extra, show=item.show,
InfoLabels=item.infoLabels, folder=False))
if not item.infoLabels['episode']:
itemlist.append(
Item(channel=item.channel, title="[COLOR moccasin][B]Todos los episodios[/B][/COLOR]", url=item.url,
action="findtemporadas", server="torrent", fanart=item.extra.split("|")[1],
action="findtemporadas", server="torrent",
thumbnail=item.thumbnail, extra=item.extra + "|" + item.thumbnail, contentType=item.contentType,
contentTitle=item.contentTitle, InfoLabels=item.infoLabels, thumb_art=item.thumb_art,
thumb_info=item.thumbnail, fulltitle=item.fulltitle, library=item.library, folder=True))
else:
url = scrapertools.find_single_match(data, '<h3 class="orange text-center">.*?href="([^"]+)"')
item.infoLabels['year'] = None
@@ -388,7 +300,6 @@ def findvideos(item):
action="add_pelicula_to_library", url=item.url, infoLabels=infoLabels,
text_color="0xFFe5ffcc",
thumbnail='http://imgur.com/xQNTqqy.png'))
return itemlist
@@ -401,7 +312,6 @@ def info_capitulos(item, images={}):
url = url.replace("/0", "/")
from core import jsontools
data = httptools.downloadpage(url).data
if "<filename>episodes" in data:
image = scrapertools.find_single_match(data, '<Data>.*?<filename>(.*?)</filename>')
image = "http://thetvdb.com/banners/" + image
@@ -431,7 +341,6 @@ def info_capitulos(item, images={}):
except:
rating = 0
try:
if rating >= 5 and rating < 8:
rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR springgreen][B]" + str(rating) + "[/B][/COLOR]"
elif rating >= 8 and rating < 10:
@@ -444,90 +353,17 @@ def info_capitulos(item, images={}):
rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR crimson][B]" + str(rating) + "[/B][/COLOR]"
if "10." in rating:
rating = re.sub(r'10\.\d+', '10', rating)
except:
title = "[COLOR red][B]LO SENTIMOS...[/B][/COLOR]"
plot = "Este capitulo no tiene informacion..."
plot = "[COLOR yellow][B]" + plot + "[/B][/COLOR]"
image = "http://s6.postimg.org/ub7pb76c1/noinfo.png"
foto = "http://s6.postimg.org/nm3gk1xox/noinfosup2.png"
rating = ""
ventana = TextBox2(title=title, plot=plot, thumbnail=image, fanart=foto, rating=rating)
ventana.doModal()
class TextBox2(xbmcgui.WindowDialog):
""" Create a skinned textbox window """
def __init__(self, *args, **kwargs):
self.getTitle = kwargs.get('title')
self.getPlot = kwargs.get('plot')
self.getThumbnail = kwargs.get('thumbnail')
self.getFanart = kwargs.get('fanart')
self.getRating = kwargs.get('rating')
self.background = xbmcgui.ControlImage(70, 20, 1150, 630, 'http://imgur.com/K6wduMe.png')
self.title = xbmcgui.ControlTextBox(120, 60, 430, 50)
self.rating = xbmcgui.ControlTextBox(145, 112, 1030, 45)
self.plot = xbmcgui.ControlTextBox(120, 150, 1056, 100)
self.thumbnail = xbmcgui.ControlImage(120, 300, 1056, 300, self.getThumbnail)
self.fanart = xbmcgui.ControlImage(780, 43, 390, 100, self.getFanart)
self.addControl(self.background)
self.background.setAnimations(
[('conditional', 'effect=slide start=1000% end=0% time=1500 condition=true tween=bounce',),
('WindowClose', 'effect=slide delay=800 start=0% end=1000% time=800 condition=true',)])
self.addControl(self.thumbnail)
self.thumbnail.setAnimations([('conditional',
'effect=zoom start=0% end=100% delay=2700 time=1500 condition=true tween=elastic easing=inout',),
('WindowClose', 'effect=slide end=0,700% time=300 condition=true',)])
self.addControl(self.plot)
self.plot.setAnimations(
[('conditional', 'effect=zoom delay=2000 center=auto start=0 end=100 time=800 condition=true ',), (
'conditional',
'effect=rotate delay=2000 center=auto aceleration=6000 start=0% end=360% time=800 condition=true',),
('WindowClose', 'effect=zoom center=auto start=100% end=-0% time=600 condition=true',)])
self.addControl(self.fanart)
self.fanart.setAnimations(
[('WindowOpen', 'effect=slide start=0,-700 delay=1000 time=2500 tween=bounce condition=true',), (
'conditional',
'effect=rotate center=auto start=0% end=360% delay=3000 time=2500 tween=bounce condition=true',),
('WindowClose', 'effect=slide end=0,-700% time=1000 condition=true',)])
self.addControl(self.title)
self.title.setText(self.getTitle)
self.title.setAnimations(
[('conditional', 'effect=slide start=-1500% end=0% delay=1000 time=2000 condition=true tween=elastic',),
('WindowClose', 'effect=slide start=0% end=-1500% time=800 condition=true',)])
self.addControl(self.rating)
self.rating.setText(self.getRating)
self.rating.setAnimations(
[('conditional', 'effect=fade start=0% end=100% delay=3000 time=1500 condition=true',),
('WindowClose', 'effect=slide end=0,-700% time=600 condition=true',)])
xbmc.sleep(200)
try:
self.plot.autoScroll(7000, 6000, 30000)
except:
xbmc.executebuiltin(
'Notification([COLOR red][B]Actualiza Kodi a su última versión[/B][/COLOR], [COLOR skyblue]para mejor info[/COLOR],8000,"https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/kodi-icon.png")')
self.plot.setText(self.getPlot)
def get(self):
self.show()
def onAction(self, action):
if action == ACTION_PREVIOUS_MENU or action.getId() == ACTION_GESTURE_SWIPE_LEFT or action == 110 or action == 92:
self.close()
def test():
return True
def tokenize(text, match=re.compile("([idel])|(\d+):|(-?\d+)").match):
i = 0
while i < len(text):
@@ -576,7 +412,6 @@ def decode(text):
data = data
except:
data = src
return data
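Note: tokenize (above) and decode form a small bencode parser, the format .torrent metadata uses: i...e integers, length-prefixed strings, l...e lists and d...e dictionaries. For context, a self-contained decoder in the same spirit (a sketch, not the channel's exact implementation):

def bdecode(text):
    # Minimal bencode decoder: i<int>e, <len>:<bytes>, l...e, d...e.
    def parse(i):
        c = text[i]
        if c == 'i':                                # integer: i-42e
            end = text.index('e', i)
            return int(text[i + 1:end]), end + 1
        if c in 'ld':                               # list or dict
            items, i = [], i + 1
            while text[i] != 'e':
                value, i = parse(i)
                items.append(value)
            if c == 'd':                            # dict = alternating key/value
                return dict(zip(items[::2], items[1::2])), i + 1
            return items, i + 1
        length, _, _ = text[i:].partition(':')      # string: 4:spam
        start = i + len(length) + 1
        return text[start:start + int(length)], start + int(length)
    value, _ = parse(0)
    return value

# bdecode('d4:spaml1:a1:bee') == {'spam': ['a', 'b']}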
@@ -591,381 +426,6 @@ def convert_size(size):
return '%s %s' % (s, size_name[i])
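Note: only convert_size's return line survives in this hunk; the usual shape of such a helper, reconstructed around that line (the math.log unit selection is an assumption):

import math

def convert_size(size):
    # Pick the largest unit that keeps the value at or above 1
    # (assumed behaviour; only the final return line is visible above).
    if size == 0:
        return '0 B'
    size_name = ('B', 'KB', 'MB', 'GB', 'TB')
    i = int(math.floor(math.log(size, 1024)))
    s = round(size / math.pow(1024, i), 2)
    return '%s %s' % (s, size_name[i])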
def fanartv(item, id_tvdb, id, images={}):
headers = [['Content-Type', 'application/json']]
from core import jsontools
if item.contentType == "movie":
url = "http://webservice.fanart.tv/v3/movies/%s?api_key=cab16e262d72fea6a6843d679aa10300" \
% id
else:
url = "http://webservice.fanart.tv/v3/tv/%s?api_key=cab16e262d72fea6a6843d679aa10300" % id_tvdb
try:
data = jsontools.load(scrapertools.downloadpage(url, headers=headers))
if data and not "error message" in data:
for key, value in data.items():
if key not in ["name", "tmdb_id", "imdb_id", "thetvdb_id"]:
images[key] = value
else:
images = []
except:
images = []
return images
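Note: fanartv hits the fanart.tv v3 API with the TMDB id for movies or the TVDB id for series and strips the identification keys from the reply. A minimal usage sketch (the ids are hypothetical):

# Hypothetical ids, for illustration only.
images = fanartv(item, id_tvdb="81189", id="1396")
if images and images.get("hdtvlogo"):
    item.thumbnail = images["hdtvlogo"][0].get("url")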
def filmaffinity(item, infoLabels):
title = infoLabels["title"].replace(" ", "+")
try:
year = infoLabels["year"]
except:
year = ""
sinopsis = infoLabels["sinopsis"]
if year == "":
if item.contentType != "movie":
tipo = "serie"
url_bing = "http://www.bing.com/search?q=%s+Serie+de+tv+site:filmaffinity.com" % title
else:
tipo = "película"
url_bing = "http://www.bing.com/search?q=%s+site:filmaffinity.com" % title
try:
data = browser(url_bing)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
if "myaddrproxy.php" in data:
subdata_bing = scrapertools.get_match(data,
'li class="b_algo"><div class="b_title"><h2>(<a href="/myaddrproxy.php/http/www.filmaffinity.com/es/film.*?)"')
subdata_bing = re.sub(r'\/myaddrproxy.php\/http\/', '', subdata_bing)
else:
subdata_bing = scrapertools.get_match(data,
'li class="b_algo"><h2>(<a href="http://www.filmaffinity.com/.*?/film.*?)"')
url_filma = scrapertools.get_match(subdata_bing, '<a href="([^"]+)')
if not "http" in url_filma:
try:
data = httptools.downloadpage("http://" + url_filma, cookies=False, timeout=1).data
except:
data = httptools.downloadpage("http://" + url_filma, cookies=False, timeout=1).data
else:
try:
data = httptools.downloadpage(url_filma, cookies=False, timeout=1).data
except:
data = httptools.downloadpage(url_filma, cookies=False, timeout=1).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
except:
pass
else:
tipo = "Pelicula"
url = "http://www.filmaffinity.com/es/advsearch.php?stext={0}&stype%5B%5D=title&country=&genre=&fromyear={1}&toyear={1}".format(
title, year)
data = httptools.downloadpage(url, cookies=False).data
url_filmaf = scrapertools.find_single_match(data, '<div class="mc-poster">\s*<a title="[^"]*" href="([^"]+)"')
if url_filmaf:
url_filmaf = "http://www.filmaffinity.com%s" % url_filmaf
data = httptools.downloadpage(url_filmaf, cookies=False).data
else:
if item.contentType != "movie":
tipo = "serie"
url_bing = "http://www.bing.com/search?q=%s+Serie+de+tv+site:filmaffinity.com" % title
else:
tipo = "película"
url_bing = "http://www.bing.com/search?q=%s+site:filmaffinity.com" % title
try:
data = browser(url_bing)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
if "myaddrproxy.php" in data:
subdata_bing = scrapertools.get_match(data,
'li class="b_algo"><div class="b_title"><h2>(<a href="/myaddrproxy.php/http/www.filmaffinity.com/es/film.*?)"')
subdata_bing = re.sub(r'\/myaddrproxy.php\/http\/', '', subdata_bing)
else:
subdata_bing = scrapertools.get_match(data,
'li class="b_algo"><h2>(<a href="http://www.filmaffinity.com/.*?/film.*?)"')
url_filma = scrapertools.get_match(subdata_bing, '<a href="([^"]+)')
if not "http" in url_filma:
data = httptools.downloadpage("http://" + url_filma, cookies=False).data
else:
data = httptools.downloadpage(url_filma, cookies=False).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
except:
pass
sinopsis_f = scrapertools.find_single_match(data, '<dd itemprop="description">(.*?)</dd>')
sinopsis_f = sinopsis_f.replace("<br><br />", "\n")
sinopsis_f = re.sub(r"\(FILMAFFINITY\)<br />", "", sinopsis_f)
try:
year_f = scrapertools.get_match(data, '<dt>Año</dt>.*?>(\d+)</dd>')
except:
year_f = ""
try:
rating_filma = scrapertools.get_match(data, 'itemprop="ratingValue" content="(.*?)">')
except:
rating_filma = "Sin puntuacion"
critica = ""
patron = '<div itemprop="reviewBody">(.*?)</div>.*?itemprop="author">(.*?)\s*<i alt="([^"]+)"'
matches_reviews = scrapertools.find_multiple_matches(data, patron)
if matches_reviews:
for review, autor, valoracion in matches_reviews:
review = dhe(scrapertools.htmlclean(review))
review += "\n" + autor + "[CR]"
review = re.sub(r'Puntuac.*?\)', '', review)
if "positiva" in valoracion:
critica += "[COLOR green][B]%s[/B][/COLOR]\n" % review
elif "neutral" in valoracion:
critica += "[COLOR yellow][B]%s[/B][/COLOR]\n" % review
else:
critica += "[COLOR red][B]%s[/B][/COLOR]\n" % review
else:
critica = "[COLOR floralwhite][B]Esta %s no tiene críticas todavía...[/B][/COLOR]" % tipo
return critica, rating_filma, year_f, sinopsis_f
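Note: filmaffinity returns a four-tuple (critique, rating, year, synopsis), so call sites unpack all four. A minimal hypothetical call site:

critica, rating_filma, year_f, sinopsis_f = filmaffinity(item, item.infoLabels)
if sinopsis_f:
    item.infoLabels['sinopsis'] = sinopsis_f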
def get_art(item):
logger.info()
id = item.infoLabels['tmdb_id']
check_fanart = item.infoLabels['fanart']
if item.contentType != "movie":
tipo_ps = "tv"
else:
tipo_ps = "movie"
if not id:
year = item.extra
otmdb = tmdb.Tmdb(texto_buscado=item.fulltitle, year=year, tipo=tipo_ps)
id = otmdb.result.get("id")
if id == None:
otmdb = tmdb.Tmdb(texto_buscado=item.fulltitle, tipo=tipo_ps)
id = otmdb.result.get("id")
if id == None:
if item.contentType == "movie":
urlbing_imdb = "http://www.bing.com/search?q=%s+%s+tv+series+site:imdb.com" % (
item.fulltitle.replace(' ', '+'), year)
data = browser(urlbing_imdb)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|http://ssl-proxy.my-addr.org/myaddrproxy.php/", "", data)
subdata_imdb = scrapertools.find_single_match(data,
'<li class="b_algo">(.*?)h="ID.*?<strong>.*?TV Series')
else:
urlbing_imdb = "http://www.bing.com/search?q=%s+%s+site:imdb.com" % (
item.fulltitle.replace(' ', '+'), year)
data = browser(urlbing_imdb)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|http://ssl-proxy.my-addr.org/myaddrproxy.php/", "", data)
subdata_imdb = scrapertools.find_single_match(data, '<li class="b_algo">(.*?)h="ID.*?<strong>')
try:
imdb_id = scrapertools.get_match(subdata_imdb, '<a href=.*?http.*?imdb.com/title/(.*?)/.*?"')
except:
try:
imdb_id = scrapertools.get_match(subdata_imdb,
'<a href=.*?http.*?imdb.com/.*?/title/(.*?)/.*?"')
except:
imdb_id = ""
otmdb = tmdb.Tmdb(external_id=imdb_id, external_source="imdb_id", tipo=tipo_ps, idioma_busqueda="es")
id = otmdb.result.get("id")
if id == None:
if "(" in item.fulltitle:
title = scrapertools.find_single_match(item.fulltitle, '\(.*?\)')
if item.contentType != "movie":
urlbing_imdb = "http://www.bing.com/search?q=%s+%s+tv+series+site:imdb.com" % (
title.replace(' ', '+'), year)
data = browser(urlbing_imdb)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|http://ssl-proxy.my-addr.org/myaddrproxy.php/", "",
data)
subdata_imdb = scrapertools.find_single_match(data,
'<li class="b_algo">(.*?)h="ID.*?<strong>.*?TV Series')
else:
urlbing_imdb = "http://www.bing.com/search?q=%s+%s+site:imdb.com" % (
title.replace(' ', '+'), year)
data = browser(urlbing_imdb)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|http://ssl-proxy.my-addr.org/myaddrproxy.php/", "",
data)
subdata_imdb = scrapertools.find_single_match(data,
'<li class="b_algo">(.*?)h="ID.*?<strong>')
try:
imdb_id = scrapertools.get_match(subdata_imdb,
'<a href=.*?http.*?imdb.com/title/(.*?)/.*?"')
except:
try:
imdb_id = scrapertools.get_match(subdata_imdb,
'<a href=.*?http.*?imdb.com/.*?/title/(.*?)/.*?"')
except:
imdb_id = ""
otmdb = tmdb.Tmdb(external_id=imdb_id, external_source="imdb_id", tipo=tipo_ps,
idioma_busqueda="es")
id = otmdb.result.get("id")
if not id:
fanart = item.fanart
imagenes = []
itmdb = tmdb.Tmdb(id_Tmdb=id, tipo=tipo_ps)
images = itmdb.result.get("images")
if images:
for key, value in images.iteritems():
for detail in value:
imagenes.append('http://image.tmdb.org/t/p/original' + detail["file_path"])
if item.contentType == "movie":
if len(imagenes) >= 4:
if imagenes[0] != check_fanart:
item.fanart = imagenes[0]
else:
item.fanart = imagenes[1]
if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart:
item.extra = imagenes[1] + "|" + imagenes[2]
else:
if imagenes[1] != check_fanart and imagenes[1] != item.fanart:
item.extra = imagenes[1] + "|" + imagenes[3]
elif imagenes[2] != check_fanart:
item.extra = imagenes[2] + "|" + imagenes[3]
else:
item.extra = imagenes[3] + "|" + imagenes[3]
elif len(imagenes) == 3:
if imagenes[0] != check_fanart:
item.fanart = imagenes[0]
else:
item.fanart = imagenes[1]
if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart:
item.extra = imagenes[1] + "|" + imagenes[2]
else:
if imagenes[1] != check_fanart and imagenes[1] != item.fanart:
item.extra = imagenes[0] + "|" + imagenes[1]
elif imagenes[2] != check_fanart:
item.extra = imagenes[1] + "|" + imagenes[2]
else:
item.extra = imagenes[1] + "|" + imagenes[1]
elif len(imagenes) == 2:
if imagenes[0] != check_fanart:
item.fanart = imagenes[0]
else:
item.fanart = imagenes[1]
if imagenes[1] != check_fanart and imagenes[1] != item.fanart:
item.extra = imagenes[0] + "|" + imagenes[1]
else:
item.extra = imagenes[1] + "|" + imagenes[0]
elif len(imagenes) == 1:
item.extra = imagenes[0] + "|" + imagenes[0]
else:
item.extra = item.fanart + "|" + item.fanart
id_tvdb = ""
else:
if itmdb.result.get("external_ids").get("tvdb_id"):
id_tvdb = itmdb.result.get("external_ids").get("tvdb_id")
else:
id_tvdb = ""
if len(imagenes) >= 6:
if imagenes[0] != check_fanart:
item.fanart = imagenes[0]
else:
item.fanart = imagenes[1]
if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart:
item.extra = imagenes[1] + "|" + imagenes[2] + "|" + imagenes[3] + "|" + imagenes[4] + "|" + \
imagenes[5]
else:
if imagenes[1] != check_fanart and imagenes[1] != item.fanart:
item.extra = imagenes[1] + "|" + imagenes[3] + "|" + imagenes[4] + "|" + imagenes[5] + "|" + \
imagenes[2]
elif imagenes[2] != check_fanart:
item.extra = imagenes[2] + "|" + imagenes[3] + "|" + imagenes[4] + "|" + imagenes[5] + "|" + \
imagenes[1]
else:
item.extra = imagenes[3] + "|" + imagenes[4] + "|" + imagenes[5] + "|" + imagenes[2] + "|" + \
imagenes[1]
elif len(imagenes) == 5:
if imagenes[0] != check_fanart:
item.fanart = imagenes[0]
else:
item.fanart = imagenes[1]
if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart:
item.extra = imagenes[1] + "|" + imagenes[2] + "|" + imagenes[3] + "|" + imagenes[4]
else:
if imagenes[1] != check_fanart and imagenes[1] != item.fanart:
item.extra = imagenes[1] + "|" + imagenes[3] + "|" + imagenes[4] + "|" + imagenes[2]
elif imagenes[2] != check_fanart:
item.extra = imagenes[2] + "|" + imagenes[3] + "|" + imagenes[4] + "|" + imagenes[1]
else:
item.extra = imagenes[3] + "|" + imagenes[4] + "|" + imagenes[2] + "|" + imagenes[1]
elif len(imagenes) == 4:
if imagenes[0] != check_fanart:
item.fanart = imagenes[0]
else:
item.fanart = imagenes[1]
if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart:
item.extra = imagenes[1] + "|" + imagenes[2] + "|" + imagenes[3]
else:
if imagenes[1] != check_fanart and imagenes[1] != item.fanart:
item.extra = imagenes[1] + "|" + imagenes[3] + "|" + imagenes[2]
elif imagenes[2] != check_fanart:
item.extra = imagenes[2] + "|" + imagenes[3] + "|" + imagenes[1]
else:
item.extra = imagenes[3] + "|" + imagenes[2] + "|" + imagenes[1]
elif len(imagenes) == 3:
if imagenes[0] != check_fanart:
item.fanart = imagenes[0]
else:
item.fanart = imagenes[1]
if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart:
item.extra = imagenes[1] + "|" + imagenes[2]
else:
if imagenes[1] != check_fanart and imagenes[1] != item.fanart:
item.extra = imagenes[0] + "|" + imagenes[1]
elif imagenes[2] != check_fanart:
item.extra = imagenes[1] + "|" + imagenes[2]
else:
item.extra = imagenes[1] + "|" + imagenes[1]
elif len(imagenes) == 2:
if imagenes[0] != check_fanart:
item.fanart = imagenes[0]
else:
item.fanart = imagenes[1]
if imagenes[1] != check_fanart and imagenes[1] != item.fanart:
item.extra = imagenes[0] + "|" + imagenes[1]
else:
item.extra = imagenes[1] + "|" + imagenes[0]
elif len(imagenes) == 1:
item.extra = imagenes[0] + "|" + imagenes[0]
else:
item.extra = item.fanart + "|" + item.fanart
item.extra = item.extra
images_fanarttv = fanartv(item, id_tvdb, id)
if images_fanarttv:
if item.contentType == "movie":
if images_fanarttv.get("moviedisc"):
item.thumbnail = images_fanarttv.get("moviedisc")[0].get("url")
elif images_fanarttv.get("hdmovielogo"):
item.thumbnail = images_fanarttv.get("hdmovielogo")[0].get("url")
elif images_fanarttv.get("moviethumb"):
item.thumbnail = images_fanarttv.get("moviethumb")[0].get("url")
elif images_fanarttv.get("moviebanner"):
item.thumbnail_ = images_fanarttv.get("moviebanner")[0].get("url")
else:
item.thumbnail = item.thumbnail
else:
if images_fanarttv.get("hdtvlogo"):
item.thumbnail = images_fanarttv.get("hdtvlogo")[0].get("url")
elif images_fanarttv.get("clearlogo"):
item.thumbnail = images_fanarttv.get("hdmovielogo")[0].get("url")
item.thumb_info = item.thumbnail
if images_fanarttv.get("tvbanner"):
item.thumb_art = images_fanarttv.get("tvbanner")[0].get("url")
elif images_fanarttv.get("tvthumb"):
item.thumb_art = images_fanarttv.get("tvthumb")[0].get("url")
else:
item.thumb_art = item.thumbnail
else:
item.extra = item.extra + "|" + item.thumbnail
def get_year(url):
data = httptools.downloadpage(url, headers=header, cookies=False).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
@@ -984,7 +444,6 @@ def ext_size(url):
pepe = open(torrents_path + "/temp.torrent", "rb").read()
except:
pepe = ""
torrent = decode(pepe)
try:
name = torrent["info"]["name"]
@@ -1021,25 +480,22 @@ def ext_size(url):
size = ""
return ext_v, size
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == 'torrent':
item.url = 'http://www.divxtotal.com/peliculas/'
item.url = host + '/peliculas/'
item.contentType="movie"
itemlist = scraper(item)
if itemlist[-1].title == "[COLOR springgreen][B]Siguiente >>[/B][/COLOR]":
itemlist.pop()
# Catch the exception so a failing channel does not interrupt the "newest" channel
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist

View File

@@ -1,47 +0,0 @@
{
"id": "documaniatv",
"name": "DocumaniaTV",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "http://i.imgur.com/qMR9sg9.png",
"banner": "documaniatv.png",
"categories": [
"documentary"
],
"settings": [
{
"id": "include_in_newest_documentales",
"type": "bool",
"label": "Incluir en Novedades - Documentales",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "documaniatvaccount",
"type": "bool",
"label": "Usar cuenta de documaniatv",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "documaniatvuser",
"type": "text",
"label": "Usuario",
"color": "0xFFd50b0b",
"enabled": "eq(-1,true)",
"visible": true
},
{
"id": "documaniatvpassword",
"type": "text",
"label": "Contraseña",
"color": "0xFFd50b0b",
"enabled": "!eq(-1,)+eq(-2,true)",
"visible": true,
"hidden": true
}
]
}

View File

@@ -1,373 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urllib
import urlparse
from core import jsontools
from core import scrapertools
from core.item import Item
from platformcode import config, logger
host = "http://www.documaniatv.com/"
account = config.get_setting("documaniatvaccount", "documaniatv")
headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
['Referer', host]]
def login():
logger.info()
user = config.get_setting("documaniatvuser", "documaniatv")
password = config.get_setting("documaniatvpassword", "documaniatv")
if user == "" or password == "":
return True, ""
data = scrapertools.cachePage(host, headers=headers)
if "http://www.documaniatv.com/user/" + user in data:
return False, user
post = "username=%s&pass=%s&Login=Iniciar Sesión" % (user, password)
data = scrapertools.cachePage("http://www.documaniatv.com/login.php", headers=headers, post=post)
if "Nombre de usuario o contraseña incorrectas" in data:
logger.error("login erróneo")
return True, ""
return False, user
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(item.clone(action="novedades", title="Novedades", url="http://www.documaniatv.com/newvideos.html"))
itemlist.append(
item.clone(action="categorias", title="Categorías y Canales", url="http://www.documaniatv.com/browse.html"))
itemlist.append(item.clone(action="novedades", title="Top", url="http://www.documaniatv.com/topvideos.html"))
itemlist.append(item.clone(action="categorias", title="Series Documentales",
url="http://www.documaniatv.com/top-series-documentales-html"))
itemlist.append(item.clone(action="viendo", title="Viendo ahora", url="http://www.documaniatv.com"))
itemlist.append(item.clone(action="", title=""))
itemlist.append(item.clone(action="search", title="Buscar"))
folder = False
action = ""
if account:
error, user = login()
if error:
title = "Playlists Personales (Error en usuario y/o contraseña)"
else:
title = "Playlists Personales (Logueado)"
action = "usuario"
folder = True
else:
title = "Playlists Personales (Sin cuenta configurada)"
user = ""
url = "http://www.documaniatv.com/user/%s" % user
itemlist.append(item.clone(title=title, action=action, url=url, folder=folder))
itemlist.append(item.clone(title="Configurar canal...", text_color="gold", action="configuracion",
folder=False))
return itemlist
def configuracion(item):
from platformcode import platformtools
platformtools.show_channel_settings()
if config.is_xbmc():
import xbmc
xbmc.executebuiltin("Container.Refresh")
def newest(categoria):
itemlist = []
item = Item()
try:
if categoria == 'documentales':
item.url = "http://www.documaniatv.com/newvideos.html"
itemlist = novedades(item)
if itemlist[-1].action == "novedades":
itemlist.pop()
# Catch the exception so a failing channel does not interrupt the "newest" channel
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def search(item, texto):
logger.info()
data = scrapertools.cachePage(host, headers=headers)
item.url = scrapertools.find_single_match(data, 'form action="([^"]+)"') + "?keywords=%s&video-id="
texto = texto.replace(" ", "+")
item.url = item.url % texto
try:
return novedades(item)
# Catch the exception so a failing channel does not interrupt the global search
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
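Note: the bare except with the sys.exc_info() loop keeps one broken channel from taking down the global search, but logs the error in pieces. The same guard with a single full-traceback entry (traceback.format_exc is standard library; this is a suggestion, not part of the commit):

import traceback

try:
    itemlist = novedades(item)   # module context: novedades and logger as above
except Exception:
    # One log entry with the whole traceback.
    logger.error(traceback.format_exc())
    itemlist = []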
def novedades(item):
logger.info()
itemlist = []
# Download the page
data = scrapertools.cachePage(item.url, headers=headers)
# Extract the plot, if the page has one
scrapedplot = scrapertools.find_single_match(data, '<div class="pm-section-head">(.*?)</div>')
if "<div" in scrapedplot:
scrapedplot = ""
else:
scrapedplot = scrapertools.htmlclean(scrapedplot)
bloque = scrapertools.find_multiple_matches(data, '<li class="col-xs-[\d] col-sm-[\d] col-md-[\d]">(.*?)</li>')
if "Registrarse" in data or not account:
for match in bloque:
patron = '<span class="pm-label-duration">(.*?)</span>.*?<a href="([^"]+)"' \
'.*?title="([^"]+)".*?data-echo="([^"]+)"'
matches = scrapertools.find_multiple_matches(match, patron)
for duracion, scrapedurl, scrapedtitle, scrapedthumbnail in matches:
contentTitle = scrapedtitle[:]
scrapedtitle += " [" + duracion + "]"
if not scrapedthumbnail.startswith("data:image"):
scrapedthumbnail += "|" + headers[0][0] + "=" + headers[0][1]
else:
scrapedthumbnail = item.thumbnail
logger.debug(
"title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
itemlist.append(item.clone(action="play_", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot,
fulltitle=scrapedtitle, contentTitle=contentTitle, folder=False))
else:
for match in bloque:
patron = '<span class="pm-label-duration">(.*?)</span>.*?onclick="watch_later_add\(([\d]+)\)' \
'.*?<a href="([^"]+)".*?title="([^"]+)".*?data-echo="([^"]+)"'
matches = scrapertools.find_multiple_matches(match, patron)
for duracion, video_id, scrapedurl, scrapedtitle, scrapedthumbnail in matches:
contentTitle = scrapedtitle[:]
scrapedtitle += " [" + duracion + "]"
if not scrapedthumbnail.startswith("data:image"):
scrapedthumbnail += "|" + headers[0][0] + "=" + headers[0][1]
else:
scrapedthumbnail = item.thumbnail
logger.debug(
"title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
itemlist.append(item.clone(action="findvideos", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot,
id=video_id,
fulltitle=scrapedtitle, contentTitle=contentTitle))
# Look for links to next pages...
try:
next_page_url = scrapertools.get_match(data, '<a href="([^"]+)">&raquo;</a>')
next_page_url = urlparse.urljoin(host, next_page_url)
itemlist.append(item.clone(action="novedades", title=">> Página siguiente", url=next_page_url))
except:
logger.error("Siguiente pagina no encontrada")
return itemlist
def categorias(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url, headers=headers)
patron = '<div class="pm-li-category">.*?<a href="([^"]+)"' \
'.*?<img src="([^"]+)".*?<h3>(?:<a.*?><span.*?>|)(.*?)<'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
if not scrapedthumbnail.startswith("data:image"):
scrapedthumbnail += "|" + headers[0][0] + "=" + headers[0][1]
else:
scrapedthumbnail = item.thumbnail
itemlist.append(item.clone(action="novedades", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
fanart=scrapedthumbnail))
# Look for links to next pages...
next_page_url = scrapertools.find_single_match(data, '<a href="([^"]+)"><i class="fa fa-arrow-right">')
if next_page_url != "":
itemlist.append(item.clone(action="categorias", title=">> Página siguiente", url=next_page_url))
return itemlist
def viendo(item):
logger.info()
itemlist = []
# Download the page
data = scrapertools.cachePage(item.url, headers=headers)
bloque = scrapertools.find_single_match(data, '<ul class="pm-ul-carousel-videos list-inline"(.*?)</ul>')
patron = '<span class="pm-label-duration">(.*?)</span>.*?<a href="([^"]+)"' \
'.*?title="([^"]+)".*?data-echo="([^"]+)"'
matches = scrapertools.find_multiple_matches(bloque, patron)
for duracion, scrapedurl, scrapedtitle, scrapedthumbnail in matches:
scrapedtitle += " [" + duracion + "]"
if not scrapedthumbnail.startswith("data:image"):
scrapedthumbnail += "|" + headers[0][0] + "=" + headers[0][1]
else:
scrapedthumbnail = item.thumbnail
logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
itemlist.append(item.clone(action="play_", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
fanart=scrapedthumbnail, fulltitle=scrapedtitle))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
# Check whether the video is already in favourites/watch later
url = "http://www.documaniatv.com/ajax.php?p=playlists&do=video-watch-load-my-playlists&video-id=%s" % item.id
data = scrapertools.cachePage(url, headers=headers)
data = jsontools.load(data)
data = re.sub(r"\n|\r|\t", '', data['html'])
itemlist.append(item.clone(action="play_", title=">> Reproducir vídeo", folder=False))
if "kodi" in config.get_platform():
folder = False
else:
folder = True
patron = '<li data-playlist-id="([^"]+)".*?onclick="playlist_(\w+)_item' \
'.*?<span class="pm-playlists-name">(.*?)</span>.*?' \
'<span class="pm-playlists-video-count">(.*?)</span>'
matches = scrapertools.find_multiple_matches(data, patron)
for playlist_id, playlist_action, playlist_title, video_count in matches:
scrapedtitle = playlist_action.replace('remove', 'Eliminar de ').replace('add', 'Añadir a ')
scrapedtitle += playlist_title + " (" + video_count + ")"
itemlist.append(item.clone(action="acciones_playlist", title=scrapedtitle, list_id=playlist_id,
url="http://www.documaniatv.com/ajax.php", folder=folder))
if "kodi" in config.get_platform():
itemlist.append(item.clone(action="acciones_playlist", title="Crear una nueva playlist y añadir el documental",
id=item.id, url="http://www.documaniatv.com/ajax.php", folder=folder))
itemlist.append(item.clone(action="acciones_playlist", title="Me gusta", url="http://www.documaniatv.com/ajax.php",
folder=folder))
return itemlist
def play_(item):
logger.info()
itemlist = []
try:
import xbmc
if not xbmc.getCondVisibility('System.HasAddon(script.cnubis)'):
from platformcode import platformtools
platformtools.dialog_ok("Addon no encontrado",
"Para ver vídeos alojados en cnubis necesitas tener instalado su add-on",
line3="Descárgalo en http://cnubis.com/kodi-pelisalacarta.html")
return itemlist
except:
pass
# Download the page
data = scrapertools.cachePage(item.url, headers=headers)
# Look for a direct link
video_url = scrapertools.find_single_match(data, 'class="embedded-video"[^<]+<iframe.*?src="([^"]+)"')
if config.get_platform() == "plex" or config.get_platform() == "mediaserver":
code = scrapertools.find_single_match(video_url, 'u=([A-z0-9]+)')
url = "http://cnubis.com/plugins/mediaplayer/embeder/_embedkodi.php?u=%s" % code
data = scrapertools.downloadpage(url, headers=headers)
video_url = scrapertools.find_single_match(data, 'file\s*:\s*"([^"]+)"')
itemlist.append(item.clone(action="play", url=video_url, server="directo"))
return itemlist
cnubis_script = xbmc.translatePath("special://home/addons/script.cnubis/default.py")
xbmc.executebuiltin("XBMC.RunScript(%s, url=%s&referer=%s&title=%s)"
% (cnubis_script, urllib.quote_plus(video_url), urllib.quote_plus(item.url),
item.fulltitle))
return itemlist
def usuario(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url, headers=headers)
profile_id = scrapertools.find_single_match(data, 'data-profile-id="([^"]+)"')
url = "http://www.documaniatv.com/ajax.php?p=profile&do=profile-load-playlists&uid=%s" % profile_id
data = scrapertools.cachePage(url, headers=headers)
data = jsontools.load(data)
data = data['html']
patron = '<div class="pm-video-thumb">.*?src="([^"]+)".*?' \
'<span class="pm-pl-items">(.*?)</span>(.*?)</div>' \
'.*?<h3.*?href="([^"]+)".*?title="([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedthumbnail, items, videos, scrapedurl, scrapedtitle in matches:
scrapedtitle = scrapedtitle.replace("Historia", 'Historial')
scrapedtitle += " (" + items + videos + ")"
if "no-thumbnail" in scrapedthumbnail:
scrapedthumbnail = ""
else:
scrapedthumbnail += "|" + headers[0][0] + "=" + headers[0][1]
itemlist.append(item.clone(action="playlist", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, fanart=scrapedthumbnail))
return itemlist
def acciones_playlist(item):
logger.info()
itemlist = []
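# ajax.php accepts three request shapes, mirrored by the branches below:
#   p=playlists&do=create-playlist&title=...&visibility=1&video-id=...&ui=video-watch
#   p=playlists&do=add-to-playlist|remove-from-playlist&playlist-id=...&video-id=...
#   ?vid=...&p=video&do=like  (sent as a plain GET)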
if item.title == "Crear una nueva playlist y añadir el documental":
from platformcode import platformtools
texto = platformtools.dialog_input(heading="Introduce el título de la nueva playlist")
if texto is not None:
post = "p=playlists&do=create-playlist&title=%s&visibility=1&video-id=%s&ui=video-watch" % (texto, item.id)
data = scrapertools.cachePage(item.url, headers=headers, post=post)
else:
return
elif item.title != "Me gusta":
if "Eliminar" in item.title:
action = "remove-from-playlist"
else:
action = "add-to-playlist"
post = "p=playlists&do=%s&playlist-id=%s&video-id=%s" % (action, item.list_id, item.id)
data = scrapertools.cachePage(item.url, headers=headers, post=post)
else:
item.url = "http://www.documaniatv.com/ajax.php?vid=%s&p=video&do=like" % item.id
data = scrapertools.cachePage(item.url, headers=headers)
try:
import xbmc
from platformcode import platformtools
platformtools.dialog_notification(item.title, "Se ha añadido/eliminado correctamente")
xbmc.executebuiltin("Container.Refresh")
except:
itemlist.append(item.clone(action="", title="Se ha añadido/eliminado correctamente"))
return itemlist
def playlist(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url, headers=headers)
patron = '<div class="pm-pl-list-index.*?src="([^"]+)".*?' \
'<a href="([^"]+)".*?>(.*?)</a>'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
scrapedthumbnail += "|" + headers[0][0] + "=" + headers[0][1]
logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
itemlist.append(item.clone(action="play_", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
fanart=scrapedthumbnail, fulltitle=scrapedtitle, folder=False))
return itemlist
View File
@@ -8,5 +8,15 @@
"banner": "gnula.png",
"categories": [
"movie"
]
}
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
}
]
}
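As a reference, a minimal sketch of how a search front-end could honor this flag; the helper is hypothetical, and only config.get_setting (with the name-plus-channel signature used throughout this repo) is assumed real:

from platformcode import config

def channels_for_global_search(channel_names):
    # Hypothetical helper: keep only the channels whose per-channel
    # "include_in_global_search" setting is enabled; a missing value
    # is treated as enabled, matching the "default": true declared above.
    enabled = []
    for name in channel_names:
        value = config.get_setting('include_in_global_search', name)
        if value is None or value:
            enabled.append(name)
    return enabled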
View File
@@ -1,14 +1,14 @@
# -*- coding: utf-8 -*-
import re
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger
from platformcode import config, logger
host = "http://gnula.nu/"
host_search = "https://www.googleapis.com/customsearch/v1element?key=AIzaSyCVAXiUzRYsML1Pv6RwSG1gunmMikTzQqY&rsz=small&num=10&hl=es&prettyPrint=false&source=gcsc&gss=.es&sig=45e50696e04f15ce6310843f10a3a8fb&cx=014793692610101313036:vwtjajbclpq&q=%s&cse_tok=AOdTmaBgzSiy5RxoV4cZSGGEr17reWoGLg:1519145966291&googlehost=www.google.com&callback=google.search.Search.apiary10745&nocache=1519145965573&start=0"
def mainlist(item):
logger.info()
@@ -19,43 +19,87 @@ def mainlist(item):
Item(channel=item.channel, title="Generos", action="generos", url= host + "generos/lista-de-generos/"))
itemlist.append(Item(channel=item.channel, title="Recomendadas", action="peliculas",
url= host + "peliculas-online/lista-de-peliculas-recomendadas/", viewmode="movie"))
itemlist.append(Item(channel = item.channel, action = ""))
itemlist.append(
Item(channel=item.channel, title="Buscar", action="search", url = host_search))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url %texto
try:
return sub_search(item)
# Catch the exception so the global search is not interrupted when a channel fails
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def sub_search(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '(?s)clicktrackUrl":".*?q=(.*?)".*?'
patron += 'title":"([^"]+)".*?'
patron += 'cseImage":{"src":"([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
scrapedurl = scrapertools.find_single_match(scrapedurl, ".*?online/")
scrapedtitle = scrapedtitle.decode("unicode-escape").replace(" online", "").replace("<b>", "").replace("</b>", "")
if "ver-" not in scrapedurl:
continue
year = scrapertools.find_single_match(scrapedtitle, "\d{4}")
contentTitle = scrapedtitle.replace("(%s)" %year,"").replace("Ver","").strip()
itemlist.append(Item(action = "findvideos",
channel = item.channel,
contentTitle = contentTitle,
infoLabels = {"year":year},
title = scrapedtitle,
thumbnail = scrapedthumbnail,
url = scrapedurl
))
if itemlist:
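# Google CSE pages results 10 at a time: advance "start" by 10 and derive
# the 1-based number for the label (e.g. start=0 -> "Página 2").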
page = int(scrapertools.find_single_match(item.url, ".*?start=(\d+)")) + 10
npage = (page / 10) + 1
item_page = scrapertools.find_single_match(item.url, "(.*?start=)") + str(page)
itemlist.append(Item(action = "sub_search",
channel = item.channel,
title = "[COLOR green]Página %s[/COLOR]" %npage,
url = item_page
))
return itemlist
def generos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.find_single_match(data, '<spa[^>]+>Lista de g(.*?)/table')
patron = '<strong>([^<]+)</strong> .<a href="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
matches = scrapertools.find_multiple_matches(data, patron)
for genero, scrapedurl in matches:
title = scrapertools.htmlclean(genero)
plot = ""
url = item.url + scrapedurl
thumbnail = ""
itemlist.append(Item(channel = item.channel,
action = 'peliculas',
title = title,
url = url,
thumbnail = thumbnail,
plot = plot,
viewmode = "movie"))
itemlist = sorted(itemlist, key=lambda item: item.title)
return itemlist
def peliculas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<a class="Ntooltip" href="([^"]+)">([^<]+)<span><br[^<]+'
patron += '<img src="([^"]+)"></span></a>(.*?)<br'
matches = re.compile(patron, re.DOTALL).findall(data)
itemlist = []
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle, scrapedthumbnail, resto in matches:
language = []
plot = scrapertools.htmlclean(resto).strip()
@@ -110,6 +154,13 @@ def findvideos(item):
url = url
))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
if itemlist:
if config.get_videolibrary_support():
itemlist.append(Item(channel = item.channel, action = ""))
itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
fulltitle = item.contentTitle
))
return itemlist
View File
@@ -134,7 +134,7 @@ def listado(item):
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)
folder = filetools.join(config.get_data_path(), 'thumbs_kbagi')
patron = 'data-file-id(.*?)</div></div></li>'
patron = 'data-file-id(.*?</p>)</div></div>'
bloques = scrapertools.find_multiple_matches(data, patron)
for block in bloques:
if "adult_info" in block and not adult_content:
@@ -184,8 +184,7 @@ def listado(item):
new_item.fanart = item.thumbnail
itemlist.append(new_item)
next_page = scrapertools.find_single_match(data, 'class="pageSplitter" data-nextpage-number="([^"]+)"')
next_page = scrapertools.find_single_match(data, 'class="pageSplitter.*?" data-nextpage-number="([^"]+)"')
if next_page:
if item.post:
post = re.sub(r'pageNumber=(\d+)', "pageNumber=" + next_page, item.post)
View File
@@ -1,10 +1,7 @@
# -*- coding: utf-8 -*-
import re
from threading import Thread
import xbmc
import xbmcgui
from core import httptools
from core import scrapertools
from core import servertools
@@ -12,71 +9,22 @@ from core import tmdb
from core.item import Item
from platformcode import config, logger
ACTION_SHOW_FULLSCREEN = 36
ACTION_GESTURE_SWIPE_LEFT = 511
ACTION_SELECT_ITEM = 7
ACTION_PREVIOUS_MENU = 10
ACTION_MOVE_LEFT = 1
ACTION_MOVE_RIGHT = 2
ACTION_MOVE_DOWN = 4
ACTION_MOVE_UP = 3
OPTION_PANEL = 6
OPTIONS_OK = 5
__modo_grafico__ = config.get_setting('modo_grafico', "peliscon")
# Bing searches go through mechanize to avoid bans
def browser(url):
import mechanize
# Use a mechanize Browser to get around problems with the Bing search
br = mechanize.Browser()
# Browser options
br.set_handle_equiv(False)
br.set_handle_gzip(True)
br.set_handle_redirect(True)
br.set_handle_referer(False)
br.set_handle_robots(False)
# Follows refresh 0 but not hangs on refresh > 0
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
# Want debugging messages?
# br.set_debug_http(True)
# br.set_debug_redirects(True)
# br.set_debug_responses(True)
# User-Agent (this is cheating, ok?)
# br.addheaders = [('User-agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/600.7.12 (KHTML, like Gecko) Version/7.1.7 Safari/537.85.16')]
# br.addheaders =[('Cookie','SRCHD=AF=QBRE; domain=.bing.com; expires=25 de febrero de 2018 13:00:28 GMT+1; MUIDB=3B942052D204686335322894D3086911; domain=www.bing.com;expires=24 de febrero de 2018 13:00:28 GMT+1')]
# Open some site, let's pick a random one, the first that pops in mind
r = br.open(url)
response = r.read()
print response
if "img,divreturn" in response:
r = br.open("http://ssl-proxy.my-addr.org/myaddrproxy.php/" + url)
print "prooooxy"
response = r.read()
return response
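For illustration, a sketch of how the helper is meant to be called; the title and regex here are examples, not lifted from the channel:

# Sketch: resolve an IMDb id through Bing. browser() falls back to the
# ssl-proxy mirror on its own when Bing blocks the direct request.
html = browser("http://www.bing.com/search?q=Breaking+Bad+site:imdb.com")
imdb_id = scrapertools.find_single_match(html, 'imdb\.com/title/(tt\d+)')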
api_key = "2e2160006592024ba87ccdf78c28f49f"
api_fankey = "dffe90fba4d02c199ae7a9e71330c987"
host = "http://peliscon.com"
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(
item.clone(title="[COLOR aqua][B]Películas[/B][/COLOR]", action="scraper", url="http://peliscon.com/peliculas/",
item.clone(title="[COLOR aqua][B]Películas[/B][/COLOR]", action="scraper", url= host + "/peliculas/",
thumbnail="http://imgur.com/FrcWTS8.png", fanart="http://imgur.com/MGQyetQ.jpg",
contentType="movie"))
itemlist.append(itemlist[-1].clone(title="[COLOR aqua][B]Series[/B][/COLOR]", action="scraper",
url="http://peliscon.com/series/", thumbnail="http://imgur.com/FrcWTS8.png",
url= host + "/series/", thumbnail="http://imgur.com/FrcWTS8.png",
fanart="http://imgur.com/i41eduI.jpg", contentType="tvshow"))
itemlist.append(item.clone(title="[COLOR aqua][B] Últimos capitulos[/B][/COLOR]", action="ul_cap",
url="http://peliscon.com/episodios/", thumbnail="http://imgur.com/FrcWTS8.png",
url= host + "/episodios/", thumbnail="http://imgur.com/FrcWTS8.png",
fanart="http://imgur.com/i41eduI.jpg", contentType="tvshow"))
itemlist.append(itemlist[-1].clone(title="[COLOR crimson][B]Buscar[/B][/COLOR]", action="search",
thumbnail="http://imgur.com/FrcWTS8.png", fanart="http://imgur.com/h1b7tfN.jpg"))
@@ -86,7 +34,7 @@ def mainlist(item):
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = "https://peliscon.com/?s=" + texto
item.url = host + "/?s=" + texto
item.extra = "search"
try:
return buscador(item)
@@ -105,9 +53,7 @@ def buscador(item):
patron = scrapertools.find_multiple_matches(data,
'<div class="result-item">.*?href="([^"]+)".*?alt="([^"]+)".*?<span class=".*?">([^"]+)</span>.*?<span class="year">([^"]+)</span>')
for url, title, genere, year in patron:
if "Serie" in genere:
checkmt = "tvshow"
genere = "[COLOR aqua][B]" + genere + "[/B][/COLOR]"
@@ -115,20 +61,15 @@ def buscador(item):
checkmt = "movie"
genere = "[COLOR cadetblue][B]" + genere + "[/B][/COLOR]"
titulo = "[COLOR crimson]" + title + "[/COLOR]" + " [ " + genere + " ] "
if checkmt == "movie":
new_item = item.clone(action="findvideos", title=titulo, url=url, fulltitle=title, contentTitle=title,
contentType="movie", library=True)
else:
new_item = item.clone(action="findtemporadas", title=titulo, url=url, fulltitle=title, contentTitle=title,
show=title, contentType="tvshow", library=True)
new_item.infoLabels['year'] = year
itemlist.append(new_item)
try:
from core import tmdb
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
for item in itemlist:
if not "Siguiente >>" in item.title:
@@ -143,7 +84,6 @@ def buscador(item):
next = scrapertools.find_single_match(data, '<div class=\'resppages\'><a href="([^"]+)"')
if len(next) > 0:
url = next
itemlist.append(item.clone(title="[COLOR springgreen][B]Siguiente >>[/B][/COLOR]", action="buscador", url=url))
return itemlist
@@ -152,14 +92,11 @@ def scraper(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
if item.contentType == "movie":
patron = scrapertools.find_multiple_matches(data,
'<div class="poster">.*?src="(.*?)" alt=.*?href="(.*?)">.*?'
'<h4>(.*?)<\/h4>.*?img\/flags\/(.*?)\.png.*?imdb.*?<span>(.*?)>')
for thumb, url, title, language, year in patron:
titulo = title
title = re.sub(r"!|¡", "", title)
@@ -169,13 +106,10 @@ def scraper(item):
fulltitle=title, contentTitle=title, contentType="movie", extra=year, library=True,
language= language, infoLabels={'year':year})
itemlist.append(new_item)
else:
patron = scrapertools.find_multiple_matches(data,
'<div class="poster">.*?src="(.*?)" alt=.*?href="(.*?)">.*?'
'<h4>(.*?)<\/h4>.*?<span>(.*?)<')
for thumb, url, title, year in patron:
titulo = title.strip()
title = re.sub(r"\d+x.*", "", title)
@@ -183,17 +117,14 @@ def scraper(item):
thumbnail=thumb, fulltitle=title, contentTitle=title, show=title,
contentType="tvshow", library=True, infoLabels={'year':year})
itemlist.append(new_item)
## Pagination
next = scrapertools.find_single_match(data, '<div class=\'resppages\'><a href="([^"]+)"')
if len(next) > 0:
url = next
itemlist.append(
item.clone(title="[COLOR springgreen][B]Siguiente >>[/B][/COLOR]", thumbnail="http://imgur.com/a7lQAld.png",
url=url))
try:
from core import tmdb
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
for item in itemlist:
if not "Siguiente >>" in item.title:
@@ -202,13 +133,8 @@ def scraper(item):
else:
item.infoLabels['rating'] = "[COLOR springgreen]" + str(item.infoLabels['rating']) + "[/COLOR]"
item.title = item.title + " " + str(item.infoLabels['rating'])
except:
pass
for item_tmdb in itemlist:
logger.info(str(item_tmdb.infoLabels['tmdb_id']))
return itemlist
@@ -217,35 +143,25 @@ def ul_cap(item):
logger.info()
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = scrapertools.find_multiple_matches(data,
'<div class="poster">.*?<img src="([^"]+)" alt="([^"]+):.*?href="([^"]+)"><span class="b">(\d+x\d+)<\/span>')
for thumb, title, url, cap in patron:
temp = re.sub(r"x\d+", "", cap)
epi = re.sub(r"\d+x", "", cap)
titulo = title.strip() + "--" + "[COLOR red][B]" + cap + "[/B][/COLOR]"
title = re.sub(r"\d+x.*", "", title)
# filtro_thumb = thumb.replace("https://image.tmdb.org/t/p/w300", "")
# filtro_list = {"poster_path": filtro_thumb}
# filtro_list = filtro_list.items()
# url_tv = scrapertools.find_single_match(url,'episodios/(.*?)/')
new_item = item.clone(action="findvideos", title="[COLOR aqua]" + titulo + "[/COLOR]", url=url, thumbnail=thumb,
fulltitle=title, contentTitle=title, show=title, contentType="tvshow", temp=temp, epi=epi,
library=True)
itemlist.append(new_item)
## Pagination
next = scrapertools.find_single_match(data, '<div class=\'resppages\'><a href="([^"]+)"')
if len(next) > 0:
url = next
itemlist.append(
item.clone(title="[COLOR springgreen][B]Siguiente >>[/B][/COLOR]", thumbnail="http://imgur.com/a7lQAld.png",
url=url))
try:
from core import tmdb
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
for item in itemlist:
@@ -257,27 +173,17 @@ def ul_cap(item):
else:
item.infoLabels['rating'] = "[COLOR springgreen] (" + str(item.infoLabels['rating']) + ")[/COLOR]"
item.title = item.title + " " + str(item.infoLabels['rating'])
except:
pass
for item_tmdb in itemlist:
logger.info(str(item_tmdb.infoLabels['tmdb_id']))
return itemlist
def findtemporadas(item):
logger.info()
itemlist = []
if not item.temp:
th = Thread(target=get_art(item))
th.setDaemon(True)
th.start()
check_temp = None
else:
check_temp = "yes"
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
@@ -318,19 +224,12 @@ def findtemporadas(item):
for temporada, bloque_epis in bloque_episodios:
item.infoLabels = item.InfoLabels
item.infoLabels['season'] = temporada
itemlist.append(item.clone(action="epis",
title="[COLOR cornflowerblue][B]Temporada [/B][/COLOR]" + "[COLOR darkturquoise][B]" + temporada + "[/B][/COLOR]",
url=bloque_epis, contentType=item.contentType, contentTitle=item.contentTitle,
show=item.show, extra=item.extra, fanart_extra=fanart_extra, fanart_info=fanart_info,
datalibrary=data, check_temp=check_temp, folder=True))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
for item in itemlist:
item.fanart = fanart
item.extra = extra
if item.temp:
item.thumbnail = item.infoLabels['temporada_poster']
if config.get_videolibrary_support() and itemlist:
if len(bloque_episodios) == 1:
extra = "epis"
@@ -340,7 +239,7 @@ def findtemporadas(item):
infoLabels = {'tmdb_id': item.infoLabels['tmdb_id'], 'tvdb_id': item.infoLabels['tvdb_id'],
'imdb_id': item.infoLabels['imdb_id']}
itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", text_color="0xFFe5ffcc",
action="add_serie_to_library", extra=extra, url=item.url,
action="add_serie_to_library", extra="", url=item.url,
contentSerieName=item.fulltitle, infoLabels=infoLabels,
thumbnail='http://imgur.com/3ik73p8.png', datalibrary=data))
return itemlist
@@ -349,16 +248,13 @@ def findtemporadas(item):
def epis(item):
logger.info()
itemlist = []
if item.extra == "serie_add":
item.url = item.datalibrary
patron = scrapertools.find_multiple_matches(item.url, '<div class="imagen"><a href="([^"]+)".*?"numerando">(.*?)<')
for url, epi in patron:
episodio = scrapertools.find_single_match(epi, '\d+ - (\d+)')
item.infoLabels['episode'] = episodio
epi = re.sub(r" - ", "X", epi)
itemlist.append(
item.clone(title="[COLOR deepskyblue]Episodio " + "[COLOR red]" + epi, url=url, action="findvideos",
show=item.show, fanart=item.extra, extra=item.extra, fanart_extra=item.fanart_extra,
@@ -377,21 +273,12 @@ def findvideos(item):
itemlist = []
if item.temp:
url_epis = item.url
data = httptools.downloadpage(item.url).data
if not item.infoLabels['episode'] or item.temp:
th = Thread(target=get_art(item))
th.setDaemon(True)
th.start()
if item.contentType != "movie":
if not item.infoLabels['episode']:
capitulo = scrapertools.find_single_match(item.title, '(\d+x\d+)')
url_capitulo = scrapertools.find_single_match(data,
'<a href="(http://www.divxtotal.com/wp-content/uploads/.*?' + capitulo + '.*?.torrent)')
if len(item.extra.split("|")) >= 2:
extra = item.extra
else:
@@ -399,17 +286,14 @@ def findvideos(item):
else:
capitulo = item.title
url_capitulo = item.url
try:
fanart = item.fanart_extra
except:
fanart = item.extra.split("|")[0]
url_data = scrapertools.find_multiple_matches(data, '<div id="option-(.*?)".*?src="([^"]+)"')
for option, url in url_data:
server, idioma = scrapertools.find_single_match(data,
'href="#option-' + option + '">.*?</b>(.*?)<span class="dt_flag">.*?flags/(.*?).png')
if not item.temp:
item.infoLabels['year'] = None
if item.temp:
@@ -427,7 +311,6 @@ def findvideos(item):
itemlist.append(Item(channel=item.channel, title=title, url=url, action="play", fanart=fanart,
thumbnail=item.thumbnail, extra=item.extra, fulltitle=item.fulltitle,
folder=False))
if item.temp:
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
for item in itemlist:
@@ -443,26 +326,17 @@ def findvideos(item):
fanart = item.fanart
if item.temp:
item.infoLabels['tvdb_id'] = item.tvdb
itemlist.append(
Item(channel=item.channel, title="[COLOR steelblue][B] info[/B][/COLOR]", action="info_capitulos",
fanart=fanart, thumbnail=item.thumb_art, thumb_info=item.thumb_info, extra=item.extra,
show=item.show, InfoLabels=item.infoLabels, folder=False))
if item.temp and not item.check_temp:
url_epis = re.sub(r"-\dx.*", "", url_epis)
url_epis = url_epis.replace("episodios", "series")
itemlist.append(
Item(channel=item.channel, title="[COLOR salmon][B]Todos los episodios[/B][/COLOR]", url=url_epis,
action="findtemporadas", server="torrent", fanart=item.extra.split("|")[1],
thumbnail=item.infoLabels['thumbnail'], extra=item.extra + "|" + item.thumbnail,
action="findtemporadas", server="torrent",
thumbnail=item.infoLabels['thumbnail'],
contentType=item.contentType, contentTitle=item.contentTitle, InfoLabels=item.infoLabels,
thumb_art=item.thumb_art, thumb_info=item.thumbnail, fulltitle=item.fulltitle,
library=item.library, temp=item.temp, folder=True))
else:
url_data = scrapertools.find_multiple_matches(data, '<div id="option-(.*?)".*?src="([^"]+)"')
for option, url in url_data:
server, idioma = scrapertools.find_single_match(data,
@@ -481,7 +355,6 @@ def findvideos(item):
action="add_pelicula_to_library", url=item.url, fanart=item.extra.split("|")[0],
infoLabels=infoLabels, text_color="0xFFe5ffcc",
thumbnail='http://imgur.com/3ik73p8.png'))
return itemlist
@@ -496,417 +369,6 @@ def play(item):
return itemlist
def info_capitulos(item, images={}):
logger.info()
itemlist = []
try:
url = "http://thetvdb.com/api/1D62F2F90030C444/series/" + str(item.InfoLabels['tvdb_id']) + "/default/" + str(
item.InfoLabels['season']) + "/" + str(item.InfoLabels['episode']) + "/es.xml"
if "/0" in url:
url = url.replace("/0", "/")
from core import jsontools
data = httptools.downloadpage(url).data
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
if "<filename>episodes" in data:
image = scrapertools.find_single_match(data, '<Data>.*?<filename>(.*?)</filename>')
image = "http://thetvdb.com/banners/" + image
else:
try:
image = item.InfoLabels['episodio_imagen']
except:
image = "http://imgur.com/ZiEAVOD.png"
foto = item.thumb_info
if not ".png" in foto:
foto = "http://imgur.com/PRiEW1D.png"
try:
title = item.InfoLabels['episodio_titulo']
except:
title = ""
title = "[COLOR red][B]" + title + "[/B][/COLOR]"
try:
plot = "[COLOR peachpuff]" + str(item.InfoLabels['episodio_sinopsis']) + "[/COLOR]"
except:
plot = scrapertools.find_single_match(data, '<Overview>(.*?)</Overview>')
if plot == "":
plot = "Sin información todavia"
try:
rating = item.InfoLabels['episodio_vote_average']
except:
rating = 0
try:
if rating >= 5 and rating < 8:
rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR springgreen][B]" + str(rating) + "[/B][/COLOR]"
elif rating >= 8 and rating < 10:
rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR yellow][B]" + str(rating) + "[/B][/COLOR]"
elif rating == 10:
rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR orangered][B]" + str(rating) + "[/B][/COLOR]"
else:
rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR crimson][B]" + str(rating) + "[/B][/COLOR]"
except:
rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR crimson][B]" + str(rating) + "[/B][/COLOR]"
if "10." in rating:
rating = re.sub(r'10\.\d+', '10', rating)
except:
title = "[COLOR red][B]LO SENTIMOS...[/B][/COLOR]"
plot = "Este capitulo no tiene informacion..."
plot = "[COLOR yellow][B]" + plot + "[/B][/COLOR]"
image = "http://s6.postimg.org/ub7pb76c1/noinfo.png"
foto = "http://s6.postimg.org/nm3gk1xox/noinfosup2.png"
rating = ""
ventana = TextBox2(title=title, plot=plot, thumbnail=image, fanart=foto, rating=rating)
ventana.doModal()
class TextBox2(xbmcgui.WindowDialog):
""" Create a skinned textbox window """
def __init__(self, *args, **kwargs):
self.getTitle = kwargs.get('title')
self.getPlot = kwargs.get('plot')
self.getThumbnail = kwargs.get('thumbnail')
self.getFanart = kwargs.get('fanart')
self.getRating = kwargs.get('rating')
self.background = xbmcgui.ControlImage(70, 20, 1150, 630, 'http://imgur.com/aj4qzTr.jpg')
self.title = xbmcgui.ControlTextBox(120, 60, 430, 50)
self.rating = xbmcgui.ControlTextBox(145, 112, 1030, 45)
self.plot = xbmcgui.ControlTextBox(120, 150, 1056, 100)
self.thumbnail = xbmcgui.ControlImage(120, 300, 1056, 300, self.getThumbnail)
self.fanart = xbmcgui.ControlImage(780, 43, 390, 100, self.getFanart)
self.addControl(self.background)
self.background.setAnimations(
[('conditional', 'effect=slide start=1000% end=0% time=1500 condition=true tween=bounce',),
('WindowClose', 'effect=slide delay=800 start=0% end=1000% time=800 condition=true',)])
self.addControl(self.thumbnail)
self.thumbnail.setAnimations([('conditional',
'effect=zoom start=0% end=100% delay=2700 time=1500 condition=true tween=elastic easing=inout',),
('WindowClose', 'effect=slide end=0,700% time=300 condition=true',)])
self.addControl(self.plot)
self.plot.setAnimations(
[('conditional', 'effect=zoom delay=2000 center=auto start=0 end=100 time=800 condition=true ',), (
'conditional',
'effect=rotate delay=2000 center=auto aceleration=6000 start=0% end=360% time=800 condition=true',),
('WindowClose', 'effect=zoom center=auto start=100% end=-0% time=600 condition=true',)])
self.addControl(self.fanart)
self.fanart.setAnimations(
[('WindowOpen', 'effect=slide start=0,-700 delay=1000 time=2500 tween=bounce condition=true',), (
'conditional',
'effect=rotate center=auto start=0% end=360% delay=3000 time=2500 tween=bounce condition=true',),
('WindowClose', 'effect=slide end=0,-700% time=1000 condition=true',)])
self.addControl(self.title)
self.title.setText(self.getTitle)
self.title.setAnimations(
[('conditional', 'effect=slide start=-1500% end=0% delay=1000 time=2000 condition=true tween=elastic',),
('WindowClose', 'effect=slide start=0% end=-1500% time=800 condition=true',)])
self.addControl(self.rating)
self.rating.setText(self.getRating)
self.rating.setAnimations(
[('conditional', 'effect=fade start=0% end=100% delay=3000 time=1500 condition=true',),
('WindowClose', 'effect=slide end=0,-700% time=600 condition=true',)])
xbmc.sleep(200)
try:
self.plot.autoScroll(7000, 6000, 30000)
except:
xbmc.executebuiltin(
'Notification([COLOR red][B]Actualiza Kodi a su última versión[/B][/COLOR], [COLOR skyblue]para mejor info[/COLOR],8000,"https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/kodi-icon.png")')
self.plot.setText(self.getPlot)
def get(self):
self.show()
def onAction(self, action):
if action == ACTION_PREVIOUS_MENU or action.getId() == ACTION_GESTURE_SWIPE_LEFT or action == 110 or action == 92:
self.close()
def test():
return True
def fanartv(item, id_tvdb, id, images={}):
headers = [['Content-Type', 'application/json']]
from core import jsontools
if item.contentType == "movie":
url = "http://webservice.fanart.tv/v3/movies/%s?api_key=cab16e262d72fea6a6843d679aa10300" \
% id
else:
url = "http://webservice.fanart.tv/v3/tv/%s?api_key=cab16e262d72fea6a6843d679aa10300" % id_tvdb
try:
data = jsontools.load(scrapertools.downloadpage(url, headers=headers))
if data and not "error message" in data:
for key, value in data.items():
if key not in ["name", "tmdb_id", "imdb_id", "thetvdb_id"]:
images[key] = value
else:
images = []
except:
images = []
return images
def get_art(item):
logger.info()
id = item.infoLabels['tmdb_id']
check_fanart = item.infoLabels['fanart']
if item.contentType != "movie":
tipo_ps = "tv"
else:
tipo_ps = "movie"
if not id:
year = item.extra
otmdb = tmdb.Tmdb(texto_buscado=item.fulltitle, year=year, tipo=tipo_ps)
id = otmdb.result.get("id")
if id == None:
otmdb = tmdb.Tmdb(texto_buscado=item.fulltitle, tipo=tipo_ps)
id = otmdb.result.get("id")
if id == None:
if item.contentType == "movie":
urlbing_imdb = "http://www.bing.com/search?q=%s+%s+tv+series+site:imdb.com" % (
item.fulltitle.replace(' ', '+'), year)
data = browser(urlbing_imdb)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|http://ssl-proxy.my-addr.org/myaddrproxy.php/", "", data)
subdata_imdb = scrapertools.find_single_match(data,
'<li class="b_algo">(.*?)h="ID.*?<strong>.*?TV Series')
else:
urlbing_imdb = "http://www.bing.com/search?q=%s+%s+site:imdb.com" % (
item.fulltitle.replace(' ', '+'), year)
data = browser(urlbing_imdb)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|http://ssl-proxy.my-addr.org/myaddrproxy.php/", "", data)
subdata_imdb = scrapertools.find_single_match(data, '<li class="b_algo">(.*?)h="ID.*?<strong>')
try:
imdb_id = scrapertools.get_match(subdata_imdb, '<a href=.*?http.*?imdb.com/title/(.*?)/.*?"')
except:
try:
imdb_id = scrapertools.get_match(subdata_imdb,
'<a href=.*?http.*?imdb.com/.*?/title/(.*?)/.*?"')
except:
imdb_id = ""
otmdb = tmdb.Tmdb(external_id=imdb_id, external_source="imdb_id", tipo=tipo_ps, idioma_busqueda="es")
id = otmdb.result.get("id")
if id == None:
if "(" in item.fulltitle:
title = scrapertools.find_single_match(item.fulltitle, '\(.*?\)')
if item.contentType != "movie":
urlbing_imdb = "http://www.bing.com/search?q=%s+%s+tv+series+site:imdb.com" % (
title.replace(' ', '+'), year)
data = browser(urlbing_imdb)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|http://ssl-proxy.my-addr.org/myaddrproxy.php/", "",
data)
subdata_imdb = scrapertools.find_single_match(data,
'<li class="b_algo">(.*?)h="ID.*?<strong>.*?TV Series')
else:
urlbing_imdb = "http://www.bing.com/search?q=%s+%s+site:imdb.com" % (
title.replace(' ', '+'), year)
data = browser(urlbing_imdb)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|http://ssl-proxy.my-addr.org/myaddrproxy.php/", "",
data)
subdata_imdb = scrapertools.find_single_match(data,
'<li class="b_algo">(.*?)h="ID.*?<strong>')
try:
imdb_id = scrapertools.get_match(subdata_imdb,
'<a href=.*?http.*?imdb.com/title/(.*?)/.*?"')
except:
try:
imdb_id = scrapertools.get_match(subdata_imdb,
'<a href=.*?http.*?imdb.com/.*?/title/(.*?)/.*?"')
except:
imdb_id = ""
otmdb = tmdb.Tmdb(external_id=imdb_id, external_source="imdb_id", tipo=tipo_ps,
idioma_busqueda="es")
id = otmdb.result.get("id")
if not id:
fanart = item.fanart
imagenes = []
itmdb = tmdb.Tmdb(id_Tmdb=id, tipo=tipo_ps)
images = itmdb.result.get("images")
if images:
for key, value in images.iteritems():
for detail in value:
imagenes.append('http://image.tmdb.org/t/p/original' + detail["file_path"])
if item.contentType == "movie":
if len(imagenes) >= 4:
if imagenes[0] != check_fanart:
item.fanart = imagenes[0]
else:
item.fanart = imagenes[1]
if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart:
item.extra = imagenes[1] + "|" + imagenes[2]
else:
if imagenes[1] != check_fanart and imagenes[1] != item.fanart:
item.extra = imagenes[1] + "|" + imagenes[3]
elif imagenes[2] != check_fanart:
item.extra = imagenes[2] + "|" + imagenes[3]
else:
item.extra = imagenes[3] + "|" + imagenes[3]
elif len(imagenes) == 3:
if imagenes[0] != check_fanart:
item.fanart = imagenes[0]
else:
item.fanart = imagenes[1]
if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart:
item.extra = imagenes[1] + "|" + imagenes[2]
else:
if imagenes[1] != check_fanart and imagenes[1] != item.fanart:
item.extra = imagenes[0] + "|" + imagenes[1]
elif imagenes[2] != check_fanart:
item.extra = imagenes[1] + "|" + imagenes[2]
else:
item.extra = imagenes[1] + "|" + imagenes[1]
elif len(imagenes) == 2:
if imagenes[0] != check_fanart:
item.fanart = imagenes[0]
else:
item.fanart = imagenes[1]
if imagenes[1] != check_fanart and imagenes[1] != item.fanart:
item.extra = imagenes[0] + "|" + imagenes[1]
else:
item.extra = imagenes[1] + "|" + imagenes[0]
elif len(imagenes) == 1:
item.extra = imagenes[0] + "|" + imagenes[0]
else:
item.extra = item.fanart + "|" + item.fanart
id_tvdb = ""
else:
if itmdb.result.get("external_ids").get("tvdb_id"):
id_tvdb = itmdb.result.get("external_ids").get("tvdb_id")
if item.temp:
item.tvdb = id_tvdb
else:
id_tvdb = ""
if len(imagenes) >= 6:
if imagenes[0] != check_fanart:
item.fanart = imagenes[0]
else:
item.fanart = imagenes[1]
if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart:
item.extra = imagenes[1] + "|" + imagenes[2] + "|" + imagenes[3] + "|" + imagenes[4] + "|" + \
imagenes[5]
else:
if imagenes[1] != check_fanart and imagenes[1] != item.fanart:
item.extra = imagenes[1] + "|" + imagenes[3] + "|" + imagenes[4] + "|" + imagenes[5] + "|" + \
imagenes[2]
elif imagenes[2] != check_fanart:
item.extra = imagenes[2] + "|" + imagenes[3] + "|" + imagenes[4] + "|" + imagenes[5] + "|" + \
imagenes[1]
else:
item.extra = imagenes[3] + "|" + imagenes[4] + "|" + imagenes[5] + "|" + imagenes[2] + "|" + \
imagenes[1]
elif len(imagenes) == 5:
if imagenes[0] != check_fanart:
item.fanart = imagenes[0]
else:
item.fanart = imagenes[1]
if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart:
item.extra = imagenes[1] + "|" + imagenes[2] + "|" + imagenes[3] + "|" + imagenes[4]
else:
if imagenes[1] != check_fanart and imagenes[1] != item.fanart:
item.extra = imagenes[1] + "|" + imagenes[3] + "|" + imagenes[4] + "|" + imagenes[2]
elif imagenes[2] != check_fanart:
item.extra = imagenes[2] + "|" + imagenes[3] + "|" + imagenes[4] + "|" + imagenes[1]
else:
item.extra = imagenes[3] + "|" + imagenes[4] + "|" + imagenes[2] + "|" + imagenes[1]
elif len(imagenes) == 4:
if imagenes[0] != check_fanart:
item.fanart = imagenes[0]
else:
item.fanart = imagenes[1]
if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart:
item.extra = imagenes[1] + "|" + imagenes[2] + "|" + imagenes[3] + "|" + imagenes[4]
else:
if imagenes[1] != check_fanart and imagenes[1] != item.fanart:
item.extra = imagenes[1] + "|" + imagenes[3] + "|" + imagenes[2]
elif imagenes[2] != check_fanart:
item.extra = imagenes[2] + "|" + imagenes[3] + "|" + imagenes[1]
else:
item.extra = imagenes[3] + "|" + imagenes[2] + "|" + imagenes[1]
elif len(imagenes) == 3:
if imagenes[0] != check_fanart:
item.fanart = imagenes[0]
else:
item.fanart = imagenes[1]
if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart:
item.extra = imagenes[1] + "|" + imagenes[2]
else:
if imagenes[1] != check_fanart and imagenes[1] != item.fanart:
item.extra = imagenes[0] + "|" + imagenes[1]
elif imagenes[2] != check_fanart:
item.extra = imagenes[1] + "|" + imagenes[2]
else:
item.extra = imagenes[1] + "|" + imagenes[1]
elif len(imagenes) == 2:
if imagenes[0] != check_fanart:
item.fanart = imagenes[0]
else:
item.fanart = imagenes[1]
if imagenes[1] != check_fanart and imagenes[1] != item.fanart:
item.extra = imagenes[0] + "|" + imagenes[1]
else:
item.extra = imagenes[1] + "|" + imagenes[0]
elif len(imagenes) == 1:
item.extra = imagenes[0] + "|" + imagenes[0]
else:
item.extra = item.fanart + "|" + item.fanart
item.extra = item.extra
images_fanarttv = fanartv(item, id_tvdb, id)
if images_fanarttv:
if item.contentType == "movie":
if images_fanarttv.get("moviedisc"):
item.thumbnail = images_fanarttv.get("moviedisc")[0].get("url")
elif images_fanarttv.get("hdmovielogo"):
item.thumbnail = images_fanarttv.get("hdmovielogo")[0].get("url")
elif images_fanarttv.get("moviethumb"):
item.thumbnail = images_fanarttv.get("moviethumb")[0].get("url")
elif images_fanarttv.get("moviebanner"):
item.thumbnail = images_fanarttv.get("moviebanner")[0].get("url")
else:
item.thumbnail = item.thumbnail
else:
if images_fanarttv.get("hdtvlogo"):
item.thumbnail = images_fanarttv.get("hdtvlogo")[0].get("url")
elif images_fanarttv.get("clearlogo"):
item.thumbnail = images_fanarttv.get("clearlogo")[0].get("url")
item.thumb_info = item.thumbnail
if images_fanarttv.get("hdclearart"):
item.thumb_art = images_fanarttv.get("hdclearart")[0].get("url")
elif images_fanarttv.get("tvbanner"):
item.thumb_art = images_fanarttv.get("tvbanner")[0].get("url")
else:
item.thumb_art = item.thumbnail
else:
item.extra = item.extra + "|" + item.thumbnail
def get_year(url):
data = httptools.downloadpage(url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
View File
@@ -84,13 +84,13 @@ def menu_genero(item):
logger.info()
itemlist = []
httptools.downloadpage("https://kproxy.com/")
url = "https://kproxy.com/doproxy.jsp"
post = "page=%s&x=34&y=14" % urllib.quote(host + "/principal")
response = httptools.downloadpage(url, post, follow_redirects=False).data
url = scrapertools.find_single_match(response, '<meta http-equiv="refresh".*?url=([^"]+)"')
data = httptools.downloadpage(url).data
# TODO: FOR NOW THIS ONLY WORKS ON KODI 17 AND LATER
# httptools.downloadpage("https://kproxy.com/")
# url = "https://kproxy.com/doproxy.jsp"
# post = "page=%s&x=34&y=14" % urllib.quote(host + "/principal")
# response = httptools.downloadpage(url, post, follow_redirects=False).data
# url = scrapertools.find_single_match(response, '<meta http-equiv="refresh".*?url=([^"]+)"')
data = httptools.downloadpage(host + "/principal").data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)
@@ -108,13 +108,13 @@ def menu_genero(item):
def series(item):
logger.info()
itemlist = []
httptools.downloadpage("https://kproxy.com/")
url = "https://kproxy.com/doproxy.jsp"
post = "page=%s&x=34&y=14" % urllib.quote(item.url)
response = httptools.downloadpage(url, post, follow_redirects=False).data
url = scrapertools.find_single_match(response, '<meta http-equiv="refresh".*?url=([^"]+)"')
data = httptools.downloadpage(url).data
# TODO: FOR NOW THIS ONLY WORKS ON KODI 17 AND LATER
# httptools.downloadpage("https://kproxy.com/")
# url = "https://kproxy.com/doproxy.jsp"
# post = "page=%s&x=34&y=14" % urllib.quote(item.url)
# response = httptools.downloadpage(url, post, follow_redirects=False).data
# url = scrapertools.find_single_match(response, '<meta http-equiv="refresh".*?url=([^"]+)"')
data = httptools.downloadpage(item.url).data
lista = jsontools.load(data)
if item.extra == "next":
@@ -165,13 +165,13 @@ def series(item):
def episodios(item):
logger.info()
itemlist = []
httptools.downloadpage("https://kproxy.com/")
url = "https://kproxy.com/doproxy.jsp"
post = "page=%s&x=34&y=14" % urllib.quote(item.url)
response = httptools.downloadpage(url, post, follow_redirects=False).data
url = scrapertools.find_single_match(response, '<meta http-equiv="refresh".*?url=([^"]+)"')
data = httptools.downloadpage(url).data
# TODO: FOR NOW THIS ONLY WORKS ON KODI 17 AND LATER
# httptools.downloadpage("https://kproxy.com/")
# url = "https://kproxy.com/doproxy.jsp"
# post = "page=%s&x=34&y=14" % urllib.quote(item.url)
# response = httptools.downloadpage(url, post, follow_redirects=False).data
# url = scrapertools.find_single_match(response, '<meta http-equiv="refresh".*?url=([^"]+)"')
data = httptools.downloadpage(item.url).data
data = jsontools.load(data)
@@ -237,13 +237,13 @@ def pelis(item):
logger.info()
itemlist = []
httptools.downloadpage("https://kproxy.com/")
url = "https://kproxy.com/doproxy.jsp"
post = "page=%s&x=34&y=14" % urllib.quote(item.url)
response = httptools.downloadpage(url, post, follow_redirects=False).data
url = scrapertools.find_single_match(response, '<meta http-equiv="refresh".*?url=([^"]+)"')
data = httptools.downloadpage(url).data
# TODO: FOR NOW THIS ONLY WORKS ON KODI 17 AND LATER
# httptools.downloadpage("https://kproxy.com/", add_referer=True)
# url = "https://kproxy.com/doproxy.jsp"
# post = "page=%s&x=34&y=14" % urllib.quote(item.url)
# response = httptools.downloadpage(url, post, follow_redirects=False).data
# url = scrapertools.find_single_match(response, '<meta http-equiv="refresh".*?url=([^"]+)"')
data = httptools.downloadpage(item.url).data
lista = jsontools.load(data)
if item.extra == "next":
View File
@@ -1,73 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [
"http://allmyvideos.net/embed-theme.html",
"http://allmyvideos.net/embed-jquery.html",
"http://allmyvideos.net/embed-s.html",
"http://allmyvideos.net/embed-images.html",
"http://allmyvideos.net/embed-faq.html",
"http://allmyvideos.net/embed-embed.html",
"http://allmyvideos.net/embed-ri.html",
"http://allmyvideos.net/embed-d.html",
"http://allmyvideos.net/embed-css.html",
"http://allmyvideos.net/embed-js.html",
"http://allmyvideos.net/embed-player.html",
"http://allmyvideos.net/embed-cgi.html",
"http://allmyvideos.net/embed-i.html",
"http://allmyvideos.net/images",
"http://allmyvideos.net/theme",
"http://allmyvideos.net/xupload",
"http://allmyvideos.net/s",
"http://allmyvideos.net/js",
"http://allmyvideos.net/jquery",
"http://allmyvideos.net/login",
"http://allmyvideos.net/make",
"http://allmyvideos.net/i",
"http://allmyvideos.net/faq",
"http://allmyvideos.net/tos",
"http://allmyvideos.net/premium",
"http://allmyvideos.net/checkfiles",
"http://allmyvideos.net/privacy",
"http://allmyvideos.net/refund",
"http://allmyvideos.net/links",
"http://allmyvideos.net/contact"
],
"patterns": [
{
"pattern": "allmyvideos.net/(?:embed-)?([a-z0-9]+)",
"url": "http://allmyvideos.net/\\1"
}
]
},
"free": true,
"id": "allmyvideos",
"name": "allmyvideos",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "Incluir en lista negra",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "Incluir en lista de favoritos",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "server_allmyvideos.png"
}
View File
@@ -1,62 +0,0 @@
# -*- coding: utf-8 -*-
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
# Does not exist / deleted: http://allmyvideos.net/8jcgbrzhujri
data = scrapertools.cache_page("http://anonymouse.org/cgi-bin/anon-www.cgi/" + page_url)
if "<b>File Not Found</b>" in data or "<b>Archivo no encontrado</b>" in data or '<b class="err">Deleted' in data \
or '<b class="err">Removed' in data or '<font class="err">No such' in data:
return False, "No existe o ha sido borrado de allmyvideos"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=%s" % page_url)
# Normalize the URL
videoid = scrapertools.get_match(page_url, "http://allmyvideos.net/([a-z0-9A-Z]+)")
page_url = "http://amvtv.net/embed-" + videoid + "-728x400.html"
data = scrapertools.cachePage(page_url)
if "Access denied" in data:
geobloqueo = True
else:
geobloqueo = False
if geobloqueo:
# url = "http://www.anonymousbrowser.xyz/hide.php"
# post = "go=%s" % page_url
url = "http://www.videoproxy.co/hide.php"
post = "go=%s" % page_url
location = scrapertools.get_header_from_response(url, post=post, header_to_get="location")
# url = "http://www.anonymousbrowser.xyz/" + location
url = "http://www.videoproxy.co/" + location
data = scrapertools.cachePage(url)
# Extract the URL
media_url = scrapertools.find_single_match(data, '"file" : "([^"]+)",')
video_urls = []
if media_url != "":
if geobloqueo:
# url = "http://www.anonymousbrowser.xyz/hide.php"
url = "http://www.videoproxy.co/hide.php"
post = "go=%s" % media_url
location = scrapertools.get_header_from_response(url, post=post, header_to_get="location")
# media_url = "http://www.anonymousbrowser.xyz/" + location + "&direct=false"
media_url = "http://www.videoproxy.co/" + location + "&direct=false"
else:
media_url += "&direct=false"
video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [allmyvideos]", media_url])
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
return video_urls
View File
@@ -13,21 +13,13 @@ from platformcode import config, logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url, cookies=False).data
if 'file was deleted' in data:
return False, "[FlashX] El archivo no existe o ha sido borrado"
elif 'File Not Found' in data:
return False, "[FlashX] El archivo no existe"
elif 'Video is processing now' in data:
return False, "[FlashX] El archivo se está procesando"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
pfxfx = ""
headers = {'Host': 'www.flashx.tv',
headers = {'Host': 'www.flashx.sx',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.137 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.5',
@@ -35,11 +27,11 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
'Cookie': ''}
data = httptools.downloadpage(page_url, cookies=False).data
data = data.replace("\n","")
cgi_counter = scrapertools.find_single_match(data, """(?is)src=.(https://www.flashx.tv/counter.cgi.*?[^(?:'|")]+)""")
cgi_counter = scrapertools.find_single_match(data, """(?is)src=.(https://www.flashx.sx/counter.cgi.*?[^(?:'|")]+)""")
cgi_counter = cgi_counter.replace("%0A","").replace("%22","")
playnow = scrapertools.find_single_match(data, 'https://www.flashx.tv/dl[^"]+')
playnow = scrapertools.find_single_match(data, 'https://www.flashx.sx/dl[^"]+')
# To obtain the f and fxfx values
js_fxfx = "https://www." + scrapertools.find_single_match(data.replace("//","/"), """(?is)(flashx.tv/js\w+/c\w+.*?[^(?:'|")]+)""")
js_fxfx = "https://www." + scrapertools.find_single_match(data.replace("//","/"), """(?is)(flashx.sx/js\w+/c\w+.*?[^(?:'|")]+)""")
data_fxfx = httptools.downloadpage(js_fxfx).data
mfxfx = scrapertools.find_single_match(data_fxfx, 'get.*?({.*?})').replace("'","").replace(" ","")
matches = scrapertools.find_multiple_matches(mfxfx, '(\w+):(\w+)')
@@ -49,19 +41,20 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
logger.info("mfxfxfx2= %s" %pfxfx)
if pfxfx == "":
pfxfx = "ss=yes&f=fail&fxfx=6"
coding_url = 'https://www.flashx.tv/flashx.php?%s' %pfxfx
coding_url = 'https://www.flashx.sx/flashx.php?%s' %pfxfx
# e.g. the counters JS fetched above contains get(url, {f: 'y', fxfx: '6'}), which ends up as the query string "f=y&fxfx=6"
flashx_id = scrapertools.find_single_match(data, 'name="id" value="([^"]+)"')
fname = scrapertools.find_single_match(data, 'name="fname" value="([^"]+)"')
hash_f = scrapertools.find_single_match(data, 'name="hash" value="([^"]+)"')
imhuman = scrapertools.find_single_match(data, "value='([^']+)' name='imhuman'")
bloque = scrapertools.find_single_match(data, '(?s)Form method="POST" action(.*?)<!--')
flashx_id = scrapertools.find_single_match(bloque, 'name="id" value="([^"]+)"')
fname = scrapertools.find_single_match(bloque, 'name="fname" value="([^"]+)"')
hash_f = scrapertools.find_single_match(bloque, 'name="hash" value="([^"]+)"')
imhuman = scrapertools.find_single_match(bloque, "value='([^']+)' name='imhuman'")
post = 'op=download1&usr_login=&id=%s&fname=%s&referer=&hash=%s&imhuman=%s' % (
flashx_id, urllib.quote(fname), hash_f, imhuman)
wait_time = scrapertools.find_single_match(data, "<span id='xxc2'>(\d+)")
headers['Referer'] = "https://www.flashx.tv/"
headers['Referer'] = "https://www.flashx.sx/"
headers['Accept'] = "*/*"
headers['Host'] = "www.flashx.tv"
headers['Host'] = "www.flashx.sx"
headers['X-Requested-With'] = 'XMLHttpRequest'
# Downloading these 2 files is mandatory; otherwise the site shows an error
@@ -76,7 +69,6 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
headers.pop('X-Requested-With')
headers['Content-Type'] = 'application/x-www-form-urlencoded'
data = httptools.downloadpage(playnow, post).data
# If a warning pops up, the verification page is loaded and then the initial one
# GPL3 LICENSE, from alfa-addon: https://github.com/alfa-addon/ ADDING THESE LINES IS MANDATORY
if "You try to access this video with Kodi" in data:
View File
@@ -17,7 +17,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
page_url = "http://www.youtube.com/watch?v=%s" % page_url
logger.info(" page_url->'%s'" % page_url)
video_id = scrapertools.find_single_match(page_url, 'v=([A-z0-9_-]{11})')
video_id = scrapertools.find_single_match(page_url, '(?:v=|embed/)([A-z0-9_-]{11})')
video_urls = extract_videos(video_id)
video_urls.reverse()
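As a quick, standalone sanity check of the widened pattern (example video id only):

import re

pattern = '(?:v=|embed/)([A-z0-9_-]{11})'
# Both the classic watch URL and the embed URL now yield the video id.
assert re.search(pattern, 'http://www.youtube.com/watch?v=dQw4w9WgXcQ').group(1) == 'dQw4w9WgXcQ'
assert re.search(pattern, 'http://www.youtube.com/embed/dQw4w9WgXcQ').group(1) == 'dQw4w9WgXcQ'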