danielr460
2019-02-13 13:56:44 -05:00
33 changed files with 1075 additions and 1716 deletions

View File

@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="2.7.24" provider-name="Alfa Addon">
<addon id="plugin.video.alfa" name="Alfa" version="2.7.25" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -19,17 +19,13 @@
</assets>
<news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Arreglos[/B][/COLOR]
¤ CineDeTodo ¤ CinemaHD ¤ PelisPlus
¤ pack +18 ¤ alldebrid ¤ fembed
¤ ZonaWorld ¤ SeriesPapaya ¤ inkaseries
¤ maxipelis24 ¤ animeflv ¤ anitoons
¤ serieslan
[COLOR green][B]Novedades[/B][/COLOR]
¤ PelisRex ¤ ReyAnime ¤ pelis123
¤ abtoon
Agradecimientos a @diegotcba y @chivmalev por colaborar con esta versión
¤ allcalidad ¤ canalpelis ¤ ciberpeliculashd
¤ pelisplay ¤ doramasmp4 ¤ Newpct1
¤ AnimeBoom ¤ AnimeID ¤ abtoon
¤ mixtoon ¤ Animeflv
[COLOR green][B]Novedades[/B][/COLOR]
¤ Mirapeliculas
</news>
<description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>
<summary lang="en">Browse web pages using Kodi</summary>

View File

@@ -19,7 +19,7 @@ list_servers = ['rapidvideo', 'streamango', 'fastplay', 'flashx', 'openload', 'v
__channel__='allcalidad'
host = "http://allcalidad.net/"
host = "https://allcalidad.net/"
try:
__modo_grafico__ = config.get_setting('modo_grafico', __channel__)

View File

@@ -1,43 +0,0 @@
{
"id": "animemovil",
"name": "Animemovil",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "https://s1.postimg.cc/92ji7stii7/animemovil1.png",
"banner": "",
"categories": [
"anime"
],
"settings": [
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "perfil",
"type": "list",
"label": "Perfil de color",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"Perfil 3",
"Perfil 2",
"Perfil 1"
]
}
]
}

View File

@@ -1,338 +0,0 @@
# -*- coding: utf-8 -*-
import re
from channels import renumbertools
from core import httptools
from core import servertools
from core import jsontools
from core import scrapertools
from core.item import Item
from platformcode import platformtools, config, logger
__modo_grafico__ = config.get_setting('modo_grafico', 'animemovil')
__perfil__ = config.get_setting('perfil', 'animemovil')
# Set the color profile (the 'perfil' setting picks one of the three palettes; was '' which never selected a profile)
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFFFFD700'],
['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFFFFD700'],
['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFFFFD700']]
if __perfil__ < 3:
color1, color2, color3, color4, color5 = perfil[__perfil__]
else:
color1 = color2 = color3 = color4 = color5 = ""
host = "http://animemovil.com"
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, action="recientes", title="Episodios Recientes", thumbnail=item.thumbnail,
url=host, text_color=color1, contentType="tvshow", extra="recientes"))
itemlist.append(Item(channel=item.channel, action="listado", title="Anime", thumbnail=item.thumbnail,
url=host+'/api/buscador?q=&letra=ALL&genero=ALL&estado=2&offset=0&limit=20', text_color=color1, contentType="tvshow", extra="recientes"))
itemlist.append(Item(channel=item.channel, action="list_by_json", title="En emisión", thumbnail=item.thumbnail,
text_color=color2, contentType="tvshow"))
itemlist.append(Item(channel=item.channel, action="indices", title="Índices", thumbnail=item.thumbnail,
text_color=color2))
itemlist.append(Item(channel=item.channel, action="search", title="Buscar...",
thumbnail=item.thumbnail, text_color=color3))
itemlist.append(item.clone(title="Configurar canal", action="openconfig", text_color=color5, folder=False))
if renumbertools.context:
itemlist = renumbertools.show_option(item.channel, itemlist)
return itemlist
def openconfig(item):
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
return ret
def search(item, texto):
item.url = "%s/api/buscador?q=%s&letra=ALL&genero=ALL&estado=2&offset=0&limit=30" % (host, texto.replace(" ", "+"))
return list_by_json(item)
def recientes(item):
logger.info()
item.contentType = "tvshow"
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r'\n|\s{2,}','', data)
bloque = scrapertools.find_single_match(data, '<ul class="hover">(.*?)</ul>')
patron = '<li><a href="([^"]+)" title="([^"]+)".*?src="([^"]+)"'
matches = scrapertools.find_multiple_matches(bloque, patron)
for url, title, thumb in matches:
url = host + url
try:
contentTitle = re.split(r"(?i) \d+ (?:Sub Español|Audio Español|Español Latino)", title)[0]
except:
contentTitle = ""
contentTitle = re.sub(r"(?i) Ova| Especiales| \(Pelicula[s]*\)| \(Película[s]*\)| Sub| Español| Peliculas| Audio| Latino", "", contentTitle)
tipo = "tvshow"
show = contentTitle
action = "episodios"
context = renumbertools.context(item)
if item.extra == "recientes":
action = "findvideos"
context = ""
if not item.extra and (url.endswith("-pelicula/") or url.endswith("-pelicula")):
tipo = "movie"
show = ""
action = "peliculas"
if not thumb.startswith("http"):
thumb = "http:%s" % thumb
infoLabels = {'filtro': {"original_language": "ja"}.items()}
itemlist.append(item.clone(action=action, title=title, url=url, thumbnail=thumb, text_color=color3,
contentTitle=contentTitle, contentSerieName=show, infoLabels=infoLabels,
thumb_=thumb, contentType=tipo, context=context))
try:
from core import tmdb
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
if item.extra and itemlist:
for it in itemlist:
it.thumbnail = it.thumb_
except:
pass
return itemlist
def listado(item):
logger.info()
itemlist = []
data = jsontools.load(httptools.downloadpage(item.url).data)
status = data.get('status')
data= data.get('result')
for it in data.get("items", []):
scrapedtitle = it["title"]
url = "%s/%s/" % (host, it["slug"])
thumb = 'http://media.animemovil.com/animes/%s/wallpaper_small.jpg' % it['id']
title = re.sub(r"(?i) Ova| Especiales| \(Pelicula[s]*\)| \(Película[s]*\)| Sub| Español| Peliculas| Audio| Latino", "", scrapedtitle)
tipo = "tvshow"
show = title
action = "episodios"
if url.endswith("-pelicula/") or url.endswith("-pelicula"):
tipo = "movie"
show = ""
action = "peliculas"
infoLabels = {'filtro': {"original_language": "ja"}.items()}
itemlist.append(item.clone(action=action, title=scrapedtitle, url=url, thumbnail=thumb, text_color=color3,
contentTitle=title, contentSerieName=show, infoLabels=infoLabels,
context=renumbertools.context(item), contentType=tipo))
try:
from core import tmdb
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
except:
pass
if status and itemlist:
offset = scrapertools.find_single_match(item.url, 'offset=(\d+)')
if offset:
offset = int(offset) + 2
else:
offset = 0
url = re.sub(r'offset=\d+', 'offset=%s' % offset, item.url)
itemlist.append(Item(channel=item.channel, action="listado", url=url, title=">> Página Siguiente",
thumbnail=item.thumbnail, text_color=color2))
return itemlist
def indices(item):
logger.info()
itemlist = []
if "Índices" in item.title:
itemlist.append(item.clone(title="Por Género", url="%s/anime" % host))
itemlist.append(item.clone(title="Por Letra", url="%s/anime" % host))
itemlist.append(item.clone(action="list_by_json", title="Lista completa de Animes",
url="%s/api/buscador?q=&letra=ALL&genero=ALL&estado=2&offset=0&limit=20" % host))
else:
data = httptools.downloadpage(item.url).data
data = re.sub('\n|\s{2,}', '', data)
if 'Letra' in item.title:
bloque = scrapertools.find_single_match(data, '<select name="letra"(.*?)</select>')
patron = '<option value="(\w)"'
elif 'Género' in item.title:
bloque = scrapertools.find_single_match(data, '<select name="genero"(.*?)</select>')
patron = '<option value="(\d+.*?)/'
matches = scrapertools.find_multiple_matches(bloque, patron)
for title in matches:
if "Letra" in item.title:
url = '%s/api/buscador?q=&letra=%s&genero=ALL&estado=2&offset=0&limit=20' % (host, title)
else:
value = scrapertools.find_single_match(title, '(\d+)"')
title = scrapertools.find_single_match(title, '\d+">(.*?)<')
url = '%s/api/buscador?q=&letra=ALL&genero=%s&estado=2&offset=0&limit=20' % (host, value)
itemlist.append(item.clone(action="list_by_json", url=url, title=title))
return itemlist
def episodios(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub('\n|\s{2,}', '', data)
show = scrapertools.find_single_match(data, '<div class="x-title">(.*?)</div>')
show = re.sub(r"(?i) Ova| Especiales| \(Pelicula[s]*\)| \(Película[s]*\)| Sub| Español| Peliculas| Audio| Latino", "", show)
if not item.infoLabels["plot"]:
item.infoLabels["plot"] = scrapertools.find_single_match(data, '<div class="x-sinopsis">\s*(.*?)</div>')
bloque = scrapertools.find_single_match(data, '<ul class="list"(.*?)</ul>')
matches = scrapertools.find_multiple_matches(bloque, '<li><a href="([^"]+)" title="([^"]+)"')
for url, title in matches:
url = host + url
epi = scrapertools.find_single_match(title, '.+?(\d+) (?:Sub|Audio|Español)')
#epi = scrapertools.find_single_match(title, '(?i)%s.*? (\d+) (?:Sub|Audio|Español)' % item.contentSerieName)
new_item = item.clone(action="findvideos", url=url, title=title, extra="")
if epi:
if "Especial" in title:
epi=0
season, episode = renumbertools.numbered_for_tratk(
item.channel, item.contentSerieName, 1, int(epi))
new_item.infoLabels["episode"] = episode
new_item.infoLabels["season"] = season
new_item.title = "%sx%s %s" % (season, episode, title)
itemlist.append(new_item)
if item.infoLabels.get("tmdb_id") or item.extra == "recientes" or item.extra == "completo":
try:
from core import tmdb
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
except:
pass
if config.get_videolibrary_support() and itemlist:
itemlist.append(Item(channel=item.channel, title="Añadir serie a la videoteca", url=item.url,
action="add_serie_to_library", extra="episodios", contentTitle=item.contentTitle,
contentSerieName=item.contentSerieName, text_color=color4, fanart=item.fanart,
thumbnail=item.thumbnail))
return itemlist
def list_by_json(item):
logger.info()
itemlist = []
repeat = 1
status = False
if item.url == '':
item.url = host+"/api/buscador?limit=30&estado=1&dia=%s"
repeat = 6
for element in range(0,repeat):
if repeat != 1:
data = jsontools.load(httptools.downloadpage(item.url % element).data)
else:
data = jsontools.load(httptools.downloadpage(item.url).data)
status = data.get('status')
json_data = data.get('result')
elem_data = json_data['items']
for item_data in elem_data:
url = '%s/%s/' % (host, item_data['slug'])
title = item_data['title']
title = re.sub(r"(?i) Ova| Especiales| \(Pelicula[s]*\)| \(Película[s]*\)| Sub Español| Peliculas", "",
title)
thumb = 'http://media.animemovil.com/animes/%s/wallpaper_small.jpg' % item_data['id']
infoLabels = {'filtro': {"original_language": "ja"}.items()}
itemlist.append(
item.clone(action="episodios", title=title, url=url, thumbnail=thumb, text_color=color3,
contentTitle=title, contentSerieName=title, extra="recientes",
context=renumbertools.context(item), infoLabels=infoLabels))
if status and itemlist:
offset = scrapertools.find_single_match(item.url, 'offset=(\d+)')
if offset:
offset = int(offset) + 2
else:
offset = 0
url = re.sub(r'offset=\d+', 'offset=%s' % offset, item.url)
itemlist.append(Item(channel=item.channel, action="listado", url=url, title=">> Página Siguiente",
thumbnail=item.thumbnail, text_color=color2))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r'\n|\s{2,}', '', data)
strm_id = scrapertools.find_single_match(data, '"id": (.*?),')
streams = scrapertools.find_single_match(data, '"stream": (.*?)};')
dict_strm = jsontools.load(streams)
base_url = 'http:%s%s/' % (dict_strm['accessPoint'], strm_id)
for server in dict_strm['servers']:
expire = dict_strm['expire']
signature = dict_strm['signature']
last_modify = dict_strm['last_modify']
callback = 'playerWeb'
strm_url = base_url +'%s?expire=%s&callback=%s&signature=%s&last_modify=%s' % (server, expire, callback,
signature, last_modify)
try:
strm_data = httptools.downloadpage(strm_url).data
strm_data = scrapertools.unescape(strm_data)
title = '%s'
language = ''
if server not in ['fire', 'meph']:
urls = scrapertools.find_multiple_matches(strm_data, '"(?:file|src)"*?:.*?"(.*?)"')
for url in urls:
if url != '':
url = url.replace('\\/', '/')
itemlist.append(Item(channel=item.channel, title=title, url=url, action='play'))
elif server in ['fire', 'meph']:
url = scrapertools.find_single_match(strm_data, r'xmlhttp\.open\("GET", "(.*?)"')
if url != '':
url = url.replace('\\/', '/')
itemlist.append(Item(channel=item.channel, title=url, url=url, action='play'))
else:
continue
except:
pass
servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server)
return itemlist
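
findvideos() above reconstructs the player's signed access-point URL from the "stream" JSON embedded in the page: every server shares the same expire, signature and last_modify values, and only the server segment changes. A minimal sketch of that assembly, using only the field names visible above (the helper name is illustrative, not part of the channel):

# Illustrative only: assembles the signed stream URL the same way
# findvideos() does, from the page's "stream" JSON block.
def build_stream_url(dict_strm, strm_id, server):
    base_url = 'http:%s%s/' % (dict_strm['accessPoint'], strm_id)
    return base_url + '%s?expire=%s&callback=playerWeb&signature=%s&last_modify=%s' % (
        server, dict_strm['expire'], dict_strm['signature'], dict_strm['last_modify'])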
def newest(categoria):
logger.info()
item = Item()
try:
item.url = host
item.extra = "novedades"
itemlist = recientes(item)
# Catch the exception so the "novedades" aggregator is not interrupted when one channel fails
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
return itemlist

View File

@@ -1,30 +0,0 @@
{
"id": "animeyt",
"name": "AnimeYT",
"active": true,
"adult": false,
"language": "cast, lat",
"thumbnail": "http://i.imgur.com/dHpupFk.png",
"categories": [
"anime",
"vos"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "modo_grafico",
"type": "bool",
"label": "información extra",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,510 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from channels import renumbertools
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from core import tmdb
from platformcode import config,logger
import gktools, random, time, urllib
__modo_grafico__ = config.get_setting('modo_grafico', 'animeyt')
HOST = "http://animeyt.tv/"
def mainlist(item):
logger.info()
itemlist = list()
itemlist.append(Item(channel=item.channel, title="Novedades", action="novedades", url=HOST))
itemlist.append(Item(channel=item.channel, title="Recientes", action="recientes", url=HOST))
itemlist.append(Item(channel=item.channel, title="Alfabético", action="alfabetico", url=HOST))
itemlist.append(Item(channel=item.channel, title="Búsqueda", action="search", url=urlparse.urljoin(HOST, "busqueda?terminos=")))
itemlist = renumbertools.show_option(item.channel, itemlist)
return itemlist
def novedades(item):
logger.info()
itemlist = list()
if not item.pagina:
item.pagina = 0
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron_novedades = '<div class="capitulos-portada">[\s\S]+?<h2>Comentarios</h2>'
data_novedades = scrapertools.find_single_match(data, patron_novedades)
patron = 'href="([^"]+)"[\s\S]+?src="([^"]+)"[^<]+alt="([^"]+) (\d+)([^"]+)'
matches = scrapertools.find_multiple_matches(data_novedades, patron)
for url, img, scrapedtitle, eps, info in matches[item.pagina:item.pagina + 20]:
title = scrapedtitle + " " + "1x" + eps + info
title = title.replace("Sub Español", "").replace("sub español", "")
infoLabels = {'filtro': {"original_language": "ja"}.items()}
itemlist.append(Item(channel=item.channel, title=title, url=url, thumb=img, action="findvideos", contentTitle=scrapedtitle, contentSerieName=scrapedtitle, infoLabels=infoLabels, contentType="tvshow"))
try:
from core import tmdb
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
for it in itemlist:
it.thumbnail = it.thumb
except:
pass
if len(matches) > item.pagina + 20:
pagina = item.pagina + 20
itemlist.append(item.clone(channel=item.channel, action="novedades", url=item.url, title=">> Página Siguiente", pagina=pagina))
return itemlist
def alfabetico(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
for letra in '0ABCDEFGHIJKLMNOPQRSTUVWXYZ':
titulo = letra
if letra == "0":
letra = "num"
itemlist.append(Item(channel=item.channel, action="recientes", title=titulo,
url=urlparse.urljoin(HOST, "animes?tipo=0&genero=0&anio=0&letra={letra}".format(letra=letra))))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ","+")
item.url = item.url+texto
if texto!='':
return recientes(item)
def recientes(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron_recientes = '<article class="anime">[\s\S]+?</main>'
data_recientes = scrapertools.find_single_match(data, patron_recientes)
patron = '<a href="([^"]+)"[^<]+<img src="([^"]+)".+?js-synopsis-reduce">(.*?)<.*?<h3 class="anime__title">(.*?)<small>(.*?)</small>'
matches = scrapertools.find_multiple_matches(data_recientes, patron)
for url, thumbnail, plot, title, cat in matches:
itemlist.append(item.clone(title=title, url=url, action="episodios", show=title, thumbnail=thumbnail, plot=plot, cat=cat, context=renumbertools.context(item)))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True)
paginacion = scrapertools.find_single_match(data, '<a class="pager__link icon-derecha last" href="([^"]+)"')
paginacion = scrapertools.decodeHtmlentities(paginacion)
if paginacion:
itemlist.append(Item(channel=item.channel, action="recientes", title=">> Página Siguiente", url=paginacion))
return itemlist
def episodios(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<span class="icon-triangulo-derecha"></span>.*?<a href="([^"]+)">([^"]+) (\d+)'
matches = scrapertools.find_multiple_matches(data, patron)
for url, scrapedtitle, episode in matches:
season = 1
episode = int(episode)
season, episode = renumbertools.numbered_for_tratk(item.channel, scrapedtitle, season, episode)
title = "%sx%s %s" % (season, str(episode).zfill(2), scrapedtitle)
itemlist.append(item.clone(title=title, url=url, action='findvideos'))
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir serie a la biblioteca", url=item.url, action="add_serie_to_library", extra="episodios", show=item.show))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
duplicados = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
from collections import OrderedDict  # dict swapped for OrderedDict to keep the same order as the website
matches = scrapertools.find_multiple_matches(data, '<li><a id="mirror(\d*)" class="link-veranime[^"]*" href="[^"]*">([^<]*)')
d_links = OrderedDict(matches)
matches = scrapertools.find_multiple_matches(data, 'if \(mirror == (\d*)\).*?iframe src="([^"]*)"')
d_frames = OrderedDict(matches)
for k in d_links:
if k in d_frames and d_frames[k] != '':
tit = scrapertools.find_single_match(d_frames[k], '/([^\./]*)\.php\?')
if tit == '':
tit = 'mega' if 'mega.nz/' in d_frames[k] else 'dailymotion' if 'dailymotion.com/' in d_frames[k] else 'noname'
if tit == 'id' and 'yourupload.com/' in d_frames[k]: tit = 'yourupload'
title = 'Opción %s (%s)' % (d_links[k], tit)
itemlist.append(item.clone(channel=item.channel, folder=False, title=title, action="play", url=d_frames[k], referer=item.url))
if item.extra != "library":
if config.get_videolibrary_support() and item.extra:
itemlist.append(item.clone(channel=item.channel, title="[COLOR yellow]Añadir pelicula a la videoteca[/COLOR]", url=item.url, action="add_pelicula_to_library", extra="library", contentTitle=item.show, contentType="movie"))
return itemlist
def play(item):
logger.info()
itemlist = []
if item.url.startswith('https://www.dailymotion.com/'):
itemlist.append(item.clone(url=item.url, server='dailymotion'))
elif item.url.startswith('https://mega.nz/'):
itemlist.append(item.clone(url=item.url.replace('embed',''), server='mega'))
elif item.url.startswith('https://s2.animeyt.tv/rakuten.php?'):
# 1- Download the page
data, ck = gktools.get_data_and_cookie(item)
# 2- Compute the token data
gsv = scrapertools.find_single_match(data, '<meta name="google-site-verification" content="([^"]*)"')
if not gsv: return itemlist
suto = gktools.md5_dominio(item.url)
sufijo = '3497510'
token = gktools.generar_token('"'+gsv+'"', suto+'yt'+suto+sufijo)
link, subtitle = gktools.get_play_link_id(data, item.url)
url = 'https://s2.animeyt.tv/rakuten/plugins/gkpluginsphp.php'
post = "link=%s&token=%s" % (link, token)
# 3- Download the JSON
data = gktools.get_data_json(url, post, ck, item.url)
# 4- Extract the links
itemlist = gktools.extraer_enlaces_json(data, item.referer, subtitle)
elif item.url.startswith('https://s3.animeyt.tv/amz.php?'):
# 1- Download the page
data, ck = gktools.get_data_and_cookie(item)
# 2- Compute the token data
gsv = scrapertools.find_single_match(data, '<meta name="Animeyt-Token" content="([^"]*)"')
v_token = scrapertools.find_single_match(data, "var v_token='([^']*)'")
if not gsv or not v_token: return itemlist
suto = gktools.md5_dominio(item.url)
sufijo = '9457610'
token = gktools.generar_token('"'+gsv+'"', suto+'yt'+suto+sufijo)
url = 'https://s3.animeyt.tv/amz_animeyts.php'
post = "v_token=%s&token=%s&handler=%s" % (v_token, token, 'Animeyt')
# 3- Download the JSON
data = gktools.get_data_json(url, post, ck, item.url)
# 4- Extract the links
itemlist = gktools.extraer_enlaces_json(data, item.referer)
elif item.url.startswith('https://s2.animeyt.tv/lola.php?'):
# 1- Download the page
data, ck = gktools.get_data_and_cookie(item)
# 2- Compute the token data
gsv = scrapertools.find_single_match(data, '<meta name="Animeyt-Token" content="([^"]*)"')
s_cd, s_file = scrapertools.find_single_match(data, "var cd='([^']*)';\s*var file='([^']*)'")
if not gsv or not s_cd or not s_file: return itemlist
suto = gktools.md5_dominio(item.url)
sufijo = '8134976'
token = gktools.generar_token('"'+gsv+'"', suto+'yt'+suto+sufijo)
url = 'https://s2.animeyt.tv/minha_animeyt.php'
post = "cd=%s&file=%s&token=%s&handler=%s" % (s_cd, s_file, token, 'Animeyt')
# 3- Download the JSON
data = gktools.get_data_json(url, post, ck, item.url)
# 4- Extract the links
itemlist = gktools.extraer_enlaces_json(data, item.referer)
elif item.url.startswith('https://s4.animeyt.tv/chumi.php?'): #https://s4.animeyt.tv/chumi.php?cd=3481&file=4
# 1- Download the page
data, ck = gktools.get_data_and_cookie(item)
# 2- Compute the token data
gsv = scrapertools.find_single_match(data, '<meta name="Animeyt-Token" content="([^"]*)"')
s_cd, s_file = scrapertools.find_single_match(item.url, '\?cd=([^&]*)&file=([^&]*)')
if not gsv or not s_cd or not s_file: return itemlist
ip = gktools.toHex(gsv) + str(1000000 + random.randint(0,9000000)) + str(100000 + random.randint(0,900000))
gsv_bis = gktools.transforma_gsv(gsv, '159753')
p1 = '1705f5652bb6546ab3643ff698e' + gsv[-5:]
p2 = '8388ca3fd07' + gsv[-5:] + gsv_bis
texto = gktools.toHex(gktools.encode_rijndael(gsv, p1, p2))
suto = gktools.md5_dominio(item.url)
sufijo = '147268278' + gsv[-5:]
prefijo = gsv[:-5] + gsv_bis
token = gktools.generar_token('"'+texto+'"', prefijo+suto+'yt'+suto+sufijo)
archive = gktools.toHex(token)
url = 'https://s4.animeyt.tv/minha/minha_animeyt.php'
post = "cd=%s&id=%s&archive=%s&ip=%s&Japan=%s" % (s_cd, s_file, archive, ip, 'Asia')
# 3- Download the JSON
data = gktools.get_data_json(url, post, ck, item.url)
# 4- Extract the links
itemlist = gktools.extraer_enlaces_json(data, item.referer)
elif item.url.startswith('https://s3.animeyt.tv/mega.php?'): #https://s3.animeyt.tv/mega.php?v=WmpHMEVLVTNZZktyaVAwai9sYzhWV1ZRTWh0WTZlNGZ3VzFVTXhMTkx2NGlOMjRYUHhZQlMvaUFsQlJFbHBVTA==
# 1- Download the page
data, ck = gktools.get_data_and_cookie(item)
# 2- Compute the token data
gsv = scrapertools.find_single_match(data, '<meta name="Animeyt-Token" content="([^"]*)"')
s_v = scrapertools.find_single_match(item.url, '\?v=([^&]*)')
if not gsv or not s_v: return itemlist
ip = gktools.toHex(gsv) + str(1000000 + random.randint(0,9000000)) + str(100000 + random.randint(0,900000))
gsv_bis = gktools.transforma_gsv(gsv, '159753')
p1 = '1705f5652bb6546ab3643ff698e' + gsv[-5:]
p2 = '8388ca3fd07' + gsv[-5:] + gsv_bis
texto = gktools.toHex(gktools.encode_rijndael(gsv, p1, p2))
suto = gktools.md5_dominio(item.url)
sufijo = '147268278' + gsv[-5:]
prefijo = gsv[:-5] + gsv_bis
token = gktools.generar_token('"'+texto+'"', prefijo+suto+'yt'+suto+sufijo)
archive = gktools.toHex(token)
url = 'https://s3.animeyt.tv/mega_animeyts.php'
post = "v=%s&archive=%s&referer=%s&ip=%s&Japan=%s" % (s_v, archive, item.url, ip, 'Asia')
# 3- Download the JSON
data = gktools.get_data_json(url, post, ck, item.url)
# 4- Extract the links
itemlist = gktools.extraer_enlaces_json(data, item.referer)
elif item.url.startswith('https://s2.animeyt.tv/naruto/naruto.php?'): #https://s2.animeyt.tv/naruto/naruto.php?id=3477&file=11.mp4
# 1- Download the page
data, ck = gktools.get_data_and_cookie(item)
# 2- Compute the token data
gsv = scrapertools.find_single_match(data, '<meta name="Animeyt-Token" content="([^"]*)"')
s_id, s_file = scrapertools.find_single_match(item.url, '\?id=([^&]*)&file=([^&]*)')
if not gsv or not s_id or not s_file: return itemlist
ip = gktools.toHex(gsv) + str(1000000 + random.randint(0,9000000)) + str(100000 + random.randint(0,900000))
gsv_bis = gktools.transforma_gsv(gsv, '159753')
p1 = '1705f5652bb6546ab3643ff698e' + gsv[-5:]
p2 = '8388ca3fd07' + gsv[-5:] + gsv_bis
texto = gktools.toHex(gktools.encode_rijndael(gsv, p1, p2))
suto = gktools.md5_dominio(item.url)
sufijo = '147268278' + gsv[-5:]
prefijo = gsv[:-5] + gsv_bis
token = gktools.generar_token('"'+texto+'"', prefijo+suto+'yt'+suto+sufijo)
archive = gktools.toHex(token)
url = 'https://s2.animeyt.tv/naruto/narutos_animeyt.php'
post = "id=%s&file=%s&archive=%s&referer=%s&ip=%s&Japan=%s" % (s_id, s_file, archive, urllib.quote(item.url), ip, 'Asia')
# 3- Download the JSON
data = gktools.get_data_json(url, post, ck, item.url)
# 4- Extract the links
itemlist = gktools.extraer_enlaces_json(data, item.referer)
elif item.url.startswith('https://s4.animeyt.tv/facebook.php?'): #https://s4.animeyt.tv/facebook.php?cd=3481&id=4
# 1- Download the page
data, ck = gktools.get_data_and_cookie(item)
# 2- Compute the token data
gsv = scrapertools.find_single_match(data, '<meta name="Animeyt-Token" content="([^"]*)"')
s_cd, s_id = scrapertools.find_single_match(item.url, '\?cd=([^&]*)&id=([^&]*)')
if not gsv or not s_cd or not s_id: return itemlist
ip = gktools.toHex(gsv) + str(1000000 + random.randint(0,9000000)) + str(100000 + random.randint(0,900000))
gsv_bis = gktools.transforma_gsv(gsv, '159753')
p1 = '1705f5652bb6546ab3643ff698e' + gsv[-5:]
p2 = '8388ca3fd07' + gsv[-5:] + gsv_bis
texto = gktools.toHex(gktools.encode_rijndael(gsv, p1, p2))
suto = gktools.md5_dominio(item.url)
sufijo = '147268278' + gsv[-5:]
prefijo = gsv[:-5] + gsv_bis
token = gktools.generar_token('"'+texto+'"', prefijo+suto+'yt'+suto+sufijo)
archive = gktools.toHex(token)
url = 'https://s4.animeyt.tv/facebook/facebook_animeyts.php'
post = "cd=%s&id=%s&archive=%s&referer=%s&ip=%s&Japan=%s" % (s_cd, s_id, archive, urllib.quote(item.url), ip, 'Asia')
# 3- Download the JSON
data = gktools.get_data_json(url, post, ck, item.url)
# 4- Extract the links
itemlist = gktools.extraer_enlaces_json(data, item.referer)
elif item.url.startswith('https://s.animeyt.tv/v4/media.php?'): #https://s.animeyt.tv/v4/media.php?id=SmdMQ2Y0NUhFK2hOZlYzbVJCbnE3QT09
# 1- Download the page
data, ck = gktools.get_data_and_cookie(item)
# 2- Compute the token data
gsv = scrapertools.find_single_match(data, '<meta name="Animeyt-Token" content="([^"]*)"')
s_id = scrapertools.find_single_match(item.url, '\?id=([^&]*)')
if not gsv or not s_id: return itemlist
ip = gktools.toHex(gsv) + str(1000000 + random.randint(0,9000000)) + str(100000 + random.randint(0,900000))
gsv_bis = gktools.transforma_gsv(gsv, '159753')
p1 = '1705f5652bb6546ab3643ff698e' + gsv[-5:]
p2 = '8388ca3fd07' + gsv[-5:] + gsv_bis
texto = gktools.toHex(gktools.encode_rijndael(gsv, p1, p2))
suto = gktools.md5_dominio(item.url)
sufijo = '8049762' + gsv[-5:]
prefijo = gsv[:-5] + gsv_bis
token = gktools.generar_token('"'+texto+'"', prefijo+suto+'yt'+suto+sufijo)
archive = gktools.toHex(token)
url = 'https://s.animeyt.tv/v4/gsuite_animeyts.php'
post = "id=%s&archive=%s&ip=%s&Japan=%s" % (s_id, archive, ip, 'Asia')
# 3- Download the JSON
data = gktools.get_data_json(url, post, ck, item.url)
# 4- Extract the links
itemlist = gktools.extraer_enlaces_json(data, item.referer)
elif item.url.startswith('https://s10.animeyt.tv/yourupload.com/id.php?'): #https://s10.animeyt.tv/yourupload.com/id.php?id=62796D77774A4E4363326642
# 1- Download the page
data, ck = gktools.get_data_and_cookie(item)
# 2- Compute the token data
gsv = scrapertools.find_single_match(data, '<meta name="Animeyt-Token" content="([^"]*)"')
s_id = scrapertools.find_single_match(item.url, '\?id=([^&]*)')
if not gsv or not s_id: return itemlist
ip = gktools.toHex(gsv) + str(1000000 + random.randint(0,9000000)) + str(100000 + random.randint(0,900000))
gsv_bis = gktools.transforma_gsv(gsv, '159753')
p1 = '1705f5652bb6546ab3643ff698e' + gsv[-5:]
p2 = '8388ca3fd07' + gsv[-5:] + gsv_bis
texto = gktools.toHex(gktools.encode_rijndael(gsv, p1, p2))
suto = gktools.md5_dominio(item.url)
sufijo = '8049762' + gsv[-5:]
prefijo = gsv[:-5] + gsv_bis
token = gktools.generar_token('"'+texto+'"', prefijo+suto+'yt'+suto+sufijo)
archive = gktools.toHex(token)
url = 'https://s10.animeyt.tv/yourupload.com/chinese_streaming.php'
post = "id=%s&archive=%s&ip=%s&Japan=%s" % (s_id, archive, ip, 'Asia')
# 3- Download the JSON
data = gktools.get_data_json(url, post, ck, item.url)
# 4- Extract the links
itemlist = gktools.extraer_enlaces_json(data, item.referer)
elif item.url.startswith('https://s4.animeyt.tv/onedrive.php?'): #https://s4.animeyt.tv/onedrive.php?cd=3439&id=12
# 1- Download the page
data, ck = gktools.get_data_and_cookie(item)
# 2- Compute the token data
gsv = scrapertools.find_single_match(data, '<meta name="Animeyt-Token" content="([^"]*)"')
s_cd, s_id = scrapertools.find_single_match(item.url, '\?cd=([^&]*)&id=([^&]*)')
if not gsv or not s_cd or not s_id: return itemlist
ip = gktools.toHex(gsv) + str(1000000 + random.randint(0,9000000)) + str(100000 + random.randint(0,900000))
gsv_bis = gktools.transforma_gsv(gsv, '159753')
p1 = '1705f5652bb6546ab3643ff698e' + gsv[-5:]
p2 = '8388ca3fd07' + gsv[-5:] + gsv_bis
texto = gktools.toHex(gktools.encode_rijndael(gsv, p1, p2))
suto = gktools.md5_dominio(item.url)
sufijo = '147268278' + gsv[-5:]
prefijo = gsv[:-5] + gsv_bis
token = gktools.generar_token('"'+texto+'"', prefijo+suto+'yt'+suto+sufijo)
archive = gktools.toHex(token)
url = 'https://s4.animeyt.tv/onedrive/onedrive_animeyts.php'
post = "cd=%s&id=%s&archive=%s&ip=%s&Japan=%s" % (s_cd, s_id, archive, ip, 'Asia')
# 3- Download the JSON
data = gktools.get_data_json(url, post, ck, item.url)
# 4- Extract the links
itemlist = gktools.extraer_enlaces_json(data, item.referer)
return itemlist
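
Every branch of play() above repeats the same four-step gktools flow (download the page and cookie, compute a token, POST to an endpoint, extract the links); only the endpoint, the POST payload and the token inputs differ. A sketch of how the shared tail could be factored out, assuming gktools.get_data_json and gktools.extraer_enlaces_json keep the signatures used above (the helper name is hypothetical):

# Hypothetical helper: the common steps 3 and 4 of every play() branch.
def _gk_fetch_links(item, ck, url, post, subtitle=None):
    # 3- Download the JSON
    data = gktools.get_data_json(url, post, ck, item.url)
    # 4- Extract the links
    if subtitle is not None:
        return gktools.extraer_enlaces_json(data, item.referer, subtitle)
    return gktools.extraer_enlaces_json(data, item.referer)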

View File

@@ -289,9 +289,9 @@ def temporadas(item):
data = httptools.downloadpage(item.url).data
datas = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<span class="title">([^<]+)<i>.*?' # season numbers
patron += '<img src="([^"]+)"></a></div>' # episodes
patron = "<span class='title'>([^<]+)<i>.*?" # season numbers
patron += "<img src='([^']+)'>" # episodes
# logger.info(datas)
matches = scrapertools.find_multiple_matches(datas, patron)
if len(matches) > 1:
for scrapedseason, scrapedthumbnail in matches:
@@ -331,14 +331,13 @@ def episodios(item):
data = httptools.downloadpage(item.url).data
datas = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
# logger.info(datas)
patron = '<div class="imagen"><a href="([^"]+)">.*?' # episode url, img
patron += '<div class="numerando">(.*?)</div>.*?' # episode numbering
patron += '<a href="[^"]+">([^<]+)</a>' # episode title
patron = "<div class='imagen'>.*?"
patron += "<div class='numerando'>(.*?)</div>.*?"
patron += "<a href='([^']+)'>([^<]+)</a>"
matches = scrapertools.find_multiple_matches(datas, patron)
for scrapedurl, scrapedtitle, scrapedname in matches:
for scrapedtitle, scrapedurl, scrapedname in matches:
scrapedtitle = scrapedtitle.replace('--', '0')
patron = '(\d+) - (\d+)'
match = re.compile(patron, re.DOTALL).findall(scrapedtitle)

View File

@@ -248,8 +248,8 @@ def findvideos(item):
itemlist.append(Item(channel = item.channel))
itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
text_color="magenta"))
# Opción "Añadir esta película a la biblioteca de KODI"
if item.extra != "library":
# Opción "Añadir esta película a la videoteca de KODI"
if item.contentChannel != "videolibrary":
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,

View File

@@ -3,21 +3,18 @@
import urlparse,urllib2,urllib,re
import os, sys
from core import jsontools as json
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
from core import tmdb
host = 'http://czechvideo.org'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Ultimos" , action="peliculas", url=host))
itemlist.append( Item(channel=item.channel, title="Ultimos" , action="lista", url=host))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
@@ -28,7 +25,7 @@ def search(item, texto):
texto = texto.replace(" ", "+")
item.url = host + "/tags/%s/" % texto
try:
return peliculas(item)
return lista(item)
except:
import sys
for line in sys.exc_info():
@@ -40,43 +37,46 @@ def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data,'<ul class="cat_menu" id="cat_menu_c0">(.*?)</ul>')
data = scrapertools.get_match(data,'<div class="category">(.*?)</ul>')
patron = '<li><a href="(.*?)".*?>(.*?)</a></li>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
scrapedtitle = str(scrapedtitle)
scrapedurl = host + scrapedurl
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
thumbnail = urlparse.urljoin(item.url,scrapedthumbnail)
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
def peliculas(item):
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div class="short-story">.*?<a href="([^"]+)" title="([^"]+)"><img src="([^"]+)".*?div class="short-time">(.*?)</div>'
patron = '<div class="short-story">.*?'
patron += '<a href="([^"]+)" title="([^"]+)"><img src="([^"]+)".*?'
patron += 'div class="short-time">(.*?)</div>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle,scrapedthumbnail,scrapedtime in matches:
title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle
scrapedthumbnail = urlparse.urljoin(item.url,scrapedthumbnail)
scrapedplot = ""
itemlist.append( Item(channel=item.channel, action="play", title=title , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
next_page_url = scrapertools.find_single_match(data,'<del><a href="([^"]+)">Next</a></del>')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl,
thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) )
next_page = scrapertools.find_single_match(data,'<del><a href="([^"]+)">Next</a></del>')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
return itemlist
def play(item):
logger.info()
data = scrapertools.cachePage(item.url)
url = scrapertools.find_single_match(data,'<iframe src=.*?<iframe src="([^"]+)"')
url = scrapertools.find_single_match(data,'<div id=\'dle-content\'>.*?<iframe src="([^"]+)"')
url = "http:" + url
itemlist = servertools.find_video_items(data=url)
for videoitem in itemlist:

View File

@@ -167,7 +167,8 @@ def findvideos(item):
headers = {"X-Requested-With":"XMLHttpRequest"}
for scrapedserver, scrapeduser in matches:
data1 = httptools.downloadpage("https://space.danimados.space/gilberto.php?id=%s&sv=mp4" %scrapeduser).data
url = base64.b64decode(scrapertools.find_single_match(data1, 'hashUser = "([^"]+)'))
data1 = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data1)
url = base64.b64decode(scrapertools.find_single_match(data1, '<iframe data-source="([^"]+)"'))
url1 = devuelve_enlace(url)
if "drive.google" in url1:
url1 = url1.replace("view","preview")
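
The hunk above stops decoding the old hashUser variable and instead takes the iframe's data-source attribute, which carries the target URL base64-encoded. A self-contained illustration of that decode step (the attribute value is made up):

# Standalone illustration with an example value, not a real danimados response:
import base64, re
data1 = '<iframe data-source="aHR0cHM6Ly9leGFtcGxlLmNvbS92aWRlbw=="></iframe>'
url = base64.b64decode(re.search('<iframe data-source="([^"]+)"', data1).group(1))
print(url)  # https://example.com/video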

View File

@@ -16,7 +16,7 @@ from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
host = 'https://www3.doramasmp4.com/'
host = 'https://www4.doramasmp4.com/'
IDIOMAS = {'sub': 'VOSE', 'VO': 'VO'}
list_language = IDIOMAS.values()
@@ -166,6 +166,8 @@ def findvideos(item):
itemlist = []
data = get_source(item.url)
new_dom=scrapertools.find_single_match(data,"var web = { domain: '(.*?)'")
patron = 'link="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -191,7 +193,7 @@ def findvideos(item):
video_data = httptools.downloadpage(video_url, headers=headers).data
url = scrapertools.find_single_match(video_data, "'file':'([^']+)'")
else:
video_url = 'https://www3.doramasmp4.com/api/redirect.php?token=%s' % token
video_url = new_dom+'api/redirect.php?token=%s' % token
video_data = httptools.downloadpage(video_url, headers=headers, follow_redirects=False).headers
url = scrapertools.find_single_match(video_data['location'], '\d+@@@(.*?)@@@')
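
With this change the redirect endpoint is built from the domain scraped out of `var web = { domain: ... }` rather than a hardcoded www3 host, so the channel survives the site moving to a new subdomain. A minimal sketch of the whole resolution step, under the same assumptions as the code above (httptools accepting follow_redirects and exposing a headers dict):

# Sketch: request the API without following the redirect, then pull the real
# URL out of the Location header, which packs it as "<id>@@@<url>@@@...".
def resolve_token(new_dom, token, headers):
    video_url = new_dom + 'api/redirect.php?token=%s' % token
    resp = httptools.downloadpage(video_url, headers=headers, follow_redirects=False)
    return scrapertools.find_single_match(resp.headers['location'], '\d+@@@(.*?)@@@')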

View File

@@ -18,7 +18,7 @@
"id": "library_add",
"type": "bool",
"label": "@70230",
"default": true,
"default": false,
"enabled": true,
"visible": true
},
@@ -26,7 +26,7 @@
"id": "library_move",
"type": "bool",
"label": "@70231",
"default": true,
"default": false,
"enabled": "eq(-1,true)",
"visible": true
},
@@ -34,7 +34,7 @@
"id": "browser",
"type": "bool",
"label": "@70232",
"default": false,
"default": true,
"enabled": true,
"visible": true
},

View File

@@ -6,8 +6,11 @@
import os
import re
import time
import unicodedata
from core import filetools
from core import jsontools
from core import scraper
from core import scrapertools
from core import servertools
@@ -53,7 +56,7 @@ def mainlist(item):
title = TITLE_TVSHOW % (
STATUS_COLORS[i.downloadStatus], i.downloadProgress, i.contentSerieName, i.contentChannel)
itemlist.append(Item(title=title, channel="descargas", action="mainlist", contentType="tvshow",
itemlist.append(Item(title=title, channel="downloads", action="mainlist", contentType="tvshow",
contentSerieName=i.contentSerieName, contentChannel=i.contentChannel,
downloadStatus=i.downloadStatus, downloadProgress=[i.downloadProgress],
fanart=i.fanart, thumbnail=i.thumbnail))
@@ -308,7 +311,6 @@ def update_json(path, params):
def save_server_statistics(server, speed, success):
from core import jsontools
if os.path.isfile(STATS_FILE):
servers = jsontools.load(open(STATS_FILE, "rb").read())
else:
@@ -330,7 +332,6 @@ def save_server_statistics(server, speed, success):
def get_server_position(server):
from core import jsontools
if os.path.isfile(STATS_FILE):
servers = jsontools.load(open(STATS_FILE, "rb").read())
else:
@@ -360,7 +361,6 @@ def get_match_list(data, match_list, order_list=None, only_ascii=False, ignoreca
coincidira con "Idioma Español" pero no con "Español" ya que la coincidencia mas larga tiene prioridad.
"""
import unicodedata
match_dict = dict()
matches = []

View File

@@ -0,0 +1,62 @@
{
"id": "mirapeliculas",
"name": "MiraPeliculas",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "http://mirapeliculas.net/favicon.ico",
"banner": "",
"categories": [
"movie"
],
"settings": [
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"LAT",
"ESP",
"VOSE"
]
},
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces",
"type": "bool",
"label": "Verificar si los enlaces existen",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces_num",
"type": "list",
"label": "Número de enlaces a verificar",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
}
]
}

View File

@@ -0,0 +1,142 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import re, urllib, urlparse
from channels import autoplay
from platformcode import config, logger, platformtools
from core.item import Item
from core import httptools, scrapertools, jsontools, tmdb
from core import servertools
from channels import filtertools
host = 'http://mirapeliculas.net'
IDIOMAS = {'Latino': 'LAT', 'Español': 'ESP', 'Subtitulado': 'VOSE'}
list_language = IDIOMAS.values()
list_servers = ['Streamango', 'Streamplay', 'Openload', 'Okru']
list_quality = ['BR-Rip', 'HD-Rip', 'DVD-Rip', 'TS-HQ', 'TS-Screner', 'Cam']
__channel__='mirapeliculas'
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', __channel__)
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', __channel__)
try:
__modo_grafico__ = config.get_setting('modo_grafico', __channel__)
except:
__modo_grafico__ = True
def mainlist(item):
logger.info()
itemlist = []
autoplay.init(item.channel, list_servers, list_quality)
itemlist.append(item.clone(title="Novedades" , action="lista", url= host))
itemlist.append(item.clone(title="Castellano" , action="lista", url= host + "/repelis/castellano/"))
itemlist.append(item.clone(title="Latino" , action="lista", url= host + "/repelis/latino/"))
itemlist.append(item.clone(title="Subtituladas" , action="lista", url= host + "/repelis/subtituladas/"))
itemlist.append(item.clone(title="Categorias" , action="categorias", url= host))
itemlist.append(item.clone(title="Buscar", action="search"))
itemlist.append(item.clone(title="Configurar canal...", text_color="gold", action="configuracion", folder=False))
autoplay.show_option(item.channel, itemlist)
return itemlist
def configuracion(item):
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
return ret
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "/buscar/?q=%s" % texto
try:
return lista(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<li class="cat-item cat-item-3"><a href="([^"]+)" title="([^"]+)">'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)  # debug output; remove once the channel is finished
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
itemlist.append(item.clone(channel=item.channel, action="lista", title=scrapedtitle , url=scrapedurl ,
thumbnail=scrapedthumbnail , plot=scrapedplot) )
return itemlist
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div class="col-mt-5 postsh">.*?<a href="([^"]+)".*?'
patron += '<span class="under-title-gnro">([^"]+)</span>.*?'
patron += '<p>(\d+)</p>.*?'
patron += '<img src="([^"]+)".*?'
patron += 'title="([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl, calidad, scrapedyear, scrapedthumbnail, scrapedtitle in matches:
scrapedplot = ""
title = '%s [COLOR red] %s [/COLOR] (%s)' % (scrapedtitle, calidad , scrapedyear)
itemlist.append(item.clone(action="findvideos", title=title , url=scrapedurl ,
thumbnail=scrapedthumbnail , contentTitle = scrapedtitle, plot=scrapedplot ,
quality=calidad, infoLabels={'year':scrapedyear}) )
tmdb.set_infoLabels(itemlist, True)
next_page_url = scrapertools.find_single_match(data,'<span class="current">\d+</span>.*?<a href="([^"]+)"')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append(item.clone(channel=item.channel , action="lista" , title="Next page >>" ,
text_color="blue", url=next_page_url) )
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<td><a rel="nofollow" href=.*?'
patron += '<td>([^<]+)</td>.*?'
patron += '<td>([^<]+)</td>.*?'
patron += '<img src=".*?=([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for lang, calidad, url in matches:
if lang in IDIOMAS:
lang = IDIOMAS[lang]
if not config.get_setting('unify'):
title = '[COLOR red] %s [/COLOR] (%s)' % (calidad , lang)
else:
title = ''
itemlist.append(item.clone(action="play", title='%s'+title, url=url, language=lang, quality=calidad ))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
# Required for link checking
if __comprueba_enlaces__:
itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
# Required for FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Required for AutoPlay
autoplay.start(itemlist, item)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra !='findvideos' :
itemlist.append(Item(channel=item.channel, action="add_pelicula_to_library",
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
extra="findvideos", contentTitle=item.contentTitle))
return itemlist
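
Note the title='%s'+title trick in findvideos() above: every link keeps an unresolved '%s' placeholder in its title until servertools.get_servers_itemlist() identifies the server, and the lambda then fills the server name in. A tiny standalone illustration of that deferred formatting (values are made up):

# Deferred title formatting, as used in findvideos() above:
title_template = '%s' + ' [COLOR red] HD-Rip [/COLOR] (LAT)'
print(title_template % 'streamango'.capitalize())  # Streamango [COLOR red] HD-Rip [/COLOR] (LAT)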

View File

@@ -1,38 +0,0 @@
{
"id": "mundoflv",
"name": "MundoFlv",
"active": false,
"adult": false,
"language": ["lat"],
"thumbnail": "https://s32.postimg.cc/h1ewz9hhx/mundoflv.png",
"banner": "mundoflv.png",
"categories": [
"tvshow"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"Latino",
"Español",
"VOS",
"VOSE",
"VO"
]
}
]
}

View File

@@ -1,655 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
host = "http://mundoflv.com"
thumbmx = 'http://flags.fmcdn.net/data/flags/normal/mx.png'
thumbes = 'http://flags.fmcdn.net/data/flags/normal/es.png'
thumben = 'http://flags.fmcdn.net/data/flags/normal/gb.png'
thumbsub = 'https://s32.postimg.cc/nzstk8z11/sub.png'
thumbtodos = 'https://s29.postimg.cc/4p8j2pkdj/todos.png'
patrones = ['<meta property="og:image" content="([^"]+)" \/>', '\/><\/a>([^*]+)<p><\/p>.*']
IDIOMAS = {'la': 'Latino',
'es': 'Español',
'sub': 'VOS',
'vosi': 'VOSE',
'en': 'VO'
}
list_language = IDIOMAS.values()
list_servers = [
'openload',
'gamovideo',
'powvideo',
'streamplay',
'streamin',
'streame',
'flashx',
'nowvideo'
]
list_quality = ['default']
audio = {'la': '[COLOR limegreen]LATINO[/COLOR]', 'es': '[COLOR yellow]ESPAÑOL[/COLOR]',
'sub': '[COLOR orange]ORIGINAL SUBTITULADO[/COLOR]', 'en': '[COLOR red]Original[/COLOR]',
'vosi': '[COLOR red]ORIGINAL SUBTITULADO INGLES[/COLOR]'
}
headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
['Referer', host]]
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(Item(channel=item.channel,
title="Series",
action="todas",
url=host,
thumbnail=get_thumb('tvshows', auto=True),
fanart='https://s27.postimg.cc/iahczwgrn/series.png'
))
itemlist.append(Item(channel=item.channel,
title="Alfabetico",
action="letras",
url=host,
thumbnail=get_thumb('alphabet', auto=True),
fanart='https://s17.postimg.cc/fwi1y99en/a-z.png'
))
itemlist.append(Item(channel=item.channel,
title="Mas vistas",
action="masvistas",
url=host,
thumbnail=get_thumb('more watched', auto=True),
fanart='https://s9.postimg.cc/wmhzu9d7z/vistas.png'
))
itemlist.append(Item(channel=item.channel,
title="Recomendadas",
action="recomendadas",
url=host,
thumbnail=get_thumb('recomended', auto=True),
fanart='https://s12.postimg.cc/s881laywd/recomendadas.png'
))
itemlist.append(Item(channel=item.channel,
title="Ultimas Agregadas",
action="ultimas",
url=host, thumbnail=get_thumb('last', auto=True),
fanart='https://s22.postimg.cc/cb7nmhwv5/ultimas.png'
))
itemlist.append(Item(channel=item.channel,
title="Buscar",
action="search",
url='http://mundoflv.com/?s=',
thumbnail=get_thumb('search', auto=True),
fanart='https://s30.postimg.cc/pei7txpa9/buscar.png'
))
if autoplay.context:
autoplay.show_option(item.channel, itemlist)
return itemlist
def todas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = 'class="item"><a href="(.*?)" title="(.*?)(?:\|.*?|\(.*?|- )(\d{4})(?:\)|-)".*?'
patron += '<div class="img">.*?'
patron += '<img src="([^"]+)" alt.*?>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle, scrapedyear, scrapedthumbnail in matches:
url = scrapedurl
title = scrapertools.decodeHtmlentities(scrapedtitle)
title = title.rstrip(' ')
thumbnail = scrapedthumbnail
year = scrapedyear
plot = ''
fanart = 'https://s32.postimg.cc/h1ewz9hhx/mundoflv.png'
itemlist.append(
Item(channel=item.channel,
action="temporadas",
title=title,
url=url,
thumbnail=thumbnail,
plot=plot,
fanart=fanart,
contentSerieName=title,
infoLabels={'year': year},
show=title,
list_language=list_language,
context=autoplay.context
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
itemlist = fail_tmdb(itemlist)
# Pagination
next_page_url = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />')
if next_page_url != "":
itemlist.append(Item(channel=item.channel,
action="todas",
title=">> Página siguiente",
url=next_page_url,
thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png'
))
return itemlist
def letras(item):
thumbletras = {'0-9': 'https://s32.postimg.cc/drojt686d/image.png',
'0 - 9': 'https://s32.postimg.cc/drojt686d/image.png',
'#': 'https://s32.postimg.cc/drojt686d/image.png',
'a': 'https://s32.postimg.cc/llp5ekfz9/image.png',
'b': 'https://s32.postimg.cc/y1qgm1yp1/image.png',
'c': 'https://s32.postimg.cc/vlon87gmd/image.png',
'd': 'https://s32.postimg.cc/3zlvnix9h/image.png',
'e': 'https://s32.postimg.cc/bgv32qmsl/image.png',
'f': 'https://s32.postimg.cc/y6u7vq605/image.png',
'g': 'https://s32.postimg.cc/9237ib6jp/image.png',
'h': 'https://s32.postimg.cc/812yt6pk5/image.png',
'i': 'https://s32.postimg.cc/6nbbxvqat/image.png',
'j': 'https://s32.postimg.cc/axpztgvdx/image.png',
'k': 'https://s32.postimg.cc/976yrzdut/image.png',
'l': 'https://s32.postimg.cc/fmal2e9yd/image.png',
'm': 'https://s32.postimg.cc/m19lz2go5/image.png',
'n': 'https://s32.postimg.cc/b2ycgvs2t/image.png',
'o': 'https://s32.postimg.cc/c6igsucpx/image.png',
'p': 'https://s32.postimg.cc/jnro82291/image.png',
'q': 'https://s32.postimg.cc/ve5lpfv1h/image.png',
'r': 'https://s32.postimg.cc/nmovqvqw5/image.png',
's': 'https://s32.postimg.cc/zd2t89jol/image.png',
't': 'https://s32.postimg.cc/wk9lo8jc5/image.png',
'u': 'https://s32.postimg.cc/w8s5bh2w5/image.png',
'v': 'https://s32.postimg.cc/e7dlrey91/image.png',
'w': 'https://s32.postimg.cc/fnp49k15x/image.png',
'x': 'https://s32.postimg.cc/dkep1w1d1/image.png',
'y': 'https://s32.postimg.cc/um7j3zg85/image.png',
'z': 'https://s32.postimg.cc/jb4vfm9d1/image.png'}
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<li><a.*?href="([^"]+)">([^<]+)<\/a><\/li>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
url = urlparse.urljoin(item.url, scrapedurl)
title = scrapedtitle
if scrapedtitle.lower() in thumbletras:
thumbnail = thumbletras[scrapedtitle.lower()]
else:
thumbnail = ''
plot = ""
fanart = item.fanart
itemlist.append(
Item(channel=item.channel,
action="todas",
title=title,
url=url,
thumbnail=thumbnail,
plot=plot,
fanart=fanart,
contentSerieName=title
))
return itemlist
def fail_tmdb(itemlist):
logger.info()
realplot = ''
for item in itemlist:
if item.infoLabels['plot'] == '':
data = httptools.downloadpage(item.url).data
if item.thumbnail == '':
item.thumbnail = scrapertools.find_single_match(data, patrones[0])
realplot = scrapertools.find_single_match(data, patrones[1])
item.plot = scrapertools.remove_htmltags(realplot)
return itemlist
def masvistas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<li><a href="(?!http:\/\/mundoflv\.com\/tag\/)(.*?)">.*?'
patron += 'div class="im">.*?'
patron += '<img src=".*?" alt="(.*?)(?:\|.*?|\(.*?|- )(\d{4})|-" \/>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle, scrapedyear in matches:
url = scrapedurl
title = scrapedtitle
fanart = item.fanart
contentSerieName = scrapedtitle
year = scrapedyear
thumbnail = ''
plot = 'nada'
itemlist.append(
Item(channel=item.channel,
action="temporadas",
title=title,
url=url,
thumbnail=thumbnail,
plot=plot,
fanart=fanart,
contentSerieName=contentSerieName,
infoLabels={'year': year},
context=autoplay.context
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
itemlist = fail_tmdb(itemlist)
return itemlist
def recomendadas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
realplot = ''
patron = '<li><A HREF="([^"]+)"><.*?>Ver ([^<]+)<\/A><\/li>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
url = urlparse.urljoin(item.url, scrapedurl)
data = httptools.downloadpage(scrapedurl).data
thumbnail = scrapertools.get_match(data, '<meta property="og:image" content="([^"]+)".*?>')
realplot = scrapertools.find_single_match(data, '\/><\/a>([^*]+)<p><\/p>.*')
plot = scrapertools.remove_htmltags(realplot)
title = scrapedtitle.replace('online', '')
title = scrapertools.decodeHtmlentities(title)
fanart = item.fanart
itemlist.append(
Item(channel=item.channel,
action="temporadas",
title=title,
url=url,
thumbnail=thumbnail,
plot=plot,
fanart=fanart,
contentSerieName=title,
context=autoplay.context
))
return itemlist
def ultimas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
realplot = ''
patron = '<li><A HREF="([^"]+)"> <.*?>Ver ([^<]+)<\/A><\/li>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
url = urlparse.urljoin(item.url, scrapedurl)
data = httptools.downloadpage(scrapedurl).data
thumbnail = scrapertools.get_match(data, '<meta property="og:image" content="([^"]+)".*?>')
realplot = scrapertools.find_single_match(data, '\/><\/a>([^*]+)<p><\/p>.*')
plot = scrapertools.remove_htmltags(realplot)
plot = ""
title = scrapedtitle.replace('online', '')
title = scrapertools.decodeHtmlentities(title)
fanart = item.fanart
itemlist.append(
Item(channel=item.channel,
action="idioma",
title=title,
url=url,
thumbnail=thumbnail,
plot=plot,
fanart=fanart,
contentSerieName=title,
context=autoplay.context
))
return itemlist
def temporadas(item):
logger.info()
itemlist = []
templist = []
data = httptools.downloadpage(item.url).data
data = data.replace ('"',"'")
realplot = ''
patron = "<button class='classnamer' onclick='javascript: mostrarcapitulos.*?blank'>([^<]+)<\/button>"
matches = re.compile(patron, re.DOTALL).findall(data)
serieid = scrapertools.find_single_match(data, "data-nonce='(.*?)'")
item.thumbnail = item.thumbvid
infoLabels = item.infoLabels
for scrapedtitle in matches:
url = 'http://mundoflv.com/wp-content/themes/wpRafael/includes/capitulos.php?serie=' + serieid + \
'&temporada=' + scrapedtitle
title = 'Temporada ' + scrapertools.decodeHtmlentities(scrapedtitle)
contentSeasonNumber = scrapedtitle
thumbnail = item.thumbnail
plot = ''
fanart = ''
itemlist.append(
Item(channel=item.channel,
action="episodiosxtemp",
title=title,
fulltitle=item.title,
url=url,
thumbnail=thumbnail,
plot=plot,
fanart=fanart,
extra1=item.extra1,
contentSerieName=item.contentSerieName,
contentSeasonNumber=contentSeasonNumber,
infoLabels={'season': contentSeasonNumber},
context=item.context
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
Item(channel=item.channel,
title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
url=item.url,
action="add_serie_to_library",
extra="episodios",
contentSerieName=item.contentSerieName,
extra1=item.extra1
))
return itemlist
def episodios(item):
logger.info()
itemlist = []
templist = temporadas(item)
for tempitem in templist:
itemlist += episodiosxtemp(tempitem)
return itemlist
def episodiosxtemp(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = data.replace('"', "'")
patron = "<button class='classnamer' onclick='javascript: mostrarenlaces\(([^\)]+)\).*?<"
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedtitle in matches:
item.url = item.url.replace("&sr", "")
item.url = item.url.replace("capitulos", "enlaces")
url = item.url + '&capitulo=' + scrapedtitle
contentEpisodeNumber = scrapedtitle
title = item.contentSerieName + ' ' + item.contentSeasonNumber + 'x' + contentEpisodeNumber
thumbnail = item.thumbnail
plot = ''
infoLabels = item.infoLabels
infoLabels['episode'] = contentEpisodeNumber
itemlist.append(
Item(channel=item.channel,
action="findvideos",
title=title,
fulltitle=item.fulltitle,
url=url,
thumbnail=thumbnail,
plot=plot,
extra1=item.extra1,
idioma='',
contentSerieName=item.contentSerieName,
contentSeasonNumber=item.contentSeasonNumber,
infoLabels=infoLabels,
show=item.contentSerieName,
list_language=list_language,
context=item.context
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def idioma(item):
logger.info()
itemlist = []
thumbvid = item.thumbnail
itemlist.append(
Item(channel=item.channel,
title="Latino",
action="temporadas",
url=item.url,
thumbnail=thumbmx,
fanart='',
extra1='la',
fulltitle=item.title,
thumbvid=thumbvid,
contentSerieName=item.contentSerieName,
infoLabels=item.infoLabels,
language='la'
))
itemlist.append(
Item(channel=item.channel,
title="Español",
action="temporadas",
url=item.url,
thumbnail=thumbes,
fanart='',
extra1='es',
fulltitle=item.title,
thumbvid=thumbvid,
contentSerieName=item.contentSerieName,
language='es'
))
itemlist.append(
Item(channel=item.channel,
title="Subtitulado",
action="temporadas",
url=item.url,
thumbnail=thumbsub,
fanart='',
extra1='sub',
fulltitle=item.title,
thumbvid=thumbvid,
contentSerieName=item.contentSerieName,
language='sub'
))
itemlist.append(
Item(channel=item.channel,
title="Original",
action="temporadas",
url=item.url,
thumbnail=thumben,
fanart='',
extra1='en',
fulltitle=item.title,
thumbvid=thumbvid,
contentSerieName=item.contentSerieName,
language='en'
))
itemlist.append(
Item(channel=item.channel,
title="Original Subtitulado en Ingles",
action="temporadas",
url=item.url,
thumbnail=thumben,
fanart='',
extra1='vosi',
fulltitle=item.title,
thumbvid=thumbvid,
contentSerieName=item.contentSerieName,
language='vosi'
))
itemlist.append(
Item(channel=item.channel,
title="Todo",
action="temporadas",
url=item.url,
thumbnail=thumbtodos,
fanart='',
extra1='all',
fulltitle=item.title,
thumbvid=thumbvid,
contentSerieName=item.contentSerieName,
language='all'
))
return itemlist
def busqueda(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<img class=.*?src="([^"]+)" alt="(.*?)(?:\|.*?|\(.*?|")>.*?h3><a href="(.*?)".*?class="year">(' \
'.*?)<\/span>'
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedthumbnail, scrapedtitle, scrapedurl, scrapedyear in matches:
url = scrapedurl
title = scrapertools.decodeHtmlentities(scrapedtitle)
thumbnail = scrapedthumbnail
plot = ''
year = scrapedyear
itemlist.append(
Item(channel=item.channel,
action="idioma",
title=title,
fulltitle=title,
url=url,
thumbnail=thumbnail,
plot=plot,
contentSerieName=title,
infoLabels={'year': year},
context=autoplay.context
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
itemlist = fail_tmdb(itemlist)
# Paginacion
next_page_url = scrapertools.find_single_match(data,
"<a rel='nofollow' class='previouspostslink' href='([^']+)'>Siguiente &rsaquo;</a>")
if next_page_url != "":
item.url = next_page_url
itemlist.append(
Item(channel=item.channel,
action="busqueda",
title=">> Página siguiente",
url=next_page_url,
thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png'
))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
if texto != '':
return busqueda(item)
else:
return []
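# Each scraped row provides the link, the hoster name and a language tag; they are mapped to playable items and filtered below.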
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = 'href="([^"]+)".*?domain=.*?>([^<]+).*?gold">([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedserver, scrapedidioma in matches:
url = scrapedurl
idioma = audio[scrapedidioma]
server = scrapedserver.strip(' ')
if server == 'streamin':
server = 'streaminto'
title = item.contentSerieName + ' ' + str(item.contentSeasonNumber) + 'x' + str(
item.contentEpisodeNumber) + ' ' + idioma + ' (' + server + ')'
new_item = item.clone(title=title,
url=url,
action="play",
language=IDIOMAS[scrapedidioma],
server=server,
quality='default',
fulltitle=item.contentSerieName,
)
# Requerido para FilterTools
itemlist = filtertools.get_link(itemlist, new_item, list_language)
import os
for videoitem in itemlist:
videoitem.infoLabels = item.infoLabels
videoitem.thumbnail = os.path.join(config.get_runtime_path(), "resources", "media", "servers",
"server_%s.png" % videoitem.server)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
return itemlist
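# Most hosters embed the final video in an iframe; streamplay, streame, clipwatching and vidoza use a meta-refresh redirect instead, so each case has its own extraction pattern.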
def play(item):
logger.info()
data = httptools.downloadpage(item.url).data
if item.server not in ['streamplay','streame', 'clipwatching', 'vidoza']:
url = scrapertools.find_single_match(data, '<(?:IFRAME|iframe).*?(?:SRC|src)=*([^ ]+) (?!style|STYLE)')
else:
url = scrapertools.find_single_match(data, '<meta http-equiv="refresh" content="0; url=([^"]+)">')
itemlist = servertools.find_video_items(data=url)
for videoitem in itemlist:
videoitem.infoLabels = item.infoLabels
videoitem.title = item.title
videoitem.thumbnail = videoitem.infoLabels['thumbnail']
return itemlist

View File

@@ -264,6 +264,7 @@ def submenu_novedades(item):
itemlist = []
itemlist_alt = []
item.extra2 = ''
thumb_buscar = get_thumb("search.png")
#Renombramos el canal al nombre de clone inicial desde la URL
item.channel_host = host
@@ -327,6 +328,9 @@ def submenu_novedades(item):
item.post = "date=%s" % value
itemlist.append(item.clone(action="listado_busqueda", title=title, url=item.url, post=item.post))
itemlist.append(
Item(channel=item.channel, action="search", title="Buscar", url=item.channel_host + "buscar", thumbnail=thumb_buscar, category=item.category, channel_host=item.channel_host))
itemlist.append(item.clone(action='', title="[COLOR yellow]Lo Último en la Categoría:[/COLOR]"))
for value, title in matches:
if value.isdigit():

View File

@@ -534,7 +534,7 @@ def show_channels(item):
def menu_opciones(item):
itemlist = list()
itemlist.append(Item(channel=item.channel, title=config.get_localized_string(60525),
thumbnail=get_thumb("setting_0.png"),
text_bold = True, thumbnail=get_thumb("setting_0.png"),
folder=False))
itemlist.append(Item(channel=item.channel, action="setting_channel", extra="peliculas", title=config.get_localized_string(60526),
thumbnail=get_thumb("channels_movie.png"),

View File

@@ -173,6 +173,8 @@ def p_portipo(item):
def peliculas(item):
logger.info()
itemlist = []
# action = ''
# contentType = ''
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)
patron = '<img class="posterentrada" src="/([^"]+)".*?' # img
@@ -184,12 +186,22 @@ def peliculas(item):
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedthumbnail, scrapedurl, year, plot, scrapedtitle in matches:
if 'serie' in scrapedurl:
action = 'temporadas'
contentType = 'tvshow'
title = scrapedtitle + ' [COLOR blue](Serie)[/COLOR]'
else:
action = 'findvideos'
contentType = 'movie'
title = scrapedtitle
if item.infoLabels['plot'] == '':
item.plot = plot
itemlist.append(Item(channel=item.channel, action="findvideos", contentTitle=scrapedtitle,
itemlist.append(Item(channel=item.channel, action=action, contentTitle=scrapedtitle, contentType=contentType,
infoLabels={"year": year}, thumbnail=host + scrapedthumbnail,
url=scrapedurl, title=scrapedtitle, plot=plot))
url=scrapedurl, title=title, plot=plot))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
@@ -308,7 +320,8 @@ def temporadas(item):
text_color=color1, thumbnail=get_thumb("videolibrary_tvshow.png"), fanart=fanart_host))
return itemlist
else:
return episdesxseason(item)
return episodesxseason(item)
def episodios(item):
logger.info()
@@ -339,12 +352,12 @@ def episodesxseason(item):
episode = element['metas_formateadas']['nepisodio']
season = element['metas_formateadas']['ntemporada']
scrapedurl = element['url_directa']
if 'season' in item.infoLabels and int(item.infoLabels['season']) != int(season):
continue
title = "%sx%s: %s" % (season, episode.zfill(
2), scrapertools.unescape(scrapedname))
new_item = item.clone(title=title, url=scrapedurl, action="findvideos", text_color=color3, fulltitle=title,
contentType="episode", extra='serie')
title = "%sx%s: %s" % (season, episode.zfill(2), scrapertools.unescape(scrapedname))
new_item = item.clone(title=title, url=scrapedurl, action="findvideos", text_color=color3,
fulltitle=title, contentType="episode", extra='serie')
if 'infoLabels' not in new_item:
new_item.infoLabels = {}
new_item.infoLabels['season'] = season
@@ -365,11 +378,7 @@ def episodesxseason(item):
itemlist.sort(key=lambda it: int(it.infoLabels['episode']),
reverse=config.get_setting('orden_episodios', __channel__))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
# Opción "Añadir esta serie a la videoteca"
# if config.get_videolibrary_support() and len(itemlist) > 0:
# itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
# action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
# text_color=color1, thumbnail=get_thumb("videolibrary_tvshow.png"), fanart=fanart_host))
return itemlist

View File

@@ -57,31 +57,25 @@ def menu_channels(item):
itemlist.append(Item(channel=CHANNELNAME, title=config.get_localized_string(60545), action="conf_tools", folder=False,
extra="channels_onoff", thumbnail=get_thumb("setting_0.png")))
itemlist.append(Item(channel=CHANNELNAME, title=config.get_localized_string(60546), action="", folder=False,
thumbnail=get_thumb("setting_0.png")))
itemlist.append(Item(channel=CHANNELNAME, title=config.get_localized_string(60546) + ":", action="", folder=False,
text_bold = True, thumbnail=get_thumb("setting_0.png")))
# Inicio - Canales configurables
import channelselector
from core import channeltools
channel_list = channelselector.filterchannels("all")
for channel in channel_list:
channel_parameters = channeltools.get_channel_parameters(channel.channel)
if channel_parameters["has_settings"]:
itemlist.append(Item(channel=CHANNELNAME, title=config.get_localized_string(60547) % channel.title,
itemlist.append(Item(channel=CHANNELNAME, title=". " + config.get_localized_string(60547) % channel.title,
action="channel_config", config=channel.channel, folder=False,
thumbnail=channel.thumbnail))
# Fin - Canales configurables
itemlist.append(Item(channel=CHANNELNAME, action="", title="", folder=False, thumbnail=get_thumb("setting_0.png")))
itemlist.append(Item(channel=CHANNELNAME, title=config.get_localized_string(60548), action="", folder=False,
thumbnail=get_thumb("channels.png")))
itemlist.append(Item(channel=CHANNELNAME, title=config.get_localized_string(60549), action="conf_tools",
itemlist.append(Item(channel=CHANNELNAME, title=config.get_localized_string(60548) + ":", action="", folder=False,
text_bold=True, thumbnail=get_thumb("channels.png")))
itemlist.append(Item(channel=CHANNELNAME, title=". " + config.get_localized_string(60549), action="conf_tools",
folder=True, extra="lib_check_datajson", thumbnail=get_thumb("channels.png")))
return itemlist
@@ -129,7 +123,7 @@ def menu_servers(item):
action="servers_favorites", folder=False, thumbnail=get_thumb("setting_0.png")))
itemlist.append(Item(channel=CHANNELNAME, title=config.get_localized_string(60552),
action="", folder=False, thumbnail=get_thumb("setting_0.png")))
action="", folder=False, text_bold = True, thumbnail=get_thumb("setting_0.png")))
# Inicio - Servidores configurables
@@ -138,11 +132,11 @@ def menu_servers(item):
server_parameters = servertools.get_server_parameters(server)
if server_parameters["has_settings"]:
itemlist.append(
Item(channel=CHANNELNAME, title=config.get_localized_string(60553) % server_parameters["name"],
Item(channel=CHANNELNAME, title = ". " + config.get_localized_string(60553) % server_parameters["name"],
action="server_config", config=server, folder=False, thumbnail=""))
itemlist.append(Item(channel=CHANNELNAME, title=config.get_localized_string(60554),
action="", folder=False, thumbnail=get_thumb("setting_0.png")))
action="", folder=False, text_bold = True, thumbnail=get_thumb("setting_0.png")))
server_list = servertools.get_servers_list().keys()
@@ -152,7 +146,7 @@ def menu_servers(item):
if server_parameters["has_settings"] and filter(lambda x: x["id"] not in ["black_list", "white_list"],
server_parameters["settings"]):
itemlist.append(
Item(channel=CHANNELNAME, title=config.get_localized_string(60553) % server_parameters["name"],
Item(channel=CHANNELNAME, title=". " + config.get_localized_string(60553) % server_parameters["name"],
action="server_config", config=server, folder=False, thumbnail=""))
# Fin - Servidores configurables
@@ -306,25 +300,27 @@ def submenu_tools(item):
itemlist.append(Item(channel=CHANNELNAME, action="check_quickfixes", folder=False,
title="Comprobar actualizaciones urgentes", plot="Versión actual: %s" % config.get_addon_version() ))
itemlist.append(Item(channel=CHANNELNAME, action="update_quasar", folder=False,
title="Actualizar addon externo Quasar"))
itemlist.append(Item(channel=CHANNELNAME, action="", title="", folder=False,
thumbnail=get_thumb("setting_0.png")))
itemlist.append(Item(channel=CHANNELNAME, title=config.get_localized_string(60564), action="", folder=False,
thumbnail=get_thumb("channels.png")))
itemlist.append(Item(channel=CHANNELNAME, title=config.get_localized_string(60564) + ":", action="", folder=False,
text_bold=True, thumbnail=get_thumb("channels.png")))
itemlist.append(Item(channel=CHANNELNAME, title=config.get_localized_string(60565), action="conf_tools",
folder=True, extra="lib_check_datajson", thumbnail=get_thumb("channels.png")))
if config.get_videolibrary_support():
itemlist.append(Item(channel=CHANNELNAME, action="", title="", folder=False,
thumbnail=get_thumb("setting_0.png")))
itemlist.append(Item(channel=CHANNELNAME, title=config.get_localized_string(60566), action="", folder=False,
thumbnail=get_thumb("videolibrary.png")))
itemlist.append(Item(channel=CHANNELNAME, title=config.get_localized_string(60566) + ":", action="", folder=False,
text_bold=True, thumbnail=get_thumb("videolibrary.png")))
itemlist.append(Item(channel=CHANNELNAME, action="overwrite_tools", folder=False,
thumbnail=get_thumb("videolibrary.png"),
title=config.get_localized_string(60567)))
title="- " + config.get_localized_string(60567)))
itemlist.append(Item(channel="videolibrary", action="update_videolibrary", folder=False,
thumbnail=get_thumb("videolibrary.png"),
title=config.get_localized_string(60568)))
title="- " + config.get_localized_string(60568)))
return itemlist
@@ -336,6 +332,18 @@ def check_quickfixes(item):
return updater.check_addon_updates(verbose=True)
def update_quasar(item):
logger.info()
from platformcode import custom_code, platformtools
stat = False
stat = custom_code.update_external_addon("quasar")
if stat:
platformtools.dialog_notification("Actualización Quasar", "Realizada con éxito")
else:
platformtools.dialog_notification("Actualización Quasar", "Ha fallado. Consulte el log")
def conf_tools(item):
logger.info()

View File

@@ -152,9 +152,9 @@ def findvideos(data, skip=False):
if skip and len(devuelve) >= skip:
devuelve = devuelve[:skip]
break
if config.get_setting("filter_servers") == False: is_filter_servers = False
if not devuelve and is_filter_servers:
platformtools.dialog_ok(config.get_localized_string(60001))
platformtools.dialog_ok(config.get_localized_string(60000), config.get_localized_string(60001))
return devuelve

View File

@@ -0,0 +1,294 @@
import os
import stat
import time
import xbmc
import shutil
import socket
import urllib2
import xbmcgui
import threading
import subprocess
from quasar.logger import log
from quasar.osarch import PLATFORM
from quasar.config import QUASARD_HOST
from quasar.addon import ADDON, ADDON_ID, ADDON_PATH
from quasar.util import notify, system_information, getLocalizedString, getWindowsShortPath
def ensure_exec_perms(file_):
st = os.stat(file_)
os.chmod(file_, st.st_mode | stat.S_IEXEC)
return file_
def android_get_current_appid():
with open("/proc/%d/cmdline" % os.getpid()) as fp:
return fp.read().rstrip("\0")
def get_quasard_checksum(path):
try:
with open(path) as fp:
fp.seek(-40, os.SEEK_END) # we put a sha1 there
return fp.read()
except Exception:
return ""
def get_quasar_binary():
binary = "quasar" + (PLATFORM["os"] == "windows" and ".exe" or "")
log.info("PLATFORM: %s" % str(PLATFORM))
binary_dir = os.path.join(ADDON_PATH, "resources", "bin", "%(os)s_%(arch)s" % PLATFORM)
if PLATFORM["os"] == "android":
log.info("Detected binary folder: %s" % binary_dir)
binary_dir_legacy = binary_dir.replace("/storage/emulated/0", "/storage/emulated/legacy")
if os.path.exists(binary_dir_legacy):
binary_dir = binary_dir_legacy
log.info("Using binary folder: %s" % binary_dir)
app_id = android_get_current_appid()
xbmc_data_path = os.path.join("/data", "data", app_id)
try: #Test if there is any permissions problem
f = open(os.path.join(xbmc_data_path, "test.txt"), "wb")
f.write("test")
f.close()
os.remove(os.path.join(xbmc_data_path, "test.txt"))
except:
xbmc_data_path = ''
if not os.path.exists(xbmc_data_path):
log.info("%s path does not exist, so using %s as xbmc_data_path" % (xbmc_data_path, xbmc.translatePath("special://xbmcbin/")))
xbmc_data_path = xbmc.translatePath("special://xbmcbin/")
try: #Test if there is any permissions problem
f = open(os.path.join(xbmc_data_path, "test.txt"), "wb")
f.write("test")
f.close()
os.remove(os.path.join(xbmc_data_path, "test.txt"))
except:
xbmc_data_path = ''
if not os.path.exists(xbmc_data_path):
log.info("%s path does not exist, so using %s as xbmc_data_path" % (xbmc_data_path, xbmc.translatePath("special://masterprofile/")))
xbmc_data_path = xbmc.translatePath("special://masterprofile/")
dest_binary_dir = os.path.join(xbmc_data_path, "files", ADDON_ID, "bin", "%(os)s_%(arch)s" % PLATFORM)
else:
dest_binary_dir = os.path.join(xbmc.translatePath(ADDON.getAddonInfo("profile")).decode('utf-8'), "bin", "%(os)s_%(arch)s" % PLATFORM)
log.info("Using destination binary folder: %s" % dest_binary_dir)
binary_path = os.path.join(binary_dir, binary)
dest_binary_path = os.path.join(dest_binary_dir, binary)
if not os.path.exists(binary_path):
notify((getLocalizedString(30103) + " %(os)s_%(arch)s" % PLATFORM), time=7000)
system_information()
try:
log.info("Source directory (%s):\n%s" % (binary_dir, os.listdir(os.path.join(binary_dir, ".."))))
log.info("Destination directory (%s):\n%s" % (dest_binary_dir, os.listdir(os.path.join(dest_binary_dir, ".."))))
except Exception:
pass
return False, False
if os.path.isdir(dest_binary_path):
log.warning("Destination path is a directory, expected previous binary file, removing...")
try:
shutil.rmtree(dest_binary_path)
except Exception as e:
log.error("Unable to remove destination path for update: %s" % e)
system_information()
return False, False
if not os.path.exists(dest_binary_path) or get_quasard_checksum(dest_binary_path) != get_quasard_checksum(binary_path):
log.info("Updating quasar daemon...")
try:
os.makedirs(dest_binary_dir)
except OSError:
pass
try:
shutil.rmtree(dest_binary_dir)
except Exception as e:
log.error("Unable to remove destination path for update: %s" % e)
system_information()
pass
try:
shutil.copytree(binary_dir, dest_binary_dir)
except Exception as e:
log.error("Unable to copy to destination path for update: %s" % e)
system_information()
return False, False
# Clean stale files in the directory, as this can cause headaches on
# Android when they are unreachable
dest_files = set(os.listdir(dest_binary_dir))
orig_files = set(os.listdir(binary_dir))
log.info("Deleting stale files %s" % (dest_files - orig_files))
for file_ in (dest_files - orig_files):
path = os.path.join(dest_binary_dir, file_)
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.remove(path)
return dest_binary_dir, ensure_exec_perms(dest_binary_path)
def clear_fd_inherit_flags():
# Ensure the spawned quasar binary doesn't inherit open files from Kodi
# which can break things like addon updates. [WINDOWS ONLY]
from ctypes import windll
HANDLE_RANGE = xrange(0, 65536)
HANDLE_FLAG_INHERIT = 1
FILE_TYPE_DISK = 1
for hd in HANDLE_RANGE:
if windll.kernel32.GetFileType(hd) == FILE_TYPE_DISK:
if not windll.kernel32.SetHandleInformation(hd, HANDLE_FLAG_INHERIT, 0):
log.error("Error clearing inherit flag, disk file handle %x" % hd)
def jsonrpc_enabled(notify=False):
try:
s = socket.socket()
s.connect(('127.0.0.1', 9090))
s.close()
log.info("Kodi's JSON-RPC service is available, starting up...")
del s
return True
except Exception as e:
log.error(repr(e))
if notify:
xbmc.executebuiltin("ActivateWindow(ServiceSettings)")
dialog = xbmcgui.Dialog()
dialog.ok("Quasar", getLocalizedString(30199))
return False
def start_quasard(**kwargs):
jsonrpc_failures = 0
while jsonrpc_enabled() is False:
jsonrpc_failures += 1
log.warning("Unable to connect to Kodi's JSON-RPC service, retrying...")
if jsonrpc_failures > 1:
time.sleep(5)
if not jsonrpc_enabled(notify=True):
log.error("Unable to reach Kodi's JSON-RPC service, aborting...")
return False
else:
break
time.sleep(3)
quasar_dir, quasar_binary = get_quasar_binary()
if quasar_dir is False or quasar_binary is False:
return False
lockfile = os.path.join(ADDON_PATH, ".lockfile")
if os.path.exists(lockfile):
log.warning("Existing process found from lockfile, killing...")
try:
with open(lockfile) as lf:
pid = int(lf.read().rstrip(" \t\r\n\0"))
os.kill(pid, 9)
except Exception as e:
log.error(repr(e))
if PLATFORM["os"] == "windows":
log.warning("Removing library.db.lock file...")
try:
library_lockfile = os.path.join(xbmc.translatePath(ADDON.getAddonInfo("profile")).decode('utf-8'), "library.db.lock")
os.remove(library_lockfile)
except Exception as e:
log.error(repr(e))
SW_HIDE = 0
STARTF_USESHOWWINDOW = 1
args = [quasar_binary]
kwargs["cwd"] = quasar_dir
if PLATFORM["os"] == "windows":
args[0] = getWindowsShortPath(quasar_binary)
kwargs["cwd"] = getWindowsShortPath(quasar_dir)
si = subprocess.STARTUPINFO()
si.dwFlags = STARTF_USESHOWWINDOW
si.wShowWindow = SW_HIDE
clear_fd_inherit_flags()
kwargs["startupinfo"] = si
else:
env = os.environ.copy()
env["LD_LIBRARY_PATH"] = "%s:%s" % (quasar_dir, env.get("LD_LIBRARY_PATH", ""))
kwargs["env"] = env
kwargs["close_fds"] = True
wait_counter = 1
while xbmc.getCondVisibility('Window.IsVisible(10140)') or xbmc.getCondVisibility('Window.IsActive(10140)'):
if wait_counter == 1:
log.info('Add-on settings currently opened, waiting before starting...')
if wait_counter > 300:
break
time.sleep(1)
wait_counter += 1
return subprocess.Popen(args, **kwargs)
def shutdown():
try:
urllib2.urlopen(QUASARD_HOST + "/shutdown")
except:
pass
def wait_for_abortRequested(proc, monitor):
monitor.closing.wait()
log.info("quasard: exiting quasard daemon")
try:
proc.terminate()
except OSError:
pass # Process already exited, nothing to terminate
log.info("quasard: quasard daemon exited")
def quasard_thread(monitor):
crash_count = 0
try:
while not xbmc.abortRequested:
log.info("quasard: starting quasard")
proc = start_quasard(stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
if not proc:
break
threading.Thread(target=wait_for_abortRequested, args=[proc, monitor]).start()
if PLATFORM["os"] == "windows":
while proc.poll() is None:
log.info(proc.stdout.readline())
else:
# Kodi hangs on some Android (sigh...) systems when doing a blocking
# read. We count on the fact that Quasar daemon flushes its log
# output on \n, creating a pretty clean output
import fcntl
import select
fd = proc.stdout.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
while proc.poll() is None:
try:
to_read, _, _ = select.select([proc.stdout], [], [])
for ro in to_read:
line = ro.readline()
if line == "": # write end is closed
break
log.info(line)
except IOError:
time.sleep(1) # nothing to read, sleep
if proc.returncode == 0 or xbmc.abortRequested:
break
crash_count += 1
notify(getLocalizedString(30100), time=3000)
xbmc.executebuiltin("Dialog.Close(all, true)")
system_information()
time.sleep(5)
if crash_count >= 3:
notify(getLocalizedString(30110), time=3000)
break
except Exception as e:
import traceback
map(log.error, traceback.format_exc().split("\n"))
notify("%s: %s" % (getLocalizedString(30226), repr(e).encode('utf-8')))
raise

View File

@@ -0,0 +1,260 @@
import os
import sys
import socket
import urllib2
import urlparse
import xbmc
import xbmcgui
import xbmcplugin
from quasar.logger import log
from quasar.config import QUASARD_HOST
from quasar.addon import ADDON, ADDON_ID, ADDON_PATH
from quasar.util import notify, getLocalizedString, getLocalizedLabel, system_information
try:
import simplejson as json
except ImportError:
import json
HANDLE = int(sys.argv[1])
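# Dict with case-insensitive (lowercased) keys, since infolabel names arrive with inconsistent casing.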
class InfoLabels(dict):
def __init__(self, *args, **kwargs):
self.update(*args, **kwargs)
def __getitem__(self, key):
return dict.get(self, key.lower(), "")
def __setitem__(self, key, val):
dict.__setitem__(self, key.lower(), val)
def update(self, *args, **kwargs):
for k, v in dict(*args, **kwargs).iteritems():
self[k] = v
class closing(object):
def __init__(self, thing):
self.thing = thing
def __enter__(self):
return self.thing
def __exit__(self, *exc_info):
self.thing.close()
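# Capture 3xx responses instead of following them: the daemon resolves playback requests with a redirect to the actual stream URL, which _json() passes to Kodi as the resolved path.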
class NoRedirectHandler(urllib2.HTTPRedirectHandler):
def http_error_302(self, req, fp, code, msg, headers):
import urllib
infourl = urllib.addinfourl(fp, headers, headers["Location"])
infourl.status = code
infourl.code = code
return infourl
http_error_300 = http_error_302
http_error_301 = http_error_302
http_error_303 = http_error_302
http_error_307 = http_error_302
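# Build the daemon's /infolabels endpoint from the plugin URL: ids embedded in the path are tried first, with tmdb/show/season/episode query parameters as the fallback.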
def getInfoLabels():
id_list = [int(s) for s in sys.argv[0].split("/") if s.isdigit()]
tmdb_id = id_list[0] if id_list else None
if not tmdb_id:
parsed_url = urlparse.urlparse(sys.argv[0] + sys.argv[2])
query = urlparse.parse_qs(parsed_url.query)
log.debug("Parsed URL: %s, Query: %s", repr(parsed_url), repr(query))
if 'tmdb' in query and 'show' not in query:
tmdb_id = query['tmdb'][0]
url = "%s/movie/%s/infolabels" % (QUASARD_HOST, tmdb_id)
elif 'show' in query:
tmdb_id = query['show'][0]
if 'season' in query and 'episode' in query:
url = "%s/show/%s/season/%s/episode/%s/infolabels" % (QUASARD_HOST, tmdb_id, query['season'][0], query['episode'][0])
else:
url = "%s/show/%s/infolabels" % (QUASARD_HOST, tmdb_id)
else:
url = "%s/infolabels" % (QUASARD_HOST)
elif 'movie' in sys.argv[0]:
url = "%s/movie/%s/infolabels" % (QUASARD_HOST, tmdb_id)
elif ('episode' in sys.argv[0] or 'show' in sys.argv[0]) and len(id_list) > 2:
url = "%s/show/%s/season/%s/episode/%s/infolabels" % (QUASARD_HOST, tmdb_id, id_list[1], id_list[2])
elif 'show' in sys.argv[0] and len(id_list) == 2:
url = "%s/show/%s/season/%s/episode/%s/infolabels" % (QUASARD_HOST, tmdb_id, id_list[1], 1)
else:
url = "%s/infolabels" % (QUASARD_HOST)
log.debug("Resolving TMDB item by calling %s for %s" % (url, repr(sys.argv)))
try:
with closing(urllib2.urlopen(url)) as response:
resolved = json.loads(response.read())
if not resolved:
return {}
if 'info' in resolved and resolved['info']:
resolved.update(resolved['info'])
if 'art' in resolved and resolved['art']:
resolved['artbanner'] = ''
for k, v in resolved['art'].items():
resolved['art' + k] = v
if 'info' in resolved:
del resolved['info']
if 'art' in resolved:
del resolved['art']
if 'stream_info' in resolved:
del resolved['stream_info']
if 'dbtype' not in resolved:
resolved['dbtype'] = 'video'
if 'mediatype' not in resolved or resolved['mediatype'] == '':
resolved['mediatype'] = resolved['dbtype']
return resolved
except:
log.debug("Could not resolve TMDB item: %s" % tmdb_id)
return {}
def _json(url):
with closing(urllib2.urlopen(url)) as response:
if response.code >= 300 and response.code <= 307:
# Pause currently playing Quasar file to avoid doubling requests
if xbmc.Player().isPlaying() and ADDON_ID in xbmc.Player().getPlayingFile():
xbmc.Player().pause()
_infoLabels = InfoLabels(getInfoLabels())
item = xbmcgui.ListItem(
path=response.geturl(),
label=_infoLabels["label"],
label2=_infoLabels["label2"],
thumbnailImage=_infoLabels["thumbnail"])
item.setArt({
"poster": _infoLabels["artposter"],
"banner": _infoLabels["artbanner"],
"fanart": _infoLabels["artfanart"]
})
item.setInfo(type='Video', infoLabels=_infoLabels)
xbmcplugin.setResolvedUrl(HANDLE, True, item)
return
payload = response.read()
try:
if payload:
return json.loads(payload)
except:
raise Exception(payload)
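# Entry point for plugin:// navigation: the Kodi plugin URL is rewritten against the local quasard HTTP service and the returned JSON directory is rendered as list items.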
def run(url_suffix=""):
if not os.path.exists(os.path.join(ADDON_PATH, ".firstrun")):
notify(getLocalizedString(30101))
system_information()
return
donatePath = os.path.join(ADDON_PATH, ".donate")
if not os.path.exists(donatePath):
with open(donatePath, "w"):
os.utime(donatePath, None)
dialog = xbmcgui.Dialog()
dialog.ok("Quasar", getLocalizedString(30141))
socket.setdefaulttimeout(int(ADDON.getSetting("buffer_timeout")))
urllib2.install_opener(urllib2.build_opener(NoRedirectHandler()))
# Pause currently playing Quasar file to avoid doubling requests
if xbmc.Player().isPlaying() and ADDON_ID in xbmc.Player().getPlayingFile():
xbmc.Player().pause()
url = sys.argv[0].replace("plugin://%s" % ADDON_ID, QUASARD_HOST + url_suffix) + sys.argv[2]
log.debug("Requesting %s from %s" % (url, repr(sys.argv)))
try:
data = _json(url)
except urllib2.URLError as e:
if 'Connection refused' in e.reason:
notify(getLocalizedString(30116), time=7000)
else:
import traceback
map(log.error, traceback.format_exc().split("\n"))
notify(e.reason, time=7000)
return
except Exception as e:
import traceback
map(log.error, traceback.format_exc().split("\n"))
try:
msg = unicode(e)
except:
try:
msg = str(e)
except:
msg = repr(e)
notify(getLocalizedLabel(msg), time=7000)
return
if not data:
return
if data["content_type"]:
content_type = data["content_type"]
if data["content_type"].startswith("menus"):
content_type = data["content_type"].split("_")[1]
xbmcplugin.addSortMethod(HANDLE, xbmcplugin.SORT_METHOD_UNSORTED)
if content_type != "tvshows":
xbmcplugin.addSortMethod(HANDLE, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE)
xbmcplugin.addSortMethod(HANDLE, xbmcplugin.SORT_METHOD_DATE)
xbmcplugin.addSortMethod(HANDLE, xbmcplugin.SORT_METHOD_GENRE)
xbmcplugin.setContent(HANDLE, content_type)
listitems = range(len(data["items"]))
for i, item in enumerate(data["items"]):
# Translate labels
if item["label"][0:8] == "LOCALIZE":
item["label"] = unicode(getLocalizedLabel(item["label"]), 'utf-8')
if item["label2"][0:8] == "LOCALIZE":
item["label2"] = getLocalizedLabel(item["label2"])
listItem = xbmcgui.ListItem(label=item["label"], label2=item["label2"], iconImage=item["icon"], thumbnailImage=item["thumbnail"])
if item.get("info"):
listItem.setInfo("video", item["info"])
if item.get("stream_info"):
for type_, values in item["stream_info"].items():
listItem.addStreamInfo(type_, values)
if item.get("art"):
listItem.setArt(item["art"])
elif ADDON.getSetting('default_fanart') == 'true' and item["label"] != unicode(getLocalizedString(30218), 'utf-8'):
fanart = os.path.join(ADDON_PATH, "fanart.jpg")
listItem.setArt({'fanart': fanart})
if item.get("context_menu"):
# Translate context menus
for m, menu in enumerate(item["context_menu"]):
if menu[0][0:8] == "LOCALIZE":
menu[0] = getLocalizedLabel(menu[0])
listItem.addContextMenuItems(item["context_menu"])
listItem.setProperty("isPlayable", item["is_playable"] and "true" or "false")
if item.get("properties"):
for k, v in item["properties"].items():
listItem.setProperty(k, v)
listitems[i] = (item["path"], listItem, not item["is_playable"])
xbmcplugin.addDirectoryItems(HANDLE, listitems, totalItems=len(listitems))
# Set ViewMode
if data["content_type"]:
viewMode = ADDON.getSetting("viewmode_%s" % data["content_type"])
if viewMode:
try:
xbmc.executebuiltin('Container.SetViewMode(%s)' % viewMode)
except Exception as e:
log.warning("Unable to SetViewMode(%s): %s" % (viewMode, repr(e)))
xbmcplugin.endOfDirectory(HANDLE, succeeded=True, updateListing=False, cacheToDisc=True)

View File

@@ -0,0 +1,56 @@
import xbmc
import sys
import platform
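# Map the running host to the "os_arch" folder naming used for the bundled daemon binaries.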
def get_platform():
build = xbmc.getInfoLabel("System.BuildVersion")
kodi_version = int(build.split()[0][:2])
ret = {
"auto_arch": sys.maxsize > 2 ** 32 and "64-bit" or "32-bit",
"arch": sys.maxsize > 2 ** 32 and "x64" or "x86",
"os": "",
"version": platform.release(),
"kodi": kodi_version,
"build": build
}
if xbmc.getCondVisibility("system.platform.android"):
ret["os"] = "android"
if "arm" in platform.machine() or "aarch" in platform.machine():
ret["arch"] = "arm"
if "64" in platform.machine() and ret["auto_arch"] == "64-bit":
ret["arch"] = "arm"
#ret["arch"] = "x64" #The binary is corrupted in install package
elif xbmc.getCondVisibility("system.platform.linux"):
ret["os"] = "linux"
if "aarch" in platform.machine() or "arm64" in platform.machine():
if xbmc.getCondVisibility("system.platform.linux.raspberrypi"):
ret["arch"] = "armv7"
elif ret["auto_arch"] == "32-bit":
ret["arch"] = "armv7"
elif ret["auto_arch"] == "64-bit":
ret["arch"] = "arm64"
elif platform.architecture()[0].startswith("32"):
ret["arch"] = "arm"
else:
ret["arch"] = "arm64"
elif "armv7" in platform.machine():
ret["arch"] = "armv7"
elif "arm" in platform.machine():
ret["arch"] = "arm"
elif xbmc.getCondVisibility("system.platform.xbox"):
ret["os"] = "windows"
ret["arch"] = "x64"
elif xbmc.getCondVisibility("system.platform.windows"):
ret["os"] = "windows"
if platform.machine().endswith('64'):
ret["arch"] = "x64"
elif xbmc.getCondVisibility("system.platform.osx"):
ret["os"] = "darwin"
ret["arch"] = "x64"
elif xbmc.getCondVisibility("system.platform.ios"):
ret["os"] = "ios"
ret["arch"] = "arm"
return ret
PLATFORM = get_platform()

View File

@@ -0,0 +1,72 @@
import platform
import xbmc
import xbmcgui
from quasar.logger import log
from quasar.osarch import PLATFORM
from quasar.addon import ADDON, ADDON_NAME, ADDON_ICON
def notify(message, header=ADDON_NAME, time=5000, image=ADDON_ICON):
sound = ADDON.getSetting('do_not_disturb') == 'false'
dialog = xbmcgui.Dialog()
return dialog.notification(toUtf8(header), toUtf8(message), toUtf8(image), time, sound)
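# Resolve labels of the form "LOCALIZE[30123]"; the ";;"-separated variant
# (e.g. "LOCALIZE[30123];;LOCALIZE[30456];;foo") formats the first translated
# string with the remaining parts, translating any part that is itself a LOCALIZE tag.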
def getLocalizedLabel(label):
try:
if "LOCALIZE" not in label:
return label
if ";;" not in label and label.endswith(']'):
return getLocalizedString(int(label[9:-1]))
else:
parts = label.split(";;")
translation = getLocalizedString(int(parts[0][9:14]))
for i, part in enumerate(parts[1:]):
if part[0:8] == "LOCALIZE":
parts[i + 1] = getLocalizedString(int(part[9:14]))
return (translation.decode('utf-8', 'replace') % tuple(parts[1:])).encode('utf-8', 'ignore')
except:
return label
def getLocalizedString(stringId):
try:
return ADDON.getLocalizedString(stringId).encode('utf-8', 'ignore')
except:
return stringId
def toUtf8(string):
if isinstance(string, unicode):
return string.encode('utf-8', 'ignore')
return string
def system_information():
build = xbmc.getInfoLabel("System.BuildVersion")
log.info("System information: %(os)s_%(arch)s %(version)s" % PLATFORM)
log.info("Kodi build version: %s" % build)
log.info("OS type: %s" % platform.system())
log.info("uname: %s" % repr(platform.uname()))
return PLATFORM
def getShortPath(path):
if PLATFORM["os"] == "windows":
return getWindowsShortPath(path)
return path
def getWindowsShortPath(path):
try:
import ctypes
import ctypes.wintypes
ctypes.windll.kernel32.GetShortPathNameW.argtypes = [
ctypes.wintypes.LPCWSTR, # lpszLongPath
ctypes.wintypes.LPWSTR, # lpszShortPath
ctypes.wintypes.DWORD # cchBuffer
]
ctypes.windll.kernel32.GetShortPathNameW.restype = ctypes.wintypes.DWORD
buf = ctypes.create_unicode_buffer(1024) # adjust buffer size, if necessary
ctypes.windll.kernel32.GetShortPathNameW(path, buf, len(buf))
return buf.value
except:
return path

View File

@@ -5,8 +5,11 @@
import os
import json
import traceback
import xbmc
import xbmcaddon
from platformcode import config, logger
from platformcode import config, logger, platformtools
from core import jsontools
from core import filetools
@@ -54,6 +57,15 @@ def init():
"""
try:
#QUASAR: Preguntamos si se hacen modificaciones a Quasar
if not filetools.exists(os.path.join(config.get_data_path(), "quasar.json")) and not config.get_setting('addon_quasar_update', default=False):
question_update_external_addon("quasar")
#QUASAR: Hacemos las modificaciones a Quasar, si está permitido, y si está instalado
if config.get_setting('addon_quasar_update', default=False):
if not update_external_addon("quasar"):
platformtools.dialog_notification("Actualización Quasar", "Ha fallado. Consulte el log")
#Existe carpeta "custom_code" ? Si no existe se crea y se sale
custom_code_dir = os.path.join(config.get_data_path(), 'custom_code')
if os.path.exists(custom_code_dir) == False:
@@ -70,7 +82,7 @@ def init():
#Se verifica si la versión del .json y del add-on son iguales. Si es así se sale. Si no se copia "custom_code" al add-on
verify_copy_folders(custom_code_dir, custom_code_json_path)
except:
pass
logger.error(traceback.format_exc())
def create_folder_structure(custom_code_dir):
@@ -88,11 +100,11 @@ def create_folder_structure(custom_code_dir):
return
def create_json(custom_code_json_path):
def create_json(custom_code_json_path, json_name=json_data_file_name):
logger.info()
#Guardamos el json con la versión de Alfa vacía, para permitir hacer la primera copia
json_data_file = filetools.join(custom_code_json_path, json_data_file_name)
json_data_file = filetools.join(custom_code_json_path, json_name)
json_file = open(json_data_file, "a+")
json_file.write(json.dumps({"addon_version": ""}))
json_file.close()
@@ -123,3 +135,55 @@ def verify_copy_folders(custom_code_dir, custom_code_json_path):
filetools.write(json_data_file, jsontools.dump(json_data))
return
def question_update_external_addon(addon_name):
logger.info(addon_name)
#Verificamos que el addon está instalado
stat = False
if xbmc.getCondVisibility('System.HasAddon("plugin.video.%s")' % addon_name):
#Si es la primera vez que se pregunta por la actualización del addon externo, recogemos la respuesta,
# guardamos un .json en userdata/alfa para no volver a preguntar otra vez, y se actualiza el setting en Alfa.
stat = platformtools.dialog_yesno('Actualización de %s' % addon_name.capitalize(), '¿Quiere que actualicemos Quasar para que sea compatible con las últimas versiones de Kodi? (recomendado: SÍ)', '', 'Si actualiza Quasar, reinicie Kodi en un par de minutos')
#Con la respuesta actualizamos la variable en Alfa settings.xml. Se puede cambiar en Ajustes de Alfa, Otros
if stat:
config.set_setting('addon_quasar_update', True)
else:
config.set_setting('addon_quasar_update', False)
#Creamos un .json en userdata para no volver a preguntar otra vez
create_json(config.get_data_path(), "%s.json" % addon_name)
return stat
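# Copy Alfa's patched module files (lib/<addon_name>) over the external addon's site-packages copy so it keeps working on current Kodi builds.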
def update_external_addon(addon_name):
logger.info(addon_name)
#Verificamos que el addon está instalado
if xbmc.getCondVisibility('System.HasAddon("plugin.video.%s")' % addon_name):
#Path de actualizaciones de Alfa
alfa_addon_updates = filetools.join(config.get_runtime_path(), filetools.join("lib", addon_name))
#Path de destino en addon externo
__settings__ = xbmcaddon.Addon(id="plugin.video." + addon_name)
if addon_name.lower() in ['quasar', 'elementum']:
addon_path = filetools.join(xbmc.translatePath(__settings__.getAddonInfo('Path')), filetools.join("resources", filetools.join("site-packages", addon_name)))
else:
addon_path = ''
#Hay modificaciones en Alfa? Las copiamos al addon
if filetools.exists(alfa_addon_updates) and filetools.exists(addon_path):
for root, folders, files in os.walk(alfa_addon_updates):
for file in files:
input_file = filetools.join(root, file)
output_file = input_file.replace(alfa_addon_updates, addon_path)
if filetools.copy(input_file, output_file, silent=True) == False:
logger.error('Error en la copia: Input: %s o Output: %s' % (input_file, output_file))
return False
return True
else:
logger.error('Alguna carpeta no existe: Alfa: %s o %s: %s' % (alfa_addon_updates, addon_name, addon_path))
return False

View File

@@ -498,11 +498,11 @@ msgid "Search for actor/actress"
msgstr ""
msgctxt "#60000"
msgid "Filtra server (Black List)"
msgid "Filter server (Black List)"
msgstr ""
msgctxt "#60001"
msgid "Filtra server (Black List)\nNessun collegamento disponibile che soddisfi i requisiti della Black list.\nRiprova modificando il filtro in 'Configurazione Server"
msgid "No links available that meets the requirements of the Black list. Try again by changing the filter in 'Server Configuration"
msgstr ""
msgctxt "#60003"

View File

@@ -498,12 +498,12 @@ msgid "Search for actor/actress"
msgstr "Cerca attore/attrice"
msgctxt "#60000"
msgid "Filtra server (Black List)"
msgid "Filter server (Black List)"
msgstr "Filtra server (Black List)"
msgctxt "#60001"
msgid "Filtra server (Black List)\nNessun collegamento disponibile che soddisfi i requisiti della Black list.\nRiprova modificando il filtro in 'Configurazione Server"
msgstr "Filtra server (Black List)\nNessun collegamento disponibile che soddisfi i requisiti della Black list.\nRiprova modificando il filtro in 'Configurazione Server"
msgid "No links available that meets the requirements of the Black list. Try again by changing the filter in 'Server Configuration"
msgstr "Nessun collegamento disponibile che soddisfi i requisiti della Black list. Riprova modificando il filtro in 'Configurazione Server"
msgctxt "#60003"
msgid "Connessione con %s"

View File

@@ -502,8 +502,8 @@ msgid "Filter server (Black List)"
msgstr "Filtrar servidores (Lista Negra)"
msgctxt "#60001"
msgid "Filter server (Black List)\nNo connection available that meets the requirements of the Black list.\nTry again by changing the filter in 'Server Configuration"
msgstr "Filtrar servidores (Lista Negra)\nNo hay enlaces disponibles que cumplan los requisitos de su Lista Negra.\nPruebe de nuevo modificando el fíltro en 'Configuracíon Servidores"
msgid "No links available that meets the requirements of the Black list. Try again by changing the filter in 'Server Configuration"
msgstr "No hay enlaces disponibles que cumplan los requisitos de su Lista Negra. Pruebe de nuevo modificando el filtro en 'Configuracíon - Servidores"
msgctxt "#60003"
msgid "Connecting with %s"

View File

@@ -502,8 +502,8 @@ msgid "Filter server (Black List)"
msgstr "Filtrar servidores (Lista Negra)"
msgctxt "#60001"
msgid "Filter server (Black List)\nNo connection available that meets the requirements of the Black list.\nTry again by changing the filter in 'Server Configuration"
msgstr "Filtrar servidores (Lista Negra)\nNo hay enlaces disponibles que cumplan los requisitos de su Lista Negra.\nPruebe de nuevo modificando el fíltro en 'Configuracíon Servidores"
msgid "No links available that meets the requirements of the Black list. Try again by changing the filter in 'Server Configuration"
msgstr "No hay enlaces disponibles que cumplan los requisitos de su Lista Negra. Pruebe de nuevo modificando el filtro en 'Configuracíon - Servidores"
msgctxt "#60003"
msgid "Connecting with %s"

View File

@@ -502,8 +502,8 @@ msgid "Filter server (Black List)"
msgstr "Filtrar servidores (Lista Negra)"
msgctxt "#60001"
msgid "Filter server (Black List)\nNo connection available that meets the requirements of the Black list.\nTry again by changing the filter in 'Server Configuration"
msgstr "Filtrar servidores (Lista Negra)\nNo hay enlaces disponibles que cumplan los requisitos de su Lista Negra.\nPruebe de nuevo modificando el fíltro en 'Configuracíon Servidores"
msgid "No links available that meets the requirements of the Black list. Try again by changing the filter in 'Server Configuration"
msgstr "No hay enlaces disponibles que cumplan los requisitos de su Lista Negra. Pruebe de nuevo modificando el filtro en 'Configuracíon - Servidores"
msgctxt "#60003"
msgid "Connecting with %s"

View File

@@ -124,9 +124,13 @@
<setting type="sep"/>
<setting label="Gestión de actualizaciones urgentes de módulos de Alfa (Quick Fixes):" type="lsep"/>
<setting id="addon_update_timer" type="labelenum" values="0|6|12|24" label="Intervalo entre actualizaciones automáticas (horas)" default="12"/>
<setting id="addon_update_message" type="bool" label="Quiere ver mensajes de las actualizaciones" default="false"/>
<setting id="addon_update_message" type="bool" label="¿Quiere ver mensajes de las actualizaciones?" default="false"/>
<setting label="Lista activa" type="text" id="lista_activa" default="alfavorites-default.json" visible="false"/>
<setting type="sep"/>
<setting label="Gestión de actualizaciones de otros addon relacionados con Alfa:" type="lsep"/>
<setting id="addon_quasar_update" type="bool" label="¿Quiere actualizar Quasar para evitar errores?" default="false"/>
</category>
</settings>