Updates

- allcalidad: host URL updated (now https)
- animemovil: removed, the site no longer exists
- canalpelis: fixed episode listings in series
- ciberpeliculashd: better video library compatibility
- downloads: downloads now default to "descargas" instead of "videoteca" (see the sketch below)
- mundoflv: removed, the site no longer exists
- pelisplay: fixed seasons in the video library
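A minimal sketch of what the downloads change means at runtime. The defaults flipped in downloads.json further down in this diff (library_add, library_move, browser) are read with the same config.get_setting(name, channel) call style used elsewhere in this patch; the helper below and its exact logic are illustrative only, not the actual channel code.

    from platformcode import config

    def downloads_use_videolibrary():
        # "library_add" and "library_move" now default to False, so on a fresh
        # install finished downloads stay in the "descargas" folder instead of
        # being added or moved into the "videoteca".
        add = config.get_setting("library_add", "downloads")
        move = config.get_setting("library_move", "downloads")
        return bool(add or move)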
@@ -19,7 +19,7 @@ list_servers = ['rapidvideo', 'streamango', 'fastplay', 'flashx', 'openload', 'v
__channel__='allcalidad'

-host = "http://allcalidad.net/"
+host = "https://allcalidad.net/"

try:
    __modo_grafico__ = config.get_setting('modo_grafico', __channel__)
@@ -1,43 +0,0 @@
-{
-    "id": "animemovil",
-    "name": "Animemovil",
-    "active": true,
-    "adult": false,
-    "language": ["cast", "lat"],
-    "thumbnail": "https://s1.postimg.cc/92ji7stii7/animemovil1.png",
-    "banner": "",
-    "categories": [
-        "anime"
-    ],
-    "settings": [
-        {
-            "id": "modo_grafico",
-            "type": "bool",
-            "label": "Buscar información extra",
-            "default": true,
-            "enabled": true,
-            "visible": true
-        },
-        {
-            "id": "include_in_global_search",
-            "type": "bool",
-            "label": "Incluir en busqueda global",
-            "default": true,
-            "enabled": true,
-            "visible": true
-        },
-        {
-            "id": "perfil",
-            "type": "list",
-            "label": "Perfil de color",
-            "default": 0,
-            "enabled": true,
-            "visible": true,
-            "lvalues": [
-                "Perfil 3",
-                "Perfil 2",
-                "Perfil 1"
-            ]
-        }
-    ]
-}
@@ -1,338 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import re
|
||||
|
||||
from channels import renumbertools
|
||||
from core import httptools
|
||||
from core import servertools
|
||||
from core import jsontools
|
||||
from core import scrapertools
|
||||
from core.item import Item
|
||||
from platformcode import platformtools, config, logger
|
||||
|
||||
|
||||
|
||||
__modo_grafico__ = config.get_setting('modo_grafico', 'animemovil')
|
||||
__perfil__ = ''
|
||||
|
||||
# Fijar perfil de color
|
||||
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFFFFD700'],
|
||||
['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFFFFD700'],
|
||||
['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFFFFD700']]
|
||||
if __perfil__ < 3:
|
||||
color1, color2, color3, color4, color5 = perfil[__perfil__]
|
||||
else:
|
||||
color1 = color2 = color3 = color4 = color5 = ""
|
||||
|
||||
host = "http://animemovil.com"
|
||||
|
||||
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
|
||||
itemlist = []
|
||||
|
||||
itemlist.append(Item(channel=item.channel, action="recientes", title="Episodios Recientes", thumbnail=item.thumbnail,
|
||||
url=host, text_color=color1, contentType="tvshow", extra="recientes"))
|
||||
|
||||
itemlist.append(Item(channel=item.channel, action="listado", title="Anime", thumbnail=item.thumbnail,
|
||||
url=host+'/api/buscador?q=&letra=ALL&genero=ALL&estado=2&offset=0&limit=20', text_color=color1, contentType="tvshow", extra="recientes"))
|
||||
|
||||
itemlist.append(Item(channel=item.channel, action="list_by_json", title="En emisión", thumbnail=item.thumbnail,
|
||||
text_color=color2, contentType="tvshow"))
|
||||
itemlist.append(Item(channel=item.channel, action="indices", title="Índices", thumbnail=item.thumbnail,
|
||||
text_color=color2))
|
||||
|
||||
|
||||
itemlist.append(Item(channel=item.channel, action="search", title="Buscar...",
|
||||
thumbnail=item.thumbnail, text_color=color3))
|
||||
|
||||
itemlist.append(item.clone(title="Configurar canal", action="openconfig", text_color=color5, folder=False))
|
||||
if renumbertools.context:
|
||||
itemlist = renumbertools.show_option(item.channel, itemlist)
|
||||
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def openconfig(item):
|
||||
ret = platformtools.show_channel_settings()
|
||||
platformtools.itemlist_refresh()
|
||||
return ret
|
||||
|
||||
|
||||
def search(item, texto):
|
||||
item.url = "%s/api/buscador?q=%s&letra=ALL&genero=ALL&estado=2&offset=0&limit=30" % (host, texto.replace(" ", "+"))
|
||||
return list_by_json(item)
|
||||
|
||||
|
||||
def recientes(item):
|
||||
logger.info()
|
||||
item.contentType = "tvshow"
|
||||
itemlist = []
|
||||
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r'\n|\s{2,}','', data)
|
||||
|
||||
bloque = scrapertools.find_single_match(data, '<ul class="hover">(.*?)</ul>')
|
||||
patron = '<li><a href="([^"]+)" title="([^"]+)".*?src="([^"]+)"'
|
||||
matches = scrapertools.find_multiple_matches(bloque, patron)
|
||||
for url, title, thumb in matches:
|
||||
url = host + url
|
||||
|
||||
try:
|
||||
contentTitle = re.split(r"(?i) \d+ (?:Sub Español|Audio Español|Español Latino)", title)[0]
|
||||
except:
|
||||
contentTitle = ""
|
||||
contentTitle = re.sub(r"(?i) Ova| Especiales| \(Pelicula[s]*\)| \(Película[s]*\)| Sub| Español| Peliculas| Audio| Latino", "", contentTitle)
|
||||
|
||||
tipo = "tvshow"
|
||||
show = contentTitle
|
||||
action = "episodios"
|
||||
context = renumbertools.context(item)
|
||||
if item.extra == "recientes":
|
||||
action = "findvideos"
|
||||
context = ""
|
||||
if not item.extra and (url.endswith("-pelicula/") or url.endswith("-pelicula")):
|
||||
tipo = "movie"
|
||||
show = ""
|
||||
action = "peliculas"
|
||||
if not thumb.startswith("http"):
|
||||
thumb = "http:%s" % thumb
|
||||
infoLabels = {'filtro': {"original_language": "ja"}.items()}
|
||||
itemlist.append(item.clone(action=action, title=title, url=url, thumbnail=thumb, text_color=color3,
|
||||
contentTitle=contentTitle, contentSerieName=show, infoLabels=infoLabels,
|
||||
thumb_=thumb, contentType=tipo, context=context))
|
||||
|
||||
try:
|
||||
from core import tmdb
|
||||
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
|
||||
if item.extra and itemlist:
|
||||
for it in itemlist:
|
||||
it.thumbnail = it.thumb_
|
||||
except:
|
||||
pass
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def listado(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = jsontools.load(httptools.downloadpage(item.url).data)
|
||||
status = data.get('status')
|
||||
data= data.get('result')
|
||||
for it in data.get("items", []):
|
||||
scrapedtitle = it["title"]
|
||||
url = "%s/%s/" % (host, it["slug"])
|
||||
thumb = 'http://media.animemovil.com/animes/%s/wallpaper_small.jpg' % it['id']
|
||||
title = re.sub(r"(?i) Ova| Especiales| \(Pelicula[s]*\)| \(Película[s]*\)| Sub| Español| Peliculas| Audio| Latino", "", scrapedtitle)
|
||||
|
||||
tipo = "tvshow"
|
||||
show = title
|
||||
action = "episodios"
|
||||
if url.endswith("-pelicula/") or url.endswith("-pelicula"):
|
||||
tipo = "movie"
|
||||
show = ""
|
||||
action = "peliculas"
|
||||
infoLabels = {'filtro': {"original_language": "ja"}.items()}
|
||||
itemlist.append(item.clone(action=action, title=scrapedtitle, url=url, thumbnail=thumb, text_color=color3,
|
||||
contentTitle=title, contentSerieName=show, infoLabels=infoLabels,
|
||||
context=renumbertools.context(item), contentType=tipo))
|
||||
try:
|
||||
from core import tmdb
|
||||
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
|
||||
except:
|
||||
pass
|
||||
|
||||
if status and itemlist:
|
||||
offset = scrapertools.find_single_match(item.url, 'offset=(\d+)')
|
||||
if offset:
|
||||
offset = int(offset) + 2
|
||||
else:
|
||||
offset = 0
|
||||
url = re.sub(r'offset=\d+', 'offset=%s' % offset, item.url)
|
||||
itemlist.append(Item(channel=item.channel, action="listado", url=url, title=">> Página Siguiente",
|
||||
thumbnail=item.thumbnail, text_color=color2))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def indices(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
|
||||
if "Índices" in item.title:
|
||||
itemlist.append(item.clone(title="Por Género", url="%s/anime" % host))
|
||||
itemlist.append(item.clone(title="Por Letra", url="%s/anime" % host))
|
||||
itemlist.append(item.clone(action="list_by_json", title="Lista completa de Animes",
|
||||
url="%s/api/buscador?q=&letra=ALL&genero=ALL&estado=2&offset=0&limit=20" % host))
|
||||
else:
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub('\n|\s{2,}', '', data)
|
||||
if 'Letra' in item.title:
|
||||
bloque = scrapertools.find_single_match(data, '<select name="letra"(.*?)</select>')
|
||||
patron = '<option value="(\w)"'
|
||||
elif 'Género' in item.title:
|
||||
bloque = scrapertools.find_single_match(data, '<select name="genero"(.*?)</select>')
|
||||
patron = '<option value="(\d+.*?)/'
|
||||
|
||||
matches = scrapertools.find_multiple_matches(bloque, patron)
|
||||
|
||||
for title in matches:
|
||||
if "Letra" in item.title:
|
||||
url = '%s/api/buscador?q=&letra=%s&genero=ALL&estado=2&offset=0&limit=20' % (host, title)
|
||||
else:
|
||||
value = scrapertools.find_single_match(title, '(\d+)"')
|
||||
title = scrapertools.find_single_match(title, '\d+">(.*?)<')
|
||||
url = '%s/api/buscador?q=&letra=ALL&genero=%s&estado=2&offset=0&limit=20' % (host, value)
|
||||
|
||||
itemlist.append(item.clone(action="list_by_json", url=url, title=title))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def episodios(item):
|
||||
logger.info()
|
||||
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub('\n|\s{2,}', '', data)
|
||||
show = scrapertools.find_single_match(data, '<div class="x-title">(.*?)</div>')
|
||||
show = re.sub(r"(?i) Ova| Especiales| \(Pelicula[s]*\)| \(Película[s]*\)| Sub| Español| Peliculas| Audio| Latino", "", show)
|
||||
|
||||
if not item.infoLabels["plot"]:
|
||||
item.infoLabels["plot"] = scrapertools.find_single_match(data, '<div class="x-sinopsis">\s*(.*?)</div>')
|
||||
|
||||
bloque = scrapertools.find_single_match(data, '<ul class="list"(.*?)</ul>')
|
||||
matches = scrapertools.find_multiple_matches(bloque, '<li><a href="([^"]+)" title="([^"]+)"')
|
||||
for url, title in matches:
|
||||
url = host + url
|
||||
epi = scrapertools.find_single_match(title, '.+?(\d+) (?:Sub|Audio|Español)')
|
||||
#epi = scrapertools.find_single_match(title, '(?i)%s.*? (\d+) (?:Sub|Audio|Español)' % item.contentSerieName)
|
||||
new_item = item.clone(action="findvideos", url=url, title=title, extra="")
|
||||
if epi:
|
||||
if "Especial" in title:
|
||||
epi=0
|
||||
season, episode = renumbertools.numbered_for_tratk(
|
||||
item.channel, item.contentSerieName, 1, int(epi))
|
||||
new_item.infoLabels["episode"] = episode
|
||||
new_item.infoLabels["season"] = season
|
||||
new_item.title = "%sx%s %s" % (season, episode, title)
|
||||
itemlist.append(new_item)
|
||||
|
||||
if item.infoLabels.get("tmdb_id") or item.extra == "recientes" or item.extra == "completo":
|
||||
try:
|
||||
from core import tmdb
|
||||
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
|
||||
except:
|
||||
pass
|
||||
|
||||
if config.get_videolibrary_support() and itemlist:
|
||||
itemlist.append(Item(channel=item.channel, title="Añadir serie a la videoteca", url=item.url,
|
||||
action="add_serie_to_library", extra="episodios", contentTitle=item.contentTitle,
|
||||
contentSerieName=item.contentSerieName, text_color=color4, fanart=item.fanart,
|
||||
thumbnail=item.thumbnail))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def list_by_json(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
repeat = 1
|
||||
status = False
|
||||
if item.url =='':
|
||||
item.url = host+"/api/buscador?limit=30&estado=1&dia=%s"
|
||||
repeat = 6
|
||||
for element in range(0,repeat):
|
||||
if repeat != 1:
|
||||
data = jsontools.load(httptools.downloadpage(item.url % element).data)
|
||||
else:
|
||||
data = jsontools.load(httptools.downloadpage(item.url).data)
|
||||
|
||||
status = data.get('status')
|
||||
json_data = data.get('result')
|
||||
elem_data = json_data['items']
|
||||
|
||||
for item_data in elem_data:
|
||||
url = '%s/%s/' % (host, item_data['slug'])
|
||||
title = item_data['title']
|
||||
title = re.sub(r"(?i) Ova| Especiales| \(Pelicula[s]*\)| \(Película[s]*\)| Sub Español| Peliculas", "",
|
||||
title)
|
||||
thumb = 'http://media.animemovil.com/animes/%s/wallpaper_small.jpg' % item_data['id']
|
||||
infoLabels = {'filtro': {"original_language": "ja"}.items()}
|
||||
itemlist.append(
|
||||
item.clone(action="episodios", title=title, url=url, thumbnail=thumb, text_color=color3,
|
||||
contentTitle=title, contentSerieName=title, extra="recientes",
|
||||
context=renumbertools.context(item), infoLabels=infoLabels))
|
||||
if status and itemlist:
|
||||
offset = scrapertools.find_single_match(item.url, 'offset=(\d+)')
|
||||
if offset:
|
||||
offset = int(offset) + 2
|
||||
else:
|
||||
offset = 0
|
||||
url = re.sub(r'offset=\d+', 'offset=%s' % offset, item.url)
|
||||
itemlist.append(Item(channel=item.channel, action="listado", url=url, title=">> Página Siguiente",
|
||||
thumbnail=item.thumbnail, text_color=color2))
|
||||
return itemlist
|
||||
|
||||
|
||||
def findvideos(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r'\n|\s{2,}', '', data)
|
||||
strm_id = scrapertools.find_single_match(data, '"id": (.*?),')
|
||||
streams = scrapertools.find_single_match(data, '"stream": (.*?)};')
|
||||
dict_strm = jsontools.load(streams)
|
||||
base_url = 'http:%s%s/' % (dict_strm['accessPoint'], strm_id)
|
||||
for server in dict_strm['servers']:
|
||||
expire = dict_strm['expire']
|
||||
signature = dict_strm['signature']
|
||||
last_modify = dict_strm['last_modify']
|
||||
callback = 'playerWeb'
|
||||
|
||||
strm_url = base_url +'%s?expire=%s&callback=%s&signature=%s&last_modify=%s' % (server, expire, callback,
|
||||
signature, last_modify)
|
||||
try:
|
||||
strm_data = httptools.downloadpage(strm_url).data
|
||||
strm_data = scrapertools.unescape(strm_data)
|
||||
title = '%s'
|
||||
language = ''
|
||||
if server not in ['fire', 'meph']:
|
||||
urls = scrapertools.find_multiple_matches(strm_data, '"(?:file|src)"*?:.*?"(.*?)"')
|
||||
for url in urls:
|
||||
if url != '':
|
||||
url = url.replace ('\\/','/')
|
||||
itemlist.append(Item(channel=item.channel, title=title, url=url, action='play'))
|
||||
elif server in ['fire', 'mpeh']:
|
||||
url = scrapertools.find_single_match(strm_data, 'xmlhttp.open(\"GET\", \"(.*?)\"')
|
||||
if url != '':
|
||||
url = url.replace('\\/', '/')
|
||||
itemlist.append(Item(channel=item.channel, title=url, url=url, action='play'))
|
||||
else:
|
||||
continue
|
||||
except:
|
||||
pass
|
||||
|
||||
|
||||
servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server)
|
||||
return itemlist
|
||||
|
||||
def newest(categoria):
|
||||
logger.info()
|
||||
item = Item()
|
||||
try:
|
||||
item.url = host
|
||||
item.extra = "novedades"
|
||||
itemlist = recientes(item)
|
||||
# Se captura la excepción, para no interrumpir al canal novedades si un canal falla
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
logger.error("%s" % line)
|
||||
return []
|
||||
|
||||
return itemlist
|
||||
@@ -289,9 +289,9 @@ def temporadas(item):
    data = httptools.downloadpage(item.url).data
    datas = re.sub(r"\n|\r|\t| |<br>", "", data)
-    patron = '<span class="title">([^<]+)<i>.*?' # numeros de temporadas
-    patron += '<img src="([^"]+)"></a></div>' # capitulos
+    patron = "<span class='title'>([^<]+)<i>.*?" # numeros de temporadas
+    patron += "<img src='([^']+)'>" # capitulos

    # logger.info(datas)
    matches = scrapertools.find_multiple_matches(datas, patron)
    if len(matches) > 1:
        for scrapedseason, scrapedthumbnail in matches:
@@ -331,14 +331,13 @@ def episodios(item):
    data = httptools.downloadpage(item.url).data
    datas = re.sub(r"\n|\r|\t| |<br>", "", data)
    # logger.info(datas)
-    patron = '<div class="imagen"><a href="([^"]+)">.*?' # url cap, img
-    patron += '<div class="numerando">(.*?)</div>.*?' # numerando cap
-    patron += '<a href="[^"]+">([^<]+)</a>' # title de episodios
+    patron = "<div class='imagen'>.*?"
+    patron += "<div class='numerando'>(.*?)</div>.*?"
+    patron += "<a href='([^']+)'>([^<]+)</a>"

    matches = scrapertools.find_multiple_matches(datas, patron)

-    for scrapedurl, scrapedtitle, scrapedname in matches:
+    for scrapedtitle, scrapedurl, scrapedname in matches:
        scrapedtitle = scrapedtitle.replace('--', '0')
        patron = '(\d+) - (\d+)'
        match = re.compile(patron, re.DOTALL).findall(scrapedtitle)
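The pattern rewrite above switches the scraped attributes from double to single quotes, presumably because the site markup changed. A quick self-contained check of the new episode pattern against a made-up snippet in that single-quoted style (the sample HTML and URL are illustrative, not taken from the site):

    import re

    datas = "<div class='imagen'><a href='https://example.com/serie-1x02/'><img src='thumb.jpg'></a>" \
            "<div class='numerando'>1 - 2</div><a href='https://example.com/serie-1x02/'>Episodio 2</a>"

    patron = "<div class='imagen'>.*?"
    patron += "<div class='numerando'>(.*?)</div>.*?"
    patron += "<a href='([^']+)'>([^<]+)</a>"

    # find_multiple_matches in the channel is essentially re.findall with DOTALL
    print(re.findall(patron, datas, re.DOTALL))
    # -> [('1 - 2', 'https://example.com/serie-1x02/', 'Episodio 2')]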
@@ -248,8 +248,8 @@ def findvideos(item):
        itemlist.append(Item(channel = item.channel))
    itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
                               text_color="magenta"))
-    # Opción "Añadir esta película a la biblioteca de KODI"
-    if item.extra != "library":
+    # Opción "Añadir esta película a la videoteca de KODI"
+    if item.contentChannel != "videolibrary":
        if config.get_videolibrary_support():
            itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
                                 action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
@@ -18,7 +18,7 @@
        "id": "library_add",
        "type": "bool",
        "label": "@70230",
-        "default": true,
+        "default": false,
        "enabled": true,
        "visible": true
    },
@@ -26,7 +26,7 @@
        "id": "library_move",
        "type": "bool",
        "label": "@70231",
-        "default": true,
+        "default": false,
        "enabled": "eq(-1,true)",
        "visible": true
    },
@@ -34,7 +34,7 @@
        "id": "browser",
        "type": "bool",
        "label": "@70232",
-        "default": false,
+        "default": true,
        "enabled": true,
        "visible": true
    },
@@ -6,8 +6,11 @@
import os
import re
+import time
+import unicodedata

from core import filetools
from core import jsontools
from core import scraper
from core import scrapertools
from core import servertools
@@ -53,7 +56,7 @@ def mainlist(item):
            title = TITLE_TVSHOW % (
                STATUS_COLORS[i.downloadStatus], i.downloadProgress, i.contentSerieName, i.contentChannel)

-            itemlist.append(Item(title=title, channel="descargas", action="mainlist", contentType="tvshow",
+            itemlist.append(Item(title=title, channel="downloads", action="mainlist", contentType="tvshow",
                                 contentSerieName=i.contentSerieName, contentChannel=i.contentChannel,
                                 downloadStatus=i.downloadStatus, downloadProgress=[i.downloadProgress],
                                 fanart=i.fanart, thumbnail=i.thumbnail))
@@ -308,7 +311,6 @@ def update_json(path, params):


def save_server_statistics(server, speed, success):
-    from core import jsontools
    if os.path.isfile(STATS_FILE):
        servers = jsontools.load(open(STATS_FILE, "rb").read())
    else:
@@ -330,7 +332,6 @@ def save_server_statistics(server, speed, success):


def get_server_position(server):
-    from core import jsontools
    if os.path.isfile(STATS_FILE):
        servers = jsontools.load(open(STATS_FILE, "rb").read())
    else:
@@ -360,7 +361,6 @@ def get_match_list(data, match_list, order_list=None, only_ascii=False, ignoreca
    coincidira con "Idioma Español" pero no con "Español" ya que la coincidencia mas larga tiene prioridad.

    """
-    import unicodedata
    match_dict = dict()
    matches = []
@@ -1,38 +0,0 @@
-{
-    "id": "mundoflv",
-    "name": "MundoFlv",
-    "active": false,
-    "adult": false,
-    "language": ["lat"],
-    "thumbnail": "https://s32.postimg.cc/h1ewz9hhx/mundoflv.png",
-    "banner": "mundoflv.png",
-    "categories": [
-        "tvshow"
-    ],
-    "settings": [
-        {
-            "id": "include_in_global_search",
-            "type": "bool",
-            "label": "Incluir en busqueda global",
-            "default": false,
-            "enabled": false,
-            "visible": false
-        },
-        {
-            "id": "filter_languages",
-            "type": "list",
-            "label": "Mostrar enlaces en idioma...",
-            "default": 0,
-            "enabled": true,
-            "visible": true,
-            "lvalues": [
-                "No filtrar",
-                "Latino",
-                "Español",
-                "VOS",
-                "VOSE",
-                "VO"
-            ]
-        }
-    ]
-}
@@ -1,655 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import re
|
||||
import urlparse
|
||||
|
||||
from channels import autoplay
|
||||
from channels import filtertools
|
||||
from core import httptools
|
||||
from core import scrapertools
|
||||
from core import servertools
|
||||
from core import tmdb
|
||||
from core.item import Item
|
||||
from platformcode import config, logger
|
||||
from channelselector import get_thumb
|
||||
|
||||
host = "http://mundoflv.com"
|
||||
thumbmx = 'http://flags.fmcdn.net/data/flags/normal/mx.png'
|
||||
thumbes = 'http://flags.fmcdn.net/data/flags/normal/es.png'
|
||||
thumben = 'http://flags.fmcdn.net/data/flags/normal/gb.png'
|
||||
thumbsub = 'https://s32.postimg.cc/nzstk8z11/sub.png'
|
||||
thumbtodos = 'https://s29.postimg.cc/4p8j2pkdj/todos.png'
|
||||
patrones = ['<<meta property="og:image" content="([^"]+)" \/>" \/>', '\/><\/a>([^*]+)<p><\/p>.*']
|
||||
|
||||
IDIOMAS = {'la': 'Latino',
|
||||
'es': 'Español',
|
||||
'sub': 'VOS',
|
||||
'vosi': 'VOSE',
|
||||
'en': 'VO'
|
||||
}
|
||||
list_language = IDIOMAS.values()
|
||||
list_quality = []
|
||||
list_servers = [
|
||||
'openload',
|
||||
'gamovideo',
|
||||
'powvideo',
|
||||
'streamplay',
|
||||
'streamin',
|
||||
'streame',
|
||||
'flashx',
|
||||
'nowvideo'
|
||||
]
|
||||
|
||||
list_quality = ['default']
|
||||
|
||||
audio = {'la': '[COLOR limegreen]LATINO[/COLOR]', 'es': '[COLOR yellow]ESPAÑOL[/COLOR]',
|
||||
'sub': '[COLOR orange]ORIGINAL SUBTITULADO[/COLOR]', 'en': '[COLOR red]Original[/COLOR]',
|
||||
'vosi': '[COLOR red]ORIGINAL SUBTITULADO INGLES[/COLOR]'
|
||||
}
|
||||
|
||||
headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
|
||||
['Referer', host]]
|
||||
|
||||
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
|
||||
autoplay.init(item.channel, list_servers, list_quality)
|
||||
itemlist = []
|
||||
|
||||
itemlist.append(Item(channel=item.channel,
|
||||
title="Series",
|
||||
action="todas",
|
||||
url=host,
|
||||
thumbnail=get_thumb('tvshows', auto=True),
|
||||
fanart='https://s27.postimg.cc/iahczwgrn/series.png'
|
||||
))
|
||||
|
||||
itemlist.append(Item(channel=item.channel,
|
||||
title="Alfabetico",
|
||||
action="letras",
|
||||
url=host,
|
||||
thumbnail=get_thumb('alphabet', auto=True),
|
||||
fanart='https://s17.postimg.cc/fwi1y99en/a-z.png'
|
||||
))
|
||||
|
||||
itemlist.append(Item(channel=item.channel,
|
||||
title="Mas vistas",
|
||||
action="masvistas",
|
||||
url=host,
|
||||
thumbnail=get_thumb('more watched', auto=True),
|
||||
fanart='https://s9.postimg.cc/wmhzu9d7z/vistas.png'
|
||||
))
|
||||
|
||||
itemlist.append(Item(channel=item.channel,
|
||||
title="Recomendadas",
|
||||
action="recomendadas",
|
||||
url=host,
|
||||
thumbnail=get_thumb('recomended', auto=True),
|
||||
fanart='https://s12.postimg.cc/s881laywd/recomendadas.png'
|
||||
))
|
||||
|
||||
itemlist.append(Item(channel=item.channel,
|
||||
title="Ultimas Agregadas",
|
||||
action="ultimas",
|
||||
url=host, thumbnail=get_thumb('last', auto=True),
|
||||
fanart='https://s22.postimg.cc/cb7nmhwv5/ultimas.png'
|
||||
))
|
||||
|
||||
itemlist.append(Item(channel=item.channel,
|
||||
title="Buscar",
|
||||
action="search",
|
||||
url='http://mundoflv.com/?s=',
|
||||
thumbnail=get_thumb('search', auto=True),
|
||||
fanart='https://s30.postimg.cc/pei7txpa9/buscar.png'
|
||||
))
|
||||
|
||||
if autoplay.context:
|
||||
autoplay.show_option(item.channel, itemlist)
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def todas(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
|
||||
|
||||
patron = 'class="item"><a href="(.*?)" title="(.*?)(?:\|.*?|\(.*?|- )(\d{4})(?:\)|-)".*?'
|
||||
patron += '<div class="img">.*?'
|
||||
patron += '<img src="([^"]+)" alt.*?>'
|
||||
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for scrapedurl, scrapedtitle, scrapedyear, scrapedthumbnail in matches:
|
||||
url = scrapedurl
|
||||
title = scrapertools.decodeHtmlentities(scrapedtitle)
|
||||
title = title.rstrip(' ')
|
||||
thumbnail = scrapedthumbnail
|
||||
year = scrapedyear
|
||||
plot = ''
|
||||
|
||||
fanart = 'https://s32.postimg.cc/h1ewz9hhx/mundoflv.png'
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="temporadas",
|
||||
title=title,
|
||||
url=url,
|
||||
thumbnail=thumbnail,
|
||||
plot=plot,
|
||||
fanart=fanart,
|
||||
contentSerieName=title,
|
||||
infoLabels={'year': year},
|
||||
show=title,
|
||||
list_language=list_language,
|
||||
context=autoplay.context
|
||||
))
|
||||
|
||||
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
|
||||
itemlist = fail_tmdb(itemlist)
|
||||
# Paginacion
|
||||
next_page_url = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />')
|
||||
|
||||
if next_page_url != "":
|
||||
itemlist.append(Item(channel=item.channel,
|
||||
action="todas",
|
||||
title=">> Página siguiente",
|
||||
url=next_page_url,
|
||||
thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png'
|
||||
))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def letras(item):
|
||||
thumbletras = {'0-9': 'https://s32.postimg.cc/drojt686d/image.png',
|
||||
'0 - 9': 'https://s32.postimg.cc/drojt686d/image.png',
|
||||
'#': 'https://s32.postimg.cc/drojt686d/image.png',
|
||||
'a': 'https://s32.postimg.cc/llp5ekfz9/image.png',
|
||||
'b': 'https://s32.postimg.cc/y1qgm1yp1/image.png',
|
||||
'c': 'https://s32.postimg.cc/vlon87gmd/image.png',
|
||||
'd': 'https://s32.postimg.cc/3zlvnix9h/image.png',
|
||||
'e': 'https://s32.postimg.cc/bgv32qmsl/image.png',
|
||||
'f': 'https://s32.postimg.cc/y6u7vq605/image.png',
|
||||
'g': 'https://s32.postimg.cc/9237ib6jp/image.png',
|
||||
'h': 'https://s32.postimg.cc/812yt6pk5/image.png',
|
||||
'i': 'https://s32.postimg.cc/6nbbxvqat/image.png',
|
||||
'j': 'https://s32.postimg.cc/axpztgvdx/image.png',
|
||||
'k': 'https://s32.postimg.cc/976yrzdut/image.png',
|
||||
'l': 'https://s32.postimg.cc/fmal2e9yd/image.png',
|
||||
'm': 'https://s32.postimg.cc/m19lz2go5/image.png',
|
||||
'n': 'https://s32.postimg.cc/b2ycgvs2t/image.png',
|
||||
'o': 'https://s32.postimg.cc/c6igsucpx/image.png',
|
||||
'p': 'https://s32.postimg.cc/jnro82291/image.png',
|
||||
'q': 'https://s32.postimg.cc/ve5lpfv1h/image.png',
|
||||
'r': 'https://s32.postimg.cc/nmovqvqw5/image.png',
|
||||
's': 'https://s32.postimg.cc/zd2t89jol/image.png',
|
||||
't': 'https://s32.postimg.cc/wk9lo8jc5/image.png',
|
||||
'u': 'https://s32.postimg.cc/w8s5bh2w5/image.png',
|
||||
'v': 'https://s32.postimg.cc/e7dlrey91/image.png',
|
||||
'w': 'https://s32.postimg.cc/fnp49k15x/image.png',
|
||||
'x': 'https://s32.postimg.cc/dkep1w1d1/image.png',
|
||||
'y': 'https://s32.postimg.cc/um7j3zg85/image.png',
|
||||
'z': 'https://s32.postimg.cc/jb4vfm9d1/image.png'}
|
||||
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
|
||||
patron = '<li><a.*?href="([^"]+)">([^<]+)<\/a><\/li>'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for scrapedurl, scrapedtitle in matches:
|
||||
url = urlparse.urljoin(item.url, scrapedurl)
|
||||
title = scrapedtitle
|
||||
if scrapedtitle.lower() in thumbletras:
|
||||
thumbnail = thumbletras[scrapedtitle.lower()]
|
||||
else:
|
||||
thumbnail = ''
|
||||
plot = ""
|
||||
fanart = item.fanart
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="todas",
|
||||
title=title,
|
||||
url=url,
|
||||
thumbnail=thumbnail,
|
||||
plot=plot,
|
||||
fanart=fanart,
|
||||
contentSerieName=title
|
||||
))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def fail_tmdb(itemlist):
|
||||
logger.info()
|
||||
realplot = ''
|
||||
for item in itemlist:
|
||||
if item.infoLabels['plot'] == '':
|
||||
data = httptools.downloadpage(item.url).data
|
||||
if item.thumbnail == '':
|
||||
item.thumbnail = scrapertools.find_single_match(data, patrones[0])
|
||||
realplot = scrapertools.find_single_match(data, patrones[1])
|
||||
item.plot = scrapertools.remove_htmltags(realplot)
|
||||
return itemlist
|
||||
|
||||
|
||||
def masvistas(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
|
||||
|
||||
patron = '<li><a href="(?!http:\/\/mundoflv\.com\/tag\/)(.*?)">.*?'
|
||||
patron += 'div class="im">.*?'
|
||||
patron += '<img src=".*?" alt="(.*?)(?:\|.*?|\(.*?|- )(\d{4})|-" \/>'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for scrapedurl, scrapedtitle, scrapedyear in matches:
|
||||
url = scrapedurl
|
||||
title = scrapedtitle
|
||||
fanart = item.fanart
|
||||
contentSerieName = scrapedtitle
|
||||
year = scrapedyear
|
||||
thumbnail = ''
|
||||
plot = 'nada'
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="temporadas",
|
||||
title=title,
|
||||
url=url,
|
||||
thumbnail=thumbnail,
|
||||
plot=plot,
|
||||
fanart=fanart,
|
||||
contentSerieName=contentSerieName,
|
||||
infoLabels={'year': year},
|
||||
context=autoplay.context
|
||||
))
|
||||
|
||||
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
|
||||
itemlist = fail_tmdb(itemlist)
|
||||
return itemlist
|
||||
|
||||
|
||||
def recomendadas(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
realplot = ''
|
||||
patron = '<li><A HREF="([^"]+)"><.*?>Ver ([^<]+)<\/A><\/li>'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for scrapedurl, scrapedtitle in matches:
|
||||
url = urlparse.urljoin(item.url, scrapedurl)
|
||||
data = httptools.downloadpage(scrapedurl).data
|
||||
thumbnail = scrapertools.get_match(data, '<meta property="og:image" content="([^"]+)".*?>')
|
||||
realplot = scrapertools.find_single_match(data, '\/><\/a>([^*]+)<p><\/p>.*')
|
||||
plot = scrapertools.remove_htmltags(realplot)
|
||||
title = scrapedtitle.replace('online', '')
|
||||
title = scrapertools.decodeHtmlentities(title)
|
||||
fanart = item.fanart
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="temporadas",
|
||||
title=title,
|
||||
url=url,
|
||||
thumbnail=thumbnail,
|
||||
plot=plot,
|
||||
fanart=fanart,
|
||||
contentSerieName=title,
|
||||
context=autoplay.context
|
||||
))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def ultimas(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
realplot = ''
|
||||
patron = '<li><A HREF="([^"]+)"> <.*?>Ver ([^<]+)<\/A><\/li>'
|
||||
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for scrapedurl, scrapedtitle in matches:
|
||||
url = urlparse.urljoin(item.url, scrapedurl)
|
||||
data = httptools.downloadpage(scrapedurl).data
|
||||
thumbnail = scrapertools.get_match(data, '<meta property="og:image" content="([^"]+)".*?>')
|
||||
realplot = scrapertools.find_single_match(data, '\/><\/a>([^*]+)<p><\/p>.*')
|
||||
plot = scrapertools.remove_htmltags(realplot)
|
||||
plot = ""
|
||||
title = scrapedtitle.replace('online', '')
|
||||
title = scrapertools.decodeHtmlentities(title)
|
||||
fanart = item.fanart
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="idioma",
|
||||
title=title,
|
||||
url=url,
|
||||
thumbnail=thumbnail,
|
||||
plot=plot,
|
||||
fanart=fanart,
|
||||
contentSerieName=title,
|
||||
context=autoplay.context
|
||||
))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def temporadas(item):
|
||||
logger.info()
|
||||
|
||||
itemlist = []
|
||||
templist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = data.replace ('"',"'")
|
||||
realplot = ''
|
||||
patron = "<button class='classnamer' onclick='javascript: mostrarcapitulos.*?blank'>([^<]+)<\/button>"
|
||||
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
serieid = scrapertools.find_single_match(data, "data-nonce='(.*?)'")
|
||||
item.thumbnail = item.thumbvid
|
||||
infoLabels = item.infoLabels
|
||||
for scrapedtitle in matches:
|
||||
url = 'http://mundoflv.com/wp-content/themes/wpRafael/includes/capitulos.php?serie=' + serieid + \
|
||||
'&temporada=' + scrapedtitle
|
||||
title = 'Temporada ' + scrapertools.decodeHtmlentities(scrapedtitle)
|
||||
contentSeasonNumber = scrapedtitle
|
||||
thumbnail = item.thumbnail
|
||||
realplot = scrapertools.find_single_match(data, '\/><\/a>([^*]+)<p><\/p>.*')
|
||||
plot = ''
|
||||
fanart = ''
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="episodiosxtemp",
|
||||
title=title,
|
||||
fulltitle=item.title,
|
||||
url=url,
|
||||
thumbnail=thumbnail,
|
||||
plot=plot,
|
||||
fanart=fanart,
|
||||
extra1=item.extra1,
|
||||
contentSerieName=item.contentSerieName,
|
||||
contentSeasonNumber=contentSeasonNumber,
|
||||
infoLabels={'season': contentSeasonNumber},
|
||||
context=item.context
|
||||
))
|
||||
|
||||
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
|
||||
if config.get_videolibrary_support() and len(itemlist) > 0:
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
|
||||
url=item.url,
|
||||
action="add_serie_to_library",
|
||||
extra="episodios",
|
||||
contentSerieName=item.contentSerieName,
|
||||
extra1=item.extra1
|
||||
))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def episodios(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
templist = temporadas(item)
|
||||
for tempitem in templist:
|
||||
itemlist += episodiosxtemp(tempitem)
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def episodiosxtemp(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = data.replace('"', "'")
|
||||
patron = "<button class='classnamer' onclick='javascript: mostrarenlaces\(([^\)]+)\).*?<"
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for scrapedtitle in matches:
|
||||
item.url = item.url.replace("&sr", "")
|
||||
item.url = item.url.replace("capitulos", "enlaces")
|
||||
url = item.url + '&capitulo=' + scrapedtitle
|
||||
contentEpisodeNumber = scrapedtitle
|
||||
title = item.contentSerieName + ' ' + item.contentSeasonNumber + 'x' + contentEpisodeNumber
|
||||
thumbnail = item.thumbnail
|
||||
plot = ''
|
||||
infoLabels = item.infoLabels
|
||||
infoLabels['episode'] = contentEpisodeNumber
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="findvideos",
|
||||
title=title,
|
||||
fulltitle=item.fulltitle,
|
||||
url=url,
|
||||
thumbnail=thumbnail,
|
||||
plot=plot,
|
||||
extra1=item.extra1,
|
||||
idioma='',
|
||||
contentSerieName=item.contentSerieName,
|
||||
contentSeasonNumber=item.contentSeasonNumber,
|
||||
infoLabels=infoLabels,
|
||||
show=item.contentSerieName,
|
||||
list_language=list_language,
|
||||
context=item.context
|
||||
))
|
||||
|
||||
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
|
||||
return itemlist
|
||||
|
||||
|
||||
def idioma(item):
|
||||
logger.info()
|
||||
|
||||
itemlist = []
|
||||
thumbvid = item.thumbnail
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
title="Latino",
|
||||
action="temporadas",
|
||||
url=item.url,
|
||||
thumbnail=thumbmx,
|
||||
fanart='',
|
||||
extra1='la',
|
||||
fulltitle=item.title,
|
||||
thumbvid=thumbvid,
|
||||
contentSerieName=item.contentSerieName,
|
||||
infoLabels=item.infoLabels,
|
||||
language='la'
|
||||
))
|
||||
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
title="Español",
|
||||
action="temporadas",
|
||||
url=item.url,
|
||||
thumbnail=thumbes,
|
||||
fanart='',
|
||||
extra1='es',
|
||||
fulltitle=item.title,
|
||||
thumbvid=thumbvid,
|
||||
contentSerieName=item.contentSerieName,
|
||||
language='es'
|
||||
))
|
||||
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
title="Subtitulado",
|
||||
action="temporadas",
|
||||
url=item.url,
|
||||
thumbnail=thumbsub,
|
||||
fanart='',
|
||||
extra1='sub',
|
||||
fulltitle=item.title,
|
||||
thumbvid=thumbvid,
|
||||
contentSerieName=item.contentSerieName,
|
||||
language='sub'
|
||||
))
|
||||
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
title="Original",
|
||||
action="temporadas",
|
||||
url=item.url,
|
||||
thumbnail=thumben,
|
||||
fanart='',
|
||||
extra1='en',
|
||||
fulltitle=item.title,
|
||||
thumbvid=thumbvid,
|
||||
contentSerieName=item.contentSerieName,
|
||||
language='en'
|
||||
))
|
||||
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
title="Original Subtitulado en Ingles",
|
||||
action="temporadas",
|
||||
url=item.url,
|
||||
thumbnail=thumben,
|
||||
fanart='',
|
||||
extra1='vosi',
|
||||
fulltitle=item.title,
|
||||
thumbvid=thumbvid,
|
||||
contentSerieName=item.contentSerieName,
|
||||
language='vosi'
|
||||
))
|
||||
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
title="Todo",
|
||||
action="temporadas",
|
||||
url=item.url,
|
||||
thumbnail=thumbtodos,
|
||||
fanart='',
|
||||
extra1='all',
|
||||
fulltitle=item.title,
|
||||
thumbvid=thumbvid,
|
||||
contentSerieName=item.contentSerieName,
|
||||
language='all'
|
||||
))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def busqueda(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
|
||||
|
||||
patron = '<img class=.*?src="([^"]+)" alt="(.*?)(?:\|.*?|\(.*?|")>.*?h3><a href="(.*?)".*?class="year">(' \
|
||||
'.*?)<\/span>'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
scrapertools.printMatches(matches)
|
||||
|
||||
for scrapedthumbnail, scrapedtitle, scrapedurl, scrapedyear in matches:
|
||||
url = scrapedurl
|
||||
title = scrapertools.decodeHtmlentities(scrapedtitle)
|
||||
thumbnail = scrapedthumbnail
|
||||
plot = ''
|
||||
year = scrapedyear
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="idioma",
|
||||
title=title,
|
||||
fulltitle=title,
|
||||
url=url,
|
||||
thumbnail=thumbnail,
|
||||
plot=plot,
|
||||
contentSerieName=title,
|
||||
infoLabels={'year': year},
|
||||
context=autoplay.context
|
||||
))
|
||||
|
||||
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
|
||||
itemlist = fail_tmdb(itemlist)
|
||||
|
||||
# Paginacion
|
||||
next_page_url = scrapertools.find_single_match(data,
|
||||
"<a rel='nofollow' class=previouspostslink' href='(["
|
||||
"^']+)'>Siguiente ›</a>")
|
||||
if next_page_url != "":
|
||||
item.url = next_page_url
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="busqueda",
|
||||
title=">> Página siguiente",
|
||||
url=next_page_url,
|
||||
thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png'
|
||||
))
|
||||
return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
|
||||
logger.info()
|
||||
texto = texto.replace(" ", "+")
|
||||
item.url = item.url + texto
|
||||
if texto != '':
|
||||
return busqueda(item)
|
||||
|
||||
|
||||
def findvideos(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
|
||||
data = httptools.downloadpage(item.url).data
|
||||
patron = 'href="([^"]+)".*?domain=.*?>([^<]+).*?gold">([^<]+)<'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for scrapedurl, scrapedserver, scrapedidioma in matches:
|
||||
url = scrapedurl
|
||||
idioma = audio[scrapedidioma]
|
||||
server = scrapedserver.strip(' ')
|
||||
if server == 'streamin':
|
||||
server = 'streaminto'
|
||||
title = item.contentSerieName + ' ' + str(item.contentSeasonNumber) + 'x' + str(
|
||||
item.contentEpisodeNumber) + ' ' + idioma + ' (' + server + ')'
|
||||
|
||||
new_item = item.clone(title=title,
|
||||
url=url,
|
||||
action="play",
|
||||
language=IDIOMAS[scrapedidioma],
|
||||
server=server,
|
||||
quality='default',
|
||||
fulltitle=item.ContentSeriename,
|
||||
)
|
||||
|
||||
# Requerido para FilterTools
|
||||
|
||||
itemlist = filtertools.get_link(itemlist, new_item, list_language)
|
||||
|
||||
import os
|
||||
for videoitem in itemlist:
|
||||
videoitem.infoLabels = item.infoLabels
|
||||
videoitem.thumbnail = os.path.join(config.get_runtime_path(), "resources", "media", "servers",
|
||||
"server_%s.png" % videoitem.server)
|
||||
|
||||
# Requerido para AutoPlay
|
||||
|
||||
autoplay.start(itemlist, item)
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def play(item):
|
||||
logger.info()
|
||||
|
||||
data = httptools.downloadpage(item.url).data
|
||||
if item.server not in ['streamplay','streame', 'clipwatching', 'vidoza']:
|
||||
url = scrapertools.find_single_match(data, '<(?:IFRAME|iframe).*?(?:SRC|src)=*([^ ]+) (?!style|STYLE)')
|
||||
else:
|
||||
url = scrapertools.find_single_match(data, '<meta http-equiv="refresh" content="0; url=([^"]+)">')
|
||||
|
||||
itemlist = servertools.find_video_items(data=url)
|
||||
for videoitem in itemlist:
|
||||
videoitem.infoLabels = item.infoLabels
|
||||
videoitem.title = item.title
|
||||
videoitem.thumbnail = videoitem.infoLabels['thumbnail']
|
||||
|
||||
return itemlist
|
||||
@@ -534,7 +534,7 @@ def show_channels(item):
def menu_opciones(item):
    itemlist = list()
    itemlist.append(Item(channel=item.channel, title=config.get_localized_string(60525),
-                         thumbnail=get_thumb("setting_0.png"),
+                         text_bold = True, thumbnail=get_thumb("setting_0.png"),
                         folder=False))
    itemlist.append(Item(channel=item.channel, action="setting_channel", extra="peliculas", title=config.get_localized_string(60526),
                         thumbnail=get_thumb("channels_movie.png"),
@@ -173,6 +173,8 @@ def p_portipo(item):
def peliculas(item):
    logger.info()
    itemlist = []
+    # action = ''
+    # contentType = ''
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}| |<br>", "", data)
    patron = '<img class="posterentrada" src="/([^"]+)".*?' # img
@@ -184,12 +186,22 @@ def peliculas(item):
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedthumbnail, scrapedurl, year, plot, scrapedtitle in matches:
+        if 'serie' in scrapedurl:
+            action = 'temporadas'
+            contentType = 'tvshow'
+            title = scrapedtitle + ' [COLOR blue](Serie)[/COLOR]'
+
+        else:
+            action = 'findvideos'
+            contentType = 'movie'
+            title = scrapedtitle
+
        if item.infoLabels['plot'] == '':
            item.plot = plot

-        itemlist.append(Item(channel=item.channel, action="findvideos", contentTitle=scrapedtitle,
+        itemlist.append(Item(channel=item.channel, action=action, contentTitle=scrapedtitle, contentType=contentType,
                             infoLabels={"year": year}, thumbnail=host + scrapedthumbnail,
-                             url=scrapedurl, title=scrapedtitle, plot=plot))
+                             url=scrapedurl, title=title, plot=plot))

    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
@@ -308,7 +320,8 @@ def temporadas(item):
                             text_color=color1, thumbnail=get_thumb("videolibrary_tvshow.png"), fanart=fanart_host))
        return itemlist
    else:
-        return episdesxseason(item)
+        return episodesxseason(item)


def episodios(item):
    logger.info()
@@ -339,12 +352,12 @@ def episodesxseason(item):
        episode = element['metas_formateadas']['nepisodio']
        season = element['metas_formateadas']['ntemporada']
        scrapedurl = element['url_directa']

        if 'season' in item.infoLabels and int(item.infoLabels['season']) != int(season):
            continue
-        title = "%sx%s: %s" % (season, episode.zfill(
-            2), scrapertools.unescape(scrapedname))
-        new_item = item.clone(title=title, url=scrapedurl, action="findvideos", text_color=color3, fulltitle=title,
-                              contentType="episode", extra='serie')
+        title = "%sx%s: %s" % (season, episode.zfill(2), scrapertools.unescape(scrapedname))
+        new_item = item.clone(title=title, url=scrapedurl, action="findvideos", text_color=color3,
+                              fulltitle=title, contentType="episode", extra='serie')
        if 'infoLabels' not in new_item:
            new_item.infoLabels = {}
        new_item.infoLabels['season'] = season
@@ -365,11 +378,7 @@ def episodesxseason(item):
    itemlist.sort(key=lambda it: int(it.infoLabels['episode']),
                  reverse=config.get_setting('orden_episodios', __channel__))
    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
-    # Opción "Añadir esta serie a la videoteca"
-    # if config.get_videolibrary_support() and len(itemlist) > 0:
-    #     itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
-    #                          action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
-    #                          text_color=color1, thumbnail=get_thumb("videolibrary_tvshow.png"), fanart=fanart_host))

    return itemlist
@@ -57,31 +57,25 @@ def menu_channels(item):
    itemlist.append(Item(channel=CHANNELNAME, title=config.get_localized_string(60545), action="conf_tools", folder=False,
                         extra="channels_onoff", thumbnail=get_thumb("setting_0.png")))

-    itemlist.append(Item(channel=CHANNELNAME, title=config.get_localized_string(60546), action="", folder=False,
-                         thumbnail=get_thumb("setting_0.png")))
+    itemlist.append(Item(channel=CHANNELNAME, title=config.get_localized_string(60546) + ":", action="", folder=False,
+                         text_bold = True, thumbnail=get_thumb("setting_0.png")))

    # Inicio - Canales configurables
    import channelselector
    from core import channeltools

    channel_list = channelselector.filterchannels("all")

    for channel in channel_list:
        channel_parameters = channeltools.get_channel_parameters(channel.channel)

        if channel_parameters["has_settings"]:
-            itemlist.append(Item(channel=CHANNELNAME, title=config.get_localized_string(60547) % channel.title,
+            itemlist.append(Item(channel=CHANNELNAME, title=". " + config.get_localized_string(60547) % channel.title,
                                 action="channel_config", config=channel.channel, folder=False,
                                 thumbnail=channel.thumbnail))
    # Fin - Canales configurables

    itemlist.append(Item(channel=CHANNELNAME, action="", title="", folder=False, thumbnail=get_thumb("setting_0.png")))

-    itemlist.append(Item(channel=CHANNELNAME, title=config.get_localized_string(60548), action="", folder=False,
-                         thumbnail=get_thumb("channels.png")))
-    itemlist.append(Item(channel=CHANNELNAME, title=config.get_localized_string(60549), action="conf_tools",
+    itemlist.append(Item(channel=CHANNELNAME, title=config.get_localized_string(60548) + ":", action="", folder=False,
+                         text_bold=True, thumbnail=get_thumb("channels.png")))
+    itemlist.append(Item(channel=CHANNELNAME, title=". " + config.get_localized_string(60549), action="conf_tools",
                         folder=True, extra="lib_check_datajson", thumbnail=get_thumb("channels.png")))

    return itemlist
@@ -129,7 +123,7 @@ def menu_servers(item):
                         action="servers_favorites", folder=False, thumbnail=get_thumb("setting_0.png")))

    itemlist.append(Item(channel=CHANNELNAME, title=config.get_localized_string(60552),
-                         action="", folder=False, thumbnail=get_thumb("setting_0.png")))
+                         action="", folder=False, text_bold = True, thumbnail=get_thumb("setting_0.png")))

    # Inicio - Servidores configurables
@@ -138,11 +132,11 @@ def menu_servers(item):
        server_parameters = servertools.get_server_parameters(server)
        if server_parameters["has_settings"]:
            itemlist.append(
-                Item(channel=CHANNELNAME, title=config.get_localized_string(60553) % server_parameters["name"],
+                Item(channel=CHANNELNAME, title = ". " + config.get_localized_string(60553) % server_parameters["name"],
                     action="server_config", config=server, folder=False, thumbnail=""))

    itemlist.append(Item(channel=CHANNELNAME, title=config.get_localized_string(60554),
-                         action="", folder=False, thumbnail=get_thumb("setting_0.png")))
+                         action="", folder=False, text_bold = True, thumbnail=get_thumb("setting_0.png")))

    server_list = servertools.get_servers_list().keys()
@@ -152,7 +146,7 @@ def menu_servers(item):
        if server_parameters["has_settings"] and filter(lambda x: x["id"] not in ["black_list", "white_list"],
                                                         server_parameters["settings"]):
            itemlist.append(
-                Item(channel=CHANNELNAME, title=config.get_localized_string(60553) % server_parameters["name"],
+                Item(channel=CHANNELNAME, title=". " + config.get_localized_string(60553) % server_parameters["name"],
                     action="server_config", config=server, folder=False, thumbnail=""))

    # Fin - Servidores configurables
@@ -309,22 +303,22 @@ def submenu_tools(item):
    itemlist.append(Item(channel=CHANNELNAME, action="", title="", folder=False,
                         thumbnail=get_thumb("setting_0.png")))

-    itemlist.append(Item(channel=CHANNELNAME, title=config.get_localized_string(60564), action="", folder=False,
-                         thumbnail=get_thumb("channels.png")))
+    itemlist.append(Item(channel=CHANNELNAME, title=config.get_localized_string(60564) + ":", action="", folder=False,
+                         text_bold=True, thumbnail=get_thumb("channels.png")))
    itemlist.append(Item(channel=CHANNELNAME, title=config.get_localized_string(60565), action="conf_tools",
                         folder=True, extra="lib_check_datajson", thumbnail=get_thumb("channels.png")))

    if config.get_videolibrary_support():
        itemlist.append(Item(channel=CHANNELNAME, action="", title="", folder=False,
                             thumbnail=get_thumb("setting_0.png")))
-        itemlist.append(Item(channel=CHANNELNAME, title=config.get_localized_string(60566), action="", folder=False,
-                             thumbnail=get_thumb("videolibrary.png")))
+        itemlist.append(Item(channel=CHANNELNAME, title=config.get_localized_string(60566) + ":", action="", folder=False,
+                             text_bold=True, thumbnail=get_thumb("videolibrary.png")))
        itemlist.append(Item(channel=CHANNELNAME, action="overwrite_tools", folder=False,
                             thumbnail=get_thumb("videolibrary.png"),
-                             title=config.get_localized_string(60567)))
+                             title="- " + config.get_localized_string(60567)))
        itemlist.append(Item(channel="videolibrary", action="update_videolibrary", folder=False,
                             thumbnail=get_thumb("videolibrary.png"),
-                             title=config.get_localized_string(60568)))
+                             title="- " + config.get_localized_string(60568)))

    return itemlist
@@ -152,9 +152,9 @@ def findvideos(data, skip=False):
            if skip and len(devuelve) >= skip:
                devuelve = devuelve[:skip]
                break

    if config.get_setting("filter_servers") == False: is_filter_servers = False
    if not devuelve and is_filter_servers:
-        platformtools.dialog_ok(config.get_localized_string(60001))
+        platformtools.dialog_ok(config.get_localized_string(60000), config.get_localized_string(60001))

    return devuelve
@@ -498,11 +498,11 @@ msgid "Search for actor/actress"
msgstr ""

msgctxt "#60000"
-msgid "Filtra server (Black List)"
+msgid "Filter server (Black List)"
msgstr ""

msgctxt "#60001"
-msgid "Filtra server (Black List)\nNessun collegamento disponibile che soddisfi i requisiti della Black list.\nRiprova modificando il filtro in 'Configurazione Server"
+msgid "No links available that meets the requirements of the Black list. Try again by changing the filter in 'Server Configuration"
msgstr ""

msgctxt "#60003"
@@ -498,12 +498,12 @@ msgid "Search for actor/actress"
msgstr "Cerca attore/attrice"

msgctxt "#60000"
-msgid "Filtra server (Black List)"
+msgid "Filter server (Black List)"
msgstr "Filtra server (Black List)"

msgctxt "#60001"
-msgid "Filtra server (Black List)\nNessun collegamento disponibile che soddisfi i requisiti della Black list.\nRiprova modificando il filtro in 'Configurazione Server"
-msgstr "Filtra server (Black List)\nNessun collegamento disponibile che soddisfi i requisiti della Black list.\nRiprova modificando il filtro in 'Configurazione Server"
+msgid "No links available that meets the requirements of the Black list. Try again by changing the filter in 'Server Configuration"
+msgstr "Nessun collegamento disponibile che soddisfi i requisiti della Black list. Riprova modificando il filtro in 'Configurazione Server"

msgctxt "#60003"
msgid "Connessione con %s"
@@ -502,8 +502,8 @@ msgid "Filter server (Black List)"
msgstr "Filtrar servidores (Lista Negra)"

msgctxt "#60001"
-msgid "Filter server (Black List)\nNo connection available that meets the requirements of the Black list.\nTry again by changing the filter in 'Server Configuration"
-msgstr "Filtrar servidores (Lista Negra)\nNo hay enlaces disponibles que cumplan los requisitos de su Lista Negra.\nPruebe de nuevo modificando el fíltro en 'Configuracíon Servidores"
+msgid "No links available that meets the requirements of the Black list. Try again by changing the filter in 'Server Configuration"
+msgstr "No hay enlaces disponibles que cumplan los requisitos de su Lista Negra. Pruebe de nuevo modificando el filtro en 'Configuracíon - Servidores"

msgctxt "#60003"
msgid "Connecting with %s"
@@ -502,8 +502,8 @@ msgid "Filter server (Black List)"
msgstr "Filtrar servidores (Lista Negra)"

msgctxt "#60001"
-msgid "Filter server (Black List)\nNo connection available that meets the requirements of the Black list.\nTry again by changing the filter in 'Server Configuration"
-msgstr "Filtrar servidores (Lista Negra)\nNo hay enlaces disponibles que cumplan los requisitos de su Lista Negra.\nPruebe de nuevo modificando el fíltro en 'Configuracíon Servidores"
+msgid "No links available that meets the requirements of the Black list. Try again by changing the filter in 'Server Configuration"
+msgstr "No hay enlaces disponibles que cumplan los requisitos de su Lista Negra. Pruebe de nuevo modificando el filtro en 'Configuracíon - Servidores"

msgctxt "#60003"
msgid "Connecting with %s"
@@ -502,8 +502,8 @@ msgid "Filter server (Black List)"
msgstr "Filtrar servidores (Lista Negra)"

msgctxt "#60001"
-msgid "Filter server (Black List)\nNo connection available that meets the requirements of the Black list.\nTry again by changing the filter in 'Server Configuration"
-msgstr "Filtrar servidores (Lista Negra)\nNo hay enlaces disponibles que cumplan los requisitos de su Lista Negra.\nPruebe de nuevo modificando el fíltro en 'Configuracíon Servidores"
+msgid "No links available that meets the requirements of the Black list. Try again by changing the filter in 'Server Configuration"
+msgstr "No hay enlaces disponibles que cumplan los requisitos de su Lista Negra. Pruebe de nuevo modificando el filtro en 'Configuracíon - Servidores"

msgctxt "#60003"
msgid "Connecting with %s"