@@ -12,7 +12,7 @@ from core.item import Item
 from platformcode import config, logger
 from channels import autoplay
 
-host = "http://www.danimados.com/"
+host = "https://www.danimados.com/"
 
 list_servers = ['openload',
                 'okru',
@@ -48,12 +48,13 @@ def sub_search(item):
     logger.info()
     itemlist = []
     data = httptools.downloadpage(item.url).data
-    patron = 'class="thumbnail animation-.*?href="([^"]+).*?'
+    patron = '(?s)class="thumbnail animation-.*?href="([^"]+).*?'
     patron += 'img src="([^"]+).*?'
     patron += 'alt="([^"]+).*?'
-    patron += 'class="year">(\d{4})'
+    patron += 'class="meta"(.*?)class="contenido"'
     matches = scrapertools.find_multiple_matches(data, patron)
     for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear in matches:
+        scrapedyear = scrapertools.find_single_match(scrapedyear, 'class="year">(\d{4})')
         item.action = "findvideos"
         item.contentTitle = scrapedtitle
         item.contentSerieName = ""
@@ -95,7 +96,7 @@ def mainpage(item):
         itemlist.append(
             Item(channel=item.channel, title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, action="episodios",
                  show=scrapedtitle))
     tmdb.set_infoLabels(itemlist)
     return itemlist
@@ -171,12 +172,14 @@ def findvideos(item):
     id = scrapertools.find_single_match(url, 'iframe/(.*)')
     url = url.replace(id, base64.b64encode(id))
     new_data = httptools.downloadpage(url).data
-    url = scrapertools.find_single_match(new_data, "sources: \[\{file:'([^']+)")
-    if "zkstream" in url:
+    new_data = new_data.replace('"', "'")
+    url = scrapertools.find_single_match(new_data, "sources:\s*\[\{file:\s*'([^']+)")
+    if "zkstream" in url or "cloudup" in url:
         url1 = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location", "")
     else:
         url1 = url
-    itemlist.append(item.clone(title='%s', url=url1, action="play"))
+    if url1:
+        itemlist.append(item.clone(title='%s', url=url1, action="play"))
     tmdb.set_infoLabels(itemlist)
     itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
     if config.get_videolibrary_support() and len(itemlist) > 0 and item.contentType == "movie" and item.contentChannel != 'videolibrary':
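The rewritten findvideos above hides the embed id by base64-encoding it before fetching the player page. A minimal standalone sketch of that rewrite (Python 2, as the plugin targets; the sample URL is hypothetical):

import base64

url = "https://www.danimados.com/iframe/abc123"   # hypothetical embed URL
id = url.split("iframe/")[-1]                     # same value the 'iframe/(.*)' regex extracts
url = url.replace(id, base64.b64encode(id))       # Python 2: b64encode accepts a str
# url is now ".../iframe/YWJjMTIz"; the site resolves the encoded id to the real
# player page, and the sources regex then runs against that page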
plugin.video.alfa/channels/megadede.json (new file, 85 lines)
@@ -0,0 +1,85 @@
{
    "id": "megadede",
    "name": "Megadede",
    "active": true,
    "adult": false,
    "language": ["cast"],
    "thumbnail": "https://i.postimg.cc/L5pNtXdS/megadede1.png",
    "banner": "",
    "categories": [
        "movie",
        "tvshow"
    ],
    "settings": [
        {
            "id": "megadedeuser",
            "type": "text",
            "label": "@30014",
            "enabled": true,
            "visible": true
        },
        {
            "id": "megadedepassword",
            "type": "text",
            "hidden": true,
            "label": "@30015",
            "enabled": "!eq(-1,'')",
            "visible": true
        },
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": false,
            "enabled": "!eq(-1,'') + !eq(-2,'')",
            "visible": true
        },
        {
            "id": "megadedesortlinks",
            "type": "list",
            "label": "Ordenar enlaces",
            "default": 0,
            "enabled": true,
            "visible": "!eq(-2,'') + !eq(-3,'')",
            "lvalues": [
                "No",
                "Por no Reportes",
                "Por Idioma",
                "Por Calidad",
                "Por Idioma y Calidad",
                "Por Idioma y no Reportes",
                "Por Idioma, Calidad y no Reportes"
            ]
        },
        {
            "id": "megadedeshowlinks",
            "type": "list",
            "label": "Mostrar enlaces",
            "default": 0,
            "enabled": true,
            "visible": "!eq(-3,'') + !eq(-4,'')",
            "lvalues": [
                "Todos",
                "Ver online",
                "Descargar"
            ]
        },
        {
            "id": "megadedenumberlinks",
            "type": "list",
            "label": "Limitar número de enlaces",
            "default": 0,
            "enabled": true,
            "visible": "!eq(-4,'') + !eq(-5,'')",
            "lvalues": [
                "No",
                "5",
                "10",
                "15",
                "20",
                "25",
                "30"
            ]
        }
    ]
}
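For reference, the channel reads these settings back through config.get_setting, keyed by the ids above; a minimal sketch (the calls match those used in megadede.py below, the empty-string fallback for unset text settings is an assumption):

from platformcode import config

user = config.get_setting("megadedeuser", "megadede")            # text setting, "" until filled in
sortlinks = config.get_setting("megadedesortlinks", "megadede")  # list setting: index into "lvalues"
# e.g. sortlinks == 6 selects "Por Idioma, Calidad y no Reportes"
if not user:
    pass  # without credentials, mainlist only offers the configuration entry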
plugin.video.alfa/channels/megadede.py (new file, 816 lines)
@@ -0,0 +1,816 @@
# -*- coding: utf-8 -*-

import os
import re
import sys
import urlparse
from time import sleep

from core import channeltools
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from platformcode import platformtools

host = 'https://www.megadede.com'
__channel__ = 'megadede'
parameters = channeltools.get_channel_parameters(__channel__)
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']
color1, color2, color3 = ['0xFFB10021', '0xFFB10021', '0xFFB10004']


def login():
    url_origen = host + "/login?popup=1"
    try:
        data = httptools.downloadpage(url_origen).data
    except:
        data = httptools.downloadpage(url_origen, follow_redirects=False).data
    if '<span class="username">' in data:
        return True
    token = scrapertools.find_single_match(data, '<input name="_token" type="hidden" value="([^"]+)"')
    if 'Escribe los números de la imagen' in data:
        captcha_url = scrapertools.find_single_match(data, '<img src="([^"]+)" alt="captcha">')
        imagen_data = httptools.downloadpage(captcha_url).data
        ficheropng = os.path.join(config.get_data_path(), "captcha_megadede.png")
        outfile = open(ficheropng, 'wb')
        outfile.write(imagen_data)
        outfile.close()
        # xbmcgui is imported at module load time, in the try block at the bottom of this file
        img = xbmcgui.ControlImage(450, 15, 400, 130, ficheropng)
        wdlg = xbmcgui.WindowDialog()
        wdlg.addControl(img)
        wdlg.show()
        sleep(1)
        kb = platformtools.dialog_numeric(0, "Escribe los números de la imagen")

        postcaptcha = ""
        if kb != '':
            solution = kb
            postcaptcha = "&captcha=" + str(solution)
        else:
            return False
        wdlg.close()
    else:
        postcaptcha = ""
    post = "_token=" + str(token) + "&email=" + str(config.get_setting("megadedeuser", "megadede")) + \
           "&password=" + str(config.get_setting("megadedepassword", "megadede")) + postcaptcha
    # + "&app=2131296469"
    headers = {"User-Agent": "Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko) "
                             "Chrome/66.0.3163.100 Safari/537.36",
               "Referer": host, "X-Requested-With": "XMLHttpRequest", "X-CSRF-TOKEN": token}
    data = httptools.downloadpage(host + "/login", post=post, headers=headers,
                                  replace_headers=False).data
    if "redirect" in data:
        return True
    else:
        return False


def mainlist(item):
    logger.info()
    itemlist = []
    if not config.get_setting("megadedeuser", "megadede"):
        itemlist.append(
            Item(channel=item.channel, title="Habilita tu cuenta en la configuración e ingresar de nuevo al canal", action="settingCanal",
                 url=""))
    else:
        result = login()
        if not result:
            itemlist.append(Item(channel=item.channel, action="mainlist", title="Login fallido. Volver a intentar..."))
            return itemlist
        item.url = host
        item.fanart = fanart_host
        item.thumbnail = "https://s18.postimg.cc/r5cylu6rd/12_-_oi_RDsdv.png"
        itemlist.append(item.clone(title="Películas", action="menupeliculas", text_color=color3, text_blod=True))

        item.thumbnail = "https://s18.postimg.cc/ruvqy6zl5/15_-_9m9_Dp1m.png"
        itemlist.append(item.clone(title="Series", action="menuseries", text_color=color3, text_blod=True))

        itemlist.append(item.clone(title="Listas", action="menulistas", text_color=color3, text_blod=True, thumbnail='https://s18.postimg.cc/xj21p46ih/10_-_Uf7e_XHE.png'))

        itemlist.append(item.clone(title="", folder=False, thumbnail=thumbnail_host))
        item.thumbnail = ""
        itemlist.append(item.clone(channel=item.channel, action="settingCanal", title="Configuración...", url="", thumbnail='https://s18.postimg.cc/c9efeassp/3_-_QAHK2_Tc.png'))
    return itemlist


def settingCanal(item):
    return platformtools.show_channel_settings()


def menuseries(item):
    logger.info()
    itemlist = []
    item.url = host
    item.fanart = fanart_host
    item.text_color = None
    item.thumbnail = "https://s18.postimg.cc/ruvqy6zl5/15_-_9m9_Dp1m.png"
    itemlist.append(item.clone(action="peliculas", title=" Novedades", url=host + "/series", thumbnail='https://s18.postimg.cc/in3ihji95/11_-_WPg_H5_Kx.png'))
    itemlist.append(item.clone(action="generos", title=" Por géneros", url=host + "/series", thumbnail='https://s18.postimg.cc/p0slktaah/5_-_c_Nf_KRvm.png'))
    itemlist.append(
        item.clone(action="peliculas", title=" Siguiendo", url=host + "/series/following", thumbnail='https://s18.postimg.cc/68gqh7j15/7_-_tqw_AHa5.png'))
    itemlist.append(item.clone(action="peliculas", title=" Capítulos Pendientes",
                               url=host + "/series/mypending/0?popup=1", viewmode="movie", thumbnail='https://s18.postimg.cc/9s2o71w1l/2_-_3dbbx7_K.png'))
    itemlist.append(
        item.clone(action="peliculas", title=" Favoritas", url=host + "/series/favorites", thumbnail='https://s18.postimg.cc/n8zmpwynd/4_-_JGrig_Ep.png'))
    itemlist.append(
        item.clone(action="peliculas", title=" Pendientes", url=host + "/series/pending", thumbnail='https://s18.postimg.cc/4gnrmacix/13_-_cwl_TDog.png'))
    itemlist.append(item.clone(action="peliculas", title=" Terminadas", url=host + "/series/seen", thumbnail='https://s18.postimg.cc/5vpcay0qh/17_-_M2in_Fp_O.png'))
    itemlist.append(
        item.clone(action="peliculas", title=" Recomendadas", url=host + "/series/recommended", thumbnail='https://s18.postimg.cc/bwn182sih/14_-_fin32_Kp.png'))
    itemlist.append(item.clone(action="search", title=" Buscar...", url=host + "/series", thumbnail='https://s18.postimg.cc/s7n54ghvt/1_-_01_ZDYii.png'))
    itemlist.append(item.clone(title="", folder=False, thumbnail=thumbnail_host))
    itemlist.append(Item(channel=item.channel, action="settingCanal", title="Configuración...", url="", thumbnail='https://s18.postimg.cc/c9efeassp/3_-_QAHK2_Tc.png'))
    return itemlist


def menupeliculas(item):
    logger.info()
    itemlist = []
    item.url = host
    item.fanart = fanart_host
    item.text_color = None
    item.thumbnail = "https://s18.postimg.cc/r5cylu6rd/12_-_oi_RDsdv.png"
    itemlist.append(item.clone(action="peliculas", title=" Novedades", url=host + "/pelis", thumbnail='https://s18.postimg.cc/in3ihji95/11_-_WPg_H5_Kx.png'))
    itemlist.append(item.clone(action="generos", title=" Por géneros", url=host + "/pelis", thumbnail='https://s18.postimg.cc/p0slktaah/5_-_c_Nf_KRvm.png'))
    itemlist.append(item.clone(action="peliculas", title=" Solo HD", url=host + "/pelis?quality=3", thumbnail='https://s18.postimg.cc/e17e95mfd/16_-_qmqn4_Si.png'))
    itemlist.append(
        item.clone(action="peliculas", title=" Pendientes", url=host + "/pelis/pending", thumbnail='https://s18.postimg.cc/4gnrmacix/13_-_cwl_TDog.png'))
    itemlist.append(
        item.clone(action="peliculas", title=" Recomendadas", url=host + "/pelis/recommended", thumbnail='https://s18.postimg.cc/bwn182sih/14_-_fin32_Kp.png'))
    itemlist.append(
        item.clone(action="peliculas", title=" Favoritas", url=host + "/pelis/favorites", thumbnail='https://s18.postimg.cc/n8zmpwynd/4_-_JGrig_Ep.png'))
    itemlist.append(item.clone(action="peliculas", title=" Vistas", url=host + "/pelis/seen", thumbnail='https://s18.postimg.cc/5vpcay0qh/17_-_M2in_Fp_O.png'))
    itemlist.append(item.clone(action="search", title=" Buscar...", url=host + "/pelis", thumbnail='https://s18.postimg.cc/s7n54ghvt/1_-_01_ZDYii.png'))
    itemlist.append(item.clone(title="", folder=False, thumbnail=thumbnail_host))
    item.thumbnail = ""
    itemlist.append(item.clone(channel=item.channel, action="settingCanal", title="Configuración...", url="", thumbnail='https://s18.postimg.cc/c9efeassp/3_-_QAHK2_Tc.png'))
    return itemlist


def menulistas(item):
    logger.info()
    itemlist = []
    item.url = host
    item.fanart = fanart_host
    item.text_color = None
    itemlist.append(
        item.clone(action="listas", tipo="populares", title=" Populares", url=host + "/listas", thumbnail='https://s18.postimg.cc/7aqwzrha1/8_-_3rn14_Tq.png'))
    itemlist.append(
        item.clone(action="listas", tipo="siguiendo", title=" Siguiendo", url=host + "/listas", thumbnail='https://s18.postimg.cc/4tf5sha89/9_-_z_F8c_UBT.png'))
    itemlist.append(
        item.clone(action="listas", tipo="tuslistas", title=" Tus Listas", url=host + "/listas"))
    itemlist.append(item.clone(title="", folder=False, thumbnail=thumbnail_host))
    item.thumbnail = ""
    itemlist.append(item.clone(channel=item.channel, action="settingCanal", title="Configuración...", url="", thumbnail='https://s18.postimg.cc/c9efeassp/3_-_QAHK2_Tc.png'))
    return itemlist


def generos(item):
    logger.info()
    tipo = item.url.replace(host + "/", "")
    data = httptools.downloadpage(item.url).data
    data = scrapertools.find_single_match(data,
                                          '<select name="genre_id" class="selectpicker" title="Selecciona...">(.*?)</select>')
    patron = '<option value="([^"]+)">([^<]+)</option>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    itemlist = []
    for id_genere, title in matches:
        title = title.strip()
        thumbnail = ""
        plot = ""
        url = host + "/" + tipo + "?genre_id=" + id_genere
        itemlist.append(
            Item(channel=item.channel, action="peliculas", title=title, url=url, thumbnail=thumbnail, plot=plot,
                 fulltitle=title))
    return itemlist


def search(item, texto):
    logger.info()
    item.tipo = item.url.replace(host + "/", "")
    item.url = host + "/search/"
    texto = texto.replace(" ", "-")
    item.url = item.url + texto
    try:
        return buscar(item)
    # Catch the exception so a failing channel does not break the global search
    except:
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def buscar(item):
    logger.info()
    headers = {"X-Requested-With": "XMLHttpRequest"}
    data = httptools.downloadpage(item.url, headers=headers).data
    json_object = jsontools.load(data)
    data = json_object["content"]
    return parse_mixed_results(item, data)


def parse_mixed_results(item, data):
    itemlist = []
    patron = '<div class="media-dropdown mini dropdown model" data-value="([^"]+)"+'
    patron += '.*?<a href="([^"]+)"[^<]data-toggle="tooltip" data-container="body"+'
    patron += ' data-delay="500" title="([^"]+)"[^<]+'
    patron += '.*?src="([^"]+)"+'
    patron += '.*?<div class="year">([^<]+)</div>+'
    patron += '.*?<div class="value"><i class="fa fa-star"></i> ([^<]+)</div>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    if item.tipo == "lista":
        following = scrapertools.find_single_match(data, '<div class="follow-lista-buttons ([^"]+)">')
        data_id = scrapertools.find_single_match(data, 'data-model="10" data-id="([^"]+)">')
        if following.strip() == "following":
            itemlist.append(
                Item(channel='megadede', title="Dejar de seguir", idtemp=data_id, token=item.token, valor="unfollow",
                     action="megadede_check", url=item.url, tipo=item.tipo))
        else:
            itemlist.append(
                Item(channel='megadede', title="Seguir esta lista", idtemp=data_id, token=item.token, valor="follow",
                     action="megadede_check", url=item.url, tipo=item.tipo))

    for visto, scrapedurl, scrapedtitle, scrapedthumbnail, scrapedyear, scrapedvalue in matches:
        title = ""
        if visto.strip() == "seen":
            title += "[visto] "
        title += scrapertools.htmlclean(scrapedtitle)
        if scrapedyear != '':
            title += " (" + scrapedyear + ")"
        fulltitle = title
        if scrapedvalue != '':
            title += " (" + scrapedvalue + ")"
        thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
        fanart = thumbnail.replace("mediathumb", "mediabigcover")
        plot = ""
        if "/peli/" in scrapedurl or "/docu/" in scrapedurl:
            # sectionStr = "peli" if "/peli/" in scrapedurl else "docu"
            if "/peli/" in scrapedurl:
                sectionStr = "peli"
            else:
                sectionStr = "docu"
            referer = urlparse.urljoin(item.url, scrapedurl)
            url = urlparse.urljoin(item.url, scrapedurl)
            if item.tipo != "series":
                itemlist.append(Item(channel=item.channel, action="findvideos", title=title, extra=referer, url=url,
                                     thumbnail=thumbnail, plot=plot, fulltitle=fulltitle, fanart=fanart,
                                     contentTitle=scrapedtitle, contentType="movie", context=["buscar_trailer"]))
        else:
            referer = item.url
            url = urlparse.urljoin(item.url, scrapedurl)
            if item.tipo != "pelis":
                itemlist.append(Item(channel=item.channel, action="episodios", title=title, extra=referer, url=url,
                                     thumbnail=thumbnail, plot=plot, fulltitle=fulltitle, show=title, fanart=fanart,
                                     contentTitle=scrapedtitle, contentType="tvshow", context=["buscar_trailer"]))
    next_page = scrapertools.find_single_match(data,
                                               '<div class="onclick load-more-icon no-json" data-action="replace" data-url="([^"]+)">')
    if next_page != "":
        url = urlparse.urljoin(host, next_page).replace("amp;", "")
        itemlist.append(
            Item(channel=item.channel, action="pag_sig", token=item.token, title=">> Página siguiente",
                 extra=item.extra, url=url))
    try:
        import xbmcplugin
        xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_UNSORTED)
        xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_VIDEO_TITLE)
    except:
        pass
    return itemlist


def siguientes(item):  # Not used
    logger.info()
    data = httptools.downloadpage(item.url).data
    bloque = scrapertools.find_single_match(data, '<h2>Siguiendo</h2>(.*?)<div class="box">')
    patron = '<div class="coverMini shadow tiptip" title="([^"]+)">[^<]+'
    patron += '<img class="centeredPic centeredPicFalse" onerror="[^"]+" src="([^"]+)"[^<]+'
    patron += '<img src="/images/loading-mini.gif" class="loader"/>[^<]+'
    patron += '<div class="extra-info"><span class="year">[^<]+'
    patron += '</span><span class="value"><i class="icon-star"></i>[^<]+'
    patron += '</span></div>[^<]+'
    patron += '</div>[^<]+'
    patron += '</a>[^<]+'
    patron += '<a class="userepiinfo defaultLink" href="([^"]+)">(\d+)x(\d+)'
    matches = re.compile(patron, re.DOTALL).findall(data)
    itemlist = []
    for scrapedtitle, scrapedthumbnail, scrapedurl, scrapedsession, scrapedepisode in matches:
        title = scrapertools.htmlclean(scrapedtitle)
        session = scrapertools.htmlclean(scrapedsession)
        episode = scrapertools.htmlclean(scrapedepisode)
        thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
        fanart = thumbnail.replace("mediathumb", "mediabigcover")
        plot = ""
        title = session + "x" + episode + " - " + title
        referer = urlparse.urljoin(item.url, scrapedurl)
        url = referer
        itemlist.append(
            Item(channel=item.channel, action="episodio", title=title, url=url, thumbnail=thumbnail, plot=plot,
                 fulltitle=title, show=title, fanart=fanart, extra=session + "|" + episode))
    return itemlist


def episodio(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    session = str(int(item.extra.split("|")[0]))
    episode = str(int(item.extra.split("|")[1]))
    patrontemporada = '<div class="checkSeason"[^>]+>Temporada ' + session + '<div class="right" onclick="controller.checkSeason(.*?)\s+</div></div>'
    matchestemporadas = re.compile(patrontemporada, re.DOTALL).findall(data)
    for bloque_episodios in matchestemporadas:
        patron = '<span class="title defaultPopup" href="([^"]+)"><span class="number">' + episode + ' </span>([^<]+)</span>(\s*</div>\s*<span[^>]*><span[^>]*>[^<]*</span><span[^>]*>[^<]*</span></span><div[^>]*><button[^>]*><span[^>]*>[^<]*</span><span[^>]*>[^<]*</span></button><div class="action([^"]*)" data-action="seen">)?'
        matches = re.compile(patron, re.DOTALL).findall(bloque_episodios)
        for scrapedurl, scrapedtitle, info, visto in matches:
            if visto.strip() == "active":
                visto_string = "[visto] "
            else:
                visto_string = ""
            numero = episode
            title = visto_string + session + "x" + numero + " " + scrapertools.htmlclean(scrapedtitle)
            thumbnail = ""
            plot = ""
            epid = scrapertools.find_single_match(scrapedurl, "id/(\d+)")
            url = host + "/links/viewepisode/id/" + epid
            itemlist.append(
                Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot,
                     fulltitle=title, fanart=item.fanart, show=item.show))
    itemlist2 = []
    for capitulo in itemlist:
        itemlist2 = findvideos(capitulo)
    return itemlist2


def peliculas(item):
    logger.info()
    headers = {"X-Requested-With": "XMLHttpRequest"}
    data = httptools.downloadpage(item.url, headers=headers).data
    json_object = jsontools.load(data)
    data = json_object["content"]
    return parse_mixed_results(item, data)


def episodios(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patrontemporada = '<ul.*?<li class="season-header" >([^<]+)<(.*?)\s+</ul>'
    matchestemporadas = re.compile(patrontemporada, re.DOTALL).findall(data)
    idserie = scrapertools.find_single_match(data, 'data-model="5" data-id="(\d+)"')
    token = scrapertools.find_single_match(data, '_token" content="([^"]+)"')
    if config.get_platform().startswith("xbmc") or config.get_platform().startswith("kodi"):
        itemlist.append(Item(channel=item.channel, action="infosinopsis", title="INFO / SINOPSIS", url=item.url,
                             thumbnail=item.thumbnail, fanart=item.fanart, folder=False))
    for nombre_temporada, bloque_episodios in matchestemporadas:
        # Extract the episodes
        patron_episodio = '<li><a href="#"(.*?)</a></li>'
        # patron = '<li><a href="#" data-id="([^"]*)".*?data-href="([^"]+)">\s*<div class="name">\s*<span class="num">([^<]+)</span>\s*([^<]+)\s*</div>.*?"show-close-footer episode model([^"]+)"'
        matches = re.compile(patron_episodio, re.DOTALL).findall(bloque_episodios)
        for data_episodio in matches:

            scrapeid = scrapertools.find_single_match(data_episodio, ' data-id="([^"]*)"')
            scrapedurl = scrapertools.find_single_match(data_episodio, 'data-href="([^"]+)">\s*<div class="name">')
            numero = scrapertools.find_single_match(data_episodio, '<span class="num">([^<]+)</span>')
            scrapedtitle = scrapertools.find_single_match(data_episodio,
                                                          '<span class="num">.*?</span>\s*([^<]+)\s*</div>')
            visto = scrapertools.find_single_match(data_episodio, '"show-close-footer episode model([^"]+)"')

            title = nombre_temporada.replace("Temporada ", "").replace("Extras de la serie", "Extras 0").replace(" ", "") + "x" + numero + " " + scrapertools.htmlclean(scrapedtitle)
            if visto.strip() == "seen":
                title = "[visto] " + title

            thumbnail = item.thumbnail
            fanart = item.fanart
            plot = ""
            url = host + scrapedurl
            itemlist.append(
                Item(channel=item.channel, action="findvideos", nom_serie=item.title, tipo="5", title=title, url=url,
                     thumbnail=thumbnail, plot=plot, fulltitle=title, fanart=fanart, show=item.show))

    if config.get_videolibrary_support():
        show = re.sub(r"\s\(\d+\)\s\(\d+\.\d+\)", "", item.show)
        itemlist.append(
            Item(channel='megadede', title="Añadir esta serie a la videoteca", url=item.url, token=token,
                 action="add_serie_to_library", extra="episodios###", show=show))
        itemlist.append(
            Item(channel='megadede', title="Descargar todos los episodios de la serie", url=item.url, token=token,
                 action="download_all_episodes", extra="episodios", show=show))
        itemlist.append(Item(channel='megadede', title="Marcar como Pendiente", tipo="5", idtemp=idserie, token=token,
                             valor="pending", action="megadede_check", show=show))
        itemlist.append(Item(channel='megadede', title="Marcar como Siguiendo", tipo="5", idtemp=idserie, token=token,
                             valor="following", action="megadede_check", show=show))
        itemlist.append(Item(channel='megadede', title="Marcar como Finalizada", tipo="5", idtemp=idserie, token=token,
                             valor="seen", action="megadede_check", show=show))
        itemlist.append(Item(channel='megadede', title="Marcar como Favorita", tipo="5", idtemp=idserie, token=token,
                             valor="favorite", action="megadede_check", show=show))
        itemlist.append(
            Item(channel='megadede', title="Quitar marca", tipo="5", idtemp=idserie, token=token, valor="nothing",
                 action="megadede_check", show=show))
        itemlist.append(
            Item(channel='megadede', title="Añadir a lista", tipo="5", tipo_esp="lista", idtemp=idserie, token=token,
                 action="megadede_check", show=show))
    return itemlist


def parse_listas(item, bloque_lista):
    logger.info()
    if item.tipo == "populares":
        patron = '<div class="lista(.*?)</div>\s*</h4>'
    else:
        patron = '<div class="lista(.*?)</h4>\s*</div>'
    matches = re.compile(patron, re.DOTALL).findall(bloque_lista)
    itemlist = []
    for lista in matches:
        scrapedurl = scrapertools.htmlclean(scrapertools.find_single_match(lista, '<a href="([^"]+)">[^<]+</a>'))
        scrapedtitle = scrapertools.find_single_match(lista, '<a href="[^"]+">([^<]+)</a>')
        scrapedfollowers = scrapertools.find_single_match(lista, 'Follow: <span class="number">([^<]+)')
        scrapedseries = scrapertools.find_single_match(lista, '<div class="lista-stat badge">Series: ([^<]+)')
        scrapedpelis = scrapertools.find_single_match(lista, '<div class="lista-stat badge">Pelis: ([^<]+)')
        title = scrapertools.htmlclean(scrapedtitle) + ' ('
        if scrapedpelis != '':
            title += scrapedpelis + ' pelis, '
        if scrapedseries != '':
            title += scrapedseries + ' series, '
        if scrapedfollowers != '':
            title += scrapedfollowers + ' seguidores'
        title += ')'
        url = urlparse.urljoin(host, scrapedurl)
        thumbnail = ""
        itemlist.append(
            Item(channel=item.channel, action="peliculas", token=item.token, tipo="lista", title=title, url=url))
    nextpage = scrapertools.find_single_match(bloque_lista,
                                              '<div class="onclick load-more-icon no-json" data-action="replace" data-url="([^"]+)"')
    if nextpage != '':
        url = urlparse.urljoin(host, nextpage)
        itemlist.append(Item(channel=item.channel, action="lista_sig", token=item.token, tipo=item.tipo,
                             title=">> Página siguiente", extra=item.extra, url=url))
    try:
        import xbmcplugin
        xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_UNSORTED)
        xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_VIDEO_TITLE)
    except:
        pass
    return itemlist


def listas(item):
    logger.info()
    if item.tipo == "tuslistas":
        patron = 'Tus listas(.*?)>Listas que sigues<'
    elif item.tipo == "siguiendo":
        patron = '<h3>Listas que sigues</h3>(.*?)<h2>Listas populares</h2>'
    else:
        patron = '<div class="content">\s*<h2>Listas populares(.*?)</div>\s*</div>\s*</div>\s*</div>\s*</div>'
    data = httptools.downloadpage(item.url).data
    item.token = scrapertools.find_single_match(data, '_token" content="([^"]+)"').strip()
    bloque_lista = scrapertools.find_single_match(data, patron)
    return parse_listas(item, bloque_lista)


def lista_sig(item):
    logger.info()
    headers = {"X-Requested-With": "XMLHttpRequest"}
    data = httptools.downloadpage(item.url, headers=headers).data
    return parse_listas(item, data)


def pag_sig(item):
    logger.info()
    headers = {"X-Requested-With": "XMLHttpRequest"}
    data = httptools.downloadpage(item.url, headers=headers).data
    return parse_mixed_results(item, data)


def findvideos(item, verTodos=False):
    logger.info()
    data = httptools.downloadpage(item.url).data
    data_model = scrapertools.find_single_match(data, 'data-model="([^"]+)"')
    data_id = scrapertools.find_single_match(data, 'data-id="([^"]+)"')
    trailer = "https://www.youtube.com/watch?v=" + scrapertools.find_single_match(data,
                                                                                  'data-youtube="([^"]+)" class="youtube-link')
    url = host + "/aportes/" + data_model + "/" + data_id + "?popup=1"
    data = httptools.downloadpage(url).data
    token = scrapertools.find_single_match(data, '_token" content="([^"]+)"')
    patron = 'target="_blank" (.*?)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    itemlist = []
    idpeli = data_id
    if (config.get_platform().startswith("xbmc") or config.get_platform().startswith("kodi")) and data_model == "4":
        itemlist.append(Item(channel=item.channel, action="infosinopsis", title="INFO / SINOPSIS", url=item.url,
                             thumbnail=item.thumbnail, fanart=item.fanart, folder=False))
        itemlist.append(Item(channel=item.channel, action="play", title="TRAILER", url=item.url, trailer=trailer,
                             thumbnail=item.thumbnail, fanart=item.fanart, folder=False))
    itemsort = []
    # megadedesortlinks: 0: off, 1: rating, 2: language, 3: quality,
    # 4: language+quality, 5: language+rating, 6: language+quality+rating
    sortlinks = config.get_setting("megadedesortlinks", item.channel)
    # megadedeshowlinks: 0: all, 1: watch online, 2: download
    showlinks = config.get_setting("megadedeshowlinks", item.channel)
    if sortlinks != '' and sortlinks != "No":
        sortlinks = int(sortlinks)
    else:
        sortlinks = 0
    if showlinks != '' and showlinks != "No":
        showlinks = int(showlinks)
    else:
        showlinks = 0
    for match in matches:
        jdown = scrapertools.find_single_match(match, '<span class="fa fa-download"></span>([^<]+)')
        # skip links excluded by the watch-online/download setting
        if (showlinks == 1 and jdown != '') or (showlinks == 2 and jdown == ''):
            continue
        idioma_1 = ""
        idiomas = re.compile('<img src="https://cd.*?/images/flags/([^"]+).png', re.DOTALL).findall(match)
        idioma_0 = idiomas[0]
        if len(idiomas) > 1:
            idioma_1 = idiomas[1]
            idioma = idioma_0 + ", SUB " + idioma_1
        else:
            idioma_1 = ''
            idioma = idioma_0
        calidad_video = scrapertools.find_single_match(match,
                                                       '<span class="fa fa-video-camera"></span>(.*?)</div>').replace(" ", "").replace("\n", "")
        calidad_audio = scrapertools.find_single_match(match,
                                                       '<span class="fa fa-headphones"></span>(.*?)</div>').replace(" ", "").replace("\n", "")
        thumb_servidor = scrapertools.find_single_match(match, '<img src="([^"]+)">')
        nombre_servidor = scrapertools.find_single_match(thumb_servidor, "hosts/([^\.]+).png")
        if jdown != '':
            title = "Download " + nombre_servidor + " (" + idioma + ") (Calidad " + calidad_video.strip() + ", audio " + calidad_audio.strip() + ")"
        else:
            title = "Ver en " + nombre_servidor + " (" + idioma + ") (Calidad " + calidad_video.strip() + ", audio " + calidad_audio.strip() + ")"
        valoracion = 0
        reports = scrapertools.find_single_match(match,
                                                 '<i class="fa fa-exclamation-triangle"></i><br/>\s*<span class="number" data-num="([^"]*)">')
        valoracion -= int(reports)
        title += " (" + reports + " reps)"
        url = urlparse.urljoin(item.url, scrapertools.find_single_match(match, 'href="([^"]+)"'))
        thumbnail = thumb_servidor
        plot = ""
        if sortlinks > 0:
            # orden1 keeps "download" links behind "watch" links when sorting
            # orden2 is the score selected in the channel settings
            if sortlinks == 1:
                orden = valoracion
            elif sortlinks == 2:
                orden = valora_idioma(idioma_0, idioma_1)
            elif sortlinks == 3:
                orden = valora_calidad(calidad_video, calidad_audio)
            elif sortlinks == 4:
                orden = (valora_idioma(idioma_0, idioma_1) * 100) + valora_calidad(calidad_video, calidad_audio)
            elif sortlinks == 5:
                orden = (valora_idioma(idioma_0, idioma_1) * 1000) + valoracion
            elif sortlinks == 6:
                orden = (valora_idioma(idioma_0, idioma_1) * 100000) + (
                        valora_calidad(calidad_video, calidad_audio) * 1000) + valoracion
            itemsort.append(
                {'action': "play", 'title': title, 'data_id': data_id, 'token': token, 'tipo': data_model, 'url': url,
                 'thumbnail': thumbnail, 'fanart': item.fanart, 'plot': plot, 'extra': item.url,
                 'fulltitle': item.fulltitle, 'orden1': (jdown == ''), 'orden2': orden})
        else:
            itemlist.append(
                Item(channel=item.channel, action="play", data_id=data_id, token=token, tipo=data_model, title=title,
                     url=url, thumbnail=thumbnail, fanart=item.fanart, plot=plot, extra=item.url,
                     fulltitle=item.fulltitle))

    if sortlinks > 0:
        # megadedenumberlinks: 0: all, > 0: n*5 links (5, 10, 15, 20, ...)
        numberlinks = config.get_setting("megadedenumberlinks", item.channel)
        # numberlinks = int(numberlinks) if numberlinks != '' and numberlinks != "No" else 0
        if numberlinks != '' and numberlinks != "No":
            numberlinks = int(numberlinks)
        else:
            numberlinks = 0
        if numberlinks == 0:
            verTodos = True
        itemsort = sorted(itemsort, key=lambda k: (k['orden1'], k['orden2']), reverse=True)
        for i, subitem in enumerate(itemsort):
            if verTodos == False and i >= numberlinks:
                itemlist.append(
                    Item(channel=item.channel, action='findallvideos', title='Ver todos los enlaces', url=item.url,
                         extra=item.extra))
                break
            itemlist.append(
                Item(channel=item.channel, action=subitem['action'], title=subitem['title'], data_id=subitem['data_id'],
                     token=subitem['token'], tipo=subitem['tipo'], url=subitem['url'], thumbnail=subitem['thumbnail'],
                     fanart=subitem['fanart'], plot=subitem['plot'], extra=subitem['extra'],
                     fulltitle=subitem['fulltitle']))
    if data_model == "4":
        itemlist.append(
            Item(channel=item.channel, action="megadede_check", tipo="4", token=token, title="Marcar como Pendiente",
                 valor="pending", idtemp=idpeli))
        itemlist.append(
            Item(channel=item.channel, action="megadede_check", tipo="4", token=token, title="Marcar como Vista",
                 valor="seen", idtemp=idpeli))
        itemlist.append(
            Item(channel=item.channel, action="megadede_check", tipo="4", token=token, title="Marcar como Favorita",
                 valor="favorite", idtemp=idpeli))
        itemlist.append(Item(channel=item.channel, action="megadede_check", tipo="4", token=token, title="Quitar Marca",
                             valor="nothing", idtemp=idpeli))
        itemlist.append(
            Item(channel='megadede', title="Añadir a lista", tipo="4", tipo_esp="lista", idtemp=idpeli, token=token,
                 action="megadede_check"))
    return itemlist
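To make the composite ordering above concrete, a short worked example for sortlinks == 6 (the weights are the ones used in findvideos; the sample scores are hypothetical):

# say valora_idioma(...) == 90, valora_calidad(...) == 85, valoracion == -2 (two reports)
orden = (90 * 100000) + (85 * 1000) + (-2)   # = 9084998
# sorting by (orden1, orden2) with reverse=True then lists "watch" links first
# (orden1 is True for them, and True > False in Python 2), best scores first:
itemsort = sorted(itemsort, key=lambda k: (k['orden1'], k['orden2']), reverse=True)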
def findallvideos(item):
    return findvideos(item, True)


def play(item):
    itemlist = []
    if "trailer" in item:
        url = item.trailer
        itemlist = servertools.find_video_items(data=url)
        for videoitem in itemlist:
            videoitem.title = item.title
            videoitem.fulltitle = item.fulltitle
            videoitem.thumbnail = item.thumbnail
            videoitem.channel = item.channel
        return itemlist
    else:
        logger.info("url=" + item.url)
        # Make the request
        headers = {'Referer': item.extra}
        data = httptools.downloadpage(item.url, headers=headers).data
        url = scrapertools.find_single_match(data,
                                             '<a href="([^"]+)" target="_blank"><button class="btn btn-primary">visitar enlace</button>')
        url = urlparse.urljoin(host, url)
        headers = {'Referer': item.url}
        media_url = httptools.downloadpage(url, headers=headers, follow_redirects=False).headers.get("location")
        itemlist = servertools.find_video_items(data=media_url)
        for videoitem in itemlist:
            videoitem.title = item.title
            videoitem.fulltitle = item.fulltitle
            videoitem.thumbnail = item.thumbnail
            videoitem.channel = item.channel
        try:
            checkseen(item)
        except:
            pass
        return itemlist


def checkseen(item):
    logger.info(item)
    url_temp = ""
    if item.tipo == "8":
        url_temp = host + "/set/episode/" + item.data_id + "/seen"
        tipo_str = "series"
    else:
        url_temp = host + "/set/usermedia/" + item.tipo + "/" + item.data_id + "/seen"
        tipo_str = "pelis"
    headers = {"User-Agent": "Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko) "
                             "Chrome/61.0.3163.100 Safari/537.36", "Referer": host + "/serie/",
               "X-Requested-With": "XMLHttpRequest", "X-CSRF-TOKEN": item.token}
    data = httptools.downloadpage(url_temp, post="id=" + item.idtemp, headers=headers, replace_headers=True).data
    return True


def infosinopsis(item):
    logger.info()
    data = httptools.downloadpage(item.url).data
    scrapedtitle = scrapertools.find_single_match(data, '<div class="media-title">([^<]+)</div>')
    scrapedvalue = scrapertools.find_single_match(data, '<span class="value">([^<]+)</span>')
    scrapedyear = scrapertools.find_single_match(data,
                                                 '<strong>Fecha</strong>\s*<div class="mini-content">([^<]+)</div>').strip()
    scrapedduration = scrapertools.htmlclean(scrapertools.find_single_match(data,
                                                                            '<strong>Duración</strong>\s*<div class="mini-content">([^<]+)</div>').strip().replace(" ", "").replace("\n", ""))
    scrapedplot = scrapertools.find_single_match(data, '<div class="plot expandable">([^<]+)<div').strip()
    generos = scrapertools.find_single_match(data, '<strong>Género</strong>\s*<ul>(.*?)</ul>')
    scrapedgenres = re.compile('<li>([^<]+)</li>', re.DOTALL).findall(generos)
    scrapedcasting = re.compile(
        '<a href="%s/star/[^"]+"><div class="text-main">([^<]+)</div></a>\s*<div class="text-sub">\s*([^<]+)</div>' % host,
        re.DOTALL).findall(data)
    title = scrapertools.htmlclean(scrapedtitle)
    plot = "[B]Año: [/B]" + scrapedyear
    plot += " [B]Duración: [/B]" + scrapedduration
    plot += " [B]Puntuación usuarios: [/B]" + scrapedvalue
    plot += "\n[B]Géneros: [/B]" + ", ".join(scrapedgenres)
    plot += "\n\n[B]Sinopsis:[/B]\n" + scrapertools.htmlclean(scrapedplot)
    plot += "\n\n[B]Casting:[/B]\n"
    for actor, papel in scrapedcasting:
        plot += actor + " (" + papel.strip() + ")\n"
    tbd = TextBox("DialogTextViewer.xml", os.getcwd(), "Default")
    tbd.ask(title, plot)
    del tbd
    return


try:
    import xbmcgui

    class TextBox(xbmcgui.WindowXML):
        """ Create a skinned textbox window """

        def __init__(self, *args, **kwargs):
            pass

        def onInit(self):
            try:
                self.getControl(5).setText(self.text)
                self.getControl(1).setLabel(self.title)
            except:
                pass

        def onClick(self, controlId):
            pass

        def onFocus(self, controlId):
            pass

        def onAction(self, action):
            if action == 7:
                self.close()

        def ask(self, title, text):
            self.title = title
            self.text = text
            self.doModal()
except:
    pass


def valora_calidad(video, audio):
    prefs_video = ['hdmicro', 'hd1080', 'hd720', 'hdrip', 'dvdrip', 'rip', 'tc-screener', 'ts-screener']
    prefs_audio = ['dts', '5.1', 'rip', 'line', 'screener']
    video = ''.join(video.split()).lower()
    if video in prefs_video:
        pts = (9 - prefs_video.index(video)) * 10
    else:
        pts = (9 - 1) * 10
    audio = ''.join(audio.split()).lower()
    # audio adds a smaller, secondary weight on top of the video score
    if audio in prefs_audio:
        pts += 9 - prefs_audio.index(audio)
    else:
        pts += 9 - 1
    return pts


def valora_idioma(idioma_0, idioma_1):
    prefs = ['spanish', 'latino', 'catalan', 'english', 'french']
    if idioma_0 in prefs:
        pts = (9 - prefs.index(idioma_0)) * 10
    else:
        pts = (9 - 1) * 10
    if idioma_1 != '':  # if there are subtitles
        idioma_1 = idioma_1.replace(' SUB', '')
        if idioma_1 in prefs:
            pts += 8 - prefs.index(idioma_1)
        else:
            pts += 8 - 1
    else:
        pts += 9  # links without subtitles rank first
    return pts
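A short numeric trace of the two scoring helpers (the values follow directly from the functions above):

# valora_idioma('spanish', 'english'): base (9 - 0) * 10 = 90, subs + (8 - 3) = 5 -> 95
# valora_idioma('spanish', ''):        base 90, no subtitles + 9                 -> 99
# so an unsubbed Spanish link outranks a subbed one, as the comments intend
# valora_calidad('hd1080', 'dts'):     video (9 - 1) * 10 = 80, audio + (9 - 0)  -> 89
print valora_idioma('spanish', 'english')  # 95
print valora_idioma('spanish', '')         # 99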
def megadede_check(item):
    if item.tipo_esp == "lista":
        url_temp = host + "/listas/addmediapopup/" + item.tipo + "/" + item.idtemp + "?popup=1"
        data = httptools.downloadpage(url_temp).data
        patron = '<div class="lista model" data-model="10" data-id="([^"]+)">+'
        patron += '.*?<a href="/lista/[^"]+">([^<]+)</a>+'
        matches = re.compile(patron, re.DOTALL).findall(data)
        itemlist = []
        for id_lista, nombre_lista in matches:
            itemlist.append(Item(channel=item.channel, action="megadede_check", tipo=item.tipo, tipo_esp="add_list",
                                 token=item.token, title=nombre_lista, idlista=id_lista, idtemp=item.idtemp))
        if len(itemlist) < 1:
            itemlist.append(Item(channel=item.channel, action="", title="No tienes ninguna lista creada por ti!"))
        return itemlist
    else:
        if item.tipo == "10" or item.tipo == "lista":
            url_temp = host + "/set/lista/" + item.idtemp + "/" + item.valor
        else:
            if item.tipo_esp == "add_list":
                url_temp = host + "/set/listamedia/" + item.idlista + "/add/" + item.tipo + "/" + item.idtemp
            else:
                url_temp = host + "/set/usermedia/" + item.tipo + "/" + item.idtemp + "/" + item.valor
        if item.tipo == "5":
            tipo_str = "series"
        elif item.tipo == "lista":
            tipo_str = "listas"
        else:
            tipo_str = "pelis"
        headers = {"User-Agent": "Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko) "
                                 "Chrome/61.0.3163.100 Safari/537.36", "Referer": host + "/" + tipo_str,
                   "X-Requested-With": "XMLHttpRequest", "X-CSRF-TOKEN": item.token}
        data = httptools.downloadpage(url_temp, post="id=" + item.idtemp, headers=headers,
                                      replace_headers=True).data.strip()
        dialog = platformtools
        dialog.ok = platformtools.dialog_ok
        if data == "1":
            # check the specific actions first so each message is reachable
            if item.valor == "nothing":
                dialog.ok('SUCCESS', 'Marca eliminada con éxito!')
            elif item.valor == "unfollow":
                dialog.ok('SUCCESS', 'Has dejado de seguir esta lista!')
            elif item.valor == "follow":
                dialog.ok('SUCCESS', 'Has comenzado a seguir esta lista!')
            elif item.tipo_esp == "add_list":
                dialog.ok('SUCCESS', 'Añadido a la lista!')
            else:
                dialog.ok('SUCCESS', 'Marca realizada con éxito!')
        else:
            dialog.ok('ERROR', 'No se pudo realizar la acción!')
plugin.video.alfa/channels/metaserie.json (deleted)
@@ -1,36 +0,0 @@
{
    "id": "metaserie",
    "name": "MetaSerie (Latino)",
    "active": true,
    "adult": false,
    "language": ["lat"],
    "thumbnail": "https://s32.postimg.cc/7g50yo39h/metaserie.png",
    "banner": "https://s31.postimg.cc/u6yddil8r/metaserie_banner.png",
    "categories": [
        "tvshow"
    ],
    "settings": [
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": false,
            "enabled": false,
            "visible": false
        },
        {
            "id": "filter_languages",
            "type": "list",
            "label": "Mostrar enlaces en idioma...",
            "default": 0,
            "enabled": true,
            "visible": true,
            "lvalues": [
                "No filtrar",
                "Latino",
                "Español",
                "VOS"
            ]
        }
    ]
}
plugin.video.alfa/channels/metaserie.py (deleted)
@@ -1,313 +0,0 @@
# -*- coding: utf-8 -*-

import re
import urlparse

from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger

IDIOMAS = {'la': 'Latino', 'es': 'Español', 'sub': 'VOS'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = [
    'openload',
    'gamovideo',
    'powvideo',
    'streamplay',
    'streaminto',
    'streame',
    'flashx'
]


def mainlist(item):
    logger.info()

    autoplay.init(item.channel, list_servers, list_quality)
    itemlist = []

    itemlist.append(item.clone(title="Series",
                               action="todas",
                               url="http://metaserie.com/series-agregadas",
                               thumbnail='https://s27.postimg.cc/iahczwgrn/series.png',
                               fanart='https://s27.postimg.cc/iahczwgrn/series.png'
                               ))

    # itemlist.append(item.clone(title="Anime",
    #                            action="todas",
    #                            url="http://metaserie.com/animes-agregados",
    #                            thumbnail='https://s2.postimg.cc/s38borokp/anime.png',
    #                            fanart='https://s2.postimg.cc/s38borokp/anime.png'
    #                            ))

    itemlist.append(item.clone(title="Buscar",
                               action="search",
                               url="http://www.metaserie.com/?s=",
                               thumbnail='https://s30.postimg.cc/pei7txpa9/buscar.png',
                               fanart='https://s30.postimg.cc/pei7txpa9/buscar.png'
                               ))

    autoplay.show_option(item.channel, itemlist)

    return itemlist


def todas(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    patron = '<div class=poster>.*?<a href=(.*?) title=(.*?)en(.*?)>.*?'
    patron += '<div class=poster_efecto><span>(.*?)<.*?div>.*?<img.*?src=(.*?) class'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle, lang, scrapedplot, scrapedthumbnail in matches:
        if 'latino' in lang:
            idioma = 'Latino'
        elif 'español' in lang:
            idioma = 'Español'
        url = urlparse.urljoin(item.url, scrapedurl)
        title = scrapertools.decodeHtmlentities(scrapedtitle) + ' (%s)' % idioma
        thumbnail = scrapedthumbnail
        plot = scrapedplot
        fanart = 'https://s32.postimg.cc/7g50yo39h/metaserie.png'
        itemlist.append(
            Item(channel=item.channel,
                 action="temporadas",
                 title=title,
                 url=url,
                 thumbnail=thumbnail,
                 plot=plot,
                 fanart=fanart,
                 contentSerieName=title,
                 context=autoplay.context
                 ))

    # Pagination

    next_page_url = scrapertools.find_single_match(data,
                                                   '<li><a class=next page-numbers local-link href=(.*?)>».*?li>')
    if next_page_url != "":
        itemlist.append(Item(channel=item.channel,
                             action="todas",
                             title=">> Página siguiente",
                             url=next_page_url,
                             thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png'
                             ))
    return itemlist


def temporadas(item):
    logger.info()
    itemlist = []
    templist = []

    data = httptools.downloadpage(item.url).data
    patron = '<li class=".*?="([^"]+)".*?>([^<]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        url = scrapedurl
        contentSeasonNumber = re.findall(r'.*?temporada-([^-]+)-', url)
        title = scrapedtitle
        title = title.replace("&", "x")
        thumbnail = item.thumbnail
        plot = item.plot
        fanart = scrapertools.find_single_match(data, '<img src="([^"]+)"/>.*?</a>')
        itemlist.append(
            Item(channel=item.channel,
                 action='episodiosxtemp',
                 title=title,
                 fulltitle=item.contentSerieName,
                 url=url,
                 thumbnail=thumbnail,
                 plot=plot,
                 fanart=fanart,
                 contentSerieName=item.contentSerieName,
                 contentSeasonNumber=contentSeasonNumber,
                 context=item.context
                 ))

    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            Item(channel=item.channel,
                 title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
                 url=item.url,
                 action="add_serie_to_library",
                 extra='episodios',
                 contentSerieName=item.contentSerieName
                 ))

    return itemlist


def episodios(item):
    logger.info()
    itemlist = []
    templist = temporadas(item)
    for tempitem in templist:
        itemlist += episodiosxtemp(tempitem)

    return itemlist


def more_episodes(item, itemlist, url):
    logger.info()
    templist = []
    item.url = url
    templist = episodiosxtemp(item)
    itemlist += templist
    return itemlist


def episodiosxtemp(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<td><h3 class=".*?href="([^"]+)".*?">([^<]+).*?td>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches[::-1]:
        url = scrapedurl
        contentEpisodeNumber = re.findall(r'.*?x([^\/]+)\/', url)
        title = scrapedtitle
        title = title.replace("×", "x")
        thumbnail = item.thumbnail
        plot = item.plot
        fanart = item.fanart
        itemlist.append(Item(channel=item.channel,
                             action="findvideos",
                             title=title,
                             fulltitle=item.fulltitle,
                             url=url,
                             thumbnail=item.thumbnail,
                             plot=plot,
                             contentSerieName=item.contentSerieName,
                             contentSeasonNumber=item.contentSeasonNumber,
                             contentEpisodeNumber=contentEpisodeNumber,
                             context=item.context
                             ))
    more_pages = scrapertools.find_single_match(data,
                                                '<li><a class="next page-numbers local-link" href="(.*?)">»')
    logger.debug('more_pages: %s' % more_pages)
    if more_pages:
        itemlist = more_episodes(item, itemlist, more_pages)
    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = item.url + texto
    itemlist = []
    if texto != '':
        try:
            data = httptools.downloadpage(item.url).data
            patron = '<a href="([^\"]+)" rel="bookmark" class="local-link">([^<]+)<.*?'
            matches = re.compile(patron, re.DOTALL).findall(data)
            scrapertools.printMatches(matches)
            for scrapedurl, scrapedtitle in matches:
                url = scrapedurl
                title = scrapertools.decodeHtmlentities(scrapedtitle)
                thumbnail = ''
                plot = ''
                itemlist.append(Item(channel=item.channel,
                                     action="temporadas",
                                     title=title,
                                     fulltitle=title,
                                     url=url,
                                     thumbnail=thumbnail,
                                     plot=plot,
                                     folder=True,
                                     contentSerieName=title
                                     ))

            return itemlist
        except:
            import sys
            for line in sys.exc_info():
                logger.error("%s" % line)
            return []


def findvideos(item):
    logger.info()
    itemlist = []
    audio = {'la': '[COLOR limegreen]LATINO[/COLOR]', 'es': '[COLOR yellow]ESPAÑOL[/COLOR]',
             'sub': '[COLOR red]ORIGINAL SUBTITULADO[/COLOR]'}
    data = httptools.downloadpage(item.url).data
    patron = '<td><img src="http:\/\/metaserie\.com\/wp-content\/themes\/mstheme\/gt\/assets\/img\/([^\.]+).png" ' \
             'width="20".*?<\/td>.*?<td><img src="http:\/\/www\.google\.com\/s2\/favicons\?domain=([^"]+)" \/> (' \
             '[^<]+)<\/td>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    anterior = scrapertools.find_single_match(data,
                                              '<th scope="col"><a href="([^"]+)" rel="prev" '
                                              'class="local-link">Anterior</a></th>')
    siguiente = scrapertools.find_single_match(data,
                                               '<th scope="col"><a href="([^"]+)" rel="next" '
                                               'class="local-link">Siguiente</a></th>')

    for scrapedid, scrapedurl, scrapedserv in matches:
        url = scrapedurl
        server = servertools.get_server_from_url(url).lower()
        title = item.title + ' audio ' + audio[scrapedid] + ' en ' + server
        extra = item.thumbnail
        thumbnail = servertools.guess_server_thumbnail(server)

        itemlist.append(Item(channel=item.channel,
                             action="play",
                             title=title,
                             fulltitle=item.contentSerieName,
                             url=url,
                             thumbnail=thumbnail,
                             extra=extra,
                             language=IDIOMAS[scrapedid],
                             server=server,
                             ))
    if item.extra1 != 'capitulos':
        if anterior != '':
            itemlist.append(Item(channel=item.channel,
                                 action="findvideos",
                                 title='Capitulo Anterior',
                                 url=anterior,
                                 thumbnail='https://s31.postimg.cc/k5kpwyrgb/anterior.png'
                                 ))
        if siguiente != '':
            itemlist.append(Item(channel=item.channel,
                                 action="findvideos",
                                 title='Capitulo Siguiente',
                                 url=siguiente,
                                 thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png'
                                 ))

    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required by AutoPlay
    autoplay.start(itemlist, item)

    return itemlist


def play(item):
    logger.info()
    itemlist = []
    from core import servertools
    itemlist.extend(servertools.find_video_items(data=item.url))
    for videoitem in itemlist:
        video = item.channel
        videoitem.title = item.fulltitle
        videoitem.folder = False
        videoitem.thumbnail = item.extra
        videoitem.fulltitle = item.fulltitle
    return itemlist
plugin.video.alfa/channels/mundiseries.json (deleted)
@@ -1,12 +0,0 @@
{
    "id": "mundiseries",
    "name": "Mundiseries",
    "active": true,
    "adult": false,
    "language": ["cast", "lat"],
    "thumbnail": "https://imgur.com/GdGMFi1.png",
    "banner": "https://imgur.com/1bDbYY1.png",
    "categories": [
        "tvshow"
    ]
}
@@ -1,101 +0,0 @@
# -*- coding: utf-8 -*-

import re
import urlparse

from channels import filtertools
from platformcode import config, logger
from platformcode import platformtools
from core import scrapertools
from core import servertools
from core.item import Item
from core import httptools
from channels import autoplay
from channelselector import get_thumb

host = "http://mundiseries.com"
list_servers = ['okru']
list_quality = ['default']


def mainlist(item):
    logger.info()
    itemlist = list()
    autoplay.init(item.channel, list_servers, list_quality)

    itemlist.append(Item(channel=item.channel, action="lista", title="Series",
                         url=urlparse.urljoin(host, "/lista-de-series"), thumbnail=get_thumb('tvshows', auto=True)))
    autoplay.show_option(item.channel, itemlist)

    return itemlist


def lista(item):
    logger.info()

    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    patron = '<a href="([^"]+)"><img src="([^"]+)" alt="ver ([^"]+) online'
    matches = scrapertools.find_multiple_matches(data, patron)
    for link, thumbnail, name in matches:
        itemlist.append(item.clone(title=name, url=host + link, thumbnail=host + thumbnail, action="temporada"))
    return itemlist


def temporada(item):
    logger.info()

    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    logger.info("preon,:" + data)
    patron = '<a href="([^"]+)"><div class="item-temporada"><img alt=".+?" src="([^"]+)"><div .+?>Ver ([^"]+)<\/div><\/a>'
    matches = scrapertools.find_multiple_matches(data, patron)
    for link, thumbnail, name in matches:
        itemlist.append(item.clone(title=name, url=host + link, thumbnail=host + thumbnail, action="episodios", context=autoplay.context))
    return itemlist


def episodios(item):
    logger.info()

    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    patron_caps = 'href="http:.+?\/mundiseries.+?com([^"]+)" alt="([^"]+) Capitulo ([^"]+) Temporada ([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron_caps)
    patron_show = '<h1 class="h-responsive center">.+?'
    patron_show += '<font color=".+?>([^"]+)<\/a><\/font>'
    show = scrapertools.find_single_match(data, patron_show)
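    # Compose "SxNN Title" labels; e.g. temp='1', cap='5', name='Piloto' -> "1x05 Piloto"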
    for link, name, cap, temp in matches:
        if '|' in cap:
            cap = cap.replace('|', '')
        if '|' in temp:
            temp = temp.replace('|', '')
        if '|' in name:
            name = name.replace('|', '')
        title = "%sx%s %s" % (temp, str(cap).zfill(2), name)
        url = host + link
        itemlist.append(Item(channel=item.channel, action="findvideos",
                             title=title, url=url, show=show))

    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir Temporada/Serie a la biblioteca de Kodi[/COLOR]", url=item.url,
                             action="add_serie_to_library", extra="episodios", show=show))
    return itemlist

def findvideos(item):
    logger.info()

    itemlist = []
    id = ""
    type = ""
    data = httptools.downloadpage(item.url).data
    itemlist.extend(servertools.find_video_items(data=data))
    # Format the titles only after the server items have been collected
    it2 = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())

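    # Alfa convention: "url###id;type" appends bookkeeping state to the url for
    # filtertools/autoplay; here id and type are empty placeholders.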
    for item in it2:
        if "###" not in item.url:
            item.url += "###" + id + ";" + type
    for videoitem in itemlist:
        videoitem.channel = item.channel
    autoplay.start(itemlist, item)
    return itemlist
@@ -1,7 +1,7 @@
{
    "id": "peliscon",
    "name": "Peliscon",
    "active": true,
    "active": false,
    "adult": false,
    "language": ["cast"],
    "thumbnail": "http://imgur.com/yTQRPUJ.png",

@@ -1,41 +0,0 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "http://es.cumlouder.com/embed/([a-z0-9A-Z]+)/",
                "url": "http://es.cumlouder.com/embed/\\1/"
            }
        ]
    },
    "free": true,
    "id": "cumlouder",
    "name": "cumlouder",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "@60654",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "@60655",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ]
}
@@ -1,19 +0,0 @@
# -*- coding: utf-8 -*-

from core import scrapertools
from platformcode import logger


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("url=" + page_url)
    data = scrapertools.cache_page(page_url)
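    # The player page inlines "var urlVideo = '...'" holding the direct stream url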
    media_url = scrapertools.get_match(data, "var urlVideo = \'([^']+)\';")
    video_urls = []
    video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [cumlouder]", media_url])

    return video_urls
@@ -1,28 +0,0 @@
# -*- coding: utf-8 -*-

from core import httptools
from core import scrapertools
from platformcode import logger


# def test_video_exists(page_url):
#     logger.info("(page_url='%s')" % page_url)
#     data = httptools.downloadpage(page_url).data

#     if "File was deleted" in data:
#         return False, "[eroshare] El archivo no existe o ha sido borrado"

#     return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("url=" + page_url)
    video_urls = []
    data = httptools.downloadpage(page_url).data
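    # The embed page inlines a JSON blob; "url_mp4" carries the direct stream url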
    url = scrapertools.find_single_match(data, '"url_mp4":"(.*?)"')
    video_urls.append(['eroshare', url])

    # for video_url in video_urls:
    #     logger.info("%s - %s" % (video_url[0], video_url[1]))

    return video_urls
plugin.video.alfa/servers/eroshare.json → plugin.video.alfa/servers/fembed.json
Executable file → Normal file
@@ -4,14 +4,14 @@
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "(https://eroshare.com/embed/[a-zA-Z0-9]+)",
                "pattern": "(https://www.fembed.com/v/[A-z0-9]+)",
                "url": "\\1"
            }
        ]
    },
    "free": true,
    "id": "eroshare",
    "name": "eroshare",
    "id": "fembed",
    "name": "Fembed",
    "settings": [
        {
            "default": false,
@@ -38,5 +38,6 @@
            "visible": false
        }
    ],
    "thumbnail": "https://s31.postimg.cc/cewftt397/eroshare.png"
}
    "thumbnail": "https://i.postimg.cc/prdPwBhT/fembed1.png",
    "version": 1
}
plugin.video.alfa/servers/fembed.py
Normal file
@@ -0,0 +1,24 @@
# -*- coding: utf-8 -*-

from core import httptools
from core import scrapertools
from core import jsontools
from platformcode import logger

def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data
    if "Sorry 404 not found" in data:
        return False, "[fembed] El fichero ha sido borrado"
    return True, ""


def get_video_url(page_url, user="", password="", video_password=""):
    logger.info("(page_url='%s')" % page_url)
    video_urls = []
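    # This connector swaps /v/ for /api/sources/ and POSTs an empty form; the
    # endpoint answers with JSON whose "data" list holds label/file pairs.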
    page_url = page_url.replace("/v/", "/api/sources/")
    data = httptools.downloadpage(page_url, post={}).data
    data = jsontools.load(data)
    for videos in data["data"]:
        video_urls.append([videos["label"] + " [fembed]", videos["file"]])
    return video_urls
@@ -1,42 +0,0 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "http://www.pelismundo.com/gkvip/vip/playervip3/.*?id=([A-z0-9]+)",
                "url": "http://www.pelismundo.com/gkvip/vip/playervip3/player.php?id=\\1"
            }
        ]
    },
    "free": true,
    "id": "pelismundo",
    "name": "pelismundo",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "@60654",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "@60655",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ],
    "thumbnail": "https://s26.postimg.cc/72c9mr3ux/pelismundo1.png"
}
@@ -1,33 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Alfa addon - KODI Plugin
# Connector for pelismundo
# https://github.com/alfa-addon
# ------------------------------------------------------------

from core import httptools
from core import scrapertools
from platformcode import logger


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url, add_referer=True).data
    if "Object not found" in data or "no longer exists" in data or '"sources": [false]' in data or 'sources: []' in data:
        return False, "[pelismundo] El archivo no existe o ha sido borrado"

    return True, ""


def get_video_url(page_url, user="", password="", video_password=""):
    logger.info("(page_url='%s')" % page_url)
    video_urls = []
    data = httptools.downloadpage(page_url, add_referer=True).data
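    # Isolate the JWPlayer-style "sources" block first, then pull (url, quality)
    # pairs out of it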
    patron = 'sources.*?}],'
    bloque = scrapertools.find_single_match(data, patron)
    patron = 'file.*?"([^"]+)".*?label:"([^"]+)"'
    match = scrapertools.find_multiple_matches(bloque, patron)
    for scrapedurl, scrapedquality in match:
        video_urls.append([scrapedquality + " [pelismundo]", scrapedurl])
    # video_urls.sort(key=lambda it: int(it[0].split("p ", 1)[0]))
    return video_urls
@@ -9,28 +9,24 @@ from core import httptools
from core import scrapertools
from platformcode import logger

import sys, os
import re, base64
from lib.aadecode import decode as aadecode


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)

    data = httptools.downloadpage(page_url)

    if data.code == 404:
        return False, "[upvid] El archivo no existe o ha sido borrado"
    if "<title>video is no longer available" in data.data:
        return False, "[upvid] El archivo no existe o ha sido borrado"
    return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("url=" + page_url)

    video_urls = []
    headers = {'referer': page_url}

    for i in range(0, 3):
        data = httptools.downloadpage(page_url, headers=headers).data
        data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
@@ -38,27 +34,15 @@ def get_video_url(page_url, premium = False, user = "", password = "", video_pas
            break
        else:
            page_url = scrapertools.find_single_match(data, "iframe src=(.*?) scrolling")


    # logger.debug(data)
    # decode the aaencoded script to obtain the decoder function and its key
    # ------------------------------------------------
    code = re.findall('<script>\s*゚ω゚(.*?)</script>', data, flags=re.DOTALL)[0]
    text_decode = aadecode(code)
    funcion, clave = re.findall("func\.innerHTML = (\w*)\('([^']*)', ", text_decode, flags=re.DOTALL)[0]

    # decode the javascript stored in the hidden html input fields
    # --------------------------------------------
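    # resuelve() is presumably the key-based string decoder defined elsewhere in
    # this connector (outside this hunk); it reverses the obfuscation applied to
    # the hidden 'func'/'code' fields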
    oculto = re.findall('<input type=hidden value=([^ ]+) id=func', data, flags=re.DOTALL)[0]
    funciones = resuelve(clave, base64.b64decode(oculto))

    oculto = re.findall('<input type=hidden value=([^ ]+) id=code', data, flags=re.DOTALL)[0]
    codigo = resuelve(clave, base64.b64decode(oculto))

    url, type = scrapertools.find_single_match(funciones, "setAttribute\('src', '(.*?)'\);\s.*?type', 'video/(.*?)'")

    video_urls.append(['upvid [%s]' % type, url])

    return video_urls