Merge remote-tracking branch 'alfa-addon/master'

unknown committed on 2018-10-05 14:09:35 -03:00
62 changed files with 1856 additions and 1272 deletions

mediaserver/lib/xbmc.py Normal file
View File

@@ -0,0 +1,11 @@
# -*- coding: utf-8 -*-
# Stub library that mimics xbmc so modules that use it do not fail under mediaserver,
# avoiding special-case exception handling in the addon code.
def getInfoLabel(parm):
    if parm == 'Container.PluginName': return 'plugin.video.alfa'
    elif parm == 'Container.FolderName': return 'Alfa'
    return ''
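
As a sketch of what this stub enables (the caller below is hypothetical): under mediaserver, "import xbmc" resolves to this file instead of Kodi's built-in module, so label lookups degrade to safe defaults instead of raising ImportError.

import xbmc  # resolves to mediaserver/lib/xbmc.py when running outside Kodi
print(xbmc.getInfoLabel('Container.PluginName'))  # 'plugin.video.alfa'
print(xbmc.getInfoLabel('Container.FolderName'))  # 'Alfa'
print(xbmc.getInfoLabel('System.BuildVersion'))   # '' for any other label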

View File

@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="2.7.5" provider-name="Alfa Addon">
<addon id="plugin.video.alfa" name="Alfa" version="2.7.7" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -19,20 +19,16 @@
</assets>
<news>[B]These are the changes in this version:[/B]
[COLOR green][B]Channels added and fixes[/B][/COLOR]
¤ divxtotal ¤ elitetorrent
¤ estrenosgo ¤ grantorrent
¤ mejortorrent1 ¤ newpct1
¤ pelismagnet ¤ todopeliculas
¤ allpeliculas ¤ puyasubs
¤ yape ¤ dilo
¤ goovie ¤ pelisipad
¤ seriesblanco ¤ pepecine
¤ maxipelis24 ¤ pelisplanet
¤ yts
¤ cinecalidad ¤ verpelis
¤ pepecine ¤ pelispedia
¤ pelisplusco ¤ seriesblancoxyz
¤ seriesdanko ¤ pedropolis
¤ pelisplanet ¤ danimados
¤ fembed ¤ upvid
¤ megadede ¤ crunchyroll
¤ pelismagnet
¤ internal fixes
¤ Thanks to @wrlopez and @chivmalev for contributing to this version
</news>
<description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>
<summary lang="en">Browse web pages using Kodi</summary>

View File

@@ -63,14 +63,15 @@ def categorias(item):
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data, '<ul class="ordenar-por ordenar-por-categoria">'
'(.*?)<div class="publis-bottom">')
'(.*?)<\/ul>')
patron = '<div class="muestra-categorias">.*?<a class="thumb" href="([^"]+)".*?<img class="categorias" src="([^"]+)".*?<div class="nombre">([^"]+)</div>'
#patron = '<div class="muestra-categorias">.*?<a class="thumb" href="([^"]+)".*?<img class="categorias" src="([^"]+)".*?<div class="nombre">([^"]+)</div>'
patron = "<li><a href='([^']+)'\s?title='([^']+)'>.*?<\/a><\/li>"
matches = scrapertools.find_multiple_matches(bloque, patron)
for url, thumbnail, title in matches:
for url, title in matches:
url = host + url
thumbnail = "http:" + thumbnail
itemlist.append(item.clone(action="findvideos", title=title, url=url, thumbnail=thumbnail))
#thumbnail = "http:" + thumbnail
itemlist.append(item.clone(action="findvideos", title=title, url=url))
return itemlist
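
A quick check of the new category pattern against made-up markup (the <li> structure mirrors the hunk above; the href/title values are invented):

import re

html = "<li><a href='/categorias/accion' title='Accion'>Accion</a></li>"
patron = "<li><a href='([^']+)'\s?title='([^']+)'>.*?<\/a><\/li>"
print(re.findall(patron, html))  # [('/categorias/accion', 'Accion')]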

View File

@@ -3,11 +3,13 @@
"name": "CineCalidad",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"language": ["cast", "lat", "por"],
"thumbnail": "https://s9.postimg.cc/58xyblsvj/cinecalidad.png",
"banner": "https://s32.postimg.cc/kihkdpx1x/banner_cinecalidad.png",
"categories": [
"movie",
"direct",
"vos",
"torrent"
],
"settings": [
@@ -15,9 +17,9 @@
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
"default": true,
"enabled": true,
"visible": true
},
{
"id": "filter_languages",

View File

@@ -2,13 +2,11 @@
import re
from core import filetools
from core import jsontools
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core import videolibrarytools
from core.item import Item
from platformcode import config, platformtools, logger
from channelselector import get_thumb
@@ -32,19 +30,16 @@ else:
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(item.clone(title="Películas", text_color=color2, action="", text_bold=True))
itemlist.append(item.clone(action="peliculas", title=" Novedades",
url="http://www.clasicofilm.com/feeds/posts/summary?start-index=1&max-results=20&alt=json-in-script&callback=finddatepost",
url = host + "feeds/posts/summary?start-index=1&max-results=20&alt=json-in-script&callback=finddatepost",
thumbnail=get_thumb('newest', auto=True), text_color=color1))
itemlist.append(item.clone(action="generos", title=" Por géneros", url=host,
thumbnail=get_thumb('genres', auto=True), text_color=color1))
itemlist.append(item.clone(title="", action=""))
itemlist.append(item.clone(action="search", title="Buscar...", text_color=color3,
thumbnail=get_thumb('search', auto=True)))
itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False))
return itemlist
@@ -56,7 +51,6 @@ def configuracion(item):
def search(item, texto):
logger.info()
data = httptools.downloadpage(host).data
texto = texto.replace(" ", "%20")
item.url = host + "search?q=%s" % texto
try:
@@ -75,20 +69,17 @@ def newest(categoria):
item = Item()
try:
if categoria == 'peliculas':
item.url = "http://www.clasicofilm.com/feeds/posts/summary?start-index=1&max-results=20&alt=json-in-script&callback=finddatepost"
item.url = host + "feeds/posts/summary?start-index=1&max-results=20&alt=json-in-script&callback=finddatepost"
item.action = "peliculas"
itemlist = peliculas(item)
if itemlist[-1].action == "peliculas":
itemlist.pop()
# Catch the exception so the "novedades" feed is not interrupted when one channel fails
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
@@ -96,13 +87,10 @@ def peliculas(item):
logger.info()
itemlist = []
item.text_color = color2
# Download the page
data = httptools.downloadpage(item.url).data
data = scrapertools.find_single_match(data, 'finddatepost\((\{.*?\]\}\})\);')
data = jsontools.load(data)["feed"]
for entry in data["entry"]:
for link in entry["link"]:
if link["rel"] == "alternate":
@@ -124,17 +112,12 @@ def peliculas(item):
url=url, thumbnail=thumbnail, infoLabels=infolabels,
contentTitle=fulltitle, contentType="movie")
itemlist.append(new_item)
try:
tmdb.set_infoLabels(itemlist, __modo_grafico__)
except:
pass
tmdb.set_infoLabels(itemlist, __modo_grafico__)
actualpage = int(scrapertools.find_single_match(item.url, 'start-index=(\d+)'))
totalresults = int(data["openSearch$totalResults"]["$t"])
if actualpage + 20 < totalresults:
url_next = item.url.replace("start-index=" + str(actualpage), "start-index=" + str(actualpage + 20))
itemlist.append(Item(channel=item.channel, action=item.action, title=">> Página Siguiente", url=url_next))
return itemlist
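
The feed handling above can be traced with a minimal stand-in payload (invented, but shaped like Blogger's JSONP; json.loads stands in for jsontools.load): the regex strips the finddatepost(...) wrapper, and the pagination check compares start-index against openSearch$totalResults.

import re, json

raw = 'finddatepost({"feed":{"openSearch$totalResults":{"$t":"45"},"entry":[{"link":[]}]}});'
feed = json.loads(re.search(r'finddatepost\((\{.*?\]\}\})\);', raw).group(1))["feed"]
total = int(feed["openSearch$totalResults"]["$t"])
actualpage = 1
print(actualpage + 20 < total)  # True, so a ">> Página Siguiente" item would be appended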
@@ -163,7 +146,6 @@ def busqueda(item):
def generos(item):
logger.info()
itemlist = []
# Download the page
data = httptools.downloadpage(item.url).data
patron = '<b>([^<]+)</b><br\s*/>\s*<script src="([^"]+)"'
@@ -174,50 +156,35 @@ def generos(item):
.replace("recentpostslist", "finddatepost")
itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
thumbnail=item.thumbnail, text_color=color3))
itemlist.sort(key=lambda x: x.title)
return itemlist
def findvideos(item):
def decodifica_id(txt):
res = ''
for i in range(0, len(txt), 3):
res += '\\u0' + txt[i:i+3]
return res.decode('unicode-escape') # e.g.: {"v":"9KD2iEmiYLsF"}
def findvideos(item):
logger.info()
itemlist = []
if item.infoLabels["tmdb_id"]:
tmdb.set_infoLabels_item(item, __modo_grafico__)
data = httptools.downloadpage(item.url).data
iframe = scrapertools.find_single_match(data, '<iframe src="([^"]+)"')
data = data.replace("googleusercontent","malo") # para que no busque enlaces erroneos de gvideo
if "goo.gl/" in iframe:
data += httptools.downloadpage(iframe, follow_redirects=False, only_headers=True).headers.get("location", "")
itemlist = servertools.find_video_items(item, data)
library_path = config.get_videolibrary_path()
if "data:text/javascript;base64" in data:
div_id = scrapertools.find_single_match(data, '<div id="([0-9a-fA-F]+)"')
# ~ logger.info(div_id)
vid_id = scrapertools.find_single_match(decodifica_id(div_id), ':"([^"]+)"')
# ~ logger.info(vid_id)
itemlist.append(item.clone(url='http://netu.tv/watch_video.php?v='+vid_id, server='netutv', action='play'))
else:
iframe = scrapertools.find_single_match(data, '<iframe width="720".*?src="([^"]+)"')
data = data.replace("googleusercontent","malo") # para que no busque enlaces erroneos de gvideo
if "goo.gl/" in iframe:
data += httptools.downloadpage(iframe, follow_redirects=False, only_headers=True).headers.get("location", "")
itemlist = servertools.find_video_items(item, data)
if config.get_videolibrary_support():
title = "Añadir película a la videoteca"
if item.infoLabels["imdb_id"] and not library_path.lower().startswith("smb://"):
try:
movie_path = filetools.join(config.get_videolibrary_path(), 'CINE')
files = filetools.walk(movie_path)
for dirpath, dirname, filename in files:
for f in filename:
if item.infoLabels["imdb_id"] in f and f.endswith(".nfo"):
head_nfo, it = videolibrarytools.read_nfo(filetools.join(dirpath, dirname, f))
canales = it.library_urls.keys()
canales.sort()
if "clasicofilm" in canales:
canales.pop(canales.index("clasicofilm"))
canales.insert(0, "[COLOR red]clasicofilm[/COLOR]")
title = "Película ya en tu videoteca. [%s] ¿Añadir?" % ",".join(canales)
break
except:
import traceback
logger.error(traceback.format_exc())
itemlist.append(item.clone(action="add_pelicula_to_library", title=title))
token_auth = config.get_setting("token_trakt", "tvmoviedb")
if token_auth and item.infoLabels["tmdb_id"]:
itemlist.append(item.clone(channel="tvmoviedb", title="[Trakt] Gestionar con tu cuenta", action="menu_trakt",
extra="movie"))
itemlist.append(item.clone(action="add_pelicula_to_library", title="Añadir película a la videoteca"))
return itemlist
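
The id decoding used above can be checked by hand: each 3-hex-digit group becomes one \u0XXX escape (a sketch, assuming the Python 2 str.decode the channel relies on):

def decodifica_id(txt):
    res = ''
    for i in range(0, len(txt), 3):
        res += '\\u0' + txt[i:i+3]
    return res.decode('unicode-escape')

print(decodifica_id('07b022076022'))  # groups 07b/022/076/022 decode to '{', '"', 'v', '"', i.e. {"v"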

View File

@@ -12,7 +12,7 @@ from core.item import Item
from platformcode import config, logger
from channels import autoplay
host = "http://www.danimados.com/"
host = "https://www.danimados.com/"
list_servers = ['openload',
'okru',
@@ -48,12 +48,13 @@ def sub_search(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = 'class="thumbnail animation-.*?href="([^"]+).*?'
patron = '(?s)class="thumbnail animation-.*?href="([^"]+).*?'
patron += 'img src="([^"]+).*?'
patron += 'alt="([^"]+).*?'
patron += 'class="year">(\d{4})'
patron += 'class="meta"(.*?)class="contenido"'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear in matches:
scrapedyear = scrapertools.find_single_match(scrapedyear, 'class="year">(\d{4})')
item.action = "findvideos"
item.contentTitle = scrapedtitle
item.contentSerieName = ""
@@ -95,7 +96,7 @@ def mainpage(item):
itemlist.append(
Item(channel=item.channel, title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, action="episodios",
show=scrapedtitle))
tmdb.set_infoLabels(itemlist)
tmdb.set_infoLabels(itemlist)
return itemlist
return itemlist
@@ -133,14 +134,15 @@ def episodios(item):
itemlist = []
infoLabels = {}
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
data_lista = scrapertools.find_single_match(data,
'<ul class="episodios">(.+?)<\/ul><\/div><\/div><\/div>')
show = item.title
patron_caps = '<img alt=".+?" src="([^"]+)"><\/a><\/div><div class=".+?">([^"]+)<\/div>.+?'
patron_caps += '<a .+? href="([^"]+)">([^"]+)<\/a>'
patron = '(?s)<ul class="episodios">(.+?)<\/ul>'
data_lista = scrapertools.find_single_match(data,patron)
contentSerieName = item.title
patron_caps = 'href="([^"]+)".*?'
patron_caps += 'src="([^"]+)".*?'
patron_caps += 'numerando">([^<]+).*?'
patron_caps += 'link_go">.*?>([^<]+)'
matches = scrapertools.find_multiple_matches(data_lista, patron_caps)
for scrapedthumbnail, scrapedtempepi, scrapedurl, scrapedtitle in matches:
for scrapedurl, scrapedthumbnail, scrapedtempepi, scrapedtitle in matches:
tempepi=scrapedtempepi.split(" - ")
if tempepi[0]=='Pel':
tempepi[0]=0
@@ -150,8 +152,8 @@ def episodios(item):
itemlist.append(item.clone(thumbnail=scrapedthumbnail,
action="findvideos", title=title, url=scrapedurl))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir " + show + " a la videoteca[/COLOR]", url=item.url,
action="add_serie_to_library", extra="episodios", show=show))
itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir " + contentSerieName + " a la videoteca[/COLOR]", url=item.url,
action="add_serie_to_library", extra="episodios", contentSerieName=contentSerieName))
return itemlist
@@ -159,22 +161,25 @@ def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
data1 = scrapertools.find_single_match(data,
'<div id="playex" .+?>(.+?)<\/nav>?\s<\/div><\/div>')
patron = '<div id="playex" .+?>(.+?)<\/nav>'
data1 = scrapertools.find_single_match(data, patron)
patron = "changeLink\('([^']+)'\)"
matches = re.compile(patron, re.DOTALL).findall(data1)
matches = scrapertools.find_multiple_matches(data1, patron)
for url64 in matches:
url1 =base64.b64decode(url64)
if 'danimados' in url1:
new_data = httptools.downloadpage('https:'+url1.replace('stream', 'stream_iframe')).data
logger.info("Intel33 %s" %new_data)
url = scrapertools.find_single_match(new_data, "sources: \[\{file:'([^']+)")
if "zkstream" in url:
url = 'https:'+url1.replace('stream/', 'stream_iframe/')
id = scrapertools.find_single_match(url, 'iframe/(.*)')
url = url.replace(id, base64.b64encode(id))
new_data = httptools.downloadpage(url).data
new_data = new_data.replace('"',"'")
url = scrapertools.find_single_match(new_data, "sources:\s*\[\{file:\s*'([^']+)")
if "zkstream" in url or "cloudup" in url:
url1 = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location", "")
else:
url1 = url
itemlist.append(item.clone(title='%s',url=url1, action="play"))
if url1:
itemlist.append(item.clone(title='%s',url=url1, action="play"))
tmdb.set_infoLabels(itemlist)
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
if config.get_videolibrary_support() and len(itemlist) > 0 and item.contentType=="movie" and item.contentChannel!='videolibrary':
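
A sketch of the link deobfuscation this hunk performs, with an invented stream id (Python 2, as in the channel code):

import base64

url1 = '//danimados.com/stream/abc123'  # what base64.b64decode(url64) might yield
url = 'https:' + url1.replace('stream/', 'stream_iframe/')
vid_id = url.split('iframe/')[-1]                    # 'abc123'
url = url.replace(vid_id, base64.b64encode(vid_id))  # the id goes back out base64-encoded
print(url)  # https://danimados.com/stream_iframe/YWJjMTIz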

View File

@@ -65,6 +65,7 @@ def menupeliculas(item):
itemlist.append(Item(channel=item.channel, action="items_usuario",
title="[COLOR orange][B]Pendientes[/B][/COLOR]",
url=host + "/a/my?target=movies&action=pending&start=-28&limit=28", folder=True))
itemlist.append(Item(channel=item.channel, action="fichas", title="ABC", url=host + "/peliculas/abc", folder=True))
itemlist.append(
Item(channel=item.channel, action="fichas", title="Últimas películas", url=host + "/peliculas", folder=True))
@@ -94,6 +95,7 @@ def menuseries(item):
itemlist.append(Item(channel=item.channel, action="items_usuario",
title="[COLOR orange][B]Para Ver[/B][/COLOR]",
url=host + "/a/my?target=shows&action=watch&start=-28&limit=28", folder=True))
itemlist.append(Item(channel=item.channel, action="series_abc", title="A-Z", folder=True))
itemlist.append(Item(channel=item.channel, action="novedades_episodios", title="Últimos Emitidos",
url=host + "/a/episodes?action=latest&start=-24&limit=24&elang=ALL", folder=True))
@@ -109,6 +111,7 @@ def menuseries(item):
Item(channel=item.channel, action="generos_series", title="Series por Género", url=host, folder=True))
itemlist.append(Item(channel=item.channel, action="listado_series", title="Listado de todas las series",
url=host + "/series/list", folder=True))
if account:
itemlist.append(Item(channel=item.channel, action="items_usuario",
title="[COLOR orange][B]Favoritas[/B][/COLOR]",
@@ -235,6 +238,7 @@ def fichas(item):
infoLabels=dict()
## Load statuses
status = jsontools.load(httptools.downloadpage(host + '/a/status/all').data)
if item.title == "Buscar...":
data = agrupa_datos(httptools.downloadpage(item.url, post=item.extra).data)
s_p = scrapertools.get_match(data, '<h3 class="section-title">(.*?)<div id="footer-wrapper">').split(
@@ -248,6 +252,7 @@ def fichas(item):
data = s_p[0] + s_p[1]
else:
data = agrupa_datos(httptools.downloadpage(item.url).data)
data = re.sub(
r'<div class="span-6[^<]+<div class="item"[^<]+' + \
'<a href="([^"]+)"[^<]+' + \
@@ -496,9 +501,11 @@ def findvideos(item):
itemlist = []
it1 = []
it2 = []
## Load statuses
status = jsontools.load(httptools.downloadpage(host + '/a/status/all').data)
url_targets = item.url
## Videos
id = ""
type = ""
@@ -521,41 +528,43 @@ def findvideos(item):
thumbnail=item.thumbnail, show=item.show))
it1.append(Item(channel=item.channel, action="set_status", title=title, fulltitle=title, url=url_targets,
thumbnail=item.thumbnail, show=item.show, language=item.language, folder=True))
data_js = httptools.downloadpage("%s/templates/hdfull/js/jquery.hdfull.view.min.js" % host).data
key = scrapertools.find_single_match(data_js, 'JSON.parse\(atob.*?substrings\((.*?)\)')
data_js = httptools.downloadpage("%s/js/providers.js" % host).data
try:
data_js = jhexdecode(data_js)
from lib import alfaresolver
provs = alfaresolver.hdfull_providers(data_js)
if provs == '': return []
except:
from lib.aadecode import decode as aadecode
data_js = data_js.split(";゚ω゚")
decode_aa = ""
for match in data_js:
decode_aa += aadecode(match)
data_js = re.sub(r':(function.*?\})', r':"\g<1>"', decode_aa)
data_js = re.sub(r':(var[^,]+),', r':"\g<1>",', data_js)
return []
data = agrupa_datos(httptools.downloadpage(item.url).data)
data_obf = scrapertools.find_single_match(data, "var ad\s*=\s*'([^']+)'")
data_decrypt = jsontools.load(obfs(base64.b64decode(data_obf), 126 - int(key)))
infolabels = {}
year = scrapertools.find_single_match(data, '<span>A&ntilde;o:\s*</span>.*?(\d{4})')
infolabels["year"] = year
matches = []
for match in data_decrypt:
prov = eval(scrapertools.find_single_match(data_js, 'p\[%s\]\s*=\s*(\{.*?\}[\']\})' % match["provider"]))
server_url = scrapertools.find_single_match(prov['l'], 'return\s*"(.*?)"')
url = '%s%s' % (server_url, match['code'])
url = re.sub(r'\'|"|\s|\+', '', url)
url = re.sub(r'var_\d+\[\d+\]', '', url)
embed = prov["e"]
matches.append([match["lang"], match["quality"], url, embed])
if match['provider'] in provs:
try:
embed = provs[match['provider']][0]
url = eval(provs[match['provider']][1].replace('_code_', "match['code']"))
matches.append([match['lang'], match['quality'], url, embed])
except:
pass
for idioma, calidad, url, embed in matches:
mostrar_server = True
option = "Ver"
option1 = 1
if re.search(r'return ([\'"]{2,}|\})', embed):
if embed == 'd':
option = "Descargar"
option1 = 2
else:
option = "Ver"
option1 = 1
calidad = unicode(calidad, "utf8").upper().encode("utf8")
title = option + ": %s (" + calidad + ")" + " (" + idioma + ")"
thumbnail = item.thumbnail
@@ -569,6 +578,7 @@ def findvideos(item):
Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
plot=plot, fanart=fanart, show=item.show, folder=True, infoLabels=infolabels, language=idioma,
contentTitle=item.contentTitle, contentType=item.contentType, tipo=option, tipo1=option1, idioma=idioma))
it2 = servertools.get_servers_itemlist(it2, lambda i: i.title % i.server.capitalize())
it2.sort(key=lambda it: (it.tipo1, it.idioma, it.server))
for item in it2:
@@ -576,6 +586,7 @@ def findvideos(item):
item.url += "###" + id + ";" + type
itemlist.extend(it1)
itemlist.extend(it2)
## 2 = movie
if type == "2" and item.category != "Cine":
if config.get_videolibrary_support():
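
How the decoded providers table is consumed above, as a sketch (the table and host are invented, not the real providers.js contents): each entry holds an embed flag and a Python expression template in which _code_ is substituted before eval.

match = {'provider': '12', 'code': 'AbC123', 'lang': 'ESP', 'quality': 'HD'}
provs = {'12': ['v', '"https://hypothetical-host.example/embed/" + _code_']}

embed = provs[match['provider']][0]
url = eval(provs[match['provider']][1].replace('_code_', "match['code']"))
print(url)  # https://hypothetical-host.example/embed/AbC123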

View File

@@ -0,0 +1,85 @@
{
"id": "megadede",
"name": "Megadede",
"active": true,
"adult": false,
"language": ["cast"],
"thumbnail": "https://i.postimg.cc/L5pNtXdS/megadede1.png",
"banner": "",
"categories": [
"movie",
"tvshow"
],
"settings": [
{
"id": "megadedeuser",
"type": "text",
"label": "@30014",
"enabled": true,
"visible": true
},
{
"id": "megadedepassword",
"type": "text",
"hidden": true,
"label": "@30015",
"enabled": "!eq(-1,'')",
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": "!eq(-1,'') + !eq(-2,'')",
"visible": true
},
{
"id": "megadedesortlinks",
"type": "list",
"label": "Ordenar enlaces",
"default": 0,
"enabled": true,
"visible": "!eq(-2,'') + !eq(-3,'')",
"lvalues": [
"No",
"Por no Reportes",
"Por Idioma",
"Por Calidad",
"Por Idioma y Calidad",
"Por Idioma y no Reportes",
"Por Idioma, Calidad y no Reportes"
]
},
{
"id": "megadedeshowlinks",
"type": "list",
"label": "Mostrar enlaces",
"default": 0,
"enabled": true,
"visible": "!eq(-3,'') + !eq(-4,'')",
"lvalues": [
"Todos",
"Ver online",
"Descargar"
]
},
{
"id": "megadedenumberlinks",
"type": "list",
"label": "Limitar número de enlaces",
"default": 0,
"enabled": true,
"visible": "!eq(-4,'') + !eq(-5,'')",
"lvalues": [
"No",
"5",
"10",
"15",
"20",
"25",
"30"
]
}
]
}

View File

@@ -0,0 +1,816 @@
# -*- coding: utf-8 -*-
import os
import re
import sys
import urlparse
from time import sleep
from core import channeltools
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from platformcode import platformtools
host = 'https://www.megadede.com'
__channel__ = 'megadede'
parameters = channeltools.get_channel_parameters(__channel__)
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']
color1, color2, color3 = ['0xFFB10021', '0xFFB10021', '0xFFB10004']
def login():
url_origen = host+"/login?popup=1"
try:
data = httptools.downloadpage(url_origen).data
except:
data = httptools.downloadpage(url_origen, follow_redirects=False).data
if '<span class="username">' in data:
return True
token = scrapertools.find_single_match(data, '<input name="_token" type="hidden" value="([^"]+)"')
if 'Escribe los números de la imagen' in data:
captcha_url = scrapertools.find_single_match(data, '<img src="([^"]+)" alt="captcha">')
imagen_data = httptools.downloadpage(captcha_url).data
ficheropng = os.path.join(config.get_data_path(), "captcha_megadede.png")
outfile=open(ficheropng,'wb')
outfile.write(imagen_data)
outfile.close()
img = xbmcgui.ControlImage(450,15,400,130,ficheropng)
wdlg = xbmcgui.WindowDialog()
wdlg.addControl(img)
wdlg.show()
sleep(1)
kb = platformtools.dialog_numeric(0, "Escribe los números de la imagen")
postcaptcha = ""
if kb !='':
solution = kb
postcaptcha = "&captcha=" + str(solution)
else:
return False
wdlg.close()
else:
postcaptcha=""
post = "_token=" + str(token) + "&email=" + str(config.get_setting("megadedeuser", "megadede")) + \
"&password=" + str(config.get_setting("megadedepassword", "megadede")) + postcaptcha\
#+ "&app=2131296469"
headers = {"User-Agent": "Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/66.0.3163.100 Safari/537.36", "Referer": host, "X-Requested-With": "XMLHttpRequest","X-CSRF-TOKEN":
token}
data = httptools.downloadpage(host+"/login", post=post, headers=headers,
replace_headers=False).data
if "redirect" in data:
return True
else:
return False
def mainlist(item):
logger.info()
itemlist = []
if not config.get_setting("megadedeuser", "megadede"):
itemlist.append(
Item(channel=item.channel, title="Habilita tu cuenta en la configuración e ingresar de nuevo al canal", action="settingCanal",
url=""))
else:
result = login()
if not result:
itemlist.append(Item(channel=item.channel, action="mainlist", title="Login fallido. Volver a intentar..."))
return itemlist
item.url = host
item.fanart = fanart_host
item.thumbnail = "https://s18.postimg.cc/r5cylu6rd/12_-_oi_RDsdv.png"
itemlist.append(item.clone(title="Películas", action="menupeliculas", text_color=color3, text_blod=True))
item.thumbnail = "https://s18.postimg.cc/ruvqy6zl5/15_-_9m9_Dp1m.png"
itemlist.append(item.clone(title="Series", action="menuseries", text_color=color3, text_blod=True))
itemlist.append(item.clone(title="Listas", action="menulistas", text_color=color3, text_blod=True, thumbnail = 'https://s18.postimg.cc/xj21p46ih/10_-_Uf7e_XHE.png'))
itemlist.append(item.clone(title="", folder=False, thumbnail=thumbnail_host))
item.thumbnail = ""
itemlist.append(item.clone(channel=item.channel, action="settingCanal", title="Configuración...", url="", thumbnail='https://s18.postimg.cc/c9efeassp/3_-_QAHK2_Tc.png'))
return itemlist
def settingCanal(item):
return platformtools.show_channel_settings()
def menuseries(item):
logger.info()
itemlist = []
item.url = host
item.fanart = fanart_host
item.text_color = None
item.thumbnail = "https://s18.postimg.cc/ruvqy6zl5/15_-_9m9_Dp1m.png"
itemlist.append(item.clone(action="peliculas", title=" Novedades", url= host + "/series", thumbnail='https://s18.postimg.cc/in3ihji95/11_-_WPg_H5_Kx.png'))
itemlist.append(item.clone(action="generos", title=" Por géneros", url= host + "/series", thumbnail='https://s18.postimg.cc/p0slktaah/5_-_c_Nf_KRvm.png'))
itemlist.append(
item.clone(action="peliculas", title=" Siguiendo", url= host + "/series/following", thumbnail='https://s18.postimg.cc/68gqh7j15/7_-_tqw_AHa5.png'))
itemlist.append(item.clone(action="peliculas", title=" Capítulos Pendientes",
url= host + "/series/mypending/0?popup=1", viewmode="movie", thumbnail='https://s18.postimg.cc/9s2o71w1l/2_-_3dbbx7_K.png'))
itemlist.append(
item.clone(action="peliculas", title=" Favoritas", url= host + "/series/favorites", thumbnail='https://s18.postimg.cc/n8zmpwynd/4_-_JGrig_Ep.png'))
itemlist.append(
item.clone(action="peliculas", title=" Pendientes", url= host + "/series/pending", thumbnail='https://s18.postimg.cc/4gnrmacix/13_-_cwl_TDog.png'))
itemlist.append(item.clone(action="peliculas", title=" Terminadas", url= host + "/series/seen", thumbnail='https://s18.postimg.cc/5vpcay0qh/17_-_M2in_Fp_O.png'))
itemlist.append(
item.clone(action="peliculas", title=" Recomendadas", url= host + "/series/recommended", thumbnail='https://s18.postimg.cc/bwn182sih/14_-_fin32_Kp.png'))
itemlist.append(item.clone(action="search", title=" Buscar...", url= host + "/series", thumbnaiil='https://s18.postimg.cc/s7n54ghvt/1_-_01_ZDYii.png'))
itemlist.append(item.clone(title="", folder=False, thumbnail=thumbnail_host))
itemlist.append(Item(channel=item.channel, action="settingCanal", title="Configuración...", url="", thumbnail='https://s18.postimg.cc/c9efeassp/3_-_QAHK2_Tc.png'))
return itemlist
def menupeliculas(item):
logger.info()
itemlist = []
item.url = host
item.fanart = fanart_host
item.text_color = None
item.thumbnail = "https://s18.postimg.cc/r5cylu6rd/12_-_oi_RDsdv.png"
itemlist.append(item.clone(action="peliculas", title=" Novedades", url= host + "/pelis", thumbnail='https://s18.postimg.cc/in3ihji95/11_-_WPg_H5_Kx.png'))
itemlist.append(item.clone(action="generos", title=" Por géneros", url= host + "/pelis", thumbnail='https://s18.postimg.cc/p0slktaah/5_-_c_Nf_KRvm.png'))
itemlist.append(item.clone(action="peliculas", title=" Solo HD", url= host + "/pelis?quality=3", thumbnail='https://s18.postimg.cc/e17e95mfd/16_-_qmqn4_Si.png'))
itemlist.append(
item.clone(action="peliculas", title=" Pendientes", url= host + "/pelis/pending", thumbnail='https://s18.postimg.cc/4gnrmacix/13_-_cwl_TDog.png'))
itemlist.append(
item.clone(action="peliculas", title=" Recomendadas", url= host + "/pelis/recommended", thumbnail='https://s18.postimg.cc/bwn182sih/14_-_fin32_Kp.png'))
itemlist.append(
item.clone(action="peliculas", title=" Favoritas", url= host + "/pelis/favorites", thumbnail='https://s18.postimg.cc/n8zmpwynd/4_-_JGrig_Ep.png'))
itemlist.append(item.clone(action="peliculas", title=" Vistas", url= host + "/pelis/seen", thumbnail='https://s18.postimg.cc/5vpcay0qh/17_-_M2in_Fp_O.png'))
itemlist.append(item.clone(action="search", title=" Buscar...", url= host + "/pelis", thumbnail='https://s18.postimg.cc/s7n54ghvt/1_-_01_ZDYii.png'))
itemlist.append(item.clone(title="", folder=False, thumbnail=thumbnail_host))
item.thumbnail = ""
itemlist.append(item.clone(channel=item.channel, action="settingCanal", title="Configuración...", url="", thumbnail='https://s18.postimg.cc/c9efeassp/3_-_QAHK2_Tc.png'))
return itemlist
def menulistas(item):
logger.info()
itemlist = []
item.url = host
item.fanart = fanart_host
item.text_color = None
itemlist.append(
item.clone(action="listas", tipo="populares", title=" Populares", url= host + "/listas", thumbnail='https://s18.postimg.cc/7aqwzrha1/8_-_3rn14_Tq.png'))
itemlist.append(
item.clone(action="listas", tipo="siguiendo", title=" Siguiendo", url= host + "/listas", thumbnail='https://s18.postimg.cc/4tf5sha89/9_-_z_F8c_UBT.png'))
itemlist.append(
item.clone(action="listas", tipo="tuslistas", title=" Tus Listas", url= host + "/listas"))
itemlist.append(item.clone(title="", folder=False, thumbnail=thumbnail_host))
item.thumbnail = ""
itemlist.append(item.clone(channel=item.channel, action="settingCanal", title="Configuración...", url="", thumbnail='https://s18.postimg.cc/c9efeassp/3_-_QAHK2_Tc.png'))
return itemlist
def generos(item):
logger.info()
tipo = item.url.replace( host + "/", "")
data = httptools.downloadpage(item.url).data
data = scrapertools.find_single_match(data,
'<select name="genre_id" class="selectpicker" title="Selecciona...">(.*?)</select>')
patron = '<option value="([^"]+)">([^<]+)</option>'
matches = re.compile(patron, re.DOTALL).findall(data)
itemlist = []
for id_genere, title in matches:
title = title.strip()
thumbnail = ""
plot = ""
url = host + "/" + tipo + "?genre_id=" + id_genere
itemlist.append(
Item(channel=item.channel, action="peliculas", title=title, url=url, thumbnail=thumbnail, plot=plot,
fulltitle=title))
return itemlist
def search(item, texto):
logger.info()
item.tipo = item.url.replace(host + "/", "")
item.url = host + "/search/"
texto = texto.replace(" ", "-")
item.url = item.url + texto
try:
return buscar(item)
# Catch the exception so the global search is not interrupted when one channel fails
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def buscar(item):
logger.info()
headers = {"X-Requested-With": "XMLHttpRequest"}
data = httptools.downloadpage(item.url, headers=headers).data
json_object = jsontools.load(data)
data = json_object["content"]
return parse_mixed_results(item, data)
def parse_mixed_results(item, data):
itemlist = []
patron = '<div class="media-dropdown mini dropdown model" data-value="([^"]+)"+'
patron += '.*?<a href="([^"]+)"[^<]data-toggle="tooltip" data-container="body"+'
patron += ' data-delay="500" title="([^"]+)"[^<]+'
patron += '.*?src="([^"]+)"+'
patron += '.*?<div class="year">([^<]+)</div>+'
patron += '.*?<div class="value"><i class="fa fa-star"></i> ([^<]+)</div>'
matches = re.compile(patron, re.DOTALL).findall(data)
if item.tipo == "lista":
following = scrapertools.find_single_match(data, '<div class="follow-lista-buttons ([^"]+)">')
data_id = scrapertools.find_single_match(data, 'data-model="10" data-id="([^"]+)">')
if following.strip() == "following":
itemlist.append(
Item(channel='megadede', title="Dejar de seguir", idtemp=data_id, token=item.token, valor="unfollow",
action="megadede_check", url=item.url, tipo=item.tipo))
else:
itemlist.append(
Item(channel='megadede', title="Seguir esta lista", idtemp=data_id, token=item.token, valor="follow",
action="megadede_check", url=item.url, tipo=item.tipo))
for visto, scrapedurl, scrapedtitle, scrapedthumbnail, scrapedyear, scrapedvalue in matches:
title = ""
if visto.strip() == "seen":
title += "[visto] "
title += scrapertools.htmlclean(scrapedtitle)
if scrapedyear != '':
title += " (" + scrapedyear + ")"
fulltitle = title
if scrapedvalue != '':
title += " (" + scrapedvalue + ")"
thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
fanart = thumbnail.replace("mediathumb", "mediabigcover")
plot = ""
if "/peli/" in scrapedurl or "/docu/" in scrapedurl:
# sectionStr = "peli" if "/peli/" in scrapedurl else "docu"
if "/peli/" in scrapedurl:
sectionStr = "peli"
else:
sectionStr = "docu"
referer = urlparse.urljoin(item.url, scrapedurl)
url = urlparse.urljoin(item.url, scrapedurl)
if item.tipo != "series":
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, extra=referer, url=url,
thumbnail=thumbnail, plot=plot, fulltitle=fulltitle, fanart=fanart,
contentTitle=scrapedtitle, contentType="movie", context=["buscar_trailer"]))
else:
referer = item.url
url = urlparse.urljoin(item.url, scrapedurl)
if item.tipo != "pelis":
itemlist.append(Item(channel=item.channel, action="episodios", title=title, extra=referer, url=url,
thumbnail=thumbnail, plot=plot, fulltitle=fulltitle, show=title, fanart=fanart,
contentTitle=scrapedtitle, contentType="tvshow", context=["buscar_trailer"]))
next_page = scrapertools.find_single_match(data,
'<div class="onclick load-more-icon no-json" data-action="replace" data-url="([^"]+)">')
if next_page != "":
url = urlparse.urljoin(host, next_page).replace("amp;", "")
itemlist.append(
Item(channel=item.channel, action="pag_sig", token=item.token, title=">> Página siguiente",
extra=item.extra, url=url))
try:
import xbmcplugin
xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_UNSORTED)
xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_VIDEO_TITLE)
except:
pass
return itemlist
def siguientes(item):  # not used
logger.info()
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data, '<h2>Siguiendo</h2>(.*?)<div class="box">')
patron = '<div class="coverMini shadow tiptip" title="([^"]+)">[^<]+'
patron += '<img class="centeredPic centeredPicFalse" onerror="[^"]+" src="([^"]+)"[^<]+'
patron += '<img src="/images/loading-mini.gif" class="loader"/>[^<]+'
patron += '<div class="extra-info"><span class="year">[^<]+'
patron += '</span><span class="value"><i class="icon-star"></i>[^<]+'
patron += '</span></div>[^<]+'
patron += '</div>[^<]+'
patron += '</a>[^<]+'
patron += '<a class="userepiinfo defaultLink" href="([^"]+)">(\d+)x(\d+)'
matches = re.compile(patron, re.DOTALL).findall(data)
itemlist = []
for scrapedtitle, scrapedthumbnail, scrapedurl, scrapedsession, scrapedepisode in matches:
title = scrapertools.htmlclean(scrapedtitle)
session = scrapertools.htmlclean(scrapedsession)
episode = scrapertools.htmlclean(scrapedepisode)
thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
fanart = thumbnail.replace("mediathumb", "mediabigcover")
plot = ""
title = session + "x" + episode + " - " + title
referer = urlparse.urljoin(item.url, scrapedurl)
url = referer
itemlist.append(
Item(channel=item.channel, action="episodio", title=title, url=url, thumbnail=thumbnail, plot=plot,
fulltitle=title, show=title, fanart=fanart, extra=session + "|" + episode))
return itemlist
def episodio(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
session = str(int(item.extra.split("|")[0]))
episode = str(int(item.extra.split("|")[1]))
patrontemporada = '<div class="checkSeason"[^>]+>Temporada ' + session + '<div class="right" onclick="controller.checkSeason(.*?)\s+</div></div>'
matchestemporadas = re.compile(patrontemporada, re.DOTALL).findall(data)
for bloque_episodios in matchestemporadas:
patron = '<span class="title defaultPopup" href="([^"]+)"><span class="number">' + episode + ' </span>([^<]+)</span>(\s*</div>\s*<span[^>]*><span[^>]*>[^<]*</span><span[^>]*>[^<]*</span></span><div[^>]*><button[^>]*><span[^>]*>[^<]*</span><span[^>]*>[^<]*</span></button><div class="action([^"]*)" data-action="seen">)?'
matches = re.compile(patron, re.DOTALL).findall(bloque_episodios)
for scrapedurl, scrapedtitle, info, visto in matches:
if visto.strip() == "active":
visto_string = "[visto] "
else:
visto_string = ""
numero = episode
title = visto_string + session + "x" + numero + " " + scrapertools.htmlclean(scrapedtitle)
thumbnail = ""
plot = ""
epid = scrapertools.find_single_match(scrapedurl, "id/(\d+)")
url = host + "/links/viewepisode/id/" + epid
itemlist.append(
Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot,
fulltitle=title, fanart=item.fanart, show=item.show))
itemlist2 = []
for capitulo in itemlist:
itemlist2 = findvideos(capitulo)
return itemlist2
def peliculas(item):
logger.info()
headers = {"X-Requested-With": "XMLHttpRequest"}
data = httptools.downloadpage(item.url, headers=headers).data
json_object = jsontools.load(data)
data = json_object["content"]
return parse_mixed_results(item, data)
def episodios(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patrontemporada = '<ul.*?<li class="season-header" >([^<]+)<(.*?)\s+</ul>'
matchestemporadas = re.compile(patrontemporada, re.DOTALL).findall(data)
idserie = scrapertools.find_single_match(data, 'data-model="5" data-id="(\d+)"')
token = scrapertools.find_single_match(data, '_token" content="([^"]+)"')
if (config.get_platform().startswith("xbmc") or config.get_platform().startswith("kodi")):
itemlist.append(Item(channel=item.channel, action="infosinopsis", title="INFO / SINOPSIS", url=item.url,
thumbnail=item.thumbnail, fanart=item.fanart, folder=False))
for nombre_temporada, bloque_episodios in matchestemporadas:
# Extract the episodes
patron_episodio = '<li><a href="#"(.*?)</a></li>'
# patron = '<li><a href="#" data-id="([^"]*)".*?data-href="([^"]+)">\s*<div class="name">\s*<span class="num">([^<]+)</span>\s*([^<]+)\s*</div>.*?"show-close-footer episode model([^"]+)"'
matches = re.compile(patron_episodio, re.DOTALL).findall(bloque_episodios)
for data_episodio in matches:
scrapeid = scrapertools.find_single_match(data_episodio, '<li><a href="#" data-id="([^"]*)"')
scrapedurl = scrapertools.find_single_match(data_episodio, 'data-href="([^"]+)">\s*<div class="name">')
numero = scrapertools.find_single_match(data_episodio, '<span class="num">([^<]+)</span>')
scrapedtitle = scrapertools.find_single_match(data_episodio,
'<span class="num">.*?</span>\s*([^<]+)\s*</div>')
visto = scrapertools.find_single_match(data_episodio, '"show-close-footer episode model([^"]+)"')
title = nombre_temporada.replace("Temporada ", "").replace("Extras de la serie", "Extras 0").replace(" ",
"") + "x" + numero + " " + scrapertools.htmlclean(
scrapedtitle)
if visto.strip() == "seen":
title = "[visto] " + title
thumbnail = item.thumbnail
fanart = item.fanart
plot = ""
url = host + scrapedurl
itemlist.append(
Item(channel=item.channel, action="findvideos", nom_serie=item.title, tipo="5", title=title, url=url,
thumbnail=thumbnail, plot=plot, fulltitle=title, fanart=fanart, show=item.show))
if config.get_videolibrary_support():
show = re.sub(r"\s\(\d+\)\s\(\d+\.\d+\)", "", item.show)
itemlist.append(
Item(channel='megadede', title="Añadir esta serie a la videoteca", url=item.url, token=token,
action="add_serie_to_library", extra="episodios###", show=show))
itemlist.append(
Item(channel='megadede', title="Descargar todos los episodios de la serie", url=item.url, token=token,
action="download_all_episodes", extra="episodios", show=show))
itemlist.append(Item(channel='megadede', title="Marcar como Pendiente", tipo="5", idtemp=idserie, token=token,
valor="pending", action="megadede_check", show=show))
itemlist.append(Item(channel='megadede', title="Marcar como Siguiendo", tipo="5", idtemp=idserie, token=token,
valor="following", action="megadede_check", show=show))
itemlist.append(Item(channel='megadede', title="Marcar como Finalizada", tipo="5", idtemp=idserie, token=token,
valor="seen", action="megadede_check", show=show))
itemlist.append(Item(channel='megadede', title="Marcar como Favorita", tipo="5", idtemp=idserie, token=token,
valor="favorite", action="megadede_check", show=show))
itemlist.append(
Item(channel='megadede', title="Quitar marca", tipo="5", idtemp=idserie, token=token, valor="nothing",
action="megadede_check", show=show))
itemlist.append(
Item(channel='megadede', title="Añadir a lista", tipo="5", tipo_esp="lista", idtemp=idserie, token=token,
action="megadede_check", show=show))
return itemlist
def parse_listas(item, bloque_lista):
logger.info()
if item.tipo == "populares":
patron = '<div class="lista(.*?)</div>\s*</h4>'
else:
patron = '<div class="lista(.*?)</h4>\s*</div>'
matches = re.compile(patron, re.DOTALL).findall(bloque_lista)
itemlist = []
for lista in matches:
scrapedurl = scrapertools.htmlclean(scrapertools.find_single_match(lista, '<a href="([^"]+)">[^<]+</a>'))
scrapedtitle = scrapertools.find_single_match(lista, '<a href="[^"]+">([^<]+)</a>')
scrapedfollowers = scrapertools.find_single_match(lista, 'Follow: <span class="number">([^<]+)')
scrapedseries = scrapertools.find_single_match(lista, '<div class="lista-stat badge">Series: ([^<]+)')
scrapedpelis = scrapertools.find_single_match(lista, '<div class="lista-stat badge">Pelis: ([^<]+)')
title = scrapertools.htmlclean(scrapedtitle) + ' ('
if scrapedpelis != '':
title += scrapedpelis + ' pelis, '
if scrapedseries != '':
title += scrapedseries + ' series, '
if scrapedfollowers != '':
title += scrapedfollowers + ' seguidores'
title += ')'
url = urlparse.urljoin(host, scrapedurl)
thumbnail = ""
itemlist.append(
Item(channel=item.channel, action="peliculas", token=item.token, tipo="lista", title=title, url=url))
nextpage = scrapertools.find_single_match(bloque_lista,
'<div class="onclick load-more-icon no-json" data-action="replace" data-url="([^"]+)"')
if nextpage != '':
url = urlparse.urljoin(host, nextpage)
itemlist.append(Item(channel=item.channel, action="lista_sig", token=item.token, tipo=item.tipo,
title=">> Página siguiente", extra=item.extra, url=url))
try:
import xbmcplugin
xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_UNSORTED)
xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_VIDEO_TITLE)
except:
pass
return itemlist
def listas(item):
logger.info()
if item.tipo == "tuslistas":
patron = 'Tus listas(.*?)>Listas que sigues<'
elif item.tipo == "siguiendo":
patron = '<h3>Listas que sigues</h3>(.*?)<h2>Listas populares</h2>'
else:
patron = '<div class="content">\s*<h2>Listas populares(.*?)</div>\s*</div>\s*</div>\s*</div>\s*</div>'
data = httptools.downloadpage(item.url).data
item.token = scrapertools.find_single_match(data, '_token" content="([^"]+)"').strip()
bloque_lista = scrapertools.find_single_match(data, patron)
return parse_listas(item, bloque_lista)
def lista_sig(item):
logger.info()
headers = {"X-Requested-With": "XMLHttpRequest"}
data = httptools.downloadpage(item.url, headers=headers).data
return parse_listas(item, data)
def pag_sig(item):
logger.info()
headers = {"X-Requested-With": "XMLHttpRequest"}
data = httptools.downloadpage(item.url, headers=headers).data
return parse_mixed_results(item, data)
def findvideos(item, verTodos=False):
logger.info()
data = httptools.downloadpage(item.url).data
data_model = scrapertools.find_single_match(data, 'data-model="([^"]+)"')
data_id = scrapertools.find_single_match(data, 'data-id="([^"]+)"')
trailer = "https://www.youtube.com/watch?v=" + scrapertools.find_single_match(data,
'data-youtube="([^"]+)" class="youtube-link')
url = host + "/aportes/" + data_model + "/" + data_id + "?popup=1"
data = httptools.downloadpage(url).data
token = scrapertools.find_single_match(data, '_token" content="([^"]+)"')
patron = 'target="_blank" (.*?)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
itemlist = []
idpeli = data_id
if (config.get_platform().startswith("xbmc") or config.get_platform().startswith("kodi")) and data_model == "4":
itemlist.append(Item(channel=item.channel, action="infosinopsis", title="INFO / SINOPSIS", url=item.url,
thumbnail=item.thumbnail, fanart=item.fanart, folder=False))
itemlist.append(Item(channel=item.channel, action="play", title="TRAILER", url=item.url, trailer=trailer,
thumbnail=item.thumbnail, fanart=item.fanart, folder=False))
itemsort = []
sortlinks = config.get_setting("megadedesortlinks",
item.channel) # 0:no, 1:valoracion, 2:idioma, 3:calidad, 4:idioma+calidad, 5:idioma+valoracion, 6:idioma+calidad+valoracion
showlinks = config.get_setting("megadedeshowlinks", item.channel) # 0:todos, 1:ver online, 2:descargar
if sortlinks != '' and sortlinks != "No":
sortlinks = int(sortlinks)
else:
sortlinks = 0
if showlinks != '' and showlinks != "No":
showlinks = int(showlinks)
else:
showlinks = 0
for match in matches:
jdown = scrapertools.find_single_match(match, '<span class="fa fa-download"></span>([^<]+)')
if (showlinks == 1 and jdown != '') or (
showlinks == 2 and jdown == ''): # discard watch-online/download links per the setting
continue
idioma_1 = ""
idiomas = re.compile('<img src="https://cd.*?/images/flags/([^"]+).png', re.DOTALL).findall(match)
idioma_0 = idiomas[0]
if len(idiomas) > 1:
idioma_1 = idiomas[1]
idioma = idioma_0 + ", SUB " + idioma_1
else:
idioma_1 = ''
idioma = idioma_0
calidad_video = scrapertools.find_single_match(match,
'<span class="fa fa-video-camera"></span>(.*?)</div>').replace(
" ", "").replace("\n", "")
calidad_audio = scrapertools.find_single_match(match,
'<span class="fa fa-headphones"></span>(.*?)</div>').replace(
" ", "").replace("\n", "")
thumb_servidor = scrapertools.find_single_match(match, '<img src="([^"]+)">')
nombre_servidor = scrapertools.find_single_match(thumb_servidor, "hosts/([^\.]+).png")
if jdown != '':
title = "Download " + nombre_servidor + " (" + idioma + ") (Calidad " + calidad_video.strip() + ", audio " + calidad_audio.strip() + ")"
else:
title = "Ver en " + nombre_servidor + " (" + idioma + ") (Calidad " + calidad_video.strip() + ", audio " + calidad_audio.strip() + ")"
valoracion = 0
reports = scrapertools.find_single_match(match,
'<i class="fa fa-exclamation-triangle"></i><br/>\s*<span class="number" data-num="([^"]*)">')
valoracion -= int(reports)
title += " (" + reports + " reps)"
url = urlparse.urljoin(item.url, scrapertools.find_single_match(match, 'href="([^"]+)"'))
thumbnail = thumb_servidor
plot = ""
if sortlinks > 0:
# orden1 keeps "download" links behind "ver" links when sorting
# orden2 follows the configured criterion
if sortlinks == 1:
orden = valoracion
elif sortlinks == 2:
orden = valora_idioma(idioma_0, idioma_1)
elif sortlinks == 3:
orden = valora_calidad(calidad_video, calidad_audio)
elif sortlinks == 4:
orden = (valora_idioma(idioma_0, idioma_1) * 100) + valora_calidad(calidad_video, calidad_audio)
elif sortlinks == 5:
orden = (valora_idioma(idioma_0, idioma_1) * 1000) + valoracion
elif sortlinks == 6:
orden = (valora_idioma(idioma_0, idioma_1) * 100000) + (
valora_calidad(calidad_video, calidad_audio) * 1000) + valoracion
itemsort.append(
{'action': "play", 'title': title, 'data_id': data_id, 'token': token, 'tipo': data_model, 'url': url,
'thumbnail': thumbnail, 'fanart': item.fanart, 'plot': plot, 'extra': item.url,
'fulltitle': item.fulltitle, 'orden1': (jdown == ''), 'orden2': orden})
else:
itemlist.append(
Item(channel=item.channel, action="play", data_id=data_id, token=token, tipo=data_model, title=title,
url=url, thumbnail=thumbnail, fanart=item.fanart, plot=plot, extra=item.url,
fulltitle=item.fulltitle))
if sortlinks > 0:
numberlinks = config.get_setting("megadedenumberlinks", item.channel) # 0:todos, > 0:n*5 (5,10,15,20,...)
# numberlinks = int(numberlinks) if numberlinks != '' and numberlinks !="No" else 0
if numberlinks != '' and numberlinks != "No":
numberlinks = int(numberlinks)
else:
numberlinks = 0
if numberlinks == 0:
verTodos = True
itemsort = sorted(itemsort, key=lambda k: (k['orden1'], k['orden2']), reverse=True)
for i, subitem in enumerate(itemsort):
if verTodos == False and i >= numberlinks:
itemlist.append(
Item(channel=item.channel, action='findallvideos', title='Ver todos los enlaces', url=item.url,
extra=item.extra))
break
itemlist.append(
Item(channel=item.channel, action=subitem['action'], title=subitem['title'], data_id=subitem['data_id'],
token=subitem['token'], tipo=subitem['tipo'], url=subitem['url'], thumbnail=subitem['thumbnail'],
fanart=subitem['fanart'], plot=subitem['plot'], extra=subitem['extra'],
fulltitle=subitem['fulltitle']))
if data_model == "4":
itemlist.append(
Item(channel=item.channel, action="megadede_check", tipo="4", token=token, title="Marcar como Pendiente",
valor="pending", idtemp=idpeli))
itemlist.append(
Item(channel=item.channel, action="megadede_check", tipo="4", token=token, title="Marcar como Vista",
valor="seen", idtemp=idpeli))
itemlist.append(
Item(channel=item.channel, action="megadede_check", tipo="4", token=token, title="Marcar como Favorita",
valor="favorite", idtemp=idpeli))
itemlist.append(Item(channel=item.channel, action="megadede_check", tipo="4", token=token, title="Quitar Marca",
valor="nothing", idtemp=idpeli))
itemlist.append(
Item(channel='megadede', title="Añadir a lista", tipo="4", tipo_esp="lista", idtemp=idpeli, token=token,
action="megadede_check"))
return itemlist
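
The ordering above hinges on the composite key: with reverse=True, orden1 (True for "Ver", False for downloads) dominates orden2, so watch links always sort ahead of download links regardless of score. A reduced sketch:

itemsort = [
    {'title': 'Download X', 'orden1': False, 'orden2': 95},
    {'title': 'Ver en Y', 'orden1': True, 'orden2': 80},
]
itemsort = sorted(itemsort, key=lambda k: (k['orden1'], k['orden2']), reverse=True)
print([i['title'] for i in itemsort])  # ['Ver en Y', 'Download X']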
def findallvideos(item):
return findvideos(item, True)
def play(item):
itemlist = []
if "trailer" in item:
url = item.trailer
itemlist = servertools.find_video_items(data=url)
for videoitem in itemlist:
videoitem.title = item.title
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
return itemlist
else:
logger.info("url=" + item.url)
# Make the request
headers = {'Referer': item.extra}
data = httptools.downloadpage(item.url, headers=headers).data
url = scrapertools.find_single_match(data,
'<a href="([^"]+)" target="_blank"><button class="btn btn-primary">visitar enlace</button>')
url = urlparse.urljoin(host, url)
headers = {'Referer': item.url}
media_url = httptools.downloadpage(url, headers=headers, follow_redirects=False).headers.get("location")
itemlist = servertools.find_video_items(data=media_url)
for videoitem in itemlist:
videoitem.title = item.title
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
try:
checkseen(item)
except:
pass
return itemlist
def checkseen(item):
logger.info(item)
url_temp = ""
if item.tipo == "8":
url_temp = host + "/set/episode/" + item.data_id + "/seen"
tipo_str = "series"
headers = {"User-Agent":"Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/61.0.3163.100 Safari/537.36", "Referer": host + "/serie/",
"X-Requested-With": "XMLHttpRequest", "X-CSRF-TOKEN": item.token}
else:
url_temp = host + "/set/usermedia/" + item.tipo + "/" + item.data_id + "/seen"
tipo_str = "pelis"
headers = {"User-Agent": "Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/61.0.3163.100 Safari/537.36", "Referer": host + "/serie/",
"X-Requested-With": "XMLHttpRequest", "X-CSRF-TOKEN": item.token}
data = httptools.downloadpage(url_temp, post="id=" + item.idtemp, headers=headers, replace_headers=True).data
return True
def infosinopsis(item):
logger.info()
data = httptools.downloadpage(item.url).data
scrapedtitle = scrapertools.find_single_match(data, '<div class="media-title">([^<]+)</div>')
scrapedvalue = scrapertools.find_single_match(data, '<span class="value">([^<]+)</span>')
scrapedyear = scrapertools.find_single_match(data,
'<strong>Fecha</strong>\s*<div class="mini-content">([^<]+)</div>').strip()
scrapedduration = scrapertools.htmlclean(scrapertools.find_single_match(data,
'<strong>Duración</strong>\s*<div class="mini-content">([^<]+)</div>').strip().replace(
" ", "").replace("\n", ""))
scrapedplot = scrapertools.find_single_match(data, '<div class="plot expandable">([^<]+)<div').strip()
generos = scrapertools.find_single_match(data, '<strong>Género</strong>\s*<ul>(.*?)</ul>')
scrapedgenres = re.compile('<li>([^<]+)</li>', re.DOTALL).findall(generos)
scrapedcasting = re.compile(
'<a href="%s/star/[^"]+"><div class="text-main">([^<]+)</div></a>\s*<div class="text-sub">\s*([^<]+)</div>' %host,
re.DOTALL).findall(data)
title = scrapertools.htmlclean(scrapedtitle)
plot = "[B]Año: [/B]" + scrapedyear
plot += " [B]Duración: [/B]" + scrapedduration
plot += " [B]Puntuación usuarios: [/B]" + scrapedvalue
plot += "\n[B]Géneros: [/B]" + ", ".join(scrapedgenres)
plot += "\n\n[B]Sinopsis:[/B]\n" + scrapertools.htmlclean(scrapedplot)
plot += "\n\n[B]Casting:[/B]\n"
for actor, papel in scrapedcasting:
plot += actor + " (" + papel.strip() + ")\n"
tbd = TextBox("DialogTextViewer.xml", os.getcwd(), "Default")
tbd.ask(title, plot)
del tbd
return
try:
import xbmcgui
class TextBox(xbmcgui.WindowXML):
""" Create a skinned textbox window """
def __init__(self, *args, **kwargs):
pass
def onInit(self):
try:
self.getControl(5).setText(self.text)
self.getControl(1).setLabel(self.title)
except:
pass
def onClick(self, controlId):
pass
def onFocus(self, controlId):
pass
def onAction(self, action):
if action == 7:
self.close()
def ask(self, title, text):
self.title = title
self.text = text
self.doModal()
except:
pass
def valora_calidad(video, audio):
prefs_video = ['hdmicro', 'hd1080', 'hd720', 'hdrip', 'dvdrip', 'rip', 'tc-screener', 'ts-screener']
prefs_audio = ['dts', '5.1', 'rip', 'line', 'screener']
video = ''.join(video.split()).lower()
if video in prefs_video:
pts = (9 - prefs_video.index(video)) * 10
else:
pts = (9 - 1) * 10
audio = ''.join(audio.split()).lower()
if audio in prefs_audio:
pts += 9 - prefs_audio.index(audio)  # audio adds to the video score instead of overwriting it
else:
pts += 9 - 1
return pts
def valora_idioma(idioma_0, idioma_1):
prefs = ['spanish', 'latino', 'catalan', 'english', 'french']
if idioma_0 in prefs:
pts = (9 - prefs.index(idioma_0)) * 10
else:
pts = (9 - 1) * 10
if idioma_1 != '': # if there are subtitles
idioma_1 = idioma_1.replace(' SUB', '')
if idioma_1 in prefs:
pts += 8 - prefs.index(idioma_1)
else:
pts += 8 - 1
else:
pts += 9 # no-subtitle entries rank first
return pts
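
A worked check of the language scorer above, using the preference list defined in this file:

print(valora_idioma('spanish', 'english'))  # (9-0)*10 + (8-3) = 95
print(valora_idioma('latino', ''))          # (9-1)*10 + 9     = 89
print(valora_idioma('german', 'spanish'))   # (9-1)*10 + (8-0) = 88; an unlisted audio language falls back to 9-1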
def megadede_check(item):
if item.tipo_esp == "lista":
url_temp = host + "/listas/addmediapopup/" + item.tipo + "/" + item.idtemp + "?popup=1"
data = httptools.downloadpage(url_temp).data
patron = '<div class="lista model" data-model="10" data-id="([^"]+)">+'
patron += '.*?<a href="/lista/[^"]+">([^<]+)</a>+'
matches = re.compile(patron, re.DOTALL).findall(data)
itemlist = []
for id_lista, nombre_lista in matches:
itemlist.append(Item(channel=item.channel, action="megadede_check", tipo=item.tipo, tipo_esp="add_list",
token=item.token, title=nombre_lista, idlista=id_lista, idtemp=item.idtemp))
if len(itemlist) < 1:
itemlist.append(Item(channel=item.channel, action="", title="No tienes ninguna lista creada por ti!"))
return itemlist
else:
if item.tipo == "10" or item.tipo == "lista":
url_temp = host + "/set/lista/" + item.idtemp + "/" + item.valor
else:
if (item.tipo_esp == "add_list"):
url_temp = host + "/set/listamedia/" + item.idlista + "/add/" + item.tipo + "/" + item.idtemp
else:
url_temp = host + "/set/usermedia/" + item.tipo + "/" + item.idtemp + "/" + item.valor
if item.tipo == "5":
tipo_str = "series"
elif item.tipo == "lista":
tipo_str = "listas"
else:
tipo_str = "pelis"
headers = {"User-Agent":"Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/61.0.3163.100 Safari/537.36","Referer": host + "/" + tipo_str, "X-Requested-With": "XMLHttpRequest",
"X-CSRF-TOKEN": item.token}
data = httptools.downloadpage(url_temp, post="id=" + item.idtemp, headers=headers,
replace_headers=True).data.strip()
dialog = platformtools
dialog.ok = platformtools.dialog_ok
if data == "1":
if item.valor != "nothing":
dialog.ok('SUCCESS', 'Marca realizada con éxito!')
elif item.valor == "nothing":
dialog.ok('SUCCESS', 'Marca eliminada con éxito!')
elif item.valor == "unfollow":
dialog.ok('SUCCESS', 'Has dejado de seguir esta lista!')
elif item.valor == "follow":
dialog.ok('SUCCESS', 'Has comenzado a seguir esta lista!')
elif item.tipo_esp == "add_list":
dialog.ok('SUCCESS', 'Añadido a la lista!')
else:
dialog.ok('ERROR', 'No se pudo realizar la acción!')
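
All the marking endpoints in this module follow one shape, sketched here with invented ids (host and httptools as defined above; the token comes from the _token meta tag scraped earlier):

url_temp = host + "/set/usermedia/4/12345/seen"   # tipo "4" = movie, "5" = series, per this module
headers = {"X-Requested-With": "XMLHttpRequest", "X-CSRF-TOKEN": token}
data = httptools.downloadpage(url_temp, post="id=12345", headers=headers, replace_headers=True).data
print(data == "1")  # the endpoint answers "1" on success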

View File

@@ -1,23 +1,24 @@
{
"id": "mejortorrent",
"name": "Mejor Torrent",
"name": "MejorTorrent",
"active": true,
"adult": false,
"language": ["*"],
"language": ["cast", "lat"],
"thumbnail": "mejortorrent.png",
"banner": "mejortorrent.png",
"categories": [
"torrent",
"movie",
"tvshow",
"documentary"
"documentary",
"vos"
],
"settings": [
{
"id": "domain_name",
"type": "text",
"label": "URL actual de la Web Mejor Torrent",
"default": "http://www.mejortorrent.com/",
"default": "http://www.mejortorrent.org/",
"enabled": true,
"visible": true
},
@@ -37,6 +38,22 @@
"enabled": true,
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"CAST",
"LAT",
"VO",
"VOS",
"VOSE"
]
},
{
"id": "seleccionar_ult_temporadda_activa",
"type": "bool",
@@ -60,6 +77,27 @@
"default": true,
"enabled": true,
"visible": true
},
{
"id": "timeout_downloadpage",
"type": "list",
"label": "Timeout (segs.) en descarga de páginas o verificación de servidores",
"default": 5,
"enabled": true,
"visible": true,
"lvalues": [
"None",
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
"10"
]
}
]
}

View File

@@ -4,6 +4,7 @@ import re
import sys
import urllib
import urlparse
import time
from channelselector import get_thumb
from core import httptools
@@ -13,11 +14,25 @@ from core.item import Item
from platformcode import config, logger
from core import tmdb
from lib import generictools
from channels import filtertools
from channels import autoplay
host = 'http://www.mejortorrent.com/'
#host = config.get_setting('domain_name', 'mejortorrent')
__modo_grafico__ = config.get_setting('modo_grafico', 'mejortorrent')
#IDIOMAS = {'CAST': 'Castellano', 'LAT': 'Latino', 'VO': 'Version Original'}
IDIOMAS = {'Castellano': 'CAST', 'Latino': 'LAT', 'Version Original': 'VO'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['torrent']
channel = "mejortorrent"
host = 'http://www.mejortorrent.org/'
#host = config.get_setting('domain_name', channel)
categoria = channel.capitalize()
__modo_grafico__ = config.get_setting('modo_grafico', channel)
timeout = config.get_setting('timeout_downloadpage', channel)
def mainlist(item):
logger.info()
@@ -31,9 +46,13 @@ def mainlist(item):
thumb_series_az = get_thumb("channels_tvshow_az.png")
thumb_docus = get_thumb("channels_documentary.png")
thumb_buscar = get_thumb("search.png")
thumb_separador = get_thumb("next.png")
thumb_settings = get_thumb("setting_0.png")
autoplay.init(item.channel, list_servers, list_quality)
itemlist.append(Item(channel=item.channel, title="Novedades", action="listado_busqueda", extra="novedades", tipo=False,
url= host + "/secciones.php?sec=ultimos_torrents", thumbnail=thumb_buscar))
#itemlist.append(Item(channel=item.channel, title="Novedades", action="listado_busqueda", extra="novedades", tipo=False,
# url= host + "/secciones.php?sec=ultimos_torrents", thumbnail=thumb_buscar))
itemlist.append(Item(channel=item.channel, title="Peliculas", action="listado", extra="peliculas", tipo=False,
url= host + "/torrents-de-peliculas.html", thumbnail=thumb_pelis))
itemlist.append(Item(channel=item.channel, title="Peliculas HD", action="listado", extra="peliculas", tipo=False,
@@ -57,7 +76,20 @@ def mainlist(item):
itemlist.append(Item(channel=item.channel, title="Documentales Listado Alfabetico", action="alfabeto", extra="documentales", url= host + "/torrents-de-documentales.html", thumbnail=thumb_docus))
itemlist.append(Item(channel=item.channel, title="Buscar...", action="search", thumbnail=thumb_buscar, tipo=False))
itemlist.append(Item(channel=item.channel, url=host, title="[COLOR yellow]Configuración:[/COLOR]", folder=False, thumbnail=thumb_separador))
itemlist.append(Item(channel=item.channel, action="configuracion", title="Configurar canal", thumbnail=thumb_settings))
autoplay.show_option(item.channel, itemlist) #Activamos Autoplay
return itemlist
def configuracion(item):
from platformcode import platformtools
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
return
def alfabeto(item):
@@ -366,6 +398,9 @@ def listado(item):
title = re.sub(r'-\s[m|M].*?serie', '', title)
title_subs += ["Miniserie"]
if item_local.language == []:
item_local.language = ['CAST'] #Por defecto
if title.endswith('.'):
title = title[:-1]
@@ -608,6 +643,9 @@ def listado_busqueda(item):
title = re.sub(r'-\s[m|M].*?serie', '', title)
title_subs += ["Miniserie"]
if item_local.language == []:
item_local.language = ['CAST'] #Por defecto
if title.endswith('.'):
title = title[:-1]
@@ -687,7 +725,13 @@ def listado_busqueda(item):
item_local.contentSeason_save = item_local.contentSeason
del item_local.infoLabels['season']
itemlist.append(item_local.clone())
#Ahora se filtra por idioma, si procede, y se pinta lo que vale
if config.get_setting('filter_languages', channel) > 0: #Si hay idioma seleccionado, se filtra
itemlist = filtertools.get_link(itemlist, item_local, list_language)
else:
itemlist.append(item_local.clone()) #Si no, pintar pantalla
#cnt_title = len(itemlist) #Contador de líneas añadidas
#logger.debug(item_local)
@@ -715,6 +759,10 @@ def listado_busqueda(item):
def findvideos(item):
logger.info()
itemlist = []
itemlist_t = [] #Itemlist total de enlaces
itemlist_f = [] #Itemlist de enlaces filtrados
if not item.language:
item.language = ['CAST'] #Castellano por defecto
#Bajamos los datos de la página
data = ''
@@ -778,6 +826,8 @@ def findvideos(item):
# Extrae la dimensión del vídeo
size = scrapertools.find_single_match(item_local.url, '(\d{1,3},\d{1,2}?\w+)\.torrent')
size = size.upper().replace(".", ",").replace("G", " G ").replace("M", " M ") #sustituimos . por , porque Unify lo borra
if not size:
size = generictools.get_torrent_size(item_local.url) #Buscamos el tamaño en el .torrent
if size:
item_local.title = re.sub('\s\[\d+,?\d*?\s\w[b|B]\]', '', item_local.title) #Quitamos size de título, si lo traía
item_local.title = '%s [%s]' % (item_local.title, size) #Agregamos size al final del título
@@ -785,18 +835,40 @@ def findvideos(item):
item_local.quality = '%s [%s]' % (item.quality, size) #Agregamos size al final de calidad
#Ahora pintamos el link del Torrent, si lo hay
if item_local.url: # Hay Torrent ?
item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][Torrent][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.quality, str(item_local.language)) #Preparamos título de Torrent
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', item_local.title) #Quitamos etiquetas vacías
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', item_local.title) #Quitamos colores vacíos
item_local.alive = "??" #Calidad del link sin verificar
item_local.action = "play" #Visualizar vídeo
item_local.server = "torrent" #Seridor Torrent
if item_local.url: # Hay Torrent ?
item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][Torrent][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.quality, str(item_local.language)) #Preparamos título de Torrent
#Preparamos título y calidad, quitamos etiquetas vacías
item_local.title = re.sub(r'\s?\[COLOR \w+\]\[\[?\s?\]?\]\[\/COLOR\]', '', item_local.title)
item_local.title = re.sub(r'\s?\[COLOR \w+\]\s?\[\/COLOR\]', '', item_local.title)
item_local.title = item_local.title.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()
item_local.quality = re.sub(r'\s?\[COLOR \w+\]\[\[?\s?\]?\]\[\/COLOR\]', '', item_local.quality)
item_local.quality = re.sub(r'\s?\[COLOR \w+\]\s?\[\/COLOR\]', '', item_local.quality)
item_local.quality = item_local.quality.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()
item_local.alive = "??" #Calidad del link sin verificar
item_local.action = "play" #Visualizar vídeo
item_local.server = "torrent" #Seridor Torrent
itemlist.append(item_local.clone()) #Pintar pantalla
itemlist_t.append(item_local.clone()) #Pintar pantalla, si no se filtran idiomas
# Requerido para FilterTools
if config.get_setting('filter_languages', channel) > 0: #Si hay idioma seleccionado, se filtra
itemlist_f = filtertools.get_link(itemlist_f, item_local, list_language) #Pintar pantalla, si no está vacío
#logger.debug("title=[" + item.title + "], torrent=[ " + item_local.url + " ], url=[ " + url + " ], post=[" + item.post + "], thumbnail=[ " + item.thumbnail + " ]" + " size: " + size)
if len(itemlist_f) > 0: #Si hay entradas filtradas...
itemlist.extend(itemlist_f) #Pintamos pantalla filtrada
else:
if config.get_setting('filter_languages', channel) > 0 and len(itemlist_t) > 0: #Si no hay entradas filtradas ...
thumb_separador = get_thumb("next.png") #... pintamos todo con aviso
itemlist.append(Item(channel=item.channel, url=host, title="[COLOR red][B]NO hay elementos con el idioma seleccionado[/B][/COLOR]", thumbnail=thumb_separador))
itemlist.extend(itemlist_t) #Pintar pantalla con todo si no hay filtrado
# Requerido para AutoPlay
autoplay.start(itemlist, item) #Lanzamos Autoplay
return itemlist
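
findvideos above keeps two parallel lists: itemlist_t collects every torrent link and itemlist_f only those that pass the language filter, so the channel can fall back to the full list, preceded by a warning, when the filter leaves nothing. A self-contained sketch of that fallback decision, with names borrowed from the channel's own variables:

# Sketch of the filter-with-fallback pattern used in findvideos above
def merge_filtered(itemlist_t, itemlist_f, warning_item, filter_active):
    if itemlist_f:                       # the filter matched something: show only that
        return itemlist_f
    if filter_active and itemlist_t:     # filter active but empty: warn, then show everything
        return [warning_item] + itemlist_t
    return itemlist_t                    # no filter configured: show everything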

View File

@@ -1,6 +1,6 @@
{
"id": "mejortorrent1",
"name": "Mejor Torrent 1",
"name": "MejorTorrent 1",
"active": true,
"adult": false,
"language": ["cast", "lat"],

View File

@@ -1,313 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
IDIOMAS = {'la': 'Latino', 'es': 'Español', 'sub': 'VOS'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = [
'openload',
'gamovideo',
'powvideo',
'streamplay',
'streaminto',
'streame',
'flashx'
]
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(item.clone(title="Series",
action="todas",
url="http://metaserie.com/series-agregadas",
thumbnail='https://s27.postimg.cc/iahczwgrn/series.png',
fanart='https://s27.postimg.cc/iahczwgrn/series.png'
))
# itemlist.append(item.clone(title="Anime",
# action="todas",
# url="http://metaserie.com/animes-agregados",
# thumbnail='https://s2.postimg.cc/s38borokp/anime.png',
# fanart='https://s2.postimg.cc/s38borokp/anime.png'
# ))
itemlist.append(item.clone(title="Buscar",
action="search",
url="http://www.metaserie.com/?s=",
thumbnail='https://s30.postimg.cc/pei7txpa9/buscar.png',
fanart='https://s30.postimg.cc/pei7txpa9/buscar.png'
))
autoplay.show_option(item.channel, itemlist)
return itemlist
def todas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
patron = '<div class=poster>.*?<a href=(.*?) title=(.*?)en(.*?)>.*?'
patron += '<div class=poster_efecto><span>(.*?)<.*?div>.*?<img.*?src=(.*?) class'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle, lang, scrapedplot, scrapedthumbnail in matches:
if 'latino' in lang:
idioma = 'Latino'
elif 'español' in lang:
idioma = 'Español'
else:
idioma = 'VOS'  # evita un NameError cuando el idioma no es latino ni español
url = urlparse.urljoin(item.url, scrapedurl)
title = scrapertools.decodeHtmlentities(scrapedtitle) + ' (%s)' % idioma
thumbnail = scrapedthumbnail
plot = scrapedplot
fanart = 'https://s32.postimg.cc/7g50yo39h/metaserie.png'
itemlist.append(
Item(channel=item.channel,
action="temporadas",
title=title,
url=url,
thumbnail=thumbnail,
plot=plot,
fanart=fanart,
contentSerieName=title,
context=autoplay.context
))
# Paginacion
next_page_url = scrapertools.find_single_match(data,
'<li><a class=next page-numbers local-link href=('
'.*?)>&raquo;.*?li>')
if next_page_url != "":
itemlist.append(Item(channel=item.channel,
action="todas",
title=">> Página siguiente",
url=next_page_url,
thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png'
))
return itemlist
def temporadas(item):
logger.info()
itemlist = []
templist = []
data = httptools.downloadpage(item.url).data
patron = '<li class=".*?="([^"]+)".*?>([^<]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
url = scrapedurl
contentSeasonNumber = re.findall(r'.*?temporada-([^-]+)-', url)
title = scrapedtitle
title = title.replace("&", "x");
thumbnail = item.thumbnail
plot = item.plot
fanart = scrapertools.find_single_match(data, '<img src="([^"]+)"/>.*?</a>')
itemlist.append(
Item(channel=item.channel,
action='episodiosxtemp',
title=title,
fulltitle=item.contentSerieName,
url=url,
thumbnail=thumbnail,
plot=plot,
fanart=fanart,
contentSerieName=item.contentSerieName,
contentSeasonNumber=contentSeasonNumber,
context=item.context
))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
Item(channel=item.channel,
title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
url=item.url,
action="add_serie_to_library",
extra='episodios',
contentSerieName=item.contentSerieName
))
return itemlist
def episodios(item):
logger.info()
itemlist = []
templist = temporadas(item)
for tempitem in templist:
itemlist += episodiosxtemp(tempitem)
return itemlist
def more_episodes(item, itemlist, url):
logger.info()
templist = []
item.url = url
templist = episodiosxtemp(item)
itemlist += templist
return itemlist
def episodiosxtemp(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<td><h3 class=".*?href="([^"]+)".*?">([^<]+).*?td>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches[::-1]:
url = scrapedurl
contentEpisodeNumber = re.findall(r'.*?x([^\/]+)\/', url)
title = scrapedtitle
title = title.replace("&#215;", "x")
title = title.replace("×", "x")
thumbnail = item.thumbnail
plot = item.plot
fanart = item.fanart
itemlist.append(Item(channel=item.channel,
action="findvideos",
title=title,
fulltitle=item.fulltitle,
url=url,
thumbnail=item.thumbnail,
plot=plot,
contentSerieName=item.contentSerieName,
contentSeasonNumber=item.contentSeasonNumber,
contentEpisodeNumber=contentEpisodeNumber,
context=item.context
))
more_pages = scrapertools.find_single_match(data,
'<li><a class="next page-numbers local-link" href="(.*?)">&raquo;')
logger.debug('more_pages: %s' % more_pages)
if more_pages:
itemlist = more_episodes(item, itemlist, more_pages)
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
itemlist = []
if texto != '':
try:
data = httptools.downloadpage(item.url).data
patron = '<a href="([^\"]+)" rel="bookmark" class="local-link">([^<]+)<.*?'
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl, scrapedtitle in matches:
url = scrapedurl
title = scrapertools.decodeHtmlentities(scrapedtitle)
thumbnail = ''
plot = ''
itemlist.append(Item(channel=item.channel,
action="temporadas",
title=title,
fulltitle=title,
url=url,
thumbnail=thumbnail,
plot=plot,
folder=True,
contentSerieName=title
))
return itemlist
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def findvideos(item):
logger.info()
itemlist = []
audio = {'la': '[COLOR limegreen]LATINO[/COLOR]', 'es': '[COLOR yellow]ESPAÑOL[/COLOR]',
'sub': '[COLOR red]ORIGINAL SUBTITULADO[/COLOR]'}
data = httptools.downloadpage(item.url).data
patron = '<td><img src="http:\/\/metaserie\.com\/wp-content\/themes\/mstheme\/gt\/assets\/img\/([^\.]+).png" ' \
'width="20".*?<\/td>.*?<td><img src="http:\/\/www\.google\.com\/s2\/favicons\?domain=([^"]+)" \/>&nbsp;(' \
'[^<]+)<\/td>'
matches = re.compile(patron, re.DOTALL).findall(data)
anterior = scrapertools.find_single_match(data,
'<th scope="col"><a href="([^"]+)" rel="prev" '
'class="local-link">Anterior</a></th>')
siguiente = scrapertools.find_single_match(data,
'<th scope="col"><a href="([^"]+)" rel="next" '
'class="local-link">Siguiente</a></th>')
for scrapedid, scrapedurl, scrapedserv in matches:
url = scrapedurl
server = servertools.get_server_from_url(url).lower()
title = item.title + ' audio ' + audio[scrapedid] + ' en ' + server
extra = item.thumbnail
thumbnail = servertools.guess_server_thumbnail(server)
itemlist.append(Item(channel=item.channel,
action="play",
title=title,
fulltitle=item.contentSerieName,
url=url,
thumbnail=thumbnail,
extra=extra,
language=IDIOMAS[scrapedid],
server=server,
))
if item.extra1 != 'capitulos':
if anterior != '':
itemlist.append(Item(channel=item.channel,
action="findvideos",
title='Capitulo Anterior',
url=anterior,
thumbnail='https://s31.postimg.cc/k5kpwyrgb/anterior.png'
))
if siguiente != '':
itemlist.append(Item(channel=item.channel,
action="findvideos",
title='Capitulo Siguiente',
url=siguiente,
thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png'
))
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
return itemlist
def play(item):
logger.info()
itemlist = []
from core import servertools
itemlist.extend(servertools.find_video_items(data=item.url))
for videoitem in itemlist:
video = item.channel
videoitem.title = item.fulltitle
videoitem.folder = False
videoitem.thumbnail = item.extra
videoitem.fulltitle = item.fulltitle
return itemlist

View File

@@ -1,7 +1,7 @@
{
"id": "miltorrents",
"name": "Miltorrents",
"active": true,
"active": false,
"adult": false,
"language": ["cast"],
"thumbnail": "http://imgur.com/KZoska0.png",

View File

@@ -1,12 +0,0 @@
{
"id": "mundiseries",
"name": "Mundiseries",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "https://imgur.com/GdGMFi1.png",
"banner": "https://imgur.com/1bDbYY1.png",
"categories": [
"tvshow"
]
}

View File

@@ -1,101 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from channels import filtertools
from platformcode import config, logger
from platformcode import platformtools
from core import scrapertools
from core import servertools
from core.item import Item
from core import httptools
from channels import autoplay
from channelselector import get_thumb
host = "http://mundiseries.com"
list_servers = ['okru']
list_quality = ['default']
def mainlist(item):
logger.info()
itemlist = list()
autoplay.init(item.channel, list_servers, list_quality)
itemlist.append(Item(channel=item.channel, action="lista", title="Series",
url=urlparse.urljoin(host, "/lista-de-series"), thumbnail=get_thumb('tvshows', auto=True)))
autoplay.show_option(item.channel, itemlist)
return itemlist
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<a href="([^"]+)"><img src="([^"]+)" alt="ver ([^"]+) online'
matches = scrapertools.find_multiple_matches(data, patron)
for link, thumbnail, name in matches:
itemlist.append(item.clone(title=name, url=host+link, thumbnail=host+thumbnail, action="temporada"))
return itemlist
def temporada(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
logger.info("preon,:"+data)
patron = '<a href="([^"]+)"><div class="item-temporada"><img alt=".+?" src="([^"]+)"><div .+?>Ver ([^"]+)<\/div><\/a>'
matches = scrapertools.find_multiple_matches(data, patron)
for link,thumbnail,name in matches:
itemlist.append(item.clone(title=name, url=host+link, thumbnail=host+thumbnail,action="episodios",context=autoplay.context))
return itemlist
def episodios(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron_caps = 'href="http:.+?\/mundiseries.+?com([^"]+)" alt="([^"]+) Capitulo ([^"]+) Temporada ([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron_caps)
patron_show='<h1 class="h-responsive center">.+?'
patron_show+='<font color=".+?>([^"]+)<\/a><\/font>'
show = scrapertools.find_single_match(data,patron_show)
for link, name,cap,temp in matches:
if '|' in cap:
cap = cap.replace('|','')
if '|' in temp:
temp = temp.replace('|','')
if '|' in name:
name = name.replace('|','')
title = "%sx%s %s"%(temp, str(cap).zfill(2),name)
url=host+link
itemlist.append(Item(channel=item.channel, action="findvideos",
title=title, url=url, show=show))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir Temporada/Serie a la biblioteca de Kodi[/COLOR]", url=item.url,
action="add_serie_to_library", extra="episodios", show=show))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
id = ""
type = ""
data = httptools.downloadpage(item.url).data
it2 = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
itemlist.extend(servertools.find_video_items(data=data))
for item in it2:
if "###" not in item.url:
item.url += "###" + id + ";" + type
for videoitem in itemlist:
videoitem.channel= item.channel
autoplay.start(itemlist, item)
return itemlist

View File

@@ -77,7 +77,7 @@
"id": "intervenidos_channels_list",
"type": "text",
"label": "Lista de canales y clones de NewPct1 intervenidos y orden de sustitución de URLs",
"default": "('0', 'canal_org', 'canal_des', 'url_org', 'url_des', 'patron1', 'patron2', 'patron3', 'patron4', 'patron5', 'content_inc', 'content_exc', 'ow_force'), ('1', 'mejortorrent', 'mejortorrent1', 'http://www.mejortorrent.com/', 'https://mejortorrent1.com/', '(http.?:\/\/.*?\/)', 'http.?:\/\/.*?\/.*?-torrent.?-[^-]+-(?:[^-]+-)([^0-9]+-)', 'http.?:\/\/.*?\/.*?-torrent.?-[^-]+-(?:[^-]+-)[^0-9]+-\\d+-(Temporada-).html', 'http.?:\/\/.*?\/.*?-torrent.?-[^-]+-(?:[^-]+-)[^0-9]+-(\\d+)-', '', 'tvshow, season', '', 'force'), ('1', 'mejortorrent', 'mejortorrent1', 'http://www.mejortorrent.com/', 'https://mejortorrent1.com/', '(http.?:\/\/.*?\/)', 'http.?:\/\/.*?\/.*?-torrent.?-[^-]+-([^.]+).html', '', '', '', 'movie', '', 'force')",
"default": "('0', 'canal_org', 'canal_des', 'url_org', 'url_des', 'patron1', 'patron2', 'patron3', 'patron4', 'patron5', 'content_inc', 'content_exc', 'ow_force'), ('0', 'mejortorrent', 'mejortorrent1', 'http://www.mejortorrent.com/', 'https://mejortorrent1.com/', '(http.?:\/\/.*?\/)', 'http.?:\/\/.*?\/.*?-torrent.?-[^-]+-(?:[^-]+-)([^0-9]+-)', 'http.?:\/\/.*?\/.*?-torrent.?-[^-]+-(?:[^-]+-)[^0-9]+-\\d+-(Temporada-).html', 'http.?:\/\/.*?\/.*?-torrent.?-[^-]+-(?:[^-]+-)[^0-9]+-(\\d+)-', '', 'tvshow, season', '', 'force'), ('0', 'mejortorrent', 'mejortorrent1', 'http://www.mejortorrent.com/', 'https://mejortorrent1.com/', '(http.?:\/\/.*?\/)', 'http.?:\/\/.*?\/.*?-torrent.?-[^-]+-([^.]+).html', '', '', '', 'movie', '', 'force'), ('1', 'mejortorrent', 'mejortorrent', 'http://www.mejortorrent.com/', 'http://www.mejortorrent.org/', '', '', '', '', '', '*', '', 'force'), ('1', 'plusdede', 'megadede', 'https://www.plusdede.com', 'https://www.megadede.com', '', '', '', '', '', '*', '', 'auto')",
"enabled": true,
"visible": false
},
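
The intervenidos_channels_list default packs each redirection rule as a Python-style tuple: an active flag, source and destination channel, source and destination URL, up to five URL-rewrite regexes, content-type include/exclude lists, and an overwrite mode, with the first tuple acting as a header row that names the columns. One way to read the string back, offered only as a sketch since the addon's actual parser is not shown in this diff:

# Sketch: decoding the rule string into tuples; ast.literal_eval handles the quoted fields
import ast
from platformcode import config

raw = config.get_setting('intervenidos_channels_list', 'newpct1')
rules = ast.literal_eval('[%s]' % raw)        # -> list of tuples
header, rules = rules[0], rules[1:]           # the first tuple only documents the columns
active_rules = [r for r in rules if r[0] == '1']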

View File

@@ -2,11 +2,11 @@
"id": "pedropolis",
"name": "PedroPolis",
"active": true,
"adult": false,
"adult": true,
"language": ["cast", "lat"],
"fanart": "https://scontent-lht6-1.xx.fbcdn.net/v/t31.0-8/21056316_670362456502498_8317422545691005578_o.png?oh=1f13a23a931d82e944a7ec743a19f583&oe=5A599F4D",
"thumbnail": "https://scontent-lht6-1.xx.fbcdn.net/v/t1.0-9/20292600_467501756957771_6794721577753226614_n.jpg?oh=bba1479eccf0adceeb8c0d3450cc2531&oe=5A4EE0F5",
"banner": "",
"fanart": "https://s33.postimg.cc/d3ioghaof/image.png",
"thumbnail": "https://s33.postimg.cc/aft86728f/image.jpg",
"banner": "https://s33.postimg.cc/cyex6xlen/image.png",
"categories": [
"movie",
"tvshow",

View File

@@ -7,7 +7,7 @@
"thumbnail": "http://imgur.com/ThH8Zmk.png",
"banner": "peliculasgratis.png",
"categories": [
"torrent",
"direct",
"movie",
"tvshow"
],

View File

@@ -1,7 +1,7 @@
{
"id": "peliscon",
"name": "Peliscon",
"active": true,
"active": false,
"adult": false,
"language": ["cast"],
"thumbnail": "http://imgur.com/yTQRPUJ.png",

View File

@@ -49,8 +49,10 @@ def mainlist(item):
autoplay.init(item.channel, list_servers, list_quality)
itemlist.append(Item(channel=item.channel, title="Películas", action="submenu", url=api, thumbnail=thumb_pelis, extra="peliculas"))
itemlist.append(Item(channel=item.channel, title="Buscar en Películas >>", action="search", url=api + "?keywords=%s&page=0", thumbnail=thumb_buscar, extra="peliculas"))
itemlist.append(Item(channel=item.channel, title="Series", action="submenu", url=api_serie, thumbnail=thumb_series, extra="series"))
itemlist.append(Item(channel=item.channel, title="Buscar en Series >>", action="search", url=api_serie + "?keywords=%s&page=0", thumbnail=thumb_buscar, extra="series"))
itemlist.append(Item(channel=item.channel, url=host, title="[COLOR yellow]Configuración:[/COLOR]", folder=False, thumbnail=thumb_separador))
@@ -94,7 +96,6 @@ def submenu(item):
itemlist.append(item.clone(action="listado", title=" + Valoradas", url=api + "?sort_by=rating&page=0", thumbnail=thumb_popular))
itemlist.append(item.clone(action="alfabeto", title=" Ordenado Alfabético", url=api, thumbnail=thumb_pelis_az))
itemlist.append(item.clone(action="categorias", title=" Ordenado por Género", url=api, thumbnail=thumb_generos))
itemlist.append(item.clone(action="search", title="Buscar Películas...", url=api + "?keywords=%s&page=0", thumbnail=thumb_buscar))
else:
itemlist.append(item.clone(action="listado", title="Series", url=api_serie + "?sort_by=''&page=0", thumbnail=thumb_series))
@@ -103,7 +104,6 @@ def submenu(item):
itemlist.append(item.clone(action="listado", title=" + Valoradas", url=api_serie + "?sort_by=rating&page=0", thumbnail=thumb_popular))
itemlist.append(item.clone(action="alfabeto", title=" Ordenado Alfabético", url=api_serie, thumbnail=thumb_series_az))
itemlist.append(item.clone(action="categorias", title=" Ordenado por Género", url=api_serie, thumbnail=thumb_generos))
itemlist.append(item.clone(action="search", title="Buscar Series...", url=api_serie + "?keywords=%s&page=0", thumbnail=thumb_buscar))
return itemlist
@@ -654,13 +654,15 @@ def actualizar_titulos(item):
def search(item, texto):
logger.info()
#texto = texto.replace(" ", "+")
itemlist = []
texto = texto.replace(' ', '%20')
try:
item.url = item.url % texto.replace(' ', '%20')
item.url = item.url % texto
if texto != '':
return listado(item)
itemlist = listado(item)
return itemlist
except:
import sys
for line in sys.exc_info():

View File

@@ -264,39 +264,47 @@ def listado(item):
logger.info()
itemlist = []
action = "findvideos"
content_type = "movie"
if item.extra == 'serie':
action = "temporadas"
content_type = "tvshow"
# ~ data = httptools.downloadpage(item.url).data
data = obtener_data(item.url)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)
# logger.info("data -- {}".format(data))
patron = '<li[^>]+><a href="([^"]+)" alt="([^<|\(]+).*?<img src="([^"]+).*?>.*?<span>\(([^)]+).*?' \
'<p class="font12">(.*?)</p>'
matches = re.compile(patron, re.DOTALL).findall(data)
if item.extra == 'movies':
action = "findvideos"
content_type = "movie"
for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedyear, scrapedplot in matches:
title = "%s (%s)" % (scrapertools.unescape(scrapedtitle.strip()), scrapedyear)
plot = scrapertools.entityunescape(scrapedplot)
patron = '<li[^>]+><a href="([^"]+)" alt="([^<|\(]+).*?<img src="([^"]+).*?>.*?<span>\(([^)]+).*?' \
'<p class="font12">(.*?)</p>'
matches = re.compile(patron, re.DOTALL).findall(data)
new_item = Item(channel=__channel__, title=title, url=urlparse.urljoin(CHANNEL_HOST, scrapedurl), action=action,
thumbnail=scrapedthumbnail, plot=plot, context="", extra=item.extra,
contentType=content_type, fulltitle=title)
for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedyear, scrapedplot in matches:
title = "%s (%s)" % (scrapertools.unescape(scrapedtitle.strip()), scrapedyear)
plot = scrapertools.entityunescape(scrapedplot)
if item.extra == 'serie':
new_item.show = scrapertools.unescape(scrapedtitle.strip())
# fix en algunos casos la url está mal
new_item.url = new_item.url.replace(CHANNEL_HOST + "pelicula", CHANNEL_HOST + "serie")
else:
new_item = Item(channel=__channel__, title=title, url=urlparse.urljoin(CHANNEL_HOST, scrapedurl), action=action,
thumbnail=scrapedthumbnail, plot=plot, context="", extra=item.extra,
contentType=content_type)
new_item.fulltitle = scrapertools.unescape(scrapedtitle.strip())
new_item.infoLabels = {'year': scrapedyear}
itemlist.append(new_item)
else:
action = "temporadas"
content_type = "tvshow"
patron = '<li[^>]+><a href="([^"]+)" alt="([^<|\(]+).*?<img src="([^"]+)'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
title = scrapertools.unescape(scrapedtitle.strip())
new_item = Item(channel=__channel__, title=title, url=urlparse.urljoin(CHANNEL_HOST, scrapedurl), action=action,
thumbnail=scrapedthumbnail, context="", extra=item.extra,
contentType=content_type, fulltitle=title)
new_item.show = title
# fix en algunos casos la url está mal
new_item.url = new_item.url.replace(CHANNEL_HOST + "pelicula", CHANNEL_HOST + "serie")
itemlist.append(new_item)
itemlist.append(new_item)
# Obtenemos los datos basicos de todas las peliculas mediante multihilos
tmdb.set_infoLabels(itemlist, __modo_grafico__)
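
Note how listado defers artwork and metadata: items are appended with only the scraped title and year, and a single batched tmdb call afterwards fills plot, thumbnails and ratings for the whole list over several threads. A usage sketch of that pattern with the core helper; the example item is hypothetical:

# Sketch: batch-enriching scraped items through TMDb in one threaded call
from core import tmdb
from core.item import Item

itemlist = [Item(title="Example (2018)", contentType="movie", infoLabels={'year': '2018'})]
tmdb.set_infoLabels(itemlist, seekTmdb=True)   # fills infoLabels in place for every item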

View File

@@ -1,12 +0,0 @@
{
"id": "pelispekes",
"name": "PelisPekes",
"active": true,
"adult": false,
"language": ["lat"],
"thumbnail": "pelispekes.png",
"banner": "pelispekes.png",
"categories": [
"movie"
]
}

View File

@@ -1,63 +0,0 @@
# -*- coding: utf-8 -*-
import re
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger
def mainlist(item):
logger.info()
itemlist = []
if item.url == "":
item.url = "http://www.pelispekes.com/"
data = scrapertools.cachePage(item.url)
patron = '<div class="poster-media-card"[^<]+'
patron += '<a href="([^"]+)" title="([^"]+)"[^<]+'
patron += '<div class="poster"[^<]+'
patron += '<div class="title"[^<]+'
patron += '<span[^<]+</span[^<]+'
patron += '</div[^<]+'
patron += '<span class="rating"[^<]+'
patron += '<i[^<]+</i><span[^<]+</span[^<]+'
patron += '</span[^<]+'
patron += '<div class="poster-image-container"[^<]+'
patron += '<img width="\d+" height="\d+" src="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
url = scrapedurl
title = scrapedtitle
thumbnail = scrapedthumbnail
plot = ""
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
itemlist.append(
Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, fanart=thumbnail,
plot=plot, contentTitle=title, contentThumbnail=thumbnail))
# Extrae la pagina siguiente
next_page_url = scrapertools.find_single_match(data,
'<a href="([^"]+)"><i class="glyphicon glyphicon-chevron-right')
if next_page_url != "":
itemlist.append(Item(channel=item.channel, action="mainlist", title=">> Página siguiente", url=next_page_url,
viewmode="movie"))
return itemlist
def findvideos(item):
logger.info("item=" + item.tostring())
data = scrapertools.cachePage(item.url)
data = data.replace("www.pelispekes.com/player/tune.php?nt=", "netu.tv/watch_video.php?v=")
item.plot = scrapertools.find_single_match(data, '<h2>Sinopsis</h2>(.*?)<div')
item.plot = scrapertools.htmlclean(item.plot).strip()
item.contentPlot = item.plot
logger.info("plot=" + item.plot)
return servertools.find_video_items(item=item, data=data)

View File

@@ -9,10 +9,19 @@
"banner": "https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/pelisplanetbaner.png",
"categories": [
"movie",
"tvshow",
"direct",
"vos"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "modo_grafico",
"type": "bool",

View File

@@ -177,50 +177,25 @@ def peliculas(item):
patron += 'browse-movie-link-qd.*?>([^<]+)</.+?'
patron += '<p>([^<]+)</p>.+?'
patron += 'title one-line">([^<]+)</h2>.+?'
patron += 'title-category">([^<]+)</span>.*?'
patron += 'img-responsive" src="([^"]+)".*?'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, quality, year, scrapedtitle, category, scrapedthumbnail in matches:
for scrapedurl, quality, year, scrapedtitle, scrapedthumbnail in matches:
if '/ ' in scrapedtitle:
scrapedtitle = scrapedtitle.partition('/ ')[2]
title = scrapedtitle
contentTitle = title
url = scrapedurl
quality = quality
thumbnail = scrapedthumbnail
itemlist.append(Item(channel=item.channel,
action="findvideos",
title="%s [COLOR yellowgreen][%s][/COLOR] [COLOR violet][%s][/COLOR]" % (title, category, year),
url=url,
title=scrapedtitle,
url=scrapedurl,
quality=quality,
thumbnail=thumbnail,
contentTitle=contentTitle,
thumbnail=scrapedthumbnail,
contentTitle=scrapedtitle,
infoLabels={"year": year},
text_color=color3
))
# for scrapedurl, calidad, year, scrapedtitle, scrapedthumbnail in matches:
# datas = httptools.downloadpage(scrapedurl).data
# datas = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", datas)
# # logger.info(datas)
# if '/ ' in scrapedtitle:
# scrapedtitle = scrapedtitle.partition('/ ')[2]
# contentTitle = scrapertools.find_single_match(datas, '<em class="pull-left">Titulo original: </em>([^<]+)</p>')
# contentTitle = scrapertools.decodeHtmlentities(contentTitle.strip())
# rating = scrapertools.find_single_match(datas, 'alt="Puntaje MPA IMDb" /></a><span>([^<]+)</span>')
# director = scrapertools.find_single_match(
# datas, '<div class="list-cast-info tableCell"><a href="[^"]+" rel="tag">([^<]+)</a></div>')
# title = "%s [COLOR yellow][%s][/COLOR]" % (scrapedtitle.strip(), calidad.upper())
#
# logger.debug('thumbnail: %s' % scrapedthumbnail)
# new_item = Item(channel=item.channel, action="findvideos", title=title, plot='', contentType='movie',
# url=scrapedurl, contentQuality=calidad, thumbnail=scrapedthumbnail,
# contentTitle=contentTitle, infoLabels={"year": year, 'rating': rating, 'director': director},
# text_color=color3)
# itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
paginacion = scrapertools.find_single_match(data, '<a class="nextpostslink" rel="next" href="([^"]+)">')
@@ -290,7 +265,7 @@ def findvideos(item):
itemlist.sort(key=lambda it: it.title, reverse=True)
if 'drive' not in servidores and 'streamvips' not in servidores and 'mediastream' not in servidores:
if 'drive' not in servidores and 'streamvips' not in servidores and 'mediastream' not in servidores and 'megavips' not in servidores:
if 'ultrastream' not in servidores:
server = servertools.get_server_from_url(scrapedurl)
quality = scrapertools.find_single_match(

View File

@@ -1,249 +1,241 @@
# -*- coding: utf-8 -*-
# -*- Channel Pelisplus -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import urllib
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core import jsontools
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
from core import servertools
host = "http://www.pelisplus.tv/"
IDIOMAS = {'latino': 'Latino'}
list_language = IDIOMAS.values()
IDIOMA = {'latino': 'Latino'}
list_language = IDIOMA.values()
list_quality = []
list_quality = ['1080p',
'720p',
'480p',
'360p',
'240p',
'default'
]
list_servers = [
'gvideo',
'directo',
'openload',
'thevideos'
]
'rapidvideo',
'streamango',
'vidlox',
'vidoza'
]
host = 'https://www.pelisplus.to/'
def get_source(url):
def get_source(url, referer=None):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
if referer == None:
data = httptools.downloadpage(url).data
else:
data = httptools.downloadpage(url, headers={'Referer':referer}).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(
item.clone(title="Peliculas",
action="sub_menu",
thumbnail=get_thumb('movies', auto=True),
))
itemlist.append(Item(channel=item.channel,
title="Peliculas",
action="sub_menu",
thumbnail=get_thumb('movies', auto=True),
))
itemlist.append(
item.clone(title="Series",
action="sub_menu",
thumbnail=get_thumb('tvshows', auto=True),
))
itemlist.append(Item(channel=item.channel,
title="Series",
action="sub_menu",
thumbnail=get_thumb('tvshows', auto=True),
))
itemlist.append(
item.clone(title="Buscar", action="search", url=host + 'busqueda/?s=',
thumbnail=get_thumb('search', auto=True),
))
itemlist.append(Item(channel=item.channel,
title="Buscar", action="search", url=host + 'search/?s=',
thumbnail=get_thumb('search', auto=True),
))
autoplay.show_option(item.channel, itemlist)
return itemlist
def sub_menu(item):
logger.info()
itemlist = []
content = item.title.lower()
itemlist.append(item.clone(title="Todas",
action="list_all",
url=host + '%s/ultimas-%s/' % (content, content),
thumbnail=get_thumb('all', auto=True),
))
itemlist.append(item.clone(title="Generos",
action="generos",
url=host + '%s/' % content,
thumbnail=get_thumb('genres', auto=True),
))
itemlist.append(Item(channel=item.channel,
title="Ultimas",
action="list_all",
url=host + '%s/estrenos' % content,
thumbnail=get_thumb('last', auto=True),
type=content
))
itemlist.append(Item(channel=item.channel,title="Todas",
action="list_all",
url=host + '%s' % content,
thumbnail=get_thumb('all', auto=True),
type=content
))
itemlist.append(Item(channel=item.channel,
title="Generos",
action="section",
thumbnail=get_thumb('genres', auto=True),
type=content
))
return itemlist
def list_all(item):
logger.info()
itemlist = []
itemlist=[]
data = get_source(item.url)
patron = '(?:</a>|Posters>)<a href=(.*?) class=Posters.*?data-title=(.*?) data.*?src=(.*?) alt'
full_data = get_source(item.url)
data = scrapertools.find_single_match(full_data, '<div class="Posters">(.*?)</(?:ul|a></div>)')
patron = 'href="([^"]+)".*?src="([^"]+)".*?<p>([^<]+)</p>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
url = scrapedurl
title = scrapertools.decodeHtmlentities(scrapedtitle)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
title = scrapedtitle
thumbnail = scrapedthumbnail
filter_thumb = thumbnail.replace("https://image.tmdb.org/t/p/w300", "")
filter_list = {"poster_path": filter_thumb}
filter_list = filter_list.items()
url = scrapedurl
filtro_thumb = scrapedthumbnail.replace("https://image.tmdb.org/t/p/w154", "")
filtro_list = {"poster_path": filtro_thumb}
filtro_list = filtro_list.items()
new_item=(
Item(channel=item.channel,
action='findvideos',
title=title,
url=url,
thumbnail=thumbnail,
infoLabels={'filtro': filtro_list},
context=autoplay.context
))
if 'serie' not in url:
new_item.contentTitle = scrapedtitle
new_item = Item(channel=item.channel,
title=title,
url=url,
thumbnail=thumbnail,
infoLabels={'filtro':filter_list})
if item.type == 'peliculas' or 'serie' not in url:
new_item.action = 'findvideos'
new_item.contentTitle = scrapedtitle
else:
new_item.contentSerieName = scrapedtitle
new_item.action = 'seasons'
new_item.contentSerieName = scrapedtitle
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
tmdb.set_infoLabels(itemlist, seekTmdb=True)
# Paginación
next_page_pattern = '<a class="page-link" href="([^"]+)" data-ci-pagination-page="\d+" rel="next">&gt;</a>'
url_next_page = scrapertools.find_single_match(full_data, next_page_pattern)
if url_next_page:
itemlist.append(Item(channel=item.channel, title="Siguiente >>", url=url_next_page, action='list_all'))
# Pagination
if itemlist != []:
next_page = scrapertools.find_single_match(data, '<li><a href=([^ ]+) rel=next>&raquo;</a>')
if next_page != '':
itemlist.append(item.clone(action="list_all",
title='Siguiente >>>',
url=host+next_page,
thumbnail='https://s32.postimg.cc/4zppxf5j9/siguiente.png'
))
return itemlist
def generos(item):
logger.info()
itemlist = []
data = get_source(item.url)
if 'series' not in item.url:
clean_genre = 'PELÍCULAS DE'
else:
clean_genre = 'SERIES DE'
patron = '<h2 class=Heading--carousel> %s(.*?) <a class=Heading-link title=View All href=(.*?)><' % clean_genre
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedtitle, scrapedurl in matches:
url = scrapedurl
title = scrapedtitle
if 'agregadas' not in title.lower():
itemlist.append(
Item(channel=item.channel,
action="list_all",
title=title,
url=url,
))
return itemlist
def seasons(item):
logger.info()
itemlist = []
templist = []
data = get_source(item.url)
serie_id = scrapertools.find_single_match(data, '<div class=owl-carousel data-serieid=(.*?)>')
itemlist=[]
patron = 'class=js-season-item> SEASON<span>(.*?)</span>'
data=get_source(item.url)
patron='data-toggle="tab">TEMPORADA (\d+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
for season in matches:
contentSeasonNumber = season
infoLabels['season']=season
itemlist.append(Item(channel=item.channel, action="episodes", title='Temporada %s' % season,
serie_id=serie_id, contentSeasonNumber=contentSeasonNumber,
serie_url=item.url, infoLabels=infoLabels))
if item.extra == 'seasons':
for tempitem in itemlist:
templist += episodes(tempitem)
else:
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
title = 'Temporada %s' % season
itemlist.append(Item(channel=item.channel, title=title, url=item.url, action='episodesxseasons',
infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
Item(channel=item.channel,
title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
url=item.url,
action="add_serie_to_library",
extra="seasons",
contentSerieName=item.contentSerieName,
contentSeasonNumber=contentSeasonNumber
))
if item.extra == 'seasons':
return templist
else:
return itemlist
Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName))
return itemlist
def episodes(item):
def episodios(item):
logger.info()
itemlist = []
templist = seasons(item)
for tempitem in templist:
itemlist += episodesxseasons(tempitem)
return itemlist
def episodesxseasons(item):
logger.info()
itemlist= []
itemlist = []
url = host+'api/episodes?titleId=%s&seasonNumber=%s' % (item.serie_id, item.contentSeasonNumber)
season = item.infoLabels['season']
data=get_source(item.url)
season_data = scrapertools.find_single_match(data, 'id="pills-vertical-%s">(.*?)</div>' % season)
patron='href="([^"]+)".*?block">Capitulo (\d+) - ([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(season_data)
data = jsontools.load(httptools.downloadpage(url).data)
episode_list = data['titles']
infoLabels = item.infoLabels
for episode in episode_list:
url = item.serie_url+episode['friendlyTitle4Url']
thumbnail = episode['url_image']
plot = episode['shortDescription']
contentEpisodeNumber = episode['tvSeasonEpisodeNumber']
title = '%sx%s - %s' % (item.contentSeasonNumber, contentEpisodeNumber, episode['title'])
infoLabels['episode']=contentEpisodeNumber
for scrapedurl, scrapedepisode, scrapedtitle in matches:
infoLabels['episode'] = scrapedepisode
url = scrapedurl
title = '%sx%s - %s' % (infoLabels['season'], infoLabels['episode'], scrapedtitle)
itemlist.append(Item(channel=item.channel, title= title, url=url, action='findvideos', infoLabels=infoLabels))
itemlist.append(Item(channel=item.channel, action='findvideos', title=title, url=url, thumbnail=thumbnail,
plot=plot, infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def section(item):
logger.info()
itemlist=[]
data = get_source(host)
genres_data = scrapertools.find_single_match(data, '>Generos<(.*?)</ul>')
patron = 'href="\/\w+\/([^"]+)">([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(genres_data)
for scrapedurl, scrapedtitle in matches:
title = scrapedtitle
url = '%s/%s/%s' % (host, item.type, scrapedurl)
itemlist.append(Item(channel=item.channel, url=url, title=title, action='list_all', type=item.type))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = get_source(item.url)
servers_page = scrapertools.find_single_match(data, '<iframe src="([^"]+)"')
data = get_source(servers_page, referer=item.url)
patron = '<a href="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for enc_url in matches:
url_data = get_source(enc_url, referer=item.url)
url = scrapertools.find_single_match(url_data, '<iframe src="([^"]+)"')
language = 'latino'
if not config.get_setting('unify'):
title = ' [%s]' % language.capitalize()
else:
title = ''
itemlist.append(Item(channel=item.channel, title='%s'+title, url=url, action='play', language=IDIOMAS[language],
infoLabels=item.infoLabels))
itemlist.extend(servertools.find_video_items(data=data))
for videoitem in itemlist:
videoitem.channel = item.channel
videoitem.language = IDIOMA['latino']
videoitem.title = '[%s] [%s]' % (videoitem.server, videoitem.language)
videoitem.infoLabels = item.infoLabels
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
# Requerido para FilterTools
@@ -255,18 +247,18 @@ def findvideos(item):
if item.contentType == 'movie':
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
url=item.url, action="add_pelicula_to_library", extra="findvideos",
contentTitle=item.contentTitle))
itemlist.append(Item(channel=item.channel,
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
url=item.url,
action="add_pelicula_to_library",
extra="findvideos",
contentTitle=item.contentTitle))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + 'busqueda/?s=' + texto
item.url += texto
try:
if texto != '':
@@ -279,26 +271,20 @@ def search(item, texto):
logger.error("%s" % line)
return []
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria in ['peliculas','latino']:
item.url = host + 'peliculas/ultimas-peliculas/'
if categoria in ['peliculas', 'latino']:
item.url = host + 'peliculas/estrenos'
elif categoria == 'infantiles':
item.url = host + 'peliculas/animacion/'
item.url = host + 'peliculas/generos/animacion/'
elif categoria == 'terror':
item.url = host + 'peliculas/terror/'
elif categoria == 'documentales':
item.url = host + 'documentales/'
item.url = host + 'peliculas/generos/terror/'
item.type='peliculas'
itemlist = list_all(item)
if itemlist[-1].title == 'Siguiente >>>':
if itemlist[-1].title == 'Siguiente >>':
itemlist.pop()
except:
import sys
@@ -306,4 +292,4 @@ def newest(categoria):
logger.error("{0}".format(line))
return []
return itemlist
return itemlist
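
The rewritten findvideos walks an iframe chain: the title page embeds a player iframe, that page lists one anchor per mirror, and each anchor resolves to a final iframe holding the playable URL. A compact sketch of the chain, reusing the channel's own get_source helper and the patterns above:

# Sketch: resolving mirrors through the nested iframes (patterns copied from findvideos)
def resolve_mirrors(item_url):
    data = get_source(item_url)
    player = scrapertools.find_single_match(data, '<iframe src="([^"]+)"')
    data = get_source(player, referer=item_url)
    urls = []
    for link in scrapertools.find_multiple_matches(data, '<a href="([^"]+)"'):
        final = get_source(link, referer=item_url)
        urls.append(scrapertools.find_single_match(final, '<iframe src="([^"]+)"'))
    return urls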

View File

@@ -147,7 +147,7 @@ def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def list_all (item):
@@ -159,17 +159,20 @@ def list_all (item):
else:
contentType = 'pelicula'
action = 'findvideos'
if item.type not in ['normal', 'seccion', 'serie']:
if 'pagination' in item.url:
post = {'page':item.page, 'type':item.type,'slug':item.slug,'id':item.id}
post = urllib.urlencode(post)
data =httptools.downloadpage(item.url, post=post, headers=CHANNEL_HEADERS).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
patron ='<a href=(.*?)><figure><img.*?src=(.*?) alt=.*?<p>(.*?)<\/p><span>(\d{4})<\/span>'
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
patron = '<a href="([^"]+)">.*?<figure><img.*?src="([^"]+)".*?'
patron +='<span class="year text-center">(\d{4})</span>.*?<p>([^<]+)</p>'
else:
data = get_source(item.url)
patron = 'item-%s><a href=(.*?)><figure><img.*?data-src=(.*?) alt=.*?<p>(.*?)<\/p><span>(\d{4})</span>'%contentType
patron = '<div class="item-pelicula pull-left"><a href="([^"]+)">.*?data-src="([^"]+)".*?'
patron += '<span class="year text-center">([^<]+)</span>.*?<p>([^<]+)</p>'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear in matches:
for scrapedurl, scrapedthumbnail, scrapedyear, scrapedtitle in matches:
url = host+scrapedurl+'p001/'
thumbnail = scrapedthumbnail
contentTitle=scrapedtitle
@@ -192,8 +195,8 @@ def list_all (item):
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb =True)
#Paginacion
next_page_valid = scrapertools.find_single_match(data, '<div class=butmore(?: site=series|) page=(.*?) id=(.*?) '
'type=(.*?) limit=.*?>')
next_page_valid = scrapertools.find_single_match(data, '<div class="butmore" site=(?:""|"series") page="(\d+)" '
'id="(.*?)" type="([^"]+)" limit="\d+">')
if item.type != 'normal' and (len(itemlist)>19 or next_page_valid):
type = item.type
if item.type == 'serie':
@@ -233,24 +236,18 @@ def seccion(item):
data = get_source(item.url)
page = "1"
if item.seccion == 'generos':
patron = '<li><a href=(.*?)><i class=ion-cube><\/i>(.*?)<\/span>'
patron = '<li><a href="([^"]+)"><i class="ion-cube"></i>([^<]+)<'
type = 'genre'
pat = 'genero/'
elif item.seccion == 'anios':
patron = '<li><a href=(\/peliculas.*?)>(\d{4})<\/a>'
patron = '<li><a href="(\/peliculas.*?)">(\d{4})<\/a>'
type = 'year'
pat = 'peliculas-'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle in matches:
title = scrapedtitle
if item.seccion == 'generos':
cant = re.sub(r'.*?<span class=cant-genre>','',scrapedtitle)
only_title = re.sub(r'<.*','',scrapedtitle).rstrip()
title = only_title+' (%s)'%cant
url = host+scrapedurl
slug = scrapertools.find_single_match(scrapedurl, "%s(.*?)/" %pat)
if item.seccion in ['generos', 'anios']:
url = host + "/pagination/"
itemlist.append(
Item(action="list_all",
channel=item.channel,

View File

@@ -62,7 +62,7 @@ def mainlist(item):
type = 'series'))
itemlist.append(item.clone(title = "Buscar",
url = host + '/donde-ver?q=',
url = host + '/ver-online?q=',
action ='search',
type = 'movie'))

View File

@@ -493,10 +493,10 @@ def do_search(item, categories=None):
if categories:
# Si no se ha seleccionado torrent no se muestra
if "torrent" not in categories and "infoPlus" not in categories:
if "torrent" in channel_parameters["categories"]:
logger.info("%s -torrent-" % basename_without_extension)
continue
#if "torrent" not in categories and "infoPlus" not in categories:
# if "torrent" in channel_parameters["categories"]:
# logger.info("%s -torrent-" % basename_without_extension)
# continue
for cat in categories:
if cat not in channel_parameters["categories"]:

View File

@@ -1,6 +1,6 @@
{
"id": "seriesblanco",
"name": "SeriesBlanco",
"name": "SeriesBlanco.org",
"active": true,
"adult": false,
"language": ["cast", "lat"],

View File

@@ -1,13 +1,14 @@
{
"id": "metaserie",
"name": "MetaSerie (Latino)",
"id": "seriesblancoxyz",
"name": "SeriesBlanco.xyz",
"active": true,
"adult": false,
"language": ["lat"],
"thumbnail": "https://s32.postimg.cc/7g50yo39h/metaserie.png",
"banner": "https://s31.postimg.cc/u6yddil8r/metaserie_banner.png",
"language": ["cast", "lat"],
"thumbnail": "https://s22.postimg.cc/nucz720sx/image.png",
"banner": "",
"categories": [
"tvshow"
"tvshow",
"vos"
],
"settings": [
{
@@ -27,10 +28,11 @@
"visible": true,
"lvalues": [
"No filtrar",
"Latino",
"Español",
"VOS"
"Cast",
"Lat",
"VOSE",
"VO"
]
}
]
}
}

View File

@@ -0,0 +1,323 @@
# -*- coding: utf-8 -*-
# -*- Channel SeriesBlanco.xyz -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
host = 'http://seriesblanco.xyz/'
IDIOMAS = {'Esp':'Cast', 'es': 'Cast', 'la': 'Lat', 'Latino':'Lat', 'vos': 'VOSE', 'vo': 'VO'}
list_language = IDIOMAS.values()
list_quality = ['SD', 'Micro-HD-720p', '720p', 'HDitunes', 'Micro-HD-1080p' ]
list_servers = ['powvideo','yourupload', 'openload', 'gamovideo', 'flashx', 'clipwatching', 'streamango', 'streamcloud']
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(Item(channel=item.channel,
title="Nuevos Capitulos",
action="new_episodes",
thumbnail=get_thumb('new_episodes', auto=True),
url=host))
itemlist.append(Item(channel=item.channel,
title="Todas",
action="list_all",
thumbnail=get_thumb('all', auto=True),
url=host + 'listado/',
))
itemlist.append(Item(channel=item.channel,
title="Generos",
action="section",
thumbnail=get_thumb('genres', auto=True),
url=host,
))
# itemlist.append(Item(channel=item.channel,
# title="A - Z",
# action="section",
# thumbnail=get_thumb('alphabet', auto=True),
# url=host+'listado/', ))
itemlist.append(Item(channel=item.channel,
title="Buscar",
action="search",
thumbnail=get_thumb('search', auto=True)))
itemlist = filtertools.show_option(itemlist, item.channel, list_language, list_quality)
autoplay.show_option(item.channel, itemlist)
return itemlist
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def list_all(item):
logger.info()
itemlist = []
data = get_source(item.url)
data = data.replace ("'", '"')
patron = '<li><div style=.*?><a href="([^"]+)"><img.*?src="([^"]+)" title="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
scrapedtitle = scrapedtitle.strip()
url = host + scrapedurl
thumbnail = scrapedthumbnail
title = scrapertools.decodeHtmlentities(scrapedtitle)
itemlist.append(Item(channel=item.channel,
action='seasons',
title=title,
url=url,
thumbnail=thumbnail,
contentSerieName=scrapedtitle,
context=filtertools.context(item, list_language, list_quality),
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# #Paginacion
if itemlist != []:
base_page = scrapertools.find_single_match(item.url, '([^?]+)')
next_page = scrapertools.find_single_match(data, '</span><a href="(\?pagina=\d+)">>></a>')
if next_page != '':
itemlist.append(Item(channel=item.channel,
action="list_all",
title='Siguiente >>>',
url=base_page+next_page,
thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png',
))
return itemlist
def section(item):
logger.info()
itemlist = []
data = get_source(item.url)
if item.title == 'Generos':
patron = '<li><a href="([^"]+)"><i class="fa fa-bookmark-o"></i> (.*?)</a></li>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
if item.title == 'Generos':
url = host + scrapedurl
title = scrapedtitle
itemlist.append(Item(channel=item.channel,
action='list_all',
title=title,
url=url
))
return itemlist
def seasons(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = "<p class='panel-primary btn-primary'> Temporada (\d+)</p>"
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels=item.infoLabels
id = scrapertools.find_single_match(data, "onclick='loadSeason\((\d+),\d+\);")
for scrapedseason in matches:
url = item.url
title = 'Temporada %s' % scrapedseason
contentSeasonNumber = scrapedseason
infoLabels['season'] = contentSeasonNumber
thumbnail = item.thumbnail
itemlist.append(Item(channel=item.channel,
action="episodesxseason",
title=title,
url=url,
thumbnail=thumbnail,
id=id,
contentSeasonNumber=contentSeasonNumber,
infoLabels=infoLabels
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel,
title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
url=item.url,
action="add_serie_to_library",
extra="episodios",
contentSerieName=item.contentSerieName,
))
return itemlist
def episodios(item):
logger.info()
itemlist = []
templist = seasons(item)
for tempitem in templist:
itemlist += episodesxseason(tempitem)
return itemlist
def episodesxseason(item):
logger.info()
itemlist = []
season = item.contentSeasonNumber
season_url = '%sajax/visto3.php?season_id=%s&season_number=%s' % (host, item.id, season)
data = get_source(season_url)
patron = "<a href='([^ ]+)'.*?>.*?\d+x(\d+).*?-([^<]+)<.*?(/banderas.*?)</td>"
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
for scrapedurl, scraped_episode, scrapedtitle, lang_data in matches:
url = host + scrapedurl
title = '%sx%s - %s' % (season, scraped_episode, scrapedtitle.strip())
infoLabels['episode'] = scraped_episode
thumbnail = item.thumbnail
title, language = add_language(title, lang_data)
itemlist.append(Item(channel=item.channel,
action="findvideos",
title=title,
url=url,
thumbnail=thumbnail,
language=language,
infoLabels=infoLabels
))
itemlist = filtertools.get_links(itemlist, item, list_language)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def new_episodes(item):
logger.info()
itemlist = []
data = get_source(item.url)
data = data.replace("'", '"')
data = scrapertools.find_single_match(data,
'<center>Series Online : Capítulos estrenados recientemente</center>.*?</ul>')
patron = '<li><h6.*?src="([^"]+)".*?aalt="([^"]+)".*?href="([^"]+)">.*?src="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for lang_data, scrapedtitle, scrapedurl, scrapedthumbnail in matches:
url =host+scrapedurl
thumbnail = scrapedthumbnail
season_episode = scrapertools.find_single_match(scrapedtitle, '.*? (\d+x\d+) ')
scrapedtitle= scrapertools.find_single_match(scrapedtitle, '(.*?) \d+x')
title = '%s - %s' % (scrapedtitle, season_episode )
title, language = add_language(title, lang_data)
itemlist.append(Item(channel=item.channel,
action='findvideos',
title=title,
url=url,
thumbnail=thumbnail,
language=language,
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def add_language(title, string):
logger.info()
languages = scrapertools.find_multiple_matches(string, '/banderas/(.*?).png')
language = []
for lang in languages:
if 'jap' in lang or lang not in IDIOMAS:
lang = 'vos'
if len(languages) == 1:
language = IDIOMAS[lang]
title = '%s [%s]' % (title, language)
else:
language.append(IDIOMAS[lang])
title = '%s [%s]' % (title, IDIOMAS[lang])
return title, language
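
A behavioural note on add_language, since its return type differs by case: one flag yields the language as a plain string, several flags yield a list. An illustration with hypothetical flag names, assuming IDIOMAS maps 'es' to 'Cast' and 'lat' to 'Lat':

# Illustration (hypothetical flags and IDIOMAS entries):
# add_language('1x01 - Piloto', "/banderas/es.png")
#     -> ('1x01 - Piloto [Cast]', 'Cast')                  # one flag: string
# add_language('1x01 - Piloto', ".../banderas/es.png .../banderas/lat.png")
#     -> ('1x01 - Piloto [Cast] [Lat]', ['Cast', 'Lat'])   # several flags: list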
def findvideos(item):
logger.info()
itemlist = []
data = get_source(item.url)
    data = data.replace("'", '"')
patron = '<a href=([^ ]+) target="_blank"><img src="/servidores/(.*?).(?:png|jpg)".*?sno.*?'
patron += '<span>(.*?)<.*?(/banderas.*?)td'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, server, quality, lang_data in matches:
title = server.capitalize()
if quality == '':
quality = 'SD'
title = '%s [%s]' % (title, quality)
title, language = add_language(title, lang_data)
thumbnail = item.thumbnail
        enlace_id, serie_id, se, ep = scrapertools.find_single_match(scrapedurl, 'enlace(\d+)/(\d+)/(\d+)/(\d+)/')
url = host + 'ajax/load_enlace.php?serie=%s&temp=%s&cap=%s&id=%s' % (serie_id, se, ep, enlace_id)
itemlist.append(Item(channel=item.channel,
title=title,
url=url,
action="play",
thumbnail=thumbnail,
server=server,
quality=quality,
language=language,
infoLabels=item.infoLabels
))
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
return sorted(itemlist, key=lambda it: it.language)
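
For readability, a sketch (hypothetical values) of the enlace-to-ajax rewrite performed above:

# scrapedurl = '.../enlace55/1234/2/7/'
#     -> enlace_id='55', serie_id='1234', se='2', ep='7'
#     -> url = host + 'ajax/load_enlace.php?serie=1234&temp=2&cap=7&id=55'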
def play(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.infoLabels = item.infoLabels
return itemlist
def search(item, texto):
logger.info()
    if texto != '':
        texto = texto.replace(" ", "+")  # los espacios sin codificar romperían la petición
        item.url = host + 'search.php?q1=%s' % texto
        return list_all(item)

View File

@@ -56,21 +56,19 @@ def novedades(item):
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)
data = re.sub(r"<!--.*?-->", "", data)
patron = '<a title="([^"]+)" href="([^"]+)".*?>'
patron += "<img.*?src='([^']+)'"
data = scrapertools.find_single_match(data, "<div class='main section' id='main'>(.*?)</ul>")
patron = "<div class='post-header'>(.*?)</span>"
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedtitle, scrapedurl, scrapedthumb in matches:
# patron = "^(.*?)(?:Ya Disponible|Disponible|Disponbile|disponible|\(Actualizada\))$"
# match = re.compile(patron, re.DOTALL).findall(scrapedtitle)
for serie_data in matches:
scrapedtitle = scrapertools.find_single_match(serie_data, "title='([^']+)'")
scrapedurl = scrapertools.find_single_match(serie_data, 'href="([^"]+)"')
scrapedthumb = scrapertools.find_single_match(serie_data, "src='([^']+)'")
title = scrapertools.decodeHtmlentities(scrapedtitle)
language=''
# language = scrapertools.find_multiple_matches(title,'(Vose|Español|Latino)')
# for lang in language:
# title = title.replace(lang,'')
# title = title.replace ('Disponible','')
# title = title.replace('Ya', '')
# title = title.strip()
title = title.replace('Disponible', '')
title = title.replace('Ya', '')
title = title.strip()
show = scrapertools.find_single_match(title, "^(.+?) \d+[x|X]\d+")

View File

@@ -1,7 +1,7 @@
{
"id": "tiotorrent",
"name": "TioTorrent",
"active": true,
"active": false,
"adult": false,
"language": ["cast","lat"],
"thumbnail": "https://s1.postimg.cc/29eths1fi7/tiotorrent.png",

View File

@@ -6,9 +6,7 @@
"language": ["cast", "lat"],
"thumbnail": "http://imgur.com/lmYQgOu.png",
"categories": [
"torrent",
"movie",
"tvshow",
"direct"
],
"settings": [

View File

@@ -460,6 +460,10 @@ def findvideos(item):
server.channel = "videolibrary"
server.nfo = item.nfo
server.strm_path = item.strm_path
#### Compatibilidad con Kodi 18: evita que se quede la ruedecita dando vueltas en enlaces Directos
if server.action == 'play':
server.folder = False
# Se añade el nombre del canal si se desea
if config.get_setting("quit_channel_name", "videolibrary") == 0:
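
Why this matters: since Kodi 18, an item that resolves straight to a video must be flagged as a non-folder leaf, otherwise the busy spinner keeps waiting for a directory listing that never arrives. A minimal, self-contained sketch of the distinction (the plugin URL is a placeholder, not Alfa's real routing):

import sys
import xbmcgui
import xbmcplugin

plugin_url = 'plugin://plugin.video.alfa/?action=play'  # hypothetical URL
li = xbmcgui.ListItem(label='Enlace directo')
li.setProperty('IsPlayable', 'true')
# isFolder=False marks a playable leaf; with isFolder=True, Kodi 18
# would spin waiting for directory content ("la ruedecita").
xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=plugin_url,
                            listitem=li, isFolder=False)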

View File

@@ -11,7 +11,8 @@
"torrent",
"movie",
"tvshow",
"vos"
"vos",
"direct"
],
"settings": [
{

File diff suppressed because one or more lines are too long

View File

@@ -486,7 +486,7 @@ def post_tmdb_seasons(item, itemlist):
Pasada para gestión del menú de Temporadas de una Serie
La clave de activación de este método es la variable item.season_colapse que pone el canal en el Item de Listado.
Esta variable tendrá que desaparecer cuando se aña a la Videoteca para que se analicen los episodios de la forma tradicional
Esta variable tendrá que desaparecer cuando se añada a la Videoteca para que se analicen los episodios de la forma tradicional
Repasa todos los episodios producidos en itemlist por "episodios" del canal para extraer las temporadas. Pone un título para Todas la Temps.
Crea un menú con las diferentes temporadas, así como con los títulos de Actualización de Título y de Añadir a Videoteca
@@ -1743,7 +1743,7 @@ def redirect_clone_newpct1(item, head_nfo=None, it=None, path=False, overwrite=F
if ow_force == 'del': #Si es un borrado de estructuras erroneas, hacemos un proceso aparte
canal_des_def = canal_des #Si hay canal de sustitución para item.library_urls, lo usamos
if not canal_des_def and canal_org in item.library_urls and len(item.library_urls) == 1: #Si no, lo extraemos de la url
canal_des_def = scrapertools.find_single_match(item.library_urls[canal_org], 'http.?\:\/\/(?:www.)?(\w+)\.\w+\/').lower() #salvamos la url actual de la estructura a borrar
canal_des_def = scrapertools.find_single_match(item.library_urls[canal_org], 'http.?\:\/\/(?:www.)?(\w+)\.\w+\/').lower() #salvamos la url actual de la estructura a borrar
url_total = ''
if item.url:
url_total = item.url #Si existe item.url, lo salvamos para futuro uso
@@ -1795,7 +1795,7 @@ def redirect_clone_newpct1(item, head_nfo=None, it=None, path=False, overwrite=F
channel_alt = channel_py
if channel_alt == 'videolibrary':
continue
if item.contentType == "list": #Si viene de Videolibrary, le cambiamos ya el canal
if item.contentType == "list": #Si viene de Videolibrary, le cambiamos ya el canal
if item.channel != channel_py:
item.channel = canal_des #Cambiamos el canal. Si es clone, lo hace el canal
continue #Salimos sin hacer nada más. item está casi vacío
@@ -1811,7 +1811,7 @@ def redirect_clone_newpct1(item, head_nfo=None, it=None, path=False, overwrite=F
continue #... una intervención que afecte solo a una región
if ow_force == 'no' and it.library_urls: #Esta regla solo vale para findvideos...
continue #... salimos si estamos actualizando
if lookup == True: #Queremos que el canal solo visualice sin migración?
if lookup == True: #Queremos que el canal solo visualice sin migración?
if ow_force != 'no':
overwrite = True #Avisamos que hay cambios
continue #Salimos sin tocar archivos
@@ -1820,7 +1820,7 @@ def redirect_clone_newpct1(item, head_nfo=None, it=None, path=False, overwrite=F
url_total = item.url
elif not item.url and item.library_urls:
url_total = item.library_urls[canal_org]
url_total = url_total.replace(url_org, url_des) #reemplazamos una parte de url
url_total = url_total.replace(url_org, url_des) #reemplazamos una parte de url
url = ''
if patron1: #Hay expresión regex?
url += scrapertools.find_single_match(url_total, patron1) #La aplicamos a url
@@ -1856,7 +1856,7 @@ def redirect_clone_newpct1(item, head_nfo=None, it=None, path=False, overwrite=F
item.channel = canal_des_def #Cambiamos el canal. Si es clone, lo hace el canal
if channel_alt == item.category.lower(): #Actualizamos la Categoría y si la tenía
item.category = item.channel.capitalize()
if ow_force_def == 'force': #Queremos que el canal revise la serie entera?
if ow_force_def == 'force' and item.contentType != 'movie': #Queremos que el canal revise la serie entera?
item.ow_force = '1' #Se lo decimos
if ow_force_def in ['force', 'auto']: #Sobreescribir la series?
overwrite = True #Sí, lo marcamos

View File

@@ -386,7 +386,7 @@ def play_from_library(item):
@param item: elemento con información
"""
logger.info()
# logger.debug("item: \n" + item.tostring('\n'))
#logger.debug("item: \n" + item.tostring('\n'))
import xbmcgui
import xbmcplugin
@@ -437,33 +437,39 @@ def play_from_library(item):
itemlist = reorder_itemlist(itemlist)
import time
p_dialog.update(100, '')
xbmc.sleep(500)
time.sleep(0.5)
p_dialog.close()
if len(itemlist) > 0:
# El usuario elige el mirror
opciones = []
for item in itemlist:
opciones.append(item.title)
while not xbmc.Monitor().abortRequested():
# El usuario elige el mirror
opciones = []
for item in itemlist:
opciones.append(item.title)
# Se abre la ventana de seleccion
if (item.contentSerieName != "" and
item.contentSeason != "" and
item.contentEpisodeNumber != ""):
cabecera = ("%s - %sx%s -- %s" %
(item.contentSerieName,
item.contentSeason,
item.contentEpisodeNumber,
config.get_localized_string(30163)))
else:
cabecera = config.get_localized_string(30163)
# Se abre la ventana de seleccion
if (item.contentSerieName != "" and
item.contentSeason != "" and
item.contentEpisodeNumber != ""):
cabecera = ("%s - %sx%s -- %s" %
(item.contentSerieName,
item.contentSeason,
item.contentEpisodeNumber,
config.get_localized_string(30163)))
else:
cabecera = config.get_localized_string(30163)
seleccion = platformtools.dialog_select(cabecera, opciones)
seleccion = platformtools.dialog_select(cabecera, opciones)
if seleccion == -1:
return
else:
item = videolibrary.play(itemlist[seleccion])[0]
platformtools.play_video(item)
if seleccion == -1:
return
else:
item = videolibrary.play(itemlist[seleccion])[0]
platformtools.play_video(item)
from channels import autoplay
if (platformtools.is_playing() and item.action) or item.server == 'torrent' or autoplay.is_active(item.contentChannel):
break

View File

@@ -154,7 +154,7 @@ def render_items(itemlist, parent_item):
valid_genre = True
elif anime:
valid_genre = True
elif 'siguiente' in item.title.lower() and '>' in item.title:
elif (('siguiente' in item.title.lower() and '>' in item.title) or ('pagina:' in item.title.lower())):
item.thumbnail = get_thumb("next.png")
elif 'add' in item.action:
if 'pelicula' in item.action:
@@ -179,6 +179,9 @@ def render_items(itemlist, parent_item):
from core import httptools
if item.action == 'play':
#### Compatibilidad con Kodi 18: evita que se quede la ruedecita dando vueltas en enlaces Directos
item.folder = False
item.thumbnail = unify.thumbnail_type(item)
else:
item.thumbnail = httptools.get_url_headers(item.thumbnail)
@@ -1077,12 +1080,13 @@ def play_torrent(item, xlistitem, mediaurl):
if seleccion > 1:
#### Compatibilidad con Kodi 18: evita cuelgues/cancelaciones cuando el .torrent se lanza desde pantalla convencional
if xbmc.getCondVisibility('Window.IsMedia'):
xbmcplugin.setResolvedUrl(int(sys.argv[1]), False, xlistitem) #Preparamos el entorno para evitar error Kodi 18
time.sleep(1) #Dejamos tiempo para que se ejecute
#if xbmc.getCondVisibility('Window.IsMedia'):
xbmcplugin.setResolvedUrl(int(sys.argv[1]), False, xlistitem) #Preparamos el entorno para evitar error Kodi 18
time.sleep(0.5) #Dejamos tiempo para que se ejecute
mediaurl = urllib.quote_plus(item.url)
if ("quasar" in torrent_options[seleccion][1] or "elementum" in torrent_options[seleccion][1]) and item.infoLabels['tmdb_id']: #Llamada con más parámetros para completar el título
#Llamada con más parámetros para completar el título
if ("quasar" in torrent_options[seleccion][1] or "elementum" in torrent_options[seleccion][1]) and item.infoLabels['tmdb_id']:
if item.contentType == 'episode' and "elementum" not in torrent_options[seleccion][1]:
mediaurl += "&episode=%s&library=&season=%s&show=%s&tmdb=%s&type=episode" % (item.infoLabels['episode'], item.infoLabels['season'], item.infoLabels['tmdb_id'], item.infoLabels['tmdb_id'])
elif item.contentType == 'movie':

View File

@@ -279,7 +279,7 @@ def title_format(item):
visto = True
# Se elimina cualquier formato previo en el titulo
if item.action != '':
if item.action != '' and item.action != 'mainlist':
item.title = remove_format(item.title)
#logger.debug('visto? %s' % visto)

View File

@@ -3245,6 +3245,10 @@ msgctxt "#70135"
msgid "Custom Colours"
msgstr "Colores Personalizados"
msgctxt "#70136"
msgid "Tv Show"
msgstr "Serie"
msgctxt "#70137"
msgid "Movies"
msgstr "Películas"
@@ -4000,7 +4004,7 @@ msgid "Error in the user and/or password. Check your credentials"
msgstr "Error en el usuario y/o contraseña. Comprueba tus credenciales"
msgctxt "#70331"
msgid "Error during login. Check your credentials
msgid "Error during login. Check your credentials"
msgstr "Error durante el login. Comprueba tus credenciales"
msgctxt "#70332"
@@ -4052,7 +4056,7 @@ msgid "[Trakt] Remove %s from your watchlist"
msgstr "[Trakt] Eliminar %s de tu watchlist"
msgctxt "#70344"
msgid "Add to %s your watchlist""
msgid "Add to %s your watchlist"
msgstr "[Trakt] Añadir %s a tu watchlist"
msgctxt "#70345"
@@ -4060,7 +4064,7 @@ msgid "[Trakt] Remove %s from your collection"
msgstr "[Trakt] Eliminar %s de tu colección"
msgctxt "#70346"
msgid "[Trakt] Add %s to your collection
msgid "[Trakt] Add %s to your collection"
msgstr "[Trakt] Añadir %s a tu colección"
msgctxt "#70347"
@@ -4684,8 +4688,8 @@ msgid "Genre: "
msgstr "Género: "
msgctxt "#70500"
msgid "Notification([COLOR red][B]Update Kodi to its latest version[/B][/COLOR], [COLOR skyblue]for best info[/COLOR],8000, "http://i.imgur.com/mHgwcn3.png")"
msgstr "Notification([COLOR red][B]Actualiza Kodi a su última versión[/B][/COLOR], [COLOR skyblue]para mejor info[/COLOR],8000, "http://i.imgur.com/mHgwcn3.png")"
msgid "Notification([COLOR red][B]Update Kodi to its latest version[/B][/COLOR], [COLOR skyblue]for best info[/COLOR],8000, \"http://i.imgur.com/mHgwcn3.png\")"
msgstr "Notification([COLOR red][B]Actualiza Kodi a su última versión[/B][/COLOR], [COLOR skyblue]para mejor info[/COLOR],8000, \"http://i.imgur.com/mHgwcn3.png\")"
msgctxt "#70501"
msgid "Search did not match (%s)"
@@ -4928,7 +4932,7 @@ msgid "Movies by Genre"
msgstr "Por generos"
msgctxt "#70561"
msgid "Search Similar
msgid "Search Similar"
msgstr "Buscar Similares"

View File

@@ -3245,6 +3245,10 @@ msgctxt "#70135"
msgid "Custom Colours"
msgstr "Colores Personalizados"
msgctxt "#70136"
msgid "Tv Show"
msgstr "Serie"
msgctxt "#70137"
msgid "Movies"
msgstr "Películas"
@@ -4000,7 +4004,7 @@ msgid "Error in the user and/or password. Check your credentials"
msgstr "Error en el usuario y/o contraseña. Comprueba tus credenciales"
msgctxt "#70331"
msgid "Error during login. Check your credentials
msgid "Error during login. Check your credentials"
msgstr "Error durante el login. Comprueba tus credenciales"
msgctxt "#70332"
@@ -4052,7 +4056,7 @@ msgid "[Trakt] Remove %s from your watchlist"
msgstr "[Trakt] Eliminar %s de tu watchlist"
msgctxt "#70344"
msgid "Add to %s your watchlist""
msgid "Add to %s your watchlist"
msgstr "[Trakt] Añadir %s a tu watchlist"
msgctxt "#70345"
@@ -4060,7 +4064,7 @@ msgid "[Trakt] Remove %s from your collection"
msgstr "[Trakt] Eliminar %s de tu colección"
msgctxt "#70346"
msgid "[Trakt] Add %s to your collection
msgid "[Trakt] Add %s to your collection"
msgstr "[Trakt] Añadir %s a tu colección"
msgctxt "#70347"
@@ -4684,8 +4688,8 @@ msgid "Genre: "
msgstr "Género: "
msgctxt "#70500"
msgid "Notification([COLOR red][B]Update Kodi to its latest version[/B][/COLOR], [COLOR skyblue]for best info[/COLOR],8000, "http://i.imgur.com/mHgwcn3.png")"
msgstr "Notification([COLOR red][B]Actualiza Kodi a su última versión[/B][/COLOR], [COLOR skyblue]para mejor info[/COLOR],8000, "http://i.imgur.com/mHgwcn3.png")"
msgid "Notification([COLOR red][B]Update Kodi to its latest version[/B][/COLOR], [COLOR skyblue]for best info[/COLOR],8000, \"http://i.imgur.com/mHgwcn3.png\")"
msgstr "Notification([COLOR red][B]Actualiza Kodi a su última versión[/B][/COLOR], [COLOR skyblue]para mejor info[/COLOR],8000, \"http://i.imgur.com/mHgwcn3.png\")"
msgctxt "#70501"
msgid "Search did not match (%s)"
@@ -4928,7 +4932,7 @@ msgid "Movies by Genre"
msgstr "Por generos"
msgctxt "#70561"
msgid "Search Similar
msgid "Search Similar"
msgstr "Buscar Similares"

View File

@@ -3245,6 +3245,10 @@ msgctxt "#70135"
msgid "Custom Colours"
msgstr "Colores Personalizados"
msgctxt "#70136"
msgid "Tv Show"
msgstr "Serie"
msgctxt "#70137"
msgid "Movies"
msgstr "Películas"
@@ -4000,7 +4004,7 @@ msgid "Error in the user and/or password. Check your credentials"
msgstr "Error en el usuario y/o contraseña. Comprueba tus credenciales"
msgctxt "#70331"
msgid "Error during login. Check your credentials
msgid "Error during login. Check your credentials"
msgstr "Error durante el login. Comprueba tus credenciales"
msgctxt "#70332"
@@ -4052,7 +4056,7 @@ msgid "[Trakt] Remove %s from your watchlist"
msgstr "[Trakt] Eliminar %s de tu watchlist"
msgctxt "#70344"
msgid "Add to %s your watchlist""
msgid "Add to %s your watchlist"
msgstr "[Trakt] Añadir %s a tu watchlist"
msgctxt "#70345"
@@ -4060,7 +4064,7 @@ msgid "[Trakt] Remove %s from your collection"
msgstr "[Trakt] Eliminar %s de tu colección"
msgctxt "#70346"
msgid "[Trakt] Add %s to your collection
msgid "[Trakt] Add %s to your collection"
msgstr "[Trakt] Añadir %s a tu colección"
msgctxt "#70347"
@@ -4684,8 +4688,8 @@ msgid "Genre: "
msgstr "Género: "
msgctxt "#70500"
msgid "Notification([COLOR red][B]Update Kodi to its latest version[/B][/COLOR], [COLOR skyblue]for best info[/COLOR],8000, "http://i.imgur.com/mHgwcn3.png")"
msgstr "Notification([COLOR red][B]Actualiza Kodi a su última versión[/B][/COLOR], [COLOR skyblue]para mejor info[/COLOR],8000, "http://i.imgur.com/mHgwcn3.png")"
msgid "Notification([COLOR red][B]Update Kodi to its latest version[/B][/COLOR], [COLOR skyblue]for best info[/COLOR],8000, \"http://i.imgur.com/mHgwcn3.png\")"
msgstr "Notification([COLOR red][B]Actualiza Kodi a su última versión[/B][/COLOR], [COLOR skyblue]para mejor info[/COLOR],8000, \"http://i.imgur.com/mHgwcn3.png\")"
msgctxt "#70501"
msgid "Search did not match (%s)"
@@ -4928,7 +4932,7 @@ msgid "Movies by Genre"
msgstr "Por generos"
msgctxt "#70561"
msgid "Search Similar
msgid "Search Similar"
msgstr "Buscar Similares"

View File

@@ -22,7 +22,7 @@ def get_video_url(page_url, user="", password="", video_password=""):
video_urls = []
videos = scrapertools.find_multiple_matches(unpacked, 'file:"([^"]+).*?label:"([^"]+)')
for video, label in videos:
video_urls.append([label + " [clipwatching]", video])
logger.info("Url: %s" % videos)
if ".jpg" not in video:
video_urls.append([label + " [clipwatching]", video])
video_urls.sort(key=lambda it: int(it[0].split("p ", 1)[0]))
return video_urls
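
Note that the sort key assumes every label starts with '<height>p ' (e.g. '720p [clipwatching]'); the new .jpg check above keeps preview-image entries out of the list so that assumption holds. A tiny illustration with made-up entries:

# Made-up entries illustrating the quality sort used above:
urls = [['1080p [clipwatching]', 'u1'], ['480p [clipwatching]', 'u2']]
urls.sort(key=lambda it: int(it[0].split('p ', 1)[0]))
# -> lowest quality first: [['480p [clipwatching]', 'u2'], ['1080p [clipwatching]', 'u1']]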

View File

@@ -16,7 +16,6 @@ proxy = "http://anonymouse.org/cgi-bin/anon-www.cgi/"
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
premium = config.get_setting("premium", server="crunchyroll")
if premium:
return login(page_url)
@@ -27,29 +26,23 @@ def test_video_exists(page_url):
if disp:
disp = "Disponible gratuitamente: %s" % disp
return False, "[Crunchyroll] Error, se necesita cuenta premium. %s" % disp
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
video_urls = []
if "crunchyroll.com" in page_url:
media_id = page_url.rsplit("-", 1)[1]
else:
media_id = scrapertools.find_single_match(page_url, 'media_id=(\d+)')
url = "http://www.crunchyroll.com/xml/?req=RpcApiVideoPlayer_GetStandardConfig&media_id=%s" \
url = "https://www.crunchyroll.com/xml/?req=RpcApiVideoPlayer_GetStandardConfig&media_id=%s" \
"&video_format=0&video_quality=0&auto_play=0&aff=af-12299-plwa" % media_id
post = "current_page=%s" % page_url
data = httptools.downloadpage(url, post, headers=GLOBAL_HEADER, replace_headers=True).data
if "<msg>Media not available</msg>" in data or "flash_block.png" in data:
data = httptools.downloadpage(proxy + url, post, headers=GLOBAL_HEADER, replace_headers=True,
cookies=False).data
media_url = scrapertools.find_single_match(data, '<file>(.*?)</file>').replace("&amp;", "&")
if not media_url:
return video_urls
@@ -60,7 +53,6 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=""):
else:
filename = scrapertools.get_filename_from_url(media_url)[-4:]
quality = scrapertools.find_single_match(data, '<height>(.*?)</height>')
try:
idiomas = ['Español \(España\)', 'Español\]', 'English', 'Italiano', 'Français', 'Português', 'Deutsch']
index_sub = int(config.get_setting("sub", server="crunchyroll"))
@@ -70,12 +62,10 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=""):
link_sub = scrapertools.find_single_match(data, "link='([^']+)' title='\[Español\]")
elif not link_sub and index_sub == 1:
link_sub = scrapertools.find_single_match(data, "link='([^']+)' title='\[Español \(España\)")
if not link_sub:
link_sub = scrapertools.find_single_match(data, "link='([^']+)' title='\[English")
data_sub = httptools.downloadpage(link_sub.replace("&amp;", "&"), headers=GLOBAL_HEADER,
replace_headers=True).data
id_sub = scrapertools.find_single_match(data_sub, "subtitle id='([^']+)'")
iv = scrapertools.find_single_match(data_sub, '<iv>(.*?)</iv>')
data_sub = scrapertools.find_single_match(data_sub, '<data>(.*?)</data>')
@@ -84,12 +74,9 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=""):
import traceback
logger.error(traceback.format_exc())
file_sub = ""
video_urls.append(["%s %sp [crunchyroll]" % (filename, quality), media_url, 0, file_sub])
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
return video_urls
@@ -98,13 +85,11 @@ def login(page_url):
user = config.get_setting("user", server="crunchyroll")
password = config.get_setting("password", server="crunchyroll")
data = httptools.downloadpage(login_page, headers=GLOBAL_HEADER, replace_headers=True).data
if not "<title>Redirecting" in data:
token = scrapertools.find_single_match(data, 'name="login_form\[_token\]" value="([^"]+)"')
redirect_url = scrapertools.find_single_match(data, 'name="login_form\[redirect_url\]" value="([^"]+)"')
post = "login_form%5Bname%5D=" + user + "&login_form%5Bpassword%5D=" + password + \
"&login_form%5Bredirect_url%5D=" + redirect_url + "&login_form%5B_token%5D=" + token
data = httptools.downloadpage(login_page, post, headers=GLOBAL_HEADER, replace_headers=True).data
if "<title>Redirecting" in data:
return True, ""
@@ -115,7 +100,6 @@ def login(page_url):
return False, "Es necesario resolver un captcha. Loguéate desde un navegador y vuelve a intentarlo"
else:
return False, "Error en la contraseña de crunchyroll. Corrígelo o desactiva la opción premium para ver enlaces free"
return True, ""
@@ -124,7 +108,6 @@ def decrypt_subs(iv, data, id):
data = base64.b64decode(data.encode('utf-8'))
iv = base64.b64decode(iv.encode('utf-8'))
id = int(id)
def obfuscate_key_aux(count, modulo, start):
output = list(start)
for _ in range(count):
@@ -133,7 +116,6 @@ def decrypt_subs(iv, data, id):
output = output[2:]
output = list(map(lambda x: x % modulo + 33, output))
return output
def obfuscate_key(key):
from math import pow, sqrt, floor
num1 = int(floor(pow(2, 25) * sqrt(6.9)))
@@ -148,17 +130,13 @@ def decrypt_subs(iv, data, id):
decshaHash.append(ord(char))
# Extend 160 Bit hash to 256 Bit
return decshaHash + [0] * 12
key = obfuscate_key(id)
key = struct.pack('B' * len(key), *key)
decryptor = jscrypto.new(key, 2, iv)
decrypted_data = decryptor.decrypt(data)
data = zlib.decompress(decrypted_data)
import xml.etree.ElementTree as ET
raiz = ET.fromstring(data)
ass_sub = convert_to_ass(raiz)
file_sub = filetools.join(config.get_data_path(), 'crunchyroll_sub.ass')
filetools.write(file_sub, ass_sub)
@@ -167,13 +145,11 @@ def decrypt_subs(iv, data, id):
def convert_to_ass(raiz):
output = ''
def ass_bool(strvalue):
assvalue = '0'
if strvalue == '1':
assvalue = '-1'
return assvalue
output = '[Script Info]\n'
output += 'Title: %s\n' % raiz.attrib['title']
output += 'ScriptType: v4.00+\n'

View File

@@ -1,41 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "http://es.cumlouder.com/embed/([a-z0-9A-Z]+)/",
"url": "http://es.cumlouder.com/embed/\\1/"
}
]
},
"free": true,
"id": "cumlouder",
"name": "cumlouder",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
]
}

View File

@@ -1,19 +0,0 @@
# -*- coding: utf-8 -*-
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
data = scrapertools.cache_page(page_url)
media_url = scrapertools.get_match(data, "var urlVideo = \'([^']+)\';")
video_urls = []
video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [cumlouder]", media_url])
return video_urls

View File

@@ -8,6 +8,8 @@ from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
response = httptools.downloadpage(page_url)
if "Contenido rechazado" in response.data:
return False, "[Dailymotion] El archivo no existe o ha sido borrado"
if response.code == 404:
return False, "[Dailymotion] El archivo no existe o ha sido borrado"
return True, ""

View File

@@ -1,28 +0,0 @@
# -*- coding: utf-8 -*-
from core import httptools
from core import scrapertools
from platformcode import logger
# def test_video_exists(page_url):
# logger.info("(page_url='%s')" % page_url)
# data = httptools.downloadpage(page_url).data
# if "File was deleted" in data:
# return False, "[eroshare] El archivo no existe o ha sido borrado"
# return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
url = scrapertools.find_single_match(data, '"url_mp4":"(.*?)"')
video_urls.append(['eroshare', url])
# for video_url in video_urls:
# logger.info("%s - %s" % (video_url[0],video_url[1]))
return video_urls

View File

@@ -4,14 +4,14 @@
"ignore_urls": [],
"patterns": [
{
"pattern": "(https://eroshare.com/embed/[a-zA-Z0-9]+)",
"pattern": "(https://www.fembed.com/v/[A-z0-9]+)",
"url": "\\1"
}
]
},
"free": true,
"id": "eroshare",
"name": "eroshare",
"id": "fembed",
"name": "Fembed",
"settings": [
{
"default": false,
@@ -38,5 +38,6 @@
"visible": false
}
],
"thumbnail": "https://s31.postimg.cc/cewftt397/eroshare.png"
}
"thumbnail": "https://i.postimg.cc/prdPwBhT/fembed1.png",
"version": 1
}

View File

@@ -0,0 +1,24 @@
# -*- coding: utf-8 -*-
from core import httptools
from core import scrapertools
from core import jsontools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "Sorry 404 not found" in data:
return False, "[fembed] El fichero ha sido borrado"
return True, ""
def get_video_url(page_url, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
video_urls = []
page_url = page_url.replace("/v/", "/api/sources/")
data = httptools.downloadpage(page_url, post={}).data
data = jsontools.load(data)
for videos in data["data"]:
video_urls.append([videos["label"] + " [fembed]", videos["file"]])
return video_urls
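
A hedged usage sketch (the embed id is hypothetical): the connector rewrites the /v/ page to Fembed's JSON sources endpoint and POSTs an empty form.

# Hypothetical id 'abc123':
#   page_url = 'https://www.fembed.com/v/abc123'
#   -> POST https://www.fembed.com/api/sources/abc123  (empty form body)
# The endpoint answers {"data": [{"file": "...", "label": "480p"}, ...]},
# so get_video_url(page_url) returns e.g. [['480p [fembed]', 'https://...mp4']]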

View File

@@ -13,7 +13,7 @@ def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url, cookies=False).data
if 'file was deleted' in data:
if 'file was deleted' in data or 'File Not Found (Deleted or Abused)' in data:
return False, config.get_localized_string(70292) % "FlashX"
elif 'Video is processing now' in data:
return False, config.get_localized_string(70293) % "FlashX"

View File

@@ -1,42 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "http://www.pelismundo.com/gkvip/vip/playervip3/.*?id=([A-z0-9]+)",
"url": "http://www.pelismundo.com/gkvip/vip/playervip3/player.php?id=\\1"
}
]
},
"free": true,
"id": "pelismundo",
"name": "pelismundo",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://s26.postimg.cc/72c9mr3ux/pelismundo1.png"
}

View File

@@ -1,33 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Alfa addon - KODI Plugin
# Conector para pelismundo
# https://github.com/alfa-addon
# ------------------------------------------------------------
from core import httptools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url, add_referer = True).data
if "Object not found" in data or "no longer exists" in data or '"sources": [false]' in data or 'sources: []' in data:
return False, "[pelismundo] El archivo no existe o ha sido borrado"
return True, ""
def get_video_url(page_url, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
video_urls = []
data = httptools.downloadpage(page_url, add_referer = True).data
patron = 'sources.*?}],'
bloque = scrapertools.find_single_match(data, patron)
patron = 'file.*?"([^"]+)".*?label:"([^"]+)"'
match = scrapertools.find_multiple_matches(bloque, patron)
for scrapedurl, scrapedquality in match:
video_urls.append([scrapedquality + " [pelismundo]", scrapedurl])
#video_urls.sort(key=lambda it: int(it[0].split("p ", 1)[0]))
return video_urls

View File

@@ -17,6 +17,8 @@ def test_video_exists(page_url):
data = httptools.downloadpage(page_url, headers={'referer': referer}).data
if data == "File was deleted" or data == '':
return False, "[powvideo] El video ha sido borrado"
if 'function(p,a,c,k,e,' not in data:
return False, "[powvideo] El video no está disponible"
return True, ""
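
'function(p,a,c,k,e,' is the signature of Dean Edwards' JavaScript packer, which powvideo wraps its player setup in; if the marker is absent there is nothing to unpack. For context, a hedged sketch of the usual detect-and-unpack step, following the same jsunpack pattern other connectors in this commit use:

# Sketch: detect and unpack p,a,c,k,e,d player JS (the regex may need tuning per host)
from core import scrapertools
from lib import jsunpack

packed = scrapertools.find_single_match(data, "(eval\(function\(p,a,c,k,e,d.*?)</script")
if packed:
    unpacked = jsunpack.unpack(packed)  # plain JS exposing the file/label sources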

View File

@@ -1,42 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "streamix.cloud/(?:embed-|)([A-z0-9]+)",
"url": "http://streamix.cloud/embed-\\1.html"
}
]
},
"free": true,
"id": "streamixcloud",
"name": "streamixcloud",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "http://i.imgur.com/NuD85Py.png?1"
}

View File

@@ -1,30 +0,0 @@
# -*- coding: utf-8 -*-
from core import httptools
from core import scrapertools
from lib import jsunpack
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "Not Found" in data or "File was deleted" in data:
return False, "[streamixcloud] El archivo no existe o ha sido borrado"
if "Video is processing" in data:
return False, "[streamixcloud] El video se está procesando, inténtelo mas tarde"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
video_urls = []
patron = "<script type='text/javascript'>(eval\(function\(p,a,c,k,e,d.*?)</script"
packed = scrapertools.find_single_match(data, patron)
data = jsunpack.unpack(packed)
media_url = scrapertools.find_multiple_matches(data, '\{file:"([^"]+)",')
ext = scrapertools.get_filename_from_url(media_url[0])[-4:]
for url in media_url:
video_urls.append(["%s [streamixcloud]" % ext, url])
return video_urls

View File

@@ -11,6 +11,8 @@ def test_video_exists(page_url):
data = httptools.downloadpage(page_url).data
if "Video not found..." in data:
return False, config.get_localized_string(70292) % "Thevid"
if "Video removed for inactivity..." in data:
return False, "[Thevid] El video ha sido removido por inactividad"
return True, ""

View File

@@ -9,28 +9,24 @@ from core import httptools
from core import scrapertools
from platformcode import logger
import sys, os
import re, base64
from lib.aadecode import decode as aadecode
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url)
if data.code == 404:
return False, "[upvid] El archivo no existe o ha sido borrado"
return False, "[upvid] El archivo no existe o ha sido borrado"
if "<title>video is no longer available" in data.data:
return False, "[upvid] El archivo no existe o ha sido borrado"
return True, ""
def get_video_url(page_url, premium = False, user = "", password = "", video_password = ""):
logger.info("url=" + page_url)
video_urls = []
headers = {'referer': page_url}
for i in range(0, 3):
data = httptools.downloadpage(page_url, headers=headers).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
@@ -38,27 +34,15 @@ def get_video_url(page_url, premium = False, user = "", password = "", video_password = ""):
break
else:
page_url = scrapertools.find_single_match(data, "iframe src=(.*?) scrolling")
# logger.debug(data)
# decodificar función para obtener función y clave
# ------------------------------------------------
code = re.findall('<script>\s*゚ω゚(.*?)</script>', data, flags=re.DOTALL)[0]
text_decode = aadecode(code)
funcion, clave = re.findall("func\.innerHTML = (\w*)\('([^']*)', ", text_decode, flags=re.DOTALL)[0]
# decodificar javascript en campos html hidden
# --------------------------------------------
oculto = re.findall('<input type=hidden value=([^ ]+) id=func', data, flags=re.DOTALL)[0]
funciones = resuelve(clave, base64.b64decode(oculto))
oculto = re.findall('<input type=hidden value=([^ ]+) id=code', data, flags=re.DOTALL)[0]
codigo = resuelve(clave, base64.b64decode(oculto))
url, type = scrapertools.find_single_match(funciones, "setAttribute\('src', '(.*?)'\);\s.*?type', 'video/(.*?)'")
video_urls.append(['upvid [%s]' % type ,url])
return video_urls
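
In short, with hypothetical values: the page hides two base64 blobs in hidden inputs; aadecode() recovers the AAEncoded JavaScript that names the decoding function and key, and resuelve() (presumably defined elsewhere in this connector, outside the visible diff) applies them to both blobs.

# Rough flow, hypothetical values:
#   <input type=hidden value=<b64> id=func> -> resuelve(clave, base64.b64decode(...))
#       yields player JS containing setAttribute('src', <final url>)
#   <input type=hidden value=<b64> id=code> -> same decoding for the page logic
# Result: video_urls like [['upvid [mp4]', 'https://...']]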