danimados: added channel search
sipeliculas: fix play
megadrive: new server
This commit is contained in:
Intel1
2018-09-11 17:01:58 -05:00
parent acec5ff234
commit baa2bb87f9
5 changed files with 118 additions and 90 deletions

View File

@@ -8,5 +8,15 @@
    "banner": "https://imgur.com/xG5xqBq.png",
    "categories": [
        "tvshow"
-   ]
+   ],
+   "settings": [
+       {
+           "id": "include_in_global_search",
+           "type": "bool",
+           "label": "Incluir en busqueda global",
+           "default": true,
+           "enabled": true,
+           "visible": true
+       }
+   ]
}
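
The new settings block is what lets danimados opt in to the global search. A minimal, self-contained sketch of how such a toggle can be read back from the channel definition (the file path and helper name are illustrative, not part of this commit):

import json

def include_in_global_search(channel_json_path):
    # Load the channel definition and report whether the
    # "include_in_global_search" toggle defaults to on.
    with open(channel_json_path) as f:
        channel = json.load(f)
    for setting in channel.get("settings", []):
        if setting.get("id") == "include_in_global_search":
            return setting.get("default", False)
    return False

print(include_in_global_search("danimados.json"))  # -> True for the file above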

View File

@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
import re
+import base64
from channelselector import get_thumb
from core import httptools
@@ -22,48 +23,64 @@ list_quality = ['default']
def mainlist(item):
    logger.info()
    thumb_series = get_thumb("channels_tvshow.png")
    autoplay.init(item.channel, list_servers, list_quality)
    itemlist = list()
    itemlist.append(Item(channel=item.channel, action="mainpage", title="Categorías", url=host,
                         thumbnail=thumb_series))
    itemlist.append(Item(channel=item.channel, action="mainpage", title="Más Populares", url=host,
                         thumbnail=thumb_series))
    itemlist.append(Item(channel=item.channel, action="lista", title="Peliculas Animadas", url=host+"peliculas/",
                         thumbnail=thumb_series))
+    itemlist.append(Item(channel=item.channel, action="search", title="Buscar", url=host + "?s=",
+                         thumbnail=thumb_series))
    autoplay.show_option(item.channel, itemlist)
    return itemlist
"""
def search(item, texto):
logger.info()
texto = texto.replace(" ","+")
item.url = item.url+texto
item.url = host + "?s=" + texto
if texto!='':
return lista(item)
"""
return sub_search(item)
def sub_search(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = 'class="thumbnail animation-.*?href="([^"]+).*?'
    patron += 'img src="([^"]+).*?'
    patron += 'alt="([^"]+).*?'
    patron += 'class="year">(\d{4})'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear in matches:
        item.action = "findvideos"
        item.contentTitle = scrapedtitle
        item.contentSerieName = ""
        if "serie" in scrapedurl:
            item.action = "episodios"
            item.contentTitle = ""
            item.contentSerieName = scrapedtitle
        title = scrapedtitle
        if scrapedyear:
            item.infoLabels['year'] = int(scrapedyear)
            title += " (%s)" % item.infoLabels['year']
        itemlist.append(item.clone(thumbnail=scrapedthumbnail,
                                   title=title,
                                   url=scrapedurl))
    tmdb.set_infoLabels(itemlist)
    return itemlist
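
sub_search drives the new "Buscar" entry: it requests host + "?s=" + texto and walks the result cards with one chained pattern. A framework-free sketch of the same idea (the HTML snippet is illustrative, not captured from the site):

import re

sample = ('<div class="thumbnail animation-2"><a href="https://example.org/serie/x">'
          '<img src="https://example.org/t.jpg" alt="Some Show">'
          '<span class="year">2018</span></a></div>')
patron = (r'class="thumbnail animation-.*?href="([^"]+).*?'
          r'img src="([^"]+).*?alt="([^"]+).*?class="year">(\d{4})')
for url, thumb, title, year in re.findall(patron, sample, re.DOTALL):
    print("%s (%s) -> %s" % (title, year, url))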
def mainpage(item):
    logger.info()
    itemlist = []
    data1 = httptools.downloadpage(item.url).data
    data1 = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data1)
    if item.title == "Más Populares":
        patron_sec = '<a class="lglossary" data-type.+?>(.+?)<\/ul>'
        patron = '<img .+? src="([^"]+)".+?<a href="([^"]+)".+?>([^"]+)<\/a>'  # scrapedthumbnail, scrapedurl, scrapedtitle
    if item.title == "Categorías":
        patron_sec = '<ul id="main_header".+?>(.+?)<\/ul><\/div>'
        patron = '<a href="([^"]+)">([^"]+)<\/a>'  # scrapedurl, scrapedtitle
    data = scrapertools.find_single_match(data1, patron_sec)
    matches = scrapertools.find_multiple_matches(data, patron)
    if item.title == "Géneros" or item.title == "Categorías":
        for scrapedurl, scrapedtitle in matches:
@@ -82,11 +99,10 @@ def mainpage(item):
        return itemlist
    return itemlist
def lista(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    if item.title == "Peliculas Animadas":
@@ -114,8 +130,8 @@ def lista(item):
def episodios(item):
    logger.info()
    itemlist = []
    infoLabels = {}
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    data_lista = scrapertools.find_single_match(data,
@@ -123,51 +139,52 @@ def episodios(item):
    show = item.title
    patron_caps = '<img alt=".+?" src="([^"]+)"><\/a><\/div><div class=".+?">([^"]+)<\/div>.+?'
    patron_caps += '<a .+? href="([^"]+)">([^"]+)<\/a>'
    # scrapedthumbnail, scrapedtempepi, scrapedurl, scrapedtitle
    matches = scrapertools.find_multiple_matches(data_lista, patron_caps)
    for scrapedthumbnail, scrapedtempepi, scrapedurl, scrapedtitle in matches:
        tempepi = scrapedtempepi.split(" - ")
        if tempepi[0] == 'Pel':
            tempepi[0] = 0
        title = "{0}x{1} - ({2})".format(tempepi[0], tempepi[1].zfill(2), scrapedtitle)
-        itemlist.append(Item(channel=item.channel, thumbnail=scrapedthumbnail,
-                             action="findvideos", title=title, url=scrapedurl, show=show))
+        item.infoLabels["season"] = tempepi[0]
+        item.infoLabels["episode"] = tempepi[1]
+        itemlist.append(item.clone(thumbnail=scrapedthumbnail,
+                                   action="findvideos", title=title, url=scrapedurl))
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir " + show + " a la videoteca[/COLOR]", url=item.url,
                             action="add_serie_to_library", extra="episodios", show=show))
    return itemlist
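
The "Temporada - Episodio" label parsing above files movie entries ("Pel") under season 0. A standalone sketch of that rule (sample labels are illustrative):

def parse_temp_epi(label):
    # Split the "season - episode" label; "Pel" marks a movie entry.
    temp, epi = label.split(" - ")
    if temp == 'Pel':
        temp = 0
    return "{0}x{1}".format(temp, epi.zfill(2))

print(parse_temp_epi("2 - 5"))    # -> 2x05
print(parse_temp_epi("Pel - 1"))  # -> 0x01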
def findvideos(item):
    logger.info()
-    import base64
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    data1 = scrapertools.find_single_match(data,
                                           '<div id="playex" .+?>(.+?)<\/nav>?\s<\/div><\/div>')
    patron = "changeLink\('([^']+)'\)"
    matches = re.compile(patron, re.DOTALL).findall(data1)
    for url64 in matches:
-        url = base64.b64decode(url64)
-        if 'danimados' in url:
-            new_data = httptools.downloadpage('https:' + url.replace('stream', 'stream_iframe')).data
-            url = scrapertools.find_single_match(new_data, '<source src="([^"]+)"')
-        itemlist.append(item.clone(title='%s', url=url, action="play"))
+        url1 = base64.b64decode(url64)
+        if 'danimados' in url1:
+            new_data = httptools.downloadpage('https:' + url1.replace('stream', 'stream_iframe')).data
+            logger.info("Intel33 %s" % new_data)
+            url = scrapertools.find_single_match(new_data, "sources: \[\{file:'([^']+)")
+            if "zkstream" in url:
+                url1 = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location", "")
+            else:
+                url1 = url
+        itemlist.append(item.clone(title='%s', url=url1, action="play"))
    tmdb.set_infoLabels(itemlist)
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    if config.get_videolibrary_support() and len(itemlist) > 0 and item.contentType == "movie" and item.contentChannel != 'videolibrary':
        itemlist.append(
            item.clone(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
-                       action="add_pelicula_to_library", contentTitle=item.show))
+                       action="add_pelicula_to_library"))
    autoplay.start(itemlist, item)
    return itemlist
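
The new danimados branch decodes each changeLink('...') argument from base64, scrapes the real source out of the player page, and, for zkstream links, reads the final media URL from the Location header without following the redirect. A framework-free sketch of that resolution step (requests stands in for core.httptools here, purely for illustration):

import base64
import requests

def resolve(url64):
    url = base64.b64decode(url64).decode("utf-8")
    if "zkstream" in url:
        # Headers only, no redirect: the Location header carries the
        # final media URL.
        resp = requests.head(url, allow_redirects=False)
        return resp.headers.get("location", "")
    return url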
def play(item):
    item.thumbnail = item.contentThumbnail
    return [item]

View File

@@ -1,8 +1,5 @@
# -*- coding: utf-8 -*-
import re
-import urlparse
from core import httptools
from core import scrapertools
from core import servertools
@@ -12,10 +9,8 @@ from platformcode import logger
host = 'http://www.sipeliculas.com'

def mainlist(item):
    logger.info()
    itemlist = []
    itemlist.append(item.clone(title="Novedades", action="lista", url=host + "/cartelera/"))
    itemlist.append(item.clone(title="Actualizadas", action="lista", url=host + "/peliculas-actualizadas/"))
@@ -24,7 +19,6 @@ def mainlist(item):
itemlist.append(item.clone(title="Año", action="menuseccion", url=host, extra="/estrenos-gratis/"))
itemlist.append(item.clone(title="Alfabetico", action="alfabetica", url=host + '/mirar/'))
itemlist.append(item.clone(title="Buscar", action="search", url=host + "/ver/"))
return itemlist
@@ -33,7 +27,6 @@ def alfabetica(item):
    itemlist = []
    for letra in "1abcdefghijklmnopqrstuvwxyz":
        itemlist.append(item.clone(title=letra.upper(), url=item.url + letra, action="lista"))
    return itemlist
@@ -42,7 +35,6 @@ def menuseccion(item):
    itemlist = []
    seccion = item.extra
    data = httptools.downloadpage(item.url).data
    if seccion == '/online/':
        data = scrapertools.find_single_match(data,
            '<h2 class="[^"]+"><i class="[^"]+"></i>Películas por géneros<u class="[^"]+"></u></h2>(.*?)<ul class="abc">')
@@ -50,8 +42,7 @@ def menuseccion(item):
    elif seccion == '/estrenos-gratis/':
        data = scrapertools.find_single_match(data, '<ul class="lista-anio" id="lista-anio">(.*?)</ul>')
    patron = '<li ><a href="([^"]+)" title="[^"]+">([^<]+)</a></li>'
-    matches = re.compile(patron, re.DOTALL).findall(data)
+    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, extra in matches:
        itemlist.append(Item(channel=item.channel, action='lista', title=extra, url=scrapedurl))
    return itemlist
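
menuseccion narrows the page to one section with find_single_match before extracting links, so the link pattern cannot leak into other lists. A plain-re equivalent (markup illustrative):

import re

html = ('<ul class="lista-anio" id="lista-anio">'
        '<li ><a href="/estrenos/2018" title="2018">2018</a></li></ul>')
section = re.search('<ul class="lista-anio" id="lista-anio">(.*?)</ul>', html, re.DOTALL).group(1)
for url, text in re.findall('<a href="([^"]+)" title="[^"]+">([^<]+)</a>', section):
    print("%s -> %s" % (text, url))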
@@ -64,22 +55,19 @@ def lista(item):
    listado = scrapertools.find_single_match(data,
        '<div id="sipeliculas" class="borde"><div class="izquierda">(.*?)<div class="derecha"><h2')
    patron = '<a class="i" href="(.*?)".*?src="(.*?)".*?title=.*?>(.*?)<.*?span>(.*?)<.*?<p><span>(.*?)<'
-    matches = re.compile(patron, re.DOTALL).findall(listado)
+    matches = scrapertools.find_multiple_matches(listado, patron)
    for scrapedurl, scrapedthumbnail, scrapedtitle, year, plot in matches:
-        itemlist.append(Item(channel=item.channel, action='findvideos', title=scrapedtitle, url=scrapedurl,
-                             thumbnail=scrapedthumbnail, plot=plot, contentTitle=scrapedtitle, extra=item.extra,
+        itemlist.append(Item(channel=item.channel, action='findvideos', title=scrapedtitle + " (%s)" % year, url=scrapedurl,
+                             thumbnail=scrapedthumbnail, contentTitle=scrapedtitle, extra=item.extra,
                             infoLabels={'year': year}))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # Pagination
    if itemlist != []:
        patron = '<li[^<]+<a href="([^"]+)" title="[^"]+">Siguiente[^<]+</a></li>'
-        matches = re.compile(patron, re.DOTALL).findall(data)
+        matches = scrapertools.find_multiple_matches(data, patron)
        if matches:
            itemlist.append(
-                item.clone(title="Pagina Siguiente", action='lista', url=urlparse.urljoin(host, matches[0])))
+                item.clone(title="Pagina Siguiente", action='lista', url=host + "/" + matches[0]))
    return itemlist
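
The pagination URL is now built by plain concatenation instead of urlparse.urljoin, presumably because the site returns host-relative fragments without a leading slash. The two only agree in that case; a quick comparison (paths illustrative):

try:
    from urlparse import urljoin        # Python 2, as in this repo
except ImportError:
    from urllib.parse import urljoin    # Python 3

host = 'http://www.sipeliculas.com'
print(urljoin(host, "cartelera/2"))   # -> http://www.sipeliculas.com/cartelera/2
print(host + "/" + "cartelera/2")     # -> http://www.sipeliculas.com/cartelera/2
print(host + "/" + "/cartelera/2")    # -> double slash if the fragment is absolute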
@@ -97,11 +85,10 @@ def findvideos(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    listado1 = scrapertools.find_single_match(data,
        '<div class="links" id="ver-mas-opciones"><h2 class="h2"><i class="[^"]+"></i>[^<]+</h2><ul class="opciones">(.*?)</ul>')
    patron1 = '<li ><a id="([^"]+)" rel="nofollow" href="([^"]+)" title="[^"]+" alt="([^"]+)"><span class="opcion"><i class="[^"]+"></i><u>[^<]+</u>[^<]+</span><span class="ico"><img src="[^"]+" alt="[^"]+"/>[^<]+</span><span>([^"]+)</span><span>([^"]+)</span></a></li>'
-    matches = re.compile(patron1, re.DOTALL).findall(listado1)
+    matches = scrapertools.find_multiple_matches(listado1, patron1)
    for vidId, vidUrl, vidServer, language, quality in matches:
        server = servertools.get_server_name(vidServer)
        if 'Sub' in language:
@@ -109,39 +96,32 @@ def findvideos(item):
        itemlist.append(Item(channel=item.channel, action='play', url=vidUrl, extra=vidId,
                             title='Ver en ' + vidServer + ' | ' + language + ' | ' + quality,
                             thumbnail=item.thumbnail, server=server, language=language, quality=quality))
    listado2 = scrapertools.find_single_match(data, '<ul class="opciones-tab">(.*?)</ul>')
    patron2 = '<li ><a id="([^"]+)" rel="nofollow" href="([^"]+)" title="[^"]+" alt="([^"]+)"><img src="[^"]+" alt="[^"]+"/>[^<]+</a></li>'
-    matches = re.compile(patron2, re.DOTALL).findall(listado2)
+    matches = scrapertools.find_multiple_matches(listado2, patron2)
    for vidId, vidUrl, vidServer in matches:
        server = servertools.get_server_name(vidServer)
        itemlist.append(Item(channel=item.channel, action='play', url=vidUrl, extra=vidId, title='Ver en ' + vidServer,
                             thumbnail=item.thumbnail, server=server))
    for videoitem in itemlist:
        videoitem.fulltitle = item.title
        videoitem.folder = False
    return itemlist
def play(item):
    logger.info()
    itemlist = []
-    video = httptools.downloadpage(host + '/ajax.public.php', 'acc=ver_opc&f=' + item.extra).data
-    logger.info("video=" + video)
-    enlaces = servertools.findvideos(video)
-    if enlaces:
-        logger.info("server=" + enlaces[0][2])
-        thumbnail = servertools.guess_server_thumbnail(video)
-        # Add to the XBMC listing
-        itemlist.append(
-            Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=enlaces[0][1],
-                 server=enlaces[0][2], thumbnail=thumbnail, folder=False))
+    data = httptools.downloadpage(item.url).data
+    video = scrapertools.find_single_match(data, '</div><iframe src="([^"]+)')
+    if video:
+        itemlist.append(
+            item.clone(action="play", url=video, folder=False, server=""))
+    itemlist = servertools.get_servers_itemlist(itemlist)
+    itemlist[0].thumbnail = item.contentThumbnail
    return itemlist
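
This is the "fix play" from the commit message: the old ajax.public.php endpoint is dropped, and play() now loads the option page itself and lifts the player iframe, letting servertools identify the server from the URL. A standalone sketch of the extraction step (HTML illustrative):

import re

page = '<div class="player"></div><iframe src="https://megadrive.co/embed/abc123" allowfullscreen>'
video = re.search(r'</div><iframe src="([^"]+)', page)
if video:
    print(video.group(1))  # -> https://megadrive.co/embed/abc123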
def newest(categoria):
    logger.info()
    itemlist = []
@@ -155,16 +135,13 @@ def newest(categoria):
            item.url = host + "/online/terror/"
        else:
            return []
        itemlist = lista(item)
        if itemlist[-1].title == "» Siguiente »":
            itemlist.pop()
    # Catch the exception so one failing channel does not break the novedades (new releases) channel
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
    return itemlist

View File

@@ -4,18 +4,14 @@
"ignore_urls": [],
"patterns": [
{
"pattern": "http://tusfiles.org/\\?([A-z0-9]+)",
"url": "http://tusfiles.org/?\\1/"
},
{
"pattern": "tusfiles.net/(?:embed-|)([A-z0-9]+)",
"url": "http://tusfiles.net/\\1"
"pattern": "megadrive.co/embed/([A-z0-9]+)",
"url": "https://megadrive.co/embed/\\1"
}
]
},
"free": true,
"id": "tusfiles",
"name": "tusfiles",
"id": "megadrive",
"name": "megadrive",
"settings": [
{
"default": false,
@@ -41,5 +37,6 @@
"type": "list",
"visible": false
}
]
}
],
"thumbnail": "https://s8.postimg.cc/kr5olxmad/megadrive1.png"
}
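
The patterns block maps any URL the scraper encounters onto a canonical embed URL: "pattern" is matched against the page link and "url" is the rewrite template. A sketch of that mechanic (the dispatch itself lives in the servers framework; note that [A-z] also admits the punctuation between 'Z' and 'a', so [A-Za-z0-9] would be stricter):

import re

pattern = "megadrive.co/embed/([A-z0-9]+)"
url_template = "https://megadrive.co/embed/\\1"

page_url = "http://megadrive.co/embed/abc123"  # illustrative
m = re.search(pattern, page_url)
if m:
    print(m.expand(url_template))  # -> https://megadrive.co/embed/abc123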

View File

@@ -0,0 +1,27 @@
# -*- coding: utf-8 -*-
from core import httptools
from core import scrapertools
from platformcode import logger


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data
    if "no longer exists" in data or "to copyright issues" in data:
        return False, "[Megadrive] El video ha sido borrado"
    if "please+try+again+later." in data:
        return False, "[Megadrive] Error de Megadrive, no se puede generar el enlace al video"
    if "File has been removed due to inactivity" in data:
        return False, "[Megadrive] El archivo ha sido removido por inactividad"
    return True, ""


def get_video_url(page_url, user="", password="", video_password=""):
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data
    video_urls = []
    videourl = scrapertools.find_single_match(data, "<source.*?src='([^']+)")
    video_urls.append([".MP4 [megadrive]", videourl])
    return video_urls
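
For context, this follows the usual server-connector contract: the framework calls test_video_exists first, then get_video_url, which returns a list of [label, direct_url] pairs. A usage sketch (the embed id is hypothetical):

page_url = "https://megadrive.co/embed/abc123"
ok, message = test_video_exists(page_url)
if ok:
    for label, direct_url in get_video_url(page_url):
        print("%s -> %s" % (label, direct_url))
else:
    print(message)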