Compare commits
77 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
8985f3ebdd | ||
|
|
d60c246bbb | ||
|
|
3b29fe47bb | ||
|
|
3093f72ce5 | ||
|
|
55dcf3f091 | ||
|
|
2924b6958d | ||
|
|
927310c7c6 | ||
|
|
0c25891790 | ||
|
|
212c06057f | ||
|
|
9c3b3e9256 | ||
|
|
6dc853b41e | ||
|
|
7afd09dfa9 | ||
|
|
6855508eaa | ||
|
|
2925c29671 | ||
|
|
506e68e8a3 | ||
|
|
9cc30152f8 | ||
|
|
267c9d8031 | ||
|
|
bd68b83b6c | ||
|
|
c1f8039672 | ||
|
|
99dfa2be58 | ||
|
|
39e711b3cb | ||
|
|
2d8d2b3baf | ||
|
|
82d126c3e1 | ||
|
|
8d41fd1c64 | ||
|
|
a8c2f409eb | ||
|
|
7b2a3c2181 | ||
|
|
9e6729f0be | ||
|
|
241e644dcf | ||
|
|
ae318721ab | ||
|
|
8328610ffa | ||
|
|
19101b5310 | ||
|
|
22827e0f7e | ||
|
|
1747c9795d | ||
|
|
f3effe9a7f | ||
|
|
0621b1fa91 | ||
|
|
16473764c9 | ||
|
|
6b1727a0b8 | ||
|
|
11fceffd14 | ||
|
|
3a49b8a442 | ||
|
|
162772e9dc | ||
|
|
60d61f861b | ||
|
|
cd1c7b692a | ||
|
|
10abe4a6d4 | ||
|
|
b0fa5e8a75 | ||
|
|
54d6a943f5 | ||
|
|
44df5b6036 | ||
|
|
ae67d9b5ee | ||
|
|
895d14760d | ||
|
|
b0b4b218f0 | ||
|
|
348787ae97 | ||
|
|
0f7c11efad | ||
|
|
ae7a4a8d83 | ||
|
|
fc58c717eb | ||
|
|
b3a19f3d20 | ||
|
|
0cac09eef5 | ||
|
|
9a1effbe25 | ||
|
|
44145660d0 | ||
|
|
aec2674316 | ||
|
|
09de611aae | ||
|
|
74598154c2 | ||
|
|
7ab9c8bb29 | ||
|
|
14178974a0 | ||
|
|
c43162cbc2 | ||
|
|
aa76986a51 | ||
|
|
9aae0e7a1b | ||
|
|
e1fe886602 | ||
|
|
19812c83a9 | ||
|
|
cabc2458e3 | ||
|
|
336376ecef | ||
|
|
af06269e39 | ||
|
|
f37d18ee0a | ||
|
|
6fefc3b048 | ||
|
|
ab5fe41403 | ||
|
|
15463ea0f8 | ||
|
|
badf40573c | ||
|
|
c80793e3e0 | ||
|
|
cbc0ff0bd0 |
@@ -1,5 +1,5 @@
|
||||
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
|
||||
<addon id="plugin.video.alfa" name="Alfa" version="2.3.1" provider-name="Alfa Addon">
|
||||
<addon id="plugin.video.alfa" name="Alfa" version="2.3.4" provider-name="Alfa Addon">
|
||||
<requires>
|
||||
<import addon="xbmc.python" version="2.1.0"/>
|
||||
<import addon="script.module.libtorrent" optional="true"/>
|
||||
@@ -19,9 +19,12 @@
|
||||
</assets>
|
||||
<news>[B]Estos son los cambios para esta versión:[/B]
|
||||
[COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
|
||||
» ohlatino » animemovil
|
||||
» pelisplus » flashx
|
||||
» allpeliculas » repelis
|
||||
» flashx » ultrapeliculashd
|
||||
» gvideo » streamixcloud
|
||||
» vshare » anitoonstv
|
||||
¤ arreglos internos
|
||||
[COLOR green]Gracias a [COLOR yellow]Danielr460[/COLOR] por su colaboración en esta versión[/COLOR]
|
||||
</news>
|
||||
<description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>
|
||||
<summary lang="en">Browse web pages using Kodi</summary>
|
||||
|
||||
@@ -1,7 +1,5 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import urlparse
|
||||
|
||||
from core import httptools
|
||||
from core import jsontools
|
||||
from core import scrapertools
|
||||
@@ -59,6 +57,7 @@ def colecciones(item):
|
||||
title = scrapedtitle.capitalize() + " (" + scrapedcantidad + ")"
|
||||
itemlist.append(Item(channel = item.channel,
|
||||
action = "listado_colecciones",
|
||||
page = 1,
|
||||
thumbnail = host + scrapedthumbnail,
|
||||
title = title,
|
||||
url = host + scrapedurl
|
||||
@@ -71,7 +70,7 @@ def listado_colecciones(item):
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data_url = scrapertools.find_single_match(data, "data_url: '([^']+)")
|
||||
post = "page=1"
|
||||
post = "page=%s" %item.page
|
||||
data = httptools.downloadpage(host + data_url, post=post).data
|
||||
patron = 'a href="(/peli[^"]+).*?'
|
||||
patron += 'src="([^"]+).*?'
|
||||
@@ -88,6 +87,16 @@ def listado_colecciones(item):
|
||||
url = host + scrapedurl
|
||||
))
|
||||
tmdb.set_infoLabels(itemlist)
|
||||
item.page += 1
|
||||
post = "page=%s" %item.page
|
||||
data = httptools.downloadpage(host + data_url, post=post).data
|
||||
if len(data) > 50:
|
||||
itemlist.append(Item(channel = item.channel,
|
||||
action = "listado_colecciones",
|
||||
title = "Pagina siguiente>>",
|
||||
page = item.page,
|
||||
url = item.url
|
||||
))
|
||||
return itemlist
|
||||
|
||||
|
||||
@@ -159,6 +168,7 @@ def lista(item):
|
||||
params = jsontools.dump(dict_param)
|
||||
|
||||
data = httptools.downloadpage(item.url, post=params).data
|
||||
data = data.replace("<mark>","").replace("<\/mark>","")
|
||||
dict_data = jsontools.load(data)
|
||||
|
||||
for it in dict_data["items"]:
|
||||
@@ -167,7 +177,7 @@ def lista(item):
|
||||
rating = it["imdb"]
|
||||
year = it["year"]
|
||||
url = host + "pelicula/" + it["slug"]
|
||||
thumb = urlparse.urljoin(host, it["image"])
|
||||
thumb = host + it["image"]
|
||||
item.infoLabels['year'] = year
|
||||
itemlist.append(item.clone(action="findvideos", title=title, fulltitle=title, url=url, thumbnail=thumb,
|
||||
plot=plot, context=["buscar_trailer"], contentTitle=title, contentType="movie"))
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
"name": "Animemovil",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": ["cat", "lat"],
|
||||
"language": ["cast", "lat"],
|
||||
"thumbnail": "https://s1.postimg.org/92ji7stii7/animemovil1.png",
|
||||
"banner": "",
|
||||
"categories": [
|
||||
|
||||
@@ -86,7 +86,7 @@ def recientes(item):
|
||||
tipo = "tvshow"
|
||||
show = contentTitle
|
||||
action = "episodios"
|
||||
context = renumbertools.context
|
||||
context = renumbertools.context(item)
|
||||
if item.extra == "recientes":
|
||||
action = "findvideos"
|
||||
context = ""
|
||||
@@ -96,7 +96,7 @@ def recientes(item):
|
||||
action = "peliculas"
|
||||
if not thumb.startswith("http"):
|
||||
thumb = "http:%s" % thumb
|
||||
|
||||
action ="findvideos"
|
||||
infoLabels = {'filtro': {"original_language": "ja"}.items()}
|
||||
itemlist.append(item.clone(action=action, title=title, url=url, thumbnail=thumb, text_color=color3,
|
||||
contentTitle=contentTitle, contentSerieName=show, infoLabels=infoLabels,
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import re
|
||||
|
||||
@@ -148,35 +148,21 @@ def findvideos(item):
|
||||
itemla = scrapertools.find_multiple_matches(data_vid, '<div class="serv">.+?-(.+?)-(.+?)<\/div><.+? src="(.+?)"')
|
||||
for server, quality, url in itemla:
|
||||
if "Calidad Alta" in quality:
|
||||
quality = quality.replace("Calidad Alta", "HQ")
|
||||
server = server.lower().strip()
|
||||
if "ok" == server:
|
||||
server = 'okru'
|
||||
if "netu" == server:
|
||||
continue
|
||||
quality = "HQ"
|
||||
if "HQ" in quality:
|
||||
quality = "HD"
|
||||
if " Calidad media - Carga mas rapido" in quality:
|
||||
quality = "360p"
|
||||
server = server.lower().strip()
|
||||
if "ok" in server:
|
||||
server = 'okru'
|
||||
if "rapid" in server:
|
||||
server = 'rapidvideo'
|
||||
if "netu" in server:
|
||||
server = 'netutv'
|
||||
itemlist.append(item.clone(url=url, action="play", server=server, contentQuality=quality,
|
||||
thumbnail=scrapedthumbnail, plot=scrapedplot,
|
||||
title="Enlace encontrado en %s: [%s]" % (server.capitalize(), quality)))
|
||||
|
||||
title="Enlace encontrado en: %s [%s]" % (server.capitalize(), quality)))
|
||||
|
||||
autoplay.start(itemlist, item)
|
||||
return itemlist
|
||||
|
||||
|
||||
def play(item):
|
||||
logger.info()
|
||||
|
||||
itemlist = []
|
||||
|
||||
# Buscamos video por servidor ...
|
||||
devuelve = servertools.findvideosbyserver(item.url, item.server)
|
||||
|
||||
if not devuelve:
|
||||
# ...sino lo encontramos buscamos en todos los servidores disponibles
|
||||
devuelve = servertools.findvideos(item.url, skip=True)
|
||||
|
||||
if devuelve:
|
||||
# logger.debug(devuelve)
|
||||
itemlist.append(Item(channel=item.channel, title=item.contentTitle, action="play", server=devuelve[0][2],
|
||||
url=devuelve[0][1], thumbnail=item.thumbnail))
|
||||
|
||||
return itemlist
|
||||
|
||||
@@ -7,6 +7,7 @@ from core import jsontools
|
||||
from core.item import Item
|
||||
from platformcode import config, logger
|
||||
from platformcode import platformtools
|
||||
from platformcode import launcher
|
||||
|
||||
__channel__ = "autoplay"
|
||||
|
||||
@@ -78,7 +79,20 @@ def start(itemlist, item):
|
||||
:return: intenta autoreproducir, en caso de fallar devuelve el itemlist que recibio en un principio
|
||||
'''
|
||||
logger.info()
|
||||
|
||||
for videoitem in itemlist:
|
||||
#Nos dice de donde viene si del addon o videolibrary
|
||||
if item.contentChannel=='videolibrary':
|
||||
videoitem.contentEpisodeNumber=item.contentEpisodeNumber
|
||||
videoitem.contentPlot=item.contentPlot
|
||||
videoitem.contentSeason=item.contentSeason
|
||||
videoitem.contentSerieName=item.contentSerieName
|
||||
videoitem.contentTitle=item.contentTitle
|
||||
videoitem.contentType=item.contentType
|
||||
videoitem.episode_id=item.episode_id
|
||||
videoitem.hasContentDetails=item.hasContentDetails
|
||||
videoitem.infoLabels=item.infoLabels
|
||||
videoitem.thumbnail=item.thumbnail
|
||||
#videoitem.title=item.title
|
||||
if not config.is_xbmc():
|
||||
#platformtools.dialog_notification('AutoPlay ERROR', 'Sólo disponible para XBMC/Kodi')
|
||||
return itemlist
|
||||
@@ -261,8 +275,12 @@ def start(itemlist, item):
|
||||
else:
|
||||
videoitem = resolved_item[0]
|
||||
|
||||
# si no directamente reproduce
|
||||
platformtools.play_video(videoitem)
|
||||
# si no directamente reproduce y marca como visto
|
||||
from platformcode import xbmc_videolibrary
|
||||
xbmc_videolibrary.mark_auto_as_watched(item)
|
||||
#platformtools.play_video(videoitem)
|
||||
videoitem.contentChannel='videolibrary'
|
||||
launcher.run(videoitem)
|
||||
|
||||
try:
|
||||
if platformtools.is_playing():
|
||||
|
||||
4
plugin.video.alfa/channels/bajui2.json → plugin.video.alfa/channels/bajui.json
Executable file → Normal file
4
plugin.video.alfa/channels/bajui2.json → plugin.video.alfa/channels/bajui.json
Executable file → Normal file
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"id": "bajui2",
|
||||
"name": "Bajui2",
|
||||
"id": "bajui",
|
||||
"name": "Bajui",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": ["cast"],
|
||||
24
plugin.video.alfa/channels/bajui2.py → plugin.video.alfa/channels/bajui.py
Executable file → Normal file
24
plugin.video.alfa/channels/bajui2.py → plugin.video.alfa/channels/bajui.py
Executable file → Normal file
@@ -13,7 +13,7 @@ def mainlist(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
itemlist.append(Item(channel=item.channel, title="Películas", action="menupeliculas",
|
||||
url="http://www.bajui2.com/descargas/categoria/2/peliculas",
|
||||
url="http://www.bajui.org/descargas/categoria/2/peliculas",
|
||||
fanart=item.fanart))
|
||||
itemlist.append(Item(channel=item.channel, title="Series", action="menuseries",
|
||||
fanart=item.fanart))
|
||||
@@ -51,13 +51,13 @@ def menuseries(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
itemlist.append(Item(channel=item.channel, title="Series - Novedades", action="peliculas",
|
||||
url="http://www.bajui2.com/descargas/categoria/3/series",
|
||||
url="http://www.bajui.org/descargas/categoria/3/series",
|
||||
fanart=item.fanart, viewmode="movie_with_plot"))
|
||||
itemlist.append(Item(channel=item.channel, title="Series - A-Z", action="peliculas",
|
||||
url="http://www.bajui2.com/descargas/categoria/3/series/orden:nombre",
|
||||
url="http://www.bajui.org/descargas/categoria/3/series/orden:nombre",
|
||||
fanart=item.fanart, viewmode="movie_with_plot"))
|
||||
itemlist.append(Item(channel=item.channel, title="Series - HD", action="peliculas",
|
||||
url="http://www.bajui2.com/descargas/subcategoria/11/hd/orden:nombre",
|
||||
url="http://www.bajui.org/descargas/subcategoria/11/hd/orden:nombre",
|
||||
fanart=item.fanart, viewmode="movie_with_plot"))
|
||||
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url="",
|
||||
fanart=item.fanart))
|
||||
@@ -68,10 +68,10 @@ def menudocumentales(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
itemlist.append(Item(channel=item.channel, title="Documentales - Novedades", action="peliculas",
|
||||
url="http://www.bajui2.com/descargas/categoria/7/docus-y-tv",
|
||||
url="http://www.bajui.org/descargas/categoria/7/docus-y-tv",
|
||||
fanart=item.fanart, viewmode="movie_with_plot"))
|
||||
itemlist.append(Item(channel=item.channel, title="Documentales - A-Z", action="peliculas",
|
||||
url="http://www.bajui2.com/descargas/categoria/7/docus-y-tv/orden:nombre",
|
||||
url="http://www.bajui.org/descargas/categoria/7/docus-y-tv/orden:nombre",
|
||||
fanart=item.fanart, viewmode="movie_with_plot"))
|
||||
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url="",
|
||||
fanart=item.fanart))
|
||||
@@ -86,7 +86,7 @@ def search(item, texto, categoria=""):
|
||||
texto = texto.replace(" ", "+")
|
||||
logger.info("categoria: " + categoria + " url: " + url)
|
||||
try:
|
||||
item.url = "http://www.bajui2.com/descargas/busqueda/%s"
|
||||
item.url = "http://www.bajui.org/descargas/busqueda/%s"
|
||||
item.url = item.url % texto
|
||||
itemlist.extend(peliculas(item))
|
||||
return itemlist
|
||||
@@ -118,7 +118,7 @@ def peliculas(item, paginacion=True):
|
||||
scrapedtitle = title
|
||||
scrapedplot = clean_plot(plot)
|
||||
scrapedurl = urlparse.urljoin(item.url, url)
|
||||
scrapedthumbnail = urlparse.urljoin("http://www.bajui2.com/", thumbnail.replace("_m.jpg", "_g.jpg"))
|
||||
scrapedthumbnail = urlparse.urljoin("http://www.bajui.org/", thumbnail.replace("_m.jpg", "_g.jpg"))
|
||||
logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
|
||||
|
||||
# Añade al listado de XBMC
|
||||
@@ -133,7 +133,7 @@ def peliculas(item, paginacion=True):
|
||||
scrapertools.printMatches(matches)
|
||||
|
||||
if len(matches) > 0:
|
||||
scrapedurl = urlparse.urljoin("http://www.bajui2.com/", matches[0])
|
||||
scrapedurl = urlparse.urljoin("http://www.bajui.org/", matches[0])
|
||||
pagitem = Item(channel=item.channel, action="peliculas", title=">> Página siguiente", url=scrapedurl,
|
||||
fanart=item.fanart, viewmode="movie_with_plot")
|
||||
if not paginacion:
|
||||
@@ -197,7 +197,7 @@ def enlaces(item):
|
||||
|
||||
try:
|
||||
item.thumbnail = scrapertools.get_match(data, '<div class="ficha-imagen"[^<]+<img src="([^"]+)"')
|
||||
item.thumbnail = urlparse.urljoin("http://www.bajui2.com/", item.thumbnail)
|
||||
item.thumbnail = urlparse.urljoin("http://www.bajui.org/", item.thumbnail)
|
||||
except:
|
||||
pass
|
||||
|
||||
@@ -234,8 +234,8 @@ def enlaces(item):
|
||||
lista_servidores = lista_servidores[:-2]
|
||||
|
||||
scrapedthumbnail = item.thumbnail
|
||||
# http://www.bajui2.com/ajax/mostrar-enlaces.php?id=330582&code=124767d31bfbf14c3861
|
||||
scrapedurl = "http://www.bajui2.com/ajax/mostrar-enlaces.php?id=" + id + "&code=" + id2
|
||||
# http://www.bajui.org/ajax/mostrar-enlaces.php?id=330582&code=124767d31bfbf14c3861
|
||||
scrapedurl = "http://www.bajui.org/ajax/mostrar-enlaces.php?id=" + id + "&code=" + id2
|
||||
scrapedplot = item.plot
|
||||
scrapedtitle = "Enlaces de " + usuario + " (" + fecha + ") (" + lista_servidores + ")"
|
||||
|
||||
@@ -9,6 +9,7 @@ from core import servertools
|
||||
from core import tmdb
|
||||
from core.item import Item
|
||||
from platformcode import config, logger
|
||||
from channels import autoplay
|
||||
|
||||
host = "http://www.cartoon-latino.com/"
|
||||
from channels import autoplay
|
||||
@@ -150,7 +151,6 @@ def episodios(item):
|
||||
|
||||
if config.get_videolibrary_support() and len(itemlist) > 0:
|
||||
itemlist.append(Item(channel=item.channel, title="Añadir " + show + " a la videoteca", url=item.url,
|
||||
|
||||
action="add_serie_to_library", extra="episodios", show=show))
|
||||
|
||||
return itemlist
|
||||
@@ -185,29 +185,5 @@ def findvideos(item):
|
||||
server1 = server
|
||||
itemlist.append(item.clone(url=url, action="play", server=server1,
|
||||
title="Enlace encontrado en %s " % (server1.capitalize())))
|
||||
|
||||
autoplay.start(itemlist, item)
|
||||
return itemlist
|
||||
|
||||
|
||||
def play(item):
|
||||
logger.info()
|
||||
|
||||
itemlist = []
|
||||
|
||||
# Buscamos video por servidor ...
|
||||
|
||||
devuelve = servertools.findvideosbyserver(item.url, item.server)
|
||||
|
||||
if not devuelve:
|
||||
# ...sino lo encontramos buscamos en todos los servidores disponibles
|
||||
|
||||
devuelve = servertools.findvideos(item.url, skip=True)
|
||||
|
||||
if devuelve:
|
||||
# logger.debug(devuelve)
|
||||
itemlist.append(Item(channel=item.channel, title=item.contentTitle, action="play", server=devuelve[0][2],
|
||||
|
||||
url=devuelve[0][1], thumbnail=item.thumbnail, folder=False))
|
||||
|
||||
return itemlist
|
||||
|
||||
@@ -28,9 +28,9 @@ def mainlist(item):
|
||||
itemlist = []
|
||||
item.viewmode = viewmode
|
||||
|
||||
data = httptools.downloadpage(CHANNEL_HOST).data
|
||||
total = scrapertools.find_single_match(data, "TENEMOS\s<b>(.*?)</b>")
|
||||
titulo = "Peliculas"
|
||||
data = httptools.downloadpage(CHANNEL_HOST + "pelicula").data
|
||||
total = scrapertools.find_single_match(data, "Películas</h1><span>(.*?)</span>")
|
||||
titulo = "Peliculas (%s)" %total
|
||||
itemlist.append(item.clone(title=titulo, text_color=color2, action="", text_bold=True))
|
||||
itemlist.append(item.clone(action="peliculas", title=" Novedades", url=CHANNEL_HOST + "pelicula",
|
||||
thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres"
|
||||
|
||||
12
plugin.video.alfa/channels/danimados.json
Normal file
12
plugin.video.alfa/channels/danimados.json
Normal file
@@ -0,0 +1,12 @@
|
||||
{
|
||||
"id": "danimados",
|
||||
"name": "Danimados",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": ["lat"],
|
||||
"thumbnail": "https://imgur.com/kU5Lx1S.png",
|
||||
"banner": "https://imgur.com/xG5xqBq.png",
|
||||
"categories": [
|
||||
"tvshow"
|
||||
]
|
||||
}
|
||||
186
plugin.video.alfa/channels/danimados.py
Normal file
186
plugin.video.alfa/channels/danimados.py
Normal file
@@ -0,0 +1,186 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import re
|
||||
|
||||
from channelselector import get_thumb
|
||||
from core import httptools
|
||||
from core import scrapertools
|
||||
from core import servertools
|
||||
from core import tmdb
|
||||
from core.item import Item
|
||||
from platformcode import config, logger
|
||||
from channels import autoplay
|
||||
|
||||
host = "http://www.danimados.com/"
|
||||
|
||||
list_servers = ['openload',
|
||||
'okru',
|
||||
'rapidvideo'
|
||||
]
|
||||
list_quality = ['default']
|
||||
|
||||
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
|
||||
thumb_series = get_thumb("channels_tvshow.png")
|
||||
autoplay.init(item.channel, list_servers, list_quality)
|
||||
|
||||
itemlist = list()
|
||||
|
||||
itemlist.append(Item(channel=item.channel, action="mainpage", title="Categorías", url=host,
|
||||
thumbnail=thumb_series))
|
||||
itemlist.append(Item(channel=item.channel, action="mainpage", title="Más Populares", url=host,
|
||||
thumbnail=thumb_series))
|
||||
#itemlist.append(Item(channel=item.channel, action="movies", title="Peliculas Animadas", url=host,
|
||||
# thumbnail=thumb_series))
|
||||
autoplay.show_option(item.channel, itemlist)
|
||||
return itemlist
|
||||
|
||||
|
||||
"""
|
||||
def search(item, texto):
|
||||
logger.info()
|
||||
texto = texto.replace(" ","+")
|
||||
item.url = item.url+texto
|
||||
if texto!='':
|
||||
return lista(item)
|
||||
"""
|
||||
|
||||
|
||||
def mainpage(item):
|
||||
logger.info()
|
||||
|
||||
itemlist = []
|
||||
|
||||
data1 = httptools.downloadpage(item.url).data
|
||||
data1 = re.sub(r"\n|\r|\t|\s{2}| ", "", data1)
|
||||
if item.title=="Más Populares":
|
||||
patron_sec='<a class="lglossary" data-type.+?>(.+?)<\/ul>'
|
||||
patron='<img .+? src="([^"]+)".+?<a href="([^"]+)".+?>([^"]+)<\/a>' #scrapedthumbnail, #scrapedurl, #scrapedtitle
|
||||
if item.title=="Categorías":
|
||||
patron_sec='<ul id="main_header".+?>(.+?)<\/ul><\/div>'
|
||||
patron='<a href="([^"]+)">([^"]+)<\/a>'#scrapedurl, #scrapedtitle
|
||||
|
||||
data = scrapertools.find_single_match(data1, patron_sec)
|
||||
|
||||
matches = scrapertools.find_multiple_matches(data, patron)
|
||||
if item.title=="Géneros" or item.title=="Categorías":
|
||||
for scrapedurl, scrapedtitle in matches:
|
||||
if "Películas Animadas"!=scrapedtitle:
|
||||
itemlist.append(
|
||||
Item(channel=item.channel, title=scrapedtitle, url=scrapedurl, action="lista"))
|
||||
return itemlist
|
||||
else:
|
||||
for scraped1, scraped2, scrapedtitle in matches:
|
||||
scrapedthumbnail=scraped1
|
||||
scrapedurl=scraped2
|
||||
itemlist.append(
|
||||
Item(channel=item.channel, title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, action="episodios",
|
||||
show=scrapedtitle))
|
||||
tmdb.set_infoLabels(itemlist)
|
||||
return itemlist
|
||||
return itemlist
|
||||
|
||||
|
||||
def lista(item):
|
||||
logger.info()
|
||||
|
||||
itemlist = []
|
||||
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
|
||||
data_lista = scrapertools.find_single_match(data, '<div class="items">(.+?)<\/div><\/div><div class=.+?>')
|
||||
patron = '<img src="([^"]+)" alt="([^"]+)">.+?<a href="([^"]+)">.+?<div class="texto">(.+?)<\/div>'
|
||||
#scrapedthumbnail,#scrapedtitle, #scrapedurl, #scrapedplot
|
||||
matches = scrapertools.find_multiple_matches(data_lista, patron)
|
||||
for scrapedthumbnail,scrapedtitle, scrapedurl, scrapedplot in matches:
|
||||
itemlist.append(
|
||||
item.clone(title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
|
||||
context=autoplay.context,plot=scrapedplot, action="episodios", show=scrapedtitle))
|
||||
tmdb.set_infoLabels(itemlist)
|
||||
return itemlist
|
||||
|
||||
|
||||
def episodios(item):
|
||||
logger.info()
|
||||
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
|
||||
|
||||
data_lista = scrapertools.find_single_match(data,
|
||||
'<ul class="episodios">(.+?)<\/ul><\/div><\/div><\/div>')
|
||||
show = item.title
|
||||
patron_caps = '<img src="([^"]+)"><\/a><\/div><div class=".+?">([^"]+)<\/div>.+?<a .+? href="([^"]+)">([^"]+)<\/a>'
|
||||
#scrapedthumbnail,#scrapedtempepi, #scrapedurl, #scrapedtitle
|
||||
matches = scrapertools.find_multiple_matches(data_lista, patron_caps)
|
||||
for scrapedthumbnail, scrapedtempepi, scrapedurl, scrapedtitle in matches:
|
||||
tempepi=scrapedtempepi.split(" - ")
|
||||
if tempepi[0]=='Pel':
|
||||
tempepi[0]=0
|
||||
title="{0}x{1} - ({2})".format(tempepi[0], tempepi[1].zfill(2), scrapedtitle)
|
||||
itemlist.append(Item(channel=item.channel, thumbnail=scrapedthumbnail,
|
||||
action="findvideos", title=title, url=scrapedurl, show=show))
|
||||
|
||||
if config.get_videolibrary_support() and len(itemlist) > 0:
|
||||
itemlist.append(Item(channel=item.channel, title="[COLOR blue]Añadir " + show + " a la videoteca[/COLOR]", url=item.url,
|
||||
action="add_serie_to_library", extra="episodios", show=show))
|
||||
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def findvideos(item):
|
||||
logger.info()
|
||||
|
||||
itemlist = []
|
||||
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
|
||||
data = scrapertools.find_single_match(data,
|
||||
'<div id="playex" .+?>(.+?)<\/nav><\/div><\/div>')
|
||||
patron='src="(.+?)"'
|
||||
itemla = scrapertools.find_multiple_matches(data,patron)
|
||||
for i in range(len(itemla)):
|
||||
#for url in itemla:
|
||||
url=itemla[i]
|
||||
#verificar existencia del video (testing)
|
||||
codigo=verificar_video(itemla[i])
|
||||
if codigo==200:
|
||||
if "ok.ru" in url:
|
||||
server='okru'
|
||||
else:
|
||||
server=''
|
||||
if "openload" in url:
|
||||
server='openload'
|
||||
if "google" in url:
|
||||
server='gvideo'
|
||||
if "rapidvideo" in url:
|
||||
server='rapidvideo'
|
||||
if "streamango" in url:
|
||||
server='streamango'
|
||||
if server!='':
|
||||
title="Enlace encontrado en %s " % (server.capitalize())
|
||||
else:
|
||||
title="NO DISPONIBLE"
|
||||
if title!="NO DISPONIBLE":
|
||||
itemlist.append(item.clone(title=title,url=url, action="play", server=server))
|
||||
|
||||
autoplay.start(itemlist, item)
|
||||
return itemlist
|
||||
|
||||
|
||||
def verificar_video(url):
|
||||
codigo=httptools.downloadpage(url).code
|
||||
if codigo==200:
|
||||
# Revise de otra forma
|
||||
data=httptools.downloadpage(url).data
|
||||
removed = scrapertools.find_single_match(data,'removed(.+)')
|
||||
if len(removed) != 0:
|
||||
codigo1=404
|
||||
else:
|
||||
codigo1=200
|
||||
else:
|
||||
codigo1=200
|
||||
return codigo1
|
||||
@@ -302,7 +302,7 @@ def epienlaces(item):
|
||||
|
||||
def findvideos(item):
|
||||
logger.info()
|
||||
if (item.extra and item.extra != "findvideos") or item.path:
|
||||
if item.contentSeason!='':
|
||||
return epienlaces(item)
|
||||
|
||||
itemlist = []
|
||||
|
||||
@@ -319,61 +319,34 @@ def findvideos(item):
|
||||
duplicados = []
|
||||
data = get_source(item.url)
|
||||
src = data
|
||||
patron = 'id=(?:div|player)(\d+)>.*?<iframe src=.*? data-lazy-src=(.*?) marginheight'
|
||||
patron = 'id=(?:div|player)(\d+)>.*?data-lazy-src=(.*?) scrolling'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for option, videoitem in matches:
|
||||
lang = scrapertools.find_single_match(src,
|
||||
'<a href=#(?:div|player)%s.*?>.*?(Doblado|Subtitulado)<\/a>' % option)
|
||||
'<a href=#(?:div|player)%s.*?>.*?(.*?)<\/a>' % option)
|
||||
if 'audio ' in lang.lower():
|
||||
lang=lang.lower().replace('audio ','')
|
||||
lang=lang.capitalize()
|
||||
|
||||
data = get_source(videoitem)
|
||||
if 'play' in videoitem:
|
||||
url = scrapertools.find_single_match(data, '<span>Ver Online<.*?<li><a href=(.*?)><span class=icon>')
|
||||
else:
|
||||
url = scrapertools.find_single_match(data, '<iframe src=(.*?) scrolling=')
|
||||
video_urls = scrapertools.find_multiple_matches(data, '<li><a href=(.*?)><span')
|
||||
for video in video_urls:
|
||||
video_data = get_source(video)
|
||||
if not 'fastplay' in video:
|
||||
new_url= scrapertools.find_single_match(video_data,'<li><a href=(.*?srt)><span')
|
||||
data_final = get_source(new_url)
|
||||
else:
|
||||
data_final=video_data
|
||||
url = scrapertools.find_single_match(data_final,'iframe src=(.*?) scrolling')
|
||||
quality = item.quality
|
||||
server = servertools.get_server_from_url(url)
|
||||
title = item.contentTitle + ' [%s] [%s]' % (server, lang)
|
||||
if item.quality != '':
|
||||
title = item.contentTitle + ' [%s] [%s] [%s]' % (server, quality, lang)
|
||||
|
||||
url_list.append([url, lang])
|
||||
|
||||
for video_url in url_list:
|
||||
language = video_url[1]
|
||||
if 'jw.miradetodo' in video_url[0]:
|
||||
data = get_source('http:' + video_url[0])
|
||||
patron = 'label:.*?(.*?),.*?file:.*?(.*?)&app.*?\}'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for quality, scrapedurl in matches:
|
||||
quality = quality
|
||||
title = item.contentTitle + ' (%s) %s' % (quality, language)
|
||||
server = 'directo'
|
||||
url = scrapedurl
|
||||
url = url.replace('\/', '/')
|
||||
subtitle = scrapertools.find_single_match(data, "tracks: \[\{file: '.*?linksub=(.*?)',label")
|
||||
if url not in duplicados:
|
||||
itemlist.append(item.clone(title=title,
|
||||
action='play',
|
||||
url=url,
|
||||
quality=quality,
|
||||
server=server,
|
||||
subtitle=subtitle,
|
||||
language=language
|
||||
))
|
||||
duplicados.append(url)
|
||||
elif video_url != '':
|
||||
itemlist.extend(servertools.find_video_items(data=video_url[0]))
|
||||
|
||||
import os
|
||||
for videoitem in itemlist:
|
||||
if videoitem.server != 'directo':
|
||||
|
||||
quality = item.quality
|
||||
title = item.contentTitle + ' (%s) %s' % (videoitem.server, language)
|
||||
if item.quality != '':
|
||||
title = item.contentTitle + ' (%s) %s' % (quality, language)
|
||||
videoitem.title = title
|
||||
videoitem.channel = item.channel
|
||||
|
||||
videoitem.thumbnail = os.path.join(config.get_runtime_path(), "resources", "media", "servers",
|
||||
"server_%s.png" % videoitem.server)
|
||||
videoitem.quality = item.quality
|
||||
if url!='':
|
||||
itemlist.append(item.clone(title=title, url=url, action='play', server=server, language=lang))
|
||||
|
||||
if item.infoLabels['mediatype'] == 'movie':
|
||||
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
|
||||
|
||||
12
plugin.video.alfa/channels/mundiseries.json
Normal file
12
plugin.video.alfa/channels/mundiseries.json
Normal file
@@ -0,0 +1,12 @@
|
||||
{
|
||||
"id": "mundiseries",
|
||||
"name": "Mundiseries",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": ["cast", "lat"],
|
||||
"thumbnail": "https://imgur.com/GdGMFi1.png",
|
||||
"banner": "https://imgur.com/1bDbYY1.png",
|
||||
"categories": [
|
||||
"tvshow"
|
||||
]
|
||||
}
|
||||
99
plugin.video.alfa/channels/mundiseries.py
Normal file
99
plugin.video.alfa/channels/mundiseries.py
Normal file
@@ -0,0 +1,99 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import re
|
||||
import urlparse
|
||||
|
||||
from channels import filtertools
|
||||
from platformcode import config, logger
|
||||
from platformcode import platformtools
|
||||
from core import scrapertools
|
||||
from core import servertools
|
||||
from core.item import Item
|
||||
from core import httptools
|
||||
from channels import autoplay
|
||||
|
||||
host = "http://mundiseries.com"
|
||||
list_servers = ['okru']
|
||||
list_quality = ['default']
|
||||
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
itemlist = list()
|
||||
autoplay.init(item.channel, list_servers, list_quality)
|
||||
|
||||
itemlist.append(Item(channel=item.channel, action="lista", title="Series", url=urlparse.urljoin(host, "/lista-de-series")))
|
||||
autoplay.show_option(item.channel, itemlist)
|
||||
|
||||
return itemlist
|
||||
|
||||
def lista(item):
|
||||
logger.info()
|
||||
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
|
||||
patron = '<a href="([^"]+)"><img src="([^"]+)" alt="ver ([^"]+) online'
|
||||
matches = scrapertools.find_multiple_matches(data, patron)
|
||||
for link, thumbnail, name in matches:
|
||||
itemlist.append(item.clone(title=name, url=host+link, thumbnail=host+thumbnail, action="temporada"))
|
||||
return itemlist
|
||||
|
||||
def temporada(item):
|
||||
logger.info()
|
||||
|
||||
itemlist = []
|
||||
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
|
||||
logger.info("preon,:"+data)
|
||||
patron = '<a href="([^"]+)"><div class="item-temporada"><img alt=".+?" src="([^"]+)"><div .+?>Ver ([^"]+)<\/div><\/a>'
|
||||
matches = scrapertools.find_multiple_matches(data, patron)
|
||||
for link,thumbnail,name in matches:
|
||||
itemlist.append(item.clone(title=name, url=host+link, thumbnail=host+thumbnail,action="episodios",context=autoplay.context))
|
||||
return itemlist
|
||||
|
||||
def episodios(item):
|
||||
logger.info()
|
||||
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
|
||||
patron_caps = 'href="http:.+?\/mundiseries.+?com([^"]+)" alt="([^"]+) Capitulo ([^"]+) Temporada ([^"]+)"'
|
||||
matches = scrapertools.find_multiple_matches(data, patron_caps)
|
||||
patron_show='<h1 class="h-responsive center">.+?'
|
||||
patron_show+='<font color=".+?>([^"]+)<\/a><\/font>'
|
||||
show = scrapertools.find_single_match(data,patron_show)
|
||||
for link, name,cap,temp in matches:
|
||||
if '|' in cap:
|
||||
cap = cap.replace('|','')
|
||||
if '|' in temp:
|
||||
temp = temp.replace('|','')
|
||||
if '|' in name:
|
||||
name = name.replace('|','')
|
||||
title = "%sx%s %s"%(temp, str(cap).zfill(2),name)
|
||||
url=host+link
|
||||
itemlist.append(Item(channel=item.channel, action="findvideos",
|
||||
title=title, url=url, show=show))
|
||||
|
||||
if config.get_videolibrary_support() and len(itemlist) > 0:
|
||||
itemlist.append(Item(channel=item.channel, title="Añadir Temporada/Serie a la biblioteca de Kodi", url=item.url,
|
||||
action="add_serie_to_library", extra="episodios", show=show))
|
||||
return itemlist
|
||||
|
||||
def findvideos(item):
    """Collect playable video links from the episode page at ``item.url``."""
    logger.info()

    itemlist = []
    id = ""
    type = ""
    data = httptools.downloadpage(item.url).data
    # NOTE(review): itemlist is still empty at this point, so it2 is always
    # empty and the "###" suffix loop below never executes — confirm whether
    # this call was meant to run after find_video_items. Order kept as-is.
    it2 = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    itemlist.extend(servertools.find_video_items(data=data))

    for server_item in it2:
        if "###" not in server_item.url:
            server_item.url += "###" + id + ";" + type

    for videoitem in itemlist:
        videoitem.channel = item.channel

    autoplay.start(itemlist, item)
    return itemlist
|
||||
@@ -1,30 +0,0 @@
|
||||
{
|
||||
"id": "pelisencasa",
|
||||
"name": "PelisEnCasa",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": ["lat"],
|
||||
"thumbnail": "https://s14.postimg.org/iqiq0bxn5/pelisencasa.png",
|
||||
"banner": "https://s18.postimg.org/j775ehbg9/pelisencasa_banner.png",
|
||||
"categories": [
|
||||
"movie"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_newest_peliculas",
|
||||
"type": "bool",
|
||||
"label": "Incluir en Novedades - Peliculas",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_infantiles",
|
||||
"type": "bool",
|
||||
"label": "Incluir en Novedades - Infantiles",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -1,217 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import re
|
||||
|
||||
from core import httptools
|
||||
from core import scrapertools
|
||||
from core import tmdb
|
||||
from core.item import Item
|
||||
from lib import jsunpack
|
||||
from platformcode import config, logger
|
||||
|
||||
# Base URL of the site this channel scrapes.
host = 'http://pelisencasa.net'

# Genre name (as scraped from the site's menu) -> thumbnail URL.
tgenero = {"Comedia": "https://s7.postimg.org/ne9g9zgwb/comedia.png",
           "Suspense": "https://s13.postimg.org/wmw6vl1cn/suspenso.png",
           "Drama": "https://s16.postimg.org/94sia332d/drama.png",
           "Acción": "https://s3.postimg.org/y6o9puflv/accion.png",
           "Aventura": "https://s10.postimg.org/6su40czih/aventura.png",
           "Romance": "https://s15.postimg.org/fb5j8cl63/romance.png",
           "Animación": "https://s13.postimg.org/5on877l87/animacion.png",
           "Ciencia ficción": "https://s9.postimg.org/diu70s7j3/cienciaficcion.png",
           "Terror": "https://s7.postimg.org/yi0gij3gb/terror.png",
           "Documental": "https://s16.postimg.org/7xjj4bmol/documental.png",
           "Música": "https://s29.postimg.org/bbxmdh9c7/musical.png",
           "Western": "https://s23.postimg.org/lzyfbjzhn/western.png",
           "Fantasía": "https://s13.postimg.org/65ylohgvb/fantasia.png",
           "Guerra": "https://s23.postimg.org/71itp9hcr/belica.png",
           "Misterio": "https://s1.postimg.org/w7fdgf2vj/misterio.png",
           "Crimen": "https://s4.postimg.org/6z27zhirx/crimen.png",
           "Historia": "https://s15.postimg.org/fmc050h1n/historia.png",
           "Familia": "https://s7.postimg.org/6s7vdhqrf/familiar.png"}

# Lowercase letter (or '#') -> thumbnail URL for the A-Z menu.
tletras = {'#': 'https://s32.postimg.org/drojt686d/image.png',
           'a': 'https://s32.postimg.org/llp5ekfz9/image.png',
           'b': 'https://s32.postimg.org/y1qgm1yp1/image.png',
           'c': 'https://s32.postimg.org/vlon87gmd/image.png',
           'd': 'https://s32.postimg.org/3zlvnix9h/image.png',
           'e': 'https://s32.postimg.org/bgv32qmsl/image.png',
           'f': 'https://s32.postimg.org/y6u7vq605/image.png',
           'g': 'https://s32.postimg.org/9237ib6jp/image.png',
           'h': 'https://s32.postimg.org/812yt6pk5/image.png',
           'i': 'https://s32.postimg.org/6nbbxvqat/image.png',
           'j': 'https://s32.postimg.org/axpztgvdx/image.png',
           'k': 'https://s32.postimg.org/976yrzdut/image.png',
           'l': 'https://s32.postimg.org/fmal2e9yd/image.png',
           'm': 'https://s32.postimg.org/m19lz2go5/image.png',
           'n': 'https://s32.postimg.org/b2ycgvs2t/image.png',
           'o': 'https://s32.postimg.org/c6igsucpx/image.png',
           'p': 'https://s32.postimg.org/jnro82291/image.png',
           'q': 'https://s32.postimg.org/ve5lpfv1h/image.png',
           'r': 'https://s32.postimg.org/nmovqvqw5/image.png',
           's': 'https://s32.postimg.org/zd2t89jol/image.png',
           't': 'https://s32.postimg.org/wk9lo8jc5/image.png',
           'u': 'https://s32.postimg.org/w8s5bh2w5/image.png',
           'v': 'https://s32.postimg.org/e7dlrey91/image.png',
           'w': 'https://s32.postimg.org/fnp49k15x/image.png',
           'x': 'https://s32.postimg.org/dkep1w1d1/image.png',
           'y': 'https://s32.postimg.org/um7j3zg85/image.png',
           'z': 'https://s32.postimg.org/jb4vfm9d1/image.png'}
|
||||
|
||||
|
||||
def mainlist(item):
    """Top-level channel menu: all movies, genre menu, A-Z menu and search."""
    logger.info()

    todas_thumb = 'https://s18.postimg.org/fwvaeo6qh/todas.png'
    generos_thumb = 'https://s3.postimg.org/5s9jg2wtf/generos.png'
    letras_thumb = 'https://s17.postimg.org/fwi1y99en/a-z.png'
    buscar_thumb = 'https://s30.postimg.org/pei7txpa9/buscar.png'

    itemlist = [
        item.clone(title="Todas", action="lista", thumbnail=todas_thumb,
                   fanart=todas_thumb, url=host),
        item.clone(title="Generos", action="seccion", thumbnail=generos_thumb,
                   fanart=generos_thumb, url=host, extra='generos'),
        item.clone(title="Alfabetico", action="seccion", thumbnail=letras_thumb,
                   fanart=letras_thumb, url=host, extra='letras'),
        item.clone(title="Buscar", action="search", url=host + '/?s=',
                   thumbnail=buscar_thumb, fanart=buscar_thumb),
    ]

    return itemlist
|
||||
|
||||
|
||||
def lista(item):
    """List the movies found at ``item.url``.

    Uses one scraping pattern for the poster-grid pages and another for
    the table-based alphabetical pages; appends a pagination item when a
    "next" link is present.
    """
    logger.info()

    itemlist = []
    data = httptools.downloadpage(item.url).data

    if item.extra != 'letras':
        patron = '<li class="TPostMv">.*?<a href="(.*?)"><div class="Image">.*?src="(.*?)\?resize=.*?".*?class="Title">(.*?)<\/h2>.*?'
        patron += '<span class="Year">(.*?)<\/span>.*?<span class="Qlty">(.*?)<\/span><\/p><div class="Description"><p>(.*?)<\/p>'
    else:
        patron = '<td class="MvTbImg"> <a href="(.*?)".*?src="(.*?)\?resize=.*?".*?<strong>(.*?)<\/strong> <\/a><\/td><td>(.*?)<\/td><td>.*?'
        patron += 'class="Qlty">(.*?)<\/span><\/p><\/td><td>(.*?)<\/td><td>'

    for url, thumbnail, contentTitle, year, calidad, plot in re.compile(patron, re.DOTALL).findall(data):
        itemlist.append(
            Item(channel=item.channel, action='findvideos',
                 title=contentTitle + ' (' + calidad + ')',
                 url=url, thumbnail=thumbnail, plot=plot, fanart='',
                 contentTitle=contentTitle, infoLabels={'year': year}))

    # Enrich the items (plot, fanart, ratings...) from TMDB.
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # Pagination
    if itemlist:
        next_page = scrapertools.find_single_match(data, '<a class="nextpostslink" rel="next" href="(.*?)">')
        if next_page != '':
            itemlist.append(Item(channel=item.channel, action="lista", title='Siguiente >>>', url=next_page,
                                 thumbnail='https://s16.postimg.org/9okdu7hhx/siguiente.png'))
    return itemlist
|
||||
|
||||
|
||||
def seccion(item):
    """List either the genre menu or the A-Z letter menu.

    ``item.extra`` selects the section ('generos' or letters); each entry
    links back into :func:`lista`.
    """
    logger.info()

    itemlist = []
    data = httptools.downloadpage(item.url).data

    if item.extra == 'generos':
        patron = 'menu-item-object-category menu-item-.*?"><a href="(.*?)">(.*?)<\/a><\/li>'
    else:
        patron = '<li><a href="(.*?\/letter\/.*?)">(.*?)<\/a><\/li>'

    for url, title in re.compile(patron, re.DOTALL).findall(data):
        # Pick a themed thumbnail when one exists for this genre/letter.
        if item.extra == 'generos' and title in tgenero:
            thumbnail = tgenero[title]
        elif title.lower() in tletras:
            thumbnail = tletras[title.lower()]
        else:
            thumbnail = ''

        itemlist.append(
            Item(channel=item.channel, action="lista", title=title, fulltitle=item.title,
                 url=url, thumbnail=thumbnail, fanart='', extra=item.extra))
    return itemlist
|
||||
|
||||
|
||||
def findvideos(item):
    """Resolve the playable links for the movie page at ``item.url``."""
    logger.info()

    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = 'class="Num">.*?<\/span>.*?href="(.*?)" class="Button STPb">.*?<\/a>.*?<span>(.*?)<\/span><\/td><td><span>(.*?)<\/span><\/td><td><span>.*?<\/span>'
    infoLabels = item.infoLabels

    # One row per mirror: (player url, server name, language).
    for url, servidor, idioma in re.compile(patron, re.DOTALL).findall(data):
        itemlist.extend(get_video_urls(
            item.clone(url=url, servidor=servidor, idioma=idioma, infoLabels=infoLabels)))

    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
                 action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))

    # Keep the trailer entry just above the last item (the videolibrary
    # entry, when it was added).
    itemlist.insert(len(itemlist) - 1, item.clone(channel='trailertools', action='buscartrailer',
                                                  title='[COLOR orange]Trailer en Youtube[/COLOR]'))

    return itemlist
|
||||
|
||||
|
||||
def get_video_urls(item):
    """Unpack the player's packed JS and return one playable item per quality."""
    logger.info()

    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = scrapertools.find_single_match(data, '<script type="text\/javascript">(.*?)<\/script>')
    # The sources are hidden inside p.a.c.k.e.d javascript.
    data = jsunpack.unpack(data)

    patron = '"file":"(.*?)","label":"(.*?)","type":"video.*?"}'
    subtitle = scrapertools.find_single_match(data, 'tracks:\[{"file":"(.*?)","label":".*?","kind":"captions"}')

    for url, calidad in re.compile(patron, re.DOTALL).findall(data):
        # The site's own hoster is reported as "PELISENCASA"; show "Directo".
        if item.servidor == 'PELISENCASA':
            item.servidor = 'Directo'
        title = item.contentTitle + ' (' + item.idioma + ')' + ' (' + calidad + ')' + ' (' + item.servidor + ')'
        itemlist.append(item.clone(title=title, url=url, calidad=calidad, action='play', subtitle=subtitle))

    return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
    """Search entry point: append the query to the base URL and list results.

    Returns an empty list for an empty query.
    """
    logger.info()

    item.url += texto.replace(" ", "+")

    if texto != '':
        return lista(item)
    return []
|
||||
|
||||
|
||||
def newest(categoria):
    """Return fresh entries for the global "Novedades" listing.

    Any scraping error is logged and an empty list returned, so a broken
    channel does not break the aggregated novelty view.
    """
    logger.info()

    item = Item()
    try:
        if categoria == 'peliculas':
            item.url = host
        elif categoria == 'infantiles':
            item.url = host + '/category/animacion/'
        itemlist = lista(item)
        # Drop the pagination entry; the aggregated view has no paging.
        if itemlist[-1].title == 'Siguiente >>>':
            itemlist.pop()
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist
|
||||
@@ -215,18 +215,20 @@ def search(item, texto):
|
||||
def findvideos(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
duplicados = []
|
||||
|
||||
data = get_source(item.url)
|
||||
patron = '<div class=TPlayerTbCurrent id=(.*?)><iframe.*?src=(.*?) frameborder'
|
||||
patron = '<div class=TPlayer.*?\s+id=(.*?)><iframe width=560 height=315 src=(.*?) frameborder=0'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for opt, urls_page in matches:
|
||||
language = scrapertools.find_single_match (data,'data-TPlayerNv=%s><span>Opción <strong>.*?'
|
||||
logger.debug ('option: %s' % opt)
|
||||
language = scrapertools.find_single_match (data,'data-TPlayerNv=%s><span>Opción <strong>.'
|
||||
'<\/strong><\/span>.*?<span>(.*?)<\/span'%opt)
|
||||
data = httptools.downloadpage(urls_page).data
|
||||
servers = scrapertools.find_multiple_matches(data,'<button id="(.*?)"')
|
||||
|
||||
video_data = httptools.downloadpage(urls_page).data
|
||||
servers = scrapertools.find_multiple_matches(video_data,'<button id="(.*?)"')
|
||||
for server in servers:
|
||||
quality = item.quality
|
||||
info_urls = urls_page.replace('embed','get')
|
||||
video_info=httptools.downloadpage(info_urls+'/'+server).data
|
||||
video_info = jsontools.load(video_info)
|
||||
@@ -238,8 +240,13 @@ def findvideos(item):
|
||||
url = 'https://'+video_server+'/embed/'+video_id
|
||||
else:
|
||||
url = 'https://'+video_server+'/e/'+video_id
|
||||
title = item.title
|
||||
itemlist.append(item.clone(title=title, url=url, action='play', language=language))
|
||||
title = item.contentTitle + ' [%s] [%s]'%(quality, language)
|
||||
itemlist.append(item.clone(title=title,
|
||||
url=url,
|
||||
action='play',
|
||||
language=language,
|
||||
quality=quality
|
||||
))
|
||||
itemlist = servertools.get_servers_itemlist(itemlist)
|
||||
return itemlist
|
||||
|
||||
|
||||
@@ -176,7 +176,7 @@ def lista(item):
|
||||
patron = '<img.*?width="147" heigh="197".*?src="([^"]+)".*?>.*?.<i class="icon online-play"><\/i>.*?.<h2 ' \
|
||||
'class="title title-.*?">.*?.<a href="([^"]+)" title="([^"]+)">.*?>'
|
||||
actual = scrapertools.find_single_match(data,
|
||||
'<a href="http:\/\/www.pelisplus.tv\/.*?\/pag-([^p]+)pag-2" '
|
||||
'<a href="https:\/\/www.pelisplus.tv\/.*?\/pag-([^p]+)pag-2" '
|
||||
'class="page bicon last"><<\/a>')
|
||||
else:
|
||||
patron = '<img data-original="([^"]+)".*?width="147" heigh="197".*?src=.*?>.*?\n<i class="icon ' \
|
||||
@@ -428,13 +428,17 @@ def get_vip(url):
|
||||
itemlist =[]
|
||||
url= url.replace('reproductor','vip')
|
||||
data = httptools.downloadpage(url).data
|
||||
patron = '<a href="(.*?)"> '
|
||||
video_urls = scrapertools.find_multiple_matches(data,'<a href="(.*?)">')
|
||||
video_urls = scrapertools.find_multiple_matches(data,'<a href="(.*?)".*?>')
|
||||
for item in video_urls:
|
||||
id, tipo, lang= scrapertools.find_single_match(item,'plus\/(\d+)\/.*?=(\d+).*?=(.*)')
|
||||
new_url = 'https://www.elreyxhd.com/pelisplus.php?id=%s&tipo=%s&idioma=%s' % (id, tipo, lang)
|
||||
data=httptools.downloadpage(new_url, follow_redirects=False).headers
|
||||
itemlist.extend(servertools.find_video_items(data=str(data)))
|
||||
if 'elreyxhd' in item:
|
||||
if 'plus'in item:
|
||||
id, tipo, lang= scrapertools.find_single_match(item,'plus\/(\d+)\/.*?=(\d+).*?=(.*)')
|
||||
new_url = 'https://www.elreyxhd.com/pelisplus.php?id=%s&tipo=%s&idioma=%s' % (id, tipo, lang)
|
||||
else:
|
||||
id = scrapertools.find_single_match(item,'episodes\/(\d+)')
|
||||
new_url = 'https://www.elreyxhd.com/samir.php?id=%s&tipo=capitulo&idioma=latino&x=&sv=' % id
|
||||
data=httptools.downloadpage(new_url, follow_redirects=False).headers
|
||||
itemlist.extend(servertools.find_video_items(data=str(data)))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
@@ -25,7 +25,6 @@ color1, color2, color3 = ['0xFFB10021', '0xFFB10021', '0xFFB10004']
|
||||
def login():
|
||||
url_origen = "https://www.plusdede.com/login?popup=1"
|
||||
data = httptools.downloadpage(url_origen, follow_redirects=True).data
|
||||
logger.debug("dataPLUSDEDE=" + data)
|
||||
if re.search(r'(?i)%s' % config.get_setting("plusdedeuser", "plusdede"), data):
|
||||
return True
|
||||
|
||||
@@ -34,12 +33,10 @@ def login():
|
||||
post = "_token=" + str(token) + "&email=" + str(
|
||||
config.get_setting("plusdedeuser", "plusdede")) + "&password=" + str(
|
||||
config.get_setting("plusdedepassword", "plusdede")) + "&app=2131296469"
|
||||
# logger.debug("dataPLUSDEDE_POST="+post)
|
||||
url = "https://www.plusdede.com/"
|
||||
headers = {"Referer": url, "X-Requested-With": "XMLHttpRequest", "X-CSRF-TOKEN": token}
|
||||
data = httptools.downloadpage("https://www.plusdede.com/login", post=post, headers=headers,
|
||||
replace_headers=False).data
|
||||
logger.debug("PLUSDEDE_DATA=" + data)
|
||||
if "redirect" in data:
|
||||
return True
|
||||
else:
|
||||
@@ -183,7 +180,6 @@ def generos(item):
|
||||
tipo = item.url.replace("https://www.plusdede.com/", "")
|
||||
# Descarga la pagina
|
||||
data = httptools.downloadpage(item.url).data
|
||||
logger.debug("data=" + data)
|
||||
|
||||
# Extrae las entradas (carpetas)
|
||||
data = scrapertools.find_single_match(data,
|
||||
@@ -198,7 +194,6 @@ def generos(item):
|
||||
plot = ""
|
||||
# https://www.plusdede.com/pelis?genre_id=1
|
||||
url = "https://www.plusdede.com/" + tipo + "?genre_id=" + id_genere
|
||||
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
|
||||
itemlist.append(
|
||||
Item(channel=item.channel, action="peliculas", title=title, url=url, thumbnail=thumbnail, plot=plot,
|
||||
fulltitle=title))
|
||||
@@ -229,11 +224,9 @@ def buscar(item):
|
||||
# Descarga la pagina
|
||||
headers = {"X-Requested-With": "XMLHttpRequest"}
|
||||
data = httptools.downloadpage(item.url, headers=headers).data
|
||||
logger.debug("data=" + data)
|
||||
|
||||
# Extrae las entradas (carpetas)
|
||||
json_object = jsontools.load(data)
|
||||
logger.debug("content=" + json_object["content"])
|
||||
data = json_object["content"]
|
||||
|
||||
return parse_mixed_results(item, data)
|
||||
@@ -248,7 +241,6 @@ def parse_mixed_results(item, data):
|
||||
patron += '.*?<div class="year">([^<]+)</div>+'
|
||||
patron += '.*?<div class="value"><i class="fa fa-star"></i> ([^<]+)</div>'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
logger.debug("PARSE_DATA:" + data)
|
||||
if item.tipo == "lista":
|
||||
following = scrapertools.find_single_match(data, '<div class="follow-lista-buttons ([^"]+)">')
|
||||
data_id = scrapertools.find_single_match(data, 'data-model="10" data-id="([^"]+)">')
|
||||
@@ -286,7 +278,6 @@ def parse_mixed_results(item, data):
|
||||
sectionStr = "docu"
|
||||
referer = urlparse.urljoin(item.url, scrapedurl)
|
||||
url = urlparse.urljoin(item.url, scrapedurl)
|
||||
logger.debug("PELII_title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
|
||||
if item.tipo != "series":
|
||||
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, extra=referer, url=url,
|
||||
thumbnail=thumbnail, plot=plot, fulltitle=fulltitle, fanart=fanart,
|
||||
@@ -294,7 +285,6 @@ def parse_mixed_results(item, data):
|
||||
else:
|
||||
referer = item.url
|
||||
url = urlparse.urljoin(item.url, scrapedurl)
|
||||
logger.debug("SERIE_title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
|
||||
if item.tipo != "pelis":
|
||||
itemlist.append(Item(channel=item.channel, action="episodios", title=title, extra=referer, url=url,
|
||||
thumbnail=thumbnail, plot=plot, fulltitle=fulltitle, show=title, fanart=fanart,
|
||||
@@ -304,7 +294,6 @@ def parse_mixed_results(item, data):
|
||||
'<div class="onclick load-more-icon no-json" data-action="replace" data-url="([^"]+)">')
|
||||
if next_page != "":
|
||||
url = urlparse.urljoin("https://www.plusdede.com", next_page).replace("amp;", "")
|
||||
logger.debug("URL_SIGUIENTE:" + url)
|
||||
itemlist.append(
|
||||
Item(channel=item.channel, action="pag_sig", token=item.token, title=">> Página siguiente",
|
||||
extra=item.extra, url=url))
|
||||
@@ -323,7 +312,6 @@ def siguientes(item): # No utilizada
|
||||
|
||||
# Descarga la pagina
|
||||
data = httptools.downloadpage(item.url).data
|
||||
logger.debug("data=" + data)
|
||||
|
||||
# Extrae las entradas (carpetas)
|
||||
bloque = scrapertools.find_single_match(data, '<h2>Siguiendo</h2>(.*?)<div class="box">')
|
||||
@@ -358,7 +346,6 @@ def siguientes(item): # No utilizada
|
||||
Item(channel=item.channel, action="episodio", title=title, url=url, thumbnail=thumbnail, plot=plot,
|
||||
fulltitle=title, show=title, fanart=fanart, extra=session + "|" + episode))
|
||||
|
||||
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
|
||||
|
||||
return itemlist
|
||||
|
||||
@@ -369,7 +356,6 @@ def episodio(item):
|
||||
|
||||
# Descarga la pagina
|
||||
data = httptools.downloadpage(item.url).data
|
||||
# logger.debug("data="+data)
|
||||
|
||||
session = str(int(item.extra.split("|")[0]))
|
||||
episode = str(int(item.extra.split("|")[1]))
|
||||
@@ -377,7 +363,6 @@ def episodio(item):
|
||||
matchestemporadas = re.compile(patrontemporada, re.DOTALL).findall(data)
|
||||
|
||||
for bloque_episodios in matchestemporadas:
|
||||
logger.debug("bloque_episodios=" + bloque_episodios)
|
||||
|
||||
# Extrae los episodios
|
||||
patron = '<span class="title defaultPopup" href="([^"]+)"><span class="number">' + episode + ' </span>([^<]+)</span>(\s*</div>\s*<span[^>]*><span[^>]*>[^<]*</span><span[^>]*>[^<]*</span></span><div[^>]*><button[^>]*><span[^>]*>[^<]*</span><span[^>]*>[^<]*</span></button><div class="action([^"]*)" data-action="seen">)?'
|
||||
@@ -401,7 +386,6 @@ def episodio(item):
|
||||
itemlist.append(
|
||||
Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot,
|
||||
fulltitle=title, fanart=item.fanart, show=item.show))
|
||||
logger.debug("Abrimos title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
|
||||
|
||||
itemlist2 = []
|
||||
for capitulo in itemlist:
|
||||
@@ -415,11 +399,9 @@ def peliculas(item):
|
||||
# Descarga la pagina
|
||||
headers = {"X-Requested-With": "XMLHttpRequest"}
|
||||
data = httptools.downloadpage(item.url, headers=headers).data
|
||||
logger.debug("data_DEF_PELICULAS=" + data)
|
||||
|
||||
# Extrae las entradas (carpetas)
|
||||
json_object = jsontools.load(data)
|
||||
logger.debug("html=" + json_object["content"])
|
||||
data = json_object["content"]
|
||||
|
||||
return parse_mixed_results(item, data)
|
||||
@@ -432,24 +414,18 @@ def episodios(item):
|
||||
# Descarga la pagina
|
||||
idserie = ''
|
||||
data = httptools.downloadpage(item.url).data
|
||||
# logger.debug("dataEPISODIOS="+data)
|
||||
patrontemporada = '<ul.*?<li class="season-header" >([^<]+)<(.*?)\s+</ul>'
|
||||
matchestemporadas = re.compile(patrontemporada, re.DOTALL).findall(data)
|
||||
logger.debug(matchestemporadas)
|
||||
idserie = scrapertools.find_single_match(data, 'data-model="5" data-id="(\d+)"')
|
||||
token = scrapertools.find_single_match(data, '_token" content="([^"]+)"')
|
||||
if (config.get_platform().startswith("xbmc") or config.get_platform().startswith("kodi")):
|
||||
itemlist.append(Item(channel=item.channel, action="infosinopsis", title="INFO / SINOPSIS", url=item.url,
|
||||
thumbnail=item.thumbnail, fanart=item.fanart, folder=False))
|
||||
for nombre_temporada, bloque_episodios in matchestemporadas:
|
||||
logger.debug("nombre_temporada=" + nombre_temporada)
|
||||
logger.debug("bloque_episodios=" + bloque_episodios)
|
||||
logger.debug("id_serie=" + idserie)
|
||||
# Extrae los episodios
|
||||
patron_episodio = '<li><a href="#"(.*?)</a></li>'
|
||||
# patron = '<li><a href="#" data-id="([^"]*)".*?data-href="([^"]+)">\s*<div class="name">\s*<span class="num">([^<]+)</span>\s*([^<]+)\s*</div>.*?"show-close-footer episode model([^"]+)"'
|
||||
matches = re.compile(patron_episodio, re.DOTALL).findall(bloque_episodios)
|
||||
# logger.debug(matches)
|
||||
for data_episodio in matches:
|
||||
|
||||
scrapeid = scrapertools.find_single_match(data_episodio, '<li><a href="#" data-id="([^"]*)"')
|
||||
@@ -462,7 +438,6 @@ def episodios(item):
|
||||
title = nombre_temporada.replace("Temporada ", "").replace("Extras de la serie", "Extras 0").replace(" ",
|
||||
"") + "x" + numero + " " + scrapertools.htmlclean(
|
||||
scrapedtitle)
|
||||
logger.debug("CAP_VISTO:" + visto)
|
||||
if visto.strip() == "seen":
|
||||
title = "[visto] " + title
|
||||
|
||||
@@ -478,7 +453,6 @@ def episodios(item):
|
||||
Item(channel=item.channel, action="findvideos", nom_serie=item.title, tipo="5", title=title, url=url,
|
||||
thumbnail=thumbnail, plot=plot, fulltitle=title, fanart=fanart, show=item.show))
|
||||
|
||||
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
|
||||
|
||||
if config.get_videolibrary_support():
|
||||
# con año y valoracion la serie no se puede actualizar correctamente, si ademas cambia la valoracion, creara otra carpeta
|
||||
@@ -540,7 +514,6 @@ def parse_listas(item, bloque_lista):
|
||||
thumbnail = ""
|
||||
itemlist.append(
|
||||
Item(channel=item.channel, action="peliculas", token=item.token, tipo="lista", title=title, url=url))
|
||||
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "], tipo =[lista]")
|
||||
|
||||
nextpage = scrapertools.find_single_match(bloque_lista,
|
||||
'<div class="onclick load-more-icon no-json" data-action="replace" data-url="([^"]+)"')
|
||||
@@ -569,13 +542,10 @@ def listas(item):
|
||||
patron = '<div class="content">\s*<h2>Listas populares(.*?)</div>\s*</div>\s*</div>\s*</div>\s*</div>'
|
||||
|
||||
data = httptools.downloadpage(item.url).data
|
||||
logger.debug("dataSINHEADERS=" + data)
|
||||
|
||||
item.token = scrapertools.find_single_match(data, '_token" content="([^"]+)"').strip()
|
||||
logger.debug("token_LISTA_" + item.token)
|
||||
|
||||
bloque_lista = scrapertools.find_single_match(data, patron)
|
||||
logger.debug("bloque_LISTA" + bloque_lista)
|
||||
|
||||
return parse_listas(item, bloque_lista)
|
||||
|
||||
@@ -585,7 +555,6 @@ def lista_sig(item):
|
||||
|
||||
headers = {"X-Requested-With": "XMLHttpRequest"}
|
||||
data = httptools.downloadpage(item.url, headers=headers).data
|
||||
logger.debug("data=" + data)
|
||||
|
||||
return parse_listas(item, data)
|
||||
|
||||
@@ -595,7 +564,6 @@ def pag_sig(item):
|
||||
|
||||
headers = {"X-Requested-With": "XMLHttpRequest"}
|
||||
data = httptools.downloadpage(item.url, headers=headers).data
|
||||
logger.debug("data=" + data)
|
||||
|
||||
return parse_mixed_results(item, data)
|
||||
|
||||
@@ -605,8 +573,6 @@ def findvideos(item, verTodos=False):
|
||||
|
||||
# Descarga la pagina
|
||||
data = httptools.downloadpage(item.url).data
|
||||
logger.info("URL:" + item.url + " DATA=" + data)
|
||||
# logger.debug("data="+data)
|
||||
|
||||
data_model = scrapertools.find_single_match(data, 'data-model="([^"]+)"')
|
||||
data_id = scrapertools.find_single_match(data, 'data-id="([^"]+)"')
|
||||
@@ -616,7 +582,6 @@ def findvideos(item, verTodos=False):
|
||||
url = "https://www.plusdede.com/aportes/" + data_model + "/" + data_id + "?popup=1"
|
||||
|
||||
data = httptools.downloadpage(url).data
|
||||
logger.debug("URL:" + url + " dataLINKS=" + data)
|
||||
token = scrapertools.find_single_match(data, '_token" content="([^"]+)"')
|
||||
|
||||
patron = 'target="_blank" (.*?)</a>'
|
||||
@@ -628,7 +593,6 @@ def findvideos(item, verTodos=False):
|
||||
itemlist.append(Item(channel=item.channel, action="infosinopsis", title="INFO / SINOPSIS", url=item.url,
|
||||
thumbnail=item.thumbnail, fanart=item.fanart, folder=False))
|
||||
|
||||
logger.debug("TRAILER_YOUTUBE:" + trailer)
|
||||
itemlist.append(Item(channel=item.channel, action="play", title="TRAILER", url=item.url, trailer=trailer,
|
||||
thumbnail=item.thumbnail, fanart=item.fanart, folder=False))
|
||||
|
||||
@@ -637,9 +601,6 @@ def findvideos(item, verTodos=False):
|
||||
item.channel) # 0:no, 1:valoracion, 2:idioma, 3:calidad, 4:idioma+calidad, 5:idioma+valoracion, 6:idioma+calidad+valoracion
|
||||
showlinks = config.get_setting("plusdedeshowlinks", item.channel) # 0:todos, 1:ver online, 2:descargar
|
||||
|
||||
# sortlinks = int(sortlinks) if sortlinks != '' and sortlinks !="No" else 0
|
||||
# showlinks = int(showlinks) if showlinks != '' and showlinks !="No" else 0
|
||||
|
||||
if sortlinks != '' and sortlinks != "No":
|
||||
sortlinks = int(sortlinks)
|
||||
else:
|
||||
@@ -651,14 +612,13 @@ def findvideos(item, verTodos=False):
|
||||
showlinks = 0
|
||||
|
||||
for match in matches:
|
||||
# logger.debug("match="+match)
|
||||
|
||||
jdown = scrapertools.find_single_match(match, '<span class="fa fa-download"></span>([^<]+)')
|
||||
if (showlinks == 1 and jdown != '') or (
|
||||
showlinks == 2 and jdown == ''): # Descartar enlaces veronline/descargar
|
||||
continue
|
||||
idioma_1 = ""
|
||||
idiomas = re.compile('<img src="https://cdn.plusdede.com/images/flags/([^"]+).png', re.DOTALL).findall(match)
|
||||
idiomas = re.compile('<img src="https://cd.*?plusdede.com/images/flags/([^"]+).png', re.DOTALL).findall(match)
|
||||
idioma_0 = idiomas[0]
|
||||
if len(idiomas) > 1:
|
||||
idioma_1 = idiomas[1]
|
||||
@@ -670,16 +630,12 @@ def findvideos(item, verTodos=False):
|
||||
calidad_video = scrapertools.find_single_match(match,
|
||||
'<span class="fa fa-video-camera"></span>(.*?)</div>').replace(
|
||||
" ", "").replace("\n", "")
|
||||
logger.debug("calidad_video=" + calidad_video)
|
||||
calidad_audio = scrapertools.find_single_match(match,
|
||||
'<span class="fa fa-headphones"></span>(.*?)</div>').replace(
|
||||
" ", "").replace("\n", "")
|
||||
logger.debug("calidad_audio=" + calidad_audio)
|
||||
|
||||
thumb_servidor = scrapertools.find_single_match(match, '<img src="([^"]+)">')
|
||||
logger.debug("thumb_servidor=" + thumb_servidor)
|
||||
nombre_servidor = scrapertools.find_single_match(thumb_servidor, "hosts/([^\.]+).png")
|
||||
logger.debug("nombre_servidor=" + nombre_servidor)
|
||||
|
||||
if jdown != '':
|
||||
title = "Download " + nombre_servidor + " (" + idioma + ") (Calidad " + calidad_video.strip() + ", audio " + calidad_audio.strip() + ")"
|
||||
@@ -696,7 +652,6 @@ def findvideos(item, verTodos=False):
|
||||
url = urlparse.urljoin(item.url, scrapertools.find_single_match(match, 'href="([^"]+)"'))
|
||||
thumbnail = thumb_servidor
|
||||
plot = ""
|
||||
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
|
||||
if sortlinks > 0:
|
||||
# orden1 para dejar los "downloads" detras de los "ver" al ordenar
|
||||
# orden2 segun configuración
|
||||
@@ -788,13 +743,10 @@ def play(item):
|
||||
headers = {'Referer': item.extra}
|
||||
|
||||
data = httptools.downloadpage(item.url, headers=headers).data
|
||||
# logger.debug("dataLINK="+data)
|
||||
url = scrapertools.find_single_match(data,
|
||||
'<a href="([^"]+)" target="_blank"><button class="btn btn-primary">visitar enlace</button>')
|
||||
url = urlparse.urljoin("https://www.plusdede.com", url)
|
||||
# logger.debug("DATA_LINK_FINAL:"+url)
|
||||
|
||||
logger.debug("URL_PLAY:" + url)
|
||||
headers = {'Referer': item.url}
|
||||
media_url = httptools.downloadpage(url, headers=headers, follow_redirects=False).headers.get("location")
|
||||
# logger.info("media_url="+media_url)
|
||||
@@ -808,7 +760,6 @@ def play(item):
|
||||
videoitem.channel = item.channel
|
||||
|
||||
# Marcar como visto
|
||||
logger.debug(item)
|
||||
checkseen(item)
|
||||
|
||||
return itemlist
|
||||
@@ -827,7 +778,6 @@ def checkseen(item):
|
||||
tipo_str = "pelis"
|
||||
headers = {"Referer": "https://www.plusdede.com/" + tipo_str, "X-Requested-With": "XMLHttpRequest",
|
||||
"X-CSRF-TOKEN": item.token}
|
||||
logger.debug("Entrando a checkseen " + url_temp + item.token)
|
||||
data = httptools.downloadpage(url_temp, post="id=" + item.idtemp, headers=headers, replace_headers=True).data
|
||||
return True
|
||||
|
||||
@@ -836,7 +786,6 @@ def infosinopsis(item):
|
||||
logger.info()
|
||||
|
||||
data = httptools.downloadpage(item.url).data
|
||||
logger.debug("SINOPSISdata=" + data)
|
||||
|
||||
scrapedtitle = scrapertools.find_single_match(data, '<div class="media-title">([^<]+)</div>')
|
||||
scrapedvalue = scrapertools.find_single_match(data, '<span class="value">([^<]+)</span>')
|
||||
@@ -845,11 +794,8 @@ def infosinopsis(item):
|
||||
scrapedduration = scrapertools.htmlclean(scrapertools.find_single_match(data,
|
||||
'<strong>Duración</strong>\s*<div class="mini-content">([^<]+)</div>').strip().replace(
|
||||
" ", "").replace("\n", ""))
|
||||
logger.debug(scrapedduration)
|
||||
scrapedplot = scrapertools.find_single_match(data, '<div class="plot expandable">([^<]+)<div').strip()
|
||||
logger.debug("SINOPSISdataplot=" + scrapedplot)
|
||||
generos = scrapertools.find_single_match(data, '<strong>Género</strong>\s*<ul>(.*?)</ul>')
|
||||
logger.debug("generos=" + generos)
|
||||
scrapedgenres = re.compile('<li>([^<]+)</li>', re.DOTALL).findall(generos)
|
||||
scrapedcasting = re.compile(
|
||||
'<a href="https://www.plusdede.com/star/[^"]+"><div class="text-main">([^<]+)</div></a>\s*<div class="text-sub">\s*([^<]+)</div>',
|
||||
@@ -954,7 +900,6 @@ def plusdede_check(item):
|
||||
if item.tipo_esp == "lista":
|
||||
url_temp = "https://www.plusdede.com/listas/addmediapopup/" + item.tipo + "/" + item.idtemp + "?popup=1"
|
||||
data = httptools.downloadpage(url_temp).data
|
||||
logger.debug("DATA_CHECK_LISTA:" + data)
|
||||
|
||||
patron = '<div class="lista model" data-model="10" data-id="([^"]+)">+'
|
||||
patron += '.*?<a href="/lista/[^"]+">([^<]+)</a>+'
|
||||
@@ -986,8 +931,6 @@ def plusdede_check(item):
|
||||
"X-CSRF-TOKEN": item.token}
|
||||
data = httptools.downloadpage(url_temp, post="id=" + item.idtemp, headers=headers,
|
||||
replace_headers=True).data.strip()
|
||||
logger.debug("URL_PLUSDEDECHECK_DATA=" + url_temp + " ITEM:TIPO=" + item.tipo)
|
||||
logger.debug("PLUSDEDECHECK_DATA=" + data)
|
||||
dialog = platformtools
|
||||
dialog.ok = platformtools.dialog_ok
|
||||
if data == "1":
|
||||
@@ -1002,4 +945,4 @@ def plusdede_check(item):
|
||||
elif item.tipo_esp == "add_list":
|
||||
dialog.ok('SUCCESS', 'Añadido a la lista!')
|
||||
else:
|
||||
dialog.ok('ERROR', 'No se pudo realizar la acción!')
|
||||
dialog.ok('ERROR', 'No se pudo realizar la acción!')
|
||||
|
||||
@@ -30,11 +30,6 @@ def mainlist(item):
|
||||
itemlist.append(
|
||||
Item(channel=item.channel, action="menudesta", title="Destacadas", url= host + "/pag/1",
|
||||
thumbnail="http://img.irtve.es/v/1074982/", fanart=mifan))
|
||||
itemlist.append(Item(channel=item.channel, action="menupelis", title="Proximos estrenos",
|
||||
url= host + "/archivos/proximos-estrenos/pag/1",
|
||||
thumbnail="https://encrypted-tbn3.gstatic.com/images?q=tbn:ANd9GcTpsRC"
|
||||
"-GTYzCqhor2gIDfAB61XeymwgXWSVBHoRAKs2c5HAn29f&reload=on",
|
||||
fanart=mifan))
|
||||
itemlist.append(Item(channel=item.channel, action="menupelis", title="Todas las Peliculas",
|
||||
url= host + "/pag/1",
|
||||
thumbnail="https://freaksociety.files.wordpress.com/2012/02/logos-cine.jpg", fanart=mifan))
|
||||
@@ -70,7 +65,8 @@ def menupelis(item):
|
||||
logger.info(item.url)
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8')
|
||||
|
||||
if item.genre:
|
||||
item.extra = item.genre
|
||||
if item.extra == '':
|
||||
section = 'Recién Agregadas'
|
||||
elif item.extra == 'year':
|
||||
@@ -79,17 +75,13 @@ def menupelis(item):
|
||||
section = 'de Eróticas \+18'
|
||||
else:
|
||||
section = 'de %s'%item.extra
|
||||
|
||||
patronenlaces = '<h.>Películas %s<\/h.>.*?>(.*?)<\/section>'%section
|
||||
matchesenlaces = re.compile(patronenlaces, re.DOTALL).findall(data)
|
||||
|
||||
patronenlaces = '<h.>Películas %s</h.>.*?>(.*?)</section>'%section
|
||||
matchesenlaces = scrapertools.find_multiple_matches(data, patronenlaces)
|
||||
for bloque_enlaces in matchesenlaces:
|
||||
|
||||
patron = '<div class="poster-media-card">.*?'
|
||||
patron += '<a href="(.*?)".*?title="(.*?)"(.*?)'
|
||||
patron += '<img src="(.*?)"'
|
||||
matches = re.compile(patron, re.DOTALL).findall(bloque_enlaces)
|
||||
|
||||
for scrapedurl, scrapedtitle, extra_info, scrapedthumbnail in matches:
|
||||
title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
|
||||
title = title.replace("Online", "");
|
||||
@@ -144,21 +136,14 @@ def menudesta(item):
|
||||
# Peliculas de Estreno
|
||||
def menuestre(item):
|
||||
logger.info(item.url)
|
||||
|
||||
itemlist = []
|
||||
|
||||
data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8')
|
||||
patronenlaces = '<h1>Estrenos</h1>(.*?)</section>'
|
||||
matchesenlaces = re.compile(patronenlaces, re.DOTALL).findall(data)
|
||||
|
||||
matchesenlaces = scrapertools.find_multiple_matches(data, patronenlaces)
|
||||
for bloque_enlaces in matchesenlaces:
|
||||
|
||||
# patron = '<a href="([^"]+)" title="([^"]+)"> <div class="poster".*?<img src="([^"]+)"'
|
||||
|
||||
patron = '<div class="poster-media-card">.*?'
|
||||
patron += '<a href="(.*?)".*?title="(.*?)"(.*?)'
|
||||
patron += '<img src="(.*?)"'
|
||||
|
||||
matches = re.compile(patron, re.DOTALL).findall(bloque_enlaces)
|
||||
for scrapedurl, scrapedtitle, extra_info, scrapedthumbnail in matches:
|
||||
title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
|
||||
@@ -255,32 +240,22 @@ def search(item, texto):
|
||||
patron += '<div class="row">.*?'
|
||||
patron += '<a href="(.*?)" title="(.*?)">.*?'
|
||||
patron += '<img src="(.*?)"'
|
||||
|
||||
logger.info(patron)
|
||||
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
itemlist = []
|
||||
|
||||
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
|
||||
title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
|
||||
title = title.replace("Online", "")
|
||||
url = item.url + scrapedurl
|
||||
thumbnail = item.url + scrapedthumbnail
|
||||
logger.info(url)
|
||||
url = scrapedurl
|
||||
thumbnail = scrapedthumbnail
|
||||
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url,
|
||||
thumbnail=thumbnail, fanart=thumbnail))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def poranyo(item):
|
||||
logger.info(item.url)
|
||||
|
||||
itemlist = []
|
||||
|
||||
data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8')
|
||||
|
||||
patron = '<option value="([^"]+)">(.*?)</option>'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
for scrapedurl, scrapedtitle in matches:
|
||||
@@ -289,7 +264,6 @@ def poranyo(item):
|
||||
url = item.url + scrapedurl
|
||||
itemlist.append(Item(channel=item.channel, action="menupelis", title=title, fulltitle=title, url=url,
|
||||
fanart=item.fanart, extra='year'))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
@@ -300,24 +274,25 @@ def porcateg(item):
|
||||
data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8')
|
||||
patron = '<li class="cat-item cat-item-3">.*?<a href="([^"]+)" title="([^"]+)">'
|
||||
matches = scrapertools.find_multiple_matches(data, patron)
|
||||
|
||||
adult_mode = config.get_setting("adult_mode")
|
||||
for scrapedurl, scrapedtitle in matches:
|
||||
if "18" in scrapedtitle and adult_mode == 0:
|
||||
continue
|
||||
title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
|
||||
title = title.replace("Online", "")
|
||||
url = scrapedurl
|
||||
logger.info(url)
|
||||
# si no esta permitidas categoria adultos, la filtramos
|
||||
extra = title
|
||||
adult_mode = config.get_setting("adult_mode")
|
||||
extra1 = title
|
||||
if adult_mode != 0:
|
||||
if 'erotic' in scrapedurl:
|
||||
extra = 'adult'
|
||||
extra1 = 'adult'
|
||||
else:
|
||||
extra=title
|
||||
extra1=title
|
||||
|
||||
if (extra=='adult' and adult_mode != 0) or extra != 'adult':
|
||||
if (extra1=='adult' and adult_mode != 0) or extra1 != 'adult':
|
||||
itemlist.append(Item(channel=item.channel, action="menupelis", title=title, fulltitle=title, url=url,
|
||||
fanart=item.fanart, extra = extra))
|
||||
fanart=item.fanart, genre = extra1))
|
||||
|
||||
return itemlist
|
||||
|
||||
@@ -338,7 +313,6 @@ def decode(string):
|
||||
i += 1
|
||||
enc4 = keyStr.index(input[i])
|
||||
i += 1
|
||||
|
||||
chr1 = (enc1 << 2) | (enc2 >> 4)
|
||||
chr2 = ((enc2 & 15) << 4) | (enc3 >> 2)
|
||||
chr3 = ((enc3 & 3) << 6) | enc4
|
||||
@@ -352,4 +326,4 @@ def decode(string):
|
||||
|
||||
output = output.decode('utf8')
|
||||
|
||||
return output
|
||||
return output
|
||||
|
||||
@@ -290,7 +290,10 @@ def do_search(item, categories=None):
|
||||
multithread = config.get_setting("multithread", "search")
|
||||
result_mode = config.get_setting("result_mode", "search")
|
||||
|
||||
tecleado = item.extra
|
||||
if item.wanted!='':
|
||||
tecleado=item.wanted
|
||||
else:
|
||||
tecleado = item.extra
|
||||
|
||||
itemlist = []
|
||||
|
||||
|
||||
@@ -21,8 +21,7 @@ list_language = ['default']
|
||||
CALIDADES = ['SD', 'HDiTunes', 'Micro-HD-720p', 'Micro-HD-1080p', '1080p', '720p']
|
||||
list_quality = CALIDADES
|
||||
|
||||
list_servers = ['streamix',
|
||||
'powvideo',
|
||||
list_servers = ['powvideo',
|
||||
'streamcloud',
|
||||
'openload',
|
||||
'flashx',
|
||||
@@ -312,6 +311,7 @@ def findvideos(item):
|
||||
d=d.lstrip( )
|
||||
list_links[i].server=d
|
||||
|
||||
list_links = servertools.get_servers_itemlist(list_links)
|
||||
autoplay.start(list_links, item)
|
||||
|
||||
return list_links
|
||||
@@ -319,7 +319,6 @@ def findvideos(item):
|
||||
|
||||
def play(item):
|
||||
logger.info("%s - %s = %s" % (item.show, item.title, item.url))
|
||||
|
||||
if item.url.startswith(HOST):
|
||||
data = httptools.downloadpage(item.url).data
|
||||
|
||||
@@ -340,7 +339,7 @@ def play(item):
|
||||
else:
|
||||
url = item.url
|
||||
|
||||
itemlist = servertools.find_video_items(data=url)
|
||||
itemlist = servertools.find_video_items(item=item,data=url)
|
||||
|
||||
titulo = scrapertoolsV2.find_single_match(item.fulltitle, "^(.*?)\s\[.+?$")
|
||||
if titulo:
|
||||
|
||||
@@ -14,10 +14,7 @@ from channels import autoplay
|
||||
|
||||
IDIOMAS = {'latino': 'Latino'}
|
||||
list_language = IDIOMAS.values()
|
||||
list_servers = ['openload',
|
||||
'okru',
|
||||
'netutv',
|
||||
'rapidvideo'
|
||||
list_servers = ['openload'
|
||||
]
|
||||
list_quality = ['default']
|
||||
|
||||
@@ -49,7 +46,11 @@ def lista(item):
|
||||
patron = '<a href="([^"]+)" '
|
||||
patron += 'class="link">.+?<img src="([^"]+)".*?'
|
||||
patron += 'title="([^"]+)">'
|
||||
|
||||
if item.url==host:
|
||||
a=1
|
||||
else:
|
||||
num=(item.url).split('-')
|
||||
a=int(num[1])
|
||||
matches = scrapertools.find_multiple_matches(data, patron)
|
||||
|
||||
# Paginacion
|
||||
@@ -57,17 +58,28 @@ def lista(item):
|
||||
min = item.page * num_items_x_pagina
|
||||
min=min-item.page
|
||||
max = min + num_items_x_pagina - 1
|
||||
|
||||
b=0
|
||||
for link, img, name in matches[min:max]:
|
||||
title = name
|
||||
b=b+1
|
||||
if " y " in name:
|
||||
title=name.replace(" y "," & ")
|
||||
else:
|
||||
title = name
|
||||
url = host + link
|
||||
scrapedthumbnail = host + img
|
||||
context1=[renumbertools.context(item), autoplay.context]
|
||||
itemlist.append(item.clone(title=title, url=url, action="episodios", thumbnail=scrapedthumbnail, show=title,
|
||||
context=context1))
|
||||
|
||||
itemlist.append(
|
||||
Item(channel=item.channel, title="Página Siguiente >>", url=item.url, action="lista", page=item.page + 1))
|
||||
logger.info("gasdfsa "+str(b))
|
||||
if b<29:
|
||||
a=a+1
|
||||
url="https://serieslan.com/pag-"+str(a)
|
||||
if b>10:
|
||||
itemlist.append(
|
||||
Item(channel=item.channel, title="Página Siguiente >>", url=url, action="lista", page=0))
|
||||
else:
|
||||
itemlist.append(
|
||||
Item(channel=item.channel, title="Página Siguiente >>", url=item.url, action="lista", page=item.page + 1))
|
||||
|
||||
tmdb.set_infoLabels(itemlist)
|
||||
return itemlist
|
||||
@@ -78,13 +90,14 @@ def episodios(item):
|
||||
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
logger.debug("info %s " % data)
|
||||
# obtener el numero total de episodios
|
||||
total_episode = 0
|
||||
|
||||
patron_caps = '<li><span>Capitulo ([^"]+)\:<\/span><[^"]+"(.+?)">([^"]+)<[^"]+<\/li>'
|
||||
matches = scrapertools.find_multiple_matches(data, patron_caps)
|
||||
# data_info = scrapertools.find_single_match(data, '<div class="info">.+?<\/div><\/div>')
|
||||
patron_info = '<img src="([^"]+)">.+?<\/span>([^"]+)<\/p><p><span>I.+?Reseña: <\/span>(.+?)<\/p><\/div>'
|
||||
patron_info = '<img src="([^"]+)">.+?</span>(.*?)</p>.*?<h2>Reseña:</h2><p>(.*?)</p>'
|
||||
scrapedthumbnail, show, scrapedplot = scrapertools.find_single_match(data, patron_info)
|
||||
scrapedthumbnail = host + scrapedthumbnail
|
||||
|
||||
@@ -92,6 +105,10 @@ def episodios(item):
|
||||
|
||||
title = ""
|
||||
pat = "/"
|
||||
if "Mike, Lu & Og"==item.title:
|
||||
pat="&/"
|
||||
if "KND" in item.title:
|
||||
pat="-"
|
||||
# varios episodios en un enlace
|
||||
if len(name.split(pat)) > 1:
|
||||
i = 0
|
||||
@@ -133,7 +150,7 @@ def findvideos(item):
|
||||
itemlist = []
|
||||
|
||||
url_server = "https://openload.co/embed/%s/"
|
||||
url_api_get_key = "https://serieslan.com/ide.php?i=%s&k=%s"
|
||||
url_api_get_key = "https://serieslan.com/idx.php?i=%s&k=%s"
|
||||
|
||||
def txc(key, _str):
|
||||
s = range(256)
|
||||
@@ -156,7 +173,7 @@ def findvideos(item):
|
||||
return res
|
||||
|
||||
data = httptools.downloadpage(item.url).data
|
||||
pattern = '<div id="video" idv="([^"]*)" ide="([^"]*)" ids="[^"]*" class="video">'
|
||||
pattern = "<script type=.+?>.+?\['(.+?)','(.+?)','.+?'\]"
|
||||
idv, ide = scrapertools.find_single_match(data, pattern)
|
||||
thumbnail = scrapertools.find_single_match(data,
|
||||
'<div id="tab-1" class="tab-content current">.+?<img src="([^"]*)">')
|
||||
|
||||
@@ -160,7 +160,9 @@ def findvideos(item):
|
||||
'gamo': 'http://gamovideo.com/embed-',
|
||||
'powvideo': 'http://powvideo.net/embed-',
|
||||
'play': 'http://streamplay.to/embed-',
|
||||
'vido': 'http://vidoza.net/embed-'}
|
||||
'vido': 'http://vidoza.net/embed-',
|
||||
'net': 'http://hqq.tv/player/embed_player.php?vid='
|
||||
}
|
||||
data = get_source(item.url)
|
||||
noemitido = scrapertools.find_single_match(data, '<p><img src=(http://darkiller.com/images/subiendo.png) border=0\/><\/p>')
|
||||
patron = 'id=tab\d+.*?class=tab_content><script>(.*?)\((.*?)\)<\/script>'
|
||||
|
||||
@@ -7,7 +7,8 @@
|
||||
"banner": "https://s9.postimg.org/5yxsq205r/ultrapeliculashd_banner.png",
|
||||
"thumbnail": "https://s13.postimg.org/d042quw9j/ultrapeliculashd.png",
|
||||
"categories": [
|
||||
"movie"
|
||||
"movie",
|
||||
"direct"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
@@ -33,6 +34,14 @@
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_terror",
|
||||
"type": "bool",
|
||||
"label": "Incluir en Novedades -Terror",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
@@ -192,27 +192,24 @@ def findvideos(item):
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r'"|\n|\r|\t| |<br>|\s{2,}', "", data)
|
||||
|
||||
patron = '<iframe class=metaframe rptss src=(.*?) frameborder=0 allowfullscreen><\/iframe>'
|
||||
matches = matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
patron = '<iframe class=metaframe rptss src=(.*?) (?:width=.*?|frameborder=0) allowfullscreen><\/iframe>'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for video_url in matches:
|
||||
if 'stream' in video_url:
|
||||
data = httptools.downloadpage('https:'+video_url).data
|
||||
new_url=scrapertools.find_single_match(data, 'iframe src="(.*?)"')
|
||||
new_data = httptools.downloadpage(new_url).data
|
||||
|
||||
# TODO Reparar directos
|
||||
# if 'stream' in video_url:
|
||||
# data = httptools.downloadpage('https:'+video_url).data
|
||||
# new_url=scrapertools.find_single_match(data, 'iframe src="(.*?)"')
|
||||
# new_data = httptools.downloadpage(new_url).data
|
||||
# logger.debug(new_data)
|
||||
#
|
||||
# url, quality = scrapertools.find_single_match(new_data, "file:'(.*?)',label:'(.*?)'")
|
||||
# headers_string = '|Referer=%s' % url
|
||||
# url = url.replace('download', 'preview')+headers_string
|
||||
# sub = scrapertools.find_single_match(new_data, "file:.*?'(.*?srt)'")
|
||||
# new_item = (Item(title=item.title, url=url, quality=quality, server='directo',
|
||||
# subtitle=sub))
|
||||
# itemlist.append(new_item)
|
||||
# else:
|
||||
itemlist.extend(servertools.find_video_items(data=video_url))
|
||||
url, quality = scrapertools.find_single_match(new_data, 'file:.*?"(.*?)",label:.*?"(.*?)"')
|
||||
headers_string = '|Referer=%s' % url
|
||||
url = url.replace('download', 'preview')+headers_string
|
||||
sub = scrapertools.find_single_match(new_data, 'file:.*?"(.*?srt)"')
|
||||
new_item = (Item(title=item.title, url=url, quality=quality, server='directo',
|
||||
subtitle=sub))
|
||||
itemlist.append(new_item)
|
||||
else:
|
||||
itemlist.extend(servertools.find_video_items(data=video_url))
|
||||
|
||||
for videoitem in itemlist:
|
||||
videoitem.channel = item.channel
|
||||
@@ -255,10 +252,13 @@ def newest(categoria):
|
||||
item.extra = 'estrenos/'
|
||||
try:
|
||||
if categoria == 'peliculas':
|
||||
item.url = host + '/category/estrenos/'
|
||||
item.url = host + '/genre/estrenos/'
|
||||
|
||||
elif categoria == 'infantiles':
|
||||
item.url = host + '/category/infantil/'
|
||||
item.url = host + '/genre/animacion/'
|
||||
|
||||
elif categoria == 'terror':
|
||||
item.url = host + '/genre/terror/'
|
||||
|
||||
itemlist = lista(item)
|
||||
if itemlist[-1].title == 'Siguiente >>>':
|
||||
|
||||
@@ -381,6 +381,7 @@ def findvideos(item):
|
||||
item_json.show = item.library_filter_show.get(nom_canal, "")
|
||||
|
||||
# Ejecutamos find_videos, del canal o común
|
||||
item_json.contentChannel='videolibrary'
|
||||
if hasattr(channel, 'findvideos'):
|
||||
from core import servertools
|
||||
list_servers = getattr(channel, 'findvideos')(item_json)
|
||||
|
||||
@@ -398,9 +398,9 @@ def set_context_commands(item, parent_item):
|
||||
if item.contentType in ['movie','tvshow']and item.channel != 'search':
|
||||
# Buscar en otros canales
|
||||
if item.contentSerieName!='':
|
||||
item.extra=item.contentSerieName
|
||||
item.wanted=item.contentSerieName
|
||||
else:
|
||||
item.extra = item.contentTitle
|
||||
item.wanted = item.contentTitle
|
||||
context_commands.append(("[COLOR yellow]Buscar en otros canales[/COLOR]",
|
||||
"XBMC.Container.Update (%s?%s)" % (sys.argv[0],
|
||||
item.clone(channel='search',
|
||||
|
||||
BIN
plugin.video.alfa/resources/media/channels/banner/danimados.png
Normal file
BIN
plugin.video.alfa/resources/media/channels/banner/danimados.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 18 KiB |
Binary file not shown.
|
After Width: | Height: | Size: 110 KiB |
BIN
plugin.video.alfa/resources/media/channels/thumb/danimados.png
Normal file
BIN
plugin.video.alfa/resources/media/channels/thumb/danimados.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 16 KiB |
BIN
plugin.video.alfa/resources/media/channels/thumb/mundiseries.png
Normal file
BIN
plugin.video.alfa/resources/media/channels/thumb/mundiseries.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 75 KiB |
@@ -33,16 +33,20 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
|
||||
'Cookie': ''}
|
||||
data = httptools.downloadpage(page_url, headers=headers, replace_headers=True).data
|
||||
data = data.replace("\n","")
|
||||
cgi_counter = scrapertools.find_single_match(data, '(?is)src=.(https://www.flashx.tv/counter.cgi.*?fx=[0-9a-zA-Z=]+)')
|
||||
cgi_counter = scrapertools.find_single_match(data, """(?is)src=.(https://www.flashx.tv/counter.cgi.*?[^(?:'|")]+)""")
|
||||
cgi_counter = cgi_counter.replace("%0A","").replace("%22","")
|
||||
playnow = scrapertools.find_single_match(data, 'https://www.flashx.tv/dl[^"]+')
|
||||
# Para obtener el f y el fxfx
|
||||
js_fxfx = scrapertools.find_single_match(data, '(?is)src=.(https://www.flashx.tv/js/code.js.*?=[0-9]+)')
|
||||
js_fxfx = "https://www." + scrapertools.find_single_match(data.replace("//","/"), """(?is)(flashx.tv/js/code.js.*?[^(?:'|")]+)""")
|
||||
data_fxfx = httptools.downloadpage(js_fxfx).data
|
||||
mfxfx = scrapertools.find_single_match(data_fxfx, 'get.*?({.*?})').replace("'","").replace(" ","")
|
||||
matches = scrapertools.find_multiple_matches(mfxfx, '(\w+):(\w+)')
|
||||
for f, v in matches:
|
||||
pfxfx += f + "=" + v + "&"
|
||||
logger.info("mfxfxfx1= %s" %js_fxfx)
|
||||
logger.info("mfxfxfx2= %s" %pfxfx)
|
||||
if pfxfx == "":
|
||||
pfxfx = "ss=yes&f=fail&fxfx=6"
|
||||
coding_url = 'https://www.flashx.tv/flashx.php?%s' %pfxfx
|
||||
# {f: 'y', fxfx: '6'}
|
||||
flashx_id = scrapertools.find_single_match(data, 'name="id" value="([^"]+)"')
|
||||
|
||||
@@ -7,13 +7,13 @@ from core import scrapertools
|
||||
from lib import jsunpack
|
||||
from platformcode import logger
|
||||
|
||||
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:40.0) Gecko/20100101 ' \
|
||||
'Firefox/40.0'}
|
||||
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:57.0) Gecko/20100101 ' \
|
||||
'Firefox/57.0'}
|
||||
|
||||
|
||||
def test_video_exists(page_url):
|
||||
logger.info("(page_url='%s')" % page_url)
|
||||
data = httptools.downloadpage(page_url, add_referer = True).data
|
||||
data = httptools.downloadpage(page_url, headers=headers).data
|
||||
|
||||
if "File was deleted" in data or "Not Found" in data or "File was locked by administrator" in data:
|
||||
return False, "[Gamovideo] El archivo no existe o ha sido borrado"
|
||||
@@ -26,7 +26,6 @@ def test_video_exists(page_url):
|
||||
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
|
||||
logger.info("(page_url='%s')" % page_url)
|
||||
data = httptools.downloadpage(page_url, headers=headers).data
|
||||
logger.debug(data)
|
||||
packer = scrapertools.find_single_match(data,
|
||||
"<script type='text/javascript'>(eval.function.p,a,c,k,e,d..*?)</script>")
|
||||
if packer != "":
|
||||
|
||||
@@ -30,12 +30,20 @@ def get_video_url(page_url, user="", password="", video_password=""):
|
||||
streams =[]
|
||||
logger.debug('page_url: %s'%page_url)
|
||||
if 'googleusercontent' in page_url:
|
||||
data = httptools.downloadpage(page_url, follow_redirects = False, headers={"Referer": page_url})
|
||||
url=data.headers['location']
|
||||
|
||||
response = httptools.downloadpage(page_url, follow_redirects = False, cookies=False, headers={"Referer": page_url})
|
||||
url=response.headers['location']
|
||||
cookies = ""
|
||||
cookie = response.headers["set-cookie"].split("HttpOnly, ")
|
||||
for c in cookie:
|
||||
cookies += c.split(";", 1)[0] + "; "
|
||||
data = response.data.decode('unicode-escape')
|
||||
data = urllib.unquote_plus(urllib.unquote_plus(data))
|
||||
headers_string = "|Cookie=" + cookies
|
||||
|
||||
quality = scrapertools.find_single_match (url, '.itag=(\d+).')
|
||||
|
||||
streams.append((quality, url))
|
||||
headers_string=""
|
||||
|
||||
else:
|
||||
response = httptools.downloadpage(page_url, cookies=False, headers={"Referer": page_url})
|
||||
|
||||
@@ -23,6 +23,8 @@ def test_video_exists(page_url):
|
||||
|
||||
if "Object not found" in response.data:
|
||||
return False, "[Rapidvideo] El archivo no existe o ha sido borrado"
|
||||
if reponse.code == 500:
|
||||
return False, "[Rapidvideo] Error de servidor, inténtelo más tarde."
|
||||
|
||||
return True, ""
|
||||
|
||||
|
||||
@@ -8,7 +8,6 @@ from platformcode import logger
|
||||
|
||||
def test_video_exists(page_url):
|
||||
logger.info("(page_url='%s')" % page_url)
|
||||
|
||||
data = httptools.downloadpage(page_url).data
|
||||
if "Not Found" in data:
|
||||
return False, "[streamixcloud] El archivo no existe o ha sido borrado"
|
||||
@@ -21,7 +20,8 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
|
||||
logger.info("(page_url='%s')" % page_url)
|
||||
data = httptools.downloadpage(page_url).data
|
||||
video_urls = []
|
||||
packed = scrapertools.find_single_match(data,
|
||||
patron = "<script type='text/javascript'>(eval\(function\(p,a,c,k,e,d.*?)</script"
|
||||
packed = scrapertools.find_single_match(data, patron)
|
||||
data = jsunpack.unpack(packed)
|
||||
media_url = scrapertools.find_multiple_matches(data, '\{file:"([^"]+)",')
|
||||
ext = scrapertools.get_filename_from_url(media_url[0])[-4:]
|
||||
|
||||
@@ -3,8 +3,8 @@
|
||||
"find_videos": {
|
||||
"patterns": [
|
||||
{
|
||||
"pattern": "(http://vshare.io/v/[\\w]+[^\"']*)[\"']",
|
||||
"url": "\\1"
|
||||
"pattern": "(vshare.io/v/[a-zA-Z0-9/-]+)",
|
||||
"url": "http://\\1"
|
||||
}
|
||||
]
|
||||
},
|
||||
|
||||
@@ -40,11 +40,8 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
|
||||
arrayResult = [chr(int(value) - substract) for value in fields.group(1).split(",")]
|
||||
strResult = "".join(arrayResult)
|
||||
logger.debug(strResult)
|
||||
|
||||
videoSources = re.findall("<source[\s]+src=[\"'](?P<url>[^\"']+)[^>]+label=[\"'](?P<label>[^\"']+)", strResult)
|
||||
|
||||
for url, label in videoSources:
|
||||
logger.debug("[" + label + "] " + url)
|
||||
video_urls.append([label, url])
|
||||
|
||||
video_urls.sort(key=lambda i: int(i[0].replace("p","")))
|
||||
return video_urls
|
||||
|
||||
Reference in New Issue
Block a user