38 Commits

Author SHA1 Message Date
alfa-addon
162772e9dc v2.3.2 2017-10-28 22:47:51 -04:00
alfa-addon
60d61f861b fixed 2017-10-28 22:47:33 -04:00
Alfa
cd1c7b692a Merge pull request #142 from danielr460/master
New channels
2017-10-29 06:06:34 +01:00
danielr460
10abe4a6d4 Changed the structure of the danimados website 2017-10-28 23:40:49 -05:00
danielr460
b0fa5e8a75 Removed the Novedades section because the website dropped it 2017-10-28 23:23:42 -05:00
danielr460
54d6a943f5 Fixed Mundiseries 2017-10-28 23:19:43 -05:00
Daniel Rincón Rodríguez
44df5b6036 Restored a line that was actually needed 2017-10-28 23:00:36 -05:00
Alfa
ae67d9b5ee Merge pull request #148 from Intel11/patch-1
Updated
2017-10-29 02:57:15 +01:00
Alfa
895d14760d Merge pull request #149 from Alfa-beto/Fixes
Fixed next page in pelisplus
2017-10-29 02:56:42 +01:00
Intel1
b0b4b218f0 animemovil: fast fix 2017-10-28 20:55:12 -05:00
Unknown
348787ae97 Merge remote-tracking branch 'alfa-addon/master' into Fixes 2017-10-28 21:56:26 -03:00
Unknown
0f7c11efad Fixed next page 2017-10-28 21:37:23 -03:00
Intel1
ae7a4a8d83 rapidvideo: updated test_video_exists 2017-10-28 11:26:08 -05:00
Intel1
fc58c717eb plusdede: updated findvideos 2017-10-28 11:17:48 -05:00
Daniel Rincón Rodríguez
b3a19f3d20 Fixes suggested by Intel1 2017-10-28 11:11:51 -05:00
Daniel Rincón Rodríguez
0cac09eef5 Removed the "crude" workaround 2017-10-28 11:00:35 -05:00
Intel1
9a1effbe25 cinetux: show the movie count 2017-10-28 10:58:15 -05:00
Daniel Rincón Rodríguez
44145660d0 Removed unnecessary code 2017-10-28 10:55:55 -05:00
Daniel Rincón Rodríguez
aec2674316 Removed generic function 2017-10-28 10:55:01 -05:00
Daniel Rincón Rodríguez
09de611aae Update cartoonlatino.py 2017-10-28 10:54:00 -05:00
Daniel Rincón Rodríguez
74598154c2 Removed unnecessary code 2017-10-28 10:53:19 -05:00
Daniel Rincón Rodríguez
7ab9c8bb29 Cartoonlatino back to its original version 2017-10-28 10:52:30 -05:00
Daniel Rincón Rodríguez
14178974a0 Improved Autoplay 2017-10-28 10:51:45 -05:00
Intel1
c43162cbc2 flashx: as I said!!! 2017-10-28 08:40:11 -05:00
Daniel Rincón Rodríguez
aa76986a51 Left the AnitoonsTV channel as it was 2017-10-28 08:30:25 -05:00
Daniel Rincón Rodríguez
9aae0e7a1b Fixes for Intel1's comments 2017-10-26 18:28:19 -05:00
danielr460
e1fe886602 Autoplay added 2017-10-26 15:38:36 -05:00
danielr460
19812c83a9 Added channel info from the video library 2017-10-26 15:05:12 -05:00
danielr460
cabc2458e3 Added series info so it is not wiped while autoplay is active 2017-10-26 14:36:09 -05:00
danielr460
336376ecef Added a videolibrary flag to know the call does not come from the addon 2017-10-26 14:34:45 -05:00
danielr460
af06269e39 Added a mark-as-watched option to autoplay 2017-10-26 13:56:37 -05:00
danielr460
f37d18ee0a Added contentChannel so findvideos knows whether it was called from the addon or the videolibrary 2017-10-26 13:54:14 -05:00
danielr460
6fefc3b048 Added Autoplay 2017-10-26 13:44:09 -05:00
danielr460
ab5fe41403 Removed unnecessary code 2017-10-26 07:49:46 -05:00
danielr460
15463ea0f8 Fixed a bug 2017-10-26 07:48:01 -05:00
danielr460
badf40573c New channel mundiseries 2017-10-25 21:41:55 -05:00
danielr460
c80793e3e0 Media for danimados 2017-10-25 21:41:38 -05:00
danielr460
cbc0ff0bd0 New channel danimados 2017-10-25 21:25:31 -05:00
21 changed files with 376 additions and 128 deletions

View File

@@ -1,5 +1,5 @@
 <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
-<addon id="plugin.video.alfa" name="Alfa" version="2.3.1" provider-name="Alfa Addon">
+<addon id="plugin.video.alfa" name="Alfa" version="2.3.2" provider-name="Alfa Addon">
 <requires>
     <import addon="xbmc.python" version="2.1.0"/>
     <import addon="script.module.libtorrent" optional="true"/>
@@ -19,9 +19,15 @@
 </assets>
 <news>[B]Estos son los cambios para esta versión:[/B]
 [COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
-» ohlatino » animemovil
-» pelisplus » flashx
+» cinetux » animemovil
+» anitoonstv » cartoonlatino
+» seriesblanco » damimados
+» mundiseries » serieslan
+» cinetux » animemovil
+» plusdede » pelisplus
+» rapidvideo » flashx
 ¤ arreglos internos
+[COLOR green]Gracias a [COLOR yellow]Danielr460[/COLOR] por su colaboración en esta versión[/COLOR]
 </news>
 <description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>
 <summary lang="en">Browse web pages using Kodi</summary>

View File

@@ -86,7 +86,7 @@ def recientes(item):
             tipo = "tvshow"
             show = contentTitle
             action = "episodios"
-            context = renumbertools.context
+            context = renumbertools.context(item)
         if item.extra == "recientes":
             action = "findvideos"
             context = ""
@@ -96,7 +96,7 @@ def recientes(item):
             action = "peliculas"
         if not thumb.startswith("http"):
             thumb = "http:%s" % thumb
-            action = "findvideos"
         infoLabels = {'filtro': {"original_language": "ja"}.items()}
         itemlist.append(item.clone(action=action, title=title, url=url, thumbnail=thumb, text_color=color3,
                                    contentTitle=contentTitle, contentSerieName=show, infoLabels=infoLabels,

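The fix in the first hunk above swaps a bare function reference (renumbertools.context) for a call (renumbertools.context(item)). A generic illustration of why that matters — the names below are ours, not the repo's:

def context(item):
    # Stand-in for renumbertools.context: builds a context-menu list for one item.
    return [{"title": "Renumber", "channel": item}]

wrong = context          # stores the function object itself
right = context("demo")  # stores the list the function returns
print(callable(wrong), right)  # True [{'title': 'Renumber', 'channel': 'demo'}]
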
View File

@@ -1,4 +1,4 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
import re import re
@@ -149,34 +149,14 @@ def findvideos(item):
for server, quality, url in itemla: for server, quality, url in itemla:
if "Calidad Alta" in quality: if "Calidad Alta" in quality:
quality = quality.replace("Calidad Alta", "HQ") quality = quality.replace("Calidad Alta", "HQ")
server = server.lower().strip() if " Calidad media - Carga mas rapido" in quality:
if "ok" == server: quality = quality.replace(" Calidad media - Carga mas rapido", "360p")
server = 'okru' server = server.lower().strip()
if "netu" == server: if "ok" == server:
continue server = 'okru'
itemlist.append(item.clone(url=url, action="play", server=server, contentQuality=quality, itemlist.append(item.clone(url=url, action="play", server=server, contentQuality=quality,
thumbnail=scrapedthumbnail, plot=scrapedplot, thumbnail=scrapedthumbnail, plot=scrapedplot,
title="Enlace encontrado en %s: [%s]" % (server.capitalize(), quality))) title="Enlace encontrado en %s: [%s]" % (server.capitalize(), quality)))
autoplay.start(itemlist, item) autoplay.start(itemlist, item)
return itemlist return itemlist
def play(item):
logger.info()
itemlist = []
# Buscamos video por servidor ...
devuelve = servertools.findvideosbyserver(item.url, item.server)
if not devuelve:
# ...sino lo encontramos buscamos en todos los servidores disponibles
devuelve = servertools.findvideos(item.url, skip=True)
if devuelve:
# logger.debug(devuelve)
itemlist.append(Item(channel=item.channel, title=item.contentTitle, action="play", server=devuelve[0][2],
url=devuelve[0][1], thumbnail=item.thumbnail))
return itemlist

View File

@@ -7,6 +7,7 @@ from core import jsontools
 from core.item import Item
 from platformcode import config, logger
 from platformcode import platformtools
+from platformcode import launcher

 __channel__ = "autoplay"
@@ -78,7 +79,20 @@ def start(itemlist, item):
     :return: intenta autoreproducir, en caso de fallar devuelve el itemlist que recibio en un principio
     '''
     logger.info()
+    for videoitem in itemlist:
+        # Nos dice de donde viene: si del addon o videolibrary
+        if item.contentChannel == 'videolibrary':
+            videoitem.contentEpisodeNumber = item.contentEpisodeNumber
+            videoitem.contentPlot = item.contentPlot
+            videoitem.contentSeason = item.contentSeason
+            videoitem.contentSerieName = item.contentSerieName
+            videoitem.contentTitle = item.contentTitle
+            videoitem.contentType = item.contentType
+            videoitem.episode_id = item.episode_id
+            videoitem.hasContentDetails = item.hasContentDetails
+            videoitem.infoLabels = item.infoLabels
+            videoitem.thumbnail = item.thumbnail
+            #videoitem.title = item.title
     if not config.is_xbmc():
         #platformtools.dialog_notification('AutoPlay ERROR', 'Sólo disponible para XBMC/Kodi')
         return itemlist
@@ -261,8 +275,12 @@ def start(itemlist, item):
     else:
         videoitem = resolved_item[0]
-    # si no directamente reproduce
-    platformtools.play_video(videoitem)
+    # si no directamente reproduce y marca como visto
+    from platformcode import xbmc_videolibrary
+    xbmc_videolibrary.mark_auto_as_watched(item)
+    #platformtools.play_video(videoitem)
+    videoitem.contentChannel = 'videolibrary'
+    launcher.run(videoitem)
     try:
         if platformtools.is_playing():

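The block added to start() above copies the library item's metadata onto every candidate video item before playback. A minimal standalone sketch of that hand-off, assuming only that items expose these attributes (the helper name is ours):

CONTENT_FIELDS = ["contentEpisodeNumber", "contentPlot", "contentSeason",
                  "contentSerieName", "contentTitle", "contentType",
                  "episode_id", "hasContentDetails", "infoLabels", "thumbnail"]

def copy_library_metadata(item, videoitem):
    # Only items launched from the video library carry metadata worth keeping.
    if item.contentChannel == 'videolibrary':
        for field in CONTENT_FIELDS:
            setattr(videoitem, field, getattr(item, field))
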
View File

@@ -9,6 +9,7 @@ from core import servertools
 from core import tmdb
 from core.item import Item
 from platformcode import config, logger
+from channels import autoplay
 host = "http://www.cartoon-latino.com/"
 from channels import autoplay
@@ -150,7 +151,6 @@ def episodios(item):
     if config.get_videolibrary_support() and len(itemlist) > 0:
         itemlist.append(Item(channel=item.channel, title="Añadir " + show + " a la videoteca", url=item.url,
                              action="add_serie_to_library", extra="episodios", show=show))
-
     return itemlist
@@ -185,29 +185,5 @@ def findvideos(item):
             server1 = server
         itemlist.append(item.clone(url=url, action="play", server=server1,
                                    title="Enlace encontrado en %s " % (server1.capitalize())))
     autoplay.start(itemlist, item)
     return itemlist
-def play(item):
-    logger.info()
-    itemlist = []
-    # Buscamos video por servidor ...
-    devuelve = servertools.findvideosbyserver(item.url, item.server)
-    if not devuelve:
-        # ...sino lo encontramos buscamos en todos los servidores disponibles
-        devuelve = servertools.findvideos(item.url, skip=True)
-    if devuelve:
-        # logger.debug(devuelve)
-        itemlist.append(Item(channel=item.channel, title=item.contentTitle, action="play", server=devuelve[0][2],
-                             url=devuelve[0][1], thumbnail=item.thumbnail, folder=False))
-    return itemlist

View File

@@ -28,9 +28,9 @@ def mainlist(item):
     itemlist = []
     item.viewmode = viewmode
-    data = httptools.downloadpage(CHANNEL_HOST).data
-    total = scrapertools.find_single_match(data, "TENEMOS\s<b>(.*?)</b>")
-    titulo = "Peliculas"
+    data = httptools.downloadpage(CHANNEL_HOST + "pelicula").data
+    total = scrapertools.find_single_match(data, "Películas</h1><span>(.*?)</span>")
+    titulo = "Peliculas (%s)" % total
     itemlist.append(item.clone(title=titulo, text_color=color2, action="", text_bold=True))
     itemlist.append(item.clone(action="peliculas", title=" Novedades", url=CHANNEL_HOST + "pelicula",
                                thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres"

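A quick sanity check of the new pattern above (the sample markup is invented, but shaped like the page the scraper targets; scrapertools.find_single_match behaves like re.search + group(1)):

import re

data = '<h1>Películas</h1><span>4.512</span>'
total = re.search(r'Películas</h1><span>(.*?)</span>', data).group(1)
print("Peliculas (%s)" % total)  # -> Peliculas (4.512)
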
View File

@@ -0,0 +1,19 @@
{
"id": "danimados",
"name": "Danimados",
"active": true,
"adult": false,
"language": ["lat"],
"thumbnail": "https://imgur.com/kU5Lx1S.png",
"banner": "https://imgur.com/xG5xqBq.png",
"version": 1,
"changes": [
{
"date": "24/10/2017",
"description": "Primera version del canal"
}
],
"categories": [
"tvshow"
]
}

View File

@@ -0,0 +1,186 @@
# -*- coding: utf-8 -*-
import re
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channels import autoplay
host = "http://www.danimados.com/"
list_servers = ['openload',
'okru',
'rapidvideo'
]
list_quality = ['default']
def mainlist(item):
logger.info()
thumb_series = get_thumb("channels_tvshow.png")
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(Item(channel=item.channel, action="mainpage", title="Categorías", url=host,
thumbnail=thumb_series))
itemlist.append(Item(channel=item.channel, action="mainpage", title="Más Populares", url=host,
thumbnail=thumb_series))
#itemlist.append(Item(channel=item.channel, action="movies", title="Peliculas Animadas", url=host,
# thumbnail=thumb_series))
autoplay.show_option(item.channel, itemlist)
return itemlist
"""
def search(item, texto):
logger.info()
texto = texto.replace(" ","+")
item.url = item.url+texto
if texto!='':
return lista(item)
"""
def mainpage(item):
logger.info()
itemlist = []
data1 = httptools.downloadpage(item.url).data
data1 = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data1)
if item.title=="Más Populares":
patron_sec='<a class="lglossary" data-type.+?>(.+?)<\/ul>'
patron='<img .+? src="([^"]+)".+?<a href="([^"]+)".+?>([^"]+)<\/a>' #scrapedthumbnail, #scrapedurl, #scrapedtitle
if item.title=="Categorías":
patron_sec='<ul id="main_header".+?>(.+?)<\/ul><\/div>'
patron='<a href="([^"]+)">([^"]+)<\/a>'#scrapedurl, #scrapedtitle
data = scrapertools.find_single_match(data1, patron_sec)
matches = scrapertools.find_multiple_matches(data, patron)
if item.title=="Géneros" or item.title=="Categorías":
for scrapedurl, scrapedtitle in matches:
if "Películas Animadas"!=scrapedtitle:
itemlist.append(
Item(channel=item.channel, title=scrapedtitle, url=scrapedurl, action="lista"))
return itemlist
else:
for scraped1, scraped2, scrapedtitle in matches:
scrapedthumbnail=scraped1
scrapedurl=scraped2
itemlist.append(
Item(channel=item.channel, title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, action="episodios",
show=scrapedtitle))
tmdb.set_infoLabels(itemlist)
return itemlist
return itemlist
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
data_lista = scrapertools.find_single_match(data, '<div class="items">(.+?)<\/div><\/div><div class=.+?>')
patron = '<img src="([^"]+)" alt="([^"]+)">.+?<a href="([^"]+)">.+?<div class="texto">(.+?)<\/div>'
#scrapedthumbnail,#scrapedtitle, #scrapedurl, #scrapedplot
matches = scrapertools.find_multiple_matches(data_lista, patron)
for scrapedthumbnail,scrapedtitle, scrapedurl, scrapedplot in matches:
itemlist.append(
item.clone(title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
context=autoplay.context,plot=scrapedplot, action="episodios", show=scrapedtitle))
tmdb.set_infoLabels(itemlist)
return itemlist
def episodios(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
data_lista = scrapertools.find_single_match(data,
'<ul class="episodios">(.+?)<\/ul><\/div><\/div><\/div>')
show = item.title
patron_caps = '<img src="([^"]+)"><\/a><\/div><div class=".+?">([^"]+)<\/div>.+?<a .+? href="([^"]+)">([^"]+)<\/a>'
#scrapedthumbnail,#scrapedtempepi, #scrapedurl, #scrapedtitle
matches = scrapertools.find_multiple_matches(data_lista, patron_caps)
for scrapedthumbnail, scrapedtempepi, scrapedurl, scrapedtitle in matches:
tempepi=scrapedtempepi.split(" - ")
if tempepi[0]=='Pel':
tempepi[0]=0
title="{0}x{1} - ({2})".format(tempepi[0], tempepi[1].zfill(2), scrapedtitle)
itemlist.append(Item(channel=item.channel, thumbnail=scrapedthumbnail,
action="findvideos", title=title, url=scrapedurl, show=show))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="[COLOR blue]Añadir " + show + " a la videoteca[/COLOR]", url=item.url,
action="add_serie_to_library", extra="episodios", show=show))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
data = scrapertools.find_single_match(data,
'<div id="playex" .+?>(.+?)<\/nav><\/div><\/div>')
patron='src="(.+?)"'
itemla = scrapertools.find_multiple_matches(data,patron)
for i in range(len(itemla)):
#for url in itemla:
url=itemla[i]
#verificar existencia del video (testing)
codigo=verificar_video(itemla[i])
if codigo==200:
if "ok.ru" in url:
server='okru'
else:
server=''
if "openload" in url:
server='openload'
if "google" in url:
server='gvideo'
if "rapidvideo" in url:
server='rapidvideo'
if "streamango" in url:
server='streamango'
if server!='':
title="Enlace encontrado en %s " % (server.capitalize())
else:
title="NO DISPONIBLE"
if title!="NO DISPONIBLE":
itemlist.append(item.clone(title=title,url=url, action="play", server=server))
autoplay.start(itemlist, item)
return itemlist
def verificar_video(url):
codigo=httptools.downloadpage(url).code
if codigo==200:
# Revise de otra forma
data=httptools.downloadpage(url).data
removed = scrapertools.find_single_match(data,'removed(.+)')
if len(removed) != 0:
codigo1=404
else:
codigo1=200
else:
codigo1=200
return codigo1

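The hoster detection in findvideos() above chains one if per server; a table-driven sketch (ours, not what the commit ships) keeps roughly the same behaviour in fewer branches:

SERVER_HINTS = {"ok.ru": "okru", "openload": "openload", "google": "gvideo",
                "rapidvideo": "rapidvideo", "streamango": "streamango"}

def detect_server(url):
    # Return the server id for a hoster URL, or "" if no hint matches.
    for hint, server in SERVER_HINTS.items():
        if hint in url:
            return server
    return ""

print(detect_server("https://www.rapidvideo.com/e/abc"))  # -> rapidvideo
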
View File

@@ -0,0 +1,19 @@
{
"id": "mundiseries",
"name": "Mundiseries",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "https://imgur.com/GdGMFi1.png",
"banner": "https://imgur.com/1bDbYY1.png",
"version": 1,
"changes": [
{
"date": "23/10/2017",
"description": "Primera versión del canal"
}
],
"categories": [
"tvshow"
]
}

View File

@@ -0,0 +1,99 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from channels import filtertools
from platformcode import config, logger
from platformcode import platformtools
from core import scrapertools
from core import servertools
from core.item import Item
from core import httptools
from channels import autoplay
host = "http://mundiseries.com"
list_servers = ['okru']
list_quality = ['default']
def mainlist(item):
logger.info()
itemlist = list()
autoplay.init(item.channel, list_servers, list_quality)
itemlist.append(Item(channel=item.channel, action="lista", title="Series", url=urlparse.urljoin(host, "/lista-de-series")))
autoplay.show_option(item.channel, itemlist)
return itemlist
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<a href="([^"]+)"><img src="([^"]+)" alt="ver ([^"]+) online'
matches = scrapertools.find_multiple_matches(data, patron)
for link, thumbnail, name in matches:
itemlist.append(item.clone(title=name, url=host+link, thumbnail=host+thumbnail, action="temporada"))
return itemlist
def temporada(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
logger.info("preon,:"+data)
patron = '<a href="([^"]+)"><div class="item-temporada"><img alt=".+?" src="([^"]+)"><div .+?>Ver ([^"]+)<\/div><\/a>'
matches = scrapertools.find_multiple_matches(data, patron)
for link,thumbnail,name in matches:
itemlist.append(item.clone(title=name, url=host+link, thumbnail=host+thumbnail,action="episodios",context=autoplay.context))
return itemlist
def episodios(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron_caps = 'href="http:.+?\/mundiseries.+?com([^"]+)" alt="([^"]+) Capitulo ([^"]+) Temporada ([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron_caps)
patron_show='<h1 class="h-responsive center">.+?'
patron_show+='<font color=".+?>([^"]+)<\/a><\/font>'
show = scrapertools.find_single_match(data,patron_show)
for link, name,cap,temp in matches:
if '|' in cap:
cap = cap.replace('|','')
if '|' in temp:
temp = temp.replace('|','')
if '|' in name:
name = name.replace('|','')
title = "%sx%s %s"%(temp, str(cap).zfill(2),name)
url=host+link
itemlist.append(Item(channel=item.channel, action="findvideos",
title=title, url=url, show=show))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="Añadir Temporada/Serie a la biblioteca de Kodi", url=item.url,
action="add_serie_to_library", extra="episodios", show=show))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
id = ""
type = ""
data = httptools.downloadpage(item.url).data
it2 = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
itemlist.extend(servertools.find_video_items(data=data))
for item in it2:
if "###" not in item.url:
item.url += "###" + id + ";" + type
for videoitem in itemlist:
videoitem.channel= item.channel
autoplay.start(itemlist, item)
return itemlist

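findvideos() above leans on servertools.find_video_items(), which scans raw HTML for URLs of known hosters. A pared-down sketch of that flow, using the same core modules this file already imports:

from core import httptools, servertools

def simple_findvideos(item):
    data = httptools.downloadpage(item.url).data
    itemlist = servertools.find_video_items(data=data)
    for videoitem in itemlist:
        # Re-tag the detected links so they play through this channel.
        videoitem.channel = item.channel
    return itemlist
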
View File

@@ -176,7 +176,7 @@ def lista(item):
         patron = '<img.*?width="147" heigh="197".*?src="([^"]+)".*?>.*?.<i class="icon online-play"><\/i>.*?.<h2 ' \
                  'class="title title-.*?">.*?.<a href="([^"]+)" title="([^"]+)">.*?>'
         actual = scrapertools.find_single_match(data,
-                                                '<a href="http:\/\/www.pelisplus.tv\/.*?\/pag-([^p]+)pag-2" '
+                                                '<a href="https:\/\/www.pelisplus.tv\/.*?\/pag-([^p]+)pag-2" '
                                                 'class="page bicon last"><<\/a>')
     else:
         patron = '<img data-original="([^"]+)".*?width="147" heigh="197".*?src=.*?>.*?\n<i class="icon ' \

View File

@@ -25,7 +25,6 @@ color1, color2, color3 = ['0xFFB10021', '0xFFB10021', '0xFFB10004']
 def login():
     url_origen = "https://www.plusdede.com/login?popup=1"
     data = httptools.downloadpage(url_origen, follow_redirects=True).data
-    logger.debug("dataPLUSDEDE=" + data)
     if re.search(r'(?i)%s' % config.get_setting("plusdedeuser", "plusdede"), data):
         return True
@@ -34,12 +33,10 @@ def login():
     post = "_token=" + str(token) + "&email=" + str(
         config.get_setting("plusdedeuser", "plusdede")) + "&password=" + str(
         config.get_setting("plusdedepassword", "plusdede")) + "&app=2131296469"
-    # logger.debug("dataPLUSDEDE_POST="+post)
     url = "https://www.plusdede.com/"
     headers = {"Referer": url, "X-Requested-With": "XMLHttpRequest", "X-CSRF-TOKEN": token}
     data = httptools.downloadpage("https://www.plusdede.com/login", post=post, headers=headers,
                                   replace_headers=False).data
-    logger.debug("PLUSDEDE_DATA=" + data)
     if "redirect" in data:
         return True
     else:
@@ -183,7 +180,6 @@ def generos(item):
     tipo = item.url.replace("https://www.plusdede.com/", "")
     # Descarga la pagina
     data = httptools.downloadpage(item.url).data
-    logger.debug("data=" + data)
     # Extrae las entradas (carpetas)
     data = scrapertools.find_single_match(data,
@@ -198,7 +194,6 @@ def generos(item):
         plot = ""
         # https://www.plusdede.com/pelis?genre_id=1
         url = "https://www.plusdede.com/" + tipo + "?genre_id=" + id_genere
-        logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
         itemlist.append(
             Item(channel=item.channel, action="peliculas", title=title, url=url, thumbnail=thumbnail, plot=plot,
                  fulltitle=title))
@@ -229,11 +224,9 @@ def buscar(item):
     # Descarga la pagina
     headers = {"X-Requested-With": "XMLHttpRequest"}
     data = httptools.downloadpage(item.url, headers=headers).data
-    logger.debug("data=" + data)
     # Extrae las entradas (carpetas)
     json_object = jsontools.load(data)
-    logger.debug("content=" + json_object["content"])
     data = json_object["content"]
     return parse_mixed_results(item, data)
@@ -248,7 +241,6 @@ def parse_mixed_results(item, data):
     patron += '.*?<div class="year">([^<]+)</div>+'
     patron += '.*?<div class="value"><i class="fa fa-star"></i> ([^<]+)</div>'
     matches = re.compile(patron, re.DOTALL).findall(data)
-    logger.debug("PARSE_DATA:" + data)
     if item.tipo == "lista":
         following = scrapertools.find_single_match(data, '<div class="follow-lista-buttons ([^"]+)">')
         data_id = scrapertools.find_single_match(data, 'data-model="10" data-id="([^"]+)">')
@@ -286,7 +278,6 @@ def parse_mixed_results(item, data):
             sectionStr = "docu"
         referer = urlparse.urljoin(item.url, scrapedurl)
         url = urlparse.urljoin(item.url, scrapedurl)
-        logger.debug("PELII_title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
         if item.tipo != "series":
             itemlist.append(Item(channel=item.channel, action="findvideos", title=title, extra=referer, url=url,
                                  thumbnail=thumbnail, plot=plot, fulltitle=fulltitle, fanart=fanart,
@@ -294,7 +285,6 @@ def parse_mixed_results(item, data):
         else:
             referer = item.url
             url = urlparse.urljoin(item.url, scrapedurl)
-            logger.debug("SERIE_title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
             if item.tipo != "pelis":
                 itemlist.append(Item(channel=item.channel, action="episodios", title=title, extra=referer, url=url,
                                      thumbnail=thumbnail, plot=plot, fulltitle=fulltitle, show=title, fanart=fanart,
@@ -304,7 +294,6 @@ def parse_mixed_results(item, data):
                                              '<div class="onclick load-more-icon no-json" data-action="replace" data-url="([^"]+)">')
     if next_page != "":
         url = urlparse.urljoin("https://www.plusdede.com", next_page).replace("amp;", "")
-        logger.debug("URL_SIGUIENTE:" + url)
         itemlist.append(
             Item(channel=item.channel, action="pag_sig", token=item.token, title=">> Página siguiente",
                  extra=item.extra, url=url))
@@ -323,7 +312,6 @@ def siguientes(item):  # No utilizada
     # Descarga la pagina
     data = httptools.downloadpage(item.url).data
-    logger.debug("data=" + data)
     # Extrae las entradas (carpetas)
     bloque = scrapertools.find_single_match(data, '<h2>Siguiendo</h2>(.*?)<div class="box">')
@@ -358,7 +346,6 @@ def siguientes(item):  # No utilizada
             Item(channel=item.channel, action="episodio", title=title, url=url, thumbnail=thumbnail, plot=plot,
                  fulltitle=title, show=title, fanart=fanart, extra=session + "|" + episode))
-        logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
     return itemlist
@@ -369,7 +356,6 @@ def episodio(item):
     # Descarga la pagina
     data = httptools.downloadpage(item.url).data
-    # logger.debug("data="+data)
     session = str(int(item.extra.split("|")[0]))
     episode = str(int(item.extra.split("|")[1]))
@@ -377,7 +363,6 @@ def episodio(item):
     matchestemporadas = re.compile(patrontemporada, re.DOTALL).findall(data)
     for bloque_episodios in matchestemporadas:
-        logger.debug("bloque_episodios=" + bloque_episodios)
         # Extrae los episodios
         patron = '<span class="title defaultPopup" href="([^"]+)"><span class="number">' + episode + ' </span>([^<]+)</span>(\s*</div>\s*<span[^>]*><span[^>]*>[^<]*</span><span[^>]*>[^<]*</span></span><div[^>]*><button[^>]*><span[^>]*>[^<]*</span><span[^>]*>[^<]*</span></button><div class="action([^"]*)" data-action="seen">)?'
@@ -401,7 +386,6 @@ def episodio(item):
         itemlist.append(
             Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot,
                  fulltitle=title, fanart=item.fanart, show=item.show))
-        logger.debug("Abrimos title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
     itemlist2 = []
     for capitulo in itemlist:
@@ -415,11 +399,9 @@ def peliculas(item):
     # Descarga la pagina
     headers = {"X-Requested-With": "XMLHttpRequest"}
     data = httptools.downloadpage(item.url, headers=headers).data
-    logger.debug("data_DEF_PELICULAS=" + data)
     # Extrae las entradas (carpetas)
     json_object = jsontools.load(data)
-    logger.debug("html=" + json_object["content"])
     data = json_object["content"]
     return parse_mixed_results(item, data)
@@ -432,24 +414,18 @@ def episodios(item):
     # Descarga la pagina
     idserie = ''
     data = httptools.downloadpage(item.url).data
-    # logger.debug("dataEPISODIOS="+data)
     patrontemporada = '<ul.*?<li class="season-header" >([^<]+)<(.*?)\s+</ul>'
     matchestemporadas = re.compile(patrontemporada, re.DOTALL).findall(data)
-    logger.debug(matchestemporadas)
     idserie = scrapertools.find_single_match(data, 'data-model="5" data-id="(\d+)"')
     token = scrapertools.find_single_match(data, '_token" content="([^"]+)"')
     if (config.get_platform().startswith("xbmc") or config.get_platform().startswith("kodi")):
         itemlist.append(Item(channel=item.channel, action="infosinopsis", title="INFO / SINOPSIS", url=item.url,
                              thumbnail=item.thumbnail, fanart=item.fanart, folder=False))
     for nombre_temporada, bloque_episodios in matchestemporadas:
-        logger.debug("nombre_temporada=" + nombre_temporada)
-        logger.debug("bloque_episodios=" + bloque_episodios)
-        logger.debug("id_serie=" + idserie)
         # Extrae los episodios
         patron_episodio = '<li><a href="#"(.*?)</a></li>'
         # patron = '<li><a href="#" data-id="([^"]*)".*?data-href="([^"]+)">\s*<div class="name">\s*<span class="num">([^<]+)</span>\s*([^<]+)\s*</div>.*?"show-close-footer episode model([^"]+)"'
         matches = re.compile(patron_episodio, re.DOTALL).findall(bloque_episodios)
-        # logger.debug(matches)
         for data_episodio in matches:
             scrapeid = scrapertools.find_single_match(data_episodio, '<li><a href="#" data-id="([^"]*)"')
@@ -462,7 +438,6 @@ def episodios(item):
             title = nombre_temporada.replace("Temporada ", "").replace("Extras de la serie", "Extras 0").replace(" ",
                 "") + "x" + numero + " " + scrapertools.htmlclean(
                 scrapedtitle)
-            logger.debug("CAP_VISTO:" + visto)
             if visto.strip() == "seen":
                 title = "[visto] " + title
@@ -478,7 +453,6 @@ def episodios(item):
                 Item(channel=item.channel, action="findvideos", nom_serie=item.title, tipo="5", title=title, url=url,
                      thumbnail=thumbnail, plot=plot, fulltitle=title, fanart=fanart, show=item.show))
-            logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
     if config.get_videolibrary_support():
         # con año y valoracion la serie no se puede actualizar correctamente, si ademas cambia la valoracion, creara otra carpeta
@@ -540,7 +514,6 @@ def parse_listas(item, bloque_lista):
             thumbnail = ""
         itemlist.append(
             Item(channel=item.channel, action="peliculas", token=item.token, tipo="lista", title=title, url=url))
-        logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "], tipo =[lista]")
     nextpage = scrapertools.find_single_match(bloque_lista,
                                               '<div class="onclick load-more-icon no-json" data-action="replace" data-url="([^"]+)"')
@@ -569,13 +542,10 @@ def listas(item):
     patron = '<div class="content">\s*<h2>Listas populares(.*?)</div>\s*</div>\s*</div>\s*</div>\s*</div>'
     data = httptools.downloadpage(item.url).data
-    logger.debug("dataSINHEADERS=" + data)
     item.token = scrapertools.find_single_match(data, '_token" content="([^"]+)"').strip()
-    logger.debug("token_LISTA_" + item.token)
     bloque_lista = scrapertools.find_single_match(data, patron)
-    logger.debug("bloque_LISTA" + bloque_lista)
     return parse_listas(item, bloque_lista)
@@ -585,7 +555,6 @@ def lista_sig(item):
     headers = {"X-Requested-With": "XMLHttpRequest"}
     data = httptools.downloadpage(item.url, headers=headers).data
-    logger.debug("data=" + data)
     return parse_listas(item, data)
@@ -595,7 +564,6 @@ def pag_sig(item):
     headers = {"X-Requested-With": "XMLHttpRequest"}
     data = httptools.downloadpage(item.url, headers=headers).data
-    logger.debug("data=" + data)
     return parse_mixed_results(item, data)
@@ -605,8 +573,6 @@ def findvideos(item, verTodos=False):
     # Descarga la pagina
     data = httptools.downloadpage(item.url).data
-    logger.info("URL:" + item.url + " DATA=" + data)
-    # logger.debug("data="+data)
     data_model = scrapertools.find_single_match(data, 'data-model="([^"]+)"')
     data_id = scrapertools.find_single_match(data, 'data-id="([^"]+)"')
@@ -616,7 +582,6 @@ def findvideos(item, verTodos=False):
     url = "https://www.plusdede.com/aportes/" + data_model + "/" + data_id + "?popup=1"
     data = httptools.downloadpage(url).data
-    logger.debug("URL:" + url + " dataLINKS=" + data)
     token = scrapertools.find_single_match(data, '_token" content="([^"]+)"')
     patron = 'target="_blank" (.*?)</a>'
@@ -628,7 +593,6 @@ def findvideos(item, verTodos=False):
         itemlist.append(Item(channel=item.channel, action="infosinopsis", title="INFO / SINOPSIS", url=item.url,
                              thumbnail=item.thumbnail, fanart=item.fanart, folder=False))
-    logger.debug("TRAILER_YOUTUBE:" + trailer)
     itemlist.append(Item(channel=item.channel, action="play", title="TRAILER", url=item.url, trailer=trailer,
                          thumbnail=item.thumbnail, fanart=item.fanart, folder=False))
@@ -637,9 +601,6 @@ def findvideos(item, verTodos=False):
         item.channel)  # 0:no, 1:valoracion, 2:idioma, 3:calidad, 4:idioma+calidad, 5:idioma+valoracion, 6:idioma+calidad+valoracion
     showlinks = config.get_setting("plusdedeshowlinks", item.channel)  # 0:todos, 1:ver online, 2:descargar
-    # sortlinks = int(sortlinks) if sortlinks != '' and sortlinks !="No" else 0
-    # showlinks = int(showlinks) if showlinks != '' and showlinks !="No" else 0
     if sortlinks != '' and sortlinks != "No":
         sortlinks = int(sortlinks)
     else:
@@ -651,14 +612,13 @@ def findvideos(item, verTodos=False):
         showlinks = 0
     for match in matches:
-        # logger.debug("match="+match)
         jdown = scrapertools.find_single_match(match, '<span class="fa fa-download"></span>([^<]+)')
         if (showlinks == 1 and jdown != '') or (
                 showlinks == 2 and jdown == ''):  # Descartar enlaces veronline/descargar
             continue
         idioma_1 = ""
-        idiomas = re.compile('<img src="https://cdn.plusdede.com/images/flags/([^"]+).png', re.DOTALL).findall(match)
+        idiomas = re.compile('<img src="https://cd.*?plusdede.com/images/flags/([^"]+).png', re.DOTALL).findall(match)
         idioma_0 = idiomas[0]
         if len(idiomas) > 1:
             idioma_1 = idiomas[1]
@@ -670,16 +630,12 @@ def findvideos(item, verTodos=False):
         calidad_video = scrapertools.find_single_match(match,
                                                        '<span class="fa fa-video-camera"></span>(.*?)</div>').replace(
             " ", "").replace("\n", "")
-        logger.debug("calidad_video=" + calidad_video)
         calidad_audio = scrapertools.find_single_match(match,
                                                        '<span class="fa fa-headphones"></span>(.*?)</div>').replace(
             " ", "").replace("\n", "")
-        logger.debug("calidad_audio=" + calidad_audio)
         thumb_servidor = scrapertools.find_single_match(match, '<img src="([^"]+)">')
-        logger.debug("thumb_servidor=" + thumb_servidor)
         nombre_servidor = scrapertools.find_single_match(thumb_servidor, "hosts/([^\.]+).png")
-        logger.debug("nombre_servidor=" + nombre_servidor)
         if jdown != '':
             title = "Download " + nombre_servidor + " (" + idioma + ") (Calidad " + calidad_video.strip() + ", audio " + calidad_audio.strip() + ")"
@@ -696,7 +652,6 @@ def findvideos(item, verTodos=False):
         url = urlparse.urljoin(item.url, scrapertools.find_single_match(match, 'href="([^"]+)"'))
         thumbnail = thumb_servidor
         plot = ""
-        logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
         if sortlinks > 0:
             # orden1 para dejar los "downloads" detras de los "ver" al ordenar
             # orden2 segun configuración
@@ -788,13 +743,10 @@ def play(item):
     headers = {'Referer': item.extra}
     data = httptools.downloadpage(item.url, headers=headers).data
-    # logger.debug("dataLINK="+data)
     url = scrapertools.find_single_match(data,
                                          '<a href="([^"]+)" target="_blank"><button class="btn btn-primary">visitar enlace</button>')
     url = urlparse.urljoin("https://www.plusdede.com", url)
-    # logger.debug("DATA_LINK_FINAL:"+url)
-    logger.debug("URL_PLAY:" + url)
     headers = {'Referer': item.url}
     media_url = httptools.downloadpage(url, headers=headers, follow_redirects=False).headers.get("location")
     # logger.info("media_url="+media_url)
@@ -808,7 +760,6 @@ def play(item):
         videoitem.channel = item.channel
     # Marcar como visto
-    logger.debug(item)
     checkseen(item)
     return itemlist
@@ -827,7 +778,6 @@ def checkseen(item):
         tipo_str = "pelis"
     headers = {"Referer": "https://www.plusdede.com/" + tipo_str, "X-Requested-With": "XMLHttpRequest",
                "X-CSRF-TOKEN": item.token}
-    logger.debug("Entrando a checkseen " + url_temp + item.token)
     data = httptools.downloadpage(url_temp, post="id=" + item.idtemp, headers=headers, replace_headers=True).data
     return True
@@ -836,7 +786,6 @@ def infosinopsis(item):
     logger.info()
     data = httptools.downloadpage(item.url).data
-    logger.debug("SINOPSISdata=" + data)
     scrapedtitle = scrapertools.find_single_match(data, '<div class="media-title">([^<]+)</div>')
     scrapedvalue = scrapertools.find_single_match(data, '<span class="value">([^<]+)</span>')
@@ -845,11 +794,8 @@ def infosinopsis(item):
     scrapedduration = scrapertools.htmlclean(scrapertools.find_single_match(data,
                                                                             '<strong>Duración</strong>\s*<div class="mini-content">([^<]+)</div>').strip().replace(
         " ", "").replace("\n", ""))
-    logger.debug(scrapedduration)
     scrapedplot = scrapertools.find_single_match(data, '<div class="plot expandable">([^<]+)<div').strip()
-    logger.debug("SINOPSISdataplot=" + scrapedplot)
     generos = scrapertools.find_single_match(data, '<strong>Género</strong>\s*<ul>(.*?)</ul>')
-    logger.debug("generos=" + generos)
     scrapedgenres = re.compile('<li>([^<]+)</li>', re.DOTALL).findall(generos)
     scrapedcasting = re.compile(
         '<a href="https://www.plusdede.com/star/[^"]+"><div class="text-main">([^<]+)</div></a>\s*<div class="text-sub">\s*([^<]+)</div>',
@@ -954,7 +900,6 @@ def plusdede_check(item):
     if item.tipo_esp == "lista":
         url_temp = "https://www.plusdede.com/listas/addmediapopup/" + item.tipo + "/" + item.idtemp + "?popup=1"
         data = httptools.downloadpage(url_temp).data
-        logger.debug("DATA_CHECK_LISTA:" + data)
         patron = '<div class="lista model" data-model="10" data-id="([^"]+)">+'
         patron += '.*?<a href="/lista/[^"]+">([^<]+)</a>+'
@@ -986,8 +931,6 @@ def plusdede_check(item):
                "X-CSRF-TOKEN": item.token}
     data = httptools.downloadpage(url_temp, post="id=" + item.idtemp, headers=headers,
                                   replace_headers=True).data.strip()
-    logger.debug("URL_PLUSDEDECHECK_DATA=" + url_temp + " ITEM:TIPO=" + item.tipo)
-    logger.debug("PLUSDEDECHECK_DATA=" + data)
     dialog = platformtools
     dialog.ok = platformtools.dialog_ok
     if data == "1":
@@ -1002,4 +945,4 @@ def plusdede_check(item):
     elif item.tipo_esp == "add_list":
         dialog.ok('SUCCESS', 'Añadido a la lista!')
     else:
         dialog.ok('ERROR', 'No se pudo realizar la acción!')

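The sortlinks/showlinks handling kept in findvideos() above turns Kodi settings that may arrive as '' or "No" into ints; the commit drops the old commented-out one-liners in favour of explicit if/else. The same guard as a small helper (our name, not the repo's):

def setting_to_int(value, default=0):
    # Kodi settings come back as strings; '' and "No" both mean "disabled".
    return int(value) if value not in ('', 'No') else default

print(setting_to_int('3'))   # -> 3
print(setting_to_int('No'))  # -> 0
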
View File

@@ -311,7 +311,6 @@ def findvideos(item):
d=c[0].rstrip( ) d=c[0].rstrip( )
d=d.lstrip( ) d=d.lstrip( )
list_links[i].server=d list_links[i].server=d
autoplay.start(list_links, item) autoplay.start(list_links, item)
return list_links return list_links
@@ -319,7 +318,6 @@ def findvideos(item):
def play(item): def play(item):
logger.info("%s - %s = %s" % (item.show, item.title, item.url)) logger.info("%s - %s = %s" % (item.show, item.title, item.url))
if item.url.startswith(HOST): if item.url.startswith(HOST):
data = httptools.downloadpage(item.url).data data = httptools.downloadpage(item.url).data

View File

@@ -78,13 +78,14 @@ def episodios(item):
     itemlist = []
     data = httptools.downloadpage(item.url).data
+    logger.debug("info %s " % data)
     # obtener el numero total de episodios
     total_episode = 0
     patron_caps = '<li><span>Capitulo ([^"]+)\:<\/span><[^"]+"(.+?)">([^"]+)<[^"]+<\/li>'
     matches = scrapertools.find_multiple_matches(data, patron_caps)
     # data_info = scrapertools.find_single_match(data, '<div class="info">.+?<\/div><\/div>')
-    patron_info = '<img src="([^"]+)">.+?<\/span>([^"]+)<\/p><p><span>I.+?Reseña: <\/span>(.+?)<\/p><\/div>'
+    patron_info = '<img src="([^"]+)">.+?</span>(.*?)</p>.*?<h2>Reseña:</h2><p>(.*?)</p>'
     scrapedthumbnail, show, scrapedplot = scrapertools.find_single_match(data, patron_info)
     scrapedthumbnail = host + scrapedthumbnail

View File

@@ -381,6 +381,7 @@ def findvideos(item):
     item_json.show = item.library_filter_show.get(nom_canal, "")
     # Ejecutamos find_videos, del canal o común
+    item_json.contentChannel = 'videolibrary'
     if hasattr(channel, 'findvideos'):
         from core import servertools
         list_servers = getattr(channel, 'findvideos')(item_json)

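With contentChannel set above, a channel's findvideos() can tell whether it was invoked from the addon menus or from the Kodi library. A hypothetical channel-side branch (names ours, not the repo's):

def findvideos_routed(item, scrape_links):
    itemlist = scrape_links(item)
    if getattr(item, 'contentChannel', '') == 'videolibrary':
        # Library playback: keep the episode's stored metadata on each link.
        for videoitem in itemlist:
            videoitem.infoLabels = item.infoLabels
    return itemlist
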
Binary file not shown (new image, 18 KiB)

Binary file not shown (new image, 110 KiB)

Binary file not shown (new image, 16 KiB)

Binary file not shown (new image, 75 KiB)

View File

@@ -33,11 +33,11 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
                'Cookie': ''}
     data = httptools.downloadpage(page_url, headers=headers, replace_headers=True).data
     data = data.replace("\n", "")
-    cgi_counter = scrapertools.find_single_match(data, '(?is)src=.(https://www.flashx.tv/counter.cgi.*?fx=[0-9a-zA-Z=]+)')
+    cgi_counter = scrapertools.find_single_match(data, """(?is)src=.(https://www.flashx.tv/counter.cgi.*?[^(?:'|")]+)""")
     cgi_counter = cgi_counter.replace("%0A", "").replace("%22", "")
     playnow = scrapertools.find_single_match(data, 'https://www.flashx.tv/dl[^"]+')
     # Para obtener el f y el fxfx
-    js_fxfx = scrapertools.find_single_match(data, '(?is)src=.(https://www.flashx.tv/js/code.js.*?=[0-9]+)')
+    js_fxfx = scrapertools.find_single_match(data, """(?is)src=.(https://www.flashx.tv/js/code.js.*?[^(?:'|")]+)""")
     data_fxfx = httptools.downloadpage(js_fxfx).data
     mfxfx = scrapertools.find_single_match(data_fxfx, 'get.*?({.*?})').replace("'", "").replace(" ", "")
     matches = scrapertools.find_multiple_matches(mfxfx, '(\w+):(\w+)')

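The loosened patterns above stop anchoring on fx=... or trailing digits and instead consume everything up to the closing quote. A quick check against an invented src attribute (the sample markup is ours):

import re

sample = '<script src="https://www.flashx.tv/counter.cgi?c=abc&fx=XYZ=="></script>'
pattern = r"""(?is)src=.(https://www.flashx.tv/counter.cgi.*?[^(?:'|")]+)"""
print(re.search(pattern, sample).group(1))
# -> https://www.flashx.tv/counter.cgi?c=abc&fx=XYZ==
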
View File

@@ -23,6 +23,8 @@ def test_video_exists(page_url):
     if "Object not found" in response.data:
         return False, "[Rapidvideo] El archivo no existe o ha sido borrado"
+    if response.code == 500:
+        return False, "[Rapidvideo] Error de servidor, inténtelo más tarde."
     return True, ""