Compare commits

38 Commits

| Author | SHA1 | Date |
|---|---|---|
| | 162772e9dc | |
| | 60d61f861b | |
| | cd1c7b692a | |
| | 10abe4a6d4 | |
| | b0fa5e8a75 | |
| | 54d6a943f5 | |
| | 44df5b6036 | |
| | ae67d9b5ee | |
| | 895d14760d | |
| | b0b4b218f0 | |
| | 348787ae97 | |
| | 0f7c11efad | |
| | ae7a4a8d83 | |
| | fc58c717eb | |
| | b3a19f3d20 | |
| | 0cac09eef5 | |
| | 9a1effbe25 | |
| | 44145660d0 | |
| | aec2674316 | |
| | 09de611aae | |
| | 74598154c2 | |
| | 7ab9c8bb29 | |
| | 14178974a0 | |
| | c43162cbc2 | |
| | aa76986a51 | |
| | 9aae0e7a1b | |
| | e1fe886602 | |
| | 19812c83a9 | |
| | cabc2458e3 | |
| | 336376ecef | |
| | af06269e39 | |
| | f37d18ee0a | |
| | 6fefc3b048 | |
| | ab5fe41403 | |
| | 15463ea0f8 | |
| | badf40573c | |
| | c80793e3e0 | |
| | cbc0ff0bd0 | |
@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="2.3.1" provider-name="Alfa Addon">
<addon id="plugin.video.alfa" name="Alfa" version="2.3.2" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -19,9 +19,15 @@
</assets>
<news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
» ohlatino » animemovil
» pelisplus » flashx
» cinetux » animemovil
» anitoonstv » cartoonlatino
» seriesblanco » damimados
» mundiseries » serieslan
» cinetux » animemovil
» plusdede » pelisplus
» rapidvideo » flashx
¤ arreglos internos
[COLOR green]Gracias a [COLOR yellow]Danielr460[/COLOR] por su colaboración en esta versión[/COLOR]
</news>
<description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>
<summary lang="en">Browse web pages using Kodi</summary>

@@ -86,7 +86,7 @@ def recientes(item):
tipo = "tvshow"
show = contentTitle
action = "episodios"
context = renumbertools.context
context = renumbertools.context(item)
if item.extra == "recientes":
action = "findvideos"
context = ""
@@ -96,7 +96,7 @@ def recientes(item):
action = "peliculas"
if not thumb.startswith("http"):
thumb = "http:%s" % thumb

action ="findvideos"
infoLabels = {'filtro': {"original_language": "ja"}.items()}
itemlist.append(item.clone(action=action, title=title, url=url, thumbnail=thumb, text_color=color3,
contentTitle=contentTitle, contentSerieName=show, infoLabels=infoLabels,

@@ -1,4 +1,4 @@
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-

import re

@@ -149,34 +149,14 @@ def findvideos(item):
for server, quality, url in itemla:
if "Calidad Alta" in quality:
quality = quality.replace("Calidad Alta", "HQ")
server = server.lower().strip()
if "ok" == server:
server = 'okru'
if "netu" == server:
continue
if " Calidad media - Carga mas rapido" in quality:
quality = quality.replace(" Calidad media - Carga mas rapido", "360p")
server = server.lower().strip()
if "ok" == server:
server = 'okru'
itemlist.append(item.clone(url=url, action="play", server=server, contentQuality=quality,
thumbnail=scrapedthumbnail, plot=scrapedplot,
title="Enlace encontrado en %s: [%s]" % (server.capitalize(), quality)))

autoplay.start(itemlist, item)
return itemlist

def play(item):
logger.info()

itemlist = []

# Buscamos video por servidor ...
devuelve = servertools.findvideosbyserver(item.url, item.server)

if not devuelve:
# ...sino lo encontramos buscamos en todos los servidores disponibles
devuelve = servertools.findvideos(item.url, skip=True)

if devuelve:
# logger.debug(devuelve)
itemlist.append(Item(channel=item.channel, title=item.contentTitle, action="play", server=devuelve[0][2],
url=devuelve[0][1], thumbnail=item.thumbnail))

return itemlist

@@ -7,6 +7,7 @@ from core import jsontools
from core.item import Item
from platformcode import config, logger
from platformcode import platformtools
from platformcode import launcher

__channel__ = "autoplay"

@@ -78,7 +79,20 @@ def start(itemlist, item):
:return: intenta autoreproducir, en caso de fallar devuelve el itemlist que recibio en un principio
'''
logger.info()

for videoitem in itemlist:
#Nos dice de donde viene si del addon o videolibrary
if item.contentChannel=='videolibrary':
videoitem.contentEpisodeNumber=item.contentEpisodeNumber
videoitem.contentPlot=item.contentPlot
videoitem.contentSeason=item.contentSeason
videoitem.contentSerieName=item.contentSerieName
videoitem.contentTitle=item.contentTitle
videoitem.contentType=item.contentType
videoitem.episode_id=item.episode_id
videoitem.hasContentDetails=item.hasContentDetails
videoitem.infoLabels=item.infoLabels
videoitem.thumbnail=item.thumbnail
#videoitem.title=item.title
if not config.is_xbmc():
#platformtools.dialog_notification('AutoPlay ERROR', 'Sólo disponible para XBMC/Kodi')
return itemlist
@@ -261,8 +275,12 @@ def start(itemlist, item):
else:
videoitem = resolved_item[0]

# si no directamente reproduce
platformtools.play_video(videoitem)
# si no directamente reproduce y marca como visto
from platformcode import xbmc_videolibrary
xbmc_videolibrary.mark_auto_as_watched(item)
#platformtools.play_video(videoitem)
videoitem.contentChannel='videolibrary'
launcher.run(videoitem)

try:
if platformtools.is_playing():

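The autoplay hooks used by the new channels in this compare follow the same three calls that appear throughout these diffs. Below is a minimal sketch of the channel-side wiring, assuming the `autoplay` module API shown here (`init`, `show_option`, `context`, `start`) and running inside the addon environment; server and quality names are illustrative.

```python
# Sketch of how a channel plugs into autoplay, based on the calls visible in
# this diff (danimados/mundiseries). Not the committed code of any one channel.
from channels import autoplay
from core.item import Item

list_servers = ['openload', 'okru']   # servers this channel can resolve (assumed)
list_quality = ['default']


def mainlist(item):
    itemlist = list()
    # Register the channel's servers/qualities so autoplay can rank them
    autoplay.init(item.channel, list_servers, list_quality)
    itemlist.append(Item(channel=item.channel, action="lista", title="Series",
                         url="http://example.com/", context=autoplay.context))
    # Adds the autoplay configuration entry to the channel menu
    autoplay.show_option(item.channel, itemlist)
    return itemlist


def findvideos(item):
    itemlist = []
    # ... scrape playable links into itemlist as action="play" items ...
    # Hand the links to autoplay; if it cannot play, it returns the list unchanged
    autoplay.start(itemlist, item)
    return itemlist
```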
@@ -9,6 +9,7 @@ from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channels import autoplay

host = "http://www.cartoon-latino.com/"
from channels import autoplay
@@ -150,7 +151,6 @@ def episodios(item):

if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="Añadir " + show + " a la videoteca", url=item.url,
action="add_serie_to_library", extra="episodios", show=show))

return itemlist
@@ -185,29 +185,5 @@ def findvideos(item):
server1 = server
itemlist.append(item.clone(url=url, action="play", server=server1,
title="Enlace encontrado en %s " % (server1.capitalize())))

autoplay.start(itemlist, item)
return itemlist

def play(item):
logger.info()

itemlist = []

# Buscamos video por servidor ...
devuelve = servertools.findvideosbyserver(item.url, item.server)

if not devuelve:
# ...sino lo encontramos buscamos en todos los servidores disponibles
devuelve = servertools.findvideos(item.url, skip=True)

if devuelve:
# logger.debug(devuelve)
itemlist.append(Item(channel=item.channel, title=item.contentTitle, action="play", server=devuelve[0][2],
url=devuelve[0][1], thumbnail=item.thumbnail, folder=False))

return itemlist

@@ -28,9 +28,9 @@ def mainlist(item):
itemlist = []
item.viewmode = viewmode

data = httptools.downloadpage(CHANNEL_HOST).data
total = scrapertools.find_single_match(data, "TENEMOS\s<b>(.*?)</b>")
titulo = "Peliculas"
data = httptools.downloadpage(CHANNEL_HOST + "pelicula").data
total = scrapertools.find_single_match(data, "Películas</h1><span>(.*?)</span>")
titulo = "Peliculas (%s)" %total
itemlist.append(item.clone(title=titulo, text_color=color2, action="", text_bold=True))
itemlist.append(item.clone(action="peliculas", title=" Novedades", url=CHANNEL_HOST + "pelicula",
thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres"

plugin.video.alfa/channels/danimados.json (Normal file, 19 lines)
@@ -0,0 +1,19 @@
{
"id": "danimados",
"name": "Danimados",
"active": true,
"adult": false,
"language": ["lat"],
"thumbnail": "https://imgur.com/kU5Lx1S.png",
"banner": "https://imgur.com/xG5xqBq.png",
"version": 1,
"changes": [
{
"date": "24/10/2017",
"description": "Primera version del canal"
}
],
"categories": [
"tvshow"
]
}

plugin.video.alfa/channels/danimados.py (Normal file, 186 lines)
@@ -0,0 +1,186 @@
# -*- coding: utf-8 -*-

import re

from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channels import autoplay

host = "http://www.danimados.com/"

list_servers = ['openload',
'okru',
'rapidvideo'
]
list_quality = ['default']

def mainlist(item):
logger.info()

thumb_series = get_thumb("channels_tvshow.png")
autoplay.init(item.channel, list_servers, list_quality)

itemlist = list()

itemlist.append(Item(channel=item.channel, action="mainpage", title="Categorías", url=host,
thumbnail=thumb_series))
itemlist.append(Item(channel=item.channel, action="mainpage", title="Más Populares", url=host,
thumbnail=thumb_series))
#itemlist.append(Item(channel=item.channel, action="movies", title="Peliculas Animadas", url=host,
# thumbnail=thumb_series))
autoplay.show_option(item.channel, itemlist)
return itemlist

"""
def search(item, texto):
logger.info()
texto = texto.replace(" ","+")
item.url = item.url+texto
if texto!='':
return lista(item)
"""

def mainpage(item):
logger.info()

itemlist = []

data1 = httptools.downloadpage(item.url).data
data1 = re.sub(r"\n|\r|\t|\s{2}| ", "", data1)
if item.title=="Más Populares":
patron_sec='<a class="lglossary" data-type.+?>(.+?)<\/ul>'
patron='<img .+? src="([^"]+)".+?<a href="([^"]+)".+?>([^"]+)<\/a>' #scrapedthumbnail, #scrapedurl, #scrapedtitle
if item.title=="Categorías":
patron_sec='<ul id="main_header".+?>(.+?)<\/ul><\/div>'
patron='<a href="([^"]+)">([^"]+)<\/a>'#scrapedurl, #scrapedtitle

data = scrapertools.find_single_match(data1, patron_sec)

matches = scrapertools.find_multiple_matches(data, patron)
if item.title=="Géneros" or item.title=="Categorías":
for scrapedurl, scrapedtitle in matches:
if "Películas Animadas"!=scrapedtitle:
itemlist.append(
Item(channel=item.channel, title=scrapedtitle, url=scrapedurl, action="lista"))
return itemlist
else:
for scraped1, scraped2, scrapedtitle in matches:
scrapedthumbnail=scraped1
scrapedurl=scraped2
itemlist.append(
Item(channel=item.channel, title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, action="episodios",
show=scrapedtitle))
tmdb.set_infoLabels(itemlist)
return itemlist
return itemlist

def lista(item):
logger.info()

itemlist = []

data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
data_lista = scrapertools.find_single_match(data, '<div class="items">(.+?)<\/div><\/div><div class=.+?>')
patron = '<img src="([^"]+)" alt="([^"]+)">.+?<a href="([^"]+)">.+?<div class="texto">(.+?)<\/div>'
#scrapedthumbnail,#scrapedtitle, #scrapedurl, #scrapedplot
matches = scrapertools.find_multiple_matches(data_lista, patron)
for scrapedthumbnail,scrapedtitle, scrapedurl, scrapedplot in matches:
itemlist.append(
item.clone(title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
context=autoplay.context,plot=scrapedplot, action="episodios", show=scrapedtitle))
tmdb.set_infoLabels(itemlist)
return itemlist

def episodios(item):
logger.info()

itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)

data_lista = scrapertools.find_single_match(data,
'<ul class="episodios">(.+?)<\/ul><\/div><\/div><\/div>')
show = item.title
patron_caps = '<img src="([^"]+)"><\/a><\/div><div class=".+?">([^"]+)<\/div>.+?<a .+? href="([^"]+)">([^"]+)<\/a>'
#scrapedthumbnail,#scrapedtempepi, #scrapedurl, #scrapedtitle
matches = scrapertools.find_multiple_matches(data_lista, patron_caps)
for scrapedthumbnail, scrapedtempepi, scrapedurl, scrapedtitle in matches:
tempepi=scrapedtempepi.split(" - ")
if tempepi[0]=='Pel':
tempepi[0]=0
title="{0}x{1} - ({2})".format(tempepi[0], tempepi[1].zfill(2), scrapedtitle)
itemlist.append(Item(channel=item.channel, thumbnail=scrapedthumbnail,
action="findvideos", title=title, url=scrapedurl, show=show))

if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="[COLOR blue]Añadir " + show + " a la videoteca[/COLOR]", url=item.url,
action="add_serie_to_library", extra="episodios", show=show))

return itemlist

def findvideos(item):
logger.info()

itemlist = []

data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
data = scrapertools.find_single_match(data,
'<div id="playex" .+?>(.+?)<\/nav><\/div><\/div>')
patron='src="(.+?)"'
itemla = scrapertools.find_multiple_matches(data,patron)
for i in range(len(itemla)):
#for url in itemla:
url=itemla[i]
#verificar existencia del video (testing)
codigo=verificar_video(itemla[i])
if codigo==200:
if "ok.ru" in url:
server='okru'
else:
server=''
if "openload" in url:
server='openload'
if "google" in url:
server='gvideo'
if "rapidvideo" in url:
server='rapidvideo'
if "streamango" in url:
server='streamango'
if server!='':
title="Enlace encontrado en %s " % (server.capitalize())
else:
title="NO DISPONIBLE"
if title!="NO DISPONIBLE":
itemlist.append(item.clone(title=title,url=url, action="play", server=server))

autoplay.start(itemlist, item)
return itemlist

def verificar_video(url):
codigo=httptools.downloadpage(url).code
if codigo==200:
# Revise de otra forma
data=httptools.downloadpage(url).data
removed = scrapertools.find_single_match(data,'removed(.+)')
if len(removed) != 0:
codigo1=404
else:
codigo1=200
else:
codigo1=200
return codigo1

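The server detection in `findvideos` above walks an if-chain over URL substrings. The same mapping can be expressed as a lookup table; a minimal standalone sketch, where the substring-to-server pairs come from the commit above and the helper name is hypothetical:

```python
# Hypothetical helper equivalent to the if-chain in danimados.findvideos;
# the substring -> server pairs are the ones used in the diff above.
SERVER_BY_SUBSTRING = {
    "ok.ru": "okru",
    "openload": "openload",
    "google": "gvideo",
    "rapidvideo": "rapidvideo",
    "streamango": "streamango",
}


def detect_server(url):
    for substring, server in SERVER_BY_SUBSTRING.items():
        if substring in url:
            return server
    return ""  # unknown host: the channel marks such links as "NO DISPONIBLE"


print(detect_server("https://openload.co/embed/abc"))  # -> "openload"
```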
plugin.video.alfa/channels/mundiseries.json (Normal file, 19 lines)
@@ -0,0 +1,19 @@
{
"id": "mundiseries",
"name": "Mundiseries",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "https://imgur.com/GdGMFi1.png",
"banner": "https://imgur.com/1bDbYY1.png",
"version": 1,
"changes": [
{
"date": "23/10/2017",
"description": "Primera versión del canal"
}
],
"categories": [
"tvshow"
]
}

plugin.video.alfa/channels/mundiseries.py (Normal file, 99 lines)
@@ -0,0 +1,99 @@
# -*- coding: utf-8 -*-

import re
import urlparse

from channels import filtertools
from platformcode import config, logger
from platformcode import platformtools
from core import scrapertools
from core import servertools
from core.item import Item
from core import httptools
from channels import autoplay

host = "http://mundiseries.com"
list_servers = ['okru']
list_quality = ['default']

def mainlist(item):
logger.info()
itemlist = list()
autoplay.init(item.channel, list_servers, list_quality)

itemlist.append(Item(channel=item.channel, action="lista", title="Series", url=urlparse.urljoin(host, "/lista-de-series")))
autoplay.show_option(item.channel, itemlist)

return itemlist

def lista(item):
logger.info()

itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
patron = '<a href="([^"]+)"><img src="([^"]+)" alt="ver ([^"]+) online'
matches = scrapertools.find_multiple_matches(data, patron)
for link, thumbnail, name in matches:
itemlist.append(item.clone(title=name, url=host+link, thumbnail=host+thumbnail, action="temporada"))
return itemlist

def temporada(item):
logger.info()

itemlist = []

data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
logger.info("preon,:"+data)
patron = '<a href="([^"]+)"><div class="item-temporada"><img alt=".+?" src="([^"]+)"><div .+?>Ver ([^"]+)<\/div><\/a>'
matches = scrapertools.find_multiple_matches(data, patron)
for link,thumbnail,name in matches:
itemlist.append(item.clone(title=name, url=host+link, thumbnail=host+thumbnail,action="episodios",context=autoplay.context))
return itemlist

def episodios(item):
logger.info()

itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
patron_caps = 'href="http:.+?\/mundiseries.+?com([^"]+)" alt="([^"]+) Capitulo ([^"]+) Temporada ([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron_caps)
patron_show='<h1 class="h-responsive center">.+?'
patron_show+='<font color=".+?>([^"]+)<\/a><\/font>'
show = scrapertools.find_single_match(data,patron_show)
for link, name,cap,temp in matches:
if '|' in cap:
cap = cap.replace('|','')
if '|' in temp:
temp = temp.replace('|','')
if '|' in name:
name = name.replace('|','')
title = "%sx%s %s"%(temp, str(cap).zfill(2),name)
url=host+link
itemlist.append(Item(channel=item.channel, action="findvideos",
title=title, url=url, show=show))

if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="Añadir Temporada/Serie a la biblioteca de Kodi", url=item.url,
action="add_serie_to_library", extra="episodios", show=show))
return itemlist

def findvideos(item):
logger.info()

itemlist = []
id = ""
type = ""
data = httptools.downloadpage(item.url).data
it2 = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
itemlist.extend(servertools.find_video_items(data=data))

for item in it2:
if "###" not in item.url:
item.url += "###" + id + ";" + type
for videoitem in itemlist:
videoitem.channel= item.channel
autoplay.start(itemlist, item)
return itemlist

@@ -176,7 +176,7 @@ def lista(item):
patron = '<img.*?width="147" heigh="197".*?src="([^"]+)".*?>.*?.<i class="icon online-play"><\/i>.*?.<h2 ' \
'class="title title-.*?">.*?.<a href="([^"]+)" title="([^"]+)">.*?>'
actual = scrapertools.find_single_match(data,
'<a href="http:\/\/www.pelisplus.tv\/.*?\/pag-([^p]+)pag-2" '
'<a href="https:\/\/www.pelisplus.tv\/.*?\/pag-([^p]+)pag-2" '
'class="page bicon last"><<\/a>')
else:
patron = '<img data-original="([^"]+)".*?width="147" heigh="197".*?src=.*?>.*?\n<i class="icon ' \

@@ -25,7 +25,6 @@ color1, color2, color3 = ['0xFFB10021', '0xFFB10021', '0xFFB10004']
def login():
url_origen = "https://www.plusdede.com/login?popup=1"
data = httptools.downloadpage(url_origen, follow_redirects=True).data
logger.debug("dataPLUSDEDE=" + data)
if re.search(r'(?i)%s' % config.get_setting("plusdedeuser", "plusdede"), data):
return True

@@ -34,12 +33,10 @@ def login():
post = "_token=" + str(token) + "&email=" + str(
config.get_setting("plusdedeuser", "plusdede")) + "&password=" + str(
config.get_setting("plusdedepassword", "plusdede")) + "&app=2131296469"
# logger.debug("dataPLUSDEDE_POST="+post)
url = "https://www.plusdede.com/"
headers = {"Referer": url, "X-Requested-With": "XMLHttpRequest", "X-CSRF-TOKEN": token}
data = httptools.downloadpage("https://www.plusdede.com/login", post=post, headers=headers,
replace_headers=False).data
logger.debug("PLUSDEDE_DATA=" + data)
if "redirect" in data:
return True
else:

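The login flow above posts the scraped `_token` both in the form body and as an `X-CSRF-TOKEN` header. A minimal standalone sketch of that token-then-post sequence, using `requests` purely for illustration (the channel itself goes through `httptools.downloadpage`); URLs, field names, and the success check mirror the diff, while the credentials are placeholders and the token pattern is the same meta-tag pattern scraped elsewhere in this file:

```python
# Sketch of the CSRF login sequence used by the plusdede channel, rewritten
# with requests for clarity. Credentials below are placeholders.
import re
import requests

session = requests.Session()
html = session.get("https://www.plusdede.com/login?popup=1").text
token = re.search(r'_token" content="([^"]+)"', html).group(1)  # CSRF token from the page

post = {"_token": token, "email": "user@example.com", "password": "secret", "app": "2131296469"}
headers = {"Referer": "https://www.plusdede.com/",
           "X-Requested-With": "XMLHttpRequest",
           "X-CSRF-TOKEN": token}
resp = session.post("https://www.plusdede.com/login", data=post, headers=headers)
logged_in = "redirect" in resp.text  # the channel treats a "redirect" payload as success
```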
@@ -183,7 +180,6 @@ def generos(item):
tipo = item.url.replace("https://www.plusdede.com/", "")
# Descarga la pagina
data = httptools.downloadpage(item.url).data
logger.debug("data=" + data)

# Extrae las entradas (carpetas)
data = scrapertools.find_single_match(data,
@@ -198,7 +194,6 @@ def generos(item):
plot = ""
# https://www.plusdede.com/pelis?genre_id=1
url = "https://www.plusdede.com/" + tipo + "?genre_id=" + id_genere
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
itemlist.append(
Item(channel=item.channel, action="peliculas", title=title, url=url, thumbnail=thumbnail, plot=plot,
fulltitle=title))
@@ -229,11 +224,9 @@ def buscar(item):
# Descarga la pagina
headers = {"X-Requested-With": "XMLHttpRequest"}
data = httptools.downloadpage(item.url, headers=headers).data
logger.debug("data=" + data)

# Extrae las entradas (carpetas)
json_object = jsontools.load(data)
logger.debug("content=" + json_object["content"])
data = json_object["content"]

return parse_mixed_results(item, data)
@@ -248,7 +241,6 @@ def parse_mixed_results(item, data):
patron += '.*?<div class="year">([^<]+)</div>+'
patron += '.*?<div class="value"><i class="fa fa-star"></i> ([^<]+)</div>'
matches = re.compile(patron, re.DOTALL).findall(data)
logger.debug("PARSE_DATA:" + data)
if item.tipo == "lista":
following = scrapertools.find_single_match(data, '<div class="follow-lista-buttons ([^"]+)">')
data_id = scrapertools.find_single_match(data, 'data-model="10" data-id="([^"]+)">')
@@ -286,7 +278,6 @@ def parse_mixed_results(item, data):
sectionStr = "docu"
referer = urlparse.urljoin(item.url, scrapedurl)
url = urlparse.urljoin(item.url, scrapedurl)
logger.debug("PELII_title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
if item.tipo != "series":
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, extra=referer, url=url,
thumbnail=thumbnail, plot=plot, fulltitle=fulltitle, fanart=fanart,
@@ -294,7 +285,6 @@ def parse_mixed_results(item, data):
else:
referer = item.url
url = urlparse.urljoin(item.url, scrapedurl)
logger.debug("SERIE_title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
if item.tipo != "pelis":
itemlist.append(Item(channel=item.channel, action="episodios", title=title, extra=referer, url=url,
thumbnail=thumbnail, plot=plot, fulltitle=fulltitle, show=title, fanart=fanart,
@@ -304,7 +294,6 @@ def parse_mixed_results(item, data):
'<div class="onclick load-more-icon no-json" data-action="replace" data-url="([^"]+)">')
if next_page != "":
url = urlparse.urljoin("https://www.plusdede.com", next_page).replace("amp;", "")
logger.debug("URL_SIGUIENTE:" + url)
itemlist.append(
Item(channel=item.channel, action="pag_sig", token=item.token, title=">> Página siguiente",
extra=item.extra, url=url))
@@ -323,7 +312,6 @@ def siguientes(item): # No utilizada

# Descarga la pagina
data = httptools.downloadpage(item.url).data
logger.debug("data=" + data)

# Extrae las entradas (carpetas)
bloque = scrapertools.find_single_match(data, '<h2>Siguiendo</h2>(.*?)<div class="box">')
@@ -358,7 +346,6 @@ def siguientes(item): # No utilizada
Item(channel=item.channel, action="episodio", title=title, url=url, thumbnail=thumbnail, plot=plot,
fulltitle=title, show=title, fanart=fanart, extra=session + "|" + episode))

logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")

return itemlist

@@ -369,7 +356,6 @@ def episodio(item):

# Descarga la pagina
data = httptools.downloadpage(item.url).data
# logger.debug("data="+data)

session = str(int(item.extra.split("|")[0]))
episode = str(int(item.extra.split("|")[1]))
@@ -377,7 +363,6 @@ def episodio(item):
matchestemporadas = re.compile(patrontemporada, re.DOTALL).findall(data)

for bloque_episodios in matchestemporadas:
logger.debug("bloque_episodios=" + bloque_episodios)

# Extrae los episodios
patron = '<span class="title defaultPopup" href="([^"]+)"><span class="number">' + episode + ' </span>([^<]+)</span>(\s*</div>\s*<span[^>]*><span[^>]*>[^<]*</span><span[^>]*>[^<]*</span></span><div[^>]*><button[^>]*><span[^>]*>[^<]*</span><span[^>]*>[^<]*</span></button><div class="action([^"]*)" data-action="seen">)?'
@@ -401,7 +386,6 @@ def episodio(item):
itemlist.append(
Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot,
fulltitle=title, fanart=item.fanart, show=item.show))
logger.debug("Abrimos title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")

itemlist2 = []
for capitulo in itemlist:

@@ -415,11 +399,9 @@ def peliculas(item):
# Descarga la pagina
headers = {"X-Requested-With": "XMLHttpRequest"}
data = httptools.downloadpage(item.url, headers=headers).data
logger.debug("data_DEF_PELICULAS=" + data)

# Extrae las entradas (carpetas)
json_object = jsontools.load(data)
logger.debug("html=" + json_object["content"])
data = json_object["content"]

return parse_mixed_results(item, data)
@@ -432,24 +414,18 @@ def episodios(item):
# Descarga la pagina
idserie = ''
data = httptools.downloadpage(item.url).data
# logger.debug("dataEPISODIOS="+data)
patrontemporada = '<ul.*?<li class="season-header" >([^<]+)<(.*?)\s+</ul>'
matchestemporadas = re.compile(patrontemporada, re.DOTALL).findall(data)
logger.debug(matchestemporadas)
idserie = scrapertools.find_single_match(data, 'data-model="5" data-id="(\d+)"')
token = scrapertools.find_single_match(data, '_token" content="([^"]+)"')
if (config.get_platform().startswith("xbmc") or config.get_platform().startswith("kodi")):
itemlist.append(Item(channel=item.channel, action="infosinopsis", title="INFO / SINOPSIS", url=item.url,
thumbnail=item.thumbnail, fanart=item.fanart, folder=False))
for nombre_temporada, bloque_episodios in matchestemporadas:
logger.debug("nombre_temporada=" + nombre_temporada)
logger.debug("bloque_episodios=" + bloque_episodios)
logger.debug("id_serie=" + idserie)
# Extrae los episodios
patron_episodio = '<li><a href="#"(.*?)</a></li>'
# patron = '<li><a href="#" data-id="([^"]*)".*?data-href="([^"]+)">\s*<div class="name">\s*<span class="num">([^<]+)</span>\s*([^<]+)\s*</div>.*?"show-close-footer episode model([^"]+)"'
matches = re.compile(patron_episodio, re.DOTALL).findall(bloque_episodios)
# logger.debug(matches)
for data_episodio in matches:

scrapeid = scrapertools.find_single_match(data_episodio, '<li><a href="#" data-id="([^"]*)"')
@@ -462,7 +438,6 @@ def episodios(item):
title = nombre_temporada.replace("Temporada ", "").replace("Extras de la serie", "Extras 0").replace(" ",
"") + "x" + numero + " " + scrapertools.htmlclean(
scrapedtitle)
logger.debug("CAP_VISTO:" + visto)
if visto.strip() == "seen":
title = "[visto] " + title

@@ -478,7 +453,6 @@ def episodios(item):
Item(channel=item.channel, action="findvideos", nom_serie=item.title, tipo="5", title=title, url=url,
thumbnail=thumbnail, plot=plot, fulltitle=title, fanart=fanart, show=item.show))

logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")

if config.get_videolibrary_support():
# con año y valoracion la serie no se puede actualizar correctamente, si ademas cambia la valoracion, creara otra carpeta
@@ -540,7 +514,6 @@ def parse_listas(item, bloque_lista):
thumbnail = ""
itemlist.append(
Item(channel=item.channel, action="peliculas", token=item.token, tipo="lista", title=title, url=url))
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "], tipo =[lista]")

nextpage = scrapertools.find_single_match(bloque_lista,
'<div class="onclick load-more-icon no-json" data-action="replace" data-url="([^"]+)"')
@@ -569,13 +542,10 @@ def listas(item):
patron = '<div class="content">\s*<h2>Listas populares(.*?)</div>\s*</div>\s*</div>\s*</div>\s*</div>'

data = httptools.downloadpage(item.url).data
logger.debug("dataSINHEADERS=" + data)

item.token = scrapertools.find_single_match(data, '_token" content="([^"]+)"').strip()
logger.debug("token_LISTA_" + item.token)

bloque_lista = scrapertools.find_single_match(data, patron)
logger.debug("bloque_LISTA" + bloque_lista)

return parse_listas(item, bloque_lista)

@@ -585,7 +555,6 @@ def lista_sig(item):

headers = {"X-Requested-With": "XMLHttpRequest"}
data = httptools.downloadpage(item.url, headers=headers).data
logger.debug("data=" + data)

return parse_listas(item, data)

@@ -595,7 +564,6 @@ def pag_sig(item):

headers = {"X-Requested-With": "XMLHttpRequest"}
data = httptools.downloadpage(item.url, headers=headers).data
logger.debug("data=" + data)

return parse_mixed_results(item, data)

@@ -605,8 +573,6 @@ def findvideos(item, verTodos=False):

# Descarga la pagina
data = httptools.downloadpage(item.url).data
logger.info("URL:" + item.url + " DATA=" + data)
# logger.debug("data="+data)

data_model = scrapertools.find_single_match(data, 'data-model="([^"]+)"')
data_id = scrapertools.find_single_match(data, 'data-id="([^"]+)"')
@@ -616,7 +582,6 @@ def findvideos(item, verTodos=False):
url = "https://www.plusdede.com/aportes/" + data_model + "/" + data_id + "?popup=1"

data = httptools.downloadpage(url).data
logger.debug("URL:" + url + " dataLINKS=" + data)
token = scrapertools.find_single_match(data, '_token" content="([^"]+)"')

patron = 'target="_blank" (.*?)</a>'
@@ -628,7 +593,6 @@ def findvideos(item, verTodos=False):
itemlist.append(Item(channel=item.channel, action="infosinopsis", title="INFO / SINOPSIS", url=item.url,
thumbnail=item.thumbnail, fanart=item.fanart, folder=False))

logger.debug("TRAILER_YOUTUBE:" + trailer)
itemlist.append(Item(channel=item.channel, action="play", title="TRAILER", url=item.url, trailer=trailer,
thumbnail=item.thumbnail, fanart=item.fanart, folder=False))

@@ -637,9 +601,6 @@ def findvideos(item, verTodos=False):
item.channel) # 0:no, 1:valoracion, 2:idioma, 3:calidad, 4:idioma+calidad, 5:idioma+valoracion, 6:idioma+calidad+valoracion
showlinks = config.get_setting("plusdedeshowlinks", item.channel) # 0:todos, 1:ver online, 2:descargar

# sortlinks = int(sortlinks) if sortlinks != '' and sortlinks !="No" else 0
# showlinks = int(showlinks) if showlinks != '' and showlinks !="No" else 0

if sortlinks != '' and sortlinks != "No":
sortlinks = int(sortlinks)
else:
@@ -651,14 +612,13 @@ def findvideos(item, verTodos=False):
showlinks = 0

for match in matches:
# logger.debug("match="+match)

jdown = scrapertools.find_single_match(match, '<span class="fa fa-download"></span>([^<]+)')
if (showlinks == 1 and jdown != '') or (
showlinks == 2 and jdown == ''): # Descartar enlaces veronline/descargar
continue
idioma_1 = ""
idiomas = re.compile('<img src="https://cdn.plusdede.com/images/flags/([^"]+).png', re.DOTALL).findall(match)
idiomas = re.compile('<img src="https://cd.*?plusdede.com/images/flags/([^"]+).png', re.DOTALL).findall(match)
idioma_0 = idiomas[0]
if len(idiomas) > 1:
idioma_1 = idiomas[1]
@@ -670,16 +630,12 @@ def findvideos(item, verTodos=False):
calidad_video = scrapertools.find_single_match(match,
'<span class="fa fa-video-camera"></span>(.*?)</div>').replace(
" ", "").replace("\n", "")
logger.debug("calidad_video=" + calidad_video)
calidad_audio = scrapertools.find_single_match(match,
'<span class="fa fa-headphones"></span>(.*?)</div>').replace(
" ", "").replace("\n", "")
logger.debug("calidad_audio=" + calidad_audio)

thumb_servidor = scrapertools.find_single_match(match, '<img src="([^"]+)">')
logger.debug("thumb_servidor=" + thumb_servidor)
nombre_servidor = scrapertools.find_single_match(thumb_servidor, "hosts/([^\.]+).png")
logger.debug("nombre_servidor=" + nombre_servidor)

if jdown != '':
title = "Download " + nombre_servidor + " (" + idioma + ") (Calidad " + calidad_video.strip() + ", audio " + calidad_audio.strip() + ")"
@@ -696,7 +652,6 @@ def findvideos(item, verTodos=False):
url = urlparse.urljoin(item.url, scrapertools.find_single_match(match, 'href="([^"]+)"'))
thumbnail = thumb_servidor
plot = ""
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
if sortlinks > 0:
# orden1 para dejar los "downloads" detras de los "ver" al ordenar
# orden2 segun configuración

@@ -788,13 +743,10 @@ def play(item):
headers = {'Referer': item.extra}

data = httptools.downloadpage(item.url, headers=headers).data
# logger.debug("dataLINK="+data)
url = scrapertools.find_single_match(data,
'<a href="([^"]+)" target="_blank"><button class="btn btn-primary">visitar enlace</button>')
url = urlparse.urljoin("https://www.plusdede.com", url)
# logger.debug("DATA_LINK_FINAL:"+url)

logger.debug("URL_PLAY:" + url)
headers = {'Referer': item.url}
media_url = httptools.downloadpage(url, headers=headers, follow_redirects=False).headers.get("location")
# logger.info("media_url="+media_url)
@@ -808,7 +760,6 @@ def play(item):
videoitem.channel = item.channel

# Marcar como visto
logger.debug(item)
checkseen(item)

return itemlist

@@ -827,7 +778,6 @@ def checkseen(item):
tipo_str = "pelis"
headers = {"Referer": "https://www.plusdede.com/" + tipo_str, "X-Requested-With": "XMLHttpRequest",
"X-CSRF-TOKEN": item.token}
logger.debug("Entrando a checkseen " + url_temp + item.token)
data = httptools.downloadpage(url_temp, post="id=" + item.idtemp, headers=headers, replace_headers=True).data
return True

@@ -836,7 +786,6 @@ def infosinopsis(item):
logger.info()

data = httptools.downloadpage(item.url).data
logger.debug("SINOPSISdata=" + data)

scrapedtitle = scrapertools.find_single_match(data, '<div class="media-title">([^<]+)</div>')
scrapedvalue = scrapertools.find_single_match(data, '<span class="value">([^<]+)</span>')
@@ -845,11 +794,8 @@ def infosinopsis(item):
scrapedduration = scrapertools.htmlclean(scrapertools.find_single_match(data,
'<strong>Duración</strong>\s*<div class="mini-content">([^<]+)</div>').strip().replace(
" ", "").replace("\n", ""))
logger.debug(scrapedduration)
scrapedplot = scrapertools.find_single_match(data, '<div class="plot expandable">([^<]+)<div').strip()
logger.debug("SINOPSISdataplot=" + scrapedplot)
generos = scrapertools.find_single_match(data, '<strong>Género</strong>\s*<ul>(.*?)</ul>')
logger.debug("generos=" + generos)
scrapedgenres = re.compile('<li>([^<]+)</li>', re.DOTALL).findall(generos)
scrapedcasting = re.compile(
'<a href="https://www.plusdede.com/star/[^"]+"><div class="text-main">([^<]+)</div></a>\s*<div class="text-sub">\s*([^<]+)</div>',
@@ -954,7 +900,6 @@ def plusdede_check(item):
if item.tipo_esp == "lista":
url_temp = "https://www.plusdede.com/listas/addmediapopup/" + item.tipo + "/" + item.idtemp + "?popup=1"
data = httptools.downloadpage(url_temp).data
logger.debug("DATA_CHECK_LISTA:" + data)

patron = '<div class="lista model" data-model="10" data-id="([^"]+)">+'
patron += '.*?<a href="/lista/[^"]+">([^<]+)</a>+'
@@ -986,8 +931,6 @@ def plusdede_check(item):
"X-CSRF-TOKEN": item.token}
data = httptools.downloadpage(url_temp, post="id=" + item.idtemp, headers=headers,
replace_headers=True).data.strip()
logger.debug("URL_PLUSDEDECHECK_DATA=" + url_temp + " ITEM:TIPO=" + item.tipo)
logger.debug("PLUSDEDECHECK_DATA=" + data)
dialog = platformtools
dialog.ok = platformtools.dialog_ok
if data == "1":
@@ -1002,4 +945,4 @@ def plusdede_check(item):
elif item.tipo_esp == "add_list":
dialog.ok('SUCCESS', 'Añadido a la lista!')
else:
dialog.ok('ERROR', 'No se pudo realizar la acción!')
dialog.ok('ERROR', 'No se pudo realizar la acción!')

@@ -311,7 +311,6 @@ def findvideos(item):
d=c[0].rstrip( )
d=d.lstrip( )
list_links[i].server=d

autoplay.start(list_links, item)

return list_links
@@ -319,7 +318,6 @@ def findvideos(item):

def play(item):
logger.info("%s - %s = %s" % (item.show, item.title, item.url))

if item.url.startswith(HOST):
data = httptools.downloadpage(item.url).data

@@ -78,13 +78,14 @@ def episodios(item):

itemlist = []
data = httptools.downloadpage(item.url).data
logger.debug("info %s " % data)
# obtener el numero total de episodios
total_episode = 0

patron_caps = '<li><span>Capitulo ([^"]+)\:<\/span><[^"]+"(.+?)">([^"]+)<[^"]+<\/li>'
matches = scrapertools.find_multiple_matches(data, patron_caps)
# data_info = scrapertools.find_single_match(data, '<div class="info">.+?<\/div><\/div>')
patron_info = '<img src="([^"]+)">.+?<\/span>([^"]+)<\/p><p><span>I.+?Reseña: <\/span>(.+?)<\/p><\/div>'
patron_info = '<img src="([^"]+)">.+?</span>(.*?)</p>.*?<h2>Reseña:</h2><p>(.*?)</p>'
scrapedthumbnail, show, scrapedplot = scrapertools.find_single_match(data, patron_info)
scrapedthumbnail = host + scrapedthumbnail

@@ -381,6 +381,7 @@ def findvideos(item):
item_json.show = item.library_filter_show.get(nom_canal, "")

# Ejecutamos find_videos, del canal o común
item_json.contentChannel='videolibrary'
if hasattr(channel, 'findvideos'):
from core import servertools
list_servers = getattr(channel, 'findvideos')(item_json)

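The videolibrary hunk above resolves each channel's `findvideos` dynamically with `hasattr`/`getattr` after tagging the item as coming from the videolibrary. A minimal sketch of that dispatch pattern; the `importlib` lookup and the `channels.` package path are assumptions for illustration, not the committed code:

```python
# Sketch of dynamic channel dispatch: import the channel module by name, then
# call its findvideos() only if the function exists (mirrors the hunk above).
import importlib


def run_channel_findvideos(channel_name, item_json):
    channel = importlib.import_module("channels." + channel_name)  # assumed package layout
    item_json.contentChannel = 'videolibrary'  # as set in the diff above
    if hasattr(channel, 'findvideos'):
        return getattr(channel, 'findvideos')(item_json)
    return []
```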
BIN plugin.video.alfa/resources/media/channels/banner/danimados.png (Normal file; binary file not shown; After: 18 KiB)
Binary file not shown. After: 110 KiB
BIN plugin.video.alfa/resources/media/channels/thumb/danimados.png (Normal file; binary file not shown; After: 16 KiB)
BIN plugin.video.alfa/resources/media/channels/thumb/mundiseries.png (Normal file; binary file not shown; After: 75 KiB)
@@ -33,11 +33,11 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
'Cookie': ''}
data = httptools.downloadpage(page_url, headers=headers, replace_headers=True).data
data = data.replace("\n","")
cgi_counter = scrapertools.find_single_match(data, '(?is)src=.(https://www.flashx.tv/counter.cgi.*?fx=[0-9a-zA-Z=]+)')
cgi_counter = scrapertools.find_single_match(data, """(?is)src=.(https://www.flashx.tv/counter.cgi.*?[^(?:'|")]+)""")
cgi_counter = cgi_counter.replace("%0A","").replace("%22","")
playnow = scrapertools.find_single_match(data, 'https://www.flashx.tv/dl[^"]+')
# Para obtener el f y el fxfx
js_fxfx = scrapertools.find_single_match(data, '(?is)src=.(https://www.flashx.tv/js/code.js.*?=[0-9]+)')
js_fxfx = scrapertools.find_single_match(data, """(?is)src=.(https://www.flashx.tv/js/code.js.*?[^(?:'|")]+)""")
data_fxfx = httptools.downloadpage(js_fxfx).data
mfxfx = scrapertools.find_single_match(data_fxfx, 'get.*?({.*?})').replace("'","").replace(" ","")
matches = scrapertools.find_multiple_matches(mfxfx, '(\w+):(\w+)')

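The flashx change swaps the strict `fx=[0-9a-zA-Z=]+` suffix for a looser tail that simply captures up to the surrounding quote, so the URL is extracted even when its query string changes. A small self-contained check of how the relaxed pattern behaves; the sample HTML is an assumption, and `re.search` stands in for `scrapertools.find_single_match`:

```python
# Demonstrates the relaxed counter.cgi pattern from the flashx hunk: it no longer
# requires a trailing fx=... parameter, it just captures up to the closing quote.
import re

html = '<script src="https://www.flashx.tv/counter.cgi?c=demo&fx=123"></script>'  # sample input (assumed)
patron = r"""(?is)src=.(https://www.flashx.tv/counter.cgi.*?[^(?:'|")]+)"""
match = re.search(patron, html)
print(match.group(1))  # -> https://www.flashx.tv/counter.cgi?c=demo&fx=123
```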
@@ -23,6 +23,8 @@ def test_video_exists(page_url):

if "Object not found" in response.data:
return False, "[Rapidvideo] El archivo no existe o ha sido borrado"
if response.code == 500:
return False, "[Rapidvideo] Error de servidor, inténtelo más tarde."

return True, ""
