77 Commits

Author SHA1 Message Date
alfa-addon
8985f3ebdd v2.3.4 2017-11-06 19:04:42 -05:00
Alfa
d60c246bbb Merge pull request #155 from Intel11/patch-3
Actualizados
2017-11-07 00:09:00 +01:00
Alfa
3b29fe47bb Merge pull request #156 from danielr460/master
Arreglos menores
2017-11-07 00:08:47 +01:00
Alfa
3093f72ce5 Merge pull request #159 from Alfa-beto/Fixes
Corregido error con extras
2017-11-07 00:08:33 +01:00
Unknown
55dcf3f091 Corregido error con extras 2017-11-05 18:21:26 -03:00
Intel1
2924b6958d Update allpeliculas.py 2017-11-04 15:01:27 -05:00
Intel1
927310c7c6 flashx: actualizado 2017-11-04 14:58:29 -05:00
danielr460
0c25891790 fix servers 2017-11-04 00:06:45 -05:00
danielr460
212c06057f Arreglos menores 2017-11-03 22:04:28 -05:00
Intel1
9c3b3e9256 allpeliculas: paginador para colecciones 2017-11-03 17:54:51 -05:00
Intel1
6dc853b41e repelis: fix categoria 2017-11-03 15:49:52 -05:00
Intel1
7afd09dfa9 streamixcloud: fix 2017-11-03 11:08:16 -05:00
Intel1
6855508eaa Update ultrapeliculashd.py 2017-11-03 10:21:18 -05:00
Intel1
2925c29671 Update ultrapeliculashd.json 2017-11-03 10:20:47 -05:00
Intel1
506e68e8a3 vshare: cambiado el orden de resoluciones 2017-11-03 10:17:12 -05:00
Intel1
9cc30152f8 vshare: actualizado patron 2017-11-03 10:15:27 -05:00
Intel1
267c9d8031 gvideo: fix 2017-11-03 10:07:46 -05:00
Intel1
bd68b83b6c flashx: fix 2017-11-01 06:47:51 -05:00
Unknown
c1f8039672 Merge remote-tracking branch 'alfa-addon/master' into Fixes 2017-11-01 08:37:33 -03:00
alfa-addon
99dfa2be58 v2.3.3 2017-10-31 21:09:17 -04:00
Alfa
39e711b3cb Merge pull request #150 from danielr460/master
Arreglos en canales
2017-11-01 01:40:53 +01:00
Alfa
2d8d2b3baf Merge pull request #151 from Intel11/patch-2
Actualizados
2017-11-01 01:40:36 +01:00
Alfa
82d126c3e1 Merge pull request #152 from Alfa-beto/Fixes
Correcciones
2017-11-01 01:40:20 +01:00
Alfa
8d41fd1c64 Merge pull request #153 from danielr460/patch-1
Otros arreglos
2017-11-01 01:40:05 +01:00
Unknown
a8c2f409eb Correcciones a canales 2017-10-31 14:57:55 -03:00
Daniel Rincón Rodríguez
7b2a3c2181 Update mundiseries.json 2017-10-31 07:19:55 -05:00
Daniel Rincón Rodríguez
9e6729f0be Update danimados.json 2017-10-31 07:17:04 -05:00
Unknown
241e644dcf Correcciones 2017-10-30 15:02:57 -03:00
Intel1
ae318721ab Add files via upload 2017-10-30 10:34:08 -05:00
Intel1
8328610ffa Delete bajui2.json 2017-10-30 10:32:41 -05:00
Intel1
19101b5310 Delete bajui2.py 2017-10-30 10:32:27 -05:00
Intel1
22827e0f7e Update animemovil.json 2017-10-30 10:28:29 -05:00
Unknown
1747c9795d Merge remote-tracking branch 'alfa-addon/master' into Fixes 2017-10-30 10:14:07 -03:00
Unknown
f3effe9a7f Corregidas series en pelisplus 2017-10-30 10:13:40 -03:00
Intel1
0621b1fa91 gamovideo: fix 2017-10-30 04:16:22 -05:00
Intel1
16473764c9 flashx: fix 2017-10-30 04:15:00 -05:00
danielr460
6b1727a0b8 Arreglado serieslan 2017-10-29 19:50:42 -05:00
Intel1
11fceffd14 bajui2: fix 2017-10-29 10:00:39 -05:00
danielr460
3a49b8a442 Función Play eliminaba info de la serie. Corregido 2017-10-29 08:54:27 -05:00
alfa-addon
162772e9dc v2.3.2 2017-10-28 22:47:51 -04:00
alfa-addon
60d61f861b fixed 2017-10-28 22:47:33 -04:00
Alfa
cd1c7b692a Merge pull request #142 from danielr460/master
Nuevos canales
2017-10-29 06:06:34 +01:00
danielr460
10abe4a6d4 Cambio en la estructura de la página web danimados 2017-10-28 23:40:49 -05:00
danielr460
b0fa5e8a75 Eliminada sección Novedades porque la página web la elimino 2017-10-28 23:23:42 -05:00
danielr460
54d6a943f5 Arreglado Mundiseries 2017-10-28 23:19:43 -05:00
Daniel Rincón Rodríguez
44df5b6036 Corregida linea que si hacia 2017-10-28 23:00:36 -05:00
Alfa
ae67d9b5ee Merge pull request #148 from Intel11/patch-1
Actualizados
2017-10-29 02:57:15 +01:00
Alfa
895d14760d Merge pull request #149 from Alfa-beto/Fixes
Corregida pagina siguiente en pelisplus
2017-10-29 02:56:42 +01:00
Intel1
b0b4b218f0 animemovil: fast fix 2017-10-28 20:55:12 -05:00
Unknown
348787ae97 Merge remote-tracking branch 'alfa-addon/master' into Fixes 2017-10-28 21:56:26 -03:00
Unknown
0f7c11efad Corregido pagina siguiente 2017-10-28 21:37:23 -03:00
Intel1
ae7a4a8d83 rapidvideo: actualizado test_video_exists 2017-10-28 11:26:08 -05:00
Intel1
fc58c717eb plusdede: actualizado findvideos 2017-10-28 11:17:48 -05:00
Daniel Rincón Rodríguez
b3a19f3d20 Arreglos sugeridos por Intel1 2017-10-28 11:11:51 -05:00
Daniel Rincón Rodríguez
0cac09eef5 Eliminada solución "tosca" 2017-10-28 11:00:35 -05:00
Intel1
9a1effbe25 cinetux: mostrar cantidad de películas 2017-10-28 10:58:15 -05:00
Daniel Rincón Rodríguez
44145660d0 Eliminado código innecesario 2017-10-28 10:55:55 -05:00
Daniel Rincón Rodríguez
aec2674316 Eliminada función generica 2017-10-28 10:55:01 -05:00
Daniel Rincón Rodríguez
09de611aae Update cartoonlatino.py 2017-10-28 10:54:00 -05:00
Daniel Rincón Rodríguez
74598154c2 Eliminado código innecesario 2017-10-28 10:53:19 -05:00
Daniel Rincón Rodríguez
7ab9c8bb29 Cartoonlatino en su version original 2017-10-28 10:52:30 -05:00
Daniel Rincón Rodríguez
14178974a0 Mejorado Autoplay 2017-10-28 10:51:45 -05:00
Intel1
c43162cbc2 flashx: lo dicho!!! 2017-10-28 08:40:11 -05:00
Daniel Rincón Rodríguez
aa76986a51 Dejado el canal AnitoonsTV como estaba 2017-10-28 08:30:25 -05:00
Daniel Rincón Rodríguez
9aae0e7a1b Arreglos comentarios de Intel1 2017-10-26 18:28:19 -05:00
danielr460
e1fe886602 Autoplay añadido 2017-10-26 15:38:36 -05:00
danielr460
19812c83a9 Añadida info de los canales desde la videoteca 2017-10-26 15:05:12 -05:00
danielr460
cabc2458e3 Añadida info de la serie para que no se borre cuando esta activo autoplay 2017-10-26 14:36:09 -05:00
danielr460
336376ecef Añadida opción de videolibrary para saber que no vengo del addon 2017-10-26 14:34:45 -05:00
danielr460
af06269e39 Añadida opción marcar como visto en autoplay 2017-10-26 13:56:37 -05:00
danielr460
f37d18ee0a Añadido contentChannel para saber en findvideos si vengo del addon o de la videolibrary 2017-10-26 13:54:14 -05:00
danielr460
6fefc3b048 Agregado Autoplay 2017-10-26 13:44:09 -05:00
danielr460
ab5fe41403 Eliminar código innecesario 2017-10-26 07:49:46 -05:00
danielr460
15463ea0f8 Arreglo bug 2017-10-26 07:48:01 -05:00
danielr460
badf40573c Nuevo canal mundiseries 2017-10-25 21:41:55 -05:00
danielr460
c80793e3e0 Media for danimados 2017-10-25 21:41:38 -05:00
danielr460
cbc0ff0bd0 Nuevo canal danimados 2017-10-25 21:25:31 -05:00
41 changed files with 553 additions and 556 deletions

View File

@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?> <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="2.3.1" provider-name="Alfa Addon"> <addon id="plugin.video.alfa" name="Alfa" version="2.3.4" provider-name="Alfa Addon">
<requires> <requires>
<import addon="xbmc.python" version="2.1.0"/> <import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/> <import addon="script.module.libtorrent" optional="true"/>
@@ -19,9 +19,12 @@
</assets> </assets>
<news>[B]Estos son los cambios para esta versión:[/B] <news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Canales agregados y arreglos[/B][/COLOR] [COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
» ohlatino » animemovil » allpeliculas » repelis
» pelisplus » flashx » flashx » ultrapeliculashd
» gvideo » streamixcloud
» vshare » anitoonstv
¤ arreglos internos ¤ arreglos internos
[COLOR green]Gracias a [COLOR yellow]Danielr460[/COLOR] por su colaboración en esta versión[/COLOR]
</news> </news>
<description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description> <description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>
<summary lang="en">Browse web pages using Kodi</summary> <summary lang="en">Browse web pages using Kodi</summary>

View File

@@ -1,7 +1,5 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
import urlparse
from core import httptools from core import httptools
from core import jsontools from core import jsontools
from core import scrapertools from core import scrapertools
@@ -59,6 +57,7 @@ def colecciones(item):
title = scrapedtitle.capitalize() + " (" + scrapedcantidad + ")" title = scrapedtitle.capitalize() + " (" + scrapedcantidad + ")"
itemlist.append(Item(channel = item.channel, itemlist.append(Item(channel = item.channel,
action = "listado_colecciones", action = "listado_colecciones",
page = 1,
thumbnail = host + scrapedthumbnail, thumbnail = host + scrapedthumbnail,
title = title, title = title,
url = host + scrapedurl url = host + scrapedurl
@@ -71,7 +70,7 @@ def listado_colecciones(item):
itemlist = [] itemlist = []
data = httptools.downloadpage(item.url).data data = httptools.downloadpage(item.url).data
data_url = scrapertools.find_single_match(data, "data_url: '([^']+)") data_url = scrapertools.find_single_match(data, "data_url: '([^']+)")
post = "page=1" post = "page=%s" %item.page
data = httptools.downloadpage(host + data_url, post=post).data data = httptools.downloadpage(host + data_url, post=post).data
patron = 'a href="(/peli[^"]+).*?' patron = 'a href="(/peli[^"]+).*?'
patron += 'src="([^"]+).*?' patron += 'src="([^"]+).*?'
@@ -88,6 +87,16 @@ def listado_colecciones(item):
url = host + scrapedurl url = host + scrapedurl
)) ))
tmdb.set_infoLabels(itemlist) tmdb.set_infoLabels(itemlist)
item.page += 1
post = "page=%s" %item.page
data = httptools.downloadpage(host + data_url, post=post).data
if len(data) > 50:
itemlist.append(Item(channel = item.channel,
action = "listado_colecciones",
title = "Pagina siguiente>>",
page = item.page,
url = item.url
))
return itemlist return itemlist
@@ -159,6 +168,7 @@ def lista(item):
params = jsontools.dump(dict_param) params = jsontools.dump(dict_param)
data = httptools.downloadpage(item.url, post=params).data data = httptools.downloadpage(item.url, post=params).data
data = data.replace("<mark>","").replace("<\/mark>","")
dict_data = jsontools.load(data) dict_data = jsontools.load(data)
for it in dict_data["items"]: for it in dict_data["items"]:
@@ -167,7 +177,7 @@ def lista(item):
rating = it["imdb"] rating = it["imdb"]
year = it["year"] year = it["year"]
url = host + "pelicula/" + it["slug"] url = host + "pelicula/" + it["slug"]
thumb = urlparse.urljoin(host, it["image"]) thumb = host + it["image"]
item.infoLabels['year'] = year item.infoLabels['year'] = year
itemlist.append(item.clone(action="findvideos", title=title, fulltitle=title, url=url, thumbnail=thumb, itemlist.append(item.clone(action="findvideos", title=title, fulltitle=title, url=url, thumbnail=thumb,
plot=plot, context=["buscar_trailer"], contentTitle=title, contentType="movie")) plot=plot, context=["buscar_trailer"], contentTitle=title, contentType="movie"))

View File

@@ -3,7 +3,7 @@
"name": "Animemovil", "name": "Animemovil",
"active": true, "active": true,
"adult": false, "adult": false,
"language": ["cat", "lat"], "language": ["cast", "lat"],
"thumbnail": "https://s1.postimg.org/92ji7stii7/animemovil1.png", "thumbnail": "https://s1.postimg.org/92ji7stii7/animemovil1.png",
"banner": "", "banner": "",
"categories": [ "categories": [

View File

@@ -86,7 +86,7 @@ def recientes(item):
tipo = "tvshow" tipo = "tvshow"
show = contentTitle show = contentTitle
action = "episodios" action = "episodios"
context = renumbertools.context context = renumbertools.context(item)
if item.extra == "recientes": if item.extra == "recientes":
action = "findvideos" action = "findvideos"
context = "" context = ""
@@ -96,7 +96,7 @@ def recientes(item):
action = "peliculas" action = "peliculas"
if not thumb.startswith("http"): if not thumb.startswith("http"):
thumb = "http:%s" % thumb thumb = "http:%s" % thumb
action ="findvideos"
infoLabels = {'filtro': {"original_language": "ja"}.items()} infoLabels = {'filtro': {"original_language": "ja"}.items()}
itemlist.append(item.clone(action=action, title=title, url=url, thumbnail=thumb, text_color=color3, itemlist.append(item.clone(action=action, title=title, url=url, thumbnail=thumb, text_color=color3,
contentTitle=contentTitle, contentSerieName=show, infoLabels=infoLabels, contentTitle=contentTitle, contentSerieName=show, infoLabels=infoLabels,

View File

@@ -1,4 +1,4 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
import re import re
@@ -148,35 +148,21 @@ def findvideos(item):
itemla = scrapertools.find_multiple_matches(data_vid, '<div class="serv">.+?-(.+?)-(.+?)<\/div><.+? src="(.+?)"') itemla = scrapertools.find_multiple_matches(data_vid, '<div class="serv">.+?-(.+?)-(.+?)<\/div><.+? src="(.+?)"')
for server, quality, url in itemla: for server, quality, url in itemla:
if "Calidad Alta" in quality: if "Calidad Alta" in quality:
quality = quality.replace("Calidad Alta", "HQ") quality = "HQ"
server = server.lower().strip() if "HQ" in quality:
if "ok" == server: quality = "HD"
server = 'okru' if " Calidad media - Carga mas rapido" in quality:
if "netu" == server: quality = "360p"
continue server = server.lower().strip()
if "ok" in server:
server = 'okru'
if "rapid" in server:
server = 'rapidvideo'
if "netu" in server:
server = 'netutv'
itemlist.append(item.clone(url=url, action="play", server=server, contentQuality=quality, itemlist.append(item.clone(url=url, action="play", server=server, contentQuality=quality,
thumbnail=scrapedthumbnail, plot=scrapedplot, thumbnail=scrapedthumbnail, plot=scrapedplot,
title="Enlace encontrado en %s: [%s]" % (server.capitalize(), quality))) title="Enlace encontrado en: %s [%s]" % (server.capitalize(), quality)))
autoplay.start(itemlist, item) autoplay.start(itemlist, item)
return itemlist return itemlist
def play(item):
logger.info()
itemlist = []
# Buscamos video por servidor ...
devuelve = servertools.findvideosbyserver(item.url, item.server)
if not devuelve:
# ...sino lo encontramos buscamos en todos los servidores disponibles
devuelve = servertools.findvideos(item.url, skip=True)
if devuelve:
# logger.debug(devuelve)
itemlist.append(Item(channel=item.channel, title=item.contentTitle, action="play", server=devuelve[0][2],
url=devuelve[0][1], thumbnail=item.thumbnail))
return itemlist

View File

@@ -7,6 +7,7 @@ from core import jsontools
from core.item import Item from core.item import Item
from platformcode import config, logger from platformcode import config, logger
from platformcode import platformtools from platformcode import platformtools
from platformcode import launcher
__channel__ = "autoplay" __channel__ = "autoplay"
@@ -78,7 +79,20 @@ def start(itemlist, item):
:return: intenta autoreproducir, en caso de fallar devuelve el itemlist que recibio en un principio :return: intenta autoreproducir, en caso de fallar devuelve el itemlist que recibio en un principio
''' '''
logger.info() logger.info()
for videoitem in itemlist:
#Nos dice de donde viene si del addon o videolibrary
if item.contentChannel=='videolibrary':
videoitem.contentEpisodeNumber=item.contentEpisodeNumber
videoitem.contentPlot=item.contentPlot
videoitem.contentSeason=item.contentSeason
videoitem.contentSerieName=item.contentSerieName
videoitem.contentTitle=item.contentTitle
videoitem.contentType=item.contentType
videoitem.episode_id=item.episode_id
videoitem.hasContentDetails=item.hasContentDetails
videoitem.infoLabels=item.infoLabels
videoitem.thumbnail=item.thumbnail
#videoitem.title=item.title
if not config.is_xbmc(): if not config.is_xbmc():
#platformtools.dialog_notification('AutoPlay ERROR', 'Sólo disponible para XBMC/Kodi') #platformtools.dialog_notification('AutoPlay ERROR', 'Sólo disponible para XBMC/Kodi')
return itemlist return itemlist
@@ -261,8 +275,12 @@ def start(itemlist, item):
else: else:
videoitem = resolved_item[0] videoitem = resolved_item[0]
# si no directamente reproduce # si no directamente reproduce y marca como visto
platformtools.play_video(videoitem) from platformcode import xbmc_videolibrary
xbmc_videolibrary.mark_auto_as_watched(item)
#platformtools.play_video(videoitem)
videoitem.contentChannel='videolibrary'
launcher.run(videoitem)
try: try:
if platformtools.is_playing(): if platformtools.is_playing():

View File

@@ -1,6 +1,6 @@
{ {
"id": "bajui2", "id": "bajui",
"name": "Bajui2", "name": "Bajui",
"active": true, "active": true,
"adult": false, "adult": false,
"language": ["cast"], "language": ["cast"],

View File

@@ -13,7 +13,7 @@ def mainlist(item):
logger.info() logger.info()
itemlist = [] itemlist = []
itemlist.append(Item(channel=item.channel, title="Películas", action="menupeliculas", itemlist.append(Item(channel=item.channel, title="Películas", action="menupeliculas",
url="http://www.bajui2.com/descargas/categoria/2/peliculas", url="http://www.bajui.org/descargas/categoria/2/peliculas",
fanart=item.fanart)) fanart=item.fanart))
itemlist.append(Item(channel=item.channel, title="Series", action="menuseries", itemlist.append(Item(channel=item.channel, title="Series", action="menuseries",
fanart=item.fanart)) fanart=item.fanart))
@@ -51,13 +51,13 @@ def menuseries(item):
logger.info() logger.info()
itemlist = [] itemlist = []
itemlist.append(Item(channel=item.channel, title="Series - Novedades", action="peliculas", itemlist.append(Item(channel=item.channel, title="Series - Novedades", action="peliculas",
url="http://www.bajui2.com/descargas/categoria/3/series", url="http://www.bajui.org/descargas/categoria/3/series",
fanart=item.fanart, viewmode="movie_with_plot")) fanart=item.fanart, viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, title="Series - A-Z", action="peliculas", itemlist.append(Item(channel=item.channel, title="Series - A-Z", action="peliculas",
url="http://www.bajui2.com/descargas/categoria/3/series/orden:nombre", url="http://www.bajui.org/descargas/categoria/3/series/orden:nombre",
fanart=item.fanart, viewmode="movie_with_plot")) fanart=item.fanart, viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, title="Series - HD", action="peliculas", itemlist.append(Item(channel=item.channel, title="Series - HD", action="peliculas",
url="http://www.bajui2.com/descargas/subcategoria/11/hd/orden:nombre", url="http://www.bajui.org/descargas/subcategoria/11/hd/orden:nombre",
fanart=item.fanart, viewmode="movie_with_plot")) fanart=item.fanart, viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url="", itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url="",
fanart=item.fanart)) fanart=item.fanart))
@@ -68,10 +68,10 @@ def menudocumentales(item):
logger.info() logger.info()
itemlist = [] itemlist = []
itemlist.append(Item(channel=item.channel, title="Documentales - Novedades", action="peliculas", itemlist.append(Item(channel=item.channel, title="Documentales - Novedades", action="peliculas",
url="http://www.bajui2.com/descargas/categoria/7/docus-y-tv", url="http://www.bajui.org/descargas/categoria/7/docus-y-tv",
fanart=item.fanart, viewmode="movie_with_plot")) fanart=item.fanart, viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, title="Documentales - A-Z", action="peliculas", itemlist.append(Item(channel=item.channel, title="Documentales - A-Z", action="peliculas",
url="http://www.bajui2.com/descargas/categoria/7/docus-y-tv/orden:nombre", url="http://www.bajui.org/descargas/categoria/7/docus-y-tv/orden:nombre",
fanart=item.fanart, viewmode="movie_with_plot")) fanart=item.fanart, viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url="", itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url="",
fanart=item.fanart)) fanart=item.fanart))
@@ -86,7 +86,7 @@ def search(item, texto, categoria=""):
texto = texto.replace(" ", "+") texto = texto.replace(" ", "+")
logger.info("categoria: " + categoria + " url: " + url) logger.info("categoria: " + categoria + " url: " + url)
try: try:
item.url = "http://www.bajui2.com/descargas/busqueda/%s" item.url = "http://www.bajui.org/descargas/busqueda/%s"
item.url = item.url % texto item.url = item.url % texto
itemlist.extend(peliculas(item)) itemlist.extend(peliculas(item))
return itemlist return itemlist
@@ -118,7 +118,7 @@ def peliculas(item, paginacion=True):
scrapedtitle = title scrapedtitle = title
scrapedplot = clean_plot(plot) scrapedplot = clean_plot(plot)
scrapedurl = urlparse.urljoin(item.url, url) scrapedurl = urlparse.urljoin(item.url, url)
scrapedthumbnail = urlparse.urljoin("http://www.bajui2.com/", thumbnail.replace("_m.jpg", "_g.jpg")) scrapedthumbnail = urlparse.urljoin("http://www.bajui.org/", thumbnail.replace("_m.jpg", "_g.jpg"))
logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]") logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
# Añade al listado de XBMC # Añade al listado de XBMC
@@ -133,7 +133,7 @@ def peliculas(item, paginacion=True):
scrapertools.printMatches(matches) scrapertools.printMatches(matches)
if len(matches) > 0: if len(matches) > 0:
scrapedurl = urlparse.urljoin("http://www.bajui2.com/", matches[0]) scrapedurl = urlparse.urljoin("http://www.bajui.org/", matches[0])
pagitem = Item(channel=item.channel, action="peliculas", title=">> Página siguiente", url=scrapedurl, pagitem = Item(channel=item.channel, action="peliculas", title=">> Página siguiente", url=scrapedurl,
fanart=item.fanart, viewmode="movie_with_plot") fanart=item.fanart, viewmode="movie_with_plot")
if not paginacion: if not paginacion:
@@ -197,7 +197,7 @@ def enlaces(item):
try: try:
item.thumbnail = scrapertools.get_match(data, '<div class="ficha-imagen"[^<]+<img src="([^"]+)"') item.thumbnail = scrapertools.get_match(data, '<div class="ficha-imagen"[^<]+<img src="([^"]+)"')
item.thumbnail = urlparse.urljoin("http://www.bajui2.com/", item.thumbnail) item.thumbnail = urlparse.urljoin("http://www.bajui.org/", item.thumbnail)
except: except:
pass pass
@@ -234,8 +234,8 @@ def enlaces(item):
lista_servidores = lista_servidores[:-2] lista_servidores = lista_servidores[:-2]
scrapedthumbnail = item.thumbnail scrapedthumbnail = item.thumbnail
# http://www.bajui2.com/ajax/mostrar-enlaces.php?id=330582&code=124767d31bfbf14c3861 # http://www.bajui.org/ajax/mostrar-enlaces.php?id=330582&code=124767d31bfbf14c3861
scrapedurl = "http://www.bajui2.com/ajax/mostrar-enlaces.php?id=" + id + "&code=" + id2 scrapedurl = "http://www.bajui.org/ajax/mostrar-enlaces.php?id=" + id + "&code=" + id2
scrapedplot = item.plot scrapedplot = item.plot
scrapedtitle = "Enlaces de " + usuario + " (" + fecha + ") (" + lista_servidores + ")" scrapedtitle = "Enlaces de " + usuario + " (" + fecha + ") (" + lista_servidores + ")"

View File

@@ -9,6 +9,7 @@ from core import servertools
from core import tmdb from core import tmdb
from core.item import Item from core.item import Item
from platformcode import config, logger from platformcode import config, logger
from channels import autoplay
host = "http://www.cartoon-latino.com/" host = "http://www.cartoon-latino.com/"
from channels import autoplay from channels import autoplay
@@ -150,7 +151,6 @@ def episodios(item):
if config.get_videolibrary_support() and len(itemlist) > 0: if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="Añadir " + show + " a la videoteca", url=item.url, itemlist.append(Item(channel=item.channel, title="Añadir " + show + " a la videoteca", url=item.url,
action="add_serie_to_library", extra="episodios", show=show)) action="add_serie_to_library", extra="episodios", show=show))
return itemlist return itemlist
@@ -185,29 +185,5 @@ def findvideos(item):
server1 = server server1 = server
itemlist.append(item.clone(url=url, action="play", server=server1, itemlist.append(item.clone(url=url, action="play", server=server1,
title="Enlace encontrado en %s " % (server1.capitalize()))) title="Enlace encontrado en %s " % (server1.capitalize())))
autoplay.start(itemlist, item) autoplay.start(itemlist, item)
return itemlist return itemlist
def play(item):
logger.info()
itemlist = []
# Buscamos video por servidor ...
devuelve = servertools.findvideosbyserver(item.url, item.server)
if not devuelve:
# ...sino lo encontramos buscamos en todos los servidores disponibles
devuelve = servertools.findvideos(item.url, skip=True)
if devuelve:
# logger.debug(devuelve)
itemlist.append(Item(channel=item.channel, title=item.contentTitle, action="play", server=devuelve[0][2],
url=devuelve[0][1], thumbnail=item.thumbnail, folder=False))
return itemlist

View File

@@ -28,9 +28,9 @@ def mainlist(item):
itemlist = [] itemlist = []
item.viewmode = viewmode item.viewmode = viewmode
data = httptools.downloadpage(CHANNEL_HOST).data data = httptools.downloadpage(CHANNEL_HOST + "pelicula").data
total = scrapertools.find_single_match(data, "TENEMOS\s<b>(.*?)</b>") total = scrapertools.find_single_match(data, "Películas</h1><span>(.*?)</span>")
titulo = "Peliculas" titulo = "Peliculas (%s)" %total
itemlist.append(item.clone(title=titulo, text_color=color2, action="", text_bold=True)) itemlist.append(item.clone(title=titulo, text_color=color2, action="", text_bold=True))
itemlist.append(item.clone(action="peliculas", title=" Novedades", url=CHANNEL_HOST + "pelicula", itemlist.append(item.clone(action="peliculas", title=" Novedades", url=CHANNEL_HOST + "pelicula",
thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres" thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres"

View File

@@ -0,0 +1,12 @@
{
"id": "danimados",
"name": "Danimados",
"active": true,
"adult": false,
"language": ["lat"],
"thumbnail": "https://imgur.com/kU5Lx1S.png",
"banner": "https://imgur.com/xG5xqBq.png",
"categories": [
"tvshow"
]
}

View File

@@ -0,0 +1,186 @@
# -*- coding: utf-8 -*-
import re
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channels import autoplay
host = "http://www.danimados.com/"
list_servers = ['openload',
'okru',
'rapidvideo'
]
list_quality = ['default']
def mainlist(item):
logger.info()
thumb_series = get_thumb("channels_tvshow.png")
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(Item(channel=item.channel, action="mainpage", title="Categorías", url=host,
thumbnail=thumb_series))
itemlist.append(Item(channel=item.channel, action="mainpage", title="Más Populares", url=host,
thumbnail=thumb_series))
#itemlist.append(Item(channel=item.channel, action="movies", title="Peliculas Animadas", url=host,
# thumbnail=thumb_series))
autoplay.show_option(item.channel, itemlist)
return itemlist
"""
def search(item, texto):
logger.info()
texto = texto.replace(" ","+")
item.url = item.url+texto
if texto!='':
return lista(item)
"""
def mainpage(item):
logger.info()
itemlist = []
data1 = httptools.downloadpage(item.url).data
data1 = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data1)
if item.title=="Más Populares":
patron_sec='<a class="lglossary" data-type.+?>(.+?)<\/ul>'
patron='<img .+? src="([^"]+)".+?<a href="([^"]+)".+?>([^"]+)<\/a>' #scrapedthumbnail, #scrapedurl, #scrapedtitle
if item.title=="Categorías":
patron_sec='<ul id="main_header".+?>(.+?)<\/ul><\/div>'
patron='<a href="([^"]+)">([^"]+)<\/a>'#scrapedurl, #scrapedtitle
data = scrapertools.find_single_match(data1, patron_sec)
matches = scrapertools.find_multiple_matches(data, patron)
if item.title=="Géneros" or item.title=="Categorías":
for scrapedurl, scrapedtitle in matches:
if "Películas Animadas"!=scrapedtitle:
itemlist.append(
Item(channel=item.channel, title=scrapedtitle, url=scrapedurl, action="lista"))
return itemlist
else:
for scraped1, scraped2, scrapedtitle in matches:
scrapedthumbnail=scraped1
scrapedurl=scraped2
itemlist.append(
Item(channel=item.channel, title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, action="episodios",
show=scrapedtitle))
tmdb.set_infoLabels(itemlist)
return itemlist
return itemlist
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
data_lista = scrapertools.find_single_match(data, '<div class="items">(.+?)<\/div><\/div><div class=.+?>')
patron = '<img src="([^"]+)" alt="([^"]+)">.+?<a href="([^"]+)">.+?<div class="texto">(.+?)<\/div>'
#scrapedthumbnail,#scrapedtitle, #scrapedurl, #scrapedplot
matches = scrapertools.find_multiple_matches(data_lista, patron)
for scrapedthumbnail,scrapedtitle, scrapedurl, scrapedplot in matches:
itemlist.append(
item.clone(title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
context=autoplay.context,plot=scrapedplot, action="episodios", show=scrapedtitle))
tmdb.set_infoLabels(itemlist)
return itemlist
def episodios(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
data_lista = scrapertools.find_single_match(data,
'<ul class="episodios">(.+?)<\/ul><\/div><\/div><\/div>')
show = item.title
patron_caps = '<img src="([^"]+)"><\/a><\/div><div class=".+?">([^"]+)<\/div>.+?<a .+? href="([^"]+)">([^"]+)<\/a>'
#scrapedthumbnail,#scrapedtempepi, #scrapedurl, #scrapedtitle
matches = scrapertools.find_multiple_matches(data_lista, patron_caps)
for scrapedthumbnail, scrapedtempepi, scrapedurl, scrapedtitle in matches:
tempepi=scrapedtempepi.split(" - ")
if tempepi[0]=='Pel':
tempepi[0]=0
title="{0}x{1} - ({2})".format(tempepi[0], tempepi[1].zfill(2), scrapedtitle)
itemlist.append(Item(channel=item.channel, thumbnail=scrapedthumbnail,
action="findvideos", title=title, url=scrapedurl, show=show))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="[COLOR blue]Añadir " + show + " a la videoteca[/COLOR]", url=item.url,
action="add_serie_to_library", extra="episodios", show=show))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
data = scrapertools.find_single_match(data,
'<div id="playex" .+?>(.+?)<\/nav><\/div><\/div>')
patron='src="(.+?)"'
itemla = scrapertools.find_multiple_matches(data,patron)
for i in range(len(itemla)):
#for url in itemla:
url=itemla[i]
#verificar existencia del video (testing)
codigo=verificar_video(itemla[i])
if codigo==200:
if "ok.ru" in url:
server='okru'
else:
server=''
if "openload" in url:
server='openload'
if "google" in url:
server='gvideo'
if "rapidvideo" in url:
server='rapidvideo'
if "streamango" in url:
server='streamango'
if server!='':
title="Enlace encontrado en %s " % (server.capitalize())
else:
title="NO DISPONIBLE"
if title!="NO DISPONIBLE":
itemlist.append(item.clone(title=title,url=url, action="play", server=server))
autoplay.start(itemlist, item)
return itemlist
def verificar_video(url):
codigo=httptools.downloadpage(url).code
if codigo==200:
# Revise de otra forma
data=httptools.downloadpage(url).data
removed = scrapertools.find_single_match(data,'removed(.+)')
if len(removed) != 0:
codigo1=404
else:
codigo1=200
else:
codigo1=200
return codigo1

View File

@@ -302,7 +302,7 @@ def epienlaces(item):
def findvideos(item): def findvideos(item):
logger.info() logger.info()
if (item.extra and item.extra != "findvideos") or item.path: if item.contentSeason!='':
return epienlaces(item) return epienlaces(item)
itemlist = [] itemlist = []

View File

@@ -319,61 +319,34 @@ def findvideos(item):
duplicados = [] duplicados = []
data = get_source(item.url) data = get_source(item.url)
src = data src = data
patron = 'id=(?:div|player)(\d+)>.*?<iframe src=.*? data-lazy-src=(.*?) marginheight' patron = 'id=(?:div|player)(\d+)>.*?data-lazy-src=(.*?) scrolling'
matches = re.compile(patron, re.DOTALL).findall(data) matches = re.compile(patron, re.DOTALL).findall(data)
for option, videoitem in matches: for option, videoitem in matches:
lang = scrapertools.find_single_match(src, lang = scrapertools.find_single_match(src,
'<a href=#(?:div|player)%s.*?>.*?(Doblado|Subtitulado)<\/a>' % option) '<a href=#(?:div|player)%s.*?>.*?(.*?)<\/a>' % option)
if 'audio ' in lang.lower():
lang=lang.lower().replace('audio ','')
lang=lang.capitalize()
data = get_source(videoitem) data = get_source(videoitem)
if 'play' in videoitem: video_urls = scrapertools.find_multiple_matches(data, '<li><a href=(.*?)><span')
url = scrapertools.find_single_match(data, '<span>Ver Online<.*?<li><a href=(.*?)><span class=icon>') for video in video_urls:
else: video_data = get_source(video)
url = scrapertools.find_single_match(data, '<iframe src=(.*?) scrolling=') if not 'fastplay' in video:
new_url= scrapertools.find_single_match(video_data,'<li><a href=(.*?srt)><span')
data_final = get_source(new_url)
else:
data_final=video_data
url = scrapertools.find_single_match(data_final,'iframe src=(.*?) scrolling')
quality = item.quality
server = servertools.get_server_from_url(url)
title = item.contentTitle + ' [%s] [%s]' % (server, lang)
if item.quality != '':
title = item.contentTitle + ' [%s] [%s] [%s]' % (server, quality, lang)
url_list.append([url, lang]) if url!='':
itemlist.append(item.clone(title=title, url=url, action='play', server=server, language=lang))
for video_url in url_list:
language = video_url[1]
if 'jw.miradetodo' in video_url[0]:
data = get_source('http:' + video_url[0])
patron = 'label:.*?(.*?),.*?file:.*?(.*?)&app.*?\}'
matches = re.compile(patron, re.DOTALL).findall(data)
for quality, scrapedurl in matches:
quality = quality
title = item.contentTitle + ' (%s) %s' % (quality, language)
server = 'directo'
url = scrapedurl
url = url.replace('\/', '/')
subtitle = scrapertools.find_single_match(data, "tracks: \[\{file: '.*?linksub=(.*?)',label")
if url not in duplicados:
itemlist.append(item.clone(title=title,
action='play',
url=url,
quality=quality,
server=server,
subtitle=subtitle,
language=language
))
duplicados.append(url)
elif video_url != '':
itemlist.extend(servertools.find_video_items(data=video_url[0]))
import os
for videoitem in itemlist:
if videoitem.server != 'directo':
quality = item.quality
title = item.contentTitle + ' (%s) %s' % (videoitem.server, language)
if item.quality != '':
title = item.contentTitle + ' (%s) %s' % (quality, language)
videoitem.title = title
videoitem.channel = item.channel
videoitem.thumbnail = os.path.join(config.get_runtime_path(), "resources", "media", "servers",
"server_%s.png" % videoitem.server)
videoitem.quality = item.quality
if item.infoLabels['mediatype'] == 'movie': if item.infoLabels['mediatype'] == 'movie':
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos': if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':

View File

@@ -0,0 +1,12 @@
{
"id": "mundiseries",
"name": "Mundiseries",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "https://imgur.com/GdGMFi1.png",
"banner": "https://imgur.com/1bDbYY1.png",
"categories": [
"tvshow"
]
}

View File

@@ -0,0 +1,99 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from channels import filtertools
from platformcode import config, logger
from platformcode import platformtools
from core import scrapertools
from core import servertools
from core.item import Item
from core import httptools
from channels import autoplay
# Base URL of the scraped site.
host = "http://mundiseries.com"
# Servers and qualities advertised to the autoplay feature.
list_servers = ['okru']
list_quality = ['default']
def mainlist(item):
    """Root menu of the channel: a single entry listing all series."""
    logger.info()
    autoplay.init(item.channel, list_servers, list_quality)
    series_url = urlparse.urljoin(host, "/lista-de-series")
    itemlist = [Item(channel=item.channel, action="lista", title="Series",
                     url=series_url)]
    autoplay.show_option(item.channel, itemlist)
    return itemlist
def lista(item):
    """List every series found on the site index page."""
    logger.info()
    page = httptools.downloadpage(item.url).data
    page = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", page)
    patron = '<a href="([^"]+)"><img src="([^"]+)" alt="ver ([^"]+) online'
    series = scrapertools.find_multiple_matches(page, patron)
    # Links and thumbnails are site-relative, so prepend the host.
    return [item.clone(title=serie, url=host + path, thumbnail=host + thumb,
                       action="temporada")
            for path, thumb, serie in series]
def temporada(item):
    """List the seasons available for a series.

    Bug fix: removed a leftover debug statement that dumped the whole
    page HTML into the log on every call.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    patron = '<a href="([^"]+)"><div class="item-temporada"><img alt=".+?" src="([^"]+)"><div .+?>Ver ([^"]+)<\/div><\/a>'
    matches = scrapertools.find_multiple_matches(data, patron)
    for link, thumbnail, name in matches:
        # Links and thumbnails are site-relative, so prepend the host.
        itemlist.append(item.clone(title=name, url=host + link,
                                   thumbnail=host + thumbnail,
                                   action="episodios",
                                   context=autoplay.context))
    return itemlist
def episodios(item):
    """List every episode of a series and offer the add-to-library entry.

    Titles are normalised to "SxEE name" with the episode number
    zero-padded to two digits.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    patron_caps = 'href="http:.+?\/mundiseries.+?com([^"]+)" alt="([^"]+) Capitulo ([^"]+) Temporada ([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron_caps)
    patron_show = '<h1 class="h-responsive center">.+?'
    patron_show += '<font color=".+?>([^"]+)<\/a><\/font>'
    show = scrapertools.find_single_match(data, patron_show)
    for link, name, cap, temp in matches:
        # str.replace is a no-op when '|' is absent, so the former
        # `if '|' in x` guards were redundant and have been dropped.
        cap = cap.replace('|', '')
        temp = temp.replace('|', '')
        name = name.replace('|', '')
        title = "%sx%s %s" % (temp, str(cap).zfill(2), name)
        itemlist.append(Item(channel=item.channel, action="findvideos",
                             title=title, url=host + link, show=show))
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(Item(channel=item.channel,
                             title="Añadir Temporada/Serie a la biblioteca de Kodi",
                             url=item.url, action="add_serie_to_library",
                             extra="episodios", show=show))
    return itemlist
def findvideos(item):
    """Extract playable links from an episode page.

    Bug fix: the original called servertools.get_servers_itemlist() on
    the still-empty ``itemlist`` (it was populated only afterwards), so
    the resulting list and the loop appending "###id;type" suffixes were
    dead code; that loop also shadowed the ``item`` parameter.  The dead
    code is removed — observable behaviour is unchanged.
    """
    logger.info()
    data = httptools.downloadpage(item.url).data
    itemlist = servertools.find_video_items(data=data)
    for videoitem in itemlist:
        videoitem.channel = item.channel
    autoplay.start(itemlist, item)
    return itemlist

View File

@@ -1,30 +0,0 @@
{
"id": "pelisencasa",
"name": "PelisEnCasa",
"active": true,
"adult": false,
"language": ["lat"],
"thumbnail": "https://s14.postimg.org/iqiq0bxn5/pelisencasa.png",
"banner": "https://s18.postimg.org/j775ehbg9/pelisencasa_banner.png",
"categories": [
"movie"
],
"settings": [
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,217 +0,0 @@
# -*- coding: utf-8 -*-
import re
from core import httptools
from core import scrapertools
from core import tmdb
from core.item import Item
from lib import jsunpack
from platformcode import config, logger
# Base URL of the scraped site.
host = 'http://pelisencasa.net'
# Genre name -> thumbnail URL, used when building the "Generos" menu.
tgenero = {"Comedia": "https://s7.postimg.org/ne9g9zgwb/comedia.png",
           "Suspense": "https://s13.postimg.org/wmw6vl1cn/suspenso.png",
           "Drama": "https://s16.postimg.org/94sia332d/drama.png",
           "Acción": "https://s3.postimg.org/y6o9puflv/accion.png",
           "Aventura": "https://s10.postimg.org/6su40czih/aventura.png",
           "Romance": "https://s15.postimg.org/fb5j8cl63/romance.png",
           "Animación": "https://s13.postimg.org/5on877l87/animacion.png",
           "Ciencia ficción": "https://s9.postimg.org/diu70s7j3/cienciaficcion.png",
           "Terror": "https://s7.postimg.org/yi0gij3gb/terror.png",
           "Documental": "https://s16.postimg.org/7xjj4bmol/documental.png",
           "Música": "https://s29.postimg.org/bbxmdh9c7/musical.png",
           "Western": "https://s23.postimg.org/lzyfbjzhn/western.png",
           "Fantasía": "https://s13.postimg.org/65ylohgvb/fantasia.png",
           "Guerra": "https://s23.postimg.org/71itp9hcr/belica.png",
           "Misterio": "https://s1.postimg.org/w7fdgf2vj/misterio.png",
           "Crimen": "https://s4.postimg.org/6z27zhirx/crimen.png",
           "Historia": "https://s15.postimg.org/fmc050h1n/historia.png",
           "Familia": "https://s7.postimg.org/6s7vdhqrf/familiar.png"}
# Lowercase letter (or '#') -> thumbnail URL for the A-Z index menu.
tletras = {'#': 'https://s32.postimg.org/drojt686d/image.png',
           'a': 'https://s32.postimg.org/llp5ekfz9/image.png',
           'b': 'https://s32.postimg.org/y1qgm1yp1/image.png',
           'c': 'https://s32.postimg.org/vlon87gmd/image.png',
           'd': 'https://s32.postimg.org/3zlvnix9h/image.png',
           'e': 'https://s32.postimg.org/bgv32qmsl/image.png',
           'f': 'https://s32.postimg.org/y6u7vq605/image.png',
           'g': 'https://s32.postimg.org/9237ib6jp/image.png',
           'h': 'https://s32.postimg.org/812yt6pk5/image.png',
           'i': 'https://s32.postimg.org/6nbbxvqat/image.png',
           'j': 'https://s32.postimg.org/axpztgvdx/image.png',
           'k': 'https://s32.postimg.org/976yrzdut/image.png',
           'l': 'https://s32.postimg.org/fmal2e9yd/image.png',
           'm': 'https://s32.postimg.org/m19lz2go5/image.png',
           'n': 'https://s32.postimg.org/b2ycgvs2t/image.png',
           'o': 'https://s32.postimg.org/c6igsucpx/image.png',
           'p': 'https://s32.postimg.org/jnro82291/image.png',
           'q': 'https://s32.postimg.org/ve5lpfv1h/image.png',
           'r': 'https://s32.postimg.org/nmovqvqw5/image.png',
           's': 'https://s32.postimg.org/zd2t89jol/image.png',
           't': 'https://s32.postimg.org/wk9lo8jc5/image.png',
           'u': 'https://s32.postimg.org/w8s5bh2w5/image.png',
           'v': 'https://s32.postimg.org/e7dlrey91/image.png',
           'w': 'https://s32.postimg.org/fnp49k15x/image.png',
           'x': 'https://s32.postimg.org/dkep1w1d1/image.png',
           'y': 'https://s32.postimg.org/um7j3zg85/image.png',
           'z': 'https://s32.postimg.org/jb4vfm9d1/image.png'}
def mainlist(item):
    """Build the channel's main menu: all movies, genres, A-Z and search."""
    logger.info()
    todas_thumb = 'https://s18.postimg.org/fwvaeo6qh/todas.png'
    generos_thumb = 'https://s3.postimg.org/5s9jg2wtf/generos.png'
    letras_thumb = 'https://s17.postimg.org/fwi1y99en/a-z.png'
    buscar_thumb = 'https://s30.postimg.org/pei7txpa9/buscar.png'
    itemlist = [
        item.clone(title="Todas", action="lista", thumbnail=todas_thumb,
                   fanart=todas_thumb, url=host),
        item.clone(title="Generos", action="seccion", thumbnail=generos_thumb,
                   fanart=generos_thumb, url=host, extra='generos'),
        item.clone(title="Alfabetico", action="seccion", thumbnail=letras_thumb,
                   fanart=letras_thumb, url=host, extra='letras'),
        item.clone(title="Buscar", action="search", url=host + '/?s=',
                   thumbnail=buscar_thumb, fanart=buscar_thumb),
    ]
    return itemlist
def lista(item):
    """Scrape one page of movies and append a pagination entry if present.

    Two layouts exist: the regular grid and the table used by the A-Z
    ("letras") index; ``item.extra`` selects the matching pattern.

    Cleanup: removed the unused local ``actual_page_url`` and several
    pass-through temporaries; behaviour is unchanged.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    if item.extra != 'letras':
        patron = '<li class="TPostMv">.*?<a href="(.*?)"><div class="Image">.*?src="(.*?)\?resize=.*?".*?class="Title">(.*?)<\/h2>.*?'
        patron += '<span class="Year">(.*?)<\/span>.*?<span class="Qlty">(.*?)<\/span><\/p><div class="Description"><p>(.*?)<\/p>'
    else:
        patron = '<td class="MvTbImg"> <a href="(.*?)".*?src="(.*?)\?resize=.*?".*?<strong>(.*?)<\/strong> <\/a><\/td><td>(.*?)<\/td><td>.*?'
        patron += 'class="Qlty">(.*?)<\/span><\/p><\/td><td>(.*?)<\/td><td>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear, calidad, scrapedplot in matches:
        title = scrapedtitle + ' (' + calidad + ')'
        itemlist.append(
            Item(channel=item.channel, action='findvideos', title=title,
                 url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot,
                 fanart='', contentTitle=scrapedtitle,
                 infoLabels={'year': scrapedyear}))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # Pagination: only offered when this page actually yielded results.
    if itemlist:
        next_page = scrapertools.find_single_match(
            data, '<a class="nextpostslink" rel="next" href="(.*?)">')
        if next_page != '':
            itemlist.append(Item(channel=item.channel, action="lista",
                                 title='Siguiente >>>', url=next_page,
                                 thumbnail='https://s16.postimg.org/9okdu7hhx/siguiente.png'))
    return itemlist
def seccion(item):
    """List genre or A-Z submenus, attaching thumbnails when known."""
    logger.info()
    data = httptools.downloadpage(item.url).data
    if item.extra == 'generos':
        patron = 'menu-item-object-category menu-item-.*?"><a href="(.*?)">(.*?)<\/a><\/li>'
    else:
        patron = '<li><a href="(.*?\/letter\/.*?)">(.*?)<\/a><\/li>'
    itemlist = []
    for scrapedurl, scrapedtitle in re.compile(patron, re.DOTALL).findall(data):
        # Pick a thumbnail from the genre table or the letter table.
        if item.extra == 'generos' and scrapedtitle in tgenero:
            thumbnail = tgenero[scrapedtitle]
        elif scrapedtitle.lower() in tletras:
            thumbnail = tletras[scrapedtitle.lower()]
        else:
            thumbnail = ''
        itemlist.append(
            Item(channel=item.channel, action="lista", title=scrapedtitle,
                 fulltitle=item.title, url=scrapedurl, thumbnail=thumbnail,
                 fanart='', extra=item.extra))
    return itemlist
def findvideos(item):
    """Collect every hosted copy of the movie and resolve its video URLs."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = 'class="Num">.*?<\/span>.*?href="(.*?)" class="Button STPb">.*?<\/a>.*?<span>(.*?)<\/span><\/td><td><span>(.*?)<\/span><\/td><td><span>.*?<\/span>'
    for scrapedurl, servidor, idioma in re.compile(patron, re.DOTALL).findall(data):
        option = item.clone(url=scrapedurl, servidor=servidor, idioma=idioma,
                            infoLabels=item.infoLabels)
        itemlist += get_video_urls(option)
    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(channel=item.channel,
                 title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                 url=item.url, action="add_pelicula_to_library",
                 extra="findvideos", contentTitle=item.contentTitle))
        # Keep the library entry last: the trailer goes just before it.
        itemlist.insert(len(itemlist) - 1,
                        item.clone(channel='trailertools', action='buscartrailer',
                                   title='[COLOR orange]Trailer en Youtube[/COLOR]'))
    return itemlist
def get_video_urls(item):
    """Unpack the player script of one hosted copy and list its qualities."""
    logger.info()
    page = httptools.downloadpage(item.url).data
    packed = scrapertools.find_single_match(
        page, '<script type="text\/javascript">(.*?)<\/script>')
    unpacked = jsunpack.unpack(packed)
    subtitle = scrapertools.find_single_match(
        unpacked, 'tracks:\[{"file":"(.*?)","label":".*?","kind":"captions"}')
    sources = re.compile('"file":"(.*?)","label":"(.*?)","type":"video.*?"}',
                         re.DOTALL).findall(unpacked)
    itemlist = []
    for url, calidad in sources:
        # The site's own mirror is presented as a direct link.
        if item.servidor == 'PELISENCASA':
            item.servidor = 'Directo'
        title = item.contentTitle + ' (' + item.idioma + ')' + ' (' + calidad + ')' + ' (' + item.servidor + ')'
        itemlist.append(item.clone(title=title, url=url, calidad=calidad,
                                   action='play', subtitle=subtitle))
    return itemlist
def search(item, texto):
    """Run a site search; an empty query yields no results."""
    logger.info()
    item.url += texto.replace(" ", "+")
    if not texto:
        return []
    return lista(item)
def newest(categoria):
    """Feed the global "Novedades" section for this channel.

    Returns the first page of the matching category with the pagination
    pseudo-entry stripped; any failure is logged and yields [].
    """
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria == 'peliculas':
            item.url = host
        elif categoria == 'infantiles':
            item.url = host + '/category/animacion/'
        itemlist = lista(item)
        # Drop the trailing "next page" entry so only movies remain.
        if itemlist[-1].title == 'Siguiente >>>':
            del itemlist[-1]
    except:
        import sys
        for exc_part in sys.exc_info():
            logger.error("{0}".format(exc_part))
        return []
    return itemlist

View File

@@ -215,18 +215,20 @@ def search(item, texto):
def findvideos(item): def findvideos(item):
logger.info() logger.info()
itemlist = [] itemlist = []
duplicados = []
data = get_source(item.url) data = get_source(item.url)
patron = '<div class=TPlayerTbCurrent id=(.*?)><iframe.*?src=(.*?) frameborder' patron = '<div class=TPlayer.*?\s+id=(.*?)><iframe width=560 height=315 src=(.*?) frameborder=0'
matches = re.compile(patron, re.DOTALL).findall(data) matches = re.compile(patron, re.DOTALL).findall(data)
for opt, urls_page in matches: for opt, urls_page in matches:
language = scrapertools.find_single_match (data,'data-TPlayerNv=%s><span>Opción <strong>.*?' logger.debug ('option: %s' % opt)
language = scrapertools.find_single_match (data,'data-TPlayerNv=%s><span>Opción <strong>.'
'<\/strong><\/span>.*?<span>(.*?)<\/span'%opt) '<\/strong><\/span>.*?<span>(.*?)<\/span'%opt)
data = httptools.downloadpage(urls_page).data
servers = scrapertools.find_multiple_matches(data,'<button id="(.*?)"') video_data = httptools.downloadpage(urls_page).data
servers = scrapertools.find_multiple_matches(video_data,'<button id="(.*?)"')
for server in servers: for server in servers:
quality = item.quality
info_urls = urls_page.replace('embed','get') info_urls = urls_page.replace('embed','get')
video_info=httptools.downloadpage(info_urls+'/'+server).data video_info=httptools.downloadpage(info_urls+'/'+server).data
video_info = jsontools.load(video_info) video_info = jsontools.load(video_info)
@@ -238,8 +240,13 @@ def findvideos(item):
url = 'https://'+video_server+'/embed/'+video_id url = 'https://'+video_server+'/embed/'+video_id
else: else:
url = 'https://'+video_server+'/e/'+video_id url = 'https://'+video_server+'/e/'+video_id
title = item.title title = item.contentTitle + ' [%s] [%s]'%(quality, language)
itemlist.append(item.clone(title=title, url=url, action='play', language=language)) itemlist.append(item.clone(title=title,
url=url,
action='play',
language=language,
quality=quality
))
itemlist = servertools.get_servers_itemlist(itemlist) itemlist = servertools.get_servers_itemlist(itemlist)
return itemlist return itemlist

View File

@@ -176,7 +176,7 @@ def lista(item):
patron = '<img.*?width="147" heigh="197".*?src="([^"]+)".*?>.*?.<i class="icon online-play"><\/i>.*?.<h2 ' \ patron = '<img.*?width="147" heigh="197".*?src="([^"]+)".*?>.*?.<i class="icon online-play"><\/i>.*?.<h2 ' \
'class="title title-.*?">.*?.<a href="([^"]+)" title="([^"]+)">.*?>' 'class="title title-.*?">.*?.<a href="([^"]+)" title="([^"]+)">.*?>'
actual = scrapertools.find_single_match(data, actual = scrapertools.find_single_match(data,
'<a href="http:\/\/www.pelisplus.tv\/.*?\/pag-([^p]+)pag-2" ' '<a href="https:\/\/www.pelisplus.tv\/.*?\/pag-([^p]+)pag-2" '
'class="page bicon last"><<\/a>') 'class="page bicon last"><<\/a>')
else: else:
patron = '<img data-original="([^"]+)".*?width="147" heigh="197".*?src=.*?>.*?\n<i class="icon ' \ patron = '<img data-original="([^"]+)".*?width="147" heigh="197".*?src=.*?>.*?\n<i class="icon ' \
@@ -428,13 +428,17 @@ def get_vip(url):
itemlist =[] itemlist =[]
url= url.replace('reproductor','vip') url= url.replace('reproductor','vip')
data = httptools.downloadpage(url).data data = httptools.downloadpage(url).data
patron = '<a href="(.*?)"> ' video_urls = scrapertools.find_multiple_matches(data,'<a href="(.*?)".*?>')
video_urls = scrapertools.find_multiple_matches(data,'<a href="(.*?)">')
for item in video_urls: for item in video_urls:
id, tipo, lang= scrapertools.find_single_match(item,'plus\/(\d+)\/.*?=(\d+).*?=(.*)') if 'elreyxhd' in item:
new_url = 'https://www.elreyxhd.com/pelisplus.php?id=%s&tipo=%s&idioma=%s' % (id, tipo, lang) if 'plus'in item:
data=httptools.downloadpage(new_url, follow_redirects=False).headers id, tipo, lang= scrapertools.find_single_match(item,'plus\/(\d+)\/.*?=(\d+).*?=(.*)')
itemlist.extend(servertools.find_video_items(data=str(data))) new_url = 'https://www.elreyxhd.com/pelisplus.php?id=%s&tipo=%s&idioma=%s' % (id, tipo, lang)
else:
id = scrapertools.find_single_match(item,'episodes\/(\d+)')
new_url = 'https://www.elreyxhd.com/samir.php?id=%s&tipo=capitulo&idioma=latino&x=&sv=' % id
data=httptools.downloadpage(new_url, follow_redirects=False).headers
itemlist.extend(servertools.find_video_items(data=str(data)))
return itemlist return itemlist

View File

@@ -25,7 +25,6 @@ color1, color2, color3 = ['0xFFB10021', '0xFFB10021', '0xFFB10004']
def login(): def login():
url_origen = "https://www.plusdede.com/login?popup=1" url_origen = "https://www.plusdede.com/login?popup=1"
data = httptools.downloadpage(url_origen, follow_redirects=True).data data = httptools.downloadpage(url_origen, follow_redirects=True).data
logger.debug("dataPLUSDEDE=" + data)
if re.search(r'(?i)%s' % config.get_setting("plusdedeuser", "plusdede"), data): if re.search(r'(?i)%s' % config.get_setting("plusdedeuser", "plusdede"), data):
return True return True
@@ -34,12 +33,10 @@ def login():
post = "_token=" + str(token) + "&email=" + str( post = "_token=" + str(token) + "&email=" + str(
config.get_setting("plusdedeuser", "plusdede")) + "&password=" + str( config.get_setting("plusdedeuser", "plusdede")) + "&password=" + str(
config.get_setting("plusdedepassword", "plusdede")) + "&app=2131296469" config.get_setting("plusdedepassword", "plusdede")) + "&app=2131296469"
# logger.debug("dataPLUSDEDE_POST="+post)
url = "https://www.plusdede.com/" url = "https://www.plusdede.com/"
headers = {"Referer": url, "X-Requested-With": "XMLHttpRequest", "X-CSRF-TOKEN": token} headers = {"Referer": url, "X-Requested-With": "XMLHttpRequest", "X-CSRF-TOKEN": token}
data = httptools.downloadpage("https://www.plusdede.com/login", post=post, headers=headers, data = httptools.downloadpage("https://www.plusdede.com/login", post=post, headers=headers,
replace_headers=False).data replace_headers=False).data
logger.debug("PLUSDEDE_DATA=" + data)
if "redirect" in data: if "redirect" in data:
return True return True
else: else:
@@ -183,7 +180,6 @@ def generos(item):
tipo = item.url.replace("https://www.plusdede.com/", "") tipo = item.url.replace("https://www.plusdede.com/", "")
# Descarga la pagina # Descarga la pagina
data = httptools.downloadpage(item.url).data data = httptools.downloadpage(item.url).data
logger.debug("data=" + data)
# Extrae las entradas (carpetas) # Extrae las entradas (carpetas)
data = scrapertools.find_single_match(data, data = scrapertools.find_single_match(data,
@@ -198,7 +194,6 @@ def generos(item):
plot = "" plot = ""
# https://www.plusdede.com/pelis?genre_id=1 # https://www.plusdede.com/pelis?genre_id=1
url = "https://www.plusdede.com/" + tipo + "?genre_id=" + id_genere url = "https://www.plusdede.com/" + tipo + "?genre_id=" + id_genere
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
itemlist.append( itemlist.append(
Item(channel=item.channel, action="peliculas", title=title, url=url, thumbnail=thumbnail, plot=plot, Item(channel=item.channel, action="peliculas", title=title, url=url, thumbnail=thumbnail, plot=plot,
fulltitle=title)) fulltitle=title))
@@ -229,11 +224,9 @@ def buscar(item):
# Descarga la pagina # Descarga la pagina
headers = {"X-Requested-With": "XMLHttpRequest"} headers = {"X-Requested-With": "XMLHttpRequest"}
data = httptools.downloadpage(item.url, headers=headers).data data = httptools.downloadpage(item.url, headers=headers).data
logger.debug("data=" + data)
# Extrae las entradas (carpetas) # Extrae las entradas (carpetas)
json_object = jsontools.load(data) json_object = jsontools.load(data)
logger.debug("content=" + json_object["content"])
data = json_object["content"] data = json_object["content"]
return parse_mixed_results(item, data) return parse_mixed_results(item, data)
@@ -248,7 +241,6 @@ def parse_mixed_results(item, data):
patron += '.*?<div class="year">([^<]+)</div>+' patron += '.*?<div class="year">([^<]+)</div>+'
patron += '.*?<div class="value"><i class="fa fa-star"></i> ([^<]+)</div>' patron += '.*?<div class="value"><i class="fa fa-star"></i> ([^<]+)</div>'
matches = re.compile(patron, re.DOTALL).findall(data) matches = re.compile(patron, re.DOTALL).findall(data)
logger.debug("PARSE_DATA:" + data)
if item.tipo == "lista": if item.tipo == "lista":
following = scrapertools.find_single_match(data, '<div class="follow-lista-buttons ([^"]+)">') following = scrapertools.find_single_match(data, '<div class="follow-lista-buttons ([^"]+)">')
data_id = scrapertools.find_single_match(data, 'data-model="10" data-id="([^"]+)">') data_id = scrapertools.find_single_match(data, 'data-model="10" data-id="([^"]+)">')
@@ -286,7 +278,6 @@ def parse_mixed_results(item, data):
sectionStr = "docu" sectionStr = "docu"
referer = urlparse.urljoin(item.url, scrapedurl) referer = urlparse.urljoin(item.url, scrapedurl)
url = urlparse.urljoin(item.url, scrapedurl) url = urlparse.urljoin(item.url, scrapedurl)
logger.debug("PELII_title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
if item.tipo != "series": if item.tipo != "series":
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, extra=referer, url=url, itemlist.append(Item(channel=item.channel, action="findvideos", title=title, extra=referer, url=url,
thumbnail=thumbnail, plot=plot, fulltitle=fulltitle, fanart=fanart, thumbnail=thumbnail, plot=plot, fulltitle=fulltitle, fanart=fanart,
@@ -294,7 +285,6 @@ def parse_mixed_results(item, data):
else: else:
referer = item.url referer = item.url
url = urlparse.urljoin(item.url, scrapedurl) url = urlparse.urljoin(item.url, scrapedurl)
logger.debug("SERIE_title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
if item.tipo != "pelis": if item.tipo != "pelis":
itemlist.append(Item(channel=item.channel, action="episodios", title=title, extra=referer, url=url, itemlist.append(Item(channel=item.channel, action="episodios", title=title, extra=referer, url=url,
thumbnail=thumbnail, plot=plot, fulltitle=fulltitle, show=title, fanart=fanart, thumbnail=thumbnail, plot=plot, fulltitle=fulltitle, show=title, fanart=fanart,
@@ -304,7 +294,6 @@ def parse_mixed_results(item, data):
'<div class="onclick load-more-icon no-json" data-action="replace" data-url="([^"]+)">') '<div class="onclick load-more-icon no-json" data-action="replace" data-url="([^"]+)">')
if next_page != "": if next_page != "":
url = urlparse.urljoin("https://www.plusdede.com", next_page).replace("amp;", "") url = urlparse.urljoin("https://www.plusdede.com", next_page).replace("amp;", "")
logger.debug("URL_SIGUIENTE:" + url)
itemlist.append( itemlist.append(
Item(channel=item.channel, action="pag_sig", token=item.token, title=">> Página siguiente", Item(channel=item.channel, action="pag_sig", token=item.token, title=">> Página siguiente",
extra=item.extra, url=url)) extra=item.extra, url=url))
@@ -323,7 +312,6 @@ def siguientes(item): # No utilizada
# Descarga la pagina # Descarga la pagina
data = httptools.downloadpage(item.url).data data = httptools.downloadpage(item.url).data
logger.debug("data=" + data)
# Extrae las entradas (carpetas) # Extrae las entradas (carpetas)
bloque = scrapertools.find_single_match(data, '<h2>Siguiendo</h2>(.*?)<div class="box">') bloque = scrapertools.find_single_match(data, '<h2>Siguiendo</h2>(.*?)<div class="box">')
@@ -358,7 +346,6 @@ def siguientes(item): # No utilizada
Item(channel=item.channel, action="episodio", title=title, url=url, thumbnail=thumbnail, plot=plot, Item(channel=item.channel, action="episodio", title=title, url=url, thumbnail=thumbnail, plot=plot,
fulltitle=title, show=title, fanart=fanart, extra=session + "|" + episode)) fulltitle=title, show=title, fanart=fanart, extra=session + "|" + episode))
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
return itemlist return itemlist
@@ -369,7 +356,6 @@ def episodio(item):
# Descarga la pagina # Descarga la pagina
data = httptools.downloadpage(item.url).data data = httptools.downloadpage(item.url).data
# logger.debug("data="+data)
session = str(int(item.extra.split("|")[0])) session = str(int(item.extra.split("|")[0]))
episode = str(int(item.extra.split("|")[1])) episode = str(int(item.extra.split("|")[1]))
@@ -377,7 +363,6 @@ def episodio(item):
matchestemporadas = re.compile(patrontemporada, re.DOTALL).findall(data) matchestemporadas = re.compile(patrontemporada, re.DOTALL).findall(data)
for bloque_episodios in matchestemporadas: for bloque_episodios in matchestemporadas:
logger.debug("bloque_episodios=" + bloque_episodios)
# Extrae los episodios # Extrae los episodios
patron = '<span class="title defaultPopup" href="([^"]+)"><span class="number">' + episode + ' </span>([^<]+)</span>(\s*</div>\s*<span[^>]*><span[^>]*>[^<]*</span><span[^>]*>[^<]*</span></span><div[^>]*><button[^>]*><span[^>]*>[^<]*</span><span[^>]*>[^<]*</span></button><div class="action([^"]*)" data-action="seen">)?' patron = '<span class="title defaultPopup" href="([^"]+)"><span class="number">' + episode + ' </span>([^<]+)</span>(\s*</div>\s*<span[^>]*><span[^>]*>[^<]*</span><span[^>]*>[^<]*</span></span><div[^>]*><button[^>]*><span[^>]*>[^<]*</span><span[^>]*>[^<]*</span></button><div class="action([^"]*)" data-action="seen">)?'
@@ -401,7 +386,6 @@ def episodio(item):
itemlist.append( itemlist.append(
Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot, Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot,
fulltitle=title, fanart=item.fanart, show=item.show)) fulltitle=title, fanart=item.fanart, show=item.show))
logger.debug("Abrimos title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
itemlist2 = [] itemlist2 = []
for capitulo in itemlist: for capitulo in itemlist:
@@ -415,11 +399,9 @@ def peliculas(item):
# Descarga la pagina # Descarga la pagina
headers = {"X-Requested-With": "XMLHttpRequest"} headers = {"X-Requested-With": "XMLHttpRequest"}
data = httptools.downloadpage(item.url, headers=headers).data data = httptools.downloadpage(item.url, headers=headers).data
logger.debug("data_DEF_PELICULAS=" + data)
# Extrae las entradas (carpetas) # Extrae las entradas (carpetas)
json_object = jsontools.load(data) json_object = jsontools.load(data)
logger.debug("html=" + json_object["content"])
data = json_object["content"] data = json_object["content"]
return parse_mixed_results(item, data) return parse_mixed_results(item, data)
@@ -432,24 +414,18 @@ def episodios(item):
# Descarga la pagina # Descarga la pagina
idserie = '' idserie = ''
data = httptools.downloadpage(item.url).data data = httptools.downloadpage(item.url).data
# logger.debug("dataEPISODIOS="+data)
patrontemporada = '<ul.*?<li class="season-header" >([^<]+)<(.*?)\s+</ul>' patrontemporada = '<ul.*?<li class="season-header" >([^<]+)<(.*?)\s+</ul>'
matchestemporadas = re.compile(patrontemporada, re.DOTALL).findall(data) matchestemporadas = re.compile(patrontemporada, re.DOTALL).findall(data)
logger.debug(matchestemporadas)
idserie = scrapertools.find_single_match(data, 'data-model="5" data-id="(\d+)"') idserie = scrapertools.find_single_match(data, 'data-model="5" data-id="(\d+)"')
token = scrapertools.find_single_match(data, '_token" content="([^"]+)"') token = scrapertools.find_single_match(data, '_token" content="([^"]+)"')
if (config.get_platform().startswith("xbmc") or config.get_platform().startswith("kodi")): if (config.get_platform().startswith("xbmc") or config.get_platform().startswith("kodi")):
itemlist.append(Item(channel=item.channel, action="infosinopsis", title="INFO / SINOPSIS", url=item.url, itemlist.append(Item(channel=item.channel, action="infosinopsis", title="INFO / SINOPSIS", url=item.url,
thumbnail=item.thumbnail, fanart=item.fanart, folder=False)) thumbnail=item.thumbnail, fanart=item.fanart, folder=False))
for nombre_temporada, bloque_episodios in matchestemporadas: for nombre_temporada, bloque_episodios in matchestemporadas:
logger.debug("nombre_temporada=" + nombre_temporada)
logger.debug("bloque_episodios=" + bloque_episodios)
logger.debug("id_serie=" + idserie)
# Extrae los episodios # Extrae los episodios
patron_episodio = '<li><a href="#"(.*?)</a></li>' patron_episodio = '<li><a href="#"(.*?)</a></li>'
# patron = '<li><a href="#" data-id="([^"]*)".*?data-href="([^"]+)">\s*<div class="name">\s*<span class="num">([^<]+)</span>\s*([^<]+)\s*</div>.*?"show-close-footer episode model([^"]+)"' # patron = '<li><a href="#" data-id="([^"]*)".*?data-href="([^"]+)">\s*<div class="name">\s*<span class="num">([^<]+)</span>\s*([^<]+)\s*</div>.*?"show-close-footer episode model([^"]+)"'
matches = re.compile(patron_episodio, re.DOTALL).findall(bloque_episodios) matches = re.compile(patron_episodio, re.DOTALL).findall(bloque_episodios)
# logger.debug(matches)
for data_episodio in matches: for data_episodio in matches:
scrapeid = scrapertools.find_single_match(data_episodio, '<li><a href="#" data-id="([^"]*)"') scrapeid = scrapertools.find_single_match(data_episodio, '<li><a href="#" data-id="([^"]*)"')
@@ -462,7 +438,6 @@ def episodios(item):
title = nombre_temporada.replace("Temporada ", "").replace("Extras de la serie", "Extras 0").replace(" ", title = nombre_temporada.replace("Temporada ", "").replace("Extras de la serie", "Extras 0").replace(" ",
"") + "x" + numero + " " + scrapertools.htmlclean( "") + "x" + numero + " " + scrapertools.htmlclean(
scrapedtitle) scrapedtitle)
logger.debug("CAP_VISTO:" + visto)
if visto.strip() == "seen": if visto.strip() == "seen":
title = "[visto] " + title title = "[visto] " + title
@@ -478,7 +453,6 @@ def episodios(item):
Item(channel=item.channel, action="findvideos", nom_serie=item.title, tipo="5", title=title, url=url, Item(channel=item.channel, action="findvideos", nom_serie=item.title, tipo="5", title=title, url=url,
thumbnail=thumbnail, plot=plot, fulltitle=title, fanart=fanart, show=item.show)) thumbnail=thumbnail, plot=plot, fulltitle=title, fanart=fanart, show=item.show))
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
if config.get_videolibrary_support(): if config.get_videolibrary_support():
# con año y valoracion la serie no se puede actualizar correctamente, si ademas cambia la valoracion, creara otra carpeta # con año y valoracion la serie no se puede actualizar correctamente, si ademas cambia la valoracion, creara otra carpeta
@@ -540,7 +514,6 @@ def parse_listas(item, bloque_lista):
thumbnail = "" thumbnail = ""
itemlist.append( itemlist.append(
Item(channel=item.channel, action="peliculas", token=item.token, tipo="lista", title=title, url=url)) Item(channel=item.channel, action="peliculas", token=item.token, tipo="lista", title=title, url=url))
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "], tipo =[lista]")
nextpage = scrapertools.find_single_match(bloque_lista, nextpage = scrapertools.find_single_match(bloque_lista,
'<div class="onclick load-more-icon no-json" data-action="replace" data-url="([^"]+)"') '<div class="onclick load-more-icon no-json" data-action="replace" data-url="([^"]+)"')
@@ -569,13 +542,10 @@ def listas(item):
patron = '<div class="content">\s*<h2>Listas populares(.*?)</div>\s*</div>\s*</div>\s*</div>\s*</div>' patron = '<div class="content">\s*<h2>Listas populares(.*?)</div>\s*</div>\s*</div>\s*</div>\s*</div>'
data = httptools.downloadpage(item.url).data data = httptools.downloadpage(item.url).data
logger.debug("dataSINHEADERS=" + data)
item.token = scrapertools.find_single_match(data, '_token" content="([^"]+)"').strip() item.token = scrapertools.find_single_match(data, '_token" content="([^"]+)"').strip()
logger.debug("token_LISTA_" + item.token)
bloque_lista = scrapertools.find_single_match(data, patron) bloque_lista = scrapertools.find_single_match(data, patron)
logger.debug("bloque_LISTA" + bloque_lista)
return parse_listas(item, bloque_lista) return parse_listas(item, bloque_lista)
@@ -585,7 +555,6 @@ def lista_sig(item):
headers = {"X-Requested-With": "XMLHttpRequest"} headers = {"X-Requested-With": "XMLHttpRequest"}
data = httptools.downloadpage(item.url, headers=headers).data data = httptools.downloadpage(item.url, headers=headers).data
logger.debug("data=" + data)
return parse_listas(item, data) return parse_listas(item, data)
@@ -595,7 +564,6 @@ def pag_sig(item):
headers = {"X-Requested-With": "XMLHttpRequest"} headers = {"X-Requested-With": "XMLHttpRequest"}
data = httptools.downloadpage(item.url, headers=headers).data data = httptools.downloadpage(item.url, headers=headers).data
logger.debug("data=" + data)
return parse_mixed_results(item, data) return parse_mixed_results(item, data)
@@ -605,8 +573,6 @@ def findvideos(item, verTodos=False):
# Descarga la pagina # Descarga la pagina
data = httptools.downloadpage(item.url).data data = httptools.downloadpage(item.url).data
logger.info("URL:" + item.url + " DATA=" + data)
# logger.debug("data="+data)
data_model = scrapertools.find_single_match(data, 'data-model="([^"]+)"') data_model = scrapertools.find_single_match(data, 'data-model="([^"]+)"')
data_id = scrapertools.find_single_match(data, 'data-id="([^"]+)"') data_id = scrapertools.find_single_match(data, 'data-id="([^"]+)"')
@@ -616,7 +582,6 @@ def findvideos(item, verTodos=False):
url = "https://www.plusdede.com/aportes/" + data_model + "/" + data_id + "?popup=1" url = "https://www.plusdede.com/aportes/" + data_model + "/" + data_id + "?popup=1"
data = httptools.downloadpage(url).data data = httptools.downloadpage(url).data
logger.debug("URL:" + url + " dataLINKS=" + data)
token = scrapertools.find_single_match(data, '_token" content="([^"]+)"') token = scrapertools.find_single_match(data, '_token" content="([^"]+)"')
patron = 'target="_blank" (.*?)</a>' patron = 'target="_blank" (.*?)</a>'
@@ -628,7 +593,6 @@ def findvideos(item, verTodos=False):
itemlist.append(Item(channel=item.channel, action="infosinopsis", title="INFO / SINOPSIS", url=item.url, itemlist.append(Item(channel=item.channel, action="infosinopsis", title="INFO / SINOPSIS", url=item.url,
thumbnail=item.thumbnail, fanart=item.fanart, folder=False)) thumbnail=item.thumbnail, fanart=item.fanart, folder=False))
logger.debug("TRAILER_YOUTUBE:" + trailer)
itemlist.append(Item(channel=item.channel, action="play", title="TRAILER", url=item.url, trailer=trailer, itemlist.append(Item(channel=item.channel, action="play", title="TRAILER", url=item.url, trailer=trailer,
thumbnail=item.thumbnail, fanart=item.fanart, folder=False)) thumbnail=item.thumbnail, fanart=item.fanart, folder=False))
@@ -637,9 +601,6 @@ def findvideos(item, verTodos=False):
item.channel) # 0:no, 1:valoracion, 2:idioma, 3:calidad, 4:idioma+calidad, 5:idioma+valoracion, 6:idioma+calidad+valoracion item.channel) # 0:no, 1:valoracion, 2:idioma, 3:calidad, 4:idioma+calidad, 5:idioma+valoracion, 6:idioma+calidad+valoracion
showlinks = config.get_setting("plusdedeshowlinks", item.channel) # 0:todos, 1:ver online, 2:descargar showlinks = config.get_setting("plusdedeshowlinks", item.channel) # 0:todos, 1:ver online, 2:descargar
# sortlinks = int(sortlinks) if sortlinks != '' and sortlinks !="No" else 0
# showlinks = int(showlinks) if showlinks != '' and showlinks !="No" else 0
if sortlinks != '' and sortlinks != "No": if sortlinks != '' and sortlinks != "No":
sortlinks = int(sortlinks) sortlinks = int(sortlinks)
else: else:
@@ -651,14 +612,13 @@ def findvideos(item, verTodos=False):
showlinks = 0 showlinks = 0
for match in matches: for match in matches:
# logger.debug("match="+match)
jdown = scrapertools.find_single_match(match, '<span class="fa fa-download"></span>([^<]+)') jdown = scrapertools.find_single_match(match, '<span class="fa fa-download"></span>([^<]+)')
if (showlinks == 1 and jdown != '') or ( if (showlinks == 1 and jdown != '') or (
showlinks == 2 and jdown == ''): # Descartar enlaces veronline/descargar showlinks == 2 and jdown == ''): # Descartar enlaces veronline/descargar
continue continue
idioma_1 = "" idioma_1 = ""
idiomas = re.compile('<img src="https://cdn.plusdede.com/images/flags/([^"]+).png', re.DOTALL).findall(match) idiomas = re.compile('<img src="https://cd.*?plusdede.com/images/flags/([^"]+).png', re.DOTALL).findall(match)
idioma_0 = idiomas[0] idioma_0 = idiomas[0]
if len(idiomas) > 1: if len(idiomas) > 1:
idioma_1 = idiomas[1] idioma_1 = idiomas[1]
@@ -670,16 +630,12 @@ def findvideos(item, verTodos=False):
calidad_video = scrapertools.find_single_match(match, calidad_video = scrapertools.find_single_match(match,
'<span class="fa fa-video-camera"></span>(.*?)</div>').replace( '<span class="fa fa-video-camera"></span>(.*?)</div>').replace(
" ", "").replace("\n", "") " ", "").replace("\n", "")
logger.debug("calidad_video=" + calidad_video)
calidad_audio = scrapertools.find_single_match(match, calidad_audio = scrapertools.find_single_match(match,
'<span class="fa fa-headphones"></span>(.*?)</div>').replace( '<span class="fa fa-headphones"></span>(.*?)</div>').replace(
" ", "").replace("\n", "") " ", "").replace("\n", "")
logger.debug("calidad_audio=" + calidad_audio)
thumb_servidor = scrapertools.find_single_match(match, '<img src="([^"]+)">') thumb_servidor = scrapertools.find_single_match(match, '<img src="([^"]+)">')
logger.debug("thumb_servidor=" + thumb_servidor)
nombre_servidor = scrapertools.find_single_match(thumb_servidor, "hosts/([^\.]+).png") nombre_servidor = scrapertools.find_single_match(thumb_servidor, "hosts/([^\.]+).png")
logger.debug("nombre_servidor=" + nombre_servidor)
if jdown != '': if jdown != '':
title = "Download " + nombre_servidor + " (" + idioma + ") (Calidad " + calidad_video.strip() + ", audio " + calidad_audio.strip() + ")" title = "Download " + nombre_servidor + " (" + idioma + ") (Calidad " + calidad_video.strip() + ", audio " + calidad_audio.strip() + ")"
@@ -696,7 +652,6 @@ def findvideos(item, verTodos=False):
url = urlparse.urljoin(item.url, scrapertools.find_single_match(match, 'href="([^"]+)"')) url = urlparse.urljoin(item.url, scrapertools.find_single_match(match, 'href="([^"]+)"'))
thumbnail = thumb_servidor thumbnail = thumb_servidor
plot = "" plot = ""
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
if sortlinks > 0: if sortlinks > 0:
# orden1 para dejar los "downloads" detras de los "ver" al ordenar # orden1 para dejar los "downloads" detras de los "ver" al ordenar
# orden2 segun configuración # orden2 segun configuración
@@ -788,13 +743,10 @@ def play(item):
headers = {'Referer': item.extra} headers = {'Referer': item.extra}
data = httptools.downloadpage(item.url, headers=headers).data data = httptools.downloadpage(item.url, headers=headers).data
# logger.debug("dataLINK="+data)
url = scrapertools.find_single_match(data, url = scrapertools.find_single_match(data,
'<a href="([^"]+)" target="_blank"><button class="btn btn-primary">visitar enlace</button>') '<a href="([^"]+)" target="_blank"><button class="btn btn-primary">visitar enlace</button>')
url = urlparse.urljoin("https://www.plusdede.com", url) url = urlparse.urljoin("https://www.plusdede.com", url)
# logger.debug("DATA_LINK_FINAL:"+url)
logger.debug("URL_PLAY:" + url)
headers = {'Referer': item.url} headers = {'Referer': item.url}
media_url = httptools.downloadpage(url, headers=headers, follow_redirects=False).headers.get("location") media_url = httptools.downloadpage(url, headers=headers, follow_redirects=False).headers.get("location")
# logger.info("media_url="+media_url) # logger.info("media_url="+media_url)
@@ -808,7 +760,6 @@ def play(item):
videoitem.channel = item.channel videoitem.channel = item.channel
# Marcar como visto # Marcar como visto
logger.debug(item)
checkseen(item) checkseen(item)
return itemlist return itemlist
@@ -827,7 +778,6 @@ def checkseen(item):
tipo_str = "pelis" tipo_str = "pelis"
headers = {"Referer": "https://www.plusdede.com/" + tipo_str, "X-Requested-With": "XMLHttpRequest", headers = {"Referer": "https://www.plusdede.com/" + tipo_str, "X-Requested-With": "XMLHttpRequest",
"X-CSRF-TOKEN": item.token} "X-CSRF-TOKEN": item.token}
logger.debug("Entrando a checkseen " + url_temp + item.token)
data = httptools.downloadpage(url_temp, post="id=" + item.idtemp, headers=headers, replace_headers=True).data data = httptools.downloadpage(url_temp, post="id=" + item.idtemp, headers=headers, replace_headers=True).data
return True return True
@@ -836,7 +786,6 @@ def infosinopsis(item):
logger.info() logger.info()
data = httptools.downloadpage(item.url).data data = httptools.downloadpage(item.url).data
logger.debug("SINOPSISdata=" + data)
scrapedtitle = scrapertools.find_single_match(data, '<div class="media-title">([^<]+)</div>') scrapedtitle = scrapertools.find_single_match(data, '<div class="media-title">([^<]+)</div>')
scrapedvalue = scrapertools.find_single_match(data, '<span class="value">([^<]+)</span>') scrapedvalue = scrapertools.find_single_match(data, '<span class="value">([^<]+)</span>')
@@ -845,11 +794,8 @@ def infosinopsis(item):
scrapedduration = scrapertools.htmlclean(scrapertools.find_single_match(data, scrapedduration = scrapertools.htmlclean(scrapertools.find_single_match(data,
'<strong>Duración</strong>\s*<div class="mini-content">([^<]+)</div>').strip().replace( '<strong>Duración</strong>\s*<div class="mini-content">([^<]+)</div>').strip().replace(
" ", "").replace("\n", "")) " ", "").replace("\n", ""))
logger.debug(scrapedduration)
scrapedplot = scrapertools.find_single_match(data, '<div class="plot expandable">([^<]+)<div').strip() scrapedplot = scrapertools.find_single_match(data, '<div class="plot expandable">([^<]+)<div').strip()
logger.debug("SINOPSISdataplot=" + scrapedplot)
generos = scrapertools.find_single_match(data, '<strong>Género</strong>\s*<ul>(.*?)</ul>') generos = scrapertools.find_single_match(data, '<strong>Género</strong>\s*<ul>(.*?)</ul>')
logger.debug("generos=" + generos)
scrapedgenres = re.compile('<li>([^<]+)</li>', re.DOTALL).findall(generos) scrapedgenres = re.compile('<li>([^<]+)</li>', re.DOTALL).findall(generos)
scrapedcasting = re.compile( scrapedcasting = re.compile(
'<a href="https://www.plusdede.com/star/[^"]+"><div class="text-main">([^<]+)</div></a>\s*<div class="text-sub">\s*([^<]+)</div>', '<a href="https://www.plusdede.com/star/[^"]+"><div class="text-main">([^<]+)</div></a>\s*<div class="text-sub">\s*([^<]+)</div>',
@@ -954,7 +900,6 @@ def plusdede_check(item):
if item.tipo_esp == "lista": if item.tipo_esp == "lista":
url_temp = "https://www.plusdede.com/listas/addmediapopup/" + item.tipo + "/" + item.idtemp + "?popup=1" url_temp = "https://www.plusdede.com/listas/addmediapopup/" + item.tipo + "/" + item.idtemp + "?popup=1"
data = httptools.downloadpage(url_temp).data data = httptools.downloadpage(url_temp).data
logger.debug("DATA_CHECK_LISTA:" + data)
patron = '<div class="lista model" data-model="10" data-id="([^"]+)">+' patron = '<div class="lista model" data-model="10" data-id="([^"]+)">+'
patron += '.*?<a href="/lista/[^"]+">([^<]+)</a>+' patron += '.*?<a href="/lista/[^"]+">([^<]+)</a>+'
@@ -986,8 +931,6 @@ def plusdede_check(item):
"X-CSRF-TOKEN": item.token} "X-CSRF-TOKEN": item.token}
data = httptools.downloadpage(url_temp, post="id=" + item.idtemp, headers=headers, data = httptools.downloadpage(url_temp, post="id=" + item.idtemp, headers=headers,
replace_headers=True).data.strip() replace_headers=True).data.strip()
logger.debug("URL_PLUSDEDECHECK_DATA=" + url_temp + " ITEM:TIPO=" + item.tipo)
logger.debug("PLUSDEDECHECK_DATA=" + data)
dialog = platformtools dialog = platformtools
dialog.ok = platformtools.dialog_ok dialog.ok = platformtools.dialog_ok
if data == "1": if data == "1":

View File

@@ -30,11 +30,6 @@ def mainlist(item):
itemlist.append( itemlist.append(
Item(channel=item.channel, action="menudesta", title="Destacadas", url= host + "/pag/1", Item(channel=item.channel, action="menudesta", title="Destacadas", url= host + "/pag/1",
thumbnail="http://img.irtve.es/v/1074982/", fanart=mifan)) thumbnail="http://img.irtve.es/v/1074982/", fanart=mifan))
itemlist.append(Item(channel=item.channel, action="menupelis", title="Proximos estrenos",
url= host + "/archivos/proximos-estrenos/pag/1",
thumbnail="https://encrypted-tbn3.gstatic.com/images?q=tbn:ANd9GcTpsRC"
"-GTYzCqhor2gIDfAB61XeymwgXWSVBHoRAKs2c5HAn29f&reload=on",
fanart=mifan))
itemlist.append(Item(channel=item.channel, action="menupelis", title="Todas las Peliculas", itemlist.append(Item(channel=item.channel, action="menupelis", title="Todas las Peliculas",
url= host + "/pag/1", url= host + "/pag/1",
thumbnail="https://freaksociety.files.wordpress.com/2012/02/logos-cine.jpg", fanart=mifan)) thumbnail="https://freaksociety.files.wordpress.com/2012/02/logos-cine.jpg", fanart=mifan))
@@ -70,7 +65,8 @@ def menupelis(item):
logger.info(item.url) logger.info(item.url)
itemlist = [] itemlist = []
data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8') data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8')
if item.genre:
item.extra = item.genre
if item.extra == '': if item.extra == '':
section = 'Recién Agregadas' section = 'Recién Agregadas'
elif item.extra == 'year': elif item.extra == 'year':
@@ -79,17 +75,13 @@ def menupelis(item):
section = 'de Eróticas \+18' section = 'de Eróticas \+18'
else: else:
section = 'de %s'%item.extra section = 'de %s'%item.extra
patronenlaces = '<h.>Películas %s</h.>.*?>(.*?)</section>'%section
patronenlaces = '<h.>Películas %s<\/h.>.*?>(.*?)<\/section>'%section matchesenlaces = scrapertools.find_multiple_matches(data, patronenlaces)
matchesenlaces = re.compile(patronenlaces, re.DOTALL).findall(data)
for bloque_enlaces in matchesenlaces: for bloque_enlaces in matchesenlaces:
patron = '<div class="poster-media-card">.*?' patron = '<div class="poster-media-card">.*?'
patron += '<a href="(.*?)".*?title="(.*?)"(.*?)' patron += '<a href="(.*?)".*?title="(.*?)"(.*?)'
patron += '<img src="(.*?)"' patron += '<img src="(.*?)"'
matches = re.compile(patron, re.DOTALL).findall(bloque_enlaces) matches = re.compile(patron, re.DOTALL).findall(bloque_enlaces)
for scrapedurl, scrapedtitle, extra_info, scrapedthumbnail in matches: for scrapedurl, scrapedtitle, extra_info, scrapedthumbnail in matches:
title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película") title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
title = title.replace("Online", ""); title = title.replace("Online", "");
@@ -144,21 +136,14 @@ def menudesta(item):
# Peliculas de Estreno # Peliculas de Estreno
def menuestre(item): def menuestre(item):
logger.info(item.url) logger.info(item.url)
itemlist = [] itemlist = []
data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8') data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8')
patronenlaces = '<h1>Estrenos</h1>(.*?)</section>' patronenlaces = '<h1>Estrenos</h1>(.*?)</section>'
matchesenlaces = re.compile(patronenlaces, re.DOTALL).findall(data) matchesenlaces = scrapertools.find_multiple_matches(data, patronenlaces)
for bloque_enlaces in matchesenlaces: for bloque_enlaces in matchesenlaces:
# patron = '<a href="([^"]+)" title="([^"]+)"> <div class="poster".*?<img src="([^"]+)"'
patron = '<div class="poster-media-card">.*?' patron = '<div class="poster-media-card">.*?'
patron += '<a href="(.*?)".*?title="(.*?)"(.*?)' patron += '<a href="(.*?)".*?title="(.*?)"(.*?)'
patron += '<img src="(.*?)"' patron += '<img src="(.*?)"'
matches = re.compile(patron, re.DOTALL).findall(bloque_enlaces) matches = re.compile(patron, re.DOTALL).findall(bloque_enlaces)
for scrapedurl, scrapedtitle, extra_info, scrapedthumbnail in matches: for scrapedurl, scrapedtitle, extra_info, scrapedthumbnail in matches:
title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película") title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
@@ -255,32 +240,22 @@ def search(item, texto):
patron += '<div class="row">.*?' patron += '<div class="row">.*?'
patron += '<a href="(.*?)" title="(.*?)">.*?' patron += '<a href="(.*?)" title="(.*?)">.*?'
patron += '<img src="(.*?)"' patron += '<img src="(.*?)"'
logger.info(patron)
matches = re.compile(patron, re.DOTALL).findall(data) matches = re.compile(patron, re.DOTALL).findall(data)
itemlist = [] itemlist = []
for scrapedurl, scrapedtitle, scrapedthumbnail in matches: for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película") title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
title = title.replace("Online", "") title = title.replace("Online", "")
url = item.url + scrapedurl url = scrapedurl
thumbnail = item.url + scrapedthumbnail thumbnail = scrapedthumbnail
logger.info(url)
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url, itemlist.append(Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url,
thumbnail=thumbnail, fanart=thumbnail)) thumbnail=thumbnail, fanart=thumbnail))
return itemlist return itemlist
def poranyo(item): def poranyo(item):
logger.info(item.url) logger.info(item.url)
itemlist = [] itemlist = []
data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8') data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8')
patron = '<option value="([^"]+)">(.*?)</option>' patron = '<option value="([^"]+)">(.*?)</option>'
matches = re.compile(patron, re.DOTALL).findall(data) matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches: for scrapedurl, scrapedtitle in matches:
@@ -289,7 +264,6 @@ def poranyo(item):
url = item.url + scrapedurl url = item.url + scrapedurl
itemlist.append(Item(channel=item.channel, action="menupelis", title=title, fulltitle=title, url=url, itemlist.append(Item(channel=item.channel, action="menupelis", title=title, fulltitle=title, url=url,
fanart=item.fanart, extra='year')) fanart=item.fanart, extra='year'))
return itemlist return itemlist
@@ -300,24 +274,25 @@ def porcateg(item):
data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8') data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8')
patron = '<li class="cat-item cat-item-3">.*?<a href="([^"]+)" title="([^"]+)">' patron = '<li class="cat-item cat-item-3">.*?<a href="([^"]+)" title="([^"]+)">'
matches = scrapertools.find_multiple_matches(data, patron) matches = scrapertools.find_multiple_matches(data, patron)
adult_mode = config.get_setting("adult_mode")
for scrapedurl, scrapedtitle in matches: for scrapedurl, scrapedtitle in matches:
if "18" in scrapedtitle and adult_mode == 0:
continue
title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película") title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
title = title.replace("Online", "") title = title.replace("Online", "")
url = scrapedurl url = scrapedurl
logger.info(url) logger.info(url)
# si no esta permitidas categoria adultos, la filtramos # si no esta permitidas categoria adultos, la filtramos
extra = title extra1 = title
adult_mode = config.get_setting("adult_mode")
if adult_mode != 0: if adult_mode != 0:
if 'erotic' in scrapedurl: if 'erotic' in scrapedurl:
extra = 'adult' extra1 = 'adult'
else: else:
extra=title extra1=title
if (extra=='adult' and adult_mode != 0) or extra != 'adult': if (extra1=='adult' and adult_mode != 0) or extra1 != 'adult':
itemlist.append(Item(channel=item.channel, action="menupelis", title=title, fulltitle=title, url=url, itemlist.append(Item(channel=item.channel, action="menupelis", title=title, fulltitle=title, url=url,
fanart=item.fanart, extra = extra)) fanart=item.fanart, genre = extra1))
return itemlist return itemlist
@@ -338,7 +313,6 @@ def decode(string):
i += 1 i += 1
enc4 = keyStr.index(input[i]) enc4 = keyStr.index(input[i])
i += 1 i += 1
chr1 = (enc1 << 2) | (enc2 >> 4) chr1 = (enc1 << 2) | (enc2 >> 4)
chr2 = ((enc2 & 15) << 4) | (enc3 >> 2) chr2 = ((enc2 & 15) << 4) | (enc3 >> 2)
chr3 = ((enc3 & 3) << 6) | enc4 chr3 = ((enc3 & 3) << 6) | enc4

View File

@@ -290,7 +290,10 @@ def do_search(item, categories=None):
multithread = config.get_setting("multithread", "search") multithread = config.get_setting("multithread", "search")
result_mode = config.get_setting("result_mode", "search") result_mode = config.get_setting("result_mode", "search")
tecleado = item.extra if item.wanted!='':
tecleado=item.wanted
else:
tecleado = item.extra
itemlist = [] itemlist = []

View File

@@ -21,8 +21,7 @@ list_language = ['default']
CALIDADES = ['SD', 'HDiTunes', 'Micro-HD-720p', 'Micro-HD-1080p', '1080p', '720p'] CALIDADES = ['SD', 'HDiTunes', 'Micro-HD-720p', 'Micro-HD-1080p', '1080p', '720p']
list_quality = CALIDADES list_quality = CALIDADES
list_servers = ['streamix', list_servers = ['powvideo',
'powvideo',
'streamcloud', 'streamcloud',
'openload', 'openload',
'flashx', 'flashx',
@@ -312,6 +311,7 @@ def findvideos(item):
d=d.lstrip( ) d=d.lstrip( )
list_links[i].server=d list_links[i].server=d
list_links = servertools.get_servers_itemlist(list_links)
autoplay.start(list_links, item) autoplay.start(list_links, item)
return list_links return list_links
@@ -319,7 +319,6 @@ def findvideos(item):
def play(item): def play(item):
logger.info("%s - %s = %s" % (item.show, item.title, item.url)) logger.info("%s - %s = %s" % (item.show, item.title, item.url))
if item.url.startswith(HOST): if item.url.startswith(HOST):
data = httptools.downloadpage(item.url).data data = httptools.downloadpage(item.url).data
@@ -340,7 +339,7 @@ def play(item):
else: else:
url = item.url url = item.url
itemlist = servertools.find_video_items(data=url) itemlist = servertools.find_video_items(item=item,data=url)
titulo = scrapertoolsV2.find_single_match(item.fulltitle, "^(.*?)\s\[.+?$") titulo = scrapertoolsV2.find_single_match(item.fulltitle, "^(.*?)\s\[.+?$")
if titulo: if titulo:

View File

@@ -14,10 +14,7 @@ from channels import autoplay
IDIOMAS = {'latino': 'Latino'} IDIOMAS = {'latino': 'Latino'}
list_language = IDIOMAS.values() list_language = IDIOMAS.values()
list_servers = ['openload', list_servers = ['openload'
'okru',
'netutv',
'rapidvideo'
] ]
list_quality = ['default'] list_quality = ['default']
@@ -49,7 +46,11 @@ def lista(item):
patron = '<a href="([^"]+)" ' patron = '<a href="([^"]+)" '
patron += 'class="link">.+?<img src="([^"]+)".*?' patron += 'class="link">.+?<img src="([^"]+)".*?'
patron += 'title="([^"]+)">' patron += 'title="([^"]+)">'
if item.url==host:
a=1
else:
num=(item.url).split('-')
a=int(num[1])
matches = scrapertools.find_multiple_matches(data, patron) matches = scrapertools.find_multiple_matches(data, patron)
# Paginacion # Paginacion
@@ -57,17 +58,28 @@ def lista(item):
min = item.page * num_items_x_pagina min = item.page * num_items_x_pagina
min=min-item.page min=min-item.page
max = min + num_items_x_pagina - 1 max = min + num_items_x_pagina - 1
b=0
for link, img, name in matches[min:max]: for link, img, name in matches[min:max]:
title = name b=b+1
if " y " in name:
title=name.replace(" y "," & ")
else:
title = name
url = host + link url = host + link
scrapedthumbnail = host + img scrapedthumbnail = host + img
context1=[renumbertools.context(item), autoplay.context] context1=[renumbertools.context(item), autoplay.context]
itemlist.append(item.clone(title=title, url=url, action="episodios", thumbnail=scrapedthumbnail, show=title, itemlist.append(item.clone(title=title, url=url, action="episodios", thumbnail=scrapedthumbnail, show=title,
context=context1)) context=context1))
logger.info("gasdfsa "+str(b))
itemlist.append( if b<29:
Item(channel=item.channel, title="Página Siguiente >>", url=item.url, action="lista", page=item.page + 1)) a=a+1
url="https://serieslan.com/pag-"+str(a)
if b>10:
itemlist.append(
Item(channel=item.channel, title="Página Siguiente >>", url=url, action="lista", page=0))
else:
itemlist.append(
Item(channel=item.channel, title="Página Siguiente >>", url=item.url, action="lista", page=item.page + 1))
tmdb.set_infoLabels(itemlist) tmdb.set_infoLabels(itemlist)
return itemlist return itemlist
@@ -78,13 +90,14 @@ def episodios(item):
itemlist = [] itemlist = []
data = httptools.downloadpage(item.url).data data = httptools.downloadpage(item.url).data
logger.debug("info %s " % data)
# obtener el numero total de episodios # obtener el numero total de episodios
total_episode = 0 total_episode = 0
patron_caps = '<li><span>Capitulo ([^"]+)\:<\/span><[^"]+"(.+?)">([^"]+)<[^"]+<\/li>' patron_caps = '<li><span>Capitulo ([^"]+)\:<\/span><[^"]+"(.+?)">([^"]+)<[^"]+<\/li>'
matches = scrapertools.find_multiple_matches(data, patron_caps) matches = scrapertools.find_multiple_matches(data, patron_caps)
# data_info = scrapertools.find_single_match(data, '<div class="info">.+?<\/div><\/div>') # data_info = scrapertools.find_single_match(data, '<div class="info">.+?<\/div><\/div>')
patron_info = '<img src="([^"]+)">.+?<\/span>([^"]+)<\/p><p><span>I.+?Reseña: <\/span>(.+?)<\/p><\/div>' patron_info = '<img src="([^"]+)">.+?</span>(.*?)</p>.*?<h2>Reseña:</h2><p>(.*?)</p>'
scrapedthumbnail, show, scrapedplot = scrapertools.find_single_match(data, patron_info) scrapedthumbnail, show, scrapedplot = scrapertools.find_single_match(data, patron_info)
scrapedthumbnail = host + scrapedthumbnail scrapedthumbnail = host + scrapedthumbnail
@@ -92,6 +105,10 @@ def episodios(item):
title = "" title = ""
pat = "/" pat = "/"
if "Mike, Lu & Og"==item.title:
pat="&/"
if "KND" in item.title:
pat="-"
# varios episodios en un enlace # varios episodios en un enlace
if len(name.split(pat)) > 1: if len(name.split(pat)) > 1:
i = 0 i = 0
@@ -133,7 +150,7 @@ def findvideos(item):
itemlist = [] itemlist = []
url_server = "https://openload.co/embed/%s/" url_server = "https://openload.co/embed/%s/"
url_api_get_key = "https://serieslan.com/ide.php?i=%s&k=%s" url_api_get_key = "https://serieslan.com/idx.php?i=%s&k=%s"
def txc(key, _str): def txc(key, _str):
s = range(256) s = range(256)
@@ -156,7 +173,7 @@ def findvideos(item):
return res return res
data = httptools.downloadpage(item.url).data data = httptools.downloadpage(item.url).data
pattern = '<div id="video" idv="([^"]*)" ide="([^"]*)" ids="[^"]*" class="video">' pattern = "<script type=.+?>.+?\['(.+?)','(.+?)','.+?'\]"
idv, ide = scrapertools.find_single_match(data, pattern) idv, ide = scrapertools.find_single_match(data, pattern)
thumbnail = scrapertools.find_single_match(data, thumbnail = scrapertools.find_single_match(data,
'<div id="tab-1" class="tab-content current">.+?<img src="([^"]*)">') '<div id="tab-1" class="tab-content current">.+?<img src="([^"]*)">')

View File

@@ -160,7 +160,9 @@ def findvideos(item):
'gamo': 'http://gamovideo.com/embed-', 'gamo': 'http://gamovideo.com/embed-',
'powvideo': 'http://powvideo.net/embed-', 'powvideo': 'http://powvideo.net/embed-',
'play': 'http://streamplay.to/embed-', 'play': 'http://streamplay.to/embed-',
'vido': 'http://vidoza.net/embed-'} 'vido': 'http://vidoza.net/embed-',
'net': 'http://hqq.tv/player/embed_player.php?vid='
}
data = get_source(item.url) data = get_source(item.url)
noemitido = scrapertools.find_single_match(data, '<p><img src=(http://darkiller.com/images/subiendo.png) border=0\/><\/p>') noemitido = scrapertools.find_single_match(data, '<p><img src=(http://darkiller.com/images/subiendo.png) border=0\/><\/p>')
patron = 'id=tab\d+.*?class=tab_content><script>(.*?)\((.*?)\)<\/script>' patron = 'id=tab\d+.*?class=tab_content><script>(.*?)\((.*?)\)<\/script>'

View File

@@ -7,7 +7,8 @@
"banner": "https://s9.postimg.org/5yxsq205r/ultrapeliculashd_banner.png", "banner": "https://s9.postimg.org/5yxsq205r/ultrapeliculashd_banner.png",
"thumbnail": "https://s13.postimg.org/d042quw9j/ultrapeliculashd.png", "thumbnail": "https://s13.postimg.org/d042quw9j/ultrapeliculashd.png",
"categories": [ "categories": [
"movie" "movie",
"direct"
], ],
"settings": [ "settings": [
{ {
@@ -33,6 +34,14 @@
"default": true, "default": true,
"enabled": true, "enabled": true,
"visible": true "visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades -Terror",
"default": true,
"enabled": true,
"visible": true
} }
] ]
} }

View File

@@ -192,27 +192,24 @@ def findvideos(item):
data = httptools.downloadpage(item.url).data data = httptools.downloadpage(item.url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data) data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
patron = '<iframe class=metaframe rptss src=(.*?) frameborder=0 allowfullscreen><\/iframe>' patron = '<iframe class=metaframe rptss src=(.*?) (?:width=.*?|frameborder=0) allowfullscreen><\/iframe>'
matches = matches = re.compile(patron, re.DOTALL).findall(data) matches = re.compile(patron, re.DOTALL).findall(data)
for video_url in matches: for video_url in matches:
if 'stream' in video_url:
data = httptools.downloadpage('https:'+video_url).data
new_url=scrapertools.find_single_match(data, 'iframe src="(.*?)"')
new_data = httptools.downloadpage(new_url).data
# TODO Reparar directos url, quality = scrapertools.find_single_match(new_data, 'file:.*?"(.*?)",label:.*?"(.*?)"')
# if 'stream' in video_url: headers_string = '|Referer=%s' % url
# data = httptools.downloadpage('https:'+video_url).data url = url.replace('download', 'preview')+headers_string
# new_url=scrapertools.find_single_match(data, 'iframe src="(.*?)"') sub = scrapertools.find_single_match(new_data, 'file:.*?"(.*?srt)"')
# new_data = httptools.downloadpage(new_url).data new_item = (Item(title=item.title, url=url, quality=quality, server='directo',
# logger.debug(new_data) subtitle=sub))
# itemlist.append(new_item)
# url, quality = scrapertools.find_single_match(new_data, "file:'(.*?)',label:'(.*?)'") else:
# headers_string = '|Referer=%s' % url itemlist.extend(servertools.find_video_items(data=video_url))
# url = url.replace('download', 'preview')+headers_string
# sub = scrapertools.find_single_match(new_data, "file:.*?'(.*?srt)'")
# new_item = (Item(title=item.title, url=url, quality=quality, server='directo',
# subtitle=sub))
# itemlist.append(new_item)
# else:
itemlist.extend(servertools.find_video_items(data=video_url))
for videoitem in itemlist: for videoitem in itemlist:
videoitem.channel = item.channel videoitem.channel = item.channel
@@ -255,10 +252,13 @@ def newest(categoria):
item.extra = 'estrenos/' item.extra = 'estrenos/'
try: try:
if categoria == 'peliculas': if categoria == 'peliculas':
item.url = host + '/category/estrenos/' item.url = host + '/genre/estrenos/'
elif categoria == 'infantiles': elif categoria == 'infantiles':
item.url = host + '/category/infantil/' item.url = host + '/genre/animacion/'
elif categoria == 'terror':
item.url = host + '/genre/terror/'
itemlist = lista(item) itemlist = lista(item)
if itemlist[-1].title == 'Siguiente >>>': if itemlist[-1].title == 'Siguiente >>>':

View File

@@ -381,6 +381,7 @@ def findvideos(item):
item_json.show = item.library_filter_show.get(nom_canal, "") item_json.show = item.library_filter_show.get(nom_canal, "")
# Ejecutamos find_videos, del canal o común # Ejecutamos find_videos, del canal o común
item_json.contentChannel='videolibrary'
if hasattr(channel, 'findvideos'): if hasattr(channel, 'findvideos'):
from core import servertools from core import servertools
list_servers = getattr(channel, 'findvideos')(item_json) list_servers = getattr(channel, 'findvideos')(item_json)

View File

@@ -398,9 +398,9 @@ def set_context_commands(item, parent_item):
if item.contentType in ['movie','tvshow']and item.channel != 'search': if item.contentType in ['movie','tvshow']and item.channel != 'search':
# Buscar en otros canales # Buscar en otros canales
if item.contentSerieName!='': if item.contentSerieName!='':
item.extra=item.contentSerieName item.wanted=item.contentSerieName
else: else:
item.extra = item.contentTitle item.wanted = item.contentTitle
context_commands.append(("[COLOR yellow]Buscar en otros canales[/COLOR]", context_commands.append(("[COLOR yellow]Buscar en otros canales[/COLOR]",
"XBMC.Container.Update (%s?%s)" % (sys.argv[0], "XBMC.Container.Update (%s?%s)" % (sys.argv[0],
item.clone(channel='search', item.clone(channel='search',

Binary file not shown.

After

Width:  |  Height:  |  Size: 18 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 110 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 16 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 75 KiB

View File

@@ -33,16 +33,20 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
'Cookie': ''} 'Cookie': ''}
data = httptools.downloadpage(page_url, headers=headers, replace_headers=True).data data = httptools.downloadpage(page_url, headers=headers, replace_headers=True).data
data = data.replace("\n","") data = data.replace("\n","")
cgi_counter = scrapertools.find_single_match(data, '(?is)src=.(https://www.flashx.tv/counter.cgi.*?fx=[0-9a-zA-Z=]+)') cgi_counter = scrapertools.find_single_match(data, """(?is)src=.(https://www.flashx.tv/counter.cgi.*?[^(?:'|")]+)""")
cgi_counter = cgi_counter.replace("%0A","").replace("%22","") cgi_counter = cgi_counter.replace("%0A","").replace("%22","")
playnow = scrapertools.find_single_match(data, 'https://www.flashx.tv/dl[^"]+') playnow = scrapertools.find_single_match(data, 'https://www.flashx.tv/dl[^"]+')
# Para obtener el f y el fxfx # Para obtener el f y el fxfx
js_fxfx = scrapertools.find_single_match(data, '(?is)src=.(https://www.flashx.tv/js/code.js.*?=[0-9]+)') js_fxfx = "https://www." + scrapertools.find_single_match(data.replace("//","/"), """(?is)(flashx.tv/js/code.js.*?[^(?:'|")]+)""")
data_fxfx = httptools.downloadpage(js_fxfx).data data_fxfx = httptools.downloadpage(js_fxfx).data
mfxfx = scrapertools.find_single_match(data_fxfx, 'get.*?({.*?})').replace("'","").replace(" ","") mfxfx = scrapertools.find_single_match(data_fxfx, 'get.*?({.*?})').replace("'","").replace(" ","")
matches = scrapertools.find_multiple_matches(mfxfx, '(\w+):(\w+)') matches = scrapertools.find_multiple_matches(mfxfx, '(\w+):(\w+)')
for f, v in matches: for f, v in matches:
pfxfx += f + "=" + v + "&" pfxfx += f + "=" + v + "&"
logger.info("mfxfxfx1= %s" %js_fxfx)
logger.info("mfxfxfx2= %s" %pfxfx)
if pfxfx == "":
pfxfx = "ss=yes&f=fail&fxfx=6"
coding_url = 'https://www.flashx.tv/flashx.php?%s' %pfxfx coding_url = 'https://www.flashx.tv/flashx.php?%s' %pfxfx
# {f: 'y', fxfx: '6'} # {f: 'y', fxfx: '6'}
flashx_id = scrapertools.find_single_match(data, 'name="id" value="([^"]+)"') flashx_id = scrapertools.find_single_match(data, 'name="id" value="([^"]+)"')

View File

@@ -7,13 +7,13 @@ from core import scrapertools
from lib import jsunpack from lib import jsunpack
from platformcode import logger from platformcode import logger
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:40.0) Gecko/20100101 ' \ headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:57.0) Gecko/20100101 ' \
'Firefox/40.0'} 'Firefox/57.0'}
def test_video_exists(page_url): def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url) logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url, add_referer = True).data data = httptools.downloadpage(page_url, headers=headers).data
if "File was deleted" in data or "Not Found" in data or "File was locked by administrator" in data: if "File was deleted" in data or "Not Found" in data or "File was locked by administrator" in data:
return False, "[Gamovideo] El archivo no existe o ha sido borrado" return False, "[Gamovideo] El archivo no existe o ha sido borrado"
@@ -26,7 +26,6 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""): def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url) logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url, headers=headers).data data = httptools.downloadpage(page_url, headers=headers).data
logger.debug(data)
packer = scrapertools.find_single_match(data, packer = scrapertools.find_single_match(data,
"<script type='text/javascript'>(eval.function.p,a,c,k,e,d..*?)</script>") "<script type='text/javascript'>(eval.function.p,a,c,k,e,d..*?)</script>")
if packer != "": if packer != "":

View File

@@ -30,12 +30,20 @@ def get_video_url(page_url, user="", password="", video_password=""):
streams =[] streams =[]
logger.debug('page_url: %s'%page_url) logger.debug('page_url: %s'%page_url)
if 'googleusercontent' in page_url: if 'googleusercontent' in page_url:
data = httptools.downloadpage(page_url, follow_redirects = False, headers={"Referer": page_url})
url=data.headers['location'] response = httptools.downloadpage(page_url, follow_redirects = False, cookies=False, headers={"Referer": page_url})
url=response.headers['location']
cookies = ""
cookie = response.headers["set-cookie"].split("HttpOnly, ")
for c in cookie:
cookies += c.split(";", 1)[0] + "; "
data = response.data.decode('unicode-escape')
data = urllib.unquote_plus(urllib.unquote_plus(data))
headers_string = "|Cookie=" + cookies
quality = scrapertools.find_single_match (url, '.itag=(\d+).') quality = scrapertools.find_single_match (url, '.itag=(\d+).')
streams.append((quality, url)) streams.append((quality, url))
headers_string=""
else: else:
response = httptools.downloadpage(page_url, cookies=False, headers={"Referer": page_url}) response = httptools.downloadpage(page_url, cookies=False, headers={"Referer": page_url})

View File

@@ -23,6 +23,8 @@ def test_video_exists(page_url):
if "Object not found" in response.data: if "Object not found" in response.data:
return False, "[Rapidvideo] El archivo no existe o ha sido borrado" return False, "[Rapidvideo] El archivo no existe o ha sido borrado"
if reponse.code == 500:
return False, "[Rapidvideo] Error de servidor, inténtelo más tarde."
return True, "" return True, ""

View File

@@ -8,7 +8,6 @@ from platformcode import logger
def test_video_exists(page_url): def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url) logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data data = httptools.downloadpage(page_url).data
if "Not Found" in data: if "Not Found" in data:
return False, "[streamixcloud] El archivo no existe o ha sido borrado" return False, "[streamixcloud] El archivo no existe o ha sido borrado"
@@ -21,7 +20,8 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
logger.info("(page_url='%s')" % page_url) logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data data = httptools.downloadpage(page_url).data
video_urls = [] video_urls = []
packed = scrapertools.find_single_match(data, patron = "<script type='text/javascript'>(eval\(function\(p,a,c,k,e,d.*?)</script"
packed = scrapertools.find_single_match(data, patron)
data = jsunpack.unpack(packed) data = jsunpack.unpack(packed)
media_url = scrapertools.find_multiple_matches(data, '\{file:"([^"]+)",') media_url = scrapertools.find_multiple_matches(data, '\{file:"([^"]+)",')
ext = scrapertools.get_filename_from_url(media_url[0])[-4:] ext = scrapertools.get_filename_from_url(media_url[0])[-4:]

View File

@@ -3,8 +3,8 @@
"find_videos": { "find_videos": {
"patterns": [ "patterns": [
{ {
"pattern": "(http://vshare.io/v/[\\w]+[^\"']*)[\"']", "pattern": "(vshare.io/v/[a-zA-Z0-9/-]+)",
"url": "\\1" "url": "http://\\1"
} }
] ]
}, },

View File

@@ -40,11 +40,8 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
arrayResult = [chr(int(value) - substract) for value in fields.group(1).split(",")] arrayResult = [chr(int(value) - substract) for value in fields.group(1).split(",")]
strResult = "".join(arrayResult) strResult = "".join(arrayResult)
logger.debug(strResult) logger.debug(strResult)
videoSources = re.findall("<source[\s]+src=[\"'](?P<url>[^\"']+)[^>]+label=[\"'](?P<label>[^\"']+)", strResult) videoSources = re.findall("<source[\s]+src=[\"'](?P<url>[^\"']+)[^>]+label=[\"'](?P<label>[^\"']+)", strResult)
for url, label in videoSources: for url, label in videoSources:
logger.debug("[" + label + "] " + url)
video_urls.append([label, url]) video_urls.append([label, url])
video_urls.sort(key=lambda i: int(i[0].replace("p","")))
return video_urls return video_urls