85 Commits

Author SHA1 Message Date
alfa-addon
df0607ec90 v2.3.7 2017-11-14 18:45:27 -05:00
alfa-addon
d83a49743c fixed 2017-11-14 18:45:09 -05:00
Alfa
66762b2c46 Merge pull request #169 from danielr460/master
Channel fixes
2017-11-14 19:41:50 -03:00
Alfa
79c761206d Merge pull request #170 from Intel11/patch-3
Updated
2017-11-14 19:40:58 -03:00
Intel1
f04647f348 Update settings.xml 2017-11-14 12:43:31 -05:00
Intel1
0f81113225 Update infoplus.py 2017-11-14 12:42:02 -05:00
Intel1
169c09db16 Update pelisfox.py 2017-11-14 08:49:58 -05:00
Intel1
306bb6533d stormo: updated 2017-11-14 08:45:54 -05:00
Intel1
210e90cb96 Update help.py 2017-11-14 08:28:40 -05:00
Intel1
74e53f362b help: updated 2017-11-13 14:51:24 -05:00
Intel1
947cb7f51f crunchyroll: fix 2017-11-13 14:47:18 -05:00
Daniel Rincón Rodríguez
f88ca81ff5 Cosmetic fix 2017-11-13 14:07:12 -05:00
danielr460
42cd9ac14b Danimados: Added movies section 2017-11-13 10:07:01 -05:00
danielr460
b7520145bb Minor fixes 2017-11-13 09:31:15 -05:00
danielr460
209af696b2 Serieslan: Minor fixes in data presentation 2017-11-13 09:24:14 -05:00
Daniel Rincón Rodríguez
03589b9c39 Seodiv: Added tvdb and fixed the language 2017-11-13 00:35:48 -05:00
danielr460
a3337df4da Peliculashindu: Fixed channel 2017-11-12 19:21:03 -05:00
danielr460
acf7f9a27a Mundiseries: Cosmetic details 2017-11-12 19:19:53 -05:00
danielr460
8082e1b244 CartoonLatino: Cosmetic details 2017-11-12 18:08:11 -05:00
danielr460
9345115869 Asialiveaction: Removed the "add to video library" link once the item is already added 2017-11-12 17:58:43 -05:00
danielr460
7ae8b203b6 AnitoonsTV: cosmetic details 2017-11-12 17:52:34 -05:00
danielr460
56c16f2922 Removed nonexistent YouTube link 2017-11-12 17:44:40 -05:00
Alfa
7e47e3ae59 Merge pull request #162 from numa00009/patch-1
Update httptools.py
2017-11-12 13:20:01 -03:00
Alfa
9eef89d1b0 Merge pull request #164 from q1316480/sb-streamixcloud
Seriesblanco -> StreamixCloud
2017-11-12 13:19:35 -03:00
Alfa
2b3d81c9a0 Merge pull request #165 from danielr460/master
Minor fixes
2017-11-12 13:19:07 -03:00
Alfa
876b02b81f Merge pull request #168 from Intel11/patch-1
Updated
2017-11-12 13:18:26 -03:00
Intel1
8028290051 Update xbmc_config_menu.py 2017-11-12 10:22:16 -05:00
Intel1
78252d3452 gamovideo: fix 2017-11-12 09:59:33 -05:00
Intel1
9aa77400d5 hdfull: updated 2017-11-12 09:57:26 -05:00
danielr460
5d592f724d Serieslan: Updated 2017-11-10 14:04:07 -05:00
danielr460
d288031a83 Removed unnecessary code 2017-11-10 13:45:26 -05:00
Daniel Rincón Rodríguez
41a39ff02b Update anitoonstv.py 2017-11-10 11:16:35 -05:00
Daniel Rincón Rodríguez
0bad69a7cb unnecessary lines 2017-11-10 11:15:54 -05:00
danielr460
74e6145d2f Fix netutv 2017-11-10 11:12:07 -05:00
q1316480
c344832c8c Fix: remove "Ver en" and "Descargar en" 2017-11-10 01:21:05 +01:00
q1316480
a9caf59ce1 Seriesblanco -> StreamixCloud
https://github.com/alfa-addon/addon/issues/163
2017-11-10 00:57:26 +01:00
numa00009
770a2e215a Update httptools.py
Change Firefox headers into Chrome ones.
default_headers["User-Agent"] = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36"
default_headers["Accept"] = "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8"
2017-11-09 10:17:45 +01:00
alfa-addon
28d99deb48 v2.3.6 2017-11-09 03:57:01 -05:00
alfa-addon
23ac80fbd6 v2.3.5 2017-11-08 21:38:26 -05:00
alfa-addon
9a5ddfbccb fixed 2017-11-08 21:38:12 -05:00
alfa-addon
50bbf7d9aa goodbye playmax 2017-11-08 21:37:57 -05:00
Alfa
2aab5ae0ff Merge pull request #161 from Intel11/patch-1
Updated
2017-11-09 02:36:18 +01:00
Intel1
1bbc51a885 gamovideo: fix 2017-11-08 09:02:49 -05:00
Intel1
f95c3621d4 Ayuda: updated 2017-11-08 08:30:30 -05:00
Intel1
f05cbba109 Update channelselector.py 2017-11-08 08:29:26 -05:00
Intel1
16968f9204 seriesblanco: updated 2017-11-08 08:22:17 -05:00
alfa-addon
8985f3ebdd v2.3.4 2017-11-06 19:04:42 -05:00
Alfa
d60c246bbb Merge pull request #155 from Intel11/patch-3
Updated
2017-11-07 00:09:00 +01:00
Alfa
3b29fe47bb Merge pull request #156 from danielr460/master
Minor fixes
2017-11-07 00:08:47 +01:00
Alfa
3093f72ce5 Merge pull request #159 from Alfa-beto/Fixes
Fixed error with extras
2017-11-07 00:08:33 +01:00
Unknown
55dcf3f091 Fixed error with extras 2017-11-05 18:21:26 -03:00
Intel1
2924b6958d Update allpeliculas.py 2017-11-04 15:01:27 -05:00
Intel1
927310c7c6 flashx: updated 2017-11-04 14:58:29 -05:00
danielr460
0c25891790 fix servers 2017-11-04 00:06:45 -05:00
danielr460
212c06057f Minor fixes 2017-11-03 22:04:28 -05:00
Intel1
9c3b3e9256 allpeliculas: paginator for collections 2017-11-03 17:54:51 -05:00
Intel1
6dc853b41e repelis: fix category 2017-11-03 15:49:52 -05:00
Intel1
7afd09dfa9 streamixcloud: fix 2017-11-03 11:08:16 -05:00
Intel1
6855508eaa Update ultrapeliculashd.py 2017-11-03 10:21:18 -05:00
Intel1
2925c29671 Update ultrapeliculashd.json 2017-11-03 10:20:47 -05:00
Intel1
506e68e8a3 vshare: changed resolution order 2017-11-03 10:17:12 -05:00
Intel1
9cc30152f8 vshare: updated pattern 2017-11-03 10:15:27 -05:00
Intel1
267c9d8031 gvideo: fix 2017-11-03 10:07:46 -05:00
Intel1
bd68b83b6c flashx: fix 2017-11-01 06:47:51 -05:00
Unknown
c1f8039672 Merge remote-tracking branch 'alfa-addon/master' into Fixes 2017-11-01 08:37:33 -03:00
alfa-addon
99dfa2be58 v2.3.3 2017-10-31 21:09:17 -04:00
Alfa
39e711b3cb Merge pull request #150 from danielr460/master
Channel fixes
2017-11-01 01:40:53 +01:00
Alfa
2d8d2b3baf Merge pull request #151 from Intel11/patch-2
Updated
2017-11-01 01:40:36 +01:00
Alfa
82d126c3e1 Merge pull request #152 from Alfa-beto/Fixes
Fixes
2017-11-01 01:40:20 +01:00
Alfa
8d41fd1c64 Merge pull request #153 from danielr460/patch-1
Other fixes
2017-11-01 01:40:05 +01:00
Unknown
a8c2f409eb Channel fixes 2017-10-31 14:57:55 -03:00
Daniel Rincón Rodríguez
7b2a3c2181 Update mundiseries.json 2017-10-31 07:19:55 -05:00
Daniel Rincón Rodríguez
9e6729f0be Update danimados.json 2017-10-31 07:17:04 -05:00
Unknown
241e644dcf Fixes 2017-10-30 15:02:57 -03:00
Intel1
ae318721ab Add files via upload 2017-10-30 10:34:08 -05:00
Intel1
8328610ffa Delete bajui2.json 2017-10-30 10:32:41 -05:00
Intel1
19101b5310 Delete bajui2.py 2017-10-30 10:32:27 -05:00
Intel1
22827e0f7e Update animemovil.json 2017-10-30 10:28:29 -05:00
Unknown
1747c9795d Merge remote-tracking branch 'alfa-addon/master' into Fixes 2017-10-30 10:14:07 -03:00
Unknown
f3effe9a7f Fixed series in pelisplus 2017-10-30 10:13:40 -03:00
Intel1
0621b1fa91 gamovideo: fix 2017-10-30 04:16:22 -05:00
Intel1
16473764c9 flashx: fix 2017-10-30 04:15:00 -05:00
danielr460
6b1727a0b8 Fixed serieslan 2017-10-29 19:50:42 -05:00
Intel1
11fceffd14 bajui2: fix 2017-10-29 10:00:39 -05:00
danielr460
3a49b8a442 Play function was deleting the series info. Fixed 2017-10-29 08:54:27 -05:00
46 changed files with 827 additions and 747 deletions

View File

@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
-<addon id="plugin.video.alfa" name="Alfa" version="2.3.2" provider-name="Alfa Addon">
+<addon id="plugin.video.alfa" name="Alfa" version="2.3.7" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -19,16 +19,15 @@
</assets>
<news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
-» cinetux » animemovil
-» anitoonstv » cartoonlatino
-» seriesblanco » damimados
-» mundiseries » serieslan
-» cinetux » animemovil
-» plusdede » pelisplus
-» rapidvideo » flashx
-¤ arreglos internos
-[COLOR green]Gracias a [COLOR yellow]Danielr460[/COLOR] por su colaboración en esta versión[/COLOR]
-</news>
+» anitoonstv » asialiveaction
+» cinehindi » danimados
+» mundiseries » pelisculashndu
+» seodiv » serieslan
+» crunchyroll » pelisfox
+» stormo ¤ arreglos internos
+[COLOR green]Gracias a [COLOR yellow]Danielr460, numa00009 y numa00009[/COLOR]
+por su colaboración en esta versión[/COLOR]
+</news>
<description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>
<summary lang="en">Browse web pages using Kodi</summary>
<description lang="en">Browse web pages using Kodi, you can easily watch their video content.</description>

View File

@@ -1,7 +1,5 @@
# -*- coding: utf-8 -*-
-import urlparse
from core import httptools
from core import jsontools
from core import scrapertools
@@ -59,6 +57,7 @@ def colecciones(item):
title = scrapedtitle.capitalize() + " (" + scrapedcantidad + ")"
itemlist.append(Item(channel = item.channel,
action = "listado_colecciones",
+page = 1,
thumbnail = host + scrapedthumbnail,
title = title,
url = host + scrapedurl
@@ -71,7 +70,7 @@ def listado_colecciones(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data_url = scrapertools.find_single_match(data, "data_url: '([^']+)")
-post = "page=1"
+post = "page=%s" %item.page
data = httptools.downloadpage(host + data_url, post=post).data
patron = 'a href="(/peli[^"]+).*?'
patron += 'src="([^"]+).*?'
@@ -88,6 +87,16 @@ def listado_colecciones(item):
url = host + scrapedurl
))
tmdb.set_infoLabels(itemlist)
+item.page += 1
+post = "page=%s" %item.page
+data = httptools.downloadpage(host + data_url, post=post).data
+if len(data) > 50:
+itemlist.append(Item(channel = item.channel,
+action = "listado_colecciones",
+title = "Pagina siguiente>>",
+page = item.page,
+url = item.url
+))
return itemlist
@@ -159,6 +168,7 @@ def lista(item):
params = jsontools.dump(dict_param)
data = httptools.downloadpage(item.url, post=params).data
+data = data.replace("<mark>","").replace("<\/mark>","")
dict_data = jsontools.load(data)
for it in dict_data["items"]:
@@ -167,7 +177,7 @@ def lista(item):
rating = it["imdb"]
year = it["year"]
url = host + "pelicula/" + it["slug"]
-thumb = urlparse.urljoin(host, it["image"])
+thumb = host + it["image"]
item.infoLabels['year'] = year
itemlist.append(item.clone(action="findvideos", title=title, fulltitle=title, url=url, thumbnail=thumb,
plot=plot, context=["buscar_trailer"], contentTitle=title, contentType="movie"))
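
The pagination added above keeps the page number on the Item and probes the next page before offering a "Pagina siguiente>>" entry. A minimal standalone sketch of that probe-then-append pattern (the fetch_page callable is a hypothetical stand-in for httptools.downloadpage(host + data_url, post="page=%s" % page).data):

# Sketch of the probe-then-append pagination used by listado_colecciones above.
def list_collection(fetch_page, page=1):
    # fetch_page(page) -> list of result titles for that page (hypothetical).
    items = [{"title": t, "action": "findvideos"} for t in fetch_page(page)]
    # Probe the next page first; only offer "Pagina siguiente>>" if it has
    # content, mirroring the len(data) > 50 check in the hunk above.
    if fetch_page(page + 1):
        items.append({"title": "Pagina siguiente>>",
                      "action": "listado_colecciones", "page": page + 1})
    return items

# Usage with two fake pages followed by an empty one.
pages = {1: ["A", "B"], 2: ["C"]}
print(list_collection(lambda p: pages.get(p, []), page=1))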

View File

@@ -3,7 +3,7 @@
"name": "Animemovil",
"active": true,
"adult": false,
-"language": ["cat", "lat"],
+"language": ["cast", "lat"],
"thumbnail": "https://s1.postimg.org/92ji7stii7/animemovil1.png",
"banner": "",
"categories": [

View File

@@ -127,11 +127,21 @@ def episodios(item):
plot=scrapedplot, url=url, show=show))
if config.get_videolibrary_support() and len(itemlist) > 0:
-itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=item.url,
+itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]", url=item.url,
action="add_serie_to_library", extra="episodios", show=show))
return itemlist
+def googl(url):
+logger.info()
+a=url.split("/")
+link=a[3]
+link="http://www.trueurl.net/?q=http%3A%2F%2Fgoo.gl%2F"+link+"&lucky=on&Uncloak=Find+True+URL"
+data_other = httptools.downloadpage(link).data
+data_other = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data_other)
+patron='<td class="withbg">Destination URL<\/td><td><A title="(.+?)"'
+trueurl = scrapertools.find_single_match(data_other, patron)
+return trueurl
def findvideos(item):
logger.info()
@@ -147,16 +157,23 @@ def findvideos(item):
scrapedthumbnail = scrapertools.find_single_match(data, '<div class="caracteristicas"><img src="([^<]+)">')
itemla = scrapertools.find_multiple_matches(data_vid, '<div class="serv">.+?-(.+?)-(.+?)<\/div><.+? src="(.+?)"')
for server, quality, url in itemla:
+if "HQ" in quality:
+quality = "HD"
if "Calidad Alta" in quality:
-quality = quality.replace("Calidad Alta", "HQ")
+quality = "HQ"
if " Calidad media - Carga mas rapido" in quality:
-quality = quality.replace(" Calidad media - Carga mas rapido", "360p")
+quality = "360p"
server = server.lower().strip()
-if "ok" == server:
+if "ok" in server:
server = 'okru'
+if "rapid" in server:
+server = 'rapidvideo'
+if "netu" in server:
+server = 'netutv'
+url = googl(url)
itemlist.append(item.clone(url=url, action="play", server=server, contentQuality=quality,
thumbnail=scrapedthumbnail, plot=scrapedplot,
-title="Enlace encontrado en %s: [%s]" % (server.capitalize(), quality)))
+title="Enlace encontrado en: %s [%s]" % (server.capitalize(), quality)))
autoplay.start(itemlist, item)
return itemlist
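
The googl() helper added above resolves goo.gl short links by scraping trueurl.net. A standalone sketch of the more direct route, following the shortener's HTTP redirect, assuming the service answers with a Location header (Python 3 stdlib here, while the addon itself targets Kodi's Python 2; the sample link is hypothetical):

# Sketch: resolve a short link by following its HTTP redirect instead of
# scraping trueurl.net as googl() does above.
import urllib.request

def resolve_short_link(url):
    req = urllib.request.Request(url, method="HEAD")
    with urllib.request.urlopen(req) as resp:
        return resp.geturl()  # final URL after all redirects

# print(resolve_short_link("https://goo.gl/abc123"))  # hypothetical link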

View File

@@ -180,7 +180,7 @@ def findvideos(item):
show = item.show
for videoitem in itemlist:
videoitem.channel = item.channel
-if config.get_videolibrary_support() and len(itemlist) > 0 and item.contentType=="movie":
+if config.get_videolibrary_support() and len(itemlist) > 0 and item.contentType=="movie" and item.contentChannel!='videolibrary':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
action="add_pelicula_to_library", extra="findvideos", contentTitle=show))

View File

@@ -1,6 +1,6 @@
{
-"id": "bajui2",
-"name": "Bajui2",
+"id": "bajui",
+"name": "Bajui",
"active": true,
"adult": false,
"language": ["cast"],

View File

@@ -13,7 +13,7 @@ def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, title="Películas", action="menupeliculas",
-url="http://www.bajui2.com/descargas/categoria/2/peliculas",
+url="http://www.bajui.org/descargas/categoria/2/peliculas",
fanart=item.fanart))
itemlist.append(Item(channel=item.channel, title="Series", action="menuseries",
fanart=item.fanart))
@@ -51,13 +51,13 @@ def menuseries(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, title="Series - Novedades", action="peliculas",
-url="http://www.bajui2.com/descargas/categoria/3/series",
+url="http://www.bajui.org/descargas/categoria/3/series",
fanart=item.fanart, viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, title="Series - A-Z", action="peliculas",
-url="http://www.bajui2.com/descargas/categoria/3/series/orden:nombre",
+url="http://www.bajui.org/descargas/categoria/3/series/orden:nombre",
fanart=item.fanart, viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, title="Series - HD", action="peliculas",
-url="http://www.bajui2.com/descargas/subcategoria/11/hd/orden:nombre",
+url="http://www.bajui.org/descargas/subcategoria/11/hd/orden:nombre",
fanart=item.fanart, viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url="",
fanart=item.fanart))
@@ -68,10 +68,10 @@ def menudocumentales(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, title="Documentales - Novedades", action="peliculas",
-url="http://www.bajui2.com/descargas/categoria/7/docus-y-tv",
+url="http://www.bajui.org/descargas/categoria/7/docus-y-tv",
fanart=item.fanart, viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, title="Documentales - A-Z", action="peliculas",
-url="http://www.bajui2.com/descargas/categoria/7/docus-y-tv/orden:nombre",
+url="http://www.bajui.org/descargas/categoria/7/docus-y-tv/orden:nombre",
fanart=item.fanart, viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url="",
fanart=item.fanart))
@@ -86,7 +86,7 @@ def search(item, texto, categoria=""):
texto = texto.replace(" ", "+")
logger.info("categoria: " + categoria + " url: " + url)
try:
-item.url = "http://www.bajui2.com/descargas/busqueda/%s"
+item.url = "http://www.bajui.org/descargas/busqueda/%s"
item.url = item.url % texto
itemlist.extend(peliculas(item))
return itemlist
@@ -118,7 +118,7 @@ def peliculas(item, paginacion=True):
scrapedtitle = title
scrapedplot = clean_plot(plot)
scrapedurl = urlparse.urljoin(item.url, url)
-scrapedthumbnail = urlparse.urljoin("http://www.bajui2.com/", thumbnail.replace("_m.jpg", "_g.jpg"))
+scrapedthumbnail = urlparse.urljoin("http://www.bajui.org/", thumbnail.replace("_m.jpg", "_g.jpg"))
logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
# Añade al listado de XBMC
@@ -133,7 +133,7 @@ def peliculas(item, paginacion=True):
scrapertools.printMatches(matches)
if len(matches) > 0:
-scrapedurl = urlparse.urljoin("http://www.bajui2.com/", matches[0])
+scrapedurl = urlparse.urljoin("http://www.bajui.org/", matches[0])
pagitem = Item(channel=item.channel, action="peliculas", title=">> Página siguiente", url=scrapedurl,
fanart=item.fanart, viewmode="movie_with_plot")
if not paginacion:
@@ -197,7 +197,7 @@ def enlaces(item):
try:
item.thumbnail = scrapertools.get_match(data, '<div class="ficha-imagen"[^<]+<img src="([^"]+)"')
-item.thumbnail = urlparse.urljoin("http://www.bajui2.com/", item.thumbnail)
+item.thumbnail = urlparse.urljoin("http://www.bajui.org/", item.thumbnail)
except:
pass
@@ -234,8 +234,8 @@ def enlaces(item):
lista_servidores = lista_servidores[:-2]
scrapedthumbnail = item.thumbnail
-# http://www.bajui2.com/ajax/mostrar-enlaces.php?id=330582&code=124767d31bfbf14c3861
-scrapedurl = "http://www.bajui2.com/ajax/mostrar-enlaces.php?id=" + id + "&code=" + id2
+# http://www.bajui.org/ajax/mostrar-enlaces.php?id=330582&code=124767d31bfbf14c3861
+scrapedurl = "http://www.bajui.org/ajax/mostrar-enlaces.php?id=" + id + "&code=" + id2
scrapedplot = item.plot
scrapedtitle = "Enlaces de " + usuario + " (" + fecha + ") (" + lista_servidores + ")"
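
Every hunk in this file swaps the hard-coded bajui2.com base URL for bajui.org, string by string. Other channels in this changeset keep a single module-level host constant instead (pelisencasa.py below does exactly that), which turns the next domain move into a one-line edit; a minimal sketch:

# Sketch: centralize the base URL so a future domain migration is one edit.
host = "http://www.bajui.org"

def category_url(category_id, slug):
    # category_url(2, "peliculas") -> ".../descargas/categoria/2/peliculas"
    return "%s/descargas/categoria/%s/%s" % (host, category_id, slug)

print(category_url(2, "peliculas"))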

View File

@@ -150,7 +150,7 @@ def episodios(item):
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, show=show))
if config.get_videolibrary_support() and len(itemlist) > 0:
-itemlist.append(Item(channel=item.channel, title="Añadir " + show + " a la videoteca", url=item.url,
+itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir " + show + " a la videoteca[/COLOR]", url=item.url,
action="add_serie_to_library", extra="episodios", show=show))
return itemlist

View File

@@ -123,7 +123,7 @@ def lista(item):
if next_page_url != "":
item.url = next_page_url
-itemlist.append(Item(channel=item.channel, action="lista", title=">> Página siguiente", url=next_page_url,
+itemlist.append(Item(channel=item.channel, action="lista", title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=next_page_url,
thumbnail='https://s32.postimg.org/4zppxf5j9/siguiente.png'))
return itemlist
@@ -132,14 +132,18 @@ def findvideos(item):
logger.info()
itemlist = []
+itemlist1 = []
data = httptools.downloadpage(item.url).data
-itemlist.extend(servertools.find_video_items(data=data))
+itemlist1.extend(servertools.find_video_items(data=data))
patron_show = '<div class="data"><h1 itemprop="name">([^<]+)<\/h1>'
show = scrapertools.find_single_match(data, patron_show)
-for videoitem in itemlist:
+for videoitem in itemlist1:
videoitem.channel = item.channel
-if config.get_videolibrary_support() and len(itemlist) > 0:
+for i in range(len(itemlist1)):
+if not 'youtube' in itemlist1[i].title:
+itemlist.append(itemlist1[i])
+if config.get_videolibrary_support() and len(itemlist) > 0 and item.contentChannel!='videolibrary':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
action="add_pelicula_to_library", extra="findvideos", contentTitle=show))

View File

@@ -168,11 +168,11 @@ def episodios(item):
data = httptools.downloadpage(item.url).data
data = re.sub(r'\n|\t|\s{2,}', '', data)
-patron = '<li id="showview_videos.*?href="([^"]+)".*?(?:src|data-thumbnailUrl)="([^"]+)".*?media_id="([^"]+)" ' \
-'style="width: (.*?)%.*?<span class="series-title.*?>\s*(.*?)</span>.*?<p class="short-desc".*?>' \
+patron = '<li id="showview_videos.*?href="([^"]+)".*?(?:src|data-thumbnailUrl)="([^"]+)".*?media_id="([^"]+)"' \
+'style="width:(.*?)%.*?<span class="series-title.*?>\s*(.*?)</span>.*?<p class="short-desc".*?>' \
'\s*(.*?)</p>.*?description":"([^"]+)"'
if data.count('class="season-dropdown') > 1:
-bloques = scrapertools.find_multiple_matches(data, 'class="season-dropdown[^"]+" title="([^"]+)"(.*?)</ul>')
+bloques = scrapertools.find_multiple_matches(data, 'class="season-dropdown[^"]+".*?title="([^"]+)"(.*?)</ul>')
for season, b in bloques:
matches = scrapertools.find_multiple_matches(b, patron)
if matches:
@@ -209,7 +209,6 @@ def episodios(item):
Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumb, media_id=media_id,
server="crunchyroll", text_color=item.text_color, contentTitle=item.contentTitle,
contentSerieName=item.contentSerieName, contentType="tvshow"))
return itemlist

View File

@@ -6,13 +6,6 @@
"language": ["lat"],
"thumbnail": "https://imgur.com/kU5Lx1S.png",
"banner": "https://imgur.com/xG5xqBq.png",
-"version": 1,
-"changes": [
-{
-"date": "24/10/2017",
-"description": "Primera version del canal"
-}
-],
"categories": [
"tvshow"
]

View File

@@ -32,8 +32,8 @@ def mainlist(item):
thumbnail=thumb_series))
itemlist.append(Item(channel=item.channel, action="mainpage", title="Más Populares", url=host,
thumbnail=thumb_series))
-#itemlist.append(Item(channel=item.channel, action="movies", title="Peliculas Animadas", url=host,
-# thumbnail=thumb_series))
+itemlist.append(Item(channel=item.channel, action="lista", title="Peliculas Animadas", url=host+"peliculas/",
+thumbnail=thumb_series))
autoplay.show_option(item.channel, itemlist)
return itemlist
@@ -82,7 +82,6 @@ def mainpage(item):
return itemlist
return itemlist
def lista(item):
logger.info()
@@ -90,15 +89,26 @@ def lista(item):
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
-data_lista = scrapertools.find_single_match(data, '<div class="items">(.+?)<\/div><\/div><div class=.+?>')
+if item.title=="Peliculas Animadas":
+data_lista = scrapertools.find_single_match(data,
+'<div id="archive-content" class="animation-2 items">(.*)<a href=\''
+else:
+data_lista = scrapertools.find_single_match(data,
+'<div class="items">(.+?)<\/div><\/div><div class=.+?>')
patron = '<img src="([^"]+)" alt="([^"]+)">.+?<a href="([^"]+)">.+?<div class="texto">(.+?)<\/div>'
#scrapedthumbnail,#scrapedtitle, #scrapedurl, #scrapedplot
matches = scrapertools.find_multiple_matches(data_lista, patron)
for scrapedthumbnail,scrapedtitle, scrapedurl, scrapedplot in matches:
-itemlist.append(
-item.clone(title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
+if item.title=="Peliculas Animadas":
+itemlist.append(
+item.clone(title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, contentType="movie",
+plot=scrapedplot, action="findvideos", show=scrapedtitle))
+else:
+itemlist.append(
+item.clone(title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
context=autoplay.context,plot=scrapedplot, action="episodios", show=scrapedtitle))
-tmdb.set_infoLabels(itemlist)
+if item.title!="Peliculas Animadas":
+tmdb.set_infoLabels(itemlist)
return itemlist
@@ -124,7 +134,7 @@ def episodios(item):
action="findvideos", title=title, url=scrapedurl, show=show))
if config.get_videolibrary_support() and len(itemlist) > 0:
-itemlist.append(Item(channel=item.channel, title="[COLOR blue]Añadir " + show + " a la videoteca[/COLOR]", url=item.url,
+itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir " + show + " a la videoteca[/COLOR]", url=item.url,
action="add_serie_to_library", extra="episodios", show=show))
@@ -141,6 +151,7 @@ def findvideos(item):
data = scrapertools.find_single_match(data,
'<div id="playex" .+?>(.+?)<\/nav><\/div><\/div>')
patron='src="(.+?)"'
+logger.info("assfxxv "+data)
itemla = scrapertools.find_multiple_matches(data,patron)
for i in range(len(itemla)):
#for url in itemla:
@@ -152,6 +163,8 @@ def findvideos(item):
server='okru'
else:
server=''
+if "youtube" in url:
+server='youtube'
if "openload" in url:
server='openload'
if "google" in url:
@@ -166,6 +179,10 @@ def findvideos(item):
title="NO DISPONIBLE"
if title!="NO DISPONIBLE":
itemlist.append(item.clone(title=title,url=url, action="play", server=server))
+if config.get_videolibrary_support() and len(itemlist) > 0 and item.contentType=="movie" and item.contentChannel!='videolibrary':
+itemlist.append(
+item.clone(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
+action="add_pelicula_to_library", contentTitle=item.show))
autoplay.start(itemlist, item)
return itemlist
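
findvideos above grows one if "..." in url branch per server. Once the chain gets long, a marker table keeps it declarative; a sketch (only the markers visible in the hunk are used, so the list is illustrative, not exhaustive):

# Sketch: table-driven server detection instead of a chain of if-statements.
SERVER_MARKERS = [
    ("youtube", "youtube"),
    ("openload", "openload"),
    ("ok", "okru"),
]

def detect_server(url):
    for marker, name in SERVER_MARKERS:
        if marker in url:
            return name
    return ""  # same fallback as the code above

print(detect_server("https://openload.co/embed/abc"))  # openload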

View File

@@ -302,7 +302,7 @@ def epienlaces(item):
def findvideos(item):
logger.info()
-if (item.extra and item.extra != "findvideos") or item.path:
+if item.contentSeason!='':
return epienlaces(item)
itemlist = []

View File

@@ -0,0 +1,7 @@
{
"id": "help",
"name": "Ayuda",
"active": false,
"adult": false,
"language": ["*"]
}

View File

@@ -0,0 +1,224 @@
# -*- coding: utf-8 -*-
import os
import xbmc
from core.item import Item
from platformcode import config, logger, platformtools
from channelselector import get_thumb
if config.is_xbmc():
import xbmcgui
class TextBox(xbmcgui.WindowXMLDialog):
""" Create a skinned textbox window """
def __init__(self, *args, **kwargs):
self.title = kwargs.get('title')
self.text = kwargs.get('text')
self.doModal()
def onInit(self):
try:
self.getControl(5).setText(self.text)
self.getControl(1).setLabel(self.title)
except:
pass
def onClick(self, control_id):
pass
def onFocus(self, control_id):
pass
def onAction(self, action):
# self.close()
if action in [xbmcgui.ACTION_PREVIOUS_MENU, xbmcgui.ACTION_NAV_BACK]:
self.close()
def mainlist(item):
logger.info()
itemlist = []
if config.is_xbmc():
itemlist.append(Item(channel=item.channel, action="", title="FAQ:",
thumbnail=get_thumb("help.png"),
folder=False))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - ¿Cómo reportar un error?",
thumbnail=get_thumb("help.png"),
folder=False, extra="report_error"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - ¿Se pueden activar/desactivar los canales?",
thumbnail=get_thumb("help.png"),
folder=False, extra="onoff_canales"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - ¿Es posible la sincronización automática con Trakt?",
thumbnail=get_thumb("help.png"),
folder=False, extra="trakt_sync"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - ¿Es posible mostrar todos los resultados juntos en el buscador global?",
thumbnail=get_thumb("help.png"),
folder=False, extra="buscador_juntos"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - Los enlaces tardan en aparecer.",
thumbnail=get_thumb("help.png"),
folder=False, extra="tiempo_enlaces"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - La búsqueda de contenido no se hace correctamente.",
thumbnail=get_thumb("help.png"),
folder=False, extra="prob_busquedacont"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - Algún canal no funciona correctamente.",
thumbnail=get_thumb("help.png"),
folder=False, extra="canal_fallo"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - Los enlaces Torrent no funcionan.",
thumbnail=get_thumb("help.png"),
folder=False, extra="prob_torrent"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - No se actualiza correctamente la videoteca.",
thumbnail=get_thumb("help.png"),
folder=True, extra="prob_bib"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - Enlaces de interés",
thumbnail=get_thumb("help.png"),
folder=False, extra=""))
return itemlist
def faq(item):
if item.extra == "onoff_canales":
respuesta = platformtools.dialog_yesno("Alfa",
"Esto se puede hacer en 'Configuración'>'Activar/Desactivar canales'. "
"Puedes activar/desactivar los canales uno por uno o todos a la vez. ",
"¿Deseas gestionar ahora los canales?")
if respuesta == 1:
from channels import setting
setting.conf_tools(Item(extra='channels_onoff'))
elif item.extra == "trakt_sync":
respuesta = platformtools.dialog_yesno("Alfa",
"Actualmente se puede activar la sincronización (silenciosa) "
"tras marcar como visto un episodio (esto se hace automáticamente). "
"Esta opción se puede activar en 'Configuración'>'Ajustes "
"de la videoteca'.",
"¿Deseas acceder a dichos ajustes?")
if respuesta == 1:
from channels import videolibrary
videolibrary.channel_config(Item(channel='videolibrary'))
elif item.extra == "tiempo_enlaces":
respuesta = platformtools.dialog_yesno("Alfa",
"Esto puede mejorarse limitando el número máximo de "
"enlaces o mostrandolos en una ventana emergente. "
"Estas opciones se encuentran en 'Configuración'>'Ajustes "
"de la videoteca'.",
"¿Deseas acceder a dichos ajustes?")
if respuesta == 1:
from channels import videolibrary
videolibrary.channel_config(Item(channel='videolibrary'))
elif item.extra == "prob_busquedacont":
title = "Alfa - FAQ - %s" % item.title[6:]
text = ("Puede que no hayas escrito la ruta de la librería correctamente en "
"'Configuración'>'Preferencias'.\n"
"La ruta específicada debe ser exactamente la misma de la 'fuente' "
"introducida en 'Archivos' de la videoteca de Kodi.\n"
"AVANZADO: Esta ruta también se encuentra en 'sources.xml'.\n"
"También puedes estar experimentando problemas por estar "
"usando algun fork de Kodi y rutas con 'special://'. "
"SPMC, por ejemplo, tiene problemas con esto, y no parece tener solución, "
"ya que es un problema ajeno a Alfa que existe desde hace mucho.\n"
"Puedes intentar subsanar estos problemas en 'Configuración'>'Ajustes de "
"la videoteca', cambiando el ajuste 'Realizar búsqueda de contenido en' "
"de 'La carpeta de cada serie' a 'Toda la videoteca'."
"También puedes acudir a 'http://alfa-addon.com' en busca de ayuda.")
return TextBox("DialogTextViewer.xml", os.getcwd(), "Default", title=title, text=text)
elif item.extra == "canal_fallo":
title = "Alfa - FAQ - %s" % item.title[6:]
text = ("Puede ser que la página web del canal no funcione. "
"En caso de que funcione la página web puede que no seas el primero"
" en haberlo visto y que el canal este arreglado. "
"Puedes mirar en 'alfa-addon.com' o en el "
"repositorio de GitHub (github.com/alfa-addon/addon). "
"Si no encuentras el canal arreglado puedes reportar un "
"problema en el foro.")
return TextBox("DialogTextViewer.xml", os.getcwd(), "Default", title=title, text=text)
elif item.extra == "prob_bib":
platformtools.dialog_ok("Alfa",
"Puede ser que hayas actualizado el plugin recientemente "
"y que las actualizaciones no se hayan aplicado del todo "
"bien. Puedes probar en 'Configuración'>'Otras herramientas', "
"comprobando los archivos *_data.json o "
"volviendo a añadir toda la videoteca.")
respuesta = platformtools.dialog_yesno("Alfa",
"¿Deseas acceder ahora a esa seccion?")
if respuesta == 1:
itemlist = []
from channels import setting
new_item = Item(channel="setting", action="submenu_tools", folder=True)
itemlist.extend(setting.submenu_tools(new_item))
return itemlist
elif item.extra == "prob_torrent":
title = "Alfa - FAQ - %s" % item.title[6:]
text = ("Puedes probar descargando el modulo 'libtorrent' de Kodi o "
"instalando algun addon como 'Quasar' o 'Torrenter', "
"los cuales apareceran entre las opciones de la ventana emergente "
"que aparece al pulsar sobre un enlace torrent. "
"'Torrenter' es más complejo pero también más completo "
"y siempre funciona.")
return TextBox("DialogTextViewer.xml", os.getcwd(), "Default", title=title, text=text)
elif item.extra == "buscador_juntos":
respuesta = platformtools.dialog_yesno("Alfa",
"Si. La opcion de mostrar los resultados juntos "
"o divididos por canales se encuentra en "
"'setting'>'Ajustes del buscador global'>"
"'Otros ajustes'.",
"¿Deseas acceder a ahora dichos ajustes?")
if respuesta == 1:
from channels import search
search.settings("")
elif item.extra == "report_error":
if config.get_platform(True)['num_version'] < 14:
log_name = "xbmc.log"
else:
log_name = "kodi.log"
ruta = xbmc.translatePath("special://logpath") + log_name
title = "Alfa - FAQ - %s" % item.title[6:]
text = ("Para reportar un problema en 'http://alfa-addon.com' es necesario:\n"
" - Versión que usas de Alfa.\n"
" - Versión que usas de kodi, mediaserver, etc.\n"
" - Versión y nombre del sistema operativo que usas.\n"
" - Nombre del skin (en el caso que uses Kodi) y si se "
"te ha resuelto el problema al usar el skin por defecto.\n"
" - Descripción del problema y algún caso de prueba.\n"
" - Agregar el log en modo detallado, una vez hecho esto, "
"zipea el log y lo puedes adjuntar en un post.\n\n"
"Para activar el log en modo detallado, ingresar a:\n"
" - Configuración.\n"
" - Preferencias.\n"
" - En la pestaña General - Marcar la opción: Generar log detallado.\n\n"
"El archivo de log detallado se encuentra en la siguiente ruta: \n\n"
"%s" % ruta)
return TextBox("DialogTextViewer.xml", os.getcwd(), "Default", title=title, text=text)
else:
platformtools.dialog_ok("Alfa",
"Entérate de novedades, consejos u opciones que desconoces en Telegram: @alfa_addon.\n"
"Si tienes problemas o dudas, puedes acudir al Foro: http://alfa-addon.com")

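faq() above dispatches on item.extra through a long if/elif chain, and every plain-text answer ends in the same TextBox call. For answers with no follow-up action, a dict keyed by the extra id would flatten the chain; a sketch (only meaningful inside a Kodi session, since TextBox, Item and os come from the file above; the "my_faq" entry is hypothetical):

# Sketch: dict-based dispatch for the plain-text FAQ answers above.
FAQ_TEXTS = {
    "my_faq": "Answer text shown in the skinned TextBox dialog.",  # hypothetical
}

def faq_text(item):
    text = FAQ_TEXTS.get(item.extra)
    if text is None:
        return  # fall through to the interactive branches
    title = "Alfa - FAQ - %s" % item.title[6:]
    return TextBox("DialogTextViewer.xml", os.getcwd(), "Default",
                   title=title, text=text)
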
File diff suppressed because it is too large

View File

@@ -319,61 +319,34 @@ def findvideos(item):
duplicados = []
data = get_source(item.url)
src = data
-patron = 'id=(?:div|player)(\d+)>.*?<iframe src=.*? data-lazy-src=(.*?) marginheight'
+patron = 'id=(?:div|player)(\d+)>.*?data-lazy-src=(.*?) scrolling'
matches = re.compile(patron, re.DOTALL).findall(data)
for option, videoitem in matches:
lang = scrapertools.find_single_match(src,
-'<a href=#(?:div|player)%s.*?>.*?(Doblado|Subtitulado)<\/a>' % option)
+'<a href=#(?:div|player)%s.*?>.*?(.*?)<\/a>' % option)
+if 'audio ' in lang.lower():
+lang=lang.lower().replace('audio ','')
+lang=lang.capitalize()
data = get_source(videoitem)
-if 'play' in videoitem:
-url = scrapertools.find_single_match(data, '<span>Ver Online<.*?<li><a href=(.*?)><span class=icon>')
-else:
-url = scrapertools.find_single_match(data, '<iframe src=(.*?) scrolling=')
-url_list.append([url, lang])
-for video_url in url_list:
-language = video_url[1]
-if 'jw.miradetodo' in video_url[0]:
-data = get_source('http:' + video_url[0])
-patron = 'label:.*?(.*?),.*?file:.*?(.*?)&app.*?\}'
-matches = re.compile(patron, re.DOTALL).findall(data)
-for quality, scrapedurl in matches:
-quality = quality
-title = item.contentTitle + ' (%s) %s' % (quality, language)
-server = 'directo'
-url = scrapedurl
-url = url.replace('\/', '/')
-subtitle = scrapertools.find_single_match(data, "tracks: \[\{file: '.*?linksub=(.*?)',label")
-if url not in duplicados:
-itemlist.append(item.clone(title=title,
-action='play',
-url=url,
-quality=quality,
-server=server,
-subtitle=subtitle,
-language=language
-))
-duplicados.append(url)
-elif video_url != '':
-itemlist.extend(servertools.find_video_items(data=video_url[0]))
-import os
-for videoitem in itemlist:
-if videoitem.server != 'directo':
-quality = item.quality
-title = item.contentTitle + ' (%s) %s' % (videoitem.server, language)
-if item.quality != '':
-title = item.contentTitle + ' (%s) %s' % (quality, language)
-videoitem.title = title
-videoitem.channel = item.channel
-videoitem.thumbnail = os.path.join(config.get_runtime_path(), "resources", "media", "servers",
-"server_%s.png" % videoitem.server)
-videoitem.quality = item.quality
+video_urls = scrapertools.find_multiple_matches(data, '<li><a href=(.*?)><span')
+for video in video_urls:
+video_data = get_source(video)
+if not 'fastplay' in video:
+new_url= scrapertools.find_single_match(video_data,'<li><a href=(.*?srt)><span')
+data_final = get_source(new_url)
+else:
+data_final=video_data
+url = scrapertools.find_single_match(data_final,'iframe src=(.*?) scrolling')
+quality = item.quality
+server = servertools.get_server_from_url(url)
+title = item.contentTitle + ' [%s] [%s]' % (server, lang)
+if item.quality != '':
+title = item.contentTitle + ' [%s] [%s] [%s]' % (server, quality, lang)
+if url!='':
+itemlist.append(item.clone(title=title, url=url, action='play', server=server, language=lang))
if item.infoLabels['mediatype'] == 'movie':
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
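
The rewrite above still initializes duplicados = [] even though, as far as this hunk shows, the new flow never appends to it. If duplicate filtering is wanted back, a set gives O(1) membership checks; a sketch:

# Sketch: de-duplicating found URLs with a set instead of a list.
seen = set()
found = ["http://a/1", "http://a/1", "http://b/2"]
unique = []
for url in found:
    if url not in seen:  # set lookup instead of a list scan
        seen.add(url)
        unique.append(url)
print(unique)  # ['http://a/1', 'http://b/2']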

View File

@@ -6,13 +6,6 @@
"language": ["cast", "lat"],
"thumbnail": "https://imgur.com/GdGMFi1.png",
"banner": "https://imgur.com/1bDbYY1.png",
-"version": 1,
-"changes": [
-{
-"date": "23/10/2017",
-"description": "Primera versión del canal"
-}
-],
"categories": [
"tvshow"
]

View File

@@ -76,7 +76,7 @@ def episodios(item):
title=title, url=url, show=show))
if config.get_videolibrary_support() and len(itemlist) > 0:
-itemlist.append(Item(channel=item.channel, title="Añadir Temporada/Serie a la biblioteca de Kodi", url=item.url,
+itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir Temporada/Serie a la biblioteca de Kodi[/COLOR]", url=item.url,
action="add_serie_to_library", extra="episodios", show=show))
return itemlist

View File

@@ -33,15 +33,14 @@ def mainlist(item):
def explorar(item):
logger.info()
itemlist = list()
-url1 = str(item.url)
+url1 = item.title
data = httptools.downloadpage(host).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
-# logger.info("loca :"+url1+" aaa"+data)
-if 'genero' in url1:
-patron = '<div class="d"><h3>Pel.+?neros<\/h3>(.+?)<\/h3>'
-if 'alfabetico' in url1:
-patron = '<\/li><\/ul><h3>Pel.+?tico<\/h3>(.+?)<\/h3>'
-if 'año' in url1:
+if 'Género' in url1:
+patron = '<div class="d">.+?<h3>Pel.+?neros<\/h3>(.+?)<\/h3>'
+if 'Listado Alfabético' in url1:
+patron = '<\/li><\/ul>.+?<h3>Pel.+?tico<\/h3>(.+?)<\/h3>'
+if 'Año' in url1:
patron = '<ul class="anio"><li>(.+?)<\/ul>'
data_explorar = scrapertools.find_single_match(data, patron)
patron_explorar = '<a href="([^"]+)">([^"]+)<\/a>'
@@ -79,26 +78,22 @@ def lista(item):
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)  # Eliminamos tabuladores, dobles espacios saltos de linea, etc...
-url1 = str(item.url)
-if 'http://www.peliculashindu.com/' in url1:
-url1 = url1.replace("http://www.peliculashindu.com/", "")
-if url1 != 'estrenos':
-data = scrapertools.find_single_match(data, '<div id="cuerpo"><div class="iz">.+>Otras')
-# data= scrapertools.find_single_match(data,'<div id="cuerpo"><div class="iz">.+>Otras')
+data_mov= scrapertools.find_single_match(data,'<div id="cuerpo"><div class="iz">(.+)<ul class="pag">')
patron = '<a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)"' # scrapedurl, scrapedthumbnail, scrapedtitle
-matches = scrapertools.find_multiple_matches(data, patron)
+matches = scrapertools.find_multiple_matches(data_mov, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches: # scrapedthumbnail, scrapedtitle in matches:
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, action="findvideos",
show=scrapedtitle))
# Paginacion
patron_pag = '<a href="([^"]+)" title="Siguiente .+?">'
paginasig = scrapertools.find_single_match(data, patron_pag)
-logger.info("algoooosadf "+paginasig)
-next_page_url = item.url + paginasig
+next_page_url = host + paginasig
if paginasig != "":
item.url = next_page_url
-itemlist.append(Item(channel=item.channel, action="lista", title=">> Página siguiente", url=next_page_url,
+itemlist.append(Item(channel=item.channel, action="lista", title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=next_page_url,
thumbnail='https://s32.postimg.org/4zppxf5j9/siguiente.png'))
return itemlist
@@ -114,10 +109,9 @@ def findvideos(item):
logger.info("holaa" + data)
patron_show = '<strong>Ver Pel.+?a([^<]+) online<\/strong>'
show = scrapertools.find_single_match(data, patron_show)
-logger.info("holaa" + show)
for videoitem in itemlist:
videoitem.channel = item.channel
-if config.get_videolibrary_support() and len(itemlist) > 0:
+if config.get_videolibrary_support() and len(itemlist) > 0 and item.contentChannel!='videolibrary':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
action="add_pelicula_to_library", extra="findvideos", contentTitle=show))

View File

@@ -1,30 +0,0 @@
{
"id": "pelisencasa",
"name": "PelisEnCasa",
"active": true,
"adult": false,
"language": ["lat"],
"thumbnail": "https://s14.postimg.org/iqiq0bxn5/pelisencasa.png",
"banner": "https://s18.postimg.org/j775ehbg9/pelisencasa_banner.png",
"categories": [
"movie"
],
"settings": [
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,217 +0,0 @@
# -*- coding: utf-8 -*-
import re
from core import httptools
from core import scrapertools
from core import tmdb
from core.item import Item
from lib import jsunpack
from platformcode import config, logger
host = 'http://pelisencasa.net'
tgenero = {"Comedia": "https://s7.postimg.org/ne9g9zgwb/comedia.png",
"Suspense": "https://s13.postimg.org/wmw6vl1cn/suspenso.png",
"Drama": "https://s16.postimg.org/94sia332d/drama.png",
"Acción": "https://s3.postimg.org/y6o9puflv/accion.png",
"Aventura": "https://s10.postimg.org/6su40czih/aventura.png",
"Romance": "https://s15.postimg.org/fb5j8cl63/romance.png",
"Animación": "https://s13.postimg.org/5on877l87/animacion.png",
"Ciencia ficción": "https://s9.postimg.org/diu70s7j3/cienciaficcion.png",
"Terror": "https://s7.postimg.org/yi0gij3gb/terror.png",
"Documental": "https://s16.postimg.org/7xjj4bmol/documental.png",
"Música": "https://s29.postimg.org/bbxmdh9c7/musical.png",
"Western": "https://s23.postimg.org/lzyfbjzhn/western.png",
"Fantasía": "https://s13.postimg.org/65ylohgvb/fantasia.png",
"Guerra": "https://s23.postimg.org/71itp9hcr/belica.png",
"Misterio": "https://s1.postimg.org/w7fdgf2vj/misterio.png",
"Crimen": "https://s4.postimg.org/6z27zhirx/crimen.png",
"Historia": "https://s15.postimg.org/fmc050h1n/historia.png",
"Familia": "https://s7.postimg.org/6s7vdhqrf/familiar.png"}
tletras = {'#': 'https://s32.postimg.org/drojt686d/image.png',
'a': 'https://s32.postimg.org/llp5ekfz9/image.png',
'b': 'https://s32.postimg.org/y1qgm1yp1/image.png',
'c': 'https://s32.postimg.org/vlon87gmd/image.png',
'd': 'https://s32.postimg.org/3zlvnix9h/image.png',
'e': 'https://s32.postimg.org/bgv32qmsl/image.png',
'f': 'https://s32.postimg.org/y6u7vq605/image.png',
'g': 'https://s32.postimg.org/9237ib6jp/image.png',
'h': 'https://s32.postimg.org/812yt6pk5/image.png',
'i': 'https://s32.postimg.org/6nbbxvqat/image.png',
'j': 'https://s32.postimg.org/axpztgvdx/image.png',
'k': 'https://s32.postimg.org/976yrzdut/image.png',
'l': 'https://s32.postimg.org/fmal2e9yd/image.png',
'm': 'https://s32.postimg.org/m19lz2go5/image.png',
'n': 'https://s32.postimg.org/b2ycgvs2t/image.png',
'o': 'https://s32.postimg.org/c6igsucpx/image.png',
'p': 'https://s32.postimg.org/jnro82291/image.png',
'q': 'https://s32.postimg.org/ve5lpfv1h/image.png',
'r': 'https://s32.postimg.org/nmovqvqw5/image.png',
's': 'https://s32.postimg.org/zd2t89jol/image.png',
't': 'https://s32.postimg.org/wk9lo8jc5/image.png',
'u': 'https://s32.postimg.org/w8s5bh2w5/image.png',
'v': 'https://s32.postimg.org/e7dlrey91/image.png',
'w': 'https://s32.postimg.org/fnp49k15x/image.png',
'x': 'https://s32.postimg.org/dkep1w1d1/image.png',
'y': 'https://s32.postimg.org/um7j3zg85/image.png',
'z': 'https://s32.postimg.org/jb4vfm9d1/image.png'}
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(item.clone(title="Todas", action="lista", thumbnail='https://s18.postimg.org/fwvaeo6qh/todas.png',
fanart='https://s18.postimg.org/fwvaeo6qh/todas.png', url=host))
itemlist.append(
item.clone(title="Generos", action="seccion", thumbnail='https://s3.postimg.org/5s9jg2wtf/generos.png',
fanart='https://s3.postimg.org/5s9jg2wtf/generos.png', url=host, extra='generos'))
itemlist.append(
item.clone(title="Alfabetico", action="seccion", thumbnail='https://s17.postimg.org/fwi1y99en/a-z.png',
fanart='https://s17.postimg.org/fwi1y99en/a-z.png', url=host, extra='letras'))
itemlist.append(item.clone(title="Buscar", action="search", url=host + '/?s=',
thumbnail='https://s30.postimg.org/pei7txpa9/buscar.png',
fanart='https://s30.postimg.org/pei7txpa9/buscar.png'))
return itemlist
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
if item.extra != 'letras':
patron = '<li class="TPostMv">.*?<a href="(.*?)"><div class="Image">.*?src="(.*?)\?resize=.*?".*?class="Title">(.*?)<\/h2>.*?'
patron += '<span class="Year">(.*?)<\/span>.*?<span class="Qlty">(.*?)<\/span><\/p><div class="Description"><p>(.*?)<\/p>'
else:
patron = '<td class="MvTbImg"> <a href="(.*?)".*?src="(.*?)\?resize=.*?".*?<strong>(.*?)<\/strong> <\/a><\/td><td>(.*?)<\/td><td>.*?'
patron += 'class="Qlty">(.*?)<\/span><\/p><\/td><td>(.*?)<\/td><td>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear, calidad, scrapedplot in matches:
url = scrapedurl
thumbnail = scrapedthumbnail
plot = scrapedplot
contentTitle = scrapedtitle
title = contentTitle + ' (' + calidad + ')'
year = scrapedyear
fanart = ''
itemlist.append(
Item(channel=item.channel, action='findvideos', title=title, url=url, thumbnail=thumbnail, plot=plot,
fanart=fanart, contentTitle=contentTitle, infoLabels={'year': year}))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Paginacion
if itemlist != []:
actual_page_url = item.url
next_page = scrapertools.find_single_match(data, '<a class="nextpostslink" rel="next" href="(.*?)">')
if next_page != '':
itemlist.append(Item(channel=item.channel, action="lista", title='Siguiente >>>', url=next_page,
thumbnail='https://s16.postimg.org/9okdu7hhx/siguiente.png'))
return itemlist
def seccion(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
if item.extra == 'generos':
patron = 'menu-item-object-category menu-item-.*?"><a href="(.*?)">(.*?)<\/a><\/li>'
else:
patron = '<li><a href="(.*?\/letter\/.*?)">(.*?)<\/a><\/li>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
thumbnail = ''
if item.extra == 'generos' and scrapedtitle in tgenero:
thumbnail = tgenero[scrapedtitle]
elif scrapedtitle.lower() in tletras:
thumbnail = tletras[scrapedtitle.lower()]
fanart = ''
title = scrapedtitle
url = scrapedurl
itemlist.append(
Item(channel=item.channel, action="lista", title=title, fulltitle=item.title, url=url, thumbnail=thumbnail,
fanart=fanart, extra=item.extra))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = 'class="Num">.*?<\/span>.*?href="(.*?)" class="Button STPb">.*?<\/a>.*?<span>(.*?)<\/span><\/td><td><span>(.*?)<\/span><\/td><td><span>.*?<\/span>'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
for scrapedurl, servidor, idioma in matches:
new_item = (item.clone(url=scrapedurl, servidor=servidor, idioma=idioma, infoLabels=infoLabels))
itemlist += get_video_urls(new_item)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))
itemlist.insert(len(itemlist) - 1, item.clone(channel='trailertools', action='buscartrailer',
title='[COLOR orange]Trailer en Youtube[/COLOR]'))
    return itemlist


def get_video_urls(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.find_single_match(data, '<script type="text\/javascript">(.*?)<\/script>')
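    # The player script is p,a,c,k,e,d JavaScript; unpack it so the
    # "file"/"label" pairs and the subtitle track become visible.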
data = jsunpack.unpack(data)
patron = '"file":"(.*?)","label":"(.*?)","type":"video.*?"}'
subtitle = scrapertools.find_single_match(data, 'tracks:\[{"file":"(.*?)","label":".*?","kind":"captions"}')
matches = re.compile(patron, re.DOTALL).findall(data)
for url, calidad in matches:
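        # 'PELISENCASA' appears to be the site's own hosting, so relabel it
        # as a direct link for the player.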
if item.servidor == 'PELISENCASA':
item.servidor = 'Directo'
title = item.contentTitle + ' (' + item.idioma + ')' + ' (' + calidad + ')' + ' (' + item.servidor + ')'
itemlist.append(item.clone(title=title, url=url, calidad=calidad, action='play', subtitle=subtitle))
    return itemlist


def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
if texto != '':
return lista(item)
else:
        return []


def newest(categoria):
logger.info()
itemlist = []
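    # Entry point for the global "Novedades" sections; categoria is the
    # section name (presumably supplied by the news aggregator channel).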
item = Item()
try:
if categoria == 'peliculas':
item.url = host
elif categoria == 'infantiles':
item.url = host + '/category/animacion/'
itemlist = lista(item)
if itemlist[-1].title == 'Siguiente >>>':
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
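A minimal sketch of how a launcher could exercise the channel entry points above (the import path and the mainlist wrapper are assumptions for illustration; only lista and findvideos are taken directly from the code):

# Hypothetical harness -- the module name is an assumption, not part of this commit.
from core.item import Item
from channels import pelisfox as channel

menu = channel.mainlist(Item(channel='pelisfox'))  # assumed wrapper of the appends above
movies = channel.lista(menu[0])                    # "Todas": first page of films
links = channel.findvideos(movies[0])              # resolve playable links for one film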

View File

@@ -240,10 +240,10 @@ def findvideos(item):
             ))
     for videoitem in templist:
         data = httptools.downloadpage(videoitem.url).data
-        urls_list = scrapertools.find_multiple_matches(data, '{"reorder":1,"type":.*?}')
+        urls_list = scrapertools.find_multiple_matches(data, 'var.*?_SOURCE\s+=\s+\[(.*?)\]')
         for element in urls_list:
             json_data=jsontools.load(element)
             id = json_data['id']
             sub = json_data['srt']
             url = json_data['source']
@@ -253,7 +253,6 @@ def findvideos(item):
             new_url = 'https://onevideo.tv/api/player?key=90503e3de26d45e455b55e9dc54f015b3d1d4150&link' \
                       '=%s&srt=%s' % (url, sub)
-            logger.debug('new_url: %s' % new_url)
             data = httptools.downloadpage(new_url).data
             data = re.sub(r'\\', "", data)

View File

@@ -215,18 +215,20 @@ def search(item, texto):
 def findvideos(item):
     logger.info()
     itemlist = []
+    duplicados = []
     data = get_source(item.url)
-    patron = '<div class=TPlayerTbCurrent id=(.*?)><iframe.*?src=(.*?) frameborder'
+    patron = '<div class=TPlayer.*?\s+id=(.*?)><iframe width=560 height=315 src=(.*?) frameborder=0'
     matches = re.compile(patron, re.DOTALL).findall(data)
     for opt, urls_page in matches:
-        language = scrapertools.find_single_match (data,'data-TPlayerNv=%s><span>Opción <strong>.*?'
+        logger.debug ('option: %s' % opt)
+        language = scrapertools.find_single_match (data,'data-TPlayerNv=%s><span>Opción <strong>.'
                                                    '<\/strong><\/span>.*?<span>(.*?)<\/span'%opt)
-        data = httptools.downloadpage(urls_page).data
-        servers = scrapertools.find_multiple_matches(data,'<button id="(.*?)"')
+        video_data = httptools.downloadpage(urls_page).data
+        servers = scrapertools.find_multiple_matches(video_data,'<button id="(.*?)"')
         for server in servers:
+            quality = item.quality
             info_urls = urls_page.replace('embed','get')
             video_info=httptools.downloadpage(info_urls+'/'+server).data
             video_info = jsontools.load(video_info)
@@ -238,8 +240,13 @@ def findvideos(item):
                 url = 'https://'+video_server+'/embed/'+video_id
             else:
                 url = 'https://'+video_server+'/e/'+video_id
-            title = item.title
-            itemlist.append(item.clone(title=title, url=url, action='play', language=language))
+            title = item.contentTitle + ' [%s] [%s]'%(quality, language)
+            itemlist.append(item.clone(title=title,
+                                       url=url,
+                                       action='play',
+                                       language=language,
+                                       quality=quality
+                                       ))
     itemlist = servertools.get_servers_itemlist(itemlist)
     return itemlist

View File

@@ -428,13 +428,17 @@ def get_vip(url):
     itemlist =[]
     url= url.replace('reproductor','vip')
     data = httptools.downloadpage(url).data
-    patron = '<a href="(.*?)"> '
-    video_urls = scrapertools.find_multiple_matches(data,'<a href="(.*?)">')
+    video_urls = scrapertools.find_multiple_matches(data,'<a href="(.*?)".*?>')
     for item in video_urls:
-        id, tipo, lang= scrapertools.find_single_match(item,'plus\/(\d+)\/.*?=(\d+).*?=(.*)')
-        new_url = 'https://www.elreyxhd.com/pelisplus.php?id=%s&tipo=%s&idioma=%s' % (id, tipo, lang)
-        data=httptools.downloadpage(new_url, follow_redirects=False).headers
-        itemlist.extend(servertools.find_video_items(data=str(data)))
+        if 'elreyxhd' in item:
+            if 'plus'in item:
+                id, tipo, lang= scrapertools.find_single_match(item,'plus\/(\d+)\/.*?=(\d+).*?=(.*)')
+                new_url = 'https://www.elreyxhd.com/pelisplus.php?id=%s&tipo=%s&idioma=%s' % (id, tipo, lang)
+            else:
+                id = scrapertools.find_single_match(item,'episodes\/(\d+)')
+                new_url = 'https://www.elreyxhd.com/samir.php?id=%s&tipo=capitulo&idioma=latino&x=&sv=' % id
+            data=httptools.downloadpage(new_url, follow_redirects=False).headers
+            itemlist.extend(servertools.find_video_items(data=str(data)))
     return itemlist

View File

@@ -2,7 +2,7 @@
"id": "playmax", "id": "playmax",
"name": "PlayMax", "name": "PlayMax",
"language": ["cast", "lat"], "language": ["cast", "lat"],
"active": true, "active": false,
"adult": false, "adult": false,
"thumbnail": "playmax.png", "thumbnail": "playmax.png",
"banner": "playmax.png", "banner": "playmax.png",

View File

@@ -30,11 +30,6 @@ def mainlist(item):
     itemlist.append(
         Item(channel=item.channel, action="menudesta", title="Destacadas", url= host + "/pag/1",
              thumbnail="http://img.irtve.es/v/1074982/", fanart=mifan))
-    itemlist.append(Item(channel=item.channel, action="menupelis", title="Proximos estrenos",
-                         url= host + "/archivos/proximos-estrenos/pag/1",
-                         thumbnail="https://encrypted-tbn3.gstatic.com/images?q=tbn:ANd9GcTpsRC"
-                                   "-GTYzCqhor2gIDfAB61XeymwgXWSVBHoRAKs2c5HAn29f&reload=on",
-                         fanart=mifan))
     itemlist.append(Item(channel=item.channel, action="menupelis", title="Todas las Peliculas",
                          url= host + "/pag/1",
                          thumbnail="https://freaksociety.files.wordpress.com/2012/02/logos-cine.jpg", fanart=mifan))
@@ -70,7 +65,8 @@ def menupelis(item):
     logger.info(item.url)
     itemlist = []
     data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8')
+    if item.genre:
+        item.extra = item.genre
     if item.extra == '':
         section = 'Recién Agregadas'
     elif item.extra == 'year':
@@ -79,17 +75,13 @@ def menupelis(item):
         section = 'de Eróticas \+18'
     else:
         section = 'de %s'%item.extra
-    patronenlaces = '<h.>Películas %s<\/h.>.*?>(.*?)<\/section>'%section
-    matchesenlaces = re.compile(patronenlaces, re.DOTALL).findall(data)
+    patronenlaces = '<h.>Películas %s</h.>.*?>(.*?)</section>'%section
+    matchesenlaces = scrapertools.find_multiple_matches(data, patronenlaces)
     for bloque_enlaces in matchesenlaces:
         patron = '<div class="poster-media-card">.*?'
         patron += '<a href="(.*?)".*?title="(.*?)"(.*?)'
         patron += '<img src="(.*?)"'
         matches = re.compile(patron, re.DOTALL).findall(bloque_enlaces)
         for scrapedurl, scrapedtitle, extra_info, scrapedthumbnail in matches:
             title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
             title = title.replace("Online", "");
@@ -144,21 +136,14 @@ def menudesta(item):
 # Peliculas de Estreno
 def menuestre(item):
     logger.info(item.url)
     itemlist = []
     data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8')
     patronenlaces = '<h1>Estrenos</h1>(.*?)</section>'
-    matchesenlaces = re.compile(patronenlaces, re.DOTALL).findall(data)
+    matchesenlaces = scrapertools.find_multiple_matches(data, patronenlaces)
     for bloque_enlaces in matchesenlaces:
-        # patron = '<a href="([^"]+)" title="([^"]+)"> <div class="poster".*?<img src="([^"]+)"'
         patron = '<div class="poster-media-card">.*?'
         patron += '<a href="(.*?)".*?title="(.*?)"(.*?)'
         patron += '<img src="(.*?)"'
         matches = re.compile(patron, re.DOTALL).findall(bloque_enlaces)
         for scrapedurl, scrapedtitle, extra_info, scrapedthumbnail in matches:
             title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
@@ -255,32 +240,22 @@ def search(item, texto):
     patron += '<div class="row">.*?'
     patron += '<a href="(.*?)" title="(.*?)">.*?'
     patron += '<img src="(.*?)"'
-    logger.info(patron)
     matches = re.compile(patron, re.DOTALL).findall(data)
     itemlist = []
     for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
         title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
         title = title.replace("Online", "")
-        url = item.url + scrapedurl
-        thumbnail = item.url + scrapedthumbnail
-        logger.info(url)
+        url = scrapedurl
+        thumbnail = scrapedthumbnail
         itemlist.append(Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url,
                              thumbnail=thumbnail, fanart=thumbnail))
     return itemlist


 def poranyo(item):
     logger.info(item.url)
     itemlist = []
     data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8')
     patron = '<option value="([^"]+)">(.*?)</option>'
     matches = re.compile(patron, re.DOTALL).findall(data)
     for scrapedurl, scrapedtitle in matches:
@@ -289,7 +264,6 @@ def poranyo(item):
         url = item.url + scrapedurl
         itemlist.append(Item(channel=item.channel, action="menupelis", title=title, fulltitle=title, url=url,
                              fanart=item.fanart, extra='year'))
     return itemlist
@@ -300,24 +274,25 @@ def porcateg(item):
     data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8')
     patron = '<li class="cat-item cat-item-3">.*?<a href="([^"]+)" title="([^"]+)">'
     matches = scrapertools.find_multiple_matches(data, patron)
+    adult_mode = config.get_setting("adult_mode")
     for scrapedurl, scrapedtitle in matches:
+        if "18" in scrapedtitle and adult_mode == 0:
+            continue
         title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
         title = title.replace("Online", "")
         url = scrapedurl
         logger.info(url)
         # si no esta permitidas categoria adultos, la filtramos
-        extra = title
-        adult_mode = config.get_setting("adult_mode")
+        extra1 = title
         if adult_mode != 0:
             if 'erotic' in scrapedurl:
-                extra = 'adult'
+                extra1 = 'adult'
             else:
-                extra=title
-        if (extra=='adult' and adult_mode != 0) or extra != 'adult':
+                extra1=title
+        if (extra1=='adult' and adult_mode != 0) or extra1 != 'adult':
             itemlist.append(Item(channel=item.channel, action="menupelis", title=title, fulltitle=title, url=url,
-                                 fanart=item.fanart, extra = extra))
+                                 fanart=item.fanart, genre = extra1))
     return itemlist
@@ -338,7 +313,6 @@ def decode(string):
         i += 1
         enc4 = keyStr.index(input[i])
         i += 1
         chr1 = (enc1 << 2) | (enc2 >> 4)
         chr2 = ((enc2 & 15) << 4) | (enc3 >> 2)
         chr3 = ((enc3 & 3) << 6) | enc4

View File

@@ -290,7 +290,10 @@ def do_search(item, categories=None):
     multithread = config.get_setting("multithread", "search")
     result_mode = config.get_setting("result_mode", "search")
-    tecleado = item.extra
+    if item.wanted!='':
+        tecleado=item.wanted
+    else:
+        tecleado = item.extra
     itemlist = []

View File

@@ -8,6 +8,7 @@ from core import httptools
 from core import scrapertools
 from core import servertools
 from core.item import Item
+from core import tmdb
 from platformcode import config, logger

 IDIOMAS = {'latino': 'Latino'}
@@ -35,6 +36,7 @@ def mainlist(item):
                  url=host,
                  thumbnail='https://s27.postimg.org/iahczwgrn/series.png',
                  fanart='https://s27.postimg.org/iahczwgrn/series.png',
+                 page=0
                  ))
     autoplay.show_option(item.channel, itemlist)
     return itemlist
@@ -49,15 +51,21 @@ def todas(item):
              'Serie><span>(.*?)<\/span>'
     matches = re.compile(patron, re.DOTALL).findall(data)
-    for scrapedurl, scrapedthumbnail, scrapedcalidad, scrapedtitle in matches:
+    # Paginacion
+    num_items_x_pagina = 30
+    min = item.page * num_items_x_pagina
+    min=int(min)-int(item.page)
+    max = min + num_items_x_pagina - 1
+    for scrapedurl, scrapedthumbnail, scrapedcalidad, scrapedtitle in matches[min:max]:
         url = host + scrapedurl
         calidad = scrapedcalidad
         title = scrapedtitle.decode('utf-8')
         thumbnail = scrapedthumbnail
         fanart = 'https://s32.postimg.org/gh8lhbkb9/seodiv.png'
+        if not 'xxxxxx' in scrapedtitle:
             itemlist.append(
                 Item(channel=item.channel,
                      action="temporadas",
                      title=title, url=url,
                      thumbnail=thumbnail,
@@ -67,7 +75,13 @@ def todas(item):
                      language=language,
                      context=autoplay.context
                      ))
+    tmdb.set_infoLabels(itemlist)
+    if len(itemlist)>28:
+        itemlist.append(
+            Item(channel=item.channel,
+                 title="[COLOR cyan]Página Siguiente >>[/COLOR]",
+                 url=item.url, action="todas",
+                 page=item.page + 1))
     return itemlist
@@ -222,16 +236,31 @@ def episodiosxtemp(item):
 def findvideos(item):
     logger.info()
     itemlist = []
+    lang=[]
     data = httptools.downloadpage(item.url).data
     video_items = servertools.find_video_items(item)
+    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
+    language_items=scrapertools.find_single_match(data,
+                                                  '<ul class=tabs-sidebar-ul>(.+?)<\/ul>')
+    matches=scrapertools.find_multiple_matches(language_items,
+                                               '<li><a href=#ts(.+?)><span>(.+?)<\/span><\/a><\/li>')
+    for idl,scrapedlang in matches:
+        if int(idl)<5 and int(idl)!=1:
+            lang.append(scrapedlang)
+    i=0
+    logger.info(lang)
     for videoitem in video_items:
         videoitem.thumbnail = servertools.guess_server_thumbnail(videoitem.server)
-        videoitem.language = scrapertools.find_single_match(data, '<span class="f-info-title">Idioma:<\/span>\s*<span '
-                                                                  'class="f-info-text">(.*?)<\/span>')
+        #videoitem.language = scrapertools.find_single_match(data, '<span class="f-info-title">Idioma:<\/span>\s*<span '
+        #                                                          'class="f-info-text">(.*?)<\/span>')
+        if i < len(lang):
+            videoitem.language=lang[i]
+        else:
+            videoitem.language=lang[len(lang)-1]
         videoitem.title = item.contentSerieName + ' (' + videoitem.server + ') (' + videoitem.language + ')'
         videoitem.quality = 'default'
         videoitem.context = item.context
+        i=i+1
         itemlist.append(videoitem)

     # Requerido para FilterTools

View File

@@ -21,8 +21,7 @@ list_language = ['default']
 CALIDADES = ['SD', 'HDiTunes', 'Micro-HD-720p', 'Micro-HD-1080p', '1080p', '720p']
 list_quality = CALIDADES

-list_servers = ['streamix',
-                'powvideo',
+list_servers = ['powvideo',
                 'streamcloud',
                 'openload',
                 'flashx',
@@ -30,7 +29,8 @@ list_servers = ['streamix',
                 'nowvideo',
                 'gamovideo',
                 'kingvid',
-                'vidabc'
+                'vidabc',
+                'streamixcloud'
                 ]
@@ -103,11 +103,14 @@ def extract_series_from_data(item, data):
             else:
                 action = "findvideos"
-            context1=[filtertools.context(item, list_idiomas, CALIDADES), autoplay.context]
+            context = filtertools.context(item, list_idiomas, CALIDADES)
+            context2 = autoplay.context
+            context.extend(context2)
             itemlist.append(item.clone(title=name, url=urlparse.urljoin(HOST, url),
                                        action=action, show=name,
                                        thumbnail=img,
-                                       context=context1))
+                                       context=context))

     more_pages = re.search('pagina=([0-9]+)">>>', data)
     if more_pages:
@@ -306,11 +309,13 @@ def findvideos(item):
     for i in range(len(list_links)):
         a=list_links[i].title
-        b=a.lstrip('Ver en')
+        b=a[a.find("en") + 2:]
         c=b.split('[')
         d=c[0].rstrip( )
         d=d.lstrip( )
-        list_links[i].server=d
+        list_links[i].server=d.replace("streamix", "streamixcloud")
+    list_links = servertools.get_servers_itemlist(list_links)
     autoplay.start(list_links, item)
     return list_links
@@ -338,7 +343,7 @@ def play(item):
     else:
         url = item.url
-    itemlist = servertools.find_video_items(data=url)
+    itemlist = servertools.find_video_items(item=item,data=url)
     titulo = scrapertoolsV2.find_single_match(item.fulltitle, "^(.*?)\s\[.+?$")
     if titulo:

View File

@@ -14,10 +14,7 @@ from channels import autoplay
 IDIOMAS = {'latino': 'Latino'}
 list_language = IDIOMAS.values()
-list_servers = ['openload',
-                'okru',
-                'netutv',
-                'rapidvideo'
+list_servers = ['openload'
                 ]
 list_quality = ['default']
@@ -49,7 +46,11 @@ def lista(item):
     patron = '<a href="([^"]+)" '
     patron += 'class="link">.+?<img src="([^"]+)".*?'
     patron += 'title="([^"]+)">'
+    if item.url==host:
+        a=1
+    else:
+        num=(item.url).split('-')
+        a=int(num[1])
     matches = scrapertools.find_multiple_matches(data, patron)

     # Paginacion
@@ -57,17 +58,30 @@ def lista(item):
     min = item.page * num_items_x_pagina
     min=min-item.page
     max = min + num_items_x_pagina - 1
+    b=0
     for link, img, name in matches[min:max]:
-        title = name
+        b=b+1
+        if " y " in name:
+            title=name.replace(" y "," & ")
+        else:
+            title = name
         url = host + link
         scrapedthumbnail = host + img
-        context1=[renumbertools.context(item), autoplay.context]
-        itemlist.append(item.clone(title=title, url=url, action="episodios", thumbnail=scrapedthumbnail, show=title,
-                                   context=context1))
-    itemlist.append(
-        Item(channel=item.channel, title="Página Siguiente >>", url=item.url, action="lista", page=item.page + 1))
+        context = renumbertools.context(item)
+        context2 = autoplay.context
+        context.extend(context2)
+        itemlist.append(item.clone(title=title, url=url, action="episodios", thumbnail=scrapedthumbnail, show=title,
+                                   context=context))
+    if b<29:
+        a=a+1
+        url="https://serieslan.com/pag-"+str(a)
+        if b>10:
+            itemlist.append(
+                Item(channel=item.channel, title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=url, action="lista", page=0))
+    else:
+        itemlist.append(
+            Item(channel=item.channel, title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=item.url, action="lista", page=item.page + 1))
     tmdb.set_infoLabels(itemlist)
     return itemlist
@@ -78,7 +92,6 @@ def episodios(item):
     itemlist = []
     data = httptools.downloadpage(item.url).data
-    logger.debug("info %s " % data)

     # obtener el numero total de episodios
     total_episode = 0
@@ -93,6 +106,10 @@ def episodios(item):
         title = ""
         pat = "/"
+        if "Mike, Lu & Og"==item.title:
+            pat="&/"
+        if "KND" in item.title:
+            pat="-"
         # varios episodios en un enlace
         if len(name.split(pat)) > 1:
             i = 0
@@ -120,7 +137,7 @@ def episodios(item):
                                  thumbnail=scrapedthumbnail))
     if config.get_videolibrary_support() and len(itemlist) > 0:
-        itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=item.url,
+        itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]", url=item.url,
                              action="add_serie_to_library", extra="episodios", show=show))
     return itemlist
@@ -134,7 +151,7 @@ def findvideos(item):
     itemlist = []
     url_server = "https://openload.co/embed/%s/"
-    url_api_get_key = "https://serieslan.com/ide.php?i=%s&k=%s"
+    url_api_get_key = "https://serieslan.com/idx.php?i=%s&k=%s"

     def txc(key, _str):
         s = range(256)
@@ -157,7 +174,7 @@ def findvideos(item):
         return res

     data = httptools.downloadpage(item.url).data
-    pattern = '<div id="video" idv="([^"]*)" ide="([^"]*)" ids="[^"]*" class="video">'
+    pattern = "<script type=.+?>.+?\['(.+?)','(.+?)','.+?'\]"
     idv, ide = scrapertools.find_single_match(data, pattern)
     thumbnail = scrapertools.find_single_match(data,
                                                '<div id="tab-1" class="tab-content current">.+?<img src="([^"]*)">')
@@ -167,7 +184,6 @@ def findvideos(item):
     data = eval(data)
     if type(data) == list:
-        logger.debug("inside")
         video_url = url_server % (txc(ide, base64.decodestring(data[2])))
         server = "openload"
         if " SUB" in item.title:
@@ -177,7 +193,11 @@ def findvideos(item):
         else:
             lang = "Latino"
         title = "Enlace encontrado en " + server + " [" + lang + "]"
-        itemlist.append(Item(channel=item.channel, action="play", title=title, show=show, url=video_url, plot=item.plot,
-                             thumbnail=thumbnail, server=server, folder=False))
+        if item.contentChannel=='videolibrary':
+            itemlist.append(item.clone(channel=item.channel, action="play", url=video_url,
+                                       thumbnail=thumbnail, server=server, folder=False))
+        else:
+            itemlist.append(Item(channel=item.channel, action="play", title=title, show=show, url=video_url, plot=item.plot,
+                                 thumbnail=thumbnail, server=server, folder=False))
         autoplay.start(itemlist, item)
@@ -185,17 +205,3 @@ def findvideos(item):
     else:
         return []

-def play(item):
-    logger.info()
-    itemlist = []
-    # Buscamos video por servidor ...
-    devuelve = servertools.findvideosbyserver(item.url, item.server)
-    if not devuelve:
-        # ...sino lo encontramos buscamos en todos los servidores disponibles
-        devuelve = servertools.findvideos(item.url, skip=True)
-    if devuelve:
-        # logger.debug(devuelve)
-        itemlist.append(Item(channel=item.channel, title=item.contentTitle, action="play", server=devuelve[0][2],
-                             url=devuelve[0][1], thumbnail=item.thumbnail, folder=False))
-    return itemlist

View File

@@ -160,7 +160,9 @@ def findvideos(item):
'gamo': 'http://gamovideo.com/embed-', 'gamo': 'http://gamovideo.com/embed-',
'powvideo': 'http://powvideo.net/embed-', 'powvideo': 'http://powvideo.net/embed-',
'play': 'http://streamplay.to/embed-', 'play': 'http://streamplay.to/embed-',
'vido': 'http://vidoza.net/embed-'} 'vido': 'http://vidoza.net/embed-',
'net': 'http://hqq.tv/player/embed_player.php?vid='
}
data = get_source(item.url) data = get_source(item.url)
noemitido = scrapertools.find_single_match(data, '<p><img src=(http://darkiller.com/images/subiendo.png) border=0\/><\/p>') noemitido = scrapertools.find_single_match(data, '<p><img src=(http://darkiller.com/images/subiendo.png) border=0\/><\/p>')
patron = 'id=tab\d+.*?class=tab_content><script>(.*?)\((.*?)\)<\/script>' patron = 'id=tab\d+.*?class=tab_content><script>(.*?)\((.*?)\)<\/script>'

View File

@@ -7,7 +7,8 @@
     "banner": "https://s9.postimg.org/5yxsq205r/ultrapeliculashd_banner.png",
     "thumbnail": "https://s13.postimg.org/d042quw9j/ultrapeliculashd.png",
     "categories": [
-        "movie"
+        "movie",
+        "direct"
     ],
     "settings": [
         {
@@ -33,6 +34,14 @@
             "default": true,
             "enabled": true,
             "visible": true
+        },
+        {
+            "id": "include_in_newest_terror",
+            "type": "bool",
+            "label": "Incluir en Novedades -Terror",
+            "default": true,
+            "enabled": true,
+            "visible": true
         }
     ]
 }
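Note: the new include_in_newest_terror switch pairs with the elif categoria == 'terror' branch added to newest() in the channel code below.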

View File

@@ -192,27 +192,24 @@ def findvideos(item):
     data = httptools.downloadpage(item.url).data
     data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
-    patron = '<iframe class=metaframe rptss src=(.*?) frameborder=0 allowfullscreen><\/iframe>'
-    matches = matches = re.compile(patron, re.DOTALL).findall(data)
+    patron = '<iframe class=metaframe rptss src=(.*?) (?:width=.*?|frameborder=0) allowfullscreen><\/iframe>'
+    matches = re.compile(patron, re.DOTALL).findall(data)
     for video_url in matches:
-        # TODO Reparar directos
-        # if 'stream' in video_url:
-        #     data = httptools.downloadpage('https:'+video_url).data
-        #     new_url=scrapertools.find_single_match(data, 'iframe src="(.*?)"')
-        #     new_data = httptools.downloadpage(new_url).data
-        #     logger.debug(new_data)
-        #
-        #     url, quality = scrapertools.find_single_match(new_data, "file:'(.*?)',label:'(.*?)'")
-        #     headers_string = '|Referer=%s' % url
-        #     url = url.replace('download', 'preview')+headers_string
-        #     sub = scrapertools.find_single_match(new_data, "file:.*?'(.*?srt)'")
-        #     new_item = (Item(title=item.title, url=url, quality=quality, server='directo',
-        #                 subtitle=sub))
-        #     itemlist.append(new_item)
-        # else:
-        itemlist.extend(servertools.find_video_items(data=video_url))
+        if 'stream' in video_url:
+            data = httptools.downloadpage('https:'+video_url).data
+            new_url=scrapertools.find_single_match(data, 'iframe src="(.*?)"')
+            new_data = httptools.downloadpage(new_url).data
+            url, quality = scrapertools.find_single_match(new_data, 'file:.*?"(.*?)",label:.*?"(.*?)"')
+            headers_string = '|Referer=%s' % url
+            url = url.replace('download', 'preview')+headers_string
+            sub = scrapertools.find_single_match(new_data, 'file:.*?"(.*?srt)"')
+            new_item = (Item(title=item.title, url=url, quality=quality, server='directo',
+                        subtitle=sub))
+            itemlist.append(new_item)
+        else:
+            itemlist.extend(servertools.find_video_items(data=video_url))

     for videoitem in itemlist:
         videoitem.channel = item.channel
@@ -255,10 +252,13 @@ def newest(categoria):
         item.extra = 'estrenos/'
     try:
         if categoria == 'peliculas':
-            item.url = host + '/category/estrenos/'
+            item.url = host + '/genre/estrenos/'
         elif categoria == 'infantiles':
-            item.url = host + '/category/infantil/'
+            item.url = host + '/genre/animacion/'
+        elif categoria == 'terror':
+            item.url = host + '/genre/terror/'
         itemlist = lista(item)
         if itemlist[-1].title == 'Siguiente >>>':

View File

@@ -45,15 +45,15 @@ def getmainlist(view="thumb_"):
context=[{"title": "Configurar Descargas", "channel": "setting", "config": "downloads", context=[{"title": "Configurar Descargas", "channel": "setting", "config": "downloads",
"action": "channel_config"}])) "action": "channel_config"}]))
thumb_configuracion = "setting_%s.png" % 0 # config.get_setting("plugin_updates_available") thumb_setting = "setting_%s.png" % 0 # config.get_setting("plugin_updates_available")
itemlist.append(Item(title=config.get_localized_string(30100), channel="setting", action="mainlist", itemlist.append(Item(title=config.get_localized_string(30100), channel="setting", action="mainlist",
thumbnail=get_thumb(thumb_configuracion, view), thumbnail=get_thumb(thumb_setting, view),
category=config.get_localized_string(30100), viewmode="list")) category=config.get_localized_string(30100), viewmode="list"))
# TODO REVISAR LA OPCION AYUDA
# itemlist.append(Item(title=config.get_localized_string(30104), channel="help", action="mainlist", itemlist.append(Item(title=config.get_localized_string(30104), channel="help", action="mainlist",
# thumbnail=get_thumb("help.png", view), thumbnail=get_thumb("help.png", view),
# category=config.get_localized_string(30104), viewmode="list")) category=config.get_localized_string(30104), viewmode="list"))
return itemlist return itemlist

View File

@@ -23,8 +23,8 @@ ficherocookies = os.path.join(config.get_data_path(), "cookies.dat")
 # Headers por defecto, si no se especifica nada
 default_headers = dict()
-default_headers["User-Agent"] = "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0"
-default_headers["Accept"] = "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"
+default_headers["User-Agent"] = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36"
+default_headers["Accept"] = "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8"
 default_headers["Accept-Language"] = "es-ES,es;q=0.8,en-US;q=0.5,en;q=0.3"
 default_headers["Accept-Charset"] = "UTF-8"
 default_headers["Accept-Encoding"] = "gzip"

View File

@@ -398,9 +398,9 @@ def set_context_commands(item, parent_item):
     if item.contentType in ['movie','tvshow']and item.channel != 'search':
         # Buscar en otros canales
         if item.contentSerieName!='':
-            item.extra=item.contentSerieName
+            item.wanted=item.contentSerieName
         else:
-            item.extra = item.contentTitle
+            item.wanted = item.contentTitle
         context_commands.append(("[COLOR yellow]Buscar en otros canales[/COLOR]",
                                  "XBMC.Container.Update (%s?%s)" % (sys.argv[0],
                                                                     item.clone(channel='search',

View File

@@ -412,8 +412,12 @@ class SettingsWindow(xbmcgui.WindowXMLDialog):
             self.addControl(control)
             control.setVisible(False)
-            control.setLabel(c["label"])
-            control.setText(self.values[c["id"]])
+            # frodo fix
+            s = self.values[c["id"]]
+            if s is None:
+                s = ''
+            control.setText(s)
+            # control.setText(self.values[c["id"]])
             control.setWidth(self.controls_width - 5)
             control.setHeight(self.height_control)

View File

@@ -40,6 +40,7 @@
     </category>
     <category label="Opciones Visuales">
         <setting id="icon_set" type="labelenum" label="Set de iconos" values="default|dark" default="default"/>
+        <setting id="infoplus_set" type="labelenum" label="Opción visual Infoplus" values="Sin animación|Con animación" default="Sin animación"/>
     </category>
     <category label="Otros">
         <setting label="Info de películas/series en menú contextual" type="lsep"/>

View File

@@ -37,12 +37,16 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=""):
     cgi_counter = cgi_counter.replace("%0A","").replace("%22","")
     playnow = scrapertools.find_single_match(data, 'https://www.flashx.tv/dl[^"]+')
     # Para obtener el f y el fxfx
-    js_fxfx = scrapertools.find_single_match(data, """(?is)src=.(https://www.flashx.tv/js/code.js.*?[^(?:'|")]+)""")
+    js_fxfx = "https://www." + scrapertools.find_single_match(data.replace("//","/"), """(?is)(flashx.tv/jss/coder.js.*?[^(?:'|")]+)""")
     data_fxfx = httptools.downloadpage(js_fxfx).data
     mfxfx = scrapertools.find_single_match(data_fxfx, 'get.*?({.*?})').replace("'","").replace(" ","")
     matches = scrapertools.find_multiple_matches(mfxfx, '(\w+):(\w+)')
     for f, v in matches:
         pfxfx += f + "=" + v + "&"
+    logger.info("mfxfxfx1= %s" %js_fxfx)
+    logger.info("mfxfxfx2= %s" %pfxfx)
+    if pfxfx == "":
+        pfxfx = "ss=yes&f=fail&fxfx=6"
     coding_url = 'https://www.flashx.tv/flashx.php?%s' %pfxfx
     # {f: 'y', fxfx: '6'}
     flashx_id = scrapertools.find_single_match(data, 'name="id" value="([^"]+)"')

View File

@@ -7,13 +7,12 @@ from core import scrapertools
 from lib import jsunpack
 from platformcode import logger

-headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:40.0) Gecko/20100101 ' \
-                         'Firefox/40.0'}
+headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:58.0) Gecko/20100101 Firefox/58.0'}

 def test_video_exists(page_url):
     logger.info("(page_url='%s')" % page_url)

-    data = httptools.downloadpage(page_url, add_referer = True).data
+    data = httptools.downloadpage(page_url).data
     if "File was deleted" in data or "Not Found" in data or "File was locked by administrator" in data:
         return False, "[Gamovideo] El archivo no existe o ha sido borrado"
@@ -25,8 +24,7 @@ def test_video_exists(page_url):
 def get_video_url(page_url, premium=False, user="", password="", video_password=""):
     logger.info("(page_url='%s')" % page_url)
-    data = httptools.downloadpage(page_url, headers=headers).data
-    logger.debug(data)
+    data = httptools.downloadpage(page_url).data
     packer = scrapertools.find_single_match(data,
                                             "<script type='text/javascript'>(eval.function.p,a,c,k,e,d..*?)</script>")
     if packer != "":

View File

@@ -30,12 +30,20 @@ def get_video_url(page_url, user="", password="", video_password=""):
     streams =[]
     logger.debug('page_url: %s'%page_url)
     if 'googleusercontent' in page_url:
-        data = httptools.downloadpage(page_url, follow_redirects = False, headers={"Referer": page_url})
-        url=data.headers['location']
+        response = httptools.downloadpage(page_url, follow_redirects = False, cookies=False, headers={"Referer": page_url})
+        url=response.headers['location']
+        cookies = ""
+        cookie = response.headers["set-cookie"].split("HttpOnly, ")
+        for c in cookie:
+            cookies += c.split(";", 1)[0] + "; "
+        data = response.data.decode('unicode-escape')
+        data = urllib.unquote_plus(urllib.unquote_plus(data))
+        headers_string = "|Cookie=" + cookies
         quality = scrapertools.find_single_match (url, '.itag=(\d+).')
         streams.append((quality, url))
-        headers_string=""
     else:
         response = httptools.downloadpage(page_url, cookies=False, headers={"Referer": page_url})

View File

@@ -8,9 +8,11 @@ from platformcode import logger
 def test_video_exists(page_url):
     logger.info("(page_url='%s')" % page_url)

-    data = httptools.downloadpage(page_url).data
-    if "video_error.mp4" in data:
+    response = httptools.downloadpage(page_url)
+    if "video_error.mp4" in response.data:
         return False, "[Stormo] El archivo no existe o ha sido borrado"
+    if response.code == 451:
+        return False, "[Stormo] El archivo ha sido borrado por problemas legales."

     return True, ""

View File

@@ -8,7 +8,6 @@ from platformcode import logger
 def test_video_exists(page_url):
     logger.info("(page_url='%s')" % page_url)
     data = httptools.downloadpage(page_url).data
     if "Not Found" in data:
         return False, "[streamixcloud] El archivo no existe o ha sido borrado"
@@ -21,7 +20,8 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=""):
     logger.info("(page_url='%s')" % page_url)
     data = httptools.downloadpage(page_url).data
     video_urls = []
-    packed = scrapertools.find_single_match(data,
+    patron = "<script type='text/javascript'>(eval\(function\(p,a,c,k,e,d.*?)</script"
+    packed = scrapertools.find_single_match(data, patron)
     data = jsunpack.unpack(packed)
     media_url = scrapertools.find_multiple_matches(data, '\{file:"([^"]+)",')
     ext = scrapertools.get_filename_from_url(media_url[0])[-4:]

View File

@@ -3,8 +3,8 @@
"find_videos": { "find_videos": {
"patterns": [ "patterns": [
{ {
"pattern": "(http://vshare.io/v/[\\w]+[^\"']*)[\"']", "pattern": "(vshare.io/v/[a-zA-Z0-9/-]+)",
"url": "\\1" "url": "http://\\1"
} }
] ]
}, },

View File

@@ -40,11 +40,8 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
arrayResult = [chr(int(value) - substract) for value in fields.group(1).split(",")] arrayResult = [chr(int(value) - substract) for value in fields.group(1).split(",")]
strResult = "".join(arrayResult) strResult = "".join(arrayResult)
logger.debug(strResult) logger.debug(strResult)
videoSources = re.findall("<source[\s]+src=[\"'](?P<url>[^\"']+)[^>]+label=[\"'](?P<label>[^\"']+)", strResult) videoSources = re.findall("<source[\s]+src=[\"'](?P<url>[^\"']+)[^>]+label=[\"'](?P<label>[^\"']+)", strResult)
for url, label in videoSources: for url, label in videoSources:
logger.debug("[" + label + "] " + url)
video_urls.append([label, url]) video_urls.append([label, url])
video_urls.sort(key=lambda i: int(i[0].replace("p","")))
return video_urls return video_urls