Compare commits
19 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 8985f3ebdd | |
| | d60c246bbb | |
| | 3b29fe47bb | |
| | 3093f72ce5 | |
| | 55dcf3f091 | |
| | 2924b6958d | |
| | 927310c7c6 | |
| | 0c25891790 | |
| | 212c06057f | |
| | 9c3b3e9256 | |
| | 6dc853b41e | |
| | 7afd09dfa9 | |
| | 6855508eaa | |
| | 2925c29671 | |
| | 506e68e8a3 | |
| | 9cc30152f8 | |
| | 267c9d8031 | |
| | bd68b83b6c | |
| | c1f8039672 | |
```diff
@@ -1,5 +1,5 @@
 <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
-<addon id="plugin.video.alfa" name="Alfa" version="2.3.3" provider-name="Alfa Addon">
+<addon id="plugin.video.alfa" name="Alfa" version="2.3.4" provider-name="Alfa Addon">
     <requires>
         <import addon="xbmc.python" version="2.1.0"/>
         <import addon="script.module.libtorrent" optional="true"/>
@@ -19,13 +19,11 @@
     </assets>
     <news>[B]Estos son los cambios para esta versión:[/B]
 [COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
-» serieslan » animemovil
-» mundiseries » bajui
-» seriesblanco » descargamix
-» miradetodo » pelisgratis
-» tvseriesdk » ultrapeliculashd
-» gamovideo » flashx
-» danimados ¤ arreglos internos
+» allpeliculas » repelis
+» flashx » ultrapeliculashd
+» gvideo » streamixcloud
+» vshare » anitoonstv
+¤ arreglos internos
 [COLOR green]Gracias a [COLOR yellow]Danielr460[/COLOR] por su colaboración en esta versión[/COLOR]
     </news>
     <description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>
```
```diff
@@ -1,7 +1,5 @@
 # -*- coding: utf-8 -*-
 
-import urlparse
-
 from core import httptools
 from core import jsontools
 from core import scrapertools
@@ -59,6 +57,7 @@ def colecciones(item):
         title = scrapedtitle.capitalize() + " (" + scrapedcantidad + ")"
         itemlist.append(Item(channel = item.channel,
                              action = "listado_colecciones",
+                             page = 1,
                              thumbnail = host + scrapedthumbnail,
                              title = title,
                              url = host + scrapedurl
@@ -71,7 +70,7 @@ def listado_colecciones(item):
     itemlist = []
     data = httptools.downloadpage(item.url).data
     data_url = scrapertools.find_single_match(data, "data_url: '([^']+)")
-    post = "page=1"
+    post = "page=%s" %item.page
     data = httptools.downloadpage(host + data_url, post=post).data
     patron = 'a href="(/peli[^"]+).*?'
     patron += 'src="([^"]+).*?'
@@ -88,6 +87,16 @@ def listado_colecciones(item):
                              url = host + scrapedurl
                              ))
     tmdb.set_infoLabels(itemlist)
+    item.page += 1
+    post = "page=%s" %item.page
+    data = httptools.downloadpage(host + data_url, post=post).data
+    if len(data) > 50:
+        itemlist.append(Item(channel = item.channel,
+                             action = "listado_colecciones",
+                             title = "Pagina siguiente>>",
+                             page = item.page,
+                             url = item.url
+                             ))
     return itemlist
 
 
@@ -159,6 +168,7 @@ def lista(item):
     params = jsontools.dump(dict_param)
 
     data = httptools.downloadpage(item.url, post=params).data
+    data = data.replace("<mark>","").replace("<\/mark>","")
     dict_data = jsontools.load(data)
 
     for it in dict_data["items"]:
@@ -167,7 +177,7 @@ def lista(item):
         rating = it["imdb"]
         year = it["year"]
         url = host + "pelicula/" + it["slug"]
-        thumb = urlparse.urljoin(host, it["image"])
+        thumb = host + it["image"]
         item.infoLabels['year'] = year
         itemlist.append(item.clone(action="findvideos", title=title, fulltitle=title, url=url, thumbnail=thumb,
                                    plot=plot, context=["buscar_trailer"], contentTitle=title, contentType="movie"))
```
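The change above threads the page number through the `Item` (`page = 1` at the entry point, `post = "page=%s" % item.page` on each request) and appends a "Pagina siguiente>>" entry only after probing whether the next page returns data. A minimal sketch of that probe-before-paginate idea, where `fetch_page` is a stand-in for the `httptools` POST call, not the addon's API:

```python
def add_next_page(itemlist, url, page, fetch_page, min_length=50):
    """Append a 'next page' entry only when the next page has content.

    fetch_page(url, page) stands in for the httptools POST call;
    min_length mirrors the `len(data) > 50` probe in the diff above.
    """
    next_page = page + 1
    data = fetch_page(url, next_page)
    if len(data) > min_length:  # anything shorter is treated as an empty page
        itemlist.append({"title": "Pagina siguiente>>",
                         "page": next_page,
                         "url": url})
    return itemlist
```

The probe costs one extra request per listing, but the menu never ends in a dead "next page" entry.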
```diff
@@ -148,15 +148,21 @@ def findvideos(item):
     itemla = scrapertools.find_multiple_matches(data_vid, '<div class="serv">.+?-(.+?)-(.+?)<\/div><.+? src="(.+?)"')
     for server, quality, url in itemla:
         if "Calidad Alta" in quality:
-            quality = quality.replace("Calidad Alta", "HQ")
+            quality = "HQ"
+        if "HQ" in quality:
+            quality = "HD"
         if " Calidad media - Carga mas rapido" in quality:
-            quality = quality.replace(" Calidad media - Carga mas rapido", "360p")
+            quality = "360p"
         server = server.lower().strip()
-        if "ok" == server:
+        if "ok" in server:
             server = 'okru'
+        if "rapid" in server:
+            server = 'rapidvideo'
+        if "netu" in server:
+            server = 'netutv'
         itemlist.append(item.clone(url=url, action="play", server=server, contentQuality=quality,
                                    thumbnail=scrapedthumbnail, plot=scrapedplot,
-                                   title="Enlace encontrado en %s: [%s]" % (server.capitalize(), quality)))
+                                   title="Enlace encontrado en: %s [%s]" % (server.capitalize(), quality)))
 
     autoplay.start(itemlist, item)
     return itemlist
```
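The chained `if` blocks normalize scraped server labels (now by substring rather than exact match) onto the resolver names the addon expects. A table-driven equivalent of the same logic, as a sketch — the substring keys come from the diff, the helper name is made up:

```python
# Substring -> resolver name, taken from the normalization in the diff above.
SERVER_ALIASES = {
    "ok": "okru",
    "rapid": "rapidvideo",
    "netu": "netutv",
}

def normalize_server(raw):
    """Map a scraped server label onto a resolver name by substring match."""
    name = raw.lower().strip()
    for fragment, resolver in SERVER_ALIASES.items():
        if fragment in name:
            return resolver
    return name  # unknown labels pass through unchanged
```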
```diff
@@ -30,11 +30,6 @@ def mainlist(item):
     itemlist.append(
         Item(channel=item.channel, action="menudesta", title="Destacadas", url= host + "/pag/1",
              thumbnail="http://img.irtve.es/v/1074982/", fanart=mifan))
-    itemlist.append(Item(channel=item.channel, action="menupelis", title="Proximos estrenos",
-                         url= host + "/archivos/proximos-estrenos/pag/1",
-                         thumbnail="https://encrypted-tbn3.gstatic.com/images?q=tbn:ANd9GcTpsRC"
-                                   "-GTYzCqhor2gIDfAB61XeymwgXWSVBHoRAKs2c5HAn29f&reload=on",
-                         fanart=mifan))
     itemlist.append(Item(channel=item.channel, action="menupelis", title="Todas las Peliculas",
                          url= host + "/pag/1",
                          thumbnail="https://freaksociety.files.wordpress.com/2012/02/logos-cine.jpg", fanart=mifan))
@@ -70,7 +65,8 @@ def menupelis(item):
     logger.info(item.url)
     itemlist = []
     data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8')
+    if item.genre:
+        item.extra = item.genre
     if item.extra == '':
         section = 'Recién Agregadas'
     elif item.extra == 'year':
@@ -79,17 +75,13 @@ def menupelis(item):
         section = 'de Eróticas \+18'
     else:
         section = 'de %s'%item.extra
-    patronenlaces = '<h.>Películas %s<\/h.>.*?>(.*?)<\/section>'%section
-    matchesenlaces = re.compile(patronenlaces, re.DOTALL).findall(data)
+    patronenlaces = '<h.>Películas %s</h.>.*?>(.*?)</section>'%section
+    matchesenlaces = scrapertools.find_multiple_matches(data, patronenlaces)
 
     for bloque_enlaces in matchesenlaces:
 
         patron = '<div class="poster-media-card">.*?'
         patron += '<a href="(.*?)".*?title="(.*?)"(.*?)'
         patron += '<img src="(.*?)"'
         matches = re.compile(patron, re.DOTALL).findall(bloque_enlaces)
 
         for scrapedurl, scrapedtitle, extra_info, scrapedthumbnail in matches:
             title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
             title = title.replace("Online", "");
@@ -144,21 +136,14 @@ def menudesta(item):
 # Peliculas de Estreno
 def menuestre(item):
     logger.info(item.url)
 
     itemlist = []
 
     data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8')
     patronenlaces = '<h1>Estrenos</h1>(.*?)</section>'
-    matchesenlaces = re.compile(patronenlaces, re.DOTALL).findall(data)
+    matchesenlaces = scrapertools.find_multiple_matches(data, patronenlaces)
 
     for bloque_enlaces in matchesenlaces:
 
-        # patron = '<a href="([^"]+)" title="([^"]+)"> <div class="poster".*?<img src="([^"]+)"'
 
         patron = '<div class="poster-media-card">.*?'
         patron += '<a href="(.*?)".*?title="(.*?)"(.*?)'
         patron += '<img src="(.*?)"'
 
         matches = re.compile(patron, re.DOTALL).findall(bloque_enlaces)
         for scrapedurl, scrapedtitle, extra_info, scrapedthumbnail in matches:
             title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
@@ -255,32 +240,22 @@ def search(item, texto):
     patron += '<div class="row">.*?'
     patron += '<a href="(.*?)" title="(.*?)">.*?'
     patron += '<img src="(.*?)"'
 
-    logger.info(patron)
 
     matches = re.compile(patron, re.DOTALL).findall(data)
 
     itemlist = []
 
     for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
         title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
         title = title.replace("Online", "")
-        url = item.url + scrapedurl
-        thumbnail = item.url + scrapedthumbnail
-        logger.info(url)
+        url = scrapedurl
+        thumbnail = scrapedthumbnail
         itemlist.append(Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url,
                              thumbnail=thumbnail, fanart=thumbnail))
 
     return itemlist
 
 
 def poranyo(item):
     logger.info(item.url)
 
     itemlist = []
 
     data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8')
 
     patron = '<option value="([^"]+)">(.*?)</option>'
     matches = re.compile(patron, re.DOTALL).findall(data)
     for scrapedurl, scrapedtitle in matches:
@@ -289,7 +264,6 @@ def poranyo(item):
         url = item.url + scrapedurl
         itemlist.append(Item(channel=item.channel, action="menupelis", title=title, fulltitle=title, url=url,
                              fanart=item.fanart, extra='year'))
-
     return itemlist
 
 
@@ -300,24 +274,25 @@ def porcateg(item):
     data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8')
     patron = '<li class="cat-item cat-item-3">.*?<a href="([^"]+)" title="([^"]+)">'
     matches = scrapertools.find_multiple_matches(data, patron)
+    adult_mode = config.get_setting("adult_mode")
     for scrapedurl, scrapedtitle in matches:
+        if "18" in scrapedtitle and adult_mode == 0:
+            continue
         title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
         title = title.replace("Online", "")
         url = scrapedurl
         logger.info(url)
         # si no esta permitidas categoria adultos, la filtramos
-        extra = title
-        adult_mode = config.get_setting("adult_mode")
+        extra1 = title
         if adult_mode != 0:
             if 'erotic' in scrapedurl:
-                extra = 'adult'
+                extra1 = 'adult'
             else:
-                extra=title
+                extra1=title
 
-        if (extra=='adult' and adult_mode != 0) or extra != 'adult':
+        if (extra1=='adult' and adult_mode != 0) or extra1 != 'adult':
             itemlist.append(Item(channel=item.channel, action="menupelis", title=title, fulltitle=title, url=url,
-                                 fanart=item.fanart, extra = extra))
+                                 fanart=item.fanart, genre = extra1))
 
     return itemlist
 
@@ -338,7 +313,6 @@ def decode(string):
     i += 1
     enc4 = keyStr.index(input[i])
     i += 1
-
     chr1 = (enc1 << 2) | (enc2 >> 4)
     chr2 = ((enc2 & 15) << 4) | (enc3 >> 2)
     chr3 = ((enc3 & 3) << 6) | enc4
@@ -352,4 +326,4 @@ def decode(string):
 
     output = output.decode('utf8')
 
     return output
```
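The `porcateg` change reads the `adult_mode` setting once, skips "+18" categories up front when adult content is disabled, and carries the category in the new `genre` field instead of overloading `extra`. The gate condenses to something like this sketch, which assumes a `config.get_setting`-style accessor supplies `adult_mode`; the helper name is illustrative:

```python
def visible_categories(categories, adult_mode):
    """Filter scraped (url, title) category pairs by the adult_mode setting.

    adult_mode == 0 means adult content is disabled, mirroring the
    `if "18" in scrapedtitle and adult_mode == 0: continue` guard above.
    """
    result = []
    for url, title in categories:
        if "18" in title and adult_mode == 0:
            continue  # hide +18 categories when adult mode is off
        genre = "adult" if ("erotic" in url and adult_mode != 0) else title
        result.append((url, title, genre))
    return result
```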
```diff
@@ -290,7 +290,10 @@ def do_search(item, categories=None):
     multithread = config.get_setting("multithread", "search")
     result_mode = config.get_setting("result_mode", "search")
 
-    tecleado = item.extra
+    if item.wanted!='':
+        tecleado=item.wanted
+    else:
+        tecleado = item.extra
 
     itemlist = []
 
```
```diff
@@ -34,6 +34,14 @@
             "default": true,
             "enabled": true,
             "visible": true
+        },
+        {
+            "id": "include_in_newest_terror",
+            "type": "bool",
+            "label": "Incluir en Novedades -Terror",
+            "default": true,
+            "enabled": true,
+            "visible": true
         }
     ]
 }
```
```diff
@@ -252,10 +252,13 @@ def newest(categoria):
     item.extra = 'estrenos/'
     try:
         if categoria == 'peliculas':
-            item.url = host + '/category/estrenos/'
+            item.url = host + '/genre/estrenos/'
 
         elif categoria == 'infantiles':
-            item.url = host + '/category/infantil/'
+            item.url = host + '/genre/animacion/'
 
+        elif categoria == 'terror':
+            item.url = host + '/genre/terror/'
+
         itemlist = lista(item)
         if itemlist[-1].title == 'Siguiente >>>':
```
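Adding a category to the global "Novedades" menu takes two coordinated edits, both visible above: an `include_in_newest_terror` boolean in the channel's settings JSON, and a matching branch in `newest()` that builds the listing URL for that category. The dispatcher side reduces to a lookup like this sketch — the `NEWEST_URLS` mapping and `HOST` value are illustrative, not the addon's code:

```python
# Hypothetical host; the per-category paths mirror the diff above.
HOST = "https://example-channel.tld"
NEWEST_URLS = {
    "peliculas": "/genre/estrenos/",
    "infantiles": "/genre/animacion/",
    "terror": "/genre/terror/",
}

def newest_url(categoria):
    """Resolve the listing URL for a 'Novedades' category, or None if the
    channel does not participate in that category."""
    path = NEWEST_URLS.get(categoria)
    return HOST + path if path else None
```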
```diff
@@ -398,9 +398,9 @@ def set_context_commands(item, parent_item):
     if item.contentType in ['movie','tvshow']and item.channel != 'search':
         # Buscar en otros canales
         if item.contentSerieName!='':
-            item.extra=item.contentSerieName
+            item.wanted=item.contentSerieName
         else:
-            item.extra = item.contentTitle
+            item.wanted = item.contentTitle
         context_commands.append(("[COLOR yellow]Buscar en otros canales[/COLOR]",
                                  "XBMC.Container.Update (%s?%s)" % (sys.argv[0],
                                                                     item.clone(channel='search',
```
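This hunk and the `do_search` hunk earlier move the search phrase from the overloaded `extra` field into a dedicated `wanted` field, with `do_search` falling back to `extra` for callers that still set it. The receiving side amounts to this fallback, sketched with `item` simplified to any object carrying those attributes:

```python
def search_phrase(item):
    """Prefer the dedicated `wanted` field, fall back to the legacy `extra`."""
    return item.wanted if getattr(item, "wanted", "") != "" else item.extra
```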
```diff
@@ -37,12 +37,16 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
     cgi_counter = cgi_counter.replace("%0A","").replace("%22","")
     playnow = scrapertools.find_single_match(data, 'https://www.flashx.tv/dl[^"]+')
     # Para obtener el f y el fxfx
-    js_fxfx = "https://www." + scrapertools.find_single_match(data, """(?is)(flashx.tv/js/code.js.*?[^(?:'|")]+)""")
+    js_fxfx = "https://www." + scrapertools.find_single_match(data.replace("//","/"), """(?is)(flashx.tv/js/code.js.*?[^(?:'|")]+)""")
     data_fxfx = httptools.downloadpage(js_fxfx).data
     mfxfx = scrapertools.find_single_match(data_fxfx, 'get.*?({.*?})').replace("'","").replace(" ","")
     matches = scrapertools.find_multiple_matches(mfxfx, '(\w+):(\w+)')
     for f, v in matches:
         pfxfx += f + "=" + v + "&"
+    logger.info("mfxfxfx1= %s" %js_fxfx)
+    logger.info("mfxfxfx2= %s" %pfxfx)
+    if pfxfx == "":
+        pfxfx = "ss=yes&f=fail&fxfx=6"
     coding_url = 'https://www.flashx.tv/flashx.php?%s' %pfxfx
     # {f: 'y', fxfx: '6'}
     flashx_id = scrapertools.find_single_match(data, 'name="id" value="([^"]+)"')
```
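The fxfx step scrapes a small JS object literal like `{f: 'y', fxfx: '6'}` out of code.js and re-encodes it as query parameters, now with a hard-coded fallback when nothing matches. The parsing idea in isolation, as a sketch using plain `re`; the fallback string is the one from the diff:

```python
import re

def js_object_to_query(js_snippet, fallback="ss=yes&f=fail&fxfx=6"):
    """Turn a scraped JS object literal like "{f: 'y', fxfx: '6'}" into
    a query string such as "f=y&fxfx=6", falling back if nothing parses."""
    cleaned = js_snippet.replace("'", "").replace(" ", "")
    pairs = re.findall(r"(\w+):(\w+)", cleaned)
    if not pairs:
        return fallback
    return "&".join("%s=%s" % (k, v) for k, v in pairs)

print(js_object_to_query("{f: 'y', fxfx: '6'}"))  # -> f=y&fxfx=6
```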
```diff
@@ -30,12 +30,20 @@ def get_video_url(page_url, user="", password="", video_password=""):
     streams =[]
     logger.debug('page_url: %s'%page_url)
     if 'googleusercontent' in page_url:
-        data = httptools.downloadpage(page_url, follow_redirects = False, headers={"Referer": page_url})
-        url=data.headers['location']
+        response = httptools.downloadpage(page_url, follow_redirects = False, cookies=False, headers={"Referer": page_url})
+        url=response.headers['location']
+        cookies = ""
+        cookie = response.headers["set-cookie"].split("HttpOnly, ")
+        for c in cookie:
+            cookies += c.split(";", 1)[0] + "; "
+        data = response.data.decode('unicode-escape')
+        data = urllib.unquote_plus(urllib.unquote_plus(data))
+        headers_string = "|Cookie=" + cookies
 
         quality = scrapertools.find_single_match (url, '.itag=(\d+).')
 
         streams.append((quality, url))
-        headers_string=""
 
     else:
         response = httptools.downloadpage(page_url, cookies=False, headers={"Referer": page_url})
```
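The googleusercontent branch now forwards the session cookies to the player by collapsing every `Set-Cookie` entry into a single `|Cookie=` suffix, which Kodi accepts appended to a media URL. The cookie-flattening step on its own, as a sketch; the `"HttpOnly, "` split mirrors how the combined header arrives in this code:

```python
def cookie_header(set_cookie_value):
    """Flatten a combined Set-Cookie header into 'name=value; ' pairs,
    formatted as the |Cookie= URL suffix Kodi understands."""
    cookies = ""
    for part in set_cookie_value.split("HttpOnly, "):
        cookies += part.split(";", 1)[0] + "; "
    return "|Cookie=" + cookies

hdr = "SID=abc123; Path=/; HttpOnly, HSID=def456; Path=/; Secure"
print(cookie_header(hdr))  # -> '|Cookie=SID=abc123; HSID=def456; '
```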
```diff
@@ -8,7 +8,6 @@ from platformcode import logger
 
 def test_video_exists(page_url):
     logger.info("(page_url='%s')" % page_url)
-
     data = httptools.downloadpage(page_url).data
     if "Not Found" in data:
         return False, "[streamixcloud] El archivo no existe o ha sido borrado"
@@ -21,7 +20,8 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
     logger.info("(page_url='%s')" % page_url)
     data = httptools.downloadpage(page_url).data
     video_urls = []
-    packed = scrapertools.find_single_match(data,
+    patron = "<script type='text/javascript'>(eval\(function\(p,a,c,k,e,d.*?)</script"
+    packed = scrapertools.find_single_match(data, patron)
     data = jsunpack.unpack(packed)
     media_url = scrapertools.find_multiple_matches(data, '\{file:"([^"]+)",')
     ext = scrapertools.get_filename_from_url(media_url[0])[-4:]
```
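Hosters like this one hide the playlist inside Dean Edwards style `eval(function(p,a,c,k,e,d)...)` packed JavaScript: the page is matched for the packed blob, run through `jsunpack.unpack`, and the plain `{file:"..."}` entries are scraped from the result. The flow in outline, as a sketch that takes the unpacker as a parameter (e.g. the addon's bundled jsunpack module) rather than importing it:

```python
import re

def extract_media(html, unpack):
    """Find a packed eval(function(p,a,c,k,e,d)...) blob, unpack it with the
    supplied unpacker, and pull the {file:"..."} media URLs out of the result."""
    patron = r"<script type='text/javascript'>(eval\(function\(p,a,c,k,e,d.*?)</script"
    match = re.search(patron, html, re.S)
    if not match:
        return []
    unpacked = unpack(match.group(1))
    return re.findall(r'\{file:"([^"]+)",', unpacked)
```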
```diff
@@ -3,8 +3,8 @@
     "find_videos": {
         "patterns": [
           {
-            "pattern": "(http://vshare.io/v/[\\w]+[^\"']*)[\"']",
-            "url": "\\1"
+            "pattern": "(vshare.io/v/[a-zA-Z0-9/-]+)",
+            "url": "http://\\1"
           }
         ]
       },
```
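Server JSON files declare `find_videos` patterns: a regex applied to page data plus a template that rebuilds the playable URL from the captured group. Dropping the scheme from the pattern and re-adding it in the `url` template, as above, lets one rule match both `http://` and protocol-relative (`//vshare.io/...`) embeds. Roughly what a dispatcher does with such an entry — a sketch, not the addon's actual loader:

```python
import re

def find_videos(data, pattern, url_template):
    """Apply one find_videos rule: regex-match the page, then substitute
    each captured group into the URL template's \\1 placeholder."""
    urls = []
    for match in re.findall(pattern, data):
        urls.append(url_template.replace("\\1", match))
    return urls

page = '<iframe src="//vshare.io/v/abc123/width-650/height-390/"></iframe>'
print(find_videos(page, r"(vshare.io/v/[a-zA-Z0-9/-]+)", "http://\\1"))
```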
```diff
@@ -40,11 +40,8 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
     arrayResult = [chr(int(value) - substract) for value in fields.group(1).split(",")]
     strResult = "".join(arrayResult)
     logger.debug(strResult)
 
     videoSources = re.findall("<source[\s]+src=[\"'](?P<url>[^\"']+)[^>]+label=[\"'](?P<label>[^\"']+)", strResult)
 
     for url, label in videoSources:
-        logger.debug("[" + label + "] " + url)
         video_urls.append([label, url])
+    video_urls.sort(key=lambda i: int(i[0].replace("p","")))
     return video_urls
```
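Sorting `video_urls` by the numeric part of labels like "360p"/"720p" makes the qualities list in ascending order instead of page order. The key function in isolation, as a sketch:

```python
def sort_by_quality(video_urls):
    """Sort [label, url] pairs in place by the number in labels such as '720p'."""
    video_urls.sort(key=lambda entry: int(entry[0].replace("p", "")))
    return video_urls

print(sort_by_quality([["720p", "u1"], ["360p", "u2"], ["1080p", "u3"]]))
# -> [['360p', 'u2'], ['720p', 'u1'], ['1080p', 'u3']]
```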