Merge remote-tracking branch 'alfa-addon/master'

This commit is contained in:
Unknown
2017-10-04 21:27:32 -03:00
85 changed files with 85 additions and 129 deletions

View File

@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
-<addon id="plugin.video.alfa" name="Alfa" version="2.1.3" provider-name="Alfa Addon">
+<addon id="plugin.video.alfa" name="Alfa" version="2.2.0" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -12,19 +12,18 @@
<assets>
<icon>icon.png</icon>
<fanart>fanart.jpg</fanart>
-<screenshot>resources/media/general/ss/1.jpg</screenshot>
-<screenshot>resources/media/general/ss/2.jpg</screenshot>
-<screenshot>resources/media/general/ss/3.jpg</screenshot>
-<screenshot>resources/media/general/ss/4.jpg</screenshot>
+<screenshot>resources/media/themes/ss/1.jpg</screenshot>
+<screenshot>resources/media/themes/ss/2.jpg</screenshot>
+<screenshot>resources/media/themes/ss/3.jpg</screenshot>
+<screenshot>resources/media/themes/ss/4.jpg</screenshot>
</assets>
<news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
-» repelis » allcalidad
-» cinetux » allpeliculas
-» hdfull » zonatorrent
-¤ arreglos internos
-[COLOR green]Gracias a [COLOR yellow]prpeaprendiz[/COLOR] por su colaboración en esta versión[/COLOR]
+» playmax » allcalidad
+» cinetux » allpeliculas
+» pedropolis » pelisplanet
+» flashx » gvideo
+¤ selector de temas ¤ arreglos internos
</news>
<description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>
<summary lang="en">Browse web pages using Kodi</summary>

View File

@@ -94,8 +94,9 @@ def peliculas(item):
idioma = scrapertools.find_single_match(varios, '(?s)Idioma.*?kinopoisk">([^<]+)')
year = scrapertools.find_single_match(varios, 'Año.*?kinopoisk">([^<]+)')
year = scrapertools.find_single_match(year, '[0-9]{4}')
mtitulo = titulo + " (" + idioma + ") (" + year + ")"
mtitulo = titulo + " (" + idioma + ")"
if year:
mtitulo += " (" + year + ")"
item.infoLabels['year'] = int(year)
itemlist.append(item.clone(channel = item.channel,
action = "findvideos",
@@ -107,9 +108,8 @@ def peliculas(item):
contentType="movie",
language = idioma
))
-tmdb.set_infoLabels(itemlist, True)
-url_pagina = scrapertools.find_single_match(data, 'next" href="([^"]+)')
+tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
+url_pagina = scrapertools.find_single_match(data, 'next" href="([^"]+)')
if url_pagina != "":
pagina = "Pagina: " + scrapertools.find_single_match(url_pagina, "page/([0-9]+)")
itemlist.append(Item(channel = item.channel, action = "peliculas", title = pagina, url = url_pagina))
@@ -138,7 +138,7 @@ def findvideos(item):
title = titulo,
url = url
))
-tmdb.set_infoLabels(itemlist, True)
+tmdb.set_infoLabels(itemlist, __modo_grafico__)
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
if itemlist:
itemlist.append(Item(channel = item.channel))
@@ -148,7 +148,8 @@ def findvideos(item):
if item.extra != "library":
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail
action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
fulltitle = item.fulltitle
))
return itemlist
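
The peliculas() change above only appends the scraped year to the display title when the regular expression actually matched, and the findvideos() hunk swaps the hardcoded True for the channel's __modo_grafico__ flag in the TMDB call. A minimal sketch of the title guard, using a simplified stand-in for scrapertools.find_single_match and hypothetical sample data:

import re

def find_single_match(text, pattern):
    # simplified stand-in for scrapertools.find_single_match: first capture, "" on no match
    found = re.findall(pattern, text, re.DOTALL)
    return found[0] if found else ""

varios = 'Idioma <a class="kinopoisk">Castellano</a> ... kinopoisk">2016</a>'  # hypothetical scraped block
titulo = "Alguna pelicula"
idioma = find_single_match(varios, '(?s)Idioma.*?kinopoisk">([^<]+)')
year = find_single_match(varios, '[0-9]{4}')

infoLabels = {}
mtitulo = titulo + " (" + idioma + ")"
if year:
    # skip both the "(year)" suffix and the int() cast when nothing was scraped
    mtitulo += " (" + year + ")"
    infoLabels['year'] = int(year)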

View File

@@ -82,6 +82,7 @@ def findvideos(item):
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
+fulltitle = item.fulltitle
))
return itemlist

View File

@@ -7,7 +7,7 @@ from core import tmdb
from core.item import Item
from platformcode import config, logger
CHANNEL_HOST = "http://www.cinetux.net/"
CHANNEL_HOST = "http://www.cinetux.io/"
# Configuracion del canal
__modo_grafico__ = config.get_setting('modo_grafico', 'cinetux')
@@ -36,7 +36,7 @@ def mainlist(item):
thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres"
"/0/Directors%20Chair.png",
text_color=color1))
itemlist.append(item.clone(action="destacadas", title=" Destacadas", url="http://www.cinetux.net/mas-vistos/",
itemlist.append(item.clone(action="destacadas", title=" Destacadas", url=CHANNEL_HOST + "mas-vistos/",
thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres"
"/0/Favorites.png",
text_color=color1))
@@ -69,7 +69,7 @@ def configuracion(item):
def search(item, texto):
logger.info()
item.url = "http://www.cinetux.net/?s="
item.url = CHANNEL_HOST + "?s="
texto = texto.replace(" ", "+")
item.url = item.url + texto
try:
@@ -147,7 +147,7 @@ def peliculas(item):
scrapedtitle += " [%s]" % quality
new_item = item.clone(action="findvideos", title=scrapedtitle, fulltitle=fulltitle,
url=scrapedurl, thumbnail=scrapedthumbnail,
contentTitle=fulltitle, contentType="movie", quality=quality)
contentType="movie", quality=quality)
if year:
new_item.infoLabels['year'] = int(year)
itemlist.append(new_item)
@@ -177,11 +177,11 @@ def destacadas(item):
patron += '.*?src="([^"]+)'
matches = scrapertools.find_multiple_matches(bloque, patron)
for scrapedtitle, scrapedurl, scrapedthumbnail in matches:
scrapedurl = "http://www.cinetux.net" + scrapedurl
scrapedurl = CHANNEL_HOST + scrapedurl
scrapedtitle = scrapedtitle.replace("Ver ", "")
new_item = item.clone(action="findvideos", title=scrapedtitle, fulltitle=scrapedtitle,
url=scrapedurl, thumbnail=scrapedthumbnail,
contentTitle=scrapedtitle, contentType="movie")
contentType="movie")
itemlist.append(new_item)
# Extrae el paginador
@@ -241,14 +241,7 @@ def findvideos(item):
data = httptools.downloadpage(item.url).data
year = scrapertools.find_single_match(item.title, "\(([0-9]+)")
if year and item.extra != "library":
item.infoLabels['year'] = int(year)
-# Ampliamos datos en tmdb
-if not item.infoLabels['plot']:
-try:
-tmdb.set_infoLabels(item, __modo_grafico__)
-except:
-pass
+tmdb.set_infoLabels(item, __modo_grafico__)
+if not item.infoLabels.get('plot'):
plot = scrapertools.find_single_match(data, '<div class="sinopsis"><p>(.*?)</p>')
@@ -274,7 +267,7 @@ def findvideos(item):
if item.extra != "library":
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
action="add_pelicula_to_library", url=item.url
action="add_pelicula_to_library", url=item.url, fulltitle = item.fulltitle
))
else:
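
The cinetux edits above replace every hardcoded http://www.cinetux.net/ URL with the module-level CHANNEL_HOST constant (now pointing at cinetux.io), so the next domain move is a one-line change. A small sketch of that pattern; the helper names are hypothetical, only CHANNEL_HOST comes from the diff:

CHANNEL_HOST = "http://www.cinetux.io/"

def search_url(texto):
    # mirrors search(): spaces become '+' and the query is appended to the host
    return CHANNEL_HOST + "?s=" + texto.replace(" ", "+")

def section_url(path):
    # e.g. section_url("mas-vistos/") -> "http://www.cinetux.io/mas-vistos/"
    return CHANNEL_HOST + path.lstrip("/")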

View File

@@ -203,7 +203,7 @@ def sub_search(item):
action=action, infoLabels={"year": year}, contentType=contentType,
thumbnail=scrapedthumbnail, text_color=color1, contentSerieName=scrapedtitle))
-tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
+tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
paginacion = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />')
if paginacion:

View File

@@ -37,6 +37,7 @@ if __perfil__ < 3:
else:
color1 = color2 = color3 = color4 = color5 = ""
def mainlist(item):
logger.info()
itemlist = []
@@ -178,22 +179,21 @@ def peliculas(item):
for scrapedurl, calidad, year, scrapedtitle, scrapedthumbnail in matches:
datas = httptools.downloadpage(scrapedurl).data
datas = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", datas)
-#logger.info(datas)
+# logger.info(datas)
if '/ ' in scrapedtitle:
scrapedtitle = scrapedtitle.partition('/ ')[2]
contentTitle = scrapertools.find_single_match(datas, '<em class="pull-left">Titulo original: </em>([^<]+)</p>')
contentTitle = scrapertools.decodeHtmlentities(contentTitle.strip())
rating = scrapertools.find_single_match(datas, 'alt="Puntaje MPA IMDb" /></a><span>([^<]+)</span>')
-director = scrapertools.find_single_match(datas, '<div class="list-cast-info tableCell"><a href="[^"]+" rel="tag">([^<]+)</a></div>')
+director = scrapertools.find_single_match(
+datas, '<div class="list-cast-info tableCell"><a href="[^"]+" rel="tag">([^<]+)</a></div>')
title = "%s [COLOR yellow][%s][/COLOR]" % (scrapedtitle.strip(), calidad.upper())
new_item = Item(channel=item.channel, action="findvideos", title=title, plot='', contentType='movie',
-url=scrapedurl, contentQuality=calidad, thumbnail=scrapedthumbnail,
-contentTitle=contentTitle, infoLabels={"year": year, 'rating': rating, 'director': director},
-text_color=color3)
+url=scrapedurl, contentQuality=calidad, thumbnail=scrapedthumbnail,
+contentTitle=contentTitle, infoLabels={"year": year, 'rating': rating, 'director': director},
+text_color=color3)
-# tmdb.set_infoLabels(itemlist, __modo_grafico__)
+# tmdb.set_infoLabels(itemlist, __modo_grafico__)
if year:
tmdb.set_infoLabels_item(new_item, __modo_grafico__)
itemlist.append(new_item)
@@ -202,8 +202,8 @@ def peliculas(item):
if paginacion:
itemlist.append(Item(channel=item.channel, action="peliculas",
title="» Siguiente »", url=paginacion, plot="Página Siguiente",
thumbnail='https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/next.png'))
title="» Siguiente »", url=paginacion, plot="Página Siguiente",
thumbnail='https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/next.png'))
for item in itemlist:
if item.infoLabels['plot'] == '':
@@ -250,80 +250,32 @@ def findvideos(item):
matches = re.compile(patron, re.DOTALL).findall(datas)
for scrapedurl, lang, servidores in matches:
# doc_url = ''
doc_id = ''
video_urls = []
if 'drive' in scrapedurl:
doc_id = httptools.downloadpage(scrapedurl).data
doc_id = scrapertools.find_single_match(doc_id, "docid=(\w+)")
elif 'youtube' in scrapedurl:
doc_id = scrapertools.find_single_match(scrapedurl, "docid=(\w+)")
doc_url = "http://docs.google.com/get_video_info?docid=%s" % doc_id
response = httptools.downloadpage(doc_url, cookies=False)
cookies = ""
cookie = response.headers["set-cookie"].split("HttpOnly, ")
for c in cookie:
cookies += c.split(";", 1)[0] + "; "
data = response.data.decode('unicode-escape')
data = urllib.unquote_plus(urllib.unquote_plus(data))
headers_string = "|Cookie=" + cookies
url_streams = scrapertools.find_single_match(data, 'url_encoded_fmt_stream_map=(.*)')
streams = scrapertools.find_multiple_matches(url_streams,
'itag=(\d+)&url=(.*?)(?:;.*?quality=.*?(?:,|&)|&quality=.*?(?:,|&))')
itags = {'18':'360p', '22':'720p', '34':'360p', '35':'480p', '37':'1080p', '43':'360p', '59':'480p'}
for itag, video_url in streams:
video_url += headers_string
video_urls.append([video_url, itags[itag]])
for video_item in video_urls:
calidad = video_item[1]
title = '%s [COLOR green](%s)[/COLOR] [COLOR green]([/COLOR][COLOR black]You[/COLOR][COLOR red]tube[/COLOR][COLOR green])[/COLOR]'%(item.contentTitle, calidad)
url = video_item[0]
itemlist.append(
item.clone(channel=item.channel,
action='play',
title=title,
url= url,
thumbnail=item.thumbnail,
quality = calidad,
plot=item.plot,
fanart=item.fanart,
contentTitle=item.contentTitle,
language=lang.replace('Español ', ''),
server='directo',
context = item.context
))
itemlist.sort(key=lambda it: it.title, reverse=True)
if 'pelispp.com' in scrapedurl or 'ultrapelis' in scrapedurl:
servidores = servidores.lower().strip()
if 'streamvips' or 'mediastream' or 'ultrastream' in servidores:
data = httptools.downloadpage(scrapedurl, headers=headers).data
patronr = 'file: "([^"]+)",label:"([^"]+)",type'
patronr = "file:'([^']+)',label:'([^']+)',type"
matchesr = re.compile(patronr, re.DOTALL).findall(data)
for scrapedurl, label in matchesr:
url = scrapedurl.replace('\\', '')
quality = label.decode('cp1252').encode('utf8')
title = item.contentTitle + ' (' + str(label) + ') ([COLOR blue]G[/COLOR][COLOR red]o[/COLOR][COLOR yellow]o[/COLOR][COLOR blue]g[/COLOR][COLOR green]l[/COLOR][COLOR red]e[/COLOR])'
thumbnail = item.thumbnail
fanart = item.fanart
itemlist.append(item.clone(action="play", title=title, url=url, server='directo',
thumbnail=thumbnail, fanart=fanart, extra='directo',
quality=quality, language=lang.replace('Español ', '')))
title = 'Ver en: [COLOR yellowgreen][%s][/COLOR] [COLOR yellow][%s][/COLOR]' % (servidores.title(),
item.contentQuality.upper())
itemlist.append(item.clone(action="play", title=title, url=scrapedurl, server='directo',
thumbnail=item.thumbnail, fanart=item.fanart, extra='directo',
quality=item.contentQuality, language=lang.replace('Español ', '')))
itemlist.sort(key=lambda it: it.title, reverse=True)
# if 'youtube' not in scrapedurl:
servidores.lower()
if 'drive' not in scrapedurl and 'pelispp.com' not in scrapedurl and 'youtube' not in scrapedurl and 'streamplus' not in servidores:
quality = scrapertools.find_single_match(
datas, '<p class="hidden-xs hidden-sm">.*?class="magnet-download">([^<]+)p</a>')
title = "[COLOR green]%s[/COLOR] [COLOR yellow][%s][/COLOR] [COLOR yellow][%s][/COLOR]" % (
item.contentTitle, quality.upper(), servidores.capitalize())
url = scrapedurl.replace('\\', '')
thumbnail = item.thumbnail
server = servertools.get_server_from_url(url)
itemlist.append(item.clone(action='play', title=title, url=url, quality=quality, language=lang.replace('Español ', ''),
server=server, text_color=color3, thumbnail=thumbnail))
if 'drive' not in servidores and 'streamvips' not in servidores and 'mediastream' not in servidores:
if 'ultrastream' not in servidores:
server = servertools.get_server_from_url('scrapedurl')
quality = scrapertools.find_single_match(
datas, '<p class="hidden-xs hidden-sm">.*?class="magnet-download">([^<]+)p</a>')
title = "Ver en: [COLOR yellowgreen][{}][/COLOR] [COLOR yellow][{}][/COLOR]".format(servidores.capitalize(),
quality.upper())
itemlist.append(item.clone(action='play', title=title, url='url', quality=item.quality,
server=server, language=lang.replace('Español ', ''),
text_color=color3, thumbnail=item.thumbnail))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel,

View File

@@ -63,6 +63,14 @@
"enabled": true,
"visible": true
},
+{
+"id": "include_in_newest_infantiles",
+"type": "bool",
+"label": "Incluir en Novedades - Infantiles",
+"default": false,
+"enabled": true,
+"visible": true
+},
{
"id": "include_in_newest_series",
"type": "bool",

View File

@@ -211,7 +211,7 @@ def newest(categoria):
elif categoria == 'infantiles':
item.channel = "playmax"
item.extra = "newest"
item.url = host + "/catalogo.php?tipo[]=2&genero[]=60&ad=2&ordenar=novedades&con_dis=on"
item.url = host + "/catalogo.php?tipo[]=2&generos[]=60&ad=2&ordenar=novedades&con_dis=on"
item.contentType = "movie"
itemlist = fichas(item)
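
The one-character fix above renames the query parameter genero[] to generos[], which is what playmax's catalogo.php apparently expects for the children's category. A sketch of how the same URL can be assembled from its parts; the host value is an assumption, the parameter names and values are taken from the diff:

host = "https://playmax.mx"   # assumption: the channel module defines its own host constant
params = [
    ("tipo[]", "2"),
    ("generos[]", "60"),      # plural form, as in the fix above; the old code used "genero[]"
    ("ad", "2"),
    ("ordenar", "novedades"),
    ("con_dis", "on"),
]
url = host + "/catalogo.php?" + "&".join("%s=%s" % (k, v) for k, v in params)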

View File

@@ -202,14 +202,12 @@ def filterchannels(category, view="thumb_"):
def get_thumb(thumb_name, view="thumb_"):
path = os.path.join(config.get_runtime_path(), "resources", "media", "general")
+icon_pack_name = config.get_setting('icon_set')
+if icon_pack_name == "default":
+resource_path = os.path.join(config.get_runtime_path(), "resources", "media", "themes")
+else:
+resource_path = "https://raw.githubusercontent.com/alfa-addon/media/master/themes/"
-# if config.get_setting("icons"): # TODO obtener de la configuración el pack de thumbs seleccionado
-# selected_icon = config.get_setting("icons")
-# else:
-# selected_icon = os.sep + "default"
+media_path = os.path.join(resource_path, icon_pack_name)
-selected_icon = os.sep + "default"
-web_path = path + selected_icon + os.sep
-return os.path.join(web_path, view + thumb_name)
+return os.path.join(media_path, view + thumb_name)
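
With the new icon_set setting, get_thumb() now resolves thumbnails from an icon pack: the bundled default pack under resources/media/themes, anything else from the alfa-addon media repository on GitHub. A sketch of that resolution with the pack name passed in as a parameter (the real function reads it via config.get_setting('icon_set')) and a stand-in runtime path:

import os

RUNTIME_PATH = "/storage/.kodi/addons/plugin.video.alfa"   # stand-in for config.get_runtime_path()
REMOTE_THEMES = "https://raw.githubusercontent.com/alfa-addon/media/master/themes/"

def get_thumb(thumb_name, icon_pack_name, view="thumb_"):
    if icon_pack_name == "default":
        # the default pack ships with the addon
        resource_path = os.path.join(RUNTIME_PATH, "resources", "media", "themes")
    else:
        # other packs (e.g. "dark") are loaded from the remote media repository
        resource_path = REMOTE_THEMES
    media_path = os.path.join(resource_path, icon_pack_name)
    return os.path.join(media_path, view + thumb_name)

# get_thumb("channels.png", "default") -> .../resources/media/themes/default/thumb_channels.png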

Binary file not shown (new image, 836 KiB).

View File

@@ -2,7 +2,7 @@
# ------------------------------------------------------------
# platformtools
# ------------------------------------------------------------
-# Herramientas responsables de adaptar los diferentes
+# Herramientas responsables de adaptar los diferentes
# cuadros de dialogo a una plataforma en concreto,
# en este caso Kodi.
# version 2.0
@@ -135,7 +135,7 @@ def render_items(itemlist, parent_item):
if item.fanart:
fanart = item.fanart
else:
fanart = os.path.join(config.get_runtime_path(), "fanart.jpg")
fanart = os.path.join(config.get_runtime_path(), "fanart1.jpg")
# Creamos el listitem
listitem = xbmcgui.ListItem(item.title)

View File (51 binary image files; before/after dimensions and sizes are identical, previews omitted)
View File

@@ -38,13 +38,16 @@
<setting id="videolibrary_kodi_flag" type="number" label="" default="0" visible="false"/>
<setting id="videolibrary_kodi" type="bool" label="Autoconfigurar videoteca de XBMC/Kodi para contenido de Alfa" enable="lt(-1,2)+eq(0,false)" default="false"/>
</category>
<category label="Opciones Visuales">
<setting id="icon_set" type="labelenum" label="Set de iconos" lvalues="default|dark" default="default"/>
</category>
<category label="Otros">
<setting label="Info de películas/series en menú contextual" type="lsep"/>
<setting id="infoplus" type="bool" label="Mostrar opción Infoplus:" default="true"/>
<setting id="extended_info" type="bool" label="Mostrar opción ExtendedInfo (Necesario addon externo):" default="false"/>
<setting label="Botones/Teclas de acceso (Cambios requieren reiniciar Kodi)" type="lsep"/>
<setting id="shortcut_key" type="action" label="30999" action="RunPlugin(plugin://plugin.video.alfa/?ew0KICAgICJhY3Rpb24iOiAia2V5bWFwIg0KfQ==)" />
</category>
</settings>

View File

@@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
import base64
import os
import time
import urllib
@@ -37,17 +36,18 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
flashx_id = scrapertools.find_single_match(data, 'name="id" value="([^"]+)"')
fname = scrapertools.find_single_match(data, 'name="fname" value="([^"]+)"')
hash_f = scrapertools.find_single_match(data, 'name="hash" value="([^"]+)"')
post = 'op=download1&usr_login=&id=%s&fname=%s&referer=&hash=%s&imhuman=Proceed to the video' % (
flashx_id, urllib.quote(fname), hash_f)
imhuman = scrapertools.find_single_match(data, "value='([^']+)' name='imhuman'")
post = 'op=download1&usr_login=&id=%s&fname=%s&referer=&hash=%s&imhuman=%s' % (
flashx_id, urllib.quote(fname), hash_f, imhuman)
wait_time = scrapertools.find_single_match(data, "<span id='xxc2'>(\d+)")
headers['Referer'] = "https://www.flashx.tv/"
headers['Accept'] = "*/*"
headers['Host'] = "www.flashx.tv"
-coding_url = 'https://www.flashx.tv/flashx.php?fxfx=5'
+coding_url = 'https://www.flashx.tv/flashx.php?fxfx=7'
headers['X-Requested-With'] = 'XMLHttpRequest'
-httptools.downloadpage(coding_url, headers=headers, replace_headers=True)
+httptools.downloadpage(coding_url, headers=headers)
try:
time.sleep(int(wait_time) + 1)
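
The flashx change above stops hardcoding the imhuman button text ("Proceed to the video") and instead scrapes it from the form, alongside the id, fname and hash fields and the advertised wait time; it also drops replace_headers=True on the fxfx request. A condensed sketch of the form reconstruction, using a simplified matcher in place of scrapertools.find_single_match:

import re
try:
    from urllib import quote            # Python 2, as on Kodi at the time
except ImportError:
    from urllib.parse import quote      # Python 3, so the sketch runs standalone

def first(data, pattern):
    # simplified stand-in for scrapertools.find_single_match
    found = re.findall(pattern, data)
    return found[0] if found else ""

def build_flashx_post(data):
    flashx_id = first(data, r'name="id" value="([^"]+)"')
    fname = first(data, r'name="fname" value="([^"]+)"')
    hash_f = first(data, r'name="hash" value="([^"]+)"')
    imhuman = first(data, r"value='([^']+)' name='imhuman'")      # scraped, not hardcoded
    wait_time = first(data, r"<span id='xxc2'>(\d+)")
    post = 'op=download1&usr_login=&id=%s&fname=%s&referer=&hash=%s&imhuman=%s' % (
        flashx_id, quote(fname), hash_f, imhuman)
    return post, int(wait_time or 0)

# the caller then waits the advertised time (time.sleep(wait + 1)) before posting the form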

View File

@@ -18,6 +18,8 @@ def test_video_exists(page_url):
return False, "[gvideo] Se ha excedido el número de reproducciones permitidas"
if "No+tienes+permiso" in response.data:
return False, "[gvideo] No tienes permiso para acceder a este video"
if "Se ha producido un error" in response.data:
return False, "[gvideo] Se ha producido un error en el reproductor de google"
return True, ""
@@ -43,7 +45,6 @@ def get_video_url(page_url, user="", password="", video_password=""):
cookies += c.split(";", 1)[0] + "; "
data = response.data.decode('unicode-escape')
data = urllib.unquote_plus(urllib.unquote_plus(data))
logger.info("Intel88 %s" %data)
headers_string = "|Cookie=" + cookies
url_streams = scrapertools.find_single_match(data, 'url_encoded_fmt_stream_map=(.*)')
streams = scrapertools.find_multiple_matches(url_streams,