38 Commits

Author SHA1 Message Date
alfa-addon
df0607ec90 v2.3.7 2017-11-14 18:45:27 -05:00
alfa-addon
d83a49743c fixed 2017-11-14 18:45:09 -05:00
Alfa
66762b2c46 Merge pull request #169 from danielr460/master
Channel fixes
2017-11-14 19:41:50 -03:00
Alfa
79c761206d Merge pull request #170 from Intel11/patch-3
Updates
2017-11-14 19:40:58 -03:00
Intel1
f04647f348 Update settings.xml 2017-11-14 12:43:31 -05:00
Intel1
0f81113225 Update infoplus.py 2017-11-14 12:42:02 -05:00
Intel1
169c09db16 Update pelisfox.py 2017-11-14 08:49:58 -05:00
Intel1
306bb6533d stormo: updated 2017-11-14 08:45:54 -05:00
Intel1
210e90cb96 Update help.py 2017-11-14 08:28:40 -05:00
Intel1
74e53f362b help: updated 2017-11-13 14:51:24 -05:00
Intel1
947cb7f51f crunchyroll: fix 2017-11-13 14:47:18 -05:00
Daniel Rincón Rodríguez
f88ca81ff5 Cosmetic fix 2017-11-13 14:07:12 -05:00
danielr460
42cd9ac14b Danimados: Added movies section 2017-11-13 10:07:01 -05:00
danielr460
b7520145bb Minor fixes 2017-11-13 09:31:15 -05:00
danielr460
209af696b2 Serieslan: Minor fixes to how the data is presented 2017-11-13 09:24:14 -05:00
Daniel Rincón Rodríguez
03589b9c39 Seodiv: Added tvdb and language fix 2017-11-13 00:35:48 -05:00
danielr460
a3337df4da Peliculashindu: Channel fixed 2017-11-12 19:21:03 -05:00
danielr460
acf7f9a27a Mundiseries: Cosmetic details 2017-11-12 19:19:53 -05:00
danielr460
8082e1b244 CartoonLatino: Cosmetic details 2017-11-12 18:08:11 -05:00
danielr460
9345115869 Asialiveaction: Removed the "add to videolibrary" link once the item has already been added 2017-11-12 17:58:43 -05:00
danielr460
7ae8b203b6 AnitoonsTV: cosmetic details 2017-11-12 17:52:34 -05:00
danielr460
56c16f2922 Removed nonexistent YouTube link 2017-11-12 17:44:40 -05:00
Alfa
7e47e3ae59 Merge pull request #162 from numa00009/patch-1
Update httptools.py
2017-11-12 13:20:01 -03:00
Alfa
9eef89d1b0 Merge pull request #164 from q1316480/sb-streamixcloud
Seriesblanco -> StreamixCloud
2017-11-12 13:19:35 -03:00
Alfa
2b3d81c9a0 Merge pull request #165 from danielr460/master
Minor fixes
2017-11-12 13:19:07 -03:00
Alfa
876b02b81f Merge pull request #168 from Intel11/patch-1
Updates
2017-11-12 13:18:26 -03:00
Intel1
8028290051 Update xbmc_config_menu.py 2017-11-12 10:22:16 -05:00
Intel1
78252d3452 gamovideo: fix 2017-11-12 09:59:33 -05:00
Intel1
9aa77400d5 hdfull: updated 2017-11-12 09:57:26 -05:00
danielr460
5d592f724d Serieslan: Updated 2017-11-10 14:04:07 -05:00
danielr460
d288031a83 Removed unnecessary code 2017-11-10 13:45:26 -05:00
Daniel Rincón Rodríguez
41a39ff02b Update anitoonstv.py 2017-11-10 11:16:35 -05:00
Daniel Rincón Rodríguez
0bad69a7cb Removed unnecessary lines 2017-11-10 11:15:54 -05:00
danielr460
74e6145d2f Fix netutv 2017-11-10 11:12:07 -05:00
q1316480
c344832c8c Fix: Remove "Ver en" and "Descargar en" 2017-11-10 01:21:05 +01:00
q1316480
a9caf59ce1 Seriesblanco -> StreamixCloud
https://github.com/alfa-addon/addon/issues/163
2017-11-10 00:57:26 +01:00
numa00009
770a2e215a Update httptools.py
Change Firefox headers to Chrome ones.
default_headers["User-Agent"] = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36"
default_headers["Accept"] = "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8"
2017-11-09 10:17:45 +01:00
alfa-addon
28d99deb48 v2.3.6 2017-11-09 03:57:01 -05:00
22 changed files with 429 additions and 324 deletions

View File

@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
-<addon id="plugin.video.alfa" name="Alfa" version="2.3.5" provider-name="Alfa Addon">
+<addon id="plugin.video.alfa" name="Alfa" version="2.3.7" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -19,8 +19,14 @@
</assets>
<news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
» seriesblanco » hdfull
» gamovideo ¤ arreglos internos
» anitoonstv » asialiveaction
» cinehindi » danimados
» mundiseries » pelisculashndu
» seodiv » serieslan
» crunchyroll » pelisfox
» stormo ¤ arreglos internos
[COLOR green]Gracias a [COLOR yellow]Danielr460, numa00009 y numa00009[/COLOR]
por su colaboración en esta versión[/COLOR]
</news>
<description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>
<summary lang="en">Browse web pages using Kodi</summary>

View File

@@ -127,11 +127,21 @@ def episodios(item):
plot=scrapedplot, url=url, show=show))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=item.url,
itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]", url=item.url,
action="add_serie_to_library", extra="episodios", show=show))
return itemlist
+def googl(url):
+logger.info()
+a=url.split("/")
+link=a[3]
+link="http://www.trueurl.net/?q=http%3A%2F%2Fgoo.gl%2F"+link+"&lucky=on&Uncloak=Find+True+URL"
+data_other = httptools.downloadpage(link).data
+data_other = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data_other)
+patron='<td class="withbg">Destination URL<\/td><td><A title="(.+?)"'
+trueurl = scrapertools.find_single_match(data_other, patron)
+return trueurl
def findvideos(item):
logger.info()
@@ -147,10 +157,10 @@ def findvideos(item):
scrapedthumbnail = scrapertools.find_single_match(data, '<div class="caracteristicas"><img src="([^<]+)">')
itemla = scrapertools.find_multiple_matches(data_vid, '<div class="serv">.+?-(.+?)-(.+?)<\/div><.+? src="(.+?)"')
for server, quality, url in itemla:
if "Calidad Alta" in quality:
quality = "HQ"
if "HQ" in quality:
quality = "HD"
if "Calidad Alta" in quality:
quality = "HQ"
if " Calidad media - Carga mas rapido" in quality:
quality = "360p"
server = server.lower().strip()
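The reorder above matters because the checks chain: with the old order, a scraped label of "Calidad Alta" was first mapped to "HQ" and then immediately re-mapped to "HD" by the following check. A minimal sketch of the chaining bug (label values illustrative):

quality = "Calidad Alta"
# Old order: both branches fire in sequence.
if "Calidad Alta" in quality:
    quality = "HQ"
if "HQ" in quality:       # True now, because the previous check just set it
    quality = "HD"        # "Calidad Alta" wrongly ends up labelled "HD"
# New order: test "HQ" first, so each scraped label is mapped exactly once.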
@@ -160,6 +170,7 @@ def findvideos(item):
server = 'rapidvideo'
if "netu" in server:
server = 'netutv'
+url = googl(url)
itemlist.append(item.clone(url=url, action="play", server=server, contentQuality=quality,
thumbnail=scrapedthumbnail, plot=scrapedplot,
title="Enlace encontrado en: %s [%s]" % (server.capitalize(), quality)))
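The new googl() helper resolves goo.gl short links through trueurl.net before the link is handed to the netutv server. A hedged usage sketch (the short URL is invented for illustration):

url = "http://goo.gl/AbC123"     # hypothetical shortened netu embed link
url.split("/")[3]                # -> 'AbC123', the code googl() extracts
real_url = googl(url)            # scrapes trueurl.net's 'Destination URL' row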

View File

@@ -180,7 +180,7 @@ def findvideos(item):
show = item.show
for videoitem in itemlist:
videoitem.channel = item.channel
-if config.get_videolibrary_support() and len(itemlist) > 0 and item.contentType=="movie":
+if config.get_videolibrary_support() and len(itemlist) > 0 and item.contentType=="movie" and item.contentChannel!='videolibrary':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
action="add_pelicula_to_library", extra="findvideos", contentTitle=show))

View File

@@ -150,7 +150,7 @@ def episodios(item):
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, show=show))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="Añadir " + show + " a la videoteca", url=item.url,
itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir " + show + " a la videoteca[/COLOR]", url=item.url,
action="add_serie_to_library", extra="episodios", show=show))
return itemlist

View File

@@ -123,7 +123,7 @@ def lista(item):
if next_page_url != "":
item.url = next_page_url
itemlist.append(Item(channel=item.channel, action="lista", title=">> Página siguiente", url=next_page_url,
itemlist.append(Item(channel=item.channel, action="lista", title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=next_page_url,
thumbnail='https://s32.postimg.org/4zppxf5j9/siguiente.png'))
return itemlist
@@ -132,14 +132,18 @@ def findvideos(item):
logger.info()
itemlist = []
+itemlist1 = []
data = httptools.downloadpage(item.url).data
-itemlist.extend(servertools.find_video_items(data=data))
+itemlist1.extend(servertools.find_video_items(data=data))
patron_show = '<div class="data"><h1 itemprop="name">([^<]+)<\/h1>'
show = scrapertools.find_single_match(data, patron_show)
-for videoitem in itemlist:
+for videoitem in itemlist1:
videoitem.channel = item.channel
-if config.get_videolibrary_support() and len(itemlist) > 0:
+for i in range(len(itemlist1)):
+if not 'youtube' in itemlist1[i].title:
+itemlist.append(itemlist1[i])
+if config.get_videolibrary_support() and len(itemlist) > 0 and item.contentChannel!='videolibrary':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
action="add_pelicula_to_library", extra="findvideos", contentTitle=show))

View File

@@ -168,11 +168,11 @@ def episodios(item):
data = httptools.downloadpage(item.url).data
data = re.sub(r'\n|\t|\s{2,}', '', data)
patron = '<li id="showview_videos.*?href="([^"]+)".*?(?:src|data-thumbnailUrl)="([^"]+)".*?media_id="([^"]+)" ' \
'style="width: (.*?)%.*?<span class="series-title.*?>\s*(.*?)</span>.*?<p class="short-desc".*?>' \
patron = '<li id="showview_videos.*?href="([^"]+)".*?(?:src|data-thumbnailUrl)="([^"]+)".*?media_id="([^"]+)"' \
'style="width:(.*?)%.*?<span class="series-title.*?>\s*(.*?)</span>.*?<p class="short-desc".*?>' \
'\s*(.*?)</p>.*?description":"([^"]+)"'
if data.count('class="season-dropdown') > 1:
-bloques = scrapertools.find_multiple_matches(data, 'class="season-dropdown[^"]+" title="([^"]+)"(.*?)</ul>')
+bloques = scrapertools.find_multiple_matches(data, 'class="season-dropdown[^"]+".*?title="([^"]+)"(.*?)</ul>')
for season, b in bloques:
matches = scrapertools.find_multiple_matches(b, patron)
if matches:
@@ -209,7 +209,6 @@ def episodios(item):
Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumb, media_id=media_id,
server="crunchyroll", text_color=item.text_color, contentTitle=item.contentTitle,
contentSerieName=item.contentSerieName, contentType="tvshow"))
return itemlist

View File

@@ -32,8 +32,8 @@ def mainlist(item):
thumbnail=thumb_series))
itemlist.append(Item(channel=item.channel, action="mainpage", title="Más Populares", url=host,
thumbnail=thumb_series))
-#itemlist.append(Item(channel=item.channel, action="movies", title="Peliculas Animadas", url=host,
-# thumbnail=thumb_series))
+itemlist.append(Item(channel=item.channel, action="lista", title="Peliculas Animadas", url=host+"peliculas/",
+thumbnail=thumb_series))
autoplay.show_option(item.channel, itemlist)
return itemlist
@@ -82,7 +82,6 @@ def mainpage(item):
return itemlist
-return itemlist
def lista(item):
logger.info()
@@ -90,15 +89,26 @@ def lista(item):
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
-data_lista = scrapertools.find_single_match(data, '<div class="items">(.+?)<\/div><\/div><div class=.+?>')
+if item.title=="Peliculas Animadas":
+data_lista = scrapertools.find_single_match(data,
+'<div id="archive-content" class="animation-2 items">(.*)<a href=\'')
+else:
+data_lista = scrapertools.find_single_match(data,
+'<div class="items">(.+?)<\/div><\/div><div class=.+?>')
patron = '<img src="([^"]+)" alt="([^"]+)">.+?<a href="([^"]+)">.+?<div class="texto">(.+?)<\/div>'
#scrapedthumbnail,#scrapedtitle, #scrapedurl, #scrapedplot
matches = scrapertools.find_multiple_matches(data_lista, patron)
for scrapedthumbnail,scrapedtitle, scrapedurl, scrapedplot in matches:
-itemlist.append(
-item.clone(title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
+if item.title=="Peliculas Animadas":
+itemlist.append(
+item.clone(title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, contentType="movie",
+plot=scrapedplot, action="findvideos", show=scrapedtitle))
+else:
+itemlist.append(
+item.clone(title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
context=autoplay.context,plot=scrapedplot, action="episodios", show=scrapedtitle))
-tmdb.set_infoLabels(itemlist)
+if item.title!="Peliculas Animadas":
+tmdb.set_infoLabels(itemlist)
return itemlist
@@ -124,7 +134,7 @@ def episodios(item):
action="findvideos", title=title, url=scrapedurl, show=show))
if config.get_videolibrary_support() and len(itemlist) > 0:
-itemlist.append(Item(channel=item.channel, title="[COLOR blue]Añadir " + show + " a la videoteca[/COLOR]", url=item.url,
+itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir " + show + " a la videoteca[/COLOR]", url=item.url,
action="add_serie_to_library", extra="episodios", show=show))
@@ -141,6 +151,7 @@ def findvideos(item):
data = scrapertools.find_single_match(data,
'<div id="playex" .+?>(.+?)<\/nav><\/div><\/div>')
patron='src="(.+?)"'
logger.info("assfxxv "+data)
itemla = scrapertools.find_multiple_matches(data,patron)
for i in range(len(itemla)):
#for url in itemla:
@@ -152,6 +163,8 @@ def findvideos(item):
server='okru'
else:
server=''
if "youtube" in url:
server='youtube'
if "openload" in url:
server='openload'
if "google" in url:
@@ -166,6 +179,10 @@ def findvideos(item):
title="NO DISPONIBLE"
if title!="NO DISPONIBLE":
itemlist.append(item.clone(title=title,url=url, action="play", server=server))
+if config.get_videolibrary_support() and len(itemlist) > 0 and item.contentType=="movie" and item.contentChannel!='videolibrary':
+itemlist.append(
+item.clone(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
+action="add_pelicula_to_library", contentTitle=item.show))
autoplay.start(itemlist, item)
return itemlist

View File

@@ -15,7 +15,6 @@ from platformcode import platformtools
host = "http://hdfull.tv"
A_A = {'User-Agent':'Mozilla/5.0 AppLeWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 SaFAri/537.36'}
if config.get_setting('hdfulluser', 'hdfull'):
account = True
else:
@@ -29,7 +28,7 @@ def settingCanal(item):
def login():
logger.info()
-data = agrupa_datos(httptools.downloadpage(host, headers=A_A).data)
+data = agrupa_datos(httptools.downloadpage(host).data)
patron = "<input type='hidden' name='__csrf_magic' value=\"([^\"]+)\" />"
sid = scrapertools.find_single_match(data, patron)
@@ -38,7 +37,7 @@ def login():
'hdfull') + "&password=" + config.get_setting(
'hdfullpassword', 'hdfull') + "&action=login"
-httptools.downloadpage(host, post=post, headers=A_A)
+httptools.downloadpage(host, post=post)
def mainlist(item):
@@ -138,7 +137,7 @@ def menuseries(item):
def search(item, texto):
logger.info()
-data = agrupa_datos(httptools.downloadpage(host, headers=A_A).data)
+data = agrupa_datos(httptools.downloadpage(host).data)
sid = scrapertools.get_match(data, '.__csrf_magic. value="(sid:[^"]+)"')
item.extra = urllib.urlencode({'__csrf_magic': sid}) + '&menu=search&query=' + texto
@@ -174,7 +173,7 @@ def items_usuario(item):
itemlist = []
## Carga estados
-status = jsontools.load(httptools.downloadpage(host + '/a/status/all', headers=A_A).data)
+status = jsontools.load(httptools.downloadpage(host + '/a/status/all').data)
## Fichas usuario
url = item.url.split("?")[0]
@@ -188,7 +187,7 @@ def items_usuario(item):
next_page = url + "?" + post
## Carga las fichas de usuario
-data = httptools.downloadpage(url, post=post, headers=A_A).data
+data = httptools.downloadpage(url, post=post).data
fichas_usuario = jsontools.load(data)
for ficha in fichas_usuario:
@@ -256,7 +255,7 @@ def listado_series(item):
itemlist = []
-data = agrupa_datos(httptools.downloadpage(item.url, headers=A_A).data)
+data = agrupa_datos(httptools.downloadpage(item.url).data)
patron = '<div class="list-item"><a href="([^"]+)"[^>]+>([^<]+)</a></div>'
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -276,10 +275,10 @@ def fichas(item):
textoidiomas=''
infoLabels=dict()
## Carga estados
-status = jsontools.load(httptools.downloadpage(host + '/a/status/all', headers=A_A).data)
+status = jsontools.load(httptools.downloadpage(host + '/a/status/all').data)
if item.title == "Buscar...":
-data = agrupa_datos(httptools.downloadpage(item.url, post=item.extra, headers=A_A).data)
+data = agrupa_datos(httptools.downloadpage(item.url, post=item.extra).data)
s_p = scrapertools.get_match(data, '<h3 class="section-title">(.*?)<div id="footer-wrapper">').split(
'<h3 class="section-title">')
@@ -291,7 +290,7 @@ def fichas(item):
else:
data = s_p[0] + s_p[1]
else:
-data = agrupa_datos(httptools.downloadpage(item.url, headers=A_A).data)
+data = agrupa_datos(httptools.downloadpage(item.url).data)
data = re.sub(
r'<div class="span-6[^<]+<div class="item"[^<]+' + \
@@ -363,12 +362,11 @@ def fichas(item):
def episodios(item):
logger.info()
-A_F = L_A
id = "0"
itemlist = []
## Carga estados
-status = jsontools.load(httptools.downloadpage(host + '/a/status/all', headers=A_A).data)
+status = jsontools.load(httptools.downloadpage(host + '/a/status/all').data)
url_targets = item.url
@@ -378,7 +376,7 @@ def episodios(item):
item.url = item.url.split("###")[0]
## Temporadas
-data = agrupa_datos(httptools.downloadpage(item.url, headers=A_A).data)
+data = agrupa_datos(httptools.downloadpage(item.url).data)
if id == "0":
## Se saca el id de la serie de la página cuando viene de listado_series
@@ -412,7 +410,7 @@ def episodios(item):
for scrapedurl in matches:
## Episodios
-data = agrupa_datos(httptools.downloadpage(scrapedurl, headers=A_A).data)
+data = agrupa_datos(httptools.downloadpage(scrapedurl).data)
sid = scrapertools.get_match(data, "<script>var sid = '(\d+)'")
ssid = scrapertools.get_match(scrapedurl, "temporada-(\d+)")
@@ -420,7 +418,7 @@ def episodios(item):
url = host + "/a/episodes"
-data = httptools.downloadpage(url, post=post, headers=A_A).data
+data = httptools.downloadpage(url, post=post).data
episodes = jsontools.load(data)
@@ -482,9 +480,10 @@ def episodios(item):
def novedades_episodios(item):
logger.info()
itemlist = []
## Carga estados
-status = jsontools.load(httptools.downloadpage(host + '/a/status/all', headers=A_A).data)
+status = jsontools.load(httptools.downloadpage(host + '/a/status/all').data)
## Episodios
url = item.url.split("?")[0]
@@ -496,7 +495,7 @@ def novedades_episodios(item):
post = post.replace("start=" + old_start, "start=" + start)
next_page = url + "?" + post
-data = httptools.downloadpage(url, post=post, headers=A_A).data
+data = httptools.downloadpage(url, post=post).data
episodes = jsontools.load(data)
@@ -568,7 +567,7 @@ def generos(item):
itemlist = []
-data = agrupa_datos(httptools.downloadpage(item.url, headers=A_A).data)
+data = agrupa_datos(httptools.downloadpage(item.url).data)
data = scrapertools.find_single_match(data, '<li class="dropdown"><a href="http://hdfull.tv/peliculas"(.*?)</ul>')
patron = '<li><a href="([^"]+)">([^<]+)</a></li>'
@@ -587,10 +586,10 @@ def generos(item):
def generos_series(item):
logger.info()
-A_F= L_A
itemlist = []
-data = agrupa_datos(httptools.downloadpage(item.url, headers=A_A).data)
+data = agrupa_datos(httptools.downloadpage(item.url).data)
data = scrapertools.find_single_match(data, '<li class="dropdown"><a href="http://hdfull.tv/series"(.*?)</ul>')
patron = '<li><a href="([^"]+)">([^<]+)</a></li>'
@@ -613,7 +612,7 @@ def findvideos(item):
it1 = []
it2 = []
## Carga estados
-status = jsontools.load(httptools.downloadpage(host + '/a/status/all', headers=A_A).data)
+status = jsontools.load(httptools.downloadpage(host + '/a/status/all').data)
url_targets = item.url
## Vídeos
@@ -642,10 +641,10 @@ def findvideos(item):
it1.append(Item(channel=item.channel, action="set_status", title=title, fulltitle=title, url=url_targets,
thumbnail=item.thumbnail, show=item.show, folder=True))
data_js = httptools.downloadpage("http://hdfull.tv/templates/hdfull/js/jquery.hdfull.view.min.js", headers=A_A).data
data_js = httptools.downloadpage("http://hdfull.tv/templates/hdfull/js/jquery.hdfull.view.min.js").data
key = scrapertools.find_single_match(data_js, 'JSON.parse\(atob.*?substrings\((.*?)\)')
data_js = httptools.downloadpage("http://hdfull.tv/js/providers.js", headers=A_A).data
data_js = httptools.downloadpage("http://hdfull.tv/js/providers.js").data
try:
data_js = jhexdecode(data_js)
except:
@@ -658,7 +657,7 @@ def findvideos(item):
data_js = re.sub(r':(function.*?\})', r':"\g<1>"', decode_aa)
data_js = re.sub(r':(var[^,]+),', r':"\g<1>",', data_js)
-data = agrupa_datos(httptools.downloadpage(item.url, headers=A_A).data)
+data = agrupa_datos(httptools.downloadpage(item.url).data)
data_obf = scrapertools.find_single_match(data, "var ad\s*=\s*'([^']+)'")
data_decrypt = jsontools.load(obfs(base64.b64decode(data_obf), 126 - int(key)))
@@ -723,7 +722,7 @@ def play(item):
type = item.url.split("###")[1].split(";")[1]
item.url = item.url.split("###")[0]
post = "target_id=%s&target_type=%s&target_status=1" % (id, type)
data = httptools.downloadpage(host + "/a/status", post=post, headers=A_A).data
data = httptools.downloadpage(host + "/a/status", post=post).data
devuelve = servertools.findvideosbyserver(item.url, item.server)
if devuelve:
@@ -786,7 +785,7 @@ def set_status(item):
path = "/a/favorite"
post = "like_id=" + id + "&like_type=" + type + "&like_comment=&vote=-1"
-data = httptools.downloadpage(host + path, post=post, headers=A_A).data
+data = httptools.downloadpage(host + path, post=post).data
title = "[COLOR green][B]OK[/B][/COLOR]"

View File

@@ -136,7 +136,7 @@ def faq(item):
"Puedes intentar subsanar estos problemas en 'Configuración'>'Ajustes de "
"la videoteca', cambiando el ajuste 'Realizar búsqueda de contenido en' "
"de 'La carpeta de cada serie' a 'Toda la videoteca'."
"También puedes acudir a 'http://alfa-addon.ga' en busca de ayuda.")
"También puedes acudir a 'http://alfa-addon.com' en busca de ayuda.")
return TextBox("DialogTextViewer.xml", os.getcwd(), "Default", title=title, text=text)
@@ -145,7 +145,7 @@ def faq(item):
text = ("Puede ser que la página web del canal no funcione. "
"En caso de que funcione la página web puede que no seas el primero"
" en haberlo visto y que el canal este arreglado. "
"Puedes mirar en 'alfa-addon.ga' o en el "
"Puedes mirar en 'alfa-addon.com' o en el "
"repositorio de GitHub (github.com/alfa-addon/addon). "
"Si no encuentras el canal arreglado puedes reportar un "
"problema en el foro.")
@@ -198,15 +198,20 @@ def faq(item):
log_name = "kodi.log"
ruta = xbmc.translatePath("special://logpath") + log_name
title = "Alfa - FAQ - %s" % item.title[6:]
text = ("Para reportar un problema en 'http://alfa-addon.ga' es necesario:\n"
text = ("Para reportar un problema en 'http://alfa-addon.com' es necesario:\n"
" - Versión que usas de Alfa.\n"
" - Versión que usas de kodi, mediaserver, etc.\n"
" - Versión y nombre del sistema operativo que usas.\n"
" - Nombre del skin (en el caso que uses Kodi) y si se "
"te ha resuelto el problema al usar el skin por defecto.\n"
" - Descripción del problema y algún caso de prueba.\n"
" - Agregar el log en modo detallado, una vez hecho esto, "
"zipea el log y lo puedes adjuntar en un post.\n\n"
"El log se encuentra en: \n\n"
"Para activar el log en modo detallado, ingresar a:\n"
" - Configuración.\n"
" - Preferencias.\n"
" - En la pestaña General - Marcar la opción: Generar log detallado.\n\n"
"El archivo de log detallado se encuentra en la siguiente ruta: \n\n"
"%s" % ruta)
return TextBox("DialogTextViewer.xml", os.getcwd(), "Default", title=title, text=text)
@@ -214,6 +219,6 @@ def faq(item):
else:
platformtools.dialog_ok("Alfa",
"Entérate de novedades, consejos u opciones que desconoces en Telegram: @alfa_addon.\n"
"Si tienes problemas o dudas, puedes acudir al Foro: http://alfa-addon.ga")
"Si tienes problemas o dudas, puedes acudir al Foro: http://alfa-addon.com")

File diff suppressed because it is too large.

View File

@@ -76,7 +76,7 @@ def episodios(item):
title=title, url=url, show=show))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="Añadir Temporada/Serie a la biblioteca de Kodi", url=item.url,
itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir Temporada/Serie a la biblioteca de Kodi[/COLOR]", url=item.url,
action="add_serie_to_library", extra="episodios", show=show))
return itemlist

View File

@@ -33,15 +33,14 @@ def mainlist(item):
def explorar(item):
logger.info()
itemlist = list()
-url1 = str(item.url)
+url1 = item.title
data = httptools.downloadpage(host).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
# logger.info("loca :"+url1+" aaa"+data)
if 'genero' in url1:
patron = '<div class="d"><h3>Pel.+?neros<\/h3>(.+?)<\/h3>'
if 'alfabetico' in url1:
patron = '<\/li><\/ul><h3>Pel.+?tico<\/h3>(.+?)<\/h3>'
if 'año' in url1:
if 'Género' in url1:
patron = '<div class="d">.+?<h3>Pel.+?neros<\/h3>(.+?)<\/h3>'
if 'Listado Alfabético' in url1:
patron = '<\/li><\/ul>.+?<h3>Pel.+?tico<\/h3>(.+?)<\/h3>'
if 'Año' in url1:
patron = '<ul class="anio"><li>(.+?)<\/ul>'
data_explorar = scrapertools.find_single_match(data, patron)
patron_explorar = '<a href="([^"]+)">([^"]+)<\/a>'
@@ -79,26 +78,22 @@ def lista(item):
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data) # Eliminamos tabuladores, dobles espacios saltos de linea, etc...
-url1 = str(item.url)
-if 'http://www.peliculashindu.com/' in url1:
-url1 = url1.replace("http://www.peliculashindu.com/", "")
-if url1 != 'estrenos':
-data = scrapertools.find_single_match(data, '<div id="cuerpo"><div class="iz">.+>Otras')
+# data= scrapertools.find_single_match(data,'<div id="cuerpo"><div class="iz">.+>Otras')
+data_mov= scrapertools.find_single_match(data,'<div id="cuerpo"><div class="iz">(.+)<ul class="pag">')
patron = '<a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)"' # scrapedurl, scrapedthumbnail, scrapedtitle
-matches = scrapertools.find_multiple_matches(data, patron)
+matches = scrapertools.find_multiple_matches(data_mov, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches: # scrapedthumbnail, scrapedtitle in matches:
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, action="findvideos",
show=scrapedtitle))
# Paginacion
patron_pag = '<a href="([^"]+)" title="Siguiente .+?">'
paginasig = scrapertools.find_single_match(data, patron_pag)
logger.info("algoooosadf "+paginasig)
next_page_url = item.url + paginasig
next_page_url = host + paginasig
if paginasig != "":
item.url = next_page_url
itemlist.append(Item(channel=item.channel, action="lista", title=">> Página siguiente", url=next_page_url,
itemlist.append(Item(channel=item.channel, action="lista", title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=next_page_url,
thumbnail='https://s32.postimg.org/4zppxf5j9/siguiente.png'))
return itemlist
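The pagination fix joins the scraped "Siguiente" href to the site root instead of to the current page URL; since the href appears to be site-relative, the old concatenation duplicated path segments after the first page. Illustrative values (the real href shape may differ):

host = "http://www.peliculashindu.com/"
paginasig = "estrenos/2"                  # scraped from the 'Siguiente' link
item_url = host + "estrenos"
item_url + paginasig    # -> '.../peliculashindu.com/estrenosestrenos/2' (broken)
host + paginasig        # -> '.../peliculashindu.com/estrenos/2'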
@@ -114,10 +109,9 @@ def findvideos(item):
logger.info("holaa" + data)
patron_show = '<strong>Ver Pel.+?a([^<]+) online<\/strong>'
show = scrapertools.find_single_match(data, patron_show)
logger.info("holaa" + show)
for videoitem in itemlist:
videoitem.channel = item.channel
-if config.get_videolibrary_support() and len(itemlist) > 0:
+if config.get_videolibrary_support() and len(itemlist) > 0 and item.contentChannel!='videolibrary':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
action="add_pelicula_to_library", extra="findvideos", contentTitle=show))

View File

@@ -240,10 +240,10 @@ def findvideos(item):
))
for videoitem in templist:
data = httptools.downloadpage(videoitem.url).data
-urls_list = scrapertools.find_multiple_matches(data, '{"reorder":1,"type":.*?}')
+urls_list = scrapertools.find_multiple_matches(data, 'var.*?_SOURCE\s+=\s+\[(.*?)\]')
for element in urls_list:
json_data=jsontools.load(element)
id = json_data['id']
sub = json_data['srt']
url = json_data['source']
@@ -253,7 +253,6 @@ def findvideos(item):
new_url = 'https://onevideo.tv/api/player?key=90503e3de26d45e455b55e9dc54f015b3d1d4150&link' \
'=%s&srt=%s' % (url, sub)
logger.debug('new_url: %s' % new_url)
data = httptools.downloadpage(new_url).data
data = re.sub(r'\\', "", data)
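The new pelisfox pattern reads the player entries out of a JavaScript variable ending in _SOURCE rather than matching loose {"reorder":1,...} fragments. A sketch of what the regex captures, on an invented page snippet:

import re, json

html = 'var PLAYER_SOURCE = [{"id": "1", "srt": "es.srt", "source": "http://example.com/v.mp4"}]'
raw = re.search(r'var.*?_SOURCE\s+=\s+\[(.*?)\]', html).group(1)
entry = json.loads(raw)                      # jsontools.load() in the addon
entry["id"], entry["srt"], entry["source"]   # feed the onevideo.tv player URL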

View File

@@ -8,6 +8,7 @@ from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
+from core import tmdb
from platformcode import config, logger
IDIOMAS = {'latino': 'Latino'}
@@ -35,6 +36,7 @@ def mainlist(item):
url=host,
thumbnail='https://s27.postimg.org/iahczwgrn/series.png',
fanart='https://s27.postimg.org/iahczwgrn/series.png',
+page=0
))
autoplay.show_option(item.channel, itemlist)
return itemlist
@@ -49,15 +51,21 @@ def todas(item):
'Serie><span>(.*?)<\/span>'
matches = re.compile(patron, re.DOTALL).findall(data)
-for scrapedurl, scrapedthumbnail, scrapedcalidad, scrapedtitle in matches:
+# Paginacion
+num_items_x_pagina = 30
+min = item.page * num_items_x_pagina
+min=int(min)-int(item.page)
+max = min + num_items_x_pagina - 1
+for scrapedurl, scrapedthumbnail, scrapedcalidad, scrapedtitle in matches[min:max]:
url = host + scrapedurl
calidad = scrapedcalidad
title = scrapedtitle.decode('utf-8')
thumbnail = scrapedthumbnail
fanart = 'https://s32.postimg.org/gh8lhbkb9/seodiv.png'
-itemlist.append(
-Item(channel=item.channel,
+if not 'xxxxxx' in scrapedtitle:
+itemlist.append(
+Item(channel=item.channel,
action="temporadas",
title=title, url=url,
thumbnail=thumbnail,
@@ -67,7 +75,13 @@ def todas(item):
language=language,
context=autoplay.context
))
tmdb.set_infoLabels(itemlist)
+if len(itemlist)>28:
+itemlist.append(
+Item(channel=item.channel,
+title="[COLOR cyan]Página Siguiente >>[/COLOR]",
+url=item.url, action="todas",
+page=item.page + 1))
return itemlist
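todas() now pages the full scraped list client-side: it slices roughly 29 rows per page out of matches and re-queues itself with page+1 while a full page came back. The slicing in isolation (row list is a stand-in):

matches = list(range(100))         # stand-in for the scraped rows
page, per_page = 2, 30
min_i = page * per_page - page     # mirrors min = int(min) - int(item.page): 58
max_i = min_i + per_page - 1       # 87
visible = matches[min_i:max_i]     # 29 rows for this page
if len(visible) > 28:              # a full page -> offer 'Página Siguiente >>'
    next_page = page + 1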
@@ -222,16 +236,31 @@ def episodiosxtemp(item):
def findvideos(item):
logger.info()
itemlist = []
+lang=[]
data = httptools.downloadpage(item.url).data
video_items = servertools.find_video_items(item)
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
+language_items=scrapertools.find_single_match(data,
+'<ul class=tabs-sidebar-ul>(.+?)<\/ul>')
+matches=scrapertools.find_multiple_matches(language_items,
+'<li><a href=#ts(.+?)><span>(.+?)<\/span><\/a><\/li>')
+for idl,scrapedlang in matches:
+if int(idl)<5 and int(idl)!=1:
+lang.append(scrapedlang)
+i=0
+logger.info(lang)
for videoitem in video_items:
videoitem.thumbnail = servertools.guess_server_thumbnail(videoitem.server)
-videoitem.language = scrapertools.find_single_match(data, '<span class="f-info-title">Idioma:<\/span>\s*<span '
-'class="f-info-text">(.*?)<\/span>')
+#videoitem.language = scrapertools.find_single_match(data, '<span class="f-info-title">Idioma:<\/span>\s*<span '
+# 'class="f-info-text">(.*?)<\/span>')
+if i < len(lang):
+videoitem.language=lang[i]
+else:
+videoitem.language=lang[len(lang)-1]
videoitem.title = item.contentSerieName + ' (' + videoitem.server + ') (' + videoitem.language + ')'
videoitem.quality = 'default'
videoitem.context = item.context
+i=i+1
itemlist.append(videoitem)
# Requerido para FilterTools

View File

@@ -29,7 +29,8 @@ list_servers = ['powvideo',
'nowvideo',
'gamovideo',
'kingvid',
-'vidabc'
+'vidabc',
+'streamixcloud'
]
@@ -308,11 +309,11 @@ def findvideos(item):
for i in range(len(list_links)):
a=list_links[i].title
-b=a.lstrip('Ver en')
+b=a[a.find("en") + 2:]
c=b.split('[')
d=c[0].rstrip( )
d=d.lstrip( )
-list_links[i].server=d
+list_links[i].server=d.replace("streamix", "streamixcloud")
list_links = servertools.get_servers_itemlist(list_links)
autoplay.start(list_links, item)
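The replaced line is a classic str.lstrip() misuse: lstrip('Ver en') strips any leading run of the characters V, e, r, space and n, so server names starting with those letters were mangled. Slicing after the first "en" keeps the name intact:

a = "Ver en netu [HD]"
a.lstrip('Ver en')       # -> 'tu [HD]'  (ate the 'ne' of 'netu')
a[a.find("en") + 2:]     # -> ' netu [HD]'; split('[') and strip() then leave 'netu'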

View File

@@ -67,19 +67,21 @@ def lista(item):
title = name
url = host + link
scrapedthumbnail = host + img
-context1=[renumbertools.context(item), autoplay.context]
+context = renumbertools.context(item)
+context2 = autoplay.context
+context.extend(context2)
itemlist.append(item.clone(title=title, url=url, action="episodios", thumbnail=scrapedthumbnail, show=title,
-context=context1))
-logger.info("gasdfsa "+str(b))
+context=context))
if b<29:
a=a+1
url="https://serieslan.com/pag-"+str(a)
if b>10:
itemlist.append(
Item(channel=item.channel, title="Página Siguiente >>", url=url, action="lista", page=0))
Item(channel=item.channel, title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=url, action="lista", page=0))
else:
itemlist.append(
Item(channel=item.channel, title="Página Siguiente >>", url=item.url, action="lista", page=item.page + 1))
Item(channel=item.channel, title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=item.url, action="lista", page=item.page + 1))
tmdb.set_infoLabels(itemlist)
return itemlist
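The context fix is about list shape: the old line built [renumbertools.context(item), autoplay.context], a list whose two elements are themselves lists, so Kodi received nested context entries. extend() flattens them into one list:

context = renumbertools.context(item)   # e.g. [entry_a]  (assumed to return a list)
context.extend(autoplay.context)        # [entry_a, entry_b, ...], not [[...], [...]]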
@@ -90,7 +92,6 @@ def episodios(item):
itemlist = []
data = httptools.downloadpage(item.url).data
logger.debug("info %s " % data)
# obtener el numero total de episodios
total_episode = 0
@@ -136,7 +137,7 @@ def episodios(item):
thumbnail=scrapedthumbnail))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=item.url,
itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]", url=item.url,
action="add_serie_to_library", extra="episodios", show=show))
return itemlist
@@ -183,7 +184,6 @@ def findvideos(item):
data = eval(data)
if type(data) == list:
logger.debug("inside")
video_url = url_server % (txc(ide, base64.decodestring(data[2])))
server = "openload"
if " SUB" in item.title:
@@ -193,7 +193,11 @@ def findvideos(item):
else:
lang = "Latino"
title = "Enlace encontrado en " + server + " [" + lang + "]"
itemlist.append(Item(channel=item.channel, action="play", title=title, show=show, url=video_url, plot=item.plot,
if item.contentChannel=='videolibrary':
itemlist.append(item.clone(channel=item.channel, action="play", url=video_url,
thumbnail=thumbnail, server=server, folder=False))
else:
itemlist.append(Item(channel=item.channel, action="play", title=title, show=show, url=video_url, plot=item.plot,
thumbnail=thumbnail, server=server, folder=False))
autoplay.start(itemlist, item)
@@ -201,17 +205,3 @@ def findvideos(item):
else:
return []
-def play(item):
-logger.info()
-itemlist = []
-# Buscamos video por servidor ...
-devuelve = servertools.findvideosbyserver(item.url, item.server)
-if not devuelve:
-# ...sino lo encontramos buscamos en todos los servidores disponibles
-devuelve = servertools.findvideos(item.url, skip=True)
-if devuelve:
-# logger.debug(devuelve)
-itemlist.append(Item(channel=item.channel, title=item.contentTitle, action="play", server=devuelve[0][2],
-url=devuelve[0][1], thumbnail=item.thumbnail, folder=False))
-return itemlist

View File

@@ -23,8 +23,8 @@ ficherocookies = os.path.join(config.get_data_path(), "cookies.dat")
# Headers por defecto, si no se especifica nada
default_headers = dict()
default_headers["User-Agent"] = "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0"
default_headers["Accept"] = "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"
default_headers["User-Agent"] = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36"
default_headers["Accept"] = "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8"
default_headers["Accept-Language"] = "es-ES,es;q=0.8,en-US;q=0.5,en;q=0.3"
default_headers["Accept-Charset"] = "UTF-8"
default_headers["Accept-Encoding"] = "gzip"

View File

@@ -412,8 +412,12 @@ class SettingsWindow(xbmcgui.WindowXMLDialog):
self.addControl(control)
control.setVisible(False)
control.setLabel(c["label"])
-control.setText(self.values[c["id"]])
+# frodo fix
+s = self.values[c["id"]]
+if s is None:
+s = ''
+control.setText(s)
+# control.setText(self.values[c["id"]])
control.setWidth(self.controls_width - 5)
control.setHeight(self.height_control)
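The "frodo fix" simply guards setText() against a None value, which older Kodi text controls reject. The same guard as a one-liner, given that the stored values are strings or None:

control.setText(self.values[c["id"]] or '')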

View File

@@ -40,6 +40,7 @@
</category>
<category label="Opciones Visuales">
<setting id="icon_set" type="labelenum" label="Set de iconos" values="default|dark" default="default"/>
<setting id="infoplus_set" type="labelenum" label="Opción visual Infoplus" values="Sin animación|Con animación" default="Sin animación"/>
</category>
<category label="Otros">
<setting label="Info de películas/series en menú contextual" type="lsep"/>

View File

@@ -37,7 +37,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
cgi_counter = cgi_counter.replace("%0A","").replace("%22","")
playnow = scrapertools.find_single_match(data, 'https://www.flashx.tv/dl[^"]+')
# Para obtener el f y el fxfx
js_fxfx = "https://www." + scrapertools.find_single_match(data.replace("//","/"), """(?is)(flashx.tv/js/code.js.*?[^(?:'|")]+)""")
js_fxfx = "https://www." + scrapertools.find_single_match(data.replace("//","/"), """(?is)(flashx.tv/jss/coder.js.*?[^(?:'|")]+)""")
data_fxfx = httptools.downloadpage(js_fxfx).data
mfxfx = scrapertools.find_single_match(data_fxfx, 'get.*?({.*?})').replace("'","").replace(" ","")
matches = scrapertools.find_multiple_matches(mfxfx, '(\w+):(\w+)')

View File

@@ -7,13 +7,12 @@ from core import scrapertools
from lib import jsunpack
from platformcode import logger
-headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:58.0) Gecko/20100101 ' \
-'Firefox/58.0'}
+headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:58.0) Gecko/20100101 Firefox/58.0'}
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
-data = httptools.downloadpage(page_url, headers=headers).data
+data = httptools.downloadpage(page_url).data
if "File was deleted" in data or "Not Found" in data or "File was locked by administrator" in data:
return False, "[Gamovideo] El archivo no existe o ha sido borrado"
@@ -25,7 +24,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
-data = httptools.downloadpage(page_url, headers=headers).data
+data = httptools.downloadpage(page_url).data
packer = scrapertools.find_single_match(data,
"<script type='text/javascript'>(eval.function.p,a,c,k,e,d..*?)</script>")
if packer != "":

View File

@@ -8,9 +8,11 @@ from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
-data = httptools.downloadpage(page_url).data
-if "video_error.mp4" in data:
+response = httptools.downloadpage(page_url)
+if "video_error.mp4" in response.data:
return False, "[Stormo] El archivo no existe o ha sido borrado"
+if response.code == 451:
+return False, "[Stormo] El archivo ha sido borrado por problemas legales."
return True, ""