19 Commits

Author        SHA1        Message  Date
alfa-addon    8985f3ebdd  v2.3.4  2017-11-06 19:04:42 -05:00
Alfa          d60c246bbb  Merge pull request #155 from Intel11/patch-3 (Updated)  2017-11-07 00:09:00 +01:00
Alfa          3b29fe47bb  Merge pull request #156 from danielr460/master (Minor fixes)  2017-11-07 00:08:47 +01:00
Alfa          3093f72ce5  Merge pull request #159 from Alfa-beto/Fixes (Fixed error with extras)  2017-11-07 00:08:33 +01:00
Unknown       55dcf3f091  Fixed error with extras  2017-11-05 18:21:26 -03:00
Intel1        2924b6958d  Update allpeliculas.py  2017-11-04 15:01:27 -05:00
Intel1        927310c7c6  flashx: updated  2017-11-04 14:58:29 -05:00
danielr460    0c25891790  fix servers  2017-11-04 00:06:45 -05:00
danielr460    212c06057f  Minor fixes  2017-11-03 22:04:28 -05:00
Intel1        9c3b3e9256  allpeliculas: paginator for collections  2017-11-03 17:54:51 -05:00
Intel1        6dc853b41e  repelis: fix category  2017-11-03 15:49:52 -05:00
Intel1        7afd09dfa9  streamixcloud: fix  2017-11-03 11:08:16 -05:00
Intel1        6855508eaa  Update ultrapeliculashd.py  2017-11-03 10:21:18 -05:00
Intel1        2925c29671  Update ultrapeliculashd.json  2017-11-03 10:20:47 -05:00
Intel1        506e68e8a3  vshare: changed resolution order  2017-11-03 10:17:12 -05:00
Intel1        9cc30152f8  vshare: updated pattern  2017-11-03 10:15:27 -05:00
Intel1        267c9d8031  gvideo: fix  2017-11-03 10:07:46 -05:00
Intel1        bd68b83b6c  flashx: fix  2017-11-01 06:47:51 -05:00
Unknown       c1f8039672  Merge remote-tracking branch 'alfa-addon/master' into Fixes  2017-11-01 08:37:33 -03:00
13 changed files with 87 additions and 76 deletions

View File: addon.xml

@@ -1,5 +1,5 @@
 <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
-<addon id="plugin.video.alfa" name="Alfa" version="2.3.3" provider-name="Alfa Addon">
+<addon id="plugin.video.alfa" name="Alfa" version="2.3.4" provider-name="Alfa Addon">
     <requires>
         <import addon="xbmc.python" version="2.1.0"/>
         <import addon="script.module.libtorrent" optional="true"/>
@@ -19,13 +19,11 @@
     </assets>
     <news>[B]Estos son los cambios para esta versión:[/B]
 [COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
-» serieslan » animemovil
-» mundiseries » bajui
-» seriesblanco » descargamix
-» miradetodo » pelisgratis
-» tvseriesdk » ultrapeliculashd
-» gamovideo » flashx
-» danimados ¤ arreglos internos
+» allpeliculas » repelis
+» flashx » ultrapeliculashd
+» gvideo » streamixcloud
+» vshare » anitoonstv
+¤ arreglos internos
+[COLOR green]Gracias a [COLOR yellow]Danielr460[/COLOR] por su colaboración en esta versión[/COLOR]
     </news>
     <description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>

View File: allpeliculas.py

@@ -1,7 +1,5 @@
 # -*- coding: utf-8 -*-
-import urlparse
 from core import httptools
 from core import jsontools
 from core import scrapertools
@@ -59,6 +57,7 @@ def colecciones(item):
         title = scrapedtitle.capitalize() + " (" + scrapedcantidad + ")"
         itemlist.append(Item(channel = item.channel,
                              action = "listado_colecciones",
+                             page = 1,
                              thumbnail = host + scrapedthumbnail,
                              title = title,
                              url = host + scrapedurl
@@ -71,7 +70,7 @@ def listado_colecciones(item):
     itemlist = []
     data = httptools.downloadpage(item.url).data
     data_url = scrapertools.find_single_match(data, "data_url: '([^']+)")
-    post = "page=1"
+    post = "page=%s" %item.page
     data = httptools.downloadpage(host + data_url, post=post).data
     patron = 'a href="(/peli[^"]+).*?'
     patron += 'src="([^"]+).*?'
@@ -88,6 +87,16 @@ def listado_colecciones(item):
                              url = host + scrapedurl
                              ))
     tmdb.set_infoLabels(itemlist)
+    item.page += 1
+    post = "page=%s" %item.page
+    data = httptools.downloadpage(host + data_url, post=post).data
+    if len(data) > 50:
+        itemlist.append(Item(channel = item.channel,
+                             action = "listado_colecciones",
+                             title = "Pagina siguiente>>",
+                             page = item.page,
+                             url = item.url
+                             ))
     return itemlist
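The paginator works by re-dispatching to the same action: `page` travels on the Item, and the "Pagina siguiente>>" entry is appended only after probing that the next page returns a non-trivial body (`len(data) > 50`). A minimal sketch of the pattern outside Kodi; `Item` and `fetch_page` are hypothetical stand-ins for `core.item.Item` and the `httptools.downloadpage(...)` POST:

```python
class Item(object):
    """Stand-in for core.item.Item: a plain attribute bag."""
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

def fetch_page(page):
    # Pretend the site serves three pages of results, then an empty body.
    return "resultado " * 20 if page <= 3 else ""

def listado(item):
    itemlist = []
    data = fetch_page(item.page)
    # ... parse `data` into result Items here ...
    # Probe the next page first, so the next-page entry is only offered
    # when the server actually returns something beyond a stub response.
    next_page = item.page + 1
    if len(fetch_page(next_page)) > 50:
        itemlist.append(Item(action="listado", title="Pagina siguiente>>",
                             page=next_page, url=item.url))
    return itemlist

print([i.title for i in listado(Item(page=1, url="http://example.com"))])
# -> ['Pagina siguiente>>']; with page=3 the probe fails and nothing is added
```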
@@ -159,6 +168,7 @@ def lista(item):
     params = jsontools.dump(dict_param)
     data = httptools.downloadpage(item.url, post=params).data
+    data = data.replace("<mark>","").replace("<\/mark>","")
     dict_data = jsontools.load(data)
     for it in dict_data["items"]:
@@ -167,7 +177,7 @@ def lista(item):
         rating = it["imdb"]
         year = it["year"]
         url = host + "pelicula/" + it["slug"]
-        thumb = urlparse.urljoin(host, it["image"])
+        thumb = host + it["image"]
         item.infoLabels['year'] = year
         itemlist.append(item.clone(action="findvideos", title=title, fulltitle=title, url=url, thumbnail=thumb,
                                    plot=plot, context=["buscar_trailer"], contentTitle=title, contentType="movie"))

View File

@@ -148,15 +148,21 @@ def findvideos(item):
     itemla = scrapertools.find_multiple_matches(data_vid, '<div class="serv">.+?-(.+?)-(.+?)<\/div><.+? src="(.+?)"')
     for server, quality, url in itemla:
         if "Calidad Alta" in quality:
-            quality = quality.replace("Calidad Alta", "HQ")
+            quality = "HQ"
+        if "HQ" in quality:
+            quality = "HD"
         if " Calidad media - Carga mas rapido" in quality:
-            quality = quality.replace(" Calidad media - Carga mas rapido", "360p")
+            quality = "360p"
         server = server.lower().strip()
-        if "ok" == server:
+        if "ok" in server:
             server = 'okru'
+        if "rapid" in server:
+            server = 'rapidvideo'
+        if "netu" in server:
+            server = 'netutv'
         itemlist.append(item.clone(url=url, action="play", server=server, contentQuality=quality,
                                    thumbnail=scrapedthumbnail, plot=scrapedplot,
-                                   title="Enlace encontrado en %s: [%s]" % (server.capitalize(), quality)))
+                                   title="Enlace encontrado en: %s [%s]" % (server.capitalize(), quality)))
     autoplay.start(itemlist, item)
     return itemlist
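The rewrite swaps string surgery for fixed canonical labels and loosens server matching from equality to substring tests, which also catches variants like "okru" in the scraped name. The same normalisation restated as a table-driven helper (hypothetical; the channel keeps the inline ifs):

```python
# Order matters: "Calidad Alta" -> "HQ", then the "HQ" rule promotes it to
# "HD", mirroring the two consecutive ifs in the hunk above.
QUALITY_MAP = [("Calidad Alta", "HQ"), ("HQ", "HD"),
               (" Calidad media - Carga mas rapido", "360p")]
SERVER_MAP = [("ok", "okru"), ("rapid", "rapidvideo"), ("netu", "netutv")]

def normalise(server, quality):
    for needle, canonical in QUALITY_MAP:
        if needle in quality:
            quality = canonical
    server = server.lower().strip()
    for needle, canonical in SERVER_MAP:
        if needle in server:
            server = canonical
    return server, quality

print(normalise("  Okru ", "Calidad Alta"))  # -> ('okru', 'HD')
```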

View File

@@ -30,11 +30,6 @@ def mainlist(item):
     itemlist.append(
         Item(channel=item.channel, action="menudesta", title="Destacadas", url= host + "/pag/1",
              thumbnail="http://img.irtve.es/v/1074982/", fanart=mifan))
-    itemlist.append(Item(channel=item.channel, action="menupelis", title="Proximos estrenos",
-                         url= host + "/archivos/proximos-estrenos/pag/1",
-                         thumbnail="https://encrypted-tbn3.gstatic.com/images?q=tbn:ANd9GcTpsRC"
-                                   "-GTYzCqhor2gIDfAB61XeymwgXWSVBHoRAKs2c5HAn29f&reload=on",
-                         fanart=mifan))
     itemlist.append(Item(channel=item.channel, action="menupelis", title="Todas las Peliculas",
                          url= host + "/pag/1",
                          thumbnail="https://freaksociety.files.wordpress.com/2012/02/logos-cine.jpg", fanart=mifan))
@@ -70,7 +65,8 @@ def menupelis(item):
     logger.info(item.url)
     itemlist = []
     data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8')
+    if item.genre:
+        item.extra = item.genre
     if item.extra == '':
         section = 'Recién Agregadas'
     elif item.extra == 'year':
@@ -79,17 +75,13 @@ def menupelis(item):
         section = 'de Eróticas \+18'
     else:
         section = 'de %s'%item.extra
-    patronenlaces = '<h.>Películas %s<\/h.>.*?>(.*?)<\/section>'%section
-    matchesenlaces = re.compile(patronenlaces, re.DOTALL).findall(data)
+    patronenlaces = '<h.>Películas %s</h.>.*?>(.*?)</section>'%section
+    matchesenlaces = scrapertools.find_multiple_matches(data, patronenlaces)
     for bloque_enlaces in matchesenlaces:
         patron = '<div class="poster-media-card">.*?'
         patron += '<a href="(.*?)".*?title="(.*?)"(.*?)'
         patron += '<img src="(.*?)"'
         matches = re.compile(patron, re.DOTALL).findall(bloque_enlaces)
         for scrapedurl, scrapedtitle, extra_info, scrapedthumbnail in matches:
             title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
             title = title.replace("Online", "");
@@ -144,21 +136,14 @@ def menudesta(item):
 # Peliculas de Estreno
 def menuestre(item):
     logger.info(item.url)
     itemlist = []
     data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8')
     patronenlaces = '<h1>Estrenos</h1>(.*?)</section>'
-    matchesenlaces = re.compile(patronenlaces, re.DOTALL).findall(data)
+    matchesenlaces = scrapertools.find_multiple_matches(data, patronenlaces)
     for bloque_enlaces in matchesenlaces:
-        # patron = '<a href="([^"]+)" title="([^"]+)"> <div class="poster".*?<img src="([^"]+)"'
         patron = '<div class="poster-media-card">.*?'
         patron += '<a href="(.*?)".*?title="(.*?)"(.*?)'
         patron += '<img src="(.*?)"'
         matches = re.compile(patron, re.DOTALL).findall(bloque_enlaces)
         for scrapedurl, scrapedtitle, extra_info, scrapedthumbnail in matches:
             title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
@@ -255,32 +240,22 @@ def search(item, texto):
     patron += '<div class="row">.*?'
     patron += '<a href="(.*?)" title="(.*?)">.*?'
     patron += '<img src="(.*?)"'
-    logger.info(patron)
     matches = re.compile(patron, re.DOTALL).findall(data)
     itemlist = []
     for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
         title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
         title = title.replace("Online", "")
-        url = item.url + scrapedurl
-        thumbnail = item.url + scrapedthumbnail
-        logger.info(url)
+        url = scrapedurl
+        thumbnail = scrapedthumbnail
         itemlist.append(Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url,
                              thumbnail=thumbnail, fanart=thumbnail))
     return itemlist
 def poranyo(item):
     logger.info(item.url)
     itemlist = []
     data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8')
     patron = '<option value="([^"]+)">(.*?)</option>'
     matches = re.compile(patron, re.DOTALL).findall(data)
     for scrapedurl, scrapedtitle in matches:
@@ -289,7 +264,6 @@ def poranyo(item):
         url = item.url + scrapedurl
         itemlist.append(Item(channel=item.channel, action="menupelis", title=title, fulltitle=title, url=url,
                              fanart=item.fanart, extra='year'))
     return itemlist
@@ -300,24 +274,25 @@ def porcateg(item):
     data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8')
     patron = '<li class="cat-item cat-item-3">.*?<a href="([^"]+)" title="([^"]+)">'
     matches = scrapertools.find_multiple_matches(data, patron)
+    adult_mode = config.get_setting("adult_mode")
     for scrapedurl, scrapedtitle in matches:
+        if "18" in scrapedtitle and adult_mode == 0:
+            continue
         title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
         title = title.replace("Online", "")
         url = scrapedurl
-        logger.info(url)
         # si no esta permitidas categoria adultos, la filtramos
-        extra = title
-        adult_mode = config.get_setting("adult_mode")
+        extra1 = title
         if adult_mode != 0:
             if 'erotic' in scrapedurl:
-                extra = 'adult'
+                extra1 = 'adult'
             else:
-                extra=title
+                extra1=title
-        if (extra=='adult' and adult_mode != 0) or extra != 'adult':
+        if (extra1=='adult' and adult_mode != 0) or extra1 != 'adult':
             itemlist.append(Item(channel=item.channel, action="menupelis", title=title, fulltitle=title, url=url,
-                                 fanart=item.fanart, extra = extra))
+                                 fanart=item.fanart, genre = extra1))
     return itemlist
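With the early `continue`, "+18" categories are now skipped before any Item is built whenever adult mode is off, instead of being tagged and filtered at append time; the `extra` attribute is also freed up (the category now travels as `genre`, which `menupelis` copies back). The filter in isolation, with made-up data:

```python
adult_mode = 0  # stand-in for config.get_setting("adult_mode"); 0 = disabled
matches = [("/categoria/accion", "Accion"),
           ("/categoria/eroticas", "Eroticas +18")]
kept = [title for url, title in matches
        if not ("18" in title and adult_mode == 0)]
print(kept)  # -> ['Accion']
```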
@@ -338,7 +313,6 @@ def decode(string):
         i += 1
         enc4 = keyStr.index(input[i])
         i += 1
         chr1 = (enc1 << 2) | (enc2 >> 4)
         chr2 = ((enc2 & 15) << 4) | (enc3 >> 2)
         chr3 = ((enc3 & 3) << 6) | enc4
@@ -352,4 +326,4 @@ def decode(string):
     output = output.decode('utf8')
-        return output
+    return output

View File

@@ -290,7 +290,10 @@ def do_search(item, categories=None):
     multithread = config.get_setting("multithread", "search")
     result_mode = config.get_setting("result_mode", "search")
-    tecleado = item.extra
+    if item.wanted!='':
+        tecleado=item.wanted
+    else:
+        tecleado = item.extra
     itemlist = []
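`do_search` now prefers the new `wanted` attribute, populated by the `set_context_commands` hunk further down, and only falls back to `extra`, which stays free for other flags. The lookup order in isolation (`Item` is a stand-in):

```python
class Item(object):
    wanted = ""
    extra = ""

def search_term(item):
    # Same precedence as the hunk: `wanted` wins, `extra` is the fallback.
    return item.wanted if item.wanted != "" else item.extra

it = Item()
it.extra = "estrenos"
print(search_term(it))   # -> estrenos
it.wanted = "Blade Runner"
print(search_term(it))   # -> Blade Runner
```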

View File

@@ -34,6 +34,14 @@
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades -Terror",
"default": true,
"enabled": true,
"visible": true
}
]
}
}

View File

@@ -252,10 +252,13 @@ def newest(categoria):
     item.extra = 'estrenos/'
     try:
         if categoria == 'peliculas':
-            item.url = host + '/category/estrenos/'
+            item.url = host + '/genre/estrenos/'
         elif categoria == 'infantiles':
-            item.url = host + '/category/infantil/'
+            item.url = host + '/genre/animacion/'
+        elif categoria == 'terror':
+            item.url = host + '/genre/terror/'
         itemlist = lista(item)
         if itemlist[-1].title == 'Siguiente >>>':

View File

@@ -398,9 +398,9 @@ def set_context_commands(item, parent_item):
     if item.contentType in ['movie','tvshow']and item.channel != 'search':
         # Buscar en otros canales
         if item.contentSerieName!='':
-            item.extra=item.contentSerieName
+            item.wanted=item.contentSerieName
         else:
-            item.extra = item.contentTitle
+            item.wanted = item.contentTitle
         context_commands.append(("[COLOR yellow]Buscar en otros canales[/COLOR]",
                                  "XBMC.Container.Update (%s?%s)" % (sys.argv[0],
                                  item.clone(channel='search',

View File: flashx.py

@@ -37,12 +37,16 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
     cgi_counter = cgi_counter.replace("%0A","").replace("%22","")
     playnow = scrapertools.find_single_match(data, 'https://www.flashx.tv/dl[^"]+')
     # Para obtener el f y el fxfx
-    js_fxfx = "https://www." + scrapertools.find_single_match(data, """(?is)(flashx.tv/js/code.js.*?[^(?:'|")]+)""")
+    js_fxfx = "https://www." + scrapertools.find_single_match(data.replace("//","/"), """(?is)(flashx.tv/js/code.js.*?[^(?:'|")]+)""")
     data_fxfx = httptools.downloadpage(js_fxfx).data
     mfxfx = scrapertools.find_single_match(data_fxfx, 'get.*?({.*?})').replace("'","").replace(" ","")
     matches = scrapertools.find_multiple_matches(mfxfx, '(\w+):(\w+)')
     for f, v in matches:
         pfxfx += f + "=" + v + "&"
+    logger.info("mfxfxfx1= %s" %js_fxfx)
+    logger.info("mfxfxfx2= %s" %pfxfx)
+    if pfxfx == "":
+        pfxfx = "ss=yes&f=fail&fxfx=6"
     coding_url = 'https://www.flashx.tv/flashx.php?%s' %pfxfx
     # {f: 'y', fxfx: '6'}
     flashx_id = scrapertools.find_single_match(data, 'name="id" value="([^"]+)"')
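The added guard keeps the request alive when flashx changes its code.js: if no `{f: ..., fxfx: ...}` dict can be scraped, a stub query string is sent rather than an empty one. The scrape-and-serialise step restated as a hypothetical helper (`build_fxfx_query` is not part of the add-on):

```python
import re

def build_fxfx_query(js_source):
    # Pull the dict literal passed to $.get(...) out of code.js and turn it
    # into a query string; fall back to a stub when nothing matches.
    found = re.search(r"get.*?({.*?})", js_source, re.S)
    pfxfx = ""
    if found:
        body = found.group(1).replace("'", "").replace(" ", "")
        for f, v in re.findall(r"(\w+):(\w+)", body):
            pfxfx += f + "=" + v + "&"
    if pfxfx == "":
        pfxfx = "ss=yes&f=fail&fxfx=6"
    return pfxfx

print(build_fxfx_query("$.get('/flashx.php', {f: 'y', fxfx: '6'});"))
# -> f=y&fxfx=6&
print(build_fxfx_query("nothing here"))
# -> ss=yes&f=fail&fxfx=6
```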

View File: gvideo.py

@@ -30,12 +30,20 @@ def get_video_url(page_url, user="", password="", video_password=""):
     streams =[]
     logger.debug('page_url: %s'%page_url)
     if 'googleusercontent' in page_url:
-        data = httptools.downloadpage(page_url, follow_redirects = False, headers={"Referer": page_url})
-        url=data.headers['location']
+        response = httptools.downloadpage(page_url, follow_redirects = False, cookies=False, headers={"Referer": page_url})
+        url=response.headers['location']
+        cookies = ""
+        cookie = response.headers["set-cookie"].split("HttpOnly, ")
+        for c in cookie:
+            cookies += c.split(";", 1)[0] + "; "
+        data = response.data.decode('unicode-escape')
+        data = urllib.unquote_plus(urllib.unquote_plus(data))
+        headers_string = "|Cookie=" + cookies
         quality = scrapertools.find_single_match (url, '.itag=(\d+).')
         streams.append((quality, url))
-        headers_string=""
     else:
         response = httptools.downloadpage(page_url, cookies=False, headers={"Referer": page_url})
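Google returns several cookies in a single Set-Cookie header, separated by "HttpOnly, "; the new branch keeps just each name=value pair and appends them as Kodi's `|Cookie=` URL suffix so the player can replay them. The mechanics in isolation (cookie values are invented):

```python
set_cookie = "DRIVE_STREAM=abc123; expires=Tue; path=/; HttpOnly, S=videos=xyz; path=/"
cookies = ""
for c in set_cookie.split("HttpOnly, "):
    cookies += c.split(";", 1)[0] + "; "  # keep only "name=value"
headers_string = "|Cookie=" + cookies
print(headers_string)  # -> |Cookie=DRIVE_STREAM=abc123; S=videos=xyz;
```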

View File: streamixcloud.py

@@ -8,7 +8,6 @@ from platformcode import logger
 def test_video_exists(page_url):
     logger.info("(page_url='%s')" % page_url)
     data = httptools.downloadpage(page_url).data
     if "Not Found" in data:
         return False, "[streamixcloud] El archivo no existe o ha sido borrado"
@@ -21,7 +20,8 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
     logger.info("(page_url='%s')" % page_url)
     data = httptools.downloadpage(page_url).data
     video_urls = []
-    packed = scrapertools.find_single_match(data,
+    patron = "<script type='text/javascript'>(eval\(function\(p,a,c,k,e,d.*?)</script"
+    packed = scrapertools.find_single_match(data, patron)
     data = jsunpack.unpack(packed)
     media_url = scrapertools.find_multiple_matches(data, '\{file:"([^"]+)",')
     ext = scrapertools.get_filename_from_url(media_url[0])[-4:]
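The refactor lifts the script pattern into a `patron` variable; the heavy lifting stays in the add-on's `jsunpack` module, which decodes Dean Edwards style `eval(function(p,a,c,k,e,d)...)` packing. A sketch of just the extraction step against a toy page (`jsunpack.unpack` itself is not reproduced):

```python
import re

patron = r"<script type='text/javascript'>(eval\(function\(p,a,c,k,e,d.*?)</script"
html = ("<script type='text/javascript'>"
        "eval(function(p,a,c,k,e,d){return p})('x',0,0,[])"
        "</script>")
packed = re.search(patron, html, re.S).group(1)
print(packed)  # the packed block that would be fed to jsunpack.unpack(packed)
```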

View File: vshare.json

@@ -3,8 +3,8 @@
"find_videos": {
"patterns": [
{
"pattern": "(http://vshare.io/v/[\\w]+[^\"']*)[\"']",
"url": "\\1"
"pattern": "(vshare.io/v/[a-zA-Z0-9/-]+)",
"url": "http://\\1"
}
]
},
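The old pattern only matched plain-http embeds terminated by a quote; the new one finds the path anywhere, an https iframe for instance, and the `url` template re-prefixes `http://`. A quick comparison:

```python
import re

old = r'(http://vshare.io/v/[\w]+[^"\']*)["\']'
new = r"(vshare.io/v/[a-zA-Z0-9/-]+)"
page = 'iframe src="https://vshare.io/v/abc123/width-640/"'
print(re.findall(old, page))  # -> [] (anchored to http:// plus a closing quote)
print(re.findall(new, page))  # -> ['vshare.io/v/abc123/width-640/']
```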

View File: vshare.py

@@ -40,11 +40,8 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
     arrayResult = [chr(int(value) - substract) for value in fields.group(1).split(",")]
     strResult = "".join(arrayResult)
     logger.debug(strResult)
     videoSources = re.findall("<source[\s]+src=[\"'](?P<url>[^\"']+)[^>]+label=[\"'](?P<label>[^\"']+)", strResult)
     for url, label in videoSources:
         logger.debug("[" + label + "] " + url)
         video_urls.append([label, url])
+    video_urls.sort(key=lambda i: int(i[0].replace("p","")))
     return video_urls
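The added sort orders the collected streams by numeric resolution, lowest first, by stripping the trailing "p" from each label, which is what the "vshare: changed resolution order" commit refers to. In isolation (labels invented):

```python
video_urls = [["720p", "http://a"], ["360p", "http://b"], ["1080p", "http://c"]]
video_urls.sort(key=lambda i: int(i[0].replace("p", "")))
print([label for label, _ in video_urls])  # -> ['360p', '720p', '1080p']
```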