This commit is contained in: alfa_addon_10
2017-08-08 12:13:51 +02:00
14 changed files with 441 additions and 23 deletions

View File

@@ -1,5 +1,5 @@
 <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
-<addon id="plugin.video.alfa" name="Alfa" version="0.1.0" provider-name="Alfa Addon">
+<addon id="plugin.video.alfa" name="Alfa" version="0.1.1" provider-name="Alfa Addon">
     <requires>
         <import addon="xbmc.python" version="2.1.0"/>
         <import addon="script.module.libtorrent" optional="true"/>
@@ -18,9 +18,12 @@
         <screenshot>resources/media/general/ss/4.jpg</screenshot>
     </assets>
     <news>[B]These are the changes in this version:[/B]
 [I]- bug fixes and fixes for website changes[/I]
+[COLOR green][B]Added channels and fixes[/B][/COLOR]
+[I]- cinetux
+- bajui2
+- seriesblanco[/I]
-[COLOR blue]Thanks to [COLOR yellow]j2331223[/COLOR] for collaborating on this version.[/COLOR]
+[COLOR blue]Thanks to [COLOR yellow]devalls[/COLOR] and [COLOR yellow]j2331223[/COLOR] for collaborating on this version.[/COLOR]
     </news>
     <description lang="es">Descripción en Español</description>
     <summary lang="en">English summary</summary>
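
addon.xml declares script.module.libtorrent as an optional dependency, so the addon has to probe for it at runtime rather than assume it is installed. A minimal sketch of that probe (hypothetical helper names, not code from this commit):

    import xbmcaddon

    try:
        xbmcaddon.Addon('script.module.libtorrent')  # declared optional in addon.xml above
        TORRENT_SUPPORT = True
    except RuntimeError:  # Kodi raises RuntimeError when an addon is not installed
        TORRENT_SUPPORT = False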

View File

@@ -1,14 +1,18 @@
 {
-    "id": "bajui",
-    "name": "Bajui",
+    "id": "bajui2",
+    "name": "Bajui2",
     "active": true,
     "adult": false,
     "language": "es",
     "thumbnail": "bajui.png",
     "banner": "bajui.png",
     "fanart": "bajui.png",
-    "version": 1,
+    "version": 2,
     "changes": [
+        {
+            "date": "07/08/2017",
+            "description": "Fix: URL host changed to Bajui2"
+        },
         {
             "date": "15/03/2017",
             "description": "Code cleanup"
@@ -34,4 +38,4 @@
             "visible": true
         }
     ]
-}
+}
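
Channel JSON files like this one are what channeltools.get_channel_parameters() (used by the yaske module later in this commit) reads. A minimal sketch of the idea, assuming the files live in a "channels" folder; the real loader in core.channeltools may differ:

    import json
    import os

    def get_channel_parameters(channel_name, channels_dir="channels"):
        # read channels/<name>.json and return its parameter dict
        with open(os.path.join(channels_dir, channel_name + ".json")) as f:
            return json.load(f)

    params = get_channel_parameters("bajui2")
    assert params["id"] == "bajui2" and params["version"] == 2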

View File

@@ -13,7 +13,7 @@ def mainlist(item):
     logger.info()
     itemlist = []
     itemlist.append(Item(channel=item.channel, title="Películas", action="menupeliculas",
-                         url="http://www.bajui.com/descargas/categoria/2/peliculas",
+                         url="http://www.bajui2.com/descargas/categoria/2/peliculas",
                          fanart=item.fanart))
     itemlist.append(Item(channel=item.channel, title="Series", action="menuseries",
                          fanart=item.fanart))
@@ -51,13 +51,13 @@ def menuseries(item):
     logger.info()
     itemlist = []
     itemlist.append(Item(channel=item.channel, title="Series - Novedades", action="peliculas",
-                         url="http://www.bajui.com/descargas/categoria/3/series",
+                         url="http://www.bajui2.com/descargas/categoria/3/series",
                          fanart=item.fanart, viewmode="movie_with_plot"))
     itemlist.append(Item(channel=item.channel, title="Series - A-Z", action="peliculas",
-                         url="http://www.bajui.com/descargas/categoria/3/series/orden:nombre",
+                         url="http://www.bajui2.com/descargas/categoria/3/series/orden:nombre",
                          fanart=item.fanart, viewmode="movie_with_plot"))
     itemlist.append(Item(channel=item.channel, title="Series - HD", action="peliculas",
-                         url="http://www.bajui.com/descargas/subcategoria/11/hd/orden:nombre",
+                         url="http://www.bajui2.com/descargas/subcategoria/11/hd/orden:nombre",
                          fanart=item.fanart, viewmode="movie_with_plot"))
     itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url="",
                          fanart=item.fanart))
@@ -68,10 +68,10 @@ def menudocumentales(item):
     logger.info()
     itemlist = []
     itemlist.append(Item(channel=item.channel, title="Documentales - Novedades", action="peliculas",
-                         url="http://www.bajui.com/descargas/categoria/7/docus-y-tv",
+                         url="http://www.bajui2.com/descargas/categoria/7/docus-y-tv",
                          fanart=item.fanart, viewmode="movie_with_plot"))
     itemlist.append(Item(channel=item.channel, title="Documentales - A-Z", action="peliculas",
-                         url="http://www.bajui.com/descargas/categoria/7/docus-y-tv/orden:nombre",
+                         url="http://www.bajui2.com/descargas/categoria/7/docus-y-tv/orden:nombre",
                          fanart=item.fanart, viewmode="movie_with_plot"))
     itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url="",
                          fanart=item.fanart))
@@ -86,7 +86,7 @@ def search(item, texto, categoria=""):
     texto = texto.replace(" ", "+")
     logger.info("categoria: " + categoria + " url: " + url)
     try:
-        item.url = "http://www.bajui.com/descargas/busqueda/%s"
+        item.url = "http://www.bajui2.com/descargas/busqueda/%s"
         item.url = item.url % texto
         itemlist.extend(peliculas(item))
         return itemlist
@@ -118,7 +118,7 @@ def peliculas(item, paginacion=True):
         scrapedtitle = title
         scrapedplot = clean_plot(plot)
         scrapedurl = urlparse.urljoin(item.url, url)
-        scrapedthumbnail = urlparse.urljoin("http://www.bajui.com/", thumbnail.replace("_m.jpg", "_g.jpg"))
+        scrapedthumbnail = urlparse.urljoin("http://www.bajui2.com/", thumbnail.replace("_m.jpg", "_g.jpg"))
         logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
         # Add the entry to the XBMC listing
@@ -133,7 +133,7 @@ def peliculas(item, paginacion=True):
     scrapertools.printMatches(matches)
     if len(matches) > 0:
-        scrapedurl = urlparse.urljoin("http://www.bajui.com/", matches[0])
+        scrapedurl = urlparse.urljoin("http://www.bajui2.com/", matches[0])
         pagitem = Item(channel=item.channel, action="peliculas", title=">> Página siguiente", url=scrapedurl,
                        fanart=item.fanart, viewmode="movie_with_plot")
         if not paginacion:
@@ -197,7 +197,7 @@ def enlaces(item):
     try:
         item.thumbnail = scrapertools.get_match(data, '<div class="ficha-imagen"[^<]+<img src="([^"]+)"')
-        item.thumbnail = urlparse.urljoin("http://www.bajui.com/", item.thumbnail)
+        item.thumbnail = urlparse.urljoin("http://www.bajui2.com/", item.thumbnail)
     except:
         pass
@@ -234,8 +234,8 @@ def enlaces(item):
         lista_servidores = lista_servidores[:-2]
         scrapedthumbnail = item.thumbnail
-        # http://www.bajui.com/ajax/mostrar-enlaces.php?id=330582&code=124767d31bfbf14c3861
-        scrapedurl = "http://www.bajui.com/ajax/mostrar-enlaces.php?id=" + id + "&code=" + id2
+        # http://www.bajui2.com/ajax/mostrar-enlaces.php?id=330582&code=124767d31bfbf14c3861
+        scrapedurl = "http://www.bajui2.com/ajax/mostrar-enlaces.php?id=" + id + "&code=" + id2
         scrapedplot = item.plot
         scrapedtitle = "Enlaces de " + usuario + " (" + fecha + ") (" + lista_servidores + ")"

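Every hunk in this channel is the same host swap repeated line by line. A refactor sketch (not part of this commit) that would centralize the host the way the yaske module below does with its HOST constant, so the next domain move is a one-line change:

    # hypothetical module-level constants for the bajui2 channel
    HOST = "http://www.bajui2.com"

    SECTION_URLS = {
        "peliculas": HOST + "/descargas/categoria/2/peliculas",
        "series": HOST + "/descargas/categoria/3/series",
        "documentales": HOST + "/descargas/categoria/7/docus-y-tv",
    }
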
View File

@@ -324,8 +324,8 @@ def bloque_enlaces(data, filtro_idioma, dict_idiomas, type, item):
     bloque2 = bloque2.replace("\t", "").replace("\r", "")
     patron = '(?s)optn" href="([^"]+)'
     patron += '.*?title="([^"]+)'
-    patron += '.*?src.*?src="[^>]+"\s/>([^<]+)'
-    patron += '.*?src="[^>]+"\s/>([^<]+)'
+    patron += '.*?src.*?src="[^>]+"?/>([^<]+)'
+    patron += '.*?src="[^>]+"?/>([^<]+)'
     patron += '.*?/span>([^<]+)'
     matches.extend(scrapertools.find_multiple_matches(bloque2, patron))
     filtrados = []
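
The loosened pattern drops the mandatory whitespace (\s) before "/>", so flag images written without a space before the self-closing tag still match. A quick check with assumed sample markup:

    import re

    old = r'src="[^>]+"\s/>([^<]+)'
    new = r'src="[^>]+"?/>([^<]+)'
    with_space = '<img src="flag_es.png" />Español<'
    no_space = '<img src="flag_es.png"/>Español<'

    print(re.findall(old, with_space))  # ['Español']
    print(re.findall(old, no_space))    # [] -- the old pattern misses this markup
    print(re.findall(new, no_space))    # ['Español']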

View File

@@ -190,7 +190,7 @@ def episodios(item):
     episodes = re.findall("<tr.*?href=['\"](?P<url>[^'\"]+).+?>(?P<title>.+?)</a>.*?<td>(?P<flags>.*?)</td>", data,
                           re.MULTILINE | re.DOTALL)
     for url, title, flags in episodes:
-        title = title.replace("<span itemprop='episodeNumber'>", "").replace("</span>", "")
+        title = re.sub("<span[^>]+>", "", title).replace("</span>", "")
         idiomas = " ".join(["[%s]" % IDIOMAS.get(language, "OVOS") for language in
                             re.findall("banderas/([^\.]+)", flags, re.MULTILINE)])
         filter_lang = idiomas.replace("[", "").replace("]", "").split(" ")
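
The new re.sub strips any opening <span ...> tag rather than one hard-coded attribute order, presumably because the site changed its markup. A quick check with assumed markup:

    import re

    title = "<span itemprop='episodeNumber' class='num'>1x01</span> Pilot"
    print(re.sub("<span[^>]+>", "", title).replace("</span>", ""))  # 1x01 Pilot
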
@@ -302,7 +302,7 @@ def play(item):
     if ajax_data:
         data = ajax_data
-        patron = "onclick='window.open\(\"([^\"]+)\"\);'/>"
+        patron = "window.location.href\s*=\s*[\"']([^\"']+)'"
         url = scrapertoolsV2.find_single_match(data, patron)
     else:
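
The replacement pattern targets a window.location.href redirect in the ajax response instead of the old window.open handler. A quick check against an assumed response snippet:

    import re

    patron = "window.location.href\s*=\s*[\"']([^\"']+)'"
    data = "<script>window.location.href = 'http://example.com/v/abc';</script>"
    print(re.findall(patron, data))  # ['http://example.com/v/abc']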

View File

@@ -0,0 +1,67 @@
+{
+    "id": "yaske",
+    "name": "Yaske",
+    "active": true,
+    "adult": false,
+    "language": "es",
+    "banner": "yaske.png",
+    "fanart": "https://github.com/master-1970/resources/raw/master/images/fanart/yaske.png",
+    "thumbnail": "yaske.png",
+    "version": 1,
+    "changes": [
+        {
+            "date": "27/06/17",
+            "description": "Disabled for lack of content"
+        },
+        {
+            "date": "04/06/17",
+            "description": "Disabled for lack of content"
+        },
+        {
+            "date": "15/03/2017",
+            "description": "Code cleanup"
+        },
+        {
+            "date": "01/02/17",
+            "description": "Add images, synopsis, etc."
+        },
+        {
+            "date": "18/01/17",
+            "description": "Use httptools"
+        },
+        {
+            "date": "12/12/16",
+            "description": "Website changes"
+        },
+        {
+            "date": "01/07/16",
+            "description": "Removed unnecessary code."
+        },
+        {
+            "date": "29/04/16",
+            "description": "Adapt to New Releases: Movies and Kids"
+        }
+    ],
+    "categories": [
+        "latino",
+        "movie"
+    ],
+    "settings": [
+        {
+            "id": "include_in_newest_peliculas",
+            "type": "bool",
+            "label": "Incluir en Novedades - Peliculas",
+            "default": true,
+            "enabled": true,
+            "visible": true
+        },
+        {
+            "id": "include_in_newest_infantiles",
+            "type": "bool",
+            "label": "Incluir en Novedades - Infantiles",
+            "default": false,
+            "enabled": true,
+            "visible": true
+        }
+    ]
+}
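
The two include_in_newest_* switches are what the global "Novedades" section presumably consults before calling this channel's newest(). A sketch assuming a config.get_setting(name, channel) helper with that signature (assumed, not shown in this commit):

    from core import config

    def channel_in_newest(channel_id, categoria):
        # e.g. channel_in_newest("yaske", "peliculas") -> True by default
        return config.get_setting("include_in_newest_" + categoria, channel_id)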

View File

@@ -0,0 +1,256 @@
+# -*- coding: utf-8 -*-
+import re
+
+from core import channeltools
+from core import config
+from core import httptools
+from core import logger
+from core import scrapertoolsV2
+from core import servertools
+from core import tmdb
+from core.item import Item
+
+HOST = 'http://www.yaske.ro'
+parameters = channeltools.get_channel_parameters('yaske')
+fanart_host = parameters['fanart']
+thumbnail_host = parameters['thumbnail']
+color1, color2, color3 = ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E']
+
+
+def mainlist(item):
+    logger.info()
+    itemlist = []
+    item.url = HOST
+    item.text_color = color2
+    item.fanart = fanart_host
+    thumbnail = "https://raw.githubusercontent.com/master-1970/resources/master/images/genres/4/verdes/%s.png"
+    itemlist.append(item.clone(title="Novedades", action="peliculas", text_bold=True, viewcontent='movies',
+                               url=HOST + "/ultimas-y-actualizadas",
+                               thumbnail=thumbnail % 'novedades', viewmode="movie_with_plot"))
+    itemlist.append(item.clone(title="Estrenos", action="peliculas", text_bold=True,
+                               url=HOST + "/genre/premieres", thumbnail=thumbnail % 'estrenos'))
+    itemlist.append(item.clone(title="", folder=False))
+    itemlist.append(Item(channel=item.channel, title="Filtrar por:", fanart=fanart_host, folder=False,
+                         text_color=color3, text_bold=True, thumbnail=thumbnail_host))
+    itemlist.append(item.clone(title=" Género", action="menu_buscar_contenido", text_color=color1, text_italic=True,
+                               extra="genre", thumbnail=thumbnail % 'generos', viewmode="thumbnails"))
+    itemlist.append(item.clone(title=" Idioma", action="menu_buscar_contenido", text_color=color1, text_italic=True,
+                               extra="audio", thumbnail=thumbnail % 'idiomas'))
+    itemlist.append(item.clone(title=" Calidad", action="menu_buscar_contenido", text_color=color1, text_italic=True,
+                               extra="quality", thumbnail=thumbnail % 'calidad'))
+    itemlist.append(item.clone(title=" Año", action="menu_buscar_contenido", text_color=color1, text_italic=True,
+                               extra="year", thumbnail=thumbnail % 'year'))
+    itemlist.append(item.clone(title="", folder=False))
+    itemlist.append(item.clone(title="Buscar por título", action="search", thumbnail=thumbnail % 'buscar'))
+    return itemlist
+
+
+def search(item, texto):
+    logger.info()
+    itemlist = []
+    try:
+        # http://www.yaske.ro/search/?q=los+pitufos
+        item.url = HOST + "/search/?q=" + texto.replace(' ', '+')
+        item.extra = ""
+        itemlist.extend(peliculas(item))
+        if itemlist[-1].title == ">> Página siguiente":
+            item_pag = itemlist[-1]
+            itemlist = sorted(itemlist[:-1], key=lambda it: it.contentTitle)
+            itemlist.append(item_pag)
+        else:
+            itemlist = sorted(itemlist, key=lambda it: it.contentTitle)
+        return itemlist
+    except:
+        import sys
+        for line in sys.exc_info():
+            logger.error("%s" % line)
+        return []
+
+
+def newest(categoria):
+    logger.info()
+    item = Item()
+    try:
+        if categoria == 'peliculas':
+            item.url = HOST + "/ultimas-y-actualizadas"
+        elif categoria == 'infantiles':
+            item.url = HOST + "/search/?q=&genre%5B%5D=animation"
+        else:
+            return []
+        itemlist = peliculas(item)
+        if itemlist[-1].title == ">> Página siguiente":
+            itemlist.pop()
+    # Catch the exception so one failing channel does not break the "Novedades" feed
+    except:
+        import sys
+        for line in sys.exc_info():
+            logger.error("{0}".format(line))
+        return []
+    return itemlist
+
+
+def peliculas(item):
+    logger.info()
+    itemlist = []
+    url_next_page = ""
+    data = httptools.downloadpage(item.url).data
+    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
+    patron = '<article class.*?'
+    patron += '<a href="([^"]+)">.*?'
+    patron += '<img src="([^"]+)".*?'
+    patron += '<aside class="item-control down">(.*?)</aside>.*?'
+    patron += '<small class="pull-right text-muted">([^<]+)</small>.*?'
+    patron += '<h2 class.*?>([^<]+)</h2>'
+    matches = re.compile(patron, re.DOTALL).findall(data)
+
+    # Pagination: each site page is served in two halves of 30 results (phases 'a' and 'b')
+    if item.next_page != 'b':
+        if len(matches) > 30:
+            url_next_page = item.url
+            matches = matches[:30]
+            next_page = 'b'
+    else:
+        matches = matches[30:]
+        next_page = 'a'
+        patron_next_page = 'Anteriores</a> <a href="([^"]+)" class="btn btn-default ".*?Siguiente'
+        matches_next_page = re.compile(patron_next_page, re.DOTALL).findall(data)
+        if len(matches_next_page) > 0:
+            url_next_page = matches_next_page[0]
+
+    for scrapedurl, scrapedthumbnail, idiomas, year, scrapedtitle in matches:
+        patronidiomas = "<img src='([^']+)'"
+        matchesidiomas = re.compile(patronidiomas, re.DOTALL).findall(idiomas)
+        idiomas_disponibles = []
+        for idioma in matchesidiomas:
+            if idioma.endswith("la_la.png"):
+                idiomas_disponibles.append("LAT")
+            elif idioma.endswith("en_en.png"):
+                idiomas_disponibles.append("VO")
+            elif idioma.endswith("en_es.png"):
+                idiomas_disponibles.append("VOSE")
+            elif idioma.endswith("es_es.png"):
+                idiomas_disponibles.append("ESP")
+        if idiomas_disponibles:
+            idiomas_disponibles = "[" + "/".join(idiomas_disponibles) + "]"
+        contentTitle = scrapertoolsV2.decodeHtmlentities(scrapedtitle.strip())
+        title = "%s %s" % (contentTitle, idiomas_disponibles)
+        itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=scrapedurl,
+                             thumbnail=scrapedthumbnail, contentTitle=contentTitle,
+                             infoLabels={"year": year}, text_color=color1))
+
+    # Fetch basic metadata for every movie using multiple threads (TMDb)
+    tmdb.set_infoLabels(itemlist)
+
+    # Append the pagination item if needed
+    if url_next_page:
+        itemlist.append(
+            Item(channel=item.channel, action="peliculas", title=">> Página siguiente", thumbnail=thumbnail_host,
+                 url=url_next_page, next_page=next_page, folder=True, text_color=color3, text_bold=True))
+    return itemlist
+
+
+def menu_buscar_contenido(item):
+    logger.info(item)
+    data = httptools.downloadpage(item.url).data
+    patron = '<select name="' + item.extra + '(.*?)</select>'
+    data = scrapertoolsV2.get_match(data, patron)
+
+    # Extract the entries
+    patron = "<option value='([^']+)'>([^<]+)</option>"
+    matches = re.compile(patron, re.DOTALL).findall(data)
+    itemlist = []
+    for scrapedvalue, scrapedtitle in matches:
+        thumbnail = ""
+        if item.extra == 'genre':
+            if scrapedtitle.strip() in ['Documental', 'Short', 'News']:
+                continue
+            url = HOST + "/search/?q=&genre%5B%5D=" + scrapedvalue
+            filename = scrapedtitle.lower().replace(' ', '%20')
+            if filename == "ciencia%20ficción":
+                filename = "ciencia%20ficcion"
+            thumbnail = "https://raw.githubusercontent.com/master-1970/resources/master/images/genres/4/verdes/%s.png" \
+                        % filename
+        elif item.extra == 'year':
+            url = HOST + "/search/?q=&year=" + scrapedvalue
+            thumbnail = item.thumbnail
+        else:
+            # http://www.yaske.ro/search/?q=&quality%5B%5D=c9
+            # http://www.yaske.ro/search/?q=&audio%5B%5D=es
+            url = HOST + "/search/?q=&" + item.extra + "%5B%5D=" + scrapedvalue
+            thumbnail = item.thumbnail
+        itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=url, text_color=color1,
+                             thumbnail=thumbnail, contentType='movie', folder=True, viewmode="movie_with_plot"))
+    if item.extra in ['genre', 'audio', 'year']:
+        return sorted(itemlist, key=lambda i: i.title.lower(), reverse=item.extra == 'year')
+    else:
+        return itemlist
+
+
+def findvideos(item):
+    logger.info()
+    itemlist = list()
+    sublist = list()
+
+    # Download the page
+    data = httptools.downloadpage(item.url).data
+    if not item.plot:
+        item.plot = scrapertoolsV2.find_single_match(data, '>Sinopsis</dt> <dd>([^<]+)</dd>')
+        item.plot = scrapertoolsV2.decodeHtmlentities(item.plot)
+    patron = '<option value="([^"]+)"[^>]+'
+    patron += '>([^<]+).*?</i>([^<]+)'
+    matches = re.compile(patron, re.DOTALL).findall(data)
+    for url, idioma, calidad in matches:
+        sublist.append(item.clone(action="play", url=url, folder=False, text_color=color1, quality=calidad.strip(),
+                                  language=idioma.strip()))
+    sublist = servertools.get_servers_itemlist(sublist, lambda i: "Ver en %s %s" % (i.server, i.quality), True)
+
+    # Add the servers found, grouped by language
+    for k in ["Español", "Latino", "Subtitulado", "Ingles"]:
+        lista_idioma = filter(lambda i: i.language == k, sublist)
+        if lista_idioma:
+            itemlist.append(Item(channel=item.channel, title=k, fanart=item.fanart, folder=False,
+                                 text_color=color2, text_bold=True, thumbnail=thumbnail_host))
+            itemlist.extend(lista_idioma)
+
+    # Insert the "Buscar trailer" and "Añadir a la videoteca" items
+    if itemlist and item.extra != "library":
+        title = "%s [Buscar trailer]" % (item.contentTitle)
+        itemlist.insert(0, item.clone(channel="trailertools", action="buscartrailer",
+                                      text_color=color3, title=title, viewmode="list"))
+        if config.get_videolibrary_support():
+            itemlist.append(Item(channel=item.channel, title="Añadir película a la videoteca",
+                                 action="add_pelicula_to_library", url=item.url, text_color="green",
+                                 contentTitle=item.contentTitle, extra="library", thumbnail=thumbnail_host))
+    return itemlist
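
peliculas() serves long result pages in two passes: phase 'a' returns the first 30 matches and re-queues the same URL with next_page='b'; phase 'b' returns the remainder and follows the site's real "Siguiente" link. A standalone sketch of that logic:

    def paginate(matches, page_url, phase, site_next_url):
        # mirrors peliculas(): returns (items_to_show, next_url, next_phase)
        if phase != 'b':
            if len(matches) > 30:
                return matches[:30], page_url, 'b'  # re-enter the same page for the rest
            return matches, "", 'a'                 # short page: no pagination item
        return matches[30:], site_next_url, 'a'     # second half, then the site's next page

    # usage: items, next_url, next_phase = paginate(found, item.url, item.next_page, link)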

Binary file not shown. (Before: 16 KiB, After: 19 KiB)

Binary file not shown. (Before: 28 KiB, After: 28 KiB)

Binary file not shown. (Before: 29 KiB, After: 30 KiB)

Binary file not shown. (Before: 17 KiB, After: 17 KiB)

Binary file not shown. (Before: 19 KiB, After: 19 KiB)

View File

@@ -0,0 +1,48 @@
+{
+    "active": true,
+    "changes": [
+        {
+            "date": "18/07/2017",
+            "description": "Initial version"
+        }
+    ],
+    "find_videos": {
+        "ignore_urls": [],
+        "patterns": [
+            {
+                "pattern": "(?s)https://youtube.googleapis.com.*?docid=([^(?:&|\")]+)",
+                "url": "http://docs.google.com/get_video_info?docid=\\1"
+            }
+        ]
+    },
+    "free": true,
+    "id": "gvideo",
+    "name": "gvideo",
+    "settings": [
+        {
+            "default": false,
+            "enabled": true,
+            "id": "black_list",
+            "label": "Incluir en lista negra",
+            "type": "bool",
+            "visible": true
+        },
+        {
+            "default": 0,
+            "enabled": true,
+            "id": "favorites_servers_list",
+            "label": "Incluir en lista de favoritos",
+            "lvalues": [
+                "No",
+                "1",
+                "2",
+                "3",
+                "4",
+                "5"
+            ],
+            "type": "list",
+            "visible": false
+        }
+    ],
+    "version": 1
+}
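
The find_videos rule above captures the Google docid from an embed URL and rewrites it into a get_video_info URL. A quick demonstration with an assumed embed URL, approximating how servertools applies these patterns:

    import re

    pattern = '(?s)https://youtube.googleapis.com.*?docid=([^(?:&|\")]+)'
    url_tpl = 'http://docs.google.com/get_video_info?docid=\\1'
    sample = 'https://youtube.googleapis.com/embed?docid=AbC123xYz&partnerid=30'
    print(re.sub(pattern, url_tpl, sample))
    # http://docs.google.com/get_video_info?docid=AbC123xYz&partnerid=30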

View File

@@ -0,0 +1,40 @@
+# -*- coding: utf-8 -*-
+import urllib
+
+from core import httptools
+from core import logger
+from core import scrapertools
+
+
+def test_video_exists(page_url):
+    response = httptools.downloadpage(page_url, cookies=False, headers={"Referer": page_url})
+    if "no+existe" in response.data:
+        return False, "[gvideo] El video no existe o ha sido borrado"
+    if "Se+ha+excedido+el" in response.data:
+        return False, "[gvideo] Se ha excedido el número de reproducciones permitidas"
+    return True, ""
+
+
+def get_video_url(page_url, user="", password="", video_password=""):
+    video_urls = []
+    urls = []
+    response = httptools.downloadpage(page_url, cookies=False, headers={"Referer": page_url})
+
+    # Rebuild the cookie header from the Set-Cookie response header
+    cookies = ""
+    cookie = response.headers["set-cookie"].split("HttpOnly, ")
+    for c in cookie:
+        cookies += c.split(";", 1)[0] + "; "
+    data = response.data.decode('unicode-escape')
+    data = urllib.unquote_plus(urllib.unquote_plus(data))
+    headers_string = "|Cookie=" + cookies
+
+    # Each stream is an itag (quality id) plus its URL
+    url_streams = scrapertools.find_single_match(data, 'url_encoded_fmt_stream_map=(.*)')
+    streams = scrapertools.find_multiple_matches(url_streams,
+                                                 'itag=(\d+)&url=(.*?)(?:;.*?quality=.*?(?:,|&)|&quality=.*?(?:,|&))')
+    itags = {'18': '360p', '22': '720p', '34': '360p', '35': '480p', '37': '1080p', '43': '360p', '59': '480p'}
+    for itag, video_url in streams:
+        if video_url in urls:  # compare the bare URL so the duplicate check actually works
+            continue
+        urls.append(video_url)
+        video_urls.append([itags.get(itag, itag + "p"), video_url + headers_string])  # fall back to the raw itag
+    video_urls.sort(key=lambda vu: int(vu[0].replace("p", "")))
+    return video_urls
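
get_video_url() returns each stream URL with a "|Cookie=..." suffix: Kodi treats anything after "|" in a media URL as request headers to send during playback. A usage sketch of that convention (URLs and values are made up):

    import urllib

    def with_headers(url, headers):
        # append playback headers Kodi-style: url|Header1=value1&Header2=value2
        return url + "|" + urllib.urlencode(headers)

    play_url = with_headers("http://r1.googlevideo.com/videoplayback?id=xyz",
                            {"Cookie": "DRIVE_STREAM=abc", "Referer": "https://docs.google.com"})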