Merge pull request #3 from alfa-addon/master

Merge with base addon
This commit is contained in:
devalls
2017-08-04 11:58:58 +02:00
committed by GitHub
12 changed files with 213 additions and 589 deletions

View File

@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="0.0.7" provider-name="unknown">
<addon id="plugin.video.alfa" name="Alfa" version="0.0.8" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -9,17 +9,32 @@
</extension>
<extension point="xbmc.addon.metadata">
<summary lang="es">Sumario en Español</summary>
<news>[B]Estos son los cambios para esta versión:[/B][CR]
[COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
[I]- Elitetorrent
- Seriespapaya
- Newpct1 -- [COLOR red]Momentáneamente búsqueda deshabilitada[/COLOR][/I][CR]
<assets>
<icon>icon.png</icon>
<fanart>fanart.jpg</fanart>
<screenshot>resources/media/general/ss/1.jpg</screenshot>
<screenshot>resources/media/general/ss/2.jpg</screenshot>
<screenshot>resources/media/general/ss/3.jpg</screenshot>
<screenshot>resources/media/general/ss/4.jpg</screenshot>
</assets>
<news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Arreglos de canales[/B][/COLOR]
[I]- Seriespapaya
- Playmax - Gracias a d3v3l0p1n
- Seriesblanco - Posible error en versiones anteriores a kodi 16 por https
- AnimesHD
- Cinetux[/I]
[COLOR green][B]Servidor Nuevo[/B][/COLOR]
[I]- Vidlox
[COLOR green][B]Arreglos internos[/B][/COLOR]
[I]- videolibrary_service -- ahora no actualiza una serie de un canal, si está desactivado[/I]
[I]- platformtools - posible solución de favoritos
- videolibrarytools - solución a añadir películas de varios canales[/I]
[COLOR blue]Gracias a devalls por su cooperación en esta release.[/COLOR]
</news>
<description lang="es">Descripción en Español</description>
<summary lang="en">English summary</summary>
<description lang="en">English description</description>
<disclaimer>[COLOR red]The owners and submitters to this addon do not host or distribute any of the content displayed by these addons nor do they have any affiliation with the content providers.[/COLOR]</disclaimer>
<platform>all</platform>
<license>GNU GPL v3</license>
<forum>foro</forum>

View File

@@ -1,190 +1,185 @@
# -*- coding: utf-8 -*-
import re
import urllib
from core import httptools
from core import logger
from core import scrapertools
from core.item import Item
tgenero = {"Comedia": "https://s7.postimg.org/ne9g9zgwb/comedia.png",
"Drama": "https://s16.postimg.org/94sia332d/drama.png",
"Acción": "https://s3.postimg.org/y6o9puflv/accion.png",
"Aventura": "https://s10.postimg.org/6su40czih/aventura.png",
"Romance": "https://s15.postimg.org/fb5j8cl63/romance.png",
"Ciencia ficción": "https://s9.postimg.org/diu70s7j3/cienciaficcion.png",
"Terror": "https://s7.postimg.org/yi0gij3gb/terror.png",
"Fantasía": "https://s13.postimg.org/65ylohgvb/fantasia.png",
"Misterio": "https://s1.postimg.org/w7fdgf2vj/misterio.png",
"Crimen": "https://s4.postimg.org/6z27zhirx/crimen.png",
"Hentai": "https://s29.postimg.org/aamrngu2f/hentai.png",
"Magia": "https://s9.postimg.org/nhkfzqffj/magia.png",
"Psicológico": "https://s13.postimg.org/m9ghzr86f/psicologico.png",
"Sobrenatural": "https://s9.postimg.org/6hxbvd4ov/sobrenatural.png",
"Torneo": "https://s2.postimg.org/ajoxkk9ih/torneo.png",
"Thriller": "https://s22.postimg.org/5y9g0jsu9/thriller.png",
"Otros": "https://s30.postimg.org/uj5tslenl/otros.png"}
host = "http://www.animeshd.tv"
headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
['Referer', host]]
def mainlist(item):
    """Build the channel root menu (latest, all series, genres, search)."""
    logger.info()
    # (title, action, url, image) — thumbnail and fanart share the same image
    menu_entries = [
        ("Ultimas", "lista", host + '/ultimos',
         'https://s22.postimg.org/cb7nmhwv5/ultimas.png'),
        ("Todas", "lista", host + '/buscar?t=todos&q=',
         'https://s18.postimg.org/fwvaeo6qh/todas.png'),
        ("Generos", "generos", host,
         'https://s3.postimg.org/5s9jg2wtf/generos.png'),
        ("Buscar", "search", host + '/buscar?t=todos&q=',
         'https://s30.postimg.org/pei7txpa9/buscar.png'),
    ]
    return [item.clone(title=title, action=action, url=url,
                       thumbnail=img, fanart=img)
            for title, action, url, img in menu_entries]
def get_source(url):
    """Download *url* and strip whitespace/markup that would break the regexes."""
    logger.info()
    raw = httptools.downloadpage(url).data
    # collapse newlines, tabs, nbsp, <br>, space runs, quotes and parentheses
    return re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}|"|\(|\)', "", raw)
def lista(item):
    """List the series found at item.url, plus a 'next page' entry.

    Returns Items with action='episodios' and contentSerieName set.
    """
    logger.info()
    itemlist = []
    # NOTE(review): the original built a urlencoded 'post' payload here when
    # item.extra == 'episodios' but never sent it anywhere — dead code removed.
    data = get_source(item.url)
    patron = 'class=anime><div class=cover style=background-image: url(.*?)>.*?<a href=(.*?)><h2>(.*?)<\/h2><\/a><\/div>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
        itemlist.append(item.clone(action='episodios',
                                   title=scrapedtitle,
                                   url=scrapedurl,
                                   # thumbnails are site-relative paths
                                   thumbnail=host + scrapedthumbnail,
                                   contentSerieName=scrapedtitle
                                   ))
    # Pagination: link following the highlighted page number
    next_page = scrapertools.find_single_match(data,
                                               '<li class=active><span>.*?<\/span><\/li><li><a href=(.*?)>.*?<\/a><\/li>')
    next_page_url = scrapertools.decodeHtmlentities(next_page)
    if next_page_url != "":
        itemlist.append(Item(channel=item.channel,
                             action="lista",
                             title=">> Página siguiente",
                             url=next_page_url,
                             thumbnail='https://s16.postimg.org/9okdu7hhx/siguiente.png'
                             ))
    return itemlist
def search(item, texto):
    """Global-search entry point: append the query and delegate to lista()."""
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = item.url + texto
    # empty queries never need the scraper; keep the try body minimal
    if texto == '':
        return []
    try:
        return lista(item)
    # Catch everything (but not KeyboardInterrupt/SystemExit) so a broken
    # channel never interrupts the global search
    except Exception:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def generos(item):
    """List genre entries scraped from the site's home page."""
    logger.info()
    itemlist = []
    data = get_source(item.url)
    patron = '<li class=><a href=http:\/\/www\.animeshd\.tv\/genero\/(.*?)>(.*?)<\/a><\/li>'
    for scrapedurl, scrapedtitle in re.compile(patron, re.DOTALL).findall(data):
        title = scrapertools.decodeHtmlentities(scrapedtitle)
        if title == 'Recuentos de la vida':
            # the site's "slice of life" bucket is shown under the generic icon
            title = 'Otros'
        genero = scrapertools.decodeHtmlentities(scrapedurl)
        itemlist.append(item.clone(action='lista',
                                   title=title,
                                   url='http://www.animeshd.tv/genero/%s' % genero,
                                   # fall back to an empty thumbnail for unknown genres
                                   thumbnail=tgenero.get(title, '')))
    return itemlist
def episodios(item):
    """List the episodes of the series at item.url, tagged with their language."""
    logger.info()
    data = get_source(item.url)
    patron = '<li id=epi-.*? class=list-group-item ><a href=(.*?) class=badge.*?width=25 title=(.*?)> <\/span>(.*?)<\/li>'
    return [item.clone(action='findvideos',
                       title='%s (%s)' % (ep_title, lang),
                       url=ep_url,
                       language=lang)
            for ep_url, lang, ep_title in re.compile(patron, re.DOTALL).findall(data)]
def findvideos(item):
    """Resolve the embedded players on the episode page into playable items."""
    logger.info()
    itemlist = []
    page = get_source(item.url)
    iframe_urls = re.compile('<iframe.*?src=(.*?) frameborder=0', re.DOTALL).findall(page)
    for video_url in iframe_urls:
        # each iframe hosts a JWPlayer-style source list
        player = get_source(video_url).replace("'", '')
        sources = re.compile('file:(.*?),label:(.*?),type', re.DOTALL).findall(player)
        for stream_url, quality in sources:
            itemlist.append(item.clone(action='play',
                                       title=item.contentSerieName + ' (%s)' % quality,
                                       url=stream_url,
                                       quality=quality))
    return itemlist
# -*- coding: utf-8 -*-
import re
import urllib
from core import httptools
from core import logger
from core import scrapertools
from core.item import Item
from core import servertools
tgenero = {"Comedia": "https://s7.postimg.org/ne9g9zgwb/comedia.png",
"Drama": "https://s16.postimg.org/94sia332d/drama.png",
"Acción": "https://s3.postimg.org/y6o9puflv/accion.png",
"Aventura": "https://s10.postimg.org/6su40czih/aventura.png",
"Romance": "https://s15.postimg.org/fb5j8cl63/romance.png",
"Ciencia ficción": "https://s9.postimg.org/diu70s7j3/cienciaficcion.png",
"Terror": "https://s7.postimg.org/yi0gij3gb/terror.png",
"Fantasía": "https://s13.postimg.org/65ylohgvb/fantasia.png",
"Misterio": "https://s1.postimg.org/w7fdgf2vj/misterio.png",
"Crimen": "https://s4.postimg.org/6z27zhirx/crimen.png",
"Hentai": "https://s29.postimg.org/aamrngu2f/hentai.png",
"Magia": "https://s9.postimg.org/nhkfzqffj/magia.png",
"Psicológico": "https://s13.postimg.org/m9ghzr86f/psicologico.png",
"Sobrenatural": "https://s9.postimg.org/6hxbvd4ov/sobrenatural.png",
"Torneo": "https://s2.postimg.org/ajoxkk9ih/torneo.png",
"Thriller": "https://s22.postimg.org/5y9g0jsu9/thriller.png",
"Otros": "https://s30.postimg.org/uj5tslenl/otros.png"}
host = "http://www.animeshd.tv"
headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
['Referer', host]]
def mainlist(item):
    """Build the channel root menu (latest, all series, genres, search)."""
    logger.info()
    # (title, action, url, image) — thumbnail and fanart share the same image
    entries = (
        ("Ultimas", "lista", host + '/ultimos',
         'https://s22.postimg.org/cb7nmhwv5/ultimas.png'),
        ("Todas", "lista", host + '/buscar?t=todos&q=',
         'https://s18.postimg.org/fwvaeo6qh/todas.png'),
        ("Generos", "generos", host,
         'https://s3.postimg.org/5s9jg2wtf/generos.png'),
        ("Buscar", "search", host + '/buscar?t=todos&q=',
         'https://s30.postimg.org/pei7txpa9/buscar.png'),
    )
    itemlist = []
    for title, action, url, img in entries:
        itemlist.append(item.clone(title=title, action=action, url=url,
                                   thumbnail=img, fanart=img))
    return itemlist
def get_source(url):
    """Download *url* and strip whitespace/markup that would break the regexes."""
    logger.info()
    response_body = httptools.downloadpage(url).data
    # collapse newlines, tabs, nbsp, <br>, space runs, quotes and parentheses
    return re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}|"|\(|\)', "", response_body)
def lista(item):
    """List the series found at item.url, plus a 'next page' entry.

    Returns Items with action='episodios' and contentSerieName set.
    """
    logger.info()
    itemlist = []
    # NOTE(review): the original built a urlencoded 'post' payload here when
    # item.extra == 'episodios' but never sent it anywhere — dead code removed.
    data = get_source(item.url)
    patron = 'class=anime><div class=cover style=background-image: url(.*?)>.*?<a href=(.*?)><h2>(.*?)<\/h2><\/a><\/div>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
        itemlist.append(item.clone(action='episodios',
                                   title=scrapedtitle,
                                   url=scrapedurl,
                                   # thumbnails are site-relative paths
                                   thumbnail=host + scrapedthumbnail,
                                   contentSerieName=scrapedtitle
                                   ))
    # Pagination: link following the highlighted page number
    next_page = scrapertools.find_single_match(data,
                                               '<li class=active><span>.*?<\/span><\/li><li><a href=(.*?)>.*?<\/a><\/li>')
    next_page_url = scrapertools.decodeHtmlentities(next_page)
    if next_page_url != "":
        itemlist.append(Item(channel=item.channel,
                             action="lista",
                             title=">> Página siguiente",
                             url=next_page_url,
                             thumbnail='https://s16.postimg.org/9okdu7hhx/siguiente.png'
                             ))
    return itemlist
def search(item, texto):
    """Global-search entry point: append the query and delegate to lista()."""
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = item.url + texto
    # empty queries never need the scraper; keep the try body minimal
    if texto == '':
        return []
    try:
        return lista(item)
    # Catch everything (but not KeyboardInterrupt/SystemExit) so a broken
    # channel never interrupts the global search
    except Exception:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def generos(item):
    """List genre entries scraped from the site's home page."""
    logger.info()
    itemlist = []
    data = get_source(item.url)
    patron = '<li class=><a href=http:\/\/www\.animeshd\.tv\/genero\/(.*?)>(.*?)<\/a><\/li>'
    for raw_slug, raw_title in re.compile(patron, re.DOTALL).findall(data):
        title = scrapertools.decodeHtmlentities(raw_title)
        if title == 'Recuentos de la vida':
            # the site's "slice of life" bucket is shown under the generic icon
            title = 'Otros'
        genero = scrapertools.decodeHtmlentities(raw_slug)
        itemlist.append(item.clone(action='lista',
                                   title=title,
                                   url='http://www.animeshd.tv/genero/%s' % genero,
                                   # fall back to an empty thumbnail for unknown genres
                                   thumbnail=tgenero.get(title, '')))
    return itemlist
def episodios(item):
    """List the episodes of the series at item.url, tagged with their language."""
    logger.info()
    data = get_source(item.url)
    patron = '<li id=epi-.*? class=list-group-item ><a href=(.*?) class=badge.*?width=25 title=(.*?)> <\/span>(.*?)<\/li>'
    itemlist = []
    for ep_url, lang, ep_title in re.compile(patron, re.DOTALL).findall(data):
        itemlist.append(item.clone(action='findvideos',
                                   title='%s (%s)' % (ep_title, lang),
                                   url=ep_url,
                                   language=lang))
    return itemlist
def findvideos(item):
    """Delegate hoster detection to servertools and tag results for playback."""
    logger.info()
    data = get_source(item.url)
    # servertools scans the page for every known video hoster
    itemlist = list(servertools.find_video_items(data=data))
    for videoitem in itemlist:
        videoitem.channel = item.channel
        videoitem.title = item.title + ' (%s)' % videoitem.server
        videoitem.action = 'play'
    return itemlist

View File

@@ -1,78 +0,0 @@
{
"id": "pelisdanko",
"name": "PelisDanko",
"language": "es",
"active": true,
"adult": false,
"version": 1,
"changes": [
{
"date": "15/03/2017",
"description": "limpieza código"
},
{
"date": "08/07/2016",
"description": "Correcciones y adaptacion a la nueva version."
}
],
"thumbnail": "pelisdanko.png",
"banner": "pelisdanko.png",
"categories": [
"movie",
"latino",
"vos"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en búsqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Películas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "filterlanguages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 3,
"enabled": true,
"visible": true,
"lvalues": [
"VOSE",
"Latino",
"Español",
"No filtrar"
]
},
{
"id": "filterlinks",
"type": "list",
"label": "Mostrar enlaces de tipo...",
"default": 2,
"enabled": true,
"visible": true,
"lvalues": [
"Solo Descarga",
"Solo Online",
"No filtrar"
]
}
]
}

View File

@@ -1,308 +0,0 @@
# -*- coding: utf-8 -*-
import re
from core import config
from core import logger
from core import scrapertools
from core.item import Item
__modo_grafico__ = config.get_setting('modo_grafico', 'pelisdanko')
host = "http://pelisdanko.com"
art = "http://pelisdanko.com/img/background.jpg"
def mainlist(item):
    """Root menu of the PelisDanko channel."""
    logger.info()
    return [
        item.clone(action="novedades", title="Novedades", url=host + "/novedades", fanart=art),
        item.clone(action="novedades", title="Estrenos", url=host + "/estrenos", fanart=art),
        item.clone(action="novedades", title="Populares", url=host + "/populares", fanart=art),
        item.clone(action="actualizadas", title="Películas actualizadas", url=host, fanart=art),
        item.clone(action="indices", title="Índices", fanart=art),
        # empty entry acts as a visual separator
        item.clone(title="", action=""),
        item.clone(action="search", title="Buscar...", fanart=art),
        item.clone(action="configuracion", title="Configurar canal...", fanart=art,
                   text_color="gold", folder=False),
    ]
def configuracion(item):
    """Open the channel settings dialog and refresh the listing afterwards."""
    from platformcode import platformtools
    result = platformtools.show_channel_settings()
    # redraw so any changed setting is reflected immediately
    platformtools.itemlist_refresh()
    return result
def search(item, texto):
    """Global-search entry point for PelisDanko: build the query URL and list results."""
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = "http://pelisdanko.com/busqueda?terms=%s" % texto
    try:
        return novedades(item)
    # Catch everything (but not KeyboardInterrupt/SystemExit) so a broken
    # channel never interrupts the global search
    except Exception:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def newest(categoria):
    """Entry point for the global 'Novedades' section.

    Returns the latest movies when *categoria* is 'peliculas'; an empty list
    for other categories or on any scraping error.
    """
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria == 'peliculas':
            item.url = "http://pelisdanko.com/novedades"
            itemlist = novedades(item)
            # drop the trailing ">> next page" entry; 'Novedades' does not paginate
            if itemlist[-1].action == "novedades":
                itemlist.pop()
    # Catch everything (but not KeyboardInterrupt/SystemExit) so a broken
    # channel never interrupts the global 'Novedades' listing
    except Exception:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
    return itemlist
def novedades(item):
    """Scrape a movie listing page (novedades/estrenos/populares/search) into Items.

    Each film becomes an Item with action='enlaces'; a '>> next page' entry is
    appended when the site exposes a rel="next" link.
    """
    logger.info()
    itemlist = []
    # Download the page
    data = scrapertools.downloadpage(item.url)
    bloque = scrapertools.find_multiple_matches(data, '<div class="col-xs-[\d] col-sm-[\d] col-md-[\d] col-lg-[\d]'
                                                      ' text-center"(.*?)</div>')
    for match in bloque:
        # collect every quality badge of the card into one bracketed string
        calidades = scrapertools.find_multiple_matches(match, '<span class="badge badge-critic badge-qualities[^>]+>'
                                                              '([^<]+)</span>')
        calidad = "[COLOR darkseagreen] "
        for quality in calidades:
            calidad += "[" + quality + "]"
        patron = 'title="([^"]+)".*?href="([^"]+)".*?class="img-responsive img-thumbnail" src="([^"]+)"'
        matches = scrapertools.find_multiple_matches(match, patron)
        for scrapedtitle, scrapedurl, scrapedthumbnail in matches:
            # keep the raw title before decorating it with BBCode markup
            contentTitle = scrapedtitle[:]
            scrapedtitle = "[COLOR darkorange][B]" + scrapedtitle + "[/B][/COLOR]" + calidad + "[/COLOR]"
            logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
            itemlist.append(item.clone(action="enlaces", title=bbcode_kodi2html(scrapedtitle),
                                       url=scrapedurl, thumbnail=scrapedthumbnail, fanart=scrapedthumbnail,
                                       fulltitle=contentTitle, filtro=False, contentTitle=contentTitle,
                                       context=["buscar_trailer"], contentType="movie", trailer=True))
    # Look for a next-page link...
    next_page_url = scrapertools.find_single_match(data, '<a href="([^"]+)" rel="next">')
    if len(next_page_url) > 0:
        itemlist.append(item.clone(action="novedades", title=">> Página siguiente", url=next_page_url))
    return itemlist
def actualizadas(item):
    """Scrape the 'Últimas actualizaciones' block of the home page into Items."""
    logger.info()
    itemlist = []
    # Download the page
    data = scrapertools.downloadpage(item.url)
    # restrict scraping to the "Últimas actualizaciones" section of the page
    bloque_big = scrapertools.find_single_match(data, 'Últimas actualizaciones(.*?)<div class="col-xs-10 col-md-8 '
                                                      'text-left">')
    bloque = scrapertools.find_multiple_matches(bloque_big, '<div class="col-xs-[\d] col-sm-[\d] col-md-[\d]'
                                                            ' col-lg-[\d] text-center"(.*?)<br><br>')
    for match in bloque:
        # collect every quality badge of the card into one bracketed string
        calidades = scrapertools.find_multiple_matches(match, '<span class="badge badge-critic badge-qualities[^>]+>'
                                                              '([^<]+)</span>')
        calidad = "[COLOR darkseagreen] "
        for quality in calidades:
            calidad += "[" + quality + "]"
        # normalise the language flags: ES -> CAST; anything not CAST/LAT -> VOSE
        languages = scrapertools.find_multiple_matches(match, '<img width="28".*?alt="([^"]+)"')
        idiomas = " ("
        for idioma in languages:
            idioma = idioma.replace('ES_', '').replace('ES', 'CAST')
            if idioma != "CAST" and idioma != "LAT":
                idioma = "VOSE"
            idiomas += idioma + "/"
        patron = 'title="([^"]+)".*?href="([^"]+)".*?class="img-responsive img-thumbnail" src="([^"]+)"'
        matches = scrapertools.find_multiple_matches(match, patron)
        for scrapedtitle, scrapedurl, scrapedthumbnail in matches:
            # keep the raw title before decorating it with BBCode markup
            contentTitle = scrapedtitle[:]
            # idiomas[:-1] drops the trailing "/" separator
            scrapedtitle = "[COLOR darkorange][B]" + scrapedtitle + "[/B][/COLOR]" + calidad + idiomas[
                :-1] + ")[/COLOR]"
            logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
            itemlist.append(item.clone(action="enlaces", title=bbcode_kodi2html(scrapedtitle),
                                       url=scrapedurl, thumbnail=scrapedthumbnail, fanart=scrapedthumbnail,
                                       fulltitle=contentTitle, filtro=False, contentTitle=contentTitle,
                                       context=["buscar_trailer"], contentType="movie"))
    return itemlist
def indices(item):
    """Menu of browsing indices (genre, alphabetical, language, quality, country)."""
    logger.info()
    item.text_color = "orchid"
    # fulltitle carries the URL path segment later consumed by indice_list()
    entries = [("Género", "genero"),
               ("Alfabético", "letra"),
               ("Idioma", "idioma"),
               ("Calidad", "calidad"),
               ("Nacionalidad", "nacionalidad")]
    return [item.clone(action="indice_list", title=title, url=host, fulltitle=slug)
            for title, slug in entries]
def indice_list(item):
    """List the entries of one index; the index slug travels in item.fulltitle."""
    logger.info()
    # Download the page
    data = scrapertools.downloadpage(item.url)
    patron = '<a href="(http://pelisdanko.com/%s/[^"]+)">([^<]+)</a>' % item.fulltitle
    return [item.clone(action="novedades", title=entry_title.capitalize(), url=entry_url)
            for entry_url, entry_title in scrapertools.find_multiple_matches(data, patron)]
def enlaces(item):
    """Show the link sections (online and/or download) of one film page.

    Reads the user's language/link-type filters from the channel settings and
    delegates row scraping to bloque_enlaces(); also resolves the trailer URL.
    """
    logger.info()
    item.extra = ""
    item.text_color = ""
    itemlist = []
    # Download the page
    data = scrapertools.downloadpage(item.url)
    data = re.sub(r"\n|\r|\t|\s{2}", '', data)
    item.fanart = scrapertools.find_single_match(data, "CUSTOM BACKGROUND.*?url\('([^']+)'")
    item.infoLabels["plot"] = scrapertools.find_single_match(data, 'dt>Sinopsis</dt> <dd class=[^>]+>(.*?)</dd>')
    year = scrapertools.find_single_match(data, '<dt>Estreno</dt> <dd>(\d+)</dd>')
    try:
        from core import tmdb
        item.infoLabels['year'] = int(year)
        # Fetch basic metadata for the film via TMDb (multithreaded internally);
        # best-effort: a missing year or TMDb failure is silently ignored
        tmdb.set_infoLabels_item(item, __modo_grafico__)
    except:
        pass
    filtro_idioma = config.get_setting("filterlanguages", item.channel)
    filtro_enlaces = config.get_setting("filterlinks", item.channel)
    dict_idiomas = {'CAST': 2, 'LAT': 1, 'VOSE': 0}
    # filtro_enlaces: 0 = download only, 1 = online only, 2 = no filter
    if filtro_enlaces != 0:
        itemlist.append(item.clone(action="", title="Enlaces Online", text_color="dodgerblue", text_bold=True))
        itemlist = bloque_enlaces(data, filtro_idioma, dict_idiomas, itemlist, "ss", item)
    if filtro_enlaces != 1:
        itemlist.append(item.clone(action="", title="Enlaces Descarga", text_color="dodgerblue", text_bold=True))
        itemlist = bloque_enlaces(data, filtro_idioma, dict_idiomas, itemlist, "dd", item)
    # resolve the trailer via the site's AJAX endpoint
    trailer_id = scrapertools.find_single_match(data, 'data:\s*\{\s*id:\s*"([^"]+)"')
    data_trailer = scrapertools.downloadpage("http://pelisdanko.com/trailer", post="id=%s" % trailer_id)
    url_trailer = scrapertools.find_single_match(data_trailer, 'src="([^"]+)"')
    if url_trailer != "":
        url_trailer = url_trailer.replace("embed/", "watch?v=")
        item.infoLabels['trailer'] = url_trailer
    itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler",
                               text_color="magenta"))
    return itemlist
def bloque_enlaces(data, filtro_idioma, dict_idiomas, itemlist, type, item):
    """Append the link rows of one tab to *itemlist* and return it.

    *type* selects the tab: "ss" (streaming) or "dd" (download).
    filtro_idioma == 3 means "no language filter"; otherwise only rows whose
    normalised flag maps (via dict_idiomas) to that value are listed, and a
    single summary entry is appended for the filtered-out languages.
    """
    logger.info()
    bloque = scrapertools.find_single_match(data, '<div role="tabpanel" class="tab-pane fade" id="tab-' +
                                            type + '">(.*?)</table>')
    patron = '<tr class="rip hover".*?data-slug="([^"]+)".*?src="http://pelisdanko.com/img/flags/(.*?).png"' \
             '.*?<span class="label label-default quality[^>]+>([^<]+)</span>.*?<td class="small">([^<]+)</td>'
    matches = scrapertools.find_multiple_matches(bloque, patron)
    filtrados = []
    for slug, flag, quality, date in matches:
        # normalise flags: ES -> CAST, ES_LAT -> LAT, anything else -> VOSE
        if flag != "ES" and flag != "ES_LAT":
            flag = "VOSE"
        flag = flag.replace('ES_LAT', 'LAT').replace('ES', 'CAST')
        scrapedurl = "%s/%s/%s?#%s" % (item.url, slug, type, type)
        scrapedtitle = " [COLOR firebrick]Mostrar enlaces: [/COLOR][COLOR goldenrod][" \
                       + flag + "/" + quality + "][/COLOR][COLOR khaki] " + date + "[/COLOR]"
        # item.filtro is set when the user asked to see previously filtered links
        if filtro_idioma == 3 or item.filtro:
            itemlist.append(item.clone(title=bbcode_kodi2html(scrapedtitle), action="findvideos",
                                       url=scrapedurl, id_enlaces=slug, calidad=quality))
        else:
            idioma = dict_idiomas[flag]
            if idioma == filtro_idioma:
                itemlist.append(item.clone(title=bbcode_kodi2html(scrapedtitle),
                                           action="findvideos", url=scrapedurl, id_enlaces=slug))
            else:
                # remember which languages were hidden, for the summary entry
                if flag not in filtrados:
                    filtrados.append(flag)
    if filtro_idioma != 3:
        if len(filtrados) > 0:
            title = bbcode_kodi2html("[COLOR orangered] Mostrar enlaces filtrados en %s[/COLOR]") % ", ".join(
                filtrados)
            itemlist.append(item.clone(title=title, action="enlaces", url=item.url, filtro=True))
    return itemlist
def findvideos(item):
    """Resolve the hoster links behind one link row and offer playback.

    item.url ends in "ss" (streaming) or "dd" (download); that suffix selects
    the site's redirection prefix used to expose the real links.
    """
    logger.info()
    itemlist = []
    if item.url[-2:] == "ss":
        prefix = "strms"
    else:
        prefix = "lnks"
    # Download the page
    data = scrapertools.downloadpage(item.url)
    # Parameters for the redirect page that exposes the links
    data_slug = scrapertools.find_single_match(data, '<div id="ad" data-id="[^"]+" data-slug="([^"]+)"')
    data_id = scrapertools.find_single_match(data, '<tr class="rip hover" data-id="([^"]+)"')
    url = "http://pelisdanko.com/%s/%s/%s/%s" % (prefix, data_id, item.id_enlaces, data_slug)
    # empty POST body: the endpoint requires a POST request
    data = scrapertools.downloadpage(url, post="")
    from core import servertools
    video_item_list = servertools.find_video_items(data=data)
    for video_item in video_item_list:
        title = "[COLOR green]%s[/COLOR] | [COLOR darkorange][%s][/COLOR]" % (video_item.server, item.calidad)
        itemlist.append(item.clone(title=bbcode_kodi2html(title), url=video_item.url, action="play",
                                   server=video_item.server, text_color=""))
    # Option "add this movie to the video library"
    if config.get_videolibrary_support() and len(itemlist) > 0 and item.category != "Cine":
        itemlist.append(Item(channel=item.channel, title="Añadir película a la videoteca", url=item.url,
                             infoLabels={'title': item.fulltitle}, action="add_pelicula_to_library",
                             fulltitle=item.fulltitle, text_color="green", id_enlaces=item.id_enlaces))
    return itemlist
def bbcode_kodi2html(text):
    """Translate Kodi BBCode markup into HTML on Plex/Mediaserver platforms.

    On Kodi itself the text is returned unchanged.
    """
    if config.get_platform().startswith("plex") or config.get_platform().startswith("mediaserver"):
        # 're' is imported at module level; the old redundant local import was removed
        text = re.sub(r'\[COLOR\s([^\]]+)\]',
                      r'<span style="color: \1">',
                      text)
        text = text.replace('[/COLOR]', '</span>') \
            .replace('[CR]', '<br>') \
            .replace('[B]', '<strong>') \
            .replace('[/B]', '</strong>') \
            .replace('"color: white"', '"color: auto"')
    return text

2
plugin.video.alfa/channels/seriesblanco.py Executable file → Normal file
View File

@@ -46,7 +46,7 @@ def mainlist(item):
url=urlparse.urljoin(HOST, "fichas_creadas/"), thumbnail=thumb_series))
itemlist.append(Item(channel=item.channel, title="Series por género", action="generos",
url=HOST, thumbnail=thumb_series))
itemlist.append(Item(channel=item.channel, title="Buscar...", action="search", url=HOST + "finder.php",
itemlist.append(Item(channel=item.channel, title="Buscar...", action="search", url="https://seriesblanco.com/finder.php",
thumbnail=thumb_buscar))
itemlist = filtertools.show_option(itemlist, item.channel, list_idiomas, CALIDADES)

View File

@@ -197,7 +197,7 @@ def findvideos(item):
quality=quality,
) for lang, date, server, url, linkType, quality, uploader in links]
itemlist = filtertools.get_links(itemlist, item.channel, list_idiomas, CALIDADES)
itemlist = filtertools.get_links(itemlist, item, list_idiomas, CALIDADES)
return itemlist

2
plugin.video.alfa/core/videolibrarytools.py Executable file → Normal file
View File

@@ -132,7 +132,7 @@ def save_movie(item):
for c in subcarpetas:
code = scrapertools.find_single_match(c, '\[(.*?)\]')
if code and code in item.infoLabels['code']:
path = c
path = filetools.join(MOVIES_PATH, c)
_id = code
break

6
plugin.video.alfa/platformcode/platformtools.py Executable file → Normal file
View File

@@ -375,7 +375,7 @@ def set_context_commands(item, parent_item):
((item.channel not in ["favorites", "videolibrary", "help", ""]
or item.action in ["update_videolibrary"]) and parent_item.channel != "favorites"):
context_commands.append((config.get_localized_string(30155), "XBMC.RunPlugin(%s?%s)" %
(sys.argv[0], item.clone(channel="favoritos", action="addFavourite",
(sys.argv[0], item.clone(channel="favorites", action="addFavourite",
from_channel=item.channel,
from_action=item.action).tourl())))
@@ -660,7 +660,7 @@ def get_dialogo_opciones(item, default_action, strm):
dialog_ok("No puedes ver ese vídeo porque...", "El servidor donde está alojado no está",
"soportado en alfa todavía", item.url)
if item.channel == "favoritos":
if item.channel == "favorites":
# "Quitar de favoritos"
opciones.append(config.get_localized_string(30154))
@@ -699,7 +699,7 @@ def set_opcion(item, seleccion, opciones, video_urls):
# "Añadir a favoritos":
elif opciones[seleccion] == config.get_localized_string(30155):
from channels import favorites
item.from_channel = "favoritos"
item.from_channel = "favorites"
favorites.addFavourite(item)
salir = True

Binary file not shown.

After

Width:  |  Height:  |  Size: 239 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 222 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 239 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 392 KiB