This commit is contained in:
Unknown
2018-10-10 15:31:53 -03:00
parent 41cb83611c
commit 00310bb802
8 changed files with 656 additions and 4 deletions

View File

@@ -0,0 +1,68 @@
{
"id": "bloghorror",
"name": "BlogHorror",
"active": true,
"adult": false,
"language": [""],
"thumbnail": "",
"banner": "",
"categories": [
"movie",
"vo",
"torrent"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - Latino",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - terror",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"LAT"
]
}
]
}

View File

@@ -0,0 +1,234 @@
# -*- coding: utf-8 -*-
# -*- Channel BlogHorror -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import os
import re
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
host = 'http://bloghorror.com/'
fanart = 'http://bloghorror.com/wp-content/uploads/2015/04/bloghorror-2017-x.jpg'
def get_source(url):
    """Download *url* and return its HTML with layout whitespace collapsed.

    Newlines, tabs, &nbsp;, <br> and runs of 2+ spaces are stripped so the
    scraping regexes can match across what were originally several lines.
    """
    logger.info()
    raw = httptools.downloadpage(url).data
    return re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", raw)
def mainlist(item):
    """Build the channel's root menu: full catalogue, Asian movies, search."""
    logger.info()
    itemlist = [
        Item(channel=item.channel, fanart=fanart, title="Todas", action="list_all",
             url=host, thumbnail=get_thumb('all', auto=True)),
        Item(channel=item.channel, fanart=fanart, title="Asiaticas", action="list_all",
             url=host + '/category/asiatico', thumbnail=get_thumb('asiaticas', auto=True)),
        Item(channel=item.channel, fanart=fanart, title='Buscar', action="search",
             url=host + '?s=', pages=3, thumbnail=get_thumb('search', auto=True)),
    ]
    return itemlist
def list_all(item):
    """List every movie found at item.url and append a 'next page' entry.

    Scrapes the blog's post grid for (url, title, thumbnail), extracts the
    year from the title, enriches everything through TMDB and adds
    pagination when the site exposes a rel-next link.
    """
    logger.info()
    itemlist = []
    data = get_source(item.url)
    # NOTE(review): '<divclass=' (no space) only matches because get_source()
    # collapses runs of 2+ whitespace; a single space in the markup would
    # survive and break this pattern — confirm against the live site.
    patron = '<divclass="post-thumbnail">.?<.*?href="([^"]+)" title="([^"]+)".*?src="([^"]+)".*?'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        url = scrapedurl
        # Titles look like "Name (2018) ..." — keep only the name part and
        # pull the 4-digit year separately for TMDB matching.
        title = scrapertools.find_single_match(scrapedtitle, '(.*?)(?:|\(|\| )\d{4}').strip()
        year = scrapertools.find_single_match(scrapedtitle, '(\d{4})')
        thumbnail = scrapedthumbnail
        new_item = Item(channel=item.channel, fanart=fanart, title=title, url=url, action='findvideos',
                        thumbnail=thumbnail, infoLabels={'year':year})
        new_item.contentTitle=title
        itemlist.append(new_item)
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # Paginacion
    if itemlist != []:
        next_page = scrapertools.find_single_match(data, '<a class="next" href="([^"]+)"')
        if next_page != '':
            itemlist.append(Item(channel=item.channel, fanart=fanart, action="list_all", title='Siguiente >>>', url=next_page))
        else:
            # NOTE(review): assigning the empty next_page here has no effect —
            # the function returns immediately after.
            item.url=next_page
    return itemlist
def section(item):
    """List genre or year links scraped from the site's navigation menu.

    The submenu is selected by item.title ('Generos' or one containing
    'Años'); every link found becomes a 'list_all' entry.
    """
    logger.info()
    data = get_source(host)
    if item.title == 'Generos':
        data = scrapertools.find_single_match(data, 'tabindex="0">Generos<.*?</ul>')
    elif 'Años' in item.title:
        data = scrapertools.find_single_match(data, 'tabindex="0">Año<.*?</ul>')
    links = re.compile('href="([^"]+)">([^<]+)</a>', re.DOTALL).findall(data)
    return [Item(channel=item.channel, fanart=fanart, title=label, url=link,
                 action='list_all', pages=3)
            for link, label in links]
def findvideos(item):
    """Return playable torrent links for the movie at item.url.

    First tries to pair each magnet with its announced quality inside the
    'FICHA TECNICA' block; if that layout is absent, falls back to every
    magnet link on the page with a default 'DVD' quality. Also fetches
    Spanish subtitles from subdivx when the page links to them.
    """
    logger.info()
    itemlist = []
    full_data = get_source(item.url)
    data = scrapertools.find_single_match(full_data, '>FICHA TECNICA:<.*?</ul>')
    patron = '(?:<em>|<br/><em>|/> )(DVD|720|1080)(?:</em>|<br/>|</span>).*?="(magnet[^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    if len(matches) == 0:
        # Fallback layout: no quality markers, grab every magnet on the page.
        # Normalizing to (quality, url) pairs here replaces the original
        # bare-except control flow, which could duplicate already-appended
        # items when the tuple unpacking failed mid-loop.
        patron = '<a href="(magnet[^"]+)"'
        matches = [('DVD', url) for url in re.compile(patron, re.DOTALL).findall(full_data)]
    # Optional subtitles hosted on subdivx.
    patron_sub = 'href="(http://www.subdivx.com/bajar.php[^"]+)"'
    sub_url = scrapertools.find_single_match(full_data, patron_sub)
    sub_num = scrapertools.find_single_match(sub_url, 'u=(\d+)')
    if sub_url == '':
        sub = ''
        lang = 'VO'
    else:
        try:
            sub = get_sub_from_subdivx(sub_url, sub_num)
        except Exception:
            # Best effort: a failing subtitle download must not break listing.
            sub = ''
        lang = 'VOSE'
    for quality, url in matches:
        if quality.strip() not in ['DVD', '720', '1080']:
            quality = 'DVD'
        if not config.get_setting('unify'):
            title = ' [Torrent] [%s] [%s]' % (quality, lang)
        else:
            title = 'Torrent'
        itemlist.append(Item(channel=item.channel, fanart=fanart, title=title, url=url, action='play',
                             server='torrent', quality=quality, language=lang, infoLabels=item.infoLabels,
                             subtitle=sub))
    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(Item(channel=item.channel,
                             title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                             url=item.url,
                             action="add_pelicula_to_library",
                             extra="findvideos",
                             contentTitle=item.contentTitle
                             ))
    return itemlist
def search(item, texto):
    """Channel search entry point; *texto* is the user's query string.

    Returns the scraped results, or a single 'no results' placeholder item
    if scraping fails. Narrowed from a bare except that silently swallowed
    every error, including SystemExit/KeyboardInterrupt.
    """
    logger.info()
    itemlist = []
    texto = texto.replace(" ", "+")
    item.url = item.url + texto
    if texto != '':
        try:
            return list_all(item)
        except Exception:
            logger.error("search failed for url %s" % item.url)
            itemlist.append(item.clone(url='', title='No hay elementos...', action=''))
    return itemlist
def newest(categoria):
    """Feed for Kodi's global 'Novedades' menu.

    Only 'peliculas', 'terror' and 'torrent' categories are served; any
    other category returns []. Scraping errors are logged and yield [].
    """
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria in ['peliculas', 'terror', 'torrent']:
            item.url = host
            itemlist = list_all(item)
            # Novedades is a flat list: drop the trailing pagination entry.
            # Guarding with `itemlist and` avoids the IndexError the original
            # relied on the bare except to hide.
            if itemlist and itemlist[-1].title == 'Siguiente >>>':
                itemlist.pop()
    except Exception:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
    return itemlist
def get_sub_from_subdivx(sub_url, sub_num):
    """Download and extract a subtitle archive from subdivx.

    Follows the download redirect manually, saves the archive as
    subtitle.rar in the addon data path, extracts it into temp_subs via a
    Kodi builtin, and returns the path of the first extracted file
    ('' if nothing usable was obtained).
    """
    logger.info()
    import xbmc
    from time import sleep
    # NOTE(review): urlparse is imported but never used in this function.
    import urlparse
    sub_dir = os.path.join(config.get_data_path(), 'temp_subs')
    # Clear leftovers from a previous run so the extracted file is
    # unambiguously the one we just downloaded.
    if os.path.exists(sub_dir):
        for sub_file in os.listdir(sub_dir):
            old_sub = os.path.join(sub_dir, sub_file)
            os.remove(old_sub)
    # follow_redirects=False so we can build the real file URL ourselves
    # from the 'location' header.
    sub_data = httptools.downloadpage(sub_url, follow_redirects=False)
    if 'x-frame-options' not in sub_data.headers:
        sub_url = 'http://subdivx.com/sub%s/%s' % (sub_num, sub_data.headers['location'])
        sub_url = sub_url.replace('http:///', '')
        sub_data = httptools.downloadpage(sub_url).data
        fichero_rar = os.path.join(config.get_data_path(), "subtitle.rar")
        outfile = open(fichero_rar, 'wb')
        outfile.write(sub_data)
        outfile.close()
        # Kodi builtin extracts the archive; presumably it also creates
        # temp_subs when missing — TODO confirm, os.listdir below would
        # raise otherwise on a first run.
        xbmc.executebuiltin("XBMC.Extract(%s, %s/temp_subs)" % (fichero_rar, config.get_data_path()))
        # Extraction is asynchronous; give it a moment to finish.
        sleep(1)
        if len(os.listdir(sub_dir)) > 0:
            sub = os.path.join(sub_dir, os.listdir(sub_dir)[0])
        else:
            sub = ''
    else:
        # The header indicates we got an HTML page, not a downloadable file.
        logger.info('sub no valido')
        sub = ''
    return sub

View File

@@ -0,0 +1,66 @@
{
"id": "pelkex",
"name": "Pelkex",
"active": true,
"adult": false,
"language": ["lat"],
"thumbnail": "http://pelkex.net/wp-content/uploads/2018/06/35227842_1998485733529574_5906247779155443712_n.png",
"banner": "",
"categories": [
"movie"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - Latino",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - terror",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"LAT"
]
}
]
}

View File

@@ -0,0 +1,188 @@
# -*- coding: utf-8 -*-
# -*- Channel Pelkex -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
host = 'http://pelkex.net/'
IDIOMAS = {'Latino': 'LAT'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['openload', 'streamango', 'fastplay', 'okru', 'rapidvideo']
def get_source(url):
    """Fetch *url* and return the page body with layout whitespace removed."""
    logger.info()
    page = httptools.downloadpage(url).data
    page = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", page)
    return page
def mainlist(item):
    """Root menu of the channel: catalogue, genre/year sections and search."""
    logger.info()
    autoplay.init(item.channel, list_servers, list_quality)
    itemlist = [
        Item(channel=item.channel, title="Todas", action="list_all",
             url=host + '?s=', pages=2, thumbnail=get_thumb('all', auto=True)),
        Item(channel=item.channel, title="Generos", action="section",
             thumbnail=get_thumb('genres', auto=True)),
        Item(channel=item.channel, title="Por Años", action="section",
             thumbnail=get_thumb('year', auto=True)),
        Item(channel=item.channel, title='Buscar', action="search",
             url=host + '?s=', pages=3, thumbnail=get_thumb('search', auto=True)),
    ]
    autoplay.show_option(item.channel, itemlist)
    return itemlist
def list_all(item):
    """Accumulate up to item.pages result pages from item.url.

    Each page's movie cards are scraped into Items, enriched via TMDB, and
    the site's next-page link either feeds the next loop iteration or, on
    the last requested page, becomes a 'Siguiente >>>' entry.
    """
    logger.info()
    itemlist = []
    i = 1
    while i <= item.pages:
        try:
            data = get_source(item.url)
        except:
            # item.url may be '' when the previous page had no next link.
            break
        patron = '<div class="card-image"><a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)" />.*?'
        patron += '</h3><p>([^<]+)</p>.*?</i>(\d{4})</li>'
        matches = re.compile(patron, re.DOTALL).findall(data)
        for scrapedurl, scrapedthumbnail, scrapedtitle, plot, year in matches:
            url = scrapedurl
            # NOTE(review): self-assignment kept as-is; it has no effect.
            scrapedtitle = scrapedtitle
            thumbnail = scrapedthumbnail
            new_item = Item(channel=item.channel, title=scrapedtitle, url=url, action='findvideos',
                            thumbnail=thumbnail, plot=plot, infoLabels={'year':year})
            new_item.contentTitle=scrapedtitle
            itemlist.append(new_item)
        tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
        # Paginacion
        if itemlist != []:
            next_page = scrapertools.find_single_match(data, "href='#'>\d+</a></li><li class='page-item'>"
                                                             "<a class='page-link' href='([^']+)'>")
            if next_page != '' and i == item.pages:
                # Last requested page: expose pagination to the user instead
                # of fetching further pages ourselves.
                itemlist.append(Item(channel=item.channel, action="list_all", title='Siguiente >>>', url=next_page,
                                     pages=item.pages))
            else:
                item.url=next_page
        i += 1
    return itemlist
def section(item):
    """List genre or year links taken from the site's navigation menu.

    item.title selects which submenu to scrape ('Generos' or one
    containing 'Años'); each link becomes a 'list_all' entry.
    """
    logger.info()
    data = get_source(host)
    if item.title == 'Generos':
        data = scrapertools.find_single_match(data, 'tabindex="0">Generos<.*?</ul>')
    elif 'Años' in item.title:
        data = scrapertools.find_single_match(data, 'tabindex="0">Año<.*?</ul>')
    entries = re.compile('href="([^"]+)">([^<]+)</a>', re.DOTALL).findall(data)
    return [Item(channel=item.channel, title=label, url=link, action='list_all', pages=3)
            for link, label in entries]
def findvideos(item):
    """Collect embedded-player links for the movie at item.url.

    The '%s' placeholder in each title is later replaced with the resolved
    server name by servertools.
    """
    logger.info()
    itemlist = []
    data = get_source(item.url)
    patron = '<div id="[^"]+" class="tab.*?"><(?:iframe|IFRAME).*?(?:src|SRC)="([^"]+)"'
    for embed_url in re.compile(patron, re.DOTALL).findall(data):
        # Protocol-relative embeds need an explicit scheme.
        url = embed_url if 'http' in embed_url else 'http:' + embed_url
        itemlist.append(Item(channel=item.channel, title='%s', url=url, action='play',
                             language=IDIOMAS['Latino'], infoLabels=item.infoLabels))
    itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())
    # Requerido para FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Requerido para AutoPlay
    autoplay.start(itemlist, item)
    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                             url=item.url, action="add_pelicula_to_library", extra="findvideos",
                             contentTitle=item.contentTitle))
    return itemlist
def search(item, texto):
    """Channel search entry point; *texto* is the user's query string.

    Returns the scraped results, or a single 'no results' placeholder item
    if scraping fails. Narrowed from a bare except that silently swallowed
    every error, including SystemExit/KeyboardInterrupt.
    """
    logger.info()
    itemlist = []
    texto = texto.replace(" ", "+")
    item.url = item.url + texto
    if texto != '':
        try:
            return list_all(item)
        except Exception:
            logger.error("search failed for url %s" % item.url)
            itemlist.append(item.clone(url='', title='No hay elementos...', action=''))
    return itemlist
def newest(categoria):
    """Feed for Kodi's global 'Novedades' menu.

    categoria: 'peliculas'/'latino' use the search listing, 'infantiles'
    and 'terror' map to genre pages. Scraping errors are logged and
    yield [].
    """
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria in ['peliculas', 'latino']:
            item.url = host + '?s='
        elif categoria == 'infantiles':
            item.url = host + 'genre/animacion/'
        elif categoria == 'terror':
            item.url = host + 'genre/terror/'
        item.pages = 3
        itemlist = list_all(item)
        # Novedades is a flat list: drop the trailing pagination entry.
        # Guarding with `itemlist and` avoids the IndexError the original
        # relied on the bare except to hide.
        if itemlist and itemlist[-1].title == 'Siguiente >>>':
            itemlist.pop()
    except Exception:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
    return itemlist

View File

@@ -0,0 +1,12 @@
{
"id": "special",
"name": "<Terror 2018>",
"active": true,
"adult": false,
"language": [],
"thumbnail": "https://i.postimg.cc/FR2nygS0/g4567.png",
"banner": "",
"categories": [
"movie"
]
}

View File

@@ -0,0 +1,78 @@
# -*- coding: utf-8 -*-
# -*- Channel Halloween -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core import jsontools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
host = 'https://www.imdb.com/list/ls027655523/?sort=list_order,asc&st_dt=&mode=detail&page='
def get_source(url):
    """Return the HTML at *url* with newlines, tabs and extra spaces removed."""
    logger.info()
    body = httptools.downloadpage(url).data
    return re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", body)
def mainlist(item):
    """Entry point: show the first slice (positions 60-80) of the IMDB list."""
    logger.info()
    item.url = host
    item.first, item.last, item.page = 60, 80, 1
    return list_all(item)
def list_all(item):
    """Page through the IMDB list in slices of 20 items.

    Scrapes the embedded 'itemListElement' JSON array from the IMDB list
    page, turns each entry's imdb id into a searchable Item, resolves
    titles via TMDB, and appends a 'Siguiente >>' entry that either
    advances the slice window or moves to the next IMDB page.
    """
    logger.info()
    import json
    itemlist = []
    data = get_source('%s%s' % (host, item.page))
    data = scrapertools.find_single_match(data, '"itemListElement":([^\]]+)\]')
    data = data + ']'
    # SECURITY FIX: the scraped text is remote content — parse it as JSON
    # instead of eval()'ing it, which would execute arbitrary expressions.
    movie_list = json.loads(data)
    for movie in movie_list[item.first:item.last]:
        # '/title/tt0123456/' -> 'tt0123456'
        IMDBNumber = movie['url'].replace('title', '').replace('/', '')
        new_item = Item(channel='search', contentType='movie', action='do_search',
                        infoLabels={'imdb_id': IMDBNumber})
        itemlist.append(new_item)
        logger.debug('id %s' % IMDBNumber)
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    for movie in itemlist:
        movie.title = movie.infoLabels['title']
        movie.wanted = movie.title
    # Advance the 20-item window, rolling over to the next IMDB page when
    # the current page is exhausted.
    if item.last + 20 < len(movie_list):
        first = item.last
        last = item.last + 20
        page = item.page
    else:
        first = 0
        last = 20
        page = item.page + 1
    itemlist.append(Item(channel=item.channel, title='Siguiente >>', action='list_all',
                         last=last, first=first, page=page))
    return itemlist

View File

@@ -229,8 +229,10 @@ def render_items(itemlist, parent_item):
set_infolabels(listitem, item)
# Montamos el menu contextual
context_commands = set_context_commands(item, parent_item)
if parent_item.channel != 'special':
context_commands = set_context_commands(item, parent_item)
else:
context_commands = []
# Añadimos el item
if config.get_platform(True)['num_version'] >= 17.0 and parent_item.list_type == '':
listitem.addContextMenuItems(context_commands)
@@ -1091,16 +1093,19 @@ def play_torrent(item, xlistitem, mediaurl):
mediaurl += "&episode=%s&library=&season=%s&show=%s&tmdb=%s&type=episode" % (item.infoLabels['episode'], item.infoLabels['season'], item.infoLabels['tmdb_id'], item.infoLabels['tmdb_id'])
elif item.contentType == 'movie':
mediaurl += "&library=&tmdb=%s&type=movie" % (item.infoLabels['tmdb_id'])
xbmc.executebuiltin("PlayMedia(" + torrent_options[seleccion][1] % mediaurl + ")")
#Seleccionamos que clientes torrent soportamos para el marcado de vídeos vistos: asumimos que todos funcionan
#if "quasar" in torrent_options[seleccion][1] or "elementum" in torrent_options[seleccion][1]:
time_limit = time.time() + 150 #Marcamos el timepo máx. de buffering
while not is_playing() and time.time() < time_limit: #Esperamos mientra buffera
time.sleep(5) #Repetimos cada intervalo
#logger.debug(str(time_limit))
if item.subtitle != '':
xbmc_player.setSubtitles(item.subtitle)
if item.strm_path and is_playing(): #Sólo si es de Videoteca
from platformcode import xbmc_videolibrary
xbmc_videolibrary.mark_auto_as_watched(item) #Marcamos como visto al terminar

View File

@@ -49,6 +49,7 @@ thumb_dict = {"movies": "https://s10.postimg.cc/fxtqzdog9/peliculas.png",
"animacion": "https://s14.postimg.cc/vl193mupd/animation.png",
"anime" : "https://s10.postimg.cc/n9mc2ikzt/anime.png",
"artes marciales" : "https://s10.postimg.cc/4u1v51tzt/martial_arts.png",
"asiaticas" : "https://i.postimg.cc/Xq0HXD5d/asiaticas.png",
"aventura": "https://s14.postimg.cc/ky7fy5he9/adventure.png",
"belico": "https://s14.postimg.cc/5e027lru9/war.png",
"biografia" : "https://s10.postimg.cc/jq0ecjxnt/biographic.png",