@@ -143,7 +143,7 @@ def start(itemlist, item):
        # 2: Solo servidores
        # 3: Solo calidades
        # 4: No ordenar
        if (settings_node['custom_servers'] and settings_node['custom_quality']) or get_setting('autoplay'):
        if (settings_node['custom_servers'] and settings_node['custom_quality']):
            priority = settings_node['priority']  # 0: Servidores y calidades o 1: Calidades y servidores
        elif settings_node['custom_servers']:
            priority = 2  # Solo servidores

@@ -254,7 +254,10 @@ def start(itemlist, item):
        autoplay_list.sort(key=lambda orden: orden['indice_quality'])

        # Se prepara el plan b, en caso de estar activo se agregan los elementos no favoritos al final
        plan_b = settings_node['plan_b']
        try:
            plan_b = settings_node['plan_b']
        except:
            plan_b = True
        text_b = ''
        if plan_b:
            autoplay_list.extend(autoplay_b)

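The hunk above replaces the direct `settings_node['plan_b']` lookup with a try/except, so autoplay settings saved before the "plan b" option existed simply default to True instead of raising a KeyError. A minimal equivalent, assuming settings_node behaves like a plain dict (sketch, not the project's code):

# Hypothetical shorthand for the same guard: missing key falls back to True
plan_b = settings_node.get('plan_b', True)
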
@@ -321,7 +324,7 @@ def start(itemlist, item):
|
||||
platformtools.play_video(videoitem, autoplay=True)
|
||||
except:
|
||||
pass
|
||||
|
||||
sleep(3)
|
||||
try:
|
||||
if platformtools.is_playing():
|
||||
PLAYED = True
|
||||
|
||||
@@ -311,6 +311,34 @@ def findvideos(item):
                  'Mega': '',
                  'MediaFire': ''}
    dec_value = scrapertools.find_single_match(data, 'String\.fromCharCode\(parseInt\(str\[i\]\)-(\d+)\)')

    torrent_link = scrapertools.find_single_match(data, '<a href="/protect/v\.php\?i=([^"]+)"')
    if torrent_link != '':
        import urllib
        base_url = '%s/protect/v.php' % host
        post = {'i': torrent_link, 'title': item.title}
        post = urllib.urlencode(post)
        headers = {'Referer': item.url}
        protect = httptools.downloadpage(base_url + '?' + post, headers=headers).data
        url = scrapertools.find_single_match(protect, 'value="(magnet.*?)"')
        server = 'torrent'

        title = item.contentTitle + ' (%s)' % server
        quality = 'default'
        language = IDIOMAS[lang]

        new_item = Item(channel=item.channel,
                        action='play',
                        title=title,
                        fulltitle=item.contentTitle,
                        url=url,
                        language=language,
                        thumbnail=item.thumbnail,
                        quality=quality,
                        server=server
                        )
        itemlist.append(new_item)

    for video_cod, server_id in matches:
        if server_id not in ['Mega', 'MediaFire', 'Trailer', '']:
            video_id = dec(video_cod, dec_value)

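The `dec()` helper used on the last line is not part of this hunk; the `dec_value` regex above only captures the numeric offset N from the page's `String.fromCharCode(parseInt(str[i])-N)` routine. As a rough sketch of what such a decoder typically does, assuming the encoded payload is a whitespace-separated list of shifted character codes (illustrative, not the channel's actual implementation):

def dec(encoded, offset):
    # Undo the JavaScript String.fromCharCode(parseInt(str[i]) - offset) step:
    # each token is an integer whose value, minus the offset, is a character code.
    return ''.join(chr(int(token) - int(offset)) for token in encoded.split())
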
@@ -321,25 +349,14 @@ def findvideos(item):
            if server_id == 'TVM':
                server = 'thevideome'
                url = server_url[server_id] + video_id + '.html'
            elif server_id == 'BitTorrent':
                import urllib
                base_url = '%s/protect/v.php' % host
                post = {'i':video_id, 'title':item.title}
                post = urllib.urlencode(post)
                headers = {'Referer':item.url}
                protect = httptools.downloadpage(base_url+'?'+post, headers=headers).data
                url = scrapertools.find_single_match(protect, 'value="(magnet.*?)"')
                server = 'torrent'
            else:
                url = server_url[server_id] + video_id
            title = item.contentTitle + ' (%s)' % server
            quality = 'default'

            if server_id not in ['Mega', 'MediaFire', 'Trailer']:
                if server != 'torrent':
                    language = IDIOMAS[lang]
                else:
                    language = [IDIOMAS[lang], 'vose']

                language = [IDIOMAS[lang], 'vose']
                if url not in duplicados:
                    new_item = Item(channel=item.channel,
                                    action='play',

plugin.video.alfa/channels/community.json (new file, 33 lines)
@@ -0,0 +1,33 @@
{
    "id": "community",
    "name": "Community",
    "active": true,
    "adult": false,
    "language": ["cast", "lat"],
    "thumbnail": "",
    "banner": "",
    "fanart": "",
    "categories": [
        "direct",
        "movie",
        "tvshow",
        "vo"
    ],
    "settings": [
        {
            "id": "filterlanguages",
            "type": "list",
            "label": "Mostrar enlaces del canal en idioma...",
            "default": 3,
            "enabled": true,
            "visible": true,
            "lvalues": [
                "No Filtrar",
                "LAT",
                "CAST",
                "VO",
                "VOSE"
            ]
        }
    ]
}

plugin.video.alfa/channels/community.py (new file, 299 lines)
@@ -0,0 +1,299 @@
# -*- coding: utf-8 -*-
# -*- Channel Community -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-

import re
import urllib
import os

from core import httptools
from core import scrapertools
from core import servertools
from core import jsontools
from channelselector import get_thumb
from core import tmdb
from core.item import Item
from platformcode import logger, config, platformtools
from channels import autoplay
from channels import filtertools

list_data = {}

list_language = ['LAT', 'CAST', 'VO', 'VOSE']
list_servers = ['directo']
list_quality = ['SD', '720', '1080', '4k']


def mainlist(item):
    logger.info()

    path = os.path.join(config.get_data_path(), 'community_channels.json')
    if not os.path.exists(path):
        with open(path, "w") as file:
            file.write('{"channels":{}}')
            file.close()
    autoplay.init(item.channel, list_servers, list_quality)

    return show_channels(item)

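For reference, `community_channels.json` starts out as the empty registry written above and is later filled by `add_channel()` with one entry per registered channel, keyed by a numeric id and holding at least 'path' and 'channel_name' ('poster' is optional and picked up by `show_channels()` when present). A plausible state after adding one channel could look like this (illustrative values, not shipped data):

# Hypothetical contents of community_channels.json after one add_channel() call,
# shown as the Python dict jsontools.load() would return:
community_registry = {
    "channels": {
        "1": {
            "path": "http://example.com/my_channel.json",  # local file path or URL
            "channel_name": "My community channel",
            "poster": ""                                    # optional thumbnail
        }
    }
}
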
def show_channels(item):
    logger.info()
    itemlist = []

    context = [{"title": "Eliminar este canal",
                "action": "remove_channel",
                "channel": "community"}]


    path = os.path.join(config.get_data_path(), 'community_channels.json')
    file = open(path, "r")
    json = jsontools.load(file.read())

    itemlist.append(Item(channel=item.channel, title='Agregar un canal', action='add_channel', thumbnail=get_thumb('add.png')))

    for key, channel in json['channels'].items():

        if 'poster' in channel:
            poster = channel['poster']
        else:
            poster = ''

        itemlist.append(Item(channel=item.channel, title=channel['channel_name'], url=channel['path'],
                             thumbnail=poster, action='show_menu', channel_id=key, context=context))
    return itemlist

def load_json(item):
    logger.info()

    if item.url.startswith('http'):
        json_file = httptools.downloadpage(item.url).data
    else:
        json_file = open(item.url, "r").read()

    json_data = jsontools.load(json_file)

    return json_data

def show_menu(item):
    global list_data
    logger.info()
    itemlist = []

    json_data = load_json(item)

    if "menu" in json_data:
        for option in json_data['menu']:
            itemlist.append(Item(channel=item.channel, title=option['title'], action='show_menu', url=option['link']))
        autoplay.show_option(item.channel, itemlist)
        return itemlist

    if "movies_list" in json_data:
        item.media_type = 'movies_list'

    elif "tvshows_list" in json_data:
        item.media_type = 'tvshows_list'

    elif "episodes_list" in json_data:
        item.media_type = 'episodes_list'

    return list_all(item)


    return itemlist

def list_all(item):
    logger.info()

    itemlist = []
    media_type = item.media_type
    json_data = load_json(item)
    for media in json_data[media_type]:

        quality, language, plot, poster = set_extra_values(media)

        title = media['title']
        title = set_title(title, language, quality)

        new_item = Item(channel=item.channel, title=title, quality=quality,
                        language=language, plot=plot, thumbnail=poster)


        if 'movies_list' in json_data:
            new_item.url = media
            new_item.contentTitle = media['title']
            new_item.action = 'findvideos'
            if 'year' in media:
                new_item.infoLabels['year'] = media['year']
        else:
            new_item.url = media['seasons_list']
            new_item.contentSerieName = media['title']
            new_item.action = 'seasons'

        itemlist.append(new_item)

    tmdb.set_infoLabels(itemlist, seekTmdb=True)
    return itemlist

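Together with `show_menu()` above and `seasons()`, `episodesxseason()` and `findvideos()` below, `list_all()` implies the shape of the JSON a community channel has to expose: a top-level `movies_list` or `tvshows_list`, per-entry metadata read by `set_extra_values()`, and either a `links` list or a `seasons_list`. A hypothetical minimal movie channel consistent with those key names (illustrative titles and URLs, not shipped data):

# Hypothetical community channel JSON, shown as the Python dict jsontools.load() would return:
example_channel = {
    "channel_name": "Example movies",
    "movies_list": [
        {
            "title": "Some movie",
            "year": "2018",                         # optional, copied into infoLabels
            "quality": "720",                       # optional, read by set_extra_values()
            "language": "LAT",                      # optional
            "plot": "Short synopsis",               # optional
            "poster": "http://example.com/p.jpg",   # optional
            "links": [
                {"url": "http://example.com/video", "quality": "720", "language": "LAT"}
            ]
        }
    ]
}
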
def seasons(item):
    logger.info()
    itemlist = []
    infoLabels = item.infoLabels
    list_seasons = item.url
    for season in list_seasons:
        infoLabels['season'] = season['season']
        title = 'Temporada %s' % season['season']
        itemlist.append(Item(channel=item.channel, title=title, url=season['link'], action='episodesxseason',
                             contentSeasonNumber=season['season'], infoLabels=infoLabels))

    tmdb.set_infoLabels(itemlist, seekTmdb=True)
    itemlist = sorted(itemlist, key=lambda i: i.title)

    return itemlist


def episodesxseason(item):
    logger.info()

    itemlist = []
    json_data = load_json(item)
    infoLabels = item.infoLabels

    season_number = infoLabels['season']
    for episode in json_data['episodes_list']:
        episode_number = episode['number']
        infoLabels['season'] = season_number
        infoLabels['episode'] = episode_number

        title = '%sx%s - Episodio %s' % (season_number, episode_number, episode_number)

        itemlist.append(Item(channel=item.channel, title=title, url=episode, action='findvideos',
                             contentEpisodeNumber=episode_number, infoLabels=infoLabels))

    tmdb.set_infoLabels(itemlist, seekTmdb=True)
    return itemlist

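For series, the same functions imply a two-level layout: the show entry carries a `seasons_list` whose `link` points at a file with an `episodes_list`, and each episode again carries a `links` list for `findvideos()`. A hypothetical sketch consistent with `seasons()` and `episodesxseason()` above (illustrative names and URLs):

# Hypothetical tvshow entry and season file, as the dicts jsontools.load() would produce:
example_show = {
    "title": "Some show",
    "seasons_list": [
        {"season": "1", "link": "http://example.com/show_s1.json"}  # loaded via load_json()
    ]
}

example_season_file = {
    "episodes_list": [
        {"number": "1", "links": [{"url": "http://example.com/s01e01", "language": "VOSE"}]}
    ]
}
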
def findvideos(item):
    logger.info()
    itemlist = []

    for url in item.url['links']:
        quality, language, plot, poster = set_extra_values(url)
        title = ''
        title = set_title(title, language, quality)

        itemlist.append(Item(channel=item.channel, title='%s' + title, url=url['url'], action='play', quality=quality,
                             language=language, infoLabels=item.infoLabels))

    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())

    # Requerido para FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay

    autoplay.start(itemlist, item)

    return itemlist

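The `'%s' + title` trick above leaves a placeholder that `servertools.get_servers_itemlist()` fills in once the server has been detected, via the lambda that formats `i.title % i.server.capitalize()`. A toy illustration of that formatting step only (not the servertools API itself):

# Toy illustration of the title placeholder being resolved after server detection:
title_template = '%s' + ' [720] [LAT]'
detected_server = 'openload'  # hypothetical value the server lookup would produce
print(title_template % detected_server.capitalize())  # -> "Openload [720] [LAT]"
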
def add_channel(item):
    logger.info()
    import xbmc
    import xbmcgui
    channel_to_add = {}
    json_file = ''
    result = platformtools.dialog_select('Agregar un canal', ['Desde archivo local', 'Desde URL'])
    if result == -1:
        return
    if result == 0:
        file_path = xbmcgui.Dialog().browseSingle(1, 'Alfa - (Comunidad)', 'files')
        try:
            channel_to_add['path'] = file_path
            json_file = jsontools.load(open(file_path, "r").read())
            channel_to_add['channel_name'] = json_file['channel_name']
        except:
            pass

    elif result == 1:
        url = platformtools.dialog_input("", 'Ingresa la URL del canal', False)
        try:
            channel_to_add['path'] = url
            json_file = jsontools.load(httptools.downloadpage(url).data)
        except:
            pass

    if len(json_file) == 0:
        return
    if "episodes_list" in json_file:
        platformtools.dialog_ok('Alfa', 'No es posible agregar este tipo de canal')
        return
    channel_to_add['channel_name'] = json_file['channel_name']
    path = os.path.join(config.get_data_path(), 'community_channels.json')

    community_json = open(path, "r")
    community_json = jsontools.load(community_json.read())
    id = len(community_json['channels']) + 1
    community_json['channels'][id] = channel_to_add

    with open(path, "w") as file:
        file.write(jsontools.dump(community_json))
        file.close()

    platformtools.dialog_notification('Alfa', '%s se ha agregado' % json_file['channel_name'])
    return

def remove_channel(item):
    logger.info()
    import xbmc
    import xbmcgui
    path = os.path.join(config.get_data_path(), 'community_channels.json')

    community_json = open(path, "r")
    community_json = jsontools.load(community_json.read())

    id = item.channel_id
    to_delete = community_json['channels'][id]['channel_name']
    del community_json['channels'][id]
    with open(path, "w") as file:
        file.write(jsontools.dump(community_json))
        file.close()

    platformtools.dialog_notification('Alfa', '%s ha sido eliminado' % to_delete)
    platformtools.itemlist_refresh()
    return


def set_extra_values(dict):
    logger.info()
    quality = ''
    language = ''
    plot = ''
    poster = ''

    if 'quality' in dict and dict['quality'] != '':
        quality = dict['quality'].upper()
    if 'language' in dict and dict['language'] != '':
        language = dict['language'].upper()
    if 'plot' in dict and dict['plot'] != '':
        plot = dict['plot']
    if 'poster' in dict and dict['poster'] != '':
        poster = dict['poster']

    return quality, language, plot, poster

def set_title(title, language, quality):
    logger.info()

    if not config.get_setting('unify'):
        if quality != '':
            title += ' [%s]' % quality
        if language != '':
            if not isinstance(language, list):
                title += ' [%s]' % language.upper()
            else:
                title += ' '
                for lang in language:
                    title += '[%s]' % lang.upper()

    return title.capitalize()

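As a quick sanity check on `set_title()` (assuming the 'unify' setting is disabled): the quality and every language label are appended as bracketed tags, and the whole string then goes through `capitalize()`, which also lowercases the tags.

# Hypothetical call with 'unify' off:
#   set_title('some movie', ['LAT', 'VOSE'], '1080')  ->  'Some movie [1080] [lat][vose]'
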
@@ -270,6 +270,7 @@ def entradas(item):
    item.text_color = color2
    # Descarga la página
    data = httptools.downloadpage(item.url).data
    data = re.sub("\n", "", data)
    if "valores" in item and item.valores:
        itemlist.append(item.clone(action="", title=item.valores, text_color=color4))

@@ -295,7 +296,6 @@ def entradas(item):
    # Extrae las entradas
    if item.extra == "Novedades":
        data2 = data.split("<h2>Últimas Películas Agregadas y Actualizadas</h2>", 1)[1]

        entradas = scrapertools.find_multiple_matches(data2, '<div class="col-mt-5 postsh">(.*?)</div></div></div>')
    else:
        entradas = scrapertools.find_multiple_matches(data, '<div class="col-mt-5 postsh">(.*?)</div></div></div>')

@@ -381,6 +381,7 @@ def findvideos(item):

    # Descarga la pagina
    data = httptools.downloadpage(item.url).data
    data = re.sub("\n", "", data)
    sinopsis = scrapertools.find_single_match(data, '<h2>Sinopsis</h2>.*?>(.*?)</p>')
    item.infoLabels["plot"] = scrapertools.htmlclean(sinopsis)
    # Busca en tmdb si no se ha hecho antes
@@ -409,8 +410,8 @@ def findvideos(item):
        if server == "Ul":
            server = "Uploaded"
        title = "%s [%s][%s]" % (server, idioma, calidad)
        itemlist.append(Item(channel=item.channel, action="play", title=title, url=url, language=idioma, quality=calidad,
                             server=server, infoLabels=item.infoLabels))
        itemlist.append(Item(channel=item.channel, action="play", title=title, url=url, language=idioma,
                             quality=calidad, server=server, infoLabels=item.infoLabels))

    patron = 'id="(embed[0-9]*)".*?<div class="calishow">(.*?)<.*?src="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
@@ -420,8 +421,8 @@ def findvideos(item):
        title = "Directo"
        idioma = scrapertools.find_single_match(data, 'href="#%s".*?>([^<]+)<' % id_embed)
        title = "%s [%s][%s]" % (title.capitalize(), idioma, calidad)
        itemlist.append(Item(channel=item.channel, action="play", title=title, url=url, language=idioma, quality=calidad,
                             server=server))
        itemlist.append(Item(channel=item.channel, action="play", title=title, url=url, language=idioma,
                             quality=calidad, server=server))
    # Requerido para FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)

@@ -25,7 +25,7 @@ list_servers = ['openload', 'gamovideo', 'streamplay', 'flashx', 'streamito', 's
def get_source(url):
    logger.info()
    data = httptools.downloadpage(url).data
    data = re.sub(r'"|\n|\r|\t| |<br>|\s{2,}', "", data)
    data = re.sub(r'\n|\r|\t| |<br>|\s{2,}', "", data)
    return data

def mainlist(item):

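The only change in this hunk is dropping the leading `"` from the character class being stripped, which lines up with the pattern updates in the hunks below: once double quotes survive `get_source()`, expressions such as `href="([^"]+)"` can anchor on the quoted attributes again. A minimal illustration of the difference (illustrative HTML, not the site's markup):

import re

html = '<a class="poster" href="/movie/1" title="Some movie">'
stripped_quotes = re.sub(r'"|\n|\r|\t', "", html)   # old behaviour: quotes removed
kept_quotes = re.sub(r'\n|\r|\t', "", html)         # new behaviour: quotes preserved

print(re.findall(r'href="([^"]+)"', stripped_quotes))  # [] - pattern can no longer match
print(re.findall(r'href="([^"]+)"', kept_quotes))      # ['/movie/1']
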
@@ -51,8 +51,8 @@ def list_all(item):

    itemlist = []
    data = get_source(item.url)
    data1 = scrapertools.find_single_match(data, '<div class=col-md-80 lado2(.*?)</div></div></div>')
    patron = '<a class=poster href=(.*?) title=(.*?)> <img.*?src=(.*?) alt'
    data1 = scrapertools.find_single_match(data, '<div class="col-md-80 lado2"(.*?)</div></div></div>')
    patron = '<a class="poster" href="([^"]+)" title="([^"]+)"><img.*?src="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data1)

    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
@@ -70,7 +70,7 @@ def list_all(item):

    if itemlist != []:
        actual_page_url = item.url
        next_page = scrapertools.find_single_match(data, '<li><a href=([^ ]+)><span aria-hidden=true>»</span>')
        next_page = scrapertools.find_single_match(data, '<li><a href="([^"]+)"><span aria-hidden="true">»</span>')
        if next_page != '':
            itemlist.append(Item(channel=item.channel, action="list_all", title='Siguiente >>>', url=next_page,
                                 thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png'))

@@ -83,7 +83,7 @@ def seasons(item):

    data = get_source(item.url)

    patron = '</span> Temporada (\d+) </a>'
    patron = '</span>Temporada (\d+)</a>'

    matches = re.compile(patron, re.DOTALL).findall(data)
    infoLabels = item.infoLabels

@@ -118,9 +118,9 @@ def episodesxseason(item):
    data = get_source(item.url)

    if item.extra1 != 'library':
        patron = '<tr><td>.*?<a href=([^\s]+) title=Temporada %s, Episodio (\d+.*?)>' % item.contentSeasonNumber
        patron = '<tr><td>.*?<a href="([^"]+)" title="Temporada %s, Episodio (\d+.*?)>' % item.contentSeasonNumber
    else:
        patron = '<tr><td>.*?<a href=([^\s]+) title=Temporada \d+, Episodio (\d+.*?)>'
        patron = '<tr><td>.*?<a href="([^"]+)" title=Temporada \d+, Episodio (\d+.*?)>'

    matches = re.compile(patron, re.DOTALL).findall(data)
    infoLabels = item.infoLabels

@@ -148,7 +148,7 @@ def genres(item):
    itemlist = []
    norep = []
    data = get_source(item.url)
    patron = '<a href=([^>]+)><span.*?<i>(.*?)</i>.*?>(.*?)</b>'
    patron = '<a href="([^"]+)"><span.*?<i>([^<])</i>.*?>(.*?)</b>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle, cantidad in matches:

@@ -167,7 +167,7 @@ def findvideos(item):

    itemlist = []
    data = get_source(item.url)
    patron = '<td><a ([^\s]+) class=btn.*?style=margin:.*?<span>.*?</span></td><td>(.*?)</td><td>.*?</td>'
    patron = '<td><a href="([^"]+)" class="btn.*?style="margin:.*?<span>.*?</span></td><td>(.*?)</td><td>.*?</td>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for url, language in matches:

@@ -29,6 +29,7 @@ def get_source(url):
    logger.info()
    data = httptools.downloadpage(url).data
    data = re.sub(r'\n|\r|\t| |<br>|\s{2,}', "", data)
    logger.debug(data)
    return data

def mainlist(item):

@@ -59,10 +60,10 @@ def list_all(item):
    itemlist = []

    data = get_source(item.url)
    patron = '<div class="post-thumbnail"><a href="([^"]+)" title="([^"]+)">.*?data-lazy-src="([^"]+)"'
    patron = '<div class="post-thumbnail"><a href="([^"]+)".*?src="([^"]+)".*?title=".*?">([^<]+)'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        url = scrapedurl
        scrapedtitle = scrapedtitle.lower().replace('enlace permanente a', '').capitalize()
        contentSerieName = scrapedtitle

@@ -87,6 +87,9 @@ def getchanneltypes(view="thumb_"):
                             channel_type=channel_type, viewmode="thumbnails",
                             thumbnail=get_thumb("channels_%s.png" % channel_type, view)))

    itemlist.append(Item(title='Comunidad', channel="community", action="mainlist", view=view,
                         category=title, channel_type="all", thumbnail=get_thumb("channels_community.png", view),
                         viewmode="thumbnails"))
    return itemlist


@@ -119,6 +122,9 @@ def filterchannels(category, view="thumb_"):
        try:
            channel_parameters = channeltools.get_channel_parameters(channel)

            if channel_parameters["channel"] == 'community':
                continue

            # si el canal no es compatible, no se muestra
            if not channel_parameters["compatible"]:
                continue

plugin.video.alfa/resources/media/themes/default/thumb_add.png (new binary file, 6.8 KiB; not shown)
A second new binary image (38 KiB) is also added; its path is not shown in this capture.