diff --git a/plugin.video.alfa/channels/allcalidad.json b/plugin.video.alfa/channels/allcalidad.json
index 1d553516..0caed17c 100755
--- a/plugin.video.alfa/channels/allcalidad.json
+++ b/plugin.video.alfa/channels/allcalidad.json
@@ -12,6 +12,18 @@
],
"settings": [
{
+ "id": "filter_languages",
+ "type": "list",
+ "label": "Mostrar enlaces en idioma...",
+ "default": 0,
+ "enabled": true,
+ "visible": true,
+ "lvalues": [
+ "No filtrar",
+ "LAT"
+ ]
+ },
+ {
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
diff --git a/plugin.video.alfa/channels/allcalidad.py b/plugin.video.alfa/channels/allcalidad.py
index 93bf520f..58d9bfa2 100755
--- a/plugin.video.alfa/channels/allcalidad.py
+++ b/plugin.video.alfa/channels/allcalidad.py
@@ -1,6 +1,8 @@
# -*- coding: utf-8 -*-
from channelselector import get_thumb
+from channels import autoplay
+from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
@@ -8,6 +10,13 @@ from core import tmdb
from core.item import Item
from platformcode import config, logger
+
+IDIOMAS = {'Latino': 'LAT'}
+list_language = IDIOMAS.values()
+list_quality = []
+list_servers = ['rapidvideo', 'streamango', 'fastplay', 'flashx', 'openload', 'vimeo', 'netutv']
+
+
__channel__='allcalidad'
host = "http://allcalidad.com/"
@@ -20,6 +29,7 @@ except:
def mainlist(item):
logger.info()
+ autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(Item(channel = item.channel, title = "Novedades", action = "peliculas", url = host, thumbnail = get_thumb("newest", auto = True)))
itemlist.append(Item(channel = item.channel, title = "Por género", action = "generos_years", url = host, extra = "Genero", thumbnail = get_thumb("genres", auto = True) ))
@@ -27,6 +37,7 @@ def mainlist(item):
itemlist.append(Item(channel = item.channel, title = "Favoritas", action = "peliculas", url = host + "/favorites", thumbnail = get_thumb("favorites", auto = True) ))
itemlist.append(Item(channel = item.channel, title = ""))
itemlist.append(Item(channel = item.channel, title = "Buscar", action = "search", url = host + "?s=", thumbnail = get_thumb("search", auto = True)))
+ autoplay.show_option(item.channel, itemlist)
return itemlist
def newest(categoria):
@@ -146,6 +157,13 @@ def findvideos(item):
))
tmdb.set_infoLabels(itemlist, __modo_grafico__)
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
+ # Requerido para FilterTools
+ itemlist = filtertools.get_links(itemlist, item, list_language)
+
+ # Requerido para AutoPlay
+
+ autoplay.start(itemlist, item)
+
if itemlist:
itemlist.append(Item(channel = item.channel))
itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
diff --git a/plugin.video.alfa/channels/allpeliculas.json b/plugin.video.alfa/channels/allpeliculas.json
index 198fc1bb..f0cae0fb 100755
--- a/plugin.video.alfa/channels/allpeliculas.json
+++ b/plugin.video.alfa/channels/allpeliculas.json
@@ -19,6 +19,18 @@
"enabled": true,
"visible": true
},
+ {
+ "id": "filter_languages",
+ "type": "list",
+ "label": "Mostrar enlaces en idioma...",
+ "default": 0,
+ "enabled": true,
+ "visible": true,
+ "lvalues": [
+ "No filtrar",
+ "LAT"
+ ]
+ },
{
"id": "include_in_newest_latino",
"type": "bool",
diff --git a/plugin.video.alfa/channels/allpeliculas.py b/plugin.video.alfa/channels/allpeliculas.py
index 0aabc854..4e249b29 100644
--- a/plugin.video.alfa/channels/allpeliculas.py
+++ b/plugin.video.alfa/channels/allpeliculas.py
@@ -8,6 +8,9 @@ from core import tmdb
from core.item import Item
from channelselector import get_thumb
from platformcode import config, logger
+from channels import autoplay
+from channels import filtertools
+
__modo_grafico__ = config.get_setting('modo_grafico', "allpeliculas")
__perfil__ = int(config.get_setting('perfil', "allpeliculas"))
@@ -18,11 +21,18 @@ perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00'],
['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE']]
color1, color2, color3 = perfil[__perfil__]
-IDIOMAS = {"Castellano": "CAST", "Latino": "LAT", "Subtitulado": "VOSE", "Ingles": "VO"}
+IDIOMAS = {"Latino": "LAT"}
+list_language = IDIOMAS.values()
+
+list_quality = []
+
SERVERS = {"26": "powvideo", "45": "okru", "75": "openload", "12": "netutv", "65": "thevideos",
"67": "spruto", "71": "stormo", "73": "idowatch", "48": "okru", "55": "openload",
"20": "nowvideo", "84": "fastplay", "96": "raptu", "94": "tusfiles"}
+list_servers = ['powvideo', 'okru', 'openload', 'netutv', 'thevideos', 'spruto', 'stormo', 'idowatch', 'nowvideo',
+ 'fastplay', 'raptu', 'tusfiles']
+
host = "http://allpeliculas.com/"
def mainlist(item):
@@ -30,6 +40,8 @@ def mainlist(item):
itemlist = []
item.text_color = color1
+ autoplay.init(item.channel, list_servers, list_quality)
+
itemlist.append(item.clone(title="Películas", action="lista", fanart="http://i.imgur.com/c3HS8kj.png",
url= host + "movies/newmovies?page=1", extra1 = 0,
thumbnail=get_thumb('movies', auto=True)))
@@ -40,6 +52,8 @@ def mainlist(item):
itemlist.append(item.clone(title="", action=""))
itemlist.append(item.clone(title="Buscar...", action="search", thumbnail=get_thumb('search', auto=True)))
+ autoplay.show_option(item.channel, itemlist)
+
return itemlist
@@ -136,11 +150,9 @@ def findvideos(item):
fulltitle = item.title,
contentThumbnail = item.thumbnail,
url = url,
+ language = IDIOMAS['Latino']
))
- try:
- tmdb.set_infoLabels(itemlist, __modo_grafico__)
- except:
- pass
+ tmdb.set_infoLabels(itemlist, seekTmdb = True)
itemlist = servertools.get_servers_itemlist(itemlist)
itemlist.append(Item(channel=item.channel))
if config.get_videolibrary_support():
@@ -148,6 +160,13 @@ def findvideos(item):
action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
fulltitle = item.fulltitle
))
+ # Requerido para FilterTools
+
+ itemlist = filtertools.get_links(itemlist, item, list_language)
+
+ # Requerido para AutoPlay
+
+ autoplay.start(itemlist, item)
return itemlist
diff --git a/plugin.video.alfa/channels/autoplay.py b/plugin.video.alfa/channels/autoplay.py
index eadc9577..a6352fc6 100644
--- a/plugin.video.alfa/channels/autoplay.py
+++ b/plugin.video.alfa/channels/autoplay.py
@@ -615,8 +615,11 @@ def get_languages(channel):
list_language = ['No filtrar']
list_controls, dict_settings = channeltools.get_channel_controls_settings(channel)
for control in list_controls:
- if control["id"] == 'filter_languages':
- list_language = control["lvalues"]
+ try:
+ if control["id"] == 'filter_languages':
+ list_language = control["lvalues"]
+ except:
+ pass
return list_language
diff --git a/plugin.video.alfa/channels/ciberpeliculashd.py b/plugin.video.alfa/channels/ciberpeliculashd.py
index cc9c5a86..01184f6a 100644
--- a/plugin.video.alfa/channels/ciberpeliculashd.py
+++ b/plugin.video.alfa/channels/ciberpeliculashd.py
@@ -29,36 +29,17 @@ def mainlist(item):
extra = "qualitys", thumbnail=get_thumb('quality', auto=True)))
itemlist.append(Item(channel = item.channel, title = " Por idioma", action = "filtro", url = host,
extra = "languages", thumbnail=get_thumb('language', auto=True)))
+ itemlist.append(Item(channel = item.channel, title = " Por año", action = "filtro", url = host,
+ extra = "years", thumbnail=get_thumb('year', auto=True)))
itemlist.append(Item(channel = item.channel, title = ""))
itemlist.append(Item(channel = item.channel, title = "Series", text_bold = True, folder = False))
itemlist.append(Item(channel = item.channel, title = " Novedades", action = "series",
url = host +"/series/?peli=1", thumbnail=get_thumb('newest', auto=True)))
- itemlist.append(Item(channel = item.channel, title = " Nuevos Capitulos", action = "nuevos_capitulos",
- url = host + "/series/?peli=1", thumbnail=get_thumb('new episodes', auto=True)))
itemlist.append(Item(channel = item.channel, title = ""))
itemlist.append(Item(channel = item.channel, title = "Buscar", action = "search", url = host + "/?s=",
thumbnail=get_thumb('search', auto=True)))
return itemlist
-def nuevos_capitulos(item):
- logger.info()
- itemlist = []
- data = httptools.downloadpage(item.url).data
- patron = 'class="episode" href="([^"]+).*?'
- patron += 'src="([^"]+).*?'
- patron += 'title="([^"]+).*?'
- patron += '-->([^<]+).*?'
- patron += 'created_at">([^<]+)'
- matches = scrapertools.find_multiple_matches(data, patron)
- for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedepisode, scrapeddays in matches:
- scrapedtitle = scrapedtitle + " %s (%s)" %(scrapedepisode.strip(), scrapeddays.strip())
- itemlist.append(Item(action = "findvideos",
- channel = item.channel,
- title = scrapedtitle,
- thumbnail = scrapedthumbnail,
- url = scrapedurl
- ))
- return itemlist
def series(item):
logger.info()
@@ -70,7 +51,7 @@ def series(item):
patron += 'title="([^"]+)'
matches = scrapertools.find_multiple_matches(bloque, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
- itemlist.append(Item(action = "temporadas",
+ itemlist.append(Item(action = "capitulos",
channel = item.channel,
thumbnail = scrapedthumbnail,
title = scrapedtitle,
@@ -84,25 +65,41 @@ def series(item):
next_page += "%s" %page
itemlist.append(Item(action = "series",
channel = item.channel,
- title = "Página siguiente",
+ title = "Página siguiente >>",
url = next_page
))
return itemlist
-def temporadas(item):
+def episodios(item):
+ logger.info()
+ itemlist = []
+ itemlist = capitulos(item)
+ return itemlist
+
+
+def capitulos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
- bloque = scrapertools.find_single_match(data, 'Lista de Temporadas.*?')
- matches = scrapertools.find_multiple_matches(bloque, ' (.*?[0-9]+)')
- for scrapedtitle in matches:
- season = scrapertools.find_single_match(scrapedtitle, '[0-9]+')
+ bloque = scrapertools.find_single_match(data, 'Lista de Temporadas.*?Content principal')
+ patron = '(.*?)'
+ matches = scrapertools.find_multiple_matches(bloque, patron)
+ for scrapedurl, scrapedtitle in matches:
+ scrapedtitle = scrapedtitle.strip()
+ s_e = scrapertools.get_season_and_episode(scrapedurl.replace("-",""))
+ if s_e != "":
+ season = s_e.split("x")[0]
+ episode = s_e.split("x")[1]
+ else:
+ season = episode = ""
+ scrapedtitle = s_e + " - " + scrapedtitle
+ item.infoLabels["episode"] = episode
item.infoLabels["season"] = season
- url = item.url + "?temporada=%s" %season
- itemlist.append(item.clone(action = "capitulos",
+ itemlist.append(item.clone(action = "findvideos",
title = scrapedtitle,
- url = url
+ url = scrapedurl
))
tmdb.set_infoLabels(itemlist)
if config.get_videolibrary_support():
@@ -116,36 +113,6 @@ def temporadas(item):
return itemlist
-def episodios(item):
- logger.info()
- itemlist = []
- templist = temporadas(item)
- for tempitem in templist:
- itemlist += capitulos(tempitem)
- return itemlist
-
-
-def capitulos(item):
- logger.info()
- itemlist = []
- data = httptools.downloadpage(item.url).data
- patron = '
- (.*?)'
- matches = scrapertools.find_multiple_matches(data, patron)
- for scrapedurl, scrapedtitle in matches:
- scrapedtitle = scrapedtitle.replace("", "")
- episode = scrapertools.find_single_match(scrapedtitle, "Capitulo ([0-9]+)")
- scrapedtitle = scrapedtitle.split(":")[1]
- scrapedtitle = "%sx%s %s" %(item.infoLabels["season"], episode, scrapedtitle)
- item.infoLabels["episode"] = episode
- itemlist.append(item.clone(action = "findvideos",
- title = scrapedtitle,
- url = scrapedurl
- ))
- tmdb.set_infoLabels(itemlist)
- return itemlist
-
-
def newest(categoria):
logger.info()
itemlist = []
@@ -183,17 +150,30 @@ def search(item, texto):
def filtro(item):
logger.info()
itemlist = []
+ filter = ""
+ filter_end = "data-uk-dropdown"
+ if item.extra == "categories":
+ filter = "genero"
+ elif item.extra == "qualitys":
+ filter = "calidad"
+ elif item.extra == "languages":
+ filter = "audio"
+ elif item.extra == "years":
+ filter = "ano"
+ filter_end = "([^<]+)"
+ patron = 'id="([^"]+).*?'
+ patron += 'label for.*?>([^<]+)'
matches = scrapertools.find_multiple_matches(bloque, patron)
for url, titulo in matches:
+ url = filter + url
itemlist.append(Item(channel = item.channel,
action = "peliculas",
title = titulo,
- url = url + "/?peli=1"
+ url = url + "&peli=1"
))
return itemlist
@@ -202,8 +182,11 @@ def peliculas(item):
logger.info()
itemlist = []
infoLabels = dict()
+ filter = "uk-icon-angle-right next"
+ if item.extra == "busca":
+ filter = ' '
data = httptools.downloadpage(item.url).data
- bloque = scrapertools.find_single_match(data, 'loop-posts".*?panel-pagination pagination-bottom')
+ bloque = scrapertools.find_single_match(data, '%s.*?panel-pagination pagination-bottom' %(filter))
patron = 'a href="([^"]+)".*?'
patron += 'img alt="([^"]+)".*?'
patron += '((?:http|https)://image.tmdb.org[^"]+)".*?'
@@ -218,7 +201,7 @@ def peliculas(item):
year = 0
fulltitle = scrapertools.find_single_match(scrapedtitle, "(.*?) \(")
if "serie" in scrapedurl:
- action = "temporadas"
+ action = "capitulos"
infoLabels ['tvshowtitle'] = scrapedtitle
else:
action = "findvideos"
@@ -239,7 +222,7 @@ def peliculas(item):
next_page += "%s" %page
itemlist.append(Item(action = "peliculas",
channel = item.channel,
- title = "Página siguiente",
+ title = "Página siguiente >>",
url = next_page
))
return itemlist
@@ -249,7 +232,9 @@ def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
- patron = 'src="([^&]+)'
+ url = scrapertools.find_single_match(data, 'iframe-.*?src="([^"]+)')
+ data = httptools.downloadpage(url).data
+ patron = ' ]+>Más resultados')
if next_page != "":
@@ -330,10 +351,7 @@ def peliculas(item):
if "valores" in item and item.valores:
itemlist.append(item.clone(action="", title=item.valores, text_color=color4))
- if __menu_info__:
- action = "menu_info"
- else:
- action = "findvideos"
+ action = "findvideos"
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_multiple_matches(data,
@@ -344,14 +362,15 @@ def peliculas(item):
matches = scrapertools.find_multiple_matches(match, patron)
for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
url = urlparse.urljoin(host, scrapedurl)
+ filter_thumb = scrapedthumbnail.replace("https://image.tmdb.org/t/p/w200_and_h300_bestv2", "")
+ filter_list = {"poster_path": filter_thumb}
+ filter_list = filter_list.items()
itemlist.append(Item(channel=item.channel, action=action, title=scrapedtitle, url=url, extra="media",
thumbnail=scrapedthumbnail, contentTitle=scrapedtitle, fulltitle=scrapedtitle,
- text_color=color2, contentType="movie"))
+ text_color=color2, contentType="movie", infoLabels={'filtro':filter_list}))
else:
- patron = '(.*?) (.*?)' \
- 'src="([^"]+)".*?href="([^"]+)">([^<]+)'
+ patron = ' (.*?) ([^<]+)'
matches = scrapertools.find_multiple_matches(match, patron)
-
for idiomas, calidad, scrapedthumbnail, scrapedurl, scrapedtitle in matches:
calidad = scrapertools.find_single_match(calidad, ' ([^<]+) ')
if calidad:
@@ -361,17 +380,25 @@ def peliculas(item):
if "medium-vs" in idiomas: audios.append('VOSE')
if "medium-la" in idiomas: audios.append('LAT')
if "medium-en" in idiomas or 'medium-"' in idiomas:
- audios.append('V.O')
+ audios.append('VO')
title = "%s [%s]" % (scrapedtitle, "/".join(audios))
+
if calidad:
title += " (%s)" % calidad
url = urlparse.urljoin(host, scrapedurl)
+ filter_thumb = scrapedthumbnail.replace("https://image.tmdb.org/t/p/w200_and_h300_bestv2", "")
+ filter_list = {"poster_path": filter_thumb}
+ filter_list = filter_list.items()
itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, extra="media",
thumbnail=scrapedthumbnail, contentTitle=scrapedtitle, fulltitle=scrapedtitle,
- text_color=color2, contentType="movie", quality=calidad, language=audios))
+ text_color=color2, contentType="movie", quality=calidad, language=audios,
+ infoLabels={'filtro':filter_list}))
next_page = scrapertools.find_single_match(data, 'href="([^"]+)"[^>]+>Siguiente')
+
+ tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
+
if next_page != "" and item.title != "":
itemlist.append(Item(channel=item.channel, action="peliculas", title=">> Siguiente", url=next_page,
thumbnail=item.thumbnail, extra=item.extra, text_color=color3))
@@ -387,10 +414,10 @@ def ultimos(item):
logger.info()
item.text_color = color2
- if __menu_info__:
- action = "menu_info_episode"
- else:
- action = "episodios"
+ # if __menu_info__:
+ # action = "menu_info_episode"
+ # else:
+ action = "episodios"
itemlist = []
data = httptools.downloadpage(item.url).data
@@ -407,20 +434,16 @@ def ultimos(item):
if "medium-vs" in idiomas: audios.append('VOSE')
if "medium-la" in idiomas: audios.append('LAT')
if "medium-en" in idiomas or 'medium-"' in idiomas:
- audios.append('V.O')
+ audios.append('VO')
title = "%s - %s" % (show, re.sub(show, '', scrapedtitle))
if audios:
title += " [%s]" % "/".join(audios)
url = urlparse.urljoin(host, scrapedurl)
itemlist.append(item.clone(action=action, title=title, url=url, thumbnail=scrapedthumbnail,
- contentTitle=show, fulltitle=show, show=show,
+ contentSerieName=show, fulltitle=show, show=show,
text_color=color2, extra="ultimos", contentType="tvshow"))
- try:
- from core import tmdb
- tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
- except:
- pass
+ tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
next_page = scrapertools.find_single_match(data, 'href="([^"]+)"[^>]+>Siguiente')
if next_page != "":
@@ -444,12 +467,12 @@ def series(item):
for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
url = urlparse.urljoin(host, scrapedurl + "/episodios")
itemlist.append(Item(channel=item.channel, action="episodios", title=scrapedtitle, url=url,
- thumbnail=scrapedthumbnail, contentTitle=scrapedtitle, fulltitle=scrapedtitle,
+ thumbnail=scrapedthumbnail, contentSerieName=scrapedtitle, fulltitle=scrapedtitle,
show=scrapedtitle, text_color=color2, contentType="tvshow"))
try:
from core import tmdb
- tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
+ tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
except:
pass
@@ -512,10 +535,10 @@ def episodios(item):
data = httptools.downloadpage(item.url).data
data_season = data[:]
- if "episodios" in item.extra or not __menu_info__ or item.path:
- action = "findvideos"
- else:
- action = "menu_info_episode"
+ #if "episodios" in item.extra or not __menu_info__ or item.path:
+ action = "findvideos"
+ # else:
+ # action = "menu_info_episode"
seasons = scrapertools.find_single_match(data, ' ]+> |