diff --git a/plugin.video.alfa/addon.xml b/plugin.video.alfa/addon.xml
index c290d918..3f943f9b 100755
--- a/plugin.video.alfa/addon.xml
+++ b/plugin.video.alfa/addon.xml
@@ -1,5 +1,5 @@
-
+
@@ -19,16 +19,13 @@
[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
- » canalpelis » allcalidad
- » cinefox » cineasiaenlinea
- » cinetux » divxatope
- » maxipelis » pedropolis
- » doomtv » animeshd
- » hdfull » ultrapelishd
- » pelisplus » cinecalidad
- » peliculasnu » allpeliculas
- ¤ arreglos internos
- [COLOR green]Gracias a [COLOR yellow]prpeaprendiz[/COLOR] por su colaboración en esta versión[/COLOR]
+ » cinecalidad » estadepelis
+ » datoporn » seriesyonkis
+ » allcalidad » allpeliculas
+ » cartoonlatino » pasateatorrent
+ » vidz7 » zonatorrent
+ » gvideo » okru
+ » openload ¤ arreglos internos
Navega con Kodi por páginas web para ver sus videos de manera fácil.
Browse web pages using Kodi
diff --git a/plugin.video.alfa/channels/allcalidad.py b/plugin.video.alfa/channels/allcalidad.py
index 0894a0a5..ede804b0 100755
--- a/plugin.video.alfa/channels/allcalidad.py
+++ b/plugin.video.alfa/channels/allcalidad.py
@@ -138,8 +138,8 @@ def findvideos(item):
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
filtro=True, action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
- infoLabels={'title': item.fulltitle}, fulltitle=item.fulltitle,
- extra="library"))
+ infoLabels={'title': item.fulltitle}, fulltitle=item.fulltitle
+ ))
return itemlist
diff --git a/plugin.video.alfa/channels/allpeliculas.py b/plugin.video.alfa/channels/allpeliculas.py
index 089ad59f..63481210 100644
--- a/plugin.video.alfa/channels/allpeliculas.py
+++ b/plugin.video.alfa/channels/allpeliculas.py
@@ -1,10 +1,12 @@
# -*- coding: utf-8 -*-
-import string
+import urlparse
from core import httptools
+from core import jsontools
from core import scrapertools
from core import servertools
+from core import tmdb
from core.item import Item
from platformcode import config, logger
@@ -22,6 +24,7 @@ SERVERS = {"26": "powvideo", "45": "okru", "75": "openload", "12": "netutv", "65
"67": "spruto", "71": "stormo", "73": "idowatch", "48": "okru", "55": "openload",
"20": "nowvideo", "84": "fastplay", "96": "raptu", "94": "tusfiles"}
+host = "http://allpeliculas.com/"
def mainlist(item):
logger.info()
@@ -29,32 +32,111 @@ def mainlist(item):
item.text_color = color1
itemlist.append(item.clone(title="Películas", action="lista", fanart="http://i.imgur.com/c3HS8kj.png",
- url="http://allpeliculas.co/Movies/fullView/1/0/&ajax=1"))
- itemlist.append(item.clone(title="Series", action="lista", fanart="http://i.imgur.com/9loVksV.png", extra="tv",
- url="http://allpeliculas.co/Movies/fullView/1/86/?ajax=1&withoutFilter=1", ))
- itemlist.append(item.clone(title="Géneros", action="subindice", fanart="http://i.imgur.com/ymazCWq.jpg"))
- itemlist.append(item.clone(title="Índices", action="indices", fanart="http://i.imgur.com/c3HS8kj.png"))
+ url= host + "movies/newmovies?page=1", extra1 = 0))
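+ # extra1 = 0: no genre filter (generos() passes the genre id in extra1)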
+ itemlist.append(item.clone(title="Por genero", action="generos", fanart="http://i.imgur.com/c3HS8kj.png",
+ url= host + "movies/getGanres"))
itemlist.append(item.clone(title="", action=""))
itemlist.append(item.clone(title="Buscar...", action="search"))
- itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False))
return itemlist
-def configuracion(item):
- from platformcode import platformtools
- ret = platformtools.show_channel_settings()
- platformtools.itemlist_refresh()
- return ret
+def generos(item):
+ logger.info()
+ itemlist = []
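+ # movies/getGanres returns a JSON list of the available genres ('id' and 'label')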
+ data = httptools.downloadpage(item.url).data
+ dict_data = jsontools.load(data)
+ for it in dict_data:
+ itemlist.append(Item(
+ channel = item.channel,
+ action = "lista",
+ title = it['label'],
+ url = host + "movies/newmovies?page=1",
+ extra1 = it['id']
+ ))
+ return itemlist
+
+def findvideos(item):
+ logger.info()
+ itemlist = []
+ data = httptools.downloadpage(item.url).data
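+ # Extract the video links (data-link attribute) together with their quality label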
+ patron = 'data-link="([^"]+).*?'
+ patron += '>([^<]+)'
+ matches = scrapertools.find_multiple_matches(data, patron)
+ for url, calidad in matches:
+ itemlist.append(Item(
+ channel = item.channel,
+ action = "play",
+ title = calidad,
+ url = url,
+ ))
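+ # Detect the server for each link found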
+ itemlist = servertools.get_servers_itemlist(itemlist)
+ itemlist.append(Item(channel=item.channel))
+ if config.get_videolibrary_support():
+ itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
+ filtro=True, action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
+ infoLabels={'title': item.fulltitle}, fulltitle=item.fulltitle
+ ))
+ try:
+ tmdb.set_infoLabels(itemlist, __modo_grafico__)
+ except:
+ pass
+
+ return itemlist
+
+
+def lista(item):
+ logger.info()
+ itemlist = []
+ dict_param = dict()
+ item.infoLabels = {}
+ item.text_color = color2
+
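+ # If a genre was selected (extra1 != 0), it is sent as a JSON filter in the POST body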
+ params = '{}'
+ if item.extra1 != 0:
+ dict_param["genero"] = [item.extra1]
+ params = jsontools.dump(dict_param)
+
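+ # The response is JSON; the movie entries come in the "items" key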
+ data = httptools.downloadpage(item.url, post=params).data
+ dict_data = jsontools.load(data)
+
+ for it in dict_data["items"]:
+ title = it["title"]
+ plot = it["slogan"]
+ rating = it["imdb"]
+ year = it["year"]
+ url = host + "pelicula/" + it["slug"]
+ thumb = urlparse.urljoin(host, it["image"])
+ item.infoLabels['year'] = year
+ itemlist.append(item.clone(action="findvideos", title=title, fulltitle=title, url=url, thumbnail=thumb,
+ plot=plot, context=["buscar_trailer"], contentTitle=title, contentType="movie"))
+
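+ # Pagination: replace the current page number in the URL with the next one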
+ pagina = scrapertools.find_single_match(item.url, 'page=([0-9]+)')
+ item.url = item.url.replace(pagina, "")
+ if pagina == "":
+ pagina = "0"
+ pagina = int(pagina) + 1
+ item.url = item.url + "%s" %pagina
+ if item.extra != "busqueda":
+ itemlist.append(Item(channel = item.channel, action="lista", title="Página %s" % pagina, url=item.url, extra1 = item.extra1
+ ))
+ try:
+ # Obtenemos los datos basicos de todas las peliculas mediante multihilos
+ tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
+ except:
+ pass
+
+ return itemlist
def search(item, texto):
logger.info()
if texto != "":
texto = texto.replace(" ", "+")
- item.url = "http://allpeliculas.co/Search/advancedSearch?searchType=movie&movieName=" + texto + "&ajax=1"
+ item.url = host + "movies/search/" + texto
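+ # With extra = "busqueda", lista() does not add the pagination item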
+ item.extra = "busqueda"
try:
- return busqueda(item)
+ return lista(item)
except:
import sys
for line in sys.exc_info():
@@ -68,7 +150,7 @@ def newest(categoria):
item = Item()
try:
if categoria == "peliculas":
- item.url = "http://allpeliculas.co/Movies/fullView/1/0/&ajax=1"
+ item.url = host + "movies/newmovies?page=1"
item.action = "lista"
itemlist = lista(item)
@@ -83,402 +165,3 @@ def newest(categoria):
return []
return itemlist
-
-
-def busqueda(item):
- logger.info()
- itemlist = []
- item.infoLabels = {}
- item.text_color = color2
-
- data = httptools.downloadpage(item.url).data
- data = data.replace("\n", "").replace("\t", "")
- data = scrapertools.decodeHtmlentities(data)
-
- patron = '(.*?)/.*?' \
- ' (.*?).*?Género: (.*?)'
- matches = scrapertools.find_multiple_matches(data, patron)
- for thumbnail, vote, url, title, year, genre in matches:
- url = "http://allpeliculas.co" + url.replace("#", "") + "&ajax=1"
- thumbnail = thumbnail.replace("/105/", "/400/").replace("/141/", "/600/").replace(" ", "%20")
- titulo = title + " (" + year + ")"
- item.infoLabels['year'] = year
- item.infoLabels['genre'] = genre
- item.infoLabels['rating'] = vote
- if "Series" not in genre:
- itemlist.append(item.clone(action="findvideos", title=titulo, fulltitle=title, url=url, thumbnail=thumbnail,
- context=["buscar_trailer"], contentTitle=title, contentType="movie"))
- else:
- itemlist.append(item.clone(action="temporadas", title=titulo, fulltitle=title, url=url, thumbnail=thumbnail,
- context=["buscar_trailer"], contentTitle=title, contentType="tvshow"))
-
- # Paginacion
- next_page = scrapertools.find_single_match(data, 'class="pagination-active".*?href="([^"]+)"')
- if next_page != "":
- url = next_page.replace("#", "") + "&ajax=1"
- itemlist.append(item.clone(action="lista", title=">> Siguiente", url=url, text_color=color3))
-
- return itemlist
-
-
-def indices(item):
- logger.info()
- itemlist = []
- item.text_color = color1
-
- itemlist.append(item.clone(title="Alfabético", action="subindice"))
- itemlist.append(item.clone(title="Por idioma", action="subindice"))
- itemlist.append(item.clone(title="Por valoración", action="lista",
- url="http://allpeliculas.co/Movies/fullView/1/0/rating:imdb|date:1900-3000|"
- "alphabet:all|?ajax=1&withoutFilter=1"))
- itemlist.append(item.clone(title="Por año", action="subindice"))
- itemlist.append(item.clone(title="Por calidad", action="subindice"))
-
- return itemlist
-
-
-def lista(item):
- logger.info()
- itemlist = []
- item.infoLabels = {}
- item.text_color = color2
-
- data = httptools.downloadpage(item.url).data
- data = data.replace("\n", "").replace("\t", "")
- data = scrapertools.decodeHtmlentities(data)
-
- bloque = scrapertools.find_single_match(data, '([^<]+)<\/span>|)' \
- '.*?(.*?).*?Year.*?">(.*?).*?' \
- '(?:Género|Genre).*?(.*?).*?Language.*?(.*?).*?' \
- '(.*?)<.*?(.*?)<.*?' \
- '(.*?)<'
-
- if bloque == "":
- bloque = data[:]
- matches = scrapertools.find_multiple_matches(bloque, patron)
- for thumbnail, url, trailer, vote, year, genre, idioma, sinopsis, calidad, title in matches:
- url = url.replace("#", "") + "&ajax=1"
- thumbnail = thumbnail.replace("/157/", "/400/").replace("/236/", "/600/").replace(" ", "%20")
- idioma = idioma.replace(" ", "").split(",")
- idioma.sort()
- titleidioma = "[" + "/".join(idioma) + "]"
-
- titulo = title + " " + titleidioma + " [" + calidad + "]"
- item.infoLabels['plot'] = sinopsis
- item.infoLabels['year'] = year
- item.infoLabels['genre'] = genre
- item.infoLabels['rating'] = vote
- item.infoLabels['trailer'] = trailer.replace("youtu.be/", "http://www.youtube.com/watch?v=")
- if item.extra != "tv" or "Series" not in genre:
- itemlist.append(item.clone(action="findvideos", title=titulo, fulltitle=title, url=url, thumbnail=thumbnail,
- context=["buscar_trailer"], contentTitle=title, contentType="movie"))
- else:
- itemlist.append(item.clone(action="temporadas", title=titulo, fulltitle=title, url=url, thumbnail=thumbnail,
- context=["buscar_trailer"], contentTitle=title, show=title,
- contentType="tvshow"))
-
- try:
- from core import tmdb
- # Obtenemos los datos basicos de todas las peliculas mediante multihilos
- tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
- except:
- pass
- # Paginacion
- next_page = scrapertools.find_single_match(data, 'class="pagination-active".*?href="([^"]+)"')
- if next_page != "":
- url = next_page.replace("#", "") + "&ajax=1"
- itemlist.append(item.clone(action="lista", title=">> Siguiente", url=url, text_color=color3))
-
- return itemlist
-
-
-def subindice(item):
- logger.info()
- itemlist = []
-
- url_base = "http://allpeliculas.co/Movies/fullView/1/0/date:1900-3000|alphabet:all|?ajax=1&withoutFilter=1"
- indice_genero, indice_alfa, indice_idioma, indice_year, indice_calidad = dict_indices()
- if "Géneros" in item.title:
- for key, value in indice_genero.items():
- url = url_base.replace("/0/", "/" + key + "/")
- itemlist.append(item.clone(action="lista", title=value, url=url))
- itemlist.sort(key=lambda item: item.title)
-
- elif "Alfabético" in item.title:
- for i in range(len(indice_alfa)):
- url = url_base.replace(":all", ":" + indice_alfa[i])
- itemlist.append(item.clone(action="lista", title=indice_alfa[i], url=url))
-
- elif "Por idioma" in item.title:
- for key, value in indice_idioma.items():
- url = url_base.replace("3000|", "3000|language:" + key)
- itemlist.append(item.clone(action="lista", title=value, url=url))
- itemlist.sort(key=lambda item: item.title)
-
- elif "Por año" in item.title:
- for i in range(len(indice_year)):
- year = indice_year[i]
- url = url_base.replace("1900-3000", year + "-" + year)
- itemlist.append(item.clone(action="lista", title=year, url=url))
-
- elif "Por calidad" in item.title:
- for key, value in indice_calidad.items():
- url = "http://allpeliculas.co/Search/advancedSearch?searchType=movie&movieName=&movieDirector=&movieGenre" \
- "=&movieActor=&movieYear=&language=&movieTypeId=" + key + "&ajax=1"
- itemlist.append(item.clone(action="busqueda", title=value, url=url))
- itemlist.sort(key=lambda item: item.title)
-
- return itemlist
-
-
-def findvideos(item):
- logger.info()
- itemlist = []
- item.text_color = color3
-
- # Rellena diccionarios idioma y calidad
- idiomas_videos, calidad_videos = dict_videos()
-
- data = httptools.downloadpage(item.url).data
- data = data.replace("\n", "").replace("\t", "")
- data = scrapertools.decodeHtmlentities(data)
-
- if item.extra != "library":
- try:
- from core import tmdb
- tmdb.set_infoLabels(item, __modo_grafico__)
- except:
- pass
-
- # Enlaces Online
- patron = '([^<]+)')
- item.infoLabels['trailer'] = trailer_url.replace("youtu.be/", "http://www.youtube.com/watch?v=")
-
- itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler",
- text_color="magenta", context=""))
- if item.extra != "library":
- if config.get_videolibrary_support():
- itemlist.append(Item(channel=item.channel, title="Añadir película a la videoteca",
- action="add_pelicula_to_library", url=item.url, text_color="green",
- infoLabels={'title': item.fulltitle}, fulltitle=item.fulltitle,
- extra="library"))
-
- return itemlist
-
-
-def temporadas(item):
- logger.info()
- itemlist = []
- data = httptools.downloadpage(item.url).data
- try:
- from core import tmdb
- tmdb.set_infoLabels_item(item, __modo_grafico__)
- except:
- pass
-
- matches = scrapertools.find_multiple_matches(data, '([^<]+)')
- item.infoLabels['trailer'] = trailer_url.replace("youtu.be/", "http://www.youtube.com/watch?v=")
-
- itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler",
- text_color="magenta", context=""))
-
- return itemlist
-
-
-def episodios(item):
- logger.info()
- itemlist = []
-
- # Rellena diccionarios idioma y calidad
- idiomas_videos, calidad_videos = dict_videos()
-
- data = httptools.downloadpage(item.url).data
- data = data.replace("\n", "").replace("\t", "")
- data = scrapertools.decodeHtmlentities(data)
-
- patron = ']+season="' + str(item.infoLabels['season']) + '"[^>]+>([^<]+)'
- matches = scrapertools.find_multiple_matches(data, patron)
- capitulos = []
- for title in matches:
- if not title in capitulos:
- episode = int(title.split(" ")[1])
- capitulos.append(title)
- itemlist.append(
- item.clone(action="findvideostv", title=title, contentEpisodeNumber=episode, contentType="episode"))
-
- itemlist.sort(key=lambda item: item.contentEpisodeNumber)
- try:
- from core import tmdb
- tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
- except:
- pass
- for item in itemlist:
- if item.infoLabels["episodio_titulo"]:
- item.title = "%dx%02d: %s" % (
- item.contentSeason, item.contentEpisodeNumber, item.infoLabels["episodio_titulo"])
- else:
- item.title = "%dx%02d: %s" % (item.contentSeason, item.contentEpisodeNumber, item.title)
-
- return itemlist
-
-
-def findvideostv(item):
- logger.info()
- itemlist = []
-
- # Rellena diccionarios idioma y calidad
- idiomas_videos, calidad_videos = dict_videos()
-
- data = httptools.downloadpage(item.url).data
- data = data.replace("\n", "").replace("\t", "")
- data = scrapertools.decodeHtmlentities(data)
-
- patron = '')
- matches = scrapertools.find_multiple_matches(bloque_idioma, '')
- for key1, key2 in matches:
- idiomas_videos[key1] = unicode(key2, "utf8").capitalize().encode("utf8")
- bloque_calidad = scrapertools.find_single_match(data, '